% (removed dataset-extraction artifact that preceded the LaTeX source:
%  "text / stringlengths 56 / 7.94M / | / ---|")
\begin{document}
\newtheorem{remark}[theorem]{Remark}
\title{Eager Functions as Processes}
\author{Adrien Durier}
\affiliation{
\institution{Univ. Lyon, ENS de Lyon, CNRS,\\ UCB Lyon 1, LIP UMR 5668}
}
\author{Daniel Hirschkoff}
\affiliation{
\institution{Univ. Lyon, ENS de Lyon, CNRS,\\ UCB Lyon 1, LIP UMR 5668}
}
\author{Davide Sangiorgi}
\affiliation{
\institution{Universit{\`a} di Bologna and INRIA}
}
\begin{abstract}
We study Milner's encoding of the call-by-value $\lambda$-calculus
into the $\pi$-calculus. We show that, by tuning the encoding to two
subcalculi of the $\pi$-calculus (Internal $\pi$ and Asynchronous Local
$\pi$), the equivalence on $\lambda$-terms induced by the encoding
coincides with Lassen's eager normal-form bisimilarity, extended to
handle $\eta$-equality. As behavioural equivalence in the
$\pi$-calculus we consider contextual equivalence and barbed
congruence. We also extend the results to preorders.
A crucial technical ingredient in the proofs is the
recently-intro\-du\-ced technique of unique solutions of equations,
further developed in this paper. In this respect, the paper also
intends to be an extended case study on the applicability and
expressiveness of the technique.
\end{abstract}
\begin{CCSXML}
<ccs2012>
<concept>
<concept_id>10011007.10011006.10011008</concept_id>
<concept_desc>Software and its engineering~General programming languages</concept_desc>
<concept_significance>500</concept_significance>
</concept>
<concept>
<concept_id>10003456.10003457.10003521.10003525</concept_id>
<concept_desc>Social and professional topics~History of programming languages</concept_desc>
<concept_significance>300</concept_significance>
</concept>
</ccs2012>
\end{CCSXML}
\ccsdesc[500]{Software and its engineering~General programming languages}
\ccsdesc[300]{Social and professional topics~History of programming languages}
\keywords{pi-calculus, lambda-calculus, full abstraction, call-by-value}
\copyrightyear{2018}
\acmYear{2018}
\setcopyright{licensedothergov}
\acmConference[LICS '18]{LICS '18: 33rd Annual ACM/IEEE Symposium on Logic in Computer Science}{July 9--12, 2018}{Oxford, United Kingdom}
\acmBooktitle{LICS '18: 33rd Annual ACM/IEEE Symposium on Logic in Computer Science, July 9--12, 2018, Oxford, United Kingdom}
\acmPrice{15.00}
\acmDOI{10.1145/3209108.3209152}
\acmISBN{978-1-4503-5583-4/18/07}
\maketitle
% Duplicate abstract removed: in acmart the abstract must be given once,
% before \maketitle, where it already appears above.
\section*{Introduction}
Milner's work on functions as processes~\cite{milner:inria-00075405,encodingsmilner},
that shows how the evaluation strategies of {\em call-by-name
$\lambda$-calculus} and {\em call-by-value
$\lambda$-calculus}~\cite{Abr88,DBLP:journals/tcs/Plotkin75} can be faithfully mimicked in
the $\pi$-calculus,
is generally considered a
landmark in Concurrency Theory, and more generally in Programming Language Theory.
The comparison with the $\lambda$-calculus
is a significant expressiveness test for
the $\pi$-calculus.
More than that,
it
promotes the $\pi$-calculus to be a basis for general-purpose
programming languages in which communication is the fundamental
computing primitive.
From the $\lambda$-calculus point of view, the comparison provides the means to
study
$\lambda$-terms
in contexts other than purely sequential ones,
and with the instruments available to reason about
processes.
Further, Milner's work, and the works that followed it, have contributed to
understanding and developing the theory of the $\pi$-calculus.
More precisely, Milner shows the operational correspondence between
reductions in the $\lambda$-terms and in the encoding $\pi$-terms.
He then uses the correspondence to prove that the encodings are
\emph{sound}, i.e., if the processes encoding
two $\lambda$-terms are
behaviourally equivalent, then the source $\lambda$-terms are also
behaviourally equivalent in the $\lambda$-calculus.
Milner also shows that the converse, \emph{completeness}, fails, intuitively because
the encodings allow one to test the $\lambda$-terms in all contexts
of the $\pi$-calculus~--- more
diverse than those of the $\lambda$-calculus.
The main problem that Milner's work left open is the characterisation
of the equivalence on $\lambda$-terms induced by the encoding, whereby
two $\lambda$-terms are equal if their encodings are behaviourally
equivalent $\pi$-calculus terms.
The question is largely independent of the precise form of
behavioural equivalence adopted in the $\pi$-calculus
because the encodings are deterministic (or at
least confluent). In the
paper we consider contextual equivalence (that coincides with may
testing and trace equivalence) and barbed congruence (that coincides
with bisimilarity).
\iffull
In the $\pi$-calculus one normally uses bisimilarity (or
its
contextual correspondent, barbed congruence) --- the same will be done in this paper.
\DS{maybe here shows that original quote by Milner}
\fi
For the call-by-name $\lambda$-calculus, the answer was found shortly later
\cite{San93,cbn}:
the equality induced is the equality of L{\'e}vy-Longo
Trees~\cite{LONGO1983153}, the lazy
variant of B{\"o}hm Trees.
It is actually also possible to
obtain B{\"o}hm Trees, by modifying the call-by-name encoding so to allow also reductions
underneath a $\lambda$-abstraction, and by including divergence among the observables \cite{xian}.
These results show that, at least for call-by-name, the $\pi$-calculus encoding, while not
fully abstract for the contextual equivalence of the $\lambda$-calculus, is in remarkable
agreement with the theory of the $\lambda$-calculus: several well-known models of the
$\lambda$-calculus yield L{\'e}vy-Longo Trees
or B{\"o}hm Trees as
their induced equivalence~\cite{levy75,LONGO1983153,barendregt1984lambda}.
For call-by-value, in contrast, the problem of identifying the equivalence induced by the
encoding has remained open, for two
main reasons. First, tree structures in call-by-value are less studied
and less established than in call-by-name. Secondly, proving
completeness of an encoding of $\lambda$ into $\pi$ requires sophisticated proof techniques. For
call-by-name, for instance, a central role is played by
\emph{bisimulation up-to contexts}. For call-by-value, however,
existing proof techniques, including `up-to contexts',
appeared not to be powerful enough.
In this paper we study the above open problem for call-by-value.
Our main result is that the equivalence induced on $\lambda$-terms by their call-by-value
encoding into the $\pi$-calculus is
\emph{eager normal-form bisimilarity} \cite{lassentrees,lassentrees2}.
This is a tree structure for call-by-value,
proposed by Lassen as the call-by-value counterpart
of
L{\'e}vy-Longo Trees.
Precisely we obtain the variant that is insensitive to
$\eta$-expan\-sion, called \emph{$\eta$-eager
normal-form bisimilarity}.
\iffull
\DS{maybe show here the key rule}
\fi
To obtain the results we have however to make a few adjustments
to Milner's encoding and/or specialise the target language of the encoding.
These adjustments have to do with the presence
of free
outputs
(outputs of known names) in the encoding.
\iffull
Milner had initially
translated call-by-value $\lambda$-variables using a free output:
\begin{equation}
\label{e:VarOne}
\app{ \enca {x}} p \defi \out px
\enspace.
\end{equation}
However this rule is troublesome for
the validity of $\betav$-reduction
(the property that $\lambda$-terms that are equated {\alert
[``related'' instead of ``equated''?]} by
$\betav$-reduction~--- the call-by-value $\beta$-reduction~---
are also equal in the $\pi$-calculus).
Milner solved the problem by ruling out the initial
free output thus:
\begin{equation}
\label{e:VarMore}
\app{ \enca {x}} p \defi \res y \out p y. ! \inp y{\tila}. \out x
\tila
\enspace.
\end{equation}
It was indeed shown later \cite{..} that with \reff{e:VarOne}
the validity of $\betav$-reduction fails.
Accordingly, the final journal paper~\cite{encodingsmilner} does not even mention
encoding~\reff{e:VarOne}.
If one wants to maintain the simpler rule \reff{e:VarOne},
then the validity of $\betav$-reduction can be regained
by taking, as target language, a subset of the $\pi$-calculus
in which only the output capability of names is communicated.
This can be enforced either by imposing a behavioural type system including capabilities
\cite{...}, or by syntactically taking a dialect of the $\pi$-calculus in which only the
output capability of names is communicated, such as Local $\pi$~\cite{localpi}.
The encoding \reff{e:VarMore} still makes use of free
outputs~--- the final particle $\out x
\tila$.
While this limited form of free
output is harmless for the validity of $\betav$-reduction,
we show in the paper that
\DSa such free outputs
bring problems when analysing $\lambda$-terms with free
variables: desirable call-by-value equalities fail.
\fi
We show in the paper that
this
brings problems when analysing $\lambda$-terms with free
variables: desirable call-by-value equalities fail.
An example is given by the law:
\begin{equation}
\label{eq:nonlaw}
I(x\val) = x\val
\end{equation}
where $I$ is $\abs zz$ and $\val$ is a value.
\iffull
\DS{references?}
\DS{discuss example; and explain that the law is valid in all theories of open
call-by-value -- ask $\lambda$-calculus people here, maybe }
{\alert As for the validity of $\betav $-reduction, [DH: I don't
understand this part of the sentence]} there are two possible
solutions:
\else
Two possible
solutions are:
\fi
\begin{enumerate}
\item rule out the free outputs; this essentially means transplanting the encoding
onto the Internal $\pi$-calculus~\cite{internalpi}, a version of the
$\pi$-calculus in which any name emitted in an output is fresh;
\item control the use of capabilities in the $\pi$-calculus; for
instance taking Asynchronous Local
$\pi$~\cite{localpi} as the target of the translation.
(Controlling capabilities allows one to impose a directionality on
names, which, under certain
technical conditions, may hide the identity of the emitted names.)
\end{enumerate}
In the paper we consider both approaches, and show that
in both cases, the equivalence induced coincides with
$\eta$-eager
normal-form bisimilarity.
In summary, there are two contributions in the paper:
\begin{enumerate}
\item Showing that Milner's encoding fails to equate terms that should be equal in call-by-value.
\item Rectifying the encoding, by considering different target calculi, and
investigating Milner's problem in such a setting.
\end{enumerate}
The rectification we make does not really change the essence of the encoding~---
in one case, the encoding actually remains the same. Moreover, the languages used
are well-known dialects of the \pc, studied in the literature for other reasons.
In the encoding, they allow us to avoid certain accidental misuses of the names
emitted in the communications. The calculi were not known at the time of Milner's
paper \cite{encodingsmilner}.
A key role in the completeness proof is played by a technique of
\emph{unique solution of equations}, recently proposed~\cite{usol}.
The structure induced by Milner's
call-by-value encoding was expected to look like Lassen's trees;
however existing proof
techniques did not seem powerful enough to prove it.
The unique solution technique allows one to derive process bisimilarities from
equations whose infinite unfolding does not introduce divergences, by
proving that the processes are solutions of the same equations. The
technique can be generalised to possibly-infinite systems of
equations, and can be strengthened by allowing certain
kinds of divergences in equations. In this respect, another goal
of the paper is to carry out an extended case study on the
applicability and expressiveness of the techniques. Then,
by-products of the study are a few further developments of the
technique.
In particular, one such result allows us to transplant uniqueness of
solutions from a system of equations, for which divergences
are easy to analyse, to another one. Another result is about
the application of the technique to preorders.
Finally, we consider
preorders~--- thus referring to the
preorder on $\lambda$-terms induced by a behavioural preorder on their $\pi$-calculus
encodings.
We introduce a preorder on Lassen's trees (preorders had not been considered by Lassen)
and show that this is the preorder on $\lambda$-terms induced by the call-by-value
encoding, when the behavioural relation on $\pi$-calculus terms is
the ordinary contextual preorder (again, with the caveat of points (1)
and (2) above).
With the move from equivalences to preorders, the overall structure of
the proofs of our full abstraction results remains the same.
However, the impact on the application of the unique-solution
technique is substantial, because the phrasing of this technique in
the cases of preorders and of equivalences is quite different.
\paragraph{Further related work.}
The standard behavioural equivalence in the $\lambda$-calculus
is contextual equivalence. Encodings into the $\pi$-calculus
(be it for call-by-name or call-by-value) break contextual equivalence
because $\pi$-calculus contexts
are richer than those in the (pure) $\lambda$-calculus. In the paper
we try to understand how far beyond contextual equivalence the
discriminating power of the $\pi$-calculus brings us, for
call-by-value.
The opposite approach is to restrict the set of `legal' $\pi$-contexts
so to remain faithful to contextual equivalence. This approach has been
followed, for call-by-name, and using type systems, in
\cite{BHYseqpi,toninho:yoshida:esop18}.
Open call-by-value has been studied in~\cite{accattolicbv}, where the
focus is on operational properties of $\lambda$-terms; behavioural
equivalences are not considered.
An extensive presentation of call-by-value, including denotational
models,
is Ronchi della Rocca and
Paolini's book~\cite{DBLP:series/txtcs/RoccaP04}.
In~\cite{usol}, the unique-solution technique is used in the
completeness proof for Milner's call-by-name encoding. That proof
essentially revisits the proof of~\cite{cbn}, which is based on
bisimulation up-to context. We have explained
above that the case for
call-by-value is quite different.
\paragraph{Structure of the paper.} We recall basic definitions about
the call-by-value $\lambda$-calculus and the $\pi$-calculus in
Section~\ref{s:background}. The technique of unique solution of
equations is introduced in Section~\ref{s:usol}, together with some
new developments. Section~\ref{s:enc:cbv}
presents our analysis of Milner's encoding, beginning with the shortcomings
related to the presence of free outputs.
{
The first solution to these shortcomings is to move to the Internal
$\pi$-calculus: this is described in Section~\ref{s:enc:pii}. }
For the proof of completeness, in Section~\ref{s:complete},
we rely on unique solution of equations; we also compare
such technique with the `up-to techniques'.
{
The second solution is to move to the Asynchronous Local
$\pi$-calculus: this is discussed in
Section~\ref{s:localpi}.}
We show in Section~\ref{s:contextual} how our
results can be adapted to preorders and to contextual equivalence.
\iffull
, and analyse the model
of call-by-value yielded by our results in Section~\ref{s:model}.
\fi
Finally in Section~\ref{s:concl} we highlight conclusions and
possible future work.
\section{Background material}
\label{s:background}
Throughout the paper, $\R$ ranges over relations.
The composition of two relations
$\R$ and $\R'$ is written
$\R \: \R'$.
We often use infix notation for relations; thus
$P \RR Q$ means ${(P, Q)}\in\R$.
A tilde represents a tuple.
The $i$-th element of a tuple $\til P$ is referred to as $P_i$.
Our notations are extended to tuples componentwise. Thus
$\til P \RR \til Q$ means $P_i \RR Q_i$
for all components.
\ifapp
We anticipate that
Appendix \ref{a:tab}
presents a summary of the behavioural relations
used in this paper.
\fi
\subsection{The call-by-value \lc}
We let $x$ and $y$ range over the set of $\lambda$-calculus variables.
The set $\Lao$ of $\lambda$-terms is
defined by the grammar
\begin{center}
$ M := \; x \midd \lambda x. M \midd
M_1 M_2 \, .$
\end{center}
Free variables, closed terms, substitution,
$\alpha$-conversion
etc.\ are defined as usual \cite{barendregt1984lambda,DBLP:books/cu/HindleyS86}.
Here and in the rest of the paper (including when reasoning about
$\pi$ processes), we adopt the usual
``Barendregt convention''. This will allow us to assume freshness
of bound variables and names whenever needed.
The set of free variables
in the term $M$ is $\fv M$.
\iffull
, and the
subclass of $\Lao$ only containing
the closed terms is $\Lambda $.
\fi We group brackets on the left; therefore $M N L $
is $(M N ) L$.
We abbreviate $\lambda x_1. \cdots. \lambda x_n.M $ as
$\lambda x_1 \cdots x_n.M $, or $\lambda \tilde{x}. M$ if the length of
$\tilde x$ is not important.
Symbol $\Omega $ stands for the always-divergent term
$(\lambda x . x x)(\lambda x . x x)$.
\txthere{explain the following things ("defined as usual"?):}{scope
and associativity in the \lc, substitutions, free variables
$\fv{M}$, $\fv{M,N}$ is short for $\fv M\cup \fv N$, values and
evaluation contexts as syntactic categories (of terms and contexts),
open terms (here, everything is defined for open terms)}
A \emph{context} is a term with a hole \hdot,
possibly occurring more than once. If $C$ is a context, $C[M]$ is
a shorthand for $C$ where the hole \hdot is substituted by $M$. An
\emph{evaluation context} is a special kind of
\iffull
inductively defined
\fi
context,
with exactly one hole \hdot, and in which the inserted term can
immediately run. In the pure $\lambda$-calculus \emph{values} are abstractions and variables.
\begin{center}
\begin{tabular}{rl}
{Evaluation contexts} & $\evctxt\scdef \hdot\OR \evctxt M\OR\val \evctxt$ \\
{Values} & $\val\scdef x \OR \abs x M$
\end{tabular}
\end{center}
\iffull
, and we
accordingly write \fv\evctxt.
\fi
In call-by-value, substitutions replace variables with values; we call
them \emph{value substitutions}.
Eager reduction (or $\betav$-reduction),
${\red}
\subseteq \Lao \times \Lao $,
\iffull
(for our purposes
we need it
defined on open terms)
\fi
is determined by the rule:
\txthere{}{Eager reduction relation (definition?):}
$$ \evctxt[(\lambda x . M) \val]\red \evctxt[M\{\val/x\}]
\enspace.
$$
We write \reds\ for the reflexive transitive closure of \red.
A term in \emph{eager normal form} is a term that has no eager
reduction.
\begin{proposition}
\iffull
The following hold:
\fi
\begin{enumerate}
\iffull
\item Any term $M$ either is a value or admits a unique decomposition
$M=\evctxt[\val {\valp}]$.
\fi
\item If $M\red M'$, then $\evctxt[M]\red \evctxt[M']$ and $M\sigma
\red M'\sigma$, for any value substitution $\sigma$.
\item Terms in eager normal form are either values or of the shape
$\evctxt[x \val]$.
\end{enumerate}
\end{proposition}
Therefore, given a term $M$, either $M\reds M'$ where $M'$ is a term in
eager normal form, or there is an infinite reduction sequence
starting from $M$.
In the first case, $M$ \emph{has eager
normal form $M'$}, written $M\converges M'$, in the second $M$
\emph{diverges}, written $M\diverges$. We write $M\converges$ when
$M\converges M'$ for some $M'$.
\begin{definition}[Contextual equivalence]\label{d:ctxeq}
Given $M,~N\in \Lao$, we say that $M$ and $N$ are contextually
equivalent, written $M\ctxeq N$, if for any context $C$, we have
$C[M]\converges$ iff $C[N]\converges$.
\end{definition}
\subsection{Tree semantics for call-by-value}
\label{s:back:trees}
\iffull
In this section
\fi
We recall
\iffull
Lassen's
\fi
\emph{eager normal-form bisimilarity} \cite{lassentrees,lassentrees2,lassenfa}.
\begin{definition}[Eager normal-form bisimulation]
\label{enfbsim}
A relation $\R$ between \lterms is an \emph{eager normal-form bisimulation} if, whenever
$M\RR N$, one of the following holds:
\begin{enumerate}
\item both $M$ and $N$ diverge;
\item
\label{ie:split}
$M\converges \evctxt[x\val]$ and $N\converges \evctxtp[x\valp]$ for
some $x$, values $\val$, $\valp$,
and evaluation contexts
$\evctxt$ and $\evctxtp$ with $\val\RR
\valp$ and $\evctxt[z]\RR \evctxtp[z]$ for a fresh $z$;
\item $M\converges \abs x M'$ and $N\converges \abs x N'$ for some $x$, $M'$, $N'$ with $M'\RR N'$;
\item $M\converges x$ and $N\converges x$ for some $x$.
\end{enumerate}
\emph{Eager normal-form bisimilarity}, $\enf$, is the largest eager normal-form bisimulation.
\end{definition}
Essentially, the structure of a $\lambda$-term that is unveiled by
Definition~\ref{enfbsim} is that of a (possibly infinite) tree
obtained by repeatedly applying $\betav$-reduction, and branching a tree whenever
instantiation of a variable is needed to continue the reduction (clause \reff{ie:split}).
We call such trees \emph{Eager Trees} (ETs) and accordingly also call
eager normal-form bisimilarity the \emph{Eager-Tree equality}.
\begin{example}
\label{exa:cteq}
Relation $\enf$ is strictly finer than contextual equivalence
$\ctxeq$: the inclusion ${\enf} \subseteq {\ctxeq}$ follows from the
congruence properties of $\enf$ \cite{lassentrees}; for the strictness,
examples are the following equalities, that hold for $ \ctxeq$ but not
for $\enf$:
$$
\Omega = (\abs y \Omega) (x\val)
\qquad
x\val = (\abs y x\val)(x\val)
\enspace.
$$
\end{example}
\begin{example}[$\eta$ rule]
\label{exa:eta}
The $\eta$-rule is not valid for $\enf$. For instance, we have
$\Omega \not\enf \abs x \Omega x$.
The rule is not even valid on values, as we also have
$ \abs y x y \not\enf x$. It holds however
for abstractions:
$ \abs y (\abs xM) y\enf \abs xM
$ when $y\notin\fv{M}$.
\end{example}
The failure of the $\eta$-rule $ \abs y x y \not\enf x$ is
troublesome as, under any closed value substitution, the two terms are
indeed {
eager normal-form bisimilar} (as well as contextually equivalent).
Thus \emph{$\eta$-eager normal-form bisimilarity}~\cite{lassentrees} takes
$\eta$-expansion into account so to recover such missing equalities.
\begin{definition}[\enfbsim] \label{enfebsim}
A relation $\R$ between \lterms is an \emph{\enfbsim} if, whenever $M\RR N$, either one of the clauses of Definition \ref{enfbsim},
or one of the two following additional clauses, hold:
\begin{enumerate}
\setcounter{enumi}{4}
\item\label{lab:five} $M\converges x$ and $N\converges \abs y N'$ for some
$x$, $y$, and $N'$ such that $ N'\converges\evctxt[x\val]$,
with $y\RR \val$ and $z\RR
\evctxt[z]$ for some value $\val$, evaluation context
\evctxt, and fresh $z$.
\item\label{def:enfe:case:eta}
the converse of \reff{lab:five}, i.e.,
$N\converges x$ and
$M\converges \abs y M'$ for some
$x$, $y$, and $M'$ such that $ M'\converges\evctxt[x\val]$,
with
$ \val\RR y$ and
$
\evctxt[z]\RR z$ for some value $\val$, evaluation context
\evctxt, and fresh $z$.
\end{enumerate}
Then \emph{$\eta$-eager normal-form bisimilarity}, $\enfe$, is the largest $\eta$-eager normal-form bisimulation.
\end{definition}
We sometimes call relation $\enfe$ the \emph{$\eta$-Eager-Tree equality}.
\begin{remark}Definition~\ref{enfebsim} coinductively allows
$\eta$-expansions to occur underneath other $\eta$-expansions, hence
trees with infinite $\eta$-expansions may be equated with finite trees.
For instance, $$x\enfe \abs y xy\enfe \abs y x (\abs z yz)\enfe \abs y x (\abs z y(\abs w zw))\enfe\dots$$
A concrete example is given by taking a fixpoint $Y$, and setting
$f \defi (\abs {zxy} x(zy))$. We then have $Yfx \reds \abs y x (Yfy)$, and then
$x(Yfy)\reds x(\abs z y(Yfz))$, and so on. Hence,
we have $x\enfe Yfx$.
\end{remark}
\subsection{The \pc, \Intp\ and \alpi}
\label{s:back:pi}
In all encodings
we consider,
the encoding of a $\lambda$-term
is parametric on a name,
i.e., it is a
function from names to $\pi$-calculus
processes. We also need parametric processes (over one or several names) for writing recursive process definitions
and equations.
We call such parametric processes {\em abstractions}.
The actual instantiation of the parameters of an abstraction $F$ is done
via the {\em application} construct $\app F \tila$.
We use $P,Q$ for processes, $F$ for abstractions.
Processes and abstractions form the set of {\em $\pi$-agents} (or
simply \emph{agents}), ranged
over by $A$.
Small letters
$a,b, \ldots, x,y, \ldots$
range over the infinite set of names.
The grammar of the $\pi$-calculus is thus:
$$ \begin{array}{ccll}
A & := & P \midd F & \mbox{(agents)}\\[\mypt]
P & := & \nil \midd \inp a \tilb . P \midd \out a \tilb . P
\midd \res a P
& \mbox{(processes)} \\[\myptSmall]
& &
\midd P_1 | P_2 \midd ! \inp a \tilb . P
\midd \app F \tila
\\[\mypt]
F & := & \bind \tila P \midd K & \mbox{(abstractions)}
\end{array}
$$
In prefixes $\inp a \tilb$ and $\out a \tilb$, we call
$a$ the {\em subject} and $\tilb$ the {\em object}.
\iffull
We use $\alpha $
to range over prefixes.
\fi When the tilde is empty, the surrounding brackets in prefixes
will be omitted.
We
often abbreviate
\iffull
$\alpha . \nil$ as $\alpha $, and
\fi
$\res a \res b P$ as $\resb{
a,b} P$.
An input prefix $a (\tilb) .P$, a restriction
$\res{b} P$, and an abstraction $\bind\tilb P$ are binders for
names $\tilb$ and $b$, respectively, and give
rise in the expected way to the definition of {\em free names}
(\mbox{\rmsf fn}) and {\em
bound names} (\mbox{\rmsf bn})
of a term or a prefix, and $\alpha$-conversion.
An agent is \emph{name-closed} if it does not contain free names.
As in the $\lambda$-calculus, following the
usual Barendregt convention
we identify
processes or actions which only differ on the choice of the
bound names.
The symbol $=$ will
mean ``syntactic identity modulo
$\alpha$-conversion''.
Sometimes, we use $\defi$ as abbreviation mechanism, to
assign a name to an expression to which we want to refer later.
We use constants, ranged over by $K$ for writing recursive definitions. Each
constant has a defining equation of the form
$K \Defi \bind{\tilx} P$, where $\bind{\tilx} P $ is name-closed; $\tilx$
are the formal parameters of
the constant (replaced by the actual parameters whenever the constant
is used).
Since the calculus is polyadic,
we assume a \emph{sorting system}~\cite{milner1993polyadic}
to avoid disagreements in the arities of
the tuples of names
carried by a given name
and in applications of abstractions.
We will not present the sorting system
because it is not essential.
The reader should
take for granted that all agents described obey a sorting.
A \emph{context} $\qct$ of $\pi$ is a $\pi$-agent in which some
subterms have been replaced by the hole $\hdot{}$ or, if the context is
polyadic, with indexed holes $\holei 1, \ldots, \holei n$;
then
$\ct A$ or $\ct {\til A}$
is the agent resulting from replacing the holes with the terms $A$ or
$\til A$.
We omit the operators of sum and matching (not needed in
the encodings).
\iffull
$\nil$ is the inactive process. An input-prefixed process $a (\tilb) .
P$, where $\tilb$ has pairwise distinct components,
waits for a tuple of names $\tilc$
to be sent along $a$ and then behaves like
$P \sub {\tilc} \tilb $, where $\sub{\tilc}\tilb $ is the
simultaneous
substitution
of names
$\tilb$ with names $\tilc$. An output particle
$\opw a \tilb $ emits names $\tilb$ at $a$.
Parallel composition is to run two processes in parallel.
The restriction $\res a P$ makes name $a$ local, or private, to $P$.
A replication $ \bango P$ stands for a countable infinite number
of copies of $P$ in parallel.
\else
We refer to~\cite{milner1993polyadic} for detailed discussions on the
operators of the language.
\fi
We assign parallel composition the lowest precedence among the
operators.
\iffull
Substitutions are of the form $\sub \tilb \tila$, and
are finite assignments of names to names.
We use $\sigma$ and $\rho$ to range over substitutions.
The application of a
substitution $\sigma $ to an expression $H$ is
written $H \sigma $.
Substitutions have
precedence over the operators of the language; $\sigma
\rho$ is the composition of substitutions where $\sigma $ is performed
first,
therefore $P \sigma
\rho$ is $ (P \sigma ) \rho$.
\fi
\iffull
Throughout the paper, we allow ourselves some freedom in the use
of
$\alpha$-conversion on names; thus we assume that
the application of a substitution
does not affect
bound names of expressions;
similarly, when comparing the transitions of two processes, we
assume that the bound names of the transitions do not occur free in
the processes.
In a statement, we say that a name is {\em fresh} to mean that it
is different from any other name which
occurs in the statement or in objects of the statement like
processes and substitutions.
\fi
\paragraph{Operational semantics.}\ifapp
The operational semantics of the $\pi$-calcu\-lus is standard
\cite{SW01a} (including the labelled transition system),
and given in Appendix \ref{a:opsem}.
\else
The operational semantics of the $\pi$-calcu\-lus is standard
\cite{SW01a} (including the labelled transition system).
\fi
The reference behavioural equivalence for $\pi$-calculi will be
the usual \emph{barbed congruence}.
We recall its definition, on a generic subset $\LL$ of
$\pi$-calculus processes.
A \emph{$\LL$-context} is a process of $\LL$ with a single
hole $\contexthole$ in it (the hole has a sort too, as it could be in
place of an abstraction).
We write $P \Dwa_{a}\;$
if $P$ can make an output action
whose subject is $a$, possibly after some internal moves. (We make only
output observable because this is standard in asynchronous
calculi; adding also observability of inputs does not affect barbed
congruence on the synchronous calculi we will consider.)
\iffull
\daniel{
maybe rephrase, like\\
``in the case of a synchronous calculus like \Intp, Definition~\ref{d:bc}
below yields synchronous barbed congruence, and adding also
observability of inputs does not change the induced equivalence.''}
Details on this, and on the transition system for $\pi$-calculi, as
well other aspects of their operational semantics
are given in Appendix \ref{a:opsem}.
\fi
\begin{definition}[Barbed congruence]
\label{d:bc}
{\em Barbed bisimilarity} is the largest
symmetric relation $ \wbb$ on
$\pi$-calculus processes
such that
$P \wbb Q$ implies:
\begin{enumerate}
\item
If $P \Longrightarrow P'$ then there is $ Q'$ such that
$Q \Longrightarrow Q'$ and
$P' \wbb Q'$.
\item $P \Dwa_{ a}\/$ iff $Q \Dwa_{a}\/$.
\end{enumerate}
Let $\LL$ be a set of $\pi$-calculus agents, and
$A, B \in \LL$. We say that $A$ and $B$ are
{\em barbed congruent in
$\LL$}, written $A \wbc\LL B$,
if for each (well-sorted) $\LL$-context $C$, it holds that $C[A] \wbb C[B]$.
\end{definition}
\begin{remark}
\label{r:abs}
Barbed congruence has been uniformly defined on processes and
abstractions (via a quantification on all process contexts).
Usually, however,
definitions will only be given for processes; it is
then intended that they are extended to abstractions by requiring
closure under ground parameters, i.e., by supplying fresh names as
arguments.
\end{remark}
As for
all contextually-defined behavioural relations, so
barbed congruence is
hard to work with. In all calculi we consider, it can be
characterised in terms of \emph{ground bisimilarity}, under the (mild)
condition that the processes are image-finite up to $\bsim$.
{
(We recall that
the class of processes {\em image-finite up to \bsim}
is the largest subset ${\mathcal {IF}}$ of
$\pi$-calculus processes which is derivation closed and such that
$P \in {\mathcal {IF}}$ implies that, for all actions $\mu$,
the set
$\{P' \st P \Arr{\mu } P'\}$
quotiented by \bsim\
is finite. The definition is extended to abstractions as in
Remark~\ref{r:abs}.) }
\iffull
An agent $F$ is image-finite if its ground
instantiation $\app F \tila$, where $\tila$ are fresh, is an
image-finite process.
\fi
All the agents in the paper, including those obtained by encodings
of the $\lambda$-calculus, are image-finite up to \bsim.
The distinctive feature of \emph{ground} bisimilarity is that it does not
involve instantiation of the bound names of inputs (other than by
means of fresh names), and similarly for abstractions.
In the remainder, we omit the adjective `{ground}'.
\begin{definition}[Bisimilarity]
\label{d:bisimulation}
A symmetric relation $\R$
on $\pi$-pro\-cesses is a
\emph{bisimulation}, if whenever $P \,\R\, Q$ and $P \arr\mu P'$, then $Q \Arcap\mu Q'$
for some $Q'$ with $P' \,\R\, Q'$.
Processes $P$ and $Q$ are \emph{bisimilar}, written
$P\approx Q$, if $P \,\R\, Q$ for some bisimulation $\R$.
\end{definition}
\iffull
We extend $\approx$ to abstractions:
$F
\approx G
$ if $\app {F}\tilb \approx \app {G} \tilb $ for fresh $\tilb$.
\fi
\iffull
Transitions are of the form $ P \arr{\inp a \tilb}P'$ (an input, $\tilb$
are the bound names of the input prefix that has been fired),
$P
\arr{\res {\til{d}}\out a\tilb}P'$ (an output, where $\til d \subseteq
\tilb$ are private names extruded in the output), and $P \arr\tau P'$
(an internal action). We use $\mu$ to range over the labels of
transitions.
We write
$\Arr {}$
for the reflexive transitive closure of $\arr{\tau}$, and
$\Arr{\mu}$ for $\Longrightarrow \arr{\mu}\Longrightarrow$; then
$\Arcap \mu$ is $\Arr{\mu}$ if $\mu$ is not $\tau$, and $\Arr{}$
otherwise.
In
bisimilarity or other behavioural relations for the
$\pi$-calculus we consider, no name instantiation
is used in the input clause or elsewhere; technically, the relations are \emph{ground}.
In the subcalculi we consider ground bisimilarity is a congruence and coincides with
barbed congruence (congruence breaks in the full $\pi$-calculus). Besides the simplicity
of their definition, the ground relations make more effective the theory of unique
solutions of equations (checking divergences will be simpler, see Section~\ref{s:usol}).
\fi
We will use two subcalculi: the Internal $\pi$-calculus (\Intp),
and the Asynchronous Local $\pi$-calculus (\alpi), obtained by placing certain constraints
on prefixes.
\paragraph{\Intp.}
In \Intp, all outputs are bound. This is
syntactically enforced by replacing the output construct with
the bound-output construct $\bout a \tilb .P$, which, with respect to
the grammar
of the ordinary $\pi$-calculus, is an abbreviation for $\res \tilb
\out a \tilb . P$. In all tuples (input, output, abstractions, applications) the
components are pairwise distinct, so as to ensure that distinctions among names are
preserved by reduction.
\txthere{Forwarders}{How to fix forwarders for internal pi: infinite forwarders. We need also:\\
We consider a sorting system over the channel names, so that a name carries its sort (and therefore the number of names that are transmitted over it, as well as whether the channel is linear or not).
}
\paragraph{\alpi.}
\alpi\ is defined by enforcing that in an input $\inp a\tilb.P$, all
names in $\tilb$ appear only in output position in $P$.
Moreover,
\alpi{} being \emph{asynchronous},
output prefixes have no continuation;
in the grammar of the $\pi$-calculus this corresponds to having only outputs of the form
$\out a\tilb.\nil$ (which we will simply write $\out a\tilb$).
In
\alpi, to maintain the characterisation of barbed congruence as (ground) bisimilarity,
the transition system has to be modified~\cite{localpi},
allowing the dynamic introduction of additional processes (the
`links', sometimes also
called forwarders).
\ifapp Details are given in Appendix~\ref{a:alpi}.\fi
\begin{theorem}
\label{t:bisbc}
\begin{enumerate}
\item
In \Intp, on agents that are image-finite up to~\bsim, barbed congruence and
bisimilarity
coincide.
\iffull
\item {\alert In \Intp,
contextual equivalence and trace equivalence coincide; furthermore,
contextual precongruence and trace inclusion coincide}
\adrien{\\i added this,
check if this is true and in the right theorem. If so, remove comment}
\fi
\item\label{bisbc:alpi}
In \alpi, on agents that are image-finite up to~\bsim
and where no free name is used in input,
barbed congruence and
bisimilarity
coincide.
\end{enumerate}
\end{theorem}
All encodings of the $\lambda$-calculus (into \Intp\ and \alpi) in the
paper satisfy the conditions of Theorem~\ref{t:bisbc}.
Thus we will be able to use
bisimilarity as a proof technique for barbed congruence.
(In part (2) of the theorem, the condition on inputs can be removed by
adopting an asynchronous variant of bisimilarity; however, the
synchronous version is easier to use in our proofs based on unique
solution of equations).
\iffull
\DS{Theorem~\ref{t:bisbc} can be made stronger by requiring that
whenever an input appears free in an agent, then this name cannot
appear free in other positions (output, application). If we did not
have application, one could simply say that no names appear at the
same time both in an input and in an output. Probably it is not
necessary to explain that the result is new. We will do this in the
journal version. Idea for the proof of the theorem (2): }
\fi
\section{Unique solutions in \Intp\ and \alpi}
\label{s:usol}
We adapt the proof technique of unique solution of equations, from~\cite{usol} to
the calculi \Intp\ and \alpi, in order to derive bisimilarity results.
The technique is discussed in~\cite{usol} on the asynchronous $\pi$-calculus
(for possibly-infinite systems of equations).
The structure of the
proofs for \Intp\ and \alpi\ is similar;
in particular the completeness part
is essentially the same
because
bisimilarity is the same.
The differences in the syntax of \Intp, and in the transition
system of \alpi, show up only in certain technical details of the
soundness
proofs.
\iffull
The results presented in this section hold both for \Intp\ and for
\alpi.
\fi
We need variables to write equations. We use
capital
letters $X,Y,Z$
for these variables and call them \emph{equation variables}.
The body of an equation is a name-closed abstraction
possibly containing equation variables
(that is, applications can also be of the form $\app X\tila$).
We use $E$ to range over such expressions; and
$\EE$ to range over systems of equations, defined as follows.
In the definitions below, the indexing set $I$ can be infinite.
\begin{definition}
Assume that, for each $i$ of
a countable indexing set $I$, we have a variable $X_i$, and an expression
$E_i$, possibly containing some variables.
Then
$\{ X_i = E_i\}_{i\in I}$
(sometimes written $\til X = \til E$)
is
a \emph{system of equations}. (There is one equation for each
variable $X_i$; we sometimes use $X_i$ to refer to that equation.)
A system of equations is \emph{guarded} if each
occurrence of a variable in the body of an equation is underneath a
prefix.
\end{definition}
\iffull
We can remark that in \alpi, an equation is guarded if all occurrences of
variables are below an input prefix (because the calculus is
asynchronous).
\fi
$E[\til F]$ is the abstraction resulting from $E$ by
replacing each variable $X_i$ with the abstraction $F_i$ (as usual
assuming
$\til F$ and $\til X$ have the same sort).
\iffull
(This is syntactic
replacement.)
\fi
\begin{definition}\label{d:un_sol}
Suppose $\{ X_i = E_i\}_{i\in I}$ is a system of equations. We say that:
\begin{itemize}
\item
$\til F$ is a \emph{solution of the
system of equations for $\bsim$}
if for each $i$ it holds
that $F_i \bsim E_i [\til F]$.
\item The system has
\emph{a unique solution for $\bsim$} if whenever
$\til F$ and $\til G$ are both solutions for $\bsim$, we have
$\til F \bsim \til G$. \end{itemize}
\end{definition}
\begin{definition}[Syntactic solutions]
The syntactic solutions of the system of equations $\til X =\EeqBody{}{}$ are the
recursively defined constants $\KEi E \Defi E_i[\KE]$, for each
$i\in I$, where $I$ is the indexing set of the system.
\end{definition}
The syntactic solutions
of a system of equations
are indeed solutions of
it.
A process $P$ \emph{diverges} if it can perform an infinite sequence
of internal moves, possibly after some visible ones
(i.e., actions different from $\tau$); formally, there are
processes $P_i$, $i\geq 0$, and some $n$, such that
$P=P_0\arr{\mu_0} P_1 \arr{\mu_1} P_2 \arr{\mu_2}\dots$ and for all
$i>n$, $\mu_i=\tau$. We call a \emph{divergence of $P$} the sequence
of transitions $\big(P_i\arr{\mu_i}P_{i+1}\big)_{i}$.
In the case of an abstraction,
\iffull
one first has to
instantiate the parameters with fresh names; thus \fi
$F$ has a divergence
if the process $\app F\tila$ has a divergence, where $\tila$ are fresh
names. A tuple of agents $\til A$ \emph{is divergence-free} if none of the
components $A_i$ has a divergence.
The following result is the technique we rely on to establish
completeness of the encoding. As announced above, it holds in both
\Intp\ and \alpi.
\begin{theorem}\label{thm:usol}
In \Intp\ and \alpi,
a guarded system of equations
with divergence-free syntactic
solutions has a unique solution for \bsim.
\iffull
{\alert ~and for $\treq$}.
\fi
\end{theorem}
Techniques for ensuring termination, hence divergence freedom, for the
$\pi$-calculus have been
studied in, e.g., \cite{termination1,termination2,termination3}.
\subsection{Further Developments}
We present some further developments to the theory of unique solution of equations,
that are needed for the results in this paper.
The first result allows us to derive the unique-solution property for a system of
equations from the analogous property of an extended system.
\begin{definition}
\label{d:extend}
A system of equations $\Eeq'$ \emph{extends} system $\Eeq$
if there exists a fixed set of indices $J$ such that any solution of
$\Eeq$ can be obtained from a solution of $\Eeq'$ by removing the
components corresponding to indices in $J$.
\end{definition}
\begin{theorem}
\label{t:transf:equations}
Consider two systems of equations
$\Eeq'$
and $\Eeq$
where
$\Eeq'$
extends $\Eeq$.
If $\Eeq'$ has a unique solution, then the property also holds for
$\Eeq$.
\end{theorem}
We shall use Theorem~\ref{t:transf:equations} in
Section~\ref{s:complete}, in a situation where we transform a certain
system into another one, whose uniqueness of solutions
is easier to establish.
\iffull
Then, by
Theorem~\ref{t:transf:equations}, the property
holds for the initial system.
\Mybar
\DS{I would remove the paragraph below, very technical}
\daniel{The point here would be to say ``while
Theorem~\ref{t:transf:equations} is not deep technically, it allows us
to avoid more complex things like the notion of innocuous
divergence''.}
More precisely, \eqcbv, the system we study in
Section~\ref{s:complete}, has equations of the form $X=\enca\Omega$
associated to any diverging \lterm. Such equations give rise to
\emph{innocuous divergences}, using the terminology of~\cite{usol}. A
refined version of Theorem~\ref{thm:usol} is stated
in~\cite{usol}, in order to handle such divergences. This refined
version is arguably more intricate; using
Theorem~\ref{t:transf:equations} allows us to work in a simpler
framework.
\Mybar
\fi
\begin{remark}
We cannot derive Theorem~\ref{t:transf:equations} by comparing the syntactic solutions of
the two systems $\Eeq'$ and $\Eeq$.
For instance, the equations
$X=\tau.X$ and $X=\tau.\tau.\tau\dots$ have (strongly) bisimilar syntactic
solutions, yet only the latter equation has the unique-solution property.
(Further, Theorem~\ref{t:transf:equations} allows us to compare systems
of different size.)
\iffull
We can moreover notice that when computing the modified version of
\eqcbv, we need to add some equations, and then use
Theorem~\ref{t:transf:equations}.
The following version of Theorem~\ref{t:transf:equations} would hence
not be useful in that situation: \textsl{consider two equations $E$
and $E'$ such that for all $P$, $E[P]$ is equivalent to $E'[P]$,
then $E$ and $E'$ have the same sets of solutions.}
\fi
\end{remark}
\iffull
\fi
The second development is a generalisation of Theorem~\ref{thm:usol}
to preorders; we postpone its presentation to Section~\ref{s:contextual}.
\iffull
\adrien{I think previous sentence is a bit misleading, as we already had an
extension of Theorem~\ref{thm:usol} to preorders in the CONCUR paper;
it would rather be a reformulation of such a theorem. Do you agree?\\
DH: true, but I think this remark is ``too fine''; we present a \underline{new}
generalisation, and we don't need here to explain the whole story.}
\fi
\section{Milner's encodings}
\label{s:enc:cbv}
\subsection{Background}
Milner noticed \cite{milner:inria-00075405,encodingsmilner} that his
call-by-value
encoding can be easily tuned so as to mimic
forms of
evaluation in which, in an application $MN$, the function $M$ is run
first, or the argument $N$ is run first, or function and argument are
run in parallel (the proofs are actually carried out for this last
option). We chose here the first one, because it is more in line with
ordinary call-by-value.
A discussion on the `parallel' call-by-value is deferred to
Section~\ref{s:concl}.
The core of any encoding of the $\lambda$-calculus into a process calculus is the
translation of function application. This
becomes a particular form of
parallel combination of two processes, the function and its argument;
$\betav$-reduction is then modeled as process interaction.
The encoding of a $\lambda$-term is parametric over a name; this may
be thought of as the \emph{location} of that term, or as its
\emph{continuation}.
A term that becomes a value signals so at its continuation name
and, in doing so,
it grants
access
to the body of the
value. Such body is replicated, so that the value may be
copied several times. When the value is a function, its body can
receive two names: (the access to) its value-argument, and the
following continuation.
In the translation of application, first the function is run, then
the argument; finally the function is informed of its argument and
continuation.
In the original paper~\cite{milner:inria-00075405},
Milner presented
two candidates for the encoding of call-by-value
$\lambda$-calculus~\cite{DBLP:journals/tcs/Plotkin75}. They follow the same idea of
translation, but with a technical difference in the rule for
variables. One encoding, $\qencm$, is so defined:
\iffull
(adapting the encoding of application as described above):
\fi
\[
\begin{array}{rcl}
\encm{\abs xM} & \deff &
\bind p\outb p y.!\inp y{x,q}.\encma Mq \\[\mypt]
\encm{MN}& \deff & \\[\myptSmall]
\multicolumn{3}{r}{
\bind p(\new q)(\encma Mq
|\inp qy.\new r(\encma Nr| \inp rw.\out
y{w,p}))
}\\[\mypt]
\encm{x}& \deff & \bind p \out p x
\end{array}
\]
In the other encoding, $\qencmp$,
application and $\lambda$-abstraction are treated as in
$\qencm$; the rule for variables is:
$$
\encmp{x} \deff
\bind p \outb p y.!\inp y {z,q}.\out x{z,q}
\enspace.
$$
The encoding $\qencm$ is more efficient than $\qencmp$,
\iffull
since in $\qencmp$, a
$\lambda$-calculus variable gives rise to a one-place buffer. As the
computation proceeds, these buffers are chained together, gradually
increasing the number of steps necessary to simulate a
$\beta$-reduction. This phenomenon does not occur in $\qencm$, where
a variable disappears after it is used.
\else
as it uses fewer communications.
\fi
\subsection{Some problems with the encoding}
\label{ss:ot}
The immediate free output in the encoding of variables in $\qencm$
breaks the validity of $\betav$-reduction; i.e., there exist a term $M$
and a value $V$ such that $\encm{(\lambda x. M)V } \not\bsim
\encm{ M \sub V x }$~\cite{sangiorgiphd}.
The encoding $\qencmp$ fixes
this by communicating, instead
of a
free name, a
fresh pointer to that name.
Technically, the initial free output of $x$ is replaced by a
bound output coupled with a link to $x$ (the process
$!\inp y {z,q}.\out x{z,q}$, receiving at $y$ and re-emitting at $x$).
Thus $\betav$-reduction is validated~\cite{sangiorgiphd}.
\iffull
, i.e.,
$\encm{(\lambda x. M)V } \bsim
\encm{ M \sub V x }$ for any $M$ and $V$~\cite{sangiorgiphd}.
\fi
(The final version of Milner's paper~\cite{encodingsmilner},
\iffull
which appeared in the {\em Journal of Mathematical Structures in
Computer Science},
\fi
was
written after the results in~\cite{sangiorgiphd} were known and presents
only the encoding $\qencmp$.)
Nevertheless, $\qencmp$ only delays the free output, as the added link
contains itself a free output.
As a consequence, we can show that other desirable equalities of
call-by-value are broken. An example is law~\reff{eq:nonlaw} from the
Introduction, as stated by Proposition~\ref{p:nonlaw} below.
This law is desirable (and indeed valid for contextual equivalence,
or the Eager-Tree equality)
intuitively because, in any substitution closure
of the law, either both terms diverge, or they
converge to the same value. The same argument holds
for their $\lambda$-closures, $\abs x x\val$ and $\abs x I(x\val)$.
\iffull
(depending on whether the
computation resulting from the instantiation of $xv$ diverges or not).
\fi
We recall that $\wbc\pi$ is barbed congruence in the
$\pi$-calculus.
\begin{proposition}\label{p:nonlaw}
For any value $\val$, we have:
$$\encmp {I(x\val)}
\nwbc\pi
\encmp {x\val}
\mbox{ and }
\encm {I(x\val)}
\nwbc\pi
\encm {x\val}
\enspace.
$$
\end{proposition}
(The law is
violated also under coarser equivalences, such as
contextual equivalence.)
Technically, the reason why the law fails in $\pi$ can be illustrated
when $\val=y$, for encoding $\qencm$. We have:
\begin{alignat*}{3}
\encma {xy} p &\wbc\pi \outb x {v} .\new {w}&&(\out v {w, p}|!\inp w u.\out y
u)
\mbox{ \hspace{1ex}}
\\[\mypt]
\encma {I(xy)} p&\wbc\pi \outb x v.\resb {w,q}&&(\out v {w, q} | !\inp{w} u.\out y
u
\\
& && |
{ \inp q z.\outb p {z'}.!\inp {z'} {w'}.\out z {w'}})
\end{alignat*}
In the presence
of the normal form $x y$, the identity $I$ becomes observable. Indeed, in the second
term, a fresh name, $q$, is sent instead of continuation $p$, and a
link between $q$ and $p$ is installed. This corresponds to a
law
which is valid in
\alpi, but not in $\pi$.
\iffull
\DS{do we have more examples? \\
DH: Discussion with Adrien: $(\lambda z.M)~ (x v) \not\approx M[(x
v)/z]$, \\
{\alert il faut imposer que M utilise z (M stricte en z)}\\
\adrien{yes, and this is because
the equation is not supposed to hold if $M$ is not strict in $z$}
and maybe also $(\lambda z.M)~(x v_1\cdots v_k) ~\not\approx
M[(x v_1\cdots v_k)/z]$.
\\
+ also the same equations with evaluation contexts around
}
\fi
This problem can be avoided by iterating the transformation that takes us
from $\qencm $ to $\qencmp$ (i.e., the replacement of a free output
with a bound output so as to avoid all emissions of free names). Thus the
target language becomes Internal $\pi$; the resulting encoding is
analysed in Section~\ref{s:enc:pii}.
Another solution is to control the use of name capabilities in
processes. In this case the target language becomes \alpi,
and we need not modify
the initial encoding $\qencm$. This
situation is analysed in Section~\ref{s:localpi}.
\iffull
\Mybar
The encoding uses three kinds of names: \emph{triggers} $x,y,
\dots$, \emph{continuations} $p,q,r,\dots$,
\emph{value body} names $v,w,\dots$.
For simplicity, we assume that the set of trigger names is the same as
the set of $\lambda$-variables.
\textbf{Remark:}
we should say here that this is a very mild form of typing. We could
avoid the distinction between two kinds of names, at the cost of
introducing additional replications in the encoding (Adrien: maybe you
could add 2 words to say where the replications would go).
\fi
Moreover, in both solutions,
the use of link processes
validates the following law~---
a form of $\eta$-expansion~---
(the law fails for Milner's encoding into the $\pi$-calculus):
\[
\abs y x y = x
\]
In the call-by-value $\lambda$-calculus this is a useful law
(that holds because substitutions replace variables with values).
\section{Encoding in the Internal \pc}
\label{s:enc:pii}
\subsection{Encoding and soundness}
\label{ss:enc_pii}
\begin{figure}
\caption{The encoding into \Intp}
\label{f:enc_internal}
\end{figure}
Figure~\ref{f:enc_internal} presents the encoding into \Intp, derived
from Milner's encoding by removing the
free outputs as explained in Section~\ref{s:enc:cbv}.
Process $\fwd ab$ represents a \emph{link} (sometimes called forwarder;
for readability we have adopted the infix
notation $\fwd ab$ for the constant $\fwd{}{}$). It
transforms all outputs at $a$ into outputs at $b$ (therefore $a,b$ are
names of the same sort). Thus the body of $\fwd ab$ is replicated,
unless $a$ and $b$ are \emph{continuation names} (names such as
$p,q,r$ over which the encoding of a term is abstracted).
The
definition of the constant $\fwd{}{}$ therefore is:
\iffull
{\alert \textbf{is it ok now? DH: what about two mutual recursive
definitions? could be more clear}}
\fi
$$
\begin{array}{rcl}
\fwd{}{} &\Defi &
\left\{ \begin{array}{l}
\bind{p,q} \inp p {x}.\outb {q} {y}
. {\fwd{ y}{ x}}\\[\myptSmall]
\multicolumn{1}{r}{
~\quad \mbox{if $p,q$ are continuation names}
} \\[\mypt]
\bind{x,y} ! \inp x {p,z}.\outb y {q,w}
.({\fwd{ q}{ p}}|\fwd w z) \\[\myptSmall]
\multicolumn{1}{r}{
~\quad \mbox{otherwise}
}\end{array} \right.
\end{array}
$$
(The distinction between continuation names and the other sorts of
names is not necessary, but simplifies the proofs.)
\iffull
We now discuss the soundness (in this section) and the completeness (in the next section)
for the encoding.
\fi
The encoding validates $\betav$-reduction.
\begin{lemma}[Validity of $\betav$-reduction]\label{l:beta}For any
$M,N$ in $ \Lao$, $M\longrightarrow N$ implies
$\enca M\bsim\enca N$.
\end{lemma}
\iffull
\begin{proof}
One shows
$\enca{(\abs x M)~\val} \bsim \enca {M\subst x\val}$
exploiting algebraic properties of replication; then the result follows by the
compositionality of the encoding and the congruence of $\bsim$.
\end{proof}
\fi
The structure of the
proof of soundness of the encoding is similar to that for the
analogous property for
Milner's call-by-name encoding with respect to Levy-Longo Trees \cite{cbn}.
The details are however different, as in call-by-value both the encoding
and the trees (the Eager Trees extended to handle $\eta$-expansion) are
more complex.
We first need to establish an operational
correspondence for the encoding.
For this we make use of
an optimised encoding,
obtained from
the one
in Figure~\ref{f:enc_internal} by performing a few (deterministic) reductions,
at the price of
a more complex definition. Precisely, in the encoding of
application,
we remove some of the initial
communications, including those with which a term signals that it has
become a value. Correctness of the optimisations is established
by algebraic reasoning.
Using the operational correspondence, we then show that the observables
for bisimilarity in the $\pi$-terms obtained by the encoding imply the observables for
$\eta$-eager normal-form bisimilarity in the encoded $\lambda$-terms.
The delicate cases are those
in which a branch in the tree of the terms is produced~---
case \reff{ie:split} of Definition~\ref{enfbsim}~--- and where
an $\eta$-expansion occurs~--- thus
a variable is equivalent to an abstraction,
cases~\reff{lab:five} and~\reff{def:enfe:case:eta} of Definition~\ref{enfebsim}.
For the branching, we exploit a decomposition property on $\pi$-terms, roughly allowing
us to derive from the bisimilarity of two parallel compositions the componentwise
bisimilarity of the single components.
For the $\eta$-expansion,
if $\enca x \bsim\enca {\abs zM}$,
where $M\converges \evctxt[x\val]$,
we use a coinductive argument to
derive $\val\enfe z$ and $\evctxt [y]\enfe y$, for $y$ fresh;
from this we then obtain
$\abs zM \enfe x$.
\ifapp More details for the proof of soundness are given in
Appendix~\ref{app:fa}.\fi
\iffull
We only sketch the soundness proof, so to leave more space for the completeness proof.
\fi
\iffull
To prove soundness we need to establish an operational
correspondence for the encoding. For this it is easier to relate
$\lambda$-terms and \Intp-terms via an optimised encoding, presented in Figure~\ref{f:opt_encod}.
\DS{the Figure of the optimised encoding looks really horrible...}
This encoding is
obtained from
the one
in Figure~\ref{f:enc_internal} by performing a few (deterministic) reductions,
at the price of
a more complex definition. Precisely, in the encoding of application
we remove some of the initial
communications, including those with which a term signals that it has become a value.
Thus the encoding of an application goes by a case analysis (4 cases)
on the occurrences of values in the subterms.
\fi
\iffull
formulate an operational
correspondence between the encoding and \cbv terms, we need to remove
some of the internal transitions of the encoding, as they prevent the
use of the expansion $\exn$, which is paramount: the idea is that if
$M\reds N$, we want $\enca N$ to be faster than $\enca M$ (i.e., to
have less internal steps before a visible transition). In other words,
that a term in normal form would have an encoding ready to perform a
visible transition.
The general idea of the optimized encoding can be illustrated on two
particular cases. For $\encba{\val M}$, the corresponding equation is
the result of unfolding the original encoding, and performing one
(deterministic) communication.
In the case of $\encba{x\val}$, not only do we unfold the original
encoding and reduce along deterministic communications, but we also
{\alert BLA}.
\daniel{why don't we want to optimize further the case of a beta-v
redex, by unfolding the encoding of the abstraction and calculating
the communication on $y$?}
\adrien{We could, and the optimized encoding we would get would be easier to
write. However the operational correspondence would also be more annoying:
for instance, showing that $\beta$ reductions in $ M$ induce $\tau$s in
$\enca M$ is a bit heavier, i think. This is interesting because i did not
realise it was possible, but i don't think we can do much with this. }
\fi
\iffull
Using the operational correspondence, we can now show that the observables
for bisimilarity in the $\pi$-terms obtained by the encoding imply the observables for
$\eta$-eager normal-form bisimilarity in the encoded $\lambda$-terms.
The delicate cases are those
in which a branch in the trees of the terms is produced
(case \reff{ie:split} of Definition~\ref{enfbsim}) or where
an $\eta$-expansion occurs (the two cases of Definition~\ref{enfebsim}).
For the branching, we exploit a decomposition property on $\pi$-terms, roughly allowing
us to derive from the bisimilarity of two parallel compositions the componentwise
bisimilarity of the single components.
\fi
\begin{lemma}[Soundness]\label{l:sound}
For any $M,N \in \Lao$, if $\enca M\bsim\enca N$ then $M\enfe N$.
\end{lemma}
\subsection{Completeness and Full Abstraction}
\label{s:complete}
\iffull
We now show that if $M\R N$, for some \enfbsim $\R$, we have $\enca
M\bsim\enca N$.
\fi
To ease the reader into the proof, we first show the completeness for $\enf$, rather than $\enfe$.
\paragraph{The system of equations.}
Suppose $\R$ is an eager normal-form bisimulation.
We
define a (possibly infinite) system of equations $\eqcbv$, solutions of which will be
obtained from the encodings of the pairs in $\R$.
We then use Theorem~\ref{thm:usol} and Theorem~\ref{t:transf:equations}
to show that $\eqcbv$ has a unique solution.
We assume an ordering on names and variables, so as to be able
to view (finite) sets of these as tuples.
Moreover, if $F$ is an abstraction, say $\bind \tila P$, then $\bind \tily
F$ is an abbreviation for its uncurrying $\bind{\tily,\tila}P$.
There is one equation $ X_{M,N} = E_{M,N}$ for each pair $(M, N)\in\R$.
The body $E_{M,N}$ is essentially the encoding
of the eager normal form
\iffull
(or absence
thereof)
\fi
of $M$ and $N$, with the
variables of the equations representing the coinductive hypothesis.
To formalise this,
we extend the encoding of the $\lambda$-calculus to equation variables
by setting
\[
\enca {X_{M,N}}{} \deff \bind {p} \app{X_{M,N}}{\tily,p}
\hskip .5cm \mbox{ ~~ where $\tily = \fv{M,N}$}
\enspace.
\]
\iffull
Given $M$,
$N$, and if $\til y=\fv{M,N}$, then
$\bind {\til y,p} \enc M p$ and
$\bind {\til y,p} \enc N p$ are closed abstractions.
\fi
We now describe the equation $ X_{M,N} = E_{M,N}$, for
$(M,N)\in\R$.
The equation is
parametrised on the free variables
of $M$ and $N $ (to ensure that the body $E_{M,N}$
is a name-closed
abstraction) and an additional continuation
name (as all encodings of terms). Below $\tily =\fv{M,N}$.
\begin{enumerate}
\item If $M\converges x$ and $N\converges x$, then
the equation
is
the encoding of $x$:
\begin{align*}
X_{M,N}&=\bind {\til y} \enca x \\[\myptSmall]
&= \bind {\til y,p} \outb p z. \fwd z x
\end{align*}
\iffull
Since $x$ is the \enform of $M$ and $N$, $x\in\til y$.
Note that $\til y$ can contain more names, occurring free in $M$ or $N$.
\fi
\item
If
$M\diverges$ and
$N\diverges$, then the equation uses a purely-divergent term;
\iffull
(actually any term behaviourally indistinguishable with $\nil$ would
do);
\fi
we choose the encoding of $\Omega$:
\iffull for this: \fi
\begin{align*}
X_{M,N} = \bind {\til y} \enca \Omega
\end{align*}
\iffull
Note that the encoding of any diverging term is
bisimilar to $\zero$; hence we could
replace the body of this equation
with $\bind {\til y,p}
\zero$.
\fi
\item If $M\converges \abs x M'$ and $N\converges \abs x N'$, then
the equation encodes an abstraction whose body refers
to the normal forms of $M',N'$, via the variable $X_{M',N'}$:
\[
\begin{array}{rcl}
X_{M,N}&=&\bind {\til y}
\enca{\abs x X_{M',N'}
}
\\[\myptSmall]
&= & \bind {\til y,p}
\outb p z .!\inp z
{x,q}.X_{M',N'}\param {\tilprime y,q}
\end{array}
\]
\item\label{item:decomp:eqcbv} If $M\converges \evctxt [x\val]$ and
$N\converges \evctxt' [x\valp]$,
we separate the evaluation contexts and the values, as in
Definition~\ref{enfbsim}.
In the body of the equation, this is achieved by: $(i)$ rewriting
$\evctxt[x\val]$ into $(\abs z\evctxt [z])(x\val)$, for some fresh $z$,
and similarly for $\evctxt'$ and $\valp$ (such a transformation is
valid for $\enf$); and $(ii)$ referring to the variable for the
evaluation contexts, $X_{\evctxt[z],\evctxt'[z]}$,
and to the variable for the values, $X_{\val,\valp}$.
This yields the equation (for $z$ fresh):
\begin{align*}
X_{M,N} = \bind {\til y}
\enca {(\abs z X_{\evctxt[z],\evctxt'[z]}
)
~(x~X_{\val,\valp}
)}
\end{align*}
\end{enumerate}
As an example,
suppose
$(I,\abs
xM)\in\R$,
where $I=\abs x x$ and $M= (\abs {zy}z) x x'$.
The
free variables of $M$ are $x$ and $x'$.
We obtain
the following equations:
\iffull
(assuming $x$ is before $x'$ in the
ordering of variables):
\fi
\begin{enumerate}
\item $\begin{aligned}[t]
X_{I,\abs x M}
&=\bind {x'}\enca {\abs x X_{x,M}
}
\\%[\myptSmall]
&=\bind {x',p} \outb p y .!\inp y {x,q}.X_{x,M}\param {x,x',q}
\end{aligned}$
\item $\begin{aligned}[t]
X_{x,M
}
&=\bind {x,x'}\enca x \\
&=\bind{x,x',p} \outb p y.\fwd y x
\end{aligned}$
\end{enumerate}
\paragraph{Solutions of \eqcbv.}
Having set the system of equations for $\R$, we now define
solutions for it from the encoding of the pairs in $\R$.
We can view the relation $\R $ as an ordered
sequence of pairs (e.g., assuming some lexicographical ordering).
Then $\R_i$ indicates the tuple obtained by
projecting the pairs in $\R$ onto the $i$-th component ($i=1,2$).
Moreover $(M_j,N_j)$ is the $j$-th pair in $\R$, and $\til{y_j}$ is
$\fvars {M_j,N_j}$.
We write $\encOO\R$ for the closed abstractions
resulting from the
encoding of $\R_1$, i.e., the tuple
whose $j$-th component is
$
\bind {\til{y_j}}\enca{M_j}$, and similarly for
$\encTO\R$.
\iffull
We can extract from $\R$ two solutions of \eqcbv as follows:
\begin{definition}
Given an eager normal-form bisimulation $\R$,
we define \iffull $\encleft \R$ as follows: \fi
$$
\begin{array}{rcl}
\encleft \R&\scdef&\{\bind {\til y}\enca M\st \exists N, M\R N\\
&& \text{
and } \til y \text{ is the ordering of }\fvars {M,N}\}
\end{array}
$$
$\enca {\R_2}$ is defined similarly, based on the right-hand side of
the relation.
\end{definition}
\fi
\begin{lemma}\label{l:sol}
\iffull
If $\R$ is an eager normal-form bisimulation,
then
\fi
$\encleft{\R}$
and $\encright{\R}$
are solutions of \iffull the system of equations\fi
\eqcbv.
\end{lemma}
\begin{proof}
We show that each component of $\encOO\R$ is a solution of the
corresponding equation, i.e.,
for the $j$-th component
we show
$\bind { \til{y_j}} \enca{M_j}\bsim \Eqsing{M_j,N_j} {\encOO {\R}}$.
We reason by cases over the shape of the
eager normal form of $M_j,N_j$.
\iffull
By the validity of $\betav$, if $M\converges \abs x M'$, then
$\enca M \bsim \enca {\abs x M'}$.
If
$M\converges \abs x M'$, we simply have to show that
$\enca M \bsim \enca {\abs x M'}$.
\fi
The most interesting case is when $M_j\converges \evctxt [x\val]$, in which
case we use the following equality (for $z$ fresh), which is proved
using
algebraic reasoning:
\begin{equation} \label{eq:l:solMAIN}
\enca {(\abs z\evctxt [z])(x\val)} \bsim \enca {\evctxt[x\val]}
\enspace.
\end{equation}
We also exploit the validity of $\betav$ for $\bsim$ (Lemma~\ref{l:beta}).
\iffull
If $M\diverges$,
we need to show that any diverging term has an encoding
equivalent to $\zero$ (hence $\enca M \bsim \enca \Omega$).
This is a consequence of the operational correspondence from
Section \ref{ss:enc_pii}.
\fi
\ifapp More details are found in Appendix \ref{app:complete}. \fi
\end{proof}
\paragraph{Unique solution for \eqcbv.}
We use Theorem~\ref{t:transf:equations} to prove uniqueness
of solutions for \eqcbv.
The only delicate requirement is the one on divergence for the syntactic
solution.
We introduce for this
an auxiliary system of equations, \eqcbvp, that extends \eqcbv, and
whose syntactic solutions have no $\tau$-transition and hence trivially
satisfy the requirement.
Like the original system
\eqcbv, the new system
\eqcbvp\ is defined by inspection of the pairs in $\R$;
in
\eqcbvp, however, a pair of $\R$ may sometimes yield more than one
equation.
Thus, let $(M,N)\in \R$ with $\til y=\fvars {M,N}$.
\begin{enumerate}
\item When $M\Uparrow$ and $N\Uparrow$, the equation is
\begin{align*}
X_{M,N} = \bind{\til y,p} \zero
\enspace.
\end{align*}
\item When $M\converges \val$ and $N\converges \valp$, we introduce a
new equation variable $\XV_{\val,\valp}$ and a new equation;
this will allow us, in the following step (3), to
perform some optimisations. The equation is
\begin{align*}
X_{M,N} = \bind {\til y,p} \outb p z . \XV_{\val,\valp}\param{z,\tilprime y}
\enspace,
\end{align*}
and we have, accordingly, the two following additional equations
corresponding to the cases where values are functions or variables:
\[
\begin{array}{rcl}
\XV_{\abs x M',\abs x N'}&=& \bind {z,\til y}!\inp z {x,q}
. X_{M',N'}\param{{\tilprime y},q}
\\[\mypt]
\XV_{x,x}&=&\bind {z,x} \fwd z x
\end{array}
\]
\item When $M\converges \evctxt[x\val]$ and $N\converges\evctxt'[x\valp]$, we
refer
to $\XV_{\val,\valp}$, instead of $X_{\val,\valp}$, so to remove all
initial reductions in the corresponding equation for
\eqcbv. The first action thus becomes
an output:
\[
\begin{array}{rcl}
X_{M,N} &=& \\[\myptSmall]
\multicolumn{3}{r}{
\bind {\til y,p} \outb x
{z,q}.(\XV_{\val,\valp}\param{z,{\tilprime y}}
|\inp q
w.X_{\evctxt[w],\evctxt'[w]}\param{{\tilpprime y},p})
}
\end{array}
\]
\end{enumerate}
Lemmas~\ref{l:div_aux} and~\ref{l:div}
are needed to apply Theorem~\ref{t:transf:equations}.
(In the statement of Lemma~\ref{l:div_aux}, `extend' is as by Definition~\ref{d:extend}.)
\begin{lemma}\label{l:div_aux}The system of equations \eqcbvp extends
the system of equations \eqcbv.
\end{lemma}
\begin{proof}
The new system
\eqcbvp\ is obtained from \eqcbv\
by modifying the equations and
adding new ones.
One shows that the solutions to the common equations are the same,
using algebraic reasoning.
\end{proof}
\iffull
\begin{pfsketch}
The new system
\eqcbvp\ is obtained from \eqcbv\
by modifying the equations and
adding new ones.
We show that the solutions to the common equations are the same,
using algebraic reasoning.
{\alert [More details are
found in Appendix \ref{app:complete}.]}
\adrien{can we remove previous sentence? The only relevant things we could add in
appendix are long calculations that are not very interesting (though i do have
them somewhere), so maybe we should not develop this part too much in appendix}
\end{pfsketch}
\fi
\begin{lemma}\label{l:div}
\iffull The system of equations \fi
\eqcbvp has a unique solution.
\end{lemma}
\begin{proof}
Divergence-freedom for the
syntactic solutions
of \eqcbvp\
holds because in
the equations each name (bound or free) can appear either only in inputs
or only in outputs. As a consequence, since the labelled transition system is ground (names
are only replaced by fresh ones), no $\tau$-transition can ever be
performed, after any number of visible actions.
Further, \eqcbvp is guarded. Hence we can apply Theorem~\ref{thm:usol}.
\iffull
\DS{ for the full paper it would be good here to have a general result
for pi: if in a process each name, free or bound,
may appear either only in inputs
or only in outputs, then no $\tau$-transition is ever possible.
However the statement needs care because we also have constants.
}
We observe that in the syntactic solutions
of \eqcbvp, linear names ($p,q,\dots$) are used exactly once in
subject position, and non-linear names ($x,y,w,\dots$), when used in
subject position, are either used exclusively in input or
exclusively in output.
Since we work using ground transitions, this is enough to deduce
absence of divergences. It is easy to check that
\eqcbv is strongly guarded,
hence we can apply Theorem~\ref{thm:usol}.
\fi
\end{proof}
\iffull
Hence, by Theorem~\ref{t:transf:equations}, \eqcbv\ has a unique solution.
A more direct proof of Lemma~\ref{l:div} would have been possible, by
reasoning coinductively over the \enfbsim defining
the system of equations.
\daniel{Above, the argument is incomplete | for now I commented out the
explanations, which were not clear.}
\adrien{Actually, a direct proof would be really bothersome (probably
possible, but it's not like i checked), so i propose to remove above
sentence altogether. Otherwise it will be handwaving (but i can certainly
write some better handwaving).}
\fi
\begin{lemma}[Completeness for $\enf$]\label{l:complete_enf}
$M\enf N$ implies $\enca M \bsim \enca N$,
for any $M,N \in\Lao$.
\end{lemma}
\begin{proof}
Consider
an eager normal-form bisimulation $\R$, and
the corresponding systems of equations \eqcbv\ and \eqcbvp.
Lemmas~\ref{l:div} and~\ref{l:div_aux} allow us to apply
Theorem~\ref{t:transf:equations} and deduce that \eqcbv\ has a unique
solution.
By Lemma \ref{l:sol},
$\encOO \R$ and $\encTO\R$ are solutions of \eqcbv.
Thus, from $M\R N$,
we deduce
$\bind {\til y} \enca M \bsim \bind{\til y} \enca N$,
where $\til y=\fvars {M,N}$.
Hence also
$ \enca M \bsim \enca N$.
\end{proof}
\paragraph{Completeness for $\enfe$.}
The proof for $\enf$
is extended to $\enfe$, maintaining its
structure. We highlight the main differences.
We enrich \eqcbv\ with the equations corresponding to the two
additional clauses of $\enfe$ (Definition~\ref{enfebsim}).
When $M\converges x$ and $N\converges \abs z N'$, where $N'\enfe xz$,
we proceed as in case~\ref{item:decomp:eqcbv} of the definition of \eqcbv,
given that
$N\enfe \abs z \left( (\abs w\evctxt [w]) (x\val)\right)$; the
equation is:
\begin{align*}
X_{M,N} =\bind {\til y}
\enca{\abs z \left((\abs w X_{w,\evctxt[w]}) ~ (x~X_{z,\val}
)\right)}
\enspace.
\end{align*}
We proceed likewise for the symmetric case.
In the optimised equations that we use to derive unique solutions,
we add the following equation (relating
values), as well as its symmetric counterpart:
\begin{align*}
\begin{array}{rcl}
\XV_{x,\abs z N'}
&=&\bind {y_0,\til y}
\\
\multicolumn{3}{r}{
!\inp {y_0}{z,q}. \outb x {z',q'}. (\XV_{z,\val}\param{z',\tilprime y }
|\inp {q'}{w}.X_{w,\evctxt[w]}\param{\tilpprime y,q})
\enspace.
} \end{array}
\end{align*}
\iffull
We can then prove unique solution for \eqcbv, as is done with
Lemmas \ref{l:div_aux} and \ref{l:div}.
\fi
Finally,
to
prove that $\encOO \R$ and $\encTO \R$ are
solutions of \eqcbv, we
show that, whenever $M\converges x$ and $N\converges \abs z N'$,
with $N'\converges\evctxt[x\val]$:
\begin{align*}
\enca M &\bsim \Eqsing{M,N} {\encleft\R}\param{\til y}\\[\myptSmall]
&= \enca {\abs z\left( (\abs w w)(x z)\right)}
\quad\mbox{}
\end{align*}
\mbox{and}
\begin{align*}
\enca N &\bsim \Eqsing{M,N} {\encright\R}\param{\til y}\\[\myptSmall]
&= \enca {\abs z\left( (\abs w \evctxt [w])(x \val)\right)}
\enspace.
\end{align*}
To establish the former, we
use algebraic reasoning to infer
$\enca x \bsim \enca {\abs z xz}$.
For the latter, we use law~\reff{eq:l:solMAIN} (given in the proof
of Lemma~\ref{l:sol}).
\ifapp More details are provided in Appendix \ref{app:complete}. \fi
\iffull
Given the previous results, we can reason as for the proof of Proposition
\ref{p:complete} to establish completeness.
\fi
\begin{lemma}[Completeness for $\enfe$] \label{l:complete}
For any $M,N$ in $\Lao$,
$M\enfe N$ implies $\enca M \bsim \enca N$.
\end{lemma}
\iffull
{\alert
We have shown that the equivalence induced by the encoding, with weak
bisimilarity as the equivalence for the encoded terms, is fully abstract
w.r.t. $\enfe$: indeed, it is sound (Lemma~\ref{l:sound}) and
complete (Lemma~\ref{l:complete}).
We combine this fact with
the characterisation of barbed congruence as
weak bisimilarity (Theorem~\ref{t:bisbc}),
to get the full abstraction of the encoding w.r.t. $\enfe$:}
\fi
Combining Lemmas~\ref{l:sound} and~\ref{l:complete}, and
Theorem~\ref{t:bisbc} we derive Full Abstraction for
$\enfe$ with respect to barbed congruence.
\begin{theorem}[Full Abstraction for $\enfe$]
For any $M,N$ in $\Lao$, we have $M\enfe N$ iff $\enca M\wbc\IntpSmall \enca N$.
\end{theorem}
\begin{remark}[Unique solutions versus up-to techniques]
\label{r:upto}
For Milner's encoding of call-by-name $\lambda$-calculus,
the completeness part of the full abstraction result with respect to
L{\'e}vy-Longo Trees~\cite{cbn} relies on
\emph{up-to techniques for bisimilarity}.
Precisely, given a relation $\R$ on $\lambda$-terms that represents a
tree bisimulation, one shows that the $\pi$-calculus encoding of
$\R$ is a $\pi$-calculus bisimulation \emph{up-to context and
expansion}. Expansion is a preorder that intuitively guarantees that a
term is `more efficient' than another one\ifapp ~(Appendix
\ref{app:complete})\fi .
In the up-to technique, expansion is used
to manipulate the derivatives of two transitions so to bring up a
common context.
Such up-to technique is not powerful enough for the
call-by-value encoding and the Eager
Trees
because
some of the required transformations would violate expansion (i.e., they
would require to replace a term by a `less efficient' one).
An example of this is law~\reff{eq:l:solMAIN} (in the proof
of Lemma~\ref{l:sol}), that would have to be
applied from right to left so to
implement the branching in clause
\reff{ie:split} of Definition~\ref{enfbsim}
(as a context with two holes).
The use of the technique of unique solution of equations allows us to
overcome the problem: law \reff{eq:l:solMAIN} and similar laws that
introduce `inefficiencies' can be used (and they are indeed used,
in various places),
as long as they
do not produce new divergences.
\iffull
\DS{old things here}
For call-by-name, completeness of the $\pi$-calculus encoding
is established using \emph{up-to techniques for bisimulation}, and in
particular up-to contexts and up-to expansion~\cite{cbn}.
One may wonder whether the same approach can be used in the present
setting, instead of relying on the technique of unique solutions.
While we cannot exclude that a completeness proof using up-to
techniques is possible, we believe it would be quite involved,
essentially because we cannot rely on the `up-to expansion'
technique.
The completeness proof requires contexts and stuck redexes to
be separated: we decompose $\evctxt[x\val]$ into $\abs z \evctxt[z]$ and
$x\val$, which would also be required in a proof using
up to techniques.
However, $\enca {\evctxt [x\val]}\not\exn \enca{(\abs z \evctxt[z])(x\val)}$:
internal steps are added rather than removed. This has two causes:
the addition of a $\beta$-redex, and the insertion of an abstraction
above the context $\evctxt$,
which forces some synchronisations to happen later---this not being
compatible with expansion.
This difficulty (together with other technical aspects, like the
handling of free output prefixes, in relation with validity of
$\betav$) might be the reason why the characterisation of the
equivalence induced by the call-by-value encoding has remained open
for quite long.
\fi
\end{remark}
\section{Encoding into \alpi}
\label{s:localpi}
Full abstraction
with respect to $\eta$-Eager-Tree equality also holds
for Milner's simplest encoding, namely $\qencm$
(Section~\ref{s:enc:cbv}),
provided that
the target
language of the encoding is taken to be \alpi.
The adoption of \alpi\ implicitly allows us to control capabilities,
avoiding violations of laws such as~\reff{eq:nonlaw} in
the Introduction.
In \alpi, bound output prefixes such as $\outb a x .\inp x y$ are abbreviations for
$\new x(\out a x| \inp x y )$.
\begin{theorem}
\label{t:faALpi}
$M\enfe N$ iff $ \encm M \wbc\alpiSmall \encm N$, for any $M,N \in \Lao$.
\end{theorem}
The main difference
with respect to the proofs of Lemmas~\ref{l:complete_enf}
and~\ref{l:complete}
is when proving absence of divergences for the (optimised) system of equations.
Indeed, in \alpi\ the characterisation of barbed congruence
($ \wbc\alpiSmall$) as bisimilarity
makes use
of
a different labelled transition system \ifapp(Appendix~\ref{a:alpi})\fi
where visible transitions may create
new processes (the `static links'), that could thus produce new
reductions. Thus one has to show that the added processes
do not introduce
new divergences.
\iffull
For instance, if $P\arr{\out a b} P'$, then in the
new LTS we have $P\alpiar{\outb a x} (\alpilink x b | P')$.
We show that this cannot yield divergences if $P$ did not already have
divergences.
The replicated input guarding a static link
created by the execution of a process ($x$ in the previous example)
is always fresh.
Hence, any synchronisation created by the link has to be preceded
by a visible action. Furthermore, names transmitted through this
synchronisation are names freshly received (in the ground LTS),
therefore cannot create additional synchronisations, nor can they
induce divergences.
\fi
\section{Contextual equivalence and preorders}
\label{s:contextual}
We have presented full abstraction for $\eta$-Eager-Tree equality
taking
a `branching' behavioural equivalence, namely
barbed congruence, on the $\pi$-processes.
We show here the same result
for contextual
equivalence, the most common
`linear' behavioural equivalence.
We also extend the results to preorders.
We only discuss the encoding
$\qenca$ into
\Intp. Similar results however hold for
the encoding $\qencm$ into
\alpi.
\subsection{Contextual relations and traces}
\label{ss:preorders}
Contextual equivalence is defined in the $\pi$-calculus
analogously to its definition in the
$\lambda$-calculus (Definition~\ref{d:ctxeq});
thus, with
respect to barbed congruence, the bisimulation game on reduction is
dropped.
Since we wish to handle preorders, we also introduce
the \emph{contextual preorder}.
\begin{definition}
\label{d:ctx_pi}
Two \Intp\ agents
$A,B$ are in the \emph{contextual preorder}, written
$A \ctxpre B $,
if
$C[A]\Dwa_{ a}$ implies $C[B]\Dwa_{ a}$,
for all contexts $C$. They are
\emph{contextually equivalent},
written
$A \ctxeqPI B $, if both
$A \ctxpre B $ and $B \ctxpre A $ hold.
\end{definition}
To manage contextual preorder and equivalence in proofs, we
exploit
characterisations of them as trace inclusion and equivalence.
For $s = \mu_1, \ldots, \mu_n$,
where each $\mu_i$ is a visible action,
we set $P \Arr{s} $
if $P \Arr{\mu_1} P_1 \Arr{\mu_2}P_2 \ldots P_{n-1} \Arr{\mu_n}P_n$, for
some processes $P_1, \ldots, P_n$.
\begin{definition}
\label{d:trace}
Two \Intp\ processes $P,Q$ are in the \emph{trace inclusion}, written
$P \trincl Q $, if $P \Arr{s} $ implies $Q \Arr{s} $, for each trace
$s$. They are
\emph{trace equivalent},
written
$P \treq Q $, if both
$P \trincl Q $ and
$Q \trincl P $ hold.
\end{definition}
As usual, these relations are extended to abstractions by requiring
instantiation of the parameters with fresh names.
\begin{theorem}
\label{t:cha_tr}
In \Intp, relation $ \ctxpre$ coincides with $ \trincl$, and
relation $ \ctxeqPI$ coincides with $ \treq $.
\end{theorem}
\iffull
The completeness proof w.r.t.\ trace equivalence
is similar to that of Lemma~\ref{l:complete}, using a
unique solution theorem for trace equivalence (along the lines of
Theorem~\ref{thm:usol} for barbed congruence).
However, contrary to bisimulation, trace equivalence, as
a proof technique, is asymmetrical: to show $P\treq Q$,
one has to show first the traces of $P$ are traces of $Q$,
and then the reverse. Therefore, such a unique-solution theorem
needs to be formulated by means of a behavioural preorder,
namely trace inclusion, written $\trincl$. Trace inclusion $P\trincl Q$
is simply defined by saying that all traces of $P$ are traces of $Q$.
\fi
\subsection{A proof technique for preorders}
\label{ss:usol_pre}
We modify the technique of unique solution of equations to
reason about preorders, precisely the
trace inclusion preorder.
\iffull
{\alert
From a system of equations \system one
may derive two systems of pre-equations,
$\{ X_i \leq
E_i\}_{i\in I}$ and $\{ E_i \leq X_i\}_{i\in I}$.
We call solutions of the former \emph{prefixpoints of $E$},
and of the latter \emph{postfixpoints}.}
In this case we work with systems of \emph{\ineqs}
(rather than \emph{equations}), and we use the corresponding proof technique
to establish results about behavioural preorders.
We still call \emph{syntactic solution of a system
} the
agents obtained by turning the \ineqs into recursive agent
definitions.
\fi
In the case of equivalence,
the
technique of unique solutions exploits symmetry arguments,
but
symmetry does not hold for preorders.
We overcome the problem by referring to the
syntactic solution of the system in an asymmetric manner.
This yields the two lemmas below, intuitively stating that the
syntactic solution of a system
is its smallest pre-fixed point, as well as, under the divergence-freeness
hypothesis, its greatest
post-fixed point.
We say that $\til F$ is a \emph{pre-fixed
point
for $\trincl$} of
a system of equations $\{\til X= \til E\}$
if $\til E[\til F]\trincl \til F$;
similarly, $\til F$ is a \emph{post-fixed
point for $\trincl$}
if
$\til F\trincl \til E[\til F]$.
\begin{lemma}[Pre-fixed points, $\trincl$]\label{least}
Let $\Eeq$ be a
system of equations,
and $\KEE$ its syntactic solution.
If $\til F$ is
a pre-fixed point for $\trincl$ of $\Eeq$,
then $\KEE \trincl \til F$.
\end{lemma}
\iffull
\begin{proof}
Take a finite trace $\til\alpha$ of $\KEi i$. As it is finite, there must be an
$n$ such that it is a trace of $E_i^n$, hence it is also a trace of $E_i^n[\til P]$.
$\til E[\til P]\trincl \til P$, hence, by congruence, $E_i^n[\til P]\trincl P_i$,
and $\til\alpha$ is a trace of $P_i$. This concludes.
\end{proof}
\fi
\begin{lemma}[Post-fixed points, $\trincl$]\label{greatest}
Let $\Eeq$ be a guarded
system of equations,
and $\KEE$ its syntactic solution.
Suppose $\KEE$ has no divergences.
If $\til F$ is
a post-fixed point for $\trincl$ of $\Eeq$,
then
$\til F \trincl \KEE$.
\end{lemma}
Lemma \ref{least} is immediate;
the proof of Lemma \ref{greatest}
is similar to the
proof of Theorem~\ref{thm:usol} (for bisimilarity).
We thus derive the following proof technique.
\begin{theorem}
\label{thm:usol_pre}
Suppose that $\Eeq$ is a guarded
system of equations
with a
divergence-free
syntactic
solution.
\iffull
, and $\P,~\Q$ systems of closed abstractions
{\alert [what are systems of closed abstractions?]}
of the same size.
\fi
If $\til F$ is a pre-fixed point for $\trincl$ of $\Eeq$, and
$\til G$ a post-fixed point,
then
${\til F}\trincl {\til G}$.
\end{theorem}
\iffull
This result is reminiscent of
Theorem 19 in~\cite{usol}, but its statement is more useful for
proofs. In particular, it does not need to refer
to the syntactic solution outside
of the absence of divergences.
\fi
We can also extend
Theorem~\ref{t:transf:equations} to preorders.
We say that a system of equations $\Eeq'$
\emph{extends $\Eeq$ with respect to a given preorder
}
if there exists a fixed set of indices $J$ such that:
\begin{enumerate}
\item
any pre-fixed point
of
$\Eeq$ for the preorder can be obtained from a pre-fixed point
of $\Eeq'$ (for the same preorder) by removing the
components corresponding to indices in $J$;
\item
the same as (1) with post-fixed points in place of pre-fixed points.
\end{enumerate}
\begin{theorem}\label{t:transf:preorders}
Consider two systems of equations $\Eeq'$ and $\Eeq$
where $\Eeq'$
extends $\Eeq$ with respect to $\trincl$.
Furthermore, suppose $\Eeq'$ is guarded and has a divergence-free
syntactic solution.
If $\til F$ is a pre-fixed point for $\trincl$ of $\Eeq$, and
$\til G$ a post-fixed point,
then
${\til F}\trincl {\til G}$.
\end{theorem}
\subsection{Full abstraction results}
\label{ss:fa_preorders}
The preorder on $\lambda$-terms induced by the contextual preorder
is
\emph{$\eta$-eager normal-form similarity}, $\esim$. It is obtained by
imposing that $M \esim N$ for all $N$, whenever $M$ is divergent.
Thus, with respect to the bisimilarity relation $\enfe$, we only have to change
clause (1) of Definition~\ref{enfbsim},
by requiring
only $M$
to be divergent. (The bisimilarity $\enfe$ is then the
intersection of $\esim$ and its converse $\revesim$.)
\begin{theorem}[Full abstraction on preorders]
\label{t:preorders}
For any $M,N \in \Lao$, we have
$M\esim N$ iff $ \enca M \ctxpre \enca N$.
\end{theorem}
The structure of the proofs is similar to
that for bisimilarity,
using however Theorem \ref{thm:usol_pre}.
We discuss the main aspects of the completeness part.
Given an \enfse $\R$, we define a system of equations \eqcbv as in
Section \ref{s:complete}. The only notable difference in the definition
of the equations is in the case
where $M\R N$, $M$ diverges and $N$ has an \enform. In this case, we
use the following equation instead:
\begin{equation}\label{e:tr}
X_{M,N}=\bind {\til y} \enca \Omega
\enspace.
\end{equation}
As in Section \ref{s:complete}, we define a system of
guarded equations \eqcbvp\
whose syntactic solutions do not diverge.
Equation~\reff{e:tr} is replaced with $X_{M,N}=\bind {\til y,p} \zero$.
Exploiting Theorem~\ref{t:transf:preorders},
we can use unique solution for preorders
(Theorem~\ref{thm:usol_pre})
{with \eqcbv instead of \eqcbvp.}
Defining $\encleft \R$ and $\encright \R$ as previously, we need to
prove that $\encleft\R \trincl\Eq \R{\encleft\R}$ and
$\Eq \R {\encright\R}\trincl \encright\R$. The former result is
established along the lines of the analogous result in
Section~\ref{s:complete}: indeed, $\encleft\R$ is a solution of
\eqcbv for $\bsim$, and $\treq$ is coarser than $\bsim$.
For the latter, the only difference is due to equation \reff{e:tr},
when $M\R N$, and $M$ diverges but not $N$.
In that case, we have to prove that $\enca \Omega\trincl \enca N$, which
follows easily because
the only trace of $\enca\Omega$ is the empty one, hence
$\enc \Omega p \trincl P$ for any $P$.
\begin{corollary}[Full abstraction for $\ctxeqPI$]
\label{t:faCTXpiI}
For any $M,N$ in $\Lao$,
$M\enfe N$ iff $\enca M \ctxeqPI \enca N$.
\end{corollary}
\section{Conclusions and future work}
\label{s:concl}
In the paper we have studied the main question raised in Milner's
landmark paper on functions as $\pi$-calculus processes, which is
about the equivalence induced on $\lambda$-terms by their process
encoding. We have focused on call-by-value, where the problem was
still open; as behavioural equivalence on $\pi$-calculus we have
taken contextual equivalence and barbed congruence (the most common
`linear' and `branching' equivalences).
First we have shown that some expected equalities
for open terms fail under Milner's encoding. We have considered two
ways for overcoming this issue: rectifying the encodings (precisely,
avoiding free outputs); restricting the target language to \alpi,
so to
control the capabilities of exported names.
We have proved that, in both cases, the equivalence induced is
Eager-Tree equality, modulo $\eta$
(i.e., Lassen's \enfbsim).
\iffull
, i.e., $\eta$ Eager-Tree equality.
\fi
We have then introduced a preorder on these trees, and
derived similar full abstraction results for them with respect to
the contextual preorder on $\pi$-terms.
The paper is also a test case for
the technique of
unique solution of equations (and inequations),
which is essential in all our
completeness proofs.
Lassen had introduced Eager Trees as the call-by-value analogue of
L{\'e}vy-Longo and B{\"o}hm Trees.
The results in the paper confirm the claim, on process encodings of
$\lambda$-terms: it was known that for (weak and strong) call-by-name, the equalities
induced are those of L{\'e}vy-Longo Trees and B{\"o}hm Trees~\cite{xian}.
For controlling capabilities, we have used \alpi.
Another possibility would have been to use a type system.
In this case however, the technique of unique solution of equations
needs to be extended to typed calculi. We leave this for future work.
We also leave for future work a thorough comparison between the technique
of unique solution of equations and techniques based on enhancements
of the bisimulation proof method (the ``up-to'' proof techniques),
including if and how our completeness results can be
derived using the latter techniques. (We recall that the ``up-to''
proof techniques are used in the completeness proofs with
respect to L{\'e}vy-Longo Trees and B{\"o}hm Trees for the
\emph{call-by-name} encodings.
We have discussed the problems with
call-by-value in Remark~\ref{r:upto}.) In any case, even if
other solutions existed, for this specific problem the unique solution
technique appears to provide an elegant and natural framework to
carry out the proofs.
For our encodings we have used the polyadic $\pi$-calculus; Milner's
original paper \cite{milner:inria-00075405} used the monadic calculus (the polyadic $\pi$-calculus
makes the encoding easier to read; it had not been introduced at the
time of \cite{milner:inria-00075405}). We believe that polyadicity
does not affect the results in the paper (the possibility of
autoconcurrency breaks full abstraction of the
encoding of the polyadic
$\pi$-calculus into the monadic one, but autoconcurrency does not
appear in the encoding of $\lambda$-terms).
In the call-by-value strategy we have followed, the function is
reduced before the argument in an application. Our results can be
adapted to the case in which the argument runs first, changing the
definition of evaluation contexts. The parallel call-by-value, in
which function and argument can run in parallel (considered in
\cite{encodingsmilner}), appears more delicate, as we cannot rely on the usual
notion of evaluation context.
Interpretations of $\lambda$-calculi into
$\pi$-calculi
appear related to
game
semantics~\cite{BHYseqpi,DBLP:conf/fpca/HylandO95,DBLP:journals/tcs/HondaY99}.
In particular, for untyped call-by-name they both allow us to derive
B{\"o}hm Trees and L{\'e}vy-Longo
Trees~\cite{DBLP:journals/tcs/KerNO03,DBLP:journals/tcs/OngG04}.
To our knowledge,
game semantics exist based on
typed call-by-value,
e.g.,~\cite{DBLP:conf/csl/AbramskyM97,DBLP:journals/tcs/HondaY99}, but
not in the untyped case.
In this respect, it would be interesting to see whether the
relationship between $\pi$-calculus and Eager Trees studied in
this paper could help to establish similar relationships in game
semantics.
\iffull
\DS{maybe somewhere say that there are works in which the pi-calculus
is constrained by type systems so to have coarser equivalences in
such a way to obtain as induced equivalence, contextual equivalence
of $\lambda$-calculus. In this paper we stick to Milner's original
questions. Also, the type systems are non-trivial (for instance,
having to ensure the sequentiality of $\lambda$-terms. }
Fully abstract encodings in the $\pi$-calculus of typed versions of
the $\lambda$-calculus include~\cite{BHYseqpi}, where a type system
for $\pi$ based on ideas of affineness and stateless replication
insures full abstraction, as well as~\cite{toninho:yoshida:esop18},
where polymorphic session types for $\pi$ make it possible to derive
full abstraction for a linear formulation of System F.
\DS{another paper probably to mention:
Lassen shows in~\cite{lassenfa} that, in order for \enfbsim to
coincide with contextual equivalence, references and control operators
need to be added to the language.
}
Open Call-by-Value has been studied in~\cite{accattolicbv}, where the
focus is on operational properties of $\lambda$-terms, but behavioural
equivalences are not taken into consideration.
The book by Ronchi della Rocca and
Paolini~\cite{DBLP:series/txtcs/RoccaP04} presents denotational models
of call-by-value.
Game semantics have been used to provide a characterisation of
B{\"o}hm~\cite{DBLP:journals/tcs/KerNO03} and
L{\'e}vy-Longo~\cite{DBLP:journals/tcs/OngG04} trees. To our knowledge,
no similar work exists for call-by-value.
Game semantics accounts for (typed) call-by-value
include~\cite{DBLP:conf/csl/AbramskyM97,DBLP:journals/tcs/HondaY99}.
\begin{itemize}
\item
in \alpi\ only the output capability of names is communicated:
the recipient of a name may only use it in outputs (subject or object
position). Moreover the calculus is asynchronous: output is not a
prefixing construct. We do not know if asynchrony is essential for the
results presented in this section. The reason why we took it, thus
working in \alpi, is its theory~\cite{localpi}.
Specifically, we will exploit a labeled characterisation
(as forms of labeled bisimilarity)
of barbed
congruence~\cite{localpi} in which bisimilarity is the ordinary ground
bisimilarity one of the
$\pi$-calculus but the underlying LTS is appropriately modified.
As bisimilarity is the ordinary one, we can straightforwardly adapt
our results of unique solutions of equations (at the heart of our full
abstraction results for Eager Trees) to the setting of \alpi.
\item
Milner's original question in~\cite{milner:inria-00075405} was about the
preorder induced by the encoding of
\lterms.
We therefore move to the study of the
encoding through behavioural \emph{preorders}, as opposed to
behavioural \emph{equivalences}. This allows us to showcase the robustness of
the proof techniques used, as they
allow a seamless treatment of preorders. It also allows us to formulate the
preorder associated with \enfbsim, eager normal form similarity.
\end{itemize}
Going back to Milner's question, what did we learn?
\begin{itemize}
\item le codage ne marche pas completement, it has to be rectified **pour des
calculs non types**, afin d'etre en ligne avec la theorie du cbv.
\item on a repris la question de milner avec l'encodage que l'on a
presente. la reponse est : Lassen's equivalence.
\end{itemize}
\textbf{A propos de la possibilite de faire une preuve up to expansion : }
\begin{itemize}
\item ce que l'on ne peut pas faire c'est la preuve up to expansion
\emph{directement sur les termes qui viennent de la traduction} (les
``objets initiaux'').
il faut peut-etre faire des transformations, et ne pas faire up to
expansion tel quel.
\item on pense donc a la transformation qui introduit des
triggers. l'expansion est dans le mauvais sens. mais on pourrait
esperer s'en sortir avec le trigger, there are some taus that we
could handle in a better way, we could hope to find a way.. but
still not.
\item maintenant qu'on a fait notre preuve, on pourrait penser qu'une
autre approche marche, ou l'on ne traite pas dans la bisimulation
les termes tels quels. this might indeed work, but we don't
investigate this in full details for now.
\end{itemize}
\textbf{$\boldsymbol\eta$-expansion:} we might be able to
recover the trees without eta by using i/o types. i/o types make it
possible to avoid infinite forwarders, which intuitively bring eta in.
\textbf{Contextual equivalence.} (maybe move in intro?)
Can we say something about contextual equivalence, and how far we are
from that?
Lassen shows in~\cite{lassenfa} that, in order for \enfbsim to
coincide with contextual equivalence, references and control operators
need to be added to the language. The first example illustrates the
need for control operators, the second one the need for references.
Is there something to say about a logic \textsl{\`a la Abramsky} to
characterise contextual equivalence, in the case of cbv? (discussions
in Torino)
\paragraph{Other strategies for $\lambda$.}
Differences w.r.t. parallel call-by-value: the situation
is less simple with parallel call-by-value, in particular because,
when computing the encoding of $(x I)~(y I)$, we obtain two concurrent
outputs, while in our development we used the fact that we have at
most one visible transition in the encoding of a lambda term.
Moreover, it would be interesting to try and adapt our results to
handle the call-by-need strategy. \textit{Do we have something to say
about strong cbv?}
\paragraph{\alpi.}
In Section~\ref{s:localpi}, we rely on the syntax of \alpi\ to control
how names are used, in order to validate desirable laws such as~\reff{eq:nonlaw}.
Another approach would be to enforce the appropriate capability usages
onto the encoding $\pi$-calculus terms
by means of a type system (as opposed
to a syntactic approach as that of adopting \alpi); this would allow
for instance to retain the synchrony of the full $\pi$-calculus.
However, in this case, the labeled bisimilarity characterising
barbed congruence is more complex. We leave it for future work to
study if and how the theorems about unique solution of equations can
be adapted to this typed setting.
\paragraph{About the treatment of forwarders.}
\Mybar
Shall we give explanations about the forwarders involved in the
encoding of applications (we focused on forwarders in the encoding of
variables)? I commented out some notes here.
\paragraph{Related work.}
Accattoli and Guerrieri's ``Open Call-by-Value''~\cite{accattolicbv}.
\textbf{Game semantics.}
game semantics is close to pi-calculus encodings.
Q: en cbn, est-ce que la game semantics donne les arbres de levy?
(there is at least a paper by Ong and Di Gianantonio)
see in the source for some input from P. Clairambault, about relevant
references (this is in comments in the source).
\fi
\begin{acks}
This work has been supported by the \grantsponsor{}{
European
Research Council (ERC)}{} under the Horizon 2020 programme (CoVeCe,
grant agreement No \grantnum{}{678157});
the \grantsponsor{}{ANR}{} under the programmes
``Investissements d'Avenir'' (\grantnum{}{ANR-11-IDEX-0007}), \grantsponsor{}{LABEX MILYON}{}
(\grantnum{}{ANR-10-LABX-0070}),
and \grantsponsor{}{Elica}{}
(\grantnum{}{ANR-14-CE25-0005}); and the
\grantsponsor{}{Universit{\'e} Franco-Italienne}{} under
the programme Vinci.
\end{acks}
\end{document}
\begin{document}
\begin{abstract} In this paper we give a closed formula for the graded dimension of the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ associated to an {\it arbitrary} symmetrizable Cartan matrix $A=(a_{ij})_{i,j\in I}$, where $\Lambda\in P^+$ and $\beta\in Q_n^+$. As applications, we obtain some {\it necessary and sufficient conditions} for the KLR idempotent $e(\nu)$ (for any $\nu\in I^\beta$) to be nonzero in the cyclotomic quiver Hecke algebra
$\RR^\Lambda(\beta)$. We prove several level reduction results which decompose $\dim\RR^\Lambda(\beta)$ into a sum of some products of $\dim\RR^{\Lambda^i}(\beta_i)$ with $\Lambda=\sum_i\Lambda^i$ and $\beta=\sum_{i}\beta_i$, where $\Lambda^i\in P^+, \beta_i\in Q^+$ for each $i$. We construct some explicit monomial bases for the subspaces $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\mu)$ and $e(\mu)\RR^\Lambda(\beta)e(\widetilde{\nu})$ of $\RR^\Lambda(\beta)$, where $\mu\in I^\beta$ is {\it arbitrary} and $\widetilde{\nu}\in I^\beta$ is a certain specific $n$-tuple (see Section 5). We also use our graded dimension formulae to provide some examples which show that $\RR^\Lambda(n)$ is in general not graded free over its natural embedded subalgebra $\RR^\Lambda(m)$ with $m<n$.
\end{abstract}
\maketitle
\setcounter{tocdepth}{1}
\tableofcontents
\section{Introduction}
The idea of ``categorification'' originates from the work \cite{Cr} and \cite{CF} in their study of quantum gravity and four-dimensional topological quantum field theory. Many important knot invariants (e.g., Jones polynomials \cite{Kh00}) can be categorified and categorification has now become an intensively studied subject in several mathematical and physical areas. For each symmetrizable Cartan matrix $A=(a_{ij})_{i,j\in I}$, Khovanov-Lauda \cite{KL1,KL2} and Rouquier \cite{Rou1, Rou2} introduced a remarkable family of $\mathbb{Z}$-graded algebras $\RR=\bigoplus_{\beta\in Q_n^+}\RR(\beta)$, called quiver Hecke (or KLR) algebras, and used them to categorify the negative parts $U_q(\mathfrak{g})^{-}$ of the quantum group $U_q(\mathfrak{g})$ associated to $A$. For each dominant integral weight $\Lambda\in P^+$, they also defined their graded quotients, $\RR^\Lambda=\bigoplus_{\beta\in Q_n^+}\RR^\Lambda(\beta)$, called cyclotomic quiver Hecke (or cyclotomic KLR) algebras, and conjectured that they can be used to categorify the integrable highest weight module $V(\Lambda)$ over the quantum group $U_q(\mathfrak{g})$. The conjecture was proved by Kang and Kashiwara in \cite{KK}. When the ground field $K$ has characteristic $0$ and $A$ is symmetric, Rouquier \cite{Rou2} and Varagnolo-Vasserot \cite{VV} have proved that the categorification sends the indecomposable projective modules over the quiver Hecke algebra $\RR$ to the canonical bases of $U_q(\mathfrak{g})^{-}$.
In many aspects the structure and representation theory of the quiver Hecke algebra $\RR(\beta)$ resemble that of the affine Hecke algebras (\cite{G},\cite{Klesh:book}). For example, the standard (monomial) bases of $\RR(\beta)$ and faithful polynomial representations over $\RR(\beta)$ are constructed in \cite{KL1} and \cite{Rou2}, where it is also proved that the centers of the quiver Hecke algebras $\RR(\beta)$ consist of all symmetric elements in its KLR generators $x_1,\cdots,x_n$ and $e(\nu), \nu\in I^\beta$, which is similar to the well-known Bernstein's theorem on the centers of affine Hecke algebras. The representation theory of $\RR(\beta)$ has been well-studied in the literature, see e.g., \cite{BKOP}, \cite{BKM}, \cite{K14}, \cite{KLo}, \cite{KR10,KR11} and the references therein. In contrast to these results, little is known about the structure and representation theory of the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ except the cases of type $A$, type $C$ and some special $\Lambda$ (\cite{AP14, AP16c, APS, BK:GradedKL, BKgraded, HM}).
One of the main obstacles for the understanding of $\RR^\Lambda(\mathfrak{b}eta)$ is the lack of an explicit basis or even a closed formula for its graded dimension.
In the case of types $A_\ell^{(1)}$ and $A_\infty$, Brundan and Kleshchev gave in \cite[Theorem 4.20]{BKgraded} a graded dimension formula for $\RR^\Lambda(\beta)$ using the enumerative combinatorics of standard tableaux for multi-partitions, and they constructed in \cite{BK:GradedKL} an explicit $K$-algebra isomorphism between $\RR^\Lambda(\beta)$ and the block algebra labelled by $\beta$ of the cyclotomic Hecke algebra of type $G(\ell,1,n)$ when $\Lambda$ has level $\ell$. In this type $A$ case, Ariki's celebrated categorification work \cite{Ariki:can} was upgraded in \cite{BKgraded} to the $\mathbb{Z}$-graded setting via quiver Hecke algebras.
Based on \cite{BK:GradedKL}, the first author of this paper and Mathas have constructed a graded cellular basis for the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ in these cases. In the case of types $C_\ell^{(1)}$ and $C_\infty$, Ariki, Park and Speyer obtained in \cite{AP16c} and \cite[Theorem 2.5]{APS} a graded dimension formula for $\RR^\Lambda(\beta)$ in a similar way as \cite[Theorem 4.20]{BKgraded}. In the case of types $A_{2\ell}^{(2)}$ and $D_{\ell+1}^{(2)}$, S. Oh and E. Park have also obtained in \cite[Theorem 6.3]{OP} (see also \cite{AP14}) a graded dimension formula for the finite quiver Hecke algebra $\RR^{\Lambda_0}(\beta)$ using the enumerative combinatorics of standard tableaux for proper Young walls. All of \cite[Theorem 2.5]{APS}, \cite[Theorem 4.20]{BKgraded} and \cite[Theorem 6.3]{OP} rely on the realizations of the Fock space representations of the quantum groups of affine types. Park has given in \cite[Theorem 2.9]{P} an explicit basis of the cyclotomic quiver Hecke algebra corresponding to a minuscule representation of finite type. Recently, Mathas and Tubbenhauer have constructed graded cellular bases for some special affine types, see \cite{MT1}, \cite{MT2}.
In this paper we give a simple and closed formula for the graded dimension of the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ associated to an {\it arbitrary} symmetrizable Cartan matrix $A=(a_{ij})_{i,j\in I}$, where $\Lambda\in P^+$ and $\beta\in Q_n^+$. Our new dimension formula is a simple function in terms of the dominant integral weight $\Lambda$, simple roots and certain Weyl group elements, and involves no enumerative combinatorics of standard tableaux or Young walls. The following theorem is the first main result of this paper.
\begin{thm}\label{mainthmA} Let $\beta\in Q_n^+$ and $\nu=(\nu_1,\cdots,\nu_n),\nu'=(\nu'_1,\cdots,\nu'_n)\in I^\beta$. Then $$
\dim_q e(\nu)\RR^\Lambda(\beta)e(\nu')=\sum_{\substack{w\in\mathfrak{S}(\nu,\nu')}}\prod_{t=1}^{n}\Bigl([N^{\Lambda}(w,\nu,t)]_{\nu_t}
q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}\Bigr),
$$
where $N^{\Lambda}(w,\nu,t)$ is a certain integer introduced in Definition \ref{keydfn1}, $\mathfrak{S}(\nu,\nu'):=\{w\in\mathfrak{S}_n|w\nu=\nu'\}$, $q_{\nu_t}:=q^{d_{\nu_t}}$, and $[m]_{\nu_t}$ is the quantum integer introduced in (\ref{quantum1}) and (\ref{quantum2}).
\end{thm}
Specializing $q$ to $1$, we get that \begin{equation}\label{dimFormula}
\dim e(\nu)\RR^{\Lambda}(\beta)e(\nu')=\sum\limits_{w\in\mathfrak{S}(\nu,\nu')}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t).
\end{equation}
A priori, the integers $N^{\Lambda}(w,\nu,t)$ appearing in the above equality could be negative. Since $\dim e(\nu)\RR^{\Lambda}(\beta)e(\nu')\geq 0$, the summation on the right-hand side of the above equality must always be non-negative. This is surprising as we see no reason why this should be true from the right-hand side formula alone. A second simplified (or divided power) version of the dimension formula for $e(\nu)\RR^{\Lambda}(\beta)e(\nu)$ is also obtained in Theorem \ref{mainthm1b}.
Note that those integers $N^{\Lambda}(w,\nu,t)$ depend only on the symmetric group element $w$, the dominant weight $\Lambda$ and the integer $t$. The above dimension formula is new even in the special cases of (affine) type $A$ or (affine) type $C$. By the main results of \cite{BK:GradedKL}, the block algebra labelled by $\beta\in Q_n^+$ of the symmetric group $\mathfrak{S}_n$ in characteristic $e>0$ and of the Iwahori-Hecke algebra at a primitive $e$th root of unity can be identified with the corresponding cyclotomic quiver Hecke algebra $\RR^{\Lambda_0}(\beta)$. Thus Theorem \ref{mainthmA} and (\ref{dimFormula}) give some closed formulae for the dimensions of these block algebras, which is new to the best of our knowledge.
It would be very interesting to relate those integers $N^{\Lambda}(w,\nu,t)$ to the Fock space realization of affine quantum groups for general types.
It is well-known that any KLR idempotent $e(\nu)$ in the quiver Hecke algebra $\RR(\beta)$ is nonzero. In contrast, this is in general not the case for the KLR idempotent $e(\nu)$ in the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$. In fact, one of the unsolved open problems in the structure and representation theory of $\RR^\Lambda(\beta)$ is to determine when the KLR idempotent $e(\nu)$ is nonzero in $\RR^\Lambda(\beta)$. As a first application of our new dimension formula Theorem \ref{mainthmA} and (\ref{dimFormula}), we obtain the following second main result of this paper, which gives a simple criterion and thus completely solves the above problem for an {\it arbitrary} symmetrizable Cartan matrix.
\begin{thm}\label{mainthmB} Let $\Lambda\in P^+$, $\beta\in Q^+$ and $\nu=(\nu_1,\cdots,\nu_n)\in I^\beta$. Then $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ if and only if $$
\sum\limits_{w\in\mathfrak{S}(\nu,\nu)}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t)\neq 0 .
$$
\end{thm}
Using a second version of the dimension formula for $e(\nu)\RR^{\Lambda}(\beta)e(\nu)$ given in Theorem \ref{mainthm1b}, we also obtain in Theorem \ref{mainthmB2} a simplified (or divided power) version of the criterion for $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$.
In a second application of our new dimension formula Theorem \ref{mainthmA} and (\ref{dimFormula}), we prove the following third main result of this paper, which gives a decomposition of $\dim\RR^\Lambda(\beta)$ into a sum of some products of $\dim\RR^{\Lambda^i}(\beta_i)$ with $\Lambda=\sum_i\Lambda^i$ and $\beta=\sum_{i}\beta_i$.
\begin{thm} \label{mainthmC}
Suppose $\Lambda=\Lambda^1+\cdots+\Lambda^l$, where $\Lambda^i\in P^+$ for each $1\leq i\leq l$. Then
$$\dim \RR^\Lambda(\beta)=\sum_{\substack{\beta_1,\cdots,\beta_l\in Q^+\\ \beta=\beta_1+\cdots+\beta_l}}\Bigl(\frac{(|\beta_1|+\cdots+|\beta_l|)!}{|\beta_1|!\cdots|\beta_l|!}\Bigr)^2 \dim \RR^{\Lambda^1}(\beta_1)\cdots \dim \RR^{\Lambda^l}(\beta_l).
$$
\end{thm}
Our third application of Theorem \ref{mainthmA} is the construction of monomial bases for $\RR^\Lambda(\beta)$, which is the starting point of this work. As is well known, constructing monomial bases for the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ is a challenging problem. The first author of this paper and Liang have constructed a monomial basis for the cyclotomic nilHecke algebra in \cite{HuL}. In general, even in the special case of type $A$, no such monomial basis is known at the moment. Our new dimension formula for $\dim\RR^\Lambda(\beta)$ gives us a very strong indication that those integers $N^{\Lambda}(w,\nu,t)$ might play a key role in the construction of monomial bases of $\RR^\Lambda(\beta)$ for general types. The following theorem, which gives explicit monomial bases for a large class of direct summands of $\RR^\Lambda(\beta)$, is the fourth main result of this paper. Once again, the following theorem is valid for an {\it arbitrary} symmetrizable Cartan matrix.
\begin{thm}\label{mainthmD} Let $\mu\in I^\beta$ and $\widetilde{\nu}$ be given as in (\ref{wnu}). Then $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)\neq 0$ if and only if $N^\Lambda(\mu,k)>0$ for any $1\leq k\leq n$, where $N^\Lambda(\mu,k)$ is defined as in (\ref{wt}). In that case, fix any reduced expression $w=s_{i_1}\cdots s_{i_t}\in \mathfrak{S}(\mu,\widetilde{\nu})$ and define $\psi_w=\psi_{i_1}\cdots\psi_{i_t}$. The elements in the following set $$
\Bigl\{\psi_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)\Bigm| w\in\mathfrak{S}(\mu,\widetilde{\nu}), 0\leq r_{k}<N^\Lambda(\mu,k), \forall\,1\leq k\leq n\Bigr\}$$
form a $K$-basis of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$.
\end{thm}
We call the above basis a {\it monomial basis} of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$. Applying the anti-isomorphism ``$\ast$'', one can also get a monomial basis for the subspace $e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu})$. The main difficulty in generalizing the above theorem to an arbitrary direct summand
$e(\mu)\RR^\Lambda({\beta})e(\nu)$ lies in the fact that the integers $N^\Lambda(w,\mu,k)$ could be negative. However, we construct the monomial bases for all the direct summands in the $n=3$ case in Subsection 5.3. The construction still indicates that the expected monomial bases have some close relationships with those integers $N^\Lambda(w,\mu,k)$. We also apply our main results Theorem \ref{mainthmA} and Corollary \ref{maincor1} to give some concrete examples which show that the cyclotomic quiver Hecke algebra $\RR^\Lambda(n):=\oplus_{\beta\in Q_n^+}\RR^\Lambda(\beta)$ is in general not graded free over its subalgebra $\RR^\Lambda(m)$ for $m\leq n$.
The content of the paper is organised as follows. In Section 2 we give some preliminary definitions and results on the quantum group
$U_q(\mathfrak{g})$ associated to an arbitrary symmetrizable generalized Cartan matrix $A$, the quiver Hecke algebra $\RR(\beta)$ and the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ associated to $A$, $\beta\in Q_n^+$, the polynomials $\{Q_{i,j}(u,v)\}$ and $\Lambda\in P^+$. In Section 3 we give the proof of our first main result Theorem \ref{mainthmA}. The proof of Theorem \ref{mainthmA} essentially relies on Kang-Kashiwara's categorification of the integrable highest weight module $V(\Lambda)$ via the category of finite dimensional projective modules over $\RR^\Lambda(\beta)$. We give in Theorem \ref{mainthm1b} a second version of the dimension formula for the direct summand $e(\nu)\RR^\Lambda(\beta)e(\nu)$. Our second main result Theorem \ref{mainthmB} is proved in Subsection 3.3. In Section 4 we prove several level reduction results in Theorem \ref{mainthmC1} and Corollary \ref{genaralizetion} for the dimension formulae. As a consequence, we obtain in Corollary \ref{maincorC3} a third necessary and sufficient condition for the KLR idempotent $e(\nu)$ to be nonzero in $\RR^\Lambda(\beta)$. In Section 5 we apply Theorem \ref{mainthmA} to the construction of monomial bases of $\RR^\Lambda(\beta)$. We give the proof of our fourth main result Theorem \ref{mainthmD} in this section. We first construct a monomial basis of $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\widetilde{\nu})$ in Subsection 5.1, where $\widetilde{\nu}$ is as defined in (\ref{wnu}). Then we construct a monomial basis of $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\mu)$ for arbitrary $\mu$ in Subsection 5.2. Using the results obtained in Subsections 5.1, 5.2, we are able to construct in Subsection 5.3 a monomial basis for an arbitrary direct summand $e(\mu)\RR^\Lambda(\beta)e(\nu)$ of $\RR^\Lambda(\beta)$ in the case $n=3$.
Finally we give in Subsection 5.4 some concrete examples to show that the cyclotomic quiver Hecke algebra $\RR^\Lambda(n):=\oplus_{\beta\in Q_n^+}\RR^\Lambda(\beta)$ is in general not graded free over its subalgebra $\RR^\Lambda(m)$ for $m<n$.
\bigskip
\centerline{Acknowledgements}
\bigskip
The research was supported by the National Natural Science Foundation of China (No. 12171029).
\bigskip
\section{Preliminary}
In this section we shall recall some basic knowledge about the quantum groups and (cyclotomic) quiver Hecke algebras.
Let $A:=(a_{ij})_{i,j\in I}$ be a symmetrizable generalized Cartan matrix. Let $\{d_i\in\mathbb{Z}_{>0}|i\in I\}$ be a family of positive integers such that $(d_ia_{ij})_{i,j\in I}$ is symmetric.
Let $(P,\Pi,\Pi^\vee)$ be a realization of $A$ and $\mathfrak{g}$ be the corresponding Kac-Moody Lie algebra (\cite{Kac}). In other words, $P$ is a free abelian group called the weight lattice, $\Pi=\{\alpha_i|i\in I\}$ is the set of simple roots, $\Pi^\vee=\{h_i|i\in I\}\subset P^\vee:=\Hom_\mathbb{Z}(P,\mathbb{Z})$ is the set of simple coroots, $\<\alpha_j,h_i\>=a_{ij}$, $\forall\,i,j\in I$, and $\Pi, \Pi^\vee$ are linearly independent sets.
There is a symmetric bilinear pairing $(-|-)$ on $P$ satisfying $$
(\alpha_j|\alpha_i)=d_ia_{ij},\quad (\Lambda|\alpha_i)=d_i\<\Lambda,h_i\>, \,\,\forall\,\Lambda\in P.
$$
In particular, $d_i=(\alpha_i|\alpha_i)/2$. We denote by $P^+=\{\Lambda\in P|\<\Lambda,h_i\>\geq 0,\forall\,i\in I\}$ the set of dominant integral weights. For each $i\in I$, let $\Lambda_i$ be the $i$th fundamental weight, i.e., $\<\Lambda_i,h_j\>=\delta_{ij}, \forall\,j\in I$. Then each $\Lambda\in P^+$ can be written as $\Lambda=\sum_{i\in I}k_i\Lambda_i$, and we call $\ell(\Lambda):=\sum k_i$ the level of $\Lambda$.
Let $q$ be an indeterminate. For any $k\in I$, we set $q_k:=q^{d_k}=q^{(\alpha_k|\alpha_k)/2}$. For any $m\in\mathbb{Z}$, we define \begin{equation}\label{quantum1}
[m]_{k}:=\frac{q_k^m-q_k^{-m}}{q_k-q_{k}^{-1}}.
\end{equation}
And for any $m,n\in\mathbb{N}$ with $m\geq n$, we define \begin{equation}\label{quantum2}
[m]^{!}_{k}:=\prod_{t=1}^{m}[t]_k,\,\,\biggl[\begin{matrix}m\\ n\end{matrix}\biggr]_k:=\frac{[m]_k^{!}}{[m-n]_k^{!}[n]_k^{!}} .
\end{equation}
If $d_k=1$ for any $k\in I$, then we shall omit the subscript $k$ and write $[m]$ instead of $[m]_{k}$.
\begin{dfn}\label{qgrp} The quantum group (or quantized enveloping algebra) $U_q(\mathfrak{g})$ (\cite{Lu}) associated with $(A,P,\Pi,\Pi^\vee)$ is the associative algebra over $\mathbb{Q}(q)$ with $1$ generated by $e_i,f_i$ ($i\in I$) and $q^h$ ($h\in P^\vee$) satisfying the following relations: $$\begin{aligned}
(1)\,\,& q^0=1,\,\, q^h q^{h'}=q^{h+h'},\,\,\forall\, h,h'\in P^\vee;\\
(2)\,\,& q^he_iq^{-h}=q^{\<\alpha_i,h\>}e_i,\,q^hf_iq^{-h}=q^{-\<\alpha_i,h\>}f_i,\,\,\forall\,h\in P^\vee, i\in I;\\
(3)\,\,& e_if_j-f_je_i=\delta_{ij}\frac{K_i-K_i^{-1}}{q_i-q_i^{-1}},\,\,\text{where $K_i=q^{h_i}$};\\
(4)\,\,& \sum_{k=0}^{1-a_{ij}}(-1)^k\biggl[\begin{matrix}1-a_{ij}\\ k\end{matrix}\biggr]_ie_i^{1-a_{ij}-k}e_je_i^k=0,\,\,\forall\,i\neq j;\\
(5)\,\,& \sum_{k=0}^{1-a_{ij}}(-1)^k\biggl[\begin{matrix}1-a_{ij}\\ k\end{matrix}\biggr]_if_i^{1-a_{ij}-k}f_jf_i^k=0,\,\,\forall\,i\neq j.
\end{aligned}
$$
\end{dfn}
We set $Q:=\bigoplus_{i\in I}\mathbb{Z}\alpha_i$, and call it the root lattice. Set $Q^+:=\bigoplus_{i\in I}\mathbb{N}\alpha_i$, and call it the positive root lattice. For each $\beta=\sum_{i\in I}k_i\alpha_i\in Q^+$, we define $|\beta|:=\sum_{i\in I}k_i$. For each $n\in\mathbb{N}$, we set $Q_n^+:=\{\beta\in Q^+||\beta|=n\}$.
Let $u,v$ be two indeterminates. For any $i,j\in I$, let $Q_{i,j}(u,v)\in K[u,v]$ be a polynomial of the form $$
Q_{i,j}(u,v)=\begin{cases} \sum_{p(\alpha_i|\alpha_i)+q(\alpha_j|\alpha_j)+2(\alpha_i|\alpha_j)=0}t_{i,j;p,q}u^pv^q, &\text{if $i\neq j$;}\\
0, &\text{if $i=j$,}
\end{cases}
$$
where $t_{i,j;p,q}\in K$ are such that $t_{i,j;-a_{ij},0}\in K^\times$, and they satisfy that $Q_{i,j}(u,v)=Q_{j,i}(v,u)$, $\forall\,i,j\in I$.
In particular, if we regard $Q_{i,j}(u,v)$ as a polynomial in $u$, then the highest degree of $u$ in $Q_{i,j}(u,v)$ is $-a_{ij}$ with leading coefficient $t_{i,j;-a_{ij},0}\in K^\times$.
Let $I^n:=\{\nu=(\nu_1,\cdots,\nu_n)|\nu_i\in I,\forall\,1\leq i\leq n\}$. For any $\beta\in Q_n^+$, we define $$
I^\beta=\biggl\{\nu=(\nu_1,\cdots,\nu_n)\in I^n\biggm|\sum_{i=1}^n\alpha_{\nu_i}=\beta\biggr\}.
$$
Let $\mathfrak{S}_n$ be the symmetric group on $\{1,2,\cdots,n\}$. Then $\mathfrak{S}_n$ acts on $I^n$ from the left-hand side by place permutation. That is, for any $w\in\mathfrak{S}_n$, $\nu=(\nu_1,\cdots,\nu_n)$, $$
w\nu=w(\nu_1,\cdots,\nu_n):=(\nu_{w^{-1}(1)},\cdots,\nu_{w^{-1}(n)}) .
$$
One can also consider the action of $\mathfrak{S}_n$ on $I^n$ from the right-hand side, then we have $$
\nu w=(\nu_1,\cdots,\nu_n)w:=(\nu_{w(1)},\cdots,\nu_{w(n)}) .
$$
In particular, $w\nu=\nu w^{-1}$.
\begin{dfn}\label{D:QuiverRelations}
Let $K$ be a field. Let $n\in\mathbb{N}$ and $\beta\in Q_n^{+}$. The quiver Hecke (or KLR) algebra $\RR(\beta)$ associated with the polynomials $(Q_{i,j}(u,v))_{i,j\in I}$ and $\beta\in Q_n^+$ is the unital associative $K$-algebra with generators
$$\{\psi_1,\dots,\psi_{n-1}\} \cup \{x_1,\dots, x_n \} \cup \{e(\nu)|\nu\in I^\beta\} $$
and relations
\bgroup
\setlength{\abovedisplayskip}{1pt}
\setlength{\belowdisplayskip}{1pt}
\begin{align*}
e(\nu) e(\nu') &= \delta_{\nu\nu'} e(\nu),
& \sum_{\nu\in I^\beta}e(\nu)=1,& & &\\
x_r e(\nu) &= e(\nu) x_r,
&\psi_r e(\nu)&= e(s_r\nu) \psi_r,
&x_r x_s &= x_s x_r,
\end{align*}
\begin{align*}
\psi_r x_{r+1} e(\nu)&=(x_r\psi_r+\delta_{\nu_r\nu_{r+1}})e(\nu),&
x_{r+1}\psi_re(\nu)&=(\psi_r x_r+\delta_{\nu_r\nu_{r+1}})e(\nu),\\
\psi_r x_s &= x_s \psi_r,&&\text{if }s \neq r,r+1,\\
\psi_r \psi_s &= \psi_s \psi_r,&&\text{if }|r-s|>1,\notag
\end{align*}
\begin{align*}
\psi_r^2e(\nu) &= Q_{\nu_r,\nu_{r+1}}(x_r,x_{r+1})e(\nu),\\
\psi_{r+1}\psi_{r} \psi_{r+1} e(\nu)-\psi_{r}\psi_{r+1} \psi_{r} e(\nu) &=\delta_{\nu_r \nu_{r+2}}\frac{Q_{\nu_r,\nu_{r+1}}(x_r,x_{r+1})-Q_{\nu_r,\nu_{r+1}}(x_{r+2},x_{r+1})}{x_r-x_{r+2}}e(\nu),
\end{align*}
\egroup
for $\nu,\nu'\in I^\beta$ and all admissible $r$ and $s$.
\end{dfn}
For $\Lambda\in P^+$, $i\in I$, we define $$
a^\Lambda_i(x)=x^{\<\Lambda,h_i\>}.
$$
\begin{dfn} The cyclotomic quiver Hecke (or cyclotomic KLR) algebra $\RR^\Lambda(\beta)$ associated with the polynomials $(Q_{i,j}(u,v))_{i,j\in I}$, $\beta\in Q_n^+$ and $\Lambda\in P^+$ is defined to be the quotient of $\RR(\beta)$ by the two-sided ideal of $\RR(\beta)$ generated by $a_{\nu_1}^\Lambda(x_1)e(\nu)$, $\nu\in I^\beta$.
\end{dfn}
The algebra $\RR(\beta)$ is $\mathbb{Z}$-graded with its grading structure given by $$
\deg e(\nu)=0,\,\,\deg(x_ke(\nu)):=(\alpha_{\nu_k}|\alpha_{\nu_k}),\,\,\deg(\psi_ke(\nu)):=-(\alpha_{\nu_k}|\alpha_{\nu_{k+1}}) .
$$
Inheriting the $\mathbb{Z}$-grading from $\RR(\beta)$, the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ is $\mathbb{Z}$-graded too. There is a unique $K$-algebra anti-isomorphism ``$\ast$'' of $\RR^\Lambda(\beta)$ which is defined on its KLR generators by $$
e(\nu)^\ast=e(\nu),\,\,\psi_r^{\ast}:=\psi_r,\,\,x_s^\ast:=x_s,\,\,\,\forall\, \nu\in I^\beta, 1\leq r<n, 1\leq s\leq n .
$$
We use $q$ to denote the grading shift functor on $\Mod(\RR^\Lambda(\beta))$. That means $$(qM)_j=M_{j-1},
$$ for any $M=\oplus_{\substack{j\in \mathbb{Z}}}M_j\in\Mod(\RR^\Lambda(\beta))$. Then the Grothendieck group $[\Mod(\RR^\Lambda(\beta))]$ becomes a $\mathbb{Z}[q,q^{-1}]$-module, where $q[M]=[qM]$ for $M\in \Mod(\RR^\Lambda(\beta))$.
Let $\beta\in Q_n^+$ and $i\in I$, we set $$
e(\beta,i):=\sum_{\nu=(\nu_1,\cdots,\nu_n)\in I^{\beta}}e(\nu_1,\cdots,\nu_n,i).
$$
Kang and Kashiwara have introduced restriction functors and induction functors in \cite{KK} as follows: $$\begin{aligned}
E_i^\Lambda:\, \Mod(\RR^\Lambda(\beta+\alpha_i))&\rightarrow \Mod(\RR^\Lambda(\beta)),\\
N&\mapsto e(\beta,i)N=e(\beta,i)\RR^\Lambda(\beta+\alpha_i)\otimes_{\RR^{\Lambda}(\beta+\alpha_i)}N,\\
F_i^\Lambda:\, \Mod(\RR^\Lambda(\beta))&\rightarrow \Mod(\RR^\Lambda(\beta+\alpha_i)),\\
M&\mapsto \RR^\Lambda(\beta+\alpha_i)e(\beta,i)\otimes_{\RR^{\Lambda}(\beta)}M .
\end{aligned}
$$
Let $\Proj(\RR^\Lambda(\beta))$ be the category of finite dimensional projective $\RR^\Lambda(\beta)$-modules and $K\bigl(\Proj(\RR^\Lambda(\beta))\bigr)$ its Grothendieck group. Let ${\rm K}_i$ be the endomorphism of $K\bigl(\Proj(\RR^\Lambda(\beta))\bigr)$ given by multiplication by $q_i^{1-\<\Lambda-\beta,h_i\>}$.
Let ${\rm E}_i:=[E_i^\Lambda]$, ${\rm F}_i:=q_i^{1-\<\Lambda-\beta,h_i\>}[F_i^\Lambda]$, where $[E_i^\Lambda]: K\bigl(\Proj(\RR^\Lambda(\beta+\alpha_i))\bigr)\rightarrow K\bigl(\Proj(\RR^\Lambda(\beta))\bigr)$ and $[F_i^\Lambda]: K\bigl(\Proj(\RR^\Lambda(\beta))\bigr)\rightarrow K\bigl(\Proj(\RR^\Lambda(\beta+\alpha_i))\bigr)$ are the naturally induced maps on the Grothendieck groups. Then by \cite[Lemma 6.1]{KK}, \begin{equation}\label{effe}
{\rm E}_i{\rm F}_j-{\rm F}_j{\rm E}_i=\delta_{ij}\frac{{\rm K}_i-{\rm K}_i^{-1}}{q_i-q_i^{-1}} .
\end{equation}
Let $U_{\mathbb{Z}[q,q^{-1}]}(\mathfrak{g})$ be Lusztig's $\mathbb{Z}[q,q^{-1}]$-form of the quantum group $U_q(\mathfrak{g})$. Let $v_\Lambda$ be a fixed highest weight vector of the irreducible highest weight $U_q(\mathfrak{g})$-module $V(\Lambda)$. Set $V_{\mathbb{Z}[q,q^{-1}]}(\Lambda):=U_{\mathbb{Z}[q,q^{-1}]}(\mathfrak{g})v_\Lambda$.
\begin{thm}\text{(\cite{KK})} For each $\Lambda\in P^+$, there is an $U_{\mathbb{Z}[q,q^{-1}]}(\mathfrak{g})$-module isomorphism: $K\bigl(\Proj\RR^\Lambda\bigr)\cong V_{\mathbb{Z}[q,q^{-1}]}(\Lambda)$.
\end{thm}
For each $1\leq i<n$, we define $s_i:=(i,i+1)$. Then $s_1,\cdots,s_{n-1}$ generate $\mathfrak{S}_n$. A word $w=s_{i_{1}}s_{i_{2}}\ldots s_{i_{k}}$ for $w\in \mathfrak{S}_{n}$ is called a reduced expression of $w$ if $k$ is minimal; in this case we say that $w$ has length $k$ and we write $\ell(w)=k$. We use ``$\leq$'' to denote the Bruhat partial order on $\mathfrak{S}_n$. That is, for any $x,y\in\mathfrak{S}_n$, $x\leq y$ if and only if $x=s_{i_{j_1}}\cdots s_{i_{j_t}}$ for some reduced expression
$y=s_{i_1}\cdots s_{i_m}$ of $y$ and some integers $1\leq t\leq m$, $1\leq j_1<\cdots< j_t\leq m$. If $x\leq y$ and $x\neq y$ then we write $x<y$.
\begin{lem}\label{deg1} Let $w\in\mathfrak{S}_n$ and $\nu=(\nu_1,\cdots,\nu_n)\in I^n$. We fix a reduced expression $s_{r_1}\cdots s_{r_k}$ of $w$, and define $\psi_w:=\psi_{r_1}\cdots\psi_{r_k}$. Then $$
\deg\psi_we(\nu)=-\sum_{t=1}^{n}\sum_{\substack{1\leq i<t\\ w(i)>w(t)}}(\alpha_{\nu_i}|\alpha_{\nu_t}).
$$
In particular, $\deg\psi_we(\nu)$ is independent of the choice of the reduced expression $s_{r_1}\cdots s_{r_k}$ of $w$.
\end{lem}
\begin{proof} We define $n(w)=\{(i,j)|1\leq i<j\leq n, w(i)>w(j)\}$. To prove the lemma we make induction on $\ell(w)$. If $\ell(w)=1$, the lemma follows from
the definition of $\deg\psi_r$.
Now suppose $\ell(w)>1$. Then we can always choose $1\leq t<n$ such that $s_tw<w$. In particular, $\ell(s_{t}w)+1=\ell(w)$. In this case it is easy to check $$
n(w)=n(s_{t}w)\cup\{(w^{-1}(t),w^{-1}(t+1))\}. $$
Therefore, we have $$\begin{aligned}
\deg(\psi_{w}e(\nu))&=\deg(\psi_{s_{t}}e(s_{t}w\,\nu))+\deg(\psi_{s_{t}w}e(\nu))\\
&=\deg\bigl(\psi_{s_{t}}e(\nu_{w^{-1}(1)},\cdots,\nu_{w^{-1}(t+1)},\nu_{w^{-1}(t)},\cdots,\nu_{w^{-1}(n)})\bigr)\\
&\qquad\qquad -\sum_{\substack{i<j\\ s_{t}w(i)>s_{t}w(j)}}(\alpha_{\nu_{j}}\,|\alpha_{\nu_{i}})\quad \text{(by induction hypothesis)}\\
&=-(\alpha_{\nu_{w^{-1}(t)}}\,|\alpha_{\nu_{w^{-1}(t+1)}})-\sum_{\substack{i<j \\ s_{t}w(i)>s_{t}w(j)}}(\alpha_{\nu_{j}}\,|\alpha_{\nu_{i}})\\
&=-\sum_{\substack{i<j \\w(i)>w(j)}}(\alpha_{\nu_{j}}\,|\alpha_{\nu_{i}}).\end{aligned} $$
This completes the proof of the lemma.
\end{proof}
\bigskip
\section{Graded dimensions of cyclotomic quiver Hecke algebras}
In this section we shall first give a proof of our first main result, Theorem \ref{mainthmA}; that is, we give a closed formula for the graded dimension of the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$. Then, as an application of Theorem \ref{mainthmA}, we shall give two criteria for the KLR idempotent $e(\nu)$ to be nonzero in $\RR^\Lambda(\beta)$. In particular, we shall give the proof of our second main result, Theorem \ref{mainthmB}, of this paper.
\subsection{A graded dimension formula for $\RR^\Lambda(\beta)$}
Since $\{e(\nu)|\nu\in I^\beta\}$ is a set of pairwise orthogonal idempotents in $\RR^\Lambda(\beta)$ which sum to $1$, we have $$\RR^\Lambda(\beta)=\oplus_{\mu,\nu\in I^\beta}e(\mu)\RR^\Lambda(\beta)e(\nu).$$
Thus, to give the graded dimension formula for $\RR^\Lambda(\beta)$, it suffices to give the graded dimension formula for each $e(\mu)\RR^\Lambda(\beta)e(\nu)$, where $\mu,\nu\in I^\beta$.
For $\Lambda\in P^+$, $\beta\in Q^+$, we define $$
\df(\Lambda,\beta):=(\Lambda |\beta)-\frac{1}{2}(\beta |\beta) .
$$
\begin{lem}\label{identity1} Let $\Lambda\in P^+$, $\beta\in Q^+$. Then for any $\alpha_i\in\Pi$, we have $$
\df(\Lambda,\beta)-\df(\Lambda,\beta-\alpha_i)=d_i\big(1+\<\Lambda-\beta,h_i\>\big).
$$
\end{lem}
\begin{proof} By definition, $d_i=(\alpha_i |\alpha_i)/2$. It follows that $$\begin{aligned}
&\quad\,\df(\Lambda,\beta)-\df(\Lambda,\beta-\alpha_i)\\
&=(\Lambda |\alpha_i)-(\beta |\alpha_i)+\frac{1}{2}(\alpha_i|\alpha_i)=d_i\big(1+\<\Lambda-\beta,h_i\>\big) .
\end{aligned}
$$
This proves the lemma.
\end{proof}
\begin{dfn}\label{keydfn1} For any $w\in\mathfrak{S}_n$, $t\in\{1,2,\cdots,n\}$, we define $$
J_w^{<t}:=\{1\leq j<t|w(j)<w(t)\} .
$$
Let $\Lambda\in P^+$. For any $\nu=(\nu_1,\cdots,\nu_n)\in I^n$ and $1\leq t\leq n$, we define \begin{equation}\label{Ndef}
N^\Lambda(w,\nu,t):=\<\Lambda-\sum_{j\in J_w^{<t}}\alpha_{\nu_j}, h_{\nu_t}\>.
\end{equation}
\end{dfn}
If $\Lambda$ is clear from the context, we shall omit the superscript $\Lambda$ and write $N(w,\nu,t)$ instead of $N^\Lambda(w,\nu,t)$.
For any $\nu,\nu'\in I^n$, we define
$$
\mathfrak{S}(\nu,\nu'):=\big\{w\in\mathfrak{S}_n | w\nu =\nu' \big\} .
$$
Let $w\in\mathfrak{S}(\nu,\nu')$ and $1\leq t\leq n$. We define \begin{equation}\label{length2}
\check{N}^\Lambda(w,\nu,t):=\<\Lambda-\sum_{\substack{1\leq j<w(t),\\ j\in \{w (1),\cdots\, ,w (t-1)\}}}\alpha_{\nu'_{j}}, h_{\nu_{t}}\>.
\end{equation}
Note that the subscript of $\alpha$ in the summation defining $N^\Lambda(w,\nu,t)$ is $\nu_j$, while the subscript of $\alpha$ in the summation defining $\check{N}^\Lambda(w,\nu,t)$ is $\nu'_j$. Compared with $\check{N}^\Lambda(w,\nu,t)$, $N^\Lambda(w,\nu,t)$ has the advantage that its definition involves only $w,\nu,t$ but not $\nu'$. The following lemma shows that the two quantities are in fact equal.
\begin{lem}\label{eqa1} Let $\nu,\nu'\in I^n$. For any $w\in\mathfrak{S}(\nu,\nu')$ and $1\leq t\leq n$, we have that $N^\Lambda(w,\nu,t)=\check{N}^\Lambda(w,\nu,t)$.
\end{lem}
\begin{proof} For any $1\leq i<w(t)$ with $i\in \{w (1),\cdots\, ,w (t-1)\}$, we can find a unique $j\in J_w^{<t}$ such that $i=w(j)$, and hence $\nu'_i=\nu'_{w(j)}=\nu_j$ because $w\in\mathfrak{S}(\nu,\nu')$.
The lemma follows at once.
\end{proof}
Let $M$ be a finite dimensional $\mathbb{Z}$-graded $K$-linear space. For each $k\in\mathbb{Z}$, we use $M_k$ to denote its degree $k$ homogeneous component. The graded dimension of $M$ is defined by $$
\dim_q M:=\sum_{k\in\mathbb{Z}}(\dim M_k)q^k .
$$
By the definitions given at the end of the last section, we have $$
{\rm F}_i[\RR^\Lambda(\beta)]=[\RR^\Lambda(\beta+\alpha_i)e(\beta,i)],\quad {\rm E}_i[\RR^\Lambda(\beta+\alpha_i)]=q_i^{1-\<\Lambda-\beta,h_i\>}[e(\beta,i)\RR^\Lambda(\beta+\alpha_i)].
$$
As a result, Oh and Park deduced the following proposition in \cite[Proposition 3.3]{OP}.
\begin{prop}[\text{\cite[Proposition 3.3]{OP}}]\label{op1} Let $\Lambda\in P^+$, and let $v_\Lambda\in V(\Lambda)$ be a highest weight vector in $V(\Lambda)$ of weight $\Lambda$. Let $\beta\in Q^+$ and $\nu=(\nu_1,\cdots,\nu_n),\nu'=(\nu'_1,\cdots,\nu'_n)\in I^\beta$. Then $$
e_{\nu_1}\cdots e_{\nu_n}f_{\nu'_n}\cdots f_{\nu'_1}v_{\Lambda}=q^{-\df(\Lambda,\beta)}\bigl(\dim_q e(\nu)\RR^\Lambda(\beta)e(\nu')\bigr)v_\Lambda .
$$
\end{prop}
For each monomial of the form $f_{j_1}\cdots f_{j_n}$, we use the notation $f_{j_1}\cdots \widehat{f_{j_k}}\cdots f_{j_n}$ to denote the monomial obtained by removing $f_{j_k}$ from the monomial $f_{j_1}\cdots f_{j_n}$. That is, $$
f_{j_1}\cdots \widehat{f_{j_k}}\cdots f_{j_n}:=f_{j_1}\cdots f_{j_{k-1}}f_{j_{k+1}}\cdots f_{j_n}.
$$
Similarly, for any $\nu=(\nu_1,\cdots,\nu_n)\in I^\beta$, we define $$
(\nu_1,\cdots,\widehat{\nu_k},\cdots,\nu_n):=(\nu_1,\cdots,\nu_{k-1},\nu_{k+1},\cdots,\nu_n)\in I^{\beta-\alpha_{\nu_k}}.
$$
\noindent
\textbf{Proof of Theorem \ref{mainthmA}}: We claim that $$\begin{aligned}
&\quad\,\dim_{q}e(\nu)\RR^{\Lambda}(\beta)e(\nu')\\
&=\sum_{\substack{1\leq k_1,\cdots,k_n\leq n\\ \nu_i=\nu'_{k_i}, \forall\,1\leq i\leq n\\ k_a\neq k_b,\forall\,1\leq a\neq b\leq n}}\prod_{t=1}^{n}\Biggl(
\Bigl[\bigl(\Lambda-\sum\limits_{\substack{1\leq i<k_t\\ i\neq k_s,\forall\,t\leq s\leq n}}\alpha_{\nu'_i}\bigr)(h_{\nu_t})\Bigr]_{\nu_t}q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}\Biggr).
\end{aligned}
$$
We use induction on $|\beta|$. Suppose that the claim holds for any $\beta\in Q_{n-1}^+$. Now we assume $\beta\in Q_n^+$. Applying Proposition \ref{op1}, we get that $$\begin{aligned}
&\quad\, \Bigl(\dim_{q}e(\nu)\RR^{\Lambda}(\beta)e(\nu')\Bigr)v_{\Lambda}\\
&=q^{\df(\Lambda,\beta)}e_{\nu_1}\cdots\, e_{\nu_n}f_{\nu'_n} \cdots\, f_{\nu'_1}v_{\Lambda}\\
&=\sum_{\substack{1\leq k_n\leq n\\ \nu_n=\nu'_{k_n}}}{q^{\df(\Lambda,\beta)}}\Bigl[(\Lambda-\sum\limits_{i=1}^{k_{n}-1}\alpha_{\nu'_i})(h_{\nu_n})\Bigr]_{\nu_n}e_{\nu_{1}}\cdots\, e_{\nu_{n-1}}f_{\nu'_n} \cdots\, \widehat{f_{\nu'_{k_n}}} \\
&\qquad\qquad \times\cdots\times f_{\nu'_1}v_{\Lambda}\qquad\qquad\text{(by (\ref{effe}) and Definition \ref{qgrp} (2),(3))}\\
&=\sum_{\substack{1\leq k_n\leq n\\ \nu_n=\nu'_{k_n}}} q^{\df(\Lambda,\beta)-\df(\Lambda,\beta-\alpha_{\nu_n})}
\Bigl[(\Lambda-\sum\limits_{i=1}^{k_{n}-1}\alpha_{\nu'_i})(h_{\nu_n})\Bigr]_{\nu_n}\\
&\qquad\quad\times \dim_q e(\nu_1,\cdots,\nu_{n-1})\RR^\Lambda(\beta-\alpha_{\nu_n})e(\nu'_1,\cdots,\widehat{\nu'_{k_n}},\cdots,\nu'_n)v_{\Lambda}\quad\text{(by Proposition \ref{op1})}\\
&=\sum_{\substack{1\leq k_n\leq n\\ \nu_n=\nu'_{k_n}}}{q_{\nu_{n}}^{1+(\Lambda-\beta)(h_{\nu_{n}})}}\Bigl[(\Lambda-\sum\limits_{i=1}^{k_{n}-1}\alpha_{\nu'_i})(h_{\nu_n})\Bigr]_{\nu_n}
\\
&\qquad\quad\times\Bigl(\dim_q e(\nu_1,\cdots,\nu_{n-1})\RR^{\Lambda}(\beta-\alpha_{\nu_n})e(\nu'_1,\cdots,\widehat{\nu'_{k_n}},\cdots,\nu'_n)\Bigr)v_{\Lambda}\qquad\text{(by Lemma \ref{identity1})}.
\end{aligned} $$
It follows that \begin{equation}\label{ind}\begin{aligned}
\dim_{q}e(\nu)\RR^{\Lambda}(\beta)e(\nu')
&=\sum_{\substack{1\leq k_n\leq n\\ \nu_n=\nu'_{k_n}}}{q_{\nu_{n}}^{1+(\Lambda-\beta)(h_{\nu_{n}})}}\Bigl[(\Lambda-\sum\limits_{i=1}^{k_{n}-1}\alpha_{\nu'_i})(h_{\nu_n})\Bigr]_{\nu_n}\\
&\qquad\qquad\times\dim_q e(\nu_1,\cdots,\nu_{n-1})\RR^{\Lambda}(\beta-\alpha_{\nu_n})e(\nu'_1,\cdots,\widehat{\nu'_{k_n}},\cdots,\nu'_n).
\end{aligned}\end{equation}
We define $\tilde{\nu}'=(\tilde{\nu}'_1,\cdots,\tilde{\nu}'_{n-1}):=(\nu'_1,\cdots,\widehat{\nu'_{k_n}},\cdots,\nu'_n)$. Applying the induction hypothesis, we can deduce that $$
\begin{aligned}
&\quad\, \Bigl(\dim_q e(\nu_1,\cdots,\nu_{n-1})\RR^{\Lambda}(\beta-\alpha_{\nu_n})e(\nu'_1,\cdots,\widehat{\nu'_{k_n}},\cdots,\nu'_n)\Bigr)v_{\Lambda}\\
&=\Bigl(\dim_q e(\nu_1,\cdots,\nu_{n-1})\RR^{\Lambda}(\beta-\alpha_{\nu_n})e(\tilde{\nu}'_1,\cdots,\tilde{\nu}'_{n-1})\Bigr)v_{\Lambda}\\
&=\sum_{\substack{1\leq \tilde{k}_1,\cdots,\tilde{k}_{n-1}\leq n-1\\ \nu_i=\tilde{\nu}'_{\tilde{k}_i}, \forall\,1\leq i\leq n-1\\ \tilde{k}_a\neq \tilde{k}_b,\forall\,a\neq b}}\prod_{t=1}^{n-1}
\Bigl(\Bigl[\bigl(\Lambda-\sum\limits_{\substack{1\leq i<\tilde{k}_t\\ i\neq \tilde{k}_s,\forall\,t\leq s\leq n-1}}\alpha_{\tilde{\nu}'_i}\bigr)(h_{\nu_t})\Bigr]_{\nu_t}q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}\Bigr)
v_{\Lambda}.
\end{aligned}
$$
Note that the $(n-1)$-tuple $(\tilde{k}_1,\cdots,\tilde{k}_{n-1})$ in the summation is a permutation of $\{1,2,\cdots,n-1\}$. For any given integer $1\leq k_n\leq n$, there is an associated natural bijection $\pi_{k_n}$ from the set $$
\Bigl\{(k_1,\cdots,k_{n-1})\Bigm|\begin{matrix}\text{$1\leq k_1,\cdots,k_{n-1}\leq n$, $\nu_i=\nu'_{k_i},\forall\,1\leq i\leq n-1$}\\ \text{$k_n\neq k_a\neq k_b, \forall\,1\leq a\neq b<n$}
\end{matrix}\Bigr\}
$$
onto the set $$
\Bigl\{(\tilde{k}_1,\cdots,\tilde{k}_{n-1})\Bigm|\begin{matrix}\text{$1\leq \tilde{k}_1,\cdots,\tilde{k}_{n-1}\leq n-1$, $\nu_i=\tilde{\nu}'_{\tilde{k}_i},\forall\,1\leq i\leq n-1$}\\ \text{$\tilde{k}_a\neq \tilde{k}_b, \forall\,1\leq a\neq b<n$}
\end{matrix}\Bigr\}
$$
which is defined by $$
\pi_{k_n}(k_1,\cdots,k_{n-1})=(\tilde{k}_1,\cdots,\tilde{k}_{n-1}),\quad
\tilde{k}_j:=\begin{cases}k_j, &\text{if $k_j<k_n$;}\\ k_j-1, &\text{if $k_j>k_n$,}\end{cases}\,\,\forall\,1\leq j\leq n-1.
$$
With this bijection $\pi_{k_n}$ in mind, we can deduce from the above calculation that $$
\begin{aligned}
&\quad\, \Bigl(\dim_q e(\nu_1,\cdots,\nu_{n-1})\RR^{\Lambda}(\beta-\alpha_{\nu_n})e(\nu'_1,\cdots,\widehat{\nu'_{k_n}},\cdots,\nu'_n)\Bigr)v_{\Lambda}\\
&=\sum_{\substack{1\leq k_1,\cdots,k_{n-1}\leq n\\ \nu_i={\nu}'_{k_i}, \forall\,1\leq i\leq n-1\\ k_n\neq k_a\neq k_b,\forall\,1\leq a\neq b<n}}\prod_{t=1}^{n-1}
\Bigl(\Bigl[\bigl(\Lambda-\sum\limits_{\substack{1\leq i<k_t\\ i\neq k_s,\forall\,t\leq s\leq n-1}}\alpha_{\nu'_i}\bigr)(h_{\nu_t})\Bigr]_{\nu_t}q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}\Bigr)
v_{\Lambda}.
\end{aligned}
$$
Combining this with the equality (\ref{ind}), we prove our claim.
Finally, the conditions that $\{k_1,\cdots,k_n\}$ is a permutation of $\{1,\cdots,n\}$ and that $\nu_i={\nu}'_{k_i}, \forall\,1\leq i\leq n$, mean that there exists $w\in\mathfrak{S}(\nu,\nu')$ such that $k_j=w(j)$, $\forall\,1\leq j\leq n$. It is then clear that the theorem follows from our above claim and Lemma \ref{eqa1}.\qed
\begin{cor}\label{maincor1} Let $\beta\in Q^+$ and $\nu=(\nu_1,\cdots,\nu_n),\nu'=(\nu'_1,\cdots,\nu'_n)\in I^\beta$. Then
$\dim e(\nu)\RR^{\Lambda}(\beta)e(\nu')=\sum\limits_{w\in\mathfrak{S}(\nu,\nu')}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t)$.
\end{cor}
\begin{proof} We evaluate the formula in Theorem \ref{mainthmA} at $q=1$ (applying L'H\^{o}pital's rule). The corollary follows.
\end{proof}
Let $\nu,\nu'\in I^\beta$. We fix an element $w \in\mathfrak{S}(\nu,\nu')$. Applying Lemma \ref{deg1}, we can get $$
\prod_{t=1}^{n}q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}
=q^{\deg\psi_we(\nu)}\prod_{t=1}^{n}q_{\nu_t}^{N^{\Lambda}(w,\nu,t)-1} .
$$
It follows that $$\begin{aligned}
&\quad\,\dim_q e(\nu)\RR^\Lambda(\beta)e(\nu')=\sum_{\substack{w\in\mathfrak{S}(\nu,\nu')}}\prod_{t=1}^{n}\Bigl([N^{\Lambda}(w,\nu,t)]_{\nu_t}
q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}\Bigr)\\
&=\sum_{\substack{w\in\mathfrak{S}(\nu,\nu')\\ N^{\Lambda}(w,\nu,t)\neq 0,\forall\,t}}\prod_{t=1}^{n}\Bigl([N^{\Lambda}(w,\nu,t)]_{\nu_t}
q_{\nu_t}^{N^{\Lambda}(1,\nu,t)-1}\Bigr)\\
&=\sum_{\substack{w\in\mathfrak{S}(\nu,\nu')\\ N^{\Lambda}(w,\nu,t)\neq 0,\forall\,t}}q^{\deg(\psi_we(\nu))}\prod_{t=1}^{n}\Bigl([N^{\Lambda}(w,\nu,t)]_{\nu_t}
q_{\nu_t}^{N^{\Lambda}(w,\nu,t)-1}\Bigr).
\end{aligned}
$$
If $N^{\Lambda}(w,\nu,t)>0$, then \begin{equation}\label{eq1}
[N^{\Lambda}(w,\nu,t)]_{\nu_t}q_{\nu_t}^{N^{\Lambda}(w,\nu,t)-1}=\sum_{a=0}^{N^\Lambda(w,\nu,t)-1}q_{\nu_t}^{2a} ;
\end{equation}
if $N^{\Lambda}(w,\nu,t)<0$, then \begin{equation}\label{eq2}
[N^{\Lambda}(w,\nu,t)]_{\nu_t}q_{\nu_t}^{N^{\Lambda}(w,\nu,t)-1}=-\sum_{a=1}^{-N^\Lambda(w,\nu,t)}q_{\nu_t}^{-2a} .
\end{equation}
The integers $N^{\Lambda}(w,\nu,t)$ can be negative or zero. Note that we always have $\sum\limits_{w\in\mathfrak{S}(\nu,\nu')}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t)\geq 0$, as this number is the dimension of a subspace by Corollary \ref{maincor1}. However, it is not at all obvious from the formula $\sum\limits_{w\in\mathfrak{S}(\nu,\nu')}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t)$ itself why it is always non-negative.
The identity (\ref{eq1}) suggests that one might be able to obtain a monomial basis of $\RR^\Lambda(\beta)$ of the form $\{e(\nu')\psi_wy_1^{c_1}\cdots y_n^{c_n}e(\nu)|0\leq c_t<N^\Lambda(w,\nu,t),\forall\,1\leq t\leq n\}$.
The following example shows that this is not the case.
\begin{examp} Let $\mathscr{H}^{0}_{\ell,n}$ be the cyclotomic nilHecke algebra with level $\ell$ and size $n$. That is, $\mathscr{H}^{0}_{\ell,n}=\RR^\Lambda(\beta)$ with $\Lambda=\ell\Lambda_0$, $\beta=n\alpha_0$. We consider the special case when $\ell=5, n=2$. Then $\Lambda=5\Lambda_0, \nu=(0,0)$ and $\mathfrak{S}(\nu,\nu)=\{1,s_{1}\}$. By direct calculation, one gets that
$$
N^\Lambda(1,\nu,1)=5,\,\, N^\Lambda(1,\nu,2)=3,\,\, N^\Lambda(s_1,\nu,1)=5,\,\,N^\Lambda(s_1,\nu,2)=5. $$
On the other hand, by \cite[Proposition 7]{HL} and \cite[Lemma 2.20]{HuL}, we have $$
\sum_{\substack{k_{1}+k_{2}=5-2+1=4}}x_{1}^{k_{1}}x_{2}^{k_{2}}=0. $$
Thus the elements in the set $\{\psi_{s_1}x_{1}^{a_{1}}x_{2}^{a_{2}}e(\nu)|0\leq a_{t}<N^\Lambda(s_1,\nu,t)=5,\,t=1,2\}$ are $K$-linearly dependent.
\end{examp}
\smallskip
\subsection{A second formula for the dimension of $e(\nu)\RR^\Lambda(\beta)e(\nu)$}
Let $\beta\in Q_n^+$ and $\nu\in I^{\beta}$. We can always write \begin{equation}\label{nu2}
\nu=(\nu_1,\cdots,\nu_n)=(\underbrace{\nu^{1},\nu^{1},\cdots,\nu^{1}}_{b_{1}\,\text{copies}},\cdots,\underbrace{\nu^{p},\nu^{p},\cdots,\nu^{p}}_{b_{p}\,\text{copies}}),
\end{equation}
where $p\in\mathbb{N}$, $b_1,\cdots,b_p\in\mathbb{N}$ with $\sum_{i=1}^{p}b_i=n$ and $\nu^{j}\neq\nu^{j+1}$ for any $1\leq j<p$.
The purpose of this subsection is to give a second formula for the dimension of $e(\nu)\RR^\Lambda(\beta)e(\nu)$.
Define the set $$
\Sigma_n:=\bigl\{(k_1,\cdots,k_n)\in\mathbb{Z}^n\bigm|k_j\in\{0,1,\cdots,j-1\},\forall\,1\leq j\leq n\bigr\}.
$$
Consider the map $$\begin{aligned}
\theta_n: \mathfrak{S}_n&\rightarrow \Sigma_n,\\
w&\mapsto \bigl(|J_w^{<1}|,\cdots,|J_w^{<n}|\bigr).
\end{aligned}
$$
It is clear that $\theta_n$ is well-defined by the definition of $J_w^{<t}$.
\begin{lem}\label{bij} With the above definitions and notations, the map $\theta_n$ is a bijection.
\end{lem}
\begin{proof} Since both $\mathfrak{S}_n$ and $\Sigma_n$ have cardinality $n!$, to prove the lemma it suffices to show that $\theta_n$ is injective.
Let $w,u\in\mathfrak{S}_n$ with $\theta_n(w)=\theta_n(u)$. Suppose that $u\neq w$. Let $1\leq t\leq n$ be the largest integer such that $w(t)\neq u(t)$; thus $w(i)=u(i)$ for any $t<i\leq n$. Assume that $w(t)<u(t)$. Then $w(t)=u(m_t)$ for some $m_t\in\{1,2,\cdots,t-1\}$.
Note that if $1\leq j<t$ and $w(j)<w(t)$, then for each such $j$ we have $u(m_j)=w(j)<w(t)<u(t)$ for some $1\leq m_j<t$. It follows that $|J_w^{<t}|\leq |J_u^{<t}|-1$, a contradiction. By a similar (and symmetric) argument one can show that $u(t)<w(t)$ cannot happen either. Thus we get that $w(t)=u(t)$, which is a contradiction. This proves that $\theta_n$ is injective, and hence completes the proof of the lemma.
\end{proof}
Let $\nu\in I^\beta$ be given as in (\ref{nu2}). For $0\leq t\leq p$, we define $$
b_0:=0,\quad c_t:=\sum_{i=0}^{t}b_i,\quad \mathfrak{S}_{\mathbf{b}}:=\mathfrak{S}_{\{1,\cdots,c_1\}}\times\mathfrak{S}_{\{c_1+1,\cdots,c_2\}}\times\cdots\times\mathfrak{S}_{\{c_{p-1}+1,\cdots,n\}}.
$$
Let $\mathcal{D}_{\mathbf{b}}$ be the set of minimal length left $\mathfrak{S}_{\mathbf{b}}$-coset representatives in $\mathfrak{S}_{n}$. Set $\mathcal{D}(\nu):=\mathcal{D}_{\mathbf{b}}\cap\mathfrak{S}(\nu,\nu)$. Then we have $\mathfrak{S}(\nu,\nu)=\mathcal{D}(\nu)\mathfrak{S}_{\mathbf{b}}$.
\begin{lem}\label{independ} Let $k$ be an integer with $c_{i-1}<k\leq c_{i}$, where $1\leq i\leq p$. Let $d\in\mathcal{D}(\nu)$, $w=w_{1}\times\cdots\times
w_{p}$, where $w_{j}\in \mathfrak{S}_{\{c_{j-1}+1,\cdots,c_j\}},\,\,\forall\,1\leq j\leq p$. Then we have that $$N^\Lambda(dw,\nu,k)=N^\Lambda(d,\nu,w_i(k))-2|\tilde{J}_{w_i}^{<k}|+2(w_i(k)-c_{i-1}-1), $$
where $$
\tilde{J}_{w_i}^{<k}:=\{c_{i-1}+1\leq a<k|w_i(a)<w_i(k)\}. $$
In particular, $N^\Lambda(dw,\nu,k)$ does not depend on $w_j$ for any $1\leq j\neq i\leq p$.
\end{lem}
\begin{proof}
By Definition \ref{keydfn1} and the definition of $\mathcal{D}(\nu)$, we have $$
\begin{aligned}
J_{dw}^{<k}&=\cup_{j<i}\{c_{j-1}+1\leq a\leq c_j|dw(a)<dw(k)\}\cup\{c_{i-1}+1\leq a<k|dw(a)<dw(k)\}\\
&=\cup_{j<i}\{c_{j-1}+1\leq a\leq c_j|dw_j(a)<dw_i(k)\}\cup\{c_{i-1}+1\leq a<k|dw_i(a)<dw_i(k)\}\\
&=\cup_{j<i}\{c_{j-1}+1\leq a\leq c_j|dw_j(a)<dw_i(k)\}\cup\{c_{i-1}+1\leq a<k|w_i(a)<w_i(k)\}\\
&=\cup_{j<i}\{c_{j-1}+1\leq a\leq c_j|dw_j(a)<dw_i(k)\}\cup\tilde{J}_{w_i}^{<k}
\end{aligned}$$
and $$
\begin{aligned}
J_{d}^{<w_i(k)}&=\cup_{j<i}\{c_{j-1}+1\leq a\leq c_j|d(a)<dw_{i}(k)\}\cup\{c_{i-1}+1\leq a<w_{i}(k)|d(a)<dw_{i}(k)\}\\
&=\cup_{j<i}\{c_{j-1}+1\leq a\leq c_j|d(a)<dw_{i}(k)\}\cup\{c_{i-1}+1\leq a<w_{i}(k)|a<w_{i}(k)\}.
\end{aligned}
$$
Since the map $$\begin{aligned}
\gamma_{j}: \{c_{j-1}+1\leq a\leq c_j|dw_j(a)<dw_i(k)\}&\rightarrow \{c_{j-1}+1\leq a\leq c_j|d(a)<dw_{i}(k)\},\\
a&\mapsto w_j(a)
\end{aligned}
$$ is a well-defined bijection for $j<i$, we have $$
|\{c_{j-1}+1\leq a\leq c_j|dw_j(a)<dw_i(k)\}|=|\{c_{j-1}+1\leq a\leq c_j|d(a)<dw_{i}(k)\}|
$$ when $j<i$.
Now the result follows directly from (\ref{Ndef}).
\end{proof}
\begin{rem}\label{rem2} The significance of the above lemma lies in the fact that the integer $N^\Lambda(dw,\nu,k)$ depends only on the interval $(c_{i-1},c_i]$ to which $k$ belongs and on the element $w_i$, but not on the elements
$w_j$ for $j\in\{1,2,\cdots,p\}\setminus\{i\}$.
\end{rem}
\begin{dfn} Let $\nu\in I^\beta$ be given as in (\ref{nu2}). For any $d\in\mathcal{D}(\nu)$, $1\leq i\leq p$ and $c_{i-1}<k\leq c_i$, we define \begin{equation}
\label{Ntilde}
\widetilde{N}^\Lambda(d,\nu,k):=N^\Lambda(d,\nu,k)+k-c_{i-1}-1 .
\end{equation}
\end{dfn}
\begin{thm}\label{mainthm1b} Let $\nu\in I^\beta$ be given as in (\ref{nu2}). Then $$
\dim\,e(\nu) \RR^\Lambda({\beta})e(\nu)=\Bigl(\prod_{i=1}^{p}b_{i}!\Bigr)\sum_{d\in \mathcal{D}(\nu)}\Bigl(\prod_{t=1}^{n}\widetilde{N}^\Lambda(d,\nu,t)\Bigr). $$
\end{thm}
\begin{proof}
By Corollary \ref{maincor1} and Lemma \ref{independ}, we have $$
\begin{aligned}
&\quad\,\dim e(\nu) \RR^\Lambda({\beta})e(\nu)\\
&=\sum\limits_{w\in\mathfrak{S}(\nu,\nu)}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t)=\sum\limits_{d\in\mathcal{D}(\nu)}\sum\limits_{w\in d\mathfrak{S}_\mathbf{b}}\prod\limits_{t=1}^{n}N^\Lambda(w,\nu,t)\\
&=\sum\limits_{d\in\mathcal{D}(\nu)}\sum_{w\in d\mathfrak{S}_\mathbf{b}}\prod_{i=1}^{p}\prod\limits_{t=c_{i-1}+1}^{c_i}N^\Lambda(w,\nu,t)\\
&=\sum\limits_{d\in\mathcal{D}(\nu)}\sum_{\substack{w_j\in \mathfrak{S}_{\{c_{j-1}+1,\cdots,c_{j}\}}\\ \forall 1\leq j\leq p}}\prod_{i=1}^{p}\prod\limits_{t=c_{i-1}+1}^{c_i}N^\Lambda(dw_1\cdots w_p,\nu,t)\\
&=\sum\limits_{d\in\mathcal{D}(\nu)}\sum_{\substack{w_j\in \mathfrak{S}_{\{c_{j-1}+1,\cdots,c_{j}\}}\\ \forall 1\leq j\leq p}}\prod_{i=1}^{p}\prod\limits_{t=c_{i-1}+1}^{c_i}N^\Lambda(dw_i,\nu,t)\\
&=\sum\limits_{d\in\mathcal{D}(\nu)}\prod_{i=1}^{p}\sum_{w_i\in \mathfrak{S}_{\{c_{i-1}+1,\cdots,c_{i}\}}}\prod\limits_{t=c_{i-1}+1}^{c_i}(N^\Lambda(d,\nu,w_i(t))-2|\tilde{J}_{w_i}^{<t}|+2(w_i(t)-c_{i-1}-1)).\\
\end{aligned}
$$
Note that the map $$\tilde{\gamma}_{i}: \tilde{J}_{w_i}^{<w_i^{-1}(k)}\rightarrow \tilde{J}_{w_i^{-1}}^{<k},\,\, a\mapsto w_i(a),$$ is a well-defined bijection for $c_{i-1}+1\leq k\leq c_i$. In particular, $|\tilde{J}_{w_i}^{<w_i^{-1}(k)}|=|\tilde{J}_{w_i^{-1}}^{<k}| $ for $c_{i-1}+1\leq k\leq c_i$. Combining this equality with the bijection in Lemma \ref{bij}, we get that $$
\begin{aligned}
&\sum_{w_i\in \mathfrak{S}_{\{c_{i-1}+1,\cdots,c_{i}\}}}\prod\limits_{t=c_{i-1}+1}^{c_i}(N^\Lambda(d,\nu,w_i(t))-2|\tilde{J}_{w_i}^{<t}|+2(w_i(t)-c_{i-1}-1))\\
&=\sum_{w_i\in \mathfrak{S}_{\{c_{i-1}+1,\cdots,c_{i}\}}}\prod\limits_{k=c_{i-1}+1}^{c_i}(N^\Lambda(d,\nu,k)-2|\tilde{J}_{w_i}^{<w_i^{-1}(k)}|+2(k-c_{i-1}-1))\\
&=\sum_{w_i\in \mathfrak{S}_{\{c_{i-1}+1,\cdots,c_{i}\}}}\prod\limits_{k=c_{i-1}+1}^{c_i}(N^\Lambda(d,\nu,k)-2|\tilde{J}_{w_i^{-1}}^{<k}|+2(k-c_{i-1}-1))\\
&=\sum_{w_i\in \mathfrak{S}_{\{c_{i-1}+1,\cdots,c_{i}\}}}\prod\limits_{k=c_{i-1}+1}^{c_i}(N^\Lambda(d,\nu,k)-2|\tilde{J}_{w_i}^{<k}|+2(k-c_{i-1}-1))\\
&=\prod\limits_{k=c_{i-1}+1}^{c_i}\Bigl(N^\Lambda(d,\nu,k)+2(k-c_{i-1}-1)+N^\Lambda(d,\nu,k)-2+2(k-c_{i-1}-1)\\
&\qquad +\cdots+N^\Lambda(d,\nu,k)-2(k-c_{i-1}-1)+2(k-c_{i-1}-1)\Bigr)\\
&=\prod\limits_{k=c_{i-1}+1}^{c_i}(k-c_{i-1})(N^\Lambda(d,\nu,k)+k-c_{i-1}-1)\\
&=\prod\limits_{k=c_{i-1}+1}^{c_i}(k-c_{i-1})\widetilde{N}^\Lambda(d,\nu,k)\\
&=b_i!\prod\limits_{k=c_{i-1}+1}^{c_i}\widetilde{N}^\Lambda(d,\nu,k).
\end{aligned}
$$
Combining this equality with the equality obtained in the first paragraph of this proof, we prove the theorem.
\end{proof}
\begin{lem}\label{Lemma 4} Let $t\in\mathbb{Z}^{\geq 1}$ and $l\in\mathbb{Z}$. Then $$
\sum_{k=0}^{t-1}[l-2k]q^{l-t}=[t](1+q^{2}+\cdots+q^{2(l-t)}). $$
\end{lem}
\begin{proof} It suffices to show that $$
\sum_{k=0}^{t-1}(q^{l-2k}-q^{-(l-2k)})q^{l-t}=(q^{t}-q^{-t})(1+q^{2}+\cdots+q^{2(l-t)}).
$$
In fact, the left-hand side of the above equality is equal to $$\begin{aligned}
\sum_{k=0}^{t-1}q^{2l-t}q^{-2k}-\sum_{k=0}^{t-1}q^{-t}q^{2k}
&=q^{2l-t}\frac{1-q^{-2t}}{1-q^{-2}}-q^{-t}\frac{1-q^{2t}}{1-q^{2}}\\
&=\frac{q^{2l-3t+2}-q^{2l-t+2}}{1-q^{2}}-\frac{q^{-t}-q^{t}}{1-q^{2}},\end{aligned} $$
while the right-hand side of the above equality is equal to $$
(q^{t}-q^{-t})\frac{1-q^{2(l-t+1)}}{1-q^{2}}.
$$
Hence the two sides are equal, as required.
\end{proof}
In the rest of this subsection we consider the cyclotomic nilHecke algebra $\mathscr{H}^{0}_{\ell,n}=\RR^\Lambda(\beta)$ with $\Lambda=\ell\Lambda_0$ and $\beta=n\alpha_0$. In this case, by definition, we have $$
N^\Lambda(w,\nu,t)=\ell-2|J_w^{<t}|,\quad N^\Lambda(1,\nu,t)=\ell-2(t-1),\,\,\forall\,1\leq t\leq n .
$$
The bijection $\theta_n$ between $\mathfrak{S}_n$ and $\Sigma_n$ established in Lemma \ref{bij} implies that \begin{equation}\label{swap}
\sum_{w\in\mathfrak{S}_n}\prod_{t=1}^{n}[N^\Lambda(w,\nu,t)]_{\nu_t}=\sum_{w\in\mathfrak{S}_n}\prod_{t=1}^{n}[\ell-2|J_w^{<t}|]=\prod_{t=1}^{n}\sum_{k=0}^{t-1}[\ell-2k].
\end{equation}
Combining the above results with Theorem \ref{mainthmA}, we derive the following graded dimension formula for the cyclotomic nilHecke algebra $\mathscr{H}^{(0)}_{\ell,n}$.
\begin{cor}\label{maincor2a} Let $\Lambda:=\ell\Lambda_0, \beta=n\alpha_0$. We have
$$
\dim_{q}\,\mathscr{H}^{(0)}_{\ell,n}=\Bigl(\prod_{k=1}^{n}\frac{q^{-2k}-1}{q^{-2}-1}\Bigr)\Bigl(\prod_{t=1}^{n}(1+q^{2}+\cdots+q^{2(\ell-t)})\Bigr). $$
\end{cor}
\begin{proof} Applying Theorem \ref{mainthmA} in our special case $\Lambda:=\ell\Lambda_0, \beta=n\alpha_0$, we can get that $$\begin{aligned}
&\quad\,\dim_{q}\,\mathscr{H}^{(0)}_{\ell,n}=\sum_{w\in\mathfrak{S}_n}\prod_{t=1}^{n}\bigl([\ell-2|J_w^{<t}|]q^{\ell-2t+1}\bigr)\\
&=q^{n(\ell-n)}\sum_{w\in\mathfrak{S}_n}\prod_{t=1}^{n}[\ell-2|J_w^{<t}|]\\
&=q^{n(\ell-n)}\prod_{t=1}^{n}\sum_{k=0}^{t-1}[\ell-2k]\qquad\qquad\qquad\text{(by (\ref{swap}))}\\
&=q^{-n(n-1)/2}\prod_{t=1}^{n}\sum_{k=0}^{t-1}\Bigl([\ell-2k]q^{\ell-t}\Bigr)\\
&=q^{-n(n-1)/2}\prod_{t=1}^{n}\frac{(q^{t}-q^{-t})(1+q^{2}+\cdots+q^{2(\ell-t)})}{q-q^{-1}}\qquad\qquad \text{(by Lemma \ref{Lemma 4})}\\
&=\Bigl(\prod_{k=1}^{n}\frac{q^{-2k}-1}{q^{-2}-1}\Bigr)\Bigl(\prod_{t=1}^{n}(1+q^{2}+\cdots+q^{2(\ell-t)})\Bigr).
\end{aligned}$$
This completes the proof of the corollary.
\end{proof}
Note that the above graded dimension formula for $\mathscr{H}^{(0)}_{\ell,n}$ also follows from \cite[Theorem 2.34]{HuL}. The polynomial $\prod_{k=1}^{n}\frac{q^{k}-1}{q-1}=\sum_{w\in\mathfrak{S}_n}q^{\ell(w)}$ is the Poincar\'{e} polynomial of the Iwahori-Hecke algebra $\HH_q(\mathfrak{S}_n)$ associated to the symmetric group $\mathfrak{S}_n$. Specializing $q$ to $1$, we obtain the following well-known dimension formula for the (ungraded) cyclotomic nilHecke algebra $\mathscr{H}^{(0)}_{\ell,n}$.
\begin{cor}\label{maincor2b}
$\dim\,\mathscr{H}^{(0)}_{\ell,n}=n!\prod\limits_{j=0}^{n-1}(\ell-j).$
\end{cor}
\subsection{Criteria for $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$}
In this subsection, we shall give some criteria for $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$. In particular, we shall give a proof of Theorem \ref{mainthmB} here.
In the special cases of types $A_\ell^{(1)}$ and $A_\infty$, it was shown in \cite[Lemma 4.1]{HM} that $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ if and only if $\nu=(\nu_1,\cdots,\nu_n)$ is the residue sequence of a standard tableau in the subset $\mathscr{P}_\beta^\Lambda$ of multi-partitions of $n$ determined by $\beta$. Similar criteria in the cases of types $C_\ell^{(1)}$ and $C_\infty$ can be obtained from \cite[Theorem 2.5]{APS}. These are not effective criteria, in the sense that one has to check many standard tableaux in $\mathscr{P}_\beta^\Lambda$. Our second main result, Theorem \ref{mainthmB} of this paper, solves the problem of determining when the KLR idempotent $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ for an {\it arbitrary} symmetrizable Cartan matrix.
\noindent
\textbf{Proof of Theorem \ref{mainthmB}}: Let $\Lambda\in P^+$, $\beta\in Q^+$ and $\nu=(\nu_1,\cdots,\nu_n)\in I^\beta$. It is clear that $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ if and only if $e(\nu)\RR^{\Lambda}(\beta)e(\nu)\neq 0$. Thus Theorem \ref{mainthmB} follows from Corollary \ref{maincor1}.
\qed
Using our second version of the dimension formula for $e(\nu)\RR^{\Lambda}(\beta)e(\nu)$ given in Theorem \ref{mainthm1b}, we also obtain, in Theorem \ref{mainthmB2}, a second simplified (or divided power) version of the criterion for the KLR idempotent $e(\nu)$ to be nonzero in $\RR^\Lambda(\beta)$.
As in the beginning of the last subsection, we can always write \begin{equation}
\nu=(\nu_1,\cdots,\nu_n)=(\underbrace{\nu^{1},\nu^{1},\cdots,\nu^{1}}_{b_{1}\,\text{copies}},\cdots,\underbrace{\nu^{p},\nu^{p},\cdots,\nu^{p}}_{b_{p}\,\text{copies}}),
\end{equation}
where $p\in\mathbb{N}$, $b_1,\cdots,b_p\in\mathbb{N}$ with $\sum_{i=1}^{p}b_i=n$ and $\nu^{j}\neq\nu^{j+1}$ for any $1\leq j<p$. Let $\widetilde{N}^\Lambda(d,\nu,t)$ be the integer defined in (\ref{Ntilde}) and let $\mathcal{D}(\nu)$ be defined as before.
\begin{thm}\label{mainthmB2} Let $\Lambda\in P^+$, $\beta\in Q^+$ and $\nu=(\nu_1,\cdots,\nu_n)\in I^\beta$. Then $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ if and only if $$
\sum_{d\in \mathcal{D}(\nu)}\prod_{t=1}^{n}\widetilde{N}^\Lambda(d,\nu,t)\neq 0 .
$$
\end{thm}
\begin{proof} The proof is the same as the proof of Theorem \ref{mainthmB}, using Theorem \ref{mainthm1b}.
\end{proof}
To summarize, we have given two criteria for $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ in this subsection. A third criterion (Corollary \ref{maincorC3}) for $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ will be given at the end of the next section.
\mathfrak{b}igskip
\section{Level reduction for dimension formulae}
In this section we shall give a second application---level reduction for our dimension formula, which reveals some surprising connections between the dimension of a higher level cyclotomic quiver Hecke algebra and a sum of products of the dimensions of some lower level cyclotomic quiver Hecke algebras. In particular, we shall give the proof of the third main result, Theorem \ref{mainthmC}, of this paper.
For any $\nu=(\nu_1,\cdots,\nu_n)\in I^n$, we define \begin{equation}\label{betanu}
\beta_{\nu}:=\sum_{i=1}^{n}\alpha_{\nu_i},\quad |\nu|:=n. \end{equation}
Let $\mathcal{D}_{(k,n-k)}$ be the set of minimal length left coset representatives of $\mathfrak{S}_{(k,n-k)}$ in $\mathfrak{S}_n$. We define $D^2(n)$ to be the set of all $(k,n-k)$-shuffles of $(1,2,\cdots,n)$ for $k=0,1,\cdots,n$. That is, $$ D^2(n)=\Bigl\{\bigl((w(1),\cdots,w(k)),(w(k+1),\cdots,w(n))\bigr)\Bigm|
\begin{matrix}\text{$w\in\mathcal{D}_{(k,n-k)}$,}\\ \text{$k=0,1,\cdots,n$}\end{matrix}\Bigr\}.
$$
In particular, we always have $|D^2(n)|=2^n$.
\begin{dfn}\label{ssize} Let $\nu=(\nu_1,\cdots,\nu_n)\in I^n$. For any $k$-tuple ${\bf s}=(s_1,s_2,\cdots,s_k)$ of integers with $1\leq s_1<\cdots<s_k\leq n$, we define $$
|{\bf s}|:=k,\quad \nu_{\bf s}:=(\nu_{s_1},\cdots,\nu_{s_k}). $$
For any $\mu\in I^n$, we define $$
D^2(\nu,\mu):=\bigl\{\big(({\bf s}^1,{\bf s}^2),({\bf t}^1,{\bf t}^2)\big)\in D^2(n)\times D^2(n)\bigm|\beta_{\nu_{{\bf s}^i}}=\beta_{\mu_{{\bf t}^i}},\,\,i=1,2\bigr\}.
$$
\end{dfn}
Let $\big(({\bf s}^1,{\bf s}^2),({\bf t}^1,{\bf t}^2)\big)\in D^2(\nu,\mu)$. By construction, each $w_1\times w_2\in \mathfrak{S}(\nu_{{\bf s}^1},
\mu_{{\bf t}^1})\times \mathfrak{S}(\nu_{{\bf s}^2},\mu_{{\bf t}^2})$
determines a unique element $w\in \mathfrak{S}(\nu,\mu)$. Hence, we get a canonical map: $$\tau:\,\bigsqcup_{\substack{\big(({\bf s}^1,{\bf s}^2),({\bf t}^1,{\bf t}^2)\big)\in D^2(\nu,\mu)}}\Bigl(\mathfrak{S}(\nu_{{\bf s}^1},\mu_{{\bf t}^1})\times \mathfrak{S}(\nu_{{\bf s}^2},\mu_{{\bf t}^2})\Bigr)\rightarrow\,\mathfrak{S}(\nu,\mu).
$$
We can visualize any $w\in\mathfrak{S}(\nu,\mu)$ as a planar diagram as follows: the diagram has two rows of vertices, each of which is labelled by $1,2,\cdots,n$, and there is an edge connecting the vertex $i$ in the top row with the vertex $j$ in the bottom row if and only if $w(i)=j$ and $\nu_{i}=\mu_j$.
For ${\bf s}^1=(s_1,\cdots,s_k), {\bf t}^1=(t_1,\cdots,t_k)$ with $1\leq s_1<s_2<\cdots<s_k\leq n$ and $1\leq t_1<t_2<\cdots<t_k\leq n$,
any $w_1\in\mathfrak{S}(\nu_{{\bf s}^1},\mu_{{\bf t}^1})$ can be identified with a planar diagram as follows: the diagram has two rows of vertices, the top row vertices are labelled by $s_1,s_2,\cdots,s_k$, the bottom row vertices are labelled by $t_1,t_2,\cdots,t_k$, and there is an edge connecting the vertex $s_i$ in the top row with the vertex $t_j$ in the bottom row if and only if $w_1(i)=j$ and $\nu_{s_{i}}=\mu_{t_j}$. Similarly, we have the planar diagram for $({\bf s}^2,{\bf t}^2)$. Then the map $\tau$ is the natural way to combine the two planar diagrams associated to $({\bf s}^1,{\bf t}^1),({\bf s}^2,{\bf t}^2)$ into a new diagram without breaking any edges in the diagrams.
\begin{lem}\label{surjective}\begin{enumerate}
\item Let $\mu,\nu\in I^n$ and $w\in\mathfrak{S}(\nu,\mu)$. Then for each ${\bf s}:=({\bf s}^1,{\bf s}^2)\in D^2(n)$, there exist a unique $w_1\in\mathfrak{S}_{|{\bf s}^1|}$, a unique $w_2\in\mathfrak{S}_{|{\bf s}^2|}$ and a unique
$({\bf t}^1,{\bf t}^2)\in D^2(n)$, such that $w_1\times w_2\in \mathfrak{S}(\nu_{{\bf s}^1},\mu_{{\bf t}^1})\times \mathfrak{S}(\nu_{{\bf s}^2},\mu_{{\bf t}^2})$ and $\tau(w_1\times w_2)=w$. In particular, $\tau$ is surjective;
\item For each $w\in\mathfrak{S}(\nu,\mu)$, the cardinality of $\tau^{-1}(w)$ is $2^n$.
\end{enumerate}
\end{lem}
\begin{proof} Let $w\in\mathfrak{S}(\nu,\mu)$ and ${\bf s}:=({\bf s}^1,{\bf s}^2)\in D^2(n)$, where ${\bf s}^1=(i_1,\cdots,i_a)$,
${\bf s}^2=(\hat{i}_1,\cdots,\hat{i}_{n-a})$, $1\leq i_1<\cdots<i_a\leq n$, $1\leq\hat{i}_1<\cdots<\hat{i}_{n-a}\leq n$. Then ${\bf t}^1=(j_1,\cdots,j_a)$ is the unique rearrangement of $(w(i_1),\cdots,w(i_a))$ such that $1\leq j_1<\cdots<j_a\leq n$, while ${\bf t}^2=(\hat{j}_1,\cdots,\hat{j}_{n-a})$ is the unique rearrangement of $(w(\hat{i}_1),\cdots,w(\hat{i}_{n-a}))$ such that $1\leq \hat{j}_1<\cdots<\hat{j}_{n-a}\leq n$. We set $w_1\in\mathfrak{S}_{a}$ to be the unique element such that $j_t=w(i_{w_1^{-1}(t)})$ for any $1\leq t\leq a$, while $w_2\in\mathfrak{S}_{n-a}$ is the unique element such that
$\hat{j}_t=w(\hat{i}_{w_2^{-1}(t)})$ for any $1\leq t\leq n-a$. This proves 1). Now 2) follows from 1) and the fact that $|D^2(n)|=2^n$.
\end{proof}
\begin{dfn} Let $\mu,\nu\in I^n$ and $w\in\mathfrak{S}(\nu,\mu)$. For $i\in\{1,2\}$, we define $w_{{\bf s},i}\in\mathfrak{S}_{|{\bf s}^i|}$ to be the unique element $w_i$ determined by $w$ and ${\bf s}=({\bf s}^1,{\bf s}^2)$ which was introduced in Lemma \ref{surjective}.
\end{dfn}
\begin{thm}\label{mainthmC1}
Let $\mu,\nu\in I^n$. Suppose $\Lambda=\Lambda^1+\Lambda^2$, where $\Lambda^1,\Lambda^2\in P^+$. Then
$$\begin{aligned}\dim e(\nu)\RR^\Lambda(\beta) e(\mu)&=\sum_{\substack{\big(({\bf s}^1,{\bf s}^2),({\bf t}^1,{\bf t}^2)\big)\in D^2(\nu,\mu)}} \dim e(\nu_{{\bf s}^1})\RR^{\Lambda^1}(\beta_{\nu_{{\bf s}^1}})e(\mu_{{\bf t}^1})\\
&\qquad\qquad\qquad \times\dim e(\nu_{{\bf s}^2})\RR^{\Lambda^2}(\beta_{\nu_{{\bf s}^2}})e(\mu_{{\bf t}^2}). \end{aligned}$$
\end{thm}
\begin{proof}
By the dimension formula in Corollary \ref{maincor1} and Lemma \ref{surjective}, we have:$$
\begin{aligned}{\text{RHS}}=&\sum_{\substack{\big(({\bf s}^1,{\bf s}^2),({\bf t}^1,{\bf t}^2)\big)\in D^2(\nu,\mu)}} \sum_{\substack{w_1\in \mathfrak{S}(\nu_{{\bf s}^1},\mu_{{\bf t}^1})\\w_2\in\mathfrak{S}(\nu_{{\bf s}^2},\mu_{{\bf t}^2})}}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_1,\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_2,\nu_{{\bf s}^2},k_2)\\
=&\sum_{w\in \mathfrak{S}(\nu,\mu)}\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2).
\end{aligned}$$
To prove the theorem, it suffices to show that for each $w\in \mathfrak{S}(\nu,\mu)$, \begin{equation}\label{claimrd11}\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)=\prod_{t=1}^{n}N^{\Lambda}(w,\nu,t).
\end{equation}
To see this, we consider the following map: $$f_n: D^2(n)\rightarrow D^2(n)$$
\[({\bf s}^1,{\bf s}^2)\mapsto
\begin{cases}
({\bf s}^1\setminus\{n\},{\bf s}^2\cup\{n\}),& \text{if $n\in {\bf s}^1$;}\\
({\bf s}^1\cup\{n\},{\bf s}^2\setminus\{n\}),& \text{if $n\in {\bf s}^2$,}
\end{cases}\]
where ${\bf s}^i\setminus\{n\}$ means that we remove the integer $n$ from ${\bf s}^i$ and ${\bf s}^i\cup\{n\}$ means that we add the integer $n$ to the end of ${\bf s}^i$. It is easy to see that $f_n$ is a well-defined involution. For any $({\bf s}^1,{\bf s}^2)\in D^2(n)$, we set $({\bf \widetilde{s}}^1,{\bf \widetilde{s}}^2):=f_n({\bf s}^1,{\bf s}^2)$. Note that if $n\in {\bf s}^i$ then $n$ must sit at the end of ${\bf s}^i$. Clearly, by the discussion in the paragraph above Lemma \ref{surjective} and Definition \ref{keydfn1}, $$\begin{aligned}
&\prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&\qquad =\prod_{\substack{k_1=1,\cdots,|{\bf \widetilde{s}}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf \widetilde{s}}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf\widetilde{s}},1},\nu_{{\bf \widetilde{s}}^1},k_1)N^{\Lambda^2}(w_{{\bf\widetilde{s}},2},\nu_{{\bf \widetilde{s}}^2},k_2) .
\end{aligned}
$$
If $n\in {\bf s}^1$, then $$
\begin{aligned}
&\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&\qquad\qquad+\prod_{\substack{k_1=1,\cdots,|{\bf \widetilde{s}}^1|,\\k_2=1,\cdots,|{\bf \widetilde{s}}^2|}}N^{\Lambda^1}(w_{{\bf\widetilde{s}},1},\nu_{{\bf \widetilde{s}}^1},k_1)N^{\Lambda^2}(w_{{\bf\widetilde{s}},2},\nu_{{\bf \widetilde{s}}^2},k_2)\\
=&N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},|{\bf s}^1|)\prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&\qquad\qquad +N^{\Lambda^2}(w_{{\bf\widetilde{s}},2},\nu_{{\bf \widetilde{s}}^2},|{\bf \widetilde{s}}^2|)\prod_{\substack{k_1=1,\cdots,|{\bf \widetilde{s}}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf \widetilde{s}}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf\widetilde{s}},1},\nu_{{\bf \widetilde{s}}^1},k_1)N^{\Lambda^2}(w_{{\bf\widetilde{s}},2},\nu_{{\bf \widetilde{s}}^2},k_2).
\end{aligned}$$
By assumption, $\tau(w_{{\bf s},1}\times w_{{\bf s},2})=w=\tau(w_{{\bf\widetilde{s}},1}\times w_{{\bf\widetilde{s}},2})$ and $n\in {\bf s}^1\cap {\bf \widetilde{s}}^2$. To simplify the notations, we set $$\begin{aligned}
& a:=|\nu_{{\bf s}^1}|,\,\,\, {\bf s}^1=(i_1,\cdots,i_{a-1},n),\,\,\, {\bf s}^2=(\hat{i}_1,\cdots,\hat{i}_{n-a}),\,\,\mu=w\nu=(\mu_1,\cdots,\mu_n),\\
& w_{{\bf s},1}\nu_{{\bf s}^1}=(\mu_{j_1},\cdots,\mu_{j_a}),\,\, w_{{\bf s},2}\nu_{{\bf s}^2}=(\mu_{\hat{j}_1},\cdots,\mu_{\hat{j}_{n-a}}),
\end{aligned}
$$
where $((j_1,\cdots,j_a),(\hat{j}_1,\cdots,\hat{j}_{n-a}))$ is an $(a,n-a)$-shuffle of $(1,2,\cdots,n)$.
Then $$\begin{aligned}
&\widetilde{\bf s}^1=(i_1,\cdots,i_{a-1}),\,\,\, \widetilde{\bf s}^2=(\hat{i}_1,\cdots,\hat{i}_{n-a},n),\\
& w_{\widetilde{\bf s},1}\nu_{\widetilde{\bf s}^1}=(\mu_{j_1},\cdots,\mu_{j_{a-1}}),\,\, w_{\widetilde{\bf s},2}\nu_{\widetilde{\bf s}^2}=(\mu_{\hat{j}_1},\cdots,\mu_{\hat{j}_{k}},\mu_{j_a},\mu_{\hat{j}_{k+1}},\cdots,\mu_{\hat{j}_{n-a}}), \end{aligned}
$$
where $1\leq k\leq n-a$ is such that $\hat{j}_1<\cdots<\hat{j}_k<j_a<\hat{j}_{k+1}<\cdots<\hat{j}_{n-a}$.
Given $1\leq k\leq n$ with $w(k)<w(n)$, we have either $k=i_t$ for some $1\leq t<a$, or $k=\hat{i}_l$ for some $1\leq l\leq n-a$. In the former case, $w(i_t)=j_{w_{{\bf s},1}(t)}$, $w(n)=j_{w_{{\bf s},1}(a)}$, and thus $w(i_t)<w(n)$ implies that
$w_{{\bf s},1}(t)<w_{{\bf s},1}(a)$; in the latter case, $w(\hat{i}_l)=\hat{j}_{w_{\widetilde{\bf s},2}(l)}$, and thus $w(\hat{i}_l)<w(n)$ implies that
$w_{\widetilde{\bf s},2}(l)<j_a=w_{\widetilde{\bf s},2}(n-a+1)$. As a result, we see from Definition \ref{keydfn1} that $$N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},|{\bf s}^1|)+N^{\Lambda^2}(w_{{\bf\widetilde{s}},2},\nu_{{\bf \widetilde{s}}^2},|{\bf \widetilde{s}}^2|)=N^{\Lambda}(w,\nu,n).$$
We get that $$\begin{aligned}
&\quad \prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&\qquad\qquad +\prod_{\substack{k_1=1,\cdots,|{\bf \widetilde{s}}^1|,\\k_2=1,\cdots,|{\bf \widetilde{s}}^2|}}N^{\Lambda^1}(w_{\widetilde{\bf s},1},\nu_{{\bf \widetilde{s}}^1},k_1)N^{\Lambda^2}(w_{\widetilde{\bf s},2},\nu_{{\bf \widetilde{s}}^2},k_2)\\
&=N^{\Lambda}(w,\nu,n)\prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2).
\end{aligned}
$$
If $n\in {\bf s}^2$, then we can compute in a similar way and deduce the same equality as above.
Since $f_n$ is an involution, we get that $$\begin{aligned}
&\quad\,\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&=\frac{1}{2}\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\Big(\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)+\\
&\qquad\qquad \prod_{\substack{k_1=1,\cdots,|{\bf \widetilde{s}}^1|,\\k_2=1,\cdots,|{\bf \widetilde{s}}^2|}}N^{\Lambda^1}(w_{\widetilde{\bf s},1},\nu_{{\bf \widetilde{s}}^1},k_1)N^{\Lambda^2}(w_{\widetilde{\bf s},2},\nu_{{\bf \widetilde{s}}^2},k_2)\Big)\\
&=\frac{1}{2}N^{\Lambda}(w,\nu,n)\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2).
\end{aligned}
$$
Similarly, we can define $$f_{n-1}: D^2(n)\rightarrow D^2(n)$$
\[({\bf s}^1,{\bf s}^2)\mapsto
\begin{cases}
({\bf s}^1\setminus\{n-1\},{\bf s}^2\cup\{n-1\}), &\text{if $n-1\in {\bf s}^1$;}\\
({\bf s}^1\cup\{n-1\},{\bf s}^2\setminus\{n-1\}), &\text{if $n-1\in {\bf s}^2$,}
\end{cases}\]
where ${\bf s}^i\setminus\{n-1\}$ means that we remove the integer $n-1$ from ${\bf s}^i$, and ${\bf s}^i\cup\{n-1\}$ means that we insert the integer $n-1$ into ${\bf s}^i$ so that it is again in increasing order. We define $({\bf \hat{s}}^1,{\bf \hat{s}}^2):=f_{n-1}({\bf s}^1,{\bf s}^2)$.
It is easy to see that $f_{n-1}$ is a well-defined bijection. Using the same argument as in the second last paragraph and the definition of $N^{\Lambda}(w,\nu,n-1)$, we can deduce that $$\begin{aligned}
&\quad \prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)+\\
&\qquad\qquad\prod_{\substack{k_1=1,\cdots,|{\bf\hat{s}}^1\setminus\{n\}|,\\k_2=1,\cdots,|{\bf\hat{s}}^2\setminus\{n\}|}}N^{\Lambda^1}(w_{\hat{\bf s},1},\nu_{{\bf \hat{s}}^1},k_1)N^{\Lambda^2}(w_{\hat{\bf s},2},\nu_{{\bf \hat{s}}^2},k_2)\\
&=N^{\Lambda}(w,\nu,n-1)\prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n-1,\,n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n-1,\,n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2).
\end{aligned}
$$
Hence, we have:$$\begin{aligned}
&\quad\,\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&=\frac{1}{2^2}N^{\Lambda}(w,\nu,n-1)N^{\Lambda}(w,\nu,n)\times\\
&\qquad \sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1\setminus\{n-1,n\}|,\\k_2=1,\cdots,|{\bf s}^2\setminus\{n-1,n\}|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2).
\end{aligned}
$$
Repeating the above argument with $n-1$ replaced by $n-2,n-3,\cdots,1$ and remembering that $|D^2(n)|=2^n$, we get that
$$\begin{aligned}
&\quad\,\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}\prod_{\substack{k_1=1,\cdots,|{\bf s}^1|,\\k_2=1,\cdots,|{\bf s}^2|}}N^{\Lambda^1}(w_{{\bf s},1},\nu_{{\bf s}^1},k_1)N^{\Lambda^2}(w_{{\bf s},2},\nu_{{\bf s}^2},k_2)\\
&=\frac{1}{2^n}N^{\Lambda}(w,\nu,1)\cdots N^{\Lambda}(w,\nu,n)\sum_{({\bf s}^1,{\bf s}^2)\in D^2(n)}1\\
&=N^{\Lambda}(w,\nu,1)\cdots N^{\Lambda}(w,\nu,n),
\end{aligned}
$$ which completes the proof of our claim (\ref{claimrd11}).
\end{proof}
Recall that for each $\beta=\sum_{i\in I}k_i\alpha_i\in Q^+$, $|\beta|=\sum_{i\in I}k_i$.
\begin{cor}\label{level2cor} Let $\mu\in I^\beta$, $\Lambda=\Lambda^1+\Lambda^2$ with $\Lambda^1,\Lambda^2\in P^+$. Then
$$\begin{aligned}
\dim\RR^\Lambda(\beta) e(\mu)=&\sum_{({\bf t}^1,{\bf t}^2)\in D^2(n)}\begin{pmatrix}|\beta|\\ |{\bf t}^1|\end{pmatrix} \dim\RR^{\Lambda^1}(\beta_{\mu_{{\bf t}^1}})e(\mu_{{\bf t}^1})\times
\dim\RR^{\Lambda^2}(\beta_{\mu_{{\bf t}^2}})e(\mu_{{\bf t}^2}),\\
\dim\RR^\Lambda(\beta)=&\sum_{\substack{\beta_1,\beta_2\in Q^+\\ \beta=\beta_1+\beta_2}}\Bigl(\begin{matrix}|\beta|\\ |\beta_1|\end{matrix}\Bigr)^2\dim\RR^{\Lambda^1}(\beta_1)\times\dim\RR^{\Lambda^2}(\beta_2).
\end{aligned}$$
\end{cor}
\begin{proof} Applying Theorem \ref{mainthmC1}, we can get that $$\begin{aligned}
\dim\RR^\Lambda(\beta)e(\mu)&=\sum_{\nu\in I^{\beta}}\sum_{\substack{\big(({\bf s}^1,{\bf s}^2),({\bf t}^1,{\bf t}^2)\big)\in D^2(\nu,\mu)}}\dim e(\nu_{{\bf s}^1})\RR^{\Lambda^1}(\beta_{\nu_{{\bf s}^1}})e(\mu_{{\bf t}^1})\\
&\qquad\qquad\qquad \times\dim e(\nu_{{\bf s}^2})\RR^{\Lambda^2}(\beta_{\nu_{{\bf s}^2}})e(\mu_{{\bf t}^2}).
\end{aligned}
$$
Note that for any $\bi\in I^{\beta_{\mu_{{\bf t}^1}}}, \bj\in I^{\beta_{\mu_{{\bf t}^2}}}$, the number of triples $(\nu,{\bf s}^1,{\bf s}^2)$ such that
$({\bf s}^1,{\bf s}^2)\in D^2(n)$, $\nu\in I^\beta$, $\nu_{{\bf s}^1}=\bi$ and $\nu_{{\bf s}^2}=\bj$, is exactly $\begin{pmatrix}|\beta|\\ |{\bf s}^1|\end{pmatrix}=\begin{pmatrix}|\beta|\\ |{\bf t}^1|\end{pmatrix}$. Hence we get the first equation. The proof of the second equation is similar.
\end{proof}
Generalizing a little further, we call an $l$-tuple $\underline{k}=(k_1,\cdots,k_l)$ of non-negative integers a composition of $n$ with length $l$ if $k_1+\cdots+k_l=n$. We denote by $\mathcal{CP}_n^l$ the set of compositions of $n$ with length $l$. For any $\underline{k}\in \mathcal{CP}_n^l$, we define $D^{\underline{k}}(n)$ to be the set of $\underline{k}=(k_1,\cdots,k_l)$-shuffles $({\bf s}^1,\cdots,{\bf s}^l)$ of $(1,2,\cdots,n)$. In particular, ${\bf s}^j$ is a strictly increasing sequence of $k_j$ integers for each $1\leq j\leq l$. Again, we allow some ${\bf s}^i$ to be empty. Now we define $$D^l(n):=\bigsqcup_{\substack{\underline{k}\in \mathcal{CP}_n^l}}D^{\underline{k}}(n).$$ For any $\mu,\nu\in I^\beta$, we define $$
D^{l}(\nu,\mu):=\bigl\{\big(({\bf s}^1,\cdots,{\bf s}^l),({\bf t}^1,\cdots,{\bf t}^l)\big)\in D^{l}(n)\times D^l(n)\bigm|\beta_{\nu_{{\bf s}^i}}=\beta_{\mu_{{\bf t}^i}},\,\,i=1,\cdots,l\bigr\}.$$
\begin{cor}\label{genaralizetion}
Suppose $\Lambda=\Lambda^1+\cdots+\Lambda^l$, where $\Lambda^i\in P^+$ for each $1\leq i\leq l$. Then
$$\begin{aligned}
\dim e(\nu)\RR^\Lambda(\beta) e(\mu)&=\sum_{\substack{\big(({\bf s}^1,\cdots,{\bf s}^l),({\bf t}^1,\cdots,{\bf t}^l)\big)\in D^l(\nu,\mu)}} \dim e(\nu_{{\bf s}^1})\RR^{\Lambda^1}(\beta_{\mu_{{\bf t}^1}})e(\mu_{{\bf t}^1})\times\cdots\\
&\qquad\qquad \times \dim e(\nu_{{\bf s}^l})\RR^{\Lambda^l}(\beta_{\mu_{{\bf t}^l}})e(\mu_{{\bf t}^l}),\\
\dim \RR^\Lambda(\beta) e(\mu)&=\sum_{({\bf t}^1,\cdots,{\bf t}^l)\in D^l(n)}\frac{(|{\bf t}^1|+\cdots+|{\bf t}^l|)!}{|{\bf t}^1|!\cdots|{\bf t}^l|!} \dim \RR^{\Lambda^1}(\beta_{\mu_{{\bf t}^1}})e(\mu_{{\bf t}^1})\times\cdots\\
&\qquad\qquad\times\dim \RR^{\Lambda^l}(\beta_{\mu_{{\bf t}^l}})e(\mu_{{\bf t}^l}).
\end{aligned}$$
\end{cor}
\begin{proof} This follows from Theorem \ref{mainthmC1}, Corollary \ref{level2cor} and an induction on $l$.
\end{proof}
\noindent
{\textbf{Proof of Theorem \ref{mainthmC}}}: This follows from Corollary \ref{genaralizetion}, or by induction on $l$ using Corollary \ref{level2cor}.\qed
\begin{rem}
The level reduction formula does not hold for graded dimensions. For example, we consider $NH_1^2$, i.e.\ the cyclotomic nilHecke algebra. Then we have $$\begin{aligned}
\dim_q NH_1^2&=1+q^2\\
&\neq \dim_q NH_1^1 \dim_q NH_0^1+\dim_q NH_1^1 \dim_q NH_0^1=1+1.
\end{aligned}$$
\end{rem}
Corollary \ref{genaralizetion} and Theorem \ref{mainthmC} give us a way to compute the dimensions of higher level cyclotomic quiver Hecke algebras via the dimensions of some lower level (e.g., level $1$) cyclotomic quiver Hecke algebras. Using the combinatorics of shifted Young diagrams and Fock space realizations, Ariki and Park have given a dimension formula for the finite quiver Hecke algebra (i.e., $\RR^{\Lambda_0}(\beta)$) of type $A^{(2)}_{2k}$ in \cite[Theorem 3.4]{AP14}. Now using Corollary \ref{genaralizetion}, we can generalize their combinatorial formula to $\RR^{l\Lambda_0}(\beta)$, $l\in \mathbb{N}$, without Fock space realizations. Corollary \ref{genaralizetion} also sheds some light on the construction of higher level Fock spaces of arbitrary type via the tensor products of some level $1$ Fock spaces.
\begin{cor}\label{sufficient} Let $\Lambda^i\in P^+, \beta_i\in Q^+$ for each $1\leq i\leq l$. Assume $\nu^i\in I^{\beta_i}$ and $e(\nu^i)\neq 0$ in $\RR^{\Lambda^i}(\beta_i)$ for each $1\leq i\leq l$. Then $e(\nu)\neq 0$ in $\RR^{\Lambda^1+\cdots+\Lambda^l}(\beta_1+\cdots+\beta_l)$, for any $\nu\in\,{\rm Shuff}(\nu^1,\cdots,\nu^l)$, where ${\rm Shuff}(\nu^1,\cdots,\nu^l)$ denotes the set of all possible shuffles of $\nu^1,\cdots,\nu^l$.
\end{cor}
\begin{proof}
By assumption, $\dim e(\nu^1)\RR^{\Lambda^1}(\beta_1)e(\nu^1)\cdots\dim e(\nu^l)\RR^{\Lambda^l}(\beta_l)e(\nu^l)\neq 0$. Applying Corollary \ref{genaralizetion}, we deduce that $e(\nu)\neq 0$ in $\RR^{\Lambda^1+\cdots+\Lambda^l}(\beta_1+\cdots+\beta_l)$.
\end{proof}
\begin{cor}\label{necessary}
Suppose $e(\nu)\neq 0$ in $\RR^{\Lambda}(\beta)$. Write $\Lambda=\Lambda^1+\cdots+\Lambda^l$ as a sum of $l$ dominant weights with lower levels. Then there exist $\nu^1,\cdots,\nu^l$, where $\nu^i\in I^{\beta_i}$ and $\beta_1+\cdots+\beta_l=\beta$, such that $e(\nu^i)\neq 0$ in $\RR^{\Lambda^i}(\beta_i)$, $i=1,\cdots,l$, and $\nu$ is a shuffle of $\nu^1,\cdots,\nu^l$.
\end{cor}
\begin{proof} This follows directly from Corollary \ref{genaralizetion}.
\end{proof}
The following corollary gives a third criterion for $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$. In type $A$ or type $C$, this follows from the Fock space realizations. Our result here is valid for an {\it arbitrary} symmetrizable Cartan matrix.
\begin{cor}\label{maincorC3}
Let $\beta\in Q_n^+, \nu\in I^\beta$. Assume $\Lambda=\Lambda_{t_1}+\cdots+\Lambda_{t_l}$, where $t_i\in I$ for each $1\leq i\leq l$. Then $e(\nu)\neq 0$ in $\RR^\Lambda(\beta)$ if and only if $\nu$ is a shuffle of some $l$-tuple $(\nu^1,\nu^2,\cdots,\nu^l)$, such that $\beta=\beta_{\nu^1}+\cdots+\beta_{\nu^l}$, and $e(\nu^i)\neq 0$ in $\RR^{\Lambda_{t_i}}(\beta_{\nu^i})$ for each $1\leq i\leq l$.
\end{cor}
\begin{proof}
The necessity follows from Corollary \ref{necessary} and the sufficiency follows from Corollary \ref{sufficient}.
\end{proof}
\bigskip
\section{Monomial bases of $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\mu)$ and $e(\mu)\RR^\Lambda(\beta)e(\widetilde{\nu})$}
Throughout this section, we fix $p\in\mathbb{N}$, $\mathbf{b}:=(b_1,\cdots,b_p)\in\mathbb{N}^p$ and $\nu^1,\cdots,\nu^p\in I$ such that $\nu^i\neq\nu^j$ for any $1\leq i\neq j\leq p$ and $\sum_{i=1}^{p}b_i=n$. We define \begin{equation}\label{wnu}
\widetilde{\nu}=(\widetilde{\nu}_1,\cdots,\widetilde{\nu}_n):=\bigl(\underbrace{\nu^1,\cdots,\nu^1}_{\text{$b_1$ copies}},\cdots,\underbrace{\nu^p,\cdots,\nu^p}_{\text{$b_p$ copies}}\bigr)\in I^\beta ,
\end{equation}
where $\beta\in Q_n^+$. We call the $b_i$-tuple $(\underbrace{\nu^{i},\nu^{i},\cdots,\nu^{i}}_{b_{i}})$ the $i$th part of $\widetilde{\nu}$. As before, we set $b_0:=0$ and $c_t:=\sum_{i=0}^{t}b_i$ for any $0\leq t\leq p$. The purpose of this section is to construct monomial bases for the subspaces $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\mu)$ and $e(\mu)\RR^\Lambda(\beta)e(\widetilde{\nu})$ for arbitrary $\mu\in I^\beta$. In particular, we shall give the proof of our fourth main result, Theorem \ref{mainthmD}.
\smallskip
\subsection{The case when $\mu=\widetilde{\nu}$}
The purpose of this subsection is to construct monomial bases for the subspace $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\widetilde{\nu})$.
\begin{dfn} For each $1\leq t\leq p$, we define $$
N^\Lambda_t(\widetilde{\nu}):=N^\Lambda(1,\widetilde{\nu},c_{t-1}+1).
$$
\end{dfn}
Our assumption that $\nu^i\neq\nu^j$ for any $1\leq i\neq j\leq p$ implies that $\mathfrak{S}(\widetilde{\nu},\widetilde{\nu})$ is the standard Young subgroup
$\mathfrak{S}_{\mathbf{b}}:=\mathfrak{S}_{\{1,\cdots,c_1\}}\times\cdots\times\mathfrak{S}_{\{c_{p-1}+1,\cdots,n\}}$ of $\mathfrak{S}_n$. Moreover, since $\nu^t\neq\nu^j$ for any $1\leq j<t$, it follows from the original definition (\ref{Ndef}) that \begin{equation}\label{positive}
N^\Lambda_t(\widetilde{\nu})\geq 0,\quad\forall\,1\leq t\leq p.
\end{equation}
\mathfrak{b}egin{thm}\label{mainthm2a} Let $\Lambda\in P^+$ be arbitrary. Let $\mathfrak{b}eta\in Q_n^+$ such that $\widetilde{\nu}\in I^\mathfrak{b}eta$. Then we have
$$ \dim e(\widetilde{\nu})\RR^{\Lambda}(\mathfrak{b}eta)e(\widetilde{\nu})=\mathfrak{p}rod_{i=1}^{p}\Bigl(b_{i}!\mathfrak{p}rod_{j=0}^{b_i-1}(N^\Lambda_{i}(\widetilde{\nu})-j)\Bigr). $$
In particular, $e(\widetilde{\nu})\neq 0$ if and only if $N^\Lambda_{i}(\widetilde{\nu})\mathfrak{g}eq b_{i}$ for any $1\leq i\leq p$.
\end{thm}
\begin{proof}
The first part of the theorem follows from Theorem \ref{mainthm1b}.
We now consider the second part. If $N^\Lambda_{i}(\widetilde{\nu})\geq b_{i}$ for any $1\leq i\leq p$, then by the first part of the theorem we have $\dim e(\widetilde{\nu})\RR^{\Lambda}(\beta)e(\widetilde{\nu})>0$. In particular, $e(\widetilde{\nu})\neq 0$. Conversely, suppose that
$N^\Lambda_{i}(\widetilde{\nu})\leq b_{i}-1$ for some $1\leq i\leq p$. By (\ref{positive}), $N^\Lambda_i(\widetilde{\nu})\geq 0$ for any $1\leq i\leq p$. It follows that $0$ must appear as a factor in the product $\prod_{j=0}^{b_i-1}(N^\Lambda_{i}(\widetilde{\nu})-j)$. Hence $\dim e(\widetilde{\nu})\RR^{\Lambda}(\beta)e(\widetilde{\nu})=0$, which implies that $e(\widetilde{\nu})=0$. This completes the proof of the second part and hence the whole theorem.
\end{proof}
Comparing Corollary \ref{maincor2b} with the above dimension formula, it is natural to expect $e(\widetilde{\nu})\RR^{\Lambda}(\beta)e(\widetilde{\nu})$ to be isomorphic to a tensor product of certain cyclotomic nilHecke algebras. In this subsection we shall show that this is indeed the case.
Let $1\leq a<n$. Following \cite[(3.6)]{KK}, we define the operator $\partial_{a}$ on $$
\bigoplus_{\mu\in I^\beta}K[x_1,\cdots,x_n]e(\mu)\subset\RR(\beta)$$ by $$
\partial_a f:=\frac{s_a(f)-f}{x_a-x_{a+1}}\sum_{\substack{\mu\in I^\beta\\ \mu_a=\mu_{a+1}}}e(\mu),\,\,\, \forall\,f\in K[x_{1},x_{2},\cdots,x_{n}]e(\mu).
$$
\begin{lem}\label{Lemma 8}
Let $\beta\in Q_n^+$, $f\in K[x_{1},x_{2},\cdots,x_{n}]$, and $\nu\in I^{\beta}$ be such that $\nu_{k}=\nu_{k+1}$, where $1\leq k<n$. If we have $fe(\nu)=0$ in $\RR^{\Lambda}(\beta)$, then $\partial_{k}(f)e(\nu)=0$ in $\RR^{\Lambda}(\beta)$.
\end{lem}
\begin{proof} This follows from \cite[Lemma 4.2]{KK} by taking $M=\RR^\Lambda(\beta)$ there.
\end{proof}
\begin{lem}\label{Lemma 9} Let $p_1:=a^{\Lambda}_{\nu^{1}}(x_{1})$. For any $1<i\leq p$, we set $$
p_{c_{i-1}+1}=a^{\Lambda}_{\nu^{i}}(x_{c_{i-1}+1})\prod_{t=1}^{i-1}\prod_{d=c_{t-1}+1}^{c_{t}}Q_{\nu^{t},\nu^{i}}(x_{d},x_{c_{i-1}+1}). $$
Then $p_{c_{i-1}+1}\in \RR^\Lambda(\beta)$ is a polynomial in $x_{c_{i-1}+1}$ of degree $N^\Lambda_{i}(\widetilde{\nu})$ with leading coefficient in $K^\times$ and other coefficients in $K[x_{1},x_{2},\cdots,x_{c_{i-1}}]$. Moreover, $p_{c_{i-1}+1}e(\widetilde{\nu})$ is a zero element in $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})$.
\end{lem}
\begin{proof}
The first part is a direct computation. For the last part, just consider
$\psi_{c_{i-1}}\psi_{c_{i-1}-1}\cdots\psi_{1}a^{\Lambda}_{\nu^{i}}(x_{1})e(\widehat{\nu})\psi_{1}\psi_{2}\cdots\psi_{c_{i-1}}$, where $\widehat{\nu}$ is the $n$-tuple obtained by moving the $(c_{i-1}+1)$-th component of $\widetilde{\nu}$ (which is exactly $\nu^{i}$) to the first position, keeping the relative positions of all the other components unchanged. By definition $a^{\Lambda}_{\nu^{i}}(x_{1})e(\widehat{\nu})=0$ in $\RR^\Lambda(\beta)$. On the other hand, since $\nu^i\neq\nu^t$ for any $1\leq t<i$, we have that $$
\psi_{c_{i-1}}\psi_{c_{i-1}-1}\cdots\psi_{1}a^{\Lambda}_{\nu^{i}}(x_{1})e(\widehat{\nu})=
a^{\Lambda}_{\nu^{i}}(x_{c_{i-1}+1})\psi_{c_{i-1}}\psi_{c_{i-1}-1}\cdots\psi_{1}e(\widehat{\nu}).
$$
Finally, the lemma follows because $$
\psi_{c_{i-1}}\psi_{c_{i-1}-1}\cdots\psi_{1}e(\widehat{\nu})\psi_{1}\psi_{2}\cdots\psi_{c_{i-1}}
=\prod_{t=1}^{i-1}\prod_{d=c_{t-1}+1}^{c_{t}}Q_{\nu^{t},\nu^{i}}(x_{d},x_{c_{i-1}+1}),
$$
where again we have used the assumption that $\nu^i\neq\nu^t$ for any $1\leq t<i$.
\end{proof}
\begin{prop}\label{Prop 2} Let $1\leq i\leq p$. For any integer $k$ which satisfies $c_{i-1}<k\leq c_{i}$, there exists a monic polynomial $p_{k}$ in $x_{k}$ of degree $N^\Lambda_{i}(\widetilde{\nu})-(k-c_{i-1}-1)$ with coefficients in $K[x_{1},x_{2},\cdots,x_{k-1}]$. Moreover,
$p_{k}e(\widetilde{\nu})$ is a zero element in $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})$.
\end{prop}
\begin{proof}
By Lemma \ref{Lemma 9}, we see that, up to a scalar in $K^\times$, $p_{c_{i-1}+1}$ satisfies the requirement for $k=c_{i-1}+1$. We take $p_{c_{i-1}+2}:=\partial_{c_{i-1}+1}(p_{c_{i-1}+1})$. Then by Lemma \ref{Lemma 8}, it is easy to see that $p_{c_{i-1}+2}$ also satisfies the requirement for $k=c_{i-1}+2$. In general, the proposition follows by induction on $k$.
\end{proof}
\begin{thm}\label{mainthm2b} The following set \begin{equation}\label{base1}
\Bigl\{\psi_{w }\prod_{k=1}^{n}x_{k}^{r_{k}}e(\widetilde{\nu})\Bigm|\begin{matrix}\text{$w\in\mathfrak{S}_{\mathbf{b}}$, for any $1\leq i\leq p$, $c_{i-1}<k\leq c_{i}$,}\\
\text{$r_{k}\in\{0,1,\cdots,N^\Lambda_{i}(\widetilde{\nu})-(k-c_{i-1})\}$}\end{matrix}\Bigr\}
\end{equation} forms a $K$-basis of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})$.
\end{thm}
\begin{proof}
Applying Proposition \ref{Prop 2}, we see that the elements in the above set (\ref{base1}) span the $K$-linear space $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})$. Counting the dimensions and using Theorem \ref{mainthm2a}, we see that the set (\ref{base1}) must be a $K$-basis of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})$. This proves the theorem.
\end{proof}
\begin{cor} We have that $$
\dim_q e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})=\prod_{i=1}^{p}\Bigl(\prod_{k=1}^{b_i}\frac{q_{\nu_k}^{-2k}-1}{q_{\nu_k}^{-2}-1}
\prod_{t=c_{i-1}+1}^{c_i}(1+q_{\nu_t}^{2}+\cdots+q_{\nu_t}^{2(N^\Lambda_i(\widetilde{\nu})-t)})\Bigr).
$$
\end{cor}
\begin{proof} This follows from Theorem \ref{mainthm2b}.
\end{proof}
\begin{prop}\label{mainprop2}
There is a $K$-linear isomorphism: $$\gamma:\,\,e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})\,\,\cong\,\,\mathscr{H}^{(0)}_{N^\Lambda_{1}(\widetilde{\nu}),b_{1}}\otimes\mathscr{H}^{(0)}_{N^\Lambda_{2}(\widetilde{\nu}),b_{2}}\otimes\cdots
\otimes\mathscr{H}^{(0)}_{N^\Lambda_{p}(\widetilde{\nu}),b_{p}}. $$
\end{prop}
\begin{proof} For each $1\leq k\leq p$, we use $\tau_k$ to denote the canonical isomorphism $\mathfrak{S}_{\{c_{k-1}+1,c_{k-1}+2,\cdots,c_k\}}\cong\mathfrak{S}_{b_k}$ which is uniquely determined on generators by $s_{c_{k-1}+j}\mapsto s_j,\,\forall\,1\leq j<b_k$. We construct a linear map $$\gamma:\,\,e(\widetilde{\nu})\RR^\Lambda({\beta})e(\widetilde{\nu})\,\,\rightarrow\,\,\mathscr{H}^{(0)}_{N^\Lambda_{1}(\widetilde{\nu}),b_{1}}\otimes\mathscr{H}^{(0)}_{N^\Lambda_{2}(\widetilde{\nu}),b_{2}}\otimes\cdots\otimes
\mathscr{H}^{(0)}_{N^\Lambda_{p}(\widetilde{\nu}),b_{p}} $$
which sends $\psi_{u_{1}u_{2}\cdots u_{p}}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\widetilde{\nu})$ to $$
(\psi_{\tau_1(u_1)}X_{1},\psi_{\tau_2(u_2)}X_{2},\cdots,\psi_{\tau_p(u_p)}X_{p}),
$$
where for each $1\leq i\leq p$, $u_i\in\mathfrak{S}_{\{c_{i-1}+1,\cdots,c_i\}}$ and $X_{i}:=\prod_{k=1}^{b_{i}}x^{r_{k+c_{i-1}}}_{k}$,
and for each $c_{i-1}+1\leq t\leq c_i$, $r_{t}\in\{0,1,\cdots,N^\Lambda_{i}(\widetilde{\nu})-(t-c_{i-1})\}$. Applying \cite[Theorem 2.34]{HuL}, Theorem \ref{mainthm2b} and Corollary \ref{maincor2a}, one sees that $\gamma$ is a $K$-linear isomorphism.
\end{proof}
\smallskip
\subsection{The general case}
In this subsection we shall construct monomial bases for the subspaces $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\mu)$ and $e(\mu)\RR^\Lambda(\beta)e(\widetilde{\nu})$ for arbitrary $\mu\in I^\beta$.
Recall that we have fixed a special $n$-tuple $\widetilde{\nu}\in I^n$ at the beginning (\ref{wnu}) of this section. Let $\beta\in Q_n^+$ be such that $\widetilde{\nu}\in I^\beta$. For any $\mu\in I^\beta$, we can always choose a minimal length right $\mathfrak{S}_\mathbf{b}$-coset representative $d_\mu$ of $\mathfrak{S}_{\mathbf{b}}$ in $\mathfrak{S}_n$ such that $d_\mu^{-1}\widetilde{\nu}=\mu$. In particular, $\mathfrak{S}(\widetilde{\nu},\mu)=d_{\mu}^{-1}\mathfrak{S}_{\mathbf{b}}$ and hence $\mathfrak{S}(\mu,\widetilde{\nu})=\mathfrak{S}_{\mathbf{b}}d_{\mu}$.
The following crucial definition plays an important role in our later construction of monomial bases for the subspaces $e(\widetilde{\nu})\RR^\Lambda(\beta)e(\mu)$ and $e(\mu)\RR^\Lambda(\beta)e(\widetilde{\nu})$.
\begin{dfn}\label{weight} Let $\mu=(\mu_1,\cdots,\mu_n)\in I^\beta, 1\leq k\leq n$. We define \begin{equation}\label{wt}
N^\Lambda(\mu,k):=N^\Lambda(d_\mu,\mu,k)+|\{1\leq j<k\mid\mu_{j}=\mu_{k}\}|. \end{equation}
\end{dfn}
\begin{examp}\label{toyexamp}
Suppose $\mu=\widetilde{\nu}$; then $d_\mu=1$ and $N^\Lambda(\mu,k)=N^\Lambda_{i}(\widetilde{\nu})-(k-c_{i-1}-1)$ whenever $c_{i-1}<k\leq c_{i}$ for some $1\leq i\leq p$.
\end{examp}
The following result is a crucial ingredient in the proof of our main result in this subsection.
\begin{lem}\label{keylem2} Let $1\leq i\leq p$ and $\mu\in I^\beta$. Let $1\leq t_1<t_2<\cdots<t_{b_i}\leq n$ be the unique $b_i$ integers such that $\mu_{t_j}=\nu^i$.
Let $w=w_1\times\cdots\times w_{p}\in\mathfrak{S}_\mathbf{b}$, where $w_{k}\in\mathfrak{S}_{\{c_{k-1}+1,\cdots,c_{k}\}}$ for each $1\leq k\leq p$. Then for any $1\leq j\leq b_i$, $$
N^\Lambda(wd_\mu,\mu,t_{j})=N^\Lambda(d_\mu,\mu,t_{j})+2(j-1)-2|\tilde{J}_{w_i}^{<d_\mu(t_j)}|, $$
where $\tilde{J}_{w_i}^{<d_\mu(t_j)}:=\{c_{i-1}+1\leq a<d_\mu(t_j)\mid w_i(a)<w_i(d_\mu(t_j))\}$.
In particular, $N^\Lambda(wd_\mu,\mu,t_{j})$ does not depend on $w_k$ for $1\leq k\neq i\leq p$.
\end{lem}
\begin{proof}
By definition of $d_\mu\in\mathfrak{S}(\mu,\widetilde{\nu})$, $d_{\mu}(k)\in \{c_{r-1}+1,c_{r-1}+2,\cdots,c_{r}\}$ whenever $\mu_{k}=\nu^{r}$. Therefore, we have $$\begin{aligned}
J_{wd_{\mu}}^{<t_j}&=\{1\leq s<t_{j}\mid wd_{\mu}(s)<wd_{\mu}(t_{j})\}\\
&=\bigl\{1\leq s<t_j\bigm|s\notin\{t_{1},t_{2},\cdots,t_{j-1}\},\, wd_{\mu}(s)<wd_{\mu}(t_{j})\bigr\}\\
&\qquad \cup \bigl\{t_{a}\bigm|1\leq a\leq j-1,\,\,wd_{\mu}(t_a)<w d_{\mu}(t_{j})\bigr\}\\
&=\bigl\{1\leq s<t_j\bigm|s\notin\{t_{1},t_{2},\cdots,t_{j-1}\},\, d_{\mu}(s)<d_{\mu}(t_{j})\bigr\}\\
&\qquad\cup \bigl\{t_{a}\bigm|1\leq a\leq j-1,\,\,w_{i} d_{\mu}(t_{a})<w_{i} d_{\mu}(t_{j})\bigr\}.
\end{aligned}
$$
Since $d_{\mu}$ is a minimal length right $\mathfrak{S}_\mathbf{b}$-coset representative in $\mathfrak{S}_n$, we have $d_{\mu}(t_{1})<d_{\mu}(t_{2})<\cdots<d_{\mu}(t_{b_{i}})$.
It follows that $$\begin{aligned}
N^\Lambda(wd_{\mu},\mu,t_{j})&=\Bigl(\Lambda-\sum_{s\in J_{wd_{\mu}}^{<t_j}}\alpha_{\mu_{s}}\Bigr)(h_{\mu_{t_{j}}})\\
&=N^\Lambda(d_{\mu},\mu,t_{j})+2(j-1)-2|\tilde{J}_{w_{i}}^{<d_{\mu}(t_j)}|.
\end{aligned}
$$
This completes the proof of the lemma.
\end{proof}
\begin{thm}\label{mainthm3a} Let $\mu=(\mu_1,\cdots,\mu_n)\in I^\beta$. Then we have $$
\dim\,e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)=\dim\,e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu})=\Bigl(\prod_{i=1}^{p}b_{i}!\Bigr)\Bigl(\prod_{t=1}^{n}N^\Lambda(\mu,t)\Bigr).
$$
\end{thm}
\begin{proof} Using the anti-isomorphism $\ast$, we see that $$
\dim\,e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)=\dim\,e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu}) .
$$
Note that $\mathfrak{S}(\mu,\widetilde{\nu})=\mathfrak{S}_\mathbf{b} d_\mu$. Applying Theorem \ref{mainthmA} and Lemma \ref{keylem2}, we have $$\begin{aligned}
&\quad\,\dim\,e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu})\\
&=\sum_{w\in\mathfrak{S}_{\mathbf{b}}}\prod_{t=1}^{n}N^\Lambda(wd_{\mu},\mu,t)\\
&=\prod_{i=1}^{p}\sum_{u\in\mathfrak{S}_{\{c_{i-1}+1,\cdots,c_i\}}}\prod_{\substack{1\leq t\leq n\\ \mu_{t}=\nu^i}}N^\Lambda(ud_{\mu},\mu,t).
\end{aligned}$$
For each $1\leq i\leq p$, we denote by $1\leq t_{i1}<t_{i2}<\cdots<t_{ib_i}\leq n$ the unique $b_i$-tuple such that $\mu_{t_{ij}}=\nu^i$, $\forall\,1\leq j\leq b_i$.
For each $1\leq j\leq b_i$, we set $$
N_{ij}:=N^\Lambda(d_\mu,\mu,t_{ij})+2(j-1). $$
Then, using Lemma \ref{keylem2} again, combining with the bijection in Lemma \ref{bij}, we can deduce that
$$\begin{aligned}
&\quad\,\sum_{u\in\mathfrak{S}_{\{c_{i-1}+1,\cdots,c_i\}}}\prod_{\substack{1\leq t\leq n\\ \mu_t=\nu^i}} N^\Lambda(ud_{\mu},\mu,t)\\
&=\prod_{k=1}^{b_i}(N_{ik}+N_{ik}-2+N_{ik}-4+\cdots+N_{ik}-2(k-1))\\
&={b_i}!\prod_{k=1}^{b_i}(N_{ik}-(k-1))=b_{i}!\prod_{\substack{1\leq t\leq n\\ \mu_{t}=\nu^{i}}}N^\Lambda(\mu,t).
\end{aligned}
$$
Finally, we take the product of the above identity over $1\leq i\leq p$. Then we can deduce that $\dim\,e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu})=\Bigl(\prod_{i=1}^{p}b_{i}!\Bigr)\Bigl(\prod_{t=1}^{n}N^\Lambda(\mu,t)\Bigr)$.
This completes the proof of the theorem.
\end{proof}
\begin{cor}\label{maincor3} Let $\mu\in I^\beta$. Then $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)\neq 0$ if and only if for any $1\leq k\leq n$, $N^\Lambda(\mu,k)>0$.
\end{cor}
\begin{proof} The ``if'' part of the corollary follows directly from Theorem \ref{mainthm3a}. It remains to prove the ``only if'' part of the corollary.
Suppose that $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)\neq 0$. Assume there exists some $1\leq s\leq n$ such that $N^\Lambda(\mu,s)\leq 0$. First, $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)\neq 0$ implies that for any $1\leq k\leq n$, $N^\Lambda(\mu,k)\neq 0$.
For each $1\leq i\leq p$ we define $$
\{t_{ia}\mid 1\leq a\leq b_i,\, t_{i1}<t_{i2}<\cdots<t_{ib_i}\}:=\{1\leq k\leq n\mid\mu_k=\nu^i\}.
$$
By definition (because $a_{kl}\leq 0$ for any $k\neq l$), $N^\Lambda(\mu,t_{i1})>0$ for any $1\leq i\leq p$.
Suppose that $N^\Lambda(\mu,t_{ij})<0$ for some $1\leq j\leq b_i$ and $1\leq i\leq p$. Assume that $i,j$ are chosen such that $t_{ij}$ is as small as possible. By the last paragraph, we can deduce that $j>1$. Thus $N^\Lambda(\mu,t_{ia})>0$ for any $1\leq a<j$.
Note that $d_\mu(t_{i(j-1)})<d_\mu(t_{ij})$ and $\<\alpha_{\mu_{t_{i(j-1)}}},h_{\mu_{t_{ij}}}\>=2$. It follows that $$
N^\Lambda(\mu,t_{i(j-1)})\leq N^\Lambda(\mu,t_{ij})+1 ,
$$
which is a contradiction because $N^\Lambda(\mu,t_{ij})<0<N^\Lambda(\mu,t_{i(j-1)})$. This completes the proof of the ``only if'' part and hence the corollary.
\end{proof}
We want to construct explicit homogeneous monomial bases for $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$ and $e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu})$, from which
one can also derive the graded dimensions of these two subspaces.
\begin{lem}\label{psidmu} Let $\mu\in I^\beta$. Let $s_{i_1}\cdots s_{i_m}$ and $s_{j_1}\cdots s_{j_m}$ be two reduced expressions of $d_\mu$. Then $$
\psi_{i_1}\cdots \psi_{i_m}e(\mu)=\psi_{j_1}\cdots \psi_{j_m}e(\mu) .
$$
In other words, $\psi_{d_\mu}e(\mu):=\psi_{i_1}\cdots \psi_{i_m}e(\mu)$ depends only on $\mu$ but not on the choice of the reduced expression of $d_\mu$.
\end{lem}
\begin{proof} Applying the defining relations of $\RR^\Lambda(\beta)$ or \cite[Theorem 4.10]{BKW}, we see that $\psi_{i_1}\cdots \psi_{i_m}e(\mu)-\psi_{j_1}\cdots \psi_{j_m}e(\mu)$ is either equal to zero or equal to a $K$-linear combination of some elements of the form $$
e(\widetilde{\nu})\psi_{p_1}\cdots \psi_{p_t}x_1^{d_1}\cdots x_n^{d_n}e(\mu),
$$
where $t<m$, $d_1,\cdots,d_n\in\mathbb{N}$. However, $d_\mu$ is a minimal length right $\mathfrak{S}_\mathbf{b}$-coset representative in $\mathfrak{S}_n$ such that $d_\mu\mu=\widetilde{\nu}$, and in particular
a minimal length element in $\mathfrak{S}_n$ such that $d_\mu\mu=\widetilde{\nu}$. It follows that the second case cannot happen. In other words, $\psi_{i_1}\cdots \psi_{i_m}e(\mu)=\psi_{j_1}\cdots \psi_{j_m}e(\mu)$.
\end{proof}
\begin{lem}\label{keylem3} Let $\mu\in I^\beta$. Suppose that $1\leq k\leq n$ with $N^\Lambda(\mu,k)>0$. Then there exists a monic polynomial $p_{k}$ in $x_{k}$ of degree $N^\Lambda(\mu,k)$ with coefficients in $K[x_{1},x_{2},\cdots,x_{k-1}]$. Moreover, $\psi_{d_\mu} p_{k}e(\mu)$ is a zero element in $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$.
\end{lem}
\begin{proof}
Suppose $\mu_k=\nu^i$, where $\nu^i\in I$. In particular, $c_{i-1}<d_\mu(k)\leq c_i$. Recall the definitions of $\widetilde{\nu}$ and $\{c_j\mid 1\leq j\leq p\}$ at the beginning of this section. We define $\mathcal{J}_i:=\{1\leq m<k\mid d_\mu(m)>c_{i}\}$ and write $$
\mathcal{J}_i=\{m_j\mid 1\leq j\leq g,\, 1\leq m_{1}<m_{2}<\cdots<m_{g}<k\}.
$$
Then $\mathcal{J}_i=\{1\leq m<k\mid\mu_m=\nu^t,\, i<t\leq p\}$.
We consider the following product of cycles: $$\begin{aligned}
u_1:&=(k-g+1,k-g,\cdots,m_1+1,m_1)(k-g+2,k-g+1,\cdots,m_2+1,m_2)\cdots \\
&\qquad\qquad (k,k-1,\cdots,m_g+1,m_g) .\end{aligned}
$$
Clearly we have $$
u_1=(s_{k-g}\cdots s_{m_{1}+1}s_{m_{1}})(s_{k-g+1}\cdots s_{m_2+1}s_{m_2})\cdots (s_{k-1}\cdots s_{m_g+1}s_{m_g}),
$$
and this is a reduced expression of $u_1$. We set $\mu^{[1]}:=u_1\mu$. In other words, $\mu^{[1]}$ is obtained from $\mu$ by moving its $m_1$-th, $\cdots$, $m_g$-th components to the $(k-g+1)$-th, $\cdots$, $k$-th positions respectively, keeping the relative positions of all the remaining components of $\mu$ unchanged.
In particular, we have $\mu^{[1]}_{k-g}=\nu^i$ and there is no $t<k-g$ such that $\mu^{[1]}_{t}=\nu^j$ with $j>i$.
Now we define $\mathcal{J}'_i:=\{1\leq l<k-g\mid\mu_l^{[1]}=\nu^{i}\}$ and write $$
\mathcal{J}'_i=\{l_j\mid 1\leq j\leq r,\, 1\leq l_{1}<l_{2}<\cdots<l_{r}<k-g\}.
$$
Let $\mu^{[2]}$ be the $n$-tuple obtained from $\mu^{[1]}$ by moving its $l_1$-th, $\cdots$, $l_r$-th components to the $(k-g-r)$-th, $\cdots$, $(k-g-1)$-th positions respectively, keeping the relative positions of all the remaining components of $\mu^{[1]}$ unchanged. We can choose $u_2$ to be the unique minimal length element satisfying $\mu^{[2]}=u_2\mu^{[1]}$. In particular, for any $a<k-g-r$ we have $\mu^{[2]}_a=\nu^j$ with $j<i$, while for any $k-g-r\leq b\leq k-g$ we have $\mu^{[2]}_b=\nu^i$.
Let $\widehat{\mu}$ be the $n$-tuple obtained from $\mu^{[2]}$ by moving the $(k-g-r)$-th component $\mu^{[2]}_{k-g-r}$ (which is equal to $\nu^{i}$ by construction) of $\mu^{[2]}$ to the first position, keeping the relative positions of all the other components unchanged. We consider $$\psi_{k-g-r}\psi_{k-g-r-1}\cdots\psi_2\psi_{1}a^{\Lambda}_{\nu^{i}}(x_{1})e(\widehat{\mu})\psi_{1}\psi_{2}\cdots\psi_{k-g-r-1}\psi_{k-g-r}.
$$
The same argument as in the proof of Lemma \ref{Lemma 9} shows that this equals $\widehat{p}_k e(\mu^{[2]})$, where $\widehat{p}_k$ is a polynomial in $x_{k-g-r}$ of degree $N^\Lambda(\mu,k)+r$ with leading coefficient in $K^\times$ and other coefficients in $K[x_{1},x_{2},\cdots,x_{k-g-r-1}]$. Clearly, this is zero in $\RR^\Lambda({\beta})e(\mu^{[2]})$.
Using Lemma \ref{Lemma 8} we can deduce that there is a monic polynomial $p_k^{[2]}$ in $x_{k-g}$ of degree $N^\Lambda(\mu,k)$ with coefficients in $K[x_{1},x_{2},\cdots,x_{k-g-1}]$, such that $p_k^{[2]}e(\mu^{[2]})$ is zero in $\RR^\Lambda({\beta})e(\mu^{[2]})$. Now we define $p_{k}:=u_1^{-1}u_2^{-1}(p_k^{[2]})$; then $p_{k}$ is a monic polynomial in $x_{k}$ of degree $N^\Lambda(\mu,k)$ with coefficients in $K[x_{1},x_{2},\cdots,x_{k-1}]$ and $$\begin{aligned}\label{poly}
\psi_{u_2}\psi_{u_1}p_k e(\mu)=p_k^{[2]}\psi_{u_2}\psi_{u_1}e(\mu)=p_k^{[2]}e(\mu^{[2]})\psi_{u_2}\psi_{u_1}=0.
\end{aligned}
$$
Finally, by construction we can find $u_3\in\mathfrak{S}_n$ such that $d_\mu=u_3u_2u_1$ and $\ell(d_\mu)=\ell(u_3)+\ell(u_2)+\ell(u_1)$. Hence by Lemma \ref{psidmu}, $\psi_{d_\mu} p_{k}e(\mu)=\psi_{u_3}\psi_{u_2}\psi_{u_1}p_k e(\mu)=0$.
\end{proof}
Henceforth, for each $w\in\mathfrak{S}_\mathbf{b}$, we fix a reduced expression $s_{j_1}\cdots s_{j_a}$ of $w$ and define \begin{equation}\label{psiwd1}
\psi^{\mathbf{1}}_{wd_\mu}:=\psi_{j_1}\cdots\psi_{j_a}\psi_{d_\mu} .
\end{equation}
Note that every element in $\mathfrak{S}(\mu,\widetilde{\nu})$ is of the form $wd_\mu$ for some $w\in\mathfrak{S}_{\mathbf{b}}$.
\begin{thm}\label{mainthm3b}
Suppose that $N^\Lambda(\mu,k)>0$ for any $1\leq k\leq n$. Then the elements in the following set $$
\Bigl\{\psi^{\mathbf{1}}_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)\Bigm| w \in\mathfrak{S}(\mu,\widetilde{\nu}),\, 0\leq r_{k}<N^\Lambda(\mu,k),\, \forall\,1\leq k\leq n\Bigr\}$$
form a $K$-basis of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$.
\end{thm}
\begin{proof}
This follows from Theorem \ref{mainthm3a} and Lemma \ref{keylem3}.
\end{proof}
\noindent
\textbf{Proof of Theorem \ref{mainthmD}}: For each $j>0$, we define $$M_j=\text{$K$-Span}\Bigl\{\psi^{\mathbf{1}}_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)\Bigm| w \in\mathfrak{S}(\mu,\widetilde{\nu}),\, \ell(w)<j,\,0\leq r_{k}<N^\Lambda(\mu,k),\, \forall\,1\leq k\leq n\Bigr\}.$$ We claim that for any $w \in\mathfrak{S}(\mu,\widetilde{\nu})$, any reduced expression $w=s_{i_1}\cdots s_{i_t}$ of $w$ and any non-negative integers $\{r_k\geq 0\mid 1\leq k\leq n\}$, \begin{equation}\label{unitriangular}
\psi_{i_1}\cdots\psi_{i_t}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)-\psi^{\mathbf{1}}_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)\in M_{\ell(w)}.
\end{equation}
We prove this by induction on $\ell(w)$. When $w=d_\mu$ this follows from Lemma \ref{psidmu}. As in Lemma \ref{psidmu}, we can write $\psi_{i_1}\cdots\psi_{i_t}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)-\psi^{\mathbf{1}}_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)$ as a $K$-linear combination of some elements of the form $$
\psi_{p_1}\cdots \psi_{p_s}x_1^{d_1}\cdots x_n^{d_n}e(\mu),
$$
where $s<\ell(w)$, $d_1,\cdots,d_n\in\mathbb{N}$ and $s_{p_1}\cdots s_{p_s}$ is a reduced expression of $u:=s_{p_1}\cdots s_{p_s}$. Then by the induction hypothesis, we have $$\psi_{p_1}\cdots \psi_{p_s}x_1^{d_1}\cdots x_n^{d_n}e(\mu)\in \psi^{\mathbf{1}}_u x_1^{d_1}\cdots x_n^{d_n}e(\mu)+M_{\ell(u)}.
$$ Now applying Lemma \ref{keylem3}, we can see that $\psi^{\mathbf{1}}_u x_1^{d_1}\cdots x_n^{d_n}e(\mu)\in M_{\ell(u)+1}\subseteq M_{\ell(w)}$. Moreover, $M_{\ell(u)}\subset M_{\ell(w)}$. Hence our claim follows. Since the transition matrix between the elements given in Theorem \ref{mainthm3b} and the elements given in Theorem \ref{mainthmD} is unitriangular, Theorem \ref{mainthmD} follows from Theorem \ref{mainthm3b} immediately.
\qed
Using the anti-isomorphism $\ast$ of $\RR^\Lambda(\beta)$, one can also get a $K$-basis for the subspace $e(\mu)\RR^\Lambda({\beta})e(\widetilde{\nu})$. Next we want to compare two different such spaces.
\begin{lem}\label{Lemma 12} Let $\mu\in I^n$ and $1\leq k<n$. If $d_{\mu}>d_{\mu}s_k$, then $d_{\mu s_{k}}=d_{\mu}s_{k}$. In general, if
$d_{\mu}=d_{1}d_{2}$ with $\ell(d_{\mu})=\ell(d_{1})+\ell(d_{2})$, then $d_{\mu d_{2}^{-1}}=d_{1}$.
\end{lem}
\begin{proof} This follows from \cite[Lemma 1.4(ii)]{DJ1}.
\end{proof}
\begin{lem}\label{Lemma 13} Let $1\leq a<n$. Suppose that $d_{\mu}>d_{\mu}s_a$ (and hence $d_\mu(a)>d_\mu(a+1)$); then
\[N^\Lambda(\mu,k)=
\begin{cases}
N^\Lambda(\mu s_{a},k),& \text{if $k\neq a,\,a+1$};\\
N^\Lambda(\mu s_{a}, k+1)+\<\alpha_{\mu_{a+1}},h_{\mu_{a}}\>, & \text{if $k=a$}; \\
N^\Lambda(\mu s_{a}, k-1), & \text{if $k=a+1$}.\\
\end{cases}\]
\end{lem}
\begin{proof} Suppose $k\neq a,a+1$. We consider the map $$
\theta_a: J_{d_\mu}^{<k}\rightarrow J_{d_\mu s_a}^{<k},\,\, t\mapsto s_a(t) .
$$
It is clear that $\theta_a$ is a well-defined bijection in this case. Thus $N^\Lambda(\mu s_{a},k)=N^\Lambda(\mu,k)$.
Suppose $k=a+1$. Then in this case it is clear that $J_{d_\mu}^{<a+1}=J_{d_\mu s_a}^{<a}$ because $a\notin J_{d_\mu}^{<a+1}$. Hence $N^\Lambda(\mu,a+1)=N^\Lambda(\mu s_{a},a)$.
Finally, suppose $k=a$. Then $\theta_a$ restricts to a bijection between $J_{d_\mu}^{<a}$ and $J_{d_\mu s_a}^{<a+1}\setminus\{a\}$. In this case it follows from the definition that $N^\Lambda(\mu,a)=N^\Lambda(\mu s_{a},a+1)+\<\alpha_{\mu_{a+1}},h_{\mu_{a}}\>$.
\end{proof}
For each $1\leq t\leq p$, we set $\ell_t:=\<\Lambda,h_{\nu^t}\>$.
\begin{examp}\label{Example 2}
Let $\widetilde{\nu}=(1,1,2)$, $\mu=(2,1,1)$; then $d_{\mu}=s_2s_1$. By definition, we have $$N^\Lambda(\mu,1)=\ell_{2},\,\,\,N^\Lambda(\mu,2)=\ell_{1},\,\,\,N^\Lambda(\mu,3)=\ell_{1}-1.$$
Now we consider $\mu\,s_{1}=(1,2,1)$. One can check directly that $$
N^\Lambda(\mu s_1,1)=\ell_{1},\,\,\,N^\Lambda(\mu s_1,2)=\ell_{2}-\<\alpha_1,h_2\>,\,\,\, N^\Lambda(\mu s_1,3)=\ell_{1}-1. $$
\end{examp}
\begin{cor}\label{prop 3} Suppose that $N^\Lambda(\mu,k)>0$ for any $1\leq k\leq n$. Let $1\leq t<n$ be such that $d_\mu>d_\mu s_{t}$. Then the map $\phi_{t}:\,e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)\rightarrow e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu s_{t})$ given by right multiplication by $\psi_{t}$ is injective. More generally, if $d_{\mu}=u_{1}u_{2}$ with $\ell(d_{\mu})=\ell(u_{1})+\ell(u_{2})$, then the map $\phi_{u_{2}}:\,e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)\rightarrow e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu u_{2}^{-1})$ given by right multiplication by $\psi_{u_{2}^{-1}}\,\,$ is injective.
\end{cor}
\begin{proof} By Lemma \ref{Lemma 12}, $d_{\mu s_{t}}=d_{\mu}s_{t}$. We can write $$
\psi_{d_{\mu}}e(\mu)=\psi_{d_{\mu}s_{t}}\psi_{s_{t}}e(\mu)=\psi_{d_{\mu s_{t}}}e(\mu s_{t})\psi_{s_{t}}. $$
The assumption that $N^\Lambda(\mu,k)>0$ for any $1\leq k\leq n$ and Lemma \ref{Lemma 13} imply that $N^\Lambda(\mu s_{t},k)>0$ for any $1\leq k\leq n$. Since $\psi_{t}\psi_{t}e(\mu s_{t})=Q_{\mu_{t+1},\mu_{t}}(x_{t},x_{t+1})e(\mu s_{t})$, it follows that for any $w\in\mathfrak{S}(\mu,\widetilde{\nu})$ and $r_k\in\mathbb{N}$, $1\leq k\leq n$, $\phi_t(\psi_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu))$ is of the form $\psi_{w s_{t}}\prod_{k=1}^{n}f_{k}e(\mu s_t)$, where
\[f_{k}=
\begin{cases}
x_{k}^{r_{k}}& k\neq t\,,\,t+1\\
x_{t}^{r_{t+1}} & k=t\\
x_{t+1}^{r_{t}}Q_{\mu_{t+1},\mu_{t}}(x_{t},x_{t+1})& k=t+1.\\
\end{cases}\]
Note that $f_{t+1}$ is a polynomial in $x_{t+1}$ of degree $r_t-\<\alpha_{\mu_{t+1}},h_{\mu_{t}}\>$ with leading coefficient in $K^\times$ and other coefficients in $K[x_{1},x_{2},\cdots,x_{t}]$. By Lemma \ref{keylem3}, we can write $\phi_t(\psi_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu))=c_0\psi_{w s_{t}}\prod_{k=1}^{n}x_{k}^{r'_{k}}e(\mu s_t)+\text{``lower terms''}$, where $c_0\in K^\times$, ``lower terms'' means that the degree of $x_{t+1}$ is less than $r_t-\<\alpha_{\mu_{t+1}},h_{\mu_{t}}\>$, and \[r'_{k}=
\begin{cases}
r_{k}& k\neq t\,,\,t+1\\
r_{t+1} & k=t\\
r_t-\<\alpha_{\mu_{t+1}},h_{\mu_{t}}\>& k=t+1.\\
\end{cases}\]
By Lemma \ref{Lemma 13}, if $k\neq t, t+1$, then $r'_{k}<N^\Lambda(\mu s_{t},k)=N^\Lambda(\mu,k)$ if $r_{k}<N^\Lambda(\mu,k)$; and $r'_{t}=r_{t+1}<N^\Lambda(\mu s_{t},t)=N^\Lambda(\mu,t+1)$ if $r_{t+1}<N^\Lambda(\mu,t+1)$; and $r'_{t+1}=r_{t}-\<\alpha_{\mu_{t+1}},h_{\mu_{t}}\><N^\Lambda(\mu s_{t},t+1)$ if $r_{t}<N^\Lambda(\mu,t)$.
By Theorem \ref{mainthm3b}, we know that $$\Bigl\{\psi_{w }\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)\Bigm| w \in\mathfrak{S}(\mu,\widetilde{\nu}),\, 0\leq r_{k}<N^\Lambda(\mu,k),\, \forall\,1\leq k\leq n\Bigr\}$$
forms a $K$-basis of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$. Similarly, the set $$\Bigl\{\psi_{w s_{t}}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu s_{t})\Bigm| w \in\mathfrak{S}(\mu,\widetilde{\nu}),\, 0\leq r_{k}<N^\Lambda(\mu s_{t},k),\, \forall\,1\leq k\leq n\Bigr\}$$
forms a $K$-basis of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu s_{t})$.
Now using Theorem \ref{mainthm3b} and Lemma \ref{keylem3}, we can see that the image of each basis element $\psi_{w}\prod_{k=1}^{n}x_{k}^{r_{k}}e(\mu)$ under $\phi_t$ has a leading term, and these leading terms are $K$-linearly independent. It follows that the images of the basis elements of $e(\widetilde{\nu})\RR^\Lambda({\beta})e(\mu)$ under $\phi_t$ are $K$-linearly independent, which implies that $\phi_t$ is injective.
\end{proof}
\subsection{The monomial bases of $\RR^\Lambda(\beta)$ when $n=3$}
In this subsection, we shall completely determine a monomial basis for $\RR^\Lambda(\beta)$ when $n=3$. Let $\beta\in Q_3^+$. Note that $\RR^\Lambda(\beta)=\bigoplus_{\nu,\mu\in I^\beta}e(\mu)\RR^\Lambda(\beta)e(\nu)$. By the results we have obtained in the last two subsections, we can assume without loss of generality that $\beta=2\alpha_1+\alpha_2$. We only need to construct a monomial basis for $e(1,2,1)\RR^\Lambda(\beta)e(1,2,1)$. We set $\nu:=(1,2,1)$. Then
$\mathfrak{S}(\nu,\nu)=\{(1), w:=(1,3)\}$, where $(1,3)$ denotes the transposition which swaps $1$ and $3$. We set $l_1:=\<\Lambda,h_1\>, l_2:=\<\Lambda,h_2\>$. Then we have $$\begin{aligned}
& N^\Lambda(1,\nu,1)=l_{1}\,\,,N^\Lambda(1,\nu,2)=l_{2}-a_{21}\,\,,N^\Lambda(1,\nu,3)=l_{1}-a_{12}-2; \\
& N^\Lambda(w,\nu,1)=l_{1}\,\,,N^\Lambda(w,\nu,2)=l_{2}\,\,,N^\Lambda(w,\nu,3)=l_{1}.
\end{aligned}
$$
\begin{lem}\label{iso2}
Suppose $\nu\,,\nu'\in I^{\beta}$ and $1\leq t<n$ with $a_{\nu_{t},\nu_{t+1}}=0$. Then the map $\phi:\,e(\nu^{'})\RR^\Lambda({\beta})e(\nu)\rightarrow e(\nu^{'})\RR^\Lambda({\beta})e(\nu s_{t})$ given by right multiplication by $\psi_{t}$ is an isomorphism.
\end{lem}
\begin{proof} This is clear because $\psi_t^2e(\nu)=e(\nu)$ by assumption.
\end{proof}
Suppose $a_{12}=0$; then $a_{21}=0$. Applying Corollary \ref{maincor1} we can get that $$
\dim\,e(1,2,1)\RR^\Lambda({\beta})e(1,2,1)=2l_{1}(l_{1}-1)l_{2}, $$
which is exactly the same as the dimension of $e(1,2,1)\RR^\Lambda({\beta})e(1,1,2)$. Now using Lemma \ref{iso2}, one can easily get a monomial basis of $e(1,2,1)\RR^\Lambda({\beta})e(1,2,1)$ from the known monomial basis (see Theorem \ref{mainthm3b}) of $e(1,2,1)\RR^\Lambda({\beta})e(1,1,2)$ in this case.
Henceforth we assume $a_{12}\neq 0$ and thus $a_{12}\leq -1\mathfrak{g}eq a_{21}$. By definition, we have $a^{\Lambdabda}_{1}(x_{1})e(1,2,1)=0$, which implies that \mathfrak{b}egin{equation}\label{11a}
x_1^{l_1}e(1,2,1)\in\mathfrak{t}ext{$K$-Span}\{x_1^{c_1}e(1,2,1)|0\leq c_1<l_1\} .
\end{equation}
Similarly, \begin{equation}\label{11b}
Q_{1,2}(x_{1},x_{2})a^{\Lambda}_{2}(x_{2})e(1,2,1)=\psi_{1}a^{\Lambda}_{2}(x_{1})\psi_{1}e(1,2,1)=0,
\end{equation}
which implies that \begin{equation}\label{11c}
x_2^{l_2-a_{21}}e(1,2,1)\in\text{$K$-Span}\{x_1^{c_1}x_2^{c_2}e(1,2,1)|c_1\geq 0, 0\leq c_2<l_2-a_{21}\} .
\end{equation}
Similarly, $\psi_{1}\psi_{2}\psi_1a^{\Lambda}_{2}(x_{2})e(1,2,1)=\psi_1\psi_2a_2^\Lambda(x_1)\psi_1e(1,2,1)=0$ together with $$\psi_{1}\psi_{2}\psi_1a^{\Lambda}_{1}(x_{1})e(1,2,1)=0, $$ imply that \begin{equation}\label{12a}
\psi_1\psi_2\psi_1x_1^{l_1}x_2^{l_2}e(1,2,1)\in\text{$K$-Span}\{\psi_1\psi_2\psi_1x_1^{c_1}x_2^{c_2}e(1,2,1)|0\leq c_1<l_1,\,0\leq c_2<l_2\} .
\end{equation}
As a result, we have that for any $a_1,a_2\in\mathbb{N}$, $$\begin{aligned}
& x_1^{a_1}x_2^{a_2}e(1,2,1)\in\text{$K$-Span}\{x_1^{c_1}x_2^{c_2}e(1,2,1)|0\leq c_1<l_1, 0\leq c_2<l_2-a_{21}\},\\
&\psi_{1}\psi_{2}\psi_{1}x_1^{a_1}x_2^{a_2}e(1,2,1)\in\text{$K$-Span}\{\psi_{1}\psi_{2}\psi_{1}x_1^{c_1}x_2^{c_2}e(1,2,1)|0\leq c_1<l_1, 0\leq c_2<l_2\} .
\end{aligned}
$$
Following \cite[(3.4)]{KK}, we define $$
\overline{Q}_{1,2,3}=\sum_{\mu\in I^3,\mu_1=\mu_3}\frac{Q_{\mu_1,\mu_2}(x_1,x_2)-Q_{\mu_1,\mu_2}(x_3,x_2)}{x_1-x_3}e(\mu) .
$$
Applying \cite[(3.7)]{KK}, we can deduce that \begin{equation}\label{15a}
\psi_{1}\psi_{2}\psi_{1}a^{\Lambda}_{1}(x_{3})e(1,2,1)-Q_{1,2}(x_{1},x_{2})s_{1}(\partial_{2}a_{1}(x_{2}))e(1,2,1)
=a^{\Lambda}_{1}(x_{1})\psi_{1}\psi_{2}\psi_{1}e(1,2,1)=0.
\end{equation}
Note that the degree of $x_3$ in $a^{\Lambda}_{1}(x_{3})$ is $l_1$, while the degree of $x_3$ in $Q_{1,2}(x_{1},x_{2})s_{1}(\partial_{2}a_{1}(x_{2}))$ is $l_1-1$. Moreover, the coefficient of $x_3^{l_1}$ in $a^{\Lambda}_{1}(x_{3})$ is in $K^\times$. Similarly, applying \cite[(3.7)]{KK} and the above definition, we can get that \begin{equation}\label{15b}
\psi_{1}\psi_{2}\psi_{1}s_{1}(\partial_{2}a_{1}(x_{2}))e(1,2,1)+\overline{Q}_{1,2,3}s_{1}(\partial_{2}a_{1}(x_{2}))e(1,2,1)
=\psi_{2}\psi_{1}a^{\Lambda}_{1}(x_{1})\psi_{1}\psi_{2}e(1,2,1)=0.
\end{equation}
Note that the degree of $x_3$ in $s_{1}(\partial_{2}a_{1}(x_{2}))$ is $l_1-1$, while the degree of $x_3$ in $\overline{Q}_{1,2,3}s_{1}(\partial_{2}a_{1}(x_{2}))$ is $l_1-a_{12}-2\geq l_1-1$. Moreover, the coefficient of $x_3^{l_1-1}$ in $s_{1}(\partial_{2}a_{1}(x_{2}))$ is in $K^\times$, and the coefficient of $x_3^{l_1-a_{12}-2}$ in $\overline{Q}_{1,2,3}s_{1}(\partial_{2}a_{1}(x_{2}))$ is in $K^\times$ too.
Using (\ref{15a}), (\ref{15b}) and the two displayed equalities in the last paragraph, we can deduce the following result.
\begin{thm}\label{432} Suppose that $a_{1,2}\neq 0$ and $\beta=2\alpha_1+\alpha_2$. Then the following subset $$\begin{aligned}
&\{\psi_{1}\psi_{2}\psi_{1}x_{1}^{k_{1}}x_{2}^{k_{2}}x_{3}^{k_{3}}|k_{1}<l_{1},\,\,k_{2}<l_{2},\,\,k_{3}<l_{1}\}\\
&\qquad \bigcup
\{x_{1}^{k_{1}}x_{2}^{k_{2}}x_{3}^{k_{3}}|k_{1}<l_{1},\,\,k_{2}<l_{2}-a_{21},\,\,k_{3}<l_{1}-a_{12}-2\},\end{aligned}
$$
forms a $K$-basis of $e(1,2,1)\RR^\Lambda(\beta)e(1,2,1)$, where $l_1=\<\Lambda,h_1\>$, $l_2=\<\Lambda,h_2\>$.
\end{thm}
\begin{proof} By the discussion before the theorem, we see that the elements in the above subset are $K$-linear generators of $e(1,2,1)\RR^\Lambda(\beta)e(1,2,1)$. Using the dimension formula in Corollary \ref{maincor1}, we see that this subset has the same cardinality as the dimension of $e(1,2,1)\RR^\Lambda(\beta)e(1,2,1)$. Thus it must form a $K$-basis of $e(1,2,1)\RR^\Lambda(\beta)e(1,2,1)$.
This completes the proof of the theorem.
\end{proof}
\begin{rem}
When $a_{12}=0$, the set in Theorem \ref{432} will not be a $K$-linear basis of $e(1,2,1)\RR^\Lambda(\beta)e(1,2,1)$. Actually, Lemma \ref{keylem3} tells us that the following set is $K$-linearly dependent in $e(2,1,1)\RR^\Lambda(\beta)e(1,2,1)$: $$\{\psi_{2}\psi_{1}x_{1}^{k_{1}}x_{2}^{k_{2}}x_{3}^{k_{3}}|k_{1}<l_{1},\,\,k_{2}<l_{2},\,\,k_{3}<l_{1}\}.
$$ Hence, $$\{\psi_{1}\psi_{2}\psi_{1}x_{1}^{k_{1}}x_{2}^{k_{2}}x_{3}^{k_{3}}|k_{1}<l_{1},\,\,k_{2}<l_{2},\,\,k_{3}<l_{1}\}$$
is $K$-linearly dependent too.
\end{rem}
\subsection{Some counter-examples on the graded freeness of $\RR^\Lambda(n)$ over its subalgebra $\RR^\Lambda(m)$ with $m<n$}
Let $\beta\in Q_n^+$ and $i\in I$ such that $e(\beta,i)\neq 0$. Kang and Kashiwara (\cite[Theorem 4.5]{KK}) have shown that $\RR^\Lambda(\beta+\alpha_i)e(\beta,i)$ is a projective right $\RR^\Lambda(\beta)$-module. It follows that
(\cite[Remark 4.20(ii)]{KK}) $\RR^\Lambda(n)$ is a projective $\RR^\Lambda(m)$-module when $n\geq m$, where $$\RR^\Lambda(n)=\oplus_{\beta\in Q_n^+}\RR^\Lambda(\beta).
$$
It is natural to ask whether $\RR^\Lambda(n)$ is a free $\RR^\Lambda(m)$-module. Moreover, when it is a free module, one can ask whether $\RR^\Lambda(n)$ has a homogeneous basis over the subalgebra $\RR^\Lambda(m)$.
In this subsection, we shall use our main results Theorem \ref{mainthmA} and Corollary \ref{maincor1} to give some examples to show that the answers to these questions are negative in general.
\begin{examp}
Let $A$ be of type $A^{(1)}_1$, i.e. $$A=\begin{pmatrix}2\,&-2\\
-2\,&2\end{pmatrix}.
$$
Assume $\Lambda=\Lambda_1+2\Lambda_2$. By Brundan-Kleshchev's isomorphism \cite{BK:GradedKL} and the Ariki-Koike bases for the cyclotomic Hecke algebras \cite{AK}, it is easy to see that $\RR^\Lambda(2)$ is a free right $\RR^\Lambda(1)$-module. However, using Theorem \ref{mainthmA}, we can get that $$\begin{aligned}
\dim_q\,\RR^\Lambda(1)&=\dim_q\,\RR^\Lambda(\alpha_1)+\dim_q\,\RR^\Lambda(\alpha_2)\\
&=1+(1+q^2)=2+q^2 ,
\end{aligned}
$$
while $$\begin{aligned}
&\quad\,\dim_q\,\RR^\Lambda(2)\\
&=\dim_q\,\RR^\Lambda(2\alpha_1)+\dim_q\,\RR^\Lambda(2\alpha_2)+\dim_q\,e(1,2)\RR^\Lambda(\alpha_1+\alpha_2)e(1,2)\\
&\qquad +\dim_q\,e(1,2)\RR^\Lambda(\alpha_1+\alpha_2)e(2,1)+\dim_q\,e(2,1)\RR^\Lambda(\alpha_1+\alpha_2)e(1,2)\\
&\qquad\qquad +\dim_q\,e(2,1)\RR^\Lambda(\alpha_1+\alpha_2)e(2,1)\\
&=0+(q^{-2}+2+q^2)+(1+q^2+q^4+q^6)+2(q^2+q^4)+(1+2q^2+2q^4+q^6)\\
&=2q^6+5q^4+6q^2+4+q^{-2}.
\end{aligned}
$$
This implies that $\dim_q\,\RR^\Lambda(1)$ is not a factor of $\dim_q\,\RR^\Lambda(2)$. Thus, as a free right $\RR^\Lambda(1)$-module, $\RR^\Lambda(2)$ does not have a homogeneous basis.
\end{examp}
\begin{examp}
Let $A$ be of type $A_2$, i.e. $$A=\begin{pmatrix}2\,&-1\\
-1\,&2\end{pmatrix}.
$$
Assume $\Lambda=\Lambda_1+\Lambda_2$, $\beta=\alpha_1+\alpha_2$. Using Corollary \ref{maincor1}, we can get that $$\begin{aligned}
\dim\,\RR^\Lambda(\beta)&=\dim\,e(1\,2)\RR^\Lambda(\beta)e(1\,2)+\dim\,e(1\,2)\RR^\Lambda(\beta)e(2\,1)\\
&\qquad +\dim\,e(2\,1)\RR^\Lambda(\beta)e(1\,2)+\dim\,e(2\,1)\RR^\Lambda(\beta)e(2\,1)\\
&=2+1+1+2=6.
\end{aligned}
$$
Similarly, $$\begin{aligned}
&\quad\,\dim\,\RR^\Lambda(\beta+\alpha_1)e(\beta,1)\\
&=\dim\,\RR^\Lambda(\beta+\alpha_1)e(1,2,1)+\dim\,\RR^\Lambda(\beta+\alpha_1)e(2,1,1)\\
&=\dim\,e(2,1,1)\RR^\Lambda(\beta+\alpha_1)e(1,2,1)+\dim\,e(1,2,1)\RR^\Lambda(\beta+\alpha_1)e(1,2,1)\\
&\qquad +\dim\,e(1,1,2)\RR^\Lambda(\beta+\alpha_1)e(1,2,1)+\dim\,e(2,1,1)\RR^\Lambda(\beta+\alpha_1)e(2,1,1)\\
&\qquad\qquad +\dim\,e(1,2,1)\RR^\Lambda(\beta+\alpha_1)e(2,1,1)+\dim\,e(1,1,2)\RR^\Lambda(\beta+\alpha_1)e(2,1,1)\\
&=2+1+0+4+2+0=9.
\end{aligned}
$$
Since $6\nmid 9$, it follows that $\RR^\Lambda(\beta+\alpha_1)e(\beta,1)$ is not a free right $\RR^\Lambda(\beta)$-module.
\end{examp}
\begin{examp}
Let $A$ be of type $A_3$, i.e. $$A=\begin{pmatrix}2\,&-1\,&0\\
-1\,&2\,&-1\\
0\,&-1\,&2\end{pmatrix}.
$$
Assume $\Lambda=3\Lambda_1+2\Lambda_2+2\Lambda_3$. Using Corollary \ref{maincor1}, we can get that $$\dim\,\RR^\Lambda(1)=3+2+2=7,
$$
and $$\begin{aligned}
\dim\,\RR^\Lambda(2)&=\dim\,\RR^\Lambda(2\alpha_1)+\dim\,\RR^\Lambda(2\alpha_2)+\dim\,\RR^\Lambda(2\alpha_3)\\
&\qquad +\dim\,\RR^\Lambda(\alpha_1+\alpha_2)+\dim\,\RR^\Lambda(\alpha_1+\alpha_3)+\dim\,\RR^\Lambda(\alpha_2+\alpha_3)\\
&=12+4+4+29+24+20=93.
\end{aligned}
$$
Again, since $7\nmid 93$, we conclude that $\RR^\Lambda(2)$ is not a free $\RR^\Lambda(1)$-module.
\end{examp}
Let $\beta\in Q_n^+$. For each $i\in I$, there is a natural map $\gamma_{\beta,i}: \RR^\Lambda(\beta)\rightarrow e(\beta,i)\RR^\Lambda(\beta+\alpha_i)e(\beta,i)$. We define $$
\gamma_\beta=\oplus_{i\in I}\gamma_{\beta,i}:\,\RR^\Lambda(\beta)\,\rightarrow\,\oplus_{i\in I}e(\beta,i)\RR^\Lambda(\beta+\alpha_i)e(\beta,i).
$$
This map was studied in \cite{ZH} and was proved to be injective except in some special cases. It is natural to expect that $\oplus_{i\in I}e(\beta,i)\RR^\Lambda(\beta+\alpha_i)e(\beta,i)$ is a free $\RR^\Lambda(\beta)$-module when $\gamma_\beta$ is injective. The following example shows that this again fails in general.
\begin{examp}
Let $A$ be of type $A_2$, $\beta=\alpha_1+\alpha_2$ and $\Lambda=3\Lambda_1+2\Lambda_2$. Then $$
\Lambda-w_0\Lambda=5(\alpha_1+\alpha_2)\neq\beta .
$$
It follows from \cite[Theorem 3.7]{ZH} that $\gamma_\beta$ is injective in this case. However, using Corollary \ref{maincor1}, we can get that $$\begin{aligned}
\dim\,\RR^\Lambda(\beta)&=\dim\,e(1,2)\RR^\Lambda(\beta)e(1,2)+\dim\,e(1,2)\RR^\Lambda(\beta)e(2,1)\\
&\qquad +\dim\,e(2,1)\RR^\Lambda(\beta)e(1,2)+\dim\,e(2,1)\RR^\Lambda(\beta)e(2,1)\\
&=9+6+6+8=29,
\end{aligned}
$$
and $$
\begin{aligned}
&\quad\,\dim\,e(\beta,1)\RR^\Lambda(\beta+\alpha_1) e(\beta,1)+\dim\,e(\beta,2)\RR^\Lambda(\beta+\alpha_2) e(\beta,2)\\
&=\dim\,e(1,2,1)\RR^\Lambda(\beta+\alpha_1) e(1,2,1)+\dim\,e(1,2,1)\RR^\Lambda(\beta+\alpha_1) e(2,1,1)\\
&\qquad +\dim\,e(2,1,1)\RR^\Lambda(\beta+\alpha_1) e(1,2,1)+\dim\,e(2,1,1)\RR^\Lambda(\beta+\alpha_1) e(2,1,1)\\
&\qquad\quad +\dim\,e(1,2,2)\RR^\Lambda(\beta+\alpha_2) e(1,2,2)+\dim\,e(1,2,2)\RR^\Lambda(\beta+\alpha_2) e(2,1,2)\\
&\qquad\qquad +\dim\,e(2,1,2)\RR^\Lambda(\beta+\alpha_2) e(1,2,2)+\dim\,e(2,1,2)\RR^\Lambda(\beta+\alpha_2) e(2,1,2)\\
&=36+36+36+48+36+24+24+20=260.
\end{aligned}
$$
Note that $29\nmid 260$. It follows that $\oplus_{i\in I}e(\beta,i)\RR^\Lambda(\beta+\alpha_i)e(\beta,i)$ is not a free right $\RR^\Lambda(\beta)$-module.
\end{examp}
The above examples imply that in general one cannot construct a basis of the cyclotomic quiver Hecke algebra $\RR^\Lambda(\beta)$ inductively via the injection $\gamma_\beta$.
\bigskip
\bigskip
\begin{thebibliography}{2}
\bibitem{Ariki:can}
{\sc S.~Ariki}, {\em On the decomposition numbers of the {Hecke} algebra of {$G(m,1,n)$}}, J. Math. Kyoto Univ., {\bf 36} (1996), 789--808.
\bibitem{AK} {\sc S.~Ariki and K.~Koike}, {\em A Hecke algebra of $(\mathbb{Z}/r\mathbb{Z})\wr\mathfrak{S}_n$ and construction of its representations}, Adv. Math., {\bf 106} (1994), 216--243.
\bibitem{AP14} {\sc S.~Ariki and E.~Park}, {\em Representation type of finite quiver Hecke algebras of type $A^{(2)}_{2\ell}$}, J. Algebra, {\bf 397} (2014), 457--488.
\bibitem{AP16c} \leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Representation type of finite quiver Hecke algebras of type $C_{\ell}^{(1)}$}, Osaka J. Math., {\bf 53}(2) (2016), 463--488.
\bibitem{APS} {\sc S.~Ariki, E.~Park and L.~Speyer}, {\em Specht modules for quiver Hecke algebras of type $C$}, Publ. Res. Inst. Math. Sci., {\bf 55}(3) (2019), 565--626.
\bibitem{BKOP} {\sc G.~Benkart, S.~Kang, S.~Oh, E.~Park}, {\em Construction of irreducible representations over Khovanov-Lauda-Rouquier algebras of finite classical type}, Int. Math. Res. Not., {\bf 2014}(5) (2014), 1312--1366.
\bibitem{BK:GradedKL}
{\sc J.~Brundan and A.~Kleshchev}, {\em Blocks of cyclotomic {H}ecke algebras and {K}hovanov-{L}auda algebras}, Invent. Math., {\bf 178}
(2009), 451--484.
\bibitem{BKgraded}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Graded decomposition numbers for cyclotomic Hecke algebras}, Adv. Math., {\bf 222} (2009), 1883--1942.
\bibitem{BKM}
{\sc J.~Brundan, A.~Kleshchev and P.~McNamara}, {\em Homological properties of finite-type Khovanov-Lauda-Rouquier algebras}, Duke Math. J., {\bf 163}(7) (2014), 1353--1404.
\bibitem{BKW}
{\sc J.~Brundan, A.~Kleshchev and W.~Wang}, {\em Graded Specht modules}, J. reine angew. Math., {\bf 655} (2011),
61--87.
\bibitem{Cr}
{\sc L.~Crane}, {\em Clock and category: Is quantum gravity algebraic?}, J. Math. Phys., {\bf 36} (1995),
6180--6193.
\bibitem{CF}
{\sc L.~Crane and I.B.~Frenkel}, {\em Four-dimensional topological quantum field theory, Hopf
categories, and the canonical bases}, J. Math. Phys., {\bf 35} (1994), 5136--5154.
\bibitem{DJ1} {\sc R.~Dipper and G.D.~James}, {\em Representations of Hecke algebras of general linear groups}, Proc. London Math. Soc., {\bf 52}(3) (1986), 20--52.
\bibitem{G}
{\sc I.~Grojnowski}, {\em Affine $\widehat{\mathfrak{sl}}_p$ controls the modular representation theory of the symmetric group and related Hecke algebras}, preprint, math.RT/9907129, 1999.
\bibitem{HL}
{\sc A.E.~Hoffnung and A.D.~Lauda}, {\em Nilpotency in type $A$ cyclotomic quotients}, J. Algebraic Combin., {\bf 32}(4) (2010), 533--555.
\bibitem{HM}
{\sc J.~Hu and A.~Mathas}, {\em Graded cellular bases for the cyclotomic Khovanov-Lauda-Rouquier algebras of type $A$}, Adv. Math., {\bf 225}(2) (2010), 598--642.
\bibitem{HuL}
{\sc J.~Hu and X.f.~Liang}, {\em On the structure of cyclotomic nilHecke algebras}, Pac. J. Math., {\bf 296}(1) (2018), 105--139.
\bibitem{Kac}
{\sc V.G. Kac},
{{\em Infinite dimensional Lie algebras}, 3rd ed., Cambridge University Press, Cambridge, 1990.}
\bibitem{KK}
{\sc S.~J. Kang and M.~Kashiwara}, {\em Categorification of highest weight modules via Khovanov-Lauda-Rouquier algebras}, Invent. Math., {\bf 190} (2012), 699--742.
\bibitem{Kh00}
{\sc M.~Khovanov}, {\em A categorification of the Jones polynomial}, Duke Math. J., {\bf 101} (2000),
359--426.
\bibitem{KL1}
{\sc M.~Khovanov and A.D.~Lauda}, {\em A diagrammatic approach to categorification of quantum groups, I}, Represent. Theory, {\bf 13} (2009), 309--347.
\bibitem{KL2}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em A diagrammatic approach to categorification of quantum groups, II}, Trans. Amer. Math. Soc., {\bf 363} (2011), 2685--2700.
\bibitem{Klesh:book}
{\sc A.~S. Kleshchev}, {\em Linear and projective representations of symmetric
groups}, CUP, 2005.
\bibitem{K14}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Cuspidal systems for affine Khovanov-Lauda-Rouquier algebras}, Math. Z., {\bf 276} (2014), 691--726.
\bibitem{KLo}
{\sc A.~S. Kleshchev and J.~Loubert}, {\em Affine cellularity of Khovanov-Lauda-Rouquier algebras of finite types}, Int. Math. Res. Not., {\bf 2015}(14) (2015), 5659--5709.
\bibitem{KR10}
{\sc A.~S. Kleshchev, A.~Ram}, {\em Homogeneous representations of Khovanov-Lauda algebras}, J. Eur. Math. Soc., {\bf 12}(5) (2010), 1293--1306.
\bibitem{KR11}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Representations of Khovanov-Lauda-Rouquier algebras and combinatorics of Lyndon words}, Math. Ann., {\bf 349}(4) (2011), 943--975.
\bibitem{Lu}
{\sc G.~Lusztig}, {\em Introduction to Quantum groups}, Birkh\"auser, 1994.
\bibitem{MT1}
{\sc Andrew Mathas and Daniel Tubbenhauer}, {\em Subdivision and cellularity for weighted KLRW algebras}, preprint, math.RT/2111.12949, 2022.
\bibitem{MT2}
{\sc Andrew Mathas and Daniel Tubbenhauer}, {\em Cellularity for weighted KLRW algebras of types B, A(2), D(2)}, preprint, math.RT/2201.01998, 2022.
\bibitem{OP}
{\sc Se-jin Oh and E.~Park}, {\em Young walls and graded dimension formulas for finite quiver Hecke algebras of type $A_{2\ell}^{(2)}$ and $D_{\ell+1}^{(2)}$}, J.
Algebr. Comb., {\bf 40} (2014), 1077--1102.
\bibitem{P}
{\sc E.~Park}, {\em Cyclotomic quiver Hecke algebras corresponding to minuscule representations}, J. Korean Math. Soc., {\bf 57}(6) (2020), 1373--1388.
\bibitem{Rou1}
{\sc R.~Rouquier}, {\em $2$-Kac--Moody algebras}, preprint, math.RT/0812.5023v1, 2008.
\bibitem{Rou2}
\leavevmode\vrule height 2pt depth -1.6pt width 23pt, {\em Quiver Hecke algebras and 2-Lie algebras}, Algebr. Colloq., {\bf 19} (2012), 359--410.
\bibitem{VV}
{\sc M.~Varagnolo and E.~Vasserot}, {\em Canonical bases and KLR algebras}, J. reine angew. Math., {\bf 659} (2011), 67--100.
\bibitem{ZH}
{\sc K.~Zhou and J.~Hu}, {\em On some embeddings between the cyclotomic quiver Hecke algebras}, Proc. Amer. Math. Soc., {\bf 148} (2020), 495--511.
\end{thebibliography}
\end{document} |
\begin{document}
\begin{abstract}
Let $X$ be a topological Hausdorff space together with a continuous action of a finite group $G$. Let $R$
be the ring of integers of a number field~$F$. Let $\calE$ be a $G$-sheaf of flat $R$-modules over $X$ and let
$\Phi$ be a $G$-stable paracompactifying family of supports on $X$. We show that under some natural cohomological
finiteness conditions the Lefschetz number of the action of $g \in G$ on the cohomology
$ \com{H}_\Phi(X,\calE) \otimes_{R} F $ equals the Lefschetz number of the $g$-action on
$ \com{H}_{\Phi|X^g}(X^g, \calE_{|X^g}) \otimes_{R} F $, where $X^g$ is the set of fixed points of $g$
in $X$. More generally, the class $\sum_j (-1)^j [H^j_\Phi (X,\calE) \otimes_R F]$ in the character group
equals a sum $\sum_{[H]} \sum_{\lambda \in \widehat{H}_F} m_\lambda [\ind^G_H (V_\lambda)] $ of representations induced from
irreducible $F$-rational representations $\: V_\lambda \:$ of $\: H \:,$ where $[H]$ runs in the set of
$G$-conjugacy classes of subgroups of $G$. The integral coefficients $m_\lambda$ are explicitly
determined.
\end{abstract}
\maketitle
\section{Introduction and main results}
The most elementary classical version of the Lefschetz fixed point formula says that the Lefschetz number
$\calL(g)$ of a simplicial automorphism $g$ of finite order on a finite simplicial complex $X$ equals the Euler-Poincar\'e
characteristic of the fixed point set $X^g \subset X $ of the $g$-action. Here $\calL(g)$
is computed on $ H^\ast (X,\bbQ)$. Brown \cite{Brown1982} (based on Zarelua \cite{Zarelua1969}) and independently Verdier \cite{Verdier1973}
have extended this formula to more general spaces under the assumption of cohomological finiteness conditions.
Verdier uses cohomology with compact supports.
The objective of this paper is to generalize this Lefschetz fixed point formula to Hausdorff spaces with a
continuous action of a finite group $G$ and to cohomology of $G$-sheaves with a paracompactifying family of supports.
For applications of the Lefschetz fixed point formula
to cohomology of arithmetic groups see e.g.~\cite{Rohlfs1990}.
\subsection{Notation}
Throughout $F$ denotes an algebraic number field and $R$ denotes its ring of integers.
Let $G$ be a finite group, then $\Gzero(F[G])$ denotes the Grothendieck group of finitely generated $F[G]$-modules.
For every subgroup $H$ of $G$ there is the induction homomorphism $\ind_H^G: \Gzero(F[H]) \to \Gzero(F[G])$
which maps $[M]$ to $[F[G]\otimes_{F[H]}M]$.
For $ g \in G $ the trace of the $g$-action induces a morphism
$\tr(g): \Gzero(F[G]) \longrightarrow R$.
Let $Y$ be a Hausdorff space and let $\Phi$ be a family of supports on $Y$.
Let $k$ be a ring and $\calE$ be a sheaf of left $k$-modules on $Y$. If the cohomology $\com{H}_\Phi(Y,\calE)$
is finitely generated as $k$-module, then we say that the triple $(Y,\Phi,\calE)$ is of \emph{finite type} with respect to $k$.
Given a sheaf $\calE$ of $R$-modules on $Y$, we write
$\chi_\Phi(Y,\calE; F)$ for the Euler-Poincar\'e characteristic of the graded $F$-vectorspace
$\com{H}_\Phi(Y,\calE)\otimes_R F$ whenever it is \mbox{finite} dimensional.
Similarly, if a finite group $G$ acts continuously on $Y$ and $\calE$ is $G$-equivariant, then we denote the Euler-Poincar\'e characteristic
of the graded $F[G]$-module $\com{H}_\Phi(Y,\calE)\otimes_R F$ in the Grothendieck
group $\Gzero(F[G])$ by $\chi_\Phi(Y,\calE; F[G])$.
The image of $\chi_\Phi (Y, \calE; F[G]) $ under the morphism $\tr(g)$ is the
\emph{Lefschetz number} $ \calL_\Phi (g, \calE; F) = \sum^\infty_{j = 0} (-1)^j \tr (g| H^j _\Phi (Y ,\calE) \otimes_R F) \:.$
\subsection{Statement of results}
Denote by $X$ a topological Hausdorff space together with a continuous action of a finite group $G$.
We fix a paracompactifying $G$-stable family of supports $\Phi$ on $X$.
For a subgroup $H \leq G$ we denote the normalizer of $H$ in $G$ by $N_G(H)$ or $N(H)$.
We write $X^H$ for the set of points in $X$ which are fixed by $H$ and we write $X_H$ for
the set of points in $X$ whose stabilizer is exactly the group $H$. Note that $X^H$ is closed in $X$ and $X_H$ is open in $X^H$.
Let $\cla(G)$ be the set of conjugacy classes of subgroups of $G$. The paracompactifying
family $\Phi$ induces paracompactifying families on the locally closed subspaces
$ X_H, X^H, X_C := \bigcup_{H \in C} X_H $ for $C \in \cla(G)$ and on the
quotient space $ X_H/ N(H) $. For simplicity these families will be
denoted also by $\Phi$.
By $ \widehat{H}_F$ we denote the set of equivalence classes of
irreducible representations of $H$ on finite dimensional $F$-vectorspaces. If $\lambda \in \widehat{H}_F$,
we write $V_\lambda$ for a representative of $\lambda$. We define
$\deg V_\lambda := \dim_F \bigl(\Hom_{F[H]} (V_\lambda, V_\lambda )\bigr)$.
Let $\calE$ be a $G$-sheaf of $R$-modules on $X$.
We say that $\calE$ satisfies the finiteness condition \cF if
for every subgroup $H$ of $G$ the following hold:
\begin{enumerate}
\item The triple $(X_H,\Phi, \calE_{|X_H}) $ is of finite type w.r.t.~$R$, and
\item for any $\lambda \in \widehat{N(H)}_F$
there is an $N(H)$-invariant lattice $L_\lambda$ in $V_\lambda$ such that the triple
$(X_H,\Phi,\Hom_{R[H]}\bigl(L_\lambda, \calE_{|X_H} \bigr))$ is of finite type w.r.t.~$R$.
\end{enumerate}
We comment on this condition in section \ref{sec:Comments}.
\begin{theorem} Let $X$, $G$, $\Phi$ be as above and assume that the cohomological
$\Phi$-dimension of $X$ is finite. Let $ \calE$ be a $G$-sheaf of flat $R$-modules such that
condition \cF holds. Then
\begin{align*}
\chi_\Phi\bigl(X,\calE; F[G]\bigr) &=
\sum_{[H] \in \cla(G)} \frac{|H|}{|N(H)|} \ind_H^G\Bigl(\chi_{\Phi}\bigl(X_H, \calE_{|X_H}; F[H]\bigr)\Bigr)\\
& =
\sum_{[H] \in \cla(G)} \sum_{\lambda \in \widehat{H}_F}\frac{|H|\cdot e(\lambda)}{|N(H)| \cdot \deg(V_\lambda)} \ind_H^G\bigl([V_\lambda\bigr]).
\end{align*}
where $e(\lambda)$ denotes the Euler characteristic
$\chi_{\Phi}\bigl(X_H, \Hom_{R[H]}(M_\lambda, \calE_{|X_H}); F\bigr)$ for any
$H$-stable $R$-lattice $M_\lambda \subset V_\lambda$.
\end{theorem}
A proof of the theorem will be given in the next section.
\begin{corollary}\label{cor:letg}
Let $G$ be the finite cyclic group generated by an element $g$. Under the assumptions of the theorem we obtain
an equality of Lefschetz numbers
\begin{equation*}
\calL_\Phi\bigl(g,\calE; F\bigr) = \calL_{\Phi}\bigl(g, \calE_{|X^G}; F \bigr).
\end{equation*}
\end{corollary}
\begin{proof}
Use that for
$ G = \langle g \rangle $ we have $ \tr (g | \ind^G_H V) = 0 \:$ for all finite dimensional $ F[H]$-modules $V$ if
$ H \neq G $ and that $ X_G = X^G $.
\end{proof}
\section{Proof of the Theorem}
This section is devoted to the proof of the theorem.
We begin with the following general Lemma.
\begin{lemma}\label{lem:EulerCharModp}
Let $p$ be a prime number.
Let $\calE$ be a sheaf of abelian groups on $X$ and assume that the stalks of $\calE$ have no $p$-torsion.
If the triple $(X,\Phi,\calE)$ is of finite type (w.r.t.~$\bbZ$) then the triple
$(X,\Phi,\calE\otimes_\bbZ \bbF_p)$ is of finite type w.r.t.~$\bbF_p$.
Moreover we have
\begin{equation*}
\chi_\Phi(X,\calE; \bbQ) = \chi_\Phi(X,\calE\otimes_\bbZ \bbF_p; \bbF_p).
\end{equation*}
Here $\bbF_p$ denotes the finite field with $p$ elements.
\end{lemma}
\begin{proof}
Since $\calE$ is torsion-free, there is a short exact sequence of sheaves on $X$:
\begin{equation*}
0 \longrightarrow \calE \stackrel{p}{\longrightarrow} \calE \longrightarrow \calE\otimes_\bbZ \bbF_p \longrightarrow 0.
\end{equation*}
Consider the associated long exact sequence. We deduce that $(X,\Phi,\calE\otimes_\bbZ \bbF_p)$ is of finite type.
Further, write $H^i_\Phi(X,\calE) \cong \bbZ^{b_i} \oplus P^i \oplus T^i$, where $P^i$ is the subgroup of elements whose order is a power of $p$
and $T^i$ is the subgroup of elements of finite order prime to $p$.
Let $P^i_p$ denote the elements of order exactly $p$ and let $r_i = \dim_{\bbF_p} P^i_p$.
From the long exact sequence we obtain short exact sequences
\begin{equation*}
0 \longrightarrow H^i_\Phi(X,\calE)\otimes_\bbZ\bbF_p \longrightarrow H^i_\Phi(X,\calE\otimes\bbF_p)\longrightarrow P^{i+1}_p \longrightarrow 0
\end{equation*}
for every degree $i$. Thus $\dim_{\bbF_p} H^i_\Phi(X,\calE\otimes\bbF_p) = b_i + r_i + r_{i+1}$ and the second assertion follows
via alternating summation.
\end{proof}
Let $\pi: X \to X/G$ be the canonical projection.
Note that for a sheaf of abelian groups $\calE$ on $X$ there is a canonical isomorphism
\begin{equation*}
\com{H}_\Phi(X,\calE) \isomorph \com{H}_\Phi(X/G,\pi_*(\calE)).
\end{equation*}
In general, if $\calF$ is a sheaf of $R[G]$-modules on a space $Y$, then we write $\calF^G$ for the subsheaf of $G$-stable sections,
i.e.~$\calF^G(U) = \calF(U)^G$.
Let $\calE$ be a $G$-sheaf of $R$-modules on $X$. We write $\pi_*^G(\calE)$ for $\pi_*(\calE)^G$.
Note that the
triple $(X,\Phi, \calE)$ is of finite type w.r.t.~$R$ if and only if it is of finite type w.r.t.~$\bbZ$.
In this case we simply say that $(X,\Phi, \calE)$ is of finite type.
\begin{lemma}\label{lem:EulerCharCovering}
Suppose that $G$ is abelian and acts freely on $X$. Let $\calE$ be a flat $G$-sheaf of $R$-modules on $X$ such that
$(X,\Phi, \calE)$ is of finite type. In this case $(X/G, \Phi, \pi_*^G(\calE))$ is of finite type and
\begin{equation*}
\chi_\Phi(X,\calE; F) = |G| \chi_\Phi(X/G, \pi_*^G(\calE); F).
\end{equation*}
\end{lemma}
\begin{proof}
First note that $X/G$ has finite $\Phi$-dimension, since cohomological dimension
is a local property (cf.~ II.~4.14.1 in \cite{Godement1958}) and $\pi$ is a covering map.
Further, the triple $(X/G, \Phi , \pi_*^G(\calE))$ is of finite type due to the Grothendieck spectral sequence
\begin{equation*}
H^p(G, H^q_\Phi(X,\calE)) \implies H^{p+q}_\Phi(X/G,\pi_*^G(\calE))
\end{equation*}
which can be obtained for paracompactifying supports just as in \cite[Thm.~5.2.1]{Grothendieck1957}.
Now we prove the assertion about the Euler characteristic.
It is easy to check that $[F:\bbQ] \chi_\Phi(X, \calE; F) = \chi_\Phi(X, \calE; \bbQ)$, hence we can assume $R = \bbZ$.
By induction on the group structure we can assume that $G$ is finite cyclic of prime order $p$.
The assertion follows from Lemma \ref{lem:EulerCharModp} and a Theorem of E.~E.~Floyd (based on a result of P. A. Smith),
see \cite[Thm.~19.7]{Bredon1997} or \cite[Thm.~4.2]{Floyd1952}. Here we use that $\pi_*^G(\calE)\otimes_\bbZ \bbF_p = \pi_*^G(\calE\otimes_\bbZ \bbF_p)$.
Note that we assumed the $\Phi$-dimension of $X$ to be finite, which
implies in particular that $\dim_{\Phi,\bbF_p} X$ is finite in the notation of \cite{Bredon1997}.
Further the pull-back sheaf $\pi^* (\pi_*^G(\calE))$ is isomorphic to $\calE$ (see~\cite[p.~199]{Grothendieck1957}).
\end{proof}
\begin{lemma}
Let $G$ be a finite group which acts freely on $X$ and let $\calE$ be a flat $G$-sheaf of $R$-modules on $X$.
We assume that $(X,\Phi, \calE)$ is of finite type. For any $g \in G$ with $g \neq 1$ the Lefschetz number vanishes.
\end{lemma}
\begin{proof}
By taking a finite extension we can assume without loss of generality that $R$ contains all $|G|$-th roots of unity.
Further we can assume that $G$ is a finite cyclic group. Let $\psi: G \to R^\times$ be a character of $G$.
We can twist the $G$-sheaf $\calE$ with the character $\psi^{-1}$ to obtain a new $G$-sheaf $\calE\otimes \psi^{-1}$.
This sheaf is isomorphic to $\calE$ as a sheaf of $R$-modules, but not as $G$-sheaf.
Further we find that $\pi_*^G(\calE\otimes\psi^{-1})$ is the $\psi$-eigensheaf $\pi_*(\calE)_\psi$ in the
sheaf $\pi_*(\calE)$ of $R[G]$-modules, this means $\pi_*(\calE)_\psi$ is the subsheaf of
sections of $\pi_*(\calE)$ which transform with $\psi$ under the action of~$G$. From Lemma \ref{lem:EulerCharCovering}
we deduce that all the eigensheaves $\pi_*(\calE)_\psi$ have equal Euler characteristic.
However, since
\begin{equation*}
\calL_\Phi(g,\calE; F) = \sum_{\psi \in \widehat{G}} \psi(g) \chi_\Phi(X/G, \pi_*(\calE)_\psi; F)
\end{equation*}
the claim follows.
\end{proof}
We shall frequently use the following Lemma.
\begin{lemma}\label{lem:FixInside}
Let $\calE$ be a sheaf of $R[G]$-modules on a space $X$ and let $\Phi$ be a system of supports on $X$.
The inclusion $\calE^G \to \calE$ induces an isomorphism of vectorspaces
\begin{equation*}
\com{H}_\Phi(X,\calE^G) \otimes_R F \isomorph \com{H}_\Phi(X,\calE)^G \otimes_R F.
\end{equation*}
\end{lemma}
\begin{proof}
Consider the functor $B: \calE \mapsto \calE^G$ from the category $\Sh_X(R[G])$ of sheaves of $R[G]$-modules
to the category $\Sh_X(R)$ of sheaves of $R$-modules. This functor is left exact and we consider its right derived functor
\begin{equation*}
\RR{B}: \Der^+(\Sh_X(R[G])) \to \Der^+(\Sh_X(R)).
\end{equation*}
Note that $B$ takes injective sheaves of $R[G]$-modules to
flabby sheaves (see Corollary to Prop.~5.1.3 in \cite{Grothendieck1957}).
As in Thm.~5.2.1 in \cite{Grothendieck1957} there is a convergent spectral sequence
\begin{equation*}
H^p_\Phi(X,\RR{B}^q(\calE))\otimes_R F \implies H^{p+q}_\Phi(X,\calE)^G\otimes_R F,
\end{equation*}
where we use that $F$ is a flat $R$-module.
In fact, the stalk at $x \in X$ of $\RR{B}^q(\calE)$ is the group cohomology $H^q(G,\calE_x)$ which is purely $|G|$-torsion
for all $q \geq 1$. Hence the spectral sequence collapses and the claim follows.
\end{proof}
We obtain a refined version of Verdier's Lemma (cf.~\cite{Verdier1973}).
\begin{lemma}\label{lem:VerdierLemma}
Let $G$ be a finite group which acts freely on $X$ and let $\calE$ be a flat $G$-sheaf of $R$-modules on $X$.
We assume that $(X,\Phi, \calE)$ is of finite type.
In this case we have
\begin{equation*}
\chi_\Phi(X, \calE; F[G]) = \chi_\Phi(X/G,\pi^G_*(\calE); F) \cdot F[G].
\end{equation*}
In particular, Lemma \ref{lem:EulerCharCovering} holds without the assumption that $G$ is abelian.
\end{lemma}
\begin{proof}
With the same argument as in Lemma \ref{lem:EulerCharCovering} we see that $(X/G, \Phi, \pi^G_*(\calE))$ is of finite type.
It suffices to compute the Lefschetz numbers of all elements of $G$ and compare them with the right hand side.
The vanishing of all Lefschetz numbers for $g \neq 1$ shows that $ \chi_\Phi(X, \calE; F[G])$ is a multiple of the regular representation.
The coefficient is the Euler characteristic of the graded $F$-vectorspace $\com{H}_\Phi(X,\calE)^G\otimes_{R} F$.
Since $\com{H}_\Phi(X,\calE) \cong \com{H}_\Phi(X/G,\pi_*(\calE))$, we can use Lemma \ref{lem:FixInside} to deduce the claim.
\end{proof}
\begin{remark}
Verdier uses the projection formula, the finite tor-amplitude criterion and a famous theorem of Swan
to obtain this lemma for cohomology with compact supports and constant coefficients. It is possible to extend his approach to the case
of families of supports
using a suitable replacement for the projection formula, see~\cite{Kionke2012}.
\end{remark}
Finally we prove the main theorem. Recall that $X$ is a Hausdorff space with an action of a finite group $G$ and $\Phi$ is a $G$-invariant
paracompactifying system of supports.
\begin{proof}[Proof of the Theorem]
Note that for every subgroup $H$ of $G$ the space $Y = X^H$ (resp.~$Y =X_H$) has
finite $\Phi$-dimension since $Y$ is (locally) closed and $\Phi$ is paracompactifying (cf.~II.~Rem.~4.14.1 in \cite{Godement1958}).
By condition \cFone the triple
$(X_H,\Phi, \calE_{|X_H})$ is of finite type for every $H \leq G$.
For $i=1, \dots, |G|$ we define the closed set
\begin{equation*}
X^i = \bigcup_{\substack{H \leq G \\ |H|\geq i}} X^H.
\end{equation*}
Then $X^1 = X$ and $X^{|G|} = X^G$ is the set of fixed points.
An element $x\in X$ is in $X^i\setminus X^{i+1}$ exactly if it has an isotropy group with
$i$ elements, hence
\begin{equation*}
X^i\setminus X^{i+1} = \bigcup_{\substack{H \leq G \\ |H|=i}} X_H =: \bigcup_{\substack{C \in \cla(G)_i}} X_C
\end{equation*}
where $\cla(G)_i$ is the set of conjugacy classes of subgroups with $i$ elements.
Note that these unions are topologically disjoint. Using the long exact sequences of the pairs $(X^i,X^{i+1})$
with supports in $\Phi$ (cf.~II.~4.10.1 in \cite{Godement1958}) we obtain
\begin{equation*}
\chi_\Phi\bigl(X,\calE; F[G]\bigr) = \sum_{C \in \cla(G)} \chi_{\Phi}\bigl(X_C, \calE_{|X_C}; F[G]\bigr).
\end{equation*}
Since $X_C$ is the disjoint union $\bigcup_{H \in C} X_H$ and $G$ acts transitively on the components
we obtain
\begin{equation*}
\chi_{\Phi}\bigl(X_C, \calE_{|X_C}; F[G]\bigr) = \ind^G_{N_G(H)}\chi_{\Phi}\bigl(X_H, \calE_{|X_H}; F[N_G(H)]\bigr)
\end{equation*}
for any representative $H \in C$.
We are now in the specific situation where $H$ acts trivially on $X_H$ and $N_G(H)/H$ acts freely on $X_H$.
For simplicity we write $N$ for the normalizer $N_G(H)$.
We prove the following identity
\begin{equation*}
\chi_{\Phi}\bigl(X_H, \calE_{|X_H}; F[N]\bigr) = \frac{|H|}{|N|} \ind_H^N\Bigl( \chi_\Phi(X_H, \calE_{|X_H}; F[H]) \Bigr) .
\end{equation*}
By Frobenius reciprocity a finite dimensional $F[N]$-module $V$ is induced from the $F[H]$-module $W$
if and only if
\begin{equation*}
\dim_F \Hom_{F[N]}(V_\lambda, V) = \dim_F \Hom_{F[H]}\bigl((V_\lambda)_{|H}, W\bigr)
\end{equation*}
for all $\lambda \in \widehat{N}_F$. We use this principle in the Euler characteristic.
For $\lambda \in \widehat{N}_F$ we choose some lattice $L_\lambda \subset V_\lambda$ as in condition \cFtwo.
We obtain the $N$-sheaf $\Hom_R(L_\lambda, \calE_{|X_H})$ and the $N/H$-sheaf $\Hom_{R[H]}(L_\lambda, \calE_{|X_H})$.
For simplicity denote the canonical map $X_H \to X_H/N$ by $\pi$ as well.
Now we obtain
\begin{align*}
\chi\Bigl( \Hom_{F[N]}\bigl(V_\lambda, \com{H}_\Phi(X_H, &\calE_{|X_H})\otimes_R F\bigr) \Bigr) \\
&= \chi\Bigl( \Hom_{F[N]}\bigl(V_\lambda, \com{H}_\Phi(X_H/N,\pi_*(\calE_{|X_H}))\otimes_R F\bigr) \Bigr) \\
&= \chi\Bigl( \com{H}_\Phi(X_H/N,\Hom_R\bigl(L_\lambda, \pi_*(\calE_{|X_H})\bigr))^N\otimes_R F \Bigr) \\
(\text{by Lemma \ref{lem:FixInside}})\quad &= \chi\Bigl( \com{H}_\Phi(X_H/N,\pi^N_*\Hom_R\bigl(L_\lambda, \calE_{|X_H} \bigr))\otimes_R F) \Bigr)\\
&= \chi\Bigl( \com{H}_\Phi(X_H/N,\pi^{N/H}_*\Hom_{R[H]}\bigl(L_\lambda, \calE_{|X_H} \bigr))\otimes_R F) \Bigr)\\
(\text{by Lemma \ref{lem:VerdierLemma} and \cFtwo}) \quad &= \frac{|H|}{|N|}\chi\Bigl( \com{H}_\Phi(X_H,\Hom_{R[H]}\bigl(L_\lambda, \calE_{|X_H} \bigr))\otimes_R F) \Bigr)\\
(\text{by Lemma \ref{lem:FixInside}}) \quad &= \frac{|H|}{|N|}\chi\Bigl( \Hom_{F[H]}\bigl((V_\lambda)_{|H},\com{H}_\Phi(X_H, \calE_{|X_H} )\otimes_R F\bigr) \Bigr).
\end{align*}
This proves the first equality in the theorem.
Next we decompose the $F[H]$-module $\com{H}_\Phi (X_H, \calE_{|X_H} ) \otimes_R F$
into isotypical components. Let $V_\lambda$, with $\lambda \in \widehat{H}_F $, be an irreducible module and choose
some $R[H]$ stable lattice $ M_\lambda \subset V_\lambda $.
Put $ D_\lambda := \Hom_{F[H]} (V_\lambda, V_\lambda)$. Then
$ D_\lambda$ is a division algebra and the multiplicity $m^j_\lambda$ of $V_\lambda$ in
$H^j_\Phi(X_H, \calE_{|X_H}) \otimes_R F $ equals
\begin{align*}
m^j_\lambda & = \dim_{D_\lambda} \Hom_{F[H]} \bigl(V_\lambda, H^j_{\Phi} (X_H, \calE_{|X_H} ) \otimes_R F \bigr) \\
& = ( \deg V_\lambda)^{-1} \dim_F \Hom_{F[H]} ( V_\lambda , H^j_{\Phi} (X_H, \calE_{|X_H} ) \otimes_R F ) \\
& = (\deg V_\lambda)^{-1} H^j_\Phi (X_H, \Hom_{R[H]} (M_\lambda , \calE_{|X_H} )) \otimes_R F .
\end{align*}
Recall that $\deg V_\lambda = \dim_F(D_\lambda)$.
\end{proof}
\section{Further comments}\label{sec:Comments}
We add some remarks in order to clarify some assumptions for the main result.
\subsection{The finiteness condition}
If $\calE$ is a constant sheaf on $X$, then condition \cFone implies \cFtwo. In general this need not be the case.
This can already be seen in examples where the group acts trivially on the space.
Let $X$ be the unit disc in $\bbC$ and $G = \bbZ / 2 \bbZ$. Then there exists a
sheaf $\calE$ of $\bbZ[G]$-modules on $X$ such that $\com{H}(X, \calE)$ is finitely generated but $ H^2(X, \calE^G)$
is not finitely generated as $\bbZ$-module, i.e.~\cFone holds but \cFtwo fails.
It follows from a \v{C}ech cohomology argument that if $X$ and all $X^H$ are compact and homology locally connected (HLC),
then condition \cF holds for every locally
constant $G$-sheaf $\calE$ with finitely generated stalks (for the family of all supports).
\subsection{Sheaves of vectorspaces}
If we replace $R$ by $F$ and start with a $G$-sheaf $\calE_F$ of $F$-vectorspaces over $X$, then both
sides of the formula in the theorem make sense under suitable cohomological finiteness assumptions.
However, there are examples which show that the theorem does not hold in this situation.
For instance, let $X = S^1$ be the unit circle with the nontrivial action of $ G = \bbZ / 2 \bbZ $ by rotations.
There is a $G$-sheaf of $F$-vectorspaces $\calE_F$ such that $\com{H}(S^1,\calE_F)$ is finite dimensional and
$\chi(S^1, \calE_F )=1$. Then clearly
$| G | \chi ( X/G , \pi^G_* \calE_F ) \neq \chi (X, \calE_F )$
since the left hand side is an even number.
In fact, most complications in the proof arise from the fact that we have to work with sheaves over the ring $R$.
\subsection{Cohomology of arithmetic groups}
We indicate that the assumptions of the theorem hold for the cohomology of arithmetic
groups. Let $A$ be a reductive algebraic group defined over $\bbQ$ ($A$ is not necessarily
connected). By $\Gamma \subset A (\bbQ)$ we denote an arithmetic group.
Assume that $G \subset \Aut_\bbQ(A)$ is a finite
subgroup which acts on $\Gamma$ and on a finite dimensional rational representation $ \rho $ of $A$
on a $\bbQ$-vectorspace $ E $ such that
$g ( \rho (\eta) e) = \rho ( g(\eta)) ge$
for all $e \in E$, $\eta \in A(\bbQ)$, $g \in G$.
Let $ Y $ be the symmetric space attached to $ A (\bbR)$. Then $ G $ and $ \Gamma $ act on $Y$. Put
$ X := \Gamma \backslash Y $ as topological quotient and denote by $ f : Y \longrightarrow X $ the natural projection.
We choose a $G$ and $\Gamma$-stable lattice $ L $ in $ E $. Put $ \calE = f ^\Gamma_\ast L_Y $, where $ L_Y $
is the constant sheaf with stalks $ L$ on $Y$. Then $ G $ acts on $ \com{H} ( X, \calE) \otimes_{\bbZ} \bbQ
= \com{H}(\Gamma, L) \otimes_{\bbZ} \bbQ $. To see that the assumptions of the theorem hold one uses the
Borel-Serre compactification for $ X $ and $X^H$, see \cite{BorelSerre1973}.
\subsection{}
For a paracompactifying family of supports $\Phi$ on $X$ there is in general no equality of the form
\begin{equation*}
\com{H}_\Phi(X,\calE)\otimes_R F = \com{H}_\Phi(X,\calE \otimes_R F)
\end{equation*}
for a sheaf of $R$-modules $\calE$ on $X$. For
the family of compact supports this equality holds.
For cohomology of arithmetic groups, we have for all $R[\Gamma]$-modules $M$ and all $j$ that
$
H^j(\Gamma, M) \otimes_R F = H^j (\Gamma, M \otimes_R F) \:.$
This follows from the existence of a resolution $P_\bullet \longrightarrow \bbZ \longrightarrow 0 $
of $\bbZ $ by finitely generated free $\bbZ [\Gamma]$-modules and since $F$ is flat as $R$-module, see Thm.~11.4.4 in
\cite{BorelSerre1973} and p.~193 in \cite{BrownBook1982}.
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\href}[2]{#2}
\end{document} |
\begin{document}
\baselineskip = 16pt
\newcommand \ZZ {{\mathbb Z}}
\newcommand \NN {{\mathbb N}}
\newcommand \RR {{\mathbb R}}
\newcommand \PR {{\mathbb P}}
\newcommand \AF {{\mathbb A}}
\newcommand \GG {{\mathbb G}}
\newcommand \QQ {{\mathbb Q}}
\newcommand \CC {{\mathbb C}}
\newcommand \bcA {{\mathscr A}}
\newcommand \bcC {{\mathscr C}}
\newcommand \bcD {{\mathscr D}}
\newcommand \bcF {{\mathscr F}}
\newcommand \bcG {{\mathscr G}}
\newcommand \bcH {{\mathscr H}}
\newcommand \bcM {{\mathscr M}}
\newcommand \bcI {{\mathscr I}}
\newcommand \bcJ {{\mathscr J}}
\newcommand \bcK {{\mathscr K}}
\newcommand \bcL {{\mathscr L}}
\newcommand \bcO {{\mathscr O}}
\newcommand \bcP {{\mathscr P}}
\newcommand \bcQ {{\mathscr Q}}
\newcommand \bcR {{\mathscr R}}
\newcommand \bcS {{\mathscr S}}
\newcommand \bcV {{\mathscr V}}
\newcommand \bcU {{\mathscr U}}
\newcommand \bcW {{\mathscr W}}
\newcommand \bcX {{\mathscr X}}
\newcommand \bcY {{\mathscr Y}}
\newcommand \bcZ {{\mathscr Z}}
\newcommand \goa {{\mathfrak a}}
\newcommand \gob {{\mathfrak b}}
\newcommand \goc {{\mathfrak c}}
\newcommand \gom {{\mathfrak m}}
\newcommand \gon {{\mathfrak n}}
\newcommand \gop {{\mathfrak p}}
\newcommand \goq {{\mathfrak q}}
\newcommand \goQ {{\mathfrak Q}}
\newcommand \goP {{\mathfrak P}}
\newcommand \goM {{\mathfrak M}}
\newcommand \goN {{\mathfrak N}}
\newcommand \uno {{\mathbbm 1}}
\newcommand \Le {{\mathbbm L}}
\newcommand \Spec {{\rm {Spec}}}
\newcommand \Gr {{\rm {Gr}}}
\newcommand \Pic {{\rm {Pic}}}
\newcommand \Jac {{{J}}}
\newcommand \Alb {{\rm {Alb}}}
\newcommand \Corr {{Corr}}
\newcommand \Chow {{\mathscr C}}
\newcommand \Sym {{\rm {Sym}}}
\newcommand \Prym {{\rm {Prym}}}
\newcommand \cha {{\rm {char}}}
\newcommand \eff {{\rm {eff}}}
\newcommand \tr {{\rm {tr}}}
\newcommand \Tr {{\rm {Tr}}}
\newcommand \pr {{\rm {pr}}}
\newcommand \ev {{\it {ev}}}
\newcommand \cl {{\rm {cl}}}
\newcommand \interior {{\rm {Int}}}
\newcommand \sep {{\rm {sep}}}
\newcommand \td {{\rm {tdeg}}}
\newcommand \alg {{\rm {alg}}}
\newcommand \im {{\rm im}}
\newcommand \gr {{\rm {gr}}}
\newcommand \op {{\rm op}}
\newcommand \Hom {{\rm Hom}}
\newcommand \Hilb {{\rm Hilb}}
\newcommand \Sch {{\mathscr S\! }{\it ch}}
\newcommand \cHilb {{\mathscr H\! }{\it ilb}}
\newcommand \cHom {{\mathscr H\! }{\it om}}
\newcommand \colim {{{\rm colim}\, }}
\newcommand \End {{\rm {End}}}
\newcommand \coker {{\rm {coker}}}
\newcommand \id {{\rm {id}}}
\newcommand \van {{\rm {van}}}
\newcommand \spc {{\rm {sp}}}
\newcommand \Ob {{\rm Ob}}
\newcommand \Aut {{\rm Aut}}
\newcommand \cor {{\rm {cor}}}
\newcommand \Cor {{\it {Corr}}}
\newcommand \res {{\rm {res}}}
\newcommand \red {{\rm{red}}}
\newcommand \Gal {{\rm {Gal}}}
\newcommand \PGL {{\rm {PGL}}}
\newcommand \Bl {{\rm {Bl}}}
\newcommand \Sing {{\rm {Sing}}}
\newcommand \spn {{\rm {span}}}
\newcommand \Nm {{\rm {Nm}}}
\newcommand \inv {{\rm {inv}}}
\newcommand \codim {{\rm {codim}}}
\newcommand \Div{{\rm{Div}}}
\newcommand \CH{{\rm{CH}}}
\newcommand \sg {{\Sigma }}
\newcommand \DM {{\sf DM}}
\newcommand \Gm {{{\mathbb G}_{\rm m}}}
\newcommand \tame {\rm {tame }}
\newcommand \znak {{\natural }}
\newcommand \lra {\longrightarrow}
\newcommand \hra {\hookrightarrow}
\newcommand \rra {\rightrightarrows}
\newcommand \ord {{\rm {ord}}}
\newcommand \Rat {{\mathscr Rat}}
\newcommand \rd {{\rm {red}}}
\newcommand \bSpec {{\bf {Spec}}}
\newcommand \Proj {{\rm {Proj}}}
\newcommand \pdiv {{\rm {div}}}
\newcommand \wt {\widetilde }
\newcommand \ac {\acute }
\newcommand \ch {\check }
\newcommand \ol {\overline }
\newcommand \Th {\Theta}
\newcommand \cAb {{\mathscr A\! }{\it b}}
\newenvironment{pf}{\par\noindent{\em Proof}.}{
\framebox(6,6)
\par
}
\newtheorem{theorem}[subsection]{Theorem}
\newtheorem{conjecture}[subsection]{Conjecture}
\newtheorem{proposition}[subsection]{Proposition}
\newtheorem{lemma}[subsection]{Lemma}
\newtheorem{remark}[subsection]{Remark}
\newtheorem{remarks}[subsection]{Remarks}
\newtheorem{definition}[subsection]{Definition}
\newtheorem{corollary}[subsection]{Corollary}
\newtheorem{example}[subsection]{Example}
\newtheorem{examples}[subsection]{Examples}
\title{Chow groups of conic bundles in $\PR^5$ and the Generalised Bloch's conjecture}
\author{Kalyan Banerjee}
\address{Harish Chandra Research Institute, India}
\email{banerjeekalyan@hri.res.in}
\begin{abstract}
Consider the Fano surface of a conic bundle embedded in $\PR^5$. Let $i$ denote the natural involution acting on this surface. In this note we provide an obstruction to the identity action of the involution on the group of algebraically trivial zero cycles modulo rational equivalence on the surface.
\end{abstract}
\maketitle
\section{Introduction}
One of the very important problems in algebraic geometry is to understand the Chow group of zero cycles on a smooth projective surface with geometric genus and irregularity equal to $0$. It was already proved by Mumford \cite{M}, that for a smooth, projective complex surface of geometric genus greater than zero, the Chow group of zero cycles is infinite dimensional, in the sense that, it cannot be ``parametrized'' by an algebraic variety. The conjecture due to Spencer Bloch asserts the converse, that is, for a surface of geometric genus and irregularity zero, the Chow group of zero cycles is isomorphic to the group of integers. Bloch's conjecture has been studied and proved in the case when the surface is not of general type by \cite{BKL} and for surfaces of general type by \cite{B}, \cite{IM}, \cite{GP}, \cite{PW}, \cite{V},\cite{VC}. Inspired by Bloch's conjecture, the following conjecture is made, which is a generalisation \cite[Conjecture 11.19]{Vo}.
\textit{ Conjecture : Let $S$ be a smooth projective surface over the field of complex numbers and let $\Gamma$ be a codimension two cycle on $S\times S$. Suppose that $\Gamma^*$ acts as zero on the space of globally holomorphic two forms on $S$, then $\Gamma_*$ acts as zero on the kernel of the albanese map from $\CH_0(S)$ to $Alb(S)$.}
This conjecture was studied in detail when the correspondence $\Gamma$ is the $\Delta- Graph(i)$, where $i$ is a symplectic involution on a $K3$ surface by \cite{GT}, \cite{HK},\cite{Voi}. In the example of K3 surfaces the push-forward induced by the involution acts as identity on the Chow group of zero cycles of degree zero.
Inspired by this conjecture we consider the following question in this article. Let $X$ be a smooth, cubic fourfold in $\PR^5$. Consider a line $l$ in $\PR^5$, embedded in $X$. Considering the projection from the line $l$ to $\PR^3$, we have a conic bundle structure on the cubic $X$. Let $S$ be the discriminant surface of this conic bundle. Let $T$ be the double cover of $S$ inside the Fano variety of lines $F(X)$ of $X$, arising from the conic bundle structure. Then $T$ has a natural involution and we observe that the group of algebraically trivial zero cycles on $T$ modulo rational equivalence (denoted by $A_0(T)$) maps surjectively onto the algebraically trivial one cycles on $X$ modulo rational equivalence (denoted by $A_1(X)$). The action of the involution has as its invariant part equal to the $A_0(S)$ and as anti-invariant part equal to $A_1(X)$. The involution cannot act as $+1$ on the group $A_1(X)$, as it will follow that all the elements of $A_1(X)$ are $2$-torsion, hence $A_1(X)$ is weakly representable. This is not true by the main theorem of \cite{SC}. Now the question is, what is the obstruction to the $+1$ action of the involution in terms of the geometry of $S,T$.
\begin{theorem}
\label{theorem3}
Let $S$ be the discriminant surface as mentioned above. Then for any very ample line bundle $L$ on $S$ we cannot have the equality
$$L^2-g+1=g+n$$
where $g$ is the genus of the curve in the linear system of $L$ and $n$ is a positive integer.
\end{theorem}
This result motivates the following:
\begin{corollary}
Suppose that we have a surface of general type $S$ with geometric genus zero and we have an involution $i$ on the surface $S$ having only finitely many fixed points. Suppose that there exists a very ample line bundle $L$, on the minimal desingularization of the quotient surface $S/i$ such that the following equality
$$L^2-2g+1=n$$
is true, here $g$ is the genus of the smooth curves in the linear system $|L|$ and $n$ is some positive integer. Then the involution $i_*$ acts as identity on the group $A_0(S)$.
\end{corollary}
For the proof of the above theorem and the corollary, we follow the approach of the proof for the example of K3 surfaces due to Voisin as in \cite{Voi}. The proof involves two steps. First is that we invoke the notion of finite dimensionality in the sense of Roitman as in \cite{R1} and prove that the finite dimensionality of the image of a homomorphism from $A_0(T)$ to $A_1(X)$ (respectively from $A_0(S)\to A_0(S)$) implies that the homomorphism factors through the albanese map $A_0(T)\to Alb(T)$ (or $A_0(S)\to Alb(S)$ respectively). The second step is to show that, if we have the equality as above \ref{theorem3}, then the image of the homomorphism induced by the difference of the diagonal and the graph of the involution from $A_0(T)$ to $A_1(X)$ (or $A_0(S)\to A_0(S)$) is finite dimensional, yielding the $+1$ action of the involution on $A_1(X)$ or $A_0(S)$ respectively.
As an implication of the above corollary we obtain the Bloch's conjecture for the Craighero-Gattazzo surface of general type with geometric genus zero, studied in \cite{CG},\cite{DW}. This class of surfaces, is obtained as minimal resolution of singularities of singular quintics in $\PR^3$ invariant under an involution and having four isolated, simple elliptic singular points.
{\small \textbf{Acknowledgements:} The author would like to thank the hospitality of IISER-Mohali, for hosting this project. The author is indebted to Kapil Paranjape for some useful conversations relevant to the theme of the paper. The author would like to thank Claire Voisin for her advice on the theme of the paper. The author is indebted to J.L.Colliot-Thelene, B.Poonen, and the anonymous referee for finding out a crucial mistake in the earlier version of the manuscript.}
{\small Assumption: We work over the field of complex numbers.}
\section{Finite dimensionality in the sense of Roitman and one-cycles on cubic fourfolds}
Let $P$ be a subgroup of the group of algebraically trivial one cycles modulo rational equivalence on a smooth projective fourfold $X$, the latter is denoted by $A_1(X)$. Following \cite{R1}, we say that the subgroup $P$ is finite dimensional, if there exists a smooth projective variety $W$, and a correspondence $\Gamma$ on $W\times X$, of correct codimension, such that $P$ is contained in the set $\Gamma_*(W)$.
Let $X$ be a cubic fourfold. Consider a line $l$ on $X$ and project from $l$ onto $\PR^3$. Consider the blow up of $X$ along $l$. Then the blow up $X_l$ has a conic bundle structure over $\PR^3$. Let $S$ be the surface in $\PR^3$ such that for any closed point on $S$, the inverse image is the union of two lines in $\PR^3$.
Let $T$ be the variety in $F(X)$ which is the double cover of $S$. Precisely it means the following. Let us consider
$$\bcU:=\{(l',x):x\in l', \pi_l(x)\in S \}$$
inside $F(X)\times X$. Then its projection to $F(X)$ is $T$ and we have a 2:1 map from $T$ to $S$, which is branched along finitely many points. So $T$ is a surface.
Now for a hyperplane section $X_t$, let $l_1,l_2$ be two lines contained in $X_t$. By general position argument these two lines can be disjoint from $l$ inside $X$ and they are contained in $\PR^2$, so under the projection from $l$ they are mapped to two rational curves in $\PR^2$. Thus by Bezout's theorem they must intersect at a point $z$, so the inverse image of $z$ under the projection are two given lines $l_1,l_2$, which tells us that the map from $A_0(T_t)$ to $A_1(X_t)$ is onto, here $T_t$ is the double cover (for a general $t$) of $S_t$, where $S_t$ is the discriminant curve of the projection $\pi_l: X_t\to \PR^2$. This in turn says that $A_0(T)$ to $A_1(X)$ is onto, because $A_1(X)$ is generated by $A_1(X_t)$, where $t$ varies.
\begin{theorem}
\label{theorem1}
Let $Z$ be a correspondence supported on $T\times X$. Suppose that the image of $Z_*$ from $A_0(T)$ to $A_1(X)$ is finite dimensional. Then $Z_*$ factors through the albanese map of $T$.
\end{theorem}
\begin{proof}
The proof of this theorem follows the approach of \cite[Theorem 2.3]{Voi}. Since $Z_*$ has finite dimensional image, there exists a smooth projective variety $W$ and a correspondence $\Gamma$ supported on $W\times X$ such that the image of $Z_*$ is contained in $\Gamma_*(W)$. Let $C$ inside $T$ be a smooth, hyperplane section (after fixing an embedding of $T$ into a projective space). Then by the Lefschetz theorem on hyperplane sections we have that $J(C)$ maps onto $Alb(T)$. So the kernel is an abelian variety, denoted by $K(C)$. First we prove the following.
\begin{lemma}
The abelian variety $K(C)$ is simple for a general hyperplane section $C$ of $T$.
\end{lemma}
\begin{proof}
The proof of this lemma follows the approach of \cite[Proposition 2.4]{Voi}. Suppose, if possible, that there exists a non-trivial proper abelian subvariety $A$ inside $K(C)$. Now $K(C)$ corresponds to the Hodge structure
$$\ker(H^1(C,\QQ)\to H^3(T,\QQ))\;.$$
Let $T\to D$ be a Lefschetz pencil such that a smooth fiber is $C$. Then the fundamental group $\pi_1(D\setminus \{0_1,\cdots,0_m\},t)$ acts irreducibly on the Hodge structure mentioned above, \cite[Theorem 3.27]{Vo}. Here $t$ corresponds to the smooth fiber $C$. Now the abelian variety $A$ corresponds to a Hodge sub-structure $H$ inside the above mentioned Hodge structure. Let $A_D$ be the base change of $A$ over the spectrum of the function field $\CC(D)$. For convenience, let us continue to denote $A_D$ by $A$. Then consider a finite extension $L$ of $\CC(D)$ inside $\overline{\CC(D)}$, such that $A$, $K(C)$ are defined over $L$. Then we spread $A,K(C)$, over a Zariski open $U'$ in $D'$, where $\CC(D')=L$ and $D'$ is a smooth, projective curve which maps finitely onto $D$. Denote these spreads by $\bcA,\bcK$ over $U'$. By throwing out more points from $U'$ we get that $\bcA\to U', \bcK\to U'$ are fibrations of the underlying smooth manifolds. So the fundamental group $\pi_1(U',t')$ acts on $H$, which is the $(2d-1)$-th cohomology of $A$ ($d=\dim(A)$), and on $\ker(H^1(C,\QQ)\to H^3(T,\QQ))$. Since $U'$ maps finitely onto a Zariski open $U$ of $D$, we have that $\pi_1(U',t')$ is a finite index subgroup of $\pi_1(U,t)$. Now it is a consequence of the Picard-Lefschetz formula that $H$ is a $\pi_1(U,t)$ stable subspace of $\ker(H^1(C,\QQ)\to H^3(T,\QQ))$. The latter is irreducible under the action of $\pi_1(U,t)$. So we get that $H$ is either zero or all of $\ker(H^1(C,\QQ)\to H^3(T,\QQ))$. Therefore by the equivalence of abelian varieties and weight one, polarized Hodge structures, $A$ is either zero or all of $K(C)$.
\end{proof}
Now consider sufficiently ample hyperplane sections of $T$, so that the dimension of $K(C)$ is arbitrarily large, and hence strictly greater than $\dim(W)$. Consider the subset $R$ of $K(C)\times W$, consisting of pairs $(k,w)$ such that
$$Z_*j_*(k)=\Gamma_*(w)$$
here $j: C\to T$ is the closed embedding of $C$ into $T$. Since the image of $Z_*$ is finite dimensional, the projection from $R$ onto $K(C)$ is surjective. By the Mumford-Roitman argument on Chow varieties \cite{R}, $R$ is a countable union of Zariski closed subsets in the product $K(C)\times W$. By the uncountability of the field of complex numbers it follows that some component $R_0$ of $R$, dominates $K(C)$. Therefore we have that
$$\dim(R_0)\geq \dim(K(C))>\dim (W)\;.$$
So the fibers of the map $R_0\to W$ are positive dimensional. Since the abelian variety $K(C)$ is simple, the fibers of $R_0\to W$ generate the abelian variety $K(C)$. So for any zero cycle $z$ supported on the fibers of $R_0\to W$, we have that
$$Z_*j_*(z)=\deg(z)\Gamma_*(w)$$
since $z$ is of degree zero, it follows that $Z_*$ vanishes on the fibers of $R_0\to W$, which is positive dimensional, hence on all of $K(C)$, by the simplicity of $K(C)$.
Now to prove that the map $Z_*$ factors through $alb$, we consider a zero cycle $z$ of degree zero, which is given by a tuple of $2k$ points for a fixed positive integer $k$. Then we blow up $T$ along these points, denote the blow up by $\tau:T'\to T$. Let $E_i$'s be the exceptional divisor of the blow up, we choose $H$ in $\Pic(T)$, such that $L=\tau^*(H)-\sum_i E_i$ is ample (this can be obtained by Nakai Moisezhon-criterion for ampleness). Now consider a sufficiently large, very ample multiple of $L$, and apply the previous method to a general member $C'$ of the corresponding linear system. Then $K(C')$ is a simple abelian variety. Also $\tau(C')$ contains all the points at which we have blown up. Suppose that the corresponding cycle $z$ is annihilated by $alb_T$, then any of its lifts to $T'$ say $z'$, is annihilated by $alb_{T'}$ and is supported on $K(C')$. So applying the previous argument to the correspondence $Z'=Z\circ \tau$, we have that
$$Z_*(z)=Z'_*(z')=0\;.$$
\end{proof}
Let $i$ be the involution on $T$, then this involution induces an involution on $A_1(X)$. Consider the homomorphism given by the difference of identity and the induced involution on $A_1(X)$, call it $Z_{1*}$. It is clear from \ref{theorem1} that the image of $Z_*Z_{1*}$ cannot be finite dimensional, otherwise the involution will act as $+1$ on $A_1(X)$, leading to the fact that $A_1(X)=\{0\}$. Now we prove the following:
\begin{theorem}
\label{theorem2}
Let $S$ be the discriminant surface, mentioned above. Then for any very ample line bundle $L$ on $S$ the equality
$$L^2-g+1=g+n$$
cannot hold, where $g$ is the genus of a curve in the complete linear system of $L$ and $n$ is a positive integer.
\end{theorem}
\begin{proof}
The proof of this theorem follows the approach of \cite[Proposition 2.5]{Voi}. The discriminant surface $S$ is a quintic, hence its irregularity is zero. Consider a very ample line bundle $L$ on the quintic $S$. Let $g$ be the genus of a smooth curve in the linear system $|L|$. Now we calculate the dimension of $|L|$. Consider the exact sequence
$$0\to \bcO(C)\to \bcO(S)\to \bcO(S)/\bcO(C)\to 0$$
tensoring with $\bcO(-C)$ we have
$$0\to \bcO(S)\to \bcO(-C)\to\bcO(-C)|_C\to 0\;.$$
Taking sheaf cohomology we have
$$0\to \CC\to H^0(S,L)\to H^0(C,L|_C)\to 0$$
since the irregularity of the surface is zero.
On the other hand by Nakai-Moisezhon criterion the intersection number $L|_C$ is positive, so $L$ restricted to $C$ has positive degree, by Riemann-Roch this implies that
$$\dim(H^0(C,L|_C))=L^2-g+1\;,$$
provided that we have the equality
$$L^2-g+1=g+n$$
for some positive integer $n$.
Then the linear system of $L$ is of dimension $g+n$. Now consider the smooth, projective curves $C$ in this linear system $|L|$ and their double covers $\wt{C}$ (this is actually a covering for a general $C$, as the map $T\to S$ is branched along a finite set of points). By Bertini's theorem a general $\wt{C}$ is smooth. By the Hodge index theorem it follows that $\wt{C}$ is connected. If not, suppose that it has two components $C_1,C_2$. Since $C^2>0$, we have $C_i^2>0$ for $i=1,2$ and since $\wt{C}$ is smooth we have that $C_1.C_2=0$. Therefore the intersection form restricted to $\{C_1,C_2\}$ is semipositive. This can only happen when $C_1$, $C_2$ are proportional and $C_i^2=0$, for $i=1,2$, which is not possible.
Now let $(t_1,\cdots, t_{g+n})$ be a point on $T^{g+n}$, which gives rise to the tuple $(s_1,\cdots,s_{g+n})$ on $S^{g+n}$, under the quotient map. There exists a unique, smooth curve $C$ containing all these points (if the points are in general position). Let $\wt{C}$ be its double cover on $T$. Then $(t_1,\cdots,t_{g+n})$ belongs to $\wt{C}$. Consider the zero cycle
$$\sum_i t_i-\sum_i i_*(t_i)$$
this belongs to the image of $P(\wt{C}/C)$ in $A_0(T)$, $P(\wt{C}/C)$ is the Prym variety corresponding to the double cover. So the image of
$$\sum_i \left(Z_*(t_i)-i_*Z_*(t_i)\right)$$
is an element in the image of this Prym variety under the homomorphism
$$A_0(T)\to A_1(X)\;.$$
So the map
$$T^{g+n}\to A_1(X)$$
given by
$$(t_1,\cdots,t_{g+n})\mapsto \sum_i Z_*(t_i)-i_*Z_*(t_i) $$
factors through the Prym fibration $\bcP(\wt {\bcC}/\bcC)$, given by
$$(t_1,\cdots,t_{g+n})\mapsto alb_{\wt{C}}\left(\sum_i t_i-i(t_i)\right)$$
here $\bcC, \wt{\bcC}$ are the universal smooth curve and the universal double cover of $\bcC$ over $|L|_0$ parametrizing the smooth curves in the linear system $|L|$. By dimension count, the dimension of $\bcP(\wt {\bcC}/\bcC)$ is $2g+n-1$. On the other hand we have that dimension of $T^{g+n}$ is $2g+2n$. So the map
$$T^{g+n}\to \bcP(\wt {\bcC}/\bcC)$$
has positive dimensional fibers, and hence the map
$$T^{g+n}\to A_1(X)$$
has positive dimensional fibers. So the general fiber of $$T^{g+n}\to A_1(X)$$ contains a curve. Let $H$ be the hyperplane bundle pulled back onto the quintic surface $S$. It is very ample. Pull it back further onto $T$, to get an ample line bundle on $T$. Call it $L'$. Then the divisor $\sum_i \pi_i^{-1}(L')$ is ample on $T^{g+n}$, where $\pi_i$ is the $i$-th co-ordinate projection from $T^{g+n}$ to $T$. Therefore the curves in the fibers of the above map intersect the divisor $\sum_i \pi_i^{-1}(L')$.
So we get that there exist points in $F_s$ (the general fiber over a cycle $s$ in $A_1(X)$) contained in $C\times T^{g+n-1}$ where $C$ is in the linear system of $L'$. Then consider the elements of $F_s$ the form $(c,s_1,\cdots,s_{g+n-1})$, where $c$ belongs to $C$. Considering the map from $T^{g+n-1}$ to $A_1(X)$ given by
$$(s_1,\cdots,s_{g+n-1})\mapsto Z_*(\sum_i s_i+c-\sum_i i(s_i)-i(c))\;,$$
we see that this map factors through the Prym fibration and the map from $T^{g+n-1}$ to $\bcP(\wt{\bcC}/\bcC)$ has positive dimensional fibers, since $n$ is large. So it means that, if we consider an element $(c,s_1,\cdots,s_{g+n-1})$ in $F_s$ and a curve through it, then it intersects the ample divisor given by $\sum_i \pi_i^{-1}(L')$, on $T^{g+n-1}$. Then we have that some $s_i$ is contained in $C$. So iterating this process we get that elements of $F_s$ are supported on $C^k\times T^{g+n-k}$, where $k$ is some natural number depending on $n$. Note that the genus of $C$ is fixed (equal to $11$) and is less than $k$ for a choice of a large multiple of the very ample line bundle $L$. Thus the elements of $F_s$ are supported on $C^{n_0}\times T^{g+n-k}$.
Therefore considering $\Gamma=Z_1\circ Z$, we get that $\Gamma_*(T^{g+n})=\Gamma_*(T^{m_0})$, where $m_0$ is strictly less than $g+n$.
Now we prove by induction that $\Gamma_*(T^{m_0})=\Gamma_*(T^m)$ for all $m\geq g+n$.
So suppose that $\Gamma_*(T^k)=\Gamma_*(T^{m_0})$ for $k\geq g+n$, then we have to prove that $\Gamma_*(T^{k+1})=\Gamma_*(T^{m_0})$. So any element in $\Gamma_*(T^{k+1})$ can be written as $\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)$. Now let $k-m_0=m$, then $m_0+1=k-m+1$. Since $k-m<k$, we have $k-m+1\leq k$, so $m_0+1\leq k$, so we have the cycle
$$\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)$$
supported on $T^k$, hence on $T^{m_0}$. So we have that $\Gamma_*(T^{m_0})=\Gamma_*(T^k)$ for all $k$ greater or equal than $g+n$. Now any element $z$ in $A_0(T)$, can be written as a difference of two effective cycle $z^+,z^-$ of the same degree. Then we have
$$\Gamma_*(z)=\Gamma_*(z^+)-\Gamma_*(z^-)$$
and $\Gamma_*(z^{\pm})$ belong to $\Gamma_*(T^{m_0})$. So let $\Gamma'$ be the correspondence on $T^{2m_0}\times T$ defined as
$$\sum_{l\leq m_0}(\pr_{l},\pr_T)^*\Gamma-\sum_{m_0+1\leq l\leq 2m_0}(\pr_l,\pr_T)^* \Gamma$$
where $\pr_l$ is the $l$-th projection from $T^l$ to $T$, and $\pr_T$ is from $T^{2m_0}\times T$ to the last copy of $T$. Then we have
$$\im(\Gamma_*)=\Gamma'_*(T^{2m_0})\;.$$
This would imply that the image of $\Gamma_*$ is finite dimensional, so by \ref{theorem1} the induced involution on $A_1(X)$ acts as the identity. On the other hand, the involution acts as $-\id$ on $A_1(X)$. Hence every element of $A_1(X)$ is $2$-torsion. This contradicts the fact that $A_1(X)$ is infinite dimensional \cite{SC}.
\end{proof}
Now we proceed to the proof of the corollary stated in the introduction regarding the generalised Bloch conjecture on surfaces of general type with geometric genus zero and with an involution $i$. The result is as follows:
\begin{corollary}
\label{cor1}
Suppose that we have a surface of general type $S$ with geometric genus zero and we have an involution $i$ on the surface $S$ having only finitely many fixed points. Suppose that there exists a very ample line bundle $L$, on the minimal desingularization of the quotient surface $S/i$ (by the involution) such that the following equality
$$L^2-2g+1=n$$
is true, here $g$ is the genus of the smooth, projective curves in the linear system $|L|$, and $n$ is some positive integer. Then the involution $i_*$ acts as identity on the group $A_0(S)$.
\end{corollary}
\begin{proof}
Consider the resolution of singularity of the surface $S/i$. It is the quotient by the involution acting on the surface $\wt{S}$, obtained by blowing up the isolated fixed points of $i$ acting on $S$. Call this quotient $\wt{S}/i$. Since it is dominated by a surface of irregularity zero (namely $\wt{S}$), it has irregularity zero. Consider a very ample line bundle $L$ on $\wt{S}/i$. Let $g$ be the genus of a smooth, projective curve in the linear system $|L|$. Now we calculate the dimension of $|L|$. Consider the exact sequence
$$0\to \bcO(C)\to \bcO(\wt{S}/i)\to \bcO(\wt{S}/i)/\bcO(C)\to 0$$
tensoring with $\bcO(-C)$ we get
$$0\to \bcO(\wt{S}/i)\to \bcO(-C)\to\bcO(-C)|_C\to 0\;.$$
Taking sheaf cohomology we get
$$0\to \CC\to H^0(\wt{S}/i,L)\to H^0(C,L|_C)\to 0$$
since the irregularity of the surface $\wt{S}/i$ is zero.
On the other hand, by the Nakai--Moishezon criterion the intersection number $L.C$ is positive, so $L$ restricted to $C$ has positive degree; by Riemann--Roch this implies
$$\dim(H^0(C,L|_C))=L^2-g+1\;, $$
provided that we have the equality
$$L^2-g+1=g+n$$
for some positive integer $n$.
Then the linear system of $L$ is of dimension $g+n$. Now consider a smooth, projective curves $C$ in this linear system $|L|$ and its branched double cover $\wt{C}$, branched along the intersection of $\wt{C}$ with $E_i$, where $E_i$'s are the exceptional curves arising from the blow up $\wt{S}\to S$. By Bertini's theorem a general $\wt{C}$ is smooth. By the Hodge index theorem it follows that, it is connected. If not, suppose that it has two components $C_1,C_2$. Since $C^2>0$, we have $C_i^2>0$ for $i=1,2$ and since $\wt{C}$ is smooth we have that $C_1.C_2=0$. Therefore the intersection form restricted to $\{C_1,C_2\}$ is semipositive. This can only happen when $C_1$, $C_2$ are proportional and $C_i^2=0$, for $i=1,2$, which is not possible as $C_1+C_2$ is ample on $\wt{S}$.
Now let $(t_1,\cdots, t_{g+n})$ be a point on $\wt{S}^{g+n}$, which gives rise to the tuple $(s_1,\cdots,s_{g+n})$ on $(\wt{S}/i)^{g+n}$, under the quotient map. There exists a unique, smooth curve $C$ containing all these points (if the points are in general position). Let $\wt{C}$ be its branched double cover of $C$ in $\wt{S}$. Then $(t_1,\cdots,t_{g+n})$ belongs to $\wt{C}$. Consider the zero cycle
$$\sum_i t_i-\sum_i i_*(t_i)$$
this belongs to $P(\wt{C}/C)$, which is the Prym variety corresponding to the double cover $\wt{C}\to C$. So the image of
$$\sum_i \left(t_i-i_*(t_i)\right)$$
under the push-forward $j_{\wt{C}*}$
is an element in the image under the homomorphism
$$\id-i_*: A_0(\wt{S})\to A_0(\wt{S})$$
So the map
$$\wt{S}^{g+n}\to A_0(\wt{S})$$
given by
$$(t_1,\cdots,t_{g+n})\mapsto \sum_i (t_i-i_*(t_i)) $$
factors through the Prym fibration $\bcP(\wt {\bcC}/\bcC)$, given by
$$(t_1,\cdots,t_{g+n})\mapsto alb_{\wt{C}}\left(\sum_i t_i-i(t_i)\right)$$
here $\bcC, \wt{\bcC}$ are the universal family of smooth curves in $|L|$ and the universal double cover of $\bcC$ respectively, over $|L|_0$ parametrizing the smooth curves in the linear system $|L|$. By dimension count the dimension of $\bcP(\wt {\bcC}/\bcC)$ is $2g+n-1+m/2$, where $m$ is the number of branch points on the curve $\wt{C}$ counted with multiplicities. On the other hand we have that dimension of ${\wt{S}}^{g+n}$ is $2g+2n$. So the map
$${\wt{S}}^{g+n}\to \bcP(\wt {\bcC}/\bcC)$$
has fiber dimension equal to
$$2g+2n-2g-n+1-m/2=n+1-m/2\;.$$
Considering a large multiple of the very ample line bundle $L$, we can assume that the above number is positive. Indeed we have
$$L^2-2g+1=-L.K_{\wt{S}/i}-1=n>0$$
and $$K_{\wt{S/i}}=f^*(K_{S/i})+E$$
where $E$ is the exceptional divisor, $f$ is the regular map from $\wt{S}/i$ to $S/i$. Here we consider $\wt{S}$ is the blow up of $S$ along the unique fixed point of $i$. The calculation for finitely many fixed points greater than one is similar. Let $L$ be equal to $m'f^*(H)-m'E$ which is very ample, where $H$ is a very ample line bundle on $S/i$, after fixing an embedding into some projective space. Then we have to prove that
$$L.(-2K_{\wt{S}/i}-E)-2>0$$
that is
$$L.(-2f^*(K_{S/i})-3E)-2>0$$
putting the expression of $L$, the condition to be proven is
$$-(m'(f^*(H)-E)(2f^*(K_{S/i})+3E))-2=-2m'f^*(H).f^*(K_{S/i})-3m'-2>0$$
But by the adjunction formula on $\wt{S}/i$ we have
$$L^2-2g+1=-L.K_{\wt{S}/i}-1$$
on the other hand
$$L^2-2g+1>0$$
by the assumption of the theorem.
Therefore
$$-m'f^*(H).f^*(K_{S/i})-m'-1=-m'f^*(H).f^*(K_{S/i})-(m'+1)>0$$
so
$$-2m'f^*(H).f^*(K_{S/i})> 2m'+2\;.$$
Therefore choosing $l>3$, such that $m'f^*(lH)-m'E$ is very ample, we have
$$-2f^*(lm'H).f^*(K_{S/i})> 2l(m'+1)>3m'+2$$
for large values of $l$.
Also note that for $L=m'(f^*(lH)-E)$, $l>1$ we have
$$L^2-2g+1=-L.K_{\wt{S/i}}-1=-f^*(m'lH).f^*(K_{S/i})-m'-1=-m'lf^*(H).f^*(K_{S/i})-m'-1$$
we know that
$$-m'f^*(H).f^*(K_{S/i})>m'+1$$
so
$$-m'lf^*(H).f^*(K_{S/i})-m'-1>(m'l-1)(m'+1)> 0\;.$$
So for $L=f^*(m'lH)-m'E$ we have the equality
$$L^2-2g+1=n$$
for some positive integer $n$.
So the fiber contains a curve. Let $H$ be the hyperplane bundle pulled back onto the surface $\wt{S}/i$, after fixing an embedding of $\wt{S}/i$ into some projective space. It is very ample. Pull it back further onto $\wt{S}$, to get an ample line bundle on $\wt{S}$. Call it $L'$. Then the divisor $\sum_i \pi_i^{-1}(L')$ is ample on $\wt{S}^{g+n}$, where $\pi_i$ is the $i$-th co-ordinate projection from $\wt{S}^{g+n}$ to $\wt{S}$. Therefore the curves in the fibers of the above map intersect the divisor $\sum_i \pi_i^{-1}(L')$.
So there exist points in $F_s$ (the general fiber of $\wt{S}^{g+n}\to A_0(\wt{S})$ over a cycle $s$ in $A_0(\wt{S})$) contained in $C\times \wt{S}^{g+n-1}$ where $C$ is in the linear system of $L'$. Then consider the elements of $F_s$ the form $(c,s_1,\cdots,s_{g+n-1})$, where $c$ belongs to $C$. Considering the map from $\wt{S}^{g+n-1}$ to $A_0(\wt{S})$ given by
$$(s_1,\cdots,s_{g+n-1})\mapsto (\sum_i s_i+c-\sum_i i_*(s_i)-i_*(c))\;,$$
we see that this map factors through the Prym fibration and the map from $\wt{S}^{g+n-1}$ to $\bcP(\wt{\bcC}/\bcC)$ has positive dimensional fibers, by choosing $l$ and hence $n$ to be large. So, if we consider an element $(c,s_1,\cdots,s_{g+n-1})$ in $F_s$ and a curve through it, then it intersects the ample divisor given by $\sum_i \pi_i^{-1}(L')$, on $\wt{S}^{g+n-1}$. Then we have some of $s_i$ is contained in $C$. So iterating this process we have, the elements of $F_s$ are supported on $C^k\times \wt{S}^{g+n-k}$, where $k$ is some natural number depending on $n$. Note that the genus of $C$ is fixed and it is less than $k$ for a choice of a very large multiple of the very ample line bundle $L$. Thus the elements of $F_s$ are supported on $C^{n_0}\times \wt{S}^{g+n-k}$.
Therefore considering $\Gamma=\Delta_{\wt{S}}-Gr(i)$, we get that $\Gamma_*(\wt{S}^{g+n})=\Gamma_*(\wt{S}^{m_0})$, where $m_0$ is strictly less than $g+n$.
Now we prove by induction that $\Gamma_*(\wt{S}^{m_0})=\Gamma_*(\wt{S}^m)$ for all $m\geq g+n$.
So suppose that $\Gamma_*(\wt{S}^k)=\Gamma_*(\wt{S}^{m_0})$ for $k\geq g+n$, then we have to prove that $\Gamma_*(\wt{S}^{k+1})=\Gamma_*(\wt{S}^{m_0})$. So any element in $\Gamma_*(\wt{S}^{k+1})$ can be written as $$\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)\;.$$ Now let $k-m_0=m$, then $m_0+1=k-m+1$. Since $k-m<k$, we have $k-m+1\leq k$, so $m_0+1\leq k$, so we have the cycle
$$\Gamma_*(t_1+\cdots+t_{m_0})+\Gamma_*(t)$$
supported on $\wt{S}^k$, hence on $\wt{S}^{m_0}$. So we have $$\Gamma_*(\wt{S}^{m_0})=\Gamma_*(\wt{S}^k)$$ for all $k$ greater or equal than $g+n$. Now any element $z$ in $A_0(\wt{S})$, can be written as a difference of two effective cycles $z^+,z^-$ of the same degree. Then we have
$$\Gamma_*(z)=\Gamma_*(z^+)-\Gamma_*(z^-)$$
and $\Gamma_*(z^{\pm})$ belong to $\Gamma_*(\wt{S}^{m_0})$. So let $\Gamma'$ be the correspondence on $\wt{S}^{2m_0}\times \wt{S}$ defined as
$$\sum_{l\leq m_0}(pr_{l},pr_{\wt{S}})^*\Gamma-\sum_{m_0+1\leq l\leq 2m_0}(pr_l,pr_{\wt{S}})^* \Gamma$$
where $\pr_l$ is the $l$-th projection from $\wt{S}^l$ to $\wt{S}$, and $\pr_{\wt{S}}$ is from ${\wt{S}}^{2m_0}\times \wt{S}$ to the last copy of $\wt{S}$. Then we have
$$\im(\Gamma_*)=\Gamma'_*(\wt{S}^{2m_0})\;.$$
This would imply that the image of $\Gamma_*$ is finite dimensional, so as proved in \cite{Voi}[Theorem 2.3] the induced involution on $A_0(\wt{S})$ factors through the Albanese variety of $\wt{S}$ which is trivial. Hence $i_*$ acts as identity on $A_0(\wt{S})$. By the blow up formula
$$A_0(\wt{S})\cong A_0(S)$$
hence the involution $i_*$ acts as identity on $A_0(S)$.
\end{proof}
\begin{remark}
\label{rem1}
Suppose in the above corollary \ref{cor1} we have the fixed locus of the involution consisting of finitely many isolated fixed points and one rational curve. Then on $\wt{S}/i$ we have to prove that the number
$$L.(-2K_{\wt{S}/i}-\sum_j E_j-R)-2>0$$
Here $R$ is the strict transform of the rational curve component in the fixed locus, $E_j$ is the exceptional curve over the isolated fixed point $p_j$.
Putting $$L=m(f^*(H)-\sum_j E_j)$$
we have to prove that
$$-m(f^*(H)-\sum_j E_j)(2f^*(K_{S/i})+3\sum_j E_j+R)-2>0$$
So for simplicity let us assume that the number of isolated fixed point is one, so there is one exceptional divisor.
Thus we have to prove that
$$-m(f^*(H)- E)(2f^*(K_{S/i})+3E+R)-2>0$$
that is
$$-2mf^*(H)f^*(K_{S/i})-3m-2-mf^*(H).R>0$$
Since $R=f^*(L)$ where $L$ is a line in $S/i$, we have
$$f^*(H).f^*(L)=f^*(H.L)=f^*(p)=2p$$
Putting this in the above equation
$$-2mf^*(H).f^*(K_{S/i})-3m-2-2m=-mf^*(H)f^*(K_{S/i})-5m-2$$
and it has to be greater than zero. By choosing as before $lH$ in place of $H$ and assuming that $f^*(lmH)-mE$ and $f^*(mH)-mE$ are both very ample, we have
$$-2mf^*(lH).f^*(K_{S/i})>2l(m+1)$$
and $$2l(m+1)>5m+2$$ for high values of $l$. Therefore in this case also the argument of \ref{cor1} works and we get that the involution acts as identity on $A_0(S)$.
\end{remark}
\begin{example}
Let $F$ be a singular quintic, invariant under an involution on $\PR^3$ and having simple elliptic singularities at the points
$$(1:0:0:0), (0:1:0:0), (0:0:1:0), (0:0:0:1)$$
as studied in \cite{DW}[section 2]. Let us consider the minimal desingularization of this surface $F$ and call it $V$. This surface $V$ is a smooth, projective surface of general type with $p_g=q=0$, equipped with an involution. The fixed locus of the involution on $F$ consists of a line and five isolated fixed points. These five points are different from the singular points of $F$. Let us consider the pre-images of these five points on $V$. They are the isolated fixed points of the involution on $V$. Consider the blow-up of $V$ at the five isolated fixed points of the involution on $V$. Denote it by $V'$. This surface $V'$ is equipped with an involution $i$. Then it is proven in \cite{DW}[proposition 3.1], that $V'/i$ is a non-singular, rational surface. So by the above remark, \ref{rem1}, the involution acts as identity on $A_0(V')$, provided that there exists a line bundle $L$ on $V'/i$ such that
$$L.(-K_{V'/i})-1>0\;.$$
Following the discussion in \cite{DW}[discussion after proposition 3.1] we consider the minimal model of $V'/i$. Call it $S$, it is a minimal elliptic surface as mentioned in \cite{DW}[discussion after proposition 3.1]. For this $S$ we have
$$K_S^2=0$$
then by Riemann-Roch
$$h^0(-K_S)\geq K_{S}^2+\chi (\bcO_S)+1=1+1=2$$
as $h^0(2K_S)=-1$ ($S$ is rational, so $|2K_S|=\emptyset$) and $\chi(\bcO_S)=1$. Therefore for a very ample line bundle of large degree on $S$, we have
$$-L.K_S-1>0\;.$$
Now by construction, as in \cite{DW}, the surface $S$ is a contraction of $V'/i$ along two elliptic curves of self-intersection $-1$. Let $\pi$ be the blow-down map from $V'/i$ to $S$. Therefore for a very ample line bundle $$L=\pi^*(L')-E_1-E_2$$ and $$K_{V'/i}=\pi^*(K_S)+E_1+E_2$$ on $V'/i$, we have
$$-(\pi^*(L')-E_1-E_2)(\pi^*(K_S)+E_1+E_2)-1=-\pi^*(L'.K_S)-3>0$$
for some very ample line bundle of the form $$L=m\pi^*(L')-E_1-E_2\;.$$ Here $m$ is a very large positive integer. Thus we have
$$-L.K_{V'/i}-1>0\;.$$
Therefore there exists a line bundle $L$ on $V'/i$ such that
$$L^2-2g+1=n$$
for some positive integer $n$, here $g$ is the genus of a smooth curve in $|L|$, as required in the condition of the corollary \ref{cor1}.
Since $V'/i$ is rational, the involution also acts as $-1$, which results in the fact that every element in $A_0(V')$ is $2$-torsion; hence by Roitman's theorem $A_0(V')=\{0\}$ (as $q=0$ for $V'$). Since by the blow up formula
$$A_0(V)\cong A_0(V')$$
we have $A_0(V)=\{0\}$. Thus the Bloch's conjecture holds on $V$.
\end{example}
\subsection{Generalization of the above result}
The technique of the proof of \ref{theorem2} is more general, in the sense that we only use the conic bundle structure of the cubic fourfold and the conic bundle structure on the hyperplane sections of the cubic fourfold. Suppose that we consider a fourfold $X$, which is unirational, so contains sufficiently many lines. Now consider a fixed line $l$ on $X$, and project onto $\PR^3$ from this line. Suppose that the discriminant surface $S$ inside $\PR^3$
admits a double cover $T$ of $S$ branched along finitely many points, inside the Fano variety of lines $F(X)$ of $X$.
The proof of \ref{theorem2} tells us that we have the following theorem:
\begin{theorem}
Let $X$ be a fourfold embedded in $\PR^5$, which admits a conic bundle structure. Let $S$ denote the discriminant surface for the conic bundle structure such that it admits a branched cover at finitely many points. Then for any very ample line bundle $L$ on $S$, we cannot have the equality
$$L^2-g+1=g+n$$
where $g$ is the genus of a curve in the linear system of $L$ and $n$ is a positive integer.
\end{theorem}
\end{document} |
\begin{document}
\title{Optimal Rescaling and the Mahalanobis Distance}
\subtitle{}
\author{Przemys\l{}aw Spurek \and
Jacek Tabor
}
\institute{P. Spurek \at
Faculty of Mathematics and Computer Science, Jagiellonian University\\
\L ojasiewicza 6\\
30-348 Krak\'ow\\
Poland\\
\email{przemyslaw.spurek@ii.uj.edu.pl}
\and
J. Tabor \at
Faculty of Mathematics and Computer Science, Jagiellonian University\\
\L ojasiewicza 6\\
30-348 Krak\'ow\\
Poland\\
\email{jacek.tabor@ii.uj.edu.pl}
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
One of the basic problems in data analysis lies in choosing the optimal rescaling (change of coordinate system) to study properties of a given data-set $Y$. The classical Mahalanobis approach has its basis in the classical normalization/rescaling formula $Y \ni y \to \Sigma_Y^{-1/2} \cdot (y-\mathrm{m}_Y)$,
where $\mathrm{m}_Y$ denotes the mean of $Y$ and $\Sigma_Y$ the covariance matrix.
Based on the cross-entropy we generalize this approach
and define the parameter which measures the fit of a
given affine rescaling of $Y$ compared to the Mahalanobis one.
This allows in particular to find an optimal change of coordinate system which satisfies some additional conditions. In particular we show that in the case when we put the origin of the coordinate system at $\mathrm{m}$ the optimal choice is given by the transformation $Y \ni y \to \Sigma^{-1/2} \cdot (y-\mathrm{m})$, where
$$
\Sigma=\Sigma_Y\Bigl(\Sigma_Y-\frac{(\mathrm{m}-\mathrm{m}_Y)(\mathrm{m}-\mathrm{m}_Y)^T}{1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2} \Bigr)^{-1}\Sigma_Y.
$$
\keywords{Maximum Likelihood Estimation \and MLE \and Cross Entropy \and Optimal Coordinates \and Mahalanobis distance}
\end{abstract}
\section{Introduction}
One of the crucial problems in statistics, compression, discrimination analysis
and in general in data mining is how to choose the optimal linear coordinate system and define a distance which ``optimally'' underlines the internal structure of the data \cite{Bo-Gr,DM-JR,Ha-Ka,Ja-Sa,Ra-Ma,Re,Ti,Kr-Ka,ma_dis_a_3,ma_dis_a_4,ma_dis_a_5}.
The typical answer is the Mahalanobis distance \cite{ma_dis_a_1,ma_dis_a_2,Ma,DM-JR}, which is strictly connected with PCA \cite{jolliffe2005principal,pca_a_1,pca_a_2}.
In some other cases we choose just a weighted Euclidean distance.
Our approach uses a method based on MLE (Maximum Likelihood Estimation) \cite{Le-Ca,Bo,mle_a_3}.
To explain it more precisely consider the data-set $Y \subset \mathbb{R}^N$.
If we allow the translation of the origin of coordinate system,
in one dimensional case we usually apply the normalization:
$s:Y \ni y \to \sigma_Y^{-1}(y-\mathrm{m}_Y)$,
which in the multivariate case is replaced by
$$
s:Y \ni y \to \Sigma_Y^{-1/2}(y-\mathrm{m}_Y),
$$
where $\mathrm{m}_Y$ denotes the mean and $\Sigma_Y$ the covariance matrix of $Y$. Then we obtain
that the coordinates are uncorrelated, and the covariance matrix equals to identity. Taking the distance between the transformation of points $x,y$:
$$
\|sx-sy\|^2=(sx-sy)^T (sx-sy)=(x-y)^T \Sigma_{Y}^{-1}(x-y)
$$
we arrive naturally at the definition of the Mahalanobis distance:
$$
\|x-y\|_{\Sigma}^2:=(x-y)^T \Sigma^{-1}(x-y).
$$
If we do not allow the translation of the origin away from zero, which in some cases is natural, we usually only scale/normalize each coordinate by dividing it by its mean. This approach usually has good results if the standard deviation is small in comparison to the mean. In the opposite case, when the mean is small, dividing by it may ``unnaturally'' widen the variable under consideration.
The main results of the paper provide the optimal change of coordinate system which satisfies some additional conditions. In particular we show that in the case when we put the origin of the coordinate system at $ \mathrm{m} $ the optimal choice is given by the mapping $Y \ni y \to \Sigma^{-1/2} \cdot (y-\mathrm{m})$, where
$$
\Sigma=\Sigma_Y\Bigl(\Sigma_Y-\frac{(\mathrm{m}-\mathrm{m}_Y)(\mathrm{m}-\mathrm{m}_Y)^T}{1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2} \Bigr)^{-1}\Sigma_Y.
$$
\begin{figure}[htbp]
\begin{center}
\subfigure[]
{\label{s1}
\begin{minipage}{0.3\linewidth}
\includegraphics[width=1.4in]{cs_3}
\includegraphics[width=1.4in]{cs_3_b}
\end{minipage}}
\subfigure[]
{\label{s2}
\begin{minipage}{0.3\linewidth}
\includegraphics[width=1.4in]{cs_4}
\includegraphics[width=1.4in]{cs_4_b}
\end{minipage}
}
\subfigure[]
{\label{s3}
\begin{minipage}{0.3\linewidth}
\includegraphics[width=1.4in]{cs_5}
\includegraphics[width=1.4in]{cs_5_b}
\end{minipage}
}
\end{center}
\caption{Optimal coordinate systems and the data rewritten in the new bases in various situations: a) optimal Mahalanobis base; b) optimal base with the center at $[0,0]$; c) optimal base in the case of optimal rescaling of each coordinate.}
\label{fig:cs_2}
\end{figure}
\begin{example}\label{ex:1}
We illustrate our results on two-dimensional data drawn from the normal distribution
with mean $\mathrm{m}=[3,4]^T$ and covariance $\Sigma=\left[\begin{array}{cc}1&0.3\\0.3&0.6\end{array}\right]$. In Figure \ref{s1} we present the base which represents the best (Mahalanobis) rescaling and the data in the new coordinates. In \ref{s2} we present the optimal base in the case when we do not allow moving the origin of the coordinate system from zero, together with the data in the new base. In Figure \ref{s3} we present the optimal rescaling in the case when we allow the change of the origin but restrict ourselves to rescaling each coordinate separately, together with the data in the new base.
\end{example}
To explain more precisely what we mean by optimal coordinates (in the
given class of coordinate systems)
we need to introduce the function which
measures the ``match'' of a given coordinate system to the data. Suppose that we are given a base $\mathrm{v}=(v_1,\ldots,v_N)$ of $\mathbb{R}^N$ and we put the origin of the coordinate system at $\mathrm{m} \in \mathbb{R}^N$. Then
by $\mathbb{N}or_{[\mathrm{m},\mathrm{v}]}$ we denote the ``normalized'' Gaussian density with respect to the base $\mathrm{v}$ with center at $\mathrm{m}$. In other words
$$
\mathbb{N}or_{[\mathrm{m},\mathrm{v}]}(\mathrm{m}+x_1v_1+\ldots+x_N v_N)=
\frac{1}{(2\pi)^{N/2}}e^{-(x_1^2+\ldots+x_N^2)/2}.
$$
Observe that $\mathbb{N}or_{[\mathrm{m},\mathrm{v}]}=\mathbb{N}or_{(\mathrm{m},(vv^T)^{-1})}$,
where by $\mathbb{N}or_{(\mathrm{m},\Sigma)}$ we denote the normal density with mean $\mathrm{m}$
and covariance $\Sigma$. Then we can measure the ``match/fit'' of the coordinate system by the cross-entropy
\begin{equation} \label{e0}
H^{\times}(Y\|\mathbb{N}or_{[\mathrm{m},\mathrm{v}]})
\end{equation}
of $Y$ with respect to $\mathbb{N}or_{[\mathrm{m},\mathrm{v}]}$.
One can easily observe that the minimum in \eqref{e0} is attained by the Mahalanobis coordinate system, where the mean and covariance are those of $Y$, and thus we obtain a confirmation that the commonly used procedure is reasonable.
This allows to define the match of data $Y$
with respect to the coordinate system (or more precisely the
corresponding Gaussian density) by the formula
\begin{equation} \label{eq:M_la}
\mathrm{M}(Y\|\mathbb{N}or_{(\mathrm{m},\Sigma)}):=H^{\times}(Y\|\mathbb{N}or_{(\mathrm{m},\Sigma)})-H^{\times}(Y\|\mathbb{N}or_Y) \geq 0,
\end{equation}
where $\mathbb{N}or_Y$ denotes the normal density with mean and covariance of $Y$.
From the well-known formulas \cite{Ni-No,Pe} concerning relative entropy and cross-entropy:
\begin{equation} \label{e1.5}
\begin{array}{l}
H^{\times}(Y\|\mathbb{N}or_{(\mathrm{m},\Sigma)})= \frac{N}{2} \ln(2\pi)+\frac{1}{2}\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma}+\frac{1}{2}\mathrm{tr}(\Sigma^{-1}\Sigma_Y)+\frac{1}{2}\ln \det \Sigma,\\[1ex]
H^{\times}(Y\|\mathbb{N}or_Y) =\frac{N}{2} \ln(2\pi e)+\frac{1}{2}\ln \det \Sigma_Y,
\end{array}
\end{equation}
we can easily deduce the formula for our match function for the Gaussian distribution $\mathbb{N}or(\mathrm{m},\Sigma)$ in $\mathbb{R}^N$:
\begin{equation} \label{e2}
\mathrm{M}(Y\|\mathbb{N}or_{(\mathrm{m},\Sigma)})= \frac{1}{2}\left(\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma}+\mathrm{tr}(\Sigma^{-1}\Sigma_Y)-\ln \det(\Sigma^{-1}\Sigma_Y)-N\right).
\end{equation}
We can now
consider coordinate systems identified with
respective subclasses of Gaussian distributions. Consequently for a family
$\mathcal{F}$ of Gaussian distributions we put
$$
\mathrm{M}(Y\|\mathcal{F}):=\inf_{f \in \mathcal{F}}\mathrm{M}(Y\|f) = \inf_{f \in \mathcal{F}} H^{\times}(Y\| f)-H^{\times}(Y\|\mathbb{N}or_Y).
$$
Clearly, finding $f$ which minimizes the above is equivalent to finding
$f$ which minimizes the cross-entropy
$
H^{\times}(Y\|\mathcal{F}) := \inf_{f \in \mathcal{F}} H^{\times}(Y\|f)
$.
Since cross-entropy and log-likelihood functions differ only by sign, we arrive
at the typical MLE \cite{Le-Ca, Bo,mle_a_3} problem.
As $\mathcal{F}$ we consider the following typical subfamilies of all Gaussians $\mathcal{G}$:
\newline
\begin{tabular}{ l c p{9.5cm}}
$\mathcal{G}_{\mathrm{m}}$ & -- & Gaussian densities with mean at $\mathrm{m}$;
\\[0.5ex]
$\mathcal{G}_{s\mathrm{I}}$ & -- & Gaussian densities with covariance proportional to identity; \\[0.5ex]
$\mathcal{G}_{\mathrm{m},s\mathrm{I}}$ & -- & Gaussian densities with mean at $\mathrm{m}$ and covariance proportional to identity; \\[0.5ex]
$\mathcal{G}_{\mathrm{diag}}$ & -- & Gaussian densities with diagonal covariance; \\[0.5ex]
$\mathcal{G}_{\mathrm{m},\mathrm{diag}}$ & -- & Gaussian densities with mean at $\mathrm{m}$ and diagonal covariance. \\[0.5ex]
\end{tabular}
\newline
Observe for example that the use of family $\mathcal{G}_{\mathrm{m},\mathrm{diag}}$ means
that we consider the coordinate systems which have origin at $\mathrm{m}$
and which have axes parallel to the original (cartesian) ones.
Moreover, if we have found the optimal Gaussian $\mathbb{N}or_{(\mathrm{m},\Sigma)}$
in our class of densities (identified with coordinate systems), we can transform the data $Y$ into those coordinates by the affine
transformation $Y \ni y \to \Sigma^{-1/2}(y-\mathrm{m}) \in \mathbb{R}^N$.
\section{Rescaling}
Assume that we have fixed the origin of the coordinate system at the point $\mathrm{m}$
and that we want to find how we should (uniformly) rescale the coordinates to optimally fit the data. This means that we search for $s$ such that
$s \to \mathrm{M}(Y\|\mathbb{N}or_{(\mathrm{m},s\mathrm{I})})$ attains its minimum. Since
\begin{equation} \label{e3}
H^{\times}(Y\|\mathbb{N}or_{(\mathrm{m},s\mathrm{I})})=\frac{1}{2}\big((\mathrm{tr}(\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2)s^{-1}+N \ln s+N \ln(2\pi)\big),
\end{equation}
by trivial calculations we obtain that the above function attains its minimum
\begin{equation} \label{si}
\frac{N}{2}\big(\ln \frac{\mathrm{tr} (\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2}{N}+\ln(2\pi e) \big)
\end{equation}
for
\begin{equation} \label{e3.5}
s=\frac{\mathrm{tr}(\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2}{N}.
\end{equation}
Thus, applying \eqref{e1.5} to \eqref{eq:M_la}, we arrive at the following theorem.
\begin{theorem} \label{bas}
Let $Y$ be a data-set with invertible covariance matrix and let $\mathrm{m}$ be fixed. Then $\mathrm{M}(Y\| \mathcal{G}_{\mathrm{m},s \mathrm{I}})$ is minimized for $s=(\mathrm{tr}(\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2)/N$, and equals
$$
\mathrm{M}(Y\| \mathcal{G}_{\mathrm{m},s \mathrm{I}}) = \frac{N}{2}\ln \frac{\mathrm{tr}(\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2}{N}-\frac{1}{2}\ln \det \Sigma_Y.
$$
\end{theorem}
If we allow in above Theorem the change of the origin, to minimize the value of $\mathrm{M}$ we have to clearly put $\mathrm{m}$ at $\mathrm{m}_Y$:
\begin{corollary}
Let $Y$ be a data-set with invertible covariance matrix. Then $\mathrm{M}(Y\| \mathcal{G}_{s \mathrm{I}})$ is minimized for $\mathrm{m}=\mathrm{m}_Y$, $s=\frac{1}{N}\mathrm{tr}(\Sigma_Y)$, and equals
$$
\mathrm{M}(Y\| \mathcal{G}_{s \mathrm{I}} )=\frac{N}{2}\ln (\mathrm{tr} (\Sigma_Y)/N )-\frac{1}{2}\ln \det \Sigma_Y.
$$
\end{corollary}
\begin{remark}
Assume that we want to move the origin to $\mathrm{m}$, and uniformly
rescale. Then Theorem \ref{bas} implies that
$$
y \to (y-\mathrm{m})/\sqrt{\frac{1}{N}(\mathrm{tr}(\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2)}
$$
is the optimal rescaling of this type. Thus in the case of univariate data, the optimal
rescaling when we do not change the origin of the coordinate system
is given by
$y \to y/\sqrt{\sigma_Y^2+\mathrm{m}_Y^2}=y/\sqrt{E(Y^2)}$.
\end{remark}
We consider the case when we allow to rescale each coordinate $Y_i$ of $Y=(Y_1,\ldots,Y_N)$ separately. Then the optimal change of
coordinates of this form is given by optimal rescaling on each coordinate.
\begin{corollary}
Let $Y$ be a data-set with invertible covariance matrix and let $\mathrm{m}$ be fixed. Then $\mathrm{M}(Y\| \mathcal{G}_{\mathrm{m},\mathrm{diag}})$ is minimized for $s_i=(\Sigma_Y)_{ii}+|\mathrm{m}_i-(\mathrm{m}_Y)_i|^2$, and equals
$$
\mathrm{M}(Y\| \mathcal{G}_{\mathrm{m},\mathrm{diag}}) = \frac{1}{2}\sum_{i=1}^N \ln \big((\Sigma_Y)_{ii}+|\mathrm{m}_i-(\mathrm{m}_Y)_i|^2\big)-\frac{1}{2}\ln \det \Sigma_Y.
$$
\end{corollary}
\begin{proof}
For $\mathbb{N}or_{(\mathrm{m},\mathrm{diag}(s_1,\ldots,s_n))} \in \mathcal{G}_{\mathrm{m},\mathrm{diag}}$ we have
$$
\begin{array}{l}
\mathrm{M}(Y\| \mathbb{N}or_{(\mathrm{m},\mathrm{diag}(s_1,\ldots,s_n))})=
H^{\times}(Y\|\mathbb{N}or_{(\mathrm{m},\mathrm{diag}(s_1,\ldots,s_n))})-H^{\times}(Y\|\mathbb{N}or_Y) \\[1ex]
=\sum \limits_{i=1}^N H^{\times}(Y_i\|\mathbb{N}or_{(\mathrm{m}_i,s_i)})-H^{\times}(Y\|\mathbb{N}or_Y).
\end{array}
$$
By applying \eqref{si} to the univariate data $Y_i$ and \eqref{e1.5}
we obtain that the minimum of $\mathrm{M}(Y\|\mathbb{N}or_{(\mathrm{m},\mathrm{diag}(s_1,\ldots,s_n))})$
is realized for $s_i=(\Sigma_Y)_{ii}+|\mathrm{m}_i-(\mathrm{m}_Y)_i|^2$ and equals
$$
\frac{1}{2}\sum_{i=1}^N\big(\ln ((\Sigma_Y)_{ii}+|\mathrm{m}_i-(\mathrm{m}_Y)_i|^2)+\ln(2\pi e) \big)-\frac{1}{2}\big(N \ln(2\pi e)+\ln \det \Sigma_Y\big)
$$
$$
=\frac{1}{2}\sum_{i=1}^N \ln ((\Sigma_Y)_{ii}+|\mathrm{m}_i-(\mathrm{m}_Y)_i|^2)-\frac{1}{2}\ln \det \Sigma_Y.
$$
\end{proof}
If we additionally allow the change of the origin, we should put $\mathrm{m}=\mathrm{m}_Y$ and the rescaling takes the form
$y \to (y-\mathrm{m}_Y)/\sqrt{\mathrm{tr}(\Sigma_Y)/N}$.
Now we focus our attention on the problem how to find the optimal coordinate system in the general case. To do so we first need to present
a simple consequence of the famous von Neuman trace inequality
\cite{Gr,Mi}.
\medskip
\noindent{\bf Theorem [von Neumann trace inequality].}{\em \/ Let $E,F$ be complex $N \times N$ matrices. Then
\begin{equation} \label{neu}
|\mathrm{tr}(EF) | \leq \sum_{i=1}^N s_i(E)\cdot s_i(F),
\end{equation}
where $s_i(D)$ denote the ordered (decreasingly)
singular values of the matrix $D$.}
\medskip
Let us recall that for a symmetric positive matrix the
eigenvalues coincide with the singular values.
Given $\lambda_1,\ldots,\lambda_N \in \mathbb{R}$, by $S_{\lambda_1,\ldots,\lambda_N}$ we denote the set of all symmetric matrices with eigenvalues $\lambda_1,\ldots,\lambda_N$.
\begin{proposition}
Let $B$ be a symmetric nonnegative matrix with eigenvalues $\beta_1 \geq \ldots \geq \beta_N \geq 0$.
Let $0 \leq \lambda_1 \leq \ldots \leq \lambda_N$ be fixed.
Then
$$
\min_{A \in S_{\lambda_1,\ldots,\lambda_N}} \mathrm{tr}(AB)=\sum_i \lambda_i \beta_i.
$$
\end{proposition}
\begin{proof}
Let $e_i$ denote the orthogonal basis built from the eigenvectors of $B$, and let the
operator $\bar A$ be defined in this base by $\bar A(e_i)=\lambda_i e_i$. Then trivially
$$
\min_{A \in S_{\lambda_1,\ldots,\lambda_N}} \mathrm{tr}(AB) \leq
\mathrm{tr}(\bar AB)=\sum_i \lambda_i \beta_i.
$$
To prove the inverse inequality we will use the
von Neumann trace inequality. Let $A \in S_{\lambda_1,\ldots,\lambda_N}$ be arbitrary. We apply the inequality \eqref{neu} for $E=\lambda_N \mathrm{I}-A$, $F=B$.
Since $E$ and $F$ are symmetric nonnegative definite matrices, their eigenvalues
$\lambda_N-\lambda_i$ and $\beta_i$ coincide with their singular values, and therefore by \eqref{neu}
\begin{equation} \label{nu2}
\mathrm{tr}((\lambda_N\mathrm{I}-A)B) \leq \sum_i(\lambda_N-\lambda_i)\beta_i=
\lambda_N \sum_i \beta_i -\sum_i \lambda_i \beta_i.
\end{equation}
Since
$$
\mathrm{tr}((\lambda_N\mathrm{I}-A)B)=\lambda_N \sum_i \beta_i -\mathrm{tr}(AB),
$$
from inequality \eqref{nu2} we obtain that
$\mathrm{tr}(AB) \geq \sum_i \lambda_i \beta_i$.
\end{proof}
Now we proceed to the main result of the paper.
\begin{theorem}
Let $Y$ be a data-set and $\mathrm{m} \in \mathbb{R}^N$ be fixed. Then
$$
\mathrm{M}(Y\|\mathcal{G}_{\mathrm{m}})=\frac{1}{2}\ln(1+\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma_Y})
$$
and is attained for the density $\mathcal{N}(\mathrm{m},\Sigma) \in \mathcal{G}_{\mathrm{m}}$, where
$$
\Sigma=\Sigma_Y\Big(\Sigma_Y-\frac{(\mathrm{m}-\mathrm{m}_Y)(\mathrm{m}-\mathrm{m}_Y)^T}{1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2} \Big)^{-1}\Sigma_Y.
$$
\end{theorem}
\begin{proof}
Let us first observe that by applying the substitution
$$
A=\Sigma_Y^{1/2}\Sigma^{-1}\Sigma_Y^{1/2},
\quad v=\Sigma_Y^{-1/2}(\mathrm{m}-\mathrm{m}_Y),
$$
we obtain
\begin{equation} \label{e}
\begin{array}{l}
H^{\times}(Y\|\mathcal{N}_{(\mathrm{m},\Sigma)})= \frac{1}{2}
\left(\mathrm{tr}(\Sigma^{-1}\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma}+\ln \det\Sigma+N\ln(2\pi)\right) \\[1ex]
=\frac{1}{2}\big(\mathrm{tr}(\Sigma^{-1}\Sigma_Y)+(\mathrm{m}-\mathrm{m}_Y)^T\Sigma^{-1}(\mathrm{m}-\mathrm{m}_Y)-\ln \det \Sigma^{-1}\Sigma_Y\\[0.5ex]
\phantom{=\frac{1}{2}\big(}+\ln \det \Sigma_Y+N\ln(2\pi)\big) \\[1ex]
=\frac{1}{2}\left(\mathrm{tr}(A)+v^TAv-\ln \det A+\ln \det \Sigma_Y+N\ln(2\pi)\right).
\end{array}
\end{equation}
Observe that $A$ is then a symmetric positive matrix, and that given a symmetric positive matrix $A$ we can uniquely determine $\Sigma$
by the formula
\begin{equation} \label{wyzna}
\Sigma=\Sigma_Y^{1/2}A^{-1}\Sigma_Y^{1/2}.
\end{equation}
Thus finding the minimum of \eqref{e} reduces to finding a symmetric
positive matrix $A$ which minimizes the value of
\begin{equation} \label{now}
\mathrm{tr}(A)+v^TAv-\ln \det A.
\end{equation}
Let us first consider $A \in S_{\lambda_1,\ldots,\lambda_N}$,
where $0 < \lambda_1 \leq \ldots \leq \lambda_N$ are fixed. Our
aim is to minimize
$$
v^TAv=\mathrm{tr}(v^TAv)=\mathrm{tr}(A \cdot (vv^T)).
$$
We fix an orthonormal basis such that $v/\|v\|$ is its first element,
and then by applying the von Neumann trace formula we obtain that
the above is minimized when $v$ is the eigenvector of $A$
corresponding to $\lambda_1$, and thus the minimum equals
$$
\lambda_1 \|v\|^2.
$$
Consequently we arrive at the minimization problem
$$
\lambda_1 (1+\|v\|^2)+\sum_{i>1}\lambda_i-\sum_i \ln \lambda_i.
$$
Now one can easily verify that the minimum of the above
is realized for
$$
\lambda_1=1/(1+\|v\|^2), \quad \lambda_i=1 \mbox{ for } i >1,
$$
and then \eqref{now} equals
$$
N+\ln(1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2),
$$
while the formula for $A$ minimizing it is given by
$$
A=\mathrm{I}-\frac{vv^T}{1+\|v\|^2}.
$$
Consequently then the value of \eqref{e} is
$$
\frac{1}{2}\left(\ln(1+\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma_Y})+\ln|\Sigma_Y|+N\ln(2\pi e)\right),
$$
and is attained by \eqref{wyzna} for
$$
\Sigma=\Sigma_Y^{1/2}\Big(\mathrm{I}-\frac{\Sigma_Y^{-1/2}(\mathrm{m}-\mathrm{m}_Y)(\mathrm{m}-\mathrm{m}_Y)^T\Sigma_Y^{-1/2}}{1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2} \Big)^{-1}\Sigma_Y^{1/2}
$$
$$
=\Sigma_Y\Big(\Sigma_Y-\frac{(\mathrm{m}-\mathrm{m}_Y)(\mathrm{m}-\mathrm{m}_Y)^T}{1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2} \Big)^{-1}\Sigma_Y.
$$
\end{proof}
By the above theorem we get the formula for $H^{\times}(Y\|\mathcal{G}_{\mathrm{m}})$.
\begin{corollary}\label{the:cs_4}
Let $\mathrm{m} \in \mathbb{R}^N$ be fixed. Then
$$
H^{\times}(Y\|\mathcal{G}_{\mathrm{m}}) = \frac{1}{2}\left(\ln(1+\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma_Y})+\ln|\Sigma_Y|+N\ln(2\pi e)\right),
$$
and is attained for the density $\mathcal{N}(\mathrm{m},\Sigma)$, where
$
\Sigma=\Sigma_Y\big(\Sigma_Y-\frac{(\mathrm{m}-\mathrm{m}_Y)(\mathrm{m}-\mathrm{m}_Y)^T}{1+\|\mathrm{m}-\mathrm{m}_Y\|_{\Sigma_Y}^2} \big)^{-1}\Sigma_Y
$.
\end{corollary}
\begin{table}[!h]\centering
\renewcommand{\arraystretch}{1.7}
\begin{tabular}{||l|l||} \hline \hline
$\mathcal{F}$ & $M^{\times}(Y\|\mathcal{F})$ \\[0.5ex]
\hline \hline
$\mathcal{G}$ & $0$
\\ \hline
$\mathcal{G}_{\mathrm{m}}$ & $\frac{1}{2}\left(\ln(1+\|\mathrm{m}-\mathrm{m}_Y\|^2_{\Sigma_Y})+\ln|\Sigma_Y|+N\ln(2\pi e)\right)$
\\ \hline
$\mathcal{G}_{sI}$ & $\frac{N}{2}\ln \left(\mathrm{tr} \frac{\Sigma_Y}{N} \right)-\frac{1}{2}\ln \det \Sigma_Y$
\\ \hline
$\mathcal{G}_{\mathrm{m},sI}$ & $ \frac{N}{2}\ln \left( \frac{\mathrm{tr}(\Sigma_Y)+\|\mathrm{m}-\mathrm{m}_Y\|^2}{N} \right) - \frac{1}{2}\ln \left( \det \Sigma_Y \right) $
\\ \hline
$\mathcal{G}_{\mathrm{diag}}$ & $ \frac{N}{2}\ln \left( \frac{\mathrm{tr}(\Sigma_Y) }{N} \right) -\frac{1}{2}\ln \det \Sigma_Y$
\\ \hline
$\mathcal{G}_{\mathrm{m},\mathrm{diag}}$ & $\frac{1}{2}\sum \limits_{i=1}^N \ln \big((\Sigma_Y)_{ii}+|\mathrm{m}_i-(\mathrm{m}_Y)_i|^2\big)-\frac{1}{2}\ln \left( \det \Sigma_Y \right) $
\\ \hline
\hline
\end{tabular}
\caption{Table of $M^{\times}(Y\|\mathcal{F})$ with respect to Gaussian subfamilies.}
\label{tab1:cec}
\end{table}
At the end of this article we illustrate our results on the data generated from the classical Lena picture.
\begin{example}\label{ex:2}
Let us consider the classical Lena picture from The USC-SIPI Image Database (\url{http://sipi.usc.edu/database/}).
First, we interpret the photo as a dataset, as in the JPG compression. We do this by dividing it into 8 by 8 pixels, where each pixel is described (in RGB) by using 3 parameters. Consequently each of the pieces is represented as a vector from $\mathbb{R}^{192}$. By this operation we obtain a dataset $Y$ from $ \mathbb{R}^{192} $.
In Table \ref{tab:jpg} we present values of $\mathrm{M}(Y\|\mathcal{G}_{\mathrm{m},\mathcal{F}})$ for various $\mathcal{F}$. Moreover we consider the case when the origin is at $\mathrm{m} = \left[\frac{1}{2},\ldots,\frac{1}{2} \right] $, similarly as in the JPG format.
\begin{table}[ht]
\renewcommand{\arraystretch}{1.5}
\centering
\begin{tabular}{ || l | c | c | c || }
\hline\hline
 & $\mathrm{M}(Y\|\mathcal{G}_{\mathrm{m}})$ & $\mathrm{M}(Y\|\mathcal{G}_{\mathrm{m},\mathrm{diag}})$ & $\mathrm{M}(Y\|\mathcal{G}_{\mathrm{m},sI})$ \\
\hline
\hline
$\mathrm{m} = \mathrm{m}_{Y}$ & 0 & 672.9 & 679.168 \\
$\mathrm{m} = \left[\frac{1}{2},\ldots,\frac{1}{2} \right] $ & 0.520298 & 717.217 & 704.124 \\
$\mathrm{m} = \left[0,\ldots,0 \right] $ & 2.73125 & 883.227 & 1031 \\
\hline
\hline
\end{tabular}
\caption{Values of $\mathrm{M}(Y\|\mathcal{F})$ for different $\mathcal{F}$ and data from Example \ref{ex:2}.}
\label{tab:jpg}
\end{table}
As a conclusion we see that the change of the origin from the mean
to the point $[\frac{1}{2},\ldots,\frac{1}{2}]$ does not ``cost'' us
much, compared to the case when we restrict to the class of
$\mathcal{G}_{\mathrm{diag}}$. Moreover we see that the fixed center at $ \left[\frac{1}{2},\ldots,\frac{1}{2} \right]$ is better than $ \left[0,\ldots,0 \right] $, which shows that the JPG approach is reasonable.
\end{example}
\section{Conclusion}
In this paper we present a method of determining the optimal coordinate systems for a dataset $Y \subset \mathbb{R}^N$ which meets commonly encountered conditions. We interpret the optimal Gaussian density (in the sense of cross--entropy) as an optimal transformation of the data. Consequently we obtain the measure of the optimality of the given coordinate system (represented by the Gaussian family $\mathcal{F}$)
$$
\mathrm{M}(Y\|\mathcal{F}) = \inf_{f \in \mathcal{F}} H^{\times}(Y\| f)-H^{\times}(Y\|\mathcal{N}_Y).
$$
We obtain estimations in various subclasses of normal densities. The results are presented in Table~\ref{tab1:cec}.
\begin{thebibliography}{10}
\bibitem{Bo-Gr}
I.~Borg and P.~J. Groenen, {\em Modern multidimensional scaling: Theory and
applications}.
\newblock Springer Verlag, 2005.
\bibitem{DM-JR}
R.~De~Maesschalck, D.~Jouan-Rimbaud, and D.~L. Massart, ``The Mahalanobis
distance,'' {\em Chemometrics and Intelligent Laboratory Systems}, vol.~50,
no.~1, pp.~1--18, 2000.
\bibitem{Ha-Ka}
J.~Han, M.~Kamber, and J.~Pei, {\em Data mining: concepts and techniques}.
\newblock Morgan Kaufmann, 2006.
\bibitem{Ja-Sa}
T.~Jayalakshmi and A.~Santhakumaran, ``Statistical normalization and back
propagation for classification,'' {\em International Journal of Computer
Theory and Engineering}, vol.~3, no.~1, pp.~1793--8201, 2011.
\bibitem{Ra-Ma}
T.~Raykov and G.~A. Marcoulides, {\em An introduction to applied multivariate
analysis}.
\newblock Psychology Press, 2008.
\bibitem{Re}
A.~C. Rencher and W.~F. Christensen, {\em Methods of multivariate analysis},
vol.~709.
\newblock Wiley, 2012.
\bibitem{Ti}
N.~H. Timm, {\em Applied multivariate analysis}.
\newblock Springer Verlag, 2002.
\bibitem{Kr-Ka}
P.~R. Krishnaiah and L.~N. Kanal, ``Classification, pattern recognition, and
reduction of dimensionality, volume 2 of handbook of statistics,'' {\em
North-Holland, Amsterdam}, 1982.
\bibitem{ma_dis_a_3}
M.~P. McAssey, ``An empirical goodness-of-fit test for multivariate
distributions,'' {\em Journal of Applied Statistics}, no.~ahead-of-print,
pp.~1--12, 2013.
\bibitem{ma_dis_a_4}
E.~J. Bedrick, ``Graphical modelling and the Mahalanobis distance,'' {\em
Journal of Applied Statistics}, vol.~32, no.~9, pp.~959--967, 2005.
\bibitem{ma_dis_a_5}
W.~Krzanowski, ``Non-parametric estimation of distance between groups,'' {\em
Journal of Applied Statistics}, vol.~30, no.~7, pp.~743--750, 2003.
\bibitem{ma_dis_a_1}
A.~F. Mitchell and W.~J. Krzanowski, ``The Mahalanobis distance and elliptic
distributions,'' {\em Biometrika}, vol.~72, no.~2, pp.~464--467, 1985.
\bibitem{ma_dis_a_2}
H.~Holgersson and P.~S. Karlsson, ``Three estimators of the Mahalanobis
distance in high-dimensional data,'' {\em Journal of Applied Statistics},
vol.~39, no.~12, pp.~2713--2720, 2012.
\bibitem{Ma}
P.~C. Mahalanobis, ``On the generalized distance in statistics,'' in {\em
Proceedings of the National Institute of Sciences of India}, vol.~2,
pp.~49--55, New Delhi, 1936.
\bibitem{jolliffe2005principal}
I.~Jolliffe, {\em Principal component analysis}.
\newblock Wiley Online Library, 2005.
\bibitem{pca_a_1}
I.~T. Jolliffe, ``Rotation of principal components: choice of normalization
constraints,'' {\em Journal of Applied Statistics}, vol.~22, no.~1,
pp.~29--35, 1995.
\bibitem{pca_a_2}
P.~Pack, I.~Jolliffe, and B.~Morgan, ``Influential observations in principal
component analysis: A case study,'' {\em Journal of Applied Statistics},
vol.~15, no.~1, pp.~39--52, 1988.
\bibitem{Le-Ca}
E.~L. Lehmann and G.~Casella, {\em Theory of point estimation}, vol.~31.
\newblock Springer, 1998.
\bibitem{Bo}
A.~Van~den Bos, {\em Parameter estimation for scientists and engineers}.
\newblock Wiley-Interscience, 2007.
\bibitem{mle_a_3}
R.~Cheng and N.~Amin, ``Maximum likelihood estimation of parameters in the
inverse Gaussian distribution, with unknown origin,'' {\em Technometrics},
vol.~23, no.~3, pp.~257--263, 1981.
\bibitem{Ni-No}
F.~Nielsen and R.~Nock, ``Clustering multivariate normal distributions,'' in
{\em Emerging Trends in Visual Computing}, pp.~164--174, Springer, 2009.
\bibitem{Pe}
W.~D. Penny, ``Kullback-Leibler divergences of normal, gamma, dirichlet and
wishart densities,'' {\em Wellcome Department of Cognitive Neurology}, 2001.
\bibitem{Gr}
R.~D. Grigorieff, ``A note on von Neumann's trace inequality,'' {\em Math.
Nachr}, vol.~151, pp.~327--328, 1991.
\bibitem{Mi}
L.~Mirsky, ``A trace inequality of John von Neumann,'' {\em Monatshefte f{\"u}r
Mathematik}, vol.~79, no.~4, pp.~303--306, 1975.
\end{thebibliography}
\end{document}
\begin{document}
\title{\Large Je\'{s}manowicz' conjecture and Fermat numbers}
\author{\large Min Tang\thanks{Corresponding author. This work was supported by the National Natural Science Foundation of China, Grant
No.10901002 and Anhui Provincial Natural Science Foundation, Grant No.1208085QA02. Email: tmzzz2000@163.com} and Jian-Xin Weng }
\date{} \maketitle
\vskip -3cm
\begin{center}
\vskip -1cm { \small
\begin{center}
School of Mathematics and Computer Science, Anhui Normal
University,
\end{center}
\begin{center}
Wuhu 241003, China
\end{center}
}
\end{center}
{\bf Abstract.} Let $a,b,c$ be relatively prime positive integers such that $a^{2}+b^{2}=c^{2}.$ In 1956, Je\'{s}manowicz conjectured that for any positive integer $n$, the only solution of $(an)^{x}+(bn)^{y}=(cn)^{z}$ in positive integers is $(x,y,z)=(2,2,2)$. Let $k\geq 1$ be an integer and $F_k=2^{2^k}+1$ be a Fermat number. In this paper, we show that Je\'{s}manowicz' conjecture is true for Pythagorean triples $(a,b,c)=(F_k-2,2^{2^{k-1}+1},F_k)$.
{\bf Keywords:} Je\'{s}manowicz' conjecture; Diophantine equation; Fermat numbers
2010 {\it Mathematics Subject Classification}: 11D61
\section{Introduction} Let $a,b,c$ be relatively prime positive integers such that $a^{2}+b^{2}=c^{2}$ with $2\mid b.$ Clearly, for any positive integer $n$, the Diophantine equation
\begin{equation}\label{eqn1}(na)^{x}+(nb)^{y}=(nc)^{z}\end{equation}
has the solution $(x, y, z)=(2,2,2).$ In 1956, Sierpi\'{n}ski \cite{Sierpinski} showed there is no other solution when $n=1$ and $(a,b,c)=(3,4,5)$, and Je\'{s}manowicz \cite{Jesmanowicz} proved that when $n=1$ and $(a,b,c)=(5,12,13),(7,24,25),(9,40,41),(11,60,61),$ Eq.(\ref{eqn1}) has only the solution $(x,y,z)=(2,2,2).$ Moreover, he conjectured that for any positive integer $n,$ the Eq.(\ref{eqn1}) has no positive integer solution other than $(x,y,z)=(2,2,2).$ Let $k\geq 1$ be an integer and $F_k=2^{2^k}+1$ be a Fermat number.
Recently, the first author of this paper and Yang \cite{Tang} proved that if $1\leq k\leq 4$, then the Diophantine equation \begin{equation}\label{eqn2}((F_k-2)n)^{x}+(2^{2^{k-1}+1}n)^{y}=(F_kn)^{z}\end{equation}
has no positive integer solution other than $(x,y,z)=(2,2,2)$.
For related problems, see (\cite{Deng}, \cite{Miyazaki}, \cite{Miyazaki2}).
In this paper, we obtain the following result.
\begin{theorem}\label{thm1} For any positive integer $n$ and Fermat number $F_k$, Eq.(\ref{eqn2}) has only the solution $(x,y,z)=(2,2,2)$.
\end{theorem}
Throughout this paper, let $m$ be a positive integer and $a$ be any integer relatively prime to $m$. If $h$ is the
least positive integer such that $a^{h}\equiv 1 \pmod m$, then $h$ is called the order of $a$ modulo $m$, denoted by $\textnormal{ord}_{m}(a)$.
\section{Lemmas}
\begin{lemma}\label{lem1}(\cite{Lu}) For any positive integer $m$, the Diophantine equation $(4m^{2}-1)^{x}+(4m)^{y}=(4m^{2}+1)^{z}$ has only the solution $(x,y,z)=(2,2,2).$\end{lemma}
\begin{lemma}\label{lem2}(See \cite[Lemma 2]{Deng}) If $z\geq \max\{x,y\},$ then the Diophantine equation $a^{x}+b^{y}=c^{z},$ where $a,b$ and $c$ are any positive integers (not necessarily relatively prime) such that $a^{2}+b^{2}=c^{2}$, has no solution other than $(x,y,z)=(2,2,2).$\end{lemma}
\begin{lemma}\label{lem3} (See \cite[Corollary 1]{Le}) If the Diophantine equation $(na)^{x}+(nb)^{y}=(nc)^{z}$(with $a^2+b^2=c^2$) has a solution $(x,y,z)\neq(2,2,2),$
then $x,y,z$ are distinct.\end{lemma}
\begin{lemma}\label{lem4}(See \cite[Lemma 2.3]{Deng2013}) Let $a,b,c$ be any primitive Pythagorean triple such that the Diophantine equation $a^{x}+b^{y}=c^{z}$ has the only positive integer solution $(x,y,z)=(2,2,2)$. Then (\ref{eqn1}) has no positive integer solution satisfying $x>y>z$ or $y>x>z$.
\end{lemma}
\begin{lemma}\label{lem5}Let $k$ be a positive integer and $F_k=2^{2^k}+1$ be a Fermat number. If $(x,y,z)$ is a solution of the Eq.(\ref{eqn2}) with $(x,y,z)\neq (2,2,2)$, then $x<z<y$.
\end{lemma}
\begin{proof} By Lemmas \ref{lem2}-\ref{lem4}, it is sufficient to prove that the Eq.(\ref{eqn2}) has no solution $(x,y,z)$ satisfying $y<z<x$.
By Lemma \ref{lem1}, we may suppose that $n\geq2$ and the Eq.(\ref{eqn2}) has a solution $(x,y,z)$ with $y<z<x$.
Then we have
\begin{equation}\label{eqn9}2^{(2^{k-1}+1)y}=n^{z-y}\Big(F_k^{z}-(F_k-2)^{x}n^{x-z}\Big).\end{equation}
By \eqref{eqn9} we may write $n=2^{r}$ with $r\geq1$.
Noting that $$\gcd\Big(F_k^{z}-(F_k-2)^{x}2^{r(x-z)},2\Big)=1,$$
we have \begin{equation}\label{eqn10}F_k^{z}-(F_k-2)^{x}2^{r(x-z)}=1.\end{equation}
Since $k\geq 1$, by (\ref{eqn10}) we have $F_k^z\equiv 1\pmod 3$, $z\equiv 0\pmod 2.$
Writing $z=2z_{1}$, we have
\begin{equation}\label{eqn11}\Big(\prod\limits_{i=0}^{k-1}F_i\Big)^x2^{r(x-z)}=(F_k^{z_{1}}-1)(F_k^{z_{1}}+1).\end{equation}
Let $F_{k-1}=\prod\limits_{i=1}^tp_i^{\alpha_i}$ be the standard prime factorization of $F_{k-1}$ with $p_1<\cdots<p_t$. By the known Fermat primes, we know that there is the possibility of $t=1$. Moreover,
\begin{equation}\label{eqn12}\textnormal{ ord}_{p_i}(2)=2^{k}, \quad i=1,\cdots,t.\end{equation}
Noting that $\gcd(F_k^{z_{1}}-1,F_k^{z_{1}}+1)=2,$ we know that $p_t$ divide only one of $F_k^{z_{1}}-1$ and $F_k^{z_{1}}+1$.
{\bf Case 1.} $p_t\mid F_k^{z_{1}}-1$. Then $F_k^{z_{1}}-1\equiv 2^{z_1}-1\equiv 0\pmod {p_t}$. Noting that $\textnormal{ ord}_{p_t}(2)=2^{k}$, we have $z_1\equiv 0\pmod{2^{k}}$.
By (\ref{eqn12}) we have $$F_k^{z_{1}}-1\equiv 2^{z_1}-1\equiv 0\pmod {p_i}, \quad i=1,\cdots, t.$$
Since $\gcd(F_k^{z_{1}}-1,F_k^{z_{1}}+1)=2,$ by (\ref{eqn11}) we have $$F_k^{z_{1}}-1\equiv 2^{z_1}-1\equiv 0\pmod {p_i^{\alpha_ix}}, \quad i=1,\cdots, t.$$
Hence $F_{k-1}^x\mid F_k^{z_{1}}-1$.
{\bf Case 2.} $p_t\mid F_k^{z_{1}}+1$. Then $F_k^{z_{1}}+1\equiv 2^{z_1}+1\equiv 0\pmod {p_t}$. Noting that $\textnormal{ ord}_{p_t}(2)=2^{k}$, we have $2^{k-1}\mid z_1$, but $2^{k}\nmid z_1$.
By (\ref{eqn12}) we have $$2^{2z_1}-1=(2^{z_1}+1)(2^{z_1}-1)\equiv 0\pmod {p_i}, \quad i=1,\cdots, t.$$
Thus
$$F_k^{z_{1}}+1\equiv 2^{z_1}+1\equiv 0\pmod {p_i}, \quad i=1,\cdots, t.$$
Since $\gcd(F_k^{z_{1}}-1,F_k^{z_{1}}+1)=2,$ by (\ref{eqn11}) we have $$F_k^{z_{1}}+1\equiv 2^{z_1}+1\equiv 0\pmod {p_i^{\alpha_ix}}, \quad i=1,\cdots, t.$$
Hence $F_{k-1}^x\mid F_k^{z_{1}}+1$.
However, $$F_{k-1}^x=\Big(2^{2^{k-1}}+1\Big)^x>\Big(2^{2^{k-1}}+1\Big)^{2z_1}>F_k^{z_1}+1,$$ which is impossible.
This completes the proof of Lemma \ref{lem5}.
\end{proof}
\section{Proof of Theorem \ref{thm1}}
By Lemma \ref{lem1} and Lemma \ref{lem5}, we may suppose that $n\geq2$ and the Eq.(\ref{eqn2}) has a solution $(x,y,z)$ with $x<z<y$. Then
\begin{equation}\label{eqn13a}\Big(\prod_{i=0}^{k-1}F_i\Big)^{x}=n^{z-x}\Big(F_k^{z}-2^{(2^{k-1}+1)y}n^{y-z}\Big).\end{equation}
It is clear from \eqref{eqn13a} that
$$\gcd\Big(n,\prod\limits_{i=0}^{k-1}F_i\Big)>1.$$
Let
$\prod\limits_{i=0}^{k-1}F_i=\prod\limits_{i=1}^{t}p_i^{\alpha_i}$
be the standard prime factorization of $\prod\limits_{i=0}^{k-1}F_i$ and write $n=\prod\limits_{\nu=1}^{s}p_{i_\nu}^{\beta_{i_\nu}},$
where $\beta_{i_\nu}\geq1$, $\{i_1,\cdots,i_s\}\subseteq \{1,\cdots,t\}$. Let $T=\{1,2,\cdots, t\}\setminus \{i_1,\cdots,i_s\}$.
If $T=\emptyset$, then let $P(k,n)=1$. If $T\neq\emptyset$, then let
$$P(k,n)=\prod\limits_{i\in T}p_i^{\alpha_i}.$$
By (\ref{eqn13a}), we have
\begin{equation}\label{eqn14a}P(k,n)^x=F_k^{z}-2^{(2^{k-1}+1)y}\prod\limits_{\nu=1}^{s}p_{i_\nu}^{\beta_{i_\nu}(y-z)}.\end{equation}
Since $y\ge 2$, it follows that
\begin{equation}\label{eqn4.3}P(k,n)^x\equiv 1\pmod{2^{2^k}}.\end{equation}
If $3\mid P(k,n)$, then $P(k,n)\equiv -1\pmod 4$. This implies
that $x$ is even. If $3\nmid P(k,n)$, then $P(k,n)\equiv 1\pmod
4$. Let $P(k,n)=1+2^vW$, $2\nmid W$. Then $v\ge 2$. Suppose that
$x$ is odd, then
$$P(k,n)^x=1+2^vW', \quad 2\nmid W'.$$
Thus $v\ge 2^k$ and $P(k,n)\ge F_k$, a contradiction with $$
P(k,n)<\prod\limits_{i=0}^{k-1}F_i=F_k-2. $$ Therefore, $x$ is
even. Write $x=2^uN$ with $2\nmid N$. Then $u\geq 1$.
{\bf Case 1.} $P(k,n)\equiv -1\pmod 4$. Let $P(k,n)=2^dM-1$ with $2\nmid M$. Then $d\geq 2$ and
$$P(k,n)^x=1+2^{u+d}V, \quad 2\nmid V.$$
By (\ref{eqn4.3}) we have $u+d\geq 2^k$.
Choose a $\nu\in\{1,\cdots,s\}$, let $p_{i_\nu}=2^rt+1$ with $r\geq 1$, $2\nmid t$. Then
$$2^{d+r-1}<(2^dM-1)(2^rt+1)=P(k,n)\cdot p_{i_\nu}\leq \prod\limits_{i=0}^{k-1}F_i=2^{2^k}-1.$$
Thus $d+r\leq 2^k$. Hence $u\geq r$.
By (\ref{eqn14a}) we have \begin{equation}\label{eqn3.4}P(k,n)^x\equiv 2^z\pmod{p_{i_\nu}}.\end{equation}
Noting that $p_{i_\nu}-1\mid 2^ut$, we have \begin{equation}\label{eqn3.5}2^{tz}\equiv P(k,n)^{2^utN}\equiv 1\pmod {p_{i_\nu}}.\end{equation}
Since $\textnormal{ord}_{p_{i_\nu}}(2)$ is even and $2\nmid t$, we have $z\equiv 0\pmod 2$.
{\bf Case 2.} $P(k,n)\equiv 1\pmod 4$. Let $P(k,n)=2^{d'}M'+1$ with $2\nmid M'$. Then $d'\geq 2$ and
$$P(k,n)^x=1+2^{u+d'}V', \quad 2\nmid V'.$$
By (\ref{eqn4.3}) we have $u+d'\geq 2^k$.
Choose a $\mu\in\{1,\cdots,s\}$, let $p_{i_\mu}=2^{r'}t'+1$ with $r'\geq 1$, $2\nmid t'$. Then
$$2^{d'+r'}<(2^{d'}M'+1)(2^{r'}{t'}+1)=P(k,n)\cdot p_{i_\mu}\leq \prod\limits_{i=0}^{k-1}F_i=2^{2^k}-1.$$
Thus $d'+r'<2^k$. Hence $u>r'$.
By (\ref{eqn14a}) we have \begin{equation}\label{eqn3.6}P(k,n)^x\equiv 2^z\pmod{p_{i_\mu}}.\end{equation}
Noting that $p_{i_\mu}-1\mid 2^ut'$, we have \begin{equation}\label{eqn3.7}2^{t'z}\equiv P(k,n)^{2^ut'N}\equiv 1\pmod {p_{i_\mu}}.\end{equation}
Since $\textnormal{ord}_{p_{i_\mu}}(2)$ is even and $2\nmid t'$, we have $z\equiv 0\pmod 2$.
Write $z=2z_{1}, x=2x_{1}$.
By (\ref{eqn14a}), we have \begin{equation}\label{eqn15T}2^{(2^{k-1}+1)y}\prod\limits_{\nu=1}^{s}p_{i_\nu}^{\beta_{i_\nu}(y-z)}=\Big(F_k^{z_{1}}-P(k,n)^{x_1}\Big)\Big(F_k^{z_{1}}+P(k,n)^{x_1}\Big).\end{equation}
Noting that $$\gcd\Big(F_k^{z_{1}}-P(k,n)^{x_1},F_k^{z_{1}}+P(k,n)^{x_1}\Big)=2,$$
we have \begin{equation}\label{eqn16T}2^{(2^{k-1}+1)y-1}\mid F_k^{z_{1}}-P(k,n)^{x_1},\quad 2\mid F_k^{z_{1}}+P(k,n)^{x_1},\end{equation}
or \begin{equation}\label{eqn17T}2\mid F_k^{z_{1}}+P(k,n)^{x_1},\quad 2^{(2^{k-1}+1)y-1}\mid F_k^{z_{1}}-P(k,n)^{x_1}.\end{equation}
However, $$2^{(2^{k-1}+1)y-1}>2^{(2^{k-1}+1)2z_1}>(F_k+F_k-2)^{z_1}>F_k^{z_{1}}+P(k,n)^{x_1},$$
a contradiction.
This completes the proof of Theorem \ref{thm1}.
\section{Acknowledgment} We sincerely thank Professor Yong-Gao Chen for his
valuable suggestions and useful discussions. We would like to thank the referee for his/her helpful comments.
\end{document} |
\begin{document}
\gdef\@thefnmark{}\@footnotetext{\textup{2000} \textit{Mathematics Subject Classification}:
57M07, 20F05, 20F38}
\gdef\@thefnmark{}\@footnotetext{\textit{Keywords}:
Mapping class groups, punctured surfaces, involutions, generating sets}
\newenvironment{prooff}{
\par \noindent {\it Proof}\ }{
$\square$
\par}
\def\sqr#1#2{{\vcenter{\hrule height.#2pt
\hbox{\vrule width.#2pt height#1pt \kern#1pt
\vrule width.#2pt}\hrule height.#2pt}}}
\def\square{\mathchoice\sqr67\sqr67\sqr{2.1}6\sqr{1.5}6}
\def\pf#1{
\par \noindent {\it #1.}\ }
\def\endpf{
$\square$
\par}
\def\demo#1{
\par \noindent {\it #1.}\ }
\def\enddemo{
\par}
\def\qed{~
$\square$}
\title[Generating ${\rm Mod}^{*}(\Sigma_{g,p})$ by Three Involutions] {Generating the Extended Mapping Class Group by Three Involutions}
\author[T{\"{u}}l\.{i}n Altun{\"{o}}z, Mehmetc\.{i}k Pamuk, and O\u{g}uz Y{\i}ld{\i}z ]{T{\"{u}}l\.{i}n Altun{\"{o}}z, Mehmetc\.{i}k Pamuk, and Oguz Yildiz}
\address{Department of Mathematics, Middle East Technical University,
Ankara, Turkey}
\email{atulin@metu.edu.tr} \email{mpamuk@metu.edu.tr} \email{oguzyildiz16@gmail.com}
\begin{abstract}
We prove that the extended mapping class group, ${\rm Mod}^{*}(\Sigma_{g})$, of a connected orientable surface of genus $g$, can be generated by three involutions
for $g\geq 5$. In the presence of punctures, we prove that ${\rm Mod}^{*}(\Sigma_{g,p})$ can be generated by three involutions for $g\geq 10$ and $p\geq 6$ (with the exception that for $g\geq 11$, $p$ should be at least $15$).
\end{abstract}
\maketitle
\setcounter{secnumdepth}{2}
\setcounter{section}{0}
\section{Introduction}
Let $\Sigma_{g,p}$ denote a connected orientable surface of genus $g$ with $p\geq0$ punctures. When $p=0$, we drop it from the notation and write $\Sigma_{g}$.
The mapping class group of $\Sigma_g$ is the group of isotopy classes of orientation preserving diffeomorphisms and is denoted by ${\rm Mod}(\Sigma_g)$.
It is a classical result that ${\rm Mod}(\Sigma_g)$ is generated by finitely many Dehn twists about nonseparating simple closed curves~\cite{de,H,l3}.
The study of algebraic properties of the mapping class group, finding small generating sets, generating sets with particular properties, is an active one leading to interesting developments.
Wajnryb~\cite{w} showed that ${\rm Mod}(\Sigma_g)$ can be generated by two elements given as a product of Dehn twists. As the group is not abelian, this is the smallest
possible. Korkmaz~\cite{mk2} improved this result by first showing that one of the two generators can be taken as a Dehn twist and the other as a torsion element.
He also proved that ${\rm Mod}(\Sigma_g)$ can be generated by two torsion elements. Recently, the third author showed that ${\rm Mod}(\Sigma_g)$ is generated by two torsions of small orders~\cite{y1}.
Generating ${\rm Mod}(\Sigma_g)$ by involutions was first considered by McCarthy and Papadopoulos~\cite{mp}. They showed that the group can be generated
by infinitely many conjugates of a single involution (element of order two) for $g\geq 3$.
In terms of generating by finitely many involutions, Luo~\cite{luo} showed that any Dehn twist about a nonseparating simple closed curve
can be written as a product of six involutions, which in turn implies that ${\rm Mod}(\Sigma_g)$ can be generated by $12g+6$ involutions.
Brendle and Farb~\cite{bf} obtained a generating set of six involutions for $g\geq3$. Following their work, Kassabov~\cite{ka} showed that
${\rm Mod}(\Sigma_g)$ can be generated by four involutions if $g\geq7$. Recently, Korkmaz~\cite{mk1} showed that ${\rm Mod}(\Sigma_g)$ is generated by three involutions
if $g\geq8$ and four involutions if $g\geq3$. The third author improved these results by showing that this group can be generated by three involutions if $g\geq6$~\cite{y2}.
The extended mapping class group ${\rm Mod}^{*}(\Sigma_{g})$ is defined to be the group of isotopy classes of all self-diffeomorphisms of $\Sigma_{g}$.
The mapping class group ${\rm Mod}(\Sigma_{g})$ is an index two normal subgroup of ${\rm Mod}^{*}(\Sigma_{g})$.
In~\cite{mk2}, it is proved that ${\rm Mod}^{*}(\Sigma_{g})$ can be generated by two elements, one of which is a Dehn twist.
Moreover, it follows from~\cite[Theorem $14$]{mk2} that ${\rm Mod}^{*}(\Sigma_{g})$ can be generated by three torsion elements for $g\geq1$.
Also, Du~\cite{du1, du2} proved that ${\rm Mod}^{*}(\Sigma_{g})$ can be generated by two torsion elements of order $2$ and $4g+2$ for $g\geq 3$.
In terms of involution generators, as it contains nonabelian free groups, the minimal number of involution generators is three and
Stukow~\cite{st} proved that ${\rm Mod}^{*}(\Sigma_{g})$ can be generated by three involutions for $g\geq1$.
Although our main interest in this paper is to find minimal generating sets for the extended mapping class group in the presence of punctures, in Section~\ref{S3},
we test our techniques to find minimal generating sets of involutions. In this direction, we obtain the following result (see Theorems~\ref{thm2} and~\ref{thm3}):
\begin{thma}
For $g\geq 5$, the extended mapping class group ${\rm Mod}^{*}(\Sigma_{g})$ can be generated by three involutions.
\end{thma}
In the presence of punctures, the mapping class group ${\rm Mod}(\Sigma_{g,p})$ is defined to be the group of isotopy classes of orientation-preserving
self-diffeomorphisms of $\Sigma_{g,p}$ preserving the set of punctures. The extended mapping class group ${\rm Mod}^{*}(\Sigma_{g,p})$
is defined as the group of isotopy classes of all (including orientation-reversing) self-diffeomorphisms of $\Sigma_{g,p}$ that preserve the set of punctures.
Kassabov~\cite{ka} gave involution generators of ${\rm Mod}(\Sigma_{g,p})$, proving that this group
can be generated by four involutions if $g>7$ or $g=7$ and $p$ is even, five involutions if $g>5$ or $g=5$ and $p$ is even, six involutions if $g>3$ or $g=3$ and $p$ is even
(allowing orientation-reversing involutions, these results can also be used for ${\rm Mod}^{*}(\Sigma_{g,p})$ \cite[Remark~$3$]{ka}).
Later, Monden~\cite{m1} removed the parity conditions on the number of punctures. For $g\geq1$ and $p\geq2$, he~\cite{m2} also proved that ${\rm Mod}(\Sigma_{g,p})$ can be
generated by three elements, one of which is a Dehn twist. Moreover, he gave a similar generating set for ${\rm Mod}^{*}(\Sigma_{g,p})$ consisting of three elements. Recently, Monden showed that ${\rm Mod}(\Sigma_{g,p})$ and ${\rm Mod}^{*}(\Sigma_{g,p})$ are generated by two elements~\cite{m3}.
In Section~\ref{S4}, we prove the following result, giving a partial answer to Question $5.6$ of \cite{m1}.
\begin{thmb}\label{thmb}
For $g\geq 10$ and $p\geq 6$ (with the exception that for $g\geq 11$, $p$ should be at least $15$), the extended mapping class group ${\rm Mod}^{*}(\Sigma_{g,p})$ can be generated by three involutions.
\end{thmb}
\begin{remark}
At the end of the paper, we also show that the same result holds for $g\geq 10$ and $p=1,2,3$.
\end{remark}
Before we finish the introduction, let us point out that by the version of the Dehn-Nielsen-Baer theorem for punctured surfaces (see \cite[Section~8.2.7]{FM}),
${\rm Mod}^{*}(\Sigma_{g,p})$ is isomorphic to the subgroup of the outer automorphism group $Out(\pi_1(\Sigma_{g,p}))$ consisting of elements that preserve the set of
conjugacy classes of the simple closed curves surrounding individual punctures. Note also that these conjugacy classes are precisely the primitive conjugacy classes
that correspond to the parabolic elements of the group of isometries of the hyperbolic plane.
\noindent
{ Acknowledgements.}
We would like to thank Tara Brendle for her helpful comments. We also would like to thank the referee for carefully reading our manuscript, pointing out an error in an earlier version and suggesting useful ideas which improved the paper.
The first author was partially supported by the Scientific and Technological Research Council of Turkey (T\"{U}B\.{I}TAK)[grant number 117F015].
\par
\section{Background and Results on Mapping Class Groups} \label{S2}
Let ${\Sigma}igma_{g,p}$ be a connected orientable surface of genus $g$ with $p$ punctures specified by the set $P=\lbrace z_1,z_2,\ldots,z_p\rbrace$ of $p$ distinguished points. If $p$ is zero then we omit from the notation. {\textit{The mapping class group}}
${\rm Mod}({\Sigma}igma_{g,p})$ of the surface ${\Sigma}igma_{g,p}$ is defined to be the group of the isotopy classes of orientation preserving
diffeomorphisms ${\Sigma}igma_{g,p} \to {\Sigma}igma_{g,p}$ which fix the set $P$. {\textit{The extended mapping class group}} ${\rm Mod}^{*}({\Sigma}igma_{g,p})$ of the surface ${\Sigma}igma_{g,p}$ is defined to be the group of isotopy classes of all (including orientation-reversing) diffeomorphisms of ${\Sigma}igma_{g,p}$ which fix the set $P$. Let ${\rm Mod}_{0}^{*} ({\Sigma}igma_{g,p})$ denote the subgroup of ${\rm Mod}^{*}({\Sigma}igma_{g,p})$ which consists of elements fixing the set $P$ pointwise. It is obvious that we have the exact sequence:
\[
1\longrightarrow {\rm Mod}_{0}^{*}({\Sigma}igma_{g,p})\longrightarrow {\rm Mod}^{*}({\Sigma}igma_{g,p}) \longrightarrow S_{p}\longrightarrow 1,
\]
where $S_p$ denotes the symmetric group on the set $\lbrace1,2,\ldots,p\rbrace$ and the restriction of the isotopy class of a diffeomorphism to its action on the puncture points gives the last projection. \par
Let $\beta_{i,j}$ be an embedded arc joining two punctures $z_i$ and $z_j$ and not intersecting $\delta$ on ${\Sigma}igma_{g,p}$. Let $D_{i,j}$ be a closed regular neighbourhood of $\beta_{i,j}$ such that it is a disk with two punctures. There is a diffeomorphism $H_{i,j}: D_{i,j} \to D_{i,j}$, which interchanges the punctures such that $H_{i,j}^{2}$ is the right handed Dehn twist about $\partial D_{i,j}$ and is equal to the identity on the complement of the interior of $D_{i,j}$. Such a diffeomorphism is called \textit{the (right handed) half twist} about $\beta_{i,j}$. One can extend it to a diffeomorphism of ${\rm Mod}({\Sigma}igma_{g,p})$. Throughout the paper we do not distinguish a
diffeomorphism from its isotopy class. For the composition of two diffeomorphisms, we
use the functional notation; if $g$ and $h$ are two diffeomorphisms, then
the composition $gh$ means that $h$ is applied first.\\
\indent
For a simple closed
curve $a$ on ${\Sigma}igma_{g,p}$, following ~\cite{apy,mk1} the right-handed
Dehn twist $t_a$ about $a$ will be denoted by the corresponding capital letter $A$.
Now, let us recall the following basic properties of Dehn twists which we use frequently in the remainder of the paper. Let $a$ and $b$ be
simple closed curves on $\Sigma_{g,p}$ and $f\in {\rm Mod}^{*}(\Sigma_{g,p})$.
\begin{itemize}
\item \textbf{Commutativity:} If $a$ and $b$ are disjoint, then $AB=BA$.
\item \textbf{Conjugation:} If $f(a)=b$, then $fAf^{-1}=B^{s}$, where $s=\pm 1$
depending on whether $f$ is orientation preserving or orientation reversing on a
neighbourhood of $a$ with respect to the chosen orientation.
\end{itemize}
\section{Involution generators for ${\rm Mod}^{*}(\Sigma_g)$}\label{S3}
We start this section by embedding $\Sigma_g$ into $\mathbb{R}^{3}$ so that it is invariant under the reflections $\rho_1$ and $\rho_2$ (see Figures~\ref{GOC} and~\ref{GEC}). Here, $\rho_1$ and $\rho_2$ are the reflections in the $xz$-plane so that $R=\rho_1\rho_2$ is the rotation by $\frac{2\pi}{g}$ about the $x$-axis. Now, let us recall the following set of generators given by Korkmaz~\cite[Theorem~$5$]{mk1}.
\begin{theorem}\label{thm1}
If $g\geq3$, then the mapping class group ${\rm Mod}(\Sigma_g)$ is generated by the four elements $R$, $A_1A_{2}^{-1}$, $B_1B_{2}^{-1}$, $C_1C_{2}^{-1}$.
\end{theorem}
By adding an orientation reversing self-diffeomorphism to the above generating set, one can easily see that ${\rm Mod}^{*}({\Sigma}igma_g)$ can be generated by five elements. In the following theorems, we show that one can reduce the number of generators to three and all the generators are of order two.
\begin{figure}
\caption{The reflections $\rho_1$ and $\rho_2$ on $\Sigma_g$ for $g$ odd.}
\label{GOC}
\end{figure}
\begin{figure}
\caption{The reflections $\rho_1$ and $\rho_2$ on $\Sigma_g$ for $g$ even.}
\label{GEC}
\end{figure}
\begin{theorem}\label{thm2}
If $g\geq5$ and odd, then ${\rm Mod}^{*}(\Sigma_g)$ is generated by the involutions $\rho_1$, $\rho_2$ and $\rho_1A_1B_2C_{\frac{g+3}{2}} A_3$.
\end{theorem}
\begin{proof}
Consider the surface ${\Sigma}igma_{g}$ as in Figure~\ref{GOC} and observe that the involution $\rho_1$ satisfies
\[
\rho_1(a_1)=a_3, \rho_1(b_{2})=b_{2} \textrm{ and } \rho_1(c_{\frac{g+3}{2}})=c_{\frac{g+3}{2}}.
\]
Since $\rho_1$ reverses the orientation of a neighbourhood of any simple closed curve, we get
\[
\rho_1A_1\rho_1=A_{3}^{-1}, \rho_1B_{2}\rho_1=B_{2}^{-1} \textrm{ and } \rho_1C_{\frac{g+3}{2}}\rho_1=C_{\frac{g+3}{2}}^{-1}.
\]
It is easily seen that $\rho_1A_1B_2C_{\frac{g+3}{2}} A_3$ is an involution. Let $K$ be the subgroup of ${\rm Mod}^{*}({\Sigma}igma_{g})$ generated by the set
\[
\lbrace \rho_1,\rho_2, \rho_1A_1B_2C_{\frac{g+3}{2}} A_3 \rbrace.
\]
Note that the rotation $R$ and the orientation reversing diffeomorphism $\rho_1$ (or $\rho_2$) are contained in $K$. Hence, all we need to show is that
the elements $A_1A_{2}^{-1}, B_1B_{2}^{-1}$ and $C_1C_{2}^{-1}$ belong to $K$. For $g\geq7$ and odd, by proof of ~\cite[Theorem $3.4$]{apy}, these elements are contained in $K$. For $g=5$, the proof follows from the proof of \cite[Theorem $3.3$]{apy}.
\end{proof}
Next, we deal with the even genera case.
\begin{theorem}\label{thm3}
If $g\geq6$ and even, then ${\rm Mod}^{*}(\Sigma_g)$ is generated by the involutions $\rho_1$, $\rho_2$ and $\rho_1A_2C_{\frac{g}{2}}B_{\frac{g+4}{2}} C_{\frac{g+6}{2}}$.
\end{theorem}
\begin{proof}
Consider the surface ${\Sigma}igma_{g}$ as in Figure~\ref{GEC} when $g\geq6$ and even. The involution $\rho_1$ satisfies
\[
\rho_1(a_2)=a_2, \rho_1(b_{\frac{g+4}{2}})=b_{\frac{g+4}{2}} \textrm{ and } \rho_1(c_{\frac{g}{2}})=c_{\frac{g+6}{2}}.
\]
Since $\rho_1$ reverses the orientation of a neighbourhood of any simple closed curve, we have
\[
\rho_1A_2\rho_1=A_{2}^{-1}, \rho_1B_{\frac{g+4}{2}}\rho_1=B_{\frac{g+4}{2}}^{-1} \textrm{ and } \rho_1C_{\frac{g}{2}}\rho_1=C_{\frac{g+6}{2}}^{-1}.
\]
It can be shown that $\rho_1A_2C_{\frac{g}{2}}B_{\frac{g+4}{2}} C_{\frac{g+6}{2}}$
is an involution. Let $H$ be the subgroup of ${\rm Mod}^{*}({\Sigma}igma_{g})$ generated by the set
\[
\lbrace \rho_1,\rho_2, \rho_1A_2C_{\frac{g}{2}}B_{\frac{g+4}{2}} C_{\frac{g+6}{2}} \rbrace.
\]
Note that the rotation $R$ is in $H$. Since $H$ contains the orientation reversing diffeomorphism $\rho_1$ (or $\rho_2$), again all we need to show is that
the elements $A_1A_{2}^{-1}, B_1B_{2}^{-1}$ and $C_1C_{2}^{-1}$ are contained in $H$. By the proof of ~\cite[Theorem $3.5$]{apy}, these elements are contained in $H$.
\end{proof}
\section{Involution generators for ${\rm Mod}^{*}(\Sigma_{g,p})$}\label{S4}
In this section, we introduce punctures on a genus $g$ surface and present involution generators for the extended mapping class group ${\rm Mod}^{*}({\Sigma}igma_{g,p})$. First, we recall the following basic lemma from algebra.
\begin{lemma}\label{lemma1}
Let $G$ and $K$ be groups. Suppose that
the following short exact sequence holds:
\[
1 \longrightarrow N \overset{i}{\longrightarrow}G \overset{\pi}{\longrightarrow} K\longrightarrow 1.
\]
Then a subgroup $H$ of $G$ contains $i(N)$ and satisfies $\pi(H)=K$ if and only if $H=G$.
\end{lemma}
\par
For $G={\rm Mod}^{*}({\Sigma}igma_{g,p})$ and $N={\rm Mod}_{0}^{*}({\Sigma}igma_{g,p})$ (self-diffeomorphisms fixing the punctures pointwise), we have the following short exact sequence:
\[
1\longrightarrow {\rm Mod}_{0}^{*}({\Sigma}igma_{g,p})\longrightarrow {\rm Mod}^{*}({\Sigma}igma_{g,p}) \longrightarrow S_{p}\longrightarrow 1,
\]
where $S_p$ denotes the symmetric group on the set $\lbrace1,2,\ldots,p\rbrace$.
Therefore, we have the following useful result which follows immediately from Lemma~\ref{lemma1}. Let $H$ be a subgroup of ${\rm Mod}^{*}({\Sigma}igma_{g,p})$. If the subgroup $H$ contains ${\rm Mod}_{0}^{*}({\Sigma}igma_{g,p})$ and has a surjection to $S_p$ then $H={\rm Mod}^{*}({\Sigma}igma_{g,p})$.
\begin{figure}
\caption{The reflections $\rho_1$ and $\rho_2$ if $g=2k$ and $p=2b+1$.}
\label{EO}
\end{figure}
\begin{figure}
\caption{The reflections $\rho_1$ and $\rho_2$ if $g=2k$ and $p=2b$.}
\label{EE}
\end{figure}
\begin{figure}
\caption{The reflections $\rho_1$ and $\rho_2$ if $g=2k+1$ and $p=2b+1$.}
\label{OO}
\end{figure}
\begin{figure}
\caption{The reflections $\rho_1$ and $\rho_2$ if $g=2k+1$ and $p=2b$.}
\label{OE}
\end{figure}
In the presence of punctures, we consider the reflections $\rho_1$ and $\rho_2$ as shown in Figures~\ref{EO}, \ref{EE}, \ref{OO} and \ref{OE}. Note that the element $R=\rho_1\rho_2$ is contained in ${\rm Mod}^{*}({\Sigma}igma_{g,p})$ and we have
\begin{itemize}
\item $R(a_i)=a_{i+1}$, $R(b_i)=b_{i+1}$ for $i=1,\ldots,g-1$ and $R(b_g)=b_{1}$,
\item $R(c_i)=c_{i+1}$ for $i=1,\ldots,g-2$,
\item $R(z_1)=z_p$ and $R(z_i)=z_{i-1}$ for $i=2,\ldots,p$.
\end{itemize}
\noindent
In the proof of the following lemmata, we basically follow Theorem~\ref{thm1}.
\begin{lemma}\label{lem2k}
For $g=2k\geq 10$, the subgroup $H$ of ${\rm Mod}^{*}({\Sigma}igma_{g,p})$ generated by
\begin{equation*}
\begin{cases}
\rho_1, \rho_2, \rho_2H_{b,b+2}B_{k-3}A_{k-1}C_kA_{k+2}B_{k+4} & \text {if $p=2b+1\geq 7,$}\\
\rho_1, \rho_2, \rho_2H_{b,b+1}B_{k-3}A_{k-1}C_kA_{k+2}B_{k+4} & \text{if $p=2b\geq 6$}
\end{cases}
\end{equation*}
contains the Dehn twists $A_i$, $B_i$ and $C_j$ for $i=1,\ldots,g$ and $j=1,\ldots,g-1$.
\end{lemma}
\begin{proof}
Consider the models for ${\Sigma}igma_{g,p}$ as shown in Figures~\ref{EO} and~\ref{EE}. Start with the case $p=2b+1$. Let $E_1:=H_{b,b+2}B_{k-3}A_{k-1}C_kA_{k+2}B_{k+4}$ so that the subgroup $H$ is generated by the elements $\rho_1$, $\rho_2$ and $\rho_2E_1$. Since $H$ contains the elements $\rho_1$, $\rho_2$ and $\rho_2E_1$, it follows that $H$ also contains the elements $R=\rho_1\rho_2$ and $E_1=\rho_2\rho_2E_1$.
Let $E_2$ denote the conjugation of $E_1$ by $R^{-1}$. Since
\[
R^{-1}(b_{k-3},a_{k-1},c_k,a_{k+2},b_{k+4})=(b_{k-4},a_{k-2},c_{k-1},a_{k+1},b_{k+3})
\]
and
\[
R^{-1}(z_{b},z_{b+2})=(z_{b+1},z_{b+3}),
\]
it follows that $E_2=R^{-1}E_1R=H_{b+1,b+3}B_{k-4}A_{k-2}C_{k-1}A_{k+1}B_{k+3}\in H$. Let $E_3$ be the conjugation of $E_2$ by $R^3$. Since the element $R^3$ satisfies
\[
R^3
(b_{k-4},a_{k-2},c_{k-1},a_{k+1},b_{k+3})=(b_{k-1},a_{k+1},c_{k+2},a_{k+4},b_{k+6})
\]
and
\[
R^3(z_{b+1},z_{b+3})=(z_{b-2},z_{b}),
\]
the element
\[
E_3=R^3E_2R^{-3}=H_{b-2,b}B_{k-1}A_{k+1}C_{k+2}A_{k+4}B_{k+6}\in H.
\]
Consider the element $E_4=(E_2E_3)E_2(E_2E_3)^{-1}$, which is contained in $H$. Thus,
\[
E_4=H_{b+1,b+3}B_{k-4}A_{k-2}B_{k-1}A_{k+1}C_{k+2}
\]
As we have similar cases in the remaining parts of the paper, let us explain this calculation in more details. It is easy to verify that the diffeomorphism $E_2E_3$ maps the curves $\lbrace b_{k-4},a_{k-2},c_{k-1},a_{k+1},b_{k+3} \rbrace$ to the curves $\lbrace b_{k-4},a_{k-2},b_{k-1},a_{k+1},c_{k+2} \rbrace$, respectively. Since the half twists $H_{b+1,b+3}$ and $H_{b-2,b}$ commute, we get
\begin{eqnarray*}
E_4&=&(E_2E_3)E_2(E_2E_3)^{-1}\\
&=&(E_2E_3)(H_{b+1,b+3}B_{k-4}A_{k-2}C_{k-1}A_{k+1}B_{k+3})(E_2E_3)^{-1}\\
&=&H_{b+1,b+3}B_{k-4}A_{k-2}B_{k-1}A_{k+1}C_{k+2}.
\end{eqnarray*}
We also get the element
\[
E_5=RE_4R^{-1}=H_{b,b+2}B_{k-3}A_{k-1}B_{k}A_{k+2}C_{k+3}\in H.
\]
Hence the subgroup $H$ contains the element
\[E_6=E_1E_5^{-1}=C_kB_{k+4}C_{k+3}^{-1}B_{k}^{-1}.
\]
Moreover, we have the following elements:
\begin{eqnarray*}
E_7&=&RE_5R^{-1}=H_{b-1,b+1}B_{k-2}A_{k}B_{k+1}A_{k+3}C_{k+4},\\
E_8&=&R^{-3}E_7R^{3}=H_{b+2,b+4}B_{k-5}A_{k-3}B_{k-2}A_{k}C_{k+1}
\textrm{ and }\\
E_9&=&(E_7E_8)E_7(E_7E_8)^{-1}=H_{b-1,b+1}B_{k-2}A_{k}C_{k+1}A_{k+3}C_{k+4},
\end{eqnarray*}
are contained in $H$.
Thus, we obtain the element $E_7E_{9}^{-1}=B_{k+1}C_{k+1}^{-1} \in H$. By conjugating $B_{k+1}C_{k+1}^{-1}$ with powers of $R$, we have $B_{i}C_{i}^{-1} \in H$ for all $i=1,\ldots,g-1$. Moreover, the element $
E_6(B_kC_k^{-1})=B_{k+4}C_{k+3}^{-1}
$ is contained in $H$. Thus, each $B_{i+1}C_i^{-1}$ is in $H$ for all $i=1,\ldots,g-1$ by conjugating this element with powers of $R$. Consider the elements
\begin{eqnarray*}
E_{10}&=&(B_{k}C_{k}^{-1})(B_{k+5}C_{k+4}^{-1})(C_{k+4}B_{k+4}^{-1})E_1\\
&=&H_{b,b+2}B_{k-3}A_{k-1}B_kA_{k+2}B_{k+5}, \\
E_{11}&=&R^{-1}E_{10}R=H_{b+1,b+3}B_{k-4}A_{k-2}B_{k-1}A_{k+1}B_{k+4}\\
E_{12}&=&R^{3}E_{11}R^{-3}=H_{b-2,b}B_{k-1}A_{k+1}B_{k+2}A_{k+4}B_{k+7} \textrm { and }\\
E_{13}&=&(E_{11}E_{12})E_{11}(E_{11}E_{12})^{-1}=H_{b+1,b+3}B_{k-4}A_{k-2}B_{k-1}A_{k+1}A_{k+4},
\end{eqnarray*}
which are contained in $H$. Hence, $H$ contains the element
$E_{13}E_{11}^{-1}=A_{k+4}B_{k+4}^{-1}$. Thus, $A_{i}B_{i}^{-1}\in H$ for $i=1,\ldots, g$,
by conjugating $A_{k+4}B_{k+4}^{-1}$ with powers of $R$.
Finally, we obtain the following elements:
\begin{eqnarray*}
A_1A_{2}^{-1}&=&(A_1B_1^{-1})(B_1C_1^{-1})(C_1B_{2}^{-1})(B_{2}A_{2}^{-1}),\\
B_1B_{2}^{-1}&=&(B_1C_1^{-1})(C_1B_{2}^{-1}) \textrm{ and }\\
C_1C_{2}^{-1}&=&(C_1B_{2}^{-1})(B_{2}C_{2}^{-1}),
\end{eqnarray*}
which are all contained in $H$. This completes the proof for $p=2b+1\geq 7$ by Theorem~\ref{thm1}.
For $p=2b\geq 6$, one can replace $H_{b,b+2}$ with $H_{b,b+1}$ and follow exactly the same steps as above.
\end{proof}
\begin{lemma}\label{lem2k+1}
For $g=2k+1\geq 13$, the subgroup $H$ of ${\rm Mod}^{*}({\Sigma}igma_{g,p})$ generated by
\begin{equation*}
\begin{cases}
\rho_1, \rho_2, \rho_2H_{b,b+2}A_{k-1}C_{k-3}B_{k+1}C_{k+4}A_{k+3} & \text {if $p=2b+1\geq 7,$}\\
\rho_1, \rho_2, \rho_2H_{b,b+1}A_{k-1}C_{k-3}B_{k+1}C_{k+4}A_{k+3}& \text{if $p=2b\geq 6$}
\end{cases}
\end{equation*}
contains the Dehn twists $A_i$, $B_i$ and $C_j$ for $i=1,\ldots,g$ and $j=1,\ldots,g-1$.
\end{lemma}
\begin{proof}
Consider the models for $\Sigma_{g,p}$ as shown in Figures~\ref{OO} and~\ref{OE}. First let us consider the case $p=2b+1$. Let $F_1=H_{b,b+2}A_{k-1}C_{k-3}B_{k+1}C_{k+4}A_{k+3}$ so that the subgroup $H$ is generated by the elements $\rho_1$, $\rho_2$ and $\rho_2F_1$. Since $H$ contains the elements $\rho_1$, $\rho_2$ and $\rho_2F_1$, it also contains the elements $R=\rho_1\rho_2$ and $F_1=\rho_2\rho_2F_1$.
Let $F_2$ denote the conjugation of $F_1$ by $R^{-1}$ so that
\[
F_2=R^{-1}F_1R=H_{b+1,b+3}A_{k-2}C_{k-4}B_{k}C_{k+3}A_{k+2}\in H,
\]
and let $F_3$ be the conjugation of $F_2$ by $R^{3}$:
\[
F_3=R^{3}F_2R^{-3}=H_{b-2,b}A_{k+1}C_{k-1}B_{k+3}C_{k+6}A_{k+5}\in H.
\]
From these, we get the following element:
\begin{eqnarray*}
F_4&=&(F_2F_3)F_2(F_2F_3)^{-1}\\
&=&H_{b+1,b+3}A_{k-2}C_{k-4}C_{k-1}B_{k+3}A_{k+2},
\end{eqnarray*}
which is contained in $H$. Thus, the subgroup $H$ contains the element
\[
F_5=F_4F_2^{-1}=C_{k-1}B_{k+3}C_{k+3}^{-1}B_{k}^{-1}.
\]
Also we get the following elements:
\begin{eqnarray*}
F_6&=&R^{3}F_4R^{-3}=H_{b-2,b}A_{k+1}C_{k-1}C_{k+2}B_{k+6}A_{k+5} \textrm{ and }\\
F_7&=&(F_4F_6)F_4(F_4F_6)^{-1}=H_{b+1,b+3}A_{k-2}C_{k-4}C_{k-1}C_{k+2}A_{k+2},
\end{eqnarray*}
which are contained in $H$.
Hence, we see that the element $F_7F_{4}^{-1}=C_{k+2}B_{k+3}^{-1} \in H$, which implies that $C_{i}B_{i+1}^{-1} \in H$ for all $i=1,\ldots,g-1$ by the action of $R$. It follows from the element $B_{k}C_{k-1}^{-1}\in H$ that
$F_5(B_{k}C_{k-1}^{-1})=B_{k+3}C_{k+3}^{-1}
$ is also contained in $H$. Similarly we have $B_{i}C_i^{-1} \in H$ for all $i=1,\ldots,g-1$ by the action of $R$. Moreover, the elements
\begin{eqnarray*}
F_8&=&(B_{k+1}C_{k}^{-1})(C_{k}B_{k}^{-1})F_2\\
&=&H_{b+1,b+3}A_{k-2}C_{k-4}B_{k+1}C_{k+3}A_{k+2}, \\
F_9&=&R^{3}F_8R^{-3}=H_{b-2,b}A_{k+1}C_{k-1}B_{k+4}C_{k+6}A_{k+5} \textrm { and }\\
F_{10}&=&(F_8F_9)F_8(F_8F_9)^{-1}=H_{b+1,b+3}A_{k-2}C_{k-4}A_{k+1}B_{k+4}A_{k+2}
\end{eqnarray*}
are all in $H$. Thus $H$ contains the element
$F_8F_{10}^{-1}(B_{k+4}C_{k+3}^{-1})=B_{k+1}A_{k+1}^{-1}$. Hence, $B_{i}A_{i}^{-1}\in H$ for $i=1,\ldots, g$ by conjugating this element with powers of $R$.
The remaining part of the proof can be completed as in the proof of Lemma~\ref{lem2k}.
\end{proof}
\begin{lemma}\label{lem11}
For $g=11$, the subgroup $H$ of ${\rm Mod}^{*}({\Sigma}igma_{g,p})$ generated by
\begin{equation*}
\begin{cases}
\rho_1, \rho_2, \rho_1H_{b,b+1}B_1A_4C_6A_9 & \text {if $p=2b+1\geq 15,$}\\
\rho_1, \rho_2, \rho_1H_{b-1,b+1}B_1A_4C_6A_9 & \text{if $p=2b\geq 16$}
\end{cases}
\end{equation*}
contains the Dehn twists $A_i$, $B_i$ and $C_j$ for $i=1,\ldots,g$ and $j=1,\ldots,g-1$.
\end{lemma}
\begin{proof}
Consider the models for ${\Sigma}igma_{g,p}$ as shown in Figures~\ref{OO} and~\ref{OE}. Let us first consider the case $p=2b+1$. Let $G_1=H_{b,b+1}B_1A_4C_6A_9$ and $H$ be the group generated by the elements $\rho_1$, $\rho_2$ and $\rho_1G_1$. It is easy to see that $H$ contains the elements $R=\rho_1\rho_2$ and $G_1=\rho_1\rho_1G_1$. We then have the following elements:
\begin{eqnarray*}
G_2&=&R^{-3}G_1R^3=H_{b+3,b+4}B_9A_1C_3A_6, \\
G_3&=&(G_1G_2)G_1(G_1G_2)^{-1}=H_{b,b+1}A_1A_4C_6B_9,\\
G_4&=&R^{3}G_3R^{-3}=H_{b-3,b-2}A_4A_7C_9B_1,\\
G_5&=&(G_4G_3)G_4(G_4G_3)^{-1}=H_{b-3,b-2}A_4A_7B_9A_1,\\
G_6&=&R^3G_5R^{-3}=H_{b-6,b-5}A_7A_{10}B_1A_4\textrm{ and }\\
G_7&=&(G_5G_6)G_5(G_5G_6)^{-1}=H_{b-3,b-2}A_4A_7B_9B_1,
\end{eqnarray*}
which are all in $H$. Thus, we obtain the element $G_5G_7^{-1}=A_1B_1^{-1}$. By conjugating by powers of $R$, we see that $A_iB_i^{-1}\in H$ for $i=1,2,\ldots,g$. We also have
\begin{eqnarray*}
G_8&=&(B_4A_4^{-1})G_1=H_{b,b+1}B_1B_4C_6A_9 \in H,\\
G_{9}&=&R^{-3}G_8R^3=H_{b+3,b+4}B_9B_1C_3A_6 \in H \textrm{ and }\\
G_{10}&=&(G_{9}G_8)G_{9}(G_{9}G_8)^{-1}=H_{b+3,b+4}A_9B_1B_4A_6\in H.
\end{eqnarray*}
Hence, we get $G_{9}G_{10}^{-1}(A_9B_9^{-1})=C_3B_4^{-1}\in H$, which implies that $C_iB_{i+1}^{-1}\in H$ for $i=1,2,\ldots,g-1$ by the action of $R$. Moreover, the subgroup $H$ contains the following elements:
\begin{eqnarray*}
G_{11}&=&(B_9A_9^{-1})G_1=H_{b,b+1}B_1A_4C_6B_9,\\
G_{12}&=&R^{-3}G_{11}R^3=H_{b+3,b+4}B_9A_1C_3B_6 \textrm{ and } \\
G_{13}&=&(G_{12}G_{11})G_{12}(G_{12}G_{11})^{-1}=H_{b+3,b+4}B_9B_1C_3C_6.
\end{eqnarray*}
It follows that $G_{12}G_{13}^{-1}(B_1A_1^{-1})=B_6C_6^{-1}$. Again, by the action of $R$, the elements $B_iC_i^{-1}\in H$. One can complete the remaining part of the proof as in the proof of Lemma~\ref{lem2k}.
\end{proof}
\begin{figure}
\caption{The curves $\gamma_i$ and $e_{i,j}$.}
\label{C}
\end{figure}
\
\begin{lemma}\label{lemma3}
Let $g\geq2$. For $i=1,\ldots,g-1$, in the mapping class group ${\rm Mod}({\Sigma}igma_{g,p})$, the element
\[
\phi_i=B_{i+1}\Gamma_{i}^{-1} C_{i}B_{i}
\]
maps the curve $e_{i,j}$ to the curve $e_{i+1, j}$, where the curves $\gamma_i$ and $e_{i,j}$'s are as in Figure~\ref{C}. Moreover, the diffeomorphism $\phi_i$ is contained in the group $H$ for $i=1,\ldots,g-1$.
\end{lemma}
\begin{proof}
It is easy to see that the diffeomorphism $\phi_i$ maps $e_{i,j}$ to $e_{i+1,j}$. Consider the diffeomorphism
\[
S=A_1B_1C_1\cdots C_{g-2}B_{g-1}C_{g-1}B_g.
\]
Since $S \in H$ and $S$ maps $a_2$ to $ \gamma_1$, the element $SA_2S^{-1}=\Gamma_1\in H$. By conjugating with powers of $R$, the element $\Gamma_i$ is in $H$. We conclude that $\phi_i\in H$.
\end{proof}
Let $H$ be the subgroup of ${\rm Mod}^{*}({\Sigma}igma_{g,p})$
generated by the elements given explicitly in lemmata~\ref{lem2k}, ~\ref{lem2k+1} and~\ref{lem11} with the conditions mentioned in these lemmata.
\begin{lemma}\label{lemma4}
The group ${\rm Mod}_{0}^{*}({\Sigma}igma_{g,p})$ is contained in the group $H$.
\end{lemma}
\begin{proof}
Since the group $H$ contains the Dehn twists $A_1$, $A_2$, $B_1,B_2,\ldots,B_g$ and $C_1,C_2,\ldots,C_{g-1}$ by lemmata~\ref{lem2k}, \ref{lem2k+1} and~\ref{lem11}, it suffices to prove that $H$ also contains the elements $E_{i,j}$ for some fixed $i$ and $j=1,2,\ldots,p-1$. First note that $H$ contains $A_{g}$ and $R=\rho_1\rho_2$. Consider the models for $\Sigma_{g,p}$ as shown in Figures~\ref{EO}, \ref{EE}, \ref{OO} and~\ref{OE}. Since the diffeomorphism $R$ maps $a_{g}$ to $e_{1,p-1}$, we have
\[
RA_{g}R^{-1}=E_{1,p-1} \in H.
\]
The diffeomorphism $\phi_{g-1}\cdots \phi_2\phi_1$ in Lemma~\ref{lemma3} is given by
$\phi_i=B_{i+1}\Gamma_i^{-1}C_iB_i$ which maps each $e_{i,j}$ to $e_{i+1,j}$ for $j=1,2,\ldots,p-1$ (see Figure~\ref{C}). So we get
\[
\phi_{g-1}\cdots \phi_2\phi_1E_{1,p-1}(\phi_{g-1}\cdots \phi_2\phi_1)^{-1}=E_{g,p-1}\in H.
\]
Similarly, the diffeomorphism $R$ sends $e_{g,p-1}$ to $e_{1,p-2}$. Then we have
\[
RE_{g,p-1}R^{-1}=E_{1,p-2}\in H.
\]
It follows from
\[
\phi_{g-1}\cdots \phi_2\phi_1E_{1,p-2}(\phi_{g-1}\cdots \phi_2\phi_1)^{-1}=E_{g,p-2}\in H
\]
that
\[
R(E_{g,p-2})R^{-1}=E_{1,p-3}\in H.
\]
Continuing in this way, we conclude that the elements $E_{1,1},E_{1,2},$ $\ldots,E_{1,p-1}$ are contained in $H$. This completes the proof.
\end{proof}
We thank the referee for pointing out to us the proof of the following lemma.
\begin{lemma}\label{symm}
The symmetric group $S_{2b+1}$ is generated by the transposition $(b,b+2)$ and the $(2b+1)$-cycle $(1,2,\ldots,2b+1)$.
\end{lemma}
\begin{proof}
Set $\tau=(b,b+2)$ and $\sigma=(1,2,\ldots,2b+1)$. It is easy to verify that
\[
\sigma^{2}=(1,3,5,\ldots,2b+1,2,4,6,\ldots,2b).
\]
Now, set $s_i=2i-1$ for $i=1,2,\ldots, b+1$ and $s_{b+1+i}=2i$ for $i=1,2,\ldots, b$. This gives
\[
\sigma^{-b+1}\tau\sigma^{b-1}=(s_1,s_2),
\]
\[
\sigma^{2}=(s_1,s_2,\ldots,s_{2b+1}).
\]
Since $(s_1,s_2)$ and $(s_1,s_2,\ldots,s_{2b+1})$ generate $S_{2b+1}$, we see that $S_{2b+1}=\langle \tau,\sigma \rangle$.
\end{proof}
Now, we are ready to prove the main theorem of this section.
\textit{Proof of Theorem B.}\\
\underline{ If $g=2k\geq 10$ and $p\geq 6$}: In this case, consider the surface $\Sigma_{g,p}$ as in Figures~\ref{EO}
and~\ref{EE}. Since
\[
\rho_2(b_{k-3})=b_{k+4}, \rho_2(a_{k-1})=a_{k+2} \textrm{ and }\rho_2(c_{k})=c_{k}
\]
and $\rho_2$ is an orientation reversing diffeomorphism, we get
\[
\rho_2B_{k-3}\rho_2=B_{k+4}^{-1},
\rho_2A_{k-1}\rho_2=A_{k+2}^{-1} \textrm{ and }
\rho_2C_{k}\rho_2=C_{k}^{-1}.
\]
Also, observe that $\rho_2H_{b,b+2}\rho_2=H_{b,b+2}^{-1}$ for $p=2b+1$ and $\rho_2H_{b,b+1}\rho_2=H_{b,b+1}^{-1}$ for $p=2b$.
Then it is easy to see that each
\begin{equation*}
\begin{cases}
\rho_2H_{b,b+2}B_{k-3}A_{k-1}C_kA_{k+2}B_{k+4} & \text {if $p=2b+1,$}\\
\rho_2H_{b,b+1}B_{k-3}A_{k-1}C_kA_{k+2}B_{k+4} & \text{if $p=2b$}
\end{cases}
\end{equation*}
is an involution. Therefore, the generators of the subgroup $H$ given in Lemma~\ref{lem2k} are involutions.
\underline{ If $g=2k+1\geq 13$ and $p\geq 6$}: In this case, consider the surface ${\Sigma}igma_{g,p}$ as in Figures~\ref{OO}
and~\ref{OE}. It follows from
\[
\rho_2(a_{k-1})=a_{k+3}, \rho_2(c_{k-3})=c_{k+4} \textrm{ and }\rho_2(b_{k+1})=b_{k+1}
\]
and $\rho_2$ is an orientation reversing diffeomorphism that
\[
\rho_2A_{k-1}\rho_2=A_{k+3}^{-1},
\rho_2C_{k-3}\rho_2=C_{k+4}^{-1} \textrm{ and }
\rho_2B_{k+1}\rho_2=B_{k+1}^{-1}.
\]
Also, by the fact that $\rho_2H_{b,b+2}\rho_2=H_{b,b+2}^{-1}$ for $p=2b+1$ and $\rho_2H_{b,b+1}\rho_2=H_{b,b+1}^{-1}$ for $p=2b$,
it is easy to see that the elements
\begin{equation*}
\begin{cases}
\rho_2H_{b,b+2}A_{k-1}C_{k-3}B_{k+1}C_{k+4}A_{k+3} & \text {if $p=2b+1$,}\\
\rho_2H_{b,b+1}A_{k-1}C_{k-3}B_{k+1}C_{k+4}A_{k+3}& \text{if $p=2b$}
\end{cases}
\end{equation*}
are involutions.
\underline{ If $g=11$ and $p\geq 15$}: Consider the surface ${\Sigma}igma_{g,p}$ as in Figures~\ref{OO}
and~\ref{OE}. It is easy to see that
\[
\rho_1(b_{1})=b_{1}, \rho_1(a_{4})=a_{9} \textrm{ and }\rho_1(c_{6})=c_{6}
\]
and $\rho_1$ is an orientation reversing diffeomorphism that
\[
\rho_1B_{1}\rho_1=B_{1}^{-1},
\rho_1A_{4}\rho_1=A_{9}^{-1} \textrm{ and }
\rho_1C_{6}\rho_1=C_{6}^{-1}.
\]
Also, since $\rho_1H_{b,b+1}\rho_1=H_{b,b+1}^{-1}$ for $p=2b+1$ and $\rho_1H_{b-1,b+1}\rho_1=H_{b-1,b+1}^{-1}$ for $p=2b$, it is easy to verify that the elements
\begin{equation*}
\begin{cases}
\rho_1H_{b,b+1}B_{1}A_{4}C_{6}A_{9} & \text {if $p=2b+1$,}\\
\rho_1H_{b-1,b+1}B_{1}A_{4}C_{6}A_{9} & \text{if $p=2b$}
\end{cases}
\end{equation*}
are involutions. We see that the generators of the subgroup $H$ given in Lemma~\ref{lem11} are involutions.
The group ${\rm Mod}_{0}^{*}(\Sigma_{g,p})$ is contained in $H$ by Lemma~\ref{lemma4}. We finish the proof by showing that $H$ is mapped surjectively onto $S_p$ by Lemma~\ref{lemma1}: The subgroup $H$ contains the element $\rho_2\rho_1$ which has the image $(1,2,\ldots,p)\in S_p$. For $g\neq 11$, since the subgroup $H$ contains the Dehn twists $A_i$, $B_i$ and $C_i$ by lemmata~\ref{lem2k} and~\ref{lem2k+1}, the group $H$ contains the half twist $H_{b,b+2}$ if $p=2b+1$ and the half twist $H_{b,b+1}$ if $p=2b$. For $p=2b+1$, it follows from Lemma~\ref{symm} that the image of $H_{b,b+2}$, which is $(b,b+2)$, and the $p$-cycle $(1,2,\ldots,p)$ generate $S_p$. For $p=2b$, it is clear that the image of $H_{b,b+1}$, which is $(b,b+1)$, and again the $p$-cycle $(1,2,\ldots,p)$ generate $S_p$. Likewise, for $g=11$, by Lemma~\ref{lem11}, the subgroup $H$ contains the half twist $H_{b,b+1}$ if $p=2b+1$, and the half twist $H_{b-1,b+1}$ if $p=2b$. In the latter case $H$ also contains the half twist $R^{-1}H_{b-1,b+1}R=H_{b,b+2}$. This finishes the proof by the above argument.
Before we finish the paper let us mention the cases $p=2$ or $p=3$.
In these cases, the generating set of $H$ can be chosen as
\[
H=\left\{\begin{array}{lll}
\lbrace \rho_1,\rho_2, \rho_2B_{k-3}A_{k-1}C_kA_{k+2}B_{k+4} \rbrace & \textrm{if} & g=2k\geq10,\\
\lbrace \rho_1,\rho_2,\rho_2A_{k-1}C_{k-3}B_{k+1}C_{k+4}A_{k+3} \rbrace & \textrm{if} & g=2k+1\geq13,\\
\lbrace \rho_1,\rho_2,\rho_1B_1A_4C_6A_9\rbrace & \textrm{if} & g=11.\\
\end{array}\right.
\]
One can easily prove that the group $H$ contains ${\rm Mod}_{0}^{*}(\Sigma_{g,p})$ by arguments similar to those in the proofs of lemmata~\ref{lem2k+1}, \ref{lem2k}, \ref{lem11} and \ref{lemma4}. The element $\rho_2\rho_1 \in H$ has the image $(1,2,\ldots,p)\in S_p$. Thus, for $p=2$ this element generates $S_p$. If $p=3$, the element $\rho_1$ has the image $(1,2)$. Therefore, the group $H$ is mapped surjectively onto $S_p$ for $p=2,3$. We conclude that the group $H$ is equal to ${\rm Mod}^{*}(\Sigma_{g,p})$.
\end{document} |
\begin{document}
\title{Using quantum mechanics for calculation of different infinite sums}
\author{ Petar Mali$^{1}$} \author{Milica Rutonjski $^{1}$} \author{Slobodan Rado\v sevi\' c $^{1}$} \author{Milan Panti\' c $^{1}$} \author{Milica Pavkov-Hrvojevi\' c $^{1}$}
\affiliation{$^1$ Department of Physics, Faculty of Sciences, University of Novi Sad,
Trg Dositeja Obradovi\' ca 4, 21000 Novi Sad, Serbia}
\date{\today}
\begin{abstract}
We demonstrate that certain class of infinite sums can be calculated analytically starting from a specific quantum mechanical problem and using principles of quantum mechanics. For simplicity we illustrate the method by exploring the problem of a particle in a box. Twofold calculation of the mean value of energy for the polynomial wave function inside the well yields even argument $p$ ($p>2$) of Riemann zeta and related functions. This method can be applied to a wide class of exactly solvable quantum mechanical problems which may lead to different infinite sums. Besides, the analysis performed here provides deeper understanding of superposition principle and presents useful exercise for physics students.
\end{abstract}
\maketitle
\section{Introduction}
\label{intro}
For most physics students, when studying quantum mechanics, the first problem that they encounter is the problem of a particle in a box. Standard quantum mechanics textbooks (see for instance \cite{grifits,sif,shankar}) include it as an early exercise illustrating eigenproblem in infinite-dimensional vector space and superposition principle. Although there are some upgrades to the problem including particle in an infinite potential well with moving walls \cite{zidovi,zidovi2} and the problem of splitting the box into two slightly unequal halves \cite{split}, even standard problem of a particle in a box is rich enough for investigation \cite{ermitovost}. In the problem of a particle in a box, an infinite one-dimensional potential well, the potential is given by
\begin{equation}
V(x) =
\begin{cases}
0, & x \in (0,a) \\
\infty, & x \notin (0,a)
\end{cases}
\end{equation}
where $a$ is the well width. The eigenstates of Hamiltonian, normalized states with well-defined energy, are given by $\psi_n(x)=\sqrt{\frac{2}{a}}\sin\frac{n \pi x}{a}, n=1,2,3...$ The state of the system $\psi(x)$ is an element of $L^2[0,a]$ -- it is a unit vector which vanishes at $x=0$ and $x=a$. Since eigenstates of Hamiltonian form an orthonormal basis, any state $\psi(x)$ of the particle in the well can be written as a linear combination of the eigenstates of Hamiltonian. The mean (expectation) value of energy in the given state $\psi(x)$ can be calculated by using both
\begin{equation}
\langle \hat{H} \rangle_{\psi}=\int^a_0 \psi^*(x)\left(-\frac{\hbar^2}{2m}\frac{\mathrm{d}^2}{\mathrm{d} x^2}\right)\psi(x)\mathrm{d} x \label{srednjajedan},
\end{equation}
and
\begin{equation}
\langle \hat{H} \rangle_{\psi}=\sum^{\infty}_{n=1}W(E_n)E_n \label{srednjadva}
\end{equation}
where
\begin{equation}
W(E_n)=|C_n|^2=\Bigg|\int^a_0 \psi_n(x) \psi(x)\mathrm{d} x\Bigg|^2.
\label{verovatnoca}\end{equation}
Hereafter we shall use $\langle \hat{H} \rangle_{\psi} \equiv \langle \hat{H} \rangle$ for brevity. In cases when the state of the system is represented by polynomial $\psi(x)$, there are infinitely many non-zero terms in Eq. (\ref{srednjadva}). The aim of the paper is to use that type
of states for the purpose of evaluating a certain class of infinite sums appearing in the problem of black body radiation \cite{crnotelo} and thermodynamics
of Fermi \cite{filips,rajh} and Bose \cite{sloba} systems.
\section{Results}
\label{Results}
Taking into account that orthonormal basis $\psi_n(x), n=1,2,3...$ inside the well is of trigonometric type, prepared state $\psi(x)$ needs to be a polynomial inside the well in order to be an infinite superposition of basis vectors. It will be shown that polynomials of different degrees produce different infinite sums for the convenient choice of arguments of Riemann zeta function and closely related infinite sums. The main goal will be to calculate those sums and classify their appearance in relation to the degree of the polynomial $\psi(x)$. We shall start the analysis with the second-degree polynomial since first-degree polynomials cannot satisfy boundary conditions $\psi(0)=\psi(a)=0$.
\subsection{Polynomial wave function of second degree} \label{A}
The only second-degree polynomial wave function satisfying the boundary conditions is $\psi(x)=Cx(a-x)$, where $C$ is the normalization constant. The wave function is normalized by employing $\int^a_0|\psi(x)|^2\mathrm{d} x=1$. Without loss of generality we choose constant $C$ to be positive, which leads to $\psi(x)=\sqrt{\frac{30}{a^5}}x(a-x)$.
As already mentioned the mean value of energy can be calculated twofold. Using (\ref{srednjajedan}) we obtain
\begin{equation}
\langle \hat{H} \rangle=\int^{a}_0\psi(x)\left(-\frac{\hbar^2}{2m}\frac{\mathrm{d}^2}{\mathrm{d} x^2}\right)\psi(x)\mathrm{d} x=\frac{5\hbar^2}{ma^2}, \label{aav1}
\end{equation}
whereas the determination of the mean value of energy using (\ref{srednjadva}) requires the calculation of probabilities $W(E_n)$ defined by (\ref{verovatnoca}). The integration can be simplified by employing the tabular method of integration by parts \cite{tabularint} which will be used throughout the paper. The given method for solving the integral $\int^a_0 x(a-x)\sin \frac{n \pi x}{a}\mathrm{d} x$ is shown below in detail.
\[
\renewcommand{\arraystretch}{1.5}
\begin{array}{c @{\hspace*{1.0cm}} c}\toprule
D & I \\\cmidrule{1-2}
x(a-x)\tikzmark{Left 1} & \tikzmark{Right 1}\sin \frac{n \pi x}{a} \\
a-2x \tikzmark{Left 2} & \tikzmark{Right 2}-\frac{a}{n \pi}\cos \frac{n \pi x}{a} \\
-2 \tikzmark{Left 3} & \tikzmark{Right 3}-(\frac{a}{n \pi})^2 \sin \frac{n \pi x}{a} \\
0 \tikzmark{Left 4} & \tikzmark{Right 4}(\frac{a}{n \pi})^3\cos \frac{n \pi x}{a} \\\bottomrule
\mbox{e}nd{array}
\]
\DrawArrow[draw=red]{Left 1}{Right 2}{$+$}
\DrawArrow[draw=brown]{Left 2}{Right 3}{$-$}
\DrawArrow[draw=blue]{Left 3}{Right 4}{$+$}
Here $D$ stands for differentiation of the elements of the first column whereas $I$ denotes integration of the second column elements. The elements connected by arrows are multiplied and added by using alternating signs. Thus, we obtain
\begin{widetext}
\begin{equation}
\int^a_0 x(a-x)\sin \frac{n \pi x}{a}\mathrm{d} x=\Bigg[x(a-x)\left(-\frac{a}{n \pi}\cos \frac{n \pi x}{a}\right)-(a-2x)\left(-\left(\frac{a}{n \pi}\right)^2 \sin \frac{n \pi x}{a}\right)+(-2)\left(\frac{a}{n \pi}\right)^3\cos \frac{n \pi x}{a} \Bigg]\Bigg|^a_0,
\end{equation}
\end{widetext}
where due to the integration boundaries only the third term does not vanish.
Hereafter the tabular method will not be explicitly shown, since its application is analogous to the one presented above. The simplification introduced by this method becomes invaluable as the degree of the polynomial grows.
The performed integration leads to the following probabilities
\begin{equation}
W(E_n)=\frac{480}{n^6\pi^6}[1-(-1)^n].
\end{equation}
Therefrom, the mean value of energy calculated from (\ref{srednjadva}) reads
\begin{equation}
\langle \hat{H} \rangle=\sum^{\infty}_{n=1}\frac{480}{n^6\pi^6}[1-(-1)^n]\frac{n^2\pi^2\hbar^2}{2ma^2}. \label{sumezadrugistepen}
\end{equation}
The only non-zero terms are obtained for $n=2k+1$. Therefore,
\begin{equation}
\langle \hat{H} \rangle=\frac{960}{\pi^6}\frac{\pi^2\hbar^2}{2ma^2}\sum^{\infty}_{k=0}\frac{1}{(2k+1)^4} \label{aav2}.
\end{equation}
From Eqs. (\ref{aav1}) and (\ref{aav2}) we calculate the following infinite sum
\begin{equation}
\sum^{\infty}_{k=0}\frac{1}{(2k+1)^4}=\frac{\pi^4}{96} \label{nep4}.
\end{equation}
Making use of relation (see \cite{atlas})
\begin{equation}
\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^{p}}=(1-2^{-p+1})\sum^{\infty}_{n=1}\frac{1}{n^{p}} \label{tajnaveza}
\end{equation}
as well as already used relation
\begin{equation}
\sum^{\infty}_{n=1}\frac{1}{n^p}+\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^p}=2\sum^{\infty}_{n=0}\frac{1}{(2n+1)^p} \label{javnaveza}
\end{equation} for $p=4$,
other two sums $\sum^{\infty}_{n=1}\frac{1}{n^4}$ and $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}$ appearing in (\ref{sumezadrugistepen}) can also be calculated starting from the second-degree polynomial only. Calculation leads to $\frac{\pi^4}{90}$ and $\frac{7 \pi^4}{720}$, respectively. It should be noted that the sum $\sum^{\infty}_{n=1}\frac{1}{n^p}$ is known as the Riemann zeta function $\zeta(p)$, $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^p}$ is the alternating zeta function, also known as the Dirichlet eta function $\eta(p)$, whereas $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^p}$ is the Dirichlet lambda function $\lambda(p)$. Calculation of the mean value of energy in the state $\psi(x)=\sqrt{\frac{30}{a^5}}x(a-x)$ is performed in different quantum mechanics books (see for instance \cite{grifits}), however not for the purpose of obtaining infinite sums. In particular, the sum $\lambda(4)$ is therein taken from math tables.
\subsection{Polynomial wave functions of third degree } \label{B}
In the case of third-degree polynomials there exists an infinite set of wave functions $\psi(x)$ that satisfy boundary conditions. Let us take an arbitrary polynomial wave function of this type
$\psi(x)=\sqrt{\frac{210}{a^7}}x(a-x)(a-2x).$ The mean value of energy calculated using (\ref{srednjajedan}) is
\begin{equation}
\langle \hat{H} \rangle=\frac{21\hbar^2}{ma^2}. \label{bav1}
\end{equation}
On the other hand, following (\ref{srednjadva}) and procedure given in Subsection \ref{A} we obtain
\begin{equation}
\langle \hat{H} \rangle=\sum^{\infty}_{n=1}\frac{30240}{n^6\pi^6}[1+(-1)^n]\frac{n^2\pi^2\hbar^2}{2ma^2}. \label{bav15}
\end{equation}
The only non-zero terms are for $n=2k$:
\begin{equation}
\langle \hat{H} \rangle=\frac{1890 \hbar^2}{ma^2 \pi^4}\sum^{\infty}_{k=1}\frac{1}{k^4} \label{bav2}.
\end{equation}
Hence, from Eqs. (\ref{bav1}) and (\ref{bav2}) we obtain Riemann zeta function $\zeta(4)$
\begin{equation}
\sum^{\infty}_{k=1}\frac{1}{k^4}=\frac{\pi^4}{90}. \label{zetaod4}
\end{equation}
Beside this result, the sum $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}$ can also be extracted from Eq. (\ref{bav15}). After having determined $\sum^{\infty}_{n=1}\frac{1}{n^4}$ this sum can be calculated from (\ref{sumezadrugistepen}). However, our classification method will be based on which sums can be obtained starting from polynomials of a certain degree. Hence, even without knowing the relation (\ref{tajnaveza}), the system of equations for the sums $\sum^{\infty}_{n=1}\frac{1}{n^4}$ and $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}$ can be formed by taking another third-degree polynomial, for instance $\psi(x)=\sqrt{\frac{105}{a^7}}x^2(a-x)$. Calculating the mean value of energy from (\ref{srednjajedan}) and (\ref{srednjadva}) we obtain
\begin{equation}
\langle \hat{H} \rangle=\frac{7\hbar^2}{ma^2} \label{cav1}
\end{equation}
and
\begin{equation}
\langle \hat{H} \rangle=\frac{420 \pi^2 \hbar^2}{\pi^6 ma^2}[5\sum^{\infty}_{n=1}\frac{1}{n^4}-4\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}] \label{cav2},
\end{equation}
respectively.
Using result (\ref{zetaod4})
we get
\begin{equation}
\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}=\frac{7\pi^4}{720}.
\end{equation}
We have illustrated a method that was not strictly necessary to employ here; we discuss it for methodological reasons, since it becomes inevitable for higher-degree polynomials.
It is now straightforward to calculate the sum $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^4}$ using Eq. (\ref{javnaveza}), however we may ask whether it can be obtained without calculating $\sum^{\infty}_{n=1}\frac{1}{n^4}$ and $\sum^{\infty}_{n=1}\frac{(-1)^n}{n^4}$, as in the case of the second-degree polynomial. Namely, any third-degree polynomial wave function inside the well can be written in the general form $\psi(x)=x(a-x)Q_1(x)$, where $Q_1(x)$ is a first-degree polynomial. Following that,
\begin{widetext}
\begin{equation}
C_n=\sqrt{\frac{2}{a}}\int^a_0 \psi(x)\sin \frac{n \pi x}{a}\mbox{d} x=\sqrt{\frac{2}{a}}\left[\left(\frac{a}{n\pi}\right)^3(-1)^n[-2Q_1(a)-2aQ_1'(a)]-\left(\frac{a}{n\pi}\right)^3[-2Q_1(0)+2aQ_1'(0)]\right]. \end{equation}
\end{widetext}
We can see that the only three sums that can possibly be obtained in the case of third-degree polynomial wave functions are $\sum^{\infty}_{n=1}\frac{1}{n^4}$, $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}$ and $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^4}$. The last one appears when
\begin{equation}
-2Q_1(a)-2aQ_1'(a)=-2Q_1(0)+2aQ_1'(0),
\end{equation}
which is equivalent to
\begin{equation}
Q_1(0)-Q_1(a)=2aQ_1'(0). \label{uslov}
\end{equation}
This is however not possible for any first-degree polynomial $Q_1$. Condition (\ref{uslov}) is satisfied only for $Q_1(x)=\mbox{const}$ ($\psi(x)$ is a second-degree polynomial), i.e. there exists no third-degree polynomial wave function from which $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^4}$ could be independently calculated. To understand profoundly why this is the case it is necessary to translate the reference frame so that $x'=x-\frac{a}{2}$. In the new reference frame the potential is given by \begin{equation}
V(x') =
\begin{cases}
0, & x' \in (-\frac{a}{2},\frac{a}{2}) \\
\infty, & x' \notin (-\frac{a}{2},\frac{a}{2})
\end{cases}
.\end{equation}
By acting with the translation operator $T(\frac{a}{2})=\exp(\frac{a}{2}\frac{d}{d x})$ (see for instance \cite{fermi,grajner}) on the states $\psi_n(x)=\sqrt{\frac{2}{a}}\sin\frac{n\pi x}{a}, n=1,2,3,...$ one can obtain eigenstates in the potential $V(x')$ as
\begin{equation}
\psi_n(x')=\sqrt{\frac{2}{a}}
\begin{cases}
(-1)^{\frac{n-1}{2}}\cos\frac{n \pi x'}{a}, & \mbox{for odd}\,\, n \\
(-1)^{\frac{n}{2}}\sin \frac{n \pi x'}{a}, & \mbox{for even}\,\, n
\end{cases}
.\end{equation}
States in the new reference frame are even when $n$ is an odd number, and vice versa. By translating the state
$\psi(x)=x(a-x)Q_1(x)=x(a-x)(a_1x+b_1)$ with the same translation operator we get the state $\psi(x')=(x'+\frac{a}{2})(\frac{a}{2}-x')(a_1x'+\frac{a_1a}{2}+b_1)$. The sum $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^4}$ can be solely obtained only in the case when $\psi(-x')=\psi(x')$ and this is possible only when $a_1=0$, which corresponds to the polynomial function of the second degree.
\subsection{Polynomial wave function of degree four and higher} \label{C}
Following the routine from Subsection \ref{B} we choose two arbitrary fourth-degree polynomials, $\sqrt{\frac{252}{a^9}}x^3(a-x)$ and $\sqrt{\frac{630}{a^9}}x^2(a-x)(a-2x)$. The expectation values of energy $\langle \hat{H} \rangle$ in the given states read $\frac{54\hbar^2}{5ma^2}$ and $\frac{24\hbar^2}{ma^2}$, respectively. Analogous procedure yields the system of equations
\begin{widetext}
\begin{eqnarray}
&& \sum^{\infty}_{n=1} \Big[\frac{36}{n^4 \pi^4}-\frac{288}{n^6\pi^6}[1-(-1)^n]+\frac{1152}{n^8\pi^8}[1-(-1)^n]\Big]=\frac{3}{70} \label{jnamil} \nonumber\\
&&\sum^{\infty}_{n=1}\Big[\frac{4}{n^4\pi^{4}}[17-8(-1)^n]
+ \frac{960}{n^6\pi^6}[(-1)^n-1]+\frac{4608}{n^{8}\pi^{8}}[1-(-1)^n]\Big]=\frac{4}{105}.
\end{eqnarray}
\end{widetext}
From this it is obvious that the sums $\sum^{\infty}_{n=1}\frac{1}{n^4}$, $\sum^{\infty}_{n=1}\frac{1}{n^6}$, $\sum^{\infty}_{n=1}\frac{1}{n^8}$, $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^4}$, $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^6}$, $\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^8}$, $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^4}$, $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^6}$, and $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^8}$ can be obtained starting from the fourth-degree polynomials only. A more detailed inspection of (\ref{jnamil}) suggests that one more fourth-degree polynomial has to be used to get the system of equations in closed form, due to the fact that some of the above-mentioned sums are related (see Eqs. (\ref{tajnaveza}) and (\ref{javnaveza})).
The sum $\sum^{\infty}_{n=0}\frac{1}{(2n+1)^4}$ does not appear in (\ref{jnamil}). However it can be independently calculated in the case when at least one of the polynomials $\psi(x)$ is chosen so that $e^{\frac{a}{2}\frac{d}{dx}}\psi(x)$ is an even function, for instance $\psi(x)=\sqrt{\frac{10080}{313a^{9}}}x(a-x)(\frac{a}{2}+x)(\frac{3a}{2}-x)$. It should be noted that three polynomials are still sufficient for obtaining all mentioned sums.
If we proceed to the fifth-degree polynomials, we discover that they do not lead to new sums. This brings us to the conclusion that odd-degree polynomials are not as rich in analytically computable sums as the even-degree ones and therefore are of less interest. In the case of sixth-degree polynomial wave functions the appropriate choice of polynomials would offer us the possibility to calculate the sums $\sum_{n=1}^{\infty}\frac{1}{n^p}$ and $\sum_{n=1}^{\infty}\frac{(-1)^{n-1}}{n^p}$ as well as $\sum_{n=0}^{\infty}\frac{1}{(2n+1)^p}$ for $p=4,6,8,10,12$. Should we proceed to polynomials of higher degree, the pattern would remain the same. The list of the sums that can be calculated starting from the polynomial wave functions of a certain degree, together with the values of those sums, is given in Table \ref{tabelasume}.
\begin{widetext}
\begin{center}
\begin{table}
\caption{List of the sums that can be calculated using the $n$th degree polynomial wave functions}
\begin{threeparttable}
\begin{tabular}{c |c| c |c}
\hline\hline
degree & $\psi(x)$ & $p$ & values of sums \tnote{a}\\ [0.5ex]
\hline\hline
2 & $Cx(a-x)\,\,\,\, \,\qquad \tnote{b} $ & $4$&\vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$}\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$}}}\hbox{\strut $\lambda(4)= \frac{\pi^4}{96}$}} \\ \hline
3 &\,\, $Cx(a-x)Q_1(x) \,\,\,\, \tnote{c}$ \hspace{1mm} & $4$ &\vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$}\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$}}}\hbox{\strut $\lambda(4)= \frac{\pi^4}{96}$}} \\ \hline
4 & $Cx(a-x)Q_2(x)$ & $4$,$6$,$8$ & \vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$\quad$\zeta(6)=\frac{\pi^6}{945}$\quad $\zeta(8)=\frac{\pi^8}{9450}$}\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$\quad $\eta(6)=\frac{31\pi^6}{30240}$\quad $\eta(8)=\frac{127\pi^8}{1209600}$}} }\hbox{\strut $\lambda(4)=\frac{\pi^4}{96}$\quad $\lambda(6)=\frac{\pi^6}{960}$\quad $\lambda(8)=\frac{17\pi^8}{161280}$}} \\ \hline
5 & $Cx(a-x)Q_3(x)$ & $4$,$6$,$8$ & \vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$\quad$\zeta(6)=\frac{\pi^6}{945}$\quad $\zeta(8)=\frac{\pi^8}{9450}$}\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$\quad $\eta(6)=\frac{31\pi^6}{30240}$\quad $\eta(8)=\frac{127\pi^8}{1209600}$}} }\hbox{\strut $\lambda(4)=\frac{\pi^4}{96}$\quad $\lambda(6)=\frac{\pi^6}{960}$\quad $\lambda(8)=\frac{17\pi^8}{161280}$}} \\ \hline
6 & $Cx(a-x)Q_4(x)$ &$4$,$6$,$8$,$10$,$12$& \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$\quad$\zeta(6)=\frac{\pi^6}{945}$\quad $\zeta(8)=\frac{\pi^8}{9450}$}\hbox{\strut $\zeta(10)=\frac{\pi^{10}}{93555}$\quad $\zeta(12)=\frac{691\pi^{12}}{638512875}$}}}\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$\quad $\eta(6)=\frac{31\pi^6}{30240}$\quad $\eta(8)=\frac{127\pi^8}{1209600}$}}}\hbox{\strut $\eta(10)=\frac{73\pi^{10}}{6842880}$\quad $\eta(12)=\frac{1414477\pi^{12}}{1307674368000}$}}}\hbox{\strut $\lambda(4)=\frac{\pi^4}{96}$\quad $\lambda(6)=\frac{\pi^6}{960}$\quad $\lambda(8)=\frac{17\pi^8}{161280}$}}}\hbox{\strut $\lambda(10)=\frac{31\pi^{10}}{2903040}$\quad $\lambda(12)=\frac{691\pi^{12}}{638668800}$}} \\ [1ex] \hline
7 & $Cx(a-x)Q_5(x)$ &$4$,$6$,$8$,$10$,$12$& \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$\quad$\zeta(6)=\frac{\pi^6}{945}$\quad $\zeta(8)=\frac{\pi^8}{9450}$}\hbox{\strut $\zeta(10)=\frac{\pi^{10}}{93555}$\quad $\zeta(12)=\frac{691\pi^{12}}{638512875}$}}}\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$\quad $\eta(6)=\frac{31\pi^6}{30240}$\quad $\eta(8)=\frac{127\pi^8}{1209600}$}}}\hbox{\strut $\eta(10)=\frac{73\pi^{10}}{6842880}$\quad $\eta(12)=\frac{1414477\pi^{12}}{1307674368000}$}}}\hbox{\strut $\lambda(4)=\frac{\pi^4}{96}$\quad $\lambda(6)=\frac{\pi^6}{960}$\quad $\lambda(8)=\frac{17\pi^8}{161280}$}}}\hbox{\strut $\lambda(10)=\frac{31\pi^{10}}{2903040}$\quad $\lambda(12)=\frac{691\pi^{12}}{638668800}$}} \\ [1ex] \hline
8 & $Cx(a-x)Q_6(x)$ &$4$,$6$,$8$,$10$,$12$,$14$,$16$& \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut \vtop{\hbox{\strut $\zeta(4)=\frac{\pi^4}{90}$\quad$\zeta(6)=\frac{\pi^6}{945}$\quad $\zeta(8)=\frac{\pi^8}{9450}$}\hbox{\strut $\zeta(10)=\frac{\pi^{10}}{93555}$\quad $\zeta(12)=\frac{691\pi^{12}}{638512875}$}} }\hbox{\strut $\zeta(14)=\frac{2 \pi^{14}}{18243225}$\quad$\zeta(16)=\frac{3617 \pi^{16}}{325641566250}$}} }\hbox{\strut $\eta(4)=\frac{7\pi^4}{720}$\quad $\eta(6)=\frac{31\pi^6}{30240}$\quad $\eta(8)=\frac{127\pi^8}{1209600}$}} }\hbox{\strut $\eta(10)=\frac{73\pi^{10}}{6842880}$\quad $\eta(12)=\frac{1414477\pi^{12}}{1307674368000}$}} }\hbox{\strut $\eta(14)=\frac{8191 \pi^{14}}{74724249600}$\quad$\eta(16)=\frac{16931177 \pi^{16}}{1524374691840000}$}} }\hbox{\strut $\lambda(4)=\frac{\pi^4}{96}$\quad $\lambda(6)=\frac{\pi^6}{960}$\quad $\lambda(8)=\frac{17\pi^8}{161280}$}} }\hbox{\strut $\lambda(10)=\frac{31\pi^{10}}{2903040}$\quad $\lambda(12)=\frac{691\pi^{12}}{638668800}$}} }\hbox{\strut $\lambda(14)=\frac{5461 \pi^{14}}{49816166400}$\quad$\lambda(16)=\frac{929569 \pi^{16}}{83691159552000}$}} \\ [1ex] \hline
$\ldots$ &$\ldots$&$\ldots$&$\ldots$\\ [1ex]
\hline\hline
\end{tabular}
\begin{tablenotes}
\item[a] Sums that appear in the Table are denoted by $\zeta(p)=\sum^{\infty}_{n=1}\frac{1}{n^p}$, $\eta(p)=\sum^{\infty}_{n=1}\frac{(-1)^{n-1}}{n^p}$, $\lambda(p)=\frac{1}{2}(\zeta(p)+\eta(p))=\sum^{\infty}_{n=0}\frac{1}{(2n+1)^p}$.
\item[b] $C$ denotes the corresponding normalization constant.
\item[c] $Q_n(x)$ denotes an $n$th degree polynomial $Q_n(x)=a_nx^n+a_{n-1}x^{n-1}+...+a_1x+a_0$.
\end{tablenotes}
\end{threeparttable}
\label{tabelasume}
\end{table}
\end{center}
\end{widetext}
One can proceed to higher-degree polynomials and obtain analytical forms of sums from the Table \ref{tabelasume} for larger $p$. As a consequence for the higher-degree polynomials and larger value of $p$ the coprime integers appearing in the sums increase. It can be noted that all the sums have irrational values and as $p$ increases the values of sums become closer to $1$. Following the rule of appearance of the sums from Table \ref{tabelasume}, one can conclude that for even polynomial wave functions of degree $n$ sums for $p=2n, p=2,3,...,n$ can be calculated, whereas the odd-degree ones produce the sums for $p=2n-2, p=3,4,...,n$.
All polynomial wave functions discussed in the paper are presented in Fig \ref{Fig1}. It can be observed that states $\sqrt{\frac{30}{a^5}}x(a-x)$ and $\sqrt{\frac{10080}{313a^9}}x(a-x)(\frac{a}{2}+x)(\frac{3a}{2}-x)$ are even with respect to the center of the well, whereas the others are not of a specific parity.
\begin{figure}[ht]
\centering
\includegraphics[width=8.4cm]{Fig1.eps}
\caption{\label{Fig1}
(Color online) Polynomial wave functions discussed in the paper for $a=1$.}
\end{figure}
One can check that by increasing the number of nodes of polynomial wave function inside the well (increasing the degree of polynomial), mean energy in the given state increases. Yet, since the wave functions are polynomial only inside the well, it may happen that the higher-degree polynomial wave function has less nodes than the lower-degree one. For instance, the fourth-degree function $\psi(x)=\sqrt{\frac{10080}{313a^9}}x(a-x)(\frac{a}{2}+x)(\frac{3a}{2}-x)$ has no nodes inside the well (two roots are $x=-\frac{a}{2}$ and $x=\frac{3a}{2}$), wherefore the mean energy for the third-degree polynomials with one node (as $\psi(x)=\sqrt{\frac{210}{a^7}}x(a-x)(a-2x)$) is larger.
\section{Conclusion} \label{concl}
We have shown that starting from certain principles of quantum mechanics different infinite sums can be determined analytically. Namely, in the problem of a particle in a box, which is used to illustrate the idea of the method, the calculation of the mean value of energy in a polynomial type of state inside the well leads to different Riemann zeta $\zeta(p)$ and related functions for positive even arguments $p>2$ (a similar method that includes the operator $\hat{H}^2$ can be used for computing these sums for $p=2$; see Appendix \ref{appa}). These sums for odd positive $p$ cannot be calculated analytically by using this or any other method.
An advantage of quantum mechanical approach is that one can check the course of the calculation by employing dimensional analysis based on the constants that appear in the problem ($a$, $\hbar$, $m$). Application of this method to more complex quantum mechanical problems would lead to a variety of other analytically computable infinite sums.
\appendix
\section{Sums for $p=2$} \label{appa}
Repeating the above performed procedure for the operator $\hat{H}^2$ we can also obtain $\zeta(2)$, $\eta(2)$, and $\lambda(2)$. Following paper \cite{ermitovost}, Eqs. (\ref{srednjajedan}) and (\ref{srednjadva}) are replaced with
\begin{equation} (\hat{H}\psi,\hat{H}\psi)=\sum^{\infty}_{n=1}W(E_n)E_n^2, \end{equation}
where we choose $\psi(x)=\sqrt{\frac{30}{a^5}}x(a-x)$. It was already shown that
\begin{equation}
W(E_n)=\frac{480}{n^6\pi^6}[1-(-1)^n],
\end{equation}
therefore using
$E_n^2=\frac{n^4\pi^4\hbar^4}{4m^2a^4}$, as well as
$(\hat{H}\psi,\hat{H}\psi)=\frac{30\hbar^4}{m^2a^4}$, we obtain
\begin{equation}
\sum^{\infty}_{n=1}\frac{4}{n^2\pi^2}\left[1-(-1)^n\right]=1. \label{jedan}
\end{equation}
From (\ref{jedan}) and employing (\ref{tajnaveza}) and (\ref{javnaveza}) we get $\zeta(2)=\frac{\pi^2}{6}$, $\eta(2)=\frac{\pi^2}{12}$, and $\lambda(2)=\frac{\pi^2}{8}$. These three sums could also be calculated starting from a polynomial wave function of an arbitrary degree at the expense of solving a system of equations.
\begin{references}
\bibitem{grifits} D.J.Griffiths, "Introduction to Quantum Mechanics", Prentice Hall, second edition (2010).
\bibitem{sif} L. Schiff, "Quantum mechanics"', McGraw-Hill Book Co. Inc., New York (1949).
\bibitem{shankar} R. Shankar, "Principles of quantum mechanics", Springer Science \& Business Media (2012).
\bibitem{zidovi}
S.W. Doescher, M. H. Rice, "Infinite square-well potential with a moving wall". Am. J. Phys. \textbf{37}, 1246-1249 (1969).
\bibitem{zidovi2} V.V. Dodonov, A.B. Klimov, D.E. Nikonov, "Quantum particle in a box with moving walls", Journal of mathematical physics \textbf{34}(8), 3391-3404 (1993).
\bibitem{split}
J. Gea-Banacloche, "Splitting the wave function of a particle in a box." Am. J. Phys. \textbf{70}, 307-312 (2002).
\bibitem{ermitovost}
G. Bonneau, J. Faraut, G. Valent, "Self-adjoint extensions of operators and the teaching of quantum mechanics" Am. J. Phys., \textbf{69}, 322-331 (2001).
\bibitem{crnotelo} L. Peliti, "Statistical Mechanics in a Nutshell", Princeton University Press (2011).
\bibitem{filips} P. Philips, "Advanced solid state physics", Cambridge University Press (2012).
\bibitem{rajh} L. Reichl, "A Modern Course in Statistical Physics", Wiley (2016).
\bibitem{sloba} S. Rado\v sevi\' c, M. Panti\' c, M. Pavkov-Hrvojevi\' c, D. Kapor, "Magnon energy renormalization and low-temperature thermodynamics of $O(3)$ Heisenberg ferromagnets", Annals of Physics \textbf{339}, 382-411 (2013).
\bibitem{tabularint} D. Horowitz, "Tabular Integration by Parts", The College Mathematics Journal, \textbf{21}, 4, 307-311 (1990).
\bibitem{atlas} J. Spanier, K. Oldham, "An Atlas of Functions", Washington, DC: Hemisphere (1987).
\bibitem{fermi} E. Fermi, "Notes on Quantum Mechanics", Phoenix Science Series Chichago (1961).
\bibitem{grajner} W. Greiner, B. M\"{u}ller, "Quantum mechanics: symmetries", Springer Science \& Business Media (2012).
\end{references}
\end{document}
\begin{document}
\newcommand{\bull}{\rule{.85ex}{1ex} \par
}
\newenvironment{sketch}{\noindent {\bf Proof (sketch):\ }}{
\bull}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{definition}[theorem]{Definition}
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{conjecture}[theorem]{Conjecture}
\newtheorem{exmp}{Example}
\newtheorem{notation}[theorem]{Notation}
\newtheorem{problem}{Problem}
\newtheorem{remark}[theorem]{Remark}
\newtheorem{observation}[theorem]{Observation}
\newcommand{\QCSP}[1]{\mbox{\rm QCSP$(#1)$}}
\newcommand{\CSP}[1]{\mbox{\rm CSP$(#1)$}}
\newcommand{\mcSP}[1]{\mbox{{\sc Max CSP}$(#1)$}}
\newcommand{\wMCSP}[1]{\mbox{\rm weighted Max CSP$(#1)$}}
\newcommand{\cMCSP}[1]{\mbox{\rm cw-Max CSP$(#1)$}}
\newcommand{\tMCSP}[1]{\mbox{\rm tw-Max CSP$(#1)$}}
\renewcommand{\P}{\mbox{\bf P}}
\newcommand{\G}[1]{\mbox{\rm I$(#1)$}}
\newcommand{\NE}[1]{\mbox{$\neq_{#1}$}}
\newcommand{\mcol}[1]{\mbox{\sc Max $#1$-Col}}
\newcommand{\NP}{\mbox{\bf NP}}
\newcommand{\NL}{\mbox{\bf NL}}
\newcommand{\PO}{\mbox{\bf PO}}
\newcommand{\NPO}{\mbox{\bf NPO}}
\newcommand{\APX}{\mbox{\bf APX}}
\newcommand{\Aut}{\mbox{\rm Aut}^*}
\newcommand{\mbox{\rm -$B$}}{\mbox{\rm -$B$}}
\newcommand{\GIF}[3]{\ensuremath{h_\{{#2},{#3}\}^{#1}}}
\newcommand{\mbox{\rm Spmod}}{\mbox{\rm Spmod}}
\newcommand{\mbox{\rm Sbmod}}{\mbox{\rm Sbmod}}
\newcommand{\Inv}[1]{\mbox{\rm Inv($#1$)}}
\newcommand{\Pol}[1]{\mbox{\rm Pol($#1$)}}
\newcommand{\sPol}[1]{\mbox{\rm s-Pol($#1$)}}
\newcommand{\un}{\underline}
\newcommand{\ov}{\overline}
\def\ar{\hbox{ar}}
\def\vect#1#2{#1 _1\ldots #1 _{#2}}
\def\ld{,\ldots,}
\let\sse=\subseteq
\let\la=\langle
\def\langle\langle{\langle\langle}
\let\ra=\rangle
\def\rangle\rangle{\rangle\rangle}
\let\vr=\varrho
\def\vct#1#2{#1 _1,\ldots, #1 _{#2}}
\newcommand{\va}{{\bf a}}
\newcommand{\vb}{{\bf b}}
\newcommand{\vc}{{\bf c}}
\newcommand{\vx}{{\bf x}}
\newcommand{\vy}{{\bf y}}
\def{\bur Z^+}{{\bur Z^+}}
\def{\bur R}{{\bur R}}
\def\D{{\cal D}}
\def\F{{\cal F}}
\def\I{{\cal I}}
\def\C{{\cal C}}
\def\U{{\cal U}}
\def\K{{\cal K}}
\def\L{{\cal L}}
\def\2mat#1#2#3#4#5#6#7#8{
\begin{array}{c|cc}
$~$ & #3 & #4\\
\hline
#1 & #5& #6\\
#2 & #7 & #8 \end{array}}
\renewcommand{\vp}{\varphi}
\renewcommand{\ve}{\varepsilon}
\def\tup#1{\mathchoice{\mbox{\boldmath$\displaystyle#1$}}
{\mbox{\boldmath$\textstyle#1$}}
{\mbox{\boldmath$\scriptstyle#1$}}
{\mbox{\boldmath$\scriptscriptstyle#1$}}}
\newcommand{\begin{center}\huge Draft!!! \end{center}}{\begin{center}\huge Draft!!! \end{center}}
\newcommand{\makebox[0mm]{}}{\makebox[0mm]{}}
\renewcommand{\text}[1]{\mbox{\rm \,#1\,}}
\renewcommand{\vn}{\varnothing}
\newcommand{\union}{\cup}
\newcommand{\cap}{\cap}
\newcommand{-}{-}
\newcommand{\compl}[1]{\overline{#1}}
\newcommand{\card}[1]{{|#1|}}
\newcommand{\set}[1]{\{{#1}\}}
\newcommand{\ |\ }{\ |\ }
\newcommand{\st}{\ |\ }
\newcommand{\times}{\times}
\newcommand{\powerset}[1]{{\bf 2}^{#1}}
\newcommand{\tuple}[1]{\langle{#1}\rangle}
\newcommand{\seq}[1]{\langle #1 \rangle}
\newcommand{\seq{}}{\seq{}}
\newcommand{\floor}[1]{\left\lfloor{#1}\right\rfloor}
\newcommand{\ceiling}[1]{\left\lceil{#1}\right\rceil}
\newcommand{\rightarrow}{\rightarrow}
\newcommand{\!\circ\!}{\!\circ\!}
\newcommand{\transclos}[1]{#1^+}
\newcommand{\reduction}[1]{#1^-}
\newcommand{\perfimp}{\stackrel{p}{\Longrightarrow}}
\newcommand{{\em ie.}}{{\em ie.}}
\newcommand{{\em eg.}}{{\em eg.}}
\newcommand{paper}{paper}
\newcommand{\em}{\em}
\newcommand{${\mathbb R}$-interpretation}{${\mathbb R}$-interpretation}
\newcommand{${\mathbb R}$-model}{${\mathbb R}$-model}
\newcommand{^{\rm T}}{^{\rm T}}
\newcommand{\underlineprint}[1]{}
\newcommand{$\:$}{$\:$}
\newcommand{\prob}[1]{{\sc #1}}
\newcommand{{\it TSolve}}{{\it TSolve}}
\newcommand{{\it Neg}}{{\it Neg}}
\newcommand{XX}{XX}
\newcommand{{\it props}}{{\it props}}
\newcommand{{\it rels}}{{\it rels}}
\newcommand{\vdash_p}{\vdash_p}
\newcommand{{\rm Pr}}{{\rm Pr}}
\newcommand{{\rm AX}}{{\rm AX}}
\newcommand{{\bf AX}}{{\bf AX}}
\newcommand{\vdash_{\rm R}}{\vdash_{\rm R}}
\newcommand{\vdash_{\rm R,A}}{\vdash_{\rm R,A}}
\newcommand{{\em \#mis}}{{\em \#mis}}
\newcommand{{\em comb}}{{\em comb}}
\newcommand{{\sc X-Csp}}{{\sc X-Csp}}
\newcommand{{\sc Csp}}{{\sc Csp}}
\newcommand{\cc}[1]{\textnormal{\textbf{#1}}}
\newcommand{\opt}[0]{\textrm{{\sc opt}}}
\newcommand{\mc}{mc}
\newcommand{\textrm{\textit{H\aa}}}{\textrm{\textit{H\aa}}}
\renewcommand{\atop}[2]{\genfrac{}{}{0pt}{}{#1}{#2}}
\newcommand{{\cal G_\equiv}}{{\cal G_\equiv}}
\newcommand{\equiv}{\equiv}
\pagestyle{plain}
\author{Robert Engstr\"om\footnote{\tt{engro910@student.liu.se}}, Tommy F\"arnqvist\footnote{\tt{\{tomfa, petej, johth\}@ida.liu.se }}, Peter Jonsson\footnotemark[2], and Johan Thapper\footnotemark[2]\\
\\
\small
Department of Computer and Information Science\\
\small
Link\"{o}pings universitet\\
\small
SE-581 83 Link\"{o}ping, Sweden\\}
\title{Graph Homomorphisms, Circular Colouring, and \\ Fractional Covering by $H$-cuts}
\date{}
\maketitle
\begin{abstract}
A graph homomorphism is a vertex map which carries edges from a source
graph to edges in a
target graph. The instances of the \emph{Weighted Maximum
$H$-Colourable Subgraph} problem ($\mcol{H}$) are
edge-weighted graphs $G$ and
the objective is to find a subgraph of $G$ that has maximal total edge
weight, under
the condition that the subgraph has a homomorphism to $H$; note that for
$H=K_k$ this
problem is equivalent to {\sc Max $k$-cut}. F\"arnqvist et al.\ have
introduced a parameter on the space of graphs that allows close study of
the approximability properties of $\mcol{H}$. Specifically, it can be
used to extend previously known (in)approximability results to larger
classes of graphs. Here, we investigate the properties of this parameter
on circular complete graphs $K_{p/q}$, where $2 \leq p/q \leq 3$. The
results are extended to $K_4$-minor-free graphs and graphs with bounded
maximum average degree. We also consider connections with
\v{S}\'{a}mal's work on fractional covering by cuts: we address,
and decide, two conjectures concerning cubical chromatic numbers.
\noindent
{\bf Keywords}: graph $H$-colouring, circular colouring, fractional
colouring, combinatorial optimisation
\end{abstract}
\section{Introduction}
Denote by ${\cal G}$ the set of all simple, undirected and
finite graphs.
A \emph{graph homomorphism} from $G \in {\cal G}$ to $H \in {\cal G}$ is a vertex map which
carries the edges in $G$ to edges in $H$.
The existence of such a map will be denoted by $G \rightarrow H$.
For a graph $G \in {\cal G}$, let
${\cal W}(G)$ be the set of \emph{weight functions}
$w : E(G) \rightarrow {\mathbb Q}^+$ assigning weights
to edges of $G$.
Now,
{\em Weighted Maximum $H$-Colourable Subgraph} (\mcol{H}) is the
maximisation problem with
\begin{description}
\item[Instance:] An edge-weighted graph $(G,w)$, where $G \in {\cal
G}$ and
$w \in {\cal W}(G)$.
\item[Solution:] A subgraph $G'$ of $G$ such that $G' \rightarrow H$.
\item[Measure:] The weight of $G'$ with respect to $w$.
\end{description}
\noindent
Given an edge-weighted graph $(G,w)$, denote by $mc_H(G,w)$ the measure
of the optimal solution to the problem \mcol{H}.
Denote by $mc_k(G,w)$ the
(weighted) size of a largest $k$-cut in $(G,w)$.
This notation is justified by the fact that
$mc_k(G,w) = mc_{K_k}(G,w)$.
In this sense, \mcol{H} generalises {\sc Max $k$-cut} which is a
well-known and well-studied problem that is computationally hard
when $k > 1$.
Since \mcol{H} is a hard problem to solve exactly, efforts have been
made to find suitable approximation algorithms.
F\"arnqvist et al.~\cite{farnqvist:etal:09} introduce a
method that can be used to extend previously known
(in)approximability bounds on \mcol{H} to new and larger classes of
graphs. For example, they
present concrete approximation ratios for certain graphs (such as the odd cycles)
and
near-optimal asymptotic results for large graph classes.
The fundament of this promising technique is the ability to compute
(or closely approximate) a function
$s: {\cal G} \times {\cal G} \rightarrow {\mathbb R}$ defined as follows:
\begin{equation}
\label{eq:s}
s(M,N) = \inf_{\substack{G \in {\cal G} \\ \omega \in {\cal
W}(G)}}{\frac{mc_M(G,\omega)}{mc_N(G,\omega)}}.
\end{equation}
It is not surprising that estimating $s(M,N)$
is, in many cases,
non-trivial. One way is to solve a certain linear program that
we present in Section~\ref{sec:linprog}: the program
can be tedious to write down since it is based on the structure
of $N$'s automorphism group, and can be prohibitively large.
Another way is to use the following lemma:
\begin{lemma}[\cite{farnqvist:etal:09}]
\label{lem:sandwich}
Let $M \rightarrow H \rightarrow N$. Then, $s(M,H) \geq s(M,N)$ and
$s(H,N) \geq s(M,N)$.
\end{lemma}
It is apparent that in order to use this result effectively, we need a large
selection of graphs $M,N$ that are known to be close to each other with respect
to $s$. For the moment, the set of such examples is quite meagre.
Hence, we set out to investigate how the function $s$ behaves on
certain classes of graphs. In Section~\ref{sec:meas}, we will take a careful
look at 3-colourable circular complete graphs and, amongst other things,
find that $s$ is constant between a large number of these graphs.
Moreover, we will extend bounds on $s$ to other classes of graphs using
known results about homomorphisms to circular complete graphs;
examples include $K_4$-minor-free graphs and graphs with bounded
maximum average degree.
Yet another way of estimating the function $s$ is to relate it
to other graph parameters. In this vein, Section~\ref{sec:cut} is dedicated
to generalising the work of \v{S}\'{a}mal~\cite{samal:05,samal:06}
on fractional covering by cuts to obtain a new family of `chromatic
numbers'. This reveals that $s(M,N)$ and the new chromatic numbers
$\chi_M(N)$ are closely related quantities, which provides us with an
alternative way of computing $s$.
We also use our knowledge about the behaviour of $s$ to
disprove a conjecture by \v{S}\'{a}mal concerning the cubical chromatic
number and, finally, we decide in the positive another conjecture by
\v{S}\'{a}mal concerning the same parameter.
We conclude the paper, in Section~\ref{sec:open}, by discussing
open problems and directions for future research.
To improve readability some proofs are deferred to the appendices.
\section{A Linear Program for $s$}
\label{sec:linprog}
F\"arnqvist et al.~\cite{farnqvist:etal:09} have identified
an alternative expression for $s(M,N)$
which depends on the automorphism group of $N$.
Let $M$ and $N \in {\cal G}$ be graphs and let $A = \mbox{\rm Aut}^*(N)$ be the
(edge) automorphism group of $N$, i.e., $\pi \in A$ acts on $E(N)$
by permuting the edges.
Let $\hat{{\cal W}}(N)$ be the set of
weight functions $\omega \in {\cal W}(N)$ which satisfy
$\sum_{e \in E(N)} \omega(e) = 1$
and for which $\omega(e) = \omega(\pi \cdot e)$ for all $e \in E(N)$ and
$\pi \in \mbox{\rm Aut}^*(N)$. That is, the weight functions in $\hat{{\cal W}}(N)$
are constant over the edges belonging to each orbit of $\mbox{\rm Aut}^*(N)$.
\begin{lemma}[\cite{farnqvist:etal:09}]
\label{lem:auto}
Let $M,N\in {\cal G}$.
Then,
$s(M,N) = \inf_{w \in {\cal {\hat W}}(N)} mc_M(N,w)$.
In particular, when $N$ is edge-transitive,
$s(M,N) = mc_M(N,1/|E(N)|)$.
\end{lemma}
Lemma~\ref{lem:auto} shows that in order to determine $s(M,N)$, it is
sufficient to minimise $mc_M(N,\omega)$ over $\hat{{\cal W}}(N)$, and it follows
that $s(M,N)$ can be computed by solving a linear program.
For $i \in \{1,\ldots,r\}$, let $A_i$ be the orbits
of $\mbox{\rm Aut}^*(N)$ and, for $f : V(N) \rightarrow V(M)$, define
\begin{equation}
f_i = | \{u v \in A_i \;|\; f(u) f(v) \in E(M)\}|.
\end{equation}
That is, $f_i$ is the number of edges in $A_i$ which are mapped to an
edge in $M$ by $f$. The measure of a solution $f$ when $\omega \in
\hat{{\cal W}}(N)$ is equal to $\sum_{i=1}^{r}{\omega_i \cdot f_i}$
where $\omega_i$ is the weight of an edge in $A_i$. Given an $\omega$,
the measure of a solution $f$ depends only on the vector
$(f_1,\ldots,f_r) \in {\mathbb N}^r$. We call this vector the {\em
signature} of $f$. When there is no risk of confusion,
we will let $f$ denote the signature as well.
Since we have seen that the measure of a solution only
depends on its signature the solution space is taken to be the set of
possible signatures
\begin{equation}
F = \{f \in {\mathbb N}^r \,|\, f\mbox{ is a signature of a solution to $(N,\omega)$ of
$mcol{M}$} \}.
\end{equation}
The variables of the linear program are $\omega_1,\ldots,\omega_r$ and
$s$, where $\omega_i$ represents the weight of each element in the orbit
$A_i$ and $s$ is an upper bound on the measures of the signatures.
\begin{equation*}
\tag{LP}
\label{lp}
\begin{array}{ll}
\min s \\
\sum_i f_i \cdot \omega_i \leq s & \text{for each $(f_1, \ldots, f_r) \in
F$} \\
\sum_i |A_i| \cdot \omega_i = 1 & \ \text{and} \ \omega_i, s \geq 0\\
\end{array}
\end{equation*}
Given a solution $\omega_1,\ldots,\omega_r,s$ to this program, the function defined by $\omega(e) = \omega_i$
when $e \in A_i$ is a weight function which minimises $mc_M(N,\omega)$.
The value of this solution is $s = s(M,N)$.
\section{Solutions to (\ref{lp}) for Circular Complete Graphs} \label{sec:meas}
A circular complete graph $K_{p/q}$ is a graph with vertex set
$\{v_0,v_1,\ldots,v_{n-1}\}$ and edge set $E(K_{p/q}) = \{v_i v_j \ | \ q \leq |i-j|
\leq p-q\}$. This can be seen as placing the vertices on a circle and
connecting two vertices by an edge if they are at a distance at least
$q$ from each other. A fundamental property of these graphs is that
$K_{p/q} \rightarrow K_{p'/q'}$ iff $p/q \leq p'/q'$.
Due to this fact, when we write $K_{p/q}$, we will assume that $p$ and $q$
are relatively prime.
We will denote the orbits of the action of $\mbox{\rm Aut}^*(K_{p/q})$ by
$A_c = \{ v_i v_j \in E(K_{p/q}) \;|\; j-i \equiv q+c-1 \text{ (mod $p$)} \}$,
for $c = 1, \ldots, \lceil \frac{p-2q+1}{2} \rceil$.
We finally note that a
homomorphism from a graph $G$ to $K_{p/q}$ is called a (circular)
$(p/q)$-colouring of $G$.
More information on this topic can be gained
from the book by Hell and Ne\v{s}et\v{r}il~\cite{HN04} and from the
survey by Zhu~\cite{zhu:survey}.
In this section we start out by investigating $s(K_r, K_t)$
for rational numbers $2 \leq r < t \leq 3$.
In Section~\ref{ssec:k2}, we fix $r=2$ and choose $t$ so that
$\mbox{\rm Aut}^*(K_{t})$ has few orbits.
We find some interesting properties of these numbers which lead
us to look at the case $r = 2+1/k$ in Section~\ref{ssec:odd}.
Our approach is based on relaxing the linear program (\ref{lp})
that was presented in Section~\ref{sec:linprog}, combined with
arguments that our chosen relaxations in fact find the optimum
in the original program.
\subsection{Maps to $K_2$} \label{ssec:k2}
We consider $s(K_2, K_{t})$ for
$t = 2 + n/k$ with $k > n \geq 1$, where $n$ and $k$ are integers.
The number of orbits of $\mbox{\rm Aut}^*(K_{t})$ then equals $\lceil (n+1)/2 \rceil$.
We choose to begin our study of $s(K_2, K_{t})$ using small values of $n$.
When $n = 1$, $K_{2+1/k}$ is isomorphic to the cycle $C_{2k+1}$.
The value of $s(K_2,C_{2k+1}) = 2k/(2k+1)$, for $k \geq 1$
was obtained in~\cite{farnqvist:etal:09}.
Combined with the following result, where we set $t = 2+2/(2k-1) = \frac{4k}{2k-1}$,
this has an immediate and perhaps surprising consequence.
\begin{proposition}
\label{prop:4k+4}
Let $k\geq 1$ be an integer, then $s(K_2,K_{\frac{4k}{2k-1}})=\frac{2k}{2k+1}$.
\end{proposition}
\begin{proof}
Let $V(K_{\frac{4k}{2k-1}})= \{v_0,v_1,\ldots,v_{4k-1}\}$ and $V(K_2) =\{w_0,w_1\}$.
We will present two maps $f, h : V(K_{\frac{4k}{2k-1}}) \rightarrow V(K_2)$.
$f$ sends a vertex $v_i$ to $w_0$ if $0 \leq i < 2k$ and to $w_1$ if
$2k \leq i < 4k$.
It is not hard to see that $f = (4k-2, 2k)$.
The map $h$ sends $v_i$ to $w_0$ if $i$ is even and to $w_1$ if $i$ is odd.
Then, $h$ maps all of $A_1$ to $K_2$ but none of the edges in $A_2$,
so $h = (4k,0)$.
It remains to argue that these two solutions suffice to determine $s$.
But we see that any map $g$ with $g_2 > 0$ must cut at least two edges in
the even cycle $A_1$, leading to $g_1 \leq 4k-2$, thus $g \leq f$,
componentwise.
The proposition now follows by solving the relaxation of (\ref{lp})
using only the two inequalities obtained from $f$ and $h$.
\end{proof}
\begin{corollary}
\label{cor:intervals}
Let $k \geq 1$ and $2 \leq r < \frac{2k+1}{k} \leq t \leq \frac{4k}{2k-1}$.
Then, $s(K_r,K_{t}) = \frac{2k}{2k+1}$.
\end{corollary}
\begin{proof}
Note that we have the chain of homomorphisms
$K_2 \rightarrow K_r \rightarrow K_{\frac{2k+1}{k}} \rightarrow K_t \rightarrow K_{\frac{4k}{2k-1}}$.
By Lemma~\ref{lem:sandwich}, we get $s(K_r,K_{\frac{2k+1}{k}}) \geq s(K_2,K_{\frac{2k+1}{k}}) = \frac{2k}{2k+1}$. But since $K_{\frac{2k+1}{k}} \not\rightarrow K_r$, and $K_{\frac{2k+1}{k}}$ is edge-transitive with $2k+1$ edges, $s(K_r, K_{\frac{2k+1}{k}}) \leq \frac{2k}{2k+1}$ and therefore $s(K_r,K_{\frac{2k+1}{k}}) = \frac{2k}{2k+1}$.
Again by Lemma~\ref{lem:sandwich}, we have
$
\frac{2k}{2k+1} = s(K_r, K_{\frac{2k+1}{k}}) \geq s(K_r, K_{t}) \geq s(K_2, K_{\frac{4k}{2k-1}}) = \frac{2k}{2k+1}.
$
\end{proof}
We find that there are intervals $I_k = \{ t \in \mathbb{Q} \;|\; 2+1/k \leq t \leq 2+2/(2k-1) \}$ where $s(t) = s(K_r, K_{t})$ is constant.
In Figure~\ref{fig:interval} these intervals are shown for the first few values of $k$. The intervals $I_k$ form an infinite sequence with endpoints tending to $2$.
Similar intervals appear throughout the space of circular complete graphs.
More specifically, F\"{a}rnqvist et al.~\cite{farnqvist:etal:09} have shown
that $s(K_n,K_{2m-1}) = s(K_n,K_{2m})$ for arbitrary integers $n, m \geq 2$.
Furthermore, it can be proved that $s(K_2, K_n) = s(K_{8/3}, K_n)$ for $n \geq 3$.
Two applications of Lemma~\ref{lem:sandwich} now show that $s(K_r, K_{t})$ is
constant on the regions $[2, 8/3] \times J_m$, where
$J_m = \{ t \in \mathbb{Q} \;|\; 2m-1 \leq t \leq 2m \}$.
\begin{figure}
\caption{The space between $2$ and $3$ with the intervals $I_k$ marked for $k = 2, 3, 4$.}
\label{fig:interval}
\end{figure}
As we proceed with determining $s(K_2, K_{t})$ we can now,
thanks to Corollary~\ref{cor:intervals}, disregard those $t$ which fall
inside these constant intervals.
For $t = 2 + 3/k$, we see that if
$k \equiv 0 $ (mod 3), then $K_t$ is an odd cycle,
and if $k \equiv 2 $ (mod 3), then $t \in I_{k+1}$.
Therefore, we assume that $t$ is of the form
$2+3/(3k+1) = \frac{6k+5}{3k+1}$ for an integer $k \geq 1$.
\begin{proposition} \label{prop:3k+1}
Let $k \geq 1$ be an integer. Then, $s(K_2,K_{\frac{6k+5}{3k+1}})=\frac{6k^2+8k+3}{6k^2+11k+5}=1 - \frac{3k+2}{(k+1)(6k+5)}$.
\end{proposition}
For $t = 2 + 4/k$, we find that we only need to consider the case when $k \equiv 1 \mbox{ (mod 4) }$.
We then have graphs $K_{t}$ with $t = 2+4/(4k+1) = \frac{8k+6}{4k+1}$ for integers $k \geq 1$.
\begin{proposition} \label{prop:4k+1}
Let $k \geq 1$ be an integer. Then, $s(K_2,K_{\frac{8k+6}{4k+1}})=\frac{8k^2+6k+2}{8k^2+10k+3}=1 - \frac{4k+1}{(k+1/2)(8k+6)}$.
\end{proposition}
The expressions for $s$ in Proposition~\ref{prop:3k+1}~and~\ref{prop:4k+1}
have some interesting similarities, but for $n \geq 5$ it becomes harder to
pick out a suitable set of solutions which guarantee that the relaxation has
the same optimum as (\ref{lp}) itself.
Using computer calculations, we have however determined the first
two values ($k = 1, 2$) for the case $t = 2+5/(5k+1)$
and the first value ($k = 1$) for the case $t = 2+6/(6k+1)$.
\begin{equation}
s(K_2, K_{17/6}) = 322/425 \qquad
s(K_2, K_{27/11}) = 5/6 \qquad
s(K_2, K_{20/7}) = 67/89
\end{equation}
\subsection{Maps to Odd Cycles} \label{ssec:odd}
It was seen in Corollary~\ref{cor:intervals} that $s(K_r, K_{t})$ is constant
on the region $(r,t) \in [2,2+1/k) \times I_k$.
In this section, we will study what happens when $t$ remains in $I_k$, but
$r$ is set to $2+1/k$.
A first observation is that the absolute jump of the function
$s(K_r, K_{t})$ when $r$ goes from being less than $2+1/k$
to $r = 2+1/k$ must be largest for $t = 2+2/(2k-1)$.
Let $V(K_{2+2/(2k-1)}) = \{v_0, \ldots, v_{4k-1}\}$ and
$V(K_{2+1/k}) = \{w_0, \ldots, w_{2k}\}$.
The map $f(v_i) = w_i$ with the indices of $w$ taken modulo $2k+1$ has the
signature $f = (4k-1,2k)$.
Since the subgraph induced by the orbit $A_1$ is isomorphic to $C_{4k}$,
any map to an odd cycle must exclude at least one edge from $A_1$.
It follows that $f$ alone determines $s$,
and we can solve (\ref{lp}) to obtain $s(K_{2+1/k}, K_{2+2/(2k-1)}) = (4k-1)/4k$.
Thus, for $r < 2+1/k$, we have
\begin{equation}
\label{eq:jump}
s(K_{2+1/k}, K_{2+2/(2k-1)}) - s(K_r, K_{2+2/(2k-1)}) = (2k-1)/4k(2k+1)
\end{equation}
Smaller $t \in I_k$ can be expressed as $t = 2 + 1/(k-x)$,
where $0 \leq x < 1/2$.
We will write $x = m/n$ for positive integers $m$ and $n$ which
implies the form $t = 2 + n/(kn-m)$, with $m < n/2$.
For $m = 1$, it turns out to be sufficient to keep two inequalities
from (\ref{lp}) to get an optimal value of $s$.
From this we get the following result:
\begin{proposition} \label{prop:m1}
Let $k,n \geq 2$ be integers. Then, $s(C_{2k+1},K_{\frac{2(kn-1)+n}{kn-1}})=\frac{(2(kn-1)+n)(4k-1)}{(2(kn-1)+n)(4k-1)+4k-2}$.
\end{proposition}
There is still a non-zero jump of $s(K_r, K_{t})$ when we move from
$r < 2+1/k$ to $r = 2+1/k$, but it is obviously smaller than that
of (\ref{eq:jump}) and tends to 0 as $n$ increases.
For $m = 2$, we have $2(kn-m)+n$ and $kn-m$ relatively prime only when $n$
is odd.
In this case, it turns out that we need to include an increasing number of
inequalities to obtain a good relaxation.
Furthermore, we are not able to ensure that the obtained value is the
optimum of the original (\ref{lp}).
We will therefore have to settle for a lower bound for $s$.
Explicit calculations have shown that, for small values of $k$ and $n$,
equality holds in Proposition~\ref{th:qisfunny}.
We conjecture this to be true in general.
\begin{proposition} \label{th:qisfunny}
Let $k \geq 2$ be an integer and $n \geq 3$ be an odd integer. Then,
\begin{equation}
s(C_{2k+1},K_{\frac{2(kn-2)+n}{kn-2}}) \geq \frac{(2(kn-2)+n)(\xi_n(4k-1)+(2k-1))}{(2(kn-2)+n)(\xi_n(4k-1)+(2k-1))+(4k-2)(1-\xi_n)},
\end{equation}
where
$\xi_n =
\left(\alpha_1^{(n-1)/2} + \alpha_2^{(n-1)/2}\right)/4,$
and $\alpha_1, \alpha_2$ are the reciprocals of the roots of
$\frac{2k-3}{4k-2} z^2 - 2z + 1$.
\end{proposition}
\subsection{Extending the Results} \label{sec:apply}
We will now take a look at one possible way of extending the results in
the previous sections. To do this, we need to find graphs or classes of
graphs we can homomorphically sandwich between graphs with known
$s$ value. Clearly, $K_2$ has a homomorphism to all non-empty
graphs, and if a graph $G$ has circular chromatic number $\chi_c(G)
\leq r$, then it has a homomorphism to $K_r$. These facts, together with
Lemma~\ref{lem:sandwich}, combine into the following easily proved
lemma:
\begin{lemma}
\label{lem:circlesandwich}
Let $G$ be a non-empty graph with $\chi_c(G) \leq r$. Then, $s(K_2,G)
\geq s(K_2,K_r)$.
If, additionally, $G$ has odd girth no greater than $2k+1$,
then $s(C_{2k+1},G) \geq s(C_{2k+1},K_r)$.
\end{lemma}
We can now make use of known results about bounds on the circular
chromatic number for certain classes of graphs. Much of the extensive
study conducted in this direction was instigated by the restriction of a
conjecture by Jaeger~\cite{jaeger:88} to planar graphs, which is
equivalent to the claim that every planar graph of girth at least $4k$
has a circular chromatic number at most $2 + 1/k$, for $k \geq 2$. The
case $k=1$ is Gr\"{o}tzsch's theorem; that every triangle-free planar
graph is 3-colourable. Currently, the best proven girth for when the
circular chromatic number of a planar graph is guaranteed to be at most
$2+1/k$ is $\frac{20k-2}{3}$ and due to Borodin et
al.~\cite{Borodin:etal:jctb2004}. This result was used by F\"{a}rnqvist
et al.\ to achieve the bound $s(K_2,G) \leq \frac{4k}{4k+1}$ for planar
graphs $G$ of girth at least $(40k-2)/3$. Here, we significantly improve
this bound by considering $K_4$-minor-free graphs, for which Pan and
Zhu~\cite{pan:zhu:02} have shown how their circular chromatic number is
upper-bounded by their odd girth.
\begin{proposition} \label{thmI}
Let $G$ be a $K_4$-minor-free graph, and $ k \geq 1$ an integer. If $G$
has an odd girth of at least $6k-1$, then $s(K_2,G) \leq
\frac{4k}{4k+1}$. If $G$ has an odd girth of at least $6k+3$, then
$s(K_2,G) \leq \frac{4k+2}{4k+3}$.
\end{proposition}
Of course, it is a big limitation to only consider $K_4$-minor-free
graphs. Almost all work on the circular chromatic number for planar
graphs has focused on finding limits when $\chi_c(G) \leq 2 + 1/k$,
that is, when there exists a homomorphism to the odd cycle $C_{2k+1}$.
However, Corollary~\ref{cor:intervals} implies that for two graphs
$G$ and $H$, if $\chi_c(G) = 2 + 1/k$ and $\chi_c(H) = 2+ 2/(2k-1)$ then
$s(K_2,G)=s(K_2,H)$, so for our purposes it would be interesting to have
more results when $\chi_c(G) \leq 2+ 2/(2k-1)$.
For general graphs, we can use results from Raspaud and
Roussel~\cite{raspaud:rousell:07} relating the circular chromatic number
of graphs to their maximum average degree. Specifically, they show that
for a general graph $G$ of girth at least 12, 11, or 10, its circular
chromatic number is bounded from above by $8/3$, $11/4$, and $14/5$,
respectively, which translates into corresponding upper bounds $4/5$,
$17/22$, and $16/21$ on $s(K_2,G)$ (using Propositions \ref{prop:4k+4},
\ref{prop:3k+1}, \ref{prop:4k+1} and Lemma~\ref{lem:circlesandwich}).
\section{Fractional Covering by $H$-cuts} \label{sec:cut}
In the following, we slightly generalise the work of
\v{S}\'{a}mal~\cite{samal:05,samal:06} on fractional
covering by cuts to obtain a complete correspondence between $s(H,G)$
and a family of `chromatic numbers' $\chi_H(G)$ which generalise
\v{S}\'{a}mal's cubical chromatic number $\chi_q(G)$.
The latter corresponds to the case when $H = K_2$.
First, we recall the notion of a {\em fractional colouring} of a
(hyper-) graph.
Let $G$ be a (hyper-) graph with vertex set $V(G)$ and edge set
$E(G) \subseteq \mathcal{P}(V(G)) \setminus \{ \varnothing \}$.
A subset $I$ of $V(G)$ is called independent in $G$ if no edge
$e \in E(G)$ is a subset of $I$.
Let $\mathcal{I}$ denote the set of all independent sets of $G$
and for a vertex $v \in V(G)$, let $\mathcal{I}(v)$
denote all independent sets which contain $v$.
Then, the fractional chromatic number $\chi_f(G)$ of $G$ is given by the
linear program:
\begin{equation}
\begin{array}{ll}
\text{Minimise} & \sum_{I \in \mathcal{I}} f(I) \\
\text{subject to} & \sum_{I \in \mathcal{I}(v)} f(I) \geq 1 \qquad \text{for all $v \in V(G)$}, \\
\text{where} & f : {\mathcal I} \rightarrow \mathbb{R}^+.
\end{array}
\end{equation}
The definition of fractional covering by cuts mimics fractional colouring,
but replaces vertices with edges and independent sets with certain cut
sets of the edges.
Let $G$ and $H$ be undirected simple graphs and $f$ be an arbitrary
vertex map from $G$ to $H$.
The map $f$ induces a partial map
from $E(G)$ to $E(H)$ and we will call the preimage of this map an {\em $H$-cut} in $G$.
When $H$ is a complete graph $K_k$, this is precisely the notion of a
{\em $k$-cut}.
Let $\mathcal{C}$ denote the set of $H$-cuts in $G$ and for an edge
$e \in E(G)$, let $\mathcal{C}(e)$ denote all $H$-cuts which
contain $e$.
The following definition is the generalisation of
{\em cut $n/k$-covers}~\cite{samal:06} to arbitrary $H$-cuts:
\begin{definition}
An {\em $H$-cut $n/k$-cover} of $G$ is a collection $X_1, \ldots, X_N$ of
$H$-cuts in $G$ such that every edge of $G$ is in at least $k$ of them.
The graph parameter $\chi_H$ is defined as:
\begin{equation}
\chi_H(G) = \inf
\{ \frac{n}{k} \,|\, \text{there exists an $H$-cut $n/k$-cover of $G$.} \}
\end{equation}
\end{definition}
By reasoning analogous to that of \v{S}\'{a}mal~\cite{samal:06}, Lemma~5.1.3, $\chi_H$ is also
given by the following linear program:
\begin{equation} \label{coverprimal}
\begin{array}{ll}
\text{Minimise} & \sum_{X \in \mathcal{C}} f(X) \\
\text{subject to} & \sum_{X \in \mathcal{C}(e)} f(X) \geq 1 \qquad \text{for all $e \in E(G)$}, \\
\text{where} & f : {\mathcal C} \rightarrow \mathbb{R}^+.
\end{array}
\end{equation}
For $H = K_2$, an alternative definition of $\chi_H(G) = \chi_q(G)$ was
obtained in~\cite{samal:06} by taking the infimum
(actually minimum due to the formulation in (\ref{coverprimal}))
over $n/k$ for $n$ and $k$ such that $G \rightarrow Q_{n/k}$.
Here, $Q_{n/k}$ is the graph on vertex set $\{0,1\}^n$ with
an edge $u v$ if $d_H(u,v) \geq k$, where $d_H$ denotes the
Hamming distance.
We generalise this family as well to produce a scale for each $\chi_H$.
Namely, let $H^n_k$ be the graph on vertex set $V(H)^n$ and an edge
between $(u_1, \ldots, u_n)$ and $(v_1, \ldots, v_n)$ when
$|\{ i \,|\, (u_i, v_i) \in E(H) \}| \geq k$.
A moment's thought shows that we can express $\chi_H$ as:
\begin{equation}
\chi_H(G) = \inf \{ \frac{n}{k} \,|\, G \rightarrow H^n_{k} \}.
\end{equation}
\v{S}\'{a}mal also notes that $\chi_q(G)$ is given by the fractional chromatic number
of a certain hypergraph associated to $G$.
For the general case, let $G'$ be the hypergraph obtained from $G$ by taking
$V(G') = E(G)$ and letting $E(G')$ be the set of minimal subgraphs
$S \subseteq G$ such that $S \not\rightarrow H$.
A short argument shows that indeed $\chi_f(G') = \chi_H(G)$.
Finally, we can work out the correspondence to $s(H,G)$.
Consider the dual program of (\ref{coverprimal}):
\begin{equation} \label{coverdual}
\begin{array}{ll}
\text{Maximise} & \sum_{e \in E(G)} g(e) \\
\text{subject to} & \sum_{e \in X} g(e) \leq 1 \qquad \text{for all $H$-cuts $X \in \mathcal{C}$}, \\
\text{where} & g : E(G) \rightarrow \mathbb{R}^+.
\end{array}
\end{equation}
Let $s = \sum_{e \in E(G)} g(e)$ and make the substitution $g' = g/s$ in
(\ref{coverdual}).
Comparing with (\ref{lp}), we have
\begin{equation}
\chi_H(G) = 1/s(H,G).
\end{equation}
We now move on to address two conjectures by \v{S}\'{a}mal~\cite{samal:06} on the cubical
chromatic number $\chi_q = \chi_{K_2}$.
In Section~\ref{sec:neg} we discuss an upper bound on $s$ which relates to
the first conjecture, Conjecture~5.5.3~\cite{samal:06}.
This is the suspicion
that $\chi_q(G)$ can be determined by measuring the maximum cut over
all subgraphs of $G$.
We show that this is false by providing a counterexample from
Section~\ref{ssec:k2}.
We then consider Conjecture~5.4.2~\cite{samal:06},
concerning ``measuring the scale'', i.e.,
determining $\chi_q$ for the graphs $Q_{n/k}$ themselves.
We prove that this conjecture is true,
and state it as Proposition~\ref{prop:samal} in Section~\ref{sec:pos}.
\subsection{An Upper Bound on $s$} \label{sec:neg}
In Section~\ref{sec:meas} we obtained lower bounds on $s$ by relaxing the
linear program (\ref{lp}).
In most cases, the corresponding solution was proven feasible for the
original (\ref{lp}), and hence optimal.
Now, we take a look at the only known source of upper bounds for $s$.
Let $G, H \in \mathcal{G}$, with $G \rightarrow H$ and take an arbitrary
$S$ such that $G \rightarrow S \rightarrow H$.
Then, applying Lemma~\ref{lem:sandwich} followed by Lemma~\ref{lem:auto}
gives
\begin{equation} \label{eq:ub}
s(G,H) \leq s(G,S) = \inf_{w \in {\cal {\hat W}}(S)} mc_G(S,w)
\leq mc_{G}(S, 1/|E(S)|).
\end{equation}
When $G = K_2$ it follows that
\begin{equation} \label{eq:samub}
s(K_2,H) \leq \min_{S \subseteq H} b(S),
\end{equation}
where $b(S)$ denotes the bipartite density of $S$.
\v{S}\'{a}mal~\cite{samal:06} conjectured that
this inequality, expressed in the form
$\chi_q(G) \geq 1/(\min_{S \subseteq G} b(S))$,
can be replaced by an equality.
We answer this in the negative, using $K_{11/4}$ as our counterexample.
Proposition~\ref{prop:3k+1} with $k = 1$ gives $s(K_2,K_{11/4}) = 17/22$.
If $s(K_2,K_{11/4}) = b(S)$ for some $S \subseteq K_{11/4}$ it means
that $S$ must have at least $22$ edges.
Since $K_{11/4}$ has exactly $22$ edges, then $S = K_{11/4}$.
However, a cut in a cycle must contain an even number of edges.
Since the edges of $K_{11/4}$ can be partitioned into two cycles,
we have that the maximum cut in $K_{11/4}$ must be of even size,
hence $|E(K_{11/4})| \cdot b(K_{11/4}) \neq 17$.
This is a contradiction.
\subsection{Confirmation of a Scale} \label{sec:pos}
As a part of his investigation of $\chi_q$, \v{S}\'{a}mal~\cite{samal:06}
set out to determine the value of $\chi_q(Q_{n/k})$.
We complete the proof of his Conjecture~5.4.2~\cite{samal:06}
to obtain the following result.
\begin{proposition} \label{prop:samal}
Let $k, n$ be integers such that $k \leq n < 2k$.
Then,
$\chi_q(Q_{n/k}) =
n/k$ if $k$ is even and
$(n+1)/(k+1)$ if $k$ is odd.
\end{proposition}
\v{S}\'{a}mal provides the upper bound and an approach to the lower bound
using the largest eigenvalue of the Laplacian of a subgraph of $Q_{n/k}$.
The computation of this eigenvalue
boils down to an inequality (Conjecture~5.4.6~\cite{samal:06})
involving some binomial coefficients.
We first introduce the necessary notation and then prove the remaining
inequality in Lemma~\ref{lem:ineq}, whose second part, for odd $k$,
corresponds to one of the formulations of the conjecture.
Proposition~\ref{prop:samal} then follows from
Theorem~5.4.7~\cite{samal:06} conditioned on the result of this lemma.
Let $k, n$ be positive integers such that $k \leq n$, and let
$x$ be an integer such that $1 \leq x \leq n$.
For $k \leq n < 2k$, let $S_o(n,k,x)$ denote the set of all $k$-subsets of $\{1, \ldots, n\}$ that have an odd number of elements in common with the set $\{n-x+1, \ldots, n\}$.
Define $S_e(n,k,x)$ analogously as the $k$-subsets with an even number of common elements.
Let $N_o(n,k,x) = |S_o(n,k,x)|$ and $N_e(n,k,x) = |S_e(n,k,x)|$. Then,
\begin{equation}
N_o(n,k,x) = \sum_{odd \, t} \binom{x}{t} \binom{n-x}{k-t}, \quad
N_e(n,k,x) = \sum_{even \, t} \binom{x}{t} \binom{n-x}{k-t}.
\end{equation}
When $x$ is odd, the function $f : S_o(2k,k,x) \rightarrow S_e(2k,k,x)$
given by the complement $f(\sigma) = \{1, \ldots, n\} \setminus \sigma$
is a bijection.
Since $N_o(n,k,x)+N_e(n,k,x) = \binom{n}{k}$, we have
\begin{equation} \label{eqn:1}
N_o(2k,k,x) = N_e(2k,k,x) = \frac{1}{2} \binom{2k}{k}.
\end{equation}
\begin{lemma} \label{lem:help}
Let $1 \leq x < n = 2k-1$ with $x$ odd. Then,
$N_e(n,k,x) = N_e(n,k,x+1)$ and $N_o(n,k,x) = N_o(n,k,x+1)$.
\end{lemma}
\begin{proof}
First, partition $S_e(n,k,x)$ into $A_1 = \{ \sigma \in S_e(n,k,x) \,|\, n-x \not\in \sigma \}$ and $A_2 = S_e(n,k,x) \setminus A_1$.
Similarly, partition $S_e(n,k,x+1)$ into $B_1 = \{ \sigma \in S_e(n,k,x+1) \,|\, n-x \not\in \sigma \}$ and $B_2 = S_e(n,k,x+1) \setminus B_1$.
Note that $A_1 = B_1$.
We argue that $|A_2| = |B_2|$.
To prove this, define the function $f : \mathcal{P}(\{1,\ldots,n\}) \rightarrow \mathcal{P}(\{1,\ldots,n-1\})$ by
$f(\sigma) = (\sigma \cap \{1, \ldots, n-x-1\}) \cup \{ s-1 \,|\; s \in \sigma, s > n-x \},$
i.e., $f$ acts on $\sigma$ by ignoring the element $n-x$ and renumbering
subsequent elements so that the image is a subset of $\{1, \ldots, n-1\}$.
Note that $f(A_2) = S_e(2k-2,k-1,x)$ and $f(B_2) = S_o(2k-2,k-1,x)$.
Since $x$ is odd, it follows from (\ref{eqn:1}) that
$|f(A_2)| = |f(B_2)|$.
The first part of the lemma now follows from the injectivity of
the restrictions $f|_{A_2}$ and $f|_{B_2}$.
The second equality is proved similarly.
\end{proof}
\begin{lemma} \label{lem:ineq}
Choose $k, n$ and $x$ so that $k \leq n < 2k$ and $1 \leq x \leq n$.
For odd $k$,
\begin{equation}
N_e(n,k,x) \leq \binom{n-1}{k-1} \quad \text{and for even $k$,} \quad N_o(n,k,x) \leq \binom{n-1}{k-1}.
\end{equation}
\end{lemma}
\begin{proof}
We will proceed by induction over $n$ and $x$.
The base cases are given by $x = 1$, $x = n$, and $n = k$.
For $x = 1$,
$N_o(n,k,x) = \binom{n-1}{k-1}$ and $N_e(n,k,x) = \binom{n-1}{k} \leq \binom{n-1}{k-1}$,
where the inequality holds for all $n < 2k$.
For $x = n$ and odd $k$, we have $N_e(n,k,x) = 0$, and for
even $k$, we have $N_o(n,k,x) = 0$.
For $n = k$,
$N_e(n,k,x) = 1-N_o(n,k,x) = 1$ if $x$ is even and 0 otherwise.
Let $1 < x < n$ and consider $N_e(n,k,x)$ for odd $k$ and $k < n < 2k-1$.
Partition the sets $\sigma \in S_e(n,k,x)$ into those for which
$n \in \sigma$ on the one hand and those for which
$n \not\in \sigma$ on the other hand.
These parts contain $N_o(n-1,k-1,x-1)$ and $N_e(n-1,k,x-1)$ sets,
respectively.
Since $k-1$ is even, and since $k \leq n-1 < 2(k-1)$ when $k < n < 2k-1$,
it follows from the induction hypothesis that
$N_e(n,k,x) =
N_o(n-1,k-1,x-1) + N_e(n-1,k,x-1) \leq
\binom{n-2}{k-2} + \binom{n-2}{k-1} = \binom{n-1}{k-1}.$
The case for $N_o(n,k,x)$ and even $k$ is treated identically.
Finally, let $n = 2k-1$.
If $x$ is odd, then Lemma~\ref{lem:help} is applicable,
so we can assume that $x$ is even.
Now, as before
$N_e(2k-1,k,x) =
N_o(2k-2,k-1,x-1) + N_e(2k-2,k,x-1) \leq
\frac{1}{2} \binom{2k-2}{k-1} + \binom{2k-3}{k-1} = \binom{n-1}{k-1},$
where the first term is evaluated using (\ref{eqn:1}).
The same inequality can be shown for $N_o(2k-1,k,x)$
and even $k$,
which completes the proof.
\end{proof}
\section{Conclusions and Open Problems}
\label{sec:open}
We have seen that for all integers $k \geq 2$, $s(K_2,K_t)$ is constant on $I_k$.
It follows that our sandwich approach using Lemma~\ref{lem:sandwich} with $M = K_2$
and $N = K_r$ can not distinguish between the class
of graphs with circular chromatic number $2+1/k$ and the (larger) class with
circular chromatic number $2+2/(2k-1)$.
As previously noted, Jaeger's conjecture and subsequent research
has provided partial information on the members of the former class.
We remark that Jaeger's conjecture implies a weaker statement in our
setting. Namely, if $G$ is a planar graph with girth greater than $4k$,
then $G \rightarrow C_{2k+1}$ implies $s(K_2, G) \geq s(K_2,C_{2k+1}) =
2k/(2k+1)$. Deciding this to be true would certainly provide support for
the original conjecture, and would be an interesting result in its
own right.
Our starting observation shows that the slightly weaker condition
$G \rightarrow K_{2+2/(2k-1)}$ implies the same result.
When it comes to completely understanding how $s$ behaves on circular complete graphs, even
restricted to those between $K_2$ and $K_3$, there is still work to be done.
For edge-transitive graphs $K_t$, in our case the cycles and the complete graphs,
it is not surprising
that the expression $s(K_r, K_t)$ assumes a finite number of values seen as a function of $r$.
Indeed, Lemma~\ref{lem:auto} says that $s(K_r, K_t) = mc_{K_r}(K_t, 1/|E(K_t)|)$ which
leaves at most $|E(K_t)|$ values for $s$.
This produces a number of constant intervals which are partly
responsible for the constant regions of Corollary~\ref{cor:intervals} and the discussion
following it.
More surprising are the constant intervals that arise from
$s(K_r,K_{2+2/(2k-1)})$.
They give some hope that the behaviour of $s$ is possible to characterise more generally.
One direction could be to identify additional constant regions,
perhaps showing that they completely tile the entire space?
In Section~\ref{sec:cut} we generalised the notion of covering by cuts
due to \v{S}\'{a}mal.
By doing this, we have found a different interpretation of the $s$-numbers
as an entire family of `chromatic numbers'.
It is our belief that these alternate viewpoints can benefit from each other.
The refuted conjecture in Section~\ref{sec:neg} is an immediate example of
this.
On the other hand, it would be interesting to determine
when the generalised upper bound in (\ref{eq:ub}) is tight.
For $H = K_2$, the proof of Proposition~\ref{prop:samal} is precisely such a result
for the graphs $Q_{n/k}$,
which is evident from studying the proof of Theorem 5.4.7~\cite{samal:06}.
Following this, a natural step would be to calculate $\chi_H(H_k^n)$ for
more general graphs $H$, starting with $H = K_3$.
It is fairly obvious that $mcol{H}$ is a special case of the
{\em maximum constraint satisfaction} ({\sc Max CSP}) problem;
in this problem, one is given a finite collection of constraints on overlapping
sets of variables, and the goal is to assign values from a given domain to the
variables so as to maximise the number of satisfied constraints.
By letting $\Gamma$ be a finite set of relations, we can
parameterise {\sc Max CSP} with $\Gamma$ ({\sc Max CSP}$(\Gamma)$) so that
the only allowed constraints are those constructed from the relations in $\Gamma$.
By viewing a graph $H$ as a binary relation, the problems {\sc Max CSP}$(\{H\})$
and $mcol{H}$ are virtually identical.
Raghavendra~\cite{raghavendra:08} has presented
an algorithm for {\sc Max CSP}$(\Gamma)$ based on semi-definite programming.
Under the so-called {\em unique games conjecture}, this algorithm
optimally approximates {\sc Max CSP}$(\Gamma)$ in polynomial-time, i.e. no
other polynomial-time algorithm can approximate the problem substantially better.
However, it is notoriously difficult to find out exactly how well the
algorithm approximates {\sc Max CSP}$(\Gamma)$ for a given $\Gamma$.
It seems plausible that
the function $s$ can be extended into a function $s'$ from pairs of sets
of relations to ${\mathbb Q}^+$, and that $s'$ can be used for studying
the approximability of {\sc Max CSP} by extending the approach in
F\"arnqvist~et al.~\cite{farnqvist:etal:09}. This would constitute a novel method for
studying the approximability of {\sc Max CSP} --- a method that, hopefully, may
cast some new light on the performance of Raghavendra's algorithm.
\appendix
\begin{center}
{\bf APPENDIX}
\end{center}
\noindent
Let $0 < q \leq p$ be positive integers. We often assign names to the vertices, so that $V(K_{p/q}) = \{v_0,v_1,\ldots,v_{p-1}\}$. Then, we have $E(K_{p/q}) = \{v_i v_j \; | \; q \leq |i-j| \leq p-q\}$. Note that $K_{p/q}$ does not have any edges unless $p \geq 2q$, since the circular distance between two vertices is at most $p/2$.
For a fixed $p$, let $\delta(v_i,v_j) = j - i$ (mod $p$). $\delta(v_i,v_j)$ is then the directed circular distance (in positive direction) between $v_i$ and $v_j$. Furthermore let $\bar{\delta}(v_i,v_j) = \min{\{\delta(v_i,v_j), \delta(v_j,v_i)\}}$. This is then the undirected circular distance. We do index arithmetic for circular complete graphs modulo $p$, e.g.
$v_{-1}=v_{p-1}$. Even though $K_{2k+1/k}$ is isomorphic to $C_{2k+1}$, we distinguish them by letting $v_i v_j$ be an edge in $C_{2k+1}$ if $\bar{\delta}_{2k+1}(v_i,v_j)=1$, while $v_i v_j$ is an edge in $K_{2k+1/k}$ if $\bar{\delta}_{2k+1}(v_i,v_j)=k$.
Let $M$ and $N$ be graphs and
let $F$ be a set of signatures to $(N,\omega)$ of {\sc Max $M$-COL}.
If $F' \subseteq F$ is a subset for which the relaxation of (\ref{lp})
has the same optimal solution as the original program,
we will call $F'$ a \emph{complete} set of signatures with respect to
$(N,\omega)$ of {\sc Max $M$-COL}.
\section{Proofs of Results from Section~\ref{ssec:k2}}
\subsection*{Proposition~\ref{prop:3k+1}}
\begin{proof}
Let $V(K_{\frac{6k+5}{3k+1}}) = \{v_0,v_1,\ldots,v_{6k+4}\}$ and $V(K_2) = \{w_0, w_1\}$.
Let $f$ be the solution with $f(v_i) = w_0$ if $0 \leq i < 3k+3$ and $f(v_i) = w_1$
if $3k+3 \leq i < 6k+5$.
From $A_1$ only the edges $v_0 v_{3k+1}$, $v_1 v_{3k+2}$ and $v_{3k+3} v_{6k+4}$ are mapped to a single vertex in $K_2$, so $f_1=6k+2$. From $A_2$ only the edge $v_0 v_{3k+2}$ is mapped to a single vertex in $K_2$, so $f_2=6k+4$.
Thus, $f$ has the signature $f = (6k+2, 6k+4)$.
Note that since $6k+5$ and $3k+2$ are relatively prime, the edges of $A_1$,
as well as $A_2$, form cycles of length $6k+5$.
Therefore, any solution which maps more than $6k+2$ edges from $A_1$ to $K_2$
must map exactly $6k+4$.
Let $g$ be such a solution. We will show that $g_2 = 2k+2$.
We may assume that $v_{3k+1} v_0$ is the edge in $A_1$ which is not
mapped to $K_2$ by $g$.
Note that, if $i \neq 3k+1, 6k+2$, then $v_i v_{i+3k+4}$ and $v_{i+3k+4} v_{i+3}$
are both mapped to $K_2$ by $g$ which implies that $g(v_i) = g(v_{i+3})$.
Now, let $v_l v_{l+3(k+1)}$ be an edge in $A_2$ and let
$S = \{l, l+3, \ldots, l+3k\}$.
Then, this edge is mapped to $K_2$ by $g$, i.e., $g(v_l) \neq g(v_{l+3(k+1)})$
if and only if
$\{3k+1, 6k+2\} \cap S \neq \varnothing$.
Since $v_{3k+1}$ and $v_{6k+2}$ are adjacent in $A_1$, they can not
both be in $S$.
Therefore, there are $2 \cdot |S| = 2(k+1)$ edges that are mapped
to $K_2$ by $g$, so $g = (6k+4, 2k+2)$.
We conclude that solving (\ref{lp}) with the inequalities obtained from
$f$ and $g$ yields the correct value of $s$.
\end{proof}
\subsection*{Proposition~\ref{prop:4k+1}}
\begin{proof}
Let $V(K_{\frac{8k+6}{4k+1}}) = \{v_0,v_1,\ldots,v_{8k+5}\}$ and $V(K_2) = \{w_0,w_1\}$.
Define $f$ by $f(v_i) = w_0$ if $0 \leq i < 4k+3$ and $f(v_i) = w_1$ if $4k+3 \leq i < 8k+6$.
Here, the edges $v_0 v_{4k+1},v_1 v_{4k+2},v_{4k+3} v_{8k+4}$ and
$v_{4k+4} v_{8k+5}$ in $A_1$ are mapped to a single vertex in $K_2$ by $f$.
From $A_2$, $f$ maps edges $v_0 v_{4k+2}$ and $v_{4k+3} v_{8k+5}$ to a single
vertex in $K_2$.
Finally, $f$ maps all edges in $A_3$ to the edge in $K_2$.
The signature of this solution is $f=(8k+2,8k+4,4k+3)$.
Let $g$ be defined by
\begin{multline*}
g(v_0)=g(v_4)=\cdots=g(v_{8k+4})=g(v_2)=\cdots \\
=g(v_{4k-2})=g(v_{8k+3})=g(v_1)=\cdots=g(v_{4k-3})=w_0
\end{multline*}
and
\begin{multline*}
g(v_{4k+1})=g(v_{4k+5})=\cdots=g(v_{8k+5})=g(v_3)=\cdots \\
=g(v_{8k-1})=g(v_{4k+2})=g(v_{4k+6})=\cdots=g(v_{8k+2})=w_1.
\end{multline*}
From $A_1$ only the edges $v_{4k+1} v_{8k+2}$ and $v_{8k+3} v_{4k-2}$ are mapped to
a single vertex in $K_2$. From $A_2$ we partition the edges which are mapped to
the edge in $K_2$ by $g$ into four sets, with $k+1$ edges in each set. These are
\[
\{ v_0 v_{4k+2},v_4 v_{4k+6},\ldots,v_{4k} v_{8k+2} \},
\]
\[
\{ v_{4k+2} v_{8k+4}, v_{4k+6} v_2,\ldots,v_{8k+2} v_{4k-2} \},
\]
\[
\{ v_{4k+1} v_{8k+3},v_{4k+5} v_1,\ldots,v_{8k+1} v_{4k-3} \},
\]
\[
\{ v_{8k+3} v_{4k-1}, v_1 v_{4k+3},\ldots,v_{4k-3} v_{8k-1} \}.
\]
Finally, for $A_3$, $g$ maps the $k$ edges
$v_0 v_{4k+3},v_4 v_{4k+7},\ldots,v_{4k-4} v_{8k-1}$ as well as the $k+1$ edges
$v_{4k+1} v_{8k+4},v_{4k+5} v_2,\ldots,v_{8k+1} v_{4k-2}$ to the edge in $K_2$.
In summary, $g=(8k+4,4k+4,2k+1)$.
The relaxation of (\ref{lp}) corresponding to the two solutions $f$ and $g$ has
the following solution:
\[
s=\frac{8k^2+6k+2}{8k^2+10k+3}, \quad \omega_1=\frac{k}{8k^2+10k+3}, \quad \omega_2=\frac{1}{2(8k^2+10k+3)}, \quad \omega_3=0.
\]
We will now show that $s, \omega_1, \omega_2$ and $\omega_3$ is feasible in
the original program.
We will show that for all solutions $h$, we must have $h_2 \leq 8k+4$.
We will also show that if $h$ is such that $h_1 = 8k+4$, then
$h_2 \leq 4k+4$.
Finally, we will show that if $h_1 = 8k+6$, then $h_2$ must be 0.
In the final case, we note that $\omega_1 \cdot h_1 + \omega_2 \cdot h_2 < s$.
The edges of $A_2$ connect vertices at a distance of $4k+2$.
Since we have a common factor $2$ in $4k+2$ and $8k+6$,
the edges of $A_2$ consist of two odd cycles, each of length $4k+3$.
Since a cut of a cycle must include an even number of edges,
we can then at most have a solution that maps $8k+4$ edges to $K_2$.
For the second case,
note that $v_{i+4k+2}=v_{i+(2k+2)(4k+1)}$.
This means that the shortest path between $v_i$ and $v_{i+4k+2}$ in $A_1$
is of length $2k+2$. The edge $v_i v_{i+4k+2}$ is mapped to $K_2$
if and only if at least one edge in each of the paths from $v_i$ to $v_{i+4k+2}$ in
$A_1$ is not mapped to $K_2$, since they are both of even length.
If a solution $h$ has $h_1=8k+4$, only two edges from $A_1$ are
not mapped to $K_2$.
Therefore no more than $4k+4$ paths of length $2k+2$ can include
at least one of these two edges, hence $h_2 \leq 4k+4$.
Finally, if a solution $h$ includes an edge from $A_2$ it means that
$h(v_i) \neq h(v_{i+4k+2})$ for some $i$.
But since both paths from $v_i$ to $v_{i+4k+2}$ in $A_1$ are of even length,
not all edges from $A_1$ can be mapped to $K_2$.
So if $h_2 > 0$, then $h_1 < 8k+6$.
\end{proof}
\section{Proof of Proposition~\ref{prop:m1}}
\label{app:propm1proof}
The proof of Proposition~\ref{prop:m1} follows from Lemma~\ref{lem:solalpha} and~\ref{lem:solbeta} introduced and proved in this section.
\subsection*{Proposition~\ref{prop:m1}}
\begin{proof}
Let $p = 2(kn-1)+n$.
From Lemma~\ref{lem:solalpha}, we get a solution $f$, with
\begin{equation}
f = (\alpha \cdot |A_1|, |A_2|, \ldots, |A_{\lceil \frac{n+1}{2} \rceil}|),
\end{equation}
where $\alpha = 1-1/p$.
From Lemma~\ref{lem:solbeta}, we get another solution $f'$, with
\begin{equation}
f' = (|A_1|, \beta \cdot |A_2|, \ldots, \beta \cdot |A_{\lceil \frac{n+1}{2} \rceil}|),
\end{equation}
where $\beta = 1 - 2(2k-1)/p$.
The last constraint in (\ref{lp}) can be written as
\begin{equation}
\label{eq:isol1}
\sum_{i \neq 1} \omega_k \cdot |A_i| = 1 - \omega_1 \cdot |A_1|.
\end{equation}
We now insert (\ref{eq:isol1}) into the inequalities obtained from $f$ and
$f'$ to get the following relaxation of (\ref{lp}):
\begin{equation}
\label{eq:prog}
\begin{array}{l}
\omega_1 \cdot |A_1| \cdot (\alpha-1) + 1 \leq s \\
\omega_1 \cdot |A_1| \cdot (1-\beta) + \beta \leq s. \\
\end{array}
\end{equation}
The solution to this is $\frac{1-\alpha\beta}{2-\alpha-\beta}$,
which yields the $s$-value in the proposition.
To show that this is optimal for the original program,
let us consider the restriction of (\ref{lp}) in which we force
$\omega_i = 0$ for $i = 3, \ldots, \lceil \frac{n+1}{2} \rceil$.
Due to the second part of Lemma~\ref{lem:solbeta}, it suffices to
keep the two inequalities from $f$ and $f'$ in the program.
The equality constraint can now be written as
\begin{equation}
\label{eq:isol2}
\omega_2 \cdot |A_2| = 1 - \omega_1 \cdot |A_1|.
\end{equation}
By inserting (\ref{eq:isol2}) into the two remaining
inequalities we again obtain (\ref{eq:prog}).
Thus, the solution to the relaxation gives the right value for $s$.
\end{proof}
\begin{lemma}
\label{lem:solalpha}
Let $k,n,m$ be integers with $k,n \geq 2$ and $1 \leq m \leq \min\{n/2$, $2k+1\}$. Then, there exists a solution $f$ to $(K_{\frac{2kn+n-2m}{kn-m}},\omega)$ of MAX $C_{2k+1}$-COL with signature $(|A_1|-m, |A_2|, \ldots,|A_{\lceil\frac{n+1}{2}\rceil}|)$.
\end{lemma}
\begin{proof}
Let $V(K_{\frac{2kn+n-2m}{kn-m}}) = \{v_0,\ldots,v_{2kn+n-2m-1}\}$ and $V(C_{2k+1}) = \{w_0$, $\ldots$, $w_{2k}\}$. The construction of $f$ will depend on whether $m \leq k$ or $m > k$.
When $m \leq k$ we define $f$ as follows.
$f^{-1}(w_0) = \{v_0,v_1,\ldots,v_{n-1}\}$,
$f^{-1}(w_2) = \{v_{n},\ldots,v_{2n-1}\}$,
$\vdots$
$f^{-1}(w_{2k-2m}) = \{v_{(k-m)n},\ldots,v_{(k-m+1)n-1}\}$,
$f^{-1}(w_{2k-2m+2}) = \{v_{(k-m+1)n},\ldots,v_{(k-m+2)n-2}\}$,
$\vdots$
$f^{-1}(w_{2k}) = \{v_{kn-m+1},\ldots,v_{(k+1)n-m-1}\}$,
$f^{-1}(w_1) = \{v_{(k+1)n-m},\ldots,v_{(k+2)n-m-1}\}$,
$\vdots$
$f^{-1}(w_{2k-2m-1}) = \{v_{(2k-2m-1)n-m},\ldots,v_{(2k-2m)n-m-1}\}$,
$f^{-1}(w_{2k-2m+1}) = \{v_{(2k-2m)n-m},\ldots,v_{(2k-2m+1)n-m-2}\}$,
$\vdots$
$f^{-1}(w_{2k-1}) =\{v_{2kn-2m+1},\ldots,v_{(2k+1)n-2m-1}\}$.
\noindent
Note, in particular, that
\[
|f^{-1}(w_j)| = \begin{cases}
n & \text{for $0 \leq j \leq 2(k-m)$, and} \\
n-1 & \text{for $2(k-m) < j \leq 2k-1$.} \\
\end{cases}
\]
When $m > k$, we define $f$ as follows:
$f^{-1}(w_0) = \{v_0,v_1,\ldots,v_{n-2}\}$,
$f^{-1}(w_2) = \{v_{n-1},\ldots,v_{2n-3}\}$,
$\vdots$
$f^{-1}(w_{4k-2m}) = \{v_{(2k-m)(n-1)},\ldots,v_{(2k-m+1)(n-1)-1}\}$,
$f^{-1}(w_{4k-2m+2}) = \{v_{(2k-m+1)(n-1)},\ldots,v_{(2k-m+2)(n-1)-2}\}$,
$\vdots$
$f^{-1}(w_{2k}) = \{v_{k(n-1)-m+k+1},\ldots,v_{(k+1)(n-1)-m+k-1}\}$,
$f^{-1}(w_1) = \{v_{(k+1)(n-1)-m+k},\ldots,v_{(k+2)(n-1)-m+k-1}\}$,
$\vdots$
$f^{-1}(w_{4k-2m+1}) = \{v_{(4k-2m)(n-1)-m+k},\ldots,v_{(4k-2m+1)(n-1)-m+k-1}\}$,
$f^{-1}(w_{4k-2m+3}) = \{v_{(4k-2m+1)(n-1)-m+k},\ldots,v_{(4k-2m+2)(n-1)-m+k-2}\}$,
$\vdots$
$f^{-1}(w_{2k-1}) =\{v_{2k(n-1)-2m+2k+2},\ldots,v_{(2k+1)(n-1)-2m+2k}\}$.
\noindent
In this case,
\[
|f^{-1}(w_j)| = \begin{cases}
n-1 & \text{for $0 \leq j < 2(2k-m+1)$, and} \\
n-2 & \text{for $2(2k-m+1) \leq j \leq 2k-1$.} \\
\end{cases}
\]
Now,
consider a vertex $v_i$ with $f(v_i) = w_j$. Take one edge $v_i v_l \in A_2 \cup \cdots \cup A_{\lceil\frac{n+1}{2}\rceil}$. Then,
\begin{equation} \label{eq:deltail}
kn-m+1 \leq \delta(v_i,v_l) \leq 2(kn-m)+n-(kn-m+1)=kn-m+n-1.
\end{equation}
Let $a=\min{\{h \;|\; f(v_h) = w_{j-1} \}}$. That is, $v_a$ is the vertex with lowest index which is mapped to $w_{j-1}$. Furthermore let $b=\max{\{h \;|\; f(v_h) = w_{j+1} \}}$. We then have
\[
f(\{v_a,v_{a+1},\ldots,v_{b-1},v_b\})=\{w_{j-1},w_{j+1}\}.
\]
We now want to show that $l \in \{a, \ldots, b\}$. It will then follow that $f(v_i) f(v_l) \in E(C_{2k+1})$, i.e., all edges outside of $A_1$ are mapped to an edge in $C_{2k+1}$.
To do this, we will show that $\delta(v_i,v_a) \leq \delta(v_i,v_l) \leq \delta(v_i,v_b)$.
First, we bound $\delta(v_i,v_a)$ from above by taking a walk along the vertices between $v_i$ and $v_a$. We need to pass at most $|f^{-1}(w_j)|-1$ vertices to enter the set $f^{-1}(w_{j+2})$. We then continue until $f^{-1}(w_{2k-1})$ or $f^{-1}(w_{2k})$ depending on the parity of $j$. Our walk continues from $f^{-1}(w_{0})$ or $f^{-1}(w_1)$ up until we come to the last vertex in $f^{-1}(w_{j-3})$. Finally we take one last step into $f^{-1}(w_{j-1})$ and reach $v_a$. We have then passed
\[
\delta(v_i,v_a) \leq |f^{-1}(w_j)|-1+|f^{-1}(w_{j+2})|+|f^{-1}(w_{j+4})|+\ldots+|f^{-1}(w_{j-3})|+1
\]
vertices. There are $k$ sets among $f^{-1}(w_{j}),\ldots,f^{-1}(w_{j-3})$. When $m \leq k$ each set has either $n$ or $n-1$ vertices. However, at most $\lceil\frac{2(k-m)+1}{2}\rceil =k-m+1$ of them can contain $n$ vertices. Thus,
\[
\delta(v_i,v_a) \leq k(n-1)+ k-m+1 = kn-m+1.
\]
In the case of $m > k$, each set has either $n-1$ or $n-2$ vertices but at most $\frac{2(2k-m+1)}{2}=2k-m+1$ of them can contain $n-1$ vertices. Thus,
\[
\delta(v_i,v_a) \leq k(n-2) + 2k-m+1 = kn-m+1.
\]
When bounding $\delta(v_i,v_b)$ from below, we take a similar walk, but now we want to determine the fewest possible vertices we will pass. Therefore, we assume that we immediately move into the set $f^{-1}(w_{j+2})$ and will go all the way to the last vertex in $f^{-1}(w_{j+1})$. We have then passed a total of
\[
\delta(v_i,v_b) \geq |f^{-1}(w_{j+2})|+|f^{-1}(w_{j+4})|+\ldots+|f^{-1}(w_{j+1})|
\]
vertices. There are $k+1$ sets among $f^{-1}(w_{j+2}),\ldots,f^{-1}(w_{j+1})$. When $m \leq k$ at least $\frac{2(k-m)}{2}=k-m$ of the sets have $n$ vertices. Thus,
\[
\delta(v_i,v_b) \geq (k+1)(n-1)+k-m = kn-m+n-1.
\]
In the case of $m > k$, at least $\frac{2(2k-m+1)}{2} = 2k-m+1$ of the sets have $n-1$ vertices. Thus,
\[
\delta(v_i,v_b)\geq (k+1)(n-2) + 2k-m+1 = kn-m+n-1.
\]
Combining the lower and upper bounds with (\ref{eq:deltail}), we find that
\[
\delta(v_i,v_a) \leq kn-m+1 \leq \delta(v_i,v_l) \leq kn-m+n-1 \leq \delta(v_i,v_b),
\]
hence $f(v_i) f(v_l) \in E(C_{2k+1})$.
Since $v_i v_l$ was an arbitrary edge in $A_2 \cup \cdots \cup A_{\lceil \frac{n+1}{2} \rceil}$, this implies that $f_j = |A_j|$ for $j > 1$.
It remains to determine $f_1$.
Recall that $A_1 = \{ v_i v_{i + kn-m} \,|\, 0 \leq i < 2(kn-m)+n \}$.
As before, we want to check if $\delta(v_i,v_a) \leq \delta(v_i,v_l) = kn-m < kn-m+n-1 \leq \delta(v_i,v_b)$ to determine if $f(v_i) f(v_l) \in E(C_{2k+1})$.
This means that $f(v_i) f(v_l)$ is a non-edge in $C_{2k+1}$
if and only if $\delta(v_i,v_a) = kn-m+1$.
This, in turn, can only happen if the walk from $v_i$ to $v_a$ passes all $|f^{-1}(w_j)|-1$ of the vertices from $f^{-1}(w_j)$ (excluding $v_i$).
Thus, $v_i$ has to be the vertex with the lowest index in $f^{-1}(w_j)$.
In total there are $2k+1$ such vertices, one for each vertex in $C_{2k+1}$.
Furthermore, it must be the case that the walk fully passes the $k-m+1$ sets
$f^{-1}(w_0), f^{-1}(w_2), \ldots, f^{-1}(w_{2k-2m})$
with $n$ vertices in the case when $m \leq k$ and the $2k-m+1$ sets
$f^{-1}(w_0), f^{-1}(w_2), \ldots, f^{-1}(w_{2(2k-m)})$
with $n-1$ vertices when $m > k$.
When $m \leq k$ this happens precisely when $j$ is odd and
$2(k-m)+3 \leq j \leq 2k-1$, i.e. $m$ times.
When $m > k$ it happens precisely when $j$ is odd and
$2(k-m+1)+1 \leq j \leq 2k-1$ which is also $m$ times.
In all cases, there will be $m$ edges in $A_1$ which are not mapped to edges in
$E(C_{2k+1})$ so $f_1 = |A_1|-m$ which concludes the proof.
\end{proof}
Let $p$ and $q$ be relatively prime and let $V(K_{p/q}) = \{v_0, \ldots, v_{p-1}\}$. Define a function $\tau : [p] \rightarrow [p]$ by letting $\tau(i) = j$ if $0 \leq j < p$ and $jq \equiv i $ (mod $p$). Note that $\tau$ is a bijection on $[p]$. We will think of $\tau$ as indicating the length of a path (in the positive direction) from $v_0$ to $v_j$ in the cycle $A_1$.
We will denote the length from $v_k$ to $v_l$ in $A_1$ by $\delta_{\tau}(v_k,v_l) = \tau(l)-\tau(k)$ taken modulo $p$.
Note that $\delta_{\tau}(v_i,v_{i+a}) = \delta_{\tau}(v_0,v_a)$ for all integers $i$.
Closed and half-open intervals are defined by $[v_a,v_b]_{\tau} := \{v_l \;|\; \delta_{\tau}(v_a,v_l) \leq \delta_{\tau}(v_a,v_b)\}$ and $(v_a,v_b]_{\tau} := \{v_l \;|\; 0 < \delta_{\tau}(v_a,v_l) \leq \delta_{\tau}(v_a,v_b)\}$, respectively.
Let $V(C_{2k+1}) = \{w_0, \ldots, w_{2k}\}$.
Given a subset $S \subseteq \{v_0, \ldots, v_{p-1}\}$, we will now describe a general construction
of a solution $f = f_S$ to an instance $(K_{p/q}, \omega)$ of {\sc Max $C_{2k+1}$-COL}.
The idea is to map the nodes $v_{\tau(i)}$ in order of increasing $i$ starting by $f(v_{\tau(0)}) = f(v_0) = w_0$. We then map $v_{\tau(i)}$ to a node adjacent to $f(v_{\tau(i-1)})$, picking one of the two possibilities depending on whether $i+1 \in S$ or not.
To give the formal definition, it will be convenient to introduce the rotation
$\rho$ on $C_{2k+1}$ defined as $\rho(w_i) = w_{i+1}$.
We then have,
\[
f(v_{\tau(i)}) = \begin{cases}
w_0 & \text{when $i = 0$,} \\
\rho^{-1}(f(v_{\tau(i-1)})) & \text{when $i > 0$ and $v_i \in S$,} \\
\rho(f(v_{\tau(i-1)})) & \text{when $i > 0$ and $v_i \not\in S$.} \\
\end{cases}
\]
Note that the last vertex to be mapped is $v_{\tau^{-1}(p-1)} = v_{p-q}$.
If the created solution has $f(v_{p-q}) = w_1$ or $w_{2k}$, then
$f_1 = |A_1|$, otherwise $f_1 = |A_1| - 1$.
In the latter case, it does not matter whether $v_0 \in S$ or not and we
can assume that $v_0 \not\in S$.
However, to maintain consistency in the case of $f_1 =|A_1|$,
we want to have $v_0 \in S$ if $f(v_{p-q}) = w_1$ and
$v_0 \not\in S$ otherwise.
Therefore, $v_0 \in S$ if and only if $f(v_{p-q}) = w_1$.
\begin{exmp}
The solution $f : V(K_{22/9}) \rightarrow V(C_5)$ with $S = \{v_{14}$, $v_1$, $v_{11}$, $v_{20}$, $v_7$, $v_{17}$, $v_4$, $v_{13}\}$ looks as follows.
\[
\begin{array}{c c c c c}
{\bf f^{-1}(w_0)} & {\bf f^{-1}(w_1)} & {\bf f^{-1}(w_2)} & {\bf f^{-1}(w_3)} & {\bf f^{-1}(w_4)} \\ \hline
v_0 & v_9 & v_{18} & v_5 & \\
& v_{1} & v_{14} & \\
& & v_{10} & v_{19} & v_6 \\
v_{15} & v_{2} & & & \\
v_{11} & & & & \\
& & & v_7 & v_{20} \\
& & & & v_{16} \\
v_3 & v_{12} & v_{21} & v_8 & \\
v_{13} & v_4 & v_{17} & &
\end{array}
\]
Note that the $v_i$ are mapped in the order $v_0, v_9, v_{18}, v_5, \ldots, v_{13}$.
$S$ is given in the order in which the vertices appear along $A_1$.
To start, we let $f(v_0) = w_0$. Neither of $v_9, v_{18}$ or $v_5$ appear in
$S$, so these are mapped consecutively.
Then, we get to $v_{14}$ which is in $S$.
Since $f(v_5) = w_3$ we let $f(v_{14}) = w_2$.
Finally, $f(v_{13})=w_0$ so the signature of $f$ has $f_1 = |A_1|-1 = 21$.
\end{exmp}
We will now give some basic properties of the solutions created using this
construction for the case when $p = 2(kn-m)+n$ and $q = kn-m$.
We will from now on assume that $f_1 = |A_1|$.
This occurs when the construction has an equal number of applications of
$\rho$ and $\rho^{-1}$ modulo $2k+1$. That is, when $|S| \equiv p-|S| $ (mod $2k+1$).
Solving for $|S|$ we get:
\begin{equation} \label{eq:fulla1}
|S| \equiv 2k+1-m \text{ (mod $2k+1$)}.
\end{equation}
Assume that $f(v_i) = w_j, f(v_{i'}) = w_{j'}$.
Then, the index $j'$ is determined by $\delta_{\tau}(v_i,v_{i'})$ and
$S \cap (v_i, v_{i'}]_{\tau}$ as follows:
\begin{equation} \label{eq:index}
j' \equiv j+\delta_{\tau}(v_i,v_{i'}) - 2 \cdot |S \cap (v_i, v_{i'}]_{\tau}| \text{ (mod $2k+1$)}.
\end{equation}
\noindent
Relation (\ref{eq:index}) implies the following useful lemma:
\begin{lemma} \label{lem:usefulcong}
$f(v_i) f(v_{i'}) \in E(C_{2k+1})$ iff
$|S \cap (v_i, v_{i'}]_{\tau}| \equiv (k+1)(\delta_{\tau}(v_i,v_{i'}) \pm 1) $ (mod $2k+1$).
\end{lemma}
\begin{lemma}
\label{lem:solbeta}
Let $k,n \geq 2$ be integers.
There exists a solution $f$ to $(K_{\frac{2kn+n-2}{kn-1}},\omega)$ of {\sc Max $C_{2k+1}$-COL} with $f_1 = |A_1|$, and
\[
f = \begin{cases}
(|A_1|,|A_2|-2(2k-1),\ldots,|A_{\frac{n+1}{2}}|-2(2k-1)) & \text{if $n$ is odd,} \\
(|A_1|,|A_2|-2(2k-1),\ldots,|A_{\frac{n}{2}}|-2(2k-1),|A_{\frac{n+2}{2}}|-(2k-1)) & \text{if $n$ is even.}
\end{cases}
\]
Furthermore, for any other solution $g$, if $g_1 = |A_1|$, then $g_2 \leq f_2$,
componentwise.
\end{lemma}
\begin{proof}
Let $p = 2(kn-1)+n, q = kn-1$, and $V(K_{p/q}) = \{v_0,\ldots,v_{p-1}\}$.
The desired solution $f$ is obtained from the construction $f = f(S)$
with $S = [v_{\tau^{-1}(p-2k+1)},v_0]_{\tau}$.
As required by (\ref{eq:fulla1}), we have $|S| = p-(p-2k+1)+1 = 2k$ so that
$f_1 = |A_1|$.
It remains to determine $f_c$ for $c > 1$.
Let $v_i v_{i'} \in A_c, c > 1$ be an edge.
In order to count the edges only once, we will assume that $i' = i+q+(c-1) $ (mod $p$).
To be able to use the condition in Lemma~\ref{lem:usefulcong} we need to
determine $\delta_{\tau}(v_i, v_{i'})$.
But, $\tau(i') \equiv \tau(i)+1+q^{-1}(c-1) $ (mod $p$), where
$q^{-1} := p-2k-1$, the inverse of $q$ modulo $p$.
We then obtain $\delta_{\tau}(v_i, v_{i'}) = \tau(i')-\tau(i)$ by reducing $1+(p-2k-1)(c-1)$ modulo $p$.
\[
\delta_{\tau}(v_i, v_{i'}) = \begin{cases}
1 & \text{if $c = 1$, and} \\
-1+(2k+1)(n-c+1) & \text{otherwise.} \\
\end{cases}
\]
Assuming $c \neq 1$, we have two cases in Lemma~\ref{lem:usefulcong}.
We conclude that
$f(v_i) f(v_{i'}) \in E(C_{2k+1})$ if and only if
either
\begin{equation}
\label{eq:recycle}
|S \cap (v_{i},v_{i'}]_{\tau}| \equiv 0 \qquad \text{or} \qquad
|S \cap (v_{i},v_{i'}]_{\tau}| \equiv (k+1)(-2) \equiv 2k \text{ (mod $2k+1$)}.
\end{equation}
In both cases the condition is equivalent to $v_i, v_{i'} \not\in S \setminus \{v_0\}$.
Therefore, the edges $v_i v_{i'}$ which are not mapped to an edge in $C_{2k+1}$ by $f$ are the ones with an endpoint in $S \setminus \{v_0\}$. (There are no edges with both endpoints in this set.)
When $n$ is even and $c = n/2+1$, this number equals $|S \setminus \{v_0\}| = 2k-1$.
In all other cases, there are $2(2k-1)$ such edges.
The first part of the lemma follows.
For the second part, we pick an arbitrary solution $g$ and show that we can
find at least $2(2k-1)$ edges in $A_2$ which can not be mapped to $C_{2k+1}$,
provided that $g_1 = |A_1|$.
It is easy to see that, up to rotational symmetry, a $g$ with $g_1 = |A_1|$
must be constructible by $g = g(S)$ for some $S$.
We already know that such an $S$ must satisfy $|S| \equiv 2k$ (mod $2k+1$).
This implies $|S| \geq 2k$.
From $p \equiv 2k-1$ (mod $2k+1$), we also see that we must have
$|V(K_{p/q}) \setminus S| \geq 2k$.
As argued before, an edge from $A_c$ is mapped to $C_{2k+1}$ if and only if one of the
congruences in (\ref{eq:recycle}) holds.
Since $|S| \equiv 2k$ (mod $2k+1$), we can equivalently write this as
$f(v_i) f(v_{i'}) \in E(C_{2k+1})$ if and only if
either
\begin{equation}
\label{eq:recycle2}
|S \cap (v_{i'},v_{i}]_{\tau}| \equiv 2k \qquad \text{or} \qquad
|S \cap (v_{i'},v_{i}]_{\tau}| \equiv 0 \text{ (mod $2k+1$)}.
\end{equation}
Hence, either the intersection of $S$ with $(v_{i'},v_{i}]_{\tau}$ is empty or
the latter is a subset of the former.
As the two cases can be treated identically, we assume,
without loss of generality, that the intersection is empty.
Note that $|(v_{i'},v_{i}]_{\tau}| = 2k$.
We will now determine $2(2k-1)$ edges which can not be mapped to edges
in $C_{2k+1}$.
Let $v_{j_{1}}$ be the first vertex in $S$ encountered following $A_1$ from $v_i$ in
the positive direction.
Similarly, let $v_{j_{2}}$ be the first vertex in $S$ encountered following $A_1$
from $v_{i'}$ in the negative direction.
Then, $v_{j_{1}}, v_{j_{1}-q} \in (v_{j_{1}+(a+1)q+1}, v_{j_{1}+aq}]_{\tau}$,
for $a = 0, \ldots, 2k-2$,
but $v_{j_{1}} \in S$ and $v_{j_{1}-q} \not\in S$ by construction.
Thus, from (\ref{eq:recycle2}), the edges $v_{j_{1}+aq} v_{j_{1} +(a+1)q+1}$
can not be mapped to $C_{2k+1}$.
In the other direction, we have $v_{j_{2}}, v_{j_{2}+q} \in (v_{j_{2}-a'q}, v_{j_{2}+(1-a')q+1}]_{\tau}$,
for $a' = 0, \ldots, 2k-2$,
but $v_{j_{2}} \in S$ and $v_{j_{2}+q} \not\in S$ by construction.
From this we get another $2k-1$ edges which can not be mapped to $C_{2k+1}$.
Finally, we note that since $S \subseteq [v_{j_1}, v_{j_2}]_{\tau}$ and
$|S| \geq 2k$, the edges
$v_{j_{1}+aq} v_{j_{1} +(a+1)q+1}$ and
$v_{j_{2}+(1-a')q+1} v_{j_{2}-a'q}$ are distinct.
This proves that $g_2 \leq f_2$.
\end{proof}
\begin{exmp}
With $k=3$ and $n=5$ the solution $f = f(S)$ to $(K_{33/14},\omega)$ of MAX $C_7$-COL created as in Lemma~\ref{lem:solbeta} with $S = \{v_{29}$, $v_{10}$, $v_{24}$, $v_{5}$, $v_{19}$, $v_{0}\}$ looks like:
\[
\begin{array}{c c c c c c c}
{\bf f^{-1}(w_0)} & {\bf f^{-1}(w_1)} & {\bf f^{-1}(w_2)} & {\bf f^{-1}(w_3)} & {\bf f^{-1}(w_4)} & {\bf f^{-1}(w_5)} & {\bf f^{-1}(w_6)} \\ \hline
v_0 & v_{14} & v_{28} & v_9 & v_{23} & v_4 & v_{18} \\
v_{32} & v_{13} & v_{27} & v_8 & v_{22} & v_3 & v_{17} \\
v_{31} & v_{12} & v_{26} & v_7 & v_{21} & v_2 & v_{16} \\
v_{30} & v_{11} & v_{25} & v_6 & v_{20} & v_1 & v_{15} \\
& v_{19} & v_5 & v_{24} & v_{10} & v_{29} &
\end{array}
\]
\end{exmp}
\section{Proof of Proposition~\ref{th:qisfunny}}
The proof of Proposition~\ref{th:qisfunny} follows from a series of lemmas.
The function $\delta_{\tau}$ and how it is used for constructing solutions is
presented in Appendix~\ref{app:propm1proof}.
\begin{lemma}
\label{lem:splitend}
Let $k \geq 2$ be an integer, and $n \geq 3$ be an odd integer. Then, there exists a solution $f$ to $(K_{\frac{2kn+n-4}{kn-2}},\omega)$ of MAX $C_{2k+1}$-COL with the following signature:
\begin{equation}
\begin{array}{llll}
f_1 & = & |A_1|,\\
f_{2i} & = & |A_{2i}|-(\frac{n-1}{2}-i)(2k+1)-(4k-2) & \text{for $i = 1, 2, \ldots, \frac{n+1}{4}$}, \\
f_{2i+1} & = & |A_{2i+1}|-(i-1)(2k+1)-(4k-2) & \text{for $i = 1,2,\ldots,\frac{n-1}{4}$}.
\end{array}
\end{equation}
\end{lemma}
\begin{proof}
Let $G = K_{\frac{2kn+n-4}{kn-2}}$, $V(G) =\{ v_0,\ldots,v_{2kn+n-5}\}$ and $V(C_{2k+1}) = \{w_0,\ldots,w_{2k}\}$. A solution $f=f(S)$ with this signature is obtained with
$S = [v_{\tau^{-1}(2kn+n-2k-2)},v_0]_{\tau}.$
We then have $|S| = (2kn+n-4) - (2kn+n-2k-2) + 1 = 2k-1$, so $f_1 = |A_1|$ by (\ref{eq:fulla1}).
An orbit $A_{2i}$ includes edges which connect vertices at a distance $kn-2+2i-1$. $\delta_{\tau}(v_j,v_{j+kn+2i-3}) = (\frac{n-1}{2}-(i-1))(2k+1)-1$.
Lemma~\ref{lem:usefulcong} then says that $f(v_j) f(v_{j+kn+2i-3}) \in E(C_{2k+1})$
if and only if $|S \cap (v_{j},v_{j+kn+2i-3}]_{\tau}| \equiv 0$ or $2k$ (mod $2k+1$).
That is, $|S \cap (v_{j},v_{j+kn+2i-3}]_{\tau}|$ must be 0.
This is the case only when
\[
\delta_{\tau}(v_0,v_j) \leq \delta_{\tau}(v_0,v_{j+kn+2i-3}) < \delta_{\tau}(v_0,v_{\tau^{-1}(2kn+n-2k-2)}),
\]
which implies
\begin{multline*}
\delta_{\tau}(v_0,v_j) \leq \delta_{\tau}(v_0,v_{\tau^{-1}(2kn+n-2k-2)}) - \delta_{\tau}(v_j,v_{j+kn+2i-3}) - 1 \\
\leq 2kn+n-2k-2-((\frac{n-1}{2}-(i-1))(2k+1)-1)-1 \\
= 2kn+n-4 -(\frac{n-1}{2}-i)(2k+1)-(4k-2)-1,
\end{multline*}
which holds for exactly $2kn+n-4 -(\frac{n-1}{2}-i)(2k+1)-(4k-2)$ vertices $v_j$.
An orbit $A_{2i+1}$ includes edges which connect vertices at a distance $kn-2+2i$. $\delta_{\tau}(v_{j+kn+2i-2},v_j) = i(2k+1)-1$. Applying Lemma~\ref{lem:usefulcong} again asserts that $S \cap (v_{j},v_{j+kn+2i-2}]_{\tau}$ must be empty. Thus,
\[
\delta_{\tau}(v_0,v_{j+kn+2i-2}) \leq \delta_{\tau}(v_0,v_j) < \delta_{\tau}(v_0,v_{\tau^{-1}(2kn+n-2k-2)}),
\]
which implies
\begin{multline*}
\delta_{\tau}(v_0,v_j) \leq \delta_{\tau}(v_0,v_{\tau^{-1}(2kn+n-2k-2)}) - \delta_{\tau}(v_j,v_{j+kn+2i-2}) - 1 \\
\leq 2kn+n-2k-2-(i(2k+1)-1) -1 \\
= 2kn+n-4-(i-1)(2k+1)-(4k-2) - 1,
\end{multline*}
which holds for exactly $2kn+n-4-(i-1)(2k+1)-(4k-2)$ vertices $v_j$.
\end{proof}
\begin{exmp}
For $K_{31/13}$, the solution $f=f(S)$ as in Lemma~\ref{lem:splitend} with $k=3$ and $n=5$ has $S = \{v_{10}$, $v_{23}$, $v_{5}$, $v_{18}$, $v_{0}\}$ and looks like:
\[
\begin{array}{c c c c c c c}
{\bf f^{-1}(w_0)} & {\bf f^{-1}(w_1)} & {\bf f^{-1}(w_2)} & {\bf f^{-1}(w_3)} & {\bf f^{-1}(w_4)} & {\bf f^{-1}(w_5)} & {\bf f^{-1}(w_6)} \\ \hline
v_0 & v_{13} & v_{26} & v_8 & v_{21} & v_3 & v_{16} \\
v_{29} & v_{11} & v_{24} & v_6 & v_{19} & v_1 & v_{14} \\
v_{27} & v_{9} & v_{22} & v_4 & v_{17} & v_{30} & v_{12} \\
v_{25} & v_{7} & v_{20} & v_2 & v_{15} & v_{28} & \\
& v_{18} & v_5 & v_{23} & v_{10} & &
\end{array}
\]
\end{exmp}
The following technical lemma will prove useful in analysing the solutions
in Lemma~\ref{lem:corw}.
Some cases of the defined (partial) function $\gamma$ which are not needed
for this analysis have been left out.
\begin{lemma}
\label{lem:gamma}
Let $p,q,r,s$ be positive integers so that $r > 2p+q+s$ and $s \geq p$. Now consider $r$ elements equidistantly placed on a circle, and select two sequences $P_1$ and $P_2$, each containing $p$ consecutive elements, with $q$ elements between them on one side and $r-2p-q$ on the other side. Let $\gamma(i)$ be the number of ways to select $s$ consecutive elements on the circle with exactly $i$ elements from $P_1 \cup P_2$. Then,
when $s \leq q$:
\[
\gamma(i) = \begin{cases}
r-2p-2s+2 & \text{if $i = 0$,} \\
2s-2p+2 & \text{if $i = p$,} \\
0 & \text{if $i > p$.}
\end{cases}
\]
when $s = q+p$:
\[
\gamma(i) = \begin{cases}
r-3p-2q+1 & \text{if $i = 0$,} \\
2q+p+1 & \text{if $i = p$,} \\
0 & \text{if $i > p$.}
\end{cases}
\]
and when $s > q+p+1$:
\[
\gamma(i) = \begin{cases}
r-2p-q-s+1 & \text{if $i = 0$,} \\
2q+2 & \text{if $i = p$,} \\
2 & \text{if $i = p+1$,} \\
0 & \text{if $i > 2p$.}
\end{cases}
\]
\end{lemma}
\begin{proof}
Call the elements $\{0,\ldots,r-1\}$. Suppose $P_1 = \{0,\ldots,p-1\}$ and $P_2 = \{q+p,\ldots,q+2p-1\}$.
For $s \leq q$. Then the sequences that starts with $p,\ldots,q+p-s$ as well as $q+2p,\ldots,r-s$ are the only ones that do not contain any element from $P_1 \cup P_2$ and that is $q+p+s+1$ and $r-q-2p-s+1$ elements, and in total $r-2p-2s+2$. To get $p$ elements we have the sequences that starts with $q-2p-s,\ldots,q+p$ and $p-s,\ldots,0$ as the only options, and that is $s-p+1$ in both cases so $2s-2p+2$ in total. Also if $s \leq q$ clearly there is no sequence of length $s$ that includes element from both $P_1$ and $P_2$.
For $s=q+p$, the ones starting with $q+2p,\ldots,r-s$ are the only ones that do not contains any element from $P_1 \cup P_2$ and that is $r-q-2p-s+1=r-3p-2q+1$ elements. To get $p$ elements we have that for any sequence of length $s$ starting with an element $i \in P_1$ contains $i$ number of elements from $P_2$ so all sequences starting with $r-q,\ldots,q+p$ contains $p$ elements from $P_1 \cup P_2$, and that is $p+2q+1$ in total. This fact also makes it clear that no sequence can contain more than p elements from $P_1 \cup P_2$.
For $s=q+p$, the sequences starting with $q+2p,\ldots,r-s$ are the only ones that do not contain any element from $P_1 \cup P_2$, and that is $r-q-2p-s+1=r-3p-2q+1$ sequences. To get $p$ elements, note that any sequence of length $s$ starting with an element $i \in P_1$ contains $i$ elements from $P_2$, so all sequences starting with $r-q,\ldots,q+p$ contain $p$ elements from $P_1 \cup P_2$; that is $p+2q+1$ in total. This fact also makes it clear that no sequence can contain more than $p$ elements from $P_1 \cup P_2$.
Finally, when $s > q+p+1$, again the sequences starting with $q+2p,\ldots,r-s$ are the only ones that do not contain any element from $P_1$ or $P_2$, and the sequences that include $p$ elements must start with $p,\ldots,q+p$ or $r+p-s,\ldots,r+p+q-s$; that is $q+1$ in both cases, for a total of $2q+2$. The only sequences to contain $p+1$ elements are the ones starting with $p-1$ and $r+p+q-s+1$, and since $|P_1 \cup P_2| = 2p$ there is of course no sequence containing more than $2p$ elements.
\end{proof}
\begin{lemma}
\label{lem:corw}
Let $f=f(S)$ be a solution to $(K_{\frac{2kn+n-2m}{kn-m}},\omega)$ of {\sc MAX $C_{2k+1}$-COL} with $f_1 = |A_1|$ and where $S = P_1 \cup P_2$, and $P_1 \cap P_2 = \varnothing$, where
$P_1 = [v_a,v_{a+(2k+1)-2}]_{\tau}$
and
$P_2 = [v_b,v_{b+(2k+1)-2}]_{\tau}$
such that
$\min_{v_i\in P_1, v_j \in P_2}{\bar{\delta}_{\tau}(v_i,v_j)} = (u-1)(2k+1)$.
Let $A_c$ be the orbit consisting of edges $v_l v_h$ with $\delta_{\tau}(v_l,v_h)=g(2k+1)-1$. Then,
\[
f_c = \begin{cases}
|A_c|-(8k-4) & \text{if $g < u$,} \\
|A_c|-(4k-2) & \text{if $g = u$,} \\
|A_c|-(g-u)(2k+1)-(6k-5) & \text{if $g > u$.}
\end{cases}
\]
\end{lemma}
\begin{proof}
We can apply Lemma~\ref{lem:gamma}, since, according to Lemma~\ref{lem:usefulcong}, we must have $|S \cap [v_{l+1},v_h]_{\tau}|=0,2k$ or $2k+1$. So for Lemma~\ref{lem:gamma} we have $r=|A_c|$, $p=2k$, $q=(u-1)(2k+1)$ and $s=g(2k+1)-1$. We see that $s=p+q$ when $g=u$, that $s\leq q$ when $g<u$, and that $s > q+p+1$ when $g > u$. So all we have to do is, for each case, count $\gamma(0)+\gamma(p)+\gamma(p+1)$.
\end{proof}
Now it is possible to construct a series of signatures with solutions $f(S)$ where $S$ will have the properties sought after by Lemma~\ref{lem:corw}.
\begin{lemma}
\label{lem:splitmiddle}
There exists a set of solutions $F = \{ f^i \}, i = 2,\ldots,\frac{n+1}{2}$ to $(K_{\frac{2kn+n-4}{kn-2}},\omega)$ of the problem {\sc Max $C_{2k+1}$-COL} with signatures:
\begin{eqnarray*}
& & f^i_1=|A_1|
\; \forall f^i \in F \\
& & f^i_i=|A_i|-(4k-2)
\; \forall f^i \in F \\
& & f^{2i}_{2j+1}=|A_{2j+1}|-(8k-4)
\; \forall f^{2i} \in F \mbox{ and } j = 1,2,\ldots,\frac{n-1}{4} \\
& & f^{2i}_{2j}=|A_{2j}|-(8k-4)
\; \forall f^{2i} \in F \mbox{ and } j = i+1,i+2,\ldots,\frac{n+1}{4} \\
& & f^{2i+1}_{2j+1}=|A_{2j+1}|-(8k-4)
\; \forall f^{2i+1} \in F \mbox{ and } j = 1,2,\ldots,i-1 \\
& & f^{2i}_{2j}=|A_{2j}|-(i-j)(2k+1)-(6k-5)
\; \forall f^{2i} \in F \mbox{ and } j = 1,2,\ldots,i-1 \\
& & f^{2i+1}_{2j}=|A_{2j}|-(\frac{n-1}{2}-i-j)(2k+1)-(6k-5)
\; \forall f^{2i+1} \in F \mbox{ and } j = 1,2,\ldots,\frac{n+1}{4} \\
& & f^{2i+1}_{2j+1}=|A_{2j+1}|-(j-i)(2k+1)-(6k-5)
\; \forall f^{2i+1} \in F \mbox{ and } j = i+1,i+2,\ldots,\frac{n-1}{4} \\
\end{eqnarray*}
\end{lemma}
\begin{proof}
Let $f^{2i+1} = f(S)$ with $S = P_1 \cup P_2$,
where
$P_1= [v_{\tau^{-1}((n-i-1)(2k+1)-1)},v_{\tau^{-1}((n-i)(2k+1)-3)}]_{\tau}$
and
$P_2 = [v_{\tau^{-1}((n-1)(2k+1)-2)},v_0]_{\tau}$.
We have $|P_1|=|P_2|=2k$ so $|S| = 4k$, implying $f^{2i+1}_1=|A_1|$ due to (\ref{eq:fulla1}).
The orbits $A_{2j+1}$ include edges which connect vertices at a distance $kn-2+2j$ and
$\delta_{\tau}(v_{l+kn+2j-2},v_l) = j(2k+1)-1$. We now have the situation in Lemma~\ref{lem:corw} with $u=i$ and $g=j$. When $i<j$ then $f^{2i+1}_{2j+1}=|A_{2j+1}|-(j-i)(2k+1)-(6k-5)$. When $j=i$ then $f^{2i+1}_{2i+1} = |A_{2i+1}|-(4k-2)$ and when $i>j$ then $f^{2i+1}_{2j+1} = |A_{2j+1}|-(8k-4)$.
The orbits $A_{2j}$ include edges which connect vertices at a distance $kn-2+2j-1$ and
$\delta_{\tau}(v_l,v_{l+kn+2j-3}) = (\frac{n-1}{2}-j)(2k+1)-1$. Since we have $i \leq \left\lceil\frac{n+1}{4}-1\right\rceil$ and $j \leq \frac{n+1}{4}$, then $\frac{n-1}{2}-j > i$ for all $i$ and $j$. So for Lemma~\ref{lem:corw} only the third case applies with $u=i$ and $g = \frac{n-1}{2}-j$. Thus, we have $f^{2i+1}_{2j} = |A_{2j}|-(\frac{n-1}{2}-i-j)(2k+1)-(6k-5)$.
Let $f^{2i} = f(S)$ with
$S = P_1 \cup P_2$, where
$P_1= [v_{\tau^{-1}((\frac{n-1}{2}+i-1)(2k+1)-1)},v_{\tau^{-1}((\frac{n-1}{2}+i-1)(2k+1)-3)}]_{\tau}$
and
$P_2 = [v_{\tau^{-1}((n-1)(2k+1)-2)},v_0]_{\tau}$.
Again, we have $|P_1|=|P_2|=2k$ so $|S| = 4k$ and $f^{2i}_1=|A_1|$.
For the orbits $A_{2j+1}$ we have that $j < \frac{n-1}{2}-i$ for all $i$ and $j$ so only case 1 in Lemma~\ref{lem:corw} applies and $f^{2i}_{2j+1}=|A_{2j+1}|-(8k-4)$.
For the orbits $A_{2j}$ we have exactly the same situation as in Lemma~\ref{lem:corw} with $u = \frac{n-1}{2}-i$ and $g = \frac{n-1}{2}-j$. We notice that when $i<j$ then $g<u$ and $f^{2i}_{2j}=|A_{2j}|-(8k-4)$, when $i>j$ then $g>u$ and $f^{2i}_{2j}=|A_{2j}|-(i-j)(2k+1)-(6k-5)$ and when $i=j$ then $g=u$ and $f^{2i}_{2i} = |A_{2i}|-(4k-2)$.
\end{proof}
One important thing to notice here is that $f^{2i}$ with $i= \left\lfloor\frac{n+1}{4}\right\rfloor$ is the continuation of $f^{2j+1}$ with $j = \left\lfloor\frac{n-1}{4}\right\rfloor$, since $\frac{n-1}{2}-\left\lfloor\frac{n-1}{4}\right\rfloor=\left\lfloor\frac{n+1}{4}\right\rfloor$. Another observation is that signature $f^{3}$ from Lemma~\ref{lem:splitmiddle} can always be removed from a complete set of signatures, and the set will still remain complete, since the signature from Lemma~\ref{lem:splitend} is better than or equal to it for all orbits.
\begin{exmp}
With $k=3$ and $n=5$ the solution $f= f(S)$ to $K_{31/13}$ of {\sc MAX $C_7$-COL} from Lemma~\ref{lem:splitmiddle} has
\[
S = \{v_{14}, v_{27}, v_{9}, v_{22}, v_{4}, v_{17}\} \cup \{v_{28}, v_{10}, v_{23}, v_{5}, v_{18}, v_{0}\},
\]
and looks like:
\[
\begin{array}{c c c c c c c}
{\bf f^{-1}(w_0)} & {\bf f^{-1}(w_1)} & {\bf f^{-1}(w_2)} & {\bf f^{-1}(w_3)} & {\bf f^{-1}(w_4)} & {\bf f^{-1}(w_5)} & {\bf f^{-1}(w_6)} \\ \hline
v_0 & v_{13} & v_{26} & v_8 & v_{21} & v_3 & v_{16} \\
v_{29} & v_{11} & v_{24} & v_6 & v_{19} & v_1 & \\
v_4 & v_{22} & v_{9} & v_{27} & v_{14} & & \\
& & & & & & v_{17} \\
v_{30} & v_{12} & v_{25} & v_{7} & v_{20} & v_2 & v_{15} \\
& v_{18} & v_5 & v_{23} & v_{10} & v_{28} &
\end{array}
\]
\end{exmp}
\noindent
We will now prove Proposition~\ref{th:qisfunny} using the solutions from
Lemma~\ref{lem:solalpha}, Lemma~\ref{lem:splitend} and Lemma~\ref{lem:splitmiddle}.
\subsection*{Proposition~\ref{th:qisfunny}}
\begin{proof}
We get $(n+1)/2$ inequalities from Lemma~\ref{lem:solalpha},~\ref{lem:splitend},
and~\ref{lem:splitmiddle}, where as noted above, we have removed the inequality
generated by $f^3$.
As variables we have $s$ and $\omega_i$, for $i = 1, \ldots, (n+1)/2$.
To solve the relaxation of (\ref{lp}), we solve the corresponding system with
equalities.
A similar treatment of the dual confirms that the obtained solution is
indeed the optimum.
We start by reducing our $\frac{n+1}{2} \times \frac{n+3}{2}$ system to
a $4 \times 4$ system.
However we need to rearrange the orbits to conveniently describe how they depend on each other. Let $A_1'=A_1, A_2'=A_3,\cdots,A_i'=A_{2j+1}, A_{i+1}'=A_{2l},A_{i+2}'=A_{2l-2},\cdots,A_{\frac{n+1}{2}}'=A_2$, where $j = \left\lfloor\frac{n-1}{4}\right\rfloor$ and $l= \left\lfloor\frac{n+1}{4}\right\rfloor$. Furthermore introduce new solutions $h$ so that $h^i$ denotes the solution that maximises $h_i$. This rearrangement makes sense, as it puts the orbits and solution in such an order that for all solutions $h^r$ we have $h^r_r > h^r_{r+1} > h^r_{r+2} > \cdots > h^r_{\frac{n+1}{2}}$.
Now we compare the equations in (\ref{lp}) from the signatures $h^{\frac{n+1}{2}}$ and $h^{\frac{n+1}{2}-1}$. Note that these are the signatures $f^2$ and $f^4$ from Lemma~\ref{lem:splitend}. We see then that we have
\[
\sum_j{h^{\frac{n+1}{2}-1}_j \omega_j}=\sum_j{h^{\frac{n+1}{2}}_j \omega_j} + (4k-2)\cdot\omega_{\frac{n+1}{2}-1} - (4k-2)\cdot\omega_{\frac{n+1}{2}}.
\]
Since we assume $\displaystyle{\sum_j{h^{\frac{n+1}{2}-1}_j \omega_j}=\sum_j{h^{\frac{n+1}{2}}_j \omega_j}=s}$, we get $\omega_{\frac{n+1}{2}}=\omega_{\frac{n+1}{2}-1}$.
For the general case we have
\begin{equation}
\sum_j{h^i_j \omega_j}=\sum_j{h^{i+1}_j \omega_j} + (4k-2)\cdot\omega_i - (4k-2)\cdot\omega_{i+1}
- (2k+1)\cdot\sum_{j=i+2}^{\frac{n+1}{2}}\omega_j,
\end{equation}
for $i = 3,4,\ldots,\frac{n+1}{2}-1$.
Since again we assume $\sum_j{h^i_j \omega_j}=\sum_j{h^{i+1}_j \omega_j}=s$, we get
\begin{equation}
\label{eq:omega}
\omega_i=\omega_{i+1}+\frac{2k+1}{4k-2}\cdot\displaystyle\sum_{j=i+2}^{\frac{n+1}{2}}\omega_j,
\end{equation}
for $i = 3,4,\ldots,\frac{n+1}{2}-1$. For $i=\frac{n+1}{2}-1$ this means $\omega_i=\omega_{i+1}$. For all other $i$, we use the fact that (\ref{eq:omega}) also holds for $\omega_{i+1}$ and thus have:
\begin{equation}
\label{eq:omega+1}
\omega_{i+1}=\omega_{i+2}+\frac{2k+1}{4k-2}\cdot\displaystyle\sum_{j=i+3}^{\frac{n+1}{2}}\omega_j,
\end{equation}
for $i = 3,4,\ldots,\frac{n+1}{2}-2$.
From (\ref{eq:omega+1}) we get,
\begin{equation}
\label{eq:sum}
(2k+1)\sum_{j=i+3}^{\frac{n+1}{2}}\omega_j = (4k-2)\cdot(\omega_{i+1}-\omega_{i+2}).
\end{equation}
We then insert (\ref{eq:sum}) into (\ref{eq:omega}) to express $\omega_i$ in terms of $\omega_{i+1}$ and $\omega_{i+2}$ only:
\begin{equation}
\omega_i = \omega_{i+1}+\frac{2k+1}{4k-2} \cdot \omega_{i+2} + (\omega_{i+1}-\omega_{i+2})
= 2\cdot\omega_{i+1}-\frac{2k-3}{4k-2}\cdot\omega_{i+2},
\end{equation}
for $i = 3,4,\ldots,\frac{n+1}{2}-2$.
We now define $\omega_i = g_{\frac{n+1}{2}-i}\cdot\omega_{\frac{n+1}{2}}$ with
\[
g_i =
\begin{cases}
1 & i = 0, 1, \\
2 \cdot g_{i-1}-\frac{2k-3}{4k-2}\cdot g_{i-2} & i = 2, 3, \ldots, \frac{n+1}{2}-3.
\end{cases}
\]
Thus, we can express $\omega_3,\ldots,\omega_{\frac{n+1}{2}-1}$ in terms of $\omega_{\frac{n+1}{2}}$. However, to proceed we need to express the coefficients $g_i$ in terms of $k$ and $n$. Define $G(z) = \sum_{n\geq0}{g_n z^n}$. We do not have to worry about the upper limit, since as far as we are concerned the recursion could go on towards infinity, without affecting the values we are interested in. After multiplying by $z^n$ and summing up from $n \geq 2$ we get
\[
g_2z^2+g_3z^3+\cdots = 2\{g_1z^2+g_2z^3+\cdots\}-\frac{2k-3}{4k-2}\{g_0z^2+g_1z^3+\cdots\},
\]
which we identify as
\[
G(z)-z-1=2z(G(z)-1)-\frac{2k-3}{4k-2}z^2G(z).
\]
Solving for $G(z)$ gives
\[
G(z) = \frac{1-z}{\frac{2k-3}{4k-2}z^2-2z+1}.
\]
The denominator has two distinct roots whose reciprocals are:
\[
\alpha_1=1+\sqrt{1-\frac{2k-3}{4k-2}} \qquad \text{and} \qquad \alpha_2=1-\sqrt{1-\frac{2k-3}{4k-2}}.
\]
Hence, we can express the $n$th coefficient of $G(z)$ as
\[
[z^n]G(z)=\frac{(1-\frac{1}{\alpha_1})\cdot \alpha_1^{n+1}}{2-\frac{2k-3}{4k-2}\cdot\frac{2}{\alpha_1}}+\frac{(1-\frac{1}{\alpha_2})\cdot \alpha_2^{n+1}}{2-\frac{2k-3}{4k-2}\cdot\frac{2}{\alpha_2}}.
\]
We can now write down the smaller $4 \times 4$ system of equations. Let $|V| = |A_1| = |A_2| = \cdots = |A_{\frac{n+1}{2}}|$. From the equations of the signatures $h_1$ (from Lemma~\ref{lem:solalpha}), $h_2$ (from Lemma~\ref{lem:splitend}), and $h_3$ ($f_2$ from Lemma~\ref{lem:splitmiddle}), we get
\[
\begin{array}{ll}
(|V|-2) \cdot \omega_1 + |V| \cdot \omega_2 + |V| \cdot \displaystyle \sum_{i=0}^{\frac{n+1}{2}-3}g_i\cdot\omega_{\frac{n+1}{2}} & = s \\
|V| \cdot \omega_1 + (|V|-(4k-2)) \cdot \omega_2 + \displaystyle \sum_{i=0}^{\frac{n+1}{2}-3}(2k+1)\frac{n-1+2i}{2}\cdot g_i\cdot\omega_{\frac{n+1}{2}} & = s \\
|V| \cdot \omega_1 + (|V|-(8k-4)) \cdot (\omega_2 + \left(\displaystyle \sum_{i=0}^{\frac{n+1}{2}-3}g_i - (4k-2)\right) \cdot \omega_{\frac{n+1}{2}}) & = s \\
|V|\cdot(\omega_1+\omega_2 + \displaystyle \sum_{i=0}^{\frac{n+1}{2}-3}g_i\cdot\omega_{\frac{n+1}{2}}) & = 1.
\end{array}
\]
Solving this gives
\[
s=\frac{(2kn+n-4)(\xi_n(4k-1)+(2k-1))}{(2kn+n-4)(\xi_n(4k-1)+(2k-1))+(4k-2)(1-\xi_n)},
\]
where
\[
\xi_n =
\left(\alpha_1^{(n-1)/2} + \alpha_2^{(n-1)/2}\right)/4.
\]
\end{proof}
\section{Proofs of Results from Section~\ref{sec:apply}}
\subsection*{Lemma~\ref{lem:circlesandwich}}
\begin{proof}
Since $\chi_c(G) \leq r$ means there exists an $r' \leq r$ such that $G \rightarrow K_{r'}$, and since $K_{r'} \rightarrow K_r$, we have $G \rightarrow K_r$; moreover $K_2$ has a homomorphism to every graph that contains at least one edge, so $K_2 \rightarrow G \rightarrow K_r$ and we can apply Lemma~\ref{lem:sandwich}. We also have that $C_{2k+1}$ has a homomorphism to each graph which contains an odd cycle of length at most $2k+1$. It is obvious that $C_{2k+1}$ has a homomorphism into a graph containing a cycle of length exactly $2k+1$. But we also know that $C_{2k+1} \rightarrow C_{2m+1}$ if $m \leq k$, so if $G$ contains an odd cycle of length at most $2k+1$ then we have $C_{2k+1} \rightarrow G \rightarrow K_r$.
\end{proof}
\subsection*{Proposition~\ref{thmI}}
\begin{proof}
By Pan and Zhu we know the following for graphs $G$ that are
$K_4$-minor-free and integers $k\geq 1$:
\begin{itemize}
\item If G has odd girth at least $6k-1$ then $\chi_c(G)\leq 8k/(4k-1)$;
\item If G has odd girth at least $6k+1$ then $\chi_c(G)\leq (4k+1)/2k$;
\item If G has odd girth at least $6k+3$ then $\chi_c(G)\leq
(4k+3)/(2k+1)$.
\end{itemize}
The above, combined with Proposition~\ref{prop:4k+4}, can be used to
specify values on $s(K_2,G)$. We get that when the odd girth is at least
$6k-1$ then $s(K_2,G) \geq \frac{4k}{4k+1}$ and when the odd girth is at
least $6k+3$ then $s(K_2,G) \geq \frac{4k+2}{4k+3}$. For graphs with odd
girth $6k+1$ the result of Pan and Zhu gives no other guarantee than that
a homomorphism exists to the cycle $C_{4k+1}$, which gives us no better
bound than for graphs with girth $6k-1$.
\end{proof}
\end{document} |
\begin{document}
\title{Test of the quantumness of atom-atom correlations in a bosonic gas}
\author{D.~Ivanov}
\affiliation{Laser Research Institute, St.~Petersburg State University,
5 Uliyanovskaya, Petrodvoretz, St.~Petersburg, Russia}
\author{S.~Wallentowitz}
\affiliation{Facultad de F{\'i}sica, Pontificia Universidad Cat{\'o}lica de
Chile, Casilla 306, Santiago 22, Chile}
\begin{abstract}
It is shown how the quantumness of atom-atom correlations in a trapped
bosonic gas can be made observable. Application of
continuous feedback control of the center of mass of the atomic cloud is
shown to generate oscillations of the spatial extension of the cloud, whose
amplitude can be directly used as a characterization of atom-atom
correlations. Feedback parameters can be chosen such that the violation of a
Schwarz inequality for atom-atom correlations can be tested at noise
levels much higher than the standard quantum limit.
\end{abstract}
\pacs{05.40.-a, 05.30.-d, 05.30.Jp}
\date{30 September 2005}
\maketitle
In optics correlations between photons in a light field have been demonstrated
using a Hanbury Brown Twiss setup~\cite{hanbury-brown-twiss}. There the
incoming light beam is split by a semitransparent mirror and the two outputs are
measured with photodetectors. By displacing detectors normal to the detected
beams and by introducing a variable time delay, from the coincidences of
detections the intensity-intensity correlation of the quantized light field,
\begin{equation}
\label{eq:c-intro}
C(1,2) = \big\langle \cici \, \hat{I}(1) \, \hat{I}(2) \, \cici
\big\rangle
\end{equation}
is obtained, where $1$ and $2$ represent the space-time coordinates $(x_1,t_1)$
and $(x_2,t_2)$, respectively. Here $\hat{I} \!=\! \hat{E}^{(-)}
\hat{E}^{(+)}$ with $\hat{E}^{(\pm)}$ being positive- and negative-frequency
parts of the (here scalar) electric field of the light beam and $\cici \; \cici$
denotes time and normal-ordering.
Interpreting the expectation value in Eq.~(\ref{eq:c-intro}) as being based on a
proper probability density, application of the Cauchy--Schwarz inequality leads
to
\begin{equation}
\label{eq:cs-intro}
C(1,2) \leq \sqrt{ C(1,1) \, C(2,2)}.
\end{equation}
This inequality is based on the assumption of a classical random process
with correlations describing either complete randomness or bunching of
photons~\cite{mandel-book}. However, quantum states of light exist that cannot
be described in this way and violate the inequality~(\ref{eq:cs-intro}).
This has been used as a criterion for true quantumness --- or non-classicality
--- of the incoming light beam, and has been experimentally observed
as antibunched
light~\cite{antibunching1,antibunching2,antibunching3,antibunching4}.
These concepts can also be applied to ultracold bosonic gases,
where the electric field is replaced by the matter-field
$\hat{\phi}(x,t)$. However, different from optical correlations measured by
absorbing photodetectors, atom correlations will not occur in normal operator
ordering. Nevertheless, an analogy between photonic and atomic correlations can
be drawn. In fact, atom-atom correlations have been recently measured in a way
analogous to the Hanbury Brown--Twiss optical setup~\cite{yasuda,bloch,aspect}.
Furthermore, there have been also approaches where the measurement of losses due
to three-body recombinations has been used to infer on those
correlations~\cite{kogan,holland,phillips}.
In these techniques either the gas has to be released from the trap or atomic
losses play a major role. One might, however, manipulate or drive the system in
such a way as to map the internal atom-atom correlations into easily accessible
mesoscopic observables, that may be observed \emph{in situ} without losing atoms
from the trap. This is the approach taken in this Letter: By applying a
continuous feedback control of the atomic cloud's center of mass and by
observing the cloud size, it is shown that atom-atom correlations can be
detected and used to test for the quantumness of atom-atom correlations, in
close analogy to antibunching in the case of photons.
Consider an ideal bosonic gas with $N$ atoms of mass $m$, that is kept in a
harmonic trap of frequency $\omega$. A feedback loop is continuously applied to
compensate for the motion of the center of mass of the gas $\hat{X}$, i.e. to
damp the collective motion of atoms. It consists of the continuously repeated
application of a measurement of $\hat{X}$ with measurement outcome $X_{\rm m}$
and resolution $\sigma_0$, and a corresponding shift by $-\zeta_0 X_{\rm m}$.
Both processes shall be much faster than the free oscillation of the system with
trap frequency $\omega$, so that an instantaneous action can be assumed.
Experimental implementations of such a feedback loop are based on the collective
interaction of the atoms with far off-resonant optical probe fields.
Two-photon transitions, whose strength depend on the positions of atoms in
the probe field, then lead to a redistribution of intensities in optical-field
modes. The latter may be detected and used as an input signal for a subsequent
control action using optical phase shifts of the same probe field.
Such a scheme has been realized with atoms in optical lattices~\cite{raithel} or
may be realized as an extension of the single-atom experiment by Fischer {\it
et~al.}~\cite{rempe}. Possibly it may also be integrated into the
magneto-optical or optical trap configuration, by detecting and modulating the
trapping laser fields.
Given the continuous application of feedback at rate $\gamma$, two
parameters determine the feedback dynamics: the rms time-integrated measurement
resolution $\sigma \!=\! \sigma_0 / \sqrt{\gamma}$ and the feedback shift rate
$\zeta \!=\! \zeta_0 \gamma$. The time evolution of the $N$-atom density
operator of the system, $\hat{\varrho}_N$, is then described by the master
equation of quantum Brownian motion~\cite{master1,master2,master3},
\begin{equation}
\label{eq:N-master}
\partial_t \, \hat{\varrho}_N
= - \frac{i}{\hbar} [ \hat{H}, \hat{\varrho}_N
] + i \frac{\zeta}{2\hbar} [ \hat{P}, \{ \hat{X}, \hat{\varrho}_N
\} ]
- \frac{1}{8\sigma^2} [ \hat{X}, [\hat{X}, \hat{\varrho}_N]]
- \frac{\zeta^2\sigma^2}{2\hbar^2} [ \hat{P}, [ \hat{P},
\hat{\varrho}_N]] .
\end{equation}
Here $\hat{H}$ is the Hamiltonian of the non-interacting atomic gas in the trap
potential and the center-of-mass (cm) operator and its canonically conjugate
total momentum read
\begin{equation}
\label{eq:XP-def}
\hat{X} = \frac{1}{N} \int \! dx \, \hat{\phi}^\dagger(x) \, x \,
\hat{\phi}(x) , \quad
\hat{P} = -i\hbar \int \! dx \, \hat{\phi}^\dagger(x)
\, \partial_x \, \hat{\phi}(x) ,
\end{equation}
with the atomic field $\hat{\phi}(x)$ obeying the bosonic commutator
relation $[\hat{\phi}(x), \hat{\phi}^\dagger(x')] \!=\! \delta(x \!-\! x')$.
The stationary behavior of the cm, obtained from
Eq.~(\ref{eq:N-master}), is an exponential damping at rate $\zeta/2$ of the
coherent oscillation: $\lim_{t\to\infty} \langle \hat{X}(t) \rangle \!=\! 0$.
Moreover, its rms spread converges exponentially at the same
rate to the non-zero stationary value
\begin{equation}
\label{eq:rms-cm-limit}
\lim_{t\to\infty} \sqrt{\langle [\Delta \hat{X}(t)]^2 \rangle} =
\Delta X_{\rm s} ,
\end{equation}
where $\Delta \hat{X} \!=\! \hat{X} \!-\! \langle \hat{X} \rangle$.
It represents the noise left in the cm after a time of the order of $\zeta^{-1}$
needed for damping the coherent cm oscillation. This noise is determined solely
by the parameters of the feedback and trap:
\begin{equation}
\label{eq:DX}
\Delta X_{\rm s} = \delta X_0 \, \sqrt{(\eta \!+\!
\eta^{-1}) / 2 } ,
\end{equation}
with the rms spread of the cm in the ground state of the trapping potential
being
\begin{equation}
\label{eq:dX0}
\delta X_0 = \sqrt{\hbar/(2 N m \omega)} ,
\end{equation}
which serves as the standard quantum limit (SQL) for the cm
coordinate. The parameter
\begin{equation}
\label{eq:eta}
\eta = {\delta X_0}^2 / (\zeta\sigma^2)
\end{equation}
specifies the ratio of spatial localization due to the potential over that due
to the feedback. Note, that Eq.~(\ref{eq:DX}) attains the SQL as a minimum
value for $\eta \!=\! 1$ but is otherwise much larger.
Our goal is to describe atomic correlation effects in the dynamics of the
atomic density. For that purpose we need the single-atom density matrix
\begin{equation}
\label{eq:density}
\rho(x,x',t) = \langle \hat{\phi}^\dagger(x') \, \hat{\phi}(x) \rangle_t ,
\end{equation}
with $\langle \ldots \rangle_t \!=\! {\rm Tr}[ \ldots \hat{\varrho}_N(t)]$.
Obtaining the dynamical evolution of this density matrix from
Eq.~(\ref{eq:N-master}) is of course prevented by the correlations in
the many-atom system. That correlations play a role in
Eq.~(\ref{eq:N-master}) can be seen from the occurrence of products of
operators $\hat{X}$ and $\hat{P}$, that contain products of four field
operators [cf.~Eq.~(\ref{eq:XP-def})], similar to atom-atom interactions.
However, recently it has been shown~\cite{ivanov-wal}, that despite these
problems, the single-atom density matrix can be obtained via a procedure that we
may briefly outline here:
Instead of the single-atom density matrix, the joint Wigner function of single
atom (variables $x$, $p$) and cm of the other $N \!-\! 1$ atoms
(variables $X$, $P$) is considered:
\begin{eqnarray}
\label{eq:wigner}
W(x,p; X,P,t) & = & (2\pi\hbar)^{-3}
\! \int \! dx' \! e^{-i x'p / \hbar}
\int \! dX' \! \int \! dP' \nonumber \\
& & \times \left\langle \hat{\phi}^\dagger \!\Big( x \!-\!
\frac{x'}{2} \Big) \,
e^{ i [ (\hat{P} - P) X' + (\hat{X} - X) P'] / \hbar }
\hat{\phi} \!\Big( x \!+\! \frac{x'}{2} \Big) \right\rangle_t . \qquad
\end{eqnarray}
From Eq.~(\ref{eq:N-master}) a closed Fokker-Planck equation follows for this
distribution, which is of linear type with positive semi-definite diffusion
matrix, leading thus to a bound analytic Green function of Gaussian
type~\cite{risken}. In consequence, given the initial conditions, analytic
solutions for this Wigner function can be obtained, from which the solution
for the single-atom density matrix is derived by integration over the
auxiliary phase-space variables:
\begin{equation}
\label{eq:rho-W}
\rho(x \!+\! x', x \!-\! x',t) = \int \! dX \! \int \! dP \!
\int \! dp \, W(x,p;X,P,t) \, e^{2ip x'} .
\end{equation}
Thus in principle the complete atomic density profile could be obtained. Here
we focus on the rms spread of the corresponding atomic density,
\begin{equation}
\Delta x(t) = \left\{ \int \! \frac{dx}{N} \, x^2 \, \rho(x,x,t) -
\left[ \int \! \frac{dx}{N} \, x \, \rho(x,x,t) \right]^2
\right\}^{\frac{1}{2}} ,
\end{equation}
giving us information on the quantum-statistically averaged temporal evolution
of the extension of the atomic cloud. From a complete solution of the
Fokker--Planck equation for (\ref{eq:wigner}) this variance can be shown to
exponentially converge at rate $\zeta/2$ to the asymptotic behavior
$\lim_{t\to\infty} \Delta x(t) \!\sim\! \Delta
x_{\rm a}(t)$, defined by
\begin{equation}
\label{eq:dx}
\Delta x_{\rm a}(t) = \sqrt{ {\Delta X_{\rm s}}^2 +
{\sigma_q}^2(t) } .
\end{equation}
In this equation the first term in the square root is
given by the constant stationary rms cm spread [cf.~Eq.~(\ref{eq:DX})],
whereas the second term is explicitly time dependent and reads
\begin{equation}
\label{eq:C-def}
{\sigma_q}^2(t)
= \int \! \frac{dx}{N} \left\langle \hat{\phi}^\dagger(x)
\left[ q(x, t) \right]^2 \hat{\phi}(x) \right\rangle_0
- \int \! \frac{dx}{N} \int \! \frac{dx'}{N} \left\langle
\hat{\phi}^\dagger(x)
\, q(x, t) \, \hat{\phi}(x) \, \hat{\phi}^\dagger(x') \,
q(x',t) \, \hat{\phi}(x') \right\rangle_0 ,
\end{equation}
with the expectation value being taken with respect to the initial $N$-atom
density operator $\hat{\varrho}_N(0)$. The explicit time dependence of
Eq.~(\ref{eq:C-def}) is given by the single-atom quadrature, defined as
\begin{equation}
\label{eq:q}
q(x,t) = x \cos(\omega t) - \frac{i\hbar
\partial_x}{m\omega} \sin(\omega t) .
\end{equation}
Equation~(\ref{eq:C-def}) represents the central result of our Letter. In its
second part it contains an atom-atom correlation function with four
matter-field operators, showing that due to the feedback these correlations
show up in the observable mesoscopic size of the cloud, cf.~Eq.~(\ref{eq:dx}).
Thus the atom-atom correlations will become visible in a purely single-atom
property.
Due to its explicit time dependence, in general there will be no
stationary size of the atomic cloud, but instead the cloud will
periodically breath. It should be emphasized, that this breathing has nothing in
common with the well-known collective oscillations of a Bose gas or condensate.
The latter rely on the presence of atomic collisions, whereas the
breathing discussed here is an effect solely produced by the feedback. Feedback
of course effectively mediates interactions between atoms, so that a certain
analogy to true atom-atom interactions can be drawn. However, whereas collisions
of ultracold atoms lead to Hamiltonian terms, the feedback in addition provides
non-unitary parts of the time evolution [cf.~Eq.~(\ref{eq:N-master})].
Thus, after a transient behavior during a time of the order of $1/\zeta$, the
cloud size will oscillate at twice the trap frequency, which resembles
single-atom quadrature squeezing~\cite{squeezing}. The amplitude of this
oscillation will of course depend on the initial quantum state and its
correlations at time $t\!=\!0$, before the feedback has been turned on.
Since the SQL for a single atom in the trap reads $\delta x_0 \!=\! \delta X_0
\sqrt{N}$, it follows that the atomic cloud size~(\ref{eq:dx}) can become
smaller than that value. The condition for quadrature squeezing (QS) on the
single-atom level would then be that at some time during the half period
$\pi/\omega$ the following inequality is fulfilled:
\begin{equation}
\label{eq:squeeze-cond}
\Delta x_{\rm a}(t) < \delta x_0 \qquad \mbox{(single-atom QS)}.
\end{equation}
However, there is more to Eq.~(\ref{eq:dx}) than single-atom QS.
It can be revealed by taking a closer look at the structure of
Eq.~(\ref{eq:C-def}) and applying the ideas developed in the context of photon
antibunching. The probability density for finding an atom at position $x$ is
undoubtedly defined as
\begin{equation}
\label{eq:P(x)}
P(x) = \langle \hat{\phi}^\dagger(x) \hat{\phi}(x) \rangle / N .
\end{equation}
A quasi joint probability density for two atoms being at positions $x$ and $x'$,
that is consistent with the definition~(\ref{eq:P(x)}), reads
\begin{equation}
\label{eq:P(x,x')}
P(x,x') = \langle \hat{\phi}^\dagger(x) \hat{\phi}(x)
\hat{\phi}^\dagger(x') \hat{\phi}(x') \rangle / N^2 .
\end{equation}
Note that different from the optical case this correlation is not normally
ordered. Consistency means here that for $N$ atoms the marginals of $P(x,x')$
reproduce the correct probability density:
\begin{equation}
\int \! dx' \, P(x,x') = P(x) , \qquad \int \! dx \, P(x,x') = P(x') .
\end{equation}
Clearly $P(x,x')$ is not a proper probability density in general. However, when
interpreting the atomic fields as classical ones: $\hat{\phi}(x) \!\to\!
\phi(x)$, it becomes a proper classical joint probability density. In this
classical interpretation, we may now apply the Schwarz inequality that states
\begin{equation}
\int \! dx \! \int \! dx' \, q(x) q(x') P(x,x') \leq \int \! dx \, q^2(x)
P(x) \qquad \mbox{(classically)} .
\end{equation}
Applying this result to Eq.~(\ref{eq:C-def}) one obtains a classical inequality
for the contribution to the size of the atomic cloud:
$\sigma_q^2(t) \!\geq\! 0$. In consequence, via Eq.~(\ref{eq:dx}) one arrives
at the classical inequality for the size of the atomic cloud: $\Delta x_{\rm
a}(t) \!\geq\! \Delta X_{\rm s}$.
A violation of the Schwarz inequality would indicate true quantum
correlations between atoms as opposed to classical ones, since
then the second expectation value in Eq.~(\ref{eq:C-def}) cannot be
described by integration over a proper probability density. The
condition for this case is then
\begin{equation}
\label{eq:cs-cond}
\Delta x_{\rm a}(t) < \Delta X_{\rm s} \qquad \mbox{(Schwarz violation)} .
\end{equation}
Indeed, since Eq.~(\ref{eq:C-def}) contains atom-atom correlations in the
second term on the rhs, one may interpret a violation of the Cauchy--Schwarz
inequality, as formulated in Eq.~(\ref{eq:cs-cond}), as a test for true
quantum correlations between atomic pairs. Note, that the concept of defining
quantumness here is the same as in the case of photon antibunching in the
context of the optical Hanbury Brown--Twiss experiment.
A Schwarz violation at time $t$ can be easily shown to be equivalent to a
violation of the (classical) inequality
\begin{equation}
\label{eq:example}
\Delta q(t) \geq \sqrt{\langle [\Delta \hat{Q}(t)]^2 \rangle} ,
\end{equation}
where $\Delta q$ and $\Delta Q$ are the rms spread of the ``single-atom''
quadrature, corresponding to the atomic density, and the rms spread of the cm
quadrature, respectively. The nature of such a violation can be
understood by considering the special case where the Schwarz violation occurs at
a time where the quadrature~(\ref{eq:q}) reduces to the atomic position, i.e.
$q(t) \!\to\! x$. The relation~(\ref{eq:example}) then states, that classically
the size of the atomic cloud $\Delta x$ is always equal or larger than the rms
spread of the cm of the cloud. In other words, the cm of the object is well
localized within the spatial extension of the cloud. A violation of this
classical inequality would then correspond to cases where the cm coordinate
reveals a rms spread larger than the cloud's size. In the extreme case, the
atomic cloud can then be seen as an almost pointlike object, its internal
distribution not being resolved, whose (cm) coordinate fluctuates.
It is thus the transition from an atomic cloud with well localized cm to a
quasi pointlike object with large fluctuation of its coordinate, that
corresponds to a transition of classical to quantum atom-atom correlations in
the cloud. Clearly for the general case, i.e. a Schwarz violation at an arbitary
time withing the half period $\pi/\omega$, the above interpretation
correspondingly holds for a specific quadrature $q(t)$ instead of position.
In overall thus two levels of quantum or -- if one wishes to use this term --
non-classical behavior can be distinguished by the amplitude of the
oscillation of the atomic-cloud size. These two however, single-atom QS and
Schwarz violation, do not form a unique hierarchy: For different parameters
$\eta$ of the feedback mechanism the two
boundaries [cf.~Eqs~(\ref{eq:squeeze-cond}) and (\ref{eq:cs-cond})], appear in
different orders. The order depends on whether $\Delta X_{\rm s} \!\leq\!
\delta x_0$ or not. The parameter range for this condition is obtained as
\begin{equation}
\label{eq:ranges}
\Delta X_{\rm s} \leq \delta x_0 \quad \mbox{for} \quad N \!-\! \sqrt{N^2
\!-\! 1} \leq \eta \leq N \!+\! \sqrt{N^2 \!-\! 1} .
\end{equation}
In the case $N\!\to\! \infty$ this range includes all possible values of $\eta$
and thus for a truly macroscopic system, before a Schwarz violation
can be observed always first single-atom QS appears. This case is depicted
in the left part of Fig.~\ref{fig:sq-cs}.
\begin{figure}
\caption{Observable size of the cloud versus time: For the parameter range
given by Eq.~(\ref{eq:ranges}
\label{fig:sq-cs}
\end{figure}
However, for a finite system values for $\eta$ can be found outside the
range~(\ref{eq:ranges}) which then reveals an exchange of the order of
single-atom QS and Schwarz violation. In this case quantum correlations
between atoms
can be observed without the presence of single-atom QS, cf. right part of
Fig.~\ref{fig:sq-cs}. The corresponding parameter range is given by
\begin{equation}
\label{eq:ranges2}
\eta < N \!-\! \sqrt{N^2 \!-\! 1} \quad \mbox{or} \quad \eta > N \!+\!
\sqrt{N^2 \!-\! 1} .
\end{equation}
In this range of $\eta$ apparently the quantumness of correlations can be
detected at much higher noise levels than the single-atom SQL
$\delta x_0$, which may render its experimental observation substantially
more feasible. Moreover, the two ranges in Eq.~(\ref{eq:ranges2}) correspond to
weak and strong feedback localization, respectively, and thus may allow
for a suitable combination of values for the shift rate $\zeta$
and the rms time-integrated measurement resolution $\sigma$ in an experiment.
Last but not least this scheme may be applied even for the extreme case of only
two indistinguishable bosonic atoms, for which the range of possible values of
$\eta$, according to the case~(\ref{eq:ranges2}), becomes even broader.
The criterion for quantumness of correlations can be tested directly from
measurements of the size of the atomic cloud $\Delta x(t)$ over half a period of
the trap oscillation, after the system has reached its asymptotic behavior
within a delay time of the order of $1/\zeta$. Experimentally a sufficiently
large number of sequences of feedback evolutions of varying time duration and
final cloud-size measurements have to be performed. Each sequence starts with
the identically prepared initial quantum state, whose correlations are to be
detected. Thus the final cloud-size measurements can be arbitrarily destructive
and can be performed for example by density-profile measurements.
These may be implemented by absorption imaging~\cite{absorb}, dispersive light
scattering~\cite{dispersive-scattering}, or possibly phase-modulation
spectroscopy~\cite{phase-spectroscopy}. Thus together with possible experimental
techniques to generate the required feedback loop~\cite{raithel,rempe}, an
experimental implementation of the presented scheme and a test for quantum
atom-atom correlations in bosonic gases seem to be feasible.
In summary we have shown that the quantumness of atom-atom correlations in a
trapped bosonic gas can be made observable as size oscillations of the atomic
cloud via feedback. For weak and strong feedback localization a Schwarz
violation for atom-atom correlations of a gas with finite atom number can be
observed in the absence of single-atom QS at correspondingly higher noise levels
than the SQL. Together with the feasibility of implementing this scheme with
present experimental techniques, this may allow for detecting the quantumness of
atom-atom correlations in a bosonic gas.
\acknowledgments
S.W. acknowledges support from the FONDECYT projects no. 1051072 and no.
7050184.
\end{document} |
\begin{document}
\title{Supervised functional classification: A theoretical remark and some
comparisons}
\author{ Amparo Ba\'{\i}llo\footnote{Corresponding author. Phone: +34 914978640,
e-mail: amparo.baillo@uam.es}
\quad and \quad Antonio Cuevas\thanks{The research of both authors was partially supported
by Spanish grant MTM2007-66632 and the IV PRICIT program titled {\em Modelizaci\'on Matem\'atica y Simulaci\'on Num\'erica en Ciencia y
Tecnolog\'{\i}a} (SIMUMAT).}
\\
\footnotesize Departamento de An\'alisis Econ\'omico: Econom\'{\i}a
Cuantitativa, Univ. Aut\'onoma de Madrid, Spain\\
\footnotesize Departamento de Matem\'aticas, Univ. Aut\'onoma de Madrid, Spain}
\date{}
\maketitle
\begin{abstract}
The problem of supervised classification (or discrimination) with functional data is
considered, with a special interest on the popular $k$-nearest neighbors ($k$-NN) classifier.
First, relying on a recent result by C\'erou and Guyader (2006), we prove the
consistency of the $k$-NN classifier for functional data whose distribution
belongs to a broad family of Gaussian processes with triangular covariance functions.
Second, on a more practical side, we check the behavior of the $k$-NN method when
compared with a few other functional classifiers. This is carried out through
a small simulation study and the analysis of several real functional data sets.
While no global ``uniform'' winner emerges from such comparisons, the overall performance of
the $k$-NN method, together with its sound intuitive motivation and relative simplicity,
suggests that it could represent a reasonable benchmark for the classification
problem with functional data.
\
\noindent
\it Key words and phrases\rm. Supervised classification, functional data, projections method,
nearest neighbors, discriminant analysis.
\noindent
\it AMS 2000 subject classification\rm. Primary 62G07; secondary 62G20.
\end{abstract}
\noindent
\bf 1. Introduction\rm
\
\noindent
\it 1.1 Some background on supervised classification\rm
\
Supervised classification is the modern name for one of the oldest statistical problems in
experimental science: to decide whether an individual, from which just a random
measurement $X$ (with values in a ``feature space'' ${\cal F}$ endowed with a metric $D$)
is known, either belongs to the population $P_0$ or to $P_1$.
For example, in a medical problem $P_0$ and $P_1$ could correspond to the group of ``healthy''
and ``ill'' individuals, respectively. The decision must be taken from the
information provided by a ``training sample'' $\mathcal X_n = \{ (X_i,Y_i), 1\leq i\leq n \}$,
where $X_i$, $i=1,\ldots,n$, are independent replications of $X$, measured on $n$ randomly chosen
individuals, and $Y_i$ are the corresponding values of an
indicator variable which takes values 0 or 1 according to
the membership of the $i$-th individual to $P_0$ or $P_1$.
Thus the mathematical problem is to find a ``classifier''
$g_n(x)=g_n(x;\mathcal X_n)$, with $g_n:{\cal F}\rightarrow
\{0,1\}$, that minimizes the classification error $P\{g_n(X)\neq Y\}$.
The term ``supervised'' refers to the fact that the individuals in the training
sample are supposed to be correctly classified, typically using ``external'' non
statistical procedures, so that they provide a reliable basis for the assignation
of the new observation. This problem, also known as ``statistical
discrimination'' or ``pattern recognition'', is at least 70 years old.
The origin goes back to the classical work by Fisher (1936) where, in the
$d$-variate case ${\cal F}={\mathbb R}^d$, a simple ``linear classifier''
$g_n(x)={\mathbbm 1}_{\{x: w^\prime x+w_0>0\}}$ was introduced (${\mathbbm 1}_A$ stands for
the indicator function of a set $A\subset {\cal F}$).
A deep insightful perspective of the supervised classification problem can be found in
the book of Devroye et al (1996). Other useful textbooks are Hand (1997) and Hastie et al.
(2001). All of them focus on the standard multivariate case ${\cal F}={\mathbb R}^d$.
It is not difficult to prove (e.g., Devroye et al., 1996, p. 11) that the optimal classification rule
(often called ``Bayes rule'') is
\begin{equation} \label{opt}
g^*(x)={\mathbbm 1}_{\{\eta(x)>1/2\}},
\end{equation}
where $\eta(x)=E(Y|X=x)$. Of course, since $\eta$ is unknown the exact
expression of this rule is usually unknown, and thus
different procedures have been proposed in order to
approximate it. In particular, it can be seen that
Fisher's linear rule is optimal provided that the conditional distributions of $X|Y=0$ and
$X|Y=1$ are both normal with identical covariance matrix. While these conditions look quite restrictive,
and it is straightforward
to construct problems where any linear rule has a
poor performance,
Fisher's classifier is still by far the most popular choice among users.
A simple non-parametric alternative is given by the $k$-nearest neighbors ($k$-NN)
method which is obtained by replacing the unknown regression function $\eta(x)$ in
(\ref{opt}) with the regression estimator
\begin{equation} \label{RegEstkNN}
\eta_n(x) = \frac{1}{k} \sum_{i=1}^n {\mathbbm 1}_{\{ X_i\in k(x) \}} Y_i
\end{equation}
where $k=k_n$ is a given (integer) smoothing parameter and ``$X_i\in k(x)$'' means that
$X_i$ is one of the $k$ nearest neighbors of $x$.
More concretely, if the pairs $(X_i,Y_i)_{1\leq i \leq n}$ are re-indexed as
$(X_{(i)},Y_{(i)})_{1\leq i \leq n}$ so that the $X_{(i)}$'s are arranged
in increasing distance from $x$,
$D(x,X_{(1)}) \leq D(x,X_{(2)}) \leq \ldots \leq D(x,X_{(n)})$,
then $k(x) = \{ X_{(i)},1\leq i \leq k \}$.
This leads to the $k$-NN classifier $g_n(x) = {\mathbbm 1}_{ \{ \eta_n(x) > 1/2 \}}$.
It is well-known that, in addition to this simple classifier, several other alternative
methods (kernel classifiers, neural networks, support vector machines,...) have been
developed and extensively analyzed in the latest years.
However, when used in practice with real data sets, the performance of Fisher's rule
is often found to be very close to that of the best one among all
the main alternative procedures.
On these grounds, Hand (2006) has argued in a provocative paper about the ``illusion of
progress'' in supervised classification techniques.
The central idea would be that the study of new classification rules often fails
to take into account the structure of real data sets and it tends to overlook the
fact that, in spite of its theoretical limitations, Fisher's rule is
quite satisfactory in many practical applications.
This, together with its conceptual simplicity, explains its popularity over the years.
\
\noindent
\it 1.2 The purpose and structure of this paper\rm
\
We are concerned here with the problem of (binary) supervised classification with
functional data. That is, we consider the general framework indicated above but
we will assume throughout that the space $({\cal F},D)$ where the random elements
$X_i$ take values is a separable metric space of functions.
For some theoretical results (Theorem 2) we will impose a more specific assumption
by taking ${\cal F}$ as the space $C[a,b]$ of real continuous functions defined in a closed
finite interval $[a,b]$, with the usual supremum norm $\Vert \; \Vert_\infty$.
The study of discrimination techniques with functional data
is not as developed as the corresponding finite-dimensional theory
but, clearly, is one of the most active research topics in
the booming field of functional data analysis (FDA).
Two well-known books including broad overviews of FDA with interesting examples are Ferraty
and Vieu (2006) and Ramsay and Silverman (2005). Other
recent more specific references will be mentioned below.
There are of course several important differences between
the theory and practice of supervised classification for
functional data and the classical development of this topic
in the finite-dimensional case, where typically the data dimension $d$ is much smaller
than the sample size $n$ (the ``high-dimensional''
case where $d$ is ``large'', and usually $d>n$, requires a
separate treatment). A first important practical
difference is the role of Fisher's linear discriminant
method as a ``default'' choice and a benchmark for
comparisons. As we have mentioned, this holds for the
finite dimensional cases with ``small'' values of $d$ but
it is no longer true if functional (or high-dimensional) data are
involved. To begin with, there is no obvious way to apply
in practice Fisher's idea in the infinite-dimensional case,
as it requires to invert a linear operator which is not in
general a straightforward task in functional spaces; see,
however, James and Hastie (2001) for an interesting
adaptation of linear discrimination ideas to a functional
setting. Then, the question is whether there exists any functional discriminant method, based on
simple ideas, which could play a reference role similar to that of Fisher's method in the
finite dimensional case. The results in this paper suggest (as a partial, not definitive, answer)
that the $k$-NN method could represent a ``default standard'' in functional settings.
Another difference, particularly important from
the theoretical point of view, concerns the universal
consistency of the $k$-NN classifier. A classical result by
Stone (1977) establishes that in the finite-dimensional
case (with $X_i\in{\mathbb R}^d$) the conditional error of
the $k$-NN classifier
\begin{equation} \label{CondProbErr}
L_n=P \{ g_n(X)\neq Y |\mathcal X_n\},
\end{equation}
converges in probability (and also in mean) to that of the Bayes (optimal) rule $g^*$,
that is, $E(L_n)\rightarrow L^*=P \{ g^*(X)\neq Y \}$,
provided that $k_n\to\infty$ and $k_n/n\to 0$ as
$n\to\infty$. This result
holds universally, that is, irrespective of the
distribution of the variable $(X,Y)$.
The interesting point here is that this universal
consistency result is no longer valid in the
infinite-dimensional setting. As recently proved by C\'erou
and Guyader (2006), if the space ${\cal F}$ where $X$ takes values is a
general separable metric space, a non-trivial condition must be imposed
on the distribution of $(X,Y)$ in order to ensure the
consistency of the $k$-NN classifier.
The aim of this paper is twofold, with a common focus on the $k$-NN classifier and
in close relation with the above mentioned two differences
between the classification problem in finite and infinite settings. First, on the theoretical
side, we have a further look at the consistency theorem in C\'erou and Guyader
(2006) by giving concrete non-trivial examples where their consistency condition is
fulfilled. Second, from a more practical viewpoint, we will carry out numerical
comparisons (based both on Monte Carlo studies and real data examples)
to assess the performance of different functional classifiers, including $k$-NN.
This paper is organized as follows. In Section 2 the consistency of the functional $k$-NN
classifier is established, as a consequence of Theorem 2 in
C\'erou and Guyader (2006), for a broad class of Gaussian processes.
In Section 3 other functional classifiers recently considered in the literature
are introduced and briefly commented. They are all compared through a
simulation study (based on two different models) as well as six real data examples,
very much in the spirit of Hand's (2006) paper, where the performance of the
classical Fisher's rule was assessed in terms of its discrimination capacity in
several randomly chosen data sets.
\
\noindent
\bf 2. On the consistency of the functional $k$-NN classifier\rm
\
In the functional classification problem several auxiliary devices have been used to
overcome the extra difficulty posed by the infinite dimensional nature of the feature space.
They include dimension reduction techniques (e.g., James and Hastie 2001,
Preda {\em et al.} 2007), random projections combined with the use of
data-depth measures (Cuevas {\em et
al.} 2007) and different adaptations to the functional framework of several
non-parametric and regression-based methods, including kernel classifiers (Abraham et al. 2006, Biau et al. 2005, Ferraty and Vieu
2003), reproducing kernel procedures (Preda 2007), logistic regression (M\"uller and Stadtm\"uller 2005)
and multilayer perceptron techniques with
functional inputs (Ferr\'e and Villa 2006).
\
\noindent
\it 2.1 On the consistency of the functional $k$-NN classifier\rm
\
The functional $k$-NN classifier belongs also to the class of procedures adapted from the usual
non-parametric multivariate setup.
Nevertheless, unlike most of the above mentioned functional methodologies, the $k$-NN procedure works according to exactly the same principles
in the finite and infinite-dimensional cases. It is defined by $g_n(x) = {\mathbbm 1}_{ \{ \eta_n(x) > 1/2 \}}$,
where $\eta_n$ is the $k$-NN regression estimator (\ref{RegEstkNN}), whose definition is formally identical to that of the
finite-dimensional case.
The intuitive interpretation is also the same in both cases.
No previous data manipulation, projection or dimension
reduction technique is required in principle, apart from the
discretization process necessarily involved in the
practical handling of functional data. In the present
section we offer some concrete examples where the $k$-NN
functional classifier is weakly consistent. As we have
mentioned in the previous section, this is a non-trivial
point since the $k$-NN classifier is no longer universally consistent
in the case of infinite-dimensional inputs $X$.
Throughout this section the feature space where the
variable $X$ takes values is a separable metric space $({\cal
F},D)$. We will denote by $P_X$ the distribution of $X$
defined by
$P_X (B) = P \{ X\in B \} \quad \mbox{for } B\in\mathcal B_{\mathcal
F}$, where $\mathcal B_{\mathcal F}$ are the Borel sets of $\mathcal
F$.
Let us now consider
the following regularity assumption on the regression
function $\eta(x)=E(Y|X=x)$
\begin{description}
\item[(BC) Besicovitch condition:]
$$
\lim_{\delta\to 0} \frac{1}{P_X(B_{X,\delta})} \int_{B_{X,\delta}} \eta(z) dP_X(z) = \eta(X)
\quad \mbox{in probability},
$$
where $B_{x,\delta} := \{ z\in \mathcal F: D(x,z)\leq \delta \}$ is the closed ball
with center $x$ and radius $\delta$.
\end{description}
Under \bf (BC)\/ \rm C\'erou and Guyader (2006, Th. 2) get the following consistency
result.
\
\noindent
\em
Denote by $L_n$ and $L^*$, respectively, the conditional error associated with the above defined $k$-NN classifier
and the Bayes (optimal) error for the problem at hand. If $({\cal F},D)$ is separable and condition \bf (BC)\/ \rm \em is
fulfilled then the $k$-NN classifier is weakly consistent,
that is $E(L_n)\rightarrow L^*$, as $n\to\infty$, provided
that $k\to\infty$ and $k/n\to 0$\rm.
\
\noindent
Besicovitch condition plays an important role also in the consistency of kernel rules
(see Abraham et al. 2006).
C\'erou and Guyader (2006) have also considered the following more convenient condition (called
$P_X$-continuity) that ensures \bf (BC)\rm:
For every $\epsilon>0$ and for $P_X$-a.e. $x\in \mathcal F$
$$
\lim_{\delta\to 0} P_X \{ z\in \mathcal F: |\eta(z)-\eta(x)|>\epsilon | D(x,z)<\delta \} = 0.
$$
However, for our purposes, it will be sufficient to observe that the continuity
($P_X$-a.e.) of $\eta(x)$ implies also {\bf (BC)}.
We are interested in finding families of distributions of $(X,Y)$ under which the regression function
$\eta(x)$ is continuous ($P_X$-a.e.) and hence \bf (BC)\/ \rm holds.
From now on we will use the following notation. Let $\mu_i$ be the distribution of $X$
conditional on $Y=i$, that is,
$\mu_i(B) = P \{ X\in B|Y=i \}$, for $B\in \mathcal B_{\mathcal F}$ and $i=0,1$.
We denote by $S_i \subset \mathcal F$ the support of $\mu_i$, for $i=0,1$, and $S=S_0\cap S_1$.
The expression $\mu_0 << \mu_1$ will denote that $\mu_0$ is
absolutely continuous with respect to $\mu_1$. Also we will assume that $p=P\{Y=0\}$
fulfills $p\in(0,1)$.
The following theorem shows that the property of continuity (resp. $P_X$-continuity)
of $\eta(x)$, and hence the weak consistency of the $k$-NN classifier, follows
from the continuity (resp $P_X$-continuity) of the Radon-Nikodym derivative of $\mu_0$
with respect to $\mu_1$ provided that it exists.
\
\noindent
{\sc Theorem 1:} {\em
Assume that $P_X(\partial S)=0$ and that $\mu_0 << \mu_1$ and $\mu_1 << \mu_0$ on $S$.
Then the following inequality holds for $P_X$-a.e. $x,z\in{\cal F}$.
\begin{equation*}
|\eta(z)-\eta(x)| \leq \frac{p}{1-p} \left|\frac{d\mu_0}{d\mu_1}(x) -
\frac{d\mu_0}{d\mu_1}(z)\right|,
\end{equation*}
where $d\mu_0/d\mu_1$ denotes the Radon-Nikodym derivative of $\mu_0$ with respect to
$\mu_1$. When $S_0=S_1=S$ the assumption $P_X(\partial S)=0$ may be dropped.
In particular, $\eta$ is continuous $P_X$-a.e. (resp. $P_X$-continuous) whenever $d\mu_0/d\mu_1$
is continuous $P_X$-a.e. (resp. $P_X$-continuous). Of course, a similar result holds by
interchanging the sub-indices 0 and 1 and replacing $p$ by $1-p$.}
\noindent
{\sc Proof:}
Define $\mu=\mu_0+\mu_1$. Then $\mu_i << \mu$,
for $i=0,1$, and we can define the Radon-Nikodym derivatives $f_i = d\mu_i/d\mu$, for $i=0,1$.
From the definition of the conditional expectation we know
that $\eta(x)=E(Y|X=x)=P(Y=1|X=x)$ can be expressed by
\begin{equation} \label{etaBayes}
\eta(x) = \frac{f_1(x)(1-p)}{f_0(x) p + f_1(x)(1-p)}.
\end{equation}
Observe that
$\mu \lvert_{S^c\cap S_i} = \mu_i\lvert_{S^c\cap S_i}$ and thus
$f_i \lvert_{S^c\cap S_i} = \mathbbm{1}_{S^c\cap S_i}$, for $i=0,1$.
Since $\mu_0 << \mu_1$ and $\mu_1 << \mu_0$ on $S$ then, on this set, we can define the
Radon-Nikodym derivatives $d\mu_0/d\mu_1$ and $d\mu_1/d\mu_0$. In this case, it also holds
that $\mu\lvert_S << \mu_i\lvert_S$, for both $i=0,1$ and
$$
\frac{d\mu}{d\mu_i}(x) = 1 + \frac{d\mu_{1-i}}{d\mu_i} (x)
\qquad \mbox{for any } x\in S.
$$
Then (see, e.g., Folland 1999), for $i=0,1$ and for $P_X$-a.e. $x\in S$,
\begin{equation} \label{DRN}
f_i(x) = \frac{d\mu_i}{d\mu}(x) = \left( \frac{d\mu}{d\mu_i}(x) \right)^{-1}
= \frac{1}{1 + \frac{d\mu_{1-i}}{d\mu_i} (x) }
\end{equation}
Substituting (\ref{DRN}) into expression (\ref{etaBayes}) we get
\begin{eqnarray}
\eta(x) & = & \left\{ \begin{array}{l}
0 \quad \mbox{if } x\in S_0\cap S^c \\
1 \quad \mbox{if } x\in S_1\cap S^c \\
\displaystyle \frac{1-p}{p \frac{d\mu_0}{d\mu_1}(x) + 1-p} \quad \mbox{if } x\in S .
\end{array} \right.\label{etax}
\end{eqnarray}
Using this last expression we can see that if $P_X(\partial S)=0$ and if $d\mu_0/d\mu_1$
is continuous $P_X$-a.e. (resp. $P_X$-continuous) on $S$
then $\eta$ is also continuous $P_X$-a.e. (resp. $P_X$-continuous) on $S$.
To see this it suffices to observe that,
for $P_X$-a.e. $x,z\in \mbox{int}(S)$,
\begin{eqnarray*}
|\eta(z)-\eta(x)| & = & \left| \frac{1-p}{p \frac{d\mu_0}{d\mu_1}(z) + 1-p} -
\frac{1-p}{p \frac{d\mu_0}{d\mu_1}(x) + 1-p} \right| \\
& \leq & \frac{p}{1-p} \left|\frac{d\mu_0}{d\mu_1}(x) - \frac{d\mu_0}{d\mu_1}(z)\right| .
\end{eqnarray*}
To derive the last inequality we have used that, as $\mu_i$, $i=0,1$, are positive
measures, the Radon-Nikodym derivative $d\mu_0/d\mu_1$ is also
non-negative.
{$\Box$}
\
In order to be able to combine Theorem 1 and the consistency result in C\'erou and Guyader (2006, Th. 2),
we are interested in finding distributions $\mu_0,\mu_1$ of an infinite-dimensional random element $X$
such that $\mu_0 << \mu_1$ and $\mu_1 << \mu_0$ with continuous Radon-Nikodym derivatives.
Measures $\mu_0$ and $\mu_1$ satisfying that $\mu_0 << \mu_1$ and $\mu_1 << \mu_0$
on $S$ are said to be {\em equivalent} on $S$.
Let us denote by $(C[a,b],\|\;\|_\infty)$ the metric space of continuous real-valued functions $x$
defined on the interval $[a,b]$, endowed with the supremum norm,
$\| x\|_\infty=\sup\{|x(t)|:t\in [a,b]\}$. Also let $C^{2}[a,b]$ be the space of twice
continuously differentiable functions defined on $[a,b]$.
In the next theorem we show a broad class of Gaussian processes
fulfilling the conditions of Theorem 2 in C\'erou and Guyader (2006).
Thus the consistency of the $k$-NN classifier is
guaranteed for them. A key element in the proof are the
results by Varberg (1961) and J\o rsboe (1968) providing explicit expressions for
the Radon-Nikodym derivative of a Gaussian measure with
respect to another one. From the gaussianity assumption, the model is completely
determined by giving the mean and covariance functions.
For the sake of a more clear and systematic
presentation the statement is divided into three parts: The
first one applies to the case where the mean function in
both functional populations, with distributions
$\mu_0$ and $\mu_1$ (corresponding to $X|Y=0$ and $X|Y=1$), is common and the difference between both processes lies in
the covariance functions (which however keep a common
structure). The second part considers the dual case where
the difference lies in the mean functions and the covariance structure is common. Finally, the
third part of the theorem generalizes the previous two
statements by including the case of different mean
and covariance functions.
\
\noindent
{\sc Theorem 2:} {\em
Let $(\mathcal F,D) = (C[a,b],\| \; \|_\infty)$ with $0\leq a<b<\infty$.
\begin{enumerate}[{\bf a)}]
\item Assume that $X|Y=i$, for $i=0,1$,
are Gaussian processes on $[a,b]$, whose mean function is
zero
and with covariance functions $\Gamma_i(s,t) = u_i(\min(s,t)) \, v_i(\max(s,t))$,
for $s,t\in[a,b]$, where $u_i,v_i$, for $i=0,1$, are positive functions in $C^{2}[a,b]$.
Assume also that $v_i$, for $i=0,1$, and $v_1u_1'-u_1v_1'$ are bounded away from zero on $[a,b]$,
that $u_1v_1'-u_1'v_1 = u_0v_0'-u_0'v_0$ and that $u_1(a)=0$ if and only if $u_0(a)=0$.
Then $d\mu_0/d\mu_1$ is continuous on $\mathcal F$.
\item Assume that $X|Y=i$, for $i=0,1$, are Gaussian processes on $[a,b]$,
with equal covariance function $\Gamma(s,t) = u(\min(s,t)) \, v(\max(s,t))$,
for $s,t\in[a,b]$, where $u,v\in C^{2}[a,b]$ are positive functions and
$v$ and $vu'-uv'$ are bounded away from zero on $[a,b]$.
Assume also that the mean function of $X|Y=1$ is 0 and that of $X|Y=0$ is a function $m\in C^2[a,b]$,
such that $m(a)=0$ whenever $u(a)=0$. Then $d\mu_0/d\mu_1$ is continuous on $\mathcal F$.
\item Assume that $X|Y=i$, for $i=0,1$, are Gaussian processes on $[a,b]$,
with mean functions $m_i\in C^{2}[a,b]$
and covariance functions $\Gamma_i(s,t) = u_i(\min(s,t)) \, v_i(\max(s,t))$,
for $s,t\in[a,b]$, where $u_i,v_i$, for $i=0,1$, are positive functions in $C^{2}[a,b]$
which fulfill the same conditions imposed in (a).
Assume also that $m_i(a)=0$ whenever $u_i(a)=0$.
Then $d\mu_0/d\mu_1$ is continuous on $\mathcal F$.
\end{enumerate}
Therefore, under the assumptions in either (a), (b) or (c), the $k$-NN classifier
discriminating between $\mu_0$ and $\mu_1$ is weakly consistent when $k\to\infty$ and $k/n\to 0$.
}
\
\noindent
{\sc Proof:} \begin{enumerate}[{\bf a)}]
\item Varberg (1961, Th. 1) shows that, under the assumptions of (a),
$\mu_0$ and $\mu_1$ are equivalent measures and the Radon-Nikodym derivative of
$\mu_0$ with respect to $\mu_1$ is given by
\begin{equation} \label{J1}
\frac{d\mu_0}{d\mu_1}(x) = C_1 \, \exp\left\{ \frac{1}{2} \left[ C_2 x^2(a) +
\int_a^b f(t) d\left( \frac{x^2(t)}{v_0(t)v_1(t)} \right) \right] \right\}
\end{equation}
where
$$
C_1 = \left\{ \begin{array}{l}
\left( \frac{v_0(a)v_1(b)}{v_0(b)v_1(a)} \right)^{1/2} \quad \mbox{if } u_0(a)=0 \\
\left( \frac{u_1(a)v_1(b)}{v_0(b)u_0(a)} \right)^{1/2} \quad \mbox{if } u_0(a)\ne 0
\end{array} \right.
\qquad
C_2 = \left\{ \begin{array}{l}
0 \quad \mbox{if } u_0(a)=0 \\
\left( \frac{v_0(a)u_0(a)-u_1(a)v_1(a)}{v_1(a)v_0(a)u_0(a)u_1(a)} \right)^{1/2} \quad \mbox{if } u_0(a)\ne 0
\end{array} \right.
$$
and
$$
f(s) = \frac{v_1(s)v_0'(s)-v_0(s)v_1'(s)}{v_1(s)u_1'(s)-u_1(s)v_1'(s)} \quad \mbox{for } s\in [a,b] .
$$
Observe that, by the assumptions of the theorem, this function $f$ is
differentiable with bounded derivative.
Thus $f$ is of bounded variation and it may be expressed as the difference
of two bounded positive increasing functions.
Therefore the stochastic integral (\ref{J1}) is
well defined and it can be evaluated integrating by parts,
$$
\frac{d\mu_0}{d\mu_1}(x) = C_1 \exp \left[ \frac{1}{2} \left( C_3x^2(a) + C_4 x^2(b)
- \int_a^b \frac{x^2(t)}{v_0(t)v_1(t)} df(t)\right) \right]
$$
with
$ C_3=C_2-f(a)/v_0(a)v_1(a) $ and $ C_4 = f(b)/v_0(b)v_1(b) $.
It is clear that this derivative is a continuous functional of $x$ with respect
to the supremum norm.
Now, Theorem 1 implies that $\eta(x)$ is continuous and, therefore, Besicovitch condition
{\bf (BC)} holds and, from Theorem 2 in C\'erou and Guyader (2006), the $k$-NN classifier
is weakly consistent. Note that the equivalence of $\mu_0$ and $\mu_1$ implies
the coincidence of both supports $S_0=S_1=S$.
\item In J\o rsboe (1968), p. 61, it is proved that, under the indicated assumptions, $\mu_0$ and $\mu_1$ are
equivalent measures with the following Radon-Nikodym derivative
$$
\frac{d\mu_0}{d\mu_1}(x) = \exp \left\{ D_1 + D_2 \, x(a) + \frac{1}{2} \int_a^b g(t)
d\left( \frac{2x(t)-m(t)}{v(t)} \right) \right\}
$$
where
$$
D_1 = -\frac{m^2(a)}{2 \, u(a) \, v(a)} \mathbbm 1_{\{ u(a)>0 \}} \; , \qquad
D_2 = \frac{m(a)}{u(a) \, v(a)} \mathbbm 1_{\{ u(a)>0 \}}
$$
and
$$
g(t) = \frac{v(t)m'(t)-m(t)v'(t)}{v(t)u'(t)-u(t)v'(t)} \; .
$$
Again, the integration by parts gives
\begin{equation}
\frac{d\mu_0}{d\mu_1}(x) = \exp \left\{ D_3 + \left( D_2 -2\,\frac{g(a)}{v(a)} \right) x(a)
+ 2 \,\frac{g(b)}{v(b)}\, x(b) - 2 \int_a^b \frac{x(t)}{v(t)}\, dg(t) \right\} ,
\end{equation}
with
$$
D_3 = D_1 - \int_a^b g(t) \, d\left( \frac{m(t)}{v(t)} \right) .
$$
Thus $d\mu_0/d\mu_1$, and hence $\eta$, are continuous and the consistency of the
$k$-NN classifier holds also in this case.
\item Let us denote by $P_{m,\Gamma}$ the distribution of the Gaussian process with mean $m$
and covariance function $\Gamma$. Then $\frac{d\mu_0}{d\mu_1}(x)$ is continuous since
(see e.g. Folland 1999)
\begin{equation} \label{RNNS}
\frac{d\mu_0}{d\mu_1}(x) = \frac{dP_{m_0,\Gamma_0}}{dP_{m_1,\Gamma_1}} (x)
= \frac{dP_{m_0,\Gamma_0}}{dP_{0,\Gamma_0}} (x) \,
\frac{dP_{0,\Gamma_0}}{dP_{0,\Gamma_1}} (x) \,
\frac{dP_{0,\Gamma_1}}{dP_{m_1,\Gamma_1}} (x),
\end{equation}
and, as we have shown in the proofs of (a) and (b), the Radon-Nikodym derivatives
in the right-hand side of (\ref{RNNS}) are all continuous.
{$\Box$}
\end{enumerate}
\
\noindent
{\sc Remark 1 (Application to the Ornstein-Uhlenbeck processes).}
Let $X|Y=i$, for $i=0,1$, be Gaussian processes on $[a,b]$, with zero mean
and covariance function $\Gamma_i(s,t) = \sigma_i^2 \exp(-\beta_i|s-t|)$, for $s,t\in[a,b]$,
where $\beta_i,\sigma_i>0$ for $i=0,1$.
Assume that $\sigma_1^2\beta_1=\sigma_0^2\beta_0$.
Then these processes satisfy the assumptions in Theorem 2(a).
\noindent
{\sc Remark 2 (Application to the Brownian motion).}
Theorem 2(b) can also be used to consistently discriminate between a
Brownian motion without trend ($m_0=0$) and another one
with trend ($m_1\neq 0$). It will suffice to consider the
case where $u(t)=t$ and $v\equiv 1$.
\noindent
{\sc Remark 3 (On triangular covariance functions).}
Covariance functions of type $\Gamma(s,t) = u(\min(s,t)) \, v(\max(s,t))$,
called \it triangular\rm, have received considerable attention in the literature.
For example, Sacks and Ylvisaker (1966) use this condition in the study of optimal
designs for regression problems where the errors are generated by a zero
mean process with covariance function $K(s,t)$. It turns out that the Hilbert space
with reproducing kernel $K$ plays an important role in the results and, as these authors
point out, the norm of this space is particularly easy to handle when $K$ is triangular.
On the other hand, Varberg (1964) has given an interesting representation of the
processes $X(t),\ 0\leq t<b$, with zero mean and triangular covariance function by
proving that they can be expressed in the form
$$
X(t)=\int_0^bW(u)d_uR(t,u),
$$
where $W$ is the standard Wiener process and $R=R(t,u)$ is a function, of bounded
variation with respect to $u$, defined in terms of $K$.
\noindent
{\sc Remark 4 (On plug-in functional classifiers).} The explicit knowledge of the conditional
expectation (\ref{etax}) in the cases considered in Theorem 2 could be
explored from the statistical point of view as they suggest
to use ``plug-in'' classifiers obtained by replacing
$\eta(x)$ in (\ref{opt}) with suitable parametric or semiparametric estimators.
\noindent
{\sc Remark 5 (On equivalent Gaussian measures and their supports).}
According to a well-known result by Feldman and H\'ajek,
for any given pair of Gaussian processes, there is a dichotomy in such
a way that they are either equivalent or mutually singular.
In the first case both measures $\mu_0$ and $\mu_1$ have a common support $S$ so that
Theorem 1 is applicable with $S=S_0=S_1$. As for the identification of the support,
Vakhania (1975) has proved that if a Gaussian process, with trajectories in a
separable Banach space ${\cal F}$, is not degenerate
(i.e., the distribution of any non-trivial continuous linear functional is not degenerate)
then the support of such process is the whole space ${\cal F}$. Again, expression (\ref{etax}) of
the regression functional $\eta$ suggests the possibility of investigating
possible nonparametric estimators for the Radon-Nikodym derivative $d\mu_0/d\mu_1$ which would
in turn provide plug-in versions of the Bayes rule $g^*(x) = {\mathbbm 1}_{ \{ \eta(x) > 1/2 \}}$
with no further assumption on the structure of the involved Gaussian processes, apart from
their equivalence.
\
\noindent
\bf 3. Some numerical comparisons\rm
\
The aim of this section is to compare (numerically) the performance of several
supervised functional classification procedures already introduced in
the literature. The procedures are the $k$-NN rule, computed both with respect to the
supremum norm $\|\;\|_\infty$ and the $L^2$ norm $\|\;\|_2$, and other discrimination
rules reviewed in Section 3.1. One of the objectives of this numerical study is
to have some insight into which classification procedures perform
well no matter the type of functional data under consideration and could thus
be considered a sort of benchmark for the functional discrimination problem.
Section 3.2 contains a Monte Carlo study carried out on two different functional
data generating models. In Section 3.3 we consider six functional real data
sets taken from the literature.
\
\noindent
\it 3.1 Other functional classifiers\rm
\
Here we will review other classification techniques that have been used
in the literature in the context of functional data.
From now on we denote by $(t_1,\ldots,t_N)$
the nodes where the functional predictor $X$ has been
observed.
\noindent
{\em Partial Least Squares (PLS) classification}
Let us first describe the procedure in the context of a multivariate predictor $\mathbf X$.
PLS is actually a dimension reduction technique for regression problems with predictor
$\mathbf X$ and a response $Y$ (which in the case of classification takes only two values, 0 or 1,
depending on which population the individual comes from). The dimension reduction is carried out by projecting
$\mathbf X$ onto a lower-dimensional space such that the
coordinates of the projected $\mathbf X$,
the PLS coordinates, are uncorrelated to each other and have maximum covariance with $Y$.
Then, if the aim is classification, Fisher's linear discriminant is applied to the PLS
coordinates of $\mathbf X$ (see Barker and Rayens 2003, Liu and Rayens 2007).
In the case of a functional predictor $X$ (see Preda et al. 2007), the above described procedure is applied to
the discretized version of $X$, $\mathbf X=(X(t_1),X(t_2),\ldots,X(t_N))$.
Here we have chosen the number of PLS directions, among the values 1,\ldots,10, by cross-validation.
\noindent
{\em Reproducing Kernel Hilbert Space (RKHS) classification}
We will also define this technique initially for a multivariate predictor $\mathbf X$.
For simplicity, we will assume that $\mathbf X$ takes values in $[0,1]^N$.
Let $\kappa$ be a function defined on $[0,1]^N\times[0,1]^N$.
A RKHS with kernel $\kappa$ is the vector space generated by all finite linear combinations
of functions of the form $\kappa_{\mathbf t^*}(\cdot)=\kappa(\mathbf t^*,\cdot)$,
for any $\mathbf t^*\in[0,1]^N$, and endowed with the inner product given by
$\langle \kappa_{\mathbf t^*}, \kappa_{\mathbf t^{**}}\rangle_\kappa=\kappa(\mathbf t^*,\mathbf t^{**})$.
RKHS are frequently used in the context of Machine Learning
(see Evgeniou {\em et al.} 2002, Wahba 2002); for their applications in Statistics the reader
is referred to the monograph of Berlinet and Thomas-Agnan (2004).
In this work we use the Gaussian kernel $\kappa(\mathbf s,\mathbf t) = \exp( -\|\mathbf s-\mathbf t\|_2^2/\sigma_\kappa^2 )$, where
$\sigma_\kappa>0$ is a fixed parameter. The classification problem is solved by plugging a regression
estimator of the type $\eta_n(\mathbf x) = \sum_{i=1}^n c_i \, \kappa(\mathbf x,\mathbf X_i)$ into
the Bayes classifier. When $X$ is a random function, this procedure is applied
to the discretized $X$.
The parameters $c_i$, for $i=1,\ldots,n$, are chosen to minimize the risk functional
$n^{-1} \sum_{i=1}^n (Y_i-\eta_n(X_i))^2 + \lambda \langle \eta,\eta\rangle_\kappa$,
where $\lambda>0$ is a penalization parameter.
In this work the values of the parameters $\lambda$ and $\sigma_\kappa$ have been chosen
by cross-validation via a leave-one-out procedure.
According to our results, it seems that the performance of the RKHS methodology
is rather sensitive to changes in these parameters and even to the starting point of the
leave-one-out procedure mentioned.
\noindent
{\em Classification via depth measures}
The idea is to assign a new observation $x$ to that population, $P_0$ or $P_1$, with
respect to which $x$ is deeper (see Ghosh and Chaudhuri 2005, Cuevas et al. 2007).
From the five functional depth measures considered by Cuevas et al. (2007) we have
taken the $h$-mode depth and the random projection (RP) depth.
Specifically, the $h$-mode depth of $x$ with respect to the population given by the random
element $X$ is defined as $f_h(x) = E(K_h(\|x-X\|_2))$, where $K_h(\cdot) = h^{-1} K(\cdot/h)$,
$K$ is a kernel function (here we have taken the Gaussian kernel
$K(t) = \sqrt{2/\pi} \exp(-t^2/2)$) and $h$ is a smoothing parameter.
As the distribution of $X$ is usually unknown, in the simulations we actually use
the empirical version of $f_h$,
$ \hat f_h(x) = n^{-1} \sum_{i=1}^n K_h(\|x-X_i\|_2) $.
The smoothing parameter has been chosen as the 20 percentile in the $L^2$ distances between
the functions in the training sample (see Cuevas et al. 2007).
To compute the RP depth the training sample $X_1,\ldots,X_n$
is projected onto a (functional) random direction $a$ (independent of the $X_i$).
The sample depth of an observation $x$ with respect to $P_i$ is defined
as the univariate depth of
the projection of $x$ onto $a$ with respect to the projected training sample from $P_i$.
Since $a$ is a random element this definition leads
to a random measure of depth, but a single representative value has been obtained
by averaging these random depths over 50 independent random directions
(see Cuevas and Fraiman 2008 for a certain theoretical development of this idea).
If we are working with discretized versions $(x(t_1),\ldots,x(t_N))$ of the
functional data $x(t)$, we may take $a$ according to a uniform distribution on the
unit sphere of ${\mathbb R}^N$. This can be achieved, for example, setting
$a=Z/\|Z\|$, where $Z$ is drawn from a standard Gaussian distribution on ${\mathbb R}^N$.
\noindent
{\em Moving window rule}
The moving window classifier is given by
$$
g_n(x) = \left\{ \begin{array}{ll}
0 & \mbox{if } \sum_{i=1}^n \mathbbm{1}_{\{Y_i=0,X_i\in B(x,h)\}}
\geq \sum_{i=1}^n \mathbbm{1}_{\{Y_i=1,X_i\in B(x,h)\}} , \\
1 & \mbox{otherwise} ,
\end{array} \right.
$$
where $h=h_n>0$ is a smoothing parameter. This classification rule was considered
in the functional setting, for instance, by Abraham et al. (2006). In this work the
parameter $h$ has been chosen again via cross-validation.
\
\noindent
\it 3.2 Monte Carlo results\rm
\
In this section we study two functional data models already considered by other authors.
More specifically, in Model 1, similar to one used in Cuevas et al.
(2007), $X|Y=i$ is a Gaussian process with mean
$ m_i(t) = 30 \, (1-t)^{1.1^i} \, t^{1.1^{1-i}} $
and covariance function $\Gamma_i(s,t)=0.25\exp(-|s-t|/0.3)$, for $i=0,1$.
Observe that this model with smooth trajectories satisfies the assumptions in
Theorem 2 and thus we would expect the $k$-NN classification rule
(with respect to the $\|\;\|_\infty$ norm) to perform nicely. Let us note that the value
of 1.1 in the exponent of $m_i(t)$ is in fact the one used in Model 1, pg. 487, of Cuevas et al.
(2007), although in their work a 1.2 was misprinted instead.
Model 2 appears in Preda et al. (2007), but here the
functions $h_i$, used to define the mean, have been rescaled to have domain
$[0,1]$. The trajectories of $X|Y=i$ are given by
\begin{equation} \label{Model2}
X_i(t)=U \, h_1(t) + (1-U) \, h_{i+2}(t) + \epsilon(t) \qquad \mbox{for } i=0,1,
\end{equation}
where $U$ is uniformly distributed on $[0,1]$, $h_1(t) = 2 \max(3-5|2t-1|,0)$,
$h_2(t) = h_1(t-1/5)$, $h_3(t) = h_1(t+1/5)$ and $\epsilon(t)$ is
an approximation to the continuous-time white noise.
In practice, this means that in the discretized approximations $(X(t_1),\ldots,X(t_N))$
to $X(t)$, the variables $\epsilon(t_1),\ldots,\epsilon(t_N)$ are independently drawn
from a standard normal distribution.
The simulation results are summarized in Tables 1 and 2.
The number of equispaced nodes where the functional data have been evaluated is
the same for both models, $51$.
The number of Monte Carlo runs is 100.
In every run we generated two training samples (from $X|Y=0$
and $X|Y=1$ respectively) each with sample size
100, and we also generated a test sample of size 50 from each of the two
populations. The tables display the descriptive statistics of the proportion of
correctly classified observations from these test samples.
\
\begin{table}[h] \small \label{SimMod1}
\begin{center}
\begin{tabular}{rcccccccc}
& $k$-NN$|_\infty$ & $k$-NN$|_2$ & PLS & RKHS & $h$-modal & RP(hM) & MWR \\ \hline
Minimum & 0.6200 & 0.6600 & 0.6000 & 0.4800 & 0.6400 & 0.5400 & 0.6600 \\
First quartile & 0.8000 & 0.8000 & 0.8000 & 0.6600 & 0.8000 & 0.7800 & 0.8000 \\
Median & 0.8400 & 0.8400 & 0.8400 & 0.8400 & 0.8400 & 0.8400 & 0.8400 \\
Mean & 0.8396 & 0.8354 & 0.8371 & 0.7999 & 0.8409 & 0.8260 & 0.8393 \\
Third quartile & 0.8800 & 0.8800 & 0.8800 & 0.9400 & 0.8800 & 0.8800 & 0.8800 \\
Maximum & 0.9800 & 0.9600 & 0.9800 & 1.0000 & 0.9800 & 0.9800 & 1.0000 \\ [2 mm]
Std. deviation & 0.0603 & 0.0572 & 0.0668 & 0.1457 & 0.0589 & 0.0725 & 0.0634 \\ \hline
\end{tabular}
\end{center}
\caption{Simulation results for Model 1}
\end{table}
\begin{table}[h] \small \label{SimMod2}
\begin{center}
\begin{tabular}{rccccccccc}
& $k$-NN$|_\infty$ & $k$-NN$|_2$ & PLS & RKHS & $h$-modal & RP(hM) & MWR \\ \hline
Minimum & 0.8400 & 0.8400 & 0.8800 & 0.8400 & 0.8600 & 0.8400 & 0.8200 \\
First quartile & 0.9200 & 0.9400 & 0.9600 & 0.9600 & 0.9400 & 0.9400 & 0.9400 \\
Median & 0.9600 & 0.9600 & 0.9800 & 0.9800 & 0.9800 & 0.9600 & 0.9600 \\
Mean & 0.9522 & 0.9558 & 0.9686 & 0.9688 & 0.9657 & 0.9522 & 0.9570 \\
Third quartile & 0.9800 & 0.9800 & 0.9800 & 1.0000 & 1.0000 & 0.9800 & 0.9800 \\
Maximum & 1.0000 & 1.0000 & 1.0000 & 1.0000 & 1.0000 & 1.0000 & 1.0000 \\ [2 mm]
Std. deviation & 0.0335 & 0.0355 & 0.0279 & 0.0313 & 0.0308 & 0.0345 & 0.0349 \\ \hline
\end{tabular}
\caption{Simulation results for Model 2}
\end{center}
\end{table}
Regarding Model 1, observe that there is little difference between the correct classification rates
of any of the methods, except for the RKHS procedure which performs worse. In Model 2
the PLS, RKHS and $h$-modal methods slightly outperform the others. When the Monte Carlo
study with this model was carried out, we also applied the $k$-NN classification procedures
to a spline-smoothed version of the $X$ trajectories. The result was that the mean correct
classification rate increased to 0.9582 in the case of the supremum norm and to 0.9624
in the case of the $L^2$ norm. This, together with the analysis of the flies data in the
next subsection, seems to suggest that, when the curves $X$ are irregular, smoothing these
functions will enhance the $k$-NN discrimination procedure.
\
\noindent
\it 3.3. Some comparisons based on real data sets\rm
\
\noindent
\it 3.3.1. Brief description of the data sets \rm
\
\noindent
{\em Berkeley Growth Data:}
The Berkeley Growth Study (Tuddenham and Snyder 1954) recorded the heights of
$n_0=54$ girls and $n_1=39$ boys between the ages of 1 and 18 years. Heights were measured
at 31 ages for each child. These data have been previously analyzed
by Ramsay and Silverman (2002).
\noindent
{\em ECG data:} These are electrocardiogram (ECG) data, studied by Wei and Keogh (2006),
from the MIT-BIH Arrhythmia database (see Goldberger {\em et al.} 2000).
Each observation contains the successive measurements recorded
by one electrode during one heartbeat and was normalized and rescaled to have length 85.
A group of cardiologists have assigned a label of normal or abnormal to each data record.
Due to computational limitations, of the original $2026$ records in the data set,
we have randomly chosen only $200$ observations from each group.
\noindent
{\em MCO data:} The variable under study is the mitochondrial calcium overload (MCO), measured
every 10 seconds during an hour in isolated mouse cardiac cells.
The data come from research conducted by Dr. David Garc\'{\i}a-Dorado at the Vall d'Hebron
Hospital (see Ruiz-Meana et al. 2003, Cuevas, Febrero and Fraiman 2004, 2007).
In order to assess if a certain drug increased the MCO level,
a sample of functions of size $n_0 = 45$ was taken from a control group
and $n_1 = 44$ functions were sampled from the treatment group.
\noindent
{\em Spectrometric data:} For each of 215 pieces of meat a spectrometer provided the absorbance attained at 100
different wavelengths (see Ferraty and Vieu 2006 and references therein). The fat content of the meat was also obtained via chemical processing
and each of the meat pieces was classified as low- or high-fat.
\noindent
{\em Phoneme data:} The $X$ variable is the log-periodogram (discretized to 150 nodes) of a phoneme. The
two populations correspond to phonemes ``aa'' and ``ao'' respectively (see more information in
Ferraty and Vieu 2006).
We have considered a sample of 100 observations from each phoneme.
\noindent
{\em Medflies data:} This dataset was obtained by Prof. Carey from U.C. Davis (see Carey et al. 1998)
and has been studied, for instance, by M{\"u}ller and Stadtm{\"u}ller (2005).
The predictor $X$ is the number of eggs laid daily by a Mediterranean fruit fly for
a 30-day period. The fly is classified as long-lived if its remaining lifetime past 30 days
is more than 14 days and short-lived otherwise. The number of long- and short-lived flies
observed was 256 and 278 respectively.
\
\noindent
\it 3.3.2. Results \rm
\
We have applied the classification techniques reviewed in Section 3.1 to the real data sets
just described. While carrying out the simulations of Subsection 3.2,
we observed that the performance of the RKHS procedure was
very dependent on the initial values of the parameters $\sigma_K$ and $\lambda$ provided for
the cross-validation algorithm. In fact, finding initial values
for these parameters that would finally yield competitive results with respect to the
other methods took a considerable time. Thus we decided to exclude the RKHS classification
method from the study with real data.
We have computed, via a cross-validation procedure, the mean correct classification rates
attained by the different discrimination methods on the real data sets.
In Table 3 we display the results.
Since the egg-laying trajectories in the medflies data set were very irregular and spiky,
we have computed the correct classification rate for both the original data and
a smoothed version obtained with splines. The smoothing leads to a better
performance of the $k$-NN procedure with the supremum metric, just as it happened in the
simulations with Model 2.
\begin{table}[h] \label{RealDat}
\begin{center}
\begin{tabular}{lcccccccc}
Data set & $k$-NN$|_\infty$ & $k$-NN$|_2$ & PLS & $h$-modal & RP(hM) & MWR \\ \hline
Growth & 0.9462 & 0.9677 & 0.9462 & 0.9462 & 0.9462 & 0.9570 \\
ECG & 0.9900 & 0.9950 & 0.9825 & 0.9900 & 0.8575 & 0.8850 \\
MCO & 0.8427 & 0.8315 & 0.8876 & 0.7640 & 0.7079 & 0.6854 \\
Spectrometric & 0.9070 & 0.8558 & 0.9163 & 0.6791 & 0.6930 & 0.6558 \\
Phoneme & 0.7300 & 0.7800 & 0.7400 & 0.7300 & 0.7450 & 0.6950 \\
Medflies (non-smoothed) & 0.5468 & 0.5412 & 0.5262 & 0.4925 & 0.5056 & 0.5431 \\
\hspace{15 mm}
(smoothed) & 0.5712 & 0.5431 & 0.5094 & 0.5075 & 0.5543 & 0.5206 \\ \hline
\end{tabular}
\end{center}
\caption{Mean correct classification rates for the real data sets}
\end{table}
As a conclusion we would say that the $k$-NN classification methodology with respect to the
$L^\infty$ norm is always among the best performing ones if the $X$ trajectories are smooth.
The $k$-NN procedure with respect to the $L^2$ norm and the PLS methodology give also
good results, although the latter has the drawback of a much higher computation time.
\
\begin{center}
\sc References\rm
\end{center}
\begin{list}{}{\leftmargin .5cm\listparindent -.5cm}
\item\hspace{-.2cm}
Abraham, C., Biau, G. and Cadre, B. (2006). On the kernel rule for function classification.
Annals of the Institute of Statistical Mathematics 58, 619-633.
Barker M. and Rayens W. (2003). Partial least squares for discrimination. Journal of
Chemometrics 17, 166-73.
Berlinet, A. and Thomas-Agnan, C. (2004). Reproducing Kernel Hilbert Spaces in
Probability and Statistics. Kluwer Academic Publishers.
Biau, G., Bunea, F. and Wegkamp, M. (2005). Functional classification in Hilbert spaces. IEEE
Transactions on Information Theory 51, 2163-2172.
Carey, J.R., Liedo, P., M{\"u}ller, H.G., Wang, J.L. and Chiou, J.M. (1998).
Relationship of age patterns of fecundity to mortality, longevity, and lifetime
reproduction in a large cohort of Mediterranean fruit fly females. Journal of
Gerontology, Ser. A 53, 245--251.
C\'erou, F. and Guyader, A. (2006). Nearest neighbor classification in infinite dimension.
ESAIM: Probability and Statistics 10, 340-355.
Cuevas, A., Febrero, M and Fraiman, R. (2004). An ANOVA test for functional data.
Computational Statistics and Data Analysis 47, 111--122.
Cuevas, A., Febrero, M and Fraiman, R. (2007). Robust estimation and
classification for functional data via projection-based depth notions.
Computational Statistics 22, 481--496.
Cuevas, A. and Fraiman, R. (2008). On depth measures and dual statistics.
A methodology for dealing with general data. \it
Manuscript\rm.
Devroye, L., Gy\"orfi, L. and Lugosi, G. (1996). A Probabilistic Theory of
Pattern Recognition. Springer-Verlag.
Evgeniou , T., Poggio, T. Pontil, M. and Verri, A. (2002). Regularization
and statistical learning theory for data analysis. Computational Statistics
and Data Analysis, 38, 421--432.
Ferraty, F. and Vieu, P. (2003). Curves discrimination: A nonparametric functional approach.
Computational Statistics and Data Analysis 44, 161--173.
Ferraty, F. and Vieu, P. (2006). Nonparametric Modelling for Functional Data. Springer.
Ferr\'e, L. and Villa, N. (2006). Multilayer perceptron with functional inputs:
an inverse regression approach. Scandinavian Journal of Statistics 33, 807--823.
Fisher, R.A. (1936). The use of multiple measurements in taxonomic problems.
Annals of Eugenics 7, 179--188.
Folland, G. B. (1999). Real analysis. Modern techniques and their applications. Wiley.
Ghosh, A. K. and Chaudhuri, P. (2005). On maximal depth and related classifiers.
Scandinavian Journal of Statistics 32, 327--350.
Goldberger, A., Amaral, L., Glass, L., Hausdorff, J., Ivanov, P.,
Mark, R., Mietus, J., Moody, G., Peng, C., and He, S. (2000).
PhysioBank, PhysioToolkit, and PhysioNet: Components of a
New Research Resource for Complex Physiologic Signals. Circulation 101, 215--220.
Hand, D.J. (1997). Construction and Assessment of Classification Rules. Wiley.
Hand, D.J. (2006). Classifier technology and the illusion of progress.
Statistical Science 21, 1--14.
Hastie, T., Tibshirani, R. and Friedman, J. (2001). The Elements of Statistical
Learning. Springer.
James, G.M. and Hastie, T.J. (2001). Functional linear discriminant analysis for
irregularly sampled curves. Journal of the Royal Statistical Society, Ser. B 63, 533-550.
J\o rsboe, O. G. (1968). Equivalence or Singularity of Gaussian Measures on
Function Spaces. Various Publications Series, No. 4, Matematisk Institut, Aarhus
Universitet, Aarhus.
Liu, Y. and Rayens, W. (2007). PLS and dimension reduction for
classification. Computational Statistics 22, 189--208.
M\"uller, H.G. and Stadtm\"uller, U. (2005). Generalized functional linear models.
The Annals of Statistics 33, 774-805.
Preda, C. (2007). Regression models for functional data by reproducing kernel
Hilbert spaces methods. Journal of Statistical Planning and Inference 137,
829--840.
Preda, C., Saporta, G. and L\'ev\'eder, C. (2007). PLS classification of functional
data. Computational Statistics 22, 223--235.
Ramsay, J.O. and Silverman, B.W. (2002). Applied Functional Data
Analysis. Methods and Case Studies. Springer-Verlag.
Ramsay, J.O. and Silverman, B.W. (2005). Functional Data Analysis. Second edition. Springer.
Ruiz-Meana, M., Garc\'{\i}a-Dorado, D., Pina, P., Inserte, J., Agull\'o, L. and Soler-Soler, J. (2003). Cariporide preserves mitochondrial proton gradient and
delays ATP depletion in cardiomyocites during ischemic conditions.
American Journal of Physiology - Heart and Circulatory Physiology 285, 999--1006.
Sacks, J. and Ylvisaker, N.D. (1966). Designs for regression problems with correlated errors.
Annals of Mathematical Statistics 37, 66--89.
Stone, C. J. (1977). Consistent nonparametric regression. The Annals of Statistics 5, 595-645.
Tuddenham, R. D. and Snyder, M. M. (1954). Physical growth of California boys
and girls from birth to eighteen years. University of California Publications
in Child Development 1, 183--364.
Vakhania, N.N. (1975). The topological support of Gaussian measure in Banach space.
Nagoya Mathematical Journal 57, 59--63.
Varberg, D.E. (1961). On equivalence of Gaussian measures.
Pacific Journal of Mathematics 11, 751--762.
Varberg, D.E. (1964). On Gaussian measures equivalent to Wiener measure. Transactions
of the American Mathematical Society 113, 262--273.
Wahba, G. (2002). Soft and hard classification by reproducing kernel Hilbert space methods.
Proceedings of National Academy of Sciences 99, 16524--16530.
Wei, L. and Keogh, E. (2006). Semi-Supervised Time Series Classification.
Proceedings of the 12th ACM SIGKDD International Conference on Knowledge Discovery
and Data Mining, 748--753, Philadelphia, U.S.A.
\end{list}
\end{document} |
\begin{document}
\title{Cliques and constructors in ``Hats'' game}
\begin{abstract}
The following general variant of deterministic Hats game is analyzed.
Several sages wearing colored hats occupy the vertices of a graph, the $k$-th sage
can have hats of one of $h(k)$ colors. Each sage tries to guess the color of
his own hat merely on the basis of observing the hats of his neighbors
without exchanging any information. A predetermined guessing strategy is
winning if it guarantees at least one correct individual guess for every
assignment of colors.
For complete graphs and for cycles we solve the problem of describing functions
$h(k)$ for which the sages win. We demonstrate here winning strategies for the
sages on complete graphs, and analyze the Hats game on almost complete graphs.
We develop ``theory of constructors'', that is
a collection of theorems demonstrating how one can construct new graphs for
which the sages win. We define also new game ``Check by rook'' which is
equivalent to Hats game on 4-cycle and give complete analysis of this game.
\end{abstract}
\section{Introduction}
The Hats game is an interesting mathematical puzzle attracting attention of
many mathematicians for many years. In the classical version of the problem
there is a set of $n \geq 2$ players (sages) and adversary, who puts a hat of
one of $n$ colors to the head of each sage. Each sage sees the hats of the other
sages, but does not see his own hat. Taking into account this information only, he tries to guess the color of hat he is wearing. The goal of the sages is to guarantee that at least one of
them guesses the color of hat correctly whatever the hats arrangement is. The
players are allowed to discuss and fix a strategy before the hat assignment.
After that any communication is prohibited. When the sages simultaneously say
their guesses, winning condition would be checked (is it true, that at least one
of the sages guesses correctly). The problem is ``whether the sages can
guarantee a win?''
The answer to the above problem is ``Yes!''. It can be justified gracefully.
Let us enumerate the sages and identify the colors of hats with residues modulo
$n$. Every sage sees every hat except his own. Let the $i$-th sage check the
hypothesis that the sum of all colors, including his own, equals $i$ modulo $n$,
and say the corresponding remainder. It is clear that the hypothesis of exactly
one of the sages is true, regardless of the hats arrangement. Thus this
sage guesses correctly the color of his hat.
A natural generalization of this problem is a game in which every sage can see
only some part of the others. Formally, let the sages be located in vertices
of some graph (``visibility graph''), the sage $i$ can see a color of the hat of
the sage $j$ if and only if there is an edge $(i, j)$ in the graph. This generalization was
introduced in~\cite{butler2009hat} and further was studied in a number of
papers~\cite{Gadouleau2015, Gadouleau2018, cycle_hats}. For example, the
connection of this Hats game with dynamical systems and coding theory was
analyzed in~\cite{Gadouleau2018}. M.\,Farnik defined $\text{\rm HG}(G)$ in his PhD thesis~\cite{Farnik2015} as the maximal number of hat colors for which the sages can guarantee
a win. He obtained some estimates of $\text{\rm HG}(G)$ in terms of the maximum
degree of the graph and the graph chromatic number. In~\cite{Alon2018} N.\,Alon et al.\ studied
$\text{\rm HG}(G)$ for some classes of graphs using mostly probabilistic methods.
The connection between $\text{\rm HG}(G)$ and other graph parameters was considered by
Bosek et al.~\cite{bosek19_hat_chrom_number_graph}.
W.\,Szczechla~\cite{cycle_hats} obtained a complicated result: in the case of three
colors, the sages can win on cycles with $n$ vertices if and only if $n$ is divisible by $3$ or $n
= 4$. A complete list of graphs on which the sages win in the case of three colors can be found
in~\cite{Kokhas2018}.
In addition to the above, a lot of other variants of the Hats game were
considered. For example, M.\,Krzywkowski described in~\cite{krzywkowski2010variations} 36 variants of the game rules and
most of them are probabilistic. Description of important results and
applications of this game can be found in the same paper.
In paper~\cite{Kokhas2018} authors explain how to reduce the problem of finding
winning strategies for sages on graphs (generally speaking, this problem is very cumbersome)
to SAT (the Boolean satisfiability problem). This makes it possible to study winning strategies by computer for small graphs quite efficiently.
In the present paper we consider a modification of the classical deterministic game on
a graph, in which the sages have different number of possible hat colors.
This modification is not only of its own interest but allows one to find more simple
strategies in the classical game, where the number of colors is constant.
This text is combined from papers \cite{KL19} and \cite{KLR19}.
An application of the technique developed in the present paper is given in~\cite{KL20}, where we build a planar graph with hat guessing number at least 14.
We introduce the following notation.
$\bullet$ $G = \langle {V, E} \rangle$ is a visibility graph, i.\,e.\ graph, at the
vertices of which the sages are located. We often identify the sages and the
vertices of $G$.
$\bullet$ $h\colon V\to {\mathbb N}$ is a hat function, or ``hatness'' for short,
$h(v)$ is a number of possible colors for the hat of sage $v$. For sage $A\in V$ let \emph{hatness of sage
A} be the value of $h(A)$. We assume that the list of colors used in this game is
known in advance, and the color of the hat of $A$ is one of the first $h(A)$ colors in this list.
We often identify the set of
possible hat colors of sage $A$ with set of residues modulo $h(A)$.
\begin{definition}
The {\sc Hats\ } game is the pair ${\mathcal G} = \langle {G, h} \rangle$, where $G$ is
a visibility graph, and $h$ is a hat function. The sages are located at
the vertices of visibility graph $G$ and participate in a \emph{test}. Before the test the sages should determine a public deterministic strategy. During the
test, every sage $v$ gets a hat of one of $h(v)$ colors. The sages try to guess
color of their own hats according to the chosen strategy, and if for each hats arrangement at least one of them guesses correctly, we say
that the sages \emph{win} or the game is winning. We call the graph in this case
also winning, keeping in mind that this property depends also on the
hat function. The game in which the sages have no winning strategy is said to be
\emph{losing}.
A game ${\mathcal G}_1=\langle {G_1, h_1} \rangle$ is \emph{a subgame} of the game ${\mathcal G} =
\langle {G, h} \rangle$, if $G_1$ is a subgraph of the graph $G$ and
$h_1=h\Big\vert_{V(G_1)}$.
When the adversary puts hats on the heads of all sages, i.\,e.\ assigns a possible
hat color to every sage, we obtain \emph{hats arrangement}. Formally, every hats
arrangement is a function $\varphi\colon V(G)\to {\mathbb Z}$, where
$0\leq\varphi(v)\leq h(v)-1$ for all $v\in V(G)$.
\end{definition}
We use standard notations of graph theory:
$C_n$ is an $n$-vertex cycle graph, $P_n$ is an $n$-vertex path,
$P_n(AB)$ is an $n$-vertex path with ends $A$ and $B$, $K_n$ is a
complete graph with $n$ vertices, $N(v)$ or $N_G(v)$ is a set of neighbors of
vertex $v$ in graph $G$.
Denote by $G^A$ a graph in which one of the vertices is $A$. This notation
is used to emphasize that the graphs under consideration share a
common vertex $A$.
By $\langle {G, k} \rangle$ we denote the game on graph $G$ with constant hat function that is equal to $k$. For example, in these terms, the classical game described in the first paragraph is $\langle {K_n, n} \rangle$.
A \emph{strategy} of the sage in vertex $A$ is a function $f_A$ that puts into correspondence to each hats arrangement on $N(A)$ possible color of sage $A$'s hat (i.e. an integer from 0 to $h(A)-1$). \emph{Collective strategy of the sages} is just the set $\{f_A\mid A\in V(G)\}$.
\medbreak
In the second section we consider the {\sc Hats\ } game on complete and ``almost complete'' graphs. The main result here is theorem~\ref{thm:clique-win}.
In the third section we develop ``theory of constructors'', which is a set of theorems that
allow one to construct new winning graphs from existing ones.
In the fourth section we develop new elegant approach to the {\sc Hats\ }
game. We describe new game ``Rook check'' that is, in fact, equivalent to the {\sc Hats\ } game on a
$4$-cycle. It expands the arsenal of combinatorial tools for constructing
strategies. We present a complete research of game ``Rook check''
and discuss some of its variations.
In the fifth section we analyze the {\sc Hats\ } game on cycles with
arbitrary hat functions.
\section{{\sc Hats\ } game on complete and almost complete graphs}
\subsection{Game on complete graphs}
In this section we describe the game on a complete graph with vertices $A_1$,
$A_2$, \dots, $A_n$ and arbitrary hat function $h$. Let $a_i=h(A_i)$. The following theorem completely solves
the problem ``for which hat functions on a complete graph the sages win?''
\begin{theorem}\label{thm:clique-win}
Let hatnesses of $n$ sages located in vertices of complete graph, be $a_1$,
$a_2$, \dots, $a_n$. Then the sages win if and only if
\begin{equation}
\frac1{a_1}+\frac1{a_2}+\ldots+\frac1{a_n}\geq 1.
\label{eq:clique-win}
\end{equation}
\end{theorem}
\begin{proof}
The necessity of condition~\eqref{eq:clique-win} is obvious: for each strategy of the sages, the $i$-th sage
guesses correctly exactly on $\frac 1{a_i}$ of all hat arrangements, so if the sum is
less than 1, there exists an arrangement for which no one guesses correctly.
We give two proofs of the sufficiency of condition~\eqref{eq:clique-win}. The
first one uses Hall's marriage theorem and the second one presents the
strategy, that generalizes the arithmetic strategy for the classical game.
\emph{Proof 1.}
Let us prove that if the sum is greater than or equal to 1, then the sages win. The
existence of a winning strategy is proved by using Hall's marriage theorem.
For each sage $i$, we split the set of all hat arrangements into subsets of $a_i$
elements each in the following way. Delete the color $c_i$ of the $i$-th sage from
each hat arrangement. For the remaining set $c=(c_1, \dots, c_{i-1},
\bar{c_i}, c_{i+1}, \dots, c_n)$ (symbol ``bar'' means that this color is
omitted) put
$$
A_c^i=\{(c_1,\dots, c_{i-1}, \ell, c_{i+1}, \dots, c_n ) \mid 0\leq \ell\leq
a_i-1
\}.
$$
Set $A_c^i$ consists of ``potentially possible'' hat arrangements from the
point of view of the $i$-th sage: he sees that colors of the other sages form
a set $c$ and mentally appends to it all possible colors $\ell$ of his own hat.
Bearing in mind the application of Hall's theorem, we associate the sets $A_c^i$ with ``girls'' and hat arrangements with ``boys''.
The boy $s$ and girl $A_c^i$ \emph{know each other} if the hat arrangement $s$ is an element of $A_c^i$. Every
boy knows $n$ girls, and for each $i$ every boy knows exactly one girl of
type $A_c^i$. Every girl~$A_c^i$ knows exactly $a_i$ boys.
Let us prove that there exists a matching sending each boy to a girl. It
suffices to check the theorem condition, i.e., that every $m$ boys know together at
least $m$ girls. Consider an arbitrary set of $m$ boys. Since for each $i$,
the girl $A_c^i$ knows exactly $a_i$ boys, any $m$ boys know in
total at least $\frac m{a_i}$ girls of type $A_c^i$ for each $i$.
Summing over $i$, we find that the
total number of girls familiar with these $m$ boys is at least
$$
\frac m{a_1}+\frac m{a_2}+\ldots+\frac m{a_n}\geq m.
$$
This shows that the condition of Hall's theorem is satisfied.
Thus, there exists a matching that assigns to each hat
arrangement a set of type $A_c^i$. Note that if the equality
$\frac1{a_1}+\frac1{a_2}+\ldots+\frac1{a_n}= 1$ holds, then this matching selects, in fact, one element in each set $A_c^i$. Otherwise, if
$\frac1{a_1}+\frac1{a_2}+\ldots+\frac1{a_n}> 1$, then ``there are
lonely girls'', i.\,e.\ no elements are selected in some sets $A_c^i$.
The constructed matching allows to define a strategy for the sages. Let the $j$-th sage
act by the rule: looking at hats of the other sages, i.\,e. at the set of colors
$$
c =(c_1,\dots, c_{j-1}, c_{j+1}, \dots, c_n),
$$
he reconstructs the set $A_c^j$ which, in fact, consists of all possible ways
to supplement the set $c$ to the hat arrangement on the whole graph. The sage
should say the color marked in set $A_c^j$ by our matching (if there is
no marked element, he says color arbitrarily). Since each hat arrangement is
mapped by our matching to the selected element of one of sets~$A_c^i$,
the $i$-th sage guesses correctly his own color for this hat arrangement.
\emph{Proof 2.}
Let $N=\text{LCM}(a_1, a_2, \dots, a_n)$ (the least common multiple). For $k$ from
$1$ to $n$ set $d_k=N/a_k$. We identify the set of all possible hat colors of
the $k$-th sage and the set of integers $\{d_k, 2d_k, \dots, a_kd_k\}$
modulo~$N$. Now we describe the winning strategy of the sages. Let the $k$-th sage
get hat of color $x_kd_k$, where $x_k\in\{1,2,\dots,a_k\}$ ($1\leq k\leq n$).
Let
$$
S=x_1d_1+x_2d_2+\ldots +x_nd_n\pmod{N}.
$$
Each sage, seeing those around him, knows all the summands of this sum, except for his own. Making assumption about the value of the sum, he can calculate the color
of his own hat. Let the first sage check the hypothesis $S\in\{1, 2, \dots, d_1\}$;
the second sage check the hypothesis $S\in\{d_1+1, d_1+2, \dots, d_1+d_2\}$ and so
on, the $n$-th sage check the hypothesis $S\in\{d_1+d_2+\ldots+d_{n-1}+1, \dots,
d_1+d_2+\ldots+d_{n-1}+d_n\}$. The hypothesis of the \hbox{$k$-th} sage involves
$d_k$ consecutive integers, among which exactly one is divisible by~$d_k$.
This integer determines the color of hat that the $k$-th sage should say.
We note that the inequality $d_1+d_2+\ldots+d_{n-1}+d_n\geq N$ holds by the definition of
numbers $d_k$ and inequality \eqref{eq:clique-win}. This
means that in the above strategy, the hypotheses of the sages cover all remainders modulo $N$.
So the sages win.
\end{proof}
\begin{definition}
The strategy of the sages is said to be \emph{precise} if for each hat
arrangement exactly one of the sages' guesses is correct.
\end{definition}
\begin{corollary}
Precise strategies exist if and only if the visibility graph is complete and
the hat function satisfies equality
\begin{equation}\label{eqn:sum=1}
\frac1{a_1}+\frac1{a_2}+\ldots+\frac1{a_n}= 1.
\end{equation}
\end{corollary}
\begin{proof}
Let the sages act according to some strategy. If the graph contains two non-adjacent
vertices $A$ and $B$, then we put arbitrary hats to all sages except for $A$ and $B$.
Now the answers of $A$ and $B$ are determined by the strategy. Let us give them hats
for which their guesses are correct. With this hat arrangement, $A$, $B$
and, possibly, someone else, guess correctly. Therefore, the strategy is not
precise. The fact that the existence of a precise strategy on a complete graph is
equivalent to equality~\eqref{eqn:sum=1} follows from the proof of
theorem~\ref{thm:clique-win}.
\end{proof}
\subsection{Game on almost complete graphs}
\begin{definition}
An \emph{almost complete} graph is a complete graph with one edge removed. And
an \emph{almost clique} is an almost complete subgraph of some graph.
\end{definition}
\begin{corollary}
Let $G$ be an almost complete graph obtained from a complete graph $K_n$ with
vertices $A_1$, $A_2$, \dots, $A_n$ by removing the edge $A_{n-1}A_n$. Let
the $i$-th sage get hat of one of $a_i$ colors.
If graph $G$ is winning, then
\begin{equation}\label{ineq:clique-minus-edge}
\frac1{a_1}+\frac1{a_2}+\ldots+\frac1{a_n}-\frac1{a_{n-1}a_n}\geq 1.
\end{equation}
\end{corollary}
\begin{proof}
The fraction of total number of the arrangements for which $A_{n-1}$ or $A_n$
guesses correctly, is equal to
$$
\frac1{a_{n-1}}+\frac1{a_n} -\frac1{a_{n-1}a_n}.
$$
Indeed, let us fix hat colors for the sages $A_1$, \dots, $A_{n-2}$. Then the answers of
sages $A_{n-1}$ and $A_n$ are determined by the strategy. It is not difficult
to see that there are exactly $a_{n-1}+a_n-1$ hat arrangements among
$a_{n-1}a_n$ possible arrangements for $A_{n-1}$ and $A_n$, where
either $A_{n-1}$ or $A_n$ (maybe both) guesses correctly. As for the other
sages, each sage $A_k$ guesses correctly on $\frac1{a_k}$ fraction of all
arrangements. So if inequality~\eqref{ineq:clique-minus-edge} does not hold,
there exists a hat arrangement, where nobody guesses correctly.
\end{proof}
We call the game on an almost complete graph \emph{almost precise} if
inequality~\eqref{ineq:clique-minus-edge} turns into an equality and the sages win.
In an almost precise game two sages ($A_{n-1}$ and $A_n$) guess their colors
correctly on the $\frac{1}{a_{n-1}a_n}$ fraction of all arrangements, and for all
other arrangements only one of the sages guesses.
We give a necessary condition for the game to be an almost precise game.
\begin{theorem}\label{thm:almost_precise_game}
Let $G$ be a complete graph with $n$ vertices $A_1$, $A_2$, \dots, $A_n$, in
which edge~$A_{n-1}A_n$ has been removed, and $h(A_i)=a_i$, $i=1$, \dots, $n$.
Let the game be almost precise, i.\,e.\ the following equality holds
\begin{equation}\label{eq:clique-minus-edge}
\frac1{a_1}+\frac1{a_2}+\ldots+\frac1{a_n} -\frac1{a_{n-1}a_n} = 1.
\end{equation}
Then $a_1a_2\cdots a_{n-2}$ is divisible by $a_{n-1}a_n$.
\end{theorem}
\begin{proof}
The summands $\frac1{a_1}$, \dots, $\frac1{a_{n-2}}$ have clear probabilistic
interpretation: $\frac1{a_i}$ is the fraction of the hat arrangements for which sage
$A_i$ guesses correctly.
Let $X$ be the set of hat arrangements for the first $n-2$~sages, i.\,e., in
other words, $X$ is a collection of sets of $n-2$~colors, where the first
color is a possible hat color of sage~$A_1$, the second color is a possible hat
color of sage~$A_2$ and so on, the $(n-2)$-th color is a possible hat color of
sage~$A_{n-2}$. Let $\alpha = a_1a_2\cdots a_{n-2}$, then $|X|=\alpha $.
We split set $X$ into subsets~$L_i$ ($i=1$, $2$, \dots, $a_{n-1}$) such that if
sage~$A_{n-1}$ sees a set of colors from~$L_i$ on his neighbors, then he
says color~$i$. The sets~$R_j$ ($j=1$, 2, \dots, $a_n$) for sage~$A_n$ are defined
similarly. Let $L_k$ be the set~$L_i$ of minimum cardinality, $|L_k|=M\leq
\frac{\alpha}{a_{n-1}}$. Now we consider the sets $R_j\setminus L_k$ ($j=1$, 2, \dots,
$a_n$). These sets contain $\alpha-M$ elements in total and hence if $R_m\setminus
L_k$ is the set of minimum cardinality, then $|R_m\setminus L_k|\leq
\frac{\alpha-M}{a_n}$. Therefore,
\begin{align}
|L_k \cup R_m|&=|L_k|+|R_m\setminus L_k|\leq M+\frac{\alpha-M}{a_n}
=
\frac{\alpha}{a_n} + M\biggl(1- \frac{1}{a_n}\biggr)
\leq
\frac{\alpha}{a_n} + \frac{\alpha}{a_{n-1}}\biggl(1- \frac{1}{a_n}\biggr)
\label{eqn:2nerav}
\\&=
\alpha\biggl(\frac{1}{a_{n-1}}+ \frac{1}{a_n}-\frac{1}{a_{n-1}a_n}\biggr)
=
\alpha\biggl(1-\sum_{i=1}^{n-2}\frac{1}{a_{i}}\biggr)
=
\alpha -\frac{\alpha}{a_1}-\ldots -\frac{\alpha}{a_{n-2}}.
\notag
\end{align}
Thus, if sage~$A_{n-1}$ has the hat of color~$k$, and sage $A_n$ has the hat of
color~$m$,
and the remaining sages have colors of hats arrangement from the set
$X\setminus(L_k \cup R_m)$, then one of the sages $A_1$, $A_2$, \dots $A_{n-2}$
guesses correctly. The fraction of hat arrangements, for which this
happens, is greater than or equal to $\rho =\frac1{a_1}+\frac1{a_2}+\ldots +\frac1{a_{n-2}}$.
But $\rho$ also bounds from above the fraction of
arrangements on which the sages $A_1$, $A_2$, \dots, $A_{n-2}$ win. Therefore both
inequalities \eqref{eqn:2nerav} are equalities. Then
$|L_k|=\frac{\alpha}{a_{n-1}}$ (moreover $|L_i|=\frac{\alpha}{a_{n-1}}$ for
all $i$), and $|R_ m\setminus
L_k|=\frac{\alpha}{a_{n}}-\frac{\alpha}{a_{n-1}a_{n}}$. Analogously
$|R_j|=\frac{\alpha}{a_{n}}$. Thus, $|R_m \cap
L_k|=\frac{\alpha}{a_{n-1}a_{n}}$, and $\alpha$ is divisible by
${a_{n-1}a_{n}}$.
\end{proof}
\begin{corollary}
Inequality~\eqref{ineq:clique-minus-edge} is not sufficient for the sages to win
on almost complete graphs. For $n=4$ almost complete graph with hat function
$a_1=3$, $a_2=6$, $a_3=3$, $a_4=4$ (edge $A_3A_4$ is removed) is losing, though
it satisfies inequality \eqref{ineq:clique-minus-edge} and
equality~\eqref{eq:clique-minus-edge}.
\end{corollary}
It immediately follows from theorem~\ref{thm:almost_precise_game} because
$a_1a_2$ is not divisible by $a_3a_4$ here.
Now consider two cases when the conditions of theorem~\ref{thm:almost_precise_game} are not sufficient.
\begin{lemma}
Let $G$ be an almost complete graph on vertices $A_1$, \dots, $A_n$, $n\geq 4$,
and without the edge $A_{n-1}A_n$. Let hat function $h$ satisfy the
equality~\eqref{eq:clique-minus-edge}. Then
\begin{enumerate}[1), nosep]
\item if $h(A_1)=2$, then the sages lose.
\item if $h(A_1)=3$, $h(A_n)=2$, then the sages lose.
\end{enumerate}
\end{lemma}
\begin{proof}
Let the sages fix some strategy.
1) Let the adversary give to the sages $A_2$, \dots, $A_{n-2}$ an arbitrary
collection of hats. We determine which color sage $A_{n-1}$ says in accordance with his
strategy when the hat of $A_1$'s is of color 0, and give the hat of this
color to $A_{n-1}$. Analogously we determine which color sage $A_{n}$ says in accordance with his
strategy when the hat of $A_1$ is of color~1 and give the hat of this
color to $A_{n-1}$. Thus we give the hats to all the sages except $A_1$, and hence the guess of
the sage $A_{1}$ is now determined. We can give him a hat of the color
he guesses correctly. Then two sages seeing each other guess
correctly. But this is impossible in almost precise games.
2) Let the adversary give to the sages $A_2$, \dots, $A_{n-2}$ an arbitrary
collection of hats. There are hats of three colors for $A_1$. Let us consider
the guess of $A_n$ in accordance with the strategy for each of these three colors.
In two of the cases, the sage $A_n$ says the same color and we give him the hat of this
color. If in the third case $A_n$ says another color, then we give
$A_{n-1}$ the hat of the color which $A_{n-1}$ says in this case (so
he will guess). Otherwise, give to $A_{n-1}$ an arbitrary hat. Thus regardless
of the color $A_{1}$'s hat, one of the sages $A_{n-1}$, $A_{n}$
guesses correctly. Now the answer of $A_1$ in accordance with the strategy is determined.
We give $A_1$ a hat the color of which he guesses correctly. Thus
two sages seeing each other guess correctly, which is impossible.
\end{proof}
For example, the sages lose on almost complete graph on $4$ vertices $A_1$,
$A_2$, $A_3$, $A_4$ (edge $A_3A_4$ is absent), where $h(A_1)=2$, $h(A_2)=10$,
$h(A_3)=4$, $h(A_4)=5$.
Finally, we demonstrate an example, where the
equality~\eqref{eq:clique-minus-edge} holds and almost precise game is possible.
\begin{lemma}
Let $G$ be an almost complete graph on 4 vertices $A$, $B$, $C$, $D$, in which
edge~$CD$ has been removed. Let $h(A)=6$, $h(B)=6$, $h(C)=2$, $h(D)=3$. Then
the sages win.
\end{lemma}
\begin{proof}
We interpret the hat colors of sages $A$ and $B$ as integers modulo~6, color of~$C$
as integer modulo~2, color of~$D$ as integer modulo~3. Denote the hat colors of
sages $A$ and $B$ by $a$ and~$b$. Let sage $C$ say color $c= (a+b) \bmod 2$,
let sage $D$ say color $d= (a+b) \bmod 3$. If sages $C$ and $D$ did not guess
correctly, then $a+b=c+1 \bmod 2$ holds and also either $a+b=d+1 \bmod 3$ or $a+b=d+2 \bmod 3$. Then let $A$ compute
his own color assuming that $a+b=c+1 \bmod 2$ and $a+b=d+1 \bmod 3$, and $B$
compute his color assuming that $a+b=c+1 \bmod 2$ and $a+b=d+2 \bmod 3$.
\end{proof}
This result can also be obtained by using the constructor of theorem~\ref{thm:addA2to_edge}.
\subsection{Maximum number of hats}
We present a funny corollary of theorem~\ref{thm:clique-win}. We ask
what is the maximum number of hats given to a sage in a winning graph
on $n$ vertices? To make the question meaningful we require that the hat function
makes the graph \emph{simple}, i.\,e.\ the sages do not win on any proper
subgraph of it. Obviously, it is sufficient to find the maximum number for complete
graphs.
So the question is equivalent to the following number-theoretical combinatorial
problem: given~$n$, find $\max(a_1, a_2, \dots, a_n)$, where the positive
integers $a_i$ satisfy relation~\eqref{eq:clique-win}. The solution of this problem is known, namely,
this maximum is determined by Sylvester's sequence $(s_n)$:
$$
s_0=2, \qquad s_n=1+\prod\limits_{i=0}^{n-1}s_i.
$$
and $\max(a_1, a_2, \dots, a_n)=s_n-1$. The proof can be found
in~\cite{Soundararajan2005}.
Sylvester's sequence grows very quickly, for example, $s_8$ is a 27-digit number. Thus
if 8 sages are going to win in the {\sc Hats\ } game on the complete graph then
one of them can be given 27-digit number of hats! In recreational mathematics the
phrases ``number 8'' and ``large numbers'' are associated with the story about
the inventor of chess, who asked to be given $2^{64}-1$ wheat grains as a reward. The
number $2^{64}-1$ has ``only'' 21 digits. In fairness, we note that both sequences
grow as $C^{2^n}$, where $C$ is a constant.
\section{Constructors}
In this section we describe several constructors.
Each constructor is a theorem providing a tool which allows to construct
new winning games by combining several graphs in a new graph.
\subsection{Product}
\subsubsection{Constructor ``Product''}\label{subsubsec:product}
\begin{definition}
Let $A\in V(G)$. We say that a winning graph satisfies the \emph{maximum condition
in vertex~$A$} if increasing the hatness of vertex $A$ by 1 makes the graph
losing.
Let $G_1=\langle {V_1, E_1} \rangle$, $G_2=\langle {V_2, E_2} \rangle$ be two
graphs sharing common vertex $A$. \emph{The sum of graphs $G_1$, $G_2$ with
respect to vertex $A$} is the graph $\langle {V_1\cup V_2, E_1\cup E_2} \rangle$.
The sum is denoted by $G_1\gplus{A}G_2$.
Let ${\mathcal G}_1 =\left\langle G_1, h_1 \right\rangle$, ${\mathcal G}_2 =\left\langle G_2, h_2
\right\rangle$ be two games such that $V_1\cap V_2 = \{A\}$. The game ${\mathcal G} =
\langle {G_1 \gplus{A} G_2, h} \rangle$, where $h(v)$ equals $h_i(v)$ for $v\in
V(G_i)\setminus\{A\}$ and $h(A)=h_1(A) \cdot h_2(A)$
(fig.~\ref{fig:multiplication}), is called \emph{a product} of games ${\mathcal G}_1$,
${\mathcal G}_2$ with respect to vertex $A$. This product is denoted by ${\mathcal G}_1\gtimes{A}
{\mathcal G}_2$.
In such constructions, it is convenient to define the color of vertex $A$ as a
pair $(c_1, c_2)$, where $0\leq c_1\leq h_1(A)-1$, $0\leq c_2\leq h_2(A)-1$.
In this case, we say that $A$ has \emph{composite color}.
\end{definition}
\begin{theorem}[on the product of games]\label{thm:multiplication}
Let ${\mathcal G}_1 = \langle {G_1^A, h_1} \rangle$ and ${\mathcal G}_2 = \langle {G_2^A, h_2}
\rangle$ be two games such that $V(G_1)\cap V(G_2)=\{A\}$. If the sages win in
games ${\mathcal G}_1$ and ${\mathcal G}_2$, then they also win in game ${\mathcal G} = {\mathcal G}_1\gtimes{A}
{\mathcal G}_2$.
\end{theorem}
\begin{figure}
\caption{The product of games.}
\label{fig:multiplication}
\end{figure}
\begin{proof}
Let the hat of sage $A$ have composite color $(c_1,c_2)$, where $c_i$ is the
hat color of~$A$ in game~${\mathcal G}_i$. We fix winning strategies for games $G_1$ and
$G_2$ and construct strategy for the game ${\mathcal G}_1\gtimes{A} {\mathcal G}_2$. Let all the sages
in the graph $G_i\setminus\{A\}$ play according to the winning strategy for the game ${\mathcal G}_i$
(the neighbors of $A$ in $G_i$ look only at the component $c_i$ of the
composite color of $A$). The sage~$A$ plays in accordance with both strategies by giving composite
answer $(c_1,c_2)$; where $c_i$ ($i=1,2$) corresponds to his winning strategy
for the game~${\mathcal G}_i$ (for calculating the answer, sage~$A$ looks only on his
neighbors in the graph~$G_i$).
The presented strategy is winning because either someone from~
$G_1\setminus\{A\}$ or from~$G_2\setminus\{A\}$ will guess correctly, or
$A$ guesses correctly both components of his color.
\end{proof}
\begin{corollary}\label{cor:tree_with_exp_colors}
Let graph $G$ be a tree. The sages win in the game $\langle G, h\rangle$, where $h(v)=2^{deg(v)}$.
\end{corollary}
\begin{proof}
The sages win in the classical game $\langle {P_2, 2} \rangle$. Multiplying
$|E(G)|$ copies of this game, we get the required result.
\end{proof}
Corollary~\ref{cor:tree_with_exp_colors} was proved in~\cite[theorem
11]{bosek19_hat_chrom_number_graph} by induction.
In the sequel, we use the following notation for a hat function taking a constant value on
the whole graph except for several vertices. Let $A$, $B$, $C$ be some vertices
of the graph. The notation $h_4^{A2B2C3}$ represents a hat function, for which $h(A)=2$,
$h(B)=2$, $h(C)=3$ (superscript) and $h(v)=4$ for all other $v\in V(G)$ (subscript).
The following corollary is an important special case of the previous one.
\begin{corollary}\label{cor:path2442}
The sages win in the game $\langle {P_n(AB), h_4^{A2B2}} \rangle$.
\end{corollary}
We note, by the way, that together with theorem~\ref{thm:multiplication}
this corollary is strengthened analog of lemma on ``pushing a hint''~\cite[lemma
10]{Kokhas2018}. Namely, if we consider the hatness 2 of vertex $A$ as a hint,
which bounds the number of colors for sage $A$ (there should be 4 colors, but we
simplify the game for this sage), then we can ``push'' this hatness 2 along path
$AB$, where all the other sages have hatness $4$. As a result, we see that in graph
$G_1\gplus{A} P_n(AB)$ this hatness 2 ``moves'' from vertex $A$ to vertex $B$.
\subsubsection{Non-maximality of products}
Theorem~\ref{thm:multiplication} shows that when we stick together two winning
graphs $G_1$ and $G_2$ by vertex $A$, we can greatly increase the hatness of
vertex $A$, so that the obtained game is still winning. It is natural to
assume that the initial games are simple. In the following example we can
increase $h(A)$ even beyond $h_1(A)h_2(A)$ while keeping the graph winning. This works even in the
case when both graphs $G_i$ satisfy the maximum condition in vertex $A$ (and
in all other vertices, too).
Let graphs $G_1$ and $G_2$ be complete graphs with 5 vertices and hatnesses 4,
5, 5, 5, 6. These graphs are winning by theorem~\ref{thm:clique-win}.
If we increase the hatness of any vertex, then inequality~\eqref{eq:clique-win} is violated. So
these graphs satisfy the maximum condition in all vertices. Let $A$ be the
vertex with hatness $6$ in both graphs. Let ${\mathcal G}_{37}=\langle {G_1\gplus{A} G_2,
h}\rangle$, where $h$ has the same values for all vertices except $A$ as in
the initial graphs, and $h(A)=37=6\cdot 6 + 1$ (fig.~\ref{fig:big-bow}).
\begin{theorem}\label{thm:bantik}
The sages win in game ${\mathcal G}_{37}$.
\end{theorem}
\begin{figure}
\caption{Game ${\mathcal G}_{37}$.}
\label{fig:big-bow}
\caption{Game ``Medium bow''.}
\label{fig:mid-bow}
\end{figure}
\begin{proof}
We deal with integers modulo $740=4\cdot5\cdot37$. If a sage's hatness equals $k$,
then the possible hat colors of this sage are defined to be the integers modulo 740, divisible
by~$\frac{740}{k}$. Let us consider a hat arrangement. Denote by $S_1$ and $S_2$ the
sums of the colors in the left and the right \hbox{5-cliques}. Let the
sage of hatness~4 in the left clique assume that the color of his hat is
such that $S_1\in \{1,2,\ldots,185\}$.
The possible colors of his hat are $\frac{740}4=185$, $185\cdot 2$, $185\cdot 3$ and
$185\cdot 4\equiv 0$. For exactly one of these integers the sum $S_1$ belongs to the set
$ \{1,2,\ldots,185\}$ and the sage says the color corresponding to this integer.
Similarly, three sages with hatness~5 in the left clique check the
hypotheses $S_1\in \{186,\ldots\, 333\} $, $S_1\in\{334,\ldots\, 481\}$, and
$S_1\in\{482,\ldots\, 629\} $, respectively (each of these sets contains
$148=\frac{740}5$ numbers). The remaining hat arrangements (for which
$S_1\in\{630,\ldots, 740\}$) are left to sage $A$. His hatness equals 37 and his
colors are residues divisible by 20. Therefore, sage $A$ has a choice of
$\lceil (740-630+1)/20\rceil=6$ consecutive colors. The right sages act
similarly, but replacing $S_1$ with~$S_2$. So sage $A$ has also a choice of 6 consecutive
colors for the $S_2$-hypothesis.
Let us describe details of sage $A$'s strategy. Let the sages of left and
right cliques use different rules for converting the color of sage $A$ into an
integer. The colors of sage $A$ are in fact integers modulo 37, but we
consider them as integers modulo~740 which are divisible by~20. Let the sages
on the left clique convert a color $20x \bmod 740 $ to an integer $x \bmod
37$. Let in the same moment the sages on the right clique convert a color $20x
\bmod 740 $ to an integer $6x \bmod 37$. (The map $x\mapsto 6x$ is a bijection
on the set of integers modulo~37.)
As is easily seen, any sets $\{x,x+1,\ldots,x+5\}$ and $\{6y,6y+6,\ldots, 6y+30\}$ of residues modulo 37 intersect in at most
one element. Then $A$ says the color from the intersection (or says an
arbitrary color if the intersection is empty). Then for any $S_1$ and $S_2$,
either $A$ guesses both sums, or someone on the right or on the left clique
guesses.
\end{proof}
It can analogously be proved that the sages win on graph ``Medium bow'',
fig.~\ref{fig:mid-bow}. This fact disproves the hypotheses 4 and 6 from
\cite{bosek19_hat_chrom_number_graph}.
\subsection{Substitution}
The following constructor removes a vertex of a graph $G_1$ and puts a graph $G_2$ in
its place.
\begin{definition}
Let $G_1$ and $G_2$ be two graphs without common vertices. A
\emph{substitution of graph~$G_2$ to graph~$G_1$ in place of vertex~$v$}
is defined to be the graph $(G_1\setminus\{v\})\cup G_2$ with additional edges
connecting each vertex of $G_2$ with each neighbor of~$v$, see
fig.~\ref{fig:subs}. We denote the substitution by $G_1[v:=G_2]$.
\end{definition}
\begin{figure}
\caption{A substitution.}
\label{fig:subs}
\end{figure}
\begin{theorem}\label{thm:substitution}
Let the sages win in games ${\mathcal G}_1=\langle {G_1, h_1} \rangle$ and ${\mathcal G}_2=\langle
{G_2, h_2} \rangle$. Let $G$ be the graph of the substitution $G_1[v:=G_2]$, where
$v\in G_1$ is an arbitrary vertex. Then the game ${\mathcal G}=\langle{G, h} \rangle$ is
winning, where
\[
h(u) = \begin{cases}h_1(u)&u\in G_1\\ h_2(u)\cdot h_1(v)&u\in G_2\end{cases}
\]
\end{theorem}
\begin{proof}
Let $f_1$ and $f_2$ be winning strategies in the games ${\mathcal G}_1$ and ${\mathcal G}_2$,
respectively.
Let each sage $u$ of the subgraph~$G_2$ of $G$ get a hat of composite
color~$(c_1, c_2)$, where $0\leq c_1\leq h_1(v)-1$, $0\leq c_2\leq h_2(u)-1$.
These sages can calculate the coordinates of their composite colors independently:
sage $v$ finds colors $c_1$ and $c_2$ by using strategies~$f_1(v)$ and~$f_2(v)$,
respectively. In particular, this means that all sages of~$G_2$
say composite colors with the same first component.
Those of the other sages of~$G$, who are not the neighbors
of~$v$, play in accordance with the strategy~$f_1$. After the substitution the sages of~$G_1$, who are neighbors of~$v$, see that instead of one
neighbor~$v$ they have now $|V_2|$ neighbors (and, generally speaking, with
different hat colors). These sages do as follows. They see all the
hats of the sages of~$G_2$ and know their strategies.
Therefore, they understand, which of the sages of~$G_2$
guesses the second coordinate of his color. Denote this player
by~$w$ (if there are several winners, then they choose, for example,
the first winner in the pre-compiled list).
Then each former neighbor of~$v$ looks only at~$w$, more
precisely, at the first component of $w$'s color, and plays
in accordance with the strategy~$f_1$.
As a result, either someone from subgraph $G_1 \setminus\{v\}$ guesses
correctly, or $w$ guesses both components of his color correctly.
\end{proof}
\begin{corollary}\label{cor:triangle244-win}
The sages win in the games shown in fig.~\ref{fig:cor:triangle244-win}.
\end{corollary}
\begin{proof}
We apply theorem~\ref{thm:substitution} to games ${\mathcal G}_1=\langle P_2, 2
\rangle$ and ${\mathcal G}_2=\langle P_n(AB), h_4^{A2B2}\rangle$.
\end{proof}
Note, that the win of the sages of the first graph
(fig.~\ref{fig:cor:triangle244-win}) also follows from
theorem~\ref{thm:clique-win}.
\begin{figure}
\caption{Substitution of game $\langle P_n(AB), h_4^{A2B2}\rangle$.}
\label{fig:cor:triangle244-win}
\end{figure}
\subsection{Vertices attaching}
The following theorems-constructors allow to obtain new winning or losing
graphs by attaching one or two new vertices to the existing graph.
\subsubsection{Attaching a vertex of hatness 2}
\begin{theorem}\label{thm:addA2}
Let $\langle {G, h} \rangle$ be a winning game and $B, C\in V(G)$. Then the
sages win in the game $\langle {G', h'} \rangle$, where $G'$ is the graph obtained
from $G$ by adding a vertex $A$ and edges $AB$, $AC$
(fig.~\ref{fig:addA2}), and hat function is given by formula
\[
h'(v)=
\begin{cases}
2, &\text{if}\ v = A,\\
h(v) + 1,&\text{if}\ v = B\ \text{or}\ C,\\
h(v), &\text{else}.
\end{cases}
\]
\end{theorem}
\begin{figure}
\caption{Attaching a vertex of hatness $2$.}
\label{fig:addA2}
\end{figure}
\begin{proof}
Let us describe a winning strategy. After attaching the new vertex, sages $B$ and $C$
have one new possible color. Let sage~$A$ say ``1'' if he sees at least one
hat of the new color on sages~$B$ and $C$, otherwise $A$
says ``0''. If sages~$B$ and $C$ see a hat of color 0 on~$A$, then let them both
say the new color. Thus, if $A$'s color is 0, then one of the sages $A$, $B$
and $C$ guesses correctly. If $A$'s color is 1, then $B$ and $C$ may assume that
they do not have the new color, and therefore can
play by their strategies of the game~$\langle {G, h} \rangle$.
\end{proof}
\begin{corollary}\label{cor:cycle323}
Let $G$ be cycle $C_n$ \ $(n \geq 4)$, and let $B$, $A$ and $C$ be
three consequent vertices of the cycle. Then the sages win in the game
$\langle {G, h_4^{B3A2C3}} \rangle$.
\end{corollary}
\begin{proof}
By corollary~\ref{cor:path2442} the sages win on graph $P_{n-1}(CB)$ with
hatnesses $2,4,\dots, 4,2$. Attaching vertex $A$ to this graph gives a winning graph by theorem~\ref{thm:addA2}.
\end{proof}
This corollary strengthens lemma ``on the hint $A-1$ for cycle'' \cite[lemma
9]{Kokhas2018} without any technical calculations.
The following constructor shows that if the vertices
$B$ and $C$ in theorem~\ref{thm:addA2} are adjacent, then the numbers of colors for these vertices can greatly be increased.
\begin{theorem}\label{thm:addA2to_edge}
Let $\langle G,h\rangle$ be a winning game, and let $BC$ be an edge of the
graph~$G$. Consider a graph $G'=\left\langle V', E' \right\rangle$ obtained by
adding a new vertex~$A$ and two new edges to graph~$G$: $V'=V\cup \{A\}$, $E'=E\cup
\{AB, AC\}$. Then the sages win in the game $\langle G',h'\rangle$ (see
fig.~\ref{fig:addA2_with_edge}), where
\[
h'(v) = \begin{cases}
2,&\text{if}\ v = A\\
2h(v),&\text{if}\ v = B \text{ or } v = C\\
h(v),&\text{otherwise}
\end{cases}
\]
\end{theorem}
\begin{figure}
\caption{Attaching a vertex of hatness $2$ to edge $BC$.}
\label{fig:addA2_with_edge}
\end{figure}
\begin{proof}
Let the sages $B$ and $C$ have composite colors $(c,\epsilon)$, where $c$ is a
possible hat color in the game $\langle G,h\rangle$, $\epsilon\in\{0,1\}$. Let
sage~$A$ say color $c(A)=\epsilon_B +\epsilon_C \pmod 2$. Let sages $B$ and
$C$ look at the colors of their neighbors in graph~$G$, calculate the colors
$c(B)$, $c(C)$ in accordance with their winning strategy in game $\langle G,h\rangle$ and take
these colors as the first coordinates of their composite colors. By seeing
sage~$A$'s hat as well as each other hat, the sages $B$ and $C$ can calculate
the values $\epsilon_B$ and~$\epsilon_C$ for which $A$ does not guess correctly, they take
these bits as second components.
\end{proof}
Attaching edge $BA$ with leaf $A$ of hatness 2 to vertex $B$ of graph $G$ can be
interpreted as the product of the game on graph $G$ and the classical game $\langle
{P_2(AB), 2} \rangle$. If the game on graph $G$ was winning, then by
the theorem on the product, we can double the hatness of vertex $B$ to obtain the winning game $G\gplus{B} BA$. In the following constructor we attach the edge to
a losing game, change the hatness of vertex $B$ to $2h(B)-1$, and as a result we
get a losing game.
\begin{theorem}
Let ${\mathcal G} = \langle {G, h} \rangle$ be a losing game and $B$ be an arbitrary
vertex of graph~$G$. Let $G'=\left\langle V', E' \right\rangle$ be a graph
obtained by attaching a new pendant vertex~$A$ to graph~$G$: $V'=V\cup \{A\}$,
$E'=E\cup \{AB\}$. Then the sages lose in the game $\langle G',h'\rangle$, where
$h(A)=2$, $h'(B)=2h(B)-1$ and $h'(u)=h(u)$ for other vertices $u\in V$.
\end{theorem}
\begin{proof}
Let the sages fix a strategy $f$ on graph~$G'$. We construct a losing hat
arrangement for this strategy. For each of the $2h(B)-1$ possible colors of $B$'s
hat, sage~$A$ has one (of 2 possible) answers in accordance with his strategy. He
says one of these two colors less often, namely, at most $h(B) - 1$ times. Let
the adversary give to sage $A$ the hat of this ``rare'' color. Then the strategy
of sage $B$ in game ${\mathcal G}$ is now completely determined. Let the adversary use
only the hats of those $h(B)$ colors for sage $B$, for which $A$ does not
guess correctly. Under this restriction, the adversary can nevertheless construct a
losing hat arrangement on $G$, because the game ${\mathcal G}$ is losing. So the
adversary can construct a losing hat arrangement on graph $G'$.
\end{proof}
It is possible to attach a new vertex $A$ of hatness $2$ simultaneously to several
vertices of a losing graph. If we increase hatnesses of these vertices greatly, this
cancels out the possible advantage from the appearance of a new vertex and the graph
remains losing. In the following theorem, we consider the case of two vertices.
\begin{theorem}
Let $G$ be a losing graph with vertices $B$ and $C$, and
$h(B)=h(C)=2$. Attach to the graph a new vertex~$A$
connected with~$B$ and~$C$ only. Then the sages lose on the obtained
graph, if we define new hat function as $h(A)=2$, $h(B)=3$, $h(C)=7$, and for
other vertices the hat function is the same as in $G$.
\end{theorem}
\begin{proof}
Let the sages fix some strategy on the new graph. The strategy of sage~$A$ can
be given as $3\times 7$ table: the rows correspond to the hat colors of
sage~$B$, the columns correspond to the hat colors of sage~$A$, and the table
entry (0 or 1) is the number that sage~$A$ says, when he sees the
corresponding $B$'s and $C$'s hat colors.
Each column of the table contains one of the symbols, 0 or 1, two times. Mark
in each column two cells that contain the symbol repeated at least twice in
this column. (If the symbol is repeated in all three cells of column, we mark any two
of them.) The marked cells can be located either in the first and the second
rows, or in the first and the third, or in the second and in the third. Since
there are 7~columns, the pigeonhole principle implies that there exist two rows, in
which the marked cells occupy three columns. The marked cells of one column
contain either two zeroes or two ones. Therefore, one can choose two
columns containing the same numbers in the marked cells.
Thus we have chosen two rows (for definiteness, the $i$-th and the $j$-th) and two
columns (for definiteness, the $k$-th and the $\ell$-th), which intersect in 4 cells
containing the same number, say~0. Now we construct a
disproving hat arrangement on the new graph. First, give the hat of color~1 to
sage~$A$.
Then we choose a hat of the $i$-th or the $j$-th color for sage $B$, and
a hat of the $k$-th or the \hbox{$\ell$-th} color for sage~$C$. In this
case, sage~$A$ says ``0'' and guesses his color incorrectly. To
assign colors to sages $B$ and $C$, and the others, we consider a
game on graph~$G$: since the color of sage~$A$ is fixed, the strategies
of the other sages on graph~$G$ are uniquely determined. The restrictions on the
hat colors of $B$ and $C$ allow us to think that $h(B)=h(C)=2$. Since
graph~$G$ is losing for this hat function, we will successfully find a
disproving hat arrangement on it.
\end{proof}
The proof is based on ideas from Ramsey theory. Thus the statement can be generalized
to the case of large number of new vertices and those vertices to which they are
attached. However, apparently, such constructions give too overestimated values of the
hatnesses in losing graphs.
\subsubsection{Attaching vertices of hatnesses 2 and 3, connected by an edge}
Apparently it is hard to determine whether the graph obtained by attaching a new fragment via two independent ``jumpers'', is winning. We are able to do this for very small fragment only.
\begin{theorem}\label{thm:addA2B3}
Let ${\mathcal G}=\langle G,h\rangle$ be a winning game, and let $Z$, $C \in V$ be two
vertices of graph~$G$. Consider a graph $G'=\left\langle V', E' \right\rangle$
obtained by adding a new path~$ZABC$ to graph~$G$: $V'=V\cup\{A, B\}$, $E'=E\cup
\{ZA, AB, BC\}$ (fig.~\ref{fig:addA2B3}). Then the sages win in the game
${\mathcal G}'=\langle G',h'\rangle$, where
\[
h'(v)=
\begin{cases}
2, &\text{if}\ v = A,\\
3, &\text{if}\ v = B,\\
2h(v), &\text{if}\ v = Z,\\
h(v) + 1,&\text{if}\ v = C,\\
h(v), &\text{otherwise.}
\end{cases}
\]
\end{theorem}
\begin{figure}
\caption{Adding a new path~$ZABC$.}
\label{fig:addA2B3}
\end{figure}
\begin{proof}
Let sage $X$ get a hat of color~$c_X$. Consider the color $c_Z$ as
composite: $c_Z=(\epsilon_Z, c)$, where $\epsilon_Z\in \{0, 1\}$, and
$c$ is one of $h(Z)$ colors in the game ${\mathcal G}$. Let us describe a winning strategy.
\begin{itemize}[nosep]
\item If $c_B\ne 2$, let sage $A$ say $B$'s hat color, otherwise he says
the color~$\epsilon_Z$.
\item Sage $B$ says ``2'' if he sees the hat of new color on sage~$C$,
otherwise he says $1 - c_A$.
\item If $c_B\ne 2$, then sage $C$ says new color, otherwise $C$ uses strategy of the game ${\mathcal G}$.
\item Let $Z$ take the first bit of his color $\epsilon_Z\ne c_A$, and find
the second component of his color in accordance with his strategy in ${\mathcal G}$.
\item The sages in $V(G)\setminus\{C,Z\}$ use strategy of ${\mathcal G}$. It is assumed here that
the neighbors of $Z$ look at the second component of $Z$'s color only, and
the neighbors of $C$ do not distinguish the new color and 0-th color.
\end{itemize}
Now we consider all variants of pairs $(c_A, c_B)$ and check that the strategy is
winning.
In cases $(0, 0)$ and $(1, 1)$ sage $A$ guesses correctly.
In cases $(0, 1)$ and $(1, 0)$ $B$ or $C$ guesses correctly.
In cases $(0, 2)$ and $(1, 2)$ sage $A$ guesses correctly if $c_A= \epsilon_Z$, and
sage $B$ guesses correctly if $C$ has hat of new color. In the remaining cases, the
sages on graph $G$ use the strategy of game ${\mathcal G}$, and one of them guesses correctly.
\end{proof}
\begin{corollary}\label{cor:cycle_233}
Let graph $G$ be cycle $C_n$ $(n \geq 4)$ and $A$, $B$, $C$ be three
consequent vertices of the cycle. Then the sages win in game $\langle{G,
h_4^{A2B3C3}}\rangle$.
\end{corollary}
\begin{proof}
Follows from theorem~\ref{thm:addA2B3} applied to path $P_{n-2}(ZC)$ with hatnesses
$2,4,\dots, 4,2$ (this game is winning by corollary~\ref{cor:path2442}).
\end{proof}
\subsubsection{Attaching a leaf of large hatness}
Our latest constructor is great in that it works both for winning
and for losing games. It claims that attaching to a graph a leaf with hatness
more than $2$ does not affect the result of the game.
\begin{definition}
Let ${\mathcal G}=\langle {G, h} \rangle$ be a game and $A\in V(G)$.
The \emph{game ${\mathcal G}$ with the hint $A^*$} is defined as follows. The sages play on graph $G$ with
hat function $h$, but \textbf{during the test} the adversary comes up to
sage $A$ and says the true statement ``I just put on your head a hat of color
$c_1$ or $c_2$''. During the conversation the sages know that the adversary
is going to give a hint, but do not know what colors he will say. So the
sages determine usual strategies for everyone, except for the sage $A$, and sage $A$
gets the set of $\binom{h(A)}{2}$ strategies, one for each possible hint.
\end{definition}
``A theory of {\sc Hats\ } game with hints'' (where the hatness of each sage is equal to
3) is developed by Kokhas and Latyshev in~\cite{Kokhas2018}. The
following lemma from~\cite{Kokhas2018} remains almost unchanged in the case
of arbitrary hatnesses. We present its proof here to make the paper self-contained.
\begin{lemma}\label{lem:hint_Astar}
The hint $A^*$ does not affect the result of the {\sc Hats\ } game.
\end{lemma}
\begin{proof}
Assume that the sages win with hint $A^*$. Let us fix strategies of all sages except for $A$
in the game with the hint $A^*$; we construct a
strategy of $A$ so that the sages win without hints.
Assume that the adversary gives a hat of color $x$ to $A$, so that
there exists a~hat arrangement in which $A$ gets a hat of color $x$, his
neighbors get hats of colors $u$, $v$, $w$ \dots, the other sages also get
hats of some colors, and no one (except for $A$) guesses correctly. We
want sage $A$ to guess the color of his hat correctly in this case, i.\,e., his
strategy satisfies the requirement $f_A(u, v, w, \dots) = x$.
These requirements for different hat arrangements do not contradict each other.
Indeed, if there exists another hat arrangement where the neighbors still have
colors $u$, $v$, $w$, \dots and the sage $A$ gets another color $y$, then the
sages cannot win with the hint $A^*$, because the adversary can inform $A$
that he has a hat of color $x$ or $y$ and then choose one of the two hat
arrangements for which sage~$A$ does not guess his color correctly.
\end{proof}
\begin{theorem}
Let ${\mathcal G}_1 = \langle {G_1, h_1} \rangle$, $B\in V(G_1)$, $G_2=G_1\gplus{B} P_2(AB)$ and ${\mathcal G}_2 = \langle G_2, h_2 \rangle$,
where $h_2\Big\vert_{V(G_1)}=h_1$, \ $h_2(A)\geq 3$. The game ${\mathcal G}_1$ is winning
if and only if ${\mathcal G}_2$ is winning.
\end{theorem}
\begin{proof}
In one direction the statement is obvious: if game ${\mathcal G}_1$ is winning, then
game~${\mathcal G}_2$ is also winning. (The sages on the subgraph $G_1$ win.)
Now we prove that if game ${\mathcal G}_2$ is winning, then ${\mathcal G}_1$ is also winning. We
demonstrate that if the sages win in game~${\mathcal G}_2$, then they can win in
game~${\mathcal G}_1$ with hint~$B^*$.
Let $f_2$ be a winning strategy for game ${\mathcal G}_2$. In order to construct a
winning strategy for game ${\mathcal G}_1$ with the hint~$B^*$, we first
define a strategy for sages on $V(G_1)\setminus\{B\}$ --- let they use $f_2$.
Second, for any two different colors $(b_1, b_2)$ that can occur in the
hint we define a strategy of~$B$. Since $h_2(A)\geq 3$, for each pair of
colors $(b_1, b_2)$, $b_1 \neq b_2$, we can find a color $a$ such that $A$ can
not say ``$a$'' if he sees that $B$'s hat is of color $b_1$ or~$b_2$. Let
sage $B$, having received the hint $(b_1, b_2)$, say the color defined by the strategy
$f_2$ when he sees the hat of color~$a$ on sage~$A$ and the
colors of the other neighbors in $G_1$ are given by the current arrangement.
This strategy is winning in the game~${\mathcal G}_1$ with the hint~$B^*$. Indeed, let
the hat arrangement on $G_1$ be fixed and sage $B$ get a hint $(b_1, b_2)$.
Consider the corresponding hat arrangement on $G_2$ (we give the hat of color $a$ to
sage $A$). Then all the sages on $G_2$ use strategy $f_2$ (and sage $A$ does
not guess). Therefore, someone on $G_1$ guesses correctly. Thus, for the hat
arrangement and the hint under consideration, the sages on $G_1$ win, and hence the sages
win with the hint~$B^*$. Then they win in game
${\mathcal G}_1$ by lemma~\ref{lem:hint_Astar}.
\end{proof}
The proven theorem has an interesting generalization for losing graphs. If we
glue two losing graphs $G_1$ and $G_2$ by identifying a
vertex of hatness 2 of graph $G_2$ and any vertex of graph $G_1$, then the obtained
graph is losing.
\begin{theorem}
Let $G_1$ and $G_2$ be graphs such that $V(G_1)\cap V(G_2)=\{A\}$, $G= G_1+_AG_2$.
Assume that the games ${\mathcal G}_1=\langle G_1, h_1\rangle$ and ${\mathcal G}_2=\langle
G_2, h_2\rangle $ are losing, and also $h_1(A)\geq h_2(A)=2$. Then the
game ${\mathcal G}=\langle G_1+_AG_2, h\rangle$ is losing, where
$$
h(x) =
\begin{cases}
h_1(x),&x\in V(G_1)\\
h_2(x),&x\in V(G_2)\setminus A.
\end{cases}
$$
\end{theorem}
\begin{proof}
Assume that game ${\mathcal G}$ is winning, and let $f$ be a winning strategy. Let
$N(A)$ be the set of neighbors of vertex $A$ in graph $G_1$. Every hat
arrangement $\varphi$ on graph $G_1$ determines, in accordance with strategy $f$, the answer of
each sage in $V(G_1)\setminus A$. Let us prove that there exist two different hat
arrangements $\varphi_1$ and $\varphi_2$ on vertices of graph $G_1$, such that
$\varphi_1\big|_{N(A)}=\varphi_2\big|_{N(A)}$, $\varphi_1(A)\neq\varphi_2(A)$
and if the sages of $G_1$ play in accordance with strategy $f$, then in both
arrangements none of $V(G_1)\setminus A$ guesses correctly.
Assume that there are no two such arrangements. This means that for every
hat arrangement $c$ on $N(A)$, there is at most one color $\alpha(c)$ of $A$'s
hat, for which the hat arrangement $c\cup\alpha(c)$ on $N(A)\cup A$ can be
extended to a hat arrangements on $V(G_1)$ such that none in
$V(G_1)\setminus A$ guesses correctly in accordance with strategy $f$. We consider the following
strategy for game ${\mathcal G}_1$. Let everyone in $V(G_1)\setminus A$ play in accordance with
strategy $f$, and sage $A$ say $\alpha(c)$ (or 0, if $\alpha(c)$ is
undefined). This strategy is winning, because if none in $V(G_1)\setminus
A$ guess correctly, then $A$ has a hat of color $\alpha(c)$ and guesses correctly, a
contradiction.
Let us consider these two arrangements $\varphi_1$ and~$\varphi_2$. We fix the hat
arrangement $c=\varphi_1\big|_{N(A)}=\varphi_2\big|_{N(A)}$ on $N(A)$ and
restrict ourselves only to those arrangements, where sage $A$ gets a hat of
one of two colors $\varphi_1(A)$ or $\varphi_2(A)$. Then strategy $f$
determines actions of the sages on graph $G_2$, i.\,e.\, in the losing game
${\mathcal G}_2$ (sage $A$ can say more than two colors, but it does not help to win). ``Losing''
means that there exists a disproving hat arrangement~$\psi$. If
$\psi(A)=\varphi_1(A)$, then $\psi\cup \varphi_1\big|_{V(G_1)\setminus A}$ is
a disproving arrangement on~$G$, and if $\psi(A)=\varphi_2(A)$, then $\psi\cup
\varphi_2\big|_{V(G_1)\setminus A}$ is a disproving arrangement on~$G$.
\end{proof}
\subsection{More complicated constructors}
The next theorem generalizes theorem~\ref{thm:addA2}.
\begin{theorem}\label{thm:addA2gen}
Let $\langle G_1 ,h_1\rangle$, $\langle G_2 ,h_2\rangle$ be two games, in
which the sages win. Let $A_1$, $A_2$, \dots, $A_k\in V_1$ and $B_1$, $B_2$, \dots,
$B_m\in V_2$. Let $G'=\left\langle V', E' \right\rangle$ be a graph obtained
by adding all the edges $A_iB_j$ to graph $G_1\cup G_2$: $V'=V_1\cup V_2$,
$E'=E_1\cup E_2\cup \{A_iB_j, i=1,\dots, k; j=1,\dots, m\}$
(fig.~\ref{fig:addA2gen}). Then the sages win in the game $\langle G',h'\rangle$,
where
$$
h'(u)=\begin{cases}
h_1(u), & u\in G_1\setminus \{ A_1, A_2, \dots, A_k\}, \\
h_2(u), & u\in G_2\setminus \{ B_1, B_2, \dots, B_m\}, \\
h_1(u)+1,& u\in \{ A_1, A_2, \dots, A_k\}, \\
h_2(u)+1,& u\in \{ B_1, B_2, \dots, B_m\}.
\end{cases}
$$
\end{theorem}
\begin{figure}
\caption{Stitching of two graphs, $k=2$, $m=3$.}
\label{fig:addA2gen}
\end{figure}
\begin{proof}
One new color has been added for sages $A_i$ and one for sages $B_j$ with
respect to initial games. Let this color be red. For each $i$ let sage~$A_i$
say that he has red hat, if he sees at least one red hat on sages~$B_j$, in
the opposite case let $A_i$ look at his neighbors in $G_1$ only and play in accordance with
winning strategy on~$G_1$. For each $j$, if sage~$B_j$ sees at least one
red hat on~$A_i$, then he looks at his neighbors in $G_2$ only and plays in accordance with
winning strategy on~$G_2$. In the opposite case $B_j$ says that he has
red hat. It is easy to check that this strategy is winning.
\end{proof}
The next theorem adds a ``surgical intervention'' to the previous construction: we
will sew together graphs, by joining the neighbors of the two chosen vertices
with hatness 2, and delete both vertices.
\begin{theorem}
Let $G_1$, $G_2$ be graphs containing vertices $A$ and $B$ respectively,
the games ${\mathcal G}_1=\langle G_1, h_1 \rangle$ and ${\mathcal G}_2=\langle G_2, h_2 \rangle$ be
winning and $h_1(A)=h_2(B)=2$. Let $N_A$ and~$N_B$ be the sets of neighbors of $A$
and $B$ in graphs $G_1$ and $G_2$. Consider a new graph $G$
(fig.~\ref{fig:AB-sewing}):
\begin{gather*}
V(G)=(V(G_1)\setminus A) \cup (V(G_2)\setminus B),
\\
E(G)=E(G_1)\big|_{V(G_1)\setminus A} \cup \,E(G_2)\big|_{V(G_2)\setminus B}
\cup \,\{XY \mid X \in N_A, Y \in N_B\}.
\end{gather*}
Then the game ${\mathcal G}=\langle G, h \rangle$ is winning, where
\[
h(x) =\begin{cases}
h_1(x),&x\in V(G_1)\setminus A,\\
h_2(x),&x\in V(G_2)\setminus B.
\end{cases}
\]
\end{theorem}
\begin{figure}
\caption{Sewing graphs by joining neighbors of vertices $A$ and $B$.}
\label{fig:AB-sewing}
\end{figure}
\begin{proof}
Let $f_1$ and $f_2$ be winning strategies in games ${\mathcal G}_1$ and ${\mathcal G}_2$.
Let us construct a winning strategy for game ${\mathcal G}$.
Let $c_1$ be an arbitrary hat arrangement on $N_A$. To this arrangement, we
associate a color $g_1(c_1)$ which is the guess of sage $A$ in accordance with
strategy $f_1$ for this arrangement. Analogously, to each
arrangement $c_2$ on $N_B$, we associate a color $g_2(c_2)$. All the sages in
$N_A$ can determine $g_2(c_2)$ and all the sages in $N_B$ can determine $g_1(c_1)$.
The winning strategy is as follows. The sages on $V(G)\setminus (N_A\cup N_B)$
use their initial strategies $f_1$ and $f_2$. The sages on $N_A$ also use
strategy $f_1$ if the hat color of sage $A$ is $g_2(c_2)$. The sages on $N_B$
use strategy $f_2$ if the hat color of sage $B$ is not $g_1(c_1)$; denote
this color by $\overline{g_1}(c_1)$ (recall that $h(B)=2$).
Why is this strategy winning? If $g_1(c_1)=g_2(c_2)$, then on $G_2$-part of
the new graph we have game ${\mathcal G}_2$, where the color of sage $B$ is
$\overline{g_1}(c_1)=\overline{g_2}(c_2)$, and all the sages use strategy $f_2$ (the guess of $B$ is not determined). But $B$ guesses wrong in game ${\mathcal G}_2$ with the
hat arrangement under consideration, and $f_2$ is the winning strategy in game
${\mathcal G}_2$. Therefore someone in $V(G_2)\setminus B$ guesses correctly.
Analogously, if $g_1(c_1)\neq g_2(c_2)$, we have the game ${\mathcal G}_1$, where the color
of $A$ is ${g_2(c_2)}=\overline{g_1}(c_1)$, all sages use strategy $f_1$ and $A$
says nothing. But $A$ guesses wrong in accordance with strategy $f_1$, and $f_1$ is
winning strategy in game ${\mathcal G}_1$. Therefore, someone in $V(G_1)\setminus A$
guesses correctly.
\end{proof}
The following constructor allows to fasten several graphs $G_i$ by marking
several vertices in each graph with the names as in graph $G$ and joining them
together in the same way as the corresponding vertices are joined in $G$.
\begin{theorem}
Let a game $\langle G, h \rangle$ be winning, where $V(G)=\{A_1, A_2, \ldots,
A_k\}$. Let $G_i$ be a graph $(i=1,\dots, k)$, $B_{ij}\in V(G_i)$ be set of
marked vertices in $G_i$ (we assume that sets $V(G_i)$ are disjoint, the
numbers of marked vertices in different graphs are not necessarily the same, see
fig.~\ref{fig10}), and let the games $\langle G_i, h_i\rangle$ be winning. Consider a
new graph $G'=\langle{ V(G'), E(G') }\rangle$, where
$$
V(G')=V(G_1)\cup\ldots \cup V(G_k), \quad
E(G')=E(G_1)\cup\ldots\cup E(G_k)\cup \{B_{i_1j_1}B_{i_2j_2}\mid
A_{i_1}A_{i_2}\in E(G)\}.
$$
Then the game $\langle G', h'\rangle$ is winning, where
$$
h'(x)=\begin{cases}
h_i(x),& \text{if $x$ belongs to one of the sets \ }
V(G_i)\setminus\{B_{i1},B_{i2},\ldots\}, \\
h_i(x)+h(A_i)-1, & \text{if $x$ coincides with vertex $B_{ij}$}.
\end{cases}
$$
\end{theorem}
\begin{figure}
\caption{Fastening several graphs with the help of graph $G$.}
\label{fig10}
\end{figure}
\begin{proof}
Let $f$, $f_1$, \dots, $f_k$ be winning strategies for games $\langle
G,h\rangle$, $\langle G_1,h_1 \rangle$, \dots, $\langle G_k,h_k\rangle$
respectively. We may assume that sage $B_{ij}$ has hats of $h_i(B_{ij})$ old colors
and $h(A_{i})-1$ new colors. A \emph{megasage} $M_i$ is a set of sages
$\{B_{ij}\mid j=1,2,\ldots \}$. Define the hat color of a megasage as a number from 0 to
$h(A_{i})-1$ as follows. We set it to be 0 if all hat colors of
sages $B_{i1},B_{i2},\ldots$ are old; otherwise, we set it to be equal the
maximum new color number of the hats $B_{i1},B_{i2},\ldots$.
Now each megasage (i.\,e.\ each sage in this set) understands which hat colors
his neighboring megasages in graph $G$ have. Thus the megasages can use
strategy $f$: if a megasage has to say a~new color, let all sages forming this
megasage say this new color; if megasage $M_i$ has to say 0, then let sages
$B_{i1},B_{i2},\ldots$ use strategy $f_i$ (looking at the neighbors in the
component $G_i$ only). Let the sages in
$V(G_i)\setminus\{B_{i1},B_{i2},\ldots\}$ also use strategy $f_i$. To make our
strategy well-defined, we append the following rule: if a sage is assigned to play
strategy $f_i$ but he sees new color on the hats of his neighbors, then his guess
is not defined by the strategy, and we allow him to say an arbitrary guess.
Since strategy $f$ is winning, one of the megasages, say $M_{i_0}$, guesses correctly. If
his color is new then all sages $B_{i_0j}$ $(j=1,2,\ldots)$ say this color and
one of them certainly guesses correctly. If $M_{i_0}$ has color~0, then sages
$B_{i_01},B_{i_02},\ldots$ use strategy $f_{i_0}$
and the other sages in $V(G_{i_0})\setminus\{B_{i_01},B_{i_02},\ldots\}$
use strategy $f_{i_0}$ too. Thus someone in $V(G_{i_0})$ guesses correctly.
\end{proof}
We note that if only one vertex $B_i$ is marked in each component
$G_i$, then the game on graph $G'$ remains winning even if we greatly
increase values of hatnesses $h(B_i)$. The following lemma holds.
\begin{lemma}
Let a game $\langle G, h'\rangle$ be winning, $V(G)=\{A_1, \dots, A_n\}$. Take
$n$ winning games $\langle G_i, h_i\rangle$ such that the sets $V(G_i)$ are
disjoint, and mark one vertex $A_i$ in each graph $G_i$. Join the marked
vertices as in graph $G$. Define the hat function in the obtained graph as
$h(A_i) = h'(A_i)h_i(A_i)$ (all other vertices have the same hatness as in the
initial graphs). Then the sages win.
\end{lemma}
The lemma follows from theorem~\ref{thm:multiplication} on a game product.
The next constructor combines the ideas of theorem~\ref{thm:multiplication} on game
product and theorem~\ref{thm:substitution} on substitution into a single monster.
We glue several winning graphs with common vertex $O$ and create a copy of graph
$G$ by joining neighbors of $O$. For suitable hat function the obtained graph is
winning.
Recall that for $h(A)=mn$, we may consider hat color of $A$ as pair $(c_1,
c_2)$, where $0\leq c_1\leq m-1$, $0\leq c_2\leq n-1$; we say in this case that
color of sage $A$'s hat is composite.
\begin{figure}
\caption{``Cone'' over graph $G$.}
\label{fig:konus}
\end{figure}
\begin{theorem}[On a ``cone'' with vertex $O$ over graph $G$]\label{thm:konus}
Let a game ${\mathcal G}=\langle G, h \rangle$, where $V(G)=\{A_1, A_2, \ldots, A_k\}$
and $k$ games ${\mathcal G}_i=\langle G_i, h_i\rangle$, $1\leq i\leq k$, be winning, and the
sets $V(G_i)$ be disjoint. Assume that in each $G_i$ one vertex is labeled $O$
so that $h_1(O)=h_2(O)=\ldots=h_k(O)$, and also one of the neighbors $A_i$ of $O$ is labeled. Let $G'=\langle{V(G'), E(G')}\rangle$ be a new graph,
\begin{align*}
V(G')&=(V(G_1)\setminus\{O\})\cup\ldots \cup (V(G_k)\setminus\{O\})\cup\{O\},
\\
E(G')&=E(G_1)\cup\ldots\cup E(G_k)\cup \{A_i A_j\mid A_{i}A_{j}\in E(G)\}.
\end{align*}
Then the game $\langle G', h'\rangle$ is winning, where
$$
h'(x)=\begin{cases}
h_i(x), & \text{if $x$ belongs to one of sets \ } V(G_i)\setminus\{A_i\}, \\
h_i(A_i)h(A_i), & \text{if $x$ coincides with $A_i$}.
\end{cases}
$$
\end{theorem}
\begin{proof}
Let us describe a winning strategy. The sages in vertices $A_i$ have composite
colors, let they play two strategies simultaneously: strategy of game ${\mathcal G}_i$
for the first coordinate of the color and strategy of ${\mathcal G}$ for the second
one. The sages in $V(G_i)\setminus\{O, A_i\}$ use the strategy of game
${\mathcal G}_i$ (neighbors of~$A_i$ pay attention only to $G_i$-coordinate of his color).
The most cunning role goes to sage $O$. He sees all the sages $A_i$ and knows which
sage guesses the $G$-coordinate of his color correctly, let this sage be
$A_j$. Then $O$ looks only at his neighbors in graph $G_j$ and uses the strategy
of game ${\mathcal G}_j$ (he looks at the $G_j$-coordinate of $A_j$'s hat color only).
As a result, someone on graph $G_j$ guesses correctly (if this sage is $A_j$, then
he guesses both components of his composite color).
\end{proof}
An \emph{example} of a winning graph obtained by the theorem is shown in
fig.~\ref{fig:primer-konus}.
Here, graph $G$ is a complete graph on 3 vertices with hatnesses 3, 3, 3; $G_1$ is a
5-cycle with hatnesses 4, 2, 3, 3, 3; the graphs $G_2$ and $G_3$ are 4-cycles with
hatnesses 4, 2, 3, 3. The latter three graphs are winning by
corollary~\ref{cor:cycle_233}. $O$~is vertex of hatness 4 in these cycles.
\begin{figure}
\caption{The sages win by theorem \ref{thm:konus}.}
\label{fig:primer-konus}
\end{figure}
\medbreak
In the proof of theorem \ref{thm:konus}, sage $O$ plays the role of ``dispatcher'':
looking at sages $A_i$, he chooses in which component the winning game is played.
The success of this choice is provided by his knowledge of the winner in the
graph $G$. Now we consider a case in which sage $O$ cannot see the entire
graph $G$.
Let us fix a graph $G$, a hat function and an arbitrary strategy of sages on $G$.
A set $S\subset V(G)$ is said to be \emph{predictable} if it satisfies the property: for
any hat arrangement on graph $G$, one can choose a sage $A\in S$ looking at the hat colors in the set $S$ only, and so that if for this hat arrangement some sages in
$S$ guess their colors correctly, then $A$ is one of these ``winners''.
An example of predictable set $S$ is a 5-clique in the graph ``Big bow'' for the
strategy from theorem~\ref{thm:bantik} (fig.~\ref{fig:big-bow}). Indeed, the
strategy in the proof of theorem ~\ref{thm:bantik} for the sages on any of 5-cliques
consists of checking some hypotheses about the sums of colors over vertices of the
clique. Anyone who sees the hat arrangement only on the clique can
determine who of the non-central sages guesses correctly. If nobody guesses, then
the only person who can guess correctly in this set of vertices is the central
sage.
A simpler example of a predictable set is the component $G_1$ or $G_2$ in
theorem~\ref{thm:multiplication} on graph products (for the strategies from the
proof). For example, graph $G$ depicted in fig.~\ref{fig:konus-monstr}, left,
with all hatnesses equal to 4, is the product of two 3-cliques $S_1$ and $S_2$ with
hatnesses 2, 4, 4. Hence, sets $S_1$ and $S_2$ are predictable.
Returning to ``cone'' theorem we note that the construction of a new graph can be
generalized to the case of several dispatchers, that collectively see the entire
graph $G$ but individually each dispatcher sees some predictable part of the
graph $G$ only. Let us describe this generalization more precisely.
Let ${\mathcal G}=\langle G, h \rangle$ be a winning game, where $V(G)=\{A_1, A_2, \ldots,
A_k\}$. Fix a winning strategy~$f$ in this game. Let the vertex set of $G$ be
a union of several predictable sets with respect to~$f$:
$V(G)=S_1\cup S_2\cup\ldots\cup S_m$. Sets $S_j$ can intersect; if a
vertex $A_\ell$ belongs to several~$S_j$, we consider one of them (any) as
``principal'' for $A_\ell$, the number of this set is denoted by $j_\ell$. For instance, one can take
$j_\ell=\min\{j: A_\ell\in S_j\}$. Assume that none of~$S_j$ is a subset of the
union of the other sets. Assume that for each~$\ell$, $1\leq \ell\leq k$, a winning game
${\mathcal G}_\ell=\langle G_{\ell}, h_{\ell}\rangle$ is given (the sets $V(G_\ell)$ are
pairwise disjoint), in each graph $G_{\ell}$ one arbitrary vertex
$A_\ell$ is marked and one more vertex, neighboring to~$A_\ell$, is labeled
as~$O_{j_\ell}$. Thus, some vertices in different graphs can be labeled with the same
label $O_j$. We assume that for each $j$, $1\leq j\leq m$, the hatnesses of all
vertices~$O_j$ are equal, denote this value by~$o_j$.
Consider a graph $G'=(V(G'), E(G'))$, where $V(G')=\bigcup\limits_{\ell=1}^{k}
V(G_{\ell})$. We assume that the vertices with the same labels (i.\,e.\ different
copies of vertices $O_j$) are identified in this union,
$$
E(G')=E(G_1)\cup\ldots\cup E(G_k)\cup E(G)\cup \{O_j A\mid 1\leq j\leq m, \ A\in S_j\}.
$$
The latter set in the union provides the ability of sages $O_j$ to see all
vertices of set $S_j$ including those vertices, for which index $j$ is not principal.
We consider the union formally: if a set $E(G_\ell)$ contains edge $A_\ell
O_{j_\ell}$, then $E(G')$ also contains the edge $A_\ell O_{j_\ell}$, since there are
vertices denoted by $A_\ell$ and $O_{j_\ell}$ in graph $G'$.
An example is given in fig.~\ref{fig:konus-monstr}. Here we have $j_1=j_2=j_3=1$, $j_4=j_5=2$. Graph $G$ is a ``small bow'', we have checked above that sets $S_1$ and $S_2$ are predictable.
Define the hat function $h'$ on graph $G'$:
$$
h'(x)=\begin{cases}
h_{\ell}(x), & \text{if $x$ belongs to one of the sets \ } V(G_{\ell})\setminus\{O_{j_\ell}, A_\ell\},\\
o_j, & \text{if $x$ coincides with $O_j$}, \\
h_{\ell}(A_\ell)h(A_\ell), & \text{if $x$ coincides with $A_\ell$}.
\end{cases}
$$
\begin{figure}
\caption{``Cone'' over a graph $G$ with two dispatchers.}
\label{fig:konus-monstr}
\end{figure}
\begin{corollary}
In the above notations, the game $\langle G', h'\rangle$ is winning.
\end{corollary}
\begin{proof}
Each sage $A_\ell$ has a composite color and uses two strategies: the strategy
of game ${\mathcal G}_{\ell}$ for the first component of the color, and the strategy of
game ${\mathcal G}$ for the second one. The sages from $V(G_{\ell})\setminus\{O_{j_\ell},
A_\ell\}$ use the strategy of game ${\mathcal G}_{\ell}$ (the neighbors of $A_\ell$
look at the $G_\ell$-coordinate of his color only).
Each of the sages $O_j$ sees the predictable component $S_j$ of graph
$G$ and hence knows which sage in this component (if exists) guesses
the $G$-coordinate of his own color correctly. Suppose sage $O_j$ knows that sage
$A_\ell$ guesses correctly. If $j= j_\ell$, then $O_j$ looks only at his
neighbors on subgraph $G_\ell$ and uses the strategy of game ${\mathcal G}_\ell$ (taking into account
the $G_\ell$-coordinate of sage $A_\ell$'s color only). If $j\ne j_\ell$ or if
none of sages in $S_j$ guess correctly, $O_j$ can choose an arbitrary guess.
Now let $\ell$ be an index, for which sage $A_\ell$ guesses the $G$-coordinate of his
color correctly. Then on graph $G_\ell$ either one of the sages (not $A_\ell$)
guesses correctly, or $A_\ell$ guesses correctly both coordinates of his color.
\end{proof}
\section{Blind chess}
In this section we present a new game which is in fact a special case of {\sc Hats\ }
game on 4-cycle. This game gives us a whole class of new games on
cooperative guessing. All you need to change in the initial {\sc Hats\ } game is the
target of guessing. Here we replace the guessing of a marked element in a set
(i.\,e.\ the color of a hat) with making a check to the invisible king! In general, the
sages can try to perform any actions, for which in the absence of information
100\,\% success is not guaranteed.
\subsection{Rook check}
\begin{definition}
The game ``Rook check''.
Two chess players $\mathcal L$ and $\mathcal R$ are sitting opposite each
other and there is a chessboard on the wall behind each of them. Each chess
player does not see his own board (which is behind him) but sees the board of the other chess
player. The referee places one black king on each of these boards. So the players
see the king on another board but do not see the king in their own board.
After that, each chess player, independently of the other, points to one square of his own chessboard
and the referee puts a white rook on this square. If
at least one of the kings is under attack by the rook (or the
rook is placed on the square where the king is), then the chess players
both win, otherwise they lose.
\end{definition}
Chessboards of the players can be different and have arbitrary sizes, which are
known to the players. As in the {\sc Hats\ } game, the chess players determine public
deterministic strategy in advance. The referee knows this strategy and plays
against the chess players.
Let us explain how Rook check game relates to the {\sc Hats\ } game. Let a graph
$G$ be the 4-cycle $ABCDA$ with hat function $h$. In fact, graph~$G$ is a
complete bipartite graph~$K_{2,2}$ with parts $\{A, C\}$ and $\{B, D\}$. The pair
of players $A$ and $C$ is called a chess player~$\mathcal L$, his board has
size $h(A)\times h(C)$. The pair $B$ and $D$ is called a chess player $\mathcal
R$, his board has size $h(B)\times h(D)$.
The hat colors of $A$ and $C$ can be interpreted as coordinates of the cell where
the king is placed. Since $A$ and $C$ do not see each other, they know nothing
about king placement on their board. The pair of colors that $A$ and $C$ say
can be interpreted as a \emph{cross} on the chessboard, i.\,e.\ a configuration
consisting of one horizontal and one vertical line, or, which is the same, a
position for a chess rook. It is clear that one or both chess players guess their
colors if and only if the king is under attack of the rook. Similar interpretations are valid
for $B$ and $D$.
Thus, the {\sc Hats\ } game on cycle $ABCDA$ with hat function $h$ is equivalent to the game
Rook check on the boards $L(h(A)\times h(C))$ and~$R(h(B)\times h(D))$. It is
clear that the result of the game does not depend on which board is the left
one and which is the right one.
Generally, we can define the Rook check game in the case where $n$ chess players
sit in the vertices of an arbitrary graph: each player has his own chessboard but
sees only the boards of his neighbors (and does not see his own chessboard). The
aim of the players is similar, they want at least one of kings to be under
attack. This game is equivalent to {\sc Hats\ } game on a ``doubled'' graph. We will
not discuss this game here.
\medbreak
Let us return to the game of two players on the boards $L(a\times c)$ and $R(b\times
d)$. We use the following notations.
Let us number the cells of $L(a\times c)$ board from left to right from top to bottom, see
fig.~\ref{fig:2x3and3x4}{\it a}, where we use boards $L(2\times 3)$ and $R(3\times 4)$
as examples. Let the strategy of chess player~$\mathcal R$ be given by the table
as in fig.~\ref{fig:2x3and3x4}{\it b}. Here we put $ac$ labels $r_i$ in the
cells of $R(b\times d)$ board (a~cell can contain several labels $r_i$), where the index $i$
runs over all numbers of the cells of $L(a\times c)$ board.
The label~$r_i$ means that chess player~$\mathcal R$, seeing his partner's king
is on the $i$-th cell of $L(a\times c)$ board, puts his rook on the
cell of $R(b\times d)$ board with the label~$r_i$.
The strategy of chess player $\mathcal L$ is also given with the help of
$R(b\times d)$ board, see fig.~\ref{fig:2x3and3x4}{\it c}. Here there is a
number from 1 to $ac$ in each cell of $R(b\times d)$, the numbers denote
cells of $L(a\times c)$ board. Each cell of $R(b\times d)$ board contains
exactly one number, some numbers from 1 to $ac$ can be absent in this table and some
numbers can repeat. When $\mathcal L$ sees that $\mathcal R$'s king is located
on $R(3\times 4)$ board in the cell labeled~$k$, he puts the rook on
$k$-th cell of $L(a\times c)$ board.
To avoid misunderstandings in notations, we use labels of type ``letter~$r$
with index'' for chess player~$\mathcal R$, and labels of type ``number'' for
chess player~$\mathcal L$. The lines on the board~$L$ are called rows and columns, whereas
the lines on the board $R$ are called verticals and horizontals.
\begin{figure}
\caption{Notations for strategies.}
\label{fig:2x3and3x4}
\end{figure}
\begin{definition}
Let the king be in the $i$-th cell of $L(a\times c)$ board. A cell of
$L(a\times c)$ board is said to be \emph{$i$-weak} if the rook does not attack the king from
this cell. For example, cells 5 and 6 on the $L(2\times 3)$ board
(fig.~\ref{fig:2x3and3x4}{\it a}) are 1-weak.
\end{definition}
\begin{lemma}\label{lem:chess-strategy}
Let $L(a\times c)$ and $R(b\times d)$ be the boards in the game Rook check.
A strategy is winning if and only if for each $i$, $1\leq i\leq ac$, all cells on
$R(b\times d)$ board labeled with numbers of $i$-weak cells belong to the cross
with center $r_i$.
\end{lemma}
\begin{proof}
Let cell $\ell$ of $L(a\times c)$ board be $i$-weak and the referee put the kings on the
cell $i$ of $L(a\times c)$ board and the cell of $R(b\times d)$
board labeled by $\ell$. Then player~$\mathcal L$ according to his strategy puts the rook on the
cell $\ell$ of $L(a\times c)$ board, and it does not attack the king. At the same
time, player~$\mathcal R$ puts his rook on the cell of $R(b\times d)$ board
labeled by $r_i$. The players win if and only if this rook attacks the king, i.\,e.\ the cell
labeled by~$\ell$ is in the cross with center $r_i$.
\end{proof}
This lemma provides the following property of winning strategies: if the cell $\ell$
on board $L(a\times c)$ is simultaneously $i$-weak, $j$-weak, etc., then all the
cells on $R(b\times d)$ board labeled by $\ell$ (if they exist) are located in the
intersection of the crosses with centers $r_i$, $r_j$ etc. For example, for the
strategies in fig.~\ref{fig:2x3and3x4} (we will prove below that they are
winning) both cells labeled by 1 on $R(3\times 4)$ board belong to intersection of
crosses $r_5$ and $r_6$ (the shaded area in fig.~\ref{fig:2x3and3x4}{\it b})
because the cells 5 and 6 on board $L(2\times 3)$ are 1-weak.
\smallbreak
The following theorem gives a complete analysis of the game Rook check for two players.
We assume that the number of horizontals of each board does not exceed the number of verticals and that the left board has the shortest vertical size.
\begin{theorem}\label{thm:Rook_chess}
The chess players win in the game Rook check on the following boards:
\begin{enumerate}[label=Win\arabic*),leftmargin=*,labelindent=\parindent,nosep]
\item if one of the boards has sizes $1\times k$, where $k$ is an arbitrary
positive integer;
\item $L(2\times k)$ and $R(2\times m)$, where $k$ and $m$ are arbitrary
positive integers;
\item\label{itm:win2333} $L(3\times 3$), $R(3\times 3)$;
\item $L(2\times 3)$, $R(3\times 4)$;
\item $L(2\times 4)$, $R(3\times 3)$;
\item $L(2\times 2)$, $R(k\times m)$, where $\min(k,m)\leq 4$.
\end{enumerate}
The chess players lose on the following boards:
\begin{enumerate}[label=Lose\arabic*),leftmargin=*,labelindent=\parindent,nosep]
\item\label{itm:lose2344} $L(2\times 3)$, $R(4\times 4)$;
\item $L(2\times 3)$, $R(3\times 5)$;
\item\label{itm:lose2434} $L(2\times 4)$, $R(3\times 4)$;
\item $L(2\times 5)$, $R(3\times 3)$;
\item $L(3\times 3)$, $R(3\times 4)$;
\item $L(2\times 2)$, $R(5\times 5)$.
\end{enumerate}
\end{theorem}
For boards of other sizes the question if the sages win can be answered by
comparing with these cases. For example, the chess players lose on the boards
$L(3\times 4)$, $R(3\times 4)$ because they lose even in ``smaller'' case~\ref{itm:lose2434}.
The chess players win on the boards $L(2\times 3)$, $R(3\times 3)$ because they
win even on larger boards (as in case~\ref{itm:win2333}).
\begin{proof}[Proof of the theorem]\mbox{}\\*
\smallbreak{\bf Win1)} This statement is trivial.
\smallbreak
{\bf Win2)} In the {\sc Hats\ } language the hat function of two neighbor sages in
the corresponding 4-cycle equals 2; these sages ensure a win, even
without looking at the others.
\smallbreak
{\bf Win3)} This is a retelling to the language of the Rook check game
the known statement that the sages win on 4-cycle, if they all obtain hats of
three colors (\cite{Gadouleau2015, cycle_hats}). For example, the strategy
of the sages, described in~\cite{spb_school_problems_2016}, looks in Rook check
language as follows. If a chess player sees that the king of his
partner is in the central cell of the board, then he puts his rook on the center too.
Otherwise he puts the rook on the cell, where the arrow
leading from partner's king shows (on the auxiliary diagram for this chess
player, see fig.~\ref{fig:two3x3str}). The coordinates of cells in the figure
correspond to the numbers of hat colors. Thus, chess player~$\mathcal L$,
seeing that the king of his partner is located in the cell~$(2,2)$, puts his rook
on the cell~$(1,0)$ (this case corresponds to the bold arrow in
fig.~\ref{fig:two3x3str} on the left).
\begin{figure}
\caption{Four sages stand around non-transparent baobab... }
\label{fig:two3x3str}
\end{figure}
\smallbreak
{\bf Win4), Win5)}
The strategies presented in fig.~\ref{fig:2x3and3x4} and~\ref{fig:2x4and3x3}
satisfy Lemma~\ref{lem:chess-strategy} (direct check). So the chess players win.
\begin{figure}
\caption{Winning strategy for game on boards
$L(2\times 4)$, $R(3\times 3)$.}
\label{fig:2x4and3x3}
\end{figure}
\smallbreak
{\bf Win6)} In {\sc Hats\ } language this case means that the 4-cycle contains
path~$P_3$ with hat function 2, $x$, 2, where $x\leq 4$. The sages win on
such path by corollary~\ref{cor:path2442}.
\smallbreak
{\bf Lose1)}
We show that the players have no winning strategy in this case.
Fix a strategy of chess player~$\mathcal R$, see, for instance,
fig.~\ref{fig:2x3and4x4}{\it b}. Let us try to understand how the strategy of $\mathcal
L$ looks like, namely, where can the cells with labels 1, 2, and 3 be located on
$R(4\times 4)$ board. By lemma~\ref{lem:chess-strategy}, the cells with label~1
belong to the intersection of crosses $r_5$ and $r_6$, the cells with label~2 to
the intersection of crosses $r_4$ and~$r_6$, and the cells with label~3 to the
intersection of $r_4$ and $r_5$.
\begin{figure}
\caption{It happens that this strategy is losing.}
\label{fig:2x3and4x4}
\end{figure}
Note that the union of pairwise intersection of any three crosses (possibly, coinciding) on $R(4\times 4)$ board contains at most 8 cells. Indeed, let us examine the cases.
1. If the centers of the crosses belong to different verticals and horizontals, then each pairwise intersection consists of two cells (in the example in fig.~\ref{fig:2x3and4x4}{\it b}, the intersection of the crosses $r_5$ and $r_6$ is shaded), so we have at most 6 cells totally.
2. If the centers of any two crosses do not coincide and two centers belong to the same horizontal or vertical (as the crosses $r_4$ and $r_5$ in fig.~\ref{fig:2x3and4x4}{\it b}), then the intersection of these two crosses contains 4 cells and adding of the third cross (say, $r_6$) can give 4 more cells to the union of pairwise intersections only if the center of the third cross and one of the first two centers are on the same line (as $r_4$ and $r_6$ in fig.~\ref{fig:2x3and4x4}{\it b}). In this case, we have 8 cells, and 7 of them belong to one cross (cross~$r_4$ in our example).
3. If the centers of some two crosses coincide, then intersection of these crosses contains 7 cells. For any location of the third center the set of pairwise intersections does not increase.
Thus, for the cells with labels 1, 2, 3 on board $R(4\times 4)$ there are at most 8 positions, and similarly, for the cells with labels 4, 5, 6 there are at most 8 positions too. Since $R(4\times 4)$ board contains 16~cells, we have 8~positions for labels 1, 2, 3 and 8~positions for labels 4, 5, 6. But as was established by trying all possible cases 1--3, 8~positions can be realized only as a set ``whole cross plus one cell''. It remains to observe that it is impossible to cover $R(4\times 4)$ board completely by two crosses and two additional cells.
\smallbreak
{\bf Lose2)} As in \ref{itm:lose2344} we make sure that the union of pairwise
intersections of any three crosses (possibly, coinciding) on $R(3\times 5)$
board contains at most 8 cells. The cases, in which this union contains 7 or 8
cells, are drawn in fig.~\ref{fig:crosses3x5}, these are the cases, when the
centers of two crosses belong to the same vertical or the same horizontal (including
the case, when both centers are in one cell). In all these cases, the union of
pairwise intersections of three crosses occupies one whole horizontal of the
board, and it occupies less than a half of cells in each of the two other horizontals. This means that the union of two such sets cannot cover the board
completely.
\begin{figure}
\caption{Union of pairwise intersections of three crosses on board
$R(3\times 5)$.}
\label{fig:crosses3x5}
\end{figure}
\smallbreak
{\bf Lose3)}
The argument below was proposed by Oleg Chemokos.
We fix some strategies of the chess players $\mathcal L$ and $\mathcal R$ and verify that one can find positions for the kings such that both kings avoid a check.
In our standard notations each cell $i$ on $L(2\times 4)$ board determines three $i$-weak cells (see fig.~\ref{fig:2x4and3x3}{\it a}). This set of three weak cells can consist of any three cells in one row.
The strategy of chess player~$\mathcal L$ is given by labelling each cell on $R(3\times 4)$ board. Paint in white the cells of $R(3\times 4)$ board containing the labels corresponding to the first row of $L(2\times 4)$ board, paint in black the other cells. Without loss of generality we may assume that the number of white cells on the board is less than or equal the number of black cells. The following three cases cover all the possibilities, for which this inequality can be realized.
1. One of the horizontals of $R(3\times 4)$ board (for definiteness the first one) contains three white cells $u_1$, $u_2$, $u_3$ and one more horizontal (the second) contains two white cells $u_4$ and $u_5$. Then the first row of $L(2\times 4)$ board contains a cell~$\ell$ such that the label $\ell$ occurs in the first two horizontals of $R(3\times 4)$ board at most once, and, moreover, if so, the label $\ell$ occurs in the first horizontal, say, in cell $u_1$. The other cells of the first row on $L(2\times 4)$ board are $(\ell+4)$-weak, and cells $u_2$, $u_3$, $u_4$ and $u_5$ must belong to the same cross by lemma~\ref{lem:chess-strategy}, which is not true.
2. Each horizontal of $R(3\times 4)$ board contains two white cells. Then we choose a cell~$\ell$ in the first row of $L(2\times 4)$ board, such that the label $\ell$ occurs on $R(3\times 4)$ board at most once (for definiteness, in the third horizontal). The other cells in the first row of $L(2\times 4)$ board are $(\ell+4)$-weak, and the corresponding labels in the first two horizontals of $R(3\times 4)$ board are not covered by one cross.
3. One horizontal contains four white cells and two other horizontals contain one white cell each. Then we swap ``black'' and ``white'' and consider the first case.
The obtained contradiction proves that the strategy is losing.
\smallbreak
{\bf Lose4)}
We number the cells of board $L$, as in fig.~\ref{fig:2x5and3x3}\,a). The strategy of chess player~$\mathcal L$ is given by writing the number from 1 to 10 in each cell of $R(3\times 3)$ board (these are the numbers of cells on $L(2\times 5)$ board). Since $L(2\times 5)$ board has two rows only, there exist two horizontals on $R(3\times 3)$ board and two cells in each of them, such that the four labels in these cells correspond to the cells (some of them can coincide) belonging to the same row of $L(2\times 5)$ board. Let $j$-th cell in the other row be $i$-weak with respect to all these cells.
For example, let labels 1, 2, 3, 4 be located on $R(3\times 3)$ board, as in fig. \ref{fig:2x5and3x3}\,b). Then the number~10 is 1-, 2-, 3- and 4-weak simultaneously. This means that the rook in the cell~$r_{10}$ of $R(3\times 3)$ board attacks the cells with labels 1, 2, 3 and 4. But this is impossible: to attack labels 1 and 2, it must be located in the upper row of $R(3\times 3)$ board, and to attack 3 and 4, it must be located in the bottom row.
By the same reason the general case is also impossible: the cell~$r_j$ must be located
in two horizontals of~$R(3\times 3)$ simultaneously.
\begin{figure}
\caption{Seek a strategy for game $L(2\times 5)$, $R(3\times 3)$.}
\label{fig:2x5and3x3}
\caption{Strategy for case Lose5).}
\label{fig:33vs34}
\end{figure}
\smallbreak
{\bf Lose5)}
Assume that the chess players have a winning strategy. We number the cells of board~$L(3\times 3)$ by numbers from 1 to 9. Then the strategy of chess player~$\mathcal R$ is specified by a placement of nine symbols: $r_1$, $r_2$, \dots, $r_9$ on board~$R(3\times 4)$. And the strategy of chess player~$\mathcal L$ is specified by writing a number from 1 to 9 in each cell of $R(3\times 4)$ board.
Claim 1. If the cells $u$, $v$, $w$ belong to three different rows and three different columns of $L(3\times 3)$ board, then the labels $r_u$, $r_v$ and $r_w$ belong to three different horizontals of $R(3\times 4)$ board.
Indeed, each cell of $L(3\times 3)$ board is either $u$-weak, or $v$-weak, or $w$-weak. By lemma~\ref{lem:chess-strategy} this implies that each label on $R(3\times 4)$ board belongs to $r_u$-, $r_v$- or $r_w$-cross. This is possible if the labels $r_u$, $r_v$ and $r_w$ are in different horizontals only.
Claim 2. There are two possible cases of the placement of symbols $r_1$, $r_2$, \dots, $r_9$ on $R(3\times 4)$ board:
1) either the symbols $r_1$, $r_2$, $r_3$ are located in one horizontal of $R(3\times 4)$ board, the symbols $r_4$, $r_5$, $r_6$ are located in another horizontal, and the symbols $r_7$, $r_8$, $r_9$ are in the third one;
2) or the symbols $r_1$, $r_4$, $r_7$ are located in one horizontal of $R(3\times 4)$ board, the symbols $r_2$, $r_5$, $r_8$ are located in another horizontal, and the symbols $r_3$, $r_6$, $r_9$ are in the third one.
The claim is proved by moderately nasty brute force with the help of claim 1.
Put rooks in all cells~$r_i$ of $R(3\times 4)$ board (we put in a cell as many rooks as there are symbols~$r_i$ in it). Each cell $i$ on $L(3\times 3)$ board determines four $i$-weak cells which are located in two rows and two columns.
Claim 3. Each cell on $R(3\times 4)$ board (let it contain a label $i$)
is under attack when the rook stands on cells labeled $r_j$, where $j$ is $i$-weak number. Two of this ``dangerous'' cells are located in the same horizontal, and the other two belong to another horizontal.
The claim follows from claim 2. This means that we put several rooks on some cells.
Now we prove that no winning strategy with these properties exist. By claim 2, the first horizontal of $R(3\times 4)$ board contains at most three labels $r_i$. Therefore the first horizontal $R(3\times 4)$ board contains an ``empty'' cell, i.\,e.~the cell, containing no symbols~$r_i$, denote it by~$a$. For definiteness let it be in the fourth vertical (fig.~\ref{fig:33vs34}).
By claim 3, four rook's attacks are directed to this cell, and two of these four rooks are in one horizontal, and another two are in another horizontal. This means that two rooks are certainly located in one of the cells of the fourth vertical. For definiteness let this cell be located in the second horizontal.
By claim 2, the second horizontal contains three rooks in total, and we have established that two of them are in one cell.
Therefore, there are two ``empty'' cells in the second horizontal. Let us choose the one above which at most one rook stands in the first horizontal. Let this cell be in the first column, denote it~$b$. Four rook attacks, from two pairs of rooks located in two horizontals, are directed to the chosen cell. One pair of rooks is obviously located in the second horizontal, and another pair is located in the third horizontal (there is at most one rook above cell~$b$ in the first horizontal). Now we see that one of the cells in the third horizontal, in the second or in the third vertical, cannot gather four rook's attacks from two different horizontals, a contradiction.
\smallbreak
{\bf Lose6)}
Assume that the chess players have a winning strategy. The strategy of chess player~$\mathcal R$ is given in standard notations by placement of the four symbols $r_1$, $r_2$, $r_3$, $r_4$ on $R(5\times 5)$ board. There is at least one cell $Q$ on $R(5\times 5)$ board, not belonging to any of four crosses determined by these symbols. The strategy of chess player~$\mathcal L$ is specified by writing a number from 1 to 4 in each cell of $R(5\times 5)$ board. Without loss of generality cell~$Q$ is labeled by~1.
Let the referee put the kings on cell~$Q$ on $R(5\times 5)$ board and on cell~4 on $L(2\times 2)$ board. Then player~$\mathcal L$ puts his rook on cell~1 of $L(2\times 2)$ board, and player~$\mathcal R$ puts the rook on cell~$r_4$ of $R(5\times 5)$ board. None of the rooks attacks the king. The chess players lost.
The theorem is completely proved.
\end{proof}
\subsection{Queen check}
Consider a variation of the game, where the players put queens instead of rooks. Call this game \emph{Queen check}.
\begin{lemma}\label{lemQueen4545}
The players win in Queen check game on boards $L(4\times 5)$, $R(4\times 5)$.
\end{lemma}
\begin{proof}
Paint the cells of both boards as shown in fig.~\ref{fig:queen45vs45}, a). Let
both chess players put their queens only on the cells marked with queens, and let the first chess player play under the assumption ``the kings are on cells
of the same color'', and the second under the assumption ``the
kings are located on cells of different colors''.
\end{proof}
However we can use also usual chess coloring instead of ``exotic'' coloring
as above. Indeed, the queen on the cell~$c2$ holds under attack all the
cells of the same color in chessboard coloring! And the same is true for the cell $c$3,
fig.~\ref{fig:queen45vs45}, b).
\begin{figure}
\caption{Queen check on $4\times5$ boards.}
\label{fig:queen45vs45}
\end{figure}
The next statement has been found by computer, the proof was found by
N.~Kononenko.
\begin{lemma}
The players win in ``Check by queen'' game on boards $L(4\times 4)$,
$R(5\times 5)$
\end{lemma}
\begin{proof}
Specify strategy of the chess players. Label $R(5\times 5)$ board as in
fig.~\ref{fig:queen4x4and5x5}\,a). Seeing the fellow's king on the cell with
label~$j$, chess player~$\mathcal L$ puts his queen on $L(4\times 4)$ board in
the cell, labeled by number~$j$, fig.~\ref{fig:queen4x4and5x5}\,b). So, chess
player~$\mathcal L$ uses only four positions for his queen. The numbers in the
cells of $L(4\times 4)$ board in fig.~\ref{fig:queen4x4and5x5}\,c) show, from
which positions the queen of chess player~$\mathcal L$ does not attack this
cell. For example, the numbers 1 and 2 in the lower left corner mean that the
lower left corner cell of $L(4\times 4)$ board is not under attack by the
queen located at 1-st and in 2-nd positions, shown in
fig.~\ref{fig:queen4x4and5x5}\,b), and ``--'' means that the cell is under
attack from all positions.
\begin{figure}
\caption{Queen check on $L(4\times 4)$ and $R(5\times 5)$ boards.}
\label{fig:queen4x4and5x5}
\end{figure}
Seeing the king on $L(4\times 4)$ board, chess player~$\mathcal R$ with help
of fig.~\ref{fig:queen4x4and5x5}\,c) immediately understands, from which
``unfavorable'' positions the queen of his partner cannot put the king in
check. Therefore he must put his queen on $R(5\times 5)$ board so that it
attacks all the cells, sending the queen of chess player~$\mathcal L$ to a
unfavorable position.
For unfavorable positions 1, 2 it is possible to put the queen on cell~{\it
b}3; \ for 1, 4 on cell~{\it c}4; \ for 2, 4 on cell~{\it c}2; \ for 3 on
cell~{\it c}3.
\end{proof}
We found by computer that in Queen check game the chess players lose on boards $L(3\times 4)$, $R(7\times 7)$ and $L(4\times 5)$, $R(5\times 5)$.
The following statement was suggested to us by S.~Berlov. It generalizes the argument of lemma~\ref{lemQueen4545}.
\begin{lemma}
Consider a variation of Queen check game in which five chess players
are located so that each of them sees the boards of the others but does not
see his own board. All the boards have size $11\times 11$. As in the initial
game, the referee puts one king on each board, and the chess players
simultaneously point to the cells on their own boards, where the queen has to be put.
The chess players win in this game.
\end{lemma}
\begin{proof}
On $11\times 11$ board, one can place 5 queens that keep all the cells under attack (for example, $b$4, $d$10, $f$6, $h$2 and~$j$8). During the game, the
chess players will put their queens on these 5 positions only.
We number these positions from 0 to 4. In each cell of $11\times 11$
board, we place the number of any of these queens, that holds
this cell under attack. We assume that this labelling is applied to all boards. When the referee puts the
king on some cell of a board, the label of this cell is called the \emph{weight}
of the king.
The strategy of chess players is as follows: let the $k$-th player check the
hypothesis that the sum of weights of all kings equals $k$ modulo~5. Each
player sees all the kings except his own and calculates the weight of
his king at which the hypothesis is correct. Then the player puts his queen on the
position whose number equals the calculated weight.
\end{proof}
\subsection{Check with other chess pieces}
In the games Bishop check or Knight check the check declaration means
that the chess player guesses the color of the cell in which the king is standing. Therefore the chess
players can win in these games only on small boards, where all cells of each
color can be attacked from one point.
Consider the game King check (the referee puts on the board a ``good king'' and the chess
player puts on the board an ``evil king'' who must put the good one in check).
\begin{theorem}
For the King check game on the boards $L(a\times b)$, $R(c\times d)$, denote by
$\ell$ the number of elements in the maximal set of cells on $L(a\times b)$
board, such that no two cells can be under attack of the same king. Define
a number $r$ for $R(c\times d)$ board analogously. Then the chess players win if and only if $\ell= r=2$ or one of the numbers $\ell$, $r$ equals~1.
\end{theorem}
\begin{proof}
Choose sets $S_L$, $S_R$ of cells on $L(a\times b)$ and $R(c\times d)$ boards
so that no two cells in these sets can be under attack of the same king,
$|S_L|=\ell$, $|S_R|=r$. Let the referee make things easier for the chess players by promising that he will place
the kings on the cells of the sets $S_L$ and $S_R$ only. Since the ``evil king''
cannot attack two cells simultaneously, we may assume that the chess players
just try to guess where the ``good king'' stands, or, which is the same, to
guess hat colors in the {\sc Hats\ } game on the graph $P_2$ with hatnesses $\ell$, $r$, which
is possible only if $\ell= r=2$ or when one of the numbers $\ell$, $r$ equals~1.
It remains to show that in these cases the chess players win. For
$\ell=1$ or $r=1$ this is obvious. The maximal possible board for $\ell=r=2$ is
$3\times 6$, because no two corner cells in $4\times 4$ board as well as no two cells
of $1\times 7$ board with coordinates 1, 4, 7 are attacked by the same king.
On $3\times 6$ board, the chess players easily win by splitting the board into two
halves of sizes $3\times 3$ and checking the hypotheses ``good kings are in the
same/different halves''.
\end{proof}
\section{Analysis of {\sc Hats\ } game on a cycle}
According to results of W.~Szczechla~\cite{cycle_hats}, the sages have some
difficulties in the game on cycle $C_n$ already in the case, when all hatnesses are
equal to 3. In that case, the win of the sages is possible only if $n=4$ or $n$ is divisible by
3. If one of the sages on any cycle has hatness 4 (and all others have hatness 3), the sages lose~\cite[corollary
8]{cycle_hats}.
The following theorem gives the list of games on cycles containing a vertex of hatness 2,
where the sages win.
\begin{theorem}\label{thm:cycle-win}
Let $G$ be cycle $C_n$, and $h$ be the hat function such that $2
\leq h(v) \leq 4$ for all vertices $v$. Let $A\in V(G)$, $h(A) = 2$. Then the game ${\mathcal G}=\langle {G, h} \rangle$ is winning in the following cases.
\begin{enumerate}
\item $n=3$;
\item there is one more vertex with hatness $2$ other than $A$;
\item both neighbors of vertex $A$ have hatness $3$;
\item one neighbor of $A$ and the vertex following it are of hatness $3$.
\end{enumerate}
\end{theorem}
\begin{proof}
If $h_1(v)\leq h_2(v)$ for all $v\in V(G)$, then the winning in $\langle
{G, h_2} \rangle$ implies the winning in $\langle {G, h_1} \rangle$, or, which is
the same, the losing in $\langle {G, h_1} \rangle$ implies the losing in $\langle {G,
h_2} \rangle$. This is obvious, because the winning strategy for $\langle {G, h_2}
\rangle$ can be used as a winning strategy for $\langle {G, h_1} \rangle$, in
which instead of ``non-existing'' colors the sages say any
``existing'' color. Therefore, to prove the theorem, it suffices to check the winning
for the cases when the hat function is ``maximal'' (in the sense of definition in subsection~\ref{subsubsec:product}).
For each statement of the theorem, we give below the maximal hat functions and
the proofs that the sages win. We recall that $h_4^{A2B2}$ is the hat
function whose values are equal to 4 in all the vertices other than $A$ and $B$,
where $h(A)=2$, $h(B)=2$.
\begin{enumerate}
\item $C_3$ with hatnesses 2, 4, 4. The sages win by
corollary~\ref{cor:triangle244-win}.
\item Game $\langle {C_n, h_4^{A2B2}} \rangle$ is winning, because it
contains a path with hatnesses $2,4,\dots,4,2$, where the sages win by
corollary~\ref{cor:path2442}.
\item The game $\langle {C_n, h_4^{A2B3C3}} \rangle$, where $B$ and $C$ are the
neighbors of $A$ is winning by corollary~\ref{cor:cycle323}.
\item The game $\langle {C_n, h_4^{A2B3C3}} \rangle$, where $A$, $B$, $C$ are
three consequent vertices, is winning by corollary~\ref{cor:cycle_233}.
\end{enumerate}
\end{proof}
\begin{conjecture}
Let $G$ be a cycle $C_n$ and let $h$ be a hat function such that $2
\leq h(v) \leq 4$ for every vertex $v$. Let $A\in V(G)$ be
such that $h(A) = 2$. Then the game ${\mathcal G}=\langle {G, h} \rangle$ is winning only
in the cases listed in theorem~\ref{thm:cycle-win}.
\end{conjecture}
To prove the conjecture it suffices to prove that the following two games are losing.
\begin{enumerate}
\item $\langle {C_n, h_3^{A2B4C4}} \rangle$ $(n \geq 4)$, where sages $B$
and $C$ are the neighbors of sage $A$. The loss in this game for $n=4$ is
proved in theorem~\ref{thm:Rook_chess},~\ref{itm:lose2344} in the language
of Rook check game. For $n\leq 7$ the loss was checked on computer by
reduction to SAT~\cite{Kokhas2018}. This allows us to assume
that for $n\geq 8$ the game is losing too, but we have no proof of this fact.
\item $\langle {G_n, h_3^{A2B4C3D4}} \rangle$ $(n \geq 4)$, where the sages $B$
and $C$ are the neighbors of sage $A$, and sage $D\ne A$ is the second neighbor of
sage $C$. The loss of this game for $n=4$ is proved in
theorem~\ref{thm:Rook_chess},~\ref{itm:lose2434}. For $n\leq 7$ the loss was
checked by computer. This allows us to assume that for $n\geq 8$ the game
is losing, but we still have no proof of this fact too.
\end{enumerate}
\section{Conclusion}
In the present paper we certainly prove that the variation of {\sc Hats\ } game in
question is a real gem of combinatorics. The firework of ideas that arise when considering
different approaches to the game is mesmerizing and awakens the imagination. At the
same time, the computational complexity of the game prevents one from putting forward hasty
conjectures and effectively protects the game from a complete analysis.
\printbibliography
\end{document} |
\begin{document}
\newtheorem{thm}{Theorem}
\newtheorem{lem}[thm]{Lemma}
\newtheorem{rem}[thm]{Remark}
\newtheorem{prop}[thm]{Proposition}
\newtheorem{cor}[thm]{Corollary}
\newtheorem{ex}[thm]{Example}
\newcommand{\ri}{r.~i.}  % NOTE(review): original `\newcommand{r.~i.}{r.~i.}` is invalid LaTeX (command name must be a control sequence); macro name reconstructed — confirm against uses later in the file
\title[]{Khintchine inequality and Banach-Saks type properties in rearrangement-invariant spaces}
\date{}
\author[]{F.~A.~Sukochev and D. Zanin}
\thanks{Research supported by
the Australian Research Council.}
\keywords{$p$-Banach Saks type properties, rearrangement-invariant
spaces, Khintchine inequality, Kruglov property}
\subjclass{46E30, 46B20}
\date{}
\begin{abstract}
{\it We study the class of all rearrangement-invariant (=r.i.)
function spaces $E$ on $[0,1]$ such that there exists
$0<q<1$ for which $ \Vert \sum_{_{k=1}}^n\xi_k\Vert _{E}\leq
Cn^{q}$, where $\{\xi_k\}_{k\ge 1}\subset E$ is an arbitrary
sequence of independent identically distributed symmetric random
variables on $[0,1]$ and $C>0$ does not depend on $n$. We
completely characterize all Lorentz spaces having this property
and complement classical results of Rodin and Semenov for Orlicz
spaces $exp(L_p)$, $p\ge 1$. We further apply our results to the
study of Banach-Saks index
sets in r.i. spaces.
}
\end{abstract}
\maketitle
\section{Introduction}
A classical result of Rodin and Semenov (see \cite{RS} or \cite[
Theorem 2.b.4]{LT-II}) says that the sequence of Rademacher
functions $\{r_k\}_{k\ge 1}$ on $[0,1]$ in a r.i. space $E$ is
equivalent to the unit vector basis of $l_2$ if and only if $E$
contains (the separable part of) the Orlicz space $L_{N_2}(0,1)$
(customarily denoted as $exp(L_2)$) where $N_2(t)=e^{t^2}-1$.
Here, $\{r_k\}_{k\ge 1}$ may be thought of as a sequence of
independent identically distributed centered Bernoulli variables
on $[0,1]$. A quick analysis of the proof (see e.g.
\cite[p.134]{LT-II}) shows that the embedding $exp(L_2)\subseteq
E$ is established there under a weaker assumption that
$\{r_k\}_{k\ge 1}$ is $2$-Banach-Saks sequence in $E$, that is $
\Vert \sum_{_{k=1}}^nr_k\Vert _{_{E}}\leq Cn^{1/2}$, where $C>0$
does not depend on $n\ge 1$. The main object of study in the
present article is the class of all r.i. spaces $E$ such that
there exists $0<q<1$ for which
\begin{equation}\label{mainzero} \Vert \sum_{_{k=1}}^n\xi_k\Vert _{E}\leq Cn^{q},
\end{equation}
where $\{\xi_k\}_{k\ge 1}\subset E$ is an arbitrary sequence of
independent identically distributed symmetric random variables on
$[0,1]$ and $C>0$ does not depend on $n$. We completely
characterize all Lorentz spaces from this class in Corollary
\ref{lorentz alternative} below. In Theorem \ref{Marc} we obtain
sharp estimates of type \eqref{mainzero} for the Orlicz spaces
$exp(L_p)=L_{N_p}(0,1)$, $1\leq p<\infty$ where $N_p(t)=e^{t^p}-1$
complementing results of \cite{RS} (see also exposition in
\cite{D}). Our results have also a number of interesting
implications to the study of Banach-Saks type properties in r.i.
spaces.
Recall that a bounded sequence $\{x_n\}\subset E$ is called a
p-BS-sequence if for all subsequences $\{y_k\}\subset\{x_n\}$ we
have
\[
\sup\limits_{m\in
N}m^{-\frac{1}{p}}\Big\|\sum\limits_{k=1}^{m}y_k\Big\|_E<\infty.
\]
We say that $E$ has the p-BS-property and we write $E\in BS(p)$
if each weakly null sequence contains a p-BS-sequence. The set
\[
\Gamma(E)=\{p:\:p\geq 1,\:E\in BS(p)\}
\]
is said to be the index set of $E$; it is of the form
$[1,\gamma]$ or $[1,\gamma)$ for some $\gamma\geq 1$.
If, in the preceding definition, we replace all weakly null
sequences by weakly null sequences of independent random variables
(respectively, by weakly null sequences of pairwise disjoint
elements; by weakly null sequences of independent identically
distributed random variables), we obtain the set $\Gamma_{\rm
i}(E)$ (respectively, $\Gamma_{\rm d}(E)$, $\Gamma_{\rm iid}(E)$).
The general problem of describing and comparing the sets
$\Gamma(E)$, $\Gamma_{\rm i}(E)$, $\Gamma_{\rm iid}(E)$ and
$\Gamma_{\rm d}(E)$ in various classes of r.i. spaces was
addressed in \cite{SeSu-CR, DoSeSu2004, SeSu, AsSeSu2005,
new-16-Sem-Suk, AsSeSu2007}. In particular, it is known
\cite{AsSeSu2005} that $1\in\Gamma(E)\subseteq \Gamma_{\rm
i}(E)\subseteq \Gamma_{\rm iid}(E)\subseteq [1,2]$ and
$\Gamma_{\rm i}(E)\subseteq \Gamma_d(E)$ for any r.~i.\ space $E$.
Moreover, the sets $\Gamma(E)$ and $\Gamma_{\rm i}(E)$ coincide in
many cases but not always. For example, $\Gamma(L_p)=\Gamma_{\rm
i}(L_p)=\Gamma_{\rm iid}(L_p)$, $1<p<\infty$ (see e.g.
\cite[Corollary 4.4 and Theorem 4.5]{new-16-Sem-Suk} and also
Theorem \ref{firstmain} below), whereas for the Lorentz space
$L_{2,1}$ generated by the function $\psi(t)=t^{1/2},$ we have
$\Gamma(L_{2,1})=[1,2)$ and $\Gamma_{\rm i}(L_{2,1})=[1,2]$
(\cite[Theorem~5.9]{new-16-Sem-Suk} and
\cite[Proposition~4.12]{AsSeSu2005}). It turns out that these two
situations are typical \cite[Theorem 9]{SeSu}: under the
assumption that $\Gamma(E)\ne \{1\}$, we have either $\Gamma_{\rm
i}(E)\setminus\Gamma(E)=\emptyset$ or else $\Gamma_{\rm
i}(E)\setminus\Gamma(E)=\{2\}$.
The present paper may also be considered as a contribution to the
study of the class of all r.i. spaces $E$ such that $\Gamma_{\rm
iid}(E)=\Gamma_{\rm i}(E)$. We prove a general theorem (see
Theorem \ref{firstmain} below) that $\Gamma_{\rm
iid}(E)=\Gamma_{\rm i}(E)$ if and only if $\Gamma_{\rm
iid}(E)\subseteq \Gamma_{\rm d}(E)$. It is easy to see that every
Lorentz space $\Lambda(\psi)$ satisfies the latter condition and,
using the main result described above, we give a complete
characterization of all Lorentz spaces $E=\Lambda(\psi)$ such that
$\Gamma_{\rm iid}(E)\neq \{1\}$ (see Theorem \ref{mainsecond} and
Corollary \ref{mainsecond_add}).
It is also pertinent to note here that if one views the Rademacher
system as a special example of sequences of independent mean zero
random variables, then a significant generalization of Khintchine
inequality is due to W.B. Johnson and G. Schechtman
\cite{JoSch1989}. They introduced the r.i. space $Z_E^2$ on
$[0,\infty)$ linked with a given r.i. space $E$ on $[0,1]$ and
showed that any sequence $\{f_k\}_{k=1}^\infty$ of independent
mean zero random variables in $E$ is equivalent to the sequence of
its disjoint translates $\{\bar
f_k(\cdot):=f_k(\cdot-k+1)\}_{k=1}^\infty$ in $Z_E^2$, provided
that $E$ contains an $L_p$-space for some $p<\infty$. This study
was taken further in \cite{Br1994,
AsSeSu2005,AsSu2006-1,AsSu2006-2}, where the connection between
this (generalized) Khintchine inequality and the so-called Kruglov
property was established (we explain the latter property in the
next section). We show the connection between the class of all
r.i. spaces with Kruglov property and the estimates
\eqref{mainzero} in Theorem \ref{Kruglov}. Recently, examples of
r.i. spaces $E$ such that $\Gamma(E)= \{1\}$ but $\Gamma_{\rm
i}(E)\ne \{1\}$ have been produced in \cite{AsSeSu2007} under the
assumption that $E$ has the Kruglov property. Our approach in
this paper complements that of \cite{AsSeSu2007}; in particular,
we present examples of Lorentz and Marcinkiewicz spaces $E$ such
that $\Gamma_{\rm i}(E)=\Gamma_{\rm iid}(E)\neq \{1\}$ and which
do not possess the Kruglov property.
Finally, we show that the equality $\Gamma_{\rm
iid}(E)=\Gamma_{\rm i}(E)$ fails when $E$ is a classical space
$L_{pq}$, $1<q<p<2$.
\section{Definitions and preliminaries}
\subsection{Rearrangement-invariant spaces}
A Banach space $(E,\Vert \cdot\Vert _{_{E}})$ of real-valued
Lebesgue measurable functions (with identification $m$-a.e.) on
the interval $[0,1]$ will be called {\it rearrangement-invariant}
(briefly, r.~i.) if
\begin {enumerate}
\item[(i).] $E$ is an ideal lattice, that is, if $y\in E$, and if
$x$ is any measurable function on $[0,1]$ with $0\leq \vert x\vert
\leq \vert y\vert $ then $x\in E$ and $\Vert x\Vert _{_{E}}
\leq \Vert y\Vert _{_{E}};$
\item[(ii).] $E$ is rearrangement invariant in the sense that if
$y\in E$, and if $x$
is any measurable function on $[0,1]$ with $x^*=y^*$, then $x\in
E$ and $\Vert x\Vert _{_{E}} = \Vert y\Vert _{_{E}}$.
\end{enumerate}
\noindent Here, $m$ denotes Lebesgue measure and $x^*$ denotes
the non-increasing, right-continuous rearrangement of $x$ given by
$$
x^{*}(t)=\inf \{~s\ge 0:m (\{u\in [0,1]:\,\mid x(u)\mid >s\})\le
t~\},\quad t>0.
$$
For basic properties of r.i. spaces, we refer to the monographs
\cite{KPS,LT-II}. We note that for any r.i. space $E$ we have:
$L_\infty [0,1]\subseteq E\subseteq L_1[0,1].$ We will also work
with a r.i. space $E(\Omega,{\mathcal {P}})$ of measurable
functions on a probability space $(\Omega,{\mathcal {P}})$ given
by
$$
E(\Omega,{\mathcal {P}}):=\{f\in L_1(\Omega,{\mathcal {P}}):f^*\in
E\}, \quad \|f\|_{E(\Omega,{\mathcal {P}})}:=\|f^*\|_E.
$$
Here, the decreasing rearrangement $f^*$ is calculated with
respect to the measure ${\mathcal {P}}$ on $\Omega$.
Recall that for $0<\tau<\infty$, the dilation operator
$\sigma_\tau$ is defined by setting
$$\sigma_\tau x(t)=\begin{cases}
x(t/\tau),\;0\leq t\leq\min(1,\tau) \\
0,\; \min(1,\tau)<t\leq 1.
\end{cases}
$$ The dilation operators $\sigma_\tau$ are bounded in every r.i. space
$E$. Denoting the space of all linear bounded operators on a
Banach space $E$ by ${\mathcal L}(E)$, we set
\[
\alpha_E:=\lim\limits_{\tau\to
0}\frac{\ln\|\sigma_\tau\|_{{\mathcal L}(E)}}{\ln\tau},\quad
\beta_E:=\lim\limits_{\tau\to
\infty}\frac{\ln\|\sigma_\tau\|_{{\mathcal L}(E)}}{\ln\tau}.
\]
The numbers $\alpha_E$ and $\beta_E$ belong to the closed interval
$[0,1]$ and are called the Boyd indices of $E$.
The K\"othe dual $E^\times $ of an r.i. space $E$ on $[0,1]$
consists of all measurable functions $y$ for which
$$
\Vert y\Vert _{_{E^{\times }}}:= \sup \Big\{\int _0^1\vert
x(t)y(t)\vert\,dt:\ x\in E,\ \Vert x \Vert _{_{E}}\leq 1\,\Big\}
<\infty.
$$
If $E^*$ denotes the Banach dual of $E$, then $E^\times \subset
E^{*}$ and $E^\times =E^{*}$ if and only if $E$ is separable. An
r.i. space $E$ is said to have the {\it Fatou property} if
whenever $\{f_n\}_{n=1}^\infty\subseteq E$ and $f$ measurable on
$[0,1]$ satisfy $f_n\to f$ a.e. on $[0,1]$ and $\sup _n\Vert
f_n\Vert _{_{E }} <\infty $, it follows that $f\in E$ and $\Vert
f\Vert _{_{E}}\leq \liminf _{n\to \infty }\Vert f_n\Vert _{_{E}}$.
It is well-known that an r.i. space $E$ has the Fatou property if
and only if the natural embedding of $E$ into its K\"othe bidual
$E^{\times\times}$ is a surjective isometry.
Let us recall some classical examples of r.i. spaces on $[0,1]$.
Denote by $\Psi$ the set of all increasing continuous concave
functions on $[0,1]$ with
$\varphi(0)
=0$. Each function $\varphi\in\Psi$
generates the Lorentz space $\Lambda(\varphi)$ (see e.g.
\cite{KPS}) endowed with the norm
\[\|x\|_{\Lambda(\varphi)}=\int\limits_0^1 x^*(t)d\varphi(t)\]
and the Marcinkiewicz space $M(\varphi)$ endowed with the norm
\[
\|x\|_{M(\varphi)}=\sup\limits_{0<\tau\leq
1}\frac{1}{\varphi(\tau)}\int\limits_0^\tau x^*(t)dt.
\]
The space $M(\varphi)$ is not separable, but the space
\[\left \{x\in M(\varphi):\:\lim\limits_{\tau\to 0}\frac{1}{\varphi(\tau)}
\int\limits_0^\tau x^*(t)dt=0\right \}\] endowed with the norm
$\|\cdot \|_{M(\varphi)}$ is a separable r.i. space (denoted
further as $(M(\varphi))_0$), which coincides with the closure of
$L_\infty$ in $(M(\varphi),\|\cdot \|_{M(\varphi)})$.
It is well known (see e.g. \cite[Section II.1]{KPS}) that
$$
\beta_{M(\varphi)}=1\Longleftrightarrow
\alpha_{\Lambda(\varphi)}=0\Longleftrightarrow\forall t\in
(0,1)\exists (s_n)_{n\ge1} \subseteq (0,1)\ :\
\lim_{n\to\infty}\frac{\varphi(ts_n)}{\varphi(s_n)}=1;
$$
$$
\alpha_{M(\varphi)}=0\Longleftrightarrow
\beta_{\Lambda(\varphi)}=1\Longleftrightarrow\forall
\tau\ge1\exists (s_n)_{n\ge1} \subseteq (0,1)\ :\
\lim_{n\to\infty}\frac{\varphi(s_n\tau)}{\varphi(s_n)}=\tau.
$$
If $M(t)$ is a convex increasing function on $[0,\infty)$ such
that $M(0)=0$, then
the Orlicz space $L_M$ on $[0,1]$ (see e.g. \cite{KPS, LT-II}) is
a r.i. space of all $x\in L_1[0,1]$ such that
\[\|x\|_{L_M}:=\inf\{\lambda :\lambda >0,\;\int\limits_{0}^{1}
M(|x(t)|/\lambda)dt\leq 1\}<\infty.\] The function
$N_p(u)=e^{u^p}-1$ is convex for $p\geq1$ and is equivalent to a
convex function for $0<p<1$ (see e.g. \cite{Br1994, AsSu2005}).
The space $L_{N_p}$, $0<p<\infty$ is customarily denoted $\exp
(L_p)$.
\subsection{The Kruglov property in r.i.\ spaces}
Let $f$ be a random variable on $[0,1]$. By $\pi(f)$ we denote the
random variable $\sum_{i=1}^N f_i$, where $f_i$'s are independent
copies of $f$ and $N$ is a Poisson random variable with parameter
$1$ independent of the sequence $\{f_i\}$.
{\bf Definition.}\quad {\sl An r.i. space $E$ is said to have the
Kruglov property, if and only if $f\in E\Longleftrightarrow
\pi(f)\in E.$}
This property has been studied by M. Sh. Braverman \cite{Br1994}
which uses some earlier probabilistic constructions of V.M.
Kruglov \cite{K} and in \cite{AsSu2005,AsSu2006-1, AsSu2006-2} via
an operator approach. It was proved in \cite{AsSu2006-2}, that an
r.i. space $E$ satisfies the Kruglov property if and only if for
every sequence of independent mean zero functions $\{f_n\}\in E$
the following inequality holds
\begin{equation}\label{independent to disjoint}
||\sum_{k=1}^nf_k||_E\leq const\cdot
||\sum_{k=1}^n\overline{f}_k||_{Z^2_E}.
\end{equation}
Here, $Z^2_E$ is an r.i. space on $(0,\infty),$ equipped with a
norm
$$||x||=||x^*\chi_{[0,1]}||_E+||x^*\chi_{[1,\infty)}||_{L_2}$$
and the sequence $\{\bar f_k\}_{k=1}^n\subseteq Z^2_{X}$ is a
sequence of disjoint translates of $\{f_k\}_{k=1}^n\subseteq X,$
that is, $\bar f_k(\cdot)=f_k(\cdot-k+1)$. Note that inequality
\eqref{independent to disjoint} has been proved earlier in
~\cite{JoSch1989} (see inequality~(3) there) under the more
restrictive assumption that $E\supseteq L_p$ for some $p<\infty$.
Clearly, the latter assumption holds if $\alpha_E>0$.
\section{Operators $A_n$, $n\ge 0$}
Let $\Omega$ be the segment $[0,1],$ equipped with the Lebesgue
measure.
Let $E$ be an arbitrary rearrangement invariant space on $\Omega.$
For every $n\geq 1$, we consider the operator
$A_n:E(\Omega)r.~i.ghtarrow
E(\underbrace{\Omega\times\Omega\times\cdots\times\Omega}_{2n\
times})$ given by
\begin{multline*}
A_nf=(f\otimes r)\otimes(1\otimes1)\otimes\cdots\otimes(1\otimes 1)+(1\otimes1)\otimes(f\otimes r)\otimes\cdots\otimes(1\otimes 1)+\cdots\\
\cdots +(1\otimes1)\otimes \cdots \otimes(1\otimes1)\otimes(f\otimes r),
\end{multline*}
where $r$ is centered Bernoulli random variable. For brevity, we
will also use the following notation
$$A_nf=(f\otimes r)_1+(f\otimes r)_2+\cdots+(f\otimes r)_n.$$
We set $A_0=0.$
The following theorem is the main result of the present section.
\begin{thm}\label{alternative}
The following alternative is valid in an arbitrary r.i.\ space $E.$
\begin{enumerate}
\item[(i).] $||A_n||_{{\mathcal L}(E)}=n$ for every natural $n;$
\item[(ii).] There exists a constant $\frac12\leq q<1,$ such that
$||A_n||_{{\mathcal L}(E)}\leq const\cdot n^q$ for all
$n\in\mathbb{N}.$
\end{enumerate}
\end{thm}
\begin{proof}
Since for all $n,m\geq0,$ we have
\begin{equation}\label{additive}
||A_{n+m}||_{{\mathcal L}(E)}\leq ||A_n||_{{\mathcal
L}(E)}+||A_m||_{{\mathcal L}(E)},
\end{equation}
and since $||f\otimes r||_E=||f||_E,$ we infer that
$||A_n||_{{\mathcal L}(E)}\leq n.$
Observing that $A_{mn}(f)$ and $A_m(A_n(f))$ are identically
distributed, we have
$$||A_{mn}(f)||_E=||A_m(A_n(f))||_E,\quad f\in E(\Omega).$$
Here, we identify the element $A_nf\in
E(\Omega\times\cdots\times\Omega)$ with an element from
$E(\Omega)$ via a measure preserving transformation
$\underbrace{\Omega\times\cdots\times\Omega}_{2n\
times}\rightarrow\Omega.$ Hence,
\begin{equation}\label{multiplicative}||A_{mn}||_{{\mathcal L}(E)}\leq ||A_m||_{{\mathcal L}(E)}
\cdot||A_n||_{{\mathcal L}(E)}.\end{equation} Thus, we have the
following alternative:
\begin{enumerate}
\item[(i).] $||A_n||_{{\mathcal L}(E)}=n$ for every natural $n;$
\item[(ii).] There exists $n_0\geq2,$ such that
$||A_{n_0}||_{{\mathcal L}(E)}<n_0.$
\end{enumerate}
To finish the proof of Theorem \ref{alternative}, we need only to
consider the second case. Suppose there exists a constant
$\frac12\leq q<1,$ such that $||A_{n_0}||_{{\mathcal L}(E)}\leq
n_0^q.$ By \eqref{multiplicative} we have
$$||A_{n_0^m}||_{{\mathcal L}(E)}\leq ||A_{n_0}||_{{\mathcal L}(E)}^m\leq n_0^{qm},\ \forall m\in\mathbb{N}.$$
Every $n$ can be written as $\sum_{i=0}^ka_in_0^i,$ where $0\leq
a_i\leq n_0-1$ and $a_k\neq0.$ So, using \eqref{additive} and
\eqref{multiplicative}, we have
$$||A_n||_{{\mathcal L}(E)}\leq \sum_{i=0}^k||A_{a_in_0^i}||_{\mathcal{L}(E)}\leq
\sum_{i=0}^k||A_{a_i}||_{\mathcal{L}(E)}n_0^{qi}\leq$$
$$\leq (\sum_{i=0}^k n_0^{qi})\max\limits_{1\leq s\leq n_0}\{||A_s||_{{\mathcal L}(E)}\}
\leq \frac{n_0^q\cdot n_0^{qk}}{n_0^q-1}\max\limits_{1\leq s\leq
n_0}\{||A_s||_{{\mathcal L}(E)}\}.$$ Now, using the fact that
$q\geq\frac12$ and $n_0\geq2,$ we have $n_0^q-1\geq(\sqrt{2}-1).$
So,
$$\frac1{n_0^q-1}\leq \sqrt{2}+1.$$
Since $n_0^k\leq n,$ we have
$$||A_n||_{{\mathcal L}(E)}\leq (\sqrt{2}+1)\cdot n_0^q
\cdot\max\limits_{1\leq s\leq n_0}\{||A_s||_{{\mathcal
L}(E)}\}\cdot n_0^{qk}\leq const\cdot n^q.$$ This proves the
theorem. \end{proof}
\begin{rem}\label{connection}
We record here an important connection between the estimates given
in Theorem \ref{alternative}(ii) above and the set $\Gamma_{\rm
iid}(E)$, where the r.i. space $E$ is separable. For $\frac12\leq
q\leq 1$ the following conditions are equivalent
\begin{itemize}
\item[(i)] $ ||A_n||_{{\mathcal L}(E)}\leq const\cdot n^q$, $n\ge
1$; \item[(ii)]\quad $ \frac{1}{q} \in \Gamma_{\rm iid}(E)$.
\end{itemize}
Indeed, the implication $(i)\Rightarrow(ii)$ is obvious. Now, let
the probability space $(\Omega, \mathcal{P})$ be the infinite
direct product of measure spaces $([0,1],m)$. Fix $f\in E$ and
consider the sequence $\{(f\otimes r)_n\}_{n\ge 1}\subset
E(\Omega, \mathcal{P})$. It follows from \cite[Lemma 3.4]{SeSu}
that this sequence is weakly null in $E(\Omega, \mathcal{P})$.
Since the spaces $E$ and $E(\Omega, \mathcal{P})$ are isometric,
we obtain the implication $(ii)\Rightarrow(i)$ via an application
of the uniform boundedness principle.
\end{rem}
We complete this section with an estimate of $\|A_n\|_{{\mathcal
L}(E)}$, $n\ge 1$ in general r.i. spaces with the Kruglov
property.
\begin{thm}\label{Kruglov} Let $E$ be a separable r.i. space.
If $\beta_E<1$ and if $E$ satisfies the Kruglov property, then
$||A_n||_{{\mathcal L}(E)}\leq const\cdot n^q$ for all
sufficiently large $n\ge 1$ and any $\beta_E<q<1$.
\end{thm}
\begin{proof}
It is proved in \cite[Proposition 2.2]{AsSeSu2007} (see also
\cite[Theorem~1]{MS2002}), that for every r.i. space $E$ and an
arbitrary sequence of independent random variables
$\{f_k\}_{k=1}^n$ $(n\ge 1)$ from $E$, the right hand side of
\eqref{independent to disjoint} can be estimated as
\begin{equation}\label{quad sum}
||\sum_{k=1}^n\overline{f}_k||_{Z^2_E}\leq
6||(\sum_{k=1}^nf_k^2)^{\frac12}||_E.
\end{equation}
Now, assume in addition that the sequence $\{f_k\}_{k=1}^n$ $(n\ge
1)$ consists of independent identically distributed random
variables, $\|f_1\|_E=1$. Since $\beta_E<1,$ there exist $N$ and
$\beta_E<q<1$ such that for every $k\geq N$
$||\sigma_k||_{\mathcal{L}(E)}\leq k^{q}$. Fix $\varepsilon>0$
such that $\frac12+\varepsilon<q$. By \cite[Theorem 9]{SeSu}, in
every separable r.i. space $E$, the right hand side of \eqref{quad
sum} can be estimated as
\begin{equation}\label{quad bound}
||(\sum_{k=1}^nf_k^2)^{\frac12}||_E\leq\frac{4}{\varepsilon}\max_{1\leq
k\leq
n}(\frac{n}k)^{\frac12+\varepsilon}||\sigma_k||_{\mathcal{L}(E)}:=A,\quad
n\ge 1.
\end{equation}
So, the right hand side of \eqref{quad bound} can be estimated as
\begin{multline}\label{another_bound}
A\leq\frac{4}{\varepsilon}n^{\frac12+\varepsilon}
\max\{\max_{N\leq k\leq n}k^{-\frac12-\varepsilon}k^q,
\max_{1\leq k\leq N}k^{-\frac12-\varepsilon}||\sigma_k||_{\mathcal{L}(E)}\}=\\
=\frac{4}{\varepsilon}n^{\frac12+\varepsilon}
\max\{n^{q-\frac12-\varepsilon},const\}\leq const\cdot n^q.
\end{multline}
Recalling the definition of the operator $A_n$ and combining it
with \eqref{independent to disjoint}, \eqref{quad sum},
\eqref{quad bound}, \eqref{another_bound} yields the
assertion.\end{proof}
\begin{rem}
\begin{itemize}
\item[(i)] The assumption $\beta_E<1$ in Theorem \ref{Kruglov} is necessary (see \cite[Theorem 4.2]{AsSeSu2005}).
For example, the space $E=L_1$ satisfies the Kruglov property and $\beta_E=1$. However,
$\|A_n\|_{\mathcal{L}(L_1)}= n$.
\item[(ii)] On the other hand, the condition that $E$ satisfies the Kruglov property is not optimal. In the following section, we will show that there are
Lorentz spaces which do not possess the Kruglov property and which
still satisfy the condition of Theorem \ref{alternative}(ii).
\end{itemize}
\end{rem}
\section{Operators $A_n$, $n\ge 1$ in Lorentz spaces.}
We need the following technical facts. The first lemma is elementary and its proof is omitted.
\begin{lem}\label{linearity} Let $\psi$ be a concave function on $[0,1].$ If there are points
$0\leq x_1\leq x_2\leq\cdots\leq x_n\leq1,$ such that
$$\frac1n(\psi(x_1)+\cdots +\psi(x_n))=\psi(\frac1n (x_1+\cdots +x_n)),$$
then $\psi$ is linear on $[x_1,x_n].$
\end{lem}
\begin{lem}\label{estimate of expectation} Let $x_1,\cdots,x_n$ be independent random variables. The following inequality holds:
$$\mathbb{E}(|x_1+\cdots+x_n|)\leq \mathbb{E}(|x_1|)+\cdots+\mathbb{E}(|x_n|).$$
Moreover, equality holds if and only if all the $x_i$'s are simultaneously non-negative
(or non-positive).
\end{lem}
\begin{proof} We have
$$\mathbb{E}(|x_1|)+\cdots+\mathbb{E}(|x_n|)-\mathbb{E}(|x_1+\cdots+x_n|)=\mathbb{E}(|x_1|+\cdots+|x_n|-|x_1+\cdots+x_n|)\geq0.$$
By the independence of the $x_i$'s, $i=1,2,\cdots,n,$ we have that
$sign(x_i),$ $i=1,2,\cdots,n,$ are independent random variables. If
there exists a function $x_i$ which is neither non-negative nor
non-positive, then, for every other function $x_j,$ we have
\begin{multline*}
m(x_ix_j<0)=m(sign(x_i)>0,sign(x_j)<0)
+m(sign(x_i)<0,sign(x_j)>0)\\=m(sign(x_i)>0)m(sign(x_j)<0)
+m(sign(x_i)<0)m(sign(x_j)>0)>0.
\end{multline*}
Hence, there exists a set $A$ of positive measure such that $x_ix_j<0$ almost everywhere on $A.$ This guarantees that $|x_1|+\cdots+|x_n|>|x_1+\cdots+x_n|$ almost everywhere on $A.$ This is sufficient for the strict inequality to hold.\end{proof}
We need to consider the following properties of the function $\psi.$
\begin{equation}\label{first property}a_\psi:=\limsup_{u\rightarrow0}\frac{\psi(ku)}{\psi(u)}<k.\end{equation}
\begin{equation}\label{second property}c_\psi:=\limsup_{u\rightarrow0}\frac{\psi(u^l)}{\psi(u)}<1.\end{equation}
\begin{equation}\label{general limit property}\limsup_{u\rightarrow0}\frac1{\psi(u)}\sum_{s=1}^n\psi(2^{1-s}\binom{n}{s}u^s)<n.\end{equation}
\begin{prop}\label{two limits into general} Suppose, there exist $k\geq2$ such that
\eqref{first property} holds and $l\geq2$ such that
\eqref{second property} holds. Then, \eqref{general limit
property} holds for all sufficiently large $n.$
\end{prop}
\begin{proof}
Consider the sum $\sum_{s=1}^n\psi(\binom{n}{s}2^{1-s}u^s).$ For any sufficiently large $n,$ we write
$$\sum_{s=1}^n=\sum_{s=1}^{1+[\frac{n}k]}+\sum_{s=2+[\frac{n}k]}^n.$$
Consequently, the upper limit in \eqref{general limit property}
can be estimated as
\begin{equation}\label{decomposition}\begin{split}
\limsup_{u\rightarrow0}\frac1{\psi(u)}\sum_{s=1}^n\psi(\binom{n}{s}2^{1-s}u^s)\leq\limsup_{u\rightarrow0}\frac1{\psi(u)}\sum_{s=1}^{1+[\frac{n}k]}\psi(\binom{n}{s}2^{1-s}u^s)+\\
+\limsup_{u\rightarrow0}\frac1{\psi(u)}\sum_{s=2+[\frac{n}k]}^n\psi(\binom{n}{s}2^{1-s}u^s)
\end{split}\end{equation}
Consider the first upper limit in \eqref{decomposition}. Since
$\psi$ is concave, we have
$$\sum_{s=1}^{1+[\frac{n}k]}\psi(\binom{n}{s}2^{1-s}u^s)\leq(1+[\frac{n}{k}])\psi(\frac1{1+[\frac{n}k]}\sum_{s=1}^{1+[\frac{n}k]}\binom{n}{s}2^{1-s}u^s)=$$
$$=(1+[\frac{n}{k}])\psi(\frac1{1+[\frac{n}k]}(nu+o(u)))\leq(1+[\frac{n}{k}])\psi(ku(1+o(1))).$$
Hence, the first upper limit in \eqref{decomposition} is bounded
from above by
$$(1+[\frac{n}k])a_{\psi}=n\cdot\frac{a_{\psi}}{k}+o(n).$$
Consider the second upper limit in \eqref{decomposition}. It is
clear that for all $\frac1k n\leq s\leq n$
$$\binom{n}{s}\cdot2^{1-s}\leq 2^n$$
and
$$\binom{n}{s}2^{1-s}u^s\leq 2^nu^{\frac1k n}=(2^ku)^{\frac1k n}.$$
Thus, the second upper limit in \eqref{decomposition} can be
estimated as
$$\limsup_{u\rightarrow0}\frac1{\psi(u)}\sum_{s=2+[\frac{n}k]}^n\psi(\binom{n}{s}2^{1-s}u^s)\leq n(1-\frac1k)\limsup_{u\rightarrow0}\frac{\psi((2^ku)^{\frac{n}{k}})}{\psi(u)}.$$
Substituting variable $w=2^ku$ on the right hand side, we have
$$n(1-\frac1k)\limsup_{w\rightarrow0}\frac{\psi(w^{\frac{n}{k}})}{\psi(2^{-k}w)}.$$
By the concavity of $\psi,$ we have
$\psi(2^{-k}w)\geq2^{-k}\psi(w).$ Therefore, the second upper
limit in \eqref{decomposition} is bounded from above by
$$n(1-\frac1k)2^k\limsup_{w\rightarrow0}\frac{\psi(w^{\frac{n}{k}})}{\psi(w)}.$$
Now, we observe that
\begin{equation}
\limsup_{w\rightarrow0}\frac{\psi(w^m)}{\psi(w)}\leq c_{\psi}^{\frac{\log(m)}{\log(l)}-1}.
\end{equation}
Indeed, let $l^r\leq m\leq l^{r+1},$
$$\frac{\psi(w^m)}{\psi(w)}\leq\frac{\psi(w^{l^r})}{\psi(w)}=\frac{\psi(w^{l^r})}{\psi(w^{l^{r-1}})}\cdots\frac{\psi(w^l)}{\psi(w)}$$
and
$$\limsup_{w\rightarrow0}\frac{\psi(w^m)}{\psi(w)}\leq c_{\psi}^r \leq c_{\psi}^{\frac{\log(m)}{\log(l)}-1}.$$
As $n$ tends to infinity, then, thanks to the assumption $c_{\psi}<1,$ we have $$n(1-\frac1k)2^k\limsup_{w\rightarrow0}\frac{\psi(w^{\frac{n}k})}{\psi(w)}=o(n).$$
Therefore, the upper limit in \eqref{general limit property} (see
also \eqref{decomposition}) is bounded from above by
$$\frac{a_{\psi}}k n+o(n).$$
Thus, the upper limit in \eqref{general limit property} is
strictly less than $n$ for every sufficiently large $n.$
\end{proof}
Let the function $g_n$ be defined by
\begin{equation}\label{g definition}
g_n(u):=\frac{||A_n\chi_{[0,u]}||_{\Lambda(\psi)}}{n||\chi_{[0,u]}||_{\Lambda(\psi)}}
=\frac1{n\psi(u)}\sum_{s=1}^n\psi(m(|(\chi_{[0,u]}\otimes
r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq s)).
\end{equation}
It is obvious that $0\leq g_n\leq 1$.
\begin{rem} The second equality in \eqref{g definition} is a corollary of
\cite[II.5.4]{KPS}.
\end{rem}
\begin{prop}\label{tech fignya} For sufficiently large $n,$ we have $g_n(u)<1$ for all
$u\in(0,1]$.
\end{prop}
\begin{proof} Since $\psi$ is concave, we have
\begin{multline}\label{first bound of g}
\sum_{s=1}^n\psi(m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq s))\leq \\
\leq n\cdot\psi(\frac1n\sum_{s=1}^nm(|(\chi_{[0,u]}\otimes
r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq s)).
\end{multline}
Note, that if random variable $\xi_n$ takes the values $0,1,\cdots,n$ then
\begin{equation}\label{expectation appears}
\sum_{s=1}^nm(\xi_n\geq s)=\mathbb{E}(\xi_n).
\end{equation}
By \eqref{expectation appears}, the right-hand side of
\eqref{first bound of g} is equal to $n\psi(\frac1n
\mathbb{E}(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes
r)_n|)).$ By Lemma \ref{estimate of expectation}, we have
\begin{equation}\label{strict estimate of expectation}
\frac1n \mathbb{E}(|(\chi_{[0,u]}\otimes
r)_1+\cdots+(\chi_{[0,u]}\otimes
r)_n|)<\mathbb{E}(|\chi_{[0,u]}\otimes r|)=u.
\end{equation}
Taking $\psi$, we obtain
\begin{equation}\label{second bound of g}
n\psi(\frac1n\mathbb{E}(|(\chi_{[0,u]}\otimes
r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|))\leq
n\psi(\mathbb{E}(|\chi_{[0,u]}\otimes r|)).
\end{equation}
The right hand side of \eqref{second bound of g} is equal to
$n\psi(u).$
Let us assume that $g_n(u)=1,$ for some $u>0$ and some $n>1.$ It
then follows, that both inequalities \eqref{first bound of g} and
\eqref{second bound of g} are actually equalities.
The equality
\begin{multline*}
\sum_{s=1}^n\psi(m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq s))=\\
=n\cdot\psi(\frac1n\sum_{s=1}^nm(|(\chi_{[0,u]}\otimes
r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq s))
\end{multline*}
implies, by Lemma \ref{linearity}, that $\psi$ is linear on the
interval $[a_1,b_1]$ with $a_1=m(|(\chi_{[0,u]}\otimes
r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq n),$ and
$b_1=m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes
r)_n|\geq1).$
Since the inequality in \eqref{second bound of g} is actually an
equality, we derive from \eqref{strict estimate of expectation}
and \eqref{second bound of g}, that $\psi$ must be a constant on
the interval $[a_2,b_2]$ with $a_2=\frac1n
\mathbb{E}(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes
r)_n|),$ and $b_2=\mathbb{E}(|\chi_{[0,u]}\otimes r|).$ Since
$\psi$ is increasing and concave function, it must be a constant
on $[a_2,1].$
Since, by \eqref{expectation appears},
$$\frac1n \mathbb{E}(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|)>m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq n)$$
and
$$\frac1n \mathbb{E}(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|)<m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq1),$$
we have $a_1<a_2<b_2.$ So, the intersection of the intervals $[a_1,b_1]$ and $[a_2,1]$ contains an interval $[a_3,b_3]$ with $a_3<b_3.$
Since $\psi$ is a linear function on the $[a_1,b_1]$ and is a constant on the $[a_2,1]$ it must be a constant on $[a_1,1]$ that is on the interval
$$[m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq n),1]=[2^{1-n}u^n,1].$$
Thus, $\psi$ is a constant on the interval $[2^{1-n},1]\subset [2^{1-n}u^n,1],$ which is not the case for sufficiently large $n.$ So, $g_n(u)<1$ for all sufficiently large $n.$
\end{proof}
\begin{lem}\label{limit equivalence} For the function $g_n$ defined in \eqref{g definition}, we have
$$\limsup_{u\rightarrow0}g_n(u)=\limsup_{u\rightarrow0}\frac1{n\psi(u)}\sum_{s=1}^n\psi(2^{1-s}\binom{n}{s}u^s).$$
\end{lem}
\begin{proof}For every $s\geq 1,$ using a formula for conditional probabilities, we have
\begin{equation*}
m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq
s)=\sum_{k=1}^n\binom{n}{k}u^k(1-u)^{n-k}m(|r_1+\cdots+r_k|\geq s).
\end{equation*}
Actually, the summation above is taken from $k=s$ up to $n,$ since $m(|r_1+\cdots+r_k|\geq s)=0$ for every $k<s.$\\
If now $u\rightarrow0,$ then, for every $s\geq1$ and $k>s,$ we have $\binom{n}{k}u^k(1-u)^{n-k}=o(u^s).$ Therefore,
\begin{equation}\label{main term selection}
m(|(\chi_{[0,u]}\otimes r)_1+\cdots+(\chi_{[0,u]}\otimes r)_n|\geq
s)=2^{1-s}\binom{n}{s}u^s(1+o(1)).
\end{equation}
Since $\psi$ is concave, we have
\begin{equation}\label{sublinearity}
\psi(\frac1{m}u)\leq\frac1{m}\psi(u),\ 0<m\leq1.
\end{equation}
This implies
\begin{equation}\label{psi limit property}
\lim\limits_{u\rightarrow0}\frac{\psi(u(1+o(1)))}{\psi(u)}=1.
\end{equation}
After applying \eqref{main term selection} and \eqref{psi limit
property} to the definition of $g_n$ in \eqref{g definition}, we
obtain the assertion of the lemma.
\end{proof}
The following theorem is the main result in this section.
\begin{thm}\label{characterization} Let $\psi\in\Psi.$ The following conditions are equivalent.
$(i)$ $||A_n||_{\mathcal{L}(\Lambda(\psi))}<n$ for all
sufficiently large $n;$
$(ii)$ Estimates \eqref{first property} and \eqref{second
property} hold for some $k\geq2$ and $l\geq2$.
\end{thm}
\begin{rem} Note that condition $(i)$ above is equivalent to the assumption that
$||A_{n_0}||_{\mathcal{L}(\Lambda(\psi))}<n_0$ for some $n_0>1$
(see Theorem \ref{alternative}).
\end{rem}
\begin{proof} We are interested whether there exist $n\in\mathbb{N}$ and $c<n,$ such that
\begin{equation}\label{an bound}
||A_nf||_{\Lambda(\psi)}\leq c||f||_{\Lambda(\psi)},\
\ f\in\Lambda(\psi).
\end{equation}
We will use the following known description of extreme points of
the unit ball in $\Lambda(\psi).$ A function $f\in
extr(B_{\Lambda(\psi)}(0,1))$ if and only if
$$|f|=\frac{\chi_{A}}{||\chi_{A}||_{\Lambda(\psi)}}$$
for some measurable set $A\subset [0,1].$ Here $\chi_A$ is the
indicator function of the set $A$. This means that $f$ is of the
form
$$f=\frac{\chi_{A_1}-\chi_{A_2}}{\psi(m(A_1\cup A_2))}$$
with $A_1$ and $A_2$ having empty intersection. It is sufficient
to verify \eqref{an bound} only for functions $f$ as above (see
\cite[Lemma II.5.2]{KPS}).
Clearly, $f\otimes r$ and $|f|\otimes r$ are identically
distributed random variables. Therefore, $A_n(f)$ and $A_n(|f|)$
are also identically distributed ones. Furthermore,
$||A_m(f)||=||A_m(|f|)||$ and $||f||=||\,|f|\,||.$ Thus, we need
to check \eqref{an bound} for indicator functions only. It is
sufficient to take $A$ of the form $[0,u],$ $0<u\leq1.$
Using the notation $g_n(\cdot)$ introduced in \eqref{g
definition}, we see that \eqref{an bound} is equivalent to
\begin{equation}\label{gn bound}
\sup_u g_n(u)<1.
\end{equation}
Now, we are ready to finish the proof of the theorem.
[Necessity] Fix $n$ such that
$||A_n||_{\mathcal{L}(\Lambda(\psi))}<n.$ It follows from the
argument above that \eqref{gn bound} holds. Now, we immediately
infer from Lemma \ref{limit equivalence} and the definition of
$g_n(\cdot)$ that
$$\limsup_{u\rightarrow0}\frac1{n\psi(u)}\sum_{s=1}^n\psi(\binom{n}{s}2^{1-s}u^s)<1,$$
which is equivalent to
\eqref{general limit property}. Thus,
$$\limsup_{u\rightarrow0}\frac{\psi(nu)}{n\psi(u)}
=\limsup_{u\rightarrow0}\frac{\psi(2^{1-1}\binom{n}{1}u^1)}{n\psi(u)}
\leq\limsup_{u\rightarrow0}\frac1{n\psi(u)}\sum_{s=1}^n\psi(2^{1-s}\binom{n}{s}u^s)<1.$$
Suppose that \eqref{second property} fails. We have
$$\limsup_{u\rightarrow 0}\frac{\psi(u^l)}{\psi(u)}=1$$
for every $l\geq 1.$ Since $\binom{n}{s}2^{1-s}u^s\geq u^{n+1}$ for every $s=1,2,\cdots,n$
and every sufficiently small $u,$ we have
$$\limsup_{u\rightarrow 0}\frac1{n\psi(u)}\sum_{s=1}^n\psi(\binom{n}{s}2^{1-s}u^s)
\geq\limsup_{u\rightarrow 0}\frac{n\psi(u^{n+1})}{n\psi(u)}=1.$$
This contradicts \eqref{general limit property} and completes
the proof of necessity.
[Sufficiency] Fix $k\geq2$ (respectively, $l\geq2$) such that
\eqref{first property} (respectively, \eqref{second property})
holds. Then, for sufficiently large $n,$ \eqref{general limit
property} also holds. By Lemma \ref{limit equivalence}, we have
\begin{equation}\label{gn limit bound}
\limsup_{u\rightarrow0}g_n(u)<1
\end{equation}
for all sufficiently large $n.$ By Proposition \ref{tech fignya},
we have $g_n(u)<1$ for all sufficiently large $n$ and for all
$u\in(0,1].$ Therefore, by \eqref{gn limit bound}, \eqref{gn
bound} holds for sufficiently large $n.$ Then (see the argument at
the beginning of the proof),
$||A_n||_{\mathcal{L}(\Lambda(\psi))}<n$ for sufficiently large
$n.$
\end{proof}
Combining Theorems \ref{alternative} and \ref{characterization},
we have
\begin{cor}\label{lorentz alternative} For every function $\psi,$ one of
the following two mutually excluding
alternatives holds.
\begin{enumerate}
\item There exist $q\in[\frac12,1)$ and $C>0,$ such that the
operator $A_n:\Lambda(\psi)\rightarrow\Lambda(\psi)$ satisfies
$$||A_n||_{\mathcal{L}(E)}\leq C\cdot n^q,\ n\geq1.$$
\item Either for every $k\in\mathbb{N},$
\begin{equation}\label{first bad condition}
\limsup_{u\rightarrow0}\frac{\psi(ku)}{\psi(u)}=k
\end{equation}
or for every $l\in\mathbb{N},$
\begin{equation}\label{second bad condition}
\limsup_{u\rightarrow0}\frac{\psi(u^l)}{\psi(u)}=1.
\end{equation}
\end{enumerate}
\end{cor}
\begin{rem} \begin{itemize}\item[(i)] The condition \eqref{first bad
condition} is equivalent to the assumption
$\beta_{\Lambda(\psi)}=1$. \item[(ii)] The condition \eqref{second
bad condition} implies (but not equivalent to) the condition
$\alpha_{\Lambda(\psi)}=0$.
In the last
section of this paper, we will present an example $\psi\in \Psi$
failing \eqref{second bad condition}
such that the Lorentz space $\Lambda(\psi)$ fails the Kruglov property.
\end{itemize}
\end{rem}
\section{Operators $A_n$, $n\ge 1$ in Orlicz spaces $exp(L_p)$.}
The space $exp(L_p)$ satisfies Kruglov property if and only if
$p\leq1$ (see \cite{Br1994,AsSu2005}). The space $exp(L_p)$ is
2-convex for all $0<p<\infty$ (see e.g. \cite[1.d]{LT-II}). Now,
we immediately infer from \cite{AsSeSu2007} that $\Gamma_{\rm
iid}(exp(L_p)_0)=\Gamma_{\rm i}(exp(L_p)_0)=[1,2]$ for all
$0<p\leq1$ (here, $exp(L_p)_0$ is the separable part of the space
$exp(L_p)$). Using Remark \ref{connection}, we have
$||A_n||_{\mathcal{L}(exp(L_p)_0)}\leq const\cdot n^{\frac12}$ for
all $n\geq1$ and $0<p\leq1$. It easily follows that in fact,
$||A_n||_{\mathcal{L}(exp(L_p))}\leq const\cdot n^{\frac12}$ for
all $n\geq1$ and $0<p\leq1$. In this section, we prove the
estimate $||A_n||_{\mathcal{L}(exp(L_p))}\leq const\cdot
n^{\frac12}$ (respectively, $||A_n||_{\mathcal{L}(exp(L_p))}\leq
const\cdot n^{1-1/p}$) for all $1<p\leq 2$ (respectively, $2\leq
p<\infty$.) To this end, it is convenient to view $exp(L_p)$ as a
Marcinkiewicz space $M(\psi_p)$ with
$\psi_p(t)=t\log^{\frac1p}(\frac{e}t)$ (see \cite[Lemma
4.3]{AsSu2005}). The following simple lemma is crucial.
\begin{lem}\label{expl2 normal}
There exists $\Psi\ni\psi\sim\psi_2,$ such that the random
variable $\psi'\otimes r$ is Gaussian.
\end{lem}
\begin{proof} Setting $F(t):=\frac2{\sqrt{\pi}}\int_t^{\infty}e^{-z^2}dz$, $t\ge 0$
and denoting its inverse by $G$, we clearly have that $G\otimes r$
is Gaussian. From the obvious inequality
$$c_1\cdot e^{-2t^2}\leq F(t)\leq c_2 e^{-t^2},$$
substituting $t=G(z),$ we obtain
$$c_1\cdot e^{-2G^2(z)}\leq z\leq c_2 e^{-G^2(z)}$$
or, equivalently,
$$\frac1{\sqrt{2}}\log^{\frac12}(\frac{c_1}z)\leq G(z)\leq \log^{\frac12}(\frac{c_2}z).$$
This means
$$\psi(t)=\int_0^tG(z)dz\sim\int_0^t\log^{\frac12}(\frac{e}z)dz\sim t\log^{\frac12}(\frac{e}t)=\psi_2(t).$$
\end{proof}
\begin{thm}\label{explp bound}
\begin{itemize}
\item[(i)] For every $1\leq p\leq 2,$ we have
$||A_n||_{\mathcal{L}(exp(L_p))}\leq const\sqrt{n}.$
\item[(ii)]For every $2\leq p\leq\infty,$ we have
$||A_n||_{\mathcal{L}(exp(L_p))}\leq const\cdot n^{1-1/p}.$
\end{itemize}
\end{thm}
\begin{proof} (i).\quad By Lemma \ref{expl2 normal} $exp(L_2)=M(\psi)$, $\psi\in
\Psi$ where $\psi'\otimes r$ is Gaussian. Recall the following
description of the extreme points of the unit ball in
Marcinkiewicz spaces (see \cite{Ryff}): a function $f$ is an
extreme point of the unit ball in $M({\psi})$ if and only if
$f^*=\psi'$. Since $||A_nx||_{M({\psi})}=||A_n\psi'||_{M({\psi})}$
for any $x\in M_{\psi}$ with $x^*=\psi'$, we infer that
$||A_n\psi'||_{M({\psi})}=||A_n||_{\mathcal{L}(M({\psi}))}$, $n\ge
1$. Since the $\psi'\otimes r$ is Gaussian, the function
$$\frac{(\psi'\otimes r)_1+\cdots+(\psi'\otimes r)_n}{\sqrt{n}}$$
is also Gaussian, in particular, its rearrangement coincides with
$\psi'$. This means $||A_n||_{\mathcal{L}(M_{\psi})}=\sqrt{n}$.
The result now follows by interpolation between $exp(L_{1})$ and
$exp(L_{2})$, since for every $0<p_1\leq p_2\leq\infty$ we have
$$[exp(L_{p_1}),exp(L_{p_2})]_{\theta,\infty}=exp(L_p)$$
with $\frac1p=\frac{1-\theta}{p_1}+\frac{\theta}{p_2}$ (see, for
example \cite{BrKrug}).
(ii).\quad Noting that $||A_n||_{\mathcal{L}(L_\infty)}=n$, $n\ge
1$, the assertion follows from (i) by applying the real method of
interpolation to the couple $(exp(L_{2}),L_\infty)$ as above.
\end{proof}
\section{Applications to Banach-Saks index sets}
The first main result of this section characterizing a subclass
of the class of all r.i. spaces $E$ such that $\Gamma_{\rm
iid}(E)= \Gamma_{\rm i}(E)$ is given in Theorem \ref{firstmain}
below. We firstly need a modification of the subsequence splitting
result from \cite[Theorem 3.2]{new-16-Sem-Suk}. We present
necessary details of the proof for convenience of the reader.
\begin{thm}\label{firsttechnical} Let $\{x_n\}_{n\ge 1}$ be a weakly null sequence of independent functions in a separable r.i. space $E$ with the Fatou property. Then, there exists a subsequence $\{y_n\}_{n\ge 1}\subset\{x_n\}_{n\ge 1},$ which can be split as $y_n=u_n+v_n+w_n, n\ge 1$. Here $\{u_n\}_{n\ge 1}$ is a weakly null sequence of independent identically distributed functions, the sequence $\{v_n\}_{n\ge 1}$ is also weakly null and consists of the elements with pairwise disjoint support and $\|w_n\|_E\to 0$ as $n\to \infty$.
\end{thm}
\begin{proof} Let the probability space $(\Omega, \mathcal{P})$ be the infinite direct product of measure spaces $([0,1],m)$. Without loss of generality, we assume that $E=E(\Omega)$ and that each function $x_n$
depends only on the $n-$th coordinate. That is the
following holds
$$x_n=\underbrace{1\otimes\cdots\otimes1}_{(n-1)\
times}\otimes h_n\otimes 1\otimes\cdots,\quad h_n\in E(0,1),\ \quad n\ge 1.$$ Consider the sequence
$\{g_n\}_{n\ge 1}=\{h^*_n\}_{n\ge 1}\subset E(0,1)$. Since
$$||x_n||_E=||g_n||_E\geq ||g_n\chi_{[0,s]}||_E\geq g_n(s)||\chi_{[0,s]}||_E,\quad s\in [0,1]$$
and the sequence $\{x_n\}$ is bounded, it follows from the Helly selection theorem that there exists a
subsequence $\{g_n^1\}\subset\{g_n\},$ which converges almost
everywhere on $[\frac12,1]$. Repeating the argument, we get a
subsequence $\{g_n^2\}\subset\{g_n^1\},$ which converges almost
everywhere on $[\frac13,1],$ etc. Thus, there exists a function
$h\in L_1(0,1)$ to which the diagonal sequence $\{g_n^n\}_{n\ge
1}=\{(h_n^n)^*\}_{n\ge 1}$ converges almost everywhere. The Fatou
property of $E$ guarantees that $h\in E(0,1)$ and $\|h\|_E\leq 1$.
There is an operator $P_n:L_1(0,1)\to L_1(0,1)$ of the form
$(P_nx)(t)=\alpha(t)x(\gamma(t))$ (here $|\alpha(t)|=1$ and
$\gamma$ is a measure preserving transformation of the interval
$(0,1)$ into itself), such that $P_ng_n^n=h_n^n$, $n\ge 1$ (see
e.g. \cite{KPS}). Now, put
$$y_n:=1\otimes1\otimes\cdots\otimes1\otimes h^n_n\otimes1\cdots,\quad n\ge 1,$$
$$u_n:=1\otimes1\otimes\cdots\otimes1\otimes (P_n h)\otimes1\cdots,\quad n\ge 1.$$
It is clear that the functions $u_n$ are independent. The proof is
finished by repeating the remaining argument from \cite[Theorem
3.2]{new-16-Sem-Suk}.
\end{proof}
\begin{thm}\label{firstmain} For an arbitrary separable r.i. space $E$ with the Fatou property,
we have
$$
\Gamma_{\rm iid}(E)= \Gamma_{\rm i}(E)\Longleftrightarrow
\Gamma_{\rm iid}(E)\subseteq \Gamma_{\rm d}(E).
$$
\end{thm}
\begin{proof} If $\Gamma_{\rm iid}(E)= \Gamma_{\rm i}(E)$, then
the embedding $\Gamma_{\rm iid}(E)\subseteq \Gamma_{\rm d}(E)$
follows immediately from \cite [Lemma 4.1(ii)]{AsSeSu2005}.
Suppose now that $\Gamma_{\rm iid}(E)\subseteq \Gamma_{\rm d}(E)$
and let $\{f_k\}_{k\ge 1}\subset E$ be a normalized weakly null
sequence of independent random variables on $[0,1]$. Passing to a
subsequence and applying the preceding theorem, we may assume that
$f_n=u_n+v_n+w_n, n\ge 1$, where $\{u_n\}_{n\ge 1}$ is a weakly
null sequence of independent identically distributed functions,
the sequence $\{v_n\}_{n\ge 1}$ is also weakly null and
consists of the elements with pairwise disjoint support and $\|w_n\|_E\to 0$ as $n\to
\infty$. Due to the latter convergence, we may assume without loss of generality that
$||w_k||_{E}\leq 2^{-k}$ and so for every subsequence $\{z_n\}\subset\{w_n\},$ we have
$$||\sum_{k=1}^nz_k||_{E}\leq 1.$$
If, in addition, $\frac {1}{q} \in \Gamma_{\rm iid}(E)$, then our
assumptions also guarantee that there are constants $C_2, C_3>0$
such that
$$||\sum_{k=1}^nu_k||_{E}\leq C_2\cdot n^{q},\quad
||\sum_{k=1}^nv_k||_{E}\leq C_3\cdot n^{q}.$$
\end{proof}
We will illustrate the result above in the settings of:
$(\alpha)$ r.i. spaces satisfying an upper $2$-estimate; $(\beta)$
Lorentz spaces $\Lambda(\varphi)$ and Marcinkiewicz spaces
$M(\varphi)_0$, $\varphi\in \Psi$; and $(\gamma)$ classical
$L_{p,q}$-spaces.
$(\alpha)$\quad Recall that a Banach lattice $X$ is said to
satisfy an \textit{upper\ }$2-$\textit{estimate}, if there exists
a constant $C>0$ such that for every finite sequence
$(x_{i})_{_{i=1}}^{n}$ of pairwise disjoint elements in $X$
\begin{equation*}
\left\Vert \sum_{j=1}^{n}x_{j}\right\Vert _{X}\leq C\left(
\sum_{j=1}^{n}\Vert x_{j}\Vert _{X}^{2}\right) ^{1/2}.
\end{equation*}
\begin{cor}\label{upper2estimate} If $E$ is a separable r.i. space with the Fatou
property and satisfying an upper $2$-estimate, then $ \Gamma_{\rm
iid}(E)= \Gamma_{\rm i}(E)$.
\end{cor}
\begin{proof} The assumption that the space $E$ satisfies an upper
$2$-estimate implies immediately that $2\in \Gamma_{\rm d}(E)$ and
hence $[1,2]\subseteq \Gamma_{\rm d}(E)$. Noting that $
\Gamma_{\rm iid}(E) \subseteq [1,2]$ (see \cite[Lemma
4.1(i)]{AsSeSu2005}), the result now follows from Theorem
\ref{firstmain}.
\end{proof}
$(\beta)$\quad Although Lorentz spaces do not satisfy an upper
$2$-estimate, we have $$\Gamma_{d}(\Lambda(\psi))=[1,\infty)$$
(see e.g. the proof of \cite [Corollary 4.8]{AsSeSu2005}) and
similarly, $\Gamma_{d}(M(\psi)_0)=[1,\infty)$ (see e.g. \cite
[p.897]{AsSeSu2005}) for any $\psi\in \Psi$. Although, the
Marcinkiewicz spaces $(M(\psi)_0)$ do not possess the Fatou
property, applying the modification of Theorem
\ref{firsttechnical} similar to \cite[Lemma 3.6]{AsSeSu2005},
we obtain the following corollary from Theorem \ref{firstmain}.
\begin{cor}\label{coincidence_in_Lor} For every $\psi\in \Psi$, we have $\Gamma_{\rm
i}(\Lambda(\psi))= \Gamma_{\rm iid}(\Lambda(\psi))$ and
$\Gamma_{\rm i}(M(\psi)_0)= \Gamma_{\rm iid}(M(\psi)_0)$.
\end{cor}
$(\gamma)$\quad We will now show that the equality $\Gamma_{\rm
i}(E)=\Gamma_{\rm iid}(E)$ fails in the important subclass of r.i.
space which plays a significant role in the interpolation theory
\cite{KPS,LT-II}. Recall the definition of the Lorentz spaces
$L_{p,q}$, $1<p,q<\infty$: $x\in L_{p,q}$ if and only if the
quasi-norm
\[
\|x\|_{p,q}=
\dfrac{q}{p}\left(\displaystyle\int\limits_0^1
\left(x^*(t)t^{1/p}\right)^q\dfrac{dt}{t}\right)^{1/q},
\]
is finite. The expression $\|\cdot \|_{p,q}$ is a norm if
$1\leqslant q\leqslant p$ and is equivalent to a (Banach) norm if
$q>p$.
We will now show that $\Gamma_{\rm i}(L_{p,q})\neq\Gamma_{\rm
iid}(L_{p,q})$, provided $1<q<p<2$. To this end, we firstly
observe that every normalized sequence $\{v_n\}_{n\ge 1}\subset
L_{p,q}$ of functions with disjoint support contains a subsequence
spanning the space $l_q$ (see \cite[Lemma 2.1]{CD}). In
particular, $\Gamma_{\rm d}(L_{p,q})\subset\Gamma(l_q)=[1,q]$ and
so, by \cite[Lemma 4.1(ii)]{AsSeSu2005}, we have $\Gamma_{\rm
i}(L_{p,q})\subseteq [1,q]$. Next, it is proved in \cite[Corollary
5.2]{Br1996} (see also \cite{CarDil89}) that if $p<2$ then for every sequence of identically
distributed independent random variables we have
$$||\sum_{k=1}^nx_k||_{L_{p,q}}=o(n^{\frac1p}),$$
which implies, in particular, that $[1,p]\subseteq \Gamma_{\rm
iid}(L_{p,q})$. This shows that $(q,p]\subseteq \Gamma_{\rm
iid}(L_{p,q})\setminus \Gamma_{\rm i}(L_{p,q})$ as soon as
$1<q<p<2$.
Our second main result in this section completely characterizes
the subclass of all Lorentz spaces $\Lambda(\psi)$, $\psi\in \Psi$
whose Banach-Saks index set $\Gamma_{\rm i}(\Lambda(\psi))$ is
non-trivial.
\begin{thm}\label{mainsecond} $\Gamma_{\rm iid}(\Lambda(\psi))\neq\{1\}$ if and only if
the function $\psi$ satisfies conditions \eqref{first property}
and \eqref{second property} for some $k,l\geq2$.
\end{thm}
\begin{proof} Let $\{f_k\}_{k\ge1}\subset \Lambda(\psi)$ be a normalized weakly null sequence of
independent identically distributed random variables on $[0,1]$.
Note that we automatically have $\int_0^1f_kdm=0$, $k\ge 1$.
Using standard symmetrization trick, we consider another sequence
$\{f_k'\}_{k\ge 1}$ of independent random variables (which is also
independent with respect to the sequence $\{f_k\}_{k\ge 1}$) such
that $f_k'$ is equidistributed with $f_k$ and define
$h_k:=f_k-f_k'$, $k\ge 1$. Clearly, $\{h_k\}_{k\ge 1}$ is a
sequence of independent symmetric identically distributed random
variables. Noting, that by \cite[Proposition~11, p.~6]{Br1994}, we
have
$$||\sum_{k=1}^nf_k||_{\Lambda(\psi)}\leq const\cdot
||\sum_{k=1}^nh_k||_{\Lambda(\psi)},\quad n\ge 1.$$ Now, if $\psi$
satisfies conditions \eqref{first property} and \eqref{second
property}, then it follows from Corollary \ref{lorentz
alternative} that $||\sum_{k=1}^nh_k||_{\Lambda(\psi)}\leq
const\cdot n^q$ for some $q\in (0,1)$ and hence $\frac{1}{q}\in
\Gamma_{\rm iid}(\Lambda(\psi))$. Conversely, let $\frac{1}{q}\in
\Gamma_{\rm iid}(\Lambda(\psi))$ for some $q\in (0,1)$. Fix $f\in
\Lambda(\psi)$ and consider the sequence $\{(f\otimes r)_n\}_{n\ge
1}\subset \Lambda(\psi)(\Omega, \mathcal{P})$, where the
probability space $(\Omega, \mathcal{P})$ is the infinite direct
product of measure spaces $([0,1],m)$. Since Lorentz spaces
$\Lambda(\psi)(\Omega, \mathcal{P})$ and $\Lambda(\psi)(0,1)$ are
isometric, and since the sequence $\{(f\otimes r)_n\}_{n\ge 1}$ is
weakly null in $\Lambda(\psi)(\Omega, \mathcal{P})$ ( see e.g.
\cite[Lemma 3.4]{SeSu}), we have
$$
\sup_{n\ge 1}\frac{1}{n^q}\|(f\otimes r)_1+(f\otimes r)_2+\dots
+(f\otimes r)_n\|_{\Lambda(\psi)}\leq C(f).
$$
Setting, $B_n:=\frac{1}{n^q}A_n$, $n\ge 1$ we have
$\|B_nf\|_{\Lambda(\psi)}\leq C(f)$ for every $n\ge 1$. By the
uniform boundedness principle, we have
$\|B_n\|_{\mathcal{L}(\Lambda(\psi))}\leq C<\infty$ for all $n\ge
1$, or equivalently that $||A_n||_{\mathcal{L}(\Lambda(\psi))}\leq
C\cdot n^q,\ n\geq 1$. Corollary \ref{lorentz alternative} now yields that the function
$\psi$ satisfies conditions \eqref{first property} and
\eqref{second property}.
\end{proof}
The following Corollary follows immediately from the above
combined with Corollary \ref{coincidence_in_Lor}.
\begin{cor}\label{mainsecond_add} $\Gamma_{\rm i}(\Lambda(\psi))\neq\{1\}$, if and only if the function $\psi\in \Psi$ satisfies conditions \eqref{first
property} and \eqref{second property} for some $k,l\geq2.$
\end{cor}
We complete this section with the description of
$\Gamma_i(exp(L_p)_0)$, $1\leq p<\infty$.
\begin{thm}\label{Marc} For every $1\leq p\leq2,$ we have
$\Gamma_{\rm iid}(exp(L_p)_0)=\Gamma_{\rm i}(exp(L_p)_0)=[1,2]$.
For every $2\leq p<\infty,$ we have $\Gamma_{\rm
iid}(exp(L_p)_0)=\Gamma_{\rm i}(exp(L_p)_0)= [1,\frac{p}{p-1}].$
\end{thm}
\begin{proof} The first assertion follows from Remark
\ref{connection}, Theorem \ref{explp bound} and Corollary
\ref{coincidence_in_Lor}. The same argument shows that
$\Gamma_i(exp(L_p)_0)\supseteq [1,\frac{p}{p-1}]$ for every $2\leq
p<\infty$. The equality $\Gamma_{\rm i}(exp(L_p)_0)=
[1,\frac{p}{p-1}]$ follows from the fact that the estimate $$\|A_n
\chi _{[0,1]}\|_{exp(L_p)_0}\leq const\cdot n^{1-1/p},\quad n\ge
1$$ is the best possible (see \cite[Theorem 8]{RS} or
\cite[Theorem 15]{D}).
\end{proof}
\section{Concluding Remarks and Examples}
The preceding theorem shows that the set $\Gamma_i(exp(L_p)_0)$ is
non-trivial for all $1\leq p<\infty$, whereas $exp(L_p)$ has the
Kruglov property if and only if $0<p\leq 1$. This result extends
and complements \cite{AsSeSu2007}, where examples of r.i. spaces
$E$ with Kruglov property such that $\Gamma(E)=\{1\}$ and
$\Gamma_{\rm i}(E)\neq \{1\}$ are built. We now present an example
of Lorentz space $\Lambda(\psi)$ such that $\Gamma_{\rm
i}(\Lambda(\psi))\neq\{1\}$ and which does not possess the Kruglov
property.
\begin{ex} Let $\psi\in \Psi$ be given by the condition
$\psi(t):=\frac{1}{\log^{\frac12}(\frac1t)}$, $t\in
[0,e^{-\frac32}]$ and be linear on $ [e^{-\frac32},1]$. The space
$\Lambda(\psi)$ does not have the Kruglov property, however
$\Gamma_{\rm i}(\Lambda(\psi))\neq\{1\}$.
\end{ex}
\begin{proof}
Since for every $k,l>1$ we have
$$\lim_{u\rightarrow0}\frac{\psi(ku)}{\psi(u)}=
\lim_{u\rightarrow0}(\frac{\log(u)}{\log(ku)})^{\frac12}=
1<k, \quad\lim_{u\rightarrow0}\frac{\psi(u^l)}{\psi(u)}=
\lim_{u\rightarrow0}(\frac{\log(u)}{\log(u^l)})^{\frac12}=\frac1{l^{\frac12}}<1$$
we see that $\Gamma_{\rm i}(\Lambda(\psi))\neq\{1\}$ by Corollary
\ref{lorentz alternative}.
By \cite[Theorem 5.1]{AsSeSu2005} a Lorentz space
$\Lambda(\phi)$, $\phi\in \Psi$ has the Kruglov property if and
only if
$$\sup_{t>0}\frac1{\phi(t)}\sum_{n=1}^{\infty}\phi(\frac{t^n}{n!})<\infty.$$
In our case, for every fixed $t\leq e^{-\frac32}$
$$\sum_{n=1}^{\infty}\psi(\frac{t^n}{n!})=
\sum_{n=1}^{\infty}\frac1{(\log(n!)+n\log(\frac1t))^{\frac12}}=
\sum_{n=1}^{\infty}\frac1{(n\log(n)(1+o(1)))^{1/2}}=\infty.
$$
\end{proof}
\leftline{F. Sukochev}\leftline{School of Mathematics and Statistics}\leftline{University of New South Wales, Kensington NSW 2052}\leftline{Email Address:{\it f.sukochev@unsw.edu.au}}
\leftline {D. Zanin}\leftline {School of Computer Science, Engineering and Mathematics} \leftline {Flinders University,
Bedford Park, SA 5042, Australia}\leftline {Email Address: {\it
zani0005@csem.flinders.edu.au} }
\end{document} |
\begin{document}
\title[Continuation homomorphism in Rabinowitz Floer homology]
{Continuation homomorphism in Rabinowitz Floer homology for symplectic deformations}
\author{Youngjin Bae}
\author{Urs Frauenfelder}
\address{
Youngjin Bae\\
Department of Mathematics and Research Institute of Mathematics\\
Seoul National University}
\email{jini0919@snu.ac.kr}
\address{
Urs Frauenfelder\\
Department of Mathematics and Research Institute of Mathematics\\
Seoul National University}
\email{frauenf@snu.ac.kr}
\keywords{Floer homology, Rabinowitz Floer homology, Ma\~n\'e critical value, Isoperimetric inequality}
\begin{abstract}
Will J.\,Merry computed Rabinowitz Floer homology above Ma\~n\'e's
critical value in terms of loop space homology in \cite{Mer10} by
establishing an Abbondandolo-Schwarz short exact sequence. The
purpose of this article is to provide an alternative proof of
Merry's result. We construct a continuation homomorphism for
symplectic deformations which enables us to reduce the computation
to the untwisted case. Our construction takes advantage of a special
version of the isoperimetric inequality which holds true above
Ma\~n\'e's critical value.
\end{abstract}
\maketitle
\section{Introduction}
Rabinowitz Floer homology as introduced
in \cite{CF09} is the semi-infinite dimensional Morse homology
associated to Rabinowitz action functional. Critical points of
Rabinowitz action functional are Reeb orbits on a fixed energy
hypersurface of arbitrary period. Rabinowitz Floer homology vanishes
if the energy hypersurface is displaceable, however, we have the
following non-vanishing result.
\begin{Thm}[Abbondandolo-Schwarz \cite{AS09}, Cieliebak-Frauenfelder-Oancea \cite{CFO09}]\label{untwisted}
Assume $N$ is a closed manifold. Denote by $ST^*N$ the unit cotangent bundle of $N$
in the cotangent bundle $T^*N$ which is endowed with its canonical symplectic structure.
Then in degree $*\neq0,1$
\[
\mathrm{RFH}_*(ST^*N,T^*N)=
\left\{
\begin{array}{ll}
\mathrm{H}_*(\mathscr{L}_N),&\text{if }\ *>1, \\
\mathrm{H}^{-*+1}(\mathscr{L}_N),&\text{if }\ *<0.
\end{array}
\right.
\]
If $e(T^*N)$ is the Euler characteristic of $T^*N$, then in degree 0
we have
\[
\mathrm{RFH}_0^c(ST^*N,T^*N)=
\left\{
\begin{array}{ll}
\mathrm{H}_0(\mathscr{L}^c_N)\oplus\mathrm{H}^1(\mathscr{L}^c_N),&\text{if }\ c\neq0, \\
\mathrm{H}_0(\mathscr{L}^0_N)\oplus\mathrm{H}^1(\mathscr{L}^0_N),&\text{if }\ c=0 \text{ and } e(T^*N)=0, \\
\mathrm{H}^1(\mathscr{L}^0_N), &\text{if }\ c=0 \text{ and } e(T^*N)\neq 0.
\end{array}
\right.
\]
In degree 1 we have
\[
\mathrm{RFH}_1^c(ST^*N,T^*N)=
\left\{
\begin{array}{ll}
\mathrm{H}_1(\mathscr{L}^c_N)\oplus\mathrm{H}^0(\mathscr{L}^c_N),&\text{if }\ c\neq0, \\
\mathrm{H}_1(\mathscr{L}^0_N)\oplus\mathrm{H}^0(\mathscr{L}^0_N),&\text{if }\ c=0 \text{ and } e(T^*N)=0, \\
\mathrm{H}_1(\mathscr{L}^0_N), &\text{if }\ c=0 \text{ and } e(T^*N)\neq 0.
\end{array}
\right.
\]
Here, $\mathscr{L}_N$ is the free loop space of $N$ and $\mathscr{L}^c_N$ is the
connected component of $\mathscr{L}_N$ of homotopy type $c$ and
$\mathrm{RFH}^c(ST^*N,T^*N)$ is the Rabinowitz Floer homology for the
Rabinowitz action functional restricted to $\mathscr{L}^c_{T^*N}$. Moreover,
all homology groups are taken with $\mathbb{Z}_2$-coefficients.
\end{Thm}
An interesting result of Will J. Merry tells us that this theorem
continues to hold in the presence of a weakly exact magnetic field
for high enough energy levels. On the cotangent bundle $\tau:T^*N\to
N$ of a closed Riemannian manifold $(N,g)$, we consider an
autonomous Hamiltonian system defined by a convex Hamiltonian \begin{equation}
H_U(q,p)=\frac{1}{2}|p|^2+U(q) \end{equation} and a twisted symplectic form
\begin{equation} \omega_{\sigma}=\omega_0+\tau^*\sigma. \end{equation} Here $\omega_0=dp\wedge dq$
is the canonical symplectic form in canonical coordinates $(q,p)$ on
$T^*N$, $|p|$ denotes the dual norm of a Riemannian metric $g$ on
$N$, $U:N\to\mathbb{R}$ is a smooth potential, and $\sigma$ is a closed
2-form on $N$. This Hamiltonian system describes the motion of a
particle on $N$ subject to the conservative force $-\nabla U$ and
the magnetic field $\sigma$. We call the symplectic manifold
$(T^*N,\omega_{\sigma})$ a {\em twisted cotangent bundle}.
In order to state Will J. Merry's results we need the notion of {\em
Ma\~n\'e critical value}. Let $(\widetilde{N},\widetilde g)$ be the
universal cover of $(N,g)$. Let $\sigma\in\Omega^2(N)$ denote a closed
{\em weakly exact} 2-form, which means that the pullback
$\widetilde\sigma\in\Omega^2(\widetilde N)$ is exact.
\begin{Def}
Let $\sigma\in\Omega^2(N)$ be a closed weakly exact 2-form.
Then the {\em Ma\~n\'e critical value} is defined as
\[
c=c(g,\sigma,U):=\inf_{\theta\in\mathcal{P}_\sigma}\sup_{q\in\widetilde{N}}\widetilde{H}_U(q,\theta_q),
\]
where $\mathcal{P}_{\sigma}=\{\theta\in\Omega^1(\widetilde{N})\;
|\;d\theta=\widetilde\sigma\}$ and $\widetilde{H}_U$ is the lift of
$H_U$ to the universal cover.
\end{Def}
In this article, we restrict our attention to the case of $c<\infty$,
i.e. $\widetilde{\sigma}\in\Omega^2(\widetilde{N})$ admits a bounded
primitive. For given $k\in\mathbb{R}$, we let $\Sigma_k:=H^{-1}_U(k)\subset
T^*N$. Then the dynamics of the hypersurface $\Sigma_k$ changes
dramatically when $k$ is passing through $c$. If $k>c$ then
$\Sigma_k$ is {\em virtual restricted contact}, and Rabinowitz Floer
homology is well-defined. All these things are investigated in
\cite{CFP09}. The following theorem was conjectured in \cite{CFP09}
and proved in \cite{Mer10} by using {\em the Abbondandolo-Schwarz
short exact sequence}.
\begin{Thm}[Merry \cite{Mer10}]\label{twisted}
Under the above assumptions if $k>c(g,\sigma,U)$, then in degree
$*\neq 0,1$
\[
\mathrm{RFH}_*(\Sigma_k,T^*N,\omega_\sigma)=
\left\{
\begin{array}{ll}
\mathrm{H}_*(\mathscr{L}_N),&\text{if }\ *>1, \\
\mathrm{H}^{-*+1}(\mathscr{L}_N),&\text{if }\ *<0.
\end{array}
\right.
\]
In degree $0,1$ we have the same result as in Theorem
\ref{untwisted}.
\end{Thm}
The aim of this article is to give an alternative proof of the above
theorem by constructing an explicit isomorphism between
$\mathrm{RFH}(\Sigma_k,T^*N,\omega_0)$ and
$\mathrm{RFH}(\Sigma_k,T^*N,\omega_\sigma)$ and then use the untwisted
version, namely Theorem~\ref{untwisted}. The explicit isomorphism is
given by the continuation homomorphism for the symplectic
deformation $r \mapsto \omega_{r\sigma}$ with $r \in [0,1]$. For the
following theorem note that $c(g,\sigma,U)\geq c(g,0,U)=\max U$.
Hence if $k>c(g,\sigma,U)$, then the Rabinowitz Floer homology for
$\Sigma_k$ is defined and coincides with the one from
Theorem~\ref{untwisted}.
\begin{Thm}\label{thm:rfhcon}
Under the above assumptions, if $k>c(g,\sigma,U)\geq c(g,0,U)=\max
U$ and $\omega_0,\omega_{\sigma}\in\Omega^{\mathrm{mp}}_{\rm reg}(\Sigma_k)$ then
there is a continuation map
\[
\Psi_{\omega_0 *}^{\omega_{\sigma}}:\mathrm{RFC}_*(\Sigma_k,\omega_0)\to\mathrm{RFC}_*(\Sigma_k,\omega_{\sigma})
\]
which induces an isomorphism
\[
\widetilde{\Psi_{\omega_0}^{\omega_{\sigma}}}_*:\mathrm{RFH}_*(\Sigma_k,\omega_0)\to\mathrm{RFH}_*(\Sigma_k,\omega_{\sigma}).
\]
\end{Thm}
One of our motivation for considering an alternative proof of
Merry's result is that the continuation homomorphism can be used to
compare spectral invariants between two different magnetic fields,
we refer to \cite{AF10} for a discussion of spectral invariants in
Rabinowitz Floer homology. We plan to discuss this in more detail in
a further paper.
The question of invariance under symplectic perturbation is also an
important issue in symplectic homology, we refer to the paper by
Ritter \cite{Rit}. In view of the long exact sequence between
symplectic homology and Rabinowitz Floer homology established in
\cite{CFO09} we expect interesting interactions of this paper with
the approach followed by Ritter.
\\ \\
\emph{Acknowledgement: } The authors were supported by the Basic
research fund 2010-0007669 funded by the Korean government.
We thank J.-C. Sikorav for helpful comments
including Example \ref{ex:sol}.
\section{Continuation homomorphism in Morse and Floer homology}
\subsection{Morse homology}
Let $(M,g)$ be a closed Riemannian manifold and $f:M\to\mathbb{R}$ a Morse
function. We recall that the Morse chain complex $\mathrm{CM}_*(f)$ is the
graded $\mathbb{Z}_2$-vector space generated by the set $\mathrm{Crit}(f)$ of
critical points of $f$. The grading is given by the Morse index
$\mu=\mu_{\rm Morse}$ of $f$. The boundary operator
\[
\partial:\mathrm{CM}_*(f)\to\mathrm{CM}_{*-1}(f)
\]
is defined on generators by counting gradient flow lines.
Indeed assume that a Riemannian metric $g$ on $M$ satisfies the following transversality condition.
Stable and unstable manifolds with respect to the negative gradient flow of $\nabla f=\nabla^g f$ intersect transversally,
that is, $W^s(x)\pitchfork W^u(y)$ for all $x,y\in\mathrm{Crit}(f)$.
Then the moduli space
\[
\widehat{\mathcal{M}}(x_-,x_+):=\{x:\mathbb{R}\to M\ |\ \partial_sx(s)+\nabla f(x(s))=0,\ \lim_{s\to\pm\infty}x(s)=x_{\pm}\}
\]
is a smooth manifold of dimension $\dim\widehat{\mathcal{M}}(x_-,x_+)=\mu(x_-)-\mu(x_+)$.
Moreover, $\mathbb{R}$ acts by shifting the $s$-coordinate.
If $x_-\neq x_+$, the action is free and we denote the quotient by
\[
\mathcal{M}(x_-,x_+):=\widehat{\mathcal{M}}(x_-,x_+)/\mathbb{R}.
\]
Moreover, if $\mu(x_-)-\mu(x_+)=1$ then $\mathcal{M}(x_-,x_+)$ is a finite set.
Then we can define the differential $\partial=\partial(f,g)$ as a linear map which is given on generators by
\[
\partial x_-:=\sum_{x_+\in\mathrm{Crit}(f)\atop\mu(x_-)-\mu(x_+)=1}\#_2\mathcal{M}(x_-,x_+)x_+,
\]
where $\#_2$ denotes the count of a set modulo 2. It is a deep
theorem in Morse homology that the identity
\[
\partial\circ\partial=0
\]
holds, see \cite{Sch93} for details. Then
\[
\mathrm{HM}_*(f,g):=\mathrm{H}_*(\mathrm{CM}_\bullet(f),\partial(f,g))
\]
is the Morse homology for the pair $(f,g)$.
Moreover, $\mathrm{HM}_*(f,g)$ equals the singular homology $\mathrm{H}_*(M)$ of $M$.
In particular, $\mathrm{HM}(f,g)$ is independent of the choice of Morse-Smale pair $(f,g)$.
The independence of $\mathrm{HM}(f,g)$ of Morse-Smale pair $(f,g)$ can be shown directly using the continuation homomorphism
which is constructed in the following way.
For two Morse-Smale pairs $(f_\pm,g_\pm)$ we choose $T>0$ and a smooth family $\{f_s,g_s\}_{s\in\mathbb{R}}$
of functions $f_s:M\to\mathbb{R}$ and Riemannian metrics $g_s$ such that
\begin{equation}\begin{aligned}
f_s=
\left\{
\begin{array}{l}
f_-\ \text{for} \ s\leq -T \\
f_+\ \text{for} \ s\geq T
\end{array}
\right.
\qquad
g_s=
\left\{
\begin{array}{l}
g_-\ \text{for} \ s\leq -T \\
g_+\ \text{for} \ s\geq T .
\end{array}
\right.
\end{aligned}\end{equation}
For critical points $x_\pm\in\mathrm{Crit}(f_\pm)$, we consider the moduli space
\[
\mathcal N(x_-,x_+)=\mathcal N(x_-,x_+;f_s,g_s):=\{x:\mathbb{R}\to M\ |\ \partial_sx(s)+\nabla^{g_s}f_s(x(s))=0,\ \lim_{s\to\pm\infty}x(s)=x_\pm\}.
\]
A homotopy $(f_s,g_s)$ is called regular if the Fredholm operator
obtained by linearizing the gradient flow equation is onto. In
particular, for a regular homotopy the moduli space $\mathcal
N(x_-,x_+)$ is a smooth manifold of dimension $\dim\mathcal
N(x_-,x_+)=\mu(x_-)-\mu(x_+)$. A generic homotopy is regular.
Moreover, in the special case $f_s=f_-=f_+$ and $g_s=g_-=g_+$ we
have the identity \begin{equation}\label{eqn:NM} \mathcal
N(x_-,x_+)=\widehat{\mathcal{M}}(x_-,x_+). \end{equation} If $\mu(x_-)-\mu(x_+)=0$ the
space $\mathcal N(x_-,x_+)$ is compact. In order to verify that we
need to prove a uniform energy bound of $x\in\mathcal N(x_-,x_+)$ as
follows \begin{equation}\begin{aligned}\label{eqn:Meb}
E(x)&=E_{g_s}(x)={\int_{-\infty}^{\infty}}\|\partial_s x(s)\|_{g_s}^2ds \\
&=-{\int_{-\infty}^{\infty}}\langle\nabla_{g_s}f_s(x(s)),\partial_s x(s)\rangle_{g_s}ds \\
&=-{\int_{-\infty}^{\infty}} df_s(x(s))\partial_s xds \\
&=-{\int_{-\infty}^{\infty}}\frac{d}{ds}f_s(x(s))ds+{\int_{-\infty}^{\infty}}\dot f_s(x(s))ds \\
&\leq\|f_-\|_{\infty}+\|f_+\|_{\infty}+2T\|\dot f_s\|_{\infty}.
\end{aligned}\end{equation}
Then we define a linear map
\begin{equation}\begin{aligned}
Z=Z(f_s,g_s):\mathrm{CM}_*(f_-)&\to \mathrm{CM}_*(f_+) \\
x_-&\mapsto\sum_{x_+\in\mathrm{Crit}(f_+)\atop\mu(x_-)=\mu(x_+)}\#_2\mathcal N(x_-,x_+)x_+.
\end{aligned}\end{equation}
We denote $\partial_\pm:=\partial(f_\pm,g_\pm)$.
In the same manner as $\partial\circ\partial=0$, one proves in Morse homology
\[
Z\circ\partial_-=\partial_+\circ Z,
\]
see \cite{Sch93}. In particular, on homology level we obtain the map
\[
\widetilde Z:\mathrm{HM}_*(f_-,g_-)\to\mathrm{HM}_*(f_+,g_+)
\]
which is called the continuation homomorphism.
By a homotopy-of-homotopies argument, it is proved that
$\widetilde Z$ is independent of the chosen homotopy $(f_s,g_s)$, see \cite{Sch93}.
Moreover, the continuation homomorphism is functorial in the following sense.
If we fix three Morse-Smale pairs $(f_a,g_a),\ (f_b,g_b)$, and $(f_c,g_c)$
we denote the corresponding continuation homomorphisms by $\widetilde Z_a^b:\mathrm{HM}_*(f_a,g_a)\to\mathrm{HM}_*(f_b,g_b)$
and similarly $\widetilde Z_a^c$ and $\widetilde Z_b^c$.
Then we have the following identity
\[
\widetilde Z_a^c=\widetilde Z_b^c\circ\widetilde Z_a^b.
\]
Now consider the case $f_s=f_a$ and $g_s=g_a$. By (\ref{eqn:NM}), we
get $\#_2\mathcal N(x_-,x_+)=1$ if $x_-=x_+$ and $\#_2\mathcal
N(x_-,x_+)=0$ for the other cases. Hence we obtain the following
identity
\[
\widetilde Z_a^a=\mathrm{id}_{\mathrm{HM}_*(f_a,g_a)}.
\]
In particular, we conclude that $\widetilde Z_a^b$ is an isomorphism with inverse $\widetilde Z_b^a$.
\subsection{Morse-Bott homology}\label{sec:MBh}
Let $M$ be a compact manifold and $(f,h,g,g^0)$ be a {\em Morse-Bott
quadruple}. The Morse-Bott quadruple consists of a Morse-Bott
function $f$ on $M$, a Morse function $h$ on $\mathrm{Crit}(f)$, a Riemannian
metric $g$ on $M$ and a Riemannian metric $g^0$ on $\mathrm{Crit}(f)$. We
assume that $(h,g^0)$ satisfies the Morse-Smale condition, i.e.
stable and unstable manifolds intersect transversally. For a
critical point $c$ of $h$, let $\mathrm{ind\,}_f(c)$ be the number of negative
eigenvalues of $\mathrm{Hess}(f)(c)$ and $\mathrm{ind\,}_h(c)$ be the number of
negative eigenvalues of $\mathrm{Hess}(h)(c)$. We define
\[
\mathrm{ind\,}(c):=\mathrm{ind\,}_{f,h}(c):=\mathrm{ind\,}_f(c)+\mathrm{ind\,}_h(c).
\]
\begin{Def}
For $c_1,c_2\in\mathrm{Crit}(h)$, and $m\in\mathbb{N}$ a {\em flow line from $c_1$
to $c_2$ with $m$ cascades}
\[
(x,T)=((x_k)_{1\leq k\leq m},(t_k)_{1\leq k \leq m-1})
\]
consists of $x_k\in C^\infty(\mathbb{R},M)$ and $t_k\in\mathbb{R}_{\geq0}:=\{r\in\mathbb{R}:r\geq0\}$ which satisfy the following conditions:
\begin{enumerate}
\item $x_k\in C^\infty(\mathbb{R},M)$ are nonconstant solutions of
\[
\dot x_k=-\nabla f(x_k).
\]
\item There exists $p\in W^u_h(c_1)$ and $q\in W^s_h(c_2)$ such that
\[
\lim_{s\to-\infty}x_1(s)=p \text{ and } \lim_{s\to\infty}x_m(s)=q.
\]
\item For $1\leq k\leq m-1$ there are Morse flow lines $y_k\in C^\infty(\mathbb{R},\mathrm{Crit}(f))$ of $h$,
i.e. solutions of
\[
\dot y_k=-\nabla h(y_k),
\]
such that
\[
\lim_{s\to\infty}x_k(s)=y_k(0),\qquad \lim_{s\to-\infty}x_{k+1}(s)=y_k(t_k).
\]
\end{enumerate}
A \emph{flow line with zero cascades} is just an ordinary Morse flow
line from $c_1$ to $c_2$.
\end{Def}
\begin{figure}
\caption{A flow line with cascades}
\end{figure}
We denote the space of flow lines with $m$ cascades from $c_1$ to $c_2\in\mathrm{Crit}(h)$ by
\[
\widetilde{\mathcal{M}}_m(c_1,c_2).
\]
If $m\geq1$ then the group $\mathbb{R}^m$ acts on $\widetilde{\mathcal{M}}_m(c_1,c_2)$ by time shift on each cascade, i.e.
\[
x_k(s)\mapsto x_k(s+s_k).
\]
In the case of zero cascades $m=0$, the group $\mathbb{R}$ still acts on $\widetilde{\mathcal{M}}_0(c_1,c_2)$ by time shifting.
We denote the quotient by
\[
\mathcal{M}_m(c_1,c_2).
\]
We define the {\em set of flow lines with cascades from $c_1$ to $c_2$} by
\[
\mathcal{M}(c_1,c_2):=\bigcup_{m\in\mathbb{N}_0}\mathcal{M}_m(c_1,c_2).
\]
For a pair $(f,h)$ consisting of a Morse-Bott function $f$ on $M$ and
a Morse function $h$ on $\mathrm{Crit}(f)$, we define the chain complex $\mathrm{CM}_*(f,h)$
as the $\mathbb{Z}_2$-vector space generated by the critical points of $h$ graded by the index.
More precisely, $\mathrm{CM}_k(f,h)$ are formal sums of the form
\[
\xi=\sum_{\substack{c\in\mathrm{Crit}(h)\\ \mathrm{ind\,}(c)=k}}\xi_c\,c
\]
with $\xi_c\in\mathbb{Z}_2$. For generic choice of the Riemannian metric $g$ on $M$,
the moduli spaces of flow lines with
cascades $\mathcal{M}(c_1,c_2)$ is a smooth manifold of dimension
\[
\dim\mathcal{M}(c_1,c_2)=\mathrm{ind\,}(c_1)-\mathrm{ind\,}(c_2)-1.
\]
If $\dim\mathcal{M}(c_1,c_2)=0$, then $\mathcal{M}(c_1,c_2)$ is finite. We define the boundary operator
\[
\partial_k:\mathrm{CM}_k(f,h)\to\mathrm{CM}_{k-1}(f,h)
\]
as the linear extension of
\[
\partial_k c=\sum_{\mathrm{ind\,}(c')=k-1}\#_2\mathcal{M}(c,c')c'
\]
for $c\in\mathrm{Crit}(h)$ with $\mathrm{ind\,}(c)=k$. The usual gluing and compactness arguments imply that
\[
\partial\circ\partial=0.
\]
This defines homology groups
\[
\mathrm{HM}_*(f,h,g,g^0):=\mathrm{H}_*(\mathrm{CM}_\bullet(f,h),\partial(f,h,g,g^0)).
\]
In the Morse-Bott situation, we can also show that the Morse-Bott
homology is independent of the choice of the Morse-Bott quadruple.
First take two regular quadruples $(f_-,h_-,g_-,g^0_-)$ and
$(f_+,h_+,g_+,g^0_+)$. Choose a smooth family of interpolations
$\{(f_s,g_s)\}_{s\in\mathbb{R}}$ such that \begin{equation}\begin{aligned} f_s= \left\{
\begin{array}{l}
f_-\ \text{for} \ s\leq -T \\
f_+\ \text{for} \ s\geq T
\end{array}
\right.
\qquad
g_s=
\left\{
\begin{array}{l}
g_-\ \text{for} \ s\leq -T \\
g_+\ \text{for} \ s\geq T .
\end{array}
\right.
\end{aligned}\end{equation}
For $c_1\in\mathrm{Crit}(h_-)$, $c_2\in\mathrm{Crit}(h_+)$,
we consider the following flow lines from $c_1$ to $c_2$ with $m$ cascades
\[
(x,T)=((x_k)_{1\leq k\leq m},(t_k)_{1\leq k\leq m-1})
\]
for $x_k\in C^\infty(\mathbb{R},M)$ and $t_k\in\mathbb{R}_{\geq0}$ which satisfy the following conditions:
\begin{enumerate}
\item $x_k$ are solutions of
\[
\dot x_k(s)=-\nabla_{\widetilde g_{k}}\widetilde f_k(x_k),
\]
where for some $m_1 \in \{1,\ldots,m\}$ \begin{equation} \widetilde f_k=
\left\{
\begin{array}{ccc}
f_-&\text{ for }&1\leq k\leq m_1-1 \\
f_s&\text{ for }& k=m_1 \\
f_+&\text{ for }& m_1+1\leq k\leq m
\end{array}
\right.
\end{equation}
and
\begin{equation}
\widetilde g_k=
\left\{
\begin{array}{ccc}
g_-&\text{ for }&1\leq k\leq m_1-1 \\
g_s&\text{ for }& k=m_1 \\
g_+&\text{ for }& m_1+1\leq k\leq m.
\end{array}
\right. \end{equation} Moreover, for $k \neq m_1$ the cascade $x_k$ is
nonconstant.
\item There exists $p_1\in W^u_{h_-}(c_1)$ and $p_2\in W^s_{h_+}(c_2)$ such that
$\lim_{s\to-\infty}x_1(s)=p_1$ and $\lim_{s\to\infty}x_m(s)=p_2$.
\item
For $1\leq k\leq m-1$, $y_k$ are Morse flow lines of $\widetilde h$, i.e. solutions of
\[
\dot y_k(s)=-\nabla_{\widetilde g^0_{k}}\widetilde h_k(y_k),
\]
and
\[
\lim_{s\to\infty}x_k(s)=y_k(0),\qquad \lim_{s\to-\infty}x_{k+1}(s)=y_k(t_k)
\]
where
\begin{equation}
\widetilde h_k=
\left\{
\begin{array}{ccc}
h_-&\text{ for }&1\leq k\leq m_1-1 \\
h_+&\text{ for }& m_1\leq k\leq m-1
\end{array}
\right.
\end{equation}
and
\begin{equation}
\widetilde g^0_k=
\left\{
\begin{array}{ccc}
g^0_-&\text{ for }&1\leq k\leq m_1-1 \\
g^0_+&\text{ for }& m_1\leq k\leq m-1.
\end{array}
\right.
\end{equation}
\end{enumerate}
For a generic choice of the data, the space of solutions of (1) to
(3) is a smooth manifold whose dimension is given by the difference
of the indices of $c_1$ and $c_2$. If $\mathrm{ind\,}(c_1)=\mathrm{ind\,}(c_2)$ then
this manifold is compact. In order to verify this we need to prove a
uniform energy bound of time-dependent cascades as in the Morse
case. Since a cascade consists of several negative gradient flow
lines $(x_k)_{1\leq k\leq m},(y_k)_{1\leq k\leq m-1}$, it suffices
to show that the energy of each (time-dependent) gradient flow line
are uniformly bounded. This is guaranteed by the argument of
(\ref{eqn:Meb}) in the Morse situation.
We define a map
\[
Z=Z(\widetilde f,\widetilde h,\widetilde g,\widetilde g^0):\mathrm{CM}_*(f_-,h_-)\to\mathrm{CM}_*(f_+,h_+)
\]
as the linear extension of
\[
Zc_-=\sum_{\substack{c_+\in\mathrm{Crit}(h_+)\\ \mathrm{ind\,}(c_+)=\mathrm{ind\,}(c_-)}}\#_2\mathcal{M}(c_-,c_+)c_+
\]
where $c_-\in\mathrm{Crit}(h_-)$. Standard arguments as in the Morse case
show that $Z$ induces isomorphisms on homologies
\[
\widetilde Z:\mathrm{HM}_*(f_-,h_-,g_-,g^0_-)\to\mathrm{HM}_*(f_+,h_+,g_+,g^0_+).
\]
This proves that Morse-Bott homology is independent of the choice of
a Morse-Bott quadruple. We refer to Appendix A in \cite{Fra04}, for
details.
\subsection{Floer homology for Hamiltonian deformation}
Let $(M,\omega)$ be a symplectically aspherical closed $2n$-dimensional manifold
which means that $\omega|_{\pi_2(M)}\equiv 0$.
Let $H:S^1\times M\to\mathbb{R}$ be a time-dependent Hamiltonian on $M$ and $H_t=H(t,\cdot)\in C^\infty(M,\mathbb{R})$.
The {\em Hamiltonian vector field} $X_{H_t}$ is defined by
\[dH_t = -\iota_{X_{H_t}}\omega.\]
An almost complex structure $J_t$ on $M$ is $\omega$-{\em compatible}
if $\langle\cdot,\cdot \rangle :=\omega(\cdot,J_t\cdot)$ is a Riemannian metric $\forall t\in S^1$.
Let $\mathscr{L}^0$ be the component of contractible loops on $M$.
The {\em Hamiltonian action} is
\[
\mathcal{A}_H:\mathscr{L}^0\to\mathbb{R}
\]
\[\mathcal{A}_H(x):=\int_{\mathbb{D}^2} \overline{x}^*\omega-\int_0^1H(t,x(t))dt,\]
where $\overline{x}$ is an extension of $x$ to the unit disk. Since
we consider only contractible loops such an extension exists and
because $\omega|_{\pi_2(M)}=0$ the action functional does not depend on
the choice of the filling disk. A positive gradient flow line
$v:\mathbb{R}\times S^1 \to M$ of $\mathcal{A}_H$ satisfies the perturbed
Cauchy-Riemann equation \begin{equation}\label{eq:grad}
\partial_sv+J(t,v)(\partial_tv-X_H(t,v))=0. \end{equation} Formally a positive gradient
flow line $v\in$``$C^\infty(\mathbb{R},\mathscr{L}^0)$'' is a solution of the ``ODE''
\[
\partial_sv-\nabla\mathcal{A}_H(v(s))=0.
\]
According to Floer, we interpret this as a solution of the PDE,
$v\in C^\infty(\mathbb{R}\times S^1,M)$ satisfying (\ref{eq:grad}).
\subsubsection{Sign and grading conventions}
The {\em Conley-Zehnder index} $\mathrm{CZ}(x;\tau) \in \mathbb{Z}$ of a nondegenerate 1-periodic orbit $x$ of $X_H$ with respect to a
symplectic trivialization $\tau : x^*TM \to S^1 \times \mathbb{R}^{2n}$ is defined as follows. The linearized Hamiltonian flow
along $x$ with $\tau$ defines a path of symplectic matrices $\Phi_t,\ t \in [0,1]$, with $\Phi_0=\mathrm{id}$ and $\Phi_1$ not having 1
in its spectrum. Then $\mathrm{CZ}(x;\tau)$ is the Maslov index of the path $\Phi_t$ as defined in \cite{RS,Sal}.
For a critical point $x$ of a $C^2$-small Morse function $H$ the Conley-Zehnder index
with respect to the constant trivialization $\tau$ is related to the Morse index by
\[\mathrm{CZ}(x;\tau)=n-\mu_{\rm Morse}(x).\]
We have the following identity
\[\mathrm{CZ}(x;\tau')=\mathrm{CZ}(x;\tau)-2c_1([\tau' \# \overline{\tau}]),\]
where $c_1$ is the first Chern class and $\overline{\tau}$ means
opposite orientation of $\tau$. If $c_1(M)=0$ we obtain integer
valued Conley-Zehnder indices for all 1-periodic orbits. Without any
hypothesis on $c_1(M)$ we still have well-defined Conley-Zehnder
indices in $\mathbb{Z}_2$ and all the following results hold with respect to
this $\mathbb{Z}_2$-grading.
\subsubsection{Floer homology}
Let $\mathcal{P}(H)$ be the set of 1-periodic orbits of $X_H$. Given
$x_\pm\in\mathcal{P}(H)$ we denote by $\widehat{\mathcal{M}}(x_-,x_+)$ the space of
solutions of (\ref{eq:grad}) with $\lim_{s \to \pm
\infty}v(s,t)=x_{\pm}(t)$. Its quotient by the $\mathbb{R}$-action
$s_0\cdot(s,t):=(s+s_0,t)$ on the cylinder is called the {\em moduli
space of Floer trajectories} and is denoted by
\[\mathcal{M}(x_-,x_+):=\widehat{\mathcal{M}}(x_-,x_+)/\mathbb{R}.\]
Assume now that all elements of $\mathcal{P}(H)$ are nondegenerate.
Suppose further that the almost complex structure $J=(J_t),\;t\in S^1$ is generic,
so that $\mathcal{M}(x_-,x_+)$ is a smooth manifold of dimension
\[\dim\mathcal{M}(x_-,x_+)=\mathrm{CZ}(x_-)-\mathrm{CZ}(x_+)-1 .\]
The boundary operator $\partial_k:\mathrm{CF}_k(H)\to\mathrm{CF}_{k-1}(H)$ is defined by
\[\partial x:=\sum_{\mathrm{CZ}(y)=k-1}\#_2\mathcal{M}(x,y)y.\]
It increases the action and satisfies $\partial\circ\partial=0$.
Hence we can define Floer homology
\[
\mathcal{F}H_*(H)=\mathrm{H}_*(\mathrm{CF}_\bullet(H),\partial).
\]
Note that $(\mathrm{CF}_*(H),\partial)$ depends on additional data, namely the Hamiltonian $H$, the symplectic structure $\omega$,
and the almost complex structure $J_t$.
\subsubsection{Continuation map}
Floer proved that $\mathcal{F}H_*(H)$ depends only on the underlying manifold
$M$, see \cite{Fl1,Fl2,Fl3}. We now give a proof of Floer's theorem
via Morse-Bott methods which is due to Piunikhin, Salamon and
Schwarz \cite{PSS}. Take two different time-dependent Hamiltonians
$H_-,H_+\in C^\infty(S^1\times M)$, and choose $T>0$ and a smooth
family of Hamiltonians $H_s:S^1\times M\to\mathbb{R}$ with $s\in\mathbb{R}$ such
that \begin{equation} H_s= \left\{
\begin{array}{l}
H_-\ \text{for} \ s\leq -T \\
H_+\ \text{for} \ s\geq T .
\end{array}
\right.
\end{equation}
Now take two different almost complex structures $J_{t,-},J_{t,+}$,
and a smooth family of almost complex structures $J_{t,s}$ such that
\begin{equation}
J_{t,s}=
\left\{
\begin{array}{l}
J_{t,-}\ \text{for} \ s\leq -T \\
J_{t,+}\ \text{for} \ s\geq T .
\end{array}
\right.
\end{equation}
The continuation map between two different time-dependent Hamiltonians
\[
\zeta_{H_-}^{H_+}:\mathrm{CF}_*(H_-)\to\mathrm{CF}_*(H_+)
\]
is given by counting positive gradient flow lines $v\in$``$C^\infty(\mathbb{R},\mathscr{L}^0)$'' of
\[
\mathcal{A}_{H_s}(x)=\int_{\mathbb{D}^2}\overline{x}^*\omega-\int_0^1 H_s(t,x(t))dt
\]
where $v\in C^\infty(\mathbb{R}\times S^1,M)$ is a solution of
\begin{equation}\label{eqn:fhgrad}
\left.
\begin{array}{r}
\partial_sv+J_{t,s}(v)(\partial_t v-X_{H_s}(v))=0 \\
\lim_{s\to\pm\infty}v(s)=v_\pm\in\mathrm{Crit}\mathcal{A}_{H_{\pm}}.
\end{array}
\right\}
\end{equation}
For critical points $v_\pm\in\mathrm{Crit}\mathcal{A}_{H_{\pm}}$, we consider the moduli spaces
\[
\mathcal N_{H_{\pm}}(v_-,v_+)=\mathcal N_{H_{\pm}}(v_-,v_+;H_s)=\{v:\mathbb{R}\times S^1\to M\ |\ v\text{ satisfies }(\ref{eqn:fhgrad})\}.
\]
If $\mathrm{CZ}(v_-)=\mathrm{CZ}(v_+)$ the space $\mathcal N_{H_{\pm}}(v_-,v_+)$ is
compact. A crucial ingredient for the compactness proof is, as in
the Morse case, a uniform energy bound for $v\in\mathcal
N_{H_{\pm}}(v_-,v_+)$, see \cite{Sal} for details. The uniform energy
bound is given by \begin{equation}\begin{aligned}
E(v)&=E_{J_{t,s}}(v)={\int_{-\infty}^{\infty}}\|\partial_s v(s)\|_{J_{t,s}}^2ds \\
&={\int_{-\infty}^{\infty}}\langle\nabla^{J_{t,s}}\mathcal{A}_{H_s}(v(s)),\partial_s v(s)\rangle_{J_{t,s}} ds \\
&={\int_{-\infty}^{\infty}}\frac{d}{ds}\mathcal{A}_{H_s}(v(s))ds-{\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{H_s}(v(s))ds \\
&=\mathcal{A}_{H_+}(v_+)-\mathcal{A}_{H_-}(v_-)-{\int_{-\infty}^{\infty}}\int_0^1\dot H_s(t,v(s,t))dtds \\
&\leq\mathcal{A}_{H_+}(v_+)-\mathcal{A}_{H_-}(v_-)+2T\max_{\substack{s\in[-T,T]\\ (t,x)\in S^1\times M}}|\dot H_s(t,x)| ,
\end{aligned}\end{equation}
where $\|\cdot\|_{J_{t,s}}$ is given by $\int_0^1\omega(\cdot,J_{t,s}\cdot)dt$.
Then we can define a linear map
\begin{equation}\begin{aligned}
\zeta_{H_-}^{H_+}=\zeta_{H_-}^{H_+}(H_s):\mathrm{CF}_*(H_-)&\to\mathrm{CF}_*(H_+) \\
v_-&\mapsto\sum_{\substack{v_+\in\mathrm{Crit}\mathcal{A}_{H_+}\\ \mathrm{CZ}(v_-)=\mathrm{CZ}(v_+)}}\#_2\mathcal
N_{H_{\pm}}(v_-,v_+)v_+ \end{aligned}\end{equation} which induces a homomorphism on homology
level,
\[
\widetilde\zeta_{H_-}^{H_+}:\mathcal{F}H_*(H_-)\to\mathcal{F}H_*(H_+).
\]
The resulting homomorphism is independent of the choice of the homotopy $H_s$ and $J_{t,s}$
by a homotopy-of-homotopies argument, similar as in the Morse situation.
By functoriality, we conclude that $\widetilde\zeta_{H_-}^{H_+}$ is an isomorphism with inverse $\widetilde\zeta_{H_+}^{H_-}$.
Now consider the special case where the Hamiltonian $H\equiv 0$ is the zero Hamiltonian.
Then
\[
\mathcal{A}_H(x)=\mathcal{A}_0(x)=\int_{\mathbb{D}^2}\overline x^*\omega
\]
is the symplectic area functional which is Morse-Bott and
\[
\mathrm{Crit}\mathcal{A}_0=\{x\in\mathscr{L}^0\ |\ x\text{ is a constant loop}\}\cong M.
\]
This implies that
\[
\mathcal{F}H_*(0,f)=\mathrm{HM}_*(f)\cong \mathrm{H}_*(M),
\]
where $f:M\to\mathbb{R}$ is an additional Morse function on the critical manifold $\mathrm{Crit}\mathcal{A}_0\cong M$.
Note that $\mathrm{H}_*(M)$ is the singular homology of $M$ which only depends on $M$.
Hence we conclude that Floer homology does not depend on additional structures like $\omega,H$, and $J_t$.
\subsection{Floer homology for symplectic deformation}
In the previous subsection, we have seen that Floer homology is
independent of the symplectic structure. In this subsection, we ask
if this fact can also be seen directly by constructing a
continuation homomorphism between two symplectic forms. So far we
can only construct the continuation homomorphism for symplectic
deformations under additional assumptions on the symplectic
structures. Different from the case of Hamiltonian deformations, it
might be necessary to subdivide the symplectic deformations in a
sequence of small {\em adiabatic} steps.
Let $(M,g)$ be a $2n$-dimensional closed Riemannian manifold with two symplectic forms $\omega_0$, $\omega_1$.
Suppose that $(M,\omega_s)$ is a family of symplectically aspherical closed manifolds,
where $\omega_s=s\omega_1+(1-s)\omega_0$ for $s\in[0,1]$.
Then we want to construct a continuation map
\begin{equation}
\Psi_{\omega_0*}^{\omega_1}:\mathrm{CF}_{*}(\omega_0)\to\mathrm{CF}_{*}(\omega_1)
\end{equation}
which induces an isomorphism on homology level.
In order to state and prove our result we need the term of the {\em cofilling function}.
\begin{Def}[Gromov \cite{Gro1}, Polterovich \cite{Pol}]\label{def:cofilling}
Let $\sigma\in\Omega^2(M)$ be a closed weakly exact 2-form, then the {\em cofilling function} is
\[
u_{\sigma}(s):[0,\infty)\to[0,\infty)
\]
\[
u_{\sigma}(s)=u_{\sigma,g,x}(s)=\inf_{\theta\in\mathcal{P}_\sigma}\sup_{z\in
B_x(s)}|\theta_z|_{\widetilde{g}},
\]
where $\mathcal{P}_{\sigma}=\{\theta\in\Omega^1(\widetilde{M})\;
|\;d\theta=\widetilde\sigma\}$ is the space of primitives for
$\sigma$ and $B_x(s)$ is the $s$-ball centered at
$x\in\widetilde{M}$.
\end{Def}
\begin{Rmk}\label{rmk:diffmetric}
If we choose another Riemannian metric $g'$ on $M$ and a different
base point $x'\in\widetilde{M}$ then we can check that
$u_{\sigma,g,x}\sim u_{\sigma,g',x'}$. \footnote{ $f\sim g \iff
f\lesssim g$ and $\ g\lesssim f$,\qquad $f\lesssim g \iff \exists$
$C>0$ such that $f(s)\leq C(g(s)+1),\ \forall s\in[0,\infty)$. }
Moreover, since the function $u_{\sigma,g,x}$ actually only depends
on the projection of $x$ from the universal cover to the compact
space $M$, the constant can be chosen uniformly in $x$.
\end{Rmk}
\begin{Ex}\label{ex:torus}
Let us consider $({\mathbb{T}}^{2n}=\mathbb{R}^{2n} \slash \mathbb{Z}^{2n}, \omega=\sum_{i=1}^{n}dx_i \wedge dy_i)$
with the metric induced by the standard metric on $\mathbb{R}^{2n}$.
Since $\omega=d(\sum_{i=1}^{n}x_i \wedge dy_i)$ and
$\sum_{i=1}^{n}x_i \wedge dy_i$ has {\em linear growth} on the universal cover $\mathbb{R}^{2n}$,
it follows that $u(s) \lesssim s$.
Suppose that there is $\theta\in\Omega^1(\mathbb{R}^{2n})$ such that $d\theta=\omega$ and
\[
\sup_{z\in B_x(s)}|\theta_z|\leq Cs^\alpha
\]
for $0\leq\alpha<1$, then we get
\[
\pi r^2=\int_{\mathbb{D}_r}d\theta=\int_{\partial\mathbb{D}_r}\theta
\leq \max_{z\in\partial \mathbb{D}_r}|\theta_z|\int_{\partial \mathbb{D}_r}1
\leq Cr^\alpha \ 2\pi r,
\]
where $\mathbb{D}_r$ is a 2-dimensional disk of radius $r$.
This cannot happen as $r\to\infty$, thus we conclude that $u_\omega(s)\sim s$.
\end{Ex}
\begin{Ex}\label{ex:hyperbolic}
Now consider $(\mathbb{H}^2, \omega=\frac{1}{y^2}dx \wedge dy)$
with hyperbolic metric $ds^2=\frac{1}{y^2}(dx^2+dy^2)$. The given symplectic form $\omega$
has a {\em bounded} primitive 1-form $\frac{1}{y}dx$ which means that $u(s) \sim 1$.
It is well-known that a bounded 2-form on $\mathbb{H}^n$ with canonical hyperbolic metric has
constant cofilling function, see Gromov \cite[$5.B_5$]{Gro1}.
\end{Ex}
\begin{Ex}[Solvable manifold]\label{ex:sol}
Let us construct a 3-manifold $M$ fibered over $S^1$ with fiber ${\mathbb{T}}^2$ with hyperbolic monodromy,
\[
A=\begin{pmatrix}
2&1\\
1&1
\end{pmatrix}.
\]
Let $y,z$ be the coordinates of the fiber torus, then $\sigma =dy \wedge dz$ is a well-defined 2-form on $M$.
Note that every primitive of $\sigma$ on the universal cover
has exponential growth, see Appendix \ref{app:sol}.
\end{Ex}
\begin{Lemma}[Quadratic isoperimetric inequality]\label{thm:isoperimetric}
Let $(M,g)$ be a closed Riemannian manifold with closed weakly exact 2-form $\sigma\in\Omega^2(M)$.
If $u_{\sigma}(t) \lesssim t$, then the {\em quadratic isoperimetric inequality} holds,
\begin{equation}
\int_{\mathbb{D}^2}\overline{v}^*\sigma\leq C\left(l(v)^2+1\right)
\end{equation}
where $l(v)=\int_{S^1}|\partial_tv(t)|_gdt$,
$\overline v:\mathbb{D}^2\to M$ is an extension of the contractible loop $v:S^1\to M$ to the unit disk, and $C=C(M,g,\sigma)$.
\end{Lemma}
\begin{proof}
Since $u_{\sigma}(t) \lesssim t$, we can choose a 1-form
$\theta\in\mathcal{P}_{\sigma}$ which has linear growth on the universal
cover and such that $\max_{z\in B_{\widetilde v(0)}(l(\widetilde
v))}|\theta_z|_{\widetilde g} \leq u_\sigma(l(\widetilde v))+1$. Let
$\widetilde{\overline{v}}\ : \mathbb{D}^2 \to \widetilde{M}$ be the lifting
of $\overline{v}$ and set $\theta_{\max}(\widetilde v) = \underset{z
\in \widetilde v(S^1)}{\max} |\theta_z|_{\widetilde g}$. Note that
\begin{equation}\begin{aligned}
\theta_{\max}(\widetilde v)&=\max_{z\in\widetilde v(S^1)}|\theta_z|_{\widetilde g} \\
&\leq\max_{z\in B_{\widetilde v(0)}(l(\widetilde v))}|\theta_z|_{\widetilde g} \\
&\leq u_\sigma(l(\widetilde v))+1 \\
&\leq \frac{C}{2}(l(\widetilde v)+1),
\end{aligned}\end{equation}
for some $C=C(M,g,\sigma)\in\mathbb{R}^+$.
The last inequality uses the fact $u_\sigma(t)\lesssim t$.
Then we get
\begin{equation}\begin{aligned}
\int_{\mathbb{D}^2}\overline{v}^*\sigma&=\int_{\mathbb{D}^2}\widetilde{\overline{v}}^* \widetilde{\sigma}\\
&= \int_{\mathbb{D}^2}\widetilde{\overline{v}}^* d\theta\\
&= \int_{S^1}\widetilde v^* \theta\\
&\leq\theta_{\max}(\widetilde v)l(v)\\
&\leq \frac{C}{2}\left(l(v)^2+l(v)\right) \\
&\leq C\left(l(v)^2+1\right).
\end{aligned}\end{equation}
Let us denote the constant $C$ as the {\em isoperimetric constant}.
\end{proof}
\begin{Def}
Let $M^{2n}$ be a closed manifold with a time-dependent Hamiltonian
$H:S^1\times M\to\mathbb{R}$. A pair $(\omega_0,\omega_1)$ is called a {\em
continuation pair} on $(M,H)$ if
\begin{itemize}
\item $(M,\omega_s)$ is a symplectically aspherical closed manifold $\forall s\in[0,1]$, \\
where $\omega_s=\omega_0+s\sigma,\ \ \sigma=\omega_1-\omega_0$;
\item $\mathcal{A}_{\omega_s}=\mathcal{A}_{H,\omega_s}:\mathscr{L}^0\to\mathbb{R}$ is Morse, for generic $s\in[0,1]$ and $s=0,1$;
\item $u_\sigma(t)\lesssim t$.
\end{itemize}
\end{Def}
\begin{Rmk}\label{rmk:lip_prop}
Let us apply Lemma \ref{thm:isoperimetric} to the {\em continuation pair} $(\omega_0,\omega_1)$ on $M$.
First set
\begin{equation}\label{eqn:lin_symp_form}
\omega_s=\omega_0+\beta(s)\sigma,\qquad \sigma=\omega_1-\omega_0
\end{equation}
where $\beta(s)\in C^\infty(\mathbb{R},[0,1])$ is a cut-off function satisfying $\beta(s)=1$ for $s \geq 1$,
$\beta(s)=0$ for $s \leq 0$ and $0\leq\dot{\beta}(s) \leq 2$. Then we get
\begin{equation}\begin{aligned}
\left| \int_{\mathbb{D}^2}\overline{v}^*(\omega_s-\omega_0) \right|
&\leq \left| \int_{\mathbb{D}^2}\overline{v}^*\beta(s)\sigma \right|
= \beta(s)\left| \int_{\mathbb{D}^2}\overline{v}^*\sigma \right| \\
&\leq C\beta(s)\left[\left( \int_{S^1} |\partial_t v(t)| dt \right)^2+1\right].
\end{aligned}\end{equation}
Note that $C\beta(s)$ is continuous and $C\beta(s)=0$ for $s\leq0$.
Let us denote the function $C\beta(s)$ as the {\em isoperimetric constant function}.
\end{Rmk}
\begin{Thm}\label{thm:continuation}
Let $M^{2n}$ be a closed manifold with a time-dependent Hamiltonian $H:S^1\times M\to\mathbb{R}$.
If $(\omega_0,\omega_1)$ is a continuation pair on $(M,H)$ then there is a continuation map
\[
\Psi_{\omega_0*}^{\omega_1}:\mathrm{CF}_*(\omega_0)\to\mathrm{CF}_*(\omega_1)
\]
which induces an isomorphism
\[
\widetilde{\Psi_{\omega_0}^{\omega_1}}_*:\mathcal{F}H_*(\omega_0)\to\mathcal{F}H_*(\omega_1).
\]
\end{Thm}
\begin{proof}
First recall the definition of the action functional
\[
\mathcal{A}_{H,\omega}:\mathscr{L}^0\to\mathbb{R}
\]
\begin{equation} \mathcal{A}_{H,\omega}(x)=\int_{\mathbb{D}^2}\overline{x}^*\omega-\int_0^1H(t,x(t))dt,
\end{equation} where $\overline{x}:\mathbb{D}^2\to M$ is an extension of the
contractible loop $x$ to the unit disk. By the Morse condition in
the definition of the continuation pair $(\omega_0,\omega_1)$, we know
that $\omega_0,\omega_1\in\Omega^{\rm symp}(M)$ are nondegenerate symplectic
forms. This means that every fixed point
$x\in\text{Fix}\,\phi_{H,\omega_i}^1$ is nondegenerate, where
$\phi_{H,\omega_i}^1:M\to M$ is the time-1-map for the flow of the
non-autonomous Hamiltonian vector field $X_H^{\omega_i}$.
Let us consider
\[\omega_s=\omega_0+\beta(s)\sigma,\qquad \sigma=\omega_1-\omega_0\]
as in Remark \ref{rmk:lip_prop}. We choose further almost complex
structures $J_{s,t}$ for $\omega_s$. For technical reasons, we now
subdivide $\omega_s$ into sufficiently small pieces. Let
$\{\omega^i\}_{i=0}^{N}$ be a subdivision of $\omega_s$ satisfying
\begin{itemize}
\item $\omega^i=\omega_0+d(i)\sigma$, where $0=d(0)<d(1)<\cdots <d(N)=1$;
\item $\mathcal{A}_{H,\omega^i}:\mathscr{L}^0\to\mathbb{R}$ is Morse, $\forall i=0,1,\dots,N$;
\item $C(M,g,(d(i+1)-d(i))\sigma)\leq 1/8$, $\forall i=0,1,\dots,N-1$,\\ where $C$ is the isoperimetric constant.
\end{itemize}
The above 2nd condition is guaranteed by the generic Morse condition
for the continuation pair $(\omega_0,\omega_1)$. By Remark
\ref{rmk:lip_prop}, we can assume the 3rd condition.
Let $\omega_s^i=\omega^i+\beta(s)(\omega^{i+1}-\omega^i)$ be a homotopy between $\omega^i$ and $\omega^{i+1}$.
Now consider $v:\mathbb{R} \times S^1 \to M$ satisfying the gradient flow equation
\begin{equation}\label{eqn:gradeqn}
\partial_sv+J_{s,t}(v)(\partial_tv-X_H^{\omega_s^i}(t,v))=0,
\end{equation} and the limit condition \begin{equation}\label{eqn:limit_of_grad}
\lim_{s \to -\infty}v(s,t)=v_- (t) \in \mathrm{Crit} \mathcal{A}_{H,\omega^i}
\quad
\lim_{s \to +\infty}v(s,t)=v_+ (t) \in \mathrm{Crit} \mathcal{A}_{H,\omega^{i+1}}.
\end{equation}
We then want to define a map
\[
\Psi_{\omega^i \ \ k}^{\omega^{i+1}}:\mathrm{CF}_k(\omega^i)\to\mathrm{CF}_k(\omega^{i+1})
\]
given by
\begin{equation}
\Psi_{\omega^i\ \ k}^{\omega^{i+1}}(v_-)=\sum_{\mathrm{CZ}(v_+)=k}\#_2\mathcal{M}_{v_-,v_+}(\omega^i,\omega^{i+1})v_+.
\end{equation}
Here,
\begin{equation}
\mathcal{M}_{v_-,v_+}(\omega^i,\omega^{i+1})=\{v:\mathbb{R}\times S^1\to M\ |\ v\text{ satisfies }(\ref{eqn:gradeqn}),\ (\ref{eqn:limit_of_grad})\}.
\end{equation}
Because $\omega_s$ is symplectically aspherical $\forall s\in\mathbb{R}$, there is no bubbling.
So it suffices to bound the energy $E(v)={\int_{-\infty}^{\infty}}\|\partial_sv\|_{s}^2ds$
of $v \in C^{\infty}(\mathbb{R} \times S^1,M)$ in terms of $v_-,\ v_+$
where $\|\cdot\|_s$ is the $L^2$-norm defined by $\int_0^1\omega_s(\cdot,J_s\cdot)dt$.
We first compute
\begin{equation}\begin{aligned}
E(v)&=\int_{-\infty}^{\infty} \| \partial_s v \|_{s}^2 ds \\
&={\int_{-\infty}^{\infty}}{\langle}\partial_s v, \nabla \mathcal{A}_{H,\omega_s^i}(v) \rangle_{s}ds\\
&={\int_{-\infty}^{\infty}}\frac{d}{ds}\mathcal{A}_{H,\omega_s^i}(v)ds-{\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{H,\omega_s^i}(v)ds \\
&=\mathcal{A}_{H,\omega^{i+1}}(v_+)-\mathcal{A}_{H,\omega^i}(v_-)-{\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{H,\omega_s^i}(v)ds.
\end{aligned}\end{equation}
So we need to consider the following
\begin{equation}\begin{aligned}
\left| \int_{-\infty}^{\infty}\dot{\mathcal{A}}_{H,\omega_s^i}(v)ds \right|
&\leq \int_{-\infty}^{\infty}\dot{\beta}(s) \left| \int_{\mathbb{D}^2}\overline{v}^*(\omega^{i+1}-\omega^i) \right| ds\\
&\leq {\int_{-\infty}^{\infty}} \dot{\beta}(s) C\left( \int_{S^1}|\partial_{t}v|_s dt \right)^2 ds+C
\end{aligned}\end{equation}
for some $C=C(M,g,(d(i+1)-d(i))\sigma)$.
Here $|\cdot|_s$ is the norm on $M$ induced by the Riemannian metric $\omega_s(\cdot,J_s\cdot)$.
From the equation (\ref{eqn:gradeqn}), we get
\begin{equation}\label{eqn:gradeqn2}
\partial_t v = J(s,v)\partial_s v + X_H^{\omega_s^i} (v).
\end{equation}
By putting the above equation (\ref{eqn:gradeqn2}) into the isoperimetric inequality, we obtain
\begin{equation}\label{eqn:energybound1}\begin{aligned}
\left| \int_{-\infty}^{\infty}\dot{\mathcal{A}}_{H,\omega_s^i}(v)ds \right|
&\leq{\int_{-\infty}^{\infty}} \dot{\beta}(s) C\left( \int_{S^1}|\partial_{t}v|_s dt \right)^2 ds+C \\
&\leq C {\int_{-\infty}^{\infty}} \dot{\beta}(s) \|\partial_t v \|_s^2 ds+C \\
&= C {\int_{-\infty}^{\infty}} \underbrace{\dot{\beta}(s)}_{\leq 2} {\langle}J_s(v)\partial_s v + X_H^{\omega_s^i}(v),J_s(v)\partial_s v + X_H^{\omega_s^i}(v) \rangle_s ds+C\\
&\leq 2C \left( \int_{0}^{1} \| \partial_s v \|_s^2 ds+
\int_{0}^{1} \underbrace{2{\langle}J_s\partial_sv,X_H^{\omega_s^i}(v) \rangle_s}_{\leq \| \partial_s v \|_s^2+\| X_H^{\omega_s^i}(v) \|_s^2} ds +
\int_{0}^{1} \| X_H^{\omega_s^i}(v) \|_s^2 ds \right)+C\\
&\leq 4C {\int_{-\infty}^{\infty}} \|\partial_s v\|_s^2 ds + 4C \int_{0}^{1}\|X_H^{\omega_s^i}(v)\|_s^2ds+C \\
&\leq 4C\;E(v) + 4C\;c'+C,
\end{aligned}\end{equation}
where $c' \in \mathbb{R}$ is chosen satisfying $\|X_H^{\omega_s^i}(v)\|_s^2 \leq c'$. This is possible by the compactness of $M$. Thus we get
\begin{equation}\label{eqn:energybound2}\begin{aligned}
E(v) \leq \underbrace{\mathcal{A}_{H,\omega^{i+1}}(v_+)-\mathcal{A}_{H,\omega^i}(v_-)+4Cc'+C}_{=:c''} +4C\;E(v).
\end{aligned}\end{equation}
Since $C=C(M,g,(d(i+1)-d(i))\sigma) \leq \frac{1}{8}$, we finally obtain
\begin{equation}\label{eqn:energybound3}\begin{aligned}
E(v) &\leq c'' + \frac{1}{2}E(v) \\
E(v) &\leq 2c''.
\end{aligned}\end{equation}
Now we define the continuation map from $\omega_0$ to $\omega_1$ by juxtaposition
\[
\Psi_{\omega_0}^{\omega_1}:\mathrm{CF}(\omega_0)\to\mathrm{CF}(\omega_1)
\]
\[
\Psi_{\omega_0}^{\omega_1}=\Psi_{\omega^{N-1}}^{\omega^N}\circ\cdots\circ\Psi_{\omega^1}^{\omega^2}\circ\Psi_{\omega^0}^{\omega^1}.
\]
By a standard argument in Floer homology theory,
each $\Psi^{\omega^{i+1}}_{\omega^i}$ commutes with the boundary operators of the Floer chain complex.
This implies that $\Psi_{\omega_0}^{\omega_1}$ also interchanges the boundary operators.
Hence we get an induced homomorphism
\begin{equation}
\widetilde{\Psi_{\omega_0}^{\omega_1}}:\mathcal{F}H(M,\omega_0)\to\mathcal{F}H(M,\omega_1).
\end{equation}
In a similar way we can construct
\begin{equation}
\widetilde{\Psi_{\omega_1}^{\omega_0}}:\mathcal{F}H(M,\omega_1)\to\mathcal{F}H(M,\omega_0),
\end{equation}
by following the homotopy backwards.
By a homotopy-of-homotopies argument, we conclude
$\widetilde{\Psi_{\omega_1}^{\omega_0}}\circ\widetilde{\Psi_{\omega_0}^{\omega_1}}=\mathrm{id}_{\mathcal{F}H(M,\omega_0)}$ and
$\widetilde{\Psi^{\omega_1}_{\omega_0}}\circ\widetilde{\Psi^{\omega_0}_{\omega_1}}=\mathrm{id}_{\mathcal{F}H(M,\omega_1)}$.
Therefore $\widetilde{\Psi_{\omega_0}^{\omega_1}}$ is an isomorphism with inverse $\widetilde{\Psi^{\omega_0}_{\omega_1}}$.
\end{proof}
\begin{Rmk}
In the proof of Theorem \ref{thm:continuation}, the {\em quadratic isoperimetric inequality} is essential.
One can check that if $u_{\varpimega_1-\varpimega_0}(t){\mathfrak g}nsim t$, the above proof does not work anymore.
\end{Rmk}
\section{Rabinowitz Floer homology}
\subsection{RFH for the cotangent bundle endowed with its canonical symplectic form}
In this section, we consider the cotangent bundle $(T^*N,\omega_0=d\lambda_{\rm liou})$
of a closed Riemannian manifold $(N,g)$
where $\lambda_{\rm liou}=p\wedge dq$ is the {\em Liouville 1-form} for canonical coordinates $(q,p)\in T^*N$.
On the exact symplectic manifold $(T^*N,\lambda_{\rm liou})$, the {\em Liouville vector field} $X$
is defined by $\iota_X \omega_0=\lambda_{\rm liou}$.
$(T^*N,\lambda_{\rm liou})$ is {\em complete and convex}, i.e.\ the following conditions hold:
\begin{itemize}
\item There exists a compact subset $K\subset T^*N$ with smooth boundary such that $X$ points out of $K$ along $\partial K$;
\item The vector field $X$ is complete and has no critical point outside $K$.
\end{itemize}
Equivalently, $(T^*N,\lambda_{\rm liou})$ is complete and convex
since there exists an embedding $\phi:\Sigma\times [1,\infty)\to
T^*N$ such that $\phi^*\lambda=r\alpha_{\Sigma}$, where $r$ denotes
the coordinate on $[1,\infty)$ and $\alpha_{\Sigma}$ is a contact
form, and such that $T^*N\setminus\phi(\Sigma\times(1,\infty))$ is
compact.
Consider now the complete convex exact symplectic manifold
$(T^*N,\lambda_{\rm liou})$ and the compact subset $DT^*N\subset
T^*N$ with smooth boundary $\Sigma:=ST^*N=\partial DT^*N$ such that
$\lambda_{\rm liou}|_{ST^*N}$ is a positive contact form with a Reeb
vector field $R$. We abbreviate by
$\mathscr{L}:=\mathscr{L}_{T^*N}=C^{\infty}(S^1,T^*N)$ the free loop space of $T^*N$.
A {\em defining Hamiltonian} for $\Sigma$ is a smooth function
$H:T^*N\to\mathbb{R}$ with regular level set $\Sigma=H^{-1}(0)$ whose {\em
Hamiltonian vector field} $X_H$ has compact support and agrees with
$R$ along $\Sigma$. Given such a Hamiltonian, the {\em Rabinowitz
action functional} is defined by
\[\mathcal{A}_H : \mathscr{L}\times\mathbb{R}\to\mathbb{R}\]
\[\mathcal{A}_H(x,\eta):=\int_0^1x^*\lambda-\eta\int_0^1H(x(t))dt.\]
Critical points of $\mathcal{A}_H$ are solutions of the equations
\begin{equation}\label{eq:crit1}
\left.
\begin{array}{cc}
\partial_t x(t)=\eta X_H(x(t)), & t \in \mathbb{R}/\mathbb{Z} \\
\int_0^1H(x(t))dt=0. &
\end{array}
\right\}
\end{equation}
By the first equation $H$ is constant along $x$, so the second equation implies $H(x(t))\equiv 0$.
Since $X_H=R$ along $\Sigma$, the equations (\ref{eq:crit1}) are equivalent to
\begin{equation}\label{eq:crit2}
\left.
\begin{array}{cc}
\partial_t x(t)=\eta R(x(t)), & t \in \mathbb{R}/\mathbb{Z} \\
x(t)\in\Sigma, & t \in \mathbb{R}/\mathbb{Z}.
\end{array}
\right\}
\end{equation}
So there are three types of critical points, i.e.\ closed Reeb orbits on $\Sigma$:
\begin{itemize}
\item Positively parametrized closed Reeb orbits corresponding to $\eta>0$;
\item Negatively parametrized closed Reeb orbits corresponding to $\eta<0$;
\item Constant loops on $M$ corresponding to $\eta=0$.
\end{itemize}
The action of a critical point $(x,\eta)$ is $\mathcal{A}_H(x,\eta)=\eta$.
A compatible almost complex structure $J$ on part of the symplectization
$(\Sigma\times\mathbb{R}_+,d(r\alpha_{\Sigma}))$ of a contact manifold
$(\Sigma,\alpha_{\Sigma})$ is called {\em cylindrical} if it satisfies:
\begin{itemize}
\item $J$ maps the Liouville vector field $r\partial_r$ to the Reeb vector field $R$;
\item $J$ preserves the contact distribution $\ker\alpha_{\Sigma}$;
\item $J$ is invariant under the Liouville flow $(y,r)\mapsto (y,e^tr),\;t\in\mathbb{R}.$
\end{itemize}
For a smooth family $(J_t)_{t\in S^1}$ of cylindrical almost complex structures on $(T^*N,\lambda_{\rm liou})$
we consider the following metric
$g=g_J$ on $\mathscr{L}\times\mathbb{R}$. Given a point $(x,\eta)\in\mathscr{L}\times\mathbb{R}$ and two tangent vectors
$(\hat{x}_1,\hat{\eta}_1),\;(\hat{x}_2,\hat{\eta}_2)\in T_{(x,\eta)}(\mathscr{L}\times\mathbb{R})=\Gamma(S^1,x^*T(T^*N))\times\mathbb{R}$,
the metric is given by
\[
g_{(x,\eta)}((\hat{x}_1,\hat{\eta}_1),(\hat{x}_2,\hat{\eta}_2))
=\int_0^1\omega\left(\hat{x}_1,J_t(x(t))\hat{x}_2 \right)dt+\hat{\eta}_1\cdot\hat{\eta}_2.
\]
The gradient of the Rabinowitz action functional $\mathcal{A}_H$ with respect to the metric $g_J$ at a point $(x,\eta)\in \mathscr{L}\times\mathbb{R}$ reads
\begin{equation}
\nabla\mathcal{A}_H(x,\eta)=\nabla_J\mathcal{A}_H(x,\eta)=
\begin{pmatrix}
-J_t(x)\left(\partial_tx-\eta X_H(x) \right) \\
-\int_0^1H(x(t))dt
\end{pmatrix}.
\end{equation}
Hence the positive gradient flow lines are solutions $(x,\eta)\in C^{\infty}(\mathbb{R}\times S^1,T^*N)\times C^{\infty}(\mathbb{R},\mathbb{R})$
of the partial differential equation
\begin{equation}\label{eq:rfhgrad}
\left.
\begin{array}{cc}
\partial_sx+J_t(x)\left(\partial_tx-\eta X_H(x) \right)=0 \\
\partial_s\eta+\int_0^1H(x(t))dt=0
\end{array}
\right\}.
\end{equation}
Then for $-\infty<a<b\leq\infty$ the resulting truncated Floer homology groups
\[\mathrm{RFH}^{(a,b)}(\Sigma,T^*N):=\mathrm{HM}^{(a,b)}(\mathcal{A}_H,J),\]
corresponding to action values in $(a,b)$, are well-defined and do not depend on the choice of the cylindrical $J$
and the defining Hamiltonian $H$. The {\em Rabinowitz Floer homology} of $(\Sigma,T^*N)$ is defined as the limit
\[\mathrm{RFH}_*(\Sigma,T^*N):=
\varinjlim_{a}\varprojlim_{b}\mathrm{RFH}_*^{(-a,b)}(\Sigma,T^*N), \qquad a,b\to\infty.\] This
definition is equivalent to the original one in \cite{CF09} by
\cite[Theorem A]{CF09'}.
Since the Rabinowitz action functional is defined on the full loop space
and the first part of the differential in the Rabinowitz Floer complex counts topological cylinders,
we can split the Rabinowitz Floer homology into factors labeled by free homotopy classes
\[
\mathrm{RFH}(\Sigma,T^*N)=\bigoplus_{\nu\in[S^1,T^*N]}\mathrm{RFH}^\nu(\Sigma,T^*N),
\]
where $\mathrm{RFH}^\nu(\Sigma,T^*N)$ is the Rabinowitz Floer homology for the Rabinowitz action functional
restricted to $\mathscr{L}^\nu=\mathscr{L}^\nu_{T^*N}$.
\subsubsection{Index and grading convention}
Let $\mathcal{M}$ be the moduli space of all finite energy gradient flow lines of the action functional
$\mathcal{A}_H:\mathscr{L}\times\mathbb{R}\to\mathbb{R}$. Since $\mathcal{A}_H$ is Morse-Bott, every finite energy gradient flow line
$(v,\eta)\in C^\infty(\mathbb{R}\times S^1,V)\times C^\infty(\mathbb{R},\mathbb{R})$ converges exponentially at both
ends to critical points $(v_\pm,\eta_\pm)\in\mathrm{Crit}(\mathcal{A}_H)$ as the flow parameter goes to $\pm\infty$.
The linearization of the gradient flow equation along any path $(v,\eta)$ in $\mathscr{L}\times\mathbb{R}$ which converges
exponentially to the critical point of $\mathcal{A}_H$ gives rise to a Fredholm operator $D^{\mathcal{A}_H}_{(v,\eta)}$.
Let $C^-,C^+\subset\mathrm{Crit}(\mathcal{A}_H)$ be the connected components of the critical manifold of $\mathcal{A}_H$
containing $(v_-,\eta_-),(v_+,\eta_+)$ respectively. The local virtual dimension of $\mathcal{M}$ at a finite energy
gradient flow line is defined to be
\[
\mathrm{virdim}_{(v,\eta)}\mathcal{M}:=\mathrm{ind}\, D^{\mathcal{A}_H}_{(v,\eta)}+\dim C^-+\dim C^+
\]
where $\mathrm{ind}\, D^{\mathcal{A}_H}_{(v,\eta)}$ is the Fredholm index of the Fredholm operator $D^{\mathcal{A}_H}_{(v,\eta)}$.
For generic compatible almost complex structures, the moduli space of finite energy gradient flow lines is
a manifold and the local virtual dimension of the moduli space at a gradient flow line $(v,\eta)$ corresponds
to the dimension of the connected component of $\mathcal{M}$ containing $(v,\eta)$.
To define a $\mathbb{Z}$-grading on $\mathrm{RFH}(\Sigma,T^*N)$, we need that the
local virtual dimension just depends on the asymptotics of the
finite energy gradient flow line. Since the first Chern class of
$T^*N$ vanishes, it can be shown that the local virtual dimension
equals
\[
\mathrm{virdim}_{(v,\eta)}\mathcal{M}=\mathrm{CZ}(v_+)-\mathrm{CZ}(v_-)+\frac{\dim C^-+\dim C^+}{2}.
\]
In order to deal with the third term it is useful to introduce the following index for the Morse function $h$
on $\mathrm{Crit}(\mathcal{A}_H)$. We define the {\em signature index} $\mathrm{ind}^\sigma_h(c)$ of a critical point $c$ of $h$ to be
\[
\mathrm{ind}^\sigma_h(c):=-\frac{1}{2}\mathrm{sign}(\mathrm{Hess}_h(c)).
\]
We define a {\em grading} $\mu$ on $\mathrm{RFC}(\Sigma,T^*N)=\mathrm{CM}(\mathcal{A}_H,h)$ by
\[
\mu(c):=\mathrm{CZ}(c)+\mathrm{ind}^\sigma_h(c)+\frac{1}{2}.
\]
These define a $\mathbb{Z}$-grading on the homology $\mathrm{RFH}(\Sigma,T^*N)$. We
refer to \cite{CF09, MerPat10} for more details.
\subsection{Rabinowitz Floer homology for a twisted cotangent bundle}
In the previous section, we considered an exact symplectic manifold.
By the exactness of the symplectic form, there is no need to care about the filling disk of a given loop.
In general, twisted symplectic forms are not exact.
In order to define Rabinowitz Floer homology,
we need the notions of a {\em symplectically atoroidal manifold}
and a {\em virtual restricted contact type hypersurface}.
\begin{Def}
A symplectic manifold $(M,\omega)$ is called {\em symplectically atoroidal} if
\[
\int_{{\mathbb{T}}^2}f^*\omega=0,
\]
for any smooth map $f:{\mathbb{T}}^2\to M$.
\end{Def}
\begin{Rmk}
Since there is a map $g:{\mathbb{T}}^2\to S^2$ of non vanishing degree, {\em
symplectically atoroidal} implies {\em symplectically aspherical}.
\end{Rmk}
\begin{Lemma}[Merry \cite{Mer10}]\label{lem:ato}
Let $\sigma\in\Omega^2(N)$ be a weakly exact 2-form with $u_{\sigma}\sim 1$,
then $f^*\sigma$ is exact for any smooth map $f:{\mathbb{T}}^2\to N$.
\end{Lemma}
\begin{proof}
Consider $G:=f_*(\pi_1({\mathbb{T}}^2))\leq\pi_1(N)$.
Then $G$ is amenable, since $\pi_1({\mathbb{T}}^2)=\mathbb{Z}^2$, which is amenable.
Now Lemma 5.3 in \cite{Pat06} tells us that since $\|\theta\|_\infty<\infty$,
we can replace $\theta$ by a $G$-invariant primitive $\theta'$ of $\widetilde\sigma$,
which descends to a primitive $\theta''\in\Omega^1({\mathbb{T}}^2)$ of $f^*\sigma$.
\end{proof}
\begin{Rmk}\label{rmk:fhc}
Given a free homotopy class $\nu\in[S^1,T^*N]$, fix a reference loop
$v_{\nu}=(q_\nu,p_\nu)\in\mathscr{L}^\nu_{T^*N}$. Let $Z$ be a cylinder
$S^1\times[0,1]$ with two boundary components $\partial'Z$ with the
boundary orientation and $\partial''Z$ with the opposite boundary
orientation. Choose $\overline v:Z\to T^*N$ any smooth map such that
$\overline v|_{\partial'Z}=v$ and $\overline v|_{\partial''Z}=v_\nu$. Then
thanks to the previous lemma, the integral $\int_Z\overline
v^*\tau^*\sigma$ is independent of the choice of $\overline v$.
Similarly given any $q\in\mathscr{L}^{\tau\nu}_N$, let $\overline q:Z\to N$
denote any smooth map such that $\overline q|_{\partial'Z}=q$ and
$\overline q|_{\partial''Z}=q_\nu$. Then the integral $\int_Z\overline
q^*\sigma$ is independent of the choice of $\overline q$. Note that
in particular if $q=\tau\circ v$ then
\[
\int_Z\overline v^*\tau^*\sigma=\int_Z\overline q^*\sigma.
\]
In particular, let $\sigma\in\Omega^2(N)$ be a weakly exact 2-form satisfying $u_\sigma\sim 1$,
then the twisted cotangent bundle $(T^*N,\omega_\sigma)$ is a {\em symplectically atoroidal manifold}.
Moreover, the Rabinowitz action functional
\[
\mathcal{A}_{\omega_\sigma}:\mathscr{L}\times\mathbb{R}\to\mathbb{R}
\]
\[
\mathcal{A}_{\omega_\sigma}(v,\eta):=\mathcal{A}_{H,\omega_\sigma}(v,\eta)=\int_Z\overline
v^*\omega_\sigma-\eta\int_0^1H(v(t))dt
\]
is well-defined, independent of the choice of $\overline v$. In the
special case, where $\nu=0$ is the trivial free homotopy class, we
choose $v_\nu$ just a constant loop. In this case the cylinder $Z$
can be replaced by a filling disk $\mathbb{D}^2$ for the loop $v$.
\end{Rmk}
\begin{Def}
A closed hypersurface $\Sigma$ in a symplectic manifold $(M,\omega)$ is called {\em virtually contact},
if there is a covering $p:\widehat M\to M$ and a primitive $\lambda\in\Omega^1(\widehat\Sigma)$ of $p^*\omega$ such that
\begin{equation}\begin{aligned}\label{eqn:vcp}
\sup_{x\in\widehat{\Sigma}}|\lambda_x|\leq C<\infty,\qquad
\inf_{x\in\widehat{\Sigma}}\lambda(R)\geq\mu>0,
\end{aligned}\end{equation}
where $|\cdot|$ is the lifting of a metric on $\Sigma$ and $R$ is the pullback of a vector field generating $\ker(\omega|_{\Sigma})$.
\end{Def}
\begin{Def}
A closed hypersurface $\Sigma$ in a symplectic manifold $(M,\omega)$ is called
{\em virtual restricted contact}, if there is a covering $p:\widehat M\to M$
and a primitive $\lambda\in\Omega^1(\widehat M)$ of $p^*\omega$ such that
$\lambda$ satisfies (\ref{eqn:vcp}) again on $\widehat\Sigma$.
\end{Def}
\begin{Rmk}
A {\em virtual restricted contact homotopy} is a smooth homotopy
$(\Sigma_t,\lambda_t)\subset(M,\omega)$ of virtual restricted contact
hypersurfaces with the corresponding 1-forms on the covers such that
the preceding conditions hold with constants $C,\mu$ independent of
$t$. $\mathrm{RFH}(\Sigma,M)$ is defined for each virtual restricted contact
hypersurface $\Sigma$ and is invariant under virtual restricted
contact homotopies. For a twisted cotangent bundle
$(T^*N,\omega_\sigma)$ with any $k\in\mathbb{R}$ above the Ma\~n\'e critical value
$c=c(g,\sigma,U)$ the hypersurface $\Sigma_k=H^{-1}_U(k)\subset
T^*N$ is virtual restricted contact, see \cite{CFP09}.
\end{Rmk}
\section{Continuation homomorphism in RFH for symplectic deformations}
Let us begin with the {\em defining Hamiltonian} $H$ of the virtual restricted contact hypersurface $\Sigma_k\subset T^*N$
\begin{equation}
H:=H_{U,k,\xi}=\beta_{\xi}\circ (H_U-k)
\end{equation}
where $\beta_{\xi}(t)$ is a smooth cut-off function satisfying $0\leq\dot{\beta}_{\xi}\leq1$,
\begin{equation}
\beta_{\xi}(t)=
\left\{
\begin{array}{rcr}
t&\text{ if }&|t|\leq\xi-\epsilon \\
\xi&\text{ if }&t\geq\xi+\epsilon \\
-\xi&\text{ if }&-t\geq\xi+\epsilon
\end{array}
\right.,
\qquad \epsilon=\min\left\{1/3,\xi/3\right\}.
\end{equation}
Now we define the Rabinowitz action functional given by
\[\mathcal{A}_{\omega_\sigma}:\mathscr{L}\times\mathbb{R}\to\mathbb{R}\]
\begin{equation}\label{eq:rfhaf}
\mathcal{A}_{\omega_\sigma}(v,\eta):=\mathcal{A}_{H,\omega_{\sigma}}(v,\eta)=\int_{Z}\overline{v}^*\omega_{\sigma}-\eta\int_0^1H(v(t))dt,
\end{equation}
where $\overline{v}, Z$ are given in Remark \ref{rmk:fhc}.
In this section, we consider the canonical cotangent bundle $(T^*N,\omega_0)$
and the twisted cotangent bundle $(T^*N,\omega_{\sigma})$
with the virtual restricted contact hypersurface
$\Sigma_k=H^{-1}(0)=H^{-1}_U(k)$ where $k>c(g,\sigma,U)$
and $H_U(q,p)=\frac{1}{2}|p|_g^2+U(q)$.
For convenience, let us define the following sets
\begin{equation}\begin{aligned}
&\mathcal{M}p(N)=\{\sigma\in\Omega^2(N)\ |\ \widetilde\sigma=d\theta,\ \|\theta\|_{\infty}<\infty\}; \\
&\Omega^{\mathcal{M}p}(T^*N)=\{\omega_{\sigma}\in\Omega^{2}(T^*N)\ |\ \sigma\in\mathcal{M}p(N)\}; \\
&\Omega^{\mathcal{M}p}(\Sigma_k)=\{\omega_{\sigma}\in\Omega^{\mathcal{M}p}(T^*N)\ |\ k>c(g,\sigma,U)\}; \\
&\Omega^{\mathcal{M}p}_{\rm reg}(\Sigma_k)=\{\omega_{\sigma}\in\Omega^{\mathcal{M}p}(\Sigma_k)\
|\ \mathcal{A}_{\omega_\sigma}:\mathscr{L}\times\mathbb{R}\to\mathbb{R}\text{ is Morse-Bott}\}. \end{aligned}\end{equation} Note
that $\Omega^{\mathcal{M}p}(\Sigma_k)$ is convex. Indeed this follows from the
following estimate for $t \in [0,1]$ and for primitives $\theta_1$
and $\theta_2$:
\begin{align*}
|t\theta_1+(1-t)\theta_2|^2 &\leq
t^2|\theta_1|^2+2t(1-t)|\theta_1||\theta_2|+(1-t)^2|\theta_2|^2\\
&\leq t^2|\theta_1|^2+t(1-t)|\theta_1|^2+t(1-t)|\theta_2|^2+(1-t)^2
|\theta_2|^2\\
&=t|\theta_1|^2+(1-t)|\theta_2|^2.
\end{align*}
It is known that for surfaces and vanishing potential the set
$\Omega^{\mathcal{M}p}_{\rm reg}(\Sigma_k)$ is dense in $\Omega^{\mathcal{M}p}(\Sigma_k)$
thanks to work of Miranda, see \cite{Mir}. In higher dimensions this
seems to be an open problem, although it would be very surprising if
it failed.
For a pair $(\omega_0,\omega_{\sigma})$ of $\Omega_{\rm
reg}^{\mathcal{M}p}(\Sigma_k)$, we construct the continuation homomorphism
\[
\widetilde{\Psi_{\omega_0}^{\omega_{\sigma}}}_*:\mathrm{RFH}_*(\Sigma_k,\omega_0)\to
\mathrm{RFH}_*(\Sigma_k,\omega_{\sigma}),
\]
by counting solutions of an $s$-dependent Rabinowitz Floer equation.
Before the construction, we must check the $L_{\infty}$-bound of the
Lagrange multiplier $\eta$ in the case of a twisted cotangent bundle with virtual restricted contact hypersurface.
The proof of the following proposition proceeds as in \cite{CF09} for the restricted contact type case.
It was already used with no explicit proof in \cite{CFP09}.
For the reader's convenience we include a proof here.
\begin{Prop}\label{prop:lagbd}
Let $(T^*N,\omega_{\sigma})$ be a twisted cotangent bundle
with a virtual restricted contact hypersurface $\Sigma_k=H^{-1}(0)$ where $k>c$.
Then there exist constants $\epsilon>0$ and $\overline{c}<\infty$ such that the following holds:
\begin{equation}\label{eqn:lagbd}
\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|\leq\epsilon \Longrightarrow |\eta|\leq \overline{c}(|\mathcal{A}_{\omega_\sigma}(v,\eta)|+1).
\end{equation}
\end{Prop}
Before embarking on the proof of Proposition \ref{prop:lagbd}, we first explain as a
warm-up an extremal case of it, namely instead of looking at almost
critical points we consider critical points themselves.
\begin{Lemma}
Under the same assumptions as in Proposition \ref{prop:lagbd}, for $(v,\eta)\in\mathrm{Crit}(\mathcal{A}_{\omega_\sigma}|_{\mathscr{L}^0\times\mathbb{R}})$ we have
\[|\mathcal{A}_{\omega_\sigma}(v,\eta)|\geq\frac{|\eta|}{c'}\]
where $c'>0$.
\end{Lemma}
\begin{proof}
Since we are considering the component of contractible loops we only
have to choose a filling disc $\overline{v}\colon \mathbb{D}^2 \to T^*N$ for
the loop $v$. Inserting (\ref{eq:crit1}) into $\mathcal{A}_{\omega_\sigma}$ and
using the assumption of virtual restricted contact type, we obtain
\begin{equation}\begin{aligned}\label{eqn:aas43}
|\mathcal{A}_{\omega_\sigma}(v,\eta)|&=\left|\int_{\mathbb{D}^2}\overline{v}^*\omega_{\sigma}\right|
=\left|\int_{\mathbb{D}^2}\tilde{\overline{v}}^*\pi^*\omega_{\sigma}\right|
=\left|\int_{\mathbb{D}^2}\tilde{\overline{v}}^*d\lambda_{\sigma}\right|
=\left|\int_{S^1}\tilde{v}^*\lambda_{\sigma}\right| \\
&=\left|\int_0^1\lambda_{\sigma}(\partial_t\tilde{v})\right|
=\left|\int_0^1\lambda_{\sigma}(\eta\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v}))\right| \\
&=\left|\eta\int_0^1\lambda_{\sigma}(\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v}))\right| \\
&\geq\frac{|\eta|}{c'}
\end{aligned}\end{equation}
where $\tilde{\overline{v}},\;\widetilde{X_H^{\omega_{\sigma}}}$ are lifts of $\overline{v},\;X_H^{\omega_{\sigma}}$ respectively
to the cover $\pi:\widetilde{\Sigma}\to\Sigma$.
The constant $c'>0$ exists by the second inequality in (\ref{eqn:vcp}).
\end{proof}
\begin{proof}[{\bf Proof of Proposition \ref{prop:lagbd}}]
The proof consists of 4 steps.
\\
{\bf Step 1} : {\em There exist $\delta>0$, a constant $c'>0$, a
covering $p \colon \widehat{M} \to M=T^*N$ and a primitive $\lambda
\in \Omega^1(\widehat{M})$ of $p^*\omega_\sigma$ such that on
$U_\delta =H^{-1}(-\delta,\delta)$ we have the estimates
$\lambda(\widetilde{X_H^{\omega_{\sigma}}})>\frac{1}{2c'}+\delta$ and
$||\lambda||_\infty<\infty$, where the $L^\infty$-norm is taken with
respect to the lift of a metric on $N$.}
\\ \\
The assertion of Step~1 is surely true for $\delta=0$ for a
primitive $\lambda_0 \in \Omega^1(\widehat{M})$ just by definition
of virtual restricted contact type. Let
$\widehat{\Sigma}_k=p^{-1}(\Sigma_k)$ be the lift of $\Sigma_k$.
Thanks to the bounds in the virtual contact assumption we can find
$\epsilon>0$ and a diffeomorphism $\Psi$ from
$\widehat{\Sigma}_k\times (-\epsilon,\epsilon)$ to an open
neighbourhood $U$ of $\widehat{\Sigma}_k$ in $\widehat{M}$ such that
$\Psi$ pulls back the symplectic form $\omega_\sigma$ on $U$ to the
symplectic form $\omega=d(r\lambda_0|_{\widehat{\Sigma}_k})$ on
$\widehat{\Sigma}_k\times (-\epsilon,\epsilon)$. Now choose
$\delta>0$ so small that $U_\delta \subset
\Psi\big(\widehat{\Sigma}_k\times (-\epsilon/2,\epsilon/2)\big)$ and
the bounds required in Step~1 hold for $\Psi_*\lambda_1$ with
$\lambda_1=r\lambda_0|_{\widehat{\Sigma}_k}$. Since $\lambda_1$ and
$\Psi^*\lambda_0$ are two primitives of $\Psi^*\omega_\sigma$ which
coincide on $\widehat{\Sigma}_k$ we conclude that there exists a
function $f \in
C^\infty\big(\widehat{\Sigma}_k\times(-\epsilon,\epsilon)\big)$ such
that $\lambda_1=\Psi^*\lambda_0+df$. Now choose a cutoff function
$\beta \in C^\infty\big(\widehat{\Sigma}_k\times
(-\epsilon,\epsilon) \big)$ with the property that $\beta(x,r)=1$
for $x \in \widehat{\Sigma}_k$ and $|r| \leq \epsilon/2$ and
$\beta(x,r)=0$ if $r \geq 3\epsilon/4$. Finally set
$\lambda=\lambda_0 +\Psi_*d(\beta f)$ on
$\Psi\big(\widehat{\Sigma}_k\times (-\epsilon,\epsilon)\big)$. This
finishes the proof of Step~1. For the next step we fix $\nu\in
[S^1,T^*N]$.
\\ \\
{\bf Step 2} : {\em There exist $\delta>0$ and constants
$c_{\delta}<\infty$ and $a^\nu \in \mathbb{R}$ with the following
property. For every $(v,\eta)\in\mathscr{L}^\nu\times\mathbb{R}$ such that $v(t)\in
U_{\delta}=H^{-1}(-\delta,\delta)$ for every $t\in\mathbb{R}/\mathbb{Z}$, the
following estimate holds:}
\[
|\eta|\leq2c'|\mathcal{A}_{\omega_\sigma}(v,\eta)|+c_{\delta}\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|+2c'|a^\nu|.
\]
Let $c'$, $\delta$ and $\lambda$ be as in Step~1 and set
\begin{equation}\label{eqn:c_deltaes}
c_{\delta}=2c'\|\lambda|_{\pi^{-1}(U_{\delta})}\|_{\infty}<\infty.
\end{equation} We estimate \begin{equation}\begin{aligned}
|\mathcal{A}_{\omega_\sigma}(v,\eta)|&=\left|\int_0^1\lambda(\tilde{v})(\partial_t\tilde{v})
-\underbrace{\int_0^1\lambda(\tilde{v}_\nu)(\partial_t\tilde{v}_\nu)}_{:=a^\nu}
-\eta\int_0^1H(v(t))dt \right| \\
&=\left|\eta\int_0^1\lambda(\tilde{v})(\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v}))
+\int_0^1\lambda(\tilde{v})(\partial_t\tilde{v}-\eta\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v}))
-a^\nu
-\eta\int_0^1H(v(t))dt \right| \\
&\geq\left|\eta\int_0^1\lambda(\tilde{v})(\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v}))\right|
-\left|\int_0^1\lambda(\tilde{v})(\partial_t\tilde{v}-\eta\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v}))\right|
-\left|\eta\int_0^1H(v(t))dt \right|
-|a^\nu|\\
&\geq|\eta|\left(\frac{1}{2c'}+\delta\right)-\frac{c_{\delta}}{2c'}\|\partial_t\tilde{v}-\eta\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v})\|_1-|\eta|\delta-|a^\nu| \\
&\geq\frac{|\eta|}{2c'}-\frac{c_{\delta}}{2c'}\|\partial_t\tilde{v}-\eta\widetilde{X_H^{\omega_{\sigma}}}(\tilde{v})\|_2-|a^\nu| \\
&\geq\frac{|\eta|}{2c'}-\frac{c_{\delta}}{2c'}\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|-|a^\nu|,
\end{aligned}\end{equation} where $v_\nu\in\mathscr{L}^\nu$ is a reference loop defined in Remark
\ref{rmk:fhc}. This proves Step 2.
\\ \\
{\bf Step 3} : {\em For each $\delta>0$, there exists
$\epsilon=\epsilon(\delta)>0$ such that if
$\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|\leq\epsilon$ then $v(t)\in
U_{\delta}$ for every $t\in[0,1]$.}
\\ \\
First assume that $v\in\mathscr{L}$ has the property that there exist $t_0, t_1\in\mathbb{R}/\mathbb{Z}$ such that
$|H(v(t_0))|\geq\delta$ and $|H(v(t_1))|\leq\delta/2$. We claim that
\begin{equation}\label{2-1}
\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|\geq\frac{\delta}{2\kappa}
\end{equation}
for every $\eta\in\mathbb{R}$, where
\[
\kappa:=\max_{x\in\overline{U}_{\delta}}\|\nabla H(x)\|.
\]
To see this, assume without loss of generality that $t_0<t_1$ and
$\delta/2\leq|H(v(t))|\leq\delta$ for all $t\in[t_0,t_1]$. Then we estimate
\begin{equation}\begin{aligned}
\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|&\geq \sqrt{\int_0^1\|\partial_tv-\eta X_H^{\omega_{\sigma}}(v)\|^2dt} \\
&\geq \int_0^1\|\partial_tv-\eta X_H^{\omega_{\sigma}}(v)\|dt \\
&\geq \int_{t_0}^{t_1}\|\partial_tv-\eta X_H^{\omega_{\sigma}}(v)\|dt \\
&\geq \frac{1}{\kappa}\int_{t_0}^{t_1}\|\nabla H(v)\|\cdot\|\partial_tv-\eta X_H^{\omega_{\sigma}}(v)\|dt \\
&\geq \frac{1}{\kappa}\int_{t_0}^{t_1}|\langle\nabla H(v),\partial_tv-\eta X_H^{\omega_{\sigma}}(v)\rangle|dt \\
&= \frac{1}{\kappa}\int_{t_0}^{t_1}|\langle\nabla H(v),\partial_tv\rangle|dt \\
&= \frac{1}{\kappa}\int_{t_0}^{t_1}|dH(v)\partial_tv|dt \\
&= \frac{1}{\kappa}\int_{t_0}^{t_1}|\partial_tH(v)|dt \\
&\geq \frac{1}{\kappa}\left|\int_{t_0}^{t_1}\partial_tH(v)dt\right| \\
&= \frac{1}{\kappa}|H(v(t_1))-H(v(t_0))| \\
&\geq \frac{1}{\kappa}\left(|H(v(t_0))|-|H(v(t_1))|\right) \\
&\geq \frac{\delta}{2\kappa}. \end{aligned}\end{equation} Now assume that $v\in\mathscr{L}$ has the
property that $v(t)\in T^*N\setminus U_{\delta/2}$ for every
$t\in[0,1]$. In this case we estimate \begin{equation}\label{2-2}
\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|\geq\left|\int_0^1H(v(t))dt\right|\geq\frac{\delta}{2}
\end{equation} for every $\eta\in\mathbb{R}$. From (\ref{2-1}) and (\ref{2-2}) Step 3
follows with
\[
\epsilon=\frac{\delta}{2\max\{1,\kappa\}}.
\]
\\ \\
{\bf Step 4} : {\em We prove the proposition.}
\\ \\
Choose $\delta$ as in Step 1, $\epsilon=\epsilon(\delta)$ as in Step
3 and
\[
\overline{c}=\max\{2c',2c_{\delta}\epsilon,4c'|a^\nu|\}.
\]
Assume that $\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|\leq\epsilon$; then
\[
|\eta|\leq2c'|\mathcal{A}_{\omega_\sigma}(v,\eta)|+c_{\delta}\|\nabla\mathcal{A}_{\omega_\sigma}(v,\eta)\|+2c'|a^\nu|
\leq \overline{c}(|\mathcal{A}_{\omega_\sigma}(v,\eta)|+1).
\]
This proves Proposition \ref{prop:lagbd}.
\end{proof}
\begin{Rmk}\label{rmk:constconti}
A careful inspection of the proof of Proposition \ref{prop:lagbd}
shows that the constant $c',\delta,c_{\delta},\epsilon(\delta),$ and
$\varpiverline{c}$ continuously depend on the 2-form
$\sigma\in\mathcal{O}mega^2(M)$. In particular, Proposition \ref{prop:lagbd} can
be extended to families of symplectic forms.
\end{Rmk}
\begin{Lemma}[Linear isoperimetric inequality]\label{lem:linhmtp}
Let $\sigma\in\mathcal{O}mega^2(N)$ be a weakly exact 2-form and $u_{\sigma}\sim 1$, then
\[
\int_Z\varpiverline q^*\sigma \leq C\left(\int_0^1|{p_{\alpha}}rtial_tq|dt+1\right)
\]
where, $\varpiverline q,Z$ are the same as in Remark \ref{rmk:fhc} and
$C=C(N,g,\sigma,q_\nu)$.
\end{Lemma}
\begin{proof}
The proof uses the same argument as in Lemma
\ref{thm:isoperimetric}. Let $\widetilde{\overline q}:Z\to
\widetilde N$ be the lifting of $\overline q$ to the universal cover
and $\theta\in\Omega^1(\widetilde N)$ be a bounded primitive of
$\widetilde\sigma$ as in Lemma \ref{lem:ato}. Then we get \begin{equation}\begin{aligned}
\int_Z\overline q^*\sigma&=\int_Z\widetilde{\overline q}^*\widetilde\sigma \\
&=\int_Z\widetilde{\overline q}^*d\theta \\
&=\int_R\widetilde q^*\theta \\
&\leq\left|\int_0^1\widetilde q^*\theta\right|+\left|\int_0^1\widetilde q_\nu^*\theta\right|
+\left|\int_0^1\underline r^*\theta\right|+\left|\int_0^1\overline r^*\theta\right| \\
&\leq\|\theta\|_\infty\left(\int_0^1|\partial_t q|dt+\int_0^1|\partial_t
q_\nu|dt +\int_0^1|\partial_t\underline r|dt+\int_0^1|\partial_t\overline
r|dt\right), \end{aligned}\end{equation} where $R$ is a rectangle in $\widetilde N$ which
consists of $\widetilde q,\;\widetilde q_\nu,\;\underline r$, and
$\overline r$. Here, $\underline r:[0,1]\to\widetilde N$ is a path
from $\widetilde q(0)$ to $\widetilde q_\nu(0)$ and $\overline
r:[0,1]\to\widetilde N$ is a path from $\widetilde q(1)$ to
$\widetilde q_\nu(1)$. Since $\int_Z\overline q^*\sigma$ does not
depend on the choice of $Z$, we may assume that $\underline r$,
$\overline r$ are length minimizing curves on $\widetilde N$. This
implies that $\underline r$, $\overline r$ are geodesics contained
in a fundamental domain in $\widetilde N$, so that
\[
\int_0^1|\partial_t\underline r|dt\leq\text{diam}(N),\quad\int_0^1|\partial_t\overline r|dt\leq\text{diam}(N).
\]
Set
$C=\max\left\{\|\theta\|_\infty,2\|\theta\|_\infty\int_0^1|\partial_tq_\nu|dt,4\|\theta\|_\infty
\text{diam}(N)\right\}$; then we get the conclusion.
\end{proof}
\begin{Rmk}\label{rmk:conti_const}
Note that $C$ converges to $0$ as $|\sigma|_g\to0$.
\end{Rmk}
\begin{Rmk}\label{rmk:lip_pro}
If we consider the family of symplectic forms on $T^*N$
\[
\omega_s=\omega_0+\beta(s)\tau^*\sigma\in\Omega^{\mathcal{M}p}(\Sigma_k),\qquad
\forall s\in\mathbb{R},
\]
where $\beta(s)\in C^\infty(\mathbb{R},[0,1])$ is a cut-off function
satisfying $\beta(s)=1$ for $s\geq1$, $\beta(s)=0$ for $s\leq0$, and
$0\leq\dot{\beta}(s)\leq2$, then we obtain the estimate \begin{equation}\begin{aligned}
\left|
\int_Z\overline{v}^*\dot\omega_s \right|
&\leq \left| \int_Z\overline{v}^*\dot\beta(s)\tau^*\sigma\right| \\
&=\dot\beta(s)\left| \int_Z\overline{v}^*\tau^*\sigma \right| \\
&\leq C\dot\beta(s)\left(\int_{S^1} |\partial_t v(t)| dt+1\right),
\end{aligned}\end{equation}
for some $C=C(N,g,\sigma,q_\nu)$ given in Lemma \ref{lem:linhmtp}.
\end{Rmk}
\begin{Prop}\label{prop:lagbd2}
Let $w=(v,\eta)\in C^{\infty}(\mathbb{R}\times S^1,T^*N)\times C^\infty(\mathbb{R},\mathbb{R})$ be a gradient flow line of
\[
\mathcal{A}_{\omega(s)}(v,\eta):=\mathcal{A}_{H,\omega_{s}}(v,\eta)=\int_{Z}\overline{v}^*\omega_s-\eta\int_0^1H(x(t))dt,
\]
i.e.\ a solution of
\begin{equation}\label{eqn:rfhgrd}
\left.
\begin{array}{cc}
\partial_sv+J_{t,s}(v)\left(\partial_tv-\eta X_H^{\omega_s}(v) \right)=0 \\
\partial_s\eta+\int_0^1H(v(t))dt=0
\end{array}
\right\}
\end{equation}
\begin{equation}
\label{eqn:rfhlim}
\lim_{s \to -\infty}w(s)=w_-\in\mathrm{Crit}\,\mathcal{A}_{\omega(0)}, \qquad
\lim_{s \to \infty}w(s)=w_+\in\mathrm{Crit}\,\mathcal{A}_{\omega(1)},
\end{equation}
where $\omega_s$ is the same as in Remark \ref{rmk:lip_pro}.
If $|\sigma|_g$ is sufficiently small then the $L^{\infty}$-norm of $\eta$ is uniformly bounded
in terms of a constant which only depends on $w_-,w_+$.
\end{Prop}
\begin{proof}
We prove the proposition in three steps.
{\bf Step1} : Let us first {\em bound the energy of $w$ in terms of $\|\eta\|_{\infty}$.}
\begin{equation}\begin{aligned}\label{e0}
E(w)&={\int_{-\infty}^{\infty}}\|\partial_sw\|_{s}^2 ds \\
&={\int_{-\infty}^{\infty}}{\langle}\partial_s w, \nabla \mathcal{A}_{\omega(s)}(w) \rangle_{s}ds\\
&={\int_{-\infty}^{\infty}}\frac{d}{ds}\mathcal{A}_{\omega(s)}(w)ds-{\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{\omega(s)}(w)ds \\
&=\mathcal{A}_{\omega(1)}(w_+)-\mathcal{A}_{\omega(0)}(w_-)-{\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{\omega(s)}(w)ds.
\end{aligned}\end{equation}
We estimate the third term by
\begin{equation}\begin{aligned}\label{e1}
\left| {\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{\omega(s)}(w)ds \right|
&\leq {\int_{-\infty}^{\infty}}\left|\dot{\mathcal{A}}_{\omega(s)}(w)\right|ds \\
&= {\int_{-\infty}^{\infty}}\dot{\beta}(s) \left| \int_Z\overline{v}^*\tau^*\sigma \right| ds\\
&\leq {\int_{-\infty}^{\infty}} \dot{\beta}(s) C\left( \int_{S^1}|\partial_{t}v|_{t,s} dt+1 \right) ds,
\end{aligned}\end{equation}
where $C$ is the isoperimetric constant in Remark \ref{rmk:lip_pro}
and $|\cdot|_{t,s}$ is the norm on $T^*N$ induced by the Riemannian metric $\omega_s(\cdot,J_{t,s}\cdot).$
From the gradient flow equation (\ref{eqn:rfhgrd}) we get
\[
\partial_tv=J_{t,s}(v)\partial_sv+\eta X_H^{\omega_s}(v).
\]
By putting this into (\ref{e1}), we then obtain
\begin{equation}\begin{aligned}\label{eqn:dotAbd}
{\int_{-\infty}^{\infty}}\left|\dot{\mathcal{A}}_{\omega(s)}(w)\right|ds
&\leq{\int_{-\infty}^{\infty}} \dot{\beta}(s) C\left( \int_{S^1}|\partial_{t}v|_{t,s} dt +1\right) ds \\
&\leq{\int_{-\infty}^{\infty}} \dot{\beta}(s) C\left( \int_{S^1}|J_{t,s}(v)\partial_sv+\eta X_H^{\omega_s}(v)|_{t,s} dt +1\right) ds \\
&\leq{\int_{-\infty}^{\infty}} \underbrace{\dot{\beta}(s)}_{\leq 2} C\left( \int_{S^1}\left(|\partial_sv|_{t,s}+|\eta|\; |X_H^{\omega_s}(v)|_{t,s}\right) dt+1 \right) ds \\
&\leq2C\int_0^1 \left( \int_{S^1}\left(|\partial_sv|_{t,s}^2+1+|\eta|\; |X_H^{\omega_s}(v)|_{t,s}\right) dt+1 \right) ds \\
&\leq2CE(v)+4C+2C\|\eta\|_{\infty}c'' \\
&\leq2CE(w)+4C+2C\|\eta\|_{\infty}c'', \end{aligned}\end{equation} where
$c''=\max_{s\in[0,1]\atop v\in T^*N}|X_H^{\omega_s}(v)|_{t,s}$. Note
that the maximum is attained, since by assumption $dH$ has
compact support. Now by substituting the above equation into
(\ref{e0}), we get
\begin{equation}\begin{aligned}\label{eqn:e_bd}
E(w)&=\mathcal{A}_{\omega(1)}(w_+)-\mathcal{A}_{\omega(0)}(w_-)-{\int_{-\infty}^{\infty}}\dot{\mathcal{A}}_{\omega(s)}(w)ds \\
&\leq\mathcal{A}_{\omega(1)}(w_+)-\mathcal{A}_{\omega(0)}(w_-)+2CE(w)+4C+2C\|\eta\|_{\infty}c''.
\end{aligned}\end{equation}
By choosing $\sigma\in\Omega^2(M)$ with sufficiently small norm,
we may assume that the isoperimetric constant $C$ is less than $\frac{1}{4}$.
For simplicity, set $\Delta=\mathcal{A}_{\omega(1)}(w_+)-\mathcal{A}_{\omega(0)}(w_-)$; then we get
\begin{equation}\begin{aligned}\label{eqn:e_bd2}
E(w)&\leq2\mathcal{A}_{\omega(1)}(w_+)-2\mathcal{A}_{\omega(0)}(w_-)+8C+4C\|\eta\|_{\infty}c''\\
&=2\Delta+8C+4C\|\eta\|_{\infty}c''.
\end{aligned}\end{equation}
This finishes Step1.
{\bf Step2} : Let $\epsilon$ be as in Proposition \ref{prop:lagbd} and Remark \ref{rmk:constconti}.
For $l\in\mathbb{R}$ let $\tau(l)\geq0$ be defined by
\[
\tau(l):=\inf\{\tau\geq0:\|\nabla\mathcal{A}_{\omega(s)}((v,\eta)(l+\tau))\|_s<\epsilon\}.
\]
In this step we {\em bound $\tau(l)$ in terms of $\|\eta\|_{\infty}$ for all $l\in\mathbb{R}$}. Namely,
\begin{equation}\begin{aligned}
E(w)&={\int_{-\infty}^{\infty}}\|\partial_sw\|_s^2ds\\
&={\int_{-\infty}^{\infty}}\|\nabla\mathcal{A}_{\omega(s)}\|_s^2ds \\
&\geq\int_{l}^{l+\tau(l)}\underbrace{\|\nabla\mathcal{A}_{\omega(s)}\|_s^2}_{\geq \epsilon^2}ds \\
&\geq\epsilon^2\tau(l).
\end{aligned}\end{equation}
Step1 and the above estimate finish Step2.
{\bf Step3} : {\em We prove the proposition.}\\
First set
\begin{equation}\begin{aligned}
\|H\|_{\infty}=\max_{x\in T^*N}|H(x)|, \quad
K=\max\{-\mathcal{A}_{\omega(0)}(w_-),\mathcal{A}_{\omega(1)}(w_+)\}.
\end{aligned}\end{equation}
By definition of $\tau(l)$, we obtain $\|\nabla\mathcal{A}_{\omega(s)}((v,\eta)(l+\tau(l)))\|_s<\epsilon$.
Now we are able to use Proposition \ref{prop:lagbd} and
get the following estimate by using (\ref{eqn:lagbd}), (\ref{eqn:dotAbd}) and (\ref{eqn:e_bd}):
\begin{equation}\begin{aligned}\label{eqn:eta_bd1}
|\eta(l+\tau(l))|
&\leq \overline{c}(|\mathcal{A}_{\omega(s)}(w(l+\tau(l)))|+1) \\
&\leq \overline{c}\left(K+{\int_{-\infty}^{\infty}}\left|\dot{\mathcal{A}}_{\omega(s)}\right|ds+1\right) \\
&\leq \overline{c}\left(K+2CE(w)+4C+2C\|\eta\|_{\infty}c''+1\right) \\
&\leq \overline{c}\left(K+4C\Delta+16C^2+8C^2\|\eta\|_\infty c''+4C+2C\|\eta\|_{\infty}c''+1\right).
\end{aligned}\end{equation}
By Step2 and (\ref{eqn:e_bd}), we obtain the following inequalities
\begin{equation}\begin{aligned}\label{eqn:eta_bd2}
\left|\int_{l}^{l+\tau(l)}\dot{\eta}(s)ds\right|
&\leq \left|\int_{l}^{l+\tau(l)}\int_0^1H(v(t))dt\ ds\right|\\
&\leq \|H\|_{\infty}\tau(l) \\
&\leq \|H\|_{\infty}\frac{E(w)}{\epsilon^2} \\
&\leq \frac{\|H\|_{\infty}}{\epsilon^2}(2\Delta+8C+4C\|\eta\|_{\infty}c'').
\end{aligned}\end{equation}
Combining the above two estimates (\ref{eqn:eta_bd1}), (\ref{eqn:eta_bd2}),
we conclude
\begin{equation}\begin{aligned}
|\eta(l)|
&\leq |\eta(l+\tau(l))|+\left|\int_{l}^{l+\tau(l)}\dot{\eta}(s)ds\right|\\
&\leq \overline{c}\left(K+4C\Delta+16C^2+8C^2\|\eta\|_\infty c''+4C+2C\|\eta\|_{\infty}c''+1\right)\\
&\ \ \ +\frac{\|H\|_{\infty}}{\epsilon^2}(2\Delta+8C+4C\|\eta\|_{\infty}c'') \\
&=\underbrace{\left(8\overline{c}c''C+2\overline cc''+\frac{4c''\|H\|_{\infty}}{\epsilon^2}\right)C}_{=:K'}\|\eta\|_{\infty} \\
&\ \ \ +\underbrace{\overline{c}K+4\overline{c}C\Delta+16\overline{c}C^2+4\overline cC+\overline{c}+\frac{2\|H\|_{\infty}\Delta}{\epsilon^2}+\frac{8C\|H\|_{\infty}}{\epsilon^2}}_{=:K''}.
\end{aligned}\end{equation}
Since the above estimate holds for all $l\in\mathbb{R}$,
\[
\|\eta\|_{\infty}\leq K'\|\eta\|_{\infty}+K''.
\]
We can achieve that the {\em isoperimetric constant} $C$ satisfies
\begin{equation}\label{eqn:Ccond}
C\leq\frac{1}{4} \text{ \ \ and \ \ } K'\leq\frac{1}{2}
\end{equation}
by choosing $\sigma\in\Omega^2(M)$ with small norm.
This proves the proposition.
\end{proof}
\begin{Lemma}\label{lem:awin}
Assume that the isoperimetric constant $C$ is sufficiently small;
then the following holds true.
Suppose that $w=(v,\eta)\in C^\infty(\mathbb{R}\times S^1,T^*N)\times C^\infty(\mathbb{R},\mathbb{R})$ is a gradient flow line of the
time dependent gradient $\nabla\mathcal{A}_{\omega(s)}$ which converges asymptotically,
$\lim_{s\to\pm\infty}w(s)=w_\pm$, to critical points of $\mathcal{A}_{\omega(1)},\mathcal{A}_{\omega(0)}$ respectively,
such that $a=\mathcal{A}_{\omega(0)}(w_-)$ and $b=\mathcal{A}_{\omega(1)}(w_+)$.
Then the following assertions hold:\\
\begin{enumerate}
\item If $a\geq\frac{1}{9}$, then $b\geq\frac{a}{2}$; \\
\item If $b\leq-\frac{1}{9}$, then $a\leq\frac{b}{2}$.
\end{enumerate}
\end{Lemma}
\begin{proof}
By the previous proposition, we obtained the following uniform bound of $\eta$:
\begin{equation}\begin{aligned}
\|\eta\|_\infty&\leq2K''\\
&=2\overline{c}K+8\overline{c}C\Delta+32\overline{c}C^2+8\overline cC+2\overline{c}+\frac{4\|H\|_{\infty}\Delta}{\epsilon^2}+\frac{16C\|H\|_{\infty}}{\epsilon^2}.
\end{aligned}\end{equation}
Moreover, since $E(w)\geq0$ we obtain from (\ref{eqn:e_bd2}) the inequality
\begin{equation}
b\geq a-4C-2C\|\eta\|_\infty c''.
\end{equation}
By taking a small isoperimetric constant $C$ satisfying
\begin{equation}\begin{aligned}\label{eqn:Ccond2}
C\overline c c''\leq\frac{1}{32}; \\
C\left(2\overline cC+\frac{\|H\|_\infty}{\epsilon^2}\right)c''\leq\frac{1}{128}; \\
C\left(1+16\overline c c''C^2+4\overline cc''C+\overline c c''+\frac{8c''C\|H\|_\infty}{\epsilon^2}\right)\leq\frac{1}{144};
\end{aligned}\end{equation}
we now get
\begin{equation}\begin{aligned}\label{eqn:qaes}
b
&\geq a-4C-2C\|\eta\|_\infty c'' \\
&\geq a-4C-2C\left(2\overline{c}K+8\overline{c}C\Delta+32\overline{c}C^2+8\overline cC+2\overline{c}+\frac{4\|H\|_{\infty}\Delta}{\epsilon^2}+\frac{16C\|H\|_{\infty}}{\epsilon^2}\right) c'' \\
&= a-4C\overline c c''K-8C\left(2\overline cC+\frac{\|H\|_\infty}{\epsilon^2}\right)c''\Delta \\
&\ \; \; \ \ \ -4C\left(1+16\overline c c''C^2+4\overline cc''C+\overline c c''+\frac{8c''C\|H\|_\infty}{\epsilon^2}\right) \\
&\geq a-\frac{1}{8}K-\frac{1}{16}(b-a)-\frac{1}{36},
\end{aligned}\end{equation}
where $K=\max\{-a,b\}$.
To prove the assertion (1), we first consider the case
\[
|b|\leq a,\qquad a\geq\frac{1}{9}.
\]
In this case, we estimate
\[
b\geq a-\frac{1}{8}a-\frac{1}{8}a-\frac{1}{36}=\frac{3}{4}a-\frac{1}{36}\geq\frac{a}{2}.
\]
Hence to prove the assertion (1), it suffices to exclude the case
\[
-b\geq a\geq\frac{1}{9}.
\]
But in this case, (\ref{eqn:qaes}) leads to a contradiction in the following way:
\[
b\geq\frac{1}{9}+\frac{1}{72}-\frac{1}{16}(b-a)-\frac{1}{36}\geq-\frac{1}{16}(b-a)>0.
\]
This proves the first assertion.
To prove the assertion (2), we set
\[
b'=-a,\qquad a'=-b.
\]
We note that if (\ref{eqn:qaes}) holds for $a$ and $b$, it also holds for $b'$ and $a'$.
Hence we get from the assertion (1) the implication
\[
-b\geq\frac{1}{9} \Longrightarrow -a\geq-\frac{b}{2},
\]
which is equivalent to the assertion (2). This finishes the proof of the Lemma.
\end{proof}
\begin{proof}[{\bf Proof of Theorem \ref{thm:rfhcon}}]
We now construct the continuation homomorphism
\[
\Psi_{\omega_0}^{\omega_{\sigma}}:\mathrm{RFH}(\Sigma_k,\omega_0=dp\wedge dq)\to\mathrm{RFH}(\Sigma_k,\omega_{\sigma}=dp\wedge dq+\tau^*\sigma)
\]
for $\omega_0,\omega_{\sigma}\in\Omega^{\mathcal{M}p}_{\rm reg}(\Sigma_k)$.
As in Theorem \ref{thm:continuation},
we first subdivide
\[
\omega_s=\omega_0+s(\omega_\sigma-\omega_0)
\]
into small pieces. We first assume that we can find a subdivision
$\{\omega^i\}_{i=0}^{N}$ of $\omega_s$ satisfying
\begin{itemize}
\item $\omega^i=\omega_0+d(i)\tau^*\sigma$, where $0=d(0)<d(1)<\cdots <d(N)=1$;
\item $\mathcal{A}_{H,\omega^i}:\mathscr{L}\times\mathbb{R}\to\mathbb{R}$ is Morse-Bott, $\forall i=0,1,\dots,N$;
\item $C(M,g,(d(i+1)-d(i))\sigma,v_\nu)$ satisfies (\ref{eqn:Ccond}),\ (\ref{eqn:Ccond2}), $\forall i=0,1,\dots,N-1$.
\end{itemize}
Let $\omega_s^i=\omega^i+\beta(s)(\omega^{i+1}-\omega^i)$ be a homotopy between $\omega^i$ and $\omega^{i+1}$.
First we construct the following continuation map
\[
\widetilde\Psi_{\omega^i}^{\omega^{i+1}}:\mathrm{RFH}(\Sigma_k,\omega^i)\to\mathrm{RFH}(\Sigma_k,\omega^{i+1}).
\]
Since the action functional $\mathcal{A}_{H,\omega^i}$ is Morse-Bott, the
construction is given by counting gradient flow lines with cascades
as in Morse-Bott homology. Let us choose Morse functions $h^i$
on $\mathrm{Crit}(\mathcal{A}_{H,\omega^i})$. We then define a map
\[
\Psi_{\omega^i\ \ *}^{\omega^{i+1}}:\mathrm{RFC}_*(\Sigma_k,\omega^i)\to\mathrm{RFC}_*(\Sigma_k,\omega^{i+1})
\]
given by
\begin{equation}
\Psi_{\omega^i}^{\omega^{i+1}}(w_-)=\sum_{\mu(w_+)=\mu(w_-)}\#_2\mathcal{M}_{\omega^i}^{\omega^{i+1}}(w_-,w_+)w_+,
\end{equation}
where $w_-\in\mathrm{Crit}(h^i)$, $w_+\in\mathrm{Crit}(h^{i+1})$ and
$\#_2$ denotes the $\mathbb{Z}_2$-counting.
Here,
\begin{equation}\begin{aligned}
\widehat{\mathcal{M}}_{\omega^i,m}^{\omega^{i+1}}(w_-,w_+)
&=\{w\ |\ w \text{ is a flow line with }m\text{-cascades from }w_-\text{ to }w_+\};\\
\mathcal{M}_{\omega^i,m}^{\omega^{i+1}}(w_-,w_+)&=\widehat{\mathcal{M}}_{\omega^i,m}^{\omega^{i+1}}(w_-,w_+)/\mathbb{R}^m;\\
\mathcal{M}_{\omega^i}^{\omega^{i+1}}(w_-,w_+)&=\bigcup_{m\in\mathbb{N}_0}\mathcal{M}_{\omega^i,m}^{\omega^{i+1}}(w_-,w_+).
\end{aligned}\end{equation}
The main issue of this construction is also the uniform bound of
$E(w)$. As in the Morse-Bott homology situation, it suffices to
check that each gradient flow line has a uniform energy bound. For
this reason, we now only consider the following uniform energy
bound. Let
\[
w'=(v',\eta')\in C^{\infty}(\mathbb{R}\times S^1,T^*N)\times C^\infty(\mathbb{R},\mathbb{R})
\]
be a gradient flow line of
\[
\mathcal{A}_{\omega_s^i}(v,\eta)=\int_Z\overline{v}^*\omega_s^i-\eta\int_0^1H(x(t))dt,
\]
i.e.\ a solution of
\begin{equation}\label{eqn:c1} \left.
\begin{array}{cc}
\partial_sv+J_{t,s}(v)\left(\partial_tv-\eta X_H^{\omega_s^i}(v) \right)=0 \\
\partial_s\eta+\int_0^1H(x(t))dt=0
\end{array}
\right\} \end{equation}
\begin{equation}\label{eqn:c2} \lim_{s \to
-\infty}w'(s)=w'_-\in\mathrm{Crit}\,\mathcal{A}_{\omega^i}, \qquad \lim_{s \to
\infty}w'(s)=w'_+\in\mathrm{Crit}\,\mathcal{A}_{\omega^{i+1}}. \end{equation} To achieve a uniform
energy bound of $w'$, let us recall the equation (\ref{eqn:e_bd}) in
Proposition \ref{prop:lagbd2}:
\begin{equation}\label{eqn:ees}
E(w')\leq\mathcal{A}_{\omega^{i+1}}(w'_+)-\mathcal{A}_{\omega^i}(w'_-)+2CE(w')+4C+2C\|\eta'\|_{\infty}c''.
\end{equation}
Since the isoperimetric constant $C$ satisfies the condition (\ref{eqn:Ccond}),
we get the following uniform bound of the
Lagrangian multiplier $\eta'$: \begin{equation}\label{eqn:etabd}
\|\eta'\|_{\infty}\leq 2K'' \end{equation} and \begin{equation}\begin{aligned}
E(w')&\leq2\Delta+8C+4C\|\eta'\|_{\infty}c'' \\
&\leq2\Delta+8C+8Cc''K'', \end{aligned}\end{equation} where the coefficients are the same
as in Proposition \ref{prop:lagbd2}. Hence we conclude that $E(w')$ is
uniformly bounded.
Now, by virtue of Lemma \ref{lem:awin}, we obtain for $a\leq-\frac{1}{9}$ and $b\geq\frac{1}{9}$ maps
\[
\Psi_{\omega_i}^{\omega_{i+1}(a,b)}:\mathrm{RFC}^{(\frac{a}{2},b)}(\Sigma_k,\omega_i)\to\mathrm{RFC}^{(a,\frac{b}{2})}(\Sigma_k,\omega_{i+1})
\]
defined by counting gradient flow lines of the time dependent
Rabinowitz action functional. Since the continuation map
$\Psi_{\omega_i}^{\omega_{i+1}(a,b)}$ commutes with the boundary
operators, this induces the following homomorphism on homology
level:
\[
\widetilde\Psi_{\omega_i}^{\omega_{i+1}(a,b)}:\mathrm{RFH}^{(\frac{a}{2},b)}(\Sigma_k,\omega_i)\to\mathrm{RFH}^{(a,\frac{b}{2})}(\Sigma_k,\omega_{i+1}).
\]
By taking the inverse and direct limit as follows
\[
\mathrm{RFH}_*(\Sigma_k,\omega_i)=\lim_{b\to\infty}\lim_{a\to-\infty}\mathrm{RFH}_*^{(a,b)}(\Sigma_k,\omega_i),
\]
we obtain
\[
\widetilde\Psi_{\omega_i}^{\omega_{i+1}}:\mathrm{RFH}(\Sigma_k,\omega_i)\to\mathrm{RFH}(\Sigma_k,\omega_{i+1}).
\]
As in usual Floer homology, we can define the continuation
homomorphism by juxtaposition:
\[
\widetilde\Psi_{\omega_0}^{\omega_\sigma}:\mathrm{RFH}(\Sigma_k,\omega_0)\to\mathrm{RFH}(\Sigma_k,\omega_\sigma),
\]
\[
\widetilde\Psi_{\omega_0}^{\omega_\sigma}=\widetilde\Psi_{\omega^{N-1}}^{\omega^N}\circ\cdots\circ\widetilde\Psi_{\omega^1}^{\omega^2}\circ\widetilde\Psi_{\omega^0}^{\omega^1}.
\]
In a similar way, we can construct \begin{equation}
\widetilde\Psi_{\omega_\sigma}^{\omega_0}:\mathrm{RFH}(\Sigma_k,\omega_\sigma)\to\mathrm{RFH}(\Sigma_k,\omega_0)
\end{equation} by following the homotopy backwards. By a
homotopy-of-homotopies argument, we conclude
$\widetilde\Psi_{\omega_\sigma}^{\omega_0}\circ\widetilde\Psi_{\omega_0}^{\omega_\sigma}=\mathrm{id}_{\mathrm{RFH}(\Sigma_k,\omega_0)}$
and
$\widetilde\Psi^{\omega_\sigma}_{\omega_0}\circ\widetilde\Psi^{\omega_0}_{\omega_\sigma}=\mathrm{id}_{\mathrm{RFH}(\Sigma_k,\omega_\sigma)}$.
Therefore $\widetilde\Psi_{\omega_0}^{\omega_1}$ is an isomorphism with
inverse $\widetilde\Psi^{\omega_0}_{\omega_1}$.
It remains to discuss the case where the corresponding subdivision
$\{\omega^i\}_{i=0}^N$ of $\omega_s$ does not exist. The issue is
the assertion about Morse-Bott which in higher dimensions is not
known to hold for a generic choice of the magnetic field. In this case
we can perturb the Rabinowitz action functional by an additional
non-physical perturbation already used in \cite{CFP09}. Namely,
choose a compactly supported time-dependent Hamiltonian $F \in
C^\infty(T^*N \times S^1)$ and consider the perturbed Rabinowitz
action functional $\mathcal{A}_\omega^F :
\mathscr{L}\times\mathbb{R} \to \mathbb{R}$ defined by
\[\mathcal{A}_\omega^F(v,\eta)=\mathcal{A}_\omega(v,\eta)+\int_0^1F(v(t),t)dt.\]
For a generic perturbation $F$ the perturbed Rabinowitz action
functional is Morse, see \cite{CFP09}. Moreover, the difference
$\mathcal{A}_\omega-\mathcal{A}_\omega^F$ is uniformly bounded by
the Hofer norm of the perturbation $F$. By choosing a small enough
perturbation all our previous estimates hold up to an arbitrarily
small error term. This procedure allows us to construct a
continuation homomorphism between the two Rabinowitz Floer
homologies in the unlikely case where we cannot directly interpolate
between the two symplectic forms.
\end{proof}
\appendix
\section{Cofilling function in Example \ref{ex:sol}}\label{app:sol}
In this appendix, we give a more detailed explanation of Example \ref{ex:sol}.
We follow ideas from geometric group theory presented in \cite{ECH}.
\begin{proof}[Proof of Example \ref{ex:sol}]
Let us recall the 3-manifold $M$ in Example \ref{ex:sol}, fibered over $S^1$ with fiber ${\mathbb{T}}^2$ with hyperbolic monodromy
\[
A=\begin{pmatrix}
2&1\\
1&1
\end{pmatrix}.
\]
Let $y,z$ be the coordinates of the fiber torus; then $\sigma =dy \wedge dz$ is a well-defined 2-form on $M$.
In order to show the exponential growth of the cofilling function $u_\sigma$,
we now consider the group $G$ generated by the following actions on $\mathbb{R}^3$:
\begin{equation}\begin{aligned}
\alpha :&\ (x,y,z)\mapsto \big(x+1,A(y,z)\big); \\
\beta :&\ (x,y,z)\mapsto (x,y+1,z); \\
\gamma :&\ (x,y,z)\mapsto (x,y,z+1).
\end{aligned}\end{equation}
Note that the quotient space $G\setminus\mathbb{R}^3$ is the manifold $M$
with the universal covering map $p:\mathbb{R}^3\to M$.
Since there exists a Riemannian metric $g$ on $M$,
we get the pullback metric $\widetilde g=p^*g$ on $\mathbb{R}^3=\widetilde M$
which is invariant under the action of the group $G$.
In particular, we choose a metric $g$ satisfying the condition that $\widetilde g$
assigns length 1 to the following edges in $\widetilde M$:
\begin{equation}\begin{aligned}
(x,y,z)&\sim\big(x+1,A(y,z)\big); \\
(x,y,z)&\sim(x,y+1,z); \\
(x,y,z)&\sim(x,y,z+1);
\end{aligned}\end{equation}
where $p\sim q$ means the straight line connecting $p$ and $q$ in the Euclidean metric.
Consider the word
\[
w_n=(\alpha^n\gamma^{-1}\alpha^{-n})(\alpha^{-n}\beta^{-1}\alpha^n)(\alpha^n\gamma\alpha^{-n})(\alpha^{-n}\beta\alpha^n),
\]
which represents the identity.
Then $w_n$, regarded as a contractible path in the universal cover $(\mathbb{R}^3,\widetilde g)$,
travels around the following points with straight lines:
\[
(0,0,0),\ (n,0,0),\ (n,e_1),\ (0,A^{-n} e_1),\ (-n,A^{-2n} e_1),
\]
\[
(-n,A^{-2n} e_1+e_2),\ (0,A^{-n} e_1+A^n e_2),\ (n,e_1+A^{2n} e_2),
\]
\[
(n,A^{2n} e_2),\ (0,A^{n} e_2),\ (-n,e_2),\ (-n,0,0),\ (0,0,0).
\]
Note that the length of $w_n$ grows linearly as $n\to\infty$ with respect to the metric $\widetilde g$.
We claim that any bounding disk $D_n$ with $\partial D_n=w_n$
has area at least $k\lambda^{2n}$,
where $k$ is a positive constant and $\lambda$ is the eigenvalue of $A$ bigger than 1.
To show this, consider the following projection:
\begin{equation}\begin{aligned}
\pi:\mathbb{R}^3\to\mathbb{R}^2,\qquad
(x,y,z)\mapsto(y,z).
\end{aligned}\end{equation}
Let $\sigma'$ be the area form $dy\wedge dz\in\Omega^2(\mathbb{R}^2)$;
then we get $p^*\sigma=\pi^*\sigma'$.
Note that this form is preserved by the action of $G$ and we obtain
\[
\int_{p(D_n)}\sigma=\int_{D_n}p^*\sigma=\int_{D_n}\pi^*\sigma'=\int_{\pi(D_n)}\sigma'.
\]
The projection $\pi(D_n)$ contains the parallelogram with vertices
$0, A^{-n} e_1, A^{-n} e_1+A^n e_2$ and $A^n e_2$ in the $yz$-plane,
and the area of this parallelogram is approximately $k\lambda^{2n}$ for large $n$.
Now suppose that there exists a primitive $\theta\in\Omega^1(\widetilde M)$ of $p^*\sigma$
with subexponential growth $f(n)=\sup_{z\in B_0(n)}|\theta_z|_{\widetilde p}$.
Then we deduce the following contradiction:
\[
\epsilon\lambda^{2n}\leq\int_{p(D_n)}\sigma=\int_{D_n}p^*\sigma=\int_{\partial D_n}\theta
\leq f(4n+2)\int_{w_n}1=f(4n+2)\cdot (8n+4)
\]
as $n\to\infty$ for small enough $\epsilon>0$.
If we take $ydz\in\Omega^1(\widetilde M)$ as a primitive of $p^*\sigma$,
then by direct calculation $\int_{p_n}ydz\leq K\lambda^{2n}$ as $n\to\infty$,
where $p_n:[0,n]\to\widetilde M$ is a length $n$ path with $p_n(0)=0$ and $K$ large enough.
This implies that $\sup_{q\in B_0(n)}|ydz_q|_{\widetilde p}$ has at most exponential growth and
we conclude that $u_{\sigma}(s)\sim \exp(s)$.
\end{proof}
\end{document} |
\begin{document}
\title{Estimates for the maximal Cauchy Integral on chord-arc curves}
\author{Carmelo Puliatti}
\address{BGSMath and Departament de Matem\`atiques, Universitat Aut\`onoma
de Barcelona, 08193, Bellaterra, Barcelona, Catalonia}
\email{puliatti@mat.uab.cat}
\begin{abstract}
We study the chord-arc Jordan curves that satisfy the
Cotlar-type inequality $T_*(f)\lesssim M^2(Tf),$ where $T$ is the
Cauchy transform, $T_*$ is the maximal Cauchy transform and $M$ is
the Hardy-Littlewood maximal function. Under the background
assumption of asymptotic quasi-conformality we find a
characterization of such curves in terms of the smoothness of a parametrization of the curve.
\end{abstract}
\maketitle
\section{Introduction}
\blfootnote{\textit{2010 Mathematics Subject Classification.} Primary 42B20, 30C62, 28A80.
\\
\textit{Key words:} Cauchy integral, Cotlar's inequality, asymptotically conformal curve, chord-arc curve.}
Consider a homogeneous smooth Calder\'{o}n-Zygmund operator in $\mathbb{R}^n$
\begin{equation}\label{CZ}
Tf(x)= \operatorname{p.v.} \int f(x-y)\,K(y)\,dy \equiv \lim_{\epsilon\rightarrow
0} T_\epsilon f(x), \quad x \in \mathbb{R}^n,
\end{equation}
where $T_\epsilon$ is the truncation at level $\epsilon$ defined by
\begin{equation}
T_\epsilon f(x)= \int_{| y| > \epsilon} f(x-y) K(y) \,dy, \quad x \in \mathbb{R}^n,
\end{equation}
and $f$ is in $L^p(\mathbb{R}^n), \; 1 \le p < \infty.$ Here the kernel $K$ is of class $C^{\infty}$ off the origin,
homogeneous of order $-n$ and with zero integral on the unit sphere $$\{x \in \mathbb{R}^n : |x|=1\}.$$
Let $T_{*}$ be the maximal singular integral
\begin{equation}
T_{*}f(x)= \sup_{\epsilon > 0} | T_\epsilon f(x)|, \quad x \in \mathbb{R}^n,
\end{equation}
A classical fact relating $T_{*}$ and the standard Hardy-Littlewood maximal operator $M$ is Cotlar's inequality, which reads
\begin{equation}\label{Cotlar}
T_{*}(f)(x)\leq C\,\big(M(Tf)(x) + M(f)(x)\big), \quad x \in
\mathbb{R}^n.
\end{equation}
Combining this with the $L^p$ estimates $\|T(f)\|_p \le C \,\|f\|_p $ and $\|M(f)\|_p \le C \,\|f\|_p $, \; $1<p< \infty$ one gets $\|T_{*}(f)\|_p \le C \,\|f\|_p, \; 1<p< \infty$.
It was discovered in \cite{MOV} that if $T$ is an even higher order Riesz transform, that is, if
$K(x) = P(x)/|x|^{n+d}$, with $P$ an even homogeneous polynomial of degree $d$, then one can get rid of the
second term in the right hand side of \eqref{Cotlar}, namely,
\begin{equation}\label{SuperCotlar}
T_{*}(f)(x)\leq C\, M(Tf)(x), \quad x \in \mathbb{R }^n.
\end{equation}
Hence $\|T_{*}(f)\|_p \le C \,\|T(f)\|_p, \; 1<p< \infty$, in this case. However, if $T$ is an odd higher order Riesz transform, then
\eqref{SuperCotlar} may fail and the right substitute turns out to be (see \cite{MOPV})
\begin{equation}\label{CotlarOdd}
T_{*}(f)(x)\leq C\, M^2(Tf)(x), \quad x \in \mathbb{R }^n,
\end{equation}
where $M^2$ stands for the iteration of $M$.
Inequalities of the type \eqref{SuperCotlar} and \eqref{CotlarOdd}
were first considered in relation to the David-Semmes problem (see
\cite{MOV},\cite{MOPV} and \cite{V}) and later on were studied in
the context of the Cauchy singular integral on Lipschitz graphs and
$C^1$ curves by Girela-Sarri\'{o}n in \cite{G}. Let $\Gamma$ be either
a Lipschitz graph or a closed chord-arc curve in the plane, let $T$
be the Cauchy Singular Integral and $M$ the Hardy-Littlewood maximal
operator, both with respect to the arc-length measure, and let
$T_{*}$ be the maximal Cauchy Integral. Precise definitions will be
given below. Girela-Sarri\'on showed in \cite{G} that the presence
at a point $z$ of the curve of a non-zero angle prevents
\eqref{CotlarOdd}, with $x$ replaced by $z$, to hold. This agrees
with the intuition that \eqref{CotlarOdd} should help in finding
tangent lines, but suggests that it is a condition definitely
stronger than the mere existence of tangents. It was also shown in
\cite{G} that if $\Gamma$ is a closed $C^1$ curve with the property
that the modulus of continuity $\omega(z, \delta) $ of the unit
tangent vector satisfies
\begin{equation}\label{logomega}
\omega(z,\delta) \le C\, \frac{1}{\log(\frac{1}{\delta})}, \quad z \in \Gamma, \quad \delta < 1/2,
\end{equation}
then \eqref{CotlarOdd} holds with $x \in \mathbb{R}^n$ replaced by $z \in \Gamma$.
In this paper we study the validity of inequality \eqref{CotlarOdd} in the context of chord-arc curves. A chord-arc curve is a
rectifiable Jordan curve $\Gamma$ in the plane with the property that there exists a positive constant $C$ such that, given any two
points $z_1, z_2 \in \Gamma$ one has
\begin{equation}\label{chordarc}
l(z_1,z_2) \leq C \,|z_1-z_2 |,
\end{equation}
where $l(z_1,z_2)$ is the length of the shortest arc in $\Gamma$ joining $z_1$ and $z_2$. Equivalently $\Gamma$ is a
bilipschitz image of the unit circle (see \cite{Pomm_bd}, Theorem 7.9).
Then $\Gamma$ can be parametrized by a
periodic function $ \gamma : \mathbb{R} \rightarrow \Gamma $ of period $T$ satisfying the bilipschitz condition
\begin{equation}\label{bilipschitz}
\frac{1}{L}\,|x-y| \leq |\gamma(x) -\gamma(y)| \leq L \,|x-y|, \quad x, y \in \mathbb{R}, \quad |x-y| \leq \frac{T}{2},
\end{equation}
for some positive constant $L$. We say, by slightly abusing language, that $\gamma$ is a bilipschitz parametrization of $\Gamma$.
One can take, for instance, the $T$-periodic extension of the arc-length parametrization of $\Gamma$ with $T$ being the length of $\Gamma$.
One can easily define the maximal Hardy-Littlewood operator and the Cauchy Integral on a chord-arc curve. Given $z\in \Gamma$ let
$t \in \mathbb{R}$ be such that $z=\gamma(t).$ Set
\begin{equation}
\Gamma_{z,r}:=\gamma(\{\tau :|\tau-t|<r\}).
\end{equation}
One should look at $\Gamma_{z,r}$ as ``balls'' of
radius $r$ centered at $z$. Indeed, owing to the bilipschitz condition \eqref{bilipschitz}, each $\Gamma_{z,r}$ contains and is contained in a disc in
$\Gamma$ of radius comparable to $r$, for $r < T .$ It will be more convenient
to work with $\Gamma_{z,r}$ than with the euclidean discs $D(z,r)\cap \Gamma$, where $D(z,r)$
stands for the planar disc of center $z$ and radius $r$.
Denote by $\mu$ the arc-length measure on $\Gamma$. For $f\in L^1(\Gamma,\mu)$ and $z\in\Gamma,$ we define the Hardy-Littlewood maximal
function on the curve $\Gamma$ as
\begin{equation}
Mf(z):=\underset{r>0}{\sup}\,\frac{1}{\mu(\Gamma_{z,r})}\int_{\Gamma_{z,r}}|f|d\mu.
\end{equation}
The Cauchy Integral is defined as
\begin{equation}\label{Cauchy}
Tf(z)= \operatorname{p.v.} \frac{1}{\pi\, i}\int_\Gamma \frac{1}{w-z}\,f(w)\,dw \equiv \lim_{\epsilon\rightarrow
0} T_\epsilon f(z), \quad z \in \Gamma,
\end{equation}
where
\begin{equation}\label{Cauchytrunc}
T_{\epsilon}f(z)=\frac{1}{\pi i}\int_{\Gamma\backslash\Gamma_{z,\epsilon}}\frac{f(w)}{w-z}dw
\end{equation}
is the truncated Cauchy Integral at level $\epsilon$. The maximal Cauchy Integral is
\begin{equation}\label{Cauchymax}
T_*f(z):=\underset{\epsilon>0}{\sup}\big|T_{\epsilon}f(z)\big|.
\end{equation}
Our aim is to investigate under what conditions on $\Gamma$ one has the inequality
\begin{equation}
T_* f(z)\leq C\, M^2(Tf)(z), \quad z \in \Gamma, \quad f \in L^2(\Gamma, \mu),
\end{equation}
where $C$ is a positive constant. Since we know that angles prevent
the above inequality to hold, we need to require on $\Gamma$ a
condition that excludes them. One such condition is asymptotic
conformality. Given two points $z_1, z_2 \in \Gamma$ let
$A(z_1,z_2)$ be the arc in $\Gamma$ joining the two points and
having smallest diameter (there is only one if the two points are
sufficiently close). The Jordan curve $\Gamma$ is said to be
asymptotically conformal if, given a positive number $\delta$ there
exists a positive $\epsilon$, so that for any two points $z_1, z_2
\in \Gamma$ satisfying $|z_1-z_2| < \epsilon$ one has
\begin{equation}\label{ac}
|z_1 -z |+|z_2 -z| \leq (1+\delta)|z_1 -z_2|, \quad z \in A(z_1,z_2).
\end{equation}
Our main result reads as follows.
\begin{theor*}\label{teorem} Let $T$ be the Cauchy Integral on an asymptotically conformal chord-arc curve $\Gamma$ and let $\gamma$ be a bilipschitz parametrization of $\Gamma$.
Then the estimate
\begin{equation}\label{m2}
T_*(f)(z) \leq C\, M^2(Tf)(z), \quad z\in \Gamma, \quad f \in
L^2(\Gamma,\mu),
\end{equation}
holds if and only if there exists $C>0$ such that
\begin{equation}\label{snddiff}
\big| \gamma(x+\epsilon)+\gamma(x-\epsilon)-2\gamma(x)\big|\leq C \,
\frac{\epsilon}{|\log\epsilon|},
\end{equation}
for each $\epsilon$ satisfying $0 < \epsilon < T$ and for each $x\in\mathbb{R}.$
\end{theor*}
One should recall that condition \eqref{snddiff} implies that
$\gamma$ is differentiable almost everywhere in the ordinary sense
and the derivative is a function of vanishing mean oscillation
(see \cite{WeissZygm}). Therefore, for chord-arc curves satisfying the background
assumption of asymptotical conformality, inequality \eqref{m2} is
equivalent to the precise form of differentiability described in
terms of second order differences in \eqref{snddiff}.
In Section 2 we prove a couple of Lemmas which allow to express
condition \eqref{m2} in an equivalent form in terms of a function
related to the geometry of $\Gamma.$ Section 3 is devoted to taking
care of a technical question, namely, that it is enough to estimate
truncations at small enough levels. In Section 4 we prove the
Theorem by means of three lemmas, one of them making the connection
between the function carrying the geometrical information and the
second difference condition \eqref{snddiff}. In Section 5 we present
an example of a spiralling domain that enjoys the equivalent
conditions in the Theorem but whose boundary is not of class $C^1.$
Our terminology and notation are standard. We let $C$ denote a
constant independent of the relevant variables under consideration
and which may vary at each occurrence. The notation $A \lesssim B$
means that there exists a constant $C>0$ such that $A\leq C B.$ We write $A\gtrsim B$ if $B\lesssim A.$ The disc centered at $z$ of radius $r$ is denoted by
$D(z,r)$.
\section{Two preliminary Lemmas}
The beginning of the proof follows the ideas of \cite{G}, so that we
will be rather concise. Given a function $f\in L^1(\Gamma,\mu)$ we
denote by
$m_{\Gamma_{z,\epsilon}}(f)=\fint_{\Gamma_{z,\epsilon}}f(w)\,d\mu(w)$
the mean of $f$ on $\Gamma_{z,\epsilon}$ with respect to the arc length measure $\mu$. We let $K_{z,\epsilon}$
denote the Cauchy kernel truncated at the point $z$ at level
$\epsilon,$ that is,
\begin{equation}\label{Cauchytrun}
K_{z,\epsilon}(w)= \frac{1}{\pi \,i} \frac{1}{w-z}
\,\chi_{\Gamma\setminus \Gamma_{z,\epsilon}}(w),\quad w \in \Gamma.
\end{equation}
Set $g_{z,\epsilon}=T(K_{z,\epsilon})$ and let $M>1$ be a big number to
be chosen later. Following \cite[p.673]{G} we obtain the identity
\begin{equation}\label{identitat}
-T_{\epsilon}f(z)=I_{\epsilon} + II_{\epsilon} + III_{\epsilon},
\end{equation}
where
\begin{align}\label{123_1}
I_{\epsilon}&:=
\int_{\Gamma_{z,M\epsilon}}Tf(w)\left(g_{z,\epsilon}(w)-m_{\Gamma_{z,M\epsilon}}(g_{z, \epsilon})\right)dw,\\
\label{123_2} II_{\epsilon} &:=
m_{\Gamma_{z,M\epsilon}}(g_{z,\epsilon})\int_{\Gamma_{z,M\epsilon}}
Tf(w)dw \\ \label{123_3} III_{\epsilon} &:= \int_{\Gamma\setminus
\Gamma_{z,M\epsilon}}Tf(w)g_{z,\epsilon}(w)dw.
\end{align}
Following closely the argument in \cite{G} one can prove
\begin{align}\label{stima1}
|I_{\epsilon}|\leq C\, M^2(Tf)(z),\\ \label{stima2}
|II_{\epsilon}|\leq C\, M(Tf)(z).
\end{align}
Since clearly $M(g) \leq M^2(g)$ for any $g$, we are left with the
task of estimating $III_\epsilon.$ The next lemma provides an
expression for $III_\epsilon$ in terms of a function encoding the
smoothness of $\Gamma.$ To state the lemma first we need to clarify
the definition of a branch of the logarithm of $w-z$, as a
function of $w$ with $z \in \Gamma$ fixed, in an appropriate region.
Given $z\in \Gamma$ let $\Delta_z$ be a curve connecting $z$ and
$\infty$ in the unbounded component of $\mathbb{C}\setminus \Gamma.$
Such curves exist and indeed we will construct a special one in
Section 4 (under the additional assumption of asymptotic
conformality). Hence $\mathbb{C}\setminus \Delta_z$ is a simply
connected domain containing $\Gamma \setminus \{z\}$ and so there
exists in $\mathbb{C}\setminus \Delta_z$ a branch of $\log(w-z).$ In
particular, if $z=\gamma(x)$ for some $x \in \mathbb{R}$, the
expressions $\log (\gamma(x+\epsilon)-\gamma(x))$ and $\log
(\gamma(x-\epsilon)-\gamma(x))$ make sense for $ 0 < \epsilon < T. $
\begin{lemm}\label{lemm_curvature}
Let $\Gamma$ be a chord-arc curve and $\gamma$ a bilipschitz
parametrization of $\Gamma$. Let $z \in \Gamma$ and let $x$ be
a real number such that $\gamma(x)=z$. Then
for almost every $w\in\Gamma\backslash\Gamma_{z,M\epsilon}$ we have
\begin{equation}
T(K_{z,\epsilon})(w)=\frac{1}{\pi^2 (z-w)}\big[ F(x,\epsilon)+G_{z,\epsilon}(w)\big],
\end{equation}
where
\begin{equation}
\label{BBB}
F(x,\epsilon)= \log(\gamma(x+\epsilon)-\gamma(x)) - \log(\gamma(x-\epsilon)-\gamma(x)) + \pi i
\end{equation}
and
\begin{equation}\label{decG}
| G_{z,\epsilon}(w)|\leq
\frac{C\,\epsilon}{|z-w|}.
\end{equation}
\end{lemm}
\begin{proof}
Take $w\in\Gamma\backslash\Gamma_{z,M\epsilon}$. Then
\begin{align}
T(K_{z,\epsilon})(w) & = -\frac{1}{\pi^2} \underset{\delta \rightarrow 0}{\lim}\int_{\Gamma\setminus(\Gamma_{w,\delta }\cup \Gamma_{z,\epsilon})}\frac{1}{(\zeta-z)(\zeta-w)}\,d\zeta \\
& = -\frac{1}{\pi^2} \frac{1}{w-z} \,\underset{\delta \rightarrow 0}{\lim} \int_{\Gamma\setminus(\Gamma_{w,\delta }\cup \Gamma_{z,\epsilon}) }\left( \frac{1}{\zeta-w} - \frac{1}{\zeta-z} \right)\,d\zeta.
\end{align}
Let $y \in \mathbb{R}$ with $\gamma(y) = w$. Then the last integral in the above formula is
\begin{align}
& \log\left(\gamma(y-\delta) - \gamma(y) \right) - \log\left(\gamma(x+\epsilon) - \gamma(y) \right)
+\log\left(\gamma(x-\epsilon) - \gamma(y) \right) \\
& - \log\left(\gamma(y+\delta) - \gamma(y) \right)
-\Big( \log\left(\gamma(y-\delta) - \gamma(x) \right) \\
&- \log\left(\gamma(x+\epsilon) - \gamma(x)\right)
+\log\left(\gamma(x-\epsilon) - \gamma(x)\right)- \log\left(\gamma(y+\delta) - \gamma(x) \right) \Big).
\end{align}
Assume that $\gamma$ is differentiable at the point $y$ and the derivative $\gamma'(y)$
does not vanish. Then we have that
\begin{equation}
\lim_{\delta \rightarrow 0} \Big(\log\left(\gamma(y-\delta) - \gamma(y) \right)- \log\left(\gamma(y+\delta) - \gamma(y) \right)\Big) = \pi i,
\end{equation}
because the curve $\Delta_w$ lies in the unbounded component of $\mathbb{C} \setminus \Gamma$, and then to the right hand side of $\Gamma$,
oriented according to the parametrization $\gamma$.
Taking limit as $\delta$ goes to $0$ we obtain
\begin{align}
T(K_{z,\epsilon})(w) & = -\frac{1}{\pi^2} \frac{1}{w-z} \Big( \big(\log(\gamma(x+\epsilon) - \gamma(x)) - \log(\gamma(x-\epsilon) - \gamma(x))+ \pi i \big) \\
& + \big(\log(\gamma(x+\epsilon) - \gamma(y)) - \log(\gamma(x-\epsilon) - \gamma(y))\big)\Big).
\end{align}
Define
\begin{equation}\label{exprG}
G_{z,\epsilon}(w)= \log\left(\gamma(x+\epsilon) - \gamma(y)\right) - \log\left(\gamma(x-\epsilon) - \gamma(y)\right).
\end{equation}
\noindent It remains to show the decay inequality \eqref{decG}. According to the choice of $\Delta_w$ we have a well defined branch of
$\log(\gamma(x+t)-w), \; -\epsilon < t < \epsilon.$ Thus
\begin{equation}\label{intG}
G_{z,\epsilon}(w) = \int_{-\epsilon}^{\epsilon} \frac{d}{dt} \log(\gamma(x+t)-w) \,dt =
\int_{-\epsilon}^{\epsilon} \frac{\gamma'(x+t)}{\gamma(x+t)-w}\, dt.
\end{equation}
Since $w=\gamma(y) \in \Gamma \setminus \Gamma_{z, M \epsilon}$, we have $ y \notin (x-M \epsilon, x+M \epsilon)$ and so
\begin{equation}\label{sota}
|w-z| = |\gamma(y) -\gamma(x)| \geq \frac{|y-x|}{L} \geq \frac{M \epsilon}{L} ,
\end{equation}
which gives, taking $M \geq 2 L^2$,
\begin{align}
|w-\gamma(x+t)| & \geq |w-z| - |\gamma(x) -\gamma(x+t)| \\ & \geq \frac{ |w-z| }{2} + \frac{M \epsilon}{2 L} - L \epsilon \\ & \geq \frac{ |w-z| }{2}.
\end{align}
Hence, by \eqref{intG},
\begin{align}
|G_{z,\epsilon}(w) | & \leq \int_{-\epsilon}^{\epsilon} \frac{|\gamma'(x+t)|}{|\gamma(x+t)-w|}\, dt \leq \frac{4L \epsilon}{|w-z|}.\qedhere
\end{align}
\end{proof}
\begin{lemm}\label{Flog}
Let $\Gamma$ be a chord-arc curve and $\gamma$ a bilipschitz
parametrization of $\Gamma$. Then the inequality
\begin{equation}\label{m22}
T_*(f)(z) \leq C\, M^2(Tf)(z), \quad z\in \Gamma, \quad f \in
L^2(\Gamma,\mu),
\end{equation}
is equivalent to
\begin{equation}\label{Ffitada}
| F(x,\epsilon)| \, |\log(\epsilon)| \leq C, \quad 0< \epsilon <T, \quad x \in \mathbb{R}.
\end{equation}
\end{lemm}
\begin{proof}
Assume that \eqref{Ffitada} holds. Then by Lemma \ref{lemm_curvature}
\begin{align}
III_{\epsilon} & = \int_{\Gamma \setminus \Gamma_{z,M \epsilon}} Tf(w) \, T(K_{z,\epsilon})(w) \,dw \\
& = \frac{F(x,\epsilon)}{\pi^2} \int_{\Gamma \setminus \Gamma_{z,M \epsilon}} \frac{Tf(w)}{z-w}\,dw + \frac{1}{\pi^2} \int_{\Gamma \setminus \Gamma_{z,M \epsilon}} Tf(w) \, \frac{G_{z,\epsilon}(w) }{z-w}\,dw \\
& = F(x,\epsilon) \, IV_\epsilon + V_\epsilon,
\end{align}
where the last identity is a definition of the terms $IV_\epsilon $
and $V_\epsilon$. One can break the domain of integration in the integrals in $IV_\epsilon $
and $V_\epsilon$ into a union of dyadic annuli
\begin{equation}
A_j = \gamma \big\{y \in \mathbb{R} : M \epsilon\, 2^j < | y-x| \leq M \epsilon \, 2^{j+1} \big\}, \quad j=0, 1, ...
\end{equation}
then perform standard estimates and apply \eqref{decG} to get, thanks to the quadratic decay of the integrand,
\begin{equation}\label{quatre}
| V_\epsilon | \leq C\, M\big(T(f)\big)(z).
\end{equation}
For $IV_\epsilon $ one only has a first order decay, which gives
\begin{equation}
| IV_\epsilon | \leq C\, \Big|\log\Big(\frac{ML}{\epsilon}\Big)\Big|M(Tf)(z),
\end{equation}
thus completing the proof of the sufficient condition.
Assume now \eqref{m22}. Recalling that $III_{\epsilon} =F(x,\epsilon) \, IV_\epsilon + V_\epsilon$ and \eqref{quatre},
we obtain
\begin{equation}\label{F4}
\big| F(x,\epsilon) \, IV_\epsilon \big| \leq C\, M^2\big(T(f)\big)(z), \quad z \in \Gamma, \quad f \in L^2(\Gamma, \mu).
\end{equation}
The Cauchy Singular Integral operator $T$ is an isomorphism of $L^2(\Gamma, \mu)$ onto itself. This is proved in
Lemma 1 of \cite[p. 661]{G} for Lipschitz graphs, and the same proof works in our context. Thus \eqref{F4} can be rewritten as
\begin{equation}\label{Fg}
\Big| F(x,\epsilon) \, \int_{\Gamma \setminus \Gamma_{z,M \epsilon}} \frac{g(w)}{z-w}\,dw \Big| \leq C\, M^2(g)(z), \quad z \in \Gamma, \quad g \in L^2(\Gamma,\mu).
\end{equation}
To simplify the notation take $x=0$ and $\gamma(0)=0.$ Assume first that $0 <
\epsilon < 1.$ Apply \eqref{Fg} with $g$ the characteristic
function of $\gamma((\epsilon^n, \epsilon))$, where $n$ is a large
integer to be chosen. Then
\begin{equation}\label{F0}
| F(0,\epsilon)| \Big| \int_{\ep^n}^{\ep} \frac{\gamma'(t)}{\gamma(t)}
\,dt \Big| \leq C
\end{equation}
and
\begin{align}
\Big| \int_{\ep^n}^{\ep} \frac{\gamma'(t)}{\gamma(t)} \,dt \Big| & = | \log(\gamma(\ep)) - \log(\gamma(\ep^n) )| \\
& \geq | \log(|\gamma(\ep)|) - \log(|\gamma(\ep^n) |)|\\
& \geq \log\Big(\frac{1}{L^2\,\ep^{n-1}}\Big)\\
& \geq -2 \log(L) + (n-2) \log\Big(\frac{1}{\ep}\Big) +
\log\Big(\frac{1}{\ep}\Big)\\
& \geq |\log(\ep)|
\end{align}
provided $n=n(\ep)$ is large enough so that $-2 \log(L) + (n-2)
\log({1}/\ep) \geq 0$. Therefore \eqref{Ffitada} follows in
this case.
If $1 \leq \ep < T$ then we take as $g$ the characteristic function
of $\gamma((\ep^{-n}, \ep)).$ In this case we get
\begin{align}
\Big| \int_{\ep^{-n}}^{\ep} \frac{\gamma'(t)}{\gamma(t)} \,dt \Big| & \geq -2 \log(L) +
n \log(\ep)+ \log(\ep)\\ &
\geq |\log(\ep)|
\end{align}
provided $n$ is chosen so that $-2 \log(L) + n \log(\ep) \geq 0$.
\end{proof}
\section{Reduction to estimating truncations at small levels}
In this section we reduce the proof of \eqref{m2} to estimating the
truncations $T_{\ep} f$ for small $\ep.$ In the previous section we
showed that the estimate of $T_{\ep} f$ can be reduced to that of
the term $III_{\ep}$ in \eqref{123_3}.
\begin{lemm}\label{smalltrunc} If $\epsilon_0$ is a given positive number, then there exists a large positive number $M=M(L)$
so that
\begin{equation}
\Big|\int_{\Gamma\setminus \Gamma_{z,M
\epsilon}}Tf(w)\,g_{z,\epsilon}(w)dw \Big| \leq C\, M(Tf)(z),\quad z
\in \Gamma, \quad \epsilon_0 <\epsilon,
\end{equation}
for a positive constant $C=C(\ep_0,L).$
\end{lemm}
The small number $\ep_0$ will be chosen in the next section.
\begin{proof} Recall that
\begin{align}
g_{z,\ep}(w) &= T(K_{z,\epsilon})(w) \\& =
-\frac{1}{\pi^2} \operatorname{p.v.} \int_{\Gamma\setminus \Gamma_{z,\epsilon}} \frac{1}{(\zeta-w)(\zeta-z)}\,d\zeta \\
& = -\frac{1}{\pi^2} \frac{1}{w-z} \, \operatorname{p.v.}
\int_{\Gamma\setminus \Gamma_{z,\epsilon}} \left( \frac{1}{\zeta-w}
- \frac{1}{\zeta-z} \right)\,d\zeta \\
& = -\frac{1}{\pi^2} \frac{1}{w-z} \,\operatorname{p.v.}
\int_{\Gamma\setminus \Gamma_{z,\epsilon}} \frac{1}{\zeta-w}\,d\zeta
+ \frac{1}{\pi^2} \frac{1}{w-z} \,\operatorname{p.v.}
\int_{\Gamma\setminus \Gamma_{z,\epsilon}} \frac{1}{\zeta-z} \,d\zeta \\
& = h(w)+k(w),
\end{align}
where in the last identity we defined $h(w)$ and $k(w)$.
Applying the bilipschitz character of $\gamma$ we conclude that
\begin{equation}\label{ka}
|k(w)| \leq \frac{1}{\pi^2} \frac{L^2}{M \ep_0^2}
\operatorname{length}(\Gamma),\quad w \in \Gamma\setminus
\Gamma_{z,M \epsilon}, \quad \ep_0 < \ep.
\end{equation}
The estimate of $h(w)$ is a little trickier. We have
\begin{equation}
h(w)= -\frac{1}{\pi^2} \frac{1}{w-z} \,\operatorname{p.v.}
\int_{\Gamma} \frac{1}{\zeta-w}\,d\zeta + \frac{1}{\pi^2}
\frac{1}{w-z} \,\operatorname{p.v.} \int_{ \Gamma_{z,\epsilon}}
\frac{1}{\zeta-w}\,d\zeta
\end{equation}
\noindent A simple application of Cauchy's Theorem gives that, if
$\Gamma$ has a tangent at $w$,
\begin{equation}
\operatorname{p.v.} \int_{\Gamma} \frac{1}{\zeta-w}\,d\zeta = \pi i.
\end{equation}
As before, the bilipschitz character of $\gamma$ yields
\begin{equation}
|w-z|\geq \frac{M\ep}{L}, \quad w \in \Gamma\setminus \Gamma_{z,M
\epsilon}
\end{equation}
and
$$
|w-\zeta| \geq |w-z|-|z-\zeta| \geq \ep \Big(\frac{M}{L}-L\Big), \quad w \in
\Gamma\setminus \Gamma_{z,M \epsilon} \quad \zeta \in
\Gamma_{z,\epsilon}
$$
Choose $M$ so that $M/L-L \geq 1.$ Then
$$
|w-\zeta| \geq \ep, \quad w \in \Gamma\setminus \Gamma_{z,M
\epsilon} \quad \zeta \in \Gamma_{z,\epsilon}.
$$
Gathering all the previous estimates we finally get
\begin{equation}\label{hac}
|h(w)| \leq \frac{1}{\pi}\frac{L}{M \ep_0}+ \frac{1}{\pi^2}
\frac{\operatorname{length(\Gamma)}}{\ep_0}, \quad w \in
\Gamma\setminus \Gamma_{z,M \epsilon}, \quad \ep_0 < \ep.
\end{equation}
Hence \eqref{ka} and \eqref{hac} yield
\begin{equation}
|g_{z,\ep}(w)| \leq C, \quad w \in \Gamma\setminus \Gamma_{z,M
\epsilon}, \quad \ep_0 < \ep,
\end{equation}
where $C=C(\ep_0, M, L, \operatorname{length}(\Gamma))$ is a
constant depending on $\ep_0, M, L$ and
$\operatorname{length}(\Gamma)$.
Therefore
$$
\Big|\int_{\Gamma\setminus \Gamma_{z,M
\epsilon}}Tf(w)\,g_{z,\epsilon}(w)dw \Big| \leq C\, \int_{\Gamma}
|Tf(w)| \,d\mu(w) \leq C \operatorname{length}(\Gamma) M(Tf)(z),
$$
which completes the proof of the lemma.
\end{proof}
\section{The proof of the Theorem}
For $z \neq 0$ let $\operatorname{Arg}(z)$ denote the principal
argument of $z$, so that $ 0\leq \operatorname{Arg}(z) < 2 \pi.$
\begin{lemm}\label{ml}
Given $\alpha > 0$ there exists a positive number $\ep_0=\ep_0 (L)$
with the following property. Assume that $0 < \ep_1 \leq \ep_0$,
$\ep_1/2 < \ep \leq \ep_1$ and that for a fixed $x \in \mathbb{R}$ we
have $\gamma(x)=0$. If $\gamma(x-\tau), \; \tau > 0,$ satisfies
\begin{equation}\label{sec}
\frac{\ep_1}{2 L} < |\gamma(x-\tau)|< L \ep_1,
\end{equation}
then, for some $\theta$ such that
$\gamma(x-\tau)=|\gamma(x-\tau)| e^{i \theta}$, we have
\begin{equation}\label{arg}
\big|\theta-\big(\operatorname{Arg}(\gamma(x+\ep))+\pi\big)\big| < \alpha.
\end{equation}
\end{lemm}
\begin{proof}
Consider the triangle with vertices $0, \gamma(x-\tau)$ and
$\gamma(x+\ep)$ and side lengths $A= |\gamma(x-\tau)|$,
$B=|\gamma(x+\ep)|$ and $C= |\gamma(x+\ep)-\gamma(x-\tau)|$. By the
cosine Theorem
$$
C^2 = A^2+B^2 -2 AB \cos(\phi),
$$
where $\phi$ is the angle opposite to the side $C.$ In other
terms
$$
1+ \cos(\phi) = \frac{(A+B-C)(A+B+C)}{2AB}.
$$
By asymptotic quasiconformality, given $\delta>0$ there exists $\eta_0
> 0$ such that \\ $C = |\gamma(x+\ep)-\gamma(x-\tau)| <\eta_0$ implies $A+B \leq (1+\delta)C.$
The bilipschitz property of $\gamma$ \eqref{bilipschitz} yields
$\ep_1/2L^2 \leq \tau \leq L^2 \ep_1.$ Hence
\begin{equation}
1+ \cos(\phi)\leq \delta L^4 \frac{(\ep_1+\tau)^2}{\ep_1 \tau} \leq
2 \delta L^6 (1+L^2)^2.
\end{equation}
Taking $\theta = \operatorname{Arg}(\gamma(x+\ep)) +\phi$ we see that $ |\theta-(\operatorname{Arg}(\gamma(x+\ep))+ \pi )| < \alpha $
provided $\delta$ is small enough. Since
$$
|\gamma(x+\ep)-\gamma(x-\tau)| \leq L (\ep + \tau) \leq \ep_0 L
(1+L^2),
$$
one has to choose $\ep_0$ so that $ \ep_0 L (1+L^2)\leq \eta_0,$
which shows the correct dependence of $\ep_0$ and completes the
proof of the Lemma.
\end{proof}
Given a point $z \in \Gamma$ we want now to construct a special Jordan arc $\Delta_z$ connecting $z$ to $\infty$ in the complement
of $\Gamma.$ Assume, without loss of generality, that $z=0$. Take $x \in \mathbb{R}$ with $\gamma(x)=0$. Let $\ep_0$ be the number
given in the preceding Lemma and define, for $j=0,1,2, \dots$, a polar rectangle by
\begin{equation}\label{polarR}
R_j = \Big\{w = |w|e^{i \theta}: \frac{\ep_0}{2^{j+1} L} < |w| < \frac{\ep_0 L}{2^{j}} \quad \text{and}\quad
\Big|\theta- \Big(\operatorname{Arg}\Big(\gamma\Big(x+\frac{\ep_0}{2^j}\Big)\Big)+\pi\Big)\Big| < \alpha\Big\}.
\end{equation}
\noindent Applying Lemma \ref{ml} with $\ep=\ep_1=\ep_0/2^j$ we conclude that
$$
\{\gamma(x-\tau) : 0< \tau \} \cap \Big\{w : \frac{\ep_0}{2^{j+1} L} < |w| < \frac{\ep_0 L}{2^{j}} \Big\} \subset R_j.
$$
\noindent We need to introduce another polar rectangle
$$
S_j = R_j \cap \Big\{w : \frac{\ep_0 L}{2^{j+1}} < |w|\Big\}, \quad j=0,1,2, \dots
$$
We define inductively $\Delta_z =\Delta_0$ on $S_j$ by just
requiring that the Jordan arc $ \Delta_0 \cap \overline{S_j} $ lies
in the unbounded component of the complement of $\Gamma$,
$\overline{S_j} $ being the closure of $S_j.$ We then connect
$\Delta_0 \cap \overline{S_0} $ with $\infty$ by a Jordan arc in the
complement of $\Gamma,$ with the only precaution of not reentering the
disc $D(0,\ep_0)$ once $\Delta_0$ has left it.
It is worth pointing out that the axes of two consecutive polar rectangles $R_j$ and $R_{j+1}$ make an angle less than $\alpha$.
This follows by the defining property of $\ep_0$ (see the proof of Lemma \ref{ml}).
\begin{lemm}\label{argument}
\begin{equation}\label{log}
\log(\gamma(x-\ep))-\pi i = \log(-\gamma(x-\ep)), \quad x \in \mathbb{R}, \quad 0 < \ep \leq \ep_0.
\end{equation}
\end{lemm}
\begin{proof}
We know that
\begin{equation}\label{diflog}
\log(\gamma(x-\ep))-\pi i = \log(-\gamma(x-\ep))+ 2 \pi m i
\end{equation}
for some integer $m$. Our goal is to compute the difference
$$
\log(\gamma(x-\ep))-\log(-\gamma(x-\ep))
$$
by the integral
$$
\int_{\varsigma} \frac{1}{z} \,dz,
$$
where $\varsigma$ is an appropriately chosen Jordan arc connecting
$-\gamma(x-\ep)$ to $\gamma(x-\ep)$ in the complement of $\Delta_0.$
\begin{figure}
\caption{The curve $\varsigma$}
\end{figure}
Assume that $\ep_0 / 2^{j+1} < \ep \leq \ep_0 / 2^{j}$, for some
non-negative integer $j.$ Define $N$ as the smallest integer
satisfying
$$
\frac{L \ep_0}{2^{j+N}} \leq \frac{\ep_0}{L 2^{j+1}}.
$$
This is equivalent to $L^2 \leq 2^{N-1}$ and so $N$ depends only on
$L.$ Hence $R_k \subset D(0,{\ep_0}/{L \,2^{j+1}}),\; k \geq
j+N,$ and, in particular, $R_k,\; k \geq j+N,$ does not intersect
the circumference $\partial D(0,|\gamma(x-\ep)|).$
The angle between the axis of the polar rectangle $R_{j+l}$ and that
of $R_j$ is not greater than $l \alpha \leq N \alpha, \; l=1, 2,...,
N-1.$ Set $\beta = N \alpha,$ so that $\beta$ can be as small as
desired by taking $\alpha= \alpha(L)$ appropriately. We conclude
that
$$
R_{j+l}\subset \{w: w= |w| e^{i \theta} \;\, \text{with}\;\, |\theta
- (\operatorname{Arg}(\gamma(x+\ep))+\pi)| < \beta \}, \quad l =
1,2,\dots,N-1.
$$
We are now ready to define the Jordan arc $\varsigma.$ Let
$z(x,\ep)$ be the point at the intersection of the circumference
$\partial D(0,|\gamma(x-\ep)|)$ and the ray
$$\{w: w= |w|e^{i \theta} \;\, \text{with}\;\, \theta= \operatorname{Arg}(\gamma(x+\ep))+\pi- \beta \}.$$
\noindent Let $A$ stand for the arc in $\partial
D(0,|\gamma(x-\ep)|)$ having $-\gamma(x-\ep)$ as initial point and
$z(x,\ep)$ as end point (counterclockwise oriented).
There exists a rectifiable Jordan arc $\sigma$ joining the points
$z(x,\ep)$ and $\gamma(x-\ep)$ in the bounded component of the
complement of $\Gamma$ with the property that
$$\operatorname{length}(\sigma) \leq C\, |z(x,\ep)-\gamma(x-\ep)|.$$
This can be seen readily as follows. Set $\tilde{\gamma}(e^{ix})=
\gamma(x),\; x \in \mathbb{R}.$ Then $\tilde{\gamma}$ is a
bilipschitz homeomorphism between $\mathbb{T}$ and $\Gamma$ and
thus can be extended to a global bilipschitz homeomorphism of the
plane onto itself (see \cite{Tuk1},\cite{Tuk2}). The existence of the arc $\sigma$ is then
easily proved by transferring the question via the extended
bilipschitz homeomorphism.
Define $\varsigma = A \cup \sigma,$ oriented as already specified.
Note that $\varsigma$ lies in the complement of $\Delta_0,$ by the
previous discussion, in particular, the definition of $N$ and
$\beta.$
Therefore
\begin{equation}\label{varlog}
\log(\gamma(x-\ep))-\log(-\gamma(x-\ep)) = \int_{\varsigma}
\frac{1}{z} \,dz.
\end{equation}
On one hand we have
\begin{equation}
\int_{A} \frac{1}{z} \,dz = \pi i + O(\beta)
\end{equation}
and on the other hand
\begin{equation}
\Big|\int_{\sigma} \frac{1}{z} \,dz\Big| \leq
\frac{C\,|z(x,\ep)-\gamma(x-\ep)|}{|\gamma(x-\ep)|} \leq C\,
\beta=O(\beta).
\end{equation}
If $\beta$ is small enough so that $O(\beta)< \pi,$ then, by
\eqref{diflog}, we get that $m=0,$ and the lemma is proved.
\end{proof}
We need a final lemma, which concludes the proof of the Theorem.
\begin{lemm}\label{efa}
Let $\Gamma$ be an asymptotically conformal chord-arc curve and let $\gamma$ be a bilipschitz parametrization of $\Gamma$ (in the sense of \eqref{bilipschitz}).
Then there exists a constant $C > 1$ and a positive number $\ep_0$ such that
\begin{equation}\label{efasegon}
C^{-1}\, \frac{|\gamma(x+\ep)+\gamma(x-\ep)- 2 \gamma(x)|}{\ep} \leq |F(x,\ep)|
\leq C \, \frac{|\gamma(x+\ep)+\gamma(x-\ep)- 2 \gamma(x)|}{\ep},
\end{equation}
for $x \in \mathbb{R}$ and $0< \ep < \ep_0.$
\end{lemm}
\begin{proof}
Without loss of generality assume that $\gamma(x)=0.$ Let $\ep_0$ be the small number provided by Lemma \ref{ml}. By the construction of
the arc $\Delta_0$ described in the proof of Lemma \ref{ml} we have that the segment joining $-\gamma(x-\ep)$ and $\gamma(x+\ep)$ lies in the complement of $\Delta_0.$ We have, by Lemma \ref{argument},
\begin{align}
F(x,\epsilon) & = \log\big(\gamma(x+\epsilon)\big) - \log\big(\gamma(x-\epsilon)\big) + \pi i \\ &
= \log\big(\gamma(x+\epsilon)\big) - \log\big(-\gamma(x-\epsilon)\big)
\end{align}
and so
\begin{align}
F(x,\epsilon) & = \int_0^1 \frac{d}{dt} \log\big(-\gamma(x-\epsilon)+ t(\gamma(x+\epsilon)+\gamma(x-\epsilon)) \big) \,dt\\ &
= \int_0^1 \frac{\gamma(x+\epsilon)+\gamma(x-\epsilon)}{-\gamma(x-\epsilon)+ t(\gamma(x+\epsilon)+\gamma(x-\epsilon))} \,dt.
\end{align}
Set, to simplify notation, $a= -\gamma(x-\epsilon)$, $b=\gamma(x+\epsilon)$ and let $\theta$ denote the angle between $a$ and $b$. By Lemma \ref{ml} we know that $\theta$ is as small as we wish. In particular we can assume that $\cos(\theta) \geq 1/2.$ Thus, using the cosine Theorem,
\begin{align}
|a+t(b-a)|^2 & = (1-t)^2 |a|^2 + t^2 |b|^2 + 2 (1-t) t |a| |b| \cos(\theta) \\ & \geq \frac{1}{2}\,((1-t)|a|+t |b| )^2 \geq \frac{\ep^2}{2L^2},
\end{align}
and
\begin{equation}
|F(x,\ep)| \leq \frac{\sqrt{2} L }{\ep} |\gamma(x+\ep)+\gamma(x-\ep)|,
\end{equation}
which is the upper estimate in \eqref{efasegon}.
For the lower estimate we set $z_t= -\gamma(x-\epsilon)+ t(\gamma(x+\epsilon)+\gamma(x-\epsilon)). $ Since
$ \operatorname{Re}(z_t) \geq |z_t|/2 $ and $ |z_t| \leq (1-t)\,|\gamma(x-\epsilon)|+t\,|\gamma(x+\epsilon)| \leq L \ep$,
\begin{align}\label{efalower}
\Big| \int_0^1 \frac{1}{z_t} \,dt \Big| \geq
\operatorname{Re} \int_0^1 \frac{1}{z_t} \,dt & =
\int_0^1 \frac{\operatorname{Re} (z_t) }{| z_t|^2} \,dt \\& \geq \int_0^1 \frac{1}{2| z_t|}\,dt \geq
\frac{1}{2 L \ep}.
\end{align}
To complete the proof of the Theorem one only needs to combine
Lemmas \ref{Flog}, \ref{smalltrunc} and \ref{efa}.
\end{proof}
\begin{rem*}\label{rem_decay}
Let $a= \gamma(x)-\gamma(x-\epsilon)$, $b=\gamma(x+\epsilon)-\gamma(x)$ and let $\alpha(x,\epsilon)$ be the angle spanned by $a$ and $b$.
By geometric considerations and using the cosine Theorem,
one can see that for a bilipschitz parametrization $\gamma$ such that
\begin{equation}\label{bilipschitz2}
c\,|x-y| \leq |\gamma(x) -\gamma(y)| \leq C \,|x-y|, \quad x, y \in \mathbb{R}, \quad |x-y| \leq \frac{T}{2},
\end{equation}
we get the a priori estimate
\begin{equation}
|\gamma(x+\ep)+\gamma(x-\ep)- 2 \gamma(x)|^{2}\leq c^2\epsilon^2+C^2\epsilon^2-2cC\epsilon^2 \cos\alpha(x,\epsilon).
\end{equation}
So, in the general case, we can guarantee just a linear decay of the second finite difference $|\gamma(x+\epsilon)+\gamma(x-\epsilon)-2\gamma(x)|$ and the logarithmic condition \eqref{snddiff} gives information about the local behavior of the best constants $c$ and $C$ around $x$ and about the decay of $\alpha(x,\epsilon)$ for $\epsilon$ small. This remark will be useful in the next section.
\end{rem*}
\section{An example}
In this section we provide an example of a curve $\gamma$ which is not $C^1$ but for which the improved Cotlar's inequality \eqref{m2} holds. The curve will be constructed in a recursive way and will be parametrized by arc-length. Without loss of generality, we will focus on defining a curve which is not closed. Indeed, possibly by connecting the ends of this curve in a smooth way, we can reduce to the same setting as in the previous sections.

Let $0<\alpha<\pi/2.$ Let $F_{\alpha}:[0,1]\to \mathbb{R}$ be the function with support in $[1/4,3/4]$ which is linear in $[1/4,1/2]$ and $[1/2,3/4]$ with slope $\tan\alpha$ in $[1/4,1/2]$ and $-\tan\alpha$ in $[1/2,3/4]$. In other words
\begin{equation}
F_{\alpha}(t):=\max\Big\{0, \Big(\frac{1}{4}-\Big|t-\frac{1}{2}\Big|\Big)\tan\alpha\Big\}.
\end{equation}
Let $\xi>0$. For $t\in\mathbb{R}$ we define the function
\begin{equation}
\eta_\xi(t):=\eta\Big(\frac{t}{\xi}\Big)\frac{1}{\xi},
\end{equation}
where $\eta$ is a smooth, even and positive function such that $\supp\eta\subset [-1,1]$ and $\int\eta(t)dt=1.$
For $0<\xi<1/100$ we define the regularized function
\begin{equation}
\lambda_{\alpha}:=F_\alpha*\eta_\xi.
\end{equation}
We will call the curve $\Lambda_{\alpha}:=\big(t,\lambda_{\alpha}(t)\big)_{t\in[0,1]}$ an $\alpha$-\textit{patch}.

An $\alpha$-patch has the following properties:
\begin{itemize}
\item $\Lambda_{\alpha}$ is the graph of a function $\lambda_{\alpha}:[0,1]\rightarrow \mathbb{R}$ which is symmetric around $1/2$.
\item if we denote by $[a,b]$ the segment joining the points $a,b\in\mathbb{R}^2,$ then $\Lambda_{\alpha}$ contains the segments $I_{\alpha}:=[(0,0),(1/4-\xi,0)],$ $II_{\alpha}:=[(1/4+\xi,\xi\tan\alpha),(1/2-\xi,(1/4-\xi)\tan\alpha)],$ $III_{\alpha}:=[(1/2+\xi, (1/4-\xi)\tan\alpha),(3/4-\xi,\xi\tan\alpha)]$ and $IV_{\alpha}:=[(3/4+\xi,0),(1,0)].$ We denote by $C^i_\alpha,$ $i=1,2,3$ the remaining three non-affine parts of the graph. Precisely, $C^1_\alpha$ joins the segments $I_\alpha$ and $II_\alpha,$ $C^2_\alpha$ the segments $II_\alpha$ and $III_\alpha$ and $C^3_\alpha$ the segments $III_\alpha$ and $IV_\alpha.$
\item the function $\lambda_\alpha$ is convex on the intervals below $C^1_\alpha$ and $C^3_\alpha$ and concave on the interval below $C^2_\alpha.$
\end{itemize}
The idea is that the $\alpha$-patch is a smoothened corner, as shown in Figure~\ref{fig:non_c1}.
\begin{figure}
\caption{An $\alpha$-patch}
\label{fig:non_c1}
\end{figure}
\begin{rem}\label{size_tau}
Let us denote by $\tau(\alpha)$ the difference between the length of the (non-smoothened) graph of $F_{\alpha}$ and the length of $\Lambda_\alpha$. For what follows, we need to estimate its behavior for small values of $\alpha$. It suffices to observe that
\begin{equation}\label{tau_error}
\begin{split}
\tau(\alpha)&:=\length(F_\alpha)-\length(\Lambda_\alpha)\\
&=\int^1_0\Big(\sqrt{1+|f'_\alpha*\eta_\xi|^2(t)}\Big)-\Big(\sqrt{1+|f'_\alpha|^2(t)}\Big)dt\\
&=\int^1_0\frac{|f'_\alpha*\eta_\xi|^2(t)-|f'_\alpha|^2(t)}{\Big(\sqrt{1+|f'_\alpha*\eta_\xi|^2(t)}\Big)+\Big(\sqrt{1+|f'_\alpha|^2(t)}\Big)}dt\leq 2\,\|f'_\alpha\|^2_\infty = 2\tan^2\alpha.
\end{split}
\end{equation}
\end{rem}
\subsection*{Definition of the curve $\Gamma$}
Let $\alpha_j:=1/j$ for $j=1,2,\ldots$ positive integer. For the sake of notational convenience we replace the subscript $\alpha_j$ by $j$; for instance, we write $\Lambda_j$ for $\Lambda_{\alpha_j}$, $I_j$ for $I_{\alpha_j},\ldots,IV_j$ for $IV_{\alpha_j}$ and $C^i_j$ for $C^i_{\alpha_j}$. Moreover, $\tau_j:=\tau(\alpha_j).$
Now we can define $\Gamma$ according to the following recursive steps:
\begin{itemize}
\item $\Gamma_1:=\Lambda_1.$
\item We would like to glue on $II_1$ an appropriate rescaled, translated and rotated copy $\tilde{\Lambda}_2$ of $\Lambda_2.$ The angle of rotation is $\alpha_1.$ The scaling factor and the translation are chosen so that the origin of $\tilde{\Lambda}_2$ is $(1/4,0)$ and the end is $\big(1/2, (\tan\alpha)/4\big).$
Denote by $\widetilde{II}_2$ the image of $II_2$ via the same affinity which maps $\Lambda_2$ to $\tilde{\Lambda}_2$; let us use the tilde to denote the images of the other parts of the patch via the same map, too. Delete the segment $II_1$ from $\Lambda_1$ and add $\tilde{\Lambda}_2.$ Now the endings of $\tilde{\Lambda}_2$ should be deleted in order to make a connection with $\Lambda_1.$ The precise expression for the second step curve is
\begin{equation}
\Gamma_2:=\big((\Lambda_1\setminus II_1)\cup \tilde{\Lambda}_2\big)\setminus \big((\tilde{I}_2\cup \widetilde{IV}_2)\setminus II_1\big).
\end{equation}
\item Given $\Gamma_n,$ which is a ``gluing'' of affine copies $\tilde{\Lambda}_j$ of $\Lambda_j$ for $j\in\{1,\ldots,n\},$ where $\widetilde{II}_j$ is the image of $II_j$ under the same affinity which maps $\Lambda_j$ to $\tilde{\Lambda}_j$, we define
\begin{equation}
\Gamma_{n+1}:=((\tilde{\Lambda}_n\setminus \widetilde{II}_n)\cup \tilde{\Lambda}_{n+1})\setminus ((\widetilde{I}_{n+1}\cup \widetilde{IV}_{n+1})\setminus \widetilde{II}_n),
\end{equation}
where $\tilde{\Lambda}_{n+1}$ is a re-scaled copy of $\Lambda_{n+1}$ rotated by an angle $\sum_{j=1}^{n+1}\alpha_j$ whose vertices coincide with the images of $(1/4,0)$ and $\big(1/2, \tan(\alpha_n)/4\big)$ via the transformation of the plane that sends $\Lambda_n$ to $\widetilde{\Lambda}_n$.
\end{itemize}
Then, $\{\Gamma_n\}_n$ converges in the Hausdorff distance (a similar case is presented, for example, in \cite{Falconer}) and we can simply define $\Gamma:=\lim_n\Gamma_n.$
\begin{figure}
\caption{The second step in the construction of the curve $\Gamma$}
\label{fig:exam}
\end{figure}
Let us now state an estimate that we will use in what follows.
\begin{lemm}\label{estim_angle}
Given $0<\alpha<\pi/2$ and $z_1,z_2\in\Lambda_{\alpha}$, we have
\begin{equation}\label{angle_corner}
l(z_1,z_2)\leq \frac{|z_1-z_2|}{\cos\alpha},
\end{equation}
where $l(z_1,z_2)$ denotes the length of the arc of $\Lambda_\alpha$ joining $z_1$ and $z_2$.
\end{lemm}
\begin{proof}
Let $t_1:=\lambda^{-1}_{\alpha}(z_1)$ and $t_2:=\lambda^{-1}_{\alpha}(z_2)$. We have $|t_1-t_2|\leq |z_1-z_2|.$ Moreover, because of the way we constructed $\Lambda_{\alpha},$ we have that $|\lambda'_{\alpha}(t)|\leq \tan \alpha$ for every $t\in [0,1].$ Collecting all these observations,
\begin{align}
l(z_1,z_2)&=\int_{t_1}^{t_2}\sqrt{1+|\lambda_{\alpha}'(t)|^2}dt\leq \int_{t_1}^{t_2}\sqrt{1+|\tan\alpha|^2}dt\\
&=|t_2-t_1|\sqrt{1+|\tan\alpha|^2}=\frac{|t_2-t_1|}{\cos\alpha}\leq \frac{|z_2-z_1|}{\cos\alpha}.
\qedhere
\end{align}
\end{proof}
\begin{rem}
Notice that the inequality \eqref{angle_corner} keeps holding for a scaling of $\Lambda_\alpha,$ in particular for the $\tilde{\Lambda}_j,\,j\in\mathbb{N}.$
\end{rem}
Let us define
\begin{equation}
L_n:= 2^{-2n+1}\Big(\prod_{j=1}^{n-1}\cos\alpha_j\Big)^{-1},
\end{equation}
which is half of the diameter of the rescaled patch $\tilde{\Lambda}_n$ in the construction of the curve $\Gamma.$ We will use $L_n$ as a quantifier of the scale.
\begin{lemm} \label{errors}For every $\delta>0$ there exists $k\in\mathbb{N}$ big enough such that for $z_1,z_2\in \Gamma\cap (\bigcup_{j=k}^{\infty}\tilde{\Lambda}_j)$ we have
\begin{equation}\label{sm_length}
l(z_1,z_2)\leq (1+\delta)|z_1-z_2|.
\end{equation}
\end{lemm}
\begin{proof} Let us start with some geometrical observations. \\
Let $k\in\mathbb{N}$ and $\zeta_1,\zeta_2\in\Gamma$. Suppose, moreover, that $\zeta_1\in \tilde{I}_k$ and $\zeta_2\in\widetilde{IV}_k$. It is useful to define
\begin{equation}
R_k:=l(\zeta_1,\zeta_2)-|\zeta_1-\zeta_2|.
\end{equation}
Observe that the definition of $R_k$ does not depend on the choice of $\zeta_1$ and $\zeta_2$ in the respective segments. In particular, by the construction of the curve $\Gamma$ and by the definition of the error term $\tau_j$ in \eqref{tau_error}, it is not difficult to check that we have
\begin{equation}\label{err}
R_k= \Big(3 \sum_{j=k+1}^{\infty} L_j- L_k\Big)- \sum_{j=k+1}^{\infty} 2L_j\tau_j.
\end{equation}
The term between parentheses in the right hand side is the length of the gluing of the `non-regularized' $\alpha$-patches in the construction and the second sum is an error term due to the smoothing in the definition of $\alpha$-patch.\\
Because of how we chose $L_j$ and $\tau_j,$ the quantity $R_k$ represents the error we make in estimating the length of the arc of the curve between $\zeta_1\in \tilde{I}_k$ and $\zeta_2\in\widetilde{IV}_k$ compared to $|\zeta_1-\zeta_2|.$ The presence of the factor $2L_j$ in the last sum in the right hand side of \eqref{err} is due to the fact that the diameter of $\tilde{\Lambda}_j$ is equal to $2L_j$ and, thus, the error term $\tau_j$ has to be rescaled by that value.
It turns out that
\begin{equation}\label{small_error}
\frac{R_k}{L_k}\rightarrow 0 \,\,\text{ as }k\rightarrow\infty.
\end{equation}
Indeed, we have
\begin{align}
\frac{3}{L_k} \sum_{j=k+1}^{\infty} L_j&=\sum_{j=k+1}^{\infty}\frac{3}{4^{j-k+1}}\Big(\prod_{l=k}^{j}\cos\alpha_l\Big)^{-1}\\ \nonumber
&\leq
\sum_{j=k+1}^{\infty}\frac{3}{4^{j-k+1}}(\cos\alpha_k)^{-(j-k)}=\frac{12 \cos\alpha_k}{4\cos \alpha_k-1}-3
\end{align}
and the last term tends to $1$ as $k\rightarrow\infty.$ Moreover, using \eqref{tau_error} and since $L_j\leq 2^{k-j}L_k$ for $j>k$, we have that
\begin{equation}
\frac{1}{L_k}\sum_{j=k+1}^{\infty}2L_j\tau_j\lesssim \tau_{k+1} \sum_{j=k+1}^\infty {2^{k-j}}\rightarrow 0\,\,\text{ as }k\rightarrow\infty,
\end{equation}
so that \eqref{small_error} follows.\\
Let us combine this observation with \eqref{angle_corner} to prove \eqref{sm_length}. Let $z_1,z_2\in\Gamma$. Observe that each point of $\Gamma$ belongs to $\widetilde{\Lambda}_j$ for at most two different $j$.
Let $k_1$ be the maximum index such that $z_1\in\tilde{\Lambda}_{k_1}$ and let $k_2$ be the maximum index such that $z_2\in\tilde{\Lambda}_{k_2}$. The rest of the proof works with minor changes if we take the minimum instead of the maximum in the definitions of $k_1$ and $k_2.$\\
Without loss of generality, suppose $k_1\leq k_2.$
If $k_1=k_2$, the definition of $R_k$ and the estimate \eqref{angle_corner} allow us to write
\begin{equation}\label{easy_case}
l(z_1,z_2)\leq \frac{|z_1-z_2|}{\cos\alpha_{k_1}},
\end{equation}
if the points are at a distance $|z_1-z_2|\leq L_{k_1+1}$. For $|z_1-z_2|\geq L_{k_1+1}/4,$ we have to consider the additional error term $R_{k_1+1}$. In particular
\begin{equation}
l(z_1,z_2)\leq \frac{|z_1-z_2|}{\cos\alpha_{k_1}}+R_{k_1+1}\leq \frac{|z_1-z_2|}{\cos\alpha_{k_1}}+\frac{R_{k_1+1}}{4L_{k_1+1}}|z_1-z_2|,
\end{equation}
so that, invoking \eqref{small_error}, the lemma is proven in the case $k_1=k_2$.\\
Let us consider the other case, $k_1<k_2$. If $z_2\in\widetilde{\Lambda}_{k_1}$, \eqref{easy_case} easily applies. So we can suppose $z_2\not\in\widetilde{\Lambda}_{k_1}.$
In this case
\begin{equation}\label{ineqL}
|z_1-z_2|\geq \frac{L_{k_1+1}}{4}.
\end{equation}
Let $z'_2\in\widetilde{II}_{k_1}$ be the orthogonal projection of $z_2$ on the segment $\widetilde{II}_{k_1}.$ Using the triangular inequality and denoting by
\begin{equation}\label{height}
h_{k_1+1}:=\min\{h: \widetilde{\Lambda}_{k_1+1}\subset [0,h]n_V+V \text{ for some affine line }V \text{ with normal }n_V\}
\end{equation}
the width of $\tilde{\Lambda}_{k_1+1},$ we have
\begin{equation}\label{eps_1}
|z_1-z'_2|\leq |z_1-z_2|+ h_{k_1+1}.
\end{equation}
Let us remark that, by construction of $\Gamma,$
\begin{equation}\label{ineq}
\frac{h_{k_1+1}}{L_{k_1+1}}\rightarrow 0 \text{ as } k\rightarrow\infty.
\end{equation}
Given $m\in\mathbb{N}$ and $u,v\in\Gamma_m$, it is useful to denote by $l_m(u,v)$ the length of the arc of $\Gamma_m$ joining $u$ and $v$.
Now we want to prove that
\begin{equation}\label{tbp}
l(z_1,z_2)\leq l_{k_1}(z_1,z'_2)+ R_{k_1+1}.
\end{equation}
Let us just consider the case $z_1\in \tilde{I}_{k_1},$ since the other cases are analogous.
If $z_2\in \tilde{I}_{k_1+1}$ or $z_2\in \widetilde{IV}_{k_1+1},$ \eqref{tbp} holds trivially because $z_2=z'_2.$ Otherwise, let $\zeta$ be a point on $\widetilde{IV}_{k_1+1}$ and let us consider the quantities $l(z_2,\zeta)$ and $|z'_2-\zeta|.$ Observe that the consideration below does not depend on the auxiliary point $\zeta$ of $\widetilde{IV}_{k_1+1}$ we choose. Clearly $l(z_2,\zeta)\geq |z'_2-\zeta|$ and, because of the definition of $R_{k_1+1},$ the equality
\begin{equation}
l(z_1,z_2)+l(z_2,\zeta)=R_{k_1+1}+l_{k_1}(z_1,z'_2)+|z'_2-\zeta|,
\end{equation}
holds. So
\begin{equation}
l(z_1,z_2)=l_{k_1}(z_1,z'_2)+R_{k_1+1}+(|z'_2-\zeta|-l(z_2,\zeta))\leq l_{k_1}(z_1,z'_2)+R_{k_1+1}.
\end{equation}
The proof of the lemma is now over: indeed using \eqref{angle_corner}, \eqref{small_error}, \eqref{ineqL}, \eqref{eps_1} and \eqref{ineq} we get
\begin{align}\label{toinvert}
\frac{l(z_1,z_2)}{|z_1-z_2|}&\leq \frac{l_{k_1}(z_1,z'_2)}{|z_1-z_2|}+\frac{R_{k_1+1}}{|z_1-z_2|} \leq \frac{|z_1-z'_2|}{|z_1-z_2|\cos\alpha_{k_1}}+\frac{R_{k_1+1}}{|z_1-z_2|}\\
&\leq \frac{1}{\cos\alpha_{k_1}} + \frac{4h_{k_1+1}}{\cos\alpha_{k_1}L_{k_1+1}}+\frac{4R_{k_1+1}}{L_{k_1+1}}\rightarrow 1 \,\,\text{ as } k_1\rightarrow \infty. \qedhere
\end{align}
\end{proof}
\begin{prop}\label{nonsmooth}
$\Gamma$ is asymptotically smooth but not $C^1.$
\end{prop}
\begin{proof}
Let $\tilde{z}'_j\in\Gamma$ be the image of the point $z_{\alpha_j}'$ via the map which sends $\Lambda_j$ to $\widetilde{\Lambda}_j$.
We have that the curve $\Gamma$ is not $C^1$ at the point $z_0:=\lim_j z_j,$ where $z_j$ is an arbitrary point of $\tilde{\Lambda}_j$. Indeed, by our choice of the angles in the construction, $\sum_j \alpha_j = +\infty$ and the curve spirals close to $z_0$.\\
Let us now prove that the curve is asymptotically smooth.\\
Notice that we may write $\Gamma=\Gamma_1\cup\Gamma_2\cup\{z_0\},$ where $\Gamma_1$ and $\Gamma_2$ are smooth curves. Then, for every couple of points $\{z_1,z_2\}$ in one of those two smooth components we can exploit the smoothness to state that for every $\delta$ there exists $\bar{\epsilon}$ such that for $\epsilon<\bar{\epsilon}$ and $|z_1-z_2|=\epsilon$ we have
\begin{equation}
l(z_1,z_2)\leq (1+\delta)\epsilon.
\end{equation}
This, together with the result of Lemma \ref{errors} concludes the proof.
\end{proof}
Let us consider the arc-length parametrization $\gamma$ of $\Gamma$. Since $\Gamma$ is asymptotically smooth, $\gamma$ is bilipschitz. In particular,
\begin{equation}
\frac{1}{C}|x-y|\leq|\gamma(x)-\gamma(y)|\leq |x-y|
\end{equation}
for a constant $C>1$ and $x,y\in [0,L(\Gamma)].$
As in Remark \ref{rem_decay} we denote by $\alpha(x,\epsilon)$ the angle between the vectors $\gamma(x)-\gamma(x-\epsilon)$ and $\gamma(x+\epsilon)-\gamma(x)$.
Because of the geometrical considerations in Remark \ref{rem_decay}, we have that
\begin{equation}\label{geom}
|\gamma(x+\epsilon)+\gamma(x-\epsilon)- 2 \gamma(x)|^2\leq \epsilon^2\Big( \frac{1}{C^2}+1-\frac{2}{C}\cos\alpha(x,\epsilon)\Big)
\end{equation}
for $\epsilon>0$ and $x\in[0,L(\Gamma)].$ Now we want to prove the estimate
\begin{equation}
|\gamma(x+\epsilon)+\gamma(x-\epsilon)- 2 \gamma(x)|\lesssim \frac{\epsilon}{|\log\epsilon|}.
\end{equation}
Since $\Gamma$ is smooth off the point $z_0$, arguing as in \cite{G} we see that the logarithmic condition \eqref{logomega} and the estimate \eqref{m2} are satisfied off that point.
Hence it suffices to prove \eqref{m2} for $\gamma(x)\in\cup_{k\geq k_0}\tilde{\Lambda}_k\cap\Gamma$ and $k_0$ big enough.
To do that, we will study the behavior of the angle $\alpha(x,\epsilon)$ and of the local value of the bilipschitz constant of $\gamma$ close to the point $z_0.$
\\
Since the curve is asymptotically smooth, as a corollary of Lemma~\ref{ml} we know that $\alpha(x,\epsilon)\to 0$ for $\epsilon$ small. Then, the second factor in the right hand side of \eqref{geom} behaves as
\begin{equation}
1+\frac{1}{C^2}-\frac{2}{C}\cos\alpha(x,\epsilon)= \Big[1+\frac{1}{C^2}-\frac{2}{C}\Big]+\frac{1}{C}\alpha(x,\epsilon)^2 + O(\alpha(x,\epsilon)^4)
\end{equation}
for $\epsilon\to 0.$\\
Let $x_0:=\gamma^{-1}(z_0).$ For $\epsilon>0,$ we denote by $C_{\epsilon}$ the smallest constant such that
\begin{equation}
\frac{1}{C_{\epsilon}}|x-y|\leq|\gamma(x)-\gamma(y)|\leq |x-y|
\end{equation}
holds for $x,y\in [x_0-\epsilon,x_0+\epsilon],$ i.e. the local value of the lower bilipschitz constant close to $x_0$.\\
Using this notation, to our purposes it suffices to prove that
\begin{equation}\label{estim_angle} % NOTE(review): this label duplicates the label of the lemma above (also 'estim_angle'); rename one of the two so that references resolve unambiguously.
|\alpha(x,\epsilon)|\lesssim \big|\log\epsilon\big|^{-1}
\end{equation}
and
\begin{equation}\label{coef_old}
\Big[1-\frac{1}{C_{\epsilon}}\Big]\lesssim \big|\log\epsilon\big|^{-1}
\end{equation}
for $\epsilon$ small and $\gamma(x)$ close enough to $z_0.$\\
In particular, instead of \eqref{coef_old} we will prove that the stronger estimate
\begin{equation}\label{estim_lip}
\Big[1-\frac{1}{C_{\epsilon}}\Big]\lesssim \big|\log\epsilon\big|^{-2}
\end{equation}
holds for $\epsilon$ small and $\gamma(x)$ close enough to $z_0.$
The following two lemmas respectively prove the estimate for the angle and the estimate for $C_\epsilon.$
\begin{lemm}\label{lemm_ang}
For every $\epsilon_0$ there exists an integer $k_0$ such that
\begin{equation}
|\alpha(x,\epsilon)|\lesssim |\log\epsilon|^{-1}
\end{equation}
for $\epsilon<\epsilon_0,$ $|x-x_0|<\epsilon_0$ and $\gamma(x-\epsilon)\in\bigcup_{k=k_0}^{\infty}\widetilde{\Lambda}_k\cap\Gamma.$
\end{lemm}
\begin{proof}
Let $\epsilon>0$ and $z=\gamma(x)\in \Gamma.$ Moreover, let us define $z_{\pm}:=\gamma(x\pm \epsilon).$
Let $k$ be the maximum index such that $z\in\tilde{\Lambda}_{k}$ and let $k_\pm$ be the maximum index such that $z_\pm\in\tilde{\Lambda}_{k_\pm}$. Without loss of generality, we will prove the lemma for $x<x_0.$ Let us proceed with some geometrical consideration.\\
\begin{figure}
\caption{A schematic representation of the setting of the proof.}
\end{figure}
Let $L_z$ denote the line passing through $z$ and parallel to the segment $\widetilde{II}_{k_-}.$
Due to the definition of the angle $\alpha(x,\epsilon)$, we can fix the line $L_z$ and bound $|\alpha(x,\epsilon)|$ by the absolute value of the smallest angle $\angle([z_-,z],L_z)$ that $L_z$ forms with the segment $[z_-,z]$ plus the absolute value of the smallest angle $\angle([z,z_+],L_z)$ that $L_z$ forms with the segment $[z,z_+].$\\
If $z$ belongs to $\widetilde{\Lambda}_{k_-},$ due to the properties of the $\alpha_{k_-}$-patch, the arc $\gamma([x-\epsilon, x])$ is entirely contained in a cone of vertex $z$ and aperture $\angle([z_-,z],L_z)$. By elementary geometric considerations, we can write
\begin{equation}\label{angle_1}
\big|\angle([z_-,z],L_z)\big|\leq \alpha_{k_-}.
\end{equation}
Again, due to few geometric observations (that are not substantial for the sequel and we decide to omit in order to make the proof more concise) and to the way $\Gamma$ is defined, it is not difficult to see that
\begin{equation}\label{angle_2}
\big|\angle([z_+,z],L_z)\big|\leq 2\alpha_{k_-}.
\end{equation}
We are left to consider the case $z\not\in\widetilde{\Lambda}_{k_-}.$ As we observed in Lemma~\ref{errors}, in this case we have $|z_--z|\geq L_{k_-+1}/4.$ Moreover, $\bigcup_{j=k_-+1}^{\infty}\widetilde{\Lambda}_j\cap \Gamma$ is contained in a rectangle whose base lies on $\widetilde{II}_{k_-},$ whose length is smaller than, say, $5L_{k_-+1}/3$ and with height $h_{k_-+1}$ (for its definition we refer to \eqref{height} in Lemma~\ref{errors}). We recall that
\begin{equation}
\frac{h_j}{L_j}\to 0 \text{ for } j\to\infty.
\end{equation}
Now observe that $z_+\in\bigcup_{j=k_-}^{\infty}\widetilde{\Lambda}_j\cap \Gamma.$ For every point $z$ in this rectangle, using that $|z-z_+|\gtrsim L_{k_-+1}$, it holds that
\begin{equation}\label{angle_3}
|\angle([z_-,z],L_z)|\lesssim \alpha_{k_-}
\end{equation}
and
\begin{equation}\label{angle_4}
|\angle([z,z_+],L_z)|\lesssim \alpha_{k_-}.
\end{equation}
Joining \eqref{angle_1},\eqref{angle_2},\eqref{angle_3} and \eqref{angle_4}, we get
\begin{equation}
|\alpha(x,\epsilon)|\lesssim \alpha_{k_-}.
\end{equation}
Then, by the construction of $\Gamma$ and the definition of $L_m$, $L_{m+1}/L_m\leq 1/2$ for every $m$, that by iteration leads to
\begin{equation}
L_m\leq 2^{-m}.
\end{equation}
Now, if $\gamma(x-\epsilon)\in \widetilde{\Lambda}_{k_-}$ for $k_-$ big enough, we have that $\epsilon\lesssim L_{k_-}$ so that
\begin{equation}
k_-\gtrsim |\log\epsilon|
\end{equation}
for $\epsilon$ small enough.
So, gathering all the considerations and recalling that $\alpha_{k_-}=1/k_-$, we get the desired result.
\end{proof}
\begin{lemm}There exists $\epsilon_1>0$ such that the inequality \eqref{estim_lip} holds for $\epsilon<\epsilon_1$.
\end{lemm}
\begin{proof}
Let us consider $z_1,z_2\in\Gamma$. Let $k_1$ be the maximum index such that $z_1\in\tilde{\Lambda}_{k_1}$ and $k_2$ the maximum index such that $z_2\in\tilde{\Lambda}_{k_2}$. Without loss of generality, $k_1\leq k_2$ and $\gamma^{-1}(z_1)\leq \gamma^{-1}(z_2).$ The idea is to prove that $C^{-1}_{\epsilon}$ is greater than a quantity which approximates $\cos\alpha_{k_1}$. It is convenient to split the study into different cases.\\
If $k_1=k_2$ and $\gamma^{-1}(z_2)<\bar{x}$ or $k_2=k_1+1$ and $z_2\in\widetilde{I}_{k_1+1},$ then \eqref{angle_corner} gives
\begin{equation}
|z_1-z_2|\geq \cos\alpha_{k_1}l(z_1,z_2).
\end{equation}
If $k_1=k_2$ and $\gamma^{-1}(z_2)>\bar{x}$ or $k_2=k_1+1$ and $z_2\in\widetilde{IV}_{k_1+1}$, then we can write
\begin{equation}
|z_1-z_2|\geq \cos\alpha_{k_1}\big(l(z_1,z_2)-R_{k_1+1}\big)=\Big(\cos\alpha_{k_1}-\cos\alpha_{k_1}\frac{R_{k_1+1}}{l(z_1,z_2)}\Big)l(z_1,z_2)
\end{equation}
and we recall that
\begin{equation}
\frac{R_{k_1+1}}{l(z_1,z_2)}\lesssim \frac{R_{k_1+1}}{L_{k_1+1}}\to 0 \qquad\text{ for }\qquad k_1\to\infty.
\end{equation}
In the remaining cases, we know from the proof of Lemma \ref{errors} that
\begin{equation}
|z_1-z_2|\geq \Big(\cos\alpha_{k_1}-\cos\alpha_{k_1}\frac{h_{k_1+1}}{l(z_1,z_2)}-\cos\alpha_{k_1}\frac{R_{k_1+1}}{l(z_1,z_2)}\Big)l(z_1,z_2),
\end{equation}
so that, using the same argument as at the end of the proof of Lemma \ref{lemm_ang} together with the Taylor expansion for the cosine, the proof is completed. Let us remark that the exponent $2$ in \eqref{estim_lip} appears because of the Taylor expansion.
\end{proof}
The two previous lemmas show that the arc-length parametrization $\gamma$ of $\Gamma$ is such that the estimate
\begin{equation}
T_*(f)(z)\lesssim M^2(Tf)(z)
\end{equation}
holds for every $z\in \Gamma.$
\subsection*{Final remarks on the curve $\Gamma$.}
The curve $\Gamma$ that we studied in this section can be considered as an example of a critical curve for which the main theorem holds. Indeed, another look at the estimates we got tells that most of those concerning the geometry of the curve are close to being sharp. Moreover, the finite second difference $|\gamma(x+\epsilon)+\gamma(x-\epsilon)-2\gamma(x)|$ has the right decay we need; the choice of a slower decay for the angles $\alpha_j$ causes worse estimates for $|\alpha(x,\epsilon)|$ and, hence, the finite second difference estimate to fail. Let us notice that the spiraling of $\Gamma$ close to the point $z_0$ also gives an idea of how the critical curves may look like.\\
Asymptotically smooth curves that are not $C^1$ may also be defined by means of complex analysis (exploiting, for example, the results in \cite{Pomm}) but we found a constructive approach more convenient to our purposes.
\label{Bibliography}
\end{document} |
\begin{document}
\title{Corrections for\ ``Efficient Active Learning of Halfspaces:\ an Aggressive Approach''}
Dear Editor and Reviewers,
\vspace*{2em}
We are glad to be informed that our paper has been accepted with minor revisions. We have addressed your comments and corrected the manuscript accordingly. Please see below our responses to your comments and suggestions.
\begin{flushright}
Sincerely Yours, $\qquad\qquad\qquad\qquad\qquad$\\
A.~Gonen, S.~Sabato, S.~Shalev-Shwartz
\end{flushright}
\section{Reviewer 1}
\begin{enumerate}
\item
We have added a relevant discussion after the (sketch of the) proof of
theorem 5. In addition we have included, in Section 6.2, our
experimental results showing the advantage of selecting a majority
hypothesis over a random one (see Figure 2).
\item
$P(\cdot )$ is the probability measure defined for $\mathcal{H}$, while $\mathbb{P}[ \cdot]$ is notation
for a probability of events. We have made a second pass to make sure
notations are consistent.
\item
In example 2 we do wish to demonstrate also that the label complexity
can be exponential in the dimension, even when the points are on the
unit sphere, thus we elect to pack as many points as possible.
\item
Fixed.
\item
Equation 3 is fixed. We thank the reviewer for the suggested changes
in presentation of the proof of theorem 5, however we feel that
providing separate results for the different parts of the proof, and
providing background on submodularity, would be more beneficial for
the reader who wishes to understand the problem deeply.
\item
Fixed.
\item
Regarding Example 22, we have clarified the discussion above the
example. The point of this example is that even when CAL and active
learning have access to the same pool size which is required by
passive learning, there can still be an exponential gap in their label
complexity. Example 22 does not use the full class of separating
hyperplanes but a subset of this class, and only shows a marked
difference when the dimension is very high. Thus we feel that Example
19 is still important as it uses the full halfspace class and works in
a low dimension.
\item
We have accidentally left out the normalization in the margin
definition. This is now fixed.
\item
Fixed.
\item
Redundant P(.) was removed.
\item
Fixed.
\item
Fixed.
\item
Fixed.
\item
Fixed.
\end{enumerate}
\section{Reviewer 2}
\begin{enumerate}
\item
We added further explanations as suggested.
\item
There is no i.i.d. assumption: the lower bounds for active learning
hold for the examples in Section 2 even if the algorithm is allowed to
query any point in the support of the distribution.
\item
\begin{enumerate}
\item
We have clarified the definition of the backwards arrow in the first
paragraph of section 3.
\item
Fixed.
\item
Fixed.
\item
Indeed we mean the union of all equivalence classes. We have added a
clarification on the first paragraph of page 6.
\end{enumerate}
\item
\begin{enumerate}
\item
We have added a comment to that effect.
\item
The volume estimation algorithm, proposed by Kannan et al. (1997), is quite involved and it might be misleading to describe it in a few short lines. We hope that the interested reader can find all the details in the original reference.
\item
In general P is not necessarily uniform. For a uniform distribution Vol(V) and P(V) are equivalent (up to a constant factor). We have added a clarification in the text after eq. (4).
\end{enumerate}
\item
\begin{enumerate}
\item
We have moved the algorithms for non-separable data and kernels from the appendix to section 5.2.
\item
Indeed this is correct. We have rephrased the result.
\end{enumerate}
\item
\begin{enumerate}
\item
The test errors were added.
\item
The lambda/norm parameter was chosen to yield reasonable passive
learning loss, and H was selected to be an upper bound to this
loss. The dimension was selected to generate a separable distribution
in the transformed space. As we mention in the text, having to guess
an upper bound on the loss is a disadvantage of our method.
\end{enumerate}
\end{enumerate}
\end{document} |
\begin{document}
\maketitle \sloppy
\thispagestyle{empty}
\belowdisplayskip=18pt plus 6pt minus 12pt \abovedisplayskip=18pt
plus 6pt minus 12pt
\parskip 4pt plus 1pt
\parindent 0pt
% NOTE(review): the macro definitions below were corrupted by a search-and-replace
% that expanded every macro name into its replacement text (e.g. \newcommand ->
% {\mathbb{N}}ewcommand, \bar -> {\bar{a}}r). They are reconstructed here with
% readable names; since the body of the paper uses the expanded forms, these
% names can be renamed freely without affecting the text.
\newcommand{\barint}{
\rule[.036in]{.12in}{.009in}\kern-.16in
\displaystyle\int }
\def\R{{\mathbb{R}}}
\def\Rplus{{[0,\infty)}}
\def\RR{{\mathbb{R}}}
\def\N{{\mathbb{N}}}
\def\bfl{{\mathbf{l}}}
\def\bu{{\bar{u}}}
\def\bg{{\bar{g}}}
\def\bG{{\bar{G}}}
\def\ba{{\bar{a}}}
\def\bv{{\bar{v}}}
\def\bmu{{\bar{\mu}}}
\def\Rn{{\mathbb{R}^{n}}}
\def\RN{{\mathbb{R}^{N}}}
\newcommand{\snr}[1]{\lvert #1\rvert}
\newcommand{\nr}[1]{\lVert #1 \rVert}
\newtheorem{theo}{\bf Theorem}
\newtheorem{coro}{\bf Corollary}[section]
\newtheorem{lem}[coro]{\bf Lemma}
\newtheorem{rem}[coro]{\bf Remark}
\newtheorem{defi}[coro]{\bf Definition}
\newtheorem{ex}[coro]{\bf Example}
\newtheorem{fact}[coro]{\bf Fact}
\newtheorem{prop}[coro]{\bf Proposition}
\newcommand{\dv}{{\rm div}}
\def\aI{\texttt{(a1)}}
\def\aII{\texttt{(a2)}}
\newcommand{\MB}{{(M_B^-)}}
\newcommand{\MBj}{{(M_{B_{r_j}}^-)}}
\newcommand{\aBi}{{a^B_{{\rm i}}}}
\newcommand{\A}{{\mathcal{ A}}}
\newcommand{\wt}{\widetilde}
\newcommand{\ep}{\varepsilon}
\newcommand{\vp}{\varphi}
\newcommand{\vt}{\vartheta}
\newcommand{\gb}{{g_{\bullet}}}
\newcommand{\gbn}{{({g_{\bullet}})_n}}
\newcommand{\vr}{\varrho}
\newcommand{\pa}{\partial}
\newcommand{\W}{{\mathcal{W}}}
\newcommand{\supp}{{\rm supp}}
\newcommand{\minu}{{\min_{\partial B_k}u}}
\newcommand{\data}{\textit{\texttt{data}}}
\parindent 1em
\begin{abstract}
We study properties of ${\mathcal{ A}}$-harmonic and ${\mathcal{ A}}$-superharmonic functions involving an operator having generalized Orlicz growth. Our framework embraces reflexive Orlicz spaces, as well as natural variants of variable exponent and double-phase spaces. In particular, Harnack's Principle and Minimum Principle are provided for ${\mathcal{ A}}$-superharmonic functions and boundary Harnack inequality is proven for ${\mathcal{ A}}$-harmonic functions.
\end{abstract}
\section{Introduction}
The cornerstone of the classical potential theory is the Dirichlet problem for harmonic functions. The focus of the nonlinear potential theory is similar, however, harmonic functions are replaced by $p$-harmonic functions, that is, continuous solutions to the $p$-Laplace equation $-\Delta_p u=-{\rm div}(|Du|^{p-2}Du)=0$, $1<p<\infty$. There are known attempts to adapt the theory to the case when the exponent varies in space, that is $p=p(x)$ for $x\in\Omega$ or the growth is non-polynomial. Inspired by the significant attention paid lately to problems with strongly nonstandard and non-uniformly elliptic growth e.g.~\cite{IC-pocket,ChDF,comi,hht,m,r} we aim at developing basics of potential theory for problems with an essentially broader class of operators embracing in one theory as special cases Orlicz, variable exponent and double-phase generalizations of the $p$-Laplacian. To cover the whole mentioned range of general growth problems we employ the framework described in the monograph~\cite{hahabook}. Let us stress that unlike the classical studies~\cite{hekima,KiMa92} the operator we consider does {\em not} enjoy homogeneity of a~form ${\mathcal{ A}}(x,k\xi)=|k|^{p-2}k{\mathcal{ A}}(x,\xi)$. Consequently, our class of solutions is {\em not} invariant with respect to scalar multiplication. Moreover, we allow for operators whose ellipticity is allowed to vary dramatically in the space variable. What is more, we do {\em not} need to assume in the definition of ${\mathcal{ A}}$-superharmonic function that it is integrable with some positive power, which is typically imposed in the variable exponent case, cf. e.g.~\cite{hhklm,laluto}.
We study fine properties of ${\mathcal{ A}}$-superharmonic functions defined by the Comparison Principle {with respect to} continuous solutions to $-{\rm div}\,{\mathcal{ A}}(x,Du)=0$. {Here ${\mathcal{ A}}:\Omega\times{\mathbb{R}}^{n}\to{\mathbb{R}}^{n}$ is assumed to have} generalized Orlicz growth expressed by means of an inhomogeneous convex $\Phi$-function $\varphi:\Omega\times{[0,\infty)}\to{[0,\infty)}$ satisfying natural non-degeneracy and balance conditions, see Section~\ref{sec:prelim} for details. In turn, the solutions belong to the Musielak-Orlicz-Sobolev space $W^{1,\varphi(\cdot)}(\Omega)$ described carefully in the monograph~\cite{hahabook}. {The assumptions on the operator are summarized below and will be referred to as \textbf{(A)} throughout the paper}.
\subsection*{Assumption (A)}
{We assume that} $\Omega \subset {\mathbb{R}}^{n}$, $n\ge 2$, is an open bounded set. Let a vector field ${\mathcal{ A}}:\Omega\times{\mathbb{R}}^{n}\to{\mathbb{R}}^{n}$ be a Carath\'eodory function, that is, $x\mapsto {\mathcal{ A}}(x,z)$ is measurable for every $z$ and $z\mapsto {\mathcal{ A}}(x,z)$ is continuous for a.e.~$x$. Assume further that the following growth and coercivity assumptions hold true for almost all $x\in \Omega$ and all $z\in \mathbb{R}^{n}\setminus \{0\}$:
\begin{flalign}\label{A}
\begin{cases}
\ \snr{{\mathcal{ A}}(x,z)} \le c_1^{\mathcal{ A}}\varphi\left(x,\snr{z}\right)/|z|,\\
\ c_2^{\mathcal{ A}} {\varphi\left(x,\snr{z} \right)} \le {\mathcal{ A}}(x,z)\cdot z
\end{cases}
\end{flalign}
with absolute constants $c_1^{\mathcal{ A}},c_2^{\mathcal{ A}}>0$ and some function $\varphi:\Omega\times{[0,\infty)}\to{[0,\infty)}$ being measurable with respect to the first variable, convex with respect to the second one and satisfying (A0), (A1), (aInc)$_p$ and (aDec)$_q$ with some $1<p\leq q<\infty$. {The precise statement of these conditions is given in Section~\ref{sec:prelim}.} We collect all parameters of the problem as $ \textit{\texttt{data}}=\textit{\texttt{data}}(p,q,c_1^{\mathcal{ A}},c_2^{\mathcal{ A}}). $
Moreover, let ${\mathcal{ A}}$ be monotone in the sense that for a.a. $x\in \Omega$ and any distinct $z_{1},z_{2}\in \mathbb{R}^{n}$ it holds that
\begin{flalign*}
0< \,\langle {\mathcal{ A}}(x,z_{1})-{\mathcal{ A}}(x,z_{2}),z_{1}-z_{2}\rangle.
\end{flalign*}
We shall consider weak solutions, ${\mathcal{ A}}$-supersolutions, ${\mathcal{ A}}$-superharmonic, and ${\mathcal{ A}}$-harmonic functions related to the problem\begin{equation}
\label{eq:main}-{\rm div}\, {\mathcal{ A}}(x,Du)= 0 \quad\text{in }\ \Omega.
\end{equation}
For precise definitions see~Section~\ref{sec:sols}.
\subsection*{Special cases}
{Besides the $p$-Laplace operator case, corresponding to the choice of} $\varphi(x,s)=s^{p},$ $1<p<\infty$, we cover by one approach a wide range of more degenerate operators. When we take $\varphi(x,s)=s^{p(x)}$, {with} $p: \Omega \to \mathbb{R}$ {such that} $1<p^{-}_{\Omega} \leq p(x)\leq p^{+}_{\Omega} < \infty$ {and satisfying} the $\log$-H\"older condition (a special case of (A1)), {we render the so-called $p(x)$-Laplace equation}
\begin{align*}
0=-\Delta_{p(x)} u=-{\rm div}(\snr{Du}^{p(x)-2}Du).
\end{align*}
Within the framework studied in~\cite{comi} solutions to the double phase version of the $p$-Laplacian \[0=-{\rm div}\, {\mathcal{ A}}(x,Du)=-{\rm div}\left(\omega(x)\big(\snr{Du}^{p-2}+a(x)\snr{Du}^{q-2}\big)Du\right)\] are analysed with $1<p\leq q<\infty$, possibly vanishing weight $0\leq a\in C^{0,\alpha}(\Omega)$ and $q/p\leq 1+\alpha/n$ (a~special case of (A1); sharp for density of regular functions) and with a bounded, measurable, separated from zero weight $\omega$. We embrace also the borderline case between the double phase space and the variable exponent one, cf.~\cite{bacomi-st}. Namely, we consider solutions to
\[0=-{\rm div}\, {\mathcal{ A}}(x,Du)=-{\rm div}\left(\omega(x)\snr{Du}^{p-2}\big(1+a(x)\log({\rm e}+\snr{Du})\big)Du\right)\] with $1<p<\infty$, log-H\"older continuous $a$ and a bounded, measurable, separated from zero weight $\omega$. Having an $N$-function $B\in\Delta_2\cap\nabla_2$, we can allow for problems with the leading part of the operator with growth driven by $\varphi(x,s)=B(s)$ with an example of \[0=-{\rm div} \, {\mathcal{ A}}(x,Du)=-{\rm div}\left(\omega (x)\tfrac{B(\snr{Du})}{\snr{Du}^2}Du\right)\]
with a bounded, measurable, and separated from zero weight $\omega$.
To give more new examples one can consider problems stated in weighted Orlicz (if $\varphi(x,s)=a(x)B(s)$), variable exponent double phase (if $\varphi(x,s)=s^{p(x)}+a(x)s^{q(x)}$), or multi phase Orlicz cases (if $\varphi(x,s)=\sum_i a_i(x)B_i(s)$), as long as $\varphi(x,s)$ is comparable to a~function doubling with respect to the second variable and it satisfies the non-degeneracy and no-jump assumptions (A0)-(A1), see Section~{\mathbb{R}}ef{sec:prelim}.
\subsection*{State of art} The key references for already classical nonlinear potential theory are~\cite{adams-hedberg,hekima,KiMa92}, but its foundations date back further to~\cite{HaMa1,HedWol}. A complete overview of the theory for equations with $p$-growth is presented in~\cite{KuMi2014}. The first generalization of~potential theory towards nonstandard growth is done in the weighted case~\cite{Mik,Tur}. So far significant attention was put on the variable exponent case, see e.g.~\cite{Alk,hhklm,hhlt,hklmp,laluto}, and analysis of related problems over metric spaces~\cite{BB}, there are some results obtained in the double-phase case~\cite{fz}, but to our best knowledge the Orlicz case is not yet covered by any comprehensive study stemming from~\cite{lieb,Maly-Orlicz}.
Let us mention the recent advances within the theory. Supersolutions to~\eqref{eq:main} are in fact {solutions to} measure data problems with nonnegative measure, that enjoy lately the separate interest, cf.~\cite{ACCZG,IC-gradest,IC-measure-data,IC-lower,CGZG,CiMa,KiKuTu,KuMi2014,min-grad-est} concentrating on their existence and gradient estimates. The generalization of studies on removable sets for H\"older continuous solutions provided by~\cite{kizo} to the case of strongly non-uniformly elliptic operators has been carried out lately in~\cite{ChDF,ChKa}. There are available various regularity results for related quasiminimizers having Orlicz or generalized Orlicz growth~\cite{hh-zaa,hhl,hht,hklmp,ka,kale,Maly-Orlicz}. For other recent developments in the understanding of the functional setting {we refer} also to~\cite{CUH,yags,haju,CGSGWK}.
\subsection*{Applications} This kind of results are useful in getting potential estimates for solutions to measure data problems, entailing further regularity properties of their solutions, cf.~\cite{KiMa92,KuMi2014,KuMi2013}. Particularly, the Maximum and Minimum Principles for $p$-harmonic functions together with properties of Poisson modification of $p$-superharmonic functions are important tools in getting Wolff potential estimates via the methods of~\cite{KoKu,tru-wa}. In fact, developing this approach further, {we employ} the results of our paper in the proof of Wolff potential estimates for problems with Orlicz growth~\cite{CGZG-Wolff}. They directly entail many natural and sharp regularity consequences and Orlicz version of the Hedberg--Wolff Theorem yielding full characterization of the natural dual space to the space of solutions by the means of the Wolff potential (see~\cite{HedWol} for the classical version).
\subsection*{Results and organization} Section~\ref{sec:prelim} is devoted to notation and basic information on the setting. In Section~\ref{sec:sols} we define weak solutions, $\mathcal{A}$-supersolutions, $\mathcal{A}$-harmonic and $\mathcal{A}$-superharmonic functions and provide proofs of their fundamental properties including the Harnack inequality for $\mathcal{A}$-harmonic functions (Theorem~\ref{theo:Har-A-harm}). Further analysis of $\mathcal{A}$-superharmonic functions is carried out in Section~\ref{sec:A-sh}. We prove there Harnack's Principle (Theorem~\ref{theo:harnack-principle}), fundamental properties of Poisson's modification (Theorem~\ref{theo:Pois}), and the Strong Minimum Principle (Theorem~\ref{theo:mini-princ}) together with their consequence of the boundary Harnack inequality (Theorem~\ref{theo:boundary-harnack}) for $\mathcal{A}$-harmonic functions.
\section{Preliminaries}\label{sec:prelim}
\subsection{Notation}
In the following we shall adopt the customary convention of denoting by $c$ a constant that may vary from line to line. Sometimes to skip rewriting a constant, we use $\lesssim$. By $a\simeq b$, we mean $a\lesssim b$ and $b\lesssim a$. By $B_R$ we shall denote a ball, usually skipping prescribing its center when it is not important. Then by $cB_R=B_{cR}$ we mean a ball with the same center as $B_R$, but with rescaled radius $cR$.
With $U\subset \mathbb{R}^{n}$ being a~measurable set with finite and positive measure $\snr{U}>0$, and with $f\colon U\to \mathbb{R}^{k}$, $k\ge 1$ being a measurable map, by
\begin{flalign*}
\barint_{U}f(x) \, dx =\frac{1}{\snr{U}}\int_{U}f(x) \,dx
\end{flalign*}
we mean the integral average of $f$ over $U$. We make use of the symmetric truncation at level $k>0$, $T_k:{\mathbb{R}}\to{\mathbb{R}}$, defined as follows \begin{equation*}T_k(s)=\left\{\begin{array}{ll}s & |s|\leq k,\\ k\frac{s}{|s|}& |s|\geq k. \end{array}\right. \label{Tk}\end{equation*}
\subsection{Generalized Orlicz functions} We employ the formalism introduced in the monograph~\cite{hahabook}. Let us present the framework.
For $L\geq 1$ a real-valued function $f$ is $L$-almost increasing, if $Lf(s) \geq f(t)$ for $s > t$; $f$ is called $L$-almost decreasing if $Lf(s) \leq f(t)$ for $s > t$.
\begin{defi} We say that $\varphi:\Omega\times{[0,\infty)}\to[0,\infty]$ is a convex $\Phi$--function, and write $\varphi\in\Phi_c(\Omega)$, if the following conditions hold:
\begin{itemize}
\item[(i)] For every $s\in{[0,\infty)}$ the function $x\mapsto\varphi(x, s)$ is
measurable and for a.e. $x\in\Omega$ the function $s\mapsto\varphi(x, s)$ is increasing, convex, and left-continuous.
\item[(ii)] $\varphi(x, 0) = \lim_{s\to 0^+} \varphi(x, s) = 0$ and $\lim_{s\to \infty} \varphi(x, s) =
\infty$ for a.e. $x\in\Omega$.
\end{itemize}
\end{defi}
\noindent Further, we say that $\varphi\in\Phi_c(\Omega)$ satisfies\begin{itemize}
\item[(aInc)$_p$] if there exist $L\geq 1$ and $p >1$ such that $s\mapsto\varphi(x, s)/s^p$ is $L$-almost increasing in ${[0,\infty)}$ for every $x\in\Omega$,
\item[(aDec)$_q$] if there exist $L\geq 1$ and $q >1$ such that $s\mapsto\varphi(x, s)/s^q$ is $L$-almost decreasing in ${[0,\infty)}$ for every $x\in\Omega$.
\end{itemize}
\noindent By $\varphi^{-1}$ we denote the inverse of a convex $\Phi$-function $\varphi$ {with respect to the second variable}, that is
\[
\varphi^{-1}(x,\tau) := \inf\{s \ge 0 \,:\, \varphi(x,s)\ge \tau\}.
\]
We shall consider those $\varphi\in\Phi_c(\Omega)$, which satisfy the following set of conditions.
\begin{itemize}
\item[(A0)] There exists $\beta_0\in (0, 1]$ such that $\varphi(x, \beta_0) \leq 1$ and
$\varphi(x, 1/\beta_0) \geq 1$ for all $x\in\Omega$.
\item[(A1)] There exists $\beta_1\in(0,1)$, such that for every ball $B$ with $|B|\leq 1$ it holds that
\[\beta_1\varphi^{-1}(x,s)\leq\varphi^{-1}(y,s)\quad\text{for every $s\in [1,1/|B|]$ and a.e. $x,y\in B\cap\Omega$}.\]
\item[(A2)] For every {$s>0$} there exist $\beta_2\in(0,1]$ and $h\in L^1(\Omega)\cap L^\infty(\Omega)$, such that \[\varphi(x,\beta_2 r)\leq\varphi(y,r) +h(x)+h(y)\quad\text{for a.e. $x,y\in \Omega$ whenever $\varphi(y,r)\in[0,s]$}.\]
\end{itemize}
Condition (A0) is imposed in order to exclude degeneracy, while (A1) can be interpreted as local continuity. Fundamental role is played also by (A2) which imposes balance of the growth of $\varphi$ with respect to its variables separately.
\emph{The Young conjugate} of $\varphi\in\Phi_c(\Omega)$ is the function $\widetilde\varphi:\Omega\times{[0,\infty)}\to[0,\infty]$
defined as $ \widetilde \varphi (x,s) = \sup\{r \cdot s - \varphi(x,r):\ r \in {[0,\infty)}\}.$ Note that Young conjugation is involute, i.e. $\widetilde{(\widetilde\varphi)}=\varphi$. Moreover, if $\varphi\in\Phi_c(\Omega)$, then $\widetilde\varphi\in\Phi_c(\Omega)$.
For $\varphi\in\Phi_c(\Omega)$, the following inequality of Fenchel--Young type holds true
\[
rs\leq \varphi(x,r)+\widetilde\varphi(x,s).\]
We say that a function $\varphi$ satisfies the $\Delta_2$-condition (and write $\varphi\in\Delta_2$) if there exists a~constant $c>0$, such that for every $s\geq 0$ it holds $\varphi(x,2s)\leq c(\varphi(x,s)+1)$. If $\widetilde\varphi\in\Delta_2,$ we say that $\varphi$ satisfies the $\nabla_2$-condition and denote it by $\varphi\in\nabla_2$. If $\varphi,\widetilde\varphi\in\Delta_2$, then we call $\varphi$ a doubling function. If $\varphi\in \Phi_c(\Omega)$ satisfies (aInc)$_p$ and (aDec)$_q$, then $\varphi\simeq\psi_1$ with some $\psi_1\in \Phi_c(\Omega)$ satisfying the $\Delta_2$-condition and $\widetilde\varphi\simeq \widetilde\psi_2$ with some $\widetilde\psi_2\in \Phi_c(\Omega)$ satisfying the $\Delta_2$-condition, so we can assume that functions within our framework are doubling. Note that also $\psi_1\simeq\widetilde\psi_2$.
In fact, within our framework
\begin{equation}
\label{doubl-star}\widetilde\varphi\left(x, {\varphi(x,s)}/{s}\right)\sim \varphi(x,s) \quad\text{for a.e. }\ x\in\Omega\ \text{ and all }\ s>0
\end{equation}
for some constants depending only on $p$ and $q$.
\subsection{Function spaces}\label{ssec:spaces}
For a comprehensive study of these spaces we refer to \cite{hahabook}. We always deal with spaces generated by $\varphi\in\Phi_c(\Omega)$ satisfying (aInc)$_p$, (aDec)$_q$, (A0), (A1), and (A2). For $f\in L^0(\Omega)$ we define {\em the modular} $\varrho_{\varphi(\cdot),\Omega}$ by
\begin{equation}
\label{modular}
\varrho_{\varphi(\cdot),\Omega} (f)=\int_\Omega\varphi (x, | f(x)|) dx.
\end{equation}
When it is clear from the context we omit assigning the domain.
\noindent {\em The Musielak--Orlicz
space} is defined as the set
\[L^{\varphi(\cdot)} (\Omega)= \{f \in L^0(\Omega):\ \ \lim_{\lambda\to 0^+}\varrho_{\varphi(\cdot),\Omega}(\lambda f) = 0\}\]
endowed with the Luxemburg norm
\[\|f\|_{\varphi(\cdot)}=\inf \left\{\lambda > 0 :\ \ \varrho_{\varphi(\cdot),\Omega} \left(\tfrac 1\lambda f\right) \leq 1\right\} .\]
For $\varphi\in\Phi_c(\Omega)$, the space $L^{\varphi(\cdot)}(\Omega)$ is a Banach space~\cite[Theorem~2.3.13]{hahabook}. Moreover, the following H\"older inequality holds true\begin{equation}
\label{in:Hold}\|fg\|_{L^1(\Omega)}\leq 2\|f\|_{L^{\varphi(\cdot)}(\Omega)}\|g\|_{L^{\widetilde\varphi(\cdot)}(\Omega)}.
\end{equation}
We define {\em the Musielak-Orlicz-Sobolev space} $W^{1,\varphi(\cdot)}(\Omega)$ as follows
\begin{equation*}
W^{1,\varphi(\cdot)}(\Omega)=\big\{f\in W^{1,1}_{loc}(\Omega):\ \ f,|D f|\in L^{\varphi(\cdot)}(\Omega)\big\},
\end{equation*}where $D$ stands for distributional derivative. The space is considered endowed with the norm
\[
\|f\|_{W^{1,\varphi(\cdot)}(\Omega)}=\inf\big\{\lambda>0 :\ \ \varrho_{\varphi(\cdot),\Omega} \left(\tfrac 1\lambda f\right)+ \varrho_{\varphi(\cdot),\Omega} \left(\tfrac 1\lambda Df\right)\leq 1\big\}\,.
\]
By $W_0^{1,\varphi(\cdot)}(\Omega)$ we denote a closure of $C_0^\infty(\Omega)$ under the above norm.
Because of the growth conditions $W^{1,\varphi(\cdot)}(\Omega)$ is a separable and reflexive space. Moreover, smooth functions are dense there.
\begin{rem}\cite{hahabook} If $\varphi\in \Phi_c(\Omega)$ satisfies (aInc)$_p$, (aDec)$_q$, (A0), (A1), (A2), {then} the strong (norm) topology of $W^{1,\varphi(\cdot)}(\Omega)$ coincides with the sequential modular topology. Moreover, smooth functions are dense in this space in both topologies.
\end{rem}
Note that as a consequence of \cite[Lemma~2.1]{bbggpv} for every function $u$, such that $T_k(u)\in W^{1,\varphi(\cdot)}(\Omega)$ for every $k>0$ (with $T_k$ given by~\eqref{Tk}) there exists a (unique) measurable function
$Z_u : \Omega \to \mathbb{R}^n$ such that
\begin{equation}\label{gengrad}
D T_k(u) = \chi_{\{|u|<k\}} Z_u\quad \hbox{a.e. in $\Omega$ and for every $k > 0$.}
\end{equation}
\end{equation}
With an abuse of~notation, we denote $Z_u$ simply by $D u$ and call it a {\it generalized gradient}.
{\subsection{The operator} Let us motivate that the growth and coercivity conditions from~\eqref{A} imply the expected proper definition of the operator involved in problem~\eqref{eq:main}. We notice that in our regime the operator $\mathfrak{A}_{\varphi(\cdot)}$ defined as
\[
\mathfrak{A}_{\varphi(\cdot)} v := \mathcal{A}(x,Dv)
\]
is well defined as $\ \mathfrak{A}_{\varphi(\cdot)} : W^{1,\varphi(\cdot)}_0(\Omega) \to (W^{1,\varphi(\cdot)}_0(\Omega))'\ $
via
\begin{flalign*}
\langle\mathfrak{A}_{\varphi(\cdot)}v,w\rangle:=\int_{\Omega}\mathcal{A}(x,Dv)\cdot Dw \,dx\quad \text{for}\quad w\in C^{\infty}_{0}(\Omega),
\end{flalign*}
where $\langle \cdot, \cdot \rangle$ denotes the dual pairing between the reflexive Banach spaces $W^{1,\varphi(\cdot)}(\Omega)$ and $(W^{1,\varphi(\cdot)}(\Omega))'$. Indeed, when $v\in W^{1,\varphi(\cdot)}(\Omega)$ and $w\in C_0^\infty(\Omega)$, growth conditions~\eqref{A}, H\"older's inequality~\eqref{in:Hold}, equivalence~\eqref{doubl-star}, and Poincar\'e inequality~\cite[Theorem~6.2.8]{hahabook} justify that
\begin{flalign}
\nonumber\snr{\langle \mathfrak{A}_{\varphi(\cdot)}v,w \rangle}\le &\, c\int_{\Omega}\frac{\varphi(x,\snr{Dv})}{\snr{Dv}}\snr{Dw} \ dx \le c\left\| \frac{\varphi(\cdot,\snr{Dv})}{\snr{Dv}}\right\|_{L^{\widetilde \varphi(\cdot)}(\Omega)}\|Dw\|_{L^{\varphi(\cdot)}(\Omega)}\nonumber \\
\le &\, c\|Dv\|_{L^{ \varphi(\cdot)}(\Omega)}\|Dw\|_{L^{\varphi(\cdot)}(\Omega)}\le c\|w\|_{W^{1,\varphi(\cdot)}(\Omega)}.\label{op}
\end{flalign}
By density argument, the operator is well-defined on $W^{1,\varphi(\cdot)}_0(\Omega)$.}
\section{Various types of solutions and the notion of $\mathcal{A}$-harmonicity} \label{sec:sols}
All the problems are considered under Assumption {\bf (A)}.
\subsection{Definitions and basic remarks} $\ $
A \underline{continuous} function $u\in W^{1,\varphi(\cdot)}_{loc}(\Omega)$ is {called} an {\em $\mathcal{A}$-harmonic function} in an open set $\Omega$ if it is a (weak) solution to the equation $-\operatorname{div}\mathcal{A}(x,Du)= 0$, {i.e.,
\begin{equation}
\label{eq:main:0}
\int_\Omega \mathcal{A}(x,Du)\cdot D\phi\,dx= 0\quad\text{for all }\ \phi\in C^\infty_0(\Omega).
\end{equation}
}
Existence and uniqueness of $\mathcal{A}$-harmonic functions are proven in \cite{ChKa}.
\begin{prop} \label{prop:ex-Ahf} Under {\rm Assumption {\bf (A)}} if $\Omega$ is bounded and $w\in W^{1,\varphi(\cdot)}(\Omega)\cap C(\Omega)$, then there exists a unique solution $u\in W^{1,\varphi(\cdot)}(\Omega)\cap C(\Omega)$ to problem
\begin{equation*}
\begin{cases}-\operatorname{div} \mathcal{A}(x,Du)= 0\quad\text{in }\ \Omega,\\
u-w\in W_0^{1,\varphi(\cdot)}(\Omega).\end{cases}
\end{equation*}
Moreover, $u$ is locally bounded and for every $E\Subset\Omega$ we have \[\|u\|_{L^\infty(E)}\leq c(\textit{\texttt{data}}, \|Du\|_{ W^{1,\varphi(\cdot)}(\Omega)}).\]
\end{prop}{}
We call a function $u\in W^{1,\varphi(\cdot)}_{loc}(\Omega)$ a (weak) {\em $\mathcal{A}$-supersolution} to~\eqref{eq:main:0} if~$-\operatorname{div}\mathcal{A}(x,Du)\geq 0$ weakly in $\Omega$, that is \begin{equation*}
\int_\Omega \mathcal{A}(x,Du)\cdot D\phi\,dx\geq 0\quad\text{for all }\ 0\leq\phi\in C^\infty_0(\Omega)
\end{equation*}
and a (weak) {\em $\mathcal{A}$-subsolution} if $-\operatorname{div}\mathcal{A}(x,Du)\leq 0$ weakly in $\Omega$, that is \begin{equation*}
\int_\Omega \mathcal{A}(x,Du)\cdot D\phi\,dx\leq 0\quad\text{for all }\ 0\leq\phi\in C^\infty_0(\Omega).
\end{equation*}
By density of smooth functions we can actually use test functions from $W^{1,\varphi(\cdot)}_0(\Omega)$.
The classes of {\em $\mathcal{A}$-superharmonic} and {\em $\mathcal{A}$-subharmonic} functions are defined by the Comparison Principle.
{ \begin{defi}\label{def:A-sh}
We say that a function $u$ is $\mathcal{A}$-superharmonic if
\begin{itemize}
\item[(i)] $u$ is lower semicontinuous;
\item[(ii)] $u \not\equiv \infty$ in any component of $\Omega$;
\item[(iii)] for any $K\Subset\Omega$ and any $\mathcal{A}$-harmonic $h\in C(\overline {K})$ in $K$, $u\geq h$ on $\partial K$ implies $u\geq h$ in $K$.
\end{itemize}
We say that an {\color{black} upper} semicontinuous function $u$ is $\mathcal{A}$-subharmonic if $(-u)$ is $\mathcal{A}$-superharmonic.
\end{defi}
}
The above definitions have the following direct consequences.
\begin{lem}\label{lem:A-arm-loc-bdd-below}
An $\mathcal{A}$-superharmonic function $u$ is locally bounded from below.\\ An $\mathcal{A}$-subharmonic function $u$ is locally bounded from above.
\end{lem}
\begin{lem}\label{lem:A-h-is-great}
If $u$ is $\mathcal{A}$-harmonic, then it is an $\mathcal{A}$-supersolution, an $\mathcal{A}$-subsolution, $\mathcal{A}$-superharmonic, and $\mathcal{A}$-subharmonic.
\end{lem}
{By minor modification of the proof of \cite[Lemma 4.3]{ka} we get the following fact.
\begin{lem}\label{lem:comp-princ} Let $u\in W^{1,\varphi(\cdot)}(\Omega)$ be an $\mathcal{A}$-supersolution to \eqref{eq:main:0}, and $v\in W^{1,\varphi(\cdot)}(\Omega)$ be an $\mathcal{A}$-subsolution to \eqref{eq:main:0}. If $\min(u-v,0) \in W^{1,\varphi(\cdot)}_0(\Omega)$, then $u \geq {v}$ a.e. in $\Omega$.
\end{lem}
We have the following estimate for $\mathcal{A}$-supersolutions. \begin{lem}[Lemma~5.1,~\cite{ChKa}]\label{lem:A-supers-cacc}If $u\in W^{1,\varphi(\cdot)}(\Omega)$ is a nonnegative $\mathcal{A}$-supersolution, $B\Subset\Omega,$ and $\eta\in C^{1}_{0}(B)$ is such that $0\leq \eta\leq 1$. Then for all $\gamma\in(1,p)$ there holds
\begin{flalign*}
\int_{B }u^{-\gamma}\eta^{q}\varphi(x,\snr{D u}) \, dx\le c\int_{B }u^{-\gamma}\varphi(x,\snr{D\eta}u) \, dx\quad\text{
\end{flalign*}
\end{lem}}
It is well known that solutions, subsolutions, and supersolutions can be described by the theory of quasiminimizers. Since many of the results on quasiminimizers from~\cite{hh-zaa} apply to our $\mathcal{A}$-harmonic functions we shall recall the definition.
Among all functions having the same `boundary datum' $w\in W^{1,\varphi(\cdot)}(\Omega)$ the function $u\in W^{1,\varphi(\cdot)}(\Omega)$ is a {\em quasiminimizer} if it has the least energy up to a factor $C$, that is if $(u-w)\in W_0^{1,\varphi(\cdot)}(\Omega)$ and
\begin{equation}\label{def-quasiminimizer}
\int_\Omega \varphi(x,|D u|)\,dx\leq C\int_\Omega \varphi(x, |D(u+v)|)\,dx
\end{equation}
holds true with an absolute constant $C>0$ for every $v\in W_0^{1,\varphi(\cdot)}(\Omega)$. We call a~function $u$ {\em superquasiminimizer} ({\em subquasiminimizer}) if~\eqref{def-quasiminimizer} holds for all $v$ as above that are additionally nonnegative (nonpositive).
\begin{lem}\label{lem:Ah-is-quasi}
An $\mathcal{A}$-harmonic function $u$ is a quasiminimizer.
\end{lem}
\begin{proof}
Let us take an arbitrary $v\in W_0^{1,\varphi(\cdot)}(\Omega)$. We may write $v = w + \tilde{v} - u$ with `boundary datum' $w$ and any $\tilde{v} \in W_0^{1,\varphi(\cdot)}(\Omega)$, and upon testing the equation \eqref{eq:main} with $v$ we obtain
\[ \int_\Omega {\mathcal{ A}}(x,D u)\cdot Du\,dx=\int_\Omega {\mathcal{ A}}(x,D u)\cdot D(w+\tilde{v})\,dx.\]
Then by coercivity of ${\mathcal{ A}}$, Young's inequality, growth of ${\mathcal{ A}}$ and doubling growth of~$\varphi$, for every $\varepsilon>0$ we have
\begin{flalign*}
c_2^{\mathcal{A}} \int_\Omega \varphi(x,|D u|)\,dx&\leq \int_\Omega \mathcal{A}(x,D u)\cdot Du\,dx=\int_\Omega \mathcal{A}(x,D u)\cdot D(w+\tilde{v})\,dx\\
&\leq \varepsilon \int_\Omega \widetilde\varphi(x,|\mathcal{A}(x,D u)|)\,dx+c(\varepsilon)\int_\Omega \varphi(x, |D(w+\tilde{v})|)\,dx\\
&\leq \varepsilon \int_\Omega \widetilde\varphi(x,c_1^{\mathcal{A}}\varphi(x,|D u|)/|Du|)\,dx+c(\varepsilon)\int_\Omega \varphi(x, |D(w+\tilde{v})|)\,dx\\
&\leq \varepsilon \bar c \int_\Omega \varphi(x,|D u|)\,dx+ c(\varepsilon)\int_\Omega \varphi(x, |D(w+\tilde{v})|)\,dx
\end{flalign*}
with $\bar c=\bar c(\textit{\texttt{data}})>0.$ Let us choose $\varepsilon>0$ small enough so that the first term on the right-hand side can be absorbed into the left-hand side. By rearranging terms, and using the fact that $u+v=w+\tilde{v}$, we get that
\[
\int_\Omega \varphi(x,|D u|)\,dx\leq C\int_\Omega \varphi(x, |D(u+v)|)\,dx\quad \text{
with $\ \ C=C(\textit{\texttt{data}})>0$}.
\]
Hence we get the claim.
\end{proof}
\noindent By the same calculations as in the above proof we have the following corollary.
\begin{coro}
If $u$ is an $\mathcal{A}$-supersolution, then $u$ is a superquasiminimizer, i.e.~\eqref{def-quasiminimizer} holds for all nonnegative $v\in W_0^{1,\varphi(\cdot)}(\Omega)$.
\end{coro}
\subsection{Obstacle problem }
We consider the set
\begin{flalign}\label{con}
\mathcal{K}_{\psi,w}(\Omega):=\left\{ v\in W^{1,\varphi(\cdot)}(\Omega)\colon \ v\ge \psi \ \ \mbox{a.e. in} \ \Omega \ \ \mbox{and} \ \ v-{w}\in W^{1,\varphi(\cdot)}_{0}(\Omega) \right\},
\end{flalign}
where we call $\psi:\Omega\to\overline{\mathbb{R}}$ the obstacle and $w \in W^{1,\varphi(\cdot)}(\Omega)$ the boundary datum. If $
\mathcal{K}_{\psi,w}(\Omega)\neq\emptyset$ by a~solution to the obstacle problem we mean a function $u\in \mathcal{K}_{\psi,w}(\Omega)$ satisfying
\begin{flalign}\label{obs}
\int_{\Omega}\mathcal{A}(x,Du)\cdot D(v-u) \ dx \ge 0 \quad \mbox{for all } \ v\in \mathcal{K}_{\psi,w}(\Omega).
\end{flalign}
We note that the following basic information on the existence, the uniqueness, and the Comparison Principle for the obstacle problem is provided in \cite{kale} and~\cite[Section~4]{ChKa}.
\begin{prop}[Theorem 2, \cite{ChKa}]\label{prop:obst-ex-cont}
Under~{\rm Assumption {\bf (A)}} let the obstacle $\psi\in W^{1,\varphi(\cdot)}(\Omega)\cup\{-\infty\}$ and the boundary datum $w\in W^{1,\varphi(\cdot)}(\Omega)$ be such that $\mathcal{K}_{\psi,w}(\Omega)\not =\emptyset$. Then there exists a function $u\in \mathcal{K}_{\psi,w}(\Omega)$ being a unique solution to the $\mathcal{K}_{\psi,w}(\Omega)$-obstacle problem \eqref{obs}. Moreover, if $\psi\in W^{1,\varphi(\cdot)}(\Omega)\cap C(\Omega)$, then $u$ is continuous and is $\mathcal{A}$-harmonic in the open set $\{x\in \Omega\colon u(x)>\psi(x)\}$.
\end{prop}
\noindent For more properties of solutions to related obstacle problems see also~\cite{BCP,Obs1,ChDF,Obs2,hhklm,ka}. In particular, in~\cite{ka} several basic properties of quasiminimizers to the related variational obstacle problem are proven.
\begin{prop}[Proposition 4.3, \cite{ChKa}]
\label{prop:cacc}
Let $B_r \Subset B_R \subset \Omega$.
Under assumptions of Proposition~\ref{prop:obst-ex-cont},
\begin{enumerate}
\item if $u$ is a solution to the $\mathcal{K}_{\psi,w}(\Omega)$-obstacle problem \eqref{obs}, then there exists $c=c(\textit{\texttt{data}},n)$, such that
\begin{align*}
\int_{ B_R} \varphi(x, |D(u-k)_+ |) \, dx \leq c \int_{ B_R} \varphi\left (x,\dfrac{(u-k)_+}{R-r}\right ) \, dx,\ \text{
where $k \geq \sup_{x \in B_R} \psi(x)$.}
\end{align*}
\item if $u$ is an $\mathcal{A}$-supersolution to \eqref{eq:main:0} in $\Omega$, then there exists $c=c(\textit{\texttt{data}},n)$, such that
\begin{align*}
\int_{B_R} \varphi(x, |Du_{-}|) \, dx \leq c \int_{ B_R} \varphi\left (x,\dfrac{|u_{-}|}{R}\right ) \, dx.
\end{align*}
\end{enumerate}
\end{prop}
Note that in fact in~\cite[Proposition~4.3]{ka} only {\it (1)} is proven in detail, but {\it (2)} follows by the same arguments.
\section{$\mathcal{A}$-superharmonic functions}\label{sec:A-sh}
\subsection{Basic observations}
\begin{prop}[Comparison Principle]\label{prop:comp-princ} Suppose $u$ is $\mathcal{A}$-superharmonic and $v$ is $\mathcal{A}$-subharmonic in $\Omega$.
If $\limsup_{y\to x} v(y)\leq\liminf_{y\to x} u(y)$
for all $x\in\partial \Omega$ (excluding the cases $-\infty\leq-\infty$ and $\infty\leq \infty$), then $v\leq u$ in $\Omega$.
\end{prop}
\begin{proof} When we fix $x\in\Omega$ and $\varepsilon>0$, by the assumption we can find a regular open set $D\Subset\Omega,$ such that $v<u+\varepsilon$ on $\partial D.$ Pick a decreasing sequence $\{\phi_k\}\subset C^\infty(\Omega)$ converging to $v$ pointwise in $\overline{D}$. Since $\partial D$ is compact, by lower semicontinuity of $(u+\varepsilon)$ we infer that $\phi_k\leq u+\varepsilon$ on $\partial D$ for some $k$. We take a function $h$ being $\mathcal{A}$-harmonic in $D$ and coinciding with $\phi_k$ on $\partial D$. By definition it is continuous
up to the boundary of $D$. Therefore, $v\leq h\leq u+\varepsilon$ on $\partial D$ and so $v\leq h\leq u+\varepsilon$ in $D$ as well. We get the claim by letting $\varepsilon\to 0$. \end{proof}
\begin{coro}\label{coro:min-A-super}Having the Comparison Principle one can deduce the following.
\begin{itemize}
\item[(i)] If $a_1,a_2\in{\mathbb{R}}$, $a_1\geq 0$, and $u$ is ${\mathcal{ A}}$-superharmonic in $\Omega,$ then so is $a_1u+a_2$.
\item[(ii)] If $u$ and $v$ are ${\mathcal{ A}}$-superharmonic in $\Omega,$ then so is $\min\{u,v\}.$
\item[(iii)] Suppose $u$ is not identically $\infty$, then $u$ is ${\mathcal{ A}}$-superharmonic in $\Omega$ if and only if $\min\{u,k\}$ is ${\mathcal{ A}}$-superharmonic in $\Omega$ for every $k=1,2,\dots$.
\item[(iv)] The function $u$ is ${\mathcal{ A}}$-superharmonic in $\Omega$, if it is ${\mathcal{ A}}$-superharmonic in every component of $\Omega.$
\item[(v)] If $u$ is ${\mathcal{ A}}$-superharmonic and finite a.e. in $\Omega$ and $E\subset\Omega$ is a nonempty open subset, then $u$ is ${\mathcal{ A}}$-superharmonic in $E$.
\end{itemize}{}
\end{coro}
\begin{lem}\label{lem:pasting}
Suppose $D\subset\Omega$, $u$ is ${\mathcal{ A}}$-superharmonic in $\Omega$, and $v$ is ${\mathcal{ A}}$-superharmonic in $D$. If the function
\[w=\begin{cases}
\min\{u,v\}\quad&\text{in }\ D,\\
u\quad&\text{in }\ \Omega\setminus D
\end{cases}{}\]
is lower semicontinuous, then it is ${\mathcal{ A}}$-superharmonic in $\Omega$.
\end{lem}
\begin{proof}Let $E\Subset\Omega$ be open and $h$ be an $\mathcal{A}$-harmonic function, such that $h\leq w$ on $\partial E.$ By the Comparison Principle of Proposition~\ref{prop:comp-princ} we infer that $h\leq w$ in $\overline{E}$. Since $w$
is lower semicontinuous, for every $x\in\partial D\cap E$ it holds that
\[
\lim_{\substack{y\in D\cap\Omega \\ y\to x}}h(y)\leq u(x)=w(x)\leq\liminf_{\substack{y\in D\cap\Omega \\ y\to x}} v(y).
\]
Consequently, for every $x\in\partial (D\cap E)$ one has \[
\lim_{\substack{y\in D\cap\Omega \\ y\to x}}h(y)\leq w(x)\leq\liminf_{\substack{y\in D\cap\Omega \\ y\to x}}w(y).\]
By the Comparison Principle of Proposition~\ref{prop:comp-princ} also $h\leq w$ in $D\cap E$. Then $h\leq w$ in $E$, which was to be proved.
\end{proof}
\begin{lem}\label{lem:cont-supersol-are-superharm} If $u$ is a continuous $\mathcal{A}$-supersolution, then it is $\mathcal{A}$-superharmonic.
\end{lem}
\begin{proof}
Since $u$ is continuous and finite a.e. (because it belongs to $W^{1,\varphi(\cdot)}_{loc}(\Omega)$), we only have to prove that the Comparison Principle for $\mathcal{A}$-superharmonic functions holds.
Let $G \Subset \Omega$ be an open set, and let $h$ be a continuous, $\mathcal{A}$-harmonic function in $G$, such that $h \leq u$ on $\partial G$. Fix $\epsilon >0$ and choose an open set $E \Subset G$ such that $u + \epsilon \geq h$ in $G \setminus E$. Since the function $\min\{u+\epsilon-h,0\}$ has compact support, it belongs to $W^{1,\varphi(\cdot)}(E)$. Hence Lemma~\ref{lem:comp-princ} implies $u+\epsilon \geq h$ in $E$, and therefore a.e. in $G$. Since the function is continuous, the inequality is true at each point of $G$. As $\epsilon$ was chosen arbitrarily, the claim follows.\end{proof}
We shall prove that ${\mathcal{ A}}$-superharmonic functions can be approximated from below by ${\mathcal{ A}}$-supersolutions.
\begin{prop}
\label{prop:from-below}
Let $u$ be $\mathcal{A}$-superharmonic in $\Omega$ and let $G\Subset\Omega$. Then there exists a nondecreasing sequence of continuous $\mathcal{A}$-supersolutions $\{u_j\}$ in $G$ such that $u=\lim _{j\to\infty}u_j$ pointwise in $G$. For nonnegative $u$, the approximating functions $u_j$ can be chosen nonnegative as well.
\end{prop}
\begin{proof}
Since $u$ is lower semicontinuous in $\overline{G}$, it is bounded from below and there exists a nondecreasing sequence $\{\phi_j\}$ of Lipschitz functions on $\overline{G}$ such that $u=\lim _{j\to\infty}\phi_j$ in $G$. For nonnegative $u$, obviously $\phi_j,$ $j\in\mathbb{N}$, can be chosen nonnegative as well. Let $u_j$ be the {solution of the $\mathcal{K}_{\phi_j,\phi_j}(G)$-obstacle problem, which by Proposition~\ref{prop:obst-ex-cont} is continuous} and \[\phi_j<u_j \qquad\text{in the open set }\ A_j=\{x\in G:\ \phi_j\neq u_j\}.\]
Moreover, $u_j$ is $\mathcal{A}$-harmonic in $A_j.$ By the Comparison Principle from Proposition~\ref{prop:comp-princ} we infer that the sequence $\{u_j\}$ is nondecreasing. Since $u$ is $\mathcal{A}$-superharmonic, we have $u_j\leq u$ in $A_j$. Then consequently $\phi_j\leq u_j\leq u$ in $ G.$ Passing to the limit with $j\to\infty$ we get that $u=\lim _{j\to\infty}u_j$, which completes the proof.
\end{proof}
\begin{lem}\label{lem:loc-bdd-superharm-are-supersol}
If $u$ is $\mathcal{A}$-superharmonic in $\Omega$ and locally bounded from above, then $u\in W^{1,\varphi(\cdot)}_{loc}(\Omega)$ and $u$ is an $\mathcal{A}$-supersolution in $\Omega$.
\end{lem}
\begin{proof}
Fix open sets $E\Subset G \Subset \Omega$. By Proposition~\ref{prop:from-below} there exists a nondecreasing sequence of continuous $\mathcal{A}$-supersolutions $\{u_j\}$ in $G$ such that $u=\lim _{j\to\infty}u_j$ pointwise in $G$. Since $u$ is locally bounded we may assume {$u_j\leq u <0$} in $G$. It follows from Proposition~\ref{prop:cacc} that the sequence {$\{|Du_j |\}$} is locally bounded in $L^{\varphi(\cdot)}(G)$. Since $u_j \to u$ a.e. in $G$, it follows that $u \in W^{1,\varphi(\cdot)}(G)$, and $Du_j \rightharpoonup Du$ weakly in $L^{\varphi(\cdot)}(G)$.
We now need to show that $u$ is an $\mathcal{A}$-supersolution in $\Omega$. To this end we first prove that (up to a subsequence) the gradients {$\{Du_j \}$} converge a.e. in $G$. We start with proving that
\begin{equation}
{\mathbf{l}}abel{Ijto0}
I_j = \int_{E} \Big({\mathcal{ A}}(x,Du) - {\mathcal{ A}}(x,Du_j) \Big)\cdot \big( Du - Du_j \big)\, dx\to 0 \quad \text{as}\ j\to\infty.
\end{equation}
Choose $\eta \in C_0^\infty(G)$ such that $0 \leq \eta \leq 1$, and $\eta = 1$ in {$E$}. Using $\psi = \eta(u-u_j)$ as a test function for the $\mathcal{A}$-supersolution $u_j$ and applying the H\"older inequality, the doubling property of $\varphi$, and the Lebesgue dominated convergence theorem we obtain
\begin{align*}
-\int_G \eta {\mathcal{ A}}(x,Du_j) &\cdot \big( Du - Du_j \big)\, dx
{\mathbf{l}}eq
\int_G (u-u_j){\mathcal{ A}}(x,Du_j)\cdot D\eta\, dx \\
&{\mathbf{l}}eq 2 \|(u-u_j)D\eta\|_{L^{\varphi(\cdot)}(G)} \|{\mathcal{ A}}(\cdot,Du_j) \|_{L^{\widetilde\varphi(\cdot)}(G)} \\
&{\mathbf{l}}eq c \|u-u_j\|_{L^{\varphi(\cdot)}(G)} \to 0.
\end{align*}
Moreover, since
$$
\eta {\mathcal{ A}}(\cdot , Du) \in L^{\widetilde\varphi(\cdot)}(G),
$$
the weak convergence $Du_j {\mathbb{R}}ightharpoonup Du$ in $L^{\varphi(\cdot)}(G)$ implies
$$
\int_G \eta {\mathcal{ A}}(x,Du)\cdot \big( Du - Du_j \big)\, dx \to 0.
$$
Then, since $\eta \big({\mathcal{ A}}(x,Du) - {\mathcal{ A}}(x,Du_j) \big)\cdot \big( Du - Du_j \big) \geq 0$ a.e. in $G$, we conclude with~\eqref{Ijto0}. Since the integrand in $I_j$ is nonnegative, we may pick up a subsequence (still denoted $u_j$) such that
\begin{equation} {\mathbf{l}}abel{eq:point-conv}
\Big({\mathcal{ A}}(x,Du(x)) - {\mathcal{ A}}(x,Du_j(x)) \Big)\cdot \big( Du(x) - Du_j(x) \big) \to 0\ \ \text{
for a.a. $x\in E$.}
\end{equation}
Fix $x \in E$ such that \eqref{eq:point-conv} is valid, and that $|Du(x)| < \infty$. Upon choosing further subsequence we may assume that\[{Du_j(x)}\to \xi \in \overline{{\mathbb{R}}^n}.\] Since we have
\begin{align*}
\big({\mathcal{ A}}(x,&Du(x)) - {\mathcal{ A}}(x,Du_j(x)) \big)\cdot \big( Du(x) - Du_j(x) \big) \\
&\geq
c_2^{\mathcal{ A}} \varphi(x,|Du_j(x)|) - c_1^{\mathcal{ A}} \frac{\varphi(x, |Du(x)|)}{|Du(x)|} |Du_j(x)| - c_1^{\mathcal{ A}} \frac{\varphi(x, |Du_j(x)|)}{|Du_j(x)|} |Du(x)| \\
&\geq c(\textit{\texttt{data}},|Du(x)|) \varphi(x,|Du_j(x)|) {\mathbf{l}}eft(1- \frac{|Du_j(x)|}{\varphi(x,|Du_j(x)|)} - \frac{1}{|Du_j(x)|} {\mathbb{R}}ight)
\end{align*}
and \eqref{eq:point-conv} is true, it must follow that $|\xi| < \infty$.
Since the mapping $\zeta \mapsto {\mathcal{ A}}(x, \zeta)$ is continuous, we have
$$
\big({\mathcal{ A}}(x,Du(x)) - {\mathcal{ A}}(x,\xi) \big)\cdot \big( Du(x) - \xi \big) = 0
$$
and it follows that $\xi = Du(x)$, and
$$
Du_j(x) \to Du(x) \qquad \text{for a.e. $x \in E$},
$$
and
$$
{\mathcal{ A}}(\cdot, Du_j) {\mathbb{R}}ightharpoonup {\mathcal{ A}}(\cdot, Du) \qquad \text{weakly in $L^{\widetilde\varphi(\cdot)}$}.
$$
Therefore $u$ is an $\mathcal{A}$-supersolution of \eqref{eq:main:0}. Indeed, if $\phi \in C_0^\infty(\Omega),$ $\phi \geq 0$ is such that ${\rm supp}\, \phi \subset E$, then {$D\phi \in L^{\varphi(\cdot)}(E)$} and we have
\begin{align*}
0 {\mathbf{l}}eq \int_\Omega {\mathcal{ A}}(x, Du_j) \cdot D\phi\, dx \to
\int_\Omega {\mathcal{ A}}(x, Du) \cdot D\phi\, dx \quad \text{as}\ j\to\infty.
\end{align*}
Since $E$ was arbitrary this concludes the proof.
\end{proof}
\subsection{Harnack's inequalities}
In order to get the strong Harnack inequality for $\mathcal{A}$-harmonic functions and the weak Harnack inequality for $\mathcal{A}$-superharmonic functions we need related estimates proved for $\mathcal{A}$-subsolutions and $\mathcal{A}$-supersolutions. Having Lemma~\ref{lem:Ah-is-quasi} we can specify results derived for quasiminimizers in~\cite{hh-zaa} to our case.
\begin{prop}[Corollary~3.6, \cite{hh-zaa}]
{\mathbf{l}}abel{prop:weak-Har-sub-sup}
For a locally bounded function $u\in W^{1,\varphi(\cdot)}_{loc}(\Omega)$ being ${\mathcal{ A}}$-subsolution in $\Omega$ there exist constants $R_0=R_0(n)>0$ and $C=C(\textit{\texttt{data}},n,R_0,{{\mathbb{R}}m ess\,sup}_{B_{R_0}} u)>0$, such that
\[{{\mathbb{R}}m ess\,sup}_{B_{R/2}}u-k{\mathbf{l}}eq C{\mathbf{l}}eft({\mathbf{l}}eft({\bar{a}}rint_{B_R}(u-k)_+^{s}\,dx{\mathbb{R}}ight)^\frac{1}{s}+R{\mathbb{R}}ight) \]
for all $R\in(0,R_0]$, $s>0$ and $k\in {\mathbb{R}}$.
\end{prop}
\begin{prop}[Theorem~4.3, \cite{hh-zaa}]
{\mathbf{l}}abel{prop:weak-Har-super-inf}
For a nonnegative function $u\in W^{1,\varphi(\cdot)}_{loc}(\Omega)$ being an $\mathcal{A}$-supersolution in $\Omega$ there exist constants $R_0=R_0(n)>0$, $s_0=s_0(\textit{\texttt{data}},n)>0$ and $C=C(\textit{\texttt{data}},n)>0$, such that
\[ {\mathbf{l}}eft({\bar{a}}rint_{B_R}u^{s_0}\,dx{\mathbb{R}}ight)^\frac{1}{s_0} {\mathbf{l}}eq C{\mathbf{l}}eft({{\mathbb{R}}m ess\,inf}_{B_{R/2}} u+R{\mathbb{R}}ight) \]
for all $R\in(0,R_0]$ provided $B_{3R}\Subset\Omega$ and $\varrho_{\varphi(\cdot),B_{3R}}(Du){\mathbf{l}}eq 1.$
\end{prop}
Let us comment on the above result. For the application in \cite{hh-zaa} dependency of $s_0$ on other parameters is not important and so -- not studied with attention. Actually, this theorem is not proven in detail in \cite{hh-zaa}, but refers to standard arguments presented in~\cite{hht,hklmp}. Their re-verification enables to find $s_0=s_0(\textit{\texttt{data}},n)$. Let us note that after we completed our manuscript, an interesting study on the weak Harnack inequalities with an explicit exponent, holding for unbounded supersolutions, within our framework of generalized Orlicz spaces appeared, see~\cite{bhhk}.
{Since an $\mathcal{A}$-harmonic function is an $\mathcal{A}$-subsolution and an $\mathcal{A}$-supersolution at the same time (Lemma~\ref{lem:A-h-is-great}), by Propositions~\ref{prop:weak-Har-sub-sup} and~\ref{prop:weak-Har-super-inf} we infer the full Harnack inequality.}
\begin{theo}[Harnack's inequality for ${\mathcal{ A}}$-harmonic functions]
{\mathbf{l}}abel{theo:Har-A-harm} For a nonnegative ${\mathcal{ A}}$-harmonic function $u\in W^{1,\varphi(\cdot)}_{loc}(\Omega)$ there exist constants $R_0=R(n)>0$, $s_0=s_0(\textit{\texttt{data}},n)>0$ and $C=C(\textit{\texttt{data}},n,R_0,{{\mathbb{R}}m ess\,sup}_{B_{R_0}} u)>0$, such that
\[ {{\mathbb{R}}m ess\,sup}_{B_{R}}u {\mathbf{l}}eq C{\mathbf{l}}eft({{\mathbb{R}}m ess\,inf}_{B_{R}} u+R{\mathbb{R}}ight) \]
for all $R\in(0,R_0]$ provided $B_{3R}\Subset\Omega$ and $\varrho_{\varphi(\cdot),B_{3R}}(Du){\mathbf{l}}eq 1.$
\end{theo}
\subsection{Harnack's Principle for ${\mathcal{ A}}$-superharmonic functions}
We are going to characterize the limit of nondecreasing sequence of ${\mathcal{ A}}$-superharmonic functions and their gradients.
\begin{theo}[Harnack's Principle for $\mathcal{A}$-superharmonic functions] \label{theo:harnack-principle} Suppose that $u_i$, $i=1,2,\ldots$, are $\mathcal{A}$-superharmonic and finite a.e. in $\Omega$. If the sequence $\{u_i\}$ is nondecreasing, then the limit function $u=\lim_{i \to \infty} u_i$ {is $\mathcal{A}$-superharmonic or infinite in $\Omega$.} Furthermore, if $u_i$, $i=1,2,\ldots$, are nonnegative, then up to a subsequence also $Du_i\to Du$ a.e. in $\{u<\infty\},$ where `$D$' stands for the generalized gradient, cf.~\eqref{gengrad}.
\end{theo}{}
\begin{proof} The proof is presented in three steps. We start by showing that the limit function is either $\mathcal{A}$-superharmonic or $u \equiv \infty$; then we concentrate on gradients, first proving the claim for an a priori globally bounded sequence $\{u_i\}$, and conclude by passing to the limit with the bound.
{\em Step 1.} Since $u_i$ are lower semicontinuous, so is $u$. The following fact holds: Given a compact set $K \Subset \Omega$, if $h \in C(K)$, $\epsilon >0$ is a small fixed number, and $u > h-\epsilon$ on $K$, then, for $i$ sufficiently large, $u_i > h-\epsilon$. Indeed, let's argue by contradiction. Assume that for every $i$ there exists $x_i\in K,$ such that
$$ u_i(x_i) {\mathbf{l}}eq h(x_i) - \epsilon.
$$
Since $K$ is compact, we can assume that $x_i \to x_o$. Fix $l \in \mathbb{N}$. Then, for $i > l$ we have
$$
u_l(x_i) {\mathbf{l}}eq u_i(x_i) {\mathbf{l}}eq h(x_i) - \epsilon
$$
The right-hand side in the previous display tends with $i \to \infty$ to $h(x_o)-\epsilon$. Hence
$$
u_l(x_o) {\mathbf{l}}eq {\mathbf{l}}iminf_{i\to\infty} u_l(x_i) {\mathbf{l}}eq h(x_o) - \epsilon
$$
Thus for every $l$ we have $u_l(x_o) \leq h(x_o) - \epsilon$, which implies $u(x_o) \leq h(x_o) - \epsilon$, which contradicts the fact that $u > h-\epsilon$ on $K$.
Using this fact we can prove that the limit function $u=\lim_{i \to \infty} u_i$ {is $\mathcal{A}$-superharmonic unless $u \equiv \infty$}. Choose an open $\Omega' \Subset {\Omega}$ and $h \in C(\overline{\Omega'})$ an $\mathcal{A}$-harmonic function. Assume the inequality $ u \geq h$ holds on $\partial \Omega'$. It follows that for every $\epsilon >0$ on $\partial \Omega'$ we have $u > h-\epsilon$ and, from the aforementioned fact, it follows that $u_i > h- \epsilon$ on $\partial \Omega'$. Since all $u_i$ are $\mathcal{A}$-superharmonic, Proposition~\ref{prop:comp-princ} yields that $u_i \geq h-\epsilon$ on $\Omega'$. Therefore $u \geq h-\epsilon$ on $\Omega'$. Since $\epsilon$ is arbitrary, we have $u \geq h$ on $\Omega'$. Therefore the Comparison Principle from the definition of $\mathcal{A}$-superharmonicity holds unless $u \equiv \infty$ in~$\Omega$. Finally, $u=\lim_{i \to \infty} u_i$ is $\mathcal{A}$-superharmonic unless $u \equiv \infty$.
{\em Step 2.} Assume $0{\mathbf{l}}eq u_i{\mathbf{l}}eq k$ for all $i$ with $k>1$ and choose open sets $E\Subset G\Subset{\Omega}$. By Lemma~{\mathbb{R}}ef{lem:A-supers-cacc} we get that
\[\varrho_{\varphi(\cdot),G}(Du_i){\mathbf{l}}eq c k^q\]
with $c=c(\textit{\texttt{data}},n)>0$ uniform with respect to $i$. Then, by doubling properties of $\varphi$, we infer that\begin{equation}{\mathbf{l}}abel{Duibound}
\|Du_i\|_{L^{\varphi(\cdot)}(G)}{\mathbf{l}}eq c (\textit{\texttt{data}},n,k).
\end{equation}
Consequently $\{u_i\}$ is bounded in $W^{1,\varphi(\cdot)}(G)$ and $u_i\to u$ weakly in $W^{1,\varphi(\cdot)}(G).$ Further, it has a non-relabelled subsequence converging a.e.~in $G$ to $u\in W^{1,\varphi(\cdot)}(G)$. Let us show that
\begin{equation}
\label{grad=grad} Du_i\to Du\qquad\text{a.e. in }\ E.
\end{equation}
We fix arbitrary $\varepsilon\in(0,1)$, denote \[J_i=\{x\in E:\ \big({\mathcal{ A}}(x,Du_i(x))-{\mathcal{ A}}(x,Du(x))\big)\cdot(Du_i(x)-Du(x))>\varepsilon\}\]
and estimate its measure. We have
\begin{flalign}
|J_i|{\mathbf{l}}eq& |J_i\cap\{|u_i-u|\geq \varepsilon^2\}|{\mathbb{N}}onumber\\
&+\frac{1}{\varepsilon}\int_{J_i\cap\{|u_i-u|<\varepsilon^2\}} \big({\mathcal{ A}}(x,Du_i)-{\mathcal{ A}}(x,Du)\big)\cdot(Du_i-Du)\,dx.{\mathbf{l}}abel{Jiest1}
\end{flalign}{}
Let $\eta\in C_0^\infty(G)$ be such that $\mathds{1}_{E}{\mathbf{l}}eq \eta{\mathbf{l}}eq \mathds{1}_{G}$. We define
\[w_1^i=\min\big\{(u_i+\varepsilon^2-u)^+,2\varepsilon^2\big\}\quad\text{and}\quad w_2^i=\min\big\{(u+\varepsilon^2-u_i)^+,2\varepsilon^2\big\}.\]
Then $w_1^i\eta$ and $w_2^i\eta$ are nonnegative functions from $W^{1,\varphi(\cdot)}_0(G)$ and can be used as test functions. Since $u$ and $u_i$, $i=1,2,{\mathbf{l}}dots$, are ${\mathcal{ A}}$-supersolutions we already know that $u_i\to u$ weakly in $W^{1,\varphi(\cdot)}(E').$ By growth condition {we can estimate like in~\eqref{op}} and by~\eqref{Duibound} we have
\begin{flalign*}
\int_{G\cap\{|u_i-u|<\varepsilon^2\}} {\mathcal{ A}}(x,Du)\cdot(Du_i-Du)\eta\,dx&{\mathbf{l}}eq \int_{G\cap\{|u_i-u|<\varepsilon^2\}} {\mathcal{ A}}(x,Du)\cdot D\eta\,w^i_1\,dx\\
&{\mathbf{l}}eq \, c\varepsilon^2\int_{G} \frac{\varphi(x,|Du|)}{|Du|}|D\eta|\,dx\\
&{\mathbf{l}}eq \, c\varepsilon^2
\end{flalign*}{}
with $c>0$ independent of $i$ and $\varepsilon$. Analogously
\begin{flalign*}
\int_{G\cap\{|u_i-u|<\varepsilon^2\}} {\mathcal{ A}}(x,Du_i)\cdot(Du_i-Du)\eta\,dx&{\mathbf{l}}eq \, c\varepsilon^2,
\end{flalign*}{}
Summing up the above observations we have\begin{flalign*}\frac{1}{\varepsilon}\int_{J_i\cap\{|u_i-u|<\varepsilon^2\}} \big({\mathcal{ A}}(x,Du_i)-{\mathcal{ A}}(x,Du)\big)\cdot(Du_i-Du)\,dx{\mathbf{l}}eq c\varepsilon.
\end{flalign*}{}
The left-hand side is nonnegative by the monotonicity of the operator, so due to~\eqref{Jiest1} we have \begin{flalign*}
|J_i|{\mathbf{l}}eq& |J_i\cap\{|u_i-u|\geq \varepsilon^2\}|+c\varepsilon
\end{flalign*}{}
with $c>0$ independent of $i$ and $\varepsilon$. By letting $\varepsilon\to 0$ we get that $|J_i|\to 0$. Because of the strict monotonicity of the operator, we infer~\eqref{grad=grad}. {We can conclude the proof of this step by choosing a diagonal subsequence.}
{\em Step 3.} Now we concentrate on the general case. For every $k=1,2,\dots$ we select subsequences $\{u_{i}^{(k)}\}_k$ of $\{u_i\}$ and find an $\mathcal{A}$-superharmonic function $v_k,$ such that $\{u_{i}^{(k+1)}\}\subset\{u_{i}^{(k)}\}$, $T_k(u^{(k)}_j)\to v_k$ and $D(T_k(u^{(k)}_j))\to D v_k$ a.e. in $\Omega$. We note that $v_k$ increases to a function which is $\mathcal{A}$-superharmonic or identically infinite. Additionally, $v_k=T_k(u).$ The diagonally chosen subsequence $\{u_i^{(i)}\}$ has all the desired properties.
\end{proof}{}
We have the following consequence of the Comparison Principle and Theorem~{\mathbb{R}}ef{theo:harnack-principle}.
\begin{coro}[Harnack's Principle for ${\mathcal{ A}}$-harmonic functions]{\mathbf{l}}abel{coro:Ah-harnack-principle} Suppose that $u_i$, $i=1,2,{\mathbf{l}}dots$, are ${\mathcal{ A}}$-harmonic in $\Omega$. If the sequence $\{u_i\}$ is nondecreasing then the limit function $u={\mathbf{l}}im_{i \to \infty} u_i$ {is ${\mathcal{ A}}$-harmonic or infinite in $\Omega$.}
\end{coro}
\subsection{Poisson modification}
The Poisson modification of an ${\mathcal{ A}}$-superharmonic function in a regular set $E$ carries the idea of its local smoothing. A boundary point is called regular if at this point the boundary value of any Musielak-Orlicz-Sobolev function is attained not only in the Sobolev sense but also pointwise. A set is called regular if all of its boundary points are regular. See~\cite{hh-zaa} for the result that if the complement of $\Omega$ is locally fat at $x_0\in\partialrtial\Omega$ in the capacity sense, then $x_0$ is regular. Thereby of course polyhedra and balls are obviously regular.
Let us consider a function $u$, which is ${\mathcal{ A}}$-superharmonic {and finite a.e.} in $\Omega$ and an open set $E\Subset\Omega$ with regular $\overline{E}.$ We define
\[u_E=\inf\{v:\ v \ \text{is ${\mathcal{ A}}$-superharmonic in $E$ and }{\mathbf{l}}iminf_{y\to x}v(y)\geq u(x)\ \text{for each }x\in\partialrtial \overline E\}\]
and the {\em Poisson modification} of $u$ in $E$ by
\[P(u,E)=\begin{cases}
u\quad&\text{in }\ \Omega\setminus E,\\
u_E &\text{in }\ E.
\end{cases}{}\]
\begin{theo}[Fundamental properties of the Poisson modification]{\mathbf{l}}abel{theo:Pois} If $u$ is ${\mathcal{ A}}$-superharmonic {and finite a.e.} in $\Omega$, then its Poisson modification $P(u,E)$ is
\begin{itemize}
\item [(i)] ${\mathcal{ A}}$-superharmonic in~$\Omega$,
\item [(ii)] ${\mathcal{ A}}$-harmonic in $E$,
\item [(iii)] $P(u,E){\mathbf{l}}eq u$ in~$\Omega.$
\end{itemize}
\end{theo}{}
\begin{proof} The fact that $P(u,E){\mathbf{l}}eq u$ in $\Omega$ results directly from the definition. By assumption $u$ is finite somewhere. Let us pick a nondecreasing sequence $\{\phi_i\}\subset C^\infty({\mathbb{R}}n)$ which converges to $u$ in $\overline{E}$. Let $h_i$ be the unique ${\mathcal{ A}}$-harmonic function agreeing with $\phi_i$ on $\partialrtial E.$ The sequence $\{h_i\}$ is nondecreasing by the Comparison Principle from Proposition~{\mathbb{R}}ef{prop:comp-princ}. Since $h_i{\mathbf{l}}eq u$, by Harnack's Principle from Corollary~{\mathbb{R}}ef{coro:Ah-harnack-principle} we infer that \[h:={\mathbf{l}}im_{i\to\infty}h_i\]
is ${\mathcal{ A}}$-harmonic in $E$. Moreover, $h{\mathbf{l}}eq u$ and thus $h$ is also finite somewhere. Since
\[u(y)={\mathbf{l}}im_{i\to\infty}\phi_i(y){\mathbf{l}}eq{\mathbf{l}}iminf_{x\to y} h(x)\quad\text{for }\ y\in\partialrtial E,\]
it follows that $P(u,E){\mathbf{l}}eq h$ in $E$. On the other hand, by the Comparison Principle (Proposition~{\mathbb{R}}ef{prop:comp-princ}) we get that $h_i{\mathbf{l}}eq P(u,E)$ in $E$ for every $i$. Therefore $P(u,E)|_E=h$ is ${\mathcal{ A}}$-harmonic in $E$. This reasoning also shows that $P(u,E)$ is lower semicontinuous and, by Lemma~{\mathbb{R}}ef{lem:pasting}, it is also ${\mathcal{ A}}$-superharmonic in $\Omega$.
\end{proof}{}
\subsection{Minimum and Maximum Principles}
Before we prove the principles, we need to prove the following lemmas.
\begin{lem}{\mathbf{l}}abel{lem:oo}
If $u$ is ${\mathcal{ A}}$-superharmonic and $u=0$ a.e. in $\Omega,$ then $u\equiv 0$ in $\Omega.$
\end{lem}
\begin{proof}{It is enough to show that $u=0$ in a given ball $B\Subset\Omega.$ {By lower semicontinuity of $u$ infer that it is nonpositive.} By Lemma~{\mathbb{R}}ef{lem:loc-bdd-superharm-are-supersol}, we get that $u\in W^{1,\varphi(\cdot)}(\Omega).$ Let $v=P(u,B)$ be the Poisson modification of $u$ in $B.$ By Theorem~{\mathbb{R}}ef{theo:Pois} we have that $v$ is continuous in $B$ and $v{\mathbf{l}}eq u{\mathbf{l}}eq 0.$ Therefore $v$ is an ${\mathcal{ A}}$-supersolution in $\Omega$ and $(u-v)\in W^{1,\varphi(\cdot)}_0(\Omega)$. Moreover,
\[c_2^{\mathcal{ A}} \int_\Omega \varphi(x,|Dv|)\,dx{\mathbf{l}}eq \int_\Omega {\mathcal{ A}}(x,Dv)\cdot Dv\,dx{\mathbf{l}}eq \int_\Omega {\mathcal{ A}}(x,Dv)\cdot Du\,dx=0,\]
where the last equality holds because $Du=0$ a.e. in $\Omega.$ But then, we directly get that $Dv=0$ and $v=0$ a.e. in $\Omega.$ By continuity of $v$ in $B$ we get that $v=0$ everywhere in $B$. In the view of $v{\mathbf{l}}eq u{\mathbf{l}}eq 0$, we get that also $u\equiv 0$ in $\Omega.$}\end{proof}
\begin{lem}{\mathbf{l}}abel{lem:A-sh-lsc} If $u$ is ${\mathcal{ A}}$-superharmonic and finite a.e. in $\Omega$, then for every $x\in\Omega$ it holds that $u(x)={\mathbf{l}}iminf_{y\to x}u(y)={{\mathbb{R}}m ess}{\mathbf{l}}iminf_{y\to x} u(y).$
\end{lem}
\begin{proof}
We fix arbitrary $x\in\Omega$ and by lower semicontinuity $u(x){\mathbf{l}}eq {\mathbf{l}}iminf_{y\to x}u(y){\mathbf{l}}eq {{\mathbb{R}}m ess}{\mathbf{l}}iminf_{y\to x} u(y)=:a.$ Let $\varepsilon\in (0,a)$ and $B=B(x,r)\subset\Omega$ be such that $u(y)>a-\varepsilon$ for a.e. $y\in B.$ By Corollary~{\mathbb{R}}ef{coro:min-A-super} function $v=\min\{u-a+\varepsilon,0\}$ is ${\mathcal{ A}}$-superharmonic in $\Omega$ and $v=0$ a.e. in $B.$ By Lemma~{\mathbb{R}}ef{lem:oo} $v\equiv 0$ in $\Omega,$ but then $u(x)\geq a-\varepsilon$. Letting $\varepsilon\to 0$ we obtain that $u(x)=a$ and the claim is proven.
\end{proof}
{We define $\psi:\Omega\times{[0,\infty)}\to{[0,\infty)}$ by \begin{equation}
\label{psi}
\psi(x,s)=\varphi(x,s)/s.
\end{equation}
Note that within our regime $s\mapsto\psi(\cdot,s)$ is strictly increasing, but not necessarily convex. Although in general $\psi$ does not generate the Musielak-Orlicz space, we can still define $\varrho_{\psi(\cdot),\Omega}$ by~\eqref{modular}, which is useful in quantifying the uniform estimates for truncations in the following lemma.
\begin{lem}{\mathbf{l}}abel{lem:unif-int} If for {$u$} there exist $M,k_0>0$, such that for all $k>k_0$\begin{equation}
{\mathbf{l}}abel{apriori}\varrho_{\varphi(\cdot),B}(DT_k u){\mathbf{l}}eq Mk,
\end{equation} then there exists a function $\zeta:[0,|B|]\to{[0,\infty)}$, such that ${\mathbf{l}}im_{s\to 0^+}\zeta(s)=0$ and for every measurable set $E\subset B$ it holds that for all $k>0$ \[\varrho_{\psi(\cdot),E}(D T_k u){\mathbf{l}}eq \zeta(|E|).\]
\end{lem}
\begin{proof} The result is classical when $p=q$, \cite{hekima}.
Therefore, we present the proof only for $p<q$. We start with observing that
\begin{flalign*}|\{x\in B \colon\, \varphi(x,|Du|)>s\}|&{\mathbf{l}}eq |\{x\in B \colon\,|u|>k\}|+ |\{x\in B \colon\,\varphi(x,|Du|)>s,\ |u|{\mathbf{l}}eq k\}|\\&=I_1+I_2.
\end{flalign*}
Let us first estimate the volume of superlevel sets of $u$ using Tchebyszev inequality, Poincar\'e inequality, assumptions on the growth of $\varphi$, and~\eqref{apriori}. For all sufficiently large $k$ we have
\begin{flalign*}
I_1&=|\{{x\in B \colon} |u|>k\}|{\mathbf{l}}eq \int_B\frac{|T_k u|^p}{k^p}\,dx{\mathbf{l}}eq\frac{c}{k^p} \int_B |DT_k u|^p\,dx\\
&{\mathbf{l}}eq\frac{c}{k^p} \int_B \varphi(x,|DT_k u|) \,dx= ck^{-p}\varrho_{\varphi(\cdot),B}(DT_k u){\mathbf{l}}eq cMk^{1-p}.
\end{flalign*}
Similarly by Tchebyszev inequality and~\eqref{apriori} we can estimate also
\begin{flalign*}
I_2=|\{x\in B \colon\ \varphi(x,|Du|)>s,\ |u|{\mathbf{l}}eq k\}|&{\mathbf{l}}eq \frac{1}{s}\int_{\{\varphi(x,DT_k u)>s\}}\varphi(x,D T_k u)\,dx{\mathbf{l}}eq {M} \frac{k}{s}.
\end{flalign*}
Altogether for all sufficiently large $s$ (i.e. $s>k_0^p$) we have that
\begin{flalign*}
|\{x\in B \colon\ \varphi(x,|Du|)>s\}|&{\mathbf{l}}eq I_1+I_2{\mathbf{l}}eq cs^{\frac{1-p}{p}}.
\end{flalign*}
Recall that due to~\eqref{doubl-star} there exists $C>0$ uniform in $x$ such that $\psi(x,s)\geq C \widetilde\varphi^{-1}(x,\varphi(x,s)),$ so
\begin{flalign*}
|\{x\in B \colon\,\psi(x,|Du|)>s\}|&{\mathbf{l}}eq |\{x\in B \colon\,C\widetilde\varphi^{-1}(x,\varphi(x,|Du|))>s\}|\\
&= |\{x\in B \colon\,\varphi(x,|Du|)>\widetilde\varphi(x,s/C)\}|\\
& {\mathbf{l}}eq |\{x\in B \colon\,\varphi(x,|Du|)>(s/C)^{q'}\}|{\mathbf{l}}eq c s^{-\frac{q'}{p'}}\,,
\end{flalign*}
for some $c>0$ independent of $x$. Since the case $q=p$ is trivial for these estimates, it suffices to consider $q>p$. Then ${-\frac{q'}{p'}}<{-1}$ and we get the uniform integrability of $\{\psi(\cdot,|DT_ku|)\}_k$, thus the claim follows.
\end{proof}
Let us sum up the information on integrability of gradients of truncations of ${\mathcal{ A}}$-superharmonic functions.
\begin{rem}{\mathbf{l}}abel{rem:unif-int} For a function $u$ being ${\mathcal{ A}}$-superharmonic and finite a.e. in $\Omega$, by Lemma~{\mathbb{R}}ef{lem:loc-bdd-superharm-are-supersol} we get that $\{T_k u\}$ is a sequence of ${\mathcal{ A}}$-supersolutions in $\Omega$. Then~\eqref{apriori} is satisfied because of the Caccioppoli estimate from Lemma~{\mathbb{R}}ef{lem:A-supers-cacc}. Having Lemma~{\mathbb{R}}ef{lem:unif-int} we get that there exists $R_0>0$, such that for every $x\in \Omega$ and $B=B(x,R)\Subset\Omega$ with $R<R_0$ we have $\varrho_{\psi(\cdot),B}(DT_k u){\mathbf{l}}eq 1$ for all $k>0$ and in fact also $\varrho_{\psi(\cdot),B}(Du){\mathbf{l}}eq 1$ (where `$D$' stands for the generalized gradient, cf.~\eqref{gengrad}).
\end{rem}
\begin{lem}{\mathbf{l}}abel{lem:wH-for-trunc}
For $u$ being a nonnegative function ${\mathcal{ A}}$-superharmonic and finite a.e. in $\Omega$ there exist constants $R^{\mathcal{ A}}_0=R_0^{\mathcal{ A}}(n)>0$, $s_0=s_0(\textit{\texttt{data}},n)>0$ as in the weak Harnack inequality (Proposition~{\mathbb{R}}ef{prop:weak-Har-super-inf}), and $C=C(\textit{\texttt{data}},n)>0$, such that for every $k>1$ we have
\begin{equation}
{\mathbf{l}}abel{in:wH-for-trunc}
{\mathbf{l}}eft({\bar{a}}rint_{B_R}(T_k u)^{s_0}\,dx{\mathbb{R}}ight)^\frac{1}{s_0} {\mathbf{l}}eq C{\mathbf{l}}eft({\inf}_{B_{R/2}} (T_k u)+R{\mathbb{R}}ight)
\end{equation}
for all $R\in(0,R_0^{\mathcal{ A}}]$ provided $B_{3R}\Subset\Omega$ and $\varrho_{\psi(\cdot),B_{3R}}(Du){\mathbf{l}}eq 1.$
\end{lem}
\begin{proof} The proof is based on Remark~{\mathbb{R}}ef{rem:unif-int} and
Proposition~{\mathbb{R}}ef{prop:weak-Har-super-inf} that provides weak Harnack inequality for an~${\mathcal{ A}}$-supersolution $v$ holding with constant $C=C(\textit{\texttt{data}},n)$ and for balls with radius $R<R_0(n)$ and so small that $\varrho_{\varphi(\cdot),B_{3R_0}}(Dv){\mathbf{l}}eq 1$.
The only explanation is required whenever $|Dv|\geq 1$ a.e. in the considered ball. Then for every $k>1$ there exists $R_1(k)$ such that we get~\eqref{in:wH-for-trunc} for $T_k v$ over balls such that $R<\min\{R_1(k),R_0(n)\}$ and $\varrho_{\varphi(\cdot),B_{3R_1(k)}}(D T_kv){\mathbf{l}}eq 1$. Of course, then there exists $R_0^{\mathcal{ A}}(k)\in(0,R_1(k))$, such that we have~\eqref{in:wH-for-trunc} for $R<\min\{R_1(k),R_0(n)\}$ and $ \varrho_{\psi(\cdot),B_{3R_0^{\mathcal{ A}}(k)}}(D T_kv){\mathbf{l}}eq \varrho_{\varphi(\cdot),B_{3R_0^{\mathcal{ A}}(k)}}(D T_kv){\mathbf{l}}eq 1$. Note that it is Remark~{\mathbb{R}}ef{rem:unif-int} that allows us to choose $R_0^{\mathcal{ A}}$ independently of $k$.
\end{proof}}
We are in a position to prove that an ${\mathcal{ A}}$-harmonic function cannot attain its minimum nor maximum in a domain.
\begin{theo}[Strong Minimum Principle for ${\mathcal{ A}}$-superharmonic functions]{\mathbf{l}}abel{theo:mini-princ} Suppose $u$ is ${\mathcal{ A}}$-superharmonic and finite a.e. in connected set $\Omega$. If $u$ attains its minimum inside $\Omega,$ then $u$ is a constant function.
\end{theo}
\begin{proof} {We consider $v=(u-\inf_{\Omega}u)$, which by Corollary~{\mathbb{R}}ef{coro:min-A-super} is ${\mathcal{ A}}$-superharmonic. Let $E=\{x\in \Omega:\ v(x)= 0\}$, which by lower semicontinuity of $v$ (Lemma~{\mathbb{R}}ef{lem:A-sh-lsc}) is nonempty and relatively closed in $\Omega.$ Having in hand Remark~{\mathbb{R}}ef{rem:unif-int} we can choose $B=B(x,R)\subset 3B\Subset\Omega$ with radius smaller than $R_0^{\mathcal{ A}}$ from Lemma~{\mathbb{R}}ef{lem:wH-for-trunc} and such that $\varrho_{\psi(\cdot), B_{3R}}(Du){\mathbf{l}}eq 1$ where $\psi$ is as in~\eqref{psi}. Therefore, in the rest of the proof we restrict ourselves to a ball $B$. By Corollary~{\mathbb{R}}ef{coro:min-A-super} functions $v$ and $T_k v$ are ${\mathcal{ A}}$-superharmonic in $3B$. Moreover, by Lemma~{\mathbb{R}}ef{lem:loc-bdd-superharm-are-supersol} we infer that $\{T_k v\}$ is a~sequence of ${\mathcal{ A}}$-supersolutions integrable uniformly in the sense of Lemma~{\mathbb{R}}ef{lem:unif-int}. We take any $y\in B$ -- a Lebesgue's point of $T_k v$ for every $k$ and choose $B'=B'(y,R')\Subset B.$ Let us also fix arbitrary $k>0$. We have the weak Harnack inequality from Lemma~{\mathbb{R}}ef{lem:wH-for-trunc} for $T_k v$ on $B'$ yielding
\[0{\mathbf{l}}eq{\mathbf{l}}eft({\bar{a}}rint_{B'} (T_kv)^{s_0}\,dx{\mathbb{R}}ight)^\frac{1}{s_0}{\mathbf{l}}eq C(\inf_{B'/2}T_k v+R')=CR'\]
with $s_0,C>0$ independent of $k$. Letting $R'\to 0$ we get that $T_k v(y)=0$. Lebesgue's points of $T_k v$ for every $k$ are dense in $B$, we get that $T_k v\equiv 0$ a.e. in $B$. By arguments as in Lemma~{\mathbb{R}}ef{lem:oo} we get that $T_k v\equiv 0$ in $B$, but then $B\subset E$ and $E$ has to be an open set. Since $\Omega$ is connected, $E$ is the only nonempty and relatively closed open set in $\Omega,$ that is $E=\Omega$. Therefore $T_kv\equiv 0$ in $\Omega.$ As $k>0$ was arbitrary $v=u-\inf_{\Omega}u\equiv 0$ in $\Omega$ as well.}
\end{proof}
As a classical consequence of the Strong Minimum Principle, we get its weaker form.
\begin{coro}[Minimum Principle for $\mathcal{A}$-superharmonic functions]\label{coro:mini-princ} Suppose $u$ is $\mathcal{A}$-superharmonic and finite a.e. in $\Omega$. If $E\Subset\Omega$ is a connected open subset of $\Omega$, then
\[\inf_{E} u=\inf_{\partialrtial E} u.\]
\end{coro}
By the very definition of an ${\mathcal{ A}}$-subharmonic function one gets the following direct consequence of the above fact.
\begin{coro}[Maximum Principle for ${\mathcal{ A}}$-subharmonic functions]{\mathbf{l}}abel{coro:max-princ}
Suppose $u$ is $\mathcal{A}$-subharmonic and finite a.e. in $\Omega$. If $E\Subset\Omega$ is a connected open subset of $\Omega$, then
\[\sup_E u=\sup_{\partialrtial E} u.\]
\end{coro}
Having Theorem~\ref{theo:mini-princ} and Corollary~\ref{coro:max-princ}, we infer that if $u$ is $\mathcal{A}$-harmonic in $\Omega$, then it attains its minimum and maximum on $\partial\Omega$. In other words, $\mathcal{A}$-harmonic functions have the following Liouville-type property.
\begin{coro}[Liouville Theorem for ${\mathcal{ A}}$-harmonic functions]{\mathbf{l}}abel{coro:min-max-princ}
If an ${\mathcal{ A}}$-harmonic function attains its extremum inside a domain, then it is a constant function.
\end{coro}
\subsection{Boundary Harnack inequality for ${\mathcal{ A}}$-harmonic functions}
\begin{theo}[Boundary Harnack inequality for ${\mathcal{ A}}$-harmonic functions]{\mathbf{l}}abel{theo:boundary-harnack}
For a~nonnegative function $u$ which is ${\mathcal{ A}}$-harmonic in a connected set $\Omega$ there exist $R_0=R(n)>0$ and $C=C(\textit{\texttt{data}},n,R_0,{{\mathbb{R}}m ess\,sup}_{B_{R_0}}u)>0
$, such that
\begin{equation*}
\sup_{\partialrtial B_R} u{\mathbf{l}}eq C(\inf_{\partialrtial B_{R}} u+R) \end{equation*}
for all $R\in(0,R_0]$ provided $B_{3R}\Subset\Omega$ and $\varrho_{\psi(\cdot),B_{3R}}(Du){\mathbf{l}}eq 1,$ where $\psi$ is given by~\eqref{psi}.
\end{theo}\begin{proof}It suffices to note that by Lemma~{\mathbb{R}}ef{lem:A-h-is-great} we can use Minimum Principle of~Corollary~{\mathbb{R}}ef{coro:mini-princ} and Maximum Principle of Corollary~{\mathbb{R}}ef{coro:max-princ}. Then by Harnack inequality of Theorem~{\mathbb{R}}ef{theo:Har-A-harm} the proof is complete.
\end{proof}
{\begin{coro}
Suppose $u$ is $\mathcal{A}$-harmonic in $B_{\frac{3}{2}R}\setminus B_R,$ with $R<R_0$ from Theorem~\ref{theo:boundary-harnack}. Then there exists $C=C(\textit{\texttt{data}},n,R_0,{\rm ess\,sup}_{B_{R_0}}u)>0
$, such that
\begin{equation*}
\sup_{\partialrtial B_{\frac 43 R}} u{\mathbf{l}}eq C(\inf_{\partialrtial B_{\frac 43 R}} u+2R). \end{equation*}
\end{coro}\begin{proof} Fix $\varepsilon>0$ small enough for $B_R\Subset B_{\frac 43 R-\varepsilon}\subset B_{\frac 43 R+\varepsilon}\Subset B_{\frac 32 R}.$ Of course, then $u$ is ${\mathcal{ A}}$-harmonic in ${B_{\frac 43 R+\varepsilon}\setminus B_{\frac 43 R-\varepsilon}}$. We cover the annulus with finite number of~balls of equal radius as prescribed in the theorem and such that $\varrho_{\psi(\cdot),B}(Du){\mathbf{l}}eq 1$, which is possible due to Remark~{\mathbb{R}}ef{rem:unif-int}. Let us observe that due to the Harnack's inequality from Theorem~{\mathbb{R}}ef{theo:boundary-harnack} we have\begin{flalign*}
\sup_{\partialrtial B_{\frac 43 R+\varepsilon}} u{\mathbf{l}}eq \sup_{\partialrtial B_{\frac 43 R+\varepsilon}\cup\partialrtial B_{\frac 43 R-\varepsilon}} u&{\mathbf{l}}eq C\Big( \inf_{\partialrtial B_{\frac 43 R+\varepsilon}\cup\partialrtial B_{\frac 43 R-\varepsilon}} u+ \frac 43 R+\varepsilon\Big)\\&{\mathbf{l}}eq C\Big( \inf_{\partialrtial B_{\frac 43 R+\varepsilon}} u + 2R\Big).
\end{flalign*}
Since $u$ is continuous in $B_{\frac{3}{2}R}\setminus B_R,$ passing with $\varepsilon\to 0$ we get the claim.
\end{proof}
}
\end{document} |
\begin{document}
\title{On a double integral of a product of Legendre polynomials}
\begin{abstract}
\noindent
We calculate a double integral over a product of Legendre polynomials multiplied by a binomial raised to a power.
\end{abstract}
During the calculation of the electromagnetic self-force of a uniformly charged spherical ball, we encountered the integral
\begin{align}\label{1}
I=\int_0^{\pi}d \theta \sin \theta \int_0^{\pi}d \theta' \sin \theta' (\cos \theta - \cos \theta')^{2n} P_l(\cos \theta) P_l(\cos \theta'),
\end{align}
where $n$ and $l$ are integer positive numbers and $P_l$ is a Legendre polynomial of order $l$. As far as we know, this integral was not calculated in closed form anywhere in the literature. We calculate it here.
After changing the variables $\cos \theta \rightarrow x$, $\cos \theta' \rightarrow y$ this integral becomes
\begin{align}\label{2}
I=\int_{-1}^1 dx \int_{-1}^1 dy \;(x-y)^{2n} P_l(x) P_l(y).
\end{align}
We first perform the integral $\int_{-1}^1 dx (x-y)^{2n} P_l(x).$
For this, we can use {\bf 7.228} and {\bf 8.703} from \cite{gra}. Combining these two equations that read
\begin{align}
\frac{1}{2}\Gamma(1+\mu) \int_{-1}^1 P_l(x) (z-x)^{-\mu-1}\,dx= (z^2-1)^{- \frac{\mu}{2}} e^{-i \pi \mu} Q_l^{\mu}(z), \quad l=0,1,\dots, \quad |\arg(z-1)|<\pi,
\end{align}
\begin{align}
Q_{\nu}^{\mu}(x)= \frac{e^{i\pi \mu} \Gamma(\nu+\mu+1) \Gamma\left(\frac{1}{2}\right)}{2^{\nu+1} \Gamma\left( \nu + \frac{3}{2}\right)} (x^2-1)^{\frac{\mu}{2}} x^{-\nu-\mu-1} {}_2F_{1}\left(\frac{\nu+ \mu +2}{2}, \frac{\nu+\mu+1}{2}; \nu+\frac{3}{2}; \frac{1}{x^2}\right),
\end{align}
after using \[ \frac{\Gamma(l+\mu+1)}{\Gamma(\mu+1)} =(\mu +1)_l\; \text{and} \; \Gamma\left( l+\frac{3}{2}\right)= \frac{\sqrt{\pi} (l+1)_{l+1}}{2^{2l+1}},\]
one obtains for $\mu=-1-2n$
\begin{align}
\int_{-1}^1 dx \, (x-y)^{2n}P_l(x) = \frac{(-2n)_l \,2^{l+1} y^{2n-l}}{(l+1)_{l+1}} {}_2F_1 \left( \frac{l}{2}-n+\frac{1}{2}, \frac{l}{2}-n; l+\frac{3}{2}; \frac{1}{y^2} \right).
\end{align}
The same result given in Eq. (5) can be obtained by putting $a=1$, $m=0$ and $p=-2n$ in Eq. {\bf 2.17.4(5)} from \cite{pru}
\begin{align}
\int_{-a}^a dx \,\frac{(a^2-x^2)^{\frac{m}{2}}}{(x-y)^p} P_l^m \left( \frac{x}{a} \right)= \frac{2 (-1)^{m-1}(l+m)!}{(p-1)! (l-m)!} (y^2-a^2)^{\frac{m-p+1}{2}} Q_l^{p-m-1} \left( \frac{y}{a}\right),
\end{align}
although this equation is given in \cite{pru} as being valid only for $p=0,1, \dots$.
The same result given in Eq.(5) can be obtained by direct calculation, by using the Rodrigues formula for Legendre polynomials \cite{rai}
\begin{align}
P_l(x)= \sum_{k=0}^{[l/2]} \frac{(-1)^k \left( \frac{1}{2}\right)_{l-k} (2x)^{l-2k}}{k!(l-2k)!}
\end{align}
and the binomial expansion for $(x-y)^{2n}$
\begin{align}
(x-y)^{2n}= \sum_{p=0}^{2n} \frac{(-1)^p (2n)!}{p!(2n-p)!} x^p y^{2n-p},
\end{align}
and integrating the resulting double sum term by term. For $l$ odd, after noting that the term by term integration gives non-zero result only for $p$ odd and changing the summation index $p \rightarrow 2p+1$, one obtains
\begin{align}
\int_{-1}^1 dx \,(x-y)^{2n} P_l(x)= -2^{l+1} \sum_{k=0}^{[l/2]} \sum_{p=0}^{n-1} \frac{(-1)^k \left( \frac{1}{2} \right)_{l-k}(2n)! y^{2n-2p-1}}{k! (l-2k)!2^{2k}(2p+1)!(2n-2p-1)!}\nonumber \\
\cdot \frac{1}{(l-2k+2p+2)}.
\end{align}
Writing all the factorials in terms of Pochhammer symbols, the above summation over $k$ can be done as follows
\begin{align}
&\sum_{k=0}^{\frac{l-1}{2}} \frac{(-1)^k \left( \frac{1}{2}\right)_{l-k}}{k!(l-2k)!2^{2k}(l+2p+2-2k)} \nonumber \\
&= \frac{\left( \frac{1}{2}\right)_l}{\Gamma(1+l) (l+2p+2)} {}_3F_2 \left( -\frac{l}{2}+\frac{1}{2}, -\frac{l}{2}, -\frac{l}{2}-p-1; \frac{1}{2}-l, -\frac{l}{2}-p; 1\right)\nonumber \\
&= \frac{\left(\frac{1}{2}\right)_l \left( \frac{1-l}{2} \right)_{\frac{l-1}{2}} (-p)_{\frac{l-1}{2}}}{l! \,(l+2p+2) \left(\frac{1}{2}-l\right)_{\frac{l-1}{2}} \left(-\frac{l}{2}-p\right)_{\frac{l-1}{2}}},
\end{align}
where, when we passed from the second to the third line of the above equation, we used equation {\bf 7.4.4 (81)} from \cite{pru}. We note that, because of the Pochhammer symbol $(-p)_{\frac{l-1}{2}}$, the r.h.s. of Eq. (10) is different from zero only for $p \ge \frac{l-1}{2}$. Introducing Eq. (10) in Eq. (9) and changing the summation index $p\rightarrow i$, $i=p-\frac{l-1}{2}$, the resulting summation over $i$ can be done immediately and one obtains again the result of Eq. (5). The case $l$ even can be considered similarly.
Returning now to Eq. (2), after using Eq. (5) one obtains
\begin{align}\label{7}
I=\frac{(-2n)_l \;2^{l+1}}{(l+1)_{l+1}} \int_{-1}^1 dy\; y^{2n-l} P_l(y)\; {}_2F_1 \left(\frac{l}{2}-n, \frac{l+1}{2}-n; l+\frac{3}{2};\frac{1}{y^2} \right).
\end{align}
Note that, for $l \le 2n$, the hypergeometric function in Eq.(\ref{7}) is, in fact, a finite series, because $l/2-n$ and $(l+1)/2-n$ are negative integers when $l$ is even and odd respectively. Again, we consider the cases $l$ even and $l$ odd separately.
For $l$ even, using the definition of the Gauss hypergeometic function, we have
\begin{align}\label{8}
{}_2F_1 \left(\frac{l}{2}-n, \frac{l+1}{2}-n; l+\frac{3}{2};\frac{1}{y^2} \right)= \sum_{k=0}^{n-\frac{l}{2}} \frac{ \left( \frac{l}{2}-n \right)_k \left(\frac{l+1}{2}-n \right)_k}{k! \left( l+\frac{3}{2} \right)_k} \frac{1}{y^{2k}}.
\end{align}
From Eqs. (\ref{7}), (\ref{8}), one obtains
\begin{align}\label{9}
I= \frac{2 (-2n)_l2^{l+1}}{(l+1)_{l+1}} \sum_{k=0}^{n-\frac{l}{2}} \frac{ \left( \frac{l}{2}-n \right)_k \left(\frac{l+1}{2}-n \right)_k}{k! \left( l+\frac{3}{2} \right)_k} \int_0^1 dy \; y^{2n-2k-l} P_l(y),
\end{align}
where we used the fact that the integrand in the r.h.s. of Eq. (\ref{9}) is an even function, because $P_l(-y)=(-1)^l P_l(y)$ \cite{gra}. The integral in Eq. (\ref{9}) can be performed using ({\bf 2.17.1(4)} from \cite{pru} or {\bf 7.126(2)} from \cite{gra} )
\begin{align}\label{p2}
\int_0^1dx \; x^{\sigma} P_{\nu}(x)= \frac{\sqrt{\pi} \;2^{-\sigma-1} \Gamma(1+\sigma)}{\Gamma \left(1+\frac{\sigma-\nu}{2} \right) \Gamma \left( \frac{\sigma+\nu+3}{2} \right)}.
\end{align}
One obtains
\begin{align}\label{11}
I=\frac{\sqrt{\pi} \;(-2n)_l \;2^{2l-2n+1}}{(l+1)_{l+1}} \sum_{k=0}^{n-\frac{l}{2}} \frac{2^{2k} \left( \frac{l}{2}-n \right)_k \left( \frac{l+1}{2}-n \right)_k \Gamma(2n-2k-l+1)}{k! \left( l+\frac{3}{2} \right)_k \Gamma(1+n-k-l) \Gamma \left( n-k+\frac{3}{2} \right)}.
\end{align}
Using the definition of the Pochhammer symbol \cite{pru} \[ (a)_k= \frac{\Gamma(a+k)}{\Gamma(a)}= (-1)^k \frac{\Gamma(1-a)}{\Gamma(1-a-k)}, \]
and \cite{pru}
\[(a)_{2k}= \left( \frac{a}{2} \right)_k \left( \frac{a+1}{2} \right)_k 2^{2k}, \]
we write the Gamma functions in Eq. (\ref{11}) as follows
\begin{align}
& \Gamma(2n-2k-l+1)= \frac{\Gamma(2n-l+1)}{\left( \frac{l}{2}-n \right)_k \left( \frac{l}{2}-n+\frac{1}{2}\right)_k2^{2k}}, \nonumber\\
&\Gamma(1+n-l-k)= (-1)^k\frac{\Gamma(1+n-l)}{(l-n)_k},\nonumber \\
&\Gamma \left(n+\frac{3}{2}-k \right)= (-1)^k \frac{\Gamma \left( n+\frac{3}{2}\right)}{\left( -n-\frac{1}{2}\right)_k}.
\end{align}
Introducing (16) in (15), one obtains
\begin{align}
I=\frac{\sqrt{\pi} \;(-2n)_l\; 2^{2l-2n+1}(2n-l)!}{ (l+1)_{l+1} (n-l)! \left( n+\frac{1}{2} \right)!} {}_2F_1 \left(-n-\frac{1}{2}, l-n; l+\frac{3}{2}; 1 \right).
\end{align}
But the Gauss hypergeometric function of unit argument can be written as ({\bf 7.3.5(2)} \cite{pru})
\begin{align}
{}_2F_1(a,b;c;1)=\frac{\Gamma(c) \Gamma(c-a-b)}{\Gamma(c-a) \Gamma(c-b)},
\end{align}
and we obtain
\begin{align}\label{17}
I=\frac{ \sqrt{\pi}\; (-2n)_l \;2^{2l-2n+1} (2n-l)! \left(l+\frac{1}{2}\right)! (2n+1)!}{ (l+1)_{l+1} (n-l)! \left(n+\frac{1}{2}\right)! (l+n+1)! \left(n+ \frac{1}{2} \right)!},
\end{align}
where we use the notation $\Gamma(z)=(z-1)!$ both for integer and noninteger $z$. Using the definition of the Pochhammer symbol and \cite{pru}
\begin{align}
\frac{\Gamma(2z)}{\Gamma(z)}=\frac{2^{2z-1}}{\sqrt{\pi}} \Gamma \left( z+\frac{1}{2} \right),
\end{align}
we can write
\begin{align}\label{19}
&(-2n)_l= (-1)^l \frac{\Gamma(1+2n)}{\Gamma(1+2n-l)},\nonumber \\
& (l+1)_{l+1}=\frac{2^{2l+1}}{\sqrt{\pi}} \Gamma \left(l+\frac{3}{2} \right),\nonumber \\
& \frac{(2n)!}{\left( n+\frac{1}{2}\right)!}=\frac{2^{2n+1}\Gamma(n+1)}{\sqrt{\pi} (2n+1)},\\
& \frac{(2n+1)!}{\left( n+\frac{1}{2}\right)!}= \frac{2^{2n+1}\Gamma(n+1)}{\sqrt{\pi} }. \nonumber
\end{align}
Introducing (\ref{19}) in (\ref{17}), one obtains for $l$ even
\begin{align}
I= \frac{(-1)^l 2^{2n+2} (n!)^2}{(2n+1) (n-l)! (n+l+1)!}.
\end{align}
Note that, because of $(n-l)!$ from the denominator, this result is different from zero only for $n \ge l$. A similar calculation can be done for $l$ odd, and one obtains the same result. So, our final result for the integral (\ref{1}) is
\begin{equation}
I= \begin{cases}
\dfrac{(-1)^l 2^{2n+2} (n!)^2}{(2n+1) (n-l)! (n+l+1)!}, & n \ge l,\\[6pt]
0, & n<l.
\end{cases}
\end{equation}
\end{document} |
\begin{document}
\title{On Change of Variable Formulas for non-anticipative functionals}
\author{M. Mania$^{1)}$ and R. Tevzadze$^{2)}$}
\date{~}
\maketitle
\begin{center}
$^{1)}$ A. Razmadze Mathematical Institute of Tbilisi State University, 6 Tamarashvili Str., Tbilisi 0177; and
Georgian-American University, 8 Aleksidze Str., Tbilisi 0193, Georgia,
\newline(e-mail: mania@rmi.ge)
\\
$^{2)}$ Georgian-American University, 8 Aleksidze Str., Tbilisi 0193, Georgia,
Georgian Technical University, 77 Kostava str., 0175,
Institute of Cybernetics, 5 Euli str., 0186, Tbilisi,
Georgia
\newline(e-mail: rtevzadze@gmail.com)
\end{center}
\begin{abstract}
{\bf Abstract.}
For non-anticipative functionals, differentiable in Chitashvili's sense, the It\^o formula
for cadlag semimartingales is proved. Relations between different notions of functional derivatives
are established.
\end{abstract}
\bigskip
\noindent {\it 2010 Mathematics Subject Classification. 90A09, 60H30, 90C39}
\
\noindent {\it Keywords}: The It\^o formula, semimartingales, non-anticipative functionals, functional derivatives
\section{Introduction}
The classical It\^o \cite{ito} formula shows that for a sufficiently smooth function\\ $(f(t,x), t\ge0, x\in R)$ the transformed process
$f(t,X_t)$
is a semimartingale for any semimartingale $X$ and provides a decomposition of the process $f(t,X_t)$ as a sum of stochastic
integral relative to $X$ and a process of finite variation. This formula is applicable to functions of the current value of semimartingales,
but in many applications, such as statistics of random processes, stochastic optimal control or mathematical finance, uncertainty affects
through the whole history of the process and it is necessary to consider functionals of entire path of a semimartingale.
In 2009 Dupire \cite{Dupire} proposed a method to extend the It\^o formula for non-anticipative functionals
using naturally defined pathwise time and space derivatives. The space derivative measures the sensitivity of a functional $f:D([0,T], R)\to R$
to a variation in the endpoint of a path $\omega\in D([0,T], R)$ and is defined as a limit
$$
\partial_\omega f(t,\omega)=\lim_{h\to 0}\frac{f(t,\omega+hI_{[t,T]})-f(t,\omega)}{h},
$$
if this limit exists, where $D([0,T])$ is the space of RCLL (right continuous with left limits) functions. Similarly is defined the second order space
derivative $\partial_{\omega\omega}f:= \partial_{\omega}(f_{\omega}).$
The definition of the time derivative is based on the flat extension of a path $\omega$ up to time $t+h$ and is defined as a limit
$$
\partial_t f(t,\omega)=\lim_{h\to 0+}\frac{f(t+h,\omega^t)-f(t,\omega)}{h},
$$
If a continuous non-anticipative functional $f$ is from $C^{1,2}$ , i.e., if $\partial_t f, \partial_\omega f$, $\partial_{\omega\omega}f$ exist and are continuous
with respect to the metric $d_\infty$ (defined in section 2)
and $X$ is a continuous semimartingale, Dupire \cite{Dupire} proved that the process $f(t,X)$ is also a semimartingale and
$$
f(t, X)=f(0, X)+\int_0^t\partial_t f(s,X)ds+\int_0^t\partial_\omega f(s,X)dX_s
$$
\begin{equation}\label{itodup}
+\frac{1}{2}\int_0^t\partial_{\omega\omega}f(s, X)d\langle X\rangle_s.
\end{equation}
For the special case of $f(t,X_t)$ these derivatives coincide with the usual space and time derivatives and the above formula
reduces to the standard It\^o formula.
Earlier related works are the works by Ahn \cite{ahn} and
Tevzadze \cite{T2}, where It\^o's formula was derived in very particular cases of functionals that assume the knowledge of the whole path
without path dependent dynamics.
Further works extending this theory, and the corresponding references, can be found in \cite{CF1}, \cite{CF2}, \cite{LScS},\cite{O}.
Motivated by applications in stochastic optimal control, before Dupire's work, Chitashvili (1983) defined differentiability of non-anticipative
functionals in a different way and proved the corresponding It\^o formula for continuous semimartingales. His definition is based
on a ``hypothetical'' change of variable formula for continuous functions of finite variation.
We formulate Chitashvili's definition of differentiability and present his change of variable formula in a simplified form and for one-dimensional case.
Let $C_{[0,T]}$ be the space of continuous functions on $[0,T]$ equipped with the uniform norm. Let $f(t,\omega)$ be non-anticipative continuous mapping of $C_{[0,T]}$ into $C_{[0,T]}$ and
denote by ${\cal V}_{[0,T]}$ the space of functions of finite variation on $[0,T]$.
A continuous non-anticipative functional $f$ is differentiable if there exist continuous functionals $f^0$ and $f^1$ such that for all $\omega\in C_{[0,T]}\cap {\cal V}_{[0,T]}$
\begin{equation}\label{chd}
f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s,\omega)d\omega_s.
\end{equation}
A functional $f$ is two times differentiable if $f^1$ is differentiable, i.e., if
there exist continuous functionals $f^{1,0}$ and $f^{1,1}$ satisfying
\begin{equation}\label{chd2}
f^1(t,\omega)=f^1(0,\omega)+\int_0^tf^{1,0}(s,\omega)ds+\int_0^tf^{1,1}(s,\omega)d\omega_s.
\end{equation}
for all $\omega\in C_{[0,T]}\cap {\cal V}_{[0,T]}$.
Here functionals $f^0, f^1$ and $f^{1,1}$ play the role of time, space and the second order space derivatives respectively.
It was proved by Chitashvili \cite{Ch} that if the functional $f$ is two times differentiable then the process $f(t,X)$ is a semimartingale for any continuous semimartingale $X$ and is represented as
$$
f(t, X)=f(0, X)+\int_0^tf^0(s,X)ds+\int_0^tf^1(s,X)dX_s
$$
\begin{equation}\label{itoc}
+\frac{1}{2}\int_0^tf^{1,1}(s, X)d\langle X\rangle_s.
\end{equation}
The idea of the proof of change of variable formula (\ref{itoc}) for semimartingales is to use the change of variable formula for functions of finite variations,
first for the function $f$ and then for its derivative $f^1$, before approximating a continuous semimartingale $X$ by processes of finite variation.
In the paper Ren et al \cite{RTZ} a wider class of $C^{1,2}$ functionals was proposed, which is
based on the It\^o formula itself. We formulate this definition in equivalent form and in one-dimensional case.
The function $f$ belongs to $C^{1,2}_{RTZ}$, if $f$ is a continuous non-anticipative functional on $[0,T]\times C_{[0,T]}$ and there exist continuous non-anticipative functionals
$\alpha, z, \gamma$, such that
\begin{equation}\label{itoc2}
f(t, X)=f(0, X)+\int_0^t\alpha(s,X)ds+\int_0^tz(s,X)dX_s +\frac{1}{2}\int_0^t\gamma(s, X)d\langle X\rangle_s
\end{equation}
for any continuous semimartingale $X$.
The functionals $\alpha, z$ and $\gamma$ also play the role of time, first and second order space derivatives respectively.
Since any process of finite variation is a semimartingale and any deterministic semimartingale is a function of finite variation, it follows from $f\in C^{1,2}_{RTZ}$ that
$f$ is differentiable in the Chitashvili sense and
\begin{equation}\label{ChT}
\alpha=f^0,\;\;\;z=f^1.
\end{equation}
Besides, any $C^{1,2}$ process in the Dupire or Chitashvili sense is in $C^{1,2}_{RTZ}$,
which is a consequence of the functional It\^o formula proved in \cite{Dupire} and \cite{Ch} respectively.
Although the definition of the class $C^{1,2}_{RTZ}$ does not require that $\gamma$ be (in some sense) the derivative of $z$,
if $f\in C^{1,2}$ in the Chitashvili sense,
then beside equality (\ref{ChT}) we also have that $\gamma=f^{1,1}$ (i.e., $\gamma=z^1$).
Our goal is to extend the formula (\ref{itoc}) for RCLL (or cadlag in French terminology) semimartingales and to establish how Dupire's, Chitashvili's and other derivatives are related.
Since the bumped path used in the definition of Dupire's vertical derivative is not continuous even if $\omega$ is continuous,
to compare derivatives defined by (\ref{chd}) with Dupire's derivatives, one should extend Chitashvili's definition to RCLL processes, or
to modify Dupire's derivative in such a way that perturbation of continuous paths remain continuous.
The direct extension of Chitashvili's definition of differentiability for RCLL functions is the following:
A continuous functional $f$ is differentiable, if there exist continuous functionals $f^0$ and $f^1$ (continuous with respect to the metric
$d_\infty$ defined by (\ref{rho}))
such that $ f(\cdot,\omega)\in {\cal V}_{[0,T]}$ for all $\omega\in {\cal V}_{[0,T]}$ and
\begin{equation}\label{xvii}
f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s-,\omega)d\omega_s
\end{equation}
$$
+\sum_{s\le t}\big[f(s,\omega)-f(s-,\omega)-f^1(s-,\omega)\Delta\omega_s\big],
$$
for all $(t,\omega)\in [0,T]\times {\cal V}_{[0,T]}$.
In order to compare Dupire's derivatives with Chitashvili's derivatives, we introduce another type of
vertical derivative where, unlike Dupire's derivative $\partial_\omega f$, the path deformation of
continuous paths is also continuous.
We say that a non-anticipative functional $f(t,\omega)$ is vertically differentiable and denote this differential by $D_\omega f(t,\omega)$, if
the limit
\begin{equation}
D_\omega f(t,\omega):=\lim_{h\to0, h>0}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h},
\end{equation}
exists for all $(t,\omega)\in [0,T]\times {D}_{[0,T]}$, where
$$
\chi_{t,h}(s)=(s-t)1_{(t,t+h]}(s)+h1_{(t+h,T]}(s).
$$
Let $f(t,\omega)$ be differentiable in the sense of (\ref{xvii}).
Then, as proved in Proposition 1,
\begin{equation}
f^0(t,\omega)=\partial_t f(t,\omega)\;\;\;\;\text{and}\;\;\;\; f^1(t,\omega)=D_\omega f(t,\omega)
\end{equation}
for all $(t,\omega)\in [0,T]\times {D}_{[0,T]}$.
Thus, $f^0$ coincides with Dupire's time derivative, but $f^1$ is equal to $D_\omega f$ which is different from Dupire's vertical derivative in general. The simplest counterexample
is $f(t,\omega)=\omega_t-\omega_{t-}$. It is evident that in this case $\partial_\omega f=1$ and $D_\omega f=0$. In general, if $g(t,\omega):=f(t-,\omega)$ then
$D_\omega g(t,\omega)=D_\omega f(t,\omega)$ and $\partial_\omega g(t,\omega)=0$ if corresponding derivatives of $f$ exist. However, under stronger conditions, e.g. if $f\in C^{1,1}$ in the Dupire sense, then $D_\omega f$ exists and $D_\omega f=f^1=\partial_\omega f.$
The paper is organized as follows: In section 2 we extend Chitashvili's change of variable formula for RCLL semimartingales and
give an application of this formula on the convergence of ordinary integrals to the stochastic integrals. In section 3 we establish relations between different types
of derivatives for non-anticipative functionals.
\section{The It\^o formula according to Chitashvili for cadlag semimartingales}
Let $\Omega:= D([0,T], R)$ be the set of c\`{a}dl\`{a}g
paths. Denote by $\omega$ the elements of $\Omega$, by $\omega_t$ the value of $\omega$ at time $t$ and let $\omega^t=\omega(\cdot\wedge t)$ be the path of $\omega$ stopped at $t$.
Let $B$ be the canonical process defined by $B_t(\omega)=\omega_t$, $\mathbb{F}=(F_t,t\in[0,T])$ the corresponding filtration and let
$\Lambda:= [0,T]\times\Omega$.
The functional $f:[0,T]\times D[0,T]\to R$ is non-anticipative if
$$
f(t,\omega)=f(t,\omega^t)
$$
for all $\omega\in D[0,T]$, i.e., the process $f(t,\omega)$ depends only on the path of $\omega$ up to time $t$ and is $\mathbb{F}$-adapted.
Following Dupire, we define semi-norms on $\Omega$ and a pseudo-metric on $\Lambda$ as follows:
for any $(t, \omega), ( t', \omega') \in\Lambda$,
\begin{eqnarray}
\label{rho}
\|\omega\|_{t}&:=& \sup_{0\le s\le t} |
\omega_s|,\nonumber\\[-8pt]\\[-8pt]
d_\infty\bigl((t, \omega),\bigl(
t', \omega'\bigr) \bigr)&:=& \bigl|t-t'\bigr| +
\sup_{0\le s\le T} \bigl|\omega_{t\wedge s} - \omega'_{t'\wedge
s}\bigr|.\nonumber
\end{eqnarray}
Then $(\Omega, \|\cdot\|_{T})$ is a Banach space and $(\Lambda, d_\infty)$ is
a complete pseudo-metric space.
Let ${\cal V}={\cal V}[0,T]$ be the set of finite variation paths from $\Omega$. Note that,
if $f\in C(\Lambda)$, then $\Delta \omega_t=0$ implies $f(t,\omega)-f(t-,\omega)=0$, since $d_\infty((t_n,\omega),(t,\omega))\to 0$ when $t_n\uparrow t$.
Hence $f(t,\omega)-f(t-,\omega)\neq 0$ means $\Delta \omega_t\neq 0$.
Note that any functional $f:[0,T]\times\Omega\to R$ continuous with respect to $d_\infty$ is non-anticipative. In this paper we consider only $d_\infty$-continuous, and hence non-anticipative,
functionals.
{\bf Definition 1.}
We say that a continuous functional $f\in C([0,T]\times \Omega)$ is differentiable, if there exist $f^0\in C([0,T]\times \Omega)$ and $f^1\in C([0,T]\times \Omega)$
such that for all $\omega\in {\cal V}$ the process $ f(t,\omega)$ is of finite variation and
\begin{equation}\label{xv}
f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s-,\omega)d\omega_s
\end{equation}
$$
+\sum_{s\le t}\big[f(s,\omega)-f(s-,\omega)-f^1(s-,\omega)\Delta\omega_s\big],
$$
for all $(t,\omega)\in [0,T]\times\cal V$.
A functional $f$ is two times differentiable if $f^1$ is differentiable, i.e., if
there exist $f^{1,0}\in C([0,T]\times \Omega)$ and $f^{1,1}\in C([0,T]\times \Omega)$
such that for all $(t,\omega)\in [0,T]\times\cal V$
\begin{equation}\label{two}
f^1(t,\omega)=f^1(0,\omega)+\int_0^tf^{1,0}(s,\omega)ds+\int_0^tf^{1,1}(s-,\omega)d\omega_s + V^1(t,\omega),
\end{equation}
where
$$
V^1(t,\omega)=\sum_{s\le t}\big(f^1(s,\omega)-f^1(s-,\omega)-f^{1, 1}(s-,\omega)\Delta\omega_s\big).
$$
Now we give a generalization of Theorem 2 from Chitashvili \cite{Ch}
for general cadlag (RCLL) semimartingales.
\begin{thr}
Let $f$ be two times differentiable in the sense of Definition 1 and assume that for some $K>0$
\begin{equation}\label{v}
|f(t,\omega)-f(t-,\omega)-f^1(t-,\omega)\Delta\omega_t|\le K(\Delta\omega_t)^2,\;\; \forall\omega\in\cal V.
\end{equation}
Then for any semimartingale $X$ the process $f(t,X)$ is a semimartingale and
$$
f(t, X)=f(0, X)+\int_0^tf^0(s,X)ds+\int_0^tf^1(s-,X)dX_s
$$
\begin{equation}\label{ito}
+\frac{1}{2}\int_0^tf^{1,1}(s, X)d\langle X^c\rangle_s+\sum_{s\le t}\big[f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s\big].
\end{equation}
\end{thr}
{\it Proof.} Let us first assume that $X$ is a semimartingale with the decomposition
\begin{equation}\label{dec0}
X_t=A_t+M_t, \quad t\in[0,T],
\end{equation}
where $M$ is a continuous local martingale and $A$ is a process of finite variation having only finite number of jumps, i.e., the jumps of $A$ are exhausted by
graphs of finite number of stopping times $(\tau_i, 1\le i\le l, l<\infty)$.
Let $X_t^n= A_t+M^n_t$ and
\begin{equation}
M^n_t= n\int_0^tM_s\exp(-n(\langle M\rangle_t-\langle M\rangle_s))d\langle M\rangle_s.
\end{equation}
It is proved in \cite{Ch} that
\begin{equation}\label{mc}
\sup_{s\le t}|M^n_s-M_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
Since $X^n$ is of bounded variation, $f$ is differentiable and $\Delta X^n_t=\Delta A_t=\Delta X_t$, it follows from (\ref{xv}) that
$$
f(t,X^n)=f(0, X)+\int_0^tf^0(s,X^n)ds
$$
$$
+\int_0^tf^1(s-,X^n)dX_s +\int_0^tf^1(s-,X^n)d(M^n_s-M_s)
$$
\begin{equation}\label{itod}
+\sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X_s\big).
\end{equation}
Since $X$ admits finite number of jumps, by continuity of $f$ and $f^1$,
\begin{equation}\label{jumpb}
\sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X_s\big)\to
\end{equation}
$$
\to\sum_{s\le t}\big(f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s\big)
$$
The continuity of $f, f^0, f^1$ and relation (\ref{mc}) imply that
\begin{equation}\label{fxn1a}
f(t,X^n)\to f(t,X),\;\;\;as\;\;n\to\infty,\;\;\; a.s.,
\end{equation}
\begin{equation}\label{fxn22a}
\int_0^tf^0(s,X^n)ds\to\int_0^tf^0(s,X)ds\;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
by the dominated convergence theorem and
\begin{equation}\label{fxn4a}
\int_0^tf^1(s-,X^n)dX_s\to\int_0^tf^1(s-,X)dX_s\;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
by the dominated convergence theorem for stochastic integrals.
Here we may use the dominated convergence theorem, since by continuity of $f^i ( i=0,1)$ the process
$\sup_{n, s\le t}|f^i(s-, X^n)|$ is locally bounded (see Lemma A1).
Let us show now that
\begin{equation}\label{fx12aa}
\int_0^tf^1(s-,X^n)d(M^n_s-M_s)\to\frac{1}{2}\int_0^tf^{1,1}(s,X)d\langle M\rangle_s.
\end{equation}
Integration by parts and (\ref{two}) give
$$
\int_0^tf^1(s,X^n)d(M^n_s-M_s)=(M^n_t-M_t)f^1(t,X^n)-
$$
$$
-\int_0^t(M^n_s-M_s)f^{1,0}(s,X^n)ds-\int_0^t(M^n_s-M_s)f^{1,1}(s-,X^n)dA_s
$$
$$
-\int_0^t(M^n_s-M_s)f^{1,1}(s-,X^n)dX^n_s-\int_0^t(M^n_s-M_s)dV^1(s, X^n)=
$$
\begin{equation}\label{i5}
=I^1_t(n)+I^2_t(n)+I^3_t(n)+I^4_t(n) +I_t^5(n).
\end{equation}
$I^1_t(n)\to 0$ (as $n\to\infty$, a.s.) by continuity of $f^1$ and (\ref{mc}).
$I^2_t(n)$ and $I_t^3(n)$ tend to zero (as $n\to\infty$, a.s.) by continuity of $f^{1,0}$ and $f^{1,1}$, relation (\ref{mc}) and by the dominated convergence theorem
(using the same arguments as in (\ref{fxn22a})-(\ref{fxn4a})).
Moreover, since $A$ admits finite number of jumps at $(\tau_i, 1\le i\le l)$
\begin{equation}\label{jump1}
I_t^5(n)=\sum_{s\le t}(M^n_s-M_s)\big(f^1(s,X^n)-f^1(s-,X^n)-f^{1,1}(s-,X^n)\Delta A_s\big)
\end{equation}
$$
=\sum_{i\le l}(M^n_{\tau_i}-M_{{\tau_i}})\big(f^1(\tau_i,X^n)-f^1(\tau_i-,X^n)-f^{1,1}(\tau_i-,X^n)\Delta A_{\tau_i}\big)
$$
$$
\le \sup_{s\le t}|M^n_s-M_s|\big(2l\sup_{n, s\le t}|f^1(s,X^n)|+\sup_{n, s\le t}|f^{1,1}(s,X^n)|\sum_{i\le l}|\Delta A_{\tau_i}|\big)\to 0,
$$
as $n\to\infty$, since the continuity of $f^1, f^{1,1}$, relation (\ref{mc}) and Lemma A1 imply that
$\sup_{n, s\le t}|f^1(s,X^n)|+\sup_{n, s\le t}|f^{1,1}(s,X^n)|<\infty$ (a.s.)
Let us consider now the term
$$
I_t^4(n)=\int_0^t(M_s-M^n_s)f^{1,1}(s,X^n)dM^n_s
$$
Let
$$
K^n_t=\int_0^t(M_s-M^n_s)dM^n_s.
$$
Using the formula of integration by parts we have
$$
K^n_t=-\frac{1}{2}(M_t^n)^2+M_t M_t^n-\int_0^tM_s^ndM_s
$$
and it follows from (\ref{mc}), the dominated convergence theorem and equality $M_t^2=2\int_0^tM_sdM_s+\langle M\rangle_t$, that
\begin{equation}\label{kn}
\sup_{s\le t}|K^n_s-\frac{1}{2}\langle M\rangle_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
From definition of $M^n$, using the formula of integration by parts, it follows that $M^n$ admits representation
$$
M^n_t=n\int_0^t(M_s-M^n_s)d\langle M\rangle_s.
$$
Therefore
$$
K^n_t=n\int_0^t(M_s-M^n_s)^2d\langle M\rangle_s.
$$
This implies that $K^n$ is a sequence of increasing processes, which is stochastically bounded by (\ref{kn}) (i.e. satisfies the condition UT from \cite{JMP})
and by theorem 6.2 of \cite{JMP} (it follows also from lemma 12 of \cite{CF1})
$$
\int_0^t(M_s-M^n_s)f^{1,1}(s,X^n)dM^n_s=
$$
$$
=\int_0^tf^{1,1}(s,X^n)dK^n_s\to\frac{1}{2}\int_0^tf^{1,1}(s,X)d\langle M\rangle_s,\;\;\;n\to\infty,
$$
which (together with (\ref{i5})) implies the convergence (\ref{fx12aa}).
Therefore, the formula (\ref{ito})
for the process $X$ with decomposition (\ref{dec0}) follows by passage to the limit in (\ref{itod}) using relations (\ref{jumpb})-(\ref{fx12aa}).
Note that in this case the condition (\ref{v}) is not needed.
Let us consider now the general case. Any semimartingale $X$ admits a decomposition $X_t=A_t+M_t$, where $A$ is a process of finite variation and $M$ is a locally square integrable martingale
(such decomposition is not unique, but the continuous martingale parts coincide for all such decompositions of $X$, which is sufficient for our goals), see \cite{J}.
Let $M_t=M_t^c+M^d_t$, where $M^c$ and $M^d$ are continuous and purely discontinuous martingale parts of $M$
respectively. Let $A_t=A_t^c+A_t^d$ be the decomposition of $A$, where $A^c$ and $A^d$ are continuous and purely discontinuous processes of finite variations respectively.
Note that $A^d$ is the sum of its jumps, whereas $M^d$ is the sum of compensated jumps of $M$. So,
we shall use the decomposition
\begin{equation}\label{dec1}
X_t=A_t^c+A_t^d+M_t^c+M_t^d
\end{equation}
for $X$ and using localization arguments, without loss of generality, one can assume that $M^c$ and $M^d$ are square integrable martingales.
Let $M^d_t(n)$ be the compensated sum of jumps of $M$ of amplitude greater than $1/n$, which is a martingale of finite variation and
is expressed as a difference
\begin{equation}\label{jump}
M^d_t(n)=B^n_t-\widetilde{ B_t^n},
\end{equation}
where $B^n_t=\sum_{s\le t}\Delta M_sI_{(|\Delta M_s|\ge 1/n)}$ and $\widetilde{B^n}$ is the dual predictable projection of $B^n$. It
can be expressed also as compensated stochastic integral
(see \cite{DM})
$$
M^d_t(n)=\int_0^tI_{(|\Delta M_s|>\frac{1}{n})}{}_{\overset{\bullet}C}dM_s,
$$
where by $H{}_{\overset{\bullet}C}Y$ we denote the compensated stochastic integral.
Since
$$
M^d_t(n)-M_t^d=\int_0^tI_{(0<|\Delta M_s|\le\frac{1}{n})}{}_{\overset{\bullet}C}dM_s,
$$
it follows from Doob's inequality and from \cite{DM} (theorem 33, Ch.VIII) that
$$
E\sup_{s\le t}|M_s^d(n)-M_s^d|^2\le const E[M^d(n)-M^d]_t= const E[I_{(0<|\Delta M|\le\frac{1}{n})}{}_{\overset{\bullet}C}M]
$$
$$
\le const E\int_0^tI_{(0<|\Delta M_s|\le\frac{1}{n})}d[M]_s\to 0, \;\;\;as\;\;n\to\infty
$$
by dominated convergence theorem, since $E[M^d]_T<\infty$.
Hence
\begin{equation}\label{md}
\sup_{s\le t}|M^d_s(n)-M^d_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
for some subsequence, for which we preserve the same notation.
Let
$$
A_t^d(n)=\sum_{s\le t}I_{(|\Delta A_s|>\frac{1}{n})}\Delta A_s=\int_0^tI_{(|\Delta A_s|>\frac{1}{n})}dA_s.
$$
Since
$$
|A^d_t-A_t^d(n)|\le\int_0^tI_{(0<|\Delta A_s|\le\frac{1}{n})}|dA_s|
$$
we have that
\begin{equation}\label{ad}
\sup_{s\le t}|A^d_s(n)-A^d_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
Let
$$
X^n_t= A^c_t+A_t^d(n)+M_t^d(n)+M_t^c.
$$
Relations (\ref{md}) and (\ref{ad}) imply that
\begin{equation}\label{x}
\sup_{s\le t}|X^n_s-X_s|\to 0, \;\;\;as\;\;n\to\infty,\;\;\; a.s.,
\end{equation}
Thus, $X^n$ is a sum of continuous local martingale $M^c$ and a process of finite variation $A^c_t+A_t^d(n)+M_t^d(n)$
which admits only finite number of jumps for every $n\ge 1$.
Therefore, as it is already proved,
$$
f(t,X^n)=f(0,X^n)+\int_0^tf^0(s,X^n)ds+\int_0^tf^1(s-,X^n)dX_s
$$
$$
+\int_0^tf^1(s-,X^n)d(M_s^d(n)-M_s^d)+\int_0^tf^1(s-,X^n)d(A_s^d(n)-A_s^d)
$$
$$
+\frac{1}{2}\int_0^tf^{1,1}(s, X)d\langle X^c\rangle_s
$$
\begin{equation}\label{fxnv}
+\sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s\big).
\end{equation}
By continuity of $f, f^0$ and $f^1$
\begin{equation}\label{fxn1}
f(t,X^n)\to f(t,X),\;\;\;as\;\;n\to\infty,\;\;\; a.s.,
\end{equation}
\begin{equation}\label{fxn22}
\int_0^tf^0(s,X^n)ds\to\int_0^tf^0(s,X)ds\;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
by the dominated convergence theorem and
\begin{equation}\label{fxn4}
\int_0^tf^1(s-,X^n)dX_s\to\int_0^tf^1(s-,X)dX_s\;\;\;as\;\;n\to\infty,\;\;\; a.s.
\end{equation}
by the dominated convergence theorem for stochastic integrals (using the same arguments as in (\ref{fxn22a})- (\ref{fxn4a})).
By properties of compensated stochastic integrals
$$
\int_0^tf^1(s-,X^n)d(M^d_s(n)-M^d_s)=\int_0^tf^1(s-,X^n)I_{(0<|\Delta M_s|\le\frac{1}{n})}{}_{\overset{\bullet}C}dM_s
$$
and using theorem 33, Ch. VIII from \cite{DM}
$$
E\big(\int_0^tf^1(s-,X^n)I_{(0<|\Delta M_s|\le\frac{1}{n})}{}_{\overset{\bullet}C}dM_s\big)^2
$$
\begin{equation}\label{fx}
\le const E\int_0^t(f^1(s-,X^n))^2I_{(0<|\Delta M_s|\le\frac{1}{n})}d[M^d]_s\to 0\;\;\;as\;\;n\to\infty
\end{equation}
by dominated convergence theorem, since
$\sup_{n, s\le t}(f^1(s,X^n))^2$ is locally bounded (by Lemma A1 from appendix), $I_{(0<|\Delta M_s|\le\frac{1}{n})}\to 0$ and $E[M^d]_T<\infty$.
Similarly, $\int_0^tf^1(s-,X^n)d(A_s^d(n)-A_s^d)$ also tends to zero, since
\begin{equation}\label{fxan}
\int_0^tf^1(s-,X^n)d(A_s^d(n)-A_s^d)\le \int_0^t|f^1(s-,X^n)|I_{(0<|\Delta A_s|\le\frac{1}{n})}|dA_s|\to 0.
\end{equation}
From (\ref{jump})
$$
\Delta M^d_s(n)=\Delta M_sI_{(|\Delta M_s|\ge 1/n)} - \big( \Delta MI_{(|\Delta M|\ge 1/n)}\big)_s^p,
$$
where $Y^p$ is the usual projection of $Y$. Here we used the fact that the jump of the dual projection of $B^n$ is
the usual projection of the jump, i.e. $\Delta\widetilde{B^n_t}=(\Delta B^n)_t^p$. Therefore,
using condition (\ref{v}) we have that
$$
|f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s|\le const. (\Delta X^n_s)^2
$$
$$
= const. \big(\Delta A_sI_{(|\Delta A_s|\ge 1/n)}+\Delta M_sI_{(|\Delta M_s|\ge 1/n)} - ( \Delta MI_{(|\Delta M|\ge 1/n)})_s^p\big)^2
$$
\begin{equation}\label{jump2}
\le 3 const.\big( (\Delta A_s)^2+(\Delta M_s)^2+ E( (\Delta M_s)^2/F_{s-})\big).
\end{equation}
Since it follows from (\ref{x}) and continuity of $f$ and $f^1$ that
$$
f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s\to f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s
$$
and
$$
\sum_{s\le t}\big ( (\Delta A_s)^2+(\Delta M_s)^2+ E( (\Delta M_s)^2/F_{s-})\big ) < \infty,
$$
the dominated convergence theorem implies that
$$
\sum_{s\le t}\big(f(s,X^n)-f(s-,X^n)-f^1(s-,X^n)\Delta X^n_s\big)
$$
\begin{equation}\label{jump3}
\to\sum_{s\le t}\big(f(s,X)-f(s-,X)-f^1(s-,X)\Delta X_s\big), \;\;\;as\;\;\;n\to\infty.
\end{equation}
Therefore, passing to the limit in (\ref{fxnv}) it follows from (\ref{fxn1})-(\ref{jump3}) that (\ref{ito}) holds.\qed
Now we give one application of the change of variable formula (\ref{ito}) to the convergence of stochastic integrals.
If $g(t,x), t\ge0, x\in R$, is a function of two variables admitting continuous partial derivatives $\partial g(t,x)/\partial t$, $\partial g(t,x)/\partial x$,
and $V^n$ is a sequence of processes of finite variation
converging to the Wiener process, then it was proved by Wong and Zakai \cite{WZ} that the sequence of ordinary integrals $\int_0^tg(s,V^n_s)dV^n_s$
converges to the Stratonovich stochastic integral. The following assertion generalizes this result to non-anticipative functionals $g(t,\omega)$.
{\bf Corollary}. Assume that $f(t,\omega)$ is differentiable in the sense of Definition 1 and that there is a functional $F(t,\omega)$, continuous on $[0,T]\times D([0,T])$, such that
\begin{equation}\label{str0}
F(t,\omega)=\int_0^tf(s-,\omega)d\omega_s
\end{equation}
for all $\omega\in{\cal V}_{[0,T]}$.
Let $X$ be a cadlag semimartingale and let $(V^n,n\ge1)$ be a sequence of processes of finite variation converging to $X$ uniformly on $[0, T]$.
Then
\begin{equation}\label{str}
\lim_{n\to\infty}\int_0^tf(s-, V^n)dV^n_s= \int_0^tf(s-,X)dX_s
+\frac{1}{2}\int_0^tf^{1}(s, X)d\langle X^c\rangle_s.
\end{equation}
Proof: By continuity of $F$ and (\ref{str0})
\begin{equation}\label{str1}
\lim_{n\to\infty}\int_0^tf(s-, V^n)dV^n_s=\lim_{n\to\infty}F(t,V^n)=F(t,X).
\end{equation}
It is evident that
$$
F^1(t,\omega)=f(t,\omega),\;\; F^0(t,\omega)=0\;\;\text{and}\;\;\;F(t,\omega)-F(t-,\omega)-F^1(t-,\omega)\Delta\omega_t=0.
$$
Thus, $F$ is two times differentiable in the sense of Definition 1 and condition (\ref{v}) is automatically satisfied.
Therefore, by the It\^o formula (\ref{ito}),
$$
F(t,X)=\int_0^tf(s-,X)dX_s
+\frac{1}{2}\int_0^tf^{1}(s, X)d\langle X^c\rangle_s,
$$
which, together with (\ref{str1}), implies the convergence (\ref{str}).
\section{The relations between various definitions of functional derivatives}
Following Dupire \cite{Dupire} we define time and space derivatives, also called horizontal and vertical derivatives, of non-anticipative functionals.
{\bf Definition 2}.
A non-anticipative functional $f(t,\omega)$ is said to be horizontally differentiable at $(t,\omega)\in\Lambda$ if the limit
\begin{equation}
\label{hatpat} \partial_t f(t,\omega):=
\lim_{h\to0, h>0} \frac
{1}{h} \bigl[f (t+h,\omega^t )-f (t, \omega) \bigr],\qquad t<T,
\end{equation}
exists. If $ \partial_t f(t,\omega)$ exists for all $(t,\omega)\in\Lambda$, then the non-anticipative functional $\partial_t f$ is called the
horizontal derivative of $f$.
A non-anticipative functional $f(t,\omega)$ is vertically differentiable at $(t,\omega)\in\Lambda$ if
\begin{equation}
\label{hatpax} \partial_{\omega} f(t,\omega):=
\lim_{h\to0}\frac
{1}{h} \bigl[ f(t,\omega+ h
1_{[t,T]}) - f(t,\omega) \bigr],
\end{equation}
exists. If $f$ is vertically differentiable at all $(t,\omega)\in\Lambda$ then the map $\partial _\omega f :\Lambda\to R$ defines a non-anticipative map,
called the vertical derivative of $f$.
Similarly one can define
\begin{equation}
\partial_{\omega\omega}f:= \partial_{\omega}(\partial_{\omega} f).
\end{equation}
Define $C^{1,k}([0, T )\times \Omega)$ as the set of functionals $f$ which
are
\begin{itemize}
\item horizontally differentiable with $\partial_t f$ continuous at fixed times,
\item $k$ times vertically differentiable with continuous $\partial_{\omega}^k f$.
\end{itemize}
The following assertion follows from the generalized It\^o formula for cadlag semimartingales proved in \cite{CF1} (see also \cite{LScS}).
\begin{thr}
Let $f\in C^{1,1}([0,T]\times \Omega)$. Then for all $(t,\omega)\in [0,T]\times \cal V$
\beginaa
f(t,\omega)=f(0,\omega)+\int_0^t\partial_t f(s,\omega)ds+\int_0^t\partial_\omega f(s-,\omega)d\omega_s\\
+\sum_{s\le t}(f(s,\omega)-f(s-,\omega)-\partial_{\omega}f(s-,\omega)\Delta\omega_s)
\endaa
and $f(t,\omega)\in\cal V$ for all $\omega\in\cal V$.
\end{thr}
{\bf Corollary}. If $f\in C^{1,1}([0,T]\times \Omega)$, then $f$ is differentiable in the sense of Definition 1 and
$$
\partial_tf=f^0,\;\;\;\;\partial _\omega f= f^1.
$$
In order to compare Dupire's derivatives with Chitashvili's derivative (the derivative in the sense of Definition 1), we introduce another type of
vertical derivative where, unlike Dupire's derivative $\partial_\omega f$, the path deformation of
continuous paths remains continuous.
{\bf Definition 3}.
We say that a non-anticipative functional $f(t,\omega)$ is vertically differentiable, and denote this differential by $D_\omega f(t,\omega)$, if
the limit
\begin{equation}
D_\omega f(t,\omega):=\lim_{h\to0, h>0}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h},
\end{equation}
exists for all $(t,\omega)\in [0,T]\times \Omega$, where
$$
\chi_{t,h}(s)=(s-t)1_{(t,t+h]}(s)+h1_{(t+h,T]}(s).
$$
The second order derivative is defined similarly:
$$
D_{\omega\omega}f=D_\omega(D_\omega f).
$$
Note that, if $f(t,\omega)=g(\omega_t)$ for any $\omega\in D[0,T]$, where $g=(g(x), x\in R)$ is a differentiable function, then
$D_\omega f(t,\omega)$ (as well as $\partial _\omega f(t,\omega)$) coincides with $g'(\omega_t)$.
\begin{prop}\label{11}
Let $f\in C([0,T]\times \Omega)$ be differentiable in the sense of Definition 1, i.e., there exist $f^0, f^1\in C([0,T]\times \Omega)$
such that for all $(t,\omega)\in [0,T]\times\cal V$
\begin{equation}\label{xv22}
f(t,\omega)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_0^tf^1(s-,\omega)d\omega_s + V(t,\omega),
\end{equation}
where
$$
V(t,\omega):=\sum_{s\le t}\big[f(s,\omega)-f(s-,\omega)-f^1(s-,\omega)\Delta\omega_s\big]
$$
is of finite variation for all $\omega\in {\cal V}$.
Then for all $(t,\omega)\in [0,T]\times D([0,T])$
\begin{equation}
f^0(t,\omega)=\partial_t f(t,\omega)\;\;\;\;\text{and}\;\;\;\; f^1(t,\omega)=D_\omega f(t,\omega).
\end{equation}
\end{prop}
{\it Proof.} Since $\omega^t$ is constant on $[t,T]$ and $f(t,\omega^t)=f(t,\omega)$, if $s\le t$, from (\ref{xv22}) we have that for any $ \omega\in{\cal V}$
\begin{equation}\label{xv23}
f(t+h,\omega^t)=f(0,\omega)+\int_0^tf^0(s,\omega)ds+\int_t^{t+h}f^0(s,\omega^t)ds+
\end{equation}
$$
+ \int_0^tf^1(s-,\omega)d\omega_s + V(t,\omega)
$$
and
\begin{equation}\label{xv24}
f(t+h,\omega^t+\chi_{t,h})=f(0,\omega)+\int_0^tf^0(s,\omega)ds+ \int_0^tf^1(s-,\omega)d\omega_s+
\end{equation}
$$
+\int_t^{t+h}f^0(s,\omega^t+\chi_{t,h})ds+ \int_t^{t+h}f^1(s,\omega^t+\chi_{t,h})ds+ V(t,\omega).
$$
Therefore
$$
\partial_t f(t,\omega)=\lim_{h\to0}\frac{f(t+h,\omega^{t})-f(t,\omega)}{h}=
$$
$$
=\lim_{h\to0}\frac{1}{h}\int_{t}^{t+h}f^0(s,\omega^{t})ds= f^0(t,\omega)
$$
by continuity of $f^0$.
It is evident that $\chi_{t,h}(s)\le h$ and
$$\frac{\chi_{t,h}(s)-\chi_{t,0}(s)}{h}=\frac{\chi_{t,h}(s)}{h}\to 1_{[t,T]}(s)\;as\;h\to0+,\;\forall s\in [0,T].$$
Therefore, relations (\ref{xv24})-(\ref{xv23}) and the continuity of $f^1$ and $f^0$ imply that
\beginaa
D_\omega f(t,\omega)=\lim_{h\to0}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h}=\\
=\lim_{h\to0}\frac{1}{h}\int_{t}^{t+h}\big(f^0(s,\omega^{t}+\chi_{t,h})-f^0(s,\omega^{t})\big)ds\\
+\lim_{h\to0}\frac{1}{h}\int_{t}^{t+h}f^1(s,\omega^{t}+\chi_{t,h})ds= f^1(t,\omega)
\endaa
for any $\omega\in{\cal V}([0,T])$, and by continuity of $f^1$ this equality is true for all $\omega\in D([0,T])$.
{\bf Remark.}
If $f\in C([0,T]\times \Omega)$ is two times differentiable in the sense of Definition 1, then similarly one can show that
$$
f^{1,1}(t,\omega)=D_{\omega\omega}f(s,\omega).
$$
{\bf Corollary 1.} Let $f\in C^{1,1}([0,T]\times \Omega)$. Then for all $(t,\omega)\in \Lambda$
\beginaa
\partial_\omega f(t,\omega)=f^1(t,\omega)=D_\omega f(t,\omega).
\endaa
In general, $ \partial_\omega f(t,\omega)$ and $D_\omega f(t,\omega)$ are not equal.
{\bf Counterexample 1}. Let $g=(g(x),x\in R)$ be a bounded differentiable function and let $f(t,\omega)=g(\omega_t)-g(\omega_{t-})$. Then $\partial_\omega f(t,\omega)=g'(\omega_t)$
and
\beginaa
D_\omega f(t,\omega)=\lim_{h\to 0+}\frac{f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})}{h}=0,\;
\text{since}
\\ f(t+h,\omega^{t}+\chi_{t,h})-f(t+h,\omega^{t})=g(\omega_t+h)-g(\omega_t+h)-g(\omega_t)+g(\omega_t)=0.
\endaa
It is evident that $f\notin C^{1,1}(\Lambda)$, since $f\notin C(\Lambda)$ and
$\partial_t f=\infty.$
The following assertion shows that if $f$ belongs to the class $C^{1,2}(\Lambda)$ of non-anticipative functionals, then $\partial_\omega f (t,\omega)$ and $\partial_{\omega\omega} f (t,\omega)$
are uniquely determined by the restriction of $f$ to continuous paths. This assertion was proved by Cont and Fournie \cite{CF} (see also \cite{BCC}) in a complicated way.
We give a simple proof based on Proposition 1.
{\bf Corollary 2.} Let $f^1$ and $f^2$ belong to $C^{1,2}(\Lambda)$ in the Dupire sense and
\begin{equation}\label{f1f2}
f^1(t,\omega)=f^2(t,\omega)\;\;\;\;\text{for all}\;\;\;(t,\omega)\in [0,T]\times C([0,T]).
\end{equation}
Then
\begin{equation}\label{f12}
\partial_\omega f^1(t,\omega)=\partial_\omega f^2(t,\omega),\;\;\;\partial_{\omega\omega} f^1(t,\omega)=\partial_{\omega\omega}f^2(t,\omega)
\end{equation}
for all $(\omega,t)\in [0,T]\times C([0,T])$.
{\it Proof}. By Theorem 2
\begin{equation}\label{bv}
f^i(t,\omega)=f^i(0,\omega)+\int_0^t\partial_t f^i(s,\omega)ds+\int_0^t\partial_\omega f^i(s,\omega)d\omega_s\;\;\;i=1,2,
\end{equation}
for all $\omega\in C([0,T])\cap{\cal V}([0,T])$.
It follows from Proposition 1 that
$$
\partial_\omega f^i(t,\omega)=D_\omega f^i(t,\omega);\;\;\; i=1,2.
$$
Since $\omega^{t}+\chi_{t,h}\in C([0,T])$ if $\omega\in C([0,T])$, by the definition of $D_\omega$ and equality (\ref{f1f2}) we have
\begin{equation}\label{df12}
D_\omega f^1(t,\omega)=D_\omega f^2(t,\omega)\;\;\;\;\text{for all}\;\;\;(t,\omega)\in [0,T]\times C([0,T]),
\end{equation}
which implies that
\begin{equation}\label{f12a}
\partial_\omega f^1(t,\omega)=\partial_\omega f^2(t,\omega)\;\;\;\;\text{for all}\;\;\;(t,\omega)\in [0,T]\times C([0,T]).
\end{equation}
It is evident that $\partial_t f^1(t,\omega)=\partial_t f^2(t,\omega)$ for all $(\omega,t)\in [0,T]\times C([0,T])$. Therefore, comparing the It\^o formulas (\ref{itoc}) for
$f^1(t,\omega)$ and $f^2(t,\omega)$, we obtain that
$$
\int_t^u\partial_{\omega\omega}f^1(s,\omega)d\langle\omega\rangle_s=\int_t^u\partial_{\omega\omega}f^2(s,\omega)d\langle\omega\rangle_s
$$
for any continuous semimartingale $\omega$. Dividing both parts of this equality by $\langle\omega\rangle_u-\langle\omega\rangle_t$ and passing to the limit as $u\to t$, we obtain that
$\partial_{\omega\omega} f^1(t,\omega)=\partial_{\omega\omega}f^2(t,\omega)$ for any continuous semimartingale, and by continuity of $\partial_{\omega\omega} f^1(t,\omega)$ and $\partial_{\omega\omega}f^2(t,\omega)$ this equality
holds for all $\omega\in C([0,T])$.
\begin{prop}\label{33}
Let $f\in C([0,T]\times \Omega)$ be differentiable in the sense of Definition 1 and
\begin{equation}\label{xv25}
\left|f(t,\omega)-f(t-,\omega)-\Delta\omega_tf^1(t-,\omega)\right|\le K|\Delta\omega_t|^2
\end{equation}
for some $K>0$.
Then
$$
f^0(t,\omega)=\partial_t f(t,\omega), \;\;\;\;\forall(t,\omega)\in \Lambda,
$$
$$
f^1(t,\omega)=\partial_\omega f(t,\omega),\;\;\;\;\forall \omega\in C[0,T]
$$
(or for all $\omega$ continuous at $t$).
\end{prop}
{\it Proof.} For $\omega\in D[0,T]$ let
$\tilde\omega_s=\omega_s$ if $s<t$ and $\tilde\omega_s=\omega_{s-}+h$ if $s\ge t$, i.e. $\tilde\omega=\omega^{t-}+h1_{[t,T]}$, hence $\Delta\tilde\omega_t=h$.
Therefore, using condition (\ref{xv25}) for $\tilde\omega$ we have
\beginaa
\left|\frac{f(t,\omega^{t-}+h1_{[t,T]})-f(t-,\omega)}{h}-f^1(t-,\omega)\right|\le K|h|,\;\forall h.
\endaa
It follows from here that
$$
\lim_{h\to0}\frac{f(t,\omega^{t-}+h1_{[t,T]})-f(t-,\omega)}{h}=f^1(t-,\omega),
$$
which implies that
$f^1(t,\omega)=\partial_\omega f(t,\omega)$ if $\omega$ is continuous at $t$.
The equality $f^0(t,\omega)=\partial_tf(t,\omega), \forall(t,\omega)\in \Lambda$, is proved in Proposition 1.\qed
Now we introduce a definition of space derivatives which can be calculated pathwise along differentiable paths.
Using such derivatives, in Theorem 3 below a change of variables formula for functions of finite variation is proved, which
gives sufficient conditions for the existence of derivatives in the Chitashvili sense.
{\bf Definition 4}.
We say that a non-anticipative functional $f(t,\omega)$ is differentiable if the limits $f_t,\;f_\omega\in C(\Lambda)$ exist, where
\beginaa
f_t(t,\omega)=\lim_{h\to0, h>0}\frac{f(t+h,\omega^{t})-f(t,\omega)}{h}, \;\;\;\; \forall(t,\omega)\in [0,T]\times D[0,T]\\
f_{\omega}(t,\omega)=\lim_{h\to0, h>0}\frac{f(t+h,\omega)-f(t+h,\omega^{t})}{\omega_{t+h}-\omega_t},\;\;\;\;\forall(t,\omega)\in [0,T]\times C^1[0,T].\\
\endaa
\begin{prop}\label{22}
Let $f$ be differentiable in the sense of Definition 4.
Then $\forall(t,\omega)\in [0,T]\times C^1[0,T]$
\beginq\label{itt}
f(t,\omega)-f(0,\omega)=\int_0^tf_t(s,\omega)ds+\int_0^tf_\omega(s,\omega)d\omega_s.
\endq
\end{prop}
{\it Proof}. We have
\beginaa
\lim_{h\to0, h>0}\frac{f(t+h,\omega)-f(t,\omega)}{h}\\
=
\lim_{h\to0+}\frac{f(t+h,\omega)-f(t+h,\omega^{t})}{{\omega_{t+h}-\omega_t}}\times \frac{{\omega_{t+h}-\omega_t}}{h}
\\
+\lim_{h\to0+}\frac{f(t+h,\omega^{t})-f(t,\omega)}{h}
=\omega'(t)f_\omega(t,\omega)+f_t(t,\omega),\\
\forall(t,\omega)\in [0,T]\times C^1[0,T].
\endaa
Hence the right derivative of
\beginaa
f(t,\omega)-f(0,\omega)-\int_0^tf_t(s,\omega)ds-\int_0^tf_\omega(s,\omega)\omega'_sds
\endaa
is zero for each $\omega\in C^1$. By Lemma A2 of the appendix, formula (\ref{itt}) is satisfied.
\begin{thr}
Let $f\in C(\Lambda)$ and let $f_t,f_\omega\in C(\Lambda)$ be derivatives in the sense of Definition 4.
Assume also that for any $\omega\in\cal V$
$$
\sum_{s\le t}|f(s,\omega)-f(s-,\omega)|<\infty.
$$
Then
\beginaa
f(t,\omega)=f(0,\omega)+\int_0^tf_t(s,\omega)ds+\int_0^tf_\omega(s,\omega)d\omega_s^c\\
+\sum_{s\le t}(f(s,\omega)-f(s-,\omega)).
\endaa
\end{thr}
{\it Proof}.
For $\omega\in \cal V$ we have $\omega=\omega^c+\omega^d,\;\omega^d=\sum_{s\le t}\Delta\omega_s,\;\omega^c\in C$. Set
$$\omega^{d,n}=\sum_{s\le t,|\Delta\omega_s|>\frac1n}\Delta\omega_s,\;\omega^n=\omega^c+\omega^{d,n}.$$
It is evident that, as $n\to\infty$,
$$
|\omega^n-\omega|_T=|\omega^d-\omega^{d,n}|_T=\max_t\big|\int_0^t1_{(|\Delta\omega_s|\le\frac1n)}d\omega_s^d\big|\le\int_0^T1_{(|\Delta\omega_s|\le\frac1n)}dvar_s(\omega^d)\to 0.
$$
We know that discontinuity points of $f$ are also discontinuity points of $\omega$.
Let $\{t_1<...<t_k\}=\{s:|\Delta\omega_s|>\frac1n\}\cup\{0,T\}$. Denote by $\omega^{\varepsilon}\in C^1$ a differentiable approximation of $\omega^c$, such that $var_T(\omega^\varepsilon-\omega^c)<\varepsilon$, and
let $\omega^{n,\varepsilon}=\omega^\varepsilon+\omega^{d,n}$.
Then by Proposition \ref{22}
\beginaa
f(t,\omega^{n,\varepsilon})-f(t_i,\omega^{n,\varepsilon})-\int_{t_i}^tf_t(s,\omega^{n,\varepsilon})ds-\int_{t_i}^tf_\omega(s,\omega^{n,\varepsilon})\omega^{'\varepsilon}_sds=0,\;t\in[t_i,t_{i+1})
\endaa
and
\beginaa
f(T,\omega^{n,\varepsilon})-f(0,\omega^{n,\varepsilon})=\sum_{i\ge 1} \big(f(t_{i},\omega^{n,\varepsilon})-f(t_{i-1},\omega^{n,\varepsilon})\big)\\
=\sum \big (f(t_{i}-,\omega^{n,\varepsilon})-f(t_{i-1},\omega^{n,\varepsilon})\big )+\sum \big (f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big )\\
=\sum\int_{t_{i-1}}^{t_i}f_t(s,\omega^{n,\varepsilon})ds+\sum\int_{t_{i-1}}^{t_i}f_\omega(s,\omega^{n,\varepsilon})\omega^{'\varepsilon}_sds+\sum \big (f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big )\\
=\sum\int_{t_{i-1}}^{t_i}f_t(s,\omega^{n,\varepsilon})ds+\sum\int_{t_{i-1}}^{t_i}f_\omega(s,\omega^{n,\varepsilon})d\omega_s^{n,\varepsilon}\\
-\sum f_\omega(t_i-,\omega^{n,\varepsilon})\Delta\omega^{n,\varepsilon}_{t_i}+\sum \big (f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big )\\
=\int_0^Tf_t(s,\omega^{n,\varepsilon})ds+\int_0^Tf_\omega(s,\omega^{n,\varepsilon})d\omega^{n,\varepsilon}_s\\
+\sum\big ( f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})-f_\omega(t_i-,\omega^{n,\varepsilon})\Delta\omega^{n,\varepsilon}_{t_i}\big )\\
=\int_0^Tf_t(s,\omega^{n,\varepsilon})ds+\int_0^Tf_\omega(s,\omega^{n,\varepsilon})d\omega_s^\varepsilon+\sum\big ( f(t_i,\omega^{n,\varepsilon})-f(t_i-,\omega^{n,\varepsilon})\big ).
\endaa
Since $f(t,\omega^{n,\varepsilon})$ admits a finite number of jumps and $\sup_\varepsilon var_T\omega^\varepsilon<\infty$, passing to the limit as $\varepsilon\to0$ we get
\beginaa
f(T,\omega^{n})-f(0,\omega^{n})\\
=\int_0^Tf_t(s,\omega^{n})ds+\int_0^Tf_\omega(s,\omega^{n})d\omega_s^c+\sum\big ( f(t_i,\omega^n)-f(t_i-,\omega^n)\big ).
\endaa
By the continuity of the functionals
$f,\;f_t,\;f_\omega$ and Lemma A1 from the appendix,
$$f(t,\omega^n)\to f(t,\omega),\;\int_0^tf_t(s,\omega^n)ds\to \int_0^tf_t(s,\omega)ds,
$$
$$\int_0^tf_\omega(s,\omega^n)d\omega_s^c\to \int_0^tf_\omega(s,\omega)d\omega_s^c,\;as\;n\to\infty.$$
It remains to show convergence of the sum. Since\\
$f^d(t,\omega)=\sum_{s\le t}(f(s,\omega)-f(s-,\omega))$ is of finite variation,
\beginaa
f^d(t,\omega)-\sum \big (f(t_i,\omega)-f(t_i-,\omega)\big )\\
=\sum_{s\le t} (f(s,\omega)-f(s-,\omega))1_{(|\Delta\omega_s|\le\frac1n)}\\
=\int_0^t1_{(|\Delta\omega_s|\le\frac1n)}df^d(s,\omega)\to 0,\; as\;n\to\infty,
\endaa
by the dominated convergence theorem.
\section{Appendix}
The following lemma is a modification of Lemma 6 of \cite{LScS}.
{\bf Lemma A1}. Let $X_n,X\in \Omega$ be a sequence of paths such that $||X_n-X||_T\to 0$ as $n\to\infty$. Let $f\in C(\Lambda)$.
Then
$$\sup_{t\le T}|f(t,X_n)-f(t,X)|\overset{n\to\infty}\to 0.$$
{\it Proof}.
If not, then there exist $\varepsilon > 0$, a sequence of integers $n_k, k=1,...$, and a sequence $s_k\in [0, T ]$ such that
\beginq\label{uni}
|f(s_k, X_{n_k})-f(s_k, X)|
\ge \varepsilon.
\endq
By moving to a subsequence we can assume without loss of generality that either $s_k \to s^*,\;s_k\ge s^*$ or $s_k \to s^*,\;s_k< s^*$ for some $s^*\in[0, T]$.
In the first case, by the continuity assumption we get
\beginaa
|f(s_k, X_{n_k})-f(s_k, X)|\le |f(s_k, X_{n_k})-f(s^*, X)|\\+|f(s_k, X)-f(s^*, X)|
\to 0,
\endaa
since $d_\infty((s_k,X_{n_k}),(s^*, X))\to 0,\;d_\infty((s_k,X),(s^*, X))\to 0$.
In the second case we have
\beginaa
|f(s_k, X_{n_k})-f(s_k, X)|
\le |f(s_k, X_{n_k})-f(s^*, X^{s^*-})|\\
+|f(s_k, X)-f(s^*, X^{s^*-})|\to 0,
\endaa
since $d_\infty((s_k, X_{n_k}),(s^*, X^{s^*-}))\to 0,\;d_\infty((s_k, X),(s^*, X^{s^*-}))\to 0$.
This contradicts (\ref{uni}).
We shall also need the following assertion.
{\bf Lemma A2}. Let $f$ be a real-valued, continuous function, defined on an arbitrary interval $I$ of the real line. If $f$ is right (or left) differentiable
at every point $a \in I$ which is not the supremum (infimum) of the interval, and if this right (left) derivative is always zero, then $f$ is constant.
{\it Proof}.
For a proof by contradiction, assume there exist $a < b$ in $I$ such that $f(a) \neq f(b)$. Then
\beginaa
\varepsilon :={\frac {|f(b)-f(a)|}{2(b-a)}}>0.
\endaa
Define $c$ as the infimum of all those $x$ in the interval $(a,b]$ for which the difference quotient of $f$ exceeds $\varepsilon$ in absolute value, i.e.
\beginaa
c=\inf\{\,x\in (a,b]\mid |f(x)-f(a)|>\varepsilon (x-a)\,\}.
\endaa
Due to the continuity of $f$, it follows that $c < b$ and $|f(c)-f(a)|=\varepsilon(c-a)$. At $c$ the right derivative of $f$ is zero by assumption, hence there exists $d$ in the interval $(c,b]$
with $|f(x)-f(c)|\le\varepsilon(x-c)$ for all $x \in (c,d]$. Hence, by the triangle inequality,
\beginaa
|f(x)-f(a)|\leq |f(x)-f(c)|+|f(c)-f(a)|\leq \varepsilon (x-a)
\endaa
for all $x$ in $[c,d)$, which contradicts the definition of $c$.
\begin{thebibliography}{50}
\bibitem{ahn} H. Ahn, Semimartingale integral representation, Ann. Probab., 25 (1997), pp. 997--1010.
\bibitem{BCC}
V. Bally, L. Caramellino and R. Cont, Stochastic integration by parts and functional Ito calculus, Advanced Courses in Mathematics (CRM Barcelona),
Birkhauser/Springer, 2016.
\bibitem{Ch} R. Chitashvili, Martingale ideology in the theory of controlled
stochastic processes. {\em Probability theory and mathematical statistics.
Proc. $4$th USSR-Jap. Symp., Tbilisi,} 1982, {\em Lecture Notes in Math.}
1021, 73--92, {\em Springer, Berlin etc.,} 1983.
\bibitem{CF}
R. Cont and D.-A. Fourni\'{e}, Functional Kolmogorov equations, Working Paper, 2010.
\bibitem{CF1} R. Cont and D.-A. Fourni\'{e}, Change of variables formulas for non-anticipative
functionals on path space, J. Funct. Anal., 259(4), (2010), pp. 1043--1072.
\bibitem{CF2} R. Cont and D.-A. Fourni\'{e}, Functional Ito calculus and stochastic
integral representation of martingales, Annals of Probability, 41 (2013),
pp. 109--133.
\bibitem{DM} C. Dellacherie and P.-A. Meyer, Probabilit\'{e}s et potentiel II,
Hermann, Paris, 1980.
\bibitem{Dupire} B. Dupire, (2009) {\it Functional It\^{o} calculus}, papers.ssrn.com.
\bibitem{ito} K. Ito, On a formula concerning stochastic differentials, Nagoya
Math. J. (1951), v. 3, p. 55--65.
\bibitem{J} J. Jacod, (1979). Calcul stochastique et probl\'{e}mes de
martingales. {\it Lect. Notes in Math.} 714, Springer, Berlin, Heidelberg,
New York.
\bibitem{JMP} A. Jakubowski, J. Memin, and G. Pages, (1989). Convergence en loi des suites
d'int\'egrales stochastiques sur l'espace $D^1$ de Skorokhod.
{\it Probab. Th. Rel. Fields}, V. 81, p. 117--137.
\bibitem{LScS} S. Levental, M. Schroder and S. Sinha, A simple proof of functional It\^o's lemma for semimartingales
with an application, Statistics and Probability Letters, 83 (2013), 2019--2026.
\bibitem{O} H. Oberhauser, The functional Ito formula under the family of continuous semimartingale measures,
{\it Stochastics and Dynamics} Vol. 16, No. 04, (2016), 1650010.
\bibitem{RTZ} Z. Ren, N. Touzi and J. Zhang, An overview of viscosity solutions of path-dependent PDEs, {\it Stochastic analysis and applications}, (2014), pp. 397--453.
\bibitem{T2} R. Tevzadze, Markov dilation of Diffusion Type Processes
and its Applications to the Financial Mathematics, Georgian Math.
Journal, vol. 6, No 4, (1999), 363--378.
\bibitem{WZ} E. Wong and M. Zakai, On the convergence of ordinary integrals to stochastic integrals,
The Annals of Mathematical Statistics,
Vol. 36, No. 5 (1965), pp. 1560--1564.
\end{thebibliography}
\end{document}
\begin{document}
\title{Equality cases in Viterbo's conjecture and isoperimetric billiard inequalities}
\author[A.~Balitskiy]{Alexey~Balitskiy}
\email{balitski@mit.edu}
\thanks{The author is supported by the Russian Foundation for Basic Research Grant 18-01-00036.}
\address{Dept. of Mathematics, Massachusetts Institute of Technology, 182 Memorial Dr., Cambridge, MA 02142}
\address{Dept. of Mathematics, Moscow Institute of Physics and Technology, Institutskiy per. 9, Dolgoprudny, Russia 141700}
\address{Institute for Information Transmission Problems RAS, Bolshoy Karetny per. 19, Moscow, Russia 127994}
\keywords{Billiards, Minkowski norm, Viterbo's conjecture, Permutohedron.}
\begin{abstract}
We apply the billiard technique to deduce some results on Viterbo's conjectured inequality between the volume of a convex body and its symplectic capacity. We show that the product of a permutohedron and a simplex (properly related to each other) delivers equality in Viterbo's conjecture. Using this result as well as previously known equality cases, we prove some special cases of Viterbo's conjecture and interpret them as isoperimetric-like inequalities for billiard trajectories.
\end{abstract}
\maketitle
\section{Introduction}
Claude Viterbo~\cite{viterbo2000metric} conjectured an isoperimetric inequality for any (normalized) symplectic capacity and the volume of a convex body $X\subset \mathbb R^{2n}$:
\begin{equation}
\label{eq:viterbo}
\volu (X) \ge \frac{c(X)^n}{n!}.
\end{equation}
The minimum is supposed to be attained (perhaps, not uniquely) on the symplectic images of the Euclidean ball. This inequality is proven up to a constant factor to the power of $n$ in~\cite{artstein2008m}. We investigate the case when $c$ is the Hofer--Zehnder symplectic capacity $c_{HZ}$.
The computation of symplectic capacities is considered difficult, so we restrict to the case of convex bodies that are Lagrangian products of a component in $V = \mathbb R^n$ with $q$-coordinates and a component in $V^* = \mathbb R^n$ with $p$-coordinates. For such bodies there is a simple geometric interpretation, established in~\cite{artstein2014bounds}, of the Hofer--Zehnder capacity. Namely,
$$
c_{HZ}(K \times T) = \xi_T(K),
$$
where $\xi_T(K)$ denotes the length of the shortest closed billiard trajectory in a convex body $K \subset V$ with geometry of lengths determined by another convex body $T \subset V^*$ by the formula
$$
\|q\|_T = \max_{p\in T} \langle p, q \rangle, \quad q \in V.
$$
Here we assume that $T$ contains the origin in the interior, but we do not assume that $T$ is symmetric, so our ``norm'' function $\|\cdot\|_T$ is not symmetric in general. The shortest closed billiard trajectory will be understood in the sense of K. Bezdek and D. Bezdek~\cite{bezdek2009shortest} as the closed polygonal line of minimal $\|\cdot\|_T$-length not fitting into a translate of the interior of $K$. In Section~\ref{sec:billiard} we introduce this billiard technique in detail. If we take $K$ to be symmetric with respect to the origin, and if we set $T = K^\circ = \{p \in V^*: \langle q, p \rangle \le 1 ~\forall q\in K\}$ to be the polar body of $K$, then inequality~(\ref{eq:viterbo}) becomes
\begin{equation}
\label{eq:mahler}
\volu (K \times K^\circ) \ge \frac{c_{HZ}(K\times K^\circ)^n}{n!},
\end{equation}
which is equivalent (as proven in~\cite{artstein2014from}) to the longstanding Mahler's conjecture in convex geometry. This is one of the reasons why inequality~(\ref{eq:viterbo}) might be considered difficult and interesting.
Our first result concerns certain convex bodies for which the equality in~(\ref{eq:viterbo}) holds and for which we do not know whether they are symplectic balls or not. One known (since~\cite{artstein2014from}) family of such examples consists of bodies $X = H \times H^\circ$, where $H$ is a Hanner (or Hanner--Hansen--Lima) polytope. The Hanner bodies are famous for being minimizers in Mahler's conjectured inequality: they deliver equality in~(\ref{eq:mahler}).
We introduce another family of examples. In Section~\ref{sec:equality}, we prove that
$$
\volu (P_n \times \triangle_n^\circ) = \frac{c_{HZ}(P_n \times \triangle_n^\circ)^n}{n!}.
$$
Here $\triangle_n$ denotes a regular $n$-dimensional simplex centered at the origin; $\triangle_n^\circ$ is its polar simplex; $P_n$ denotes a certain $n$-dimensional permutohedron, which can be thought of as the Minkowski sum of all the edges of $\triangle_n$. For example, permutohedron $P_2$ is a regular hexagon; permutohedron $P_3$ is a truncated regular octahedron whose hexagonal facets are regular hexagons (Figure~\ref{pic:permutohedron}). We discuss multiple definitions of permutohedron, and their relevant properties, in Section~\ref{sec:permutohedron}.
\begin{figure}\label{pic:permutohedron}
\end{figure}
\footnotetext{The figure by w:en:User:Cyp@wikimedia, distributed under CC BY-SA 3.0 license.}
The rest of results in this paper are some very special cases of Viterbo's conjecture. In Section~\ref{sec:inequality}, we prove that Viterbo's conjecture (for $c_{HZ}$) holds for the bodies of the form $K \times \text{(a simplex $\triangle_n$)}$ or $K \times \text{(a parallelotope $\square_n$)}$, where $K \subset \mathbb{R}^n$ is any convex body, and the products are Lagrangian:
$$
\volu (K \times \triangle_n) \ge \frac{c_{HZ}(K \times \triangle_n)^n}{n!},
$$
$$
\volu (K \times \square_n) \ge \frac{c_{HZ}(K \times \square_n)^n}{n!}.
$$
The latter can be interpreted as a sharp isoperimetric-like inequality for billiard trajectories in the $\ell_1$-norm:
$$
\xi_{\square_n}(K) \le \left(2^n n! \volu (K)\right)^{1/n}.
$$
It seems that in other norms (corresponding to a fixed $T$, possibly non-symmetric) there are no known sharp inequalities bounding $\xi_T(K)$ in terms of $\volu K$. For example, in the Euclidean case ($T = B^n$ is the unit Euclidean $n$-ball) the inequality
$$
\xi_{B^n}(K) \le c_n \volu (K) ^{1/n}
$$
holds for any convex body $K \subset \mathbb{R}^n$, possibly non-symmetric. This inequality follows from the results of~\cite{artstein2008m} with factor $c_n = c \sqrt{n}$, for some absolute constant $c$.
But the optimal value of $c_n$, as well as the corresponding ``critical'' body $K$, seems to be unknown even for $n=2$.
\textbf{Acknowledgments.}
The author thanks Roman~Karasev for his constant attention to this work. The author also thanks Felix Schlenk for his helpful comments.
\section{Billiards in a Minkowski norm}
\label{sec:billiard}
Billiard dynamics in Minkowski (more generally, in Finsler) geometry corresponds physically to the propagation of light in a homogeneous anisotropic (respectively, in an inhomogeneous anisotropic) medium, with reflections in the boundary according to the Huygens principle (see~\cite{gutkin2002billiards}). We explain the relevant definitions in this section, modifying them for the non-smooth non-symmetric case.
We work in a pair of $n$-dimensional real vector spaces $V = \mathbb{R}^n$ and $V^* = \mathbb{R}^n$ with a canonical perfect pairing $\langle \cdot, \cdot \rangle$. If we identify $V$ with $V^*$ by sending a basis of $V$ to the dual basis of $V^*$, the pairing $\langle \cdot, \cdot \rangle$ becomes an inner product in $V$. The Euclidean norm $|v| = \sqrt{\langle v, v \rangle}$ will be denoted throughout the paper by the single bars. The double bars will denote a Minkowski norm (or a gauge function), as introduced below.
A \emph{convex body} $K \subset V = \mathbb{R}^n$ is a compact convex set with non-empty interior. The \emph{polar body} to a body $K \subset V$ is defined as $K^\circ = \{p \in V^*: \langle q, p \rangle \le 1 ~\forall q\in K\}$.
Let $V$ be endowed with the ``norm'' with unit ball $T^\circ = \{q \in V: \langle q, p \rangle \le 1 ~\forall p\in T\}$. We follow the notation of~\cite{akopyan2016elementary} and denote this norm by $\|\cdot\|_T$.
By definition, $\|q\|_T = \max\limits_{p \in T} \langle p, q \rangle$, where $\langle \cdot, \cdot \rangle : V^* \times V \rightarrow \mathbb{R}$ is the canonical bilinear form of the duality between $V$ and $V^*$. Here we assume that $T$ contains the origin but is not necessarily centrally symmetric. Therefore, our norms might not be symmetric; in general, $\|q\|_T \neq \|-q\|_T$. (Sometimes, such ``norms'' are called \emph{gauges}.)
The \emph{momentum} $p \in \partial T \subset V^*$ of the trajectory fragment $q \to q'$ is defined as a linear functional reaching its maximum at $q' - q$. If $T$ is not strictly convex, then there is an ambiguity in the definition of $p$.
The cone $N_K(q)$ of outer normals is defined as
$$
N_K(q) = \{n \in V^*: \langle n, q' - q\rangle \le 0 \ \forall q' \in K\}.
$$
The \emph{generalized reflection law} is the following relation:
\begin{equation}
\label{eq:reflection}
p' - p \in -N_K(q),
\end{equation}
where $p$ and $p'$ stand for the momenta of the billiard trajectory before and after the reflection at the point $q$.
\begin{definition}
\label{def:billiard}
A closed polygonal line $q_0 \to q_1 \to \ldots \to q_m = q_0$, with $q_i \in \partial K$, $q_i \neq q_{i+1}$, will be called a (generalized) \emph{closed billiard trajectory} in the configuration $K\times T$ if one can choose momenta $p_i \in \partial T$ for $q_{i} \to q_{i+1}$, $0 \le i < m$, so that the generalized reflection law (\ref{eq:reflection}) holds for each bounce $q_i$.
\end{definition}
By a \emph{classical billiard trajectory} we mean a trajectory that meets only smooth points of $\partial K$ and smooth points of $\partial T$. Additionally, in what follows we do not allow a classical trajectory to pass the same path multiple times.
We set
$$
\mathcal P_m(K) = \{ (q_1,\ldots, q_m) : \{q_1,\ldots, q_m\} \ \text{doesn't fit into}\ (\inte K + t)\ \text{with} \ t\in V \} =
$$
$$
= \{(q_1,\ldots, q_m) : \{q_1,\ldots, q_m\} \ \text{doesn't fit into}\ (\alpha K+t)\ \text{with}\ \alpha\in (0,1),\ t\in V \}
$$
and
$$
\xi_T(K) = \min_{Q \in \mathcal Q_T(K)} \ell_T(Q),
$$
where $Q = (q_1,\ldots, q_m),\ m \ge 2,$ ranges over the set $\mathcal Q_T(K)$ of all closed generalized billiard trajectories in $K$ with geometry defined by $T$. (Here we denote the length $\ell_T (q_1,\ldots, q_m) = \sum_{i=1}^m \|q_{i+1} - q_i\|_T$.)
It might not be clear if the minimum in the definition of $\xi_T(K)$ is attained, but it is true, and we can say even more:
\begin{theorem}
\label{thm:bezdeks}
For any convex bodies $K \subset V$, $T \subset V^*$ ($T$ is smooth) containing the origins of $V$ and $V^*$ in their interiors, the following holds:
$$
\xi_T(K) = \min_{m\ge 2} \min_{Q\in \mathcal P_m(K)} \ell_T(Q) = \min_{2\le m\le n+1} \min_{Q\in \mathcal P_m(K)} \ell_T(Q).
$$
\end{theorem}
\begin{remark}
\label{rem:xidef}
Theorem~\ref{thm:bezdeks} was proved in~\cite{bezdek2009shortest} in the Euclidean case (when $T$ is the Euclidean ball). In~\cite{akopyan2016elementary} it was generalized for the case of smooth bodies $K$, $T$.
We obtain the formulation above by approximating non-smooth bodies by smooth ones in the Hausdorff metric and passing to the limit.
\end{remark}
\section{Permutohedron properties}
\label{sec:permutohedron}
In this section, we recollect briefly several equivalent definitions of a (regular) permutohedron. We also explain some of its properties that we will need later.
The classical definition is the following one:
\begin{definition}
\label{def:permut1}
The $n$-dimensional permutohedron is the convex hull of points
$$
(\sigma(1), \sigma(2), \ldots, \sigma(n+1)) \in \mathbb{R}^{n+1},
$$
over all permutations $\sigma : \{1,2,\ldots,n+1\} \to \{1,2,\ldots,n+1\}$.
\end{definition}
For the proof of Theorem~\ref{thm:viterboeq}, the following definition will be a convenient one (it can be found in~\cite[Lecture~7.3]{ziegler1995lectures}):
\begin{definition}
\label{def:permut2}
Consider the regular simplex $\triangle_n = \conv\{v_0, \ldots, v_n\} \subset \mathbb{R}^n$ centered at the origin and normalized so that its edges are all of unit (Euclidean) length. The permutohedron $P_n \subset \mathbb{R}^n$ is defined as the Minkowski sum of the simplex edges:
$$
P_n = \sum_{0\le i<j \le n} [v_i,v_j].
$$
\end{definition}
For the proof of Theorem~\ref{thm:viterbosimplex}, the following definition will be of use (it can be found in~\cite[Chapter~21]{conway2013sphere}):
\begin{definition}
\label{def:permut3}
The permutohedron $\widetilde{P_n}$ is defined as the Vorono\u{\i} cell of the lattice
$$
A_n^* = \left\{(x_0, \ldots, x_n) \in \mathbb{Z}^{n+1}: \ \sum\limits_{i} x_i = 0, \ x_0 \equiv \ldots \equiv x_n \pmod{n+1}\right\},
$$
lying in $\{(x_0, \ldots, x_n) \in \mathbb{R}^{n+1}: \ \sum\limits_{i} x_i = 0\} \cong \mathbb{R}^n$.
\end{definition}
\begin{remark}
\label{rem:lattice} Observe that the lattice $A_n^*$ is generated by the vectors
$$
a_1 = (\underbrace{-1, n, -1, \ldots, -1}_{n+1})^t, \ldots, a_{n} = (-1,-1,-1,\ldots, n)^t.
$$
This lattice also contains the vector
$$
a_0 = (\underbrace{n, -1, -1, \ldots, -1}_{n+1})^t = -a_1 - \ldots -a_n.
$$
Those vectors $a_0, a_1, \ldots, a_n$ are the only vectors of the shortest nonzero length in $A_n^*$, as one can check manually.
\end{remark}
All these definitions give the same result up to similarity. The width of $P_n$ equals $n \langle v_0-v_1, e_n\rangle = \sqrt{(n^2+n)/2}$, and the width of $\widetilde{P_n}$ equals $|a_0| = \sqrt{n^2+n}$ (the fact that those lengths are indeed widths will be justified below, in part (3) of Fact~\ref{fact:combifacet}). Comparing the widths, we see that $\widetilde{P_n}$ is $\sqrt{2}$ times larger than $P_n$.
\begin{fact}
\label{fact:volume}
In the notation of Definition~\ref{def:permut2},
$$
\volu P_n = \frac{(n+1)^{n-1/2}}{2^{n/2}}.
$$
\end{fact}
\begin{proof}
We have:
$$
2^{n/2} \volu P_n = \volu \widetilde{P_n} = \det A_n^* = \det \Gamma(a_1, \ldots, a_n)^{1/2} =
\begin{vmatrix}
n^2 + n & -n-1 & -n-1 & \cdots \\
-n-1 & n^2+n & -n-1 & \cdots\\
-n-1 & -n-1 & n^2+n & \cdots \\
\vdots & \vdots & \vdots & \ddots
\end{vmatrix}^{1/2}.
$$
The latter $n\times n$ Gram matrix has an eigenvector $h = (1, \ldots, 1)^t$ corresponding to the simple eigenvalue $n+1$. All the other eigenvectors, orthogonal to $h$, have the same $(n-1)$-fold eigenvalue $(n+1)^2$, as is easy to check by hand. Therefore, $\det \Gamma(a_1, \ldots, a_n) = (n+1)^{2n-1}$, and the result follows.
\end{proof}
Now let us discuss the well-known combinatorial structure of the permutohedron. The following fact is essentially a reformulation of~\cite[Proposition~2.6]{postnikov2009permutohedra}, where it is formulated in terms of Definition~\ref{def:permut1}; we translate it into the language of Definition~\ref{def:permut3}.
\begin{fact}
\label{fact:combipermut}
Let $\widetilde{P_n}$ be the Vorono\u{\i} cell of $A_n^*$ around the origin, as in Definition~\ref{def:permut3}. Fix numbers $y_i = \frac{n}{2} - i$, $0 \le i \le n$.
\begin{enumerate}
\item
The $d$-dimensional faces of $\widetilde{P_n}$ are in one-to-one correspondence with the ordered partitions of $\{0,1,\ldots,n\}$ into $n+1-d$ disjoint subsets. Given a decomposition $\{0,1,\ldots,n\} = B_0 \cup B_1 \cup \ldots \cup B_{n-d}$, the corresponding $d$-face of $\widetilde{P_n}$ consists of all points $(x_0, \ldots, x_n) \in \mathbb{R}^{n+1}$ satisfying for all $I \subseteq \{0,1,\ldots,n\}$
$$
\sum\limits_{i \in I} x_i \le \sum\limits_{j = 0}^{|I|-1} y_j,
$$
and such that the equality is attained (at least) for $I = B_0$, $I = B_0 \cup B_1$, $\ldots$, $I = B_0 \cup B_1 \cup \ldots \cup B_{n-d}$.
\item In particular, the vertices of $\widetilde{P_n}$ are in one-to-one correspondence with the ordered partitions into singletons. Having such a partition $B_0 \cup B_1 \cup \ldots \cup B_{n}$, associate to it the permutation $\pi$ of $\{0,1,\ldots,n\}$ given by $\pi(i) = j$ if $i \in B_j$. Given a permutation $\pi : \{0,1,\ldots,n\} \to \{0,1,\ldots,n\}$, let us write $v_\pi$ for the corresponding vertex. Then $v_\pi$ has coordinates $(y_{\pi(0)}, y_{\pi(1)}, \ldots, y_{\pi(n)})$.
\item The face corresponding to a partition $B$ contains the face corresponding to a partition $B'$ if and only if $B'$ is a refinement of $B$.
\end{enumerate}
\end{fact}
We give more details regarding the facets of $\widetilde{P_n}$.
\begin{fact}
\label{fact:combifacet}
Let $\widetilde{P_n}$ be the Vorono\u{\i} cell of $A_n^*$ around the origin, as in Definition~\ref{def:permut3}.
\begin{enumerate}
\item The facets of $\widetilde{P_n}$ are in one-to-one correspondence with the ordered pairs of sets $(S, \{0,1,\ldots,n\}\setminus S)$, for $\varnothing \neq S \subsetneq \{0,1,\ldots,n\}$. Let us write $F_S$ for the facet corresponding to $(S, \{0,1,\ldots,n\}\setminus S)$.
\item For any $S$, an outward pointing normal for $F_S$ can be chosen from the lattice $A_n^*$. Explicitly, we can take
$$
a_S = \sum_{i \in S} a_i.
$$
as an outer normal to $F_S$. The distance from the origin to $F_S$ equals $\frac{1}{2} |a_S|$.
\item The closest to the origin facets are characterized as follows: they are congruent to an $(n-1)$-dimensional permutohedron, and the corresponding set $S$ is of cardinality $|S|=1$ or $|S|=n$. The width of $\widetilde{P_n}$ is $|a_0|$.
\item Let $v_\pi$ be a vertex of $\widetilde{P_n}$ lying in a facet $F_S$ (so that $\pi(S) = \{0, \ldots, |S|-1\}$). Then $v_\pi-a_S$ is another vertex of $\widetilde{P_n}$, and its corresponding permutation $\pi'$ is obtained from $\pi$ by the left cyclic $|S|$-shift of values (so that $\pi'(S) = \{n - |S| + 1, \ldots, n\}$). That is, $\pi'(i) = \pi(i) - |S| \pmod{n+1}$.
\end{enumerate}
\end{fact}
\begin{proof}
\begin{enumerate}
\item Follows from Fact~\ref{fact:combipermut}, part (1).
\item It follows from the definition of the Vorono\u{\i} tessellation that for any facet $F_S$ of $\widetilde{P_n}$, there is an adjacent Vorono\u{\i} cell touching $\widetilde{P_n}$ by $F_S$. The vector from the origin to the center of this cell is orthogonal to $F_S$ and is cut by $F_S$ into equal halves. This vector belongs to the lattice and can be taken as an outer normal for $F_S$. We want to show that it coincides with $a_S = \sum\limits_{i \in S} a_i$. On one hand, immediately from Fact~\ref{fact:combipermut} we get that all the vectors $a_i-a_j$ where either $i,j\in S$ or $i,j \notin S$, are parallel to $F_S$. Among those $a_i-a_j$, there are $(n-1)$ linearly independent vectors, so they span the tangent hyperplane for $F_S$. Also they are all orthogonal to $\sum_{i \in S} a_i$ (by a direct computation, using $\langle a_i, a_j\rangle = - n - 1 + (n+1)^2\delta_{ij}$). On the other hand, the vector $a_S = \sum\limits_{i \in S} a_i$ is primitive in the sense that $a_S \neq r\lambda$ for $r > 1$, $\lambda \in A_n^*$. The result now follows.
\item The distance from the origin to a facet $F_S$ equals $\frac12 |a_S|$. So the statement follows now from Remark~\ref{rem:lattice}: the shortest nonzero vectors of $A_n^*$ are precisely $a_0, a_1, \ldots, a_n$.
\item Let $v_\pi \in F_S$. The coordinates of $v_\pi$ are $(y_{\pi(0)}, y_{\pi(1)}, \ldots, y_{\pi(n)})$. Then the $i$-th coordinate ($0 \le i \le n$) of $v_\pi - a_S$ is
$$
(v_\pi - a_S)_i =
\begin{cases}
y_{\pi(i)} - n + |S|-1, & i \in S, \\
y_{\pi(i)} + |S|, & i \notin S.
\end{cases}
$$
These are precisely the coordinates of $v_{\pi'}$, where $\pi'(i) = \pi(i) - |S| \pmod{n+1}$.
\end{enumerate}
\end{proof}
Now we use this combinatorial description in order to establish a lemma that we will need in Section~\ref{sec:inequality}.
Consider the vectors $u_0, \ldots, u_n$ such that $|u_0| = \ldots = |u_n| = 1$ and the directions of the $u_i$ are equiangular, that is, the endpoints of $u_0, \ldots, u_n$ form a regular simplex in $\mathbb{R}^n$. Consider the lattice $\Lambda$ generated by the vectors $u_1, \ldots, u_n$. Since $u_0 = -\sum\limits_{i=1}^n u_i$, we have $u_0 \in \Lambda$. Let $P$ be the Vorono\u{\i} cell of $\Lambda$ around the origin. Observe that $\Lambda$ is just a scaled copy of $A_n^*$, so comparing $|u_0| = 1$ with $|a_0| = \sqrt{n^2+n}$ we obtain that $P$ is congruent to $\frac{1}{\sqrt{n^2+n}} \widetilde{P_n}$. We keep all the notation (like $v_\pi$, $F_S$) of Facts~\ref{fact:combipermut},~\ref{fact:combifacet} in the context of $P$.
Consider the tiling $T$ of $\mathbb{R}^n$ that is dual to the Vorono\u{\i} tessellation of $\mathbb{R}^n$ with respect to $\Lambda$. It is also known as the Delaunay tiling (see, e.g.,~\cite[Chapter~32]{gruber2007convex}). Let us recall its construction. The vertices of $T$ are precisely the elements of $\Lambda$. Vertices $\lambda_0, \lambda_1, \ldots, \lambda_d \in \Lambda$, $0 \le d \le n$, form a $d$-face of $T$ if the Vorono\u{\i} cells centered at the $\lambda_i$ have a common $(n-d)$-face.
It turns out that $T$ is in fact a triangulation (i.e., the corresponding polyhedral complex is simplicial). This might not be clear a priori, so we prove it as a part of the following lemma.
\begin{lemma}
\label{lem:equiedge}
Every full-dimensional cell of $T$ is a simplex. Every simplex $\sigma$ of $T$ has the following property: there exists a closed oriented polygonal line $Q_\sigma$ such that
\begin{itemize}
\item $Q_\sigma$ traverses along edges of $\sigma$ and visits every vertex of $\sigma$ once; in particular, $\conv Q_\sigma = \sigma$;
\item the segments of $Q_\sigma$ have unit length and the set of their directions coincides with $\{u_0, \ldots, u_n\}$.
\end{itemize}
\end{lemma}
\begin{proof}
Let $\sigma$ be a full-dimensional cell of $T$ corresponding to a vertex $v$ of the permutohedral tessellation. First, we want to prove that $v$ is adjacent to $n+1$ permutohedra. Let $P'$ and $P''$ be two of them sharing a common facet $F$, and let $\lambda', \lambda'' \in \Lambda$ be their centers. Note that $v - \lambda'$ is a vertex of $P$, the Vorono\u{\i} cell around the origin. We indexed the vertices of $P$ by permutations in Fact~\ref{fact:combipermut}. Let $\pi'$ be the relevant permutation. Similarly we introduce $\pi''$. How are $\pi'$ and $\pi''$ related? Firstly, they are different, because $v_{\pi'} = v - \lambda' \neq v - \lambda'' = v_{\pi''}$. Secondly, if $F - \lambda' = F_{S'}$, then $\lambda''-\lambda'$ is the outer normal for the face $F_{S'}$ of $P$ constructed in the proof of Fact~\ref{fact:combifacet}, part (2). We know that $v_{\pi'} - (\lambda''-\lambda') = v_{\pi''}$, and using Fact~\ref{fact:combifacet}, part (4), we conclude that $\pi''$ is obtained from $\pi'$ by a multiple left cyclic shift of values. Therefore, all the permutohedra adjacent to $v$ correspond to different cyclic shifts of a single permutation of $\{0,1,\ldots,n\}$. Hence, there are $n+1$ of them, and $\sigma$ is a simplex.
Now we construct $Q_\sigma$, for a simplex $\sigma$ of the Delaunay triangulation $T$. Let $v$ be the vertex of the permutohedral tessellation corresponding to $\sigma$. Let $\lambda \in \Lambda$ be a vertex of $\sigma$. We would like to show that among the edges of $\sigma$ incident to $\lambda$, there are precisely two of length 1, and one of them is pointed along a vector $u_i$ while the other is pointed against a vector $u_j$, for some $i\neq j$. Let $\pi$ be the permutation of $\{0,1,\ldots,n\}$ corresponding to the vertex $v - \lambda$ of $P$. Recall from Fact~\ref{fact:combifacet}, part (3), that there are exactly two facets of $P$ containing $v_\pi$ and congruent to an $(n-1)$-dimensional permutohedron. They correspond to the pairs $(\{\pi(0)\}, \{\pi(1), \ldots, \pi(n)\})$ and $(\{\pi(0), \ldots, \pi(n-1)\}, \{\pi(n)\})$. These two facets correspond to the shortest edges of $\sigma$ pointed along, say, $u_i$ and $-u_j$. From Fact~\ref{fact:combifacet}, part (4), we get that the vertices $v_\pi - u_i$ and $v_\pi + u_j$ of $P$ correspond to the left and right cyclic shifts of $\pi$. Let us mark all those (oriented along the $u_0, \ldots, u_n$) edges of $\sigma$ that we found, over all vertices $\lambda$ of $\sigma$. They form a family of oriented cycles passing through each vertex $\lambda \in \sigma$ once. We claim that this family is in fact a single cycle. To see that, we keep track of the permutations $\pi$ corresponding to the current vertex of $\sigma$. As we move along a cycle from the family, these permutations shift cyclically, as described above. Thus, the cycle consists of $(n+1)$ segments, and we found $Q_\sigma$ as desired. The lemma is proven.
\end{proof}
\section{Equality cases in Viterbo's conjecture}
\label{sec:equality}
Consider the regular simplex $\triangle_n = \conv\{v_0, \ldots, v_n\} \subset \mathbb{R}^n$ centered at the origin and normalized so that its edges are all of unit (Euclidean) length. Choose an orthonormal base $(e_1, \ldots, e_n)$ with $e_n$ pointing to $v_0$. Consider also the permutohedron $P_n \subset \mathbb{R}^n$, as in Definition~\ref{def:permut2}:
$$
P_n = \sum_{0\le i<j \le n} [v_i,v_j].
$$
The main result of this section is the following
\begin{theorem}
\label{thm:viterboeq} In the configuration $P_n \times \triangle_n^\circ$ the shortest generalized billiard trajectory has length $\xi_{\triangle_n^\circ}(P_n) = (n+1)^2$. Moreover, $X = P_n \times \triangle_n^\circ$ delivers equality in Viterbo's conjecture (where $P_n$ and $\triangle_n^\circ$ lie in Lagrangian subspaces).
\end{theorem}
This theorem can be viewed as the extension of the first part of the following proposition, proved in~\cite{balitskiy2016shortest}:
\begin{proposition}
\label{prop:triangle}
\begin{enumerate}
\item
In the configuration $P_2 \times \triangle_2^{\circ}$ the shortest generalized billiard trajectory has length
$$
\xi_{\triangle_2^{\circ}}(P_2) = 9.
$$
Hence, $X = P_2 \times \triangle_2^{\circ}$ delivers equality in Viterbo's conjecture.
\item
Any classical billiard trajectory in the configuration
$$
P_2 \times \triangle_2^{\circ}
$$
bounces $4$ times and has length 9.
\item
Arbitrarily close to any point
$$
(q,p) \in \partial(P_2 \times \triangle_2^{\circ})
$$
there passes a certain classical billiard trajectory of minimal length.
\end{enumerate}
\end{proposition}
Another statement of the same spirit, proved in~\cite{balitskiy2016shortest} is the following
\begin{proposition}
\label{prop:hanner}
\begin{enumerate}
\item
In a Hanner polytope $H \subset \mathbb{R}^n$ with geometry specified by its polar $H^{\circ}$, the shortest generalized billiard trajectory has length $\xi_{H^\circ}(H) = 4$, and $X = H\times H^\circ$ delivers equality in Viterbo's conjecture (the latter fact has been known since~\cite{artstein2014from}).
\item
Any classical billiard trajectory in a Hanner polytope $H \subset \mathbb{R}^n$ with geometry specified by its polar $H^{\circ}$ is $2n$-bouncing and has length 4.
\item
Moreover, in an arbitrarily small neighborhood of any point $(q,p) \in \partial (H \times H^{\circ})$, there passes a classical billiard (in the configuration $H \times H^{\circ}$) trajectory of minimal length.
\end{enumerate}
\end{proposition}
F. Schlenk has established that the interior of the Lagrangian product of a crosspolytope and a cube (its polar) is ``almost'' a symplectic ball in the following sense: the product symplectically embeds into the ball of arbitrarily close volume, and vice versa. The four-dimensional case can be found in~\cite[Lemma~3.1.8]{schlenk2005embedding} or~\cite[\S 4]{latschev2013gromov}; the $2n$-dimensional case follows the same lines (see also~\cite[Lemma~5.3.1]{schlenk2005embedding}, where the same technique is exploited for a simplex instead of a crosspolytope). A reasonable question is whether the Lagrangian product of a crosspolytope and a cube (or more generally, $H\times H^\circ$ for an arbitrary Hanner polytope) is a symplectic ball. However, it is not clear to me that the affirmative answer to this question would reprove the implications of Proposition~\ref{prop:hanner}. It would have reproved them if the boundary of the product had been smooth (in this case we would have applied the results of \cite{paiva2014contact}).
Let us also note that Theorem~\ref{thm:viterboeq} would immediately follow from the affirmative answer to the following
\begin{question}
\label{ques:permutball}
Is $P_n \times \triangle_n^{\circ}$ a symplectic ball? More precisely, is the interior of $P_n \times \triangle_n^{\circ}$ a symplectic image of a Euclidean ball of the same volume?
\end{question}
We do not know the answer even in the case $n=2$.
To prove Theorem~\ref{thm:viterboeq}, we will directly compute $\xi_{\triangle_n^\circ}(P_n) = c_{HZ}(P_n \times \triangle_n^\circ)$ and then check the equality:
$$
\volu (P_n \times \triangle_n^\circ) = \frac{c_{HZ}(P_n \times \triangle_n^\circ)^n}{n!}.
$$
The volume $\volu \triangle_n = \frac{\sqrt{n+1}}{2^{n/2} n!}$ can be easily computed, the Mahler volume product $\volu \triangle_n \cdot \volu \triangle_n^\circ = \frac{(n+1)^{n+1}}{(n!)^2}$ is well known, so we conclude that $\volu \triangle_n^\circ = \frac{2^{n/2} (n+1)^{n+1/2}}{n!}$. The volume $\volu P_n = \frac{(n+1)^{n-1/2}}{2^{n/2}}$ was already computed (Fact~\ref{fact:volume}).
So it suffices to compute the right-hand side.
\begin{proposition}
\label{prop:capacity}
In the above notation,
$$
c_{HZ} (P_n \times \triangle_n^\circ) = (n+1)^2.
$$
\end{proposition}
For the proof, we use the characterization of Bezdek and Bezdek of shortest generalized billiard trajectories in $P_n$, when lengths are measured using the norm with unit body $\triangle_n$.
Consider first the following 2-periodic trajectory: take the centers $m_1, m_2$ of two opposite facets of $P_n$ that are congruent to $P_{n-1}$. Then the generalized billiard trajectory $m_1 \to m_2 \to m_1$ is a 2-fold bypass of the width of $P_n$. Clearly, it cannot fit into $\inte P_n$. The $\triangle_n$-length of this trajectory is $(n+1)^2$, so we have an estimate from above: $c_{HZ} (P_n \times \triangle_n^\circ) \le (n+1)^2$.
To prove the estimate from below, we consider an arbitrary closed polygonal line that cannot fit into $\inte P_n$ and show that its $\triangle_n$-length cannot be less than $(n+1)^2$.
First, we replace each segment $[q,q']$ of this line with a certain polygonal line of same $\triangle_n$-length but with edges directed along $v_0, \ldots, v_n$. This can be done as follows. Consider the convex cone
$$
C_j = \pos\{\{v_0, \ldots, v_n\} \setminus \{v_j\}\}
$$
in which the vector $q'-q$ lies. Such $C_j$ exists since $\bigcup\limits_{i=0}^n C_i = \mathbb{R}^{n}$.
Then, decompose $q'-q = \sum\limits_{0\le k \le n, \ k\neq j} \alpha_k v_k$ with $\alpha_k \ge 0$. Now the polygonal line with edges congruent to $\alpha_k v_k$ suits our purpose well. Its $\triangle_n$-length equals the $\triangle_n$-length of $q'-q$ because the norm function is linear on $C_j$.
Now, we have the closed polygonal line with at most $n+1$ directions used. Let its segments be congruent to $\alpha_1 v_{j_1}, \ldots, \alpha_m v_{j_m}$. The total $\triangle_n$-length of the segments of this polygonal line along the direction $v_i$ is proportional to $\sum\limits_{j_k = i} \alpha_k$. Note that $\sum\limits_i v_i = 0$ is the only linear dependence among the directions, so the relation $\sum\limits_{k=1}^m \alpha_k v_{j_k} = 0$ is a multiple of $\sum\limits_i v_i = 0$. Thus, the total $\triangle_n$-length of the segments of this polygonal line along each direction $v_i$ does not depend on $i$. Assume the contrary to the statement that we are to prove: suppose that this length along each direction $v_i$ is less than $n+1$. We need to show that such a line fits into a smaller homothet of $P_n$, this will be a contradiction. We formulate it as a lemma.
\begin{lemma}
\label{lem:fitinto}
Suppose a closed polygonal line consists of segments directed only along $v_0, \ldots, v_n$. Suppose also that the total $\triangle_n$-length of all segments that are directed along $v_i$ equals $n+1$, for each $i = 0, 1, \ldots, n$. Then this line can be covered by a translate of $P_n$.
\end{lemma}
\begin{proof}
We proceed by induction on $n$. The base case $n = 1$ is clear.
To begin, we note that the top ``horizontal'' (meaning orthogonal to $e_n$, which is supposed to be pointed ``upwards'') facet $F$ of $P_n$ is congruent to a copy of $P_{n-1}$ placed horizontally in $\mathbb{R}^n$. Explicitly,
$$
F = n v_0 + \sum\limits_{1\le i < j\le n} [v_i, v_j].
$$
Now let $Q = q_0 \to q_1 \to \ldots \to q_{m-1} \to q_m = q_0$ be a polygonal line satisfying the assumptions of the lemma, and let $q_0$ be the highest (having the largest coordinate along $e_n$) vertex of $Q$. Let us introduce a parametrization $q : [0,n+1] \to Q$, so that the point $q(t)$ runs along $Q$ with the constant velocity $(n+1)$. Without loss of generality, $q(0) = q(n+1) = q_0$.
Consider the following transformation of $Q$. We contract it using the transform
$$
\begin{pmatrix}
\frac{1}{n+1} & 0 & \cdots & 0 & 0\\
0 & \frac{1}{n+1} & \cdots & 0 & 0\\
\vdots & \vdots & \ddots & \vdots & \vdots\\
0 & 0 & \cdots & \frac{1}{n+1} & 0\\
0 & 0 & \cdots & 0 & 1\\
\end{pmatrix}
$$
and obtain the line $\widetilde Q$ with the corresponding parametrization $\widetilde q(\cdot)$. Also, denote by $\widetilde v_i$ the image of $v_i$ under this transform. Note that $(n+1)\widetilde v_i = v_i-v_0$.
Now we consider the Minkowski sum
$$
R = F + \widetilde Q = \bigcup\limits_{t\in[0,n+1]} (F + \widetilde q(t))
$$
and claim that $Q$ can be covered by $R$ (see Figure~\ref{pic:permutfit1}). We will find $s \in \mathbb{R}^n$ such that $q(t) \in F+s+\widetilde q(t)$ for all $t$. The vertical component $\langle s, e_n \rangle = - \frac{n+1}{2} |v_0|$ is determined by comparing the $e_n$-components in the relation $q(t) \in F+s+\widetilde q(t)$; it remains to adjust the horizontal component of $s$ so that this relation holds for all $t$.
Let $H = \mathbb{R}^{n-1} \times \{0\}$ be the horizontal subspace of $\mathbb{R}^n$. Let $\pi: \mathbb{R}^n \to H$ be the orthogonal projection. Endow $H$ with the norm given by the unit body $\triangle_{n-1} = \conv\{\pi(v_1), \ldots, \pi(v_n)\}$. Identify $P_{n-1}$ with $\pi\left(F\right) = \sum\limits_{1\le i < j\le n} [\pi(v_i), \pi(v_j)] \subset H$. Consider the polygonal line $q(t) - \widetilde q(t)$, for $t\in[0,n+1]$. When $q(t)$ traverses along $v_i$, $1\le i \le n$, $q(t) - \widetilde q(t)$ traverses along $\pi(v_i)$. Whenever $q(t)$ traverses along $v_0$, $q(t) - \widetilde q(t)$ stays fixed. The overall time during which $q(t)$ traverses along $v_0$ is 1 (out of $n+1$). Let us reparametrize the polygonal line $q(t) - \widetilde q(t)$ by skipping all the time intervals during which it stays fixed. This way we will get a polygonal line $q'(t)$, for $t \in [0,n]$.
We make a few observations:
\begin{itemize}
\item $q'(t) \in H$ for all $t\in[0,n]$;
\item if we measure distances in $H$ using the $\triangle_{n-1}$-norm, then $q'(t)$ travels with the constant velocity $n$;
\item $q'(t)$ travels only along the distinguished directions $\pi(v_1), \ldots, \pi(v_n)$.
\end{itemize}
Therefore, we can apply the inductive assumption: there exists $s' \in H$ such that $q'(t) \in P_{n-1} + s'$ for all $t \in [0,n]$. Consequently, $q(t) - \widetilde q(t) \in P_{n-1} + s'$ for all $t \in [0,n+1]$.
Finally, we set $s = s' - \frac{n+1}{2} v_0 \in \mathbb{R}^n$. Then we have
$$
q(t) \in P_{n-1} + s' + \widetilde q(t) = F - \frac{n+1}{2} v_0 + s' + \widetilde q(t) = F+s+\widetilde q(t),
$$
so $Q \subset R + s$.
\begin{figure}
\caption{$Q$ covered by $R$}
\label{pic:permutfit1}
\end{figure}
The next step is to cover the set $R$ by $P_n$ (see Figure~\ref{pic:permutfit2}). To do this, we consider the coordinates $(\widetilde q_1(t), \ldots, \widetilde q_n(t))$ of the point $\widetilde q(t)$ in the basis $\widetilde v_1, \ldots, \widetilde v_n$. Shift $\widetilde Q$ (and $R = \widetilde Q + F$ accordingly) so that, for any $i$,
$$
\min\limits_{t\in [0,n+1]} \widetilde q_i(t) = 0.
$$
We claim that $R \subset P_n$ after such a shift. Indeed, any point of $R$ can be represented as
$$
f+ \widetilde q(t) = f + \sum\limits_{i=1}^n \widetilde q_i(t)\widetilde v_i,
$$
for some $t\in [0,n+1]$ and $f \in F$. From the assumptions of the lemma, it follows that $\widetilde q_i(t) \in [0,n+1]$. Hence, the point $\sum\limits_{i=1}^n \widetilde q_i(t) \widetilde v_i$ belongs to the Minkowski sum of the non-horizontal segments
$$
\sum\limits_{i=1}^n [0,(n+1)\widetilde v_i] = \sum\limits_{i=1}^n ([v_0, v_i] - v_0).
$$
Hence,
$$
f + \sum\limits_{i=1}^n \widetilde q_i(t) \widetilde v_i \in F + \sum\limits_{i=1}^n ([v_0, v_i] - v_0) = \sum\limits_{1\le i < j\le n} [v_i, v_j] + \sum\limits_{i=1}^n [v_0, v_i] = P_n,
$$
as required.
\end{proof}
\begin{figure}
\caption{$R$ covered by $P_n$}
\label{pic:permutfit2}
\end{figure}
\begin{remark}
\label{rem:uppercapacity}
As a referee pointed out, an example of a two-bouncing trajectory in the configuration $P_n \times \triangle_n^\circ$ estimates from above the cylindrical capacity $c_{cyl}(P_n \times \triangle_n^\circ)$ by $(n+1)^2$ (see~\cite[Remark 4.2]{artstein2014from}). So we have
$$
(n+1)^2 \le c_{HZ}(P_n \times \triangle_n^\circ) \le c_{cyl}(P_n \times \triangle_n^\circ) \le (n+1)^2,
$$
and all the capacities between $c_{HZ}$ and $c_{cyl}$ coincide on the Lagrangian product $P_n \times \triangle_n^\circ$.
Consequently, the equality in (\ref{eq:viterbo}) holds for $X = P_n \times \triangle_n^\circ$ and any symplectic capacity greater than the Hofer--Zehnder capacity.
\end{remark}
\section{Special cases of Viterbo's conjecture}
\label{sec:inequality}
Viterbo's conjecture is proven~\cite{hermann1998non} for ellipsoids, polydiscs and convex Reinhardt domains. However, even for $X \subset \mathbb{R}^4$ Viterbo's conjecture remains widely open. Here we prove the following special cases of the conjecture.
\begin{theorem}
\label{thm:viterbosimplex}
Let $K \subset V = \mathbb{R}^n$ be a convex body. Let $\triangle_n \subset V^*$ be a simplex. Then
$$
\volu (K \times \triangle_n) \ge \frac{c_{HZ}(K \times \triangle_n)^n}{n!},
$$
where $K$ and $\triangle_n$ lie in Lagrangian subspaces.
\end{theorem}
\begin{theorem}
\label{thm:viterbocube}
Let $K \subset V = \mathbb{R}^n$ be a convex body. Let $\square_n \subset V^*$ be a parallelotope. Then
$$
\volu (K \times \square_n) \ge \frac{c_{HZ}(K \times \square_n)^n}{n!},
$$
where $K$ and $\square_n$ lie in Lagrangian subspaces.
\end{theorem}
The latter can be interpreted as a sharp isoperimetric-like inequality for billiard trajectories in the $\ell_1$-norm, with equality attained on the crosspolytope $K = \square_n^\circ$:
$$
\xi_{\square_n}(K) \le 2\left(n! \volu (K)\right)^{1/n}.
$$
Similarly, the former is a sharp isoperimetric-like inequality for billiard trajectories in the non-symmetric $\|\cdot\|_{\triangle_n}$-norm, with equality attained on certain permutohedra.
For the proof of Theorem~\ref{thm:viterbosimplex} we will need a simple topological lemma.
\begin{lemma}
\label{lem:avoidtriangulation}
Let $T$ be a triangulation of $\mathbb{R}^n$ with the diameters of simplices uniformly bounded from above. Let $\Lambda$ be the vertex set of $T$. Assume that every simplex of $T$ can be covered by a translate of a convex body $K \subset \mathbb{R}^n$. Then any translate of $K$ meets $\Lambda$.
\end{lemma}
\begin{proof}
Let $d \in \mathbb{R}$ be an upper bound for the diameters of the cells of $T$.
Suppose there is a translate $K+t$ of $K$ avoiding $\Lambda$. We define a continuous vector field $\nu : \mathbb{R}^n \to \mathbb{R}^n$ as follows. For every vertex $\lambda \in \Lambda$, set $\nu(\lambda)$ equal to any unit vector pointing away from $K+t$. More precisely, take any hyperplane $H \ni \lambda$ avoiding $K+t$, and set $\nu(\lambda)$ to be the normal vector of $H$ pointing to the halfspace missing $K+t$ (see Figure~\ref{pic:brouwer}, illustrating the two-dimensional case). Then extend $\nu$ affinely to the entire $\mathbb{R}^n$.
\begin{figure}
\caption{Construction of vector field $\nu$}
\label{pic:brouwer}
\end{figure}
Now take a ball $B$ centered at any point of $K+t$. We take it large enough, say, of radius $100 (d + \diam K)$. Then $\frac{\nu}{|\nu|}\vert_{\partial B} : \partial B \to S^{n-1}$ has degree 1, hence there is $x \in \inte B$ such that $\nu(x) = 0$. If $x$ lies in a simplex $\sigma$ of $T$ with vertices $v_0, \ldots, v_n$, then there are non-negative multipliers $\alpha_0, \ldots, \alpha_n$, not all zeros, such that $\sum\limits_i \alpha_i \nu(v_i) = 0$. Informally, this means that $K+t$ is ``blocked'' inside the ``cage'' $\sigma$ with the ``cage bars'' $v_i$. Let us show rigorously that in this situation $\sigma$ cannot be covered by a translate of $K$.
We know that $K+t$ does not cover the vertices of $\sigma$. The choice of $\nu(v_i)$ implies that $\langle u - v_i, \nu(v_i) \rangle < 0$ for any $u \in K+t$. Assume that there is a translate $K+t+s$ covering $\sigma$. Multiplying $s$ by $\sum\limits_i \alpha_i \nu(v_i)$, we get
$$
\sum\limits_{i=0}^n \alpha_i \langle s, \nu(v_i) \rangle = 0.
$$
It follows that $\langle s, \nu(v_i) \rangle \le 0$ for some $i$. Then we obtain a contradiction as follows:
$$
0 = \langle v_i - v_i, \nu(v_i) \rangle = \langle \underbrace{(v_i-s)}_{\in K+t} - v_i, \nu(v_i) \rangle + \langle s, \nu(v_i) \rangle < 0.
$$
\end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:viterbosimplex}]
The inequality in question is invariant under the following family of transformations: if $V$ is acted on by an affine transform $A$, then $V^*$ is acted on by $(A^*)^{-1}$. Any simplex is affinely equivalent to a regular one, so we can assume that $\triangle_n^\circ = \conv\{u_0, \ldots, u_n\}$ is a regular simplex centered at the origin, with $|u_0| = \ldots = |u_n| = 1$. Further, we scale $K$ so that $c_{HZ}(K \times \triangle_n)$ becomes equal to $(n+1)$.
Consider the lattice $\Lambda$ generated by the vectors $u_0, \ldots, u_n$; recall that $\Lambda$ is just a scaled copy of $A_n^*$, and its Vorono\u{\i} cell $P$ (centered at the origin) is congruent to $\frac{1}{\sqrt{n^2+n}} \widetilde{P_n}$.
We will prove that $\volu K \ge \volu P$. This will suffice:
$$
\volu (K \times \triangle_n) \ge \volu (P \times \triangle_n) = \frac{c_{HZ}(P \times \triangle_n)^n}{n!} = \frac{(n+1)^n}{n!} = \frac{c_{HZ}(K \times \triangle_n)^n}{n!}.
$$
Here we used the results of Section~\ref{sec:equality}. Note that in our scale, $P$ equals $\frac{1}{n+1}$-times the Minkowski sum of the edges of $\triangle_n^\circ$, hence $c_{HZ}(P \times \triangle_n) = n+1$ and $\volu (P \times \triangle_n) = \frac{c_{HZ}(P \times \triangle_n)^n}{n!}$.
Now we are proving the estimate $\volu K \ge \volu P$. We will do it by showing that
$$
\bigcup_{\lambda \in \Lambda} (\lambda + K) = \mathbb{R}^n.
$$
This will imply that $\volu K$ is not less than the volume of the fundamental domain of $\Lambda$, i.e., $\volu P$.
Consider the Delaunay triangulation $T$ of $\mathbb{R}^n$ with respect to $\Lambda$. For each simplex $\sigma$ of $T$, we invoke Lemma~\ref{lem:equiedge} to find the polygonal line $Q_\sigma$ of length $n+1$. Since $c_{HZ}(K \times \triangle_n) = n+1$, such $Q_\sigma$ can be covered by a translate of $K$ (by Theorem~\ref{thm:bezdeks}). Therefore, any simplex of $T$ can be covered by a translate of $K$.
Assume that $\bigcup\limits_{\lambda \in \Lambda} (\lambda + K) \neq \mathbb{R}^n$, that is, there exists $x \in \mathbb{R}^n$ such that $(x + \Lambda) \cap K = \varnothing$. So we found a translate $K - x$ of $K$ that avoids $\Lambda$, while every simplex of $T$ can be covered by a translate of $K$. This contradicts Lemma~\ref{lem:avoidtriangulation}.
\end{proof}
Theorem~\ref{thm:viterbocube} will be proved inductively using the following lemma.
\begin{lemma}
\label{lem:viterboaddsegment}
Let $L \subset \mathbb{R}^{n-1}$ be a convex body. Suppose we established the inequality
$$
\volu (M \times L) \ge \frac{c_{HZ}(M \times L)^{n-1}}{(n-1)!}
$$
for all convex bodies $M \subset \mathbb{R}^{n-1}$. Then for any convex body $K \subset \mathbb{R}^n$ the following holds:
$$
\volu (K \times (L \times [-1,1])) \ge \frac{c_{HZ}(K \times (L \times [-1,1]))^n}{n!}.
$$
(As usual, the relevant products are assumed to be Lagrangian.)
\end{lemma}
\begin{proof}
Without loss of generality assume that $L$ contains the origin in its interior. Denote by $H$ the hyperplane in $\mathbb{R}^n$ where $L$ sits.
Note that
$$
(L \times [-1,1])^\circ = \conv (L^\circ \times \{0\} \cup \{(\underbrace{0,\ldots,0}_{n-1})\} \times [-1,1]).
$$
Sometimes this body is called the \emph{free sum} or the \emph{$\ell_1$-sum} of $L^\circ$ and $[-1,1]$.
Scale $K$ so that $c_{HZ}(K \times L \times [-1,1]) = \xi_{L \times [-1,1]}(K) = 4$. This implies that any closed polygonal line of $\|\cdot\|_{L \times [-1,1]}$-length 4 fits into $K$. In particular, any such polygonal line that is parallel to $H$ fits into $\pi_H(K)$, the orthogonal projection of $K$ onto $H$. Thus, $c_{HZ}(\pi_H(K) \times L) \ge 4$. By the assumption,
$$
\volu (\pi_H(K) \times L) \ge \frac{c_{HZ}(\pi_H(K) \times L)^{n-1}}{(n-1)!} \ge \frac{4^{n-1}}{(n-1)!}.
$$
Now we apply the following inequality (proven by Rogers and Shephard~\cite{rogers1958convex} in greater generality):
$$
\volu_n (K) \ge \frac{1}{n} \volu_{n-1} (\pi_H(K)) \cdot \max\limits_{x \in \mathbb{R}^n} \volu_1 ((x + H^\bot) \cap K).
$$
Since $\xi_{L \times [-1,1]}(K) = 4$, $K$ contains a segment orthogonal to $H$ of $\|\cdot\|_{L \times [-1,1]}$-length 2, and we conclude that $\max\limits_{x \in \mathbb{R}^n} \volu_1 ((x + H^\bot) \cap K) \ge 2$.
Finally, we combine all inequalities above:
\begin{multline*}
\volu (K \times L \times [-1,1]) \ge \frac{2}{n} \volu (\pi_H(K)) \volu(L \times [-1,1]) = \frac{4}{n} \volu (\pi_H(K) \times L) \ge \\
\ge \frac{4}{n} \cdot \frac{4^{n-1}}{(n-1)!} = \frac{c_{HZ}(K \times L \times [-1,1])^n}{n!}.
\end{multline*}
\end{proof}
\begin{proof}[Proof of Theorem~\ref{thm:viterbocube}]
The inequality in question is invariant under the transforms of the form $A \times (A^*)^{-1}$, for $A$ affine, so we assume that $\square_n = [-1,1]^n$ is a hypercube centered at the origin. Now the claim follows from Lemma~\ref{lem:viterboaddsegment} by induction. The base case
$$
\volu ([a,b] \times [-1,1]) \ge c_{HZ}([a,b] \times [-1,1])
$$
is trivial.
\end{proof}
\begin{remark}
\label{rem:viterbosimplexcube}
Starting from Theorem~\ref{thm:viterbosimplex} as the induction base, we see that the same induction proves the $c_{HZ}$-version of Viterbo's conjecture for a Lagrangian product of any convex body $K \subset \mathbb{R}^{k+m}$ with a simplex in $\mathbb{R}^k$ and a parallelotope in $\mathbb{R}^m$:
$$
\volu (K \times \triangle_k \times \square_m) \ge \frac{c_{HZ}(K \times \triangle_k \times \square_m)^{k+m}}{(k+m)!}.
$$
\end{remark}
\end{document} |
\begin{document}
\title{The variety of projections of a Tree-Prikry forcing}
\author{Tom Benhamou}
\address[Tom Benhamou]{School of Mathematical Sciences, Raymond and Beverly Sackler Faculty of Exact Science, Tel-Aviv University, Ramat Aviv 69978, Israel}
\email[Tom Benhamou]{tombenhamou@tauex.tau.ac.il}
\author{Moti Gitik}\thanks{The work of the second author was partially supported by ISF grant No 1216/18}
\address[Moti gitik]{School of Mathematical Sciences, Raymond and Beverly Sackler Faculty of Exact Science, Tel-Aviv University, Ramat Aviv 69978, Israel}
\email[Moti Gitik]{gitik@post.tau.ac.il}
\author{Yair Hayut}\thanks{The work of the third author was partially supported by the FWF Lise Meitner
grant 2650-N35, and the ISF grant 1967/21}
\address[Yair Hayut]{Einstein Institute of Mathematics, \\
Edmond J.\ Safra Campus, \\
The Hebrew University of Jerusalem \\
Givat Ram.\ Jerusalem, 9190401, Israel}
\email[Yair Hayut]{yair.hayut@mail.huji.ac.il}
\begin{abstract}
We study which $\kappa$-distributive forcing notions of size $\kappa$ can be embedded into tree Prikry forcing notions with $\kappa$-complete ultrafilters under various large cardinal assumptions. An alternative formulation - can the filter of dense open subsets of a $\kappa$-distributive forcing notion of size $\kappa$ be extended to a $\kappa$-complete ultrafilter.
\end{abstract}
\maketitle
\date{\today}
\section{Introduction}
In this paper we will study possibilities of embedding of $\kappa$-distributive forcing notions of size $\kappa$ into
Prikry forcings with non-normal ultrafilter or into tree Prikry forcing notions with $\kappa$-complete ultrafilters.
\\By the result of Kanovei, Koepke and the second author \cite{PrikryCaseGitikKanKoe} every subforcing of the standard Prikry forcing is either trivial or equivalent to the Prikry forcing with the same normal ultrafilter.
However, the situation changes drastically if non-normal ultrafilters are used.
Existence of such embedding allows one to iterate distributive forcing notions on different cardinals, see \cite[Section 6.4]{Gitik2010}.
A closely related problem is the possibility of extension of the filter of dense open subsets of a $\kappa$-distributive forcing notion of size $\kappa$
to a $\kappa$-complete ultrafilter, the exact statement is given in Theorem~\ref{equivalece}.
Clearly, if $\kappa$ is a $\kappa$-compact cardinal, then this follows. Actually more is true---there is a single Prikry type forcing, such that any $\kappa$-distributive forcing notion of size $\kappa$ embeds into it, see
\cite{GitikOnCompactCardinals}.
However, there are $\kappa$-distributive forcing notions of size $\kappa$ which can be embedded into Prikry forcing notions under much weaker assumptions. Thus, for example,
in \cite{TomMoti} starting from a measurable cardinal, a generic extension in which there is a $\kappa$-complete ultrafilter on $\kappa$, $\mathcal{U}$, such that the tree Prikry forcing using $\mathcal{U}$ introduces a Cohen subset of $\kappa$ was constructed.
This paper investigates different possibilities which are intermediate between those two extremes. More specifically, let $H$ be a subclass of the $\kappa$-distributive forcings of size $\kappa$;
we examine the following question:
\vskip 0.2 cm
\begin{center}
\textit{ Can the dense open filter, $D(\mathbb{Q})$, of any $\mathbb{Q}\in H$\\ be extended to a $\kappa$-complete ultrafilter?}
\end{center}
\vskip 0.2 cm
Our notations are mostly standard. For general information about Prikry type forcing we refer the reader to \cite{Gitik2010}. For general information about large cardinals we refer the reader to \cite{Kanamori1994}.
Throughout the paper, $p \leq q$ means that $p$ is weaker than $q$.
The structure of the paper is as follows:
\begin{itemize}
\item Section $2$ is intended to give the reader background and basic definitions which appear in this paper.
\item The main result of section $3$ is Theorem~\ref{equivalece}: Let $\mathbb{Q}$ be a $\sigma$-distributive forcing of size $\kappa$. Then $B(\mathbb{Q})$ is a projection of the tree Prikry forcing if and only if $D(\mathbb{Q})$ can be extended to a $\kappa$-complete ultrafilter. Moreover the ultrafilter extending $D(\mathbb{Q})$ must be Rudin-Keisler below the ultrafilters of the tree Prikry forcing.
\item Section $4$ deals with the class of $\kappa$-strategically closed and ${<}\kappa$-strategically closed forcings. Lemma~\ref{CohenProj} establishes that $\Add(\kappa,1)$ projects onto every $\kappa$-strategically closed forcing of cardinality $\kappa$. Also, we present the forcing that adds a Jensen square (see Definition~\ref{definition: jensen square}) and prove that it is maximal among all the ${<}\kappa$-strategically closed forcings; this is formulated in Lemma~\ref{JenSenProj}.
\item Section $5$ focuses on upper bounds. In Theorem~\ref{LesskappaStClUpperBound} we give an upper bound for the claim ``For every ${<}\kappa$-strategically closed forcing $\mathbb{P}$ of size $\kappa$ and every $p\in\mathbb{P}$, $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter''.
In the rest of the section we discuss some weaker version of $\Pi^1_1$-subcompact cardinal which is an upper bound for the claim ``For every $\kappa$-distributive forcing $\mathbb{P}$ of size $\kappa$ and every $p\in\mathbb{P}$, $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter''.
\item Section $6$ is devoted to the forcing $Q$, of shooting a club through the singulars. This forcing is a milestone for the class of ${<}\kappa$-strategically closed forcings of size $\kappa$. In Theorem~\ref{thm:lowerbound}, we prove that if we can extend $D(Q)$, then either $\exists\lambda\ o(\lambda)=\lambda^{++}$ or $o^{\mathcal K}(\kappa)\geq \kappa+\kappa$.
\item Section $7$ provides a strengthening of results of section $6$ to $o^{\mathcal K}(\kappa)\geq \kappa^++1$.
\item Section $8$ defines a class called \textit{masterable forcing}. We show, starting with a measurable, that one can force that the filter of $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter for every masterable forcing $\mathbb{P}$. In this generic extension we give examples of many important forcing notions which are masterable.
\item Section $9$ presents forcing notions which do not fall under the examples considered in this paper and present further research directions.
\end{itemize}
\section{Preliminaries}
Let us recall some basic concepts about forcing notions and Tree Prikry forcing. First, our forcing notions are always separative and have a minimal element. We force upward, i.e.\ $p\leq q$ means that $q\Vdash p\in \dot{G}$. Let us start with the concept of projection:
\begin{definition}
Let $\mathbb{P},\mathbb{Q}$ be forcing notions. A map $\pi:\mathbb{P}\rightarrow\mathbb{Q}$ is a projection if
\begin{enumerate}
\item $\pi$ is order preserving.
\item For every $p\in\mathbb{P}$ and every $q\geq \pi(p)$ there exists $p'\geq p$ such that $\pi(p')\geq q$.
\item $\mathrm{Im}(\pi)$ is dense in $\mathbb{Q}$.
\end{enumerate}
\end{definition}
\begin{definition}
Let $\pi:\mathbb{P}\rightarrow\mathbb{Q}$ be a function.
\begin{enumerate}
\item If $G\subseteq\mathbb{P}$ is $V$-generic, define
$$\pi_*(G)=\{q\in\mathbb{Q}\mid \exists p\in G.q\leq\pi(p)\}$$
\item If $H\subseteq\mathbb{Q}$ is $V$-generic, define the quotient forcing
$$\mathbb{P}/H=\pi^{-1}[H]=\{p\in\mathbb{P}\mid \pi(p)\in H\}$$
With the separative order: $p\leq_{\mathbb{P}/H} q$ if and only if for every $q\leq_{\mathbb{P}} r$, $r$ is compatible with $p$.
\end{enumerate}
\end{definition}
\begin{claim}
Let $\mathbb{P},\mathbb{Q}$ be any forcing notions, then:
\begin{enumerate}
\item Let $G\subseteq\mathbb{P}$ be $V$-generic and $\pi:\mathbb{P}\rightarrow\mathbb{Q}$ a projection, then $\pi_*(G)\subseteq\mathbb{Q}$ is $V$-generic.
\item Let $H\subseteq\mathbb{Q}$ be $V$-generic and $\pi:\mathbb{P}\rightarrow\mathbb{Q}$ a projection, then if $G\subseteq\mathbb{P}/H$ is $V[H]$-generic, then $G\subseteq\mathbb{P}$ is $V$-generic, moreover, $\pi_*(G)=H$.
\item Let $G\subseteq\mathbb{P}$ be $V$-generic and $\pi:\mathbb{P}\rightarrow\mathbb{Q}$ a projection, then $G\subseteq\mathbb{P}/\pi_*(G)$ is $V[\pi_*(G)]$-generic.
\end{enumerate}
\end{claim}
\begin{definition}
Let $\mathbb{P}$ be a forcing notion, denote by $B(\mathbb{P})$ the complete boolean algebra of regular open sets of $\mathbb{P}$.\end{definition}
It is known that $\mathbb{P}$ can be identified with a dense subset of $B(\mathbb{P})$ and that $B(\mathbb{P})$ is the unique (up to isomorphism) complete Boolean algebra with a dense subset isomorphic to $\mathbb{P}$.
Moreover, $\mathbb{P}$ and $B(\mathbb{P})$ yield the same generic extensions. Let $G\subseteq\mathbb{P}$ be a $V$-generic filter; then $\bar{G}=\{b\in B(\mathbb{P})\mid \exists p\in G.b\leq p\}\subseteq B(\mathbb{P})$ is $V$-generic, and if $\bar{G}\subseteq B(\mathbb{P})$ is $V$-generic then $G=\bar{G}\cap \mathbb{P}\subseteq\mathbb{P}$ is $V$-generic.
For more information about boolean algebras see \cite{ShelahProper} or \cite{AbrahamHandbook}.
\begin{claim}\label{absoToproj}
Let $\mathbb{P},\mathbb{Q}$ be forcing notions. Then:
\begin{enumerate}
\item There is a projection $\pi\colon\mathbb{P}\rightarrow B(\mathbb{Q})$ if and only if there is a $\mathbb{P}$-name $\lusim{H}$ such that
for every generic filter $H$ for $\mathbb{Q}$ there is a generic filter $G$ for $\mathbb{P}$ such that $(\lusim{H})_G=H$.
\item There is a strong projection $\pi:\mathbb{P}\rightarrow B(\mathbb{Q})$ iff there is a $\mathbb{P}$-name $\lusim{H}$ such that
for every $V$-generic filter $H$ for $\mathbb{Q}$ there is a $V$-generic filter $G$ for $\mathbb{P}$ such that $(\lusim{H})_G=H$.
\end{enumerate}
\end{claim}
\begin{definition}\label{distributive}
Let $\mathbb{P}$ be a forcing notion and let $\kappa$ be a cardinal. $\mathbb{P}$ is $\kappa$-distributive if for every collection $\mathcal{D}$ of dense open subsets of $\mathbb{P}$, $|\mathcal{D}| < \kappa$, the intersection $\bigcap \mathcal{D}$ is also a dense open subset of $\mathbb{P}$.
\end{definition}
Note that if $\mathbb{P}$ is $\kappa$-distributive then the filter generated by the dense open subsets of $\mathbb{P}$ is $\kappa$-complete.
\begin{notation}
Let $\mathbb{P}$ be a forcing notion. We denote by $D(\mathbb{P})$ the filter generated by the dense open subsets of $\mathbb{P}$. For $p\in\mathbb{P}$ let $D_p(\mathbb{P})$ be the filter generated by $D(\mathbb{P})$ and the set $\{q\in\mathbb{P}\mid q\geq p\}$.
\end{notation}
Let us define the tree Prikry forcing. Let $\kappa$ be a cardinal, and let $\vec{\mathcal{U}} = \langle U_{\eta} \mid \eta \in [\kappa]^{<\omega}\rangle$ be a sequence of ultrafilters on $\kappa$, indexed by $[\kappa]^{<\omega}$, the set of all finite sequences below $\kappa$, such that $U_\eta$ concentrates on the set $\kappa\setminus(\max(\eta)+1)$.
Let us define the forcing $\mathbb{T}_{\vec{\mathcal{U}}}$.
An element in $\mathbb{T}_{\vec{\mathcal{U}}}$ is a pair $\langle s, T\rangle$ where:
\begin{enumerate}
\item $s\in [\kappa]^{<\omega}$.
\item $T\subseteq [\kappa]^{<\omega}$, and for all $t\in T$, $s \trianglelefteq t$.
\item $T$ is $\vec{\mathcal{U}}$-splitting: for all $t \in T$, $\{\nu<\kappa \mid t^\smallfrown \nu \in T\} \in U_{t}$.
\end{enumerate}
For $T\subseteq [\kappa]^{<\omega}$ and $\eta\in T$ we denote $T_\eta = \{s\in[\kappa]^{<\omega} \mid \eta^\smallfrown s\in T\}$.
For $p = \langle s, T\rangle,\ p'=\langle s', T'\rangle \in \mathbb{T}_{\vec{\mathcal{U}}}$, we write $p' \leq p$ and say that $p$ extends $p'$ if $s\in T'$ and $T \subseteq T'_s$. We write $p'\leq^* p$ and say that $p$ is a direct extension of $p'$ if $p'\leq p$ and $s = s'$.
We will always assume that each $U_\eta$ is $\kappa$-complete. In this case, the relation $\leq^*$ is $\kappa$-complete.
The following claim is well known \cite[Lemma 3.16]{TomTreePrikry}:
\begin{lemma}[Strong Prikry Lemma]
Let $D \subseteq \mathbb{T}_{\vec{\mathcal{U}}}$ be dense open and let $p = \langle s, T\rangle\in\mathbb{T}_{\vec{\mathcal{U}}}$ be a condition. There is a direct extension $p\leq^*p^* = \langle s, T^*\rangle$ of $p$, and a natural number $n$, such that for all $\eta\in T^*$ with $\len \eta = n$, $\langle s^\smallfrown \eta, T^*_\eta\rangle\in D$, and for all $\eta$ such that $\len \eta < n$, $\langle s^\smallfrown \eta, T^*_\eta\rangle\notin D$.
\end{lemma}
When analyzing a tree of measures there is a natural iteration of ultrapowers to consider.
\begin{definition}
Let $\vec{\mathcal{U}}$ be a tree of $\kappa$-complete ultrafilters and $\eta\in[\kappa]^{<\omega}$. For $\vec{\mathcal{U}}$ and $0<n<\omega$, define recursively the $n$th ultrafilter above $\eta$ derived from $\vec{\mathcal{U}}$, denoted $(\mathcal{U}_{\eta})_n$, to be the following ultrafilter over $[\kappa]^n$: $$(\mathcal{U}_{\eta})_1=\mathcal{U}_{\eta}$$
For $A\subseteq [\kappa]^{n+1}$ define
$$A\in(\mathcal{U}_{\eta})_{n+1}\Longleftrightarrow \{\gamma\in[\kappa]^n \mid A_{\gamma}\in \mathcal{U}_{{\eta}^{\frown}\gamma}\}\in(\mathcal{U}_{\eta})_n $$
where $$A_{\gamma}=\{\alpha<\kappa\mid \gamma^{\frown}\alpha\in A\}$$
\end{definition}
\begin{definition}\label{iteration}
Let $\vec{\mathcal{U}}$ be a tree of $\kappa$-complete ultrafilters; define recursively the \textit{iteration corresponding} to $\vec{\mathcal{U}}$ above $\eta\in[\kappa]^{<\omega}$. $$j_0=j_{\mathcal{U}_{\eta}}:V\rightarrow M_0\simeq \Ult(V,\mathcal{U}_{\eta}), \ \ \delta_0= [id]_{\mathcal{U}_{\eta}}$$
$$j_{n,n+1}:M_n\rightarrow \Ult(M_n,j_{n}(\mathcal{\vec{U}})_{\vec{\eta}^{\frown}\langle\delta_0,\dots,\delta_{n}\rangle})\simeq M_{n+1}$$
$\delta_{n+1} = [id]_{j_n(\mathcal{\vec{U}})_{\vec{\eta}^{\frown}\langle\delta_0,\dots,\delta_{n}\rangle}}$, $j_{n+1}=j_{n,n+1}\circ j_n$ and $j_{m,n+1}=j_{n,n+1}\circ j_{m,n}$.
\end{definition}
The following theorem can also be found in \cite{TomTreePrikry}:
\begin{theorem}\label{genericsequence}
Let $M_\omega$ be the $\omega$-th iterate of the iteration corresponding to $\vec{\mathcal{U}}$ above $\vec{\eta}$, i.e.\ $M_\omega$ is the transitive collapse of the direct limit of the system $\langle M_n,\ j_{n,m}\mid n,m<\omega\rangle$ defined in Definition~\ref{iteration}; denote the direct limit embeddings by
$j_{n,\omega}:M_n\rightarrow M_{\omega}$. Then the sequence $\vec{\eta}^{\frown}\langle\delta_n\mid n<\omega\rangle$ is $M_\omega$-generic for the forcing $j_{\omega}(\mathbb{T}_{\vec{\mathcal{U}}})$.
\end{theorem}
\begin{claim}\label{claim}
For every $A\subseteq[\kappa]^{n}$, $$\langle\delta_0,\dots,\delta_{n-1}\rangle\in j_{n-1}(A)\Longleftrightarrow A\in (\mathcal{U}_{\eta})_n$$
\end{claim}
\begin{proof}
For $n=1$ it is just {\L}o{\'s}'s theorem: $[id]_{\mathcal{U}_\eta}\in j_1(A)\Longleftrightarrow A\in \mathcal{U}_{\eta}=(\mathcal{U}_{\eta})_1$. Assume that the claim holds for $n$, and let $A\subseteq[\kappa]^{n+1}$. Denote $\vec{\delta}_m=\langle\delta_0,\dots,\delta_m\rangle$; then
$$\vec{\delta}_{n}\in j_{n}(A)\Longleftrightarrow \delta_{n}\in j_{n}(A)_{\vec{\delta}_{n-1}}\Longleftrightarrow j_{n-1}(A)_{\vec{\delta}_{n-1}}\in j_{n-1}(\vec{\mathcal{U}})_{\eta^{\frown}\vec{\delta}_{n-1}}$$ By the definition of $j_{n-1}(A)_{\vec{\delta}_{n-1}}$ and the induction hypothesis we can continue the chain of equivalences
$$\Longleftrightarrow\{\gamma\mid A_{\gamma}\in \mathcal{U}_{\eta^{\frown}\gamma}\}\in (\mathcal{U}_{\eta})_n\Longleftrightarrow A\in (\mathcal{U}_{\eta})_{n+1}$$
\end{proof}
\section{Subforcing of the tree Prikry forcing}
In this section we characterize the $\sigma$-distributive complete subforcings of a tree Prikry forcing. Since no bounded subsets of $\kappa$ are introduced, such a forcing is either trivial or $(\kappa,\kappa)$-centered i.e. it is the union of $\kappa$ many sets $A_i$ for $i<\kappa$ such that each $A_i$ is $\kappa$-directed. Standard arguments show that those forcing notions have to be $\kappa$-distributive.
By a theorem of Gitik (see \cite{GitikOnCompactCardinals}), if $\kappa$ is $\kappa$-compact, then there is a Prikry type forcing which absorbs every $\kappa$-distributive forcing $\mathbb{P}$ of cardinality $\kappa$. A simpler version of this theorem is stated in the following claim:
\begin{claim}\label{ExtensionToProj}
Assume that for every $p\in\mathbb{P}$, we can extend $D_p(\mathbb{P})$ to a $\kappa$-complete ultrafilter $U_p$. Then there is a tree of $\kappa$-complete ultrafilters $$\vec{\mathcal{W}}=\langle W_{\eta}\mid \eta\in[\kappa]^{<\omega}\rangle$$ and a projection $\pi:\mathbb{T}_{\vec{\mathcal{W}}}\rightarrow B(\mathbb{P})$.
\end{claim}
\begin{proof}
We would like to turn the ultrafilters $U_p$ into ultrafilters on $\kappa$. For this, we first need to identify $\mathbb{P}$ with $[\kappa]^{<\omega}$ somehow. We define inductively for every $\eta\in[\kappa]^{<\omega}$ a condition $p_\eta\in \mathbb{P}$. First $p_{\langle\rangle}=0_{\mathbb{P}}$. Assume that $p_\eta$ is defined, and let $\mathbb{P}/p_{\eta}:=\{q\in\mathbb{P}\mid q\geq p_\eta\}$. By assumption $|\mathbb{P}/p_\eta|\leq\kappa$, fix any surjection $f_{\eta}:(\max\{\eta\},\kappa)\rightarrow\mathbb{P}/p_\eta$. Define for every $\alpha\in(\max\{\eta\},\kappa)$, $p_{\eta^{\smallfrown}\alpha}=f_\eta(\alpha)$.
Next we define the ultrafilters $W_{\eta}$ for every $\eta\in[\kappa]^{<\omega}$. Let $g_\eta:\mathbb{P}/p_\eta\rightarrow (\max(\eta),\kappa)$ be a right inverse of $f_{\eta}$ such that $f_{\eta}\circ g_{\eta}=id_{\mathbb{P}/p_\eta}$. Define $W_{\eta}=g_{\eta*}(U_{p_\eta})$ to be the Rudin-Keisler projection of $U_{p_\eta}$ to $\kappa$, i.e.\ for $A\subseteq\kappa$:
$$A\in W_{\eta}\Longleftrightarrow g_{\eta}^{-1}[A\setminus \max(\eta)+1]\in U_{p_\eta}$$
In particular $\vec{\mathcal{W}}:=\langle W_\eta\mid \eta\in[\kappa]^{<\omega}\rangle$ is defined.
Let us define the following name $$ \lusim{H}=\{\langle \dot{q},\langle t, T\rangle\rangle\mid q\in\mathbb{P}, \ q\leq p_{t},\langle t, T\rangle\in \mathbb{T}_{\vec{\mathcal{W}}}\}$$
Then $\Vdash_{\mathbb{T}_{\vec{\mathcal{W}}}}\lusim{H}$ is $V$-generic for $\mathbb{P}$. Indeed, let $G\subseteq \mathbb{T}_{\vec{\mathcal{W}}}$ be $V$-generic and let $H=(\lusim{H})_G$. Assume that $\langle \alpha_n\mid n<\omega\rangle$ is the Prikry sequence produced by $G$, and denote $p_n=p_{\langle\alpha_0,\dots,\alpha_n\rangle}$; then $$H=\{q\in\mathbb{P}\mid \exists n<\omega\ q\leq p_n\}$$
Note that $\alpha_{n+1}>\alpha_n$ and by construction $p_{n+1}=f_{\langle\alpha_0,\dots,\alpha_n\rangle}(\alpha_{n+1})\in \mathbb{P}/ p_n$, hence the $p_n$'s are increasing in the order of $\mathbb{P}$ and $H$ is a filter. Let $D\subseteq \mathbb{P}$ be dense open. We proceed by a density argument: let $\langle t,T\rangle\in\mathbb{T}_{\vec{\mathcal{W}}}$; then $D$ is dense open above $p_t$ and therefore $D\cap\mathbb{P}/p_t\in U_{p_t}$. It is not hard to check from the definition that $f_t^{-1}[D\cap\mathbb{P}/p_t]\in W_t$. It follows that $succ_T(t)\in W_t$; fix any $\alpha\in f_t^{-1}[D\cap\mathbb{P}/p_t]\cap succ_T(t)$. Consider the condition $\langle t^{\smallfrown}\alpha,T_{t^{\smallfrown}\alpha}\rangle\geq \langle t,T\rangle$. By density, there is $\langle s^{\smallfrown}\alpha_{n_0},S\rangle\in G$ such that $p_{s^{\smallfrown}\alpha_{n_0}}=f_{s}(\alpha_{n_0})\in D$. By the definition of $H$ we conclude that $p_{s^{\smallfrown}\alpha_{n_0}}\in H\cap D$ and $H$ is a $V$-generic filter for $\mathbb{P}$.
Let $\lusim{H}^*$ be a $\mathbb{T}_{\vec{\mathcal{W}}}$-name for the $B(\mathbb{P})$-generic filter corresponding to $\lusim{H}$.
Now the projection is defined as follows:
$$\pi(x)=\inf\{b\in B(\mathbb{P})\mid x\Vdash b\in\lusim{H}^*\}$$
Clearly $\pi$ is order preserving and has dense image in $B(\mathbb{P})$. To see that condition $(2)$ holds is just an abstract argument: take $b\leq \pi(x)$; then $\neg(x\Vdash b^c\in \lusim{H}^*)$, otherwise $b^c\geq \pi(x)\geq b$. Hence there is an extension $x'\geq x$ such that $x'\Vdash b^c\notin \lusim{H}^*$, and since $\lusim{H}^*$ is an ultrafilter it follows that $x'\Vdash b\in\lusim{H}^*$, so $\pi(x')\leq b$.
\end{proof}
\begin{remark}
If $D_p(\mathbb{P})$ can be extended to $U_p$ only densely often, then we still get a projection.
\end{remark}
The following theorem claims that in some sense, this is the only way to get a projection.
\begin{theorem}\label{equivalece}
Let $\mathbb{P}$ be a $\sigma$-distributive forcing of size $\kappa$. The following are equivalent:
\begin{itemize}
\item There is a sequence $\vec{\mathcal{U}}$ of $\kappa$-complete ultrafilters and a projection $\pi\colon \mathbb{T}_{\vec{\mathcal{U}}} \to B(\mathbb{P})$.
\item For every $p\in \mathbb{P}$, $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter $U_p$.
\end{itemize}
\end{theorem}
\begin{proof}
If $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter, use Claim~\ref{ExtensionToProj}. For the other direction, let $\pi:\mathbb{T}_{\vec{\mathcal{U}}}\rightarrow B(\mathbb{P})$ be a projection, and denote $\mathbb{T}_{\vec{\mathcal{U}}} = \mathbb{T}$. Without loss of generality, we can assume that $\mathbb{P}=\kappa$, and $\leq_{\mathbb{P}}$ is an order on $\kappa$.
Let $q\in \mathbb{P}$ and $p = \langle s, T\rangle\in\mathbb{T}$ such that $\pi(p)\geq q$, which exists since $\pi$ is a projection. For every $D\subseteq \mathbb{P}$ dense and open above $q$, let $$\bar{D} := \{b\in B(\mathbb{P})\mid \exists a\in D. a\leq b\}$$ Then $\bar{D}\subseteq B(\mathbb{P})$ is dense open, and since $\pi$ is a projection, $D':=\pi^{-1}[\bar{D}]$ is a dense open subset of $\mathbb{T}$ above $p$. By the strong Prikry property, there is a direct extension $p^* = \langle s, T_D\rangle \geq^* p$ and a natural number $n_D<\omega$ such that for all $\eta\in T_D$ with $\len(\eta)=n_D$, $\pi(\langle s ^\smallfrown \eta, (T_D)_\eta\rangle) \in \bar{D}$, while the projection of any extension of $\langle s, T_D\rangle$ of smaller length is not in $\bar{D}$. We claim that there must be a single $n^*<\omega$ which is an upper bound to the set $$\{n_D \mid D\subseteq \mathbb{P}\text{ dense open above } q\}$$
Otherwise, there is a sequence of dense open subsets $D_m$ above $q$ for which \[\sup_{m<\omega} n_{D_m}=\omega.\]
The forcing $\mathbb{P}$ is $\sigma$-distributive, thus
\[D^*=\bigcap_{m<\omega}D_m\]
is still dense and open above $q$. Consider $n_{D^*}$ and $T_{D^*}$. Any extension $p'$ of length $n_{D^*}$ from $T_{D^*}$ will satisfy $\pi(p') \in \bar{D^*}$ and in particular it will be in $\bar{D_m}$ for all $m$. But let $m$ be so large that $n_{D_m}>n_{D^*}$. This is a contradiction to the definition of $n_{D_m}$.
Let us fix such $n^*$. Next we consider the iterated ultrapower of length $\omega$ using the ultrafilters in $\vec{\mathcal{U}}$.
Let $k = \len s$ (the length of the stem of $p$) and let us denote $s = \langle \delta^*_0, \dots, \delta^*_{k-1}\rangle$. Consider the iteration corresponding to $\vec{\mathcal{U}}$ above $s$, and denote
$\delta^*_{k+n} =\delta_n$.
By Theorem~\ref{genericsequence}, $\langle \delta^*_n \mid n < \omega\rangle$ is a tree Prikry generic sequence for the forcing $j_{\omega}(\mathbb{T})$ over the model $M_\omega$ and by Claim~\ref{claim}, this generic filter will contain the condition $j_{\omega}(p)$. Denote by $H_\omega\subseteq j_{\omega}(B(\mathbb{P}))$ the $M_\omega$-generic filter generated by the Prikry sequence in $M_\omega[\langle\delta^*_n\mid n<\omega\rangle]$.
Working in $M_{n^*-1}$, let
\[F = \{x \in j_{n^*-1}(\mathbb{P}) \mid \exists T, j_{n^*-1}(\pi)(\langle \langle\delta^*_0, \dots, \delta^*_{k + n^* - 1}\rangle, T\rangle) \geq x\}.\]
$F \in M_{n^*-1}$ and it is a subset of $j_{n^*-1}(\mathbb{P})=j_{n^*-1}(\kappa)$. In particular for every $x\in F$, $j_{n^*-1,\omega}(x)=x$.
Since for every $T$, $j_{n^*-1,\omega}(\langle \langle\delta^*_0, \dots, \delta^*_{k + n^* - 1}\rangle, T\rangle)$ is a member of the generic filter which is generated by the sequence $\langle \delta^*_n \mid n < \omega\rangle$, we conclude that $F\subseteq H_\omega$. Note that $F\in M_{n^*}$, as $M_{n^*-1}$ and $M_{n^*}$ agree on subsets of $j_{n^*-1}(\kappa)$. It follows that $j_{n^*,\omega}(F)=F\in M_\omega$. Thus, there must be a single condition $f\in H_\omega$ forcing $F\subseteq \dot{H_\omega}$. This can be the case only if $f$ is stronger than all elements of $F$. Find any $f^*\in\mathbb{P}$ such that $f^*\geq f$. We conclude that for every dense open set $D \subseteq \mathbb{P}$ above $q$, $f\in j_{\omega}(\bar{D})$ and since $D$ is dense open in $\mathbb{P}$, $f^*\in j_{\omega}(D)$.
Let us define:
\[U_q=\{A \subseteq \mathbb{P} \mid f^* \in j_\omega(A)\}\]
$U_q$ is a $\kappa$-complete ultrafilter (since $\crit j_{\omega} = \kappa$) and for all dense open $D \subseteq \mathbb{P}$ above $q$, $D \in U_q$.
\end{proof}
\begin{remark}
In the previous proof we have defined the filter $U_q$ to be $$U_q=\{A\subseteq\mathbb{P}\mid f^*\in j_\omega(A)\}$$
where $f^*\in\mathbb{P}$ was a condition forcing $F\subseteq\dot{H_\omega}$, $\dot{H_\omega}$ being a canonical name for the generic filter of $j_\omega(\mathbb{P})$. In $M_{n^*}$, the set $F$ is bounded below the critical point of $j_{n^*,\omega}$, and therefore $j_{n^*,\omega}(F)=F$. By elementarity of $j_{n^*,\omega}$, there is a condition $q^*\in j_{n^*}(\mathbb{P})$ forcing that $F\subseteq\dot{H_{n^*}}$, where $\dot{H_{n^*}}$ is the canonical name for the generic filter of $j_{n^*}(\mathbb{P})$. So we may use $q^*$ in order to define $$U_q=\{A\subseteq\mathbb{P}\mid q^*\in j_{n^*}(A)\}$$
This new definition indicates that if there is a projection from $\mathbb{T}_{\vec{\mathcal{U}}}$ onto $\mathbb{P}$, then there will be a Rudin-Keisler projection of the sequence of ultrafilters $\vec{\mathcal{U}}$ on an ultrafilter extending the filter of dense open subsets of $\mathbb{P}$.
\end{remark}
\begin{definition}
Let $\vec{\mathcal{U}}$ be a tree of $\kappa$-complete ultrafilters and let $W$ be a $\kappa$-complete ultrafilter. We say that $W \leq_{RK} \vec{\mathcal{U}}$ if there are $\vec{\eta} \in \kappa^{<\omega}$ and $n < \omega$ such that $$W\leq_{RK} (\mathcal{U}_{\vec{\eta}})_n.$$
\end{definition}
\begin{theorem}
Let $\vec{\mathcal{U}}$ be a tree of $\kappa$-complete ultrafilters and let $\mathbb{P}$ be a $\sigma$-distributive forcing of cardinality $\kappa$.
If $\mathbb{T}_{\vec{\mathcal{U}}}$ projects onto $B(\mathbb{P})$, then for every $p=\langle\delta_0,\dots,\delta_{k-1},T\rangle\in\mathbb{T}_{\vec{\mathcal{U}}}$ there is a $\kappa$-complete ultrafilter $U_p$ which extends $D_{\pi(p)}(\mathbb{P})$, contains $p$, and satisfies $U_p\leq_{RK} \vec{\mathcal{U}}$.
\end{theorem}
\begin{proof}
The proof is just the continuation of the discussion following the proof of Theorem~\ref{equivalece}; recall the definition of $U_p$: $$U_p=\{A\subseteq\mathbb{P}\mid q^*\in j_{n^*}(A)\}$$
There exists a function $g:[\kappa]^{n^*}\rightarrow \mathbb{P}$ such that $j_{n^*}(g)(\delta_k,\dots,\delta_{n^*+k})=q^*$. We claim that $U_p=g_*((\mathcal{U}_{\langle\delta_0,\dots,\delta_{k-1}\rangle})_{n^*+1})$. Let $A\subseteq \mathbb{P}$; then
$$A\in U_p\Longleftrightarrow j_{n^*}(g)(\delta_k,\dots,\delta_{n^*+k})\in j_{n^*}(A)\Longleftrightarrow$$ $$\Longleftrightarrow\langle \delta_k,\dots,\delta_{n^*+k}\rangle\in j_{n^*}(g^{-1}[A]) \Longleftrightarrow g^{-1}[A]\in (\mathcal{U}_{\langle\delta_0,\dots,\delta_{k-1}\rangle})_{n^*+1} $$
\end{proof}
\section{Projections of forcings}
The following simple lemma indicates that the difficulty of extending the dense open filter for different forcing notions is related to the existence of projections from other forcing notions.
\begin{lemma}
Let $\pi\colon \mathbb{P}\to\mathbb{Q}$ be a projection of forcing notions and let $\kappa$ be a regular cardinal. If there is a $\kappa$-complete ultrafilter that extends $\mathcal{D}_p(\mathbb{P})$, then there is a $\kappa$-complete ultrafilter that extends $\mathcal{D}_{\pi(p)}(\mathbb{Q})$.
\end{lemma}
\begin{proof}
Let $\mathcal{U}$ be a $\kappa$-complete ultrafilter that extends $\mathcal{D}_p(\mathbb{P})$. Let:
\[\pi^*(\mathcal{U}) = \{A \subseteq \mathbb{Q} \mid \pi^{-1}(A) \in \mathcal{U}\}.\]
It is clear that $\pi^*(\mathcal{U})$ is a $\kappa$-complete ultrafilter. For any dense open set $D\in \mathcal{D}_{\pi(p)}(\mathbb{Q})$, the fact that $\pi$ is a projection ensures that $\pi^{-1}(D)\in \mathcal{D}_{p}(\mathbb{P})$. Thus, $D \in \pi^*(\mathcal{U})$.
\end{proof}
For the definition of $\lambda$-strategically closed forcings, see \cite{CummingsHand}.
The proof of the following lemma is a variant of theorem $14.1$ in \cite{CummingsHand}.
\begin{lemma}[Folklore]\label{CohenProj}
Let $\mathbb{P}$ be a $\kappa$-strategically closed forcing notion of size $\leq\lambda$. There is a projection from $\Col(\kappa,\lambda)$ onto $\mathbb{P}$.
\end{lemma}
The relevant case for our purpose is the case $\kappa = \lambda$. In this case, $\Col(\kappa,\kappa)\cong\Add(\kappa,1)$. Thus, if $\mathbb{P}$ is a $\kappa$-strategically closed forcing of size $\kappa$, then there is a projection from the Cohen forcing $\Add(\kappa,1)$ onto $B(\mathbb{P})$.
Note that the other direction of Lemma~\ref{CohenProj} is also true, namely that if there is a projection $\pi:\Add(\kappa,1)\rightarrow B(\mathbb{P})$, then $\mathbb{P}$ must also be $\kappa$-strategically closed.
We conclude that questions about the existence of ultrafilters that extend the dense open filter of $\kappa$-strategically closed forcing notions of cardinality $\kappa$ are equivalent to the same question about the Cohen forcing.
For ${<}\kappa$-strategically closed forcing notions the situation is more involved.
\begin{definition}[Jensen]
Let $\kappa$ be an inaccessible cardinal. A \emph{Jensen Square} on $\kappa$ is a sequence $\langle C_\alpha \mid \alpha \in D\rangle$, such that
\begin{enumerate}
\item $D$ is a club consisting of only limit ordinals.
\item $C_\alpha$ is a club at $\alpha$.
\item $\otp C_\alpha < \alpha$.
\item If $\beta \in \acc C_\alpha$, then $\beta\in D$ and $C_\beta = C_\alpha \cap \beta$.
\end{enumerate}
\end{definition}
Note that if there is a Jensen square on $\kappa$ then $\kappa$ is not a Mahlo cardinal.
The following lemma was proven by Velleman \cite[Theorem 1]{Vell}.
\begin{lemma}\label{Vell}
Let $\kappa$ be an infinite cardinal. If there is a Jensen square on $\kappa$ then every ${<}\kappa$-strategically closed forcing is $\kappa$-strategically closed.
\end{lemma}
There is a standard forcing for adding Jensen square at a cardinal $\kappa$, $\mathbb{S}_\kappa$.
\begin{definition}\label{definition: jensen square}
The conditions of $\mathbb{S}_\kappa$ are pairs of the form $\langle \mathcal C, d\rangle$, such that
\begin{enumerate}
\item $d \subseteq \kappa$ is closed and bounded (with last element) consisting only of limit ordinals.
\item $\mathcal{C}$ is a function, $\dom \mathcal{C} = d$.
\item For every $\alpha \in d$, $\mathcal{C}(\alpha)$ is a club at $\alpha$, $\otp \mathcal{C}(\alpha) < \alpha$.
\item $\forall \beta \in \acc \mathcal{C}(\alpha)$, $\beta\in d$ and $\mathcal{C}(\beta) = \mathcal{C}(\alpha)\cap \beta$.
\end{enumerate}
For $\langle \mathcal{C}, d\rangle, \langle\mathcal{C}', d'\rangle\in\mathbb{S}_\kappa$, $\langle\mathcal{C}, d\rangle\leq \langle\mathcal{C}', d'\rangle$ if $d=d'\cap(\max(d)+1)$ and $\mathcal{C} = \mathcal{C}' \restriction d$.
\end{definition}
There are many variations of this forcing, some of them can be found in \cite{CumShmSquare}.
\begin{lemma}[Folklore]\label{JensenStCl}
Let $\kappa$ be a regular cardinal then $\mathbb{S}_\kappa$ is ${<}\kappa$-strategically closed.
\end{lemma}
\begin{proof}
Let us define a strategy $\sigma$ first.
$\sigma(\langle\rangle)=\langle\emptyset,\emptyset\rangle$. Assume that $$\langle \langle C_i,D_i\rangle,\langle E_i,F_i\rangle\mid i<\alpha\rangle$$ is defined and played according to $\sigma$, and let us define $$\sigma(\langle \langle C_i,D_i\rangle,\langle E_i,F_i\rangle\mid i<\alpha\rangle)=\langle C_\alpha,D_\alpha\rangle$$
Denote $d_i=\max(D_i)$. If $\alpha$ is limit, let $d_\alpha=\sup_{i<\alpha}d_i$. Then $\langle C_\alpha,D_\alpha\rangle$ is defined if and only if $d_\alpha$ is a singular cardinal, in which case $$D_\alpha=(\cup_{i<\alpha}D_i)\cup\{d_\alpha\}$$ For every $i<\alpha$, $C_\alpha\restriction D_i=C_i$ and $$C_\alpha(d_\alpha)=\bigcup_{i<\alpha}C_i(d_i)$$
If $\alpha=\beta+1$, let $d_\alpha$ be an ordinal of cofinality $\omega$ above
$\max(F_\beta)$ and let $\langle x_n\mid n<\omega\rangle$ be a cofinal sequence in $d_\alpha$ such that $x_0>d_{\beta}$. Define $$D_\alpha=F_\beta\cup\{d_\alpha\}$$
Also $C_\alpha\restriction F_\beta=E_\beta$ and
$$C_\alpha(d_\alpha)=C_{\beta}(d_{\beta-1})\cup\{x_n\mid n<\omega\}$$
Obviously, by the inductive construction, $C_\alpha$ is coherent. It is not hard to see that $\otp(C_\alpha(d_\alpha))=\omega\cdot\alpha$.
So the strategy $\sigma_\lambda$ starts by jumping above $\omega\cdot\lambda$, then uses $\sigma$. This guarantees that always $\otp(C_\alpha(d_\alpha))<d_\alpha$ and that $d_\alpha$ for limit $\alpha$ is always singular.
\end{proof}
In general, $|\mathbb{S}_\kappa| = \kappa^{<\kappa}$. Thus, for strongly inaccessible cardinals $\kappa$, $|\mathbb{S}_\kappa| = \kappa$ and it fits to the framework of this paper. For Mahlo cardinal $\kappa$, $\mathbb{S}_\kappa$ is not $\kappa$-strategically closed (otherwise, it would be possible to construct Jensen square sequence in the ground model). Thus, for Mahlo cardinal $\kappa$, $\mathbb{S}_\kappa$ is not isomorphic to a complete subforcing of $\Add(\kappa,1)$.
Let us remark that in models of the form $L[E]$, there is a partial square sequence in the ground model which is defined on all singular cardinals. In those cases the forcing that shoots a club through the singular cardinals clearly adds a Jensen square for $\kappa$.
The following lemma shows that adding a Jensen square to $\kappa$ is maximal between all ${<}\kappa$-strategically closed forcing notions.
\begin{lemma}\label{JenSenProj}
$\mathbb{S}_\kappa \cong \mathbb{S}_\kappa \times \Add(\kappa,1)$. In particular, for every ${<}\kappa$-strategically closed forcing $\mathbb{P}$ of cardinality $\kappa$, there is a projection from $\mathbb{S}_\kappa$ onto $B(\mathbb{P})$.
\end{lemma}
\begin{proof}
Let us define a dense embedding $\pi:\mathbb{S}_\kappa\rightarrow\mathbb{S}_\kappa\times\Add(\kappa,1)$.
For every $A\subseteq\kappa$ and $\alpha<\otp(A)$, let $A(\alpha)$ be the $\alpha$-th element of $A$ in its natural enumeration.
Define $E_\omega=\{\alpha+\omega\mid \alpha<\kappa\}$ and for $\alpha\in E_{\omega}$ let $\alpha^-=\max(Lim(\alpha))$ be the maximal limit ordinal below $\alpha$.
Let $\langle\mathcal{C},d\rangle\in\mathbb{S}_{\kappa}$, and define $\pi(\langle\mathcal{C},d\rangle)=\langle\langle\mathcal{C}',d\rangle,f\rangle$
such that:
\begin{enumerate}
\item $dom(f)=\gamma_d$, where $\gamma_d=\otp(d\cap E_{\omega})$.
\item For $i<\gamma_d$, define $$f(i)=1\Longleftrightarrow ((d\cap E_{\omega})(i))^-+1\in \mathcal{C}((d\cap E_{\omega})(i))$$
\item $dom(\mathcal{C}')=d$.
\item $\mathcal{C}'$ is defined inductively. For $\alpha\in d$ let $\beta_\alpha=\max(Lim(\mathcal{C}(\alpha))\cap\alpha)$ and assume that $\mathcal{C}'(\beta)$ is defined coherently for every $\beta<\alpha$.
\begin{enumerate}
\item If $\alpha\in d\cap E_{\omega}$, then $\beta_\alpha\leq\alpha^-$; define $$\mathcal{C}'(\alpha)=\mathcal{C}'(\beta_\alpha)\cup \big( [\beta_\alpha,\alpha^-]\cap \mathcal{C}(\alpha)\big)\cup\big \{\gamma-1\mid \gamma\in[\alpha^-+2,\alpha)\cap \mathcal{C}(\alpha)\big\}$$
\item If $\alpha\notin d\cap E_{\omega}$ and $\beta_\alpha=\alpha$, let \[\mathcal{C}'(\alpha)=\bigcup_{\gamma\in \acc(\mathcal{C}(\alpha))\cap\alpha}\mathcal{C}'(\gamma)\]
\item If $\alpha\notin d\cap E_{\omega}$ and $\beta_\alpha<\alpha$, let
$$\mathcal{C}'(\alpha)=\mathcal{C}'(\beta_\alpha)\cup\big( [\beta_\alpha,\alpha]\cap\mathcal{C}(\alpha)\big)$$
\end{enumerate}
\end{enumerate}
Let us prove first that $\langle\langle\mathcal{C}',d\rangle,f\rangle\in\mathbb{S}_{\kappa}\times \Add(\kappa,1)$. Obviously, $f\in \Add(\kappa,1)$. It is routine to check that $\langle\mathcal{C}',d\rangle\in \mathbb{S}_{\kappa}$: one shows by induction that $$\otp(\mathcal{C}(\alpha))=\otp(\mathcal{C}'(\alpha)), \ \acc(\mathcal{C}(\alpha))=\acc(\mathcal{C}'(\alpha))$$ and that conditions $(3),(4)$ of Definition~\ref{definition: jensen square} hold.
The induction step uses the fact that removing at most one ordinal below a limit point of the set changes neither the order type nor the limit points of the set.
To see that $\pi\image\mathbb{S}_\kappa$ is dense in $\mathbb{S}_{\kappa}\times\Add(\kappa,1)$, let $$p=\langle\langle\mathcal{N},d\rangle,f\rangle\in \mathbb{S}_{\kappa}\times\Add(\kappa,1)$$ Extend $p$ if necessary to $\langle\langle\mathcal{N}',d'\rangle,f'\rangle$
so that $dom(f')=\otp(d'\cap E_{\omega})$. This is possible since $f$ can be defined arbitrarily on missing points of its domain, and $\gamma_d$ can be increased by extending $\langle \mathcal{N},d\rangle$ at successor steps of $d$ from the set $E_{\omega}$ in a coherent way, just as in Lemma~\ref{JensenStCl}.
To see that $\langle\langle\mathcal{N}',d'\rangle,f'\rangle\in \pi\image\mathbb{S}_\kappa$,
define $\langle \mathcal{C},d'\rangle$ recursively. Assume $\alpha\in d'\cap E_{\omega}$ and $\alpha=(d'\cap E_\omega)(i)$. If $f'(i)=0$, define
$$\mathcal{C}(\alpha)=\mathcal{C}(\beta_\alpha)\cup \big( [\beta_\alpha,\alpha^-]\cap \mathcal{N}(\alpha)\big)\cup\big \{\gamma+1\mid \gamma\in(\alpha^-,\alpha)\cap\mathcal{N}(\alpha)\big\}$$
If $f'(i)=1$, define
$$\mathcal{C}(\alpha)=\mathcal{C}(\beta_\alpha)\cup \big( [\beta_\alpha,\alpha^-]\cap \mathcal{N}(\alpha)\big)\cup\big \{\gamma+1\mid \gamma\in(\alpha^-,\alpha)\cap\mathcal{N}(\alpha)\big\}\cup\{\alpha^-+1\}$$
If $\alpha\notin d'\cap E_\omega$ and $\alpha=\beta_\alpha$ define \[\mathcal{C}(\alpha)=\bigcup_{\gamma\in \acc(\mathcal{N}(\alpha))\cap\alpha}\mathcal{C}(\gamma)\]
Finally if
$\alpha\notin d'\cap E_{\omega}$ and $\beta_\alpha<\alpha$ let
$$\mathcal{C}(\alpha)=\mathcal{C}(\beta_\alpha)\cup\big( [\beta_\alpha,\alpha]\cap\mathcal{N}(\alpha)\big)$$
It is routine to check that $\pi$ is an embedding.
For the second part, assume that $\mathbb{P}$ is a ${<}\kappa$-strategically closed forcing, and let $G$ be generic for $\mathbb{S}_\kappa$; then $V[G]=V[G'][H]$ where $G'$ is another generic for $\mathbb{S}_\kappa$ and $H$ is $V[G']$-generic for $\Add(\kappa,1)$. In $V[G']$, since $\mathbb{S}_\kappa$ is ${<}\kappa$-strategically closed, there are no new plays of $\mathbb{P}$ of length less than $\kappa$, indicating that $\mathbb{P}$ stays ${<}\kappa$-strategically closed in $V[G']$. Since in $V[G']$ there is a square sequence, use Lemma~\ref{Vell} to conclude that $\mathbb{P}$ is $\kappa$-strategically closed in $V[G']$. Thus by Lemma~\ref{CohenProj}, there is a projection $\pi:\Add(\kappa,1)\rightarrow \mathbb{P}$ in $V[G']$. Let us turn this projection into a projection in $V$ from $\mathbb{S}_\kappa\times\Add(\kappa,1)$. Let $\tilde{\pi}$ be an $\mathbb{S}_\kappa$-name such that $\Vdash_{\mathbb{S}_\kappa}$ ``$\tilde{\pi}:\Add(\kappa,1)\rightarrow B(\mathbb{P})$ is a projection''. Consider the set $$D=\{\langle p,q\rangle\in\mathbb{S}_\kappa\times\Add(\kappa,1)\mid \exists a\in \mathbb{P}.\ p\Vdash \tilde{\pi}(q)=a\}$$
It is dense in $\mathbb{S}_\kappa\times\Add(\kappa,1)$. For every $\langle p,q\rangle\in D$, define $\pi_*(\langle p,q\rangle)=a$ for the unique $a\in \mathbb{P}$ such that $p\Vdash \tilde{\pi}(q)=a$. It is a straightforward verification to see that $\pi_*:D\rightarrow \mathbb{P}$ is a projection.
\end{proof}
The following lemma shows that $\mathbb{S}_\kappa$ is not maximal among the $\kappa$-distributive forcing notions. For a fat stationary set $S\subseteq \kappa$, let $Club(S)$ be the forcing that shoots a club through $S$ using closed and bounded conditions. By \cite{AbrahamShelah1994}, if $\kappa^{<\kappa} = \kappa$, then $Club(S)$ is $\kappa$-distributive if and only if $S$ is a fat stationary set.
\begin{lemma}
Let $S \subseteq T \subseteq \kappa$ be fat stationary sets.
If the set of all $\alpha\in T \setminus S$ such that $T\cap\alpha$ contains a club at $\alpha$ is stationary, then $T\setminus S$ stays stationary in $V^{Club(T)}$ and in particular there is no projection from $Club(T)$ to $Club(S)$.
\end{lemma}
\begin{remark}
After adding a single Cohen set to $\kappa$, there is a partition of $\kappa$ into $\kappa$ many disjoint fat stationary sets. Thus, the structure of the $\kappa$-distributive forcing notions of size $\kappa$ might be complicated in general, even when $\kappa$ is a large cardinal.
\end{remark}
\begin{proof}
Let $C\subseteq T$ be a $V$-generic club for $Club(T)$. Assume that $T\setminus S$ is not stationary in $V[C]$, and let $\lusim{B}$ be a name such that some $p\in Club(T)$ forces that $\lusim{B}$ is a club disjoint from $T\setminus S$. Let $\langle M_i\mid i<\kappa\rangle$ be an increasing and continuous chain of elementary substructures of $H(\theta)$ for some large enough $\theta$ such that:
\begin{enumerate}
\item $p,\lusim{B},S,T,Club(T)\in M_0$.
\item $|M_i|<\kappa$.
\item $x_i:=M_i\cap\kappa\in \kappa$.
\item and ${}^{x_i}M_i\subseteq M_{i+1}$.
\end{enumerate}
Consider the club $\{\alpha\mid x_{\alpha}=\alpha\}$. There is $\alpha<\kappa$ such that $x_\alpha=\alpha\in T\setminus S$ and there is a closed unbounded set $D\subseteq T\cap \alpha$. Let us construct an increasing sequence of conditions $\langle p_i\mid i<\theta\rangle$ such that:
\begin{enumerate}
\item $p_0=p$ and $p_i\in M_{\alpha}$.
\item $[p_{i+1}\setminus\max(p_i)]\cap D\neq\emptyset$.
\item there is $\max(p_i)<y_i\in M_{\alpha}$ such that $p_{i+1}\Vdash y_i\in\lusim{B}$.
\end{enumerate} Assume that $\langle p_i\mid i<j\rangle$ is defined and let $\eta=\sup\{\max(p_i)\mid i<j\}$. If $j$ is limit and $\eta=\alpha$, define $\theta=j$ and stop. Otherwise, there is $r<\alpha$ such that $\langle p_i\mid i<j\rangle\subseteq M_{r}$, and thus it belongs to $M_{j+1}$. By closure of $D$, $\eta\in D$, hence it is safe to define $$p_j=\cup_{i<j}p_i\cup\{\eta\}\subseteq T$$
which is definable in $M_{\alpha}$. For the successor step, assume $p_i\in M_{\alpha}$ is defined. Work inside $M_{\alpha}$ and let $p'_{i+1}$ be a condition deciding a value $y_{i}\in\lusim{B}$ above $\max(p_i)$. Since $D$ is unbounded, there is $z\in D\setminus \max(p'_{i+1})$; then $p_{i+1}=p'_{i+1}\cup\{z\}\in M_{\alpha}$ is as wanted.
Finally, $\cup_{i<\theta}p_i\cup\{\alpha\}\in Club(T)$ must force that $\alpha\in \lusim{B}\cap(T\setminus S)$, which is a contradiction.
\end{proof}
\section{Implications}
In this section we will show that certain large cardinals weaker than $\kappa$-compactness already imply the existence of a $\kappa$-complete ultrafilter extending the filters $\mathcal{D}_p(\mathbb{P})$.
Let us deal first with ${<}\kappa$-strategically closed forcing notion of size $\kappa$.
Recall that a cardinal $\kappa$ is called \emph{superstrong} if and only if there is an elementary embedding
$j\colon V \to M$ such that $\crit(j)=\kappa$ and $V_{j(\kappa)}\subseteq M$.
While $j(\kappa)$ is always a strong limit cardinal, and inaccessible in $M$, it need not be regular in $V$. Actually, $\kappa^{+} \leq \cof(j(\kappa)) \leq 2^\kappa$ for the first such cardinal; see \cite{Perlmutter2015}.
However, if $\cof(j(\kappa))>\lambda$, then ${}^\lambda j(\kappa)\subseteq M$.
\begin{theorem}\label{LesskappaStClUpperBound}
Suppose that there is an elementary embedding $j\colon V \to M$ such that $\crit j = \kappa$ and ${}^{2^\kappa}j(\kappa)\subseteq M$. Let $\mathbb{P}$ be a ${<}\kappa$-strategically closed forcing notion of size $\kappa$.
Then for every $p\in\mathbb{P}$ there is a $\kappa$-complete ultrafilter that extends $\mathcal{D}_p(\mathbb{P})$.
\end{theorem}
\begin{proof}
Assume without loss of generality that $\mathbb{P}=\kappa$. Fix $p\in \mathbb{P}$, and denote $2^\kappa$ by $\lambda$.
Clearly, $\lambda<j(\kappa)$, since $\mathcal{P}(\kappa)\subseteq M$ and $j(\kappa)$ is a measurable cardinal in $M$.
Let $\langle D_\alpha \mid \alpha < \lambda\rangle$ be an enumeration of all subsets of $\mathbb{P}$ in $V$ which are dense above $p$ and open.
In $M$, let $\Sigma$ be a winning strategy for the
game on $j(\mathbb{P})$ of length $\lambda + 1$.
Such $\Sigma$ exists since $j(\mathbb{P})$ is ${<}j(\kappa)$-strategically closed.
Let us pick by induction a sequence of conditions $p_\alpha \in j(\mathbb{P})=j(\kappa)$, $\alpha < \lambda$, such that $\forall \alpha < \beta$, $p_\alpha \leq p_\beta$ and $p_{\alpha + 1} \in j(D_\alpha)$.
First, let $p_0=p$. Each condition $p_{\alpha}$ is played by Player I according to $\Sigma$, and $q_\alpha$ is played by Player II, to be a condition stronger than $p_\alpha$ in $j(D_\alpha)$.
While the sequence $\langle j(D_\alpha) \mid \alpha < \lambda\rangle$ might not be in $M$,
the sequence $\langle p_\alpha, q_\alpha \mid \alpha < \lambda\rangle$ is in $M$, since ${}^{\lambda}j(\kappa)\subseteq M$, and it is a play which is played according to the strategy $\Sigma$. Therefore, it has an upper bound $\tilde{p}$ which is stronger than all the conditions $p_\alpha, q_\alpha$, $\alpha < \lambda$. By construction, $\tilde{p}\in \bigcap_{\alpha < \lambda} j(D_\alpha)$.
Finally,
$$U=\{X\subseteq \kappa \mid \tilde{p}\in j(X)\}$$ will be as desired.
\end{proof}
The assumption of the theorem cannot be optimal since
$V_{\kappa + 2} \subseteq M$, and thus it is true in $M$ as well that for every ${<}\kappa$-strategically closed forcing notion $\mathbb{P}$, there is a $\kappa$-complete ultrafilter that extends its dense open filter. Thus, by reflection, the conclusion holds for many cardinals below $\kappa$ as well.
Next, we turn to the class of $\kappa$-distributive forcings.
The upper bound in this case is a $1$-extendable cardinal:
\begin{definition}
A cardinal $\kappa$ is called $1$-extendible if there is a non-trivial elementary embedding $j:V_{\kappa+1}\rightarrow V_{\lambda+1}$ such that $\crit(j)=\kappa$.
\end{definition}
\begin{prop}
If $\kappa$ is $1$-extendible then for every $\kappa$-distributive forcing $\mathbb{P}$ of size $\kappa$ and every $p\in\mathbb{P}$, the filter $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter.
\end{prop}
\begin{proof}
Code $\langle\mathbb{P},\leq_{\mathbb{P}}\rangle$ as an order on $\kappa$. Thus we can assume without loss of generality that $\langle \mathbb{P},\leq_{\mathbb{P}}\rangle\in V_{\kappa+1}$. Recall that $D_p(\mathbb{P}):=\{D\subseteq \mathbb{P}\mid D\text{ is dense open above }p\}$.
Then $D_p(\mathbb{P})\subseteq V_{\kappa+1}$ and it is definable from $\langle\mathbb{P},\leq_{\mathbb{P}}\rangle$. Since $j(\kappa)=\lambda$, and $V_{\kappa+1}\models$ ``$\kappa$ is an inaccessible cardinal'', by elementarity $V_{\lambda+1}\models$ ``$\lambda$ is an inaccessible cardinal'',
and therefore $\lambda$ is an inaccessible cardinal in $V$. In particular, $V_\lambda$ is closed under ${<}\lambda$-sequences and the set $j''D_p(\mathbb{P})=\{j(D)\mid D\in D_p(\mathbb{P})\}\in V_{\lambda+1}$. By elementarity
$j(\mathbb{P})$ is $\lambda$-distributive, and since $|j''D_p(\mathbb{P})|=|D_p(\mathbb{P})|=2^\kappa<\lambda$, we conclude that $\cap_{D\in D_p(\mathbb{P})}j(D)$ is dense open in $j(\mathbb{P})$.
In particular it is non-empty and we can fix any $p^*\in \cap_{D\in D_p(\mathbb{P})}j(D)$. Now in $V$ we can define
$$F=\{X\subseteq\mathbb{P}\mid p^*\in j(X)\}$$
As in previous arguments, this $F$ is a $\kappa$-complete ultrafilter extending $D_p(\mathbb{P})$.
\end{proof}
We deal here with the following weakening of $\kappa$-compactness:
\begin{center}
\emph{
For every $\kappa$-distributive forcing notion of cardinality $\kappa$, the filter of its dense open subsets can be extended to a $\kappa$-complete ultrafilter.}
\end{center}
In this context, the major difference between $\kappa$ being $\kappa$-compact and $\kappa$ being $1$-extendible is that we do not need to extend \textit{every} $\kappa$-complete filter on $\kappa$. For our purposes we are only interested in extending filters which are definable using a parameter which is a subset of $\kappa$. This distinction leads to the realm of subcompact cardinals. Subcompact cardinals were defined by R.\ Jensen:
\begin{definition} A cardinal
$\kappa$ is called \emph{subcompact} if
for every $A\subseteq H(\kappa^+)$,
there are $\rho <\kappa$, $B\subseteq H(\rho^+)$
and an elementary embedding
$$j\colon \langle H(\rho^+),\in, B \rangle \to \langle H(\kappa^+),\in, A \rangle$$
with critical point $\rho$, such that $j(\rho) = \kappa$.
\end{definition}
The following strengthening was introduced by I.\ Neeman and J.\ Steel \cite{NeemanSteelSubcompact}:
\begin{definition}\label{def-pi1-1}
$\kappa$ is called \emph{$\Pi_1^1$-subcompact} if
for every $A\subseteq H(\kappa^+)$ and for every $\Pi_1^1$-statement $\Phi$,
if $\langle H(\kappa^+),\in, A \rangle\models \Phi$ then there are $\rho <\kappa$ and $B\subseteq H(\rho^+)$
such that
$\langle H(\rho^+),\in, B \rangle\models \Phi$ and there is an elementary embedding
$$j:\langle H(\rho^+),\in, B \rangle \to \langle H(\kappa^+),\in, A \rangle$$
with critical point $\rho$, such that $j(\rho) = \kappa$.
\end{definition}
The third author showed in \cite{YairSquare} the following:
\begin{theorem}\label{thm-pi1-1}
If $\kappa$ is $\Pi_1^1$-subcompact, then it is a $\kappa$-compact cardinal i.e.\ every $\kappa$-complete filter over $\kappa$ extends to a $\kappa$-complete ultrafilter.
On the other hand, if $\kappa$ is $\kappa$-compact then $\square(\kappa)$ and $\square(\kappa^+)$ fails.
\end{theorem}
The failure of square at two consecutive cardinals seem to have very high consistency strength, which made the conjecture that $\kappa$-compactness is equiconsistent with $\Pi_1^1$-subcompactness plausible. However, a recent work of
Larson and Sargsyan, \cite{LarsonSargsyan2021}, casts doubt on
this heuristic by showing that the consistency strength of the failure of two consecutive squares at $\omega_3$ and $\omega_4$
is below a Woodin limit of Woodin cardinals.
Let us start with the following observation:
\begin{prop}
Let $\kappa$ be a subcompact cardinal such that the filter $F_{\mathbb Q}$ of dense open subsets of $\mathbb Q$ extends to a $\kappa$-complete ultrafilter over $\kappa$, for every
$\kappa$-distributive poset $\mathbb Q$ of size $\kappa$.
Then $\kappa$ is a limit of cardinals with the same extension property.
\end{prop}
\begin{proof}
Clearly it is enough to deal with posets which are partial orders on the set $\kappa$.
For every such $\mathbb Q$, fix a $\kappa$-complete ultrafilter $ F^*_{\mathbb Q}$ over $\kappa$ which extends $F_{\mathbb Q}$.
The ultrafilter $F^*_{\mathbb{Q}}$ is a subset of $P(\kappa) \subseteq H(\kappa^{+})$. Thus, one can code the set:
\[\{\langle \mathbb Q, F^*_{\mathbb Q} \rangle \mid \mathbb Q\subseteq \kappa \text{ is } \kappa\text{-distributive}\}\]
by a subset $A$ of $H(\kappa^+)$ (for example, we can set $A = \bigcup_{\mathbb{Q}} \{\mathbb{Q}\} \times F^*_{\mathbb{Q}}$).
Now by the definition of subcompactness, with parameter $A$,
there are $\rho <\kappa$ and $B\subseteq H(\rho^+)$
and an elementary embedding
$$j\colon \langle H(\rho^+),\in, B \rangle \to \langle H(\kappa^+),\in, A \rangle$$
with critical point $\rho$, such that $j(\rho) = \kappa$.
Since the set of all $\rho$-distributive posets is
definable in $H(\rho^+)$, and $j$ is elementary, the set $B$ is a code of the set:
\[\{ \langle \mathbb Q, F^*_{\mathbb Q} \rangle \mid \mathbb{Q} \subseteq \rho \text{ is }\rho\text{-distributive}\},\]
and in particular for every $\rho$-distributive poset $\mathbb Q$ of size $\rho$, there is a $\rho$-complete ultrafilter extending $F_{\mathbb{Q}}$.
\end{proof}
Let us consider now the lightface version of Definition~\ref{def-pi1-1}:
\begin{definition} A cardinal
$\kappa$ is called \emph{lightface $\Pi_1^1$-subcompact} if
for every $\Pi_1^1$-statement $\Phi$,
if $\langle H(\kappa^+),\in\rangle\models \Phi$ then there is $\rho <\kappa$
such that
$\langle H(\rho^+),\in\rangle\models \Phi$ and there is an elementary embedding
$$j:\langle H(\rho^+),\in \rangle \to \langle H(\kappa^+),\in \rangle$$
with critical point $\rho$, such that $j(\rho) = \kappa$.
\end{definition}
The definition does not allow us to add parameters from $H(\kappa^+)$ to the formula $\Phi$, and thus this large cardinal property is witnessed by a countable set of elementary embeddings.
The next proposition is similar to Theorem~\ref{thm-pi1-1}:
\begin{prop}
Let $\kappa$ be lightface $\Pi^1_1$-subcompact. Then for every $\kappa$-distributive forcing $\mathbb{P}$ of size $\kappa$ and every $p\in\mathbb{P}$, the filter $D_p(\mathbb{P})$ can be extended to a $\kappa$-complete ultrafilter.
\end{prop}
\begin{proof}
Assume otherwise, and let $\Phi$ be the statement that there is $\mathbb{P}=\langle \kappa,\leq_{\mathbb{P}}\rangle\in H(\kappa^+)$ and no ultrafilter extending $D_p(\mathbb{P})$ for some $p\in\mathbb{P}$. $\Phi$ is of the form $$\underset{\text{first order}}{\underbrace{\exists\mathbb{P}}}\underset{\text{second order}}{\underbrace{\forall U}} \underset{\text{first order}}{\underbrace{\mu(\mathbb{P},U)}}$$
Using $\mathrm{AC}$, such a formula can be expressed as a $\Pi^1_1$ formula \cite[P. 153, Lemma 7.2]{Drake}.
Note that $\Phi$ is defined with no parameters, hence by lightface $\Pi^1_1$-subcompactness of $\kappa$, there are $\rho<\kappa$ and an elementary embedding
$$j:H(\rho^+)\rightarrow H(\kappa^+)$$
with critical point $\rho$ such that $j(\rho)=\kappa$,
and such that $H(\rho^+)\models\Phi$.
Therefore there is $\mathbb{P}_\rho$ which is a counterexample: a forcing of size $\rho$ which is $\rho$-distributive such that there is no $\rho$-complete ultrafilter extending the filter $D_p(\mathbb{P}_\rho)$ for some $p\in\mathbb{P}_{\rho}$. Let us enumerate all dense open subsets of $\mathbb{P}_{\rho}$ above $p$ by $\langle D_i\mid i<2^\rho\rangle$. The sequence $\langle j(D_i)\mid i<2^\rho\rangle$ is in $H(\kappa^+)$, since $2^\rho < \kappa$. By elementarity, $j(\mathbb{P}_{\rho})$ is $j(\rho)$-distributive and therefore $\bigcap_{i<2^\rho}j(D_i)\neq\emptyset$, so let $x$ be an element in the intersection. Then
$$\{X\subseteq\mathbb{P}_{\rho}\mid x\in j(X)\}$$
is a $\rho$-complete ultrafilter extending $D_p(\mathbb{P}_{\rho})$ --- a contradiction to the choice of $\mathbb{P}_\rho$.
\end{proof}
To see that the notion of a lightface $\Pi^1_1$-subcompact is strictly weaker than $\Pi^1_1$-subcompact we have the following proposition:
\begin{prop}
Let $\kappa$ be $\Pi^1_1$-subcompact.
Then $\kappa$ is a limit
of lightface $\Pi^1_1$-subcompact cardinals.
\end{prop}
\begin{proof}
Suppose that $\kappa$ is a $\Pi^1_1$-subcompact cardinal.
Let $\Phi$ be a $\Pi^1_1$ statement (with no parameters). If $\Phi$ holds in $H(\kappa^{+})$, let
$$j_{\Phi}\colon\langle H(\rho_\Phi^+),\in\rangle\rightarrow \langle H(\kappa^+),\in\rangle$$ witness the reflection. Otherwise, let $B_\Phi$ be a subset of $H(\kappa^{+})$ witnessing the negation of $\Phi$. Let $T\subseteq \omega$ be the set of all G{\"o}del numbers of true $\Pi^1_1$-formulas in $H(\kappa^{+})$, and let $k_\Phi$ be the G{\"o}del number of $\Phi$.
For each $\Phi$ such that $k_\Phi \in T$, $j_\Phi\subseteq H(\rho_\Phi^+)\times H(\kappa^+)\subseteq H(\kappa^+)$.
There are only countably many such formulas $\Phi$, and thus we can code all those elementary embeddings as a single subset $A_T\subseteq H(\kappa^+)$\footnote{Indeed, $A_T$ is an element of $H(\kappa^{+})$.}.
Similarly, we can gather all the sets $B_\Phi$, for $\Phi$ such that $k_\Phi \notin T$, into a single subset $B$ of $H(\kappa^{+})$. Since the truth value of first order formulas is $\Delta_1^1$, we can take a $\Pi^1_1$-formula $\Lambda$ with parameter $k$, using the predicate $B$, such that $\Lambda(k, B)$ holds if and only if $k$ is the G{\"o}del number of a $\Pi^1_1$-formula $\Phi$ and $B_k$ is a counterexample for $\Phi$.
Let $A$ be a set coding $T$, $A_T$ and $B$.
There is a universal $\Pi^1_1$-formula $\Psi(y)$ where $y$ is a first order free variable such that for every regular cardinal
$\beta$, every $\Pi^1_1$ statement $\phi$, \[H(\beta)\models \phi\Longleftrightarrow H(\beta)\models \Psi(k)\]
for some natural number $k$ which is the G{\"o}del numbering of formulas \cite[p. 272, Lemma 1.9]{Drake}.
In the language of the model
${\langle} H(\kappa^+),\in, A{\rangle}$ we can formulate the statement $\alpha(A)$ "For every $\Pi^1_1$-statement $\Phi$, $k_\Phi\in T$ implies $\Psi(k_\Phi)$ and $k_\alpha \notin T$ implies $\Lambda(B, k)$".
Now, $\langle H(\kappa^+),\in, A\rangle\models \alpha(A)$;
applying $\Pi^1_1$-subcompactness to $A$,
there are $\rho < \kappa$, $B \subseteq H(\rho^+)$ and
an elementary embedding:
$$j\colon\langle H(\rho^+),\in,B\rangle\rightarrow \langle H(\kappa^+),\in,A\rangle$$
such that $\crit(j)=\rho$, $j(\rho)=\kappa$ and $\langle H(\rho^+),\in,B\rangle\models \alpha(B)$.
Let us show that $\rho$ is lightface $\Pi^1_1$-subcompact and we will be done. Let $\zeta$ be a $\Pi^1_1$-statement such that $\langle H(\rho^+),\in\rangle\models \zeta$; then $k_\zeta$ is coded in $B_1$ and by elementarity of $j$ also in $A_1$, hence $\langle H(\kappa^+),\in\rangle\models\zeta$.
So there is an embedding $j_{\zeta}$ coded by $A$.
In particular for $\rho_\zeta<j(\rho)$, $$\langle H(\kappa^+),\in,A\rangle\models \{x\mid \langle k_\zeta,x,j_\zeta(x)\rangle\in A\}=H(\rho_\zeta^+)$$ by elementarity of $j$, there is $\rho'_\zeta<\rho$ such that $$\langle H(\rho^+),\in,B\rangle\models\{x\mid \langle k_\zeta,x,j_\zeta(x)\rangle\in B\}=H(\rho_\zeta^{'+}).$$
It must be that $\rho'_\zeta=j(\rho_\zeta')=\rho_\zeta$, since the critical point is $\rho$. For every $x\in H(\rho_\zeta^+)$ there is a unique $y$ such that $\langle k_\zeta,x,y\rangle\in B$; define $i_\zeta(x)=y$. So
$$i_\zeta:\langle H(\rho_\zeta^+),\in\rangle\rightarrow\langle H(\rho^+),\in \rangle$$
We claim that $i_\zeta$ is elementary, and that $i_\zeta(\rho_\zeta)=\rho$. This will follow after we show that $j\circ i_\zeta=j_\zeta$. Indeed, $\langle k_\zeta, x,i_{\zeta}(x)\rangle\in B$ and by elementarity $\langle k_\zeta, j(x),j(i_{\zeta}(x))\rangle\in A$, but $j(x)=x$ since $\rho_\zeta^+<\rho$ and therefore $\langle k_\zeta ,x,j(i_{\zeta}(x))\rangle=\langle k_{\zeta}, x, j_{\zeta}(x)\rangle$; in particular $j(i_\zeta(x))=j_\zeta(x)$.
\end{proof}
\section{Lower bound}
In this section we deal with the forcing notion for shooting a club through the stationary set of singular ordinals below $\kappa$, i.e.\
$$Q=\{a \subseteq \kappa \mid |a|<\kappa, a \text{ is closed and each member of } a \text{ is singular}\} $$
ordered by end-extension.
This forcing is $<\kappa$-strategically closed. In our framework, $\kappa$ is strongly inaccessible and thus this forcing is of cardinality $\kappa$.
Our aim will be to show the following:
\begin{theorem}\label{thm:lowerbound}
Let us assume that there is a $\kappa$-complete ultrafilter extending $\mathcal{D}_{\emptyset}(Q)$.
Then either there is an inner model for $\exists \lambda,\, o(\lambda) = \lambda^{++}$, or $o^{\mathcal{K}}(\kappa) > \kappa^+$.
\end{theorem}
We split the proof into three parts. First, we will derive some unconditional claims that follow from the existence of such an ultrafilter. Then, we will focus on the case that there is no inner model with a measurable $\lambda$ of Mitchell order $\lambda^{++}$, and discuss the structure of the indiscernibles that follows from the hypothesis of the theorem. Finally, we will combine those two paths and obtain a robust way to extract some of the indiscernibles, from which we are going to get strength.
\subsection{Combinatorial consequences}
In order to prove Theorem \ref{thm:lowerbound}, we will start with a sequence of lemmas, establishing the existence of a certain elementary embedding with some useful properties.
Let $F$ be a $\kappa$-complete ultrafilter which extends $\mathcal{D}_\emptyset(Q)$.
Consider the corresponding elementary embedding $j_{F}\colon V\rightarrow \Ult(V,F)\simeq M_{F}$. Let $a=[id]_{F}$.
Then
\[a\in\bigcap \{j_{F}(D)\mid D\subseteq Q\text{ is dense open }\}\]
Let $a^*$ be a closed set of ordinals with minimal value of $\max(a^*)$, such that there is an ultrafilter $U=\{X\subseteq Q\mid a^*\in j_U(X)\}$ extending the filter of dense open subsets of $Q$. Equivalently, $a^*\in j_U(D)$, for every $D\subseteq Q$ dense open.
Fix such $a^*$ and let
$U$ be a witnessing ultrafilter. So $[id]_U=a^*$, by \cite[Lemma 1.6]{Hamkins1997}, \cite[Proposition 2.5]{TomTreePrikry}.
\begin{lemma}\label{minimality}
For every $\xi < \max (a^*)$, there is a dense open $D$ such that $a^*\cap(\xi+1)\notin j_U(D)$.
\end{lemma}
\begin{proof}
Otherwise, let $\xi < \max(a^*)$ be the least ordinal such that $a' = a^* \cap (\xi + 1)$ belongs to $j_U(D)$ for all $D \subseteq Q$ dense open. Let $$U'=\{X\subseteq Q\mid a'\in j_{U}(X)\}.$$
The ultrafilter $U'$ is below $U$ in the Rudin--Keisler order. One way to illustrate that is to pick a function $g\colon Q \to \kappa$ such that $j_U(g)(a^*) = \xi + 1$ and define the function $f(p)= p \cap g(p)$. Then $j_U(f) = a'$.
Let $k\colon M_{U'}\rightarrow M_{U}$ be the elementary embedding defined by $k([f]_{U'})=j_U(f)(a')$. By standard arguments, $k\circ j_{U'}=j_U$.
Consider in $M_{U'}$ the element $b=[id]_{U'}$ (note that $U_b=U'$). By the properties of $k$, $k(b)= a' = a^*\cap(\xi+1)$, hence $\max(b)\leq k(\max(b))\leq \xi<\max(a^*)$. To see that $b$ contradicts the minimality of $a^*$, note that for every dense open $D$, $k(b) = a'\in k(j_{U'}(D))$ and by elementarity of $k$, $b\in j_{U'}(D)$.
\end{proof}
\begin{lemma}\label{genericCon}
Let $\eta<\max(a^*)$ and $q\subseteq\eta$, $q\in j(Q)$. For every dense open set $D \subseteq Q$, the condition $(a^*\setminus\eta)\cup q$ is in $j(D)$.
\end{lemma}
\begin{proof}
Otherwise, let $D_1$, $\eta<\max(a^*)$ and $q'$ be such that $$(a^*\setminus\eta)\cup q'\notin j(D_1).$$ By minimality of $a^*$, there is $D_2\subseteq Q$ dense and open such that $a^*\cap\eta\notin j(D_2)$.
Let $D^*$ be the set of all conditions $p\in Q$ such that there is $\eta < \max(p)$ with $p \cap \eta \in D_1$ and moreover for every condition $q\in Q$ with $\max(q) \leq \eta$, $q \cup (p \setminus\eta) \in D_2$.
We claim that $D^*$ is dense open.
Let us show that $D^*$ is open. Let $p_1\in D^*$ and let $p_1\leq p_2$. Take $\eta<\max(p_1)\leq\max(p_2)$ witnessing $p_1\in D^*$; then $$p_1\cap\eta=p_2\cap\eta\in D_1$$ and if $\max(q)\leq\eta$ then $$q\cup(p_1\setminus\eta)\leq q\cup (p_2\setminus\eta)\in D_2$$ since $D_2$ is open. Thus $p_2\in D^*$.
Let us show that $D^*$ is dense. Let $p\in Q$ be any condition, and find $p\leq p_1\in D_1$. Denote $\max(p_1)=\eta$ and note that $2^\eta < \kappa$. Let us enumerate all $q\in Q$ with $\max(q)\leq\eta$ by $\langle q_i\mid i<2^\eta\rangle$, and let $D^i_2$ be the collection of all conditions $r\in Q$ such that $(r \setminus \eta) \cup q_i \in D_2$. For every $i$, $D^i_2$ is dense open. $Q$ is $\kappa$-distributive and thus $\bigcap_{i < 2^\eta} D^i_2$ is dense. Let us pick a condition $p_2 \geq p_1$ in this intersection. Clearly, $p_2\in D^*$.
Let us claim that $a^*\notin j(D^*)$, and conclude the proof. For any $\xi<\max(a^*)$, if $\xi\leq \eta$ then $a^*\cap\eta\notin j(D_1)$, and if $\xi>\eta$ then let $p=a^*\cap (\eta,\xi)$; then $q'\cup p\notin j(D_2)$ since $q'\cup a^*\geq q'\cup p$. Thus $a^*\notin j(D^*)$, contradicting the choice of $a^*$.
\end{proof}
We conclude that for every $\eta$, $a^* \setminus \eta \in j(D)$ for all $D\in V$, dense open.
In particular, we may assume that $\min(a^*)>\kappa$. Although $a^*\setminus\kappa$ does not necessarily generate $U$, we take $b=[id]_{U_{a^*\setminus\kappa}}$, where $$U_{a^*\setminus\kappa}=\{X\subseteq Q\mid a^*\setminus\kappa\in j_U(X)\}$$ and $b$ will be as wanted, since $\max(b)=\max(a^*)$ but also $\min(b)\geq\kappa$. To see this, assume otherwise that $b\cap\kappa\neq\emptyset$. Let $k:M_{U_b}\rightarrow M_U$; then $\crit(k)\geq\kappa$ and $k(b)=a^*\setminus\kappa$, so $b\cap\kappa=k(b\cap\kappa)\subseteq a^*\setminus\kappa$ --- a contradiction.
Continuing, we would like to derive some more information about the size of $\max (a^*)$.
\begin{lemma}\label{lem:bounding}
For any $f\colon\kappa\to \kappa$ and any $\tau<\max(a^*)$, $j_U(f)(\tau)< \max(a^*)$.
\end{lemma}
\begin{proof}
Assume otherwise; then there are $f,\tau$ witnessing the negation.
By Lemma \ref{minimality}, there is a dense open set $D$ such that $a^*\cap (\tau+1)\notin j_U(D)$.
Consider the set $D^*$ of all conditions $p\in D$ such that for every $\xi<\max(p)$, if $p\cap (\xi+1)\notin D$ then $f(\xi)<\max(p)$. Then $D^*$ is dense, since for every $p_0$ we take $q\in D$ above $p_0$; the set $\{f(\xi)\mid \xi<\max(q)\}$ is bounded by some $\max(q)\leq\alpha<\kappa$; then $$p_0\leq q\leq q^*:= q\cup\{\alpha\}\in D^*$$ since if $\xi<\max(q^*)$ and $q^*\cap(\xi+1)\notin D$, then $\xi<\max(q)$ as $q^*\cap (\max(q)+1)=q\in D$ and $D$ is open, so $f(\xi)<\alpha=\max(q^*)$. Also $D^*$ is open, since if $p\in D^*$ and $p\leq p_1$, then $p_1\in D$ (since $D$ is open and $p\in D$), but also for every $\xi<\max(p_1)$, if $p_1\cap (\xi+1)\notin D$, then $\xi<\max(p)$. Thus $f(\xi)<\max(p)\leq \max(p_1)$.
It follows that $a^*\in j_U(D^*)$, but this is a contradiction since $\tau<\max(a^*)$, $$a^*\cap (\tau+1)\notin j_U(D)\text{ and } j_U(f)(\tau)\geq \max(a^*)$$
\end{proof}
\begin{lemma}\label{lem-cof}
$\kappa^{+} \leq \cf^V \max(a^*) \leq 2^\kappa$.
\end{lemma}
\begin{remark}
Note that $2^\kappa>\kappa^+$ already implies, by Mitchell \cite{Mit}, that $o(\kappa)\geq \kappa^{++}$, since $\kappa$ is measurable. Thus, assuming our anti-large cardinal hypothesis, we get $\cf \max(a^*) = \kappa^{+}$.
\end{remark}
\begin{proof}
First let us show that $\cf^V \max(a^*) \geq \kappa^+$.
Otherwise, let $\langle\zeta_\delta \mid \delta < \delta^*\rangle$ be cofinal in $\max(a^*)$, with $\delta^* \leq \kappa$. For every $\delta < \delta^*$, there is a dense open set $D_\delta \in V$ such that $a^* \cap (\zeta_\delta + 1)\notin j_U(D_\delta)$.
Let $D_*$ be the set of all conditions $p\in Q$ such that there is $\xi < \max p$ such that $p \cap \xi \in \bigcap_{\delta < \min p} D_\delta$. Clearly, $D_*$ is dense open. Let us show that $a^* \notin j_U(D_*)$. Indeed, we assume that $\min a^* > \kappa$ and therefore if $a^*\in j_U(D_*)$ then there is some $\xi < \kappa^*$ such that $a^* \cap \xi \in j_U(D_\delta)$ for all $\delta < \delta^* \leq \kappa$, which contradicts our assumption.
Let us show now that $\cf^{V}(\max(a^*)) \leq 2^\kappa$. Indeed, let us fix some elementary submodel $H$ of a sufficiently large $H(\theta)$ of cardinality $2^\kappa$ that contains $a^*$ and such that for every $D\subseteq Q$, $j(D)\in H$. It follows that for every $D\subseteq Q$ dense open in $V$, the minimal ordinal $\rho < \max(a^*)$ such that $a^* \cap \rho \in j(D)$ belongs to $H$. In particular, $\sup (\max(a^*) \cap H) = \max(a^*)$, by the minimality of $\max(a^*)$. Since $|H\cap \max(a^*)| \leq 2^\kappa$, we conclude that $\cf (\max(a^*)) \leq 2^\kappa$.
\end{proof}
Next, we would like to get a parallel of Claim \ref{ExtensionToProj}. Since we only assume the existence of an ultrafilter extending $\mathcal{D}_{\emptyset}(Q)$, we have to be a bit more careful. We could use the homogeneity of $Q$ and derive an extension of $\mathcal{D}_p(Q)$ for all $p$, but we would like to get a relatively concrete representation of the generic, which would be useful during the proof.
Since $|Q|=\kappa$, there is a bijection $f:\kappa\rightarrow Q$. Denote $\delta_{a^*}=j(f^{-1})(a^*)$ and define $$\mathcal{W}=\{X\subseteq \kappa\mid \delta_{a^*}\in j(X)\}$$
Then $\mathcal{W}$ is a $\kappa$-complete ultrafilter on $\kappa$, $U\equiv^{\mathrm{RK}}\mathcal{W}$, $M_{\mathcal{W}}=M_U$, and $[g]_U\mapsto[g\circ f]_{\mathcal{W}}$ is the unique isomorphism between the two ultrapowers.
\begin{lemma}\label{genericClub}
Let $\langle \kappa_n\mid n<\omega\rangle$ be a generic Prikry sequence for $\mathcal{W}$. Then $\underset{n<\omega}{\bigcup}a_n$ is a generic club for $Q$, where $a_n=f(\kappa_n)$. Moreover, there is $N<\omega$ such that for every $N\leq n<\omega$, $\max(a_n)<\min(a_{n+1})$.
\end{lemma}
\begin{proof}
Let $\pi\colon Q\rightarrow\kappa$ be such that $\kappa=[\pi]_{U}=[\pi\circ f]_{\mathcal{W}}$ is the projection to normal. In $V$, define the set \[A=\{\alpha<\kappa\mid \forall\beta<\pi(f(\alpha)),\, \max(f(\beta))<\min(f(\alpha))\},\] then $A\in \mathcal{W}$. To see this note that
\[M_{U}\models\forall\beta<\kappa=j(\pi)(a^*),\, \max(j(f)(\beta))<\min(a^*)\] since $j(f)(\beta)=f(\beta)<\kappa\leq\min(a^*)$. Here $a^*=[id]_{U}$; also note that $$j(\pi)(a^*)=j(\pi\circ f)(\delta_{a^*}), \ \min(a^*)=\min(j(f)(\delta_{a^*})),$$ thus $\delta_{a^*}\in j(A)$ and $A\in \mathcal{W}$.
Let $\langle\kappa_n\mid n<\omega\rangle$ be a Prikry sequence for $\mathcal{W}$. Then there is $N$ such that for every $N\leq n<\omega$, $\kappa_{n}<\pi(\kappa_{n+1})$ and $\kappa_n\in A$. By the definition of $A$ it follows that $\max(a_n)<\min(a_{n+1})$. Denote $p_n=a_0\cup \cdots \cup a_n\in Q$; then for every $n\geq N$, $p_n\leq p_{n+1}$. We claim that $C_G=\underset{n<\omega}{\cup}a_n$ is a generic club through the singulars of $V$. To see this, let $D\subseteq Q$ be dense open; then by Lemma \ref{genericCon}, $a^*\in j(D)$ and for every $\xi<\max(a^*)$, $q\subseteq\xi$, $(a^*\setminus\xi)\cup q\in j(D)$. This property reflects on a set in $\mathcal{W}$, i.e. $$B=\{\alpha<\kappa\mid \forall \xi<\max(f(\alpha))\,\forall q\subseteq\xi , \ (f(\alpha)\setminus\xi)\cup q\in D\}\in \mathcal{W}$$ and therefore there is $N\leq M<\omega$ such that for every $n\geq M$, $\kappa_n\in B$ and so $p_n\in D$.
\end{proof}
We denote $C(Q)=\bigcup_{n<\omega}a_n$ to be the $V$-generic club for $Q$.
The idea is that properties of $a^*$ reflect in some sense to the generic club $C(Q)$. This will be useful later, when we encounter some more delicate properties of $a^*$, using Mitchell's analysis of indiscernibles.
\subsection{Mitchell's indiscernibles}
Recall that $\mathcal{K}$ is Mitchell's core model, under the anti-large cardinal hypothesis $\neg \exists \lambda,\, o(\lambda) = \lambda^{++}$.
For the convenience of the reader, we include here the statements of the basic definitions and results which we are going to use in the course of the proof, which we cite from \cite{MitchellHandbookCoveringLemma}.
\begin{definition}\label{def:mitchell-ind}
\begin{enumerate}
\item Let $U$ be a measure, then $\crit(U)$ is the measurable $\kappa$ such that $U$ is a measure over $\kappa$.
\item Let $\vec{U}$ be a sequence of measures and let $\gamma'<\gamma$ in $\dom(\vec{U})$; denote by $coh_{\gamma',\gamma}$ the least function $f$ in the well ordering of $\mathcal K=L[\vec{U}]$ such that $\gamma'=[f]_{\vec{U}_\gamma}\in \Ult(\mathcal K,\vec{U}_\gamma)$.
\item A system of indiscernibles for $\mathcal K$ is a sequence $\mathcal{C}$ such that:
\begin{enumerate}
\item $\dom(\mathcal{C})\subseteq\dom(\vec{U})$ and $\forall \gamma\in \dom(\mathcal{C}),\,\mathcal{C}_\gamma\subseteq \crit(\vec{U}_\gamma)$.
\item\label{def:mitchell-ind-3-b} For every $f\in \mathcal K$, there is a finite set $a\subseteq On$ such that for every $\gamma\in \dom(\vec{U})$:
$$
\begin{matrix}
\forall\nu\in \mathcal{C}_\gamma\setminus \sup(a\cap \crit(\vec{U}_\gamma)). \forall X\in f\image ( \nu\times\{\crit(\vec{U}_\gamma)\}) \\ \nu \in X\leftrightarrow X\cap \crit(\vec{U}_\gamma)\in \vec{U}_\gamma
\end{matrix}$$
\end{enumerate}
\item A sequence $\mathcal{C}$ of indiscernibles for $\mathcal K$ is said to be $h$-coherent if $h\in \mathcal K$ is a function and:
\begin{enumerate}
\item $\forall\nu\in \cup_{\gamma\in \dom(\mathcal{C})}\mathcal{C}_\gamma$, there is a unique $\xi\in h''\nu$ such that $\nu\in \mathcal{C}_\xi$.
\item If $\nu\in \mathcal{C}_\gamma\cap \mathcal{C}_{\gamma'}$ where $\gamma\neq\gamma'$ and $\gamma\in h''\nu$, then $\crit(\vec{U}_{\gamma'})\in \mathcal{C}_{\gamma''}$ for some $\gamma''<\gamma$ with $\crit(\vec{U}_{\gamma''})=\crit(\vec{U}_\gamma)$.
\item\label{def:mitchell-ind-4-c} If $\nu\in \mathcal{C}_\gamma$, $\gamma_\nu=coh_{\gamma',\gamma}(\nu)$ for $\gamma'<\gamma$, and $\gamma'\in h''\nu$, then $\mathcal{C}_{\gamma_\nu}=\mathcal{C}_{\gamma'}\cap(\nu\setminus\nu')$ where $\nu'$ is the least such that $\gamma\in h''\nu'$.
\end{enumerate}
\item Let $x$ be any set and $h$ a function. Then set $h''(x;\mathcal{C})$ is the smallest set $X$ such that $x\subseteq X$ and $X=h''[X\cup(\bigcup_{\gamma\in X}\mathcal{C}_\gamma)]$.
\item Suppose that $\mathcal{C}$ is a $g$-coherent system of indiscernibles. Define:
\begin{enumerate}
\item $S^{\mathcal{C}}(\gamma,\xi)=\min(\mathcal{C}_\gamma\setminus(\xi+1))$.
\item $S^{\mathcal{C}}_*(\gamma,\xi)=\min(\bigcup_{\gamma'\geq\gamma}\mathcal{C}_{\gamma'}\setminus(\xi+1))$.
\item If $X$ is any set, and $\gamma\in \dom(\mathcal{C})\cap X$, an accumulation point of $\mathcal{C}_\gamma$ in $X$ is an ordinal $\nu\in X$ such that for every $\gamma'\in X\cap \gamma\cap g''\nu$, the set $$\bigcup\{\mathcal{C}_{\gamma''}\mid \gamma''\geq \gamma', \crit(\vec{U}_{\gamma''})=\crit(\vec{U}_\gamma)\}$$
is unbounded in $\nu$. Let $a^{\mathcal{C},X}(\gamma,\xi)$ be the least accumulation point of $\mathcal{C}_\gamma$ in $X$ above $\xi$.
\end{enumerate}
\end{enumerate}
\end{definition}
\begin{theorem}[Mitchell's Covering Lemma]\label{the covering lemma}
Assume there is no inner model with $\exists \lambda.\, o(\lambda)=\lambda^{++}$. Let $\kappa,\kappa'$ be $\mathcal K$-cardinals such that $\kappa'\geq \max\{\kappa, o(\kappa)\}$. Also let $X$ be a set such that $\kappa\not\subseteq X=Y\cap\mathcal K_{\kappa'}$ where $Y\prec_1 H(\kappa^{'+})$. Then there are $\rho<\kappa$, $h\in\mathcal K$ and $\mathcal{C}$ such that:
\begin{enumerate}
\item $\mathcal{C}$ is an $h$-coherent system of indiscernibles for $\mathcal K$.
\item $\dom(\mathcal{C})\subseteq X$ and $\cup_\gamma \mathcal{C}_\gamma\subseteq X$.
\item $X=h''(X\cap\rho;\mathcal{C})$ and hence $X\subseteq h''(\rho;\mathcal{C})$.
\item For every $\nu\in X\cap \kappa$, either $\nu\in h''[X\cap\nu]$, or $\nu\in\mathcal{C}_\gamma$ for some
$\gamma$ in which case there is $\xi\in X\cap\nu$ such that either
$\nu=S^{\mathcal{C}}(\gamma,\xi)=S^{\mathcal{C}}_*(\gamma,\xi)$ or there is
$\gamma<\gamma'\in h''[X\cap\nu]$ such that $\nu=a^{\mathcal{C},X}(\gamma',\xi)$.
\item If $X'$ is another set satisfying the assumption of the theorem, then there is a finite set $a\subseteq On$ such that for every $\xi,\gamma\in X\cap X'$ such that $a\cap \crit(\vec{U}_\gamma)\subseteq \xi$ and $\xi>\max\{\rho_X,\rho_{X'}\}$:
$$S^{\mathcal{C}}(\gamma,\xi)=S^{\mathcal{C}'}(\gamma,\xi)$$
$$S^{\mathcal{C}}_*(\gamma,\xi)=S^{\mathcal{C}'}_*(\gamma,\xi)$$
$$a^{\mathcal{C},X}(\gamma,\xi)=a^{\mathcal{C}',X'}(\gamma,\xi)$$
whenever either is defined.
\end{enumerate}
\end{theorem}
A \textit{covering model} is a set $X$ satisfying the assumptions of Theorem \ref{the covering lemma}. In the discussion ahead, we will not distinguish between a model and its set of ordinals. Thus, we will freely take elementary substructures in some model of $\mathrm{ZFC}$ that do not contain all ordinals below some $\zeta$ and call them covering models.
The elementary embedding $j_U\restriction\mathcal{K}\colon \mathcal{K} \to \mathcal{K}^{M_U}$ is an iterated ultrapower of $\mathcal{K}$ by its measures.
Let us denote the iteration by $\langle j_{\alpha,\beta}\mid \alpha\leq \beta\leq l^*\rangle$ where $j_{\alpha,\beta}:\mathcal{K}_{\alpha}\rightarrow \mathcal{K}_{\beta}$. We can assume that the iteration is normal, i.e. $\langle \lambda_i\mid i< l^*\rangle$ is increasing, where $\lambda_i=\crit(j_{i,i+1})$. Hence $\lambda_0=\kappa$. Let $\langle \kappa_\alpha \mid \alpha\leq \alpha^*\rangle$ be the strictly increasing list of images of $\kappa$ under this iteration.
In particular, $\kappa_0=\kappa$ and $\kappa_{\alpha^*}=j_U(\kappa)$, and $\alpha^* {\langle}eq l^*$.
\begin{lemma}\label{crit}
Let $\eta<\alpha^*$ and let $\eta_0$ be the least ordinal such that $\kappa_{\eta}\leq\lambda_{\eta_0}$. Also let $\xi_\eta<l^*$ be such that $j_{\xi_\eta}(\kappa)=\kappa_{\eta}$. Then $j_{\xi_\eta,\eta_0}(\kappa_{\eta})=\kappa_{\eta}$.
\end{lemma}
\begin{proof}
By elementarity, $N_0:=\mathcal K_{\xi_\eta}\models$ ``$\kappa_{\eta}$ is measurable''. Let us define an internal iteration of the measures of $N_0$, $i:N_0\rightarrow N_0^*$, $\langle i_{\alpha,\beta}\mid \alpha\leq\beta\leq \theta^*\rangle$, defined as follows:
At limit steps we simply take a direct limit.
At successor step $\gamma+1$, assume that $i_{0,\gamma}\colon N_0\rightarrow N_\gamma$ is defined and, for $\beta<\gamma$, $U_\beta$ is the measure on $\zeta_\beta$ applied at stage $\beta$.
Let \[\bar{\zeta}_\gamma=\sup_{\beta<\gamma}(\zeta_\beta+1).\]
We split into cases:
\begin{itemize}
\item If $\cf^{N_0}(\gamma)>\kappa$ or $\gamma$ is a successor ordinal, consider the first measurable $\zeta_\gamma\geq\bar{\zeta}_\gamma$ in $N_\gamma$ and apply $U(\zeta_\gamma,0)$.
\item If $\cf^{N_0}(\gamma) \leq \kappa$ and $\gamma$ is a limit ordinal, we take the least $N_\gamma$-measurable $\zeta=\zeta_\gamma$, such that for some $\rho$, the set $\{\beta<\gamma\mid i_{\beta,\gamma}(U_\beta)=U(\zeta,\rho)\}$ is bounded in $\gamma$, assuming that there is one. If there is no such $\zeta$, take $\gamma=\theta^*$ and halt.
\end{itemize}
Let us claim that the elementary embedding $j_{\xi_\eta, l^*}\colon N_0\rightarrow\mathcal K^{M_U}$ can be completed to $N^*$. Indeed, in the comparison process between the models $N^*$ and $\mathcal K^{M_U}$, the model $N^*$
will not move, since the measurable cardinals in $N^*$ are critical points of steps of the iteration of cofinality at most $\kappa$, and $M_U$ is closed under $\kappa$-sequences.
Hence there is an iteration
$\langle\sigma_{\alpha,\beta}\mid \alpha\leq\beta\leq \rho^*\rangle$ such that $\sigma_{\rho^*}\circ j_{\xi_\eta, l^*}=i$. We are only interested in the part of the iteration which has critical points below $\kappa_\eta$, and the iteration $\sigma\circ j_{\xi_\eta, l^*}$
is equivalent to a normal one. Let $\beta_0$ be the least such that $\crit(i_{\beta_0,\beta_0+1})\geq\kappa_\eta$; then there is $\gamma_0$ such that $\sigma_{\gamma_0}\circ j_{\eta_0,\xi_\eta}=i_{\beta_0}$.
Since $i_{\beta_0}$ is an internal iteration of $N_0$ with critical points below $\kappa_{\eta}$, which is measurable in $N_0$, $i_{\beta_0}(\kappa_{\eta})=\kappa_{\eta}$.
Hence $$\kappa_{\eta}\leq j_{\eta_0,\xi_\eta}(\kappa_{\eta})\leq \sigma_{\gamma_0}(j_{\eta_0,\xi_{\eta}}(\kappa_{\eta}))=\kappa_{\eta}.$$
We conclude that $j_{\xi_\eta, \eta_0}(\kappa_{\eta})=\kappa_{\eta}$.
\end{proof}
\begin{corollary}\label{corcrit} $\{\kappa_\alpha\mid \alpha\leq \alpha^*\}\subseteq \{\lambda_i \mid i\leq l^*\}\cup\{j_U(\kappa)\}$.
\end{corollary}
\begin{proof}
Assume that $\kappa_\alpha\notin\{\lambda_i \mid i\leq l^*\}$; let us show that $\kappa_\alpha=j_U(\kappa)$. Let $\xi_\alpha$ be the least such that $j_{\xi_\alpha}(\kappa)=\kappa_\alpha$. Consider $\alpha_0$, the minimal such that $\kappa_\alpha\leq\lambda_{\alpha_0}$. If $\alpha_0=l^*$, then we are done. Otherwise we actually get the conclusion by using the assumption that $\kappa_\alpha<\lambda_{\alpha_0}=\crit(j_{\alpha_0, l^*})$. Clearly, $\xi_\alpha\leq \alpha_0$; otherwise, since $\crit(j_{\alpha_0,\xi_\alpha})=\lambda_{\alpha_0}>\kappa_\alpha$ (again, this is clear in case $\alpha_0=l^*$), $j_{\xi_\alpha,\alpha_0}(\kappa_\alpha)=\kappa_\alpha=j_{\xi_\alpha,\alpha_0}(j_{\alpha_0}(\kappa))$, hence $\kappa_\alpha=j_{\alpha_0}(\kappa)$, contradicting the minimality of $\xi_\alpha$.
By Lemma \ref{crit}, $j_{\xi_\alpha,\alpha_0}(\kappa_\alpha)=\kappa_\alpha$, hence
$$j_U(\kappa)=j_{\alpha_0,l^*}(j_{\xi_\alpha,\alpha_0}(j_{\xi_\alpha}(\kappa)))=j_{\alpha_0,l^*}(j_{\xi_\alpha,\alpha_0}(\kappa_\alpha))=j_{\alpha_0,l^*}(\kappa_\alpha)=\kappa_\alpha.$$
\end{proof}
\begin{claim}\label{BoundFun}
If $\kappa_\alpha\leq\delta<\kappa_{\alpha+1}$ then there is $h\in({}^\kappa\kappa)^{\mathcal{K}}$ such that $\delta\leq j_U(h)(\kappa_\alpha)<\kappa_{\alpha+1}$.
\end{claim}
\begin{proof}
Assume $\kappa_\alpha\leq\delta<\kappa_{\alpha+1}$ and decompose the iteration $$j_U\restriction\mathcal K=j_{\xi_{\alpha+1},l^*}\circ j_{\xi_{\alpha}+1,\xi_{\alpha+1}}\circ j_{\xi_\alpha,\xi_\alpha+1}\circ j_{\xi_\alpha}$$ where $$j_{\xi_\alpha}:\mathcal K\rightarrow \mathcal K_{\xi_\alpha}, \ \crit(j_{\xi_\alpha})=\kappa$$ $$j_{\xi_\alpha,\xi_\alpha+1}:\mathcal K_{\xi_\alpha}\rightarrow \mathcal K_{\xi_\alpha+1}, \ \crit(j_{\xi_\alpha,\xi_\alpha+1})=\kappa_\alpha,
\text{ and }j_{\xi_\alpha,\xi_\alpha+1}(\kappa_\alpha)=\kappa_{\alpha+1}$$
$$j_{\xi_\alpha+1,\xi_{\alpha+1}}:\mathcal K_{\xi_\alpha+1}\rightarrow\mathcal K_{\xi_{\alpha+1}}, \ \crit(j_{\xi_\alpha+1,\xi_{\alpha+1}})=\lambda_{\xi_\alpha+1}$$
$$j_{\xi_{\alpha+1},l^*}:\mathcal K_{\xi_{\alpha+1}}\rightarrow \mathcal K^{M_U}, \ \crit(j_{\xi_{\alpha+1},l^*})=\kappa_{\alpha+1}$$
First consider only the iteration $j_{\xi_\alpha+1}$; there is $f\in({}^\kappa\kappa)^\mathcal K$ such that $$j_{\xi_\alpha+1}(f)(\lambda_{i_1},\dots,\lambda_{i_n})=\delta$$ where $\lambda_{i_1},\dots,\lambda_{i_n}\leq\kappa_\alpha$. Now let us define $h:\kappa\rightarrow\kappa$ by $$h(\alpha)=\sup\{f(\vec{\xi})\mid \vec{\xi}\in[\alpha+1]^n\}$$
$h\in\mathcal K$ as it is definable. It follows that $\delta\leq j_{\xi_\alpha+1}(h)(\kappa_\alpha)<\kappa_{\alpha+1}$.
Further iteration might move $j_{\xi_\alpha+1}(h)(\kappa_\alpha)$, but not past $\kappa_{\alpha+1}$. Indeed, by Lemma \ref{crit}, $$\kappa_{\alpha+1}\leq j_{\xi_{\alpha}+1,\xi_{\alpha+1}}(\kappa_{\alpha+1})\leq j_{\xi_{\alpha+1}}(\kappa_{\alpha+1})=\kappa_{\alpha+1}$$
Hence $\kappa_{\alpha+1}= j_{\xi_{\alpha}+1,\xi_{\alpha+1}}(\kappa_{\alpha+1})$. It follows that $$j_U(h)(\kappa_\alpha)=j_{\xi_{\alpha+1},l^*}(j_{\xi_\alpha+1}(h)(\kappa_\alpha))=j_{\xi_{\alpha+1}}(h)(\kappa_\alpha)=$$
\end{proof}
There is a close connection between the critical points of the iteration $j_U$ and indiscernibles of covering models from Mitchell's covering lemma.
\begin{lemma}\label{lemma: finite error}
Let $N=h^{N}\image(\rho;\mathbb{C}^N)$ be a covering model, where $\mathbb{C}^N$ is an $h^N$-coherent system of indiscernibles for $\mathcal K^{M_U}$ and $h^N \in \mathcal K^{M_U}$ is a Skolem function. Suppose that $\kappa_{\gamma_0}\in N$ for some $\gamma_0<\alpha^*$.
Then
for all but finitely many $c \in \cup\{ \mathbb{C}^N_\gamma\mid \crit(\mathbb{C}^N_\gamma)=\kappa_{\gamma_0}\}$, $c\in \{\kappa_\alpha\mid \alpha\leq\gamma_0\}$.
\end{lemma}
\begin{proof}
Suppose otherwise. Let $\langle \delta_n \mid n < \omega\rangle$ be an increasing sequence in $$\cup\{ \mathbb{C}^N_\gamma\mid \crit(\mathbb{C}^N_\gamma)=\kappa_{\gamma_0}\}\setminus \{\kappa_\alpha\mid \alpha\leq\alpha^{*}\}$$
Set $$\alpha_n = \max(\{\alpha\leq \alpha^{*} \mid \kappa_\alpha<\delta_n\}).$$
By Claim \ref{BoundFun} there is an increasing $f_n : \kappa\to \kappa$ in $\mathcal K$ such that
$$\delta_n < j_U(f_n)(\kappa_{\alpha_n}) <\kappa_{\alpha_{n}+1}.$$ Consider $\{f_n \mid n < \omega\}.$
While this set might not be a member of $\mathcal K$, we are able to bound it. Let $\langle t_\xi \mid \xi < \kappa^+\rangle$
be the canonical enumeration in $\mathcal K$ of $(\kappa^\kappa)^\mathcal K$. For every $n < \omega$, let $\xi_n$ be the unique ordinal such that $f_n = t_{\xi_n}$. Both $\kappa$ and $\kappa^+ = (\kappa^+)^{\mathcal K}$ are regular in $V$ (here we are using the covering theorem, and the measurability of $\kappa$ in $V$). So, there is $a \subseteq\kappa^+$, $a \in \mathcal K$, $|a| < \kappa$ which covers
$\{\xi_n \mid n < \omega\}$. To find such a set, let $\xi=\sup_n \xi_n < \kappa^{+}$.
Let $p\colon \kappa \to \xi$ be a bijection in $\mathcal K$. Since $\cf^V \kappa = \kappa > \omega$, $\sup_n p^{-1}(\xi_n) = \beta' < \kappa$.
Then take $a= p\image \beta' \in \mathcal K$.
Define a function $f\colon\kappa\to \kappa$ in $\mathcal K$ as follows:
$$f(\nu) = \sup\{t_\xi(\nu) \mid \xi\in a\},$$ for every $\nu<\kappa$. Then, for every $n < \omega$ and $\nu<\kappa$,
$$\kappa > f(\nu) \geq f_{n}(\nu).$$
Now, in the ultrapower, for every $n < \omega,$
$$\kappa_{\alpha_n+1}> j_U(f)(\kappa_{\alpha_n}) > \delta_n.$$
Let $\delta^*=\sup_{n<\omega}\delta_n\leq\kappa_{\gamma_0}$. If $\delta^*=\kappa_{\gamma_0}$, then the function $j_U(f)$ is in $\mathcal K^{M_U}$. Note that $\delta_n\in j_U(f)\image\kappa_{\alpha_n}$. For high enough $n$, this contradicts Definition \ref{def:mitchell-ind}, \ref{def:mitchell-ind-3-b} and the indiscernibility of the $\delta_n$'s.
If $\delta^*<\kappa_{\alpha+1}$, then it is also an indiscernible and by Definition \ref{def:mitchell-ind}, \ref{def:mitchell-ind-4-c}, the $\delta_n$'s are part of the indiscernibles for $\delta^*$. Then we again reach a contradiction to \ref{def:mitchell-ind}, \ref{def:mitchell-ind-3-b}.
\end{proof}
\begin{lemma}
For every $\alpha<\alpha^*$, $\kappa_{\alpha+1}$ is regular in $M_U$.
\end{lemma}
\begin{proof}
Otherwise, it is singular in $M_U$; denote $\lambda=\cf^{M_U}(\kappa_{\alpha+1})<\kappa_{\alpha+1}$.
Work in $M_U$: let $H\prec H(\theta^+)$ be an elementary submodel for some high enough $\theta$, closed under $\lambda$-sequences, such that $|H|<\kappa_{\alpha+1}$. Apply Mitchell's covering lemma (Theorem \ref{the covering lemma}) to find a covering
model $H\cap \mathcal K\subseteq N$ of cardinality less than $\kappa_{\alpha+1}$. It is of the form $h^N \image(\delta^N; \mathbb{C}^N)$, where $\delta^N<\kappa^*$,
$\mathbb{C}^N$ is an $h^N$-coherent system of indiscernibles for $\mathcal K^{M_U}$ and $h^N \in \mathcal K^{M_U}$ is a Skolem function.
We can assume also that $\lambda\subseteq H$. The indiscernibles for $\kappa_{\alpha+1}$ in $N$ are unbounded in $\kappa_{\alpha+1}$. On the other hand, all but finitely many indiscernibles for $\kappa_{\alpha+1}$ are among $\{\kappa_{\beta}\mid \beta\leq\alpha\}$. This is a contradiction.
\end{proof}
Consider $\kappa^*=\sup((\max(a^*)+1) \cap \{ \kappa_\alpha \mid \alpha\leq \alpha^*\})$. Then there is $\alpha^{**}<\alpha^*$ such that $\kappa^*=\kappa_{\alpha^{**}}$.
$\alpha^{**}\geq \kappa$.
\\In particular, the length of the sequence $\langle \kappa_\alpha \mid \alpha\leq \alpha^*\rangle$ is at least $\kappa$, and hence, $o^{\mathcal K}(\kappa)\geq \kappa$. We will not pursue that direction here, as the next lemma gives a strictly stronger result.
\begin{lemma}
$\kappa^*=\max(a^*)$.
\end{lemma}
\begin{proof}
Otherwise, $\kappa^*<\max(a^*)<\kappa_{\alpha^{**}+1}$. By Claim \ref{genericCon}, for every dense open $D$, $a^*\setminus\kappa^*\in j(D)$. Also, by minimality of $a^*$, there is a dense open set $D_0$ such that $a^*\cap(\kappa^*+1)\notin D_0$.
Let $h\colon\kappa\rightarrow\kappa$ be such that $j(h)(\kappa^*)\geq\max(a^*)$, which exists by Claim \ref{BoundFun}.
Consider
\[C=\{\alpha<\kappa\mid \forall\beta<\alpha,\, h(\beta)<\alpha\},\]
the club of all closure points of $h$.
Let $D$ be the dense open set of all conditions $p\in Q$ such that there are $$\eta < \eta' < \max p$$ such that $p \cap \eta \in D_0$ and $\eta' \in C$. Let us claim that $a^* \notin j(D)$, and thus obtain a contradiction. Indeed, the least $\eta$ such that $a^* \cap \eta \in D_0$ is above $\kappa^* + 1$ and the next element of $j(C)$ above $\kappa^* + 1$ is at least $\max a^*$.
\end{proof}
\begin{claim}\label{claim:bounding-using-function-from-K}
For every $\alpha < \alpha^*$, and a function $f \colon \kappa_\alpha \to \kappa_\alpha$ in $\mathcal{K}^{M_U}$, there is a function $g \in \mathcal{K}$ such that $j(g)(\zeta) \geq f(\zeta)$ for all $\zeta < \kappa_{\alpha}$, except for a bounded error.
\end{claim}
\begin{proof}
Fix in $\mathcal K = \mathcal K^V$ a sequence $\langle h_\tau \mid \tau<\kappa^+\rangle$ of functions such that for every $\tau<\tau'<\kappa^+$ the following hold in $\mathcal K$:
\begin{enumerate}
\item $h_\tau:\kappa\to \kappa$,
\item $h_\tau<h_{\tau'}$ mod bounded,
\item for every $g:\kappa\to \kappa$ there is $\rho<\kappa^+$ such that $g<h_{\rho}$ mod bounded.
\end{enumerate}
Note that $2^\kappa=\kappa^+$ in $\mathcal K$, hence it is easy to construct such a sequence.
Apply the iteration $j_U\restriction \mathcal K$ to the list $\langle h_\tau \mid \tau<\kappa^+\rangle$.
Let us denote by $\mathcal K'$ the iterated ultrapower of $\mathcal K$, and by $i\colon \mathcal K \to \mathcal K'$ the iteration, so that $i(\kappa) = \kappa_\alpha$, and the critical point of the rest of the iteration is $\geq \kappa_\alpha$.
Note that $i\image \kappa^+$ is cofinal in $i(\kappa^+)$. Moreover, $\kappa^+=(\kappa^+)^\mathcal K$, by the anti-large cardinal assumptions made.
Hence $\langle i(h_\tau) \mid \tau<\kappa^+\rangle$ will be a dominating family of functions from $\kappa_\alpha$ to $\kappa_\alpha$ in $\mathcal K'$. As the critical point of the rest of the iteration
is high enough, $j(h_\tau) \restriction \kappa_\alpha = i(h_\tau)$.
\end{proof}
\subsection{Isolating the indiscernibles}
Recall that $\langle \kappa_\beta \mid \beta\leq\alpha^{**}\rangle$ is the sequence of images of $\kappa$ under the iterated ultrapower $j_U\upharpoonright \mathcal K$. In particular, each $\kappa_{\beta+1}$ is the image of $\kappa_\beta$ under the ultrapower embedding using a measure over $\kappa_\beta$,
$\kappa_0=\kappa$ and $\kappa_{\alpha^{**}}=\kappa^*$.
The following lemma provides a sufficient condition for the main theorem of this section:
\begin{lemma}\label{lem:isolating-the-indiscernibles}
Let $A(\eta)=\{\kappa_\gamma\mid \kappa_\gamma<\eta\}\cap \acc(a^*)$.
If there are a function $t\in({}^{\kappa^*}\kappa^*)^{\mathcal K^{M_U}}$ and $\gamma<\kappa^*$ such that $A(\kappa^*)\setminus\gamma=C_t\cap \acc(a^*)\setminus\gamma$, then $o^{\mathcal K^{M_U}}(\kappa^*)\geq(\kappa^*)^+$.
\end{lemma}
\begin{proof}
Assume otherwise, i.e.\ that $o^{\mathcal K^{M_U}}(\kappa^*)<(\kappa^*)^+$.
Using Claim \ref{claim:bounding-using-function-from-K}, we find some $t^*\in\mathcal K$ such that $j_U(t^*)$ dominates $t$.
Find disjoint sets $\langle X_i\mid i<o^{\mathcal K}(\kappa)\rangle$ such that
$X_i\in U(\kappa,i)$. Since $o^{\mathcal K}(\kappa)<\kappa^+$ there is a bijection $\pi:o^{\mathcal K}(\kappa)\rightarrow \kappa$.
Define $g:\kappa\rightarrow \kappa$ by $g(\nu)=\pi(i)$ for the unique $i$ such that $\nu\in X_i$.
Let us argue that $$(\star) \ \ A^*:=\{\nu<\kappa\mid g(\nu)<\nu\}\in\cap_{\xi<o^{\mathcal K}(\kappa)} U(\kappa,\xi).$$
Let $\xi<o^{\mathcal K}(\kappa)$; then,
in the ultrapower $Ult(\mathcal K,U(\kappa,\xi))$, $j_{U(\kappa,\xi)}(g)$ is defined similarly, using $j_{U(\kappa,\xi)}(\pi):j_{U(\kappa,\xi)}(o^{\mathcal K}(\kappa))\rightarrow j_{U(\kappa,\xi)}(\kappa)$ and the sequence $$j_{U(\kappa,\xi)}(\langle X_i\mid i<o^{\mathcal K}(\kappa)\rangle)=\langle X'_i\mid i<j_{U(\kappa,\xi)}(o^{\mathcal K}(\kappa))\rangle.$$
Note that $\kappa\in j_{U(\kappa,\xi)}(X_\xi)= X'_{j_{U(\kappa,\xi)}(\xi)}$, hence $$j_{U(\kappa,\xi)}(g)(\kappa)=j_{U(\kappa,\xi)}(\pi)(j_{U(\kappa,\xi)}(\xi))=j_{U(\kappa,\xi)}(\pi(\xi))=\pi(\xi)<\kappa,$$ which is what we needed.
By $(\star)$, we can deduce that for all $\alpha<\alpha^{**}$, $j_U(g)(\kappa_\alpha)<\kappa_\alpha$. In particular, $$(\star\star) \ \ \ M_U\models \ j_U(g)\text{ is regressive on } \acc(a^*)\cap j_U(C_{t^*}).$$
Using our hypothesis again, $\acc(a^*) \cap j_U(C_{t^*})$ consists of the indiscernibles of $\kappa^*$. In particular, if $\langle \alpha_n \mid n < \omega\rangle$ is a sequence of ordinals below $\alpha^{**}$, such that $j_U(g)(\kappa_{\alpha_n})$ is fixed, then $j_U(g)(\sup \kappa_{\alpha_n})$ is strictly higher.
Let $\langle a_n\mid n<\omega\rangle$ be a Prikry sequence for $U$ obtained by Lemma \ref{genericClub} and let $C(\mathcal{Q})=\cup_{n<\omega} a_n$ be the generic club induced for $\mathcal{Q}$.
By reflecting $(\star\star)$, we get that for some $n_0$ and every $n\geq n_0$, $g$ is regressive on $\acc(a_n)\cap C_{t^*}$. Hence in $V[C(\mathcal{Q})]$, $g$ is regressive on a final segment of $C(\mathcal{Q})\cap C_{t^*}$, which is a club in $V[C(\mathcal{Q})]$. Since $\kappa$ remains regular in the generic extension $V[C(\mathcal{Q})]$, there is a stationary subset $S \subseteq C_{t^*} \cap \acc C(\mathcal{Q})$ of ordinals of countable cofinality, on which $g$ is fixed. But, in particular, there is a continuous copy of $\omega + 1$ that consists of elements of $S$---a contradiction.
\end{proof}
Note that if $o^{{\mathcal K}^{M_U}}(\kappa^*) \geq (\kappa^*)^+$, then $o(\kappa) > \kappa^{+}$.
Indeed, $\kappa^*=\max(a^*)<j_U(\kappa)$. It follows that there is $\xi<l^*$ such that $\kappa^*=\crit(j_{\xi,\xi+1})$. This means that $o^{\mathcal K_\xi}(\kappa^*)>o^{\mathcal K^{M_U}}(\kappa^*)\geq(\kappa^*)^+$. By elementarity, $o^{\mathcal K}(\kappa)>\kappa^+$.
So, in order to conclude the proof, we need to prove that the hypothesis of Lemma \ref{lem:isolating-the-indiscernibles} holds.
\begin{lemma}
For every $\eta \leq \alpha^{**}$ such that $\kappa_\eta \in \acc(a^*)$, there are $t_\eta \in ({}^{\kappa_\eta}{\kappa_\eta})^{{\mathcal K}^{M_U}}$ and $\gamma_\eta$ such that $A(\eta) \setminus \gamma_\eta= (C_{t_\eta} \cap a^* \cap \kappa_\eta) \setminus \gamma_\eta$.
\end{lemma}
\begin{proof}
First note that if $\eta$ is a limit ordinal and $t$ is any function from $\kappa_\eta$ to $\kappa_\eta$ in $\mathcal{K}^{M_U}$, then for every sufficiently large $\alpha < \eta$, $\kappa_\alpha \in C_t$. This is true by the arguments of the proof of Claim \ref{BoundFun} --- each such function $t$ is obtained by plugging into a $j$-image of a function in $\mathcal{K}$ finitely many ordinals below $\kappa_{\eta}$, and restricting it to $\kappa_\eta$.
We prove the lemma by induction on $\eta\leq\alpha^{**}$ such that $\kappa_\eta\in \acc(a^*)$.
Assume inductively that the claim holds for all $\eta'<\eta$. Since $a^*$ is closed, $\kappa_\eta\in a^*$, thus $\kappa_\eta$ is singular in $M_U$. Denote $\lambda=\cf^{M_U}(\kappa_\eta)<\kappa_\eta$ and split into cases:
\vskip 0.3 cm
\textbf{Case 1}: Assume that $\lambda>\omega$.
Since $\kappa_\eta$ is measurable in $\mathcal K^{M_U}$ and singular in $M_U$, there is a Prikry-Magidor sequence $\langle c_i\mid i<\lambda\rangle\in M_U$ witnessing the singularity of $\kappa_\eta$. We can cover $\{c_i\mid i<\lambda\}$ with a covering model $N$ for $\kappa_\eta$ of cardinality less than $\kappa_\eta$ such that all the $c_i$'s are indiscernibles for $\kappa_\eta$ in $N$. By Lemma \ref{lemma: finite error}, for all but finitely many of the indiscernibles for $\kappa_\eta$, $c_i\in \{\kappa_\gamma\mid \gamma<\eta\}$. By removing a bounded piece if necessary, we can assume that $c_i=\kappa_{\eta_i}$, for some ordinals $\eta_i$.
Since both $a^*\cap \kappa_\eta$ and $\{\kappa_{\eta_i}\mid i<\lambda\}$ are clubs in $\kappa_\eta$ inside $M_U$, and the cofinality of $\kappa_\eta$ is $\lambda>\omega$, we may also assume that each $\kappa_{\eta_i}$ is a limit point of $a^*$. Apply the inductive hypothesis to each of the points $\kappa_{\eta_i}$ and obtain a function $t^{\eta_i}\colon\kappa_{\eta_i}\rightarrow\kappa_{\eta_i}$ in $\mathcal K^{M_U}$ such that for some $\nu_i<\kappa_{\eta_i}$, $$C_{t^{\eta_i}}\cap \Lim(a^*)\setminus \nu_i=A(\eta_i)\setminus \nu_i.$$
By Mitchell's covering lemma \ref{the covering lemma}, $N=h^N \image (\delta^N, \mathbb{C}^N)$, where $\delta^N<\kappa_\eta$,
$\mathbb{C}^N$ is a sequence of indiscernibles and $h^N \in \mathcal K^{M_U}$ is a Skolem function.
In order to find a single function that works for $\kappa_\eta$ we will prove that we can choose these functions $t^{\eta_i}$ so that they are definable in the covering model $N$.
First let us argue that $A(\eta_i)$ is definable in $M_U$ from the parameters $\kappa_{\eta_i}$ and $a^*$, up to an initial segment, and this definition is uniform.
\begin{lemma}\label{lem5-2-0-1}
Let $\xi\leq\alpha^{**}$ and let $\langle c_i \mid i<\lambda \rangle\in M_U$ be an increasing sequence, cofinal in $\kappa_{\xi}$.
Let $N'$ be a covering model for $\kappa_{\xi}$ with $\{c_i \mid i<\lambda \}\subseteq N'$.
Suppose that $\langle c_i \mid i<\lambda \rangle$ are indiscernibles in $N'$ for $\kappa_{\xi}$. Then $c_i \in \{\kappa_\beta \mid \beta<\xi \}$, for all but finitely many $i$'s.
\end{lemma}
\begin{proof}
Similar to Lemma {\rangle}ef{lemma: finite error}.
\end{proof}
Now we can formulate the crucial property of subsets of $\kappa_{\xi}$ in $M_U$:
$(*)(B)$
\begin{enumerate}
\item $B\subseteq \kappa_{\xi} \cap \Lim(a^*)$.
\item For every covering model $N'$ for $\kappa_{\xi}$ there is $\rho<\kappa_{\xi}$ such that for every indiscernible $c>\rho$ for $\kappa_{\xi}$ in $N'$, if $c$ is a limit point of $a^*$, then $c\in B$.
\item For every sequence $\langle c_i \mid i<\theta\rangle\in M_U$ of elements of $B$, cofinal in $\kappa_{\xi}$, there is a covering model $N'$ for $\kappa_{\xi}$ and an ordinal $\theta'<\theta$ such that $\langle c_i \mid \theta'\leq i<\theta\rangle$ are indiscernibles for $\kappa_{\xi}$ in $N'$.
\end{enumerate}
\begin{lemma}\label{lem5-2-0-2}
$(*)(A(\xi))$ holds.
\end{lemma}
\begin{proof}
Requirement (1) is clear. Requirement (2) follows from Lemma \ref{lem5-2-0-1}. Let us show requirement (3).
Indeed, for every function $g \colon \kappa_{\xi}^{<\omega} \to \kappa_\xi$ in $\mathcal{K}^{M_U}$ there is a function $f \in \mathcal{K}$ such that \[g(\bar x) = j(f)(\rho_0, \dots, \rho_{m-1}, \bar x),\] for some fixed $\rho_0, \dots, \rho_{m-1} < \kappa_\xi$. This follows from the iterated ultrapower representation. Thus, every $\kappa_{\alpha}$ which is larger than $\max(\rho_0, \dots, \rho_{m-1})$ would be a closure point of this function. In particular, taking $g$ to be the Skolem function of any covering model $N'$ for the sequence $\langle c_i \mid i < \theta\rangle$ and taking $\theta'$ to be the least index for which $c_{\theta'} > \max (\rho_0, \dots, \rho_{m-1})$, we conclude that each of the elements $c_i$, $i\geq\theta'$, must be an indiscernible, by Theorem \ref{the covering lemma}.
\end{proof}
\begin{lemma}\label{lem5-2-0-3}
If $(*)(B_1)$ and $(*)(B_2)$ hold, then $B_1$ agrees with $B_2$ on a final segment, i.e.\ there is $\nu<\kappa_{\xi}$ such that $B_1\setminus \nu=B_2\setminus \nu$.
\end{lemma}
\begin{proof}
Suppose otherwise.
By symmetry, let us assume that there is a sequence $\{e_i \mid i<\theta\} \subseteq B_1\setminus B_2$ in $M_U$, cofinal in $\kappa_{\xi}$. By the first clause of $(*)(B_1)$, each $e_i$ is a limit point of $a^*$.
By $(*)(B_1)(3)$, there will be a covering model $N'$ for $\kappa_{\xi}$ with $\{e_i \mid i<\theta\} \subseteq N'$ such that for some $\theta'<\theta$,
$\langle e_i \mid \theta'\leq i<\theta\rangle$ are indiscernibles for $\kappa_\xi$ in $N'$. Apply now $(*)(B_2)(2)$ to $N'$ and $\langle e_i \mid \theta'\leq i<\theta\rangle$.
We will then have that a final segment of $\langle e_i \mid \theta'\leq i<\theta\rangle$ is in $B_2$. Contradiction.
\end{proof}
\begin{claim}\label{definable function}
If there are a function $t\in({}^{\kappa_\xi}\kappa_\xi)^{\mathcal K^{M_U}}$ and some $\gamma<\kappa_\xi$ such that $A(\xi)\setminus\gamma=C_t\cap \Lim(a^*)\setminus\gamma$, then there is a function $t^{\xi}\colon \kappa_\xi \to \kappa_\xi$ in $\mathcal K^{M_U}$, uniformly definable in $M_U$ with parameters $\kappa_\xi,a^*$, such that for some $\mu<\kappa_\xi$, $(C_{t^{\xi}}\cap \Lim(a^*) \cap \kappa_\xi) \setminus\mu=A(\xi)\setminus\mu$.
\end{claim}
\begin{proof}
By assumption, $t$ satisfies the above equality, and by the previous claim, we let $t^{\xi}$ be the least function $t$ in the order of $\mathcal{K}^{M_U}$ such that $(*)(C_{t}\cap \Lim(a^*))$ holds. This is formulated in $M_U$ using the parameters $\kappa_\xi$ and $a^*$.\end{proof}
Back to the $\kappa_{\eta_i}$'s: by the induction hypothesis and by Claim \ref{definable function}, fix the function $t^{\eta_i}$, which is definable with parameters $\kappa_{\eta_i}, a^*$.
\begin{lemma}\label{lem5-2-1}
Assume that $N_0$ is a covering model for $\kappa_\eta$ and $h^{N_0}\in\mathcal K^{M_U}$ is the associated Skolem function. Consider $\tilde{h}^{N_0}\colon \kappa_\eta\to \kappa_\eta$ in $\mathcal K^{M_U}$ defined as follows:
$$\tilde{h}^{N_0}({\rho})=\sup(\{ h^{N_0}(\vec{\xi})\mid \vec{\xi}\in [\rho+1]^{<\omega} \text{ and } h^{N_0}(\vec{\xi})<\kappa^* \}).$$
Suppose $\eta'<\eta$ is such that $\kappa_{\eta'},a^*\in N_0$ and $t^{\eta'}$ is definable as above.
\\Then for all but boundedly many $\nu<\kappa_{\eta'}$,
$\tilde{h}^{N_0}(\nu)\geq t^{\eta'}(\nu)$.
\end{lemma}
\begin{proof}
We use the elementarity of $N_0$ and the definability of $t^{\eta'}$ to conclude that $t^{\eta'} \in N_0\cap \mathcal K^{M_U}$. Note that
$t^{\eta'}= h^{N_0}(\vec{c})$, for a finite sequence $\vec{c}$ of $N_0$-indiscernibles below $\kappa_{\eta'}$. By the construction of the covering model $N_0$, we can find $t\in N_0$, $t\colon \kappa_\eta \to \kappa_\eta$, such that $t\restriction\kappa_{\eta'}=t^{\eta'}$ and $t=h^{N_0}(\vec{c}')$ where $\vec{c}'$ are all indiscernibles strictly below $\kappa_{\eta'}$.
Hence by the definition of $\tilde{h}^{N_0}$, for every $\max(\vec{c}')\leq\nu<\kappa_{\eta'}$,
$t^{\eta'}(\nu)\leq \tilde{h}^{N_0}(\nu)$.
\\It follows then by the definition of $\tilde{h}^{N_0}$ that for all but boundedly many $\nu<\kappa_{\eta'}$,
$\tilde{h}^{N_0}(\nu)\geq t^{\eta'}(\nu)$.
\end{proof}
For every $i<\lambda$, apply Lemma \ref{lem5-2-1} to $\kappa_{\eta_i}$ and the model $N$ to find $\nu_i<\kappa_{\eta_i}$ such that for every $\nu_i\leq \nu<\kappa_{\eta_i}$,
$\tilde{h}^N(\nu)\geq t^{\eta_i}(\nu)$.
\\Then, by pressing down, and since $\lambda>\omega$, there will be a stationary $Z\subseteq \lambda$ and $\nu^*<\lambda$ such that for every $\xi\in Z$ and every $\nu^*\leq \nu<\kappa_{\eta_\xi}$, the inequality $\tilde{h}^N(\nu)\geq t^{\eta_\xi}(\nu)$ holds.
Now, shrinking $Z$ more if necessary, we will get $\nu^{**}<\kappa^*$ such that
$$C_{\tilde{h}^N}\cap \Lim(a^*)\setminus \nu^{**}=A(\eta)\setminus \nu^{**}.$$
\vskip 0.5 cm
\textbf{Case 2:} Suppose that $\lambda=\omega$.
Once again, since $\kappa_{\eta}\in a^*$, we can find an increasing
sequence $\langle\kappa_{\eta_n}\mid n<\omega\rangle\in M_U$, cofinal in $\kappa_\eta$.
Let us add points to this sequence.
If $\kappa_{\eta_n}\in \Lim(a^*)$, apply the induction hypothesis to find $t^{\eta_n}$, and let $\nu_n<\kappa_{\eta_n}$ be minimal such that $$C_{t^{\eta_n}}\cap \Lim(a^*)\setminus\nu_n= A(\eta_n)\setminus \nu_n.$$
Let $\xi_n<l^*$ be such that $\crit(j_{\xi_n,\xi_{n}+1})=\kappa_{\eta_n}$; then $t^{\eta_n}\in\mathcal K_{\xi_n}$.
We can represent $t^{\eta_n}$ in the iteration using some $f_n\in({}^{\kappa}\kappa)^{\mathcal K}$ and some intermediate critical points $\lambda_1,\dots ,\lambda_m<\kappa_{\eta_n}$: $j_{\xi_n}(f_n)(\lambda_1,\dots,\lambda_m)=t^{\eta_n}$.
Let $$\max((\{\kappa_\alpha\mid \eta_{n-1}<\alpha<\eta_n\}\cap\{\lambda_1,\dots,\lambda_m\})\cup\{\kappa_{\eta_{n-1}}\})=\kappa_{\eta_{n,1}}.$$
By minimality of $\nu_n$, there is $\eta_{n,2}<\eta_n$ such that $\kappa_{\eta_{n,2}}\leq \nu_n\leq \kappa_{\eta_{n,2}+1}$. If $\eta_{n,2}\leq\eta_{n,1}$ then add $\kappa_{\eta_{n,1}}$ to the sequence and set $\eta^{(1)}=\eta_{n,1}$. Otherwise, add $\kappa_{\eta_{n,2}}$ to the sequence and set $\eta^{(1)}=\eta_{n,2}$.
If $\kappa_{\eta_n}\notin \Lim(a^*)$, denote $$\nu_n=\sup(a^*\cap \kappa_{\eta_n})<\kappa_{\eta_n}.$$ There is $\eta'<\eta_n$ such that $\kappa_{\eta'}\leq \nu_n<\kappa_{\eta'+1}$ and there is a function $t^{\eta_n}\in({}^{\kappa_{\eta_n}}\kappa_{\eta_n})^{\mathcal K_{\xi_n}}$ such that $\nu_n\leq t^{\eta_n}(\kappa_{\eta'})$. Indeed, by Lemma \ref{BoundFun}, there is $f\in({}^{\kappa}\kappa)^{\mathcal K}$ such that $j_{\xi+1}(f)(\kappa_{\eta'})\geq \nu_n$, where $\xi<\xi_n$ is the step of the iteration such that $\kappa_{\eta'}$ is a critical point. Then we can set $t^{\eta_n}=j_{\xi_n}(f)$. Let $\eta^{(1)}=\eta'$.
In any case, if $\eta^{(1)}\leq \eta_{n-1}$ we are done. Otherwise, we move to $\kappa_{\eta^{(1)}}$ and repeat the above. After finitely many steps, defining $\eta^{(k)}<\eta^{(k-1)}<\cdots<\eta^{(1)}<\eta_n$, we reach $\eta_{n-1}$. After adding these new points, we obtain a sequence still of order type $\omega$. Without loss of generality, this was the sequence $\langle \kappa_{\eta_n}\mid n<\omega\rangle$ that we started with. During the construction we have defined a sequence of functions $\langle t^{\eta_n}\mid n<\omega\rangle$, such that $t^{\eta_n}\in ({}^{\kappa_{\eta_n}}\kappa_{\eta_n})^{\mathcal K^{M_U}}$ and by closure $\langle t^{\eta_n}\mid n<\omega\rangle\in M_U$.
Clearly, $t^{\eta_n}\in \mathcal K_{\xi_n}$. Let $\xi^*=\sup\xi_n$, then $\crit(j_{\xi^*,l^*})\geq\kappa_\eta$.
\begin{claim}\label{dominating claim}
There is $\phi\in({}^{\kappa}\kappa)^\mathcal K$ such that for all $n<\omega$ and every $\kappa_{\eta_{n-1}}\leq\nu<\kappa_{\eta_n}$, $t^{\eta_n}(\nu)<j_{\xi_n}(\phi)(\nu)$.
\end{claim}
\begin{proof}
By construction of the sequence $\langle \kappa_{\eta_n}\mid n<\omega\rangle$, either $\kappa_{\eta_n}\notin \Lim(a^*)$, in which case there is $f_n\in\mathcal K$ such that $t^{\eta_n}=j_{\xi_n}(f_n)$ (no parameters needed), or $\kappa_{\eta_n}\in \Lim(a^*)$, and then by the construction of the sequence $\kappa_{\eta_n}$, there is a function $f_n\in\mathcal K$ and critical points $$\lambda_1<\cdots<\lambda_k<\kappa_{\eta_{n-1}}<\theta_1<\cdots<\theta_m<\kappa_{\eta_{n-1}+1}\leq \kappa_{\eta_n}$$ such that
$$t^{\eta_n}=j_{\xi_n}(f_n)(\lambda_1,\dots,\lambda_k,\kappa_{\eta_{n-1}},\theta_1,\dots,\theta_m).$$
Since $\theta_m<\kappa_{\eta_{n-1}+1}$,
by Lemma \ref{BoundFun}, there is $b_n\in({}^{\kappa}\kappa)^{\mathcal K}$ such that $$\theta_m<j_{\xi_{n-1}}(b_n)(\kappa_{\eta_{n-1}})\leq j_{\xi_{n}}(b_n)(\kappa_{\eta_{n-1}})\leq j_U(b_n)(\kappa_{\eta_{n-1}})<\kappa_{\eta_{n}}.$$
In $\mathcal K$, define $\phi_n:\kappa\rightarrow\kappa$ by $$\phi_n(\alpha)=\sup\{f_n(\vec{\rho})(\xi)\mid\vec{\rho}\in[b_n(\alpha)]^{<\omega}\cap \dom(f_n)\wedge \xi\leq\alpha\}+1.$$
Then for every $\kappa_{\eta_{n-1}}\leq\nu<\kappa_{\eta_n}$,
$$j_{\xi_n}(f_n)(\vec{\lambda},\kappa_{\eta_{n-1}},\vec{\theta})(\nu)\leq \sup\{j_{\xi_n}(f_n)(\vec{\xi})(\xi)\mid\vec{\xi}\in[j_{\xi_n}(b_n)(\nu)]^{<\omega}\wedge \xi\leq\nu\}.$$
Hence $t^{\eta_n}(\nu)=j_{\xi_n}(f_n)(\vec{\lambda},\kappa_{\eta_{n-1}},\vec{\theta})(\nu)<j_{\xi_n}(\phi_n)(\nu)$.
We proceed as in Lemma \ref{lemma: finite error}. Suppose that $\langle d_i\mid i<\kappa^+\rangle$ is an enumeration of $({}^{\kappa}\kappa)^{\mathcal K}$ and that $\phi_n=d_{\mu_n}$. There is a set $a\subseteq\kappa^+$ such that $a\in\mathcal K$, $|a|<\kappa$ and $\{\mu_n\mid n<\omega\}\subseteq a$. Define in $\mathcal K$, $\phi:\kappa\rightarrow\kappa$ by
$$\phi(\alpha)=\sup\{d_i(\alpha)\mid i\in a\}.$$
Since $\kappa$ is regular in $\mathcal K$, $\phi$ is well defined,
and for every $n<\omega$, $\phi$ dominates $\phi_n$ everywhere. By elementarity of $j_{\xi_n}$, $\phi$ will be as desired.
\end{proof}
Denote $t^\eta=j_{U}(\phi)\restriction\kappa_{\eta}\in\mathcal K^{M_U}$. Note that $t^\eta\restriction\kappa_{\eta_n}=j_{\xi_n}(\phi)$. Let us prove that $t^\eta$ is as wanted:
\begin{claim}
There is $\gamma_\eta<\kappa_\eta$ such that
$$(C_{t^\eta}\cap \Lim(a^*) \cap \kappa_\eta)\setminus \gamma_{\eta}=A(\eta)\setminus\gamma_{\eta}.$$
\end{claim}
\begin{proof}
As we claimed before, $\{\kappa_\gamma\mid \gamma_\eta\leq \gamma<\eta\}$ is a weak Prikry-Magidor sequence for $\mathcal K^{M_U}$ and $C_{t^\eta}$ is a club in $\mathcal K^{M_U}$, so there is $\gamma_\eta$ such that $\{\kappa_\gamma\mid \gamma_\eta\leq \gamma<\eta\}\subseteq C_{t^{\eta}}$. This proves
the inclusion from right to left. For the other direction, assume that $\delta\in C_{t^\eta}\setminus\kappa_{\gamma_\eta}$ is such that $\delta\notin\{\kappa_\gamma\mid \gamma_\eta\leq\gamma<\eta\}$; let us argue that $a^*\cap \delta$ is bounded below $\delta$. Fix any $n<\omega$ such that $\kappa_{\eta_n}<\delta<\kappa_{\eta_{n+1}}$. We split into cases. If $\kappa_{\eta_{n+1}}\notin \Lim(a^*)$, then
$$\sup(a^*\cap\delta)\leq\sup(a^*\cap\kappa_{\eta_{n+1}})=\nu_n\leq t^{\eta_{n+1}}(\kappa_{\eta_n}).$$
By Claim \ref{dominating claim}, $t^{\eta_{n+1}}(\kappa_{\eta_n})<j_{\xi_n}(\phi)(\kappa_{\eta_n})=t^\eta(\kappa_{\eta_n})$.
Since $\kappa_{\eta_n}<\delta\in C_{t^\eta}$, we conclude that $\sup(a^*\cap\delta)<\delta$ and $\delta$ is not a limit point of $a^*$.
If $\kappa_{\eta_{n+1}}\in \Lim(a^*)$, then by the construction of $\kappa_{\eta_n}$ we have that $C_{t^{\eta_{n+1}}}\cap \Lim(a^*)\setminus \kappa_{\eta_n}=A(\eta_{n+1})\setminus \kappa_{\eta_n}$.
By assumption, $\delta\notin \{\kappa_\alpha\mid \gamma_\eta\leq \alpha<\eta\}$, hence $\delta\notin A(\eta_{n+1})$. Since $\kappa_{\eta_n}<\delta$, it follows that $\delta\notin C_{t^{\eta_{n+1}}}\cap \Lim(a^*)$.
\end{proof}
This concludes the proof of Lemma \ref{lem:isolating-the-indiscernibles}, and the proof of
Theorem \ref{thm:lowerbound}.\end{proof}
It is possible to try to proceed further and to deal with the situation when $o(\kappa^*) = (\kappa^*)^+$. If, as a result, $\kappa^*$ remains regular (which is the typical forcing situation) then $a^*$ must be bounded in $\kappa^*$, since no regular cardinal can be in $a^*$, and so we are basically in the situation considered above.
\\However, $\kappa^*$ can change cofinality --- there are forcing constructions in which it changes cofinality to $\omega$.
In this case a finer analysis of indiscernibles seems to be needed, and Mitchell's accumulation points may appear.
Our conjecture is that the result above is not optimal and can be strengthened.
\section{Compactness for masterable forcing notions}
In this section we will isolate a subclass of forcing notions that consistently include many important forcing notions (such as all the complete subforcings of $\Add(\kappa,1)$ and more), such that it is possible to force from a measurable cardinal that for any forcing $\mathbb{P}$ in this class, there is a $\kappa$-complete ultrafilter extending $\mathcal{D}(\mathbb{P})$.
\begin{lemma}\label{lemma}
Let $\mathbb{Q}$ be a $\kappa$-distributive forcing of size $\kappa$.
\\Suppose that there is a generic elementary embedding
\[j\colon V^{\mathbb{Q}} \to M\]
with $\crit j = \kappa$.
Then, in $M$, there is a single condition $m\in j(\mathbb{Q})$ which is stronger than $j(p)$ for every condition $p$ in the generic filter for $\mathbb{Q}$.
\end{lemma}
\begin{proof}
Without loss of generality we can assume that $\mathbb{Q}=\kappa$, i.e.\ the set of conditions of the forcing $\mathbb{Q}$ is just $\kappa$.
Let $G\subseteq\mathbb{Q}$ be the generic filter. By elementarity, $M = M'[j(G)]$, where $\forall p\in G,\,j(p)\in j(G)$.
Note that since $G\subseteq\kappa$ and $\crit(j)=\kappa$, $G=j(G)\cap\kappa\in M$.
By the distributivity of $\mathbb{Q}$ over $V$ and by elementarity of $j$, $j(\mathbb{Q})$ is also $j(\kappa)$-distributive over $M'$, hence $G\in M'$. In particular, the set
$$D=\{q \in j(\mathbb{Q}) \mid ((\forall p \in G)(q \geq j(p)))\vee ((\exists p \in G)(q\perp j(p)))\}$$
is dense open in $M'$. Clearly, any condition $m\in j(G)$ from this set will witness the validity of the lemma, since $j(G)\supseteq j\image G=G$.
\end{proof}
Define now a subclass of $\kappa$-distributive forcing of size $\kappa$.
\begin{definition}\label{definition: masterable}
A forcing notion $\mathbb{Q}$ is called \emph{masterable} if
\begin{enumerate}
\item $\mathbb{Q}$ is a $\kappa$-distributive forcing of size $\kappa$,
\item there is a forcing notion $\lusim{\mathbb{R}} \in V^{\mathbb{Q}}$ such that
\begin{enumerate}
\item in $V^{\mathbb{Q}*\lusim{\mathbb{R}}}$, there is an elementary embedding
$$j\colon V^{\mathbb{Q}} \to M$$
with $\crit j = \kappa$;
\item $\mathbb{Q}*\lusim{\mathbb{R}}$ contains a dense subset of size $\leq \kappa$
and $\mathbb{Q} \ast \lusim{\mathbb{R}}$ is ${<}\kappa$-strategically closed.
\end{enumerate}
\end{enumerate}
\end{definition}
Let $\mathcal{N}_\kappa$ denote the class of all masterable forcing notions.
\begin{claim}
$\mathcal{N}_\kappa$ is closed under complete subforcings.
\end{claim}
\begin{proof}
Assume $\mathbb{Q}$ is a complete subforcing of $\mathbb{P}\in \mathcal{N}_\kappa$.
Then $|\mathbb{Q}|\leq |\mathbb{P}|\leq\kappa$; let $\lusim{\mathbb{R}}$ witness property $(2)$ for $\mathbb{P}$. Let $\lusim{\mathbb{R}'}=\mathbb{P}/\lusim{G_{\mathbb{Q}}}\ast\lusim{\mathbb{R}}$, where $\mathbb{P}/\lusim{G_{\mathbb{Q}}}$ is the quotient forcing. Now $\mathbb{Q}*\lusim{\mathbb{R}'}\simeq \mathbb{P}\ast\lusim{\mathbb{R}}$ and so condition $(2)$ holds for $\mathbb{Q}$.
\end{proof}
\begin{theorem}\label{thm1}
Assume GCH and let $\kappa$ be a measurable cardinal.
Then there is a cofinality preserving forcing extension in which for any $\mathbb{Q}\in\mathcal{N}_\kappa$, there is a $\kappa$-complete ultrafilter $\mathcal{U}$ extending $\mathcal{D}_p(\mathbb{Q})$ for every $p\in \mathbb{Q}$.
\end{theorem}
\begin{proof}
Let $\mathbb{P}_\kappa$ be an Easton support iteration of length $\kappa$, $\langle \mathbb{P}_\alpha, \lusim{\mathbb{Q}}_\beta \mid \alpha\leq \kappa, \beta<\kappa \rangle$. At each step, $\lusim{\mathbb{Q}}_\alpha$ is either the trivial forcing, if $\alpha$ is not inaccessible, or the lottery sum of all ${<}\alpha$-strategically closed forcing notions of size $\alpha$ (where the trivial forcing is included).
Let $G_\kappa\subseteq \mathbb{P}_\kappa$ be a generic filter. We argue that the model $V[G_\kappa]$ is as desired.
Let $\mathbb{Q}$ be a forcing notion in $\big(\mathcal{N}_\kappa\big)^{V[G_\kappa]}$ and $p\in\mathbb{Q}$. Let $U$ be a normal, $\kappa$-complete ultrafilter over $\kappa$. Let $j_1 \colon V \to N_1 \cong \Ult(V, U)$ be the ultrapower map using $U$. Let $\kappa_1 = j_1(\kappa)$.
Let us extend, in $V[G_\kappa]$, the embedding $j_1$ to an elementary embedding
\[j_1^*:V[G_\kappa]\to N_1[G_{\kappa_1}].\]
Indeed, $j(\mathbb{P}_\kappa) = \mathbb{P}_\kappa \ast j(\mathbb{P})_{[\kappa, j(\kappa))}$. By picking the trivial forcing at $\kappa$, the rest of the iteration is $\kappa^{+}$-strategically closed in $V$ (by the closure of $N_1$ under $\kappa$-sequences). The number of dense open sets of the tail forcing is $\kappa^{+}$ (as enumerated in $V$) and thus one can construct in $V[G_\kappa]$ an $N_1[G_\kappa]$-generic filter for the tail forcing $j(\mathbb{P})_{[\kappa,j(\kappa))}$. Let $G_{\kappa_1}$ be the resulting generic filter for $N_1$.
By elementarity, $j_1^*(\mathbb{Q}) \in (\mathcal{N}_{\kappa_1})^{N_1[G_{\kappa_1}]}$, thus by condition $(2)$ there are an $\lusim{\mathbb{R}}$ and a dense subset $X\subseteq j_1^*(\mathbb{Q})*\lusim{\mathbb{R}}$ such that $N_1[G_{\kappa_1}]\models|X|\leq \kappa_1$. By $GCH$, from the point of view of $V[G_\kappa]$, there are $\kappa^+$ dense open sets to meet in order to generate a generic filter for $j_1^*(\mathbb{Q})$. By condition $(2)$, $j_1^*(\mathbb{Q})*\lusim{\mathbb{R}}$ is ${<}\kappa_1$-strategically closed in $N_1[G_{\kappa_1}]$; again by closure of $N_1[G_{\kappa_1}]$ under $\kappa$-sequences from $V[G_\kappa]$, it is $\kappa^+$-strategically closed from the point of view of $V[G_\kappa]$. Hence, one can find an $N_1[G_{\kappa_1}]$-generic filter $G_{j_1^*(\mathbb{Q})}*G_{\lusim{\mathbb{R}}} \in V[G_\kappa]$ with $j_1^*(p)\in G_{j_1^*(\mathbb{Q})}$.
Since $j_1^*(\mathbb{Q})$ is masterable, using the forcing $\lusim{\mathbb{R}}$, in the extension $N_1[G_{\kappa_1}][G_{j_1^*(\mathbb{Q})}*G_{\lusim{\mathbb{R}}}]$ there is an elementary embedding
\[k\colon N_1[G_{\kappa_1}][G_{j_1^*(\mathbb{Q})}] \to N^*\]
such that $\crit k = \kappa_1$.
Let $m$ be a condition in $k(j_1^*(\mathbb{Q}))$ such that $m$ is stronger than $k(p)$ for all $p\in G_{j_1^*(\mathbb{Q})}$, which exists by applying Lemma \ref{lemma} to $j_1^*(\mathbb{Q})$. In $V[G_\kappa]$, define $$\mathcal{U} = \{A\subseteq \mathbb{Q}\mid m \in k(j^*_1(A))\}.$$ It is clear that $\mathcal{U}$ is a $\kappa$-complete ultrafilter that extends $\mathcal{D}_p(\mathbb{Q})$.
\end{proof}
\begin{corollary}\label{MasterForcing}
Consider $\mathcal{N}_\kappa$ in the model $V[G_\kappa]$ of the previous theorem. Then
\begin{enumerate}
\item $\Add(\kappa,1)\in\mathcal{N}_\kappa$, and hence, by the claim above, all its complete subforcings are in $\mathcal{N}_\kappa$
(for example: adding a Suslin tree to $\kappa$, adding a non-reflecting stationary subset of a given stationary set, etc.).
\item $Club(S)\in\mathcal{N}_\kappa$ for all $S\subseteq \kappa$ that contain all the singular cardinals and are of measure one in a normal measure over $\kappa$.
\end{enumerate}
\end{corollary}
\end{corollary}
\begin{proof}
For (1), we wish to prove that $\mathbb{Q}=\Add(\kappa,1)\in (\mathcal{N}_\kappa)^{V[G_{\kappa}]}$.
Let $f$ be $V[G_\kappa]$-generic for $\Add(\kappa,1)$. We will extend in $V[G_\kappa][f]$ the elementary embedding $j_U:V\rightarrow M_U$ to $$j^*:V[G_\kappa][f]\rightarrow M_U[G_{\kappa_1}][f'].$$
Then we can take $\lusim{\mathbb{R}}$ to be the trivial forcing in the definition of masterable.
The generic $G_{\kappa_1}$ will be made of $G_\kappa$, followed by $f$ as a generic for $\mathbb{Q}_\kappa$, and then an $M_U[G_\kappa*f]$-generic filter for the rest of the forcing $\mathbb{P}_{(\kappa,\kappa_1]}$, which can be constructed in $V[G_\kappa][f]$ using the strategic closure of the forcing, as we did in Theorem \ref{thm1}. Also, we can find a generic $f_{\kappa_1}\in V[G_\kappa][f]$ for $(\Add(\kappa_1,1))^{M_U[G_{\kappa_1}]}$ with $f_{\kappa_1}\restriction\kappa=f$. Note that $f$ is a condition in $\Add(\kappa_1,1)^{M_U[G_{\kappa_1}]}$. Above this condition, we can construct the generic $f_{\kappa_1}$, since again $\Add(\kappa_1,1)^{M_U[G_{\kappa_1}]}$ has $\kappa^+$ many dense open subsets from the point of view of $V[G_\kappa][f]$ and it is $\kappa^+$-closed, since the model is closed under $\kappa$-sequences.
For (2),
Let $S\subset\kappa$ be a stationary set that contains all singular cardinals and let us assume that $S\in W$, for normal measure $W$ over $\kappa$.
We need to show that $Club(S) \in (\mathcal{N}_\kappa)^{V[G_\kappa]}$. Indeed, the forcing $Club(S)$ is ${<}\kappa$-strategically closed. Let $H\subseteq Club(S)$ be $V[G_\kappa]$-generic.
Let us show that in $V[G_\kappa][H]$, the elementary embedding $j_W$, which corresponds to $W$, extends to an elementary embedding:
\[j_W^{\prime}\colon V[G_\kappa] \to N_W[G_{\kappa_1}'],\]
where $\kappa_1=j_W(\kappa)$,
by taking the generic of $j_W(\mathbb{P}_\kappa) \restriction \kappa + 1$ to be $G_\kappa \ast H$ and extending it to a generic filter $G_{\kappa_1}'$, using the $\kappa^{+}$-strategic closure of the tail forcing in $V[G_\kappa][H]$.
Since $\crit j'_W = \kappa$, for any $p\in Club(S)$, $j'_W(p) = p$. Also, since $\kappa \in j'_W(S)$,
\[m = \{\kappa\}\cup\bigcup_{p\in H} p \in j_W'(Club(S)).\]
Using the same arguments as before, we can find an $N_W[G_{\kappa_1}']$-generic filter $H'\in V[G_\kappa][H]$ for $j_W'(Club(S))$ such that $m\in H'$. We conclude that the embedding $j_W'$ extends to an embedding:
\[j_W''\colon V[G_\kappa][H]\to N_W[G_{\kappa_1}'][H'].\]
Therefore, we can take $\lusim{\mathbb{R}}$ to be the trivial forcing.
\end{proof}
Note that in general $\Add(\kappa,1)$ might not be masterable. For example, if we force over $L[U]$ with $\Add(\kappa,1)$, then $\kappa$ is no longer measurable.
Let us deduce now one more corollary that relates to the result of section 6.
\begin{corollary}
Consider, in $V[G_\kappa]$, the forcing for adding a club through singulars and inaccessibles which are not Mahlo, i.e.
$$\mathbb{Q}=\{a \subseteq \kappa \mid |a|<\kappa,\ a \text{ is closed, and each member of } a \text{ is either a singular cardinal or an inaccessible which is not Mahlo}\},$$
ordered by end-extension.
Then $\mathbb{Q}\in \mathcal{N}_\kappa$.
\end{corollary}
\begin{proof}
Let $G(\mathbb{Q})$ be a $V[G_\kappa]$-generic subset of $\mathbb{Q}$.
Clearly, $\mathbb{Q}$ is a ${<}\kappa$-strategically closed forcing of cardinality $\kappa$.
Let $\lusim{\mathbb{R}}$ be the forcing for adding a club through singulars over $V[G_\kappa,G(\mathbb{Q})]$.
Again, $\mathbb{Q}*\lusim{\mathbb{R}}$ is a ${<}\kappa$-strategically closed forcing of cardinality $\kappa$.
Let $G(\lusim{\mathbb{R}})$ be a generic subset of $\lusim{\mathbb{R}}$ over $V[G_\kappa,G(\mathbb{Q})]$.
We shall argue that in $V[G_\kappa,G(\mathbb{Q}), G(\lusim{\mathbb{R}})]$ there is an elementary embedding
$$i\colon V[G_\kappa,G(\mathbb{Q})] \to M,$$
with $\crit(i) = \kappa$ and $({}^\kappa M )\cap V[G_\kappa,G(\mathbb{Q})]\subseteq M$.
Let $U$ be a normal ultrafilter over $\kappa$ in $V$ and
$j:V\to N$ the corresponding elementary embedding.
Work in $V[G_\kappa,G(\mathbb{Q}), G(\lusim{\mathbb{R}})]$ and extend $j$ to an elementary embedding
$$i:V[G_\kappa,G(\mathbb{Q})] \to N[G_{j(\kappa)}, G(j(\mathbb{Q}))]$$
as follows.
Set $ G_{j(\kappa)}\restriction \kappa=G_\kappa$.
Now let $Q_\kappa=\mathbb{Q}*\lusim{\mathbb{R}}$ and take $G(\mathbb{Q})* G(\lusim{\mathbb{R}})$ to be its generic subset.
Note that $\kappa$ was a Mahlo cardinal in $ V[G_\kappa,G(\mathbb{Q})]$, and hence in $N[G_\kappa,G(\mathbb{Q})]$, but $G(\lusim{\mathbb{R}})$ destroys its Mahloness.
We complete building $G_{j(\kappa)}$ using the strategic closure of the relevant forcing.
Let $G(j(\mathbb{Q}))$ start with $\bigcup G(\mathbb{Q}) \cup \{\kappa\}$: $\kappa$ is not Mahlo anymore, and so can be added.
Finally, complete building $G(j(\mathbb{Q}))$ using the strategic closure of the forcing $j(\mathbb{Q})$, i.e., we have $\kappa^+$ many dense open sets to meet; the bad player starts by playing $\bigcup G(\mathbb{Q}) \cup \{\kappa\}$, and then, using the strategy, we meet the rest of the dense open sets.
This completes the proof of $\mathbb{Q} \in \mathcal{N}_\kappa$.
\end{proof}
\section{Other examples}
The next interesting examples should be of forcings of size $\kappa$, which are $\kappa$-distributive, but not ${<}\kappa$-strategically closed nor masterable.
Let us start with two simple general observations.
\begin{prop}
Let $\kappa>\aleph_1$ and $\eta<\kappa$ be regular cardinals. Assume that for every $\lambda<\kappa$, $\lambda^{<\eta}<\kappa$.
Suppose that $\langle Q,\leq_Q\rangle$ is an $\eta+1$-strategically closed forcing notion.
Then $\langle Q, \leq_Q\rangle$ preserves stationary subsets of $\kappa$ which concentrate on cofinality $\eta$, i.e., for any set $S$ such that $S\subseteq\{\nu<\kappa \mid \cof(\nu)=\eta\}$ is stationary, $\Vdash_Q$ ``$\check{S}$ is stationary''.
\end{prop}
\begin{proof}
Let $S\subseteq \{\nu<\kappa \mid \cof(\nu)=\eta\}$ be stationary.
Suppose that for some generic subset $G(Q)$ of $Q$, $S$ is non-stationary in $V[G(Q)]$.
Let $C\subseteq \kappa$ be a club disjoint from $S$. Let $\lusim{C}$ be a $Q$-name for $C$.
Then, back in $V$, there is $q \in G(Q)$ such that
$$q \Vdash (\lusim{C}\subseteq \kappa \text{ is a club and } S\cap \lusim{C}=\emptyset).$$
Fix a winning strategy $\sigma$ for the Player I in plays of the length $\eta+1$ for $Q$.
Pick now an elementary submodel $N$ of $H_\theta$, with $\theta$ large enough, such that
\begin{enumerate}
\item $N\supseteq \eta+1$ and $Q, S, \sigma, \lusim{C}, q \in N$,
\item $\kappa>|N|\geq\eta$,
\item $\sup(N\cap \kappa)\in S$,
\item ${}^{<\eta}N\subseteq N$,
\end{enumerate}
This is possible since we can construct a continuous and increasing sequence of models $\langle N_i\mid i<\eta\rangle$ satisfying $(1),(2)$, ${}^{<\eta}N_i\subseteq N_{i+1}$ and $\sup(N_i\cap\kappa)<\kappa$. Since $\eta$ is regular and $\eta^{<\eta}=\eta$, we can construct such a sequence; let $N^*_0=\bigcup_{i<\eta}N_i$. Then $N^*_0$ satisfies $(1),(2),(4)$. We keep defining an increasing and continuous sequence of models $$\langle N^*_i\mid i<\kappa\rangle$$ satisfying $(1),(2)$, and at successor points also $(4)$. In this definition we exploit the cardinal assumption that for every $\lambda<\kappa$, $\lambda^{<\eta}<\kappa$. The set $$\{\sup(N^*_i\cap\kappa)\mid i<\kappa\}$$ is a club in $\kappa$, thus there is $i<\kappa$ such that $\alpha=\sup(N^*_i\cap\kappa)\in S$. Note that the cofinality of $\alpha$ is $\eta$ and therefore ${}^{<\eta}(N^*_i)\subseteq N^*_i$. Let $N=N^*_i$; then $N$ satisfies $(1)$--$(4)$.
Let $\langle \xi_i \mid i<\eta \rangle$ be a cofinal sequence in $\sup(N\cap \kappa)$. By $(4)$, every initial segment of it is in $N$.
Using $\sigma$, it is easy to define an increasing sequence of conditions $\langle q_i \mid i\leq \eta\rangle$ in $Q$ such that
\begin{enumerate}
\item $q_0=q$.
\item $q_i \in N$, for every $i<\eta$.
\item There is $\alpha_i\geq\xi_i$ such that $q_{i+1}\Vdash\check{\alpha}_i\in\lusim{C}$.
\end{enumerate}
Since $q_i\in N$, we have $\alpha_i\in N\cap\kappa$, so $\langle\alpha_i\mid i<\eta\rangle$ forms an increasing sequence cofinal in $\sup(N\cap\kappa)$. Let $q_\eta=\sigma(\langle q_i\mid i<\eta\rangle)$;
then $$q_\eta\Vdash \sup(N\cap \kappa)\in \lusim{C},$$ since it also forces that $\lusim{C}$ is closed.
This is impossible, since $\sup(N\cap \kappa)\in S$. Contradiction.
\end{proof}
\begin{prop}
Let $\kappa>\aleph_1$ and $\eta<\kappa$ be regular cardinals. Assume that for every $\lambda<\kappa$, $\lambda^{<\eta}<\kappa$.
Suppose that $\langle \mathbb{P},\leq_{\mathbb{P}}\rangle$ is a forcing notion that destroys the stationarity of a subset of $\kappa$ which concentrates on cofinality $\eta$.
Then $\mathbb{P}$ is not masterable.
\end{prop}
\begin{proof}
Suppose otherwise.
Then there is a forcing notion $\lusim{\mathbb{R}}$ such that $\mathbb{P}*\lusim{\mathbb{R}}$ is ${<}\kappa$-strategically closed.
In particular, $\mathbb{P}*\lusim{\mathbb{R}}$ is $\eta+1$-strategically closed.
By the previous proposition, $\mathbb{P}*\lusim{\mathbb{R}}$ then preserves stationary subsets of $\kappa$ which concentrate on cofinality $\eta$.
This is impossible, since already $\langle \mathbb{P},\leq_{\mathbb{P}}\rangle$ destroys the stationarity of some stationary subset $S\subseteq \kappa$ which concentrates on cofinality $\eta$; hence the witnessing club which is disjoint from $S$ will also be present in extensions by $\mathbb{P}*\lusim{\mathbb{R}}$. Contradiction.
\end{proof}
Now we deal with a particular example.
Let $S$ be a fat subset of $\kappa$ such that $$ \{\nu<\kappa \mid \cof(\nu)=\eta\}\setminus S$$ is stationary.
Then, the forcing $Club(S)$ is $\kappa$-distributive (since $S$ is fat). $Club(S)$ shoots a club through $S$ and therefore destroys the stationarity of $\{\nu<\kappa\mid \cof(\nu)=\eta\}\setminus S$. It follows that $Club(S)$ is not ${<}\kappa$-strategically closed (not even $\eta+1$-strategically closed) and not masterable.
Note that if we force a Cohen function $f:\kappa \to \kappa$, then for every $\delta<\kappa$ the set
$$S^f_\delta=\{\nu<\kappa \mid f(\nu)=\delta\}$$ will be a fat stationary subset of $\kappa$ such that
for every regular $\eta<\kappa$,
the set
$$S^f_\delta\cap \{\nu<\kappa \mid \cof(\nu)=\eta\}$$ is co-stationary. The next lemma shows that a similar method to the one used for masterable forcings, can be used to extend $D_p(Q)$ for this kind of fat stationary sets.
\begin{lemma}
Let $\kappa$ be measurable cardinal and assume $GCH$. There is a cofinality preserving extension $V[G_\kappa]$ in which the following holds:
After forcing a Cohen function $f:\kappa\to \kappa$ with $\Add(\kappa,1)^{V[G_\kappa]}$, for every $\delta<\kappa$ and $p\in Club(S^{f}_\delta)$, $D_p(Club(S^{f}_{\delta}))$ can be extended to a $\kappa$-complete ultrafilter.
\end{lemma}
\begin{proof}
Let us use the same Easton support iteration $\langle P_\alpha,\lusim{Q}_\beta\mid \alpha\leq\kappa,\beta<\kappa\rangle$ as for masterable forcings, where $\lusim{Q_\beta}$ is the trivial forcing for accessible ordinals, and the lottery sum over all ${<}\beta$-strategically closed forcings of size $\leq\beta$ for inaccessible $\beta$.
Let $G_\kappa\subseteq P_\kappa$ be $V$-generic. We claim that the model $V[G_\kappa]$ is as wanted. Let $f_\kappa$ be a $V[G_\kappa]$-generic function for $\Add(\kappa,1)^{V[G_{\kappa}]}$. In $V[G_\kappa][f_\kappa]$ we shall extend $D_p(Club(S^{f_\kappa}_\delta))$ for some $\delta<\kappa$ and $p\in Club(S^{f_\kappa}_\delta)$. First, let $U\in V$ be some normal measure on $\kappa$, $$j_1:V\rightarrow M_1\simeq Ult(V,U)$$
is the corresponding elementary embedding and
$$j_{1,2}:M_1\rightarrow M_2\simeq Ult (M_1,j_1(U))$$ is the second iteration. Denote $j_2=j_{1,2}\circ j_1$ and $\kappa_i=j_i(\kappa)$ for $i=1,2$.
Secondly, by the same arguments as in \ref{MasterForcing}, by picking $\Add(\kappa,1)$ at $\lusim{Q}_\kappa$, we can construct the generic $$\underset{G_{\kappa_1}}{\underbrace{G_{\kappa}*f_\kappa*G_{(\kappa,\kappa_1)}}}*f_{\kappa_1}\in V[G_\kappa][f_\kappa]$$
which is $M_1$-generic for $j_1(P_\kappa*\Add(\kappa,1))=P_\kappa*\lusim{Q}_\kappa*P_{(\kappa,\kappa_1)}*\Add(\kappa_1,1)$. Then the embedding $j_1:V\rightarrow M_1$ lifts to $$j_1^*:V[G_\kappa][f_\kappa]\rightarrow M_1[G_{\kappa_1}][f_{\kappa_1}]$$
Next, we claim that the forcing $\Add(\beta,1)*Club(S^{f}_\beta)$ is ${<}\beta$-strategically closed when $\beta$ is inaccessible. To see this, let $\lambda<\beta$. The good player can always play conditions of the form $\langle g,\dot{a}\rangle\in \Add(\beta,1)*Club(S^{f}_\beta)$, where $\dot{a}$ is the canonical name for some closed set such that $\max(a)=\dom(g)$. The strategy is defined as follows:
$$\sigma_\lambda(\langle\langle g_i,\dot{a_i}\rangle,\langle f_i,\lusim{b_i}\rangle\mid i<\theta\rangle)=\langle g,\dot{a}\rangle$$
where for limit steps $\theta$, $$g=\bigcup_{i<\theta}g_i\cup\{\langle \nu,\delta\rangle\}, \quad a=\bigcup_{i<\theta}a_i\cup\{\nu\},$$ $\nu$ being $\sup_{i<\theta}(\sup(\dom(g_i)))$.
This will form an element of $\Add(\beta,1)*Club(S^{f}_\beta)$ by the definition at successor points $\theta=\tau+1$, in which case $g$ will simply fill the missing points in $\dom(f_\tau)$ with some value different from $\delta$ up to $\sup(\dom(f_\tau))$; if there is a maximal element in $\dom(f_\tau)$, let $\nu=\max(\dom(f_\tau))+1$, otherwise $\nu=\sup(\dom(f_\tau))$, and define $g(\nu)=\delta$. $\lusim{b_\tau}$ will be extended to a canonical name $\dot{a}$ according to $g$.
Using this strategic closure of the forcing $$\Add(\kappa_1,1)*\lusim{Club(S^{f_{\kappa_1}}_\delta)}$$ and the usual arguments about the number of dense open sets, in $V[G_\kappa][f_{\kappa}]$ we can find an $M_1[G_{\kappa_1}][f_{\kappa_1}]$-generic club $H$ for $Club(S^{f_{\kappa_1}}_\delta)^{M_1[G_{\kappa_1}][f_{\kappa_1}]}$ with $j_1(p)\in H$. Let $C=\bigcup H\subseteq S^{f_{\kappa_1}}_\delta$ be the generic club.
Next we shall extend $j_{1,2}:M_1\rightarrow M_2$ to $$j_{1,2}^*:M_1[G_{\kappa_1}][f_{\kappa_1}][H]\rightarrow M_2[G_{\kappa_2}][f_{\kappa_2}][H']$$
To do this, note that $$j_{1,2}(P_{\kappa_1}*\Add(\kappa_1,1)*\lusim{Club(S^{f_{\kappa_1}}_\delta)})=P_{\kappa_1}*\lusim{Q}_{\kappa_1}*P_{(\kappa_1,\kappa_2)}*\Add(\kappa_2,1)*\lusim{Club(S^{f_{\kappa_2}}_\delta)}$$
For $P_{\kappa_1}*\lusim{Q}_{\kappa_1}$ we take $G_{\kappa_1}*(f_{\kappa_1}*H)$. For the forcing $P_{(\kappa_1,\kappa_2)}$ we can find a generic $G_{(\kappa_1,\kappa_2)}\in M_1[G_{\kappa_1}][f_{\kappa_1}][H]$ which is $M_2[G_{\kappa_1}][f_{\kappa_1}][H]$-generic for $P_{(\kappa_1,\kappa_2)}$. Finally, note that the condition $$\langle f_{\kappa_1}\cup\{\langle\kappa_1,\delta\rangle\}, C\cup\{\kappa_1\}\rangle\in \Add(\kappa_2,1)*\lusim{Club(S^{f_{\kappa_2}}_\delta)},$$ and once again, by the strategic closure and $GCH$, we can extend this condition to a generic $f_{\kappa_2}*H'\in M_1[G_{\kappa_1}][f_{\kappa_1}][H]$.
So the embedding $j_{1,2}:M_1{\rangle}ightarrow M_2$ is lifted to
$$j_{1,2}^*:M_1[G_{\kappa_1}][f_{\kappa_1}][H]\longrightarrow M_2[G_{\kappa_1}][f_{\kappa_1}][H][G_{(\kappa_1,\kappa_2)}][f_{\kappa_2}][H']$$
By Lemma \ref{lemma} there is a condition $m\in H'$ such that $j_{1,2}^*(q)\leq m$ for every $q\in H$. In $V[G_\kappa][f_\kappa]$, define
$$W=\{X\subseteq Club(S^{f_\kappa}_\delta)\mid m\in j^*_{1,2}(j^*_1(X))\}.$$
This $\kappa$-complete ultrafilter extends $D_p(Club(S^{f_\kappa}_\delta))$.
\end{proof}
\section{Open Problems}
The following question looks natural:
\begin{question}
What is the exact strength of the following assertion: For every $\kappa$-distributive forcing notion of size $\kappa$ the filter of its dense open subsets can be extended to a $\kappa$-complete ultrafilter?
\end{question}
This question is twofold. We can ask what is the \emph{consistency strength} of this assertion and we can also inquire which large cardinals imply it.
Let $Q$ be the forcing for shooting a club through the singulars.
\begin{question}
Assume that $D(Q)$ can be extended to a $\kappa$-complete ultrafilter. Is it consistent that $\exists\lambda.\ o(\lambda)=\lambda^{++}$?
\end{question}
A natural candidate for a forcing for which extending the dense open filter to an ultrafilter might require a higher consistency strength is the forcing of adding a club through a fat stationary set $S\subseteq \kappa$.
However, as was shown above, depending on the fat stationary set, it may require only a measurable.

A. Brodsky and A. Rinot \cite{Rinot2019} give a different way to produce many fat stationary sets.
They showed that $\square(\kappa)$ implies that $\kappa$ can be partitioned into $\kappa$ many disjoint fat stationary sets. In our context, $\kappa$ is a measurable, and so $\square(\kappa)$ fails.
It is likely that still in $L[E]$-type models there will be interesting fat sets.
The next question relates to Theorem \ref{equivalece}.
Recall that an abstract Prikry type forcing is a forcing notion $\langle \mathcal{Q},\leq,\leq^*\rangle$ such that $\leq^*\subseteq\leq$, and the Prikry property holds:
$$\text{for every statement }\sigma\text{ in the forcing language, and any condition }q\in\mathcal{Q},$$
$$\text{there is }q\leq^* q^*\in\mathcal{Q}\text{ such that }q^*\text{ decides }\sigma.$$
To obtain interesting Prikry type forcings we usually require that the order $\leq^*$ has a high degree of closure or directedness.
\begin{question}
Is there an abstract generalization of Theorem \ref{equivalece} to Prikry type forcings?
Namely, assume there is a projection from a Prikry type forcing $\mathcal{Q}$, for which $\leq^*$ is sufficiently closed or directed, onto a distributive forcing $\mathbb{P}$. Can the filter $D_p(\mathbb{P})$ be extended to a $\kappa$-complete ultrafilter?
\end{question}
As we noted after the proof of Theorem \ref{equivalece}, the current formulation does not quite give us an equivalence, as we do not know if the Prikry forcing can be projected onto a distributive forcing notion of size larger than $\kappa$.
\begin{question}
Is there a tree of measures on $\kappa$ such that the corresponding tree Prikry forcing projects onto a $\sigma$-distributive forcing notion of size $>\kappa$?
\end{question}
\end{document} |
\begin{document}
\title{Application of Bayesian Networks for Estimation of Individual Psychological Characteristics}
\begin{center}
$^1$Extreme Computing Research Center, Center for Uncertainty Quantification in Computational Science \& Engineering King Abdullah University of Science and Technology, Thuwal 23955-6900, Kingdom of Saudi Arabia, e-mail: alexander.litvinenko@kaust.edu.sa,\\
$^2$Applied research department, Institute of Mathematics and Mathematical Modeling CS MES RK, Almaty, Kazakhstan, e-mail: n.litvinenko@inbox.ru\\
$^3$Institute of information and computational technologies CS MES RK, Almaty, Kazakhstan,
e-mail: morkenj@mail.ru
\end{center}
\begin{abstract}
An accurate qualitative and comprehensive assessment of human potential is one of the most important challenges in any company or collective. We apply Bayesian networks for developing more accurate overall estimations of psychological characteristics of an individual, based on psychological test results, which identify how much an individual possesses a certain trait. Examples of traits could be a stress resistance, the readiness to take a risk, the ability to concentrate on certain complicated work. The most common way of studying psychological characteristics of each individual is testing. Additionally, the overall estimation is usually based on personal experiences and the subjective perception of a psychologist or a group of psychologists about the investigated psychological personality traits.
\end{abstract}
\textbf{Keywords:} Bayesian network, graphical probability model, psychological test, probabilistic reasoning, R
\section{Introduction}
In this article we discuss applications of Bayesian network methods for solving typical and highly demanding tasks in psychology.
We compute overall estimates of the psychological personality traits, based on given answers on offered psychological tests, as well as a comprehensive study of the social status of the individual, their religious beliefs, educational level, intellectual capabilities, the influence of a particular social environment, etc.
We believe that the most optimal mathematical model for solving this problem is a graphical probabilistic model with strongly expressed cause-effect relations. Therefore, we chose the Bayesian network as our model. Advantages of the Bayesian network are as follows: 1) The Bayesian network reflects the causal-effect relationship very well. 2) The mathematical apparatus of Bayesian networks is well developed and thus, there are many software implementations of the Bayesian network methods available.
Bayesian network is a graphical probabilistic model that represents a set of random variables and their conditional dependencies via a directed acyclic graph \cite{BenGal08},
\cite{Pourret08}, \cite{Albert:09}.
For example, a Bayesian network could represent the probabilistic connections between overall economical situations, average salaries and nationalism in society. It can give recommendations to local governments of which steps to undertake to decrease the level of political tensions. Other promising applications are in Human Resource (HR) departments and in marriage agencies. Bayesian networks, by analyzing psychological properties of each individual, and sociological connections between individuals, may help to select a better group for a certain task, prevent possible conflicts and increase performance.
Bayesian framework is very popular in various kinds of applications: parameter identification \cite{matthies2016parameter}; Bayesian update \cite{matthies2016bayesian}, \cite{Rosic2013}; uncertainty quantification \cite{rosic2012sampling}, \cite{rosic2011direct}, \cite{pajonk2012deterministic}, \cite{UQLitvinenko12}; inverse problems \cite{hermann2016inverse}; classification \cite{berikov2003influence}, \cite{berikov2003methods}, \cite{berikov2004discrete}.
In this work we will apply Bayesian network \cite{Albert:09} to find a more accurate overall estimate for each investigated psychological personality trait (PPT), see Definition \ref{definition3}. Our mathematical model for the construction of overall estimate is the graphical probabilistic model that reflects probabilistic dependencies between the questions used in psychological tests and the overall estimates of the investigated PPT. Due to the presence of cause-effect relationships we will use Bayesian networks as the graphical probabilistic model \cite{Tu:06}. We consider also some of the problems which can typically arise during the computing of the overall estimates. For these problems we describe a step-by-step construction of the Bayesian network and we provide the programming code.
In the world of psychological tests, there are special software products that help specialists develop new tests and adapt existing products.
The main goals of this work are as follows:
\begin{enumerate}
\item to develop principles for constructing the overall estimates of PPT based on the usage of the Bayesian network;
\item to demonstrate the potential of graphical probabilistic models for solving problems of this type on several easy examples;
\item to implement these examples in R programming language;
\item to show the capabilities of Bayesian network for a qualitative analysis of the obtained solution.
\end{enumerate}
The structure of the paper is as follows: In Section~\ref{sec:defi} we introduce the required notions and definitions. Section~\ref{sec:ProblemStatement} is devoted to the problem statement. In Section~\ref{sec:ExamplesTasks} we consider and solve three different examples. We also list the solution in R-code. Finally, in the conclusion we repeat the main achievements and share our experience.
\section{Notions and definitions}
\label{sec:defi}
In this section we list the necessary definitions that will be used below. These definitions do not always coincide with definitions used in similar works. There are different reasons for this:
\begin{itemize}
\item many terms and definitions in psychology are not yet completely formed;
\item the meaning of the proposed concepts does not contradict the common notions in other literature;
\item our definitions simplify presentation and reading.
\end{itemize}
\begin{definition}
\textbf{Latency} is the property of objects or processes to be in a hidden state, without demonstrating itself explicitly.
\label{definition1}
\end{definition}
\begin{definition}
\textbf{Psychological test} is a standardized exercise, whose results provide information about certain psychological traits of the individual.
\label{definition2}
\end{definition}
\begin{definition}
\textbf{Psychological trait} is any stable characteristic of a person. This characteristic can be spiritual, moral, social, and is the real reason for the specific behavior of a given person under certain conditions.
\label{definition3}
\end{definition}
\begin{definition}
\textbf{A priori estimate} is the estimate, obtained before the experiment, on the basis of expert knowledge, some additional information, or taken as a first approximation.
\label{definition4}
\end{definition}
\begin{definition}
\textbf{A posteriori estimate} is the estimate obtained after the experiment, based on the results of this experiment.
\label{definition5}
\end{definition}
\begin{definition}
\textbf{Graph} is a set of vertices (nodes) connected by edges. We can also say that graph G is a pair of sets $G = (V,E)$, where $V$ is a subset of any countable set, and $E$ is a subset of $V \times V$. An oriented graph is a graph with oriented edges.
\label{definition6}
\end{definition}
\begin{definition}
\textbf{Graphical probabilistic model} is a probabilistic model, where the graph shows the probabilistic relationship between random variables. The vertices of the graph are random variables, and the edges reflect the probabilistic relationships between random variables.
\label{definition7}
\end{definition}
In the current work the vertices reflect investigated traits and estimates, and the edges reflect dependencies between traits and estimates.
\begin{definition}
\textbf{Bayesian network} is the finite, oriented and acyclic graph representing the graphical probability model.
\label{definition8}
\end{definition}
\begin{table}[!ht]
\begin{center}
\begin{tabular}{|c|l|}
\hline
\textit {Notation} & Meaning \\
\hline
PPT & psychological personality trait(s) \\
\hline
$N$ & number of psychological traits \\
\hline
$F_j$ & investigated psychological traits, $j=1,2,...,N$ \\
\hline
$E_j$ & a level how a respondent possesses trait $F_j$, $j=1,2,...,N$ \\
\hline
$Q_{jk}$ & a question, $j=1,2,...,N$, $k=1,2,...,M_j$.\\
&There is a set of questions $\{Q_{jk}\}$ for each investigated trait $F_j$\\
\hline
$E_{jk}$ & a grade, which a respondent received for his answer on question $Q_{jk}$\\
\hline
\end{tabular}
\caption{Notation}
\end{center}
\end{table}
\section{Problem statement}
\label{sec:ProblemStatement}
Let us construct a psychological test. In this test we investigate traits $F_j$, $j=1,2,...,N$. Here $F_1$ could be the stress resistance of an individual, $F_2$ the ability to speak the Spanish language, and so on. Suppose that the current respondent possesses all these traits $F_j$. The level how this respondent possesses trait $F_j$ will be denoted by $E_j$. For instance, $E_j=5$ (for the scale 1-very bad, 2-bad, 3-satisfactory, 4-good, 5 excellent) means that the respondent speaks excellent Spanish. Note that $F_j$ are latent values, i.e., for instance, it is hard to say without testing if the respondent possesses stress resistance or not. These $E_j$ estimates depend on many subjective items such as the quality of the tests, the mental state of the respondent, the psychologist's perceptions, etc.
For each investigated trait $F_j$ there is a set of questions $\{Q_{jk}\}$, $j=1,2,...,N$, $k=1,2,...,M_j$ in the test. A respondent receives a grade $E_{jk}$ for his answer on question $Q_{jk}$, where index $j$ means the $j$-th trait and index $k$ the $k$-th question.
A professional psychologist can conduct the dependence between the received grades $E_{jk}$ and the presence of $F_j$ by the given respondent. Such type of a priori data can also be obtained from previous experimental data, theoretical knowledge or psychologist's perception. Later on we will start with a Bayesian network that contains a priori probabilities in each node. Then our algorithm will visit each node and recompute each probability value inside (compute posterior probabilities).
The scales of grades for $E_j$ and $E_{jk}$ are discrete and can be very different. Often, scales are chosen on the basis of the subjective wishes of the psychologist. Note that scales for different values may not coincide. The most common scales are:
\begin{itemize}
\item a two-point scale \{0,1\}, \{Yes, No\}, \{presence, absence\}, \{true, false\};
\item a three-point scale \{-1, 0, 1\}, or, for example, \{coward, balanced, brave\}, \{absence, limited presence, presence\}, \{0, 1, 2\}, etc.
\item a five-point scale \{1, 2, 3, 4, 5\} or \{0, 1, 2, 3, 4\} etc.
\item a ten-point scale \{1, 2, ..., 10\}.
\item a hundred-point scale \{1, 2, ..., 100\}.
\end{itemize}
The final aim is to assess the presence of PPT by the respondent, based on the a priori estimates of the psychologist and answers of the respondent on the testing questions. It is necessary to take into account the following points:
\begin{itemize}
\item there is the probability of a ``slip'', i.e., when the respondent accidentally gives a wrong answer (e.g., presses a wrong button on the keyboard).
\item there is a certain chance of occasionally guessing the correct answer.
\end{itemize}
\section{Examples of tests}
\label{sec:ExamplesTasks}
In this section we consider three typical test examples with increasing complexity. All three examples include tables with a priori estimates, given in a table format, as well as a description of these tables.
After each description we formulate three possible quantities of interest (we call these quantities of interest ``Tasks''). After that we demonstrate how these Tasks can be solved with the Bayesian network.
The first test example is simple: the posterior is equal to the prior. In the second and third test examples we will consider three Tasks. Each example contains the formulation of the quantity which should be computed, the settings of all required parameters, and the corresponding solution in R-code.
\subsection{An example of a test with one question}
\label{sec:Ex1}
We consider the simplest situation with one question and one trait (PPT). In this case, the simplest graph consists of two vertices and one edge -- one vertex for the question and one vertex for the trait and the edge connect these vertices.
Let us compute the grade $E_1$ how the respondent possesses the trait $F_1$.
For this PPT $F_1$ there is only one question $Q_{11}$ in this test. We denote the grade, received by the respondent by answering on this question as $E_{11}$.
A respondent may possess this PPT with varying degrees. Depending on the primary (a priori) grade of the trait $F_1$, one can a priori assume how respondents will answer question $Q_{11}$. Corresponding Bayesian network is in Fig.~\ref{fig:fig1}.
\begin{figure}
\caption{Bayesian network, where the estimate of the trait $F_1$ is determined by one question $Q_{11}$.}
\label{fig:fig1}
\end{figure}
In this example the overall grade is the same as for the single test question for this trait. There is nothing to compute here. A psychologist usually has some a priori knowledge of how a respondent, who possess a certain trait, can respond to a single test-question for this trait. A priori knowledge is usually obtained from previous experiments, or relies on the knowledge of a psychologist. Table~\ref{tab1} gives prior estimates for this example. The trait is estimated by a discrete number from the set $\{1, 2, 3, 4, 5\}$. The question is estimated also by a discrete value from the set $\{1, 2, 3, 4, 5\}$.
\begin{figure}\label{tab1}
\end{figure}
One can interpret values from Table~\ref{tab1} in the following way. A psychologist thinks that:
\begin{enumerate}
\item 70\% of respondents, who do not possess the trait $F_1$
($E_1 = 0$) will answer question $Q_{11}$ with grade $E_{11}=1$.
\item only 20\% of respondents, who possess the trait $F_1$ with grade ($E_1 = 3$) will answer question $Q_{11}$ with grade $E_{11}=5$.
\item only 2\% of respondents, who do not possess the trait $F_1$ ($E_1 = 0$) will answer question $Q_{11}$ with grade $E_{11}=5$.
\end{enumerate}
This example is simple, the posterior probabilities are equal to prior probabilities. Table~\ref{tab1} gives us all necessary information. The situation becomes more complicated if there are several questions.
\subsection{An example of a test with two questions}
\label{sec:Ex2}
We increase the complexity of the example from Section~\ref{sec:Ex1}, namely, we consider two questions in the test. The PPT is estimated on a two-point scale. The question is estimated on a five-point scale. Therefore, the graph consists of 3 vertices and 2 edges \cite{Mu:06} (Fig.~\ref{fig:fig2}).
Let us build the estimate $E_1$ of $F_1$. For this trait there are two questions $Q_{11}$ and $Q_{12}$ in the test. We denote these estimates of questions by $E_{11}$ and $E_{12}$ respectively. Table~\ref{tab2} gives prior estimates for this example.
\begin{figure}
\caption{Bayesian network, where the estimate of the trait $F_1$ is determined by two questions $Q_{11}$ and $Q_{12}$.}
\label{fig:fig2}
\end{figure}
\begin{figure}\label{tab2}
\end{figure}
One can interpret the values from Table~\ref{tab2} in the following way. A psychologist thinks that:
\begin{enumerate}
\item 60\%
of respondents, who do not possess the trait $F_1$ will answer question $Q_{12}$ with grade $E_{12}=1$.
\item 50\% of respondents, who possess the trait $F_1$ will answer question $Q_{11}$ with grade $E_{11}=5$. Also 40\% of respondents, who possess the trait $F_1$ will answer question $Q_{12}$ with grade $E_{12}=5$.
\item 3\% of respondents, who do not possess the trait $F_1$ will answer question $Q_{11}$ with grade $E_{11}=5$. Also 5\% of respondents, who do not possess the trait $F_1$ will answer question $Q_{12}$ with grade $E_{12}=5$.
\end{enumerate}
Possible quantities of interest in this example could be:
\begin{enumerate}
\item What is the probability of receiving the grade $E_j$ for each question by a respondent if a priori probabilities are given as in Table~\ref{tab2}.
\item The respondent answered the first question with grade 2, the second question with grade 3. What is the probability that the respondent possesses the trait $F_1$?
\item The respondent answered the first question with grade 3. What is the probability that the respondent will answer the second question with grades 4 or 5?
\end{enumerate}
To compute these three possible quantities of interest in the R environment, we run the commands given in Algorithm~\ref{a:alg1}. This preprocessing code allows us to include the required R packages.
\begin{algorithm}
\begin{small}
\caption{R settings}
\label{a:alg1}
\begin{algorithmic}
\State \text{\#Clear the screen}
\State \text{rm(list=ls(all=TRUE))}
\State \text{\#Call the library bioconductor}
\State \text{source(``http://bioconductor.org/biocLite.R")}
\State \text{biocLite(``RBGL")}
\State \text{biocLite(``Rgraphviz")}
\State \text{\#Set all libraries we need}
\State \text{install.packages(``gRbase")}
\State \text{install.packages(``gRain")}
\State \text{library(gRbase)}
\State \text{library(gRain)}
\State \text{library(Rgraphviz)}
\end{algorithmic}
\end{small}
\end{algorithm}
Now we list the required steps in R environment, which set a priori distributions and build preliminary Bayesian network for all three Tasks.
\begin{algorithm}[!ht]
\caption{A priori parameter settings for the Example from Section~\ref{sec:Ex2}}
\begin{small}
\begin{algorithmic}
\label{a:alg2}
\State \text{\#Set a two-point scale for the given trait}
\State \text{lvl $\leftarrow$ c(``0",``1")}
\State \text{\#Set a five-point scale for questions}
\State \text{marks }$\gets$ \text{c(``1",``2",``3",``4",``5")}
\State \text{\#Assume a prior probability that the respondent possesses the given trait is 50\% }
\State \text{$F$ $\leftarrow$ cptable($\sim F$, values=c(50,50), levels=lvl)}
\State \text{\#Set a priori probabilities}
\State \text{$Q_{11}.F$ $\leftarrow$ cptable($\sim Q_{11} \mid F$,values=c(50, 30, 10, 5, 5, 2, 3, 5, 40, 50), levels=marks)}
\State \text{$Q_{12}.F$ $\leftarrow$ cptable($\sim Q_{12} \mid F$,values=c(60, 20, 10, 5, 5, 5,}
\text{5, 10, 40, 40), levels=marks)}
\State \text{\#Plot the graph}
\State \text{cpt.list $\leftarrow$ compileCPT(list($F$, $Q_{11}.F$, $Q_{12}.F$))}
\State \text{bnet $\leftarrow$ grain(cpt.list)}
\State \text{bnet $\leftarrow$ compile(bnet)}
\State \text{plot (bnet\$dag)}
\end{algorithmic}
\end{small}
\end{algorithm}
Now we formulate the Task:
\begin{task}
\label{t:2quest5}
Compute the probability that a random respondent, without any a priori knowledge about the trait $F$, will answer the 2 questions.
\end{task}
Corresponding R-code, which solves this Task:
\begin{verbatim}
> xq1 = querygrain(bnet, nodes=c("Q11", "Q12"))
> xq1
$Q11
Q11
1 2 3 4 5
0.26 0.17 0.08 0.23 0.28
$Q12
Q12
1 2 3 4 5
0.33 0.13 0.1 0.23 0.23
\end{verbatim}
\textbf{Result:} From this listing in the R environment, one can see that due to prior data (in Table~\ref{tab2}), a respondent will answer the first question with grade, for example, 5, with probability 28\%, and with grade 3 with probability 8\%. Additionally, the last row shows that the respondent will answer the second question with grade 5 with probability 23\%, and with grade 3 with probability 10\%.
One more task is formulated as follows:
\begin{task}
\label{t:2quest1}
Assume that a respondent answered the first question with grade 2, the second question with grade 3. What is the probability that the respondent possesses trait $F_1$?
\end{task}
Corresponding R-code, which solves this Task:
\begin{verbatim}
> bnet.ev <- setEvidence(bnet, nodes = c("Q11","Q12"),
states = c("2","3"))
xq2 = querygrain(bnet.ev,nodes=c("F"))
> xq2 = querygrain(bnet.ev, nodes=c("F"))
> xq2
$F
F
0 1
0.91 0.09
\end{verbatim}
\textbf{Result:} From the last line in the R environment, one can see that the respondent does not possess the trait with probability 91\% and possesses the trait with probability 9\%.
One more task is formulated as follows:
\begin{task}
\label{a:alg5}
Assume that the respondent answered the first question with grade 3. What is the probability that the respondent will answer the second question with grade 4 or 5?
\end{task}
Corresponding R-code is:
\begin{verbatim}
> bnet.ev <- setEvidence(bnet, nodes = c("Q11"), states = c("3"))
> xq2 = querygrain(bnet.ev, nodes=c("Q12"))
> xq2
$Q12
Q12
1 2 3 4 5
0.42 0.15 0.1 0.17 0.17
\end{verbatim}
\textbf{Result:} The respondent will answer the second question with grade 4 or 5 with probability 17\%+17\%=34\%.
\subsection{An example of test with five questions}
\label{sec:Ex3}
In this example we will consider a test with 5 questions. For all 5 questions we set up a five-point scale. The corresponding graph (Fig.~\ref{fig3}) consists of 6 vertices (5 vertices for 5 questions and one vertex for the PPT) and 5 edges (each edge connects a question with the trait).
\begin{figure}
\caption{Bayesian network, where the estimate of the trait $F_1$ is determined by five questions.}
\label{fig3}
\end{figure}
Let us build the overall estimate $E_1$ for trait $F_1$. There are 5 questions $Q_{11}$, $Q_{12}$,..., $Q_{15}$ for this trait in the test. We denote estimates of these questions as $E_{11}$, $E_{12}$,...,$E_{15}$.
Assume that experts, based on personal experience, have compiled Table~\ref{tab3} with prior estimates.
\begin{figure}\label{tab3}
\end{figure}
One can interpret the values from Table~\ref{tab3} in the following way. A psychologist thinks that:
\begin{enumerate}
\item A priori it is known that respondents, who possess trait $F_1$ with grade $E_1=3$ will answer question $Q_{11}$ with grade $E_{11}=4$ (25\%), the question $Q_{12}$ with grade $E_{12}=4$ (30\%), the question $Q_{13}$ with grade $E_{13}=4$ (40\%), the question $Q_{14}$ with grade $E_{14}=4$ (20\%), the question $Q_{15}$ with grade $E_{15}=4$ (20\%).
\item A priori it is known that respondents, who possess trait $F_1$ with grade $E_1=0$ will answer question $Q_{11}$ with grade $E_{11}=4$ (15\%)
the question $Q_{12}$ with grade $E_{12}=4$ (25\%), the question $Q_{13}$ with grade $E_{13}=4$ (20\%), the question $Q_{14}$ with grade $E_{14}=4$ (0\%), the question $Q_{15}$ with grade $E_{15}=4$ (0\%).
\item A priori it is known that respondents, who possess trait $F_1$ with grade $E_1=4$ will answer question $Q_{11}$ with grade $E_{11}=3$ (10\%), the question $Q_{12}$ with grade $E_{12}=3$ (10\%), the question $Q_{13}$ with grade $E_{13}=3$ (15\%), the question $Q_{14}$ with grade $E_{14}=3$ (30\%), the question $Q_{15}$ with grade $E_{15}=3$ (35\%).
\end{enumerate}
Possible quantities of interest here could be:
\begin{enumerate}
\item What is the probability that a respondent will answer all 5 questions with grade 5?
\item The respondent answered the first question with grade 5, the second and the third questions with grade 4. What is the probability that the respondent has the trait $F_1$ with grade not less than 3?
\item The respondent answered the first question with grade 5, the second and third questions with grade 3. What is the probability that the respondent will answer the fourth and fifth questions with grades not less than 4?
\end{enumerate}
The program code in R \cite{Ch:08}, computing quantities of interest, listed above, is the following. The setting commands for R are omitted for brevity.
\begin{algorithm}[!ht]
\caption{A priori parameter settings for the Example from Section~\ref{sec:Ex3}}
\begin{small}
\begin{algorithmic}
\label{a:alg6}
\State \text{\#Set the five-point scale for the given trait}
\State \text{lvl $\leftarrow$ c("0","1","2","3","4")}
\State \text{\# Set the five-point scale for tests}
\State \text{marks $\leftarrow$ c("1","2","3","4","5")}
\State \text{\# A priori it is unknown if the respondent possesses the trait.}
\State \text{\# With probability 20\% respondent can possess the trait in any amount.}
\State \text{$F$ $\leftarrow$ cptable($\sim F$, values=c(20,20,20,20,20),levels=lvl)}
\State \text{\#Set the a priori data (marks)}
\State \text{$Q_{11}.F$ $\leftarrow$ cptable($\sim Q_{11} \mid F$, values=c(30,20,15,15,}
\State \text{20,30,15,15,20,20,10,20,10,30,30,0,10,20,25, 45, 0, 0, 10, 30, 60), levels=marks)}
\State \text{$Q_{12}.F$ $\leftarrow$ cptable($\sim Q_{12} \mid F$, values=c(35,25,10,}
\State \text{ 25,5,25,20,25,15,15,15,20,20,20,25,10,10,10,30, 40, 0, 10, 10, 30, 50), levels=marks)}
\State \text{$Q_{13}.F$ $\leftarrow$ cptable($\sim Q_{13} \mid F$, values=c(40, 20, 20, 20,}
\State \text{0, 30, 20, 20, 20, 10, 20, 25, 20, 20, 15, 15, 10, 15, 40, 20, 0, 15, 15, 30, 40), levels=marks)}
\State \text{$Q_{14}.F$ $\leftarrow$ cptable($\sim Q_{14} \mid F$, values=c(50, 30, 20, 0, 0,}
\State \text{40, 40, 10, 10, 0, 35, 30, 25, 10, 0, 20, 15, 35, 20,10, 5, 10, 30, 30, 25), levels=marks)}
\State \text{$Q_{15}.F$ $\leftarrow$ cptable($\sim Q_{15} \mid F$, values=c(80, 10, 10, 0,}
\State \text{0, 50, 20, 20, 10, 0, 30, 40, 20, 10, 0, 20, 25, 25,20,10, 10, 15, 35, 20, 20), levels=marks)}
\State \text{\#Plot the graph}
\State \text{cpt.list $\leftarrow$ compileCPT(list($F, Q_{11}.F, Q_{12}.F,Q_{13}.F, Q_{14}.F, Q_{15}.F$))}
\State \text{bnet $\leftarrow$ grain(cpt.list)}
\State \text{bnet $\leftarrow$ compile(bnet)}
\State \text{plot (bnet\$dag)}
\end{algorithmic}
\end{small}
\end{algorithm}
\begin{task}
\label{t:2quest2}
Compute the probabilities with which a respondent with no a priori information will answer each of the 5 questions.
\end{task}
Corresponding R-code, which solves this Task:
\begin{verbatim}
> xq1 = querygrain(bnet, nodes=c("Q11","Q12","Q13","Q14","Q15"))
> xq1
$Q11
Q11
1 2 3 4 5
0.14 0.13 0.14 0.24 0.35
$Q12
Q12
1 2 3 4 5
0.17 0.17 0.15 0.24 0.27
$Q13
Q13
1 2 3 4 5
0.21 0.18 0.18 0.26 0.17
$Q14
Q14
1 2 3 4 5
0.30 0.25 0.24 0.14 0.07
$Q15
Q15
1 2 3 4 5
0.38 0.22 0.22 0.12 0.06
\end{verbatim}
The output of the R program can be interpreted as follows:
\begin{enumerate}
\item
A random respondent will answer the first question with grade $\{1, 2, 3, 4, 5\}$ with probability \\$\{0.14, 0.13, 0.14, 0.24, 0.35\}$ respectively.
\item
A random respondent will answer the second question with grade $\{1, 2, 3, 4, 5\}$ with probability \\$\{0.17, 0.17, 0.15, 0.24, 0.27\}$ respectively.
\item
A random respondent will answer the third question with grade $\{1, 2, 3, 4, 5\}$ with probability \\$\{0.21, 0.18, 0.18, 0.26, 0.17\}$ respectively.
\item
A random respondent will answer the fourth question with grade $\{1, 2, 3, 4, 5\}$ with probability \\$\{0.30, 0.25, 0.24, 0.14, 0.07\}$ respectively.
\item
A random respondent will answer the fifth question with grade $\{1, 2, 3, 4, 5\}$ with probability \\$\{0.38, 0.22, 0.22, 0.12, 0.06\}$ respectively.
\end{enumerate}
\begin{task}
\label{t:2quest3}
The respondent answered the first question with grade 5, the second and third questions with grade 3, the fourth question with grade 2, the fifth question with grade 3. What is the probability that the respondent has the trait $F_1$ with grade not less than 3?
\end{task}
Corresponding R-code, which solves this Task:
\begin{verbatim}
>bnet.ev <- setEvidence(bnet, nodes
= c("Q11","Q12","Q13","Q14","Q15"), states=c("5","3","3","2","3"))
> xq2 = querygrain(bnet.ev, nodes=c("F"))
> xq2
$F
F
0 1 2 3 4
0.05 0.36 0.33 0.11 0.14
\end{verbatim}
The results of the R-code can be interpreted as follows: From the last line in R environment one can see that on the basis of a priori data for values of trait $\{0,1,2,3,4\}$ we will have corresponding output probabilities $\{0.05, 0.36, 0.33, 0.11, 0.14\}$.
\begin{task}
\label{t:2quest4}
Assume that a respondent answered the first question with grade 5, the second and third questions with grade 3. What is the probability that the respondent will answer the fourth and fifth questions with grades not less than 4?
\end{task}
Corresponding R-code, which solves this Task:
\begin{verbatim}
> bnet.ev <- setEvidence(bnet, nodes= c("Q11","Q12","Q13"),
states = c("5","3","3"))
> xq3 = querygrain(bnet.ev, nodes=c("Q14","Q15"))
> xq3
$Q14
Q14
1 2 3 4 5
0.29 0.26 0.24 0.15 0.07
$Q15
Q15
1 2 3 4 5
0.34 0.25 0.23 0.19 0.06
\end{verbatim}
The results of the R-code can be interpreted as follows: The respondent will answer the fourth question with grade not less than 4 with probability (15\%+7\%)=22\%. The respondent will answer the fifth question with grade not less than 4 with probability (19\%+6\%)=25\%.
\section{Conclusion}
We considered three different examples of psychological tests. The first test consisted of asking one question, the second of two questions and the third of five questions. After we set up all required statistical parameters and priors, we formulated three possible Tasks and offered their solutions in R environment. The solution includes the construction of a Bayesian network for each Task and computing posterior probabilities.
We used the constructed Bayesian networks to develop principles for computing the overall grade of the given trait $F$ (for instance, the stress resistance). This overall grade tells us the level of possession of this trait $F$ by the given respondent. We demonstrated the potential of graphical probabilistic models of three simple examples. Finally, we showed the capabilities of Bayesian networks for qualitative analysis of the resulting solution.
Although we considered relatively simple examples with just one trait, the offered technique and software can be used in cases with more traits. An example of a test case with more than one trait will be considered in a forthcoming paper; we did not observe any restrictions or limitations in that work. The number of questions in each test can also be increased. The offered R-code only solves the described examples. However, this R-code can be modified for larger numbers of traits, questions and tests.
\section{Software}
We use the R programming language for the realization of Bayesian networks due to its popularity among applied scientists/statisticians \cite{Bu:10,Kab:14}. Analogous work can be done in
\begin{itemize}
\item
MATLAB \cite{Mur:01};
\item
in one of the known software packages:
\begin{itemize}
\item
GeNIe \& SMILE, \mbox{http://genie.sis.pitt.edu}
\item
OpenBayes, \mbox{https://github.com/abyssknight/}
\item
BANSY3, \mbox{http://www.dynamics.unam.edu/}\\ \mbox{DinamicaNoLineal3/bansy3.htm}
\end{itemize}
\item in one of the commercial products: AgenaRisk Bayesian network tool, Bayesian network application library, Bayesia, BNet.
\end{itemize}
\subsection{Reproducibility} To reproduce the presented results one can download the R-code from Dropbox\\
\linkurl{https://www.dropbox.com/sh/t8cm12vv741a0h0/AABz_SwBEQ5mgKMyRAcl51mZa?dl=0}.
\section*{Acknowledgment}
This work was supported by the Institute of Mathematics and Mathematical Modeling CS MES Republic of Kazakhstan and by the Ministry of Education and Science of Kazakhstan, (grant number is 4085/GF4, 0115RK00640).
Additionally, we would like to express our enormous gratitude to Prof. Maksat Kalimoldayev,
Academic member of the National Academy of Sciences, the head of Institute of Information and Computational Technologies CS MES Republic of Kazakhstan
for his organizational assistance, valuable comments and financial support.
\input{main.bbl}
\section*{Author biographies}
\begin{wrapfigure}{l}{25mm}
\includegraphics[width=1in,height=1.2in,clip,keepaspectratio]{alex}\end{wrapfigure}\par
{\bf Alexander Litvinenko} joined KAUST in 2013. He specializes in efficient numerical methods for stochastic PDEs, uncertainty quantification, multi-linear algebra and Bayesian update methods. Alexander earned B.Sc. (2000) and M.Sc. (2002) degrees in mathematics at Novosibirsk State University, and his PhD (2006) at Max-Planck-Institut in Leipzig. From 2007-2013 he was a Postdoctoral Research Fellow at the TU Braunschweig in Germany.\\
\linkurl{https://ecrc.kaust.edu.sa/Pages/Litvinenko.aspx}\\
\begin{wrapfigure}{l}{25mm}
\includegraphics[width=1in,height=1.2in,clip,keepaspectratio]{litvinenkoN1}\end{wrapfigure}\par
{\bf Natalya Litvinenko} earned B.Sc. (2012) and M.Sc. (2014) in Mathematics at the Kazakh National University named after Al-Farabi, she also did an internship at Imperial College London (2014). Her research interest includes implementation of parallel algorithms on architectures NVIDIA CUDA, and Bayesian Networks. From 2015 till now she is taking part in the project ``Automated techniques for social-psychological diagnostics of military teams".\\
\begin{wrapfigure}{l}{25mm}
\includegraphics[width=1in,height=1.2in,clip,keepaspectratio]{Mamyrbayev.png}\end{wrapfigure}\par
{\bf Orken Mamyrbayev} earned B.Sc. (2001) and M.Sc. (2004) in Information systems at the Kazakh National Research Technical University named after K.I.Satpayev. His research interest includes digital signal processing, robotic systems, computer vision, automatic speech recognition. He earned his PhD (2014) in the group of Prof. Maksat Kalimoldayev. Currently he is deputy director in the Institute of Information and Computational Technologies CS MES RK.
\end{document} |
\begin{document}
\title{Graphs with bounded tree-width and large odd-girth are almost bipartite}
\author{Alexandr V. Kostochka\thanks{
Department of Mathematics, University of Illinois, Urbana, IL 61801
and Institute of Mathematics, Novosibirsk 630090, Russia. E-mail:
\texttt{kostochk@math.uiuc.edu}. This author's work was partially
supported by NSF grant DMS-0650784 and by grant 09-01-00244-a of the
Russian Foundation for Basic Research.}
\and
Daniel Kr{\'a}l'\thanks{
Institute for Theoretical Computer Science, Faculty of Mathematics and
Physics, Charles University, Malostransk{\'e} n{\'a}m{\v e}st{\'\i} 25,
118 00 Prague, Czech Republic. E-mail: \texttt{kral@kam.mff.cuni.cz}.
The Institute for Theoretical Computer Science (ITI) is supported by
Ministry of Education of the Czech Republic as project 1M0545.
This research has also been supported by the grant GACR 201/09/0197.}
\and
Jean-S{\'e}bastien Sereni\thanks{
CNRS (LIAFA, Universit\'e Denis Diderot), Paris, France, and Department
of Applied Mathematics (KAM), Faculty of Mathematics and Physics,
Charles University, Prague, Czech Republic. E-mail:
\texttt{sereni@kam.mff.cuni.cz}.}
\and
Michael Stiebitz\thanks{
Technische Universit\"at Ilmenau, Institute of Mathematics,
P.O.B. 100 565, D-98684 Ilmenau, Germany. E-mail:
\texttt{Michael.Stiebitz@tu-ilmenau.de}.}}
\date{}
\maketitle
\begin{abstract}
We prove that
for every $k$ and every $\varepsilon>0$, there exists $g$ such that
every graph with tree-width at most $k$ and odd-girth at least $g$
has circular chromatic number at most $2+\varepsilon$.
\end{abstract}
\section{Introduction}
It has been a challenging problem to prove the existence
of graphs of arbitrarily high girth and chromatic
number~\cite{Erd59}. On the other hand, graphs with large
girth that avoid a fixed minor are known to have low chromatic number
(in particular, this applies to graphs embedded on a fixed surface).
More precisely, as Thomassen observed~\cite{Tho88},
a graph that avoids a fixed minor and has large girth is $2$-degenerate,
and hence $3$-colorable. Further, Galluccio, Goddyn and
Hell~\cite{bib-galluccio}
proved the following theorem, which essentially states that
graphs with large girth that avoid a fixed minor are almost bipartite.
\begin{theorem}[Galluccio, Goddyn and Hell, 2001]
\label{thm-ggh}
For every graph $H$ and every $\varepsilon>0$, there exists an integer
$g$ such that the circular chromatic number of
every $H$-minor free graph of girth at least $g$ is at most
$2+\varepsilon$.
\end{theorem}
A natural way to weaken the girth-condition is to
require the graphs to have high odd-girth
(the \emph{odd-girth} is the length of a shortest odd cycle).
However, Young~\cite{You96} constructed $4$-chromatic projective graphs with
arbitrary high odd-girth. Thus, the high odd-girth requirement is not
sufficient to ensure $3$-colorability, even for graphs embedded on a
fixed surface.
Klostermeyer and Zhang~\cite{KlZh00}, though, proved that the circular chromatic
number of every planar graph of sufficiently high odd-girth is
arbitrarily close to $2$. In particular, the same is true for
$K_4$-minor free graphs, i.e. graphs with tree-width at most $2$.
We prove that the conclusion is still true for any class of graphs of
bounded tree-width, which answers a question of Pan and
Zhu~\cite[Question 6.5]{bib-pan} also appearing as Question 8.12
in the survey by Zhu~\cite{bib-zhu01}.
\begin{theorem}\label{thm-main}
For every $k$ and every $\varepsilon>0$, there exists $g$ such that
every graph with tree-width at most $k$ and odd-girth at least $g$
has circular chromatic number at most $2+\varepsilon$.
\end{theorem}
Motivated by tree-width duality, Ne{\v s}et{\v r}il
and Zhu~\cite{bib-nesetril} proved the following theorem.
\begin{theorem}[Ne\v set\v ril and Zhu, 1996]\label{thm-twd}
For every $k$ and every $\varepsilon>0$, there exists $g$ such that
every graph $G$ with tree-width at most $k$ and homomorphic to a graph $H$
with girth at least $g$ has circular chromatic number at most
$2+\varepsilon$.
\end{theorem}
To see that Theorem~\ref{thm-main} implies Theorem~\ref{thm-twd},
observe that if $G$ has an odd cycle of length $g$, then
$H$ has an odd cycle of length at most $g$.
\section{Notation}
A \emph{$(p,q)$-coloring} of a graph is a coloring of the vertices
with colors from the set $\{0,\ldots,p-1\}$ such that the colors of
any two adjacent vertices $u$ and $v$ satisfy $q\le |c(u)-c(v)|\le p-q$.
The \emph{circular chromatic number $\chi_c(G)$} of a graph $G$
is the infimum (and it can be shown to be the minimum) of the ratios $p/q$ such
that $G$ has a $(p,q)$-coloring. For every finite graph $G$,
it holds that $\chi(G)=\lceil\chi_c(G)\rceil$ and
there is a $(p,q)$-coloring of $G$ for every $p$ and $q$
with $p/q\ge\chi_c(G)$. In particular, the circular
chromatic number of $G$ is at most $2+1/k$
if and only if $G$ is homomorphic to a cycle of length $2k+1$.
The reader is referred to the surveys by Zhu~\cite{bib-zhu01,bib-zhu06}
for more information about circular colorings.
A \emph{$p$-precoloring} is a coloring $\varphi$ of a subset $A$ of vertices of
a graph $G$ with colors from $\{0,\ldots,p-1\}$,
and its \emph{extension} is a coloring of
the whole graph $G$ that coincides with $\varphi$ on $A$.
The following lemma can be seen as a corollary of a theorem
of Albertson and West~\cite[Theorem 1]{AlWe06}, and it
is the only tool we use from this area.
\begin{lemma}
\label{lm-extend}
For every $p$ and $q$ with $2<p/q$, there exists $d$ such that any $p$-precoloring
of vertices with mutual distances at least $d$ of a bipartite graph $H$
extends to a $(p,q)$-coloring of $H$.
\end{lemma}
A \emph{$k$-tree} is a graph obtained from a complete graph of order $k+1$
by adding vertices of degree $k$ whose neighborhood is a clique.
The \emph{tree-width} of a graph $G$ is the smallest $k$ such that
$G$ is a subgraph of a $k$-tree.
Graphs with tree-width at most $k$ are also called \emph{partial $k$-trees}.
A \emph{rooted partial $k$-tree} is a partial $k$-tree $G$
with $k+1$ distinguished vertices $v_1,\ldots,v_{k+1}$ such that
there exists a $k$-tree $G'$ that is a supergraph of $G$ and
the vertices $v_1,\ldots,v_{k+1}$ form a clique in $G'$.
We also say that the partial $k$-tree is \emph{rooted} at $v_1,\ldots,v_{k+1}$.
If $G$ is a partial $k$-tree rooted at $v_1,\ldots,v_{k+1}$ and
$G'$ is a partial $k$-tree rooted at $v'_1,\ldots,v'_{k+1}$, then
the graph $G\oplus G'$ obtained by identifying $v_i$ and $v'_i$ is
again a rooted partial $k$-tree
(identify the cliques in the corresponding $k$-trees).
Fix $p$ and $q$. If $G$ is a rooted partial $k$-tree,
then $\F(G)$ is the set of all $p$-precolorings of the $k+1$ distinguished
vertices of $G$ that can be extended to a $(p,q)$-coloring of $G$.
The next lemma is a standard application of results in the area
of graphs of bounded tree-width~\cite{RoSe86}.
\begin{lemma}
\label{lm-small}
Let $k$ and $N$ be positive integers such that $N\ge k+1$.
If $G$ is a partial $k$-tree with at least $3N$ vertices,
then there exist partial rooted $k$-trees $G_1$ and $G_2$ such that
$G$ is isomorphic to $G_1\oplus G_2$ and $G_1$ has at least $N+1$ and
at most $2N$ vertices.
\end{lemma}
If $G$ is a partial $k$-tree rooted at $v_1,\ldots,v_{k+1}$,
then its \emph{type} is a $(k+1)\times (k+1)$ matrix $M$ such that
$M_{ij}$ is the length of the shortest path between the vertices
$v_i$ and $v_j$. If there is no such path, $M_{ij}$ is equal to $\infty$.
Any matrix $M$ that is a type of a partial rooted $k$-tree satisfies
the triangle inequality (setting $\infty+x=\infty$ for any $x$).
A symmetric matrix $M$ whose entries are non-negative
integers and $\infty$ (and zeroes only on the main diagonal) that
satisfies the triangle inequality is a \emph{type}.
A type is \emph{bipartite} if $M_{ij}+M_{jk}+M_{ik}\equiv 0 \pmod 2$
for any three finite entries $M_{ij}$, $M_{jk}$ and $M_{ik}$.
Two bipartite types $M$ and $M'$ are \emph{compatible} if $M_{ij}$ and $M'_{ij}$
have the same parity whenever both of them are finite.
We define a binary relation on bipartite types as follows:
$M\preceq M'$ if and only if $M$ and $M'$ are compatible and
$M_{ij}\leq M'_{ij}$ for every $i$ and $j$.
Note that the relation $\preceq$ is a partial order.
We finish this section with the following lemma.
Its straightforward proof is included to help us in
familiarizing with the just introduced notation.
\begin{lemma}
\label{lm-type-glue}
Let $G^1$ and $G^2$ be two bipartite rooted partial $k$-trees
with types $M^1$ and $M^2$
such that there exists a bipartite type $M^0$ with $M^0\preceq M^1$ and $M^0\preceq M^2$.
Then the types $M^1$ and $M^2$ are compatible, $G^1\oplus G^2$ is a bipartite
rooted partial $k$-tree and its type $M$ satisfies $M^0\preceq M$.
\end{lemma}
\begin{proof}
The types $M^1$ and $M^2$ are compatible: if both $M^1_{ij}$ and
$M_{ij}^{2}$ are finite, then $M_{ij}^{0}$ is finite and has the same
parity as $M^1_{ij}$ and $M_{ij}^{2}$.
Hence, the entries $M^1_{ij}$ and $M^2_{ij}$ have the same parity.
Let $M$ be the type of $G^1\oplus G^2$.
Note that it does not hold in general that $M_{ij}=\min\{M^1_{ij},M^2_{ij}\}$.
We show that $M^0\preceq M$, which will also imply that $G^1\oplus G^2$
is bipartite since $M^0$ is a bipartite type.
Consider a shortest path $P$ between two distinguished
vertices $v_i$ and $v_{i'}$ and split $P$ into paths
$P_1,\ldots,P_\ell$ delimited by distinguished vertices on $P$.
Note that $\ell\le k$ since $P$ is a path. Let $j_0=i$ and
let $j_i$ be the index of the end-vertex of $P_i$ for
$i\in\{1,\ldots,\ell\}$.
In particular, $j_\ell=i'$. Each of the paths $P_1,\ldots,P_\ell$
is fully contained in $G^1$ or in $G^2$ (possibly in both if
it is a single edge).
Since $M^0\preceq M^1$ and $M^0\preceq M^2$, the length of $P_i$
is at least $M^0_{j_{i-1}j_i}$, and it has the same parity as $M^0_{j_{i-1}j_i}$.
Since $M^0$ is a bipartite type (among others, it satisfies the triangle
inequality), the length of $P$, which is $M_{ii'}$, has the same parity as
$M^0_{j_0j_\ell}=M^0_{ii'}$ and is at least $M^0_{ii'}$.
This implies that $M^0\preceq M$.
\end{proof}
\section{The Main Lemma}
In this section, we prove a lemma which forms the core of our argument.
To this end, we first prove another lemma that asserts that for every $k$,
$p$ and $q$, the set of types of all bipartite rooted partial $k$-trees
forbidding a fixed set of $p$-precolorings from extending (and maybe some
other precolorings, too) has always a maximal element.
We formulate the lemma slightly differently to facilitate its application.
\begin{lemma}
\label{lm-mainM}
For every $k$, $p$ and $q$, there exists a finite number
of (bipartite) types $M^1,\ldots,M^m$ such that for any bipartite
rooted partial $k$-tree $G$ with type $M$, there exists a bipartite
rooted partial $k$-tree $G'$ with type $M^i$ for some
$i\in\{1,\ldots,m\}$ such that $\F(G')\subseteq\F(G)$ and $M\preceq M^i$.
\end{lemma}
\begin{proof}
Let $d\ge 2$ be the constant from Lemma~\ref{lm-extend} applied for $p$ and $q$.
Let $M^1,\ldots,M^m$ be all bipartite types with entries
from the set $\{1,\ldots,D^{(k+1)^2}\}\cup\{\infty\}$ where $D=4d$.
Thus, $m$ is finite and does not exceed $(D^{(k+1)^2}+1)^{k(k+1)/2}$.
Let $G$ be a bipartite rooted partial $k$-tree with type $M$.
If $M$ is one of the types $M^1,\ldots,M^m$, then there is nothing
to prove (just choose $i$ such that $M=M^i$).
Otherwise, one of its entries is finite and exceeds $D^{(k+1)^2}$.
For $i\in\{1,\ldots,(k+1)^2\}$,
let $J^i$ be the set of all positive
integers between $D^{i-1}$ and $D^i-1$ (inclusively).
Let $i_0$ be the smallest integer such that no entry of $M$
is contained in $J^{i_0}$. Since $M$ has at most $k(k+1)/2$
different entries, such an index $i_0$ exists.
Note that if $i_0=1$, then Lemma~\ref{lm-extend} implies that
$\F(G)$ contains all possible $p$-precolorings, and the sought graph
$G'$ is the bipartite rooted partial $k$-tree composed of $k+1$ isolated vertices,
with the all-$\infty$ type.
Two vertices $v_i$ and $v_j$
at which $G$ is rooted are \emph{close} if $M_{ij}$ is at most $D^{i_0-1}$.
The relation $\approx$ of being close is an equivalence
relation on $v_1,\ldots,v_{k+1}$. Indeed, it is reflexive and
symmetric by the definition, and we show now that it is transitive.
Suppose that $M_{ij}$ and $M_{jk}$ are both at most $D^{i_0-1}$. Then,
the distance between $v_i$ and $v_k$ is at most
$M_{ij}+M_{jk}\le2D^{i_0-1}-2\le D^{i_0}-1$ since $D\ge2$. Consequently,
by the choice of $i_0$, the distance between $v_i$ and $v_k$ is at
most $D^{i_0-1}-1$ and thus $v_i\approx v_k$.
Let $C_1,\ldots,C_{\ell}$ be the equivalence classes of the relation $\approx$.
Note that $C_1,\ldots,C_{\ell}$ is a finer partition than that
given by the equivalence relation of being connected.
Since $G$ is bipartite, we can partition its vertices into
two color classes, say red and blue.
For every $i\in\{1,\ldots,\ell\}$, contract the closed neighborhood
of a vertex $v$ if $v$ is a blue vertex and its distance
from any vertex of $C_i$ is at least $D^{i_0-1}$ and keep doing so
as long as such a vertex exists. Observe that the resulting graph
is uniquely defined. After discarding the components that do not contain
the vertices of $C_i$, we obtain a bipartite partial $k$-tree $G_i$ rooted
at the vertices of $C_i$:
it is bipartite as we have always contracted closed neighborhoods
of vertices of the same color (blue)
to a single (red) vertex, and its tree-width is at most $k$ since
the tree-width is preserved by contractions.
Moreover, the distance between any two
vertices of $C_i$ has not decreased since any path between them through
any of the newly arising vertices has length
at least $2D^{i_0-1}-2\ge D^{i_0-1}$.
Now, let $G'$ be the bipartite rooted partial $k$-tree obtained
by taking the disjoint union of $G_1,\ldots,G_{\ell}$.
The type $M'$ of $G'$ can be obtained from the type of $G$:
set $M'_{ij}$ to be $M_{ij}$ if the vertices $v_i$ and $v_j$ are close, and
$\infty$ otherwise. Thus, $M'$ is one of the types
$M^1,\ldots,M^m$ and $M\preceq M'$.
It remains to show that $\F(G')\subseteq\F(G)$.
Let $c\in\F(G')$ be a $p$-precoloring that extends to $G'$, and recall that $D\ge4$.
For $i\in\{1,\ldots,\ell\}$,
let $A_i$ be the set of all red vertices at distance at most $D^{i_0-1}$ and
all blue vertices at distance at most $D^{i_0-1}-1$ from $C_i$, and
let $R_i$ be the set of all red vertices at distance $D^{i_0-1}-1$ or
$D^{i_0-1}$ from $C_i$.
Set $B_i=A_i\setminus R_i$ ($B_i$ is the ``interior'' of $A_i$ and $R_i$
its ``boundary'').
The extension of $c$ to $G_i$ naturally defines a coloring of all vertices of $A_i$:
$G_i$ is the subgraph of $G$ induced by $A_i$ with some red vertices of
$R_i$ identified (two vertices of $R_i$ are identified if and only if
they are in the same component of the graph $G-B_i$).
Let $H$ be the following auxiliary graph obtained from $G$: remove the vertices
of $B=B_1\cup\cdots\cup B_{\ell}$ and, for $i\in\{1,\ldots,\ell\}$,
identify every pair of vertices of $R_i$
that are in the same component of $G-B$.
Let $R$ be the set of vertices of $H$
corresponding to some vertices of $R_1\cup\cdots\cup R_{\ell}$. Precolor the vertices
of $R$ with the colors given by the colorings of $G_i$ (note that two vertices of $R_i$
in the same component of $G-B_i$ are also in the same component of $G-B$,
so this is well-defined). The graph $H$ is bipartite as only red vertices have been identified.
The distance between any two precolored vertices is at least $d$:
consider two precolored vertices $r$ and $r'$ at distance at most $d-1$.
Let $i$ and $i'$ be such that $r\in R_i$ and $r'\in R_{i'}$. If $i=i'$,
then $r$ and $r'$ are in the same component of $G-B$ and thus $r=r'$.
If $i\not=i'$ then
by the definition of $R_i$ and $R_{i'}$, the vertex $r$ is in $G$ at distance at most $D^{i_0-1}$
from some vertex $v$ of $C_i$ and $r'$ is at distance at most $D^{i_0-1}$ from
some vertex $v'$ of $C_{i'}$. So, the distance between $v$ and $v'$
is at most $2D^{i_0-1}+d<D^{i_0}-1$. Since $M$ has no entry from $J^{i_0}$,
the vertices $v$ and $v'$ must be close and thus $i=i'$, a
contradiction.
Since the distance between any two precolored vertices is at least $d$, the precoloring
extends to $H$ by Lemma~\ref{lm-extend} and in a natural way it defines a coloring of $G$.
We conclude that
every $p$-precoloring that extends to $G'$ also extends to $G$ and thus $\F(G')\subseteq\F(G)$.
\end{proof}
We now prove our main lemma, which basically states that there is only
a finite number of bipartite rooted partial $k$-trees that can appear
in a minimal non-$(p,q)$-colorable graph with tree-width $k$ and
a given odd girth.
\begin{lemma}
\label{lm-mainG}
For every $k$, $p$ and $q$, there exist a finite number $m$ and
bipartite rooted partial $k$-trees $G^1,\ldots,G^m$ with types
$M^1,\ldots,M^m$ such that for any bipartite rooted partial
$k$-tree $G$ with type $M$ there exists $i$ such that
$\F(G^i)\subseteq \F(G)$ and $M\preceq M^i$.
\end{lemma}
\begin{proof}
Let $M^1,\ldots,M^{m}$ be the types from Lemma~\ref{lm-mainM}.
We define the graph $G^i$ as follows:
for every $p$-precoloring $c$ that does not extend to a bipartite partial
rooted $k$-tree with type $M^i$, fix any partial rooted $k$-tree $G^i_c$
with type $M^i$ such that $c$ does not extend to $G^i_c$.
Set $G^i=\bigoplus_{c} G^i_c$, where $c$ runs over all such $p$-precolorings.
If the above sum of partial $k$-trees is non-empty, then the type $M$
of $G^i$ is $M^i$. Indeed,
$M\preceq M^i$ by the definition of $G^i$, and Lemma~\ref{lm-type-glue}
implies that $M^i\preceq M$.
If all the $p$-precolorings of the $k+1$ vertices in the root extend to
each partial $k$-tree of type $M^i$, then let
$G^i$ be the graph consisting of $k+1$ isolated vertices. This happens
in particular for the all-$\infty$ type.
Let us verify the statement of the lemma.
Let $G$ be a bipartite rooted partial $k$-tree and
let $M$ be the type of $G$. If $\F(G)$ is composed of all $p$-precolorings,
the sought graph $G^i$
is the one composed of $k+1$ isolated vertices. Hence, we assume that $\F(G)$
does not contain all $p$-precolorings, i.e., there are $p$-precolorings that do not extend to $G$.
By Lemma~\ref{lm-mainM}, there exists a bipartite rooted partial $k$-tree
$G'$ with type $M'$ such that $M\preceq M'=M^i$ for some $i$ and $\F(G')\subseteq\F(G)$.
For every $p$-precoloring $c$ that does not extend to $G'$
(and there exists at least one such $p$-precoloring $c$),
some graph $G^i_c$ has been glued into $G^i$.
Hence, $\F(G^i)\subseteq\F(G')\subseteq\F(G)$. Since the type of $G^i$ is $M^i$,
the conclusion of the lemma follows.
\end{proof}
\section{Proof of Theorem~\ref{thm-main}}
We are now ready to prove Theorem~\ref{thm-main}, which is recalled below.
\begin{thm2}
For every $k$ and every $\varepsilon>0$, there exists $g$ such that
every graph with tree-width at most $k$ and odd-girth at least $g$
has circular chromatic number at most $2+\varepsilon$.
\end{thm2}
\begin{proof}
Fix $p$ and $q$ such that $2<p/q\le 2+\varepsilon$. Let $G^1,\ldots,G^m$
be the bipartite partial $k$-trees from Lemma~\ref{lm-mainG} applied
for $k$, $p$ and $q$.
Set $N$ to be the largest order of the graphs $G^i$ and set $g$ to be $3N$.
We assert that each partial $k$-tree with odd-girth $g$ has circular chromatic
number at most $p/q$. Assume that this is not the case and
let $G$ be a counterexample with the fewest vertices.
The graph $G$ has at least $3N$ vertices (otherwise, it has no odd cycles and
thus it is bipartite). By Lemma~\ref{lm-small}, $G$ is isomorphic to
$G_1\oplus G_2$, where $G_1$ and $G_2$ are rooted partial $k$-trees and the number of vertices of $G_1$
is between $N+1$ and $2N$. By the choice of $g$, the graph
$G_1$ has no odd cycle and thus
it is a bipartite rooted partial $k$-tree. By Lemma~\ref{lm-mainG}, there exists $i$
such that $\F(G^i)\subseteq\F(G_1)$ and $M_1\preceq M^i$
where $M_1$ is the type of $G_1$ and $M^i$ is the type of $G^i$.
Let $G'$ be the partial $k$-tree $G^i\oplus G_2$.
First, $G'$ has fewer vertices than $G$ since the number of vertices of $G^i$
is at most $N$ and the number of vertices of $G_1$ is at least $N+1$.
Second, $G'$ has no $(p,q)$-coloring: if it had a $(p,q)$-coloring, then the
corresponding
$p$-precoloring of the $k+1$ vertices shared by $G^i$ and $G_2$ would extend
to $G_1$ since $\F(G^i)\subseteq\F(G_1)$
and thus $G$ would have a $(p,q)$-coloring, too.
Finally, $G'$ has no odd cycle of length at most $g$: if it had such a cycle,
replace any path between vertices $v_j$ and $v_{j'}$ of the root of $G^i$ with a path
of at most the same length between them in $G_1$ (recall that $M_1\preceq M^i$). If such paths
for different pairs of $v_j$ and $v_{j'}$ on the considered odd cycle intersect,
take their symmetric difference. In this way, we obtain an Eulerian subgraph of
$G=G_1\oplus G_2$ with an odd number of edges such that the number of its edges
does not exceed $g$. Consequently, this Eulerian subgraph has an odd cycle of
length at most $g$, which violates the assumption on the odd-girth of $G$.
We conclude that $G'$ is a counterexample with fewer vertices than
$G$, a contradiction.
\end{proof}
We end by pointing out that the approach used yields an upper bound
of
$3(k+1)\cdot2^{2^{p^{k+1}}((4d)^{(k+1)^2}+1)^{k^2}}$
for the smallest $g$ such that
all graphs with tree-width at most $k$ and odd-girth at least $g$
have circular chromatic number at most $p/q$, whenever $p/q>2$.
More precisely, the value of $N$ cannot exceed
$(k+1)\cdot2^{2^{p^{k+1}}((4d)^{(k+1)^2}+1)^{k^2}}$. To see this,
we consider all pairs $P=(C,M)$ where $C$ is a set of $p$-precolorings
of the root and $M$ is a type such that there is a bipartite rooted
partial $k$-tree of type $M$ to which no coloring of $C$ extends.
Let $n_P$ be the size of a smallest such partial $k$-tree.
We obtain a sequence of at most
$2^{p^{k+1}}\times\left((4d)^{(k+1)^{2}}+1\right)^{k^2}$
integers. The announced bound follows from the following fact:
if the sequence is sorted in increasing order, then each term is at most twice
the previous one.
Indeed, consider the tree-decomposition of the partial $k$-tree $G_P$
chosen for the pair $P$. If the bag containing the root has
a single child, then we delete a vertex of the root, and set a vertex
in the single child to be part of the root. We obtain a partial $k$-tree
to which some $p$-precolorings of $C$ do not extend. Thus,
$n_P\le1+n_{P'}$ for some pair $P'$ and $n_{P'}<n_P$.
If the bag containing the root has more than one child, then
$G_P$ can be obtained by identifying the roots of two smaller partial
$k$-trees $G$ and $G'$.
By the minimality of $G_P$, the orders of $G$ and $G'$ are $n_{P_1}$ and
$n_{P_2}$ for two pairs $P_1$ and $P_2$ such that $n_{P_i}<n_P$ for $i\in\{1,2\}$.
This yields the stated fact, which in turn implies
the given bound, since the smallest element of the sequence is $k+1$.
\noindent
\textbf{Acknowledgment.} This work was done while the first three
authors were visiting the fourth at Technische Universit\"at Ilmenau.
They thank their host for providing a perfect working environment.
\end{document} |
\begin{document}
\title[]{The dyadic fractional diffusion kernel as a central limit}
\author[]{Hugo Aimar}
\email{haimar@santafe-conicet.gov.ar}
\author[]{Ivana G\'{o}mez}
\email{ivanagomez@santafe-conicet.gov.ar}
\author[]{Federico Morana}
\email{fmorana@santafe-conicet.gov.ar}
\thanks{The research was supported by CONICET, ANPCyT (MINCyT) and UNL}
\subjclass[2010]{Primary 60F05,60G52, 35R11}
\keywords{central limit theorem; dyadic diffusion; fractional diffusion; stable processes; wavelet analysis}
\begin{abstract}
In this paper we obtain the fundamental solution kernel of dyadic diffusions in $\mathbb{R}^+$ as a Central Limit of dyadic mollification of iterations of stable Markov kernels. The main tool is provided by the substitution of classical Fourier analysis by Haar wavelet analysis.
\end{abstract}
\maketitle
\section{Introduction}
The analysis of solutions of nonlocal problems in PDE has received new impulse after the remarkable results obtained by Caffarelli and Silvestre \cite{CaSi07}. For a probabilistic view of these problems see \cite{Val09}, \cite{Valdinocibook16}. Recently in \cite{AcAimFCAA},\cite{AcAimCzech},\cite{AiBoGo13}, a dyadic version of the fractional derivative was introduced and an associated diffusion was solved.
The classical diffusion process, described by the heat equation $\tfrac{\partial u}{\partial t}=\Delta u$, where $\Delta$
denotes the space Laplacian, has as a fundamental solution the Weierstrass kernel $W_t(x)= (4\pi
t)^{-d/2}e^{-\abs{x}^2/4t}$, which is the central limit distribution, for $n\to\infty$, of $\sqrt{n}^{-1}\sum_{j=1}^{n}X_j$,
where the $X_j$'s are identically distributed independent random variables with finite variance $t$ and vanishing mean value.
For our later analysis it is convenient to write the convergence in distribution of $n^{-1/2}\sum_{j=1}^n X_j$ to $W_t$ in terms of the common distribution of the random variables $X_j$, $j\in \mathbb{N}$. For the sake of simplicity let us assume that this distribution is given by the density $g$ in $\mathbb{R}^d$. In other words, $\mathscr{P}(\{X_j\in B\})=\int_Bg(x)dx$ where $B$ is a Borel set in $\mathbb{R}^d$. Hence, since the random variables $X_j$ are independent, the distribution of $S_n=\sum_{j=1}^nX_j$ is given by the convolution $g^n$ of $g$ $n$-times. Precisely, with $g^n=g\ast\cdots\ast g$ $n$-times, we have that $\mathscr{P}(\{S_n\in B\})=\int_Bg^n(x)dx$. On the other hand, $\mathscr{P}(\{n^{-1/2}\sum_{j=1}^nX_j\in B\})=\mathscr{P}(\{S_n\in\sqrt{n}B\})=\int_B(g^n)_{\sqrt{n}}(x)dx$, with $(g^n)_{\sqrt{n}}$ the mollification of $g^n$ by $\sqrt{n}$ in $\mathbb{R}^d$. Precisely, $(g^n)_{\sqrt{n}}(x)=n^{-d/2}g^n(\sqrt{n}x)$. These observations allow us to read the CLT as a vague or Schwartz weak convergence of $(g^n)_{\sqrt{n}}(x)$ to $W_t(x)$ when $n\to\infty$. For every $f$ continuous and compactly supported in $\mathbb{R}^d$, we have that $\int_{\mathbb{R}^d}(g^n)_{\sqrt{n}}(x)f(x)\to\int_{\mathbb{R}^d}W_t(x)f(x) dx$ as $n\to\infty$. Since we shall be working in a non-translation invariant setting, to get the complete analogy we still rewrite the CLT as the weak convergence of the sequence of Markov kernels $K^n_{\sqrt{n}}(x,y)=(g^n)_{\sqrt{n}}(x-y)$ to the Markov Weierstrass kernel $W_t(x-y)$. The kernel $K^n_{\sqrt{n}}(x,y)=\idotsint_{(\mathbb{R}^{d})^{n-1}} g_{\sqrt{n}}(x-x_1)g_{\sqrt{n}}(x_1-x_2)\cdots g_{\sqrt{n}}(x_{n-1}-y)dx_1dx_2\cdots dx_{n-1}$ corresponds to the kernel of the $n$-th iteration of the operator $T_{\sqrt{n}}f(x)=\int_{\mathbb{R}^d}g_{\sqrt{n}}(x-y)f(y) dy$. The difference in the rhythms of the upper index $n$ of the iteration and the lower index $\sqrt{n}$ of mollification is related to the property of finite variance of $g$. 
In the problems considered here the Markov kernels involved have heavy tails and the central equilibria takes place for different proportions between iteration and mollification. There are many books where the classical CLT and some of its extensions are masterly exposed. Let us refer to \cite{Chungbook} as one of them.
In this paper we shall be concerned with diffusions of fractional type associated with dyadic differentiation in the space. The basic setting for our diffusions is $\mathbb{R}^+=\{x\in \mathbb{R}: x>0\}$. In \cite{AcAimCzech} it is proved that the function $u(x,t)$ defined for $x\in \mathbb{R}^+$ and $t>0$, given by
\begin{equation*}
u(x,t)=\sum_{h\in\mathscr{H}}e^{-t\abs{I(h)}^{-s}}\proin{u_0}{h}h(x),
\end{equation*}
with $\mathscr{H}$ the standard Haar system in $L^2(\mathbb{R}^+)$, $I(h)$ the support of $h$ and
$\proin{u_0}{h}=\int_{\mathbb{R}^+}u_0(x)h(x) dx$, solves the problem
\begin{equation*}
\left
\{\begin{array}{ll}
\frac{\partial u}{\partial t}=D^{s} u,\, & x\in\mathbb{R}^{+}, t>0;\\
u(x,0)=u_0(x),\, & x\in \mathbb{R}^+.
\end{array}
\right.
\end{equation*}
with
\begin{equation}\label{eq:derivativefractionalDs}
D^{s}g(x)=\int_{y\in \mathbb{R}^+}\frac{g(x)-g(y)}{\delta (x,y)^{1+s}} dy
\end{equation}
for $0<s<1$ and $\delta(x,y)$ the dyadic distance in $\mathbb{R}^{+}$ (see Section~\ref{sec:dyadycAnalysis} for definitions). The main point in the proof of the above statement is provided by the spectral analysis for $D^s$ in terms of Haar functions. In fact, $D^s h=\abs{I(h)}^{-s}h$. When $0<s<1$, since $h$ is a Lipschitz function with respect to $\delta$, the integral in \eqref{eq:derivativefractionalDs} defining $D^sh$ is absolutely convergent. For the case $s=1$ this integral is generally not convergent; nevertheless, the operator $D^1$ is still well defined on the Sobolev type space of those functions in $L^2(\mathbb{R}^+)$ such that the Haar coefficients $\proin{f}{h}$ satisfy the summability condition $\sum_{h\in\mathscr{H}}\tfrac{\abs{\proin{f}{h}}^2}{\abs{I(h)}^2}<\infty$. For those functions $f$ the first order nonlocal derivative is given by $D^1 f=\sum_{h\in\mathscr{H}}\tfrac{\proin{f}{h}}{\abs{I(h)}}h$. Moreover, with $u_0\in L^2(\mathbb{R}^+)$, the function
\begin{equation*}
u(x,t)=\int_{\mathbb{R}^+}K(x,y;t)u_0(y) dy,
\end{equation*}
with
\begin{equation}\label{eq:NucleoHaarDifusiones}
K(x,y;t)=\sum_{h\in\mathscr{H}}e^{-t\abs{I(h)}^{-1}}h(x)h(y),
\end{equation}
solves
\begin{equation*}
(P) \left
\{\begin{array}{ll}
\frac{\partial u}{\partial t}=D^{1} u,\, & x\in\mathbb{R}^{+}, t>0;\\
u(x,0)=u_0(x),\, & x\in \mathbb{R}^+.
\end{array}
\right.
\end{equation*}
Notice that for each $t>0$ the function of $x\in \mathbb{R}^+$, $u(x,t)$ is in the dyadic Sobolev space and its $D^1$ space derivative belongs to $L^2(\mathbb{R}^+)$.
The kernel $K(\cdot,\cdot;t)$ for fixed $t>0$ is not a convolution kernel. Nevertheless it can be regarded as a Markov transition kernel which, as we shall prove, depends only on $\delta(x,y)$.
In this note we prove that the Markov kernel family $K(\cdot,\cdot;t)$ is the central limit of adequate simultaneous iteration and mollification
of elementary dyadic stable Markov kernels. We shall precisely define stability later, but heuristically it means that the kernel behaves at infinity like a power law of the dyadic distance. The main result is contained in Theorem~\ref{thm:mainresult} in Section~\ref{sec:mainresult}. The basic tool for the proof of our results is the Fourier Haar analysis induced on $\mathbb{R}^+$ by the orthonormal basis of Haar wavelets.
The paper is organized as follows. In Section~\ref{sec:dyadycAnalysis} we introduce the basic facts from dyadic analysis on $\mathbb{R}^+$, in particular the Haar system as an orthonormal basis for $L^2(\mathbb{R}^+)$ and as an unconditional basis for $L^p(\mathbb{R}^+)$, $1<p<\infty$. Section~\ref{sec:Markovdyadickernels} is devoted to introducing the Markov type dyadic kernels. The spectral analysis of the integral operators generated by Markov type dyadic kernels is considered in \S~\ref{sec:spectralanalysis}. Section~\ref{sec:stability} is devoted to introducing the concept of stability and to proving that the kernel in \eqref{eq:NucleoHaarDifusiones} is $1$-stable with parameter $\tfrac{2}{3}t$. The iteration and mollification operators and their relation with stability are studied in Section~\ref{sec:iterationmollification}. Finally, in Section~\ref{sec:mainresult} we state and prove our main result: spectral and $L^p(\mathbb{R}^+)$ ($1<p<\infty$) convergence to the solution of (P).
\section{Some basic dyadic analysis}\label{sec:dyadycAnalysis}
Let $\mathbb{R}^+$ denote the set of nonnegative real numbers. A dyadic interval is a subset of $\mathbb{R}^+$ that can be written as $I=I^j_k=[k2^{-j},(k+1)2^{-j})$ for some integer $j$ and some nonnegative integer $k$. The
family $\mathcal{D}$ of all dyadic intervals can be organized by levels of resolution as follows; $\mathcal{D}=\cup_{j\in \mathbb{Z}}\mathcal{D}^j$, where $\mathcal{D}^j=
\set{I^j_k: k=0,1,2,\ldots}$. The dyadic distance induced on $\mathbb{R}^+$ by $\mathcal{D}$ and the Lebesgue measure is defined by $\delta(x,y)=\inf\set{\abs{I}: I\in
\mathcal{D}, x\in I, y\in I}$ where $\abs{E}$ denotes the one dimensional Lebesgue measure of $E$. It is easy to check that $\delta$ is a distance (ultra-metric) on $\mathbb{R}^+$
and that, since $\abs{x-y}=\inf\{\abs{J}: x\in J, y\in J, J=[a,b), 0\leq a<b<\infty\}$, $\abs{x-y}\leq\delta(x,y)$. Of course the two distances are not equivalent. Pointwise the function $\delta(x,y)$ is larger than the usual distance $d(x,y)=\abs{x-y}$. Set $B_\delta(x,r)=\{y\in \mathbb{R}^+: \delta(x,y)<r\}$ to denote the $\delta$-ball centered a $x$ with positive radius $r$. Then $B_\delta(x,r)$ is the largest dyadic interval containing $x$ with Lebesgue measure less than $r$. For $r>0$, let $j\in \mathbb{Z}$ be such that $2^j<r\leq 2^{j+1}$. Then, for $x\in \mathbb{R}^+$, $B_\delta(x,r)=I$ with $x\in I\in\mathcal{D}$, $2^j=\abs{I}<r\leq 2^{j+1}$. So that $\tfrac{r}{2}\leq\abs{B_\delta(x,r)}<r$. This normality property of $(\mathbb{R}^+,\delta)$ equipped with Lebesgue measure shows that the $\delta$-Hausdorff dimension of intervals in $\mathbb{R}^+$ is one. In particular the integral singularities that negative powers of $\delta$ and $d$ produce have
the same orders. Precisely, for fixed $x\in \mathbb{R}^+$ the functions of $y\in \mathbb{R}^+$ defined by $\delta^{\alpha}(x,y)$ and $\abs{x-y}^\alpha$ have the same local and global integrability properties for $\alpha\in \mathbb{R}$.
\begin{lemma}\label{lemma:deltaintegrability}
\quad
\begin{enumerate}[(a)]
\item The level sets $L(\lambda)=\{(x,y):\delta(x,y)=\lambda\}$ are empty if $\lambda$ is not an integer power of two. On the other hand
$L(2^j)=\cup_{I\in\mathcal{D}^j}(I_l\times I_r)\cup (I_r\times I_l)$ with $I_l$ and $I_r$, the left and right halves of $I\in\mathcal{D}^j$. Hence, $\delta(x,y)=\sum_{j\in \mathbb{Z}}2^j\chi_{L(2^j)}(x,y)$.
\item For $x\in \mathbb{R}^+$ and $r>0$ we have,
\begin{enumerate}[b-i)]
\item
$\frac{c(\alpha)}{2^{1+\alpha}}r^{1+\alpha}\leq \int_{y\in B_{\delta}(x,r)} \delta^{\alpha}(x,y) dy \leq c(\alpha)r^{1+\alpha}$
for $\alpha>-1$ with $c(\alpha)=2^{-1}(1-2^{-(1+\alpha)})^{-1}$;
\item
$\int_{B_{\delta}(x,r)} \delta^{\alpha}(x,y) dy= +\infty$
for $\alpha\leq -1$;
\item
$\tilde{c}(\alpha)r^{1+\alpha}\leq\int_{\{y: \delta(x,y)\geq r\}} \delta^{\alpha}(x,y) dy\leq\frac{\tilde{c}(\alpha)}{2^{1+\alpha}} r^{1+\alpha}$
for $\alpha < -1$ with $\tilde{c}(\alpha)=2^{-1}(1-2^{1+\alpha})^{-1}$;
\item
$\int_{\{y: \delta(x,y)\geq r\}} \delta^{\alpha}(x,y) dy= +\infty$
for $\alpha\geq -1$.
\end{enumerate}
\end{enumerate}
\end{lemma}
\begin{proof}[Proof of (a)] Let $j\in \mathbb{Z}$ fixed. Then $\delta(x,y)=2^j$ if and only if $x$ and $y$ belong to the same $I\in\mathcal{D}^j$, but they do not belong to the same half of $I$. In other words, $(x,y)\in I_l\times I_r$ or $(x,y)\in I_r\times I_l$.
\noindent\textit{Proof of (b).} Fix $x\in \mathbb{R}^+$. Take $0<a<b<\infty$. Then, from \textit{(a)},
\begin{align*}
\int_{\{y\in B_\delta(x,b)\setminus B_\delta(x,a)\}}\delta^\alpha(x,y)dy
&=\int_{\{y: a\leq \delta(x,y)<b\}}\delta^\alpha(x,y)dy\\
&= \sum_{\{j\in \mathbb{Z}: a\leq 2^j<b\}}\int_{\{y:\delta(x,y)=2^j\}}2^{\alpha j}dy\\
&=\frac{1}{2}\sum_{\{j\in \mathbb{Z}: a\leq 2^j<b\}}2^{(1+\alpha)j}\\
&=\frac{1}{2}S(\alpha;a,b).
\end{align*}
When $\alpha\geq -1$, then $S(\alpha;a,b)\to +\infty$ for $b\to\infty$, for every $a$. This proves \textit{(b-iv)}. When $\alpha\leq -1$, then $S(\alpha;a,b)\to+\infty$ for $a\to 0$, for every $b$; this proves \textit{(b-ii)}. For $\alpha>-1$, we have with $2^{j_0}\leq r<2^{j_0+1}$ that
\begin{equation*}
\int_{B_\delta(x,r)}\delta^\alpha(x,y)dy =\frac{1}{2}\lim_{a\to 0}S(\alpha;a,r)=\frac{1}{2}\sum_{j\leq j_0(r)}2^{(1+\alpha)j}
=\frac{1}{2}\frac{1}{1-2^{-(1+\alpha)}}2^{(1+\alpha)j_0}=c(\alpha)2^{(1+\alpha)j_0}.
\end{equation*}
Hence
\begin{equation*}
\frac{c(\alpha)}{2^{1+\alpha}}r^{1+\alpha}\leq \int_{y\in B_{\delta}(x,r)} \delta^{\alpha}(x,y) dy \leq c(\alpha)r^{1+\alpha}.
\end{equation*}
For $\alpha<-1$ we have, with $2^{j_0}\leq r<2^{j_0+1}$, that
\begin{equation*}
\int_{\delta(x,y)\geq r}\delta^{\alpha}(x,y)dy=\frac{1}{2}\lim_{b\to\infty}S(\alpha;r,b)=\frac{1}{2}\sum_{j\geq j_0(r)}2^{(1+\alpha)j}=
\frac{1}{2}\frac{1}{1-2^{1+\alpha}}2^{(1+\alpha)j_0}=\tilde{c}(\alpha)2^{(1+\alpha)j_0},
\end{equation*}
so that
\begin{equation*}
\frac{\tilde{c}(\alpha)}{2^{1+\alpha}} r^{1+\alpha}\geq\int_{\{y: \delta(x,y)\geq r\}} \delta^{\alpha}(x,y) dy\geq\tilde{c}(\alpha)r^{1+\alpha}.
\end{equation*}
\end{proof}
The distance $\delta$ is not translation invariant. In fact, while for small positive $\varepsilon$, $\delta(\tfrac{1}{2}-\varepsilon,\tfrac{1}{2}+\varepsilon)=1$, $\delta(\tfrac{1}{2}+\tfrac{1}{2}-\varepsilon,\tfrac{1}{2}+\tfrac{1}{2}+\varepsilon)=2$. Neither is $\delta$ positively homogeneous. Nevertheless the next statement contains a useful property of dyadic homogeneity.
\begin{lemma}\label{lemma:deltahomogeneity}
Let $j\in \mathbb{Z}$ be given. Then, for $x$ and $y$ in $\mathbb{R}^+$, $\delta(2^jx,2^jy)=2^j\delta(x,y)$.
\end{lemma}
\begin{proof}
Notice first that since $x=y$ is equivalent to $2^jx=2^jy$, we may assume $x\neq y$. Since for $x$ and $y$ in $I\in \mathcal{D}$ we certainly have that $2^jx$ and $2^jy$ belong to $2^jI$, and the measure of $2^jI$ is $2^j$ times the measure of $I$, in order to prove the dyadic homogeneity of $\delta$, we only have to observe that the multiplication by $2^j$ as an operation on $\mathcal{D}$ preserves the order provided by inclusion. In particular $x$ and $y$ belong to $I$ but $x$ and $y$ do not belong to the same half $I_l$ or $I_r$ of $I$, if and only if $2^jx$ and $2^jy$ belong to $2^jI$ but $2^jx$ and $2^jy$ do not belong to the same half of $2^jI$.
\end{proof}
As in the classical case of the Central Limit Theorem, Fourier Analysis will play an important role in our
further development. The basic difference is that in our context the trigonometric expansions are substituted by the most
elementary wavelet analysis, the one associated to the Haar system. Let us introduce the basic notation. Set $h^0_0(x)=\chi_{[0,1/2)}(x)-\chi_{[1/2,1)}(x)$
and, for $j\in \mathbb{Z}$ and $k=0,1,2,3,\ldots$; $h^j_k(x)=2^{j/2}h^0_0(2^jx-k)$. Notice that $h^j_k$ has $L^2$-norm equal to one for every $j$ and $k$. Moreover, $h^j_k$ is supported in $I=I^j_k\in \mathcal{D}^j$. Write $\mathscr{H}$ to denote the sequence of all those Haar wavelets. For $h\in\mathscr{H}$ we shall use the notation $I(h)$ to denote the interval $I$ in $\mathcal{D}$ for which $\supp h = I$. Also $j(h)$ is the only resolution level $j\in \mathbb{Z}$ such that $I(h)\in \mathcal{D}^j$.
The basic analytic fact of the system $\mathscr{H}$ is given by its basic character. In fact, $\mathscr{H}$ is an orthonormal basis for $L^2(\mathbb{R}^+)$. In particular,
for every $f\in L^2(\mathbb{R}^+)$ we have that in the $L^2$-sense $f=\sum_{h\in\mathscr{H}}\proin{f}{h}h$, where, as usual, for real valued $f$, $\proin{f}{h}=\int_{\mathbb{R}^+}f(x)h(x) dx$.
One of the most significant analytic properties of wavelets is its ability to characterize function spaces. For our purposes it will be useful to have in mind the characterization of all $L^p(\mathbb{R}^+)$ spaces for $1<p<\infty$.
\begin{theorem}[Wojtaszczyk \cite{Wojtasbook}]\label{thm:characterizationLp}
For $1<p<\infty$ and some constants $C_1$ and $C_2$ we have
\begin{equation}
C_1\norm{f}_p\leq \norm{\left(\sum_{h\in\mathscr{H}}\abs{\proin{f}{h}}^2\abs{I(h)}^{-1}\chi_{I(h)}\right)^{1/2}}_p\leq C_2\norm{f}_p
\end{equation}
\end{theorem}
\section{Markov dyadic kernels defined in $\mathbb{R}^+$}\label{sec:Markovdyadickernels}
A real function $K$ defined in $\mathbb{R}^+\times \mathbb{R}^+$ is said to be a symmetric Markov kernel if $K$ is nonnegative, $K(x,y)=K(y,x)$ for every $x\in \mathbb{R}^+$ and $y\in \mathbb{R}^+$ and $\int_{\mathbb{R}^+} K(x,y) dy=1$ for every $x\in \mathbb{R}^+$. We are interested in kernels $K$ as above such that $K(x,y)$ depends only on the dyadic distance $\delta(x,y)$ between the points $x$ and $y$ in $\mathbb{R}^+$. The next lemma contains three ways of writing such kernels $K$. The first is just a restatement of the dependence on $\delta$ and the other two shall be used frequently in our further analysis. The lemma also includes relations between the coefficients and their basic properties.
\begin{lemma}\label{lemma:kerneldelta1}
Let $K$ be a real function defined on $\mathbb{R}^+\times \mathbb{R}^+$. Assume that $K$ is nonnegative and depends only on $\delta$, i.e., $\delta(x,y)=\delta(x',y')$ implies $K(x,y)=K(x',y')$, with $\int_{\mathbb{R}^+} K(x_0,y)dy=1$ for some $x_0\in \mathbb{R}^+$. Then, with the notation introduced in Lemma~\ref{lemma:deltaintegrability}~(a) for the level sets of $\delta$, we have
\begin{enumerate}[(1)]
\item $K=\sum_{j\in \mathbb{Z}}k_j\chi_{L(2^j)}$, $k_j\geq 0$, $\sum_{j\in \mathbb{Z}}k_j2^{j-1}=1$ and $K$ is a symmetric Markov kernel.
\item The sequence $\overline{\alpha}=(\alpha_l=2^{-l}(k_{-l}-k_{-l+1}):l\in \mathbb{Z})$ belongs to $l^1(\mathbb{Z})$, $\sum_{l\in \mathbb{Z}}\alpha_l=1$ and the function $\varphi(s)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(s)$ with $\varphi_l(s)=2^{l}\chi_{(0,2^{-l}]}(s)$, provides a representation of $K$ in the sense that $\varphi(\delta(x,y))=K(x,y)$. Moreover, $\int_{\mathbb{R}^+}\abs{\varphi(s)}ds<\infty$ and $\int_{\mathbb{R}^+}\varphi(s)ds=1$.
\item The function $\varphi(s)$ can also be written as $\varphi(s)=\sum_{j\in \mathbb{Z}}\Lambda_j(\varphi_{j+1}(s)-\varphi_j(s))$.
\item\label{item:formulaerelated} The coefficients $\overline{k}=(k_j:j\in \mathbb{Z})$ in (1), $\overline{\alpha}=(\alpha_j:j\in \mathbb{Z})$ in (2) and $\overline{\Lambda}=(\Lambda_j:j\in \mathbb{Z})$ in (3) are related by the formulae
\begin{enumerate}[(\ref{item:formulaerelated}.a)]
\item $\alpha_j = \frac{k_{-j}-k_{-j+1}}{2^j}$
\item $k_j = \sum_{i=j}^{\infty}2^{-i}\alpha_{-i}$
\item $\Lambda_j = \sum_{l>j}\alpha_l $
\item $\alpha_j = \Lambda_{j-1}-\Lambda_j $
\item $\Lambda_j = \tfrac{1}{2}\left(-k_{-j}2^{-j}+\sum_{l<-j}k_l2^l\right) $
\item $k_j = -2^{-j}\Lambda_{-j}+\sum_{i\geq j+1}2^{-i}\Lambda_{-i}$.
\end{enumerate}
\item\label{item:propertiessequences} Some relevant properties of the sequences $\overline{k}$, $\overline{\alpha}$ and $\overline{\Lambda}$ are the following.
\begin{enumerate}[(\ref{item:propertiessequences}.a)]
\item $\overline{\alpha}\in l^1(\mathbb{Z})$;
\item $\sum_{l\leq j}\alpha_l2^l\geq 0$ for every $j\in \mathbb{Z}$;
\item $\abs{\alpha_l}\leq 2$ for every $l\in \mathbb{Z}$;
\item $\lim_{j\to-\infty}\Lambda_j=1$;
\item $\lim_{j\to+\infty}\Lambda_j=0$;
\item $\sum_{l\leq j-1}\Lambda_l2^l\geq\Lambda_j2^j$ for every $j\in \mathbb{Z}$;
\item $\sup_j\Lambda_j=1$;
\item $\inf_j\Lambda_j\geq -1$;
\item if $\overline{k}$ is decreasing then also $\overline{\Lambda}$ is decreasing.
\end{enumerate}
\end{enumerate}
\end{lemma}
\begin{proof}[Proof of (1)]
Since $K$ depends only on $\delta$, then the level sets for $\delta$ are level sets for $K$. Hence $K$ is constant, say $k_j\geq 0$, in $L(2^j)$ for each $j\in \mathbb{Z}$. Notice that the section of $L(2^j)$ at any $x\in \mathbb{R}^+$ has measure $2^{j-1}$, no matter what is $x$. In fact, $\left. L(2^j)\right|_{x}=\{y\in \mathbb{R}^+:(x,y)\in L(2^j)\}=\{y\in \mathbb{R}^+:\delta(x,y)=2^j\}=I$, where $I\in\mathcal{D}$ is the brother of the dyadic interval $J$ of level $j-1$ such that $x\in J$. Hence $\abs{\left. L(2^j)\right|_{x}}=2^{j-1}$. With the above considerations, since $\int_{\mathbb{R}^+}K(x_0,y)dy=1$, we see that
\begin{align*}
1&=\int_{\mathbb{R}^+}K(x_0,y)dy=\sum_{j\in \mathbb{Z}}k_j\int_{\mathbb{R}^+}\chi_{L(2^j)}(x_0,y)dy\\
&=\sum_{j\in \mathbb{Z}}k_j\abs{\left. L(2^j)\right|_{x_0}}=\sum_{j\in \mathbb{Z}}k_j 2^{j-1}\\
&=\sum_{j\in \mathbb{Z}}k_j\abs{\left. L(2^j)\right|_{x}}=\int_{\mathbb{R}^+}K(x,y)dy.
\end{align*}
Then $K$ is a Markov kernel and the series $\sum_{j\in \mathbb{Z}}k_j2^{j-1}$ converges to $1$. The symmetry of $K$ is clear.
\textit{Proof of (2).} Since $\abs{\alpha_l}\leq 2^{-l}k_{-l}+2^{-l}k_{-l+1}$, the fact that $\overline{\alpha}$ belongs to $l^1(\mathbb{Z})$ follows from the fact that $\sum_{j\in \mathbb{Z}}k_j2^j=2$ proved in (1). On the other hand,
\begin{equation*}
\sum_{l\in \mathbb{Z}}\alpha_l=\sum_{l\in \mathbb{Z}}k_{-l}2^{-l}-\sum_{l\in \mathbb{Z}}k_{-l+1}2^{-l}=2-1=1.
\end{equation*}
Let us now check that $\varphi(\delta(x,y))=K(x,y)$. Since $\delta(x,y)$ is an integer power of two and $k_j\to 0$ as $j\to\infty$, we have
\begin{align*}
\varphi(\delta(x,y)) &=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(\delta(x,y))\\
&= \sum_{l\in \mathbb{Z}}\alpha_l 2^l\chi_{(0,2^{-l}]}(\delta(x,y))\\
&=\sum_{l\leq\log_2\tfrac{1}{\delta(x,y)}}2^{-l}(k_{-l}-k_{-l+1})2^l\\
&= \sum_{j\geq\log_2\delta(x,y)}(k_j-k_{j+1})\\
&= k_{\log_2\delta(x,y)}=K(x,y).
\end{align*}
Now, the absolute integrability of $\varphi$ and the value of its integral follow from the formulae $\varphi(s)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(s)$ since $\overline{\alpha}\in l^1(\mathbb{Z})$, $\sum_{l\in \mathbb{Z}}\alpha_l=1$ and $\int_{\mathbb{R}^+}\varphi_l(s)ds=1$.
\textit{Proof of (3).} Fix a positive $s$ and proceed to sum by parts the series defining $\varphi(s)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(s)$. Set $\Lambda_j=\sum_{l>j}\alpha_l$. Since $\alpha_l=\Lambda_{l-1}-\Lambda_l$, we have that
\begin{equation*}
\varphi(s) = \sum_{l\in \mathbb{Z}}(\Lambda_{l-1}-\Lambda_l)\varphi_l(s)
=\sum_{l\in \mathbb{Z}}\Lambda_{l-1}\varphi_l(s)-\sum_{l\in \mathbb{Z}}\Lambda_{l}\varphi_l(s)
= \sum_{l\in \mathbb{Z}}\Lambda_{l}(\varphi_{l+1}(s)-\varphi_l(s)),
\end{equation*}
as desired. Notice, by the way, that $\varphi_{l+1}(s)-\varphi_l(s)$ can be written in terms of Haar functions as $\varphi_{l+1}(s)-\varphi_l(s)=2^{\tfrac{l}{2}}h^l_0(s)$.
\textit{Proof of (4).} Follows from the definitions of $\overline{\alpha}$ and $\overline{\Lambda}$.
\textit{Proof of (5).} Notice first that (5.a) was proved in (2). The nonnegativity of $K$ and (4.b) show (5.b). Properties (5.d) and (5.e) of the sequence $\overline{\Lambda}$ follow from (4.c) and the fact that $\sum_{l\in \mathbb{Z}}\alpha_l=1$ proved in (2). Inequality (5.f) follows from the positivity of $K$ and (4.f).
We will prove (5.g). From (5.d) and (5.e) we have that $\overline{\Lambda}\in l^{\infty}(\mathbb{Z})$. In fact, there exist $j_1<j_2$ in $\mathbb{Z}$ such that $\Lambda_j<2$ for $j<j_1$ and $\Lambda_j>-1$ for $j>j_2$. Since the set $\{\Lambda_{j_1},\Lambda_{j_1+1},\ldots,\Lambda_{j_2}\}$ is finite, we get the boundedness of $\overline{\Lambda}$. On the other hand, since from (5.d) $\lim_{j\to-\infty}\Lambda_j=1$ we have that $\sup_j\Lambda_j\geq 1$. Assume that $\sup_j\Lambda_j> 1$. Then there exists $j_0\in \mathbb{Z}$ such that $\Lambda_{j_0}>1$. Hence, again from (5.d) and (5.e) we must have that for $j<j_3$, $\Lambda_j<\Lambda_{j_0}$ and for $j>j_4$, $\Lambda_j<1<\Lambda_{j_0}$ for some integers $j_3<j_4$. So that there exists $j_5\in \mathbb{Z}$ such that $\Lambda_{j_5}\geq\Lambda_j$ for every $j\in \mathbb{Z}$ and $\Lambda_{j_5}>1$. Now
\begin{equation*}
2^{j_5}\Lambda_{j_5}=\sum_{l\leq j_5 -1}\Lambda_{j_5}2^l>\sum_{l\leq j_5-1}\Lambda_l2^l
\end{equation*}
which contradicts (5.f) with $j=j_5$.
To prove (5.h), assume that $\inf_j\Lambda_j<-1$. Choose $j_0\in \mathbb{Z}$ such that $\Lambda_{j_0}<-1$. Then from (5.f)
\begin{equation*}
\Lambda_{j_0+1}\leq 2^{-(j_0+1)}\sum_{l\leq j_0}\Lambda_l2^l
=\sum_{l\leq j_0}\Lambda_l2^{l-(j_0+1)}
=\frac{1}{2}\left(\Lambda_{j_0}+\sum_{l< j_0}\Lambda_l2^{l-j_0}\right)
\leq\frac{1}{2}(\Lambda_{j_0}+1).
\end{equation*}
In the last inequality we used (5.g). Let us prove, inductively, that $\Lambda_{j_0+m}\leq\tfrac{1}{2}(\Lambda_{j_0}+1)$ for every $m\in \mathbb{N}$. Assume that the above inequality holds for $1\leq m\leq m_0$ and let us prove it for $m_0+1$.
\begin{align*}
\Lambda_{j_0+(m_0+1)}&\leq \sum_{l<j_0+m_0+1}2^{l-(j_0+m_0+1)}\Lambda_l\\
&=2^{-m_0-1}\left(\sum_{l=j_0}^{j_0+m_0}2^{l-j_0}\Lambda_l+\sum_{l<j_0}2^{l-j_0}\Lambda_l\right)\\
&=2^{-m_0-1}\left(\sum_{l=1}^{m_0}2^{l}\Lambda_{j_0+l}+\Lambda_{j_0}+\sum_{l<j_0}2^{l-j_0}\Lambda_l\right)\\
&\leq 2^{-m_0-1}\left(\sum_{l=1}^{m_0}2^{l-1}(\Lambda_{j_0}+1)+\Lambda_{j_0}+\sum_{l<j_0}2^{l-j_0}\right)\\
&=2^{-m_0-1}((2^{m_0}-1)(\Lambda_{j_0}+1)+\Lambda_{j_0}+1)\\
&=\frac{1}{2}(\Lambda_{j_0}+1).
\end{align*}
Property (5.c) for the sequence $\overline{\alpha}$ follows from (4.d), (5.g) and (5.h). Item (5.i) follows from (4.a) and (4.d).
\end{proof}
In the sequel we shall write $\mathscr{K}$ to denote the set of all nonnegative kernels defined on $\mathbb{R}^+\times \mathbb{R}^+$ that depend only on $\delta$ and satisfy, for some $x_0\in \mathbb{R}^+$, $\int_{\mathbb{R}^+}K(x_0,y)dy=1$.
Let us finish this section by proving a lemma that shall be used later.
\begin{lemma}\label{lemma:basiccharacterizationK}
Let $\overline{\Lambda}=(\Lambda_j:j\in \mathbb{Z})$ be a decreasing sequence of real numbers satisfying
(5.d) and (5.e). Then there exists a unique $K\in\mathscr{K}$ such that the sequence that (3) of Lemma~\ref{lemma:kerneldelta1} associates to $K$ is the given $\overline{\Lambda}$.
\end{lemma}
\begin{proof}
Define $K(x,y)=\sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)\varphi_j(\delta(x,y))$. Since $\overline{\Lambda}$ is decreasing the coefficients in the above series are all nonnegative. On the other hand, from (5.d) and (5.e) we have that $\sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)=1$. Hence, for every $x\in \mathbb{R}^+$ we have
\begin{equation*}
\int_{y\in \mathbb{R}^+}K(x,y)dy = \sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)\int_{y\in \mathbb{R}^+}\varphi_j(\delta(x,y))dy
= \sum_{j\in \mathbb{Z}}(\Lambda_{j-1}-\Lambda_j)=1
\end{equation*}
So that $K\in\mathscr{K}$.
\end{proof}
\section{The spectral analysis of the operators induced by kernels in $\mathscr{K}$}\label{sec:spectralanalysis}
For $K\in\mathscr{K}$ and $f$ continuous with bounded support in $\mathbb{R}^+$ the integral $\int_{\mathbb{R}^+}K(x,y)f(y)dy$ is well defined and finite for each $x\in \mathbb{R}^+$. Actually each $K\in\mathscr{K}$ determines an operator which is well defined and bounded on each $L^p(\mathbb{R}^+)$ for $1\leq p\leq\infty$.
\begin{lemma}
Let $K\in\mathscr{K}$ be given. Then for $f\in L^p(\mathbb{R}^+)$ the integral $\int_{\mathbb{R}^+}K(x,y)f(y)dy$ is absolutely convergent for almost every $x\in \mathbb{R}^+$. Moreover,
\begin{equation*}
Tf(x)=\int_{\mathbb{R}^+}K(x,y)f(y) dy
\end{equation*}
defines a bounded (non-expansive) operator on each $L^p(\mathbb{R}^+)$, $1\leq p\leq\infty$. Precisely, $\norm{Tf}_p\leq\norm{f}_p$ for $f\in L^p(\mathbb{R}^+)$.
\end{lemma}
\begin{proof}
Notice first that the function $K(x,y)f(y)=\varphi(\delta(x,y))f(y)$ is measurable as a function defined on $\mathbb{R}^+ \times\mathbb{R}^+$, for every measurable $f$ defined on $\mathbb{R}^+$. The case $p=\infty$ follows directly from the facts that $K$ is a Markov kernel and that $K(x,y)\abs{f(y)}\leq K(x,y)\norm{f}_\infty$. For $p=1$ using Tonelli's theorem we get
\begin{equation*}
\int_{x\in \mathbb{R}^+}\left(\int_{y\in \mathbb{R}^+}K(x,y)\abs{f(y)}dy\right)dx=
\int_{y\in \mathbb{R}^+}\abs{f(y)}\left(\int_{x\in \mathbb{R}^+}K(x,y)dx\right)dy=\norm{f}_1.
\end{equation*}
Hence $\int_{\mathbb{R}^+}K(x,y)f(y) dy$ is absolutely convergent for almost every $x$ and $\norm{Tf}_1\leq\norm{f}_1$. Assume that $1<p<\infty$ and take $f\in L^p(\mathbb{R}^+)$. Then
\begin{align*}
\abs{Tf(x)}^p &\leq \left(\int_{\mathbb{R}^+}K(x,y)\abs{f(y)} dy\right)^p=\left(\int_{\mathbb{R}^+}K(x,y)^{\tfrac{1}{p'}}K(x,y)^{\tfrac{1}{p}}\abs{f(y)} dy\right)^p\\
&\leq \left(\int_{\mathbb{R}^+}K(x,y) dy\right)^{\tfrac{p}{p'}}\left(\int_{\mathbb{R}^+}K(x,y)\abs{f(y)}^p dy\right)\\
&=\int_{\mathbb{R}^+}K(x,y)\abs{f(y)}^p dy.
\end{align*}
Hence $\norm{Tf}^p_p=\int_{\mathbb{R}^+}\abs{Tf(x)}^p dx\leq \int_{y\in\mathbb{R}^+}\left(\int_{x\in\mathbb{R}^+}K(x,y) dx\right)\abs{f(y)}^p dy=\norm{f}^p_p$.
\end{proof}
The spectral analysis of the operators $T$ defined by kernels in $\mathscr{K}$ is given in the next result.
\begin{theorem}\label{thm:autovalores}
Let $K\in\mathscr{K}$ and let $T$ be the operator in $L^2(\mathbb{R}^+)$ defined by $Tf(x)=\int_{\mathbb{R}^+}K(x,y)f(y) dy$. Then the Haar functions are eigenfunctions for $T$ and the eigenvalues are given by the sequence $\overline{\Lambda}$ introduced in Lemma~\ref{lemma:kerneldelta1}. Precisely, for each $h\in\mathscr{H}$
\begin{equation*}
Th=\Lambda_{j(h)}h:=\lambda(h) h,
\end{equation*}
where $j(h)$ is the level of the support of $h$, i.e. $\operatorname{supp} h\in\mathcal{D}^{j(h)}$.
\end{theorem}
\begin{proof}
Since the sequence $(\alpha_l:l\in \mathbb{Z})$ belongs to $\ell^1(\mathbb{Z})$, we can interchange orders of integration and summation
in order to compute $Th$. In fact,
\begin{equation*}
T h(x) = \int_{y\in\mathbb{R}^+} \varphi(\delta(x,y))h(y) dy
=\int_{y\in\mathbb{R}^+}\left(\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(\delta(x,y))\right) h(y) dy
= \sum_{l\in \mathbb{Z}}\alpha_l\left(2^{l}\int_{\{y: \delta(x,y)\leq 2^{-l}\}}h(y)dy\right).
\end{equation*}
Let us prove that
\begin{equation*}
\psi(x,l)=2^{l}\int_{\{y: \delta(x,y)\leq 2^{-l}\}}h(y) dy=\chi_{\{l>j(h)\}}(l) h(x).
\end{equation*}
If $x\notin I(h)$, since $\{y:\delta(x,y)\leq 2^{-l}\}$ is the only dyadic interval $I_l^x$ containing $x$ of length $2^{-l}$, only two situations are possible, $I_l^x\cap I(h)=\emptyset$ or $I_l^x\supset I(h)$; in both cases the integral vanishes and $\psi(x,l)=0=\chi_{\{l>j(h)\}}(l) h(x)$. Take now $x\in I(h)$. Assume first that $x\in I_l(h)$ (the left half of $I(h)$). So that $\psi(x,l)=2^{l}\int_{I_l^x}h(y) dy=0$ if $l\leq j(h)$, since $I_l^x\supseteq I(h)$. When $l>j(h)$ we have that $h\equiv \abs{I(h)}^{-1/2}$ on $I_l^x$, hence $\psi(x,l)=2^{l}\abs{I(h)}^{-1/2}\abs{I_l^x}=\abs{I(h)}^{-1/2}=h(x)$. In a similar way, for $x\in I_r(h)$, we get $\psi(x,l)=-\abs{I(h)}^{-1/2}=h(x)$.
\end{proof}
Notice that the eigenvalues $\lambda(h)$ tend to zero when the resolution $j(h)$ tends to infinity. Moreover this convergence is monotonic when all the $\alpha_l$ are nonnegative. Notice also that the eigenvalues depend only on the resolution level of $h$, but not on the position $k$ of its support. Sometimes we shall write $\lambda_j$, $j\in \mathbb{Z}$, instead of $\lambda(h)$ when $j$ is the scale of the support of $h$. With the above result, and using the fact that the Haar system $\mathscr{H}$ is an orthonormal basis for $L^2(\mathbb{R}^+)$, we see that the action of $T$ on $L^2(\mathbb{R}^+)$ can be regarded as a multiplier operator on the scales.
\begin{lemma}
Let $K$ and $T$ be as in Theorem~\ref{thm:autovalores}. The diagram
\begin{center}
\begin{tikzpicture}
\matrix (m) [matrix of math nodes,row sep=3em,column sep=4em,minimum width=2em]
{
L^2(\mathbb{R}^+) & \ell^2(\mathbb{Z}) \\
L^2(\mathbb{R}^+) & \ell^2(\mathbb{Z}) \\};
\path[-stealth]
(m-1-1) edge node [left] {$T$} (m-2-1)
edge node [below] {$H$} (m-1-2)
(m-2-1.east|-m-2-2) edge node [below] {$H$}
node [above] {} (m-2-2)
(m-1-2) edge node [right] {$M$} (m-2-2);
\end{tikzpicture}
\end{center}
commutes, where $H(f)=(\proin{f}{h}: h\in\mathscr{H})$ and $M(a_h:h\in\mathscr{H})=(\lambda(h)a_h:h\in\mathscr{H})$. In particular, $\norm{Tf}^2_2=\sum_{h\in\mathscr{H}}
\lambda^2(h)\abs{\proin{f}{h}}^2$.
\end{lemma}
The characterization of the space $L^p(\mathbb{R}^+)$ ($1<p<\infty$), Theorem~\ref{thm:characterizationLp} above, provides a similar result for the whole scale of Lebesgue spaces, $1<p<\infty$ with the only caveat that when $p\neq 2$ the norms are only equivalent. The next statement contains this observation.
\begin{theorem}\label{thm:op.Lp.haar}
With $K$ and $T$ as before and $1<p<\infty$ we have that
\begin{equation*}
\norm{Tf}_p\simeq\norm{\biggl(\sum_{h\in\mathscr{H}}(\lambda(h))^2\abs{\proin{f}{h}}^2\abs{I(h)}^{-1}\chi_{I(h)}\biggr)^{\tfrac{1}{2}}}_p
\end{equation*}
with constants which do not depend on $f$.
\end{theorem}
\begin{corollary}\label{coro:representationK}
For every $K\in\mathscr{K}$ and $(\lambda(h):h\in\mathscr{H})$ as in Theorem~\ref{thm:autovalores} we have the representation
\begin{equation*}
K(x,y)=\sum_{h\in\mathscr{H}}\lambda(h)h(x)h(y).
\end{equation*}
\end{corollary}
\begin{proof}
For $f=\sum_{h\in\mathscr{H}}\proin{f}{h}h$ with $\proin{f}{h}\neq 0$ only for finitely many Haar functions $h\in\mathscr{H}$, we have that
\begin{align*}
\int_{\mathbb{R}^+}K(x,y)f(y)dy=Tf(x)&=\sum_{h\in\mathscr{H}}\proin{f}{h}Th(x)\\
&=\sum_{h\in\mathscr{H}}\left(\int_{y\in\mathbb{R}^+}f(y)h(y)dy\right)\lambda(h)h(x)\\
&=\int_{y\in\mathbb{R}^+}\left(\sum_{h\in\mathscr{H}}\lambda(h)h(y)h(x)\right)f(y)dy.
\end{align*}
Since the space of such functions $f$ is dense in $L^2(\mathbb{R}^+)$ we have that $K(x,y)=\sum_h\lambda(h)h(x)h(y)$.
\end{proof}
\section{Stability of Markov kernels}\label{sec:stability}
In the case of the classical CLT the key properties of the distribution of the independent random variables $X_j$ are contained in the Gaussian central limit itself. Precisely, $(2\pi t)^{-1/2}e^{-\abs{x}^2/4t}$ is the distribution limit of $n^{-1/2}\sum_{j=1}^n X_j$ when the $X_j$ are independent and equi-distributed with variance $t$ and mean zero. Our ``gaussian'' is the Markov kernel $K_t(x,y)$ defined in $\mathbb{R}^+\times \mathbb{R}^+$ by applying Lemma~\ref{lemma:basiccharacterizationK} to the sequence $\Lambda_j=e^{-t2^{j}}$, $j\in \mathbb{Z}$, for fixed $t$. We may also use the Haar representation of $K_t(x,y)$ given by Corollary~\ref{coro:representationK} in \S~\ref{sec:spectralanalysis}. In this way we can write this family of kernels as $K_t(x,y)=\sum_{h\in\mathscr{H}}e^{-t2^{j(h)}}h(x)h(y)$. As we shall see, after obtaining estimates for the behavior of $K_t$ for large $\delta(x,y)$, this kernel has heavy tails. In particular, the analogue of the variance given by $\int_{y\in \mathbb{R}^+}K_t(x,y)\delta^2(x,y)dy$ is not finite. This kernel looks more like a dyadic version of Cauchy type distributions than of Gauss type distributions. This is in agreement with the fact that $K_t$ solves a fractional differential equation and the natural underlying processes are of Lévy type instead of Wiener--Brownian type. As a consequence, the classical moment conditions have to be substituted by stability type behavior at infinity.
\begin{lemma}\label{lemma:gaussianPsistability23}
Set for $r>0$
\begin{equation*}
\psi(r)=\frac{1}{r}\left(\sum_{j\geq 1}2^{-j}e^{-(2^jr)^{-1}}-e^{-r^{-1}}\right).
\end{equation*}
Then $\psi$ is well defined on $\mathbb{R}^+$ with values in $\mathbb{R}^+$. And
\begin{equation*}
r^{2}\psi(r)\to \frac{2}{3} \textrm{\quad as \quad} r\to\infty.
\end{equation*}
\end{lemma}
\begin{proof}
Since $e^{-(2^jr)^{-1}}$ is bounded above we see that $\psi(r)$ is finite for every $r>0$. On the other hand since
$\psi(r)=\tfrac{1}{r}\sum_{j\geq 1}2^{-j}[e^{-(2^jr)^{-1}}-e^{-r^{-1}}]$ and terms in brackets are positive we see that $\psi(r)>0$ for every $r>0$. Let us check the behavior of $\psi$ at infinity
\begin{equation*}
r^{2}\psi(r)=\sum_{j\geq 1}\frac{2^{-j}[e^{-(2^jr)^{-1}}-e^{-r^{-1}}]}{r^{-1}}\to \sum_{j\geq 1}2^{-j}(1-2^{-j})=\frac{2}{3}.
\end{equation*}
\end{proof}
\begin{lemma}\label{lemma:stability23}
Let $t>0$ be given. Set $\Lambda^{(t)}_j=e^{-t2^{j}}$, $j\in \mathbb{Z}$. Let $K_t(x,y)$ be the kernel that Lemma~\ref{lemma:basiccharacterizationK} associates to $\overline{\Lambda^{(t)}}$. Then $K_t\in\mathscr{K}$ and, since $K_t(x,y)=\tfrac{1}{t}\psi(\tfrac{\delta(x,y)}{t})$ with $\psi$ as in Lemma~\ref{lemma:gaussianPsistability23}, we have
\begin{equation}\label{eq:propertystabilityone}
\delta(x,y)^{2}K_t(x,y)\to \frac{2}{3}\,t
\end{equation}
for $\delta(x,y)\to+\infty$.
\end{lemma}
\begin{proof}
Since $\Lambda^{(t)}_{j+1}<\Lambda^{(t)}_{j}$, for every $j\in \mathbb{Z}$, $\lim_{j\to-\infty}\Lambda^{(t)}_{j}=1$ and $\lim_{j\to +\infty}\Lambda^{(t)}_{j}=0$, we can use Lemma~\ref{lemma:basiccharacterizationK} in order to obtain the kernel $K_t(x,y)$. Now from Corollary~\ref{coro:representationK} we have that $K_t(x,y)=\sum_{h\in\mathscr{H}}e^{-t2^{j(h)}}h(x)h(y)$. Let us check, following the lines of \cite{AcAimFCAA}, that $K_t(x,y)=\tfrac{1}{t}\psi(\tfrac{\delta(x,y)}{t})$, with $\psi$ as in Lemma~\ref{lemma:gaussianPsistability23}. In fact, since $K_t(x,y)=\sum_{h\in\mathscr{H}}e^{-t\abs{I(h)}^{-1}}h(x)h(y)$, a Haar function $h\in\mathscr{H}$ contributes to the sum when $x$ and $y$ both belong to $I(h)$. The smallest of such intervals, say $I_0=I(h^{(0)})$, is precisely the dyadic interval that determines $\delta(x,y)$. Precisely, $\abs{I_0}=\delta(x,y)$. Let $h^{(1)}$ and $I_1=I(h^{(1)})$ be the wavelet and its dyadic support corresponding to one level less of resolution than that of $I_0$ itself. In more familiar terms, $I_0$ is one of the two sons of $I_1$. In general, for each resolution level less than that of $I_0$ we find one and only one $I_i=I(h^{(i)})$ with $I_0\subset I_1\subset\ldots\subset I_i\subset\ldots$ and $\abs{I_i}=2^i\abs{I_0}$. We have to observe that, except for $I_0$, where $x$ and $y$ must belong to different halves $I_{0,r}$ or $I_{0,l}$ of $I_0$ because of the minimality of $I_0$, for all the other $I_i$, $x$ and $y$ must belong to the same half $I_{i,l}$ or $I_{i,r}$ of $I_i$ because they are all dyadic intervals. These properties also show that $h^{(0)}(x)h^{(0)}(y)=-\abs{I_0}^{-1}=-\delta^{-1}(x,y)$ and, for $i\geq 1$, $h^{(i)}(x)h^{(i)}(y)=2^{-i}\abs{I_0}^{-1}=(2^i\delta(x,y))^{-1}$. Hence
\begin{align*}
K_t(x,y) &= -\frac{e^{-\tfrac{t}{\delta(x,y)}}}{\delta(x,y)}+\sum_{i\geq 1}e^{-\tfrac{t2^{-i}}{\delta(x,y)}}\frac{2^{-i}}{\delta(x,y)}\\
&= \frac{1}{\delta(x,y)}\left[\sum_{i\geq 1}2^{-i}e^{-\tfrac{t}{\delta(x,y)}2^{-i}}-e^{-\tfrac{t}{\delta(x,y)}}\right]\\
&= \frac{1}{t}\psi\left(\frac{\delta(x,y)}{t}\right).
\end{align*}
So that
\begin{equation*}
\delta(x,y)^{2}K_t(x,y)=\delta(x,y)^{2}\frac{1}{t}\psi\left(\frac{\delta(x,y)}{t}\right)
=t\left(\frac{\delta(x,y)}{t}\right)^{2}\psi\left(\frac{\delta(x,y)}{t}\right)
\end{equation*}
which from the result of Lemma~\ref{lemma:gaussianPsistability23} tends to $\tfrac{2}{3}\,t$ when $\delta(x,y)\to +\infty$.
\end{proof}
Notice that from Lemma~\ref{lemma:deltaintegrability}-\textit{b.iv)} and the behavior at infinity of $K_t(x,y)$ provided in the previous result, we have
\begin{equation*}
\int_{\mathbb{R}^+}K_t(x,y)\delta^2(x,y)dy=+\infty
\end{equation*}
for every $x\in \mathbb{R}^+$. Moreover, $\int_{\mathbb{R}^+}K_t(x,y)\delta(x,y)dy=+\infty$. The adequate substitute for the property of finiteness of moments is provided by the stability involved in property \eqref{eq:propertystabilityone} in Lemma~\ref{lemma:stability23}. Since this property is going to be crucial in our main result we introduce formally the concept of stability. We say that a kernel $K$ in $\mathscr{K}$ is \textbf{\boldmath{$1$}-stable with parameter \boldmath{$\sigma>0$}} if
\begin{equation*}
\delta(x,y)^2 K(x,y)\to \sigma
\end{equation*}
for $\delta(x,y)\to\infty$. In the above limit, since the dimension of $\mathbb{R}^+$ with the metric $\delta$ equals one, we think of $\delta^2$ as $\delta^{1+1}$, one exponent for the dimension and the other for the order of stability.
Since for $K\in\mathscr{K}$ we have $K(x,y)=\varphi(\delta(x,y))$, the property of $1$-stability can be written as a condition for the behavior at infinity of profile $\varphi$. In particular, with the notation of Lemma~\ref{lemma:kerneldelta1}, the stability is equivalent to $4^jk_j\to\sigma$ as $j\to\infty$.
\section{Iteration and mollification in $\mathscr{K}$}\label{sec:iterationmollification}
As we have already observed in the introduction, the two basic operations on the identically distributed independent random variables $X_i$ in order to obtain the means that converge in distribution to the Central Limit, translate into iterated convolution and mollification. In this section, we shall be concerned with two operations, iteration and mollification on $\mathscr{K}$ and on the subfamily $\mathscr{K}^1$ of $1$-stable kernels in $\mathscr{K}$.
In the sequel, given a kernel $K$ in $\mathscr{K}$, $\bar{\Lambda}$, $\bar{\alpha}$ and $\bar{k}$ are the sequences defined on Lemma~\ref{lemma:kerneldelta1} associated to $K$. When a family of kernels in $\mathscr{K}$ is described by an index associated to $K$, say $K_i$, the corresponding sequences are denoted by $\bar{\Lambda}^i$, $\bar{\alpha}^i$ and $\bar{k}^i$.
\begin{lemma}
\begin{enumerate}[(a)]
\item For $K_1$ and $K_2\in\mathscr{K}$, the kernel
$$K_3(x,y)=(K_1\ast K_2)(x,y)=\int_{z\in \mathbb{R}^+}K_1(x,z)K_2(z,y)dz$$ is well defined; $K_3\in\mathscr{K}$ with
\begin{equation*}
\alpha^3_j=\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j
\end{equation*}
for every $j\in \mathbb{Z}$;
\item $(\mathscr{K},\ast)$ and $(\mathscr{K}^1,\ast)$ are semigroups;
\item $\lambda^3_j=\lambda^1_j\lambda^2_j$ for every $j\in \mathbb{Z}$.
\end{enumerate}
\end{lemma}
\begin{proof}[Proof of (a)]
Let $K_i(x,y)=\varphi^i(\delta(x,y))$, $i=1,2$; with $\varphi^i(s)=\sum_{j\in \mathbb{Z}}\alpha^i_j\varphi_j(s)$, $\sum_{j\in \mathbb{Z}}\alpha^i_j=1$, $\sum_{j\in \mathbb{Z}}\abs{\alpha^i_j}<\infty$. Take $x\neq y$ both in $\mathbb{R}^+$ and set $I^*$ to denote the smallest dyadic interval containing $x$ and $y$. Then $\abs{I^*}=\delta(x,y)$ and $x$ and $y$ belong to different halves of $I^*$. From the above properties of the sequences $\bar{\alpha}^i$, $i=1,2$; we can interchange the orders of summation and integration in order to obtain
\begin{align*}
K_3(x,y) &= \int_{z\in \mathbb{R}^+}K_1(x,z)K_2(z,y)dz\\
&=\sum_{j\in \mathbb{Z}}\sum_{l\in \mathbb{Z}}2^j\alpha^1_j2^l\alpha^2_l\int_{z\in \mathbb{R}^+}\chi_{(0,2^{-j}]}(\delta(x,z))\chi_{(0,2^{-l}]}(\delta(z,y))dz\\
&=\sum_{j\in \mathbb{Z}}2^j\alpha^1_j\sum_{l\in \mathbb{Z}}2^l\alpha^2_l\abs{I^j_{k(x)}\cap I^l_{k(y)}}
\end{align*}
where $I^j_{k(x)}$ is the only dyadic interval in $\mathcal{D}^j$ such that $x\in I^j_{k(x)}$. Notice that the intersection of $I^j_{k(x)}$ and $I^l_{k(y)}$ is empty when $j$ and $l$ are both larger than the level $j^*$ of $I^*$. On the other hand, when $j$ or $l$ is smaller than or equal to $j^*$, the intersection is the smallest one. Say, if $j\leq j^*$ and $l>j$, $I^j_{k(x)}\cap I^l_{k(y)}=I^l_{k(y)}$.
With the above considerations we are now in position to compute $K_3(x,y)$ in terms of the sequences $\bar{\alpha}^i$ and $\bar{\lambda}^i$ as follows, with $c(j^*)=\{(j,l)\in \mathbb{Z}^2:j>j^* \textrm{ and } l>j^*\}$,
\begin{align*}
K_3(x,y) &= \sum\sum_{(j,l)\in \mathbb{Z}^2}2^{j+l}\alpha^1_j\alpha^2_l\abs{I^j_{k(x)}\cap I^l_{k(y)}}\\
&= \sum\sum_{\mathbb{Z}^2\setminus c(j^*)}2^{j+l}\alpha^1_j\alpha^2_l\abs{I^j_{k(x)}\cap I^l_{k(y)}}\\
&= \sum_{j\leq j^*}2^j\alpha^1_j\sum_{l>j}2^l\alpha^2_l\abs{I^l_{k(y)}}
+ \sum_{l\leq j^*}2^l\alpha^2_l\sum_{j>l}2^j\alpha^1_j\abs{I^j_{k(x)}}
+ \sum_{l\leq j^*}2^l\alpha^2_l2^l\alpha^1_l\abs{I^l_{k(y)}}\\
&= \sum_{j\leq j^*}2^j\alpha^1_j\lambda^2_j+\sum_{l\leq j^*}2^l\alpha^2_l\lambda^1_l+\sum_{l\leq j^*}2^l\alpha^1_l\alpha^2_l\\
&= \sum_{j\leq j^*}\left[\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j\right]2^j\\
&= \sum_{j\in \mathbb{Z}}\left[\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j\right]\varphi_j(\delta(x,y)).
\end{align*}
In other words, $K_3(x,y)=\varphi^3(\delta(x,y))$ with $\varphi^3(s)=\sum_{j\in \mathbb{Z}}\alpha^3_j\varphi_j(s)$
and $\alpha^3_j=\alpha^1_j\lambda^2_j+\alpha^2_j\lambda^1_j+\alpha^1_j\alpha^2_j$. Since, as it is easy to check by Tonelli's theorem $\int_{\mathbb{R}^+} K_3(x,y)dy=1$, we have that $K_3\in\mathscr{K}$.
\textit{Proof of (b).} We only have to show that if $K_1$ and $K_2$ are $1$-stable kernels in $\mathscr{K}$, then $K_3=K_1\ast K_2$ is also $1$-stable. As we observed at the end of Section~\ref{sec:stability} for $K_i$ $(i=1,2)$ we have $4^jk^i_j\to\sigma_i$ when $j\to+\infty$. We have to prove that $4^jk^3_j\to \sigma_1+\sigma_2$ when $j\to+\infty$. By Lemma~\ref{lemma:kerneldelta1}, item (4.b), we can write
\begin{align*}
4^jk^3_j &= 4^j\sum_{i\geq j}2^{-i}\alpha^3_{-i}\\
&= 4^j\sum_{i\geq j}2^{-i}[\alpha^1_{-i}\lambda^2_{-i}+\alpha^2_{-i}\lambda^1_{-i}+\alpha^1_{-i}\alpha^2_{-i}]\\
&= 4^j\sum_{i\geq j}(2^{-i}\alpha^1_{-i})\lambda^2_{-i}+4^j\sum_{i\geq j}(2^{-i}\alpha^2_{-i})\lambda^1_{-i}+
4^j\sum_{i\geq j}2^{-i}\alpha^1_{-i}\alpha^2_{-i}\\
&= I(j)+II(j)+III(j).
\end{align*}
We claim that $I(j)\to\sigma_1$, $II(j)\to\sigma_2$ and $III(j)\to 0$ when $j\to+\infty$. Let us prove that $I(j)\to\sigma_1$, $j\to +\infty$. Since
\begin{equation*}
\abs{I(j)-\sigma_1}\leq \abs{4^j\sum_{i\geq j}2^{-i}\alpha^1_{-i}(\lambda^2_{-i}-1)}+\abs{4^jk^1_j-\sigma_1}
\end{equation*}
from the fact that $K_1\in\mathscr{K}^1$ with parameter $\sigma_1$ and because of (5.d) in Lemma~\ref{lemma:kerneldelta1} we have that $I(j)\to\sigma_1$ as $j\to\infty$. The fact $II(j)\to\sigma_2$ follows the same pattern. Let us finally estimate $III(j)$. Notice that from (4.a) in Lemma~\ref{lemma:kerneldelta1} we have
\begin{align*}
\abs{III(j)}&\leq 4^j\sum_{i\geq j}2^{-i}\abs{\alpha^1_{-i}}\abs{\alpha^2_{-i}}\\
&\leq 4^j\left(\sum_{i\geq j}2^{-i}\abs{\alpha^1_{-i}}\right)\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right)\\
&= 4^j\left(\sup_{i\geq j}2^{-i}\abs{\frac{k^1_i-k^1_{i+1}}{2^{-i}}}\right)\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right)\\
&\leq 2\,4^j\sup_{i\geq j}k^1_i\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right)\\
&= 2\,4^j k^1_{i(j)}\left(\sum_{l\geq j}\abs{\alpha^2_{-l}}\right),
\end{align*}
where, since $k^1_i\to 0$ when $i\to \infty$, $i(j)\geq j$ is a point at which the supremum of the $k^1_i$'s for $i\geq j$ is necessarily attained. So that $4^jk^1_{i(j)}=4^{j-i(j)}4^{i(j)}k^1_{i(j)}$ is bounded above because $K_1\in\mathscr{K}^1$. On the other hand, since $\bar{\alpha}^2\in l^1(\mathbb{Z})$ the tail $\sum_{l\geq j}\abs{\alpha^2_{-l}}$ tends to zero as $j\to\infty$.
\textit{Proof of (c).} Since each $K_i$, $i=1,2$, can be regarded as the kernel of the operator $T_if(x)=\int_{y\in \mathbb{R}^+}K_i(x,y)f(y)dy$, and $K_3$ is the kernel of the composition of $T_1$ and $T_2$, we have that
\begin{equation*}
T_3h=(T_2\circ T_1)h=T_2(T_1h)=T_2(\lambda^1(h)h)=\lambda^1(h)T_2h=\lambda^1(h)\lambda^2(h)h.
\end{equation*}
Since $\lambda^1$ and $\lambda^2$ depend only on the scale $j$ of $h$, so does $\lambda^3=\lambda^1\lambda^2$.
\end{proof}
\begin{corollary}
Let $K\in\mathscr{K}^1$ with parameter $\sigma$, then for $n$ a positive integer the kernel $K^{(n)}$ obtained as the composition of $K$ with itself $n$ times, i.e.,
\begin{equation*}\label{coro:compositonKntimes}
K^{(n)}(x,y)=\idotsint_{(\mathbb{R}^+)^{n-1}}K(x,y_1)\cdots K(y_{n-1},y)dy_1\cdots dy_{n-1}
\end{equation*}
belongs to $\mathscr{K}^1$ with parameter $n\sigma$ and eigenvalues $\lambda^{(n)}_j=(\lambda_j)^n$, $j\in \mathbb{Z}$, with $\lambda_j$ the eigenvalues of $K$.
\end{corollary}
Trying to keep the analogy with the classical CLT, the mollification operator, that we have to define, is expected to preserve $\mathscr{K}^1$ producing a contraction of the parameter $\sigma$ in order to counteract the dilation provided by the iteration procedure.
The first caveat that we have in our search for dilations is that, even when $\mathbb{R}^+$ is closed under (positive) dilations, the dyadic system is not. This means that usually $K(cx,cy)$ does not even belong to $\mathscr{K}$ when $K\in\mathscr{K}$ and $c>0$. Nevertheless, Lemma~\ref{lemma:deltahomogeneity} in \S~\ref{sec:dyadycAnalysis} gives the answer. If $K(x,y)=\varphi(\delta(x,y))$ then $K_j(x,y)=2^jK(2^jx,2^jy)=2^j\varphi(\delta(2^jx,2^jy))=2^j\varphi(2^j\delta(x,y))$ for every $j\in \mathbb{Z}$. Hence $K_j$ depends only on $\delta$. In the next lemma we summarize the elementary properties of this mollification operator.
\begin{lemma}\label{lemma:propertiesmollificationsK}
Let $K\in\mathscr{K}^1$ with parameter $\sigma$ be given. Then $K_j(x,y)=2^jK(2^jx,2^jy)$ belongs to $\mathscr{K}^1$ with parameter $2^{-j}\sigma$. Moreover, denoting with $\varphi^{(j)}$, $\bar{\alpha}^{j}=(\alpha^j_i: i\in \mathbb{Z})$ and $\bar{\lambda}^j=(\lambda^j_i: i\in \mathbb{Z})$ the corresponding functions and sequences for each $K_j$ we have that;
\begin{enumerate}[(a)]
\item $\varphi^{(j)}(s)=2^j\varphi(2^js)$, $j\in \mathbb{Z}$, $s>0$;
\item $\alpha^j_l=\alpha_{l-j}$, $j\in \mathbb{Z}$, $l\in \mathbb{Z}$;
\item $\lambda^j_l=\lambda_{l-j}$, $j\in \mathbb{Z}$, $l\in \mathbb{Z}$.
\end{enumerate}
\end{lemma}
\begin{proof}
From the considerations above, it is clear that $K_j\in\mathscr{K}$. Now, for $j\in \mathbb{Z}$ fixed,
\begin{equation*}
\delta(x,y)^2K_j(x,y)=\delta(x,y)^2 2^j K(2^jx,2^jy)=2^{-j}\delta(2^jx,2^jy)^2K(2^jx,2^jy)
\end{equation*}
which tends to $2^{-j}\sigma$ when $\delta(x,y)\to\infty$. Property (a) is clear. Property (b) follows from (a);
\begin{align*}
\varphi^{(j)}(s)=2^j\varphi(2^js)=2^j\sum_{l\in \mathbb{Z}}\alpha_l\varphi_l(2^js)=\sum_{l\in \mathbb{Z}}\alpha_l\varphi_{l+j}(s)=\sum_{l\in \mathbb{Z}}\alpha_{l-j}\varphi_l(s).
\end{align*}
Hence $\alpha^j_l=\alpha_{l-j}$. Finally (c) follows from (b) and (4.c) in Lemma~\ref{lemma:kerneldelta1}.
\end{proof}
Corollary~\ref{coro:compositonKntimes} and Lemma~\ref{lemma:propertiesmollificationsK} show that, for $K\in\mathscr{K}^1$ with parameter $\sigma$, if we iterate $K$ $2^i$-times ($i$ a positive integer) to obtain $K^{(2^i)}$ and then we mollify this kernel by a scale $2^i$, the new kernel $M^i$ belongs to $\mathscr{K}^1$ with parameter $\sigma$. Notice also that iteration and mollification commute, so that $M^i$ can also be seen as the $2^i$-th iteration of the $2^i$ mollification of $K$. Let us gather in the next statement the basic properties of $M^i$ that shall be used later, which follow from Corollary~\ref{coro:compositonKntimes} and Lemma~\ref{lemma:propertiesmollificationsK}.
\begin{lemma}
Let $K\in\mathscr{K}^1$ with parameter $\sigma$ and let $i$ be a positive integer. Then, the kernel $M^i\in\mathscr{K}^1$ with parameter $\sigma$ and $\lambda^i_j=\lambda^{2^i}_{j-i}$.
\end{lemma}
\section{The main result}\label{sec:mainresult}
We are in position to state and prove the main result of this paper. In order to avoid a notational overload in the next statement, we shall use the notation introduced in the above sections.
\begin{theorem}\label{thm:mainresult}
Let $K$ be in $\mathscr{K}^1$ with parameter $\tfrac{2}{3}t>0$. Then
\begin{enumerate}[(a)]
\item the eigenvalues of $M^i$ converge to the eigenvalues of the kernel in \eqref{eq:NucleoHaarDifusiones} when $i\to+\infty$, precisely
\begin{equation*}
\lambda^{2^i}_{j-i}\to e^{-t2^j}, \textrm{ when } i\to\infty;
\end{equation*}
\item for $1<p<\infty$ and $u_0\in L^p(\mathbb{R}^+)$, the functions $v_i(x)=\int_{\mathbb{R}^+}M^i(x,y)u_0(y) dy$ converge in the $L^p(\mathbb{R}^+)$ sense to the solution $u(x,t)$ of the problem
\begin{equation*}
(P) \left
\{\begin{array}{ll}
\frac{\partial u}{\partial t}=D^{1} u,\, & x\in\mathbb{R}^{+}, t>0;\\
u(x,0)=u_0(x),\, & x\in \mathbb{R}^+.
\end{array}
\right.
\end{equation*}
for the precise value of $t$ for which the initial kernel $K$ is $1$-stable with parameter $\tfrac{2}{3}t$.
\end{enumerate}
\end{theorem}
\begin{proof}[Proof of (a)]
Since $K\in\mathscr{K}^1$ with parameter $\tfrac{2}{3}t>0$, which means that $k_m4^m\to\tfrac{2}{3}t$ as $m$ tends to infinity we have both that $k_m2^m\to 0$ when $m\to\infty$ and that $\sum_{l<m}k_l2^{l-1}<1$ for every positive integer $m$. Since, on the other hand $\sum_{l\in \mathbb{Z}}k_l2^{l-1}=1$, we have for $j\in \mathbb{Z}$ fixed and $i$ a large nonnegative integer that
\begin{equation*}
0<\sum_{l<i-j}k_l2^{l-1}- \frac{k_{i-j}2^{i-j}}{2}<1.
\end{equation*}
Hence, from Lemma~\ref{lemma:propertiesmollificationsK} and Lemma~\ref{lemma:kerneldelta1}, the $j$-th scale eigenvalues of the operator induced by the kernel $M^i$ are given by
\begin{align*}
\lambda^{2^i}_{j-i}&=\left[\frac{1}{2}\left(\sum_{l<i-j}k_l2^l-k_{i-j}2^{i-j}\right)\right]^{2^i}\\
&=\left[\sum_{l<i-j}k_l2^{l-1}-k_{i-j}\frac{2^{i-j}}{2}\right]^{2^i}\\
&=\left[1-\left(\sum_{l\geq i-j}k_l2^{l-1}+\frac{k_{i-j}4^{i-j}}{2}\frac{2^j}{2^i}\right)\right]^{2^i}\\
&= \left[1-\gamma(i,j)\frac{2^j}{2^i}\right]^{2^i},
\end{align*}
with $\gamma(i,j)=2^{i-j}\sum_{l\geq i-j}k_l2^{l-1}+\frac{k_{i-j}4^{i-j}}{2}$. Notice that
\begin{equation*}
\gamma(i,j)=2^{i-j}\sum_{l\geq i-j}2^{-l-1}(k_l4^l)+\frac{k_{i-j}4^{i-j}}{2}=\sum_{m=0}^{\infty}2^{-m-1}(k_{i+m-j}4^{i+m-j})+\frac{k_{i-j}4^{i-j}}{2},
\end{equation*}
which tends to $t>0$ when $i\to\infty$. With these remarks we can write
\begin{equation*}
\lambda^{2^i}_{j-i}=\left(\left[1-\frac{\gamma(i,j)2^j}{2^i}\right]^{\tfrac{2^i}{\gamma(i,j)2^j}}\right)^{\gamma(i,j)2^j}
\end{equation*}
which tends to $e^{-t2^j}$ when $i$ tends to infinity.
\textit{Proof of (b).}
The function $v_i(x)-u(x,t)$ can be seen as the difference of two operators $T_i$ and $T^t_{\infty}$ acting on the initial condition,
\begin{equation*}
v_i(x)=T_iu_0(x)=\int_{y\in \mathbb{R}^+}M^i(x,y)u_0(y) dy
\end{equation*}
and
\begin{equation*}
u(x,t)=T^t_{\infty}u_0(x)=\int_{y\in \mathbb{R}^+}K(x,y;t)u_0(y)dy.
\end{equation*}
Since the eigenvalues of $T_i-T^t_\infty$ are given by $\lambda^{2^i}_{j(h)-i}-e^{-t2^{j(h)}}$, for each $h\in\mathscr{H}$, from Theorem~\ref{thm:op.Lp.haar} in Section~\ref{sec:spectralanalysis} we have
\begin{equation*}
\norm{v_i-u(\cdot,t)}_{L_p(\mathbb{R}^+)}\leq C_1\biggl\|\biggl(\sum_{h\in\mathscr{H}}\abs{\lambda^{2^i}_{j(h)-i}-e^{-t2^{j(h)}}}^2\abs{\proin{u_0}{h}}^2
\abs{I(h)}^{-1}\chi_{I(h)}(\cdot)\biggr)^{1/2}\biggr\|_{L_p(\mathbb{R}^+)}.
\end{equation*}
From (5.g) and (5.h) in Lemma~\ref{lemma:kerneldelta1} we have that the sequence $\lambda^{2^i}_{j(h)-i}$ is uniformly bounded. On the other hand, since $\norm{\bigl(\sum_{h\in\mathscr{H}}\abs{\proin{u_0}{h}}^2
{\abs{I(h)}}^{-1}\chi_{I(h)}(\cdot)\bigr)^{1/2}}_{L_p(\mathbb{R}^+)}\leq C_2\norm{u_0}_{L^p(\mathbb{R}^+)}<\infty$, we can take the limit for $i\to+\infty$ inside the $L^p$-norm and the series in order to get that $\norm{v_i-u(\cdot,t)}_{L_p(\mathbb{R}^+)}\to 0$ when $i\to+\infty$.
\end{proof}
\def$'${$'$}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\noindent{\footnotesize
\textsc{Instituto de Matem\'atica Aplicada del Litoral, UNL, CONICET}
\noindent\textmd{CCT CONICET Santa Fe, Predio ``Dr. Alberto Cassano'', Colectora Ruta Nac.~168 km 0, Paraje El Pozo, S3007ABA Santa Fe, Argentina.}
}
\end{document} |
\begin{document}
\selectlanguage{english}
\maketitle
\begin{abstract}
In this small note we are concerned with the solution of Forward-Backward Stochastic Differential Equations (FBSDE) with drivers that grow quadratically in the control component (quadratic growth FBSDE or qgFBSDE). The main theorem is a comparison result that allows comparing componentwise the signs of the control processes of two different qgFBSDE. As a byproduct one obtains conditions that allow establishing the positivity of the control process.
\end{abstract}
{\bf 2010 AMS subject classifications:}
Primary: 60H30.
Secondary: 60H07, 60J60.\\
{\bf Key words and phrases:} BSDE, forward-backward SDE, quadratic
growth, comparison, positivity, stochastic calculus of variations, Malliavin calculus, Feynman-Kac formula.
\section{Introduction}
This small note is concerned with forward-backward stochastic differential equations (FBSDE) in the Brownian framework, i.e. equations following, for some measurable functions $b$, $\sigma$, $f$ and $g$, the dynamics
\begin{align*}
X_s^{t,x}&=x+\int_t^s b(r,X^{t,x}_r)\mathrm{d} r+\int_t^s \sigma(r,X^{t,x}_r)\mathrm{d} W_r,\\
Y^{t,x}_s &=g(X^{t,x}_T) +\int_s^T f(r,X^{t,x}_r,Y^{t,x}_r,Z^{t,x}_r)\mathrm{d} r-\int_s^T Z^{t,x}_r\mathrm{d} W_r,
\end{align*}
where $W$ is a $d$-dimensional Brownian motion, $(t,x)\in[0,T]\times\mathbb{R}^m$ and $s\in[t,T]$. The function $f$ is called generator or driver while $g$ is named the terminal condition function. The solution of the FBSDE is the triple of adapted processes $(X,Y,Z)$; $Z$ is called the control process.
In the last 30 years much attention has been given to this type of equations due
to their importance in the fields of optimal control and finance. The standard
theory of FBSDE is formulated under the canonical Lipschitz assumption (see for
example \cite{EPQ} and references), but in many financial problems drivers $f$
which have quadratic growth in the control component appear i.e.~when $f$
satisfies a growth condition of the type $|f(t,x,y,z)|\leq C(1+|y|+|z|^2)$. The
particular relation between FBSDE with drivers of quadratic growth in the
control component (qgFBSDE) and the field of finance, stochastic control and
parabolic PDE can be illustrated by the works \cite{HIM2005}, \cite{HPdR10},
\cite{EPQ} and references therein.
One of the fundamental results in BSDE or FBSDE theory is the so called
comparison theorem that allows one to compare the $Y$ components of the
solution of two BSDEs. In rough, given a terminal condition function $g^i$, a
driver $f^i$ and the corresponding FBSDE solution $(X,Y^i,Z^i)$ for
$i\in\{1,2\}$, if $g^1$ dominates $g^2$ and $f^1$ dominates $f^2$ in some sense
then this order relation is expected to carry over to the $Y$ components, i.e.
$Y^1$ dominates $Y^2$ in some sense.
Such a result is however not possible for the control components $Z^i$. In this short note we give a type of comparison result for the control components $Z$, a so called comonotonicity result. This result allows one to compare the signs of the control processes $Z^1$ and $Z^2$ componentwise and as a side product one finds sufficient conditions to establish the positivity of the control process for a single FBSDE.
This type of results can be useful in several situations, for instance in the numerics for such equations, since they allow to establish a priori heuristics that can improve the quality of the numerical approximation. This point of view is pertinent as the applications of FBSDE extend to the field of fluid mechanics (see \cite{freidosreis2011}).
A possible application of the results presented in this note lies in the problematic of showing the existence (and smoothness) of marginal laws of $Y$ which are absolutely continuous with respect to the Lebesgue measure. This type of analysis involves showing the strict positivity of the Malliavin variance (in rough the $Z$ component) of the solution of the FBSDE, (see e.g. \cite{MR2134722}). The results in \cite{MR2134722} were established for FBSDE whose driver function satisfies a standard Lipschitz condition in its spatial components and it is not possible to adapt the proof to cover the qgFBSDE setting of this work.
From another point of view, the comonotonicity result is an interesting result in the context of economic models of equilibrium pricing when analyzed in the qgFBSDE framework. In such framework the equilibrium market price of risk can be characterized in terms of the control process of the solution to a qgFBSDE. The difficulty is that the individual optimization problems underlying the characterization of the equilibrium requires the equilibrium volatility (the $Z$ component of the solution to a certain qgFBSDE) to satisfy an exponential integrability condition as well as a positivity condition. Since the results of \cite{MR2134722} cannot be applied or adapted to the qgFBSDE setting, the comonotonicity result presented here (and its corollary) provides conditions that ensure the positivity of the relevant process and hence may prove to be very useful in equilibrium analysis. An example of such type of problems can be found for example in \cite{HPdR10}.
The results of this work originate in \cite{05CKW} where the authors give a comonotonicity result for FBSDE satisfying a standard Lipschitz condition and where the driver function is independent of the diffusion process $X$. In \cite{reis2011} the author extended the results of \cite{05CKW} to the qgFBSDE setting but was not able to include the dependence on $X$ in the driver. The dependence of $f$ in $X$ is something that is quite common in the financial framework and that makes the applicability of \cite{reis2011} limited. This short note presents a full generalization of the results of \cite{reis2011} where the driver is now allowed to depend on $X$, this makes the conditions and analysis more involved but makes the result general enough that it can now be ``broadly'' applied to the standard financial setting where the driver $f$ almost always depends on the underlying diffusion $X$.
The note is organized as follows: In Section 2 we introduce some notation and recall some known results. The main results are then stated and proved in Section 3.
\section{Preliminaries}
Throughout fix $T>0$. We work on a canonical Wiener space $(\Omega, \mathcal{F}, \mathbb{P})$ carrying a $d$-dimensional Wiener process $W = (W^1,\cdots, W^d)$ restricted to the time interval $[0,T]$ and we denote by $\mathcal{F}=(\mathcal{F}_t)_{t\in[0,T]}$ its natural filtration enlarged in the usual way by the $\mathbb{P}$-zero sets.
Let $p\geq 2$, then we denote by $\mathcal{S}^p(\mathbb{R}^m)$ the space of all measurable processes $(Y_t)_{t\in[0,T]}$ with values in $\mathbb{R}^m$ normed by $\| Y \|_{\mathcal{S}^p} = \mathbb{E}[\sup_{t \in [0,T]}|Y_t|^p ]^{{1}/{p}}$ and by $\mathcal{S}^\infty(\mathbb{R}^m)$ its subspace of bounded measurable processes. We also denote by $\mathcal{H}^p(\mathbb{R}^m)$ the space of all progressively measurable processes $(Z_t)_{t\in[0,T]}$ with values in $\mathbb{R}^m$ normed by $\|Z\|_{\mathcal{H}^p} = \mathbb{E}[\big( \int_0^T |Z_s|^2 \mathrm{d} s \big)^{p/2} ]^{{1}/{p}}$.
For vectors $x = (x^1,\cdots, x^m)\in \mathbb{R}^m$ we write $|x| = (\sum_{i=1}^m (x^i)^2)^{\frac{1}{2}}$. $\nabla$ denotes the canonical gradient operator and for a function $h(x,y):\mathbb{R}^m\times\mathbb{R}^d\to \mathbb{R}$ we write $\nabla_x h$ or $\nabla_y h$ to refer to the first derivatives with relation to $x$ and $y$ respectively.
We work with decoupled systems of forward and backward stochastic differential equations (FBSDE) for $(t,x)\in[0,T]\times\mathbb{R}^m$ and $s\in[t,T]$
\begin{align}
\label{sde}
X_s^{t,x}&=x+\int_t^s b(r,X^{t,x}_r)\mathrm{d} r+\int_t^s \sigma(r,X^{t,x}_r)\mathrm{d} W_r,\\
\label{bsde}
Y^{t,x}_s &=g(X^{t,x}_T) +\int_s^T f(r,X^{t,x}_r,Y^{t,x}_r,Z^{t,x}_r)\mathrm{d} r-\int_s^T Z^{t,x}_r\mathrm{d} W_r,
\end{align}
for some measurable functions $b$, $\sigma$, $g$ and $f$.
We now state our assumptions.
\begin{assump}\label{H1}
The functions $b:[0,T]\times\mathbb{R}^m\to \mathbb{R}^m$ and $\sigma:[0,T]\times\mathbb{R}^m\to \mathbb{R}^{m\times d}$ are continuously differentiable in space with derivatives uniformly bounded by a constant $K$ and are $\frac12$-H\"older continuous in time. $\sigma$ is uniformly elliptic and $|b(\cdot,0)|$ and $|\sigma(\cdot,0)|$ are uniformly bounded.
$g:\mathbb{R}^m\to\mathbb{R}$ is bounded, continuously differentiable with bounded derivatives. $f$ is a continuously differentiable function in space, uniformly continuous in the time variable and satisfies for some $M>0$ for all $(t,x,y,z)\in[0,T]\times \mathbb{R}^m\times \mathbb{R}\times \mathbb{R}^d$, $|f(t,x,y,z)|\leq M (1+|y|+|z|^2)$ as well as
\begin{align*}
|\nabla_x f(t,x,y,z)|\leq M (1+|y|+|z|^2),\quad
|\nabla_y f(t,x,y,z)|\leq M, \quad
|\nabla_z f(t,x,y,z)|\leq M (1+|z|).
\end{align*}
\end{assump}
\begin{assump}
\label{H2}
The spatial derivatives $\nabla b$, $\nabla \sigma$ and $\nabla g$ satisfy a standard Lipschitz condition in their spatial variables with Lipschitz constant $K$.
$\nabla_y f$ satisfies a standard Lipschitz condition with Lipschitz constant $K$ and
for all $t\in[0,T]$, $x,x'\in\mathbb{R}^m$, $y,y'\in\mathbb{R}$ and $z,z'\in\mathbb{R}^d$ it holds that
\begin{align*}
&|\nabla_x f(t,x,y,z)-\nabla_x f(t,x',y',z')|
\\
&\hspace{1cm}
\leq K\big(1+|z|+|z'|\big)\big\{
(1+|z|+|z'|\big)|x-x'|+|y-y'|+|z-z'|\big\},\\
&|\nabla_z f(t,x,y,z)-\nabla_z f(t,x',y',z')|
\\
&\hspace{1cm}
\leq K \big\{(1+|z|+|z'|) |x-x'|+|y-y'|+|z-z'|\big\},
\end{align*}
\end{assump}
The next theorem compiles several results found throughout \cite{AIdR07}, \cite{IdR2010} and \cite{reis2011}.
\begin{theo}\label{compilationtheorem}
Let Assumption \ref{H1} hold, then for any $p\geq 2$ and $(t,x)\in[0,T]\times\mathbb{R}$ there exists a unique solution $\Theta^{t,x}=(X^{t,x},Y^{t,x},Z^{t,x})$ of FBSDE \eqref{sde}-\eqref{bsde} in the space $\mathcal{S}^p\times\mathcal{S}^\infty\times\mathcal{H}^p$ and\footnote{BMO refers to the class of Bounded mean oscillation martingales, see \cite{IdR2010} or \cite{kazamaki} for more details.} $\int_0^\cdot Z\mathrm{d} W \in BMO$.
The variational process of $\Theta^{t,x}$ exists and satisfies for $s\in[t,T]$
\begin{align}
\label{nablasde}
\nabla_x X_s^{t,x}&=I_d+\int_t^s \nabla_x b(r,X^{t,x}_r)\nabla_x X^{t,x}_r\mathrm{d} r+\int_t^s \nabla_x\sigma(r,X^{t,x}_r)\nabla_x X^{t,x}_r\mathrm{d} W_r,\\
\label{nablabsde}
\nabla_x Y^{t,x}_s &=\nabla_x g(X^{t,x}_T)\nabla_x X_T^{t,x} +\int_s^T \langle (\nabla f)(r,\Theta^{t,x}_r),\nabla_x \Theta^{t,x}_r \rangle\mathrm{d} r-\int_s^T \nabla_x Z^{t,x}_r\mathrm{d} W_r.
\end{align}
The triple $\Theta^{t,x}$ is Malliavin differentiable and its Malliavin
derivatives are given by $D \Theta^{t,x} = (D X^{t,x},DY^{t,x},DZ^{t,x})$. The
process $(Z_s^{t,x})_{s\in[t,T]}$ has continuous paths, $Z^{t,x}\in\mathcal{S}^p$ and
for $0\leq t\leq u\leq s\leq T$ the following representation holds
\begin{align}
\label{representation}
D_s Y^{t,x}_s = Z^{t,x}_s,\ \mathbb{P}\text{-}a.s. \quad \textrm{ and } \quad D_u Y^{t,x}_s = \nabla_x Y^{t,x}_s (\nabla_x X^{t,x}_u)^{-1} \sigma(u,X^{t,x}_u),\ \mathbb{P}\text{-}a.s.
\end{align}
There exists a continuous function $u:[0,T]\times\mathbb{R}^m\to\mathbb{R}$ such that for all $(t,x)\in[0,T]\times \mathbb{R}^m$ and $s\in[t,T]$ it holds that $Y^{t,x}_s=u(s,X_s^{t,x})$ $\mathbb{P}$-a.s..
Under Assumption \ref{H2} the function $u$ is continuously differentiable in its spatial variables and $Z^{t,x}_s=(\nabla_x u)(s,X_s^{t,x})\sigma(s,X^{t,x}_s)$ $\mathbb{P}$-a.s. for all $0\leq t\leq s\leq T$ and $x\in\mathbb{R}^m$.
\end{theo}
\begin{proof}
Existence and uniqueness of the solution is quite standard either for the SDE (e.g. \cite{Protter2005}) or for the BSDE (see e.g. Theorem 1.2.12 and Lemma 1.2.13 in \cite{reis2011}).
The variational differentiability and representation formulas as well as the path continuity of $Z$ follow from Theorems 2.8, 2.9 and 5.2 in \cite{IdR2010} (or Theorems 3.1.9, 3.2.4 and 4.3.2 of \cite{reis2011}). We emphasize that due to the continuity of the involved processes, the representation formulas \eqref{representation} hold $\mathbb{P}$-a.s. for all $t\in[0,T]$ and not just $\mathbb{P}\otimes \textrm{Leb}$-a.a.
Lastly, the Markov property of the $Y$ process is rather standard (see Theorem
4.1.1 of \cite{reis2011}). The differentiability assumptions on the driver and
terminal condition function (Assumption \ref{H2}) ensure that the function $u$
is continuously differentiable in the spatial variables. A detailed proof of
this can be found either in Theorem 7.7 in \cite{10AIdR} or Theorem 4.1.2 in
\cite{reis2011}.
\end{proof}
\section{A comonotonicity result for quadratic FBSDE}
In this section we work with a $d$-dimensional Brownian motion $W$ on the time interval $[0,T]$ for some positive finite $T$. Throughout let $(t,x)\in [0,T]\times \mathbb{R}^m$. Our standing assumption for this section is as follows.
\begin{assump}\label{H}
Let Assumptions \ref{H1} and \ref{H2} hold. Assume that $m=1$ and $d\geq 1$.
\end{assump}
\begin{remark}
\label{caseofmdiff1}
We note that it is possible to write the results of this section for multidimensional SDE systems (i.e.~when $m\geq 1$) under the assumption that $\sigma$ is a square diagonal matrix and the system of forward equations is fully decoupled. There are many applications where such an assumption takes place (e.g. \cite{HPdR10}). We write these result with $m=1$ to simplify the presentation of this short note.
\end{remark}
For each $i\in\{1,2\}$ we define the SDE \eqref{sde} with $b_i$ and $\sigma_i$ and BSDE \eqref{bsde} with terminal condition and driver given by $g_i$ and $f_i$. We denote the respective solution of the system by $(X^{t,x,i}_s,Y^{t,x,i}_s,Z^{t,x,i}_s)_{s\in[t,T]}$ valued in $\mathbb{R}\times\mathbb{R}\times\mathbb{R}^d$ for $(t,x,i)\in[0,T]\times\mathbb{R}\times\{1,2\}$.
We define the vector-product operator, ``$\odot$'', as $\odot:\mathbb{R}^d\times\mathbb{R}^d\to\mathbb{R}^d$ such that
\begin{align}\label{odotoperator}
a\odot b= (a_1b_1, \ldots, a_d b_d),\qquad \textrm{for any } a=(a_1,\cdots,a_d),b=(b_1,\cdots,b_d)\in \mathbb{R}^d.
\end{align}
With the convention that $a\odot b\geq 0$ means that for each $i\in\{1,\ldots,d\}$, $a_i b_i\geq 0$.
The aim of this section is to explore conditions such that the following statement holds
\[Z^{t,x,1}_s \odot Z_s^{t,x,2} \geq 0,\quad
\mathbb{P}\text{-a.s.},\quad \textrm{for any } (t,x)\in[0,T]\times\mathbb{R}\textrm{ and }s\in[t,T].
\]
\begin{defi}[Comonotonic functions]
We say that two measurable functions $g,h:\mathbb{R}\to\mathbb{R}$ are comonotonic if they are \emph{monotone} and \emph{have the same type of monotonicity}, i.e.~if $g$ is increasing or decreasing then $h$ is also increasing or decreasing respectively. We say that $g$ and $h$ are strictly comonotonic if they are comonotonic and strictly monotonic.
\end{defi}
We now state our main theorem.
\begin{theo}\label{comono-theo-1}
Let Assumption \ref{H} hold and for $(t,x)\in[0,T]\times\mathbb{R}$ define $(X^{t,x,i},Y^{t,x,i},Z^{t,x,i})$ as the unique solution of FBSDE (\ref{sde})-(\ref{bsde}) for $i\in\{1,2\}$. Suppose that $x\mapsto g_i(x)$ and $x\mapsto f_i(\cdot,x,\cdot,\cdot)$ are comonotonic for all $i\in\{1,2\}$ and further, that $g_1,g_2$ are also comonotonic\footnote{This implies that $x\mapsto f_1(\cdot,x,\cdot,\cdot)$ and $x\mapsto f_2(\cdot,x,\cdot,\cdot)$ are comonotonic as well.}. If it holds for all $s\in[t,T]$ that
\begin{align}
\label{sigmaineq}
\sigma_1(s,X^{t,x,1}_s)\odot \sigma_2(s,X^{t,x,2}_s)\geq 0,\quad \mathbb{P}\text{-}a.s.,
\end{align}
then
\begin{align}
\label{eq:ZZineq}
Z^{t,x,1}_s \odot Z^{t,x,2}_s \geq 0,\quad
\mathbb{P}\text{-}a.s.,\quad \textrm{for any } (t,x)\in[0,T]\times\mathbb{R}\textrm{ and }s\in[t,T].
\end{align}
If $g_1,\,g_2$ are strictly comonotonic and inequality \eqref{sigmaineq} holds strictly then \eqref{eq:ZZineq} is also strict.
\end{theo}
\begin{proof}
Throughout take $t\in[0,T]$, $x\in\mathbb{R}$ and let $i\in\{1,2\}$. According to Theorem \ref{compilationtheorem}, for each $i\in\{1,2\}$ there exists a measurable deterministic, continuously differentiable function (in its spatial variables) $u_i:[0,T]\times \mathbb{R}\to \mathbb{R}$ such that $Y_s^{t,x,i}=u_i(s,X_s^{t,x,i})$ and $Z_s^{t,x,i}= (\nabla_x u_i) (s,X_s^{t,x,i})\sigma_i(s,X_s^{t,x,i})$ $\mathbb{P}$-a.s. We have then $\mathbb{P}$-a.s. that for any $s\in[t,T]$ (recall that $\sigma_i$ is a vector and $\nabla u_i$ a scalar)
\begin{align}
\nonumber
Z^{t,x,1}_s\odot Z^{t,x,2}_s &= \Big( (\nabla_x u_1)(s,X^{t,x,1}_s)\ \sigma_1(s,X^{t,x,1}_s) \Big) \odot \Big( (\nabla_x u_2) (s,X^{t,x,2}_s)\ \sigma_2(s,X^{t,x,2}_s)\Big) \\
\label{comono-zodotz}
&= \Big(\sigma_1(s,X^{t,x,1}_s)\odot \sigma_2(s,X^{t,x,2}_s)\Big) (\nabla_x u_1)(s,X^{t,x,1}_s) (\nabla_x u_2)(s,X^{t,x,2}_s).
\end{align}
A standard comparison theorem for SDEs (see \cite{Protter2005}) yields that for any fixed $t$ and $T$ the mappings $x\mapsto X^{t,x,i}_T$ are increasing. This, along with the fact that $g_1$ and $g_2$ are comonotonic functions, implies that for fixed $t$ and $T$ it holds that $x\mapsto g_1(X^{t,x,1}_T)$ and $x\mapsto g_2(X^{t,x,2}_T)$ are a.s.~comonotonic. A similar argument implies the same conclusion for the drivers $f_i$, i.e.~$x\mapsto f_1(\cdot,X^{t,x,1}_\cdot,\cdot,\cdot)$ and $x\mapsto f_2(\cdot,X^{t,x,2}_\cdot,\cdot,\cdot)$ are a.s. comonotonic.
Using the comparison theorem for quadratic BSDE (see e.g.~Theorem 2.6 in \cite{00Kob}) and the monotonicity (and comonotonicity) of $x\mapsto g_i(X^{t,x,i}_T)$ and $x\mapsto f_i(\cdot,X^{t,x,i}_\cdot,\cdot,\cdot)$ we can conclude that $x\mapsto Y^{t,x,i}$ is also a.s.~monotone. Furthermore, since $x\mapsto g_1(X^{t,x,1}_T)$, $x\mapsto g_2(X^{t,x,2}_T)$, $x\mapsto f_1(\cdot,X^{t,x,1}_\cdot,\cdot,\cdot)$ and $x\mapsto f_2(\cdot,X^{t,x,2}_\cdot,\cdot,\cdot)$ are comonotonic, the same comparison theorem yields that the mappings $x\mapsto Y^{t,x,1}$ and $x\mapsto Y^{t,x,2}$ are also a.s.~comonotonic. Equivalently, one can write for any $(t,x)\in[0,T]\times \mathbb{R}$ that (notice that $\nabla u$ exists according to Theorem \ref{compilationtheorem})
\begin{align}
\label{aux-for-strict}
\big\langle (\nabla_x u_1)(t,x), (\nabla_x u_2) (t,x)\big\rangle \geq 0.
\end{align}
Therefore, combining \eqref{aux-for-strict} with \eqref{sigmaineq} in (\ref{comono-zodotz}) we easily obtain
\[Z^{t,x,1}_s(\omega)\odot Z^{t,x,2}_s(\omega)\geq 0, \quad \mathbb{P}\text{-}a.s.\ \omega\in \Omega,\ (t,x)\in[0,T]\times\mathbb{R},\quad s\in[t,T].
\]
Under the assumption that $g_1$ and $g_2$ are strictly comonotonic it is clear that inequality \eqref{aux-for-strict} is also strict. Furthermore, if one also assumes that the inequality in \eqref{sigmaineq} holds strictly for any $(t,x)\in[0,T]\times\mathbb{R}$ then \eqref{eq:ZZineq} also holds strictly.
\end{proof}
Unfortunately it doesn't seem possible to weaken the assumptions of the previous theorem. The key factor is the representation of $Z^{t,x}$ via the function $Y^{t,x}_t=u(t,x)$ which needs to be continuously uniformly differentiable in the spatial variable and for that one needs Assumption \ref{H2} to hold.
We obtain an interesting conclusion of the previous result if we interpret the forward diffusion of the system as a backward equation. In terms of applications (as mentioned in the introduction) it is the next result that gives a condition that allows the user to conclude the positivity or negativity of the control process.
In the next result we focus on just one FBSDE so we fix $i=1$ and we omit this index.
\begin{coro}\label{comono-theo-2}
Let the assumptions of Theorem \ref{comono-theo-1} hold (fix $i=1$). Take $(t,x)\in[0,T]\times\mathbb{R}$ and let $(X,Y,Z)$ be the unique solution of the FBSDE
\begin{align}
\label{loc-19122008-1}
X_t&=x+\int_0^t b(s,X_s)\mathrm{d} s+\int_0^t \sigma(s,X_s)\mathrm{d} W_s,\\
\label{loc-19122008-2}
Y_t &=g(X_T) +\int_t^T f(s,X_s,Y_s,Z_s)\mathrm{d} s-\int_t^T Z_s\mathrm{d} W_s.
\end{align}
Then, if $x\mapsto g(x)$ and $x\mapsto f(\cdot,x,\cdot,\cdot)$ are increasing (respectively decreasing) functions, then $Z_t \odot \sigma(t,X_t)$ is $\mathbb{P}$-a.s.~positive (respectively negative) for all $t\in[0,T]$. In particular, if the monotonicity of $g$ and $f$ (in $x$) is strict and if $\sigma$ is strictly positive then $Z$ is either strictly positive or strictly negative (according to the monotonicity of $g$ and $f$).
\end{coro}
\begin{proof}
Throughout let $x\in\mathbb{R}$ and $t\in[0,T]$.
We prove the statement for the case of $g(x)$ and $f(\cdot,x,\cdot,\cdot)$ being increasing functions (in the spatial variable $x$) and we give a sketch of the proof for the decreasing case. Rewriting SDE \eqref{loc-19122008-1} as a BSDE leads to $X_t=X_T-\int_t^T b(s,X_s)\mathrm{d} s-\int_t^T \sigma(s,X_s)\mathrm{d} W_s$. In fact we can still rewrite the above equation in a more familiar way, namely
\begin{align}\label{loc-19112008-3}
\tilde{Y}_t=\tilde{g}(X_T)+\int_t^T \tilde{f}(s,\tilde{Y}_s)\mathrm{d} s-\int_t^T \tilde{Z}_s\mathrm{d} W_s,
\end{align}
where $\tilde{Z}_s=\sigma(s,X_s)$ for $s\in[0,T]$, $\tilde{g}(x)=x$ and $\tilde{f}(t,x,y,z)=- b(t,y)$.
At this stage we need to clarify the identification $\tilde{Z}_\cdot=\sigma(\cdot,X_\cdot)$. Let us write explicitly the dependence on the parameter $x$ of the solution $(X,\tilde{Y},\tilde{Z})$ of the FBSDE (\ref{loc-19122008-1}), (\ref{loc-19112008-3}), i.e. we write $(X^x,\tilde{Y}^x,\tilde{Z}^x)$ to denote $(X,\tilde{Y},\tilde{Z})$.
Note that the solution of the BSDE (\ref{loc-19112008-3}) is the solution of SDE (\ref{loc-19122008-1}) which is a Markov process. We can then write $\tilde{Y}_\cdot= X_\cdot=\tilde{u}(\cdot,X_\cdot)$ where $\tilde{u}$ is the identity function (infinitely differentiable). Under Assumption \ref{H1} both $X$ and $\tilde{Y}$ are differentiable as functions of $x$ (see Theorem \ref{compilationtheorem}), we have then $\tilde{Z}_\cdot=(\nabla_x \tilde{u}) (\cdot,X_\cdot)\sigma(\cdot,X_\cdot)$. And since $\tilde{u}$ is the identity function with derivative being the constant function $1$, it follows immediately that $\tilde{Z}_\cdot=\sigma(\cdot,X_\cdot)$.
Our aim is to use the previous theorem to imply this result. So we only have to check that its assumptions are verified. Comparing the terminal conditions of (\ref{loc-19122008-2}) and (\ref{loc-19112008-3}), i.e. comparing $x\mapsto g(X^x_T)$ with $x\mapsto \tilde{g}(X^x_T)= X^x_T$ it is clear that both functions are almost surely increasing. Further, the driver function $\tilde{f}$ of BSDE \eqref{loc-19112008-3} is given by $\tilde{f}(t,x,y,z)=\tilde{f}(t,y)=-b(t,y)$ which is independent of $x$. Clearly $x\mapsto \tilde{f}(\cdot,x,\cdot,\cdot)$ and $x\mapsto f(\cdot,x,\cdot,\cdot)$ are comonotonic.
Theorem \ref{comono-theo-1} applies and we conclude immediately that
\begin{align}
\label{Zodotsigma}
Z_t\odot \tilde{Z}_t=Z_t\odot \sigma(t,X_t)\geq 0,~\mathbb{P}\text{-}a.s.\quad t\in[0,T].
\end{align}
For the other case, when $g$ is a decreasing function, the approach is very similar. We rewrite the SDE (\ref{loc-19122008-1}) in the following way,
\[
-X_t=-X_T+\int_t^T b(s,X_s)\mathrm{d} s-\int_t^T\big[ -\sigma(s,X_s)\big]\mathrm{d} W_s,\quad t\in[0,T].
\]
The terminal condition of the above BSDE is given by $x\mapsto \tilde{g}(x)=-x$ evaluated at $x=X_T$ and the driver $\tilde{f}(t,x,y,z)=\tilde{f}(t,y)=b(t,-y)$ which is independent of $x$. Since $\tilde{g}$ is a decreasing function, we obtain our result by comparing the above BSDE with (\ref{loc-19122008-2}) and applying the previous theorem.
\end{proof}
The above corollary allows one to conclude in particular the strict positivity of the control process. If one is only interested in establishing positivity (ignoring strictness), then one can indeed lower the strength of the assumptions.
\begin{lemma}
Let Assumption \ref{H1} hold and $m=1$. Assume that $x\mapsto g(x)$ and $x\mapsto f(\cdot,x,0,0)$ are both monotone increasing, then $ Z_t\odot \sigma(t,X_t)\geq 0,$ $\mathbb{P}$-a.s. for all $t\in[0,T]$. If $x\mapsto g(x)$ and $x\mapsto f(\cdot,x,\cdot,\cdot)$ are both monotone decreasing, then $Z_t\odot \sigma(t,X_t)\leq 0$, $\mathbb{P}$-a.s. for all $t\in[0,T]$.
\end{lemma}
\begin{remark}
Again, as in Remark \ref{caseofmdiff1}, it is possible to state and prove the same result for $m\geq 1$. One needs to impose that $\sigma$ is a square diagonal matrix and the SDE to be a decoupled system.
\end{remark}
\begin{remark}
It is possible to weaken the assumptions of this lemma as was done for Theorem 4.3.6 in \cite{reis2011} or Corollary 2 in \cite{IdRZ2010}. Namely, the conditions are weakened to Lipschitz type conditions with the appropriate Lipschitz ``constant'', then one argues similarly but combining with a regularization argument.
\end{remark}
\begin{proof}
Throughout let $t\in[0,T]$ and $x\in\mathbb{R}$. Then due to the representation formulas in \eqref{representation} we have $\mathbb{P}$-a.s. that
\begin{align}
\label{trick}
Z_t\odot\sigma(t,X_t)
= D_t Y_t\odot \sigma(t,X_t)
= \nabla_x Y_t (\nabla_x X_t)^{-1} \sigma(t,X_t) \odot\sigma(t,X_t).
\end{align}
It is trivial to verify that $\sigma(t,X_t) \odot\sigma(t,X_t)\geq 0$. It remains to establish a result concerning the sign of $\nabla_x Y$ and $(\nabla_x X)^{-1}$.
Under the assumptions it is easy to verify that the solution of \eqref{nablasde} is positive. The solution of $\nabla_x X$ is essentially a positive geometric Brownian motion with a nonlinear drift and volatility which in turn implies that $(\nabla_x X)^{-1}$ is also positive. If we manage to deduce a result concerning the sign of $\nabla_x Y$ we are then able to obtain a weaker version of Corollary \ref{comono-theo-2}.
The methodology developed to deduce moment and a priori estimates for quadratic BSDE and illustrated in Lemma 3.1 and 3.2 of \cite{IdR2010} (or Chapter 2 in \cite{reis2011}) allow the following equality
\begin{align}
\label{simplifiedeq}
\nabla_x Y_t = \mathbb{E}^{\widehat{\mathbb{P}}}\big[
e_T (e_t)^{-1} \nabla_x g(X_T) \nabla_x X_T
+\int_t^T [e_r e_t^{-1} (\nabla_x f)(r,X_r,0,0)\nabla_x X_r] \mathrm{d} r
\big|\mathcal{F}_t\big],
\end{align}
where the process $e$ and the measure $\widehat{\mathbb{P}}$ are defined as
\[
e_t=\exp\Big\{ \int_0^t \frac{f(r,X_r,Y_r,Z_r)-f(r,X_r,0,Z_r)}{Y_r}\mathbbm{1}_{Y_r\neq 0} \mathrm{d} r \Big\},
\]
and $\widehat{\mathbb{P}}$ is a probability measure with Radon-Nikodym density given by
\[
\frac{\mathrm{d} \widehat{\mathbb{P}}}{\mathrm{d} \mathbb{P}}=M_T=\mathcal{E}\Big( \int_0^T \frac{f(r,X_r,0,Z_r)-f(r,X_r,0,0)}{|Z_r|^2}Z_r\mathbbm{1}_{|Z_r|\neq 0} \mathrm{d} W_r \Big).
\]
Both $(e_t)_{t\in[0,T]}$ and $M$ are well defined. The first because $y\mapsto f(\cdot,\cdot,y,\cdot)$ is assumed to be uniformly Lipschitz and hence $e$ is bounded from above and below and away from zero. The second follows from a combination of the growth assumptions on $\nabla_z f$ and the fact that $\int Z \mathrm{d} W$ is a bounded mean oscillation martingale\footnote{This observation is key in many results for quadratic BSDE. The stochastic exponential of a BMO martingale is uniformly integrable and defines a proper density. This type of reasoning can be found ubiquitously in \cite{AIdR07} or \cite{IdR2010} for example.} (BMO).
We have already seen that $\nabla X$ is positive and it is also trivial to conclude that the process $e$ also is. Given that $g$ and $f$ are differentiable, then saying that these functions are monotonic (in $x$) boils down to making a statement on the sign of $(\nabla_x g)(x)$ and $(\nabla_x f)(\cdot,x,0,0)$. If one assumes that $g$ and $f(\cdot,x,0,0)$ are monotone increasing in $x$ then $(\nabla g)(x)\geq 0$ and $(\nabla_x f)(\cdot,x,0,0)\geq 0 $ for all $x$. Hence from \eqref{simplifiedeq} (and the remarks above) we conclude that $\nabla_x Y$ is also positive. Returning to \eqref{trick} we have then that $Z_t \odot \sigma(t,X_t)\geq 0$ which proves our result.
The arguments are similar for the case when $g(x)$ and $f(\cdot,x,0,0)$ are
decreasing functions.
\end{proof}
{\bf Acknowledgments:} The first author would like to thank Peter Imkeller and Ulrich Horst for their comments. The first author gratefully acknowledges the partial support from the CMA/FCT/UNL through project PEst-OE/MAT/UI0297/2011.
This work was partially supported by the project SANAF UTA\_{}CMU/MAT/0006/2009.
\end{document} |
\begin{document}
\title{Order polynomial product formulas and poset dynamics}
\author{Sam Hopkins}
\address{Department of Mathematics, Howard University, Washington, DC}
\email{samuelfhopkins@gmail.com}
\thanks{The author was supported in part by NSF grant \#1802920.}
\begin{abstract}
We survey all known examples of finite posets whose order polynomials have product formulas, and we propose the heuristic that these are the same posets with good dynamical behavior. Here the dynamics in question are the actions of promotion on the linear extensions of the poset and rowmotion on the $P$-partitions of the poset.
\end{abstract}
\maketitle
\section{Introduction} \label{sec:intro}
A \dfn{linear extension} of a poset is a linear (i.e., total) ordering of its elements extending the partial ordering. Perhaps the single most important numerical invariant associated to a finite\footnote{All posets we consider will be finite and we will drop this adjective from now on.} poset $P$ is its number $e(P)$ of linear extensions. Computing the number of linear extensions of a poset is a $\#\mathrm{P}$-complete problem~\cite{brightwell1991counting}, which means that we cannot hope for a ``good'' formula for this number in general. Nevertheless, especially within the context of algebraic combinatorics, there is great interest in obtaining good formulas -- in the best cases, product formulas -- for the number of linear extensions of special families of posets. The most famous such product formula is the celebrated hook-length formula~\cite{frame1954hook} for the number of linear extensions of a poset of Young diagram shape. There are also hook-length formulas for shifted shapes, and for rooted forests; and more generally, the $d$-complete posets of Proctor~\cite{proctor2014dcomplete, kim2019hook, naruse2019skew}, which include Young diagram shapes, shifted shapes, and rooted forests, have hook-length formulas counting their linear extensions.
Here we will concentrate on a finer combinatorial invariant of a poset $P$ than its number of linear extensions: namely, its order polynomial. A \dfn{$P$-partition of height~$m$} is a weakly order-preserving map $P\to \{0,1,\ldots,m\}$; the \dfn{order polynomial} $\Omega_P(m)$ of~$P$ counts the number of $P$-partitions of height $m$.\footnote{Traditionally, as in~\cite[Chapter~3]{stanley2012ec1}, $P$-partitions are order-reversing and the order polynomial is what is $\Omega_P(m-1)$ in our notation. Our changes are superficial but lead to a cleaner presentation.} The order polynomial is a polynomial in $m$ of degree $\#P$, with leading coefficient equal to~$e(P)/\#P!$. Posets with product formulas for their order polynomials are much rarer than posets with product formulas enumerating their linear extensions.
We will review all known examples of posets with product formulas for their order polynomials below. However, our goal is not just to survey families of posets with order polynomial product formulas, but also to advertise an apparently powerful heuristic which says that:
\begin{center} {\bf the posets with order polynomial product formulas are \\ the same as the posets with good dynamical behavior}.\end{center}
Let us explain what we mean by poset dynamics. There are several constructions from algebraic combinatorics of interesting invertible operators acting on objects associated to a poset (e.g., linear extensions or $P$-partitions). And while these operators are defined for any poset, they tend to have good behavior (e.g., a small, predictable order and regular orbit structure) only for a select few families of posets. Here we will focus on two such operators: \dfn{promotion} and \dfn{rowmotion}.
Promotion is an invertible operator acting on the linear extensions of any poset. It was first defined and studied by Sch\"{u}tzenberger~\cite{schutzenberger1972promotion}, in conjunction with a related involutive operator called evacuation. Sch\"{u}tzenberger's initial motivation for studying these operators was the relation between evacuation and the RSK algorithm~\cite{schutzenberger1972schensted} (see~\cite[Chapter~7, \S A1.2]{stanley1999ec2} for a modern account of this relation).
Rowmotion is an invertible operator acting on the order ideals of any poset, which has been studied by a number of authors over several decades~\cite{brouwer1974period, deza1990loops, fonderflaass1993orbits, cameron1995orbits}, with a renewed interest especially in the last 10 or so years because of a surprising connection to the combinatorics of root systems~\cite{panyushev2009orbits, striker2012promotion, williams2013cataland}. Einstein and Propp~\cite{einstein2013combinatorial} gave a piecewise-linear extension of rowmotion to the entire order polytope $\mathcal{O}(P)$ of a poset $P$. By identifying the points in $\frac{1}{m}\mathbb{Z}^{P}\cap \mathcal{O}(P)$ with $P$-partitions of height $m$, we thus obtain an action of rowmotion on these $P$-partitions. It is this (piecewise-linear) action of rowmotion on $P$-partitions which we will mostly be concerned with.
Then, the heuristic we are proposing is more precisely that the following three properties of a poset $P$ are related and tend to occur simultaneously:
\begin{enumerate}[(1)]
\item \label{item:prod_form} the order polynomial $\Omega_P(m)$ has a product formula;
\item \label{item:pro} promotion acting on the linear extensions of $P$ has good behavior;
\item \label{item:row} rowmotion acting on the $P$-partitions of height $m$ has good behavior, for all $m$.
\end{enumerate}
These three properties are not perfectly correlated, in that we have examples of posets which satisfy some but not all of them (although we know of no counterexamples to \eqref{item:row} $\Rightarrow$ \eqref{item:pro} $\Rightarrow$ \eqref{item:prod_form}). Nevertheless, we do think this heuristic is powerful, and we remark that it is powerful ``in both directions'': that is, both for finding posets with good dynamical behavior (see, e.g.,~\cite{hopkins2020promotion}), and for finding posets with order polynomial product formulas (see, e.g., \cite{hopkins2020plane}).
\subsection{A more detailed account of the heuristic} \label{subsec:heuristic}
Let us give a more detailed account of what~\eqref{item:prod_form},~\eqref{item:pro}, and~\eqref{item:row} mean, and how they are related.
For $\Omega_P(m)$ to have a product formula, ideally all of its roots should be integers. There are also some interesting examples where the roots are \emph{half}-integers, so we will consider this acceptable as well. Furthermore, $P$ should come in a family, with certain numerical parameters attached to it, and we should be able to write $\Omega_P(m)$ in a simple way as a product of rational expressions involving these parameters. Since $e(P)=\#P! \cdot \lim_{m\to \infty} \frac{\Omega_P(m)}{m^{\#P}}$, whenever we have a product formula for $\Omega_P(m)$ we also have one for $e(P)$.
As it turns out, we also always seem to get very nice $q$-analogs when $P$ has an order polynomial product formula. Define
\[\Omega_P(m;q) \coloneqq \prod_{\alpha} \frac{(1-q^{\kappa(m-\alpha)})}{(1-q^{-\kappa\alpha})},\]
where the product is over all roots $\alpha$ of $\Omega_P(m)$, with multiplicity, and $\kappa$ is $1$ if these roots are all integers and $2$ if they are half-integers. Then, miraculously, in the examples we observe that $\Omega_P(m;q)$ is (for nonnegative integers $m$) a polynomial in~$q$ with nonnegative integer coefficients, which at $q=1$ is equal to $\Omega_P(m)$. Similarly, define
\[e(P;q) \coloneqq \prod_{j=1}^{\#P}(1-q^{j\kappa}) \cdot \lim_{m\to \infty} \Omega_P(m;q) = \prod_{j=1}^{\#P}(1-q^{j\kappa}) \prod_{\alpha}\frac{1}{(1-q^{-\kappa\alpha})}.\] Again, $e(P;q)$ is miraculously a $q$-analog of $e(P)$ in the sense that it is a polynomial in $q$ with nonnegative integer coefficients which at $q=1$ is equal to $e(P)$. (See~\cite{stanton1990fake} for discussion of when $q$-expressions of these forms are polynomials with nonnegative integer coefficients.)
Let $\mathcal{L}(P)$ denote the linear extensions of $P$ and $\mathrm{Pro} \colon \mathcal{L}(P)\to \mathcal{L}(P)$ denote promotion. When we formally define promotion and go over the basics below, we will see why $\mathrm{Pro}^{\#P}$ is the ``right power'' of promotion to look at to find good behavior. For promotion to have good behavior, ideally $\mathrm{Pro}^{\#P}$ is the identity. There are also some interesting examples where $\mathrm{Pro}^{\#P}$ is a non-identity involutive poset automorphism: these are exactly the examples where the roots of $\Omega_P(m)$ are half-integers.
Moreover, whenever promotion of $\mathcal{L}(P)$ has good behavior, $e(P;q)$ is apparently a cyclic sieving polynomial for the action of promotion. We will review the cyclic sieving phenomenon later, but for now the important remark is that if a polynomial with an expression as a product of ratios of $q$-numbers is a cyclic sieving polynomial for some cyclic action, then this action has a very regular orbit structure: for instance, this means there is a product formula enumerating every symmetry class.
Let $\mathcal{PP}^m(P)$ denote the height $m$ $P$-partitions and $\mathrm{Row} \colon \mathcal{PP}^m(P)\to \mathcal{PP}^m(P)$ denote rowmotion. Rowmotion only ever has good behavior when $P$ is graded (i.e., all maximal chains of $P$ have the same length). For a graded poset $P$ we use $r(P)$ to denote the rank of $P$ (i.e., the length of a maximal chain); we will see below why $\mathrm{Row}^{r(P)+2}$ is the ``right power'' of rowmotion to look at. For rowmotion to have good behavior, ideally $\mathrm{Row}^{r(P)+2}$ is the identity; but there are also interesting examples where it is an involutive automorphism, and again this happens when the roots of~$\Omega_P(m)$ are half-integers. Moreover, when rowmotion of $\mathcal{PP}^m(P)$ has good behavior, $\Omega_P(m;q)$ is apparently a cyclic sieving polynomial for this action.
\subsection{Whence all this?}
Where does this heuristic come from, and why might these properties of a poset be related? The short answer is that these properties are indicative of some connection of the poset to \emph{algebra}, especially the representation theory of Lie algebras, Lie groups, Weyl groups, etc.
For instance, it often happens that $\mathcal{PP}^m(P)$ indexes a basis of an irreducible representation of a Lie algebra, in which case we can compute $\Omega_P(m)$ using the Weyl dimension formula. Similarly, the $q$-analog $\Omega_P(m;q)$ can be obtained via a $q$-Weyl dimension formula for the principal specialization of the corresponding character.
Furthermore, the actions of promotion and rowmotion routinely have nice algebraic models as well. In the simplest cases, there are in fact diagrammatic models where the action is realized as rotation; but there are also examples where sophisticated tools from algebra like crystals and canonical bases are required. Indeed, these models are part of what make promotion and rowmotion so fascinating. We will review all known models of promotion and rowmotion below.
For the families of posets which have a direct connection to algebra, it is desirable to prove results uniformly, that is, without relying on classification theorems. We should also note that for some posets (such as the ``chain of V's''), the relevant algebra has apparently yet to be uncovered.
\subsection{The open problems}
As we will see in the subsequent sections, simply by carrying out the program in Section~\ref{subsec:heuristic} for the posets known to have an order polynomial product formula, we obtain many intriguing dynamical conjectures. But our heuristic also presents a few ``meta-problems'':
\begin{problem} \label{prob:connection}
Find formal relations between the properties~\eqref{item:prod_form},~\eqref{item:pro}, and~\eqref{item:row}.
\end{problem}
\begin{problem}
Find more examples of posets satisfying~\eqref{item:prod_form},~\eqref{item:pro}, and~\eqref{item:row}.
\end{problem}
\begin{problem}
Find a unified algebraic explanation for the good (enumerative and dynamical) behavior of the families of posets considered here.
\end{problem}
Regarding Problem~\ref{prob:connection}, we should remark that while there are several papers which study connections between promotion and rowmotion (see, e.g.,~\cite{striker2012promotion, dilks2017resonance, dilks2019rowmotion, bernstein2020promotion, bernstein2022promotion}), we know of none which discusses a direct connection between promotion of~$\mathcal{L}(P)$ and rowmotion of~$\mathcal{PP}^m(P)$.
\subsection{Acknowledgments}
I thank Ira Gessel, Soichi Okada, Rebecca Patrias, Robert Proctor, Victor Reiner, Martin Rubey, Jessica Striker, Bruce Westbury, and Nathan Williams for useful discussion.
\section{The posets} \label{sec:posets}
In this section we introduce the posets which have order polynomial product formulas. We assume the reader is familiar with poset basics as laid out for instance in~\cite[Chapter~3]{stanley2012ec1}. All the properties of posets we are interested in decompose in a natural way over disjoint unions, so we will only consider connected posets. These properties also evidently translate directly from a poset $P$ to its dual $P^*$. In fact, these properties (at least conjecturally) translate from a poset to any other poset with an isomorphic comparability graph (see~\cite{hopkins2019minuscule}). Therefore, we will not separately list posets with isomorphic comparability graphs to the ones below.
\subsection{Shapes and shifted shapes}
Several of the families of posets which have order polynomial product formulas will be Young diagram shapes or shifted shapes.
We assume the reader is familiar with the basics concerning partitions, shapes, and so on. We view a shape as a poset on its boxes with the partial order where $u\leq v$ means box $u$ is weakly northwest of box $v$. The poset objects associated to shapes have different traditional names: e.g., a linear extension is a standard Young tableau, a $P$-partition is a plane partition, etc.
\begin{figure}
\caption{Examples of the families of shapes.}
\label{fig:shapes}
\end{figure}
We now define the relevant families of shapes. The \dfn{rectangle} $R(a,b)$ is the shape for the partition $(b^a)$ using multiplicity notation. The \dfn{staircase} $S(n)$ is the shape for the partition $(n,n-1,\ldots,1)$. The \dfn{shifted trapezoid} $T(a,b)$ is the shifted shape for the strict partition $(a+b-1,a+b-3,a+b-5,\ldots,b-a+1)$. The \dfn{shifted double staircase} $DS(n,k)$ is the shifted shape for the strict partition $(n,n-1,\ldots,1)+(k,k-1,\ldots,1)$. Observe that $T(n,n)=DS(n,n-1)$ and $T(n,n+1)=DS(n,n)$. Figure~\ref{fig:shapes} depicts examples of these shapes.
We also define the arithmetic progression $AP(M,d,\ell)$ to be the shape for the partition $(M-d,M-2d,\ldots,M-\ell d)$. Observe that $R(a,b)=AP(b,0,a)$ and $S(n)=AP(n+1,1,n)$.
\subsection{Root posets}
The root posets are a very interesting family of posets coming from Lie theory, and some of them (specifically, the root posets of coincidental type) have order polynomial product formulas. We give only a very cursory account of root posets here; for a detailed account see~\cite{williams2013cataland} or~\cite[\S8]{hamaker2018doppelgangers}.
\begin{figure}
\caption{Left: some crystallographic root posets. Right: the non-crystallographic root posets of coincidental type.}
\label{fig:root_posets}
\end{figure}
Let $\Phi$ be an irreducible crystallographic root system of rank $n$, and $W$ its Weyl group. We use $\Phi^+$ to denote the positive roots of $\Phi$. We view $\Phi^+$ as a poset, called the \dfn{root poset}, where the partial order is $\alpha \leq \beta$ if and only if $\beta-\alpha$ is a nonnegative sum of simple roots. The poset $\Phi^+$ has $n$ minimal elements (the simple roots) and a unique maximal root (the highest root). It is a graded poset (with the rank function given by height). And it contains important numerical information about $W$. For instance, if $\Phi^+_0,\Phi^+_1,\ldots,\Phi^+_{r(\Phi^+)}$ are the ranks of this poset, then $(\#\Phi^+_0,\#\Phi^+_1,\ldots,\#\Phi^+_{r(\Phi^+)})$ is a partition and its conjugate partition is $(d_n-1,d_{n-1}-1,\ldots,d_1-1)$, where $d_1\leq d_2 \leq \cdots\leq d_n$ are the degrees of $W$. In particular $r(\Phi^+)+2=d_n=h$ is the Coxeter number of $W$. Some of these crystallographic root posets are depicted on the left in Figure~\ref{fig:root_posets}.
Now let $\Phi$ be an irreducible non-crystallographic root system. One could naively apply the same definition of partial order to $\Phi^+$, but it would fail to have the desirable features discussed in the last paragraph. Armstrong~\cite{armstrong2009generalized} gave an \emph{ad hoc} construction of root posets $\Phi^+(I_2(\ell))$ and $\Phi^+(H_3)$ which do have these desirable features; these are depicted on the right in Figure~\ref{fig:root_posets}. For $H_4$, the other non-crystallographic root system, there are either many or no analogous root posets, depending on exactly which properties one chooses~\cite{cuntz2015root}.
Among all the complex reflection groups, a special sub-class are the so-called ``coincidental types,'' which are those whose degrees form an arithmetic progression. For the finite Coxeter groups, these are the types $A_n$, $B_n\simeq C_n$, $I_2(\ell)$, and $H_3$. Their corresponding root posets are the \dfn{root posets of coincidental type}. These are all depicted in Figure~\ref{fig:root_posets}. Observe that $\Phi^+(A_n)\simeq S(n)^*$ and $\Phi^+(B_n)\simeq T(n,n)^*$.
For $\Phi^+$ a crystallographic root poset, there is a canonical involutive poset automorphism $\delta\colon \Phi^+ \to \Phi^+$ defined by $\delta(\alpha) \coloneqq -w_0(\alpha)$ where $w_0\in W$ is the longest element of the Weyl group. For the cases that concern us: $\delta$ is the reflection across the vertical axis of symmetry for $\Phi^+(A_n)$; and $\delta$ is the identity for $\Phi^+(B_n)$. By convention, we define a poset automorphism $\delta\colon \Phi^+ \to \Phi^+$ for $\Phi^+$ a non-crystallographic root poset of coincidental type by: $\delta$ swaps the minimal elements of $\Phi^+(I_2(\ell))$ if $\ell$ is odd, and is the identity if~$\ell$ is even; and $\delta$ is the identity for $\Phi^+(H_3)$.
\begin{figure}
\caption{Left: the other minuscule posets besides the rectangle and shifted staircase. Right: the ``chain of V's.''}
\label{fig:minuscule_posets}
\end{figure}
\subsection{Minuscule posets}
The minuscule posets are another family of posets coming from Lie theory, with many remarkable properties. Again, we give only a cursory account; see~\cite[\S4]{hamaker2018doppelgangers} for a detailed treatment.
Let $\mathfrak{g}$ be a simple Lie algebra over $\mathbb{C}$, with $\Phi$ its root system and $W$ its Weyl group. A non-zero (integral, dominant) weight $\lambda$ of $\mathfrak{g}$ is said to be minuscule if the Weyl group acts transitively on the weights of the corresponding highest weight irreducible representation $V^{\lambda}$. In this case, the Weyl orbit $W\lambda$ is a distributive lattice, where the partial order is again given by root order: i.e., $\nu \leq \mu$ means $\mu-\nu$ is a nonnegative sum of simple roots. The \dfn{minuscule poset} corresponding to the minuscule weight $\lambda$ is the poset of join irreducible elements of the distributive lattice $W\lambda$.
If $\lambda$ is minuscule, then it must be equal to some fundamental weight $\omega_i$, and we can also describe the corresponding minuscule poset $P$ as the order filter in~$\Phi^+$ generated by the corresponding simple root $\alpha_i$: i.e., $P=\{\alpha\in \Phi^+\colon \alpha\geq \alpha_i\}$. The minuscule poset $P$ is always graded, and has a unique minimal and a unique maximal element. Furthermore, it has a canonical involutive poset anti-automorphism $\iota\colon P \to P^{*}$ which is induced from the action of multiplication by $w_0$ on $W\lambda$.
The minuscule posets, up to isomorphism, have been classified: they are the \dfn{rectangle} $R(a,b)$, the \dfn{shifted staircase} $DS(n,0)$, the \dfn{``propeller poset''} $D(n)$, and two \dfn{exceptional posets} $\Lambda_{E_6}, \Lambda_{E_7}$ coming from the types $E_6$ and $E_7$. The last three of these are depicted in Figure~\ref{fig:minuscule_posets}.
\subsection{The ``chain of V's''}
The final family of posets with an order polynomial product formula is the \dfn{``chain of $V$'s''}: $V(n) \coloneqq \begin{tikzpicture}[scale=0.3] \node[shape=circle,fill=black,inner sep=1.5] (B) at (-1,0) {}; \node[shape=circle,fill=black,inner sep=1.5] (C) at (1,0) {}; \node[shape=circle,fill=black,inner sep=1.5] (A) at (0,-1) {}; \draw (B)--(A); \draw (C)--(A); \end{tikzpicture} \times [n]$, the Cartesian product of the $3$-element ``V''-shaped poset $\begin{tikzpicture}[scale=0.3] \node[shape=circle,fill=black,inner sep=1.5] (B) at (-1,0) {}; \node[shape=circle,fill=black,inner sep=1.5] (C) at (1,0) {}; \node[shape=circle,fill=black,inner sep=1.5] (A) at (0,-1) {}; \draw (B)--(A); \draw (C)--(A); \end{tikzpicture}$ and the $n$-element chain $[n]$ (see the right of Figure~\ref{fig:minuscule_posets}). It was first studied by Kreweras and Niederhausen~\cite{kreweras1981solution}. It has a rather different structure than the other examples: for instance, in the other examples each element covers and is covered by at most two elements, but this is not true for $V(n)$. Let us use $\delta\colon V(n)\to V(n)$ to denote the involutive poset automorphism which is reflection across the vertical axis of symmetry of the ``V.''
\section{Order polynomial product formulas} \label{sec:formulas}
In this section we review the order polynomial product formulas for the posets introduced in Section~\ref{sec:posets}, and briefly explain where these formulas come from.
\subsection{Symmetry classes of plane partitions} \label{subsec:sym_classes}
The origin of all these kinds of product formulas is MacMahon's investigation of plane partitions, and the subsequent investigation of plane partition symmetry classes. See~\cite{krattenthaler2016plane} for a complete history.
An $a\times b$ plane partition of height $m$ is an $a\times b$ array $\pi=(\pi_{i,j})_{\substack{1\leq i \leq a,\\ 1\leq j \leq b}}$ of nonnegative integers which is weakly decreasing in rows and columns and for which the largest entry is at most $m$. We denote the set of such plane partitions by $\mathcal{PP}^{m}(a \times b)$. Observe that $\mathcal{PP}^{m}(a \times b)$ is exactly the same as $\mathcal{PP}^{m}(R(a,b))$.
We define the size of a plane partition $\pi \in \mathcal{PP}^{m}(a \times b)$ by $|\pi| \coloneqq \sum_{\substack{1\leq i \leq a, \\ 1 \leq j \leq b}} \pi_{i,j}$. MacMahon~\cite{macmahon1915combinatory} obtained the following celebrated product formula for the size generating function of plane partitions:
\[\sum_{\pi\in \mathcal{PP}^{m}(a \times b)} q^{|\pi|} = \prod_{i=1}^{a} \prod_{j=1}^{b} \frac{(1-q^{m+i+j-1})}{(1-q^{i+j-1})} \]
This is the $q$-analog $\Omega_P(m;q)$ from Section~\ref{subsec:heuristic} for $P=R(a,b)$.
The order polynomials for other posets beyond $R(a,b)$ arise when considering symmetries of plane partitions. The symmetries relevant to us are transposition $\mathrm{Tr}\colon \mathcal{PP}^{m}(n\times n) \to \mathcal{PP}^{m}(n\times n)$ given by $\mathrm{Tr}(\pi)_{i,j} \coloneqq \pi_{j,i}$, and complementation $\mathrm{Co}\colon \mathcal{PP}^{m}(a\times b) \to \mathcal{PP}^{m}(a\times b)$ given by $\mathrm{Co}(\pi)_{i,j} \coloneqq m-\pi_{a+1-i,b+1-j}$.
Plane partitions $\pi \in \mathcal{PP}^{m}(n \times n)$ with $\mathrm{Tr}(\pi)=\pi$ are called symmetric; they are evidently in bijection with $\mathcal{PP}^{m}(DS(n,0))$. MacMahon~\cite{macmahon1899partitions} conjectured, and Andrews~\cite{andrews1978plane} and Macdonald~\cite{macdonald1979symmetric} proved, the following product formula for the size generating function of symmetric plane partitions:
\[ \sum_{\pi\in \mathcal{PP}^{m}(n \times n),\mathrm{Tr}(\pi)=\pi} q^{|\pi|} =\prod_{1\leq i < j \leq n}\frac{(1-q^{2(i+j+m-1)})}{(1-q^{2(i+j-1)})} \cdot \prod_{i=1}^{n} \frac{(1-q^{2i+m-1})}{(1-q^{2i-1})}.\]
There is a second $q$-analog for symmetric plane partitions as well. Namely, for a plane partition $\pi \in \mathcal{PP}^{m}(n \times n)$ define $|\pi|' \coloneqq \sum_{1\leq i \leq j \leq n} \pi_{i,j}$. Then, Bender and Knuth~\cite{bender1972enumeration} conjectured, and Gordon~\cite{gordon1983bender}, Andrews~\cite{andrews1977plane}, and Macdonald~\cite{macdonald1979symmetric} proved:
\[ \sum_{\pi\in \mathcal{PP}^{m}(n \times n),\mathrm{Tr}(\pi)=\pi} q^{|\pi|'} = \prod_{1\leq i \leq j \leq n}\frac{(1-q^{i+j+m-1})}{(1-q^{i+j-1})}.\]
It is this second $q$-analog which is the $\Omega_P(m;q)$ from Section~\ref{subsec:heuristic} for $P=DS(n,0)$.
Plane partitions $\pi \in \mathcal{PP}^{2m}(n \times n)$ with $\mathrm{Tr}(\pi)=\mathrm{Co}(\pi)$ are called transpose-complementary; they are in bijection with $\mathcal{PP}^{m}(S(n))$. These were first enumerated by Proctor (see Theorem~\ref{thm:ap_prod_form}).
Plane partitions $\pi \in \mathcal{PP}^{2m}(n \times n)$ with $\mathrm{Tr}(\pi)=\pi$ and $\mathrm{Co}(\pi)=\pi$ are called symmetric self-complementary; they are in bijection with $\mathcal{PP}^{m}(T(\lfloor n/2 \rfloor, \lceil n/2\rceil))$. These were again first enumerated by Proctor (see Theorem~\ref{thm:doppelganger_prod_form}).
\subsection{Minuscule posets}
Let $P$ be a poset. For a $P$-partition $\pi \in \mathcal{PP}^m(P)$, define its size to be $|\pi| \coloneqq \sum_{p \in P}\pi(p)$. Let $F_P(m;q) \coloneqq \sum_{\pi \in \mathcal{PP}^m(P)} q^{|\pi|}$ denote the size generating function of these $P$-partitions. The basic theory of $P$-partitions (see, e.g., \cite[\S3.15.2]{stanley2012ec1}) says that
\[(1-q)(1-q^2)\cdots(1-q^{\#P}) \cdot \lim_{m\to \infty} F_{P^*}(m;q) = \sum_{L \in \mathcal{L}(P)}q^{\mathrm{maj}(L)},\]
the major index generating function of linear extensions of $P$ (with respect to any fixed natural labeling). We remark that every $d$-complete poset $P$ has a product formula for $\lim_{m\to \infty} F_{P^*}(m;q)$~\cite{proctor2014dcomplete, kim2019hook, naruse2019skew}.
\begin{theorem}[{Proctor~\cite{proctor1984bruhat}}] \label{thm:min_prod_form}
Let $P$ be a minuscule poset. Then
\[ F_P(m;q) = F_{P^*}(m;q) = \prod_{p \in P}\frac{(1-q^{m+r(p)+1})}{(1-q^{r(p)+1})}, \]
where $r\colon P\to \mathbb{N}$ is the rank function of $P$.
\end{theorem}
Theorem~\ref{thm:min_prod_form} gives the $q$-analog $\Omega_P(m;q)$ from Section~\ref{subsec:heuristic} for $P$ a minuscule poset. Theorem~\ref{thm:min_prod_form} is due to Proctor~\cite{proctor1984bruhat} (although observe that for $R(a,b)$ and $DS(n,0)$ it is equivalent to results just mentioned in Section~\ref{subsec:sym_classes}). To prove this theorem he used Standard Monomial Theory, which explains that $\mathcal{PP}^{m}(P)$ indexes a basis of the representation $V^{m\lambda}$ when $P$ is the minuscule poset corresponding to the minuscule weight $\lambda$. Proctor moreover conjectured that the minuscule posets are the \emph{only} posets which have a product formula for $F_P(m;q)$ of this form.
\subsection{Root posets}
Let $W$ be a finite Coxeter group of rank $n$. Define the $q$-$W$-Catalan number by
\[\mathrm{Cat}(W;q) \coloneqq \prod_{i=1}^{n} \frac{(1-q^{h+d_i})}{(1-q^{d_i})}\]
where $d_1,\ldots,d_n$ are the degrees of $W$ and $h$ its Coxeter number. It is uniformly known that $\mathrm{Cat}(W; q)$ is a polynomial with nonnegative integer coefficients (see~\cite{bessis2011cyclic}).
An \dfn{order ideal} of a poset $P$ is a downwards-closed subset $I\subseteq P$ (i.e., a subset for which $y\in I$ and $x\leq y\in P$ implies $x\in I$). We use $\mathcal{J}(P)$ to denote the order ideals of $P$. We have a natural identification $\mathcal{J}(P)\simeq \mathcal{PP}^{1}(P)$ where an order ideal corresponds to the indicator function of its complement.
\begin{theorem}[{Cellini--Papi~\cite{cellini2002adnilpotent}, Haiman~\cite{haiman1994conjectures}}] \label{thm:root_j_prod_form}
Let $\Phi$ be a crystallographic root system and $W$ its Weyl group. Then $\#\mathcal{J}(\Phi^+) = \mathrm{Cat}(W;q \coloneqq 1)$.
\end{theorem}
The proof of Theorem~\ref{thm:root_j_prod_form} is uniform; however, there is no known statistic for order ideals of which $\mathrm{Cat}(W;q)$ is the generating function.
Now let $W$ be a finite Coxeter group of coincidental type. Define the $q$-$W$-multi-Catalan number by
\[\mathrm{Cat}(W,m;q) \coloneqq \prod_{j=0}^{m-1} \prod_{i=1}^{n} \frac{(1-q^{h+d_i+2j})}{(1-q^{d_i+2j})}.\]
It is known, in a case-by-case fashion, that $\mathrm{Cat}(W,m;q)$ is a polynomial in $q$ with nonnegative integer coefficients (for instance, for Type A this follows from~\cite[Theorem~1, Case~`CGI']{proctor1990new}; for other types it can be deduced from consideration of the poset's minuscule doppelg\"{a}nger in the sense of Section~\ref{subsec:doppelganger}).
The multi-Catalan numbers are {\bf not} the same as the more well-known Fuss-Catalan numbers. They first appeared, with this name, in the paper of Ceballos--Labb\'{e}--Stump~\cite{ceballos2014subword} which studied multi-triangulations and the multi-cluster complex. Our interest in these numbers is, however, the following:
\begin{theorem}[{\cite{proctor1983trapezoid, proctor1990new, williams2013cataland}}] \label{thm:root_prod_form}
Let $\Phi$ be a coincidental type root system and~$W$ its corresponding Coxeter group. Then $\Omega_{\Phi^+}(m) = \mathrm{Cat}(W,m;q \coloneqq 1)$.
\end{theorem}
Theorem~\ref{thm:root_prod_form} gives the $q$-analog $\Omega_P(m;q)$ from Section~\ref{subsec:heuristic} for $P$ a root poset of coincidental type. The proof of Theorem~\ref{thm:root_prod_form} is case-by-case, with the difficult cases of $\Phi^+(A_n)$ and $\Phi^+(B_n)$ proved by Proctor~\cite{proctor1990new, proctor1983trapezoid} using representations of the symplectic group. The cases $\Phi^+(I_2(\ell))$ and $\Phi^+(H_3)$ were checked by Williams~\cite{williams2013cataland}.
\subsection{Doppelg\"{a}ngers} \label{subsec:doppelganger}
Following~\cite{hamaker2018doppelgangers}, we call a pair of posets
\[(P,Q) \in \{(R(a,b),T(a,b)^*),(DS(5,0),\Phi^+(H_3)),(D(\ell),\Phi^+(I_2(2\ell)))\}\]
a \dfn{minuscule doppelg\"{a}nger pair}.
\begin{theorem}[\cite{proctor1983trapezoid}] \label{thm:doppelganger_prod_form}
Let $(P,Q)$ be a minuscule doppelg\"{a}nger pair. Then we have $\Omega_P(m) = \Omega_Q(m)$ for all $m\geq 1$.
\end{theorem}
Since minuscule posets have product formulas for their order polynomials (Theorem~\ref{thm:min_prod_form}), Theorem~\ref{thm:doppelganger_prod_form} says their doppelg\"{a}ngers do too. The difficult case of Theorem~\ref{thm:doppelganger_prod_form}, the rectangle/trapezoid pair, was yet again established by Proctor~\cite{proctor1983trapezoid} using representations of the symplectic group. The other cases are an easy check.
In~\cite{hamaker2018doppelgangers}, the authors gave a uniform bijection between $\mathcal{PP}^{m}(P)$ and $\mathcal{PP}^{m}(Q)$ (and also a uniform bijection between $\mathcal{L}(P)$ and $\mathcal{L}(Q)$) for any minuscule doppelg\"{a}nger pair $(P,Q)$. In~\cite{hopkins2019minuscule} it was suggested that minuscule doppelg\"{a}nger pairs are ``very similar.'' We will see more similarities below.
\subsection{Other examples}
\begin{theorem}[{Proctor}] \label{thm:ap_prod_form}
For $P=AP(M,d,\ell)$ an arithmetic progression,
\[ \Omega_{P}(m) = \prod_{\substack{(i,j) \in P, \\ \ell+c(i,j)\leq M-id} } \frac{m+\ell+c(i,j)}{\ell+c(i,j)} \cdot \prod_{\substack{(i,j) \in P, \\ \ell+c(i,j)> M-id} } \frac{(d+1)m+\ell+c(i,j)}{\ell+c(i,j)}, \]
where $c(i,j) \coloneqq j-i$ is the content of the box $(i,j)\in P$.
\end{theorem}
Theorem~\ref{thm:ap_prod_form} is due to Proctor. The case $d=1$ has an interpretation in terms of the representation theory of the symplectic group (see~\cite{proctor1988odd}), and the minuscule case $d=0$ has an interpretation in terms of the representation theory of the general linear group. However, for general $d$, Proctor's proof instead manipulates a determinantal formula due to MacMahon (see~\cite[Exercise 7.101]{stanley1999ec2}).
\begin{theorem}[{Kreweras--Niederhausen~\cite{kreweras1981solution}}] \label{thm:kreweras}
For the ``chain of V's,''
\[\Omega_{V(n)}(m) = \frac{\prod_{i=1}^{n}(m+1+i) \prod_{i=1}^{2n}(2m+i+1)}{(n+1)!(2n+1)!}.\]
\end{theorem}
Theorem~\ref{thm:kreweras} is due to Kreweras and Niederhausen~\cite{kreweras1981solution}. Their proof uses some basic $P$-partition theory together with a lot of clever algebraic manipulation and recurrences. Earlier, Kreweras~\cite{kreweras1965classe} obtained the product formula for~$e(V(n))$, which has an interpretation in terms of a $3$-candidate ballot problem.
\begin{theorem}[{Hopkins--Lai~\cite{hopkins2020plane}; Okada~\cite{okada2020intermediate}}] \label{thm:sds}
For the shifted double staircase,
\[ \Omega_{DS(n,k)}(m) = \prod_{1\leq i \leq j \leq n} \frac{m+i+j-1}{i+j-1} \cdot \prod_{1\leq i \leq j \leq k} \frac{m+i+j}{i+j}.\]
\end{theorem}
The proof of Theorem~\ref{thm:sds} in~\cite{hopkins2020plane} is based on the theory of lozenge tilings of the triangular lattice and the Kuo condensation recurrence technique. In~\cite{okada2020intermediate}, Okada proves Theorem~\ref{thm:sds} using ``intermediate symplectic group''~\cite{proctor1988odd} characters; in fact, he obtains a $q$-analog $\Omega_{DS(n,k)}(m;q)$.
\section{Promotion of linear extensions}
In this section we survey the posets which have good behavior of promotion.
\subsection{Definitions and basics} \label{sec:pro_basics}
Let $P$ be a poset on $n$ elements. For our purposes it is best to represent a linear extension of $P$ as a list $(p_1,p_2,\ldots,p_n)$ of the elements of $P$, each appearing once, for which $p_i \leq p_j$ implies that~$i\leq j$. We then define for each $i=1,\ldots,n-1$ the \dfn{Bender--Knuth involution} $\tau_i\colon \mathcal{L}(P)\to \mathcal{L}(P)$ by
\[ \tau_i(p_1,\ldots,p_{n}) \coloneqq \begin{cases} (p_1,\ldots,p_{i-1},p_{i+1},p_{i},p_{i+2},\ldots,p_{n}) &\textrm{if $p_{i}$, $p_{i+1}$ incomparable;} \\ (p_1,\ldots,p_{n}) &\textrm{otherwise}. \end{cases} \]
\dfn{Promotion} $\mathrm{Pro}\colon \mathcal{L}(P) \to \mathcal{L}(P)$ is the following composition of these $\tau_i$:
\[ \mathrm{Pro} \coloneqq \tau_{n-1} \circ \tau_{n-2} \circ \cdots \circ \tau_1. \]
\dfn{Evacuation} $\mathrm{Evac}\colon \mathcal{L}(P) \to \mathcal{L}(P)$ is the following composition of the $\tau_i$:
\[ \mathrm{Evac} \coloneqq (\tau_1) \circ (\tau_2 \circ \tau_{1}) \circ \cdots \circ (\tau_{n-2} \circ \cdots \circ \tau_{2} \circ \tau_1) \circ (\tau_{n-1} \circ \cdots \circ \tau_{2} \circ \tau_1).\]
There is a duality $\mathcal{L}(P)\to \mathcal{L}(P^*)$ which sends $L=(p_1,\ldots,p_n)$ to $L^*=(p_n,\ldots,p_1)$. Dual evacuation $\mathrm{Evac}^*\colon \mathcal{L}(P) \to \mathcal{L}(P)$ is defined by $\mathrm{Evac}^*(L) \coloneqq \mathrm{Evac}(L^*)^*$.
The following are the basic results concerning promotion and evacuation established by Sch\"{u}tzenberger~\cite{schutzenberger1972promotion}; see also the presentation of Stanley~\cite{stanley2009promotion}:
\begin{prop}[{Sch\"{u}tzenberger~\cite{schutzenberger1972promotion}}] \label{prop:pro_basics}
For any poset $P$,
\begin{itemize}
\item $\mathrm{Evac}$ and $\mathrm{Evac}^{*}$ are both involutions;
\item $\mathrm{Evac} \circ \mathrm{Pro}= \mathrm{Pro}^{-1} \circ \mathrm{Evac}$;
\item $\mathrm{Pro}^{\#P} = \mathrm{Evac}^{*} \circ \mathrm{Evac}$.
\end{itemize}
\end{prop}
Proposition~\ref{prop:pro_basics} explains why $\mathrm{Pro}^{\#P}$ is the ``right'' power of promotion to look at. As mentioned in Section~\ref{subsec:heuristic}, $\mathrm{Pro}^{\#P}$ is ideally the identity, but we will also see interesting examples where it is a non-identity involutive poset automorphism. Let us remark that if $\mathrm{Pro}^{\#P}$ is a poset automorphism, then it must be an involution. Indeed, suppose $\mathrm{Pro}^{\#P}=\delta$ is an automorphism; then by conjugating $\delta=\mathrm{Evac}^{*}\circ \mathrm{Evac}$ by $\mathrm{Evac}$ we get $\delta=\mathrm{Evac} \circ \mathrm{Evac}^{*}$, since evacuation commutes with any automorphism; in other words, we have $\delta=\delta^{-1}$, as claimed.
\subsection{Models}
We now review models (diagrammatic, algebraic, etc.) for promotion and evacuation for certain families of posets. These models lead to a precise understanding of the order and orbit structure of these operators.
\subsubsection{Rotation of noncrossing matchings and webs}
A noncrossing matching of~$[2n]$ is a partition of $[2n]$ into blocks of size $2$ for which there is no pair of crossing blocks. D.~White observed that promotion of standard Young tableaux of $2 \times n$ rectangular shape corresponds to rotation of noncrossing matchings of $[2n]$ (see for instance~\cite[\S8]{rhoades2010cyclic}).
Webs are a class of planar graphs Kuperberg introduced to study the invariant theory of Lie algebras. Khovanov and Kuperberg~\cite{khovanov1999web} (see also Tymoczko~\cite{tymoczko2012simple}) defined a bijection between standard Young tableaux of $3 \times n$ rectangular shape and a subset of $\mathfrak{sl}_3$-webs. Petersen, Pylyavskyy, and Rhoades \cite{petersen2009promotion} showed that under this bijection, promotion of tableaux corresponds to rotation of webs. Russell~\cite{russell2013explicit} (see also Patrias~\cite{patrias2019promotion}) showed that rotation for a broader class of $\mathfrak{sl}_3$-webs corresponds to promotion of certain \emph{semi}standard tableaux of $3\times n$ rectangular shape (see Section~\ref{subsec:semistandard}). Finally, Hopkins and Rubey~\cite{hopkins2020promotion} showed that linear extensions of~$V(n)$ can be encoded as certain $3$-edge-colored $\mathfrak{sl}_3$-webs for which promotion again corresponds to rotation.
In these correspondences between linear extensions and diagrams it is also possible to show that evacuation corresponds to reflection across a diameter (see, e.g., \cite{patrias2021evacuation}), so that the full dihedral action is apparent.
\subsubsection{Rotation of reduced words and Edelman--Greene-style bijections} \label{subsec:edleman-greene}
Let $W$ be a finite Coxeter group with $\Phi$ its root system. For a reduced word $s_{\alpha_{i_1}}s_{\alpha_{i_2}}\cdots s_{\alpha_{i_p}}$ of the longest word $w_0 \in W$, we define its (twisted) rotation to be $s_{\alpha_{i_2}}\cdots s_{\alpha_{i_p}}s_{\delta(\alpha_{i_1})}$, which is again a reduced word of $w_0$.
For $W$ of coincidental type, there is an equivariant bijection between linear extensions of $\Phi^+$ under promotion and reduced words of $w_0$ under rotation. For Type~A, this is due to Edelman and Greene~\cite{edelman1987balanced}. For Type~B, it was established by Haiman~\cite{haiman1989mixed} and Kra\'{s}kiewicz~\cite{kraskiewicz1989reduced}. For $I_2(\ell)$ and $H_3$ it was shown by Williams~\cite{williams2013cataland}.
We believe that evacuation should correspond to (twisted) reflection of the reduced word under these Edelman--Greene-style bijections, but we know of nowhere in the literature where this is explicitly stated.
\subsubsection{Kazhdan-Lusztig cell representations}
The theory of Kazhdan-Lusztig cells gives a canonical basis for any irreducible representation of the symmetric group~$S_n$. Rhoades~\cite{rhoades2010cyclic} showed that for an irreducible symmetric group representation of rectangular shape, the action of the long cycle $c=(1,2,\ldots,n)$ corresponds to promotion of standard tableaux in the Kazhdan-Lusztig basis. Rhoades's result built on earlier results of Berenstein--Zelevinsky~\cite{berenstein1996canonical} and Stembridge \cite{stembridge1996canonical}, who showed that for \emph{any} irreducible symmetric group representation, the action of the longest word $w_0=n(n-1)\cdots1$ corresponds (up to sign) to evacuation of standard tableaux in the Kazhdan-Lusztig basis.
\subsubsection{Crystals and cactus group actions} \label{subsec:crystals_pro}
The cactus group action on the tensor product of crystals for simple Lie algebra representations, as defined by Henriques and Kamnitzer~\cite{henriques2006crystals}, gives rise to notions of evacuation and promotion acting on the corresponding highest weight words of weight zero: see~\cite{fontaine2014cyclic, westbury2016invariant, pfannerer2020promotion}. In the case of $V$ being the vector representation of $\mathfrak{sl}_k$, this cactus group promotion for weight zero highest weight words of $V^{\otimes kn}$ corresponds to promotion of standard Young tableaux of $k\times n$ rectangular shape, and similarly for evacuation.
\subsubsection{The Wronski map and monodromy} For a list $f_1(z), f_2(z), \ldots, f_d(z)$ of~$d$ linearly independent polynomials in $\mathbb{C}[z]$ of degree at most $n-1$, their Wronskian is the determinant of the $d\times d$ matrix whose rows are the derivatives $f^{(i)}_j(z)$ of these polynomials for $i=0,\ldots,d-1$. Up to scale, the Wronskian depends only on the linear span of the $f_j(z)$. We thus obtain the Wronski map from the Grassmannian $\mathrm{Gr}(d,n)$ to projective space $\mathbb{P}^{n-1}$.
Standard Young tableaux of $d \times (n-d)$ rectangular shape index the fibers of the Wronski map. In~\cite{purbhoo2013wronksians}, Purbhoo showed that a certain monodromy action for the Wronski map corresponds to promotion for these tableaux. Moreover, in~\cite{purbhoo2018marvellous}, Purbhoo showed that by restricting to those preimages which lie in either the orthogonal or Lagrangian Grassmannian (under a certain embedding of these Type~B/C Grassmannians into the usual Type~A Grassmannian), one can similarly obtain the action of promotion on standard tableaux of shifted staircase or staircase shape.
\subsubsection{Evacuation of minuscule posets}
The extension of Sch\"{u}tzenberger's theory of jeu de taquin~\cite{schutzenberger1977correspondance} to cominuscule Schubert calculus due to Thomas and Yong~\cite[Lemma 5.2]{thomas2016cominuscule} implies that evacuation of a minuscule poset has a simple description in terms of its canonical anti-automorphism:
\begin{theorem}(\cite{schutzenberger1977correspondance, thomas2016cominuscule}) \label{thm:min_evac}
For a minuscule poset $P$ and $L\in \mathcal{L}(P)$, we have $\mathrm{Evac}(L) = \iota(L)^*$.
\end{theorem}
\subsubsection{Doppelg\"{a}ngers bijections}
The minuscule doppelg\"{a}nger pairs have the same orbit structure of promotion:
\begin{theorem}[{Haiman~\cite{haiman1989mixed, haiman1992dual}}] \label{thm:doppel_pro}
For $(P,Q)$ a minuscule doppelg\"{a}nger pair, there is a bijection between $\mathcal{L}(P)$ and $\mathcal{L}(Q)$ which commutes with promotion.
\end{theorem}
The bijection for the difficult case of Theorem~\ref{thm:doppel_pro}, the rectangle/trapezoid pair, is due to Haiman~\cite{haiman1989mixed, haiman1992dual}. The other cases are an easy check.
\subsection{Order}
We now review the posets $P$ for which $\mathrm{Pro}^{\#P}$ can be described.
\begin{theorem}[\cite{schutzenberger1977correspondance, edelman1987balanced, haiman1992dual}] \label{thm:pro_shapes}
For $P=R(a,b), T(a,b), DS(n,k)$, $\mathrm{Pro}^{\#P}$ is the identity. For $P=S(n)$, $\mathrm{Pro}^{\#P}$ is transposition.
\end{theorem}
The case of Theorem~\ref{thm:pro_shapes} for the rectangle $R(a,b)$ follows from Sch\"{u}tzenberger's theory of jeu de taquin~\cite{schutzenberger1977correspondance}. The case of the staircase $S(n)$ is due to Edelman--Greene~\cite{edelman1987balanced}. The cases of $T(a,b)$ and $DS(n,k)$ are due to Haiman~\cite{haiman1992dual}, who developed a method which recaptures the rectangle and staircase cases as well. In fact, Haiman and Kim~\cite{haiman1992characterization} showed that the \emph{only} shapes and shifted shapes for which $\mathrm{Pro}^{\#P}$ is the identity or transposition are those appearing in Theorem~\ref{thm:pro_shapes}.
\begin{theorem} \label{thm:min_pro}
For $P$ a minuscule poset, $\mathrm{Pro}^{\#P}$ is the identity.
\end{theorem}
Theorem~\ref{thm:min_pro} follows, for instance, from Theorem~\ref{thm:min_evac}.
\begin{theorem} \label{thm:root_pro}
For $P=\Phi^+$ a root poset of coincidental type, $\mathrm{Pro}^{\#P}=\delta$.
\end{theorem}
Theorem~\ref{thm:root_pro} follows, for instance, from the reduced word bijections (see the discussion in Section~\ref{subsec:edleman-greene}).
\begin{theorem}[{Hopkins--Rubey~\cite{hopkins2020promotion}}]
For $P=V(n)$, $\mathrm{Pro}^{\#P}=\delta$.
\end{theorem}
\subsection{Orbit structure}
We now discuss the orbit structure of promotion for the posets with good promotion behavior.
The cyclic sieving phenomenon of Reiner--Stanton--White~\cite{reiner2004cyclic} provides a very compact way to record the orbit structure of a cyclic group action. Recall that if~$X$ is a combinatorial set, $C=\langle c \rangle$ is a cyclic group of order $n$ acting on $X$, and $f(q) \in \mathbb{N}[q]$ is a polynomial with nonnegative integer coefficients, then we say that the triple $(X,C,f(q))$ exhibits \dfn{cyclic sieving} if for all integers $k$,
\[ \#\{x\in X\colon c^k(x)=x\}=f(q \coloneqq \zeta^k),\]
where $\zeta \coloneqq e^{2\pi i/n}$ is a primitive $n$th root of unity. As mentioned in Section~\ref{subsec:heuristic}, cyclic sieving phenomena (CSPs) where the polynomial has a simple product formula are especially valuable, because they imply a product formula for every symmetry class.
For the minuscule posets there is a beautiful such CSP:
\begin{theorem}[\cite{rhoades2010cyclic, purbhoo2018marvellous, sekheri2014CSP}] \label{thm:min_pro_csp}
Let $P$ be a minuscule poset. Then $(\mathcal{L}(P),\langle \mathrm{Pro} \rangle, f(q))$ exhibits cyclic sieving, where
\[ f(q) \coloneqq \sum_{L \in \mathcal{L}(P)}q^{\mathrm{maj}(L)} = (1-q)(1-q^2)\cdots (1-q^{\#P})\cdot \prod_{p \in P} \frac{1}{(1-q^{r(p)+1})}.\]
\end{theorem}
The case of Theorem~\ref{thm:min_pro_csp} for the rectangle $R(a,b)$ is due to Rhoades~\cite{rhoades2010cyclic}. The exceptional cases $\Lambda_{E_6}, \Lambda_{E_7}$ are a finite check which is easily carried out by computer. The case $D(n)$ is trivial. For the shifted staircase $DS(n,0)$, Purbhoo~\cite[Theorem~5.1(i)]{purbhoo2018marvellous} gave an interpretation of this fixed point count in terms of ribbon tableaux; and in~\cite{sekheri2014CSP} it was verified that this matches the CSP evaluation. It would be preferable to have a uniform proof of Theorem~\ref{thm:min_pro_csp}.
For the other posets with good promotion behavior, we have conjectural CSPs:
\begin{conj} \label{conj:root_pro_csp}
Let $\Phi$ be a rank $n$ root system of coincidental type, $W$ its Coxeter group, and $h$ its Coxeter number. Let $C=\langle c\rangle\simeq \mathbb{Z}/nh\mathbb{Z}$ act on $\mathcal{L}(\Phi^+)$ via $c(L) \coloneqq \mathrm{Pro}(L)$. Then $(\mathcal{L}(\Phi^+),C,f(q))$ exhibits cyclic sieving, where
\[f(q) \coloneqq (1-q^2)(1-q^4)\cdots (1-q^{nh})\cdot \lim_{m\to \infty} \mathrm{Cat}(W,m;q) \in \mathbb{N}[q].\]
\end{conj}
The cases $\Phi=B_n,H_3,I_2(2\ell)$ of Conjecture~\ref{conj:root_pro_csp} follow from Theorems~\ref{thm:doppel_pro} and~\ref{thm:min_pro_csp}. The case $\Phi=I_2(2\ell+1)$ is trivial. Hence, the only open case is $\Phi=A_n$, for which N.~Williams conjectured this CSP in a different but equivalent form c.~2010. Purbhoo~\cite[Theorem~5.1(ii)]{purbhoo2018marvellous} again gave an interpretation of this fixed point count in terms of ribbon tableaux, so possibly this conjecture could be resolved as in~\cite{sekheri2014CSP}.
\begin{conj}[{Hopkins--Rubey~\cite{hopkins2020promotion}}]
For all $n\geq 1$, the rational expression
\[ f(q) \coloneqq \frac{\prod_{i=1}^{3n} (1-q^{2i})}{\prod_{i=2}^{2n+1}(1-q^i)\prod_{i=2}^{n+1} (1-q^{2i}) }\]
is in $\mathbb{N}[q]$, and $(\mathcal{L}(V(n)),\langle \mathrm{Pro} \rangle, f(q))$ exhibits cyclic sieving.
\end{conj}
\begin{conj}
For all $1 \leq k \leq n$, the rational expression
\[ f(q) \coloneqq \frac{\prod_{i=1}^{n(n+1)/2 + k(k+1)/2} (1-q^{i})}{\prod_{1\leq i \leq j \leq n}(1-q^{i+j-1})\prod_{1\leq i
\leq j \leq k} (1-q^{i+j})} \]
is in $\mathbb{N}[q]$, and $(\mathcal{L}(DS(n,k)),\langle \mathrm{Pro} \rangle, f(q))$ exhibits cyclic sieving.
\end{conj}
Finally, we note that for evacuation acting on the linear extensions $\mathcal{L}(P)$ of \emph{any} poset $P$, there is a CSP using the (co)major index generating function~\cite[\S3]{stanley2009promotion}.
\section{Rowmotion of order ideals and \texorpdfstring{$P$}{P}-partitions}
In this section we survey the posets which have good behavior of rowmotion.
\subsection{Definitions and basics} \label{sec:row_basics}
Let $P$ be a poset. As discussed in Section~\ref{sec:intro}, rowmotion was originally defined as an action on $\mathcal{J}(P)$. But, following~\cite{einstein2013combinatorial}, we will right away define it as a piecewise-linear action on $\mathcal{PP}^{m}(P)$ for any $m$. For $p\in P$ we define the \dfn{(piecewise-linear) toggle} $\tau_p\colon \mathcal{PP}^{m}(P) \to \mathcal{PP}^{m}(P)$ by
\[\tau_p(\pi)(q) \coloneqq \begin{cases} \pi(q) &\textrm{if $p\neq q$}; \\ \min(\{\pi(r)\colon p \lessdot r\}) + \max(\{\pi(r)\colon r\lessdot p\}) - \pi(p) &\textrm{if $p=q$},\end{cases}\]
where $\min(\varnothing) \coloneqq m$ and $\max(\varnothing) \coloneqq 0$. \dfn{Rowmotion} $\mathrm{Row}\colon \mathcal{PP}^{m}(P)\to \mathcal{PP}^{m}(P)$ is then the following composition of these $\tau_p$:
\[\mathrm{Row} \coloneqq \tau_{p_1}\circ \tau_{p_2} \circ \cdots \circ \tau_{p_n},\]
where $(p_1,\ldots,p_n)$ is any linear extension of $P$. The traditional case of order ideal rowmotion is recovered via the identification $\mathcal{J}(P)\simeq\mathcal{PP}^{1}(P)$.
Rowmotion only ever has good behavior when $P$ is graded (indeed, the name ``rowmotion'' indicates toggling ``row-by-row,'' i.e., ``rank-by-rank''). So from now on assume $P$ is graded and $P_0, P_1, \ldots, P_{r(P)}$ are its ranks. Define $\tau_i \coloneqq \prod_{p\in P_i}\tau_p$ for $i=0,\ldots,r(P)$ (these toggles all commute, so this product makes sense). Observe that $\mathrm{Row} = \tau_{0} \circ \cdots \circ \tau_{r(P)-1} \circ \tau_{r(P)}$. In analogy with promotion/evacuation, let us then define \dfn{rowvacuation} $\mathrm{Rvac}\colon \mathcal{PP}^{m}(P)\to \mathcal{PP}^{m}(P)$ by
\[\mathrm{Rvac} \coloneqq (\tau_{r(P)}) \circ (\tau_{r(P)-1} \circ \tau_{r(P)}) \circ \cdots \circ (\tau_{1} \circ \cdots \circ \tau_{r(P)-1} \circ \tau_{r(P)}) \circ (\tau_{0} \circ \cdots \circ \tau_{r(P)-1} \circ \tau_{r(P)}) \]
There is a duality $\mathcal{PP}^{m}(P) \to \mathcal{PP}^{m}(P^*)$ given by $\pi^{*}(p) \coloneqq m-\pi(p)$. Dual rowvacuation $\mathrm{Rvac}^{*}\colon \mathcal{PP}^{m}(P)\to \mathcal{PP}^{m}(P)$ is defined by $\mathrm{Rvac}^{*}(\pi) \coloneqq \mathrm{Rvac}(\pi^*)^*$.
The arguments from~\cite{stanley2009promotion} which establish Proposition~\ref{prop:pro_basics} are very formal (they only use that the~$\tau_i$ are involutions and that $\tau_i$ and $\tau_j$ commute if $|i-j|\geq 2$) and apply here as well, so that we have:
\begin{prop} \label{prop:row_basics}
For any graded poset $P$,
\begin{itemize}
\item $\mathrm{Rvac}$ and $\mathrm{Rvac}^{*}$ are both involutions;
\item $\mathrm{Rvac} \circ \mathrm{Row}= \mathrm{Row}^{-1} \circ \mathrm{Rvac}$;
\item $\mathrm{Row}^{r(P)+2} = \mathrm{Rvac}^{*} \circ \mathrm{Rvac}$.
\end{itemize}
\end{prop}
Proposition~\ref{prop:row_basics} explains why $\mathrm{Row}^{r(P)+2}$ is the ``right'' power of rowmotion to look at. Again, ideally $\mathrm{Row}^{r(P)+2}$ is the identity, but in some interesting cases it is an involutive poset automorphism. The same argument as for promotion shows that if $\mathrm{Row}^{r(P)+2}$ is a poset automorphism, it must be an involution.
{\bf N.B.}: often ``left-to-right'' toggling of order ideals or $P$-partitions is studied, as opposed to the ``top-to-bottom'' toggling of rowmotion. This left-to-right toggling is routinely called ``promotion,'' but to avoid confusion with promotion of linear extensions, we will not use that term. In practice, the techniques of~\cite{striker2012promotion} can always be used to show that left-to-right toggling is conjugate to top-to-bottom toggling.
\subsection{Models}
We now review models for rowmotion and rowvacuation for certain families of posets. As with the models for promotion and evacuation, these models give us a precise understanding of the order and orbit structure.
\subsubsection{Rotation of binary words, and parabolic cosets of Weyl groups}
Under the ``Stanley-Thomas word'' bijection (see~\cite{stanley2009promotion, propp2015homomesy}), rowmotion of $\mathcal{J}(R(a,b))$ corresponds to rotation of binary words with $a$ $1$'s and $b$ $0$'s. Extending this description, Rush and Shi~\cite{rush2013orbits} showed that if $P$ is the minuscule poset corresponding to the minuscule weight~$\lambda$, then under the natural isomorphism $\mathcal{J}(P) \simeq W/W_{J}$, where $W_J$ is the parabolic subgroup of the Weyl group $W$ stabilizing $\lambda$, the action of rowmotion is conjugate to the action of a Coxeter element $c\in W$.
\subsubsection{Kreweras complementation for noncrossing partitions, in all types}
A noncrossing partition of $[n]$ is a set partition of $[n]$ for which there is no pair of crossing blocks. The noncrossing partitions form a lattice, and the Kreweras complementation is an operator acting on this lattice which has order $2n$ (its square is rotation).
Now let $W$ be a finite Coxeter group and $\Phi$ its root system. By analogy, the noncrossing partitions for $W$ are the elements less than some fixed Coxeter element $c\in W$ in absolute order, and Kreweras complementation is the operator $w\mapsto cw^{-1}$ on these noncrossing partitions. Proving conjectures of Panyushev~\cite{panyushev2009orbits} and Bessis--Reiner~\cite{bessis2011cyclic}, Armstrong, Stump, and Thomas~\cite{armstrong2013uniform} showed that when $W$ is a Weyl group, rowmotion of $\mathcal{J}(\Phi^+)$ is in equivariant bijection with Kreweras complementation of the noncrossing partitions of $W$ (and for non-crystallographic types, see~\cite{cuntz2015root}).
In~\cite{defant2021symmetry}, rowvacuation of $\mathcal{J}(\Phi^+)$ is studied. In particular, it is shown there that the bijection of Armstrong--Stump--Thomas between $\mathcal{J}(\Phi^+)$ and the noncrossing partitions of $W$ transports the action of $\mathrm{Row}^{-1} \circ \mathrm{Rvac}$ to the involution $w \mapsto g w^{-1} g^{-1}$ (for the appropriate involution $g \in W$ depending on $c$). For the diagrammatic models of noncrossing partitions, this involution is a reflection across a diameter. In~\cite{hopkins2020birational} it is shown that, in Type~A, rowvacuation of $\mathcal{J}(\Phi^+)$ is the same as the ``Lalanne--Kreweras involution'' on Dyck paths.
\subsubsection{Promotion and evacuation of semistandard Young tableaux} \label{subsec:semistandard}
A semistandard Young tableau (SSYT) of shape $\lambda$ is a filling of the boxes of $\lambda$ with positive integers that is weakly increasing in rows and strictly increasing in columns. Let $\mathrm{SSYT}(\lambda,k)$ denote the set of semistandard tableaux of shape~$\lambda$ with entries at most $k$. For $i=1,\ldots,k-1$, the $i$th Bender--Knuth involution is an operator on $\mathrm{SSYT}(\lambda,k)$ which exchanges the number of $i$'s and $(i+1)$'s in a tableau. Promotion and evacuation can then be defined as operators on $\mathrm{SSYT}(\lambda,k)$ which are the appropriate compositions of these Bender--Knuth involutions.
Promotion and evacuation of semistandard Young tableaux are well-studied actions, with several algebraic guises (e.g., see Section~\ref{sec:ssyt_canonical} below). In particular, promotion of SSYTs of rectangular shape is used in the combinatorial definition of affine Type~A crystals, or more precisely, the so-called ``Kirillov-Reshetikhin crystals;'' see~\cite{shimozono2002affine, bandlow2010uniqueness}. In some sense promotion corresponds to the cyclic symmetry of the affine Dynkin diagram.
It is known that rowmotion acting on $\mathcal{PP}^{m}(R(a,b))$ is in equivariant bijection with promotion acting on $\mathrm{SSYT}(m^a,a+b)$ (see, e.g.,~\cite{kirillov1995groups} and~\cite[Appendix A]{hopkins2019cyclic}). And likewise for rowvacuation and evacuation.
\subsubsection{Canonical bases, from quantum groups and cluster algebras} \label{sec:ssyt_canonical}
The theory of quantum groups gives canonical bases for Lie group representations. Rhoades \cite{rhoades2010cyclic} (see also~\cite{rush2020restriction}) showed that for an irreducible representation of the general linear group of rectangular shape, the action of the long cycle on the dual canonical basis corresponds to the action of promotion on rectangular SSYTs. Again, Rhoades's result built on earlier results of Berenstein--Zelevinsky~\cite{berenstein1996canonical} and Stembridge~\cite{stembridge1996canonical}, who showed that for \emph{any} irreducible representation of the general linear group, the action of the longest word corresponds (up to sign) to evacuation of tableaux in the dual canonical basis. Thanks to the discussion in Section~\ref{subsec:semistandard} above, this means that these actions are conjugate to rowmotion and rowvacuation of $\mathcal{PP}^{m}(R(a,b))$.
The theory of cluster algebras also gives canonical bases. Shen and Weng \cite{shen2018cyclic} showed that the action of the cyclic shift on the theta basis of the coordinate ring of the Grassmannian -- which is a canonical basis coming from its structure as a cluster algebra -- is also conjugate to the action of rowmotion on $\mathcal{PP}^{m}(R(a,b))$.
Recently, Gao, Lam, and Xu~\cite{gao022electrical} introduced the grove algebra, a version of the coordinate ring of the Lagrangian Grassmannian. They conjectured the existence of an electrical canonical basis for this grove algebra, which comes with a cyclic action. We believe that this cyclic action on electrical canonical basis elements should be in equivariant bijection with rowmotion of $\mathcal{PP}^{m}(\Phi^+(A_n))$.
\subsubsection{Crystals and cactus group actions} \label{sec:crystals}
Recall the cactus group promotion of highest weight words of weight zero for tensor products of crystals discussed in Section~\ref{subsec:crystals_pro}. As explained in~\cite[Example 2.4]{pfannerer2020promotion}, for tensor products of the spin representation of the spin group, these weight zero highest weight words correspond to fans of Dyck paths, which are in bijection with $\mathcal{PP}^{m}(\Phi^+(A_n))$. Moreover, using the techniques of~\cite{pfannerer2020promotion} it can be shown that cactus group promotion of these words is in equivariant bijection with rowmotion of $\mathcal{PP}^{m}(\Phi^+(A_n))$, and similarly for evacuation and rowvacuation. Promotion of fans of Dyck paths is studied in detail in the recent paper~\cite{pappe2022promotion}.
\subsubsection{Quiver representations and reflection functors}
In~\cite{garver2018minuscule}, Garver, Patrias, and Thomas studied minuscule posets and their $P$-partitions from the perspective of quiver representations. Fixing a quiver $Q$ whose underlying graph is a Dynkin diagram, and a node $i$ of this Dynkin diagram corresponding to a minuscule weight~$\omega_i$, they showed that the Jordan form of a generic nilpotent endomorphism gives a bijection from representations $X$ of $Q$ with support at $i$ to $P$-partitions for the corresponding minuscule poset~$P$. Moreover, they described the piecewise-linear toggles in terms of reflection functors. In this way, they were able to analyze rowmotion for $P$-partitions of minuscule posets using quiver representations.
\subsubsection{Rowvacuation of minuscule posets}
Rowvacuation of a minuscule poset has a simple description in terms of its canonical anti-automorphism:
\begin{theorem}[{\cite{grinberg2015birational2, okada2020birational}}] \label{thm:min_rvac}
For a minuscule poset $P$ and $\pi \in \mathcal{PP}^{m}(P)$, we have $\mathrm{Rvac}(\pi) = \iota(\pi)^*$.
\end{theorem}
The case $P=R(a,b)$ of Theorem~\ref{thm:min_rvac} was proved by Grinberg--Roby~\cite{grinberg2015birational2}; the rest of the theorem was proved, in a case-by-case manner, by Okada~\cite{okada2020birational}. Those authors described their results in terms of ``reciprocity'' of rowmotion, but it is easy to translate their results to this statement about rowvacuation.
\subsubsection{Doppelg\"{a}ngers bijections}
We conjectured that minuscule doppelg\"{a}nger pairs have the same orbit structure of rowmotion:
\begin{conj}[{\cite{hopkins2019minuscule}}] \label{conj:doppel_row}
For $(P,Q)$ a minuscule doppelg\"{a}nger pair, there is a bijection between $\mathcal{PP}^{m}(P)$ and $\mathcal{PP}^{m}(Q)$ which commutes with rowmotion.
\end{conj}
The case $m=1$ of Conjecture~\ref{conj:doppel_row} was proved in~\cite{dao2019trapezoid}, using the bijection of~\cite{hamaker2018doppelgangers}.
\subsubsection{Symmetry classes of plane partitions}
Grinberg and Roby~\cite{grinberg2015birational2} explained how rowmotion for the three ``triangular'' posets $DS(n,0)$, $\Phi^+(A_n)$, and $\Phi^+(B_n)$ can be understood by imposing symmetries on the rectangle:
\begin{lemma}[{Grinberg-Roby~\cite{grinberg2015birational2}; see also~\cite[\S5]{hopkins2019cyclic}}] \label{lem:row_syms}
\begin{itemize}
\item There is a $\mathrm{Row}$-equivariant bijection between $\mathcal{PP}^{m}(DS(n,0))$ and the subset $\pi \in \mathcal{PP}^{m}(n\times n)$ with $\mathrm{Tr}(\pi)=\pi$.
\item There is a $\mathrm{Row}$-equivariant bijection between $\mathcal{PP}^{m}(\Phi^+(A_n))$ and the subset of $\pi \in \mathcal{PP}^{2m}((n+1)\times (n+1))$ with $\mathrm{Row}^{n+1}(\pi)=\mathrm{Tr}(\pi)$.
\item There is a $\mathrm{Row}$-equivariant bijection between $\mathcal{PP}^{m}(\Phi^+(B_n))$ and the subset of $\pi \in \mathcal{PP}^{2m}(2n \times 2n)$ with $\mathrm{Tr}(\pi)=\pi$ and $\mathrm{Row}^{2n}(\pi)=\pi$.
\end{itemize}
\end{lemma}
We note that a similar ``triangle-into-rectangle'' embedding, but for linear extensions rather than $P$-partitions, was studied by Pon and Wang~\cite{pon2011promotion}.
\subsection{Order}
We now review the posets $P$ for which $\mathrm{Row}^{r(P)+2}$ acting on $\mathcal{PP}^{m}(P)$ can be described.
\begin{theorem}[\cite{grinberg2015birational2,garver2018minuscule,okada2020birational}] \label{thm:min_row}
For $P$ a minuscule poset, we have that $\mathrm{Row}^{r(P)+2}$ is the identity.
\end{theorem}
\begin{theorem}[\cite{grinberg2015birational2}] \label{thm:root_row}
For $P=\Phi^+$ a root poset of coincidental type, we have $\mathrm{Row}^{r(P)+2}=\delta$.
\end{theorem}
Theorems~\ref{thm:min_row} and~\ref{thm:root_row} were essentially proved, in a case-by-case fashion, by Grinberg--Roby~\cite{grinberg2015birational2}; the only case they could not address was $\Lambda_{E_7}$, which was resolved in~\cite{garver2018minuscule} and~\cite{okada2020birational}.
Conjecture~\ref{conj:doppel_row} and Theorem~\ref{thm:min_row} together imply that the trapezoid should have $\mathrm{Row}^{r(P)+2}$ equal to the identity (this was also conjectured by N.~Williams~\cite[Conjecture~75]{grinberg2015birational2}). But beyond the case $m=1$ this remains open.
The only other poset which apparently has good (piecewise-linear) rowmotion behavior is $V(n)$:
\begin{conj}
For $P=V(n)$, we have $\mathrm{Row}^{r(P)+2}=\delta$.
\end{conj}
\subsection{Orbit structure}
There are very nice conjectural CSPs for all the posets with good $P$-partition rowmotion behavior:
\begin{conj}[{\cite{hopkins2019minuscule}}] \label{conj:min_row_csp}
Let $P$ be a minuscule poset. Then the triple $(\mathcal{PP}^{m}(P),\langle \mathrm{Row} \rangle,F_P(m;q))$ exhibits cyclic sieving.
\end{conj}
\begin{conj}[{\cite{hopkins2019minuscule}}] \label{conj:root_row_csp}
Let $\Phi$ be a root system of coincidental type, $W$ its Coxeter group, and $h$ its Coxeter number. Let $C=\langle c\rangle\simeq \mathbb{Z}/2h\mathbb{Z}$ act on $\mathcal{PP}^{m}(\Phi^+)$ via $c(\pi) \coloneqq \mathrm{Row}(\pi)$. Then $(\mathcal{PP}^{m}(\Phi^+),C,\mathrm{Cat}(W,m;q))$ exhibits cyclic sieving.
\end{conj}
The case $m=1$ of Conjecture~\ref{conj:min_row_csp} was proved by Rush--Shi~\cite{rush2013orbits}. The case $m=1$ of Conjecture~\ref{conj:root_row_csp} was proved by Armstrong--Stump--Thomas~\cite{armstrong2013uniform} (and in fact they showed this for \emph{any} root system; see also~\cite{cuntz2015root}). The case of Conjecture~\ref{conj:min_row_csp} for the rectangle $R(a,b)$ was proved by Rhoades~\cite{rhoades2010cyclic} (and later, Shen--Weng~\cite{shen2018cyclic}). All other cases of these conjectures are open. We note the Type A case of Conjecture~\ref{conj:root_row_csp} was conjectured also by J.~Propp c.~2016.
Via Lemma~\ref{lem:row_syms}, the main remaining cases of Conjectures~\ref{conj:min_row_csp} and~\ref{conj:root_row_csp} can be translated into statements about the numbers of fixed points of various subgroups of $\langle \mathrm{Row}, \mathrm{Tr}\rangle$ acting on $\mathcal{PP}^{m}(n\times n)$ being counted by CSP-type evaluations. In~\cite{hopkins2019cyclic}, it was shown, building off the work of Rhoades~\cite{rhoades2010cyclic}, that the number of fixed points of any \emph{element} of $\langle \mathrm{Row}, \mathrm{Tr}\rangle$ acting on $\mathcal{PP}^{m}(n\times n)$ is given by a CSP-type evaluation of polynomial with a nice product formula.
Conjectures~\ref{conj:doppel_row} and~\ref{conj:min_row_csp} together describe the orbit structure of rowmotion for minuscule doppelg\"{a}ngers (and their claims are consistent with Conjecture~\ref{conj:root_row_csp}).
Finally, for $V(n)$ we conjecture:
\begin{conj}
For all $n,m \geq 1$, the rational expression
\[ f(q) \coloneqq \prod_{i=2}^{2n+1} \frac{(1-q^{2m+i})}{(1-q^i)} \cdot \prod_{i=2}^{n+1}\frac{(1-q^{2m+2i})}{(1-q^{2i})}\]
is in $\mathbb{N}[q]$, and $(\mathcal{PP}^{m}(V(n)),\langle \mathrm{Row} \rangle, f(q))$ exhibits cyclic sieving.
\end{conj}
{}
\end{document} |
\begin{document}
\title{Continuous dynamical protection of two-qubit entanglement from uncorrelated dephasing, bit flipping, and dissipation}
\author{F. F. Fanchini}
\email{felipe@ifsc.usp.br}
\author{R. d. J. Napolitano}
\affiliation{Instituto de F\'{\i}sica de S\~{a}o Carlos, Universidade de S\~{a}o Paulo, Caixa Postal 369, 13560-970, S\~{a}o Carlos, SP, Brazil}
\date{\today}
\begin{abstract}
We show that a simple arrangement of external fields, consisting of a static component and an orthogonal rotating component, can continuously decouple a two-qubit entangled state from uncorrelated dephasing, bit flipping, and dissipation at finite temperature. We consider a situation where an entangled state shared between two non-interacting qubits is initially prepared and left to evolve under the environmental perturbations and the protection of external fields. To illustrate the protection of the entanglement, we solve numerically a master equation in the Born approximation, considering independent boson fields at the same temperature coupled to the different error agents of each qubit.
\end{abstract}
\pacs{03.67.Pp, 03.67.Lx, 03.67.-a, 03.65.Yz}
\maketitle
\section{Introduction}
Entanglement of qubits is a fundamental resource of quantum information processing \cite{nielsen00}. A pure entangled state, initially prepared as a superposition of factorized states, inevitably decoheres under environmental noise \cite{zurek91} and could even disentangle completely within a finite time interval \cite{diosi03}. Consequently, any potentially successful design of a device intended to perform quantum computation must anticipate ways to preserve qubit entanglement.
There are very sophisticated methods developed to protect quantum states, like error-correcting codes \cite{shor95} and strategies based on decoherence-free subspaces and subsystems \cite{zanardi97}, where logical qubits do not necessarily coincide with physical qubits. This usually requires that the quantum information corresponding to $N$ logical qubits be stored in more than $N$ physical qubits. It is also possible to protect the quantum information stored directly in physical qubits by using the method of dynamical decoupling \cite{viola98}. We expect that a fully functional quantum computer will use all these protecting tools in a coordinated way for maximum efficiency and fidelity. Here, due to its relative simplicity, we focus on the continuous version of dynamical decoupling \cite{romero04,fanchini07} and show that it can protect a two-qubit entangled state at finite temperature from uncorrelated dephasing, bit flipping, and dissipation.
The paper is organized as follows. Section II presents the total Hamiltonian describing the qubit system and the rest of the universe. We also introduce the general method of dynamical decoupling employed to derive the simple control fields. The master equation for the evolution of the two-qubit density matrix is derived in Sec. III. The description of the environmental errors is given in Sec. IV, where we demonstrate the efficacy of the continuous dynamical decoupling in protecting entanglement. Finally, we conclude in Sec. V.
\section{The total Hamiltonian}
The Hamiltonian for the two-qubit system and the environment is written as
\begin{eqnarray}
H(t)=H_{0}(t)+H_{E}+H_{\rm int},\label{H}
\end{eqnarray}
where $H_{0}(t)$ is the qubit-system Hamiltonian, including terms describing the action of the control fields, $H_{E}$ is the environmental Hamiltonian, and $H_{\rm int}$ is the term representing the interaction between the qubits and their surroundings. The general prescription for dynamical decoupling \cite{viola98,facchi05} consists of finding a control Hamiltonian, acting only on the qubit system, such that its corresponding unitary evolution operator $U_{c}(t)$ is periodic and
\begin{eqnarray}
\int ^{t_{c}} _{0} U^{\dagger}_{c}(t)H_{\rm int}U_{c}(t) dt=0,\label{integral}
\end{eqnarray}
where $t_{c}$ is the period of $U_{c}(t)$. In the following we explain a possible strategy to find a combination of static and simple oscillating external fields leading to a periodic unitary operator $U_{c}(t)$ satisfying Eq. (\ref{integral}).
The interaction Hamiltonian $H_{\rm int}$ describing two qubits coupled to their surroundings is written as
\begin{eqnarray}
H_{\rm int}={\bf B}_{1}\cdot {\bm \sigma}_{1}+{\bf B}_{2}\cdot {\bm \sigma}_{2},\label{Hint}
\end{eqnarray}
where ${\bf B}_{k}=\sum _{m=1} ^{3}B_{k,m}{\bf \hat{x}}_{m}$, for $k=1,2$, with ${\bf \hat{x}}_{1}\equiv{\bf \hat{x}}$, ${\bf \hat{x}}_{2}\equiv{\bf \hat{y}}$, ${\bf \hat{x}}_{3}\equiv{\bf \hat{z}}$, and $B_{k,m}$, for $k=1,2$ and $m=1,2,3$, are operators that act on the environmental Hilbert space. Here, for $k=1,2$, ${\bm \sigma}_{k}={\bf \hat{x}}\sigma_{k,x}+{\bf \hat{y}}\sigma_{k,y}+{\bf \hat{z}}\sigma_{k,z}$, and $\sigma_{k,x}$, $\sigma_{k,y}$, and $\sigma_{k,z}$ are the Pauli matrices acting on qubit $k$. The unitary operator $\exp(-2in_{x}\pi t \sigma_{1,x}/t_{c})$ is periodic with period $t_c$ for any integer $n_{x}\neq 0$. If we identify this operator with $U_{c}(t)$ in Eq. (\ref{integral}) and take $H_{\rm int}$ as given by Eq. (\ref{Hint}), the integration does not give zero in general, but eliminates all terms proportional to $\sigma_{1,y}$ and $\sigma_{1,z}$. If, to replace $U_{c}(t)$ in Eq. (\ref{integral}), we consider the operator $\exp(-2in_{x}\pi t \sigma_{1,x}/t_{c})\exp(-2in_{z}\pi t \sigma_{1,z}/t_{c})$ instead, where $n_{x}$ and $n_{z}$ are non-zero integers, then the integration eliminates all terms proportional to ${\bm \sigma}_{1}$ if $n_{x}\neq n_{z}$, there remaining only the term $t_{c}{\bf B}_{2}\cdot {\bm \sigma}_{2}$. Hence, to satisfy Eq. (\ref{integral}), we choose
\begin{eqnarray}
U_{c}(t)=U_{2}(t)U_{1}(t)=U_{1}(t)U_{2}(t),\label{Uc}
\end{eqnarray}
since ${\bm \sigma}_{1}$ and ${\bm \sigma}_{2}$ commute, where
\begin{eqnarray}
U_{k}(t)=\exp\left(-i \frac{2n_{x}\pi t}{t_{c}}\sigma_{k,x}\right)\exp\left(-i \frac{2n_{z}\pi t}{t_{c}}\sigma_{k,z}\right),\label{Uk}
\end{eqnarray}
for $k=1,2$. Hence, if the control Hamiltonian is written as $H_{c}(t)={\bm \Omega}(t)\cdot\left( {\bm \sigma}_{1}+{\bm \sigma}_{2}\right)$, then Eqs. (\ref{Uc}) and (\ref{Uk}) imply the very simple external-field configuration of our previous work on single-qubit operations, Ref. \cite{fanchini07}:
\begin{eqnarray}
{\bm \Omega}(t)={\bf \hat{x}}n_{x}\omega +n_{z}\omega \left[{\bf \hat{z}} \cos\left( n_{x}\omega t \right)-{\bf \hat{y}} \sin\left( n_{x}\omega t \right) \right] ,\label{Omega}
\end{eqnarray}
where $\omega =2\pi/t_{c}$.
To study the protection of entanglement against environmental sources of noise, we focus on a situation where the two qubits are prepared in an entangled pure state at time $t=0$. We further assume that the qubits do not interact with each other and, if they could be isolated from the rest of the universe, their non-local state would remain unchanged. Thus, let us take $H_{q}=\omega_{0}(\sigma_{1,z}+\sigma_{2,z})$ as the unperturbed two-qubit Hamiltonian, using units for which $\hbar =1$. Let $\tau$ be the time interval during which we intend to preserve the entanglement. We then choose $t_{c}=\tau /N$, where $N$ is an integer, so that $U_{k}(\tau )=I$, for $k=1,2$.
Equation (\ref{H}) gives the Hamiltonian for the evolution of the ket $\left|\Psi (t)\right\rangle $ representing the joint state of the qubits and their environments. For the sake of simplicity, we assume there is a global, static control field along the $z$ axis chosen to cancel $H_{q}$ exactly. Hence, the remaining terms of $H_{0}(t)$ represent the action of additional control fields. We identify the evolution dictated by $H_{0}(t)$ with the action of the unitary operator $U_{c}(t)$ of Eqs. (\ref{Uc}) and (\ref{Uk}).
Here we show that the very simple field configuration of Eq. (\ref{Omega}) can also prevent a two-qubit entangled state from disentangling due to uncorrelated dephasing, bit flipping, and dissipation at finite temperature. We emphasize that Eq. (\ref{Omega}) is a simple combination of a static field along the $x$ axis and a rotating field in the $yz$ plane. Moreover, addressing each qubit independently is not necessary; the field is supposed to be spatially uniform in the neighborhood surrounding both qubits.
\section{The master equation}
In the interaction picture, the Hamiltonian is given by
\begin{eqnarray}
H_{I}(t)=\sum _{k=1} ^{2}\sum _{m=1} ^{3}U^{\dagger}_{E}(t)B_{k,m}U_{E}(t) U^{\dagger}_{c} (t)\sigma_{k,m}U_{c} (t),\label{HI}
\end{eqnarray}
where $\sigma_{k,1}\equiv \sigma_{k,x}$, $\sigma_{k,2}\equiv \sigma_{k,y}$, $\sigma_{k,3}\equiv \sigma_{k,z}$, $U_{E}(t)=\exp(-iH_{E}t)$, and we have used Eq. (\ref{Hint}). The quantities $U^{\dagger}_{c} (t)\sigma_{k,m}U_{c} (t)$, for $k=1,2$ and $m=1,2,3$, are rotations of $\sigma_{k,m}$, whose matrix elements, $R_{m,n}(t)$, are real functions of time:
\begin{eqnarray}
U^{\dagger}_{c} (t)\sigma_{k,m}U_{c} (t)= \sum_{n =1}^{3} R_{m,n}(t)\sigma_{k,n}. \label{rot}
\end{eqnarray}
If we define the operators $E_{k,m}(t)=U^{\dagger}_{E}(t)B_{k,m}U_{E}(t)$, for $k=1,2$ and $m=1,2,3$, and use Eqs. (\ref{HI}) and (\ref{rot}), then the interaction Hamiltonian becomes
\begin{eqnarray}
H_{I}(t)=\sum _{k=1} ^{2}\sum _{m=1} ^{3}\sum_{n =1}^{3}R_{m,n}(t)E_{k,m}(t) \sigma_{k,n}.\label{HI2}
\end{eqnarray}
In the interaction picture, the Redfield master equation describing the temporal evolution of the two-qubit reduced density matrix, $\rho _{I}(t)$, is written as \cite{breuer02}:
\begin{eqnarray}
\frac{d\rho _{I}(t)}{dt}=-\int^{t}_{0}dt^{\prime} {\rm Tr}_{E}\left\{{\left[H_{I}(t),\left[H_{I}(t^{\prime}),\rho _{E}\rho _{I}(t)\right]\right]}\right\},\label{master}
\end{eqnarray}
where we have assumed the noise is low enough that the Born approximation is valid. We also notice that Eq. (\ref{master}) is not a Markovian master equation, since the dynamical-decoupling process occurs in a time scale shorter than the environmental correlation time; this is the reason we keep $t$ as the upper limit of the integral on the right-hand side of Eq. (\ref{master}) (cf. p. 132 of Ref. \cite{breuer02}). Here, $\rho _{E}$ is the initial environmental density matrix, $\rho _{E}=\exp(-\beta H_{E})/Z$, where $Z$ is the partition function given by $Z={\rm Tr}_{E}\left[\exp(-\beta H_{E})\right]$, $\beta =1/k_{B}T$, $k_{B}$ is the Boltzmann constant, and $T$ is the absolute temperature, assumed to be the same in the surroundings of both qubits. By substituting Eq. (\ref{HI2}) into Eq. (\ref{master}) we encounter the quantities ${\rm Tr}_{E}\left[ E_{k,m}(t)\rho_{E}E_{k^{\prime},m^{\prime}}(t^{\prime})\right] $, for $k,k^{\prime}=1,2$ and $m,m^{\prime}=1,2,3$. To illustrate our methodology in a simple manner, we suppose that the reservoir operators at the position of one qubit are uncorrelated with the reservoir operators at the position of the other. Moreover, we also assume the qubits and their respective environments are identical. Thus, we can write ${\rm Tr}_{E}\left[ E_{k,m}(t)\rho_{E}E_{k^{\prime},m^{\prime}}(t^{\prime})\right] =\delta_{k,k^{\prime}}C_{m,m^{\prime}}(t,t^{\prime})$, for $k,k^{\prime}=1,2$ and $m,m^{\prime}=1,2,3$, where $C_{m,m^{\prime}}(t,t^{\prime})$ is the correlation function between components $m$ and $m^{\prime}$ of environmental field operators calculated at the same qubit position. We, thus, define the quantities
\begin{eqnarray}
D_{p,q}(t)=\sum_{m,n=1}^{3}R_{m,p}(t)\int _{0}^{t}dt^{\prime}R_{n,q}(t^{\prime})C_{m,n}(t,t^{\prime}),\label{Dab}
\end{eqnarray}
for $p,q=1,2,3$. Hence, the master equation now becomes
\begin{eqnarray}
\frac{d\rho_{I}(t)}{dt}=\sum_{k=1}^{2}\sum_{p,q=1}^{3}D_{p,q}(t)\left[ \sigma _{k,p},\rho _{I}(t)\sigma _{k,q}\right] \nonumber \\
+\sum_{k=1}^{2}\sum_{p,q=1}^{3}D^{\star}_{p,q}(t)\left[ \sigma _{k,q}\rho _{I}(t),\sigma _{k,p}\right],\label{master2}
\end{eqnarray}
where we have assumed the environmental fields are Hermitian.
\section{Continuous dynamical decoupling}
To solve Eq. (\ref{master2}) we need $D_{p,q}(t)$, Eq. (\ref{Dab}), and, therefore, we must calculate the correlation functions $C_{m,n}(t,t^{\prime})$, for $m,n=1,2,3$. Here, as stated above, we consider independent dephasing, bit flipping, and dissipation. Associated with these errors, we introduce six independent boson fields, three at each qubit position, all of them at the same finite temperature $T$. Accordingly, the terms appearing in Eq. (\ref{Hint}) can be written as
\begin{eqnarray}
{\bf B}_{k}\cdot {\bm \sigma}_{k}=\sigma _{k,z}\sum _{\lambda}\left[ g _{1,\lambda}a_{k,\lambda} +g ^{\star}_{1,\lambda}a^{\dagger}_{k,\lambda}\right]\nonumber \\
+\sigma _{k,x}\sum _{\lambda}\left[ g _{2,\lambda}b_{k,\lambda} +g ^{\star}_{2,\lambda}b^{\dagger}_{k,\lambda}\right]\nonumber \\
+\left(\sigma _{k,x}+i\sigma _{k,y}\right) \sum _{\lambda}g _{3,\lambda}c_{k,\lambda} \nonumber \\
+\left(\sigma _{k,x}-i\sigma _{k,y}\right) \sum _{\lambda}g ^{\star}_{3,\lambda}c^{\dagger}_{k,\lambda} ,\label{baths}
\end{eqnarray}
for $k=1,2$, where $g _{1,\lambda}$, $g _{2,\lambda}$, and $g _{3,\lambda}$ are complex coupling constants that do not depend on the qubit-position index $k$, reflecting the fact that the qubits are surrounded by identical environments, $a_{k,\lambda}$, $b_{k,\lambda}$, and $c_{k,\lambda}$ are, respectively, the annihilation operators for mode $\lambda$ of the boson field associated with dephasing, bit flipping, and dissipation, with respective creation operators $a^{\dagger}_{k,\lambda}$, $b^{\dagger}_{k,\lambda}$, and $c^{\dagger}_{k,\lambda}$. The field operators depend on $k$, since, although identical, the qubit environments are uncorrelated. The only non-zero commutators of these operators are: $[a_{k,\lambda},a^{\dagger}_{k,\lambda}]=1$, $[b_{k,\lambda},b^{\dagger}_{k,\lambda}]=1$, and $[c_{k,\lambda},c^{\dagger}_{k,\lambda}]=1$, for $k=1,2$, and all $\lambda$. Therefore, we take the environmental Hamiltonian as given by $H_{E}=\sum_{k=1}^{2}\sum_{\lambda}\left[ \omega _{1,\lambda}a^{\dagger}_{k,\lambda}a_{k,\lambda}+\omega _{2,\lambda}b^{\dagger}_{k,\lambda}b_{k,\lambda} + \omega _{3,\lambda}c^{\dagger}_{k,\lambda}c_{k,\lambda}\right] $, where $\omega _{1,\lambda}$, $\omega _{2,\lambda}$, and $\omega _{3,\lambda}$ are the $\lambda$th-mode frequencies of the fields associated with dephasing, bit flipping, and dissipation, respectively. Using Eq. (\ref{baths}), we can calculate the correlation functions $C_{m,m^{\prime}}(t,t^{\prime})={\rm Tr}_{E}\left[ E_{1,m}(t)\rho_{E}E_{1,m^{\prime}}(t^{\prime})\right] ={\rm Tr}_{E}\left[ E_{2,m}(t)\rho_{E}E_{2,m^{\prime}}(t^{\prime})\right] $, for $m,m^{\prime}=1,2,3$. The only non-zero correlations are:
\begin{eqnarray}
C_{1,1}(t,t^{\prime})&=&{\cal K}_{2}(t-t^{\prime})+2{\rm Re} \left[ {\cal L}_{2}(t-t^{\prime})\right] \nonumber \\
& &+{\cal K}_{3}(t-t^{\prime})+2{\rm Re} \left[ {\cal L}_{3}(t-t^{\prime})\right] ,\label{C11}\\
C_{1,2}(t,t^{\prime})&=&i{\cal K}_{3}(t-t^{\prime})-2{\rm Im} \left[ {\cal L}_{3}(t-t^{\prime})\right] ,\label{C12}\\
C_{2,1}(t,t^{\prime})&=&-i{\cal K}_{3}(t-t^{\prime})+2{\rm Im} \left[ {\cal L}_{3}(t-t^{\prime})\right] ,\label{C21}\\
C_{2,2}(t,t^{\prime})&=&{\cal K}_{3}(t-t^{\prime})+2{\rm Re} \left[ {\cal L}_{3}(t-t^{\prime})\right] ,\label{C22}\\
C_{3,3}(t,t^{\prime})&=&{\cal K}_{1}(t-t^{\prime})+2{\rm Re} \left[ {\cal L}_{1}(t-t^{\prime})\right] ,\label{C33}
\end{eqnarray}
where the complex functions ${\cal K}_{m}(t)$ and ${\cal L}_{m}(t)$, for $m=1,2,3$, are given by
${\cal K}_{m}(t)=\sum_{\lambda}\left|g_{m,\lambda}\right|^{2}\exp(i\omega_{m,\lambda}t)$ and ${\cal L}_{m}(t)=\sum_{\lambda}\left|g_{m,\lambda}\right|^{2}\exp(i\omega_{m,\lambda}t)/\left[ \exp(\beta \omega_{m,\lambda})-1\right] $.
In the limit in which the number of environmental normal modes per unit frequency becomes infinite, we define spectral densities for $m=1,2,3$ as $J_{m}(\omega)=\sum _{\lambda}\left|g_{m,\lambda}\right|^{2}\delta (\omega -\omega _{m,\lambda})$, with $\omega \in [0,+\infty)$ and interpret the summations in ${\cal K}_{m}(t)$ and ${\cal L}_{m}(t)$ as integrals over $\omega$: ${\cal K}_{m}(t)=\int ^{\infty}_{0}d\omega J_{m}(\omega )\exp(i\omega t)$ and ${\cal L}_{m}(t)=\int ^{\infty}_{0}d\omega J_{m}(\omega)\exp(i\omega t)/[\exp(\beta \omega )-1]$. For our present purpose of illustrating protection against SDE, it suffices to assume ohmic spectral densities with the same cutoff frequency $\omega _{c}$, that is, $J_{m}(\omega)=\eta _{m}\omega \exp(-\omega/\omega _{c})$, where $\eta _{m}$, for $m=1,2,3$, are dimensionless constants giving the respective strengths of dephasing, bit flipping, and dissipation. Calculating the continuum versions of ${\cal K}_{m}(t)$ and ${\cal L}_{m}(t)$, using the ohmic spectral densities, gives ${\cal K}_{m}(t)=\eta _{m} \omega _{c}^{2}/\left(1-i\omega _{c}t\right)^{2}$ and ${\cal L}_{m}(t)=(\eta _{m} /\beta ^{2}) \Psi ^{(1)}\left(1+1/(\beta \omega _{c})-it/\beta \right) $, where $\Psi ^{(1)}$ is the first polygamma function. By substituting these results into Eqs. (\ref{C11}), (\ref{C12}), (\ref{C21}), (\ref{C22}), and (\ref{C33}), we obtain the correlations that appear in Eq. (\ref{Dab}), where the rotation matrix elements are obtained from Eqs. (\ref{Uc}), (\ref{Uk}), and (\ref{rot}). Once we have the coefficients $D_{p,q}(t)$, for $p,q=1,2,3$, then we can solve Eq. (\ref{master2}) numerically.
\begin{figure}
\caption{$\Lambda (t)$ for the evolution of different initial entangled states, with the control fields turned off ($N=0$) and on ($N=1,3,8$).}
\label{figure1}
\end{figure}
\begin{figure}
\caption{The fidelity $F(t)={\rm Tr}[\rho _{I}(t)\rho (0)]$ as a function of time for Bell initial states, for control fields with $N=2,3,5,8$.}
\label{figure2}
\end{figure}
Here we impose the extreme situation where the disentanglement occurs faster than the time scale defined by the inverse of the cutoff frequency, $1/\omega _{c}$, and the thermal correlation time, $\tau_{B}=\beta/\pi$. Hence, we take $\tau =2\pi/\omega _{c}$ and, therefore, $t_{c}=\tau /N$, as discussed below Eq. (\ref{Uk}). For concreteness, in the numerical calculations we choose $n_x=2$, $n_z=1$, $\tau =10^{-10}$s, a temperature of $T=0.1$K, $\eta _{1}=1/16$, $\eta _{2}=1/64$, and $\eta _{3}=1/256$. As initial pure states, we consider two classes:
\begin{eqnarray}
\left| \Phi (\theta)\right\rangle =\cos \theta \left|\uparrow\uparrow\right\rangle+\sin \theta \left|\downarrow\downarrow\right\rangle ,\label{phi}\\
\left| \Psi (\theta)\right\rangle =\cos \theta \left|\uparrow\downarrow\right\rangle+\sin \theta \left|\downarrow\uparrow\right\rangle ,\label{psi}
\end{eqnarray}
for $\theta \in [0,2\pi )$, where we adopt the usual spin notation.
To measure entanglement we use the concurrence \cite{wootters98}, defined as the maximum between zero and $\Lambda (t)$, with
\begin{eqnarray}
\Lambda (t) =\lambda _{1}-\lambda _{2}-\lambda _{3}-\lambda _{4},
\end{eqnarray}
where $\lambda _{1} \geq \lambda _{2} \geq \lambda _{3} \geq \lambda _{4}$ are the square roots of the eigenvalues of the matrix $\rho (t)\sigma _{1,y}\sigma _{2,y}\rho ^{\star}(t)\sigma _{1,y}\sigma _{2,y}$, where $\rho ^{\star}(t)$ is the complex conjugation of $\rho (t)$, the reduced density matrix of the two qubits in the Schr\"{o}dinger picture. Thus, if $\Lambda (t)$ gets less than or equal to zero, there is no entanglement and the state is separable. Our aim is to use external fields to keep $\Lambda (t)$ fixed. Figure \ref{figure1} shows $\Lambda (t)$ for the evolution of different initial entangled states. In panel (a), $\Lambda (t)$ is shown for initial states with $\Lambda (0)\approx0.70711$, when the control fields are turned off (indicated with $N=0$) and on ($N=1,3,8$). The solid and dotted lines correspond to the initial states of Eq. (\ref{phi}), with $\theta =\pi /8$ and $\theta =3\pi /8$, respectively. The double-dot-dashed line corresponds to the initial states of Eq. (\ref{psi}), with $\theta =\pi /8$ and $\theta =3\pi /8$ giving the same $\Lambda (t)$. In panel (b) $\Lambda (t)$ is given for Bell initial states, for which $\Lambda (0)=1$. The initial states of Eq. (\ref{phi}) evolve resulting in the same $\Lambda (t)$ for $\theta =\pm \pi /4$. Also, the initial states of Eq. (\ref{psi}) evolve resulting in the same $\Lambda (t)$ for $\theta =\pm \pi /4$. In this figure panel, we show the results for Eq. (\ref{phi}), since they differ from those of Eq. (\ref{psi}) only in the fourth decimal place. The solid line represents the result for the control field turned off, while the dotted, double-dot-dashed, and dashed lines show the results for control fields with $N=1,3,8$, respectively.
From the general theory of dynamical decoupling \cite{viola98,facchi05}, the protection gets better as $N$ gets larger. Figure \ref{figure2} shows the fidelity, $F(t)={\rm Tr}[\rho _{I}(t)\rho (0)]$, as a function of time for Bell initial states. All four Bell initial states result in the same fidelity function up to the fourth decimal place. The solid, dashed, dotted, and dot-dashed lines give the fidelities when the control fields are turned on, using $N=2,3,5,8$, respectively. The insets (a) and (b) show, respectively, the fidelity and concurrence, both calculated at $t=\tau $, as functions of $N$.
\section{Conclusion}
In summary, we have shown that it is possible to protect a two-qubit entangled state from disentanglement, using a simple combination of a static field along the $x$ axis and a rotating field in the $yz$ plane. We have tested the method under a very unfavorable circumstance, where dephasing, bit flipping, and dissipation, at a finite temperature, are so effective as to disentangle an unprotected state within a time interval shorter than the characteristic reservoir correlation time, $2\pi /\omega _{c}$, and the thermal correlation time, $\tau_{B}=\beta/\pi$. Even so, the concurrence can be preserved at high fidelity, as shown in Figs. \ref{figure1} and \ref{figure2}. The present result also suggests that it might be possible to protect against disentanglement during the execution of an entangling quantum operation.
\end{document} |
\begin{document}
\title{ Reduced Free Products of Finite Dimensional $C^*$-Algebras } \par
\author{ Nikolay A. Ivanov }
\date{\today}
\address{\hskip-\parindent
Nikolay Ivanov \\
Department of Mathematics \\
Texas A\&M University \\
College Station TX 77843-3368, USA}
\email{nivanov@math.tamu.edu}
\begin{abstract}
We find necessary and sufficient conditions for the simplicity and uniqueness of trace for reduced free
products of finite families of finite dimensional $C^*$-algebras with specified traces on them.
\end{abstract}
\maketitle
\section{Introduction and Definitions}
The notion of reduced free product of a family of $C^*$-algebras with specified states on them was introduced independently by Avitzour
(\cite{A82}) and Voiculescu (\cite{V85}). We will recall this notion and some of its properties here.
\par
\begin{defi}
The couple $(A,\phi)$, where $A$ is a unital $C^*$-algebra and $\phi$ a state is called a $C^*$-noncommutative probability space or $C^*$-NCPS.
\end{defi}
\par
\begin{defi}
Let $(A,\phi)$ be a $C^*$-NCPS and $\{ A_i | i \in I \}$ be a family of $C^*$-subalgebras of $A$, s.t. $1_A \in A_i$, $\forall i\in I$, where $I$ is an index set. We say that the family $\{ A_i |i \in I \}$ is free if $\phi(a_1...a_n)=0$, whenever $a_j \in A_{i_j} $ with $i_1\neq i_2\neq ... \neq i_n$ and $\phi(a_j)=0$, $\forall j \in \{ 1,...n \}$.
A family of subsets $\{ S_i | i \in I \}$ $\subset$ $A$ is $*$-free if
$\{ C^*(S_i \cup \{ 1_A \} ) | i \in I \}$ is free.
\end{defi}
Let $\{ (A_i,\phi_i) | i \in I \}$ be a family of $C^*$-NCPS such that the GNS representations of $A_i$ associated to $\phi_i$ are all faithful. Then there is a unique $C^*$-NCPS $(A,\phi) \overset{def}{=} \underset{i \in I}{*} (A_i,\phi_i)$ with unital embeddings $A_i \hookrightarrow A$, s.t.
\\
(1) $\phi|_{A_i}=\phi_i$
\\
(2) the family $\{ A_i | i \in I \}$ is free in $(A,\phi)$
\\
(3) $A$ is the $C^*$-algebra generated by $\underset{i \in I}{\bigcup}A_i$
\\
(4) the GNS representation of $A$ associated to $\phi$ is faithful.
\\
And also:
\\
(5) If $\phi_i$ are all traces then $\phi$ is a trace too (\cite{V85}).
\\
(6) If $\phi_i$ are all faithful then $\phi$ is faithful too (\cite{D98}).
\par
In the above situation $A$ is called the reduced free product algebra and $\phi$ is called the free product state. Also the construction of the
reduced free product is based on defining a free product Hilbert space, which turns out to be $\mathfrak{H}_A$ - the GNS Hilbert space for
$A$, associated to $\phi$.
\par
\begin{example}
If $\{ G_i | i \in I \}$ is a family of discrete groups and $C^*_r(G_i)$ are the reduced group $C^*$-algebras, corresponding to the left
regular representations of $G_i$ on $l^2(G_i)$ respectively, and if $\tau_i$ are the canonical traces on $C^*_r(G_i)$, $i \in I$, then
we have $\underset{i \in I}{*} (C^*_r(G_i), \tau_i)=(C^*_r(\underset{i \in I}{*}G_i), \tau)$, where $\tau$ is the canonical trace on the group $C^*$-algebra $C^*_r(\underset{i \in I}{*} G_i)$.
\end{example}
Reduced free products satisfy the following property:
\begin{lemma}[\cite{DR98}]
Let $I$ be an index set and let $(A_i,\phi_i)$ be a $C^*$-NCPS ($i \in I$), where each $\phi_i$ is faithful. Let $(B,\psi)$ be a $C^*$-NCPS
with $\psi$ faithful. Let
\begin{center}
$(A,\phi) = \underset{i\in I}{*} (A_i,\phi_i)$.
\end{center}
Given unital $*$-homomorphisms, $\pi_i : A_i \rightarrow B$, such that $\psi \circ \pi_i = \phi_i$ and $\{ \pi_i(A_i) \}_{i\in I}$ is free
in $(B, \psi)$, there is a $*$-homomorphism, $\pi : A \rightarrow B$ such that $\pi|_{A_i} = \pi_i$ and $\psi \circ \pi = \phi$.
\end{lemma}
\par
From now on we will be concerned only with $C^*$-algebras equipped with tracial states.
\par
The study of simplicity and uniqueness of trace for reduced free
products of $C^*$-algebras, one can say, started with the paper of
Powers \cite{P75}. In this paper Powers proved that the reduced
$C^*$-algebra of the free group on two generators $F_2$ is simple and
has a unique trace - the canonical one. In \cite{C79} Choi showed the same for the
``Choi algebra'' $C_r^*(\mathbb{Z}_2 * \mathbb{Z}_3)$ and then
Paschke and Salinas in \cite{PS79} generalized the result to the case of $C_r^*(G_1 *
G_2)$, where $G_1, G_2$ are discrete groups, such that $G_1$ has
at least two and $G_2$ at least three elements. After that Avitzour in \cite{A82} gave a sufficient condition for simplicity and uniqueness of
trace for reduced free products of $C^*$-algebras, generalizing the
previous results. He proved:
\begin{thm}[\cite{A82}]
Let
\begin{equation*}
(\mathfrak{A}, \tau) = (A, \tau_A) * (B, \tau_B),
\end{equation*}
where $\tau_A$ and $\tau_B$ are traces and $(A,\tau_A)$ and $(B,\tau_B)$ have faithful GNS representations. Suppose
that there are unitaries $u,v \in A$ and $w \in B$, such that
$\tau_A(u) = \tau_A(v) = \tau_A(u^* v) = 0$ and $\tau_B(w) = 0$. Then
$\mathfrak{A}$ is simple and has a unique trace $\tau$.
\end{thm}
{\em Note:} It is clear that $uw$ satisfies $\tau((uw)^n) = 0$, $\forall n \in \mathbb{Z} \backslash \{ 0 \}$. Unitaries with this property we
define below.
\section{Statement of the Main Result and Preliminaries}
We adopt the following notation:
\\
If $A_0$, ... , $A_n$ are unital $C^*$-algebras equipped with traces $\tau_0$, ... , $\tau_n$ respectively, then $A=\underset{\alpha_0}{\overset{p_0}{A_0}} \bigoplus
\underset{\alpha_1}{\overset{p_1}{A_1}} \bigoplus ... \bigoplus \underset{\alpha_n}{\overset{p_n}{A_n}}$ will mean that the $C^*$-algebra
$A$ is isomorphic to the direct sum of $A_0$, ... , $A_n$, and is such that $A_i$ are supported on the projections $p_i$. Also $A$ comes with a
trace (let's call it $\tau$) given by the formula $\tau=\alpha_0\tau_0 + \alpha_1\tau_1 + ... + \alpha_n\tau_n$. Here of course $\alpha_0$, $\alpha_1$, ... ,
$\alpha_n > 0$ and $\alpha_0 + \alpha_1 + ... + \alpha_n = 1$.
\begin{defi}
If $(A,\tau)$ is a $C^*$-NCPS and $u\in A$ is a unitary with $\tau(u^n)=0$, $\forall n \in \mathbb{Z} \backslash \{ 0 \}$, then we call $u$ a
Haar unitary.
\par
If $1_A \in B \subset A$ is a unital abelian $C^*$-subalgebra of $A$ we call $B$ a diffuse abelian $C^*$-subalgebra of $A$ if $\tau|_B$ is
given by an atomless measure on the spectrum of $B$. We also call $B$ a unital diffuse abelian $C^*$-algebra.
\end{defi}
From Proposition 4.1(i), Proposition 4.3 of \cite{DHR97} we can conclude the following:
\begin{prop}
If $(B,\tau)$ is a $C^*$-NCPS with $B$ abelian, then $B$ is diffuse abelian if and only if $B$ contains a Haar unitary.
\end{prop}
$C^*$-algebras of the form $(\underset{\alpha}{\overset{p}{\mathbb{C}}} \bigoplus
\underset{1-\alpha}{\overset{1-p}{\mathbb{C}}})*(\underset{\beta}{\overset{q}{\mathbb{C}}}\bigoplus
\underset{1-\beta}{\overset{1-q}{\mathbb{C}}})$ have been described explicitly in \cite{ABH91} (see also \cite{D99LN}):
\begin{thm}
Let $1 > \alpha \geqq \beta \geqq \frac{1}{2}$ and let
\begin{center}
$( A,\tau ) = ( \underset{\alpha }{\overset{p}{\mathbb{C}}} \oplus
\underset{1-\alpha }{\overset{1-p}{\mathbb{C}}} ) * ( \underset{\beta }{\overset{q}{\mathbb{C}}}\oplus
\underset{1-\beta}{\overset{1-q}{\mathbb{C}}} ) $.
\end{center}
If $\alpha > \beta$ then
\begin{equation*}
A=\underset{\alpha -\beta }{\overset{p\wedge (1-q)}{\mathbb{C}}}\oplus
C([a,b], M_2(\mathbb{C}))\oplus \underset{\alpha + \beta -1}{\overset{p\wedge q}{\mathbb{C}}} ,
\end{equation*}
for some $0 < a < b < 1$. Furthermore, in the above picture
\begin{center}
$p=1 \oplus \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \oplus 1 ,$
\end{center}
\begin{equation*}
q=0\oplus \begin{pmatrix} t & \sqrt{t(1-t)} \\ \sqrt{t(1-t)} & 1-t \end{pmatrix} \oplus 1 ,
\end{equation*}
and the faithful trace $\tau$ is given by the indicated weights on the projections $p\wedge (1-q)$ and $p\wedge q$, together with an
atomless measure, whose support is $[a,b]$.
\par
If $\alpha =\beta > \frac{1}{2}$ then
\begin{equation*}
A=\{\ f:[0,b]\rightarrow M_2(\mathbb{C}) |\ f\ is\ continuous\ and\ f(0)\ is\ diagonal\ \} \oplus
\underset{\alpha + \beta -1}{\overset{p\wedge q}{\mathbb{C}}},
\end{equation*}
for some $0 < b < 1$. Furthermore, in the above picture
\begin{center}
$p= \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \oplus 1,$
\end{center}
\begin{equation*}
q= \begin{pmatrix} {t} & {\sqrt{t(1-t)}} \\ {\sqrt{t(1-t)}} & {1-t} \end{pmatrix} \oplus 1,
\end{equation*}
and the faithful trace $\tau$ is given by the indicated weight on the projection $p\wedge q$, together with an atomless measure on $[0,b]$.
\par
If $\alpha = \beta = \frac{1}{2}$ then
\begin{equation*}
A=\{\ f:[0,1]\rightarrow M_2(\mathbb{C}) |\ f\ is\ continuous\ and\ f(0)\ and\ f(1)\ are\ diagonal\ \}.
\end{equation*}
Furthermore in the above picture
\begin{center}
$p= \begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} ,$
\end{center}
\begin{equation*}
q= \begin {pmatrix} t & \sqrt{t(1-t)} \\ \sqrt{t(1-t)} & 1-t \end{pmatrix} ,
\end{equation*}
and the faithful trace $\tau$ is given by an atomless measure, whose support is $[0,1]$.
\end{thm}
The question of describing the reduced free product of a finite family of finite dimensional abelian $C^*$-algebras was studied by Dykema in
\cite{D99}. He proved the following theorem:
\begin{thm}[\cite{D99}]
Let
\begin{equation*}
(\mathfrak{A},\phi )=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ...
\oplus \underset{\alpha_n}{\overset{p_n}{\mathbb{C}}})*(\underset{\beta_0}{\overset{q_0}{B_0}} \oplus
\underset{\beta_1}{\overset{q_1}{\mathbb{C}}} \oplus ...
\oplus \underset{\beta_m}{\overset{q_m}{\mathbb{C}}}),
\end{equation*}
where $\alpha_0 \geq 0$ and $\beta_0 \geq 0$ and $A_0$ and $B_0$ are equipped with traces $\phi(p_0)^{-1} \phi|_{A_0}$, $\phi(q_0)^{-1}
\phi|_{B_0}$ and $A_0$ and $B_0$ have diffuse abelian $C^*$-subalgebras, and where $n \geq 1$, $m \geq 1$ (if $\alpha_0 = 0$ or $\beta_0 =
0$, or both, then, of course, we don't impose any conditions on $A_0$ or $B_0$, or both respectively).
Suppose also that $\dim(A) \geq 2$, $\dim(B) \geq 2$, and $\dim(A) + \dim(B) \geq 5$.
\par
Then
\begin{equation*}
\mathfrak{A} = \overset{r_0}{\mathfrak{A}_0} \oplus \underset{(i,j)\in L_+}{\bigoplus}
\underset{\alpha_i + \beta_j -1}{\overset{p_i \wedge q_j}{\mathbb{C}}},
\end{equation*}
where $L_+ = \{ (i,j)| 1 \leq i \leq n$, $1 \leq j \leq m$ and $\alpha_i + \beta_j > 1 \}$, and where
$\mathfrak{A}_0$ has a unital, diffuse abelian subalgebra supported on $r_0 p_1$ and another one supported on $r_0 q_1$.
\par
Let $ L_0 = \{(i,j)| 1 \leq i \leq n$, $1 \leq j \leq m$ and $\alpha_i + \beta_j = 1 \} .$
\par
If $L_0$ is empty then $\mathfrak{A}_0$ is simple and $\phi(r_0)^{-1} \phi|_{\mathfrak{A}_{0}}$ is the unique trace on
$\mathfrak{A}_0.$
\par
If $L_0$ is not empty, then for each $(i,j) \in L_0$ there is a $*$-homomorphism $\pi_{(i,j)}: \mathfrak{A}_0 \rightarrow \mathbb{C}$ such
that $\pi_{(i,j)}(r_0 p_i) = 1 = \pi_{(i,j)}(r_0 q_j).$ Then: \\
(1) $\mathfrak{A}_{00} \overset{def}{=} \underset{(i,j)\in L_0}{\bigcap} \ker (\pi_{(i,j)})$ \\
is simple and nonunital, and $\phi(r_0)^{-1} \phi|_{\mathfrak{A}_{00}}$ is the unique trace on
$\mathfrak{A}_{00}.$ \\
(2) For each $i\in \{1, ..., n \}, \ r_0 p_i$ is full in $\mathfrak{A}_0 \cap
\underset{i' \neq i}{\underset{(i',j) \in L_0}{\bigcap}} \ker
(\pi_{(i',j)}).$ \\
(3) For each $j \in \{ 1, ... , m \}, \ r_0 q_j$ is full in $\mathfrak{A}_{0} \cap
\underset{j' \neq j}{\underset{(i,j') \in
L_0}{\bigcap}} \ker (\pi_{(i,j')}).$
\end{thm}
One can define von Neumann algebra free products, similarly to reduced free products of $C^*$-algebras.
We will denote by $\mathbb{M}_n$ the $C^*$-algebra (von Neumann algebra) of $n \times n$ matrices
with complex coefficients.
\par
Dykema studied the case of von Neumann algebra free products of finite dimensional (von Neumann)
algebras:
\begin{thm}[\cite{D93}]
Let
\begin{equation*}
A = \underset{\alpha_0}{\overset{p_0}{L(F_s)}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ... \oplus
\underset{\alpha_k}{\overset{p_k}{\mathbb{M}_{n_k}}}
\end{equation*}
and
\begin{equation*}
B = \underset{\beta_0}{\overset{q_0}{L(F_r)}} \oplus \underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus
\underset{\beta_l}{\overset{q_l}{\mathbb{M}_{m_l}}},
\end{equation*}
where $L(F_s), L(F_r)$ are interpolated free group factors, $\alpha_0, \beta_0 \geq 0$,
and where $\dim(A) \geq 2$, $\dim(B) \geq 2$ and $\dim(A) + \dim(B)\geq 5$.
Then for the von Neumann algebra free product we have:
\begin{equation*}
A*B = L(F_t) \oplus \underset{(i,j) \in L_+}{\bigoplus}
\underset{\gamma_{ij}}{\overset{f_{ij}}{\mathbb{M}_{N(i,j)}}},
\end{equation*}
where $L_+ = \{(i,j) | 1 \leq i \leq k, 1 \leq j \leq l, (\frac{\alpha_i}{n_i^2}) + (\frac{\beta_j}{m_j^2})
> 1 \}$, $N(i,j) = max(n_i, m_j)$, $\gamma_{ij} = N(i,j)^2 \cdot (\frac{\alpha_i}{n_i^2} +
\frac{\beta_j}{m_j^2} - 1)$, and $f_{ij} \leq p_i \wedge q_j$.
\end{thm}
{\em Note:} $t$ can be determined from the other data, which makes sense only if the interpolated free group factors are all different. We
will use only the fact that $L(F_t)$ is a factor. For definitions and properties of interpolated free group factors see \cite{Ra94} and \cite{D94}.
\par
In this paper we will extend the result of Theorem 2.4 to the case of reduced free products of finite dimensional $C^*$-algebras with specified traces on them. We will prove:
\begin{thm}
Let
\begin{equation*}
(\mathfrak{A},\phi )=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ...
\oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{M}_{n_k}}})*(\underset{\beta_0}{\overset{q_0}{B_0}} \oplus
\underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l}
{\mathbb{M}_{m_l}}}),
\end{equation*}
where $\alpha_0, \beta_0 \geq 0$, $\alpha_i > 0$, for $i=1,..,k$ and $\beta_j > 0$, for $j=1,...,l$, and where $\phi(p_0)^{-1} \phi|_{A_0}$ and
$\phi(q_0)^{-1} \phi|_{B_0}$ are traces on $A_0$ and $B_0$ respectively. Suppose that $\dim(A) \geq 2$, $\dim(B)
\geq 2$, $\dim(A) + \dim(B) \geq 5$, and that both $A_0$ and $B_0$ contain unital, diffuse abelian
$C^*$-subalgebras (if $\alpha_0 > 0$, respectively $\beta_0 > 0$). Then
\begin{equation*}
\mathfrak{A}= \underset{\gamma}{\overset{f}{\mathfrak{A}_0}} \oplus \underset{(i,j)\in L_+}{\bigoplus}
\underset{\gamma_{ij}}{\overset{f_{ij}}{\mathbb{M}_{N(i,j)}}},
\end{equation*}
where $L_+ = \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} > 1 \}$, $N(i,j) = max(n_i,m_j)$, $\gamma_{ij} =
N(i,j)^2(\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} -1)$, $f_{ij} \leq p_i \wedge q_j$. There is a unital, diffuse abelian
$C^*$-subalgebra of $\mathfrak{A}_0$, supported on $f p_1$ and another one, supported on $f q_1$.
\par
If $L_0 = \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} = 1 \}$ is empty, then $\mathfrak{A}_0$ is simple with a unique trace. If $L_0$ is not empty, then $\forall (i,j) \in L_0,\ \exists \pi_{(i,j)} :
\mathfrak{A}_{0} \rightarrow \mathbb{M}_{N(i,j)}$ a unital $*$-homomorphism, such that $\pi_{(i,j)}(f p_i) = \pi_{(i,j)}(f q_j) = 1$. Then: \\
(1) $\mathfrak{A}_{00} \overset{def}{=} \underset{(i,j)
\in L_0}{\bigcap} \ker (\pi_{(i,j)})$ is simple and nonunital, and has a unique trace $\phi(f )^{-1} \phi |_{\mathfrak{A}_{00}}$. \\
(2) For each $i \in \{ 1, ..., k \}$, $f p_i$ is full in $\mathfrak{A}_0 \cap
\underset{i' \neq i}{\underset{(i',j) \in L_0}{\bigcap}} \ker(\pi_{(i',j)})$. \\
(3) For each $j \in \{ 1, ..., l \}$, $f q_j$ is full in $\mathfrak{A}_0 \cap
\underset{j' \neq j}{\underset{(i,j') \in L_0}{\bigcap}} \ker(\pi_{(i,j')})$.
\end{thm}
\section{Beginning of the Proof - A Special Case}
In order to prove this theorem we will start with a simpler case. We will study first the $C^*$-algebras of the form
$(A,\tau) \overset{def}{=} $ $ ( \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_m}{\overset{p_m}
{\mathbb{C}}})*(\mathbb{M}_n, tr_n)$ with $0 < \alpha_1 \leq ... \leq \alpha_m$. We choose a set of matrix units for $\mathbb{M}_n$ and denote them by $\{ e_{ij}|i,j \in \{1, ..., n \}
\} $ as usual. Let's take the (trace zero) permutation unitary
$$ u \overset{def}{=} \begin{pmatrix} 0 & 1 & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & 1 \\ 1 & 0 & ... & 0 \end{pmatrix} \in \mathbb{M}_n.$$ \\
We see that $\Ad(u)(e_{11}) = u e_{11} u^* = e_{nn}$ and for $2 \leq i \leq n$, $\Ad(u)(e_{ii}) = u e_{ii} u^* = e_{(i-1) (i-1)}$.
\par
It's clear that $$A = C^*(\{p_1, ..., p_m \}, \{ e_{ii}\}_{i=1}^n, u).$$ Then it is also clear that
$$A = C^*(\{ u^ip_1u^{-i} , ... , u^ip_mu^{-i} \}_{i=0}^{n-1}, \{ e_{ij} \}_{i=1}^{n}, u).$$ We want to show that the family
$$\{ \{ \mathbb{C} \cdot u^ip_1u^{-i} \oplus ,..., \oplus \mathbb{C} \cdot u^ip_mu^{-i} \}_{i=0}^{n-1},\ \{ \mathbb{C} \cdot e_{11} \oplus ... \oplus \mathbb{C} \cdot e_{nn} \} \}$$ is free.
We will prove something more general. We denote
$$B \overset{def}{=} C^*( \{ u^kp_1u^{-k}, ... , u^kp_mu^{-k} \}_{k=0}^{n-1}, \{ e_{11}, ... ,e_{nn} \} ).$$
Let $l$ be an integer and $l|n$, $1 < l < n$ (if such $l$ exists). Let $$E \overset{def}{=} C^*( \{ \{ u^kp_1u^{-k}, ... , u^kp_mu^{-k} \}_{k=0}^{l-1}, \{ e_{11} , ... , e_{nn} \}, \{ u^l, u^{2l}, ... , u^{n-l} \} \} ).$$ It's easy to see that
$$C^* ( \{ e_{11}, ... , e_{nn} \}, \{ u^l, u^{2l}, ... , u^{n-l} \} )=
\underbrace{\mathbb{M}_{ \frac{n}{l} } \oplus ... \oplus \mathbb{M}_{ \frac{n}{l} }}_{l-times} \subset \mathbb{M}_n.$$
We will adopt the following notation from \cite {D99LN}:
\par
Let $(D, \varphi)$ be a $C^*$-NCPS and $1_D \in D_1, ..., D_k \subset D$ be a family of unital
$C^*$-subalgebras of $D$, having a common unit $1_D$. We denote by $D^{\circ} \overset{def}{=} \{ d\in D |
\varphi(d)=0 \}$ (analogously for $D_1$, ..., etc). We denote by $\Lambda^{\circ}(D_1^{\circ}, D_2^{\circ}
, ..., D_k^{\circ})$
the set of all words of the form $d_1 d_2 \cdots d_j$ and of nonzero length, where $d_t \in D_{i_t}^{\circ}$, for
some $1 \leq i_t \leq k$ and $i_t \neq i_{t+1}$ for any $1 \leq t \leq j-1$. \\
\par
We have the following
\begin{lemma}
If everything is as above, then:
(i) The family $\{ \{ u^kp_1u^{-k} , ... , u^kp_mu^{-k} \}_{k=0}^{n-1},$ $\{ e_{11} , ... ,e_{nn} \} \}$
is free in $(A,\tau)$. And more generally if
$$\omega \in \Lambda^{\circ}( C^*(p_1, ..., p_m)^{\circ}, ...,
C^*(u^{n-1}p_1u^{1-n}, ..., u^{n-1}p_mu^{1-n})^{\circ}, C^*(e_{11}, ..., e_{nn})^{\circ}),$$
then $\tau(\omega u^r)=0$ for all $0 \leq r \leq n-1$.
(ii) The family $\{ \{ u^kp_1u^{-k} , ... , u^kp_mu^{-k} \}_{k=0}^{l-1},$ $\{ e_{11} , ... , e_{nn} , u^l,
u^{2l}, ... u^{n-l} \} \}$ is free in $(A,\tau)$. And more generally if
$$\omega \in \Lambda^{\circ}(C^*(p_1,..., p_m)^{\circ},...,
C^*(u^{l-1}p_1u^{1-l},..., u^{l-1}p_mu^{1-l})^{\circ},
C^*( e_{11}, ..., e_{nn}, u^l,..., u^{n-l})^{\circ}),$$
then $\tau(\omega u^r)=0$ for all $0 \leq r \leq l-1$.
\end{lemma}
\begin{proof}
Each letter $\alpha \in C^*( \{ u^kp_1u^{-k}, ... , u^kp_mu^{-k} \})$ with $\tau(\alpha) = 0$ can be represented as $\alpha = u^k \alpha' u^{-k}$ with $\tau(\alpha') = 0$, and $\alpha' \in C^*( \{ p_1, ..., p_m \} )$.
\par
Case (i): \\
\par
Each $$\omega \in \Lambda^{\circ}( C^*(p_1, ..., p_m)^{\circ}, ...,
C^*(u^{n-1}p_1u^{1-n}, ..., u^{n-1}p_mu^{1-n})^{\circ}, C^*(e_{11}, ..., e_{nn})^{\circ})$$ is of one of the
four following types:
\begin{equation}
\omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t},
\end{equation}
\begin{equation}
\omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t},
\end{equation}
\begin{equation}
\omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1},
\end{equation}
\begin{equation}
\omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1},
\end{equation}
where $\alpha_{ij} \in C^*(u^{k_{ij}}p_1u^{-k_{ij}}, ..., u^{k_{ij}}p_mu^{-k_{ij}})^{\circ}$ with
$0 \leq k_{ij} \leq n-1$, $k_{ij} \neq k_{i(j+1)}$ and $\beta_i \in C^*(e_{11}, ..., e_{nn})^{\circ}$. \\
\par
We consider the following two cases: \\
(a) We look at $\alpha_{ji} \alpha_{ji+1}$ with $\alpha_{jc}$ $\in$ $C^*(\{ u^{k_{c}}p_1u^{-k_{c}}, ... ,
u^{k_{c}}p_mu^{-k_{c}} \} )^{\circ}$ for
$c=i, i+1$.
We write $\alpha_{jc} = u^{k_{c}} \alpha'_{jc} u^{-k_{c}}$ with $\alpha'_{jc}
\in C^*( \{ p_1, ... , p_m \} )^{\circ}$ for
$c = i, i+1$. So $\alpha_{ji} \alpha_{ji+1} =$ \\
$u^{k_i} \alpha'_{ji} u^{k_{i+1} - k_i} \alpha'_{ji+1} u^{-k_{i+1}}$. Here $\alpha'_{ji}$ and $\alpha'_{ji+1}$ are free from
$u^{k_{i+1} - k_i}$ in $(A,\tau)$ (Notice that we have $k_{i+1} - k_i \neq 0$). \\
(b) We look at $\alpha_{ji_j} \beta_j \alpha_{(j+1) 1}$ with $\beta \in C^*( \{e_{11} , ... , e_{nn} \}
)^{\circ},$ \\
$\alpha_{(j+1)1} \in C^*( \{ u^{k_{j+1}}p_1u^{-k_{j+1}} , ... , u^{k_{j+1}} p_m u^ {-k _{j+1}} \} )^{\circ},$
\\
$\alpha_{ji_j} \in C^*( \{u^{k_j} p_1 u^{-k_j} , ... , u^{k_j} p_m
u^{-k_j} \} )^{\circ}$. Now we write $\alpha_{ji_j} = u^{k_j} \alpha'_{ji_j}
u^{-k_j}$ and $\alpha_{(j+1)1} = u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$ with
$\alpha'_{ji_j} , \alpha'_{(j+1)1} \in
C^*( \{ p_1 , ..., p_m \} )^{\circ}$. We see that $\alpha_{ji_j} \beta_j \alpha_{(j+1)1} =$ $u^{k_j}
\alpha'_{ji_j} u^{-k_j} \beta_j u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$. If $k_j = k_{j+1}$ then $\tau(u^{-k_j} \beta_j u^{k_{j+1}})
= \tau(u^{k_{j+1}} u^{-k_j} \beta_j) = \tau(\beta_j) = 0$ since $\tau$ is a trace. If $k_j \neq k_{j+1}$ then $\tau(u^{-k_j} \beta_j
u^{k_{j+1}}) = \tau(u^{k_{j+1}} u^{-k_j} \beta_j)$ and $u^{k_{j+1} - k_j} \beta_j \in \mathbb{M}_n$ is a linear combination of off-diagonal
elements, so $\tau(u^{k_{j+1}} u^{-k_j} \beta_j) = 0$ also. Notice that $\alpha'_{ji_j}$ and $\alpha'_{(j+1)1}$ are free from
$u^{-k_j} \beta_j u^{k_{j+1}}$ in $(A,\tau)$. \\
Now we expand all the letters in the word $\omega$ according to the cases (a) and (b). We
see that we obtain a word, consisting of letters of zero trace, such that every two consecutive letters come either from $C^*( \{p_1, ..., p_m \} )$ or from $\mathbb{M}_n$. So $\tau(\omega) = 0$. It only remains to look at the case of the word
$\omega u^r$
which is the word $\omega$, but ending in $u^r$. There are two principally different cases for $\omega u^r$
from the all four possible choices for $\omega$: \\
In cases (1) and (2) $\alpha_{ti_t} = u^k \alpha'_{ti_t} u^{-k}$ for some $0 \leq k \leq n-1$ with
$\alpha'_{ti_t} \in C^*( \{ p_1 , ..., p_m \} )^{\circ}$. So the word will end in $u^k \alpha'_{ti_t} u^{r-k}$.
If $r = k$ then $\alpha'_{ti_t}$ will be the last letter with trace zero and everything else will be the
same as for $\omega$, so the whole word will have trace $0$. If $k \neq r$ then $\tau(u^{r-k}) = 0$ and $u^{r-k}$
is free from $\alpha'_{ti_t}$ so the word in this case will be of zero trace too. \\
In cases (3) and (4) if $\beta_{t-1} u^{r}$ is the whole word then $\beta_{t-1} u^{r}$ is a linear
combination of off-diagonal elements of $\mathbb{M}_n$, and so its trace is $0$. If not then
$\alpha_{(t-1)i_{t-1}} = u^k \alpha'_{(t-1)i_{t-1}} u^{-k}$ with $\alpha'_{(t-1)i_{t-1}} \in
C^*( \{ p_1 , ... , p_m \} )^{\circ}$. So the word ends in \\
$ u^k \alpha'_{(t-1)i_{t-1}} u^{-k} \beta_{t-1}
u^{r} $. Similarly as above we see that $\tau(u^{-k} \beta_{t-1} u^{r}) = 0$ for all values of $k$ and $r$.
The rest of the word we treat as above and conclude that it's of zero trace in this case too. \\
So in all cases $\tau( \omega u^r) = 0$ just what we had to show. \\
\par
Case (ii): \\
\par
As in case (i)
$$\omega \in \Lambda^{\circ}(C^*(p_1,..., p_m)^{\circ},..., C^*(u^{l-1}p_1u^{1-l},...,
u^{l-1}p_mu^{1-l})^{\circ}, C^*( e_{11},..., e_{nn}, u^l,..., u^{n-l})^{\circ})$$
is of one of the following types: \\
\begin{equation}
\omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t},
\end{equation}
\begin{equation}
\omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1} \alpha_{t1} \cdots \alpha_{ti_t},
\end{equation}
\begin{equation}
\omega = \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1},
\end{equation}
\begin{equation}
\omega = \alpha_{11} \alpha_{12} \cdots \alpha_{1i_1} \beta_1 \alpha_{21} \cdots \alpha_{2i_2} \beta_2
\alpha_{31} \cdots \alpha_{t-1i_{t-1}} \beta_{t-1},
\end{equation}
where $\alpha_{ij} \in C^*(u^{k_{ij}}p_1u^{-k_{ij}}, ..., u^{k_{ij}}p_mu^{-k_{ij}})^{\circ}$ with
$0 \leq k_{ij} \leq l-1$ and $k_{ij} \neq k_{i(j+1)}$ and $\beta_i \in C^*(e_{11}, ..., e_{nn}, u^l,
u^{2l}, ..., u^{n-l})^{\circ}$. \\
\par
Similarly as case (i) we consider two cases: \\
(a) We look at $\alpha_{ji} \alpha_{ji+1}$ with $\alpha_{jc}$ $\in$ $C^*(\{ u^{k_{c}}p_1u^{-k_{c}}, ... ,
u^{k_{c}}p_mu^{-k_{c}} \} )$, and $0 \leq k_c \leq l-1$ for $c=i, i+1$. We write
$\alpha_{jc} = u^{k_c} \alpha'_{jc} u^{-k_c}$ with $\alpha'_{jc} \in
C^*( \{ p_1, ... , p_m \} )^{\circ}$ for $c = i, i+1$. It follows $\alpha_{ji} \alpha_{ji+1} =$
$u^{k_i} \alpha'_{ji} u^{k_{i+1} - k_i} \alpha'_{ji+1} u^{-k_{i+1}}$. Here $\alpha'_{ji}$ and $\alpha'_{ji+1}$ are free from $u^{k_{i+1} - k_i}$ in $(A,\tau)$ (and again $k_{i+1} - k_i \neq 0$). \\
(b) We look at $\alpha_{ji_j} \beta_j \alpha_{(j+1) 1}$ with $\beta_j \in C^*( \{e_{11} , ... , e_{nn} \} ,
\{ u^l, u^{2l}, ..., u^{n-l} \} )^{\circ},$ \\
$\alpha_{(j+1)1} \in C^*( \{ u^{k_{j+1}}p_1u^{-k_{j+1}} , ... , u^{k_{j+1}} p_m u^ {-k _{j+1}} \} )^{\circ},$
\\
$\alpha_{ji_j} \in C^*( \{u^{k_j} p_1 u^{-k_j} , ... , u^{k_j} p_m u^{-k_j} \} )^{\circ}$, where in this case
$k_j, k_{j+1} \in \{ 0, ..., l-1 \}$. Again we write $\alpha_{ji_j} = u^{k_j} \alpha'_{ji_j} u^{-k_j}$ and
$\alpha_{(j+1)1} = u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$
with $\alpha'_{ji_j} , \alpha'_{(j+1)1} \in C^*( \{ p_1 , ... , p_m \} )^{\circ}$.
We have $\alpha_{ji_j} \beta_j \alpha_{(j+1)1} =$ $u^{k_j} \alpha'_{ji_j} u^{-k_j} \beta_j u^{k_{j+1}} \alpha'_{(j+1)1} u^{-k_{j+1}}$. \\
We only need to show that $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = 0$. $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = \tau(u^{k_{j+1}} u^{-k_j}
\beta_j) = \tau(u^{k_{j+1} - k_j} \beta_j)$. The case $ k_{j+1} = k_j$ is clear. Notice that if
$ k_{j+1} \neq k_j$ then $0 < k_{j+1} - k_j \leq l-1$. It is clear that
$u^{k_{j+1} - k_j} \cdot \Span ( \{ e_{11}, ..., e_{nn} \}) \subset \mathbb{M}_n$ consists of linear
combinations of off-diagonal elements. The same is clear for $u^{k_{j+1} - k_j} \cdot
\Span( \{ u^l, u^{2l} , ..., u^{n-l} \} ) \subset \mathbb{M}_n $. It's not difficult to see then that
$$u^{k_{j+1} - k_j} \cdot \Alg ( \{ e_{11}, ..., e_{nn} \}, \{ u^l, u^{2l}, ..., u^{n-l} \} )$$
will consist of linear span of the union of the off-diagonal entries among
$\{ e_{ij} | 1 \leq i,j \leq n \}$ present in $u^{k_{j+1} - k_j} \cdot \Span( \{e_{11}, ..., e_{nn} \})$ and
the ones present in \\
$u^{k_{j+1} - k_j} \cdot \Span( \{ u^l, u^{2l}, ..., u^{n-l} \} )$.
This shows that $u^{k_{j+1} - k_j} \beta_j$ will be also a linear span of off-diagonal entries in
$\mathbb{M}_n$ and will have trace $0$.
So $\tau(u^{-k_j} \beta_j u^{k_{j+1}}) = 0$. In this case also $\alpha'_{ji_j}$ and $\alpha'_{(j+1)1}$ are
free from $u^{-k_j} \beta_j u^{k_{j+1}}$ in $(A,\tau)$. \\
We expand all the letters of the word $\omega$ and see that it is of trace $0$ similarly as in case (i).
For the word $\omega u^r$ with $0 \leq r \leq l-1$ we argue similarly as in case (i).
Again there are two principally different cases: \\
In cases (5) and (6) $\alpha_{ti_t} = u^k \alpha'_{ti_t} u^{-k}$ for some $0 \leq k \leq l-1$ with
$\alpha'_{ti_t} \in C^*( \{ p_1 , ..., p_m \} )^{\circ}$. So the word will end in $u^k \alpha'_{ti_t}
u^{r-k}$. If $r = k$ then $\alpha'_{ti_t}$ will be the last letter with trace zero and everything else will
be the same as for $\omega$, so the whole word will have trace $0$. If $k \neq r$ then $\tau(u^{r-k}) = 0$
and $u^{r-k}$ is free from $\alpha'_{ti_t}$ so the word in this case will be of zero trace too.
In cases (7) and (8), if $\beta_{t-1} u^r$ is the whole word, then it is a linear combination of off-diagonal elements, as we showed
in case (ii)-(b). If not we write $\alpha_{(t-1)i_{t-1}} = u^k \alpha'_{(t-1)i_{t-1}} u^{-k}$ with
$0 \leq k \leq l-1$ and $\alpha'_{(t-1)i_{t-1}} \in
C^*( \{ p_1, ..., p_m \} )^{\circ}$. So the word that we are looking at will end in
$u^k \alpha'_{(t-1)i_{t-1}} u^{-k} \beta_{t-1} u^{r} $. Since $0 \leq k,r \leq l-1$ similarly as in case
(ii)-(b) we see that $\tau(u^{-k} \beta_{t-1} u^{r}) = 0$. We treat the remaining part of the word as above and conclude that in this case the word has trace
$0$. \\
\par
So in all cases $\tau(\omega u^r) = 0$ just what we had to show. \\
\par
This proves the lemma.
\end{proof}
From properties (5) and (6) of the reduced free product it follows that $\tau$ is a faithful trace. From Lemma 1.4 it follows that $$B =
(\mathbb{C} \cdot e_{11} \oplus ... \oplus \mathbb{C} \cdot e_{nn}) * (\underset{k=0}{\overset{n-1}{*}} (\mathbb{C} \cdot u^k p_1 u^{-k} \oplus ... \oplus
\mathbb{C} \cdot u^k p_m u^{-k})),$$
$$\cong ( \underset{\frac{1}{n}}{\mathbb{C}} \oplus ... \oplus \underset{\frac{1}{n}}{\mathbb{C}} ) *
(\underset{k=0}{\overset{n-1}{*}} (\underset{\alpha_1}{\mathbb{C}} \oplus ... \oplus
\underset{\alpha_m}{\mathbb{C}}))$$ and that $$E =
C^*( \{ e_{11}, ..., e_{nn}, u^l, u^{2l}, ..., u^{n-l} \} ) * (\underset{k=0}{\overset{l-1}{*}} (\mathbb{C} \cdot u^k p_1 u^{-k} \oplus ... \oplus
\mathbb{C} \cdot u^k p_m u^{-k})),$$
$$\cong (\underset{\frac{l}{n}}{\mathbb{M}_{\frac{n}{l}}} \oplus ... \oplus
\underset{\frac{l}{n}}{\mathbb{M}_{\frac{n}{l}}}) * (\underset{k=0}{\overset{l-1}{*}} (\underset{\alpha_1}{\mathbb{C}} \oplus ...
\oplus \underset{\alpha_m}{\mathbb{C}})).$$
\begin{cor}
If everything is as above:
(1) For $b \in B$ and $0 < k \leq n-1$ we have $\tau(b u^k) = 0$, so also $\tau(u^k b) = 0$.
\par
(2) For $e \in E$ and $0 < k \leq l-1$ we have $\tau(e u^k) = 0$, so also $\tau(u^k e) = 0$.
\end{cor}
For $(B, \tau|_B)$ and $(E, \tau|_E)$ we have that $\mathfrak{H}_B \subset \mathfrak{H}_E \subset \mathfrak{H}_A$. If $a \in A$ we will
denote by $\hat{a} \in \mathfrak{H}_A$ the vector in $\mathfrak{H}_A$, corresponding to $a$ by the GNS construction. We will show that
\begin{cor}
If everything is as above: \\
(1) $u^{k_1} \mathfrak{H}_B \bot u^{k_2} \mathfrak{H}_B$ for $k_1 \neq k_2$, $0 \leq k_1, k_2 \leq n-1$.
\par
(2) $u^{k_1} \mathfrak{H}_E \bot u^{k_2} \mathfrak{H}_E$ for $k_1 \neq k_2$, $0 \leq k_1, k_2 \leq l-1$.
\end{cor}
\begin{proof}
(1) Take $ b_1, b_2 \in B $. We have $\langle u^{k_1} \hat{b_1} , u^{k_2} \hat{b_2} \rangle = \tau(u^{k_2} b_2 b_1^* u^{-k_1}) =
\tau(b_2 b_1^* u^{k_2 - k_1}) = 0,$ by the above Corollary. \\
(2) Similarly take $e_1, e_2 \in E$, so $\langle u^{k_1} \hat{e_1}, u^{k_2} \hat{e_2} \rangle = \tau(u^{k_2} e_2 e_1^* u^{-k_1})
= \tau(e_2 e_1^* u^{k_2 - k_1}) = 0,$ again by the above Corollary.
\end{proof}
Now $\mathfrak{H}_A$ can be written in the form $\mathfrak{H}_A =
\underset{i=0}{\overset{n-1}{\bigoplus}} u^i \mathfrak{H}_B$ as a
Hilbert space because of the Corollary above. Denote by $P_i$ the projection $P_i : \mathfrak{H}_A \rightarrow \mathfrak{H}_A$ onto the
subspace $u^i \mathfrak{H}_B$. Now it's also true that $A =
\underset{i=0}{\overset{n-1}{\bigoplus}} u^i B$ as a Banach
space. To see this we notice that $\Span\{u^iB, i=0, ...n-1\}$ is dense in $A$, also that $u^i B,\ 0 \leq i
\leq n-1$ are closed in $A$. Now take a sequence $\{ \sum_{i=0}^{n-1} u^i b_{mi} \}_{m=1}^{\infty}$ converging to an element $a \in A$
($b_{mi} \in B$).
Then for each $i$ we have $\{ P_j \sum_{i=0}^{n-1} u^i b_{mi} P_0 \}_{m=1}^{\infty} = \{ P_j u^j b_{mj} P_0 \}_{m=1}^{\infty}$ converges
(to $P_j a P_0$), consequently the
sequence $\{ b_{mj} \}_{m=1}^{\infty}$ converges to an element $b_j$ in $B$ $\forall 0 \leq j \leq n-1$. So
$a = \sum_{i=0}^{n-1} u^i b_i$. Finally
the fact that $u^{i_1} B \cap u^{i_2} B = 0$, for $i_1 \neq i_2$ follows easily from
$u^{i_1} \mathfrak{H}_B \cap u^{i_2} \mathfrak{H}_B = 0$, for $i_1 \neq i_2$ and the fact that the trace $\tau$ is faithful. We also have
$A = \underset{i=0}{\overset{n-1}{\bigoplus}} B u^i$.
\par
Let $C$ be a $C^*$-algebra and let $\Gamma$ be a discrete group with a given action
$\alpha : \Gamma \rightarrow Aut(C)$ on $C$. By $C \rtimes \Gamma$ we will denote the reduced crossed
product of $C$ by $\Gamma$. It will be clear what group action we take.
\par
Let's denote by $G$ the multiplicative group, generated by the automorphism $\Ad(u)$ of $B$. Then
$G \cong \mathbb{Z}_n$ and by what we proved above
$\mathfrak{H}_A \cong L^2(G,\mathfrak{H}_B)$.
\begin{lemma}
$A \cong B \rtimes G$
\end{lemma}
\begin{proof}
We have to show that the action of $A$ on $\mathfrak{H}_A$ ``agrees'' with the crossed product action. Take $a=
\underset{k=0}{\overset{n-1}{\sum}} b_k u^k \in A$, $b_k \in B, k=0, 1, ..., n-1$ and take $\xi = \underset{k=0}{\overset{n-1}{\sum}} u^k
\hat{b'_k} \in \mathfrak{H}_A$, $b'_k \in B, k=0, 1, ..., n-1$. Then $$a(\xi) = \underset{k=0}{\overset{n-1}{\sum}}
\underset{m=0}{\overset{n-1}{\sum}} b_k u^k u^m \hat{b'_m} = \underset{k=0}{\overset{n-1}{\sum}} \underset{m=0}{\overset{n-1}{\sum}}
u^{k+m} . (u^{-k-m} b_k u^{k+m} ) \hat{b'_m},$$
$$= \underset{s=0}{\overset{n-1}{\sum}} \underset{k=0}{\overset{n-1}{\sum}} (u^s . \Ad(u^{-s})(b_k) ) (\widehat{b'_{s-k\ (\mathrm{mod}\ n)}}).$$ This shows that the action of $A$ on $\mathfrak{H}_A$ is the crossed product action.
\end{proof}
To study simplicity in this situation, we can invoke Theorem 4.2 from \cite{O75} and Theorem 6.5 from \cite{OP78}, or with the same success, use the following result from \cite{K81}:
\begin{thm}[\cite{K81}]
Let $\Gamma$ be a discrete group of automorphisms of $C^*$-algebra $\mathfrak{B}$. If $\mathfrak{B}$ is simple and if each $\gamma$ is
outer for the multiplier algebra $M(\mathfrak{B})$ of $\mathfrak{B}$, $\forall \gamma \in \Gamma \backslash \{ 1 \} $, then the
reduced crossed product of $\mathfrak{B}$ by $\Gamma$, $\mathfrak{B} \rtimes \Gamma$, is simple.
\end{thm}
An automorphism $\omega$ of a $C^*$-algebra $\mathfrak{B}$, contained in a $C^*$-algebra $\mathfrak{A}$, is
outer for $\mathfrak{A}$, if there doesn't exist a unitary $w \in \mathfrak{A}$ with the property $\omega = \Ad(w)$.
\par
A representation $\pi$ of a $C^*$-algebra $\mathfrak{A}$ on a Hilbert space $\mathfrak{H}$ is called
non-degenerate if there doesn't exist a vector $\xi \in
\mathfrak{H}$, $\xi \neq 0$, such that $\pi(\mathfrak{A}) \xi = 0$.
\par
The idealizer of a $C^*$-algebra $\mathfrak{A}$ in a $C^*$-algebra $\mathfrak{B}$ ($\mathfrak{A} \subset \mathfrak{B}$) is the largest $C^*$-subalgebra of $\mathfrak{B}$ in which $\mathfrak{A}$ is an ideal. \\
We will not give a definition of multiplier algebra of a $C^*$-algebra. Instead we will give the following property from \cite{APT73}, which we will use (see \cite{APT73} for more details on multiplier algebras):
\begin{prop}[\cite{APT73}]
Each nondegenerate faithful representation $\pi$ of a $C^*$-algebra $\mathfrak{A}$ extends uniquely to a faithful representation of
$M(\mathfrak{A})$, and $\pi(M(\mathfrak{A}))$ is the idealizer of $\pi(\mathfrak{A})$ in its weak closure.
\end{prop}
Suppose that we have a faithful representation $\pi$ of a $C^*$-algebra $\mathfrak{A}$ on a Hilbert space
$\mathfrak{H}$. If confusion is impossible we will denote by $\bar{\mathfrak{A}}$ (in $\mathfrak{H}$) the
weak closure of $\pi(\mathfrak{A})$ in $\mathbb{B}(\mathfrak{H})$.
\par
To study uniqueness of trace we invoke a theorem of B\'edos from \cite{B93}.
\par
Let $\mathfrak{A}$ be a simple, unital $C^*$-algebra with a unique trace $\varphi$ and let
$(\pi_{\mathfrak{A}}, \mathfrak{H}_{\mathfrak{A}},
\widehat{1_{\mathfrak{A}}})$ denote the GNS-triple associated to $\varphi$. The trace $\varphi$ is faithful by the simplicity of $\mathfrak{A}$ and
$\mathfrak{A}$ is isomorphic to $\pi_{\mathfrak{A}}(\mathfrak{A})$. Let
$\alpha \in Aut(\mathfrak{A})$. The trace $\varphi$ is $\alpha$-invariant by the uniqueness of $\varphi$. Then $\alpha$ is implemented on
$\mathfrak{H}_{\mathfrak{A}}$ by the unitary operator $U_{\alpha}$ given by
$U_{\alpha}(\hat{a}) = \alpha(a) \cdot \widehat{1_{\mathfrak{A}}}$,
$a \in \mathfrak{A}$. Then we denote the extension of $\alpha$ to the weak closure
$\bar{\mathfrak{A}}$ (in $\mathfrak{H}_{\mathfrak{A}}$) of $\pi_{\mathfrak{A}}(\mathfrak{A})$ on $\mathbb{B}(\mathfrak{H}_{\mathfrak{A}})$
by $\tilde{\alpha} \overset{def}{=} \Ad(U_{\alpha})$. We will say that $\alpha$ is $\varphi$-outer if $\tilde{\alpha}$ is outer for $\bar{\mathfrak{A}}$.
\begin{thm}[\cite{B93}]
Suppose $\mathfrak{A}$ is a simple unital $C^*$-algebra with a unique trace $\varphi$ and that $\Gamma$ is a discrete group with a
representation $\alpha : \Gamma \rightarrow Aut(\mathfrak{A})$, such that $\alpha_{\gamma}$ is $\varphi$-outer $\forall \gamma \in \Gamma
\backslash \{ 1 \}$. Then the reduced crossed product $\mathfrak{A} \rtimes \Gamma$ is simple with a unique trace $\tau$ given by
$\tau = \varphi \circ E$, where $E$ is the canonical conditional expectation from $\mathfrak{A} \rtimes \Gamma$ onto $\mathfrak{A}$.
\end{thm}
Let's now return to the $C^*$-algebra $(A,\tau) = ( \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus
\underset{\alpha_m}{\overset{p_m}{\mathbb{C}}})*(\mathbb{M}_n, tr_n)$, with $\alpha_1 \leq \alpha_2 \leq ... \leq \alpha_m$. If
$B \subset E \subset A$ are as in the beginning of this section, then the representations of $B$, $E$ and $A$ on $\mathfrak{H}_A$ are all nondegenerate. Also we have the following:
\begin{lemma}
The weak closure of $B$ in $\mathbb{B}(\mathfrak{H}_B)$ and the one in $\mathbb{B}(\mathfrak{H}_A)$ are the
same (or $\bar{B}$ (in $\mathfrak{H}_B$) $\cong$
$\bar{B}$ (in $\mathfrak{H}_A$)). Analogously, $\bar{E}$ (in $\mathfrak{H}_E$) $\cong$ $\bar{E}$ (in $\mathfrak{H}_A$).
\end{lemma}
\begin{proof}
For $b \in B \subset A$ we have $b(u^t h) = u^t (\Ad(u^{-t} b))(h)$ for $h \in \mathfrak{H}_B$ and
$0 \leq t \leq n-1$. Taking a weak limit
in $\mathbb{B}(\mathfrak{H}_B)$ we obtain the same equation $\forall \bar{b} \in \bar{B}$
(in $\mathfrak{H}_B$): $\bar{b}(u^th) =
u^t(\Ad(u^{-t})(\bar{b}))(h)$, which shows, of course, that $\bar{b}$ has a unique extension to
$\mathbb{B}(\mathfrak{H}_A)$. Conversely if $\tilde{b} \in \bar{B}$ (in
$\mathfrak{H}_A$), then since $\mathfrak{H}_B$ is invariant for $B$ it will be invariant for $\tilde{b}$ also. So the restriction of
$\tilde{b}$ to $\mathfrak{H}_B$ is the element we are looking for.
\par
Analogously if $e \in E$ and if $h_0 + u^l h_1 + ... + u^{n-l} h_{\frac{n}{l}-1} \in \mathfrak{H}_E$, then for $0 \leq t \leq l-1$ we have
$e(u^t(h_0 + u^l h_1 + ... + u^{n-l} h_{\frac{n}{l}-1})) = u^t(\Ad(u^{-t})(e))(h_0 + u^l h_1 + ... + u^{n-l} h_{\frac{n}{l}-1})$. And again
for an element $\bar{e} \in \bar{E}$ (in $\mathfrak{H}_E$) we see that $\bar{e}$ has a unique extension to an element of $\bar{E}$ (in
$\mathfrak{H}_A$). Conversely an element $\tilde{e} \in \bar{E}$ (in $\mathfrak{H}_A$) has $\mathfrak{H}_E$ as an invariant subspace, so we
can restrict it to $\mathfrak{H}_E$ to obtain an element in $\bar{E}$ (in $\mathfrak{H}_E$).
\end{proof}
We will state the following theorem from \cite{D99}, which we will frequently use:
\begin{thm}[\cite{D99}]
Let $\mathfrak{A}$ and $\mathfrak{B}$ be unital $C^*$-algebras with traces $\tau_{\mathfrak{A}}$ and
$\tau_{\mathfrak{B}}$ respectively, whose GNS representations are faithful. Let
\begin{center}
$(\mathfrak{C}, \tau) = (\mathfrak{A}, \tau_{\mathfrak{A}}) * (\mathfrak{B}, \tau_{\mathfrak{B}})$.
\end{center}
Suppose that $\mathfrak{B} \neq \mathbb{C}$ and that $\mathfrak{A}$ has a unital, diffuse abelian
$C^*$-subalgebra $\mathfrak{D}$ ($1_{\mathfrak{A}} \in \mathfrak{D} \subseteq \mathfrak{A}$).
Then $\mathfrak{C}$ is simple with a unique trace $\tau$.
\end{thm}
Using repeatedly Theorem 2.4 we see that $$B =
(\mathbb{C} \cdot e_{11} \oplus ... \oplus \mathbb{C} \cdot e_{nn}) * (\underset{k=0}{\overset{n-1}{*}} (\mathbb{C} \cdot u^k p_1 u^{-k} \oplus ... \oplus
\mathbb{C} \cdot u^k p_m u^{-k})),$$
$$\cong (U \oplus \underset{max \{ n\alpha_m - n + 1,\ 0 \} }{\overset{\tilde{p}}{\mathbb{C}}}) *
(\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}}),$$ where $U$
has a unital, diffuse abelian $C^*$-subalgebra, and where $\tilde{p} = \underset{i=0}{\overset{n-1}{\wedge}} u^i p_m u^{-i}$.
\par
We will consider the following 3 cases, for $\alpha_1 \leq \alpha_2 \leq ... \leq \alpha_m$: \\
\par
(I) $\alpha_m < 1-\frac{1}{n^2}$.
\par
(II) $\alpha_m = 1-\frac{1}{n^2}$.
\par
(III) $\alpha_m > 1-\frac{1}{n^2}$. \\
\par
We will organize those cases in few lemmas:
\par
\par
(I)
\begin{lemma}
If $A$ is as above, then for $\alpha_m < 1-\frac{1}{n^2}$ we have that $A$ is simple with a unique trace.
\end{lemma}
\begin{proof}
We consider: \\
(1) $\alpha_m \leq 1-\frac{1}{n}$. \\
Then $B \cong U * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus
\underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$ with $U$ containing a unital, diffuse abelian $C^*$-subalgebra (from Theorem 2.4).
From the Theorem 3.9 we see that $B$ is simple with a unique trace. \\
(2) $1-\frac{1}{n} < \alpha_m < 1-\frac{1}{n^2}$. \\
Then $B \cong (U \oplus \underset{n\alpha_m - n + 1}{\overset{\tilde{p}}{\mathbb{C}}}) *
(\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$ with $U$
having a unital, diffuse abelian $C^*$-subalgebra. Using Theorem 2.4 one more time we see that $B$ is simple with a unique trace in this case also.
\par
We know that $A = B \rtimes G$, where $G = \langle \Ad(u) \rangle \cong \mathbb{Z}_n$. Since $B$ is unital then the multiplier algebra $M(B)$
coincides with $B$. We note also that since $\bar{B}$ (in $\mathfrak{H}_B$)
is isomorphic to $\bar{B}$ (in $\mathfrak{H}_A$) to prove that some element of $Aut(B)$ is $\tau_B$-outer it's enough to prove that this
automorphism is outer for $\bar{B}$ (in $\mathfrak{H}_A$) (and it will be outer for $M(B) = B$ also). Making these observations and using
Theorem 3.5 and Theorem 3.7 we see that if we prove that $\Ad(u^i)$ is outer for $\bar{B}$ (in $\mathfrak{H}_A$), $\forall 0 < i \leq n-1$,
then it will follow that $A$ is simple with a unique trace. We will show that $\Ad(u^i)$ is outer for $\bar{B}$ (in $\mathfrak{H}_A$)
(we will write just $\bar{*}$ for $\bar{*}$ (in $\mathfrak{H}_A$) and omit writing $\mathfrak{H}_A$ -- all the
closures will be in $\mathbb{B}(\mathfrak{H}_{\mathfrak{A}})$) for
the case $\alpha_m \leq 1-\frac{1}{n^2}$.
\par
Fix $0 < k \leq n-1$. Since $u^k \mathfrak{H}_B \perp \mathfrak{H}_B$ it follows that $u^k \notin \bar{B}$ (in $\mathfrak{H}_A$). Suppose
$\exists w \in \bar{B}$, such that $\Ad(u^k) = \Ad(w)$ on $\bar{B}$. Then $u^k w u^{-k} = w w w^* = w$ and $u^k w^* u^{-k} = w w^* w^* = w^*$
and this implies that $u^k$, $u^{-k}$, $w$ and $w^*$ commute, so it follows $u^k w^*$ commutes with $\overline{C^*(B, u^k)}$, so it belongs
to its center. If $k \nmid n$ then $\overline{C^*(B, u^k)} = \bar{A}$ and by Theorem 2.5 $\bar{A}$ (in $\mathfrak{H}_A$) is a factor, so $u^k w^*$ is a
multiple of $1_A$, which contradicts the fact $u^k \notin \bar{B}$. If $k=l \mid n$, then $\overline{C^*(B, u^k)} = \bar{E}$ and $\bar{E}$
(in $\mathfrak{H}_A$) $\cong$ $\bar{E}$ (in $\mathfrak{H}_E$) is a factor too (by Theorem 2.5), so this implies again that $u^k w^*$ is a
multiple of $1_A = 1_E$, so this is a contradiction again and this proves that $\Ad(u^k)$ are outer for $\bar{B}$, $\forall 0 < k \leq n-1$.
This concludes the proof.
\end{proof}
(III)
\begin{lemma}
If $A$ is as above, then for $\alpha_m > 1-\frac{1}{n^2}$ we have $A = A_0 \oplus \underset{n^2 \alpha_m - n^2 + 1}{\mathbb{M}_n}$, where $A_0$ is simple with a unique trace.
\end{lemma}
\begin{proof}
In this case $B \cong (U \oplus \underset{n\alpha_m - n + 1}{\overset{\tilde{p}}{\mathbb{C}}}) *
(\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}} \oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$, where $U$
has a unital, diffuse abelian $C^*$-subalgebra. From Theorem 2.4 we see that $B \cong \overset{\tilde{p}_0}{B_0} \oplus \underset{n
\alpha_m - n + \frac{1}{n}}{\overset{e_{11} \wedge \tilde{p}}{\mathbb{C}}} \oplus ... \oplus \underset{n
\alpha_m - n + \frac{1}{n}}{\overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}}$ with $\tilde{p}_0 = 1- e_{11} \wedge
\tilde{p} - ... - e_{nn} \wedge \tilde{p}$, and $B_0$ being unital, simple, and having a
unique trace. It's easy to see that $\Ad(u)$ permutes $\{ e_{ii} | 1 \leq i \leq n \}$ and that $\Ad(u)$ permutes
$\{ u^i p_j u^{-i} | 0 \leq i \leq n-1 \}$ for each $1 \leq j \leq m$. But since $\tilde{p} =
\underset{i=0}{\overset{n-1}{\wedge}} u^i p_m u^{-i}$ we see that $\Ad(u)(\tilde{p}) = \tilde{p}$.
This shows that $\Ad(u)$ permutes
$\{ e_{ii} \wedge \tilde{p} | 1 \leq i \leq n \}$. This shows that $\Ad(\tilde{p}_0 u)$ is an automorphism
of $B_0$ and that
$\Ad((1-\tilde{p}_0) u)$ is an automorphism of $\overset{e_{11} \wedge \tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge
\tilde{p}}{\mathbb{C}}$. If we denote $G_1 = \langle \Ad(\tilde{p}_0 u) \rangle$ and $G_2 = \langle \Ad((1-\tilde{p}_0) u) \rangle$,
then we have
$A = B_0 \rtimes G_1 \oplus (\overset{e_{11} \wedge \tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}})
\rtimes G_2$. Now it's easy to see that $(\overset{e_{11} \wedge \tilde{p}}
{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}) \rtimes G_2 = C^*(\{ e_{11} \wedge \tilde{p}, ..., e_{nn}
\wedge \tilde{p} \}, (1-\tilde{p}_0) u) = (1-\tilde{p}_0).C^*( \{ e_{11}, ..., e_{nn} \}, u) \cong \mathbb{M}_n$ (because $\tilde{p}_0$ is
a central projection). To study
$A_0 \overset{def}{=} B_0 \rtimes G_1$ we have to consider the automorphisms $\Ad(\tilde{p}_0 u)$. From Lemma 3.8 we see that
$$\overline{B_0 \oplus \overset{e_{11}
\wedge\tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}}\ (in\ \mathfrak{H}_B) \cong \overline{B_0
\oplus \overset{e_{11} \wedge\tilde{p}}{\mathbb{C}} \oplus ... \oplus \overset{e_{nn} \wedge \tilde{p}}{\mathbb{C}}}\ (in\ \mathfrak{H}_A).$$
This implies $\bar{B}_0$ (in $\mathfrak{H}_{B_0}$) $\cong$ $\bar{B}_0$ (in $\mathfrak{H}_{A_0}$). This is because
$\mathfrak{H}_{A_0} = \tilde{p}_0 \mathfrak{H}_A$ and $\mathfrak{H}_{B_0} = \tilde{p}_0 \mathfrak{H}_B$ (which is clear, since
$\mathfrak{H}_{A_0}$ and $\mathfrak{H}_{B_0}$ are direct summands in $\mathfrak{H}_A$ and $\mathfrak{H}_B$ respectively). For some
$l | n$ if we denote $E_0 \overset{def}{=} \tilde{p}_0 E$ then by the same reasoning as above $$E = E_0 \oplus (1-\tilde{p}_0).
C^*(\{ e_{11}, ..., e_{nn} \}, u^l) \cong E_0 \oplus (\underbrace{\mathbb{M}_{\frac{n}{l}} \oplus ... \oplus \mathbb{M}_{\frac{n}{l}}}_{l-times}).$$
So we similarly have $\bar{E_0}$ (in $\mathfrak{H}_{E_0}$) $\cong$ $\bar{E_0}$ (in $\mathfrak{H}_{A_0}$). We use Theorem 2.5 and see that
$\bar{A} \cong L(F_t) \oplus \mathbb{M}_n$ and that $$\bar{E} \cong L(F_{t'}) \oplus (\underbrace{\mathbb{M}_{\frac{n}{l}} \oplus ... \oplus
\mathbb{M}_{\frac{n}{l}}}_{l-times}),$$ for some $1 < t, t' < \infty$. This shows that $\bar{A_0}$ and $\bar{E_0}$ are both factors.
Now for $\Ad(\tilde{p_0} u^k)$, $1 \leq k \leq n-1$ we can make the same reasoning as in the case (I) to
show that $\Ad(\tilde{p_0} u^k)$ are all outer for
$\bar{B_0}$, $\forall 1 \leq k \leq n-1$. Now we use Theorem 3.5 and Theorem 3.7 to finish the proof. Notice that the trace of the support
projection of $\mathbb{M}_n$, $e_{11} \wedge \tilde{p} + ... + e_{nn} \wedge \tilde{p}$, is $n^2 \alpha_m - n^2 + 1$.
\end{proof}
(II) \\
\par
We already proved that $\Ad(u^k)$ are outer for $\bar{B}$, $\forall 1 \leq k \leq n-1$. Using Theorem 2.4 we see
$B \cong (U \oplus \underset{1-\frac{1}{n}}{\overset{\tilde{p}}{\mathbb{C}}}) * (\underset{\frac{1}{n}}{\overset{e_{11}}{\mathbb{C}}}
\oplus ... \oplus \underset{\frac{1}{n}}{\overset{e_{nn}}{\mathbb{C}}})$ with $U$ having a unital, diffuse abelian $C^*$-subalgebra. There
are $*$-homomorphisms $\pi_i : B \rightarrow \mathbb{C}$, $1 \leq i \leq n$ with $\pi_i(\tilde{p}) = \pi_i(e_{ii}) = 1$,
and such that $B_0 \overset{def}{=} \underset{i=0}{\overset{n-1}{\bigcap}} \ker(\pi_i)$ is simple with a unique trace. Now if
$1 \leq k \leq n-1$, then $B_0 \cap \Ad(u^k)(B_0)$ is either $0$ or $B_0$, because $B_0$ and $\Ad(u^k)(B_0)$ are simple ideals in $B$.
The first possibility is actually
impossible, because of dimension reasons, so this shows that $B_0$ is invariant for $\Ad(u^k)$, $1 \leq k \leq n-1$. In other words
$\Ad(u^k) \in Aut(B_0)$. Similarly as in Lemma 3.4 it can be shown that
$$A_0 \overset{def}{=} C^*(B_0 \oplus B_0 u \oplus ... \oplus B_0
u^{n-1}) \cong B_0 \rtimes \{ \Ad(u^k) | 0 \leq k \leq n-1 \} \subset A.$$
\begin{lemma}
We have a short split-exact sequence:
\begin{center}
$0 \hookrightarrow A_0 \rightarrow A \overset{\curvearrowleft}{\rightarrow} \mathbb{M}_n \rightarrow 0$.
\end{center}
\end{lemma}
\begin{proof}
It's clear that we have the short exact sequence
\begin{equation*}
0 \rightarrow B_0 \hookrightarrow B \overset{\pi}{\longrightarrow} \underbrace{\mathbb{C}
\oplus ... \oplus \mathbb{C}}_{n-times} \rightarrow 0,
\end{equation*}
where $\pi \overset{def}{=} (\pi_1, ..., \pi_n)$. We think of $\pi$ as a map from $B$ to $diag(\mathbb{M}_n)$, defined by $$\pi(b) =
\begin{pmatrix} \pi_1(b) & 0 & ... & 0 \\ 0 & \pi_2(b) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_n(b) \end{pmatrix} .$$
Now since
$\pi_i(\tilde{p}) = \pi_i(e_{ii}) = 1$ and $\Ad(u)(e_{11}) = u e_{11} u^* = e_{nn}$ and for
$2 \leq i \leq n$, $\Ad(u)(e_{ii}) = u e_{ii}
u^* = e_{(i-1) (i-1)}$, then $\pi_i \circ \Ad(u)(e_{(i+1) (i+1)}) = \pi_i \circ \Ad(u)(\tilde{p}) = 1$ for $1 \leq i \leq n-1$ and $\pi_n
\circ \Ad(u)(e_{1 1}) = \pi_n \circ \Ad(u)(\tilde{p}) = 1$. So since two $*$-homomorphisms of a $C^*$-algebra which coincide on a set
of generators of the $C^*$-algebra, are identical, we have $\pi_i \circ \Ad(u) = \pi_{i+1}$ for
$1 \leq i \leq n-1$ and $\pi_n \circ \Ad(u) = \pi_1$. Define $\tilde{\pi} : A \rightarrow \mathbb{M}_n$ by
$\underset{k=0}{\overset{n-1}{\sum}} b_ku^k \mapsto
\underset{k=0}{\overset{n-1}{\sum}} \pi(b_k) W^k$ (with $b_k \in B$), where $W \in \mathbb{M}_n$ is the matrix which
represents $u \in \mathbb{M}_n \subset A$, namely $$W \overset{def}{=} \begin{pmatrix} 0 & 1 & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & 1
\\ 1 & 0 & ... & 0 \end{pmatrix} .$$
We will show that if $b \in B$ and $0 \leq k \leq n-1$, then $\pi(u^k b u^{-k}) = W^k \pi(b) W^{-k}$. For this it's enough to show that $\pi(u b u^{-1}) = W \pi(b) W^{-1}$. For the matrix units $\{ E_{ij} | 1 \leq i,j \leq n \}$ we have as above $W E_{ii} W^* = E_{(i-1) (i-1)}$ for $2 \leq i \leq n$ and $W E_{11} W^* = E_{nn}$. So $$W \begin{pmatrix} \pi_1(b) & 0 & ... & 0 \\ 0 & \pi_2(b) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_n(b) \end{pmatrix} W^* = \begin{pmatrix} \pi_2(b) & 0 & ... & 0 \\ 0 & \pi_3(b) & ... & 0 \\ . & . & . & . \\ 0 & 0 & ... & \pi_1(b) \end{pmatrix} ,$$
$$ = \begin{pmatrix} \pi_1(\Ad(u)(b)) & 0 & ... & 0 \\ 0 & \pi_2(\Ad(u)(b)) & ... & 0 \\ . & . & . & . \\ 0
& 0 & ... & \pi_n(\Ad(u)(b)) \end{pmatrix} = \pi(\Ad(u)(b)),$$ just what we wanted.
\par
Now for $b \in B$ and $0 \leq k \leq n-1$ we have $$\tilde{\pi}((b u^k)^*) = \tilde{\pi}(u^{-k} b^*) =
\tilde{\pi}(u^{-k} b^* u^k u^{-k}) = \pi(u^{-k} b^* u^k) W^{-k} = W^{-k} \pi(b^*) W^k W^{-k} , $$
$$ = W^{-k} \pi(b)^* = (\pi(b) W^k)^* = (\tilde{\pi}(b u^k))^*.$$
Also if $b, b' \in B$ and $0 \leq k, k' \leq n-1$, then $$\tilde{\pi}((b' u^{k'}).(b u^k)) = \tilde{\pi}(b'(u^{k'} b u^{-k'}) u^{k+k'}) =
\pi(b'(u^{k'} b u^{-k'})) W^{k+k'},$$
$$= \pi(b') \pi(u^{k'} b u^{-k'}) W^{k+k'} = \pi(b') W^{k'} \pi(b) W^{-k'} W^{k+k'} =
\tilde{\pi}(b' u^{k'}) \tilde{\pi}(b u^k).$$ This proves that $\tilde{\pi}$ is a $*$-homomorphism. Continuity follows from continuity of $\pi$ and the Banach space representation $A = \underset{i=0}{\overset{n-1}{\bigoplus}} Bu^i$.
\par
Clearly $A_0 = \underset{i=0}{\overset{n-1}{\bigoplus}} B_0 u^i$ as a Banach space.
It's also clear by the definition of $\tilde{\pi}$
that $A_0 \subset \ker(\tilde{\pi})$. Since $A_0$ has a Banach space codimension $n^2$ in $A$, and so does
$\ker(\tilde{\pi})$, then we must have $A_0 = \ker(\tilde{\pi})$.
\par
From the construction of the map $\tilde{\pi}$ we see that $\tilde{\pi}(e_{ii}) = E_{ii}$, since $\pi(e_{ii}) = E_{ii}$ and also
$\tilde{\pi}(u^k) = W^k$. Since $\{e_{ii} | 1 \leq i \leq n \} \cup \{ W^k | 0 \leq k \leq n-1 \}$ generate $\mathbb{M}_n$, then we have
$\tilde{\pi}(e_{ij}) = E_{ij}$, so the inclusion map $s: \mathbb{M}_n \rightarrow A$ given by $E_{ij} \mapsto e_{ij}$ is a right inverse
for $\tilde{\pi}$.
\end{proof}
From this lemma it follows that we can write $A = A_0 \oplus \mathbb{M}_n$ as a Banach space.
\begin{lemma}
If $\eta$ is a trace on $A_0$, then the linear functional $\tilde{\eta}$ on $A$, defined by
$\tilde{\eta}(a_0 \oplus M) = \eta(a_0) + tr_n(M)$, where $a_0 \in A_0$ and $M \in \mathbb{M}_n$
is a trace and $\tilde{\eta}$ is the unique extension of $\eta$ to a trace on $A$ (of norm 1).
\end{lemma}
\begin{proof}
The functional $\eta$ can be extended in
at most one way to a tracial state on $A$, because of the requirement $\tilde{\eta}(1_A) = 1$, the fact that $\mathbb{M}_n$ sits as a
subalgebra in $A$, and the uniqueness of the trace on $\mathbb{M}_n$. Since $\tilde{\eta}(1_A) = 1$, to show that $\tilde{\eta}$ is a trace we
need to show that $\tilde{\eta}$ is positive and satisfies the trace property. For the trace property: if $x, y \in A$ then we need to show
$\tilde{\eta}(xy) = \tilde{\eta}(yx)$. It is easy to see that to prove this it's enough to prove that if $a_0 \in A_0$ and $M \in
\mathbb{M}_n$, then $\eta(a_0 M) = \eta(M a_0)$. Since $\eta$ is linear and $a_0$ is a linear combination of 4 positive elements we can
think, without loss of generality, that $a_0 \geq 0$. Then $a_0 = a_0^{1/2} a_0^{1/2}$ and $M a_0^{1/2}, a_0^{1/2} M \in A_0$, so since $\eta$ is a trace on $A_0$, we
have $\eta(M a_0) = \eta((M a_0^{1/2}) a_0^{1/2}) = \eta(a_0^{1/2}(M a_0^{1/2})) = \eta((a_0^{1/2} M) a_0^{1/2}) = \eta(a_0^{1/2}(a_0^{1/2}
M)) = \eta(a_0 M).$ This shows that $\tilde{\eta}$ satisfies the trace property. It remains to show positivity. Suppose $a_0 \oplus M \geq
0$. We must show $\tilde{\eta}(a_0 \oplus M) \geq 0$. Write $M = \underset{i=1}{\overset{n}{\sum}} \underset{j=1}{\overset{n}{\sum}} m_{ij} e_{ij}$ and
$a_0 = \underset{i=1}{\overset{n}{\sum}} \underset{j=1}{\overset{n}{\sum}} e_{ii} a_0 e_{jj}$. Since $\tilde{\eta}$ is a trace, if $i \neq
j$, then $\tilde{\eta}(e_{ii} a_0 e_{jj}) = \tilde{\eta}(e_{jj} e_{ii} a_0) = 0$, so this shows that $\tilde{\eta}(a_0 \oplus M) =
\underset{i=1}{\overset{n}{\sum}} (\frac{m_{ii}}{n} + \eta(e_{ii} a_0 e_{ii}))$. Clearly $a_0 \oplus M \geq 0$ implies $\forall 1 \leq i
\leq n, e_{ii} (a_0 \oplus M) e_{ii} \geq 0$. So to show positivity we only need to show $\forall 1 \leq i \leq n$ $\tilde{\eta}(e_{ii}(a_0
+ M)e_{ii}) \geq 0$, given $\forall 1 \leq i \leq n, m_{ii} e_{ii} + e_{ii} a_0 e_{ii} \geq 0$. Suppose that for some $i$, $m_{ii} < 0$.
Then it follows that $e_{ii} a_0 e_{ii} \geq -m_{ii} e_{ii}$, so $e_{ii} a_0 e_{ii} \in e_{ii} A_0 e_{ii}$ is invertible, which implies
$e_{ii} \in A_0$, which is not true. So this shows that $m_{ii} \geq 0$, and $m_{ii} e_{ii} \geq -e_{ii} a_0 e_{ii}$. If
$\{ \epsilon_{\gamma} \}$ is an approximate unit for $A_0$, then positivity of $\eta$ implies
$1 = \| \eta \| = \underset{\gamma}{\lim}\
\eta(\epsilon_{\gamma})$. Since $\eta$ is a trace we have
$\underset{\gamma}{\lim}\ \eta(\epsilon_{\gamma} e_{ii}) = \frac{1}{n}$. Since
$\forall \gamma,\ m_{ii}\epsilon_{\gamma}^{1/2} e_{ii} \epsilon_{\gamma}^{1/2} \geq -
\epsilon_{\gamma}^{1/2} e_{ii} a_0 e_{ii}
\epsilon_{\gamma}^{1/2}$, then $$tr_n(m_{ii} e_{ii}) = \frac{m_{ii}}{n} =
\underset{\gamma}{\lim}\ \eta(m_{ii} e_{ii} \epsilon_{\gamma}) =
\underset{\gamma}{\lim}\ \eta(m_{ii} \epsilon_{\gamma}^{1/2} e_{ii} \epsilon_{\gamma}^{1/2})
\geq \underset{\gamma}{\lim}\
\eta(\epsilon_{\gamma}^{1/2} e_{ii} a_0 e_{ii} \epsilon_{\gamma}^{1/2}),$$
$$ = \underset{\gamma}{\lim}\ \eta(e_{ii} a_0 e_{ii} \epsilon_{\gamma})
= \eta(e_{ii} a_0 e_{ii}).$$ This finishes the proof of positivity and the proof of the lemma.
\end{proof}
\begin{remark}
We will show below that $\tau|_{A_0}$ is the unique trace on $A_0$. Since we have $A = A_0 \oplus \mathbb{M}_n$ as a Banach space, then
clearly the free product trace $\tau$ on $A$ is given by $\tau(a_0 \oplus M) = \tau|_{A_0}(a_0) + tr_n(M)$, where $a_0 \oplus M \in A_0
\oplus \mathbb{M}_n = A$. All tracial positive linear functionals of norm $\leq 1$ on $A_0$ are of the form $t\tau|_{A_0}$, where $0 \leq t \leq 1$.
Then there will be no other traces on $A$ than the family $\lambda_t \overset{def}{=} t \tau|_{A_0} \oplus tr_n$. To show that these are
traces indeed, we can use the above lemma (it is still true, no matter that the norm of $t \tau|_{A_0}$ can be less than one), or we can
represent them as a convex linear combination $\lambda_t = t \tau + (1-t)\mu$ of the free product trace $\tau$ and the trace $\mu$,
defined by $\mu(a_0 \oplus M) = tr_n(M) = tr_n(\tilde{\pi}(a_0 \oplus M))$.
\end{remark}
\begin{lemma}
$\bar{B_0}$ (in $\mathfrak{H}_A$) $=$ $\bar{B}$ (in $\mathfrak{H}_A$).
\end{lemma}
\begin{proof}
Let's take
$D \overset{def}{=} ( \overset{1-\tilde{p}}{\mathbb{C}} \oplus \overset{\tilde{p}}{\mathbb{C}} ) * ( \overset{e_{11}}{\mathbb{C}} \oplus
\overset{e_{22} + ... + e_{nn}}{\mathbb{C}}) \subset B$. Denote $D_0 \overset{def}{=} D\cap B_0$. From Theorem 2.3 follows that
$D \cong \{ f: [0,b] \rightarrow \mathbb{M}_2 | f$ is continuous and $f(0)$ - diagonal$\}$ $\oplus \overset{\tilde{p}\wedge (1-e_{11})}
{\mathbb{C}}$, where $0 < b < 1$ and $\tau|_D$ is given by an atomless measure $\mu$ on $\{ f: [0,b] \rightarrow \mathbb{M}_2 | f$ is
continuous and $f(0)$ - diagonal $\}$, $\tilde{p}$ is represented by $\begin{pmatrix} 1 & 0 \\ 0 & 0 \end{pmatrix} \oplus 1$, and $e_{11}$
is represented by $\begin{pmatrix} 1-t & \sqrt{t(1-t)} \\ \sqrt{t(1-t)} & t \end{pmatrix} \oplus 0$. A $*$-homomorphism, defined on the
generators of a $C^*$-algebra can be extended in at most one way to the whole $C^*$-algebra. This observation, together with $\pi_1(e_{11}) =
\pi_1(\tilde{p}) = 1$ and $\pi_i(e_{22} + ... + e_{nn}) = \pi_i(\tilde{p}) = 1$ implies that $\pi_1|_D(f \oplus c) = f_{11}(0)$ and
$\pi_i|_D(f \oplus c) = c$ for $2 \leq i \leq n-1$. This means that $D_0 = \{ f: [0,b] \rightarrow \mathbb{M}_2 | f$ is continuous and
$f_{11}(0) = f_{12}(0) = f_{21}(0) = 0 \} \oplus 0$. Now we see $\bar{D_0}$ (in $\mathfrak{H}_D$) $\cong$ $\mathbb{M}_2
\otimes L^{\infty}([0,b], \mu) \oplus 0$, so then $e_{11} \in \bar{D_0}$ (in $\mathfrak{H}_D$).
So we can find a sequence $\{ \varepsilon_n \}$ of self-adjoint elements (functions) of $D_0$, supported on $e_{11}$, weakly converging to
$e_{11}$ on $\mathfrak{H}_D$ and such that $\{ \varepsilon_n^2 \}$ also converges weakly to $e_{11}$ on $\mathfrak{H}_D$.
Then take $a_1, a_2 \in A$. In $\mathfrak{H}_A$ we have $\langle \widehat{a_1}, (\varepsilon_n^2 - e_{11})\widehat{a_2} \rangle =
\tau( (\varepsilon_n^2 - e_{11}) a_2 a_1^*) = \tau((\varepsilon_n - e_{11}) a_2 a_1^* (\varepsilon_n - e_{11})) \leq 4 \| a_2 a_1^* \|
\tau(\varepsilon_n^2 - e_{11})$ (the last inequality is obtained by representing $a_2 a_1^*$ as a linear combination of 4 positive elements and using the Cauchy--Bunyakovsky--Schwarz inequality). This shows that $e_{11} \in \bar{D_0}$ (in $\mathfrak{H}_A$) $\subset \bar{B_0}$ (in $\mathfrak{H}_A$).
Analogously $e_{ii} \in \bar{B_0}$ (in $\mathfrak{H}_A$), so this shows $\bar{B_0} = \bar{B}$ (in $\mathfrak{H}_A$).
\end{proof}
It easily follows now that
\begin{cor}
$\bar{A_0}$ (in $\mathfrak{H}_A$) $=$ $\bar{A}$ (in $\mathfrak{H}_A$).
\end{cor}
The representation of $B_0$ on $\mathfrak{H}_A$ is faithful and nondegenerate, and we can use Proposition 3.6, together with Theorem 3.5 and
the fact that $\Ad(u^k)$ are outer for $\bar{B} = \bar{B_0}$ to get:
\begin{lemma}
$A_0 = B_0 \rtimes G$ is simple.
\end{lemma}
For the uniqueness of the trace we need to modify a little the proof of Theorem 3.7 (which is Theorem 1 in \cite{B93}, stated for ``nontwisted''
crossed products).
\begin{lemma}
$A_0 = B_0 \rtimes G$ has a unique trace, $\tau|_{A_0}$.
\end{lemma}
\begin{proof}
Above we already proved that $\{ \Ad(u^k) | 1 \leq k \leq n-1 \}$ are
$\tau|_{B_0}$-outer for $B_0$.
\par
Suppose that $\eta$ is a trace on $A_0$. We will show that $\tau|_{A_0} = \eta$. We consider the GNS
representation of $B$, associated
to $\tau|_B$. By repeating the proof of Lemma 3.13 we see that $\bar{B_0}$ (in $\mathfrak{H}_B$) $=$
$\bar{B}$ (in $\mathfrak{H}_B$).
The simplicity of $B_0$
allows us to identify $B_0$ with $\pi_{\tau|_B}(B_0)$. We will also identify $B_0$ with its canonical copy in $A_0$. $A_0$ is
generated by $\{ b_0 \in B_0 \} \cup \{ u^k | 0 \leq k \leq n-1 \}$ and $\{ \Ad(u^k) | 0 \leq k \leq n-1 \}$
extend to $\bar{B_0}$
(in $\mathfrak{H}_A$), so also to $\bar{B_0}$ (in $\mathfrak{H}_B$) ( $\cong \bar{B}$ (in $\mathfrak{H}_A$)).
Now we can form the von Neumann algebra crossed product $\tilde{A} \overset{def}{=} \bar{B_0} \rtimes
\{ \Ad(u^k) | 0 \leq k \leq n-1 \} \cong \bar{B} \rtimes \{ \Ad(u^k) | 0 \leq k \leq n-1 \}$, where the weak closures are in
$\mathfrak{H}_B$. Clearly $\tilde{A} \cong \bar{A}$ (in $\mathfrak{H}_A$). Denote by $\widetilde{\tau_{B_0}}$ the extension of $\tau|_{B_0}$ to
$\bar{B_0}$ (in $\mathfrak{H}_A$), given by $\widetilde{\tau_{B_0}}(x) = \langle x(\widehat{1_A}), \widehat{1_A} \rangle_{\mathfrak{H}_A}$. By Proposition 3.19 of
Chapter V in \cite{T79}, $\widetilde{\tau_{B_0}}$ is a faithful normal trace on $\bar{B_0}$ (in $\mathfrak{H}_A$). Now from the fact that
$\bar{B_0}$ (in $\mathfrak{H}_A$) is a factor and using Lemma 1 from \cite{L81} we get that $\widetilde{\tau_{B_0}}$ is unique on
$\bar{B_0}$ (in $\mathfrak{H}_A$). By the same argument we have that the extension $\widetilde{\tau_{A_0}}$ of $\tau|_{A_0}$ to $\bar{A_0}$
(in $\mathfrak{H}_{A}$) $\cong$ $\bar{A}$ (in $\mathfrak{H}_A$) is unique, since $\bar{A_0}$ (in $\mathfrak{H}_{A}$) $\cong$ $\bar{A}$
(in $\mathfrak{H}_A$) is a factor.
\par
We take the unique extension of $\eta$ to $A$. We will call it again $\eta$ for convenience. \\
We denote by $\mathfrak{H}'_{C}$ the GNS Hilbert space for $C$, corresponding to $\eta|_C$ (for $C$ $=$ $A$, $B$, $B_0$, $A_0$).
Since $\eta|_{B_0} = \tau|_{B_0}$ it follows that $\bar{B_0}$ (in $\mathfrak{H}'_{B_0}$) $\cong$
$\bar{B}$ (in $\mathfrak{H}'_B$) and of course $\mathfrak{H}'_{B_0} = \mathfrak{H}'_B$. Then similarly as in Lemma 3.12 we get that
$\bar{A_0}$ (in $\mathfrak{H}'_{A_0}$) $\cong$ $\bar{A}$ (in $\mathfrak{H}'_{A}$), so
$\mathfrak{H}'_{A_0} = \mathfrak{H}'_{A}$ (this can be done, since $\tau|_{B_0} = \eta|_{B_0}$).
Now again by
Proposition 3.19 of Chapter V in \cite{T79} we have that
$\tilde{\eta}(x) \overset{def}{=} \langle x(\widehat{1_A}), \widehat{1_A} \rangle_{\mathfrak{H}'_A}$
($\widehat{1_A}$ is abuse of notation - in this case it's the element, corresponding to $1_A$ in
$\mathfrak{H}'_A$) defines a
faithful normal trace on $\overline{\pi'_{A}(A)}$ (in $\mathfrak{H}'_A$).
In particular $\tilde{\eta}|_{\overline{\pi'_A(B)}}$ is a faithful normal trace on
$\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_{A}$).
By uniqueness of $\tau|_{B_0}$ we have $\tau|_{B_0} = \eta|_{B_0}$, so for $b_0 \in B_0$ we have $\tilde{\tau}
(b_0) = \tau(b_0) = \eta(b_0) = \langle \pi'_{A}(b_0)(\widehat{1_A}), \widehat{1_A} \rangle_{\mathfrak{H}'_A} = \tilde{\eta}(\pi'_{A}(b_0))$.
\par
Since $B_0$ is simple, it follows that $\pi'_{A}|_{B_0}$ is a $*$-isomorphism from $B_0$ onto $\pi'_{A}(B_0)$ and from Exercise 7.6.7 in
\cite{KR86} it follows that $\pi'_{A}|_{B_0}$ extends to a $*$-isomorphism from $\bar{B_0}$ (in $\mathfrak{H}_A$) $\cong$ $\bar{B}$
(in $\mathfrak{H}_A$) onto $\overline{\pi'_{A}(B_0)}$ (in $\mathfrak{H}'_A$) $\cong$ $\overline{\pi'_{A}(B)}$ (in $\mathfrak{H}'_A$).
We will denote this $*$-isomorphism by $\theta$.
We set $w \overset{def}{=} \pi'_A(u)$, $\beta \overset{def}{=} \theta \Ad(u) \theta^{-1}
\in Aut(\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$)). For $b_0 \in B_0$ we have
$w \pi'_A(b_0) w^* = \pi'_A(u b_0 u^*) = \pi'_A((\Ad(u))(b_0)) =
\beta (\pi'_A(b_0))$.
So by weak continuity follows $\beta = \Ad(w)$ on $\overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$).
Since $\bar{B}$
(in $\mathfrak{H}_A$) is a factor and $\{ \Ad(u^k) | 1 \leq k \leq n-1 \}$ are all outer, Kallman's Theorem
(Corollary 1.2 in \cite{Ka69}) gives us that $\{ \Ad(u^k) | 1 \leq k \leq n-1 \}$ act freely on $\bar{B}$
(in $\mathfrak{H}_A$). Namely if $\bar{b} \in \bar{B}$ (in $\mathfrak{H}_A$), and if $\forall \bar{b}'
\in \bar{B}$ (in $\mathfrak{H}_A$), $\bar{b} \bar{b}' = \Ad(u^k)(\bar{b}') \bar{b}$, then $\bar{b} = 0$.
Then by the above settings it is
clear that $\{ \Ad(w^k) | 1 \leq k \leq n-1 \}$ also act freely on $\overline{\pi'_A(B)}$
(in $\mathfrak{H}'_A$).
\par
Since $\tilde{\eta}$ is a faithful normal trace on $\overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$), then by Proposition 2.36 of Chapter V in
\cite{T79} there exists a faithful conditional expectation $P: \overline{\pi'_A(A)} \rightarrow \overline{\pi'_A(B)}$ (both weak
closures are in $\mathfrak{H}'_A$).
$\forall x \in \overline{\pi'_A(B)}$ (in $\mathfrak{H}'_A$), and $\forall 1 \leq k \leq n-1$,
$\Ad(w^k)(x) w^k = w^k x$. Applying $P$ we get $\Ad(w^k)(x)(P(w^k)) = P(w^k) x$, so by the free action of
$\Ad(w^k)$ we get that $P(w^k) = 0$, $\forall 1
\leq k \leq n-1$.
It's clear that $\{ \overline{\pi'_A(B)} \} \cup \{ w^k | 1 \leq k \leq n-1 \}$ generates
$\overline{\pi'_A(A)}$ (in
$\mathfrak{H}'_A$) as a von Neumann algebra.
Now we use Proposition 22.2 from \cite{S81}. It gives us a $*$-isomorphism $\Phi :
\overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$) $\rightarrow \bar{B} \rtimes
\{ \Ad(u^k) | 1 \leq k \leq n-1 \} \cong \bar{A}$ (last two weak
closures are in $\mathfrak{H}_A$) with $\Phi(\theta(x)) = x,$ $x\in \bar{B}$ (in $\mathfrak{H}_A$),
$\Phi(w) = u$. So since $\bar{A}$
(in $\mathfrak{H}_A$) is a finite factor, so is $\overline{\pi'_A(A)}$ (in $\mathfrak{H}'_A$), and so its trace $\tilde{\eta}$ is unique.
Hence, $\tilde{\eta} = \tilde{\tau} \circ \Phi$, and so $\forall b \in B$, and $\forall 1 \leq k \leq n-1$ we have $\eta(b u^k) =
\tilde{\eta}(\pi'_A(b) \pi'_A(u^k)) = \tilde{\tau}(\Phi(\pi'_A(b)) \Phi(\pi'_A(u^k))) = \tilde{\tau}(\Phi(\theta(b)) \Phi(w^k)) =
\tilde{\tau}(b u^k) = \tau(b u^k)$. By continuity and linearity of both traces we get $\eta = \tau$, just what we want.
\end{proof}
We conclude this section by proving the following
\begin{prop}
Let
\begin{center}
$(A,\tau) \overset{def}{=} ( \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_m}{\overset{p_m}
{\mathbb{C}}})*(\mathbb{M}_n, tr_n)$,
\end{center}
where $\alpha_1 \leq \alpha_2 \leq ... \leq \alpha_m$. Then:
\par
(I) If $\alpha_m < 1-\frac{1}{n^2}$, then $A$ is unital, simple with a unique trace $\tau$.
\par
(II) If $\alpha_m = 1-\frac{1}{n^2}$, then we have a short exact sequence $0 \rightarrow A_0 \rightarrow A \rightarrow \mathbb{M}_n
\rightarrow 0$, where $A$ has no central projections, and $A_0$ is nonunital, simple with a unique trace $\tau|_{A_0}$.
\par
(III) If $\alpha_m > 1-\frac{1}{n^2}$, then $A = \underset{n^2 - n^2 \alpha_m}{\overset{f}{A_0}} \oplus \underset{n^2 \alpha_m - n^2 +
1}{\overset{1-f}{\mathbb{M}_n}}$, where $1-f \leq p_m$, and where $A_0$ is unital, simple and has a unique trace
$(n^2 - n^2 \alpha_m)^{-1} \tau|_{A_0}$.
\par
Let $f$ denote the identity projection in cases (I) and (II). Then in all cases, for each of the projections $f p_1, ..., f p_m$ we have a unital,
diffuse abelian $C^*$-subalgebra of $A$, supported on it.
\par
In all the cases $p_m$ is a full projection in $A$.
\end{prop}
\begin{proof}
We have to prove the second part of the proposition, since the first part follows from Lemma 3.10, Lemma 3.11, Lemma 3.12, Lemma 3.17 and
Lemma 3.18. From the discussion above we see that in all cases we have $fA = fB \rtimes \{ \Ad(f u^k f) | 0 \leq k \leq n-1 \}$, where $B$
and $\{ \Ad(f u^k ) | 0 \leq k \leq n-1 \}$ are as above. So the existence of the unital, diffuse abelian $C^*$-subalgebras follows from
Theorem 2.4, applied to $B$.
\par
In the case (I) $p_m$ is clearly full, since $A$ is simple. In the case (III) it's easy to see that $p_m \wedge f \neq 0$ and $p_m \geq (1-f)$, so since $A_0$ and $\mathbb{M}_n$ are simple in this case, then $p_m$ is full in $A$. In case (II) it follows from Theorem 2.4 that $p_m$ is full in $B$, and consequently in $A$.
\end{proof}
\section{ The General Case}
In this section we prove the general case of Theorem 2.6, using the result from the previous section
(Proposition 3.19). The proof of the general case involves techniques from \cite{D99}. So we will need two technical results from there.
\par
The first one is Proposition 2.8 in \cite{D99} (see also \cite{D93}):
\begin{prop}
Let $A = A_1 \oplus A_2$ be a direct sum of unital $C^*$-algebras and
let $p = 1 \oplus 0 \in A$. Suppose $\phi_A$ is a state on $A$ with $0
< \alpha \overset{def}{=} \phi_A(p) < 1$. Let $B$ be a unital
$C^*$-algebra with a state $\phi_B$ and let $(\mathfrak{A}, \phi) =
(A, \phi_A) * (B, \phi_B)$. Let $\mathfrak{A}_1$ be the
$C^*$-subalgebra of $\mathfrak{A}$ generated by $(0 \oplus A_2) +
\mathbb{C} p \subseteq A$, together with $B$. In other words
\begin{equation*}
(\mathfrak{A}_1, \phi|_{\mathfrak{A}_1}) =
(\underset{\alpha}{\overset{p}{\mathbb{C}}} \oplus
\underset{1-\alpha}{\overset{1-p}{A_2}}) * (B, \phi_B).
\end{equation*}
Then $p \mathfrak{A} p$ is generated by $p \mathfrak{A}_1 p$ and $A_1
\oplus 0 \subset A$, which are free in $(p \mathfrak{A} p,
\frac{1}{\alpha} \phi|_{p \mathfrak{A} p})$. In other words
\begin{equation*}
(p \mathfrak{A} p, \frac{1}{\alpha} \phi|_{p \mathfrak{A} p}) \cong
(p \mathfrak{A}_1 p,\frac{1}{\alpha} \phi|_{p \mathfrak{A}_1 p}) *
(A_1, \frac{1}{\alpha} \phi_A|_{A_1}).
\end{equation*}
\end{prop}
\begin{remark}
This proposition was proved for the case of von Neumann algebras in \cite{D93}. It is true also in the case of $C^*$-algebras.
\end{remark}
The second result is Proposition 2.5 (ii) of \cite{D99}, which is easy
and we give its proof also:
\begin{prop}
Let $A$ be a $C^*$-algebra. Take $h \in A, h \geq 0$, and let $B$ be the hereditary subalgebra
$\overline{hAh}$ of $A$ ( $\overline{*}$ means norm closure). Suppose that $B$ is full in $A$. Then if
$B$ has a unique trace, then $A$ has at most one tracial state.
\end{prop}
\begin{proof}
It's easy to see that $\Span \{ xhahy | a,x,y \in A \}$ is norm dense
in $A$. If $\tau$ is a tracial state on $A$ then $\tau(xhahy) =
\tau(h^{1/2} ahyx h^{1/2})$. Since $h^{1/2} ahyx h^{1/2} \in B$,
$\tau$ is uniquely determined by $\tau_B$.
\end{proof}
It is clear that Proposition 3.19 agrees with Theorem 2.6, so it is
a special case.
\par
As a next step we look at a $C^*$-algebra of the form
\begin{equation*}
(M, \tau) = (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n),
\end{equation*}
where $A_0$ comes with a specified trace and has a unital, diffuse
abelian $C^*$-subalgebra with unit $p'_0$. Also we suppose that
$\alpha'_0 \geq 0$, $0 < \alpha_1' \leq ... \leq \alpha_k'$, $0 <
\alpha_1 \leq ... \leq \alpha_l$, $m_1, ..., m_k \geq 2$, and either $\alpha'_0 > 0$ or $k \geq 1$, or both. Let's denote $p_0
\overset{def}{=} p_0' + p_1' + ... + p_k'$, $B_0 \overset{def}{=}
\underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ...
\oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}}$, and $\alpha_0 \overset{def}{=} \alpha'_0 + \alpha'_1 + ... + \alpha'_k = \tau(p_0)$. \\
Let's have a look at the $C^*$-subalgebras $N$ and $N'$ of $M$ given by
\begin{equation*}
(N, \tau|_N) = (\underset{\alpha_0}{\overset{p_0}{\mathbb{C}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n)
\end{equation*}
and
\begin{equation*}
(N', \tau|_{N'}) = (\underset{\alpha_0'}{\overset{p_0'}{\mathbb{C}}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{C}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n).
\end{equation*}
We studied the $C^*$-algebras, having the form of $N$ and $N'$ in the previous section. A brief description is as follows:
\par
If $\alpha_0, \alpha_l < 1-\frac{1}{n^2}$, then $N$ is
simple with a unique trace and $N'$ is also simple with a unique trace. For each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it.
\par
If $\alpha_0$, or $\alpha_l$ $= 1-\frac{1}{n^2}$, then
$N$ has no central projections, and we have a short exact sequence
$0 \rightarrow N_0 \rightarrow N \rightarrow \mathbb{M}_n \rightarrow
0$, with $N_0$ being simple with a unique trace. Moreover $p_0$ or $p_l$, respectively, is full in $N$. For each of the
projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital,
diffuse abelian $C^*$-subalgebra of $N'$, supported on it.
\par
If $\alpha_0$ or $\alpha_l$ $> 1-\frac{1}{n^2}$, then
$N = \overset{q}{N_0} \oplus \mathbb{M}_n$, with $N_0$ being simple
and having a unique trace.
\par
We consider 2 cases:
\par
(I) case: $\alpha_l \geq \alpha_0$.
\par
(1) $\alpha_l < 1-\frac{1}{n^2}$.
\par
In this case $N$ and $N'$ are simple and have unique traces, and $p_0$ is full in $N$ and consequently $1_M = 1_N$ is contained in $\langle p_0 \rangle_N$ -- the ideal of $N$ generated by $p_0$. Since $\langle p_0 \rangle_N \subset \langle p_0 \rangle_M$ it follows that $p_0$ is full also in $M$. From Proposition
4.1 we get $p_0 M p_0 \cong (A_0 \oplus B_0) * p_0 N p_0$. Then from
Theorem 3.9 follows that $p_0 M p_0$ is simple and has a unique trace.
Since $p_0$ is a full projection, Proposition 4.3 tells us that
$M$ is simple and $\tau$ is its unique trace. For each of the
projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital,
diffuse abelian $C^*$-subalgebra of $M$, supported on it, and coming
from $N'$.
\par
(2) $\alpha_l = 1-\frac{1}{n^2}$.
\par
In this case it is also true that for each of the projections
$p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse
abelian $C^*$-subalgebra of $M$, supported on it, and coming from
$N'$. It is easy to see that $M$ is the linear span of $p_0 M p_0$,
$p_0 M (1- p_0) N (1- p_0)$, $(1 -p_0) N p_0 M p_0$, $(1- p_0) N p_0 M
p_0 N (1- p_0)$ and $(1- p_0) N (1- p_0)$. We know that we have a
$*$-homomorphism $\pi : N \rightarrow \mathbb{M}_n$, such that $\pi(p_l) = 1$.
Then it is clear that $\pi(p_0) = 0$, so we can extend $\pi$ to a
linear map $\tilde{\pi}$ on $M$, defining it to equal $0$ on $p_0 M
p_0$, $p_0 M (1- p_0) N (1- p_0)$, $(1 -p_0) N p_0 M p_0$ and $(1- p_0) N
p_0 M p_0 N (1- p_0)$. It is also clear then that $\tilde{\pi}$ will
actually be a $*$-homomorphism. Since $\ker(\pi)$ is simple in $N$ and
$p_0 \in \ker(\pi)$, then $p_0$ is full in $\ker(\pi) \subset N$, so by
the above representation of $M$ as a linear span we see that $p_0$ is
full in $\ker(\tilde{\pi})$ also. From Proposition 4.1 follows that
$p_0 M p_0 \cong (A_0 \oplus B_0) * (p_0 N p_0)$. Since $p_0 N p_0$
has a unital, diffuse abelian $C^*$-subalgebra with unit $p_0$, it
follows from Theorem 3.9 that $p_0 M p_0$ is simple and has a unique
trace (to make this conclusion we could use Theorem 1.5 instead). Now
since $p_0 M p_0$ is full and hereditary in $\ker(\tilde{\pi})$, from
Proposition 4.3 follows that $\ker(\tilde{\pi})$ is simple and has a
unique trace.
\par
(3) $\alpha_l > 1-\frac{1}{n^2}$.
\par
In this case $N = \underset{n^2 - n^2 \alpha_l}{\overset{q}{N_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-q}{\mathbb{M}_n}}$ and also $N' = \underset{n^2 - n^2 \alpha_l}{\overset{q}{N'_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-q}{\mathbb{M}_n}}$ with $N_0$ and $N'_0$ being simple with unique traces. For each of the projections $q p_0', q p_1'
, ..., q p_k', q p_1, ..., q p_l$ we have a unital, diffuse abelian
$C^*$-subalgebra of $M$, supported on it, and coming from $N'_0$.
\par
Since $p_0 \leq q$ we can write $M$ as a linear span of $p_0 M p_0$,
$p_0 M p_0 N_0 (1- p_0)$, $(1- p_0) N_0 p_0 M p_0$, $(1- p_0) N_0 p_0
M p_0 N_0 (1- p_0)$, $(1- p_0) N_0 (1- p_0)$ and $\mathbb{M}_n$. So we
can write $M = \underset{n^2 - n^2 \alpha_l}{\overset{q}{M_0}} \oplus \underset{n^2 \alpha_l - n^2 + 1}{\overset{1-q}{\mathbb{M}_n}}$,
where $M_0 \overset{def}{=} q M q \supset N_0$.
We know that $p_0$ is full in
$N_0$, so as before we can write $1_{M_0} = 1_{N_0} \in \langle p_0 \rangle_{N_0} \subset \langle p_0 \rangle_{M_0}$, so $\langle p_0 \rangle_{M_0} = M_0$. Because of Proposition 4.1, we can write $p_0 M_0 p_0 \cong (A_0
\oplus B_0) * (p_0 N_0 p_0)$. Since $p_0 N_0 p_0$ has a unital,
diffuse abelian $C^*$-subalgebra with unit $p_0$, then from Theorem
3.9 (or from Theorem 1.5) it follows that $p_0 M_0 p_0$ is simple with a
unique trace. Since $p_0 M_0 p_0$ is full and hereditary in $M_0$,
Proposition 4.3 yields that $M_0$ is simple with a unique trace.
\par
(II) $\alpha_0$ $>$ $\alpha_l$.
\par
(1) $\alpha_0 \leq 1- \frac{1}{n^2}$.
\par
In this case $p_0$ is full in $N$ and also in $N'$, so $1_M = 1_N \in \langle p_0 \rangle_N$,
which means $p_0$ is full in $M$ also. $p_0 M p_0$ is a full hereditary $C^*$-subalgebra of $M$ and $p_0 M p_0
\cong (A_0 \oplus B_0) * p_0 N p_0$ by Proposition 4.1. Since $p_0 N p_0$ has a diffuse abelian $C^*$-subalgebra, Theorem 3.9
(or Theorem 1.5) shows that $p_0 M p_0$ is simple with a unique trace and then by Proposition 4.3 follows that the same is true for $M$.
For each of the projections $p_0', p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported
on it, coming from $N'$.
\par
(2) $\alpha_0$ $> 1-\frac{1}{n^2}$. \\
We have 3 cases:
\par
(2$'$) $\alpha'_0 > 1-\frac{1}{n^2}$.
\par
In this case $N \cong \overset{q}{N_0} \oplus \mathbb{M}_n$ and $N' \cong \overset{q'}{N'_0} \oplus \mathbb{M}_n$, where $q \leq q'$, with $N_0$ and $N'_0$ being simple and having unique traces. It is easy to see that $p'_1, ..., p'_k, p_1, ..., p_l \leq q'$, so for each of the
projections $p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it. So those
$C^*$-subalgebras live in $M$ also. We have a unital, diffuse abelian $C^*$-subalgebra of $A_0$, supported on $1_{A_0}$, which yields a
unital, diffuse abelian $C^*$-subalgebra on $M$, supported on $p'_0$. It is clear that $p_0$ is full in $N$, so as before, $1_M =
1_N \in \langle p_0 \rangle_N$, so $p_0$ is full in $M$ also, so $p_0 M p_0$ is a full hereditary $C^*$-subalgebra of $M$. From
Proposition 4.1 we have $p_0 M p_0 \cong (A_0 \oplus B_0) * ( p_0 N_0 p_0 \oplus \mathbb{M}_n)$. It is easy to see that
$\mathbb{M}_n$, for $n \geq 2$, contains two $tr_n$-orthogonal zero-trace unitaries. Since also $p_0 N_0 p_0$ has a
unital, diffuse abelian $C^*$-subalgebra, supported on $1_{N_0}$, it is easy to see (using Proposition 2.2) that it also contains two $\tau|_{N_0}$-orthogonal, zero-trace unitaries. Then the conditions of
Theorem 1.5 are satisfied. This means that $p_0 M p_0$ is simple with a unique trace and Proposition 4.3 implies that $M$ is simple with
a unique trace also.
\par
(2$''$) $\alpha'_k > 1-\frac{1}{n^2}$.
\par
Let's denote $$N'' = (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus ...
\oplus \underset{\alpha_{k-1}'}{\overset{p_{k-1}'}{\mathbb{M}_{m_{k-1}}}} \oplus \underset{\alpha_{k}'}{\overset{p_k'}{\mathbb{C}}} \oplus
\underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n).$$
Then $N''$ satisfies the conditions of case (I,3) and so $N'' \cong \overset{q}{N''_0} \oplus \mathbb{M}_n$. Clearly $p_0', p_1', ...,
p_{k-1}', p_1, ..., p_l \leq q$, so for each of the projections $p_0', p_1', ..., p_{k-1}', p_1, ..., p_l$ we have a unital, diffuse
abelian $C^*$-subalgebra of $N''_0$, supported on it. Those $C^*$-algebras live in $M$ also. From case (I,3) we have that $p'_k$ is full in
$N''$ and as before $1_M = 1_{N''} \in \langle p'_k \rangle_{N''}$ implies that $p'_k$ is full in $M$ also. From Proposition 4.1 follows
that $p'_k M p'_k \cong (p'_k N''_0 p'_k \oplus \mathbb{M}_n) * \mathbb{M}_{m_k}$. Since $N''_0$ has a unital, diffuse abelian
$C^*$-subalgebra, supported on $q p'_k$, then an argument, similar to the one we made in case (II, 2$'$), allows us to apply Theorem 1.5 to get that
$p'_k M p'_k$ is simple with a unique trace. By Proposition 4.3 follows that the same is true for $M$. The unital, diffuse abelian
$C^*$-subalgebra of $M$, supported on $p'_k$, we can get by applying the note after Theorem 1.5 to $p'_k M p'_k \cong
(p'_k N''_0 p'_k \oplus \mathbb{M}_n) * \mathbb{M}_{m_k}$.
\par
(2$'''$) $\alpha'_0$ and $\alpha'_k$ $\leq 1-\frac{1}{n^2}$.
\par
In this case $N \cong \overset{q}{N_0} \oplus \mathbb{M}_n$, with $N_0$ being simple and having a unique trace. Moreover $N'$ has no central projections and for each of the projections $p'_0, p_1', ..., p_k', p_1, ..., p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $N'$, supported on it. So those
$C^*$-subalgebras live in $M$ also. It is clear that $p_0$ is full in $N$, so as before $1_M = 1_N \in \langle p_0 \rangle_N$, so $p_0$ is full in $M$ also, so $p_0 M p_0$ is a full hereditary $C^*$-subalgebra of $M$. From
Proposition 4.1 we have $p_0 M p_0 \cong (A_0 \oplus B_0) * ( p_0 N_0 p_0 \oplus \mathbb{M}_n)$. Since $A_0$ and $p_0 N_0 p_0$ both have
unital, diffuse abelian $C^*$-subalgebras, supported on their units, it is easy to see (using Proposition 2.2), that the conditions of
Theorem 1.5 are satisfied. This means that $p_0 M p_0$ is simple with a unique trace and Proposition 4.3 yields that $M$ is simple with
a unique trace also.
\par
We summarize the discussion above in the following
\begin{prop}
Let
\begin{equation*}
(M,\tau) \overset{def}{=} (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}{\mathbb{M}_{m_1}}} \oplus
... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{C}}} \oplus ... \oplus
\underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * (\mathbb{M}_n, tr_n),
\end{equation*}
where $n \geq 2$, $\alpha'_0 \geq 0$, $\alpha'_1 \leq \alpha'_2 \leq ... \leq \alpha'_k$, $\alpha_1 \leq ... \leq \alpha_l$, $m_1, ..., m_k \geq 2$,
and $\overset{p'_0}{A_0} \oplus 0$ has a unital, diffuse abelian $C^*$-subalgebra, having $p'_0$ as a unit. Then:
\par
(I) If $\alpha_l < 1-\frac{1}{n^2}$, then $M$ is unital, simple with a unique trace $\tau$.
\par
(II) If $\alpha_l = 1-\frac{1}{n^2}$, then we have a short exact sequence $0 \rightarrow M_0 \rightarrow M \rightarrow \mathbb{M}_n
\rightarrow 0$, where $M$ has no central projections and $M_0$ is nonunital, simple with a unique trace $\tau|_{M_0}$.
\par
(III) If $\alpha_l > 1-\frac{1}{n^2}$, then $M = \underset{n^2 - n^2 \alpha_l}{\overset{f}{M_0}} \oplus \underset{n^2 \alpha_l - n^2 +
1}{\overset{1-f}{\mathbb{M}_n}}$, where $1-f \leq p_l$, and where $M_0$ is unital, simple and has a unique trace
$(n^2 - n^2 \alpha_l)^{-1} \tau|_{M_0}$.
\par
Let $f$ denote the identity projection for cases (I) and (II). Then in all cases for each of the projections $f p_0', f p_1', ..., f p_k', f p_1, ...,
f p_l$ we have a unital, diffuse abelian $C^*$-subalgebra of $M$, supported on it.
\par
In all the cases $p_l$ is a full projection in $M$.
\end{prop}
To prove Theorem 2.6 we will use Proposition 4.4. First let's check that Proposition 4.4 agrees with the conclusion of
Theorem 2.6. We can write $$(M,\tau) \overset{def}{=} (\underset{\alpha_0'}{\overset{p_0'}{A_0}} \oplus \underset{\alpha_1'}{\overset{p_1'}
{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\alpha_k'}{\overset{p_k'}{\mathbb{M}_{m_k}}} \oplus \underset{\alpha_1}{\overset{p_1}
{\mathbb{C}}} \oplus ... \oplus \underset{\alpha_l}{\overset{p_l}{\mathbb{C}}}) * \underset{\beta_1}{\overset{q_1}{\mathbb{M}_n}},$$ where
$q_1 = 1_M$ and $\beta_1 = 1$. It is easy to see that $L_0 = \{ (l,1) | \frac{\alpha_l}{1^2} + \frac{1}{n^2} = 1 \} = \{ (l,1) | \alpha_l =
1-\frac{1}{n^2} \}$, which is not empty if and only if $\alpha_l = 1-\frac{1}{n^2}$. Also $L_+ = \{ (l,1) | \frac{\alpha_l}{1^2} +
\frac{1}{n^2} > 1 \} = \{ (l,1) | \alpha_l > 1-\frac{1}{n^2} \}$, and here $L_+$ is not empty if and only if
$\alpha_l > 1-\frac{1}{n^2}$. If both $L_+$ and $L_0$ are empty, then $M$ is simple with a unique trace. If $L_0$ is not empty, then
clearly $L_+$ is empty, so we have no central projections and a short exact sequence $0 \rightarrow M_0 \rightarrow M \rightarrow
\mathbb{M}_n \rightarrow 0$, with $M_0$ being simple with a unique trace. In this case all nontrivial projections are full in $M$. If
$L_+$ is not empty, then clearly $L_0$ is empty and so $M = \underset{n^2 -n^2 \alpha_l}{\overset{q}{M_0}} \oplus
\underset{n^2(\frac{\alpha_l}{1^2} + \frac{1}{n^2} - 1)}{\overset{1-q}{\mathbb{M}_n}}$, where $M_0$ is simple with a unique trace. $p_l$ is
full in $M$. \\
\par
\emph{Proof of Theorem 2.6:} \\
\par
Now to prove Theorem 2.6 we start with
\begin{equation*}
(\mathfrak{A},\phi )=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}} \oplus ...
\oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{M}_{n_k}}})*(\underset{\beta_0}{\overset{q_0}{B_0}} \oplus
\underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l}{\mathbb{M}_{m_l}}}),
\end{equation*}
where $A_0$ and $B_0$ have unital, diffuse abelian $C^*$-subalgebras, supported on their units
(we allow $\alpha_0 = 0$ or/and $\beta_0 =
0$). The case where $n_1 = ... = n_k = m_1 = ... = m_l = 1$ is treated in Theorem 2.5. The case where
$\alpha_0 = 0$, $k = 1$, and $n_k >
1$ was treated in Proposition 4.4. So we can suppose without loss of generality that $n_k \geq 2$ and either
$k > 1$ or $\alpha_0 > 0$ or
both. To prove that the conclusions of Theorem 2.6 hold in this case we will use induction on
$\card \{ i | n_i \geq 2 \} + \card \{ j | m_j \geq 2 \}$, having Theorem 2.5 ($\card \{ i | n_i \geq 2 \} +
\card \{ j | m_j \geq 2 \} = 0$)
as first step of the induction. We look at
\begin{equation*}
(\mathfrak{B},\phi|_\mathfrak{B})=(\underset{\alpha_0}{\overset{p_0}{A_0}} \oplus \underset{\alpha_1}{\overset{p_1}{\mathbb{M}_{n_1}}}
\oplus ... \oplus \underset{\alpha_{k-1}}{\overset{p_{k-1}}{\mathbb{M}_{n_{k-1}}}} \oplus \underset{\alpha_k}{\overset{p_k}{\mathbb{C}}})
* (\underset{\beta_0}{\overset{q_0}{B_0}} \oplus
\underset{\beta_1}{\overset{q_1}{\mathbb{M}_{m_1}}} \oplus ... \oplus \underset{\beta_l}{\overset{q_l}{\mathbb{M}_{m_l}}}) \subset
(\mathfrak{A},\phi).
\end{equation*}
We suppose that Theorem 2.6 is true for $(\mathfrak{B},\phi|_\mathfrak{B})$ and we will prove it for $(\mathfrak{A},\phi )$. This will be
the induction step and will prove Theorem 2.6.
\par
Denote $L_0^{\mathfrak{A}} \overset{def}{=} \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} = 1 \}$, $L_0^{\mathfrak{B}}
\overset{def}{=} \{ (i,j)| i \leq k-1$ and $\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} = 1 \} \cup \{ (k,j) | \frac{\alpha_k}{1^2} +
\frac{\beta_j}{m_j^2} = 1 \}$ and similarly $L_+^{\mathfrak{A}} \overset{def}{=} \{ (i,j)| \frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2}
> 1 \}$, and $L_+^{\mathfrak{B}} \overset{def}{=} \{ (i,j)| i \leq k-1$ and $\frac{\alpha_i}{n_i^2} + \frac{\beta_j}{m_j^2} > 1 \} \cup \{ (k,j) | \frac{\alpha_k}{1^2} +
\frac{\beta_j}{m_j^2} > 1 \}$. Clearly $L_0^{\mathfrak{A}} \cap \{ 1 \leq i \leq k-1 \} = L_0^{\mathfrak{B}} \cap \{ 1 \leq i \leq k-1 \}$
and similarly $L_+^{\mathfrak{A}} \cap \{ 1 \leq i \leq k-1 \} = L_+^{\mathfrak{B}} \cap \{ 1 \leq i \leq k-1 \}$. Let
$N_{\mathfrak{A}}(i,j) = \max(n_i, m_j)$ and let $N_{\mathfrak{B}}(i,j) = N_{\mathfrak{A}}(i,j), 1 \leq i \leq k-1$, and
$N_{\mathfrak{B}}(k,j) = m_j$.
By assumption
\begin{equation*}
\mathfrak{B}= \underset{\delta}{\overset{g}{\mathfrak{B}_0}} \oplus \underset{(i,j)\in L_+^{\mathfrak{B}}}{\bigoplus}
\underset{\delta_{ij}}{\overset{g_{ij}}{\mathbb{M}_{N_{\mathfrak{B}}(i,j)}}}.
\end{equation*}
We want to show that
\begin{equation}
\mathfrak{A} = \underset{\gamma}{\overset{f}{\mathfrak{A}_0}} \oplus \underset{(i,j)\in L_+^{\mathfrak{A}}}{\bigoplus}
\underset{\gamma_{ij}}{\overset{f_{ij}}{\mathbb{M}_{N_{\mathfrak{A}}(i,j)}}}.
\end{equation}
We can represent $\mathfrak{A}$ as the span of $p_k \mathfrak{A} p_k$, $p_k \mathfrak{A} p_k \mathfrak{B} (1-p_k)$, $(1-p_k) \mathfrak{B} p_k
\mathfrak{A} p_k$, $(1-p_k) \mathfrak{B} p_k \mathfrak{A} p_k \mathfrak{B} (1-p_k)$, and $(1-p_k) \mathfrak{B} (1-p_k)$.
From the fact that $g_{kj} \leq p_k$ and $g_{ij} \leq 1-p_k, \forall 1 \leq i \leq k-1$ we see that $p_k \mathfrak{B} (1-p_k) = p_k
\mathfrak{B}_0 (1-p_k)$, $(1-p_k) \mathfrak{B} p_k = (1-p_k) \mathfrak{B}_0 p_k$, and $(1-p_k) \mathfrak{B} (1-p_k) =
(1-p_k) \mathfrak{B}_0 (1-p_k) \oplus \underset{i \neq k}{\underset{(i,j) \in L_+^{\mathfrak{B}}}{\bigoplus}} \mathbb{M}_{N(i,j)}$. All this tells us that we can represent $\mathfrak{A}$ as the span of $p_k \mathfrak{A} p_k$, $p_k \mathfrak{A} p_k \mathfrak{B}_0 (1-p_k)$, $(1-p_k) \mathfrak{B}_0 p_k
\mathfrak{A} p_k$, $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A} p_k \mathfrak{B}_0 (1-p_k)$, $ (1-p_k) \mathfrak{B}_0 (1-p_k)$, and $\underset{i \neq k}{\underset{(i,j)\in L_+^{\mathfrak{B}}}{\bigoplus}} \underset{\delta_{ij}}{\overset{g_{ij}}{\mathbb{M}_{N(i,j)}}}$.
\par
In order to show that $\mathfrak{A}$ has the form (9), we need to look at $p_k \mathfrak{A} p_k$. From Proposition 4.1 we have
$$p_k \mathfrak{A} p_k \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k} \cong
(\underset{\frac{\delta}{\alpha_k}}{\overset{g}{p_k \mathfrak{B}_0 p_k}} \oplus \underset{(k,j)\in L_+^{\mathfrak{B}}}{\bigoplus}
\underset{\frac{\delta_{kj}}{\alpha_k}}{\overset{g_{kj}}{\mathbb{M}_{N(k,j)}}}) * \mathbb{M}_{n_k}.$$
Since by assumption $p_k \mathfrak{B}_0 p_k$ has a unital, diffuse abelian $C^*$-subalgebra, supported on $1_{p_k \mathfrak{B}_0 p_k}$, we can use Proposition 4.4 to determine the form of $p_k \mathfrak{A} p_k$.
\par
Thus $p_k \mathfrak{A} p_k$:
\par
(i) Is simple with a unique trace whenever for all $1 \leq r \leq l$ with $N(k,r) = 1$ we have
$\frac{\delta_{kr}}{\alpha_k} < 1 - \frac{1}{n_k^2}$.
\par
(ii) Is an extension $0 \rightarrow I \rightarrow p_k \mathfrak{A} p_k \rightarrow \mathbb{M}_{n_k}
\rightarrow 0$ if $\exists 1 \leq r \leq l$, with $N(k,r) = 1$, and
$\frac{\delta_{kr}}{\alpha_k} = 1 - \frac{1}{n_k^2}$.
Moreover $I$ is simple with a unique trace and has no central projections.
\par
(iii) Has the form $p_k \mathfrak{A} p_k = I \oplus \underset{n_k^2(\frac{\delta_{kr}}{\alpha_k} - 1 +
\frac{1}{n_k^2})}{\mathbb{M}_{n_k}}$, where $I$ is unital, simple with a unique trace whenever
$\exists 1 \leq r \leq l$ with $N(k,r) = 1$, and $\frac{\delta_{kr}}{\alpha_k} > 1 - \frac{1}{n_k^2}$.
\par
By assumption $\delta_{ij} = N(i,j)^2 (\frac{\alpha_i}{n_i^2} +\frac{\beta_j}{m_j^2} - 1)$,
so when $r$ satisfies the conditions of case (iii) above, then $m_r = 1$ and
$n_k^2(\frac{\delta_{kr}}{\alpha_k} - 1 + \frac{1}{n_k^2}) = n_k^2(\frac{\alpha_k + \beta_r - 1}{\alpha_k} +
\frac{1}{n_k^2} -1) = \frac{n_k^2}{\alpha_k}(\frac{\alpha_k}{n_k^2} + \frac{\beta_r}{1^2} - 1)$,
just what we needed to show. Defining $\mathfrak{A}_0 \overset{def}{=} (1-(\underset{(i,j)
\in L_+^{\mathfrak{A}}}{\oplus} f_{ij})) \mathfrak{A} (1-(\underset{(i,j) \in L_+^{\mathfrak{A}}}{\oplus}
f_{ij}))$, we see that $\mathfrak{A}$ has the form (9).
\par
We need to study $\mathfrak{A}_0$ now. Since clearly $g \leq f$, we see that
$\mathfrak{A} p_k \mathfrak{B}_0 = \mathfrak{A} p_k g \mathfrak{B}_0 = \mathfrak{A} g p_k \mathfrak{B}_0 =
\mathfrak{A}_0 p_k \mathfrak{B}_0$ and similarly $\mathfrak{A} p_k \mathfrak{B}_0 = \mathfrak{A}_0 p_k
\mathfrak{B}_0$. From this and from what we proved above follows that:
\begin{gather}
\mathfrak{A}_0 \text{ is the span of } p_k \mathfrak{A}_0 p_k,\ (1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k, \\ \notag
p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k),\ (1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k), \text{ and } (1-p_k) \mathfrak{B}_0 (1-p_k).
\end{gather}
We need to show that for each of the projections $f p_s$, $0 \leq s \leq k$ and $f q_t$, $1 \leq t \leq l$, we have a unital, diffuse
abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on it. The ones, supported on $f p_s$, $1 \leq s \leq k-1$ come from
$(1-p_k) \mathfrak{B}_0 (1-p_k)$ by the induction hypothesis. The one with unit $f p_k$ comes from the representation
$p_k \mathfrak{A} p_k \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k}$ and Proposition 4.4. For $1 \leq s \leq l$ we have
\begin{gather}
q_s \mathfrak{A} q_s \cong \underset{\frac{\gamma}{\beta_s}}{\overset{f q_s}{{q_s \mathfrak{A}_0 q_s}}}
\oplus \underset{1 \leq i \leq k-1}{\underset{(i,s) \in L_+^{\mathfrak{A}}}{\bigoplus}}
\underset{\frac{\gamma_{is}}{\beta_s}}{\overset{f_{is}}{\mathbb{M}_{N_{\mathfrak{A}}(i,s)}}} \oplus
\underset{\frac{\gamma_{ks}}{\beta_s}}{\overset{f_{ks}}{\mathbb{M}_{N_{\mathfrak{A}}(k,s)}}}
\end{gather}
and
\begin{gather}
q_s \mathfrak{B} q_s \cong \underset{\frac{\delta}{\beta_s}}{\overset{g q_s}{q_s \mathfrak{B}_0 q_s}} \oplus
\underset{1 \leq i \leq k-1}{\underset{(i,s) \in L_+^{\mathfrak{B}}}{\bigoplus}}
\underset{\frac{\delta_{is}}{\beta_s}}{\overset{g_{is}}{\mathbb{M}_{N_{\mathfrak{B}}(i,s)}}} \oplus
\underset{\frac{\delta_{ks}}{\beta_s}}{\overset{g_{ks}}{\mathbb{M}_{N_{\mathfrak{B}}(k,s)}}}.
\end{gather}
From what we showed above follows that for $1 \leq i \leq k-1$ we have $\gamma_{is} = \delta_{is}$ and
$f_{is} = g_{is}$. If $(k,s) \notin L_+^{\mathfrak{B}}$, (or $\alpha_k < 1 - \frac{\beta_s}{m_s^2}$),
then $(k,s) \notin L_+^{\mathfrak{A}}$ and by (11) and (12) we see that $gq_s = fq_s$ and so in $\mathfrak{A}_0$ we have a unital, diffuse abelian
$C^*$-subalgebra with unit $gq_s = fq_s$, which comes from $\mathfrak{B}_0$. If $(k,s) \in L_+^{\mathfrak{B}}$, then $gq_s \lvertneqq fq_s$
and since we have a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $gq_s$, coming from $\mathfrak{B}_0$, we
need only to find a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$, supported on $fq_s - gq_s$
and its direct sum with the one supported on $gq_s$ will be a unital, diffuse abelian $C^*$-subalgebra of
$\mathfrak{A}_0$, supported on $fq_s$. But from the form (11) and (12)
it is clear that $fq_s - gq_s \leq g_{ks}$, since from (11) and (12) $(f_{1s} + ... + f_{(k-1)s}) q_s \mathfrak{A} q_s (f_{1s} + ... + f_{(k-1)s}) = (g_{1s} + ... + g_{(k-1)s}) q_s \mathfrak{B} q_s (g_{1s} + ... + g_{(k-1)s})$. It is also clear then that $ fq_s - gq_s = f g_{ks} \leq p_k$, since $gq_s \perp g_{ks}$. We look for this $C^*$-subalgebra in
$$p_k \mathfrak{A} p_k = \underset{\frac{\gamma}{\alpha_k}}{\overset{fp_k}{ p_k \mathfrak{A}_0 p_k }} \oplus
\underset{(k,j)\in L_+^{\mathfrak{A}}}{\bigoplus} \underset{\frac{\gamma_{kj}}{\alpha_k}}{\overset{f_{kj}}
{\mathbb{M}_{N_{\mathfrak{A}}(k,j)}}} \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k},$$
$$ \cong (\underset{\frac{\delta}{\alpha_k}}{\overset{g}{p_k \mathfrak{B}_0 p_k}} \oplus \underset{(k,j)\in L_+^{\mathfrak{B}}}{\bigoplus}
\underset{\frac{\delta_{kj}}{\alpha_k}}{\overset{g_{kj}}{\mathbb{M}_{N_{\mathfrak{B}}(k,j)}}}) *
\mathbb{M}_{n_k}.$$
Proposition 4.4 gives us a unital,
diffuse abelian $C^*$-subalgebra of $p_k \mathfrak{A}_0 p_k$, supported on $(f p_k) g_{ks} = f g_{ks} =
fq_s -gq_s$. This proves that we have a unital, diffuse abelian $C^*$-subalgebra of $\mathfrak{A}_0$,
supported on $fq_s$.
\par
Now we have to study the ideal structure of $\mathfrak{A}_0$, knowing by the induction hypothesis, the form
of $\mathfrak{B}$. We will
use the "span representation" of $\mathfrak{A}_0$ (10).
\par
For each $(i,j) \in L_0^{\mathfrak{B}}$ we know the existence of $*$-homomorphisms
$\pi_{(i,j)}^{\mathfrak{B}_0} : \mathfrak{B}_0 \rightarrow \mathbb{M}_{N_{\mathfrak{B}}(i,j)}$.
For $i \neq k$ we can write those as $\pi_{(i,j)}^{\mathfrak{B}_0} : \mathfrak{B}_0 \rightarrow
\mathbb{M}_{N_{\mathfrak{A}}(i,j)}$ and since the support of $\pi_{(i,j)}^{\mathfrak{B}_0}$ is contained in
$(1-p_k)$, using (10), we can extend linearly $\pi_{(i,j)}^{\mathfrak{B}_0}$ to $\pi_{(i,j)}^{\mathfrak{A}_0} : \mathfrak{A}_0 \rightarrow \mathbb{M}_{N_{\mathfrak{A}}(i,j)}$, by defining it to be zero on $p_k \mathfrak{A}_0 p_k$, $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k$, $p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$, and $(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$. Clearly $\pi_{(i,j)}^{\mathfrak{A}_0}$ is a $*$-homomorphism also.
\par
By the induction hypothesis we know that $g p_k$ is full in $\underset{i \neq k}{\underset{(i,j)
\in L_0^{\mathfrak{B}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{B}_0}) \subset \mathfrak{B}_0$ and by (10),
and the way we extended $\pi_{(i,j)}^{\mathfrak{B}_0}$, we see that $f p_k$ is full in
$\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})
\subset \mathfrak{A}_0$. Then $p_k \mathfrak{A}_0 p_k$ is full and hereditary in
$\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$,
so by the Rieffel correspondence from \cite{R82}, we have that $p_k \mathfrak{A}_0 p_k$ and
$\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$
have the same ideal structure.
\par
Above we saw that
\begin{gather}
p_k \mathfrak{A} p_k = \underset{\frac{\gamma}{\alpha_k}}{\overset{fp_k}{ p_k \mathfrak{A}_0 p_k }} \oplus
\underset{(k,j)\in L_+^{\mathfrak{A}}}{\bigoplus} \underset{\frac{\gamma_{kj}}{\alpha_k}}{\overset{f_{kj}}
{\mathbb{M}_{N_{\mathfrak{A}}(k,j)}}} \cong (p_k \mathfrak{B} p_k) * \mathbb{M}_{n_k} \cong \\ \notag
\cong (\underset{\frac{\delta}{\alpha_k}}{\overset{gp_k}{p_k \mathfrak{B}_0 p_k}} \oplus \underset{(k,j)\in L_+^{\mathfrak{B}}}{\bigoplus}
\underset{\frac{\delta_{kj}}{\alpha_k}}{\overset{g_{kj}}{\mathbb{M}_{N_{\mathfrak{B}}(k,j)}}}) * \mathbb{M}_{n_k}.
\end{gather}
From Proposition 4.4 follows that $p_k \mathfrak{A}_0 p_k$ is not simple if and only if
$\exists 1 \leq s \leq m$, such that $(k,s) \in L_+^{\mathfrak{B}}, m_s = 1$ with
$\frac{\delta_{ks}}{\alpha_k} = 1-\frac{1}{n_k^2}$, where $\delta_{ks} = \alpha_k + \beta_s -1$. This means
that $\frac{\alpha_k + \beta_s -1}{\alpha_k} = 1 - \frac{1}{n_k^2}$, which is equivalent to
$\frac{\beta_s}{1^2} + \frac{\alpha_k}{n_k^2} = 1$, so this implies $(k,s) \in L_0^{\mathfrak{A}}$. If this
is the case (13), together with Proposition 4.4 gives us a $*$-homomorphism
$\pi'_{(k,s)} : p_k \mathfrak{A}_0 p_k \rightarrow \mathbb{M}_{n_k}$, such that
$\ker(\pi'_{(k,s)}) \subset p_k \mathfrak{A}_0 p_k$ is simple with a unique trace.
Using (10) we extend $\pi'_{(k,s)}$ linearly to a linear map $\pi_{(k,s)}^{\mathfrak{A}_0} :
\mathfrak{A}_0 \rightarrow \mathbb{M}_{n_k}$, by defining $\pi_{(k,s)}^{\mathfrak{A}_0}$ to be zero on
$(1-p_k) \mathfrak{B}_0 p_k \mathfrak{A}_0 p_k$, $p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$, $(1-p_k)
\mathfrak{B}_0 p_k \mathfrak{A}_0 p_k \mathfrak{B}_0 (1-p_k)$, and $(1-p_k) \mathfrak{B}_0 (1-p_k)$.
Similarly as before, $\pi_{(k,s)}^{\mathfrak{A}_0}$ turns out to be a $*$-homomorphism.
By the Rieffel correspondence of the ideals of $p_k \mathfrak{A}_0 p_k$ and
$\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$,
it is easy to see that the simple ideal $\ker(\pi'_{(k,s)}) \subset p_k \mathfrak{A}_0 p_k$ corresponds to
the ideal $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) \subset
\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, so
$\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ is simple. To see that
$\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ has a unique trace we
notice that from the construction of $\pi_{(i,j)}^{\mathfrak{A}_0}$ we have $\ker(\pi'_{(k,s)}) = p_k
\ker(\pi_{(k,s)}^{\mathfrak{A}_0}) p_k = p_k \underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0}) p_k$ (the last equality is true because $p_k \mathfrak{A}_0 p_k \subset \underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$).
Now we argue similarly as in the proof of Proposition 4.3, using the fact that $\ker(\pi'_{(k,s)})$ has a
unique trace: Suppose that $\rho$ is a trace on $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. It is easy to see that $\Span \{ x p_k a p_k y | x, y, a \in
\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}), a \geq 0 \}$ is dense in
$\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, since
$\ker(\pi'_{(k,s)})$ is full in $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0})$. Then since $p_k a p_k \geq 0$ we have $\rho(x p_k a p_k y) =
\rho((p_k a p_k) y x) = \rho((p_k a p_k)^{1/2} y x (p_k a p_k)^{1/2})$ and since $(p_k a p_k)^{1/2} y x
(p_k a p_k)^{1/2}$ is supported on $p_k$, it follows that $(p_k a p_k)^{1/2} y x (p_k a p_k)^{1/2} \in p_k
\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0}) p_k = \ker(\pi'_{(k,s)})$,
so $\rho$ is uniquely determined by $\rho|_{\ker(\pi'_{(k,s)})}$ and hence $\underset{(i,j)
\in L_0^{\mathfrak{A}}}{\bigcap} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ has a unique trace.
\par
If $\nexists 1 \leq s \leq m$ with $(k,s) \in L_0^{\mathfrak{A}}$ it follows from what we said above, that
$p_k \mathfrak{A}_0 p_k$ is simple with a unique trace. But since $p_k \mathfrak{A}_0 p_k$ is full and
hereditary in $\underset{i \neq k}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0}) = \underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ it follows that $\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0})$ is simple with a unique trace in this case too.
\par
We showed already that $f p_k$ is full in $\underset{i \neq k}{\underset{(i,j)
\in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$.
Now let $1 \leq r \leq k-1$. We need to show that $f p_r$ is full in
$\underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$.
From (11) and (12) follows that $f-g \leq p_k$. So $f p_r = g p_r$ for all $1 \leq r \leq k-1$.
From the way we constructed $\pi_{(i,j)}^{\mathfrak{A}_0}$ it is clear that $f p_r \in
\underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$.
It is also true that $f p_r \notin \ker(\pi_{(r,j)}^{\mathfrak{A}_0})$ for any $1 \leq j \leq l$. So the
smallest ideal of $\mathfrak{A}_0$, that contains $f p_r$, is $\underset{i \neq r}{\underset{(i,j)
\in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$, meaning that we must have $\langle f
p_r \rangle_{\mathfrak{A}_0} = \underset{i \neq r}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}}
\ker(\pi_{(i,j)}^{\mathfrak{A}_0})$.
\par
Finally, we need to show that for all $1 \leq s \leq l$ we have that $f q_s$ is full in
$\underset{j \neq s}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$.
Let $(i,j) \in L_0^{\mathfrak{A}}$ with $i \neq k$, $j \neq s$.
Since $g q_s \in \ker(\pi_{(i,j)}^{\mathfrak{B}})$ and since $(f-g)q_s \leq p_k$, the way we extended
$\pi_{(i,j)}^{\mathfrak{B}}$ to $\pi_{(i,j)}^{\mathfrak{A}}$ shows that
$f q_s \in \ker(\pi_{(i,j)}^{\mathfrak{B}})$. Let $(i,s) \in L_0^{\mathfrak{A}}$ and $i \neq k$.
Then we know that $g q_s \notin \ker(\pi_{(i,j)}^{\mathfrak{B}})$, which implies
$f q_s \notin \ker(\pi_{(i,j)}^{\mathfrak{A}})$. Suppose $(k,s) \in L_0^{\mathfrak{A}}$.
Then $m_s = 1$ and (13), Proposition 4.4, and the way we extended $\pi'_{(k,s)}$ to
$\pi_{(k,s)}^{\mathfrak{A}_0}$ show, that $f g_{ks} = fq_s - gq_s$ is full in $p_k \mathfrak{A}_0 p_k$,
meaning that $fq_s -gq_s$, and consequently $fq_s$, is not contained in $\ker(\pi_{(k,s)}^{\mathfrak{A}_0})$.
Finally let $j \neq s$, and suppose $(k,j) \in L_0^{\mathfrak{A}}$. This means that
$(k,j) \in L_+^{\mathfrak{B}}$ and also that the trace of $q_j$ is so big, that
$(i,s) \notin L_+^{\mathfrak{B}}$ and $(i,s) \notin L_0^{\mathfrak{B}}$ for any $1 \leq i \leq k$.
Then (12) shows that $q_s \leq g$. The way we defined $\pi_{(k,j)}^{\mathfrak{A}_0}$ using (13) and
Proposition 4.4 shows us that $\mathfrak{B}_0 \subset \ker(\pi_{(k,j)}^{\mathfrak{A}_0})$ in this case.
This shows $q_s = g q_s = fq_s \in \ker(\pi_{(k,j)}^{\mathfrak{A}_0})$. All this tells us that the smallest
ideal of $\mathfrak{A}_0$, containing $fq_s$, is
$\underset{j \neq s}{\underset{(i,j) \in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$,
and therefore $\langle fq_s \rangle_{\mathfrak{A}_0} = \underset{j \neq s}{\underset{(i,j)
\in L_0^{\mathfrak{A}}}{\bigcap}} \ker(\pi_{(i,j)}^{\mathfrak{A}_0})$.
\par
This concludes the proof of Theorem 2.6.
\qed
{\em Acknowledgements.} I would like to thank Ken Dykema, my advisor, for the many helpful conversations I
had with him, for the moral support and for reading the first version of this paper. I would also like to thank Ron Douglas and Roger Smith for some
discussions.
\end{document} |
\begin{document}
\journal{...}
\title{A nonsmooth variational approach to semipositone quasilinear problems in $\mathbb{R}^N$}
\author[1]{Jefferson Abrantes Santos\fnref{t1}} \address[1]{Universidade Federal de Campina Grande, Unidade Acad\^emica de Matem\'atica, \\ CEP: 58429-900, Campina Grande - PB, Brazil.} \ead{jefferson@mat.ufcg.edu.br}
\author[1]{Claudianor O. Alves\fnref{t2}} \ead{coalves@mat.ufcg.edu.br}
\author[3]{Eugenio Massa\fnref{t3}}\address[3]{{ Departamento de Matem\'atica,
Instituto de Ci\^encias Matem\'aticas e de Computa\c c\~ao, Universidade de S\~ao Paulo,
Campus de S\~ao Carlos, 13560-970, S\~ao Carlos SP, Brazil.}}
\ead{eug.massa@gmail.com}
\fntext[t1]{J. Abrantes Santos was partially supported by CNPq/Brazil 303479/2019-1}\fntext[t2]{C. O. Alves was partially supported by CNPq/Brazil 304804/2017-7}\fntext[t3]{E. Massa was partially supported by grant $\#$303447/2017-6, CNPq/Brazil.}
\date{}
\begin{abstract}
This paper concerns the existence of a solution for the following class of semipositone quasilinear problems
\begin{equation*}
\left \{
\begin{array}{rclcl}
-\Delta_p u & = & h(x)(f(u)-a) & \mbox{in} & \mathbb{R}^N, \\
u& > & 0 & \mbox{in} & \mathbb{R}^N, \\
\end{array}
\right.
\end{equation*}
where $1<p<N$, $a>0$, $ f:[0,+\infty) \to [0,+\infty)$ is a function with subcritical growth and $f(0)=0$, while $h:\mathbb{R}^N \to (0,+\infty)$ is a continuous function that satisfies some technical conditions.
We prove via nonsmooth critical points theory and comparison principle, that a solution exists for $a$ small enough. We also provide a version of Hopf's Lemma and a Liouville-type result for the $p$-Laplacian in the whole $\mathbb{R}^N$.
\noindent {\bf Mathematical Subject Classification MSC2010:}
35J20,
35J62
(49J52).
\noindent {\bf Key words and phrases:}
semipositone problems; quasilinear elliptic equations; nonsmooth nariational methods; Lipschitz functional; positive solutions.
\end{abstract}
\maketitle
\section{Introduction}
In this paper we study the existence of positive weak solutions for the $p$-Laplacian semipositone problem in the whole space
\begin{equation}\label{Problem-P}\tag{$P_a$}
\left \{
\begin{array}{rclcl}
-\Delta_p u & = & h(x)(f(u)-a) & \mbox{in} & \mathbb{R}^N, \\
u& > & 0 & \mbox{in} & \mathbb{R}^N, \\
\end{array}
\right.
\end{equation}
where $1<p<N$, $a>0$, $f:[0,+\infty) \to [0,+\infty)$ is a continuous function with subcritical growth and $f(0)=0$. Moreover, the function $h:\mathbb{R}^N \to (0,+\infty)$ is a continuous function satisfying
\begin{itemize}
\item[(\aslabel{$P_{1}$})\label{Hp_P2_L1Li}] $h\in L^{1}(\mathbb{R}^N)\cap L^{\infty}(\mathbb{R}^N)$,
\item[(\aslabel{$P_{2}$})\label{Hp_P4_Bbeta}] $h(x)<B|x|^{-\vartheta}$ for $x\neq0$, with $\vartheta>N$ and $B>0$.
\end{itemize}
An example of a function $h$ that satisfies the hypotheses \eqref{Hp_P2_L1Li}$-$\eqref{Hp_P4_Bbeta} is given below:
$$
h(x)=\frac{B}{1+|x|^{\vartheta}}, \quad \forall x \in \mathbb{R}^N.
$$
In the whole of this paper, we say that a function $u \in D^{1,p}(\mathbb{R}^N)$ is a weak solution for (\ref{Problem-P}) if
$u$ is a continuous positive function that verifies
$$
\int_{\mathbb{R}^N}|\nabla u|^{p-2}\nabla u \nabla v\,dx=\int_{\mathbb{R}^N}h(x)(f(u)-a)v\,dx, \quad \forall v \in D^{1,p}(\mathbb{R}^N).
$$
\subsection{State of art.}
The problem (\ref{Problem-P}) for $a = 0$ is very simple to be solved, either employing the well known
mountain pass theorem due to Ambrosetti and Rabinowitz \cite{AmbRab1973}, or via minimization. However, for the
case where (\ref{Problem-P}) is semipositone, that is, when $a> 0$, the existence of a positive
solution is not so simple, because the standard arguments via the mountain pass
theorem combined with the maximum principle do not directly give a positive
solution for the problem, and in this case, a very careful analysis must be done.
The literature associated with semipositone problems in bounded domains is very rich since the appearance
of the paper by Castro and Shivaji \cite{CasShi1988} who were the first to consider this
class of problems. We have observed that there are different methods to prove the existence
and nonexistence of solutions, such as subsupersolutions, degree theory arguments,
fixed point theory and bifurcation; see for example \cite{AliCaShi}, \cite{AmbArcBuff},\cite{AllNisZecca}, \cite{AnuHaiShi1996} and their
references. In addition to these methods, also variational methods were used in a
few papers as can be seen in \cite{AldHoSa19_semipos_omega}, \cite{CalCasShiUns2007}, \cite{CFELo}, \cite{CDS}, \cite{CQT}, \cite{CTY}, \cite{DC}, \cite{FigMasSan_KirchSemipos}, \cite{MR2008685} and \cite{Jea1_cont}. We would like to point out that in \cite{CFELo}, Castro, de Figueiredo and Lopera studied the existence of solutions for the following class of semipositone quasilinear problems
\begin{equation}\label{Problem-P2}
\left \{
\begin{array}{rclcl}
-\Delta_p u & = & \lambda f(u) & \mbox{in} & \Omega, \\
u(x)& > & 0 & \mbox{in} & \Omega, \\
u & = & 0 & \mbox{on} & \partial\Omega, \\
\end{array}
\right.
\end{equation}
where $\Omega \subset\mathbb{R}^{N}$, $N > p>2$, is a smooth bounded domain, $\lambda >0$ and $f:\mathbb{R} \to \mathbb{R}$ is a differentiable function with $f(0)<0$. In that paper, the authors assumed that there exist $q \in (p-1, \frac{Np}{N-p}-1), A,B>0 $ such that
$$
\left\{
\begin{array}{l}
A(t^q-1)\leq f(t) \leq B(t^q-1), \quad \mbox{for} \quad t>0\\
f(t)=0, \quad \mbox{for} \quad t \leq -1.
\end{array}
\right.
$$
The existence of a solution was proved by combining the mountain pass theorem with the regularity theory. Motivated by the results proved in \cite{CFELo}, Alves, de Holanda and dos Santos \cite{AldHoSa19_semipos_omega} studied the existence of solutions for a large class of semipositone quasilinear problems of the type
\begin{equation}\label{Problem-P3}
\left \{
\begin{array}{rclcl}
-\Delta_\Phi u & = & f(u)-a & \mbox{in} & \Omega, \\
u(x)& > & 0 & \mbox{in} & \Omega, \\
u & = & 0 & \mbox{on} & \partial\Omega, \\
\end{array}
\right.
\end{equation}
where $\Delta_\Phi$ stands for the $\Phi$-Laplacian operator. The proof of the main result is also done via variational methods, however in their approach the regularity results
found in Lieberman \cite{L1,L2} play an important role. By using the mountain pass theorem, the authors found a
solution $u_a$ for all $a>0$. After that, by taking the limit when $a$ goes to 0 and using the regularity results in \cite{L1,L2}, they proved that $u_a$ is positive for $a$ small enough.
Related to semipositone problems in unbounded domains, we only found the paper due to Alves, de Holanda, and dos Santos \cite{AldHoSa19_semipos_RN} that studied the existence of solutions for the following class of problems
\begin{equation}\label{Problem-PAHS}
\left \{
\begin{array}{rclcl}
-\Delta u & = & h(x)(f(u)-a) & \mbox{in} & \mathbb{R}^N, \\
u& > & 0 & \mbox{in} & \mathbb{R}^N, \\
\end{array}
\right.
\end{equation}
where $a>0$, $f:[0,+\infty) \to [0,+\infty)$ and $h:\mathbb{R}^N \to (0,+\infty)$ are continuous functions with $f$ having a subcritical growth and $h$ satisfying some technical conditions. The main tools used were variational methods combined with the Riesz potential theory.
\subsection{Statement of the main results.}
Motivated by the results found in \cite{CFELo}, \cite{AldHoSa19_semipos_omega} and \cite{AldHoSa19_semipos_RN}, we intend to study the existence of solutions for (\ref{Problem-P}) with two different types of nonlinearities. In order to state our first main result, we assume the following conditions on $f$:
\begin{itemize}
\item[(\aslabel{$f_0$}) \label{Hp_forig}] \qquad $$\displaystyle\lim_{t\to 0^+} \frac{F(t)}{t^{p}}=0\,;$$
\end{itemize}
\begin{itemize}
\item[(\aslabel{$f_{sc}$})\label{Hp_estSC}] \qquad there exists $q\in (1,p^*)$ such that
$\displaystyle \limsup_{t\to +\infty} \frac{f(t)}{t^{q-1}}<\infty,$
\end{itemize}
where $p^*=\frac{pN}{N-p}$ is the critical Sobolev exponent;
\begin{itemize}
\item[(\aslabel{$f_\infty$}) \label{Hp_PS_SQ}] \qquad
$q>p$ in \eqref{Hp_estSC} and there exist $\theta>p$ and $t_0>0$ such that
\begin{eqnarray*}
&&
0< \theta F(t) \leq f(t)t, \quad \forall t>t_0,
\end{eqnarray*}
\end{itemize}
where $F(t)=\int_{0}^{t}f(\tau) \, d \tau$.
Our first main result has the following statement
\begin{theorem} \label{Theorem1} Assume the conditions \eqref{Hp_P2_L1Li}$-$\eqref{Hp_P4_Bbeta}, \eqref{Hp_estSC}, \eqref{Hp_forig} and \eqref{Hp_PS_SQ}.
Then there exists $a^{\ast}>0$
such that, if $a\in[0,a^{\ast})$, problem \eqref{Problem-P} has a positive weak solution $u_a\in C(\mathbb{R}^N) \cap D^{1,p}(\mathbb{R}^N)$.
\end{theorem}
As mentioned above, a version of Theorem \ref{Theorem1} was proved in \cite{AldHoSa19_semipos_RN} in the semilinear case $p=2$. Their proof exploited variational methods for $C^1$ functionals and Riesz potential theory in order to prove the positivity of the solutions of a smooth approximated problem, which then turned out to be actual solutions of problem \eqref{Problem-P}. In our setting, since we are working with the $p$-Laplacian, which is a nonlinear operator, we do not have an analogue of the Riesz potential theory that works well for this class of operators. Hence,
a different approach was developed in order to treat the problem (\ref{Problem-P}) for $p \not =2$. Here, we make a different approximation for problem \eqref{Problem-P}, which results in working with a {\it nonsmooth approximating functional}.
As a result, Theorem \ref{Theorem1} is also new when $p=2$, since the set of hypotheses we assume here is different. In fact, avoiding the use of the Riesz theory, we do not need to assume that $f$ is Lipschitz (which would not even be possible in the case of condition \eqref{Hp_forig_up} below), and a different condition on the decay of the function $h$ is required.
The use of the nonsmooth approach turns out to simplify several technicalities usually involved in the treatment of semipositone problems. Actually, working with the $C^1$ functional naturally associated to \eqref{Problem-P}, one obtains critical points $u_a$ that may be negative somewhere.
When working in bounded sets, the positivity of $u_a$ is obtained, in the limit as $a\to0$, by proving convergence in $C^1$ sense to the positive solution $u_0$ of the case $a=0$, which is enough since $u_0$ has also normal derivative at the boundary which is bounded away from zero in view of the Hopf's Lemma. This approach can be seen for instance in \cite{Jea1_cont,CFELo,AldHoSa19_semipos_omega,FigMasSan_KirchSemipos}.
In $\R^N$, a different argument must be used: actually one can obtain convergence on compact sets, but the limiting solution $u_0$ goes to zero at infinity as $|x|^{(p-N)/(p-1) }$ (see Remark \ref{rm_udec}), which means that one needs to carry out some finer estimates on the convergence.
In \cite{AldHoSa19_semipos_RN}, with $p=2$, the use of the Riesz potential made it possible to prove that $|x|^{N-2 }|u_a-u_0|\to0$ uniformly, which then led to the positivity of $u_a$ in the limit.
Lacking this tool, we had to find a different way to prove the positivity of $u_a$.
The great advantage of our approach via nonsmooth analysis is that our critical points $u_a$ will always be nonnegative functions (see Lemma \ref{lm_prop_minim}). In spite of not necessarily being weak solutions of the equation in problem \eqref{Problem-P}, they turn out to be supersolutions and also subsolutions of the limit equation with $a=0$. These properties will allow us to use the comparison principle in order to prove the strict positivity of $u_a$ with the help of a suitable barrier function (see Lemmas \ref{lm_z} and \ref{lm_ujpos}). From the positivity it will immediately follow that $u_a$ is indeed a weak solution of \eqref{Problem-P}.
\par
The reader is invited to see that by \eqref{Hp_PS_SQ}, there exist $A_1,B_1>0$ such that
\begin{equation} \label{AR}
F(t) \geq A_1|t|^{\theta}-B_1, \quad \mbox{for} \ t\geq0.
\end{equation}
This inequality yields that the functional we will be working with is not bounded from below.
On the other hand, the condition \eqref{Hp_forig} will produce a ``range of mountains" geometry around the origin for the functional, which completes the mountain pass structure.
Finally, conditions \eqref{Hp_estSC} and \eqref{Hp_PS_SQ} impose a subcritical growth to $f$, which are used to obtain the required compactness condition.
\par
Next, we are going to state our second result. For this result, we still assume \eqref{Hp_estSC} together with the following conditions:
\begin{itemize}
\item[(\aslabel{$\widetilde f_0$}) \label{Hp_forig_up}] $$\displaystyle\lim_{t\to 0^+} \frac{F(t)}{t^{p}}=\infty\,;$$
\end{itemize}
\begin{itemize}
\item[(\aslabel{$\widetilde f_\infty$}) \label{Hp_PS_sQ}]\qquad $q<p$ in \eqref{Hp_estSC}.
\end{itemize}
Our second main result is the following:
\begin{theorem} \label{Theorem2} Assume the conditions \eqref{Hp_P2_L1Li}$-$\eqref{Hp_P4_Bbeta}, \eqref{Hp_estSC}, \eqref{Hp_forig_up} and \eqref{Hp_PS_sQ}.
Then there exists $a^{\ast}>0$
such that, if $a\in[0,a^{\ast})$,
problem \eqref{Problem-P} has
a positive weak solution $u_a\in C(\mathbb{R}^N) \cap D^{1,p}(\mathbb{R}^N)$.
\end{theorem}
In the proof of Theorem \ref{Theorem2}, the condition \eqref{Hp_forig_up} will produce a situation where the origin is not a local minimum for the energy functional, while \eqref{Hp_PS_sQ} will make the functional coercive, in view of \eqref{Hp_estSC}. It will be then possible to obtain solutions via minimization. As in the proof of Theorem \ref{Theorem1}, we will work with a nonsmooth approximating functional that will give us an approximate solution. After some computation, we prove that this approximate solution is in fact a solution for the original problem when $a$ is small enough.
\par
\begin{remark} Observe that if $f,h$ satisfy the set of conditions of Theorem \ref{Theorem1} or those of Theorem \ref{Theorem2} and $u$ is a solution of Problem \eqref{Problem-P}, then the rescaled function $v=a^{\frac{-1}{q-1}}u$ is a solution of the problem:
\begin{equation}\label{Problem-P_resc}
\left \{
\begin{array}{rclcl}
-\Delta_p v & = & a^{\frac{(q-p)}{q-1}} h(x)(\widetilde f_a(v)-1) & \mbox{in} & \mathbb{R}^N, \\
v& > & 0 & \mbox{in} & \mathbb{R}^N, \\
\end{array}
\right.
\end{equation}
which then takes the form of Problem \eqref{Problem-P2}, with $\lambda:=a^{\frac{(q-p)}{q-1}}$ and a new nonlinearity $\widetilde f_a(t)=a^{-1}f(a^{\frac{1}{q-1}}t)$, which satisfies the same hypotheses as $f$. In particular, if $f(t)=t^{q-1}$ then $\widetilde f_a\equiv f$.
In the conditions of Theorem \ref{Theorem1}, where $q>p$, we obtain a solution of Problem \eqref{Problem-P_resc} for suitably small values of $\lambda$, while in the conditions of Theorem \ref{Theorem2}, where $q<p$, solutions are obtained for suitably large values of $\lambda$.
It is worth noting that, as $a\to0$, the solutions of Problem \eqref{Problem-P} that we obtain are bounded and converge, up to subsequences, to a solution of Problem \eqref{Problem-P} with $a=0$ (see Lemma \ref{lemma6}). As a consequence, the corresponding solutions of Problem \eqref{Problem-P_resc} satisfy $v(x)\to \infty$ for every $x\in\R^N$.
Semipositone problems formulated as in \eqref{Problem-P_resc} were considered recently in \cite{CoRQTeh_semipos,PeShSi_semiposCrit}.
\end{remark}
\par
As a final result, we also show that, by the same technique used to prove the positivity of our solution, it is possible to obtain a version of Hopf's Lemma for the $p$-Laplacian in the whole $\mathbb{R}^N$, see Proposition \ref{hopf}. A further consequence is the following Liouville-type result:
\begin{proposition}\label{prop_Liou}
Let $N>p>1$ and $u \in D^{1,p}(\mathbb{R}^N) \cap C_{loc}^{1,\alpha}(\mathbb{R}^N)$ be a solution of problem:
\begin{equation}
\left \{
\begin{array}{rclcl}
-\Delta_p u & = & g(x) \hat f(u)& \mbox{in} & \mathbb{R}^N, \\
u& \geq & 0 & \mbox{in} & \mathbb{R}^N, \\
\end{array}
\right.
\end{equation}
where $\hat f,g$ are continuous, $g(x)>0$ in $\mathbb{R}^N$ and $\hat f(0)=0$ while $\hat f(t)>0$ for $t>0$. If $\liminf_{|x|\to\infty} |x|^{(N-p)/(p-1)}u(x)=0$, then $u\equiv0$.
\end{proposition}
\subsection{Organization of the article.}
This article is organized as follows: in Section \ref{sec_prelim}, we prove the existence of a nonnegative solution, denoted by $u_a$, for a class of approximate problems. In Section \ref{sec_estim}, we establish some properties involving the approximate solution $u_a$. In Section \ref{sec_prfmain}, we prove Theorems \ref{Theorem1} and \ref{Theorem2}. Finally, in Section \ref{sec_hopf}, we prove Proposition \ref{hopf} about Hopf's Lemma for the $p$-Laplacian in the whole $\mathbb{R}^N$.
\subsection{Notations.} Throughout this paper, the letters $c$, $c_{i}$, $C$, $C_{i}$, $i=1, 2, \ldots, $ denote positive constants which vary from line to line, but are independent of terms that take part in any limit process. Furthermore, we denote the norm of $L^{p}(\Omega)$ for any $p\geq 1$ by $\|\,.\,\|_{p}$. In some places we will use ``$\rightarrow$'', ``$\rightharpoonup$'' and ``$\stackrel{*}{\rightharpoonup}$'' to denote the strong convergence, weak convergence and weak star convergence, respectively.
\section{Preliminary results}\label{sec_prelim}
In the sequel, we consider the discontinuous function $f_a:\mathbb{R} \longrightarrow\mathbb{R}$ given by
\begin{equation}\label{eq_fa_d}
f_a(t)=\left \{
\begin{array}{ccl}
f(t)-a & \mbox{if} & t\geq 0, \\
0 & \mbox{if} & t<0, \\
\end{array}
\right.
\end{equation}
and its primitive
\begin{equation}\label{eq_Fa}
F_a(t)=\displaystyle\int_{0}^{t} f_{a}(\tau)d\tau=\left \{
\begin{array}{ccl}
F(t)-at & \mbox{if} & t\geq 0, \\
0& \mbox{if} & t\leq 0. \\
\end{array}
\right.
\end{equation}
A direct computation gives
\begin{equation}\label{eq_Fa_est}
-at^+\leq F_a(t)\leq \left \{\begin{array}{ccl}
F(t) & \mbox{if} & t\geq 0, \\
0 & \mbox{if} & t\leq 0,
\end{array}\right.
\end{equation}
where $t^+=\max\{t,0\}$.
Our intention is to prove the existence of a positive solution for the following auxiliary problem
\begin{equation}\label{Problem-PA}\tag{AP$_a$}
\left \{
\begin{array}{rclcl}
-\Delta_p u & = & h(x)f_a(u) & \mbox{in} & \mathbb{R}^N, \\
u& > & 0 & \mbox{in} & \mathbb{R}^N, \\
\end{array}
\right.
\end{equation}
because such a solution is also a solution of \eqref{Problem-P}.
Associated with \eqref{Problem-PA}, we have the energy functional $I_a:D^{1,p}(\mathbb{R}^N)\longrightarrow\mathbb{R}$ defined by
\begin{equation*}
I_a(u)=\frac{1}{p}\int_{\mathbb{R}^N}|\nabla u|^pdx -\int_{\mathbb{R}^N} h(x)F_{a}(u)dx,
\end{equation*}
which is only locally Lipschitz.
Hereafter, we will endow $D^{1,p}(\mathbb{R}^N)=\left\{u\in L^{p^*}(\mathbb{R}^N);\, \nabla u\in L^p(\mathbb{R}^N,\mathbb{R}^N)\right\} $ with
the usual norm
$$
\|u\|=\left( \int_{\mathbb{R}^N}|\nabla u|^{p}\,dx \right)^{\frac{1}{p}}.
$$
Since the Gagliardo-Nirenberg-Sobolev inequality (see \cite{Evans})
$$
\|u\|_{p^*}\leq S_{N,p} \|u\|
$$
holds for all $u\in D^{1,p}(\mathbb{R}^N)$ for some constant $S_{N,p}>0$, we have that the embedding
\begin{equation} \label{IM}
D^{1,p}(\mathbb{R}^N)\hookrightarrow L^{p^*}(\mathbb{R}^N)
\end{equation}
is continuous.
The following Lemma provides a useful compact embedding for $D^{1,p}(\mathbb{R}^N)$.
\begin{lemma}\label{l1}
Assume \eqref{Hp_P2_L1Li}.
Then, the embedding $D^{1,p}(\mathbb{R}^N)\hookrightarrow L^q_h(\mathbb{R}^N)$ is continuous and compact for every $q\in [1,p^*)$.
\end{lemma}
\begin{proof}
The continuity is obtained by H\"older inequality, using \eqref{IM} and \eqref{Hp_P2_L1Li}:
\begin{equation} \label{I1}
\int_{\mathbb{R}^N}h|u|^{q}\,dx \leq \n{h}_{r}\|u\|^q_{p^*}\leq C_h\|u\|^q, \quad \forall u \in D^{1,p}(\mathbb{R}^N),
\end{equation}
where $r=p^*/(p^*-q)$ is dual to $p^*/q$.
Let $\{u_n\}$ be a sequence in $D^{1,p}(\mathbb{R}^N)$ with $u_n\rightharpoonup 0\ \mbox{in}\ D^{1,p}(\mathbb{R}^N).$ For each $R>0$, we have the continuous embedding $D^{1,p}(\mathbb{R}^N) \hookrightarrow W^{1,p}(B_R(0))$. Since the embedding $W^{1,p}(B_R(0)) \hookrightarrow L^p(B_R(0))$ is compact, it follows that $D^{1,p}(\mathbb{R}^N) \hookrightarrow L^p(B_R(0))$ is a compact embedding as well. Hence, for some subsequence, still denoted by itself,
$$
u_n(x)\rightarrow 0\ \mbox{a.e. in}\ \mathbb{R}^N.
$$
By the continuous embedding \eqref{IM}, we also know that $\{|u_n|^{q}\}$ is a bounded sequence in $L^{\frac{p^*}{q}}(\mathbb{R}^N)$. Then, up to a subsequence if necessary,
$$
|u_n|^{q}\rightharpoonup 0 \mbox{ in } L^{\frac{p^*}{q}}(\mathbb{R}^N),
$$
or equivalently,
$$
\int_{\mathbb{R}^N}|u_n|^{q}\varphi dx \to 0, \quad \forall \varphi \in L^{r}(\mathbb{R}^N).
$$
As \eqref{Hp_P2_L1Li} guarantees that $h \in L^{r}(\mathbb{R}^N)$, it follows that
$$
\int_{\mathbb{R}^N}h(x)|u_n|^qdx \to 0.
$$
This shows that $u_n \to 0$ in $L^q_h(\mathbb{R}^N)$, finishing the proof.
\end{proof}
We also give the following result that will be used later.
\begin{lemma} \label{CN}
If $u_n\rightharpoonup u$ in $D^{1,p}(\mathbb{R}^N)$ and \eqref{Hp_P2_L1Li}
holds, then
$$
\int_{\mathbb{R}^N}h(x)|u_n-u||u_n|^{q-1}\,dx \to 0 \quad \mbox{as} \quad n \to +\infty, \quad \forall q\in[ 1,p^*).
$$
\end{lemma}
\begin{proof}
Set $r=\frac{p^*}{q-1}\in \left(\frac{p^*}{p^*-1}, \infty\right]$ and $r'=\frac{p^*}{p^*-(q-1)}\in [1,p^*)$ its dual exponent.
First note that $\{u_n\}$ is bounded in $D^{1,p}(\mathbb{R}^N)$, and so, $\{|u_n|^{q-1}\}$ is bounded in $L^r(\mathbb{R}^N)$
by \eqref{IM}, while $h|u_n-u| \to0$ in $L^{r'}$
since we can apply Lemma \ref{l1} with $h^{r'}$ in the place of $h$, which also satisfies condition \eqref{Hp_P2_L1Li}. Then by H\"older inequality
$$
\int_{\mathbb{R}^N}h(x)|u_n-u||u_n|^{q-1}\,dx \leq \|h(u_n-u)\|_{r'}\|u_n^{q-1}\|_{r}\to 0.
$$
\end{proof}
\subsection{Critical points theory for the functional $I_a$ }
As mentioned in the last subsection, the functional $I_a$ is only locally Lipschitz in $D^{1,p}(\mathbb{R}^N)$, so we cannot use variational methods for $C^1$ functionals. Having this in mind, we will then use the theory of critical points for locally Lipschitz functions in a Banach space, see Clarke \cite{Clarke_nonsmooth} for more details.
First of all, we recall that $u\in D^{1,p}(\mathbb{R}^N)$ is a critical point of $I_a$ if
\begin{equation}\label{eq_varminim}
\int_{\mathbb{R}^N}|\nabla u|^{p-2}\nabla u \nabla v\,dx+\int_{\mathbb{R}^N}h(x)(-F_a)^0(u,v)\,dx\geq 0, \quad \forall v \in D^{1,p}(\mathbb{R}^N),
\end{equation}
where $$(-F_a)^0(t,s)=\limsup_{\xi\searrow0,\,\tau\to t}\frac {-F_a(\tau+\xi s)+F_a(\tau)}{\xi }$$ indicates the generalized directional derivative of $-F_a$ at the point $t$ along the direction $s$.
It is easy to see that a global minimum is always a critical point; moreover, an analogue of the classical mountain pass theorem holds true (see \cite{Chang81_VMnsm}), where a critical point in the sense of \eqref{eq_varminim} is obtained at the usual minimax level provided the following form of (PS)-condition holds true:
\begin{itemize}
\item [(\aslabel{$PS_L$}) \label{PSL}]
If $\{u_n\}$ is a sequence in $D^{1,p}(\mathbb{R}^N)$ such that $\{I_a(u_n)\}$ is bounded and
\begin{equation}\label{eq_varPS}
\int_{\mathbb{R}^N}|\nabla u_n|^{p-2}\nabla u_n \nabla v\,dx+\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_n,v)\,dx\geq- \varepsilon_n\n{v},
\end{equation}
$\forall v \in D^{1,p}(\mathbb{R}^N),$ where $\varepsilon_n\to0$, then $\{u_n\}$ admits a convergent subsequence.
\end{itemize}
\par
In the next Lemma, let us collect some useful properties that can be derived by the definition of critical points of $I_a$, given in \eqref{eq_varminim}.
\begin{lemma}\label{lm_prop_minim}
Assume \eqref{Hp_P2_L1Li}
and (\ref{Hp_estSC}). Then a critical point $u_a$ of the functional $I_a$, as defined in \eqref{eq_varminim}, has the following properties:
\begin{enumerate}
\item $u_a\geq 0$ in $\R^N$;
\item if $u_a>0$ in $\R^N$ then it is a weak solution of problem \eqref{Problem-PA}, and also a solution of problem \eqref{Problem-P};
\item $u_a$ is a weak subsolution of $-\Delta_p u=h(x)f(u)$ in $\R^N$;
\item $u_a$ is a weak supersolution of $-\Delta_p u=h(x)(f(u)-a)$ in $\R^N$.
\end{enumerate}
\end{lemma}
\begin{proof}
Straightforward calculations give
\begin{equation}
\label{eq_Fa0}
(-F_a)^0(t,s)=\begin{cases}
-(f(t)-a)s& \mbox{for $t>0,\ s\in\R$}\\
as&\mbox{for $t=0,\,s>0$}\\
0& \mbox{for $\begin{cases}t<0,\ s\in\R\\t=0,\,s\leq0\,.\end{cases}$}
\end{cases}
\end{equation}
By using $u_a^-=\max\cub{0,-u_a}$ as a test function in \eqref{eq_varminim} we get
$$-\n {u_a^-}^p= \int_{\mathbb{R}^N}|\nabla u_a|^{p-2}\nabla u_a \nabla u_a^-\,dx\geq-\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_a,u_a^-)\,dx \geq 0\,,$$
then $u_a^-\equiv0$ and then (1.) is proved.
If $u_a>0$ in $supp(\phi)$, then from \eqref{eq_varminim},
\begin{equation*}
\int_{\mathbb{R}^N}|\nabla u_a|^{p-2}\nabla u_a \nabla \phi\,dx\geq
-\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_a,\phi)\,dx=\int _{\mathbb{R}^N} h(x)f_a(u_a) \phi\,dx\,,
\end{equation*}
and by testing also with $-\phi$ one obtains equality, then (2.) is proved.
If $\phi\geq0$ in \eqref{eq_varminim} then
\begin{equation*}
\int_{\mathbb{R}^N}|\nabla u_a|^{p-2}\nabla u_a \nabla \phi\,dx\geq
-\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_a,\phi)\,dx\geq \int_{\mathbb{R}^N}h(x)(f(u_a)-a)\phi\,dx\,;
\end{equation*}
by testing with $-\phi$ one obtains
\begin{equation*}
- \int_{\mathbb{R}^N}|\nabla u_a|^{p-2}\nabla u_a \nabla \phi\,dx\geq
-\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_a,-\phi)\,dx\geq -\int_{\mathbb{R}^N}h(x)f(u_a)\phi\,dx\,.
\end{equation*}
The above analysis guarantees that, for every $\phi\in D^{1,p}(\R^N)$ with $\phi\geq0$,
\begin{equation}\label{eq_subsup}
\int_{\mathbb{R}^N}h(x)(f(u_a)-a)\phi\,dx\leq \int_{\mathbb{R}^N}|\nabla u_a|^{p-2}\nabla u_a \nabla \phi\,dx\leq\int_{\mathbb{R}^N}h(x)f(u_a)\phi\,dx \,,
\end{equation}
which proves the claims (3.) and (4.).
\end{proof}
\subsection{Mountain pass geometry}\label{sec_MP}
Throughout this subsection we assume the hypotheses of Theorem \ref{Theorem1}.
The next two Lemmas will be useful to prove that in this case $I_a$ verifies the mountain pass geometry.
\begin{lemma}\label{lemma1}
There exist $\rho,\alpha>0$ such that
$$I_a(u)\geq\alpha, \qquad\text{ for $\n u=\rho$ and any $a\geq0$.}$$
\end{lemma}
\begin{proof}
Notice that, in view of \eqref{Hp_forig}, \eqref{Hp_estSC} and \eqref{eq_Fa_est}, given $\epsilon>0$, there exists $C_{\epsilon}>0$ such that
$$
F_a(t)\leq{\epsilon}|t|^{p}+ C_{\epsilon}{|t|^{q}} ,\quad \forall t\in\R.
$$
Thus, by Lemma \ref{l1},
$$\int_{\R^N}h(x)F_a(u(x))dx\leq {\epsilon}C\|u\|^{p}+ C_{\epsilon}C{\|u\|^{q}}, \quad \forall u\in D^{1,p}(\R^N).$$
Thereby, setting $\n u=\rho$, we obtain
$$I_a(u)\geq \rho^p\rob{\frac1p-\varepsilon C-{CC_\varepsilon}\rho^{q-p}}\,.$$
Now, fixing $\varepsilon=1/(2pC) $ and choosing $\rho$ sufficiently small such that $CC_\varepsilon \rho^{q-p}\leq 1/4p$, so that the term in parentheses is at least $1/4p$, we see that the claim is satisfied by taking $\alpha =(1/4p)\rho^p$.
\end{proof}
\begin{lemma}\label{lemma2}
There exists $v\in D^{1,p}(\mathbb{R}^N) $ and $a_1>0$ such that $\|v\|>\rho$ and $I_a(v)< 0$, for all $a \in [0,a_1)$.
\end{lemma}
\begin{proof}
Fix a function
$$
\varphi\in C_{0}^{\infty}(\mathbb{R}^N) \setminus \{0\}, \quad \mbox{with} \quad \varphi \geq 0 \quad \mbox{and} ~ ||\varphi||=1.
$$
Note that for all $t>0$,
\begin{eqnarray*}
I_{a}(t\varphi) &=& \frac1p t^p- \int_{\Omega}h(x)F_a(t\varphi)dx \\
&=&\frac1p t^p- \int_{\Omega}h(x)F(t\varphi)\,dx + a\int_{\Omega}h(x)t\varphi\, dx,
\end{eqnarray*}
where $\Omega=supp \,\varphi$. Now, estimating with \eqref{AR} and assuming that $a$ is bounded in some set $[0,a_1)$, we find
\begin{eqnarray}\label{eq_est_abv_phi}
I_{a}(t\varphi) & \leq& \frac1p t^p- {A_1t^{\theta}}\int_{\R^N}h(x)\varphi^\theta dx+ B_1\n h_1+t a_1\int_{\R^N}h(x)\varphi dx \,.
\end{eqnarray}
Since $h>0$ the two integrals are positive, and using the fact that $\theta>p>1$, we can fix $t_1>\rho$ large enough so that $I_a(v)<0$, where $v=t_1\varphi\in D^{1,p}(\mathbb{R}^N)$.
\end{proof}
In the sequel, we are going to prove the version of (PS)-condition required in the critical points theory for Lipschitz functionals, for the functional $I_a$. To do this, observe that \eqref{Hp_PS_SQ} yields that $f_a$ also satisfies the famous condition due to Ambrosetti-Rabinowitz, that is,
there exists $T>0$, which does not depend on $a\geq 0$, such that
\begin{equation}\label{ARCondition}
\theta F_a(t) \leq tf_a(t)+T, \quad t\in\mathbb{R}\,.
\end{equation}
\begin{lemma}\label{lemma3}
For all $a\geq0$, the functional $I_a$ satisfies the condition \eqref{PSL}.
\end{lemma}
\begin{proof}
Observe that, by \eqref{eq_Fa0}, $(-F_a)^0(t,\pm t)= \mp f_a(t)t$ for all $t \in \mathbb{R}$. Then, from \eqref{eq_varPS},
$$
\m{\int_{\mathbb{R}^N}|\nabla u_n|^{p}\,dx-\int_{\mathbb{R}^N}h(x)f_a(u_n) u_n\,dx }\leq \varepsilon_n\n{u_n}.
$$
For $n$ large enough, we assume $\varepsilon_n<1$ so we get
\begin{equation}\label{ineq1-lemma1}
-\|u_n\|-\|u_n\|^{p} \leq-\int_{\mathbb{R}^N} h(x)f_{a}(u_n)u_n dx\,.
\end{equation}
On the other hand, since $|I_a(u_n)|\leq K$ for some $K>0$, it follows that
\begin{equation}\label{ineq2-lemma1}
\frac{1}{p}\|u_n\|^{p} - \int_{\mathbb{R}^N}h(x)F_a(u_n)dx\leq K, \quad \forall n \in\mathbb{N}.
\end{equation}
From \eqref{ARCondition} and \eqref{ineq2-lemma1},
\begin{equation}\label{ineq3lemma1}
\frac{1}{p}\|u_n\|^{p} - \frac{1}{\theta}\int_{\mathbb{R}^N}h(x)f_a(u_n)u_n\,dx-\frac{1}{\theta}T\|h\|_1 \leq K, \quad
\end{equation}
thereby, by \eqref{ineq1-lemma1} and \eqref{ineq3lemma1},
$$
\left(\frac{1}{p}-\frac{1}{\theta}\right)|| u_n||^p - \frac{1}{\theta}|| u_n||\leq K+\frac{1}{\theta}T\|h\|_1,
$$
for $n$ large enough. This shows that $\{u_n\}$ is bounded in $D^{1,p}(\mathbb{R}^N)$. Thus, without loss of generality, we may assume that
$$
u_n\rightharpoonup u ~~ \mbox{in} ~~ D^{1,p}(\mathbb{R}^N)
$$
and
$$
u_n(x) \to u(x) \quad \mbox{a.e. in} \quad \mathbb{R}^N.
$$
By \eqref{eq_Fa0} and conditions \eqref{Hp_estSC}-\eqref{Hp_forig}, there exists $C>0$ that does not depend on $a$ such that
$$
|(-F_a)^0(t,s)|\leq \rob{C(|t|^{q-1}+|t|)+a}|s|
$$
and so,
$$
|h(x)(-F_a)^0(u_n,u_n-u)| \leq Ch(x)|u_n-u|(|u_n|^{q-1}+|u_n|+a).
$$
By Lemma \ref{CN}, we have the limit
$$
\int_{\mathbb{R}^N} h(x)(-F_{a})^0(u_n,\pm(u_n-u)) dx \to 0,
$$
that combines with the inequalities below, obtained from \eqref{eq_varPS},
\begin{multline}
-\varepsilon_n\n{u-u_n} -\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_n,u-u_n)\,dx \\\leq \int_{\mathbb{R}^N}|\nabla u_n|^{p-2}\nabla u_n \nabla (u-u_n)\,dx\\\leq\int_{\mathbb{R}^N}h(x)(-F_a)^0(u_n,u_n-u)\,dx+ \varepsilon_n\n{u-u_n},
\end{multline}
to give
\begin{equation}\label{conv1lemma3}
\int_{\mathbb{R}^N}|\nabla u_n|^{p-2}\nabla u_n \nabla (u-u_n)\,dx\to 0.
\end{equation}
The weak convergence $u_n\rightharpoonup u$ in $D^{1,p}(\mathbb{R}^N)$ yields
\begin{equation}\label{conv2lemma3}
\int_{\mathbb{R}^N} |\nabla u|^{p-2}\nabla u\nabla (u_n-u)dx \to 0.
\end{equation}
From \eqref{conv1lemma3}, \eqref{conv2lemma3} and the (S+) property of the $p$-Laplacian, we deduce that $u_n\to u$ in $D^{1,p}(\mathbb{R}^N)$, finishing the proof. Here, Simon's inequality found in \cite[Lemma A.0.5]{PA} plays an important role to conclude the strong convergence.
\end{proof}
Next, we obtain a critical point for $I_a$, by the mountain pass theorem for Lipschitz functionals. Furthermore, we will make explicit the dependence of the constants on the bounded interval $[0,\overline a)$ where the parameter $a$ is taken, by using as subscript its endpoint, which we still have to fix, while we will not mention their dependence on $h$ and $f$.
\begin{lemma}\label{lm_ua}
There exists a constant $C_{a_1}>0$ such that $I_a$ has a critical point $u_a\in D^{1,p}(\R^N)$ satisfying $0<\alpha\leq I_a(u_a)\leq C_{a_1}$, for every $a\in[0,a_1)$.
\end{lemma}
\begin{proof}
The Lemmas \ref{lemma1}, \ref{lemma2} and \ref{lemma3} guarantee that we can apply the mountain pass theorem for Lipschitz functionals due to \cite{Chang81_VMnsm}
to show the existence of a critical point $u_a \in D^{1,p}(\mathbb{R}^N)$ for all $a \in [0,a_1)$, with $I_a(u_a)=d_a\geq \alpha >0$, where $d_a$ is the mountain pass level associated with $I_a$.
Now, taking $\varphi\in C_{0}^{\infty}(\Omega)$ as in the proof of Lemma \ref{lemma2}, $t>0$, and estimating as in \eqref{eq_est_abv_phi}, we see that
$I_{a}(t\varphi) $ is bounded from above, uniformly if $a\in[0,a_1)$. Consequently, the mountain pass level is also estimated in the same way, that is,
$$
0<\alpha\leq d_a =I_a(u_a) \leq \max\{I_a(t\varphi); t\geq 0\} \leq C_{a_1}.
$$
\end{proof}
The next Lemma establishes a very important estimate involving the Sobo\-lev norm of the solution $u_a$ for $a \in [0,a_1)$.
\begin{lemma}\label{lm_nHincompact}
There exist constants $k_{a_1},K_{a_1}$, such that $0<k_{a_1}\leq\|u_a\|\leq K_{a_1}$ for all $a \in [0,a_1)$.
\end{lemma}
\begin{proof}
Using again that $(-F_a)^0(t,\pm t)= \mp f_a(t)t$, we get from \eqref{eq_varminim} that
\begin{equation}\label{eq_Ipuu}
\|u_a\|^{p}-\int_{\R^N}h(x) f_{a}(u_a)u_a=0\,.
\end{equation}
By Lemma \ref{lm_ua}, and subtracting \eqref{eq_Ipuu} divided by $\theta$,
we get the inequality below
$$
C_{a_1} \geq I_a(u_a) = \rob{ \frac 1p-\frac1\theta} \|u_a\|^{p} + \int_{\R^N}h(x) \left(\frac{1}{\theta}f_{a}(u_a)u_a -F_a(u_a)\right)dx,
$$
which combined with \eqref{ARCondition} leads to
$$
C_{a_1}\geq\rob{ \frac 1p-\frac1\theta} \|u_a\|^{p} -\n{h}_\infty T\,,
$$
establishing the estimate from above.
In order to get the estimate from below, just note that by \eqref{eq_Fa_est} and the embeddings in Lemma \ref{l1},
$$
\alpha\leq I_a(u_a)\leq
\frac1p\n{u_a}^p+a\int_{\R^N} u_a^+\,dx \leq \frac1p\n{u_a}^p+Ca_1\n{u_a}.
$$
This gives the desired estimate from below.
\end{proof}
\subsection{Global minimum geometry}\label{sec_min}
Throughout this subsection, we assume the hypotheses of Theorem \ref{Theorem2}.
The next three Lemmas will prove that $I_a$ has a global minimum at a negative level.
\begin{lemma}\label{lemma1m}
There exist $a_1,\alpha>0$ and $u_0\in D^{1,p}(\mathbb{R}^N)$
such that
$$I_a(u_0)\leq-\alpha, \qquad\text{ for any $a\in[0,a_1)$.}$$
\end{lemma}
\begin{proof}
Let $\varphi\in C_{0}^{\infty}(\mathbb{R}^N)$ be as in the proof of Lemma \ref{lemma2}. For $t>0$,
\begin{eqnarray*}
I_{a}(t\varphi) &=&\frac1p t^p - \int_{\Omega}h(x)F(t\varphi)\,dx + a\int_{\Omega}h(x)t\varphi\, dx\,,
\end{eqnarray*}
where $\Omega=supp \,\varphi$.
From \eqref{Hp_forig_up} and using the fact that $\displaystyle \inf_{x \in supp \,\varphi }h(x)=h_0>0$, we have that, for $t_0>0$ small enough,
$$ \quad\int_{\Omega}h(x)F(t_0\varphi)\,dx\geq \frac2pt_0^{p}\,.$$
Therefore,
$$
I_a(t_0\varphi)\leq -\frac{1}p t_0^{p}+at_0\int_{\Omega}h(x) \varphi\,dx.
$$
Now fixing $\alpha=\frac{1}{2p}t_0^p>0$ and choosing $a_1=a_1(t_0)$ in such way that \\$a_1t_0\int_{\Omega} h(x)\varphi\,dx\leq\alpha$, we derive that
$$
I_a(t_0\varphi)\leq -\alpha<0 \qquad \text{for $a\in[0,a_1)$,}
$$
showing the Lemma.
\end{proof}
\begin{lemma}\label{lemma2m}
$I_a$ is coercive, uniformly with respect to $a\geq0$, in fact, there exist $H,\rho>0$ independent of $a$ such that $I_a(u)\geq H$ whenever $\n u\geq \rho$.
\end{lemma}
\begin{proof}
By \eqref{Hp_estSC} and Lemma \ref{l1}, there is $C>0$ such that
\begin{eqnarray}\label{eq_coerc}
I_a(u)&\geq &\frac 1p\n{u}^{p } - \int_{\R^N}h(x)\rob{C+C|u|^q}\,dx
\\\nonumber&\geq &\frac 1p\n{u}^{p }-C-C\n{u}^q,
\end{eqnarray}
then the claim follows easily since $p>q$ from \eqref{Hp_PS_sQ}.
\end{proof}
\begin{lemma}
For every $a\in\R$, $I_a$ is weakly lower semicontinuous.
\end{lemma}
\begin{proof}
The proof is classical, since the norm is weakly lower semicontinuous and
the term $\int_{\R^N}h(x)F_a(u)\,dx$ is weakly continuous. To see this, let $\{u_n\}$ be a sequence in $D^{1,p}(\mathbb{R}^N)$ such that
$$
u_n\rightharpoonup u ~~ \mbox{in} ~~ D^{1,p}(\mathbb{R}^N).
$$
Then, proceeding as in the proof of Lemma \ref{l1}, up to a subsequence
$$
u_n(x) \to u(x) \quad \mbox{in $L^{q}_h(\R^N)$\ and\ a.e. in} \ \mathbb{R}^N.
$$
This means that $w_n=h^{1/q}u_n\to w= h^{1/q}u$ in $L^{q}$, as a consequence, we may also assume that $\{w_n\}$ is dominated by some $g\in L^{q}$. On the other hand, by \eqref{Hp_estSC} and \eqref{Hp_P2_L1Li}
$$
|h\,F_a(u_n)|\leq h\,C(|u_n|^q+1)\leq C(g^q+h)\in L^1(\mathbb{R}^N),
$$
and so, $hF_a(u_n)$ is dominated and converges to its a.e. limit $hF_a(u)$. Since the same argument can be applied to any subsequence of the initial sequence, we can ensure that $$
\lim_{n \to +\infty}\int_{\R^N} hF_a(u_n)\,dx=\int_{\R^N} hF_a(u)\,dx
$$ along any $D^{1,p}$-weakly convergent sequence.
\end{proof}
We will now obtain a candidate solution for problem \eqref{Problem-PA} by minimization.
\begin{lemma}\label{lm_ua_m}
There exists a constant $C_{a_1}>0$ such that
$I_a$ has a global minimizer
$u_a\in D^{1,p}(\R^N)$ satisfying $0>-\alpha\geq I_a(u_a)\geq -C_{a_1}$, for every $a\in[0,a_1)$.
\end{lemma}
\begin{proof}
The minimizer is obtained in view of the above Lemmas. Actually the global minimum of $I_a$ stays below $-\alpha$ by Lemma \ref{lemma1m}, while the boundedness from below is a consequence of \eqref{eq_coerc}.
\end{proof}
The next Lemma establishes the same important estimate as the one in Lemma \ref{lm_nHincompact}, for the minimizer $u_a$.
\begin{lemma}\label{lm_nHincompact_m}
There exist constants $k_{a_1},K_{a_1}$, such that $0<k_{a_1}\leq\|u_a\|\leq K_{a_1}$ for all $a \in [0,a_1)$.
\end{lemma}
\begin{proof}
The bound from above for the norm of $u_a$ is a consequence of the uniform coercivity proved in Lemma \ref{lemma2m}, since $I_a(u_0)<0$. For the estimate from below, just note that by \eqref{eq_Fa_est},
$$
0>-\alpha\geq I_a(u_a)=
\frac1p\n u^p-\int_{\R^N} h(x)F_a(u_a)\,dx
$$
$$
\geq-\int_{\R^N} h(x)F( u_a^+)\,dx
$$
and the right hand side goes to zero if $\n{u_a}$ goes to zero, by Lemma \ref{l1} and the continuity of the integral.
\end{proof}
\section{Further estimates for the critical points $u_a$}\label{sec_estim}
From now on $u_a$ will be the critical point obtained in Lemma \ref{lm_ua} or in Lemma \ref{lm_ua_m}. Our first result ensures that the family $\{u_a\}_{a\in[0,\overline a)}$ is a bounded set in $L^{\infty}(\R^N)$ for $\overline a$ small enough. This fact is crucial in our approach.
\begin{lemma} \label{Estimativa}
There exists $C_{a_1}^\infty>0$ such that
\begin{equation}\label{eq_estCinfty}
\|u_a\|_{\infty} \leq C_{a_1}^\infty, \quad \forall a \in [0,a_1).
\end{equation}
\end{lemma}
\begin{proof}
By Lemma \ref{lm_prop_minim} we know that for $a\in[0,a_1)$, $u_a\geq0$ and it is a weak subsolution of
$$
-\Delta_p u=h(x)f(u), \quad \mbox{in} \quad \mathbb{R}^N.
$$
In the case of the mountain pass geometry, $u_a$ is also
a weak subsolution of
$$
-\Delta_p u=h(x)\alpha(x)\rob{1+|u|^{p-2}u}\quad \mbox{in} \quad \mathbb{R}^N,
$$
where, from \eqref{Hp_estSC} and \eqref{Hp_PS_SQ},
$$
\alpha(x):=\frac{f(u_a(x))}{1+u_a(x)^{p-1}}\leq D(1+u_a(x)^{q-p})\quad \mbox{in} \quad \mathbb{R}^N,
$$
for some $D>0$ which depends only on $f$.
Let $K_\rho(x)$ denote a cube centered at $x$ with edge length $\rho$, and $\n{\cdot}_{r,K}$ denote the $L^r$ norm restricted to the set $K$. Our goal is to prove that, for a fixed $\rho>0$ and any $x\in\R^N$, one has
\begin{equation}\label{eq_Tr1}
\sup_{K_\rho(x)}u_a\leq C \rob{1+\n{u_a}_{p^*, K_{2\rho}(x)}}
\end{equation}
where $C$ depends on $p,N,f,h $ only.
Since $K_\rho(x)$ can be taken anywhere and the right hand side is bounded for $\cub{u_a}_{a\in[0,a_1)}$ by Lemma \ref{lm_nHincompact} and \eqref{IM}, equation \eqref{eq_Tr1} gives a uniform bound for $u_a$ in $L^\infty$, proving our claim.
In order to prove \eqref{eq_Tr1}, we will use Trudinger \cite[Theorem 5.1]{Trud67_HarnType} (see also Theorem 1.3 and Corollary 1.1). For this, we need to show that
$$
\sup_{x\in\R^N,\ \rho>0}\frac{\n {h\alpha} _{N/p,K_\rho(x)}}{\rho^\delta}\leq C
$$
for a suitable $\delta>0$ and $C$ that do not depend on $a\in[0,a_1)$ (see eq. (5.1) in \cite{Trud67_HarnType}).
Actually let $\tau =p^*/(q-p)>N/p$, then
$$
\n {h\alpha} _{N/p,K_\rho(x)} \leq\n{h\alpha}_{\tau,{K_\rho(x)}} |{K_\rho(x)}|^{p/N-1/\tau}\,,
$$
and
\begin{multline}
\n {h\alpha} _{\tau,K_\rho(x)}^\tau= \int_{K_\rho(x)} (h\alpha) ^\tau dx \leq\int_{K_\rho(x)} h^\tau D(1+u_a^{q-p})^\tau\,dx\leq \\\leq D'\int_{K_\rho(x)} h^\tau (1+u_a^{p^*})dx\leq D''(1+\n{u_a}_{p^*}^{p^*} )\,.
\end{multline}
Using the fact that $|{K_\rho(x)}|= \rho^N$, we conclude that $ \rho^{-\delta}\n {h\alpha} _{N/p,K_\rho(x)}$ is bounded, for a suitable $\delta>0$, by a constant depending only on $p,N,f,h $ and $\n{u_a}_{p^*}$, which is bounded by Lemma \ref{lm_nHincompact} and \eqref{IM}.
In the case of the minimum geometry, we can take $\alpha$ to be a constant and then the boundedness of $ \rho^{-\delta}\n {h\alpha} _{N/p,K_\rho(x)}$ is easily obtained since $h\in L^\infty$ (in this case \eqref{eq_Tr1} can also be obtained directly from Theorem 1.3 and Corollary 1.1 in \cite{Trud67_HarnType}).
\end{proof}
In what follows, we show an estimate from below of the norm $L^{\infty}(B_\gamma)$ of $u_a$ for $a$ small enough, where $B_\gamma \subset \mathbb{R}^N$ is the open ball centered at the origin with radius $\gamma>0$. This estimate is a key point to understand the behavior of the family $\{u_a\}$ when $a$ goes to 0.
\begin{lemma}\label{lemma6} There exist $\delta,\gamma>0$ that do not depend on $a \in [0,a_1)$, such that $\|u_a\|_{\infty,{B_\gamma}}\geq \delta$ for all $a\in[0,{a}_1)$.
\end{lemma}
\begin{proof}
By \eqref{eq_subsup}, since $u_a\geq0$,
\begin{equation}
\int_{\mathbb{R}^N}|\nabla u_a|^{p}\,dx\leq\int_{\mathbb{R}^N}h(x)f(u_a)u_a\,dx \,.
\end{equation}
By Lemma \ref{lm_nHincompact} (resp. Lemma \ref{lm_nHincompact_m}) the left hand side is bounded from below by $k_{a_1}^p$.
Let now
\\\indent $\bullet$
$\Gamma$ be such that $f(t)t< \Gamma$ for $t\in [0,C_{a_1}^\infty]$, where $C_{a_1}^\infty$ was given in Lemma \ref{Estimativa},
\\\indent $\bullet$
$\gamma$ be such that $\int_{\R^N\setminus B_\gamma}h\,dx<k_{a_1}^p/(2\Gamma)$,
\\\indent $\bullet$
$\delta$ be such that $f(t)t< k_{a_1}^p/(2\n{h}_\infty|B_\gamma|)$ for $t\in [0,\delta]$.
\\
Then if $u_a<\delta$ in $B_\gamma$ we are led to the contradiction
$$
k_{a_1}^p \leq \int_{\R^N}|\nabla u_a|^p \,dx \leq\int_{\R^N\setminus B_\gamma} h(x)f(u_a)\,u_a\,dx+\int_{B_\gamma} h(x)f(u_a)\,u_a\,dx<k_{a_1}^p
$$
and then the claim is proved.
\end{proof}
We can now prove the following convergence result.
\begin{lemma}\label{lm_subtou}
Given a sequence of positive numbers $a_j\to 0$, there exists $u\in D^{1,p}(\R^N)$ and $\beta>0$ such that, up to a subsequence, $u_{a_j}\to u$ weakly in $D^{1,p}(\R^N)$ and in ${C}^{1,\beta}$ sense in compact sets. Moreover, $u>0$ is a solution of \eqref{Problem-P} with $a=0$.
\end{lemma}
\begin{proof}
Fixing $u_j=u_{a_j}$, it follows that $\{u_j\}$ is bounded in $L^\infty(\mathbb{R}^N)$, which means that we may apply \cite[Theorem 1]{Tolksdorf} to obtain that it is also bounded in ${C}_{loc}^{1,\alpha}(\R^N)$ for some $\alpha>0$. As a consequence, for $\beta\in(0,\alpha)$,
in any compact set $\overline\Omega$ it admits a subsequence that converges in ${C}^{1,\beta}(\overline\Omega)$ and using a diagonal procedure we see that there exists $u\in {C}^{1,\beta}(\R^N)$ such that, again up to a subsequence, $u_j\rightarrow u$ in ${C}^{1,\beta}$ sense in
compact sets. From Lemma \ref{lemma6}, $u$ is not identically zero. The boundedness in $W_{loc}^{1,p}(\mathbb{R}^N)$ implies that we may also assume that $u_j\rightharpoonup u$ in $W_{loc}^{1,p}$ and in $L_{loc}^{p^*}(\mathbb{R}^N)$.
For $\phi\geq 0$ with support in some bounded set $\Omega$, from \eqref{eq_subsup} we have
\begin{equation}
\int_{\Omega}h(x)(f(u_j)-a_j)\phi\,dx\leq \int_{\Omega}|\nabla u_j|^{p-2}\nabla u_j \nabla \phi\,dx\leq\int_{\Omega}h(x)f(u_j)\phi\,dx \,;
\end{equation}
the above convergences bring
\begin{equation}
\int_{\Omega}|\nabla u|^{p-2}\nabla u \nabla \phi\,dx=\int_{\Omega}h(x)f(u)\phi\,dx \,,
\end{equation}
then $u$ is a nontrivial solution of \eqref{Problem-P} with $a=0$, and since $f\geq0$, it follows that $u$ is everywhere positive.
\end{proof}
\section{Proof of the main Theorems }\label{sec_prfmain}
In order to prove that $u_a>0$ for $a$ small enough, we will first construct a subsolution that will be used for comparison.
\begin{lemma}\label{lm_z}
Let $\vartheta>N>p$ be as in
\eqref{Hp_P4_Bbeta}.
Given $A,r>0$ there exists $H>0$ such that
the problem
\begin{equation}\label{eq_probz}
\left \{
\begin{array}{rclcl}
-\Delta_p z & = & A & \mbox{in} & B_r\,, \\
-\Delta_p z & = & -H |x|^{-\vartheta} & \mbox{in} & \mathbb{R}^N\setminus B_r\,, \\
\end{array}
\right.
\end{equation}
has an explicit family of bounded radial and radially decreasing weak solutions, defined up to an additive constant. More precisely, if we take $H=A\frac {\vartheta-N}{N} r^{\vartheta}$ and if we fix $\lim_{|x|\to\infty}z(x)=0$, then the solution is
\begin{equation}\label{eq_z}
z(x)= \begin{cases}
C-\rob{\frac A N}^{1/(p-1)}\frac{p-1}p|x|^{p/(p-1)}& \mbox{ for $|x|<r$}\,,\\
\rob{\frac {A} {N}r^{\vartheta}}^{1/(p-1)}\frac{p-1}{\vartheta-p}|x|^{(p-\vartheta)/(p-1)} & \mbox{ for $|x|\geq r$}\,,\\
\end{cases}
\end{equation}
where $C$ is chosen so that the two formulas coincide for $|x|=r$.
\end{lemma}
\begin{proof}
For a radial function $u(x)=v(|x|)$ one has $$\Delta_p u =|v'|^{p-2}\sqb{(p-1)v''+\frac{ N-1}{\rho}v'}\,.$$
By substitution, one can see that a function in the form $u(x)=v(|x|)=\sigma |x|^\lambda$ is a solution of the equation $\Delta_p u=\varrho|x|^b $ provided
$$
\begin{cases}
\lambda=\frac{p+b}{p-1}\,,
\\
|\sigma|^{p-2}\sigma=\frac 1{(N+b)|\lambda|^{p-2}\lambda}\,\varrho\,.
\end{cases}
$$
In particular,
\begin{itemize}
\item if $b=0$ then $\lambda=\frac p{p-1}>0$ and $\sigma$ has the same sign as $\varrho$;
\item if $b=-\vartheta$ with $\vartheta>N>p$ then $\lambda=\frac {p-\vartheta}{p-1}<0$ and still $\sigma$ has the same sign as $\varrho$.
\end{itemize}
Now, taking the two functions
$$
\begin{cases}
-\rob{\frac A N}^{1/(p-1)}\frac{p-1}p|x|^{p/(p-1)}& \mbox{for $|x|<r$\,, }
\\\rob{\frac {H} {\vartheta-N}}^{1/(p-1)}\frac{p-1}{\vartheta-p}|x|^{(p-\vartheta)/(p-1)}& \mbox{for $|x|>r$\,, }
\end{cases}
$$
they satisfy \eqref{eq_probz} and their radial derivatives are
$$
\begin{cases}
-\rob{\frac A N}^{1/(p-1)}|x|^{1/(p-1)}& \mbox{for $|x|<r$\,, }
\\-\rob{\frac {H} {\vartheta-N}}^{1/(p-1)}|x|^{(1-\vartheta)/(p-1)}& \mbox{for $|x|>r$. }
\end{cases}
$$
Note that the derivatives are equal at $|x|=r$ provided
$$
\frac A N r=\frac {H}{\vartheta-N} r^{1-\vartheta}\,.
$$
Having this in mind, we can therefore construct $z$ piecewise as in \eqref{eq_z} obtaining a bounded, radial and radially decreasing function, to which any constant can be added.
\end{proof}
In order to finalize the proof of our main Theorems, we only need to show that for any sequence of positive numbers $a_j\to0$ there exists a subsequence of the corresponding critical points $u_j:=u_{a_j}$ that are positive: this is proved in the following Lemma.
\begin{lemma}\label{lm_ujpos}
If $h$ satisfies \eqref{Hp_P2_L1Li}$-$\eqref{Hp_P4_Bbeta}, then the sequence $\{u_j\}$ satisfies $u_j>0$ for $j$ large enough.
\end{lemma}
\begin{proof}
Fix $r>0$. From Lemma \ref{lm_subtou}, up to a subsequence, $u_j\to u>0$ uniformly in $\overline{B_r}$. Thus, there exist $A,j_0>0$ such that,
\begin{equation}\label{eq_hfA}
\mbox{ $u_j>0$ \quad and \quad $h(x)f_{a_j}(u_j)\geq A$,\quad in $\overline{B_r}$,\quad for $j>j_0$. }
\end{equation}
Now let $B$ be the constant in condition \eqref{Hp_P4_Bbeta}, $ H=A\frac {\vartheta-N}{N} r^{\vartheta}$ as from Lemma \ref{lm_z} and let $j_1>j_0$ be such that $a_jB<H$ for $j>j_1$. Then we have,
\begin{equation}\label{eq_hfB}
\mbox{ $h(x)f_{a_j}(u_j)\geq-h(x)a_j\geq
-H |x|^{-\vartheta}$,\ \ in $B_r^C$,\ \ for $j>j_1$. }
\end{equation}
Combining equation \eqref{eq_subsup}, the above inequalities and \eqref{eq_probz}, we get, for every $\phi\in D^{1,p}(\R^N), \ \phi\geq0$,
\begin{equation}\label{eq_compuz}
\int_{\R^N} |\nabla u_j|^{p-2}\nabla u_j\nabla \phi\,dx \geq
\int_{\R^N} h(x)(f_{a_j}(u_j))\phi\,dx
\geq
\int_{\R^N} |\nabla z|^{p-2}\nabla z\nabla \phi\,dx\,.
\end{equation}
In order to conclude, fix an arbitrary $R>r$ and define $z_R$ by subtracting from $z$ a constant so that $z_R=0$ on $\partial B_R$ (but observe that $z_R>0$ in $ B_R$).
By \eqref{eq_compuz} (which holds true also for $z_R$) and since $u_j\geq 0=z_R$ on $\partial B_R$ we obtain by Comparison Principle that $u_j\geq z_R>0$ in $B_R$.
Since $R$ is arbitrary, we have proved that $u_j>0$ in $\R^N$ for $j>j_1$.
In particular, since $z_R\to z$ uniformly as $R\to \infty$, we conclude that $u_j\geq z$ in $\R^N$ for $j>j_1$.
\end{proof}
\section{Final comments. }\label{sec_hopf}
In this section we would like to point out some results that can be useful when studying problems that involve the $p$-Laplacian operator in the whole $\mathbb{R}^N$. Proposition \ref{hopf} below works like a Hopf's Lemma for the $p$-Laplacian operator in the whole $\mathbb{R}^N$ and it allows to prove the Liouville-type result in Proposition \ref{prop_Liou}.
\begin{lemma}\label{lm_z_h}
Let $N>p$ and $A,r>0$. Then, the problem
\begin{equation}\label{eq_probz_h}
\left \{
\begin{array}{rclcl}
-\Delta_p z & = & A & \mbox{in} & B_r, \\
-\Delta_p z & = & 0 & \mbox{in} & \mathbb{R}^N\setminus B_r, \\
\end{array}
\right.
\end{equation}
has an explicit family of bounded radial and radially decreasing weak solutions, defined up to an additive constant. More precisely, if we fix $\displaystyle \lim_{|x|\to\infty}z(x)=0$, then the solution is
$$
z(x)= \begin{cases}
C-\rob{\frac A N}^{1/(p-1)}\frac{p-1}p|x|^{p/(p-1)}& \mbox{ for $|x|<r$}\,,\\
\rob{\frac A N}^{1/(p-1)}\frac{p-1}{N-1}r^{N/(p-1)}|x|^{(p-N)/(p-1) }& \mbox{ for $|x|\geq r$}\,,\\
\end{cases}
$$
where $C$ is chosen so that the two formulas coincide for $|x|=r$.
\end{lemma}
\begin{proof}
For $|x|>r$ we consider the family of p-harmonic functions $C_1|x|^{\frac{p-N}{p-1}}$, with radial derivative $-C_1\frac{N-p}{p-1}|x|^{-\frac{N-1}{p-1}}$.
Then we only have to set $-\rob{\frac A N}^{1/(p-1)}r^{1/(p-1)}=-C_1\frac{N-p}{p-1}r^{-(N-1)/(p-1)} $, that is,
\begin{equation}\label{eq_consthopf}C_1=\rob{\frac A N}^{1/(p-1)}\frac{p-1}{N-p}r^{N/(p-1)}.
\end{equation}
\end{proof}
Proceeding as in the proof of Lemma \ref{lm_ujpos}, we obtain the following Proposition as an immediate consequence of the above Lemma.
\begin{proposition}[Hopf's Lemma] \label{hopf}
Suppose $N>p$, $A,r,\alpha>0$ and $u \in D^{1,p}(\mathbb{R}^N) \cap C_{loc}^{1,\alpha}(\mathbb{R}^N)$ satisfying
$$
\begin{cases}
-\Delta_p u\geq A>0& \text{in}\ B_r\,,
\\-\Delta_p u\geq 0 & \text{in}\ \R^N\,,
\\u\geq 0& \text{in}\ \R^N\,.
\end{cases}
$$
Then $u(x)\geq C|x|^{(p-N)/(p-1) }$ for $|x|>r$, where $C$ is given in \eqref{eq_consthopf}.
\end{proposition}
The above Proposition complements, in some sense, the study made in \cite[Theorem 3.1]{CTT}, which obtained a similar estimate when $u$ is a solution of a particular class of $p$-Laplacian problems in the whole $\mathbb{R}^N$.
\begin{remark}\label{rm_udec}
Proposition \ref{hopf} applies in particular to the limit solution $u$ obtained in Lemma \ref{lm_subtou}, providing us with its decay rate at infinity.
\end{remark}
Finally, from Proposition \ref{hopf} it is straightforward to derive Proposition \ref{prop_Liou}.
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\end{document} |
\begin{document}
\title{Seismic Imaging and Optimal Transport}
\author{Bj\"orn Engquist}
\address{Department of Mathematics and ICES, The University of Texas at Austin, 1 University Station C1200, Austin, TX 78712 USA}
\email{engquist@math.utexas.edu}
\author{Yunan Yang}
\address{Department of Mathematics, The University of Texas at Austin, 1 University Station C1200, Austin, TX 78712 USA}
\email{yunanyang@math.utexas.edu}
\begin{abstract}
Seismology has been an active science for a long time. It changed character about 50 years ago when the earth’s vibrations could be measured on the surface more accurately and more frequently in space and time. The full wave field could be determined, and partial differential equations (PDE) started to be used in the inverse process of finding properties of the interior of the earth. We will briefly review earlier techniques but mainly focus on Full Waveform Inversion (FWI) for the acoustic formulation. FWI is a PDE constrained optimization in which the variable velocity in a forward wave equation is adjusted such that the solution matches measured data on the surface. The minimization of the mismatch is usually coupled with the adjoint state method, which also includes the solution to an adjoint wave equation. The least-squares ($L^2$) norm is the conventional objective function measuring the difference between simulated and measured data, but it often results in the minimization trapped in local minima. One way to mitigate this is by selecting another misfit function with better convexity properties. Here we propose using the quadratic Wasserstein metric ($W_2$) as a new misfit function in FWI. The optimal map defining $W_2$ can be computed by solving a Monge-Amp\`ere equation. Theorems pointing to the advantages of using optimal transport over $L^2$ norm will be discussed, and a number of large-scale computational examples will be presented.
\end{abstract}
\date{\today}
\maketitle
\noindent \textbf{Keywords.} Seismic Imaging, Full-waveform Inversion, Optimal Transport, Monge-Amp\`ere\xspace equation
\noindent \textbf{Math Subject Classification.} 65K10, 65Z05, 86A15, 86A22
\tableofcontents
\section{Introduction}
Earth Science is an early scientific subject. The efforts started as early as AD 132 in China when Heng Zhang invented the first seismoscope in the world (Figure~\ref{fig:scope}). The goal was to record that an earthquake had happened and to try to determine the direction of the earthquake. Substantial progress in seismology had to wait until about 150 years ago when seismological instruments started to record travel time.
\begin{figure}
\caption{(a)~The first Seismoscope designed in AD 132 and (b)~Modern seismic vibrator used in seismic survey}
\label{fig:scope}
\label{fig:survey}
\end{figure}
With increasing sophistication in devices measuring the vibrations of seismic waves and in the availability of high-performance computing, increasingly advanced mathematical techniques could be used to explore the interior of the earth. The development started with calculations by hand based on geometrical optics and travel time measurement. It continued with a variety of wave equations when the equipment allowed for measuring wave fields and modern computers became available. As we will see below a wide range of mathematical tools are used today in seismic imaging, including partial differential equation (PDE) constrained optimization, advanced signal processing, optimal transport and the Monge-Amp\`ere\xspace equation.
Since the 19th century, modern seismographs have been developed to record seismic signals, which are vibrations in the earth. In 1798 Henry Cavendish measured the density of the earth with less than 1\% error compared with the number we can measure nowadays. Nearly one hundred years later, German physicist Emil Wiechert first discovered that the earth has a layered structure and the theory was further completed as the crust-mantle-core three-layer model in 1914 by one of his students, Beno Gutenberg. In the meantime, people studied the waves including body waves and surface waves to better understand the earthquake. P-waves and S-waves were first clearly identified for their separate arrivals by English geologist Richard Dixon Oldham in 1897. The Murchison earthquake in 1929 inspired the Danish female seismologist and geophysicist Inge Lehmann to study the unexpected P-waves recorded by the seismographs. Later on, she proposed that the core of the earth has two parts: the solid inner core of iron and a liquid outer core of nickel-iron alloy, which was soon acknowledged by peer geophysicists worldwide.
We will see that measuring travel time plays a vital role in the development of modern techniques for the inverse problem of finding geophysical properties from measurements of seismic waves on the surface. The methods are often related to travel time tomography. They are quite robust and cost-efficient for achieving low-resolution information of the subsurface velocities. The forward problem is based on ray theory or geometric optics~\cite{Bijwaard1998,Uhlmann2001}.
The development of man-made seismic sources and advanced recording devices (Figure~\ref{fig:survey}) facilitate the research on the entire wavefields in time and space (Figure~\ref{fig:data}) rather than merely travel time. This setup results in a more controlled setting and large amounts of data, which is needed for an accurate inverse process of estimating geophysical properties, for example, Figure~\ref{fig:vel}. The forward modeling is a wave equation with many man-made sources and many receivers.
The wave equation can vary from pure acoustic waves to anisotropic viscoelasticity. Even if there are various techniques in computational exploration seismology, there are two processes that currently stand out: reverse time migration (RTM)~\cite{Baysal1983, Yoon2004} and full waveform inversion (FWI)~\cite{tarantola1987inverse,tarantola1982generalized}.
\begin{figure}
\caption{(a)~An example of the seismic data measured from the receivers and (b)~Goal of inversion: geophysical properties as in the Sigsbee velocity model~\cite{Bashkardin}
\label{fig:data}
\label{fig:vel}
\end{figure}
Migration techniques can be applied in both the time domain and the frequency domain following the early breakthroughs by Claerbout on imaging conditions~\cite{Claerbout1971, Claerbout}. In RTM the computed forward wavefield starting from the source is correlated in time with the computed backward wavefield which is modeled with the measured data as the source term in the adjoint wave equation. The goal is to determine details of the reflecting surfaces as, for example, faults and sedimentary layers based on the measured data and a rough estimate of the geophysical properties. The least-squares reverse time migration (LSRTM)~\cite{Dai2012} is a new migration technique designed to improve the image quality generated by RTM. Reflectivity is regarded as a small perturbation in velocity, and the quantity is recovered through a linear inverse problem.
FWI is a high-resolution seismic imaging technique which recently gets great attention from both academia and industry~\cite{Virieux2017}. The goal of FWI is to find both the small-scale and large-scale components which describe the geophysical properties using the entire content of seismic traces. A trace is the time history measured at a receiver.
In this paper, we will consider the inverse problem of finding the wave velocity of an acoustic wave equation in the interior of a domain from knowing the Cauchy boundary data together with natural boundary conditions~\cite{Clayton1977}, which is implemented by minimizing the difference between computed and measured data on the boundary. It is thus a PDE-constrained optimization.
There are various kinds of numerical techniques that are used in seismic inversion, but FWI is increasing in popularity even if it is still facing three main computational challenges. First, the physics of seismic waves are complex, and we need more accurate forward modeling in inversion going from pure acoustic waves to anisotropic viscoelasticity~\cite{virieux2009overview}.
Second, even as PDE-constrained optimization, the problem is highly non-convex. FWI requires more efficient and robust optimization methods to tackle the intrinsic nonlinearity. Third, the least-squares norm, classically used in FWI, suffers from local minima trapping, the so-called cycle skipping issues, and sensitivity to noise~\cite{Seismology2011}. We will see that optimal transport based Wasserstein metric is capable of dealing with the last two limitations by including both amplitudes mismatches and travel time differences~\cite{EFWass, engquist2016optimal}.
We will introduce the mathematical formulation of these techniques in the following sections. The emphasis will be on FWI, but we will also summarize the state of the art of other standard imaging steps. Finally, we will relate FWI to RTM and LSRTM. These approaches all involve the interaction of the forward and the time-reversed wavefields, which is well known as the ``imaging condition'' in geophysics.
\section{Seismic Imaging}
Seismic data contains interpretable information about subsurface properties. Imaging predicts the spatial locations as well as specifies parameter values describing the earth properties that are useful in seismology. It is particularly important for exploration seismology which mainly focuses on prospecting for energy sources, such as oil, gas, coal. Seismic attributes contain both travel time records and waveform information to create an image of the subsurface to enable geological interpretation, and to obtain an estimate of the distribution of material properties in the underground. Usually, the problem is formulated as an inverse problem incorporating both physics and mathematics. Seismic inversion and migration are terms often used in this setting.
\subsection{Seismic data}
There are two types of seismic signals. Natural earthquakes propagate with substantial ultra-low frequency wave energy and penetrate deeply through the whole earth. Recorded by seismometers, the natural seismic waves are used to study earth structures.
The other type of data is generated by man-made ``earthquakes'' to obtain an image of the sedimentary basins in the interior of the earth close to the surface. A wavefield has to be produced using suitable sources at appropriate locations, measured by receivers at other locations after getting reflected back from within the earth, and stored using recorders.
In this paper, we mainly discuss the second type of seismic events.
The raw seismic data is not ideal to interpret and to create an accurate image of the subsurface. Recorded artifacts are related to the surface upon which the survey was performed, the instruments of receiving and recording and the noise generated by the procedure.
We must remove or at least minimize these artifacts. Seismic data processing aims to eliminate or reduce these effects and to leave only the influences due to the structure of geology for interpretation. Typical data processing steps include but are not limited to deconvolution, demultiple, deghosting, frequency filtering, normal moveout (NMO) correction, dip moveout (DMO) correction, common midpoint (CMP) stack, vertical seismic profiling (VSP), etc~\cite{RoyChowdhury2011, Yilmaz2001}.
In the recent two decades, the availability of the increased computer power makes it possible to process each trace of the recorded common source gathers separately, aiming for a better image. We will discuss several primary imaging methods such as traveltime tomography, seismic migration, least squares migration and full waveform inversion (FWI).
\subsection{Traveltime tomography}
Most discoveries related to the structure of the earth were based on the assumption that seismic waves can be represented by rays, which is closely associated with geometric optics~\cite{Rawlinsona, Rawlinson, Zelt1999}.
The primary advantages are its applicability to complex, isotropic and anisotropic, laterally varying layered media and its numerical efficiency in such computations. A critical observation is the travel time information of seismic arrivals. We can understand many arrival time observations with ray theory~\cite{cerveny1977ray}, which describes how short-wavelength seismic energy propagates.
As a background illustration, we will derive the ray tracing expressions in a 1D setting where the velocity only varies vertically~\cite{shearer2009introduction}. Ray tracing in general 3D structure is more complicated but follows similar principles. Considering a laterally homogeneous earth model where velocity $v$ only depends on depth, the ray parameter which is also called the horizontal slowness $p$, can be expressed in the following equation by the Snell's law:
\begin{equation}
p = s(z) \sin(\theta) = \frac{dT}{dX},
\end{equation}
where $s(z)$ ($= \frac{1}{v(z)}$) is the slowness, $\theta$ is the incidence angle, $T$ is the travel time, $X$ is the horizontal range. At the turning point depth $z_p$, $ p = s(z_p) $, a constant for a given ray. The vertical slowness $\eta = \sqrt{s^2 - p^2}$.
When the velocity is a continuous function of depth, the surface to surface travel time $T(p)$ and the distance traveled $X(p)$ have the following expressions:
\begin{equation}\label{eq:travelT}
T(p) = 2\int_0^{z_p} \frac{s^2(z)}{\sqrt{s^2(z) - p^2}} dz = 2\int_0^{z_p} \frac{s^2(z)}{\eta} dz,
\end{equation}
and
\begin{equation}\label{eq:travelX}
X(p) = 2p\int_0^{z_p} \frac{dz}{\sqrt{s^2(z) - p^2}} = 2p\int_0^{z_p} \frac{dz}{\eta}.
\end{equation}
The expressions above are the forward problem in traveltime tomography. The seismologists are interested in inverting model parameter $s(z)$ from observed traveltime $T$ and traveled distance $X$.
Using integral transform pair, we can obtain
\begin{equation}~\label{eq:travelV}
z(s) = - \frac{1}{\pi} \int_{s_0}^{s} \frac{X(p)}{\sqrt{p^2 - s^2(z) }} d(p) = \frac{1}{\pi} \int_0^{X(s)} \cosh^{-1}(p/s) dX,
\end{equation}
which gives us the 1D velocity model.
Equation~\eqref{eq:travelV} is one example of the 1D velocity inversion problem at a given depth. There are limitations about traveltime tomography in general. First, the first arrivals are inherently nonunique. Second, the lateral velocity variations are not considered in this setting. If we divide the earth model into blocks, the 3D velocity inversion techniques can resolve some of the lateral velocity perturbations by using the travel time in each block. The problem can be formulated into a least-squares ($L^2$) inversion by minimizing the travel time residual between the predicted time and the observed time: $ ||t_\text{obs} - t_\text{pred}||_2^2$~\cite{shearer2009introduction,Zelt2011}.
One limitation of ray theory is that it is applicable only to smooth media with smooth interfaces, in which the characteristic dimensions of inhomogeneities are considerably larger than the dominant wavelength of the considered waves. The ray method can yield distorted results and will fail at caustics or in general at so-called singular regions~\cite{Cerveny2011}. Moreover, much more information is available from the observed seismograms than travel times. To some extent, travel time tomography can be seen as phase-based inversion, and next, we will introduce waveform-based methods where the wave equation plays a significant role.
\subsection{Reverse Time Migration}
To overcome the difficulties of ray theory and further improve image resolutions, reverse time migration (RTM), least-squares reverse time migration (LSRTM) and full-waveform inversion (FWI) replace the semi-analytical solutions to the wave equation by fully numerical solutions including the full wavefield. Without loss of generality, we will explain all the methods in a simple acoustic setting:
\begin{equation}\label{eq:FWD}
\left\{
\begin{array}{rl}
& m(\mathbf{x})\frac{\partial^2 u(\mathbf{x},t)}{\partial t^2}- \Laplace u(\mathbf{x},t) = s(\mathbf{x},t)\\
& u(\mathbf{x}, 0 ) = 0 \\
& \frac{\partial u}{\partial t}(\mathbf{x}, 0 ) = 0 \\
\end{array} \right.
\end{equation}
We assume the model $m(\mathbf{x}) = \frac{1}{c(\mathbf{x})^2}$ where $c(\mathbf{x})$ is the velocity, $u(\mathbf{x},t)$ is the wavefield, $s(\mathbf{x},t)$ is the source. It is a linear PDE but a nonlinear operator from model domain $m(\mathbf{x})$ to data domain $u(\mathbf{x},t)$.
Despite the fact that migration can be used to update velocity model~\cite{liu1995migration, sava2004wave, symes2008migration}, its chief purpose is to transform measured reflection data into an image of reflecting interfaces in the subsurface. There are two principal varieties of migration techniques: reverse time migration (RTM) which gives a modest resolution of the reflectivity~\cite{Baysal1983, yoon2004challenges} and least-squares reverse-time migration (LSRTM) which typically yields a higher resolution of the reflectivity~\cite{Dai2012,Dai2013}.
Reverse-time migration is a prestack two-way wave-equation migration to illustrate complex structure, especially strong contrast geological interfaces such as environments involving salts. Conventional RTM uses an imaging condition which is the zero time-lag cross-correlation between the source and the receiver wavefields~\cite{Claerbout1971}:
\begin{equation}\label{eq:IC}
R(\mathbf{x}) = \sum_\text{shots}\int_0^T u(\mathbf{x},t)\cdot v(\mathbf{x},t) dt,
\end{equation}
where $u$ is the source wavefield in~\eqref{eq:FWD} and $v$ is the receiver wavefield, which is the solution to the adjoint equation~\eqref{eq:rtm_adj}:
\begin{equation} \label{eq:rtm_adj}
\left\{
\begin{array}{rl}
& m(\mathbf{x})\frac{\partial^2 v(\mathbf{x},t)}{\partial t^2}- \Laplace v(x,t) = d(\mathbf{x},t) \delta(\mathbf{x}-\mathbf{x_r}) \\
& v(\mathbf{x}, T) = 0 \\
& v_t(\mathbf{x}, T ) = 0 \\
\end{array} \right.
\end{equation}
Here $T$ is the final recording time, $d$ is the observed data from the receiver $\mathbf{x_r}$ and $m$ is the assumed background velocity. The adjoint wave equation~\eqref{eq:rtm_adj} is always solved backward in time from $T$ to 0. Therefore it is also referred to as backward propagation.
\begin{figure}
\caption{RTM: (a)~Synthetic forward wavefield $u_\text{fwd}
\label{fig:RTM1}
\label{fig:RTM2}
\label{fig:RTM3}
\label{fig:RTM3step}
\end{figure}
In classical RTM, the forward modeling typically does not contain reflection information. For example, it can be the paraxial approximation of the wave equation, which does not allow for reflections~\cite{Clayton1977}, or a smooth velocity model with unknown reflecting layers.
In summary, conventional RTM consists of three steps, as Figure~\ref{fig:RTM3step} shows:
\begin{enumerate}
\item Forward modeling of a wave field with a good velocity model to get $u_\text{fwd}$;
\item Backpropagation of the measured data through the same model to get $u_\text{bwd}$;
\item Cross-correlate the source wavefield $u_\text{fwd}$ and the receiver wavefield $u_\text{bwd}$ based on an imaging condition (e.g., Equation~\eqref{eq:IC}) to detect the reflecting interfaces.
\end{enumerate}
RTM uses the entire solution of the wave equations instead of separating the downgoing or upgoing wavefields. Theoretically, RTM produces a more accurate image than ray-based methods since it does not rely on the asymptotic theory or migration using the one-way equation, which typically introduces modeling errors~\cite{Schuster2011}.
A good background velocity model that contains accurate information about the low-wavenumber components is also crucial for the quality of the image~\cite{Gray2011}. Recent advances in computation power make it possible to compute and store the solution of the wave equation efficiently, which significantly aids RTM to generate high-quality images~\cite{Etgen2009}.
\subsection{Least-squares Reverse Time Migration}
Least-squares reverse time migration (LSRTM) is a new migration method designed to improve the image quality generated by RTM. It is formulated as a linear inverse problem based on the Born approximation, which we will describe briefly in this section. The wave equation~\eqref{eq:FWD} defines a nonlinear operator $\mathcal{F}$ from model domain to data domain that maps $m$ to $u$. The Born approximation is a linearization of this map to the first order so that we can denote it as $L = \frac{\delta \mathcal{F}}{\delta m}$~\cite{hudson1981use, van1954correlations}.
One can derive the Born approximation as follows~\cite{Demanet2016}. If we denote the model $m(\mathbf{x})$ as the sum of a background model and a small perturbation:
\begin{equation}\label{eq:m+dm}
m(\mathbf{x}) = m_0(\mathbf{x}) + \varepsilon m_1(\mathbf{x}),
\end{equation}
the corresponding wavefield $u$ also splits into two parts:
\begin{equation}
u(\mathbf{x},t) = u_0(\mathbf{x},t) + u_{sc}(\mathbf{x},t),
\end{equation}
where $u$ satisfies~\eqref{eq:FWD}, and $u_0$ solves the following equation:
\begin{equation}\label{eq:FWD0}
\left\{
\begin{array}{rl}
& m_0(\mathbf{x})\frac{\partial^2 u_0(\mathbf{x},t)}{\partial t^2} - \Laplace u_0(\mathbf{x},t) = s(\mathbf{x},t)\\
& u_0(\mathbf{x}, 0 ) = 0 \\
& \frac{\partial u_0}{\partial t}(\mathbf{x}, 0 ) = 0 \\
\end{array} \right.
\end{equation}
Subtracting \eqref{eq:FWD0} from \eqref{eq:FWD} and using~\eqref{eq:m+dm}, we derive an equation of $u_{sc}$ with zero initial conditions:
\begin{equation} \label{eq:scatter}
m_0\frac{\partial^2 u_{sc}(\mathbf{x},t)}{\partial t^2}- \Laplace u_{sc}(\mathbf{x},t) = -\varepsilon m_1 \frac{\partial^2 u(\mathbf{x},t)}{\partial t^2}.
\end{equation}
We can write $u_{sc}$ using Green's function $G$:
\begin{equation}
u_{sc}(\mathbf{x},t) = -\varepsilon \int_0^t \int_{\mathbb{R}^n} G(\mathbf{x},y;t-s) m_1(y) \frac{\partial^2 u}{\partial t^2} (y,s)dy ds.
\end{equation}
As a result, the original wavefield $u$ has an implicit relation:
\begin{equation}
u = u_0 - \varepsilon Gm_1 \frac{\partial^2 u}{\partial t^2} = \left[I + \varepsilon G m_1 \frac{\partial^2}{\partial t^2} \right]^{-1} u_0
\end{equation}
The last term can be expanded in terms of Born series,
\begin{eqnarray}
u &=& u_0 - \varepsilon \int_0^t \int_{\mathbb{R}^n} G(\mathbf{x},y;t-s) m_1(y) \frac{\partial^2 u_0}{\partial t^2} (y,s)dy ds + \mathcal{O} (\varepsilon ^2)\\
&=& u_0 + \varepsilon u_1 + \mathcal{O} (\varepsilon ^2)
\end{eqnarray}
Therefore, we can approximate $u_{sc}$ explicitly by $\varepsilon u_1$ as $- \varepsilon G m_1 \frac{\partial^2 u_0}{\partial t^2}$, which is called the Born approximation. We also derive a linear map from $m_1$ to $u_1$:
\begin{equation}\label{eq:Born}
\left\{
\begin{array}{rl}
& m_0\frac{\partial^2 u_1(\mathbf{x},t)}{\partial t^2} - \Laplace u_1(\mathbf{x},t) = -m_1 \frac{\partial^2 u_0(\mathbf{x},t)}{\partial t^2} \\
& u_1(\mathbf{x}, 0 ) = 0 \\
& \frac{\partial u_1}{\partial t}(\mathbf{x}, 0 ) = 0 \\
\end{array} \right.
\end{equation}
Unlike~\eqref{eq:scatter}, \eqref{eq:Born} is an explicit formulation with $m_0$ as the background velocity and $u_0$ as the background wavefield which is the solution to~\eqref{eq:FWD0}.
It is convenient to denote the nonlinear forward map~\eqref{eq:FWD} as $\mathcal{F}: m \mapsto u$. A Taylor expansion of $u = \mathcal{F}(m)$, in the sense of the calculus of variations, gives us:
\begin{equation}
u = u_0 + \varepsilon \frac{\delta \mathcal{F}}{\delta m}[m_0] m_1 + \frac{\varepsilon^2}{2} < \frac{\delta^2 \mathcal{F}}{\delta m^2}[m_0] m_1, m_1> + \ldots
\end{equation}
The functional derivative $ \frac{\delta \mathcal{F}}{\delta m}: m_1 \mapsto u_1 $ is the linear operator~\eqref{eq:Born}, which we hereafter denote as $L$. The convergence of the Born series and the accuracy of the Born approximation can be proved mathematically~\cite{natterer2004error,newton2013scattering}.
We assume there is an accurate background velocity model $m_0$. The Born modeling operator maps the reflectivity $m_r$ to the scattered wavefield $d_r = \mathcal{F}(m) - \mathcal{F}(m_0)$:
\begin{equation}
Lm_r = d_r
\end{equation}
Although $L$ is linear, there is no guarantee that it is invertible~\cite{claerbout1992earth}. Instead of computing $L^{-1}$, we seek the reflectivity model by minimizing the least-squares error between observed data $d_r$ and predicted scattering wavefield:
\begin{equation}\label{eq:LSRTM}
J(m_r) = ||Lm_r - d_r||_2^2
\end{equation}
The normal least-squares solution to~\eqref{eq:LSRTM} is $m_r = (L^TL)^{-1}L^Td_r$ where $L^T$ is the adjoint operator, but it is numerically expensive and unstable to invert the term $L^TL$ directly. Instead, the problem is solved in an iterative manner using optimization methods such as conjugate gradient descent (CG).
Another interesting way of approximating $(L^TL)^{-1}$ is to consider the problem as finding a non-stationary matching filter~\cite{guitton2004amplitude,He2013}. Similar to RTM, we can get an image by doing one step of migration:
\begin{equation}\label{eq:m1}
m_1 = L^Td_r.
\end{equation}
One step of de-migration (Born modeling) based on $m_1$ generates data $d_1$
\begin{equation} \label{eq:m2}
d_1 = Lm_1.
\end{equation}
Finally, the re-migration step provides another image $m_2$
\begin{equation} \label{eq:m3}
m_2 = L^Td_1.
\end{equation}
Combining \eqref{eq:m1} to \eqref{eq:m3}, the inverse Hessian operator $(L^TL)^{-1}$ behaves like a matching filter between $m_1$ and $m_2$ which we are able to produce from the observed data. It is also the filter between $m_r$ and $m_1$ as \eqref{eq:m4} and \eqref{eq:m5} show below:
\begin{equation} \label{eq:m4}
m_1 = (L^TL)^{-1} m_2
\end{equation}
\begin{equation} \label{eq:m5}
m_r = (L^TL)^{-1} m_1
\end{equation}
Therefore, LSRTM can be seen as a process which first derives a filter to match the re-migration $m_2$ to the initial migration $m_1$ and then applies the filter back to the initial migrated image to give an estimate of the reflectivity. Seeking the reflectivity is equivalent to finding the best filter $K$ by minimizing the misfit $J(K)$ in the image or model domain:
\begin{equation}
J(K) = ||m_1 - Km_2 ||_2^2.
\end{equation}
The final reflectivity image $m_r \approx Km_1$. It is a single-iteration method which greatly reduces the computational cost of the iterative methods like CG.
A potentially better way of implementing the filter-based idea is to transform the image into curvelet domain~\cite{candes2006fast} to improve the stability and structural consistency in the matching~\cite{Wang2016}. The formulation of obtaining the Hessian filter in curvelet domain is to minimize a misfit function $J(s)$ where
\begin{equation}
J(s) = ||C(m_1) - s C(m_2)||_2^2 + \varepsilon ||s||_2^2,
\end{equation}
where $C$ is the curvelet domain transform operator, $s$ is the matching filter and $\varepsilon$ is the Tikhonov regularization parameter. The final reflectivity image $m_r \approx C^{-1}(|s|C(m_1))$, where $C^{-1}$ is the inverse curvelet transform operator.
In general, least-squares reverse time migration (LSRTM) is still facing challenges. First of all, the image quality highly depends on the accuracy of the background velocity model $m_0$. Even a small error can make the two wavefields meet at a wrong location, which generates a blurred image or an incorrect reflectivity~\cite{luo2014least}. Another drawback is its high computational cost compared with other traditional migration techniques. In practice, LSRTM fits not only the data but also the noise in the data. Consequently, it boosts the high-frequency noise in the image during the iterative inversion~\cite{Dai2017, Zeng2017}.
\subsection{Inversion}
The process of imaging through modeling the velocity structure is a form of inversion of seismic data~\cite{Treitel2001}, but in this paper, we regard inversion as a process of recovering the quantitative features of the geographical structure, that is, finding $m(\mathbf{x})$ in~\eqref{eq:FWD}. Inversion is often used to build a velocity model iteratively until the synthetic data matches the actual recording~\cite{Mora1989}.
Wave equation traveltime tomography~\cite{luo1991wave} and the ray-based tomography in the earlier section are phase-like inversion methods~\cite{Schuster2011}. Least-squares inversion is known as linearized waveform inversion~\cite{lailly1984migration,tarantola1984linearized}. The migration method introduced earlier, LSRTM, can also be seen as a linear inverse problem. The background model $m_0$ is not updated after each iteration in least-squares inversion. Similar to the goal of migration, the model to be updated iteratively is the reflectivity distribution instead of the velocity model. One can interpret the process as a series of reverse time migrations, where the data residual is backpropagated into the model instead of the recorded data itself (Figure~\ref{fig:RTM3}).
If the background model $m_0$ is the parameter we invert for, the problem turns into a nonlinear waveform inversion, which is also called full-waveform inversion (FWI). Both the low-wavenumber and high-wavenumber components are updated simultaneously in FWI so that the final image has high resolution and high accuracy~\cite{virieux2009overview}. FWI is the primary focus of the paper. In the following sections, we will further discuss the topic and especially the merit of using optimal transport based ideas to tackle the current limitations.
\begin{figure}
\caption{The framework of FWI as a PDE-constrained optimization}
\label{fig:FWI}
\end{figure}
\section{Full Waveform Inversion}
FWI is a nonlinear inverse technique that
utilizes the entire wavefield information to estimate
the earth properties.
The notion of FWI was first brought up three decades ago~\cite{lailly1983seismic, tarantola1982generalized} and has been actively studied as the computing power increases.
As we will see, the mathematical formulation of FWI is PDE constrained optimization.
Inversion for subsurface elastic parameters using FWI
has become increasingly popular in exploration applications~\cite{brossier2009seismic, Mora1988, virieux2009overview}.
Currently, FWI can achieve stunning clarity and resolution. Both academia and industry have been actively working on the innovative algorithms and software of FWI. However, this technique is still facing three main challenges.
First, the physics of seismic waves are complex, and we need more accurate forward modeling in inversion going from pure acoustic waves to anisotropic viscoelasticity. Recent developments focus on this multiparameter and multi-mode modeling. FWI strategies for simultaneous and hierarchical velocity and attenuation inversion have been investigated recently~\cite{Qu2017}, but there is a dilemma. The more realistic with more parameters the models of the earth become, the more ill-posed and even non-unique will the inverse problem be.
Second, it is well known that the accuracy of FWI deteriorates from the lack of low frequencies, data noise, and poor starting model. The limitation is mainly due to the ill-posedness of the inverse problem which we treat as a PDE-constrained optimization. FWI is typically performed using local optimization methods in which the subsurface model is described by using a large number of unknowns, and the number of model parameters is determined a priori~\cite{tarantola2005inverse}. These methods typically only use the local gradient of the objective function. As a result, the inversion process is easily trapped in the local minima. Markov chain Monte Carlo (MCMC) based methods~\cite{Sambridge2011}, particle swarm optimization~\cite{Chen2017}, and many other global optimization methods~\cite{Sen2011} can avoid the pitfall theoretically, but they are not cost-efficient to handle practical large-scale inversion currently.
Third, it is relatively inexpensive to update the model through local optimization methods in FWI, but the convergence of the algorithm highly depends on the choice of a starting model. The research directions can be grouped into two main ideas to tackle this problem. One idea is to replace the conventional least-squares norm with other objective functions in optimization for a wider basin of attraction~\cite{engquist2016optimal}. The other idea is to expand the dimensionality of the unknown model by adding non-physical coefficients. The additional coefficients may convexify the problem and fit the data better~\cite{ biondi2012tomographic, huang2017full}.
The essential elements of FWI framework (Figure~\ref{fig:FWI}) includes forward modeling and the adjoint-state method for gradient calculation.
\subsection{Forward modeling}
Wave-propagation modeling is the most significant step in seismic imaging. The earth is complex with various heterogeneity on many scales, and the real physics is far more complicated than the simple acoustic setting of this paper,
but the industry standard is still the acoustic model in time or frequency domain.
The current research of FWI covers multiple parameters inversion of seismic waveforms including anisotropic parameters, density, and attenuation factors~\cite{yang2016review} including viscoelastic modeling which is related to fractional Laplacian wave equations~\cite{schiessel1995generalized}. It should be noted that the more parameters in a model, the less well-posed is the inverse problem.
If we exclude the attenuation parameter, the general elastic wave equation is a realistic model. Based on the equation of conservation
of momentum (Newton's law of dynamics) and Hooke's law for stress and strain tensors, we have the following elastic wave equation:
\begin{equation}\label{eq:elastic1}
\rho \frac{\partial^2 u_i}{\partial t^2} = f_i + \frac{\partial \sigma_{ij}}{\partial x_j},
\end{equation}
\begin{equation}\label{eq:elastic2}
\frac{\partial \sigma_{ij}}{\partial t} = c_{ijkl} \frac{\partial \varepsilon_{ij}}{\partial t} + \frac{\partial \widetilde{\sigma}_{ij}}{\partial t},
\end{equation}
where $\rho$ is the density, $\mathbf{u}$ is the displacement vector, $\sigma$ is the nine-component stress tensor (i,j = 1,2,3), $\widetilde{\sigma}$ is the internal stress, $\mathbf{f}$ is the outer body force, $\varepsilon$ is the nine-component strain tensor which satisfies $\varepsilon_{ij} = \frac{1}{2} \left( \frac{\partial u_i}{\partial x_j} + \frac{\partial u_j}{\partial x_i} \right)$ and $c_{ijkl}$ is the stiffness tensor containing twenty-one independent components.
One can classify the current numerical methods of complex wave propagation into three categories: direct methods, integral wave equation methods and asymptotic methods~\cite{Viveros2011}. Direct methods include finite-difference method (FDM)~\cite{moczo2007finite}, pseudospectral method~\cite{furumura1998parallel}, finite element method (FEM)~\cite{marfurt1984accuracy}, spectral element method (SEM)~\cite{komatitsch1999introduction}, discontinuous Galerkin method (DG)~\cite{kaser2006arbitrary}, etc. Integral wave equation methods include both boundary element method (BEM)~\cite{bouchon2007boundary} and the indirect boundary element methods (IBEM)~\cite{pointer1998numerical} with a fast multipole method (FMM)~\cite{fujiwara2000fast} for efficiency. Asymptotic methods include geometrical optics, Gaussian beams~\cite{vcerveny1982computation} and frozen Gaussian beams~\cite{lu2011frozen}.
\subsection{Measure of mismatch}
In seismic inversion, the misfit function, i.e. the objective function in the optimization process, is defined as a functional on the data domain. Common misfit functions include cross-correlation traveltime measurements~\cite{luo1991wave,Marquering1999}, amplitude variations~\cite{Dahlen2002} and waveform differences~\cite{tarantola1982generalized}. In both time~\cite{tarantola1987inverse} and frequency domain~\cite{PRATT1990,pratt1990inverse}, the least-squares norm has been the most widely used misfit function. For example, in time domain conventional FWI defines a least-squares waveform misfit as
\begin{equation}
d(f,g) = J(m)=\frac{1}{2}\sum_r\int\abs{f(\mathbf{x_r},t;m)-g(\mathbf{x_r},t)}^2dt,
\end{equation}
where $\mathbf{x_r}$ are receiver locations, $g$ is observed data, and $f$ is simulated data which solves~\eqref{eq:FWD} with model parameter $m$. The time integral is carried out numerically as a sum. This formulation can also be extended to the case with multiple sources.
Real seismic data usually contains noise. As a result, denoising becomes an important step in seismic data processing. The $L^2$ norm is well known to be sensitive to noise~\cite{brossier2010data}. Other norms have been proposed to mitigate this problem. For example, the $L^1$ norm~\cite{Crase1990,tarantola1987inverse}, the Huber criterion~\cite{Guitton2003,ha2009waveform} and the hybrid $L^1/L^2$ criterion~\cite{Bube1997} all demonstrated improved robustness to noise compared with conventional $L^2$ norm.
All the misfit functions above are point-by-point based objective functions, which means they only accumulate the differences in amplitude at each fixed time grid point. There are global misfit functions that compare the simulated and measured signals not just pointwise. The Wasserstein metric is one such metric, which we will discuss later. It is very robust with respect to noise.
The oscillatory and periodic nature of waveforms lead to another main challenge in FWI: the cycle-skipping issue when implementing FWI as a local inversion scheme.
If the true data and the initial synthetic data are more than half a wavelength ($>\frac{\lambda}{2}$) away from each other, the first gradient can go in the wrong direction regarding the phase mismatch, but can nonetheless reduce the data misfit in the fastest manner~\cite{Beydoun1988}. Mathematically, it is related to the highly nonconvex and highly nonlinear nature of the inverse problem and results in finding only a local minimum.
Figure~\ref{fig:2_ricker_signal} displays two signals, each of which contains two Ricker wavelets, and $f$ is simply a shift of $g$. The $L^2$ norm between $f$ and $g$ is plotted in Figure~\ref{fig:2_ricker_L2} as a function of the shift $s$. We observe many local minima and maxima in this simple two-event setting, which again demonstrates the difficulty of the so-called cycle-skipping issue~\cite{yang2017analysis}.
\begin{figure}
\caption{(a)~A signal consisting two Ricker wavelets (blue) and its shift (red)~(b)~$L^2$ norm of the difference between $f$ and $f(t-s)$ in terms of shift $s$~(c)~$W_2$ norm between $f$ and $f(t-s)$ in terms of shift $s$}
\label{fig:2_ricker_signal}
\label{fig:2_ricker_L2}
\label{fig:2_ricker_W2}
\end{figure}
\begin{figure}
\caption{The shaded areas represent the mismatch each misfit function considers. (a) $L^2$: $\int (f-g)^2 dt$. (b) Integral wavefields method: $\int (\int f-\int g)^2 dt$.~\cite{yangletter}
\label{fig:L2(fg)}
\label{fig:I(fg)}
\label{fig:2L2}
\end{figure}
The lower frequency components have a wider basin of attraction with the least-squares norm being the misfit function. Several hierarchical methods that invert from low frequencies to higher frequencies have been proposed in the literature to mitigate the cycle-skipping of the inverse problem~\cite{bunks1995multiscale, kolb1986pre, pratt1990inverse,sirgue2004efficient,weglein2003inverse}. Several other methods instead compare the integrated waveforms~\cite{huang2014two, liu2012normalized} (Figure~\ref{fig:2L2}) and the waveform envelops~\cite{Bozdag2011, luo2015seismic}. They share a similar idea with the hierarchical methods of taking advantage of the lower frequency components in the data.
A recently introduced class of misfit functions is based on optimal transport~\cite{chen2017quadratic,EFWass, engquist2016optimal,W1_2D,W1_3D,yang2017analysis,yangletter, yang2017application}. As a useful tool from the theory of optimal transport, the Wasserstein metric computes the minimal cost of rearranging one distribution into another. The optimal transport based methods compare the observed and simulated data globally and thus include phase information. We will discuss these measures in section 4 and 5.
Other misfit functions with the idea of non-local comparison proposed in the literature include filter based misfit functions \cite{AWI,zhu2016building} as well as inversion using, so called, dynamic time warping~\cite{ma2013wave} and the registration map~\cite{Baek}. The differential semblance optimization~\cite{symes1991velocity} exploits both phase and amplitude information of the reflections. Tomographic full waveform inversion \cite{biondi2012tomographic} has some global convergence characteristics of wave-equation migration velocity analysis.
In the filter based methods~\cite{AWI,zhu2016building}, a filter is designed to minimize the $L^2$ difference between filtered simulated data and the observed data. The misfit is then a measure of how much the filter deviates from the identity. As we will see in the optimal transport based technique, this is done in one step where the optimal map directly determines the mapping of the simulated data. The optimal transport map is general and does not need to have the form of a convolution filter as in the filter based methods.
\subsection{Adjoint-state method}
Large-scale realistic 3D inversion is possible today. The advances in numerical methods and computational power allow for solving the 3D wave equations and compute the Fr\'{e}chet derivative with respect to model parameters, which are needed in the optimization. In the adjoint-state method, one only needs to solve two wave equations numerically, the forward propagation and the backward adjoint wavefield propagation. Different misfit functions typically only affect the source term in the adjoint wave equation~\cite{Plessix,tarantola2005inverse}.
Let us consider the misfit function $J(m)$ for computing the difference between predicted data $f$ and observed data $g$ where $m$ is the model parameter, $F(m)$ is the forward modeling operator, $u(\mathbf{x},t)$ is the wavefield and $s(\mathbf{x},t)$ is the source. The predicted data $f$ is the partial Cauchy boundary data of $u$ which can be written as $f = Ru$ where $R$ is a restriction operator only at the receiver locations. The wave equation~\eqref{eq:FWD} can be denoted as
\begin{equation}\label{eq:adj_fwd}
F(m) u = s.
\end{equation}
Taking first derivative regarding model $m$ on both sides gives us:
\begin{equation}
\frac{\partial F}{\partial m} u + F \frac{\partial u}{\partial m} = 0.
\end{equation}
Therefore,
\begin{equation}\label{eq:adj_grad1}
\frac{\partial f}{\partial m} = -RF^{-1} \frac{\partial F}{\partial m} u.
\end{equation}
By the chain rule, the gradient of misfit function $J$ with respect to $m$ is
\begin{equation}\label{eq:adj_grad0}
\frac{\partial J}{\partial m} = \left(\frac{\partial f}{\partial m} \right)^T \frac{\partial J}{\partial f}
\end{equation}
We can derive the following equation by plugging~\eqref{eq:adj_grad1} into~\eqref{eq:adj_grad0}:
\begin{equation}\label{eq:adj_grad2}
\frac{\partial J}{\partial m} =-u^T \left(\frac{\partial F}{\partial m}\right)^T F^{-T}R^T \frac{\partial J}{\partial f}
\end{equation}
Equation~\eqref{eq:adj_grad2} is the adjoint-state method. The term $F^{-T}R^T \frac{\partial J}{\partial f}$ denotes the backward wavefield $v$ generated by the adjoint wave equation whose source is the data residual $R^T \frac{\partial J}{\partial f}$. The gradient is similar to the usual imaging condition~\eqref{eq:IC}:
\begin{equation}\label{eq:adj_grad3}
\frac{\partial J}{\partial m} =- \int_0^T \frac{\partial^2 u(\mathbf{x},t)}{\partial t^2} v(\mathbf{x},t)dt,
\end{equation}
where $v$ is the solution to the adjoint wave equation:
\begin{equation} \label{eq:FWI_adj}
\left\{
\begin{array}{rl}
& m\frac{\partial^2 v(\mathbf{x},t)}{\partial t^2}- \Laplace v(\mathbf{x},t) = R^T\frac{\partial J}{\partial f}\\
& v(\mathbf{x}, T) = 0 \\
& v_t(\mathbf{x}, T ) = 0 \\
\end{array} \right.
\end{equation}
Therefore $F^T$ can be seen as the backward modeling operator, which is similar to the adjoint wave equation~\eqref{eq:rtm_adj} but with a different source term.
There are many other equivalent ways to formulate the adjoint-state method. One can refer to~\cite{Demanet2016,Plessix} for more details.
In FWI, our aim is to find the model parameter $m^{\ast}$ that minimizes the objective function, i.e. \(m^{\ast} = \argmin J(m) \). For this PDE-constrained optimization, one can use the Fr\'{e}chet derivative in a gradient-based iterative scheme to update the model $m$, such as steepest descent, conjugate gradient descent (CG), L-BFGS, Gauss-Newton method, etc. One can also derive the second-order adjoint equation for the Hessian matrix and use the full Newton's method in each iteration, but it is not practical regarding memory and current computing power. It is one of the current research interests to analyze and approximate the Hessian matrix in optimization~\cite{Virieux2017}.
\section{Optimal Transport for FWI}
Optimal transport has become a well-developed topic in mathematics since it was first brought up by Monge~\cite{Monge} in 1781.
Due to its ability to incorporate both intensity and spatial information, optimal transport based metrics for modeling and signal processing have recently been adopted in a variety of applications including image retrieval, cancer detection, and machine learning \cite{kolouri2016transport}. In computer science, the metric is often called the ``Earth Mover's Distance'' (EMD).
The idea of using optimal transport for seismic inversion was first proposed in~\cite{EFWass}. The Wasserstein metric is a concept based on optimal transportation~\cite{Villani}. Here, we transform our datasets of seismic signals into density functions of two probability distributions. Next, we find the optimal map between these two datasets and compute the corresponding transport cost as the misfit function in FWI. In this paper, we will focus on the quadratic cost function. The corresponding misfit is the quadratic Wasserstein metric ($W_2$). As Figure~\ref{fig:2_ricker_W2} shows, the convexity of $W_2$ is much better than the $L^2$ norm when comparing oscillatory seismic data with respect to shift.
Following the idea that changes in velocity cause a shift or ``transport'' in the arrival time, \cite{engquist2016optimal} demonstrated the advantageous mathematical properties of the quadratic Wasserstein metric ($W_2$) and provided rigorous proofs that laid a solid theoretical foundation for this new misfit function. We can apply $W_2$ as misfit function in two different ways: trace-by-trace comparison which is related to 1D optimal transport in the time dimension, and the entire dataset comparison in multiple dimensions. We will see that solving the Monge-Amp\`ere\xspace equation in each iteration of FWI is a useful technique~\cite{yang2017application} for calculating the Wasserstein distance. An analysis of the 1D optimal transport approach and the conventional misfit functions such as $L^2$ norm and integral $L^2$ norm illustrated the intrinsic advantages of this transport idea~\cite{yangletter}.
\subsection{Wasserstein metric}
Let X and Y be two metric spaces with nonnegative Borel measures $\mu$ and $\nu$ respectively. Assume X and Y have equal total measure:
\begin{equation}
\int_X d\mu = \int_Y d\nu
\end{equation}
Without loss of generality, we will hereafter assume the total measure to be one, i.e., $\mu$ and $\nu$ are probability measures.
\begin{definition}[Mass-preserving map]
A transport map $T: X \rightarrow Y$ is mass-preserving if for any measurable
set $B \in Y$ ,
\begin{equation}
\mu (T^{-1}(B)) = \nu(B)
\end{equation}
If this condition is satisfied, $\nu$ is said to be the push-forward of $\mu$ by $T$, and we write $\nu = T_\# \mu$.
\end{definition}
In other words, given two nonnegative densities $f = d\mu$ and $g=d\nu$, we are interested in the mass-preserving map $T$ such that $f = g \circ T$. The transport cost function $c(x,y)$ maps pairs $(x,y) \in X\times Y$ to $\mathbb{R}\cup \{+\infty\}$, which denotes the cost of transporting one unit mass from location $x$ to $y$. The most common choices of $c(x,y)$ include $|x-y|$ and $|x-y|^2$, which denote the Euclidean norms for vectors $x$ and $y$ hereafter. Once we find a mass-preserving map $T$, the cost corresponding to $T$ is
\[
I(T,f,g,c) = \int\limits_Xc(x,T(x))f(x)\,dx.
\]
While there are many maps $T$ that can perform the relocation, we are interested in finding the optimal map that minimizes the total cost
\[
I(f,g,c) = \inf\limits_{T\in\mathcal{M}}\int\limits_Xc(x,T(x))f(x)\,dx,
\]
where $\mathcal{M}$ is the set of all maps that rearrange $f$ into $g$.
Thus we have informally defined the optimal transport problem, the optimal map as well as the optimal cost, which is also called the Wasserstein distance:
\begin{definition}[The Wasserstein distance]
We denote by $\mathscr{P}_p(X)$ the set of probability measures with finite moments of order $p$. For all $p \in [1, \infty)$,
\begin{equation}~\label{eq:static}
W_p(\mu,\nu)=\left( \inf _{T_{\mu,\nu}\in \mathcal{M}}\int_{\mathbb{R}^n}\left|x-T_{\mu,\nu}(x)\right|^p d\mu(x)\right) ^{\frac{1}{p}},\quad \mu, \nu \in \mathscr{P}_p(X).
\end{equation}
$\mathcal{M}$ is the set of all maps that rearrange the distribution $\mu$ into $\nu$.
\end{definition}
\subsection{1D problem}
In~\cite{yang2017application}, two ways of using $W_2$ in FWI were proposed. One can either compute the misfit globally by solving a 2D or 3D optimal transport problem or compare data trace-by-trace with the 1D explicit formula, see Theorem 1 below. For the 1D approach, the corresponding misfit function in FWI becomes
\begin{equation} \label{eqn:Wp1D}
J_1(m) = \sum\limits_{r=1}^R W_2^2(f(\mathbf{x_r},t;m),g(\mathbf{x_r},t)),
\end{equation}
where $R$ is the total number of time history traces, $g$ is the observed data, $f$ is the simulated data, $\mathbf{x_r}$ are the receiver locations, and $m$ is the model parameter.
Mathematically it is $W_2$ metric in the time domain and $L^2$ norm in the spatial domain.
For $f$ and $g$ in one dimension, it is possible to exactly solve the optimal transportation problem~\cite{Villani} in terms of the cumulative distribution functions
\begin{equation} \label{eq:F&G}
F(x) = \int_{-\infty}^x f(t)\,dt, \quad G(y) = \int_{-\infty}^y g(t)\,dt.
\end{equation}
In fact, the optimal map is just the unique monotone rearrangement of the density $f$ into $g$. In order to compute the Wasserstein metric ($W_p$), we need the cumulative distribution functions $F$ and $G$ and their inverses $F^{-1}$ and $G^{-1}$ as the following theorem states:
\begin{theorem}[Optimal transportation on $\mathbb{R}$]\label{OT1D}
Let $0 < f, g < \infty$ be two probability density functions, each supported on a connected subset of $\mathbb{R}$. Then the optimal map from $f$ to $g$ is $T = G^{-1}\circ F$.
\end{theorem}
From the theorem above, we derive another formulation for the 1D quadratic Wasserstein metric:
\begin{equation}\label{myOT1D}
\begin{aligned}
W_2^2(f,g) & = \int_0^1 |F^{-1} - G^{-1}|^2 dy \\
& = \int_X |x-G^{-1}(F(x))|^2 f(x)dx.
\end{aligned}
\end{equation}
The corresponding Fr\'{e}chet derivative, which is also the adjoint source term in the backward propagation, is:
\begin{equation}~\label{eq:1D_ADS_C}
\begin{split}
\frac{\partial W_2^2(f,g)}{\partial f} = & \left( \int_t^{T_0}-2(s-G^{-1}(F(s)))\frac{dG^{-1}(y)}{dy}\biggr\rvert_{y=F(s)} f(s) ds \right) dt \\ & +
|t-G^{-1}(F(t))|^2 dt.
\end{split}
\end{equation}
This adjoint source term in the discrete 1D setting can be computed as
\begin{equation}\label{eq:1D_ADS_D}
\begin{split}
\left[U\ \text{diag} \left(\frac{-2 f(t) dt}{g(G^{-1}\circ F (t))} \right) \right] (t-G^{-1} \circ F(t)) dt + |t-G^{-1}\circ F(t)|^2 dt,
\end{split}
\end{equation}
where $U$ is the upper triangular matrix whose non-zero components are 1.
\subsection{Monge-Amp\`ere\xspace equation}
This fully nonlinear partial differential equation plays an important role in computing the Wasserstein metric.
\subsubsection{Introduction}
In the previous section, we introduced the 1D optimal transport technique of comparing seismic data trace by trace and the explicit solution formula. Another option is a general optimal transport problem in all dimensions. In the global case we compare the full datasets and consider the whole synthetic data $f$ and observed data $g$ as objects with the general quadratic Wasserstein metric ($W_2$):
\begin{equation} \label{eqn:W22D}
J_2(m) = W_2^2(f(\mathbf{x_r},t;m),g(\mathbf{x_r},t)).
\end{equation}
The simple exact formula for 1D optimal transportation does not extend to optimal transportation in higher dimensions. Nevertheless, it can be computed by relying on two important properties of the optimal mapping~$T(x)$: conservation of mass and cyclical monotonicity. From the definition of the problem, $T(x)$ maps $f$ into $g$. If $T$ is a sufficiently smooth map and $\det(\nabla T(x) ) \neq 0$, the change of variables formula formally leads to the requirement
\begin{equation}\label{eq:massConserved}
f(x) = g(T(x))\det(\nabla T(x)).
\end{equation}
The optimal map takes on additional structure in the special case of the cost function (i.e., $c(x,y) = |x-y|^2$): it is cyclically monotone~\cite{Brenier,KnottSmith}.
\begin{definition}[Cyclical monotonicity]
\label{cyclical}
We say that $T:X\to Y$ is cyclically monotone if for any $m\in\mathbb{N}^+$, \(x_i\in X, \,1\leq i \leq m\),
\begin{equation}\label{eq:cyclical}
\sum_{i=1}^{m}|x_i-T(x_i)|^2 \leq \sum_{i=1}^{m}|x_i-T(x_{i-1})|^2
\end{equation}
or equivalently
\begin{equation}
\sum_{i=1}^{m}\langle T(x_i),x_i-x_{i-1}\rangle \geq 0
\end{equation}
where $x_0 \equiv x_m$.
\newline
\end{definition}
Additionally, a cyclically monotone mapping is formally equivalent to the gradient of a convex function~\cite{Brenier,KnottSmith}. Making the substitution $T(x) = \nabla u(x)$ into the constraint~\eqref{eq:massConserved} leads to the Monge-Amp\`ere\xspace equation
\begin{equation}~\label{eq:MAA}
\det (D^2 u(x)) = \frac{f(x)}{g(\nabla u(x))}, \quad u \text{ is convex}.
\end{equation}
In order to compute the misfit between distributions $f$ and $g$, we first compute the optimal map $T(x) = \nabla u(x)$ via the solution of this Monge-Amp\`ere\xspace equation coupled to the non-homogeneous Neumann boundary condition
\begin{equation}\label{eq:BC}
\nabla u(x) \cdot \nu = x\cdot \nu, \,\, x \in \partial X.
\end{equation}
The squared Wasserstein metric is then given by
\begin{equation}\label{eq:WassMA}
W_2^2(f,g) = \int_X f(x)\abs{x-\nabla u(x)}^2\,dx.
\end{equation}
For the general Monge-Amp\`ere\xspace equation, the uniqueness of the optimal map is not guaranteed. One needs to discuss it in the context of a particular cost function and certain hypotheses. For example, the cyclical monotonicity is the key element in the proof of the following Brenier's theorem~\cite{Brenier, DePhilippis2013}, which gives an elegant result about the uniqueness of the optimal transport map for the quadratic cost $|x-y|^2$:
\begin{theorem}[Brenier's theorem]
Let $\mu$ and $\nu$ be two compactly supported probability measures on $\mathbb{R}^n$. If $\mu$ is absolutely continuous with respect to the Lebesgue measure, then
\begin{enumerate}
\item There is a unique optimal map $T$ for the cost function $c(x,y) = |x-y|^2$. \item There is a convex function $u: \mathbb{R}^n \rightarrow \mathbb{R}$ such that the optimal map $T$ is given by $T(x) = \nabla u(x)$ for $\mu$-a.e. x.
\end{enumerate}
Furthermore, if $\mu(dx) = f(x)dx$, $\nu(dy) = g(y)dy$, then $T$ is differentiable $\mu$-a.e. and
\begin{equation}
\det (\nabla T(x)) = \frac{f(x)}{g(T(x))}.
\end{equation}
\end{theorem}
We are here considering the connection between the Monge-Amp\`ere\xspace equation and optimal transport where the transport map is geometric in nature.
The Monge-Amp\`ere\xspace equation is of course also known for many other connections to geometry and mathematical physics. Let us mention a few examples. It arises naturally in many problems such as affine geometry~\cite{Cheng1986}, Riemannian geometry~\cite{aubin2013some}, isometric embedding~\cite{Han2006}, reflector shape design~\cite{Wang1996}, etc. In the last century, treatments about this equation mostly came from the geometric problems above~\cite{Caffarelli1985, Cheng1977,Krylov1995, Minkowski1989, trudinger2008monge}. If we consider the following general Monge-Amp\`ere\xspace equation:
\begin{equation}~\label{eq:generalMA}
\det (D^2 u(x)) = f(x, u , Du),
\end{equation}
when $f = K(x) (1 + |Du|^2)^{(n+2)/2}$, the equation becomes the prescribed Gaussian curvature equation~\cite{DePhilippis2013}. In affine geometry, an affine sphere in the graph satisfies the Monge-Amp\`ere\xspace equation~\eqref{eq:generalMA}. The affine maximal surface satisfies a fourth-order equation which is related to the general Monge-Amp\`ere\xspace equation:
\begin{equation}
\sum_{i,j = i}^n U^{ij}\partial_{x_i}\partial_{x_j}\left[\det (D^2 u) \right]^{-\frac{n+1}{n+2}} = 0,
\end{equation}
where $U^{ij}$ is the cofactor matrix of $D^2 u$~\cite{Trudinger2008}.
\subsubsection{Weak solutions}
Although the Monge-Amp\`ere\xspace equation is a second-order PDE, there is no guarantee that the classical $C^2$ solution always exists. For the generalized Monge-Amp\`ere\xspace equation~\eqref{eq:generalMA} with homogeneous Dirichlet boundary condition $u=0$ on $\partial \Omega$, it is well-known that there exists a classical convex solution $u \in C^2(\Omega) \cap C(\overline{\Omega})$, when $f$ is strictly positive and sufficiently smooth~\cite{Caffarelli1989, Caffarelli1990, Caffarelli1991}. When the assumptions no longer hold, we solve for two types of weak solutions instead: the Aleksandrov solution and the viscosity solution. One can refer to~\cite{gutierrez2016monge} for more details and proofs of the following definitions and theorems.
Let $\Omega$ be the open subset of $\mathbb{R}^d$ and $u: \Omega \rightarrow \mathbb{R}$. We denote $\mathcal{P}(\mathbb{R}^d)$ as the set of all subsets of $\mathbb{R}^d$.
\begin{definition}
The normal mapping of $u$, or the subdifferential of $u$, is the set-valued mapping $\partial u: \Omega \rightarrow \mathcal{P}(\mathbb{R}^d)$ defined by
\begin{equation}
\partial u(x_0) = \{ p: u(x) \geq u(x_0)+ p\cdot (x - x_0), \text{\ for all } x \in \Omega \}
\end{equation}
Given $V\in \Omega$, $\partial u(V) = \cup_{x\in V} \partial u(x)$.
\end{definition}
\begin{theorem}[Monge-Amp\`ere\xspace measure]
If $\Omega$ is open and $u \in C(\Omega)$, then the class
\[ \mathcal{S} = \{ V\subset \Omega: \partial u(V) \text{ is Lebesgue measurable} \} \]
is a Borel $\sigma$-algebra. The set function $Mu: \mathcal{S} \rightarrow \overline{\mathbb{R}}$ defined by
\begin{equation}~\label{eq:MA_measure}
Mu(V) = |\partial u(V) |
\end{equation}
is a measure, finite on compact sets, called the Monge-Amp\`ere\xspace measure associated with the function $u$.
\end{theorem}
This is a measure generated by the Monge-Amp\`ere\xspace operator, which naturally defines the notion of the Aleksandrov solution.
\begin{definition}[Aleksandrov solution]
Let $\nu$ be a Borel measure defined on $\Omega$ which is an open and convex subset of $\mathbb{R}^n$. The convex function $u$ is a weak solution, in the sense of Aleksandrov, to the Monge-Amp\`ere\xspace equation
\begin{equation}
\det D^2u = \nu \text{\quad in $\Omega$}
\end{equation}
if the associated Monge-Amp\`ere\xspace measure $Mu$ defined in~\eqref{eq:MA_measure} is equal to $\nu$.
\end{definition}
Next we state one existence and uniqueness result for the Aleksandrov solution~\cite{awanou2014discrete}.
\begin{theorem}[Existence and uniqueness of the Aleksandrov solution]
Consider the following Dirichlet problem of the Monge-Amp\`ere\xspace equation
\begin{eqnarray} \label{eq:alek}
\det D^2u &= &\nu \text{\quad in $\Omega$} \\
u &=& g \text{\quad on $\partial \Omega$} \nonumber,
\end{eqnarray}
on a convex bounded domain $\Omega \subset \mathbb{R}^d$ with boundary $\partial \Omega$. Assume that $\nu$ is a finite Borel measure and $g \in C(\partial \Omega)$ which can be extended to a convex function $\tilde{g} \in C(\bar{\Omega})$. Then the Monge-Amp\`ere\xspace equation~\eqref{eq:alek} has a unique convex Aleksandrov solution in $C(\overline{\Omega})$.
\end{theorem}
Aleksandrov's generalized solution corresponds to the curvature measure in the theory of convex bodies~\cite{Trudinger2008}. A finite difference scheme for computing the Aleksandrov measure induced by $D^2u$ in 2D was conducted in~\cite{oliker1989numerical}, with the solution $u$ obtained as a byproduct~\cite{feng2011analysis}.
Another notion of weak solution is the viscosity solution, which occurs naturally if $f$ is continuous in \eqref{eq:generalMA}.
\begin{definition} [Viscosity solution]
Let $u\in C(\Omega)$ be a convex function and $f\in C(\Omega)$, $f\geq 0$. The function $u$ is a viscosity subsolution (supersolution) of \eqref{eq:generalMA} in $\Omega$ if whenever convex function $\phi \in C^2(\Omega)$ and $x_0 \in \Omega$ are such that $(u-\phi)(x) \leq (\geq )(u-\phi)(x_0) $ for all $x$ in the neighborhood of $x_0$, then we must have
\[
\det (D^2\phi(x_0)) \leq (\geq) f(x_0).
\]
The function $u$ is a viscosity solution if it is both a viscosity subsolution and supersolution.
\end{definition}
We can relate these two notions of weak solution in the following proposition:
\begin{proposition}
If $u$ is an Aleksandrov (generalized) solution of~\eqref{eq:generalMA} with $f$ continuous, then $u$ is also a viscosity solution.
\end{proposition}
\subsection{Numerical optimal transport in higher dimensions}
In this section, we will summarize some of the current numerical methods for solving the optimal transport problems in higher dimensions. These methods are based on the equivalent or relaxed formulations of the original Monge's problem. In the end, we will introduce a monotone finite difference Monge-Amp\`ere\xspace solver which is proved to converge to the viscosity solution to~\eqref{eq:MAA}~\cite{barles1991convergence,FroeseTransport}.
\subsubsection{General methods}
Optimal transport is a well-studied subject in mathematics while the computation techniques are comparatively underdeveloped.
We will focus on analysis based methods. There are combinatorial techniques that typically are computationally costly in higher dimensions, for example, the Hungarian algorithm~\cite{kuhn1955hungarian}.
The definition~\eqref{eq:static} is the original static formulation of the optimal transport problem with a quadratic cost. It is an infinite dimensional optimization problem if we search for $T$ directly. The non-symmetric nature of Monge's problem also generates difficulty because the map is not necessarily bijective~\cite{Levy2017}.
In the 40's, Kantorovich relaxed the constraints and formulated the dual problem~\cite{kantorovich1960mathematical}. Instead of searching for a map $T$, the transference plan $\gamma$ is considered, which is also a measure supported by the product space $X\times Y$. The Kantorovich problem is the following:
\begin{equation}
\inf_{\gamma} \bigg\{ \int_{X \times Y} c(x,y) d\gamma\ |\ \gamma \geq 0\ \text{and}\ \gamma \in \Pi(\mu, \nu) \bigg\} ,
\end{equation}
where $\Pi (\mu, \nu) =\{ \gamma \in \mathcal{P}(X\times Y)\ |\ (P_X)\sharp \gamma = \mu, (P_Y)\sharp \gamma = \nu \}$. Here $(P_X)$ and $(P_Y)$ denote the two projections, and $(P_X)\sharp \gamma$ and $(P_Y)\sharp \gamma $ are two measures obtained by pushing forward $\gamma$ with these two projections.
Consider $\varphi \in L^1(\mu)$ and $\psi \in L^1(\nu)$, the Kantorovich dual problem is formulated as the following~\cite{Villani}:
\begin{equation}\label{eq:dual}
\sup_{\varphi, \psi} \left( \int_X \varphi\ d\mu + \int_Y \psi\ d\nu \right),
\end{equation}
subject to $\varphi(x) + \psi(y) \leq c(x,y)$, for any $(x,y) \in X \times Y$.
The dual formulation is a linear optimization problem which is solvable by linear programming~\cite{cuturi2015smoothed,oberman2015efficient, schmitzer2016sparse}. Kantorovich obtained the 1975 Nobel prize in economics for his contributions to resource allocation problems where he interpreted the dual problem as an economic equilibrium. Recently Cuturi introduced the entropy regularized optimal transport problem which enforces the desirable properties for optimal transference plan and convexifies the problem. There have been extremely efficient computational algorithms~\cite{cuturi2013sinkhorn} which allow various applications in image processing, neuroscience, machine learning, etc~\cite{benamou2015iterative, cuturi2014fast, gramfort2015fast, solomon2015convolutional}.
In the 90's, Benamou and Brenier derived an equivalent dynamic formulation~\cite{BenBre} which has been one of the main tools for numerical computation. The Benamou-Brenier formula identifies the squared quadratic Wasserstein metric between $\mu$ and $\nu$ by
\begin{equation}
W_2^2 (\mu, \nu) = \inf \int_0^1 \int |v(t,x)|^2 \rho(t,x) dx dt,
\end{equation}
where the infimum is taken among all the solutions of the continuity equation:
\begin{eqnarray}
\frac{\partial \rho}{\partial t} + \nabla(v\rho) &=& 0, \\
\text{subject to\quad }\rho(0,x) = f,\ \rho(1,x) &=& g,\nonumber
\end{eqnarray}
In fact the infimum is taken among all Borel fields $v(t,x)$ that transport $\mu$ to $\nu$ continuously in time, satisfying the zero flux condition on the boundary. Many fast solvers based on this dynamic formulation have been proposed in the literature~\cite{benamou2015augmented, li2017parallel, papadakis2014optimal}. They are used particularly in image registration, warping, texture mixing, etc.
\subsubsection{The finite difference Monge-Amp\`ere\xspace solver}
As we have seen for the quadratic Wasserstein distance, the optimal map can be computed via the solution of a Monge-Amp\`ere\xspace partial differential equation~\cite{benamou2014numerical}. This approach has the advantage of drawing on the well-developed field of numerical partial differential equations (PDEs). We solve the Monge-Amp\`ere\xspace equation numerically for the viscosity solution using an almost-monotone finite difference method relying on the following reformulation of the Monge-Amp\`ere\xspace operator, which automatically enforces the convexity constraint~\cite{FroeseTransport}.
The scientific reason for using monotone type schemes follows from the following theorem by Barles and Souganidis~\cite{barles1991convergence}:
\begin{theorem}[Convergence of Approximation Schemes~\cite{barles1991convergence}]
Any consistent, stable, monotone approximation scheme to the solution of fully nonlinear second-order elliptic or parabolic PDE converges uniformly on compact subsets to the unique viscosity solution of the limiting equation, provided this equation satisfies a comparison principle.
\end{theorem}
The numerical scheme of~\cite{benamou2014numerical} uses the theory of~\cite{barles1991convergence} to construct a convergent discretization of the Monge-Amp\`ere\xspace equation~\eqref{eq:MAA} as stated in Theorem 7. A variational characterization of the determinant on the left hand side which also involves the negative part of the eigenvalues was proposed as the following equation:
\begin{multline}\label{eq:MA_convex}
{\det}(D^2u) = \\ \min\limits_{\{v_1,v_2\}\in V}\left\{\max\{u_{v_1,v_1},0\} \max\{u_{v_2,v_2},0\}+\min\{u_{v_1,v_1},0\} + \min\{u_{v_2,v_2},0\}\right\}
\end{multline}
where $V$ is the set of all orthonormal bases for $\mathbb{R}^2$.
Equation~\eqref{eq:MA_convex} can be discretized by computing the minimum over finitely many directions $\{\nu_1,\nu_2\}$, which may require the use of a wide stencil.
In the low-order version of the scheme, the minimum in~\eqref{eq:MA_convex} is approximated using only two possible values. The first uses directions aligning with the grid axes.
\begin{multline}\label{MA1}
MA_1[u] = \max\left\{\mathcal{D}_{x_1x_1}u,\delta\right\}\max\left\{\mathcal{D}_{x_2x_2}u,\delta\right\} \\+ \min\left\{\mathcal{D}_{x_1x_1}u,\delta\right\} + \min\left\{\mathcal{D}_{x_2x_2}u,\delta\right\} - f / g\left(\mathcal{D}_{x_1}u, \mathcal{D}_{x_2}u\right) - u_0.
\end{multline}
Here $\Delta x$ is the resolution of the grid, $\delta>K\Delta x/2$ is a small parameter that bounds second derivatives away from zero, $u_0$ is the solution value at a fixed point in the domain, and $K$ is the Lipschitz constant in the $y$-variable of $f(x)/g(y)$.
For the second value, we rotate the axes to align with the corner points in the stencil, which leads to
\begin{multline}\label{MA2}
MA_2[u] = \max\left\{\mathcal{D}_{vv}u,\delta\right\}\max\left\{\mathcal{D}_{v^\perp v^\perp}u,\delta\right\} + \min\left\{\mathcal{D}_{vv}u,\delta\right\} + \min\left\{\mathcal{D}_{v^\perp v^\perp}u,\delta\right\}\\ - f / g\left(\frac{1}{\sqrt{2}}(\mathcal{D}_{v}u+\mathcal{D}_{v^\perp}u), \frac{1}{\sqrt{2}}(\mathcal{D}_{v}u-\mathcal{D}_{v^\perp}u)\right) - u_0.
\end{multline}
Then the monotone approximation of the Monge-Amp\`ere\xspace equation is
\begin{equation}\label{eq:MA_compact} M_M[u] \equiv -\min\{MA_1[u],MA_2[u]\} = 0. \end{equation}
We also define a second-order approximation, obtained from a standard centred difference discretisation,
\begin{equation}\label{eq:MA_nonmon} M_N[u] \equiv -\left((\mathcal{D}_{x_1x_1}u)(\mathcal{D}_{x_2x_2}u)-(\mathcal{D}_{x_1x_2}u)^2\right) + f/g\left(\mathcal{D}_{x_1}u,\mathcal{D}_{x_2}u\right) + u_0 = 0.\end{equation}
These are combined into an almost-monotone approximation of the form
\begin{equation}\label{eq:MA_filtered} M_F[u] \equiv M_M[u] + \epsilon S\left(\frac{M_N[u]-M_M[u]}{\epsilon}\right) \end{equation}
where $\epsilon$ is a small parameter and the filter $S$ is given by
\begin{equation}\label{eq:filter}
S(x) = \begin{cases}
x & \abs{x} \leq 1 \\
0 & \abs{x} \ge 2\\
-x+ 2 & 1\le x \le 2 \\
-x-2 & -2\le x\le -1.
\end{cases}
\end{equation}
The Neumann boundary condition is implemented using standard one-sided differences. As described in~\cite{engquist2016optimal,FroeseTransport}, the (formal) Jacobian $\nabla M_F[u]$ of the scheme can be obtained exactly. It is known to be sparse and diagonally dominant.
\begin{theorem}[Convergence to Viscosity Solution~{\cite[Theorem 4.4]{FroeseTransport}}]\label{thm:MA_convergence}
Let the Monge-Amp\`ere\xspace equation \eqref{eq:MAA} have a unique viscosity solution and let $g>0$ be Lipschitz continuous on $\mathbb{R}^d$. Then the solutions of the scheme \eqref{eq:MA_filtered} converge to the viscosity solution of \eqref{eq:MAA} with a formal discretization error of $\mathcal{O}(Lh^2)$ where $L$ is the Lipschitz constant of $g$ and $h$ is the resolution of the grid.
\end{theorem}
Once the discrete solution $u_h$ is computed, the squared Wasserstein metric is approximated via
\begin{equation}\label{eq:WassDiscrete}
W_2^2(f,g) \approx \sum\limits_{j=1}^n (x_j-D_{x_j}u_h)^T\text{diag}(f)(x_j-D_{x_j}u_h) dt,
\end{equation}
where $n$ is the dimension of the data $f$ and $g$.
Then the gradient of the discrete squared Wasserstein metric can be expressed as
\begin{equation}
\begin{split}
\frac{\partial W_2^2(f,g) }{\partial f} = &\sum\limits_{j=1}^n \left[-2\nabla M_F^{-1}[u_f]^TD_{x_j}^T\text{diag}(f) \right](x_j - D_{x_j}u_f)dt \\ & + \sum\limits_{j=1}^n |x_j-D_{x_j}u_f|^2dt,
\end{split}
\end{equation}
This term is the discretized version of the Fr\'{e}chet derivative of the misfit function~\eqref{eqn:W22D} with respect to the synthetic data $f$, i.e., the adjoint source $\frac{\partial J}{\partial f}$ in the adjoint wave equation~\eqref{eq:FWI_adj}.
\section{Application of Optimal Transport to Seismic Inversion}
In this section, we first review the good properties of the $W_2$ norm for the application of full-waveform inversion. We will also explain some details of the implementations and show numerical results of using optimal transport based metrics as the misfit function in FWI.
\subsection{$W_2$ properties}
As we demonstrated in~\cite{engquist2016optimal}, the squared Wasserstein metric has several properties that make it attractive as a choice for misfit function. One highly desirable feature is its convexity with respect to several parameterizations that occur naturally in seismic waveform inversion~\cite{yang2017application}. For example, variations in the wave velocity lead to simulated $f$ that are derived from shifts,
\begin{equation}\label{eq:shift}
f(x;s) = g(x+s\eta), \quad \eta \in \mathbb{R}^n,
\end{equation}
or dilations,
\begin{equation}\label{eq:dilation}
f(x;A) = g(Ax), \quad A^T = A, \, A > 0,
\end{equation}
applied to the observation $g$.
Variations in the strength of a reflecting surface or the focusing of seismic waves can also lead to local rescalings of the form
\begin{equation}\label{eq:rescaleLocal}
f(x;\beta) = \begin{cases} \beta g(x), & x \in E\\ g(x), & x \in \mathbb{R}^n\backslash E.\end{cases}
\end{equation}
\begin{theorem}[Convexity of squared Wasserstein metric~{\cite{engquist2016optimal}}]\label{thm:convexity}
The squared Wasserstein metric $W_2^2(f(m),g)$ is convex with respect to the model parameters $m$ corresponding to a shift~$s$ in~\eqref{eq:shift}, the eigenvalues of a dilation matrix~$A$ in~\eqref{eq:dilation}, or the local rescaling parameter~$\beta$ in~\eqref{eq:rescaleLocal}.
\end{theorem}
Another important property of optimal transport is the insensitivity to noise. All seismic data contains either natural or experimental equipment noise. For example, the ocean waves lead to extremely low-frequency data in the marine acquisition. Wind and cable motions also generate random noise.
\begin{theorem}[Insensitivity to noise~{\cite{engquist2016optimal}}]\label{thm:noise}
Let $f_{ns}$ be $f$ with a piecewise constant additive noise of mean zero uniform distribution.
The squared Wasserstein metric $W_2^2(f,f_{ns})$ is of $\mathcal{O}(\frac{1}{N})$ where $N$ is the number of pieces of the additive noise in $f_{ns}$.
\end{theorem}
The $L^2$ norm is known to be sensitive to noise since the misfit between clean and noisy data is calculated as the sum of squared noise amplitude at each sampling point.
\subsection{Data normalization}
In optimal transport theory, there are two main requirements for signals $f$ and $g$: positivity and mass balance. Since these are not expected for seismic signals, some data pre-processing is needed before we can implement Wasserstein-based FWI.
In~\cite{EFWass,engquist2016optimal}, the signals were separated into positive and negative parts $f^+ = \max\{f,0\}$, $f^- = \max\{-f,0\}$ and scaled by the total mass $\langle f \rangle = \int_X f(x)\,dx$. Inversion was accomplished using the modified misfit function
\begin{equation} W_2^2\left(\frac{f^+}{\langle f^+ \rangle},\frac{g^+}{\langle g^+ \rangle} \right) + W_2^2\left(\frac{f^-}{\langle f^- \rangle}, \frac{g^-}{\langle g^- \rangle}\right). \end{equation}
While this approach preserves the desirable theoretical properties of convexity to shifts and noise insensitivity, it is not easy to combine with the adjoint-state method and more realistic examples. We require the scaling function to be differentiable so that it is easy to apply the chain rule when calculating the Fr\'{e}chet derivative for FWI backpropagation and also better suited for the Monge-Amp\`ere\xspace and the wave equation solvers.
There are other ways to rescale the datasets so that they become positive. For example, we can square the data as $\tilde{f} = f^2$ or extract the envelope of the data. These methods preserve the convexity concerning simple shifts, but we have lost the uniqueness: $f^2 = g^2$ does not imply $f=g$. As a result, more local minima are present since the fact that the misfit $J(f^2, g^2)$ is decreasing does not necessarily indicate that $f$ is approaching $g$, not to mention the non-unique issue of the inverse problem itself.
Typically, we first scale the data $f$ to be positive as $\tilde{f}$ and then normalize to ensure mass balance as $\tilde{f} / \langle \tilde{f} \rangle$. We now introduce three normalization methods that are robust in realistic large-scale inversions: the linear scaling~\cite{yang2017application} (Figure~\ref{fig:norm_LT})
\begin{equation}\label{eq:norm_LT}
\tilde{f} = f + c_1,\quad c_1 \geq \max\{-f,-g\},
\end{equation}
the exponential scaling~\cite{qiu2017full} (Figure~\ref{fig:norm_exp})
\begin{equation}\label{eq:norm_exp}
\tilde{f} =\exp(c_2 f),\quad c_2>0,
\end{equation}
and the sign-sensitive scaling (Figure~\ref{fig:norm_mix})
\begin{equation}\label{eq:norm_mix}
\tilde{f}=
\begin{cases}
f + \frac{1}{c_3},\quad & f\geq 0 \\
\frac{1}{c_3} \exp(c_3f),\quad & f < 0 \\
\end{cases}, \quad c_3 >0.
\end{equation}
\begin{figure}
\caption{(a)~The linear, (b)~the exponential and (c) the sign-sensitive scaling of a Ricker wavelet (Blue)}
\label{fig:norm_LT}
\label{fig:norm_exp}
\label{fig:norm_mix}
\label{fig:data_norm}
\end{figure}
If $c_2$ in~\eqref{eq:norm_exp} and $c_3$ in~\eqref{eq:norm_mix} are large enough, these two scaling methods keep the convexity of the $W_2$ norm regarding simple shifts as shown in Figure~\ref{fig:2_ricker_W2}. From Taylor expansion, we can see that the scalings are very close to the linear scaling when $c_2$ is small. One has to be careful with the exponential scaling~\eqref{eq:norm_exp} since it can easily become extremely large, but the sign-sensitive scaling~\eqref{eq:norm_mix} will not.
\subsection{FWI with Kantorovich-Rubinstein norm}
When the cost function $c(x,y)$ is the $L^1$ norm $|x-y|$, i.e. $p=1$ in~\eqref{eq:static} with $f\geq 0$, $g\geq 0$, and $\int f = \int g$, the corresponding alternative $W_1$ distance has the following equivalent dual formulation:
\begin{equation} \label{eq:W1}
W_1(f,g)
=\max_{\varphi \in \text{Lip}_1} \int_X \varphi(x)(f(x) - g(x))dx,
\end{equation}
where $\text{Lip}_1$ is the space of Lipschitz continuous functions with Lipschitz constant 1. However, seismic data $f$ and $g$ are oscillatory, containing both positive and negative parts. If $\int f \neq \int g$, the value of~\eqref{eq:W1} is always $+\infty$. Recently, \cite{W1_2D, W1_3D} introduced the following Kantorovich-Rubinstein (KR) norm in FWI, which is a relaxation of the original $W_1$ distance by constraining the dual space:
\begin{equation} \label{eq: KR}
\text{KR}(f,g) = \max_{\varphi \in \text{BLip}_1} \int_X \varphi(x)(f(x) - g(x))dx
\end{equation}
Here $\text{BLip}_1$ is the space of bounded Lipschitz continuous functions with Lipschitz constant 1.
One advantage of using the KR norm in FWI is that there is no need to normalize the data to be positive and mass balanced. However, the KR norm has no direct connection with optimal transport once we no longer require $f$ and $g$ to be probability measures~\cite{vershik2013long}. When $f$ and $g$ are far apart, which is very common when the initial velocity is rough, the maximum in~\eqref{eq: KR} is achieved by ``moving'' $f^+$ to $f^-$ and $g^+$ to $g^-$. The notion of transport is void in this case and convexity is lost.
\subsection{Numerical results of global $W_2$}
In the next two subsections, we provide numerical results for two approaches to using $W_2$ with linear normalization~\eqref{eq:norm_LT}: trace-by-trace comparison and using the entire 2D datasets as objects. Here a trace is the time history measured at one receiver while the entire dataset consists of the time history of all the receivers. These are compared with results produced by using the standard least-squares norm $L^2$ to measure the misfit. More examples can be found in~\cite{yang2017application}.
\begin{figure}
\caption{(a)~True velocity and (b)~initial velocity for full Marmousi model}
\label{fig:marm2_true}
\label{fig:marm2_v0}
\label{fig:marm2_true,marm2_v0}
\end{figure}
\begin{figure}
\caption{Inversion results of (a)~$L^2$ and (b)~global $W_2$ for the scaled Marmousi model}
\label{fig:marm_L2}
\label{fig:marm_w2_2D}
\label{fig:marm_inv_scaled}
\end{figure}
First, we use a scaled Marmousi model to compare the inversion between global $W_2$ and the conventional $L^2$ misfit function. Figure~\ref{fig:marm2_true} is the P-wave velocity of the true Marmousi model, but in this experiment, we use a scaled model which is 1 km in depth and 3 km in width. The inversion starts from an initial model that is the true velocity smoothed by a Gaussian filter with a deviation of 40, which is highly smoothed and far from the true model (a scaled version of Figure~\ref{fig:marm2_v0}). We place 11 evenly spaced sources on top at 50 m depth and 307 receivers on top at the same depth with a 10 m fixed acquisition. The discretization of the forward wave equation is 10 m in the $x$ and $z$ directions and 10 ms in time. The source is a Ricker wavelet which is the second derivative of the Gaussian function with a peak frequency of 15 Hz, and a bandpass filter is applied to remove the frequency components from 0 to 2 Hz.
We use L-BFGS, a quasi-Newton method, as the optimization algorithm~\cite{liu1989limited}.
Inversions are terminated after 200 iterations. Figure~\ref{fig:marm_L2} shows the inversion result using the traditional $L^2$ least-squares method after 200 L-BFGS iterations. The inversion result of global $W_2$ (Figure~\ref{fig:marm_w2_2D}) avoids the problem of local minima suffered by the conventional $L^2$ metric, whose result demonstrates spurious high-frequency artifacts due to a point-by-point comparison of amplitude.
We solve the Monge-Amp\`ere\xspace equation numerically in each iteration of the inversion. The drawback to the PDE approach is that data must be sufficiently regular for solutions to be well-defined and for the numerical approximation to be accurate. To remain robust on realistic examples, we use filters that effectively smooth the seismic data, which can lead to a loss of high-frequency information. For illustration in this paper, we perform computations using a Monge-Amp\`ere\xspace solver for synthetic examples. Even in 2D, some limitations are apparent. This is expected to become even more of a problem in higher-dimensions and motivates our introduction of a trace-by-trace technique that relies on the exact 1D solution. The trace-by-trace technique is currently more promising for practical applications, as is evidenced in our computational examples in the next section.
\subsection{Numerical results of trace-by-trace $W_2$}
Recall that for the 1D trace-by-trace approach, the misfit function in FWI is
\begin{equation}
J_1(m) = \sum\limits_{r=1}^R W_2^2(f(\mathbf{x_r},t;m),g(\mathbf{x_r},t)),
\end{equation}
where $R$ is the total number of traces, $g$ is observed data, $f$ is simulated data, $\mathbf{x_r}$ are receiver locations, and $m$ is the model parameter. The adjoint source term for each single trace is
\begin{equation}
\frac{\partial W^2_2(f,g)}{\partial f} = \left( \int_t^{T_0}\frac{-2(s-G^{-1} \circ F(s))}{g(G^{-1}\circ F(s))} f(s) ds+ |t-G^{-1}(F(t))|^2 \right)dt.
\end{equation}
The next experiment is to invert the full Marmousi model by conventional $L^2$ and trace-by-trace $W_2$ misfit. Figure~\ref{fig:marm2_true} is the P-wave velocity of the true Marmousi model, which is 3 km in depth and 9 km in width. The inversion starts from an initial model that is the true velocity smoothed by a Gaussian filter with a deviation of 40 (Figure~\ref{fig:marm2_v0}). The rest of the settings are the same as the previous section. Inversions are terminated after 300 L-BFGS iterations. Figure~\ref{fig:marm2_L2} shows the inversion result using the traditional $L^2$ least-squares method and figure~\ref{fig:marm2_w2_1D} shows the final result using trace-by-trace $W_2$ misfit function. Again, the result of $L^2$ metric has spurious high-frequency artifacts while $W_2$ correctly inverts most details in the true model. The convergence curves in Figure~\ref{fig:marm2_conv} show that $W_2$ reduces the relative misfit to 0.1 in 20 iterations while $L^2$ converges slowly to a local minimum.
\begin{figure}
\caption{Inversion results of (a)~$L^2$ and (b)~trace-by-trace $W_2$ for the true Marmousi model}
\label{fig:marm2_L2}
\label{fig:marm2_w2_1D}
\label{fig:marm2_inv}
\end{figure}
\begin{figure}
\caption{The convergence curves for trace-by-trace $W_2$ and $L^2$ based inversion of the full Marmousi model}
\label{fig:marm2_conv}
\end{figure}
\subsection{Insensitivity to noise}
One of the good properties of the quadratic Wasserstein metric\xspace is the insensitivity to noise~\cite{engquist2016optimal}. We repeat the previous experiment with a noisy reference by adding a uniform random iid noise to the data from the true velocity (Figure~\ref{fig:MARM2_noisy_trace}). The signal-to-noise ratio (SNR) is $-3.47$ dB. In optimal transport, the effect of noise is essentially negligible due to the strong cancellation between the nearby positive and negative noise.
All the settings remain the same as in the previous numerical experiment except the observed data. After 96 iterations, the optimization converges to a velocity presented in Figure~\ref{fig:MARM2_noisy_vel}. Although the result has lower resolution than Figure~\ref{fig:marm2_w2_1D}, it still recovers most features of Marmousi model correctly. Even when the noise is much larger than the signal, the quadratic Wasserstein metric\xspace still converges reasonably well.
\begin{figure}
\caption{(a)~Noisy and clean data and (b)~inversion result with the noisy data}
\label{fig:MARM2_noisy_trace}
\label{fig:MARM2_noisy_vel}
\label{fig:marm2_noisy}
\end{figure}
\end{document} |
\begin{document}
\title{Fragments of $\mathsf{IOpen}$}
\author{Konstantin Kovalyov}
\affil{
Phystech School of Applied Mathematics and Computer Science, Moscow Institute of Physics and Technology, Moscow, Russia
kovalev.ka@phystech.edu
}
\maketitle
\begin{abstract}
In this paper we consider some fragments of $\mathsf{IOpen}$ (Robinson arithmetic $\mathsf Q$ with induction for quantifier-free formulas) proposed by Harvey Friedman and answer some questions he asked about these theories. We prove that $\mathsf{I(lit)}$ is equivalent to $\mathsf{IOpen}$ and is not finitely axiomatizable over $\mathsf Q$, establish some inclusion relations between $\mathsf{I(=)}, \mathsf{I(\ne)}, \mathsf{I(\leqslant)}$ and $\mathsf{I} (\nleqslant)$. We also prove that the set of diophantine equations solvable in models of $\mathsf I (=)$ is (algorithmically) decidable.
\end{abstract}
\section{Introduction}
Recall that $\mathsf{IOpen}$ consists of Robinson arithmetic $\mathsf{Q}$ with the induction schema for all quantifier-free formulas. We assume that $\leq$ is a symbol in the signature of $\mathsf{Q}$. In December 2021, Harvey Friedman posed some interesting questions about $\mathsf{IOpen}$~\cite{fom919}. To formulate Friedman's questions, let us introduce these fragments of $\mathsf{IOpen}$: $\mathsf{I(lit)}$ is $\mathsf Q$ with induction schema for all atomic formulas and negated atomic formulas, $\mathsf{I(=)}$ is $\mathsf Q$ with induction schema for all formulas of the form $t = s$, where $t$ and $s$ are arithmetic terms; the theories $\mathsf{I(\ne)}, \mathsf{I(\leqslant)}, \mathsf{I(\nleqslant)}$ are defined similarly.
His questions concern relations between the following fragments with weaker induction: $\mathsf{I(lit)}$, $\mathsf{I(=)}, \mathsf{I(\ne)}, \mathsf{I(\leqslant)}$ and $\mathsf{I(\nleqslant)}$.
Friedman stated the following questions:
\begin{enumerate}
\item Is $\mathsf{I(lit)}$ weaker than $\mathsf{IOpen}$?
\item What are relationships between $\mathsf{I(=)}, \mathsf{I(\ne)}, \mathsf{I(\leqslant)}, \mathsf{I(\nleqslant)}$?
\item Are there interesting theorems that are equivalent to $\mathsf{I(lit)}$ over $\mathsf{Q}$?
\end{enumerate}
It is also interesting to consider theories $\mathsf{I(=, \ne)}$ and $\mathsf{IOpen(=)}$ (induction for quantifier-free formulas, containing only atomic formulas of the form $s = t$) and ask a similar question about their equivalence.
In addition to these questions, we can also try to determine the decidability of the set of Diophantine equations that have a solution in a model of a theory $\mathsf{T}$, where $\mathsf{T}$ is one of our theories. Formally, this set is $D(\mathsf{T}) = \{(s, t) \mid \exists \mathcal M \vDash \mathsf{T} \text{ such that } \mathcal M \vDash \exists \vec{x} (s(\vec x) = t(\vec x))\}$. There are some results in this area:
\begin{itemize}
\item $D(\mathsf Q)$ is decidable (see \cite{jerabek2016})
\item Decidability of $D(\mathsf{IOpen})$ is not proved yet, there are partial results (see \cite{wilkie1978}, \cite{van_den_dries1980}, \cite{otero1990})
\end{itemize}
In Section 2 we prove that $\mathsf{IOpen} \equiv \mathsf{I(lit)}$ and $\mathsf{I(lit)}$ is not finitely axiomatizable, which answers questions 1 and 3 in the negative.
In Section 3 we establish the following facts about the relationships of our weak fragments:
\begin{itemize}
\item $\mathsf I(=) \nvdash \mathsf I(\ne), \mathsf I (\leqslant), \mathsf I(\nleqslant)$,
\item $\mathsf I(\ne) \nvdash \mathsf I(\leqslant), \mathsf I(\nleqslant)$,
\item $\mathsf I(\leqslant) \nvdash \mathsf I(=), \mathsf I(\ne), \mathsf I(\nleqslant)$,
\item $\mathsf I(\ne) + \forall x \forall y (x + y = y + x) \vdash \mathsf I(=)$.
\end{itemize}
We show that $D(\mathsf{I(=)})$ is decidable and that $\mathsf{I(=)}$ proves $Th_=(\mathbb N)$ (all true identities in $\mathbb N$), but $\mathsf Q + Th_=(\mathbb N) \nvdash \mathsf{I(=)}$.
In Section 4 we state the problems remaining open.
\section{Preliminaries}
\begin{definition} [Robinson arithmetic]
\textit{Robinson arithmetic} $\mathsf{Q}$ consists of the following axioms in the arithmetical language $\mathcal{L}_{ar} = (0, S, +, \cdot, \leqslant)$:
\begin{enumerate}
\item[(Q1)] $Sx \ne 0$
\item[(Q2)] $Sx = Sy \rightarrow x = y$
\item[(Q3)] $x \ne 0 \rightarrow \exists y (x = Sy)$
\item[(Q4)] $x + 0 = x$
\item[(Q5)] $x + Sy = S(x + y)$
\item[(Q6)] $x \cdot 0 = 0$
\item[(Q7)] $x \cdot Sy = x \cdot y + x$
\item[(Q8)] $x \leqslant y \leftrightarrow \exists r (r + x = y)$
\end{enumerate}
\end{definition}
\begin{definition}
$\mathsf{IOpen}$ consists of $\mathsf Q$ and the induction schema for all quantifier free formulas in the language $\mathcal L_{ar}$, $\mathsf{I(lit)}$ consists of $\mathsf Q$ and induction schema for all literals in the language $\mathcal L_{ar}$ (i.e. atomic formulas and their negations). In the similar way we can define $\mathsf{I(=)}$, $\mathsf{I(\ne)}, \mathsf{I(\leqslant)}$ and $\mathsf{I(\nleqslant)}$.
\end{definition}
\begin{proposition}[\cite{hajek_pudlak_2017}, Theorem 1.10]
The following formulas are provable in $\mathsf{IOpen}$:
\begin{enumerate}
\item[(1)] $x + y = y + x$,
\item[(2)] $x + (y + z) = (x + y) + z$,
\item[(3)] $x \cdot y = y \cdot x$,
\item[(4)] $x(y + z) = x y + x z$,
\item[(5)] $x(y z) = (x y) z$,
\item[(6)] $x + y = x + z \rightarrow y = z$,
\item[(7)] $x \leqslant y \vee y \leqslant x$,
\item[(8)] $x \leqslant y \wedge y \leqslant x \rightarrow x = y$,
\item[(9)] $(x \leqslant y \wedge y \leqslant z) \rightarrow x \leqslant z$,
\item[(10)] $x \leqslant y \leftrightarrow x + z \leqslant y + z$,
\item[(11)] $(z \ne 0 \wedge x z = y z) \rightarrow x = y$,
\item[(12)] $z \ne 0 \rightarrow (x \leqslant y \leftrightarrow x z \leqslant y z)$.
\end{enumerate}
\end{proposition}
\begin{remark}
(1)-(5) can be proven in $\mathsf{I(=)}$.
\end{remark}
All rings and semirings in this paper will be commutative, associative with identity. Usually, structures will be denoted by calligraphic letters (such as $\mathcal{M, F, R}, \dots$), and their domains will be denoted by $M, F, R, \dots$.
\begin{definition}
Let $\mathcal M$ be a ring (semiring), $\leqslant$ be a linear order on $\mathcal M$. Then $(\mathcal M, \leqslant)$ is called \textit{an ordered ring} if $\forall x, y, z \in M (x \leqslant y \leftrightarrow x + z \leqslant y + z)$ and $\forall x, y, z \in M, z > 0 (x \leqslant y \leftrightarrow x \cdot z \leqslant y \cdot z)$. An ordered ring (semiring) is called \textit{discretely ordered} if $1$ is the least positive element (or, equivalently, there are no elements between $0$ and $1$).
\end{definition}
\begin{definition}
Let $\mathcal{M} \subseteq \mathcal{R}$ be two ordered rings (with the same orderings) and $\mathcal M$ be discretely ordered. Then $\mathcal M$ is \textit{an integer part} of $\mathcal R$ if $\forall r \in R \exists m \in M (m \leqslant r < m + 1)$. Such an $m$ is called the integer part of $r$. Notation: $\mathcal M \subseteq^{IP} \mathcal R$.
\end{definition}
\begin{remark}
Since $\mathcal M$ is discretely ordered, for every $r \in R$ its integer part is uniquely defined.
\end{remark}
\begin{theorem}[\cite{shepherdson1964}]
Let $\mathcal{M}$ be a discretely ordered ring, $\mathcal M^+$ be the non-negative part of $\mathcal M$. Then, $\mathcal{M}^+ \vDash \mathsf{IOpen}$ iff $\mathcal M \subseteq^{IP} R(\mathcal M)$, where $R(\mathcal M)$ is the real closure of the ordered fraction field of $\mathcal M$.
\end{theorem}
\section{$\mathsf{IOpen} \equiv \mathsf{I(lit)}$ and $\mathsf{I(lit)}$ is not finitely axiomatizable}
\begin{proposition}
Statements (1)-(12) from Proposition 1.1 are provable in $\mathsf{I(lit)}$.
\end{proposition}
\begin{proof}
Recall these formulas:
\begin{enumerate}
\item[(1)] $x + y = y + x$,
\item[(2)] $x + (y + z) = (x + y) + z$,
\item[(3)] $x \cdot y = y \cdot x$,
\item[(4)] $x(y + z) = x y + x z$,
\item[(5)] $x(y z) = (x y) z$,
\item[(6)] $x + y = x + z \rightarrow y = z$,
\item[(7)] $x \leqslant y \vee y \leqslant x$,
\item[(8)] $x \leqslant y \wedge y \leqslant x \rightarrow x = y$,
\item[(9)] $(x \leqslant y \wedge y \leqslant z) \rightarrow x \leqslant z$,
\item[(10)] $x \leqslant y \leftrightarrow x + z \leqslant y + z$,
\item[(11)] $(z \ne 0 \wedge x z = y z) \rightarrow x = y$,
\item[(12)] $z \ne 0 \rightarrow (x \leqslant y \leftrightarrow x z \leqslant y z)$.
\end{enumerate}
As noted in Remark after Proposition 1.1, (1)-(5) are provable in $\mathsf{I(=)}$. We outline the proofs of (6)-(12).
\begin{enumerate}
\item[(6)] $x + y = x + z \rightarrow y = z$.
Suppose $y \neq z$. We prove by induction on $x$ the statement $x + y \neq x + z$.
If $x = 0$, $0 + y = y \neq z = 0 + z$ (here we used commutativity of addition and Q4).
Let $x + y \neq x + z$. Then, $S x + y = S(x + y) \neq S(x + z) = S x + z$ (here we used commutativity, Q2 and Q5).
\item[(7)] $x \leqslant y \vee y \leqslant x$.
Suppose there exist $x, y$ such that $x \nleqslant y$ and $y \nleqslant x$. We prove $x \nleqslant y + z$ by induction on $z$.
If $z = 0$, then $x \nleqslant y = y + 0$.
Let $x \nleqslant y + z$. Suppose, $x \leqslant y + S z$. Then, there exists an $r$ such that $r + x = y + S z$. If $r = 0$, $x = y + S z$, then, $y \leqslant x$, and we have a contradiction. Let $r = S r'$. $S(r' + x) = S r' + x = y + Sz = S(y + z) \Rightarrow r' + x = y + z$. So, $x \leqslant y + z$, a contradiction.
Now, let $z$ be $x$. Then $x \nleqslant y + x$, a contradiction.
\item[(8)-(9)] Could be easily done, using commutativity and associativity of addition and axioms of $\mathsf Q$.
\item[(10)] $x \leqslant y \leftrightarrow x + z \leqslant y + z$
If $x \leqslant y$, then $r + x = y$ for some $r$, so $r + (x + z) = y + z$ and $x + z \leqslant y + z$.
Suppose, $x + z \leqslant y + z$, but $x \nleqslant y$. By (7), $y \leqslant x$. Since we've already proved the opposite implication, $y + z \leqslant x + z$. Then, by (8), $x + z = y + z$. Using (6), we obtain that $x = y$, so $x \leqslant y$.
\item[(11)] $(z \ne 0 \wedge x z = y z) \rightarrow x = y$
Suppose, $x \ne y$. By (7) we can assume, that, for example, $x \leqslant y$. Then, there is $r \ne 0$ such that $r + x = y$. Suppose, $x z = y z$, where $z \ne 0$. Then, $x z = (r + x) z$, by (6) and distributivity, $r z = 0$, which is impossible, since $z \ne 0$ and $r \ne 0$.
\item[(12)] $z \ne 0 \rightarrow (x \leqslant y \leftrightarrow x z \leqslant y z)$
Suppose, $x \leqslant y$, then $r + x = y$ for some $r$. Then $y z = r z + x z$, so $x z \leqslant y z$.
Using (7), we can prove the opposite implication.
\end{enumerate}
\end{proof}
So, every model of $\mathsf{I(lit)}$ is a discretely ordered semiring. Let $\mathcal M = (M, +, \cdot, \leqslant, 0, 1)$ be a model of $\mathsf{I(lit)}$. We can extend this semiring to a ring in the following way. Consider pairs $(m, n)$ of elements of our semiring and define the equivalence relation on them: $(m, n) \sim (m', n') \leftrightharpoons m + n' = m' + n$ ($(m, n)$ can be understood as $m - n$). It is easy to see that it is an equivalence relation. So, let $\widetilde{M} = M^2 / \sim$ and $\widetilde {\mathcal M} = (\widetilde{M}, ...)$ with the operations defined in an obvious way. It will be a discretely ordered ring and hence an integral domain. Denote by $F(\mathcal M)$ the (ordered) quotient field of $\widetilde{\mathcal M}$, by $R(\mathcal M)$ -- the real closure of $F(\mathcal M)$.
\begin{lemma}
Let $f \in \widetilde{\mathcal M}[X]$, $f(\frac{a}{q}) \leqslant 0$, $f(\frac{b}{q}) > 0$, $a, b, q \in M$, $a < b$. Then $\exists c \in M: f(\frac{c}{q}) \leqslant 0 \wedge f(\frac{c + 1}{q}) > 0$.
\end{lemma}
\begin{proof}
Define $g \in \widetilde{\mathcal M}[X]$ in the following way: $g(X) := q^n f(\frac{X + a}{q})$, where $n = \deg f$. Then $g(0) \leqslant 0$, $g(b - a) > 0$ and $\mathcal M \vDash g(0) \leqslant 0 \wedge \exists c (g(c) > 0)$. Since $\mathcal M \vDash \mathsf{I(lit)}$, $\mathcal M \vDash \exists c (g(c) \leqslant 0 \wedge g(c + 1) > 0)$. Then $f(\frac{c + a}{q}) \leqslant 0$ and $f(\frac{c + a + 1}{q}) > 0$.
\end{proof}
\begin{lemma}
Let $f \in \widetilde{\mathcal M}[X]$, $f(\frac{a}{q}) < 0$, $f(\frac{b}{q}) > 0$, $a, b, q \in M$, $a < b$ and $f$ has no roots in $F(\mathcal M)$. Then $\exists c \in M: a \leqslant c < b \wedge f(\frac{c}{q}) < 0 \wedge f(\frac{c + 1}{q}) > 0$.
\end{lemma}
\begin{proof}
Fix $a, b, q \in M$. Let $N(f) = \{m \in M|f(\frac{m}{q}) f(\frac{m + 1}{q}) < 0\}$. Note that $|N(f)|$ is finite, since for every $m \in N(f)$ there exists a root of $f$ in $R(\mathcal M)$ between $\frac{m}{q}$ and $\frac{m + 1}{q}$.
Suppose, there is $f \in \widetilde{\mathcal M}[X]$ such that $f(\frac{a}{q}) < 0$, $f(\frac{b}{q}) > 0$, but there is no $c$ between $a$ and $b$ such that $f(\frac{c}{q}) < 0$ and $f(\frac{c + 1}{q}) > 0$. Choose such an $f$ with the minimal $|N(f)|$. By Lemma 2.1 there is a $c \in M$ such that $f(\frac{c}{q}) < 0$ and $f(\frac{c + 1}{q}) > 0$. By the choice of $f$, $c < a$ or $c > b$.
If $c > b$, consider $\tilde f(X) := f(X)((2 c + 1) - 2 q X)$. Then $\tilde f(\frac{a}{q}) = f(\frac{a}{q})(\frac{2 c - 2 a + 1}{q}) < 0$, $\tilde f(\frac{b}{q}) = f(\frac{b}{q})(\frac{2 c - 2 b + 1}{q}) > 0$, there is no such $\tilde c$ between $a$ and $b$ such that $\tilde f(\frac{\tilde c}{q}) \tilde f(\frac{\tilde c + 1}{q}) < 0$ (on $[a, b]$ $\tilde f$ has the same sign as $f$) and $N(f) = N(\tilde f) \setminus \{c\}$. Hence, we have a contradiction with the choice of $f$. If $c < a$, we can consider in a similar way $\tilde f(X) := f(X)(2 q X - (2 c + 1))$.
\end{proof}
\begin{theorem}
Let $\mathcal M \vDash \mathsf{I(lit)}$. Then $\widetilde{\mathcal M} \subseteq^{IP} R(\mathcal M)$.
\end{theorem}
\begin{proof}
First we prove that $\widetilde{\mathcal M} \subseteq^{IP} F(\mathcal M)$. Consider $\frac{p}{q} \in F(\mathcal M), p > 0, q > 0$ (it is sufficient to prove the existence of the integer parts only for positive elements of $F(\mathcal M)$). $\mathcal M \vDash 0\cdot q \leqslant p \wedge (p + 1)q > p$. Then, by induction, we obtain $\mathcal M \vDash \exists m (m q \leqslant p \wedge (m + 1)q > p)$.
Consider a positive $r \in R(\mathcal M) \setminus F(\mathcal M)$. Let $f \in \widetilde{\mathcal{M}}[X]$ be the minimal polynomial of $r$. Let us introduce the following equivalence relation $\sim$ on $R(\mathcal M)$: $x \sim y \leftrightharpoons \nexists z \in F(\mathcal M): (x < z < y \vee y < z < x)$. Note that if $x \sim y$ and $q_1 < x < q_2$ for some $q_1, q_2 \in F(\mathcal M)$, then $q_1 < y < q_2$. It is not very hard to prove that elements of $F(\mathcal M)$ can be equivalent only to themselves. If $f$ has some root $r' \sim r, r' \ne r$, then $f'$ has a root $r''$ between $r'$ and $r$ by Rolle's theorem (and $r'' \sim r$). If $r$ is a multiple root of $f$, then $f'(r) = 0$. So, we can take a derivative of $f$ until $f^{(k)}$ has only one simple root $\tilde r \sim r$. Then we can find $q_1, q_2 \in F(\mathcal M)$, $q_1, q_2 > 0$ such that the only root of $f^{(k)}$ between $q_1$ and $q_2$ is $\tilde r$ (since $r$ is positive, so is $\tilde r$, hence $q_1, q_2$ can be chosen positive). Let $q_i = \frac{a_i}{q}$, $q, a_i \in M$. Since $\tilde r$ is simple, $f^{(k)}(q_1) f^{(k)}(q_2) < 0$. Also we can suppose that $f^{(k)}$ has no roots in $F(\mathcal M)$ (if not, we can divide $f^{(k)}$ by $(X - q)$ for the suitable $q \in F(\mathcal M)$ and then multiply by the suitable $m \in M$). So, we can apply Lemma 2.2 and obtain that there exists $b \in M$ such that $f^{(k)}(\frac{b}{q}) f^{(k)}(\frac{b + 1}{q}) \leqslant 0$, $a_1 \leqslant b < a_2$. This implies that there is a root between $\frac{b}{q}$ and $\frac{b + 1}{q}$. Since there is only one root $\tilde r$ on the segment $[q_1, q_2]$, $\frac{b}{q} \leqslant \tilde r \leqslant \frac{b + 1}{q}$. Given that $\widetilde{\mathcal M} \subseteq^{IP} F(\mathcal M)$, we obtain that $\tilde r$ (and hence $r$) has an integer part in $\mathcal M$.
\end{proof}
\begin{corollary}
$\mathsf{I(lit)} \vdash \mathsf{IOpen}$.
\end{corollary}
\begin{proof}
Apply Theorem 1.1 to Theorem 2.1.
\end{proof}
\begin{theorem}
$\mathsf{I(lit)}$ is not finitely axiomatizable.
\end{theorem}
\begin{proof}
Suppose that $\mathsf{I(lit)}$ is finitely axiomatizable, then $\mathsf{I(lit)} \equiv \mathsf Q + \Gamma$, where $\Gamma$ is a finite set of instances of the induction axiom schema for literals. Denote by $N$ the largest degree of polynomials from $\Gamma$ (all terms in $\mathsf{I(lit)}$ are equal to polynomials) and denote by $p_1, \dots, p_n$ all the prime numbers $\leqslant N$.
Consider the following structure $\mathcal{M}$: $M = \{a_m X^{\frac{m}{q}} + a_{m - 1} X^{\frac{m - 1}{q}} + \dots + a_1 X^{\frac{1}{q}} + a_0| m, q \in \mathbb N, q = p_1^{\alpha_1}\dots p_n^{\alpha_n}$ for some $\alpha_1, \dots, \alpha_n \in \mathbb N, a_m, \dots, a_1 \in \mathbb R_{alg}, a_0 \in \mathbb Z, a_m \geqslant 0\}$ with the operations defined in the usual way. Note that the corresponding ring $\widetilde{\mathcal M}$ is not contained as an integer part of the real closure of the fraction field of this ring. We denote this real closure by $\mathcal R$ (in our case $\mathcal R = \{a_m X^{\frac{m}{q}} + a_{m - 1} X^{\frac{m - 1}{q}} + \dots + a_1 X^{\frac{1}{q}} + a_0 + a_{-1}X^{-\frac{1}{q}} + \dots | a_i \in \mathbb R_{alg}\}$, because of the well-known fact that the real closure of $\mathbb Z[X]$ is $\{a_m X^{\frac{m}{q}} + a_{m - 1} X^{\frac{m - 1}{q}} + \dots + a_1 X^{\frac{1}{q}} + a_0 + a_{-1}X^{-\frac{1}{q}} + \dots | a_i \in \mathbb R_{alg}\}$ and the latter contains $\mathcal M$). So, it is sufficient to prove $\mathcal M \vDash \mathsf Q + \Gamma$ and to apply Theorem 1.1.
\begin{lemma}
Let $f \in \widetilde{\mathcal M}[t]\setminus\{0\}$, $\deg f \leqslant N$ and $r = a_m X^{\frac{m}{q}} + a_{m - 1} X^{\frac{m - 1}{q}} + \dots + a_1 X^{\frac{1}{q}} + a_0 + a_{-1} X^{-\frac{1}{q}} + \dots \in R$ be a root of $f$, $m > 0$. Then $\frac{m}{q} = \frac{m'}{q'}$, where $q' = p_1^{\alpha_1} \dots p_n^{\alpha_n}$ (i.e. $a_m X^{\frac{m}{q}} \in \widetilde{M}$).
\end{lemma}
\begin{proof}[Proof of Lemma 2.3.]
Let $f(t) = P_k t^k + \dots + P_0$, where $P_i \in \widetilde{M}$, so $P_k r^k + \dots + P_0 = 0$. All nonzero $P_i r^i$ are of the form
$$b_i X^{\frac{i\cdot m}{q} + \frac{k_i}{C}} + \dots,$$
where $C = p_1^{\beta_1}\dots p_n^{\beta_n}$ is a common denominator of degrees in all $P_i$ (i.e. $f \in \mathbb R_{alg} [X^{\frac{1}{C}}][t]$), $b_i \in \mathbb R_{alg}\setminus\{0\}$.
Consider the largest $\frac{i\cdot m}{q} + \frac{k_i}{C}$. Since $f(r) = 0$, there is $j \neq i$ such that $$\frac{j\cdot m}{q} + \frac{k_j}{C} = \frac{i\cdot m}{q} + \frac{k_i}{C}.$$
So, $\frac{m}{q} = \frac{k_i - k_j}{C(j - i)}$. Let us assume $j > i$; then $m' := k_i - k_j$ and $q' := C(j - i)$. Since $j - i \leqslant N$, $q'$ is of the required form.
\end{proof}
\begin{lemma}
Let $f \in \widetilde{\mathcal M}[t]$, $\deg f \leqslant N$ and $r = \sum\limits_{k = m}^{-\infty}a_k X^{\frac{k}{q}}\in R$, $f(r) = 0$. Then $r$ has an integer part in $\widetilde{\mathcal M}$.
\end{lemma}
\begin{proof}[Proof of Lemma 2.4.]
Induction on $\max(m, 0)$. If $m \leqslant 0$, then $r \in (a_0 - 1, a_0 + 1)$ and $r$ has an integer part. If $m > 0$, $a_m X^{\frac{m}{q}} \in \widetilde{M}$ by Lemma 2.3. So, we can apply the induction hypothesis to $f(t + a_m X^{\frac{m}{q}})$ and $r - a_m X^{\frac{m}{q}} = \sum\limits_{k = m - 1}^{-\infty}a_k X^{\frac{k}{q}}$. Denote by $s$ the integer part of $r - a_m X^{\frac{m}{q}}$, then $a_m X^{\frac{m}{q}} + s$ will be the integer part of $r$.
\end{proof}
\textit{Proof of Theorem 2.2.}
Let $\varphi(x, \vec{y})$ be an atomic formula or the negation thereof such that $Ind_{\varphi} \in \Gamma$. Then, $\varphi$ is equivalent to one of the following: $f(x) = 0$, $f(x) \neq 0$, $f(x) \leqslant 0$, $f(x) < 0$, where $f \in \widetilde{\mathcal M}[t]$ (with the coefficients dependent on $\vec{y}$) and $\deg f \leqslant N$. Cases $f(x) = 0$ and $f(x) \neq 0$ are trivial (since if polynomial has an infinite number of roots, then it is a zero polynomial). Consider the case $f(x) \leqslant 0$, the case $f(x) < 0$ is very similar. Suppose, $\mathcal M \vDash (f(0) \leqslant 0) \wedge \exists c (f(c) > 0)$. Let $A = \{r \in R | f(r) > 0 \wedge r > 0\}$. Since $\mathcal R$ is real closed, $A$ is a finite union of disjoint intervals. Since $\mathcal M \vDash \exists c (f(c) > 0)$, $M \cap A \ne \varnothing$. Consider the leftmost interval $(a, b)$ of $A$ containing some element $c$ of $M$. Since $f(a) = 0$, we have $[a] \in M$, where $[a]$ is the integer part of $a$ (by Lemma 2.4). Since $[a] \leqslant a < [a] + 1$ and $\mathcal M$ is discretely ordered, $[a] + 1 \leqslant c$ and $[a] + 1 \in (a, b)$. So, $\mathcal M \vDash f([a]) \leqslant 0 \wedge f([a] + 1) > 0$.
\end{proof}
\section{Relations between $\mathsf{I(=)}$, $\mathsf{I(\ne)}$ and $\mathsf{I(\leqslant)}$}
Our aim in this section is to prove the following theorems:
\begin{theorem}
There are the following relations between considered fragments:
\begin{itemize}
\item $\mathsf I(=) \nvdash \mathsf I(\ne), \mathsf I(\leqslant), \mathsf I(\nleqslant)$,
\item $\mathsf I(\ne) \nvdash \mathsf I(\leqslant), \mathsf I(\nleqslant)$,
\item $\mathsf I(\leqslant) \nvdash \mathsf I(=), \mathsf I(\ne), \mathsf I(\nleqslant)$,
\end{itemize}
\end{theorem}
\begin{theorem}
\begin{enumerate}
\item[(i)] $D(\mathsf{I}(=))$ is decidable;
\item[(ii)] $\mathsf I(=) \vdash Th_=(\mathbb N)$;
\item[(iii)] $\mathsf Q + Th_=(\mathbb N) \nvdash \mathsf I(=)$.
\end{enumerate}
\end{theorem}
\begin{theorem}
$\mathsf{I}(\ne) + \forall x \forall y (x + y = y + x) \vdash \mathsf{I}(=)$.
\end{theorem}
\begin{proposition}
(i) $\mathsf{I(=)} \nvdash Sx \ne x$ and $\mathsf{I(=)} \nvdash x + z = x + y \rightarrow z = y$;
(ii) $\mathsf{I(=)} \nvdash \mathsf{I(\ne)}$.
\end{proposition}
\begin{proof}
(i) Consider the $\mathcal L_{ar}$-structure $\mathcal M$ with the universe $M = \mathbb N \cup \{\omega\}$ and the operations defined in the following way: on natural numbers operations are defined in the standard way, $S\omega = \omega$, $x + \omega = \omega + x = \omega$, $0 \cdot \omega = \omega \cdot 0 = 0$, $x \ne 0 \rightarrow x \cdot \omega = \omega \cdot x = \omega$.
It is easy to see that $\mathcal M \vDash \mathsf Q$. It remains to show that $\mathcal M$ satisfies the induction scheme for formulas of the form $t = s$.
\begin{lemma}
Let $t(x, y_1, \dots, y_n)$ be a $\mathcal L_{ar}$-term and $y_1, \dots, y_n \in M$ are fixed. We say that term $t(x, \vec y)$ is constant in $x$ if $\exists z \in M \forall x \in M (t(x, \vec y) = z)$. Then $t(x, \vec y)$ is constant in $x$ or $t(\omega, \vec y) = \omega$. In the latter case, $t(x, \vec y) \geqslant x$ for all $x \in M$.
\end{lemma}
\begin{proof}
Trivial induction on terms from variables $x, y_1, \dots, y_n$.
\end{proof}
Using this lemma, one can easily prove the claim. Suppose, $\mathcal M \vDash t(0, \vec y) = s(0, \vec y)$ and $\mathcal M \vDash \forall x (t(x, \vec y) = s(x, \vec y) \rightarrow t(Sx, \vec y) = s(Sx, \vec y))$. Since $S\omega = \omega$, the latter means $\forall n \in \mathbb N \Big(\mathcal M \vDash t(n, \vec y) = s(n, \vec y) \rightarrow t(Sn, \vec y) = s(Sn, \vec y) \Big)$. By the usual induction we obtain $\forall n \in \mathbb N \Big(\mathcal M \vDash t(n, \vec y) = s(n, \vec y)\Big)$. If $t(x, \vec y)$ and $s(x, \vec y)$ are constant in $x$, then $\mathcal M \vDash \forall x (t(x, \vec y) = s(x, \vec y))$ and the induction axiom holds. If both $t$ and $s$ are not constant in $x$, then $t(\omega, \vec y) = \omega = s(\omega, \vec y)$, so, $\mathcal M \vDash \forall x (t(x, \vec y) = s(x, \vec y))$. Assume that $t$ is constant in $x$, $s$ is not constant in $x$. If $t(x, \vec y) = n \in \mathbb N$, then $t(n + 1, \vec y) = n \ne s(n + 1, \vec y) \geqslant n + 1$. So, $t(\omega, \vec y) = \omega = s(\omega, \vec y)$.
Finally, note that the constructed model falsifies $Sx \ne x$ and $x + z = x + y \rightarrow z = y$ (since $S\omega = \omega$ and $\omega + 0 = \omega + 1$).
(ii) Note that $\mathsf I(\ne) \vdash Sx \ne x$ ($S0 \ne 0$ and $Sx \ne x \rightarrow SSx \ne Sx$ are consequences of $\mathsf{Q}$, then apply the induction for the formula $Sx \ne x$).
\end{proof}
\begin{proposition}
\begin{enumerate}
\item[(i)] $\mathsf I(=) \nvdash \forall x \exists y (y^r \leqslant x \wedge \neg (Sy)^r \leqslant x)$ for all $r \geqslant 2$ (i.e. the existence of integer part of r-th roots is unprovable);
\item[(ii)] $\mathsf I(=) \nvdash \mathsf I(\leqslant), \mathsf I(\nleqslant)$.
\end{enumerate}
\end{proposition}
\begin{proof}
(i) Consider the structure $\mathbb Z[X]^+ = \{a_n X^n + \dots + a_0 \in \mathbb Z[X]| a_n > 0 \vee a_n X^n + \dots + a_0 = 0\}$ with $S$, $+$ and $\cdot$ defined in the usual way and $f \leqslant g \leftrightharpoons f(x) \leqslant g(x)$ for all sufficiently large $x$. It is obvious that $\mathbb Z[X]^+ \vDash \mathsf Q$.
Let $t(x, \vec y), s(x, \vec y)$ be $\mathcal L_{ar}$-terms, $y_1, \dots, y_m \in \mathbb Z[X]^+$ are fixed. Suppose $t(0, \vec y) = s(0, \vec y)$ and $\forall x \big(t(x, \vec y) = s(x, \vec y) \rightarrow t(Sx, \vec y) = s(Sx, \vec y)\big)$. Then for all $k \in \mathbb N$ $t(k, \vec y) = s(k, \vec y)$. We can represent $t(x, \vec y) - s(x, \vec y)$ as $x^n P_n(\vec y) + \dots + P_0(\vec y)$, where $P_i(\vec y) \in \mathbb Z[\vec y]$. Considering $k = 0, 1, \dots, n$ we obtain
$$
\begin{pmatrix}
1 & 0 & \cdots & 0 \\
1 & 1 & \cdots & 1 \\
\vdots & & \ddots & \vdots \\
1 & n & \cdots & n^n
\end{pmatrix}
\begin{pmatrix}
P_0(\vec y) \\
\vdots \\
P_n(\vec y)
\end{pmatrix}
=
\begin{pmatrix}
0 \\
\vdots \\
0
\end{pmatrix}.
$$
Since the left matrix is invertible (it is a Vandermonde matrix),
$\begin{pmatrix}
P_0(\vec y) \\
\vdots \\
P_n(\vec y)
\end{pmatrix}
=
\begin{pmatrix}
0 \\
\vdots \\
0
\end{pmatrix}$. So, $\forall x \big(t(x, \vec y) = s(x, \vec y)\big)$ and $\mathbb Z[X]^+ \vDash \mathsf I (=)$.
Let us now prove that $\mathbb Z[X]^+ \nvDash \exists y (y^r \leqslant X \wedge \neg (y + 1)^r \leqslant X)$ for $r \geqslant 2$. Consider any $y \in \mathbb Z[X]^+$. If $\deg y = 0$, then $(y + 1)^r \in \mathbb N$, so $(y + 1)^r < X$. If $\deg y \geqslant 1$, then $\deg y^r > 1$, so $y^r > X$.
(ii) It is easy to see that $\mathbb Z[X]^+ \nvDash \mathsf I(\leqslant)$. Consider the induction axiom for the formula $x^r \leqslant y$. Suppose it holds in $\mathbb Z[X]^+$. Since $\mathbb Z[X]^+ \vDash 0^r \leqslant y, \neg \forall x (x^r \leqslant y)$, $\mathbb Z[X]^+ \vDash \exists x (x^r \leqslant y \wedge \neg (Sx)^r \leqslant y)$. So we obtain a contradiction. In the similar way we can prove $\mathbb Z[X]^+ \nvDash \mathsf I(\nleqslant)$.
\end{proof}
\begin{proof}[Proof of theorem 3.2.]
(i) We claim that if some equation $s = t$ has a solution in a model of $\mathsf I(=)$, then it has a solution in the model $\mathcal M$ from Proposition 3.1.
Since in $\mathsf I(=)$ one can prove the commutativity, associativity and distributivity of addition and multiplication, all terms can be represented as
$$s(\vec x) = \sum\limits_{(i_1, \dots, i_n): i_1 + \dots + i_n \leqslant k} a_{i_1, \dots, i_n} x_1^{i_1} \dots x_n^{i_n},$$
where $k$ is a natural number and all $a_{i_1, \dots, i_n}$ are numerals. It is clear that such a form can be found effectively. Let $\deg s := \max\{i_1 + \dots + i_n | a_{i_1, \dots, i_n} \ne 0\}$.
Let us fix two terms $s(\vec x)$ and $t(\vec x)$. Consider three cases: 1) $\deg s = \deg t = 0$, 2) $\deg s > 0$, $\deg t = 0$ (or, symmetrically, $\deg s = 0$, $\deg t > 0$), 3) $\deg s > 0$, $\deg t > 0$.
1) $s$ and $t$ are constants, so it is easy to check whether they are equal.
2) Suppose there is $\mathcal N \vDash \mathsf I(=)$ such that $s(\vec x) = t(\vec x)$ for some $x_1, \dots, x_n \in N$. Let $s(\vec x) = \sum\limits_{\substack{(i_1, \dots, i_n): \\ i_1 + \dots + i_n \leqslant k}} a_{i_1, \dots, i_n} x_1^{i_1} \dots x_n^{i_n}$ in $\mathcal N$. Suppose that for some $j$, $x_j$ is nonstandard. Then for all $i_1, \dots, i_n$ such that $i_1 + \dots + i_n \leqslant k$ either $i_j = 0$ or $a_{i_1, \dots, i_n} x_1^{i_1} \dots x_{j - 1}^{i_{j - 1}} x_{j + 1}^{i_{j + 1}} \dots x_{n}^{i_n} = 0$ (otherwise $a_{i_1, \dots, i_n} x_1^{i_1} \dots x_n^{i_n}$ and $s(\vec x)$ would be nonstandard, which is contradictory since $t(\vec x)$ is standard). So, if we replace $x_j$ by $0$, $s(\vec x)$ will not change its value. Hence we can replace all nonstandard $x_i$'s by $0$ and obtain a solution of the considered equation in $\mathbb N$ (and hence in $\mathcal M$). Also it is clear that all $x_i$ can be bounded by $t$.
3) All such equations can be satisfied by taking $x_i = \omega$ ($s(\omega, \dots, \omega) = \omega = t(\omega, \dots, \omega)$).
From this we can easily obtain an algorithm to decide whether $s = t$ is satisfiable in $\mathsf I(=)$.
(ii) In fact, $Th_=(\mathbb N)$ can be deduced from $\mathsf Q$ and commutativity, associativity and distributivity of addition and multiplication.
Let us fix terms $s(\vec x)$ and $t(\vec x)$ such that $\mathbb N \vDash \forall \vec x (s(\vec x) = t(\vec x))$. As in (i), $s$ and $t$ can be represented as polynomials. Since they are equal in $\mathbb N$, they have equal coefficients and hence their equality is provable.
(iii) We introduce the following model $\mathcal N$: $N = \mathbb N \cup \{\omega_0, \omega_1\}$, operations on natural numbers defined in the standard way,
\begin{itemize}
\item $S \omega_i = \omega_i$, where $i \in \{0, 1\}$,
\item $\omega_i + n = n + \omega_i = \omega_i$, where $n \in \mathbb N$, $i \in \{0, 1\}$,
\item $\omega_i + \omega_j = \omega_{\max(i, j)}$, $i, j \in \{0, 1\}$,
\item $0 \cdot \omega_i = \omega_i \cdot 0 = 0$, $i \in \{0, 1\}$,
\item $n \cdot \omega_i = \omega_i \cdot n = \omega_i$, $i \in \{0, 1\}$, $n \in \mathbb N \setminus \{0\}$,
\item $\omega_i \cdot \omega_j = \omega_{\max(i, j)}$, $i, j \in \{0, 1\}$.
\end{itemize}
$\mathcal N \vDash \mathsf Q$ and operations in $\mathcal N$ are commutative, associative and distributive, so $\mathcal N \vDash Th_=(\mathbb N)$. But $\mathcal N \vDash 0 + \omega_0 = \omega_0 \wedge \forall x (x + \omega_0 = \omega_0 \rightarrow S x + \omega_0 = \omega_0) \wedge \omega_1 + \omega_0 \ne \omega_0$, so $\mathcal N \nvDash \mathsf I (=)$.
\end{proof}
\begin{proposition}
(i) $\mathbb Z[X]^+ \vDash \mathsf{I}(\ne)$;
(ii) $\mathsf I(\ne) \nvdash \mathsf I(\leqslant), \mathsf I(\nleqslant)$.
\end{proposition}
\begin{proof}
(i) We only need to prove $\mathbb Z[X]^+ \vDash Ind_{s(x, \vec y) \ne t(x, \vec y)}$, where $s$ and $t$ are terms. Fix these terms and $\vec y$. There are $P_n(\vec y), \dots, P_0(\vec y) \in \mathbb Z[X]$ such that $s(x, \vec y) - t(x, \vec y) = P_n(\vec y) x^n + \dots + P_0(\vec y)$. Suppose $P_n(\vec y) 0^n + \dots + P_0(\vec y) = P_0(\vec y) \ne 0$, $\forall x (P_n(\vec y) x^n + \dots + P_0(\vec y) \ne 0 \rightarrow P_n(\vec y) (S x)^n + \dots + P_0(\vec y) \ne 0)$, but $\exists x \in \mathbb Z[X]^+: P_n(\vec y) x^n + \dots + P_0(\vec y) = 0$. Then, for all $k \in \mathbb N$, $P_n(\vec y) (x - k)^n + \dots + P_0(\vec y) = 0$ (since we can apply a contraposition to the step and the usual induction).
$$
\begin{pmatrix}
1 & x & \cdots & x^n \\
1 & x - 1 & \cdots & (x - 1)^n \\
\vdots & & \ddots & \vdots \\
1 & x - n & \cdots & (x - n)^n
\end{pmatrix}
\begin{pmatrix}
P_0(\vec y) \\
\vdots \\
P_n(\vec y)
\end{pmatrix}
=
\begin{pmatrix}
0 \\
\vdots \\
0
\end{pmatrix}.
$$
Since this matrix is invertible (in $\mathbb Z (X)$), $P_n (\vec y) = \dots = P_0(\vec y) = 0$. So, $\forall x \in \mathbb{Z}[X]^+ \big(s(x, \vec y) = t(x, \vec y)\big)$, a contradiction.
(ii) We have already proved in Proposition 3.2 that $\mathbb Z[X]^+ \nvDash \mathsf I(\leqslant), \mathsf I(\nleqslant)$.
\end{proof}
\begin{proposition}
\begin{enumerate}
\item[(i)] $\mathsf I (\leqslant) \nvdash x + y = y + x, x \cdot y = y \cdot x, S x \ne x$
\item[(ii)] $\mathsf I (\leqslant) \nvdash \mathsf I (=), \mathsf I (\ne), \mathsf I (\nleqslant)$.
\end{enumerate}
\end{proposition}
\begin{proof}
(i) Consider the model $\mathcal M$: $M = \mathbb N \cup \{\omega_0, \omega_1\}$ with operations defined as follows (on $\mathbb N$ all operations defined in the standard way):
\begin{itemize}
\item $S \omega_i = \omega_i$, where $i \in \{0, 1\}$
\item $\omega_i + x = \omega_i$, $n + \omega_i = \omega_i$, where $i \in \{0, 1\}$, $x \in M$, $n \in \mathbb N$
\item $0 \cdot x = x \cdot 0 = 0$, $\omega_i \cdot x = \omega_i$, $n \cdot \omega_i = \omega_i$, where $i \in \{0, 1\}$, $x \in M \setminus \{0\}$, $n \in \mathbb N \setminus \{0\}$
\item $n \leqslant \omega_i$, $\omega_i \leqslant \omega_j$, where $i, j \in \{0, 1\}$, $n \in \mathbb N$
\end{itemize}
We prove that $\mathcal M$ is a model of $\mathsf{I(\leqslant)}$. It is not very hard to see that $\mathcal M \vDash \mathsf Q$. Let us, for example, check $\mathcal M \vDash \forall x \forall y (x \cdot S y = x \cdot y + x)$. Fix $x, y \in M$. If $x, y \in \mathbb N$ or $x = 0$, it is obvious. Consider the case $x \in \mathbb N \setminus \{0\}$ and $y = \omega_i$: $x \cdot S y = x \cdot \omega_i = \omega_i = \omega_i + x = x \cdot y + x$. If $x = \omega_i$, then $x \cdot S y = \omega_i = x \cdot y + x$.
As in Proposition 3.1 we can formulate the analogous lemma about terms (i.e. for every term $t(x, \vec y)$ and fixed $y_1, \dots, y_n \in M$ $\exists z \in M \forall x \in M \: t(x, \vec y) = z$ or $t(\omega_i, \vec y) = \omega_i, i = 0, 1$ and $\forall x \in M \: t(x, \vec y) \geqslant x$) and end the proof of $\mathcal M \vDash \mathsf{I(\leqslant)}$ in a similar way.
Now, $\mathcal M \vDash \omega_0 + \omega_1 \ne \omega_1 + \omega_0, \omega_0 \cdot \omega_1 \ne \omega_1 \cdot \omega_0, S \omega_0 = \omega_0$, as required.
(ii) This follows easily from (i), since $\mathsf I(=) \vdash x + y = y + x, x \cdot y = y \cdot x$, $\mathsf I (\ne) \vdash S x \ne x$ and $\mathcal M \nvDash (\omega_0 \nleqslant 0) \wedge \forall x (\omega_0 \nleqslant x \rightarrow \omega_0 \nleqslant Sx) \rightarrow \forall x (\omega_0 \nleqslant x)$.
\end{proof}
\begin{proof}[Proof of the Theorem 3.1.]
Follows from Propositions 3.1-3.4.
\end{proof}
\begin{proof}[Proof of the Theorem 3.3.]
Firstly, we prove the following lemma.
\begin{lemma}
$\mathsf{I}(\ne) + \forall x \forall y (x + y = y + x)$ proves associativity, commutativity and distributivity of $+$ and $\cdot$.
\end{lemma}
\begin{proof}
\begin{itemize}
\item Associativity of addition.
Suppose there is $x, y, z$ such that $x + (y + z) \ne (x + y) + z$. Consider the formula $\varphi(x, y, z, t) := \Big((x + (y + z)) + ((x + y) + t) \ne ((x + y) + z) + (x + (y + t))\Big)$. Then,
\begin{align*}
\varphi(x, y, z, 0) &\leftrightarrow (x + (y + z)) + (x + y) \ne ((x + y) + z) + (x + y) \\
&\leftrightarrow x + (y + z) \ne (x + y) + z
\end{align*}
(the latter equivalence is true since $\mathsf{I}(\ne) \vdash a + c = b + c \rightarrow a = b$). So, $\varphi(x, y, z, 0)$ is true.
Suppose, $\varphi(x, y, z, t)$ is true, but $\varphi(x, y, z, St)$ is false. Then,
\begin{align*}
\neg\varphi(x, y, z, St) &\leftrightarrow (x + (y + z)) + S((x + y) + t) = ((x + y) + z) + (x + S(y + t)) \\
&\leftrightarrow S\big((x + (y + z)) + ((x + y) + t)\big) = S\big(((x + y) + z) + (x + (y + t))\big) \\
&\leftrightarrow (x + (y + z)) + ((x + y) + t) = ((x + y) + z) + (x + (y + t)) \\
&\leftrightarrow \neg \varphi(x, y, z, t),
\end{align*}
so, we have got a contradiction. Applying induction to the formula $\varphi$, we obtain $\forall t \:\varphi(x, y, z, t)$. Now, substitute $z$ instead of $t$: $$(x + (y + z)) + ((x + y) + z) \ne ((x + y) + z) + (x + (y + z)),$$
contradiction with commutativity of addition.
\item Right distributivity.
Suppose there is $x, y, z$ such that $x(y + z) \ne xy + xz$. Consider the formula $\varphi(x, y, z, t) = \Big(x(y + z) + xy + xt \ne xy + xz + x(y + t)\Big)$ (since we have already proved associativity we can write terms as $s + t + r$).
It is easy to see that $\varphi(x, y, z, 0)$ and $\neg\varphi(x, y, z, St) \rightarrow \neg \varphi(x, y, z, t)$ are true. By induction we obtain $\forall t \: \varphi(x, y, z, t)$. After substitution $t := z$ we obtain a contradiction with commutativity.
\end{itemize}
All other identities can be proven in the same way. Let's list only the formulas $\varphi(x, y, z, t)$.
\begin{itemize}
\item Left distributivity: $\varphi(x, y, z, t) = \Big((x + y)z + xz + tz \ne xz + yz + (x + t)z\Big)$;
\item commutativity of multiplication: $\varphi(x, y, z, t) = \Big(xy + yt \ne yx + ty\Big)$;
\item associativity of multiplication: $\varphi(x, y, z, t) = \Big(x(yz) + (xy)t \ne (xy)z + x(yt)\Big)$.
\end{itemize}
\end{proof}
Let $\mathcal{M} \vDash \mathsf{I}(\ne) + \forall x \forall y (x + y = y + x)$. By lemma 3.2 $\mathcal{M}$ is a semiring that can be embedded in a ring $\widetilde{\mathcal{M}}$ (as in the proof of the Theorem 2.1). Let $f \in \widetilde{\mathcal{M}}[t]$. We prove by induction on $\deg f$ that the induction for the formula $f(x) = 0$ holds.
If $\deg f = 0$, then $f(0) = f(x)$ for all $x \in M$. If $f(0) = 0$, then $\forall x (f(x) = 0)$.
Let $\deg f = n > 0$, $f(0) = 0$ and $\forall x \in M (f(x) = 0 \rightarrow f(Sx) = 0)$. For $g \in \widetilde{\mathcal{M}}[t]$ we define $\tilde g (t) = g(St) - g(t)$. Denote by $\tilde g^{(k)}$ the $\tilde{(\cdot)}$ applied to $g$ $k$ times.
\begin{proposition}
$\forall k < n \forall x \in M (\tilde f^{(n - k)}(x) = 0)$.
\end{proposition}
\begin{proof}
Note that $\tilde f^{(n)}$ is a constant (since $\deg \tilde g < \deg g$) and $\forall m, k \in \mathbb N (\tilde f^{(k)}(m) = 0)$ (since by usual induction $f(\mathbb N) = \{0\}$ and $g(x) = g(Sx) \rightarrow \tilde g(x) = 0$).
Induction on $k$.
If $k = 0$, then $\tilde f^{(n - k)}$ is a zero constant (by above observations).
Let $k + 1 < n$. Then $\tilde f^{(n - (k + 1))}(0) = 0$ and $\tilde f^{(n - (k + 1))}(x) = 0 \rightarrow \tilde f^{(n - (k + 1))}(Sx) = 0$ (by the induction hypothesis). Since $\deg \tilde f^{(n - (k + 1))} < n$, we can apply induction axiom to the formula $\tilde f^{(n - (k + 1))}(x) = 0$ and obtain that for all $x \in M$ $\tilde f^{(n - (k + 1))}(x) = 0$.
\end{proof}
Now, suppose that there exists $x_0 \in M$ such that $f(x_0) \ne 0$. Consider the formula $f(x) - f(x_0) \ne 0$. Then, $f(0) - f(x_0) \ne 0$ and $f(x) - f(x_0) \ne 0 \rightarrow f(Sx) - f(x_0) \ne 0$ (since $(f(Sx) - f(x_0)) - (f(x) - f(x_0)) = \tilde f^{(1)} (x) = 0$). Since $\mathcal{M} \vDash \mathsf{I}(\ne)$, we obtain $\forall x \in M (f(x) - f(x_0) \ne 0)$. It is a contradiction since we can substitute $x_0$ instead of $x$.
\end{proof}
\section{Remaining questions}
In this section we formulate some remaining problems.
\begin{problem}
Does $\mathsf I(=)$ follow from $\mathsf I(\ne)$?
\end{problem}
If the answer to Problem 1 is negative (i.e. $\mathsf I(\ne) \nvdash \mathsf I(=)$), then by Theorem 3.3 any countermodel must have noncommutative addition.
We introduce a structure with noncommutative operations. Informally speaking, this is an analogue of the $\mathbb Z[X]$, but with noncommutative operations.
Clearly, since the commutativity of operations is provable in $\mathsf I(=)$, this structure will not be a model of $\mathsf I(=)$.
Consider all formal sums of the form $a_1 X^{i_1} + \dots + a_n X^{i_n}$ (the order of the sum is significant and we allow the sum to be empty), where $a_j \in \mathbb Z$ and $i_j \in \mathbb N$. We introduce the following reductions of such sums:
$$a_1 X^{i_1} + \dots + a_{j - 1} X^{i_{j - 1}} + 0 X^{i_j} + a_{j + 1} X^{i_{j + 1}} + \dots + a_n X^{i_n} \mapsto a_1 X^{i_1} + \dots + a_{j - 1} X^{i_{j - 1}} + a_{j + 1} X^{i_{j + 1}} + \dots + a_n X^{i_n}$$
and
$$a_1 X^{i_1} + \dots + a_{j - 1} X^{i_{j - 1}} + a_{j} X^{i_{j - 1}} + \dots + a_n X^{i_n} \mapsto a_1 X^{i_1} + \dots + (a_{j - 1} + a_{j}) X^{i_{j - 1}} + \dots + a_n X^{i_n}.$$
Let $\sim$ be the least equivalence relation, containing $\mapsto$.
\begin{definition}
A sum $A$ is in \textit{normal form} (NF) if there is no $B$ such that $A \mapsto B$.
\end{definition}
\begin{remark}
It is easy to see that for every sum $A$ there is a unique sum $B$ such that $A \sim B$ and $B$ is in NF.
\end{remark}
We will consider these sums up to $\sim$.
Operations are introduced in the following way:
$$(a_1 X^{i_1} + \dots + a_n X^{i_n}) + (b_1 X^{j_1} + \dots + b_m X^{j_m}) = a_1 X^{i_1} + \dots + a_n X^{i_n} + b_1 X^{j_1} + \dots + b_m X^{j_m},$$
$$S A = A + 1,$$
if $ b \geqslant 0$:
$$(a_1 X^{i_1} + \dots + a_n X^{i_n}) \cdot b X^j = \underbrace{(a_1 X^{i_1 + j} + \dots + a_n X^{i_n + j}) + \dots + (a_1 X^{i_1 + j} + \dots + a_n X^{i_n + j})}_{b \textit{ times}}, $$
if $b < 0$:
$$(a_1 X^{i_1} + \dots + a_n X^{i_n}) \cdot b X^j = \underbrace{(-a_n X^{i_n + j} - \dots - a_1 X^{i_1 + j}) + \dots + (-a_n X^{i_n + j} - \dots - a_1 X^{i_1 + j})}_{|b| \textit{ times}}, $$
$$A \cdot (b_1 X^{j_1} + \dots + b_m X^{j_m}) = A \cdot b_1 X^{j_1} + \dots + A \cdot b_m X^{j_m}.$$
As we can see, the result of the operations respects the equivalence relation introduced above.
Let us call this structure $\mathcal M$. To get from $\mathcal M$ a model of $\mathsf Q$, we need to take only the ``nonnegative'' (positive and zero) elements of $\mathcal M$. We call a sum \emph{positive} if in its normal form the sum of all coefficients before $X$'s with the greatest degree is positive. For example, $-X + X^2$ and $-X^2 + X + 2X^2$ are positive, but $X - X^2$ is not. It is easy to see that the sum and the product of any two nonnegative sums are nonnegative. As usual, we denote the substructure of nonnegative elements $\mathcal M^+$.
Now, $\mathcal M^+ \vDash \mathsf Q$.
\begin{hypothesis}
The introduced structure $\mathcal M^+$ is a model of $\mathsf I(\ne)$.
\end{hypothesis}
If this hypothesis turns out to be true, then $\mathsf I(\ne) \nvdash \mathsf I(=)$.
\begin{problem}
Is $\mathsf I(=, \ne)$ equivalent to $\mathsf{IOpen}(=)$ (induction for quantifier-free formulas, containing only atomic formulas of the form $s = t$)?
\end{problem}
There is a following result on the alternative axiomatization of $\mathsf{IOpen(=)}$, which can help in solving this problem.
\begin{theorem}[\cite{shepherdson:1967}, Theorem 2]
$\mathsf{IOpen(=)}$ is equivalent to the theory, consisting of $\mathsf Q$, commutativity, associativity and distributivity of addition and multiplication, and the scheme of axioms of the form
$$\underline{d} x = \underline{d} x' \rightarrow \forall y \bigvee\limits_{i = 0}^{d - 1} ((y + i) x = (y + i) x')$$
for all $d \geqslant 2$, where $\underline{d} = S^d(0)$.
\end{theorem}
\iffalse
There is one candidate for the role of a countermodel for $\mathsf{IOpen}(=)$. Let $\mathbb Z[X, Y]^+ = \{f \in \mathbb Z[X, Y]|$ all coefficients before monomials of the greatest degree of $f$ are positive$\}$. Consider the model $^{\mathbb Z[X, Y]^+}\!/\!_{(2 X - 2 Y, X^2 - Y^2)}$.
\begin{proposition}
\begin{enumerate}
\item[(i)] $\mathsf{IOpen}(=) \vdash (x + x = y + y) \rightarrow (z x = z y \vee (z + 1) x = (z + 1) y)$;
\item[(ii)] $^{\mathbb Z[X, Y]^+}\!/\!_{(2 X - 2 Y, X^2 - Y^2)} \nvDash \mathsf{IOpen}(=)$.
\end{enumerate}
\end{proposition}
\begin{proof}
(i) Let fix $x$ and $y$ such that $x + x = y + y$. We prove $z x = z y \vee (z + 1) x = (z + 1) y$ by induction on $z$.
If $z = 0$, $0 \cdot x = 0 \cdot y$.
Suppose $z x = z y \vee (z + 1) x = (z + 1) y$. If $z x = z y$, then $(S z + 1) x = z x + 2 x = z y + 2 y = (S z + 1) y$. If $(z + 1) x = (z + 1) y$, then $S z \cdot x = S z \cdot y$.
(ii) The formula from (i) is false in this model.
$x := X$, $y := Y$, $z := X$.
Suppose $z x = z y$, i.e. $X^2 = X Y$. Then there are polynomials $P, Q$ such that $X^2 - X Y = 2P(X - Y) + Q(X^2 - Y^2)$. Then $X = 2 P + Q(X + Y)$. Let $a_X$ and $a_Y$ be the coefficients before $X$ and $Y$ in $P$, and let $b$ be the free coefficient in $Q$. Then $1 = 2 a_X + b$ and $0 = 2 a_Y + b$, and we have a contradiction: $b$ would have to be both odd and even. So, $z x \ne z y$.
Suppose $(z + 1) x = (z + 1) y$, i.e. $X^2 + X = X Y + Y$. Then $X + 1 = 2 P + Q(X + Y)$ and the free coefficient of the right hand side is even. So, $(z + 1) x \ne (z + 1) y$.
\end{proof}
\begin{hypothesis}
$^{\mathbb Z[X, Y]^+}\!/\!_{(2 X - 2 Y, X^2 - Y^2)}$ is a model of $\mathsf{I}(=, \ne)$.
\end{hypothesis}
\fi
\end{document} |
\begin{document}
\title{{\small{}Thick Points of High-Dimensional Gaussian Free Fields}}
\author{Linan Chen}
\selectlanguage{american}
\address{Department of Mathematics and Statistics, McGill University, 805
Sherbrooke Street West, Montréal, QC, H3A 0B9, Canada. }
\email{Email: lnchen@math.mcgill.ca}
\keywords{\noindent Gaussian free field, polynomial singularity, thick point,
Hausdorff dimension}
\subjclass[2000]{\noindent 60G60, 60G15.}
\thanks{The author would like to thank Daniel Stroock for helpful discussions.
The author is partially supported by NSERC Discovery Grant G241023.}
\selectlanguage{english}
\begin{abstract}
This work aims to extend the existing results on thick points of logarithmic-correlated
Gaussian Free Fields to Gaussian random fields that are more singular.
To be specific, we adopt a sphere averaging regularization to study
polynomial-correlated Gaussian Free Fields in higher-than-two dimensions.
Under this setting, we introduce the definition of thick points which,
heuristically speaking, are points where the value of the Gaussian
Free Field is unusually large. We then establish a result on the Hausdorff
dimension of the sets containing thick points.
\end{abstract}
\maketitle
\section{Introduction}
Many recent developments in statistical physics and probability theory
have seen Gaussian Free Field (GFF) as an indispensable tool. Heuristically
speaking, GFFs are analogues of Brownian motion with multidimensional
time parameters. Just as Brownian motion is thought of as a natural
interpretation of ``random curve'', GFFs are considered as promising
candidates for modeling ``random surface'' or ``random manifold'',
which ultimately lead to the study of random geometry. Motivated by
their importance, GFFs have been widely studied both in discrete and
continuum settings, and certain geometric properties of GFFs have
been revealed. For example, the distribution of extrema and near-extrema
of two-dimensional log-correlated discrete GFFs are studied by Ding
\emph{et al }\cite{DingZeitouni,DingRoyZeitouni,ChatterjeeDemboDing}.
However, for continuum GFFs, the notion of ``extrema'' is not applicable,
because even in the two-dimensional case a generic element of the
GFF is only a tempered distribution which is not defined point-wisely.
In fact, it is the singularity of GFFs that poses most challenges
in obtaining analytic results on the geometry of GFFs. To overcome
most of the challenges, one needs to apply a procedure
\footnote{In the literature of physics, such a procedure is called a ``regularization''.
} to approximate point-wise values of GFFs. One such procedure is to
average GFFs over some sufficiently ``nice'' Borel sets. Even though
it is a tempered distribution, a generic element of a GFF can be integrated
over sufficiently regular submanifolds. Using this idea, the notion
of ``thick point''
\footnote{The term ``thick point'' is borrowed from the literature of stochastic
analysis. There it refers to the extremes of the occupation measure
of a stochastic process.
} for continuum GFFs, as the analogue of extrema of discrete GFFs,
is introduced and studied by Hu, Miller and Peres in \cite{HMP}.
More specifically, let $h$ be a generic element of the GFF associated
with the operator $\Delta$ on a bounded domain $D\subseteq\mathbb{R}^{2}$
with the Dirichlet boundary condition. Governed by the properties
of the Green's function of $\Delta$ in 2D, such a GFF is logarithmically
correlated, and it is possible to make sense of the circular average
of $h$:
\[
\bar{h}_{t}\left(z\right):=\frac{1}{2\pi t}\int_{\partial B\left(z,t\right)}h\left(x\right)\sigma\left(dx\right)
\]
where $z\in D$, $\partial B\left(z,t\right)$ is the circle centered
at $z$ with radius $t$ and $\sigma\left(dx\right)$ is the length
measure along the circle. To get an approximation of ``$h\left(z\right)$'',
it is to our interest to study $\bar{h}_{t}\left(z\right)$ as $t\searrow0$.
For every $a\geq0$, the set of $a-$thick points of $h$ are defined
in \cite{HMP} as
\begin{equation}
T_{h}^{a}:=\left\{ z\in D:\,\lim_{t\searrow0}\,\frac{\bar{h}_{t}\left(z\right)}{\left(-\ln t\right)}=\sqrt{\frac{a}{\pi}}\right\} .\label{eq:2D thick point}
\end{equation}
With $z$ fixed, the circular average process $\left\{ \bar{h}_{t}\left(z\right):\, t\in(0,1]\right\} $
has the same distribution as a Brownian motion $\left\{ B_{\tau}\left(z\right):\,\tau\geq0\right\} $
up to a deterministic time change $\tau=\left(-\ln t\right)/\sqrt{2\pi}$,
and as $t\searrow0$, $\bar{h}_{t}\left(z\right)$ behaves just like
$B_{\tau}\left(z\right)$ as $\tau\nearrow\infty$. Then, for any
given $z\in D$, written in terms of $\left\{ B_{\tau}\left(z\right):\,\tau\geq0\right\} $,
the limit involved in (\ref{eq:2D thick point}) is equivalent to
\[
\lim_{\tau\rightarrow\infty}\,\frac{B_{\tau}\left(z\right)}{\tau}=\sqrt{2a}
\]
which occurs with probability zero for any $a>0$. Therefore, $a-$thick
points, so long as $a>0$, are locations where the field value is
``unusually'' large. The authors of \cite{HMP} prove that for every
$a\in\left[0,2\right]$, $\dim_{\mathcal{H}}\left(T_{h}^{a}\right)=2-a$
a.s., where ``$\dim_{\mathcal{H}}$'' denotes the Hausdorff dimension.
Thick points characterize a basic aspect of the ``landscape'' of
GFFs, that is, where the ``high peaks'' occur, and hence thick points
are of importance to understanding the geometry of GFFs. Besides,
the sets containing thick points also arise naturally as supports
of random measures. For example, the Liouville quantum gravity measure
constructed by Duplantier and Sheffield in \cite{DS1} is supported
on a thick point set. Another such example is multiplicative chaos.
In Kahane's paper \cite{Kah}, it is pointed out that multiplicative
chaos lives on a fractal set, which is essentially a thick point set
in a different context. More recently, the results on the support
of multiplicative chaos are reviewed by Rhodes and Vargas in \cite{RV13}.
Through different approximation procedures, the results in \cite{HMP}
are extended by Cipriani and Hazra to more general log-correlated
GFFs (\cite{CiprianiHazra13,CiprianiHazra14}). It is shown that for
log-correlated GFFs in any dimensions, one can similarly define thick
point sets as in (\ref{eq:2D thick point}) and a result on Hausdorff
dimensions of such sets is in order. However, to the best of the author's
knowledge, there had been no comparable study of thick points for
GFFs that are more singular, e.g., polynomial-correlated GFFs. In
fact, to date little is known about the geometry of such GFFs. Inspired
by the approach presented in \cite{HMP}, this article lays out the
first step of an attempt to explore geometric problems associated
with polynomial-correlated GFFs in any dimensions.
The main focus of this article is to extend the techniques and the
results on thick points of log-correlated GFFs to polynomial-correlated
GFFs on $\mathbb{R}^{\nu}$ for any $\nu>2$. Intuitively speaking,
compared with the log-correlated counterparts, GFFs that are polynomially
correlated consist of generic elements that are more singular so the
``landscape'' of such a field is ``rougher'', and the higher the
dimension $\nu$ is, the worse it becomes. To make these remarks rigorous
and to bring generality to our approach, we adopt the theory of the
Abstract Wiener Space (\cite{aws}) to interpret general Gaussian
random fields, including GFFs with any degree of polynomial singularity
in any dimensions. Let $\theta$ be a generic element of such a field.
It is always possible, by averaging $\theta$ over codimension-1 spheres
centered at $x\in\mathbb{R}^{\nu}$, to obtain a proper approximation
$\bar{\theta}_{t}\left(x\right)$ which approaches ``$\theta\left(x\right)$''
as $t\searrow0$. We give a careful analysis of the two parameter
Gaussian family
\[
\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu},t\in(0,1]\right\}
\]
and use the concentric spherical averages (with $x$ fixed) to define
thick points. It turns out that, instead of the most straightforward
analogue of (\ref{eq:2D thick point}), a more suitable definition
of thick point for the degree-$(\nu-2)$-polynomial-correlated GFF
is that, for $\gamma\geq0$, $x$ is a $\gamma-$thick point of $\theta$
if and only if
\begin{equation}
\limsup_{t\searrow0}\,\frac{\bar{\theta}_{t}\left(x\right)}{\sqrt{-G\left(t\right)\ln t}}\geq\sqrt{2\nu\gamma}\label{eq:thick point def}
\end{equation}
where $G\left(t\right):=\mathbb{E}\left[\left(\bar{\theta}_{t}\left(x\right)\right)^{2}\right]$.
In a similar spirit as (\ref{eq:2D thick point}), if $\gamma>0$,
then a $\gamma-$thick point is a location where $\theta$ is unusually
large. By adapting the approach presented in \cite{HMP}, we establish
the result (Theorem \ref{thm:hausdorff dimension of thick point set})
that, if $T_{\theta}^{\gamma}$ is the set consisting of all the $\gamma-$thick
points of $\theta$ in the unit cube in $\mathbb{R^{\nu}}$, then
\[
\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)=\nu\left(1-\gamma\right)\;\mbox{a.s..}
\]
Moreover, we investigate the relation between (\ref{eq:2D thick point})
and (\ref{eq:thick point def}), and show that (Theorem \ref{thm: no perfect thick piont})
due to the higher-order singularity of the polynomial-correlated GFFs,
with probability one, the ``perfect'' $\gamma-$thick point, i.e.,
$x$ such that
\begin{equation}
\lim_{t\searrow0}\,\frac{\bar{\theta}_{t}\left(x\right)}{\sqrt{-G\left(t\right)\ln t}}=\sqrt{2\nu\gamma},\label{eq:perfect thick point def}
\end{equation}
does not exist, which explains why (\ref{eq:thick point def}) is
more suitable a choice than (\ref{eq:2D thick point}) as the definition
of thick point for GFFs that are polynomially correlated. On the other
hand, if we relax the condition in (\ref{eq:perfect thick point def})
to
\begin{equation}
\lim_{n\rightarrow\infty}\,\frac{\bar{\theta}_{r_{n}}\left(x\right)}{\sqrt{-G\left(r_{n}\right)\ln r_{n}}}=\sqrt{2\nu\gamma},\label{eq:thick point along sequence}
\end{equation}
where $\left\{ r_{n}:\, n\geq0\right\} $ is any sequence that decays
to zero sufficiently fast, then we find out (Theorem \ref{thm:thick point along sequence})
that, if $ST_{\theta}^{\gamma}$ is the set consisting of all the
points $x$ in the unit cube in $\mathbb{R^{\nu}}$ that satisfies
(\ref{eq:thick point along sequence}), then
\[
\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)=\nu\left(1-\gamma\right)\;\mbox{a.s..}
\]
Some lemmas we obtained during the process are of independent interest.
In $\mathsection2$ we briefly introduce the theory of the Abstract
Wiener Space as the foundation for the study of GFFs. In $\mathsection3$
we give a detailed study of the Gaussian family consisting of spherical
averages of the GFFs. These are the main tools that will be exploited
in later parts of this article. Our main results are stated in $\mathsection4$
and at the beginning of $\mathsection5$. In particular, the result
on $\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)$ is proved
by establishing the upper bound and the lower bound separately. The
upper bound is proved in $\mathsection4.1$, and the lower bound is
established in multiple steps in $\mathsection5$.
\section{Abstract Wiener Space and Gaussian Free Fields}
\selectlanguage{american}
The theory of Abstract Wiener Space (AWS), first introduced by Gross
\cite{aws}, provides an analytical foundation for the construction
and the study of Gaussian measures in infinite dimensions. To be specific,
given a real separable Banach space $E$, a non-degenerate centered
Gaussian measure $\mathcal{W}$ on $E$ is a Borel probability measure
such that for every $x^{*}\in E^{*}\backslash\{0\}$, the functional
$x\in E\mapsto\left\langle x,x^{*}\right\rangle \in\mathbb{R}$ has
non-degenerate centered Gaussian distribution under $\mathcal{W}$,
where $E^{*}$ is the space of bounded linear functionals on $E$,
and $\left\langle \cdot,x^{*}\right\rangle $ is the action of $x^{*}\in E^{*}$
on $E$. Further assume that $H$ is a real separable Hilbert space
which is continuously embedded in $E$ as a dense subspace. Then $E^{*}$
can be continuously and densely embedded into $H$, and for every
$x^{*}\in E^{*}$ there exists a unique $h_{x^{*}}\in H$ such that
$\left\langle h,x^{*}\right\rangle =\left(h,h_{x^{*}}\right)_{H}$
for all $h\in H$. Under this setting if the Gaussian measure $\mathcal{W}$
on $E$ has the following characteristic function:
\[
\mathbb{E}^{\mathcal{W}}\left[\exp\left(i\left\langle \cdot,x^{*}\right\rangle \right)\right]=\exp\left(-\frac{\left\Vert h_{x^{*}}\right\Vert _{H}^{2}}{2}\right)\mbox{ for every }x^{*}\in E^{*},
\]
then the triple $\left(H,E,\mathcal{W}\right)$ is called an \emph{Abstract
Wiener Space}. Moreover, since $\left\{ h_{x^{*}}:\, x^{*}\in E^{*}\right\} $
is dense in $H$, the mapping
\[
\mathcal{I}:\, h_{x^{*}}\in H\mapsto\mathcal{I}\left(h_{x^{*}}\right):=\left\langle \cdot,x^{*}\right\rangle \in L^{2}\left(\mathcal{W}\right)
\]
can be uniquely extended to a linear isometry between $H$ and $L^{2}\left(\mathcal{W}\right)$.
The extended isometry, also denoted by $\mathcal{I}$, is the \emph{Paley-Wiener
map} and its images $\left\{ \mathcal{I}\left(h\right):\, h\in H\right\} $,
known as the \emph{Paley-Wiener integrals}, form a centered Gaussian
family whose covariance is given by
\[
\mathbb{E}^{\mathcal{W}}\left[\mathcal{I}\left(h\right)\mathcal{I}\left(g\right)\right]=\left(h,g\right)_{H}\mbox{ for all }h,g\in H.
\]
Therefore, if $\left\{ h_{n}:n\geq1\right\} $ is an orthonormal basis
of $H$, then $\left\{ \mathcal{I}\left(h_{n}\right):n\geq1\right\} $
is a family of i.i.d. standard Gaussian random variables. In fact,
\begin{equation}
\mbox{ for }\mathcal{W}-\mbox{a.e. }x\in E,\quad x=\sum_{n\geq1}\mathcal{I}\left(h_{n}\right)\left(x\right)h_{n}.\label{eq:H_basis expansion}
\end{equation}
Although $\mathcal{W}$ is a measure on $E$, it is the inner product
of $H$ that determines the covariance structure of $\mathcal{W}$.
$H$ is referred to as the \emph{Cameron-Martin space} of $\left(H,E,\mathcal{W}\right)$.
The theory of AWS says that given any separable Hilbert space $H$,
one can always find $E$ and $\mathcal{W}$ such that the triple $\left(H,E,\mathcal{W}\right)$
forms an AWS. On the other hand, given a separable Banach space $E$,
any non-degenerate centered Gaussian measure $\mathcal{W}$ on $E$
must exist in the form of an AWS. That is to say that, AWS is the
``natural'' format in which any infinite dimensional Gaussian measure
exists. For further discussions on the construction and the properties
of AWS, we refer to \cite{aws}, \cite{awsrevisited}, \cite{add_Gaus}
and $\mathsection8$ of \cite{probability}.
\selectlanguage{english}
We now apply the general theory of AWS to study Gaussian measures
on function or generalized function spaces. To be specific, given
$\nu\in\mathbb{N}$ and $p\in\mathbb{R}$, consider the Sobolev space
$H^{p}:=H^{p}\left(\mathbb{R}^{\nu}\right)$, which is the closure
of $C_{c}^{\infty}\left(\mathbb{R}^{\nu}\right)$, the space of compactly
supported smooth functions on $\mathbb{R}^{\nu}$, under the inner
product given by, for $\phi,\,\psi\in C_{c}^{\infty}\left(\mathbb{R}^{\nu}\right)$,
\begin{eqnarray*}
\left(\phi,\,\psi\right)_{_{H^{p}}} & := & \left(\left(I-\Delta\right)^{p}\phi,\psi\right)_{L^{2}\left(\mathbb{R}^{\nu}\right)}\\
& = & \frac{1}{\left(2\pi\right)^{\nu}}\int_{\mathbb{R}^{\nu}}\left(1+\left|\xi\right|^{2}\right)^{p}\hat{\phi}\left(\xi\right)\overline{\hat{\psi}\left(\xi\right)}d\xi,
\end{eqnarray*}
where $\hat{\cdot}$ denotes the Fourier transform. $\left(H^{p},\,\left(\cdot,\cdot\right)_{H^{p}}\right)$
is a separable Hilbert space, and it will be taken as the Cameron-Martin
space for the discussions in this article. As mentioned earlier, there
exists a separable Banach space $\Theta^{p}:=\Theta^{p}\left(\mathbb{R}^{\nu}\right)$
and a Gaussian measure $\mathcal{W}^{p}:=\mathcal{W}^{p}\left(\mathbb{R}^{\nu}\right)$
on $\Theta^{p}$ such that the triple $\left(H^{p},\Theta^{p},\mathcal{W}^{p}\right)$
forms an AWS, to which we refer as the \emph{dim-$\nu$ order-$p$}
\emph{Gaussian Free Field }(GFF)
\footnote{In physics literature, the term ``GFF'' only refers to the case
when $p=1$. Here we slightly extend the use of this term and continue
to use GFF.
}.\emph{ }It is clear that the covariance of such a field is determined
by the Green's function of $\left(I-\Delta\right)^{p}$ on $\mathbb{R}^{\nu}$.
To give explicit formulations for the GFFs introduced in the framework
above, we review the result in \foreignlanguage{american}{\emph{\cite{probability}}}
($\mathsection8.5$) that, when $p=\frac{\nu+1}{2}$, $\Theta^{\frac{\nu+1}{2}}$
can be taken as
\[
\Theta^{\frac{\nu+1}{2}}:=\left\{ \theta\in C\left(\mathbb{R}^{\nu}\right):\lim_{\left|x\right|\rightarrow\infty}\,\frac{\left|\theta\left(x\right)\right|}{\log\left(e+\left|x\right|\right)}=0\right\} ,
\]
equipped with the norm
\[
\left\Vert \theta\right\Vert _{\Theta^{\frac{\nu+1}{2}}}:=\sup_{x\in\mathbb{R}^{\nu}}\,\frac{\left|\theta\left(x\right)\right|}{\log\left(e+\left|x\right|\right)}.
\]
In other words, the dim-$\nu$ order-$\frac{\nu+1}{2}$ GFF consists
of continuous functions on $\mathbb{R}^{\nu}$. More generally, for
$p\in\mathbb{R}$, $H^{p}$ is the isometric image of $H^{\frac{\nu+1}{2}}$
under the Bessel-type operator $\left(I-\Delta\right)^{\frac{\nu+1-2p}{4}}$.
Therefore, we can take $\Theta^{p}$ to be the image of $\Theta^{\frac{\nu+1}{2}}$
under $\left(I-\Delta\right)^{\frac{\nu+1-2p}{4}}$ and the corresponding
Gaussian measure is
\[
\mathcal{W}^{p}=\left(\left(I-\Delta\right)^{-\frac{\nu+1-2p}{4}}\right)_{\star}\mathcal{W}^{\frac{\nu+1}{2}}.
\]
In addition, if we identify $H^{-p}$ as the dual space of $H^{p}$,
then $\left(\Theta^{p}\right)^{*}\subseteq H^{-p}$ and for every
$\lambda\in\left(\Theta^{p}\right)^{*}$, it is easy to see that
\begin{equation}
\lambda\mapsto h_{\lambda}:=\left(I-\Delta\right)^{-p}\lambda\label{eq:(1-Delta)^(-p)}
\end{equation}
gives the unique element $h_{\lambda}\in H^{p}$ such that the action
of $\lambda\in\left(\Theta^{p}\right)^{*}$, when restricted on $H^{p}$,
coincides with $\left(\cdot,h_{\lambda}\right)_{H^{p}}$. Moreover,
the map (\ref{eq:(1-Delta)^(-p)}) can also be viewed as an isometry
between $H^{-p}$ and $H^{p}$. For $\lambda\in H^{-p}$, we still
use ``$h_{\lambda}$'' to denote the image of $\lambda$ under (\ref{eq:(1-Delta)^(-p)}).
Then the Paley-Wiener integrals $\left\{ \mathcal{I}\left(h_{\lambda}\right):\lambda\in H^{-p}\right\} $
form a centered Gaussian family with the covariance
\[
\mathbb{E}^{\mathcal{W}^{p}}\left[\mathcal{I}\left(h_{\lambda}\right)\mathcal{I}\left(h_{\eta}\right)\right]=\left(h_{\lambda},h_{\eta}\right)_{H^{p}}=\left(\lambda,\eta\right)_{H^{-p}}\mbox{ for every }\lambda,\eta\in H^{-p}.
\]
It is clear from the discussions above that with the dimension $\nu$
fixed, the larger the order $p$ is, the more regular the elements
of the GFF are; on the other hand, if $p$ is fixed, then the higher
the dimension $\nu$ is, the more singular the GFF becomes. In most
of the cases that are of interest to us, generic elements of GFFs
are only tempered distributions. For example, this is the case with
GFFs that are logarithmically correlated. Interpreted under the framework
introduced above, log-correlated GFFs are dim-$\nu$ order-$(\nu/2)$
GFFs, i.e., with $p=\nu/2$, since the Green's function of $\left(I-\Delta\right)^{\nu/2}$
on $\mathbb{R}^{\nu}$ has logarithmic singularity along
the diagonal. On the other hand, if $2p\in\mathbb{N}$ and $2p<\nu$,
the Green's function has polynomial singularity with degree $\nu-2p$
and hence the corresponding GFFs are polynomially correlated. In this
article, we focus on studying certain geometric properties of polynomial-correlated
GFFs with
\footnote{The GFFs with $p$ being half integers (and hence the operator is
non-local) are considered by O. Nadeau-Chamard and the author in a
separate paper which is currently in preparation.
} $p\in\mathbb{N}$ and $p<\nu/2$.
We finish this section by remarking that instead of using the Bessel-type
operator $\left(I-\Delta\right)^{p}$ to construct GFFs on $\mathbb{R}^{\nu}$,
one can also use the operator $\Delta^{p}$, equipped with proper
boundary conditions, to construct GFFs on bounded domains in $\mathbb{R}^{\nu}$ (e.g.,
\cite{HMP}, \cite{DS1} and \cite{Shef}). The field elements obtained
in either way possess similar local properties. However, $\left(I-\Delta\right)^{p}$
rather than $\Delta^{p}$ is a better choice for this project for
the technical reason that $\left(I-\Delta\right)^{p}$ allows the
GFF to be defined on the entire space, and hence we do not have to
specify a boundary condition, which is an advantage at least when
$p>1$.
\section{Spherical Averages of Gaussian Free Fields}
For the rest of this article, we assume that $\nu,p\in\mathbb{N}$,
$\nu>2$ and $1\leq p<\nu/2$, and $\theta$ is a generic element
of the dim-$\nu$ order-$p$ GFF, i.e., $\theta\in\Theta^{p}$ is
sampled under $\mathcal{W}^{p}$. Although ``$\theta\left(x\right)$''
is not defined for every $x\in\mathbb{R}^{\nu}$, we can use the ``average''
of $\theta$ over a sphere centered at $x$ to approximate ``$\theta\left(x\right)$'',
as the radius of the sphere tends to zero. To make this precise, we
need to introduce some notation. Let $B\left(x,t\right)$ and $\partial B\left(x,t\right)$
be the open ball and, respectively, the sphere centered at $x\in\mathbb{R}^{\nu}$
with radius (under the Euclidean metric) $t>0$, $\sigma_{x,t}$ the
surface measure on $\partial B\left(x,t\right)$, $\alpha_{\nu}\left(t\right):=\alpha_{\nu}t^{\nu-1}$
the surface area of $\partial B\left(x,t\right)$ with $\alpha_{\nu}:=2\pi^{\nu/2}/\Gamma\left(\nu/2\right)$,
and $\sigma_{x,t}^{ave}:=\sigma_{x,t}/\alpha_{\nu}\left(t\right)$
the spherical average measure over $\partial B\left(x,t\right)$.
We first state the following simple facts about $\sigma_{x,t}^{ave}$.
It is straightforward to derive these results, so we will omit the
proofs.
\begin{lem}
For every $x\in\mathbb{R}^{\nu}$ and $t>0$, $\sigma_{x,t}^{ave}\in H^{-1}\left(\mathbb{R}^{\nu}\right)$
and its Fourier transform is given by
\begin{equation}
\forall\xi\in\mathbb{R}^{\nu},\quad\widehat{\sigma_{x,t}^{ave}}\left(\xi\right)=\frac{\left(2\pi\right)^{\frac{\nu}{2}}}{\alpha_{\nu}}\, e^{i\left(x,\xi\right)_{\mathbb{R}^{\nu}}}\cdot\left(t\left|\xi\right|\right)^{\frac{2-\nu}{2}}J_{\frac{\nu-2}{2}}\left(t\left|\xi\right|\right)\label{eq:Fourier transform of spherical average}
\end{equation}
where $J_{\frac{\nu-2}{2}}$ is the standard Bessel function of the
first kind with index $\frac{\nu-2}{2}$.
\end{lem}
The first assertion of the lemma implies that $\sigma_{x,t}^{ave}\in H^{-p}\left(\mathbb{R}^{\nu}\right)$
for every $p\geq1$. In particular, this fact shows that, no matter
what the dimension is and how singular the GFF is, a codimension-1
sphere is always sufficiently ``nice'' that it is possible to average
the GFF over such a sphere. As a consequence, $\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)$,
viewed as the spherical average of the GFF, is well defined for every
$x\in\mathbb{R}^{\nu}$ and $t>0$ as a Gaussian random variable,
and as $t\searrow0$, from the point of view of tempered distributions,
$\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\left(\theta\right)$
approximates ``$\theta\left(x\right)$''. With the help of (\ref{eq:Fourier transform of spherical average}),
we can compute, by Parseval's identity, the covariance of the Gaussian
family consisting of all the spherical averages and express the covariance
as follows.
\begin{lem}
$\left\{ \mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right):\, x\in\mathbb{R}^{\nu},\, t>0\right\} $
is a two-parameter centered Gaussian family under $\mathcal{W}^{p}$,
and the covariance is given by, for $x,y\in\mathbb{R}^{\nu}$ and
$t,s>0$,
\begin{eqnarray}
& & \begin{split} & \mathbb{E}^{\mathcal{W}^{p}}\left[\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\mathcal{I}\left(h_{\sigma_{y,s}^{ave}}\right)\right]\\
& \qquad=\frac{\left(2\pi\right)^{\nu/2}}{\alpha_{\nu}^{2}\left(ts\left|x-y\right|\right)^{\frac{\nu-2}{2}}}\int_{0}^{\infty}\frac{\tau^{2-\frac{\nu}{2}}J_{\frac{\nu-2}{2}}\left(t\tau\right)J_{\frac{\nu-2}{2}}\left(s\tau\right)J_{\frac{\nu-2}{2}}\left(\left|x-y\right|\tau\right)}{\left(1+\tau^{2}\right)^{p}}d\tau.
\end{split}
\label{eq:covariance for (1-Delta)^s-1}
\end{eqnarray}
In particular, when $x=y$, i.e., in the case of concentric spherical
averages,
\begin{equation}
\mathbb{E}^{\mathcal{W}^{p}}\left[\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\mathcal{I}\left(h_{\sigma_{x,s}^{ave}}\right)\right]=\frac{1}{\alpha_{\nu}\left(ts\right)^{\frac{\nu-2}{2}}}\int_{0}^{\infty}\frac{\tau J_{\frac{\nu-2}{2}}\left(t\tau\right)J_{\frac{\nu-2}{2}}\left(s\tau\right)}{\left(1+\tau^{2}\right)^{p}}d\tau.\label{eq:covariance for (1-Delta)^s concentric}
\end{equation}
\end{lem}
Again, these results follow easily from integral representations of
Bessel functions (\cite{BesselFunctions}, $\mathsection3.3$) combined
with straightforward computations. Proofs are omitted.
To study the distribution of the family of spherical averages, and
to use them effectively to approximate ``pointwise values'' of the
GFF, it is useful to obtain as explicit an expression for the covariance
as possible. To this end, we will first assume $p=1$ and treat the
concentric spherical averages ($\mathsection3.1$) and the non-concentric
ones ($\mathsection3.2$) separately. During this process, we find
for each $x\in\mathbb{R}^{\nu}$ a set of ``renormalized spherical
averages'' which still approximates ``$\theta\left(x\right)$''
but whose covariance has technically desirable properties. In $\mathsection3.3$
we briefly explain the strategy for treating the spherical averages
when $p>1$.
\subsection{When $p=1$. Concentric Spherical Averages.}
For the rest of this article, when $p=1$, we simply write $\left(H^{p},\Theta^{p},\mathcal{W}^{p}\right)$
as $\left(H,\Theta,\mathcal{W}\right)$. It is clear from
(\ref{eq:covariance for (1-Delta)^s concentric}) that the distribution
of the concentric spherical averages $\left\{ \mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right):\, t>0\right\} $
at any given $x\in\mathbb{R}^{\nu}$ is independent of $x$. In fact,
the distribution of the GFF is translation invariant. First we state
a closed formula for the integral in (\ref{eq:covariance for (1-Delta)^s concentric}).
\begin{lem}
\label{lem:covariance C^1(t,s)} Fix any $x\in\mathbb{R}^{\nu}$.
For every $t,s>0$,
\begin{equation}
\mathbb{E}^{\mathcal{W}}\left[\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\mathcal{I}\left(h_{\sigma_{x,s}^{ave}}\right)\right]=\frac{1}{\alpha_{\nu}\left(ts\right)^{\frac{\nu-2}{2}}}I_{\frac{\nu-2}{2}}\left(t\wedge s\right)K_{\frac{\nu-2}{2}}\left(t\vee s\right),\label{eq:concentric cov before renorm}
\end{equation}
where $I_{\frac{\nu-2}{2}}$ and $K_{\frac{\nu-2}{2}}$ are the modified
Bessel functions (with pure imaginary argument) with the index $\frac{\nu-2}{2}$.
\end{lem}
One can use a formula in \cite{BesselFunctions} ($\mathsection$13.53)
to derive (\ref{eq:concentric cov before renorm}) directly. An alternative
proof was provided in the Appendix of \cite{CJ}. So we will omit
the proof of Lemma \ref{lem:covariance C^1(t,s)} and refer to \cite{BesselFunctions}
and \cite{CJ} for details.
By (\ref{eq:concentric cov before renorm}), $\left\{ \mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right):\, t>0\right\} $
is a backward
\footnote{A ``backward'' Markov process is a process which exhibits Markov
property as the parameter ``$t$'' decreases.
} Markov Gaussian process. In fact, (\ref{eq:concentric cov before renorm})
leads to a renormalization of the spherical averages, i.e.,
\[
\bar{\sigma}_{x,t}:=\frac{\left(t/2\right)^{\frac{\nu-2}{2}}}{\Gamma\left(\nu/2\right)\cdot I_{\frac{\nu-2}{2}}\left(t\right)}\cdot\sigma_{x,t}^{ave}.
\]
Denote by $\bar{\theta}_{t}\left(x\right)$ the corresponding Paley-Wiener
integral $\mathcal{I}\left(h_{\bar{\sigma}_{x,t}}\right)\left(\theta\right)$.
Because
\[
\lim_{t\rightarrow0}\frac{\left(t/2\right)^{\frac{\nu-2}{2}}}{\Gamma\left(\nu/2\right)\cdot I_{\frac{\nu-2}{2}}\left(t\right)}=1,
\]
$\bar{\theta}_{t}\left(x\right)$ still is a legitimate approximation
of ``$\theta\left(x\right)$'' as $t\searrow0$. It follows from
(\ref{eq:concentric cov before renorm}) that the covariance of the
Gaussian process $\left\{ \bar{\theta}_{t}\left(x\right):\, t>0\right\} $
is given by, for $0<s\leq t$,
\begin{equation}
\mathbb{E}^{\mathcal{W}}\left[\bar{\theta}_{t}\left(x\right)\bar{\theta}_{s}\left(x\right)\right]=\frac{\alpha_{\nu}}{\left(2\pi\right)^{\nu}}\cdot\frac{K_{\frac{\nu-2}{2}}\left(t\right)}{I_{\frac{\nu-2}{2}}\left(t\right)}=:G\left(t\right).\label{eq:concentric cov renorm}
\end{equation}
The function $G$ defined above is positive and decreasing on $\left(0,\infty\right)$,
and when $t$ is sufficiently small, $G\left(t\right)=\mathcal{O}\left(t^{2-\nu}\right)$,
which reflects the fact that the dim-$\nu$ order-1 GFF is polynomially
correlated with degree $\nu-2$.
\begin{rem}
Since we are only concerned about $\bar{\theta}_{t}\left(x\right)$
when $t$ is small, without loss of generality, we assume that $t\in(0,1]$.
As a consequence of (\ref{eq:concentric cov renorm}), $\left\{ \bar{\theta}_{t}\left(x\right):\, t\in(0,1]\right\} $
is a Gaussian process with independent increment (in the direction
of $t$ decreasing), which, up to a time change, has the same distribution
as a Brownian motion. To be specific, if we define a ``clock'' by
\[
\tau:=G\left(t\right)-G\left(1\right)\mbox{ for }t\in(0,1],
\]
then
\[
\left\{ B_{\tau}:=\bar{\theta}_{G^{-1}\left(\tau+G\left(1\right)\right)}\left(x\right)-\bar{\theta}_{1}\left(x\right):\:\tau\geq0\right\}
\]
has the same distribution as a standard Brownian motion.
\end{rem}
Based on the preceding observations, results about the Brownian motion
can be transported directly to $\left\{ \bar{\theta}_{t}\left(x\right):\, t\in(0,1]\right\} $,
and the behavior of $\bar{\theta}_{t}\left(x\right)$ when $t$ is
small resembles that of the Brownian motion $B_{\tau}$ when $\tau$
is large. For example, by the law of the iterated logarithm,
\begin{equation}
\limsup_{t\searrow0}\frac{\left|\bar{\theta}_{t}\left(x\right)\right|}{\sqrt{2G\left(t\right)\cdot\ln\ln G\left(t\right)}}=1\mbox{ a.s.}\label{eq:LIL for concentric}
\end{equation}
\subsection{When $p=1$. Non-concentric Spherical Averages.}
We now move on to the family of non-concentric spherical averages.
Again, instead of the regular spherical averages, we adopt the renormalized
spherical averages introduced in $\mathsection3.1$. Consider the
two-parameter Gaussian family
\[
\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu},\, t\in(0,1]\right\} ,
\]
and denote by $\mbox{Cov}\left(x,t;\, y,s\right)$ the covariance
of $\bar{\theta}_{t}\left(x\right)$ and $\bar{\theta}_{s}\left(y\right)$
for $x,y\in\mathbb{R}^{\nu}$ and $t,s\in(0,1]$. One can compute
$\mbox{Cov}\left(x,t;\, y,s\right)$ using (\ref{eq:covariance for (1-Delta)^s-1})
and the renormalization. In fact, under certain circumstances, it
is possible to obtain explicit formulas for $\mbox{Cov}\left(x,t;\, y,s\right)$.
\begin{lem}
\label{lem:asymptotics of the cov functions}Let $x,y\in\mathbb{R}^{\nu}$ and
$t,s\in(0,1]$.\\
(i) If $\left|x-y\right|\geq t+s$, i.e., if $B\left(x,t\right)\cap B\left(y,s\right)=\emptyset$,
\begin{equation}
\mbox{Cov}\left(x,t;\, y,s\right)=\left(2\pi\right)^{-\nu/2}\cdot\frac{K_{\frac{\nu-2}{2}}\left(\left|x-y\right|\right)}{\left|x-y\right|^{\frac{\nu-2}{2}}}=:C_{disj}\left(\left|x-y\right|\right).\label{eq:cov non-concentric non-overlapping}
\end{equation}
In particular, $C_{disj}\left(\left|x-y\right|\right)=\mathcal{O}\left(\left|x-y\right|^{2-\nu}\right)$
when $\left|x-y\right|$ is small.\\
(ii) If $t\geq\left|x-y\right|+s$, i.e., if $B\left(x,t\right)\supset B\left(y,s\right)$,
\begin{equation}
\mbox{Cov}\left(x,t;\, y,s\right)=\left(2\pi\right)^{-\nu/2}\cdot\frac{I_{\frac{\nu-2}{2}}\left(\left|x-y\right|\right)}{\left|x-y\right|^{\frac{\nu-2}{2}}}\cdot\frac{K_{\frac{\nu-2}{2}}\left(t\right)}{I_{\frac{\nu-2}{2}}\left(t\right)}=:C_{incl}\left(t,\left|x-y\right|\right).\label{eq:cov non-concentric inclusion}
\end{equation}
In particular, $C_{incl}\left(t,\left|x-y\right|\right)=\mathcal{O}\left(t^{2-\nu}\right)$
when $t$ is small.
\end{lem}
Again, by combining (\ref{eq:covariance for (1-Delta)^s-1}) with
a formula in \cite{BesselFunctions} ($\mathsection$13.53\foreignlanguage{american}{,
pp 429-430}), one can easily verify these results. An alternative
derivation was also provided in the Appendix of \cite{CJ}. We omit
the proofs and refer to \cite{BesselFunctions} and \cite{CJ} for
details. We remark that (\ref{eq:cov non-concentric non-overlapping})
and (\ref{eq:cov non-concentric inclusion}) demonstrate the advantage
of this particular renormalization of the spherical averages. For
the family of the renormalized spherical averages, under the hypothesis
(i) or (ii) in Lemma \ref{lem:asymptotics of the cov functions},
small radius (radii) does not affect the covariance, which favors
convergence as radius (radii) tends to zero.
However, one still needs to treat the renormalized spherical averages
in the most general case. To this end, we introduce the intrinsic
metric $d$ associated with the Gaussian family $\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu},\, t\in(0,1]\right\} $
where
\[
d\left(x,t;\, y,s\right):=\left(\mathbb{E}^{\mathcal{W}}\left[\left|\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{s}\left(y\right)\right|^{2}\right]\right)^{\frac{1}{2}}
\]
for $x,y\in\mathbb{R}^{\nu}$ and $t,s\in(0,1]$. Assuming $0<s\leq t\leq1$,
the triangle inequality implies that
\begin{equation}
d\left(x,t;\, y,s\right)\leq d\left(x,t;\, y,t\right)+\sqrt{G\left(s\right)-G\left(t\right)},\label{eq:triangle for d(x,t;y,s)}
\end{equation}
so to work with $d\left(x,t;\, y,s\right)$, we need to study $d\left(x,t;\, y,t\right)$,
i.e., the intrinsic metric associated with the family $\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu}\right\} $
with $t\in(0,1]$ fixed.
\begin{lem}
\label{lem:estimate for d^2_t non-concentric} There exists a constant
\footnote{Throughout the article, $C_{\nu}$ denotes a constant that only depends
on the dimension, and $C_{\nu}$'s value may vary from line to line.
} $C_{\nu}>0$ such that for every $t\in(0,1]$ and every $x,y\in\mathbb{R}^{\nu}$,
\begin{equation}
d^{2}\left(x,t;\, y,t\right)\leq C_{\nu}\cdot t^{2-\nu}\left(\sqrt{\frac{\left|x-y\right|}{t}}\wedge1\right).\label{eq:estimate intrinsic metric non-concentric}
\end{equation}
\end{lem}
\begin{proof}
Based on (\ref{eq:cov non-concentric non-overlapping}), when $\left|x-y\right|\geq2t$,
\[
d^{2}\left(x,t;\, y,t\right)=2G\left(t\right)-2C_{disj}\left(\left|x-y\right|\right)
\]
which immediately implies (\ref{eq:estimate intrinsic metric non-concentric}).
More generally, using (\ref{eq:covariance for (1-Delta)^s-1}) and
(\ref{eq:covariance for (1-Delta)^s concentric}), we can rewrite
$d^{2}\left(x,t;\, y,t\right)$ as
\[
\begin{split}d^{2}\left(x,t;\, y,t\right) & =\mathbb{E}^{\mathcal{W}}\left[\left(\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{t}\left(y\right)\right)^{2}\right]\\
& =\frac{2\alpha_{\nu}}{\left(2\pi\right)^{\nu}I_{\frac{\nu-2}{2}}^{2}\left(t\right)}\int_{0}^{\infty}\frac{\tau}{1+\tau^{2}}J_{\frac{\nu-2}{2}}^{2}\left(t\tau\right)\Psi\left(\tau\left|x-y\right|\right)d\tau
\end{split}
\]
where $\Psi$ is the function given by
\[
\forall w\in\left(0,\infty\right),\quad\Psi\left(w\right):=1-\frac{\left(2\pi\right)^{\nu/2}}{\alpha_{\nu}}w^{\frac{2-\nu}{2}}J_{\frac{\nu-2}{2}}\left(w\right).
\]
It follows from the properties of $J_{\frac{\nu-2}{2}}$ that $\Psi$
is analytic and
\[
\Psi\left(w\right)=\Gamma\left(\nu/2\right)\sum_{m=1}^{\infty}\frac{\left(-1\right)^{m-1}2^{-2m}}{m!\Gamma\left(\frac{\nu}{2}+m\right)}\cdot w^{2m}.
\]
Clearly, there exists $C_{\nu}>0$ such that $\left|\Psi\left(w\right)\right|\leq C_{\nu}\sqrt{w}$
for all $w\in[0,\infty)$. Therefore,
\[
d^{2}\left(x,t;\, y,t\right)\leq C_{\nu}\cdot t^{2-\nu}\sqrt{\left|x-y\right|}\int_{0}^{\infty}\frac{\tau^{3/2}}{1+\tau^{2}}J_{\frac{\nu-2}{2}}^{2}\left(t\tau\right)d\tau,
\]
and the integral on the right, after a change of variable $u=t\tau$,
becomes
\[
t^{-1/2}\int_{0}^{\infty}\frac{u^{3/2}}{t^{2}+u^{2}}J_{\frac{\nu-2}{2}}^{2}\left(u\right)du\leq t^{-1/2}\int_{0}^{\infty}u^{-1/2}J_{\frac{\nu-2}{2}}^{2}\left(u\right)du=C_{\nu}\cdot t^{-1/2}
\]
which leads to the desired inequality.
\end{proof}
Based on (\ref{eq:triangle for d(x,t;y,s)}) and (\ref{eq:estimate intrinsic metric non-concentric}),
it follows from the Kolmogorov continuity theorem that there exists
a continuous modification of $\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu},\, t\in(0,1]\right\} $.
From now on, we assume that $\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu},\, t\in(0,1]\right\} $
is such a modification. In other words, we assume that for every $\theta\in\Theta$,
$\left(x,t\right)\in\mathbb{R}^{\nu}\times(0,1]\mapsto\bar{\theta}_{t}\left(x\right)\in\mathbb{R}$
is continuous.
Since the distribution of the GFF is translation invariant and the
notion of ``thick point'' only concerns local properties of the
GFF, without loss of generality, we may restrict the GFF to $\overline{S\left(O,1\right)}$
the closed cube centered at the origin with side length $2$ under
the Euclidean metric
\footnote{Similarly, for $x\in\overline{S\left(O,1\right)}$ and $s>0$, $\overline{S\left(x,s\right)}$
is the Euclidean closed cube centered at $x$ with side length $2s$.
}. We will apply the metric entropy method (\cite{AT,Talagrand,Dudley})
to study the boundedness and the continuity of the family $\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\overline{S\left(O,1\right)},\, t\in(0,1]\right\} $.
To set this up, we need to introduce some more notation. For every
compact subset $A\subseteq\overline{S\left(O,1\right)}\times(0,1]$,
let $\mbox{diam}_{d}\left(A\right)$ be the diameter of $A$ under
the metric $d$. $A$ is also compact under $d$, so $A$ can be finitely
covered under $d$. For $\epsilon>0$ and $\mathbf{x}\in\overline{S\left(O,1\right)}\times(0,1]$,
let $B_{d}\left(\mathbf{x},\epsilon\right)$ be the open ball centered
at $\mathbf{x}$ with radius $\epsilon$ under $d$, and $N\left(\epsilon,A\right)$
the smallest number of such balls $B_{d}\left(\mathbf{x},\epsilon\right)$
required to cover $A$. Then $N$ is the metric entropy function with
respect to $d$. Applying the standard entropy methods, we get the
following results.
\begin{lem}
\label{lem:expectation of max non-concentric}There exists a constant
$C_{\nu}>0$ such that for every $t,s\in(0,1]$ and every $x\in\overline{S\left(O,1\right)}$,
\begin{equation}
\mathbb{E}^{\mathcal{W}}\left[\sup_{y\in\overline{S\left(x,s\right)}}\,\left|\bar{\theta}_{t}\left(y\right)\right|\right]\leq C_{\nu}\cdot t^{1-\nu/2}\left(\frac{s^{1/4}}{t^{1/4}}\wedge\sqrt{\ln\left(\frac{s}{t}\right)}\right).\label{eq:expectation of sup spatial}
\end{equation}
\end{lem}
\begin{proof}
By (\ref{eq:estimate intrinsic metric non-concentric}), there exists
$C_{\nu}>0$ such that for every $y,\, y^{\prime}\in\overline{S\left(x,s\right)}$,
\[
d\left(y,t;\, y^{\prime},t\right)\leq C_{\nu}\cdot t^{1-\nu/2}\left(\frac{\left|y-y^{\prime}\right|^{1/4}}{t^{1/4}}\wedge1\right).
\]
First we assume that $2\sqrt{\nu}s\leq t$. For any $\epsilon>0$,
$d\left(y,t;\, y^{\prime},t\right)\leq\epsilon$ whenever $\left|y-y^{\prime}\right|\leq C_{\nu}^{-1}\cdot\epsilon^{4}t^{2\nu-3}$.
Therefore, for a possibly larger $C_{\nu}$,
\[
N\left(\epsilon,\overline{S\left(x,s\right)}\times\left\{ t\right\} \right)\leq C_{\nu}\left(s\epsilon^{-4}t^{3-2\nu}\right)^{\nu}.
\]
Besides, (\ref{eq:estimate intrinsic metric non-concentric}) implies
that
\[
\mbox{diam}_{d}\left(\overline{S\left(x,s\right)}\times\left\{ t\right\} \right)\leq C_{\nu}\cdot s^{1/4}t^{(3-2\nu)/4}.
\]
By the standard results on entropy (\cite{AT}, Theorem 1.3.3), there
exists a universal constant $K>0$ (later $K$ will be absorbed by
$C_{\nu}$) such that
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\sup_{y\in\overline{S\left(x,s\right)}}\left|\bar{\theta}_{t}\left(y\right)\right|\right] & \leq K\int_{0}^{\mbox{diam}_{d}\left(\overline{S\left(x,s\right)}\times\left\{ t\right\} \right)/2}\,\sqrt{\ln N\left(\epsilon,\overline{S\left(x,s\right)}\times\left\{ t\right\} \right)}d\epsilon\\
& \leq4K\nu\int_{0}^{C_{\nu}\cdot s^{1/4}t^{(3-2\nu)/4}}\sqrt{\ln\left(C_{\nu}\cdot s^{1/4}t^{(3-2\nu)/4}\epsilon^{-1}\right)}d\epsilon\\
& \leq C_{\nu}\cdot s^{1/4}t^{(3-2\nu)/4}\int_{0}^{\infty}e^{-u^{2}}u^{2}du,
\end{split}
\]
which leads to (\ref{eq:expectation of sup spatial}).
Next, if $2\sqrt{\nu}s>t$, then $\mbox{diam}_{d}\left(\overline{S\left(x,s\right)}\times\left\{ t\right\} \right)\leq C_{\nu}\cdot t^{1-\nu/2}$.
Following exactly the same arguments as earlier, we arrive at
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\sup_{y\in\overline{S\left(x,s\right)}}\left|\bar{\theta}_{t}\left(y\right)\right|\right] & \leq C_{\nu}\cdot s^{1/4}t^{(3-2\nu)/4}\int_{\sqrt{\ln\left(C_{\nu}\cdot s^{1/4}t^{-1/4}\right)}}^{\infty}\, e^{-u^{2}}u^{2}du.\end{split}
\]
Combining this with the fact that
\[
\int_{a}^{\infty}e^{-u^{2}}u^{2}du=\mathcal{O}\left(ae^{-a^{2}}\right)\mbox{ for sufficiently large }a>0,
\]
we arrive at the desired conclusion.
\end{proof}
\subsection{When $p\geq2$.}
As shown in Lemma \ref{lem:covariance C^1(t,s)}, the concentric spherical
averages of the dim-$\nu$ order-1 GFF form a backward Markov Gaussian
process, which enables the renormalization that transforms it into
a time-changed Brownian motion. However, when $\left(I-\Delta\right)$
is replaced by $\left(I-\Delta\right)^{p}$ for $p\geq2$, spherical
averages of the corresponding GFF no longer possess such properties.
In particular, for the dim-$\nu$ order-$p$ GFF with $p\geq2$, for
any fixed $x\in\mathbb{R}^{\nu}$, the concentric spherical average
process $\left\{ \mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right):\, t\in(0,1]\right\} $
fails to be backward Markovian. Nonetheless, it is still possible
to explicitly compute the covariance of this process, the result of
which shows that, although not being an exact one, the process is
``close'' to becoming a Markov process. To make this rigorous, we
adopt the same method as the one presented in \cite{CJ}. For simplicity,
we only outline the idea here and refer to \cite{CJ} for more details.
The derivations of the covariance of the spherical averages, as shown
in $\mathsection3.1$ and $\mathsection3.2$, can be generalized to
the operator $m^{2}-\Delta$ for any $m>0$. To be specific, if the
operator $I-\Delta$ is replaced by $m^{2}-\Delta$ in constructing
the dim-$\nu$ order-1 GFF, then for every $x,y\in\mathbb{R}^{\nu}$
and every $t,s\in(0,1]$,
\[
\begin{split} & \mathbb{E}^{\mathcal{W}}\left[\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\mathcal{I}\left(h_{\sigma_{y,s}^{ave}}\right)\right]\\
& \qquad\qquad=\frac{\left(2\pi\right)^{\nu/2}}{\alpha_{\nu}^{2}\left(ts\left|x-y\right|\right)^{\frac{\nu-2}{2}}}\int_{0}^{\infty}\frac{\tau^{2-\frac{\nu}{2}}J_{\frac{\nu-2}{2}}\left(t\tau\right)J_{\frac{\nu-2}{2}}\left(s\tau\right)J_{\frac{\nu-2}{2}}\left(\left|x-y\right|\tau\right)}{m^{2}+\tau^{2}}d\tau.
\end{split}
\]
Comparing this expression with the general formula (\ref{eq:covariance for (1-Delta)^s-1}),
one can easily verify that, for the dim-$\nu$ order-$p$ GFF, $\mathbb{E}^{\mathcal{W}^{p}}\left[\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\mathcal{I}\left(h_{\sigma_{y,s}^{ave}}\right)\right]$
is equal to
\[
\begin{split}\frac{\left(2\pi\right)^{\nu/2}/(p-1)!}{\alpha_{\nu}^{2}\left(ts\left|x-y\right|\right)^{\frac{\nu-2}{2}}}\left(\frac{-1}{2m}\frac{d}{dm}\right)_{m=1}^{p-1}\left[\int_{0}^{\infty}\frac{\tau^{2-\frac{\nu}{2}}J_{\frac{\nu-2}{2}}\left(t\tau\right)J_{\frac{\nu-2}{2}}\left(s\tau\right)J_{\frac{\nu-2}{2}}\left(\left|x-y\right|\tau\right)}{m^{2}+\tau^{2}}d\tau\right].\end{split}
\]
In particular, when $x=y$ and $0<s\leq t\leq1$,
\[
\begin{split} & \mathbb{E}^{\mathcal{W}^{p}}\left[\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right)\mathcal{I}\left(h_{\sigma_{x,s}^{ave}}\right)\right]\\
& \qquad\qquad=\frac{1}{\alpha_{\nu}\left(ts\right)^{\frac{\nu-2}{2}}\left(p-1\right)!}\left(\frac{-1}{2m}\frac{d}{dm}\right)_{m=1}^{p-1}\left[K_{\frac{\nu-2}{2}}\left(mt\right)I_{\frac{\nu-2}{2}}\left(ms\right)\right],
\end{split}
\]
the RHS of which obviously takes the form of
\begin{equation}
\sum_{k=1}^{p}a_{k}\left(t\right)b_{k}\left(s\right)\label{eq:cov for order-p GFF}
\end{equation}
where functions $a_{k}$ only depend on $t$ and functions $b_{k}$
only depend on $s$ for each $k=1,...,p$. A covariance of the form
of (\ref{eq:cov for order-p GFF}) does indicate that the Gaussian
process $\left\{ \mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right):t\in(0,1]\right\} $
is not backward Markovian. Heuristically speaking, at any given radius,
the spherical average alone ``provides'' too little information
for one to predict how the process will evolve for smaller radii.
To restore the Markov property, we need to ``collect'' more information
\footnote{This idea was originally proposed by D. Stroock during a discussion
with the author.
} about the GFF over each sphere.
To this end, recall the remark at the end of $\mathsection2$ that
the higher the order of the operator is, the more regular the corresponding
GFF becomes. In particular, for $p\ge2$, the $l$-th derivative of
the spherical average measure in radius, i.e., $\left(d/dt\right)^{l}\sigma_{x,t}^{ave}$
in the sense of tempered distribution, also gives rise to a Paley-Wiener
integral $\mathcal{I}\left(h_{\left(d/dt\right)^{l}\sigma_{x,t}^{ave}}\right)$
for $l=1,\cdots,p-1$. It turns out that, with $x\in\mathbb{R}^{\nu}$
fixed, if $\mathbf{V}_{x,t}$, for $t\in(0,1]$, is the $\mathbb{R}^{p}-$valued
random variable on $\Theta^{p}$ given by
\[
\mathbf{V}_{x,t}:=\left(\mathcal{I}\left(h_{\sigma_{x,t}^{ave}}\right),\mathcal{I}\left(h_{\left(d/dt\right)\sigma_{x,t}^{ave}}\right),\ldots,\mathcal{I}\left(h_{\left(d/dt\right)^{p-1}\sigma_{x,t}^{ave}}\right)\right),
\]
then the process $\left\{ \mathbf{V}_{x,t}:\, t\in(0,1]\right\} $
is an $\mathbb{R}^{p}$-valued Gaussian (backward) Markov process,
and for $0<s\leq t\leq1$,
\begin{equation}
\mathbb{E}^{\mathcal{W}^{p}}\left[\left(\mathbf{V}_{x,t}\right)^{\top}\left(\mathbf{V}_{x,s}\right)\right]=\mathbf{A}\left(t\right)\cdot\mathbf{B}\left(s\right),\label{eq:matrix covariance}
\end{equation}
where ``$\cdot$'' here refers to matrix multiplication, $\mathbf{A}\left(t\right)$
and $\mathbf{B}\left(s\right)$ are two $p\times p$ matrices depending
only on $t$ and, respectively, only on $s$, and for $1\leq i,j\leq p$,
\[
\left(\mathbf{A}\left(t\right)\right)_{ij}=\left(\frac{d}{dt}\right)^{i-1}a_{j}\left(t\right)\mbox{ and }\left(\mathbf{B}\left(s\right)\right)_{ij}=\left(\frac{d}{ds}\right)^{j-1}b_{i}\left(s\right),
\]
where $a_{j}$'s and $b_{i}$'s are as in (\ref{eq:cov for order-p GFF}).
In other words, when collecting simultaneously the spherical average
and its derivatives up to order $p-1$, the Markov property is
restored by this vector-valued process. Furthermore, the matrix $\mathbf{B}\left(s\right)$
is non-degenerate when $s$ is sufficiently small, so (\ref{eq:matrix covariance})
also leads to a renormalization which is $\mathbf{U}_{x,t}:=\mathbf{V}_{x,t}\cdot\mathbf{B}^{-1}\left(t\right)$.
It follows from (\ref{eq:matrix covariance}) that, for $0<s\leq t\leq1$,
\[
\mathbb{E}^{\mathcal{W}^{p}}\left[\left(\mathbf{U}_{x,t}\right)^{\top}\left(\mathbf{U}_{x,s}\right)\right]=\mathbf{B}^{-1}\left(t\right)\cdot\mathbf{A}\left(t\right).
\]
The renormalized process $\left\{ \mathbf{U}_{x,t}:\, t\in(0,1]\right\} $
has independent increment (in the direction of $t$ decreasing). Moreover,
it is possible to find a constant vector $\xi\in\mathbb{R}^{p}$ such
that, as $t\searrow0$,
\[
\left(\sigma_{x,t}^{ave},\,(d/dt)\sigma_{x,t}^{ave},\cdots,\,(d/dt)^{p-1}\sigma_{x,t}^{ave}\right)\cdot\mathbf{B}^{-1}\left(t\right)\cdot\xi^{\top}\rightarrow\delta_{x}
\]
in the sense of tempered distribution; this is because the coefficient
of $(d/dt)^{l}\sigma_{x,t}^{ave}$, as a function of $t$,
decays sufficiently fast as $t\searrow0$ for each $l=1,\cdots,p-1$.
Therefore, $\bar{\theta}_{t}\left(x\right):=\mathbf{U}_{x,t}\left(\theta\right)\cdot\xi^{\top}$
still is a legitimate approximation of ``$\theta\left(x\right)$''
when $t$ is small. In other words, although the derivatives of the
spherical averages are introduced to recover the Markov property,
these derivatives do not affect the approximation of point-wise values
of the GFF. Moreover, the two-parameter family $\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\mathbb{R}^{\nu},\, t\in(0,1]\right\} $
possesses the same properties as those shown in $\mathsection3.1$
and $\mathsection3.2$.
Governed by the Green's function of $\left(I-\Delta\right)^{p}$ on
$\mathbb{R}^{\nu}$, the dim-$\nu$ order-$p$ GFF is polynomially
correlated with the degree of the polynomial being $\nu-2p$. In fact,
later discussions in this article, i.e., the study of thick point,
only requires the existence of an approximation $\bar{\theta}_{t}\left(x\right)$
such as the one obtained above. Therefore, it is sufficient to assume
$p=1$ and investigate the thick point problem for the dim-$\nu$
order-1 GFF with arbitrary $\nu>2$.
\section{Thick Points of Gaussian Free Fields}
To study the thick points of the dim-$\nu$ order-$1$ GFF $\left(H,\Theta,\mathcal{W}\right)$,
the first problem we face is to determine a proper definition for
the notion of ``thick point''. On one hand, inspired by (\ref{eq:2D thick point})
the thick point definition of log-correlated GFFs, we want to investigate
the points $x\in\overline{S\left(O,1\right)}$ where the rate of $\bar{\theta}_{t}\left(x\right)$
``blowing up'' as $t\searrow0$ is comparable with certain function
in $t$ that is singular at $t=0$. On the other hand, compared with
the log-correlated GFFs, a polynomial-correlated GFF has the properties
that, firstly, the point-wise distribution has a larger variance which
makes it harder to achieve an ``unusually'' large value required
by a limit such as the one in (\ref{eq:2D thick point}); secondly,
the near-neighbor correlation is stronger, which makes thick points,
defined in any reasonable sense, tend to stay close to each other,
and hence as a whole the set of thick points looks more sparse. Taking
these considerations into account, we adopt a thick point definition
that is different from (\ref{eq:2D thick point}) but proven to be
more suitable for polynomial-correlated GFFs.
\begin{defn}
Let $\gamma\geq0$. For each $\theta\in\Theta$, $x\in\overline{S\left(O,1\right)}$
is a $\gamma-$\emph{thick point }of $\theta$ if
\begin{equation}
\limsup_{t\searrow0}\,\frac{\bar{\theta}_{t}\left(x\right)}{\sqrt{-G\left(t\right)\ln t}}\geq\sqrt{2\nu\gamma}\label{eq:def of thick point}
\end{equation}
where $G\left(t\right)=\mathbb{E}^{\mathcal{W}}\left[\left(\bar{\theta}_{t}\left(x\right)\right)^{2}\right]$.
\end{defn}
We denote by $T_{\theta}^{\gamma}$ the set of all the $\gamma-$thick
points of $\theta$. Since $\bar{\theta}_{t}\left(x\right)$ is assumed
to be continuous in $\left(x,t\right)\in\overline{S\left(O,1\right)}\times(0,1]$,
$T_{\theta}^{\gamma}$ is a measurable subset of $\overline{S\left(O,1\right)}$.
Moreover, viewing from the perspective of (\ref{eq:LIL for concentric}),
if $\gamma>0$, (\ref{eq:def of thick point}) requires $\bar{\theta}_{t}\left(x\right)$
to grow, as $t\searrow0$, no slower than an unusually large function
in $t$, at least along a sequence in $t$. In this sense, the value
of $\theta$ is unusually large at a $\gamma-$thick point so long
as $\gamma>0$. Compared with (\ref{eq:2D thick point}), the requirement
in (\ref{eq:def of thick point}) is easier to achieve, which contributes
positively to $T_{\theta}^{\gamma}$ having ``detectable'' mass.
In fact, such a deviation from (\ref{eq:2D thick point}) (i.e., replacing
in the definition ``$\lim_{t\searrow0}$'' by ``$\limsup_{t\searrow0}$''
and ``$=$'' by ``$\geq$'') is necessary, as we will see later
in $\mathsection4.2$.
Our main goal is to determine the Hausdorff dimension of $T_{\theta}^{\gamma}$,
denoted by $\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)$.
We state the main result below.
\begin{thm}
\label{thm:hausdorff dimension of thick point set}For $\gamma\in\left[0,1\right]$,
\[
\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)=\nu\left(1-\gamma\right)\mbox{ a.s.}.
\]
Moreover, for $\mathcal{W}-$a.e. $\theta\in\Theta$, $x\in T_{\theta}^{0}$
for Lebesgue-a.e. $x\in\overline{S\left(O,1\right)}$.
On the other hand, for $\gamma>1$, $T_{\theta}^{\gamma}=\emptyset$
a.s..
\end{thm}
The theorem is proven by establishing the upper bound and the lower
bound separately. More specifically, we prove in $\mathsection4.1$
\begin{equation}
\mbox{the upper bound:}\quad\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)\leq\nu\left(1-\gamma\right)\mbox{ a.s.}.\label{eq:upper bound}
\end{equation}
Then we devote the entire $\mathsection5$ to proving
\begin{equation}
\mbox{the lower bound:}\quad\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)\geq\nu\left(1-\gamma\right)\mbox{ a.s.}.\label{eq:lower bound}
\end{equation}
As mentioned earlier, the polynomially singular covariance of the
GFF makes thick points rare and hence hard to detect, as a consequence
of which, the upper bound on the Hausdorff dimension of $T_{\theta}^{\gamma}$
is readily obtained, but deriving the lower bound is more complicated.
\subsection{Proof of the Upper Bound }
The derivation of the upper bound (\ref{eq:upper bound}) follows
an adaptation of the procedure used in \cite{HMP}. To simplify the
notation, we write
\[
D\left(t\right):=\sqrt{-G\left(t\right)\ln t}\;\mbox{ for }t\in(0,1].
\]
\begin{lem}
\label{lem:extreme value of two parameter family}There exists a constant
$C_{\nu}>0$ such that for every $x\in\overline{S\left(O,1\right)}$
and $n\geq1$,
\begin{equation}
\mathbb{E}^{\mathcal{W}}\left[\sup_{\left(y,t\right)\in\overline{S\left(x,2^{-n}\right)}\times\left[2^{-n},\,2^{-n+1}\right]}\,\frac{\bar{\theta}_{t}\left(y\right)}{D\left(t\right)}\right]\leq C_{\nu}\cdot\frac{1}{\sqrt{n}}.\label{eq:expectation of max of two parameter family}
\end{equation}
\end{lem}
\begin{proof}
As in Lemma \ref{lem:expectation of max non-concentric},
we will prove the desired result by the metric entropy method. Let
$n\geq1$ be fixed. For every $\epsilon>0$, set
\[
\tau_{n,\epsilon}:=\frac{1}{2}\left[\left(\frac{\epsilon^{2}}{9}\cdot C_{\nu}^{-1}\cdot2^{-\left(n+1\right)\left(\nu-3/2\right)}\right)^{2}\wedge2^{-n-1}\right]
\]
where $C_{\nu}$, for the moment, is the same constant as in (\ref{eq:estimate intrinsic metric non-concentric}).
Let
\[
\left\{ B\left(y_{l},\tau_{n,\epsilon}\right):\, l=1,\cdots,L_{\epsilon}\right\}
\]
be a finite covering of $\overline{S\left(x,2^{-n}\right)}$ where
$y_{l}\in S\left(x,2^{-n}\right)$ and $L_{\epsilon}$ is the smallest
number of balls $B\left(y_{l},\tau_{n,\epsilon}\right)$ needed to
cover $\overline{S\left(x,2^{-n}\right)}$ and hence $L_{\epsilon}=\mathcal{O}\left(2^{-n\nu}/\tau_{n,\epsilon}^{\nu}\right)$.
By (\ref{eq:estimate intrinsic metric non-concentric}), the choice
of $\tau_{n,\epsilon}$ is such that the diameter of each ball $B\left(y_{l},\tau_{n,\epsilon}\right)$
under the metric $d\left(\cdot,2^{-n-1};\,*,2^{-n-1}\right)$ is no
greater than $\epsilon/3$. In fact, for any $t\geq2^{-n-1}$, (\ref{eq:estimate intrinsic metric non-concentric})
implies that, if $\left|y-y^{\prime}\right|\leq2\tau_{n,\epsilon}$,
then
\[
d^{2}\left(y,t;\, y^{\prime},t\right)\leq C_{\nu}t^{3/2-\nu}\sqrt{2\tau_{n,\epsilon}}\leq\epsilon^{2}/9.
\]
Next, take $\tau_{0}:=2^{-n-1}$ and define $\tau_{m}$ inductively
such that
\[
G\left(\tau_{m-1}\right)-G\left(\tau_{m}\right)=\epsilon^{2}/9
\]
for $m=1,\cdots,M_{\epsilon}$, where $M_{\epsilon}$ is the smallest
integer such that $\tau_{M_{\epsilon}}\geq2^{-n+2}$ and hence
\[
M_{\epsilon}=\mathcal{O}\left(G\left(2^{-n}\right)\right)/\epsilon^{2}.
\]
Consider the covering of $\overline{S\left(x,2^{-n}\right)}\times\left[2^{-n},2^{-n+1}\right]$
that consists of the cylinders
\[
\left\{ B\left(y_{l},\tau_{n,\epsilon}\right)\times(\tau_{m-1},\tau_{m}):\, l=1,\cdots,L_{\epsilon},\, m=1,\cdots,M_{\epsilon}\right\} .
\]
Any pair of points $\left(\left(y,t\right),\,\left(w,s\right)\right)$
that lies in one of the cylinders above, e.g., $B\left(y_{l},\tau_{n,\epsilon}\right)\times(\tau_{m-1},\tau_{m})$,
satisfies that
\[
\begin{split}d\left(y,t;\, w,s\right) & \leq d\left(y,t;\, y,\tau_{m}\right)+d\left(y,\tau_{m};\, w,\tau_{m}\right)+d\left(w,\tau_{m};\, w,s\right)\\
& \leq\epsilon/3+\epsilon/3+\epsilon/3=\epsilon.
\end{split}
\]
This implies that
\[
N\left(\epsilon,\overline{S\left(x,2^{-n}\right)}\times\left[2^{-n},2^{-n+1}\right]\right)\leq L_{\epsilon}\cdot M_{\epsilon}
\]
where $N$ is the entropy function defined before Lemma \ref{lem:expectation of max non-concentric}.
Moreover, the diameter of $\overline{S\left(x,2^{-n}\right)}\times\left[2^{-n},2^{-n+1}\right]$
under the metric $d$ is bounded by $2\sqrt{G\left(2^{-n}\right)}$.
Therefore, there is a universal constant $K>0$ (later $K$ is absorbed
into $C_{\nu}$) such that
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\sup_{\left(y,t\right)\in\overline{S\left(x,2^{-n}\right)}\times\left[2^{-n},\,2^{-n+1}\right]}\,\bar{\theta}_{t}\left(y\right)\right] & \leq K\int_{0}^{\sqrt{G\left(2^{-n}\right)}}\sqrt{\ln\left(L_{\epsilon}\cdot M_{\epsilon}\right)}d\epsilon\\
& \leq K\int_{0}^{\sqrt{G\left(2^{-n}\right)}}\left(\sqrt{\ln L_{\epsilon}}+\sqrt{\ln M_{\epsilon}}\right)d\epsilon\\
& \leq C_{\nu}\sqrt{G\left(2^{-n}\right)}.
\end{split}
\]
(\ref{eq:expectation of max of two parameter family}) follows from
dividing both sides of the inequality above by $D\left(2^{-n+1}\right)$.
\end{proof}
Now we can get to the proof of the upper bound (\ref{eq:upper bound}).\\
\noindent \emph{Proof of the upper bound:} When $\gamma=0$, (\ref{eq:upper bound})
is trivially satisfied. Without loss of generality, we assume that
$\gamma\in(0,1]$ for the rest of the proof. For each $n\geq0$, consider
a finite lattice partition of $\overline{S\left(O,1\right)}$ with
cell size $2\cdot2^{-n}$ (i.e., the length, under the Euclidean metric,
of each side of the cell is $2\cdot2^{-n}$). Let $\left\{ x_{j}^{\left(n\right)}:\, j=1,\cdots,J_{n}\right\} $
be the collection of the lattice cell centers where $J_{n}=2^{\nu n}$
is the total number of the cells. Let $\gamma^{\prime\prime},\gamma^{\prime}$
be two numbers such that $0<\gamma^{\prime\prime}<\gamma^{\prime}<\gamma$
and $\gamma^{\prime}$ and $\gamma^{\prime\prime}$ can be arbitrarily
close to $\gamma$. Consider the subset of the indices
\[
I_{n}:=\left\{ j:\;1\leq j\leq J_{n}\mbox{ s.t. }\sup_{\left(y,r\right)\in\overline{S\left(x_{j}^{\left(n\right)},2^{-n}\right)}\times\left[2^{-n},2^{-n+1}\right]}\,\frac{\bar{\theta}_{r}\left(y\right)}{D\left(r\right)}>\sqrt{2\nu\gamma^{\prime}}\right\} .
\]
Combining (\ref{eq:expectation of max of two parameter family}) and
the Borell-TIS inequality (\cite{AT} $\mathsection2.1$ and the references
therein), we have that, for every $j=1,\cdots,J_{n}$,
\[
\begin{split}\mathbb{\mathcal{W}}\left(j\in I_{n}\right) & =\mathcal{W}\left(\sup_{\left(y,r\right)\in\overline{S\left(x_{j}^{\left(n\right)},2^{-n}\right)}\times\left[2^{-n},2^{-n+1}\right]}\,\frac{\bar{\theta}_{r}\left(y\right)}{D\left(r\right)}>\sqrt{2\nu\gamma^{\prime}}\right)\\
& \leq\exp\left[-\frac{1}{2}\left(\sqrt{2\nu\gamma^{\prime}}-\frac{C_{\nu}}{\sqrt{n}}\right)^{2}\cdot\ln2\cdot\left(n-1\right)\right]\\
& \leq\exp\left(-\nu\gamma^{\prime\prime}\cdot\ln2\cdot n\right)\,.
\end{split}
\]
Therefore,
\[
\mathbb{E}^{\mathcal{W}}\left[\mbox{card}\left(I_{n}\right)\right]=\sum_{j=1}^{J_{n}}\mathcal{W}\left(j\in I_{n}\right)\leq C_{\nu}\cdot2^{\nu\left(1-\gamma^{\prime\prime}\right)n}.
\]
On the other hand, if $y\in T_{\theta}^{\gamma}$, then there exists
a sequence $\left\{ t_{k}:\, k\geq0\right\} $ with $t_{k}\searrow0$
as $k\nearrow\infty$ such that
\[
\frac{\bar{\theta}_{t_{k}}\left(y\right)}{D\left(t_{k}\right)}>\sqrt{2\nu\gamma^{\prime}}\mbox{ for all }k\geq0.
\]
For every $k$, let $n(k)$ be the unique positive integer such that
\[
2^{-n(k)}\leq t_{k}<2^{-n(k)+1}.
\]
If $x_{j}^{\left(n(k)\right)}$ is the cell center (at $n(k)-$th
level) such that $\left|y-x_{j}^{\left(n(k)\right)}\right|\leq\sqrt{\nu}2^{-n(k)}$,
then clearly $j\in I_{n(k)}$. Therefore,
\[
T_{\theta}^{\gamma}\subseteq\bigcap_{m\geq1}\,\bigcup_{n\geq m}\,\bigcup_{j\in I_{n}}\,\overline{S\left(x_{j}^{(n)},2^{-n}\right)}.
\]
Moreover, for each $m\geq1$, $\left\{ \overline{S\left(x_{j}^{\left(n\right)},2^{-n}\right)}:j\in I_{n},\, n\geq m\right\} $
forms a covering of $T_{\theta}^{\gamma}$, and the diameter (under
the Euclidean metric) of $\overline{S\left(x_{j}^{\left(n\right)},2^{-n}\right)}$
is $\sqrt{\nu}2^{-n+1}$. Thus, if $\mathcal{H}^{\alpha}$ is the
Hausdorff-$\alpha$ measure for $\alpha>0$, then
\[
\begin{split}\mathcal{H}^{\alpha}\left(T_{\theta}^{\gamma}\right) & \leq\liminf_{m\rightarrow\infty}\,\sum_{n\geq m}\,\sum_{j\in I_{n}}\,\left(2\sqrt{\nu}\cdot2^{-n}\right)^{\alpha}\\
& =C_{\nu}\cdot\liminf_{m\rightarrow\infty}\,\sum_{n\geq m}\,\mbox{card}\left(I_{n}\right)2^{-n\alpha}.
\end{split}
\]
It follows from Fatou's lemma that
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\mathcal{H}^{\alpha}\left(T_{\theta}^{\gamma}\right)\right] & \leq C_{\nu}\cdot\liminf_{m\rightarrow\infty}\,\sum_{n\geq m}\,\mathbb{E}^{\mathcal{W}}\left[\mbox{card}\left(I_{n}\right)\right]\cdot2^{-n\alpha}\\
& \leq C_{\nu}\cdot\lim_{m\rightarrow\infty}\,\sum_{n\geq m}\,2^{\left[\nu\left(1-\gamma^{\prime\prime}\right)-\alpha\right]n}\,.
\end{split}
\]
Clearly, for any $\alpha>\nu\left(1-\gamma^{\prime\prime}\right)$,
$\mathcal{H}^{\alpha}\left(T_{\theta}^{\gamma}\right)=0$ a.s. and
hence $\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)\leq\alpha$
a.s.. Since $\gamma^{\prime\prime}$ is arbitrarily close to $\gamma$,
we conclude that
\[
\dim_{\mathcal{H}}\left(T_{\theta}^{\gamma}\right)\leq\nu\left(1-\gamma\right)\mbox{ a.s..}
\]
We have completed the proof of the upper bound (\ref{eq:upper bound}).
In addition, by the same argument as above, if $\gamma>1$, we can
choose $\gamma^{\prime\prime}$ to be greater than $1$, in which
case
\[
\sum_{n\geq m}\mathbb{E}^{\mathcal{W}}\left[\mbox{card}\left(I_{n}\right)\right]\leq\sum_{n\geq m}\,2^{\nu\left(1-\gamma^{\prime\prime}\right)n}\rightarrow0\mbox{ as }m\rightarrow\infty.
\]
This observation immediately implies the last assertion in Theorem
\ref{thm:hausdorff dimension of thick point set}, i.e., if $\gamma>1$,
then $T_{\theta}^{\gamma}=\emptyset$ a.s..
\subsection{Perfect $\gamma-$thick point.}
In this subsection we explain why the definition (\ref{eq:def of thick point})
is more appropriate for the study of thick points for polynomial-correlated
GFFs. Simply speaking, the straightforward analogue of (\ref{eq:2D thick point}),
the thick point definition for log-correlated GFFs, imposes too strong
a condition to fulfill in the case of polynomial-correlated GFFs.
To make this precise, we first define a stricter analogue of (\ref{eq:2D thick point}).
\begin{defn}
Let $\gamma\geq0$. For each $\theta\in\Theta$, $x\in\overline{S\left(O,1\right)}$
is called a \emph{perfect} $\gamma-$\emph{thick point }of $\theta$
if
\[
\lim_{t\searrow0}\,\frac{\bar{\theta}_{t}\left(x\right)}{\sqrt{-G\left(t\right)\ln t}}=\sqrt{2\nu\gamma}.
\]
\end{defn}
Again, if $PT_{\theta}^{\gamma}$ is the set that contains all the
perfect $\gamma-$thick points of $\theta$, then $PT_{\theta}^{\gamma}$
is a measurable subset of $\overline{S\left(O,1\right)}$. To study
$PT_{\theta}^{\gamma}$, we follow a similar strategy as the one used
to establish the upper bound in $\mathsection4.1$. For each $n\geq0$,
set $s_{n}:=2^{-n^{2}}$. For $n\geq1$, consider the two-parameter
Gaussian family
\[
\mathcal{A}_{n}:=\left\{ \bar{\theta}_{t}\left(x\right):\, x\in\overline{S\left(O,1\right)},\, t\in[s_{n},\, s_{n-1}]\right\} .
\]
Let $\omega_{n}$ be the modulus of continuity of $\mathcal{A}_{n}$
under the intrinsic metric $d$, i.e., for every $\delta>0$,
\[
\omega_{n}\left(\delta\right):=\sup\left\{ \left|\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{s}\left(y\right)\right|:\, d\left(x,t;y,s\right)\leq\delta,\, x,y\in\overline{S\left(O,1\right)},\, t,s\in[s_{n},\, s_{n-1}]\right\} .
\]
\begin{lem}
There exists a constant $C_{\nu}>0$ such that for every $n\geq1$
and every $0<\delta\ll\sqrt{G\left(s_{n}\right)}$,
\begin{equation}
\mathbb{E}^{\mathcal{W}}\left[\omega_{n}\left(\delta\right)\right]\leq C_{\nu}\cdot\delta\sqrt{\ln\left(s_{n}^{\left(3-2\nu\right)/4}/\delta\right)}.\label{eq:expectation of modulus of continuity}
\end{equation}
Moreover, if
\[
\mathcal{B}_{n}:=\left\{ \left(x,\, y\right)\in\left(\overline{S\left(O,1\right)}\right)^{2}:\,\left|x-y\right|\leq\sqrt{\nu}\cdot s_{2n}\right\} ,
\]
then
\begin{equation}
\mathcal{W}\left(\sup_{\left(x,\, y\right)\in\mathcal{B}_{n},\, t\in[s_{n},\, s_{n-1}]}\left|\frac{\bar{\theta}_{t}\left(x\right)}{D\left(t\right)}-\frac{\bar{\theta}_{t}\left(y\right)}{D\left(t\right)}\right|>2^{-3n^{2}/16},\;\mbox{i.o.}\right)=0.\label{eq:almost sure modulus}
\end{equation}
\end{lem}
\begin{proof}
For every $\epsilon>0$, let $N\left(\epsilon,\mathcal{A}_{n}\right)$
be the entropy function, as introduced before Lemma \ref{lem:expectation of max non-concentric}.
Then, it follows from a similar argument as the one used in the proof
of Lemma \ref{lem:extreme value of two parameter family} that $N\left(\epsilon,\mathcal{A}_{n}\right)\leq L_{\epsilon}\cdot M_{\epsilon}$
where $L_{\epsilon}=\mathcal{O}\left(s_{n}^{\nu\left(3-2\nu\right)}\right)/\epsilon^{4\nu}$
is the entropy for the set $\overline{S\left(O,1\right)}$ under the
metric $d\left(\cdot,s_{n};\,*,s_{n}\right)$, and $M_{\epsilon}=\mathcal{O}\left(G\left(s_{n}\right)\right)/\epsilon^{2}$
is the entropy for the interval $[s_{n},s_{n-1}]$ corresponding to
the concentric process $\left\{ \bar{\theta}_{t}\left(x\right):t\in[s_{n},s_{n-1}]\right\} $
for any fixed $x$. Hence (\cite{AT}, Corollary 1.3.4), there exists
a universal constant $K>0$ such that for every $n\geq1$,
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\omega_{n}\left(\delta\right)\right] & \leq K\int_{0}^{\delta}\sqrt{\ln N\left(\epsilon,\mathcal{A}_{n}\right)}d\epsilon\leq K\int_{0}^{\delta}\left(\sqrt{\ln L_{\epsilon}}+\sqrt{\ln M_{\epsilon}}\right)d\epsilon.\end{split}
\]
Similarly to the integrals we evaluated when proving (\ref{eq:expectation of sup spatial}),
we have that
\[
\int_{0}^{\delta}\sqrt{\ln L_{\epsilon}}d\epsilon\leq C_{\nu}\cdot\delta\sqrt{\ln\left(s_{n}^{\left(3-2\nu\right)/4}/\delta\right)}
\]
and
\[
\int_{0}^{\delta}\sqrt{\ln M_{\epsilon}}d\epsilon\leq C_{\nu}\cdot\delta\sqrt{\ln\left(s_{n}^{\left(1-\nu/2\right)}/\delta\right)}
\]
for some $C_{\nu}>0$, which lead to the first assertion.
To prove the second assertion, notice that by (\ref{eq:estimate intrinsic metric non-concentric}),
if $\left|x-y\right|\leq\sqrt{\nu}\cdot s_{2n}$, then for any $t\in\left[s_{n},\, s_{n-1}\right]$,
\[
d\left(x,t;\, y,t\right)\leq C_{\nu}\cdot t^{\left(3-2\nu\right)/4}\cdot s_{2n}^{1/4}\leq C_{\nu}\cdot2^{n^{2}\left(2\nu-7\right)/4}.
\]
Therefore, by (\ref{eq:expectation of modulus of continuity}),
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\sup_{\left(x,\, y\right)\in\mathcal{B}_{n},\, t\in[s_{n},\, s_{n-1}]}\left|\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{t}\left(y\right)\right|\right] & \leq\mathbb{E}^{\mathcal{W}}\left[\omega_{n}\left(C_{\nu}\cdot2^{n^{2}\left(\nu/2-7/4\right)}\right)\right]\\
& \leq C_{\nu}\cdot2^{n^{2}\left(\nu/2-7/4\right)}\cdot n.
\end{split}
\]
The desired conclusion follows from dividing both sides of the inequality
above by $D\left(s_{n-1}\right)$ and applying the Borel-Cantelli
lemma.
\end{proof}
Now we are ready to establish the main result of this subsection,
which is, if $\gamma>0$, then the perfect $\gamma-$thick point doesn't
exist almost surely. Being ``perfect'' prevents such points from
existing.
\begin{thm}
\label{thm: no perfect thick piont}If $\gamma>0$, then $PT_{\theta}^{\gamma}=\emptyset$
a.s..\end{thm}
\begin{proof}
Based on (\ref{eq:almost sure modulus}), for $\mathcal{W}-$a.e.
$\theta$, there exists $N_{\theta}\in\mathbb{N}$ such that for every
$n\geq N_{\theta}$ and $x,y$ such that $y\in\overline{S\left(x,s_{2n}\right)}$,
\[
\sup_{t\in[s_{n},\, s_{n-1}]}\left|\frac{\bar{\theta}_{t}\left(x\right)}{D\left(t\right)}-\frac{\bar{\theta}_{t}\left(y\right)}{D\left(t\right)}\right|\leq2^{-3n^{2}/16}.
\]
Choose $M>0$ to be a sufficiently large constant. Consider the lattice
partition of $\overline{S\left(O,1\right)}$ with cell size $2^{-k}$,
and let
\[
\left\{ x_{j}^{(k)}:\, j=1,\cdots,J_{k}\right\}
\]
be the cell centers. Let $y$ be a perfect $\gamma-$thick point.
For $n$ that is sufficiently large, if $x_{j}^{(4n^{2})}$ is the
cell center such that $y\in\overline{S\left(x_{j}^{(4n^{2})},s_{2n}\right)}$,
then
\[
\sup_{t\in[s_{n},\, s_{n-1}]}\left|\frac{\bar{\theta}_{t}\left(x_{j}^{(4n^{2})}\right)}{D\left(t\right)}-\sqrt{2\nu\gamma}\right|\leq\frac{1}{M}.
\]
In particular, this means that
\[
\left|\bar{\theta}_{s_{n-1}}\left(x_{j}^{(4n^{2})}\right)-\sqrt{2\nu\gamma}D\left(s_{n-1}\right)\right|\leq\frac{D\left(s_{n-1}\right)}{M},
\]
and for every $t\in\left[s_{n},\, s_{n-1}\right]$ ,
\[
\left|\bar{\theta}_{t}\left(x_{j}^{(4n^{2})}\right)-\bar{\theta}_{s_{n-1}}\left(x_{j}^{(4n^{2})}\right)-\sqrt{2\nu\gamma}\left[D\left(t\right)-D\left(s_{n-1}\right)\right]\right|\leq\frac{2D\left(t\right)}{M}.
\]
Let $\mathcal{P}_{n}=\mathcal{P}_{n}^{x_{j}^{(4n^{2})}}\subseteq\Theta$
be the collection of all the field elements $\theta$ such that
\[
\forall t\in\left[s_{n},\, s_{n-1}\right],\left|\bar{\theta}_{t}\left(x_{j}^{(4n^{2})}\right)-\bar{\theta}_{s_{n-1}}\left(x_{j}^{(4n^{2})}\right)-\sqrt{2\nu\gamma}\left[D\left(t\right)-D\left(s_{n-1}\right)\right]\right|\leq\frac{2D\left(t\right)}{M}.
\]
Clearly, $\mathcal{P}_{n}$ is a measurable set and $\mathcal{W}\left(\mathcal{P}_{n}\right)$
doesn't depend on $x_{j}^{(4n^{2})}$. To simplify the notation, we
will write ``$x_{j}^{(4n^{2})}$'' as ``$x$'' throughout this
proof. The idea is to rewrite $\mathcal{P}_{n}$ in terms of a shifted
$\theta$ and apply the Cameron-Martin formula. To this end, we define,
for $t\in(0,1]$,
\[
F\left(t\right):=\frac{D^{\prime}\left(t\right)}{G^{\prime}\left(t\right)}\mbox{ and }f\left(t\right):=F^{\prime}\left(t\right),
\]
and let $\zeta_{x,n}$ be the element in $H^{-1}\left(\mathbb{R}^{\nu}\right)$
such that its corresponding Paley-Wiener integral is
\[
\mathcal{I}\left(h_{\zeta_{x,n}}\right)\left(\theta\right)=\int_{s_{n}}^{s_{n-1}}\left(\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{s_{n-1}}\left(x\right)\right)f\left(t\right)dt+F\left(s_{n}\right)\left(\bar{\theta}_{s_{n}}\left(x\right)-\bar{\theta}_{s_{n-1}}\left(x\right)\right).
\]
We observe that for every $t\in\left[s_{n},\, s_{n-1}\right]$,
\[
\begin{split}\left(h_{\bar{\sigma}_{x,t}},\, h_{\zeta_{x,n}}\right)_{H} & =\int_{s_{n}}^{t}\left[G\left(t\right)-G\left(s_{n-1}\right)\right]f\left(s\right)ds+\int_{t}^{s_{n-1}}\left[G\left(s\right)-G\left(s_{n-1}\right)\right]f\left(s\right)ds\\
& \qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad\qquad+F\left(s_{n}\right)\left[G\left(t\right)-G\left(s_{n-1}\right)\right]\\
& =\left[G\left(t\right)-G\left(s_{n-1}\right)\right]F\left(t\right)+\int_{t}^{s_{n-1}}\left[G\left(s\right)-G\left(s_{n-1}\right)\right]f\left(s\right)ds\\
& =-\int_{t}^{s_{n-1}}G^{\prime}\left(s\right)F\left(s\right)ds=-\int_{t}^{s_{n-1}}D^{\prime}\left(s\right)ds\\
& =D\left(t\right)-D\left(s_{n-1}\right).
\end{split}
\]
Therefore,
\[
\mathcal{I}\left(h_{\bar{\sigma}_{x,t}}-h_{\bar{\sigma}_{x,s_{n-1}}}\right)\left(\theta-\sqrt{2\nu\gamma}h_{\zeta_{x,n}}\right)=\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{s_{n-1}}\left(x\right)-\sqrt{2\nu\gamma}\left[D\left(t\right)-D\left(s_{n-1}\right)\right].
\]
In other words, we can view
\[
\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{s_{n-1}}\left(x\right)-\sqrt{2\nu\gamma}\left[D\left(t\right)-D\left(s_{n-1}\right)\right]
\]
as a Paley-Wiener integral of a translated GFF. Thus, by the Cameron-Martin
formula (\cite{probability}, Theorem 8.2.9), $\mathcal{W}\left(\mathcal{P}_{n}\right)$
is equal to
\[
\begin{split} & \mathbb{E}^{\mathcal{W}}\left[e^{-\sqrt{2\nu\gamma}\mathcal{I}\left(h_{\zeta_{x,n}}\right)-\nu\gamma\left\Vert h_{\zeta_{x,n}}\right\Vert _{H}^{2}};\,\forall t\in\left[s_{n},\, s_{n-1}\right],\left|\bar{\theta}_{t}\left(x\right)-\bar{\theta}_{s_{n-1}}\left(x\right)\right|\leq\frac{2D\left(t\right)}{M}\right]\\
\le\; & e^{-\nu\gamma\left\Vert h_{\zeta_{x,n}}\right\Vert _{H}^{2}}\cdot\exp\left\{ \frac{2\sqrt{2\nu\gamma}}{M}\left[\int_{s_{n}}^{s_{n-1}}D\left(t\right)f\left(t\right)dt+F\left(s_{n}\right)D\left(s_{n}\right)\right]\right\} \\
=\; & e^{-\nu\gamma\left\Vert h_{\zeta_{x,n}}\right\Vert _{H}^{2}}\cdot\exp\left\{ \frac{2\sqrt{2\nu\gamma}}{M}\left[\int_{s_{n}}^{s_{n-1}}\left(-D^{\prime}\left(t\right)\right)F\left(t\right)dt+F\left(s_{n-1}\right)D\left(s_{n-1}\right)\right]\right\} .
\end{split}
\]
Moreover, we compute
\[
\begin{split}\left\Vert h_{\zeta_{x,n}}\right\Vert _{H}^{2} & =\int_{s_{n}}^{s_{n-1}}\left[D\left(t\right)-D\left(s_{n-1}\right)\right]f\left(t\right)dt+F\left(s_{n}\right)\left[D\left(s_{n}\right)-D\left(s_{n-1}\right)\right]\\
& =\int_{s_{n}}^{s_{n-1}}\left(-D^{\prime}\left(t\right)\right)F\left(t\right)dt.
\end{split}
\]
It is easy to verify that, when $n$ is large,
\[
\int_{s_{n}}^{s_{n-1}}\left(-D^{\prime}\left(t\right)\right)F\left(t\right)dt=\mathcal{O}\left(n^{3}\right)\mbox{ and }F\left(s_{n-1}\right)D\left(s_{n-1}\right)=\mathcal{O}\left(n^{2}\right).
\]
Thus, $\mathcal{W}\left(\mathcal{P}_{n}\right)$ is no greater than
\[
\begin{split} & \exp\left[\left(-\nu\gamma+\frac{2\sqrt{2\nu\gamma}}{M}\right)\mathcal{O}\left(n^{3}\right)+\frac{2\sqrt{2\nu\gamma}}{M}\mathcal{O}\left(n^{2}\right)\right]\leq\exp\left(-n^{3}/M\right)\end{split}
\]
when $M$ is sufficiently large.
To complete the proof, we repeat the arguments that lead to the last
assertion in Theorem \ref{thm:hausdorff dimension of thick point set}.
Because
\[
\left\{ \theta\in\Theta:\, PT_{\theta}^{\gamma}\neq\emptyset\right\} \subseteq\bigcap_{m\geq1}\,\bigcup_{n\geq m}\,\bigcup_{\mbox{cell center }x_{j}^{(4n^{2})}}\,\mathcal{P}_{n}^{x_{j}^{(4n^{2})}},
\]
and the probability of the RHS is no greater than
\[
\lim_{m\rightarrow\infty}\sum_{n\geq m}\,2^{4\nu n^{2}}e^{-n^{3}/M}=0,
\]
$PT_{\theta}^{\gamma}$ is the empty set with probability one.
\end{proof}
\section{Proof of the Lower Bound}
In this section we will provide a proof of the lower bound (\ref{eq:lower bound}).
The strategy is to study the convergence of $\bar{\theta}_{t}\left(x\right)/D\left(t\right)$
as $t\searrow0$ along a prefixed sequence that decays to zero sufficiently
fast. To be specific, assume that $\left\{ r_{n}:\, n\geq0\right\} $
is a sequence of positive numbers satisfying that $r_{0}=1$, $r_{n}\searrow0$
as $n\nearrow\infty$, and
\begin{equation}
\lim_{n\rightarrow\infty}\frac{n^{2}\cdot\ln r_{n-1}}{\ln r_{n}}=0.\label{eq:condition on r_n}
\end{equation}
\begin{defn}
Let $\gamma\geq0$. For each $\theta\in\Theta$, $x\in\overline{S\left(O,1\right)}$
is called a \emph{sequential} $\gamma-$\emph{thick point }of $\theta$
with the sequence $\left\{ r_{n}:n\ge0\right\} $ if
\[
\lim_{n\rightarrow\infty}\,\frac{\bar{\theta}_{r_{n}}\left(x\right)}{\sqrt{-G\left(r_{n}\right)\ln r_{n}}}=\sqrt{2\nu\gamma}.
\]
\end{defn}
With any sequence $\left\{ r_{n}:n\geq0\right\} $ as described above
fixed, we denote by $ST_{\theta}^{\gamma}$ the collection of all
the sequential $\gamma-$thick points of $\theta$ with $\left\{ r_{n}:n\geq0\right\} $.
$ST_{\theta}^{\gamma}$ is a measurable subset of $\overline{S\left(O,1\right)}$.
In this section we will prove the following result.
\begin{thm}
\label{thm:thick point along sequence}For $\gamma\in\left[0,1\right]$,
\[
\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)=\nu\left(1-\gamma\right)\mbox{ a.s.}.
\]
Moreover, for $\mathcal{W}-$a.e. $\theta\in\Theta$, $x\in ST_{\theta}^{0}$
for Lebesgue-a.e. $x\in\overline{S\left(O,1\right)}$.
On the other hand, for $\gamma>1$, $ST_{\theta}^{\gamma}=\emptyset$
a.s..
\end{thm}
Since $ST_{\theta}^{\gamma}\subseteq T_{\theta}^{\gamma}$, the established
upper bounds of the size of $T_{\theta}^{\gamma}$ also apply to $ST_{\theta}^{\gamma}$,
i.e., $\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)\leq\nu\left(1-\gamma\right)$
a.s. for $\gamma\in\left[0,1\right]$, and $ST_{\theta}^{\gamma}=\emptyset$
a.s. for $\gamma>1$. As for the lower bound, when $\gamma=0$, (\ref{eq:LIL for concentric})
implies that $\mathcal{W}\left(x\in ST_{\theta}^{0}\right)=1$ for
every $x\in\overline{S\left(O,1\right)}$. Let $\mu_{Leb}$ be the
Lebesgue measure on $\mathbb{R}^{\nu}$, and $\mathcal{H}^{\nu}$
the $\nu-$dimensional Hausdorff measure on $\mathbb{R}^{\nu}$. Then,
$\mathcal{H}^{\nu}=C_{\nu}\mu_{Leb}$ for a dimensional constant $C_{\nu}>0$.
By Fubini's theorem,
\[
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\mathcal{H}^{\nu}\left(ST_{\theta}^{0}\right)\right] & =C_{\nu}\int_{\overline{S\left(O,1\right)}}\mathcal{W}\left(x\in ST_{\theta}^{0}\right)\mu_{Leb}\left(dx\right)=\mathcal{H}^{\nu}\left(\overline{S\left(O,1\right)}\right).\end{split}
\]
Since $\mathcal{H}^{\nu}\left(ST_{\theta}^{0}\right)\leq\mathcal{H}^{\nu}\left(\overline{S\left(O,1\right)}\right)$
a.s., they must be equal a.s., which implies that for $\mu_{Leb}-$a.e.
$x\in\overline{S\left(O,1\right)}$, $x\in ST_{\theta}^{0}$ and hence
$x\in T_{\theta}^{0}$. Thus, it is sufficient to derive the lower
bound of $\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)$ for
$\gamma\in\left(0,1\right)$.
\begin{rem}
One example of a sequence satisfying (\ref{eq:condition on r_n})
is $r_{n}=2^{-2^{n^{2}}+1}$ for $n\geq0$. However, the method explained
in this section applies to any sufficiently fast decaying sequence.
On the other hand, for technical reasons, we will assume that
\begin{equation}
\ln\left(-\ln r_{n+1}\right)=o\left(-\ln r_{n}\right)\mbox{ for all large }n\mbox{'s}.\label{eq:condition on r_n (2)}
\end{equation}
This assumption will not reduce the generality of the method. If a
given sequence $\left\{ r_{n}:n\geq0\right\} $ does not satisfy (\ref{eq:condition on r_n (2)}),
one can always ``fill in'' more numbers to get a new sequence $\left\{ \tilde{r}_{m}:m\geq0\right\} $
that satisfies both (\ref{eq:condition on r_n}) and (\ref{eq:condition on r_n (2)}),
and the original sequence $\left\{ r_{n}:n\geq0\right\} $ is a subsequence
of $\left\{ \tilde{r}_{m}:m\geq0\right\} $. Then, if we establish
a lower bound of $\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)$
with $\left\{ \tilde{r}_{m}:m\geq0\right\} $, the lower bound also
applies with any subsequence of $\left\{ \tilde{r}_{m}:m\geq0\right\} $.
The advantage of studying sequential thick points is that the same
method can be applied to the study of other problems related to the
geometry of GFFs, when convergence along a sequence already gives rise
to interesting objects (e.g., random measures concerned in \cite{CJ,DS1,JJRV}),
especially in the absence of the perfect $\gamma-$thick point as
pointed out in Theorem \ref{thm: no perfect thick piont}.
\end{rem}
Let $\gamma\in\left(0,1\right)$ be fixed. We will obtain the lower
bound of $\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)$ in
multiple steps. To simplify the notation, for every $\theta\in\Theta$,
$x\in\overline{S\left(O,1\right)}$ and $n\geq1$, we write
\[
\Delta\bar{\theta}_{n}\left(x\right):=\bar{\theta}_{r_{n}}\left(x\right)-\bar{\theta}_{r_{n-1}}\left(x\right),
\]
\[
\Delta G_{n}:=G\left(r_{n}\right)-G\left(r_{n-1}\right)\mbox{ and }\Delta D_{n}:=D\left(r_{n}\right)-D\left(r_{n-1}\right).
\]
For each $x\in\overline{S\left(O,1\right)}$, we define the following
measurable subsets of $\Theta$:
\[
P_{x,0}:=\left\{ \theta\in\Theta:\,\left|\bar{\theta}_{r_{0}}\left(x\right)-\sqrt{2\nu\gamma}D\left(r_{0}\right)\right|\leq\sqrt{G\left(r_{0}\right)}\right\} ,
\]
and for $n\ge1$,
\[
P_{x,n}:=\left\{ \theta\in\Theta:\,\left|\Delta\bar{\theta}_{n}\left(x\right)-\sqrt{2\nu\gamma}\Delta D_{n}\right|\leq\sqrt{\Delta G_{n}}\right\} \mbox{ and }\Phi_{x,n}:=\left(\bigcap_{i=0}^{n}P_{x,i}\right).
\]
\subsection*{Step 1: Derive the probability estimates.}
Let $x\in\overline{S\left(O,1\right)}$ be fixed. It's clear that
$\left\{ \bar{\theta}_{r_{0}}\left(x\right),\,\Delta\bar{\theta}_{n}\left(x\right),\, n\geq1\right\} $
is a family of independent Gaussian random variables. The following
simple facts about $P_{x,n}$ and $\Phi_{x,n}$ are in order.
\begin{lem}
\label{lem:analysis of the probability}$P_{x,i}$, $i=0,1,\cdots,n$,
are mutually independent. Moreover, there exists a constant $C_{\nu}>0$
such that for every $n\geq1$,
\begin{equation}
e^{\nu\gamma\ln r_{n}-C_{\nu}\sqrt{-\ln r_{n}}}\leq\mathcal{W}\left(P_{x,n}\right)\leq e^{\nu\gamma\ln r_{n}+C_{\nu}\sqrt{-\ln r_{n}}}\label{eq: estimate for P_x,n}
\end{equation}
and
\begin{equation}
e^{\nu\gamma\ln r_{n}\left(1+C_{\nu}/n\right)}\leq\mathcal{W}\left(\Phi_{x,n}\right)\leq e^{\nu\gamma\ln r_{n}\left(1-C_{\nu}/n\right)}.\label{eq: estimate for Phi_x,n}
\end{equation}
\end{lem}
The results above follow from straightforward computations with Gaussian
distributions, combined with the assumption (\ref{eq:condition on r_n}).
Proofs are omitted.
\subsection*{Step 2: Obtain a subset of $ST_{\theta}^{\gamma}$.}
For every $n\ge0$, consider the lattice partition of $\overline{S\left(O,1\right)}$
with cell size $r_{n}$. Assume that $\mathcal{K}_{n}=\left\{ x_{j}^{\left(n\right)}:\, j=1,\cdots,K_{n}\right\} $
is the collection of all the cell centers, where $K_{n}=r_{n}^{-\nu}$.
For every $\theta\in\Theta$, set
\[
\Xi_{n,\theta}:=\left\{ x_{j}^{(n)}\in\mathcal{K}_{n}:\;1\leq j\leq K_{n},\,\theta\in\Phi_{x_{j}^{(n)},n}\right\} .
\]
\begin{lem}
For every $\gamma\in\left(0,1\right)$ and every $\theta\in\Theta$,
\begin{equation}
ST_{\theta}^{\gamma}\supseteq\Sigma_{\theta}^{\gamma}:=\bigcap_{k\geq1}\,\overline{\bigcup_{n\geq k}\,\bigcup_{x\in\Xi_{n,\theta}}\, S\left(x,r_{n}\right)}.\label{eq:subset of ST}
\end{equation}
\end{lem}
\begin{proof}
Let $\theta$ and $\gamma$ be fixed. We first show that
\[
ST_{\theta}^{\gamma}\supseteq\bigcap_{k\geq1}\,\bigcup_{n\geq k}\,\bigcup_{x\in\Xi_{n,\theta}}\,\overline{S\left(x,r_{n}\right)}.
\]
For any $y$ in the RHS above, there exists a subsequence $\left\{ n_{k}:k\geq1\right\} \subseteq\mathbb{N}$
with $n_{k}\nearrow\infty$ as $k\nearrow\infty$ and a sequence of
cell centers $\left\{ x^{(n_{k})}\in\Xi_{n_{k},\theta}:\, k\geq1\right\} $
such that $\left|y-x^{(n_{k})}\right|\leq\sqrt{\nu}\, r_{n_{k}}$
for every $k\geq1$. Moreover, by the definition of $\Xi_{n_{k},\theta}$
and the triangle inequality, for every $j=0,1,\cdots,n_{k}$,
\[
\left|\frac{\bar{\theta}_{r_{j}}\left(x^{\left(n_{k}\right)}\right)}{D\left(r_{j}\right)}-\sqrt{2\nu\gamma}\right|\leq\frac{\sqrt{G\left(r_{0}\right)}+\sum_{p=1}^{j}\sqrt{\Delta G_{p}}}{D\left(r_{j}\right)}\leq\frac{j+1}{\sqrt{-\ln r_{j}}}.
\]
When $j$ is sufficiently large, the RHS above can be arbitrarily
small; moreover, (\ref{eq:almost sure modulus}) implies that, if
$n_{k}$ is sufficiently large that $r_{n_{k}}<r_{j+1}^{4}$, then
\[
\left|\frac{\bar{\theta}_{r_{j}}\left(x^{\left(n_{k}\right)}\right)}{D\left(r_{j}\right)}-\frac{\bar{\theta}_{r_{j}}\left(y\right)}{D\left(r_{j}\right)}\right|\leq r_{j-1}^{3/16}.
\]
It follows immediately from the triangle inequality that
\[
\lim_{j\rightarrow\infty}\frac{\bar{\theta}_{r_{j}}\left(y\right)}{D\left(r_{j}\right)}=\sqrt{2\nu\gamma},
\]
and hence $y\in ST_{\theta}^{\gamma}$.
Next, let $\tilde{y}\in\Sigma_{\theta}^{\gamma}$. For each $k\geq1$,
there exists a sequence $\left\{ y_{p}:\, p\geq1\right\} $ with
\[
y_{p}\in\bigcup_{n\geq k}\,\bigcup_{x\in\Xi_{n,\theta}}\, S\left(x,r_{n}\right)\,\mbox{ for every }p\geq1
\]
such that $\lim_{p\rightarrow\infty}y_{p}=\tilde{y}$. Either, for
some $n\geq k$, $y_{p}\in\bigcup_{x\in\Xi_{n,\theta}}\, S\left(x,r_{n}\right)$
for infinitely many $p$'s, in which case there must exist $x^{(n)}\in\Xi_{n,\theta}$
such that $\left|\tilde{y}-x^{(n)}\right|\leq2\sqrt{\nu}\cdot r_{n}$,
or, one can find a subsequence $\left\{ n_{p}:\, p\geq0\right\} $
with $n_{p}\nearrow\infty$ as $p\nearrow\infty$ such that $y_{p}\in S\left(x^{(n_{p})},r_{n_{p}}\right)$
for some $x^{(n_{p})}\in\Xi_{n_{p},\theta}$, in which case, since
$y_{p}\rightarrow\tilde{y}$, $\left|x^{(n_{p})}-\tilde{y}\right|$
can be arbitrarily small when $p$ is sufficiently large. In either
case, one can follow similar arguments as above to show that $\tilde{y}\in ST_{\theta}^{\gamma}$.
\end{proof}
\subsection*{Step 3: Construct a sequence of measures.}
For each $n\geq1$ and $\theta\in\Theta$, define a finite measure
on $\overline{S\left(O,1\right)}$ by
\begin{equation}
\forall A\in\mathcal{B}\left(\overline{S\left(O,1\right)}\right),\,\mu_{n,\theta}\left(A\right):=\frac{1}{K_{n}}\sum_{j=1}^{K_{n}}\frac{\mathbb{I}_{\Xi_{n,\theta}}\left(x_{j}^{(n)}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)}\frac{\mbox{vol}\left(A\cap S\left(x_{j}^{(n)},r_{n}\right)\right)}{\mbox{vol}\left(S\left(x_{j}^{(n)},r_{n}\right)\right)}\label{eq:def of mu_n_theta}
\end{equation}
where ``vol'' refers to the volume under the Lebesgue measure on
$\mathbb{R}^{\nu}$. It is clear that
\begin{equation}
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)\right] & =1\end{split}
\label{eq:1st moment of total mass under mu_theta}
\end{equation}
for every $n\geq1$. We also need to study the second moment of $\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)$,
to which end we write the second moment as
\begin{equation}
\begin{split}\mathbb{E}^{\mathcal{W}}\left[\left(\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)\right)^{2}\right] & =\frac{1}{K_{n}^{2}}\,\sum_{j,k=1}^{K_{n}}\,\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\end{split}
.\label{eq:2nd moment of the total mass under mu_theta}
\end{equation}
We will show that
\[
\sup_{n\geq1}\,\mathbb{E}^{\mathcal{W}}\left[\left(\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)\right)^{2}\right]<\infty.
\]
First notice that, when $j=k$, (\ref{eq: estimate for Phi_x,n})
implies that
\[
\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}=\frac{1}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)}\leq e^{-\nu\gamma\ln r_{n}\left(1+C_{\nu}/n\right)},
\]
so the sum over the diagonal terms in (\ref{eq:2nd moment of the total mass under mu_theta})
is bounded from above by
\[
K_{n}^{-1}\cdot e^{-\nu\gamma\ln r_{n}\left(1+C_{\nu}/n\right)}=e^{\left(\nu-\nu\gamma\right)\ln r_{n}+o\left(\ln r_{n}\right)}
\]
which converges to zero as $n\rightarrow\infty$ so long as $\gamma<1$.
So we only need to treat the sum over the off-diagonal terms in (\ref{eq:2nd moment of the total mass under mu_theta}),
and this is done in separate cases depending on the distance between
the two cell centers $x_{j}^{(n)}$ and $x_{k}^{(n)}$.
Assume that $j\neq k$. Then there exists a unique $i\in\mathbb{N}$,
$0\leq i\leq n-1$, such that
\[
2r_{i+1}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<2r_{i},\quad\left(\dagger\right)
\]
and we can rewrite the sum over the off-diagonal terms in (\ref{eq:2nd moment of the total mass under mu_theta})
as
\begin{equation}
\frac{1}{K_{n}^{2}}\,\sum_{j=1}^{K_{n}}\,\sum_{i=0}^{n-1}\sum_{\left\{ k:\,(\dagger)\mbox{ holds with }i\right\} }\,\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\,.\label{eq:2nd moment rewritten in terms of L}
\end{equation}
Let $j$ and $k$ be fixed for now. For $l,\, l^{\prime}\ge1$, set
\[
\mbox{DCov}\left(l,\, l^{\prime}\right):=\mathbb{E}^{\mathcal{W}}\left[\Delta\bar{\theta}_{l}\left(x_{j}^{(n)}\right)\cdot\Delta\bar{\theta}_{l^{\prime}}\left(x_{k}^{(n)}\right)\right].
\]
By (\ref{eq:covariance for (1-Delta)^s-1}), $\mbox{DCov}\left(l,\, l^{\prime}\right)$
only depends on $r_{l}$, $r_{l^{\prime}}$ and $\left|x_{j}^{(n)}-x_{k}^{(n)}\right|$.
It is sufficient to treat the cases when $\left|x_{j}^{(n)}-x_{k}^{(n)}\right|$
is small, or equivalently, when $i$, as determined by $\left(\dagger\right)$,
is large. One can easily use (\ref{eq:cov non-concentric non-overlapping})
and (\ref{eq:cov non-concentric inclusion}) to verify that $\mbox{DCov}\left(l,\, l^{\prime}\right)=0$
when $l^{\prime}\geq i+2$ and either $l\geq i+2$ or $l\leq i-1$,
which implies that the family
\begin{equation}
\left\{ \Delta\bar{\theta}_{l}\left(x_{j}^{(n)}\right),\,\Delta\bar{\theta}_{l^{\prime}}\left(x_{k}^{(n)}\right):\,1\leq l\leq i-1,\, i+2\leq l\leq n,\, i+2\leq l^{\prime}\leq n\right\} \label{eq:independent family}
\end{equation}
is independent. However, the independence of this family is not sufficient
for (\ref{eq:2nd moment rewritten in terms of L}) to be bounded in
$n$. We need to carry out more careful analysis by further breaking
down the range of $\left|x_{j}^{(n)}-x_{k}^{(n)}\right|.$
\subsubsection*{Case 1.}
Assume that, for some sufficiently small $\epsilon\in\left(0,1-\gamma\right)$,
\[
2r_{i+1}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<r_{i+1}^{1-\epsilon}.
\]
In this case, besides the family of independent random variables in
(\ref{eq:independent family}), we also have that, for $l^{\prime}\geq i+2$,
\[
B\left(x_{k}^{(n)},r_{l^{\prime}-1}\right)\subseteq B\left(x_{j}^{(n)},r_{i}\right)\mbox{ and }B\left(x_{j}^{(n)},r_{i+1}\right)\bigcap B\left(x_{k}^{(n)},r_{l^{\prime}-1}\right)=\emptyset,
\]
which, by (\ref{eq:cov non-concentric non-overlapping}) and (\ref{eq:cov non-concentric inclusion}),
leads to $\mbox{DCov}\left(i,\, l^{\prime}\right)=0$ and $\mbox{DCov}\left(i+1,\, l^{\prime}\right)=0$,
and hence $\Delta\bar{\theta}_{i}\left(x_{j}^{(n)}\right)$ and $\Delta\bar{\theta}_{i+1}\left(x_{j}^{(n)}\right)$
are independent of $\Delta\bar{\theta}_{l^{\prime}}\left(x_{k}^{(n)}\right)$.
As a result,
\[
\begin{split}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)} & \leq\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\cdot\Pi_{l^{\prime}=i+2}^{n}\mathcal{W}\left(P_{x_{k}^{(n)},l^{\prime}}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\\
& =\frac{1}{\mathcal{W}\left(\Phi_{x_{k}^{(n)},i+1}\right)}\leq\exp\left[-\nu\gamma\ln r_{i+1}\left(1+C_{\nu}/n\right)\right].
\end{split}
\]
The last inequality is due to (\ref{eq: estimate for Phi_x,n}). On
the other hand, if $j$ is fixed, then the number of $x_{k}^{(n)}$'s
such that
\[
2r_{i+1}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<r_{i+1}^{1-\epsilon}
\]
is of the order of $\left(r_{i+1}^{1-\epsilon}/r_{n}\right)^{\nu}.$
The contribution to (\ref{eq:2nd moment rewritten in terms of L})
under this case is
\[
\begin{split} & \sum_{i=0}^{n-1}\exp\left[\nu\left(1-\epsilon-\gamma\right)\ln r_{i+1}+o\left(-\ln r_{i+1}\right)\right]\end{split}
\]
which is bounded in $n$ since $\epsilon<1-\gamma$.
\subsubsection*{Case 2.}
Assume that
\begin{equation}
r_{i}-r_{i+2}<\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\leq r_{i}+r_{i+2}.\label{eq:case 2}
\end{equation}
Since the random variables in (\ref{eq:independent family}) are independent,
we have that
\[
\begin{split}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)} & \leq\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},i-1}\right)\Pi_{l=i+2}^{n}\mathcal{W}\left(P_{x_{j}^{(n)},l}\right)\Pi_{l^{\prime}=i+2}^{n}\mathcal{W}\left(P_{x_{k}^{(n)},l^{\prime}}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\\
& =\frac{1}{\mathcal{W}\left(P_{x_{j}^{(n)},i}\right)\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},i+1}\right)},
\end{split}
\]
which, by (\ref{eq: estimate for P_x,n}) and (\ref{eq: estimate for Phi_x,n}),
is no greater than
\[
\exp\left[-3\nu\gamma\ln r_{i+1}\left(1+C_{\nu}/n\right)\right].
\]
Meanwhile, with $x_{j}^{(n)}$ fixed, the number of $x_{k}^{(n)}$'s
that satisfy (\ref{eq:case 2}) is of the order of $r_{i}^{\nu-1}r_{i+2}/r_{n}^{\nu}$.
Hence, the contribution to (\ref{eq:2nd moment rewritten in terms of L})
under this case is
\[
\begin{split} & \sum_{i=0}^{n-1}\exp\left[\ln r_{i+2}-3\nu\gamma\ln r_{i+1}+o\left(-\ln r_{i+1}\right)\right]\end{split}
\]
which is bounded in $n$ by the assumption (\ref{eq:condition on r_n}).
\subsubsection*{Case 3.}
Assume that either
\[
r_{i}-r_{i+1}<\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\leq r_{i}-r_{i+2}\quad\mbox{(3a)}
\]
or
\[
r_{i}+r_{i+2}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<r_{i}+r_{i+1}.\quad\mbox{(3b)}
\]
We observe that for all $l^{\prime}\geq i+3$, by (\ref{eq:cov non-concentric non-overlapping})
and (\ref{eq:cov non-concentric inclusion}), under the hypothesis
(3a) or (3b), $\mbox{DCov}\left(i+1,\, l^{\prime}\right)=0$. Together
with the family of independent random variables in (\ref{eq:independent family}),
we see that both $P_{x_{j}^{(n)},i+1}$ and $P_{x_{j}^{(n)},i+2}$
are independent of $P_{x_{k}^{(n)},l^{\prime}}$ for all $l^{\prime}\geq i+3$,
and similarly both $P_{x_{k}^{(n)},i+1}$ and $P_{x_{k}^{(n)},i+2}$
are independent of $P_{x_{j}^{(n)},l}$ for all $l\geq i+3$. Thus,
$\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)$
is bounded from above by
\begin{equation}
\begin{split} & \Pi_{l=i+3}^{n}\mathcal{W}\left(P_{x_{j}^{(n)},l}\right)\cdot\Pi_{l^{\prime}=i+3}^{n}\mathcal{W}\left(P_{x_{k}^{(n)},l^{\prime}}\right)\\
& \qquad\qquad\qquad\cdot\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\cap P_{x_{j}^{(n)},i+2}\cap P_{x_{k}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+2}\right),
\end{split}
\label{eq:splitting prob in case 3}
\end{equation}
so we only need to focus on the family
\[
\left\{ \Delta\bar{\theta}_{i+1}\left(x_{j}^{(n)}\right),\Delta\bar{\theta}_{i+2}\left(x_{j}^{(n)}\right),\Delta\bar{\theta}_{i+1}\left(x_{k}^{(n)}\right),\Delta\bar{\theta}_{i+2}\left(x_{k}^{(n)}\right)\right\} .
\]
\begin{lem}
\label{lem:estimate of prob of i+1,i+2,i+1,i+2}Under the hypothesis
(3a) or (3b), there exists a constant $C_{\nu}>0$ such that for all
$i\geq0$,
\begin{equation}
\begin{split} & \frac{\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\cap P_{x_{j}^{(n)},i+2}\cap P_{x_{k}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+2}\right)}{\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\right)\mathcal{W}\left(P_{x_{j}^{(n)},i+2}\right)\mathcal{W}\left(P_{x_{k}^{(n)},i+1}\right)\mathcal{W}\left(P_{x_{k}^{(n)},i+2}\right)}\leq e^{C_{\nu}\sqrt{-\ln r_{i+1}}}.\end{split}
\label{eq:estimate of prob of i+1, i+2, i+1, i+2}
\end{equation}
\end{lem}
\begin{proof}
We will prove this result by multiple steps of conditioning. To further
simplify the notation, throughout the proof, we write
\[
X_{i+1}:=\Delta\bar{\theta}_{i+1}\left(x_{j}^{(n)}\right),\; X_{i+2}:=\Delta\bar{\theta}_{i+2}\left(x_{j}^{(n)}\right),
\]
and
\[
Y_{i+1}:=\Delta\bar{\theta}_{i+1}\left(x_{k}^{(n)}\right),\; Y_{i+2}:=\Delta\bar{\theta}_{i+2}\left(x_{k}^{(n)}\right).
\]
Clearly, $Y_{i+2}$ is independent of $Y_{i+1}$ and $X_{i+2}$. Furthermore,
$\mbox{Cov}\left(X_{i+1},Y_{i+2}\right)$ is given by, when (3a) applies,
\[
\begin{split}\mbox{DCov}\left(i+1,\, i+2\right) & =-C_{incl}\left(r_{i},\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\right)+\mbox{Cov}\left(r_{i},x_{j}^{(n)};r_{i+1},x_{k}^{(n)}\right);\end{split}
\]
when (3b) applies,
\[
\begin{split}\mbox{DCov}\left(i+1,\, i+2\right) & =-C_{disj}\left(\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\right)+\mbox{Cov}\left(r_{i},x_{j}^{(n)};r_{i+1},x_{k}^{(n)}\right).\end{split}
\]
In either case, $\mbox{Cov}\left(X_{i+1},Y_{i+2}\right)$ does not
depend on $r_{i+2}$, and by the asymptotics of the functions that
are involved and the Cauchy-Schwarz inequality,
\begin{equation}
\mbox{Cov}\left(X_{i+1},Y_{i+2}\right)=\mathcal{O}\left(\sqrt{G\left(r_{i+1}\right)G\left(r_{i}\right)}\right).\label{eq:bound for cov(i+2,i+1)}
\end{equation}
Similarly, $\mbox{Cov}\left(X_{i+1},Y_{i+1}\right)$ is given by,
when either (3a) or (3b) applies,
\[
\begin{split}\mbox{DCov}\left(i+1,\, i+1\right) & =C_{disj}\left(\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\right)-2\mbox{Cov}\left(r_{i},x_{j}^{(n)};r_{i+1},x_{k}^{(n)}\right)\\
&
+\mbox{Cov}\left(r_{i},x_{j}^{(n)};r_{i},x_{k}^{(n)}\right),
\end{split}
\]
which implies that
\begin{equation}
\mbox{Cov}\left(X_{i+1},Y_{i+1}\right)=\mathcal{O}\left(\sqrt{G\left(r_{i+1}\right)G\left(r_{i}\right)}\right).\label{eq:bound for cov(i+1,i+1)}
\end{equation}
We first condition on $Y_{i+2}$. The joint conditional distribution
of $\left\{ X_{i+1},X_{i+2},Y_{i+1}\right\} $, given $Y_{i+2}=y$,
is the same as the Gaussian family $\left\{ X_{i+1}^{\prime},X_{i+2}^{\prime},Y_{i+1}^{\prime}\right\} $
where $X_{i+2}^{\prime}$ and $Y_{i+1}^{\prime}$ have the same distribution
as $X_{i+2}$ and $Y_{i+1}$ respectively, and $X_{i+1}^{\prime}$
has the Gaussian distribution $N\left(m,\sigma^{2}\right)$ with
\[
m:=\frac{\mbox{Cov}\left(X_{i+1},Y_{i+2}\right)}{\Delta G_{i+2}}y\quad\mbox{ and }\quad\sigma^{2}:=\Delta G_{i+1}-\frac{\mbox{Cov}^{2}\left(X_{i+1},Y_{i+2}\right)}{\Delta G_{i+2}}.
\]
In particular, if $\left|y-\sqrt{2\nu\gamma}\Delta D_{i+2}\right|\leq\sqrt{\Delta G_{i+2}}$,
then, by (\ref{eq:condition on r_n}) and (\ref{eq:bound for cov(i+2,i+1)}),
$m=o\left(1\right)$ and $\sigma^{2}=\Delta G_{i+1}+o\left(1\right)$,
and these estimates
\footnote{Here, as well as in later occasions, when concerning $o\left(1\right)$,
the ``estimate'' refers to the rate of the $o\left(1\right)$ term
converging to zero.
} can be made uniform in $y$. Moreover, the covariance of the family
is given by $\mbox{Cov}\left(X_{i+1}^{\prime},X_{i+2}^{\prime}\right)=0$,
$\mbox{Cov}\left(X_{i+2}^{\prime},Y_{i+1}^{\prime}\right)=\mbox{Cov}\left(X_{i+2},Y_{i+1}\right)$
and $\mbox{Cov}\left(X_{i+1}^{\prime},Y_{i+1}^{\prime}\right)=\mbox{Cov}\left(X_{i+1},Y_{i+1}\right)$.
We write the following conditional distribution as
\[
\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\cap P_{x_{j}^{(n)},i+2}\cap P_{x_{k}^{(n)},i+1}|Y_{i+2}=y\right)=\mathcal{W}|_{Y_{i+2}=y}\left(P_{X_{i+1}^{\prime}}\cap P_{X_{i+2}^{\prime}}\cap P_{Y_{i+1}^{\prime}}\right),
\]
where $\mathcal{W}|_{Y_{i+2}=y}$ is the conditional distribution
under $\mathcal{W}$ given $Y_{i+2}=y$, and $P_{X_{i+1}^{\prime}}$,
$P_{X_{i+2}^{\prime}}$ and $P_{Y_{i+1}^{\prime}}$ are the corresponding
events concerning $X_{i+1}^{\prime}$, $X_{i+2}^{\prime}$ and $Y_{i+1}^{\prime}$,
e.g.,
\[
P_{X_{i+1}^{\prime}}=\left\{ \left|X_{i+1}^{\prime}-\sqrt{2\nu\gamma}\Delta D_{i+1}\right|\leq\sqrt{\Delta G_{i+1}}\right\} .
\]
Next, we condition on $X_{i+2}^{\prime}=x$ where $\left|x-\sqrt{2\nu\gamma}\Delta D_{i+2}\right|\leq\sqrt{\Delta G_{i+2}}$.
Then the conditional distribution of $\left\{ X_{i+1}^{\prime},Y_{i+1}^{\prime}\right\} $
is the same as that of $\left\{ X_{i+1}^{\prime\prime},Y_{i+1}^{\prime\prime}\right\} $
where $X_{i+1}^{\prime\prime}$ has the same distribution as $X_{i+1}^{\prime}$,
and $Y_{i+1}^{\prime\prime}$ has the Gaussian distribution $N\left(\lambda,\varsigma^{2}\right)$
where
\[
\lambda=\frac{\mbox{Cov}\left(X_{i+2},Y_{i+1}\right)}{\Delta G_{i+2}}x\quad\mbox{ and }\quad\varsigma^{2}=\Delta G_{i+1}-\frac{\mbox{Cov}^{2}\left(X_{i+2},Y_{i+1}\right)}{\Delta G_{i+2}}.
\]
Since $\mbox{Cov}\left(X_{i+2},Y_{i+1}\right)=\mbox{Cov}\left(X_{i+1},Y_{i+2}\right)$,
the estimates we obtained for $m$ and $\sigma^{2}$ also apply to
$\lambda$ and $\varsigma^{2}$ respectively, and those estimates
are uniform in $x$ and $y$. In addition, $\mbox{Cov}\left(X_{i+1}^{\prime\prime},Y_{i+1}^{\prime\prime}\right)=\mbox{Cov}\left(X_{i+1},Y_{i+1}\right)$.
Again, we write the following conditional distribution as
\[
\mathcal{W}|_{Y_{i+2}=y}\left(P_{X_{i+1}^{\prime}}\cap P_{Y_{i+1}^{\prime}}|X_{i+2}^{\prime}=x\right)=\mathcal{W}|_{X_{i+2}^{\prime}=x}\left(P_{X_{i+1}^{\prime\prime}}\cap P_{Y_{i+1}^{\prime\prime}}\right)
\]
where $\mathcal{W}|_{X_{i+2}^{\prime}=x}$ is the conditional distribution
under $\mathcal{W}|_{Y_{i+2}=y}$ conditioning on $X_{i+2}^{\prime}=x$,
and $P_{X_{i+1}^{\prime\prime}}$ and $P_{Y_{i+1}^{\prime\prime}}$
are the corresponding events concerning $X_{i+1}^{\prime\prime}$
and $Y_{i+1}^{\prime\prime}$.
To compute $\mathcal{W}|_{X_{i+2}^{\prime}=x}\left(P_{X_{i+1}^{\prime\prime}}\cap P_{Y_{i+1}^{\prime\prime}}\right)$,
we use conditioning again. Given
\[
Y_{i+1}^{\prime\prime}=w\in\left[\sqrt{2\nu\gamma}\Delta D_{i+1}-\sqrt{\Delta G_{i+1}},\,\sqrt{2\nu\gamma}\Delta D_{i+1}+\sqrt{\Delta G_{i+1}}\right],
\]
the conditional distribution of $X_{i+1}^{\prime\prime}$ is the Gaussian
distribution with the mean
\[
m+\frac{\mbox{Cov}\left(X_{i+1},Y_{i+1}\right)}{\varsigma^{2}}\left(w-\lambda\right)=\mathcal{O}\left(\sqrt{G\left(r_{i}\right)\left(-\ln r_{i+1}\right)}\right)
\]
and the variance
\[
\sigma^{2}-\frac{\mbox{Cov}^{2}\left(X_{i+1},Y_{i+1}\right)}{\varsigma^{2}}=\Delta G_{i+1}\left(1+o\left(1\right)\right).
\]
These estimates
\footnote{Here, as well as in later occasions, when concerning ``$\mathcal{O}$'',
the ``estimate'' refers to the constants in the upper and lower
bound.
} follow from (\ref{eq:bound for cov(i+1,i+1)}) and earlier estimates
on $m$, $\lambda$, $\sigma^{2}$ and $\varsigma^{2}$, and they
can be made uniform in $w$, $x$ and $y$. Therefore, one can easily
verify that
\[
\begin{split}\mathcal{W}|_{X_{i+2}^{\prime}=x}\left(P_{X_{i+1}^{\prime\prime}}|Y_{i+1}^{\prime\prime}=w\right) & \leq\exp\left[\nu\gamma\ln r_{i+1}+\mathcal{O}\left(\sqrt{-\ln r_{i+1}}\right)\right]=:p_{1},\end{split}
\]
and $p_{1}$ is independent of $w$, $x$ and $y$. This further leads
to
\[
\begin{split}\mathcal{W}|_{X_{i+2}^{\prime}=x}\left(P_{X_{i+1}^{\prime\prime}}\cap P_{Y_{i+1}^{\prime\prime}}\right) & \leq p_{1}\exp\left[\nu\gamma\ln r_{i+1}+\mathcal{O}\left(\sqrt{-\ln r_{i+1}}\right)\right]\\
& =\exp\left[2\nu\gamma\ln r_{i+1}+\mathcal{O}\left(\sqrt{-\ln r_{i+1}}\right)\right]=:p_{2},
\end{split}
\]
and $p_{2}$ is independent of $x$ and $y$.
Finally, since $X_{i+2}^{\prime}$ has the same distribution as $X_{i+2}$,
by backtracking the conditioning, we have that
\[
\mathcal{W}|_{Y_{i+2}=y}\left(P_{X_{i+1}^{\prime}}\cap P_{Y_{i+1}^{\prime}}\cap P_{X_{i+2}^{\prime}}\right)\leq p_{2}\mathcal{W}\left(P_{x_{j}^{(n)},i+2}\right),
\]
and hence
\[
\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\cap P_{x_{j}^{(n)},i+2}\cap P_{x_{k}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+2}\right)\leq p_{2}\mathcal{W}\left(P_{x_{j}^{(n)},i+2}\right)\mathcal{W}\left(P_{x_{k}^{(n)},i+2}\right).
\]
The desired estimate follows immediately from (\ref{eq: estimate for P_x,n}).
\end{proof}
It follows from (\ref{eq:condition on r_n}), (\ref{eq:splitting prob in case 3})
and (\ref{eq:estimate of prob of i+1, i+2, i+1, i+2}) that, in Case
3,
\[
\begin{split}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)} & \leq\frac{\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\cap P_{x_{j}^{(n)},i+2}\cap P_{x_{k}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+2}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},i+2}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},i+2}\right)}\\
& \leq\frac{\exp\left(C_{\nu}\sqrt{-\ln r_{i+1}}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},i}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},i}\right)}=\exp\left[o\left(-\ln r_{i+1}\right)\right].
\end{split}
\]
On the other hand, with $x_{j}^{(n)}$ fixed, the number of $x_{k}^{(n)}$'s
that satisfy either (3a) or (3b) is of the order of $r_{i}^{\nu-1}r_{i+1}/r_{n}^{\nu}$.
Hence, the contribution to (\ref{eq:2nd moment rewritten in terms of L})
under this case is
\[
\begin{split} & \sum_{i=0}^{n-1}\exp\left[\ln r_{i+1}+o\left(-\ln r_{i+1}\right)\right]\end{split}
\]
which is bounded in $n$.
\subsubsection*{Case 4.}
The last case is that either
\[
r_{i+1}^{1-\epsilon}<\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\leq r_{i}-r_{i+1}\quad\mbox{(4a)}
\]
or
\[
r_{i}+r_{i+1}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<2r_{i}.\quad\,\mbox{(4b)}
\]
The strategy for studying this case is similar to that for the previous
case. We will omit the technical details that are the same as earlier,
but only address the differences in the treatment of Case 4 from that
of Case 3. When (4a) or (4b) applies, one can use (\ref{eq:cov non-concentric non-overlapping})
and (\ref{eq:cov non-concentric inclusion}) to verify that both $P_{x_{j}^{(n)},i}$
and $P_{x_{j}^{(n)},i+1}$ are independent of $P_{x_{k}^{(n)},l^{\prime}}$
for all $l^{\prime}\geq i+2$, and $P_{x_{k}^{(n)},i+1}$ is independent
of $P_{x_{j}^{(n)},l}$ for all $l\geq i+2$. Thus, $\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)$
is no greater than
\begin{equation}
\mathcal{W}\left(\bigcap_{l=i+2}^{n}P_{x_{j}^{(n)},l}\right)\mathcal{W}\left(\bigcap_{l^{\prime}=i+2}^{n}P_{x_{k}^{(n)},l^{\prime}}\right)\cdot\mathcal{W}\left(P_{x_{j}^{(n)},i}\cap P_{x_{j}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+1}\right).\label{eq:splitting prob in case 4}
\end{equation}
\begin{lem}
Under the hypothesis (4a) or (4b), there exists a constant $C_{\nu,\epsilon}>0$
such that for all $i\geq0$,
\begin{equation}
\frac{\mathcal{W}\left(P_{x_{j}^{(n)},i}\cap P_{x_{j}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+1}\right)}{\mathcal{W}\left(P_{x_{j}^{(n)},i}\right)\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\right)\mathcal{W}\left(P_{x_{k}^{(n)},i+1}\right)}\leq\exp\left(C_{\nu,\epsilon}\sqrt{-\ln r_{i}}\right).\label{eq:estimate of prob of i,i+1.i+1}
\end{equation}
\end{lem}
\begin{proof}
Similarly to the proof of Lemma \ref{lem:estimate of prob of i+1,i+2,i+1,i+2},
one can prove (\ref{eq:estimate of prob of i,i+1.i+1}) by multiple
steps of conditioning. To simplify notation, we write
\[
X_{i}:=\Delta\bar{\theta}_{i}\left(x_{j}^{(n)}\right),\, X_{i+1}:=\Delta\bar{\theta}_{i+1}\left(x_{j}^{(n)}\right),\mbox{ and }Y_{i+1}:=\Delta\bar{\theta}_{i+1}\left(x_{k}^{(n)}\right).
\]
When (4a) or (4b) applies, by (\ref{eq:cov non-concentric non-overlapping})
and (\ref{eq:cov non-concentric inclusion}),
\begin{equation}
\mbox{Cov}\left(X_{i},Y_{i+1}\right)=\mathcal{O}\left(G\left(r_{i}\right)\right),\label{eq:bound for i,i+1}
\end{equation}
and
\begin{equation}
\mbox{Cov}\left(Y_{i+1},X_{i+1}\right)=\mathcal{O}\left(G^{1-\epsilon}\left(r_{i+1}\right)\right).\label{eq:bound for i+1,i+1}
\end{equation}
We first condition on $Y_{i+1}=y$ where $\left|y-\sqrt{2\nu\gamma}\Delta D_{i+1}\right|\leq\sqrt{\Delta G_{i+1}}$.
Then the joint conditional distribution of $\left\{ X_{i},X_{i+1}\right\} $
given $Y_{i+1}=y$ is the same as that of $\left\{ X_{i}^{\prime},X_{i+1}^{\prime}\right\} $
where $X_{i}^{\prime}$ and $X_{i+1}^{\prime}$ have distributions
$N\left(m_{1},\sigma_{1}^{2}\right)$ and $N\left(m_{2},\sigma_{2}^{2}\right)$
respectively, where $m_{1}=o\left(1\right)$, $\sigma_{1}^{2}=\Delta G_{i}+o\left(1\right)$,
and
\begin{equation}
m_{2}=\mathcal{O}\left(\Delta D_{i+1}\cdot G^{-\epsilon}\left(r_{i+1}\right)\right)\mbox{ and }\sigma_{2}^{2}=\Delta G_{i+1}\left[1+\mathcal{O}\left(G^{-2\epsilon}\left(r_{i+1}\right)\right)\right],\label{eq:estimate for m_2, sigma_2}
\end{equation}
and moreover,
\begin{equation}
\mbox{Cov}\left(X_{i}^{\prime},X_{i+1}^{\prime}\right)=\mathcal{O}\left(G\left(r_{i}\right)/G^{\epsilon}\left(r_{i+1}\right)\right).\label{eq:estimate for cov i,i+1}
\end{equation}
These estimates on $m_{1}$, $\sigma_{1}^{2}$, $m_{2}$, $\sigma_{2}^{2}$
and $\mbox{Cov}\left(X_{i}^{\prime},X_{i+1}^{\prime}\right)$ follow
from (\ref{eq:bound for i,i+1}) and (\ref{eq:bound for i+1,i+1})
and can be made uniform in $y$. Next, given $X_{i}^{\prime}=x$ where
$\left|x-\sqrt{2\nu\gamma}\Delta D_{i}\right|\leq\sqrt{\Delta G_{i}}$,
the conditional distribution of $X_{i+1}^{\prime}$ is the Gaussian
distribution $N\left(m_{3},\sigma_{3}^{2}\right)$ and, by (\ref{eq:estimate for cov i,i+1}),
$m_{3}$ and $\sigma_{3}^{2}$ follow the same estimates as $m_{2}$
and $\sigma_{2}^{2}$ respectively, i.e., the estimates in (\ref{eq:estimate for m_2, sigma_2}),
and these estimates are uniform in $x$ and $y$.
To proceed from here, we need to carry out a step that is different
from the proof of Lemma \ref{lem:estimate of prob of i+1,i+2,i+1,i+2}.
Specifically, we need to compare $\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\right)$
and
\[
N\left(m_{3},\sigma_{3}^{2}\right)\left(\left[\sqrt{2\nu\gamma}\Delta D_{i+1}-\sqrt{\Delta G_{i+1}},\sqrt{2\nu\gamma}\Delta D_{i+1}+\sqrt{\Delta G_{i+1}}\right]\right).
\]
To this end, we write the latter as
\[
\begin{split} & \frac{1}{\sqrt{2\pi}}\int_{\sqrt{2\nu\gamma}\Delta D_{i+1}-\sqrt{\Delta G_{i+1}}}^{\sqrt{2\nu\gamma}\Delta D_{i+1}+\sqrt{\Delta G_{i+1}}}\frac{\exp\left[-\left(w-m_{3}\right)^{2}/\left(2\sigma_{3}^{2}\right)\right]}{\sigma_{3}}dw\\
=\; & \frac{1}{\sqrt{2\pi}}\int_{\sqrt{2\nu\gamma}\Delta D_{i+1}-\sqrt{\Delta G_{i+1}}}^{\sqrt{2\nu\gamma}\Delta D_{i+1}+\sqrt{\Delta G_{i+1}}}\,\frac{\exp\left[-w^{2}/\left(2\Delta G_{i+1}\right)\right]}{\sqrt{\Delta G_{i+1}}}\cdot E\left(w\right)dw
\end{split}
\]
where
\[
E\left(w\right):=\frac{\sqrt{\Delta G_{i+1}}}{\sigma_{3}}\exp\left[-\frac{\left(w-m_{3}\right)^{2}}{2\sigma_{3}^{2}}+\frac{w^{2}}{2\Delta G_{i+1}}\right].
\]
Notice that by the estimates in (\ref{eq:estimate for m_2, sigma_2})
which apply to $m_{3}$ and $\sigma_{3}^{2}$, there exists a constant
$C_{\nu,\epsilon}>0$ such that
\[
\begin{split}\sup_{\left\{ w:\left|w-\sqrt{2\nu\gamma}\Delta D_{i+1}\right|\leq\sqrt{\Delta G_{i+1}}\right\} }\,\left|E\left(w\right)\right| & \leq C_{\nu,\epsilon}.\end{split}
\]
It follows from this observation that, given $X_{i}^{\prime}=x$,
the conditional probability of $X_{i+1}^{\prime}$ being in the desired
interval, i.e., $\left|X_{i+1}^{\prime}-\sqrt{2\nu\gamma}\Delta D_{i+1}\right|\leq\sqrt{\Delta G_{i+1}}$,
is bounded by $C_{\nu,\epsilon}\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\right)$.
From this point, we backtrack the conditioning in the same way as
we did in the proof of Lemma \ref{lem:estimate of prob of i+1,i+2,i+1,i+2}
and arrive at
\[
\mathcal{W}\left(P_{x_{j}^{(n)},i}\cap P_{x_{j}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+1}\right)\leq e^{\nu\gamma\ln r_{i}+\mathcal{O}\left(\sqrt{-\ln r_{i}}\right)}\mathcal{W}\left(P_{x_{j}^{(n)},i+1}\right)\mathcal{W}\left(P_{x_{k}^{(n)},i+1}\right).
\]
By (\ref{eq: estimate for P_x,n}), (\ref{eq:estimate of prob of i,i+1.i+1})
follows immediately.
\end{proof}
Based on (\ref{eq: estimate for Phi_x,n}), (\ref{eq:splitting prob in case 4})
and (\ref{eq:estimate of prob of i,i+1.i+1}), we have that, in Case
4,
\[
\begin{split}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)} & \leq\frac{\mathcal{W}\left(P_{x_{j}^{(n)},i}\cap P_{x_{j}^{(n)},i+1}\cap P_{x_{k}^{(n)},i+1}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},i+1}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},i+1}\right)}\\
& \leq\frac{\exp\left(C_{\nu,\epsilon}\sqrt{-\ln r_{i}}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},i-1}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},i}\right)}\\
& \leq\exp\left[-\nu\gamma\ln r_{i}+o\left(-\ln r_{i}\right)\right].
\end{split}
\]
With $x_{j}^{(n)}$ fixed, the number of $x_{k}^{(n)}$'s that satisfy
either (4a) or (4b) is of the order of $\left(2r_{i}\right)^{\nu}/r_{n}^{\nu}$.
Hence, the contribution to (\ref{eq:2nd moment rewritten in terms of L})
under this case is
\[
\begin{split} & \sum_{i=0}^{n-1}\exp\left[\nu\left(1-\gamma\right)\ln r_{i}+o\left(-\ln r_{i}\right)\right]\end{split}
\]
which is bounded in $n$ since $\gamma<1$.
Summarizing our findings in all the cases above, we conclude that
\[
\sup_{n\geq1}\,\mathbb{E}^{\mathcal{W}}\left[\left(\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)\right)^{2}\right]<\infty.
\]
\subsection*{Step 4: Study the $\alpha$-energy of $\mu_{n,\theta}$.}
In this subsection we study the $\alpha$-energy, $\alpha>0$, of
the measure $\mu_{n,\theta}$ introduced previously. Namely, with
$\alpha>0$ fixed, we consider, for every $\theta\in\Theta$ and every
$n\geq1$,
\[
I_{\alpha}\left(\mu_{n,\theta}\right):=\int_{\overline{S\left(O,1\right)}}\int_{\overline{S\left(O,1\right)}}\left|y-w\right|^{-\alpha}\mu_{n,\theta}\left(dy\right)\mu_{n,\theta}\left(dw\right).
\]
By the definition of $\mu_{n,\theta}$ (\ref{eq:def of mu_n_theta}),
$\mathbb{E}^{\mathcal{W}}\left[I_{\alpha}\left(\mu_{n,\theta}\right)\right]$
is equal to
\begin{equation}
\frac{1}{K_{n}^{2}}\sum_{j,k=1}^{K_{n}}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\frac{\int_{\overline{S\left(x_{j}^{(n)},r_{n}\right)}}\int_{\overline{S\left(x_{k}^{(n)},r_{n}\right)}}\left|y-w\right|^{-\alpha}dydw}{\mbox{vol}\left(S\left(x_{j}^{(n)},r_{n}\right)\right)\mbox{vol}\left(S\left(x_{k}^{(n)},r_{n}\right)\right)}.\label{eq:expection of energy}
\end{equation}
In this subsection we will show that, if $\alpha<\nu\left(1-\gamma\right)$,
then
\[
\sup_{n\geq1}\,\mathbb{E}^{\mathcal{W}}\left[I_{\alpha}\left(\mu_{n,\theta}\right)\right]<\infty.
\]
For simplicity, we write
\[
I\left(x_{j}^{(n)},\, x_{k}^{(n)}\right):=\frac{\int_{\overline{S\left(x_{j}^{(n)},r_{n}\right)}}\int_{\overline{S\left(x_{k}^{(n)},r_{n}\right)}}\left|y-w\right|^{-\alpha}dydw}{\mbox{vol}\left(S\left(x_{j}^{(n)},r_{n}\right)\right)\mbox{vol}\left(S\left(x_{k}^{(n)},r_{n}\right)\right)}.
\]
When $j=k$, so long as $\alpha<\nu$, $I\left(x_{j}^{(n)},\, x_{k}^{(n)}\right)=C_{\nu}\cdot r_{n}^{-\alpha}$
for some dimensional constant $C_{\nu}>0$. Therefore, the sum over
the diagonal terms in (\ref{eq:expection of energy}) is
\[
\begin{split}\frac{1}{K_{n}^{2}}\,\sum_{j=1}^{K_{n}}\frac{C_{\nu}\cdot r_{n}^{-\alpha}}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)} & \leq C_{\nu}\cdot\exp\left\{ \left[\nu\left(1-\gamma\right)-\alpha\right]\ln r_{n}+o\left(-\ln r_{n}\right)\right\} \end{split}
\]
which tends to zero as $n\rightarrow\infty$ whenever $\alpha<\nu\left(1-\gamma\right)$.
So it is sufficient to treat the sum over the off-diagonal terms in
(\ref{eq:expection of energy}). To this end, we follow a similar
approach as the one adopted in the previous step. Again, assume that
$j\neq k$, let $i\in\mathbb{N}$, $0\leq i\leq n-1$, be the unique
integer such that
\[
2r_{i+1}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<2r_{i},\quad(\dagger)
\]
and we rewrite the sum over the off-diagonal terms in (\ref{eq:expection of energy})
as
\begin{equation}
\frac{1}{K_{n}^{2}}\sum_{j=1}^{K_{n}}\sum_{i=0}^{n-1}\sum_{\left\{ k:\,(\dagger)\mbox{ holds with }i\right\} }\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\cdot I\left(x_{j}^{(n)},\, x_{k}^{(n)}\right).\label{eq:energy decomposed in i}
\end{equation}
Let $\alpha\in\left(0,\nu\left(1-\gamma\right)\right)$ be fixed.
We investigate the sum in (\ref{eq:energy decomposed in i}) according
to the four cases presented in the previous step. Same as earlier,
without loss of generality, we can assume that $i$ is sufficiently
large.\\
\noindent \emph{Case 1.} Assume that for some $\epsilon\in\left(0,1-\gamma-\frac{\alpha}{\nu}\right)$,
\[
2r_{i+1}\leq\left|x_{j}^{(n)}-x_{k}^{(n)}\right|<r_{i+1}^{1-\epsilon}.
\]
We have found out in Case 1 previously (following the same arguments
with a possibly smaller $\epsilon$) that
\[
\begin{split}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)} & \leq\exp\left[-\nu\gamma\ln r_{i+1}+o\left(-\ln r_{i+1}\right)\right],\end{split}
\]
and with $x_{j}^{(n)}$ fixed, the number of $x_{k}^{(n)}$'s that
satisfy the criterion of Case 1 is of the order of $\left(r_{i+1}^{1-\epsilon}/r_{n}\right)^{\nu}$.
Besides, it is easy to see that there exists $C_{\nu}>0$ such that
$I\left(x_{j}^{(n)},\, x_{k}^{(n)}\right)\leq C_{\nu}\cdot r_{i+1}^{-\alpha}$.
So the contribution to (\ref{eq:energy decomposed in i}) under this
case is
\[
\begin{split} & \sum_{i=0}^{n-1}\exp\left\{ \left[\nu\left(1-\epsilon-\gamma\right)-\alpha\right]\ln r_{i+1}+o\left(-\ln r_{i+1}\right)\right\} \end{split}
\]
which is bounded in $n$ since $\epsilon<1-\gamma-\frac{\alpha}{\nu}$.\\
\noindent \emph{Case 2}, \emph{Case 3} and \emph{Case (4b)}. Under
any of the conditions, as imposed in the previous step, of these three
cases, we have that $I\left(x_{j}^{(n)},\, x_{k}^{(n)}\right)\leq C_{\nu}\cdot r_{i}^{-\alpha}$.
Combining this with the findings from the previous step, i.e., the
estimate on
\[
\begin{split}\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\end{split}
\]
and the number of qualifying $k$'s for any fixed $j$, one can easily
confirm that the contribution to (\ref{eq:energy decomposed in i}),
in Case 2 or Case 3 or Case (4b), is bounded in $n$.\\
\noindent \emph{Case (4a)}. However, in the case of (4a), the arguments
above will not work, since $r_{i}^{-\alpha}$ above would be replaced
by $r_{i+1}^{-\alpha\left(1-\epsilon\right)}$. We need to apply a
finer treatment by decomposing the interval $(r_{i+1}^{1-\epsilon},\, r_{i}-r_{i+1}]$
into a union of disjoint intervals. To be specific, let $Z$ be the
smallest integer such that
\[
\left(r_{i+1}/r_{i}\right)^{\left(1-\gamma\right)^{Z}}\geq1-\frac{r_{i+1}}{r_{i}},
\]
for which it suffices to have
\[
\left(1-\gamma\right)^{Z}\leq\frac{\ln\left(1-r_{i+1}/r_{i}\right)}{\ln\left(r_{i+1}/r_{i}\right)},
\]
so $Z$ should be taken as
\[
\frac{1}{\ln\left(1-\gamma\right)}\ln\left[\frac{\ln\left(1-r_{i+1}/r_{i}\right)}{\ln\left(r_{i+1}/r_{i}\right)}\right]+1=\mathcal{O}\left(-\ln r_{i+1}\right).
\]
Define a sequence of positive numbers $\left\{ R_{m}:\, m=0,\cdots,Z\right\} $
by $R_{0}:=r_{i+1}^{1-\epsilon}$, and
\[
R_{m}:=r_{i+1}^{\left(1-\gamma\right)^{m}}\cdot r_{i}^{1-\left(1-\gamma\right)^{m}}\mbox{ for }m=1,\cdots,Z.
\]
Clearly, $R_{m}<R_{m+1}$ and
\[
R_{Z}=r_{i}\cdot\left(r_{i+1}/r_{i}\right)^{\left(1-\gamma\right)^{Z}}\geq r_{i}-r_{i+1}.
\]
Denote $U_{m}:=(R_{m},\, R_{m+1}]$ for $m=0,1,\cdots,Z-1$. Clearly,
\[
(r_{i+1}^{1-\epsilon},\, r_{i}-r_{i+1}]\subseteq\bigcup_{m=0}^{Z-1}U_{m}.
\]
For each $m=0,1,\cdots,Z-1$, if $\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\in U_{m}$,
then $I\left(x_{j}^{(n)},\, x_{k}^{(n)}\right)\leq C_{\nu}R_{m}^{-\alpha}$.
Recall that, in Case 4,
\[
\frac{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\bigcap\Phi_{x_{k}^{(n)},n}\right)}{\mathcal{W}\left(\Phi_{x_{j}^{(n)},n}\right)\mathcal{W}\left(\Phi_{x_{k}^{(n)},n}\right)}\leq\exp\left[-\nu\gamma\ln r_{i}+o\left(-\ln r_{i}\right)\right].
\]
Meanwhile, when $x_{j}^{(n)}$ is fixed, the number of $x_{k}^{(n)}$'s
such that $\left|x_{j}^{(n)}-x_{k}^{(n)}\right|\in U_{m}$ is no greater
than $R_{m+1}^{\nu}/r_{n}^{\nu}$. We will need the following estimate:
\[
\begin{split} & \exp\left[-\nu\gamma\ln r_{i}+o\left(-\ln r_{i}\right)\right]\cdot R_{m}^{-\alpha}\cdot R_{m+1}^{\nu}\\
\leq\; & \exp\left\{ \left[-\nu\gamma-\alpha+\alpha\left(1-\gamma\right)^{m}+\nu-\nu\left(1-\gamma\right)^{m+1}\right]\ln r_{i}+o\left(-\ln r_{i}\right)\right\} \\
& \qquad\qquad\qquad\qquad\qquad\qquad\cdot\exp\left\{ \left[-\alpha\left(1-\gamma\right)^{m}+\nu\left(1-\gamma\right)^{m+1}\right]\ln r_{i+1}\right\} \\
=\; & \exp\left\{ \left[\nu\left(1-\gamma\right)-\alpha\right]\ln r_{i}+o\left(-\ln r_{i}\right)+\left[\nu\left(1-\gamma\right)-\alpha\right]\left(1-\gamma\right)^{m}\ln\left(r_{i+1}/r_{i}\right)\right\} \\
\leq\; & \exp\left\{ \left[\nu\left(1-\gamma\right)-\alpha\right]\ln r_{i}+o\left(-\ln r_{i}\right)\right\} .
\end{split}
\]
Hence, under the condition (4a), the contribution to (\ref{eq:energy decomposed in i})
is
\[
\begin{split} & \sum_{i=0}^{n-1}\,\sum_{m=0}^{Z-1}\,\exp\left[-\nu\gamma\ln r_{i}+o\left(-\ln r_{i}\right)\right]R_{m}^{-\alpha}\cdot R_{m+1}^{\nu}\\
\leq\; & \sum_{i=0}^{n-1}Z\cdot\exp\left\{ \left[\nu\left(1-\gamma\right)-\alpha\right]\ln r_{i}+o\left(-\ln r_{i}\right)\right\} \\
\leq\; & \sum_{i=0}^{n-1}\exp\left\{ \left[\nu\left(1-\gamma\right)-\alpha\right]\ln r_{i}+o\left(-\ln r_{i}\right)+\mathcal{O}\left(\ln\left(-\ln r_{i+1}\right)\right)\right\} \\
=\; & \sum_{i=0}^{n-1}\exp\left\{ \left[\nu\left(1-\gamma\right)-\alpha\right]\ln r_{i}+o\left(-\ln r_{i}\right)\right\}
\end{split}
\]
which is bounded in $n$ since $\alpha<\nu\left(1-\gamma\right)$.
The last line is due to (\ref{eq:condition on r_n (2)}).
\subsection*{Step 5: Establish the lower bound.}
From this point on we follow a similar line of arguments as in \cite{HMP}
to complete the proof of the lower bound. Here we outline the key
steps for completeness. Fix any $\alpha\in\left(0,\nu\left(1-\gamma\right)\right)$.
Denote
\[
A_{1}:=\sup_{n\geq1}\,\mathbb{E}^{\mathcal{W}}\left[\left(\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)\right)^{2}\right]\mbox{ and }A_{2}:=\sup_{n\geq1}\,\mathbb{E}^{\mathcal{W}}\left[I_{\alpha}\left(\mu_{n,\theta}\right)\right].
\]
For constants $c_{1}>1$, $c_{2}>0$, define the measurable subset
of $\Theta$
\[
\Lambda_{n}^{\alpha}:=\left\{ \theta\in\Theta:\,\frac{1}{c_{1}}\leq\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)\leq c_{1},\, I_{\alpha}^{\theta}\left(\mu_{n,\theta}\right)\leq c_{2}\right\}
\]
and $\Lambda^{\alpha}:=\limsup_{n\rightarrow\infty}\Lambda_{n}^{\alpha}$.
Clearly,
\[
\sup_{n\geq1}\,\mathcal{W}\left(I_{\alpha}^{\theta}\left(\mu_{n,\theta}\right)>c_{2}\right)\leq\frac{A_{2}}{c_{2}}\mbox{ and }\sup_{n\geq1}\,\mathcal{W}\left(\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)>c_{1}\right)\leq\frac{1}{c_{1}}.
\]
Moreover, by (\ref{eq:1st moment of total mass under mu_theta}) and
the Paley-Zygmund inequality,
\[
\begin{split}\sup_{n\geq1}\,\mathcal{W}\left(\mu_{n,\theta}\left(\overline{S\left(O,1\right)}\right)<\frac{1}{c_{1}}\right) & \leq1-\frac{\left(1-\frac{1}{c_{1}}\right)^{2}}{A_{1}}.\end{split}
\]
As a consequence, by choosing $c_{1}$ and $c_{2}$ sufficiently large,
we can make
\[
\mathcal{W}\left(\Lambda_{n}^{\alpha}\right)>\frac{\left(1-\frac{1}{c_{1}}\right)^{2}}{A_{1}}-\frac{1}{c_{1}}-\frac{A_{2}}{c_{2}}>\frac{1}{2A_{1}}
\]
for every $n\geq1$, and hence $\mathcal{W}\left(\Lambda^{\alpha}\right)\geq\frac{1}{2A_{1}}$.
For every $\theta\in\Lambda^{\alpha}$, there exists a subsequence
$\left\{ n_{k}:k\geq0\right\} $ such that
\[
\frac{1}{c_{1}}\leq\mu_{n_{k},\theta}\left(\overline{S\left(O,1\right)}\right)\leq c_{1},\, I_{\alpha}\left(\mu_{n_{k},\theta}\right)\leq c_{2}\mbox{ for all }k\geq0.
\]
Because $I_{\alpha}$, as a mapping from the space
of finite measures on $\overline{S\left(O,1\right)}$ to $\left[0,\infty\right]$,
is lower semi-continuous with respect to the weak topology,
\[
\mathcal{M}:=\left\{ \mu\mbox{ Borel measure on }\overline{S\left(O,1\right)}:\,\frac{1}{c_{1}}\leq\mu\left(\overline{S\left(O,1\right)}\right)\leq c_{1},\, I_{\alpha}\left(\mu\right)\leq c_{2}\right\}
\]
is compact, and hence for every $\theta\in\Lambda^{\alpha}$, there
exists a Borel measure $\mu_{\theta}$ on $\overline{S\left(O,1\right)}$
such that $\mu_{n_{k},\theta}$ weakly converges to $\mu_{\theta}$
along a subsequence of $\left\{ n_{k}:k\geq0\right\} $. Then,
\[
\frac{1}{c_{1}}\leq\mu_{\theta}\left(\overline{S\left(O,1\right)}\right)\leq c_{1},\, I_{\alpha}\left(\mu_{\theta}\right)\leq c_{2}.
\]
Moreover, $\Sigma_{\theta}^{\gamma}$, as defined in (\ref{eq:subset of ST}),
is a closed subset of $\overline{S\left(O,1\right)}$. Then the weak
convergence relation implies that $\mu_{\theta}\left(\Sigma_{\theta}^{\gamma}\right)\geq\frac{1}{c_{1}}$.
Therefore, if $\mathcal{C}^{\alpha}\left(\Sigma_{\theta}^{\gamma}\right)$
is the $\alpha$-capacity of the set $\Sigma_{\theta}^{\gamma}$,
i.e.,
\[
\mathcal{C}^{\alpha}\left(\Sigma_{\theta}^{\gamma}\right):=\sup\left\{ \left(\iint_{\Sigma_{\theta}^{\gamma}\times\Sigma_{\theta}^{\gamma}}\frac{\mu\times\mu\left(dydw\right)}{\left|y-w\right|^{\alpha}}\right)^{-1}:\,\mu\mbox{ is a probability measure on }\Sigma_{\theta}^{\gamma}\right\} ,
\]
then $\mathcal{C}^{\alpha}\left(\Sigma_{\theta}^{\gamma}\right)>0$,
and hence, by Frostman's lemma, $\dim_{\mathcal{H}}\left(\Sigma_{\theta}^{\gamma}\right)\geq\alpha$
which implies that $\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)\geq\alpha$.
Thus, we have established that
\[
\mathcal{W}\left(\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)\geq\alpha\right)\geq\mathcal{W}\left(\Lambda^{\alpha}\right)\geq\frac{1}{2A_{1}}.
\]
Finally, we recall from (\ref{eq:H_basis expansion}) that for $\mathcal{W}$-a.e.
$\theta\in\Theta$,
\[
\theta=\sum_{n\ge1}\mathcal{I}\left(h_{n}\right)\left(\theta\right)\cdot h_{n}
\]
where $\left\{ h_{n}:n\geq1\right\} $ is an orthonormal basis of
the Cameron-Martin space $H$ and $\left\{ \mathcal{I}\left(h_{n}\right):n\geq1\right\} $
under $\mathcal{W}$ forms a sequence of i.i.d. standard Gaussian
random variables. By a simple application of the Hewitt-Savage zero-one
law, we have that
\[
\mathcal{W}\left(\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)\geq\alpha\right)=1.
\]
Since $\alpha$ is arbitrary in $\left(0,\nu\left(1-\gamma\right)\right)$,
we get the desired lower bound, i.e.,
\[
\dim_{\mathcal{H}}\left(ST_{\theta}^{\gamma}\right)\geq\nu\left(1-\gamma\right)\mbox{ a.s.}
\]
This completes the proof of Theorem \ref{thm:thick point along sequence}.
Since $ST_{\theta}^{\gamma}$ is a subset of $T_{\theta}^{\gamma}$,
we have also established (\ref{eq:lower bound}) and hence Theorem
\ref{thm:hausdorff dimension of thick point set}.
{}
\end{document} |
\begin{document}
\title{Reduced ideals in pure cubic fields}
\author{G. Tony Jacobs}
\begin{Abstract}
Reduced ideals have been defined in the context of integer rings in quadratic number fields, and they are closely tied to the continued fraction algorithm. The notion of this type of ideal extends naturally to number fields of higher degree. In the case of pure cubic fields, generated by cube roots of integers, a convenient integral basis provides a means for identifying reduced ideals in these fields. We define integer sequences whose terms are in correspondence with some of these ideals, suggesting a generalization of continued fractions.
\end{Abstract}
\maketitle
\section{Introduction}
Quadratic fields have been studied much more extensively than their cubic analogues. Mollin, Shanks, and others developed a body of theory relating continued fractions to the ``infrastructure'' of quadratic fields, that is, to information about ideals and fractional ideals in the sub-rings of algebraic integers in quadratic fields. Hermite famously asked whether there is something which, for cubic fields, does what continued fractions do for quadratic fields.\cite{hermite} This paper is a move towards understanding infrastructure in cubic fields, and we take the existing results on the infrastructure of quadratic fields as our inspiration.
We look forward to a theory of infrastructure that applies to all cubic fields, i.e., degree-$3$ extensions of the rational numbers. We limit our work here to complex cubic fields, because such a field, in its ring of integers, has a unit group whose rank as a free $\mathbb{Z}$-module is $1$; in a totally real cubic field, the unit group has $2$ free $\mathbb{Z}$-module generators. Among the complex cubic fields, we study in particular pure cubic fields, which are generated by cube roots of integers.
A fundamental idea in infrastructure is that of a ``reduced ideal'', and in Section 2, we begin to develop this notion in the cubic case. We identify a canonical presentation for ideals, and identify necessary and sufficient conditions, in terms of that presentation, for an ideal to be reduced. We establish that each field under consideration has only finitely many reduced ideals, and we provide an algorithm for finding all of them in a given field.
In Section 3 of this paper, we construct for each field under study certain sequences, including a periodic sequence of natural numbers, which generalizes certain aspects of the continued fraction algorithm.
One appendix proves a quotidian and technical result on $\mathbb{Z}$-modules which is used when defining a canonical presentation of ideals in Section 2, and the second and third appendices provide Python code for executing the algorithms described herein.
\section{Reduced ideals in pure cubic fields}
In this paper, we work in pure cubic fields, i.e., fields of the form $\mathbb{Q}(\alpha)$ where $\alpha^3=m$ for some positive cube-free integer $m$. Before proceeding at this level of specificity, we remark briefly on why this is our chosen purview.
We know from Dirichlet's unit theorem (see e.g. \cite[p. 346]{alacawilliams}) that a cubic field with three real embeddings (equivalently, positive discriminant) has a unit group of rank $2$, whereas a field with one real embedding and two complex embeddings (equivalently, negative discriminant) has a unit group of rank $1$. This makes those with complex embeddings, such as pure cubic fields, easier to study, especially when generalizing notions from real quadratic fields, which also have rank $1$ unit groups.
There is no loss of generality in choosing $m>0$, because $\mathbb{Q}(\sqrt[3]{-m})=\mathbb{Q}(\sqrt[3]{m})$.
Besides the structure of the unit group, we are working with the integral basis of each field under consideration. Among the cubic fields of negative discriminant, those of the form we study here have a particularly simple integral basis. A clear next step in generalization would be to address arbitrary cubic fields with negative discriminants; an integral basis of such fields is given, for example, in \cite{albert} and more briefly in \cite{alaca}. Another possible generalization would be to arbitrary fields with rank one unit groups, including imaginary quartic fields, as in \cite{buchmannwilliams}.
There are certain notions present in this paper, such as the ``shadow'' of an algebraic number, that would seem to generalize to number fields of arbitrary degree, with arbitrary unit group structure.
\subsection{Basic Results on Ideals}
We begin with some notation. Recall that a free $\mathbb{Z}$-module $M$ of rank $n$ is defined as a free abelian group on $n$ generators, with the operation \textit{scalar multiplication}, defined so that for $m\in M$ and for the scalar $n > 0 \in \mathbb{Z}$, we have $nm = (n-1)m + m$, $0m = 0$, and $(-n)m = -(nm)$.
\begin{remark}
\label{notation}
Throughout this work, we adopt the following notations: The free $\mathbb{Z}$-module $u_1\mathbb{Z}\oplus\cdots\oplus u_n\mathbb{Z}$ will be denoted $[u_1,\ldots,u_n]$. The greatest common divisor of the two integers $i$ and $j$ will be denoted $(i,j)$. Furthermore, we define the following variables for use throughout:
Let $h$ and $k$ be relatively prime, squarefree positive integers. Then $m=hk^2$ is a positive cube-free integer, and we note that every such integer can be expressed uniquely in this form. Set $\sigma=3$ if $m\equiv\pm 1\pmod{9}$, and $1$ otherwise. Let $\alpha$ be the unique real root of the $\mathbb{Q}$-irreducible polynomial $x^3-m$; the field $K=\mathbb{Q}(\alpha)$ is a degree $3$ extension of $\mathbb{Q}$. Setting $\widehat{m}=\widehat{\alpha}^3=h^2k$, we obtain another expression for the same field: $\mathbb{Q}(\widehat{\alpha})=\mathbb{Q}(\alpha)$. We note that $\sigma$ is invariant under switching $h$ and $k$; to avoid redundancy, we adopt the convention that $h>k$.
The field $K$ has integral basis $\left\{1,\alpha,\theta=\frac{1}{\sigma}\left(k \pm k\alpha + \widehat{\alpha}\right)\right\}$ \cite[p.176]{alacawilliams}. In cases where $\sigma=1$, it doesn't matter which of the plus or minus is chosen; in our calculations, we use the plus in these cases.
\end{remark}
We consider $\mathcal{O}_K$, the ring of integers of $K$, as the free $\mathbb{Z}$-module $[1,\alpha,\theta]$. Recall that a $\mathbb{Z}$-submodule is a subset of a $\mathbb{Z}$-module that is, itself, a $\mathbb{Z}$-module. Now, a non-zero ideal in $\mathcal{O}_K$ is necessarily a $\mathbb{Z}$-submodule of full rank, but not every rank-$3$ $\mathbb{Z}$-submodule of $\mathcal{O}_K$ is an ideal. Our first results concern the understanding of ideals in terms of their structure as $\mathbb{Z}$-submodules. We begin with a quotidian but useful fact about free $\mathbb{Z}$-modules, the proof of which is elementary and found in the appendix.
\begin{lemma}
\label{modulelemma}
Let $M=[u_1,\ldots,u_n]$ be a free $\mathbb{Z}$-module of rank $n$. Let $M'\subseteq M$ be a submodule of full rank. Then we can write $M'=[a_{1,1}u_1, \ldots, a_{n,1}u_1+\cdots+a_{n,n}u_n]$, with all coefficients integral. Furthermore, we can suppose without loss of generality that, for $i=1,\ldots,n$, we have $a_{i,i}$ strictly positive, and for $j=i+1,\ldots,n$ we have $0\leq a_{j,i}<a_{i,i}$. Subject to these conditions, all $\frac{n(n+1)}{2}$ coefficients are uniquely determined.
\end{lemma}
\begin{definition}
Let $I$ be a submodule of the ring of integers in $K=\mathbb{Q}(\alpha)$, with $m,h,k,\alpha,\text{ and } \theta$ as above, and per Lemma \ref{modulelemma}, let $I$ be written in the form $I=[a,b+c\alpha,d+e\alpha+f\theta]$ with $a,c,f>0$, $0\leq b<a$, $0\leq d<a$, and $0\leq e<c$. We refer to this expression as \textit{canonical form} for the submodule. The product $\mathrm{N}(I)=acf$ is uniquely determined by canonical form, and we define the \textit{norm of the submodule} to be this number. The smallest rational integer in the submodule, given by the number $a$ in canonical form, is defined as the \emph{length of the submodule}, sometimes denoted $\mathrm{Len}(I)$, and we will sometimes write $L$ instead of $a$.
\end{definition}
We note that the norm we have defined is precisely the index of the $\mathbb{Z}$-submodule in the $\mathbb{Z}$-module $\mathcal{O}_K$. Thus, in cases where the submodule is an ideal, the norm is the same as the ideal norm, defined in the usual way \cite[p.221]{alacawilliams}. In this case, we may refer to the submodule's norm, length, and canonical form as the norm, length, and canonical form of the ideal. We will now establish a proposition that gives us a way to determine, from its canonical form, when a submodule of $\mathcal{O}_K$ is an ideal. We first prove a technical lemma that will simplify our notation:
\begin{lemma}
\label{pqrstlemma}
Let $m, h, k, \sigma \textit{ and } \pm$ be as in Remark \ref{notation}.
Then the numbers:
\begin{enumerate}
\item $p=\frac{1}{\sigma}(hk\mp k^3)$
\item $q=\frac{1}{\sigma}(k-k^3)$
\item $r=\frac{1}{\sigma^2}(k^2\mp 2h + 1)$
\item $s=\frac{1}{\sigma^2}(h\mp k^4)$
\item $t=\frac{1}{\sigma}(k^3 + 2k)$
\end{enumerate}
are all integers.
\end{lemma}
\begin{proof}
In cases where $\sigma=1$ there is nothing to show. Thus, assume that $m=hk^2\equiv\pm 1\pmod{9}$, which implies that $h\equiv\pm 1\pmod{3}$, hence that $h^3\equiv\pm 1\pmod{9}$, and also that $k^2\equiv h^2\pmod{9}$. From these facts, we obtain the following:
\begin{enumerate}
\item $3p = hk\mp k^3 = k(h\mp k^2) \equiv k(h\mp 1) \equiv 0\pmod 3$
\item $3q = k-k^3 = k(1-k^2) \equiv k(0) \equiv 0\pmod{3}$
\item $9r = k^2\mp 2h + 1 \equiv h^2\mp 2h + 1 \equiv (h\mp1)^2 \equiv 0\pmod{9}$
\item $9s = h\mp k^4 \equiv h\mp h^4 \equiv h(1 \mp h^3) \equiv 0\pmod 9$
\item $3t = k^3+2k = 3k - 3q \equiv 0\pmod{3}$
\end{enumerate}
\end{proof}
We now state our condition for a submodule to be an ideal. A note of perspective: This theorem is a list of $16$ divisibility conditions; in the corresponding theorem on quadratic fields, the number of conditions is $3$ \cite[p.9]{mollin}. In generalizing these results to broader classes of number fields, characterizing ideals in terms similar to these seems likely to become tortuously technical, assuming that the underlying theory and approach remain the same.
\begin{proposition}[Identification of ideals]
\label{IDideal}
Take $m, h, k, \sigma, \pm, \alpha, \theta$ and $K$ as in Remark \ref{notation}, take $p, q, r, s$ and $t$ as in Lemma \ref{pqrstlemma}, and let $M=[a,b+c\alpha,d+e\alpha+f\theta]$ be a submodule of the $\mathbb{Z}$-module $\mathcal{O}_K$, in canonical form. Then $M$ is an ideal if and only if the following divisibility conditions are all satisfied:
\begin{enumerate}
\item
\begin{enumerate}
\item $c|a$
\item $c|b$
\end{enumerate}
\item
\begin{enumerate}
\item $f|a$
\item $f|\sigma kc$
\item $f|\sigma ke$
\item $f|b \pm k^2c$
\item $f|d \pm k^2e$
\end{enumerate}
\item
\begin{enumerate}
\item $cf|ae$
\item $cf|be - cd$
\item $cf|be \pm k^2ce$
\item $cf|df + qf^2 - \sigma ke^2 \mp 2k^2ef$
\item $cf|qef + sf^2 - de - tef \mp k^2e^2$
\end{enumerate}
\item
\begin{enumerate}
\item $acf|(k^2c^2 + b^2)f \mp k^2bcf - \sigma kc(be - cd)$
\item $acf|(pc - qb)cf + (b \pm k^2c)(be - cd)$
\item $acf|(pcf - k^2ce - bd - qbf)f \pm k^2f(2be - cd) + \sigma ke(be - cd)$
\item $acf|(pce - rk^2cf - qbe - sbf)f +(d + tf \pm k^2e)(be - cd)$
\end{enumerate}
\end{enumerate}
\end{proposition}
\begin{proof}
In order for $M$ to be an ideal, it must contain the products of $\alpha$ and $\theta$ with each of its $\mathbb{Z}$-generators. Thus, each of these products must be an integer combination of the generators. Multiplication by $\alpha$ and by $\theta$ are represented by the matrices:
\begin{equation*}
P_1 = \left[\begin{matrix} 0 & -k^2 & p \\ 1 & \mp k^2 & q \\ 0 & \sigma k & \pm k^2 \end{matrix}\right] \text{ and } P_2 = \left[\begin{matrix} 0 & p & -k^2r \\ 0 & q & s \\ 1 & \pm k^2 & t \end{matrix}\right],
\end{equation*}
respectively, with respect to our integral basis $\{1,\alpha,\theta\}$. We place the generators of $M$ in a matrix $A$, and we define a matrix $Q$ representing row operations that reduce $A$ as follows:
\begin{equation*}
A=\left[\begin{matrix} a & b & d \\ & c & e \\ & & f \end{matrix}\right],\,\, QA = \left[\begin{matrix}acf & & \\ & cf & \\ & & f \end{matrix}\right]
\end{equation*}
We note that $Q$ is an integer matrix, invertible over $\mathbb{Q}$, but not, in general, over $\mathbb{Z}$. Since we want the columns of $P_1A$ and $P_2A$ to be integer combinations of the columns of $A$, we use row reduction on the augmented matrix:
\begin{equation*}
\left[\begin{array}{c|cc} A & P_1A & P_2A \end{array}\right] \sim \left[\begin{array}{c|cc} QA & QP_1A & QP_2A \end{array}\right],
\end{equation*}
and check that the columns of $QP_1A$ and $QP_2A$ are integer combinations of the columns of $QA$. Since these products are unwieldy, we show results for the first two columns of $A$, and then the third:
\begin{align*}
QP_1\left[\begin{matrix} a & b \\ 0 & c \\ 0 & 0 \end{matrix}\right] &= \left[\begin{matrix} abf & -(k^2c^2 + b^2)f \pm k^2bcf + \sigma kc(be - cd) \\ af & bf \mp k^2cf - \sigma kce \\ 0 & \sigma kc \end{matrix}\right] \\
QP_2\left[\begin{matrix} a & b \\ 0 & c \\ 0 & 0 \end{matrix}\right] &= \left[\begin{matrix} a(be-cd) & (pc - qb)cf + (b \pm k^2c)(be - cd) \\ -ae & qcf - be \mp k^2ce \\ a & b \pm k^2c \end{matrix}\right] \\
QP_1\left[\begin{matrix} d \\ e \\ f \end{matrix}\right] &= \left[\begin{matrix} (pcf - k^2ce - bd - qbf)f \pm k^2f(2be - cd) + \sigma ke(be - cd) \\ df \mp k^2ef + qf^2 - \sigma ke^2 \mp k^2ef \\ \sigma ke \pm k^2f \end{matrix}\right] \\
QP_2\left[\begin{matrix} d \\ e \\ f \end{matrix}\right] &= \left[\begin{matrix} (pce - rk^2cf - qbe - sbf)f +(d + tf \pm k^2e)(be - cd) \\ qef + sf^2 - de \mp k^2e^2 - tef \\ d \pm k^2e + tf \end{matrix}\right]
\end{align*}
Again, in order for $M$ to be an ideal, all of the columns on the right sides of these equations must be integer combinations of the columns of $QA$. It is thus clear that our list of divisibility conditions is both necessary and sufficient for our result. This completes the proof.
\end{proof}
Using this result, we can fix $m$, thus choosing a field, and write down all ideals of a given length $L$. Recalling that the ideal $[a, b + c\alpha, d + e\alpha + f\theta]$ is called primitive if the greatest common divisor of the integers $a,b,c,d,e,f$ is $1$, it is also possible to list all primitive ideals with length $L$. We have written such an algorithm in Python, and it is available online.
\subsection{Reduced Ideals}
Next, we generalize the definition of a reduced ideal in a quadratic field (\cite[p.19]{mollin}) to fields of arbitrary degree. First, we define the ``shadow'' of a number.
\begin{definition}
Let $\beta$ be an algebraic number in a number field $K$, a finite extension of $\mathbb{Q}$. Then we define the shadow of $\beta$, $\mathrm{Sh}(\beta)=\mathrm{Sh}_K(\beta)$, as the product of all of its algebraic conjugates for that field, excluding itself.
\end{definition}
If $K$ is a quadratic extension of $\mathbb{Q}$ and $\beta$ is irrational, then $\mathrm{Sh}(\beta)$ is simply the algebraic conjugate of $\beta$. If $K$ is a degree $n$ extension, and $\beta$ is rational, then $\mathrm{Sh}(\beta)=\beta^{n-1}$. In any case, we have that $\mathrm{Sh}(\beta)\cdot\beta=\mathrm{N}(\beta)$, where $\mathrm{N}(\beta)$ represents the usual norm of an algebraic number in a number field. This gives us that $\mathrm{Sh}(\beta)\in K$; we also note that if $\beta$ is an algebraic integer, then $\mathrm{Sh}(\beta)$ is also an algebraic integer.
We give two formulas for the shadow of a number, when that number is given in terms of our known integral basis for pure cubic fields:
\begin{proposition}
\label{ShadowProp}
Take $m, h, k, \sigma, \pm, \alpha, \widehat{\alpha}, \theta \textit{ and } K$ as in Remark \ref{notation}, and let $\beta \in K$, so $\beta = x + y \alpha + z \theta$ for some rational $x,y,z$. Set:
\begin{equation*}
\left[\begin{array}{c} \widetilde{x} \\[1mm] \widetilde{y} \\[1mm] \widetilde{z} \end{array}\right] = \left[\begin{array}{c} x + \frac{zk}{\sigma} \\[1mm] y \pm \frac{zk}{\sigma} \\[1mm] \frac{z}{\sigma} \end{array}\right] = \left[\begin{array}{ccc} 1 & & \frac{k}{\sigma} \\[1mm] & 1 & \pm\frac{k}{\sigma} \\[1mm] & & \frac{1}{\sigma} \end{array}\right]\left[\begin{array}{c} x \\[1mm] y \\[1mm] z \end{array}\right]
\end{equation*}
Then we have:
\begin{align*}
\mathrm{Sh}(\beta) &= \widetilde{x}^2 + \alpha^2\widetilde{y}^2 + \widehat{\alpha}^2\widetilde{z}^2 - \alpha\widetilde{x}\widetilde{y} - \widehat{\alpha}\widetilde{x}\widetilde{z} - \alpha\widehat{\alpha}\widetilde{y}\widetilde{z} \\
&= (\widetilde{x}-\widehat{\alpha}\widetilde{z})^2 -\alpha(\widetilde{x}-\widehat{\alpha}\widetilde{z})(\widetilde{y}-\tfrac{\widehat\alpha}{\alpha}\widetilde{z}) + \alpha^2(\widetilde{y}-\tfrac{\widehat{\alpha}}{\alpha}\widetilde{z})^2.
\end{align*}
Also, $\mathrm{Sh}(\beta)\geq 0$ for all $\beta\in K$.
\end{proposition}
\begin{proof}
Note that we can write $\beta = x + y\alpha + z\theta = \widetilde{x} + \widetilde{y}\alpha + \widetilde{z}\widehat{\alpha}$.
Now let $\omega=e^{2\pi i/3}$. The algebraic conjugates of $\alpha$ are $\alpha\omega$ and $\alpha\omega^2$; the corresponding conjugates of $\widehat{\alpha}=\tfrac{\alpha^2}{k}$ are $\widehat{\alpha}\omega^2$ and $\widehat{\alpha}\omega$. Thus the algebraic conjugates of $\beta$ are $\widetilde{x}+\widetilde{y}\alpha\omega + \widetilde{z}\widehat\alpha\omega^2$ and $\widetilde{x}+\widetilde{y}\alpha\omega^2 + \widetilde{z}\widehat\alpha\omega$. Multiplying these expressions together, we obtain our first formula.
Observing the form of each term, we note that, considered as an equation in $\mathbb{R}^3$, our first formula vanishes along the line $\widetilde{x}=\alpha\widetilde{y}=\widehat{\alpha}\widetilde{z}$. We therefore rewrite it in terms of the displacements $\widetilde{x}-\widehat{\alpha}\widetilde{z}$ and $\widetilde{y}-\tfrac{\widehat{\alpha}}{\alpha}\widetilde{z}$, and we have the second formula.
Finally, it is clear from the second formula that the function $\mathrm{Sh}$ is a positive definite quadratic form in the variables $(\widetilde{x} - \widehat{\alpha}\widetilde{z})$ and $(\widetilde{y}-\tfrac{\widehat{\alpha}}{\alpha}\widetilde{z})$.
\end{proof}
We are now ready to give a general definition of a reduced ideal.
\begin{definition}
Let $K$ be a degree $n$ extension of the rationals, let $I$ be a primitive ideal in its ring of integers, and let $L=\mathrm{Len}(I)$. We define $I$ to be a \emph{reduced} ideal if for all $\beta\in I$, the pair of inequalities $|\beta|<L$ and $|\mathrm{Sh}(\beta)|<L^{n-1}$ together imply that $\beta=0$.
\end{definition}
In our case, with $K=\mathbb{Q}(\alpha)$, where $\alpha=\sqrt[3]{m}$, we have an explicit description of ideals, and we can determine whether an ideal is reduced by examining its canonical form. The following three results are inspired by Mollin's Theorem 1.4.1 and its corollaries in \cite{mollin}.
\begin{theorem}[Identification of reduced ideals]
\label{IDreduced}
Let $I=[L,b+c\alpha,d+e\alpha+f\theta]$ be the canonical form of a primitive ideal in $\mathcal{O}_K$, with $m, h, k, \sigma, \pm, \alpha, \widehat{\alpha}, \theta \text{ and } K$ as previously. Then $I$ is reduced if and only if for every integer pair $(y,z)\neq (0,0)$ satisfying:
\begin{enumerate}
\item $0\leq z<\frac{\sigma L}{\widehat{\alpha}}$
\item $f|z$
\item $\left(\frac{\widehat{\alpha}}{\alpha}\mp k\right)\frac{z}{\sigma} - \frac{2L}{\sqrt{3}\alpha} \leq y \leq \frac{\sigma L - \widehat{\alpha}z + \sqrt{(\sigma L - \widehat{\alpha}z)(\sigma L + 3\widehat{\alpha}z)}}{2\sigma\alpha} \mp \frac{kz}{\sigma}$
\item $c|y-\frac{ez}{f}$,
\end{enumerate}
we have the inequality:
\begin{equation*}
\left\lfloor\frac{Q-(\frac{ybf+zcd-zbe}{cf})}{L}\right\rfloor<\frac{P-(\frac{ybf+zcd-zbe}{cf})}{L},
\end{equation*}
where
\begin{align*}
P &= P(y,z) \\
&=\max\left\{\frac{-\sigma L - kz - \left(\alpha(\sigma y \pm kz) + \widehat{\alpha}z\right)}{\sigma},\frac{\sigma\alpha y \pm \alpha kz + \widehat{\alpha}z - 2kz - \sqrt{(2\sigma L)^2 - 3(\sigma\alpha y \pm \alpha kz - \widehat{\alpha}z)^2}}{2\sigma} \right\},
\end{align*}
and
\begin{align*}
Q &= Q(y,z) \\
&=\min\left\{\frac{\sigma L - kz - \left(\alpha(\sigma y \pm kz) + \widehat{\alpha}z\right)}{\sigma},\frac{\sigma\alpha y \pm \alpha kz + \widehat{\alpha}z - 2kz + \sqrt{(2\sigma L)^2 - 3(\sigma\alpha y \pm \alpha kz - \widehat{\alpha}z)^2}}{2\sigma} \right\}.
\end{align*}
\end{theorem}
\begin{proof}
Let $\phi:\mathcal{O}_K\to \mathbb{R}^3$ be the additive homomorphism defined by $1\mapsto(1,0,0)$, $\alpha\mapsto(0,1,0)$ and $\theta\mapsto(0,0,1)$. This map is an isomorphism of the $\mathbb{Z}$-modules $\mathcal{O}_K$ and $\mathbb{Z}^3$, the latter of which is embedded in $\mathbb{R}^3$.
Now, let $\beta\in I\subset\mathcal{O}_K$. Our condition that $|\beta|<\mathrm{Len}(I)$ transforms into the geometric condition that the point $\phi(\beta)$ lies strictly between two planes: $\widetilde{x}+\alpha\widetilde{y}+\widehat{\alpha}\widetilde{z}=\pm L$. (Here we take $\widetilde{x}, \widetilde{y}, \widetilde{z}$ as in \ref{ShadowProp}.) Our second condition, that $\left|\mathrm{Sh}(\beta)\right|<\mathrm{Len}(I)^2$, transforms into the geometric condition that the point $\phi(\beta)$ lies in the interior of the oblique elliptic cylinder given by $(\widetilde{x} - \widehat{\alpha}\widetilde{z})^2 - \alpha(\widetilde{x} - \widehat{\alpha}\widetilde{z})(\widetilde{y} - \tfrac{\widehat\alpha}{\alpha}\widetilde{z}) + \alpha^2(\widetilde{y} - \tfrac{\widehat{\alpha}}{\alpha}\widetilde{z})^2 = L^2$.
These conditions define an open region $R$, between two planes and inside an elliptic cylinder, which is bounded and symmetric about the origin; it contains the images of $0$ and of at most finitely many other elements in the ideal $I$. The ideal is reduced if and only if $\phi(\beta)\not\in R$ for every non-zero $\beta\in I$. It is thus sufficient to write conditions establishing that the interior of $R$ contains no $\phi$-images of non-zero ideal elements.
First, we can ignore the line $y=z=0$, which contains images of rationals, because it intersects the boundaries of $R$ at $(\pm L,0,0)$, and there are no non-zero rational ideal elements between these two points.
Now, the entire region $R$ satisfies $|z|\leq\sup\{z : z\in R\} = \frac{\sigma L}{\widehat{\alpha}}$, because this is the maximum $z-$coordinate of the intersection of our elliptic cylinder with either plane. By symmetry, and because images of ideal elements have integer coordinates in $x, y, z$, we need only consider integer values from $z=0$ to $z=\left\lfloor\frac{\sigma L}{\widehat{\alpha}}\right\rfloor$. Furthermore, images of ideal elements will have $z$-coordinates that are multiples of $f$. Thus we obtain conditions (1) and (2) of this theorem.
For each integer $z$ in that range, we can bound possible $y$-values with the inequalities given in condition (3). The lower bound is the minimum $y$-value attained by a point on the elliptic cylinder, and the upper bound is the maximum $y$-value of a point of intersection of the cylinder and the planes. The final condition, that $y-\frac{ez}{f}\equiv 0\pmod{c}$, simply restricts our checking to $y$ values where images of ideal elements occur.
For each $(y,z)$ pair that we check, we wish to verify that no image of an ideal element lies in region $R$ along the line corresponding to those $y$ and $z$ values. This can be expressed by saying that the first image to the left of the right edge of $R$ is also to the left of the left edge of $R$. We write that geometric condition algebraically as the inequality $\left\lfloor\frac{Q-(\frac{ybf+zcd-zbe}{cf})}{L}\right\rfloor<\frac{P-(\frac{ybf+zcd-zbe}{cf})}{L}$, where $P=P(y,z)=\inf\{x:(x,y,z)\in R\}$ and $Q=Q(y,z)=\sup\{x:(x,y,z)\in R\}$ are the $x$-coordinates of the left and right edges of $R$, respectively. This proves the theorem.
\end{proof}
Using the same geometric construction (and the same notation) from this proof, we establish the following corollaries:
\begin{corollary}[Lower bound]
\label{LB}
If $L\leq\min\{\alpha,\frac{\widehat{\alpha}}{\sigma}\}$, then $I$ is reduced.
\end{corollary}
\begin{proof}
Since $L\leq\frac{\widehat{\alpha}}{\sigma}$, then in the above theorem, the only $z$-value satisfying inequality (1) is $z=0$. The intersection of $R$ with the plane $z=0$ has maximum/minimum $y$-values of $\pm \frac{L}{\alpha}$, so with $L<\alpha$ the only integer $y$-value in our region is $y=0$. As noted in the proof of the theorem, no images of non-zero ideal elements are found in the interior of $R$ along the line $y=z=0$.
\end{proof}
\begin{lemma}
\label{fbound}
Using the notation from the theorem, if $I=[L,b+c\alpha,d+e\alpha+f\theta]$ is a primitive ideal, then $f|\sigma k$.
\end{lemma}
\begin{proof}
Let $g=(f,k)$, so we can write $f=gf'$ and $k=gk'$ with $(f',k')=1$. Then it follows from the ideal conditions $f|\sigma kc$ and $f|\sigma ke$ that $f'|\sigma c$ and $f'|\sigma e$, respectively. In the case where $\sigma=3$ and $3|f'$, let $f''=\frac{f'}{3}$, else let $f''=f'$. Then we have that $f''$ divides $a,c,e \text{ and } f$. Examining the conditions $f|b \pm k^2c$ and $f|d \pm k^2e$, we see that $f''$ also divides $b \text{ and } d$.
Since I is primitive, these divisibility conditions give us that $f''=1$, so $f=g$ or $f=3g$, the latter only if $\sigma=3$ and $3|f'$. In either case, $f|\sigma k$, as claimed.
\end{proof}
\begin{theorem}[Upper bound]
\label{UB}
If $L>\frac{6\sqrt{3}m}{\pi}$, then $I$ is not reduced.
\end{theorem}
\begin{proof}
The region $R$ in Theorem \ref{IDreduced} is convex and symmetric about the origin, and we claim its volume is equal to $\frac{4\pi \sigma L^3}{3\sqrt{3}\alpha\widehat{\alpha}}$. First, in the basis $\{\widetilde{x},\widetilde{y},\widetilde{z}\}$ the perpendicular distance between the planes $\widetilde{x}+\alpha \widetilde{y}+\widehat{\alpha}\widetilde{z}=\pm L$ is $\frac{2L}{\sqrt{1+\alpha^2+\widehat{\alpha}^2}}$, and the area of the ellipse at each end of $R$ is $\frac{2\pi L^2\sqrt{1+\alpha^2+\widehat{\alpha}^2}}{3\sqrt{3}\alpha\widehat{\alpha}}$. That gives us a volume of $\frac{4\pi L^3}{3\sqrt{3}\alpha\widehat{\alpha}}$. Translating back to the basis $\{x,y,z\}$, we pick up a factor of $\sigma$.
It now follows from Minkowski's convex body theorem (see, e.g. page 306 in \cite{alacawilliams}) that if $I$ is reduced, then $\mathrm{N}(I)\geq \frac{\pi\sigma L^3}{6\sqrt{3}\alpha\widehat{\alpha}}$, or $\frac18$ the volume of $R$. On the other hand, since $I$ is primitive if it is reduced, we also have from our canonical form and from Lemma \ref{fbound} that $\mathrm{N}(I)=acf\leq \sigma kL^2$. These two inequalities are incompatible for $L>\frac{6\sqrt{3}m}{\pi}$, so we have our result.
\end{proof}
We note a lack of symmetry in this upper bound formula. Since the field $K$ is generated indifferently by $\sqrt[3]{m}$ or $\sqrt[3]{\widehat{m}}$ (where $m=hk^2$ and $\widehat{m}=h^2k$), it seems odd that our upper bound includes one or the other, and isn't simply in terms of $h$ and $k$. Indeed, if we swap $h$ and $k$, we would find that ideals cannot be reduced with length greater than $\frac{6\sqrt{3}\widehat{m}}{\pi}$, but since $m<\widehat{m}$, the stated result is stronger.
\begin{theorem}
\label{finiteness}
Let $K=\mathbb{Q}(\alpha)$ where $\alpha^3=m$, for a cube-free integer $m$. Then the ring $\mathcal{O}_K$ contains at least one, and only finitely many, reduced ideals.
\end{theorem}
\begin{proof}
The entire ring $\mathcal{O}_K$ is always a reduced ideal, so we have at least one. By the above theorem, the length $L$ of a reduced ideal is bounded, say $L<L_0$. Thus, in accordance with the observation made in our proof of Theorem \ref{UB}, its norm is also bounded, by $\sigma k L_0^2$. Since there are only finitely many ideals of a given norm \cite[p.313]{alacawilliams}, we have this result as well.
\end{proof}
The above results (\ref{IDreduced} - \ref{UB}) give us a way of efficiently computing a complete list of reduced ideals in the fields we have been studying. We check for them by examining ideals of each length less than the upper bound of Theorem \ref{UB}. For each length, we produce a list of ideals, per the remarks following Proposition \ref{IDideal}.
As long as the length is less than $\min\left\{\alpha,\frac{\widehat{\alpha}}{\sigma}\right\}$, each ideal of that length is necessarily reduced by Corollary \ref{LB}. For each length between this minimum value and the upper bound, we examine each ideal. For each one, we obtain a list of pairs $(y,z)$ satisfying conditions (1), (2), (3) and (4). For each such pair, we calculate $P$ and $Q$ and check our main condition from Theorem \ref{IDreduced}. In Appendix 2, we provide Python code that executes this algorithm.
(In the code, for the sake of efficiency, we are able to skip the calculation for some ideals that, based on their canonical form, cannot be reduced. In short, if either coefficient $c$ or $f$ is too small, then Minkowski's convex body theorem makes it impossible for the ideal to be reduced.)
The following definition affords a different characterization of reduced ideals which will prove useful.
\begin{definition}
Let $I$ be an ideal (or fractional ideal) in a number field. Then $\beta\in I$ is a \emph{minimal element of I} if $\left|\gamma\right|<\left|\beta\right|$ and $\left|\mathrm{Sh}(\gamma)\right|<\left|\mathrm{Sh}(\beta)\right|$ for $\gamma\in I$ together imply that $\gamma=0$.
\end{definition}
Now we can characterize reduced ideals in terms of minimal elements.
\begin{theorem}
Let $I$ be an ideal in a number field. Then $I$ is reduced if and only if there is some rational $q\in I$ that is a minimal element of $I$.
\end{theorem}
The proof is immediate from the definition. In particular, if $I$ is reduced, then $q=\pm \mathrm{Len}(I)$ is a minimal element, and conversely.
\section{Periodic norm sequences}
As seen in Mollin's \textit{Quadratics}, the terms in a quadratic number's continued fraction expansion can be put in correspondence with a sequence of ideals, and the eventual periodicity of these sequences corresponds to the presence of finitely many reduced ideals in an equivalence class \cite[p. 44]{mollin}. We now develop a corresponding notion for a class of cubic numbers.
Throughout this section, let $h$, $k$, $\sigma$, $m$, $\alpha$, $\theta$, $\widehat\alpha$, $K$, and $\pm$ be as in Remark \ref{notation}, and for any triple $(x,y,z)$ define $(\widetilde{x},\widetilde{y},\widetilde{z})$ as in Proposition \ref{ShadowProp}. Let $\phi$ be the additive homomorphism defined in the proof of Theorem \ref{IDreduced}.
Define the functions $\mathrm{Val},\mathrm{Sh}:\mathbb{R}^3\to\mathbb{R}$ by the formulas $\mathrm{Val}(x,y,z)= x+ \alpha y + \theta z$ and $\mathrm{Sh}(x,y,z)=(\widetilde{x}-\widehat\alpha\widetilde{z})^2 - (\widetilde{x}-\widehat\alpha\widetilde{z})(\alpha\widetilde{y}-\widehat\alpha\widetilde{z}) + (\alpha\widetilde{y}-\widehat\alpha\widetilde{z})^2$. Also define the function $\mathrm{N}(x,y,z)=\mathrm{Sh}(x,y,z)\mathrm{Val}(x,y,z)$. Then, for $\beta\in K$, we have $\mathrm{Val}(\phi(\beta))=\beta$, $\mathrm{Sh}(\phi(\beta))=\mathrm{Sh}(\beta)$ and $\mathrm{N}(\phi(\beta))=\mathrm{N}(\beta)$. Furthermore, if $(x,y,z)\in\mathbb{Q}^3$, then $\mathrm{Val}(x,y,z)=\phi^{-1}(x,y,z)$, $\mathrm{Sh}(x,y,z)=\mathrm{Sh}(\phi^{-1}(x,y,z))$, and $\mathrm{N}(x,y,z)=\mathrm{N}(\phi^{-1}(x,y,z))$.
Taking $a$ and $b$ positive, define the region:
\begin{equation*}
R_{a,b}=\left\{(x,y,z)\in\mathbb{R}^3 \,:\, \left|\mathrm{Val}(x,y,z)\right|<a,\,\mathrm{Sh}(x,y,z)<b \right\}.
\end{equation*}
This region is convex and symmetric about the origin. (In this notation, the region examined in Theorem \ref{IDreduced} is $R_{L,L^2}$.) We now use regions of this form to define, associated with $\alpha$, a sequence $\left(\beta_n\right)_{n\geq 0}$ of algebraic numbers, and a sequence $\left(N_n\right)_{n\geq 0}$ of integers.
Let $\beta_0=1$, and $P_0=\phi(\beta_0)=(1,0,0)$. We begin with $R_{a_0,b_0}=R_{1,1}$, a region with the point $P_0$ on its boundary, and with no non-zero lattice points in its interior. To find $P_{n+1}$, let $a_{n+1}$ be the maximum positive number such that $R_{a_{n+1},b_n}$ has no lattice point in its interior. Such a number is guaranteed by Minkowski's convex body theorem. We will actually encounter two lattice points at once, because of symmetry; take $P_{n+1}$ to be the one for which the function $\mathrm{Val}$ is positive. We have $a_{n+1}=\mathrm{Val}(P_{n+1})$; also set $b_{n+1}=\mathrm{Sh}(P_{n+1})$, and let $\beta_{n+1}=\phi^{-1}(P_{n+1})$.
\begin{definition}
The sequence $\left(\beta_n\right)_{n\geq 0}$ is the \emph{minimal sequence associated with $\alpha$}, and $\left(\mathrm{N}_n\right)_{n\geq 0}=(\mathrm{N}(P_n))$ is the \emph{norm sequence associated with $\alpha$}.
\end{definition}
We have Python code online that calculates the minimal sequence and norm sequence of $\alpha$ given an appropriate value for $m$.
We note that the minimal sequence of $\alpha$ is precisely the sequence of minimal elements of $\mathcal{O}_K$, starting with $\beta_0=1$ and proceeding through minimal elements in order of increasing absolute value. The algorithm could be modified to run backwards, by holding cylinder heights constant and increasing their widths to find new points. This would give us the rest of the positive minimal elements, those with absolute values between $0$ and $1$. However, as we shall see, the sequence we have defined contains all the information we need. We first note some useful facts:
\begin{proposition}
If $\beta$ is a minimal element in the ideal (or fractional ideal) $I$, and $\gamma$ is another field element, then $\gamma\beta$ is a minimal element in the ideal (or fractional ideal) $(\gamma)\cdot I$.
\end{proposition}
\begin{proof}
This follows immediately because the functions $\mathrm{Sh}:K\to\mathbb{R}$ and $|\cdot|:K\to\mathbb{R}$ are both multiplicative.
\end{proof}
\begin{remark}[Dirichlet's Unit Theorem]
The unit group of $K$, a number field of degree $3$ with one real embedding and one pair of complex embeddings (i.e., a cubic field with negative discriminant), is of the form $U_K=\{\pm\varepsilon_0^j | j\in\mathbb{Z}\}$, where $\varepsilon_0\in K$ is the fundamental unit of the number field, which satisfies $\varepsilon_0>1$. (See, e.g., \cite[p.346,p.362]{alacawilliams}.)
\end{remark}
Now we are ready to show that the norm sequence we have defined is indeed periodic.
\begin{theorem}
\label{periodic}
The norm sequence of $\alpha$ is periodic, and the minimal sequence has the property that $\beta_{i+l}=\varepsilon_0\beta_i$, where $l$ is the period of the norm sequence, and $\varepsilon_0$ is the fundamental unit of the field $K=\mathbb{Q}(\alpha)$.
\end{theorem}
We note that this theorem is closely analogous to Proposition 2.6 from \cite{buchmannwilliams}.
\begin{proof}
Let $\varepsilon_0$ be the fundamental unit of $K$. Our first observation is that, if $\beta$ is any minimal element, then so is $\pm\varepsilon_0^j\beta$ for $j\in\mathbb{Z}$. So, the set of minimal elements is the set of all associates (unit multiples) of minimal elements on the interval $[1,\varepsilon_0)$. Let these elements be denoted $1=\beta_0<\cdots<\beta_{l-1}$. We know there are only finitely many, for a lattice can only intersect a compact region (the closure of $R_{\varepsilon_0,1}$) in finitely many points. Then the minimal sequence is of the form:
\begin{equation*}
(1=\beta_0,\ldots,\beta_{l-1},\varepsilon_0,\ldots,\varepsilon_0\beta_{l-1},\varepsilon_0^2,\ldots).
\end{equation*}
This sequence has the property that $\beta_{i+l}=\varepsilon_0\beta_i$, and taking norms, this gives us that $\mathrm{N}_{i+l}=\mathrm{N}_i$. Thus, we have periodicity. Furthermore, we know that $\mathrm{N}_0=1=\mathrm{N}_{l}=\mathrm{N}(\varepsilon_0)$, and since $\varepsilon_0$ is the fundamental unit, we know that $\mathrm{N}_i>1$ for any positive $i<l$. This gives us that the period of the norm sequence is precisely $l$.
\end{proof}
Now, let $\mathcal{M}$ be the set of elements in the minimal sequence of $\alpha$ on the interval $[1,\varepsilon_0)$, and let $\mathcal{R}$ be the set of reduced principal ideals in $K$. We construct functions $F:\mathcal{M}\to\mathcal{R}$ and $G:\mathcal{R}\to\mathcal{M}$, which we will show to be inverses. This will establish a bijection between our two sets.
First, let $\gamma$ be a minimal element of $\mathcal{O}_K$ satisfying $1\leq \gamma<\varepsilon_0$, and let $J$ be the fractional ideal generated principally by $\gamma^{-1}$. Since $J=(\gamma^{-1})\cdot\mathcal{O}_K$, then $1=\gamma^{-1}\gamma$ is minimal in $J$. Let $L$ be the least integer such that $I=(L)J=(\frac{L}{\gamma})$ is an integral ideal, which we note is primitive. Now, $L=L\cdot 1$ is minimal in $I$. Since $L$ is rational, then $I$ is reduced, and we set $F(\gamma)=I$.
In the other direction, let $I$ be a reduced principal ideal. Then $I=(\eta)$ for some integer $\eta>0$. Since $I$ is reduced, we have that $L=\mathrm{Len}(I)$ is minimal in $(\eta)$. Then $\widehat{\gamma}=L\eta^{-1}$, must be minimal in $(\eta^{-1})(\eta)=\mathcal{O}_K$. Let $j=-\lfloor\log_{\varepsilon_0}\widehat{\gamma}\rfloor$, and let $\gamma=\varepsilon_0^j\widehat{\gamma}$. Then $\gamma$ is a minimal element in $\mathcal{O}_K$ satisfying $1\leq\gamma<\varepsilon_0$, so we set $G(I)=\gamma$.
Since the ideal $I$ could be written as a principal ideal in more than one way, we need to check that $G$ is well-defined. However, if $I=(\eta')$, then we know that $\eta'=\eta\varepsilon_0^r$ for some integer $r$. Thus, in the above argument, we obtain a $\widehat{\gamma}'$ that is an associate of $\widehat{\gamma}$, and therefore an associate of the same $\gamma$. So, $G$ is well-defined.
\begin{theorem}
\label{bijection}
The functions $F$ and $G$ defined above are inverses, providing a bijection between the sets $\mathcal{M}$ and $\mathcal{R}$.
\end{theorem}
We note that this result mirrors Proposition 4.3 from \cite{buchmannwilliams}.
\begin{proof}
First, we calculate $F(G(I))$, where $I=(\eta)$ is a reduced principal ideal with length $L$. We have that $G(I)=\gamma$ where $\gamma$ is some associate of $\widehat{\gamma}=\frac{L}{\eta}$. To apply $F$, we must choose the smallest integer $L'$ such that $\left(L'\gamma^{-1}\right)$ is an integer ideal. We know that $I=(\eta)=\left(L\widehat{\gamma}^{-1}\right)=\left(L\gamma^{-1}\right)$ is an integer ideal, and furthermore, a primitive one because it is reduced. If $L'<L$, then $I$ would not be primitive, so we have $L'=L$, and
\begin{equation*}
F(G(I))=F(\gamma)=\left(\frac{L}{\gamma}\right)=\left(\frac{L}{\widehat{\gamma}}\right)=(\eta)=I,
\end{equation*}
as desired.
In the other direction, we consider $G(F(\gamma))$, where $\gamma$ is a minimal element in $\mathcal{O}_K$ satisfying $1\leq\gamma<\varepsilon_0$. Let $L$ be the least positive integer such that $I=\left(\frac{L}{\gamma}\right)$ is an integer ideal. Then:
\begin{equation*}
G(F(\gamma))=G\left(\left(\frac{L}{\gamma}\right)\right)=\varepsilon_0^j\frac{\mathrm{Len}(L/\gamma)}{L/\gamma}=\varepsilon_0^j\frac{L}{L/\gamma}=\varepsilon_0^j\gamma,
\end{equation*}
where $j=-\left\lfloor\log_{\varepsilon_0}\gamma\right\rfloor=0$. This completes our proof.
\end{proof}
The above result seems to scratch the surface of a theory of generalized continued fractions, i.e., sequences that are sensitive to the structure of cubic fields, analogously as ordinary continued fractions are sensitive to the structure of quadratic fields. This appears to be a possible area for further research.
\section{Appendix 1: Proof of lemma on Z-modules}
The following is a proof of Lemma \ref{modulelemma}:
\begin{replemma}{modulelemma}
Let $M=[u_1,\ldots,u_n]$ be a free $\mathbb{Z}$-module of rank $n$. Let $M'\subseteq M$ be a submodule of full rank. Then we can write $M'=[a_{1,1}u_1, \ldots, a_{n,1}u_1+\cdots+a_{n,n}u_n]$, with all coefficients integral. Furthermore, we can suppose without loss of generality that, for $i=1,\ldots,n$, we have $a_{i,i}$ strictly positive, and for $j=i+1,\ldots,n$ we have $0\leq a_{j,i}<a_{i,i}$. Subject to these conditions, all $\frac{n(n+1)}{2}$ coefficients are uniquely determined.
\end{replemma}
\begin{proof}
Let $M=[u_1,\ldots,u_n]$, and let $M'\subseteq M$ have full rank. We note that, if $n=1$, there is nothing to show, and we proceed by induction on $n$.
Let $\widetilde{M}=[u_1,\ldots,u_{n-1}]$; then $M'\cap\widetilde{M}$ is a submodule of $\widetilde{M}$ with full rank. By induction, we have that $M'\cap\widetilde{M}=[w_1,\ldots,w_{n-1}]$ with each $w_i=a_{i,1}u_1 + \cdots + a_{i,i}u_i$, all coefficients $a_{i,j}$ integral. Furthermore, for $i=1,\ldots,n-1$, we have $a_{i,i}>0$, and for $j=i+1,\ldots,n-1$, we have $0\leq a_{j,i}<a_{i,i}$.
Now, we define the set $I=\{k\in\mathbb{Z} : ku_n\in M'\oplus u_1\mathbb{Z}\oplus\cdots\oplus u_{n-1}\mathbb{Z}\}$. We observe that $I$ is a non-zero ideal of $\mathbb{Z}$, so put $I=(a_{n,n})$. By the definition of $I$, we have integers $b_i\in\mathbb{Z}$, for $i=1,\ldots,n-1$, such that $\widehat{w}_n=b_1u_1 + \cdots + b_{n-1}u_{n-1} + a_{n,n}u_n \in M'$.
Using the division algorithm repeatedly, we can write:
\begin{align*}
b_{n-1} &= q_{n-1}a_{n-1,n-1} + a_{n,n-1} \\
b_{n-2} - q_{n-1}a_{n-1,n-2} &= q_{n-2}a_{n-2,n-2} + a_{n,n-2} \\
&\vdots \\
b_1 - q_{n-1}a_{n-1,1} - \cdots - q_2a_{2,1} &= q_1a_{1,1} + a_{n,1}
\end{align*}
We thus obtain an element of $M'$:
\begin{align*}
w_n &= \widehat{w}_n - q_{n-1}w_{n-1} - \cdots - q_1w_1 \\
&= a_{n,1}u_1 + \cdots + a_{n,n}u_n,\\
\end{align*}
with coefficients satisfying the required conditions. We must now show that the set $\{w_1,\ldots,w_n\}$ spans $M'$.
It is clear that $N=[w_1,\ldots,w_n]\subseteq M'$. For the reverse inclusion, take an element $m\in M'$, and write $m=k_1u_1+\cdots+k_nu_n$ in terms of our original integral basis. Now, $k_n\in I$, so $k_n=t_na_{n,n}$ for some integer $t_n$. Subtracting $m-t_nw_n$, we obtain $(k_1-t_na_{n,1})u_1 + \cdots + (k_{n-1}-t_na_{n,n-1})u_{n-1}$, an element of $M'\cap\widetilde{M}$.
By the induction hypothesis, this element can be written $m-t_nw_n = t_1w_1 + \cdots + t_{n-1}w_{n-1}$, which puts:
\begin{equation*}
m = t_1w_1 + \cdots + t_nw_n \in N,
\end{equation*}
as desired.
To see that the expression is unique subject to our constraints, suppose that $M'$ is also given by $M'=[w_1',\ldots,w_n']$, with $w_1'=a_{1,1}'u_1, \ldots, w_n'=a_{n,1}'u_1 + \cdots + a_{n,n}'u_n$, and that the positivity and bounding constraints are satisfied by these coefficients. Examining the differences $w_i-w_i'\in M'$, we see that all coefficients must match, proving uniqueness.
\end{proof}
\section{Appendix 2: Python code for listing ideals}
The following algorithm can list every primitive ideal up to the Minkowski bound, or it can list every reduced ideal, in a given pure cubic field. Each ideal is listed as an ordered sextuple $(a,b,c,d,e,f)$, where the entries are the coefficients of the ideal's canonical form.
\begin{lstlisting}
import math
from fractions import gcd
def kPart(n):
    #Input=integer (n>=1)
    #Output=largest integer whose square divides n
    #NOTE(review): the published listing lost its indentation and was
    #truncated at every '%' (read as a LaTeX comment); the modulo test
    #"n%kPart**2" is restored here from the stated specification.
    kPart=int(math.floor(math.sqrt(n)))
    success=0
    #walk down from floor(sqrt(n)) to the first k with k**2 | n
    while (kPart>1)*(success==0):
        if n%kPart**2!=0:
            kPart=kPart-1
        else:
            success=1
    return kPart
def cubepart(m):
    #Input=integer (m>=1)
    #Output=2-vector:
    #1st entry=cube-free part of m
    #2nd entry=largest integer whose cube divides m
    #NOTE(review): the published listing lost indentation and the modulo
    #test after '%'; both are restored here. The original also computed
    #the cube root as math.exp(math.log(m)/3), which can land just below
    #an exact cube (e.g. m=27) and miss the true divisor; we use an
    #integer-corrected cube root instead.
    div=int(round(m**(1.0/3.0)))
    while div**3>m:
        div=div-1
    while (div+1)**3<=m:
        div=div+1
    #walk down to the first div with div**3 | m
    while div>1 and m%div**3!=0:
        div=div-1
    return(m//div**3,div)
def isIdeal(a,b,c,d,e,f,m):
#Checks whether the module [a, b+c*alpha, d+e*alpha+f*theta] in canonical
#form is an ideal of O_K, K=Q(m^(1/3)), by testing the divisibility
#conditions of the canonical-form theorem. Returns 1 if all hold, else 0.
#NOTE(review): this listing was garbled in extraction -- indentation is
#lost and every "if (...)" below was truncated at a '%' character
#(Python modulo read as a LaTeX comment), dropping its
#"% <modulus> != 0:" tail. Restore from the original source before running.
k=kPart(m)
h=m//k**2
#N (the norm a*c*f) and area are computed but unused below --
#presumably kept for reference; verify against the original source.
N=a*c*f
area=c*f
#sigma and pm encode ramification data from the Remark on notation:
#the truncated tests below presumably read "m**2 % 9 == 1" and a sign
#condition on m mod 9 -- TODO confirm against the original source.
sigma=1
if m**2
sigma=3
pm=1
if (m
pm=0-1
#p,q,r,s,t: integer structure constants used in the multiplication-
#closure conditions (they appear in the matrix products of the proof).
p=(h*k-pm*k**3)//sigma
q=(k-k**3)//sigma
r=(k**2-pm*2*h+1)//sigma**2
s=(h-pm*k**4)//sigma**2
t=(k**3+2*k)//sigma
ideal=1
#The six divisibility tests below mirror the column conditions derived
#from the products Q*P_1*A and Q*P_2*A in the canonical-form proof.
if (d*f+q*f**2-sigma*k*e**2-pm*2*k**2*e*f)
ideal=0
else:
if (q*e*f+s*f**2-d*e-pm*k**2*e**2-t*e*f)
ideal=0
else:
if ((k**2*c**2 + b**2)*f - pm*k**2*b*c*f - sigma*k*c*(b*e - c*d))
ideal=0
else:
if (c*f*(p*c-q*b)+(b+pm*k**2*c)*(b*e-c*d))
ideal=0
else:
if (f*(p*c*f-k**2*c*e-b*d-q*b*f)+pm*k**2*f*(2*b*e-c*d)+sigma*k*e*(b*e-c*d))
ideal=0
else:
if (f*(p*c*e-r*k**2*c*f-q*b*e-s*b*f)+(d+t*f+pm*k**2*e)*(b*e-c*d))
ideal=0
return ideal
def isPrimitive(a,b,c,d,e,f):
    #Returns 1 if the ideal [a, b+c*alpha, d+e*alpha+f*theta] is primitive,
    #i.e. gcd(a,b,c,d,e,f)=1 (per the paper's definition), else 0.
    #Any common divisor of all six coefficients divides c and f, so the
    #search is bounded by min(c,f).
    #NOTE(review): the published listing lost indentation and was truncated
    #at the '%' character; the restored test checks whether some divisor>=2
    #divides all six coefficients -- confirm against the original source.
    primitive=1
    divisor=2
    while (divisor<=c)*(divisor<=f)*(primitive==1):
        if (a%divisor==0)*(b%divisor==0)*(c%divisor==0)*(d%divisor==0)*(e%divisor==0)*(f%divisor==0):
            primitive=0
        else:
            divisor=divisor+1
    return primitive
def isReduced(a,b,c,d,e,f,m):
#Implements the Identification-of-reduced-ideals theorem: returns 1 if
#the primitive ideal [a, b+c*alpha, d+e*alpha+f*theta] of length a is
#reduced, 0 otherwise, by scanning lattice lines (y,z) through the open
#region R = R_{a,a^2} (between two planes, inside an elliptic cylinder).
#NOTE(review): this listing was garbled in extraction -- indentation is
#lost and the lines "if m**2", "if m" and "while (y-e*z//f)" were
#truncated at a '%' character (Python modulo read as a LaTeX comment).
#Restore the missing modulo tails from the original source before running.
isReduced=1
k=kPart(m)
h=m//k**2
sigma=1
pm=1
if m**2
sigma=3
if m
pm=0-1
#alpha = m^(1/3); alphahat = (h^2 k)^(1/3) = alpha^2/k
alpha=math.exp(math.log(m)/3)
alphahat=math.exp(math.log(k*h**2)/3)
#By the Lower bound corollary, ideals with length a <= min(alpha,
#alphahat/sigma) are automatically reduced, so only larger a is checked.
if a>min(alpha,alphahat/sigma):
z=0
#Theorem condition (1): 0 <= z < sigma*a/alphahat;
#condition (2), f | z, is enforced by the step z=z+f at the bottom.
maxZ=sigma*a/alphahat
while (z<maxZ)*(isReduced==1):
#Theorem condition (3): admissible y-range on this z-slice
yMin=(alphahat/alpha - pm*k)*z/sigma - 2*a/(math.sqrt(3)*alpha)
yMax=(sigma*a - alphahat*z + math.sqrt((sigma*a-alphahat*z)*(sigma*a+3*alphahat*z)))/(2*sigma*alpha) - pm*k*z/sigma
y=math.ceil(yMin)
#Theorem condition (4): advance y to the first value with c | (y - e*z/f);
#the step y=y+c below then preserves the congruence.
while (y-e*z//f)
y=y+1
while (y<yMax)*(isReduced==1):
if ((y**2+z**2)!=0):
#P and Q are the x-coordinates of the left/right edges of R on this
#(y,z) line: intersections with the planes (P1,Q1) and cylinder (P2,Q2).
P1=(0-sigma*a-k*z-(alpha*(sigma*y+pm*k*z)+alphahat*z))/sigma
P2=(sigma*alpha*y+pm*alpha*k*z+alphahat*z-2*k*z-math.sqrt((2*sigma*a)**2-3*(sigma*alpha*y+pm*alpha*k*z-alphahat*z)**2))/(2*sigma)
P=max(P1,P2)
Q1=(sigma*a-k*z-(alpha*(sigma*y+pm*k*z)+alphahat*z))/sigma
Q2=(sigma*alpha*y+pm*alpha*k*z+alphahat*z-2*k*z+math.sqrt((2*sigma*a)**2-3*(sigma*alpha*y+pm*alpha*k*z-alphahat*z)**2))/(2*sigma)
Q=min(Q1,Q2)
#Main inequality of the theorem: if it FAILS, a lattice point (image of
#a non-zero ideal element) lies inside R, so the ideal is not reduced.
if math.floor((Q-(y*b*f+z*c*d-z*b*e)/(c*f))/a) >= (P-(y*b*f+z*c*d-z*b*e)/(c*f))/a:
isReduced=0
y=y+c
z=z+f
return isReduced
#--- Driver: enumerate primitive (and optionally reduced) ideals of the
#--- pure cubic fields Q(m^(1/3)) for m in a user-supplied range.
#NOTE(review): this listing was garbled in extraction -- indentation is
#lost and several lines ("if m**2", "if (m", "if(a", "if (fDiv") were
#truncated at a '%' character (Python modulo read as a LaTeX comment).
#Restore both from the original source before running.
m=int(input('Starting m: '))
maxM=int(input('Maximum for m: '))
OutputFlag=int(input('enter 0 for all primitive ideals within Minkowski range; 1 for reduced ideals only:'))
#Skip invalid m: perfect cubes, non-cube-free m, and m=h*k^2 with h<k
#(redundant, since Q(m^(1/3)) is also generated by the conjugate radicand).
while m<=maxM:
mValid=1
print()
if cubepart(m)[0]==1:
print("m =",m, " is a perfect cube")
mValid=0
if (cubepart(m)[1]>1)*(mValid==1):
mPrime=cubepart(m)[0]
k=kPart(mPrime)
h=mPrime//k**2
print("m =",m, " is not cube-free. See m =", min(h**2*k,h*k**2))
mValid=0
k=kPart(m)
h=m//k**2
if (h<k)*(mValid==1):
print("m =",m, " is redundant with m =",h**2*k)
mValid=0
if mValid==1:
#sigma and pm encode the field's ramification data (Remark on notation);
#the truncated conditions below presumably test m mod 9 -- TODO confirm.
sigma=1
pm=1
if m**2
sigma=3
if (m
pm=0-1
yn=(sigma-1)//2
print("m =",m,", sigma =",sigma,", pm =",pm,", k =",k)
maxL=math.floor(6*math.sqrt(3)*m/math.pi)
#That's the Minkowski bound, based on the volume of the region R
Mink2DConst=(9+2*math.sqrt(3)*math.pi)/36
#For use below, when applying Minkowski's convex body theorem to R intersect {z=0}
print("maxL=",maxL)
a=1
#Enumerate canonical forms (a,b,c,d,e,f) for each candidate length a.
while a<=maxL:
#c=1
c=math.ceil(a*Mink2DConst/(math.exp(math.log(m)/3)))
#for c less than this bound, there is an ideal element inside R for z=0 by Minkowski's convex body theorem
#Indeed, the area of R intersect {z=0} is a^2(9+2sqrt(3)pi)/(9m^(1/3)), so the ideal fails to be reduced
#whenever ac is less than one fourth of that, or when c<a(9+2sqrt(3)pi)/(36m^(1/3))
while (c<=a):
if(a
b=0
while b<a:
d=0
while d<a:
e=0
while e<c:
if k==1:
fDiv=1
else:
#fDiv bounds f (cf. the lemma f | sigma*k and the integrality conditions)
fDiv=abs(gcd(gcd(gcd(gcd(gcd(gcd(a,sigma*k),a*e//c),b+yn*pm*k**2*c),d+yn*pm*k**2*e),b*e//c-yn*d),b*e//c+yn*pm*k**2*e))
f=math.ceil(math.pi*a**2/(6*math.sqrt(3)*m*c))
#for f less than this bound, there is an ideal element inside R by Minkowski's convex body theorem
while (f<=fDiv):
if (fDiv
if isIdeal(a,b,c,d,e,f,m):
if isPrimitive(a,b,c,d,e,f):
red=isReduced(a,b,c,d,e,f,m)
if OutputFlag==0:
#print()
if red==1:
print("Primitive Ideal (",a,b,c,d,e,f,"), with N =",a*c*f," is a reduced ideal.")
else:
print("Primitive Ideal (",a,b,c,d,e,f,"), with N =",a*c*f," is not reduced.")
else:
if red==1:
#print()
print("Reduced ideal: (",a,b,c,d,e,f,") has norm N =",a*c*f)
f=f+1
e=e+1
d=d+1
b=b+c
c=c+1
a=a+1
m=m+1
\end{lstlisting}
\section{Appendix 3: Python code for generating norm sequences}
The following algorithm takes a cube-free integer $m$ as input, as well as an upper bound on $z$-values, and lists elements of the norm sequence for $\sqrt[3]{m}$ that have $\phi$-images with $z$-coordinates less than the upper bound.
\begin{lstlisting}
import math
def cubepart(m):
    """Split a positive integer m into (cube-free part, largest cube root divisor).

    Returns the pair (m // div**3, div) where div is the largest integer
    whose cube divides m; the first entry is then the cube-free part of m.
    """
    # Start one above the floating-point cube root: the +1 guards against
    # exp(log(m)/3) rounding the true cube root down by one ulp (e.g. perfect
    # cubes). An overestimate is harmless — the loop simply decrements past it.
    div = math.floor(math.exp(math.log(m) / 3)) + 1
    success = 0
    while (div > 1) * (success == 0):
        # NOTE(review): the published listing lost text after '%' (the LaTeX
        # comment character); this divisibility test is the natural
        # reconstruction of the truncated condition.
        if m % div**3 != 0:
            div = div - 1
        else:
            success = 1
    return (m // div**3, div)
def kPart(n):
    """Return the largest integer whose square divides the positive integer n."""
    # math.isqrt gives the exact integer square root, avoiding the float
    # rounding risk of int(math.floor(math.sqrt(n))) for large n.
    kPart = math.isqrt(n)
    success = 0
    while (kPart > 1) * (success == 0):
        # NOTE(review): the published listing lost text after '%' (the LaTeX
        # comment character); this divisibility test is the natural
        # reconstruction of the truncated condition.
        if n % kPart**2 != 0:
            kPart = kPart - 1
        else:
            success = 1
    return kPart
def Value(x, y, z):
    # Real embedding of the lattice element with coordinates (x, y, z):
    # map to the "tilde" basis and evaluate 1, alpha, alphahat components.
    # Relies on module-level globals k, sigma, pm, alpha, alphahat set by
    # the driver script below.
    t0 = x + z * k / sigma
    t1 = y + pm * z * k / sigma
    t2 = z / sigma
    return t0 + alpha * t1 + alphahat * t2
def Shadow(x, y, z):
    # Quadratic form (the "shadow") of the element with coordinates (x, y, z),
    # expressed in the tilde basis. Relies on module-level globals k, sigma,
    # pm, alpha, alphahat set by the driver script below.
    u = x + z * k / sigma
    v = y + pm * z * k / sigma
    w = z / sigma
    return (u**2 + alpha**2 * v**2 + alphahat**2 * w**2
            - alpha * u * v - alphahat * u * w - alpha * alphahat * v * w)
def Norm(x, y, z):
    # Field norm of the element with tilde coordinates (u, v, w):
    # u^3 + h k^2 v^3 + h^2 k w^3 - 3 h k u v w. Relies on module-level
    # globals k, sigma, pm, h set by the driver script below.
    u = x + z * k / sigma
    v = y + pm * z * k / sigma
    w = z / sigma
    return u**3 + h * k**2 * v**3 + h**2 * k * w**3 - 3 * h * k * u * v * w
# --- Driver script: generate the norm sequence for cbrt(m) up to a z-bound ---
# Relies on cubepart, kPart, Value, Shadow, Norm defined above, and on the
# convention that (A)*(B) of 0/1 values acts as "A and B".
# NOTE(review): this listing was recovered from a LaTeX extraction in which text
# after '%' (the LaTeX comment character) was stripped; the two conditions
# flagged below are truncated and must be restored from the original code.
m=1
m=int(input('m:'))
while m!=0:
# mValid stays 1 only if m is a valid (cube-free, non-redundant) radicand.
mValid=1
print()
if cubepart(m)[0]==1:
print("m =",m, " is a perfect cube")
mValid=0
if (cubepart(m)[1]>1)*(mValid==1):
# m is not cube-free: report the equivalent cube-free radicand instead.
mPrime=cubepart(m)[0]
k=kPart(mPrime)
h=mPrime//k**2
print("m =",m, " is not cube-free. See m =", min(h**2*k,h*k**2))
mValid=0
# Decompose m = h*k^2 with k the largest square divisor's root.
k=kPart(m)
h=m//k**2
if (h<k)*(mValid==1):
# Q(cbrt(h*k^2)) = Q(cbrt(h^2*k)); keep only the representative with h >= k.
print("m =",m, " is redundant with m =",h**2*k)
mValid=0
if mValid==1:
sigma=1
pm=1
# NOTE(review): truncated condition — presumably a congruence test mod 9
# (e.g. m**2 % 9 == 1) selecting sigma = 3; confirm against the paper.
if m**2
sigma=3
# NOTE(review): truncated condition — presumably a residue of m mod 9
# selecting the sign pm = -1; confirm against the paper.
if (m
pm=0-1
yn=(sigma-1)//2
print("m =",m,", sigma =",sigma,", pm =",pm,", k =",k)
maxZ=int(input('maximum z-value:'))
# alpha = m^(1/3), alphahat = (h^2 k)^(1/3): the real basis constants
# used by Value/Shadow/Norm via module globals.
alpha=math.exp(math.log(m)/3)
alphahat=math.exp(math.log(h**2*k)/3)
a=1
b=0
c=0
# Norm sequence starts at the unit element (1,0,0) with Val = Sh = N = 1.
bestList=[[1,0,0]]
Val=1
Sh=1
N=Val*Sh
print(bestList[0],", Val=",Val,", Sh=",Sh,", N=",N)
c=c+1
while c<maxZ:
currentBest=[]
# (x0, y0) approximates the minimizer of the shadow for this z = c;
# the four surrounding lattice points are the only candidates.
x0=math.floor((alphahat-k)/sigma*c)
y0=math.floor((alphahat-pm*alpha*k)/alpha/sigma*c)
if Shadow(x0,y0,c)<Sh:
currentBest.append([x0,y0,c])
if Shadow(x0,y0+1,c)<Sh:
currentBest.append([x0,y0+1,c])
if Shadow(x0+1,y0,c)<Sh:
currentBest.append([x0+1,y0,c])
if Shadow(x0+1,y0+1,c)<Sh:
currentBest.append([x0+1,y0+1,c])
# Emit candidates in increasing Value order, keeping only those that
# still improve (strictly decrease) the running best shadow Sh.
currentBest.sort(key=lambda x:Value(x[0],x[1],x[2]))
while len(currentBest)>0:
if Shadow(currentBest[0][0],currentBest[0][1],currentBest[0][2])<Sh:
newBest=currentBest.pop(0)
Val=Value(newBest[0],newBest[1],newBest[2])
Sh=Shadow(newBest[0],newBest[1],newBest[2])
N=Norm(int(newBest[0]),int(newBest[1]),int(newBest[2]))
print(newBest,", Val=",Val,", Sh=",Sh,", N=",N)
bestList.append(newBest)
else:
currentBest.remove(currentBest[0])
c=c+1
# m = 0 terminates the outer loop.
m=int(input('m:'))
\end{lstlisting}
\section{Questions for further research}
\begin{itemize}
\item Can we obtain analogous results for arbitrary cubic fields with negative discriminant? For quartic fields with rank-$1$ unit groups?
\item Can we further extend the analogy between norm sequences and the continued fraction algorithm?
\item What is the corresponding structure like when the rank of the unit group is greater than $1$?
\end{itemize}
\end{document} |
\begin{document}
\newcommand{\beq}{\begin{equation}}
\newcommand{\eeq}{\end{equation}}
\title{Comment on ``On Visibility in the Afshar Two-Slit Experiment"}
\author{Tabish Qureshi}
\institute{Department of Physics, Jamia Millia Islamia\\
New Delhi-110025, India.\\
\email{tabish.ph@jmi.ac.in}}
\maketitle
\begin{abstract}
Recently Kastner has analyzed the issue of visibility in a modified
two-slit experiment carried out by Afshar et al, which has been a subject
of much debate. Kastner describes a thought experiment which is claimed to
show interference with hundred percent visibility and also an ``apparent''
which-slit information. We argue that this thought experiment does not
show interference at all, and is thus not applicable to the Afshar experiment.
\keywords{Complementarity \and Two-slit experiment \and Wave-particle duality}
\PACS{PACS 03.65.Ud ; 03.65.Ta}
\end{abstract}
An experiment which
claims to violate Bohr's complementarity principle, proposed and carried
out by Afshar et al \cite{afsharfp}, is a subject of current debate.
Basically, it
consists of a standard two-slit experiment, with a converging lens behind
the conventional screen for obtaining the interference pattern.
If the screen is removed, the light passes through the lens and produces
two images of the slits, which are captured on two detectors $D_A$ and
$D_B$ respectively. Opening only slit $A$ results in only detector $D_A$
clicking, and opening only slit $B$ leads to only $D_B$ clicking. Afshar
argues that the detectors $D_A$ and $D_B$ yield information about which
slit, $A$ or $B$, the particle initially passed through. If one places a
screen before the lens, the interference pattern is visible.
Conventionally, if one tries to observe the interference pattern, one
cannot get the which-way information. Afshar has a clever scheme for
establishing the existence of the interference pattern without actually
observing it. First the exact location of the dark fringes are noted by
observing the interference pattern. Then, thin wires are placed in the
exact locations of the dark fringes. The argument is that if the
interference pattern exists, sliding in wires through the dark fringes will
not affect the intensity of light on the two detectors. If the interference
pattern is not there, some photons are bound to hit the wires, and get
scattered, thus reducing the photon count at the two detectors. This way,
the existence of the interference pattern can be established without
actually disturbing the photons in any way. Afshar et al carried out the
experiment and found that sliding in wires in the expected locations of the
dark fringes, doesn't lead to any significant reduction of intensity at the
detectors. Hence they claim that they have demonstrated a violation of
complementarity.
Recently, Kastner has addressed the issue of interference visibility in
the Afshar experiment \cite{kastner09}. Kastner believes that the essence of
the Afshar experiment is captured by a thought experiment discussed by Srikanth
\cite{srikanth} in the context of complementarity. Kastner analyzed this
two-slit experiment in which there is an
additional internal degree of freedom of the detector elements
(which can be considered a ``vibrational'' component). The
particle + detector state evolves from the slits to the final screen with
initial detector state $|0\rangle$. The detector spatial basis states
$|\phi_x\rangle$ and vibrational basis states $|v_U\rangle$ and $|v_L\rangle$
(corresponding to the particle passing through the upper and lower slit,
respectively) are activated. This evolution, from the initial state to the
detected particle, is given by
\begin{equation}
{1\over \sqrt{2}}(|U\rangle+|L\rangle)|0\rangle
\rightarrow \sum_x |x\rangle \left\{a_x |\phi_x\rangle|v_U\rangle +
b_x |\phi_x\rangle|v_L\rangle\right\},
\end{equation}
where amplitudes $a_x$ and $b_x$ depend on wave number, distance, and slit
of origin, and $|x\rangle$ are final particle basis states.
Upon detection at a particular location $x$, one term remains from the sum
on the right-hand side of (1):
\begin{equation}
|x\rangle \left\{a_x |\phi_x\rangle|v_U\rangle +
b_x |\phi_x\rangle|v_L\rangle\right\}.
\end{equation}
Kastner argues that the result of this experiment is even more dramatic
than that of the Afshar experiment, because visibility is hundred percent
since a fully articulated interference pattern has been irreversibly
recorded---not just indicated indirectly---and yet a measurement can
be performed later, that seems to reveal ``which slit'' the
photon went through.
However, this argument is not correct, as can be seen from the following.
Suppose there were no ``vibrational states", then the term which remains
from the sum in (1) would be given by
\begin{equation}
|x\rangle \left\{a_x |\phi_x\rangle + b_x |\phi_x\rangle\right\}.
\end{equation}
The probability density of detecting the particle at position $x$ is then
given by
\begin{equation}
P(x) = \left\{|a_x|^2 + |b_x|^2 + a_x^*b_x + a_xb_x^* \right\}
\langle\phi_x|\phi_x\rangle,
\end{equation}
where the last two terms in the curly brackets denote interference.
On the other hand, the probability density of detecting the particle at
position $x$, in the presence of ``vibrational states" is given by
\begin{eqnarray}
P(x) &=& \{|a_x|^2\langle v_U|v_U\rangle + |b_x|^2\langle v_L|v_L\rangle
+ a_x^*b_x\langle v_U|v_L\rangle + a_xb_x^*\langle v_L|v_U\rangle \}
\langle\phi_x|\phi_x\rangle \nonumber\\
&=& \left\{|a_x|^2 + |b_x|^2 \right\} \langle\phi_x|\phi_x\rangle,
\end{eqnarray}
where the interference terms are killed by the orthogonality of $|v_U\rangle$
and $|v_L\rangle$.
So, contrary to the claim in \cite{kastner09}, this experiment does not
show any interference, although the ``vibrational states" do provide which-way
information. This is in perfect agreement with Bohr's complementarity
principle. It can show interference if $|v_U\rangle$
and $|v_L\rangle$ are not strictly orthogonal. However, in that case one
cannot extract any which-way information.
In conclusion, we have shown that the thought experiment, described by
Kastner, does not show interference at all. What the experiment does
show is that if there exists which-way information in the state, there
is no interference pattern on the screen, in agreement with Bohr's
complementarity principle.
\end{document} |
\begin{document}
\title[Eigenvalue Fluctuations of Symmetric Group Representations]{Eigenvalue Fluctuations of Symmetric Group Permutation Representations on k-tuples and k-subsets}
\author{Benjamin Tsou}
\address{Department of Mathematics, University of California, Berkeley, CA 94720-3840}
\email{benjamintsou@gmail.com}
\begin{abstract}
Let the term $k$-representation refer to the permutation representations of the symmetric group $\mathfrak{S}_n$ on $k$-tuples and $k$-subsets as well as the $S^{(n-k,1^k)}$ irreducible representation of $\mathfrak{S}_n$. Endow $\mathfrak{S}_n$ with the Ewens distribution and let $\alpha$ and $\beta$ be linearly independent irrational numbers over $\mathbb{Q}$. Then for fixed $k > 1$ we show that as $n \to \infty$, the normalized count of the number of eigenangles in a fixed interval $(\alpha, \beta)$ of a $k$-representation evaluated at a random element $\sigma \in \mathfrak{S}_n$ converges weakly to a compactly supported distribution. In particular, we compute the limiting moments and moreover provide an explicit formula for the limiting density when $k = 2$ and the Ewens parameter $\theta = 1$ (uniform probability measure). This is in contrast to the $k = 1$ case where it has been shown previously that the distribution is asymptotically Gaussian.
\end{abstract}
\maketitle
\allowdisplaybreaks
\section{Introduction} \label{intro}
The group of permutation matrices can be viewed as the simplest (nontrivial) permutation representation of the symmetric group $\mathfrak{S}_n$. Wieand \cite{wieand} showed that under a uniform probability measure, the normalized limiting distribution of the number of eigenvalues of a random permutation matrix in some fixed arc of the unit circle follows a standard normal distribution. Recently, Ben Arous and Dang \cite{arousdang} have extended Wieand's work in \cite{wieand} to general functions other than the indicator function on an interval. In particular, they show that the fluctuations of sufficiently smooth linear statistics of permutation matrices drawn from the Ewens distribution are asymptotically non-Gaussian but infinitely divisible. They mention that this result is quite unusual since most prior work show asymptotic Gaussianity of eigenvalue fluctuation statistics.
In this paper, we extend Wieand's results in a different direction by studying higher dimensional representations of the symmetric group. In particular, we will consider the permutation representations on ordered $k$-tuples and unordered subsets of size $k$ as well as the irreducible representation $S^{(n-k,1^k)}$ for $k \ge 2$. We show that for these three types of representations (denoted $\rho_{n,k}^\textrm{tuple}$, $\rho_{n,k}^\textrm{set}$, and $\rho^{(n-k,1^k)}$), for $\sigma \in \mathfrak{S}_n$ drawn from the Ewens distribution, the normalized count of eigenvalues in some fixed arc of the unit circle converges to a class of compactly supported limiting distributions.
Let us now quickly review how permutation representations of $k$-tuples and $k$-subsets are defined. Section \ref{irrep} will give a short overview of irreducible representations of symmetric groups.
First, consider the set $Q_{n,k}^\textrm{tuple}$ of ordered $k$-tuples $(t_1,...,t_k)$ of distinct integers chosen from the set $[n] := \{1,...,n\}$. The symmetric group $\mathfrak{S}_n$ acts naturally on this set by $\sigma(t_1,...,t_k) = (\sigma(t_1),...,\sigma(t_k))$. We can form the $\displaystyle{\frac{n!}{(n-k)!}}$-dimensional vector space $V_{n,k}^{\textrm{tuple}}$ with basis elements $e_{(t_1,...,t_k)}$. Then the action of $\mathfrak{S}_n$ on $Q_{n,k}^\textrm{tuple}$ induces the permutation representation $\rho_{n,k}^{\textrm{tuple}}: \mathfrak{S}_n \rightarrow O(V_{n,k}^{\textrm{tuple}})$ where $O(V_{n,k}^{\textrm{tuple}})$ is the orthogonal group on $V_{n,k}^{\textrm{tuple}}$.
Similarly, consider the set $Q_{n,k}^\textrm{set}$ of $k$-subsets $\{t_1,...,t_k\}$ of distinct integers chosen from $[n]$. As for the set of ordered tuples, the symmetric group $\mathfrak{S}_n$ acts naturally on $Q_{n,k}^\textrm{set}$ by $\sigma(\{t_1,...,t_k\}) = \{\sigma(t_1),...,\sigma(t_k)\}$. We can form the $\displaystyle{ \binom{n}{k} }$-dimensional vector space $V_{n,k}^{\textrm{set}}$ with basis elements $e_{\{t_1,...,t_k\}}$. Then the action of $\mathfrak{S}_n$ on $Q_{n,k}^\textrm{set}$ gives the permutation representation $\rho_{n,k}^{\textrm{set}}: \mathfrak{S}_n \to O(V_{n,k}^{\textrm{set}})$.
To state the main results, let us introduce the relevant random variables describing the eigenvalue statistics of these symmetric group representations. Finite group representations are all unitarisable, and therefore all the eigenvalues are of the form $e^{2 \pi i \phi}$ on the unit circle. It will be convenient to refer to each eigenvalue $e^{2 \pi i \phi}$ by its \textit{eigenangle} $\phi \in [0, 1)$. Let $I = (\alpha, \beta)$ be an interval where $\alpha$ and $\beta$ are irrational and linearly independent over $\mathbb{Q}$. For $\sigma \in \mathfrak{S}_n$, let $X_{n,k}^{\textrm{tuple}}(\sigma)$, $X_{n,k}^{\textrm{set}}(\sigma)$, and $X_{n,k}^{\textrm{irrep}}(\sigma)$ denote the number of eigenangles (counted with multiplicity) of $\rho_{n,k}^\textrm{tuple}(\sigma)$, $\rho_{n,k}^\textrm{set}(\sigma)$, and $\rho^{(n-k,1^k)}(\sigma)$ respectively in the arc $I$.
Recall (see e.g. \cite{ewens}) that the Ewens distribution with parameter $\theta > 0$ defined on $\mathfrak{S}_n$ is given by
\begin{equation}
\mathbb{P}(\sigma) = \frac{\theta^{K(\sigma)}}{\theta (\theta+1) \cdot \cdot \cdot (\theta + n - 1)}
\end{equation}
where $K(\sigma)$ is the total number of cycles of the permutation $\sigma$. By equipping $\mathfrak{S}_n$ with the Ewens measure, we can think of $X_{n,k}^{\textrm{tuple}}$, $X_{n,k}^{\textrm{set}}$, and $X_{n,k}^{\textrm{irrep}}$ as random variables.
For $k>1$, define the centered and scaled versions
\begin{equation}
Y_{n,k}^{\textrm{tuple}} := \frac{X_{n,k}^{\textrm{tuple}} - \mathbb{E}[X_{n,k}^{\textrm{tuple}}]}{n^{k-1}}
\end{equation}
\begin{equation}
Y_{n,k}^{\textrm{set}} := k! \frac{X_{n,k}^{\textrm{set}} - \mathbb{E}[X_{n,k}^{\textrm{set}}]}{n^{k-1}}
\end{equation}
\begin{equation}
Y_{n,k}^{\textrm{irrep}} := k! \frac{X_{n,k}^{\textrm{irrep}} - \mathbb{E}[X_{n,k}^{\textrm{irrep}}]}{n^{k-1}}
\end{equation}
Our first result is to show that to compute the limiting distribution of these normalized eigenangle counts, it suffices to consider the simpler random variables $Y_{n,k}$ defined below. For each $\sigma \in \mathfrak{S}_n$, let $C_j^{(n)}(\sigma)$ denote the number of cycles of $\sigma$ of length $j$. Let $\mathcal{L}(X)$ denote the law of a random variable $X$.
\begin{theorem} \label{simplifythm}
Let \begin{equation}
Y_{n,k} = \sum_{j=1}^n \frac{j^{k-1} C_j^{(n)} (\{j \alpha\} - \{j \beta\})}{n^{k-1}}
\end{equation}
Under the Ewens distribution with parameter $\theta > 0$, as $n \to \infty$ for fixed $k > 1$, each of the random variables $Y_{n,k}^{\textrm{tuple}}$, $Y_{n,k}^{\textrm{set}}$, and $Y_{n,k}^{\textrm{irrep}}$ converges in law to the same limiting distribution $\displaystyle{\lim_{ n\to \infty} \mathcal{L} (Y_{n,k})}$ (assuming it exists).
\end{theorem}
\begin{remark}
In fact, the proof of Theorem \ref{simplifythm} readily shows that a similar result stated in Theorem \ref{simplifythmgeneral} holds for more general linear eigenvalue statistics than the indicator of an interval.
\end{remark}
Since $\displaystyle{\sum_{j=1}^n j C_j^{(n)} = n}$, it is easy to see that $\displaystyle{ \sum_{j=1}^n j^{k-1} C_j^{(n)} \le n^{k-1}}$. Thus, the distribution of $Y_{n,k}$ is supported on the finite interval $[-1,1]$ for all $n$. Hence by the method of moments, the sequence converges in distribution as long as the moments converge. The following theorem gives the limiting moments implicitly in terms of the exponential of a formal power series.
\begin{theorem} \label{momentsthm}
Under the Ewens distribution with parameter $\theta > 0$, for $k > 1$, $Y_{n,k}$ converges weakly as $n \to \infty$ to some compactly supported limiting distribution $Y_{\infty, k}$. The moments of $Y_{\infty, k}$ are given implicitly by the following equation in formal power series:
\begin{equation}\label{power} \sum_{m = 0}^\infty \mathbb{E}[(Y_{\infty, k})^m] \frac{\Gamma(m(k-1)+\theta)}{\Gamma(\theta) m!} z^m = \exp(K(z))
\end{equation} where $\displaystyle{K(z) = \sum_{m=1}^\infty \kappa_{2m} z^{2m}}$ and $\displaystyle{\kappa_{2m} = \frac{2\theta(2m(k-1)-1)!}{(2m+2)!} }$.
\end{theorem}
Remarkably, when $k=2$ and $\theta=1$, an explicit formula for the density of $Y_{\infty, 2}$ can be obtained.
\begin{corollary} \label{twocor}
For $\theta=1$ (i.e. the uniform measure on $\mathfrak{S}_n$), the random variable $Y_{\infty,2}$ is supported on the interval $[-1,1]$ and has probability density given by the formula:
\begin{equation}\label{probdensity2}
p_{Y_{\infty, 2}}(t) = \frac{e^{3/2}}{\pi |t|} \left(\frac{1}{|t|} - 1\right)^{-\frac{1}{2} (1 - |t|)^2} \left(\frac{1}{|t|} + 1\right)^{-\frac{1}{2} (1 + |t|)^2} \sin \bigg(\frac{(1-|t|)^2}{2} \pi \bigg)
\end{equation}
for $-1 \le t \le 1$ \bigg(and by continuity, $\displaystyle{p_{Y_{\infty, 2}}(0) = \frac{e^{3/2}}{\pi}} \bigg)$.
\end{corollary}
The rest of the paper is organized as follows. In sections \ref{ktuple}, \ref{ksubset}, and \ref{irrep}, we prove Theorem \ref{simplifythm} in turn for $Y_{n,k}^{\textrm{tuple}}$, $Y_{n,k}^{\textrm{set}}$, and $Y_{n,k}^{\textrm{irrep}}$. In section \ref{equidistributed sequences}, we review some basic theory of equidistributed sequences that will be useful for the moment method. In section \ref{k=1}, we use the method of moments to rederive the asymptotic gaussianity of the normalized eigenangle count in the $k=1$ case of permutation matrices. Then we move on to the $k>1$ case and prove Theorem \ref{momentsthm} in Section \ref{k>1}. Section \ref{k=2} proves the density formula for $k=2$ in Corollary \ref{twocor}. Finally in Section \ref{generalf}, we discuss the extension of Theorem \ref{simplifythm} to more general linear eigenvalue statistics and connect our results to those in \cite{arousdang}.
We end this introduction with a few bibliographic comments regarding the increasing activity in the study of eigenvalues of random permutation matrices over the last two decades. Wieand extended her Gaussianity results in \cite{wieand} to wreath products in \cite{wieand2}. Works by Diaconis and Shahshahani \cite{diaconisshah} and Evans \cite{evans} show that the spectrum of permutation matrices and various wreath products under a uniform probability measure converges weakly to the uniform distribution on the circle. Characteristic polynomials associated to a random permutation matrix were studied in several works, including \cite{cook}, \cite{bahierA}, \cite{hambly}, \cite{zeindler}, and \cite{dangzeindler}. Najnudel and Nikeghbali \cite{najnudel} and more recently Bahier (\cite{bahierB}, \cite{bahierC}) extend the work of Diaconis, Evans, and Wieand by studying ``randomized'' permutation matrices where each matrix entry equal to 1 is replaced by i.i.d. variables taking values in $\mathbb{C}^*$. The authors in \cite{hughesnajnudel} study a more general Ewens measure than the one considered by Ben Arous and Dang \cite{arousdang} and in this paper, also obtaining eigenvalue statistics fluctuation results. Evans \cite{evans2} considers spectra of random matrices involving more general representations of the symmetric group $\mathfrak{S}_n$, but the situation is quite different from ours in that the randomness is not taken over $ \mathfrak{S}_n$.
\section{The $k$-tuple representations} \label{ktuple}
In this section, we give a proof of Theorem \ref{simplifythm} for $Y_{n,k}^{\textrm{tuple}}$. First, we give a simple characterization of the spectrum of $\rho_{n,k}^{\textrm{tuple}}(\sigma)$ for $\sigma \in \mathfrak{S}_n$. Note that the eigenvalues only depend on the cycle structure of $\sigma$ since conjugacy classes in $\mathfrak{S}_n$ are determined by the cycle structure.
When $k=1$ (the defining representation of $\mathfrak{S}_n$), $\rho_{n,1}^{\textrm{tuple}}(\sigma) = M$ where $M$ is the permutation matrix corresponding to $\sigma$, i.e. $M_{ij} = 1$ if $j = \sigma(i)$ and 0 otherwise. It is easy to see that each $j$-cycle in $\sigma$ corresponds to the set of $j$ eigenangles $\displaystyle{ \left\{0, \frac{1}{j},...,\frac{j-1}{j} \right\} }$. For each $j$, we have $C_j^{(n)}(\sigma)$ copies of these eigenangles.
Then \begin{equation} X_{n,1}^{\textrm{tuple}}(\sigma) = n(\beta - \alpha) + \sum_{j=1}^n C_j^{(n)}(\sigma) (\{j \alpha\} - \{j \beta \}) \end{equation} where $\{x\}$ denotes the fractional part of $x$.
More generally, $\rho_{n,k}^{\textrm{tuple}}(\sigma)$ is the $\displaystyle{\frac{n!}{(n-k)!} \times \frac{n!}{(n-k)!}}$ permutation matrix $M$ corresponding to the action of $\sigma$ on $V_{n,k}^{\textrm{tuple}}$. Here, $M_{(t_1,...,t_k), (u_1,...,u_k)} = 1$ if $(u_1,...,u_k) = \sigma(t_1,...,t_k)$ and 0 otherwise. Let $\sigma_k^{\textrm{tuple}}$ be the permutation of size $\displaystyle{\frac{n!}{(n-k)!} }$ corresponding to $M$. Then looking at the cycle structure, we have \begin{equation} \label{Xnktuple} X_{n,k}^{\textrm{tuple}}(\sigma) = \frac{n!}{(n-k)!} (\beta - \alpha) + \sum_{j} C_{j,k}^{(n), \textrm{tuple}} (\sigma) (\{j \alpha\} - \{j \beta \}) \end{equation} where $C_{j,k}^{(n), \textrm{tuple}}(\sigma)$ is the number of cycles in $\sigma_k^{\textrm{tuple}}$ of length $j$.
We will say that an integer $i$ lies in cycle $C$ of the permutation $\sigma$ if $C$ contains $i$ in the cycle decomposition of $\sigma$. It will turn out that almost all the contribution to the sum in $Y_{n,k}^{\textrm{tuple}}$ comes from the tuples ($t_1,...,t_k)$ such that $t_1,...,t_k$ all lie in the same cycle of $\sigma$.
\begin{remark}
To reduce confusion, we will sometimes use the terms $\sigma$-cycle and $\sigma_k^{\textrm{tuple}}$-cycle to distinguish between cycles of $\sigma$ and $\sigma_k^{\textrm{tuple}}$ respectively.
\end{remark}
\begin{remark}
To reduce clutter, we will often leave the index off set and sequence notations. For example, if the index $n$ is understood to run over the range $1 \le n \le N$, then the notation $(a_n)$ should be read as the sequence $(a_1,...,a_N)$. Similarly, if the index $i$ is understood to run over the range $1 \le i \le m$, then $\{ A_i\}$ should be read as the set $\{A_1,...,A_m\}$.
\end{remark}
In order to analyze the sum in $Y_{n,k}^{\textrm{tuple}}$, it will help to obtain a partition of the set of $k$-tuples $(t_1,...,t_k)$ defined by the orbits of the action of $\sigma \in \mathfrak{S}_n$. First, we make the following:
\begin{definition}
Let $\displaystyle{[k] = \mathop{\cup}_{i = 1}^m A_i}$ be a partition of $[k]$ into disjoint, nonempty subsets. Order the sets $A_i$ such that $|A_1| \ge...\ge |A_m| > 0$. Setting $k_i = |A_i|$, this determines an integer partition $k = k_1+...+k_m$. Further partition each subset $A_r$ into $p(r)$ disjoint, nonempty subsets $A_{rs}$ such that $|A_{r1}| \ge...\ge |A_{r, p(r)}| > 0$. This determines an integer partition $\displaystyle{k_r = \sum_{s=1}^{p(r)} k_{rs}}$. The pair $(\{A_i\}, \{A_{rs}\})$ will be called a double partition of $[k]$.
\end{definition}
We can now define the following subsets of $Q_{n,k}^\textrm{tuple}$:
\begin{definition} \label{deftuples}
Let $(\{A_i\}, \{A_{rs}\})$ be a double partition of $[k]$ such that $|\{A_i\}| = m$. Choose a sequence $(i_1,...,i_m)$ of distinct integers from $[n]$. Then let $T_{\{A_i\}, \{A_{rs}\}}^{\sigma, (i_j)}$ denote the set of $k$-tuples $(t_1,...,t_k)$ of distinct integers such that the integers $t_{j}$ where $j \in A_r$ are all in $\sigma$-cycles of length $i_r$ and moreover, integers $t_{a}$ and $t_b$ are in the \textit{same} $\sigma$-cycle of length $i_r$ iff $a$ and $b$ are in the same subset $A_{rs}$ of $A_r$. Taking the union over $\sigma$-cycle lengths, we also define \begin{equation}T_{\{A_i\}, \{A_{rs}\}}^{\sigma} = \bigcup_{i_1 \neq... \neq i_m} T_{\{A_i\}, \{A_{rs}\}}^{\sigma, (i_j)} \end{equation} where $i_1 \neq... \neq i_m$ is shorthand for $i_1,...,i_m$ all distinct.
\end{definition}
For each $\sigma \in \mathfrak{S}_n$, the set $ \Big\{T_{\{A_i\}, \{A_{rs}\}}^{\sigma}: (\{A_i\}, \{A_{rs}\}) \text{ a double partition of }[k] \Big\}$ forms a partition of $Q_{n,k}^\textrm{tuple}$. Note that the number of parts in this partition is only a function of $k$ and does not grow with $n$. Thus, we can consider separately the limiting contribution of each part $T_{\{A_i\}, \{A_{rs}\}}^{\sigma} $ to the spectrum of $\rho_{n,k}^{\textrm{tuple}}(\sigma)$.
It is clear that $\sigma_k^{\textrm{tuple}}$ acts on $T_{\{A_i\}, \{A_{rs}\}}^{\sigma, (i_j)}$ and that each tuple $(t_1,...,t_k) \in T_{\{A_i\}, \{A_{rs}\}}^{\sigma, (i_j)}$ lies in a $\sigma_k^{\textrm{tuple}}$-cycle of length $[i_1,...,i_m]$. (Here, $[i_1,...,i_m]$ denotes the least common multiple of integers $i_1,...,i_m$). Thus, for each choice of double partition $(\{A_i\}, \{A_{rs}\})$ and each sequence of $\sigma$-cycle lengths $(i_1,...,i_m)$, the elements of $T_{\{A_i\}, \{A_{rs}\}}^{\sigma, (i_j)} $ form \begin{equation}\label{numform} \frac{1}{[i_1,...,i_m]} \prod_{r=1}^m \big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}} \prod_{s=1}^{p(r)} i_r^{\underline{k_{rs}}} \end{equation} $\sigma_k^{\textrm{tuple}}$-cycles of size $[i_1,...,i_m]$ where the polynomial $x^{\underline{n}} := x(x-1)...(x-n+1)$ (often called the $n$th falling factorial).
The following lemma shows that the only non-negligible contribution to $Y_{n,k}^{\textrm{tuple}}$ in the limit $n \to \infty$ comes from the $\sigma_k^{\textrm{tuple}}$-cycles containing tuples $(t_1,...,t_k)$ such that $t_1,...,t_k$ are all in the same $\sigma$-cycle.
\begin{lemma}\label{tuplebound} Let $(\{A_i\}, \{A_{rs}\})$ be a double partition of $[k]$. If $\displaystyle{\sum_{r=1}^m p(r) > 1}$, then \begin{equation} \lim_{n \to \infty} \frac{1}{n^{k-1}} \mathbb{E} \bigg[\sum_{i_1 \neq... \neq i_m} \frac{1}{[i_1,...,i_m]} \prod_{r=1}^m \big(C_{i_r}^{(n)} \big)^{\underline{p(r)}} \prod_{s=1}^{p(r)} i_r^{\underline{k_{rs}}} \bigg] = 0 \end{equation}
\end{lemma}
\begin{proof}
First, we compute the expectation $\displaystyle{\mathbb{E}\bigg[ \prod_{r=1}^m \big(C_{i_r}^{(n)} \big)^{\underline{p(r)}} \bigg]}$, often referred to as a factorial moment.
The following moment formula was established by Watterson \cite{watterson} (see e.g. \cite[(5.6)]{a}): Let $W_1,...,W_n$ be independent Poisson random variables with $\mathbb{E}[W_i] = \theta/i$ and $b_1,...,b_n$ be non-negative integers and set $l = b_1+2b_2+...+n b_n$. Then \begin{equation}\label{diaconispoisson} \mathbb{E} \bigg[\prod_{j=1}^n (C_j^{(n)})^{\underline{b_j}} \bigg] = \mathds{1}(l \le n) \mathbb{E} \bigg[\prod_{j=1}^n W_j^{\underline{b_j}} \bigg] \prod_{i=0}^{l-1} \frac{n-i}{\theta+n-i-1}
\end{equation}
Also, recall that if $X$ follows a Poisson distribution with parameter $\lambda$, the factorial moments are given by $\mathbb{E}[X^{\underline{n}}] = \lambda^n$.
Using these results, we can now compute the expectation in the lemma. Note that
\[ \prod_{i=0}^{l-1} \frac{n-i}{\theta+n-i-1} \le \frac{n}{n-l+\theta} \]
Then summing over all sequences $(i_1,...,i_m)$ of distinct integers in $[n]$, we get (assuming $\displaystyle{\sum_{r=1}^m p(r) > 1}$)
\begin{align}
&\nonumber \mathbb{E} \bigg[\sum_{i_1 \neq... \neq i_m} \frac{1}{[i_1,...,i_m]} \prod_{r=1}^m \big(C_{i_r}^{(n)} \big)^{\underline{p(r)}} \prod_{s=1}^{p(r)} i_r^{\underline{k_{rs}}} \bigg] \\
\le &\nonumber \sum_{i_1 \neq ...\neq i_m} \frac{1}{[i_1,...,i_m]} \frac{n}{n-\sum i_r p(r) +\theta} \prod_{r=1}^m \left( \frac{\theta}{i_r} \right)^{p(r)} \prod_{s=1}^{p(r)} i_r^{k_{rs}} \\
\le &\label{sum2} A(\theta) \sum_{\substack{i_1 \neq ...\neq i_m \\ \sum i_r p(r) < n}} \frac{1}{[i_1,...,i_m]} \frac{n}{n-\sum i_r p(r)} \prod_{r=1}^m i_r^{k_r - p(r)}
\end{align}
for some constant $A(\theta)$.
If $m = 1$, splitting the sum according to whether $n - i_1 p(1) > \sqrt{n}$ or $n - \sum i_1 p(1) \le \sqrt{n}$ shows that \eqref{sum2} is clearly of order $O(n^{k-3/2})$ and the lemma follows. If $m>1$, we have (using the notation $(i,j):= \gcd(i,j)$ )
\begin{align*}
&\sum_{\substack{i_1 \neq ...\neq i_m \\ \sum i_r p(r) < n}} \frac{1}{[i_1,...,i_m]} \frac{n}{n-\sum i_r p(r)} \prod_{r=1}^m i_r^{k_r - p(r)} \\
\le & \sum_{\substack{i_1 \neq ...\neq i_m \\ \sum i_r p(r) < n}} \frac{1}{[i_1,i_2]} \frac{n}{n-\sum i_r p(r)} \prod_{r=1}^m i_r^{k_r - p(r)} \\
< & \; n^{k - k_1 - k_2+1/2} \sum_{i \neq j}\frac{i^{k_1 - 1} j^{k_2 - 1}}{[i,j]}\\
= & \; 2n^{k - k_1 - k_2+1/2} \sum_{1 \le i < j \le n} i^{k_1 - 2} j^{k_2 - 2} (i,j) \\
= & \; 2n^{k - k_1 - k_2+1/2} \sum_{\substack{1 \le i < j \le n \\ (i,j)=1}} \sum_{d \le \frac{n}{j}} (d i)^{k_1 - 2} (d j)^{k_2 - 2} d \\
< & \; 2n^{k - k_1 - k_2+1/2} \sum_{1 \le i < j \le n} \sum_{d \le \frac{n}{j}} d^{k_1+k_2-3} i^{k_1 - 2} j^{k_2 - 2} \\
< & \; 2n^{k - k_1 - k_2+1/2} \sum_{i=1}^n\sum_{j=i}^n \left(\frac{n}{j}\right)^{k_1+k_2-2} i^{k_1 - 2} j^{k_2 - 2} \\
< & \; 2n^{k - 3/2} \sum_{i=1}^n \sum_{j=i}^n \frac{i^{k_1 - 2}}{j^{k_1}}
\end{align*}
Here, the second step is derived by splitting the sum according to whether $n - \sum i_r p(r) > \sqrt{n}$ or $n - \sum i_r p(r) \le \sqrt{n}$. The desired result then follows from the fact that \[\sum_{i=1}^n \sum_{j=i}^n \frac{i^{k_1 - 2}}{j^{k_1}} = O(\log^2 n) \]
\end{proof}
Lemma \ref{tuplebound} shows that the only cycles of $\sigma_k^{\textrm{tuple}}$ that contribute to $Y_{n,k}^{\textrm{tuple}}$ in the limit $n \to \infty$ are those formed from tuples in the set $T_{\{A_i\}, \{A_{rs}\}}^{\sigma}$ such that the double partition $(\{A_i\}, \{A_{rs}\})$ of $[k]$ is trivial, i.e. $\{A_{rs} \}$ consists of the single set $[k]$. Borrowing the result from Lemma \ref{s=1}, we see that $\displaystyle{\lim_{n \to \infty} \mathbb{E}[Y_{n,k}] = 0}$. Plugging $m=1$ into the expression \eqref{numform} then proves Theorem \ref{simplifythm} for $Y_{n,k}^{\textrm{tuple}}$.
\section{The $k$-subset representations} \label{ksubset}
Now we prove Theorem \ref{simplifythm} for $Y_{n,k}^{\textrm{set}}$. For each $\sigma \in \mathfrak{S}_n$, let $\sigma_k^{\textrm{set}}$ be the permutation of size $\displaystyle{ \binom{n}{k} }$ corresponding to $\rho_{n,k}^{\textrm{set}}(\sigma)$.
Similar to the ordered tuple case, the eigenvalue distribution is given by \begin{equation} \label{Xnkset} X_{n,k}^{\textrm{set}} = \binom{n}{k} (\beta - \alpha) + \sum_{j} C_{j,k}^{(n), \textrm{set}} (\{j \alpha\} - \{j \beta \})
\end{equation} where $C_{j,k}^{(n), \textrm{set}}(\sigma)$ is the number of cycles in $\sigma_k^{\textrm{set}}$ of length $j$.
As in the previous section, we will see that almost all of the contribution to the sum in $Y_{n,k}^{\textrm{set}}$ comes from the subsets $\{t_1,...,t_k\}$ such that $t_1,...,t_k$ are all in the same cycle of $\sigma$. Although the argument is similar to the ordered tuple case, a few subtleties arise.
For each $\sigma \in \mathfrak{S}_n$, we wish to partition $Q_{n,k}^\textrm{set}$ according to the number of elements $t_i$ in each $\sigma$-cycle. Unlike the ordered tuple case, instead of double partitioning the set $[k]$ we proceed by directly double partitioning the integer $k$.
\begin{definition} Let $k = k_1+...+k_m$ such that $k_1 \ge ...\ge k_m \ge 1$ be a partition of the integer $k$. Then, for each part $k_r$ choose a subpartition $\displaystyle{k_r = \sum_{s=1}^{p(r)} k_{rs}}$ such that $k_{r1} \ge ... \ge k_{r, p(r)} \ge 1$. We can also denote the subpartition by a sequence $(c_{r,1},...,c_{r,k_r})$ such that $\displaystyle{\sum_{i=1}^{k_r} i c_{r,i} = k_r}$. Here, $c_{r,i}$ represents the number of subparts of $k_r$ of size $i$. We will call the array $(k_{rs})$ where $1 \le r \le m$ and $1 \le s \le p(r)$ a double partition of $k$.
\end{definition}
We can define the following subsets of $Q_{n,k}^\textrm{set}$ analogously to Definition \ref{deftuples}:
\begin{definition}
Let $(k_{rs})$ be a double partition of $k$ such that $r$ runs over the range $1 \le r \le m$. Choose a sequence $(i_1,...,i_m)$ of distinct integers from $[n]$. Then let $T_{(k_{rs})}^{\sigma, (i_j) }$ denote the set of $k$-subsets $\{t_1,...,t_k\}$ such that for $1 \le r \le m$ and $1 \le s \le p(r)$, $k_{rs}$ of the elements $t_i$ lie in the same $\sigma$-cycle of length $i_r$. Taking the union over $\sigma$-cycle lengths, we also define \begin{equation} T_{(k_{rs})}^{\sigma} = \bigcup_{i_1 \neq... \neq i_m} T_{(k_{rs})}^{\sigma, (i_j) }
\end{equation}
\end{definition}
For each $\sigma \in \mathfrak{S}_n$, the set $\displaystyle{\Big\{ T_{(k_{rs})}^{\sigma}: (k_{rs}) \text{ a double partition of } k \Big\} }$ forms a partition of $Q_{n,k}^\textrm{set}$. As before, we can consider each part $T_{(k_{rs})}^{\sigma} $ individually since the number of parts in this partition is only a function of $k$ and does not grow with $n$.
To write the formula analogous to \eqref{numform} for the number of $\sigma_k^{\textrm{set}}$-cycles formed from $k$-subsets $\{t_1,...,t_k\} \in T_{(k_{rs})}^{\sigma, (i_j) }$, it will be helpful to introduce the notion of binary necklaces from combinatorics.
\begin{definition} A binary necklace of length $n$ is an equivalence class of strings of 0's and 1's of length $n$ that are identified under rotation i.e. in the same orbit under action of the cyclic group $\mathbb{Z}/n\mathbb{Z}$. The period of a necklace is the size of the corresponding equivalence class of strings, i.e. the period of a representative string.
Let $N_{i,k}$ be the number of binary length $i$ necklaces with exactly $k$ ones and let $N_{i, k}^{d}$ denote the number of such necklaces of period $d$. Finally, let $L_{i,k} := N_{i, k}^{i}$ denote the number of aperiodic necklaces of length $i$ with $k$ ones.
\end{definition}
We will see shortly that for large $n$, almost all necklaces are aperiodic, i.e. have period $n$.
For each subset $\{t_1,...,t_k\} \in T_{(k_{rs})}^{\sigma, (i_j) }$, for $1 \le r \le m$ and $1 \le s \le p(r)$, let $C_{rs}^{ \{t_i \}}$ be (one of) the $\sigma$-cycle(s) of length $i_r$ containing $k_{rs}$ elements of $\{t_1,...,t_k\}$. We can identify $C_{rs}^{ \{t_i \}}$ with a binary necklace by giving the label 1 to the $k_{rs}$ elements of $\{t_1,...,t_k\}$ that lie in the cycle and giving the label 0 to the rest of the numbers in the cycle. For example, if $C_{rs}^{ \{t_i \}}$ is the cycle $(3 5 2 9 1 4)$ and the subset of $k_{rs}$ elements of $\{t_1,...,t_k\}$ that lie in the cycle is $\{2,4\}$, then the induced binary necklace is $001001$. Let $d_{rs}^{ \{t_i \}}$ denote the period of the binary necklace $C_{rs}^{ \{t_i \}}$. Then we see that each $k$-subset $\{t_1,...,t_k\} \in T_{(k_{rs})}^{\sigma, (i_j) }$ lies in a $\sigma_k^{\textrm{set}}$-cycle of length $\big[\big(d_{rs}^{ \{t_i \}} \big) \big]$ where $\big[ (a_n) \big]$ denotes the least common multiple of all the elements in the sequence $(a_n)$.
Fix a double partition $(k_{rs})$ of $k$ and let $(i_1,...,i_m)$ be a sequence of $\sigma$-cycle lengths. Let $(d_{rs})$ be an array of non-negative integers where the indices $r$ and $s$ run over the same range as $(k_{rs})$. Then the $k$-subsets $\{t_1,...,t_k \} \in T_{(k_{rs})}^{\sigma, (i_j) }$ such that $d_{rs}^{ \{t_i\}} = d_{rs}$ form \begin{equation} \frac{1}{[(d_{ab})]} \prod_{r=1}^m \frac{\big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}}}{\prod_{i=1}^{k_r} c_{r,i}! } \prod_{s=1}^{p(r)} d_{rs} N_{i_r, k_{rs}}^{d_{rs}} \end{equation}
$\sigma_k^{\textrm{set}}$-cycles of length $[(d_{ab})]$.
Note that $\displaystyle{N_{i_r, k_{rs}}^{d_{rs}} = L_{d_{rs}, \frac{k_{rs}d_{rs}}{i_r}}}$. Clearly, this can only be non-zero if $i_r \bigm| k_{rs}d_{rs}$, i.e. $i_r f_{rs} = k_{rs}d_{rs}$ for some integer $f_{rs}$. Since $d_{rs} \bigm| i_r$, we have $i_r = d_{rs} g_{rs}$ for some integer $g_{rs}$. Putting this together, $k_{rs} = f_{rs} g_{rs}$. Also, we have the trivial bound $\displaystyle{L_{i, k} \le \frac{1}{i} \binom{i}{k}}$.
Let $\mathbf{D}$ denote the set of all arrays of non-negative integers $(d_{rs})$ such that $N_{i_r, k_{rs}}^{d_{rs}} \neq 0$, i.e. such that each array entry $d_{rs}$ is a valid period. Let $\mathbf{G}$ denote the set of all arrays of non-negative integers $(g_{rs})$ such that $g_{rs} \bigm| k_{rs}$. (For both arrays, the indices run over the range $1 \le r \le m$ and $1 \le s \le p(r)$). Then the number of $\sigma_k^{\textrm{set}}$ cycles formed from the elements of $T_{(k_{rs})}^{\sigma, (i_j) }$ is
\begin{equation} \label{subsetform}
\sum_{(d_{ab}) \in \mathbf{D}} \frac{1}{[(d_{ab})]} \prod_{r=1}^m \frac{\big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}}}{\prod_{i=1}^{k_r} c_{r,i}! } \prod_{s=1}^{p(r)} d_{rs} N_{i_r, k_{rs}}^{d_{rs}}
\end{equation}
We have the following lemma analogous to Lemma \ref{tuplebound}.
\begin{lemma}\label{subsetbound} Let the array $(k_{rs})$ where $1 \le r \le m$ and $1 \le s \le p(r)$ be a double partition of $k$. If $\displaystyle{\sum_{r=1}^m p(r) > 1}$, then \begin{equation} \lim_{n \to \infty} \frac{1}{n^{k-1}} \mathbb{E} \bigg[\sum_{i_1 \neq... \neq i_m} \sum_{(d_{ab}) \in \mathbf{D}} \frac{1}{[(d_{ab})]} \prod_{r=1}^m \frac{\big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}}}{\prod_{i=1}^{k_r} c_{r,i}! } \prod_{s=1}^{p(r)} d_{rs} N_{i_r, k_{rs}}^{d_{rs}} \bigg] = 0 \end{equation}
\end{lemma}
\begin{proof}
\begin{equation*}
\begin{aligned}
&\sum_{(d_{ab}) \in \mathbf{D}} \frac{1}{[(d_{ab})]} \prod_{r=1}^m \frac{\big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}}}{\prod_{i=1}^{k_r} c_{r,i}! } \prod_{s=1}^{p(r)} d_{rs} N_{i_r, k_{rs}}^{d_{rs}} \\
\le & \sum_{(d_{ab}) \in \mathbf{D}} \frac{1}{[(d_{ab})]} \prod_{r=1}^m \frac{\big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}}}{\prod_{i=1}^{k_r} c_{r,i}! } \prod_{s=1}^{p(r)} \binom{d_{rs} }{ \frac{k_{rs}d_{rs}}{i_r}} \\
= & \sum_{(g_{ab}) \in \mathbf{G}} \frac{1}{[(i_a/g_{ab} ) ]} \prod_{r=1}^m \frac{\big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}}}{\prod_{i=1}^{k_r} c_{r,i}! } \prod_{s=1}^{p(r)} \binom{i_r/g_{rs} }{ k_{rs}/g_{rs} } \\
< & \frac{1}{[i_1,...,i_m]} \sum_{(g_{ab}) \in \mathbf{G}} \prod_{r=1}^m \big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}} \prod_{s=1}^{p(r)} \binom{i_r}{ k_{rs}} k_{rs} \\
= & \frac{|\mathbf{G}|}{[i_1,...,i_m]} \prod_{r=1}^m \big(C_{i_r}^{(n)}(\sigma) \big)^{\underline{p(r)}} \prod_{s=1}^{p(r)} \binom{i_r }{ k_{rs}} k_{rs}
\end{aligned}
\end{equation*}
since $\displaystyle{\binom{n}{ m} \le \binom{a n}{ a m}}$ for $a \ge 1$ and $[p m_1, m_2] \le p [m_1, m_2]$.
The result then follows from Lemma \ref{tuplebound}.
\end{proof}
Thus, just as for the $k$-tuple case, the only $\sigma_k^{\textrm{set}}$-cycles that contribute to $Y_{n,k}^{\textrm{set}}$ in the limit $n \to \infty$ are those formed from $k$-subsets in $T_{(k_{rs})}^{\sigma}$ such that the double partition $(k_{rs})$ of $k$ is trivial, i.e. $(k_{rs})$ consists of just one part of size $k$. Since $\displaystyle{N_{j, k}^{d} \le \frac{1}{d} \binom{d}{ k d/j}}$, it is easy to see that for large $j$, almost all necklaces of length $j$ with $k$ ones are aperiodic, i.e. both $N_{j, k}$ and $L_{j, k}$ are asymptotically $\displaystyle{\frac{j^{k-1}}{k!} + O(j^{k-2})}$. Plugging $m=1$ into the expression \eqref{subsetform} then proves Theorem \ref{simplifythm} for $Y_{n,k}^{\textrm{set}}$.
\begin{remark}
Exact formulas for $N_{n,i}$ and $L_{n,i}$ are known:
\begin{equation} L_{n,i} = \frac{1}{n} \sum_{d \mid (n, i)} \mu(d) \binom{n/d}{ i/d} \end{equation}
\begin{equation} N_{n,i} = \frac{1}{n} \sum_{d \mid (n,i)} \varphi(d) \binom{n/d }{ i/d} \end{equation}
where $\mu(d)$ is the M\"{o}bius function and $\varphi$ is Euler's totient function. Derivation of these formulas and other results about necklaces can be found in e.g. \cite{bender, ruskey, sawada}.
\end{remark}
\section{The $S^{(n-k,1^k)}$ irreducible representation} \label{irrep}
In this section, we finally prove Theorem \ref{simplifythm} for the $S^{(n-k,1^k)}$ irreducible representation of the symmetric group $\mathfrak{S}_n$. First, we briefly review some basic facts from the representation theory of symmetric groups.
\subsection{Basics of symmetric group theory}
It is well known that every complex representation of a finite group is completely reducible, i.e. is the direct sum of irreducible representations. This follows from the fact that finite-dimensional unitary representations of any group are completely reducible and Weyl's unitary trick which shows that every finite dimensional representation of a finite group is unitarisable. Then the eigenvalue distribution of any finite group representation is simply a mixture of the eigenvalue distributions for each irreducible representation in the direct sum.
Thus, to understand the eigenvalue distributions of representations of the symmetric group, another perspective is to try to understand the irreducible representations. These representations are indexed by the partitions of $n$, often denoted $\lambda \vdash n$. We can visualize a partition $\lambda$ by drawing its diagram, which is a configuration of boxes arranged in left-justified rows such that there are $\lambda_i$ boxes in the $i^{\textrm{th}}$ row.
\begin{definition} Given a partition $\lambda \vdash n$, a Young tableau of shape $\lambda$ is obtained by placing the integers $[n]$ into the diagram for $\lambda$ (so that each number appears exactly once). Clearly, there are $n!$ Young $\lambda$-tableaux. A standard Young tableau is a tableau such that the entries are strictly increasing in each row and each column. If $\lambda, \mu \vdash n$, a semistandard tableau of shape $\lambda$ and type $\mu$ is a tableau where the entries are weakly increasing along each row and strictly increasing down each column such that the number $i$ appears $\mu_i$ times.
\end{definition}
One can consider an equivalence relation on the set of $\lambda$-tableaux such that $t_1 \sim t_2$ if $t_1$ and $t_2$ contain the same elements in each row. Each equivalence class $\{t \}$ under this relation is called a tabloid. Thus, a tabloid is a tableau that only cares about rows.
The action of $\mathfrak{S}_n$ on tabloids induces the permutation representation on a vector space with basis $e_{\{t \}}$ in the usual way. These $\frac{n!}{\lambda_1!...\lambda_r!}$ dimensional representations are denoted by $M^\lambda$ for each partition $\lambda \vdash n$ and called the permutation module corresponding to $\lambda$.
Using this terminology, the permutation representation on ordered $k$-tuples is equivalent to the permutation module $M^{(n-k,1^k)}$ and the permutation representation on unordered $k$-subsets is equivalent to the permutation module $M^{(n-k,k)}$.
One can find the irreps in the permutation modules $M^\lambda$. Define for each tableau $t$ a polytabloid $e_t \in M^\lambda$ by $\displaystyle{e_t = \sum_{\pi \in C_t} \sgn(\pi) e_{\pi\{t\}}}$ where $C_t$ is the subgroup of $\mathfrak{S}_n$ that stabilizes columns of $t$. Then the subspace of $M^\lambda$ spanned by the $\{e_t\}$ is called the Specht module $S^\lambda$. As $\lambda$ ranges over the partitions of $n$, $S^\lambda$ give all the irreps of $\mathfrak{S}_n$. The set of polytabloids $\{e_t : t \text{ is a standard } \lambda\text{-tableau}\}$ is a basis for $S^\lambda$. Thus, we see that the dimension of $S^\lambda$ is the number of standard $\lambda$-tableaux. The celebrated hook-length formula gives a formula for this number.
Young's rule gives a method of determining which irreducible subrepresentations are present in the permutation module $M^\lambda$.
\begin{lemma}[Young's Rule] \label{Young}
The multiplicity of $S^\lambda$ in $M^\mu$ is the Kostka number $K_{\lambda \mu}$, which is the number of semistandard tableau with shape $\lambda$ and type $\mu$.
\end{lemma}
For a proof of Lemma \ref{Young}, see e.g. \cite[Prop. 7.18.7]{stanley}. By Young's rule, we see for instance that $\displaystyle{M^{(n-2,1,1)} = S^{(n)} \oplus 2S^{(n-1,1)} \oplus S^{(n-2,2)} \oplus S^{(n-2,1,1)}}$. In general, $S^{(n-k,1^k)}$ appears as an irrep of $M^{(n-k,1^k)}$ with multiplicity 1.
The decomposition of $M^{(n-k,k)}$ into irreducibles is particularly easy to describe. We have $M^{(n-k,k)} = S^{(n)} \oplus S^{(n-1,1)} \oplus...\oplus S^{(n-k,k)}$.
More information about symmetric group theory can be found in any number of references. A few are \cite{diaconisbook, james, sagan, silberstein}.
\subsection{Eigenvalue distributions of irreducible representations}
Stembridge \cite{stembridge} has found an explicit formula for the eigenvalues of any irreducible representation of the symmetric group in terms of Young tableaux. In the following, we borrow terminology from \cite{stembridge}. First, we introduce the notion of a descent set.
\begin{definition}
Let $T$ be a standard Young tableau. If $k+1$ appears in a row strictly below $k$ in $T$, then $k$ is said to be a descent of $T$. We write $D(T)$ for the set of descents in $T$.
\end{definition}
Figures \ref{fig:sub1} and \ref{fig:sub2} give the descent sets for a standard tableau of shape $(6,1,1,1)$ and another of shape $(4,3,2)$.
Let $\mu \vdash n$ be the cycle type (i.e. list of cycle lengths in the cycle decomposition in non-increasing order) of $\sigma \in \mathfrak{S}_n$. Let $\rho_{n,1}^{\textrm{tuple}}$ be the defining representation of $\mathfrak{S}_n$. Then we define $b_\mu = (b_\mu(1),...,b_\mu(n))$ to be the vector of eigenangles of $\rho_{n,1}^{\textrm{tuple}}(\sigma)$ listed by cycle. For example, $\displaystyle{b_{(4,4,3,2)} = \left(\frac{1}{4},\frac{2}{4},\frac{3}{4},1,\frac{1}{4},\frac{2}{4},\frac{3}{4},1,\frac{1}{3},\frac{2}{3},1,\frac{1}{2},1 \right)}$.
Now we can state Stembridge's formula for the eigenvalues:
\begin{theorem}[Stembridge] \label{stembridge}
Let $\rho^\lambda$ be the representing map corresponding to the irrep $S^\lambda$. The eigenangles of $\rho^\lambda(\sigma)$ (counted with multiplicity) are indexed by standard Young $\lambda$-tableaux $T$ and given by $\displaystyle{\sum_{i \in D(T)} b_\mu(i)}$ where the sum is taken mod 1.
\end{theorem}
\begin{figure}
\caption{$D(T) = \{1, 5, 8\}$}
\label{fig:sub1}
\caption{$D(T) = \{2, 3, 5, 7, 8\}$}
\label{fig:sub2}
\caption{Descent sets of two Young tableaux}
\end{figure}
Now, we turn to the eigenvalue distribution of $\rho^{(n-k,1^k)}$. It is easy to check that
\begin{proposition} \label{stem}
For $T$ running over all standard Young tableaux of shape $(n-k,1^k)$, we have \[ \{D(T)\} = Q_{n-1,k}^{\textrm{set}} \]
\end{proposition}
Let $\mu = (\mu_1,\mu_2,...,\mu_l)$ be the cycle type of $\sigma \in \mathfrak{S}_n$. For any set of sets (or tuples) $U$, define
\begin{equation} \label{etuple} E_n^U(\sigma) := \bigg\{\sum_{i \in S} b_\mu(i): S \in U \bigg\} \end{equation}
By Theorem \ref{stembridge} and Proposition \ref{stem}, the multiset of eigenangles of $\rho^{(n-k,1^k)}(\sigma)$ is $\displaystyle{E_n^{Q_{n-1,k}^{\textrm{set}} }(\sigma) }$. It will be easier to work our way towards $\displaystyle{E_n^{Q_{n-1,k}^{\textrm{set}} } }$ by first considering $\displaystyle{E_n^{Q_{n,k}^{\textrm{tuple*}} }}$ where $Q_{n,k}^\textrm{tuple*}$ is the set of $k$-tuples allowing repeats.
By the same reasoning as for $Y_{n,k}^{\textrm{tuple}}$, it is easy to see (just replace $\prod i_r^{\underline{k_{rs}}}$ with $\prod i_r^{k_{rs}}$ in Lemma \ref{tuplebound}):
\begin{lemma} \label{Qtuplestar}
As $n \to \infty$, $\displaystyle{ \frac{\big| E_n^{Q_{n,k}^\textrm{tuple*}} \cap I \big| - \mathbb{E}\big| E_n^{Q_{n,k}^\textrm{tuple*}} \cap I \big|}{n^{k-1}}}$ has the same limiting law as $Y_{n,k}$.
\end{lemma}
We now want to show that the same is true for $E_n^{Q_{n-1,k}^\textrm{tuple}}$. Note that we have the decomposition \begin{equation}\label{decomposition} Q_{n,k}^\textrm{tuple*} = Q_{n,k}^\textrm{tuple} \bigcup Q_{n,k}^\textrm{duplicate} \bigcup Q_{n,k}^\textrm{rest}
\end{equation} where $Q_{n,k}^\textrm{duplicate}$ contains the tuples with exactly two identical entries and $Q_{n,k}^\textrm{rest}$ contains the rest of the tuples in $Q_{n,k}^\textrm{tuple*}$.
We have the recursive relation:
\begin{equation}\label{recursive} E_n^{Q_{n,k}^\textrm{tuple}} = E_n^{Q_{n-1,k}^\textrm{tuple}} \bigcup \big(E_n^{Q_{n-1,k-1}^\textrm{tuple}}\big)^{\bullet k} \end{equation} where $\big( E_n^{Q_{n-1,k-1}^\textrm{tuple}} \big)^{\bullet k}$ denotes the multiset containing $k$ copies of $E_n^{Q_{n-1,k-1}^\textrm{tuple}}$.
We also have the following decomposition of $E_n^{Q_{n,k}^\textrm{duplicate}}$:
\begin{lemma} \label{duplicate}
The multiset $E_n^{Q_{n,k}^\textrm{duplicate}}$ is the union of $n \binom{k}{2}$ rotated copies of $E_n^{Q_{n-1,k-2}^\textrm{tuple}}$.
\end{lemma}
\begin{proof}
Note that if $S \in Q_{n,k}^\textrm{tuple}$ is a $k$-tuple containing any element $j$ such that $b_\mu(j) = 1$ and $S^{-j}$ is the $(k-1)$-tuple gotten from $S$ by omitting $j$, then \begin{equation} \label{triv} \sum_{i \in S} b_\mu(i)=\sum_{i \in S^{-j}} b_\mu(i)
\end{equation}
Let $Q_{n,k}^{\textrm{tuple}-j}$ denote the set of $k$-tuples not containing the element $j$. Then using \eqref{triv}, we see that for $1 \le j \le n$, the multisets $E_n^{Q_{n,k}^{\textrm{tuple}-j}}$ are all rotations of $E_n^{Q_{n-1,k}^{\textrm{tuple}}}$. Since there are $n \binom{k}{2}$ ways to pick two entries of a $k$-tuple in $Q_{n,k}^\textrm{duplicate}$ and assign them the same value $j$, the result follows.
\end{proof}
Together, \eqref{recursive} and Lemma \ref{duplicate} show that $E_n^{Q_{n,k}^\textrm{tuple*}}$ is the union of $E_n^{Q_{n-1,k}^\textrm{tuple}}$, $k$ copies of $E_n^{Q_{n-1,k-1}^\textrm{tuple}}$, $n \binom{k}{2}$ rotated copies of $E_n^{Q_{n-1,k-2}^\textrm{tuple}}$ and a set with cardinality of order $O(n^{k-2})$ that we can ignore. Then by Lemma \ref{Qtuplestar} and inducting on $k$, we have
\begin{lemma} \label{Qtuple}
As $n \to \infty$, $\displaystyle{ \frac{\big| E_n^{Q_{n-1,k}^\textrm{tuple}} \cap I \big| - \mathbb{E}\big| E_n^{Q_{n-1,k}^\textrm{tuple}} \cap I \big|}{n^{k-1}}}$ has the same limiting law as $Y_{n,k}$.
\end{lemma}
Since $E_n^{Q_{n-1,k}^\textrm{tuple}}$ just consists of $k!$ copies of $E_n^{Q_{n-1,k}^\textrm{set}}$, this proves Theorem \ref{simplifythm} for $Y_{n,k}^\textrm{irrep}$.
\section{Equidistributed sequences} \label{equidistributed sequences}
In this section, we review some of the theory of uniform distribution mod 1 that will be important in the sequel. This material is all contained in Kuipers and Niederreiter's book \cite{kuipers}, which contains many other interesting results on equidistribution.
It will be convenient to identify the interval $[0,1]$ with the 1-dimensional torus (circle) $\mathbb{T}^1$ by identifying the two endpoints. Since $\mathbb{T}^1$ is a group under addition, this will obviate the need to take fractional parts.
\begin{definition}\label{equidistributed}
A sequence $(x_n)_{n \in \mathbb{N}}$ of elements of $\mathbb{T}^d$ is said to be equidistributed or uniformly distributed if for every box $\displaystyle{B = \prod_{i=1}^d [a_i, b_i] }$ such that $0 \le a_i < b_i \le 1$, we have \begin{equation}\lim_{n \to \infty} \frac{A(B; n)}{n} = \prod_{i=1}^d (b_i - a_i) \end{equation} where $A(B; n)$ counts the number of elements of the sequence $(x_1,...,x_n)$ in the box $B$.
\end{definition}
We have the following important criterion for equidistribution first formulated by Hermann Weyl.
\begin{theorem}[Weyl's Criterion]\label{weylcriterion}
The sequence $(x_j)_{j \in \mathbb{N}}$ of elements in $\mathbb{T}^d$ is equidistributed if and only if for each nonzero element $h \in \mathbb{Z}^d$, $\displaystyle{\lim_{n \to \infty} \frac{1}{n} \sum_{j=1}^n e^{2 \pi i h \cdot x_j} = 0}$.
\end{theorem}
From this, it is easy to establish \cite[Thm. 1.6.4]{kuipers}:
\begin{theorem}[Weyl's Equidistribution Theorem]\label{weylthm}
If $\gamma$ is irrational, then the sequence $(n \gamma)_{n \in \mathbb{N}}$ is equidistributed. More generally, if $p$ is a polynomial with at least one nonconstant irrational coefficient, then the sequence $(p(n))_{n \in \mathbb{N}}$ is equidistributed.
\end{theorem}
Weyl's criterion only gives a qualitative asymptotic condition for equidistribution. It will also be useful to have quantitative bounds on the rate of convergence to equidistribution.
\begin{definition}\label{multidiscrepancy}
Let $J$ be the set of $d$-dimensional boxes of the form $\displaystyle{\prod_{i=1}^d [a_i, b_i] }$ where $0 \le a_i < b_i \le 1$ and let $\mathcal{P}_n$ be a multiset or sequence of $n$ elements of the $d$-dimensional torus $\mathbb{T}^d$.
The multidimensional discrepancy is given by \[D(\mathcal{P}_n) := \sup_{B \in J} \bigg|\frac{A(B; n)}{n} - \prod_{i=1}^d (b_i - a_i) \bigg| \] where $A(B; n)$ counts the number of elements of $\mathcal{P}_n$ in the box $B$.
\end{definition}
\begin{remark}
By \cite[Thm. 2.1.1]{kuipers}, a sequence $(x_n)_{n \in \mathbb{N}}$ is equidistributed if and only if \sloppy $\displaystyle{ \lim_{n \to \infty} D(x_1,...,x_n) = 0}$.
\end{remark}
\begin{definition}
If $\omega$ is an infinite sequence, let $D_{i,n}(\omega)$ be the discrepancy of the $(i+1)^\textrm{st}$ through $(i+n)^\textrm{th}$ terms of the sequence. If $D_{i,n}(\omega) \to 0$ uniformly in $i$ as $n \to \infty$, we say that the sequence $\omega$ is \textit{well-distributed}.
\end{definition}
\begin{remark}
Note that for the sequence $x_n = n \alpha +\beta$, the discrepancy $D_{i,n}(x)$ only depends on $n$ since the subsequence $x_{i+1},...,x_{i+n}$ is just a translate of the sequence $x_1,...,x_n$. Thus, $(x_n)$ is well distributed. In fact, using the van der Corput lemma, one can show that if $p$ is a polynomial with at least one nonconstant irrational coefficient, then the sequence $(p(n))_{n \in \mathbb{N}}$ is well-distributed.
\end{remark}
The following definition is useful to state various estimates for the discrepancy:
\begin{definition}\label{defirrational}
The irrationality measure, $\mu(r)$, of a real number $r$ is given by \[\mu(r)= \inf \left\{ \lambda\colon \left\lvert r-\frac{p}{q}\right\rvert < \frac{1}{q^{\lambda}} \text{ has only finitely many integer solutions in } p \text{ and } q \right\} \]
\end{definition}
\begin{theorem}[{\cite[Thm. 2.3.2]{kuipers}}] \label{1Ddisc}
Let $\alpha$ have irrationality measure $\lambda$ and let $x_n = n \alpha + \beta$. Then for every $\varepsilon > 0$, \begin{equation} D_{i,n}(x) = O(n^{-\frac{1}{\lambda-1} + \varepsilon}) \end{equation}
\end{theorem}
\section{Moment method for $k=1$ case} \label{k=1}
Before applying the moment method to find the limiting distribution of $Y_{n,k}$ for $k > 1$, let us first consider the $k=1$ case of permutation matrices. Then the appropriate scaled count of eigenangles in the interval $I=(\alpha, \beta)$ is
\begin{equation} Y_{n,1}^\textrm{tuple} := \frac{X_{n,1}^\textrm{tuple} - \mathbb{E}[X_{n,1}^\textrm{tuple}]}{\sqrt{\log n}} = \frac{1}{\sqrt{\log n}} \sum_{j=1}^n \bigg(C_j^{(n)} - \frac{1}{j} \bigg) (\{j\alpha \} - \{j\beta \})
\end{equation}
Wieand \cite{wieand} (for $\theta=1$) and Ben Arous and Dang \cite{arousdang} (for general $\theta > 0$) use the Feller coupling \cite{feller} along with the CLT to show limiting normality of $Y_{n,1}^\textrm{tuple}$. The Feller coupling is a way of constructing random permutations using sums of Bernoulli random variables that allows for quantitative bounds on the distance between $C_j^{(n)}$ and independent Poisson variables $W_j$ with parameter $1/j$. Using this coupling, it turns out that the asymptotic behavior of $Y_{n,1}^\textrm{tuple}$ is unchanged if one replaces the dependent variables $C_j^{(n)}$ with the independent variables $W_j$. See \cite{wieand} for details. In this section, we will apply the method of moments to rederive this result.
To make computing the moments simpler, we assume $\alpha$ and $\beta$ are irrationals linearly independent over $\mathbb{Q}$ of finite irrationality measure. By Khintchine's theorem, the set of numbers with irrationality measure greater than 2 has Lebesgue measure 0, so this is not a very restrictive condition. It follows from Theorem \ref{1Ddisc} and \cite[Thm. 3]{wieand} that \begin{equation} \label{finiteness} \bigg|\sum_{j=1}^n \frac{1}{j} (\{j\alpha \} - \{j\beta \})\bigg| < C
\end{equation} for some absolute constant $C$. With this additional finiteness restriction on $\alpha$ and $\beta$, it suffices then to show that \begin{equation} Z_n := \frac{1}{\sqrt{\log n}} \sum_{j=1}^n C_j^{(n)} (\{j\alpha \} - \{j\beta \}) \end{equation} limits to a normal distribution. We wish to establish the following proposition, which states the convergence of the moments of $Z_n$ to those of a centered normal distribution with variance $\theta/6$.
\begin{proposition} \label{momentsk=1}
The odd moments of $Z_n$ limit to 0. For even $m$,
\begin{equation}\lim_{n \to \infty} \mathbb{E}[(Z_n)^m] = \frac{m!}{2^{m/2}} \frac{1}{(m/2)!} \left(\frac{\theta}{6}\right)^{m/2}
\end{equation}
\end{proposition}
For a partition $m = m_1+...+m_s$ such that $m_1 \ge... \ge m_s$, let $c_i$ be the number of parts $m_j$ equal to $i$, so that $\displaystyle{\sum_{i=1}^m i c_i = m}$. By the multinomial theorem,
\begin{multline} \label{multinomial}
(Z_n)^m =\frac{1}{(\log n)^{m/2}} \sum_{(m_i) \vdash m} \binom{m}{ m_1,...,m_s} \frac{1}{\prod_{l = 1}^m c_l!} \\ \sum_{j_1 \neq ... \neq j_s} \prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{m_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
\begin{remark}
The coefficients $\displaystyle{ \binom{m}{ m_1,...,m_s} \frac{1}{\prod_{l = 1}^m c_l!} }$ are the so-called Fa\`a di Bruno coefficients which arise in the Fa\`a di Bruno formula \cite{faa} for derivatives as well as the expansion of Bell polynomials.
\end{remark}
First, we collect a few estimates that we will need:
\begin{lemma} \label{estimates}
\begin{enumerate}
\item[]
\item For large $n$ and $s \ge 1$, \begin{equation}
\sum_{\substack{j_1+...+j_s = n \\ j_l \ge 1}} \frac{1}{j_1...j_s} = O\left(\frac{1}{n} (\log n)^{s-1} \right)
\end{equation}
\item For $\theta > 0$ and integer $1 \le i \le n$, \begin{equation}\label{thetabound} \prod_{j=0}^{i-1} \frac{n-j}{\theta+n-j-1} \le A(\theta) \left( \frac{\theta+n-1}{\theta+n-i} \right)^{1-\theta}
\end{equation} for some constant $A(\theta)$. Moreover, if $i$ is such that $n-i = \Omega(n)$, then \begin{equation}\label{thetasim} \prod_{j=0}^{i-1} \frac{n-j}{\theta+n-j-1} \sim \left( \frac{\theta+n-1}{\theta+n-i} \right)^{1-\theta}
\end{equation} as $n \to \infty$.
\end{enumerate}
\end{lemma}
\begin{proof}
Estimate (1) follows from a simple induction argument. (2) follows from the fact that $\log(1+x) \le x$ for $x > 0$ and $\log(1+x) \approx x$ for small $x$.
\end{proof}
Using Lemma \ref{estimates}, we can compute the following limit:
\begin{lemma} \label{deltabound}
Let $\theta > 0$ and let $\delta > 0$ be small. Then for $s \ge 1$
\begin{equation}
\lim_{n \to \infty} \frac{1}{(\log n)^{s}} \sum_{\substack{j_1+...+j_s \le n \\ j_1 \ge \lfloor \delta n \rfloor} } \prod_{i=0}^{\sum j_l -1} \frac{n-i}{\theta+n-i-1} \prod_{l=1}^s \left( \frac{\theta}{j_l} \right) = 0
\end{equation}
where all indices $1 \le j_1,...,j_s \le n$.
\end{lemma}
\begin{proof}
Recall Vinogradov's Big-Oh notation $\ll$ to denote inequality up to an absolute constant $C$ as $n \to \infty$. Then
\begin{align}
&\nonumber\sum_{\substack{j_1+...+j_s \le n \\ j_1 \ge \lfloor \delta n \rfloor} } \prod_{i=0}^{\sum j_l -1} \frac{n-i}{\theta+n-i-1} \prod_{l=1}^s \left( \frac{\theta}{j_l} \right) \\
\ll &\nonumber\sum_{u=\lfloor \delta n \rfloor}^n \sum_{j_1=\lfloor \delta n \rfloor}^{u-1} \frac{1}{j_1} \left( \frac{\theta+n-1}{\theta+n-u} \right)^{1-\theta} \sum_{j_2+...+j_s = u-j_1} \frac{1}{j_2...j_s} \\
\ll &\nonumber\sum_{u=\lfloor \delta n \rfloor}^n \sum_{j_1=\lfloor \delta n \rfloor}^{u-1} \frac{1}{j_1} \left( \frac{\theta+n-1}{\theta+n-u} \right)^{1-\theta} \frac{1}{u-j_1} \big(\log(u-j_1)\big)^{s-2} \\
\ll &\nonumber\sum_{u=\lfloor \delta n \rfloor}^n \frac{1}{n^\theta} \frac{1}{(n-u)^{1-\theta}} (\log u)^{s-1} \\
\ll & (\log n)^{s-1}
\end{align}
\end{proof}
We are now ready to prove the following estimate involving the expectation $\displaystyle{\mathbb{E}\Big[\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{\underline{r_l}} \Big]}$.
\begin{lemma} \label{Znlimitlemma}
Let $m = m_1+...+m_s$ be a partition of $m$ and $r_1,...,r_s$ be integers such that $1 \le r_l \le m_l$. Then
\begin{multline} \label{Znlimit}
\lim_{n \to \infty} \frac{1}{(\log n)^{m/2}} \sum_{j_1 \neq ... \neq j_s} \mathbb{E}\Big[\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{\underline{r_l}} \Big] (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\ = \lim_{n \to \infty} \frac{1}{(\log n)^{m/2}} \sum_{j_1,...,j_s} \prod_{l=1}^s \left( \frac{\theta}{j_l} \right)^{r_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
Moreover, the limit is 0 unless $r_l = 1$ and $m_l=2$ for all $l$.
\end{lemma}
\begin{proof}
By \eqref{diaconispoisson}, we have
\begin{multline} \label{cyclesum}
\sum_{j_1 \neq ... \neq j_s} \mathbb{E}\Big[\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{\underline{r_l}} \Big] (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\
= \sum_{\substack{j_1 \neq ... \neq j_s \\ \sum_{l=1}^s j_l r_l \le n}} \prod_{i=0}^{\sum j_l r_l -1} \frac{n-i}{\theta+n-i-1} \prod_{l=1}^s \left( \frac{\theta}{j_l} \right)^{r_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
If there exist parts in the partition of size 1, let $s'$ be such that $m_l = 1$ for $l > s'$. Then by \eqref{finiteness}, we can bound \eqref{cyclesum} (up to a constant) by
\begin{gather}
\nonumber \sum_{\substack{j_1 \neq ... \neq j_{s'} \\ \sum_{l=1}^{s'} j_l r_l \le n}} \prod_{i=0}^{\sum j_l r_l -1} \frac{n-i}{\theta+n-i-1} \prod_{l=1}^{s'} \left( \frac{\theta}{j_l} \right)^{r_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\
\ll \sum_{\substack{1 \le j_l \le \delta n \\ 1 \le l \le s'}} \frac{1}{j_1...j_{s'}} \ll (\log n)^{(m-1)/2}
\end{gather}
where we have used Lemma \ref{deltabound}. Thus, both sides of \eqref{Znlimit} are clearly 0 when there is a part of size 1, and we can assume all parts are size at least 2. Then $s \le m/2$. By Lemma \ref{deltabound},
\begin{align}
&\nonumber \lim_{n \to \infty} \frac{1}{(\log n)^{m/2}} \sum_{\substack{j_1 \neq ... \neq j_s \\ \sum_{l=1}^s j_l r_l \le n}} \prod_{i=0}^{\sum j_l r_l -1} \frac{n-i}{\theta+n-i-1} \prod_{l=1}^s \left( \frac{\theta}{j_l} \right)^{r_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\
=&\nonumber \lim_{n \to \infty} \frac{1}{(\log n)^{m/2}} \sum_{\substack{1 \le j_l \le \delta n \\ 1 \le l \le s}} \prod_{i=0}^{\sum j_l -1} \frac{n-i}{\theta+n-i-1} \prod_{l=1}^s \left( \frac{\theta}{j_l} \right)^{r_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{align}
This limit is clearly 0 unless $r_l = 1$ and $m_l=2$ for all $l$, in which case \eqref{Znlimit} follows by taking $\delta \to 0$.
\end{proof}
We can finally obtain a simplified expression for the sum in \eqref{multinomial}.
\begin{lemma} \label{Znstirlinglemma}
Let $m = m_1+...+m_s$ be a partition of $m$. Then \begin{multline}\label{Znstirling}
\lim_{n \to \infty} \frac{1}{(\log n)^{m/2}} \sum_{j_1 \neq ... \neq j_s} \mathbb{E}\Big[\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{m_l} \Big] (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\ = \lim_{n \to \infty} \frac{1}{(\log n)^{m/2}} \sum_{j_1,...,j_s} \prod_{l=1}^s \frac{\theta}{j_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
where the limit is 0 unless $m_l=2$ for all $l$.
\end{lemma}
\begin{proof}
We rewrite $\displaystyle{\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{m_l}}$ by making use of the following identity relating ordinary powers to falling factorial powers:
\begin{equation}\label{stirling} x^n = \sum_{r=0}^n \stirling{n}{r} x^{\underline{r}} \end{equation}
where the curly braces denote Stirling numbers of the second kind, i.e. the number of ways to partition $[n]$ into $r$ non-empty subsets.
Then we have \begin{equation}\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{m_l} = \sum_{1 \le r_l \le m_l} A_{(r_1,...,r_s)} \prod_{l=1}^s \Big( C_{j_l}^{(n)} \Big)^{\underline{r_l}}
\end{equation}
for some constants $A_{(r_1,...,r_s)}$; note that $A_{(1,...,1)} = 1$. The result follows from Lemma \ref{Znlimitlemma}.
\end{proof}
By logarithmic summability \cite[p. 1569]{wieand}, \begin{equation}\label{logarithmic} \lim_{n \to \infty} \frac{1}{\log n} \sum_{j=1}^n \frac{1}{j} (\{j\alpha \} - \{j\beta \})^{m} = \lim_{n \to \infty} \frac{1}{n} \sum_{j=1}^n (\{j\alpha \} - \{j\beta \})^{m}\end{equation} We will need this limit for general $m$ in Section~\ref{k>1}.
\begin{lemma}\label{calculation} If $m$ is even,
\[\lim_{n \to \infty} \frac{1}{n} \sum_{j=1}^n (\{j\alpha \} - \{j\beta \})^{m} = \frac{2}{(m+1)(m+2)} \] If $m$ is odd, the limit is 0.
\end{lemma}
\begin{proof}
The sequence $(j\alpha , j\beta )$ is uniformly distributed mod 1. (See Definition \ref{equidistributed} and Theorem \ref{weylcriterion}). Thus, by exercise 1.6.3 in \cite[p. 52]{kuipers}, for Riemann integrable functions $f(x,y)$, \[\lim_{n \to \infty} \frac{1}{n} \sum_{j=1}^n f(\{j\alpha \}, \{j\beta \}) = \int_0^1 \int_0^1 f(x,y)dx dy.\] Therefore, \[\lim_{n \to \infty} \sum_{j=1}^n \frac{(\{j\alpha \} - \{j\beta \})^m}{n} = \mathbb{E} [(U_1 - U_2)^m] = \mathbb{E} \bigg[\sum_{i=0}^m \binom{m}{ i} U_1^i (-U_2)^{m-i}\bigg] \] where $U_1$ and $U_2$ are independent variables uniform on $[0,1]$. If $m$ is odd, this expectation is 0 by symmetry. If $m$ is even, the expectation equals
\begin{equation*}
\begin{aligned}
\sum_{i=0}^m \binom{m}{ i} \frac{1}{(i+1)(m-i+1)} (-1)^{m-i} &= \sum_{i=0}^m \frac{1}{(m+1)(m+2)} \binom{m+2}{ i+1} (-1)^i \\ &= \frac{2}{(m+1)(m+2)}
\end{aligned}
\end{equation*}
\end{proof}
Now combining \eqref{multinomial}, Lemma \ref{Znstirlinglemma}, and Lemma \ref{calculation}, we see that the odd moments of $Z_n$ converge to 0 and that even moments converge to \[\lim_{n \to \infty} \frac{m!}{2^{m/2}} \frac{1}{(m/2)!} \frac{1}{(\log n)^{m/2}} \bigg(\sum_{j=1}^n \frac{\theta}{j} (\{j\alpha \} - \{j\beta \})^2 \bigg)^{m/2} = \frac{m!}{2^{m/2}} \frac{1}{(m/2)!} \left(\frac{\theta}{6}\right)^{m/2} \]
This proves Proposition \ref{momentsk=1}.
\begin{remark}
If $\alpha$ and $\beta$ are not irrationals linearly independent over $\mathbb{Q}$, Wieand \cite{wieand} has also calculated the limit of the quadratic sums ($m=2$ in Lemma \ref{calculation}) for various cases. A modification of this moment method then gives that the limiting distribution is a normal distribution with variance given by the limit of the quadratic sum.
\end{remark}
\begin{remark} \label{Znstar}
Let $W_j$ be independent Poisson variables with parameter $\theta/j$. For each $n$, define \begin{equation} Z_n^* := \frac{1}{\sqrt{\log n}} \sum_{j=1}^n W_j (\{j\alpha \} - \{j\beta \})
\end{equation}
It is easy to see that Lemma \ref{Znlimitlemma}, and therefore Lemma \ref{Znstirlinglemma} hold if we replace the random variables $C_{j}^{(n)}$ with $W_j$ and therefore all moments of $Z_n$ and $Z_n^*$ have the same limit as $n \to \infty$. The method of moments thus gives an alternative to the Feller coupling method of seeing that $Z_n$ and $Z_n^*$ converge to the same limit (since the normal distribution is characterized by its moments).
\end{remark}
\section{Moment method for $k > 1$} \label{k>1}
In this section, we prove Theorem \ref{momentsthm}. With the same notation as in the previous section, we have
\begin{multline} \label{Ymultinomial}
\big(Y_{n,k}\big)^m = \frac{1}{n^{m(k-1)}} \sum_{(m_i) \vdash m} \binom{m}{ m_1,...,m_s} \frac{1}{\prod_{l = 1}^m c_l!} \\
\sum_{j_1 \neq ... \neq j_s} \prod_{l=1}^s j_l^{m_l(k-1)} \big(C_{j_l}^{(n)}\big)^{m_l} (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
First, we prove the following Riemann sum approximation:
\begin{lemma} \label{Yndeltaboundlemma}
Let $m = m_1+...+m_s$ be a partition of $m$ and $r = r_1+...+r_s$ where $1 \le r_l \le m_l$. As usual, all indices $1 \le j_1,...,j_s \le n$. Then
\begin{align} \label{Yndeltabound}
& \nonumber \lim_{n \to \infty} \frac{1}{n^{m(k-1) +s-r}} \sum_{1 \le \sum j_l r_l \le n} \prod_{l=1}^s j_l^{m_l(k-1) - r_l} \prod_{i=0}^{\sum j_l r_l -1} \frac{n-i}{\theta+n-i-1} \\
= & \int \limits_{0 \le \sum x_l r_l \le 1} \prod_{l=1}^s x_l^{m_l(k-1) - r_l} \left( \frac{1}{1-\sum x_l r_l} \right)^{1-\theta} dx_1...dx_s
\end{align}
\end{lemma}
\begin{proof}
By \eqref{thetabound},
\begin{align}
&\nonumber \frac{1}{n^{m(k-1) +s-r}} \sum_{1 \le \sum j_l r_l \le n} \prod_{l=1}^s j_l^{m_l(k-1) - r_l} \prod_{i=0}^{\sum j_l r_l -1} \frac{n-i}{\theta+n-i-1} \\
\ll &\nonumber \frac{1}{n^{m(k-1) +s-r}} \sum_{1 \le \sum j_l r_l \le n} \prod_{l=1}^s j_l^{m_l(k-1) - r_l} \left( \frac{\theta+n-1}{\theta+n-\sum j_l r_l} \right)^{1-\theta} \\
\sim &\nonumber \frac{1}{n^s} \sum_{1 \le \sum j_l r_l \le n} \prod_{l=1}^s \Big(\frac{j_l}{n}\Big)^{m_l(k-1) - r_l} \left( \frac{1}{1-\sum j_l r_l/n} \right)^{1-\theta} \\
\sim & \int \limits_{0 \le \sum x_l r_l \le 1} \prod_{l=1}^s x_l^{m_l(k-1) - r_l} \left( \frac{1}{1-\sum x_l r_l} \right)^{1-\theta} dx_1...dx_s
\end{align}
One can check that this integral is finite for $\theta > 0$. By continuity of the integral, and \eqref{thetasim}, we can then replace the first $\ll$ with $\sim$ and the result follows.
\end{proof}
The following lemma computes the inner sum in \eqref{Ymultinomial}:
\begin{lemma} \label{Ynlimitlemma}
Let $m = m_1+...+m_s$ be a partition of $m$. Then
\begin{multline} \label{Ynlimit}
\lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j_1 \neq ... \neq j_s} \prod_{l=1}^s j_l^{m_l(k-1)} \mathbb{E}\Big[\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{m_l} \Big] (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\ = \lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j_1+...+j_s\le n} \left(\frac{n}{n-(j_1+...+j_s)} \right)^{1-\theta} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
\end{lemma}
\begin{proof}
Let $r_1,...,r_s$ be integers such that $1 \le r_l \le m_l$. Then by \eqref{diaconispoisson} and Lemma \ref{Yndeltaboundlemma},
\begin{multline*}
\lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j_1 \neq ... \neq j_s} \prod_{l=1}^s j_l^{m_l(k-1)} \mathbb{E}\Big[\prod_{l=1}^s \Big(C_{j_l}^{(n)}\Big)^{\underline{r_l}} \Big] (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\ = \lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j_1+...+j_s\le n} \left(\frac{n}{n-(j_1+...+j_s)} \right)^{1-\theta} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline*}
if $r_l=1$ for all $l$ and otherwise the limit is 0. Equation \eqref{Ynlimit} then follows from formula \eqref{stirling}.
\end{proof}
Wieand \cite{wieand} uses Exercise 65 from Szeg\H o and P\' olya's \textit{Problems and Theorems in Analysis I} \cite{polya} to prove \eqref{logarithmic}. To compute the limit in \eqref{Ynlimit}, we need a slightly modified version of this exercise.
\begin{lemma}\label{szego}
Define the following data:
Let $f(n)$ and $g_\delta(n)$ be increasing functions for each $\delta > 0$. Let $s_{n i}$, $1 \le i \le f(n)$ be a bounded array such that for each $\delta > 0$, there exists some limiting value $L$ such that \[\lim_{n \to \infty} \max_{i > g_\delta(n)} |s_{n i} - L| = 0 \]
Also, let $p_{n i}$, $1 \le i \le f(n)$ be an array such that $\displaystyle{\sum_{i=1}^{f(n)} p_{n i} = 1}$, $\displaystyle{\sum_{i=1}^{f(n)} |p_{n i}|}$ is bounded, and such that $\displaystyle{\lim_{\delta \to 0} \limsup_{n \to \infty}\sum_{i=1}^{g_\delta(n)} |p_{n i}| = 0}$. Finally, let $\displaystyle{t_n = \sum_{i=1}^{f(n)} p_{n i} s_{n i}}$. Then $\displaystyle{\lim_{n \to \infty} t_n = L}$.
\end{lemma}
\begin{proof}
The proof is a simple modification of the argument in Szeg\H o and P\' olya's exercise. \end{proof}
We now apply this lemma to the sum in \eqref{Ynlimit}. First, consider the simplest case $s=1$ and $\theta=1$.
\begin{lemma}\label{s=1} If $m$ is even (and positive), \begin{equation} \lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j=1}^n j^{m(k-1)-1} (\{j\alpha \} - \{j\beta \})^m = \frac{2}{(k-1)m(m+1)(m+2)}\end{equation} If $m$ is odd, the limit is 0.
\end{lemma}
\begin{proof}
Set \[s_{n i} = s_i := \frac{1}{i} \sum_{j=1}^i w_j \] and \[t_n = \frac{1}{\sum_{j=1}^n b_j} \sum_{j=1}^n b_j w_j \] where $b_j = j^{m(k-1)-1}$ and $w_j = (\{j\alpha \} - \{j\beta \})^m$.
A simple calculation (essentially Abel summation) shows that $t_n = \sum \limits_{i=1}^n p_{n i} s_i$ where $\displaystyle{p_{n i} = \frac{i(b_i - b_{i+1})}{\sum_{j=1}^n b_j}}$ when $i < n$ and $\displaystyle{p_{nn} = \frac{n b_n}{\sum_{j=1}^n b_j}}$. Note that all the terms $p_{n i}$ are negative except $p_{n n}$. Setting $f(n) = n$ and $g_\delta(n) = \delta n$, we see that all the conditions for the array $p_{ni}$ in Lemma \ref{szego} are satisfied. Then \[\lim_{n \to \infty} \frac{m(k-1)}{n^{m(k-1)}} \sum_{j=1}^n j^{m(k-1)-1} (\{j\alpha \} - \{j\beta \})^m = \lim_{n \to \infty} t_n = \lim_{n \to \infty} \frac{1}{n} \sum_{j=1}^n (\{j\alpha \} - \{j\beta \})^m \]
The result follows by Lemma \ref{calculation}.
\end{proof}
More generally, we need to deal with a multi-dimensional sequence.
\begin{lemma}\label{generalmainlemma}
Define a partition $\displaystyle{m = m_1+...+m_s}$ such that $\displaystyle{m_1 \ge... \ge m_s \ge 1}$. If all $m_l$ are even,
\begin{multline}
\lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j_1+...+j_s\le n} \left(\frac{n-(j_1+...+j_s)}{n} \right)^{\theta-1} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\ = \frac{\Gamma(\theta)}{\Gamma(m(k-1)+\theta)} \prod_{l=1}^s \frac{2 \theta \Gamma(m_l(k-1))}{(m_l+1)(m_l+2)}
\end{multline}
where $\Gamma(z)$ is the Gamma function. Otherwise, the limit is 0.
\end{lemma}
\begin{proof}
Define the set of integer lattice points \begin{equation} \mathfrak{X}_n = \{ (j_1,...,j_s) \in [n]^s: \sum_{l=1}^s j_l \le n \} \end{equation} and set $\displaystyle{f(n) = |\mathfrak{X}_n|}$. Let $b_{n, i}$ denote the elements of the multiset \[\bigg\{ \left(\frac{n-(j_1+...+j_s)}{n} \right)^{\theta-1} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) : (j_1,...,j_s) \in \mathfrak{X}_n \bigg\} \] listed in increasing order. This induces a (not necessarily unique) ordering on the underlying set $\mathfrak{X}_n$. Let $\mathfrak{X}_n^i$ be the set consisting of the first $i$ elements of $\mathfrak{X}_n$ under this ordering.
Following the proof of Lemma \ref{s=1}, we define the arrays as follows.
Set \begin{equation*}\label{sn} s_{n i} = \frac{1}{i}\sum_{(j_1,...,j_s) \in \mathfrak{X}_n^i} \prod_{l=1}^s (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{equation*} and \begin{equation*} t_n = \frac{1}{\sum_{i=1}^{f(n)} b_{n,i}} \sum_{(j_1,...,j_s) \in \mathfrak{X}_n} \left(\frac{n-(j_1+...+j_s)}{n} \right)^{\theta-1} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \end{equation*}
Then $\displaystyle{t_n = \sum_{i=1}^{f(n)} p_{ni} s_{ni}}$ where $\displaystyle{p_{n i} = \frac{i(b_{n, i} - b_{n, i+1})}{\sum_{j=1}^{f(n)} b_{n, j} }}$ for $i < f(n)$ and $\displaystyle{p_{n, f(n)} = \frac{f(n) b_{n, f(n)}}{\sum_{j=1}^{f(n)} b_{n, j} }}$. Clearly $\displaystyle{\sum_{i=1}^{f(n)} p_{ni} = 1}$ and since $b_{n,i}$ is a nondecreasing sequence, $\displaystyle{\sum_{i=1}^{f(n)} |p_{n i}|}$ will be bounded.
By Riemann integral comparison,
\begin{align}
\lim_{n \to \infty}\frac{1}{n^{m(k-1)}} \sum_{j=1}^{f(n)} b_{n, j} &\nonumber= \lim_{n \to \infty} \sum_{\sum_{l=1}^s j_l \le n} \frac{\theta^s}{n^s} \big(1 - (j_1+...+j_s)/n\big)^{\theta-1} \prod_{l=1}^s \left(\frac{j_l}{n}\right)^{m_l(k-1)-1} \\ &\nonumber= \int \limits_{\sum_{l=1}^s x_l \le 1} \theta^s (1- x_1-...-x_s)^{\theta-1} \prod_{l=1}^s x_l^{m_l(k-1)-1} dx_l \\ &= \label{mathematica} \theta^s \frac{\Gamma(\theta)}{\Gamma(m(k-1)+\theta)} \prod_{l=1}^s \Gamma(m_l(k-1))
\end{align}
where the last equality represents the normalizing constant for the Dirichlet distribution with parameters $m_1(k-1),...,m_s(k-1), \theta > 0$.
For each $\delta > 0$, define the set \begin{equation} \label{Yg} \mathfrak{Y}_{\delta, n} = \Big\{ (j_1,...,j_s) \in \mathfrak{X}_n: \left(\frac{n-(j_1+...+j_s)}{n} \right)^{\theta-1} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) \le \delta n^{m(k-1)-s} \Big\}
\end{equation} and let $g_\delta(n) = |\mathfrak{Y}_{\delta, n}|$.
Note that by scaling $\mathfrak{Y}_{\delta, n}$ by a factor of $n$, the asymptotics of $g_\delta(n)$ can also be computed via comparison to an integral (volume approximation).
\begin{equation} \label{gasymp} \lim_{n \to \infty} \frac{g_\delta(n)}{n^s} = \int \limits_{\substack{ \sum x_l \le 1 \cr (1-\sum x_l)^{\theta-1} \prod \theta x_l^{m_l(k-1)-1} \le \delta } } dx_1...dx_s
\end{equation}
Then
\begin{equation*} \begin{aligned}\lim_{\delta \to 0} \limsup_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{i=1}^{g_\delta(n)} |i(b_{n, i} - b_{n, i+1})| &\le \lim_{ \delta \to 0} \limsup_{n \to \infty} \frac{2}{n^{m(k-1)}} g_\delta(n) \delta n^{m(k-1)-s} \\ &= \lim_{\delta \to 0} 2 \delta \int \limits_{\substack{ \sum x_l \le 1 \cr (1-\sum x_l)^{\theta-1} \prod \theta x_l^{m_l(k-1)-1} \le \delta } } dx_1...dx_s = 0
\end{aligned}
\end{equation*}
It remains to show that for each $\delta > 0$, there exists some limiting value $L$ such that \[\lim_{n \to \infty} \max_{i > g_\delta(n)} |s_{n i} - L| = 0\] Define the mapping $\phi:[n]^s \to \mathbb{T}^{2s}$ given by \begin{equation} \phi(j_1,...,j_s) = (j_1 \alpha, j_1 \beta ,...,j_s \alpha , j_s \beta )
\end{equation}
Lemma \ref{tupledisc} below shows that $\displaystyle{ \lim_{n \to \infty} \max_{i > g_\delta(n)} |D(\phi(\mathfrak{X}_n^i))| = 0 }.$ This means that the condition on the array $s_{ni}$ is satisfied with $\displaystyle{ L := \int_0^1...\int_0^1 \prod_{l=1}^s (x_l - y_l)^{m_l}dx_ldy_l }$. By Lemma \ref{calculation}, $L = \displaystyle{\prod_{l=1}^s \frac{2}{(m_l+1)(m_l+2)}}$ if all the exponents $m_l$ are even and $L = 0$ otherwise. The result follows by combining this with \eqref{mathematica}.
\end{proof}
To finish the proof of Lemma \ref{generalmainlemma}, we show:
\begin{lemma}\label{tupledisc}
Following the notation from the proof of Lemma \ref{generalmainlemma}, for each $\delta > 0$, \begin{equation}\label{lemmadisc} \lim_{n \to \infty} \max_{i > g_\delta(n)} |D(\phi(\mathfrak{X}_n^i))| = 0 \end{equation}
\end{lemma}
\begin{proof}
For any $A > 0$, the lattice $(A \mathbb{Z})^s$ defines a partition of $\mathbb{R}_+^s$ into $s$-dimensional cubes $C_{i,A}$ of side length $A$. Define the cubes so that the boundaries do not overlap and order them according to distance from the origin to the center of the cube. For each $\varepsilon > 0$ (and $A > 1$), this then induces a partition $\displaystyle{ \mathfrak{Y}_{\varepsilon, n} = \bigcup_{i} (C_{i,A} \cap \mathfrak{Y}_{\varepsilon, n} ) }$ where $\mathfrak{Y}_{\varepsilon, n}$ is defined in \eqref{Yg}. Given $\delta > 0$, we need to show that $D(\phi(\mathfrak{Y}_{\varepsilon, n})) \to 0$ as $n \to \infty$ uniformly over $\varepsilon \ge \delta$.
Since $(j \alpha, j \beta)$ is well-distributed, the discrepancies over the cubes tend to 0 uniformly as the side length approaches infinity, i.e. $\displaystyle{ \lim_{A \to \infty} \sup_i D(\phi(C_{i,A} \cap \mathbb{Z}^s)) = 0 }$. It is not hard to see (e.g. \cite[Thm 2.6]{kuipers}) that \begin{equation}\label{di} D(\phi(\mathfrak{Y}_{\varepsilon, n})) \le \sum_i \frac{|C_{i,A} \cap \mathfrak{Y}_{\varepsilon, n}|}{ |\mathfrak{Y}_{\varepsilon, n}| } D(\phi(C_{i,A} \cap \mathfrak{Y}_{\varepsilon, n}))
\end{equation}
Define the set of indices $S_{A, \varepsilon, n}$ such that $(C_{i,A} \cap \mathbb{Z}^s) \neq (C_{i,A} \cap \mathfrak{Y}_{\varepsilon, n})$ for $i \in S_{A, \varepsilon, n}$. In words, $\{C_{i,A} : i \in S_{A, \varepsilon, n} \}$ is the set of cubes on the boundary of $\mathfrak{Y}_{\varepsilon, n}$. Then by \eqref{di}, it suffices to show that for each $A > 0$, \begin{equation}\label{boundary} \sum_{i \in S_{A, \varepsilon, n}} \frac{|C_{i,A} \cap \mathfrak{Y}_{\varepsilon, n}|}{ |\mathfrak{Y}_{\varepsilon, n}| } \to 0 \end{equation} as $n \to \infty$ uniformly over $\varepsilon \ge \delta$. Shrink $\mathbb{Z}^s$ by a factor of $n$, which induces a corresponding scaling of the subsets $\mathfrak{Y}_{\varepsilon, n}$ and $C_{i,A} \cap \mathfrak{Y}_{\varepsilon, n}$. Then as in \eqref{gasymp}, \eqref{boundary} follows by volume approximation. The discrepancy bound \eqref{lemmadisc} follows from \eqref{di} by taking $A \to \infty$.
\end{proof}
Putting this together, we see that for even $m$,
\begin{align}
\mathbb{E}[\big(Y_{\infty, k}\big)^m] &\nonumber= \lim_{n \to \infty} \mathbb{E}[\big(Y_{n,k} \big)^m] \\ &\nonumber= \sum_{\substack{(m_i) \vdash m \\ m_i \text{ even}}} \binom{m}{ m_1,...,m_s} \frac{1}{\prod_{l = 1}^m c_l!} \frac{\Gamma(\theta)}{\Gamma(m(k-1)+\theta)} \prod_{l=1}^s \frac{2 \theta \Gamma(m_l(k-1))}{(m_l+1)(m_l+2)} \\
&= \label{momenttts} \frac{\Gamma(\theta)}{\Gamma(m(k-1)+\theta)} \sum_{\substack{(m_i) \vdash m \\ m_i \text{ even}}} \frac{m!}{\prod_{l = 1}^m c_l!}\prod_{l=1}^s \frac{2 \theta \Gamma(m_l(k-1))}{(m_l+2)!}
\end{align}
and $\displaystyle{\mathbb{E}[\big(Y_{\infty, k}\big)^m] = 0}$ for odd $m$.
\begin{definition}
The partial Bell polynomials are given by \[B_{n,k}(x_1,...,x_{n-k+1}) = \sum \frac{n!}{j_1!...j_{n-k+1}!} \left(\frac{x_1}{1!} \right)^{j_1}...\left(\frac{x_{n-k+1}}{(n-k+1)!} \right)^{j_{n-k+1}}\]
where the sum is taken over all sequences $j_1,...,j_{n-k+1}$ of non-negative integers such that $j_1+...+j_{n-k+1} = k$ and $j_1+2j_2+...+(n-k+1) j_{n-k+1} = n$. Then the complete Bell polynomials are defined by $\displaystyle{B_n(x_1,...,x_n) = \sum_{k=1}^n B_{n,k}(x_1,...,x_{n-k+1})}$.
\end{definition}
The Bell polynomials satisfy the exponential formula: \begin{equation} \exp \left( \sum_{n=1}^\infty \frac{a_n}{n!} x^n \right) = \sum_{n=0}^\infty \frac{B_n(a_1,...,a_n)}{n!} x^n
\end{equation}
Using this formula, one sees that as formal power series, \begin{equation*} \sum_{m = 0}^\infty \mathbb{E}[(Y_{\infty, k})^m] \frac{\Gamma(m(k-1)+\theta)}{\Gamma(\theta) m!} z^m = \exp(K(z))
\end{equation*} where $\displaystyle{K(z) = \sum_{m=1}^\infty \kappa_{2m} z^{2m}}$ and $\displaystyle{\kappa_{2m} = \frac{2\theta\Gamma(2m(k-1))}{(2m+2)!} }$. This completes the proof of Theorem \ref{momentsthm}.
\section{Eigenvalue density when $k = 2$ and $\theta=1$} \label{k=2}
We will now specialize to the case $k = 2$ and $\theta=1$. Then $\displaystyle{\kappa_{2m} = \frac{2}{2m(2m+1)(2m+2)}}$ and $K(z)$ has radius of convergence 1. Let us determine a closed form for $K(z)$ in this region. Note that $\displaystyle{(z^2 K(z))''' = \frac{2z}{1-z^2}}$. Solving this differential equation, we find that \begin{equation} K(z) = \frac{3}{2} - \frac{1}{2}\left(1 - \frac{1}{z}\right)^2 \log(1 - z) - \frac{1}{2}\left(1 + \frac{1}{z}\right)^2 \log(1 + z) \end{equation}
which is well defined for $|z| < 1$. By taking the branch cut of $\log(1-z)$ to be $[1, \infty)$ and the branch cut of $\log(1+z)$ to be $(-\infty, -1]$, we see that $K(z)$ can be extended analytically to $\mathbb{C} \setminus \{ (-\infty, -1] \cup [1, \infty) \}$. To extract the density, we use the Stieltjes transform and the associated inversion formula.
\begin{definition}
For a probability measure $\mu$, the Stieltjes transform is given by \[G_\mu(z) = \int_{\mathbb{R}} \frac{1}{z-t } \mu(dt)\] It is well-defined on $\displaystyle{\mathbb{C} \setminus \text{support}( \mu)}$. The Stieltjes inversion formula states \[ d\mu(x) = \lim_{\varepsilon \to 0^+} \frac{G_\mu(x - i \varepsilon) - G_\mu(x + i \varepsilon)}{2 i \pi}\] In particular, if this limit exists for all $x$ in the support of $\mu$, the formula gives the continuous density function $\rho$ of $\mu$.
\end{definition}
As formal power series, the Stieltjes transform $\displaystyle{G(z) := \mathbb{E}\bigg[\frac{1}{z - Y_{\infty, 2}}\bigg]}$ equals $\displaystyle{\frac{1}{z} \exp(K(1/z))}$. This must also be the asymptotic series expansion of $G(z)$ as $z \to \infty$ since it is determined uniquely. Thus, we have the equality $\displaystyle{G(z) = \frac{1}{z} \exp(K(1/z))}$ as analytic functions for $|z| > 1$. By the uniqueness of analytic continuation, this equality holds true in fact for $z \in \mathbb{C} \setminus [-1, 1]$.
Using the inversion formula on the random variable $Y_{\infty, 2}$, we get for $-1 < t < 0$,
\begin{align*}
& \lim_{\epsilon \to 0^+} \Im G(t + i \epsilon) \\
= & \frac{1}{t} \exp\left(\frac{3}{2} - \frac{1}{2} (1 - t)^2 \log\Big(1 - \frac{1}{t}\Big) \right)
\lim_{\epsilon \to 0^+} \Im \exp\left(-\frac{1}{2} (1+t)^2 \log \Big(1 + \frac{1}{t + i \epsilon}\Big)\right) \\
= & \frac{1}{t} \exp\left(\frac{3}{2} - \frac{1}{2} (1 - t)^2 \log \Big(1 - \frac{1}{t} \Big)\right) \Im \exp\left(-\frac{1}{2} (1+t)^2 \Big(\log \Big(\frac{1}{|t|} - 1 \Big) - i \pi \Big)\right) \\
= & \frac{1}{t} \exp\left(\frac{3}{2} - \frac{1}{2} (1 - t)^2 \log \Big(1 - \frac{1}{t} \Big) -\frac{1}{2} (1+t)^2 \log \Big(\frac{1}{|t|} - 1 \Big)\right)\sin \bigg(\frac{(1+t)^2}{2} \pi \bigg)
\end{align*}
Thus, by symmetry (since all odd moments of $Y_{\infty, 2}$ are zero) the density is
\begin{align}
&\nonumber p_{Y_{\infty, 2}}(t) \\
= & -\frac{1}{\pi} \lim_{\epsilon \to 0^+} \Im G(-|t| + i \epsilon) \nonumber \\
= &\frac{1}{\pi |t|} \exp\bigg(\frac{3}{2} - \frac{1}{2} (1 + |t|)^2 \log\Big(1 + \frac{1}{|t|}\Big) -\frac{1}{2} (1-|t|)^2 \log\Big(\frac{1}{|t|} - 1 \Big)\bigg) \sin \bigg(\frac{(1-|t|)^2}{2} \pi \bigg) \nonumber \\
= & \frac{e^{3/2}}{\pi |t|} \left(\frac{1}{|t|} - 1\right)^{-\frac{1}{2} (1 - |t|)^2} \left(\frac{1}{|t|} + 1\right)^{-\frac{1}{2} (1 + |t|)^2} \sin \bigg(\frac{(1-|t|)^2}{2} \pi \bigg)
\end{align}
for $-1 \le t \le 1$ which proves Corollary \ref{twocor}.
\begin{figure}
\caption{Solid line: the density $p_{Y_{\infty, 2}}(t)$; dashed line: the normal density with mean $0$ and variance $1/12$.}
\label{densityfigure}
\end{figure}
Figure \ref{densityfigure} shows the graphs of $\displaystyle{p_{Y_{\infty, 2}}(t)}$ and a normal density with mean 0 and variance $1/12$. It is striking how similar the two densities look. (However, the Gaussian density is of course not compactly supported.)
\begin{remark}
Let $\alpha_1, \alpha_2, \beta_1, \beta_2$ be irrational numbers linearly independent over $\mathbb{Q}$ and $I_1 = (\alpha_1, \beta_1)$ and $I_2 = (\alpha_2, \beta_2)$ be two intervals. Wieand \cite{wieand} showed that for the defining representation on $\mathfrak{S}_n$ with uniform measure, the normalized eigenvalue counts $Z^{I_1}_n$ and $Z^{I_2}_n$ converge in distribution to independent normal random variables. This is because $\lim_{n \to \infty} \operatorname{Cov}(Z^{I_1}_n, Z^{I_2}_n) = 0$ and multivariate normal distributions are determined by their covariance structure.
However, by computing cross moments, one sees that for the $k=2$ representations on $\mathfrak{S}_n$ with uniform measure, $Y^{I_1}_{n,2}$ and $Y^{I_2}_{n,2}$ do not converge to independent random variables. For example, a little calculation shows that
\begin{align}
\lim_{n \to \infty} \mathbb{E}[(Y^{I_1}_{n,2})^2] \mathbb{E}[(Y^{I_2}_{n,2})^2] &= \frac{2}{2 \cdot 3 \cdot 4} \frac{2}{2 \cdot 3 \cdot 4} \\
\lim_{n \to \infty} \mathbb{E}[(Y^{I_1}_{n,2})^2 (Y^{I_2}_{n,2})^2] &= \frac{1}{4} \frac{2}{3 \cdot 4} \frac{2}{3 \cdot 4} + \frac{1! 1!}{4!}\frac{2}{3 \cdot 4}\frac{2}{3 \cdot 4}
\end{align}
Thus, unlike the $k = 1$ case, the squares of the random variables $Y^{I_1}_{n,2}$ and $Y^{I_2}_{n,2}$ are positively correlated in the limit.
\end{remark}
\section{General linear combinations of cycle lengths} \label{generalf}
We now put our results in the context of the prior work of Ben Arous and Dang \cite{arousdang}. Let $(u_j)_{j \ge 1}$ be a sequence of real numbers and let the random variable $\displaystyle{ X_n^{(u_j)} := \sum_{j=1}^n u_j C_j^{(n)} }$ be the associated linear combination of cycle lengths. Ben Arous and Dang obtain two different limiting laws for $X_n^{(u_j)}$ depending on the conditions that the sequence $(u_j)_{j \ge 1}$ satisfies. Theorem \ref{smooth} below contains parts (1) and (2) of Theorem 2.3 in \cite{arousdang}. Theorem \ref{rough} below is part (1) of Theorem 2.4 in \cite{arousdang}. See Definition 2.1 in \cite{arousdang} for the definition of convergence in the Ces\`aro $(C,\theta)$ sense.
\begin{theorem} \label{smooth}
Let $\theta > 0$ and assume that $\displaystyle{\sum_{j=1}^\infty \frac{u_j^2}{j} \in (0, \infty) }$. If $0 < \theta < 1$, assume additionally that the sequence $(|u_j|)_{j \ge 1}$ converges to zero in the Ces\`aro $(C, \theta)$ sense. Then under the Ewens distribution with parameter $\theta$, $\displaystyle{X_n^{(u_j)} - \mathbb{E}[X_n^{(u_j)}] } $ converges weakly as $n \to \infty$ to a non-Gaussian infinitely divisible distribution defined by its Fourier transform \[\phi(t) = \exp \left(\theta \int(e^{itx} - 1 - itx) dM_f(x) \right) \] where the L\'{e}vy measure $M_f$ is given by $\displaystyle{ M_f = \sum_{j=1}^\infty \frac{1}{j} \delta_{u_j} }$.
\end{theorem}
\begin{theorem} \label{rough}
Let $\theta > 0$ and assume that $\displaystyle{\sum_{j=1}^\infty \frac{u_j^2}{j} = \infty }$ and that $\max \limits_{1 \le j \le n} |u_j| = o(\eta_n)$ where $\displaystyle{ \eta_n^2 = \theta \sum_{j=1}^n \frac{u_j^2}{j} }$. Then under the Ewens distribution with parameter $\theta$, the centered and normalized eigenvalue statistic $\displaystyle{ \frac{X_n^{(u_j)} - \mathbb{E}[X_n^{(u_j)}]}{\sqrt{\Var X_n^{(u_j)}}} }$ converges weakly as $n \to \infty$ to the standard normal $\mathcal{N}(0, 1)$.
\end{theorem}
As in \cite{wieand}, the main thrust of the proofs of these two theorems is the Feller coupling that relates cycle lengths $C_j^{(n)}$ to independent Poisson variables $W_j$ with parameter $\theta/j$. If the sequence $(u_j)_{j \ge 1}$ satisfies the hypotheses of Theorem \ref{smooth} or \ref{rough}, then $X_n^{(u_j)}$ and $\displaystyle{ X_n^{*, (u_j)} := \sum_{j=1}^n u_j W_j }$ will have the same limiting behavior. It is then easy to compute the limiting law of (the normalized version of) $\displaystyle{ X_n^{*, (u_j)}}$ and see that it is infinitely divisible and given either by Theorem \ref{smooth} or \ref{rough} depending on the asymptotics of $(u_j)$.
Note that the random variables $Y_{n,k}$ studied in this work correspond to $X_n^{(u_j)}$ where $u_j = j^{k-1} (\{j \alpha \} - \{j \beta \})$. As shown in Theorem \ref{momentsthm}, the limiting distribution $Y_{\infty, k}$ is compactly supported for $k>1$, hence not infinitely divisible. Thus, we have uncovered a new class of limiting distributions $Y_{\infty, k}$ not present in \cite{arousdang}. The random variables $Y_{n,k}$ of course must fail to satisfy the hypotheses of both Theorems \ref{smooth} and \ref{rough} and indeed $\displaystyle{\lim_{n \to \infty} \eta_n = \infty}$ and $\max \limits_{1 \le j \le n} |u_j| = \Omega(\eta_n)$ where $\eta_n$ is defined as in Theorem \ref{rough}. Let $Y_{n,k}^*$ be the normalized version of $X_n^{*,(u_j)}$, i.e.
\begin{equation} Y_{n,k}^* := \sum_{j=1}^n \frac{j^{k-1} W_j (\{j \alpha\} - \{j \beta\})}{n^{k-1} }
\end{equation}
Using the L\'{e}vy-Khintchine Representation Theorem, it is easy to state the limiting distribution of $Y_{n,k}^*$. Recall that Kolmogorov's theorem \cite[p. 162]{durrett}, a special case of the L\'{e}vy-Khintchine Theorem, states that a random variable $Z$ has an infinitely divisible distribution with mean 0 and finite variance if and only if its characteristic function satisfies \[ \log \phi(t) = \int (e^{it x} - itx - 1)x^{-2} \nu(dx) \] where $\nu$ is called the canonical measure and $\Var Z = \nu(\mathbb{R})$.
\begin{proposition} \label{Ynkstar}
The random variables $Y_{n,k}^*$ converge weakly to a random variable $Y_{\infty,k}^*$ with an infinitely divisible law given by \[\log \mathbb{E}[\exp(i t Y_{\infty,k}^*)] = \int_{-1}^1 (e^{i t x} - it x - 1)x^{-2}\nu(dx) \] where the canonical measure is supported on the interval $[-1, 1]$ and given by \[\nu(dx) = \frac{\theta}{k-1} \left(-x^2 + \frac{|x|^3}{2} + \frac{|x|}{2} \right) dx\] with $\displaystyle{\nu(\mathbb{R}) = \frac{\theta}{12(k-1)} }$.
\end{proposition}
\begin{proof}
Let $\displaystyle{ a_j = \frac{j^{k-1}(\{j \alpha\} - \{j \beta\})}{n^{k-1}} }$. We have \begin{equation} \label{fininfdiv}
\log \mathbb{E}[\exp(i t Y_{n,k}^*)] = \log \prod_{j=1}^n \mathbb{E}[\exp(i t a_j W_j)] = \sum_{j=1}^n \frac{\theta}{j} (e^{i t a_j} - 1 )
\end{equation}
Then
\begin{align}
\nonumber \lim_{n \to \infty} \log \mathbb{E}[\exp(i t Y_{n,k}^*)] &= \lim_{n \to \infty} \sum_{j=1}^n \frac{\theta}{j} \big(\exp \bigg(i t \frac{j^{k-1}(\{j \alpha\} - \{j \beta\})}{n^{k-1}} \bigg) - 1 \big) \\
& \nonumber = \lim_{n \to \infty} \sum_{m=1}^\infty \frac{(it)^m}{m!} \sum_{j=1}^n \frac{\theta}{j} \bigg(\frac{j^{k-1}}{n^{k-1}} \bigg)^m ( \{j \alpha\} - \{j \beta \})^m \\
& \label{infdiv} = \sum_{m=1}^\infty \frac{(it)^{2m}}{(2m)!} \frac{2\theta}{(k-1)2m(2m+1)(2m+2)}
\end{align}
where the last equality is by Lemma \ref{s=1}. By \eqref{fininfdiv}, we see that $Y_{n,k}^*$ has an infinitely divisible distribution for each $n$ and one can check that \eqref{infdiv} can be written as the integral
\begin{equation}
\frac{\theta}{k-1}\int_{-1}^1 (e^{i t x} - it x - 1)\left(-1 + \frac{|x|}{2} + \frac{1}{2|x|} \right) dx
\end{equation}
\end{proof}
\begin{remark}
For each $t \ge 0$, let $\mu_t$ denote the infinitely divisible law with canonical measure in Kolmogorov's theorem supported on the interval $[-1, 1]$ and given by \[\nu(dx) = t \left(-x^2 + \frac{|x|^3}{2} + \frac{|x|}{2} \right) dx .\] Then note that these laws form a convolution semigroup, i.e.\ $\mu_s \ast \mu_t = \mu_{s+t}$ and $\mu_0 = \delta_0$. Thus, $(\mu_t)_{t \ge 0}$ is the family of marginal laws of a L\'{e}vy process. The law of $Y_{\infty, k}^*$ is $\mu_{\theta/(k-1)}$, so the limiting distributions for all $k > 1$ and $\theta > 0$ fit into the same L\'{e}vy process.
\end{remark}
Proposition \ref{Ynkstar} shows that $Y_{n,k}^*$ converges to an infinitely divisible law and hence $Y_{n,k}$ and $Y_{n,k}^*$ do not have the same limiting distribution. Thus, the Feller coupling between $C_j^{(n)}$ and $W_j$ breaks down here. One way to see how the difference arises is again from the moment method.
For the Poisson variable sum, instead of \eqref{Ynlimit}, we have
\begin{multline} \label{Ynstarlimit}
\lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{j_1 \neq ... \neq j_s} \prod_{l=1}^s j_l^{m_l(k-1)} \mathbb{E}\Big[\prod_{l=1}^s W_{j_l} \Big] (\{j_l\alpha \} - \{j_l\beta \})^{m_l} \\ = \lim_{n \to \infty} \frac{1}{n^{m(k-1)}} \sum_{1 \le j_1,...,j_s\le n} \prod_{l=1}^s \big( \theta j_l^{m_l(k-1) - 1} \big) (\{j_l\alpha \} - \{j_l\beta \})^{m_l}
\end{multline}
Thus, the limiting moments will differ.
The authors in \cite{arousdang} were motivated by linear eigenvalue statistics, and therefore a specific choice of $(u_j)_{j \ge 1}$. For each $\sigma \in \mathfrak{S}_n$, let $E_{n,k}^{\textrm{tuple}}(\sigma)$, $E_{n,k}^{\textrm{set}}(\sigma)$, and $E_{n,k}^{\textrm{irrep}}(\sigma)$ denote the multiset of eigenangles of $\rho_{n,k}^{\textrm{tuple}}$, $\rho_{n,k}^{\textrm{set}}$, and $\rho_{n,k}^{\textrm{irrep}}$ respectively. Let $f$ be a real-valued periodic function with period 1. Define the linear eigenvalue statistic \begin{equation} X_{n, k, f}^{\textrm{tuple}}(\sigma) = \sum_{\phi \in E_{n,k}^{\textrm{tuple}}(\sigma)} f(\phi)
\end{equation} and define $X_{n, k, f}^{\textrm{set}}(\sigma)$ and $X_{n, k, f}^{\textrm{irrep}}(\sigma)$ similarly. Let \begin{equation} R_j(f) = \frac{1}{j} \bigg(\frac{1}{2} f(0) + \sum_{i=1}^{j-1} f\left(\frac{i}{j}\right) + \frac{1}{2}f(1) \bigg) - \int_0^1 f(x)dx
\end{equation}
One can interpret $R_j(f)$ as the error in approximating the integral using the trapezoidal rule.
Then it is easy to see (cf.~\cite[(1.8)]{arousdang}) that
\begin{equation} \label{Xnkftuple} X_{n,k, f}^{\textrm{tuple}}(\sigma) = \big| E_{n,k}^{\textrm{tuple}}(\sigma) \big| \int_0^1 f(x)dx + \sum_{j} C_{j,k}^{(n), \textrm{tuple}}(\sigma) j R_j(f)
\end{equation}
\begin{equation} \label{Xnkfset} X_{n,k, f}^{\textrm{set}}(\sigma) = \big| E_{n,k}^{\textrm{set}}(\sigma) \big| \int_0^1 f(x)dx + \sum_{j} C_{j,k}^{(n), \textrm{set}}(\sigma) j R_j(f)
\end{equation}
In particular, finding the limiting behavior of the linear statistic $\displaystyle{ X_{n, 1, f}^{\textrm{tuple}}(\sigma)}$ corresponds to investigating $X_n^{(u_j)}$ for $u_j = j R_j(f)$. This is the case studied in \cite{arousdang} for a wide class of functions $f$. For smooth functions with good trapezoidal approximations, $R_j(f)$ will decay to zero rapidly. Thus, we have a direct correspondence between smoothness of the function $f$ and decay rate of $u_j$.
For $k>1$, define
\begin{equation}
Y_{n,k, f}^{\textrm{tuple}} := \frac{X_{n,k, f}^{\textrm{tuple}} - \mathbb{E}[X_{n,k, f}^{\textrm{tuple}}]}{n^{k-1}}
\end{equation}
\begin{equation}
Y_{n,k, f}^{\textrm{set}} := k! \frac{X_{n,k, f}^{\textrm{set}} - \mathbb{E}[X_{n,k, f}^{\textrm{set}}]}{n^{k-1}}
\end{equation}
\begin{equation}
Y_{n,k, f}^{\textrm{irrep}} := k! \frac{X_{n,k, f}^{\textrm{irrep}} - \mathbb{E}[X_{n,k, f}^{\textrm{irrep}}]}{n^{k-1}}
\end{equation}
With these definitions, we can state the following generalization of Theorem \ref{simplifythm}.
\begin{theorem} \label{simplifythmgeneral}
Let $f$ be such that $R_j(f) = O(1/j)$. Let \begin{equation}
Y_{n,k,f} = \sum_{j=1}^n \frac{C_j^{(n)} j^k R_j(f)}{n^{k-1}}
\end{equation}
As $n \to \infty$ for fixed $k > 1$, each of the random variables $Y_{n,k,f}^{\textrm{tuple}}$, $Y_{n,k,f}^{\textrm{set}}$, and $Y_{n,k,f}^{\textrm{irrep}}$ converges in law to the same limiting distribution $\displaystyle{\lim_{ n\to \infty} \mathcal{L} (Y_{n,k,f})}$ (assuming it exists).
\end{theorem}
\begin{proof}
The proof of Theorem \ref{simplifythm} goes through essentially unchanged for each of the three types of representations.
We have equation \eqref{Xnkftuple} analogous to \eqref{Xnktuple}. Since $j R_j(f)$ is $O(1)$, Lemma \ref{tuplebound} applies and the proof follows unchanged for the $k$-tuple case. Similarly, we have equation \eqref{Xnkfset} analogous to \eqref{Xnkset}. Lemma \ref{subsetbound} also applies unchanged for the $k$-subset case. Finally, for the irrep case, the generalization of Lemma \ref{Qtuplestar} replacing $\displaystyle{E_n^{Q_{n,k}^\textrm{tuple*}} \cap I}$ with $\displaystyle{\sum_{\phi \in E_n^{Q_{n,k}^\textrm{tuple*}}} f(\phi) }$ clearly holds. Then the same induction argument on $k$ gives the appropriate generalization of Lemma \ref{Qtuple} and the result follows.
\end{proof}
\begin{remark}
By Lemma 5.3 in \cite{arousdang}, if $f$ is of bounded total variation, then $R_j(f) = O(1/j)$.
\end{remark}
\begin{remark}
Theorem \ref{simplifythm} corresponds to the case $f = \mathbbm{1}_{(\alpha, \beta)}$ where $\alpha$ and $\beta$ are irrational numbers that are linearly independent over $\mathbb{Q}$.
\end{remark}
From the perspective of Theorem \ref{simplifythmgeneral}, we see that whereas \cite{arousdang} studies the random variables $X_n^{(u_j)}$ for $u_j = j R_j(f)$ for functions $f$ of various degrees of smoothness, the present work investigates them mostly for $u_j = j^k R_j(f)$ where $f = \mathbbm{1}_{(\alpha, \beta)}$. To conclude, we observe that we can obtain limiting laws for (appropriately scaled versions of) $Y_{n,k,f}$ (and therefore $Y_{n,k,f}^{\textrm{tuple}}$, $Y_{n,k,f}^{\textrm{set}}$, and $Y_{n,k,f}^{\textrm{irrep}}$) that match those seen in Theorems \ref{smooth} and \ref{rough} by choosing $f$ so that $R_j(f)$ satisfies appropriate conditions. For instance, one can show via Euler-Maclaurin summation that if $f \in C^{2m}$, i.e. $2m$ times continuously differentiable, then $R_j(f) = O(1/j^{2m})$. Then if for even $k$ we take $f \in C^{k+2}$ and for odd $k$ we take $f \in C^{k+1}$, the hypotheses of Theorem \ref{smooth} are met. If we choose $f$ on the cusp of $k$-differentiability such that $R_j(f) = \Theta(1/j^k)$, then the hypotheses of Theorem \ref{rough} are met.
\end{document} |
\begin{document}
\title{On irrationality measure of Thue-Morse constant}
\begin{abstract}
We provide a non-trivial measure of irrationality for a class of Mahler
numbers defined by infinite products, a class which covers the Thue-Morse
constant. Among other things, our results imply a generalization of the main
result of~\cite{bugeaud_2011}.
\end{abstract}
\section{Introduction}
Let $\xi\in\mathbb{R}$ be an irrational number. Its irrationality exponent $\mu(\xi)$
is defined to be the supremum of all $\mu$ such that the inequality
$$
\left|\xi - \frac{p}{q}\right|<q^{-\mu}
$$
has infinitely many rational solutions $p/q$. This is an important property
of a real number since it shows, how close the given real number can be
approximated by rational numbers in terms of their denominators. The
irrationality exponent can be further refined by the following notion.
Let $\psi(q):\mathbb{R}_{\ge 0} \to \mathbb{R}_{\ge 0}$ be a function which tends to zero as
$q\to \infty$. Any function $\psi$ with these properties is referred to as the
\emph{approximation function}. We say that an irrational number $\xi$ is
\emph{$\psi$-well approximable} if the inequality
\begin{equation}\label{def_well}
\left|\xi -
\frac{p}{q}\right|<\psi(q)
\end{equation}
has infinitely many solutions $p/q\in\mathbb{Q}$. Conversely, we say that $\xi$ is
\emph{$\psi$-badly approximable} if~\eqref{def_well} has only finitely many
solutions. Finally, we say that $\xi$ is \emph{badly approximable} if it is
$c/q$-badly approximable for some positive constant $c>0$.
If a number $\xi\in\mathbb{R}$ is $\psi$-badly approximable, we also say that $\psi$
is a \emph{measure of irrationality of $\xi$}.
The statement $\mu(\xi) = \mu$ is equivalent to saying that for any
$\epsilon>0$, $\xi$ is both $q^{-\mu-\epsilon}$-well approximable and
$q^{-\mu+\epsilon}$-badly approximable. On the other hand, $(q^2\log
q)^{-1}$-badly approximable numbers are in general worse approached by
rationals when compared to $(q^2\log^2 q)^{-1}$-badly approximable numbers,
even though that both of them have irrationality exponent equal to 2.
\begin{remark} \label{im_same_c}
It is quite easy to verify
that, for any approximation function $\psi$, for any $\xi\in\mathbb{R}$ and any
$c\in\mathbb{Q}\setminus\{0\}$, the numbers $\xi$ and $c\xi$ simultaneously are or
are not $\psi$-badly approximable. Similarly, they simultaneously are or are
not $\psi$-well approximable.
\end{remark}
Significant progress has been made recently in determining Diophantine
approximation properties of so-called Mahler numbers. Their definition
varies slightly in the literature. In the present paper we define Mahler
functions and Mahler numbers as follows. An analytic function $F(z)$ is
called \emph{Mahler function} if it satisfies the functional equation
\begin{equation}\label{def_mahlf}
\sum_{i=0}^n P_i(z)F(z^{d^i}) = Q(z)
\end{equation}
where $n$ and $d$ are positive integers with $d\ge 2$, $P_i(z), Q(z) \in
\mathbb{Q}[z]$, $i=0,\dots,n$ and $P_0(z)P_n(z) \neq 0$. We will only consider those
Mahler functions $F(z)$ which lie in the space $\mathbb{Q}((z^{-1}))$ of Laurent
series. Then, for any $\alpha\in\overline{\mathbb{Q}}$ inside the disc of convergence
of $F(z)$, a real number $F(\alpha)$ is called a \emph{Mahler number}.
One of the classical examples of Mahler numbers is the so called Thue-Morse
constant which is defined as follows. Let $\vv
t=(t_0,t_1,\dots)=(0,1,1,0,1,0,0,\dots)$ be the Thue-Morse sequence, that is
the sequence $(t_n)_{n\in\mathbb{N}_0}$, where $\mathbb{N}_0:=\mathbb{N}\cup\{0\}$, defined by the
recurrence relations $t_0=0$ and for all $n\in\mathbb{N}_0$
$$
\begin{aligned}
t_{2n}&=t_n,\\
t_{2n+1}&=1-t_n.
\end{aligned}
$$
Then, the Thue-Morse constant $\tau_{TM}$ is the real number whose binary expansion
is the Thue-Morse word. In other words,
\begin{equation} \label{defTM}
\tau_{TM}:=\sum_{k=0}^{\infty}\frac{t_k}{2^{k+1}}.
\end{equation}
It is well known that $\tau_{TM}$ is a Mahler number. Indeed, one can check that
$\tau_{TM}$ is related with the generating function
\begin{equation} \label{ftm_presentation}
f_{TM}(z):=\sum_{i=0}^{\infty}(-1)^{t_i}z^{-i}
\end{equation}
by the formula $\tau_{TM} = \frac12 (1- \frac12 f_{TM}(2))$. At the same time, the function $f_{TM}(z)$, defined
by~\eqref{ftm_presentation}, admits the following presentation~\cite[\S13.4]{AS2003}:
$$
f_{TM}(z)=\prod_{k=0}^{\infty}\left(1-z^{-2^k}\right),
$$
and the following functional equation holds:
\begin{equation} \label{func_eq}
f_{TM}(z^2)=\frac{z}{z-1}f_{TM}(z).
\end{equation}
So it is indeed a Mahler function.
Approximation of Mahler numbers by algebraic numbers has been studied within
a broad research direction on transcendence and algebraic independence of
these numbers. We refer the reader to the monograph~\cite{Ni1996} for more
details on this topic.
It has to be mentioned that, though some results on approximation by
algebraic numbers can be specialized to results on rational approximations,
most often they become rather weak. This happens because the results on
approximations by algebraic numbers necessarily involve complicated
constructions, which results in some loss of precision. More fundamental
reason is that rational numbers enjoy significantly more regular (and much
better understood) distribution in the real line when compared to the
algebraic numbers.
The history of the research of approximation properties of Mahler numbers by
rational numbers probably started in the early 1990s with the work of
Shallit and van der Poorten~\cite{vdP_S}, where they considered a class of
numbers that contains some Mahler numbers, including Fredholm constant
$\sum_{n=0}^{\infty}10^{-2^n}$, and they proved that all numbers from that
class are badly approximable.
The next result on the subject, the authors are aware of, is due to
Adamczewski and Cassaigne. In 2006, they
proved~\cite{adamczewski_cassaigne_2006} that every automatic number (which,
according to~\cite[Theorem 1]{becker_1994}, is a subset of Mahler numbers)
has finite irrationality exponent, or, equivalently, every automatic number
is not a Liouville number. Later, this result was extended to all
Mahler numbers~\cite{bell_bugeaud_coons_2015}.
We also mention here the result by Adamczewski and
Rivoal~\cite{adamczewski_rivoal_2009}, where they showed that some classes of
Mahler numbers are $\psi$-well approximable, for various functions $\psi$
depending on a class under consideration.
The Thue-Morse constant is one of the first Mahler numbers whose irrationality
exponent was computed precisely; this was done by Bugeaud in
2011~\cite{bugeaud_2011}. This result served as a foundation
for several other works, establishing precise values of irrationality
exponents for wider and wider classes of Mahler numbers, see for
example~\cite{coons_2013, guo_wu_wen_2014, wu_wen_2014}.
Bugeaud, Han, Wen and Yao~\cite{bugeaud_han_wen_yao_2015} computed the
estimates of $\mu(f(b))$ for a large class of Mahler functions $f(z)$,
provided that the distribution of indices at which Hankel determinants of
$f(z)$ do not vanish or, equivalently, the continued fraction of $f(z)$ is
known. In many cases, these estimates lead to the precise value of
$\mu(f(b))$. We will consider this result in more details in the next
subsection. Later, Badziahin~\cite{badziahin_2017} provided a continued
fraction expansion for the functions of the form
$$
f(z) = \prod_{t=0}^\infty P(z^{-d^t})
$$
where $d\in\mathbb{N}, d\ge 2$ and $P(z)\in \mathbb{Q}[z]$ with $\deg P< d$. This result, complemented with~\cite{bugeaud_han_wen_yao_2015}, allows one to find sharp estimates for the values of these functions at integer points.
Despite rather extensive studies on irrationality exponents of Mahler
numbers, very little is known about their sharper Diophantine approximation
properties. In 2015, Badziahin and Zorin~\cite{BZ2015} proved that the
Thue-Morse constant $\tau_{TM}$, together with many other values of $f_{TM}(b)$,
$b\in\mathbb{N}$, are not badly approximable. Moreover, they proved
\begin{theoremBZ}
Thue-Morse constant $\tau_{TM}$ is $\frac{C}{q^2\log\log q}$-well approximable, for some explicit constant $C>0$.
\end{theoremBZ}
Later, in~\cite{badziahin_zorin_2015} they extended this result to the values
$f_3(b)$, where $b$ is from a certain subset of positive integers, and
$$
f_3(z):= \prod_{t=0}^\infty (1 - z^{-3^t}).
$$
Khintchine's Theorem implies that outside of a set of the Lebesgue measure
zero, all real numbers are $\frac{1}{q^2\log q}$-well approximable and
$\frac{1}{q^2\log^2 q}$-badly approximable. Of course, this metric result
implies nothing for any particular real number, or countable family of real
numbers. However, it sets some expectations on the Diophantine approximaiton
properties of real numbers.
The result of Theorem~BZ does not provide the well-approximability result for
the Thue-Morse constant suggested by Khintchine's theorem, but it falls
rather short to it. At the same time, the bad-approximability side, suggested
by Khintchine theorem, seems to be hard to establish (or even to approach to
it) in the case of Thue-Morse constant and related numbers. In this paper we
prove that a subclass of Mahler numbers, containing, in particular,
Thue-Morse constant, is $(q\exp(K\sqrt{\log q\log\log q}))^{-2}$-badly
approximable for some
constant $K>0$, see Theorem~\ref{th_main}
at the end of Subsection~\ref{ss_CF_LS}.
This result is still pretty far from what is suggested by Khintchine's
theorem, however it significantly improves the best
result~\cite{bugeaud_2011} available at this moment, namely, that the
irrationality exponent of Thue-Morse constant equals 2.
\hidden{
\begin{theorem}\label{th_main}
Let $d\ge 2$ be an integer and
\begin{equation}\label{def_f}
f(z) = \prod_{t=0}^\infty P(z^{-d^t}),
\end{equation}
where $P(z)\in \mathbb{Z}[z]$ is a polynomial such that $P(1) = 1$ and $\deg P(z)<d$.
Assume that the series $f(z)$ is badly approximable (i.e. degrees of all
partial quotients of $f(z)$ are bounded from above by an absolute constant).
Then there exists a positive constant $C$ such that for any $b\in\mathbb{Z}$, $b\geq 2$, we have either $f(b) = 0$ or $f(b)$ is
$(q\exp(C\sqrt{\log q\log\log q}))^{-2}$-badly approximable.
\end{theorem}
}
\subsection{Continued fractions of Laurent series} \label{ss_CF_LS}
Consider the set $\mathbb{Q}((z^{-1}))$ of Laurent series equipped with the standard
valuation which is defined as follows: for $f(z) = \sum_{k=-d}^\infty
c_kz^{-k} \in \mathbb{Q}((z^{-1}))$, its valuation $\|f(z)\|$ is the biggest degree
$d$ of $z$ having non-zero coefficient $c_{-d}$. For example, for polynomials
$f(z)$ the valuation $\|f(z)\|$ coincides with their degree. It is well known
that in this setting the notion of continued fraction is well defined. In
other words, every $f(z)\in \mathbb{Q}((z^{-1}))$ can be written as
$$
f(z) = [a_0(z), a_1(z),a_2(z),\ldots] = a_0(z) + \mathop{\mathbf{K}}_{n=1}^\infty
\frac{1}{a_n(z)},
$$
where $a_i(z)$, $i\in\mathbb{Z}_{\ge 0}$, are non-zero polynomials with rational coefficients of degree at least
1.
The continued fractions of Laurent series share most of the properties of classical
ones~\cite{poorten_1998}. Furthermore, in this setting we have an even stronger version of the Legendre
theorem:
\begin{theoreml}\label{th_legendre}
Let $f(z)\in \mathbb{Q}((z^{-1}))$. Then $p(z)/q(z)\in \mathbb{Q}(z)$ in a reduced form is a
convergent of $f(z)$ if and only if
$$
\left\| f(z) - \frac{p(z)}{q(z)}\right\| < -2\|q(z)\|.
$$
\end{theoreml}
Its proof can be found in~\cite{poorten_1998}. Moreover, if $p_k(z)/q_k(z)$
is the $k$th convergent of $f(z)$ in its reduced form, then
\begin{equation}\label{eq_conv}
\left\| f(z) - \frac{p_k(z)}{q_k(z)}\right\| =
-\|q_k(z)\|-\|q_{k+1}(z)\|.
\end{equation}
For a Laurent series $f(z)\in \mathbb{Q}((z^{-1}))$, consider its value $f(b)$, where
$b\in \mathbb{N}$ lies within the disc of convergence of $f$. It is well known that
the continued fraction of $f(b)$ (or indeed of any real number $x$) encodes,
in a pretty straightforward way, approximational properties of this number.
At the same time, it is a much subtler question how to read such properties
of $f(b)$ from the continued fraction of $f(z)$. The problem comes from the
fact that after specialization at $z=b$, partial quotients of $f(z)$ become
rational, but often not integer numbers, or they may even vanish. Therefore
the necessary recombination of partial quotients is often needed to construct
the proper continued fraction of $f(b)$. The problem of this type has been
studied in the beautiful article~\cite{vdP_S}. Despite this complication, in
many cases some information on Diophantine approximation properties of $f(b)$
can be extracted. In particular, this is the case for Mahler numbers.
Bugeaud, Han, Wen and Yao~\cite{bugeaud_han_wen_yao_2015} provided the
following result that links the continued fraction of $f(z)$ and the
irrationality exponents of values $f(b), b\in \mathbb{N}$. In fact, they formulated
it in terms of Hankel determinants. The present reformulation can be found
in~\cite{badziahin_2017}:
\begin{theorembhwy}
Let $d\ge 2$ be an integer and $f(z) = \sum_{n=0}^\infty c_nz^n$ converge
inside the unit disk. Suppose that there exist integer polynomials $A(z),
B(z), C(z), D(z)$ with $B(0)D(0)\neq 0$ such that
\begin{equation}\label{mahl_eq}
f(z) = \frac{A(z)}{B(z)} + \frac{C(z)}{D(z)}f(z^d).
\end{equation}
Let $b\ge 2$ be an integer such that $B(b^{-d^n})C(b^{-d^n})D(b^{-d^n})\neq
0$ for all $n\in\mathbb{Z}_{\ge 0}$. Define
$$
\rho:= \limsup_{k\to\infty} \frac{\deg q_{k+1}(z)}{\deg q_k(z)},
$$
where $q_k(z)$ is the denominator of $k$th convergent to $z^{-1}f(z^{-1})$.
Then $f(1/b)$ is transcendental and
$$
\mu(f(1/b))\le (1+\rho)\min\{\rho^2,d\}.
$$
\end{theorembhwy}
The corollary of this theorem is that, as soon as
\begin{equation}\label{cond_cf}
\limsup_{k\to\infty} \frac{\deg q_{k+1}(z)}{\deg q_k(z)}=1,
\end{equation}
the irrationality exponent of $f(1/b)$ equals two. Then the natural question
arises: can we say anything better on the Diophantine approximation
properties of $f(1/b)$ in the case if the continued fraction of
$z^{-1}f(z^{-1})$ satisfies a stronger condition than~\eqref{cond_cf}? In
particular, what if the degrees of all partial quotients $a_k(z)$ are bounded
by some absolute constant or even are all linear? Here we answer this
question for a subclass of Mahler
functions.
The main result of this paper is the following.
\begin{theorem}\label{th_main}
Let $d\ge 2$ be an integer and
\begin{equation}\label{def_f}
f(z) = \prod_{t=0}^\infty P(z^{-d^t}),
\end{equation}
where $P(z)\in \mathbb{Z}[z]$ is a polynomial such that $P(1) = 1$ and $\deg P(z)<d$.
Assume that the series $f(z)$ is badly approximable (i.e. the degrees of all
partial quotients of $f(z)$ are bounded from above by an absolute constant).
Then there exists a positive constant $K$ such that for any $b\in\mathbb{Z}$,
$|b|\geq 2$, we have either $f(b) = 0$ or $f(b)$ is $q^{-2}\exp(-K\sqrt{\log
q\log\log q})$-badly approximable.
\end{theorem}
\hidden{
\begin{remark}
The proof of Theorem~\ref{th_main} can be simplified a bit in the case $b\geq 2$ (that is, if we do not consider cases corresponding to the negative values of $b$). At the same time, we can easily deduce the result of Theorem~\ref{th_main} in its whole generality from the result for positive values of $b$ only. Indeed, if $d$ is even, then
$$
f(-b)=\frac{P(-b)}{P(b)}f(b),
$$
hence the numbers $f(-b)$ and $f(b)$ are always simultaneously $\psi$-badly
approximable or $\psi$-well approximable for any function $\psi$.
\end{remark}
}
\hidden{
\begin{corollary} \label{cor_main}
Let $d\geq 2$ be an integer and let $f(z)$ be the same series as in the
statement of Theorem~\ref{th_main} (that is, defined by~\eqref{def_f} and
badly approximable). Then there exists a positive constant $K$ such that for
any $b\in\mathbb{Z}$, $|b|\geq 2$, we have either $f(b) = 0$, or $P(-b)=0$ or $f(b)$
is $q^{-2}\exp(-K\sqrt{\log q\log\log q})$-badly approximable.
\end{corollary}
\begin{proof}
We are going to deduce Corollary~\ref{cor_main} from Theorem~\ref{th_main}. First of all, note that in case $b\geq 2$ the result readily follows from Theorem~\ref{th_main}. So in this proof we need to deal only with the case $b\leq -2$, which we assume further in this proof.
In case if $d$ is even, we have either $P(-b)=0$ (and then the claim of corollary is justified in this case) or
$$
f(b)=\frac{P(b)}{P(-b)}f(-b),
$$
so the result follows from Remark~\ref{im_same_c}.
In case if $d$ is odd, consider the function $\widetilde{f}(z)$ defined by~\eqref{def_f} with the polynomial $P(z)$ replaced by $P(-z)$. It is easy to verify that we have
\begin{equation} \label{f-b_fb}
f(b)=\widetilde{f}(-b).
\end{equation}
At the same time, we can apply Theorem~\ref{th_main} to the function $\widetilde{f}(z)$ and the integer $-b\geq 2$, hence, by using~\eqref{f-b_fb}, this proves the claim of the corollary for $f(b)$.
\end{proof}
}
\hidden{
\begin{remark}
Results on irrationality measures of numbers, and more generally results of this kind covering whole classes of numbers, help in verification whether given numbers can or can not belong to certain sets. The very classical example of this nature is a proof by Liouville~\cite{Liouville1851} that, for any $b\in\mathbb{Z}$, $|b|\geq 2$, the number $L_b:=\sum_{k=0}^{\infty}b^{-k!}$ is not algebraic. To justify this, Liouville proved first that, in the modern language, the irratinality exponent of an algebraic number of degree $d$ is at most $d$, and then he verified that the irrationality exponent of the constant $L_b$ is infinite.
In the same article~\cite{Liouville1851}, Liouville stated the problem to verify whether the numbers $M_{b,2}=\sum_{k=0}^{\infty}b^{-k^2}$, where $b\in\mathbb{Z}$, $b\geq 2$ are algebraic or not. More than a hundred years later, a generalized version of this question was asked again in the ground breaking paper by Hartmanis and Stearns~\cite{HS1965}, this time they mentioned a problem to determine whether the numbers $M_{b,s}=\sum_{k=0}^{\infty}b^{-k^s}$, where $b,s\in\mathbb{Z}$, $b,s\geq 2$ are algebraic or not.
The transcendence of $M_{b,2}$ was proved only in~1996, \cite{DNNS1996}. The proof uses sophisticated technics of the modern theory of algebraic independence, and also exploits the presentation of $M_{b,2}$ as a value of a theta function. No such proof for $M_{b,3}$ or indeed for any $M_{b,s}$, $s\geq 3$ is known.
At the same time, the transcendence of $M_{b,s}$ for any $b,s\in\mathbb{Z}$, $|b|\geq 2$, $s\geq 2$ could have been deduced from the following result, conjectured by Lang:
\begin{conjecture}[Lang, 1965] \label{conjectire_Lang_1965}
For any $\alpha\in\overline{\mathbb{Q}}$, there exists a constant $A=A(\alpha)>0$ such that $\alpha$ is $\frac{1}{q^2\cdot\log^{A}q}$-badly approximable.
\end{conjecture}
Indeed, it is enough to consider rational approximations to $M_{b,s}$ given by cutting their defining series at an arbitrary index,
$$
\xi_{b,s,N}:=\sum_{i=0}^{N}b^{-k^s}.
$$
With these approximations, one can easily verify that $M_{b,s}$ is $\frac{1}{q\exp\left(\log_b^{\frac{s-1}{s}} q\right)}$ well-approximable.
Of course, Conjecture~\ref{conjectire_Lang_1965} is a far-reaching generalization of the famous theorem of Roth.
\end{remark}
}
\section{Preliminary information on series $f(z)$.}
In the further discussion, we consider series $f(z)$ which satisfies all the
conditions of Theorem~\ref{th_main}. Most of these conditions are straightforward to verify; the only non-evident point is to check whether the product function $f(z)$, defined by~\eqref{def_f}, is badly approximable. To address this, one can find a nice criterion
in \cite[Proposition 1]{badziahin_2017}: $f(z)$ is badly
approximable if and only if all its partial quotients are linear. This in
turn is equivalent to the claim that the degree of denominator of the $k$th convergent of $f(z)$
is precisely $k$, for all $k\in\mathbb{N}$.
As shown in~\cite{badziahin_2017}, it is easier to compute the continued
fraction of a slightly modified series
\begin{equation} \label{def_g}
g(z) = z^{-1}f(z).
\end{equation}
Since Diophantine approximation properties of numbers $f(b)$ and $g(b) =
f(b)/b$ essentially coincide, for any $b\in \mathbb{N}$, we will further focus on the
work with the function $g(z)$. As we assume that $f(z)$ is a badly
approximable function, the function $g(z)$ defined by~\eqref{def_g} is also
badly approximable. In what follows, we will denote by $p_k(z)/q_k(z)$ the
$k$th convergent of $g(z)$, and then, by~\cite[Proposition 1]{badziahin_2017},
we infer that $\deg q_k(z)=k$.
Write down the polynomial $P(z)$ in the form
$$
P(z) = 1 + u_1z+\ldots +u_{d-1}z^{d-1}.
$$
Then $P(z)$ is defined by the vector $\mathbf{u} = (u_1,\ldots,u_{d-1}) \in
\mathbb{Z}^{d-1}$ and, via~\eqref{def_f} and~\eqref{def_g}, so is $g(z)$. To
emphasize this fact, we will often write $g(z)$ as $g_\mathbf{u}(z)$.
\subsection{Coefficients of the series, convergents and Hankel determinants}
We write the Laurent series $g_\mathbf{u}(z)\in \mathbb{Z}[[z^{-1}]]$ in the following form
\begin{equation} \label{def_g_u}
g_\mathbf{u}(z) = \sum_{n=1}^\infty c_n z^{-n}.
\end{equation}
We denote by $\mathbf{c}_n$ the vector $(c_1,c_2,\ldots, c_n)$.
Naturally, the definition of $g_{\mathbf{u}}(z)$ via the infinite product
(see~\eqref{def_f} and~\eqref{def_g}) imposes the upper bound on $|c_n|$,
$n\in\mathbb{N}$.
\begin{lemma} \label{lemma_ub_c_i}
The term $c_n$ satisfies
\begin{equation}\label{ineq_c_n}
|c_n| \leq \|\mathbf{u}\|_\infty ^{\lceil \log_d n\rceil} \le \|\mathbf{u}\|_\infty ^{\log_d n + 1}.
\end{equation}
Consequently,
\begin{equation}\label{ineq_c}
\|\mathbf{c}_n\|_\infty \leq \|\mathbf{u}\|_\infty ^{\log_d n + 1}
\end{equation}
\end{lemma}
\begin{proof}
Look at two different formulae for $g_\mathbf{u}(z)$:
$$
g_\mathbf{u}(z) = z^{-1}\prod_{t=0}^\infty (1+u_1z^{-d^t}+\ldots+u_{d-1}z^{-(d-1)d^t}) = \sum_{n=1}^\infty c_n z^{-n}.
$$
By comparing the right and the left hand sides one can notice that $c_n$ can be
computed as follows:
\begin{equation} \label{link_c_u}
c_n = \prod_{j=0}^{l(n)} u_{d_{n,j}}
\end{equation}
where $d_{n,0}d_{n,1}\cdots d_{n,l(n)}$ is the $d$-ary expansion of the number
$n-1$. Here we formally define $u_0=1$. Equation~\eqref{link_c_u} readily implies that $|c_n|\leq
\|\mathbf{u}\|^{l(n)}$. Finally, $l(n)$ is estimated by $l(n) \leq \lceil
\log_d(n-1)\rceil \le \lceil \log_d n\rceil$. The last two inequalities
clearly imply~\eqref{ineq_c_n}, hence~\eqref{ineq_c}.
\end{proof}
Let $p_k(z)/q_k(z)$ be a convergent of $g_\mathbf{u}(z)$ in its reduced form. Recall that throughout the text we assume that $f(z)$ is badly approximable, hence $g_{\mathbf{u}}(z)$ defined by~\eqref{def_g} is badly approximable, and because of this (and employing~\cite[Proposition 1]{badziahin_2017}) we have
\begin{equation} \label{deg_qk_k}
\deg q_k=k.
\end{equation}
Denote
\begin{equation} \label{coefficients_q_k_p_k}
\begin{aligned}
q_k(z) &= a_{k,0}+a_{k,1}z+\ldots + a_{k,k}z^k,\qquad \mathbf{a}_k:=(a_{k,0},\ldots,a_{k,k})\\
p_k(z) &= b_{k,0} +
b_{k,1}z+\ldots+b_{k,k-1}z^{k-1},\quad \mathbf{b}_k:=(b_{k,0},\ldots,b_{k,k-1}).
\end{aligned}
\end{equation}
Because of~\eqref{deg_qk_k}, we have
\begin{equation} \label{a_k_b_k_ne_0}
a_{k,k}\ne 0.
\end{equation}
The Hankel matrix is defined as follows:
$$
H_k=H_k(g_{\mathbf{u}})=
\begin{pmatrix}
c_{1} & c_{2} & \dots & c_{k}\\
c_{2} & c_{3} & \dots & c_{k+1}\\
\vdots & \vdots & \ddots & \vdots\\
c_{k} & c_{k+1} & \dots & c_{2k-1}
\end{pmatrix}.
$$
It is known (see, for example,~\cite[Section 3]{badziahin_2017}) that the
convergent in its reduced form with $\deg q_k(z) = k$ exists if and only if the
Hankel matrix $H_k$ is invertible. Thus in our case
we necessarily have that $H_k(g_{\mathbf{u}})$
is invertible for any positive integer $k$.
From~\eqref{eq_conv}, we have that
\begin{equation}\label{eq_pq}
\|q_k(z)g_\mathbf{u}(z) - p_k(z)\| = -k-1.
\end{equation}
In other words, the coefficients for $x^{-1}, \ldots, x^{-k}$ in
$q_k(z)g_\mathbf{u}(z)$ are all zero and the coefficient for $x^{-k-1}$ is not. This
suggests a method for computing $q_k(x)$. One can check that the vector
$\mathbf{a}_k = (a_{k,0},a_{k,1},\ldots, a_{k,k})$ is the solution of the matrix
equation $H_{k+1}\mathbf{a}_k = c\cdot\mathbf{e}_{k+1}$, where $c$ is a non-zero
constant and
$$
\mathbf{e}_{k+1}=(0, \dots, 0, 1)^{t}.
$$
This equation has the unique solution since the matrix $H_{k+1}$ is
invertible. So, we can write the solution vector $\mathbf{a}$ as
\begin{equation} \label{matrix_equality_HAI}
\mathbf{a}_k = c\cdot H_{k+1}^{-1}\mathbf{e}_{k+1}.
\end{equation}
In what follows, we will use the norm of the matrix $\|H\|_\infty$, defined
to be the maximum of the absolute values of all its entries. Given a
polynomial $P(z)$ we define its height $h(P)$ as the maximum of absolute
values of its coefficients. In particular, we have $h(p_k(z)) =
\|\mathbf{b}_k\|_\infty$ and $h(q_k(z)) = \|\mathbf{a}_k\|_\infty$.
\hidden{
\begin{lemma} \label{lemma_ub_c_i}
The term $c_i$ satisfies
$$
|c_i| \le \|\mathbf{u}\|_\infty ^{\lceil \log_d i\rceil} \le \|\mathbf{u}\|_\infty ^{\log_d i + 1}.
$$
This in fact implies
\begin{equation}\label{ineq_c}
\|\mathbf{c}_i\|_\infty \le \|\mathbf{u}\|_\infty ^{\log_d i + 1}
\end{equation}
\end{lemma}
\begin{proof}
Look at two different formulae for $g_\mathbf{u}(z)$:
$$
g_\mathbf{u}(z) = z^{-1}\prod_{t=0}^\infty (1+u_1z^{-d^t}+\ldots+u_{d-1}z^{-(d-1)d^t}) = \sum_{i=1}^\infty c_i z^{-i}.
$$
By comparing the right and left hand sides one can notice that $c_i$ can be
computed as follows:
$$
c_i = \prod_{j=0}^{l(i)} u_{d_{i,j}}
$$
where $d_{i,0}d_{i,1}\cdots d_{i,l(i)}$ is a $d$-ary expansion of the number
$i-1$. Here we put $u_0=1$. This formula readily implies that $|c_i|\le
\|\mathbf{u}\|^{l(i)}$. Finally, $l(i)$ is estimated by $l(i) \le \lceil
\log_d(i-1)\rceil \le \lceil \log_d i\rceil$. The last two inequalities
imply~\eqref{ineq_c}.
\end{proof}
}
\begin{lemma}\label{lem_hpq}
For any $k\in\mathbb{N}$, the $k$-th convergent $p_k(z)/q_k(z)$ to $g_{\mathbf{u}}(z)$ can be represented by $p_k(z)/q_k(z)=\widetilde{p}_k(z)/\widetilde{q}_k(z)$,
where $\widetilde{p}_k,\widetilde{q}_k\in \mathbb{Z}[z]$ and
\begin{eqnarray}\label{ineq_hq}
h(\widetilde{q}_k)&\leq& (\|\mathbf{c}_{2k+1}\|_\infty^2 \cdot k)^{k/2}.\\
\label{ineq_hp}
h(\widetilde{p}_k)&\leq& \|\mathbf{c}_{2k+1}\|_\infty^{k+1} \cdot k^{(k+2)/2}.
\end{eqnarray}
Consequently, the following upper bounds hold:
\begin{eqnarray}\label{ineq_hq_2}
h(\widetilde{q}_k)&\leq& \|\mathbf{u}\|_\infty ^{k(\log_d (2k+1) + 1)} \cdot k^{k/2}.\\
\label{ineq_hp_2}
h(\widetilde{p}_k)&\leq& \|\mathbf{u}\|_\infty ^{(k+1)(\log_d (2k+1) + 1)} \cdot k^{(k+2)/2}.
\end{eqnarray}
\end{lemma}
\begin{proof} By applying Cramer's rule to the equation $H_{k+1}\mathbf{a}_k = c\cdot\mathbf{e}_{k+1}$ we infer that
\begin{equation} \label{formula_a_i}
a_{k,i}=c\cdot\frac{\Delta_{k+1,i}}{\det H_{k+1}}, \quad i=0,\dots,k,
\end{equation}
where $\Delta_{k+1,i}$ denotes the determinant of the matrix $H_{k+1}$ with
the $i$-th column replaced by $\mathbf{e}_{k+1}$, $i=1,\dots,k+1$. Then we use the
Hadamard's determinant upper bound to derive
\begin{equation} \label{h_a_i}
|\det H_{k+1}|\leq \|H_{k+1}\|_\infty^{k+1}\cdot (k+1)^{(k+1)/2} = (\|\mathbf{c}_{2k+1}\|_\infty^2 (k+1))^{(k+1)/2}.
\end{equation}
Moreover, by expanding the matrix involved in $\Delta_{k+1,i}$ along the $i$th
column and by using Hadamard's upper bound again we get
$$
|\Delta_{k+1,i}|\le \|H_{k+1}\|_\infty^k\cdot k^{k/2} = (\|\mathbf{c}_{2k+1}\|_\infty^2\cdot k)^{k/2}, \quad i=0,\dots,k.
$$
To define $\widetilde{q}_k(z)$, set $c = \det H_{k+1}$ in~\eqref{formula_a_i}.
Then we readily have $\widetilde{q}_k(z) = \sum_{i=0}^k \Delta_{k+1,i+1}z^i$. By
construction, it has integer coefficients and $h(\widetilde{q}_k)$
satisfies~\eqref{ineq_hq}.
Next, from~\eqref{eq_pq} we get that the coefficients of $\widetilde{p}_k(z)$ coincide with
the coefficients for positive powers of $z$ of $\widetilde{q}_k(z)g_\mathbf{u}(z)$. By expanding
the latter product, we get
$$
|b_{k,i}|=\left|\sum_{j=i+1}^k a_{k,j} c_{j-i-1}\right| \le \|\mathbf{c}_{2k+1}\|_{\infty}^{k+1}\cdot k^{(k+2)/2}.
$$
Hence~\eqref{ineq_hp} is also satisfied.
The upper bounds~\eqref{ineq_hq_2} and~\eqref{ineq_hp_2} follow
from~\eqref{ineq_hq} and~\eqref{ineq_hp} respectively by applying
Lemma~\ref{lemma_ub_c_i}.
\end{proof}
\begin{notation}
For the sake of convenience, further in this text we will assume that all the convergents to $g_{\mathbf{u}}(z)$ are in the form described in Lemma~\ref{lem_hpq}. That is,
we will always assume that $p_k(z)$ and $q_k(z)$ have integer coefficients
and verify the upper bounds~\eqref{ineq_hq} and~\eqref{ineq_hp}, as well
as~\eqref{ineq_hq_2} and~\eqref{ineq_hp_2}.
\end{notation}
For any $k\in\mathbb{N}$ we define a sequence of coefficients $(\alpha_{k,i})_{i> k}$ by
\begin{equation} \label{def_alpha}
q_k(z)g_\mathbf{u}(z)-p_k(z)=:\sum_{i=k+1}^{\infty}\alpha_{k,i} z^{-i}.
\end{equation}
Note that from the equation $H_{k+1}\mathbf{a}_k = c\cdot\mathbf{e}_{k+1}$ we can get that
$\alpha_{k,k+1} = c = \det H_{k+1}$. In particular, it is a non-zero integer.
\begin{lemma} \label{lemma_ub_coefficients}
For any $i,k\in\mathbb{N}$, $i> k\geq 1$, we have
\begin{equation} \label{lemma_ub_coefficients_claim}
\begin{aligned}
|\alpha_{k,i}|&\leq (k+1) \|\mathbf{c}_{k+i}\|_\infty (\|\mathbf{c}_{2k+1}\|_\infty^2 \cdot k)^{k/2}\\
&\leq (k+1) \|\mathbf{u}\|_\infty^{\log_d(k+i)+1} \|\mathbf{u}\|_\infty^{k(\log_d(2k+1)+1)} \cdot k^{k/2}.
\end{aligned}
\end{equation}
\end{lemma}
\begin{proof}
One can check that $\alpha_{k,i}$ is defined by the formula $\alpha_{k,i} =
\sum_{j=0}^k a_{k,j} c_{j+i}$, which in view of~\eqref{ineq_hq} from
Lemma~\ref{lem_hpq} implies the first inequality
in~\eqref{lemma_ub_coefficients_claim}. Then, the second inequality
in~\eqref{lemma_ub_coefficients_claim} follows by applying
Lemma~\ref{lemma_ub_c_i}.
\end{proof}
\hidden{
\subsection{Estimates on the terms of the series}
\begin{lemma}
The term $c_i$ satisfies
$$
|c_i| \le \|\mathbf{u}\|_\infty ^{\lceil \log_d i\rceil} \le \|\mathbf{u}\|_\infty ^{\log_d i + 1}.
$$
This in fact implies
\begin{equation}\label{ineq_c}
\|\mathbf{c}_i\|_\infty \le \|\mathbf{u}\|_\infty ^{\log_d i + 1}
\end{equation}
\end{lemma}
\proof
Look at two different formulae for $g_\mathbf{u}(z)$:
$$
g_\mathbf{u}(z) = z^{-1}\prod_{t=0}^\infty (1+u_1z^{-d^t}+\ldots+u_{d-1}z^{-(d-1)d^t}) = \sum_{i=1}^\infty c_i z^{-i}.
$$
By comparing the right and left hand sides one can notice that $c_i$ can be
computed as follows:
$$
c_i = \prod_{j=0}^{l(i)} u_{d_{i,j}}
$$
where $d_{i,0}d_{i,1}\cdots d_{i,l(i)}$ is a $d$-ary expansion of the number
$i-1$. Here we put $u_0=1$. This formula readily implies that $|c_i|\le
\|\mathbf{u}\|^{l(i)}$. Finally, $l(i)$ is estimated by $l(i) \le \lceil
\log_d(i-1)\rceil \le \lceil \log_d i\rceil$. The last two inequalities
imply~\eqref{ineq_c}.
\endproof
}
\subsection{Using the functional equation to study Diophantine approximation properties.}
From~\eqref{def_f} one can easily get a functional equation for $g_\mathbf{u}(z) =
z^{-1}f(z)$:
\begin{equation}\label{eq_funcg}
g_\mathbf{u} (z) = \frac{g_\mathbf{u} (z^d)}{P^*(z)},\quad P^*(z) = z^{d-1}P(z^{-1}) = z^{d-1}+u_1z^{d-2}+\ldots+u_{d-1}.
\end{equation}
This equation allows us, starting from the convergent $p_k(z)/q_k(z)$ to
$g_\mathbf{u}(z)$, to construct an infinite sequence of convergents
\left(p_{k,m}(z)/q_{k,m}(z)\right)_{m\in\mathbb{N}_0}$ to $g_\mathbf{u}(z)$ by
\begin{equation} \label{def_pkm_qkm_poly}
\begin{aligned}
q_{k,m}(z)&:=q_k(z^{d^m}),\\
p_{k,m}(z)&:=\prod_{t=0}^{m-1}P^*(z^{d^t})p_{k}(z^{d^m}).
\end{aligned}
\end{equation}
This fact can be checked by substituting
the functional equation~\eqref{eq_funcg} into the condition of Theorem~L. The reader can
also compare with~\cite[Lemma 3]{badziahin_2017}.
By employing~\eqref{eq_funcg} and~\eqref{def_alpha}, we find
\begin{equation} \label{property_k_m}
q_{k,m}(z)g_\mathbf{u}(z)-p_{k,m}(z)=\prod_{t=0}^{m-1}P^*(z^{d^t})\cdot\sum_{i=k+1}^{\infty}\alpha_{k,i}
z^{-d^m\cdot i}.
\end{equation}
Consider an integer value $b$ which satisfies the conditions of
Theorem~\ref{th_main}. Define\footnote{There is a slight abuse of notation in
using the same letters $p_{k,m}$ and $q_{k,m}$ both for polynomials from
$\mathbb{Z}[z]$ and for their values at $z=b$. However, we believe that in this
particular case such a notation constitutes the best choice. Indeed, the main
reason to consider polynomials $p_{k,m}(z)$ and $q_{k,m}(z)$ is to define
eventually $p_{k,m}=p_{k,m}(b)$ and $q_{k,m}=q_{k,m}(b)$, which will play the
key role in the further proofs. At the same time, it is easy to distinguish
the polynomials $p_{k,m}(z)$, $q_{k,m}(z)$ and the corresponding integers
$p_{k,m}$ and $q_{k,m}$ by the context. Moreover, we will always specify
which object we mean and always refer to the polynomials specifying
explicitly the variable, that is $p_{k,m}(z)$, $q_{k,m}(z)$ and not $p_{k,m}$
and $q_{k,m}$.}
\begin{eqnarray}
p_{k,m}&:=&p_{k,m}(b), \label{def_p_k_m}\\
q_{k,m}&:=&q_{k,m}(b), \label{def_q_k_m}
\end{eqnarray}
where $p_{k,m}(z)$ and $q_{k,m}(z)$ are polynomials defined by~\eqref{def_pkm_qkm_poly}.
Clearly, for any $k\in\mathbb{N}$, $m\in\mathbb{N}_0$ we have $p_{k,m},q_{k,m}\in\mathbb{Z}$.
\hidden{
\begin{remark}
There is a slight abuse of notation in using the same letters $p_{k,m}$ and
$q_{k,m}$ both for polynomials from $\mathbb{Z}[z]$ and for their values at $z=b$.
However, we believe that in this particular case such a notation constitutes
the best choice. Indeed, the main reason to consider polynomials $p_{k,m}(z)$
and $q_{k,m}(z)$ is to define eventually $p_{k,m}=p_{k,m}(b)$ and
$q_{k,m}=q_{k,m}(b)$, which will play the key role in the further proofs. At
the same time, it is easy to distinguish the polynomials $p_{k,m}(z)$,
$q_{k,m}(z)$ and the corresponding integers $p_{k,m}$ and $q_{k,m}$ by the
context. Moreover, we will always specify which object we mean and always
refer to the polynomials specifying explicitly the variable, that is
$p_{k,m}(z)$, $q_{k,m}(z)$ and not $p_{k,m}$ and $q_{k,m}$.
\end{remark}
}
\begin{lemma} \label{lemma_smallness}
Let $b,k,m\in\mathbb{N}$, $b\geq 2$. Assume
\begin{equation} \label{lemma_smallness_bdm_geq_2}
b^{d^m}>2^{1+\log_d\|\mathbf{u}\|_\infty}.
\end{equation}
Then the integers $p_{k,m}$ and $q_{k,m}$ verify
\begin{equation} \label{lemma_smallness_ub}
\left|g_\mathbf{u}(b)-\frac{p_{k,m}}{q_{k,m}}\right|\leq \frac{2(k+1)
k^{k/2} d^m \|\mathbf{u}\|_\infty^{m+(k+1)(\log_d(2k+1)+1)}}{q_{k,m}\cdot
b^{d^m\cdot k+1}}.
\end{equation}
Moreover, if we make in addition a stronger assumption
\begin{equation}\label{lem5_assump}
b^{d^m}\ge 4(k+1)k^{k/2} \|\mathbf{u}\|_\infty^{(k+1)(\log_d(2k+1)+1)},
\end{equation}
then
\begin{equation} \label{lemma_smallness_lb}
\frac{|g_\mathbf{u}(b)|}{4 q_{k,m}\cdot b^{d^m\cdot
k+1}}\leq\left|g_\mathbf{u}(b)-\frac{p_{k,m}}{q_{k,m}}\right|.
\end{equation}
\end{lemma}
\begin{proof} Consider Equation~\eqref{property_k_m} with substituted $z:=b$:
\begin{equation} \label{property_k_m_z_substituted_b}
q_{k,m}g_\mathbf{u}(b)-p_{k,m}=\prod_{t=0}^{m-1}P^*(b^{d^t})\cdot\sum_{i=k+1}^{\infty}\alpha_{k,i}
b^{-d^m\cdot i}.
\end{equation}
Each of the factors $\left|P^*(b^{d^t})\right|$ in the
right hand side of~\eqref{property_k_m_z_substituted_b} can be upper bounded by
$d\cdot\|\mathbf{u}\|_\infty b^{d^{t}(d-1)}$.
So, the product in the
right hand side of~\eqref{property_k_m_z_substituted_b} can be estimated by
\begin{equation}\label{eq_lem5_1}
\left|\prod_{t=0}^{m-1}P^*(b^{d^t})\right| \leq d^m \|\mathbf{u}\|_\infty^m \cdot
b^{d^{m}-1}.
\end{equation}
Further, we estimate the second term on the right hand
side of~\eqref{property_k_m_z_substituted_b} by employing Lemma~\ref{lemma_ub_coefficients}:
\begin{equation}\label{eq_lem5_2}
\left|\sum_{i=k+1}^{\infty}\alpha_{k,i} b^{-d^m\cdot i}\right| \leq
(k+1) \|\mathbf{u}\|_\infty^{k(\log_d(2k+1)+1)} \cdot k^{k/2}\sum_{i=k+1}^\infty
\frac{\|\mathbf{u}\|_\infty^{\log_d(k+i)+1}}{b^{d^m\cdot i}}.
\end{equation}
The last sum in the right hand side of~\eqref{eq_lem5_2} is bounded from
above by
$$
\sum_{i=k+1}^\infty
\frac{\|\mathbf{u}\|_\infty^{\log_d(k+i)+1}}{b^{d^m\cdot i}} \le
\frac{\|\mathbf{u}\|_\infty}{b^{d^m(k+1)}}\cdot \sum_{i=0}^\infty
\frac{\|\mathbf{u}\|_\infty^{\log_d(2k+1+i)}}{b^{d^m\cdot i}}
$$
\begin{equation}\label{eq_lem5_3}
\le \frac{\|\mathbf{u}\|_\infty^{1+\log_d(2k+1)}}{b^{d^m(k+1)}}
\sum_{i=0}^\infty \frac{(i+1)^{\log_d\|\mathbf{u}\|_\infty}}{b^{d^m\cdot
i}} \le \frac{\|\mathbf{u}\|_\infty^{1+\log_d(2k+1)} \cdot
C(b,d,m,\|\mathbf{u}\|_\infty)}{b^{d^m(k+1)}},
\end{equation}
where
\hidden{
$C(\|\mathbf{u}\|_\infty)$ is a constant which only depends on
$\|\mathbf{u}\|_\infty$ but not on $k$ or $m$. In particular, for
$\|\mathbf{u}\|_\infty = 1$ it can be made equal to 2. It is also not too
difficult to check that $C(\|\mathbf{u}\|_\infty)\le 2$ in the case
$b^{d^m}\ge 2^{1+\log_d \|\mathbf{u}\|_\infty}$. Indeed, it readily follows from elementary remark that $i+1\leq 2^i$ for all $i\in\mathbb{Z}$.
}
$$
C(b,d,m,\|\mathbf{u}\|_\infty)=\sum_{i=0}^\infty \frac{(i+1)^{\log_d\|\mathbf{u}\|_\infty}}{b^{d^m\cdot
i}}.
$$
Note that for any $i\in\mathbb{Z}$, we have $i+1\leq 2^i$. Because of this, assumption~\eqref{lemma_smallness_bdm_geq_2} implies
\begin{equation} \label{C_leq_2}
C(b,d,m,\|\mathbf{u}\|_\infty)\leq 2.
\end{equation}
Finally, by putting
together,~\eqref{property_k_m_z_substituted_b},~\eqref{eq_lem5_1},~\eqref{eq_lem5_2},~\eqref{eq_lem5_3}
and~\eqref{C_leq_2} we get
$$
\left|q_{k,m}g_\mathbf{u}(b) - p_{k,m}\rhoight|\le \frac{2(k+1) k^{k/2} d^m
\|\mathbf{u}\|_\infty^{m+(k+1)(\log_d(2k+1)+1)}}{b^{d^m\cdot k + 1}}.
$$
Dividing both sides by $q_{k,m}$ gives~\eqref{lemma_smallness_ub}.
To get the lower bound, we first estimate the product
in~\eqref{property_k_m}.
$$
\prod_{t=0}^{m-1}P^*(b^{d^t}) = b^{d^m-1}
\prod_{t=0}^{m-1}P(b^{-d^t}) \geq
b^{d^m-1}\frac{g_\mathbf{u}(b)}{\prod_{t=m}^{\infty}P(b^{-d^t})}.
$$
By~\eqref{lem5_assump}, the denominator can easily be estimated as
$$
\prod_{t=m}^{\infty}P(b^{-d^t})\le \prod_{t=m}^{\infty} \left(1+\frac{2\|\mathbf{u}\|_\infty}{b^{d^t}}\right) < 2.
$$
Therefore,
$$
\prod_{t=0}^{m-1}P^*(b^{d^t})\ge \frac12b^{{d^m}-1}g_\mathbf{u}(b).
$$
For the series in the right hand side of~\eqref{property_k_m}, we show that
the first term dominates this series. Indeed, we have $|\alpha_{k,k+1}|\ge 1$
since it is a non-zero integer. Then,
\begin{equation} \label{lb_calculations}
\begin{aligned}
&|q_{k,m}g_\mathbf{u}(b)-p_{k,m}|=\left|\prod_{t=0}^{m-1}P^*(b^{d^t})\cdot\sum_{i=k+1}^{\infty}\alpha_{k,i} b^{-d^m\cdot i}\right|\\
&\geq
\frac12b^{d^m-1} |g_\mathbf{u}(b)| \left(b^{-d^m(k+1)}-\sum_{i=k+2}^{\infty}\left|\alpha_{k,i}\right| b^{-d^m\cdot i}\right)\\
&\stackrel{\eqref{eq_lem5_2}, \eqref{eq_lem5_3}}\geq \frac12b^{d^m-1}
|g_\mathbf{u}(b)| \left(b^{-d^m(k+1)} -
\frac{C(b,d,m,\|\mathbf{u}\|_\infty)(k+1)k^{k/2}\|\mathbf{u}\|_\infty^{(k+1)(\log_d(2k+1)+1)}}{b^{d^m(k+2)}}\right)
\end{aligned}
\end{equation}
Recall that
by~\eqref{C_leq_2}, we have
$C(b,d,m,\|\mathbf{u}\|_\infty) \leq 2$. So, by using
assumption~\eqref{lem5_assump}, we finally get
$$
|q_{k,m}g_\mathbf{u}(b)-p_{k,m}|\ge
\frac{b^{d^m-1}|g_\mathbf{u}(b)|}{4b^{d^m(k+1)}} =
\frac{|g_\mathbf{u}(b)|}{4b^{d^m\cdot k +1}}.
$$
Finally, dividing both sides by $q_{k,m}$ leads
to~\eqref{lemma_smallness_lb}.
\end{proof}
\begin{lemma} \label{lemma_qkm_db}
Let $b,k,m\in\mathbb{N}$, $k\geq 1$ and let
\begin{equation} \label{lemma_qkm_db_bdm_assumption}
b^{d^m} > 3\cdot
\left(\|\mathbf{c}_{2k+1}\|_\infty^2 k\right)^{k/2}.
\end{equation}
Recall the notation $a_{k,i}$, $i=0,\dots,k$,
for the coefficients of $q_k$,
$k\in\mathbb{N}$, defined in~\eqref{coefficients_q_k_p_k}. Then,
\begin{equation} \label{lemma_qkm_db_claim}
\frac12|a_{k,k}|\cdot b^{kd^m}\leq
q_{k,m}\leq\frac32|a_{k,k}|\cdot b^{kd^m}.
\end{equation}
\end{lemma}
\begin{proof}
The leading term of $q_{k,m}(z)$ is $a_{k,k}z^{kd^m}$. We know that $\deg
q_k(z) = k$, therefore $a_{k,k}\neq 0$ and $a_{k,k}$ is an integer. Recall
also that by~\eqref{ineq_hq} the maximum of the coefficients $a_{k,i}$,
$i=0,\dots,k$, does not exceed $(\|\mathbf{c}_{2k+1}\|_\infty^2 \cdot k)^{k/2}$.
Thus we find, by using assumption~\eqref{lemma_qkm_db_bdm_assumption},
$$
\left|\sum_{n=0}^{k-1} a_{k,n}\cdot b^{nd^m}\right| \le b^{kd^m}
\left|\sum_{n=1}^{k} 3^{-n}\right| \le \frac12 b^{kd^m}.
$$
We readily infer, by taking into account $q_{k,m} = a_{k,0}+a_{k,1}b^{d^m}+\ldots + a_{k,k}b^{k d^m}$,
$$
\frac12 |a_{k,k}|b^{kd^m}\le |a_{k,k}|b^{kd^m}- \frac12
b^{kd^m}\le |q_{k,m}|\le |a_{k,k}|b^{kd^m}+ \frac12 b^{kd^m} =
\frac32|a_{k,k}|b^{kd^m}.
$$
This completes the proof of the lemma.
\end{proof}
\begin{proposition} \label{proposition_db}
Let $k\geq 2$, $m\geq 1$ be integers and assume that~\eqref{lem5_assump} is
satisfied. Then, the integers $p_{k,m}=p_{k,m}(b)$ and $q_{k,m}=q_{k,m}(b)$,
defined by~\eqref{def_p_k_m} and by~\eqref{def_q_k_m}, satisfy
\begin{equation} \label{proposition_db_l_1}
\left|g_\mathbf{u}(b)-\frac{p_{k,m}}{q_{k,m}}\right|\leq \frac{3(k+1)
k^{k} d^m \|\mathbf{u}\|_\infty^{m+(2k+1)(\log_d(2k+1)+1)}}{b \cdot q^2_{k,m}},
\end{equation}
\begin{equation} \label{proposition_db_u_1}
\frac{|g_\mathbf{u}(b)|}{8b q_{k,m}^2} \le \left|g_\mathbf{u}(b) -
\frac{p_{k,m}}{q_{k,m}}\right|.
\end{equation}
Moreover, if, additionally, $k$ and $m$ satisfy
\begin{equation} \label{proposition_db_assump}
k\cdot d^m\log_2 b - 1 \geq
\frac{1}{3} m^2(\log \|\mathbf{u}\|_\infty)^2,
\end{equation}
then
\begin{equation} \label{proposition_db_claim_2}
\left|g_\mathbf{u}(b)-\frac{p_{k,m}}{q_{k,m}}\right|\leq
\frac{3\cdot 2^{C{\sqrt{\log_2 q_{k,m}\log_2\log_2 q_{k,m}}}}}{q_{k,m}^{2}},
\end{equation}
where
\begin{equation} \label{def_C}
C=2\sqrt{2}+2\sqrt{5\cdot\log\|\mathbf{u}\|_{\infty}}+2.
\end{equation}
\end{proposition}
\begin{proof}
From Lemma~\ref{lemma_qkm_db} we have
$$
b^{k\cdot d^m}\ge
\frac{2q_{k,m}}{3|a_{k,k}|}\stackrel{\eqref{ineq_hq_2}}\ge \frac{2q_{k,m}}{3\|\mathbf{u}\|_\infty^{k(\log_d(2k+1)+1)}
k^{k/2}}.
$$
Similarly, by using $|a_{k,k}|\geq 1$ together with Lemma~\ref{lemma_qkm_db}, we get the upper bound
\begin{equation} \label{proposition_db_simple_ub}
b^{k\cdot d^m}\le 2q_{k,m}.
\end{equation}
These two bounds on $b^{kd^m}$ allow us to infer the inequalities~\eqref{proposition_db_l_1} and~\eqref{proposition_db_u_1} straightforwardly from the corresponding bounds in Lemma~\ref{lemma_smallness}.
We proceed with the proof of the estimate~\eqref{proposition_db_claim_2}. We are going to deduce it as a corollary of~\eqref{proposition_db_l_1}. To this end, we are going to prove, under the assumptions of this proposition,
\begin{equation} \label{eq_prop_first}
(k+1)
k^{k} d^m \|\mathbf{u}\|_\infty^{m+(2k+1)(\log_d(2k+1)+1)}\leq 2^{C{\sqrt{\log_2 q_{k,m}\log_2\log_2 q_{k,m}}}},
\end{equation}
where the constant $C$ is defined by~\eqref{def_C}. It is easy to verify
that~\eqref{proposition_db_l_1} and~\eqref{eq_prop_first} indeed
imply~\eqref{proposition_db_claim_2}. Therefore in the remaining part of the
proof we will focus on verifying~\eqref{eq_prop_first}.
The inequality~\eqref{proposition_db_simple_ub} together with condition~\eqref{lem5_assump} imply
\begin{equation}\label{ineq_logqkm}
\begin{array}{rl}
\log_2 q_{k,m} \geq &(2k-1)+k\log_2 (k+1) + \frac{k^2}{2}\log_2 k\\
&+k(k+1)(\log_d(2k+1)+1)\log_2\|\mathbf{u}\|_\infty.
\end{array}
\end{equation}
By taking logarithms again one can derive that $\log_2\log_2 q_{k,m}\ge
\log_2 k$. Now we compute
\begin{equation} \label{proposition_db_implication_1}
\log_2 q_{k,m}\log_2\log_2 q_{k,m} \geq \frac{k^2}{2}(\log_2 k)^2 >
\frac{1}{8}(k\log_2 k+\log_2(k+1))^2.
\end{equation}
The last inequality in~\eqref{proposition_db_implication_1} holds true because $k\log_2 k>\log_2 (k+1)$ for $k\geq 2$.
Another implication of~\eqref{ineq_logqkm} is
\begin{equation} \label{proposition_db_implication_2_0}
\log_2 q_{k,m}\log_2\log_2 q_{k,m} \geq k(k+1)(\log_d(2k+1)+1)\log_2 k
\log_2\|\mathbf{u}\|_\infty.
\end{equation}
Since for $d\ge 2$ and $k\ge 2$ we have $\log_2 k\geq
\frac14(\log_d(2k+1)+1)$ and $k(k+1)\geq \frac15(2k+1)^2$, therefore
we readily infer from~\eqref{proposition_db_implication_2_0}
\begin{equation} \label{proposition_db_implication_2}
\log_2 q_{k,m}\log_2\log_2 q_{k,m} \ge \frac{1}{20\log_2\|\mathbf{u}\|_\infty}
(2k+1)^2(\log_d(2k+1)+1)^2 (\log_2 \|\mathbf{u}\|_\infty)^2.
\end{equation}
Next, it follows from~\eqref{proposition_db_simple_ub} that
\begin{equation} \label{qkm_geq_kdm}
\log_2 q_{k,m} \geq k\cdot d^m\log_2 b - 1.
\end{equation}
Therefore
assumption~\eqref{proposition_db_assump} implies that $\log_2 q_{k,m}\geq
\frac{1}{3} m^2(\log_2 \|\mathbf{u}\|_\infty)^2$. At the same time, the assumptions
$k\geq 2$ together with~\eqref{lem5_assump} readily imply $b^{k\cdot d^m}\geq
576$, hence, combining this with~\eqref{proposition_db_simple_ub}, we find $\log_2\log_2
q_{k,m}\geq\log_2\log_2 288>3$. So,
\begin{equation} \label{proposition_db_implication_3}
\log_2 q_{k,m}\log_2\log_2 q_{k,m} > m^2(\log_2 \|\mathbf{u}\|_\infty)^2.
\end{equation}
Also, by these considerations we deduce from~\eqref{qkm_geq_kdm}
\begin{equation} \label{proposition_db_implication_4}
\log_2 q_{k,m}\log_2\log_2 q_{k,m} > 3 d^m > \left(m\cdot\log_2 d\right)^2.
\end{equation}
\hidden{
Then,
$$
\log q_{k,m}\log\log q_{k,m} \ge \frac1b m^2 (\log \|\mathbf{u}\|_\infty)^2
$$
}
Finally, by taking square root in the both sides of~\eqref{proposition_db_implication_1}, \eqref{proposition_db_implication_2}, \eqref{proposition_db_implication_3} and~\eqref{proposition_db_implication_4} and summing up the results we find
\hidden{
taking the mean value of three estimates on $\log
q_{k,m}\log\log q_{k,m}$ we have that
}
\begin{equation}\label{eq_prop1}
\begin{array}{rl}
C\sqrt{\log_2 q_{k,m}\log_2\log_2 q_{k,m}}\!\!\! &\geq \log_2(k+1) + k\log_2 k + m\log_2 d\\
&+ (m+(2k+1)(\log_d(2k+1)+1))\log_2\|\mathbf{u}\|_\infty,
\end{array}
\end{equation}
where the constant $C$ is defined by~\eqref{def_C}. Finally, by taking the
exponents base two from both sides of~\eqref{eq_prop1}, we
find~\eqref{eq_prop_first}, hence derive~\eqref{proposition_db_claim_2}.
\end{proof}
\begin{remark}
Note that the constant $C$ in Proposition~\ref{proposition_db} is rather far
from being optimal. The proof above can be significantly optimized to reduce
its value. However that would result in more tedious computations. All one
needs to show is the inequality~\eqref{eq_prop1}.
\end{remark}
\section{Proof of Theorem~\ref{th_main}}
We will prove the following result.
\begin{theorem} \label{thm_im}
Let $b\geq 2$. There exists an effectively computable constant $\gamma$,
which only depends on $d$ and $\mathbf{u}$, such that for any $p\in\mathbb{Z}$ and any
sufficiently large $q\in\mathbb{N}$,
we have
\begin{equation} \label{thm_im_result}
\left|g_{\mathbf{u}}(b)-\frac{p}{q}\right|\geq\frac{|g_\mathbf{u}(b)|}{4b\cdot
q^{2}\cdot\exp\left(\gamma\sqrt{\log_2 q \log_2 \log_2 q}\right)}.
\end{equation}
\end{theorem}
It is easy to see that Theorem~\ref{th_main} is a direct corollary of
Theorem~\ref{thm_im}. Indeed, if $f(b)$ from Theorem~\ref{th_main} is not
zero then so is $g_\mathbf{u}(b)$ and the lower bound~\eqref{thm_im_result} is
satisfied for all large
enough $q$, therefore
the inequality
$$
\left|g_\mathbf{u}(b)-\frac{p}{q}\right| < q^{-2}\exp(-
\gamma
\sqrt{\log q\log \log q})
$$
has only finitely many solutions. By definition, this implies that
$g_{\mathbf{u}}(b)$ and in turn $f(b)$ are both $q^{-2}\exp(-
\gamma
\sqrt{\log q\log \log q})$-badly approximable.
\begin{proof}[Proof of Theorem~\ref{thm_im}]
In this proof, we will use the constant $C$ defined by~\eqref{def_C}. Fix a
couple of integers $p$ and $q$. We start with some preliminary calculations
and estimates.
Define $x>2$
to be the unique solution of the following equation
\begin{equation} \label{thm_im_def_x}
q=\frac{1}{12}\cdot x\cdot 2^{-\frac32C\sqrt{\log_2 x\log_2\log_2 x}},
\end{equation}
where the constant $C$ is defined by~\eqref{def_C}.
The condition $x>2$ ensures that both $\log_2 x$ and the double logarithm $\log_2\log_2 x$
exist and are positive, hence $2^{-C\sqrt{\log_2 x\log_2\log_2 x}}<1$ and
thus
\begin{equation} \label{q_leq_x}
12 q < x.
\end{equation}
For large enough $q$ we then have
$$
\frac{81}{4}C^2 \log_2 \log_2 x < \log_2 x
$$
and therefore
\begin{equation} \label{ie_loglog_x_leq_log_x}
2^{\frac32 C\sqrt{\log_2 x\log_2 \log_2 x}}<x^{1/3}.
\end{equation}
From~\eqref{thm_im_def_x} and~\eqref{ie_loglog_x_leq_log_x} we readily infer
\begin{equation} \label{thm_im_preliminary_ub_x}
x<\left(
12 q\right)^{3/2}.
\end{equation}
Rewrite~\eqref{thm_im_def_x} in the following form
\begin{equation} \label{thm_im_def_x_2}
x=
12 q\cdot 2^{\frac32C\sqrt{\log_2 x\log_2\log_2 x}}.
\end{equation}
Then, by applying~\eqref{thm_im_preliminary_ub_x} to it we find that,
for large enough $q$,
\begin{equation} \label{thm_im_ub_x}
x<
12q\cdot 2^{2C\sqrt{\log_2 q\log_2\log_2 q}}.
\end{equation}
Denote
\begin{equation} \label{def_t}
t:=\log_b x.
\end{equation}
Fix an arbitrary value $\tau\ge \tau_0>1$, where $\tau_0 = \tau_0(\mathbf{u})$ is a
parameter which only depends on $\mathbf{u}$ and which we will fix later (namely, it
has to ensure inequality~\eqref{cond_tau}). Assume that $t>2$ is large
enough (that is, assume $q$ is large enough, then by~\eqref{q_leq_x} $x$ is
large enough, hence by~\eqref{def_t} $t$ is large enough), so that
\begin{equation} \label{t_log_t}
d \leq
\frac{1}{\tau}\sqrt{\frac{t}{\log_2 t}}.
\end{equation}
As $t>2$, we also have $t\log_2 t > 2$. Choose an integer $n$ of the form
$n:=k\cdot d^m$ with $m\in\mathbb{N}$, $k\in\mathbb{Z}$ such that
\begin{eqnarray}
t\leq & n & \leq t+d\tau\sqrt{t\log_2 t}, \label{thm_im_distance_t_n_ub}\\
\tau \sqrt{t\log_2 t} \leq & d^m & \leq d \tau \sqrt{t\log_2 t}. \label{thm_im_2m_lb_1}
\end{eqnarray}
One can easily check that such $n$ always exists.
Inequalities \eqref{t_log_t}, \eqref{thm_im_distance_t_n_ub}
and~\eqref{thm_im_2m_lb_1} imply
\begin{equation} \label{thm_im_k_ub_1}
k=\frac{n}{d^m}\leq\frac{t+d\tau \sqrt{t\cdot\log_2
t}}{\tau\sqrt{t\cdot\log_2 t}}=\frac{1}{\tau}\sqrt{\frac{t}{\log_2 t}}+d\le
\frac{2}{\tau}\sqrt{\frac{t}{\log_2 t}}.
\end{equation}
Then we deduce, for $t$ large enough,
$$
k\log_2 k\le \frac{2}{\tau}\sqrt{\frac{t}{\log_2 t}} \left(\log_2
(2/\tau)+\frac12\log_2 t-\frac12\log_2\log_2 t\right)< \frac{2}{\tau}
\sqrt{t\log_2 t}.
$$
Therefore, for any $\tau$ large enough, that is for any $\tau\geq\tau_0$, where $\tau_0$ depends only on $\mathbf{u}$, we have
\begin{multline} \label{cond_tau}
2+\log_2 (k+1)+\frac{k}{2}\log_2 k
\\+ (k+1)(\log_d(2k+1)+1)\log_2
\|\mathbf{u}\|_\infty
<\tau \sqrt{t\log_2 t}.
\end{multline}
By taking the exponent base two of the left hand side of~\eqref{cond_tau} and
the exponent base $b\geq 2$ of the right hand side of~\eqref{cond_tau}, and
then using~\eqref{thm_im_2m_lb_1}, we ensure that~\eqref{lem5_assump} is
satisfied. We can also take $q$ (and, consecutively, $t$) large enough so
that $m$, bounded from below by~\eqref{thm_im_2m_lb_1}, satisfies $d^m \ge
m^2 (\log_2\|\mathbf{u}\|_\infty)^2$, and then
necessarily~\eqref{proposition_db_assump} is verified. Also,
\eqref{thm_im_distance_t_n_ub} and~\eqref{thm_im_2m_lb_1} imply that, for $t$
large enough, $k\geq 2$.
Hence we have checked all the conditions on $k$ and $m$ from
Proposition~\ref{proposition_db}. It implies that the integers $p_{k,m}$ and
$q_{k,m}$, defined by~\eqref{def_p_k_m} and~\eqref{def_q_k_m}, satisfy
inequalities~\eqref{proposition_db_u_1} and~\eqref{proposition_db_claim_2}.
Lemma~\ref{lemma_ub_c_i} and inequality~\eqref{lem5_assump} imply the
inequality~\eqref{lemma_qkm_db_bdm_assumption}, so we can use
Lemma~\ref{lemma_qkm_db}, i.e.\ we have
\begin{equation} \label{thm_im_q_k_m_db}
\frac12 |a_{k,k}|b^n\leq q_{k,m}\leq\frac32 |a_{k,k}|b^n.
\end{equation}
If
$$
\frac{p}{q}=\frac{p_{k,m}}{q_{k,m}},
$$
the result~\eqref{thm_im_result} readily follows from the lower
bound~\eqref{proposition_db_u_1} in Proposition~\ref{proposition_db}.
\hidden{
We readily infer
$$
\left|q-q_{k,m}\right|\leq
$$
}
We proceed with the case
\begin{equation} \label{thm_im_case_different}
\frac{p}{q}\ne\frac{p_{k,m}}{q_{k,m}}.
\end{equation}
By triangle inequality, and then by the upper
bound~\eqref{proposition_db_claim_2}, we have
\begin{equation} \label{thm_im_main_lb_1}
\begin{aligned}
\left|g_\mathbf{u}(b)-\frac{p}{q}\right|&\geq\left|\frac{p_{k,m}}{q_{k,m}}-\frac{p}{q}\right|-\left|g_\mathbf{u}(b)-\frac{p_{k,m}}{q_{k,m}}\right|\\
&\geq \frac{1}{q_{k,m} q}-\frac{3\cdot 2^{C{\sqrt{\log_2 q_{k,m}\log_2\log_2
q_{k,m}}}}}{q_{k,m}^2}.
\end{aligned}
\end{equation}
By
applying the upper bound in~\eqref{thm_im_q_k_m_db} complemented with~\eqref{ineq_hq_2}, we find
$$
\log_2 q_{k,m} \leq \log_2 \frac32+ \frac{k}{2}\log_2 k+
k(\log_d(2k+1)+1)\log_2\|\mathbf{u}\|_\infty + n\log_2 b
$$
Upper bounds~\eqref{thm_im_k_ub_1} on $k$
and~\eqref{thm_im_distance_t_n_ub} on $n$ ensure that for large
enough $q$ we have
\begin{equation} \label{thm_im_ub_1}
2^{C\sqrt{\log_2 q_{k,m}\log_2\log_2 q_{k,m}}}\leq 2^{\frac32C\sqrt{\log_2
x\log_2\log_2 x}}.
\end{equation}
The formula~\eqref{def_t} for $t$ and the lower bound
in~\eqref{thm_im_distance_t_n_ub} together give $b^n\geq x$. Then, by using
the lower bound~\eqref{thm_im_q_k_m_db},
we find
\begin{equation} \label{thm_im_lb_1}
q_{k,m}\geq \frac12 b^n\geq \frac12 x.
\end{equation}
By using the estimates~\eqref{thm_im_ub_1} and~\eqref{thm_im_lb_1} on the numerator and denominator respectively, and then by substituting the value of $x$ given by~\eqref{thm_im_def_x_2}, we find
$$
\frac{3\cdot 2^{C{\sqrt{\log_2 q_{k,m}\log_2\log_2 q_{k,m}}}}}{q_{k,m}^2}\leq
\frac{3\cdot 2^{\frac32C{\sqrt{\log_2 x\log_2\log_2 x}}}}{\frac12 x\cdot
q_{k,m}} \stackrel{\eqref{thm_im_def_x_2}}=\frac{1}{2 q_{k,m} q},
$$
hence, recalling~\eqref{thm_im_main_lb_1}, we find
\begin{equation} \label{thm_im_lb_12}
\left|g_\mathbf{u}(b)-\frac{p}{q}\right|\geq\frac{1}{2 q_{k,m} q}.
\end{equation}
By inequality~\eqref{thm_im_q_k_m_db} combined with the upper bound in~\eqref{thm_im_distance_t_n_ub} and then~\eqref{def_t} and~\eqref{thm_im_ub_x} we get that, for $q$ large enough,
\begin{equation*}
q_{k,m}\leq\frac32 |a_{k,k}|b^n\leq \frac32 |a_{k,k}|
b^{t+d\tau\sqrt{t\log_2 t}} \leq 18 |a_{k,k}|q\cdot
2^{(2d\tau\log_2 b+2C)\sqrt{\log_2 q \log_2\log_2 q}}.
\end{equation*}
The bound~\eqref{ineq_hq_2} implies
\begin{equation} \label{ineq_hq_2_log_corollary}
\log_2 |a_{k,k}|\leq \frac{k}{2}\log_2 k+k(\log_d (2k+1) + 1)\log_2\|\mathbf{u}\|_\infty.
\end{equation}
By comparing the right hand side of this inequality with the left hand side
in~\eqref{cond_tau} we find
$$
|a_{k,k}|\leq 2^{2\tau\sqrt{\log_2 q\log_2\log_2 q}}
$$
and then
$$
q_{k,m}\le 18 q\cdot 2^{(2d\tau\log_2 b+2\tau+2C)\sqrt{\log_2 q\log_2\log_2 q}}.
$$
Finally, \eqref{thm_im_lb_12} implies
$$
\left|g_\mathbf{u}(b)-\frac{p}{q}\right|\geq\frac{1}{36 q^2\cdot
2^{(2d\tau\log_2 b+2\tau+2C)\sqrt{\log_2 q \log_2\log_2 q}}}.
$$
This completes the proof of the theorem with $\gamma = \ln
2\cdot\left(2d\tau\log_2 b+2\tau+2C\right)$.
\end{proof}
\end{document} |
\begin{document}
\maketitle
\Opensolutionfile{ans}[answers_lecture_4]
\fbox{\fbox{\parbox{5.5in}{
\textbf{Problem:}\\
How to find invariants of singularities of a $G$-structure?
}}}
\section*{Simple example}
Let $V = (v^1,v^2)$ be a vector field on $\mathbb{R}^2$.
If $V(x_0) \ne 0$, then $V$ restricted to a neighborhood $U(x_0)$ is equivalent to $\partial_1$, hence all nonvanishing vector fields are locally equivalent and there are no invariants.
If $V(x_0) = 0$, and $W(x_0) = 0$, and the matrix $||\partial_i V^j(x_0)||$ is not similar to the matrix $||\partial_i W^j(x_0)||$, then $V$ and $W$ are not locally equivalent and there arise invariants.
Generally, \emph{non-regular points have their own invariants}.
\section{Jets of smooth maps}
Let $M$ and $N$ be smooth manifolds, $x \in M$ is a point.
We will denote by $f : (M,x) \to N$ a smooth map which is defined in an open neighborhood $U$ of $x$.
By $D(f)$ we will denote the domain $U$.
Denote by $C^\infty((M,x),N)$ the set of all smooth maps $f : (M,x) \to N$.
On the set $C^\infty((M,x),N)$ we introduce the equivalence relation: we say that $f$, $g$ in $C^\infty( (M,x),N)$ are equivalent at $x \in M$ ($f \sim_x g$) if
\begin{equation}
\exists W \in \mathcal{U}_x, \quad W \subset D(f) \cap D(g) \text{ such that } f|_W = g|_W.
\label{eq:equivalence_between_local_maps}
\end{equation}
\begin{definition}
\label{def:germ_of_map}
A \emph{germ of a map} $f \in C^\infty( (M,x) ,N)$ at the point $x$ is the equivalence class of
$f \in C^\infty((M,x),N)$ with respect to $\sim_x$ denoted by $\langle f \rangle_x$.
\end{definition}
We set
\begin{multline}
\mathcal{G}_x(M,N) = C^\infty( (M,x) ,N) / \sim_x =
\\
=
\left\{ \langle f \rangle_x \mid f \in C^\infty( (M,x) ,N) \right\}.
\label{eq:set_of_germs}
\end{multline}
If $M_1$, $M_2$, and $M_3$ are smooth manifolds, and $f_1 \in C^\infty((M_1, x_1),M_2)$, $f_2 \in C^\infty((M_2,x_2),M_3)$ with $f_1(x_1) = x_2$, then we can define the composition of germs as follows:
\begin{equation}
\langle f_2 \rangle_{x_2} \circ \langle f_1 \rangle_{x_1} = \langle f_2 \circ f_1 \rangle_{x_1}.
\label{eq:comp_of_germs}
\end{equation}
Another equivalence relation $\sim_k$ on the set $C^\infty( (M,x),N)$ is introduced as follows:
for $f,g \in C^\infty((M,x),N)$ such that $f(x)=g(x)$ we take the coordinate systems $(U,x^i)$ in a neighborhood $U$ of $x$ and $(V,y^\alpha)$ in a neighborhood $V$ of $y = f(x) = g(x)$.
Then we say that $f$ and $g$ are equivalent ($f \sim_k g$) if the Taylor series of the coordinate representations of $f$ and $g$ coincide up to the order $k$. One can prove that this equivalence relation does not depend on the choice of coordinate systems.
\begin{definition}
The equivalence class $j^k_x f$ of $f$ is called \emph{$k$-jet of the map $f$ at the point $x$}.
\end{definition}
The set of all $k$-jets of maps $f \in C^\infty( (M,x) ,N)$ will be denoted by $J^k_x(M,N)$.
It is clear that if $ f, g \in C^\infty( (M,x) ,N)$ determine the same germ at $x$, that is
if $f \sim_x g$, then $j^k_x f = j^k_x g$ for any $k$.
Also, the composition of maps, or the composition of germs, define the composition of $k$-jets:
\begin{equation}
j^k_{f_1(x)} (f_2) \circ j^k_x(f_1) = j^k_x( f_2 \circ f_1).
\label{eq:composition_of_jets}
\end{equation}
For the set of all $k$-jets $J^k(M,N)$ one can define two natural projections:
\begin{eqnarray}
\pi_0 : J^k(M,N) \to M, \quad j^k_{x}f \mapsto x,
\\
\pi_1 : J^k(M,N) \to N, \quad j^k_{x}f \mapsto f(x).
\label{eq:projections_on_jet_space}
\end{eqnarray}
The set $J^k(M,N)$, $\dim M = m$, $\dim N = n$ can be endowed by a manifold structure so that these projections are bundle projections.
We set
\begin{equation}
J^k_{x,y}(M,N) = \left\{ j^k f \in J^k(M,N) \mid \pi_0(j^k f) = x, \pi_1(j^k f) = y \right\}.
\label{eq:set_of_jets_joining_two_points}
\end{equation}
The typical fiber of the bundle $(J^k(M,N),\pi_0,M)$ is the manifold
$J^k_{0,0}(\mathbb{R}^m,\mathbb{R}^n) \times N$,
and of the bundle $(J^k(M,N),\pi_1,N)$ is $J^k_{0,0}(\mathbb{R}^m,\mathbb{R}^n) \times M$.
The manifold $J^k(M,M)$ endowed with the projections $\pi_0$ and $\pi_1$, and the composition of $k$-jets is a groupoid.
If $\pi : E \to M$ is a fiber bundle, then the set of $k$-jets of sections of the bundle $E$ is denoted by $J^k(E)$.
\section{Bundle of germs of diffeomorphisms. Coframe bundle of $k$th order}
\subsection{Bundle of germs of diffeomorphisms}
Let $\mathcal{D}(m)$ be the group of all germs of local diffeomorphisms of $\mathbb{R}^m$ at $0 \in \mathbb{R}^m$:
\begin{equation*}
\mathcal{D}(m) = \left\{ \langle \varphi\rangle_0 \mid
\varphi : (\mathbb{R}^m,0) \to (\mathbb{R}^m,0)
\text{ is a local diffeomorphism} \right\}
\end{equation*}
endowed with the operation of composition of germs:
$\langle \varphi_1 \rangle_0 \circ \langle \varphi_2 \rangle_0 =
\langle \varphi_1 \circ \varphi_2 \rangle_0$.
Now let us consider
\begin{equation*}
\mathcal{B}(M) = \left\{ \langle f \rangle_x \mid f : (M,x) \to (\mathbb{R}^m,0)
\text{ is a local diffeomorphism }\right\}
\end{equation*}
We have natural projection
\begin{equation*}
\pi : \mathcal{B}(M) \to M, \quad \pi(\langle f\rangle_x)=x,
\end{equation*}
and the natural right action of $\mathcal{D}(m)$ on $\mathcal{B}(M)$:
\begin{equation*}
\langle f\rangle_x \cdot \langle \varphi\rangle_0 = \langle \varphi^{-1} \circ f\rangle_x.
\end{equation*}
so $(\mathcal{B}(M),\pi,M)$ can be considered as a ``principal fiber bundle''.
Let $(U, u : U \to V \subset \mathbb{R}^m)$ be a coordinate map.
This map determines the ``trivialization'' of the bundle $\mathcal{B}(M)$ over $U$.
Let
\begin{equation*}
t_a : \mathbb{R}^m \to \mathbb{R}^m, \quad t_a(v) = v + a
\end{equation*}
be the parallel translation of $\mathbb{R}^m$.
Then
\begin{equation*}
\mathcal{U}: \pi^{-1}(U) \to U \times \mathcal{D}(m),
\quad
\langle f \rangle_x \to \left(x, \langle f \circ u^{-1} \circ t_{u(x)} \rangle_0\right).
\end{equation*}
gives us the required trivialization.
The inverse map is
\begin{equation*}
\mathcal{U}^{-1} : U \times \mathcal{D}(m) \to \pi^{-1}(U),
\quad
(x, \langle \varphi \rangle_0) \to \langle \varphi \circ t_{-u(x)} \circ u \rangle_x.
\end{equation*}
Now assume that we have two coordinate systems $(U,u)$ and $(U,\bar u)$ on $M$.
Then,
\begin{equation*}
\bar{\mathcal{U}} \circ \mathcal{U}^{-1} : U \times \mathcal{D}(m) \to U \times \mathcal{D}(m),
\quad (x, \langle\varphi\rangle_0) \to \left(x, \langle \varphi \circ t_{-u(x)} \circ u \circ \bar u^{-1} \circ t_{\bar u(x)} \rangle_0\right),
\end{equation*}
so the gluing functions of the atlas of the ``principal bundle'' $(\mathcal{B}(M),\pi,M)$ constructed by an atlas $(U_\alpha,u_\alpha)$ of the manifold $M$ are
\begin{equation*}
g_{\beta\alpha} : U_{\alpha} \cap U_\beta \to \mathcal{D}(m), \quad
g_{\beta\alpha}(x) = t_{-u_\beta(x)} \circ u_\beta \circ u^{-1}_\alpha \circ t_{u_\alpha(x)}.
\end{equation*}
\begin{remark}
In what follows we will use unordered multiindices.
We denote by $\mathcal{I}(m)$ the set of all unordered multiindices
$I = \left\{ i_1 i_2 \dots i_k \right\}$, where $1 \le i_l \le m$, for all $l=\overline{1,k}$.
The number $k$ is called the length of the multiindex and is denoted by $|I|$.
Also, we set $I_k(m) = \left\{ I \in \mathcal{I}(m) \mid |I| = k \right\}$.
\end{remark}
\subsection{Differential group of $k$th order}
The $k$th order \emph{differential group} is the set of $k$-jets:
\begin{equation*}
D^k(m) = \left\{ j^k_0(\varphi) \mid
\varphi : (\mathbb{R}^m,0) \to (\mathbb{R}^m,0)
\text{ is a local diffeomorphism } \right\}.
\end{equation*}
On the set $D^k(m)$ consider the operation
\begin{equation}
D^k(m) \times D^k(m) \to D^k(m), \quad j^k_0(\varphi) \cdot j^k_0(\psi) = j^k_0(\varphi \circ \psi),
\label{eq:operation_in_kth_order_differential_group}
\end{equation}
then $(D^k(m),\cdot)$ is a group.
Denote $\varphi^k_I = \left.\partial_I\right|_0 \varphi^k$.
Then
\begin{equation}
\mathcal{C}^k : D^k(m) \to \mathbb{R}^N, \quad j^k_0(\varphi) \to \{\varphi^k_I\}
\label{eq:coordinates_on_kth_order_differential_group}
\end{equation}
is a one-to-one map of $D^k(m)$ onto the open set in $\mathbb{R}^N$ determined by the inequality
$\det\|\varphi^k_i\| \ne 0$.
In this way we get globally defined coordinates on $D^k(m)$ which will be called \emph{natural coordinates}.
With respect to the natural coordinates the product \eqref{eq:operation_in_kth_order_differential_group} is written in terms of polynomials, therefore is a smooth map.
Thus $D^k(m)$ is a Lie group.
\subsection{Bundle of $k$th order holonomic coframes}
For an $m$-dimensional manifold $M$ consider the set
\begin{equation*}
B^k(M) = \left\{ j^k_x{f} \mid f : (M,x) \to (\mathbb{R}^m,0)
\text{ is a local diffeomorphism }\right\}
\end{equation*}
whose elements are called \emph{$k$-coframes} or \emph{coframes of order $k$} of the manifold $M$.
We have the projection
\begin{equation*}
\pi^k : B^k(M) \to M, \quad \pi^k(j^k_x f)=x.
\end{equation*}
On the set $B^k(M)$ we have the right $D^k(m)$-action:
\begin{equation*}
j^k_x(f) \cdot j^k_0(\varphi) = j^k_x(\varphi^{-1} \circ f).
\end{equation*}
and one can easily prove that this action is free and its orbits are the fibers of the projection $\pi$.
\subsubsection{Trivializing charts of $B^k(M)$. Gluing maps}
In what follows we set $t_a : \mathbb{R}^m \to \mathbb{R}^m$, $t_a(x) = x + a$, the parallel translation of $\mathbb{R}^m$ with respect to $a \in \mathbb{R}^m$.
Let $(U, u : U \to V \subset \mathbb{R}^m)$ be a coordinate chart on $M$.
We have the one-to-one map
\begin{multline}
\mathcal{T}^k: (\pi^k)^{-1}(U) \to U \times D^k(m),
\\
j^k_x(f) \to \left(x, j^k_0(t_{-u(x)} \circ u \circ f^{-1}) \right).
\label{eq:trivialization_of_kth_order_coframe_bundle}
\end{multline}
The map $\mathcal{T}^k$ is $D^k(m)$-equivariant because
\begin{multline*}
\mathcal{T}^k( j^k_x f \cdot j^k_0\varphi) = \mathcal{T}^k \left(j^k_x (\varphi^{-1} \circ f)\right) =
\left(x, j^k_0(t_{-u(x)} \circ u \circ f^{-1} \circ \varphi)\right)=
\\
=\left(x, j^k_0(t_{-u(x)} \circ u \circ f^{-1}) \cdot j^k_0\varphi \right)=
\left(x, j^k_0(t_{-u(x)} \circ u \circ f^{-1}) \right) \cdot j^k_0\varphi.
\end{multline*}
Since $D^k(m)$ is a Lie group, the map $\mathcal{T}^k$ defines a trivializing chart for the map $\pi^k : B^k(M) \to M$.
Therefore, for each atlas $(U_\alpha,u_\alpha)$, we construct the atlas of trivializing charts
$(U_\alpha,\mathcal{T}^k_\alpha)$. Find the gluing maps for this atlas.
Assume that $(U_\alpha,u_\alpha)$, $(U_\beta,u_\beta)$ are two coordinate systems on $M$, and
$U_\alpha \cap U_\beta \ne \emptyset$.
Then
\begin{equation*}
j^k_0(t_{-u_\beta(x)} \circ u_\beta \circ f^{-1}) =
j^k_0(t_{-u_\beta(x)} \circ u_\beta \circ u_\alpha^{-1} \circ t_{u_\alpha(x)}) \cdot
j^k_0(t_{-u_\alpha(x)} \circ u_\alpha \circ f^{-1})
\end{equation*}
Therefore, the gluing maps are
\begin{equation}
g_{\beta\alpha} : U_\alpha \cap U_\beta \to D^k(m), \quad
g_{\beta\alpha}(x) =
j^k_0(t_{-u_\beta(x)} \circ u_\beta \circ u_\alpha^{-1} \circ t_{u_\alpha(x)})
\label{eq:gluing_maps_for_B_k}
\end{equation}
Since the gluing functions are smooth, we conclude that $\pi^k : B^k(M) \to M$ is a $D^k(m)$-principal bundle over $M$ which is called \emph{the bundle of $k$-coframes of $M$} or \emph{the bundle of coframes of order $k$ of $M$}.
For a coordinate chart $(U,u)$ there is defined a section
\begin{equation}
s : U \to B^k(M), \quad s(x) = j^k_x u,
\label{eq:natural_sections_of_B_k}
\end{equation}
which is called the \emph{natural $k$-coframe field} associated with a coordinate chart $(U_\alpha,u_\alpha)$.
\subsubsection{Natural coordinates on $B^k(M)$}
Let $(U,u)$ be a coordinate chart on $M$, and let $\mathcal{T}^k$ be the corresponding trivialization of $B^k(M)$.
Then
\begin{equation}
(u \times \mathcal{C}^k) \circ \mathcal{T}^k : (\pi^k)^{-1}(U) \to \mathbb{R}^m \times \mathbb{R}^N
\end{equation}
gives \emph{natural local coordinates on} $B^k(M)$.
The section $s : U \to B^k(M)$ \eqref{eq:natural_sections_of_B_k} is written with respect to this coordinate system as follows:
\begin{equation}
s(u^k) = (u^k, \delta^k_i, 0, \dots, 0).
\label{eq:natural_section_wrt_natural_coordinates}
\end{equation}
\begin{remark}
We have the natural projections $\pi^k_l : B^k(M) \to B^l(M)$, $k \ge l$, which are, in turn, principal fiber bundles with the group $H^k_l$ which is the kernel of the natural homomorphism $D^k(m) \to D^l(m)$.
\end{remark}
\subsection{Case $k=1$}
The Lie group $D^1(m) \cong GL(m)$ and $B^1(M)=B(M)$ is the coframe bundle of $M$.
\subsection{Case $k=2$}
\subsubsection{The group $D^2(m)$}
Elements of the group $D^2(m)$ are the $2$-jets of germs $\varphi \in \mathcal{D}(m)$. The coordinate system
\eqref{eq:coordinates_on_kth_order_differential_group} in this case is
\begin{equation}
j^2_0\varphi \longrightarrow (\varphi^k_i, \varphi^k_{ij}), \text{ where }
\varphi^k_i = \frac{\partial \varphi^k}{\partial u^i}(0),
\quad \varphi^k_{ij} = \frac{\partial^2 \varphi^k}{\partial u^i \partial u^j}(0).
\end{equation}
Here $u^i$ are coordinates on $\mathbb{R}^m$, and it is clear that $\varphi^k_{ij} = \varphi^k_{ji}$.
From this follows that $\dim D^2(m) = m^2 + m^2(m+1)/2$.
Now, if
$j^2_0\varphi \rightarrow (\varphi^k_i, \varphi^k_{ij})$, $j^2_0\psi \rightarrow (\psi^k_i, \psi^k_{ij})$, and
\begin{equation*}
j^2_0\psi \cdot j^2_0\varphi = j^2_0(\psi\circ\varphi) \rightarrow (\eta^k_i, \eta^k_{ij}),
\end{equation*}
by the chain rule we get that
\begin{equation}
\eta^k_i = \psi^k_s \varphi^s_i, \quad \eta^k_{ij} = \psi^k_{pq} \varphi^p_i \varphi^q_j + \psi^k_s \varphi^s_{ij}
\label{eq:product_D_2}
\end{equation}
These formulas express the product in the group $D^2(m)$ in terms of the natural coordinates $(\varphi^k_i,\varphi^k_{ij})$.
\subsubsection{The bundle $B^2(M)$}
The elements of $B^2(M)$ are the $2$-jets of local diffeomorphisms $f : (M,x) \to (\mathbb{R}^m,0)$.
The natural coordinates \eqref{eq:natural_section_wrt_natural_coordinates} in this case can be found as follows.
Let $(U,u)$ be a coordinate chart on $M$. Then, for any $j^2_x f$ with $x \in U$, the diffeomorphism
\begin{equation*}
f \circ u^{-1} : (\mathbb{R}^m, u(x)) \to (\mathbb{R}^m,0)
\end{equation*}
can be written as
$w^k=f^k(u^i)$, where $w^k$ are standard coordinates on $\mathbb{R}^m$, and $f^k(u^i(x)) = 0$.
Then take the inverse diffeomorphism $u^k = \widetilde{f}^k(w^i)$, and the local diffeomorphism $t_{-u(x)} \circ u \circ f^{-1}$ has the form $\widetilde{f}^k(w^i)-u^k(x)$.
Therefore, the natural coordinates of $j^k_x f$ induced by a coordinate chart $(U,u)$ on $M$ are
$(u^k,u^k_i,u^k_{ij})$, where
\begin{equation}
u^k_i = \frac{\partial\widetilde{f}^k}{\partial w^i}(0), \quad
u^k_{ij} = \frac{\partial^2\widetilde{f}^k}{\partial w^i \partial w^j}(0).
\label{eq:natural_coordinates_B_2}
\end{equation}
The derivatives of $\widetilde{f}$ at $0$ can be expressed in terms of the derivatives of $f$ at $u^k(x)$. If we denote
\begin{equation*}
f^k_i = \frac{\partial f^k}{\partial u^i}(u(x)),
f^k_{ij} = \frac{\partial^2 f^k}{\partial u^i \partial u^j}(u(x)).
\end{equation*}
then
\begin{equation*}
u^k_i = \widetilde{f}^k_i, \quad u^k_{ij} = - \widetilde{f}^k_s f^s_{lm} \widetilde{f}^l_i \widetilde{f}^m_j.
\end{equation*}
With respect to the natural coordinate system the $D^2(m)$-action is written as follows:
\begin{equation}
(u^k, u^k_i, u^k_{ij}) \cdot (\varphi^k_i, \varphi^k_{ij}) = (u^k, u^k_s \varphi^s_i,
u^k_{pq} \varphi^p_i \varphi^q_j + u^k_s \varphi^s_{ij}).
\label{eq:D_2(m)-action_wrt_natural_coordinates}
\end{equation}
Let us express the gluing maps in terms of the natural coordinates. If $(U,u)$ and $(U',u')$ are coordinate charts on $M$ such that $U \cap U' \ne \emptyset$, then from \eqref{eq:gluing_maps_for_B_k} it follows that the corresponding gluing map is
\begin{equation*}
g : U \cap U' \to D^2(m), g(x) = j^2_0(t_{-u'(x)} \circ u' \circ u^{-1} \circ t_{u(x)})
\end{equation*}
If the coordinate change $u' \circ u^{-1}$ is written as $v^k = v^{k}(u^i)$, then we have to take derivatives at $0$ of the map $v^{k}(u^i+u^i(x))-v^k(u^i(x))$, which are equal to the derivatives of the functions $v^k$ at $u^k(x)$. Therefore,
\begin{equation}
g : U \cap U' \to D^2(m), \quad
g(x) =
\left(\frac{\partial v^k}{\partial u^i}(u(x)), \frac{\partial^2 v^k}{\partial u^i \partial u^j}(u(x))\right).
\label{eq:gluing_maps_for_B_2}
\end{equation}
Therefore, \emph{the bundle $B^2(M) \to M$ is the $D^2(m)$-principal bundle with gluing maps \eqref{eq:gluing_maps_for_B_2}}.
\section{First prolongation of a $G$-structure}
\subsection{First prolongation of an integrable $G$-structure}
Let $P(M) \to M$ be an integrable $G$-structure, that is a subbundle of $B(M)$ such that there exists an atlas
$\mathcal{A} = (U_\alpha,u_\alpha)$ such that the natural coframes of the atlas are sections of $P(M)$, or equivalently,
the coordinate change $u^{k'} = u^{k'}(u^i)$ has the property that $\|\frac{\partial u^{k'}}{\partial u^k}\| \in G$.
In this case, we can specify the set $\mathcal{B}_G$ of local diffeomorphisms $f : (M,x) \to (\mathbb{R}^m,0)$ such that for each coordinate map $u$ of the atlas $\mathcal{A}$, the local diffeomorphism $f \circ u^{-1}$ has its Jacobi matrix in $G$ at all points of its domain.
It is clear that
\begin{equation}
P(M) = \left\{ j^1_x f \mid \| \frac{\partial (f \circ u^{-1})^k}{\partial u^i}|_{u(x)} \| \in G \right\}
\end{equation}
and consider
\begin{equation}
P^1(M) = \left\{ j^2_x f \mid f \in \mathcal{B}_G \right\}
\label{eq:first_holonomic_prolongation_of_P}
\end{equation}
\begin{remark}
Note that if $j^2_x f$ is an element of $P^1(M)$, then the Jacobi matrix
$\|\left.\frac{\partial (f\circ u^{-1})^k}{\partial u^i}\right|_{u(x)}\|$
is an element of $G$.
However, the converse is not true because, by definition, the Jacobi matrix belongs to $G$ for each point which implies conditions on the second derivatives of $f \circ u^{-1}$ (see below).
\end{remark}
In the same manner introduce the set $\mathcal{D}_G$ of local diffeomorphisms
$\varphi : (\mathbb{R}^m,0) \to (\mathbb{R}^m,0)$ whose Jacobi matrices are elements of $G$ at all points of their domains.
Consider the Lie subgroup of $D^2(m)$:
\begin{equation}
G^1 = \left\{ j^2_0 \varphi \in D^2(m) \mid \varphi \in \mathcal{D}_G \right\}
\label{eq:first_holonomic_prolongation_of_G}
\end{equation}
The Lie subgroup $G^1 \subset D^2(m)$ is called the \emph{first holonomic prolongation of the group} $G$.
The subset $P^1(M)$ is a submanifold of $B^2(M)$, and is the total space of a principal subbundle of $B^2(M) \to M$ with the subgroup $G^1 \subset D^2(m)$. This subbundle is called
the \emph{first holonomic prolongation of the integrable $G$-structure $P$}.
It is also clear that, for the atlas $\mathcal{A}$ the gluing map \eqref{eq:gluing_maps_for_B_2} takes values in the subgroup $G^1$. Therefore, \emph{an integrable $G$-structure defines reduction of the principal bundle $B^2(M)$ to the structure group $G^1 \subset D^2(m)$}.
\subsection{Algebraic structure of the Lie group $G^1$}
We have the surjective Lie group morphism
\begin{equation}
p^1 : G^1 \to G, \quad j^2_0 \varphi \mapsto j^1_0 \varphi.
\label{eq:surjective_morphism_G_1_to_G}
\end{equation}
Let us find the kernel of $p^1$. Let $\varphi : (\mathbb{R}^m,0) \to (\mathbb{R}^m,0)$ be an element of $\mathcal{D}_G$.
Then, the map $g(u^k) = \|\frac{\partial \varphi^k}{\partial u^i}\|$ takes values in $G$, and $g(0) = I \in G$. Therefore,
\begin{equation*}
\left.\frac{\partial^2\varphi^k}{\partial u^i\partial u^j}\right|_0 = \left.\frac{\partial g^k_i}{\partial u^j}\right|_0
\end{equation*}
is a linear map $t : \mathbb{R}^m \cong T_0 \mathbb{R}^m \to \mathfrak{g}(G) \subset \mathfrak{gl}(m)$
with the property that $t(u)v = t(v)u$. In other words, elements of $\ker p^1$ are tensors of type $(2,1)$ on $\mathbb{R}^m$ such that $t^k_{ij} \in \mathfrak{g}$ for each $i$, and $t^k_{ij} = t^k_{ji}$.
The vector space of such tensors is called the \emph{first prolongation of the Lie algebra $\mathfrak{g}$} and is denoted by $\mathfrak{g}^1$.
From this follows that $\ker p^1$ is a commutative Lie group.
Thus we have the exact sequence of Lie groups
\begin{equation}
0 \to \mathfrak{g}^1 \to G^1 \to G \to e.
\label{eq:exact_splitting_sequence}
\end{equation}
This sequence admits a splitting: for any $\|g^k_i\| \in G$ we take the diffeomorphism $\varphi^k(u^i) = g^k_i u^i$, which evidently lies in $\mathcal{D}_G$, and set $s(j^1_0\varphi) = j^2_0\varphi$.
According to the group theory, the exact splitting sequence \eqref{eq:exact_splitting_sequence} determines the right action of $G$ on $\mathfrak{g}^1$: $R_g t = s(g^{-1}) \cdot t \cdot s(g)$, and $G^1$ is the extension of $G$ by the commutative group $\mathfrak{g}^1$ with respect to the action $R$.
This means that we have the group isomorphism
\begin{equation}
G^1 \to G \times \mathfrak{g}^1, \quad g^1 \mapsto \left( p^1(g^1),\ s\left( p^1\left( (g^1)^{-1} \right) \right) g^1 \right),
\label{eq:G_1_as_extension}
\end{equation}
therefore
\begin{equation}
G^1 \cong G \times \mathfrak{g}^1, \text{ and } (g_1,t_1) \cdot (g_2,t_2) = (g_1g_2, R(g_2)t_1 + t_2).
\label{eq:G_1_as_extension_1}
\end{equation}
Let us express this representation of $G^1$ in terms of the canonical coordinates.
We have $p^1(\varphi^k_i,\varphi^k_{ij})=\varphi^k_i$, and $s(\varphi^k_i) = (\varphi^k_i,0)$.
Hence follows that the isomorphism \eqref{eq:G_1_as_extension} is
\begin{equation*}
(\varphi^k_i, \varphi^k_{ij}) \to (g^k_i, t^k_{ij}) \text{ with }
g^k_i = \varphi^k_i, \ t^k_{ij} = \widetilde{\varphi}^k_s \varphi^s_{ij}.
\end{equation*}
Thus we get \emph{algebraic coordinates} $(g^k_i,t^k_{ij})$ on $G^1$ which are adopted to the algebraic structure of $G^1$.
The right action $R$ with respect to the canonical coordinates is written as follows (we use \eqref{eq:product_D_2}):
\begin{equation*}
(\widetilde{\varphi}^k_i, 0) \cdot (\delta^k_i, \varphi^k_{ij}) \cdot (\varphi^k_i,0) =
(\delta^k_i, \widetilde{\varphi}^k_s \varphi^s_{lm} \varphi^l_i \varphi^m_j).
\end{equation*}
At the same time, the canonical and the algebraic coordinates of the elements $(\varphi^k_i,0)$ and $(\delta^k_i,\varphi^k_{ij})$ coincide: they are $(g^k_i = \varphi^k_i,\ t^k_{ij} = 0)$ and $(g^k_i = \delta^k_i,\ t^k_{ij} = \varphi^k_{ij})$, respectively.
Therefore, with respect to the algebraic coordinates the product of the group $G^1$ looks like
(see \eqref{eq:G_1_as_extension_1}):
\begin{equation}
(g^k_i, t^k_{ij}) \cdot (h^k_i, q^k_{ij}) = (g^k_s h^s_i, \widetilde{h}^k_s t^s_{pq} h^p_i h^q_j + q^k_{ij}).
\label{eq:product_G_1_wrt_algebraic_coordinates}
\end{equation}
\subsection{Algebraic coordinates on $B^2(M)$. Description of $P^1(M)$ with respect to algebraic coordinates}
One can easily see that $D^2(m) = (GL(m))^1$. Therefore, we can consider the algebraic coordinates on $D^2(m)$ which
give rise to the \emph{algebraic coordinates on the total space $B^2(M)$}.
Namely, if $(U,u)$ is a coordinate chart on $M$, and $(u^k, u^k_i, u^k_{ij})$ are the corresponding natural coordinates on $(\pi^2)^{-1}(U)$, for the \emph{algebraic coordinates} on $(\pi^2)^{-1}(U)$ we take
\begin{equation}
(u^k, p^k_i, p^k_{ij}) \text{ where } p^k_i = u^k_i, p^k_{ij} = \widetilde{u}^k_s u^s_{ij}.
\label{eq:algebraic_coordinates_on_B_2}
\end{equation}
In fact, we change coordinates on the second factor of $U \times D^2(m) \cong (\pi^2)^{-1}(U)$.
With respect to the algebraic coordinates the first prolongation $P^1$ of an integrable $G$-structure $P$ is described in the following way:
\begin{equation}
\left. P^{1} \right|_U = \left\{ (u^k, p^k_i, p^k_{ij}) \mid \|p^k_i\| \in G, \|p^k_{ij}\| \in \mathfrak{g}^1 \right\}.
\label{eq:P_1_wrt_algebraic_coordinates}
\end{equation}
If we have two coordinate charts of the atlas $\mathcal{A}$ on $M$ and $v^k = v^k(u^i)$ is the coordinate change, the corresponding gluing map \eqref{eq:gluing_maps_for_B_2} of trivializing charts of $P^1$ is written with respect to the algebraic coordinates as follows:
\begin{equation}
g : U \cap U' \to G^1, \quad
g(x) =
\left(\frac{\partial v^k}{\partial u^i}(u(x)), \frac{\partial u^k}{\partial v^s}(v(x)) \frac{\partial^2 v^s}{\partial u^i \partial u^j}(u(x))\right).
\label{eq:gluing_map_of_P_2_wrt_algebraic_coordinates}
\end{equation}
\subsection{$P^1(M)$ in terms of $P(M)$}
\section{First prolongation of $G$-structure and associated bundles.}
Let us now consider the constructions of the previous section for cases $k=1$ and $k=2$.
The case $k=1$ is rather simple.
We have $D^1(m) \cong GL(m)$ and $B^1(M)$ is the coframe bundle of $M$,
because to each $j^1_x f$ we can put in correspondence the coframe $\{f^* du^i\}$ at $x \in M$.
So we will consider in details the case $k=2$.
\subsection{First prolongation of the Lie subgroup $G \subset GL(m)$}
\label{subsec:first_prolongation_of_lie_subgroup_GL(m)}
Now let us consider the set $\widetilde{D}^2(m) = \left\{ (\varphi^k_i, \varphi^k_{ij}) \right\}$, where $\|\varphi^k_i\|$ is an invertible matrix, and $\varphi^k_{ij}$ are not necessarily symmetric with respect to the lower indices.
Then, the set $\widetilde{D}^2(m)$ endowed with the operation $*$ given by \eqref{eq:product_D_2} is a Lie group called the \emph{nonholonomic differential group of second order}.
We will call it \emph{first prolongation of the group} $GL(m)$ and denote by $GL^{(1)}(m)$.
On the group $GL^{(1)}(m)$ we can introduce another coordinate system:
\begin{equation}
g^k_j = \varphi^k_j, \quad a^k_{ij} = \widetilde{\varphi}^k_s \varphi^s_{ij}
\label{eq:new_coorinates_differential_group}
\end{equation}
Then, with respect to these coordinates, by \eqref{eq:product_D_2}, we see that the product $*$ can be written as follows:
\begin{equation}
(g^k_i, a^k_{ij}) * (h^k_i, b^k_{ij}) = (g^k_s h^s_i, \widetilde{h}^k_s a^s_{pq} h^p_i h^q_j + b^k_{ij}).
\label{eq:product_new_coordinates}
\end{equation}
These formulas can be written in a matrix form. To do this, we consider $a^k_{ij}$ as a map $a : \mathbb{R}^m \to \mathfrak{gl}(m)$, $w^k \mapsto a^k_{ij}w^j$.
Then, \eqref{eq:product_new_coordinates} takes the form
\begin{equation}
(g,a)*(h,b) = (g h, ad h^{-1} a \circ h + b).
\label{eq:product_matrix_form}
\end{equation}
Therefore, the group
\begin{equation*}
GL^{(1)} \cong (GL(m)\times Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{gl}(m)),*),
\end{equation*}
where $*$ is defined in \eqref{eq:product_matrix_form}.
\begin{remark}
The vector space $Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{gl}(m))$ is a right $GL(m)$-module with respect to the action
\begin{equation}
\forall g \in GL(m), \quad a \in Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{gl}(m)), \quad g \cdot a = ad g^{-1} a g.
\label{eq:hom(R_m,gl(m))_is_a_right_GL(m)_module}
\end{equation}
The group $(GL(m)\times Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{gl}(m)),*)$ is the extension of the group $GL(m)$
by the right $GL(m)$-module $Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{gl}(m))$. This fact motivates the definition of the first prolongation for a subgroup $G \subset GL(m)$.
\end{remark}
\begin{remark}
The same considerations can be done for the holonomic jet group $D^2(m)$.
\end{remark}
\subsubsection{First prolongation of a Lie subgroup $G \subset GL(m)$}
The considerations of the previous subsection motivate
\begin{definition}
Let $G \subset GL(m)$ be a Lie subgroup. Then the \emph{first prolongation of G} is the group
\begin{equation}
G^{(1)} = G \times Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{g}(G))
\end{equation}
with product
\begin{equation*}
(g_1,a_1)(g_2,a_2) = (g_1 g_2, ad g_2^{-1} a_1 g_2 + a_2).
\end{equation*}
\end{definition}
\begin{remark}
In this case also the vector space $Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{g}(G))$ is a right $G$-module with respect to the action
\begin{equation}
\forall g \in G, \quad a \in Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{g}(G)), \quad g \cdot a = ad g^{-1} a g.
\label{eq:hom(R_m,g)_is_a_right_G_module}
\end{equation}
The group $(G\times Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{g}(G)),*)$ is the extension of the group $G$
by the right $G$-module $Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{g}(G))$.
Therefore we have the short exact sequence of Lie groups:
\begin{equation*}
0 \to Hom_{\mathbb{R}}(\mathbb{R}^m,\mathfrak{g}(G)) \to G^{(1)} \overset{\pi}{\longrightarrow} G \to e.
\end{equation*}
The Lie homomorphism $\pi: G^{(1)} \rightarrow G$ comes from the natural projection of 2-jets onto 1-jets.
\end{remark}
\subsubsection{Another coordinate system on $B^2(M)$}
Now recall that on $D^2(m)$ we can take another coordinate system $(g^k_i, a^k_{ij})$
(see \eqref{eq:new_coorinates_differential_group}).
With respect to the coordinates $(g^k_i, a^k_{ij})$, the $D^2(m)$-action on $B^2(M)$ is written as
\begin{equation}
h^k_i = \widetilde{g}^k_s f^s_i, \quad
h^k_{ij} = \widetilde{g}^k_s f^s_{ij} -
a^k_{lm} \widetilde{g}^l_t\widetilde{g}^m_r f^t_i f^r_j.
\label{eq:action_of_D_2_wrt_coordinates_(g,a)}
\end{equation}
Let us find the expression for a trivialization
\begin{equation*}
(\pi^2)^{-1}(U) \to U \times D^2(m)
\end{equation*}
with respect to the coordinates $(g,a)$.
For this purpose we take a section $s(x^i) = (x^i,\delta^k_i,0)$ of $B^2(M)$ over $U$; in fact this is the natural coframe of order $2$, that is, it consists of the $2$-jets of the coordinate functions.
Then any point $b^2 = (x^i,f^k_i,f^k_{ij})$ can be written as $b^2 = s(x) \cdot (g,a)$, and the trivialization is given by
\begin{equation}
b^2 = (x^i,f^k_i,f^k_{ij}) \leftrightarrow (x, (g,a)).
\end{equation}
Using \eqref{eq:action_of_D_2_wrt_coordinates_(g,a)}, and the coordinate expression $s(x)$, we get
\begin{equation}
f^k_i = \widetilde{g}^k_i, \quad f^k_{ij} = - a^k_{lm} \widetilde{g}^l_i \widetilde{g}^m_j.
\end{equation}
Now let us write the gluing functions with respect to the coordinates $(p^k_i,p^k_{ij})$ on $B^2(M)$
and $(g^k_i,a^k_{ij})$.
To do this we use the coordinate change \eqref{eq:change_of_coordinates_f_to_p} on $B^2(M)$ and
\eqref{eq:new_coorinates_differential_group} on the group $D^2(m)$.
We have
\begin{equation*}
\begin{split}
p^k_i = \widetilde{f}^k_i, \quad p^k_{ij} = - f^k_{lm} \widetilde{f}^l_i \widetilde{f}^m_j
\\
\bar x^k_i = g^k_i, \quad \bar x^k_{ij} = g^k_s a^s_{ij}
\end{split}
\end{equation*}
and so,
\begin{equation}
f^k_i = \widetilde{p}^k_i, \quad f^k_{ij} = - p^k_{lm} \widetilde{p}^l_i \widetilde{p}^m_j
\label{eq:inverse_coordinate_transformation_B^2(M)}
\end{equation}
\begin{equation}
\begin{split}
& (x^k,f^k_i,f^k_{ij}) \to (x^k, (p^k_i,p^k_{ij}) ), \text{ where }
\\
&p^k_i = \widetilde{f}^k_i, \quad p^k_{ij} = - f^k_{lm} \widetilde{f}^l_i \widetilde{f}^m_j.
\end{split}
\label{eq:change_of_coordinates_f_to_p}
\end{equation}
Using these formulas, and \eqref{eq:action_of_D_2_wrt_coordinates_(g,a)}, we can write the $D^2(m)$-action with respect to the coordinates $(x^i,p^k_i,p^k_{ij})$.
If $(x^i,(p^k_i,p^k_{ij}))\cdot(g,a) = (x^i,(q^k_i,q^k_{ij}))$, then
\begin{equation}
q^k_i = p^k_s g^s_i
\quad
q^k_{ij} = \widetilde{g}^k_s p^s_{lm} g^l_i g^m_j + a^k_{ij}.
\label{eq:action_of_D_2_wrt_coordinates_p_and_(g,a)}
\end{equation}
So, as it should be, at the second argument we get the product of elements of the group $D^2(m)$ (cf.
\eqref{eq:product_new_coordinates}).
Hence, first of all, comparing the first-order terms, we get that
\begin{equation}
\widetilde{\bar p}{}^k_s g^s_i = \widetilde{p}{}^k_i, \text{ and hence } \bar p^k_i = \widetilde{g}^k_s p^s_i.
\label{eq:first_order_coordinate_change}
\end{equation}
Now, combining the second-order terms with
\eqref{eq:inverse_coordinate_transformation_B^2(M)}, we get
\begin{equation*}
- \bar p^k_{rt}\, \widetilde{\bar p}{}^r_l\widetilde{\bar p}{}^t_m g^l_i g^m_j +
\widetilde{\bar p}{}^k_s g^s_t a^t_{ij} = f^k_{ij},
\end{equation*}
hence follows
\begin{equation*}
-\bar p^k_{lm} \widetilde{p}^l_i \widetilde{p}^m_j + \widetilde{p}^k_s a^s_{ij} = f^k_{ij} =
- p^k_{lm} \widetilde{p}^l_i \widetilde{p}^m_j.
\end{equation*}
Finally, we obtain
\begin{equation*}
\bar p^k_{ij} = p^k_{ij} + \widetilde{p}^k_s a^s_{lm} p^l_i p^m_j.
\end{equation*}
As the result, we get the following theorem.
\begin{theorem}
\label{}
\end{theorem}
Note that
\begin{equation}
x^k_i = \widetilde{\bar x}{}^k_i,
\label{eq:correspondence_between_x_and_bar_x}
\end{equation}
With this notation, we have
\begin{equation}
\bar f^k_i =
\label{eq:change_coordinates_B_2(M)}
\end{equation}
\section{Prolongation of $G$-structure}
\subsection{First prolongation of integrable $G$-structure}
Let $P(M,G)$ be an integrable $G$-structure, this means that there exists an atlas $(U_\alpha,u_\alpha)$ such that $\left\{ \frac{\partial}{\partial u_\alpha} \right\}$ are the sections of $P$.
A first prolongation of $P(M,G)$ is the subbundle in $B^2(M)$ with the total space
\begin{equation}
P^1(M) = \left\{ j^2_x f \mid \left( \frac{\partial (f \circ u^{-1})^k}{\partial u^i}|_{u(x)} \right) \in G \right\}
\end{equation}
and the structure group $G^1 \subset D^2(m)$ (the holonomic prolongation of $G$):
\begin{equation}
G^1 = \left\{ j^2_0 \varphi \in D^2(m) \mid \left(\frac{\partial\varphi^k}{\partial u^i}\right) \in G \right\}.
\end{equation}
The product in $G^1$ is induced by the chain rule: if
$j^k_0 \varphi = (\varphi^k_i, \varphi^k_{i_1i_2}, \dots, \varphi^k_{i_1,\dots,i_k})$
$j^k_0 \psi = (\psi^k_i, \psi^k_{i_1i_2}, \dots, \psi^k_{i_1,\dots,i_k})$,
then
\begin{equation}
\eta^k_i = \psi^k_s \varphi^s_i,
\quad
\eta^k_{ij} = \psi^k_{pq} \varphi^p_i \varphi^q_j + \psi^k_s \varphi^s_{ij}, \dots
\end{equation}
\subsection{Prolongation of $G$-structure}
\subsubsection{Nonholonomic jet bundle}
A $k$-th order nonholonomic jet is a ``nonsymmetric'' Taylor series and is defined by the coordinates:
\begin{equation}
f^j_0 = \left( f^j, f^j_i, f^j_{i_1 i_2}, \dots, f^j_{i_1,i_2,\dots,i_k} \right),
\end{equation}
where $f^j_{i_1 \dots i_j}$ are not supposed to be symmetric with respect to lower indices.
\subsection{Nonholonomic differential group}
The nonholonomic $k$-th order differential group is
$D^{(k)}(m) = \{\varphi^k_0 = \left( \varphi^j, \varphi^j_i, \varphi^j_{i_1 i_2}, \dots, \varphi^j_{i_1,i_2,\dots,i_k} \right) \mid \det \left(\varphi^j_i\right) \ne 0\}$,
and the product is also ``induced by the chain rule'':
if $\eta^k_0 = \psi^k_0 \cdot \varphi^k_0$ and
$\varphi^k_0 = (\varphi^j_i, \varphi^j_{i_1i_2}, \dots, \varphi^j_{i_1,\dots,i_k})$
$\psi^k_0 = (\psi^j_i, \psi^j_{i_1i_2}, \dots, \psi^j_{i_1,\dots,i_k})$,
then
$\eta^j_i = \psi^j_s \varphi^s_i$, $\eta^j_{i_1i_2} = \psi^j_{p_1p_2} \varphi^{p_1}_{i_1} \varphi^{p_2}_{i_2} + \psi^j_s \varphi^s_{i_1i_2}$, \dots
It is clear that $D^k(m)$ is a Lie subgroup of $D^{(k)}(m)$, so we have the left $D^k(m)$-action on $D^{(k)}(m)$.
A \emph{nonholonomic $k$-th order jet bundle} is the locally trivial bundle over $M$ with the fiber $D^{(k)}(m)$
associated with the $D^k(m)$-principal bundle $B^k(M)$ with respect to the left $D^k(m)$-action on $D^{(k)}(m)$.
\subsection{First prolongation of arbitrary $G$-structure}
Let $P(M,G)$ be a $G$-structure.
A \emph{first prolongation of $P(M,G)$} is the subbundle in $B^{(2)}(M)$ with the total space
\begin{equation}
P^{(1)}(M) = \left\{ f^2_0 \in B^{(2)}(M) \mid \left( f^j_i \right) \in G \right\}
\end{equation}
and the structure group $G^{(1)} \subset D^{(2)}(m)$ (the nonholonomic prolongation of $G$):
\begin{equation}
G^{(1)} = \left\{ \varphi^2_0 \in D^{(2)}(m) \mid \left(\varphi^j_i\right) \in G \right\}.
\end{equation}
\section{First prolongation of $G$-structure in terms of coframe bundle}
The first prolongation $P^{(1)}$ of $P$ can be expressed in terms of $P$ in the following ways:
\begin{equation}
P^{(1)} = \{p^1 : \mathbb{R}^m \to T_p P \mid \theta_p \circ p^1 = 1_{\mathbb{R}^m} \},
\label{eq:P_1_1}
\end{equation}
or
\begin{equation}
P^{(1)} = \{\omega : T_p P \to \mathfrak{g} | \omega \sigma_p = 1_{\mathfrak{g}} \}
\label{eq:P_1_2}
\end{equation}
or
\begin{equation}
P^{(1)} = \left\{ H_p \mid H_p \oplus V_p = T_p P\right\}.
\label{eq:P_1_3}
\end{equation}
We will mainly use the first representation \eqref{eq:P_1_1}, but note that the third representation \eqref{eq:P_1_3}
says that, geometrically, $P^1$ consists of tangent subspaces transversal to vertical subspaces, i.\,e., of connections.
The projection $\pi^1_0 : P^1 \to P$ is defined in terms of \eqref{eq:P_1_1} as follows: $(p^1 : \mathbb{R}^m \to T_p P) \mapsto p$.
\begin{theorem}[Algebraic structure of $G^{(1)}$]
$G^{(1)}$ is isomorphic to the extension of $G$ via the $G$-module $\mathcal{L}(\mathbb{R}^n,\mathfrak{g})$:
\begin{equation}
G^{(1)} = G \times \mathcal{L}(\mathbb{R}^n,\mathfrak{g}), \quad (g_1,a_1)*(g_2,a_2) = (g_1 g_2, {\rm Ad}\, g_2^{-1}\, a_1 + a_2).
\end{equation}
\end{theorem}
Action of $G^{(1)}$ on $P^{(1)}$ is described in the following way:
for $b^1 : \mathbb{R}^m \to T_b P$, and $g^1=(g,a) \in G^1$:
\begin{equation}
R^{(1)}_{g^1} b^1 = dR_g (b^1 \circ g) + \sigma_{bg} \circ a \circ g.
\end{equation}
\section{First prolongation of equivariant map}
\subsection{First prolongation of a $G$-space}
Let $V$ be a manifold, then the first prolongation of $V$ is
\begin{equation}
V^{(1)} = \left\{ v^1 : \mathbb{R}^m \to T_v V \mid v \in V \right\},
\end{equation}
Let $\rho : G \times V \to V$ be a left action, then the first prolongation of $\rho$ is
\begin{equation}
\rho^{(1)} : G^{(1)} \times V^{(1)} \to V^{(1)}, \quad
\rho^{(1)}(g^1,v^1) = [dL_g \circ v^1 + \sigma_{gv}\circ a] \circ g^{-1}.
\end{equation}
\subsection{Prolongation of equivariant map}
$f : P \to V$ is an equivariant map.
Prolongation of $f$ is $f^{(1)} : P^{(1)} \to V^{(1)}$, $f^{(1)}(b^1) = df_{\pi^1(b^1)} \circ b^1$.
An equivariant map $f : P \to V$ determines a section $s : M \to E$, where $\pi_E : E \to M$ is a bundle with standard fiber $V$ associated with $P$.
$f^1 : P^1 \to V^1$ maps $b^1 \in P^1$ to the coordinates of $(s(\pi(b)),(\nabla s)(\pi(b)))$ with respect to $b$, where $\nabla$ corresponds to $b^1$.
\begin{example}[Simple example: vector field on $\mathbb{R}^n$]
$V$ is a vector field on $\mathbb{R}^m$.
The corresponding equivariant map is
\begin{equation}
f : B(\mathbb{R}^m) \to \mathbb{R}^m, b = \{\eta^a\}, f(b) = \{\eta^a(V(\pi(b)))\}.
\end{equation}
Then the first prolongation of $f$ is defined as follows.
If $b^{(1)} : e_i \in \mathbb{R}^m \to
\frac{\partial}{\partial x^i} + \Gamma^k_{ij} \frac{\partial}{\partial x^k_j}$,
then
\begin{equation}
f^{(1)} (b^{(1)}) = (V, \nabla(\omega) V) = (V^i, \partial_j V^i + \Gamma^i_{js} V^s).
\end{equation}
Action of $G^1$ is written as follows
\begin{equation}
(g,a)(V,\nabla(\omega) V) = (\tilde g^k_i V^i, \tilde g^k_s \nabla_m V^s g^m_i + \tilde g^k_s a^s_{mj} V^j g^m_i),
\end{equation}
At $x_0$ such that $V(x_0) = 0$, we get the action
\begin{equation}
(g,a) (0,\nabla(\omega) V) = (0, \tilde g^k_s \nabla_m V^s g^m_i) = (0, \tilde g^k_s \partial_m V^s g^m_i)
\end{equation}
Therefore, the prolonged action coincides with the action of the group $GL(n)$ on the vector space of $n\times n$-matrices by conjugation. The invariants of this action are well known, for example, these are
the trace and the determinant.
Therefore, to find the invariants of a zero $x_0$ of a vector field $V$ we have to
find the matrix $[\partial_i V^j(x_0)]$ and then write the invariants of this matrix under the conjugation, for example, one of them is $\det [\partial_i V^j(x_0)]$.
\end{example}
\end{document} |
\begin{document}
% NOTE(review): the macro names below were destroyed by source extraction
% (each \def was followed by its own expansion instead of a control sequence);
% they are reconstructed here — verify the names against the original preamble.
\def\proof{\paragraph{Proof.}}
\def\qed{%
$\square$}
\def\endproof{%
$\square$}
\def\Z{{\mathbb Z}}\def\N{{\mathbb N}} \def\C{{\mathbb C}}
\def\Q{{\mathbb Q}}\def\R{{\mathbb R}} \def\E{{\mathbb E}}
\def\P{{\mathbb P}}
\def\lg{{\rm lg}}\def\Id{{\rm Id}}\def\GG{{\cal G}}
\def\A{{\cal A}}\def\cd{{\rm cd}}\def\mf{{\rm mf}}
\def\rkAb{{\rm rkAb}}\def\rkZ{{\rm rkZ}}\def\Ab{{\rm Ab}}
\def\HH{{\cal H}}\def\Ker{{\rm Ker}}
\title{\bf{Artin groups of spherical type up to isomorphism}}
\author{
\textsc{Luis Paris}}
\date{\today}
\maketitle
\begin{abstract}
We prove that two Artin groups of spherical type are isomorphic if and only if their defining
Coxeter graphs are the same.
\end{abstract}
\noindent
{\bf AMS Subject Classification:} Primary 20F36.
\section{Introduction}
Let $S$ be a finite set. Recall that a {\it Coxeter matrix} over $S$ is a matrix
$M=(m_{s\,t})_{s,t \in S}$ indexed by the elements of $S$ such that $m_{s\,s}=1$ for all $s \in
S$, and $m_{s\,t}=m_{t\,s} \in \{2, 3, 4, \dots, +\infty\}$ for all $s,t \in S$, $s \neq t$. A
Coxeter matrix $M=(m_{s\,t})$ is usually represented by its {\it Coxeter graph}, $\Gamma$, which
is defined as follows. The set of vertices of $\Gamma$ is $S$, two vertices $s,t$ are joined by
an edge if $m_{s\,t}\ge 3$, and this edge is labelled by $m_{s\,t}$ if $m_{s\,t} \ge 4$. For $s,t
\in S$ and $m \in {\mathbb Z}_{\ge 2}$, we denote by $w(s,t:m)$ the word $sts \dots$ of length $m$. The {\it Artin
group} associated to $\Gamma$ is defined to be the group $G=G_\Gamma$ presented by
$$
G= \langle S\ |\ w(s,t:m_{s\,t}) = w(t,s:m_{s\,t})\text{ for } s,t \in S,\ s \neq t\text{ and }
m_{s\,t} < +\infty \rangle\,.
$$
The {\it Coxeter group} $W=W_\Gamma$ associated to $\Gamma$ is the quotient of $G$ by the
relations $s^2=1$, $s\in S$. We say that $\Gamma$ (or $G$) is of {\it spherical type} if $W$ is
finite, that $\Gamma$ (or $G$) is {\it right-angled} if $m_{s\,t} \in \{2, +\infty \}$ for all
$s,t \in S$, $s \neq t$, and that $G$ (or $W$) is {\it irreducible} if $\Gamma$ is connected. The
number $n=|S|$ is called the {\it rank} of $G$ (or of $W$).
One of the main questions in the subject is the classification of Artin groups up to isomorphism
(see \cite{Bes2}, Question 2.14). This problem is far from being completely solved as Artin groups
are poorly understood in general.
For example, we do not know whether all Artin groups are torsion free, and we do not know
any general solution to the word problem for these groups. The only known results concerning this
classification question are contained in a work by Brady, McCammond, M\"uhlherr, and Neumann \cite{BMMN},
where the authors determine a sort of transformation on Coxeter graphs which does not change the
isomorphism class of the associated Artin groups, and a work by Droms \cite{Dro}, where it is proved
that, if $\Gamma$ and $\Omega$ are two right-angled Coxeter graphs whose associated Artin groups
are isomorphic, then $\Gamma= \Omega$. Notice that an Artin group is biorderable if and only if
it is right-angled, hence a consequence of Droms' result is that, if $\Gamma$ is a right-angled
Coxeter graph and $\Omega$ is any Coxeter graph, and if the Artin groups associated to $\Gamma$
and $\Omega$ are isomorphic, then $\Gamma=\Omega$. The fact that right-angled Artin groups are
biorderable is proved in \cite{DuTh}. In order to show that the remaining Artin groups are not
biorderable, one has only to observe that, if $2<m_{s\,t}< +\infty$, then $(st)^{m_{s\,t}} =
(ts)^{m_{s\,t}}$ and $st \neq ts$, and that, in a biorderable group, two distinct elements cannot
have a common $m$-th power for a fixed $m$.
In this paper we answer the classification question in the restricted framework of spherical type
Artin groups. More precisely, we prove the following.
\begin{thm}
Let $\Gamma$ and $\Omega$ be two spherical type Coxeter graphs, and let $G$ and $H$ be the Artin
groups associated to $\Gamma$ and $\Omega$, respectively. If $G$ is isomorphic to $H$, then
$\Gamma=\Omega$.
\end{thm}
\noindent
{\bf Remark.} I do not know whether a non spherical type Artin group can be isomorphic to a
spherical type Artin group.
Artin groups were first introduced by Tits \cite{Tit2} as extensions of Coxeter groups. Later,
Brieskorn \cite{Bri1} gave a topological interpretation of the Artin groups of spherical type in
terms of complements of discriminantal varieties.
Define a (real) {\it reflection group} of rank $n$ to be a finite subgroup $W$ of $GL(n,\R)$
generated by reflections. Such a group is called {\it essential} if there is no non-trivial
subspace of $\R^n$ on which $W$ acts trivially. Let ${\cal A}$ be the set of reflecting hyperplanes of
$W$, and, for $H \in {\cal A}$, let $H_\C$ denote the complexification of $H$, {\it i.e.} the complex
hyperplane in $\C^n$ defined by the same equation as $H$. Then $W$ acts freely on $M(W)= \C^n
\setminus \cup_{H \in {\cal A}} H_\C$, and, by \cite{Che}, $N(W)= M(W)/W$ is the complement in $\C^n$
of an algebraic variety, $D(W)$, called {\it discriminantal variety} of type $W$. Now, take
a spherical type Coxeter graph $\Gamma$, and consider the associated Coxeter group
$W=W_\Gamma$. By \cite{Cox}, the group $W$ can be represented as an essential reflection group in
$GL(n,\R)$, where $n=|S|$ is the rank of $W$, and, conversely, any essential reflection group of
rank $n$ can be uniquely obtained in this way. By \cite{Bri1}, $\pi_1(N(W))$ is the Artin group
$G=G_\Gamma$ associated to $\Gamma$.
So, a consequence of Theorem 1.1 is that $\pi_1(N(W))= \pi_1( \C^n \setminus D(W))$ completely
determines the reflection group $W$ as well as the discriminantal variety $D(W)$.
Since the work of Brieskorn and Saito \cite{BrSa} and that of Deligne \cite{Del}, the
combinatorial theory of spherical type Artin groups has been well studied. In particular, these
groups are known to be biautomatic (see \cite{Cha2}, \cite{Cha}), and torsion-free. This last
result is a direct consequence of \cite{Del} and \cite{Bri1}, it is explicitly proved in
\cite{Deh}, and it shall be of importance in the remainder of the paper.
The first step in the proof of Theorem 1.1 consists of calculating some invariants for
spherical type Artin groups (see Section 3). It actually happens that these invariants separate
the irreducible Artin groups of spherical type (see Proposition 5.1). Afterwards,
for a given isomorphism $\varphi: G \to H$ between spherical type Artin groups, we
show that, up to some details, $\varphi$ sends each irreducible component of $G$ injectively into
a unique irreducible component of $H$, and that both components have the same invariants. In
order to do that, we first need to show that an irreducible Artin group $G$ cannot be decomposed
as a product of two subgroups which commute, unless one of these subgroups lies in the center of
$G$ (see Section 4).
From now on, $\Gamma$ denotes a spherical type Coxeter graph, $G$ denotes its associated Artin
group, and $W$ denotes its associated Coxeter group.
\noindent
{\bf Acknowledgments.} The idea of looking at centralizers of ``good'' elements in the
proof of Proposition 4.2 is a suggestion of Benson Farb. I am grateful to him for this clever idea
as well as for all his useful conversations. I am also grateful to Jean Michel who pointed out to
me his work with Michel Brou\'e, and to John Crisp for so many discussions on everything
concerning this paper.
\section{Preliminaries}
We recall in this section some well-known results on Coxeter groups and Artin groups.
For a subset $X$ of $S$, we denote by $W_X$ the subgroup of $W$ generated by $X$, and by $G_X$ the
subgroup of $G$ generated by $X$. Let $\Gamma_X$ be the full Coxeter subgraph of $\Gamma$ whose
vertex set is $X$. Then $W_X$ is the Coxeter group associated to $\Gamma_X$ (see \cite{Bou}), and $G_X$
is the Artin group associated to $\Gamma_X$ (see \cite{Lek} and \cite{Par1}). The subgroup $W_X$ is
called {\it standard parabolic subgroup} of $W$, and $G_X$ is called {\it standard parabolic
subgroup} of $G$.
For $w \in W$, we denote by ${\rm lg} (w)$ the word length of $w$ with respect to $S$. The group $W$ has
a unique element of maximal length, $w_0$, which satisfies $w_0^2=1$ and $w_0 S w_0 =S$, and whose
length is $m_1 + \dots + m_n$, where $m_1, m_2, \dots, m_n$ are the exponents of $W$.
The connected spherical Coxeter graphs are exactly the graphs $A_n$ ($n \ge 1$), $B_n$ ($n\ge 2$),
$D_n$ ($n\ge 4$), $E_6$, $E_7$, $E_8$, $F_4$, $H_3$, $H_4$, $I_2(p)$ ($p \ge 5$) represented in
\cite{Bou}, Ch. IV, \S 4, Thm. 1. (Here we use the notation $I_2(6)$ for the Coxeter graph $G_2$.
We may also use the notation $I_2(3)$ for $A_2$, and $I_2(4)$ for $B_2$.)
Let $F:G \to W$ be the natural epimorphism which sends $s$ to $s$ for all $s \in S$. This
epimorphism has a natural set-section $T: W \to G$ defined as follows. Let $w \in W$, and let
$w=s_1s_2 \dots s_l$ be a reduced expression of $w$ ({\it i.e.} $l={\rm lg}(w)$). Then $T(w)=s_1s_2
\dots s_l \in G$. By Tits' solution to the word problem for Coxeter groups \cite{Tit}, the
definition of $T(w)$ does not depend on the choice of the reduced expression.
Define the {\it Artin monoid} associated to $\Gamma$ to be the (abstract) monoid $G^+$ presented by
$$
G^+= \langle S\ |\ w(s,t: m_{s\,t}) = w(t,s: m_{s\,t}) \text{ for } s\neq t \text{ and } m_{s\,t}
<+\infty \rangle^+\,.
$$
By \cite{BrSa}, the natural homomorphism $G^+ \to G$ which sends $s$ to $s$ for all $s \in S$ is
injective. Note that this fact is always true, even if $\Gamma$ is not assumed to be of spherical
type (see \cite{Par2}).
The {\it fundamental element} of $G$ is defined to be $\Delta= T(w_0)$, where $w_0$ denotes the
element of $W$ of maximal length. For $X \subset S$,
we denote by $w_X$ the element of $W_X$ of maximal length,
and by $\Delta_X=T(w_X)$ the fundamental element of $G_X$.
The defining relations of $G^+$ being homogeneous, we can define two partial orders $\le_L$ and
$\le_R$ on $G^+$ as follows.
$\bullet$ We set $a \le_L b$ if there exists $c \in G^+$ such that $b=ac$.
$\bullet$ We set $a \le_R b$ if there exists $c \in G^+$ such that $b=ca$.
Now, the following two propositions are a mixture of several well-known results from \cite{BrSa}
and \cite{Del}.
\begin{prop}
(1) $G^+$ is cancellative.
(2) $(G^+, \le_L)$ and $(G^+, \le_R)$ are lattices.
(3) $\{a \in G^+; a\le_L \Delta\} = \{a \in G^+; a\le_R \Delta \} = T(W)$.
$\square$
\end{prop}
Note that the fact that $G^+$ is cancellative is true even if $\Gamma$ is not of spherical
type (see \cite{Mic}). The elements of $T(W)$ are called {\it simple elements}. We shall denote
the lattice operations of $(G^+, \le_L)$ by $\vee_L$ and $\wedge_L$,
and the lattice operations of $(G^+, \le_R)$ by $\vee_R$ and $\wedge_R$.
Define the {\it quasi-center} of $G$ to be the subgroup
$QZ(G)=\{ a \in G; aSa^{-1} = S \}$.
\begin{prop}
Assume $\Gamma$ to be connected.
(1) For $X \subset S$ we have
$$
\vee_L \{s; s\in X\} = \vee_R \{s; s\in X\} = \Delta_X\,.
$$
In particular,
$$
\vee_L \{s; s\in S\} = \vee_R \{s; s\in S\} = \Delta\,.
$$
(2) There exists a permutation $\mu: S \to S$ such that $\mu^2= \Id$ and $\Delta s = \mu(s)
\Delta$ for all $s \in S$.
(3) The quasi-center $QZ(G)$ of $G$ is an infinite cyclic subgroup generated by $\Delta$.
(4) The center $Z(G)$ of $G$ is an infinite cyclic subgroup of $G$ generated either by $\delta=
\Delta$ if $\mu=\Id$, or by $\delta= \Delta^2$ if $\mu\neq \Id$.
$\square$
\end{prop}
The generator $\delta$ of $Z(G)$ given in the above proposition shall be
called the {\it standard generator} of $Z(G)$. Note also that the assumption ``$\Gamma$ is
connected'' is not needed in (1) and (2). Let $\Gamma$ be connected. Then $\mu \neq \Id$ if and
only if $\Gamma$ is either $A_n$, $n\ge 2$, or $D_{2n+1}$, $n\ge 2$, or $E_6$, or $I_2(2p+1)$,
$p\ge 2$ (see \cite{BrSa}, Subsection 7.2).
Now, the following result can be found in \cite{Cha}.
\begin{prop}[Charney \cite{Cha}]
Each $a \in G$ can be uniquely written as $a=bc^{-1}$ where $b,c \in G^+$ and $b \wedge_R c =1$.
$\square$
\end{prop}
The expression $a=bc^{-1}$ of the above proposition shall be called the {\it Charney form} of $a$.
An easy observation shows that, if $s_1s_2 \dots s_l$ and $t_1 t_2 \dots t_l$ are two positive
expressions of a same element $a \in G^+$, then the sets $\{s_1, \dots, s_l\}$ and $\{t_1, \dots,
t_l\}$ are equal. In particular, if $a \in G_X^+$, then all the letters that appear in any
positive expression of $a$ lie in $X$. A consequence of this fact is the following.
\begin{lemma}
Let $X$ be a subset of $S$, let $a\in G_X$, and let $a=bc^{-1}$ be the Charney form of $a$ in $G$.
Then $b,c \in G_X^+$ and $a=bc^{-1}$ is the Charney form of $a$ in $G_X$.
\end{lemma}
\paragraph{Proof.}
Let $\vee_{X,R}$ and $\wedge_{X,R}$ denote the lattice operations of $(G_X^+,\le_R)$. The above
observation shows that, if $a\le_R b$ and $b \in G_X^+$, then $a \in G_X^+$. This implies that $b
\wedge_{X,R} c = b \wedge_R c$ for all $b,c \in G_X^+$. Now, let $a \in G_X$ and let $a=bc^{-1}$
be the Charney form of $a$ in $G_X$. We have $b,c \in G_X^+ \subset G^+$ and $b \wedge_R c = b
\wedge_{X,R} c = 1$, thus $a=bc^{-1}$ is also the Charney form of $a$ in $G$.
$\square$
\begin{corollary}
Let $X$ be a subset of $S$. Then $G_X \cap G^+ = G_X^+$.
$\square$
\end{corollary}
\begin{corollary}
Let $X$ be a subset of $S$, $X \neq S$. Then $G_X \cap \langle \Delta \rangle = \{ 1 \}$.
\end{corollary}
\paragraph{Proof.}
Take $s \in S \setminus X$. By Proposition 2.2, we have $s \le_R \Delta$, thus $\Delta \not \in
G_X^+ = G_X \cap G^+$.
$\square$
\section{Invariants}
The purpose of the present section is to calculate some invariants of the spherical type Artin
groups.
The first invariant that we want to calculate is the cohomological dimension, denoted by $\cd
(G)$. We assume the reader to be familiar with this notion, and we refer to \cite{Bro} for
definitions and properties. Our result is the following.
\begin{prop}
Let $n=|S|$ be the rank of $G=G_\Gamma$. Then $\cd (G)=n$.
\end{prop}
\paragraph{Proof.}
Recall the spaces $M(W)$ and $N(W)$ defined
in the introduction. Recall also that $\pi_1(N(W))=G$, that $W$ acts freely on $M(W)$, and that
$N(W)=M(W)/W$. In particular, $\pi_1(M(W))$ is a subgroup of $\pi_1(N(W))=G$ (it is actually the
kernel of the epimorphism $F: G \to W$). Finally, recall the well-known fact that, if $H_1$
is a subgroup of a given group $H_2$, then $\cd (H_1) \le \cd (H_2)$.
Deligne proved in \cite{Del} that $M(W)$ is aspherical, and Brieskorn proved in \cite{Bri2} that
$H^n(M(W), {\mathbb Z})$ is a free abelian group of rank $\prod_{i=1}^n m_i \neq 0$, where $m_1, m_2, \dots, m_n$
are the exponents of $W$, thus $n \le \cd( \pi_1( M(W))) \le \cd(G)$. On the other hand, Salvetti
has constructed in \cite{Sal} an aspherical CW-complex of dimension $n$ whose fundamental group
is $G$, therefore $\cd (G) \le n$.
$\square$
The next invariant which interests us is denoted by $\mf (G)$ and is defined to be the maximal
order of a finite subgroup of $G/Z(G)$, where $Z(G)$ denotes the center of $G$. Its calculation
is based on Theorems 3.2 and 3.3 given below.
Recall the permutation $\mu: S \to S$ of Proposition 2.2. This extends to an isomorphism $\mu:
G^+ \to G^+$ which permutes the simple elements. Actually, $\mu(a)= \Delta a \Delta^{-1}$ for all
$a \in G^+$.
\begin{thm}[Bestvina \cite{Bes}]
Assume $\Gamma$ to be connected. Let $\GG= G/ \langle \Delta^2 \rangle$, and let $H$ be a finite
subgroup of $\GG$. Then
$H$ is a cyclic group, and,
up to conjugation, $H$ has one of the following two forms.
\noindent
{\bf Type 1:} The order of $H$ is even, say $2p$, and
there exists a simple element $a \in T(W)$ such that $a^p=\Delta$, $\mu(a)=a$, and
$\overline{a}$ generates $H$, where $\overline{a}$ denotes the element of $\GG$ represented by
$a$.
\noindent
{\bf Type 2:} The order of $H$ is odd, say $2p+1$, and
there exists a simple element $a \in T(W)$ such that $(a\, \mu(a))^{{p-1}\over 2} a =
\Delta$ and $\overline{ a\,\mu(a)}$ generates $H$.
$\square$
\end{thm}
Now, recall the so-called {\it Coxeter number} $h$ of $W$ (see \cite{Hum}, Section 3.18). Recall
also that this number is related to the length of $\Delta$ by the following formula
$$
{{nh}\over 2} = m_1+ \dots +m_n= {\rm lg} (\Delta)\,,
$$
where $n=|S|$ is the rank of $G$, and $m_1, \dots, m_n$ are the exponents of $W$.
\begin{thm}[Brieskorn-Saito \cite{BrSa}]
Choose any order $S=\{s_1,s_2, \dots, s_n\}$ of $S$ and write $\pi=s_1s_2 \dots s_n \in G$. Let
$h$ be the Coxeter number of $W$.
(1) If $\mu = \Id$, then $h$ is even and $\pi^{h \over 2} = \Delta$.
(2) If $\mu \neq \Id$, then $\pi^h= \Delta^2$.
$\square$
\end{thm}
Now, we can calculate the invariant $\mf (G)$.
\begin{prop}
Assume $\Gamma$ to be connected, and let $h$ be the Coxeter number of $W$.
(1) If $\mu = \Id$, then $\mf (G)= h/2$.
(2) If $\mu \neq \Id$, then $\mf (G)=h$.
\end{prop}
\paragraph{Proof.}
Assume $\mu=\Id$. Let $\GG^0= G/Z(G) = G/ \langle \Delta \rangle$. First, observe that
$\mf(G) \ge {h \over 2}$ by Theorem 3.3. So, it remains to prove that $\mf(G) \le {h \over 2}$,
namely, that $|H| \le {h \over 2}$ for any finite subgroup $H$ of $\GG^0$.
Let $H$ be a finite subgroup of $\GG^0$. Consider the exact sequence
$$
1 \to {\mathbb Z}}\def\N{{\mathbb N}} \def\C{{\mathbb C}/ 2{\mathbb Z}}\def\N{{\mathbb N}} \def\C{{\mathbb C} \to \GG \stackrel{\phi}{\rightarrow} \GG^0 \to 1\,,
$$
where $\GG= G/ \langle \Delta^2 \rangle$, and set $\tilde H= \phi^{-1}(H)$. By Theorem 3.2,
$\tilde H$ is a cyclic group and, up to conjugation, $\tilde H$ is either of Type 1 or of Type 2.
The order of $\tilde H$ is even, say $2p$, thus $\tilde H$ is of Type 1, and there
exists a simple element $a \in T(W)$ such that $a^p=\Delta$ and $\overline{a}$ generates $\tilde
H$. Let $a=s_1 s_2 \dots s_r$ be an expression of $a$, and let $X=\{s_1, s_2, \dots, s_r\}$. We
have $\Delta= a^p \in G_X$, thus, by Corollary~2.6, $X=S$ and $r={\rm lg} (a) \ge |S|=n$. Finally,
$$
|H|={|\tilde H| \over 2} = p = {{\rm lg} (\Delta) \over {\rm lg} (a)} = \left( {nh \over 2} \right) /r \le
{h \over 2}\,.
$$
Now, assume $\mu \neq \Id$. Let $\GG= G/Z(G)= G/\langle \Delta^2 \rangle$. First, observe that
$\mf (G) \ge h$ by Theorem 3.3. So, it remains to prove that $\mf (G) \le h$, namely,
that $|H| \le h$ for any finite subgroup $H$ of $\GG$.
Let $H$ be a finite subgroup of $\GG$. By Theorem 3.2, $H$ is cyclic and, up to conjugation, $H$
is either of Type 1 or of Type 2. Let $p$ be the order of $H$. In both cases, Type 1 and Type 2,
there exists an element $b \in G^+$ such that $b^p=\Delta^2$ and $\overline{b}$ generates
$H$ (take $b=a$ if $H$ is of Type 1, and $b=a\, \mu(a)$ if $H$ is of Type 2). Let $b=s_1 s_2
\dots s_r$ be an expression of $b$, and let $X=\{s_1, s_2, \dots, s_r\}$. We have $\Delta^2=b^p
\in G_X$, thus, by Corollary~2.6, $X=S$ and $r={\rm lg} (b) \ge |S|=n$. It follows that
$$
|H| = p = {{\rm lg} (\Delta^2) \over {\rm lg} (b)} = {nh \over r} \le h\,.
$$
$\square$
The values of the Coxeter numbers of the irreducible Coxeter groups are well-known (see, for
instance, \cite{Hum}, Section 3.18). Applying Proposition 3.4 to these values, one can easily
compute the invariant $\mf (G)$ for each irreducible (spherical type) Artin group. The result is
given in Table 1.
\def\vrule height 0pt depth 8pt width 0pt{\vrule height 0pt depth 8pt width 0pt}
\def\vrule height 16pt depth 0pt width 0pt{\vrule height 16pt depth 0pt width 0pt}
\def\vrule height 16pt depth 8pt width 0pt{\vrule height 16pt depth 8pt width 0pt}
$$\vbox{
\begin{tabular}{ccccccccccccccc}
\hline
\vrule height 16pt depth 0pt width 0pt&{\vline\kern -0.2 em \vline}&&\vline&&\vline&&\vline& $D_n,\, n\ge 4$ &\vline& $D_n,\, n\ge
5$ &\vline&&\vline\\
\vrule height 0pt depth 8pt width 0pt$\Gamma$ &{\vline\kern -0.2 em \vline}& $A_1$ &\vline& $A_n,\ n\ge 2$ &\vline& $B_n,\, n\ge
2$ &\vline&
$n$ even &\vline& $n$ odd &\vline& $E_6$ &\vline\\
\hline
\vrule height 16pt depth 8pt width 0pt$\mf (G)$ &{\vline\kern -0.2 em \vline}& 1 &\vline& $n+1$ &\vline& $n$ &\vline& $n-1$
&\vline& $2n-2$ &\vline& 12 &\vline\\
\hline
\end{tabular}
\par
\begin{tabular}{ccccccccccccccccc}
\hline
\vrule height 16pt depth 0pt width 0pt&{\vline\kern -0.2 em \vline}&&\vline&&\vline&&\vline&&\vline&&\vline& $I_2(p),\, p\ge 6$
&\vline& $I_2(p),\, p\ge 5$ &\vline\\
\vrule height 0pt depth 8pt width 0pt$\Gamma$ &{\vline\kern -0.2 em \vline}& $E_7$ &\vline& $E_8$ &\vline& $F_4$ &\vline&
$H_3$ &\vline& $H_4$ &\vline& $p$ even &\vline& $p$ odd &\vline\\
\hline
\vrule height 16pt depth 8pt width 0pt$\mf (G)$ &{\vline\kern -0.2 em \vline}& 9 &\vline& 15 &\vline& 6 &\vline& 5 &\vline& 15
&\vline& $p/2$ &\vline& $p$ &\vline\\
\hline
\end{tabular}
}$$
\centerline{{\bf Table 1:} The invariant $\mf(G)$.}
\noindent
{\bf Remark.} Combining \cite{Bes}, Theorem 4.5, with \cite{BrMi}, Section 3, one can actually
compute all the possible orders for a finite subgroup of $G/Z(G)$. The maximal order suffices for
our purpose, thus we do not include this more complicated calculation in this paper.
The next invariant that we want to compute is the rank of the abelianization of $G$ that we denote
by ${\rm rkAb} (G)$. This invariant can be easily computed using the standard presentation of $G$, and
the result is as follows.
\begin{prop}
Let $\Gamma_0$ be the (non-labelled) graph defined by the following data.
$\bullet$ $S$ is the set of vertices of $\Gamma_0$;
$\bullet$ two vertices $s,t$ are joined by an edge if $m_{s\,t}$ is odd.
\noindent
Then the abelianization of $G$ is a free abelian group of rank ${\rm rkAb} (G)$, the number of connected
components of $\Gamma_0$.
$\square$
\end{prop}
The last invariant which interests us is the rank of the center of $G$ that we denote by $\rkZ
(G)$. The following proposition is a straightforward consequence of Proposition 2.2.
\begin{prop}
The center of $G$ is a free abelian group of rank $\rkZ (G)$, the number of components of $\Gamma$.
$\square$
\end{prop}
\section{Irreducibility}
Throughout this section, we assume that $G$ is irreducible (namely, that $\Gamma$ is connected).
Let $H_1, H_2$ be two subgroups of $G$. Recall that $[H_1, H_2]$ denotes the subgroup of $G$
generated by $\{a_1^{-1} a_2^{-1} a_1a_2; a_1 \in H_1\text{ and } a_2 \in H_2\}$. Our goal in this
section is to show that $G$ cannot be expressed as $G= H_1 \cdot H_2$ with $[H_1, H_2] = \{1\}$,
unless either $H_1 \subset Z(G)$ or $H_2 \subset Z(G)$. This implies that $G$ cannot be a
non-trivial direct product.
Recall that $\delta$ denotes the standard generator of $Z(G)$. For $X \subset S$, we denote by
$\delta_X$ the standard generator of $G_X$, and, for $a \in G$, we denote by $Z_G(a)$ the
centralizer of $a$ in $G$.
\begin{lemma}
Let $t \in S$ such that $\Gamma_{S \setminus \{t\}}$ is connected and $\mu(t) \neq t$ if $\mu
\neq \Id$. Then $Z_G( \delta_{S \setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{
\delta \}$ and is isomorphic to $G_{S \setminus \{t\}} \times \langle \delta \rangle$.
\end{lemma}
\paragraph{Proof.}
Assume first that $\mu=\Id$ (in particular, $\delta=\Delta$). By \cite{Par1}, Theorem 5.2,
$Z_G(\delta_{S\setminus \{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{ \Delta^2, \Delta
\Delta_{S \setminus \{t\}}^{-1}\}$, thus $Z_G(\delta_{S \setminus \{t\}})$ is generated by $G_{S
\setminus \{t\}} \cup \{ \delta \}$.
Now, assume $\mu \neq \Id$ (in particular, $\delta = \Delta^2$ and $\mu(t) \neq t$). By
\cite{Par1}, Theorem 5.2, $Z_G(\delta_{S \setminus \{t\}})$ is generated by $G_{S \setminus
\{t\}} \cup \{ \Delta^2, \Delta \Delta_{S \setminus \{\mu(t)\}}^{-1} \Delta \Delta_{S \setminus
\{t\}}^{-1} \}$. Observe that
\break
$\Delta \Delta_{S \setminus \{\mu(t)\}}^{-1} \Delta \Delta_{S
\setminus \{t\}}^{-1} = \Delta^2 \Delta_{S \setminus \{t\}}^{-2}$, thus $Z_G(\delta_{S \setminus
\{t\}})$ is generated by $G_{S \setminus \{t\}} \cup \{\delta\}$.
By the above, we have an epimorphism $G_{S \setminus \{t\}} \times \langle \delta \rangle \to
Z_G( \delta_{S \setminus \{t\}})$, and, by Corollary~2.6, the kernel of this epimorphism is
$\{1\}$.
$\square$
\noindent
{\bf Remark.} It is an easy exercise to show (under the assumption that $\Gamma$ is connected) that
there always exists $t \in S$ such that $\Gamma_{S \setminus \{t\}}$ is connected and $\mu(t)
\neq t$ if $\mu \neq \Id$.
\begin{prop}
Let $H_1, H_2$ be two subgroups of $G$ such that $G=H_1 \cdot H_2$ and $[H_1,H_2]=\{1\}$. Then
either $H_1 \subset Z(G)$ or $H_2 \subset Z(G)$. If, moreover, $H_1 \cap H_2 = \{1\}$, then
either $H_1=\{1\}$ and $H_2=G$, or $H_1=G$ and $H_2= \{1\}$.
\end{prop}
\paragraph{Proof.}
We argue by induction on $n= |S|$. If $n=1$, then $\Gamma= A_1$ and $G={\mathbb Z}$, and the conclusion of
the proposition is well-known.
Assume $n \ge 2$. For $i=1,2$, let $\tilde H_i$ denote the subgroup of $G$ generated by $H_i \cup
\{ \delta\}$. We have $G= \tilde H_1 \cdot \tilde H_2$, $[\tilde H_1, \tilde H_2] = \{1\}$, $H_1
\subset \tilde H_1$, and $H_2 \subset \tilde H_2$. Observe also that $\tilde H_1 \cap \tilde H_2$
must be included in the center of $G$, and that $\delta \in \tilde H_1 \cap \tilde H_2$, thus
$\tilde H_1 \cap \tilde H_2 = \langle \delta \rangle$. Take $t \in S$ such that $\Gamma_{S
\setminus \{t\}}$ is connected and $\mu(t) \neq t$ if $\mu \neq \Id$, write $X=S \setminus
\{t\}$, and choose $d_1 \in \tilde H_1$ and $d_2\in \tilde H_2$ such that $\delta_X=d_1d_2$.
Let $a \in G_X$. Choose $a_1 \in \tilde H_1$ and $a_2 \in \tilde H_2$ such that $a=a_1a_2$. We
have
$$
1= a^{-1} \delta_X^{-1} a \delta_X = a_1^{-1} d_1^{-1} a_1d_1 a_2^{-1} d_2^{-1} a_2d_2\,,
$$
thus
$$
a_1^{-1} d_1^{-1} a_1d_1 = d_2^{-1} a_2^{-1} d_2a_2 \in \tilde H_1 \cap \tilde H_2 = \langle
\delta \rangle \,.
$$
Let $k \in {\mathbb Z}$ such that $a_1^{-1} d_1^{-1} a_1d_1 = \delta^k$. Consider the homomorphism $\deg:
G \to {\mathbb Z}$ which sends $s$ to $1$ for all $s \in S$. Then
$$
0= \deg( a_1^{-1} d_1^{-1} a_1d_1) = \deg( \delta^k)= k\,{\rm lg} (\delta)\,,
$$
thus $k=0$, hence $a_1$ and $d_1$ commute. Now, $a_1$ and $d_2$ also commute (since $a_1 \in
\tilde H_1$ and $d_2 \in \tilde H_2$), thus $a_1$ commutes with $\delta_X = d_1d_2$. By Lemma
4.1, $a_1$ can be written as $a_1= b_1 \delta^{p_1}$, where $b_1 \in G_X$ and $p_1 \in {\mathbb Z}$. Note
also that $b_1 = a_1 \delta^{-p_1} \in \tilde H_1$, since $\delta \in \tilde H_1$, thus
$b_1 \in G_X \cap \tilde H_1$.
Similarly, $a_2$
can be written as $a_2=b_2 \delta^{p_2}$ where $b_2 \in G_X \cap \tilde H_2$ and $p_2 \in {\mathbb Z}$. We
have $\delta^{p_1+p_2} = a b_1^{-1} b_2^{-1} \in G_X \cap \langle \delta \rangle = \{1\}$ (by
Corollary 2.6), thus $p_1+p_2=0$ and $a=b_1b_2$.
So, we have
$$
G_X= (G_X \cap \tilde H_1) \cdot (G_X \cap \tilde H_2)\,.
$$
Moreover, by Corollary 2.6,
$$
(G_X \cap \tilde H_1) \cap (G_X \cap \tilde H_2) = G_X \cap \langle \delta \rangle = \{1\}\,.
$$
By the inductive hypothesis, it follows that, up to permutation of 1 and 2, we
have $G_X \cap \tilde H_1 = G_X$ (namely, $G_X \subset \tilde H_1$), and $G_X \cap \tilde H_2 =
\{1\}$.
We turn now to show that $\tilde H_2 \subset \langle \delta \rangle = Z(G)$. Since $H_2 \subset
\tilde H_2$, this shows that $H_2 \subset Z(G)$.
Let $a \in \tilde H_2$. Since $\delta_X \in G_X \subset \tilde H_1$, $a$ and $\delta_X$ commute.
By Lemma 4.1, $a$ can be written as $a=b \delta^p$, where $b \in G_X$ and $p \in {\mathbb Z}$. Since
$\delta \in \tilde H_2$, we also have $b = a \delta^{-p} \in \tilde H_2$, thus $b \in G_X \cap
\tilde H_2 = \{1\}$, therefore $a = \delta^p \in \langle \delta \rangle$.
Now, assume that $H_1 \cap H_2=\{1\}$. By the above, we may suppose that $H_2 \subset Z(G)=
\langle \delta \rangle$. In particular, there exists $k \in {\mathbb Z}$ such that $H_2 = \langle
\delta^k \rangle$.
Choose any order $S=\{s_1, \dots, s_n\}$ of $S$, and write $\pi=s_1s_2 \dots s_n \in G$.
Let $b \in H_1$ and $p \in {\mathbb Z}$ such that $\pi= b \delta^{pk}$.
Observe that $b \neq 1$ since $\pi$ is not central in $G$. Let $h$ denote the Coxeter number of
$W$. By Theorem~3.3, $\pi^h= b^h \delta^{phk} \in Z(G)$, thus $b^h \in Z(G)$. Moreover, $b^h \neq
1$ since $G$ is torsion free and $b \neq 1$.
This implies that $Z(H_1)\neq\{1\}$. Now, observe that $Z(H_1) \subset Z(G)= \langle
\delta \rangle$, thus there exists $l >0$ such that $Z(H_1)=\langle \delta^l \rangle$. Finally,
$\delta^{lk} \in H_1 \cap H_2 = \{1\}$, thus $kl=0$, therefore $k=0$ (since $l\neq 0$) and
$H_2=\{1\}$. Then we also have $H_1=G$.
$\square$
\begin{prop}
Assume $n=|S| \ge 2$. Let $H$ be a subgroup of $G$ such that $G=H \cdot \langle \delta \rangle$.
Then $\cd (H)= \cd (G)$, $\mf(H) = \mf(G)$, and ${\rm rkAb} (H)= {\rm rkAb}(G)$.
\end{prop}
\paragraph{Proof.}
For all $s \in S$, take $b_s \in H$ and $p_s \in {\mathbb Z}$ such that $s=b_s \delta^{p_s}$. We can and
do suppose that $p_s=p_t$ if $s$ and $t$ are conjugate in $G$. Then the mapping $S \to H$, $s
\mapsto b_s=s\delta^{-p_s}$ determines a homomorphism $\varphi: G \to H$.
We show that $\varphi: G \to H$ is injective. Observe that the mapping $S \to {\mathbb Z}$, $s \mapsto
p_s$ determines a homomorphism $\eta: G \to {\mathbb Z}$, and that $\varphi(a)= a \delta^{-\eta (a)}$ for
all $a \in G$. In particular, if $a \in {\rm Ker} \varphi$, then $a=\delta^{\eta(a)} \in Z(G)$. Choose
any order $S=\{s_1, \dots, s_n\}$ of $S$, and write $\pi=s_1s_2 \dots s_n \in G$. Note that
$\varphi(\pi) \neq 1$, since $\pi$ is not central in $G$, and that, by Theorem 3.3, there exists
$k>0$ such that $\pi^k = \delta$. Let $a \in {\rm Ker} \varphi$. Then $a = \delta^{\eta (a)} = \pi^{k
\eta(a)}$, thus $1=\varphi(a)= \varphi(\pi)^{k \eta(a)}$. We have $\varphi(\pi) \neq 1$ and $G$
is torsion free, hence $\eta(a) =0$ (since $k>0$) and $a=1$.
Now, recall that $\cd (H_1) \le \cd (H_2)$ if $H_1$ is a subgroup of a given group $H_2$. So,
$$
\cd (G)= \cd (\varphi(G)) \le \cd(H) \le \cd (G)\,.
$$
The equality $G=H \cdot \langle \delta \rangle = H \cdot Z(G)$ implies that $Z(H)= Z(G) \cap H$
and $G/Z(G)= H/Z(H)$. In particular, we have $\mf(H)= \mf(G)$.
Let ${\cal H}$ be a group, let $g$ be a central element in ${\cal H}$, and let $p>0$. Let ${\cal G}= ({\cal H} \times {\mathbb Z})/
\langle (g,p) \rangle$. Then one can easily verify (using the Reidemeister-Schreier method, for
example) that we have exact sequences $1 \to {\cal H} \to {\cal G} \to {\mathbb Z}/p{\mathbb Z} \to 1$ and $1 \to {\rm Ab} ({\cal H})
\to {\rm Ab} ({\cal G}) \to {\mathbb Z}/p{\mathbb Z} \to 1$, where ${\rm Ab} ({\cal G})$ (resp. ${\rm Ab}({\cal H})$) denotes the abelianization
of ${\cal G}$ (resp. ${\cal H}$).
Now, recall the equality $G=H \cdot \langle \delta \rangle$. By Proposition 4.2, we have $H \cap
\langle \delta \rangle \neq \{1\}$. So, there exists $p>0$ such that $H \cap \langle \delta
\rangle = \langle \delta^p \rangle$. Write $d=\delta^p \in H$. Then $d$ is central in $H$ and $G
\simeq (H \times {\mathbb Z})/\langle (d,p) \rangle$. By the above observation, it follows that we have an
exact sequence $1 \to {\rm Ab} (H) \to {\rm Ab} (G) \to {\mathbb Z}/p{\mathbb Z} \to 1$, thus ${\rm Ab} (H)$ is a free abelian
group of rank ${\rm rkAb} (G)$.
$\square$
\section{Proof of the main theorem}
\begin{prop}
Let $\Gamma$ and $\Omega$ be two connected spherical type Coxeter graphs, and let $G$ and $H$ be
the Artin groups associated to $\Gamma$ and $\Omega$, respectively. If $\cd (G)= \cd (H)$, $\mf
(G)= \mf (H)$, and ${\rm rkAb}(G)= {\rm rkAb}(H)$, then $\Gamma= \Omega$.
\end{prop}
\paragraph{Proof.}
Let $n$ and $m$ be the numbers of vertices of $\Gamma$ and $\Omega$, respectively. By Proposition~3.1,
we have $n= \cd(G)= \cd(H) = m$.
Suppose $n=m=1$. Then $\Gamma= \Omega= A_1$.
Suppose $n=m \ge 3$. Then one can easily verify in Table 1 that the equality $\mf (G)= \mf (H)$
implies $\Gamma= \Omega$.
Suppose $n=m=2$. Let $p,q \ge 3$, such that $\Gamma= I_2(p)$ and $\Omega= I_2(q)$. By Proposition~3.5,
either ${\rm rkAb}(G)={\rm rkAb}(H)=2$ and $p,q$ are both even, or ${\rm rkAb}(G)= {\rm rkAb}(H)=1$ and $p,q$
are both odd. If $p,q$ are both even, then, by Table 1, ${p \over 2} = \mf(G) = \mf(H)= {q
\over 2}$, thus $p=q$ and $\Gamma=\Omega= I_2(p)$. If $p,q$ are both odd, then, by Table 1, $p=
\mf (G)= \mf (H)=q$, thus $\Gamma=\Omega= I_2(p)$.
$\square$
\begin{corollary}
Let $\Gamma$ and $\Omega$ be two connected spherical type Coxeter graphs, and let $G$ and $H$ be
the Artin groups associated to $\Gamma$ and $\Omega$, respectively. If $G$ is isomorphic to $H$,
then $\Gamma=\Omega$.
$\square$
\end{corollary}
\noindent
{\bf Proof of Theorem 1.1.}
Let $\Gamma$ and $\Omega$ be two spherical type Coxeter graphs, and let $G$ and $H$ be the Artin
groups associated to $\Gamma$ and $\Omega$, respectively. We assume that $G$ is isomorphic to $H$
and turn to prove that $\Gamma=\Omega$.
Let $\Gamma_1, \dots, \Gamma_p$ be the connected components of $\Gamma$, and let $\Omega_1,
\dots, \Omega_q$ be the connected components of $\Omega$. For $i=1, \dots, p$, we denote by $G_i$
the Artin group associated to $\Gamma_i$, and, for $j=1, \dots, q$, we denote by $H_j$ the Artin
group associated to $\Omega_j$. We have $G= G_1 \times G_2 \times \dots \times G_p$ and $H=H_1
\times H_2 \times \dots \times H_q$. We may and do assume that there exists $x \in \{0,1, \dots,
p\}$ such that $\Gamma_i \neq A_1$ for $i=1, \dots, x$, and $\Gamma_i=A_1$ for $i=x+1, \dots, p$.
So, $G_1, \dots, G_x$ are non abelian irreducible Artin groups of rank $\ge 2$, and $G_{x+1},
\dots, G_p$ are all isomorphic to ${\mathbb Z}$. Similarly, we may and do assume that there exists $y \in
\{0,1, \dots, q\}$ such that $\Omega_j \neq A_1$ for $j=1, \dots, y$, and $\Omega_j=A_1$ for
$j=y+1, \dots, q$. We can also assume that $x \ge y$.
A first observation is, by Proposition 3.6, that
$$
p= {\rm rkZ}(G)= {\rm rkZ}(H)= q\,.
$$
Now, fix an isomorphism $\varphi: G \to H$. For $1 \le i\le p$, let $\iota_i: G_i \to G$ be the
natural embedding, for $1 \le j\le p$, let $\kappa_j: H \to H_j$ be the projection on the $j$-th
component, and, for $1 \le i,j \le p$, let $\varphi_{i\,j} = \kappa_j \circ \varphi \circ
\iota_i: G_i \to H_j$.
Let $j\in \{1, \dots, y\}$. Observe that $H_j= \prod_{i=1}^p \varphi_{i\,j}(G_i)$, and that
$[\varphi_{i\,j}(G_i), \varphi_{k\,j}(G_k)]=1$ for all $i,k \in \{1, \dots, p\}$, $i \neq k$. Let
$\delta_j^H$ denote the standard generator of $Z(H_j)$, and, for $i\in\{1, \dots, p\}$, let $\tilde
H_{i\,j}$ be the subgroup of $H_j$ generated by $\varphi_{i\,j}(G_i) \cup \{\delta_j^H\}$. By
Proposition~4.2, there exists $\chi (j) \in \{1, \dots, p\}$ such that $H_j= \tilde
H_{\chi(j)\,j}$, and $\tilde H_{i\,j} = Z(H_j)= \langle \delta_j^H \rangle$ for $i \neq \chi(j)$.
Since $H_j$ is non abelian, $\chi(j)$ is unique and $\chi(j)\in \{1, \dots, x\}$.
We turn now to show that the map $\chi: \{1, \dots, y\} \to \{1, \dots, x\}$ is surjective. Since
$x \ge y$, it follows that $x=y$ and $\chi$ is a permutation.
Let $i \in \{1, \dots, x\}$ such that $\chi(j) \neq i$ for all $j \in \{1, \dots, y\}$. Then
$\varphi_{i\,j} (G_i) \subset Z(H_j)$ for all $j=1, \dots, p$, thus $\varphi(G_i) \subset Z(H)$.
This contradicts the fact that $\varphi$ is injective and $G_i$ is non abelian.
So, up to renumbering the $\Gamma_i$'s, we can suppose that $\chi(i)=i$ for all $i\in \{1, \dots,
x\}$.
We prove now that $\varphi_{i\,i}: G_i \to H_i$ is injective for all $i \in \{1, \dots, x\}$. Let
$a \in {\rm Ker} \varphi_{i\,i}$. Since $\varphi_{i\,j} (a) \in Z(H_j)$ for all $j \neq i$, we have
$\varphi(a) \in Z(H)$. Since $\varphi$ is injective, it follows that $a \in Z(G_i)$. Let $\{s_1,
\dots, s_r\}$ be the set of vertices of $\Gamma_i$, and let $\pi= s_1s_2 \dots s_r \in G_i$.
Observe that $\varphi_{i\,i}(\pi) \neq 1$ since $\pi$ is not central in $G_i$. Let $\delta_i^G$
be the standard generator of $Z(G_i)$. By Theorem 3.3, there exists $k >0$ such that $\pi^k=
\delta_i^G$. On the other hand, since $a \in Z(G_i)$, there exists $l \in {\mathbb Z}$ such that $a=
(\delta_i^G)^l= \pi^{kl}$. Now, $1= \varphi_{i\,i}(a)= \varphi_{i\,i}(\pi)^{kl}$, $H_i$ is
torsion free, and $\varphi_{i\,i}(\pi) \neq 1$, thus $kl=0$ and $a=\pi^{kl}=1$.
Let $i \in \{1, \dots, x\}$. Recall that $\varphi_{i\,i}: G_i \to H_i$ is injective, and $H_i=
\varphi_{i\,i} (G_i) \cdot \langle \delta_i^H \rangle$, where $\delta_i^H$ denotes the standard
generator of $H_i$. By Proposition 4.3, it follows that
$$
\cd(G_i)= \cd(H_i)\,,\quad \mf(G_i) =\mf(H_i)\,,\quad {\rm rkAb}}\def\rkZ{{\rm rkZ}}\def\Ab{{\rm Ab}(G_i)= {\rm rkAb}}\def\rkZ{{\rm rkZ}}\def\Ab{{\rm Ab}(H_i)\,,
$$
thus, by Proposition 5.1, $\Gamma_i=\Omega_i$. Let $i \in \{x+1, \dots, p\}$. Then $\Gamma_i=
\Omega_i = A_1$. So, $\Gamma= \Omega$.
$\square$
\noindent
{\bf Remark.} In the proof above, the homomorphism $\varphi_{i\,i}$ is injective but is not
necessarily surjective as we show in the following example.
Let $G_1= \langle s_1,s_2 | s_1s_2s_1= s_2s_1s_2 \rangle$ be the Artin group associated to $A_2$,
let $G_2={\mathbb Z}= \langle t \rangle$, and let $G=G_1 \times G_2$. We denote by $\delta= (s_1s_2)^3$
the standard generator of $Z(G_1)$. Let $\varphi: G \to G$ be the homomorphism defined by
$$
\varphi(s_1)= s_1 \delta t\,, \quad \varphi(s_2)= s_2 \delta t \,, \quad \varphi(t)= \delta t \,.
$$
Then $\varphi$ is an isomorphism but $\varphi_{1\,1}$ is not surjective. The inverse $\varphi^{-1}: G \to G$ is determined by
$$
\varphi^{-1} (s_1)= s_1 t^{-1}\,, \quad \varphi^{-1} (s_2)= s_2 t^{-1}\,, \quad \varphi^{-1} (t)=
\delta^{-1} t^7\,.
$$
\noindent
\halign{#
\cr
Luis Paris\cr
Institut de Math\'ematiques de Bourgogne\cr
Universit\'e de Bourgogne\cr
UMR 5584 du CNRS, BP 47870\cr
21078 Dijon cedex\cr
FRANCE\cr
\noalign{
}
\texttt{lparis@u-bourgogne.fr}\cr}
\end{document}
\begin{document}
\title{Recent advances on integrated quantum communications}
\author{Adeline Orieux and Eleni Diamanti}
\date{}
\address{LTCI, CNRS, T\'el\'ecom ParisTech, Universit\'e Paris-Saclay, 75013, Paris, France}
\ead{adeline.orieux@telecom-paristech.fr, eleni.diamanti@telecom-paristech.fr}
\begin{indented}
\item[]June 2016
\end{indented}
\begin{abstract}
In recent years, the use of integrated technologies for applications in the field of quantum information processing and communications has made great progress. The resulting devices feature valuable characteristics such as scalability, reproducibility, low cost and interconnectivity, and have the potential to revolutionize our computation and communication practices in the future, much in the way that electronic integrated circuits have drastically transformed our information processing capacities since the last century. Among the multiple applications of integrated quantum technologies, this review will focus on typical components of quantum communication systems and on overall integrated system operation characteristics. We are interested in particular in the use of photonic integration platforms for developing devices necessary in quantum communications, including sources, detectors and both passive and active optical elements. We also illustrate the challenges associated with performing quantum communications on chip, by using the case study of quantum key distribution - the most advanced application of quantum information science. We conclude with promising perspectives in this field.
\end{abstract}
\noindent{\it Keywords}: quantum communications, integrated photonics, quantum networks
\section{Introduction}
\label{sec:intro}
Quantum information science aims at harnessing quantum mechanical effects to develop systems that can provide a dramatic improvement in information processing, communication efficiency and security. Quantum technologies enable, for example, the distribution of secret keys to two distant parties with unconditional security, which is impossible by classical means \cite{SBC:rmp09}, while the basic principles of quantum computing models have successfully been demonstrated in small-scale systems \cite{BKB:science12}. The true disruptive potential of quantum information technologies, however, can be fully exploited in large-scale systems, namely computing servers manipulating and storing thousands of bits of quantum information (qubits) or networks linking thousands of users performing communication and distributed computation tasks. Similarly to the silicon chip revolution which is at the origin of modern communications, scaling up quantum systems will naturally necessitate the use of integrated technologies. Indeed, chip-based quantum information holds a lot of promise for future high performance information and communication infrastructures, but also presents a lot of challenges as it encompasses several different disciplines. For these reasons, it is one of the most active fields of research today.
Current integration efforts concern all aspects of quantum information, including in particular quantum computing and quantum communications, each of which puts specific constraints on the underlying physical technology. In this review, we will focus on quantum communication technologies on chip (with typical chip dimensions ranging from the millimeter to the centimeter scale).
In this setting, photons are the ideal information carriers due to their long coherence time, their weak interaction with the environment and their high speed. Additionally, and crucially, photons provide a viable pathway to integration, which is necessary to overcome the difficulties inherent in bulk systems to maintain mechanical stability in apparatuses of increasing size and complexity. In principle, integrated photonics can bring together many desirable characteristics in terms of efficiency, cost, scalability, flexibility and performance required for quantum communications.
In the following, we will first review the prevailing integration platforms used for quantum communications, with a particular emphasis on specific features and criteria that determine their suitability for specific applications. We will then describe efforts towards the integration of the main functionalities required in quantum communication systems, namely the generation of nonclassical states of light, their manipulation using reconfigurable passive circuits or active elements for modulation, routing and switching, their storage using quantum memories, and their detection using single-photon or coherent detectors. Beyond the integration of individual components, we are particularly interested here in the on-chip system design aspect, which plays a central role for practical applications. From this perspective, we will discuss in some detail integration efforts for quantum key distribution as well as progress towards on-chip quantum teleportation, entanglement distribution and quantum repeaters, which constitute the core of large-scale quantum networks. We will conclude with the multiple remaining challenges in this field and perspectives for the next few years.
We remark that this review is by no means exhaustive. Integrated quantum communications is a rapidly evolving research field with continuous developments; our goal here is to provide a comprehensive view of the current state of the art with the major tasks accomplished so far and those to be addressed next.
\section{Integration platforms for quantum communications}
\label{sec:platforms}
Thanks to the importance of miniaturization for practical and scalable information and communication devices, today we have at our disposal a diverse set of integration platforms allowing for high performance elements of increasing complexity. The suitability of these platforms for quantum communications is in general evaluated by various important features, including for example the necessity to use foundry services and compatibility with mass manufacturing processes, the ability to support nonlinear and electro-optic effects and single-photon detection, the compatibility with specific encodings of quantum information, such as polarization, path, and time bins, and the adaptivity to a practical communication network infrastructure. The prevailing platforms for such applications can be described as follows:
\begin{itemize}
\item Silicon-based platforms \cite{VP}, which include silicon (Si) as well as silicon nitride (SiN) and silicon carbide (SiC), provide popular solutions as they combine several appealing characteristics. The global prevalence of the Si integration platform for electronics applications has ensured that all the physical and technological attributes of this platform have been fully examined. The existing silicon-based CMOS (Complementary Metal-Oxide-Semiconductor) wafer fabrication facilities open up vast possibilities for cost-effective solutions featuring high yield and reproducibility. Indeed, silicon-on-insulator (SOI) photonics has become a key technology for applications in telecommunications, optical interconnects, medical screening, spectroscopy, biology and chemical sensing, which were inconceivable a few years ago. This technology offers in general waveguides with a very high refractive index, allowing for an increased circuit compactness even for complex layouts involving many elements. Additionally, the high electric field intensity resulting from the extreme mode confinement is beneficial for nonlinear effects. These favorable characteristics, however, come at the expense of poor mode-matching with optical fibers, which is the privileged channel for quantum communications, and increased propagation losses.
\item Platforms based on III-V compound semiconductors \cite{AdachiIIIV2005}, including indium phosphide (InP), gallium arsenide (GaAs), and gallium nitride (GaN), are widely used in optoelectronics as their direct bandgap allows laser emission and gives more favorable conditions than the indirect gap of Si in terms of speed. In the telecommunication wavelengths range, compounds such as InAsP allow laser emission, others such as AlGaAs are transparent and can be used to propagate and manipulate photons, while InGaAs can be used for detection. The III-V materials have a high refractive index and, in addition, they generally present a very strong second-order nonlinearity, allowing for compact and efficient frequency conversion devices or parametric down-conversion sources. These platforms also require foundry services, which are in general less advanced than the CMOS facilities, but are nonetheless widely developed for the laser diodes market. Full system integration, including electrically pumped lasers typically used in applications, may be envisaged with this technology. As for the Si platform, mode-matching with the optical fibers and propagation losses are an issue.
\item Nonlinear optical dielectric materials, in particular lithium niobate (LiNbO$_3$) and potassium titanyl phosphate (commonly known as KTP), form a particularly versatile integration platform. These materials present strong second-order nonlinearities and electro-optic properties, which makes them ideal for parametric down-conversion, frequency conversion and modulation processes. Their fabrication is well studied and controlled, however because of their relatively large size these platforms are less favorable for the development of scalable systems. They have been used for quantum communications with immense success leading to unprecedented developments in the field in the last decade (for a relatively recent review see \cite{Tanzilli:LPR2012}).
\item Platforms producing glass waveguides, such as silica-on-silicon (where silica is the common name of silicon dioxide, SiO$_2$) \cite{PCR:science08} or femtosecond laser writing \cite{fslaserwriting:1996,Osellame:JOSAB2003}, have been developed and used for quantum communications more recently, leading to reduced propagation losses and excellent mode-matching with standard optical fibers. These are also versatile platforms, with a well controlled fabrication procedure that does not require time-consuming foundry services and makes them ideal for rapid device and system tests. These platforms cannot support functionalities requiring nonlinear or electro-optic effects but they are well adapted for complex linear circuits involving a large number of integrated beam splitters (known as directional couplers). Contrary to the Si and III-V platforms, femtosecond laser written technologies also offer the capacity to preserve and manipulate polarization easily, which is important for some applications in quantum communications, and enable a remarkable versatility in the 3-dimensional geometry of the circuits.
\end{itemize}
In addition to the above integration platforms that have been widely used for quantum communications, other important integrated technologies include nitrogen vacancy centers in diamond \cite{HBD:nature15}, which can be coupled to superconducting circuits \cite{Esteve:PRL107}, and semiconductor quantum dots interfaced with photonic nanostructures \cite{LMS:rmp15}. These platforms are being developed primarily for applications in quantum computing and quantum simulation, which fall beyond the scope of this review; however, some results obtained with these technologies also have implications in quantum communications and quantum networks \cite{HBD:nature15,KBK:pnas15}.
It is clear from the above descriptions that no single integration platform can gather all the desired characteristics for a specific application. Indeed, it is generally understood that the next generation of quantum communication devices and systems will adopt hybrid integration technologies with the goal of bringing together the best elements of each platform. In the following, we will describe some important achievements obtained with the aforementioned technologies and discuss future directions towards advanced quantum communication networks.
\section{Integrated quantum communication devices}
\label{sec:devices}
The main optical quantum communication devices aim at generating, manipulating, storing and detecting quantum states of light. One important design element for all devices is the operation wavelength; communication through optical fibers imposes the use of the telecommunication wavelength range, where propagation loss is minimum, while free-space communications typically require near-infrared wavelengths. In the following, we describe integration efforts in all types of devices employed in quantum communication systems.
\subsection{Generation}
\label{subsec:generation}
The first key element of an optical quantum communication system is a source of nonclassical states of light. Some quantum communication applications, such as standard quantum key distribution (QKD) protocols \cite{SBC:rmp09}, only require single-photon states. As discussed in detail in recent reviews \cite{EFM:rsi11,SZ:jmo12}, such states can be generated either in a deterministic manner using single-photon emitters or probabilistically by heralding the generation of the desired state in one of the two members of a correlated photon pair with the detection of the second member; finally, single-photon states can also be simply simulated by highly attenuating coherent states produced by lasers. Weak coherent pulses are in fact sufficient for most QKD implementations, but the suppression of multi-photon events achieved by heralded single-photon sources is useful for other quantum communication protocols, such as quantum teleportation. The ideal statistics reached by on-demand single-photon sources, on the other hand, are necessary for applications in quantum computing and quantum simulation, as well as for repeater-based quantum networks. For such networks \cite{NB:natphoton14}, as well as for performing advanced applications, including device-independent \cite{BCP:rmp14} and distributed verification \cite{PCW:prl12} protocols, it is also necessary to generate two or multi-photon entangled states. Indeed, entangled-photon states constitute the most important resource in quantum communications. The performance of entangled-photon sources can be evaluated using several quality factors, for instance, the visibility or fidelity of the generated states with respect to the target state, and the violation of suitable Bell inequalities quantified by the Bell parameter. The degree of freedom where entanglement is generated is also an important characteristic depending on the desired application.
Sources generating correlated photon pairs are the basis both of the heralded generation of single-photon states used in quantum communications and of the generation of two-photon entangled states (also known as Einstein-Podolsky-Rosen - EPR - pairs). Hence, the vast majority of works on integrated sources so far has aimed at the generation of such states. Currently, the best on-chip sources of photon pairs at telecom wavelengths make use of second-order and third-order nonlinear processes, in particular, spontaneous parametric down-conversion (SPDC) and spontaneous four-wave mixing (SFWM), respectively. The former is possible in the III-V platform as well as in periodically poled LiNbO$_3$ (PPLN) and KTP (PPKTP) waveguides, while only the latter can be used in the Si platform.
In the Si platform, narrowband ($\leq$ 0.1 nm, instead of 1-100 nm typical of standard sources) entangled photon pairs can be generated in high quality factor microrings evanescently coupled to straight waveguides, as shown for example in Fig.~\ref{fig:Devices1}(a). Time-energy entangled photon pairs with 89\% raw visibility \cite{Bajoni:Optica2} and reconfigurable path-entangled pairs with a Bell parameter of 2.69 \cite{OBrien:NatComm6} have been reported with this geometry (we recall that the upper bound of the Bell parameter is $2\sqrt{2}\approx 2.83$ for an ideal state \cite{Cir:lmp80}). Furthermore, compact spiralled waveguides (see Fig.~\ref{fig:Devices1}(b)) combined with on-chip two-photon interference showing raw visibilities larger than 96\% have allowed the generation of two-photon NOON states \cite{OBrien:NatPhot8}. Nanowire waveguides combined with on-chip polarization rotators have also permitted the generation of polarization-entangled states with more than 91\% fidelity with respect to a Bell state \cite{Takesue:SciRep2}. All these highly compact sources have been made possible by the very tight bending radius and strong mode confinement that can be achieved in silicon-on-insulator waveguides. Note that these sources are based on SFWM where the pump wavelength is in general very close to the wavelength of the generated photons, which means that notch filters with a very high extinction are needed to eliminate residual pump photons. On the other hand, standard telecom lasers can be used as pump lasers.
In the III-V platform, different phase-matching techniques have led to promising photon pair sources with various quantum state properties in AlGaAs waveguides. The propagation losses usually associated with the quasi-phase-matching technique in AlGaAs have been sufficiently reduced recently, allowing for the generation of high quality entangled photon pairs with a coincidence-to-accidental ratio (CAR) larger than 100 \cite{Qian:APL103,Qian:OL39}. Furthermore, counterpropagating phase-matching, in which the pump beam impinges transversally on the waveguide surface and generates photons that are spatially separated in two counterpropagating guided modes \cite{Ducci:PRL110}, enables the generation of intrinsically narrowband ($<$ 1 nm) polarization-entangled pairs. Finally, modal phase-matching, in which a pump mode guided in a photonic bandgap defect mode is converted to photon pairs guided by total internal reflection, has shown broadband emission of photon pairs: polarization-entangled \cite{Helmy:OE21} and energy-time-entangled \cite{Ducci:optica16} pairs have been obtained with similar raw Bell parameters of about 2.6. Note that a first demonstration of an electrically pumped source generating its own pump laser within the nonlinear waveguide (see Fig.~\ref{fig:Devices1}(c)) with a CAR larger than 13 has been demonstrated using this modal phase-matching strategy \cite{Ducci:PRL112}, opening the way towards fully integrated sources.
As mentioned in Section \ref{sec:platforms}, sources based on PPLN or PPKTP waveguides are the most established ones and have shown impressive quality and efficiency for many years. Some recent results include a source of path-entangled photon pairs based on two coupled PPLN waveguides (see Fig.~\ref{fig:Devices1}(d)) with a fidelity larger than 84\% with respect to a two-photon NOON state \cite{Silberhorn:1505}, a source of single-longitudinal-mode pairs with a 60 MHz bandwidth \cite{Silberhorn:NJP17} for applications with atom-based quantum memories, and a source of polarization-entangled photon pairs with a very high raw Bell parameter of 2.82 \cite{Tanzilli:OComm327}. A source of heralded single photons with a heralding efficiency of 60\%, one of the best reported so far for integrated sources, has been achieved in a LiNbO$_3$ chip combining a PPLN waveguide for SPDC and a wavelength demultiplexer for separating the emitted photons \cite{Silberhorn:NJP15}.
In silica waveguides, the nonlinear effect is not strong enough to allow the generation of photon pairs on reasonably small sizes. Indeed, SFWM in silica generally requires waveguide lengths of several tens of centimeters and up to a few meters, typically only achieved in optical fibers. An envisioned strategy in this case is the hybrid integration of nonlinear dielectric crystal waveguides for the source part combined with glass chips suitable for photon manipulation (discussed in the following section). While this promising direction is pursued, some recent works have combined off-chip devices with such chips to generate quantum states of particular interest. For example, a silica-on-silicon chip consisting of four tunable beam splitters was used to entangle two squeezed beams generated by (off-chip) sub-threshold optical parametric oscillators (OPOs) \cite{OBrien:NatPhot9}. The entanglement performance of this source, which is adapted to quantum information protocols exploiting continuous variables (CV) of light, was mainly limited by coupling losses in and out of the chip. Another illustration of this partially integrated approach was the use of femtosecond laser written glass circuits in combination with SPDC in a BiB$_3$O$_6$ crystal for the generation and characterization of single-photon W states \cite{GHP:natphoton14}.
\begin{figure}
\caption{Examples of integrated devices for the generation and manipulation of quantum photonic states. (a) Integrated silicon chip with a tunable microring SFWM source and tunable filters, as well as a pump rejection Bragg filter \cite{Bajoni:PRX2014}
\label{fig:Devices1}
\end{figure}
Some progress has also been made recently towards the generation of multi-photon states required for advanced quantum communication protocols. From a practical application perspective, it is particularly interesting to be able to herald the generation of these states instead of using commonly employed post-selection techniques \cite{BTC:njp13}. Heralded multi-photon states have been generated in experiments using BBO crystals \cite{Walther:NatPhot2010} or PPLN and PPKTP waveguides \cite{Jennewein:NatPhot2014}, while, importantly, the generation of a photon triplet has been reported recently in dielectric waveguides \cite{Silberhorn:QELS}.
We remark that a major issue, common to all the aforementioned sources, is the probabilistic nature and Poissonian statistics of the (multi-)photon state emission that leads to very small generation rates limited by undesired higher photon number terms. Techniques using multiplexing of several heralded sources combined with delay lines and active switches offer a promising route towards the solution of this problem \cite{Eggleton:NatComm4}.
\subsection{Manipulation}
\label{subsec:manipulation}
In all quantum information applications, the nonclassical states used as the main resources for the computation or communication protocols need to be suitably manipulated by implementing specific operations. For optical systems, the linear optics quantum computing (LOQC) scheme introduced in 2001 by Knill, Laflamme and Milburn (KLM) \cite{KLM} and widely studied since then, showed that the only basic elements required in addition to single-photon sources and detectors are beam splitters and phase-shifters, when the qubits are encoded in the path of the photons (also known as dual-rail encoding). The same scheme can be used with polarization-encoded photonic qubits by using wave-plates and polarizing beam splitters instead \cite{KLMpolar}.
The vast majority of the quantum photonics circuits demonstrated to date have used the dual-rail encoding because the building blocks are much easier to integrate than their polarization counterparts. Significant recent results include a silica-on-silicon reconfigurable circuit of nested Mach-Zehnder interferometers with electrically tunable phase-shifters \cite{OBrien:Science349}, an AlGaAs tunable Mach-Zehnder interferometer relying on the Pockels effect \cite{OBrien:OComm327}, a UV-written glass waveguide circuit for on-chip quantum teleportation with thermo-optic phase-shifters \cite{Walmsley:NatPhot8} (see Fig.~\ref{fig:tel}), femtosecond laser written glass circuits with up to 13 input and output optical modes for Boson Sampling \cite{Sciarrino:SciAdv1} and the previously mentioned femtosecond laser written glass circuits for the creation of single-photon W states over 16 spatial modes \cite{GHP:natphoton14}. Femtosecond laser written glass circuits have also been used recently to manipulate polarization-encoded photons \cite{Sansoni:NatComm5} (see Fig.~\ref{fig:Devices1}(e)) and even polarization-path hyper-entangled photons \cite{Sansoni:NatPhot7,Orieux:LSA2015}.
Note that many of the developed circuits have been designed for visible or near-infrared light because of the better performance of currently available sources and detectors at these wavelengths; however all the demonstrated devices can be adapted straightforwardly for telecom wavelengths by adjusting the waveguide cross-sections, as the materials used for the chips are also transparent in the telecom range. Indeed, thermally reconfigurable femtosecond laser written glass circuits at telecom wavelengths have been demonstrated very recently \cite{FMR:lsa15}.
Additional components are needed to interconnect the chips performing photon manipulation and the optical fibers of the communication network. In particular, on photonic circuits with dual-rail encoding, it is necessary to convert the qubits to an encoding that is more adapted for propagation in optical fibers, such as polarization or time-bin encoding. Such integrated qubit converters have been demonstrated recently on silicon-on-insulator chips (see Fig.~\ref{fig:Devices1}(b)) with 2D grating couplers allowing to interconvert polarization and path qubits with a fidelity of 98\% \cite{OBrien:arxiv1508,Massar:OL38}. For semiconductor-based platforms (silicon and III-V alike), because of the large refractive index difference between the optical fibers and the waveguides of the chip, it is also necessary to devise an efficient way of transferring one guided mode to the other. Several strategies can be adopted depending on the encoding: if there is only one input polarization mode then 1D grating couplers can be used to couple light incident from the chip surface into a waveguide, while if polarization matters then tapered waveguides and butt-coupled fibers on the chip edge should be used instead \cite{VP}. Spectral filters are also required inside the chips, especially just after the photon sources, to remove the powerful pump beam so that it does not generate noise in the detectors or cause unwanted nonlinear effects in the circuit. These filters have to be designed carefully so that they have low loss and preserve entanglement. One example of such integrated filters combined with a source of quantum states has been realized in a silicon chip \cite{Bajoni:PRX2014} (see Fig.~\ref{fig:Devices1}(a)) with more than 95 dB of rejection of the pump.
Finally, it may be useful to convert single photons from one wavelength to another in some particular cases, for example, if efficient detectors or quantum memories are available in a wavelength range different from the telecom one. This conversion, to be truly useful, must be done efficiently at the single-photon level and must not degrade the quantum state by noise addition. Such a process has been reported recently with PPLN waveguides to convert single photons from 910 nm to 600 nm with an internal conversion efficiency over 70\% and similar signal-to-noise ratio before and after conversion \cite{NIST:PRL2012}.
\subsection{Storage}
\label{subsec:storage}
A crucial element of future quantum communication networks are quantum memories, namely devices able to store a quantum state for a tunable amount of time before restituting it on-demand with high fidelity (see \cite{Tittel:JMO60,Review:QMemoryApplications} for recent reviews). Indeed, such a device is an essential part of quantum repeaters, allowing to increase the achievable communication distance between two users that is currently limited by the propagation losses of optical fibers. By enabling the synchronization of photonic processes and hence deterministic system operation, quantum memories also provide an efficient way to deal with the probabilistic nature of the multi-photon state emission, and are therefore a key factor for scalability. We note that the benefits of quantum memories extend well beyond this first application, as pointed out in Ref. \cite{Review:QMemoryApplications}. Indeed, they could also provide a way to manipulate the photons, for instance, as spectral shapers or frequency converters.
To date, no integrated memory directly compatible with optical telecommunication networks has been demonstrated. Recently, two integrated memories have shown promising results to store red frequency photons: one of them relies on a LiNbO$_3$ waveguide doped with thulium ions \cite{Tittel:PRL108}, while the other used the femtosecond laser writing technique to fabricate a waveguide inside a Y$_2$SiO$_5$ crystal doped with praseodymium ions \cite{CSM:arxiv15}. A cesium vapour-filled hollow core fiber operated at room temperature \cite{Sprague:NJP15} has also shown encouraging results. Note that these memories would be compatible with telecommunication networks by associating them with photon pair sources emitting one photon in the telecom range and one photon in the visible region of the spectrum where most accessible atomic transitions occur. To avoid the need for such specific dual band photon sources or single-photon frequency conversion devices, telecom compatible memories based on Erbium-doped fibers \cite{Tittel:PRL115} have been investigated recently. Much remains to be done in this domain to find a practical way of integrating these memories and achieve the high fidelities, efficiencies and long storage times needed for applications. A major difficulty encountered is, among others, the bandwidth mismatch between the atomic transitions (a few GHz) and typical photon sources (a few tens of GHz to a few THz).
\subsection{Detection}
\label{subsec:detection}
Finally, a ubiquitous component of quantum communication systems exploiting single-photon properties is the single-photon detector. In bulk experiments performed at telecom wavelengths, InGaAs single-photon avalanche photodiodes (SPADs) are typically used; however, their detection efficiency at these wavelengths is generally limited to around 10-20\% as they are plagued with too much dark count noise at higher efficiencies. This is why a lot of effort has been devoted to an alternative approach, namely superconducting single-photon detectors (SSPDs), in which the absorption of single photons induces local heat that can be detected by a change in the resistivity of a superconducting wire (for a relatively recent review see \cite{Review:SNSPD2012}). Indeed, these detectors offer much larger detection efficiencies than SPADs, with very little dark count noise and a reduced jitter. Additionally, and crucially for some quantum information applications, they allow photon-number resolving detection.
\begin{figure}
\caption{NbN SNSPDs integrated on top of AlN waveguides by SiN membrane transfer. The superconducting NbN nanowire detector before transfer can be seen in panel a. Panels b, c and d show the steps of the transfer of the SiN membrane containing the detector onto the AlN waveguide, by means of a tungsten microprobe. Panel e shows the final device with the detector on top of the waveguide. Figure reproduced from Ref. \cite{Englund:NatComm6}
\label{fig:Devices2}
\end{figure}
In general, the integration of single-photon detectors is challenging. SSPDs present some advantages in this respect, as the deposition of a superconducting nanowire on top of a waveguide allows the photons to evanescently couple to the detector and be efficiently absorbed on short propagation lengths (see Fig.~\ref{fig:Devices2}). This technique is in principle compatible with all the different integration platforms we have introduced. The main challenge resides in finding a good fabrication technique such that the outstanding performances of bulk superconducting detectors are preserved in the integrated geometry. Several high quality results have been demonstrated for such superconducting nanowire single-photon detectors (SNSPDs) recently with NbN nanowires deposited on top of SiN \cite{Pernice:SR5}, Si and AlN \cite{Englund:NatComm6} or AlGaAs \cite{Fiore:IEEE2015} waveguides, yielding on-chip detection efficiencies of more than 70\%, 50\% and 20\% respectively. Despite these very promising results, one remaining limitation of these detectors is that they need to be cooled at cryogenic temperatures, which necessarily complicates practical network deployments and may hinder their integration on the same chip with other components performing better at room temperature.
As we will see in the following section, on-chip detection is currently the main obstacle on the way to fully integrated qubit-based quantum communication systems, while for CV-based quantum communications which require coherent detection techniques it is possible to use standard devices developed by the silicon photonics industry.
\section{Integrated quantum communication systems}
\label{sec:systems}
In the previous section we have seen a great number of studies targeting high performance on-chip quantum communication devices. In view of future large-scale quantum networks incorporating such devices, it is also crucial to take a system design approach including the components themselves but also the surrounding network environment and the additional constraints imposed by the implemented quantum communication protocols. Interestingly, although the performance of on-chip components is not necessarily superior to that of their bulk counterparts at the first stages of development, the full advantage of integration may appear once the entire system performance has been characterized.
There is a wide range of protocols and applications pertaining to quantum communications \cite{GT:natphoton07}. By far the most developed is quantum key distribution (QKD), while other important quantum cryptographic primitives include bit commitment, coin flipping, oblivious transfer, secret sharing, digital signatures, anonymous communication, secure identification, etc. Quantum communication complexity protocols, such as quantum fingerprinting, as well as random number generation are also essential elements of quantum communications networks. Finally, the backbone of such networks is entanglement distribution, leading to quantum teleportation, quantum relays and quantum repeaters. In the following, we will review recent integration efforts for QKD and for entanglement distribution and quantum teleportation; these case studies illustrate the challenges linked to system integration and pave the way to on-chip implementation of all other aforementioned protocols and, ultimately, of quantum communication networks.
\subsection{Quantum key distribution}
\label{subsec:qkd}
The ability to distribute secret keys between two parties over an untrusted channel with unconditional (or, information-theoretic) security, that is regardless of the capacities of a malicious eavesdropper, is arguably one of the most powerful achievements of quantum information science. Since the first proposal of a QKD protocol in 1984 \cite{BB84}, this application has advanced tremendously \cite{SBC:rmp09,LCT:natphoton14} and is now leading the way to the industrial development of quantum technologies. Despite its importance, however, efforts towards the development of chip-scale integrated QKD systems have been limited until recently. This development will be crucial for moving on from the current bulky, costly systems to compact and lightweight devices that can be mass manufactured at low cost. In this way, integration can open the way to the wide adoption of quantum technologies for securing communications in quantum information networks.
In QKD implementations, the key information is typically encoded either in discrete variables (DV), such as the polarization or phase of single photons (or more commonly in practical implementations, of weak coherent pulses, as discussed earlier), or in continuous variables (CV), such as the values of the quadrature components of the quantized electromagnetic field, those for instance of coherent states. These states are then transmitted over optical fibers or free space and detected at the receiver's end using single-photon detectors for DV-QKD protocols or coherent (homodyne or heterodyne) detection techniques for CV-QKD protocols. A prominent example of a DV-QKD protocol is decoy-state BB84 \cite{LMC:prl05,Wang:prl05}, while Gaussian modulated coherent state CV-QKD \cite{GG:prl02} is the most commonly used protocol in the CV framework. Finally, the distributed-phase-reference protocols, such as differential phase shift (DPS) QKD \cite{IWY:prl02} and coherent one way (COW) \cite{SBG:apl05}, where the key information is encoded on the phase difference between adjacent weak coherent pulses and on photon arrival times, respectively, also require the use of single-photon detectors. Beyond these standard protocols, there have been major recent advances in QKD, including in particular the proposal of measurement-device-independent (MDI) QKD \cite{LCQ:prl12} (see also \cite{BP:prl12}) and of the so-called Round-Robin (RR) DPS QKD protocol that does not require monitoring the signal disturbance to establish security \cite{SWK:nature14}. The former scheme provides a practical way of eliminating security breaches due to the imperfections of the receiver's detectors, while the latter features a very high noise tolerance. Both schemes have been demonstrated experimentally, for instance, in Refs. 
\cite{TYZ:prx15,VLC:jmo15,CLF:natphoton16,TST:natphoton15,WYC:natphoton15,GCL:prl15}, but their implementation would greatly benefit from photonic integration; this is especially true for RR-DPS QKD, which relies on a complex setup involving multiple interferometers, time delay circuits, etc.
Because of the birefringence of optical fibers, polarization encoding in DV-QKD is not always desirable, especially for long-distance communications; for this reason, the most advanced systems typically encode the key information on phase or time bins, which brings the need for precise and stable interferometers. These are in fact used in all phase-encoding-based protocols, including DPS QKD for example. They are the first components in QKD systems to have been integrated; indeed, planar lightwave circuits (PLCs) based on silica-on-silicon technology have been used for many years as asymmetric Mach-Zehnder interferometers \cite{DTL:opex06,YFT:ol12} because of their low loss and temperature-stabilized operation.
Although the use of PLC-based devices illustrates the favorable characteristics of on-chip QKD system components, significant efforts are required to develop fully integrated systems. Early steps in this direction considered a client-server scenario, where the QKD client (Alice) holds a low cost, lightweight device with integrated elements while the server (Bob) incorporates the large system resources that are difficult to integrate, in particular the single-photon detectors for DV-QKD. This scenario was motivated precisely by this difficulty that we have discussed in Section \ref{subsec:detection}, and could have some applications in use cases where many users receive secret keys by a few providers; MDI QKD, for instance, is well adapted to this scenario. In Ref.~\cite{ZAM:prl14}, the developed miniaturized client device includes a LiNbO$_3$ integrated polarization controller in a system implementing the so-called reference frame independent QKD protocol in a two-way optical fiber configuration where the server includes both the photon source and the single-photon detectors (Fig.~\ref{fig:qkd1}(a)). Ref.~\cite{VRF:jstqe15} demonstrated a handheld device for Alice, tailored for polarization-encoded, short range, free space QKD. It is based on an integrated optics architecture combining various techniques. In particular, vertical cavity surface emitting lasers, coupled to micro-polarizers fabricated using lithography, are used to generate the polarization qubits. They are combined with a waveguide array, fabricated using femtosecond laser writing on glass (which is suitable for polarization encoding as we have seen previously) for ensuring the spatial overlap of these qubits (Fig.~\ref{fig:qkd1}(b)).
\begin{figure}
\caption{(a) Experimental setup for client-server reference frame independent QKD \cite{ZAM:prl14}.}
\label{fig:qkd1}
\end{figure}
The above systems provided a proof-of-principle characterization of partially integrated QKD systems, however fully chip-based systems are necessary for a wide range of applications, including long-distance secure communications or key sharing in a server-server scenario, for instance between data centers with high security requirements in a cloud network infrastructure. Such systems are also needed for enhanced functionality and integrability in current communication networks. For DV-QKD and distributed-phase-reference protocols, the main limitation in this direction remains for the moment the need for on-chip single-photon detectors. Notwithstanding this element, a recent experiment demonstrated a high degree of system integration \cite{SEG:arxiv15} (Fig.~\ref{fig:qkd2}(a)). In this system, Alice's module is integrated on InP and includes a tunable telecom laser source and both active and passive elements such as electro-optic phase modulators and Mach-Zehnder interferometers enabling the reconfigurable implementation of the transmitter functionalities of the decoy-state BB84, DPS and COW QKD protocols. Bob's module on the other hand is integrated on SiO$_x$N$_y$ and includes passive elements enabling the receiver functionalities of these protocols, with the exception of the detection stage. This system achieved GHz operation with estimated secret key generation rates on the order of a few hundreds of kbit/s over a channel attenuation corresponding to 20 km of standard optical fiber in laboratory conditions.
\begin{figure}
\caption{(a) Photonic chip architecture combining several integrated devices for the implementation of discrete-variable and distributed-phase-reference QKD protocols \cite{SEG:arxiv15}.}
\label{fig:qkd2}
\end{figure}
The implementation of CV-QKD systems requires only standard telecom components \cite{JKL:natphoton13,DL:entropy15}, hence opening the way to complete system integration, including the coherent detection stage. Progress in this direction has been achieved recently using Si photonics, with a proof-of-principle photonic chip including some of the functionalities required by the Gaussian modulation coherent-state CV-QKD protocol \cite{PZH:qipc15} (Fig.~\ref{fig:qkd2}(b)), including amplitude and phase modulation and shot noise limited homodyne detection using germanium (Ge) photodiodes. Further system integration is feasible and particularly relevant in the context of new advances in this field, towards high-speed CV-QKD systems using a locally generated phase reference signal \cite{QLP:prx15,SBC:prx15,HHL:ol15}.
\subsection{Entanglement distribution and quantum teleportation}
\label{subsec:teleportation}
Intrinsic losses in photonic communication channels eventually make it impractical to perform communication tasks over point-to-point links, and impose a network structure. Quantum networks are indeed crucial for increasing the range of quantum communication systems and will be the neuralgic element of the future quantum Internet \cite{Kimble:nature08}. This task is enabled by entanglement distribution, the fundamental building block of quantum teleportation and quantum repeaters. Because of its importance, long-distance entanglement distribution has been thoroughly investigated experimentally with impressive results, for instance over 300 km of optical fiber \cite{IMT:oe13}. Furthermore, quantum teleportation over a 100-km fiber link \cite{TDS:optica15} and entanglement swapping over a 143-km free-space link \cite{HSF:pnas15} were shown recently. Combined with quantum memories, these protocols lead to embryonic demonstrations of quantum repeaters, such as those of Refs. \cite{BCT:natphoton14,SPZ:arxiv15}, which are suitable for fiber-based quantum communication networks.
The aforementioned advanced implementations are all relying on largely explored nonlinear optical materials, namely PPLN and PPKTP, for the generation of entangled or heralded single-photon states, and superconducting nanowire or avalanche photodiode single-photon detectors for fiber optic and free space experiments, respectively. The PLC interferometers routinely used in phase-encoded QKD systems were also employed in such setups while fiber-based quantum memories were used in Ref. \cite{SPZ:arxiv15}. These efforts towards compact systems are promising, however in general full integration of such implementations is a challenging task.
\begin{figure}
\caption{Silica-on-silicon chip architecture for quantum teleportation \cite{Walmsley:NatPhot8}.}
\label{fig:tel}
\end{figure}
Some further steps towards on-chip systems have been taken recently. We remark in particular the quantum teleportation experiment of Ref.~\cite{Walmsley:NatPhot8}, performed using a single UV-written silica-on-silicon chip including directional couplers (integrated beam splitters) and phase shifters for the realization of the path-encoded qubits, Hadamard and controlled-phase gates, and state tomography required by the implemented protocol (Fig.~\ref{fig:tel}). This protocol bypasses the feedforward operation typically required in quantum teleportation, and additionally the parametric down-conversion sources and avalanche photodiodes remain off chip; however, the experimental demonstration of the aforementioned functionalities opens the way to more complex implementations in future integrated systems. As we have previously mentioned, a silica-on-silicon chip was also used for continuous-variable entanglement generation and characterization - using off-chip homodyne detectors \cite{OBrien:NatPhot9}; these are the first steps towards the demonstration of quantum teleportation in the CV framework as well. Finally, Ref.~\cite{OBrien:arxiv1508} showed entanglement distribution, certified by a Bell test violation, between two separate fiber-connected chips using silicon photonics and based on path and polarization encodings (see Fig.~\ref{fig:Devices1}(b)). This is a natural setting in a network, hence such experiments advance towards realistic communication scenarios that rely on flexibility and interconnectivity.
A common feature of the above integrated quantum communication experiments is the absence of any appreciable distance between the communicating parties. Losses are indeed currently a bottleneck in these implementations, and reducing them at all stages of the system (within the chip, fiber coupling, etc) will be of utmost importance in order to improve the system performance and hence develop practical on-chip quantum communication applications.\\
Let us finish this section by remarking that, in addition to developing the integrated quantum communication systems themselves, several other elements come into play in a practical network environment. In particular, the synchronization of the devices typically requires very fast electronics, hence integration of electronic components operating at GHz rates has to be devised. Furthermore, the packaging of the systems, whose operation requires the tuning and routing of a number of interconnected components, needs to satisfy stringent practical constraints. Finally, multiplexing techniques routinely used to increase the bandwidth in data communications and successfully tested in quantum communications, for example, for QKD \cite{PDL:apl14} and entanglement distribution \cite{TGO:jap15}, will need to be adapted to chip-based systems.
\section{Conclusions and outlook}
In this review, we have discussed significant recent advances in the field of integrated quantum communications, which have addressed challenges linked to the main components used in such implementations but also to the system aspects inherent in operation in a network environment. The ingenious solutions that have been devised until today lead to further challenges that need to be tackled next.
On the components side, improving the heralding efficiency of on-chip sources of single and multi-photon states will be crucial, while the development of efficient frequency converters and integrated quantum memories, single-photon detectors and photon counters needs to be pursued using the techniques and integration platforms discussed in the previous sections, potentially adopting hybrid solutions. On the systems side, the realization of the first chip-based QKD and quantum teleportation systems, even at a prototypical stage, is extremely promising. The reduced payload of integrated quantum communication systems opens the way to a great range of applications: for example, mobile QKD networks can be envisaged, while crucially such systems may be deployed on satellites overcoming the challenge of losses, inherent in fiber optic networks, hence bringing quantum communications to the global scale. Further advances in integrated systems, including overcoming the losses and developing multiplexing techniques as discussed previously, will be required to achieve advanced protocol implementations on chip, such as device-independent and measurement-device-independent QKD, distributed communications employing multipartite states, as well as active quantum teleportation and quantum repeater links surpassing the performance of direct transmission links.
It is important to remark that some of the aforementioned protocols have a direct link with tests of fundamental physics. For example, device-independent QKD requires loophole-free Bell tests, which have recently been demonstrated in diamond \cite{HBD:nature15} and using nonlinear waveguides \cite{SMC:arxiv15,GVW:arxiv15}. This type of experiments can become routine thanks to the new opportunities offered by integration. More generally, on-chip systems for long-distance entanglement distribution and quantum communications in space can enable testing the quantum/classical interface and foundational notions such as nonlocality and contextuality in previously inaccessible regimes.
At a practical level, targeting truly useful systems with potential for industrial development will require the further enhancement of available infrastructures, both on the chip fabrication side, by developing worldwide high capacity multiple-use foundry services and growth facilities, and on the network side, by using deployed fibers and satellite devices for the purposes of quantum communication experiments. Furthermore, work towards certification and standardization, which is important for the impact and validation of future applications and already actively pursued for QKD, has to take into account the specificities of chip-based systems as well. Finally, we note that research in integrated quantum communications is of interest to the underlying technologies too; as an example, the requirements of Si-integrated modulator performance for QKD are different from those needed for standard classical optical communications, hence the exploration for suitable characteristics opens up new possibilities more generally in silicon photonics.
Because of the great promise it holds for demonstrating the true disruptive potential of quantum information science in large-scale systems, the field of integrated quantum communications is the subject of extremely active and innovative research work worldwide. Beyond any doubt, the resulting developments in the next years will change the landscape of our future communication and computation capacities and practices.
\ack
ED thanks the participants of the NSF Quantum Information on Chip Workshop held in Padova, Italy, in October 2015, for stimulating discussions; some of the discussed topics are reflected in this review. The authors acknowledge financial support from the City of Paris (project CiQWii), the French National Research Agency (projects QRYPTOS and COMB), the Ile-de-France Region (project QUIN), and the France-USA Partner University Fund (project CRYSP).
\section*{References}
\end{document} |
\begin{document}
\title{Beals characterization of
pseudodifferential operators\ in Wiener spaces}
\begin{abstract}
\noindent
The aim of this article is to prove a Beals type characterization theorem for pseudodifferential operators in Wiener spaces. The definition of pseudodifferential operators in Wiener spaces and a Calder\'on-Vaillancourt type result appear in \cite{AJN}. The set of symbols considered here is the one of \cite{AJN}. The Weyl calculus in infinite dimension considered here emphasizes the role of the Wick bi-symbols.
\end{abstract}
\parindent=0pt
\tableofcontents
\parindent = 0 cm
\parskip 10pt
\baselineskip 15pt
\section{Statement of the main result.}\label{s1}
In quantum field theory, such as quantum electrodynamics which will be considered in a forthcoming article, the set of states of the quantized field may be chosen as a symmetrized Fock space ${\cal F}_s (H_{\bf C})$
over an Hilbert space $H$. Among the operators acting in such spaces, those coming from the Weyl calculus in infinite dimension and recently introduced in \cite{AJN} (see also in \cite{AJN2} the case of the large but finite dimension) may have applications to modelling the interaction of the quantized field with a fixed particle of spin $1/2$.
These applications will be developed in a forthcoming article, but we need some properties which are not in \cite{AJN} and which we present here.
We denote by $H$ a real separable Hilbert space and by $H_{\bf C}$ its complexification. The norm of $H$ is denoted by
$|\cdot |$ and the scalar product of two elements $a$ and $b$ of $H$
is denoted by $a \cdot b$. The norm of an element of $H^2$ is denoted by $|\cdot |$. For all $X = (x ,\xi)$ and $Y
= (y ,\eta)$
in $H^2$, we set
\begin{equation}\label{1.1}
X \cdot \overline Y = (x+i \xi) \cdot (y-i\eta),
\qquad \sigma (X , Y) = y\cdot \xi - x\cdot \eta. \end{equation}
We recall that ${\cal F}_s (H_{\bf C})$ is the completion of the direct sum of the subspaces ${\cal F}_n$ ($n\geq 0$) where ${\cal F}_0$ is one dimensional and represents the vacuum, while ${\cal F}_1 = H_{\bf C} $ and ${\cal F}_n$ ($n\geq 2$) is the $n-$ fold symmetrized tensor product representing the $n$ particles states. This space is not very convenient for the Weyl calculus since we have to write down integrals but it is isomorphic to some $L^2$ space on a suitable Banach space $B$ endowed with a gaussian measure.
It is known that, for any separable real Hilbert space $H$ there exists,
\hskip 1cm - a Banach space $B$ containing $H$,
\hskip 1cm - a gaussian measure $\mu _{B , h }$ with variance $h$ on the $\sigma-$algebra of the Borel sets of $B$, for all $h>0$,
satisfying some assumptions we formulate here in saying that
$(i, H, B)$ is an abstract Wiener space
(where $i$ is the injection from $H$ into $B$). See \cite{G1}\cite{G2}\cite{KU} and \cite{AJN} for precise conditions which should be fulfilled by $B$. See also \cite{G3} (example 2, p. 92) for a standard way of constructing a space $B$ satisfying the assumptions.
Identifying $H$ with its dual, one has,
\begin{equation}\label{1.2} B' \subset H' = H \subset B. \end{equation}
If $H$ is finite dimensional, we have $B= H$ and for all Borel sets $\Omega$ in $H$,
\begin{equation}\label{1.3} \mu _{H , h} ( \Omega) = (2\pi h)^{-{\rm dim} (H)/2}
\int _{\Omega} e^{-{|y|^2 \over 2h}} dy.
\end{equation}
In the general case, the symmetrized Fock space ${\cal F}_s (H_{\bf C})$ (\cite{SE},\cite{RS}) is isomorphic to the space $L^2(B, \mu_{B , h/2})$ (see \cite{J}\cite{SI}). The complexification $H_{\bf C}
\subset {\cal F}_s (H_{\bf C}) $ is identified with a closed subset of $L^2(B, \mu_{B , h/2})$ which in field theory is the subspace corresponding to the states of the field with exactly one particle.
The Weyl calculus in infinite dimension of \cite{AJN} allows to associate to some suitable functions $F$ on the Hilbert space $H^2$, bounded and unbounded operators in ${\cal F}_s (H)$ (or in $L^2(B, \mu_{B , h/2})$). Let us first recall the assumptions filled by functions $F$.
\begin{defi}\label{d1.1} Let $(i, H, B)$ be a Wiener space satisfying (\ref{1.2}).
We choose a Hilbert
basis $(e_j )_{(j\in \Gamma)}$ of $H$, each vector belonging to
$B'$, indexed by a countable set $\Gamma$. Set $u_j = (e_j , 0)$ and
$v_j = (0, e_j)$ $(j\in \Gamma)$. A multi-index is a map
$(\alpha , \beta )$ from $\Gamma $ into
$\N \times \N$ such that $\alpha_j = \beta _j = 0$ except for a
finite number
of indices. Let $M$ be a nonnegative real number, $m$ a nonnegative integer
and $\varepsilon = (\varepsilon_j )_{(j \in \Gamma)}$ a family of
nonnegative real numbers. One denotes by $ S_m(M, \varepsilon)$ the
set of bounded continuous functions $ F:H^2\rightarrow {\bf
C}$ satisfying the following conditions. For every multi-index
$(\alpha , \beta)$ such that $0 \leq \alpha_j \leq
m$ and $0 \leq \beta_j \leq m$ for all $j\in \Gamma$, the following
derivative,
\begin{equation}\label{1.4}\partial_x^{\alpha}\partial_{\xi}^{\beta} F = \left [\prod _{j\in \Gamma }
\partial _{u_j} ^{\alpha_j} \partial _{v_j} ^{\beta_j}\right ] F \end{equation}
is well defined, continuous on
$H^2$ and satisfies, for every $(x , \xi)$ in $H^2$,
\begin{equation}\label{1.5}\left |\partial_x^{\alpha}\partial_{\xi}^{\beta} F(x , \xi)
\right | \leq M \prod _{j\in \Gamma } \varepsilon_j ^{\alpha_j +
\beta_j}. \end{equation}
\end{defi}
For each summable sequence
$(\varepsilon_j)$, the first step in \cite{AJN} is to associate to each function $F$ in $S_2(M, \varepsilon)$, a quadratic form $Q_h^{weyl} (F)$ on a dense subset ${\cal D}$ (see
Definition \ref{d2.1} below), and not an operator on the above Hilbert spaces.
One may also associate a quadratic form $Q_h^{weyl} (F)$ on ${\cal D}$ with symbols $F$ which are not in the above set, in particular if they are not bounded.
To do it, it is sufficient that the two conditions below are satisfied:
(H1) The function $F: H^2 \rightarrow {\bf C} $ has a stochastic extension
$\widetilde F : B^2 \rightarrow {\bf C} $ in $L^1 ( B^2 , \mu _{B^2 , h/2})$ (see Definition
4.4 of \cite{AJN}, which recalls and adapts a previous definition of L. Gross \cite{G1}).
(H2) The action on $|\widetilde F|$ of the following heat operator
\begin{equation}\label{1.6} ( H_{h/2} |\widetilde F | ) (X) = \int _{B^2} |\widetilde F(X+Y) |
d\mu _{B^2 , h/2} (Y) \hskip 2cm X\in H^2\end{equation}
is polynomially bounded, i.e., it satisfies for $m\geq 0$ and
$C>0$,
\begin{equation}\label{1.7} ( H_{h/2} |\widetilde F | ) (X) \leq C (1+ |X|)^m \end{equation}
(that is to say that the norm in formula (12) in \cite{AJN} is finite).
In Theorem \ref{t2.2}, we recall the construction of $Q_h^{weyl} (F)$ in a slightly simplified way, but the construction in \cite{AJN} uses the analog in infinite dimension of Wigner functions which may have its own interest. Hypotheses (H1) and (H2) are satisfied if $F$ belongs to $S_2(M, \varepsilon)$, the sequence $(\varepsilon _j)$ being
summable. Inequality (\ref{1.7}) is then satisfied with $C=M$ and $m=0$. See others examples in Section \ref{s2}.
Next, as shown in \cite{AJN} (Theorem 1.4), if $F$ belongs to $S_2(M, \varepsilon)$ then $Q_h^{weyl} (F)$
is the quadratic form of a bounded operator in $L^2(B, \mu_{B , h/2})$ or equivalently, bounded in ${\cal F}_s(H_{\bf C})$. In addition, this operator satisfies, if $0 < h < 1$,
\begin{equation}\label{1.12} \Vert Op_h^{weyl} (F) \Vert \leq M \prod _{j\in \Gamma} (1 + 81\pi h S_{\varepsilon} \varepsilon_j ^2) \end{equation}
where
\begin{equation}\label{1.13} S_{\varepsilon} = \sup_{j\in \Gamma} \max(1, \varepsilon_j ^2 ).\end{equation}
The hypothesis (H2) in Theorem
1.4 in \cite{AJN}, which is not mentioned here, is always satisfied if
$F$ belongs to $S_2(M, \varepsilon)$ and if the sequence $(\varepsilon_j )$ is summable (Proposition
8.4 in \cite{AJN}).
We now have to define and to compute commutators of these operators with momentum and position operators. In finite dimension $n$, their compositions and commutators are classically defined as operators from ${\cal S} (\R^n)$ into
${\cal S}' (\R^n)$. In our case, ${\cal S} (\R^n)$ is replaced by space
${\cal D}$ of Definition \ref{d2.1}. In the absence of an analog of ${\cal S}' (\R^n)$,
we prefer instead to use quadratic forms on ${\cal D}$ (see \cite{RS}). We then consider mappings $(f , g) \rightarrow A(f , g)$ on ${\cal D} \times {\cal D}$ that are linear in $f$ and antilinear in $g$. A notion of continuity is given in Section \ref{s2}.
One may define two compositions (left and right) of a quadratic form $Q$ on the space ${\cal D}$ of Definition \ref{d2.1}
with an operator $A : {\cal D} \rightarrow {\cal D}$ whose formal adjoint $A^{\star}$ also maps ${\cal D}$ into ${\cal D}$.
One sets, for all $f$ and $g$ in ${\cal D}$,
\begin{equation}\label{a2}( Q \circ A ) (f , g) = Q ( Af , g),\qquad( A \circ Q ) (f , g) = Q ( f , A^{\star} g).\end{equation}
One then defines the commutator $[A, Q ]$ and $({\rm ad} A) Q$ as the following quadratic form,
\begin{equation}\label{a3}[A, Q ] (f , g) = Q ( f , A^{\star} g ) - Q ( Af , g). \end{equation}
Thus, one can define the iterated bracket
$({\rm ad} A_1) \dots ({\rm ad} A_n) Q$ if $A_1$,
\dots $A_n$ are operators from ${\cal D}$ into ${\cal D}$.
We see in Proposition \ref{p2.3} that one may associate with each continuous linear form $G$ on $H^2$, not only a quadratic form $Q_h^{weyl} (G)$, but also an operator $Op_h^{weyl} (G)$ from ${\cal D}$ to ${\cal D}$. This Weyl operator is the Segal field, up to a numerical factor, and may be directly defined in ${\cal F}_s (H)$ using creation and annihilation operators, without using the Weyl calculus. In particular, when
$F(x , \xi) = a \cdot x$ with $a$ in $H$, the corresponding Weyl operator will be denoted $Q_h (a)$ (position operator). When $F(x , \xi) = b \cdot \xi $,
when $b$ in $H$, the operator will be denoted $P_h(b)$ (momentum operator).
If $F$ belongs to $S_m (M, \varepsilon)$ and $G$ is a continuous linear form on $H^2$ then Proposition \ref{p2.6} allows us to extend the following result which is well-known in finite dimension,
\begin{equation}\label{a1} [Q_h^{weyl } (F) , Op_h^{weyl } (G)] = {h\over i} Q_h^{weyl } ( \{ F , G \} ).\end{equation}
In particular, if $(e_j)$ is the Hilbertian basis of $H$ chosen to define our sets of symbols then equality (\ref{a1}) gives,
$$ [Q_h (e_j) , Q_h^{weyl } (F) ] = - {h\over i} Q_h^{weyl } \left ( {\partial F
\over \partial \xi_j} \right ), $$
$$ [P_h (e_j) , Q_h^{weyl } (F) ] = {h\over i} Q_h^{weyl } \left ( {\partial F
\over \partial x_j} \right ). $$
One may iterate and consider iterated commutators while restricting ourselves to some set of multi-indices. We denote by ${\cal M}_m$ the set of pairs $(\alpha , \beta)$ where $\alpha = (\alpha_j )_{(j\in \Gamma)}$ and
$\beta = (\beta_j )_{(j\in \Gamma)}$ are sequences of nonnegative integers such that $\alpha_j = \beta _j =0$ except for a finite number of indices $j$, and such that $\alpha _j \leq m$ and $\beta_j \leq m$ for all $j\in \Gamma$.
One associates to each multi-index $(\alpha , \beta)$ the following iterated commutator,
$$ ({\rm ad}P_h )^{\alpha} ({\rm ad}Q_h )^{\beta} Q_h ^{weyl} (F) = \prod _{j\in \Gamma} ( ad P_h(e_j) ) ^{\alpha _j}
\prod _{k\in \Gamma} ( ad Q_h(e_k) ) ^{\beta _k}Q_h ^{weyl} (F). $$
In the same way, if $F$ is in $S_m(M, \varepsilon)$ and if $(\alpha , \beta)$
is in ${\cal M}_p$, $p \leq m-2$,
$$ ({\rm ad}P_h )^{\alpha} ({\rm ad}Q_h )^{\beta} Q_h ^{weyl} (F) = (-1)^{|\beta |}
(h/i) ^{|\alpha + \beta |} Q_h ^{weyl} (\partial_x^{\alpha } \partial_{\xi}^{\beta } F). $$
From Theorem 1.4
in \cite{AJN}, the above Weyl quadratic form is associated to a bounded operator in $L^2(B , \mu_{B , h/2})$, denoted as below and verifying,
\begin{equation}\label{1.15}\Vert ({\rm ad}P )^{\alpha} ({\rm ad}Q )^{\beta} Op_h^{weyl}(F) \Vert \leq
M \prod _{j\in \Gamma} (1 + 81\pi h S_{\varepsilon} \varepsilon_j ^2)
\prod _{j\in \Gamma} (h\varepsilon _j )^{\alpha _j +\beta _j}.\end{equation}
The purpose of this work is to prove the reciprocal statement, as Beals \cite{Bea} did in finite dimension
(see also \cite{BO1}\cite{BO2} and \cite{BO-C} for adaptations to other classes of symbols in finite dimension).
\begin{theo}\label{t1.2} Let $(i, H, B)$ be a Wiener space satisfying (\ref{1.2}).
Let $A_h$ be a bounded operator in $L^2(B, \mu_{B, h/2})$. Let
$(e_j)$ $(j\in \Gamma)$ be a Hilbertian basis of $H$ consisting of elements in $B'$.
Let $M > 0$ and let $(\varepsilon _j ) _{(j \in \Gamma)}$ be a
summable sequence of real numbers. Let $m \geq 2$. Suppose that,
for all $(\alpha , \beta )$ in ${\cal M}_{m+4}$, the commutator
$({\rm ad}P )^{\alpha} ({\rm ad}Q )^{\beta} A_h$ (being a
priori defined as a quadratic form on ${\cal D}$) is bounded
in $L^2(B, \mu_{B, h/2})$ and that,
\begin{equation}\label{1.15b} \Vert ({\rm ad}P )^{\alpha} ({\rm ad}Q )^{\beta} A_h \Vert \leq
M \prod _{j\in \Gamma} (h\varepsilon _j )^{\alpha _j +\beta _j}.\end{equation}
Then, if $0 < h < 1$, there exists a function $F_h $ in $S_m(M', \varepsilon)$ with,
\begin{equation}\label{1.16} M' = M \prod _{j\in \Gamma} (1 + K S_{\varepsilon} ^ 2 h \varepsilon _j ^2) \end{equation}
where $K$ is a universal constant, and $S_{\varepsilon}$ is defined in (\ref{1.13}),
such that the Weyl operator $Op_h^{weyl} (F_h )$ associated to $F_h$ is equal to $A_h$.
\end{theo}
Section \ref{s2} introduces various results concerning the Weyl calculus in infinite dimension intended to be used in an upcoming work. Sections \ref{s3} to \ref{s7}
are devoted to the proof of Theorem \ref{t1.2}. Section \ref{s8} applies this
theorem to the composition of two operators defined
by the Weyl calculus. We show that the composition is also defined by this calculus, but
we do not give any results on the possible asymptotic expansion of its symbol, this result being used in a forthcoming article.
\section{Weyl calculus in infinite dimension.}\label{s2}
\subsection{Coherent states.}\label{s2.A}
For $X=(a , b)$ in $H^2$, and all $h>0$, one defines the corresponding coherent state $\Psi_{X ,h}$ (\cite{Ber}\cite{C-R}\cite{F}); these states belong to ${\cal F}_s (H_{\bf C})$ and are defined by,
\begin{equation}\label{2.1} \Psi_{(a , b) , h} = \sum _{n\geq 0}
{e^{-{|a|^2+ |b|^2 \over 4h}} \over (2h)^{n/2} \sqrt {n!} } (a+ib) \otimes \cdots \otimes (a+ib).\end{equation}
In view of the isomorphism from ${\cal F}_s (H_{\bf C})$ in $ L^2(B , \mu_{B , h/2})$,
each element $a$ of $H \subset {\cal F}_s (H_{\bf C})$ is seen as a
function in $L^2(B , \mu_{B , h/2})$ denoted $x \rightarrow \sqrt {h} \ell_a(x)$.
When $a$ is in $B' \subset H$, one has $\ell_a(x) = a(x)$. When $a$ is in $H$, it is approximated by a sequence $(a_j)$ in $B'$, we then show that the sequence $\ell_{a_j}$
is a Cauchy sequence in $ L^2(B , \mu_{B , h/2})$ and we denote by $\ell _a$ its limit.
With the same isomorphism, the coherent state $ \Psi_{(a , b) , h}$ defined in (\ref{2.1}) becomes,
\begin{equation}\label{2.2}\Psi_{X , h} (u) = e^{{1\over h} \ell _{ (a+ib)} (u) -{1\over 2h}|a|^2 -
{i\over 2h} a\cdot b},\quad X = (a , b) \in H^2,\quad {\rm a.e.}\ u\in B. \end{equation}
We see, for all $X = (x , \xi)$ and $Y = (y, \eta)$, with the notation (\ref{1.1}), that
\begin{equation}\label{2.3}< \Psi_{X h} , \Psi _{Yh}> =e^{-{1\over 4h}(|X|^2 +|Y|^2) + {1\over 2h} X \cdot \overline Y }.\end{equation}
In particular,
\begin{equation}\label{2.4}|< \Psi_{X h} , \Psi _{Yh}>| =e^{-{1\over 4h}|X-Y|^2 }.\end{equation}
We call Segal Bargmann transform (\cite{HA}) of $f$ the function
\begin{equation}\label{2.5}(T_hf) (X) = { < f , \Psi_{Xh} > \over < \Psi_{0h} , \Psi_{Xh} >},\qquad X\in H^2.\end{equation}
We know that $T_hf$ admits a stochastic extension $\widetilde T_hf $ in $L^2(B^2 , \mu _{B^2, h})$ and we know
that, $\widetilde T_h$ is a partial isometry from $L^2(B , \mu _{B, h/2})$ into $L^2(B^2 , \mu _{B^2, h})$.
\subsection{The space ${\cal D}$ and Wick symbols.}\label{s2B}
\begin{defi}\label{d2.1} For all subspaces $E$ of finite dimension in $H$,
${\cal D}_E$ denotes the space of functions $f : B \rightarrow {\bf C}$ such that,
i) the function $f$ is written under the form $ \widehat f \circ P_E$, where $\widehat f $
is a continuous function from $E$ in ${\bf C}$ and $P_E$ is the mapping from $B$ in
$E$ defined as follows, choosing an orthonormal basis
$\{ u_1 , ... u_n \}$ of $E$,
\begin{equation}\label{2.6}P_E(x) = \sum _{j=1}^n \ell _{u_j} (x) u_j,\quad a.e. \ x\in B\end{equation}
(the map $P_E$ is independent of the chosen basis).
ii) the function $E^2 \ni X \rightarrow < f, \Psi_{X h}>$ (scalar product in
$L^2(B , \mu_{B , h/2})$) is in the
Schwartz space ${\cal S} (E^2)$.
We shall denote by ${\cal D}$ the union of all spaces ${\cal D}_E$.
\end{defi}
We observe that the coherent states belong to ${\cal D}$. The condition ii)
is equivalent to say that the function $ \widehat f $ of i) is such that the function
\begin{equation}\label{2.7} E \ni u \rightarrow \widehat f (u) e^{-{|u|^2 \over 2h}}\end{equation}
belongs to ${\cal S} (E)$. One says that a quadratic form
$Q$ on ${\cal D}$ is {\it continuous} if, for all $E \subset H$ of finite dimension, there exists $C>0$ and $m\geq 0$ such that, for all $f$ and $g$ in ${\cal D}_E$,
\begin{equation}\label{2.8} |Q(f, g)| \leq C I(E, m) (f) I(E, m) (g)\end{equation}
where
\begin{equation}\label{2.9} I(E, m) (f) = \int _{E^2} |< f, \Psi_{X h}>| (1+|X|)^m dX.\end{equation}
One says that a linear mapping $T $ in ${\cal D}$ is continuous if,
for all $E\subset H$ of finite dimension, there exists $F\subset H$ of finite dimension
such that $f\in {\cal D}_E$ implies $Tf \in {\cal D}_F$ and if, for all
integer $m$, there exists $C$ and $m'$ such that,
\begin{equation}\label{2.10} I(F, m) (Tf) \leq C I(E , m') (f).\end{equation}
We shall recall the definition of the Wick symbol and bi-symbol. If $Q$ is a quadratic form
on ${\cal D}$, we denote by $S_h (Q)$ the function defined on $H^2$ by,
\begin{equation}\label{2.11} S_h (Q) (X , Y)= { Q( \Psi_{X , h} , \Psi_{Y , h}) \over
< \Psi_{X , h} , \Psi_{Y , h} >}.\end{equation}
If $Q(f , g) = < Af, g>$, where $A$ is a bounded operator in
the Fock space ${\cal F}_s (H_{\bf C})$, or equivalently in $L^2(B, \mu_{B , h/2})$, then
the symbol $S_h (Q)$ will be also denoted $S_h (A)$.
Let us recall that, if $X = (x , \xi)$ is identified with $x+i \xi$, then the function $S_h(A)$ is Gateaux
holomorphic in $X$ and antiholomorphic in $Y$.
We denote by $\sigma_h^{wick} (Q)$ the restriction to the diagonal of the above function,
\begin{equation}\label{2.12}\sigma_h^{wick} (Q) (X) = Q( \Psi_{X , h} , \Psi_{X , h}).\end{equation}
\subsection{Definition of the Weyl calculus in infinite dimension.}\label{s2.C}
If $H= B =\R^n$ and if, say, $F$ is a $C^{\infty }$ function
on $\R^{2n}$ bounded together with all its derivatives, one associates with $F$ an operator $Op_h^{weyl} (F)$ satisfying,
\begin{equation}\label{2.13} S_h ( Op_h^{weyl} (F) ) (X, Y) = \int _{\R^{2n}}F(Z)
e^{{1 \over h}( X \cdot \overline Z + \overline Y \cdot Z - X \cdot \overline Y) } d\mu _{h/2} (Z)
\end{equation}
$$=e^{{1\over 4h}|X - Y|^2} \int _{\R^{2n}} F\left (Z + {X+Y\over 2}
\right ) e^{{i\over 2h}( (\xi - \eta ) \cdot z - (x- y) \cdot \zeta )} d\mu _{\R^{2n} , h/2} (Z).
$$
This equality is proved in Unterberger \cite{U} and we use it for an extension to the infinite dimensional spaces.
The first issue is that the function $F$ is defined on $H^2$ according to Definition \ref{d1.1} and,
in order to give a meaning in infinite dimension to an integral such as the one in (\ref{2.13}),
we have to integrate over $B^2$, where $(i, H, B)$ is a Wiener space. Indeed,
in infinite dimension, $H^2$ cannot be endowed with a gaussian measure which corresponds to its own norm.
We have to be able to extend the function $F$, defined on $H^2$, to a function $\widetilde F$
defined on $B^2$. In general it is not a density extension but a type of
extension introduced by L. Gross and named {\it stochastic extension}. It may be found in
\cite{AJN} (Definition 4.4) where we recall a definition of this notion adapted to our purposes.
From Proposition 8.4 of \cite{AJN}, we know that each function $F$ in $S_1 (M, \varepsilon)$ admits
a stochastic extension $\widetilde F$ in
$L^1 (B^2 , \mu _{B^2 , h/2})$ at least if the sequence $(\varepsilon_j)$ is summable. Moreover, the proof of Proposition 8.4 of \cite{AJN} shows that
any linear form $F$ on $H^2$ has a stochastic extension $\widetilde F$ in
$L^1 (B^2 , \mu _{B^2 , h/2})$.
By analogy with (\ref{2.13}), one expects to associate with each function $F$
satisfying the hypotheses (H1) and (H2) of Section \ref{s1}, a quadratic form $Q_h^{weyl} (F)$ on ${\cal D}$,
with bi-symbol $S_h ( Q_h^{weyl} (F)) $ of the form,
\begin{equation}\label{2.14}\Phi (X , Y)=e^{{1\over 4h}|X - Y|^2} \int _{B^2}\widetilde F\left (Z + {X+Y\over 2}
\right ) e^{{i\over 2h}( \ell _{\xi - \eta} (z) -\ell _{ x- y} (\zeta ) )} d\mu _{B^2 , h/2} (Z).\end{equation}
\begin{theo}\label{t2.2} Let $F: H^2 \rightarrow {\bf C}$ be a function
satisfying the hypotheses (H1) and (H2) of Section \ref{s1} with $m\geq 0$.
Let $\widetilde F$ be the stochastic extension
of $F$ in $L^1 (B^2 , \mu _{B^2 , h/2})$. Then,
i) The integral (\ref{2.14}) converges and verifies,
\begin{equation}\label{2.15} |\Phi (X , Y)| \leq C e^{{1\over 4h}|X-Y|^2} \left ( 1+ { |X+Y| \over 2} \right )^m.\end{equation}
In addition, this function is Gateaux holomorphic in $X$ and anti-holomorphic in $Y$.
ii) There is a continuous quadratic form $Q_h^{weyl} (F)$ on ${\cal D}$
such that $S_h ( Q_h^{weyl} (F)) = \Phi$, i.e.,
\begin{equation}\label{2.16} S_h ( Q_h^{weyl} (F)) (X , Y) = e^{{1\over 4h}|X - Y|^2} \int _{B^2}\widetilde F\left (Z + {X+Y\over 2}
\right ) e^{{i\over 2h}( \ell _{\xi - \eta} (z) -\ell _{ x- y} (\zeta ) )} d\mu _{B^2 , h/2} (Z).\end{equation}
\end{theo}
{\it Proof.} i) The convergence of the integral (\ref{2.14}) and the estimate (\ref{2.15})
follow from hypothesis (H2). By a change of
variables (cf \cite{AJN}\cite{KU}), the function $\Phi$ may be also written as,
\begin{equation}\label{2.17}\Phi (X , Y)= \int _{B^2} \widetilde F(Z)
e^{{1 \over h}( \ell _X ( \overline Z) + \ell _{\overline Y} ( Z ) - X \cdot \overline Y) } d\mu _{h/2} (Z). \end{equation}
We deduce that it is holomorphic in $X$ and anti-holomorphic in $Y$.
ii) For all $f$ and $g$ in ${\cal D}_E$, where $E\subset H$
is a subspace of finite dimension, set
\begin{equation}\label{2.18} Q (f , g) = \int_{E^4} \Phi (X , Y) e^{ {1\over 2h} X\cdot \overline Y }
(T_hf) (X) \overline {(T_hg) (Y)} d\mu _{E^4 , h}(X , Y).\end{equation}
Using (\ref{2.15}) we see that, for all $f$ and $g$ in ${\cal D}_E$,
$$ | Q(f , g) | \leq C (2\pi h)^{-2 {\rm dim} E} \int_{E^4} |< f , \Psi_{X , h}>| |< g , \Psi_{Y , h}>|
(1+|X|) (1+|Y|) d\lambda (X , Y)$$
where $\lambda $ is the Lebesgue measure. Consequently, for all $f$ in ${\cal D}_E$, the integral defining
$Q(f, g)$ converges. When $f$ and $g$ belong to ${\cal D}_E$, they also are
in ${\cal D}_F$, for all subspace $F$ containing $E$. If $F$ contains $E$, then
we denote by $S$ the orthogonal set to $F$ in $E$, and $(X_E, X_S)$ the variable of $F^2$. The transform
$T_h f$ is a function on $F^2$, independent of the variable $X_S$. We remark that,
$$\int_{S^4} \Phi (X_E + X_S , Y_E + Y_S)e^{ {1\over 2h} X_S\cdot \overline Y_S }
d\mu _{S^4 , h}(X_S , Y_S) = \Phi (X_E , Y_E ).$$
Indeed, the function in the integral is holomorphic in $X_S$, anti-holomorphic in $Y_S$, and its
integral is equal to its value at $X_S = Y_S = 0$. Consequently the definition of $Q(f, g)$ is indeed coherent, whether $f$ and $g$
are seen as functions in ${\cal D}_E$ or in ${\cal D}_F$.
Let us show that the bi-symbol of $Q$ is $\Phi$. We have, for all $X= (x , \xi)$ and
$Y = (y, \eta)$ in $H^2$, if $E$ is the subspace spanned by $x$, $\xi$, $y$
and $\eta$,
$$ {Q (\Psi_{X h} , \Psi _{Yh} ) \over < \Psi_{X h} , \Psi _{Yh}> } = \int_{E^4}
\Phi (U , V)
{\cal B}_h(X , Y, U, V) d\mu _{E^4 , h}(U , V)$$
where ${\cal B}_h$ is a kind of reproducing kernel,
\begin{equation}\label{2.19}{\cal B}_h(X , Y, U, V) = e^{ {1\over 2h} (X \cdot \overline U + U\cdot \overline V
+ V \cdot \overline Y
- X\cdot \overline Y )}.\end{equation}
In a standard way, we have, if $\Phi$ is holomorphic in $X$, anti-holomorphic in $Y$,
\begin{equation}\label{2.20} \int_{E^4} \Phi (U , V)
{\cal B}_h(X , Y, U, V) d\mu _{E^4 , h}(U , V) = \Phi (X , Y).\end{equation}
It suffices to make the change of variables $U = X +S$, $V = Y + T$, and to apply the mean formula.
We then deduce that the bi-symbol of $Q$ is indeed $\Phi$.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
When $F$ belongs to $S_2 (M, \varepsilon)$,
where the sequence $(\varepsilon_j)$ is summable, we have proved in \cite{AJN} that the quadratic form $Q_h^{weyl} (F)$ is associated with a bounded operator.
\subsection{Weyl symbol and Wick symbol.}\label{s2.D}
It is sufficient to restrict equality (\ref{2.16}) to the diagonal $Y= X$ to see that,
\begin{equation}\label{2.21}\sigma_h ^{wick} ( Q_h^{weyl} (F))(X) = \int _{B^2}\widetilde F(Z + X )
d\mu _{B^2 , h/2} (Z).\end{equation}
For all $t>0$, the operator
\begin{equation}\label{2.22}(H_tF) (X) =
\int _{B^2} \widetilde F( X +Y ) d\mu _{B^2, t} (Y)\end{equation}
is considered as the heat operator.
In the above and below integrals on $B^2$, $\widetilde F(X+Y)$ denotes the stochastic extension on $B^2$ of $H^2 \ni Y \rightarrow F(X+Y)$ for each $X$ in $H^2$, which exists since it satisfies the same hypotheses as $F$.
We then can write,
\begin{equation}\label{2.23} \sigma_h ^{wick} ( Q_h^{weyl} (F)) =H_{h/2}F.\end{equation}
Equality (\ref{2.23}) extends the standard fact in finite dimension, that the Wick symbol is obtained from the
Weyl symbol by the action of the heat operator.
From Kuo \cite{KU} (Theorem 6.2) or Gross \cite{G4} (Proposition 9), the function $H_tF$ is continuous on $H^2$. If $H$ is of finite dimension,
we have $B= H$, $\widetilde F =F$, and $H_t F = e^{ (t/2) \Delta} F$. Note that,
\begin{equation}\label{2.24} \sup _{X \in H^2} |(H_tF) (X)| \leq \sup _{Z \in B^2}| \widetilde F( Z )| =
\sup _{X \in H^2} |F(X)|.\end{equation}
\begin{prop}\label{p2.3} If $F$ is in $S_4(M, \varepsilon)$ with some chosen basis $(e_j)$ and if the sequence $(\varepsilon_j)$ is summable, then there exists $C>0$
such that, for all $X$ in $H^2$ and $t$ in $(0, 1)$,
\begin{equation}\label{2.25} |(H_tF) (X) - F(X) | \leq C t.\end{equation}
\end{prop}
{\it Proof.}
Let $E_m$ be the subspace spanned by the $e_j$ ($j\leq m$). We apply (\ref{2.24}) to the function
$F_m = F - F \circ \pi _{E_m}$.
We obtain, for all $X$ in $H^2$,
$$ \int _{B^2 } | ( F \circ P _{E_m} ) (X+Y ) - \widetilde F (X+ Y ) | d\mu _{B^2, t} (Y)
\leq \Vert F - F \circ \pi _{E_m} \Vert _{\infty}$$
where $\pi _{E_m} : H^2 \rightarrow E_m^2$ is the orthogonal projection and $P _{E_m} : B^2 \rightarrow E_m^2$
is its stochastic extension, defined as in (\ref{2.6}).
If $F$ is in $S_1 (M, \varepsilon)$, we have,
\begin{equation}\label{2.26}\Vert F - F \circ \pi _{E_m} \Vert _{\infty} \leq 2M \sum _{j=m+1}^{\infty} \varepsilon_j.\end{equation}
For all $m>0$ and
for all $X$ in $H^2$, we have,
$$ \int _{B^2} F ( P _{E_m} (X+Y )) d\mu _{B^2, t} (Y) =
\int _{E_m^2} F ( (\pi _{E_m} X) +Y ) d\mu _{E_m^2, t} (Y). $$
According to standard results in finite dimension, we have for all $a$ in $E_m^2$,
$$ \left | \int _{E_m^2} F ( a +Y ) d\mu _{E_m^2, t} (Y) - F(a)
\right | \leq t \Vert \Delta _m F \Vert _{\infty}$$
where
$$ \Delta_m = \sum _{j=1}^m \left ( {\partial ^2 \over \partial _{x_j}^2} +
{\partial ^2 \over \partial _{\xi_j}^2 } \right ).$$
We apply this inequality to $a = \pi _{E_m} (X)$ using again (\ref{2.26}).
Consequently, for all $t\in (0, 1)$ and $m\geq 1$,
$$ |(H_tF) (X) - F(X) | \leq 2M t\sum _{j=1}^m \varepsilon_j^2 + 4M \sum _{j=m+1}^{\infty } \varepsilon_j.$$
We deduce (\ref{2.25}) when $m$ goes to infinity.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\subsection{Operators with linear symbol. Composition.}\label{s2.E}
\begin{prop}\label{p2.4} Let $F$ be a continuous linear form on $H^2$. Let $Q_h^{weyl}(F)$ be the quadratic form on ${\cal D}$
defined in Theorem \ref{t2.2}. Then, there exists an operator denoted
$Op_h ^{weyl} (F)$ from ${\cal D}$ into itself, such that
\begin{equation}\label{2.27} Q_h^{weyl} ( F) (f , g) = < Op_h ^{weyl} (F) f, g>,\qquad
(f , g)\in {\cal D}^2.\end{equation}
\end{prop}
{\it Proof.} Let $f$ be in ${\cal D}_E$,
where $E \subset H$ is of finite dimension. As in Definition \ref{d2.1}, we may write, $f = \widehat f \circ P_E$,
where the function (\ref{2.7}) is in ${\cal S} (E)$.
Let $a$ and $b$ in $H$ be such that
$F(x , \xi ) = a \cdot x + b \cdot \xi$. Let $E_1$ be the subspace spanned
by $E$, $a$ and $b$. Set $f_1 : E_1 \rightarrow {\bf C}$ the function
defined by,
$$ f_1(u) = (a+i b) \cdot u \widehat f(\pi (u)) + {h\over i}
(\pi (b) \cdot \nabla \widehat f) (\pi (u)),\qquad u\in E_1$$
where $\pi : E_1 \rightarrow E$ is the orthogonal projection.
We have $Op_h^{weyl} (F) f = f_1 \circ P_{E_1}$ and this function is in
${\cal D}_{E_1}$. Thus, if $F$ is linear, the quadratic form
$Q_h^{weyl} (F)$ is associated with a continuous operator $Op_h^{weyl}(F)$
from ${\cal D}$ into ${\cal D}$. The set of linear functions is invariant by the operator $H_{h/2}$. Consequently, the Wick symbol of $Q_h^{weyl} (F)$ is also $F$. We may write
$F(x , \xi) = P(X ) + Q(\overline X)$. Then, the bi-symbol of
$Q_h^{weyl} (F)$ is $P(X) + Q(\overline Y)$. We have, for all $f$
in ${\cal D}_E$, for all $Y \in (E_1)^2$,
$$ < Op_h^{weyl} (F) f, \Psi_{Yh} > = (2\pi h) ^{-n}
\int _{E^2} <f, \Psi_{Xh} > < Op_h^{weyl} (F) \Psi_{Xh} , \Psi_{Yh} >
dX $$
$$ = (2\pi h) ^{-n}
\int _{E^2} <f, \Psi_{Xh} > [P(X) + Q(\overline Y)]
<\Psi_{Xh} , \Psi_{Yh} > dX.$$
Consequently, for all integer $m$,
$$ (1+|Y|)^m | < Op_h^{weyl} (F) f, \Psi_{Yh} > | \hskip 8cm$$
$$ \hskip 2cm \leq C(E , E_1, h)
\int _{E^2} (1+|X|)^{m+1} | <f, \Psi_{Xh} > | (1+|X-Y|)^{m+1}
e^{-{1\over 4h} |Y-X|^2} dX. $$
Therefore,
$$ I(E_1 , m) (Op_h^{weyl} (F) f) \leq C(E , E_1, m, h) I(E , m+1) (f)$$
which proves the continuity of $Op_h^{weyl} (F) $ in ${\cal D}$.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
Let $A$ be a continuous quadratic form on ${\cal D}$.
Let $B : {\cal D} \rightarrow {\cal D}$ be a continuous linear mapping
with a linear Wick symbol. We recall that the quadratic forms
$ A \circ B $, $ B \circ A $ and $[A , B]$ are defined in (\ref{a2})
and (\ref{a3}).
\begin{theo}\label{t2.5} Let $A_h$ be a bounded operator in $L^2(B , \mu _{B , h/2})$,
and set $L_h $ an operator from ${\cal D}$ into ${\cal D}$ with a Wick symbol
being a linear form $L(x , \xi) $ on $H^2$.
Let $A_h \circ L_h $ be the quadratic form on ${\cal D}$ of their composition defined as
in Section \ref{s1}. Then, we have,
$$ \sigma_h ^{wick} (A_h \circ L_h) = \sigma_h ^{wick} (A_h) \sigma_h ^{wick} (L_h)+ \hskip 4cm$$
$$ \hskip 4cm{h\over 2} \sum _{j\in \Gamma} \left ( {\partial \over \partial x_j} - i
{\partial \over \partial \xi_j}\right )
\sigma_h^{wick} (A_h)
\left ( {\partial \over \partial x_j} + i
{\partial \over \partial \xi_j}\right )
\sigma_h^{wick} (L_h) $$
This result is valid when exchanging the roles of $A_h$ and $L_h$.
\end{theo}
{\it Proof.} Set $L(x , \xi ) = a \cdot x + b \cdot \xi$ with $a$ and $b$ in $H$. Let $X $ be in
$H^2$. There exists a unitary operator $W_{X , h}$ such that $\Psi _{X , h} = W _{X , h} \Psi _{0 , h} $.
We have,
$$ \sigma_h ^{wick} (A_h \circ L_h) (X) = < L_h \Psi_{X , h}, A_h^{\star} \Psi_{X , h } > = < f, g> $$
with $f= W_{X , h} ^{\star} L_h W_{X , h} \Psi_{0 , h} $ and $g= W_{X , h} ^{\star} A_h^{\star} W_{X , h} \Psi_{0 , h} $.
Let $T_hf$ and $T_hg $ be the Segal Bargmann transforms of $f$ and $g$ defined in (\ref{2.5}), $\widetilde T_hf $ and
$\widetilde T_hg$ being their stochastic extensions in $L^2(B^2 , \mu _{B^2, h})$. Since $\widetilde T_h$ is a partial isometry from $L^2(B , \mu _{B, h/2})$ into $L^2(B^2 , \mu _{B^2, h})$, we have
$$ \sigma_h ^{wick} (A_h \circ L_h) (X) = \int _{B^2} \widetilde T_h f(Z) \overline { \widetilde T_h g (Z)}
d \mu _{B^2, h} (Z).$$
We also have,
$$\widetilde T_h f(Z) = L(X) + \ell _{a+ib} (z-i \zeta).$$
Since $T_hg$ is antiholomorphic, the mean formula gives,
$$ \int _{B^2}\overline { \widetilde T_h g (Z)} d \mu _{B^2, h} (Z)= \overline {T_hg} (0) = < \Psi_{0, h}, g> =
\sigma _h^{wick} (A_h ) (X). $$
Similarly, integrating by parts (see Theorem 6.2 of Kuo \cite{KU}),
for all $\gamma $ in the complexified of $H$,
$$ \int _{B^2}\ell _{\gamma } ( z - i \zeta) \overline { \widetilde T_h g (Z)} d \mu _{B^2, h} (Z)=
h \gamma \cdot ( \partial_z - i \partial_{\zeta} ) \overline {T_hg} (0) =
h \gamma \cdot ( \partial_x - i \partial_{\xi } ) \sigma _h^{wick} (A_h ) (X). $$
The proof of the theorem then follows.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\begin{prop}\label{p2.6} Let $F$ be a function in
$S_2(M, \varepsilon)$ where the sequence $(\varepsilon _j) $ is summable and let $L$ be a continuous linear form on $H^2$. Let
\begin{equation}\label{b1} \Phi = FL + {h\over 2i} \{ F , L \},\qquad\Psi = FL - {h\over 2i} \{ F , L \}. \end{equation}
Then,
i) The functions $\Phi $ and $\Psi$ satisfy hypotheses (H1) and (H2) in Section \ref{s1}
ii) The corresponding
Weyl forms using the Theorem \ref{t2.2} satisfy, for all $f$ and $g$ in ${\cal D}$,
$$ Q_h^{weyl } (\Phi ) (f , g) = < Op_h^{weyl } (L) f ,Op_h^{weyl } (F) ^{\star} g >, $$
$$ Q_h^{weyl } (\Psi ) (f , g) = < Op_h^{weyl } (F) f ,Op_h^{weyl } (L) ^{\star} g >.$$
\end{prop}
{\it Proof. i) } Using the linearity of $L$ and the estimates
$\int_B |\ell_a(X)| |\ell_b(X)| d\mu_{B,h/2}(X) \leq C |a| |b|$, the existence of
$L^1$ stochastic extensions is obtained similarly as in the proof of Proposition 8.4 of \cite{AJN}.
The polynomial estimate on the semigroup uses that the stochastic extension
of $X \rightarrow F(X) a.X$ is $\widetilde{F} \ell_a$ with $\int_B |\ell_a(X)| d\mu_{B,h/2}(X) \leq C |a|$.
{\it ii) } We may write $L(x, \xi)= a \cdot x + b \cdot \xi$ with $a$ and $b$
in $H$. From (\ref{2.22}),
$$ ( H_{h/2} F L) (X) = ( H_{h/2} F ) (X) L(X) + \int _{B^2} \widetilde F( X +Y )
( \ell _a (y) + \ell _b (\eta) ) d\mu _{B^2, h/2} (Y). $$
Integrating by parts,
$$ ( H_{h/2} F L) (X) = ( H_{h/2} F ) (X) L(X) +{h\over 2} \int _{B^2} \widetilde G( X +Y )
d\mu _{B^2, h/2} (Y) $$
where $G(x, \xi) = \Big ( a \cdot \partial _x + b \cdot \partial _{\xi} \Big ) F$.
In other words,
$$ ( H_{h/2} F L) (X) = ( H_{h/2} F ) (X) L(X) +{h\over 2} \Big ( a \cdot \partial _x
+ b \cdot \partial _{\xi} \Big ) ( H_{h/2} F ) (X). $$
Since $H_{h/2}$ leaves $F$ invariant, this may be written as,
$$ ( H_{h/2} F ) ( H_{h/2} L) + {h\over 2} \sum _{j\in \Gamma}
\left [ {\partial H_{h/2} F \over dx_j} {\partial H_{h/2} L \over dx_j} +
{\partial H_{h/2} F \over d\xi_j} {\partial H_{h/2} L \over d\xi_j} \right ].
$$
Similarly,
$$ H_{h/2} \{ F , L \} = \{ H_{h/2} F , L \} = \{ H_{h/2} F , H_{h/2} L \}.$$
Consequently, if $\Phi$ is defined in (\ref{b1}) then
$$ H_{h/2} \Phi = ( H_{h/2} F) ( H_{h/2} L) +\hskip 4cm$$
$$\hskip 4cm {h\over 2} \sum _{j\in \Gamma} \left ( {\partial \over \partial x_j} - i
{\partial \over \partial \xi_j}\right )
( H_{h/2} F)
\left ( {\partial \over \partial x_j} + i
{\partial \over \partial \xi_j}\right )
( H_{h/2} L). $$
From Theorem \ref{t2.5}, $ H_{h/2} \Phi$ is the Wick symbol of the composition of the two operators with Wick symbols being $H_{h/2} F$ and $H_{h/2} L$, that is to say,
$Op_h^{weyl} (F)$ and $Op_h^{weyl} (L)$.
The proposition is then a consequence of the following Lemma.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\begin{lemm}\label{l2.7} Two continuous quadratic forms on ${\cal D}$ with the same Wick symbol are equal.
\end{lemm}
{\it Proof.} Let $A$ be a continuous quadratic form on ${\cal D}$
whose Wick symbol vanishes identically. Let
$X$ and $Y$ be in $H^2$. Set,
$$ \varphi ( \lambda , \mu ) = S_h (A) \left ( {X+Y \over 2} + \lambda {X-Y \over 2} ,
{X+Y \over 2} + \mu {X-Y \over 2} \right ). $$
This function on ${\bf C}^2$ is holomorphic in $\lambda $, anti-holomorphic in
$\mu$, and identically vanishing if $\lambda = \mu$. It is then identically vanishing
and the equality $\varphi (1 , -1)=0$ shows that $S_h (A) (X , Y)=0$. The bi-symbol of
$A$ is identically vanishing. Let $f$ and $g$ in ${\cal D}_E$ where
$E \subset H$ is a subspace of finite dimension $n$. Let $C$ and $m$
be the constants such that we have (\ref{2.8}) for all $f$ and $g$ in ${\cal D}_E$. Denote by $D(E,m)$ the set of functions $f$ such that the integral $I(E,m)(f)$ is finite, where $I(E,m)(f)$ is given in (\ref{2.9}). We also have
$$ f = ( 2\pi h)^{-n} \int _{E^2} < f , \Psi_{Xh} > \Psi_{Xh} dX $$
and similarly for $g$. Then applying \cite{Y} (Section V.5) one obtains that $A(f,g)$ vanishes.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\subsection{Unbounded operators. Sobolev spaces.}\label{s2.F}
We denote by $W$ the completion of ${\cal D}$ for the following norm,
$$ \Vert u\Vert_W ^2 = \Vert u \Vert ^ 2 + \sum _{j\in \Gamma }
\Vert ( Q_h(e_j ) + i P_h(e_j )) u\Vert ^2. $$
Using annihilation operators, one has
$Q_h(e_j ) + i P_h(e_j ) = \sqrt {2h} a_h (e_j)$. Using the number operator
$N = \sum a_h ^{\star } (e_j) a_h (e_j)$, one has $ \Vert u\Vert_W ^2
= < (I + 2h N) u , u>$ (See also \cite{KR} and \cite{LA2} for other Sobolev spaces in infinite dimension).
\begin{prop}\label{p2.8.} i) For all $(a, b)$ in $H^2$, let $F_{a , b} (q, p) =
a \cdot q + b \cdot p$. Then the operator $Op_h^{weyl} (F_{a b})$ from ${\cal D}$
into itself, may be extended to an operator from
$W$ in $L^2(B , \mu _{B , h/2})$ and we have,
\begin{equation}\label{2.?} \Vert Op_h^{weyl} (F_{a b}) u \Vert \leq C (|a| + |b|) \ \Vert u \Vert _W.\end{equation}
ii) Let $F$ in $ S_3(M , \varepsilon )$.
Then the operator $A_h = Op_h^{weyl} (F )$ is bounded from $W $ into $W$.
\end{prop}
{\it Proof.} i) Point i) follows from estimates in Derezi\'nski-G\'erard \cite{DG} , Lemma 2.1 or Lemma 2.3. The operator $Op_h^{weyl} (F_{a b})$
is then denoted by $\Phi_S (a+i b)$.
ii) For all $u$ in $W$ and for all $j$ in $\Gamma$,
we have from Proposition \ref{p2.6},
$$ ( Q_h(e_j ) + i P_h(e_j )) A_h u = A_h ( Q_h(e_j ) + i P_h(e_j ))u + h
Op_h^{weyl } (G_j) u $$
with $G_j (x , \xi) = {\partial F \over \partial x_j} + i {\partial F \over \partial \xi_j}$.
This function belongs to a set $S_2 ( M \varepsilon_j , \varepsilon)$.
From Theorem 1.4 of \cite{AJN},
$$\Vert A_h \Vert \leq M',\qquad\Vert Op_h^{weyl } (G_j) \Vert \leq M' \varepsilon _j$$
where $M'$ is independent of $j$. The proposition then follows.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\section{Reduction to finite dimension.}\label{s3}
With a given bounded operator $A$ in
$L^2 (B, \mu _{B , h/2})$, one always may associate
a Wick symbol
$\sigma_h^{wick}(A)$. If $A$ verifies the hypotheses of Theorem \ref{t1.2},
we shall associate a Weyl symbol $F$ (which will depend on
$h$). Functions $F$ will satisfy $H_{h/2} F = \sigma_h^{wick}(A)$.
We reduce this study to issues related to subspaces $E$ of
finite dimension in $B' \subset H$. One associates two partial heat operators with each
subspace $E \subset B'$. For
any bounded continuous function $F$ on $H^2$ and for all $t > 0$, one sets,
\begin{equation}\label{3.1} (H_{E , t} F )(X) = \int _{E^2} F (X + Y_E )
d\mu _{ E^2 , t } (Y_E ).\end{equation}
One can also define a partial heat operator acting, not on
the variables of $E^2$, but on those of its orthogonal. The notation
$E^{\perp}$ now denotes,
\begin{equation}\label{3.2} E^{\perp} = \{ x \in B \ :\ u(x) = 0 \ \ \forall\, u \in E \}.\end{equation}
This heat operator related to the variables of $(E^{\perp})^2$ can only act on bounded continuous functions $F$ on $H^2$ with
a stochastic extension $\widetilde F$ (bounded measurable function on $B^2$).
One sets,
\begin{equation}\label{3.3} (H_{ E^{\perp} , t } F )(X) = \int _{( E^{\perp})^2}
\widetilde F (X + Y_{E^{\perp}} ) d\mu _{ (E^{\perp})^2 , t } (Y_{E^{\perp}} ).\end{equation}
Indeed, we know from Ramer \cite{RA} (Section 1.B),
that the space $E^{\perp}$ defined in (\ref{3.2}) is also endowed with a gaussian measure. Similarly to $H_t$, we note that,
\begin{equation}\label{3.4} \sup_{X\in H^2} | (H_{ E^{\perp} , t } F )(X) | \leq
\sup_{X\in H^2} |F(X)|.\end{equation}
If $F$ is bounded and continuous on $H^2$ and if its
stochastic extension $\widetilde F$ exists, then we have, from \cite{RA} (Section 1.B),
\begin{equation}\label{3.5} H_{h/2} F = H_{E , h/2 } H_{E^{\perp} , h/2 } F.\end{equation}
We then consider an increasing sequence $(\Lambda _n)$ of finite subsets of $\Gamma$ whose union
is $\Gamma$. We set,
$$E(\Lambda _ n) = {\rm Vect} (e_j, \ j \in \Lambda _ n).$$
In Sections \ref{s4} to \ref{s7}, we shall prove the following propositions.
\begin{prop}\label{p3.1}
Let $A$ be a bounded operator in $L^2 (B, \mu _{B , h/2})$ satisfying the
hypotheses of Theorem \ref{t1.2}. Then,
i) the function $\sigma _h^{wick} (A)$ is
in the set $S_{m+4}(M, \varepsilon )$.
ii) Setting,
$$ P_{E(\Lambda _n)} (x, \xi ) = \left ( \sum _{j\in \Lambda _n }
e_j (x)e_j, \sum _{k\in \Lambda _n } e_k( \xi )e_k \right ),\qquad (x, \xi ) \in B^2 $$
and by denoting $ \Vert \cdot \Vert _{\infty } $ the supremum norm on $H^2$, we have,
\begin{equation}\label{a10} \Vert \sigma_h^{wick} (A) - \sigma_h^{wick} (A) \circ P_{E(\Lambda _n)}
\Vert _{\infty} \leq 2M \sum _{j\notin \Lambda _n } \varepsilon_j. \end{equation}
\end{prop}
\begin{prop}\label{p3.2} Let $A$ be a bounded operator in
$L^2 (B, \mu _{B , h/2})$ satisfying the hypotheses in Theorem \ref{t1.2}.
Then,
for all $n$, there exists a continuous bounded function $F_n$ on $H^2$
such that, if $0 < h < 1$,
i) We have
\begin{equation}\label{a4} H_{ E(\Lambda _n), h/2} F_n = \sigma_h^{wick} (A).\end{equation}
ii) The function $F_n$ is in $S_m ( M_n, \varepsilon)$ with
\begin{equation}\label{a5} M_n = M \prod _{j\in \Lambda _n} (1 + K S_{\varepsilon } ^2
h \varepsilon _j ^2 )\end{equation}
where $K$ is a numerical constant and $S_{\varepsilon} $ is defined
in (\ref{1.13}).
iii) If $n < p$ then the function $F_n - F_p$ is in $S_m ( M_{np}, \varepsilon)$
where
\begin{equation}\label{a6} M_{np} = M \left [ \sum _{j\in \Lambda _p \setminus \Lambda _n}
K (1 + hS_{\varepsilon} ^2 )^2 h \varepsilon _j^2 \right ]
\prod _{j\in \Lambda _p} (1 + K S_{\varepsilon} ^2 h\varepsilon _j^2 ).\end{equation}
\end{prop}
These propositions will be proved in Sections \ref{s4} to \ref{s7}. Let us verify that
Theorem \ref{t1.2} follows from these propositions. From Proposition \ref{p3.2}, the sequence $(F_n)$ converges
to a function $F$ in $S_m ( M' , \varepsilon)$ where $M'$ is defined
in (\ref{1.16}). Let us show that $H_{h/2} F = \sigma_h^{wick} (A)$.
From Proposition 8.4 in \cite{AJN},
the functions $F_n$ have stochastic extensions $\widetilde F_n$. Then, we may apply the operator $H_{ E(\Lambda _n )^{\perp} , h/2}$ to both sides of equality (\ref{a4}). We obtain from (\ref{a4}) and (\ref{3.5}),
\begin{equation}\label{a7} H_{h/2} F_n = H_{ E(\Lambda_n)^{\perp} , h/2} \sigma_h^{wick} (A).\end{equation}
Let us now take the limit as $n$ goes to infinity. We have from the point iii) of Proposition \ref{p3.2},
$$| F_n (X) - F (X)| \leq M \left [ \sum _{j\notin \Lambda _n} K h
\varepsilon_j^2 \right ] \prod _{j\in \Gamma } (1 + K h\varepsilon_j^2 ). $$
From (\ref{3.4})
we see that, in the sense of the uniform convergence,
\begin{equation}\label{a8} \lim _{n\rightarrow \infty } H_{h/2} F_n =
H_{h/2} F.\end{equation}
We shall also check that,
\begin{equation}\label{a9} \lim _{n\rightarrow \infty } H_{ E(\Lambda _n)^{\perp} , h/2}
\sigma_h^{wick} (A) = \sigma_h^{wick} (A).\end{equation}
Indeed, setting,
$\Psi = \sigma_h^{wick} (A)$,
we have
$$ \Vert \Psi - H_{ E(\Lambda _n)^{\perp} , h/2} \Psi \Vert _{\infty }
\leq \Vert \Psi - \Psi \circ P_{E(\Lambda _n ) } \Vert _{\infty } +
\Vert H_{ E(\Lambda _n)^{\perp} , h/2} ( \Psi - \Psi \circ P_{E(\Lambda _n ) } )
\Vert _{\infty }.$$
We have used the fact that $H_{ E(\Lambda_n)^{\perp} , h/2} (\Psi \circ P_{E(\Lambda _n)})
= \Psi \circ P_{E(\Lambda _n)}$. The
limit in (\ref{a9}) follows from (\ref{3.4})(\ref{a10}) and of point ii) in Proposition \ref{p3.1}. Using (\ref{a7})(\ref{a8})(\ref{a9}) we obtain
$H_{h/2}F = \sigma_h^{wick} (A)$. Since the function $F$ is in
$S_m(M', K \varepsilon )$, a Weyl quadratic form is associated with it by Theorem \ref{t2.2}, and
a bounded operator $ Op_h^{weyl} (F )$ is associated with it by Theorem 1.4 of \cite{AJN}.
From (\ref{2.23}), the Wick symbol of this operator is $H_{h/2}F$.
Consequently the operators $Op_h^{weyl} (F )$ and $A$ have the same Wick symbol. From Lemma \ref{l2.7}, these two operators are equal.
Once Propositions \ref{p3.1} and \ref{p3.2} are proved, we have indeed found a function $F$ in $S_m(M', K \varepsilon )$ whose corresponding Weyl operator
is equal to $ A$. Theorem \ref{t1.2} is then a consequence of Propositions \ref{p3.1} and \ref{p3.2}.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\section{Proof of Proposition \ref{p3.1}.}\label{s4}
Let $A$ be a bounded operator in
$L^2 (B, \mu _{B , h/2})$ satisfying the hypotheses of
Theorem \ref{t1.2}. From Theorem \ref{t2.5}, we have,
\begin{equation}\label{4.1} \sigma_h^{wick} ( [Q_h(e_j ), A] ) =
ih {\partial \over \partial \xi _j } \sigma_h^{wick} (A),\qquad
\sigma_h^{wick} ( [P_h(e_j ), A] ) =
- ih {\partial \over \partial x _j } \sigma_h^{wick} (A).\end{equation}
For all bounded operator $B$, one has,
$| \sigma_h^{wick} (B)(X)| \leq \Vert B \Vert $. Consequently,
if $A$ verifies the hypotheses of Theorem \ref{t1.2} one deduces estimates, for each multi-index
$(\alpha, \beta)$ in ${\cal M} _{m+4}$,
$$ | \partial_x^{\alpha}\partial_{\xi}^{\beta} \sigma_h^{wick} (A)(x, \xi)|
\leq M \prod _{j\in \Gamma } \varepsilon_j ^{\alpha _j + \beta _j}, $$
which proves point i) of Proposition \ref{p3.1}. We deduce,
$$ | \sigma_h^{wick}(A)(x, \xi )
- \sigma_h^{wick} (A)(P_{E(\Lambda _n)} (x, \xi)) | \leq 2 M \sum _{j\notin \Lambda _n }
\varepsilon_j, $$
which proves Proposition \ref{p3.1}. We shall also need analogous estimates on the bi-symbol. One deduces from (\ref{4.1}) these estimates by setting, for all $j\in \Gamma$,
$$ {\partial \over \partial X_j} = {1\over 2} \left ( {\partial \over \partial x_j}
- i {\partial \over \partial \xi _j} \right ),\qquad
{\partial \over \partial \overline Y_j} = {1\over 2} \left ( {\partial \over \partial y_j}
+ i {\partial \over \partial \eta _j} \right ).
$$
With these notations, one has,
\begin{equation}\label{4.2}
S_h([Q_h(e_j ), A])(X, Y ) = - h \left ( {\partial \over \partial X_j} - {\partial \over \partial \overline Y_j}
\right ) (S_hA)(X, Y ),\end{equation}
\begin{equation}\label{4.3} S_h([P_h(e_j ), A])(X, Y ) = -i h \left ( {\partial \over \partial X_j} + {\partial \over \partial \overline Y_j}
\right ) (S_hA)(X, Y ).\end{equation}
Consequently, for all multi-indices $(\alpha, \beta )$,
\begin{equation}\label{4.4} S_h ( ({\rm ad} P_h)^{\alpha} ({\rm ad} Q_h)^{\beta} A )(X, Y )
= c_{\alpha \beta}
h^{|\alpha + \beta |} (\partial _x + \partial _y)^{\alpha} (\partial _{\xi} + \partial _{\eta} )^{\beta }
S_h(A)(X, Y )\end{equation}
where $|c_{\alpha \beta} | =1$. With (\ref{2.4}), we deduce that
\begin{equation}\label{4.5} | (\partial _x + \partial _y)^{\alpha} (\partial _{\xi} + \partial _{\eta} )^{\beta }
S_h(A)(X, Y ) | \leq h^{-|\alpha + \beta |}
e^{-{1\over 4h} |X-Y|^2} \Vert ({\rm ad} P_h)^{\alpha} ({\rm ad} Q_h)^{\beta} A \Vert.\end{equation}
\section{Finite dimensional analysis.}\label{s5}
We consider here the case where $H$ is a real Hilbert space with finite dimension $n$. Let $A$ be an operator satisfying
hypotheses of Theorem \ref{t1.2}. Let $\Phi = S_hA$ be its bi-symbol,
defined in (\ref{2.11}). We have seen that $\Phi (X , Y)$ is holomorphic in $X$, anti-holomorphic in $Y$.
From (\ref{4.5}), the following norm is finite,
\begin{equation}\label{5.1} N_h^{(2)} (\Phi) = \sum _{(\alpha , \beta) \in {\cal M} _2}
\Vert e^{-{1 \over 4h} |X - Y |^2 } (\partial _x + \partial _y)^{\alpha }
(\partial _{\xi } + \partial _{\eta })^{\beta } \Phi \Vert_{\infty },
\end{equation}
where $\Vert \cdot \Vert _{\infty }$ is the supremum norm. Note again that a choice of particular basis has been made.
One introduces, in the sense of distributions, an
integral transform giving the Weyl symbol $F$ of $A$ starting from the
bi-symbol $\Phi$, and gives estimates on $F$. This integral is not convergent but has to be understood as an oscillatory integral
(see H\"ormander \cite{HO}). This leads to a proof of Beals's theorem in finite dimension
(see Unterberger \cite{U}). Setting,
\begin{equation}\label{5.2} K_h^{Beals} (X , Y, Z) = e^{- {1\over h}(Z-Y)\cdot (\overline Z- \overline X) - {1\over 2h}|X-Y|^2 }.\end{equation}
\begin{theo}\label{t5.1} Let $H$ be a real Hilbert space of
finite dimension $n$. Let $(X , Y) \mapsto \Phi (X , Y)$ be a function
on $H^2 \times H^2$ which is holomorphic in $X$ and anti-holomorphic in $Y$, such that the norm
$ N_h^{(2)} (\Phi)$ defined in (\ref{5.1}) is finite (for some orthonormal basis).
Then,
i) The following integral transform defines, a priori in the sense of distributions,
a function $B _h \Phi $ which is bounded and continuous on $H^2$,
\begin{equation}\label{5.3} (B_h \Phi ) (Z) = 2^n (2 \pi h)^{-2n} \int _{H^4}
\Phi (X , Y) K_h^{Beals} (X , Y, Z) dX dY.\end{equation}
Moreover, this function satisfies,
\begin{equation}\label{5.4} \Vert B_h \Phi \Vert _{\infty } \leq K^n N_h^{(2)} (\Phi) \end{equation}
ii) Moreover, one has,
\begin{equation}\label{5.5} ( H_{h/2} B_h \Phi ) (Z) = \Phi (Z , Z).\end{equation}
\end{theo}
{\it Proof of i).} We follow the method of Unterberger \cite{U}. The change of variables
$$ X = Z + S + {T\over 2},\qquad Y = Z + S - {T\over 2} $$
allows us to rewrite (\ref{5.3}) as,
\begin{equation}\label{5.6} (B_h \Phi )(Z) = 2^n (2\pi h)^{-2n} \int _{H^4} \Psi (S, T , Z)
K_h(S, T )dSdT\end{equation}
with
\begin{equation}\label{5.7} \Psi (S, T , Z ) = \Phi \left ( Z + S +{T\over 2} , Z + S - {T\over 2} \right )
\end{equation}
\begin{equation}\label{5.8} K_h(S, T ) = e^{-{1\over h} |S|^2 - {i \over h}\sigma (S , T) - {1\over 4h} |T|^2}.\end{equation}
Set $S_j = (s_j, \sigma _j )$, $T_j = (t_j, \tau _j)$. Let $L_j$ and $M_j$ be the
operators defined, for each function $G(S, T )$, by
$$ L_j G = \left ( 1 + {\tau _j ^2 \over h } \right ) ^{-1} e^{-{1\over h} s_j^2 }
\left ( 1 - h {\partial ^2 \over \partial s_j^2 } \right ) e^{{1\over h} s_j^2 } G $$
$$ M_j G = \left ( 1 + {t _j ^2 \over h } \right ) ^{-1} e^{-{1\over h} \sigma _j^2 }
\left ( 1 - h {\partial ^2 \over \partial \sigma _j^2 } \right ) e^{{1\over h} \sigma _j^2 } G. $$
One verifies that,
$$ L_jK_h = K_h,\qquad M_jK_h = K_h \hskip 2cm j\leq n $$
where $K_h$ is the function defined in (\ref{5.8}).
Consequently,
$$ (B_h \Phi )(Z) = 2^n (2\pi h)^{-2n} \int_{H^4} K_h(S, T )
\left [ \prod _{j\leq n} ^tL_j \ ^ t M_j \right ] \Psi (S, T , Z )
dS dT. $$
We see that,
$$ ^tL_j = \left ( 1 + {\tau _j ^2 \over h } \right ) ^{-1} \Big [
a_0(s_j / \sqrt h) + h^{1/2} a_1(s_j / \sqrt h)\partial _{s_j} +
h a_2(s_j / \sqrt h) \partial _{s_j} ^2 \Big ] $$
with
$$ a_0(s) = 3 - 4s^2,\qquad
a_1(s) = 4s,\qquad
a_2(s) = -1.$$
Similarly,
$$ ^ tM_j = \left ( 1 + {t _j ^2 \over h } \right ) ^{-1}
\Big [
a_0(\sigma _j / \sqrt h) + h^{1/2} a_1(\sigma _j / \sqrt h)\partial _{\sigma_j} +
h a_2(\sigma _j / \sqrt h) \partial _{\sigma _j} ^2 \Big ] $$
Consequently,
$$ | (B_h \Phi )(Z)| \leq \sum _{ (\alpha , \beta) \in {\cal M}_2 }
h^{|\alpha + \beta |/2} F_{\alpha \beta } (Z) $$
with
$$ F_{\alpha \beta } (Z) = 2^n (2\pi h)^{-2n} \int _{H^4}
e ^{ -{1\over h} |S|^2} \prod _{j\leq n} \left ( 1 + {t _j ^2 \over h } \right ) ^{-1}
\left ( 1 + {\tau _j ^2 \over h } \right ) ^{-1} \left | a^{\alpha } (s / \sqrt h)
a^{\beta } (\sigma / \sqrt h) \right |
\, \left | e ^{ -{1\over 4h} |T|^2} \partial _s ^{\alpha } \partial _{\sigma} ^{\beta }
\Psi (S, T , Z ) \right | dS dT $$
where we have set
$$ a ^{\alpha } (s ) = \prod _{j\leq n} a_{\alpha _j} (s_j ). $$
There exists $K > 0$ such that,
$$ \pi ^{-1/2} \int _{\R}e^{-s^2} |a_j(s)| ds \leq K,\qquad 0 \leq j \leq 2 $$
and also
$$ (2 \pi ) ^{-1/2} \int _{\R} (1 + x^ 2)^{-1} dx \leq K. $$
Consequently,
$$ | (B_h \Phi )(Z)| \leq K^n \sum _{ (\alpha , \beta) \in {\cal M}_2 }
h^{|\alpha + \beta |/2} \sup _{(S , T) \in H^4}
\left | e^{-{1\over 4h} |T|^2} \partial_s ^{\alpha } \partial_{\sigma } ^{\beta }
\Psi (S , T, Z) \right |. $$
From the definition of $\Psi $ in (\ref{5.7}),
$$ | (B_h \Phi )(Z)| \leq K^n \sum _{ (\alpha , \beta) \in {\cal M}_2 }
h^{|\alpha + \beta |/2} \sup _{(X , Y) \in H^4}
\left | e^{-{1\over 4h} |X-Y|^2 } ( \partial_x + \partial_y) ^{\alpha }
( \partial_{\xi } + \partial_{\eta} ) ^{\beta }
\Phi (X , Y) \right |.$$
We then deduce (\ref{5.4}) with another constant $K$.
{\it Proof of ii).} If a function $\Psi$ on $H^2$ is written as
$$ \Psi (Z) = e^{ -{1\over h} |Z|^2 + {1\over h} (A \cdot Z + B \cdot \overline Z )}$$
where $A$ and $B$ are in $H^2$, and $A \cdot Z $ denotes the bi-{\bf C}-linear scalar product, then the action of the heat operator on $\Psi$
verifies,
$$ ( H_{h/2} \Psi ) (Z) = \Big ( e^{{h\over 4} \Delta } \Psi \Big )
(Z) = 2^{-n} e^{-{1\over 2h} |Z|^2 + {1\over 2 h} (A \cdot Z + B \cdot \overline Z )
+ {1\over 2 h} A \cdot B }.$$
Thus,
$$ ( H_{h/2} K_h^{Beals} (X , Y, \cdot ) ) (Z) = 2^{-n}
{\cal B}_h (Z , Z , X , Y) e^{-{1\over 2h} (|X|^2 + |Y|^2)} $$
where ${\cal B}_h$ is our type of reproducing kernel introduced in (\ref{2.19}). Consequently, as in (\ref{2.20})
$$ ( H_{h/2} B_h \Phi ) (Z) = \int _{H^4} \Phi (X , Y) {\cal B}_h (Z , Z , X , Y)
d\mu _{H^4 , h } (X , Y) = \Phi (Z , Z).$$
\section{Proof of Proposition \ref{p3.2}: first step.}\label{s6}
For all operators $A$ satisfying the
hypotheses of Theorem \ref{t1.2} and for some finite-dimensional subspaces $E $
of $B' \subset H$, we shall find a bounded continuous function
$\tau_{E,h} (A)$ on $H^2$ such that
\begin{equation}\label{6.1} H_{E, h/2} \tau_{E,h} (A) = \sigma_h^{wick} (A).\end{equation}
This is point i) of Proposition \ref{p3.2}. Moreover, we shall give estimates on this function. For all finite
subsets $I$ in $\Gamma $, let $E(I)$ be the subspace of $B' \subset H$
spanned by the $e_j $, $j \in I$. Recall that the elements $e_j$
$(j \in \Gamma)$ of our Hilbertian basis are in $B'$. Let
${\cal M}_2(I)$ be the set of all multi-indices $(\alpha, \beta)$ such that
$\alpha _j = \beta _j = 0$ if $j \notin I$, and $\alpha _j \leq 2$
and $\beta _j \leq 2$ if $j \in I$.
\begin{prop}\label{p6.1} Let $A$ be an operator satisfying the hypotheses in Theorem \ref{t1.2}. Let $I$
be a finite subset of $\Gamma$. Then, there exists a bounded continuous function
$ \tau_{E(I),h} (A)$ on $H^2$ satisfying (\ref{6.1}). Moreover,
\begin{equation}\label{6.2} \Vert \tau_{E(I),h} (A) \Vert _{\infty } \leq
K^{|I|} \sum _{(\alpha , \beta)\in {\cal M}_2(I) } h^{-|\alpha +\beta|/2}
\Vert ({\rm ad} P_h)^{\alpha} ({\rm ad} Q_h)^{\beta} A \Vert
\end{equation}
where $ K$ is a numerical constant.
\end{prop}
{\it Proof.} We denote $E = E(I)$, $E^{\perp}$ the orthogonal complement of $E$
in $H$, and $Z = (Z_E, Z_{E^{\perp}} )$ the variable in $H^2$.
For all $Z_{E^{\perp}} $ in $(E^{\perp})^2$, we shall apply Theorem \ref{t5.1}, replacing $H$ by $E$, with the following function
$\Phi $ defined on $E^2$,
$$ \Phi_ { Z_{E^{\perp}} } (X_E , Y_E) = (S_hA) ( X_E , Z_{E^{\perp}} , Y_E,
Z_{E^{\perp}} ). $$
Using again notation (\ref{5.3}), which a priori only makes sense as an oscillatory integral on $E^2$, one sets, for all $Z = (Z_E, Z_{E^{\perp}} )$ in $H^2$,
$$ \tau_{E(I),h} (A) (Z) = 2^{ {\rm dim} (E)} (2 \pi h)^{-2{\rm dim} (E) }
\int _{E^4}
(S_hA) ( X_E , Z_{E^{\perp}} , Y_E,
Z_{E^{\perp}} ) K_h^{Beals} (X_E , Y_E, Z_E) dX_E dY_E$$
where $ K_h^{Beals} $ is defined in (\ref{5.2}). One may apply Theorem \ref{t5.1}, choosing as an orthonormal basis of $E = E(I)$ the one constituted by the $e_j$, $j\in I$.
With this choice, we have from (\ref{4.5}),
$$ N_h^{(2)} (\Phi_ { Z_{E^{\perp}} } ) \leq \sum _{(\alpha , \beta)\in {\cal M}_2(I) } h^{-|\alpha +\beta|/2}
\Vert ({\rm ad} P_h)^{\alpha} ({\rm ad} Q_h)^{\beta} A \Vert $$
and the term in the right hand side is finite under the hypotheses of Theorem \ref{t1.2}.
From Theorem \ref{t5.1}, the function $ \tau_{E(I),h} (A)$ is well-defined,
continuous and bounded on $H^2$ and satisfies (\ref{6.1}) and (\ref{6.2}).
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\section{Proof of Proposition \ref{p3.2}: second step.}\label{s7}
For all finite subsets $I$ of $\Gamma$, let us set
\begin{equation}\label{7.1} T_{I, h} = \prod _{j\in I} (I - H_{D_j , h/2} )\end{equation}
where $D_j$ is spanned by the vector $e_j$ of our Hilbertian basis of $H$, and $H_{D_j , h/2}$ is the operator defined in (\ref{3.1}),
with $E$ replaced by $D_j$, thus with an integral on $D_j ^2 $.
When $I =\emptyset$, we set $T _{I , h} = I d$.
We denote by $E(I)$ the subspace
of $B'$ spanned by the $e_j$, $j\in I$. Recall that the elements $e_j$
$(j \in \Gamma)$ of our Hilbertian basis of $H$ are in $B'$.
If $I = \emptyset$ then
set $E(I) = \{ 0 \}$. For any operator $A$ satisfying the
hypotheses in Theorem \ref{t1.2} and for all subspaces $E \subset B' \subset H$
of finite dimension, let $\tau _{E(I) , h} (A)$ be the function on $H^2$ defined in
Proposition \ref{p6.1}. In particular, we may have $E = E(I)$ with $I$
being a finite subset of $\Gamma$. We choose an increasing sequence
$(\Lambda_n)$ of finite subsets of $\Gamma$ whose union equals $\Gamma$.
For all $ n$, one defines a function $F_n $ on $H^2$ by,
\begin{equation}\label{7.2} F_n = \sum _{I\subset \Lambda _n} T _{I , h}
\tau_{E(I) , h} (A).\end{equation}
The above sum runs over all the subsets $I$ of $\Lambda _n$, including the empty set. We shall show that this sequence
of functions has indeed the properties announced in
Proposition \ref{p3.2}.
{\it Point i) }
One has, for all subsets $I \subset \Lambda _n$,
$$ H_{E(\Lambda _n), h/2} = H_{E(I), h/2} H_{E(\Lambda _n \setminus I), h/2} $$
and these operators commute with each other and with $T _{I , h}$.
Consequently,
$$ H_{E(\Lambda _n), h/2} F_n = \sum _{I\subset \Lambda _n} T _{I, h}
H_{E(\Lambda _n \setminus I), h/2} H_{E(I), h/2} \tau_{E(I) , h} (A). $$
From equality (\ref{6.1})
applied to set $E(I)$, one has,
$$ H_{E(\Lambda _n), h/2} F_n = \sum _{I\subset \Lambda _n} T _{I , h}
H_{E(\Lambda _n \setminus I), h/2} \sigma_h^{wick} (A). $$
The following equality is
a variant of the binomial formula,
$$\sum _{I\subset \Lambda _n} T _{I , h} H_{E(\Lambda _n \setminus I), h/2} = I d.
$$
So, we have proved equality (\ref{a4}), point i) of the Proposition \ref{p3.2}.
Points ii) and iii)
will both be a direct consequence of the following inequality. If $A$
satisfies the hypotheses of Theorem \ref{t1.2}, for all $(\alpha , \beta)$
in ${\cal M}_m$, for any finite subset $I$ in $\Gamma$ and for all
$h$ in $(0, 1)$,
\begin{equation}\label{7.3} \Vert \partial_z ^{\alpha } \partial_{\zeta} ^{\beta } T_{I, h}
\tau_{E(I) , h} (A) \Vert _{\infty} \leq M (K S_{\varepsilon} ^ 2) ^{|I|}
\prod _{j\in I} h \varepsilon_j^2 \prod _{j\in \Gamma } \varepsilon _j ^{\alpha _j + \beta _j}
\end{equation}
where $K$ is a numerical constant and $S_{\varepsilon}$ is
defined in (\ref{1.13}).
It remains to prove (\ref{7.3}). If $H_{D_j,h/2}$ is defined in (\ref{3.1}),
with $E$ replaced by
$D_j = {\rm Vect}\,(e_j )$, we may write,
$$ I - H_{ D_j, h/2} = {h\over 4} V_j
(\partial _{z_j} ^2 + \partial _{\zeta _j} ^2 ) $$
where the operators $V_j$ are bounded in the space $ C_b$ of
continuous bounded functions on $H^2$, and are commuting with partial derivatives operators. Moreover,
$$ \Vert V_j \Vert _{{\cal L} (C_b)} \leq 1. $$
Therefore, one may rewrite the operator $T_{I, h}$ defined in
(\ref{7.1}) under the following form,
$$ T_{I, h} = \prod _{j\in I} (h/4) V_j (\partial _{z_j} ^2 + \partial _{\zeta _j} ^2 ).$$
Let $ {\cal N} (I) $ be the set of
multi-indices $(\alpha , \beta )$ such that $\alpha _j = \beta _j = 0$ if
$ j \notin I$, and if $j \in I$,
either we have $\alpha _ j = 2$ and $\beta _j = 0$, or
$\alpha _j = 0$ and $\beta _j = 2$. Consequently,
$$\Vert \partial_z ^{\alpha } \partial_{\zeta} ^{\beta } T_{I, h}
\tau_{E(I) , h} (A) \Vert _{\infty} \leq (h/4)^{|I|} \sum _{ (\gamma , \delta) \in {\cal N} (I) }
\Vert \partial_z ^{\alpha + \gamma } \partial_{\zeta} ^{\beta + \delta }
\tau_{E(I) , h} (A) \Vert _{\infty}.$$
One verifies that,
$$ \left [ {\partial \over \partial x_j} + {\partial \over \partial y_j} + {\partial \over \partial z_j}
\right ] K_h^{Beals} (X, Y, Z ) = 0,\qquad
\left [ {\partial \over \partial {\xi}_j} + {\partial \over \partial {\eta}_j} + {\partial \over \partial \zeta _j}
\right ] K_h^{Beals} (X, Y, Z ) = 0.
$$
Consequently,
$$ \partial_z ^{\alpha } \partial_{\zeta} ^{\beta } \tau _{E(I),h} (A) =
\tau _{E(I),h} (A_{\alpha \beta}) $$
where $A_{\alpha \beta}$ is such that,
$$ (S_h A_{\alpha \beta} ) (X, Y ) = ( \partial _x +\partial _y)^{\alpha }
( \partial _{\xi} +\partial _{\eta} )^{\beta } (S_hA)(X, Y ).$$
From (\ref{4.4}),
$$A_{\alpha \beta} = c_{\alpha \beta} h^{-|\alpha +\beta |} ({\rm ad} P_h)^{\alpha}
({\rm ad} Q_h)^{\beta} A$$
where $|c_{\alpha \beta} | = 1$. Then,
$$ \Vert \partial_z ^{\alpha } \partial_{\zeta} ^{\beta } T _{I , h}
\tau _{E(I),h} (A) \Vert _{\infty } \leq (h/4)^{|I|} \sum _{(\gamma , \delta ) \in {\cal N} (I)}
h^{ -|\alpha + \beta + \gamma + \delta | }
\Vert \tau _{E(I),h} ( ({\rm ad} P_h)^{\alpha + \gamma }
({\rm ad} Q_h)^{\beta + \delta } A ) \Vert _{\infty }.
$$
From Proposition \ref{p6.1},
$$ \Vert \partial_z ^{\alpha } \partial_{\zeta} ^{\beta } T _{I , h}
\tau _{E(I),h} (A) \Vert _{\infty } \leq (K h/4)^{|I|}
\sum _{(\gamma , \delta ) \in {\cal N} (I)}
\sum _{(\lambda , \mu ) \in {\cal M}_2 (I)}
h^{ -|\alpha + \beta + \gamma + \delta | - | \lambda + \mu |/2 }$$
$$ \hskip 3cm \Vert ({\rm ad} P_h)^{\alpha + \gamma + \lambda }
({\rm ad} Q_h)^{\beta + \delta + \mu } A \Vert. $$
If $(\alpha , \beta) \in {\cal M} _m$, $(\gamma , \delta ) \in {\cal N} (I)$
and $(\lambda , \mu) \in {\cal M} _2(I)$, then the sum
$ (\alpha + \gamma + \lambda , \beta + \delta + \mu)$ belongs to
$ {\cal M} _{m+4}$.
From the assumptions of Theorem \ref{t1.2},
$$ \Vert \partial_z ^{\alpha } \partial_{\zeta} ^{\beta } T _{I , h}
\tau _{E(I),h} (A) \Vert _{\infty } \leq
M(K h/4)^{|I|} \sum _{(\gamma , \delta ) \in {\cal N} (I)}
\sum _{(\lambda , \mu ) \in {\cal M}_2 (I)}
h^{ |\lambda + \mu | /2} \prod _{j\in \Gamma} \varepsilon _j^{ \alpha _j
+ \beta _j + \gamma _j + \delta _j + \lambda _j + \mu_j}. $$
The number of multi-indices in ${\cal N} (I)$ is $2^{|I|}$,
and the number of multi-indices in ${\cal M}_2(I)$ is
$9^{|I|}$. For all multi-indices $ (\gamma , \delta) \in {\cal N} (I)$, we have
$ \gamma _j + \delta _j = 2$ if $j\in I$. If $0 < h < 1$, for all multi-indices
$ (\lambda , \mu ) \in {\cal M}_2(I)$, we have $ (
\sqrt h \varepsilon _j )^{\lambda _j+ \mu _j} \leq S_{\varepsilon}^ 2$,
where $ S_{\varepsilon}$ is defined in (\ref{1.13}). Consequently, we have indeed proved (\ref{7.3}) with another universal constant $K$.
From (\ref{7.2}), we deduce the points ii) and iii) of
Proposition \ref{p3.2}, which completes the proof of Theorem \ref{t1.2}.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\section{Composition of operators.}\label{s8}
\begin{theo}\label{t8.1} Let $F$ be in
$S_{m+6}(M , \varepsilon )$ and $G$ be in $S_{m+6}(M' , \varepsilon )$ ($m \geq 0$).
Then there exists a function $ H_h$ in $S_{m}(M'' ,(m+4) \varepsilon ) $ such that,
\begin{equation}\label{8.1} Op_h^{weyl} (F ) \circ Op_h^{weyl}(G) = Op_h^{weyl}(H_h).\end{equation}
We have set,
$$ M'' = M M' \prod _{j\in \Gamma} (1
+ K(m + 4)^2 S_{\varepsilon}^2 h \varepsilon_j ^2)^3 \leqno (8.2) $$
where $K$ is a universal constant and $S_{\varepsilon}$
is defined in (\ref{1.13}).
\end{theo}
{\it Proof.} For any multi-index $(\alpha, \beta)$
in ${\cal M}_{m+4}$ we have,
$$ ({\rm ad}P_h )^{\alpha} ({\rm ad}Q_h )^{\beta} \Big ( Op_h^{weyl} (F ) \circ Op_h^{weyl} (G) \Big )
= $$
$$ \sum_{ \alpha ' + \alpha '' = \alpha \atop \beta ' + \beta '' = \beta }
\Big ( ({\rm ad}P_h )^{\alpha '} ({\rm ad}Q_h )^{\beta' } Op_h^{weyl} (F ) \Big ) \circ
\Big ( ({\rm ad}P_h )^{\alpha ''} ({\rm ad}Q_h )^{\beta'' } Op_h^{weyl} (G ) \Big ).
$$
From (\ref{1.15}) (with $m$ replaced by $m +6$) and similarly
for $G$, we have, for each multi-index $(\alpha, \beta)$ in
${\cal M}_{m+4}$,
$$ \Vert ({\rm ad}P_h )^{\alpha} ({\rm ad}Q_h )^{\beta}
\Big ( Op_h^{weyl} (F ) \circ Op_h^{weyl} (G) \Big ) \Vert \leq
M M' N(\alpha , \beta ) \prod _{j\in \Gamma}
(1+ 81 \pi h S_{\varepsilon} \varepsilon _j^2)^2
\prod _{j\in \Gamma} (h\varepsilon _j)^{\alpha _j + \beta _j}
$$
where $N(\alpha, \beta )$ is the number of decompositions of $(\alpha, \beta)$
as a sum of two multi-indices $(\alpha ', \beta ')$ and $(\alpha '', \beta '')$.
If $(\alpha, \beta)$ is in ${\cal M}_{m+4}$ then this number is smaller than $ (m + 4)^{|\alpha + \beta|}$.
Consequently, $Op_h^{weyl} (F ) \circ Op_h^{weyl} (G)$ satisfies
a condition similar to (\ref{1.15b}) with $\varepsilon _j$
replaced by $(m + 4)\varepsilon _j$. So our Theorem \ref{t8.1}
is a consequence of Theorem \ref{t1.2}.
\hbox{\vrule \vbox to 7pt{\hrule width 6pt
\hrule}\vrule }
\begin{thebibliography}{99}
\bibitem{AJN}
L. Amour, L. Jager, J. Nourrigat
{\it On bounded pseudodifferential operators in Wiener spaces},
arXiv:1412.1577
\bibitem{AJN2} L. Amour, L. Jager, J.
Nourrigat, {\it On bounded pseudodifferential operators in a
high-dimensional setting,} Proc. Amer. Math. Soc. 143 (2015) 2057-2068.
\bibitem{Bea} R. Beals, {\it Characterization of pseudodifferential operators
and applications,} Duke Math. J. 44 (1977), no. 1, 45-57
\bibitem{Ber} F. A. Berezin, {\it The method
of second quantization,} Translated from the Russian, Pure and
Applied Physics, Vol. 24 Academic Press, New York-London 1966.
\bibitem{BO1} J.M. Bony,
{\it Caract\'erisation des op\'erateurs pseudo-diff\'erentiels.} S\'eminaire EDP, X. Expos\'e n°23, 17pp, (1996-1997).
\bibitem{BO2} J.M. Bony,
{\it Characterization of pseudo-differential operators,}
Progress in nonlinear differential equations and their applications. Vol. 84. Birkh\"auser, 21-34. (2013).
\bibitem{BO-C} J.M. Bony, J.Y. Chemin,
{\it Espaces fonctionnels associ\'es au calcul de Weyl-H\"ormander,}
Bull. Soc. Math. France. {\bf 122}, n°1 77-118, (1994).
\bibitem{C-R} M. Combescure, D. Robert, {\it Coherent states and applications in
mathematical physics,} Theoretical and Mathematical Physics.
Springer, Dordrecht, 2012.
\bibitem{DG} J. Derezi\'nski, C. G\'erard, {\it Asymptotic completeness in quantum field theory.
Massive Pauli-Fierz Hamiltonians. }
Rev. Math. Phys. 11 (1999), no. 4, 383-450.
\bibitem{F} G. B. Folland, {\it Harmonic analysis in
phase space.} Annals of Mathematics Studies, 122. Princeton
University Press, Princeton, NJ, 1989.
\bibitem{G1} L. Gross, {\it Measurable
functions on Hilbert space}, Trans. Amer. Math. Soc. 105 (1962)
372-390.
\bibitem{G2} L. Gross, {\it Abstract Wiener spaces,} Proc. 5th Berkeley
Sym. Math. Stat. Prob, 2, (1965), 31-42.
\bibitem{G3} L. Gross, {\it Abstract
Wiener measure and infinite dimensional potential theory,} in
Lectures in modern Analysis and applications, II, Lecture Notes in
Math 140, 84-116, Springer (1970).
\bibitem{G4} L. Gross, {\it Potential theory
on Hilbert space,} J. Functional Analysis 1, (1967) 123-181.
\bibitem{HA} B. Hall, {\it Holomorphic methods in analysis and mathematical physics,}
First Summer School in Analysis and Mathematical Physics
(Cuernavaca Morelos, 1998), 159, Contemp. Math., 260, Amer. Math.
Soc., Providence, RI, 2000.
\bibitem{HO} L. H\"ormander, {\it The analysis of
linear partial differential operators,} Volume III, Springer, 1985.
\bibitem{J} S. Janson, {\it Gaussian Hilbert spaces,} Cambridge Tracts in Math.
129, Cambridge Univ. Press (1997).
\bibitem{KR} M. Kree, {\it Propri\'et\'e de trace pour des espaces de Sobolev en dimension infinie.}
Bull. Soc. Math. France, {\bf 105}, (1977) 141-163.
\bibitem{KU} H. H. Kuo, {\it Gaussian measures
in Banach spaces.} Lecture Notes in Mathematics, Vol. 463. Springer,
Berlin-New York, 1975.
\bibitem{LA2} B. Lascar, {\it Propri\'et\'es d'espaces de Sobolev en dimension infinie,}
Comm. in Partial Differential Equations {\bf 1}, 6, (1976) 561-584.
\bibitem{RA} R. Ramer, {\it On nonlinear
Transformations of Gaussian measures, } J. Funct. Analysis, 15
(1974), 166-187.
\bibitem{RS} M. Reed, B. Simon, {\it Methods of modern
mathematical physics,} Vol II, Fourier Analysis, selfadjointness,
Academic Press, 1975.
\bibitem{SE} I. Segal, {\it Tensor algebras
over Hilbert spaces,} I, Trans. Amer. Math. Soc, 81 (1956), 104-134.
\bibitem{SI} B. Simon, {\it The $ P (\Phi)_2$ Euclidean (Quantum) Field theory,}
Princeton Series in Physics, Princeton Univ. Press, Princeton
(1974).
\bibitem{U} A. Unterberger, {\it Les op\'erateurs m\'etadiff\'erentiels,}
in Complex analysis, microlocal calculus and relativistic quantum
theory, Lecture Notes in Physics 126 (1980) 205-241.
\bibitem{Y} K. Yosida, {\it Functional analysis,} Reprint of the sixth (1980) edition. Classics
in Mathematics. Springer- Verlag, Berlin, 1995.
\end{thebibliography}
laurent.amour@univ-reims.fr\newline
LMR EA 4535 and FR CNRS 3399, Universit\'e de Reims Champagne-Ardenne,
Moulin de la Housse, BP 1039,
51687 REIMS Cedex 2, France.
rlascar@math.univ-paris-diderot.fr\newline
Institut Mathématique de Jussieu UMR CNRS 7586, Analyse Algébrique, 4 Place Jussieu, 75005 Paris, France.
jean.nourrigat@univ-reims.fr\newline
LMR EA 4535 and FR CNRS 3399, Universit\'e de Reims Champagne-Ardenne,
Moulin de la Housse, BP 1039,
51687 REIMS Cedex 2, France.
\end{document} |
\begin{document}
\title[Slicing the Sierpi\'nski gasket] {Slicing the Sierpi\'nski gasket}
\author{Bal\'azs B\'ar\'any}
\address{Bal\'azs B\'ar\'any, Department of Stochastics, Institute of Mathematics, Technical University of Budapest, 1521
Budapest, P.O.Box 91, Hungary} \email{balubsheep@gmail.com}
\author{Andrew Ferguson}
\address{Andrew Ferguson\\Department of Mathematics\\University of Bristol\\ University Walk\\Bristol\\BS8 1TW\\UK.} \email{andrew.ferguson@bris.ac.uk}
\author{K\'aroly Simon}
\address{K\'aroly Simon, Department of Stochastics, Institute of Mathematics, Technical University of Budapest, 1521
Budapest, P.O.Box 91, Hungary} \email{simonk@math.bme.hu}
\thanks{ \indent
{\em Key words and phrases.} Hausdorff dimension, multifractal analysis, Sierpi\'nski gasket}
\begin{abstract}We investigate the dimension of intersections of the Sierpi\'nski gasket with lines. Our first main result describes a countable, dense set of angles that are exceptional for Marstrand's theorem. We then provide a multifractal analysis for the set of points in the projection for which the associated slice has a prescribed dimension.
\end{abstract}
\date{\today}
\maketitle
\thispagestyle{empty}
\section{Introduction and Statements}\label{sintro}
Let $\Delta\subset\mathbb{R}^2$ denote the Sierpi\'nski gasket, i.e. the unique non-empty compact set satisfying
\[
\Delta=S_0(\Delta)\cup S_1(\Delta)\cup S_2(\Delta),
\]
where
\begin{equation}\label{esiernor}
S_0(x,y)=\left(\frac{1}{2}x,\frac{1}{2}y\right),\ S_1(x,y)=\left(\frac{1}{2}x+\frac{1}{2},\frac{1}{2}y\right),\ S_2(x,y)=\left(\frac{1}{2}x+\frac{1}{4},\frac{1}{2}y+\frac{\sqrt{3}}{4}\right).
\end{equation}
It is well known that $\dim_H\Delta=\dim_B\Delta=\frac{\log3}{\log2}=s$, where $\dim_H$ denotes the Hausdorff and $\dim_B$ denotes the box (or Minkowski) dimension. For the definition and basic properties of the box and Hausdorff dimensions we refer the reader
to \cite{Fal2}.
We denote by $\mathrm{proj}_{\theta}$ the projection onto the line through the origin making angle $\theta$ with the $x$-axis. For $a\in \mathrm{proj}_\theta(\Delta)$ we let $L_{\theta,a}=\{(x,y)\,:\,\mathrm{proj}_\theta(x,y)=a\}=\{(x,a+x\tan\theta)\,:\,x\in\mathbb{R}\}$. The main purpose of this paper is to investigate the dimension theory of the slices $E_{\theta,a}=L_{\theta,a}\cap\Delta$. Since $\Delta$ is rotation and reflection invariant, we may assume without loss of generality that $\theta\in[0,\frac{\pi}{3})$. In Proposition \ref{pdc1} we show that a dimension conservation principle holds: if $\nu_\theta$ denotes the projection of the normalised $\log(3)/\log(2)$-dimensional Hausdorff measure then for all $\theta\in [0,\pi)$ and $a\in{\rm proj}_\theta(\Delta)$ we have $\underline{d}_{\nu_{\theta}}(a)+\overline{\dim}_B E_{\theta,a}=s$, where $\underline{d}_{\nu_\theta}(a)$ denotes the lower local dimension of $\nu_\theta$ at $a$. The analogous relationship between upper local dimension and lower box dimension is also proved.
Furthermore, in Theorem \ref{ttyp} we prove that whenever $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ for positive integers $p,q$, the direction $\theta $
is exceptional in Marstrand's Theorem. More precisely, the dimension of Lebesgue almost all slices is a constant strictly smaller than $s-1$ but the dimension for almost all slices with respect to the projected measure is another constant strictly greater than $s-1$.
Finally, we provide a multifractal analysis of the Hausdorff dimension of the slices $E_{\theta,a}$ for $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ for positive integers $p,q$. Furstenberg \cite{Fur} proved a dimension conservation principle for homogeneous sets, which in our setting corresponds to showing that
\begin{equation*}{\rm dim}_H(\Delta)=\sup\left\{\delta+{\rm dim}_H\{a\in\mathrm{proj}_\theta(\Delta)\,:\,{\rm dim}_H(E_{\theta,a})\geq \delta\}\right\}\end{equation*} i.e. any loss of dimension in the projection may be accounted for in the fibres $\{E_{\theta,a}\}_{a\in\mathrm{proj}_\theta(\Delta)}$. We remark that the results found in \cite{Fur} apply to a quite wide class of compact sets $E\subset\mathbb{R}^n$ and for all linear maps $P:E\to\mathbb{R}^n$.
In Theorem \ref{tspectra} we investigate the function \begin{equation*}\Gamma:\delta\mapsto {\rm dim}_H\{a\in\mathrm{proj}_\theta(\Delta)\,:\,{\rm dim}_H(E_{\theta,a})\geq \delta\}.\end{equation*} We prove that $\Gamma$ admits a multifractal description, in particular it is continuous, concave and may be represented as the Legendre transform of a pressure function.
For technical reasons we elect to prove our statements for the so-called right-angle Sierpi\'nski gasket $\Lambda$ which is the attractor of iterated function system
\begin{equation}\label{erightangle}
\Phi=\left\{F_0(x,y)=\left(\frac{x}{2},\frac{y}{2}\right),\ F_1(x,y)=\left(\frac{x}{2}+\frac{1}{2},\frac{y}{2}\right),\ F_2(x,y)=\left(\frac{x}{2},\frac{y}{2}+\frac{1}{2}\right)\right\},
\end{equation}
and intersections with lines of rational slope. There is a linear transformation $T$
\begin{equation}\label{etrans}
T=\left(
\begin{array}{cc}
1 & -\frac{\sqrt{3}}{3} \\
0 & \frac{2\sqrt{3}}{3} \\
\end{array}
\right)
\end{equation}
which maps the Sierpi\'nski gasket into the right-angle Sierpi\'nski gasket. Since an invertible linear transformation does not change the dimension of a set we state our results for the usual Sierpi\'nski gasket and for appropriate slopes. For the transformation see Figure \ref{fusutoright}.
\begin{figure}
\caption{The transformation between the usual and right-angle Sierpi\'nski gasket.}
\label{fusutoright}
\end{figure}
Denote by $\nu$ the unique self-similar measure satisfying
\[
\nu=\sum_{i=0}^2\frac{1}{3}\nu\circ S_i^{-1}.
\]
One may show that this measure is nothing more than the normalised $s$-dimensional Hausdorff measure restricted to $\Delta$. We denote by $\nu_{\theta}$ the push-forward of $\nu$ under the projection $\mathrm{proj}_\theta$, i.e. $\nu_{\theta}=\nu\circ\mathrm{proj}_{\theta}^{-1}$. Let $\Delta_{\theta}$ denote the projection of $\Delta$.
The description of typical slices is given by the following result of Marstrand (see \cite{Mar} or \cite[Theorem 10.11]{M}).
\begin{prop}[Marstrand]\label{pmar}
For Lebesgue almost every $\theta\in[0,\frac{\pi}{3})$ and $\nu_{\theta}$-almost all $a\in\Delta_{\theta}$
\[
\dim_B E_{\theta,a}=\dim_H E_{\theta,a}=s-1.
\]
\end{prop}
We define the (upper and lower) local dimension of a Borel measure $\eta$ at the point $x$ by
\[
\underline{d}_{\eta}(x)=\liminf_{r\rightarrow0}\frac{\log\eta(B_r(x))}{\log r},\ \overline{d}_{\eta}(x)=\limsup_{r\rightarrow0}\frac{\log\eta(B_r(x))}{\log r}.
\]
Manning and Simon proved a dimension conservation result for the Sierpi\'nski carpet, connecting the local dimension of the projected natural measure and the box dimension of the slices (see \cite[Proposition 4]{MS}). For the Sierpi\'nski gasket we state an analogous result.
\begin{prop}\label{pdc1}
For every $\theta\in(0,\frac{\pi}{3})$ and $a\in\Delta_{\theta}$
\begin{eqnarray}
\underline{d}_{\nu_{\theta}}(a)+\overline{\dim}_B E_{\theta,a}=s,\\
\overline{d}_{\nu_{\theta}}(a)+\underline{\dim}_B E_{\theta,a}=s.
\end{eqnarray}
\end{prop}
Feng and Hu proved in \cite[Theorem 2.12]{FH} that every self-similar measure is exact dimensional. That is, the lower and upper local-dimension coincide and this common value is almost everywhere constant. Moreover, Young proved in \cite{Y} that this constant is the Hausdorff dimension of the measure. In other words, if $\eta$ is self-similar then
\[
\text{for $\eta$-almost all } x,\ \underline{d}_{\eta}(x)=\overline{d}_{\eta}(x)=d_{\eta}(x)=\dim_H\eta=\inf\left\{\dim_HA:\eta(A)=1\right\}.
\]
Using the above results we deduce the following.
\begin{cor}\label{cbox}
For every $\theta\in(0,\frac{\pi}{3})$ and $\nu_{\theta}$-almost every $a\in\Delta_{\theta}$ we have
\[
\dim_BE_{\theta,a}=s-\dim_H\nu_{\theta}\geq s-1.
\]
\end{cor}
Liu, Xi and Zhao \cite{LXZ} encoded the Box dimensions of a slice
through the Sierpi\'nski carpet for lines of rational slope in terms of the Lyapunov exponent of a random matrix product. They then used this coding to show that for a fixed rational direction $\theta$ the Box and Hausdorff dimensions of a slice coincide and are constant for Lebesgue-almost all translations. Moreover, this constant $\alpha(\theta )$ was shown to satisfy $\alpha(\theta )\leq s-1$, with this inequality being conjectured to be strict. This conjecture was proved by Manning and Simon
\cite[Theorem 9]{MS}.
We prove an analogous result for the Sierpi\'nski gasket. In addition, we will show
that the $\nu_\theta$-typical dimension of a slice is strictly bigger than $s-1$.
\begin{theorem}\label{ttyp}
Let $p,q\in\mathbb{N}$ and let us suppose that $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ and $\theta\in(0,\frac{\pi}{3})$. Then there exist constants $\alpha(\theta), \beta(\theta)$ depending only on $\theta$ such that
\begin{enumerate}
\item for Lebesgue almost all $a\in\Delta_{\theta}$\label{ttyp1}
\[
\alpha(\theta):=\dim_B E_{\theta,a}=\dim_H E_{\theta,a}<s-1,
\]
\item for $\nu_{\theta}$-almost all $a\in\Delta_{\theta}$\label{ttyp2}
\[
\beta(\theta):=\dim_B E_{\theta,a}=\dim_H E_{\theta,a}>s-1.
\]
\end{enumerate}
\end{theorem}
A simple calculation reveals that the set of tangents of the angles in this theorem is equal to
$\mathbb{Q}'=\left\{0<\sqrt{3}\frac{m}{n}<\sqrt{3}:
\mbox{ if } m \mbox{ is odd then }
n\mbox{ is odd }
\right\}$.
We remark that Theorem \ref{ttyp} shares a similarity with a result of Feng and Sidorov \cite[Theorem 3 and Proposition 4]{FS} where the Lebesgue typical local dimension is computed for a class of self-similar measures. The authors show that if an algebraic condition holds then the Lebesgue typical local dimension is strictly greater than one. By Proposition \ref{pdc1} the Theorem above may be rephrased in terms of local dimensions being either strictly greater or less than one.
In \cite{Fur}, Furstenberg introduced and proved a dimension conservation formula \cite[Definition 1.1]{Fur} for homogeneous fractals (for example homotheticly self-similar sets). As a consequence of Theorem \ref{ttyp}(\ref{ttyp2}) and Corollary \ref{cbox} we state the special case of Furstenberg's dimension conservation formula for the Sierpi\'nski gasket and rational slopes.
Furstenberg in \cite[Theorem 6.2]{Fur} stated the result as an inequality but combining the result as stated with the Marstrand Slicing Theorem (see \cite{Mar2} or \cite[Theorem 5.8]{Fal}) we see that
\begin{lemma}[Marstrand Slicing Theorem]\label{lslicing}
Let $F$ be any subset of $\mathbb{R}^2$, and let $E$ be a subset of the $y$-axis. If $\dim_H(F\cap L_{\theta,a})\geq t$ for all $a\in E$, then $\dim_HF\geq t + \dim_H E$.
\end{lemma}
\begin{cor}[Furstenberg]\label{cfurst}
Let $p,q\in\mathbb{N}$ and let us suppose that $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ and $\theta\in(0,\frac{\pi}{3})$. Then the map $\mathrm{proj}_{\theta}$ satisfies the dimension conservation formula \cite[Definition 1.1]{Fur} at the value $\beta(\theta)$, i.e.
\begin{equation}\label{efurst}
\beta(\theta)+\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}\geq\beta(\theta)\right\}=s.
\end{equation}
\end{cor}
\begin{proof}
\begin{equation*}
\begin{split}\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}\geq\beta(\theta)\right\}& \geq \dim_H\left\{a\in\Delta_{\theta}:\dim_B E_{\theta,a} =\dim_H E_{\theta,a}=\beta(\theta)\right\} \\
& \geq \dim_H \nu_{\theta}=s-\beta(\theta).\end{split}
\end{equation*}
The other direction follows from Lemma \ref{lslicing}.
\end{proof}
We remark that the above argument also shows that
\[
\beta(\theta)+\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}=\beta(\theta)\right\}=s.
\]
The other main goal of this paper is to analyse the behaviour of the function $\Gamma:\delta\mapsto\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}\geq\delta\right\}$ under the assumption that $\tan\theta=\frac{\sqrt{3}p}{2q+p}$, where $p,q\in\mathbb{N}$ and $(p,q)=1$. For the analysis we use two matrices generated naturally by the projection and the IFS $\left\{S_0, S_1, S_2\right\}$. For simplicity, we illustrate these matrices for the right-angle gasket.
Denote the angle $\theta$ projection of $\Lambda$ to the $y$-axis by $\Lambda_{\theta}$. Then $\Lambda_{\theta}=[-\tan\theta,1]$. Consider the projected IFS of $\Phi$, i.e. \[
\phi=\left\{f_0(t)=\frac{t}{2},f_1(t)=\frac{t}{2}+\frac{1}{2},f_2(t)=\frac{t}{2}-\frac{p}{2q}\right\}.
\]
By straightforward calculations and \cite[Theorem 2.7.]{NW} we see that $\phi$ satisfies the finite type condition and therefore, the weak separation property.
Let us divide $\Lambda_{\theta}$ into $p+q$ equal intervals such that $I_k=\left[1-\frac{k}{q},1-\frac{k-1}{q}\right]$ for $k=1,\dots,p+q$. Moreover, let us divide $I_k$ for every $k$ into two equal parts. Namely, let $I_k^0=\left[1-\frac{k}{q},1-\frac{2k-1}{2q}\right]$ and $I_k^1=\left[1-\frac{2k-1}{2q},1-\frac{k-1}{q}\right]$. Let us define the $(p+q)\times(p+q)$ matrices $\mx{A}_0, \mx{A}_1$ in the following way:
\begin{equation}\label{eprojmatr}
(\mx{A}_n)_{i,j}=\sharp\left\{k\in\left\{0,1,2\right\}:f_k(I_j)=I^n_i\right\}.
\end{equation}
For example, see the case $\frac{p}{q}=\frac{2}{3}$ of the construction in Figure \ref{fex} and the matrices are
\[
\mx{A}_0=
\left(\begin{array}{ccccc}
1 & 0 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 0 & 1 \\
0 & 1 & 0 & 1 & 0 \\
0 & 0 & 0 & 1 & 0 \\
\end{array}\right)\text{ and }
\mx{A}_1=\left(
\begin{array}{ccccc}
0 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 1 & 0 \\
1 & 0 & 1 & 0 & 0 \\
0 & 0 & 1 & 0 & 1 \\
0 & 0 & 0 & 0 & 1 \\
\end{array}
\right).
\]
\begin{figure}
\caption{Graph of the projection and construction of the matrices $\mx{A}_0$ and $\mx{A}_1$.}
\label{fex}
\end{figure}
We note that by some simple calculations the matrices $\mx{A}_0, \mx{A}_1$ can be written in the form
\begin{multline}\label{eprojmatr2}
(\mx{A}_n)_{i,j}=1\text{ if and only if }2i+1-n\equiv j \mod p+q\text{ or }\\2q+p\geq2i+n-1\geq q+1\text{ and }2i+1-n-q\equiv j\mod p+q
\end{multline}
for $n=0,1$ and $1\leq i,j\leq p+q$. Using these matrices we are able to explicitly express the quantities $\alpha(\theta),\beta(\theta)$.
\begin{prop}\label{pcalc}
Let $p,q\in\mathbb{N}$ and let us suppose that $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ and $\theta\in(0,\frac{\pi}{3})$. Moreover, let $\alpha(\theta)$ and $\beta(\theta)$ be as in Theorem \ref{ttyp}. Then
\begin{equation*}
\begin{split}\alpha(\theta)&=\frac{1}{\log2}\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{\xi_1,\dots,\xi_n=0}^1\frac{1}{2^n}\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e},\\
\beta(\theta)& =\frac{1}{\log2}\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{\xi_1,\dots,\xi_n=0}^1\frac{1}{3^n}\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p}\log\left(\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p}\right),
\end{split}\end{equation*}
where $\underline{e}=(1,\cdots,1)$ and $(\mx{A}_0+\mx{A}_1)\underline{p}=3\underline{p}$.
\end{prop}
The proof of Proposition \ref{pcalc} will follow from the proof of Theorem \ref{ttyp}. In order to obtain further information on the nature of the function $\Gamma(\delta)$ we will employ the theory of multifractal analysis for products of non-negative matrices \cite{F1,F2,FL2}. Let $P(t)$ denote the pressure function which is defined as
\begin{equation}\label{emxpressure}
P(t)=\lim_{n\rightarrow\infty}\frac{1}{n}\log\sum_{\xi_1,\dots,\xi_n=0}^1\left(\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}\right)^t
\end{equation}
and let us define \[
b_{\min}=\lim_{t\rightarrow-\infty}\frac{P(t)}{t},\ b_{\max}=\lim_{t\rightarrow\infty}\frac{P(t)}{t}.
\]
\begin{prop}\label{cspectra}
Let $p,q\in\mathbb{N}$ and let us suppose that $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ and $\theta\in(0,\frac{\pi}{3})$. Then
\begin{enumerate}
\item $\dim_H\left\{a\in\Delta_{\theta}:\dim_BE_{\theta,a}=\alpha\right\}=\inf_t\left\{-\alpha t+\frac{P(t)}{\log2}\right\}$ for $b_{\min}\leq\alpha\leq b_{\max}$.\label{tspectra1}
\item $\dim_H\left\{a\in\Delta_{\theta}:d_{\nu_{\theta}}(a)=\alpha\right\}=\inf_t\left\{-(s-\alpha) t+\frac{P(t)}{\log2}\right\}$ for $s-b_{\max}\leq\alpha\leq s-b_{\min}$.\label{tspectra4}
\end{enumerate}
Both of the functions are concave and continuous.
\end{prop}
\begin{proof}
Proposition \ref{cspectra}(\ref{tspectra4}) follows immediately from \cite[Theorem 1.1]{FL}, \cite[Theorem 1.2]{FL}. Proposition \ref{cspectra}(\ref{tspectra1}) follows from combining the dimension conservation principle Proposition \ref{pdc1} with Proposition \ref{cspectra}(\ref{tspectra4}).
\end{proof}
We note that Proposition \ref{cspectra}(\ref{tspectra1}) may be derived by applying \cite{F2} to the matrices $\mx{A}_0, \mx{A}_1$. We describe this derivation in Section \ref{stspectra}.
\begin{theorem}\label{tspectra}
Let $p,q\in\mathbb{N}$ and let us suppose that $\tan\theta=\frac{\sqrt{3}p}{2q+p}$ and $\theta\in(0,\frac{\pi}{3})$. Then
\begin{enumerate}
\item$\Gamma(\delta)=\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}\geq\delta\right\}=\inf_{t>0}\left\{-\delta t+\frac{P(t)}{\log2}\right\}$ if $b_{\max}\geq\delta>\alpha(\theta)$ and $\Gamma(\delta)=1$ if $\delta\leq\alpha(\theta)$. The function $\Gamma$ is decreasing and continuous.\label{tspectra2}
\item For every $b_{\max}\geq\delta\geq\alpha(\theta)$, $\chi(\delta)=\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}=\delta\right\}=\inf_{t>0}\left\{-\delta t+\frac{P(t)}{\log2}\right\}$. The function $\chi$ is decreasing and continuous.\label{tspectra3}
\end{enumerate}
\end{theorem}
For an example of the function $\delta\mapsto\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}=\delta\right\}$ with $\tan\theta=\frac{\sqrt{3}}{3}$ in the usual Sierpi\'nski gasket case, see Figure \ref{fspect}.
\begin{figure}
\caption{The graph of the function $\delta\mapsto\dim_H\left\{a\in\Delta_{\theta}:\dim_H E_{\theta,a}=\delta\right\}$.}
\label{fspect}
\end{figure}
The organisation of the paper is as follows: in Section \ref{sprop} we prove Proposition \ref{pdc1}. Section \ref{sttyp} contains the proof of Theorem \ref{ttyp}. Finally, in Section \ref{stspectra} we prove Theorem \ref{tspectra}.
\section{Proof of Proposition \ref{pdc1}}\label{sprop}
In this section we prove Proposition \ref{pdc1}. The method is adapted from \cite[Proposition 4]{MS} where an analogous result is proved for the Sierpi\'nski carpet.
We first introduce notation that will be fixed for the remainder of the paper. Let $S_0, S_1, S_2$ be as in (\ref{esiernor}), moreover let $\Sigma=\left\{0,1,2\right\}^{\mathbb{N}}$ and $\Sigma^*=\bigcup_{n=0}^{\infty}\left\{0,1,2\right\}^n$. Write $\sigma:\Sigma\to\Sigma$ for the left shift operator. Moreover, let $\Pi:\Sigma\to\Delta$ be the natural projection. That is, for every $\mathbf{i}=(i_1i_2\cdots)\in\Sigma$
\[
\Pi(\mathbf{i})=\lim_{n\rightarrow\infty}S_{i_1}\circ S_{i_2}\circ\cdots\circ S_{i_n}(0).
\]
Let $\mu$ be the equally distributed Bernoulli measure on $\Sigma$. That is, for every $\underline{i}\in\Sigma^*$ the measure of $[\underline{i}]=\left\{\mathbf{i}:\mathbf{i}=\underline{i}\omega\right\}$ is $\mu([\underline{i}])=3^{-\left|\underline{i}\right|}$, where $\left|\underline{i}\right|$ denotes the length of $\underline{i}$. Then $\nu=\Pi^*\mu=\mu\circ\Pi^{-1}$.
For simplicity we denote by $\Delta_{i_1\cdots i_n}=S_{i_1}\circ\cdots\circ S_{i_n}(\Delta)$. Let us call the $n$'th level ``good sets'' of $a\in\Delta_{\theta}$ the set of $(i_1\cdots i_n)$ such that $\Delta_{i_1\cdots i_n}$ intersects the set $E_{\theta,a}$. More precisely,
\begin{equation}\label{egoodset}
G_n(\theta,a)=\left\{(i_1\cdots i_n):\Delta_{i_1\cdots i_n}\cap E_{\theta,a}\neq\emptyset\right\}.
\end{equation}
\begin{lemma}\label{lbdgs}
For every $\theta\in[0,\frac{\pi}{3})$ and $a\in\Delta_{\theta}$
\[
\underline{\dim}_B E_{\theta,a}=\liminf_{n\rightarrow\infty}\frac{\log\sharp G_n(\theta,a)}{n\log2}\text{ and } \overline{\dim}_B E_{\theta,a}=\limsup_{n\rightarrow\infty}\frac{\log\sharp G_n(\theta,a)}{n\log2},
\]
where $\sharp G_n(\theta,a)$ denotes the cardinality of $G_n(\theta,a)$.
\end{lemma}
\begin{proof}
Let us denote the minimal number of intervals with length $r$ covering the set $E_{\theta,a}$ by $N_r(\theta,a)$. It is easy to see that
\begin{equation}\label{ebll}
N_{2^{-n}}(\theta,a)\leq\sharp G_n(\theta,a).
\end{equation}
On the other hand, for a minimal cover of $E_{\theta,a}$ with intervals of side length $2^{-n}$ every such interval will intersect an element of $G_n(\theta,a)$. Further, every element of $G_n(\theta,a)$ will intersect some element of this minimal cover. Finally, we observe that for every interval with side length $2^{-n}$ there are at most $\left\lceil\frac{4\sqrt{3}(2+\pi)}{3}\right\rceil$ cylinders in $G_n(\theta,a)$ whose images under $\Pi$ intersect the interval. Hence,
\begin{equation}\label{ebul}
\sharp G_n(\theta,a)\leq\left\lceil\frac{4\sqrt{3}(2+\pi)}{3}\right\rceil N_{2^{-n}}(\theta,a).
\end{equation}
The equations (\ref{ebll}) and (\ref{ebul}) imply the statement of the lemma.
\end{proof}
\begin{proof}[Proof of Proposition \ref{pdc1}]
Let $\theta\in(0,\frac{\pi}{3})$ and let us take a point $a\in\Delta_{\theta}$. Take the $C(\theta)2^{-n}$ neighbourhood of $a$, where $C(\theta)=\frac{1}{2}\min\left\{\tan\theta,\cos(\theta+\frac{\pi}{6})\right\}$. Then
\[
\nu_{\theta}(B_{C(\theta)2^{-n}}(a))=\nu(B_{\cos\theta C(\theta)2^{-n}}(L_{\theta,a}))\geq\nu\left(\bigcup_{\underline{i}\in G_{n-c(\theta)}}\Delta_{\underline{i}}\right)=3^{-n+c(\theta)}\sharp G_{n-c(\theta)}(\theta,a),
\]
where $c(\theta)=\frac{\log\left(\cos\theta C(\theta)\right)}{\log2}$. Taking logarithms and dividing by $-n\log2$ yields
\[
\frac{\log(\nu_{\theta}(B_{C(\theta)2^{-n}}(a)))}{-n\log2}\leq\frac{(n-c(\theta))\log3}{n\log2}+\frac{\log\sharp G_{n-c(\theta)}(\theta,a)}{-n\log2}.
\]
Taking limit inferior and limit superior and using Lemma \ref{lbdgs} we obtain
\begin{equation}\label{edcub}
\begin{split}
&\underline{d}_{\nu_{\theta}}(a)+\overline{\dim}_B E_{\theta,a}\leq s,\\
&\overline{d}_{\nu_{\theta}}(a)+\underline{\dim}_B E_{\theta,a}\leq s.
\end{split}
\end{equation}
For the reverse inequality we have to introduce the so called ``bad'' sets which do not intersect $E_{\theta,a}$ but intersect its neighbourhood. That is,
\[
R_n(\theta,a)=\left\{(i_1\cdots i_n):\Delta_{i_1\cdots i_n}\cap E_{\theta,a}=\emptyset\text{ and }\Delta_{i_1\cdots i_n}\cap B_{\cos\theta C(\theta)2^{-n}}(L_{\theta,a})\neq\emptyset\right\}.
\]
Then
\[
\nu_{\theta}(B_{C(\theta)2^{-n}}(a))=\nu(B_{\cos\theta C(\theta)2^{-n}}(L_{\theta,a}))\leq3^{-n}\left(\sharp R_n(\theta,a)+\sharp G_n(\theta,a)\right).
\]
It is enough to prove that $\sharp R_n(\theta,a)$ is less than or equal to $\sharp G_n(\theta,a)$ up to a multiplicative constant.
\begin{figure}
\caption{A ``bad'' set of the Sierpi\'nski gasket}
\label{fsier}
\end{figure}
Let $\Delta_{\underline{i}}$ be an arbitrary $n$'th level cylinder set of $\Delta$. It is easy to see that if $\Delta_{\underline{i}}$ is not one of the corners of $\Delta$ then every corner of $\Delta_{\underline{i}}$ connects to another $n$'th level cylinder set, see Figure \ref{fsier}. We note that the constant $C(\theta)$ is chosen in such a way that if the $\cos\theta C(\theta)2^{-n}$ neighbourhood of the line $L_{\theta,a}$ intersects a cylinder but the line itself does not intersect it (that is, it is a ``bad'' set), then the line intersects the closest neighbour of the cylinder. Therefore, for every $\underline{i}\in R_n(\theta,a)$ there exists at least one $\underline{j}\in G_n(\theta,a)$ such that $\Delta_{\underline{i}}$ and $\Delta_{\underline{j}}$ are connected to each other (by the choice of $C(\theta)$). Moreover, a cylinder set can be connected to at most $6$ other cylinder sets. Therefore, $\sharp R_n(\theta,a)\leq6\,\sharp G_n(\theta,a)$.
Applying that, we have
\[
\nu_{\theta}(B_{C(\theta)2^{-n}}(a))\leq3^{-n}7\,\sharp G_n(\theta,a).
\]
Taking logarithms, dividing by $-n\log2$ and taking limit inferior and limit superior we get by Lemma \ref{lbdgs}
\begin{equation}\label{edclb}
\begin{split}
&\underline{d}_{\nu_{\theta}}(a)+\overline{\dim}_B E_{\theta,a}\geq s,\\
&\overline{d}_{\nu_{\theta}}(a)+\underline{\dim}_B E_{\theta,a}\geq s.
\end{split}
\end{equation}
The inequalities (\ref{edcub}) and (\ref{edclb}) imply the statements.
\end{proof}
\section{Proof of Theorem \ref{ttyp}}\label{sttyp}
In this Section we prove Theorem \ref{ttyp}, that is for $\tan(\theta)\in\mathbb{Q}$ the angle $\theta$ is an exceptional direction in Marstrand's theorem. We encode the box dimension of a slice $E_{\theta,a}$ using the matrices $\mx{A}_0,\mx{A}_1$. This coding first appeared in \cite{LXZ}. We then show that the Lebesgue-typical dimension of a slice is strictly less than $s-1$ by applying a result of Manning and Simon \cite[Theorem 9]{MS}. Finally, we show that the $\nu_\theta$-typical dimension of a slice is strictly bigger than $s-1$.
For the rest of the paper we will work with the right-angle Sierpi\'nski gasket $\Lambda$ and for rational slopes.
For the rest of the paper we assume that $\theta\in(0,\frac{\pi}{2})$ such that $\tan\theta=\frac{p}{q}$ where $p,q\in\mathbb{N}$ and the greatest common divisor is $1$. (This is equivalent with the choice $\theta\in(0,\frac{\pi}{3})$ for $\Delta$.)
\begin{lemma}\label{lmxbox}
Let $\theta$ and $a\in\Lambda_{\theta}$ be such that $\tan\theta=\frac{p}{q}$ and
\[
a=1-\frac{k-1}{q}-\frac{1}{q}\sum_{i=1}^{\infty}\frac{\xi_i}{2^i}
\]
then
\[
\underline{\dim}_B E_{\theta,a}=\liminf_{n\rightarrow\infty}\frac{\log\underline{e}_k\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n\log2}\text{ and }\overline{\dim}_B E_{\theta,a}=\limsup_{n\rightarrow\infty}\frac{\log\underline{e}_k\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n\log2},
\]
where $\underline{e}_k$ is the $k$'th element of the natural basis of $\mathbb{R}^{p+q}$ and $\underline{e}=\sum_{k=1}^{p+q}\underline{e}_k$.
\end{lemma}
\begin{proof}
By the definition of the matrices $\mx{A}_0, \mx{A}_1$ it is easy to see that for every $n\geq1$ and $\xi_1,\dots,\xi_n\in\left\{0,1\right\}$ we have
\[
\left(\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\right)_{i,j}=\sharp\left\{\underline{i}\in\left\{0,1\right\}^n:f_{\underline{i}}(I_j)=I_i^{\xi_1,\dots,\xi_n}\right\},
\]
where $I_i^{\xi_1,\dots,\xi_n}$ denotes the interval $[1-\frac{i-1}{q}-\frac{1}{q}\sum_{l=1}^n\frac{\xi_l}{2^l}-\frac{1}{q2^{n}},1-\frac{i-1}{q}-\frac{1}{q}\sum_{l=1}^n\frac{\xi_l}{2^l}]$. Therefore
\[
\underline{e}_k\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}=\sharp\left\{\underline{i}\in\left\{0,1\right\}^n:\text{there exists a }1\leq j\leq p+q\text{ such that }f_{\underline{i}}(I_j)=I_k^{\xi_1,\dots,\xi_n}\right\}.
\]
For every $I_k^{\xi_1,\dots,\xi_n}$ and every $(i_1,\dots,i_n)$ for which there exists a $1\leq j\leq p+q$ such that $f_{i_1,\dots,i_n}(I_j)=I_k^{\xi_1,\dots,\xi_n}$, we have $I_k^{\xi_1,\dots,\xi_n}\subseteq\mathrm{proj}_{\theta}\Lambda_{i_1,\dots,i_n}$. This implies that for every $a\in I_k^{\xi_1,\dots,\xi_n}$
\[
\underline{e}_k\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}\leq\sharp G_n(\theta,a).
\]
On the other hand for every $a\in\mathrm{proj}_{\theta}\Lambda$ if $a\in\mathrm{int}(I_k^{\xi_1,\dots,\xi_n})$ then for every $(i_1,\dots,i_n)\in G_n(\theta,a)$ there exists a $1\leq j\leq p+q$ such that $f_{i_1,\dots,i_n}(I_j)=I_k^{\xi_1,\dots,\xi_n}$. If $a\in\partial(I_k^{\xi_1,\dots,\xi_n})$ then for every $(i_1,\dots,i_n)\in G_n(\theta,a)$ there exists a $(i_1',\dots,i_n')\in G_n(\theta,a)$ and a $1\leq j\leq p+q$ such that $f_{i_1',\dots,i_n'}(I_j)=I_k^{\xi_1,\dots,\xi_n}$ as well as $\Lambda_{i_1,\dots,i_n}$ and $\Lambda_{i_1',\dots,i_n'}$ are connected or equal.
Since every cylinder set can be connected to at most three other cylinder sets, we have for any $a\in I_k^{\xi_1,\dots,\xi_n}$ that
\[
\sharp G_n(\theta,a)\leq3\underline{e}_k\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}.
\]
The proof is completed by Lemma \ref{lbdgs}.
\end{proof}
One of the main properties of the matrices $\mx{A}_0, \mx{A}_1$ is stated in the following proposition.
\begin{prop}\label{pmain}
Let $p,q$ be integers such that the greatest common divisor is $1$, and let $\mx{A}_0$ and $\mx{A}_1$ be defined as in (\ref{eprojmatr}) (or equivalently as in (\ref{eprojmatr2})). Then there exists $n_0\geq1$ and a finite sequence $(\xi_1,\dots,\xi_{n_0})\in\left\{0,1\right\}^{n_0}$ such that every element of $\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_{n_0}}$ is strictly positive.
Moreover, for every $n\geq1$
\begin{multline}\label{epmain}
\sharp\left\{(\xi_1,\dots,\xi_n)\in\left\{0,1\right\}^n:\exists1\leq i,j\leq p+q\text{ such that }\left(\mx{A}_{\xi_1,\dots,\xi_n}\right)_{i,j}=0\right\}\leq\\\sum_{l=0}^{(p+q-1)(p+q)-1}\binom{n}{l}2^l.
\end{multline}
\end{prop}
We divide the proof of Proposition \ref{pmain} into the following three lemmas.
\begin{lemma}\label{lmxbasic}
Let $p,q$ be integers such that the greatest common divisor is $1$, and let $\mx{A}_0$ and $\mx{A}_1$ be defined as in (\ref{eprojmatr}). Then there are at least one and at most two $1$'s in each column and in each row of $\mx{A}_n$. Moreover, the sum of each column of $\mx{A}_0+\mx{A}_1$ is three.
\end{lemma}
The proof follows immediately from the definition.
\begin{lemma}\label{lmxatleast}
Let $p,q$ be integers such that the greatest common divisor is $1$, and let $\mx{A}_0$ and $\mx{A}_1$ be defined as in (\ref{eprojmatr}) and (\ref{eprojmatr2}). Then for every $1\leq m\leq p+q$ distinct columns $1\leq j_1,\dots,j_m\leq p+q$ and every $n=0,1$ there exist $m$ distinct rows $1\leq i_1,\dots,i_m\leq p+q$ such that $\left(\mx{A}_n\right)_{i_k,j_k}=1$ for every $k=1,\dots,m$. Note that $i_1,\dots,i_m$ may depend on $n$.
\end{lemma}
\begin{proof}
If $p+q$ is odd then for any $j_k$ there exists a unique $i_k$ such that $2i_k-1+n\equiv j_k\mod p+q$ and, by (\ref{eprojmatr2}), $\left(\mx{A}_n\right)_{i_k,j_k}=1$. Moreover, if $j_k\neq j_{k'}$ then $i_k\neq i_{k'}$. This implies the statement of the lemma.
Now, let us assume that $p+q$ is even. Further, assume that there are two non-zero elements $j_1, j_2$ in the row $i_1$. Then
\[
2i_1-1+n\equiv j_1\mod p+q\text{ and }2i_1-1+n-q\equiv j_2\mod p+q.
\]
It is easy to see that every element of the column $j_2$ is $0$ except $(i_1,j_2)$. Moreover, there exists $1\leq i_1'\leq p+q$ such that $2i_1'-1+n\equiv j_1\mod p+q$. In this case, every element of the row $i_1'$ is $0$ except $(i_1',j_1)$. Otherwise, if there would be $j_3\neq j_1$ such that $2i_1'-1+n-q\equiv j_3\mod p+q$ then $j_3\equiv j_1-q\equiv j_2\mod p+q$, but every element of the column $j_2$ is zero except $(i_1,j_2)$, which is a contradiction.
Therefore, for $\mx{A}_n$, $n=0,1$ and for every $m$ distinct columns $j_1,\dots,j_m$ there are at least $m$ distinct rows $i_1,\dots,i_m$ such that $\left(\mx{A}_n\right)_{i_k,j_k}=1$.
\end{proof}
\begin{lemma}\label{lmxgrowth}
Let $p,q$ be integers such that the greatest common divisor is $1$, and let $\mx{A}_0$ and $\mx{A}_1$ be defined as in (\ref{eprojmatr}) and in (\ref{eprojmatr2}). Then for every $1\leq m<p+q$ distinct columns $1\leq j_1,\dots,j_m\leq p+q$ there exists an $n\in\left\{0,1\right\}$ and at least $m+1$ distinct rows $1\leq i_1,\dots,i_{m+1}\leq p+q$ such that $\left(\mx{A}_n\right)_{i_k,j_k}=1$ for $k=1,\dots,m$ and there exists a $j\in\left\{j_1,\dots,j_m\right\}$ such that $\left(\mx{A}_n\right)_{i_{m+1},j}=1$.
\end{lemma}
\begin{proof}
We argue by contradiction. Let us fix the $m$ distinct columns $1\leq j_1,\dots,j_m\leq p+q$. By Lemma \ref{lmxbasic} in every column there are at least one and at most two ``$1$'' elements and by Lemma \ref{lmxatleast} there are at least $m$ different rows $1\leq i_1,\dots, i_m\leq p+q$ in $\mx{A}_0$ and at least $m$ different rows $1\leq s_1,\dots, s_m\leq p+q$ in $\mx{A}_1$ such that $\left(\mx{A}_0\right)_{i_k,j_k}=1$ and $\left(\mx{A}_1\right)_{s_k,j_k}=1$. To get a contradiction we assume that
\begin{equation}\label{a1}
\tag{\textbf{A1}}
\forall i\not\in \left\{i_1,\dots ,i_m\right\},
\forall s\not\in \left\{s_1,\dots ,s_m\right\},
\forall k:\quad
\left(\mx{A}_0\right)_{i,j_k}=0,\
\left(\mx{A}_1\right)_{s,j_k}=0.
\end{equation}
By Lemma \ref{lmxbasic} the matrix $\mx{A}_0+\mx{A}_1$ has in each column exactly $3$ non-zero elements. Therefore we can assume without loss of generality that there is a $0\leq l\leq m$ such that in $\mx{A}_0$ the columns $j_1,\dots,j_l$ and in $\mx{A}_1$ the columns $j_{l+1},\dots,j_m$ contain two non-zero elements. Namely, there are $l$ distinct rows $1\leq i_1',\dots,i_l'\leq p+q$ and $m-l$ distinct rows $1\leq s_{l+1}',\dots,s_m'\leq p+q$ such that $\left(\mx{A}_0\right)_{i_k',j_k}=1$ for $k=1,\dots,l$ and $\left(\mx{A}_1\right)_{s_k',j_k}=1$ for $k=l+1,\dots,m$. Moreover, by our assumption (\textbf{A1}) and Lemma \ref{lmxatleast}, for every $i_k'$ there exists a $i_{t_k}$ such that $l+1\leq t_k\leq m$ and $i_k'=i_{t_k}$. Similarly, for every $s_k'$ there exists a $s_{t_k}$ such that $1\leq t_k\leq l$ and $s_k'=s_{t_k}$.
Let us define now a directed graph $G(V,E)$ such that the vertices are $V=\left\{j_1,\dots,j_m\right\}$ and there is an edge $j_k\rightarrow j_n$ if and only if $s_k'=s_n$ or $i_k'=i_n$. It is easy to see that
\begin{equation}\label{eedge}
j_k\rightarrow j_n\Longleftrightarrow\left\{\begin{array}{crcr} j_n-q\equiv j_k\mod p+q & \text{if }p+q\text{ is odd}\\
j_k-q\equiv j_n\mod p+q & \text{if }p+q\text{ is even.}\end{array}\right.
\end{equation}
Since from every vertex of $G$ there is an edge pointing out, there is a cycle $j_{n_1}\rightarrow j_{n_2}\rightarrow\cdots\rightarrow j_{n_t}\rightarrow j_{n_1}$, where $1\leq t\leq m$. By (\ref{eedge}) we have
\[
\begin{split}
&j_{n_1}\equiv j_{n_2}-q\equiv\cdots\equiv j_{n_t}-(t-1)q\equiv j_{n_1}-tq\mod p+q\text{ if }p+q\text{ is odd or }\\
&j_{n_1}\equiv j_{n_t}-q\equiv\cdots\equiv j_{n_2}-(t-1)q\equiv j_{n_1}-tq\mod p+q\text{ if }p+q\text{ is even.}
\end{split}
\]
Then $tq\equiv0\mod p+q$. Since $(q,p+q)=1$, then $t\equiv0\mod p+q$. Therefore $p+q\leq t\leq m<p+q$ which is a contradiction.
\end{proof}
\begin{proof}[Proof of Proposition \ref{pmain}]
First, we prove the existence of such a sequence. It is easy to see by Lemma \ref{lmxatleast} that for every matrix $\mx{B}$ with non-negative elements and $n=0,1$, if the $l$'th column of $\mx{B}$ contains $m$ non-zero elements then the $l$'th column of the matrix $\mx{A}_n\mx{B}$ contains at least $m$ non-zero elements. Moreover, by Lemma \ref{lmxgrowth}, for every column $l$ of $\mx{B}$ there exists an $n\in\left\{0,1\right\}$ such that if it contains $m$ non-zero elements then the $l$'th column of $\mx{A}_n\mx{B}$ contains at least $m+1$ non-zero elements.
Therefore, by taking $n=(p+q)(p+q-1)+1$ we have that there exists $\left\{\xi_k\right\}_{k=1}^{n}$ with $\xi_k\in \{0,1\}$ for which every entry
of the matrix $\mx{A}_{\xi_{n}}\cdots\mx{A}_{\xi_1}$ is non-zero.
For a $(p+q)\times(p+q)$ non-negative matrix $B$ and $1\leq j \leq p+ q$ we let $n_j(\mx{B})$ denote the number of entries that are zero in the $j$'th column. We observe that for such a matrix we have $n_j(\mx{B}) \geq n_j(\mx{A}_i\mx{B})$ for each $i=0,1$. Furthermore, we have that there is at most one matrix $\mx{A}_i$ for which $n_j(\mx{B}) = n_j(\mx{A}_i\mx{B})$. Suppose now that for a finite word $(\xi_1,\dots,\xi_n)$ we have that the matrix $\mx{A}_{\xi_n}\cdots\mx{A}_{\xi_1}$ contains at least one zero entry. Thus, we have that
\begin{equation*}
1 \leq \sum_{j=1}^{p+q} n_j(\mx{A}_{\xi_n}\cdots\mx{A}_{\xi_1}) \leq \sum_{j=1}^{p+q} n_j(\mx{A}_{\xi_{n-1}}\cdots\mx{A}_{\xi_1}) \leq \cdots \leq \sum_{j=1}^{p+q} n_j(\mx{A}_{\xi_1}) \leq (p+q)(p+q-1).
\end{equation*}
This implies that in this chain of inequalities we must have at most $(p+q)(p+q-1)-1$ strict inequalities. This means that for at least $n+1-(p+q)(p+q-1)$ of the indices $k$, the choice of $\xi_k$ is determined by $\xi_1,\xi_2,\ldots,\xi_{k-1}$. This implies the inequality (\ref{epmain}).
\end{proof}
It is natural to introduce the dyadic symbolic space. Let $\Xi=\left\{0,1\right\}^{\mathbb{N}}$ and $\Xi^*$ be the set of dyadic finite length words. Define the natural projection $\pi:\Xi\mapsto[0,1]$ by
\[
\pi(\mathbf{i})=\sum_{k=1}^{\infty}\frac{i_k}{2^k}.
\]
Moreover, let $\sigma$ be the left shift operator on $\Xi$.
For any $\theta$ with $\tan\theta\in\mathbb{Q}$ and $a\in\Lambda_{\theta}$ let us define $\Gamma_a=\left\{a+\frac{i}{q}\in\Lambda_{\theta}:i\in\mathbb{Z}\right\}$ and $F_{\theta,a}=\bigcup_{b\in\Gamma_a}E_{\theta,b}$.
\begin{prop}\label{pLebtypbox}
Let $p,q\in\mathbb{N}$ be relative primes and let $\theta\in(0,\frac{\pi}{2})$ be such that $\tan\theta=\frac{p}{q}$. Then for Lebesgue-almost every $a\in\Lambda_{\theta}$
\[
\dim_BE_{\theta,a}=\alpha(\theta),
\]
where
\begin{equation}\label{ealpha1}
\alpha(\theta)=\frac{1}{\log2}\lim_{n\rightarrow\infty}\frac{1}{n}\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e},\text{ for $\mathbb{P}$-a.a. }(\xi_1,\xi_2,\dots),
\end{equation}
where $\mathbb{P}$ is the equidistributed Bernoulli measure on $\Xi$. Similarly,
\begin{equation}\label{ealpha2}
\alpha(\theta)=\frac{1}{\log2}\lim_{n\rightarrow\infty}\frac{1}{n}\sum_{\xi_1,\dots,\xi_n}\frac{1}{2^n}\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}.
\end{equation}
\end{prop}
\begin{proof}
Since $\mx{A}_0, \mx{A}_1$ are non-negative matrices, we have for any $(\xi_1,\dots,\xi_n)\in\Xi^*$ and $1\leq k\leq n$
\[
\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}\leq\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_k}\underline{e}\ \underline{e}\mx{A}_{\xi_{k+1}}\cdots\mx{A}_{\xi_n}\underline{e}.
\]
Let $\mathbb{P}=\left\{\frac{1}{2},\frac{1}{2}\right\}^{\mathbb{N}}$ be the equidistributed Bernoulli measure on $\Xi$. Then by the sub-additive ergodic theorem (see \cite[p. 231]{W}) we have for $\mathbb{P}$-almost all $\underline{\xi}\in\Xi$ the limit (\ref{ealpha1}) exists and is constant. The equation (\ref{ealpha2}) follows also from the sub-additive ergodic theorem.
It is easy to see that the measure $\sum_{k=1}^{p+q}\frac{1}{p+q}\left.\mathbb{P}\circ\pi^{-1}\circ h_k\right|_{I_k}$ is equivalent with the Lebesgue measure on $\Lambda_{\theta}$, where $h_k(x)=-qx+q-k$, so that $h_k(I_k)=[0,1]$. This and Lemma \ref{lmxbox} implies that for Lebesgue almost every $a\in\Lambda_{\theta}$
\begin{equation}\label{edim1}
\max_{b\in\Gamma_a}\dim_BE_{\theta,b}=\dim_BF_{\theta,a}=\alpha(\theta).
\end{equation}
Let $(\xi_1,\dots,\xi_{n_0})\in\left\{0,1\right\}^{n_0}$ be as in Proposition \ref{pmain}. Then for every $1\leq k\leq p+q$ and every finite length word $(\zeta_1,\dots,\zeta_n)\in\left\{0,1\right\}^*$ and Lebesgue-almost every $a\in I_k^{\zeta_1,\dots,\zeta_n\xi_1\dots\xi_{n_0}}$ we have
\[
\dim_BE_{\theta,a}=\dim_BF_{\theta,a'}=\alpha(\theta),
\]
where $a'=2^{n+n_0}\left(a-1+\frac{k-1}{q}\right)+\frac{1}{q}\sum_{i=1}^n2^{n+n_0-i}\zeta_i+\frac{1}{q}\sum_{i=1}^{n_0}2^{n_0-i}\xi_i+1-\frac{k-1}{q}$. The statement of the proposition follows from the fact that the set\\ $\bigcup_{k=1}^{p+q}\bigcup_{n=0}^{\infty}\bigcup_{(\zeta_1,\dots,\zeta_n)\in\left\{0,1\right\}^n}I_k^{\zeta_1,\dots,\zeta_n\xi_1\dots\xi_{n_0}}$ has full Lebesgue measure in $\Lambda_{\theta}$.
\end{proof}
\begin{lemma}\label{lalpha}
We have $\alpha(\theta)<s-1$ for every $\theta$ such that $\tan\theta\in\mathbb{Q}^{+}$.
\end{lemma}
The proof of Lemma \ref{lalpha} coincides with the proof of \cite[Theorem 9]{MS}, (see \cite[Subsection 3.4, Subsection 3.5]{MS}), therefore we omit it.
Finally, we have to state a proposition about the coincidence of the Hausdorff and box dimension for ``typical'' points before we prove Theorem \ref{ttyp}.
\begin{prop}\label{phdbdcoin}
Let $p,q\in\mathbb{N}$ be relative primes and let $\theta\in(0,\frac{\pi}{2})$ be such that $\tan\theta=\frac{p}{q}$. Let $\eta$ be a left shift invariant measure on $\Xi$ such that
\begin{equation}\label{eassumption}
\eta\left(\bigcup_{n=0}^{\infty}\bigcup_{(\zeta_1,\dots,\zeta_n)\in\left\{0,1\right\}^n}[\zeta_1,\dots,\zeta_n\xi_1\dots\xi_{n_0}]\right)=1,
\end{equation}
where $(\xi_1,\dots,\xi_{n_0})$ is as in Proposition \ref{pmain}. Let $\eta=\sum_{k=1}^{p+q}\eta_k$ be an arbitrary positive decomposition of $\eta$. (That is, $\eta_k([\zeta_1,\dots,\zeta_n])>0$ for any $1\leq k\leq p+q$ and any cylinder set.) Then for $\lambda$-almost every $a\in\Lambda_{\theta}$
\[
\dim_HE_{\theta,a}=\dim_BE_{\theta,a},
\]
where
\[
\lambda=\sum_{k=1}^{p+q}\left.\eta_k\circ\pi^{-1}\circ h_k\right|_{I_k}.
\]
\end{prop}
The following lemma appears in a paper of Kenyon and Peres \cite[Proposition 2.6]{KP}; the proof is attributed to Ledrappier. We state the lemma only for our special case.
\begin{lemma}[Ledrappier]\label{lboxhd}
Let $T_2$ be the endomorphism $T_2(x)=2x\mod1$ on the one-dimensional torus $S^1$. Assume that $F\subset S^1\times S^1=\mathbb{T}^2$ is compact and invariant under $T_2\times T_2$, and let $\nu$ be a $T_2$-invariant probability measure on $S^1$. Then for $\nu$-a.e. $x$
\[
\dim_H\mathrm{proj}^{-1}(x)=\dim_B\mathrm{proj}^{-1}(x),
\]
where $\mathrm{proj}\colon F\to S^1$ is the projection to the second coordinate.
\end{lemma}
\begin{proof}[Proof of Proposition \ref{phdbdcoin}]
It is easy to see that
\[
F_{\theta,a}=\Lambda\cap\left\{(x,y):px-qy\equiv -qa\mod1\right\}.
\]
Let $P:(x,y)\mapsto(x,(px-qy)\mod1)$ be a map of $\mathbb{T}^2$ into itself. Then
\[
\underline{\dim}_BP(F_{\theta,a})=\underline{\dim}_BF_{\theta,a},\ \overline{\dim}_BP(F_{\theta,a})=\overline{\dim}_BF_{\theta,a}\text{ and }\dim_HP(F_{\theta,a})=\dim_HF_{\theta,a}.
\]
Also, $P(\Lambda)\subset\mathbb{T}^2$ is compact and $T_2\times T_2$-invariant. Moreover, let $Q(a)=-qa\mod1$ be the mapping of $\Lambda_{\theta}$ into $S^1$. Since $\eta$ is left shift invariant, $\lambda\circ Q^{-1}=\eta\circ\pi^{-1}$ is $T_2$-invariant. Since
\[
\mathrm{proj}^{-1}(-qa\mod1)=P(F_{\theta,a})
\]
by Lemma \ref{lboxhd} we have for $\lambda$-almost all $a\in\Lambda_{\theta}$ that
\begin{equation}\label{eprcoin}
\dim_HF_{\theta,a}=\dim_BF_{\theta,a}.
\end{equation}
Let $(\xi_1,\dots,\xi_{n_0})\in\left\{0,1\right\}^{n_0}$ be as in Proposition \ref{pmain}. Then by the assumptions we have that for every $1\leq k\leq p+q$ and every finite length word $(\zeta_1,\dots,\zeta_n)\in\left\{0,1\right\}^*$ the measure $\lambda(I_k^{\zeta_1,\dots,\zeta_n\xi_1\dots\xi_{n_0}})>0$ and for $\lambda$-almost every $a\in I_k^{\zeta_1,\dots,\zeta_n\xi_1\dots\xi_{n_0}}$ the equation (\ref{eprcoin}) holds. Moreover, the fact that the matrix $\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_{n_0}}$ has strictly positive coefficients implies that
\[
\dim_BE_{\theta,a}=\dim_BF_{\theta,a'}=\dim_HF_{\theta,a'}=\dim_HE_{\theta,a},
\]
where $a'=2^{n+n_0}\left(a-1+\frac{k-1}{q}\right)+\frac{1}{q}\sum_{i=1}^n2^{n+n_0-i}\zeta_i+\frac{1}{q}\sum_{i=1}^{n_0}2^{n_0-i}\xi_i+1-\frac{k-1}{q}$. The proof is completed by applying the assumption (\ref{eassumption}).
\end{proof}
\begin{proof}[Proof of Theorem \ref{ttyp}]
Theorem \ref{ttyp}(\ref{ttyp1}) is an easy consequence of Proposition \ref{pLebtypbox}, Lemma \ref{lalpha} and Proposition \ref{phdbdcoin}.
The equalities of Theorem \ref{ttyp}(\ref{ttyp2}) follow from Corollary \ref{cbox} and Proposition \ref{phdbdcoin}. It is enough to prove that $\beta(\theta)>s-1$. To prove this fact, we use the method of \cite{R}.
Define the probability measure $\eta$ on $\Xi$ by \[
\eta([\xi_1,\dots,\xi_n])=\frac{1}{3^n}\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p},
\]
where $\underline{p}$ is the unique probability vector such that $\frac{1}{3}\left(\mx{A}_0+\mx{A}_1\right)\underline{p}=\underline{p}$. Then it is easy to see that $\eta$ is left shift invariant, moreover, by the Perron-Frobenius Theorem, $\eta$ is mixing and therefore an ergodic probability measure. Decompose $\eta=\sum_{k=1}^{p+q}\eta_k$ as
\[
\eta_k([\xi_1,\dots,\xi_n])=\frac{1}{3^n}\underline{e}_k\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p}
\]
for every cylinder set $[\xi_1,\dots,\xi_n]$. Let us recall that $\nu_{\theta}$ is the projection of the natural self-similar measure on $\Lambda$. Observe that $\left.\nu_{\theta}\right|_{I_k}\circ h_k=\eta_k\circ\pi^{-1}$ and define $\widetilde{\nu}_\theta(.)=\sum_{k=1}^{p+q}\left.\nu_{\theta}\right|_{I_k}\circ h_k=\eta\circ\pi^{-1}$. Then $\widetilde{\nu}_{\theta}$ is a $T_2$-invariant probability measure satisfying the assumptions of Proposition \ref{phdbdcoin}.
By the Volume lemma \cite[Theorems 10.4.1, 10.4.2]{PU} we have
\begin{equation}\label{elimbeta}
\dim_H\widetilde{\nu}_{\theta}=\lim_{n\rightarrow\infty}-\frac{1}{n\log2}\sum_{\xi_1,\dots,\xi_n=0}^1\frac{1}{3^n}\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p}\log\left(\frac{1}{3^n}\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p}\right)
\end{equation}
On the other hand, $\left.\nu_{\theta}\right|_{I_k}\circ h_k\ll\widetilde{\nu}_{\theta}$ for every $1\leq k\leq p+q$, which implies that $\dim_H\left.\nu_{\theta}\right|_{I_k}=\dim_H\left.\nu_{\theta}\right|_{I_k}\circ h_k\leq\dim_H\widetilde{\nu}_{\theta}$. However, \[
\dim_H\widetilde{\nu}_{\theta}=\inf_{1\leq k\leq p+q}\dim_H\left.\nu_{\theta}\right|_{I_k}\circ h_k=\inf_{1\leq k\leq p+q}\dim_H\left.\nu_{\theta}\right|_{I_k}=\dim_H\nu_{\theta}.
\]
By Lemma \ref{lalpha} there exists $\delta>0$ such that for sufficiently large $n$ there exists a sequence $(\xi_1,\dots,\xi_n)$ with
\[
\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{p}<\frac{1}{2^{n+\delta n}}.
\]
This implies that the limit in (\ref{elimbeta}) is strictly less than $1$. The proof can be finished by Corollary \ref{cbox}.
\end{proof}
\begin{proof}[Proof of Proposition \ref{pcalc}]
The statement of the proposition follows from Proposition \ref{pLebtypbox} and the proof of Theorem \ref{ttyp}(\ref{ttyp2}).
\end{proof}
\section{Proof of Theorem \ref{tspectra}}\label{stspectra}
In this section we apply the results of \cite{F1,F2,FL2} to the matrices $\mx{A}_0,\mx{A}_1$ to obtain a multifractal description of the dimension of the slices. Let \[
\widetilde{\Lambda}_{\theta}=\left\{a=1-\frac{k-1}{q}-\frac{1}{q}\sum_{i=1}^{\infty}\frac{\xi_i}{2^i}\in\Lambda_{\theta}:\exists n\geq1,\ \mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}>0\right\}.
\]
By (\ref{epmain}) we have
\begin{equation}\label{eb0}
\overline{\dim}_B\left(\Lambda_{\theta}\setminus\widetilde{\Lambda}_{\theta}\right)=0.
\end{equation}
Moreover, we can reformulate Lemma \ref{lmxbox}.
\begin{lemma}\label{lmxbox2}
Let $\theta$ and $a\in\widetilde{\Lambda}_{\theta}$ be such that $\tan\theta=\frac{p}{q}$ and
\[
a=1-\frac{k-1}{q}-\frac{1}{q}\sum_{i=1}^{\infty}\frac{\xi_i}{2^i}
\]
then
\[
\underline{\dim}_B E_{\theta,a}=\liminf_{n\rightarrow\infty}\frac{\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n\log2}\text{ and }\overline{\dim}_B E_{\theta,a}=\limsup_{n\rightarrow\infty}\frac{\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n\log2}.
\]
\end{lemma}
\begin{proof}[Proof of Proposition \ref{cspectra}(\ref{tspectra1})]
As a consequence of Lemma \ref{lmxbox2} and (\ref{eb0}) we have
\begin{multline*}
\dim_H\left\{a\in\Lambda_{\theta}:\dim_BE_{\theta,a}=\alpha\right\}=\\
\dim_H\left\{1-\frac{k-1}{q}-\frac{1}{q}\sum_{i=1}^{\infty}\frac{\xi_i}{2^i}\in\widetilde{\Lambda}_{\theta}:\lim_{n\rightarrow\infty}\frac{\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n}=\alpha\log2\right\}=\\
\dim_H\left\{(\xi_1,\xi_2,\dots)\in\Xi:\lim_{n\rightarrow\infty}\frac{\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n}=\alpha\log2\right\}.
\end{multline*}
By Proposition \ref{pmain}, one can finish the proof using \cite[Theorem 1.1]{F2}.
\end{proof}
By \cite[Lemma 2.2]{F2} and \cite[Theorem 3.3]{FL2} we state the following lemma for the pressure function.
\begin{lemma}\label{lpresproperty}
Let $P(t)$ be defined as in (\ref{emxpressure}). Then $P(t)$ is monotone increasing, convex and continuous for $t\in\mathbb{R}$. Moreover, for $t>0$ the pressure is differentiable.
\end{lemma}
\begin{lemma}\label{lgamma1}
For every $0\leq\delta\leq\alpha(\theta)$,
\[
\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}\geq\delta\right\}=1.
\]
\end{lemma}
\begin{proof}
For every $0\leq\delta\leq\alpha(\theta)$ we have
\begin{multline*}
\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}\geq\delta\right\}\geq\\\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}=\dim_BE_{\theta,a}=\alpha(\theta)\right\}=1.
\end{multline*}
The last equation follows from Theorem \ref{ttyp}(\ref{ttyp1}). The upper bound is trivial.
\end{proof}
\begin{lemma}\label{lpreslim}
Let $P(t)$ be defined as in (\ref{emxpressure}). Then
\[
\lim_{t\rightarrow0+}P'(t)=\alpha(\theta)\log2.
\]
\end{lemma}
\begin{proof} First, we prove $\lim_{t\rightarrow0+}P'(t)\geq\alpha(\theta)\log2$. Suppose by way of contradiction that there is a $t'>0$ such that $P'(t')=\alpha(\theta)\log2$ and that for every $0<t<t'$ we have $P'(t)<\alpha(\theta)\log2$. Then
\[
1=\dim_H\left\{a\in\Lambda_{\theta}:\dim_BE_{\theta,a}=\alpha(\theta)\right\}=\inf_t\left\{-\alpha(\theta)t+\frac{P(t)}{\log2}\right\}=-\alpha(\theta)t'+\frac{P(t')}{\log2}.
\]
Therefore $P(0)=\log2$ and $P(t')=\log2\alpha(\theta)t'+\log2$, contradicting our assumption that $P'(t)<\alpha(\theta)\log2$.
We now prove the other inequality $\lim_{t\rightarrow0+}P'(t)\leq\alpha(\theta)\log2$. Suppose now that $\lim_{t\rightarrow0+}P'(t)>\delta\log(2)>\alpha(\theta)\log(2)$ for some $\delta$. Then by Proposition \ref{cspectra}(\ref{tspectra1}) there is a $t^-\leq0$ such that
\begin{multline*}
\dim_H\left\{a\in\Lambda_{\theta}:\dim_BE_{\theta,a}=\delta\right\}=\inf_t\left\{-\delta t+\frac{P(t)}{\log2}\right\}=-\delta t^-+\frac{P(t^-)}{\log2}>\\-\alpha(\theta)t^-+\frac{P(t^-)}{\log2}\geq\inf_t\left\{-\alpha(\theta)t+\frac{P(t)}{\log2}\right\}=\dim_H\left\{a\in\Lambda_{\theta}:\dim_BE_{\theta,a}=\alpha(\theta)\right\}=1,
\end{multline*}
which is a contradiction. (The last equality follows from Theorem \ref{ttyp}(\ref{ttyp1}).)
\end{proof}
Before we prove the case when $\alpha(\theta)<\delta\leq b_{\max}$ we need the so-called Gibbs measure.
\begin{lemma}\label{lgibbs}
For every $t>0$ there is a unique ergodic, left shift invariant Gibbs measure $\mu_t$ on $\Xi$ for which there exists a $C>0$ such that for any $(\xi_1,\dots,\xi_k)\in\Xi^{*}$
\[
C^{-1}\leq\frac{\mu_t((\xi_1,\dots,\xi_k))}{\left(\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_k}\underline{e}\right)^te^{-kP(t)}}\leq C.
\]
Moreover,
\begin{equation}\label{lgibbs2}
\dim_H\mu_t=\frac{-tP'(t)+P(t)}{\log2}
\end{equation}
and
\begin{equation}\label{lgibbs3}
\lim_{n\rightarrow\infty}\frac{\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_n}\underline{e}}{n\log2}=\frac{P'(t)}{\log2}\text{ for $\mu_t$-a.a. }(\xi_1,\xi_2,\dots).
\end{equation}
\end{lemma}
The proof of the lemma follows from \cite[Theorem 3.2]{FL2} and \cite[Proof of Theorem 1.3]{FL2}.
\begin{lemma}\label{lgamma2}
For every $\alpha(\theta)<\delta\leq b_{\max}$,
\[
\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}\geq\delta\right\}=\inf_{t>0}\left\{-\delta t+\frac{P(t)}{\log2}\right\}.
\]
\end{lemma}
\begin{proof}
Let us observe by Lemma \ref{lpreslim} that
\[
\inf_{t}\left\{-\delta t+\frac{P(t)}{\log2}\right\}=\inf_{t>0}\left\{-\delta t+\frac{P(t)}{\log2}\right\}.
\]
First, we will prove the upper bound with the method of \cite[Lemma 3.18]{Wi}.
Let us define the following set of intervals:
\[
\mathbf{A}_n(\varepsilon)=\left\{(\xi_1,\dots,\xi_k):k\geq n,\ \delta-\varepsilon\leq\frac{\log\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_k}\underline{e}}{k\log2}\right\}.
\]
It is easy to see that the set
\[
\bigcup_{j=1}^{p+q}\bigcup_{(\xi_1,\dots,\xi_k)\in\mathbf{A}_n(\varepsilon)}I_j^{\xi_1,\dots,\xi_k}
\]
covers the set $G_{\delta}=\left\{a\in\Lambda_{\theta}:\delta\leq\underline{\dim}_BE_{\theta,a}\right\}$. Let $\mathbf{B}_n(\varepsilon)$ be the set of disjoint cylinders of $\mathbf{A}_n(\varepsilon)$ such that
\[
\bigcup_{j=1}^{p+q}\bigcup_{(\xi_1,\dots,\xi_k)\in\mathbf{B}_n(\varepsilon)}I_j^{\xi_1,\dots,\xi_k}=\bigcup_{j=1}^{p+q}\bigcup_{(\xi_1,\dots,\xi_k)\in\mathbf{A}_n(\varepsilon)}I_j^{\xi_1,\dots,\xi_k}.
\]
Then for any $t>0$ and $\varepsilon'>0$ we have
\begin{multline*}
\mathcal{H}_{2^{-n}}^{-\delta t+\frac{P(t)}{\log2}+\varepsilon't}(G_{\delta})\leq\sum_{j=1}^{p+q}\sum_{(\xi_1,\dots,\xi_k)\in\mathbf{B}_n(\varepsilon)}\left|I_j^{\xi_1,\dots,\xi_k}\right|^{-\delta t+\frac{P(t)}{\log2}+\varepsilon't}\leq\\(p+q)2^{(\varepsilon-\varepsilon')n t}\sum_{(\xi_1,\dots,\xi_k)\in\mathbf{B}_n(\varepsilon)}\left(\underline{e}\mx{A}_{\xi_1}\cdots\mx{A}_{\xi_k}\underline{e}\right)^te^{-kP(t)}.
\end{multline*}
By Lemma \ref{lgibbs}
\[
\mathcal{H}_{2^{-n}}^{-\delta t+\frac{P(t)}{\log2}+\varepsilon't}(G_{\delta})\leq C(p+q)2^{(\varepsilon-\varepsilon')nt}\sum_{(\xi_1,\dots,\xi_k)\in\mathbf{B}_n(\varepsilon)}\mu_t((\xi_1,\dots,\xi_k))\leq C(p+q)2^{(\varepsilon-\varepsilon')nt}.
\]
This implies that
\[
\dim_H\left\{a\in\Lambda_{\theta}:\delta\leq\dim_HE_{\theta,a}\right\}\leq\dim_H\left\{a\in\Lambda_{\theta}:\delta\leq\underline{\dim}_BE_{\theta,a}\right\}\leq-\delta t+\frac{P(t)}{\log2}+\varepsilon't
\]
for any $t>0$ and $\varepsilon'>\varepsilon>0$. This proves the upper bound.
Now, we prove the lower bound. By Lemma \ref{lpresproperty}, for every $\alpha(\theta)<\delta< b_{\max}$ there exists a $t>0$ such that $P'(t)=\delta\log2$. By Lemma \ref{lgibbs}, let $\mu_t$ be the Gibbs measure. The measure $\mu_t$ is shift invariant and ergodic. Moreover, by the Gibbs property, $\mu_t$ satisfies the assumption of Proposition \ref{phdbdcoin} and we have
\[
\dim_HE_{\theta,a}=\dim_BE_{\theta,a}\text{ for $\mu_t$-almost all $(\xi_1,\xi_2,\dots)$,}
\]
where $a=1-\frac{k-1}{q}-\frac{1}{q}\sum_{i=1}^{\infty}\frac{\xi_i}{2^i}$ for some $1\leq k\leq p+q$.
Then by (\ref{lgibbs2}) and (\ref{lgibbs3}) we have
\begin{multline*}
\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}\geq\delta\right\}\geq\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}=\dim_BE_{\theta,a}=\delta\right\}\geq\\\dim_H\mu_t=-t\delta+\frac{P(t)}{\log2}\geq\inf_{t>0}\left\{-t\delta+\frac{P(t)}{\log2}\right\}.
\end{multline*}
If $\delta=b_{\max}$ then
\begin{multline*}
\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}\geq b_{\max}\right\}\leq\lim_{\delta\rightarrow b_{\max}-}\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}\geq\delta\right\}=\\\lim_{\delta\rightarrow b_{\max}-}\inf_{t>0}\left\{-t\delta+\frac{P(t)}{\log2}\right\}=\inf_{t>0}\left\{-t b_{\max}+\frac{P(t)}{\log2}\right\}=0.
\end{multline*}
In the last two equations we used the continuity property \cite[Theorem 1.1]{F2} and the definition of $b_{\max}$.
\end{proof}
\begin{proof}[Proof of Theorem \ref{tspectra}(\ref{tspectra2})]
The proof is the combination of Lemma \ref{lgamma1} and Lemma \ref{lgamma2}.
\end{proof}
\begin{proof}[Proof of Theorem \ref{tspectra}(\ref{tspectra3})]
By the observation
\begin{multline*}
\dim_H\left\{a\in\Lambda_{\theta}:\underline{\dim}_BE_{\theta,a}\geq\delta\right\}\geq\dim_H\left\{a\in\Lambda_{\theta}:\dim_HE_{\theta,a}=\delta\right\}\geq\\\dim_H\left\{a\in\Lambda_{\theta}:\dim_BE_{\theta,a}=\dim_HE_{\theta,a}=\delta\right\}
\end{multline*}
one can finish the proof as in Lemma \ref{lgamma2}.
\end{proof}
\noindent{\bf Acknowledgment.} The authors would like to express their gratitude to the anonymous referees for their reading of the original version as well as their helpful comments.
\\\\
The research of
B\'ar\'any and Simon was supported by OTKA Foundation grant \# K 71693. Ferguson acknowledges support from EPSRC grant EP/I024328/1 and the University of Bristol.
\end{document} |
\begin{document}
\title{
Experimental Study of Concise Representations of Concepts and Dependencies
}
\newcommand{\inst}[1]{$^{#1}$}
\newcommand{\orcidID}[1]{~\textsuperscript{\textit{#1}}}
\newtheorem{definition}{Def.}
\author{
Aleksey Buzmakov\inst{1}\orcidID{0000-0002-9317-8785} \\
Egor Dudyrev\inst{2}\orcidID{0000-0002-2144-3308} \\
Sergei O. Kuznetsov\inst{2}\orcidID{0000-0003-3284-9001}\\
Tatiana Makhalova\inst{3}\orcidID{0000-0002-6724-3803} \\
Amedeo Napoli\inst{3,1}\orcidID{0000-0001-5236-9561} \\
}
\date{
HSE University, Perm, Russia \\
HSE University, Moscow, Russia \\
Université de Lorraine, CNRS, Inria, LORIA, F-54000 Nancy, France
}
\maketitle
\begin{abstract}
In this paper we are interested in studying concise representations of concepts and dependencies, i.e., implications and association rules.
Such representations are based on equivalence classes and their elements, i.e., minimal generators, minimum generators including keys and passkeys, proper premises, and pseudo-intents.
All these sets of attributes are significant and well studied from the computational point of view, while their statistical properties remain to be studied.
It is the purpose of this paper to study these singular attribute sets and, in parallel, to study how to evaluate the complexity of a dataset from an FCA point of view.
In the paper we analyze the empirical distributions and the sizes of these particular attribute sets.
In addition, we propose several measures of data complexity relying on these attribute sets, considering real-world and related randomized datasets.
\end{abstract}
\section{Introduction}
In this paper we are interested in measuring ``complexity'' of a dataset in terms of Formal Concept Analysis (FCA \cite{GanterW99}).
On the one hand, we follow the lines of \cite{MakhalovaBKN22} where the ``closure structure'' and the ``closure index'' are introduced and based on the so-called passkeys, i.e., minimum generators in an equivalence class of itemsets. On the other hand, we would like to capture statistical properties of a dataset, not just extremal characteristics such as the size of a passkey.
The closure structure represents a dataset, so that closed itemsets are assigned to the level of the structure given by the size of their passkeys.
The complexity of the dataset can be read along the number of levels of the dataset and the distribution of itemsets w.r.t. frequency at each level.
The most interesting
are the ``lower'' levels, i.e., the levels with the lowest closure index, as they usually contain itemsets with high frequency, contrasting the higher levels which contain itemsets with a quite low frequency.
Indeed, short minimum keys or passkeys correspond to implications in the related equivalence class with minimal left-hand side (LHS) and maximal right-hand side (RHS), which are the most informative implications \cite{PasquierBTL99,BastideTPSL00}.
Here we adopt an alternative approach and we try to measure the complexity of a dataset in terms of five main elements that can be computed in a concept lattice, namely intents (closed sets), pseudo-intents, proper premises, keys (minimal generators), and passkeys (minimum generators).
We follow a more practical point of view and we study the distribution of these different elements in various datasets.
We also investigate the relations that these five elements have with one another, and the relations with implications and association rules.
For example, the number of intents gives the size of the lattice, while the number of pseudo-intents gives the size of the Duquenne-Guigues basis \cite{GuiguesD86}, and thus the size of the minimal implication basis representing the whole lattice.
Moreover, passkeys are indicators related to the closure structure and the closure index indicates the number of levels in the structure. The size of the covering relation of the concept lattice gives the size of the ``base'' of association rules.
Here we discuss alternative ways of defining the ``complexity'' of a dataset and how it can be measured in the related concept lattice that can be computed from this dataset.
For doing so, we introduce two main indicators, namely
(i) the probability that two concepts $C_1$ and $C_2$ are comparable,
(ii) given two intents $A$ and $B$, the probability that the union of these two intents is again an intent.
Both indicators are related to the distributivity of a lattice \cite{DaveyP90,Gratzer02}.
Indeed, a distributive lattice may appear as less complex than random lattices, since, given two intents $A$ and $B$, their meet $A \wedge B$ and their join $A \vee B$ are also intents.
Moreover, in a distributive lattice, all pseudo-intents are of size $1$,
meaning that the Duquenne-Guigues implication base is very simple, having premises of size 1.
Following the same idea, given a set of $n$ attributes, the Boolean lattice $\wp(n)$ is the largest lattice that one can build from a context of size $n\times n$, but $\wp(n)$ can also be considered as a simple lattice, since it can be represented by the set of its $n$ atoms, and moreover, the Duquenne-Guigues implication base is empty, so there are no nontrivial implications in this lattice.
In addition, a Boolean lattice is also distributive, thus it is simple in terms of the join of intents.
This is an original and practical study about the complexity of a dataset through an analysis of specific elements in the related concept lattice, namely intents, pseudo-intents, proper premises, keys, and passkeys.
Direct links are drawn with implications and association rules, making also a bridge between the present study in the framework of FCA, and approaches more related to data mining, actually pattern mining and association rule discovery.
Indeed, the covering relation of the concept lattice makes a concise representation of the set of association rules of the context~\cite{PasquierBTL99,BastideTPSL00}, so that every element of the covering relation, i.e., a pair of neighboring concepts or edge of the concept lattice, stays for an association rule, and reciprocally, every association rule can be given by a set of such edges.
Frequency distribution of confidence of the edges can be considered as an important feature of the lattice as a collection of association rules.
For studying practically this complexity, we have conducted a series of experiments where we measure the distribution of the different elements for real-world datasets and then for randomized datasets.
Actually randomized datasets are based on real-world datasets where either the distribution of crosses in columns is randomized or the whole set of crosses is randomized while keeping the density of the dataset.
We can observe that randomized datasets are usually more complex than real-world datasets.
This means that, in general, the set of ``interesting elements'' in the lattice is smaller
in real-world datasets.
The paper is organized as follows.
In the second section we introduce the theoretical background and necessary definitions.
Then the next section presents a range of experiments involving real-world and randomized datasets.
Finally, the results of experiments are discussed, and then we make a conclusion.
\section{Theoretical Background}
\subsection{Classes of Characteristic Attribute Sets}
Here we recall basic FCA definitions related to concepts, dependencies, and their minimal representations. After that we illustrate the definitions with a toy example.
Let us consider a formal context $K = (G, M, I)$ and prime operators:
\begin{align}
A' &= \{m \in M \mid \forall g \in A: gIm \}, \quad A \subseteq G \\
B' &= \{g \in G \mid \forall m \in B: gIm \}, \quad B \subseteq M
\end{align}
In what follows we illustrate the definitions using the ``four geometrical figures and their properties'' formal context presented in Table~\ref{tbl:toy_context} and introduced in \cite{KuznetsovO02}, where the set of objects $G = \{g_1 , g_2, g_3, g_4\}$ corresponds to \{equilateral triangle, rectangle triangle, rectangle, square\}, and the set of attributes $M = \{a, b, c, d\}$ corresponds to \{has 3 vertices, has 4 vertices, has a direct angle, equilateral\}.
\begin{table}
\centering
\begin{tabular}{r|ccccc}
\hline
{} & a & b & c & d & e\\
\hline
$g_1$ & x & & & x & \\
$g_2$ & x & & x & & \\
$g_3$ & & x & x & & \\
$g_4$ & & x & x & x & \\
\hline
\end{tabular}
\caption{The formal context of geometrical figures.}
\label{tbl:toy_context}
\end{table}
\begin{definition}[Intent or closed description]
A subset of attributes $B \subseteq M$ is an intent (is closed) iff $B'' = B$.
\end{definition}
In the running example (Table~\ref{tbl:toy_context}), $B = B'' = \{b, c\}$ is an intent and is the maximal subset of attributes describing the subset of objects $B' = \{g_3, g_4\}$.
\begin{definition}[Pseudo-intent]
A subset of attributes $P \subseteq M$ is a pseudo-intent iff:
\begin{enumerate}
\item $P \neq P''$
\item $Q'' \subset P$ for every pseudo-intent $Q \subset P$
\end{enumerate}
\end{definition}
Pseudo-intents are premises of implications of the cardinality-minimal implication basis called ``Duquenne-Guigues basis'' \cite{GuiguesD86} (DG-basis, also known as ``canonical basis'' or ``stembase'' \cite{GanterW99}).
In the current example (Table~\ref{tbl:toy_context}), the set of pseudo-intents is
$\big\{\{b\}, \{e\}, \{c, d\}, \{a,b,c\} \big\}$ since:
\begin{itemize}
\item $\{b\}, \{e\}, \{c, d\}$ are minimal non-closed subsets of attributes, and
\item $\{a,b,c\}$ is both non-closed and contains the closure $\{b, c\}$ of the pseudo-intent $\{b\}$.
\end{itemize}
\begin{definition}[Proper premise]
A set of attributes $A \subseteq M$ is a proper premise iff:
\[A \cup \bigcup_{n \in A} \left( A \setminus \{n\} \right)'' \neq A''\]
\end{definition}
Proper premises are premises of so-called direct or proper-premise base of implications, from which one obtains all implications with a single application of Armstrong rules (see also~\cite{RysselDB14}).
In the running example (Table~\ref{tbl:toy_context}), $Q = \{a, b\}$ is a proper premise since the union of $Q$ with the closures of its subsets does not result in the closure of $Q$, i.e., $\{a, b\} \cup \{a\}'' \cup \{b\}'' = \{a, b\} \cup \{a\} \cup \{b,c\} = \{a,b,c\} \neq \{a,b,c,d,e\}$.
\begin{definition}[Generator]
A set of attributes $D \subseteq M$ is a generator of an intent $B \subseteq M$ iff $D'' = B$.
\end{definition}
In this paper, every subset of attributes is a generator of a concept intent.
A generator is called non-trivial if it is not closed.
In the current example (Table~\ref{tbl:toy_context}), $D = \{a, b, d\}$ is a generator of $B = \{a,b,c,d,e\}$ since $B$ is an intent, $D \subseteq B$, and $D'' = B$.
\begin{definition}[Minimal generator, key]
A set of attributes $D \subseteq M$ is a minimal generator or a key of $D''$ iff
$ \nexists m \in D: (D \setminus \{m\})'' = D''$.
\end{definition}
A minimal generator is inclusion minimal in the equivalence class of subsets of attributes having the same closure \cite{PasquierBTL99,BastideTPSL00}.
Every proper premise is a minimal generator, however the converse does not hold in general.
In the current example (Table~\ref{tbl:toy_context}), $D = \{a, c, d\}$ is a minimal generator since none of its subsets $\{a, c\}, \{a, d\}, \{c, d\}$ generates the intent $D'' = \{a,b,c,d,e\}$.
\begin{definition}[Minimum generator, passkey]
A set of attributes $D \subseteq M$ is a minimum generator or a passkey iff $D$ is a minimal generator of $D''$ with the minimal size among all minimal generators of $D''$.
\end{definition}
\begin{comment}
\begin{definition}[Minimum generator, passkey]
A set of attributes $D \subseteq M$ is a minimum generator or a passkey iff:
\begin{enumerate}
\item $D$ is a minimal generator
\item $\nexists E \subseteq M$, s.t. $E$ is a minimal generator of $D''$ and $|E| < |D|$.
\end{enumerate}
\end{definition}
\end{comment}
A minimum generator (a passkey) is cardinality-minimal in the equivalence class of subsets of attributes having the same closure.
In~\cite{MakhalovaBKN22} the size of a maximal passkey of a context was studied as an index of the context complexity.
In the current example (Table~\ref{tbl:toy_context}), $D = \{b,d\}$ is a minimum generator of the intent $\{b,c,d\}$ since there is no other generator of smaller cardinality generating $D''$.
Meanwhile $D = \{a,c,d\}$ is not a minimum generator of $D'' = \{a,b,c,d,e\}$ since the subset $E = \{e\}$ has a smaller size and the same closure, i.e., $E''= D''$.
Finally, we illustrate all these definitions at once.
To do so, we form the context of all possible classes of ``characteristic attribute sets'' of $M$ as they are introduced above, namely $(2^M, M_d, I_d)$, $M_d = \{\mathrm{passkey}, \mathrm{key}, \mathrm{proper\,premise},\ldots\}$, and $I_d$ defines whether a subset of attributes from $2^M$ belongs to a characteristic attribute set in $M_d$.
The concept lattice of this context is shown in Figure~\ref{fig:descr_lattice_toy_context}.
\begin{figure}
\caption{The concept lattice of ``characteristic attribute sets'' of the current context introduced in Table~\ref{tbl:toy_context}.}
\label{fig:descr_lattice_toy_context}
\end{figure}
\subsection{Towards Measuring Data Complexity}
``Data complexity'' can mean many different things depending on the particular problem of data analysis one wants to solve.
For example, data can be claimed to be complex when data processing takes a very long time, and this could be termed as ``computational complexity'' of data.
Alternatively, data can be considered as complex when data is hard to analyze and to interpret.
For example, it can be hard to apply FCA or machine learning algorithms, such as clustering, classification, or regression.
Accordingly, it is quite hard to define data complexity in general terms.
If we consider the dimension of interpretability, then the size of the elements to interpret and their number are definitely important elements.
In an ideal case, one would prefer a small number of ``elements'', say interesting subsets of attributes, to facilitate interpretation.
Indeed, less than five rules with a few attributes in the premises and in the conclusions are simpler to interpret than hundreds of rules with more than ten attributes in the premises and conclusions.
Thus, it is natural to study how the number of elements is distributed w.r.t. their size.
Moreover, in most of the cases, large numbers of elements are associated with computational complexity.
Thus controlling the size and the number of elements is also a way to control computational complexity.
It should also be mentioned that the number of elements is related to the so-called ``VC-dimension'' of a context~\cite{AlbanoC17}, which is the maximal size of a Boolean sublattice.
Accordingly, for our study about data complexity, we decided to count the number of concepts, pseudo-intents, proper premises, keys, and passkeys in order to understand and evaluate the complexity of data.
For all these elements, we also study the distribution of element sizes.
Additionally, we decided to measure the ``lattice complexity'' with two new measures which are related to what could be termed the ``linearity'' of the lattice.
Indeed, the most simple lattice structure that can be imagined is a chain, while the counterpart is represented by the Boolean lattice, i.e., the lattice with the largest amount of connections and concepts.
However, it should be noticed that the Boolean lattice may be considered as complex from the point of view of interpretability, but very simple from the point of view of implication base, which is empty in such a lattice.
Then, a first way to measure the closeness of the lattice to a chain is the ``linearity index'' which is formally defined below as the probability that two random concepts are comparable in the lattice.
\begin{definition}
Given a lattice $\mathcal{L}$, the linearity index $\mathtt{LIN}(\mathcal{L})$ is defined as:
\begin{equation}
\mathtt{LIN}(\mathcal{L}) = \frac{1}{|\mathcal{L}|(|\mathcal{L}|-1)}\underset{c_1, c_2 \in \mathcal{L}, c_1 \neq c_2}{\sum} \mathbbm{1}(c_1 < c_2 \vee c_1 > c_2)
\end{equation}
where $|\mathcal{L}|$ denotes the number of the concepts in the lattice $\mathcal{L}$ and $\mathbbm{1}$ the indicator function which takes the value $1$ when the related constraint is true.
\end{definition}
The linearity index is maximal for a chain lattice and is minimal for the lattice of a nominal scale (or the lattice related to a bijection).
This index does not directly measure how well the lattice is interpretable.
One of the main interpretability properties is the size of some element sets, and in particular, the size and the structure of the implication basis.
One of the simplest structures for the implication basis can be found in distributive lattices, i.e., pseudo-intents are of size $1$.
Accordingly, we introduce the ``distributivity index'' which measures how a lattice is close to a distributive one.
\begin{definition}
Given a lattice $\mathcal{L}$, the distributivity index $\mathtt{DIST}(\mathcal{L})$ is defined as
\begin{equation}
\mathtt{DIST}(\mathcal{L}) = \frac{1}{|\mathcal{L}|}\underset{i_1,i_2 \in Intents(\mathcal{L}), i_1 \neq i_2}{\sum} \mathbbm{1}(i_1 \cup i_2 \in Intents(\mathcal{L})),
\end{equation}
where $Intents(\mathcal{L})$ is the set of concept intents in $\mathcal{L}$.
\end{definition}
The distributivity index is maximal for distributive lattices, and this includes chain lattices which are distributive lattices \cite{DaveyP90,Gratzer02}, and is again minimal for lattices of nominal scales which are not distributive.
However, it may sound strange to consider the lattices of nominal scales as complex.
Indeed they are simple from the viewpoint of implications.
For example, any pair of attributes from the lattice of a nominal scale --also termed as M3 for the most simple with 3 elements without top and bottom element-- can form the premise of an implication with a non-empty conclusion.
This indeed introduces many implications in the basis and this makes the DG-basis hard to interpret.
\subsection{Synthetic Complex Data}
In order to study different ways of measuring data complexity, we need to compare the behavior of different complexity indices for ``simple'' and ``complex'' data.
However, beforehand we cannot know which dataset is complex.
Accordingly, we will generate synthetic complex datasets and compare them with real-world data.
One way of generating complex data is ``randomization''.
Actually, randomized data cannot be interpreted since any possible result is an artifact of the method.
For randomized data we know beforehand that there cannot exist any rule or concept that have some meaning.
Thus, randomized data are good candidate data for being considered as ``complex''.
Now we discuss which randomization strategy should be used for generating such data.
A natural way is making randomized data similar in a certain sense to the real-world data they are compared to.
Firstly, when considering reference real-world data, it seems natural to keep the number of objects and attributes as they are in the real data.
Moreover, the ``density'' of the context, i.e., the number of crosses, significantly affects the size and the structure of the related concept lattice.
Thus, to ensure that randomized data are ``similar'' to the real data it is also natural to keep the density of data.
This gives us the first randomization strategy, i.e., for any real-world dataset we can generate a randomized dataset with the same number of objects and attributes, and with the same density.
Then, the crosses in the original context will be randomly spread along the new context in ensuring that the density is the same as in the original real data.
For example, let us consider the context given in Table~\ref{tbl:descr_lattice_bobross}, where there are $8$ objects, $9$ attributes, and $35$ crosses.
Thus, any context with $8$ objects, $9$ attributes, and $35$ crosses, can be considered as a randomization of this original context.
In our first randomization strategy, we suppose that the probability of generating any such randomized context is equally distributed.
\begin{table}
\begin{center}
\begin{tabular}{r|l|l|l|l|l|l|l|l|l}
\hline
descriptions & generator & closed & minimal & minimum & pseudo & proper & key & passkey & intent \\
& & descr & gen & gen & intent & premise & & & \\
\hline
67 & x & x & x & x & & & x & x & x \\
45 & x & x & & & & & & & x \\
41 & x & & x & x & x & x & x & x & \\
125 & x & & x & x & & x & x & x & \\
1 & x & & x & & x & x & x & & \\
25 & x & & x & & & x & x & & \\
33 & x & & & & x & & & & \\
1048239 & x & & & & & & & & \\
\hline
\end{tabular}
\caption{
The reduced context corresponding to the lattice of descriptions for Bob Ross dataset.}
\label{tbl:descr_lattice_bobross}
\end{center}
\end{table}
The randomized formal contexts for such strategy were studied in~\cite{BorchmannH16}.
The authors have found that the correlation between the number of concepts and the number of pseudo-intents has a non-random structure suggesting that fixing density is not enough in order to generate randomized data which are similar to the real one.
Accordingly, we also studied a randomization strategy that fixes the number of objects having a cross for every attribute as follows.
A randomized context is generated attribute by attribute.
The crosses in every column are randomly assigned while the number of crosses is not modified and remains the same as in the corresponding ``real attribute''.
This can be viewed as a permutation of the crosses within every column in the randomized context, a column being permuted independently of the others.
Such a procedure corresponds to the ``null hypothesis'' in statistical terms of independence between attributes.
Although such randomization strategy considers objects and attributes differently, it corresponds to typical cases in data analysis.
Indeed, in typical datasets, objects stand for observations that are described by different attributes.
The attributes correspond to any hypothesis of the data domain.
Then, analysts are usually interested in discovering some relations between attributes, and the hypothesis of attribute independence is a natural null hypothesis in such a setting.
For example, let us consider again the context in Table~\ref{tbl:descr_lattice_bobross}.
The numbers of objects and attributes in the randomized context remain the same.
Then a randomized context following the second strategy is any context having $8$ crosses for the first attribute, $2$ crosses for the second attribute, $5$ crosses for the third attribute, etc.
Finally, when randomizing data, one should have in mind that from a given real dataset many randomized datasets can be generated w.r.t. the same randomization strategy.
Thus, it is not enough to study only one random dataset for a given real dataset, but for the sake of objectivity, it is necessary to generate several randomized datasets and then to estimate the distribution of a characteristic under study within all randomized datasets.
In the next section we study different ways of measuring the complexity of a dataset and we observe that the complexity of randomized datasets is generally higher than the complexity of the corresponding real-world dataset.
\section{Experiments}
\subsection{Datasets}
For this preliminary study we selected $4$ small real-world datasets in order to support efficient computing of all necessary elements of the lattice.
Efficiency matters here because we involve randomization and computations are repeated hundreds of times for one dataset.
The study includes the following datasets:
``Live in water\footnote{\url{https://upriss.github.io/fca/examples.html}}'',
``Tea ladies\footnote{\url{https://upriss.github.io/fca/examples.html}}''
``Lattice of lattice properties\footnote{\url{https://upriss.github.io/fca/examples.html}}'', and
``Bob Ross episodes\footnote{\url{https://datahub.io/five-thirty-eight/bob-ross}}''.
For the sake of efficiency the fourth dataset was restricted to only first 20 attributes.
\begin{comment}
\begin{itemize}
\item ``Live in water'' \footnote{\url{https://upriss.github.io/fca/examples.html}}
\item ``Tea ladies'' \footnote{\url{https://upriss.github.io/fca/examples.html}}
\item ``Lattice of lattice properties'' \footnote{\url{https://upriss.github.io/fca/exampLes.html}}
\item ``Bob Ross episodes'' \footnote{\url{https://datahub.io/five-thirty-eight/bob-ross}}
\end{itemize}
\end{comment}
The datasets are analyzed in two different ways.
Firstly, the characteristic attribute sets are computed, e.g., concepts, keys, pseudo-intents, and then the relations existing between these elements are discussed. Secondly, we study the complexity of the datasets w.r.t. the number of characteristic attribute sets, and their distribution w.r.t. the size of these sets.
We also compared all these numerical indicators for real and the randomized datasets.
\subsection{Characteristic Attribute Sets in a Lattice}
In this section we study the relations between the different characteristic attribute sets.
The experiment pipeline reads as follows.
\begin{itemize}
\item
Given a context $K = (G,M,I)$, we compute all the possible ``attribute descriptions'', i.e., subsets of attributes in $2^M$, and we check whether a description shows some characteristic such as being closed, a pseudo-intent, a minimum generator, etc.
\item
We construct a new ``descriptions context'', namely $(2^M,CAS,I)$ where
$CAS = \{is\_closed, is\_gen, is\_key, is\_min\_gen, is\_passkey, is\_prop-prem\}$,
and $I$ is the relation indicating that a given set of attributes has a characteristic in $CAS$.
\item
Finally, we construct the ``lattice of descriptions'' based on the ``description context''.
This lattice shows the relations existing between all generators -- subsets of attributes-- in a given dataset.
\end{itemize}
\begin{figure}
\caption{The lattice of ``attribute descriptions'' for the ``Bob Ross'' dataset and
their distributions.}
\label{fig:descr_lattice_bobross}
\end{figure}
\begin{comment}
\begin{table}
\caption{Reduced formal context corresponding to lattice of descriptions for Bob Ross dataset}
\label{tbl:descr_lattice_bobross}
\begin{center}
\begin{tabular}{r|lllllllll}
\toprule
\# & is & is & is & is & is & is & is & is & is \\
descriptions & generator & closed & minimal & minimum & pseudo & proper & key & passkey & intent \\
& & descr & gen & gen & intent & premise & & & \\
\midrule
67 & x & x & x & x & & & x & x & x \\
45 & x & x & & & & & & & x \\
41 & x & & x & x & x & x & x & x & \\
125 & x & & x & x & & x & x & X & \\
1 & x & & x & & x & x & x & & \\
25 & x & & x & & & x & x & & \\
33 & x & & & & x & & & & \\
1048239 & x & & & & & & & & \\
\bottomrule
\end{tabular}
\end{center}
\end{table}
\end{comment}
The ``description context'' for the ``Bob Ross'' dataset is given in Table~\ref{tbl:descr_lattice_bobross} and the corresponding lattice is shown in Figure~\ref{fig:descr_lattice_bobross}.
From the lattice we can check that any two classes of descriptions may intersect if this is not forbidden by their definition (e.g., a description cannot be both an intent and a pseudo-intent).
Although such a lattice is computed for a particular dataset, this is the general lattice structure which is almost always obtained.
In some very small datasets it may happen that some characteristic attribute sets are missing.
For example, in the ``Live in water'' lattice the properties of being a key, being a passkey, and being a proper premise, all coincide and collapse into one single node.
It is also very interesting to analyze the proportions of the sizes of classes of descriptions.
For example, in the ``Bob Ross'' context restricted to $20$ attributes, there are $2^{20}$ possible descriptions, but there are only $112$ of them which are closed, and only $259$ of them which are minimal generators.
Thus, the vast majority of the descriptions are ``useless'' in the sense that they do not correspond to any of the characteristic attribute subsets introduced above.
In the next subsection we consider the distributions of these characteristic attribute subsets.
\subsection{Data Complexity}
For analyzing data complexity, we start by comparing the numbers of elements in real data and in randomized data.
In Figure~\ref{fig:density-bob-intents}~and~\ref{fig:column-bob} the distributions of different lattice elements for the ``Bob Ross'' dataset are shown\footnote{All figures are in supplementary materials \url{https://yadi.sk/i/8_5EEvY4zNi82g}}.
Along the horizontal axis the sizes of the elements are shown, i.e., the number of attributes in the intent, pseudo-intent, key, etc.
Along the vertical axis the number of elements of the corresponding sizes are shown.
Red crosses show the values corresponding to real data and the boxplots visualize the values found in random data.
There were $100$ randomizations and thus boxplots are based on these $100$ values.
A box corresponds to the 50\% middle values among $100$ values.
In addition, it should be noticed that these two figures differ in the randomization strategy.
Figure~\ref{fig:density-bob-intents} corresponds to density-based randomization while Figure~\ref{fig:column-bob} shows randomization based on column permutations.
From both figures we can observe that randomized data contain significantly larger numbers of elements than the real data.
Moreover, the sizes of the elements for randomized data are larger than the sizes for real data.
Similar figures can be built for ``Tea Ladies'' and ``Lattice of lattice properties'' datasets.
However, we cannot distinguish the real dataset and the randomized data for the ``Live in Water'' dataset (see Figure~\ref{fig:density-water}).
This can be explained by the fact that either the dataset does not contain deep dependencies, or the dataset is too small, i.e., the randomized dataset cannot be substantially different from the original real one.
Let us now study how the linearity index and the distributivity index measure the complexity of a dataset.
Figures~\ref{fig:linearity} and~\ref{fig:distributivity} show the values of the linearity and distributivity indices correspondingly w.r.t. different randomizations.
From these figures we can see that datasets built from density-based randomization are more different from the real datasets than the randomized datasets built from column-wise permutations.
We should also notice that the values of the linearity and distributivity indices show a substantial dependence w.r.t. density of the corresponding context.
Indeed, if we look at the randomized datasets, we can see that the distributions of the linearity and distributivity indices are different.
This can be explained both by the context density and by the context size.
Thus, we cannot have any reference values for these indices that would separate ``complex'' from ``simple'' data.
However, comparing the values of the index to the distribution of these indices allows one to decide on the complexity of the data.
Finally, in all datasets but ``Live in water'' both indices, linearity and distributivity, have higher values for real datasets than for randomized datasets.
This shows again that real datasets are more structured than their randomized counterparts.
\section{Conclusion}
In this paper we have introduced and studied ``concise representations'' of datasets given by related contexts and concept lattices, and characteristic attributes sets based on equivalence classes, i.e., minimal generators, minimum generators including keys and passkeys, proper premises, and pseudo-intents.
We have also introduced two new indices for measuring the complexity of a dataset, the linearity index for checking the direct dependencies between concepts or how close a concept lattice is to a chain, and the distributivity index which measures how close a concept lattice is to a distributive lattice (where all pseudo-intents are of length $1$, thus leading to sets of simple implications).
We have also proposed a series of experiments where we analyze real-world datasets and their randomized counterparts.
As expected, the randomized datasets are more complex than the real ones.
The future work will be to improve this study in several directions, by studying more deeply the role of both indices, the linearity index and the distributivity index, by analyzing larger datasets, and more importantly by analyzing the complexity from the point of view of the generated implications and association rules.
This is a first step in this direction and we believe that FCA can bring a substantial support for analyzing data complexity in general.
\section{Appendix: Figures Related to Experiments}
\begin{figure}
\caption{Intents}
\label{fig:density-bob-intents}
\caption{Keys}
\label{fig:density-bob-key}
\caption{Passkeys}
\label{fig:density-bob-passkey}
\caption{Pseudo-intents}
\label{fig:density-bob-pseudo-i}
\caption{Proper premises}
\label{fig:density-bob-proper-premises}
\caption{The numbers of elements for the ``Bob Ross'' dataset w.r.t. context randomization based on density.}
\label{fig:density-bob}
\end{figure}
\begin{figure}
\caption{Intents}
\label{fig:column-bob-intents}
\caption{Keys}
\label{fig:column-bob-key}
\caption{Passkeys}
\label{fig:column-bob-passkey}
\caption{Pseudo-intents}
\label{fig:column-bob-pseudo-i}
\caption{Proper premises}
\label{fig:column-bob-proper-premises}
\caption{The numbers of elements for the ``Bob Ross'' dataset w.r.t. context randomization based on column-wise permutations.}
\label{fig:column-bob}
\end{figure}
\begin{figure}
\caption{Intents}
\label{fig:density-water-intents}
\caption{Keys}
\label{fig:density-water-key}
\caption{Passkeys}
\label{fig:density-water-passkey}
\caption{Pseudo-intents}
\label{fig:density-water-pseudo-i}
\caption{Proper premises}
\label{fig:density-water-proper-premises}
\caption{The numbers of elements for the ``Live in water'' dataset w.r.t. context randomization based on density.}
\label{fig:density-water}
\end{figure}
\begin{figure}
\caption{Live in water}
\label{fig:density-water-lin}
\caption{Lattice}
\label{fig:density-lattice-lin}
\caption{Tea Lady}
\label{fig:density-tea-lin}
\caption{Bob Ross}
\label{fig:density-bob-lin}
\caption{Live in water}
\label{fig:column-water-lin}
\caption{Lattice}
\label{fig:column-lattice-lin}
\caption{Tea Lady}
\label{fig:column-tea-lin}
\caption{Bob Ross}
\label{fig:column-bob-lin}
\caption{The distributivity index for different datasets and different randomizations.}
\label{fig:distributivity}
\end{figure}
\begin{figure}
\caption{Live in water}
\label{fig:density-water-lin}
\caption{Lattice}
\label{fig:density-lattice-lin}
\caption{Tea Lady}
\label{fig:density-tea-lin}
\caption{Bob Ross}
\label{fig:density-bob-lin}
\caption{Live in water}
\label{fig:column-water-lin}
\caption{Lattice}
\label{fig:column-lattice-lin}
\caption{Tea Lady}
\label{fig:column-tea-lin}
\caption{Bob Ross}
\label{fig:column-bob-lin}
\caption{The linearity index for different datasets and different randomizations.}
\label{fig:linearity}
\end{figure}
\newcommand{\commment}[1]{}
\commment{
\begin{figure}
\caption{Histograms for "Live in water" dataset}
\label{fig:hist_water}
\end{figure}
\begin{figure}
\caption{Histograms for "Tea ladies" dataset}
\label{fig:hist_tealadies}
\end{figure}
\begin{figure}
\caption{Histograms for "Lattice of lattice properties" dataset}
\label{fig:hist_lattices}
\end{figure}
\begin{figure}
\caption{The histograms for ``Bob Ross'' dataset including only the $20$ first attributes.}
\label{fig:hist_bobross}
\end{figure}
}
\end{document} |
\begin{document}
\setlength{\baselineskip}{5mm}
\begin{abstract}
This paper is motivated by the study of Lyapunov functionals for four equations describing free surface flows in fluid dynamics:
the Hele-Shaw and Mullins-Sekerka equations
together with their lubrication approximations,
the Boussinesq and thin-film equations. We identify
new Lyapunov functionals, including some which
decay in a convex manner (these are called strong Lyapunov functionals).
For the Hele-Shaw equation and the Mullins-Sekerka equation,
we prove that the $L^2$-norm of the free surface elevation and
the area of the free surface are Lyapunov functionals, together with parallel
results for the thin-film and Boussinesq equations.
The proofs combine exact identities for the dissipation rates with
functional inequalities. For the thin-film and Boussinesq equations,
we introduce a Sobolev inequality of independent interest which revisits some
known results and exhibits strong Lyapunov functionals. For the
Hele-Shaw and Mullins-Sekerka equations, we introduce a functional
which controls the $L^2$-norm of three-half spatial derivative.
Under a mild smallness assumption on the initial data, we show that the
latter quantity is also a Lyapunov functional for the Hele-Shaw equation, implying
that the area functional is a strong Lyapunov functional.
Precise lower bounds for the dissipation rates are established, showing that these
Lyapunov functionals are in fact entropies.
Other quantities are also studied such as Lebesgue norms or the Boltzmann's entropy.
\end{abstract}
\title{Functional inequalities and strong Lyapunov functionals for free surface flows in fluid dynamics}
\section{Introduction}
\subsection*{The equations}
Consider a time-dependent surface $\Sigma$ given as
the graph of some function $h$, so that at time $t\ge 0$,
$$
\Sigma(t)=\{ (x,y) \in \mathbf{T}^{d}\times \mathbf{R}\,;\, y = h(t,x)\},
$$
where $\mathbf{T}^{d}$ denotes a $d$-dimensional torus.
We are interested by several free boundary problems described by nonlinear parabolic equations.
A free boundary problem is described by an evolution equation which expresses
the velocity of $\Sigma$ at each point
in terms of some nonlinear expressions depending on $h$.
The most popular example is the \textbf{mean-curvature} equation, which stipulates that
the normal component of the velocity of $\Sigma$ is
equal to the mean curvature at each point. It follows that:
\begin{equation}\label{defi:kappa}
\partial_t h+\sqrt{1+|\nabla h|^2}\kappa=0\quad\text{where}\quad
\kappa=-\cnx \left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right).
\end{equation}
The previous equation plays a fundamental role in differential geometry.
Many other free boundary problems appear in fluid dynamics.
Among these, we are chiefly concerned by the equations modeling
the dynamics of a free surface transported by the flow of an incompressible
fluid evolving according to Darcy's law. We begin with the Hele-Shaw equations with or without
surface tension. One formulation of this problem reads (see Appendix~\ref{appendix:HS}):
\begin{equation}\label{HS}
\partial_{t}h+G(h)(gh+\mu \kappa)=0,
\end{equation}
where $\kappa$ is as in~\eqref{defi:kappa}, $g$ and $\mu$ are real numbers in $[0,1]$ and
$G(h)$ is the (normalized) Dirichlet-to-Neumann operator, defined as follows: For any functions $h=h(x)$ and $\psi=\psi(x)$,
$$
G(h)\psi (x)=\sqrt{1+|\nabla h|^2}\partial_n\mathcal{H}(\psi)
\big\arrowvert_{y=h(x)},
$$
where $\nabla=\nabla_x$, $\partial_n=n\cdot\nabla$ and $n$ is the outward unit normal to $\Sigma$ given by
$$
n=\frac{1}{\sqrt{1+|\nabla h|^2}}\begin{pmatrix} -\nabla h\\ 1\end{pmatrix},
$$
and $\mathcal{H}(\psi)$ is the harmonic extension of~$\psi$ in the fluid domain, solution to
\begin{equation}\label{defi:varphiintro}
\left\{
\begin{aligned}
&\Delta_{x,y}\mathcal{H}(\psi)=0\quad \text{in }\Omega\mathrel{:=}\{(x,y)\in \mathbf{T}^{d}\times \mathbf{R}\,:\, y<h(x)\},\\
&\mathcal{H}(\psi)\arrowvert_{y=h}=\psi.
\end{aligned}
\right.
\end{equation}
Hereafter, given a function $f=f(x,y)$, we use $f\arrowvert_{y=h}$ as a short notation for the function $x\mapsto f(x,h(x))$.
When $g=1$ and $\mu=0$, the equation~\eqref{HS} is called the Hele-Shaw equation without surface tension.
Hereafter, we will refer to this equation simply as the \textbf{Hele-Shaw} equation.
If $g=0$ and $\mu=1$, the equation is known as the Hele-Shaw equation with surface tension,
also known as the \textbf{Mullins-Sekerka} equation.
Let us record the terminology:
\begin{alignat}{2}
&\partial_{t}h+G(h)h=0\qquad &&(\text{Hele-Shaw}),\label{HSi}\\
&\partial_{t}h+ G(h)\kappa=0\qquad &&(\text{Mullins-Sekerka})\label{MSi}.
\end{alignat}
We are also interested by two equations which
describe asymptotic regime in the \textbf{thin-film} approximation. They are
\begin{align}
&\partial_t h-\cnx(h\nabla h)=0 \qquad&&(\text{Boussinesq}),\label{Bou}\\
&\partial_t h+\cnx(h\nabla \Delta h)=0\qquad &&(\text{thin-film}).\label{ThFi}
\end{align}
Equation \eqref{Bou} was derived from~\eqref{HSi} by Boussinesq~\cite{Boussinesq-1904}
to study groundwater infiltration.
Equation \eqref{ThFi} was derived from~\eqref{MSi} by Constantin, Dupont, Goldstein, Kadanoff, Shelley and Zhou in~\cite{Constantin1993droplet}
as a lubrication approximation model of the interface between two immiscible fluids in a Hele-Shaw cell.
\subsection{Lyapunov functionals and entropies}
Our main goal is to find some
monotonicity properties
for the previous free boundary flows, in a unified way.
Before going any further, let us fix the terminology used in this paper.
\begin{definition}\label{Defi:1.1}
(a) Consider one of the evolution equation stated above and a function
$$
I: C^\infty(\mathbf{T}^{d})\to [0,+\infty).
$$
We say that $I$ is a \textbf{Lyapunov functional} if the following property holds:
for any smooth solution $h$ in $C^\infty([0,T]\times \mathbf{T}^{d})$ for some $T>0$, we have
$$
\forall t\in [0,T],\qquad \frac{\diff}{\dt} I(h(t))\leq 0.
$$
The quantity $-\frac{\diff}{\dt} I(h)$ is called the \textbf{dissipation rate} of the functional $I(h)$.
(b) We say that a Lyapunov functional $I$ is an \textbf{entropy} if the dissipation rate satisfies, for some $C>0$,
$$
-\frac{\diff}{\dt} I(h(t))\ge C I(h(t)).
$$
(c) Eventually, we say that $I$ is a \textbf{strong Lyapunov functional} if
$$
\frac{\diff}{\dt} I(h(t))\leq 0\quad\text{and}\quad\frac{\diff^2}{\dt^2} I(h(t))\ge 0.
$$
This means that $t\mapsto I(h(t))$ decays in a convex manner.
\end{definition}
\begin{remark}
$(i)$ The Cauchy problems for the previous free boundary equations have been studied by different techniques,
for weak solutions,
viscosity solutions or also classical solutions. We refer the reader to \cite{A-Lazar,AMS,ChangLaraGuillenSchwab,Chen-ARMA-1993,ChengCoutandShkoller-2014,Cheng-Belinchon-Shkoller-AdvMath,ChoiJerisonKim,CCG-Annals,Escher-Simonett-ADE-1997,FlynnNguyen2020,GG-JPS-AdvMaths-2019,Gunther-Prokert-SIAM-2006,
Hadzic-Shkoller-CPAM2015,Kim-ARMA2003,Knupfer-Masmoudi-ARMA-2015,NPausader,Pruss-Simonett-book}.
Thanks to the parabolic smoothing effect, classical solutions are smooth for positive times
(the elevation $h$ belongs to $C^\infty((0,T]\times \mathbf{T}^{d})$).
This is why we consider functionals $I$ defined only on smooth functions $C^\infty(\mathbf{T}^{d})$.
$(ii)$ Assume that $I$ is an entropy for an evolution equation and consider a global in time
solution of the latter problem.
Then the function
$t\mapsto I(h(t))$ decays exponentially fast. In the literature, there are more general
definition of entropies for various
evolution equations. The common idea is that entropy dissipation methods allow
to study the large time behavior or to prove functional inequalities
(see~\cite{Bertozzi-NoticesAMS-1998,Carillo-Jungel-Markovich-Toscani-Unterreiter,Arnold-et-al-2004,Evans-2004,Villani-Oldandnew,Bolley-Gentil-JMPA-2010,Dolbeault-Toscani-AIHPNL-2013,Bodineau-Lebowitz-Mouhot-Villani,Zugmeyer-arxiv2020,Jungel-book-entropy}).
$(iii)$ To say that $I(h)$ is a strong Lyapunov functional
is equivalent to say that the dissipation rate $-\frac{\diff}{\dt} I(h)$ is also a Lyapunov functional.
This notion was introduced in~\cite{Aconvexity}
as a tool to find Lyapunov functionals which control higher order Sobolev norms.
Indeed, in general,
the dissipation rate is expected to be a higher order energy
because of the smoothing effect of a parabolic equation.
Notice that the idea to compute the second-order derivative in time is related to the
celebrated work of Bakry and Emery~\cite{BakryEmmery-1985}.
\end{remark}
\subsection{Examples}
Since we consider different equations, for the reader's convenience,
we begin by discussing some examples which are well-known in certain
communities.
\begin{example}\label{Example:heateq}
Consider the heat equation
$\partial_t h-\Delta h=0$. The energy identity
$$
\frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x +\int_{\mathbf{T}^{d}}\left\vert \nabla h\right\vert^2\diff \! x=0,
$$
implies that the square of the $L^2$-norm is a Lyapunov functional.
It is in addition a strong Lyapunov
functional since, by differentiating the equation, the quantity $\int_{\mathbf{T}^{d}}\left\vert \nabla h\right\vert^2\diff \! x$
is also a Lyapunov functional. Furthermore, if one assumes that the mean value of $h(0,\cdot)$ vanishes, then
the Poincar\'e's inequality implies that the square of the $L^2$-norm is an entropy.
Now let us discuss another important property, which holds for positive solutions.
Assume that $h(t,x)\ge 1$ and introduce the Boltzmann's entropy, defined by
$$
H(h)=\int_{\mathbf{T}^{d}}h \log h \diff \! x.
$$
Then $H(h)$ is a strong Lyapunov functional.
This classical result (see Evans~\cite{Evans-BAMS-2004})
follows directly from the pointwise identities
\begin{align*}
&(\partial_t-\Delta)(h\log h)=-\frac{\left\vert \nabla h\right\vert^2}{h},\\
&(\partial_t -\Delta )\frac{\left\vert \nabla h\right\vert^2}{h}=-2h\left\vert \frac{\nabla^2 h}{h}
-\frac{\nabla h \otimes \nabla h}{h^2}\right\vert^2.
\end{align*}
We will prove that the Boltzmann's entropy is also a strong Lyapunov functional for the Boussinesq equation~\eqref{Bou}, by using
a functional inequality which controls the $L^2$-norm of
$\left\vert \nabla h\right\vert^2/h$. Recall that the $L^1$-norm of $\left\vert \nabla h\right\vert^2/h$, called the Fisher's information,
plays a key role in entropy methods and
information theory (see~ Villani's lecture notes~\cite{Villani-Lecturenotes2008} and his book~\cite[Chapters 20, 21, 22]{Villani-Oldandnew}).
\end{example}
For later references and comparisons, we discuss some examples of Lyapunov functionals
for the nonlinear equations mentioned above.
\begin{example}[Mean-curvature equation]\label{example:MCF}
Consider the mean curvature equation $\partial_t h+\sqrt{1+|\nabla h|^2}\kappa=0$.
If $h$ is a smooth solution, then
\begin{equation}\label{MCF:n0}
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0 \quad\text{where}\quad
\mathcal{H}^{d}(\Sigma)=\int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\diff \! x.
\end{equation}
This is proved by an integration by parts argument:
\begin{align*}
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)&=\int_{\mathbf{T}^{d}}\nabla_x (\partial_th) \cdot \frac{\nabla_x h}{\sqrt{1+|\nabla h|^2}}\diff \! x
=\int_{\mathbf{T}^{d}} (\partial_t h)\kappa\diff \! x\\
&=-\int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\kappa^2\diff \! x\leq 0.
\end{align*}
In fact, the mean-curvature equation is a gradient flow for $\mathcal{H}^{d}(\Sigma)$, see~\cite{CMWP-BAMS-2015}.
When the space dimension $d$ is equal to $1$,
we claim that the following quantities are also Lyapunov functionals:
$$
\int_\mathbf{T} h^2\diff \! x,\quad \int_\mathbf{T} (\partial_x h)^2\diff \! x,\quad
\int_\mathbf{T} (\partial_t h)^2\diff \! x,\quad \int_\mathbf{T} (1+(\partial_xh)^2)\kappa^2\diff \! x.
$$
To our knowledge, these results are new and we prove
this claim in Appendix~\ref{Appendix:MCF}.
We will also prove that $\int_\mathbf{T} h^2\diff \! x$ is a strong Lyapunov functional.
\end{example}
\begin{example}[Hele-Shaw equation]\label{example:Hele-Shaw}
Consider the equation $\partial_{t}h+G(h)h=0$.
Recall that $G(h)$ is a non-negative operator. Indeed,
denoting by $\varphi=\mathcal{H}(\psi)$ the harmonic extension of $\psi$ given by~\eqref{defi:varphiintro},
it follows from Stokes' theorem that
\begin{equation}\leqft\vertbel{positivityDNintro}
\int_{\mathbf{T}^{d}} \psi G(h)\psi\diff \! x=\int_{\partial\Omega}\varphi \partial_n \varphi\diff\mathcal{H}^{d}=
\iint_{\Omega}\leqft\vert\nabla_{x,y}\varphi\right\vert^2\diff \! ydx\ge 0.
\eqrefnd{equation}
Consequently, if $h$ is a smooth-solution to $\partial_{t}h+G(h)h=0$, then
$$
\frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x =-\int_{\mathbf{T}^{d}} hG(h)h\diff \! x\leq 0.
$$
This shows that $\int_{\mathbf{T}^{d}} h^2\diff \! x$ is a Lyapunov functional.
In \cite{AMS}, it is proved that in fact $\int_{\mathbf{T}^{d}} h^2\diff \! x$
is a strong Lyapunov functional and also an entropy.
This result is generalized in \cite{Aconvexity} to functionals of the form
$\int_{\mathbf{T}^{d}} \Phi(h)\diff \! x$ where $\Phi$ is a convex function whose
derivative is also convex.
\eqrefnd{example}
\begin{example}[Mullins-Sekerka]\label{example:Mullins-Sekerka}
Assume that $h$ solves $\partial_{t}h+G(h)\kappa=0$ and denote by $\mathcal{H}^{d}(\Sigma)$ the area functional (see~\eqref{MCF:n0}). Then~\eqref{positivityDNintro} implies that
$$
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)=\int_{\mathbf{T}^{d}} (\partial_t h)\kappa\diff \! x=-\int_{\mathbf{T}^{d}}\kappa G(h)\kappa\diff \! x\leq 0,
$$
so $\mathcal{H}^{d}(\Sigma)$ is a Lyapunov functional.
In fact, the Mullins-Sekerka equation
is a gradient flow for $\mathcal{H}^{d}(\Sigma)$,
see~\cite{Almgren-Physics-1996,Giacomelli-Otto-CVPDE-2001}.
\end{example}
\begin{example}[Thin-film equation]\label{exampleTF}
The study of entropies plays a key role in the study of the thin-film equation (and its variants)
since the works of Bernis and Friedman~\cite{Bernis-Friedman-JDE}
and Bertozzi and Pugh~\cite{Bertozzi-Pugh-1996}. The simplest observation is that,
if $h$ is a non-negative solution to
$\partial_th+\partial_x(h\partial_x^3 h)=0$, then
$$
\frac{\diff}{\dt} \int_\mathbf{T} h^2\diff \! x\leq 0, \qquad \frac{\diff}{\dt} \int_\mathbf{T} (\partial_x h)^2\diff \! x\leq 0.
$$
(This can be verified by elementary integrations by parts.) To give an example of
hidden Lyapunov functionals, consider, for $p\ge 0$ and a function $h> 0$, the functionals
$$
H_p(h)=\int_\mathbf{T} \frac{h_x^2}{h^p}\diff \! x.
$$
Laugesen discovered~(\cite{Laugesen-CPAA}) that, for $0\leq p\leq 1/2$, $H_p(h)$
is a Lyapunov functional.
This result was complemented by
Carlen and Ulusoy~(\cite{Carlen-Ulusoy-CMS}) who showed
that $H_p(f)$ is an entropy when $0< p<(9 + 4\sqrt{15})/53$.
We also refer to \cite{BerettaBDP-ARMA-1995,DPGG-Siam-1998,BDPGG-ADE-1998,JungelMatthes-Nonlinearity-2006}
for the study of entropies of the form $\int h^p\diff \! x$ with $1/2\leq p\leq 2$.
\end{example}
\subsection{Main results and plan of the paper}
We are now ready to introduce our main new results.
To highlight the links between them, we
begin by gathering in the following table the list of all the Lyapunov functionals
that will be considered. This table includes known results, some of which have already been
discussed and others will be reviewed later.
Precise statements are given in the next section.
\begin{tabular}{@{}llllr@{}}
\toprule
Equations & \multicolumn{2}{c}{\textbf{Lyapunov functionals}} & See & Properties \\[1ex]
\midrule
\textbf{Heat} & {$\int h^2$ } &$(*)$ & Ex.~\ref{Example:heateq} &(S) \\[0.5ex]
\textbf{equation} & $\int h \log h$ & $(*)$& Ex.~\ref{Example:heateq} &(S), (GF) \\[0.5ex]
\midrule
\textbf{Mean} & $\int \sqrt{1+|\nabla h|^2}=\mathcal{H}^{d}(\Sigma)$
&$(*)$& Ex.~\ref{example:MCF} & (GF)\\[0.5ex]
\textbf{curvature}& \cellcolor[gray]{0.95}{$\int \left\vert\nabla h\right\vert^2$}& &Prop.~\ref{Prop:C1nabla} & \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int h^2$} & $(d=1)$&Prop.~\ref{Prop:C1} & \cellcolor[gray]{0.95}{(S)} \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (\partial_th)^2$} &$(d=1)$ &Prop.~\ref{Prop:C1} &\\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (1+(\partial_xh)^2)\kappa^2$}
&$(d=1)$ &Prop.~\ref{Prop:C1}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (\partial_xh)\arctan (\partial_xh)$} & $(d=1)$ &Prop.~\ref{Prop:C1}& \\[0.5ex]
\midrule
\textbf{Hele-Shaw} & $\int \Phi(h)$, $\Phi''\ge 0$
&$(*)$& Ex.~\ref{example:Hele-Shaw} & \\[0.5ex]
& $\int \Phi(h)$, $\Phi'',\Phi'''\ge 0$ &$(*)$& Ex.~\ref{example:Hele-Shaw} & (S) \\[0.5ex]
& $\int h G(h)h$ &$(*)$& \S\ref{S213} & \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int \sqrt{1+|\nabla h|^2}$} && Th.~\ref{T1} &\cellcolor[gray]{0.95}{(S)}\\[0.5ex]
& \cellcolor[gray]{0.95}{$\int \kappa G(h)h$} && Th.~\ref{Theorem:J(h)decays} &\\[0.5ex]
\midrule
\textbf{Mullins-} & $\int \sqrt{1+|\nabla h|^2}$
& $(*)$ &Ex.~\ref{example:Mullins-Sekerka} & (GF) \\[0.5ex]
\textbf{Sekerka} & \cellcolor[gray]{0.95}{$\int h^2$} & &Th.~\ref{T1} & \\[1ex]
\midrule
\textbf{Thin-film} & $\int \left\vert\nabla h\right\vert^2$ & $(*)$& Prop.~\ref{prop:lubrik1n} & \\[0.5ex]
&$\int h^{-p}h_x^2\qquad 0\leq p\leq 1/2$ &$(*)$& Ex.~\ref{exampleTF}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int h^{m}\qquad\quad \frac{1}{2} \leq m\leq 2$} & $(**)$ &Prop.~\ref{positivity} &\\[0.5ex]
& $\int h\log h$ & &Prop.~\ref{positivity}& \\[0.5ex]\midrule
\textbf{Boussinesq} & {$\int h ^2$}& & Th.~\ref{Theo2bis} & \cellcolor[gray]{0.95}{(S)} \\[0.5ex]
& {$\int h\log h$} && Th.~\ref{Theo2bis} & \cellcolor[gray]{0.95}{(S)}\\[0.5ex]
& $\int h^{m+1}$ & $(*)$ & Prop.~\ref{convexporoust} &\\[0.5ex]
& {$\int h^2\left\vert\nabla h\right\vert^2$}&$(*)$ &\S\ref{S:Boussinesq}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int h^m\left\vert\nabla h\right\vert^2, ~ 0\leq m\leq \frac{1+\sqrt{7}}{2}$} &$(**)$ &Prop.~\ref{convexporous}& \\[0.5ex]
& \cellcolor[gray]{0.95}{$\int (\partial_xh)\arctan (\partial_xh)$} &$(d=1)$ &Prop.~\ref{prop:C2Boussinesq}& \\[0.5ex]
\bottomrule
\textbf{Legend:} & \multicolumn{4}{l}{The gray boxes point to the new results} \\
& \multicolumn{4}{l}{\small{$(*)$: already known}} \\
& \multicolumn{4}{l}{\small{$(**)$: improves previous exponents or simplifies the proof}} \\
& \multicolumn{4}{l}{\small{$(d=1)$: only in dimension one}} \\
& \multicolumn{4}{l}{\small{(S): is a strong Lyapunov functional}} \\
& \multicolumn{4}{l}{\small{(GF): is derived from a Gradient Flow structure.}} \\
\bottomrule
\end{tabular}
To conclude this introduction, let us mention that in addition to Lyapunov functionals,
maximum principles also play a key role
in the study of these parabolic equations. One can think of the
maximum principles for the mean-curvature equation obtained
by Huisken~\cite{Huisken-JDE-1984} and Ecker and Huisken~(see \cite{Ecker-Huisken-Annals,Ecker-Regularity-Theory}),
used to obtain a very sharp global existence result
of smooth solutions. Many maximum principles exist also
for the Hele-Shaw equations (see~\cite{Kim-ARMA2003,ChangLaraGuillenSchwab}). In particular, we will use the
maximum principle for
space-time derivatives proved in~\cite{AMS}. See also~\cite{ConstantinVicol-GAFA2012} for related models.
For the thin-film equations of the form $\partial_th+\partial_x(f(h)\partial_x^3 h)=0$ with $f(h)=h^m$ and an exponent $m\ge 3.5$,
in one space dimension,
if the initial data $h_0$ is positive, then the solution $h(x,t)$
is guaranteed to stay positive
(see~\cite{Bernis-Friedman-JDE,Bertozzi-et-al-1994}
and~\cite{DPGG-Siam-1998,BDPGG-ADE-1998,ZhornitskayaBertozzi-2000,Bresch2018bd}).
\section*{Acknowledgements}
The authors acknowledge the support of the SingFlows project,
grant ANR-18-CE40-0027 of the French National Research
Agency (ANR).
\section{Statements of the main results}
Our main goal is to study the decay
properties of several natural coercive quantities for the Hele-Shaw,
Mullins-Sekerka, Boussinesq and thin-film equations,
in a unified way.
\subsection{Entropies for the Hele-Shaw and Mullins-Sekerka equations}
The first two coercive quantities we want to study are
the $L^2$-norm and the area functional (that is, the $d$-dimensional surface measure):
\begin{equation}\label{L2Hm}
\left(\int_{\mathbf{T}^{d}}h(t,x)^2\diff \! x\right)^\frac{1}{2},\qquad \mathcal{H}^{d}(\Sigma)=\int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\diff \! x.
\end{equation}
Our first main result states that these are Lyapunov functionals
for the Hele-Shaw and Mullins-Sekerka equations,
in any dimension.
\begin{theorem}\label{T1}
Let $d\ge 1$, $(g,\mu)\in [0,+\infty)^2$ and assume that $h$
is a smooth solution to
\begin{equation}\label{n21}
\partial_{t}h+G(h)(gh+\mu \kappa)=0.
\end{equation}
Then,
\begin{equation}\label{n31}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0 \quad
\text{and}\quad \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0.
\end{equation}
\end{theorem}
\begin{remark}
The main point is that this result holds uniformly with respect to $g$ and $\mu$.
For comparison, let us recall some results which hold for the special cases where either $g=0$ or $\mu=0$.
$i)$ When $g=0$, the fact that the area functional $\mathcal{H}^{d}(\Sigma)$ decays in time
follows from
a well-known gradient flow structure for the Mullins-Sekerka equation. However, the
decay of the $L^2$-norm in this case is new.
$ii)$ When $\mu=0$, the decay of the $L^2$-norm follows from an elementary energy estimate.
However, the proof of the decay of the area functional $t\mapsto \mathcal{H}^{d}(\Sigma(t))$
requires a more subtle argument. It is implied (but only implicitly)
by some computations by Antontsev, Meirmanov, and Yurinsky in \cite{Antontsev-al-2004}.
The main point is that we shall give a different approach which holds uniformly with respect to $g$ and $\mu$.
In addition, we will obtain a precise lower bound for
the dissipation
rate showing that
$\mathcal{H}^{d}(\Sigma)$ is an entropy when $\mu=0$ and not only a Lyapunov functional.
\end{remark}
To prove these two uniform decay results, the key ingredient will be to study the following functional:
$$
J(h)\mathrel{:=} \int_{\mathbf{T}^{d}} \kappa\, G(h)h\diff \! x.
$$
It appears naturally when performing energy estimates. Indeed, by multiplying the equation~\eqref{n21}
with $h$ or $\kappa$ and integrating by parts, one obtains
\begin{align}
&\frac{1}{2}\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^2\diff \! x+g \int_{\mathbf{T}^{d}}hG(h)h\diff \! x+\mu J(h)=0,\notag\\
&\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)+gJ(h)+\mu\int_{\mathbf{T}^{d}} \kappa G(h)\kappa\diff \! x=0.\label{J(h)dt}
\end{align}
We will prove that $J(h)$ is non-negative.
Since the Dirichlet-to-Neumann operator is a non-negative operator~(see \eqref{positivityDNintro}),
this will be sufficient to conclude that the $L^2$-norm and the area functional $\mathcal{H}^{d}(\Sigma)$ are non-increasing along the flow.
An important fact is that $J(h)$ is a nonlinear analogue
of the homogeneous $H^{3/2}$-norm. A first way to give this statement
a rigorous meaning consists in noticing that $G(0) h=\left\vert D_x\right\vert h=\sqrt{-\Delta_x}h$ and
the linearized version of $\kappa$ is
$-\Delta_x h$. Therefore, if $h=\varepsilon\zeta$, then
$$
J(\varepsilon \zeta)=\varepsilon^2\int_{\mathbf{T}^{d}} \big( \left\vert D_x\right\vert^{3/2}\zeta\big)^2\diff \! x+O(\varepsilon^3).
$$
We will prove a functional inequality (see Proposition~\ref{P:Positive2} below) which shows
that $J(h)$ controls the $L^2(\Omega)$-norm
of the Hessian of the harmonic extension $\mathcal{H}(h)$ of~$h$, given by~\eqref{defi:varphiintro} with $\psi=h$.
Consequently, $J(h)$ controls three half-derivatives of $h$ in $L^2$
by means of a trace theorem.
\subsection{The area functional is a strong Lyapunov functional}\label{S213}
As seen in Example~\ref{example:MCF}, for the mean-curvature equation in space dimension $d=1$, there exist
Lyapunov functionals which control all the spatial derivatives of order less than $2$.
Similarly, there are higher-order energies for the thin-film equations (see
Theorem~\ref{Theo2}, the Laugesen functionals introduced in Example~\ref{exampleTF} and also \cite{ConstantinElgindi}).
On the other hand, for the Hele-Shaw and Mullins-Sekerka equations,
it is more difficult to find higher-order energies which control some derivatives
of the solution. This is because it is harder
to differentiate these equations.
For the Mullins-Sekerka problem, one can quote two recent papers
by Chugreeva--Otto--Westdickenberg~\cite{ChugreevaOttoWestdickenberg2019} and
Acerbi--Fusco--Julin--Morini~\cite{AcerbiFuscoJulinMorini2019}.
In both papers, the authors compute the second derivative in time of some
coercive quantities to study the long time behavior of the solutions, in perturbative regimes.
Here, we will prove a similar result for the Hele-Shaw equation.
However, the analysis will be entirely different.
On the one hand, it is easier in some sense to differentiate the Hele-Shaw equation.
On the other hand, we will be able to exploit some additional identities and inequalities which allow us to
obtain a result under a very mild smallness assumption.
Here, we consider the Hele-Shaw equation:
\begin{equation}\label{HSJ}
\partial_t h+G(h)h=0.
\end{equation}
It is known that the Cauchy problem for the latter equation
is well-posed on the Sobolev spaces $H^s(\mathbf{T}^{d})$ provided that $s>1+d/2$, and moreover the critical
Sobolev exponent is $1+d/2$ (see~\cite{Cheng-Belinchon-Shkoller-AdvMath,Matioc-APDE-2019,AMS,NPausader}).
On the other hand, the natural energy estimate only controls the $L^2$-norm.
It is thus natural to seek higher order energies, which are bounded in time and which control
Sobolev norms $H^\mu(\mathbf{T}^{d})$ of order $\mu>0$.
It was proved in~\cite{AMS,Aconvexity} that one can control one-half derivative of $h$ by
exploiting some convexity argument. More precisely, it is proved in the previous references that
\begin{equation}\label{n120}
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}}hG(h)h\diff \! x\leq 0.
\end{equation}
This inequality gives a control of a higher
order Lyapunov functional of order $1/2$. Indeed,
$$
\int_{\mathbf{T}^{d}}hG(h)h\diff \! x=\iint_{\Omega}\left\vert\nabla_{x,y}\mathcal{H}(h)\right\vert^2\diff \! ydx,
$$
where $\mathcal{H}(h)$ is the harmonic extension of $h$ (solution to~\eqref{defi:varphiintro}
where $\psi$ is replaced by $h$).
Hence, by using a trace theorem,
it follows that $\int_{\mathbf{T}^{d}}hG(h)h\diff \! x$ controls the $H^{1/2}$-norm of $h$.
The search for higher-order functionals leads to interesting new difficulties.
Our strategy here is to try to prove that the area functional is a strong Lyapunov functional.
This means that the function $t\mapsto\mathcal{H}^{d}(\Sigma(t))$ decays in a convex manner.
This is equivalent to $\diff^2 \mathcal{H}^{d}(\Sigma)/\diff \! t^2\ge 0$. Now, remembering (cf \eqref{J(h)dt})
that
$$
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)+J(h)=0\quad\text{where}\quad J(h)=\int_{\mathbf{T}^d}\kappa G(h)h\diff \! x,
$$
the previous convexity argument suggests that $\diff J(h)/\diff \! t\leq 0$, which implies that $J(h)$ is
a Lyapunov function. This gives us a very interesting higher-order energy since
the functional $J(h)$ controls
three-half spatial derivatives of $h$ (as seen above, and as will be made precise
in Proposition~\ref{P:Positive2}).
The next result states that the previous strategy applies under
a very mild smallness assumption on the
first order derivatives of the elevation $h$ at time $0$.
\begin{theorem}\label{Theorem:J(h)decays}
Consider a smooth solution to $\partial_t h+G(h)h=0$.
There exists a universal constant $c_d$ depending only on the dimension $d$ such that,
if initially
\begin{equation}\label{esti:final6}
\sup_{\mathbf{T}^d}\left\vert \nabla h_0\right\vert^2 \leq c_d,\qquad
\sup_{\mathbf{T}^d}\left\vert G(h_0)h_0\right\vert^2 \leq c_d,
\end{equation}
then
\begin{equation}\label{n124}
\frac{\diff}{\dt} J(h)
+\frac{1}{2}\int_{{\mathbf{T}}^d}\frac{\big(|\nabla\nabla h|^2
+ |\nabla\partial_t h|^2\big)}{(1+|\nabla h|^2)^{3/2}}\diff \! x\leq 0.
\end{equation}
\end{theorem}
\begin{remark}
$i)$ The constant $c_d$ is the unique solution in $[0,1/4]$ to
$$
2c_d\left(d+\left(d+\sqrt{d}\right) c_d\right)
+ 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}= \frac{1}{2}.
$$
$ii)$ Since
$$
\frac{\diff}{\dt} J(h)=- \frac{\diff^2}{\diff t^2} \mathcal{H}^{d}(\Sigma),
$$
it is equivalent to say that the area functional $\mathcal{H}^{d}(\Sigma)$ is a strong Lyapunov functional.
\end{remark}
\subsection{Entropies for the Boussinesq and thin-film equations}
The previous theorems suggest to seek a similar uniform result for the thin-film and Boussinesq equations.
In this direction, we will obtain various entropies and gather in the next result only the main consequences.
\begin{theorem}\label{Theo2}
Let $d\ge 1$, $(g,\mu)\in [0,+\infty)^2$ and $h$ be a smooth solution to
\begin{equation}\label{n21B}
\partial_{t}h-\cnx \big(gh\nabla h-\mu h\nabla \Delta h\big)=0.
\end{equation}
Then,
\begin{equation}\label{n31B}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0 \quad
\text{and}\quad \frac{\diff}{\dt} \int_{\mathbf{T}^{d}}\left\vert \nabla h\right\vert^2\diff \! x\leq 0.
\end{equation}
\end{theorem}
\begin{theorem}\label{Theo2bis}
Let $d\ge 1$, and assume that $h$ is a smooth solution to
\begin{equation}\label{n21bis}
\partial_{t}h-\cnx \big(h\nabla h\big)=0.
\end{equation}
Then the square of the $L^2$-norm and the Boltzmann entropy are strong Lyapunov functionals:
\begin{equation}\label{n31.5}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x\leq 0\quad \text{and}\quad\frac{\diff^2}{\diff t^2} \int_{\mathbf{T}^{d}}h^2\diff \! x\ge 0,
\end{equation}
together with
\begin{equation}\label{n31.5log}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h\log h \diff \! x\leq 0\quad \text{and}\quad\frac{\diff^2}{\diff t^2} \int_{\mathbf{T}^{d}}h\log h\diff \! x\ge 0.
\end{equation}
\end{theorem}
\begin{remark}
We will study more general Lyapunov functionals of the form $\int_{\mathbf{T}^{d}} h^{m}\diff \! x$ and
$\int_{\mathbf{T}^{d}} h^m \left\vert\nabla h\right\vert^2\diff \! x$.
\end{remark}
When $g=0$, the first half of \eqref{n31B} was already obtained by several authors.
The study of the decay of Lebesgue norms was initiated by Bernis and Friedman~\cite{Bernis-Friedman-JDE}
and continued by
Beretta-Bertsch-Dal Passo~\cite{BerettaBDP-ARMA-1995},
Dal Passo--Garcke--Gr\"{u}n~\cite{DPGG-Siam-1998} and more recently by
J\"ungel and Matthes~\cite{JungelMatthes-Nonlinearity-2006}, who
performed a systematic study of entropies for the thin-film equation, by means
of a computer assisted proof.
Here we will proceed differently and give a short proof, obtained
by computations inspired by functional inequalities of Bernis~\cite{Bernis-proc-1996} and
Dal Passo--Garcke--Gr\"{u}n~\cite{DPGG-Siam-1998}.
Namely, we will establish a Sobolev type inequality.
Quite surprisingly, this inequality will in turn allow us to study the case with gravity $g>0$, which is
in our opinion the most delicate part of the proof.
As we will see, the crucial ingredient to prove
Theorems~\ref{Theo2} and~\ref{Theo2bis} is given by the following functional inequality.
\begin{proposition}\label{theo:logSob}
For any $d\ge 1$ and any positive function $\theta$ in $H^2(\mathbf{T}^{d})$,
\begin{equation}\label{BmD}
\int_{\mathbf{T}^{d}} \big|\nabla \theta^{1/2}\big|^4\diff \! x\leq \frac{9}{16}\int_{\mathbf{T}^{d}} (\Delta \theta)^2 \diff \! x.
\end{equation}
\end{proposition}
There is a short proof which can be explained here.
\begin{proof}
By integrating by parts, we obtain the classical observation that
\begin{equation}\label{Deltanablanabla}
\begin{aligned}
\int_{\mathbf{T}^{d}}(\Delta \theta)^2\diff \! x&=\int_{\mathbf{T}^d}\sum_{i,j}(\partial_i^2\theta)(\partial_j^2\theta)\diff \! x\\
&=\int_{\mathbf{T}^d}\sum_{i,j}(\partial_{ij}\theta)(\partial_{ij}\theta)\diff \! x=\int_{\mathbf{T}^{d}}\left\vert \nabla\nabla \theta\right\vert^2\diff \! x.
\end{aligned}
\end{equation}
Now, introduce $I=16\int_{\mathbf{T}^{d}} \big|\nabla \theta^{1/2}\big|^4\diff \! x$. By an immediate computation,
$$
I=\int_{\mathbf{T}^{d}} \theta^{-2}|\nabla \theta|^4\diff \! x
=-\int_{\mathbf{T}^{d}} \big(\nabla \theta^{-1} \cdot \nabla \theta\big) \, |\nabla\theta|^2\diff \! x.
$$
By integrating by parts, one can rewrite $I$ under the form
$$
I=\int_{\mathbf{T}^{d}} \theta^{-1} \Delta \theta |\nabla \theta|^2\diff \! x
+2\int_{\mathbf{T}^{d}} \theta^{-1}[(\nabla \theta \cdot \nabla) \nabla \theta]\cdot\nabla \theta\diff \! x.
$$
Since $\left\vert (\nabla \theta\cdot\nabla)\nabla \theta\right\vert
\leq \left\vert\nabla \theta\right\vert\left\vert \nabla^2 \theta\right\vert$ (see~\eqref{n2001} for details),
using~\eqref{Deltanablanabla} and the Cauchy-Schwarz inequality, we obtain
$$
I \leq 3 \, I^{1/2} \bigg(\int_{\mathbf{T}^{d}} (\Delta \theta)^2\diff \! x\bigg)^{1/2}.
$$
Thus we conclude that
$$
I\leq 9 \int_{\mathbf{T}^{d}} (\Delta\theta)^2\diff \! x,
$$
which is the wanted inequality.
\end{proof}
\begin{remark}
\begin{enumerate}[(i)]
\item See Proposition~\ref{P:refD.1v2} for a more general result.
\item The inequality~\eqref{BmD}
is a multi-dimensional version of an inequality of Bernis which holds
in space dimension $d=1$ (see Theorem~$1$ in~\cite{Bernis-proc-1996}).
In this direction, notice that a
remarkable feature of~\eqref{BmD} is that the constant $9/16$
is dimension-independent.
\item The Bernis inequalities in~\cite{Bernis-proc-1996} and similar ones
(see Gr\"un~\cite{Grun-2001} and Dal Passo--Garcke--Gr\"{u}n~\cite{DPGG-Siam-1998})
have been used to study various problems in fluid dynamics.
In the opinion of the authors, Proposition~\ref{theo:logSob}
could have other applications in fluid dynamics.
As an example, we show in Appendix~\ref{appendix:compressible}
how to fully remove a technical obstruction
in the construction of weak solutions for compressible
Navier-Stokes equations with viscosities depending on the density.
\end{enumerate}
\end{remark}
\section{Uniform Lyapunov functionals for the Hele-Shaw and Mullins-Sekerka equations}\label{S:3}
In this section, we prove Theorem~\ref{T1}.
\subsection{Maximum principles for the pressure}\label{S:pressure}
In this paragraph the time variable does not play any role and we ignore it to simplify notations.
We will need the following elementary result.
\begin{lemma}\label{Lemma:decayinfty}
Consider a smooth function $h$ in $C^\infty(\mathbf{T}^d)$ and set
$$
\Omega=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,:\,y<h(x)\}.
$$
For any $\zeta$ in $C^\infty(\mathbf{T}^d)$, there is a unique function
$\phi\in C^\infty(\overline{\Omega})$ such that
$\nabla_{x,y}\phi\in L^2(\Omega)$, solution to
the Dirichlet problem
\begin{equation}\label{defi:varphi2-zero}
\left\{
\begin{aligned}
&\Delta_{x,y}\phi=0 \quad\text{in }\Omega,\\
&\phi(x,h(x))=\zeta(x) \text{ for all }x\in\mathbf{T}^{d}.
\end{aligned}
\right.
\end{equation}
Moreover, for any multi-index $\alpha\in\mathbf{N}^d$ and any $\beta\in \mathbf{N}$ with $\left\vert\alpha\right\vert+\beta>0$,
one has
\begin{equation}\label{decaytozero}
\partial_x^\alpha\partial_y^\beta\phi\in L^2(\Omega)\quad \text{and}\quad
\lim_{y\to-\infty}\sup_{x\in\mathbf{T}^{d}}\left\vert \partial_x^\alpha\partial_y^\beta\phi(x,y)\right\vert=0.
\end{equation}
\end{lemma}
\begin{proof}
The existence and smoothness of the solution $\phi$ is a classical elementary result.
We prove only the property~\eqref{decaytozero}.
Let $y_0$ be an arbitrary real number such that $\mathbf{T}^{d}\times\{y_0\}$ is located underneath the boundary $\partial\Omega=\{y=h\}$
and then
set $\psi(x)=\phi(x,y_0)$. This function belongs to $C^\infty(\mathbf{T}^{d})$ since $\phi$
belongs to $C^\infty(\overline{\Omega})$. Now, in the domain $\Pi\mathrel{:=}\{(x,y)\,;\,y<y_0\}$, $\phi$ coincides
with the harmonic extension of $\psi$, by uniqueness of the harmonic extension.
Since $\Pi$ is invariant by translation in~$x$, we can compute the latter function by
using the Fourier transform in~$x$. It follows that,
\begin{equation}\label{n3000}
\forall x\in \mathbf{T}^{d},\quad \forall y<y_0, \qquad
\phi(x,y)=(e^{(y-y_0)\left\vert D_x\right\vert}\psi)(x).
\end{equation}
(Here, for $\tau<0$, $e^{\tau\left\vert D_x\right\vert}$ denotes the Fourier
multiplier with symbol $e^{\tau\left\vert\xi\right\vert}$.) Indeed,
the function $(e^{(y-y_0)\left\vert D_x\right\vert}\psi)(x)$ is clearly harmonic and is equal to $\psi$ on $\{y=y_0\}$.
Then, for $\left\vert\alpha\right\vert+\beta>0$, it easily follows from~\eqref{n3000}
and the Plancherel theorem that $\partial_x^\alpha\partial_y^\beta\phi$ belongs to $L^2(\Pi)$.
On the other hand, on the strip $\{(x,y)\,;\, y_0<y<h(x)\}$, the function $\partial_x^\alpha\partial_y^\beta\phi$
is bounded and hence square integrable.
By combining the two previous results, we obtain
that
$\partial_x^\alpha\partial_y^\beta\phi$ belongs to~$L^2(\Omega)$. To prove
the second half of \eqref{decaytozero}, we use again the formula~\eqref{n3000}
and the Plancherel theorem, to infer that $\partial_x^\alpha\partial_y^\beta\phi(\cdot,y)$
converges to $0$ in any Sobolev space $H^\mu(\mathbf{T}^{d})$ ($\mu\ge 0$) when $y$ goes to $-\infty$.
The desired decay result now follows from the Sobolev embedding $H^\mu(\mathbf{T}^{d})\subset L^\infty(\mathbf{T}^{d})$ for $\mu>d/2$.
\end{proof}
Let us fix some notations used in the rest of this section.
Now, we consider a smooth function $h=h(x)$ in $C^\infty(\mathbf{T}^d)$ and set
$$
\Omega=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,:\,y<h(x)\}.
$$
We denote by $\varphi$ the harmonic extension of $h$ in $\overline{\Omega}$. This is the solution to
\eqref{defi:varphi2-zero} in the special case where $\zeta=h$. Namely, $\varphi$ solves
\begin{equation}\label{defi:varphi2}
\left\{
\begin{aligned}
&\Delta_{x,y}\varphi=0 \quad\text{in }\Omega,\\
&\varphi(x,h(x))=h(x) \text{ for all }x\in\mathbf{T}^{d}.
\end{aligned}
\right.
\end{equation}
Introduce $Q\colon \overline{\Omega}\to\mathbf{R}$ defined by
$$
Q(x,y)=\varphi(x,y)-y.
$$
We call $Q$ the pressure.
In this paragraph we gather some results for the pressure which are all
consequences of the maximum principle.
For further references, the main result states that $\partial_y Q<0$ everywhere in the fluid.
\begin{proposition}\label{Prop:p3.2}
\begin{enumerate}[i)]
\item\label{regP1} On the free surface $\Sigma=\{y=h(x)\}$,
the function $Q$ satisfies the following properties:
\begin{equation}\label{n8}
\partial_n Q=-\left\vert \nabla_{x,y} Q\right\vert \quad\text{and}\quad n=-\frac{\nabla_{x,y}Q}{\left\vert \nabla_{x,y} Q\right\vert},
\end{equation}
where $n$ denotes the normal to $\Sigma$, given by
\begin{equation}\label{n5}
n=\frac{1}{\sqrt{1+|\nabla h|^2}} \begin{pmatrix} -\nabla h \\ 1 \end{pmatrix}.
\end{equation}
Moreover, the Taylor coefficient $a$ defined by
\begin{equation}\label{defi:Taylor}
a(x)=-\partial_y Q(x,h(x)),
\end{equation}
satisfies $a(x)>0$ for all $x\in \mathbf{T}^{d}$.
\item\label{regP2} For all $(x,y)$ in $\overline{\Omega}$, there holds
\begin{equation}\label{n209}
\partial_y Q(x,y)<0.
\end{equation}
Furthermore,
\begin{equation}\label{n210}
\inf_{\overline{\Omega}}(-\partial_y Q)\ge \min\Big\{ \inf_{x\in\mathbf{T}^{d}}a(x),1\Big\}.
\end{equation}
\item\label{regP3} The function $\left\vert \nabla_{x,y} Q\right\vert$ belongs to $C^\infty(\overline{\Omega})$.
\item \label{regP4} We have the following bound:
\begin{equation}\label{esti:final8}
\sup_{(x,y)\in\overline{\Omega}}\left\vert \nabla_{x,y}Q(x,y)\right\vert^2\leq \max_{\mathbf{T}^d}\frac{(1-G(h)h)^2}{1+|\nabla_xh|^2}.
\end{equation}
\end{enumerate}
\end{proposition}
\begin{remark}\label{Rema:final1}
Consider the evolution problem for the Hele-Shaw equation $\partial_th+G(h)h=0$. Then
in \cite{AMS} it is proved that
$$
\inf_{x\in\mathbf{T}^{d}}a(t,x)\ge \inf_{x\in\mathbf{T}^{d}}a(0,x),\quad
\sup_{x\in\mathbf{T}^{d}}\left\vert G(h)h(t,x)\right\vert\leq \sup_{x\in\mathbf{T}^{d}}\left\vert G(h)h(0,x)\right\vert.
$$
Therefore, \eqref{n210} and \eqref{esti:final8} give two different controls
of the derivatives of the pressure, which are uniform in time.
\end{remark}
\begin{proof}
In this proof, it is convenient to truncate the domain $\Omega$ to work with a compact domain. Consider
$\beta>0$ such that the line $\mathbf{T}^{d}\times \{-\beta\}$ is located underneath the free surface $\Sigma=\{y=h(x)\}$ and set
$$
\Omega_\beta=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;\,-\beta<y<h(x)\}.
$$
We will apply the maximum principle in $\Omega_\beta$ and then let $\beta$ go to $+\infty$.
$\ref{regP1})$ This point is well-known in certain communities,
but we recall the proof for the reader's convenience.
We begin by observing that,
since $Q\arrowvert_{y=h}=0$, on the free surface we have $\left\vert \nabla_{x,y} Q\right\vert=\left\vert \partial_n Q\right\vert$.
So to prove that $\partial_n Q=-\left\vert \nabla_{x,y} Q\right\vert$,
it remains only to prove that $\partial_n Q\leq 0$.
To do so, we begin by noticing that $Q$ is solution
to the following elliptic problem
$$
\Delta_{x,y}Q=0,\quad Q\arrowvert_{y=h}=0.
$$
We will apply the maximum principle in $\Omega_\beta$ with $\beta$ large enough.
In view of \eqref{decaytozero}, there is $\beta>0$ such that
$$
\forall y\leq -\frac{\beta}{2},\qquad \left\Vert \partial_y\varphi(\cdot,y)\right\Vert_{L^\infty(\mathbf{T}^{d})}\leq \frac{1}{2}.
$$
In particular, on $\{y=-\beta\}$, there holds
\begin{equation}\label{Qincreases}
\forall y\leq -\frac{\beta}{2},\quad\forall x\in\mathbf{T}^{d},\qquad
\partial_y Q(x,y)=\partial_y\varphi(x,y)-1\leq -\frac{1}{2}.
\end{equation}
On the other hand, by using the classical maximum principle for harmonic functions in $\Omega_\beta$, we see that
$Q$ reaches its minimum on the boundary $\partial\Omega_\beta$.
In light of \eqref{Qincreases}, the minimum is not attained on $\{y=-\beta\}$,
so it is attained on~$\Sigma$. Since
$Q$ vanishes there, this means that $Q\ge 0$ in $\Omega_\beta$. This immediately
implies the wanted result $\partial_n Q\leq 0$.
In addition, since the boundary is smooth,
we can apply
the classical Hopf--Zaremba principle to infer that $\partial_n Q<0$ on $\Sigma$.
Let us now prove that $a>0$. Recall that, by notation, $\nabla$ denotes
the gradient with respect to the
horizontal variable only, $\nabla=(\partial_{x_1},\ldots,\partial_{x_d})^{t}$.
Since $Q$ vanishes on $\Sigma$,
we have
\begin{equation}\leqft\vertbel{n3001}
0=\nabla\big(Q\arrowvert_{y=h}\big)=(\nabla Q)\arrowvert_{y=h}+(\partial_yQ)\arrowvert_{y=h}\nabla h,
\eqrefnd{equation}
which implies that, on $y=h$ we have,
\begin{equation}gin{align}
a&=-(\partial_y Q)\arrowvert_{y=h}=-\frac{1}{1+|\nabla h|^2}
B ig(\partial_yQ\arrowvert_{y=h}-\nabla h\cdot (\nabla Q)\arrowvert_{y=h}B ig)\leqft\vertbel{esti:final9}\\
&=-\frac{1}{\sigmaqrt{1+|\nabla h|^2}}\notag
\partial_n Q.
\eqrefnd{align}
Since $\partial_n Q<0$ on $\Sigma$, this implies that $a$ is a positive function. Eventually,
remembering that
$n=\frac{1}{\sqrt{1+|\nabla h|^2}} \left(\begin{smallmatrix} -\nabla h \\ 1 \end{smallmatrix}\right)$
and using~\eqref{n3001}, we verify that
$$
n=-\frac{\nabla_{x,y} Q}{\leqft\vert \nabla_{x,y} Q\right\vert}\cdot
$$
This completes the proof of
statement~$\ref{regP1})$.
$\ref{regP2})$ Since the function $-\partial_y Q$ is
harmonic in $\Omega$,
the maximum principle applied in $\Omega_\beta$ implies that $-\partial_yQ$ reaches
its minimum on the boundary $\partial\Omega_\beta$, so
$$
-\partial_y Q\ge \min\Big\{ \inf_{\Sigma} (-\partial_y Q),\inf_{\{y=-\beta\}}(-\partial_y Q)\Big\}.
$$
By letting $\beta$ go to $+\infty$, we obtain~\eqref{n210} since $-\partial_y Q$
converges to $1$ (see~\eqref{decaytozero} applied with $\alpha=0$ and $\beta=1$).
This in turn implies~\eqref{n209}
in view of the fact that $a>0$, as proved in the previous point.
$\ref{regP3})$ Since we assume that $h$ is smooth,
the function $Q$ belongs to $C^\infty(\overline{\Omega})$.
As a consequence, to prove that $\left\vert \nabla_{x,y} Q\right\vert$ is smooth,
it is sufficient to prove that $\left\vert\nabla_{x,y} Q\right\vert^2$
is bounded from below by a positive constant,
which is an immediate consequence of~\eqref{n209}.
$\ref{regP4})$ Since $Q$ is a harmonic function, we have
$$
\Delta_{x,y}\left\vert \nabla_{x,y}Q\right\vert^2=2\left\vert\nabla_{x,y}^2Q\right\vert^2\ge 0.
$$
Consequently, the maximum principle for sub-harmonic functions implies that
$$
\sup_{\overline{\Omega_\beta}}\left\vert \nabla_{x,y}Q\right\vert^2= \sup_{\partial\Omega_\beta}\left\vert \nabla_{x,y}Q\right\vert^2,
$$
where $\Omega_\beta$ is as above. By letting $\beta$ go to $+\infty$, we obtain that
\begin{equation}\label{esti:final8.1}
\sup_{\overline{\Omega}}\left\vert \nabla_{x,y}Q\right\vert^2= \max\left\{\sup_{\Sigma}\left\vert \nabla_{x,y}Q\right\vert^2,1\right\},
\end{equation}
where we used as above that $\left\vert\nabla_{x,y}Q\right\vert$ tends to $1$ when $y$ goes to $-\infty$.
We are thus reduced to estimating $\left\vert \nabla_{x,y}Q\right\vert^2$ on $\Sigma$. To do so,
observe that the identity~\eqref{n3001} implies that, on $\Sigma$, we have
\begin{equation}\label{esti:final8.2}
\left\vert \nabla_{x,y}Q\right\vert^2=(1+|\nabla h|^2)(\partial_yQ)^2=(1+|\nabla h|^2)a^2.
\end{equation}
Using the computations already performed in~\eqref{esti:final9} and remembering that $Q=\varphi-y$, we obtain
$$
a=-\frac{1}{1+|\nabla h|^2}
\Big(-1+\partial_y\varphi\arrowvert_{y=h}-\nabla h\cdot (\nabla \varphi)\arrowvert_{y=h}\Big).
$$
On the other hand, since $\varphi$ is the harmonic extension of $h$, by
definition of the Dirichlet-to-Neumann operator $G(h)$, one has
$$
G(h)h=\partial_y\varphi\arrowvert_{y=h}-\nabla h\cdot (\nabla \varphi)\arrowvert_{y=h}.
$$
We conclude that
$$
a=\frac{1-G(h)h}{1+|\nabla h|^2},
$$
which in turn implies that
$$
(1+|\nabla h|^2)a^2=\frac{(1-G(h)h)^2}{1+|\nabla_xh|^2}.
$$
By combining this with~\eqref{esti:final8.1} and \eqref{esti:final8.2},
we conclude the proof of statement~$\ref{regP4})$.
\end{proof}
\subsection{The key functional identity}\label{S:J(h)}
Let us recall some notations:
we denote by $\kappa$ the mean curvature
\begin{equation}\label{n6}
\kappa=-\cnx \left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right).
\end{equation}
Also, we denote by $\varphi=\varphi(x,y)$ the harmonic
extension of $h$ in $\Omega$ given by~\eqref{defi:varphi2} and we use the notation
$$
Q(x,y)=\varphi(x,y)-y.
$$
\begin{proposition}\label{P:Positive2}
Let $d\ge 1$, assume that $h\colon \mathbf{T}^{d}\to \mathbf{R}$ is a smooth function and set
$$
J(h)\mathrel{:=} \int_{\mathbf{T}^{d}} \kappa\, G(h)h\diff \! x.
$$
Then
\begin{equation}\label{n11}
J(h)=\iint_{\Omega}\frac{\left\vert \nabla_{x,y} Q\right\vert^2\left\vert \nabla_{x,y}^2Q\right\vert^2
-\left\vert \nabla_{x,y} Q\cdot \nabla_{x,y} \nabla_{x,y} Q\right\vert^2}{\left\vert \nabla_{x,y} Q\right\vert^3}\diff \! ydx\ge 0.
\end{equation}
\end{proposition}
\begin{remark}\label{rema:34}
\begin{enumerate}[i)]
\item Since $\left\vert\nabla_{x,y}Q\right\vert\ge \left\vert\partial_yQ\right\vert$, it follows
from~\eqref{n209} and the positivity of the Taylor coefficient $a$ (see statement $\ref{regP1})$
in Proposition~\ref{Prop:p3.2}) that $\left\vert \nabla_{x,y} Q\right\vert$
is bounded from below by a positive constant on $\overline{\Omega}$.
On the other hand, directly from \eqref{decaytozero}, the function
$\left\vert \nabla_{x,y} Q\right\vert^2\left\vert \nabla_{x,y}^2Q\right\vert^2-\left\vert \nabla_{x,y} Q\cdot \nabla_{x,y} \nabla_{x,y} Q\right\vert^2$
belongs to $L^2(\Omega)$.
It follows that the right-hand side of \eqref{n11} is a well defined integral.
\item To clarify notations, set $\partial_i=\partial_{x_i}$ for $1\leq i\leq n$ and $\partial_{n+1}=\partial_y$.
Then
\begin{equation*}
\left\{
\begin{aligned}
&\left\vert \nabla_{x,y}^2 Q\right\vert^2=\sum_{1\leq i,j\leq n+1}(\partial_{i}\partial_{j}Q)^2,\\
&\left\vert \nabla_{x,y} Q\cdot\nabla_{x,y}\nabla_{x,y} Q\right\vert^2
=\sum_{1\leq i\leq n+1}\biggl(\sum_{1\leq j\leq n+1}(\partial_{j}Q)\partial_{i}\partial_{j}Q\biggr)^2.
\end{aligned}
\right.
\end{equation*}
So, it follows from the Cauchy-Schwarz inequality that
\begin{equation}\label{n2001}
\left\vert \nabla_{x,y} Q\cdot\nabla_{x,y}\nabla_{x,y} Q\right\vert^2\leq \left\vert\nabla_{x,y} Q\right\vert^2\left\vert \nabla_{x,y}^2 Q\right\vert^2.
\end{equation}
This shows that $J(h)\ge 0$.
\item If $d=1$, then one can simplify the previous expression. Remembering that
$\Delta_{x,y}Q=0$, one can verify that
$$
J(h)=\frac{1}{2} \iint_\Omega\frac{\left\vert \nabla_{x,y}^2Q\right\vert^2}{\left\vert \nabla_{x,y} Q\right\vert}\diff \! ydx.
$$
Notice that, for the Hele-Shaw equation, one has a uniform in time estimate for
$\left\vert \nabla_{x,y} Q\right\vert$ as explained in Remark~\ref{Rema:final1}. Consequently,
$J(h)$ controls the $L^2$-norm of the second-order derivative of $Q$.
\end{enumerate}
\end{remark}
\begin{proof}
To prove Proposition~\ref{P:Positive2}, the main identity is given by the following result.
\begin{lemma}
There holds
\begin{equation}\label{n369}
J(h)=\int_\Sigma \partial_n \left\vert \nabla_{x,y} Q\right\vert\diff \! \Hm,
\end{equation}
where $\Sigma=\{y=h(x)\}$.
\end{lemma}
\begin{proof}
By definition of the Dirichlet-to-Neumann operator, one has
$$
G(h)h=\sqrt{1+|\nabla h|^2}\partial_n \varphi\arrowvert_{y=h},
$$
so
$$
\int_{\mathbf{T}^{d}} \kappa\, G(h)h\diff \! x
=\int_{\mathbf{T}^{d}} \kappa\,\partial_n \varphi\sqrt{1+|\nabla h|^2}\diff \! x.
$$
Using the expression~\eqref{n5} for the normal $n$, we observe that
$$
\partial_nQ=\partial_n \varphi-\frac{1}{\sqrt{1+|\nabla h|^2}}.
$$
Directly from the definition~\eqref{n6} of $\kappa$, we get that
$$
\int_{\mathbf{T}^{d}}\kappa \diff \! x=0.
$$
So by combining the previous identities, we deduce that
$$
J(h)
=\int_{\mathbf{T}^{d}} \kappa\,(\partial_n Q)\arrowvert_{y=h} \sqrt{1+|\nabla h|^2}\diff \! x,
$$
which can be written under the form
\begin{equation}\label{n358}
J(h)=\int_{\Sigma}\kappa \, \partial_n Q \diff \! \Hm.
\end{equation}
For this proof only,
to simplify notations, we will write simply $\nabla$ and $\Delta$
instead of $\nabla_{x,y}$ and $\Delta_{x,y}$.
Now, we recall from Proposition~\ref{Prop:p3.2} that,
on the free surface $\Sigma$, we have
\begin{equation}\label{n8bis}
\partial_n Q=-\left\vert \nabla Q\right\vert \quad\text{and}\quad n=-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}.
\end{equation}
It follows that
$$
\kappa=-\cn_{x,y} \left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\right),
$$
and
\begin{equation}\label{n10}
\int_{\Sigma}\kappa \, \partial_n Q \diff \! \Hm
=\int_{\Sigma}\cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \right)\left\vert \nabla Q\right\vert \diff \! \Hm.
\end{equation}
Remembering that $\cnx \nabla Q=0$, one can further simplify:
\begin{align*}
\cnx \left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \right) \left\vert \nabla Q\right\vert
&=\cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \left\vert \nabla Q\right\vert \right)
-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\cdot \nabla \left\vert\nabla Q\right\vert\\
&=\cnx \nabla Q-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\cdot \nabla \left\vert\nabla Q\right\vert\\
&=-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}\cdot \nabla \left\vert\nabla Q\right\vert\cdot
\end{align*}
Now, we use again the identity $n=-\frac{\nabla Q}{\left\vert \nabla Q\right\vert}$
to infer that, on $\Sigma$, we have
$$
\cnx\left(\frac{\nabla Q}{\left\vert \nabla Q\right\vert} \right) \left\vert \nabla Q\right\vert
=n\cdot \nabla \left\vert\nabla Q\right\vert=\partial_n \left\vert \nabla Q\right\vert.
$$
Consequently, it follows from~\eqref{n358} and \eqref{n10} that
$$
J(h)=\int_{\Sigma} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm.
$$
This completes the proof of the lemma.
\end{proof}
We have proved that $J(h)$ is equal to the integral over $\Sigma$ of
$\partial_n\left\vert \nabla Q\right\vert$. This suggests to apply the Stokes' theorem.
To do so, as in the proof of Proposition~\ref{Prop:p3.2}, it is convenient
to truncate the domain $\Omega$ to work with a compact domain.
Again, we consider
$\beta>0$ such that the hyperplane $\{y=-\beta\}$ is located
underneath the free surface $\Sigma$ and set
$$
\Omega_\beta=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;\,-\beta<y<h(x)\}.
$$
Let us check that the contribution from the fictitious bottom disappears when $\beta$ goes to $+\infty$.
\begin{lemma}
Denote by $\Gamma_\beta$ the bottom $\Gamma_\beta=\{(x,y)\in \mathbf{T}^{d}\times\mathbf{R}\,;\;y=-\beta\}$. Then
\begin{equation}\label{n370}
\lim_{\beta\to+\infty}\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm=0.
\end{equation}
\end{lemma}
\begin{proof}
We have
$$
\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm
=-\int_{\mathbf{T}^{d}}\partial_y \left\vert \nabla Q\right\vert\diff \! x
=-\int_{\mathbf{T}^{d}}\frac{\nabla_x Q\cdot\nabla_x\partial_yQ+\partial_y Q\partial_y^2Q}{\left\vert \nabla Q\right\vert}\diff \! x.
$$
As we have seen in Remark~\ref{rema:34},
the function $\left\vert \nabla Q\right\vert$ is bounded from below
by a positive constant in $\Omega$. Consequently, it is bounded
from below on $\Gamma_\beta$ uniformly with respect to $\beta$.
On the other hand, it follows from \eqref{decaytozero} that
$$
\lim_{\beta\to+\infty}\left\Vert (\nabla_x Q\cdot\nabla_x\partial_yQ
+\partial_y Q\partial_y^2Q)(\cdot,-\beta)\right\Vert_{L^\infty(\mathbf{T}^{d})}=0.
$$
This immediately gives the wanted result.
\end{proof}
Now, we are in position to conclude the proof. It follows from \eqref{n369} that
$$
J(h)=\int_{\partial\Omega_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm-\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm.
$$
Now, remembering that $\left\vert \nabla Q\right\vert$ belongs to $C^\infty(\overline{\Omega})$
(see statement~$\ref{regP3})$ in Proposition~\ref{Prop:p3.2}), one may apply
the Stokes' theorem to infer that
$$
J(h)=\int_{\Omega_\beta} \Delta\left\vert \nabla Q\right\vert\diff \! ydx-\int_{\Gamma_\beta} \partial_n\left\vert \nabla Q\right\vert\diff \! \Hm.
$$
Since $\left\vert \nabla Q\right\vert>0$ belongs to $C^\infty(\overline{\Omega})$, one can compute $\Delta\left\vert\nabla Q\right\vert$.
To do so, we apply the general identity
$$
\Delta u^2=2u\Delta u+2\left\vert \nabla u\right\vert^2,
$$
with $u=\left\vert\nabla Q\right\vert$.
This gives that
\begin{align*}
\Delta \left\vert \nabla Q\right\vert&=\frac{1}{2\left\vert \nabla Q\right\vert}\Big(\Delta \left\vert \nabla Q\right\vert^2-2\left\vert \nabla \left\vert\nabla Q\right\vert\right\vert^2\Big)\\
&=\frac{1}{2\left\vert \nabla Q\right\vert}\bigg(\Delta \left\vert \nabla Q\right\vert^2-2\frac{\left\vert \nabla Q\cdot\nabla\nabla Q\right\vert^2}{\left\vert \nabla Q\right\vert^2}\bigg).
\end{align*}
On the other hand, since $\Delta Q=0$, one has
$$
\Delta \left\vert \nabla Q\right\vert^2=\sum_{1\leq j,k\leq n+1}\partial_j^2(\partial_kQ)^2
=2\sum_{1\leq j,k\leq n+1}(\partial_j\partial_k Q)^2=2\left\vert \nabla^2Q\right\vert^2.
$$
By combining the two previous identities, we conclude that
\begin{align*}
\Delta\left\vert\nabla Q\right\vert&=\frac{1}{\left\vert \nabla Q\right\vert^3}\Big(\left\vert \nabla Q\right\vert^2\left\vert \nabla^2Q\right\vert^2-\left\vert \nabla Q\cdot \nabla \nabla Q\right\vert^2\Big).
\end{align*}
As we have seen in Remark~\ref{rema:34}, the previous term is
integrable on $\Omega$. So, we can use the dominated convergence theorem and let $\beta$ go to $+\infty$.
Then~\eqref{n370} implies that the contribution from the bottom disappears
from the limit and we obtain the wanted result~\eqref{n11}.
This completes the proof.
\end{proof}
\subsection{Proof of Theorem~\ref{T1}}
We are now ready to prove Theorem~\ref{T1}.
Let $(g,\mu)\in [0,+\infty)^2$ and assume that $h$ is a smooth solution to
$$
\partial_{t}h+G(h)(gh+\mu \kappa)=0.
$$
We want to prove that
$$
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0 \quad
\text{and}\quad \frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0.
$$
Multiplying the equation
$\partial_{t}h+G(h)(gh+\mu\kappa)=0$ by $h$ and
integrating over $\mathbf{T}^{d}$, one obtains that
\begin{equation}\label{n157}
\frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}h^2\diff \! x
=-g\int_{\mathbf{T}^{d}}hG(h)h\diff \! x-\mu\int_{\mathbf{T}^{d}}h G(h)\kappa\diff \! x.
\end{equation}
The first term in the right-hand side is non-positive
since $G(h)$ is a non-negative operator. Indeed, as we recalled in the introduction,
considering an arbitrary function $\psi$ and denoting by $\varphi$ its
harmonic extension,
it follows from Stokes' theorem that
\begin{equation}\label{positivityDN}
\int_{\mathbf{T}^{d}} \psi G(h)\psi\diff \! x=\int_{\partial\Omega}\varphi \partial_n \varphi\diff\mathcal{H}^{d}=
\iint_{\Omega}\left\vert\nabla_{x,y}\varphi\right\vert^2\diff \! ydx\ge 0.
\end{equation}
This proves that
$$
-g\int_{\mathbf{T}^{d}}hG(h)h\diff \! x\leq 0.
$$
We now prove that the second term in the right-hand side of \eqref{n157} is also non-positive. To see this,
we use~\eqref{n11} and the fact that~$G(h)$ is self-adjoint:
$$
\int_{\mathbf{T}^{d}}h G(h)\kappa\diff \! x=\int_{\mathbf{T}^{d}}\kappa G(h)h\diff \! x =J(h)\ge 0.
$$
This proves that
$$
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x\leq 0.
$$
It remains to prove that $\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)\leq 0$. Write
\begin{align*}
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)&=\frac{\diff}{\dt} \int_{\mathbf{T}^{d}}\sqrt{1+|\nabla h|^2}\diff \! x\\
&=\int_{\mathbf{T}^{d}}\nabla_x (\partial_th) \cdot \frac{\nabla_x h}{\sqrt{1+|\nabla h|^2}}\diff \! x\\
&=\int_{\mathbf{T}^{d}} (\partial_th)\kappa \diff \! x,
\end{align*}
to obtain
$$
\frac{\diff}{\dt} \mathcal{H}^{d}(\Sigma)=-\mu\int_{\mathbf{T}^{d}}\kappa G(h)\kappa\diff \! x-g J(h)\leq 0,
$$
where we used again~\eqref{n11} and the property~\eqref{positivityDN} applied with $\psi=\kappa$.
This completes the proof.
\section{Strong decay for the Hele-Shaw equation}\label{S:J(h)decays}
In this section we prove Theorem~\ref{Theorem:J(h)decays} about
the monotonicity of $J(h)$ for solutions of
the Hele-Shaw equation.
Recall that, by notation,
$$
J(h)=\int_{\mathbf{T}^{d}} \kappa G(h)h\diff \! x\quad\text{where}\quad
\kappa=-\cnx\left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right).
$$
We want to prove that $J(h)$ is non-increasing under a mild-smallness assumption on $\nabla_{x,t}h$ at initial time.
\begin{proposition}\label{LJ(h)I1}
Assume that
$h$ is a smooth solution to the Hele-Shaw equation $\partial_t h+G(h)h=0$. Then
\begin{equation}\label{n132}
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x
-\int_{\mathbf{T}^d}\kappa \theta\diff \! x\leq 0,
\end{equation}
where
\begin{equation}\label{defi:theta}
\theta=G(h)\left(\frac{\left\vert\nabla_{t,x}h\right\vert^2}{1+|\nabla h|^2}\right)
-\cnx\left(\frac{\left\vert\nabla_{t,x}h\right\vert^2}{1+|\nabla h|^2}\nabla h\right),
\end{equation}
with $\left\vert\nabla_{t,x}h\right\vert^2=(\partial_t h)^2+\left\vert \nabla h\right\vert^2$.
In addition, if $d=1$ then \eqref{n132} is in fact an equality.
\end{proposition}
\begin{proof}
If $h$ solves the Hele-Shaw equation $\partial_t h+G(h)h=0$, one can rewrite $J(h)$ under the form
$$
J(h)=-\int_{\mathbf{T}^d} \kappa h_t \diff \! x,
$$
where $h_t$ is a shorthand notation for $\partial_t h$.
Consequently,
\begin{equation}\label{esti:final10}
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\kappa_t h_t\diff \! x+\int_{\mathbf{T}^d}\kappa h_{tt}\diff \! x=0.
\end{equation}
Let us compute the first integral. To do so, we use the Leibniz rule and then integrate
by parts, to obtain
\begin{align*}
\int_{\mathbf{T}^d}\kappa_t h_t\diff \! x
&=-\int_{\mathbf{T}^d}\cnx\left(\frac{\nabla h_t}{\sqrt{1+|\nabla h|^2}}-\frac{\nabla h\cdot \nabla h_t}{(1+|\nabla h|^2)^{3/2}}\nabla h\right)h_t\diff \! x\\
&=\int_{\mathbf{T}^d}\frac{(1+|\nabla h|^2)\left\vert \nabla h_t\right\vert^2-(\nabla h\cdot\nabla h_t)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x.
\end{align*}
Now, the Cauchy-Schwarz inequality implies that
$$
(1+|\nabla h|^2)\left\vert \nabla h_t\right\vert^2-(\nabla h\cdot\nabla h_t)^2\ge \left\vert \nabla h_t\right\vert^2.
$$
(Notice that this is an equality in dimension $d=1$.) It follows from~\eqref{esti:final10} that
\begin{equation}\label{n131}
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert \nabla h_t\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x+\int_{\mathbf{T}^d}\kappa h_{tt}\diff \! x\leq 0.
\end{equation}
We now move to the most interesting part of the proof, which is the study of the second term $\int \kappa h_{tt}$.
The main idea is to use the fact that the Hele-Shaw equation can be written under the form
of a modified Laplace equation. Let us pause to
recall this argument introduced in~\cite{Aconvexity}.
For the reader's convenience,
we begin by considering
the linearized equation, which reads $\partial_t h+G(0)h=0$. Since the
Dirichlet-to-Neumann operator $G(0)$ associated to a flat half-space
is given by $G(0)=\lvert D\rvert$, that is the Fourier multiplier defined by
$\lvert D\rvert e^{ix\cdot\xi}
=\lvert \xi\rvert e^{ix\cdot\xi}$, the linearized Hele-Shaw equation reads
$$
\partial_t h+\left\vert D\right\vert h=0.
$$
Since $-\left\vert D\right\vert^2=\Delta$, we find that
$$
\Delta_{t,x}h=\partial_t^2 h+\Delta h=0.
$$
The next result generalizes this observation to the Hele-Shaw equation.
The next result generalizes this observation to the Hele-Shaw equation.
\begin{theorem}[from~\cite{Aconvexity}]\label{proposition:elliptic}
Consider a smooth solution $h$ to $\partial_t h+G(h)h=0$. Then
\begin{equation}\label{n131A}
\Delta_{t,x}h+B(h)^*\big( \left\vert \nabla_{t,x}h\right\vert^2\big)=0,
\end{equation}
where $B(h)^*$ is the adjoint (for the $L^2(\mathbf{T}^d)$-scalar product)
of the operator defined by
$$
B(h)\psi=\partial_y \mathcal{H}(\psi)\arrowvert_{y=h},
$$
where $\mathcal{H}(\psi)$ is the harmonic extension of $\psi$, solution to
$$
\Delta_{x,y}\mathcal{H}(\psi)=0\quad \text{in }\Omega,\qquad \mathcal{H}(\psi)\arrowvert_{y=h}=\psi.
$$
\end{theorem}
We next replace the operator $B(h)^*$ by an explicit expression which is easier to handle.
Directly from the definition of $B(h)$ and the chain rule, one can check that (see for instance
Proposition~$5.1$ in \cite{AMS}),
$$
B(h)\psi=\frac{G(h)\psi+\nabla h\cdot \nabla \psi}{1+|\nabla h|^2}\cdot
$$
Consequently,
$$
B(h)^*\psi=G(h)\left(\frac{\psi}{1+|\nabla h|^2}\right)
-\cnx\left(\frac{\psi}{1+|\nabla h|^2}\nabla h\right).
$$
It follows that
\begin{equation}\label{n131AA}
B(h)^*\big( \left\vert \nabla_{t,x}h\right\vert^2\big)=\theta,
\end{equation}
where $\theta$ is as defined in the statement of Proposition~\ref{LJ(h)I1}.
We now go back to the second term in the right-hand side of~\eqref{n131} and write that
$$
\int_{\mathbf{T}^d}\kappa h_{tt}\diff \! x=\int_{\mathbf{T}^d}\kappa \Delta_{t,x}h\diff \! x-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x.
$$
(To clarify notations, recall that $\Delta$ denotes the Laplacian with respect to the variable $x$ only.)
By plugging this in~\eqref{n131} and using \eqref{n131A}--\eqref{n131AA},
we get
$$
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert \nabla h_t\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x
-\int_{\mathbf{T}^d}\kappa \theta\diff \! x
-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x\leq 0.
$$
As a result, to complete the proof of Proposition~\ref{LJ(h)I1}, it remains only to show that
\begin{equation}\label{claim:kappahxx}
-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x\ge\int_{\mathbf{T}^d}\frac{|\nabla^2 h|^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x.
\end{equation}
Notice that in dimension $d=1$, we have
$$
\kappa=-\frac{\partial_x^2 h}{(1+(\partial_xh)^2)^{3/2}},
$$
so~\eqref{claim:kappahxx} is in fact
an equality. To prove~\eqref{claim:kappahxx} in arbitrary dimension,
we begin by applying the Leibniz rule to write
\begin{equation}\label{n4001}
-\kappa=\frac{\Delta h}{\sqrt{1+|\nabla h|^2}}-\frac{\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}},
\end{equation}
where we use the standard notations $\nabla h\otimes \nabla h=((\partial_ih)(\partial_j h))_{1\leq i,j\leq d}$,
$\nabla ^2h=(\partial_i\partial_j h)_{1\leq i,j\leq d}$ together with $A:B=\sum_{i,j}a_{ij}b_{ij}$. So,
\begin{equation}\label{n147}
-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x=
\int_{\mathbf{T}^d}\frac{(\Delta h)^2}{\sqrt{1+|\nabla h|^2}}\diff \! x
-\int_{\mathbf{T}^d}\frac{(\Delta h)\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}}\diff \! x.
\end{equation}
On the other hand,
by integrating by parts twice, we get
\begin{align*}
&\int_{\mathbf{T}^d}\frac{(\Delta h)^2}{\sqrt{1+|\nabla h|^2}}\diff \! x\\
&\qquad=\sum_{i,j}\int_{\mathbf{T}^d}\frac{(\partial_i^2 h)(\partial_j^2h)}{\sqrt{1+|\nabla h|^2}}\diff \! x\\
&\qquad=\sum_{i,j}\int_{\mathbf{T}^d}\frac{(\partial_i\partial_j h)^2}{\sqrt{1+|\nabla h|^2}}\diff \! x\\
&\qquad\quad
+\sum_{i,j,k}\int_{\mathbf{T}^d}\frac{(\partial_ih)(\partial_k h)(\partial_j^2h)(\partial_i\partial_kh)-(\partial_ih)(\partial_kh)(\partial_{i}\partial_{j}h)(\partial_j\partial_kh)}{(1+|\nabla h|^2)^{3/2}}\diff \! x\\
&\qquad=\int_{\mathbf{T}^d}\frac{(1+|\nabla h|^2)\left\vert\nabla ^2h\right\vert^2+(\Delta h)\nabla h\otimes \nabla h:\nabla^2 h-(\nabla h\cdot \nabla^2h)^2
}{(1+|\nabla h|^2)^{3/2}}\diff \! x.\\
\end{align*}
By combining this with~\eqref{n147} and simplifying, we obtain
$$
-\int_{\mathbf{T}^d}\kappa \Delta h\diff \! x=\int_{\mathbf{T}^d}\frac{(1+|\nabla h|^2)\left\vert\nabla ^2h\right\vert^2-(\nabla h\cdot\nabla\nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x.
$$
Now, by using the Cauchy-Schwarz inequality in $\mathbf{R}^d$, we obtain the wanted inequality~\eqref{claim:kappahxx}, and
the proposition follows.
\end{proof}
In view of the previous proposition, to prove that $J(h)$ is non-increasing,
it remains to show that the last term in the
left-hand side of~\eqref{n132} can be absorbed by the second one.
It does not seem feasible to get such a result by exploiting some special identity for the solutions,
but, as we will see, we do have an inequality
which holds under a very mild smallness assumption.
We begin by making a
smallness assumption on the space and time
derivatives of the unknown~$h$. We
will next apply a maximum principle to bound these derivatives
in terms of the initial data only.
\begin{lemma}\label{Lemma:L938}
Let $c_d<1/2$ and assume that
\begin{equation}\label{assu:L938}
\sup_{t,x}\left\vert \nabla h(t,x)\right\vert^2 \leq c_d,\quad
\sup_{t,x} ( h_t(t,x))^2\leq c_d.
\end{equation}
Then
\begin{equation}\label{n141}
\int_{\mathbf{T}^d}\kappa \theta\diff \! x
\leq \gamma_d \int_{\mathbf{T}^d}\frac{\left\vert\nabla h_t\right\vert^2
+\left\vert \nabla^2h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x
\end{equation}
with
$$
\gamma_d = 2 c_d \left(d+\left(d+\sqrt{d}\right) c_d\right) + 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}.
$$
\end{lemma}
\begin{proof}
To shorten notations, let us set
$$
H\mathrel{:=} \int_{\mathbf{T}^d}\frac{\left\vert\nabla h_t\right\vert^2
+\left\vert \nabla^2h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x,
$$
and
$$
\zeta\mathrel{:=}\frac{\left\vert\nabla_{t,x}h\right\vert^2}{1+|\nabla h|^2}
=\frac{(\partial_th)^2+ \left\vert\nabla h\right\vert^2}{1+|\nabla h|^2}.
$$
Then, by definition of $\theta$ (see~\eqref{defi:theta}), we have
$$
\theta=G(h)\zeta-\cnx (\zeta\nabla h)
= I_1 + I_2,
$$
with
$$
I_1= - \zeta \Delta h
$$
and
$$
I_2 = G(h)\zeta - \nabla \zeta\cdot \nabla h.
$$
We will study the contributions of $I_1$ and $I_2$ to
$\int \kappa \theta\diff \! x$ separately.
{1) \em Contribution of $I_1$.}
We claim that
\begin{equation}\label{n4002}
-\int_{\mathbf{T}^d}\kappa \zeta \Delta h\diff \! x\leq
\int_{\mathbf{T}^d}\zeta \left(d+ (d+\sqrt d)|\nabla h|^2\right) \frac{|\nabla\nabla h|^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x.
\end{equation}
To see this, we use again \eqref{n4001}, to write
$$
-\kappa\zeta \Delta h=
\zeta\frac{(\Delta h)^2}{\sqrt{1+|\nabla h|^2}}-\zeta
\frac{(\Delta h)\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}}.
$$
Then we recall that for all $v\colon\mathbf{R}^d \to \mathbf{R}^d$
$$
(\cnx v)^2
=\sum_i\sum_j \partial_i v_i \partial_j v_j
\leq \sum_i\sum_j \frac{1}{2}
\bigl((\partial_i v_i)^2 + (\partial_j v_j)^2\bigr)
\leq d |\nabla v|^2,
$$
and therefore
\begin{equation}\label{div}
(\Delta h)^2 \leq d \,|\nabla\nabla h|^2.
\end{equation}
Then, by using the Cauchy-Schwarz inequality, we prove
the claim~\eqref{n4002}.
Now, observe that, by definition of $\zeta$ we have $\zeta\leq \left\vert\nabla_{t,x}h\right\vert^2$. So,
by assumption \eqref{assu:L938}, we deduce that
\begin{align*}
\zeta\left(d+ (d+\sqrt d)|\nabla h|^2\right)&\leq \left\vert \nabla_{t,x} h\right\vert^2\left(d+ (d+\sqrt d)|\nabla h|^2\right)\\
&\leq 2c_d\left(d+ (d+\sqrt d)c_d\right).
\end{align*}
Therefore, it follows from \eqref{n4002} that
\begin{equation}\label{n4002ter}
-\int_{\mathbf{T}^d}\kappa \zeta \Delta h\diff \! x\leq 2c_d\left(d+ (d+\sqrt d)c_d\right) H.
\end{equation}
{2) \em Contribution of $I_2$.}
We now estimate the quantity
\begin{equation}\label{n143}
\int_{\mathbf{T}^d} \kappa\, \big( G(h)\zeta-\nabla \zeta \cdot \nabla h\big)\diff \! x.
\end{equation}
We will prove that the absolute value of this term is bounded by
\begin{equation}\label{esti:final1}
4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}}H.
\end{equation}
By combining this estimate with \eqref{n4002ter}, this will imply the wanted inequality~\eqref{n141}.
To begin, we apply the Cauchy-Schwarz inequality to bound the absolute value of~\eqref{n143} by
$$
\left(\int_{\mathbf{T}^d} (1+ |\nabla h|^2)^{3/2}\kappa^2\diff \! x\right)^\frac{1}{2}
\left(\int_{\mathbf{T}^d} \frac{(G(h)\zeta
- \nabla \zeta\cdot \nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x\right)^\frac{1}{2}.
$$
We claim that
\begin{equation}\label{esti:final2}
\int_{\mathbf{T}^d} (1+ |\nabla h|^2)^{3/2}\kappa^2\diff \! x\leq 2\big(d+(d+1)c_d\big) H,
\end{equation}
and
\begin{equation}\label{esti:final3}
\int_{\mathbf{T}^d} \frac{(G(h)\zeta
- \nabla \zeta\cdot \nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x\leq
8c_d\left(\frac{12}{1-2c_d}+1\right)H.
\end{equation}
It will follow from these claims that the absolute value of~\eqref{n143} is bounded by~\eqref{esti:final1}, which in turn will
complete the proof of the lemma.
We begin by proving~\eqref{esti:final2}.
Recall from~\eqref{n4001} that
\begin{equation}\label{n4001bis}
-\kappa=\frac{\Delta h}{\sqrt{1+|\nabla h|^2}}-\frac{\nabla h\otimes \nabla h:\nabla^2 h}{(1+|\nabla h|^2)^{3/2}},
\end{equation}
and therefore, using the inequality $(\Delta h)^2 \leq d \,|\nabla\nabla h|^2$ (see~\eqref{div}),
$$
\kappa^2\leq
2\big(d+(d+1)|\nabla h|^2\big)
\frac{|\nabla\nabla h|^2}{(1+|\nabla h|^2)^3},
$$
which implies~\eqref{esti:final2}, remembering that $\left\vert \nabla h\right\vert^2\leq c_d$,
by assumption~\eqref{assu:L938}.
We now move to the proof of~\eqref{esti:final3}.
Since
$$
\frac{(G(h)\zeta
- \nabla \zeta\cdot \nabla h)^2}{(1+|\nabla h|^2)^{3/2}}\leq
2(G(h)\zeta)^2+2 \left\vert \nabla \zeta\right\vert^2,
$$
it is sufficient to prove that
\begin{equation}\label{n145}
\int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x
+\int_{\mathbf{T}^d} |\nabla \zeta|^2\diff \! x
\leq 4c_d\left(\frac{12}{1-2c_d}+1\right)H.
\end{equation}
To establish~\eqref{n145}, the crucial point will be to bound
the $L^2$-norm of $G(h)\zeta$ in terms of the $L^2$-norm of $\nabla \zeta$.
Namely, we now want to prove the following estimate:
if $|\nabla h|^2\leq c_d$ with $c_d<1/2$, then
\begin{equation}\label{d12}
\int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x\leq \frac{12}{1-2 c_d}
\int_{\mathbf{T}^d} |\nabla \zeta|^2\diff \! x.
\end{equation}
To do so, we use
the following inequality\footnote{This inequality belongs to the family of Rellich type inequalities,
which give a control on the boundary of the normal
derivative in terms of the tangential one.} (proved in Appendix~\ref{A:Rellich}):
\begin{equation}\label{d10}
\int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \leq
\int_{\mathbf{T}^d} (1+|\nabla h|^2)|\nabla \zeta-\mathcal{B} \nabla h|^2 \diff \! x,
\end{equation}
where
\begin{equation}\label{d11}
\mathcal{B}=\frac{G(h)\zeta+\nabla \zeta \cdot \nabla h}{1+|\nabla h|^2}.
\end{equation}
Then, by replacing $\mathcal{B}$ in \eqref{d10} by its expression \eqref{d11}, we obtain
\begin{multline*}
\int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x
\\
\leq\int_{\mathbf{T}^d} (1+|\nabla h|^2)
\left|\frac{(1+|\nabla h|^2){\rm Id}-\nabla h\otimes \nabla h}
{1+|\nabla h|^2}\nabla \zeta
-\frac{G(h)\zeta}{1+|\nabla h|^2}\nabla h\right|^2 \diff \! x.
\end{multline*}
So, expanding the right-hand side and simplifying, we get
\begin{align*}
&\int_{\mathbf{T}^d} \frac{1-|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x\\
&\qquad= \int_{\mathbf{T}^d}
\frac{|((1+|\nabla h|^2){\rm Id} -\nabla h \otimes \nabla h) \nabla \zeta|^2}{1+|\nabla h|^2} \diff \! x\\
&\qquad\quad
-2\int_{\mathbf{T}^d}\nabla h \cdot \frac{\bigl(((1+|\nabla h|^2){\rm Id}-\nabla h \otimes \nabla h) \nabla \zeta\bigr)}{1+|\nabla h|^2}
G(h)\zeta \diff \! x.
\end{align*}
Hence, by using the Young inequality,
\begin{align*}
\int_{\mathbf{T}^d} \frac{1-|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x
&\leq \int_{\mathbf{T}^d}
\frac{\bigl|(1+|\nabla h|^2){\rm Id} -\nabla h\otimes \nabla h\bigr|^2}{1+|\nabla h|^2}
|\nabla \zeta|^2\diff \! x\\
&\quad+\int_{\mathbf{T}^d} \frac{|\nabla h|^2}{1+|\nabla h|^2} (G(h)\zeta)^2\diff \! x\\
&\quad+\int_{\mathbf{T}^d} \frac{|(1+|\nabla h|^2){\rm Id} -\nabla h\otimes\nabla h|^2}
{1+|\nabla h|^2} |\nabla \zeta|^2\diff \! x.
\end{align*}
Now we write
$$
\frac{|((1+|\nabla h|^2){\rm Id} -\nabla h \otimes \nabla h) |^2}{1+|\nabla h|^2}\leq
\frac{(1+2|\nabla h|^2)^2}{1+|\nabla h|^2},
$$
to obtain
$$
\int_{\mathbf{T}^d} \frac{1-2|\nabla h|^2}{1+|\nabla h|^2}
(G(h)\zeta)^2\diff \! x\leq 2\int_{\mathbf{T}^d}
\frac{(1+2|\nabla h|^2)^2}{1+|\nabla h|^2}|\nabla \zeta|^2\diff \! x.
$$
Now, recalling that $|\nabla h|^2 \leq c_d < 1/2$, we get
$$
\int_{\mathbf{T}^d}
(G(h)\zeta)^2\diff \! x\leq 2 \frac{(1+c_d) (1+2c_d)^2 }{1-2 c_d}\int_{\mathbf{T}^d}
|\nabla \zeta|^2\diff \! x\leq \frac{12}{1-2 c_d}\int_{\mathbf{T}^d}
|\nabla \zeta|^2\diff \! x.
$$
In view of~\eqref{d12}, to prove the wanted
inequality~\eqref{n145}, we are reduced to establishing
$$
\int_\mathbf{T} |\nabla \zeta|^2\diff \! x\leq 4 c_d
\int_\mathbf{T}\frac{|\nabla h_t|^2
+ |\nabla\nabla h|^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x.
$$
Since
$$
\nabla \zeta = 2\frac{h_t}{(1+|\nabla h|^2)^{1/4} }
\frac{\nabla h_t}{(1+|\nabla h|^2)^{3/4}}
+ 2\frac{(1-(h_t)^2)\nabla h}{(1+|\nabla h|^2)^{5/4}}
\cdot \frac{\nabla\nabla h}{(1+|\nabla h|^2)^{3/4}},
$$
the latter inequality will be satisfied provided that
$$
\frac{\left((1-(h_t)^2)|\nabla h|\right)^2}{(1+|\nabla h|^2)^{5/2}}\leq c_d,
\quad
\frac{\left( h_t\right)^2}{(1+|\nabla h|^2)^{1/2}}\leq c_d.
$$
The latter couple of conditions are obviously satisfied when
\begin{equation}\label{n150}
|\nabla h|^2\leq c_d,\quad |h_t|^2\leq c_d\quad\text{with}\quad c_d< \frac{1}{2}.
\end{equation}
This completes the proof of Lemma~\ref{Lemma:L938}.
\end{proof}
We are now in position to complete the proof. Recall that Proposition~\ref{LJ(h)I1} implies that
$$
\frac{\diff}{\dt} J(h)+\int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x
\leq \int_\mathbf{T}\kappa \theta\diff \! x.
$$
On the other hand, Lemma~\ref{Lemma:L938} implies that
$$
\int_\mathbf{T}\kappa \theta\diff \! x\leq \gamma_d
\int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x,
$$
with
$$
\gamma_d = 2 c_d \left(d+\left(d+\sqrt{d}\right) c_d\right) + 4 \left(c_d\left(d+ (d+1) c_d\right)\left(\frac{12}{1-2c_d}+1\right)\right)^{\frac{1}{2}},
$$
provided
\begin{equation}\label{esti:final4}
\sup_{t,x}\left\vert \nabla h(t,x)\right\vert^2 \leq c_d,\quad
\sup_{t,x} ( h_t(t,x))^2\leq c_d.
\end{equation}
We now fix $c_d\in [0,1/4]$ by solving the equation $\gamma_d=1/2$
(the latter equation has a unique solution since $c_d\mapsto \gamma_d$ is strictly increasing).
It follows that,
\begin{equation}\label{n152}
\frac{\diff}{\dt} J(h)+\frac{1}{2} \int_{\mathbf{T}^d}\frac{\left\vert\nabla\partial_t h\right\vert^2+\left\vert\nabla^2 h\right\vert^2}{(1+|\nabla h|^2)^{3/2}}\diff \! x\leq 0.
\end{equation}
The expected decay of $J(h)$ is thus seen to hold
as long as the solution $h=h(t,x)$ satisfies the assumption~\eqref{esti:final4}. Consequently,
to conclude the proof of Theorem~\ref{Theorem:J(h)decays},
it remains only to show that the assumption~\eqref{esti:final4} on the solution will hold provided
that it holds initially. To see this, we use the fact that
there is a maximum
principle for the Hele-Shaw equation, for
space \emph{and} time derivatives (the maximum principle for spatial
derivatives is well-known (see~\cite{Kim-ARMA2003,ChangLaraGuillenSchwab,AMS}),
the one for time derivative is given by Theorem~$2.11$ in~\cite{AMS}).
This means that the assumption \eqref{n150} holds
for all time $t\ge 0$ provided that it holds at time $0$.
This concludes the proof of Theorem~\ref{Theorem:J(h)decays}.
\section{On the thin-film equation}
The aim of this section is two-fold. Firstly, for the reader's convenience,
we collect various known results for the equation
$$
\partial_th-\cnx\big(h\nabla(gh-\mu\Delta h)\big)=0.
$$
Secondly, we study the decay of certain Lebesgue norms for the thin-film equation
\begin{equation}\label{thinfilm-d}
\partial_t h +\cnx (h\nabla \Delta h) = 0.
\end{equation}
Recall that we consider only smooth positive solutions. Then,
since
$$
\partial_t \int_{\mathbf{T}^{d}} h\diff \! x=0,
$$
and since $h=|h|$, the $L^1$-norm is preserved and, obviously,
it is a Lyapunov functional.
We study more generally the decay of Lebesgue norms $\int h^p\diff \! x$ with $p>0$.
The study of this question
goes back to the work of Beretta, Bertsch and Dal Passo~\cite{BerettaBDP-ARMA-1995}
and was continued by Dal Passo, Garcke and Gr\"{u}n~\cite{DPGG-Siam-1998},
Bertsch, Dal Passo, Garcke and Gr\"{u}n~\cite{BDPGG-ADE-1998}. In these papers,
it is proved that
$$
\int_{\mathbf{T}^d} h^{p}\diff \! x,
$$ is a Lyapunov functional for $1/2< p< 2$ and $d=1$. More recently,
J\"{u}ngel and Matthes performed in~\cite{JungelMatthes-Nonlinearity-2006}
a systematic study of entropies
for the thin-film equation, based on a computer assisted proof. They obtain the same result, allowing for the endpoints,
that is for
$1/2\leq p\leq 2$; they give a complete proof in space dimension $d=1$ and sketch the proof of the general case in Section $5$ of their paper.
Here, we will not prove any new result, but we propose a new proof of the
fact that $\int_{\mathbf{T}^d} h^p\diff \! x$ is a Lyapunov functional in any dimension $d\ge 1$ and for any $p$ in the closed interval $[1/2,2]$.
Our proof is self-contained and elementary. This will allow us to introduce a functional
inequality as well as some integration by parts arguments used later to study the Boussinesq equation.
\subsection{Classical Lyapunov functionals}
\begin{proposition}\label{prop:lubrik1}
Let $(g,\mu)\in [0,+\infty)^2$ and assume that $h$ is a smooth positive solution to
the thin-film equation
$$
\partial_th-\partial_x\big(h\partial_x(gh-\mu\partial_x^2 h)\big)=0.
$$
Then
$$
\frac{\diff}{\dt} \int_{\mathbf{T}} h^2\diff \! x \leq 0\quad\text{and}\quad \frac{\diff}{\dt}\int_{\mathbf{T}} (\partial_xh)^2\diff \! x\leq 0.
$$
\end{proposition}
\begin{proof}
Multiply the equation by $h$ and integrate by parts. Then
$$
\frac{1}{2}\frac{\diff}{\dt}\int_\mathbf{T} h^2\diff \! x+g\int_\mathbf{T} hh_x^2\diff \! x+\mu\int_\mathbf{T} hh_xh_{xxx}\diff \! x=0.
$$
Now notice that
$$
\int_\mathbf{T} hh_xh_{xxx}\diff \! x=-\int_\mathbf{T} h_x^2h_{xx}\diff \! x-\int_\mathbf{T} hh_{xx}^2\diff \! x=-\int_\mathbf{T} hh_{xx}^2\diff \! x.
$$
Consequently,
$$
\frac{1}{2}\frac{\diff}{\dt}\int_\mathbf{T} h^2\diff \! x+\int_\mathbf{T} h(gh_x^2+\mu h_{xx}^2)\diff \! x= 0.
$$
Similarly, by multiplying the equation by $h_{xx}$
and integrating by parts, one obtains that
$$
\frac{1}{2} \frac{\diff}{\dt} \int_\mathbf{T} h_x^2\diff \! x+\int_\mathbf{T} h\big( gh_{xx}^2+\mu h_{xxx}^2\big)\diff \! x=0.
$$
Now, it follows directly from the assumption $h\ge 0$ that
$$
\frac{\diff}{\dt} \int_\mathbf{T} h^2\diff \! x\leq 0 \quad\text{and}\quad \frac{\diff}{\dt} \int_\mathbf{T} (\partial_x h)^2\diff \! x\leq 0.
$$
This completes the proof.
\end{proof}
Half of the previous results can be generalized to the $d$-dimensional case
in a straightforward way.
\begin{proposition}\label{prop:lubrik1n}
Let $d\ge 1$. If $h$ is a smooth positive solution to
the thin-film equation
\begin{equation}\label{TFwith}
\partial_th+\cnx\big(h\nabla \Delta h\big)=0,
\end{equation}
then
$$
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} \left\vert \nabla h\right\vert^2\diff \! x\leq 0.
$$
If $h$ is a smooth positive solution to
\begin{equation}\label{TFwithout}
\partial_th-\cnx\big(h\nabla h\big)=0,
\end{equation}
then
$$
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x \leq 0.
$$
\end{proposition}
\begin{proof}
For the first point, we multiply the equation by $-\Delta h$ and integrate by parts.
For the second point, we multiply the equation by $h$ and integrate by parts.
\end{proof}
This raises the question of proving the decay of the $L^2$-norm for \eqref{TFwith}
(resp.\ the decay of the $L^2$-norm of $\nabla h$ for \eqref{TFwithout}) in arbitrary dimension.
We study these questions in the rest of this section (resp.\ in Section~\ref{S:Boussinesq}).
\sigmaubsection{Decay of certain Lebesgue norms}
We begin by considering the special case of the $L^2$-norm.
The interesting point is that, in this case, we are able to prove that it is a Lyapunov functional by
means of a very simple argument.
\begin{proposition}\label{prop:L2decaysagain}
Let $d\ge 1$ and consider a smooth positive solution $h$ to~$\partial_t h +\cnx (h\nabla \Delta h) = 0$.
Then
\begin{equation}\label{decayL2TF}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h(t,x)^2\diff \! x+\frac{2}{3}\int_{\mathbf{T}^{d}} h |\nabla\nabla h|^2 \diff \! x
+ \frac{1}{3} \int_{\mathbf{T}^{d}} h |\Delta h|^2\diff \! x = 0.
\end{equation}
\end{proposition}
\begin{proof}
The energy identity reads
$$
\frac{1}{2} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h(t,x)^2\diff \! x+I=0
$$
where
$$
I= - \int_{\mathbf{T}^{d}} h \nabla h \cdot \nabla \Delta h\diff \! x.
$$
Integrating by parts we get
$$
I= \int_{\mathbf{T}^{d}} h|\Delta h|^2 \diff \! x
+ \int_{\mathbf{T}^{d}} |\nabla h|^2 \Delta h \diff \! x= I_1+I_2.
$$
We integrate by parts again to rewrite $I_2$ under the form
$$
I_2 = - 2\int_{\mathbf{T}^{d}} ((\nabla h \cdot \nabla) \nabla h) \cdot \nabla h\diff \! x
= 2 \int_{\mathbf{T}^{d}} h |\nabla \nabla h|^2\diff \! x
- 2 I.$$
It follows that
$$
I = \frac{2}{3}\int_{\mathbf{T}^{d}} h |\nabla\nabla h|^2 \diff \! x+ \frac{1}{3} \int_{\mathbf{T}^{d}} h |\Delta h|^2\diff \! x,
$$
which is the wanted result.
\end{proof}
As explained in the paragraph at the beginning of this section, our main goal is to
give a simple and self-contained proof of the fact that the quantities $\int_{\mathbf{T}^d} h^p\diff \! x$ are Lyapunov functionals
for any $d\ge 1$ and any real number $p$ in $[1/2,2]$. To do so,
the key ingredient will be
a new functional inequality of independent interest which is given by the following
\begin{proposition}\label{P:refD.1v2}
For any $d\ge 1$, any real number $\mu$ and any bounded positive function $\theta$ in $H^2(\mathbf{T}^{d})$,
\begin{equation}\label{youpi2}
\frac{\mu^2}{3}\int_{\mathbf{T}^{d}} \theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x
\leq
\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\Delta \theta)^2 \diff \! x+
2\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\nabla\nabla \theta)^2 \diff \! x.
\end{equation}
\end{proposition}
\begin{remark}\label{Rema:endpoint}
Dal Passo, Garcke and Gr\"un proved in \cite[Lemma~$1.3$]{DPGG-Siam-1998} the following identity:
\begin{align*}
&\int_{\mathbf{T}^{d}}f'(\theta)\left\vert\nabla \theta\right\vert^2\Delta \theta\diff \! x\\
&\qquad=-\frac{1}{3}\int_{\mathbf{T}^{d}}f''(\theta)\left\vert \nabla \theta\right\vert^4\diff \! x\\
&\qquad\quad+\frac{2}{3}\left(\int_{\mathbf{T}^{d}}f(\theta)\left\vert \nabla^2\theta\right\vert^2\diff \! x-\int_{\mathbf{T}^{d}}f(\theta)(\Delta \theta)^2\diff \! x\right).
\end{align*}
Assuming that $\mu\neq -1$, by using this equality with $f(\theta)=\theta^\mu$, we obtain that
\begin{multline}\label{youpi2-vDPGG}
\int_{\mathbf{T}^{d}} \theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x\\
\qquad\qquad\leq C(\mu)\left(
\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\Delta \theta)^2 \diff \! x+
\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\nabla\nabla \theta)^2 \diff \! x\right).
\end{multline}
So \eqref{youpi2} is a variant of the previous inequality which holds uniformly in $\mu$
(this means that we can consider the case $\mu=-1$ already encountered in Proposition~\ref{theo:logSob}, the latter case being
important for the application since it is needed to control the $L^2$-norms).
\end{remark}
\begin{proof}
The result is obvious when $\mu=0$ so we assume $\mu\neq 0$ in the sequel.
We then proceed as in the proof of Proposition~\ref{theo:logSob}. We begin by writing that
\begin{align*}
\int_{\mathbf{T}^{d}}\theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x
&=\int_{\mathbf{T}^{d}}\frac{1}{\mu}\nabla \theta^\mu\cdot \nabla \theta \left\vert \nabla\theta\right\vert^2\diff \! x\\
&=-\frac{1}{\mu}\int_{\mathbf{T}^{d}}\theta^\mu (\Delta \theta)\left\vert \nabla \theta\right\vert^2\diff \! x\\
&\quad-\frac{2}{\mu}\int_{\mathbf{T}^{d}}\theta^\mu
\nabla \theta\cdot\big[ (\nabla \theta\cdot\nabla )\nabla \theta\big]\diff \! x.
\end{align*}
Then, by using Cauchy-Schwarz arguments similar to the ones used in the proof of Proposition~\ref{theo:logSob}, we infer that
\begin{multline*}
\int_{\mathbf{T}^{d}} \theta^{\mu-1}\big|\nabla \theta\big|^4\diff \! x\\
\qquad\qquad\leq \frac{1}{\mu^2}\left(
\left(\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\Delta \theta)^2 \diff \! x\right)^{\frac{1}{2}}+
2\left(\int_{\mathbf{T}^{d}} \theta^{\mu+1} (\nabla\nabla \theta)^2 \diff \! x\right)^{\frac{1}{2}}\right)^2.
\end{multline*}
To conclude the proof, it remains only to use
the elementary inequality $(x+2y)^2\leq 3(x^2+2y^2)$.
\end{proof}
We are now ready to give an elementary proof of the following result.
\begin{proposition}\label{positivity}
Consider a real number $m$ in $[-1/2,0)\cup(0,1]$.
Then, for all smooth solution to $\partial_t h +\cnx (h\nabla \Delta h) = 0$,
\begin{equation}\label{wantedmn1}
\frac{1}{m(m+1)}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^{m+1}\diff \! x +C_m\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x\leq 0,
\end{equation}
where
$$
C_m=\frac{1}{9}(-2m^2+m+1)\ge 0.
$$
\end{proposition}
\begin{proof}
We begin by multiplying the equation by $\frac{1}{m}h^m$ and
integrating by parts, to get
\begin{align*}
&\frac{1}{m(m+1)}\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^{m+1}\diff \! x+ P_m = 0\quad\text{where}\\
&P_m=-\frac{1}{m}\int_{\mathbf{T}^{d}}\nabla h^m \cdot \big( h\nabla \Delta h\big)\diff \! x
=-\int_{\mathbf{T}^{d}}h^m\nabla h\cdot \nabla \Delta h\diff \! x.
\end{align*}
Now, we use the following trick: there are two possible integrations by parts to
compute an integral of the form
$\int f\nabla g\cdot \nabla \Delta h\diff \! x$.
Indeed,
\begin{align*}
&\int_{\mathbf{T}^{d}} f(\partial_i g)(\partial_i \partial_j^2 h)\diff \! x=-\int f (\partial_i^2 g)(\partial_j ^2h)\diff \! x
-\int (\partial_if)(\partial_ig)\partial_j^2h\diff \! x\\
&\int_{\mathbf{T}^{d}} f(\partial_i g)( \partial_i \partial_j^2 h)\diff \! x=-\int f (\partial_i\partial_j g)(\partial_i\partial_jh)\diff \! x
-\int (\partial_jf)(\partial_ig)\partial_j^2h\diff \! x.
\end{align*}
Consequently, one has two different expressions for $P_m$:
\begin{align}
P_m&=\int_{\mathbf{T}^{d}}h^m(\Delta h)^2\diff \! x+m\int_{\mathbf{T}^{d}}h^{m-1}\left\vert\nabla h\right\vert^2 \Delta h\diff \! x,\label{Pm1}\\
P_m&=\int_{\mathbf{T}^{d}}h^m\left\vert\nabla^2 h\right\vert^2\diff \! x+m\int_{\mathbf{T}^{d}}h^{m-1}\nabla h\otimes \nabla h:\nabla^2 h\diff \! x.\label{Pm2}
\end{align}
To exploit the fact that there are two different identities for $P_m$, we need to
figure out the most appropriate linear combination of \eqref{Pm1} and~\eqref{Pm2}.
To do so, we will exploit the following cancellation
\begin{equation}\label{identitysimplei}
\int \Big[f |\nabla \rho|^2\Delta \rho+2 f \nabla \nabla \rho : \nabla \rho \otimes \nabla \rho\Big] \diff \! x
=-\int \left\vert \nabla \rho\right\vert^2\nabla f\cdot\nabla \rho\diff \! x,
\end{equation}
which is proved again by an integration by parts:
$$
\int f (\partial_i\rho)^2\partial_j^2 \rho\diff \! x=-2\int f (\partial_i\rho)(\partial_i\partial_j\rho)\partial_j \rho\diff \! x
-\int (\partial_j f)(\partial_i\rho)^2\partial_j \rho\diff \! x.
$$
This suggests to add the right-hand side of \eqref{Pm1} with two times the right-hand side of \eqref{Pm2}.
This implies that
$$
3P_m=\int_{\mathbf{T}^{d}}h^m \Big( 2\left\vert\nabla^2 h\right\vert^2+(\Delta h)^2\Big)\diff \! x-m(m-1)\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x.
$$
Now, the functional inequality \eqref{youpi2} applied with $\mu=m-1$, implies that
$$
\int_{\mathbf{T}^{d}}h^m \Big( 2\left\vert\nabla^2 h\right\vert^2+(\Delta h)^2\Big)\diff \! x\ge \frac{(m-1)^2}{3}\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x.
$$
We thus obtain the wanted lower bound for the dissipation rate:
$$
P_m\ge C_m\int_{\mathbf{T}^{d}}h^{m-2}\left\vert\nabla h\right\vert^4\diff \! x\quad\text{with}\quad
C_m=\frac{1}{3}\Big( \frac{(m-1)^2}{3}-m(m-1)\Big).
$$
Now, it remains only to observe that the above constant $C_m$ is non-negative when
$-2m^2+m+1\ge 0$, that is for $m$ in $[-1/2,1]$.
\end{proof}
\section{The Boussinesq equation}\label{S:Boussinesq}
In this section, we begin by studying Lyapunov functionals for the Boussinesq equation
$$
\partial_th-\cnx(h\nabla h)=0.
$$
By a straightforward integration by parts argument, one has the following
\begin{proposition}\label{convexporoust}
Consider a smooth positive solution to
\begin{equation}\label{boussinesqpasdarcy}
\partial_t h -\cnx (h \nabla h) = 0.
\end{equation}
For any real number $m$,
\begin{equation}\label{estim1}
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^{m+1}\diff \! x + m(m+1)\int_{\mathbf{T}^{d}} h^m |\nabla h|^2\diff \! x = 0,
\end{equation}
and
\begin{equation}\label{estim2}
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h\log h \diff \! x+ \int_{\mathbf{T}^{d}} |\nabla h|^2\diff \! x = 0.
\end{equation}
\end{proposition}
We want to seek strong Lyapunov functionals.
Recall from Definition~\ref{Defi:1.1} that $I$ is a strong Lyapunov functional if
it decays in a convex manner, in other words:
$$
\frac{\diff}{\dt} I\leq 0\quad\text{and}\quad\frac{\diff^2}{\dt^2} I\ge 0.
$$
In view of~\eqref{estim1} and~\eqref{estim2}, we have to find
those $m$ for which
$$
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}}h^m\left\vert\nabla h\right\vert^2\diff \! x\leq 0.
$$
As an example we recall that this property holds for $m=2$. Indeed, as explained by V\'azquez in his monograph (see~\cite[\S$3.2.4$]{Vazquez-PME-book}),
by multiplying the equation by $\partial_t (h^2)$, one obtains that
$$
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}}h^2\left\vert\nabla h\right\vert^2\diff \! x\leq 0.
$$
By combining this with~\eqref{estim1}, we see that the square of the $L^3$-norm is a strong Lyapunov functional. We will
complement this by considering the square of the
$L^{m+1}$-norm for $0\leq m\leq (1+\sqrt{7})/2$.
The upper bound is quite technical. However,
for the applications to the classical entropies, the important cases are the lower bound $m=0$
together with $m=1$
(this is because these are the two special results which will be used to prove that the square of the $L^2$-norm
and the Boltzmann's entropy are strong Lyapunov functionals).
We begin by considering the case $m=1$. In this case, an application
of the functional inequality given by Proposition~\ref{theo:logSob} will allow us to give a very simple proof of the following
\begin{proposition}
For any smooth positive solution to
\begin{equation}\label{boussinesqpasdarcy2}
\partial_t h -\cnx (h \nabla h) = 0,
\end{equation}
there holds
\begin{equation}\label{Boussinesq:L2dtn2}
\frac{1}{2} \frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x+ \int_{\mathbf{T}^{d}}\Big(\frac16 \left\vert\nabla h\right\vert^4 +
\frac{1}{2} h^2(\Delta h)^2\Big)\diff \! x\leq 0.
\end{equation}
\end{proposition}
\begin{remark}As already mentioned, it follows from~\eqref{estim1} and \eqref{Boussinesq:L2dtn2} that
$$
\frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}} h^2\diff \! x \ge 0.
$$
This proves that the square of the $L^2$-norm is a strong Lyapunov functional for the Boussinesq
equation.
\end{remark}
\begin{proof}
The energy equation reads
$$
\frac{1}{2} \frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^2\diff \! x + \int_{\mathbf{T}^{d}} h \left\vert \nabla h\right\vert^2\diff \! x = 0.
$$
Let us study the time derivative of the dissipation rate $\int h|\nabla h|^2\diff \! x$.
Since
\begin{align*}
\partial_t(h\left\vert \nabla h\right\vert^2) &= (\partial_t h)\left\vert \nabla h\right\vert^2 + 2 h \nabla h\cdot \nabla \partial_t h\\
&= \cnx(h \nabla h)\left\vert \nabla h\right\vert^2 + 2 h \nabla h \cdot\nabla \cnx(h\nabla h),
\end{align*}
we have
$$
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x = \int_{\mathbf{T}^{d}} \cnx (h \nabla h) \left\vert \nabla h\right\vert^2\diff \! x
- 2 \int_{\mathbf{T}^{d}} (\cnx(h \nabla h))^2\diff \! x.
$$
Remark that
\begin{align*}
&\cnx (h \nabla h) \left\vert \nabla h\right\vert^2
= |\nabla h|^4 + h (\Delta h)\left\vert \nabla h\right\vert^2,\\
&(\cnx(h \nabla h))^2=h^2(\Delta h)^2+\left\vert \nabla h\right\vert^4+2h(\Delta h)\left\vert \nabla h\right\vert^2.
\end{align*}
So we easily verify that
\begin{align*}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h\left\vert \nabla h\right\vert^2\diff \! x &=
\frac{1}{2} \int_{\mathbf{T}^{d}} |\nabla h|^4 \diff \! x- \frac{3}{2} \int_{\mathbf{T}^{d}} ({\rm div}(h\nabla h))^2 \diff \! x \\
&\quad- \frac{1}{2} \int_{\mathbf{T}^{d}} h^2 (\Delta h)^2\diff \! x.
\end{align*}
Now, we use Proposition~\ref{theo:logSob} applied with $\theta=h^2$ to write that
$$
\int (\cnx(h\nabla h))^2\diff \! x=\frac{1}{4}\int \big(\Delta h^2\big)^2\diff \! x\ge \frac{4}{9}\int \left\vert \nabla h\right\vert^4\diff \! x.
$$
This completes the proof.
\end{proof}
Let us give now a more general result, namely
\begin{proposition}\label{convexporous}
Consider a smooth positive solution to
\begin{equation}\label{boussinesqpasdarcy3}
\partial_t h -\cnx (h \nabla h) = 0,
\end{equation}
and a real number $m$ in $[0,(1+\sqrt{7})/2]$. Then
\begin{equation}\label{estim3}
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^m \left\vert \nabla h\right\vert^2\diff \! x+ I_m
\leq 0,
\end{equation}
with
$$
I_m=\frac{m}{m+1}\int_{\mathbf{T}^{d}} h^{m+1} |\Delta h|^2 \diff \! x+ C_m \int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x,
$$
where
$$
C_m =\frac{m+2}{m+1}\, \frac{(m+3)^2}{36}- \frac{m^2- m +2}{4}\ge 0.
$$
\end{proposition}
\begin{remark}
It follows from~\eqref{estim1} that for all $m$ in $[0,(1+\sqrt{7})/2]$,
$$
\frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}} h^{m+1}\diff \! x \ge 0.
$$
Similarly, there holds
$$
\frac{\diff^2}{\dt^2} \int_{\mathbf{T}^{d}}h\log h\diff \! x\ge 0.
$$
\end{remark}
\begin{proof}
Starting from
$$
\partial_t(h^m \left\vert \nabla h\right\vert^2) = (\partial_t h^m)\left\vert \nabla h\right\vert^2 + 2 h^m \nabla h\cdot \nabla \partial_t h,
$$
and then using the equation,
$$
\partial_t h^m - m h^{m-1} {\rm div}(h\nabla h) = 0,
$$
we deduce that
\begin{align*}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\diff \! x
&= \int_{\mathbf{T}^{d}} m h^{m-1} {\rm div} (h\nabla h) |\nabla h|^2\diff \! x\\
&\quad + \int_{\mathbf{T}^{d}} 2 h^m \nabla h \cdot \nabla {\rm div}(h\nabla h)\diff \! x.
\end{align*}
Now we remark that
\begin{align*}
& h^{m-1}\cnx (h \nabla h) \left\vert \nabla h\right\vert^2
= h^{m-1} |\nabla h|^4 + h^m (\Delta h)\left\vert \nabla h\right\vert^2 \\
&\cnx(h^m\nabla h) \cnx(h \nabla h)= \Big(\cnx\big(h^{(m+1)/2}\nabla h\big)\Big)^2 - \frac{(m-1)^2}{4} h^{m-1} |\nabla h|^4.
\end{align*}
Consequently,
\begin{align*}
\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\diff \! x
& =\frac{m^2+1}{2}\int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4 \diff \! x\\
&\quad+ m\int_{\mathbf{T}^{d}} h^m \Delta h |\nabla h|^2 \diff \! x\\
& \quad- 2 \int_{\mathbf{T}^{d}} \big(\cnx\big(h^{(m+1)/2}\nabla h\big)\big)^2\diff \! x.
\end{align*}
By integrating by parts twice, we
verify that
\begin{align*}
(m+1)\int_{\mathbf{T}^{d}}h^m\left\vert \nabla h\right\vert^2\Delta h\diff \! x&=-\int_{\mathbf{T}^{d}}h^{m+1}(\Delta h)^2\, dx\\
&\quad
+\int_{\mathbf{T}^{d}}\cnx\big(h^{m+1}\nabla h\big)\Delta h\, dx.
\end{align*}
Then, it follows from the equality
$$\cnx\big(h^{m+1}\nabla h\big) \Delta h
= \Big(\cnx\big(h^{(m+1)/2}\nabla h\big)\Big)^2 - \frac{(m+1)^2}{4} h^{m-1} |\nabla h|^4,
$$
that
\begin{align*}
\int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\Delta h\, dx
&=-\frac{1}{m+1}\int_{\mathbf{T}^{d}} h^{m+1}(\Delta h)^2\, dx\\
&\quad+\frac{1}{m+1}\int_{\mathbf{T}^{d}} \left(\cnx \big(h^{(m+1)/2}\nabla h\big)\right)^2\, dx\\
&\quad-\frac{m+1}{4}\int_{\mathbf{T}^{d}} h^{m-1}\left\vert \nabla h\right\vert^4\diff \! x.
\end{align*}
As a result,
\begin{align*}
&\frac{\diff}{\dt} \int_{\mathbf{T}^{d}} h^m\left\vert \nabla h\right\vert^2\diff \! x \\
&\qquad\qquad =\frac{m^2-m+2}{4} \int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x \\
&\qquad\qquad \quad - \frac{m}{m+1} \int_{\mathbf{T}^{d}} h^{m+1} (\Delta h)^2\diff \! x\\
&\qquad\qquad \quad - \frac{m+2}{m+1} \int_{\mathbf{T}^{d}}
\Big(\cnx\big(h^{(m+1)/2} \nabla h\big)\Big)^2 \diff \! x.
\end{align*}
The inequality \eqref{youpi2} then implies that
$$
\int_{\mathbf{T}^{d}} \Big(\cnx\big(h^{(m+1)/2} \nabla h\big)\Big)^2 \diff \! x
\ge \frac{(m+3)^2}{36}\int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x .
$$
Consequently, for any $m\ge 0$,
$$
\frac{\diff}{\dt}\int_{\mathbf{T}^{d}} h^m \left\vert \nabla h\right\vert^2\diff \! x+ I_m
\leq 0,
$$
with
$$
I_m=\frac{m}{m+1}\int_{\mathbf{T}^{d}} h^{m+1} (\Delta h)^2 \diff \! x+ C_m \int_{\mathbf{T}^{d}} h^{m-1} |\nabla h|^4\diff \! x,
$$
where
$$
C_m = \frac{m+2}{m+1}\cdot\frac{(m+3)^2}{36}-\frac{m^2-m+2}{4}.
$$
By performing elementary computations, one verifies that $C_m\ge 0$ for all $m$ in $[0,(1+\sqrt 7 )/2]$, which completes the proof.
\end{proof}
\appendix
\section{An application to compressible fluid dynamics}\label{appendix:compressible}
The goal of this appendix is to show that the Sobolev inequality
given by Proposition~\ref{theo:logSob} has an important application on the
global existence of weak solutions on the compressible
Navier-Stokes with density dependent viscosities, namely
\begin{equation*}
\left\{
\begin{aligned}
&\partial_t \rho + {\rm div}(\rho u)= 0, \\
&\partial_t (\rho u) + {\rm div}(\rho u\otimes u)
- 2 {\rm div} (\mu(\rho) D(u)) - \nabla (\lambda(\rho){\rm div} u ) + \nabla p(\rho)= 0,
\end{aligned}
\right.
\end{equation*}
with $D(u) = (\nabla u + {}^t\nabla u)/2$, $p(s)=a s^\gamma$ with $\gamma>1$ and the initial boundary conditions
$$
\rho\vert_{t=0} = \rho_0, \qquad
\rho u\vert_{t=0} = m_0.
$$
Recently Bresch, Vasseur and Yu~\cite{BrVAYu19}
obtained the first result with a large class of given shear and bulk viscosities
respectively $s\mapsto \mu(s)$ and $s\mapsto\lambda(s)$ in a periodic domain $\Omega = {\mathbb T}^3$.
More precisely, if we assume the shear and bulk viscosities as
\begin{equation}
\mu(\rho)= \rho^\alpha,
\qquad
\lambda(\rho) = 2(\alpha-1) \rho^\alpha,
\end{equation}
then the authors obtained
the existence of solutions under the assumption that
$$
\frac{2}{3} < \alpha < 4.
$$
The lower bound is a constraint coming naturally from a necessary coercivity property.
The upper-bound is a mathematical constraint due to Lemma $2.1$ in~\cite{BrVAYu19}, which reads as follows:
There exists $C>0$ independent of $\alpha$ and $\varepsilon >0$ as small as we want such that
\begin{equation*}
\begin{aligned}
+\infty > \frac{C}{\varepsilon}\int \rho^\alpha |\nabla\nabla \rho^{\alpha-1}|^2
& \ge \frac{4}{(3\alpha-2)^2}
\int |\nabla^2 \rho^{(3\alpha-2)/2}|^2 \\
& + \left(\frac{1}{\alpha}- \frac{1}{4} - \varepsilon\right) \frac{4^4}{(3\alpha-2)^4}
\int |\nabla \rho^{(3\alpha-2)/4}|^4.
\end{aligned}
\end{equation*}
The constraint $\alpha<4$ allows to have two
positive terms in the right-hand side and therefore
some appropriate controls on $\rho$ namely
$$
\nabla^2 \rho^{(3\alpha-2)/2}\in L^2((0,T)\times {\mathbb T}^3)\quad\text{and}\quad
\nabla\rho^{(3\alpha-2)/4} \in L^4((0,T)\times {\mathbb T}^3).
$$
Proposition \ref{theo:logSob} allows to compare the first
and the second quantity and therefore to relax the
constraint $\alpha <4.$ More precisely, using such estimate,
it suffices to check that
$$
\frac{1}{9} +
\Bigl(\frac{1}{\alpha}-\frac{1}{4}\Bigr)
\frac{4}{(3\alpha-2)^2} >0,
$$
to get a positive quantity on the right-hand side
controlling the $H^2$ derivatives.
We can check that this is true for all $\alpha$ such
that $2/3 < \alpha <+\infty$. This implies
that the result by Bresch--Vasseur--Yu still holds
for any $\mu$ and $\lambda$ such that
$$
\mu(\rho) = \rho^\alpha, \qquad
\lambda(\rho) = 2 (\alpha-1) \rho^\alpha
$$
$$
with $2/3 < \alpha <+\infty$.
\section{Lyapunov functionals for the mean-curvature equation}\label{Appendix:MCF}
\begin{proposition}\label{Prop:C1nabla}
If $h$ is a smooth solution to the mean-curvature equation
$$
\partial_th+\sqrt{1+|\nabla h|^2}\kappa=0\quad\text{with}\quad
\kappa=-\cnx\left(\frac{\nabla h}{\sqrt{1+|\nabla h|^2}}\right),
$$
then
\begin{equation}\label{nC10}
\frac{\diff}{\dt} \int_{\mathbf{T}^d}\left\vert \nabla h\right\vert^2\diff \! x\leq 0.
\end{equation}
\end{proposition}
\begin{proof}
By multiplying the equation by $-\Delta h$ and integrating by parts, we find that
$$
\frac{\diff}{\dt} \int_{\mathbf{T}^d}\left\vert \nabla h\right\vert^2\diff \! x -\int_{\mathbf{T}^d}\sqrt{1+|\nabla h|^2}\kappa\Delta h\diff \! x=0.
$$
Using the Leibniz rule, one has
$$
-\sqrt{1+|\nabla h|^2}\kappa\Delta h
=(\Delta h)^2-\frac{\nabla h\cdot (\nabla h\cdot \nabla\nabla h)\Delta h}{1+|\nabla h|^2}.
$$
It follows from the Cauchy-Schwarz inequality that
$$
\left\vert \frac{\nabla h\cdot (\nabla h\cdot \nabla\nabla h)\Delta h}{1+|\nabla h|^2}
\right\vert
\leq \left\vert \nabla^2 h\right\vert\left\vert \Delta h\right\vert.
$$
Consequently,
$$
-\int_{\mathbf{T}^d}\sqrt{1+|\nabla h|^2}\kappa\Delta h\diff \! x
\ge \int_{\mathbf{T}^d}\big((\Delta h)^2-\left\vert \nabla^2 h\right\vert\left\vert \Delta h\right\vert\big)\diff \! x.
$$
Now we claim that the above term is non-negative, which in turn will imply the
wanted result~\eqref{nC10}. To see this, we first use the Cauchy-Schwarz inequality to bound this term from below by
$$
\int_{\mathbf{T}^d}(\Delta h)^2\diff \! x
-\left(\int_{\mathbf{T}^d}(\Delta h)^2\diff \! x\right)^\frac{1}{2}\left(
\int_{\mathbf{T}^d}\left\vert \nabla^2 h\right\vert^2\diff \! x\right)^\frac{1}{2},
$$
and then apply the classical identity (see~\eqref{Deltanablanabla})
$$
\int_{\mathbf{T}^d}(\Delta h)^2\diff \! x=\int_{\mathbf{T}^d}\left\vert \nabla^2 h\right\vert^2\diff \! x,
$$
which can be verified by integrating by parts twice.
\end{proof}
\begin{proposition}\label{Prop:C1}
If $h$ is a smooth solution to the mean-curvature equation in space dimension $d=1$:
$$
\partial_t h+\sqrt{1+(\partial_x h)^2}\kappa=0\quad\text{with}\quad
\kappa=-\partial_x\left(\frac{\partial_x h}{\sqrt{1+(\partial_x h)^2}}\right),
$$
then the following quantities are Lyapunov functionals:
$$
\int_\mathbf{T} h^2\diff \! x,\quad
\int_\mathbf{T} (\partial_t h)^2\diff \! x,\quad \int_\mathbf{T} (1+(\partial_xh)^2)\kappa^2\diff \! x.
$$
In addition, $\int_\mathbf{T} h^2\diff \! x$ is a strong Lyapunov functional.
\end{proposition}
\begin{proof}
If the space dimension $d$ is equal to~$1$, we have
$$
\sqrt{1+(\partial_x h)^2}\kappa=-\frac{\partial_{xx} h}{1+(\partial_x h)^2}.
$$
Consequently, the one-dimensional version of the mean-curvature equation reads
$$
\partial_t h-\frac{\partial_{xx}h}{1+(\partial_xh)^2}=0.
$$
We may further simplify the mean curvature equation by noticing that
\begin{equation}\label{MCFarctan}
\partial_t h+\sqrt{1+(\partial_xh)^2}\kappa=\partial_th-\frac{\partial_{xx}h}{1+(\partial_xh)^2}=
\partial_th-\partial_x \arctan (\partial_xh).
\end{equation}
This immediately implies that the square of the $L^2$-norm is a Lyapunov functional:
\begin{equation}\label{n71}
\frac{1}{2}\frac{\diff}{\dt} \int_{\mathbf{T}}h^2\diff \! x=-\int_\mathbf{T} (\partial_xh)\arctan (\partial_xh)\diff \! x \leq 0,
\end{equation}
since $u \arctan u\ge 0$ for all $u\in \mathbf{R}$.
It also follows from the previous $\arctan$-formulation that the unknown $\dot{h}=\partial_t h$ is solution to
$$
\partial_t\dot{h}-\partial_x\left( \frac{\partial_x
\dot{h}}{1+(\partial_xh)^2}\right)=0.
$$
Multiplying the previous equation by $\dot{h}$ and integrating by parts in $x$, we infer that
$$
\frac{\diff}{\dt} \int_\mathbf{T}(\partial_th)^2\diff \! x\leq 0.
$$
By using the equation for $h$, this is equivalent to
$$
\frac{\diff}{\dt} \int_\mathbf{T} (1+(\partial_xh)^2)
\kappa^2 \diff \! x\leq 0.
$$
Now observe that
$$
\partial_t \big((\partial_x h) \arctan (\partial_x h)\big)
= \partial_t\partial_x h \Bigl(\arctan (\partial_x h)
+ \frac{\partial_x h}{1+(\partial_x h)^2}\Bigr).
$$
On the other hand, using the equation \eqref{MCFarctan}, we have
$$
\partial_t\partial_x h=\partial_x\Big(\frac{\partial_{xx}h}{1+(\partial_xh)^2}\Big).
$$
Therefore, integrating by parts, we conclude that
\begin{align*}
&\frac{\diff}{\dt} \int_\mathbf{T} (\partial_xh)\arctan (\partial_xh)\diff \! x \\
&\qquad\qquad=-\int_\mathbf{T} \frac{\partial_{xx}h}{1+(\partial_xh)^2}
\partial_x \left(\arctan (\partial_x h)
+ \frac{\partial_x h}{1+(\partial_x h)^2}\right)\diff \! x \\
&\qquad\qquad=-\int_\mathbf{T} \frac{\partial_{xx}h}{1+(\partial_xh)^2} \cdot
\frac{2\partial_{xx} h}{(1+(\partial_x h)^2)^2}\diff \! x.
\end{align*}
This proves that
$$
\frac{\diff}{\dt} \int_\mathbf{T} (\partial_xh)\arctan (\partial_xh)\diff \! x=-2\int_\mathbf{T}\kappa^2\diff \! x \leq 0.
$$
So, in view of \eqref{n71}, we conclude that
$$
\frac{\diff^2}{\dt^2} \int_{\mathbf{T}}h^2\diff \! x\ge 0.
$$
We thus have proved that
$$
\frac{\diff}{\dt} \int_{\mathbf{T}}h^2\diff \! x\leq 0\quad\text{and}\quad \frac{\diff^2}{\dt^2} \int_{\mathbf{T}}h^2\diff \! x\ge 0.
$$
By definition, this means that $\int_{\mathbf{T}}h^2\diff \! x$ is a strong
Lyapunov functional for the mean-curvature equation.
\end{proof}
The next proposition gives a somewhat surprising property of
the Boussinesq equation, which is directly inspired by the $\arctan$-formulation used above for the
mean-curvature equation.
\begin{proposition}\label{prop:C2Boussinesq}
Consider the Boussinesq equation in space dimension $1$:
$$
\partial_th-\partial_x(h\partial_x h)=0.
$$
Then
$$
\frac{\diff}{\dt} \int_{\mathbf{T}} (\partial_x h) \arctan (\partial_x h)\diff \! x \leq 0.
$$
\end{proposition}
\begin{proof}
As already seen in the previous proof,
$$
\partial_t \big((\partial_x h) \arctan (\partial_x h)\big)
=\partial_t\partial_x h \left(\arctan (\partial_x h)+\frac{\partial_x h}{1+(\partial_x h)^2}\right).
$$
Using the equation
$$
\partial_t\partial_x h = \partial_x^2(h\partial_x h),
$$
and then integrating by parts, we get
$$
\frac{\diff}{\dt} \int_{\mathbf{T}} (\partial_x h) \arctan (\partial_x h)\diff \! x
= - 2\int_{\mathbf{T}} \partial_x(h\partial_x h)
\frac{\partial_x^2 h}{(1+(\partial_x h)^2)^2}\diff \! x
= I,
$$
where $I$ reads
$$
I=- \int_{\mathbf{T}}\frac{2h(\partial_x^2 h)^2}{(1+(\partial_x h)^2)^2}\diff \! x
- \int_{\mathbf{T}}\frac{2(\partial_x h)^2 \partial_x^2 h}{(1+(\partial_x h)^2)^2}\diff \! x.
$$
Note that the second term vanishes since this is the integral of an exact derivative.
So,
$$
\frac{\diff}{\dt} \int_{\mathbf{T}} (\partial_x h) \arctan (\partial_x h)\diff \! x
+\int_{\mathbf{T}}\frac{2h(\partial_x^2 h)^2}{(1+(\partial_x h)^2)^2}\diff \! x
=0,
$$
which implies the wanted conclusion.
\end{proof}
\section{A Rellich type estimate}\label{A:Rellich}
This appendix gives a proof of the inequality~\eqref{d10}.
\begin{lemma}
For any smooth functions $h$ and $\zeta$ in $C^\infty(\mathbf{T}^d)$, there holds
\begin{equation}\label{d10-bisb}
\int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \leq
\int_{\mathbf{T}^d} (1+|\nabla h|^2)|\nabla \zeta-\mathcal{B} \nabla h|^2 \diff \! x,
\end{equation}
where
\begin{equation}\label{d11-bisb}
\mathcal{B}=\frac{G(h)\zeta+\nabla \zeta \cdot \nabla h}{1+|\nabla h|^2}.
\end{equation}
\end{lemma}
\begin{remark}
$i)$ This inequality extends to functions which are not smooth.
$ii)$ This generalizes an estimate proved in~\cite{A-stab-AnnalsPDE} when $d=1$,
for the Dirichlet-to-Neumann operator associated
to a domain with finite depth. When $d=1$, the main difference is that this is an identity (and not only an inequality).
This comes from the fact that, in the proof below, to derive \eqref{esti:final7} we use the inequality
$(\nabla h\cdot \mathcal{V})^2\leq |\nabla h|^2 \cdot |\mathcal{V}|^2$, which is clearly an equality when $d=1$.
\end{remark}
\begin{proof}
We follow the analysis in~\cite{A-stab-AnnalsPDE}. Set
$$
\Omega=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;y<h(x)\},
$$
and denote by $\phi$ the harmonic function defined by
\begin{equation}\label{m1}
\left\{
\begin{aligned}
&\Delta_{x,y}\phi=0\quad\text{in }\Omega=\{(x,y)\in \mathbf{T}\times \mathbf{R} \,;\, y<h(x)\},\\
&\phi(x,h(x)) = \zeta(x).
\end{aligned}
\right.
\end{equation}
As recalled in Lemma~\ref{Lemma:decayinfty}, this
is a classical elliptic boundary problem, which admits a unique
smooth solution. Moreover, it satisfies
\begin{equation}\label{decaytozero-appendix}
\lim_{y\to-\infty}\sup_{x\in\mathbf{T}^{d}}\left\vert \nabla_{x,y}\phi(x,y)\right\vert=0.
\end{equation}
Introduce the notations
$$
\mathcal{V}=(\nabla\phi)_{\arrowvert y=h}, \qquad \mathcal{B}=(\partial_y\phi)_{\arrowvert y=h}.
$$
(We parenthetically recall that $\nabla$ denotes the gradient with respect to
the horizontal variables
$x=(x_1,\ldots,x_d)$ only.)
It follows from the chain rule that
$$
\mathcal{V}=\nabla \zeta-\mathcal{B}\nabla h,
$$
while $\mathcal{B}$ is given by \eqref{d11-bisb}.
On the other hand, by definition of the Dirichlet-to-Neumann operator, one has
the identity
$$
G(h)\zeta=\big(\partial_y \phi-\nabla h\cdot \nabla \phi\big)_{\arrowvert y=h},
$$
so
$$
G(h)\zeta=\mathcal{B}-\nabla h\cdot \mathcal{V}.
$$
Squaring this identity yields
$$
(G(h)\zeta)^2
=\mathcal{B}^2-2 \mathcal{B}\nabla h \cdot \mathcal{V} +(\nabla h\cdot \mathcal{V})^2.
$$
Since $(\nabla h\cdot \mathcal{V})^2\leq |\nabla h|^2 \cdot |\mathcal{V}|^2$, this implies the inequality:
\begin{equation}\label{esti:final7}
(G(h)\zeta)^2\leq \mathcal{B}^2-\left\vert\mathcal{V}\right\vert^2-2\mathcal{B}\nabla h\cdot \mathcal{V} +(1+|\nabla h|^2)\mathcal{V}^2.
\end{equation}
Integrating this gives
$$
\int_{\mathbf{T}^d} (G(h)\zeta)^2\diff \! x \leq
\int_{\mathbf{T}^d} (1+|\nabla h|^2)\left\vert\mathcal{V}\right\vert^2 \diff \! x+R,
$$
where
$$
R=\int_{\mathbf{T}^d}\Big( \mathcal{B}^2-\left\vert\mathcal{V}\right\vert^2-2\mathcal{B}\nabla h\cdot \mathcal{V}\Big)\diff \! x.
$$
Since $\left\vert\mathcal{V}\right\vert=|\nabla \zeta-\mathcal{B} \nabla h|$,
we immediately see that, to obtain the wanted estimate~\eqref{d10-bisb}, it is sufficient to prove
that $R=0$. To do so, we begin by noticing that $R$ is the flux associated to a vector field. Indeed,
$$
R=\int_{\partial\Omega} X\cdot n\diff\mathcal{H}^{d}
$$
where
$X\colon \Omega\rightarrow \mathbf{R}^{d+1}$ is given by
$$
X=(-(\partial_y\phi)\nabla \phi;|\nabla \phi|^2-(\partial_y\phi)^2).
$$
Then the key observation is that this vector field satisfies
$\cn_{x,y} X=0$ since
$$
\partial_y \big( (\partial_y\phi)^2-|\nabla\phi|^2\big)
+2\cnx \big((\partial_y\phi)\nabla\phi\big)=
2(\partial_y\phi) \Delta_{x,y}\phi=0,
$$
as can be verified by an elementary computation.
Now, we see that the cancellation $R=0$ comes from the Stokes' theorem.
To rigorously justify this point, we
truncate $\Omega$ in order to work in a smooth bounded domain. Given a parameter $\beta>0$, set
$$
\Omega_\beta=\{(x,y)\in\mathbf{T}^{d}\times\mathbf{R}\,;-\beta<y<h(x)\}.
$$
An application of the divergence theorem in $\Omega_\beta$ gives that
$$
0=\iint_{\Omega_\beta} \cn_{x,y}X\diff \! ydx=R+\int_{\{y=-\beta\}}X\cdot (-e_y)\diff \! x,
$$
where $e_y$ is the vector $(0,\ldots,0,1)$ in $\mathbf{R}^{d+1}$.
Sending $\beta$ to $+\infty$ and remembering that $X$ converges to $0$ uniformly when $y$ goes to $-\infty$ (see~\eqref{decaytozero-appendix}), we obtain the expected result $R=0$.
This completes the proof.
\end{proof}
\section{Darcy's law}\label{appendix:HS}
In this appendix, we recall the derivation of the Hele-Shaw and Mullins-Sekerka equations.
These equations dictate the dynamics of the free surface of an incompressible
fluid evolving according to
Darcy's law. Consider a time-dependent fluid domain $\Omega$
of the form:
$$
\Omega(t)=\{ (x,y) \in \mathbf{T}^{d}\times \mathbf{R}\,;\, y < h(t,x)\}.
$$
The Darcy's law stipulates that the velocity
$v\colon \Omega\rightarrow \mathbf{R}^{d+1}$ and the pressure $P\colon\Omega\rightarrow \mathbf{R}$
satisfy the following equations:
$$
\cn_{x,y} v=0\quad\text{ and }\quad v=-\nabla_{x,y} (P+gy) \quad \text{in }\Omega,
$$
where $g>0$ is the acceleration of gravity.
In addition, one assumes that
$$
\lim_{y\to-\infty}v=0
$$
and that, on the free surface $\partial\Omega$,
the normal component of $v$
coincides with the normal component of the velocity of free surface, which
implies that
$$
\partial_t h=\sqrt{1+|\nabla h|^2} \, v\cdot n\quad \text{on}\quad y=h,
$$
where $\nabla=\nabla_x$ and $n$ is the outward unit normal to $\partial\Omega$, given by
$$
n=\frac{1}{\sqrt{1+|\nabla h|^2}} \begin{pmatrix} -\nabla h \\ 1 \end{pmatrix}.
$$
The final equation states that
the restriction of the pressure to the free surface is proportional to the mean curvature:
$$
P=\mu \kappa \quad \text{on}\quad\partial\Omega,
$$
where the parameter $\mu$ belongs to $[0,1]$ and
$\kappa$ is given by~\eqref{defi:kappa}.
Now we notice that $\Delta_{x,y}(P+gy)=\cn_{x,y} v=0$ so $P+gy$ is
the harmonic extension of $gh+\mu\kappa$.
It
follows that the Hele-Shaw problem is equivalent to
\begin{equation*}
\partial_{t}h+G(h)(gh+\mu \kappa)=0.
\end{equation*}
\begin{thebibliography}{10}
\bibitem{AcerbiFuscoJulinMorini2019}
Emilio Acerbi, Nicola Fusco, Vesa Julin, and Massimiliano Morini.
\newblock Nonlinear stability results for the modified {M}ullins-{S}ekerka and
the surface diffusion flow.
\newblock {\em J. Differential Geom.}, 113(1):1--53, 2019.
\bibitem{Aconvexity}
Thomas Alazard.
\newblock Convexity and the {H}ele-{S}haw equation.
\newblock {\em Water Waves}, to appear. Preprint arXiv:2003.02618.
\bibitem{A-stab-AnnalsPDE}
Thomas Alazard.
\newblock Stabilization of the water-wave equations with surface tension.
\newblock {\em Ann. PDE}, 3(2):Art. 17, 41, 2017.
\bibitem{A-Lazar}
Thomas Alazard and Omar Lazar.
\newblock Paralinearization of the {M}uskat equation and application to the
{C}auchy problem.
\newblock {\em Arch. Rational Mech. Anal.}, to appear. Preprint
arXiv:1907.02138.
\bibitem{AMS}
Thomas Alazard, Nicolas Meunier, and Didier Smets.
\newblock Lyapunov functions, identities and the {C}auchy problem for the
{H}ele-{S}haw equation.
\newblock {\em Comm. Math. Phys.}, to appear. Preprint arXiv:1907.03691.
\bibitem{Almgren-Physics-1996}
Robert Almgren.
\newblock Singularity formation in {H}ele-{S}haw bubbles.
\newblock {\em Phys. Fluids}, 8(2):344--352, 1996.
\bibitem{Antontsev-al-2004}
Stanislav Antontsev, Anvarbek Meirmanov, and Vadim Yurinsky.
\newblock Weak solutions for a well-posed {H}ele-{S}haw problem.
\newblock {\em Boll. Unione Mat. Ital. Sez. B Artic. Ric. Mat. (8)},
7(2):397--424, 2004.
\bibitem{Arnold-et-al-2004}
Anton Arnold, Jos{\'e}~Antonio Carrillo, Laurent Desvillettes, Jean Dolbeault,
Ansgar J\"{u}ngel, Claudia Lederman, Peter Markowich, Giuseppe Toscani, and
C{\'e}dric Villani.
\newblock Entropies and equilibria of many-particle systems: an essay on recent
research.
\newblock {\em Monatsh. Math.}, 142(1-2):35--43, 2004.
\bibitem{BakryEmmery-1985}
Dominique Bakry and Michel \'{E}mery.
\newblock Diffusions hypercontractives.
\newblock In {\em S\'{e}minaire de probabilit\'{e}s, {XIX}, 1983/84}, volume
1123 of {\em Lecture Notes in Math.}, pages 177--206. Springer, Berlin, 1985.
\bibitem{BerettaBDP-ARMA-1995}
Elena Beretta, Michiel Bertsch, and Roberta Dal~Passo.
\newblock Nonnegative solutions of a fourth-order nonlinear degenerate
parabolic equation.
\newblock {\em Arch. Rational Mech. Anal.}, 129(2):175--200, 1995.
\bibitem{Bernis-proc-1996}
Francisco Bernis.
\newblock Integral inequalities with applications to nonlinear degenerate
parabolic equations.
\newblock In {\em Nonlinear problems in applied mathematics}, pages 57--65.
SIAM, Philadelphia, PA, 1996.
\bibitem{Bernis-Friedman-JDE}
Francisco Bernis and Avner Friedman.
\newblock Higher order nonlinear degenerate parabolic equations.
\newblock {\em J. Differential Equations}, 83(1):179--206, 1990.
\bibitem{Bertozzi-NoticesAMS-1998}
Andrea~L. Bertozzi.
\newblock The mathematics of moving contact lines in thin liquid films.
\newblock {\em Notices Amer. Math. Soc.}, 45(6):689--697, 1998.
\bibitem{Bertozzi-et-al-1994}
Andrea~L. Bertozzi, Michael~P. Brenner, Todd~F. Dupont, and Leo~P. Kadanoff.
\newblock Singularities and similarities in interface flows.
\newblock In {\em Trends and perspectives in applied mathematics}, volume 100
of {\em Appl. Math. Sci.}, pages 155--208. Springer, New York, 1994.
\bibitem{Bertozzi-Pugh-1996}
Andrea~L. Bertozzi and Mary~C. Pugh.
\newblock The lubrication approximation for thin viscous films: regularity and
long-time behavior of weak solutions.
\newblock {\em Comm. Pure Appl. Math.}, 49(2):85--123, 1996.
\bibitem{BDPGG-ADE-1998}
Michiel Bertsch, Roberta Dal~Passo, Harald Garcke, and G\"{u}nther Gr\"{u}n.
\newblock The thin viscous flow equation in higher space dimensions.
\newblock {\em Adv. Differential Equations}, 3(3):417--440, 1998.
\bibitem{Bodineau-Lebowitz-Mouhot-Villani}
Thierry Bodineau, Joel Lebowitz, Cl\'{e}ment Mouhot, and C\'{e}dric Villani.
\newblock Lyapunov functionals for boundary-driven nonlinear drift-diffusion
equations.
\newblock {\em Nonlinearity}, 27(9):2111--2132, 2014.
\bibitem{Bolley-Gentil-JMPA-2010}
Fran\c{c}ois Bolley and Ivan Gentil.
\newblock Phi-entropy inequalities for diffusion semigroups.
\newblock {\em J. Math. Pures Appl. (9)}, 93(5):449--473, 2010.
\bibitem{Boussinesq-1904}
Joseph Boussinesq.
\newblock Recherches théoriques sur l'écoulement des nappes d'eau infiltrées
dans le sol et sur le débit des sources.
\newblock {\em Journal de Mathématiques Pures et Appliquées}, 10:5--78, 1904.
\bibitem{Bresch2018bd}
Didier Bresch, Mathieu Colin, Khawla Msheik, Pascal Noble, and Xi~Song.
\newblock B{D} entropy and {B}ernis-{F}riedman entropy.
\newblock {\em C.R. Acad Sciences Math\'ematiques}, 357:1--6, 2019.
\bibitem{BrVAYu19}
Didier Bresch, Alexis Vasseur, and Cheng Yu.
\newblock Global existence of entropy-weak solutions to the compressible
navier-stokes equations with nonlinear density dependent viscosities.
\newblock arXiv:1905.02701.
\bibitem{Carlen-Ulusoy-CMS}
Eric~A. Carlen and S{\"u}leyman Ulusoy.
\newblock An entropy dissipation-entropy estimate for a thin film type
equation.
\newblock {\em Commun. Math. Sci.}, 3(2):171--178, 2005.
\bibitem{Carillo-Jungel-Markovich-Toscani-Unterreiter}
Jos{\'e}~Antonio Carrillo, Ansgar J\"{u}ngel, Peter Markowich, Giuseppe
Toscani, and Andreas Unterreiter.
\newblock Entropy dissipation methods for degenerate parabolic problems and
generalized {S}obolev inequalities.
\newblock {\em Monatsh. Math.}, 133(1):1--82, 2001.
\bibitem{ChangLaraGuillenSchwab}
Hector~A. Chang-Lara, Nestor Guillen, and Russell~W. Schwab.
\newblock Some free boundary problems recast as nonlocal parabolic equations.
\newblock arXiv:1807.02714.
\bibitem{Chen-ARMA-1993}
Xinfu Chen.
\newblock The {H}ele-{S}haw problem and area-preserving curve-shortening
motions.
\newblock {\em Arch. Rational Mech. Anal.}, 123(2):117--151, 1993.
\bibitem{ChengCoutandShkoller-2014}
C.~H.~Arthur Cheng, Daniel Coutand, and Steve Shkoller.
\newblock Global existence and decay for solutions of the {H}ele-{S}haw flow
with injection.
\newblock {\em Interfaces Free Bound.}, 16(3):297--338, 2014.
\bibitem{Cheng-Belinchon-Shkoller-AdvMath}
Ching-Hsiao Cheng, Rafael Granero-Belinch\'{o}n, and Steve Shkoller.
\newblock Well-posedness of the {M}uskat problem with {$H^2$} initial data.
\newblock {\em Adv. Math.}, 286:32--104, 2016.
\bibitem{ChoiJerisonKim}
Sunhi Choi, David Jerison, and Inwon Kim.
\newblock Regularity for the one-phase {H}ele-{S}haw problem from a {L}ipschitz
initial surface.
\newblock {\em Amer. J. Math.}, 129(2):527--582, 2007.
\bibitem{ChugreevaOttoWestdickenberg2019}
Olga Chugreeva, Felix Otto, and Maria~G. Westdickenberg.
\newblock Relaxation to a planar interface in the {M}ullins-{S}ekerka problem.
\newblock {\em Interfaces Free Bound.}, 21(1):21--40, 2019.
\bibitem{CMWP-BAMS-2015}
Tobias~Holck Colding, William~P. Minicozzi, II, and Erik~Kj{\ae}r Pedersen.
\newblock Mean curvature flow.
\newblock {\em Bull. Amer. Math. Soc. (N.S.)}, 52(2):297--333, 2015.
\bibitem{Constantin1993droplet}
Peter Constantin, Todd~F. Dupont, Raymond~E. Goldstein, Leo~P. Kadanoff,
Michael~J. Shelley, and Su-Min Zhou.
\newblock Droplet breakup in a model of the hele-shaw cell.
\newblock {\em Physical Review E}, 47(6):4169, 1993.
\bibitem{ConstantinElgindi}
Peter Constantin, Tarek Elgindi, Huy Nguyen, and Vlad Vicol.
\newblock On singularity formation in a {H}ele-{S}haw model.
\newblock {\em Comm. Math. Phys.}, 363(1):139--171, 2018.
\bibitem{ConstantinVicol-GAFA2012}
Peter Constantin and Vlad Vicol.
\newblock Nonlinear maximum principles for dissipative linear nonlocal
operators and applications.
\newblock {\em Geom. Funct. Anal.}, 22(5):1289--1321, 2012.
\bibitem{CCG-Annals}
Antonio C{\'o}rdoba, Diego C{\'o}rdoba, and Francisco Gancedo.
\newblock Interface evolution: the {H}ele-{S}haw and {M}uskat problems.
\newblock {\em Ann. of Math. (2)}, 173(1):477--542, 2011.
\bibitem{DPGG-Siam-1998}
Roberta Dal~Passo, Harald Garcke, and G\"{u}nther Gr\"{u}n.
\newblock On a fourth-order degenerate parabolic equation: global entropy
estimates, existence, and qualitative behavior of solutions.
\newblock {\em SIAM J. Math. Anal.}, 29(2):321--342, 1998.
\bibitem{Dolbeault-Toscani-AIHPNL-2013}
Jean Dolbeault and Giuseppe Toscani.
\newblock Improved interpolation inequalities, relative entropy and fast
diffusion equations.
\newblock {\em Ann. Inst. H. Poincar\'{e} Anal. Non Lin\'{e}aire},
30(5):917--934, 2013.
\bibitem{Ecker-Regularity-Theory}
Klaus Ecker.
\newblock {\em Regularity theory for mean curvature flow}, volume~57 of {\em
Progress in Nonlinear Differential Equations and their Applications}.
\newblock Birkh\"{a}user Boston, Inc., Boston, MA, 2004.
\bibitem{Ecker-Huisken-Annals}
Klaus Ecker and Gerhard Huisken.
\newblock Mean curvature evolution of entire graphs.
\newblock {\em Ann. of Math. (2)}, 130(3):453--471, 1989.
\bibitem{Escher-Simonett-ADE-1997}
Joachim Escher and Gieri Simonett.
\newblock Classical solutions for {H}ele-{S}haw models with surface tension.
\newblock {\em Adv. Differential Equations}, 2(4):619--642, 1997.
\bibitem{Evans-2004}
Lawrence~C. Evans.
\newblock A survey of entropy methods for partial differential equations.
\newblock {\em Bull. Amer. Math. Soc. (N.S.)}, 41(4):409--438, 2004.
\bibitem{Evans-BAMS-2004}
Lawrence~C. Evans.
\newblock A survey of entropy methods for partial differential equations.
\newblock {\em Bull. Amer. Math. Soc. (N.S.)}, 41(4):409--438, 2004.
\bibitem{FlynnNguyen2020}
Patrick~T. Flynn and Huy~Q. Nguyen.
\newblock The vanishing surface tension limit of the {M}uskat problem.
\newblock arXiv:2001.10473, 2020.
\bibitem{GG-JPS-AdvMaths-2019}
Francisco Gancedo, Eduardo Garc\'{\i}a-Ju\'{a}rez, Neel Patel, and Robert~M.
Strain.
\newblock On the {M}uskat problem with viscosity jump: global in time results.
\newblock {\em Adv. Math.}, 345:552--597, 2019.
\bibitem{Giacomelli-Otto-CVPDE-2001}
Lorenzo Giacomelli and Felix Otto.
\newblock Variational formulation for the lubrication approximation of the
{H}ele-{S}haw flow.
\newblock {\em Calc. Var. Partial Differential Equations}, 13(3):377--403,
2001.
\bibitem{Grun-2001}
G{\"u}nther Gr\"{u}n.
\newblock On {B}ernis' interpolation inequalities in multiple space dimensions.
\newblock {\em Z. Anal. Anwendungen}, 20(4):987--998, 2001.
\bibitem{Gunther-Prokert-SIAM-2006}
Matthias G\"{u}nther and Georg Prokert.
\newblock On a {H}ele-{S}haw type domain evolution with convected surface
energy density: the third-order problem.
\newblock {\em SIAM J. Math. Anal.}, 38(4):1154--1185, 2006.
\bibitem{Hadzic-Shkoller-CPAM2015}
Mahir Had\v{z}i\'{c} and Steve Shkoller.
\newblock Global stability and decay for the classical {S}tefan problem.
\newblock {\em Comm. Pure Appl. Math.}, 68(5):689--757, 2015.
\bibitem{Huisken-JDE-1984}
Gerhard Huisken.
\newblock Flow by mean curvature of convex surfaces into spheres.
\newblock {\em J. Differential Geom.}, 20(1):237--266, 1984.
\bibitem{Jungel-book-entropy}
Ansgar J\"{u}ngel.
\newblock {\em Entropy methods for diffusive partial differential equations}.
\newblock SpringerBriefs in Mathematics. Springer, [Cham], 2016.
\bibitem{JungelMatthes-Nonlinearity-2006}
Ansgar J\"{u}ngel and Daniel Matthes.
\newblock An algorithmic construction of entropies in higher-order nonlinear
{PDE}s.
\newblock {\em Nonlinearity}, 19(3):633--659, 2006.
\bibitem{Kim-ARMA2003}
Inwon~C. Kim.
\newblock Uniqueness and existence results on the {H}ele-{S}haw and the
{S}tefan problems.
\newblock {\em Arch. Ration. Mech. Anal.}, 168(4):299--328, 2003.
\bibitem{Knupfer-Masmoudi-ARMA-2015}
Hans Kn\"{u}pfer and Nader Masmoudi.
\newblock Darcy's flow with prescribed contact angle: well-posedness and
lubrication approximation.
\newblock {\em Arch. Ration. Mech. Anal.}, 218(2):589--646, 2015.
\bibitem{Laugesen-CPAA}
Richard~S. Laugesen.
\newblock New dissipated energies for the thin fluid film equation.
\newblock {\em Commun. Pure Appl. Anal.}, 4(3):613--634, 2005.
\bibitem{Matioc-APDE-2019}
Bogdan-Vasile Matioc.
\newblock The {M}uskat problem in two dimensions: equivalence of formulations,
well-posedness, and regularity results.
\newblock {\em Anal. PDE}, 12(2):281--332, 2019.
\bibitem{NPausader}
Huy~Q. Nguyen and Beno{\^{\i}}t Pausader.
\newblock A paradifferential approach for well-posedness of the {M}uskat
problem.
\newblock arXiv:1907.03304.
\bibitem{Pruss-Simonett-book}
Jan Pr\"{u}ss and Gieri Simonett.
\newblock {\em Moving interfaces and quasilinear parabolic evolution
equations}, volume 105 of {\em Monographs in Mathematics}.
\newblock Birkh\"{a}user/Springer, [Cham], 2016.
\bibitem{Vazquez-PME-book}
Juan~Luis V\'{a}zquez.
\newblock {\em The porous medium equation}.
\newblock Oxford Mathematical Monographs. The Clarendon Press, Oxford
University Press, Oxford, 2007.
\newblock Mathematical theory.
\bibitem{Villani-Lecturenotes2008}
C.~Villani.
\newblock Entropy production and convergence to equilibrium.
\newblock In {\em Entropy methods for the {B}oltzmann equation}, volume 1916 of
{\em Lecture Notes in Math.}, pages 1--70. Springer, Berlin, 2008.
\bibitem{Villani-Oldandnew}
C\'{e}dric Villani.
\newblock {\em Optimal transport}, volume 338 of {\em Grundlehren der
Mathematischen Wissenschaften [Fundamental Principles of Mathematical
Sciences]}.
\newblock Springer-Verlag, Berlin, 2009.
\newblock Old and new.
\bibitem{ZhornitskayaBertozzi-2000}
Liya Zhornitskaya and Andrea~L. Bertozzi.
\newblock Positivity-preserving numerical schemes for lubrication-type
equations.
\newblock {\em SIAM J. Numer. Anal.}, 37(2):523--555, 2000.
\bibitem{Zugmeyer-arxiv2020}
Simon Zugmeyer.
\newblock Entropy flows and functional inequalities in convex sets.
\newblock arXiv:2001.02578.
\end{thebibliography}
\begin{flushleft}
\textbf{Thomas Alazard}\\
Université Paris-Saclay, ENS Paris-Saclay, CNRS,\\
Centre Borelli UMR9010, avenue des Sciences, \\
F-91190 Gif-sur-Yvette\\
\textbf{Didier Bresch}\\
LAMA CNRS UMR5127, Univ. Savoie Mont-Blanc, \\
Batiment le Chablais, \\
F-73376 Le Bourget du Lac, France.
\end{flushleft}
\end{document}
\begin{document}
\title{The Mystery of the Shape Parameter IV}
\author{Lin-Tian Luh\\Department of Mathematics, Providence University\\ Shalu Town, Taichung County\\ Taiwan\\Email: ltluh@pu.edu.tw}
\date{\today}
\maketitle
{\bf Abstract}. This is the fourth paper of our study of the shape parameter c contained in the famous multiquadrics $(-1)^{\lceil \beta\rceil}(c^{2}+\|x\|^{2})^{\beta},\ \beta>0$, and the inverse multiquadrics $(c^{2}+\|x\|^{2})^{\beta},\ \beta<0$. The theoretical ground is the same as that of \cite{Lu6}. However we extend the space of interpolated functions to a more general one. This leads to a totally different set of criteria of choosing c.\\
\\
{\bf keywords}: radial basis function, multiquadric, shape parameter, interpolation.
\section{Introduction}
Again, we are going to adopt the radial function
\begin{eqnarray}
h(x):=\Gamma(-\frac{\beta}{2})(c^{2}+|x|^{2})^{\frac{\beta}{2}},\ \beta\in R\backslash 2N_{\geq 0},\ c>0
\end{eqnarray}
, where $|x|$ is the Euclidean norm of $x$ in $R^{n},\ \Gamma$ is the classical gamma function, and $c,\beta$ are constants. This definition looks more complicated than the ones mentioned in the abstract. However it will simplify the Fourier transform of $h$ and our analysis of some useful results.
In order to make this paper more readable, we review some basic ingredients mentioned in the previous papers, at the cost of wasting a few pages.
For any interpolated function $f$, our interpolating function will be of the form
\begin{eqnarray}
s(x):=\sum_{i=1}^{N}c_{i}h(x-x_{i})+p(x)
\end{eqnarray}
where $p(x)\in P_{m-1}$, the space of polynomials of degree less than or equal to $m-1$ in $R^{n}, X=\{x_{1},\cdots,x_{N}\}$ is the set of centers (interpolation points). For $m=0,\ P_{m-1}:=\{0\}$. We require that $s(\cdot )$ interpolate $f(\cdot )$ at data points $(x_{1},f(x_{1})),\cdots,(x_{N},f(x_{N}))$. This results in a linear system of the form
\begin{eqnarray}
\sum_{i=1}^{N}c_{i}h(x_{j}-x_{i})+\sum_{i=1}^{Q}b_{i}p_{i}(x_{j})=f(x_{j}) & & ,j=1,\cdots,N \nonumber \\
\\
\sum_{i=1}^{N}c_{i}p_{j}(x_{i})=0 & & ,j=1,\cdots,Q \nonumber
\end{eqnarray}
to be solved, where $\{p_{1},\cdots,p_{Q}\}$ is a basis of $P_{m-1}$.
This linear system is solvable because $h(x)$ is conditionally positive definite (c.p.d.) of order $m=\max\{ \lceil \frac{\beta}{2}\rceil , 0\}$ where $\lceil \frac{\beta}{2}\rceil$ denotes the smallest integer greater than or equal to $\frac{\beta}{2}$.
Besides the linear system, another important object is the function space. Each function of the form (1) induces a function space called {\bf native space} denoted by ${\cal C}_{h,m}(R^{n})$, abbreviated as ${\cal C}_{h,m}$, where $m$ denotes its order of conditional positive definiteness. For each member $f$ of ${\cal C}_{h,m}$ there is a seminorm $\|f\|_{h}$, called the $h$-norm of $f$. The definition and characterization of the native space can be found in \cite{Lu1}, \cite{Lu2}, \cite{Lu3-1}, \cite{MN1}, \cite{MN2} and \cite{We}. In this paper all interpolated functions belong to the native space.
Although our interpolated functions are defined in the entire $R^{n}$, interpolation will occur in a simplex. The definition of simplex can be found in \cite{Fl}. A 1-simplex is a line segment, a 2-simplex is a triangle, and a 3-simplex is a tetrahedron with four vertices.
Let $T_{n}$ be an n-simplex in $R^{n}$ and $v_{i},\ 1\leq i\leq n+1$ be its vertices. Then any point $x\in T_{n}$ can be written as convex combination of the vertices:
$$x=\sum_{i=1}^{n+1}c_{i}v_{i},\ \sum_{i=1}^{n+1}c_{i}=1,\ c_{i}\geq 0.$$
The numbers $c_{1},\cdots ,c_{n+1}$ are called the barycentric coordinates of $x$. For any n-simplex $T_{n}$, the {\bf evenly spaced points} of degree $l$ are those points whose barycentric coordinates are of the form
$$(\frac{k_{1}}{l},\frac{k_{2}}{l},\cdots,\frac{k_{n+1}}{l}),\ k_{i}\ \text{nonnegative integers with}\ \sum_{i=1}^{n+1}k_{i}=l.$$
It's easily seen that the number of evenly spaced points of degree $l$ in $T_{n}$ is exactly $$N=dimP_{l}^{n}=\left( \begin{array}{c}
n+l \\
n
\end{array} \right) $$
where $P_{l}^{n}$ denotes the space of polynomials of degree not exceeding $l$ in n variables. Moreover, such points form a determining set for $P_{l}^{n}$, as is shown in \cite{Bo}.
In this paper the evaluation argument $x$ will be a point in an n-simplex, and the set $X$ of centers will be the evenly spaced points in that n-simplex.
\section{Fundamental Theory}
Before introducing the main theorem, we need to define two constants.
\begin{de}
Let $n$ and $\beta$ be as in (1). The numbers $\rho$ and $\Delta_{0}$ are defined as follows.
\begin{list}
{(\alph{bean})}{\usecounter{bean} \setlength{\rightmargin}{\leftmargin}}
\item Suppose $\beta <n-3$. Let $s=\lceil \frac{n-\beta -3}{2}\rceil $. Then
\begin{list}{(\roman{milk})}{\usecounter{milk} \setlength{\rightmargin}{\leftmargin}}
\item if $\beta <0,\ \rho=\frac{3+s}{3}\ and\ \Delta_{0}=\frac{(2+s)(1+s)\cdots 3}{
\rho^{2}};$
\item if $\beta >0,\ \rho=1+\frac{s}{2\lceil \frac{\beta}{2}\rceil +3} \ and \ \Delta_{0}=\frac{(2m+2+s)(2m+1+s)\cdots (2m+3)}{\rho^{2m+2}}$ \\
where $ m=\lceil \frac{\beta}{2}\rceil$.
\end{list}
\item Suppose $n-3\leq \beta <n-1$. Then $\rho=1$ and $\Delta_{0}=1$.
\item Suppose $\beta \geq n-1$. Let $s=-\lceil \frac{n-\beta -3}{2}\rceil $. Then
$$\rho =1\ and \ \Delta_{0}=\frac{1}{(2m+2)(2m+1)\cdots (2m-s+3)} \ where \ m=\lceil \frac{\beta}{2}\rceil.$$
\end{list}
\end{de}
The following theorem is the cornerstone of our theory. We cite it directly from \cite{Lu3} with a slight modification to make it easier to understand.
\begin{thm}
Let $h$ be as in (1). For any positive number $b_{0}$, let $C=\max \left\{ \frac{2}{3b_{0}},8\rho\right\}$ and $\delta_{0}=\frac{1}{3C}$. For any n-simplex $Q$ of diameter $r$ satisfying $\frac{1}{3C}\leq r\leq \frac{2}{3C}$(note that $\frac{2}{3C}\leq b_{0}$), if $f\in {\cal C}_{h,m}$,
\begin{eqnarray}
|f(x)-s(x)|\leq 2^{\frac{n+\beta-7}{4}}\pi^{\frac{n-1}{4}}\sqrt{n\alpha_{n}}c^{\frac{\beta}{2}-l}\sqrt{\Delta_{0}}\sqrt{3C}\sqrt{\delta}(\lambda')^{\frac{1}{\delta}}\|f\|_{h}
\end{eqnarray}
holds for all $x\in Q$ and $0<\delta<\delta_{0}$, where $s(x)$ is defined as in (2) with $x_{1},\cdots ,x_{N}$ the evenly spaced points of degree $l$ in $Q$ satisfying $\frac{1}{3C\delta}\leq l\leq \frac{2}{3C\delta}$. The constant $\alpha_{n}$ denotes the volume of the unit ball in $R^{n}$, and $0<\lambda'<1$ is given by
$$\lambda'=\left(\frac{2}{3}\right)^{\frac{1}{3C}}$$
which only in some cases mildly depends on the dimension n.
\end{thm}
{\bf Remark}: (a) Note that the right-hand side of (4) approaches zero as $\delta\rightarrow 0^{+}$. This is the key to understanding Theorem 2.2. The number $\delta$ is in spirit equivalent to the well-known fill-distance. Although the centers $x_{1},\cdots,x_{N}$ are not purely scattered, the shape of the simplex is controlled by us. Hence the distribution of the centers is practically quite flexible. (b) In (4) the shape parameter $c$ plays a crucial role and greatly influences the error bound. This provides us with a theoretical ground for choosing the optimal $c$. However, we need further work before presenting useful criteria.
In this paper all interpolated functions belong to a kind of space defined as follows.
\begin{de}
For any positive number $\sigma$,
$$E_{\sigma}:=\left\{ f\in L^{2}(R^{n}):\ \int |\hat{f}(\xi)|^{2}e^{\frac{|\xi|^{2}}{\sigma}}d\xi<\infty \right\}$$
where $\hat{f}$ denotes the Fourier transform of $f$. For each $f\in E_{\sigma}$, its norm is
$$\|f\|_{E_{\sigma}}:=\left\{ \int|\hat{f}(\xi)|^{2}e^{\frac{|\xi|^{2}}{\sigma}}d\xi\right\}^{1/2}$$.
\end{de}
The following lemma is cited from \cite{Lu5}.
\begin{lem}
Let $h$ be as in (1). For any $\sigma>0$, if $\beta<0$, $|n+\beta|\geq 1$ and $n+\beta+1\geq 0$, then $E_{\sigma}\subseteq {\cal C}_{h,m}(R^{n})$ and for any $f\in E_{\sigma}$, the seminorm $\|f\|_{h}$ of $f$ satisfies
$$\|f\|_{h}\leq 2^{-n-\frac{1+\beta}{4}}\pi^{-n-\frac{1}{4}}c^{\frac{1-n-\beta}{4}}\left\{ (\xi^{*})^{\frac{n+\beta+1}{2}}e^{c\xi^{*}-\frac{(\xi^{*})^{2}}{\sigma}}\right\}^{1/2}\|f\|_{E_{\sigma}}$$
where $$\xi^{*}:=\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma(n+\beta+1)}}{4}$$.
\end{lem}
\begin{cor}
Under the conditions of Theorem 2.2, if $f\in E_{\sigma},\ \beta<0,\ |n+\beta|\geq 1$ and $n+\beta+1\geq 0$, (4) can be transformed into
\begin{eqnarray}
|f(x)-s(x)|\leq 2^{-\frac{3n}{4}-2}\pi^{-\frac{3}{4}n-\frac{1}{2}}\sqrt{n\alpha_{n}}\sqrt{\Delta_{0}}\sqrt{3C}c^{\frac{\beta-n+1-4l}{4}}\left\{(\xi^{*})^{\frac{n+\beta+1}{2}}e^{c\xi^{*}-\frac{(\xi^{*})^{2}}{\sigma}}\right\}^{1/2}\sqrt{\delta}(\lambda')^{\frac{1}{\delta}}\|f\|_{E_{\sigma}}
\end{eqnarray}
where $$\xi^{*}:=\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma(n+\beta+1)}}{4}$$.
\end{cor}
{\bf Proof}. This is an immediate result of Theorem 2.2 and Lemma 2.4. \hspace{5cm} $\sharp$\\
\\
Note that Corollary 2.5 covers the very useful case $\beta=-1,\ n\geq 2$. However, the case $\beta=-1,\ n=1$ is excluded. For this case we need a different approach.
\begin{lem}
Let $\sigma>0,\ \beta=-1$ and $n=1$. For any $f\in E_{\sigma}$,
$$\|f\|_{h}\leq 2^{-(n+\frac{1}{4})}\pi^{-1}\left\{ \frac{1}{\ln 2}+2\sqrt{3}M(c)\right\}^{1/2}\|f\|_{E_{\sigma}}$$
where $M(c):=e^{1-\frac{1}{c^{2}\sigma}}$ if $c\leq \frac{2}{\sqrt{3\sigma}}$ and $M(c):=g(\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma}}{4})$ if $c>\frac{2}{\sqrt{3\sigma}}$, where $g(\xi):=\sqrt{c\xi}e^{c\xi-\frac{\xi^{2}}{\sigma}}$.
\end{lem}
{\bf Proof}. This is just Theorem 2.5 of \cite{Lu5}. \hspace{9cm} $\sharp$
\begin{cor}
Let $\sigma>0,\ \beta=-1$ and $n=1$. Under the conditions of Theorem 2.2, if $f\in E_{\sigma}$, (4) can be transformed into
\begin{eqnarray}
|f(x)-s(x)|\leq 2^{\frac{\beta-3n}{4}-2}\pi^{\frac{n-5}{4}}\sqrt{n\alpha_{n}}\sqrt{\Delta_{0}}\sqrt{3C}c^{\frac{\beta}{2}-l}\left\{ \frac{1}{\ln 2}+2\sqrt{3}M(c)\right\}^{1/2}\sqrt{\delta}(\lambda')^{\frac{1}{\delta}}\|f\|_{E_{\sigma}}
\end{eqnarray}
where $M(c)$ is defined as in Lemma 2.6.
\end{cor}
{\bf Proof}. This is an immediate result of Theorem 2.2 and Lemma 2.6. \hspace{4.8cm} $\sharp$\\
\\
Now we have dealt with the most useful cases for $\beta<0$. The next step is to treat $\beta>0$.
\begin{lem}
Let $\sigma>0,\ \beta>0$ and $n\geq 1$. For any $f\in E_{\sigma}$,
$$\|f\|_{h}\leq d_{0}c^{\frac{1-\beta-n}{4}}\left\{ \frac{(\xi^{*})^{\frac{1+\beta+n}{2}}e^{c\xi^{*}}}{e^{\frac{(\xi^{*})^{2}}{\sigma}}}\right\} ^{1/2}\|f\|_{E_{\sigma}}$$
where $\xi^{*}=\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma(1+\beta+n)}}{4}$ and $d_{0}$ is a constant depending on $n,\ \beta$ only.
\end{lem}
{\bf Proof}. This is just Theorem 2.8 of \cite{Lu5}. \hspace{8.9cm} $\sharp$
\begin{cor}
Let $\sigma>0,\ \beta>0$ and $n\geq 1$. If $f\in E_{\sigma}$, (4) can be transformed into
\begin{eqnarray}
|f(x)-s(x)|\leq 2^{\frac{n+\beta-7}{4}}\pi^{\frac{n-1}{4}}\sqrt{n\alpha_{n}}\sqrt{\Delta_{0}}\sqrt{3C}d_{0}c^{\frac{1+\beta-n-4l}{4}}\left\{ \frac{(\xi^{*})^{\frac{1+\beta+n}{2}}e^{c\xi^{*}}}{e^{\frac{(\xi^{*})^{2}}{\sigma}}}\right\} ^{1/2}\sqrt{\delta}(\lambda')^{\frac{1}{\delta}}\|f\|_{E_{\sigma}}
\end{eqnarray}
where $d_{0},\ \xi^{*}$ are as in Lemma 2.8.
\end{cor}
{\bf Proof}. This is an immediate result of Theorem 2.2 and Lemma 2.8. \hspace{4.5cm} $\sharp$
\section{Criteria of Choosing c}
Note that in (5),(6) and (7), there is a main function of c. As in \cite{Lu5}, let's call this function the MN function, denoted by $MN(c)$, and its graph the MN curve. The optimal choice of c is then the number minimizing $MN(c)$. However, unlike \cite{Lu5}, the range of c is the entire interval $(0,\infty)$, rather than a proper subset of $(0,\infty)$.
We now begin our criteria.\\
\\
{\bf Case 1}. \fbox{$\beta<0,\ |n+\beta|\geq 1$ and $n+\beta+1\geq 0$} Let $f\in E_{\sigma}$ and $h$ be as in (1). Under the conditions of Theorem 2.2, for any fixed $\delta$ satisfying $0<\delta<\delta_{0}$, the optimal value of $c$ in $(0,\infty)$ is the number minimizing
$$MN(c):=c^{\frac{\beta-n+1-4l}{4}}\left\{(\xi^{*})^{\frac{n+\beta+1}{2}}e^{c\xi^{*}-\frac{(\xi^{*})^{2}}{\sigma}}\right\}^{1/2}$$
where $$\xi^{*}=\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma(n+\beta+1)}}{4}$$.\\
\\
{\bf Reason}: This is a direct consequence of (5). \hspace{8cm} $\sharp$\\
\\
{\bf Remark}: (a) It is easily seen that $MN(c)\rightarrow\infty$ as $c\rightarrow\infty$. Also, if $n+\beta+1>0$, $MN(c)\rightarrow\infty$ as $c\rightarrow 0^{+}$. (b) Case 1 covers the frequently seen case $\beta=-1,\ n\geq 2$. (c) The number $c$ minimizing $MN(c)$ can be easily found by Mathematica or Matlab.\\
\\
{\bf Numerical Results}:\\
\\
\begin{figure}
% NOTE(review): caption truncated in the source; restore the value of $b_{0}$.
\caption{Here $n=2,\beta=-1,\sigma=1$ and $b_{0}$.}
\end{figure}
\begin{figure}
% NOTE(review): captions truncated in the source; restore the values of $b_{0}$.
\caption{Here $n=2,\beta=-1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=2,\beta=-1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=2,\beta=-1,\sigma=1$ and $b_{0}$.}
\end{figure}
\begin{figure}
% NOTE(review): caption truncated in the source; restore the value of $b_{0}$.
\caption{Here $n=2,\beta=-1,\sigma=1$ and $b_{0}$.}
\end{figure}
{\bf Case 2}. \fbox{$\beta=-1$ and $n=1$} Let $f\in E_{\sigma}$ and $h$ be as in (1). Under the conditions of Theorem 2.2, for any fixed $\delta$ satisfying $0<\delta<\delta_{0}$, the optimal value of $c$ in $(0,\infty)$ is the number minimizing
$$MN(c):=c^{\frac{\beta}{2}-l}\left\{\frac{1}{\ln 2}+2\sqrt{3}M(c)\right\}^{1/2}$$
where
$$M(c):=\left\{ \begin{array}{ll}
e^{1-\frac{1}{c^{2}\sigma}} & \mbox{if $0<c\leq \frac{2}{\sqrt{3\sigma}}$,} \\
g(\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma}}{4}) & \mbox{if $\frac{2}{\sqrt{3\sigma}}<c$}
\end{array} \right. $$, $g$ being defined by $g(\xi):=\sqrt{c\xi}e^{c\xi-\frac{\xi^{2}}{\sigma}}$.\\
\\
{\bf Reason}: This is a direct result of (6). \hspace{9cm} $\sharp$\\
\\
{\bf Remark}: Note that $MN(c)\rightarrow \infty$ both as $c\rightarrow \infty$ and $c\rightarrow 0^{+}$. Now let's see some numerical examples.
\begin{figure}
% NOTE(review): caption truncated in the source; restore the value of $b_{0}$.
\caption{Here $n=1,\beta=-1,\sigma=1$ and $b_{0}$.}
\end{figure}
\begin{figure}
% NOTE(review): captions truncated in the source; restore the values of $b_{0}$.
\caption{Here $n=1,\beta=-1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=1,\beta=-1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=1,\beta=-1,\sigma=1$ and $b_{0}$.}
\end{figure}
\begin{figure}
% NOTE(review): caption truncated in the source; restore the value of $b_{0}$.
\caption{Here $n=1,\beta=-1,\sigma=1$ and $b_{0}$.}
\end{figure}
{\bf Case 3}. \fbox{$\beta>0$ and $n\geq 1$} Let $f\in E_{\sigma}$ and $h$ be as in (1). Under the conditions of Theorem 2.2, for any fixed $\delta$ satisfying $0<\delta<\delta_{0}$, the optimal value of $c$ in $(0,\infty)$ is the number minimizing
$$MN(c):=c^{\frac{1+\beta-n-4l}{4}}\left\{\frac{(\xi^{*})^{\frac{1+\beta+n}{2}}e^{c\xi^{*}}}{e^{\frac{(\xi^{*})^{2}}{\sigma}}}\right\}^{1/2}$$
, where
$$\xi^{*}=\frac{c\sigma+\sqrt{c^{2}\sigma^{2}+4\sigma(1+\beta+n)}}{4}$$.\\
\\
{\bf Reason}: This follows from (7). \hspace{10cm} $\sharp$\\
\\
{\bf Remark}: By observing that
$$c\xi^{*}-\frac{(\xi^{*})^{2}}{\sigma}=\frac{1}{16}\left[ 2c^{2}\sigma+2c\sqrt{c^{2}\sigma^{2}+4\sigma(n+\beta+1)}-(4n+\beta+1)\right]$$
, we can easily obtain useful results as follows. (a) If $1+\beta-n-4l>0$, $\lim_{c\rightarrow0^{+}}MN(c)=0$. (b) If $1+\beta-n-4l<0,\ \lim_{c\rightarrow 0^{+}}MN(c)=\infty$. (c) If $1+\beta-n-4l=0,\ \lim_{c\rightarrow 0^{+}}MN(c)$ is a finite positive number. (d) $\lim_{c\rightarrow \infty}MN(c)=\infty$.\\
\\
{\bf Numerical Results}: For simplicity, we offer results for $n=1$ only. In fact, for $n\geq 1$ similar results can be presented with little additional difficulty.
\begin{figure}
% NOTE(review): captions truncated in the source; restore the values of $b_{0}$.
\caption{Here $n=1,\beta=1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=1,\beta=1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=1,\beta=1,\sigma=1$ and $b_{0}$.}
\end{figure}
\begin{figure}
% NOTE(review): captions truncated in the source; restore the values of $b_{0}$.
\caption{Here $n=1,\beta=1,\sigma=1$ and $b_{0}$.}
\caption{Here $n=1,\beta=1,\sigma=1$ and $b_{0}$.}
\end{figure}
\end{document} |
\begin{document}
\title{Discriminant and Singularities of Logarithmic Gauss Map, Examples and Application}
\begin{abstract}
The study of hypersurfaces in a torus leads to the beautiful
zoo of amoebas and their contours, whose possible configurations are seen from combinatorial data.
There is a deep connection to the logarithmic Gauss map and its critical points.
The theory has a lot of applications in many directions.
In this report we recall basic notions and results from the theory of amoebas,
show some connection to algebraic
singularity theory and discuss some consequences from the well known
classification of singularities to this subject.
Moreover, we have tried to compute some examples using the
computer algebra system {\sc Singular} and discuss different
possibilities and their effectivity to compute the critical points.
Here we meet an essential obstacle:
Relevant examples need real or even rational solutions,
which are found only by chance.
We have tried to unify different views to that subject.
\end{abstract}
\section{Toric hypersurface and Logarithmic Gauss map}
Let $V^*(f)$ be an algebraic hypersurface in the algebraic torus $\mathbb{T}^n$,
$\mathbb{T}:=\mathbb{C}^*$, i.e.
$$
V^*(f)=\{z\in\mathbb{T}^{n}\,| \, f(z)=0\},
$$
where $f(z)$ is a Laurent polynomial.
Recall that the \textit{Newton polyhedron}
${\mathcal N}_f \subset \mathbb{R}^n$ of $f$ is the convex hull in $\mathbb{R}^n$
of $A_f:=\textup{supp}(f)\subset \mathbb{Z}^n$.
Let $\mathbb{X}_\Sigma$ be the smooth toric variety associated to the fan $\Sigma$, which
is a refinement of the fan
dual to the Newton polyhedron ${\mathcal N}_f$.
We denote by $\overline{V}(f)\subset\mathbb{X}_\Sigma$
the closure of $V^*(f)$ in $\mathbb{X}_\Sigma$. The polynomial $f$ is called
\textit{non-singular for its Newton polyhedron}, if $V^*(f)$ is smooth
and for any face $\Delta\subset \mathcal{N}_f$
the truncation $f^{(\Delta)}$ of $f$ to the face $\Delta$ has
non-vanishing Jacobian at all $z\in\overline{V}(f)\cap \mathbb{X}_\Delta$:
$$ (z_1\partial f^{(\Delta)}/\partial z_1, \ldots ,
z_n\partial f^{(\Delta)}/\partial z_n)\neq 0.$$
In accordance with singularity resolution theorem, cf. \cite[page~291]{Ho},
a generic polynomial $f$ is non-singular for its Newton polyhedron and then $\overline{V}(f)$ is non-singular.
Next we introduce the so-called \textit{logarithmic Gauss map}
$\gamma_f:V^*(f)\to \mathbb{P}^{n-1}$.
Let $ {\mathbf t}^n$ denote the Lie algebra of ${\mathbb T}^n$,
which is identified with the tangent space of ${\mathbb T}^n$ at the unit point ${\bf e}$.
For any point $z\in V^*$ shift the tangent space $T_z(V^*)$ by the torus multiplication
(with $z^{-1}$) to a hyperplane $h_z\subset{\mathbf t}^n$, inducing a point
in the projective space of the dual $ {\mathbf t}^{n*}$, which we define to be $\gamma(z):=h_z^* \in
\mathbb{P}^{n-1}:=\mathbb{P}({\mathbf t}^{n*})$.
In coordinates of ${\mathbb T}^n$ the map $\gamma_f$ is given by
$$
\gamma_f(z)=\left(z_1 f_{z_1}:\ldots :z_n f_{z_n}\right)\in \mathbb{P}^{n-1}.
$$
Described in more geometric terms we have: Let $U\subset \mathbb{T}^n$
be a neighbourhood of a regular point $z$ on $V^*(f)$.
Choose a branch of the logarithmic map (restricted to $U$)
$ \log:U\to \mathbb{C}^n$, then the
direction of the normal line at $\log(z)$ to transformed hypersurface $\log(V^*(f)\cap U)$
has components $\left(z_1 f_{z_1},\ldots ,z_n f_{z_n}\right)$. This construction
does not depend on the choice of the branch of $\log$.
In \cite{Mi1}, 3.2, one can find the idea of a construction, how to extend $\gamma_f$
in the non-singular case to a finite map.
$$
\overline{\gamma}_f: \overline{V}(f)\to \mathbb{P}^{n-1}.
$$
Having a finite map $\gamma$ to a smooth variety, one can associate the {\em
ramification locus} or the {\em discriminant} as image of the critical locus:
$\mathcal{D}:=\gamma({\mathcal{C}_\gamma})$, which is usually a hypersurface.
An analytic structure which is compatible with base change was introduced by Teissier, cf. \cite{Tes}:
The structure sheaf ${\mathcal O_D}$ is defined to be the quotient by the $0$-th
fitting ideal of $\gamma_*({\mathcal O_C})$.
In local coordinates the defining equation is obtained as the (classical)
discriminant of the polynomial that generates the finite extension of the structure sheaves
over open affine subsets.
From the well-known theorem of Kouchnirenko, cf. \cite[Th.~3]{Kou},
Mikhalkin obtains:
\begin{pro}[\cite{Mi1}]
\label{lemma:1}
If the polynomial $f$ is non-singular for its Newton polyhedron, then
the degree of $\overline{\gamma}_f$ is obtained as $$\deg(\overline{\gamma_f})=
n!\cdot \textup{Vol}({\mathcal N}_f).$$
\end{pro}
For later calculation we give a description of the logarithmic Gauss map
$\gamma_f$ in local coordinates.
Since $V^*(f)$ is smooth, we may assume w.l.o.g.
$f_{z_n}:=\partial f/\partial z_n\neq 0$ locally, then
a function $g(z')$, $z':=(z_1,\ldots ,z_{n-1})$, exists, such that $f(z',g(z'))\equiv 0$.
Hence, $g_{z_i}=-f_{z_i}(z',g)/f_{z_n}(z',g)$ and $(\log(g(z'))_{z_i}=g_{z_i}/g$ hold, and
one obtains the formula
\begin{equation}
\label{eq:a}
\gamma_f(z')=\left(-z_1\frac{\partial \log g(z')}{\partial z_1}:\,
\ldots\, :-z_{n-1}\frac{\partial \log g(z')}{\partial z_{n-1}}:1\right).
\end{equation}
Then the fiber $\gamma^{-1}_f(y),\ y=(y_1:\ldots:y_n)\in \mathbb{P}^{n-1}$ is given by the zeros of the
local complete intersection ideal generated by $f$ and the 2-minors of
$$\left(\begin{array}{ccc} z_1f_{z_1}& \ldots & z_nf_{z_n}\\ y_1 & \ldots & y_n\end{array}\right),$$
i.e. (in case of $y_n\neq 0$) $\gamma^{-1}(y)$ is defined by the complete intersection ideal
\begin{equation}
\label{eq:b}
I_y:=(f,h_1,\ldots h_{n-1}),
\end{equation}
where $h_i=y_n z_i f_{z_i}-y_i z_n f_{z_n}$. There are at most $n!\cdot\textup{Vol}({\mathcal N}_f)$
zeros in the torus by Proposition~1.
\section{Amoeba and its Contour versus Laurent series}
Consider a rational function $F(z)=h(z)/f(z)$ of $n$ complex variables and different
Laurent expansions
\begin{equation}
\label{eq:1}
\sum_{\alpha\in \mathbb{Z}^n} c_\alpha z^\alpha
\end{equation}
of $F$ centered at $z=0$. The most natural way to describe these expansions
uses the amoeba of polar hypersurface $V^*=V^*(f)$.
Recall, that the \textit{amoeba} $\mathcal{A}_{V^*}$ of a toric hypersurface
$V^*=V^*(f)$ is the image of $V^*$ by the logarithmic map
$\textup{Log}:\mathbb{T}^n\to\mathbb{R}^n$,
$$
\textup{Log}: (z_1,\ldots, z_n) \mapsto (\log|z_1|,\ldots, \log|z_n|).
$$
The complement $\mathbb{R}^n -\mathcal{A}_{V^*}$ to the amoeba consists
of a finite number of connected components~$E_i$,
which are open and convex, cf.~\cite[Section~6.1]{Gelfand}.
These components are characterized in the following Proposition,
which is a summary of
Propositions~2.5, 2.6 in \cite{FPT}, Theorem~10 and Corollary~6 in \cite[Section~I.5]{Ru}.
\begin{pro}
\label{thm:1}
There exists an open subset $U_\mathcal{N}$ in the set of polynomials with fixed Newton
polyhedron
$\mathcal{N}$ that satisfies the following property:
If $f\in U_\mathcal{N}$, then
there is a bijection
from the set of lattice points of $\mathcal{N} \cap\mathbb{Z}^n$ to the set of components of
$\mathbb{R}^n -\mathcal{A}_{V^*(f)}:\
\nu \mapsto E_\nu$,
such that the normal cone
$C^{\vee}(\nu)$ to $\mathcal{N}_f$
at the point $\nu$ is the recession cone of the component $E_\nu$.
\end{pro}
A recession cone is the maximal cone which can be put inside $E_\nu$ by a translation.
If $f\not\in U_\mathcal{N}$, the expected component $E_\nu$ may not exist for some non-vertex
lattice points $\nu\in\mathcal{N}$,
because the associated Laurent series below does not converge.
Given a component $E_\nu$ one obtains a Laurent
series of $F$ centered at $z=0$ using the
term $a_\nu z^\nu$ of~$f$ as denominator in a corresponding geometric progression
\begin{equation}
\label{eq:2}
\frac{1}{f}=\sum_{k=0}^\infty
\frac{\left(a_\nu z^\nu-f\right)^k}{(a_\nu z^\nu)^{k+1}}.
\end{equation}
The set $\{\textup{Log}^{-1}(E_\nu)\}$ contains the domain of convergence for
this Laurent series.
The support of expansion (\ref{eq:2}) is the minimal cone $K_\nu$, which after a translation
by $\nu$ contains the face $\Delta$ of
$\mathcal{N}_f$, which has $\nu$ as interior point.
A non-zero vector $q\in\mathbb{Z}^n\cap K_\nu$ defines a so-called
\textit{diagonal subsequence} $\{c_{k\cdot q}\}_{k\in\mathbb{N}}$ of the set of coefficients
of expansion (\ref{eq:1}). We will discuss its asymptotic in the next section.
The set of critical values of the map $\textup{Log}$ restricted to $V^*$ is called
the \textit{contour} $\mathcal{C}_{V^*}$ of the amoeba $\mathcal{A}_{V^*}$ (see \cite{PT}).
The contour is closely related to the logarithmic Gauss map $\gamma_f$.
Recall Lemma~3 from \cite{Mi1}.
\begin{lemma}
\label{lm:2}
The preimage of the real points under the logarithmic Gauss map is mapped by $\textup{Log}$
to the contour:
$$\mathcal{C}_{V^*}=\textup{Log}\left(\gamma_f^{-1}(\mathbb{P}^{n-1}_\mathbb{R})\right).$$
\end{lemma}
\begin{proof}
Let $z$ be a regular point on ${V^*}$ and $U$ its neighbourhood. Since the map
$\left.\textup{Log}\right|_{V^*}$ is a composition of $\log: z \mapsto (\log(z_1),\ldots ,\log(z_n))$
and the projection $\Re:\mathbb{C}^n\to\mathbb{R}^n$, the point $z$ is critical for
$\left.\textup{Log}\right|_{V^*}$ if the projection $d\Re: T_z \log\,(V^*\cap U)\to \mathbb{R}^n$
is not surjective at $z$.
A fiber $T_z \log\,({V^*}\cap U)$ of the tangent bundle of the image by $\log$ of the hypersurface
$V^*$ is the hyperplane $$\{ t\in \mathbb{C}^n: \left<\gamma_f(z),t\right>=0\}.$$
For real $\gamma_f(z)$ the projection $d\Re$ is not surjective.
If $\gamma_f(z)$ is not real one can
consider $\left<\gamma_f(z),t\right>=0$ as a system of linear equations
with fixed real part $\Re t,$ and solve it with respect to
$\Im t.$ Hence, $z$ is not critical for $\left.\textup{Log}\right|_{V^*}.$
\end{proof}
Therefore, the contour $\mathcal{C}_{V^*}$ can be computed as the $\textup{Log}$-image of
the zeros of the ideal
\begin{equation}
\label{eq:cv}
(f,q_n z_1 f_{z_1}-q_1 z_n f_{z_n},\ldots, q_n z_{n-1} f_{z_{n-1}}-q_{n-1} z_n f_{z_n}),
\end{equation}
where $q$ runs through all real points $(q_1:\ldots:q_n)\in \mathbb{P}_\mathbb{R}^{n-1}$, (here w.l.o.g $q_n\neq 0$).
\section{Singularities of Phase Function}
Consider the function
$$
\Phi:\mathbb{P}^{n-1}\times V^*\longrightarrow \mathbb{C},\
\Phi(y,z)=\left< y, \log z\right> .
$$
Introduce the \textit{phase function} $\varphi_q:=\Phi(q,-)$, later we show that it is indeed a phase function
of some oscillating integral. Denote by $\textup{Crit}(\varphi_q)\subset V^*$
the set of critical (or stationary) points of function $\varphi_q$. It coincides with the preimage of the logarithmic Gauss map $\gamma_f$:
\begin{pro} \label{thm:3}
The relative critical locus of $\Phi$ coincides with the graph of $\gamma_f$:
$$ \textup{Crit}_{\mathbb{P}^{n-1}}(\Phi)=\Gamma_{\gamma_f}.
$$
\end{pro}
\begin{proof}
Assume $f_{z_n}\neq 0$, then we use local coordinates $z'$ on $V^*$, and consider the function $g(z')$ such that
$f(z',g(z'))\equiv 0$.
We obtain
$$
\partial\Phi(z,y)
/\partial z_i=\frac{y_i}{z_i}+\frac{y_n}{g(z')} \partial g(z')
/\partial {z_i},
\ i=1,\ldots, n-1.
$$
Up to a non-zero constant multiple the components of the gradient $\partial \Phi(z,y)/\partial z$
together with the defining polynomial $f(z)$ of $V^*$
give us the defining ideal (\ref{eq:cv}) of the fiber of $y$ by the logarithmic Gauss map $\gamma$.
\end{proof}
The last statement shows us that the $\textup{Log}$-image of $\textup{Crit}(\varphi_q)$ is contained in
the contour $\mathcal{C}_{V^*}$ of the amoeba $\mathcal{A}_{V^*}$, and
the tangent hyperplane to $\mathcal{C}_{V^*}$ at a point $\textup{Log}(z_0)$, $z_0\in \textup{Crit}\,\varphi_q$,
has normal vector~$q\in\mathbb{Z}^n-\{0\}$.
Another consequence of the above formula concerns the connection between the
singularities in the fibers of the phase function and the fibers of the logarithmic Gauss
map:
\begin{pro}\label{thm:4}
Let $(z_0,y_0)\in \Gamma_{\gamma_f}$ be a point of the graph of $\gamma_f$,
then the Jacobian matrix of $\gamma_f$
at $z_0$ coincides with the Hesse matrix of $\varphi_{y_0}$ at $z_0$ up to
multiplication with a regular constant diagonal matrix $D$:
$$ \textup{Hess}(\varphi_{y_0})(z_0) =D \cdot \textup{Jac}(\gamma_f)(z_0).
$$
\end{pro}
\begin{proof}
As before we assume $f_{z_n}(z_0)\neq 0$ and use local coordinates $z'$.
From~(\ref{eq:a}) we obtain the entries of the Jacobian matrix $\textup{Jac}(\gamma_f)$ of the map $\gamma_f$
$$
\textup{Jac}(\gamma_f)_{(i,j)}=-\left( z_i \frac{\partial^2 \log (g(z'))}{\partial z_i\partial z_j}+
\delta_{ij}\frac{\partial\log(g(z'))}{\partial z_j}\right), i,j=1,\ldots, n-1,
$$
where $\delta_{ij}$ is the Kronecker symbol.
Moreover,
$$
\frac{\partial^2 \varphi_{y}}{\partial z_i\partial z_j}=y_{n}\frac{\partial^2 \log(g(z'))}
{\partial z_i \partial z_j}
-\delta_{ij}\frac{y_{i}}{z_i^2}
$$
holds for the second derivatives of $\varphi_y$.
Since
$\displaystyle \frac{y_{0,i}}{z_{0,i}}=-y_{0,n} \frac{\partial \log(g(z'_0)) }{\partial z_i}$ at
a critical point $z_0$ of $\varphi_{y_0}$, we obtain the statement by putting
the $i$-th entry of $D$ to be $\displaystyle d_i=-\frac{y_{0,n}}{z_i}$.
\end{proof}
We obtain as corollary of the last proposition that for directions $y=q$ outside the ramification locus of
the logarithmic Gauss map $\gamma_f$ the phase function $\varphi_q$ has only Morse critical points.
\begin{cor}
The logarithmic Gauss map $\gamma_f$ is unramified at $q\in \mathbb{Z}^n-\{0\}$ iff
the phase function $\varphi_q$ has only Morse
critical points, i.e., non-degenerate singularities.
\end{cor}
\begin{proof}
The map $\gamma_f$ is not ramified over $y$ iff its Jacobian has full rank at all points of the fiber
$\gamma^{-1}(y)$.
From Proposition~3 we get
$$
\det(\frac{\partial^2 \varphi_q}{\partial z_i\partial z_j}(z_0))
=\frac{q_n^{n-1}}{z_{0,1}\cdots z_{0,{n-1}}}\det(Jac(\gamma_f)(z_0)).
$$
Hence, the Jacobian determinant does not vanish iff the Hessian is not zero at corresponding points.
\end{proof}
Next we want to discuss degenerated critical points of the phase function.
By Mather-Yao type theorem the ${\mathcal R}$-class (right-equivalence)
of an analytic function $h(z)\in \mathbb{C}\{z\}=:{\mathcal O}_n$ at an isolated critical point $z_0=0$
is equivalent to the isomorphy type of the Milnor algebra $Q_h:={\mathcal O}_n/(\partial h/\partial z)$,
but as $\mathbb{C}[t]$-algebra, the action of $t$ on $Q_h$ induced by multiplication with $h$, cf. \cite{BM}.
The isomorphy type of the associated singularity $(V(h),z_0)$, i.e. the ${\mathcal K}$-class (contact-equivalence) of $h(z)$, is equivalent to the isomorphy class of the Tjurina algebra $T_h=Q_h/(h)$ itself, cf. \cite{Yau}.
Obviously, these equivalence classes coincide for quasi-homogeneous functions
(because $\mu(h)=\tau(h)$, $T_h=Q_h$, $hQ=0$).
The Milnor algebra of the phase function at $z_0$ coincides with the local algebra of the fiber
$\gamma_f^{-1}(\gamma_f(z_0))$ at $z_0$.
\begin{cor}
If $(z_0,y_0)\in \Gamma_{\gamma_f}$, denote by
$Q_\varphi$ the Milnor algebra of the function $(\varphi_{y_0}(z)-y_0)$ at $z_0$, then we have
$$Q_\varphi={\mathcal O}_{\gamma_f^{-1}(y_0),z_0} \ \ \mbox{and}\ \
Q_\varphi/Ann(\mathbf{m}_Q) = {\mathcal O}_{Sing(\gamma_f^{-1}(y_0)),z_0}.$$
\end{cor}
\begin{proof}
By Proposition 3
the germs coincide:
$(Crit(\varphi_{y_0}),z_0) = (\gamma_f^{-1}(y_0),z_0)$. The algebra of the critical
locus is the Milnor algebra of $(\varphi_{y_0}(z)-y_0)$. By Proposition 4
the Jacobi determinant of
$\gamma_f$ at $z_0$ equals up to a constant multiple to the Hessian of $\varphi_{y_0}$ at $z_0$,
which generates the
annulator of the maximal ideal in the local complete intersection algebra $Q_\varphi$.
\end{proof}
A function $h\in {\mathcal O}_n$ with isolated critical point is called {\em almost quasihomogeneous}, if $\mu=\tau+1$.
This is equivalent to $hQ_h=Ann({\bf m}_Q)$.
Assume that the singularities in a fiber of a phase function are quasihomogeneous or almost quasihomogeneous,
then in spite of Mather-Yao type theorems these singularities are determined by the fiber germs of the
logarithmic Gauss map, because $Q_\varphi=T_\varphi$ or $Q_\varphi/(Ann(\bf{m}))=T_\varphi$, respectively.
Note, that all simple or unimodal critical points belong to these singularities.
The singularities of a phase function on their part determine the
asymptotic of corresponding oscillating integrals.
All degenerated critical points are lying over the singularities of the
discriminant ${\mathcal D}\subset
\mathbb{P}^{n-1}$ of $\gamma_f$. Many results can be found concerning the
connection between singularities of the discriminant and singularities in the fiber.
We try to discuss some
consequences with respect to our setting.
The finite map $\overline{\gamma}_f$ can be considered as family over $\mathbb{P}^{n-1}$
of complete
intersections (of relative dimension zero). Let $(X_0,0)$ be a germ of an
isolated complete intersection singularity, let
$X\rightarrow S$ its versal family with
discriminant $D\subset S$, the singularity of the discriminant $(D,0)$ determines
the special fiber
$(X_0,0)$ up to isomorphy by a result of Wirthm\"uller, cf. \cite{Wirt}. If $\dim (X_0)=0$
the multiplicity of the discriminant fulfills
$\textup{mult}(D,0)=\dim_\mathbb{C}({\mathcal O}_{X_0})-1=\dim_\mathbb{C}({\mathcal O}_{Sing(X_0)})$,
as a consequence of \cite{Le}, for instance.
This globalizes straightforwardly.
\begin{pro}
Let $\gamma:X\rightarrow S$ be a finite morphism with discriminant $D\subset S$ and
each $X_s$ is a complete intersection, then the following holds:
$$\textup{mult}_s(D)\geq \sum_{z_i\in X_s}\textup{mult}(Sing(X_s),z_i)=\sum_{z_i\in X_s}(\textup{mult}(X_s,z_i)-1).$$
Moreover, equality holds at $s\in S$, if $\gamma$
induces a versal deformation
of $X_s$.
\end{pro}
\begin{proof}
The local branches of $D$ at $s$ are corresponding to the discriminants $D_i$ of the germs $(X,z_i)
\rightarrow (D,s)$, hence the multiplicities of $D_i$ add up to the multiplicity of $D$. Any
family is locally induced from a versal one, hence the discriminant is induced by base change from
the discriminant of the versal family and its multiplicity cannot become smaller.
\end{proof}
Note, versality is an open property and corresponds to some kind of {\em stability} in the sense of Mather.
It is not clear to us whether (or under which additional assumptions)
the logarithmic Gauss map $\gamma_f$ for a generic function $f(z)$ with fixed Newton polyhedron ${\mathcal N}$
has this stability property. It holds in all computed examples. But, an answer needs further investigation.
Inspecting the classification of hypersurface singularities we get the types of possible critical points
for small multiplicities of the discriminant, which are listed in the following Corollary.
\begin{cor}
Given a Laurent polynomial $f(z)$, non-singular with respect to its Newton polyhedron, and let $\gamma$ be the
corresponding logarithmic Gauss map with discriminant $D\subset \mathbb{P}^{n-1}$.
Let $m=m(q):=\textup{mult}(D,q)$, then the following configurations are met for the fiber $F_q:=\gamma_f^{-1}(q)$, respectively
for the collection of critical points of the phase function $\varphi_q(z)$:
\begin{itemize}
\item $m=1$: $F_q$ has exactly one point $z_*$ of multiplicity 2,
$\varphi_q$ has non-degenerate critical points and one
$A_2$-singularity at $z_*$.
\item $m=2$: $F_q$ has either one point of multiplicity 3 or at most two points of multiplicity 2,
$\varphi_q$ has at most one $A_3$ or two $A_2$-points.
\item $m=3$: Besides $A_1$ can occur the following collections of critical points of $\varphi_q$:
one $D_4$ or one $A_4$ or a combination $k_2A_2+k_3A_3$ with
$k_2+2k_3\leq 3$.
\item $m\leq 6$: Type of critical set of $\varphi_q$: Only (simple) ADE-critical points can occur
$$\sum_{i\geq 1} k_iA_i+\sum_{i \geq 4} l_iD_i+\sum_{i=6}^8 n_iE_i,$$
such that
$$\sum i(k_i+l_i +n_i) \leq n!\,vol({\mathcal N})$$ and
$$\sum (i-1)(k_i+l_i+n_i)\leq m.$$
\item $m \leq 6$: all critical points are quasihomogeneous (and simple or unimodal).
\item $m \leq 14$: all critical points are almost quasihomogeneous (and simple or unimodal).
\end{itemize}
\end{cor}
The first critical points which are not almost quasihomogeneous are the bimodal exceptional
singularities with smallest Milnor number $\mu=16$, of type $Q_{16}$ or $U_{16}$, cf.~\cite{AVGZ}.
They can occur only at multiplicity $m\geq 15$.
\section{Representation of Diagonal Coefficient by Oscillating Integrals and its Phase Function}
In this section we return to Laurent series~(\ref{eq:1}) converging in $\Log^{-1}(E_\nu)$.
We explain the residue asymptotics formula for its diagonal
coefficient in the direction $q\in \mathbb{Z}^n\cap K_\nu$.
Recall, that the Laurent series coefficient can be represented in the form
$$
c_\alpha^\nu=\frac{1}{(2\pi\imath)^n}\int_{\Gamma_\nu}\,\frac{\omega}{z^{\alpha+\bf{1}}},
$$
where $\omega:=F(z)dz$ and the cycle $\Gamma_\nu$ is $n$-dimensional real torus
$\textup{Log}^{-1}(x_\nu), \ x_\nu\in E_\nu$.
The direction $q$ induces series of diagonal coefficients
\begin{equation}
\label{eq:2a}
c_{q\cdot k}^\nu=\frac{1}{(2\pi\imath)^n}\int_{\Gamma_\nu}\,\frac{\omega}{z^{q\cdot k+\bf{1}}}.
\end{equation}
We may assume that the point $x_\nu$ generates a line $L:=\mathbb{R}x_\nu\subset\mathbb{R}^n$,
which is transversal
to the boundary $\partial E_\nu$ and intersects it at a point $p$ and the normal vector at $p$
to $\partial E_\nu$ coincides with the vector~$q$.
In other words, $p$ is the $\Log$-image of points
$w^{(1)}(q),\ldots, w^{(N)}(q)$ from the fiber $\gamma^{-1}(q)$ of the logarithmic Gauss mapping.
The torus
$\Log^{-1}(p)\subset\Log^{-1}(L)$ intersects the hypersurface $V^*$
at most in $N\leq\textup{Vol}(\mathcal{N}_f)\cdot n!$ points.
Consider a neighbourhood $U_i$ in $\mathbb{C}^n$ of the point $w^{(i)}(q)$,
then $\Log^{-1}(L)$ intersects the hypersurface $V^*$ in $U_i$ along an
$(n-1)$-dimensional chain $h_i\subset V^*$.
It can be shown, cf.~\cite{Ts2} for the case $n=2$, that integral~(\ref{eq:2a})
is asymptotically equivalent for $k\to +\infty$ to the sum
\begin{equation}
\label{eq:4}
c_{q\cdot k}^{\nu}=
\frac{1}{(2\pi\imath)^{n-1}}\sum_{i=1}^{N}
\int_{h_i}\,\textup{res}\,\left(\frac{\omega}{z}\right)\cdot e^{-\left<q,\log z\right>\cdot k},
\end{equation}
where $\log z=(\log z_1,\ldots, \log z_n)$, $\textup{res}\, (\omega/z)$ is the residue form.
In local coordinates $z'=(z_1,\ldots, z_{n-1})$ of $V^*$ (assuming $f_{z_n}\neq 0$) we have
$\,\textup{res}\,(\frac{\omega}{z}) =\frac{g dz'}{z' \left. f_{z_n} \right|_{V^{*}}}$.
Therefore, the diagonal coefficient can be represented as the sum of oscillating integrals with the
{\em phase function} $\varphi_q(z')=\left.\left<q, \log z\right>\right|_{V^*}$.
The critical points of this phase function give the main contribution
to the asymptotic of such integrals. From Proposition~3 follows, that the support
of $h_i$ contains only one critical point of $\varphi_q,$ it is a point $w^{(i)}(q)\in \gamma^{-1}(q)$.
The asymptotics of an oscillating integral is most simple for Morse critical points.
In this case it is given by stationary phase method (also called saddle-point method, see~\cite{Wong}).
Corollary~1 of Proposition~4 states that for directions $y=q$ outside the ramification locus of
the logarithmic Gauss map $\gamma$ the phase function $\varphi_q$ has only Morse critical points.
The situation of a degenerated critical point is much more complicated.
First of all we are looking only for rational critical points!
By a result of Varchenko
some information about the asymptotics of an oscillating integral can be read from
the distance of the Newton diagram of the phase function at the corresponding point
in case of a Newton non-degenerated phase function (and then it depends only on the
${\mathcal K}$-equivalence class of the hypersurface singularity). Otherwise, the distance is only a lower bound.
So called {\em adapted coordinates} exist always in dimension 2, such that the phase function is
Newton non-degenerated. Adapted coordinates can be computed algorithmically, for more details
cf. \cite{Va} and \cite{IM}.
\section{Discussion of examples}
\begin{example}
Consider the smooth hypersurface $V^*(f)$ defined as a zero set of the polynomial
$$f=z_1^2 z_2 +z_1 z_2^2 -z_1 z_2 +a,\ a\in\mathbb{R}, \ a\not=0,\frac{1}{27},$$
which is non-degenerated for its Newton polyhedron. The cubic $V^*(f)$ is
a two-dimensional real torus with three removed points.
The solutions $z(y)=(z_1(y), z_2(y))$ of
\begin{equation}
\label{eq:ex1}
\left\{
\begin{array}{r}
z_1^2 z_2 +z_1 z_2^2 -z_1 z_2 +a=0\\
h:=(2y_2 -y_1) z_1^2 z_2 +(y_2-2y_1) z_1z_2^2+(y_1-y_2)z_1 z_2=0\\
\end{array}
\right.
\end{equation}
for fixed parameter $(y_1:y_2)\in \mathbb{P}^1$ are zeroes of ideal~(\ref{eq:cv}) and for real
parameter $y$ they are
projected to the contour $\mathcal{C}_{V^*}$
by $\textup{Log}$-map. We are interested in the real ramification locus of $\gamma_f$.
We compute the resultant of $f,h$ with respect to the variable $z_2$
$$
\begin{array}{ll}
Res(f, h):= & (-y_1^2+y_1 y_2 +2y_2^2) z_1^3+(2y_1^2-2y_1y_2-y_2^2)z_1^2\\
\ & +(-y_1^2+y_1 y_2)z_1+ 4a y_1^2 -4a y_1 y_2+a y_2^2.
\end{array}
$$
The multiplicity of an isolated zero $z(y)$ of system~(\ref{eq:ex1}) coincides with the
multiplicity of the zero $z_1(y)$ in $Res(f,h)$. The discriminant
of the polynomial $Res(f,h)$ with respect to $z_1$
is the homogeneous polynomial
$$
\begin{array}{ll}
D(y_1, y_2) &=(1-27a)(-2y_1+y_2)^2 ( 4ay_1^6-12a y_1^5 y_2 +(-3a+1)y_1^4y_2^2- \\
\ & -2(1-13a)y_1^3 y_2^3 + (-3a+1) y_1^2 y_2^4 -12a y_1 y_2^5+ 4a y_2^6)
\end{array}
$$
in variables $y_1,y_2$.
Interested in roots of (\ref{eq:ex1}) in $\mathbb{T}^2$ we can omit the factor $(-2y_1+y_2)^2$ in the last expression.
Substituting in $D(y)$ an affine parameter $\lambda=y_1/y_2$ we get the polynomial
$$
D(\lambda)= 4a\lambda^6-12a \lambda^5 +(-3a+1)\lambda^4 -2
(1-13a)\lambda^3 + (-3a+1) \lambda^2 -12a\lambda+ 4a,
$$
whose real zeroes $\lambda_i$ give the points $(\lambda_i:1)\in \mathbb{P}^1_{\mathbb{R}}$
of the real ramification locus of $\gamma_f$.
We have three real intervals of the parameter line $\mathbb{R}_a$:
for $a<0$ the polynomial $D(\lambda)$ has
six real roots, for $0<a<\frac{1}{27}$ and $\frac{1}{27}<a$
the polynomial $D(\lambda)$
has no real roots.
Choosing values of $a$ from the different intervals of $\mathbb{R}_a$ we obtain different configurations
of the contour $\mathcal{C}_{V^*}$ and the amoeba $\mathcal{A}_{V^*}$. Because the volume
$2!\cdot\textup{vol}(\mathcal{N}_f)=3$
does not depend on $a$, all these configurations have the following common property:
the number of preimages $\textup{Log}^{-1}(p)$ of a point $p\in \mathcal{C}_{V^*}$
with normal vector $(y_1, y_2)\in \mathbb{R}^2$ is equal to three, provided we count such preimages, which
are solutions to~(\ref{eq:ex1}) for corresponding $(y_1:y_2)\in \mathbb{P}^1_{\mathbb{R}}$,
with their multiplicity in~(\ref{eq:ex1}). Hence, one can find for every $\lambda\in \mathbb{R}$
three points on $\mathcal{C}_{V^*}$ with the normal vector $(\lambda, 1)$. Moreover, each one lies on its own
colored or black part of the contour (see Fig.~1).
On the left of Fig.~1, six black points on $\mathcal{C}_{V^*}$ are images of pleat singularities of the mapping
$\left.\textup{Log}\right|_{V^*}$,
they correspond to values $\lambda_i$ that belong to the real ramification locus of $\gamma_f$.
Although, for $a>0$ the real ramification locus of $\gamma_f$ is empty, we can distinguish two situations.
If $0<a<1/27$ the hypersurface $V^*(f)$ is a complexification of the so-called Harnack curve
and the complement of its amoeba has the maximal number
of components. In this case $\left.\textup{Log} \right|_{V^*}$ has only fold singularities,
which coincide with $V^*(f)\cap \mathbb{R}^2$.
If $a>1/27$ the complement of the amoeba $\mathcal{A}_{V^*}$ has no bounded component, and the mapping
$\left.\textup{Log}\right|_{V^*}$ has three pleat singularities and other singularities are folds.
\begin{center}
\includegraphics{pic02.pdf}\hskip .25cm
\includegraphics{pic01.pdf}\hskip .25cm
\includegraphics{pic00.pdf}\\
{\small Fig.~1. The contour and the amoeba (shaded) for the polynomial $f=z_1^2 z_2 +z_1 z_2^2 -z_1 z_2 +a$:\\ on the left $a<0$, in the middle $0<a<1/27$,
on the right $a>1/27$.}
\end{center}
Therefore, for $a>0$ the $\gamma_f$-fiber of any rational $\lambda$ contains only Morse critical points of the phase function.
For example, set the parameter $a=3/100$ then the $\gamma_f$-fiber of $\lambda=1/3$ consists of the Morse points
$(3/10, 1/2)$, $(7/40+\sqrt{57}/40, 9/8-\sqrt{57}/8)$ and $(7/40-\sqrt{57}/40, 9/8 +\sqrt{57}/8)$.
For $a<0$ we can get degenerated rational points in a real ramification locus, e.g. there are six rational points
$-2, -1/2, 2/3, 3/2, 1/3, 3$ in the real ramification locus of $\gamma_f$ for $a=-9/10$. The $\gamma_f$-fiber of such points
has a simple point and an $A_2$-point of the phase function.
\end{example}
\begin{example}
We consider the polynomial $f$ in $n=3$ variables and non-degenerated for its Newton polyhedron,
$$
f=1+z_1+z_2+z_3+3z_1 z_2 +3 z_1 z_3 + 3 z_2 z_3+11 z_1 z_2 z_3.
$$
As in Example~1 the real ramification locus of $\gamma_f$ is determined by the
following system
\begin{equation}
\label{eq:ex2}
\left\{
\begin{array}{r}
1+z_1+z_2+z_3+3z_1 z_2 +3 z_1 z_3 + 3 z_2 z_3+11 z_1 z_2 z_3 =0,\\
y_3 z_1 - y_1 z_3 + 3y_3 z_1 z_2 +(3y_3-3 y_1)z_1 z_3 -3y_1 z_2 z_3 \hskip .75cm \\
+(11y_3 -11 y_1) z_1 z_2 z_3 =0,\\
y_3 z_2 - y_2 z_3 + 3y_3 z_1 z_2 -3y_2 z_1 z_3+(-3y_2+3y_3)z_2 z_3 \hskip .75cm \\
+(11y_3 -11 y_2) z_1 z_2 z_3 =0.\\
\end{array}
\right.
\end{equation}
With similar computations we obtain the discriminant $D(y)$ of the logarithmic Gauss map.
\begin{equation}
\label{eq:ex22}
D(y):=y_1^4\cdot(y_2-y_3)^2\cdot (4y_1+5y_2+5y_3)^2\cdot d(y),
\end{equation}
here $d(y)$ is a homogeneous polynomial of degree~$12$, it consists of 91 terms. Its Newton's
polyhedron is a triangle with vertices $(12,0,0)$, $(0,12,0)$ and $(0,0,12)$.
We do not consider zeroes of the first three factors in~(\ref{eq:ex22}), because
they do not give us multiple roots of~(\ref{eq:ex2}) in the torus.
The ramification locus of $\gamma_f$
is given by zero set of $d(y)$. Let $\lambda_1=y_1/y_3$, $\lambda_2=y_2/y_3 $
be coordinates in affine part of $\mathbb{P}^{2}_{\mathbb{R}}$, where $y_3\neq 1$.
Fig.~2 shows the zero set of $d(\lambda_1,\lambda_2,1)=d(y)/y_3^{12}$, which
coincides with the affine part of the real ramification locus of $\gamma_f$.
\begin{center}
\includegraphics{pic03.pdf}
\includegraphics{pic04.pdf}
{\small Fig.~2. The real ramification locus of $\gamma_f$.}
\end{center}
The red points $(1/9,1/9),\ (1/3,1/3),\ (1,3),\ (3,1),\ (1,9),\ (9,1)$ on Fig.~2 are
the degenerated rational critical points
of the discriminant with Milnor number $\mu=2$.
This example of the polynomial $f$ is a special one,
because the existence of rational degenerated points in a real ramification locus is
not a generic property. But, we are interested in such points, because they
lead to degenerated critical points of a phase function.
In this example the $\gamma_f$-fiber of any $A_2$-point
contains exactly one $A_3$-critical point of the phase function (see Appendix for details).
\end{example}
\mbox{}\\[1mm]
{\Large \bf Appendix: \\ Computation with {\sc Singular} (some experiences)}\\[1mm]
The computer algebra system {\sc Singular}, cf. \cite{sing}, was used for the computation of
examples. We tried several strategies for computing the discriminant of the Log-Gauss map with
different success, i.e. to get a result for non-trivial examples without
overflow and in reasonable time. Here we give a small introduction on how to proceed in {\sc Singular},
demonstrated with the equation of Example~2.
Start with a base ring that contains the ideal $I$ of the graph of the log Gauss map $\gamma_f$ of a
polynomial $f=f(z)$ and compute $I$, (here $n=3$):
\begin{quote}
{\tt ring R=0,(y1,y2,y3,z1,z2,z3),dp; \\
poly f=1+z1+z2+z3+3*z1*z2 +3*z1*z3+3*z2*z3+11*z1*z2*z3;\\
matrix A[2][3] = z1*diff(f,z1),z2*diff(f,z2),z3*diff(f,z3),y1,y2,y3;\\
ideal I = f,minor(A,2);
}
\end{quote}
Next we project the graph restricted to some affine chart
$U_3:=\{y_3\not=0\}$ into $\mathbb{A}^3:=U_3\times \mathbb{A}^1$ ($\mathbb{A}^1_1$ --- a
coordinate axis of $\mathbb{A}^3_z$). The image is a hypersurface defined by a
polynomial $h(y_1,y_2,z_1)$, which we can close up in $\mathbb{P}^2_y$
by homogenizing in the $y$'s.
Using the elimination of variables,
the multiplicities of multiple factors may be lost, but this does not affect
the result.
\\
\begin{quote}
{\tt
I = subst(I,y3,1);
ideal J = eliminate(I,z2*z3);\\
poly h1 = J[1];
}
\end{quote}
The choice of the projection direction was good if $\deg_{z_1}(h_1)=n!\,\mathrm{vol}({\mathcal N})=6$.
The discriminant variety of $\gamma_f$ is contained in the discriminant hypersurface
of the projection $V(h_1)\subset U_3\times \mathbb{A}^1_1\longrightarrow U_3$,
computed in the next step.
\\
\begin{quote}
{\tt
poly d1 = resultant(h1,diff(h1,z1),z1);\\
d1 = homog(d1,y3);\\
list Ld = factorize(d1);
}
\end{quote}
The plane curve $V(d_1)\subset \mathbb{P}^2$ has several components, it may have components
with certain multiplicities, some of them induced from the closure $V^*(f)$ or
not belonging to the discriminant. If our polynomial is generic, then we expect the discriminant
of $\gamma_f$ (i.e. restricted to the torus) being irreducible. We should test which factor is correct.
Some components of $V(d_1)$ have empty fiber with respect to $\gamma_f$ or no multiple points in its
$\gamma_f$-fibers.
We can sometimes reduce the number of factors as follows: compute for any coordinate $z_i$ (as above for $i=1$)
polynomials $h_i$ and $d_i$ and factorize only $d:=gcd(d_1,\ldots ,d_n)$.
\\
Having found the equation of
the discriminant polynomial $d_0(y)$, we can compute its (discrete) singular locus.
\\
\begin{quote}
{\tt
poly d0 = Ld[1][2];\hspace{2cm}\mbox{\it (choose the right factor in this example)}\\
d0 = subst(d0,y3,1); \\
ring S = 0,(y1,y2,y3),dp;\\
poly d0 = imap(R,d0);\\
ideal sl = slocus(d0);\\
list Lsl = primdecGTZ(sl);
}
\end{quote}
Here, the singular locus has six rational double points $Q_1=(1,3)$,
$Q_2=(\frac{1}{9},\frac{1}{9})$, $Q_3=(1,9)$, $Q_4=(\frac{1}{3},\frac{1}{3})$,
$Q_5=(9,1)$, $Q_6=(3,1)$
and more irrational singular points.
We choose $Q_1$ and check, that it is an $A_2$-singularity of $D$.
\\
\begin{quote}
{\tt
show(Lsl[2]);\hspace{26mm}\mbox{\it (choose one of the singular points of $D$)}\\
ring S' = 0,(y1,y2),ds;\\
poly d0 = imap(R,d0);\\
d0 = subst(d0,y1,y1+1); \hspace{7mm}\mbox{\it (translate that singularity to zero)}\\
d0 = subst(d0,y2,y2+3);\\
"mu =",milnor(d0);\hspace{2cm}\mbox{\it (Milnor number of the singularity)}\\
}
\end{quote}
Compute the $\gamma_f$-fiber of $Q_1$. It has 3 simple points and exactly one point
$P_*=(-1,-\frac{1}{3},-1)$ of
multiplicity 3, being an $A_3$-point of the phase function.
\\
\begin{quote}
{\tt
setring R;\\
I = subst(I,y1,1); I = subst(I,y2,3);\\
ring R0 = 0,(z1,z2,z3),dp;\\
ideal I = imap(R,I);\\
list Lfib = primdecGTZ(I);\hspace{2cm}\mbox{\it (list contains the points of the fiber)}.\\
option(redSB);\\
show(std(Lfib[1][2]));\\
"mult =",vdim(std(Lfib[1][1]));
}
\end{quote}
Similar computations lead to similar results at the other 5 rational singularities of
the discriminant.
\end{document} |
\begin{document}
\begin{abstract}
We propose the notion of GAS numerical semigroup which generalizes both almost symmetric and 2-AGL numerical semigroups.
Moreover, we introduce the concept of almost canonical ideal which generalizes the notion of canonical ideal in the same way almost symmetric numerical semigroups generalize symmetric ones. We prove that a numerical semigroup with maximal ideal $M$ and multiplicity $e$ is GAS if and only if $M-e$ is an almost canonical ideal of $M-M$. This generalizes a result of Barucci about almost symmetric semigroups and a theorem of Chau, Goto, Kumashiro, and Matsuoka about 2-AGL semigroups. We also study the transfer of the GAS property from a numerical semigroup to its gluing, numerical duplication and dilatation.
\end{abstract}
\keywords{Almost symmetric numerical semigroup, almost Gorenstein ring, 2-AGL semigroup, 2-AGL ring, canonical ideal.}
\maketitle
\section*{Introduction}
The notion of Gorenstein ring turned out to have great importance in commutative algebra, algebraic geometry and other areas of mathematics and in the last decades many researchers have developed generalizations of this concept obtaining rings with similar properties in certain respects. With this aim, in 1997 Barucci and Fr\"oberg \cite{BF} introduced the notion of almost Gorenstein ring, inspired by numerical semigroup theory. We recall that a numerical semigroup $S$ is simply an additive submonoid of the set of the natural numbers $\mathbb{N}$ with finite complement in $\mathbb{N}$. The simplest way to relate it to ring theory is by associating with $S$ the ring $k[[S]]=k[[t^s \mid s \in S]]$, where $k$ is a field and $t$ is an indeterminate. Actually it is possible to associate a numerical semigroup $v(R)$ with every one-dimensional analytically irreducible ring $R$. In this case a celebrated result of Kunz \cite{K} ensures that $R$ is Gorenstein if and only if $v(R)$ is a symmetric semigroup, see also \cite[Theorem 4.4.8]{BH} for a proof in the particular case of $k[[S]]$.
In \cite{BF} the notions of almost symmetric numerical semigroup and almost Gorenstein ring are introduced, where the latter is limited to analytically unramified rings. It turns out that $k[[S]]$ is almost Gorenstein if and only if $S$ is almost symmetric.
More recently this notion has been generalized to the case of one-dimensional local rings \cite{GMP} and to higher dimension \cite{GTT}. Moreover, in \cite{CGKM} the notion of $n$-AGL ring is introduced in order to stratify the Cohen-Macaulay rings. Indeed, a ring is almost Gorenstein if and only if it is either $1$-AGL or $0$-AGL, with $0$-AGL equivalent to being Gorenstein. In this respect $2$-AGL rings are close to being almost Gorenstein and for this reason their properties have been deepened in \cite{CGKM,GIT}. In \cite{CGKM} the numerical semigroup case is also studied, where $2$-AGL numerical semigroups are close to being almost symmetric.
In this paper we introduce the class of {\em Generalized Almost Symmetric numerical semigroups}, briefly GAS numerical semigroups, that includes symmetric, almost symmetric and 2-AGL numerical semigroups, but not 3-AGL. Moreover, if $S$ has maximal embedding dimension and it is GAS, then it is either almost symmetric or 2-AGL.
Our original motivation to introduce this class is a result on 2-AGL numerical semigroups that partially generalize a property of almost symmetric semigroups. More precisely, let $S$ be a numerical semigroup with multiplicity $e$ and let $M$ be its maximal ideal. In \cite[Corollary 8]{BF} it is proved that $M-M$ is symmetric if and only if $S$ is almost symmetric with maximal embedding dimension. If we do not assume that $S$ has maximal embedding dimension, it holds that $S$ is almost symmetric if and only if $M-e$ is a canonical ideal of $M-M$ (indeed $S$ has maximal embedding dimension exactly when $M-e=M-M$, see \cite[Theorem 5.2]{B}). In \cite[Corollary 5.4]{CGKM} it is shown that $S$ is 2-AGL if and only if $M-M$ is almost symmetric and not symmetric, provided that $S$ has maximal embedding dimension.
Hence, it is natural to investigate what happens to $M-M$, for a 2-AGL semigroup, if we do not make any assumptions on its embedding dimension. It turns out that $M-e$ is an ideal of $M-M$ that satisfies some equivalent conditions, which are the analogue for ideals of the defining conditions of an almost symmetric semigroup (cf. Definition 2.1 and Proposition \ref{almost canonical ideal});
for this reason we called the ideals in this class \emph{almost canonical ideals}. However the converse is not true:
there exist numerical semigroups $S$ such that $M-e$ is an almost canonical ideal of $M-M$, but that are not 2-AGL.
This fact led us to look for those numerical semigroups satisfying this property, and we found that these
semigroups naturally generalize 2-AGL semigroups (this is evident if we look at $2K\setminus K$, where $K$ is the canonical ideal of $S$, cf. Proposition 3.1 and Definition 3.2); moreover, as we said above this class coincides with the union of 2-AGL and almost symmetric semigroups, if we assume maximal embedding dimension; hence we called them Generalized Almost Symmetric (briefly GAS). It turns out that GAS semigroups are interesting under many aspects; for example, if $S$ is GAS, it is possible to control both the semigroup generated by its canonical ideal (that plays a fundamental role in \cite{CGKM}; cf. Theorem \ref{Livelli più alti}) and its pseudo-Frobenius numbers (cf. Proposition \ref{PF GAS}).
Hence, in this paper, after recalling the basic definitions and notations, we introduce, in Section 2, the concept of almost canonical ideal.
We show under which respect they are a generalization of canonical ideals and we notice that, similarly to the canonical case, a numerical semigroup $S$ is almost symmetric if and only if it is an almost canonical ideal of itself. Moreover, we prove several equivalent conditions for a semigroup ideal to be almost canonical (cf. Proposition \ref{almost canonical ideal}) and we show how to find all the almost canonical ideals of a numerical semigroup and to count them (Corollary \ref{Number of almost canonical ideals}).
In Section 3 we develop the theory of GAS semigroups proving many equivalent conditions (see Proposition \ref{Characterizations GAS}), exploring their properties (cf. Theorem \ref{Livelli più alti} and Proposition \ref{PF GAS}) and relating them with other classes of numerical semigroups that have been recently introduced to generalize almost symmetric semigroups. The main result is Theorem \ref{T. Almost Canonical ideal of M-M}, where it is proved that $S$ is GAS if and only if $M-e$ is an almost canonical ideal of $M-M$.
Finally in Section 4 we study the transfer of the GAS property from $S$ to some numerical semigroup constructions: gluing in Theorem \ref{gluing}, numerical duplication in Theorem \ref{Numerical duplication S-<K>} and dilatation in Proposition \ref{dilatation}.
Several computations are performed by using the GAP system \cite{GAP} and, in particular, the NumericalSgps package \cite{DGM}.
\section{Notation and basic definitions}
A numerical semigroup $S$ is a submonoid of the natural numbers $\mathbb{N}$ such that $|\mathbb{N} \setminus S| < \infty$. Therefore, there exists the maximum of $\mathbb{N} \setminus S$ that is said to be the Frobenius number of $S$ and it is denoted by $\F(S)$. Given $s_1, \dots, s_{\nu} \in \mathbb{N}$ we set $\langle s_1, \dots, s_{\nu} \rangle=\{\lambda_1 s_1 + \dots + \lambda_{\nu} s_{\nu} \mid \lambda_1, \dots, \lambda_{\nu} \in \mathbb{N} \}$ which is a numerical semigroup if and only if $\gcd(s_1, \dots, s_{\nu})=1$. We say that $s_1, \dots, s_{\nu}$ are minimal generators of $\langle s_1, \dots, s_{\nu} \rangle$ if it is not possible to delete one of them obtaining the same semigroup. It is well-known that a numerical semigroup has a unique system of minimal generators, which is finite, and its cardinality is called the embedding dimension of $S$. The minimum non-zero element of $S$ is said to be the multiplicity of $S$ and we denote it by $e$. It is always greater than or equal to the embedding dimension of $S$ and we say that $S$ has maximal embedding dimension if they are equal.
Unless otherwise specified, we assume that $S \neq \mathbb{N}$.
A set $I \subseteq \mathbb{Z}$ is said to be a relative ideal of $S$ if $I+S\subseteq I$ and there exists $z \in S$ such that $z+I \subseteq S$. If it is possible to choose $z=0$, i.e. $I \subseteq S$, we simply say that $I$ is an ideal of $S$. Two very important relative ideals are $M(S)=S\setminus \{0\}$, which is an ideal and it is called the maximal ideal of $S$, and $K(S)=\{x \in \mathbb{N} \mid \F(S)-x \notin S\}$. We refer to the latter as the standard canonical ideal of $S$ and we say that a relative ideal $I$ of $S$ is canonical if $I=x+K(S)$ for some $x \in \mathbb{Z}$. If the semigroup is clear from the context, we write $M$ and $K$ in place of $M(S)$ and $K(S)$.
Given two relative ideals $I$ and $J$ of $S$, we set $I-J = \{x \in \mathbb{Z} \mid x+J \subseteq I\}$ which is a relative ideal of $S$. For every relative ideal $I$ it holds that $K-(K-I)=I$, in particular $K-(K-S)=S$. Moreover, an element $x$ is in $I$ if and only if $\F(S)-x \notin K-I$, see \cite[Hilfssatz 5]{J}. As a consequence we get that the cardinalities of $I$ and $K-I$ are equal.
Also, if $I \subseteq J$ are two relative ideals, then $|J \setminus I|=|(K-I)\setminus (K-J)|$.
We now collect some important definitions that we are going to generalize in the next section.
\begin{definition} \label{Basic definitions} \rm Let $S$ be a numerical semigroup.
\begin{enumerate}
\item The {\it pseudo-Frobenius numbers} of $S$ are the elements of the set $\PF(S)=(S-M)\setminus S$.
\item The {\it type} of $S$ is $t(S)=|\PF(S)|$.
\item $S$ is {\it symmetric} if and only if $S=K$.
\item $S$ is {\it almost symmetric} if and only if $S-M=K \cup \{\F(S)\}$.
\end{enumerate}
\end{definition}
We note that $M-M=S \cup \PF(S)$. Given $0 \leq i \leq e-1$, let $\omega_i$ be the smallest element of $S$ that is congruent to $i$ modulo $e$.
A fundamental tool in numerical semigroup theory is the so-called Ap\'ery set of $S$ that is defined as $\Ap(S)=\{\omega_0=0, \omega_1, \dots, \omega_{e-1}\}$.
In $\Ap(S)$ we define the partial ordering $x \leq_S y$ if and only if $y= x+s$ for some $s \in S$ and we denote the maximal elements of $\Ap(S)$ with respect to $\leq_S$ by ${\rm Max}_{\leq_S}(\Ap(S))$. With this notation $\PF(S)=\{\omega -e \mid \omega \in {\rm Max}_{\leq_S}(\Ap(S)) \}$, see \cite[Proposition 2.20]{RG}.
We also recall that $S$ is symmetric if and only if $t(S)=1$, that is also equivalent to say that $k[[S]]$ has type $1$ for every field $k$, i.e. $k[[S]]$ is Gorenstein. Also for almost symmetric semigroups many useful characterizations are known, for instance it is easy to see that our definition is equivalent to $M+K \subseteq M$, but see also \cite[Theorem 2.4]{N}
for another useful characterization related to the Ap\'ery set of $S$ and its pseudo-Frobenius numbers.
\section{Almost canonical ideals of a numerical semigroup}
If $I$ is a relative ideal of $S$, the set $\mathbb{Z}\setminus I$ has a maximum that we denote by $\F(I)$. We set $\widetilde{I}=I+(\F(S)-\F(I))$, that is the unique relative ideal $J$ isomorphic to $I$ for which $\F(S)=\F(J)$, and we note that $\widetilde{I} \subseteq K \subseteq \mathbb{N}$ for every $I$.
The following is a generalization of Definition \ref{Basic definitions}.
\begin{definition} \rm
Let $I$ be a relative ideal of a numerical semigroup $S$.
\begin{enumerate}
\item The {\it pseudo-Frobenius numbers} of $I$ are the elements of the set $\PF(I)=(I-M)\setminus I$.
\item The {\it type} of $I$ is $t(I)=|\PF(I)|$.
\item $I$ is {\it canonical} if and only if $\widetilde{I}=K$.
\item $I$ is {\it almost canonical} if and only if $\widetilde{I}-M=K \cup \{\F(S)\}$.
\end{enumerate}
\end{definition}
\begin{remark} \rm \label{Rem as}
{\bf 1.} $S$ is an almost canonical ideal of itself if and only if it is an almost symmetric semigroup. \\
{\bf 2.} $M$ is an almost canonical ideal of $S$ if and only if $S$ is an almost symmetric semigroup. Indeed, $M-M=S-M$, since $S \neq \mathbb{N}$. Moreover, $t(M)=t(S)+1$. \\
{\bf 3.} It holds that $K-M=K \cup \{\F(S)\}$. One containment is trivial, so let $x \in ((K-M) \setminus (K \cup \{\F(S)\}))$. Then $0 \neq \F(S)-x \in S$ and, thus, $\F(S)=(\F(S)-x)+x \in M+ (K-M) \subseteq K$ yields a contradiction. In particular, a canonical ideal is almost canonical. \\
{\bf 4.} Since $\F(S)=\F(\widetilde{I})$, it is always in $\widetilde{I}-M$. Moreover, we claim that $(\widetilde{I}-M) \subseteq K \cup \{\F(S)\}$. Indeed, if $x \in (\widetilde{I}-M)\setminus\{\F(S)\}$ and $x \notin K$, then $\F(S)-x \in M$ and $\F(\widetilde{I})=\F(S)=(\F(S)-x)+x \in \widetilde{I}$.
In addition, $\widetilde{I}$ is always contained in $\widetilde{I}-M$ because it is a relative ideal of $S$. Hence, $I$ is an almost canonical ideal of $S$ if and only if
$K \setminus \widetilde{I} \subseteq (\widetilde{I}-M)$.
\end{remark}
Given a relative ideal $I$ of $S$, the Ap\'ery set of $I$ is $\Ap(I)=\{i \in I \mid i-e \notin I\}$. As in the semigroup case, in $\Ap(I)$ we define the partial ordering $x \leq_S y$ if and only if $y= x+s$ for some $s \in S$ and we denote by ${\rm Max}_{\leq_S}(\Ap(I))$ the maximal elements of $\Ap(I)$ with respect to $\leq_S$.
\begin{proposition} Let $I$ be a relative ideal of $S$. The following statements hold:
\begin{enumerate}
\item $\PF(I)= \{i-e \mid i \in {\rm Max}_{\leq_S}(\Ap(I)) \}$;
\item $I$ is canonical if and only if its type is $1$.
\end{enumerate}
\end{proposition}
\begin{proof}
(1) An integer $i \in I$ is in ${\rm Max}_{\leq_S}(\Ap(I))$ if and only if $i-e \notin I$ and $s+i \notin \Ap(I)$, i.e. $s+i-e \in I$, for every $s \in M$. This is equivalent to saying that $i-e \in (I-M)\setminus I=\PF(I)$. \\
(2) Since $\F(S)\in \widetilde I-M$, we have $t(\widetilde I)=t(I)=1$ if and only if $\widetilde{I}-M=\widetilde I \cup \{\F(S)\}$. Therefore, a canonical ideal has type 1 by Remark \ref{Rem as}.3. Conversely, assume that $t(\widetilde{I})=1$ and let $x \notin \widetilde{I}$. Since $\widetilde{I} \subseteq K$, we only need to prove that $x \notin K$. By (1), there is a unique maximal element in $\Ap(\widetilde{I})$ with respect to $\leq_S$ and, clearly, it is $\F(S)+e$. Let $0 \neq \lambda \in \mathbb{N}$ be such that $x+ \lambda e \in \Ap(\widetilde{I})$. Then, there exists $y \in S$ such that $x+\lambda e + y = \F(S)+e$ and $x=\F(S)-(y+(\lambda-1)e) \notin K$, since $y+(\lambda-1)e \in S$.
\end{proof}
Let $g(S)=|\mathbb{N}\setminus S|$ denote the genus of $S$ and let $g(I)=|\mathbb{N}\setminus \widetilde{I}|$. We recall that $2g(S) \geq \F(S) + t(S)$ and the equality holds if and only if $S$ is almost symmetric, see, e.g., \cite[Proposition 2.2 and Proposition-Definition 2.3]{N}.
\begin{proposition} \label{almost canonical ideal}
Let $I$ be a relative ideal of $S$. Then
$g(I)+g(S) \geq \F(S) + t(I)$.
Moreover, the following conditions are equivalent:
\begin{enumerate}
\item $I$ is almost canonical;
\item $g(I)+g(S)=\F(S)+t(I)$;
\item $\widetilde{I}-M=K-M$;
\item $K-(M-M) \subseteq \widetilde{I}$;
\item If $x \in \PF(I)\setminus \{\F(I)\}$, then $\F(I)-x \in \PF(S)$.
\end{enumerate}
\end{proposition}
\begin{proof}
Clearly, $t(I)=t(\widetilde{I})$ and $g(I)-t(\widetilde{I})=|\mathbb{N} \setminus \widetilde{I}|-|(\widetilde{I}-M)\setminus \widetilde{I}|=|\mathbb{N}\setminus (\widetilde{I}-M)|$. Moreover, since $\F(S)+1-g(S)$ is the number of the elements of $S$ smaller than $\F(S)+1$, it holds that $\F(S)-g(S)=|\mathbb{N}\setminus K|-1=|\mathbb{N} \setminus (K \cup \{\F(S)\})|$. We have $\widetilde{I}-M \subseteq K \cup \{\F(S)\}$ by Remark \ref{Rem as}.4, then $g(I)-t(I) \geq \F(S) -g(S)$ and the equality holds if and only if $\widetilde{I}-M = K \cup \{\F(S)\}$, i.e. $I$ is almost canonical. Hence, (1) $\Leftrightarrow$ (2). \\
(1) $\Leftrightarrow$ (3). We have already proved that $K-M=K \cup \{\F(S)\}$ in Remark \ref{Rem as}.3. \\
(1) $\Rightarrow$ (4). The thesis is equivalent to $M-M \supseteq K-\widetilde{I}$. Let $x \in K-\widetilde{I}$ and assume by contradiction that there exists $m \in M$ such that $x+m \notin M$. Then, $\F(S)-x-m \in K \cup \{\F(S)\}=\widetilde{I}-M$ and, so, $\F(S)-x \in \widetilde{I}$. Since $x \in K-\widetilde{I}$, this implies $\F(S) \in K$, that is a contradiction. \\
(4) $\Rightarrow$ (1). Let $x \in K$. It is enough to prove that $x \in \widetilde{I}-M$. Suppose by contradiction that there exists $m \in M$ such that $x+m \notin \widetilde{I}\supseteq K-(M-M)$. In particular, $x+m\notin K-(M-M)$ and so $\F(S)-(x+m) \in M-M$. This implies $\F(S)-x \in M$, that is a contradiction because $x \in K$. \\
(1) $\Rightarrow$ (5) We notice that $\PF(\widetilde{I})=\{x+\F(S)-\F(I) \mid x \in \PF(I)\}$. Let $x \in \PF(I)\setminus \{\F(I)\}$ and let $y=x+\F(S)-\F(I) \in \PF(\widetilde I) \setminus \{ \F(S)\}$. We first note that $\F(S)-y \notin S$, otherwise $\F(S)=y+(\F(S)-y) \in \widetilde I$. Assume by contradiction that $\F(S)-y \notin \PF(S)$, i.e. there exists $m \in M$ such that $\F(S)-y+m \notin S$. This implies that $y-m \in K \subseteq \widetilde{I}-M$ by (1) and, thus, $y=(y-m)+m \in \widetilde I$ yields a contradiction. Hence, $\F(I)-x=\F(S)-y \in \PF(S)$. \\
(5) $\Rightarrow$ (4) Assume by contradiction that there exists $x \in (K-(M-M))\setminus \widetilde I$. It easily follows from the definition that there is $s \in S$ such that $x+s \in \PF(\widetilde{I})$. Then, $\F(S)-x-s \in \PF(S) \cup \{0\} \subseteq M-M$ by (5) and $\F(S)-s=x +(\F(S)-x-s) \in (K-(M-M)) + (M-M) \subseteq K$ gives a contradiction.
\end{proof}
\begin{remark} \rm
{\bf 1.} In \cite[Theorem 2.4]{N} it is proved that a numerical semigroup $S$ is almost symmetric if and only if $\F(S)-f \in \PF(S)$ for every $f \in \PF(S) \setminus \{\F(S)\}$. Hence, the last condition of Proposition \ref{almost canonical ideal} can be considered a generalization of this result. \\
{\bf 2.} Almost canonical ideals naturally arise characterizing the almost symmetry of the numerical duplication $S \! \Join^b \! I$ of $S$ with respect to the ideal $I$ and $b \in S$, a construction introduced in \cite{DS}. Indeed \cite[Theorem 4.3]{DS} says that $S \! \Join^b \! I$ is almost symmetric if and only if $I$ is almost canonical and $K-\widetilde{I}$ is a numerical semigroup. \\
{\bf 3.} Let $T$ be an almost symmetric numerical semigroup with odd Frobenius number (or, equivalently, odd type). Let $b$ be an odd integer such that $2b \in T$ and set $I=\{x \in \mathbb{Z} \mid 2x+b \in T\}$. Then, \cite[Proposition 3.3]{S} says that $T$ can be realized as a numerical duplication $T=S \! \Join^b \! I$, where $S=T/2=\{y \in \mathbb{Z} \mid 2y \in T\}$, while \cite[Theorem 3.7]{S} implies that $I$ is an almost canonical ideal of $S$. In general this is not true if the Frobenius number of $T$ is even.
\end{remark}
Since $\F(K-(M-M))= \F(\widetilde{I})=\F(K)$ and $\widetilde{I} \subseteq K$ for every relative ideal $I$, Condition (4) of Proposition \ref{almost canonical ideal} allows to find all the almost canonical ideals of a numerical semigroup. Clearly it is enough to focus on the relative ideals with Frobenius number $\F(S)$.
\begin{corollary} \label{Number of almost canonical ideals}
Let $S$ be a numerical semigroup with type $t$.
If $I$ is almost canonical, then $t(I)\leq t+1$. Moreover, for every integer $i$ such that $1 \leq i \leq t+1$, there are exactly $\binom{t}{i-1}$ almost canonical ideals of $S$ with Frobenius number $\F(S)$ and type $i$. In particular, there are exactly $2^{t}$ almost canonical ideals of $S$ with Frobenius number $\F(S)$.
\end{corollary}
\begin{proof}
Let $C=\{s \in S \mid s>\F(S)\}=K-\mathbb{N}$ be the conductor of $S$ and let $n(S)=|\{s \in S \mid s<\F(S)\}|$. It is straightforward to see that $g(S)+n(S)=\F(S)+1$. If $I$ is almost canonical, Proposition \ref{almost canonical ideal} implies that
\begin{align*}
t(I)&=g(I)+g(S)-\F(S)\leq |\mathbb{N}\setminus (K-(M-M))|-n(S)+1= \\
&=|(M-M)\setminus (K-\mathbb{N})|-n(S)+1
=|(M-M)\setminus C|-n(S)+1=\\
&=|(M-M)\setminus S|+|S \setminus C|-n(S)+1=t+n(S)-n(S)+1=t+1.
\end{align*}
By Proposition \ref{almost canonical ideal} an ideal $I$ with $\F(I)=\F(S)$ is almost canonical if and only if $K-(M-M) \subseteq I \subseteq K$ and we notice that $|K \setminus (K-(M-M))|=|(M-M)\setminus S|=t$. Let $A \subseteq (K \setminus (K-(M-M)))$ and consider $I=(K-(M-M)) \cup A$. We claim that $I$ is an ideal of $S$. Indeed, let $x \in A$, $m \in M$ and $y \in (M-M)$. It follows that $m+y \in M$ and, then, $x+m+y \in K$, since $K$ is an ideal. Therefore, $x+m \in K-(M-M)$ and $I$ is an ideal of $S$. Moreover, by \cite[Lemma 4.7]{DS}, $t(I)=|(K-I)\setminus S|+1=|K\setminus I|+1=t+1-|A|$ and the thesis follows, because there are $\binom{t}{i-1}$ subsets of $K\setminus (K-(M-M))$ with cardinality $t+1-i$.
\end{proof}
If $S$ is a symmetric semigroup, the only almost canonical ideals with Frobenius number $\F(S)$ are $M$ and $S$. In this case $t(M)=t(S)+1=2$.
If $S$ is pseudo-symmetric, the four almost canonical ideals with Frobenius number $\F(S)$ are $M$, $S$, $M \cup \{\F(S)/2\}$ and $K$. In this case $t(M)=3$, $t(S)=t(M \cup \{\F(S)/2\})=2$ and $t(K)=1$.
\section{GAS numerical semigroups}
The notion of $n$-almost Gorenstein local rings, briefly $n$-AGL rings, where $n$ is a non-negative integer, is introduced in \cite{CGKM}. These rings generalize almost Gorenstein ones, which are obtained when either $n=0$, in which case the ring is Gorenstein, or $n=1$. In particular, \cite{CGKM} studies the case of 2-AGL rings, which are the closest to being almost Gorenstein, see also \cite{GIT}.
Given a numerical semigroup $S$ with standard canonical ideal $K$ we denote by $\langle K \rangle$ the numerical semigroup generated by $K$. Following \cite{CGKM} we say that $S$ is $n$-AGL if $|\langle K \rangle \setminus K|=n$. It follows that $S$ is symmetric if and only if it is 0-AGL, whereas it is almost symmetric and not symmetric if and only if it is 1-AGL.
It is easy to see that a numerical semigroup is 2-AGL if and only if $2K=3K$ and $|2K\setminus K|=2$, see \cite[Theorem 1.4]{CGKM} for a proof in a more general context. We now give another easy characterization that will lead us to generalize this class.
\begin{proposition}
A numerical semigroup $S$ is 2-AGL if and only if $2K=3K$ and $2K \setminus K=\{\F(S)-x, \F(S)\}$ for a minimal generator $x$ of $S$.
\end{proposition}
\begin{proof}
One implication is trivial, so assume that $S$ is 2-AGL.
Since $S$ is not symmetric, there exists $k \in \mathbb{N}$ such that $k$ and $\F(S)-k$ are in $K$ and so $\F(S) \in 2K \setminus K$. Let now $a \in (2K \setminus K) \setminus \{\F(S)\}$. Since $a \notin K$, we have $\F(S)-a \in S$. Assume that $\F(S)-a=s_1+s_2$ with $s_1,s_2 \in S \setminus \{0\}$. It follows that $\F(S)-s_1=a+s_2 \in 2K$, since $2K$ is a relative ideal, and by definition $\F(S)-s_1 \notin K$. Therefore, $\{a,\F(S)-s_1,\F(S)\} \subseteq 2K \setminus K$ and this is a contradiction, since $S$ is 2-AGL. Hence, $a=\F(S)-x$, where $x$ is a minimal generator of $S$.
\end{proof}
In light of the previous proposition we propose the following definition.
\begin{definition} \rm
We say that $S$ is a {\it generalized almost symmetric} numerical semigroup, briefly {\rm GAS} numerical semigroup, if either $2K=K$ or $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ for some $r \geq 0$ and some minimal generators $x_1, \dots, x_r$ of $S$ such that $x_i-x_j \notin \PF(S)$ for every $i,j$.
\end{definition}
The last condition could seem less natural, but these semigroups have a better behaviour. For instance, in Theorem \ref{Livelli più alti} we will see that this condition ensures that every element in $ \langle K \rangle \setminus K$ can be written as $\F(S)-x$ for a minimal generator $x$ of $S$.
We recall that $S$ is symmetric if and only if $2K=K$ and it is almost symmetric exactly when $2K \setminus K \subseteq \{\F(S)\}$.
\begin{examples} \rm
{\bf 1.} Let $S= \langle 9, 24, 39, 43, 77 \rangle$. Then, $\PF(S)=\{58, 73, 92, 107\}$ and $2K \setminus K=\{107-77,107-43,107-39,107-24,107-9,107\}$. Hence, $S$ is a GAS semigroup. \\
{\bf 2.} If $S=\langle 7,9,15 \rangle$, we have $2K=3K$ and $2K \setminus K=\{26-14,26-7,26\}$. Hence, $S$ is 3-AGL but it is not GAS because $14$ is not a minimal generator of $S$. \\
{\bf 3.} Consider the semigroup $S=\langle 8, 11, 14, 15, 17, 18, 20, 21 \rangle$. We have $2K \setminus K=\{13-11,13-8,13\}$, but $S$ is not GAS because $11-8 \in \PF(S)$. In this case $2K=3K$ and thus $S$ is 3-AGL.
\end{examples}
The last example shows that in a numerical semigroup $S$ with maximal embedding dimension there could be many minimal generators $x$ such that $\F(S) -x \in 2K\setminus K$. This is not the case if we assume that $S$ is GAS.
\begin{proposition} \label{MED}
If $S$ has maximal embedding dimension $e$ and it is {\rm GAS}, then it is either almost symmetric or {\rm 2-AGL} with $2K\setminus K=\{\F(S)-e,\F(S)\}$.
\end{proposition}
\begin{proof}
Assume that $S$ is not almost symmetric and let $\F(S)-x=k_1+k_2 \in 2K\setminus K$ with $x\neq 0$ and $k_1,k_2 \in K$. Let $x\neq e$ and consider $\F(S)-e=k_1+k_2+x-e$. Since $x-e \leq \F(S)-e < \F(S)$ and $S$ has maximal embedding dimension, $x-e \in \PF(S) \setminus \{\F(S)\} \subseteq K$ and, therefore, $\F(S)-e \in 3K \setminus K$. Moreover, $\F(S)-e$ cannot be in $2K$, because $S$ is GAS and $x-e \in \PF(S)$, then, $k_1+x-e \in 2K \setminus K$. Hence, we have $\F(S)-(\F(S)-k_1-x+e) \in 2K\setminus K$ and, thus, $\F(S)-k_1-x+e$ is a minimal generator of $S$. Since $S$ has maximal embedding dimension, this implies that $\F(S)-k_1-x \in \PF(S)$ and, then, $\F(S)-k_1 \in S$ yields a contradiction, since $k_1 \in K$. This means that $x=e$ and $2K\setminus K=\{\F(S)-e,\F(S)\}$.
Suppose by contradiction that $2K \neq 3K$ and let $\F(S)-y \in 3K \setminus 2K$. In particular, $\F(S)-y \notin K$ and, therefore, $y \in S$. If $\F(S)-y =k_1+k_2+ k_3$ with $k_i \in K$ for every $i$, then $k_1+k_2 \in 2K \setminus K$ and, thus, $k_1+k_2=\F(S)-e$. This implies that $\F(S)-e<\F(S)-y$, i.e. $y<e$, that is a contradiction.
\end{proof}
In particular, we note that in a 2-AGL semigroup with maximal embedding dimension it always holds that $2K \setminus K=\{\F(S)-e, \F(S)\}$.
\begin{proposition} \label{Characterizations GAS}
Given a numerical semigroup $S$, the following conditions are equivalent:
\begin{enumerate}
\item $S$ is {\rm GAS};
\item $x-y \notin (M-M)$ for every different $x,y \in M\setminus (S-K)$;
\item either $S$ is symmetric or $2M \subseteq S-K \subseteq M$ and $M-M=((S-K)-M) \cup \{0\}$.
\end{enumerate}
\end{proposition}
\begin{proof}
If $S$ is symmetric, then $M \subseteq S-K$ and both (1) and (2) are true, so we assume $S \neq K$. \\
$(1) \Rightarrow (2)$ Note that $K-S=K$ and $K-(S-K)=K-((K-K)-K)=K-(K-2K)=2K$. Thus, $x \in S \setminus (S-K)$ if and only if $\F(S)-x \in (K-(S-K))\setminus (K-S)=2K \setminus K$. Hence, if $S$ is GAS, then $x-y \notin S \cup \PF(S)=M-M$ for every $x,y \in M\setminus (S-K)$. \\
$(2) \Rightarrow (1)$ If $x$, $y \in M \setminus (S-K)$, then $\F(S)-x$, $\F(S)-y \in 2K \setminus K$ and $x-y \notin \PF(S)$, since it is not in $M-M$. We only need to show that $x$ is a minimal generator of $S$. If by contradiction $x=s_1+s_2$, with $s_1$, $s_2 \in M$, it follows that also $s_1$ is in $M \setminus (S-K)$. Therefore, $s_2=x-s_1 \in M$ yields a contradiction since $x-s_1 \notin M-M$ by hypothesis. \\
$(2) \Rightarrow (3)$ Since $S$ is not symmetric, $S-K$ is contained in $M$. Moreover, if $2M$ is not in $S-K$, then there exist $m_1, m_2 \in M$ such that $m_1+m_2 \in 2M \setminus (S-K)$. Clearly also $m_1$ is not in $S-K$ and $(m_1+m_2)-m_1=m_2 \in M \subseteq M-M$ yields a contradiction.
It always holds that $((S-K)-M) \cup \{0\} \subseteq M-M$, then given $x \in (M-M) \setminus \{0\}$ and $m \in M$, we only need to prove that $x+m \in S-K$. If $m \in M \setminus (S-K)$ and $x+m \notin S-K$, then $(x+m)-m=x \in M-M$ gives a contradiction.
If $m \in (S-K) \setminus 2M$ and $k \in K$, then $0 \neq m+k \in S$ and, so, $x+m+k \in M$, that implies $x+m \in S-K$.
Finally, if $m \in 2M$, then $x+m \in 2M \subseteq S-K$. \\
$(3) \Rightarrow (2)$ Let $x,y \in M \setminus (S-K)$ with $x \neq y$ and assume by contradiction that $x-y \in (M-M)=((S-K)-M)\cup \{0\}$. By hypothesis $y \in M$, then $x=(x-y)+y \in S-K$ yields a contradiction.
\end{proof}
In the definition of GAS semigroup we required that in $2K\setminus K$ there are only elements of the type $\F(S)-x$ with $x$ minimal generator of $S$. In general, this does not imply that the elements in $3K\setminus 2K$ are of the same type. For instance, consider $S=\langle 8,12,17,21,26,27,30,31 \rangle$, where $2K \setminus K=\{23-21,23-17,23-12,23-8,23\}$ and $3K\setminus 2K=\{23-20,23-16\}$. However, by Proposition \ref{MED}, this semigroup is not GAS. In fact, this never happens in a GAS semigroup as we are going to show in Theorem \ref{Livelli più alti}. First we need a lemma.
\begin{lemma} \label{Lemma livelli più alti}
Assume that $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ with $x_1, \dots, x_r$ minimal generators of $S$. If $\F(S)-x \in nK \setminus (n-1)K$ for some $n>2$ and $x=s_1+s_2$ with $s_1$, $s_2 \in M$, then $\F(S)-s_1 \in (n-1)K$.
\end{lemma}
\begin{proof}
Let $\F(S)-(s_1+s_2)=k_1 + \dots + k_n \in nK \setminus (n-1)K$ with $k_i \in K$ for $1 \leq i \leq n$. Since $\F(S)-(s_1+s_2) \notin (n-1)K$, we have $\F(S) \neq k_1+k_2 \in 2K \setminus K$ and, then, $\F(S)-(k_1+k_2)$ is a minimal generator of $S$. Since $\F(S)-(k_1+k_2)=s_1+s_2+k_3+ \dots + k_{n}$, this implies that $s_1+k_3 + \dots + k_{n}\notin S$, that is $k_1+k_2+s_2 =\F(S)-(s_1+k_3+\dots + k_{n}) \in K$. Therefore, $\F(S)-s_1=(k_1+k_2+s_2)+k_3+\dots+ k_{n} \in (n-1)K$ and the thesis follows.
\end{proof}
\begin{theorem} \label{Livelli più alti}
Let $S$ be a {\rm GAS} numerical semigroup that is not symmetric. Then, $\langle K \rangle \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ for some minimal generators $x_1, \dots, x_r$ with $r \geq 0$ and $x_i-x_j \notin \PF(S)$ for every $i$ and $j$.
\end{theorem}
\begin{proof}
We first prove that $x_i-x_j \mathfrak notin \PF(S)$ for every $i$ and $j$ without assuming that $x_i$ and $x_j$ are minimal generators. We can suppose that $x_i=x_1$ and $x_j=x_2$.
Let $\F(S)-x_1=k_1+\dots +k_n \in nK \setminus (n-1)K$ with $k_i \in K$ for every $i$ and assume by contradiction that $x_1-x_2 \in \PF(S)$.
We note that $\F(S)-x_2=k_1+\dots +k_n + (x_1-x_2)$ and $k_1+(x_1-x_2) \in K$. Indeed, if $\F(S)-k_1 -(x_1-x_2)=s \in S$, then $s \neq 0$ and $\F(S)-k_1=(x_1-x_2)+s \in S$ yields a contradiction. If $k_1+k_2+(x_1-x_2) \notin K$, then it is in $2K \setminus K$ and, since also $k_1+k_2 \in 2K \setminus K$, we get a contradiction because their difference is a pseudo-Frobenius number. Hence, $k_1+k_2+(x_1-x_2) \in K$.
We proceed by induction on $n$. If $n=2$, it follows that $\F(S)-x_2=k_1+k_2+(x_1-x_2) \in K$, that is a contradiction. So, let $n \geq 3$ and let $i$ be the minimum index for which $k_1+ \dots + k_i + (x_1-x_2) \notin K$. It follows that $k_1+ \dots + k_i + (x_1-x_2) \in 2K\setminus K$ and, since also $k_1+k_2 \in 2K \setminus K$, this implies that $k_3+ \dots + k_i +(x_1-x_2) \notin \PF(S)$. Moreover, it cannot be in $S$, because it is the difference of two minimal generators, since $S$ is GAS. Therefore, there exists $m \in M$ such that $k_3+ \dots + k_i +(x_1-x_2)+ m \notin S$, that means $\F(S)-(k_3+ \dots + k_i +(x_1-x_2)+m)=k' \in K$. Thus, $\F(S)-((x_1-x_2)+m)=k'+k_3+ \dots + k_i \in jK \setminus K$ for some $1< j < n$. Moreover, $\F(S)-m=k'+k_3+ \dots + k_i +(x_1-x_2)\in \langle K \rangle \setminus K$ and by induction $(x_1-x_2)+m-m \notin \PF(S)$, that is a contradiction. Hence, $x_1-x_2 \notin \PF(S)$.
Let now $h \geq 3$. To prove the theorem it is enough to show that, if $\F(S)-x \in hK \setminus (h-1)K$, then $x$ is a minimal generator of $S$. We proceed by induction on $h$.
Using the GAS hypothesis, the case $h=3$ is very similar to the general case, so we omit it (the difference is that also $\F(S) \in 2K \setminus K$). Suppose by contradiction that $x=s_1+s_2$ and $\F(S)-(s_1+s_2)=k_1+ \dots +k_h \in hK \setminus (h-1)K$ with $k_1, \dots, k_h \in K$ and $s_1, s_2 \in M$.
Clearly, $\F(S)-s_1 \notin K$ and by Lemma \ref{Lemma livelli più alti} we have $\F(S)-s_1 \in (h-1)K$; in particular, $s_1$ is a minimal generator of $S$ by induction. Let $1< i < h$ be such that $\F(S)-s_1 \in iK \setminus (i-1)K$. Since $\F(S)-(s_1+s_2) \notin (h-1)K$, we have $k_1+ \dots + k_i \in iK \setminus (i-1)K$ and, by induction, $\F(S)-(k_1+\dots+k_i)$ is a minimal generator of $S$ and $\F(S)-(k_1+\dots+k_i)-s_1 \notin \PF(S)$ by the first part of the proof. This means that there exists $s \in M$ such that $\F(S)-(k_1+\dots+k_i)-s_1+s \notin S$, i.e. $k_1+\dots+k_i+s_1-s \in K$. This implies that $\F(S)-(s_2+s)=(k_1 + \dots + k_i +s_1-s)+k_{i+1}+\dots+k_h \in (h-i+1)K$ and, since $h-i+1 <h$, the induction hypothesis yields a contradiction because $s_2+s$ is not a minimal generator of $S$.
\end{proof}
We recall that in an almost symmetric numerical semigroup $\F(S)-f \in \PF(S)$ for every $f \in \PF(S)\setminus \{\F(S)\}$, see \cite[Theorem 2.4]{N}. The following proposition generalizes this fact.
\begin{proposition} \label{PF GAS}
Let $S$ be a numerical semigroup with $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$, where $x_i$ is a minimal generator of $S$ for every $i$.
\begin{enumerate}
\item For every $i$, there exist $f_j, f_k \in \PF(S)$ such that $f_j+f_k=\F(S)+x_i$.
\item For every $f \in \PF(S)\setminus \{\F(S)\}$, it holds either $\F(S)-f \in \PF(S)$ or $\F(S)-f+x_i \in \PF(S)$ for some $i$.
\end{enumerate}
\end{proposition}
\begin{proof}
Let $\F(S)-x_i=k_1+k_2 \in 2K\setminus K$ for some $k_1, k_2 \in K$ and let $s \in M$.
Since $x_i+s \in S$, we have $\F(S)-x_i-s \notin K$ and then $\F(S)-x_i-s=k_1+k_2-s \notin 2K$ because $x_i+s$ is not a generator of $S$. In particular, $k_1-s$ and $k_2-s$ are not in $K$. This means that $\F(S)-k_1+s$ and $\F(S)-k_2+s$ are in $S$ and, thus, $\F(S)-k_1, \F(S)-k_2 \in \PF(S)$. Moreover, $\F(S)-k_1+\F(S)-k_2=2\F(S)-(\F(S)-x_i)=\F(S)+x_i$ and (1) holds.
Let now $f \in \PF(S) \setminus \{\F(S)\}$ and assume that $\F(S)-f \notin \PF(S)$. Then, there exists $s \in M$ such that $\F(S)-f+s \in \PF(S)$. In particular, $f-s \in K$ and $\F(S)-s=(\F(S)-f)+(f-s) \in 2K \setminus K$; thus, $s$ has to be equal to $x_i$ for some $i$ and $\F(S)-f+x_i \in \PF(S)$.
\end{proof}
\begin{examples} \rm \label{Examples}
{\bf 1.} Let $S=\langle 28,40,63,79,88\rangle$. We have $2K \setminus K=\{281-28,281\}$ and $S$ is 2-AGL. In this case $\PF(S)=\{100,132,177,209,281\}$ and $100+209=132+177=281+28$. \\
{\bf 2.} Consider $S= \langle 67, 69, 76, 78, 86 \rangle$. Here $2K \setminus K=\{485-86,485\}$ and the semigroup is 2-AGL. Moreover, $\PF(S)=\{218, 226, 249, 259, 267, 322, 485 \}$, $218+267=226+259=485$ and $249+322=485+86$. \\
{\bf 3.} If $S=\langle 9,10,12,13 \rangle$, then $2K \setminus K=\{17-13,17-12,17-10,17-9,17\}$ and $\PF(S)=\{11,14,15,16,17\}$. Hence, $S$ is GAS and, according to the previous proposition, we have
\begin{align*}
\F(S)+9&=11+15 &\F(S)+12=14+15&\\
\F(S)+10&=11+16 &\F(S)+13=14+16&.
\end{align*}
{\bf 4.} Conditions (1) and (2) in Proposition \ref{PF GAS} do not imply that every $x_i$ is a minimal generator. For instance, if we consider the numerical semigroup $S=\langle 15,16,19,20,24 \rangle$, we have $2K \setminus K=\{42-40,42-36,42-32,42-24,42-20,42-19,42-16,42-15,42\}$ and $\PF(S)=\{28,29,33,37,41,42\}$. Moreover,
\begin{align*}
\F(S)+40&=41+41 &\F(S)+20=29+33 \\
\F(S)+36&=37+41 &\F(S)+19=28+33 \\
\F(S)+32&=37+37 &\F(S)+16=29+29 \\
\F(S)+24&=33+33 &\F(S)+15=28+29
\end{align*}
and, so, it is straightforward to see that the conditions in Proposition \ref{PF GAS} hold, but $32$, $36$ and $40$ are not minimal generators.
\end{examples}
We recall that $\L(S)$ denotes the set of the gaps of the second type of $S$, i.e. the integers $x$ such that $x \notin S$ and $\F(S)-x \notin S$, i.e. $x \in K \setminus S$, and that $S$ is almost symmetric if and only if $\L(S) \subseteq \PF(S)$, see \cite{BF}.
\begin{lemma} \label{Lemma L(S)}
Let $S$ be a numerical semigroup with $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r,\F(S)\}$, where $x_i$ is a minimal generator of $S$ for every $i$. If $x \in \L(S)$ and $\F(S)-x \notin \PF(S)$, then both $x$ and $\F(S)-x+x_i$ are pseudo-Frobenius numbers of $S$ for some $i$.
\end{lemma}
\begin{proof}
Assume by contradiction that $x \notin \PF(S)$. Therefore, there exists $s \in M$ such that $x+s \notin S$ and, then, $\F(S)-x-s \in K$. Moreover, since $\F(S)-x \notin \PF(S)$, there exists $t \in M$ such that $\F(S)-x+t \notin S$ and then $x-t \in K$. Consequently, $\F(S)-s-t=(\F(S)-x-s)+(x-t) \in 2K$ and $\F(S)-s-t\notin K$, since $s+t \in S$. This is a contradiction, because $s+t$ is not a minimal generator of $S$. Hence, $x \in \PF(S)$ and, since $\F(S)-x \notin \PF(S)$, Proposition \ref{PF GAS} implies that $\F(S)-x+x_i \in \PF(S)$ for some $i$.
\end{proof}
\begin{lemma} \label{difference} As ideal of $M-M$, it holds $\widetilde{M-e}=M-e$ and
\[
K(M-M) \setminus (M-e) =\{x-e \mid x \in \L(S) \text{ and } \F(S) - x \notin \PF(S)\}.
\]
\end{lemma}
\begin{proof}
We notice that $\F(S)-e \notin (M-M)$ and, if $y > \F(S)-e$ and $m \in M$, we have $y+m >\F(S)-e+m \geq \F(S)$. Therefore, $\F(M-M)=\F(S)-e=\F(M-e)$ and, then, $\widetilde{M-e}=M-e$.
We have $x-e \in K(M-M) \setminus (M-e)$ if and only if $x\notin M$ and $(\F(S)-e)-(x-e) \notin (M-M)$ that is in turn equivalent to $x \notin M$ and $\F(S)-x \notin S \cup \PF(S)$. Since $x \neq 0$, this means that $x \in \L(S)$ and $\F(S)-x \notin \PF(S)$.
\end{proof}
The following corollary was proved in \cite[Theorem 5.2]{B} in a different way.
\begin{corollary} \label{canonical ideal}
$S$ is almost symmetric if and only if $M-e$ is a canonical ideal of $M-M$.
\end{corollary}
\begin{proof}
By definition $M-e$ is a canonical ideal of $M-M$ if and only if $K(M-M) = (M-e)$. In light of the previous lemma, this means that there are no $x \in \L(S)$ such that $\F(S)-x \notin \PF(S)$, which is equivalent to saying that $\L(S)\subseteq \PF(S)$, i.e. $S$ is almost symmetric.
\end{proof}
In \cite[Corollary 8]{BF} it was first proved that $S$ is almost symmetric with maximal embedding dimension if and only if $M-M$ is a symmetric semigroup. In general it holds $M-M \subseteq M-e \subseteq K(M-M)$ and the first inclusion is an equality if and only if $S$ has maximal embedding dimension, whereas the previous corollary says that the second one is an equality if and only if $S$ is almost symmetric.
Moreover, if $S$ has maximal embedding dimension, in \cite[Corollary 5.4]{CGKM} it is proved that $S$ is 2-AGL if and only if $M-M$ is an almost symmetric semigroup which is not symmetric. If we want to generalize this result in the same spirit of Corollary \ref{canonical ideal}, it is not enough to consider the 2-AGL semigroups, but we need that $S$ is GAS. More precisely, we have the following result.
\begin{theorem} \label{T. Almost Canonical ideal of M-M}
The semigroup $S$ is {\rm GAS} if and only if $M-e$ is an almost canonical ideal of the semigroup $M-M$.
\end{theorem}
\begin{proof}
In the light of Remark \ref{Rem as}.4 and Lemma \ref{difference}, $M-e$ is an almost canonical ideal of $M-M$ if and only if
\begin{equation} \label{Eq.Canonical Ideal of M-M}
K(M-M) \setminus (M-e) \subseteq ((M-e)-((M-M)\setminus \{0\})).
\end{equation}
Assume that $S$ is GAS with $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$.
By Lemma \ref{difference} the elements of $K(M-M) \setminus (M-e)$ can be written as $x-e$ with $x \in \L(S)$ and $\F(S)-x \notin \PF(S)$. In addition, Lemma \ref{Lemma L(S)} implies that both $x$ and $\F(S)-x+x_i$ are pseudo-Frobenius numbers of $S$ for some $i$.
Let $0 \neq z\in (M-M)$. We need to show that $x-e+z\in M-e$, i.e. $x+z \in M$. Assume by contradiction $x+z \notin M$, which implies $\F(S)-x-z \in K$. Since $x+z \notin M$ and $x \in \PF(S)$, it follows that $z \notin M$ and, then, $z \in \PF(S)$; hence, $z+x_i \in M$ and $\F(S)-z-x_i \notin K$. We also have that $x-x_i \in K$, since $\F(S)-x+x_i \in \PF(S)$. Therefore,
\[
\F(S)-z-x_i=(\F(S)-x-z)+(x-x_i) \in 2K \setminus K
\]
and this yields a contradiction because $(z+x_i)-x_i \in \PF(S)$ and $S$ is a GAS semigroup.
Conversely, assume that the inclusion (\ref{Eq.Canonical Ideal of M-M}) holds.
An element in $2K\setminus K$ can be written as $\F(S)-s$ for some $s \in S$, since it is not in $K$.
Assume by contradiction that $s \neq 0$ is not a minimal generator of $S$, i.e. $\F(S)-s_1-s_2=k_1+k_2 \in 2K\setminus K$ for some $s_1,s_2 \in M$ and $k_1, k_2 \in K$. It follows that $\F(S)-k_1-s_1=k_2 + s_2 \notin S$, otherwise $\F(S)-s_1 \in K$. Moreover, $k_1+s_1 \notin \PF(S) \cup S$, since $k_1+s_1+s_2 = \F(S)-k_2 \notin S$.
Hence, Lemma \ref{difference} and our hypothesis imply that
\[k_2+s_2-e=\F(S)-k_1-s_1-e \in ((M-e)-((M-M)\setminus \{0\})).\]
Therefore, $\F(S)-k_1-e=(k_2+s_2-e)+s_1 \in M-e$ and, thus, $k_1 \notin K$ yields a contradiction.
This means that $2K \setminus K=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$ with $x_i$ minimal generator of $S$ for every $i$. Now, assume by contradiction that $z=x_i-x_j \in \PF(S)$ for some $i,j$ and let
$\F(S)-x_i=\F(S)-x_j-z=k_1+k_2$ for some $k_1, k_2 \in K$.
Since $k_2+z+x_j=\F(S)-k_1 \notin S$, it follows that $k_2+z \notin S \cup \PF(S)$. Moreover, $\F(S)-k_2-z \notin S$, otherwise $\F(S)-k_2\in S$. Therefore, Lemma \ref{difference} and inclusion (\ref{Eq.Canonical Ideal of M-M}) imply that $\F(S)-k_2-z-e \in ((M-e)-((M-M)\setminus \{0\}))$ and, since $z \in M-M$, it follows that $\F(S)-k_2 \in M$ which is a contradiction because $k_2 \in K$.
\end{proof}
\begin{example} \rm
Consider $S=\langle 9,13,14,15,19 \rangle$, that is a GAS numerical semigroup with $2K \setminus K=\{25-15,25-13,25-9,25\}$. Then, $M-9$ is an almost canonical ideal of $M-M$ by the previous theorem. In fact
\begin{equation*}
\begin{split}
&M-M=\{0,9,13,14,15,17, \rightarrow\}, \\
&K(M-M)=\{0,4,5,6,8,9,10,11,12,13,14,15,17 \rightarrow\},\\
&M-9=\{0,4,5,6,9,10,13,14,15,17 \rightarrow\}, \\
&(M-9)-((M-M)\setminus \{0\})=K(M-M) \cup \{16\}=\{0,4,5,6,8 \rightarrow\}.
\end{split}
\end{equation*}
\end{example}
\begin{remark} \rm
If $S$ is {\rm GAS}, it is possible to compute the type of $M-e$ seen as an ideal of the semigroup $M-M$. In fact by Theorem \ref{T. Almost Canonical ideal of M-M} and Proposition \ref{almost canonical ideal} it follows that
\begin{align*}
t(M-e)&=g(M-e)+g(M-M)-\F(M-M)= \\
&=g(M)-e+g(S)-t(S)-\F(S)+e=
2g(S)+1-t(S)-\F(S).
\end{align*}
Moreover, we recall that $2g(S) \geq t(S)+\F(S)$ is always true and the equality holds exactly when $S$ is almost symmetric. Therefore,
as $t(S)$ is a measure of how far $S$ is from being symmetric, $t(M-e)=t(M)$ (as ideal of $M-M$) can be seen as a measure of how far $S$ is from being almost symmetric. On the other hand, we note that the type of $M$ as an ideal of $S$ is simply $t(S)+1$.
\end{remark}
If $S$ has type 2 and $\PF(S)=\{f,\F(S)\}$, in \cite[Theorem 6.2]{CGKM} it is proved that $S$ is 2-AGL if and only if $3(\F(S)-f) \in S$ and $\F(S)=2f-x$ for some minimal generator $x$ of $S$. In the next proposition we generalize this result to the GAS case.
\begin{proposition} \label{type 2}
Assume that $S$ is not almost symmetric and that it has type 2, i.e. $\PF(S)=\{f,\F(S)\}$. Then, $S$ is {\rm GAS} if and only if $\F(S)=2f-x$ for some minimal generator $x$ of $S$. In this case, if $n$ is the minimum integer for which $n(\F(S)-f) \in S$, then $|2K \setminus K|=2$, $|3K \setminus 2K|= \dots = |(n-1)K \setminus (n-2)K|=1$ and $nK=(n-1)K$.
\end{proposition}
\begin{proof}
Assume first that $S$ is GAS and let $\F(S)-x$, $\F(S)-y \in 2K \setminus K$. Proposition \ref{PF GAS} implies that $\F(S)+x=f_1+f_2$ and $\F(S)+y=f_3+f_4$ for some $f_1,f_2,f_3,f_4 \in \PF(S)$. Since $f_i$ has to be different from $\F(S)$ for all $i$, it follows that $\F(S)+x=\F(S)+y=2f$ and, then, $x=y$. In particular, $\F(S)=2f-x$.
Assume now that $\F(S)=2f-x$ for some minimal generator $x$ of $S$. Clearly, $\F(S)-x=2(\F(S)-f) \in 2K \setminus K$. Let $y \neq 0,x$ be such that $\F(S)-y \in 2K \setminus K$. Since $2K \setminus K$ is finite, we may assume that $y$ is maximal among such elements with respect to $\leq_S$, that is $\F(S)-(y+m) \notin 2K\setminus K$ for every $m \in M$. Let $\F(S)-y=k_1+k_2$ with $k_1$, $k_2 \in K$. Since $\F(S)-y-m=k_1+k_2-m \notin 2K \setminus K$, then $k_1-m$ and $k_2-m$ are not in $K$, which is equivalent to $\F(S)-k_1+m \in S$ and $\F(S)-k_2+m \in S$ for every $m \in M$. This means that $\F(S)-k_1$, $\F(S)-k_2 \in \PF(S)\setminus \{\F(S)\}$ which implies $\F(S)-y=2(\F(S)-f)=\F(S)-x$ and, thus, $x=y$. Therefore, $|2K \setminus K|=2$ and $S$ is GAS.
Moreover, if $S$ is GAS and $\F(S)-y =k_1+\dots+k_r \in rK \setminus (r-1)K$ with $r>2$ and $k_1, \dots, k_r \in K$, then $k_1= \dots=k_r=\F(S)-f$ because $k_i+k_j \in 2K \setminus K$ for every $i$ and $j$. Therefore, if $n(\F(S)-f) \in S$, then $nK=(n-1)K$. Assume that $r(\F(S)-f) \notin S$. Clearly, it is in $rK$ and we claim that it is not in $K$. In fact, if $r(\F(S)-f) \in K$, it follows that it is in $\L(S)$ and, if $\F(S)-r(\F(S)-f)=f$, then $(r-1)(\F(S)-f)=0 \in S$ yields a contradiction. Therefore, Lemma \ref{Lemma L(S)} implies that $\F(S)-r(\F(S)-f)+x =f$ and, again, $(r-1)(\F(S)-f)=x \in S$ gives a contradiction. This means that $r(\F(S)-f) \in rK \setminus K$. Moreover, if $r(\F(S)-f)=k_1+\dots+ k_{r'} \in r'K\setminus(r'-1)K$ with $1<r'<r$ and $k_1, \dots, k_{r'} \in K$, we get $k_1=\dots=k_{r'}=\F(S)-f$ as above, that is a contradiction. Hence, $|rK \setminus (r-1)K|=1$ for every $1<r<n$.
\end{proof}
\begin{example} \rm \label{GAS tipo 2}
Consider $S=\langle 5,6,7\rangle$. In this case $f=8$ and $\F(S)=9$. Therefore, the equality $\F(S)=2f-7$ implies that $S$ is GAS. With the notation of the previous proposition we have $n=5$ and, in fact, $2K \setminus K=\{2,9\}$, $3K \setminus 2K=\{3\}$ and $4K \setminus 3K=\{4\}$.
\end{example}
In \cite{HHS} another generalization of almost Gorenstein ring is introduced. More precisely a Cohen-Macaulay local ring admitting a canonical module $\omega$ is said to be {\it nearly Gorenstein} if the trace of $\omega$ contains the maximal ideal. In the case of numerical semigroups it follows from \cite[Lemma 1.1]{HHS} that $S$ is nearly Gorenstein if and only if $M \subseteq K+(S-K)$, see also the arXiv version of \cite{HHS}. It is easy to see that an almost symmetric semigroup is nearly Gorenstein, but in \cite{CGKM} it is noted that a 2-AGL semigroup is never nearly Gorenstein (see also \cite[Remark 3.7]{BS} for an easy proof in the numerical semigroup case). This does not happen for GAS semigroups.
\begin{corollary}
Let $S$ be a {\rm GAS} semigroup, not almost symmetric, with $\PF(S)=\{f,\F(S)\}$. It is nearly Gorenstein if and only if $3f-2\F(S) \in S$.
\end{corollary}
\begin{proof}
We will use the following characterization proved in \cite{MS}: $S$ is nearly Gorenstein if and only if for every minimal generator $y$ of $S$ there exists $g \in \PF(S)$ such that $g+y-g' \in S$ for every $g' \in \PF(S)\setminus \{g\}$.
By Proposition \ref{type 2} it follows that $\F(S)=2f-x$ with $x$ minimal generator of $S$.
Let $y \neq x$ be another minimal generator of $S$ and assume by contradiction that $\F(S)+y-f \notin S$. Therefore, there exists $s \in S$ such that $\F(S)+y-f+s \in \PF(S)$. If it is equal to $\F(S)$, then $f=y+s \in S$ yields a contradiction. If $\F(S)+y-f+s=f$, then $y+s=2f-\F(S)=x$ by Proposition \ref{type 2} and this gives a contradiction, since $x \neq y$ is a minimal generator of $S$. Hence, $\F(S)+y-f \in S$ for every minimal generator $y \neq x$.
On the other hand, $\F(S)+x-f=2f-x+x-f=f \notin S$ and, therefore, $S$ is nearly Gorenstein if and only if $f+x-\F(S)=3f-2\F(S)\in S$.
\end{proof}
\begin{examples} \rm
{\bf 1.} In Example \ref{GAS tipo 2} we have $3f-2\F(S)=6 \in S$ and, then, the semigroup is both GAS and nearly Gorenstein. \\
{\bf 2.} Consider $S= \langle 9,17,67\rangle$, which has $\PF(S)=\{59,109\}$. Since $2\cdot 59-109=9$ and $3\cdot 59-2\cdot 109=-41 \notin S$, the semigroup is GAS but not nearly Gorenstein. \\
{\bf 3.} If $S=\langle 10,11,12,25 \rangle$, we have $\PF(S)=\{38,39\}$ and $2\cdot 38-39=37$ is not a minimal generator, thus, $S$ is not GAS. On the other hand, it is straightforward to check that this semigroup is nearly Gorenstein.
\end{examples}
\begin{remark} \rm
In the literature there are two other generalizations of the notion of almost Gorenstein ring. One is given by the so-called ring with canonical reduction, introduced in \cite{R}, which is a one-dimensional Cohen-Macaulay local ring $(R,\mathfrak m)$ possessing a canonical ideal $I$ that is a reduction of $\mathfrak m$. When $R=k[[S]]$ is a numerical semigroup ring, this definition gives a generalization of almost symmetric semigroup and $R$ has a canonical reduction if and only if $e+\F(S)-g \in S$ for every $g \in \mathbb{N} \setminus S$, see \cite[Theorem 3.13]{R}. This notion is unrelated to that of GAS semigroup; in fact, it is easy to see that $S=\langle 4,7,9,10 \rangle$ is GAS and does not have canonical reductions, while $S=\langle 8,9,10,22 \rangle$ is not GAS, but has a canonical reduction.
Another generalization of the notion of almost Gorenstein ring is given by the so-called generalized Gorenstein ring, briefly GGL, introduced in \cite{GIKT,GK}. A Cohen-Macaulay local ring $(R,\mathfrak{m})$ with a canonical module $\omega$ is said to be GGL with respect to $\mathfrak{a}$ if either $R$ is Gorenstein or there exists an exact sequence of $R$-modules
\[
0 \xrightarrow{} R \xrightarrow{\varphi} \omega \xrightarrow{} C \xrightarrow{} 0
\]
where $C$ is an Ulrich module of $R$ with respect to some $\mathfrak{m}$-primary ideal $\mathfrak{a}$ and $\varphi \otimes R/\mathfrak{a}$ is injective. We note that $R$ is almost Gorenstein and not Gorenstein if and only if it is GGL with respect to $\mathfrak{m}$. Let $S$ be a numerical semigroup and order $\PF(S)=\{f_1,f_2, \dots, f_t=\F(S)\}$ by the usual order in $\mathbb{N}$. Defining a numerical semigroup to be GGL if its associated ring is GGL, a useful characterization is proved in \cite{T}: $S$ is GGL if either it is symmetric or the following properties hold:
\begin{enumerate}
\item there exists $x \in S$ such that $f_i+f_{t-i}=\F(S)+x$ for every $i=1, \dots, \lceil t/2 \rceil$;
\item $((c-M) \cap S) \setminus c=\{x\}$, where $c=S-\langle K \rangle$.
\end{enumerate}
Using this characterization it is not difficult to see that also this notion is unrelated with the one of GAS semigroup. In fact, the semigroups in Examples \ref{Examples}.2 and \ref{Examples}.3 are GAS but do not satisfy (1), whereas the semigroup $S=\langle 5,9,12 \rangle$ is not GAS by Proposition \ref{type 2}, because $\PF(S)=\{13,16\}$, but it is easy to see that it is GGL with $x=10$.
\end{remark}
\section{Constructing GAS numerical semigroups}
In this section we study the behaviour of the GAS property with respect to some constructions. In this way we will be able to construct many numerical semigroups satisfying this property.
\subsection{Gluing of numerical semigroups}
Let $S_1=\langle s_1, \dots, s_n \rangle$ and $S_2=\langle t_1, \dots, t_m \rangle$ be two numerical semigroups and assume that $s_1, \dots, s_n$ and $t_1, \dots, t_m$ are minimal generators of $S_1$ and $S_2$ respectively. Let also $a\in S_2$ and $b \in S_1$ not be minimal generators of $S_2$ and $S_1$ respectively and assume $\gcd(a,b)=1$. The numerical semigroup $\langle aS_1,bS_2 \rangle=\langle as_1, \dots, as_n, bt_1, \dots, bt_m \rangle$ is said to be the gluing of $S_1$ and $S_2$ with respect to $a$ and $b$. It is well-known that $as_1, \dots, as_n, bt_1, \dots, bt_m$ are its minimal generators, see \cite[Lemma 9.8]{RG}.
Moreover, the pseudo-Frobenius numbers of $T=\langle aS_1,bS_2 \rangle$ are
\[
\PF(T)=\{af_1+bf_2+ab \mid f_1 \in \PF(S_1), f_2 \in \PF(S_2)\},
\]
see \cite[Proposition 6.6]{N}. In particular, $t(T)=t(S_1)t(S_2)$ and $\F(T)=a\F(S_1)+b\F(S_2)+ab$. Consequently, since $K(T)$ is generated by the elements $\F(T)-f$ with $f \in \PF(T)$, it is easy to see that $K(T)=\{ak_1+bk_2 \mid k_1 \in K(S_1), k_2 \in K(S_2) \}$.
Since $t(T)=t(S_1)t(S_2)$, it follows that $T$ is symmetric if and only if both $S_1$ and $S_2$ are symmetric, so in the next theorem we exclude this case.
\begin{theorem} \label{gluing}
Let $T$ be a gluing of two numerical semigroups and assume that $T$ is not symmetric. The following are equivalent:
\begin{enumerate}
\item $T$ is {\rm GAS};
\item $T$ is {\rm 2-AGL};
\item $T=\langle 2S, b \mathbb{N} \rangle$ with $b \in S$ odd and $S$ is an almost symmetric semigroup, but not symmetric.
\end{enumerate}
\end{theorem}
\begin{proof}
(2) $\Rightarrow$ (1) True by definition. \\
(1) $\Rightarrow$ (3) Let $T=\langle aS_1, bS_2 \rangle$. Since $T$ is not symmetric, we can assume that $S_1$ is not symmetric and, then, $\F(S_1)=k_1+k_2$ for some $k_1$, $k_2 \in K(S_1)$. This implies that
\[
\F(T)-b(\F(S_2)+a)=a\F(S_1)+b\F(S_2)+ab-b\F(S_2)-ab=ak_1+ak_2 \in 2K(T) \setminus K(T)
\]
because $\F(S_2)+a \in S_2$. Therefore, since $T$ is GAS, $\F(S_2)+a$ is a minimal generator of $S_2$. By definition of gluing, $a$ is not a minimal generator of $S_2$, so write $a=s+s'$ with $s$, $s' \in M(S_2)$. Since $\F(S_2)+s+s'$ is a minimal generator of $S_2$, we get $\F(S_2)+s=\F(S_2)+s'=0$, i.e. $\F(S_2)=-1$ and $a=s+s'=2$. This proves that $T=\langle 2S_1, b \mathbb{N} \rangle$. Clearly, $b$ is odd by definition of gluing, so we only need to prove that $S_1$ is almost symmetric. Assume by contradiction that it is not almost symmetric and let $s \in M(S_1)$ such that $\F(S_1)-s=k_1+k_2 \in 2K(S_1)\setminus K(S_1)$ with $k_1$, $k_2 \in K(S_1)$. Then
\[
\F(T)-(2s+b)=2\F(S_1)-b+2b-2s-b=2k_1+2k_2 \in 2K(T) \setminus K(T)
\]
and $2s+b$ is not a minimal generator of $T$, contradiction.
\\
(3) $\Rightarrow$ (2) Since $S$ is not symmetric, $\langle K(S) \rangle \setminus K(S)= 2K(S) \setminus K(S)=\{\F(S)\}$. Consider an element $z \in \langle K(T) \rangle \setminus K(T)$, that is $z=2k_1+b\lambda_1 + \dots + 2k_r + b\lambda_r = 2(k_1+\dots +k_r)+b(\lambda_1+ \dots +\lambda_r)$ for some $k_1, \dots, k_r \in K(S)$ and $\lambda_1, \dots, \lambda_r \in \mathbb{N}$. Since $z \notin K(T)$, then $k_1+ \dots +k_r \notin K(S)$ and so $k_1+\dots +k_r=\F(S)$. Therefore, $z=2\F(S)+b(\lambda_1+\dots + \lambda_r) \in 2K(T)\setminus K(T)$ and, since it is not in $K(T)$ and $\F(T)=2\F(S)+b$, it follows that either $z=2\F(S)$ or $z=2\F(S)+b$. Hence, $|\langle K(T) \rangle \setminus K(T)|=2$ and thus $T$ is 2-AGL.
\end{proof}
\subsection{Numerical Duplication}
In the previous subsection we have shown that if a non-symmetric GAS semigroup is a gluing, then it can be written as $\langle 2S, b \mathfrak mathbb{N}\rangle$. This kind of gluing can be seen as a particular case of another construction, the {\it numerical duplication}, introduced in \cite{DS}.
Given a numerical semigroup $S$, a relative ideal $I$ of $S$ and an odd integer $b \in S$, the numerical duplication of $S$ with respect to $I$ and $b$ is defined as $S \! \Join^b \! I=2\cdot S \cup \{2 \cdot I +b\}$, where $2\cdot X=\{2x \mid x\in X\}$ for every set $X$. This is a numerical semigroup if and only if $I+I+b \subseteq S$. This is always true if $I$ is an ideal of $S$ and, since in the rest of the subsection $I$ will always be an ideal, we ignore this condition. In this case, if $S$ and $I$ are minimally generated by $\{s_1, \dots, s_\nu\}$ and $\{i_1, \dots, i_\mu\}$ respectively, then $S \! \Join^b \! I=\langle 2s_1, \dots, 2s_\nu, 2i_1+b, \dots, 2i_\mu+b \rangle$ and these generators are minimal. It follows that $\langle 2S, b \mathbb{N} \rangle = S\! \Join^b \!S$.
\begin{remark} \label{PF duplication} \rm
The Frobenius number of $S \! \Join^b \! I$ is equal to $2\F(I)+b$. Moreover, the odd pseudo-Frobenius numbers of $S \! \Join^b \! I$ are $\{2\lambda+b \mid \lambda \in \PF(I)\}$, whereas the even elements in $\PF(S \! \Join^b \! I)$ are exactly the doubles of the elements in $((M-M) \cap (I-I)) \setminus S$; see the proof of \cite[Proposition 3.5]{DS}. In particular, if $2f \in \PF(S \! \Join^b \! I)$, then $f \in \PF(S)$.
\end{remark}
In this subsection we write $K$ in place of $K(S)$. We note that $S-\langle K \rangle \subseteq S$ and $\F(S-\langle K \rangle)=\F(S)$.
\begin{lemma} \label{Lemma Numerical Duplication}
Let $S$ be a numerical semigroup, $b \in S$ be an odd integer, $I$ be an ideal of $S$ with $\F(I)=\F(S)$ and $T=S \! \Join^b \! I$. The following hold:
\begin{enumerate}
\item If $k\in K$, then both $2k$ and $2k+b$ are in $K(T)$. In particular, if $\F(S)-x \in iK \setminus K$, then $\F(T)-2x \in iK(T)\setminus K(T)$;
\item Let $k \in K(T)$. If $k$ is odd, then $\frac{k-b}{2} \in K$, otherwise $\F(S)-\frac{k}{2} \notin I$;
\item If $I=S-\langle K \rangle$ and $k \in K(T)$ is even, then $\frac{k}{2} \in jK$ for some $j \geq 1$.
\item Let $I=S-\langle K \rangle$. If $\F(T)-2i-b \in \langle K(T) \rangle \setminus K(T)$, then $\F(S)-i \in \langle K \rangle \setminus K$ for every $i \in I$. Moreover, $\F(S)-x \in \langle K \rangle \setminus K$ if and only if $\F(T)-2x \in \langle K(T) \rangle \setminus K(T)$.
\end{enumerate}
\end{lemma}
\begin{proof}
(1) If $k \in K$, then $2k+b\in K(T)$, since $\F(T)-(2k+b) = 2(\F(S)-k)\notin 2 \cdot S$. Moreover, $\F(T)-2k=2(\F(S)-k)+b$ and $\F(S)-k \notin I$ because it is not in $S$, so $2k \in K(T)$. Therefore, if $\F(S)-x=k_1+\dots + k_i \in iK \setminus K$ with $k_1, \dots, k_i \in K$, then $\F(T)-2x=2k_1+ \dots +2k_{i-1}+(2k_i+b) \in iK(T)$ and, clearly, it is not in $K(T)$, since $2x \in T$. \\
(2) Let $k$ be odd. Since $2(\F(S)-\frac{k-b}{2})=2\F(S)+b-k=\F(T)-k \notin T$, it follows that $\F(S)-\frac{k-b}{2}\notin S$, i.e. $\frac{k-b}{2} \in K$. If $k$ is even, then $2(\F(S)-\frac{k}{2})+b=\F(T)-k \notin T$ and, thus, $\F(S)-\frac{k}{2}\notin I$.\\
(3) Since $\F(S)-\frac{k}{2} \notin S-\langle K \rangle$ by (2), there exist $i\geq 1$ and $a \in iK$ such that $\F(S)-\frac{k}{2}+a \notin S$, that is $\frac{k}{2}-a \in K$. Hence, $\frac{k}{2}=a+ (\frac{k}{2}-a) \in (i+1)K$. \\
(4) If $\F(T)-2i-b=k_1+ \dots + k_j + \dots + k_n \in \langle K(T) \rangle \setminus K(T)$ with $k_1, \dots, k_j \in K(T)$ even and $k_{j+1}, \dots, k_n \in K(T)$ odd, then $\F(S)-i=\frac{k_1}{2}+\dots + \frac{k_j}{2} + \frac{k_{j+1}-b}{2} + \dots + \frac{k_{n}-b}{2} + \frac{(n-j)}{2}b \in \langle K \rangle \setminus K$ by (2) and (3). Using (1) the other statement is analogous.
\end{proof}
\begin{example} \rm \label{Example Numerical Duplication}
{\bf 1.} In the previous lemma we cannot remove the hypothesis $\F(I)=\F(S)$. For instance, consider $S=\langle 3,10,11 \rangle$, $I=\langle 3,10 \rangle$ and $T=S \! \Join^3 \! I $. Then, $\F(I)=11\neq 8=\F(S)$ and we have $\F(S)-6 \in 2K \setminus K$, but $\F(T)-12 \notin \langle K(T) \rangle$. \\
{\bf 2.} In the third statement of the previous lemma, $j$ may be bigger than 1. For instance, consider $S=\langle 6,28,47,97\rangle$ and $T=S\! \Join^{47} \!(S-\langle K\rangle)=\langle 12,56,71,94,115,153,159,194,197,241 \rangle$. Then $88,126,170,182 \in K(T)$, while $44,63,91 \in 2K \setminus K$ and $85 \in 3K \setminus 2K$.
\end{example}
\begin{corollary} \label{Numerical duplication 2-AGL}
Let $b \in S$ be odd and let $I=S-\langle K \rangle$. The following hold:
\begin{enumerate}
\item If $S$ is not almost symmetric, then $S\! \Join^b \!M$ is not {\rm GAS};
\item $S$ is n-{\rm AGL} if and only if $S \! \Join^b \! I$ is n-{\rm AGL}.
\end{enumerate}
\end{corollary}
\begin{proof}
(1) Let $T=S\! \Join^b \!M$ and let $x \neq 0$ be such that $\F(S)-x \in 2K \setminus K$. By Lemma \ref{Lemma Numerical Duplication} (1), $\F(T)-2x$ and $\F(T)-(2x+b)$ are in $2K(T) \setminus K(T)$. Even though $2x+b$ and $2x$ are minimal generators, their difference $b$ is a pseudo-Frobenius number of $T$ by Remark \ref{PF duplication}, because $0 \in \PF(M)$, hence $T$ is not GAS. \\
(2) Let $T=S \! \Join^b \! I$. By Lemma \ref{Lemma Numerical Duplication} (4) we have that $\F(S)-x \in \langle K\rangle \setminus K$ if and only if $\F(T)-2x \in \langle K(T) \rangle \setminus K(T)$. Moreover,
if $\F(T)-(2i+b) \in \langle K(T) \rangle \setminus K(T)$, Lemma \ref{Lemma Numerical Duplication} (4) implies that $\F(S)-i \in \langle K \rangle$ and, since $i \in (S-\langle K \rangle)$, it follows that $\F(S) \in S$, that is a contradiction. Hence, $S$ is $n$-AGL if and only if $T$ is $n$-AGL.
\end{proof}
\begin{remark} \rm
If $S$ is almost symmetric with type $t$, then $M=K-(M-M)$ and, consequently, $S\! \Join^b \!M$ is almost symmetric with type $2t+1$ by \cite[Theorem 4.3 and Proposition 4.8]{DS}.
\end{remark}
If $R$ is a one-dimensional Cohen-Macaulay local ring with a canonical module $\omega$ such that $R \subseteq \omega \subseteq \overline{R}$, in \cite[Theorem 4.2]{CGKM} it is proved that the idealization $R \ltimes (R:R[\omega])$ is 2-AGL if and only if $R$ is 2-AGL. The numerical duplication may be considered the analogous of the idealization in the numerical semigroup case, since they are both members of a family of rings that share many properties (see \cite{BDS}); therefore, Corollary \ref{Numerical duplication 2-AGL} (2) should not be surprising. In the following proposition we generalize this result for the GAS property.
\begin{theorem} \label{Numerical duplication S-<K>}
Let $S$ be a numerical semigroup, let $b \in S$ be an odd integer and let $I=S-\langle K \rangle$. The semigroup $T=S \! \Join^b \! I$ is {\rm GAS} if and only if $S$ is {\rm GAS}.
\end{theorem}
\begin{proof}
Assume that $T$ is GAS and let $\F(S)-x \in 2K \setminus K$. By Lemma \ref{Lemma Numerical Duplication}, $\F(T)-2x\in 2K(T) \setminus K(T)$, so $2x$ is a minimal generator of $T$ and, thus, $x$ is a minimal generator of $S$. Now let $\F(S)-x$, $\F(S)-y \in 2K \setminus K$ and assume by contradiction that $x-y \in \PF(S)$.
In particular, $S$ is not symmetric and, then, $I=M-\langle K \rangle$. Moreover, $\F(T)-2x$ and $\F(T)-2y$ are in $2K(T) \setminus K(T)$. We also notice that $x-y \in I-I$, indeed, if $i \in I$ and $a \in \langle K \rangle$, it follows that $(x-y)+i+a \in (x-y)+M \subseteq S$. Therefore, Remark \ref{PF duplication} implies that $2(x-y) \in \PF(T)$; contradiction.
Conversely, assume that $S$ is GAS and let $\F(T)-z=k_1+k_2 \in 2K(T) \setminus K(T)$ with $k_1$, $k_2 \in K(T)$. If $z=2i+b$ is odd and both $k_1$ and $k_2$ are odd, then $i\in I$ and $\F(S)-i=(k_1-b)/2+(k_2-b)/2+b \in 2K$ by Lemma \ref{Lemma Numerical Duplication}.(2); on the other hand, if $k_1$ and $k_2$ are both even, $\F(S)-i=k_1/2+k_2/2 \in \langle K \rangle$ by Lemma \ref{Lemma Numerical Duplication}.3. Since $i \in (S-\langle K \rangle)$, in both cases we get $\F(S) \in S$, that is a contradiction. Hence, $z=2x$ is even. If $k_1$ is even and $k_2$ is odd, Lemma \ref{Lemma Numerical Duplication} implies that $\F(S)-x=k_1/2 + (k_2-b)/2 \in (j+1)K \setminus K$ for some $j\geq 1$ and, therefore, by Theorem \ref{Livelli più alti} it follows that $x$ is a minimal generator of $S$, i.e. $z=2x$ is a minimal generator of $T$. Moreover, let $\F(T)-2x$, $\F(T)-2y \in 2K(T)\setminus K(T)$ and assume by contradiction that $2x-2y \in \PF(T)$. Remark \ref{PF duplication} implies that $x-y \in \PF(S) \subseteq K \cup \{\F(S)\}$. Thus, if $\F(T)-2x=k_1+k_2$ with $k_1$, $k_2 \in K(T)$ and $k_1$ even, then $\F(S)-x=k_1/2+(k_2-b)/2 \in \langle K(S) \rangle \setminus K(S)$ by Lemma \ref{Lemma Numerical Duplication} and, so, $\F(S)-y=k_1/2+(k_2-b)/2+(x-y) \in \langle K(S) \rangle \setminus K(S)$. Hence, Theorem \ref{Livelli più alti} yields a contradiction, because $x-y \in \PF(S)$.
\end{proof}
\begin{example} \rm
{\bf 1.} Consider the semigroup $S$ in Example \ref{Example Numerical Duplication}.2. It is GAS and, then, the previous theorem implies that also $T=S\! \Join^{47} \!(S-\langle K\rangle)$ is GAS. However, we notice that $2K\setminus K=\{44,63,91\}$, $3K \setminus 2K=\{85\}$ and $4K=3K$, while $2K(T) \setminus K(T)=\{135,173,217,229\}$ and $2K(T)=3K(T)$. \\
{\bf 2.} Despite Theorem \ref{Numerical duplication S-<K>}, if $S \! \Join^b \! I$ is GAS for an ideal $I$ different from $S-\langle K \rangle$, it is not true that also $S$ is GAS. For instance, the semigroup $S$ in Example \ref{Example Numerical Duplication}.1 is not GAS, but $S\! \Join^3 \! I$ is.
\end{example}
\subsection{Dilatations of numerical semigroups}
We complete this section studying the transfer of the GAS property in a construction recently introduced in \cite{BS}: given $a \in M-2M$, the numerical semigroup $S+a=\{0\} \cup \{m+a \mid m \in M\}$ is called the dilatation of $S$ with respect to $a$.
\begin{proposition} \label{dilatation}
Let $a \in M-2M$. The semigroup $S+a$ is {\rm GAS} if and only if $S$ is {\rm GAS}.
\end{proposition}
\begin{proof}
We denote the semigroup $S+a$ by $T$. Recalling that $\F(T)=\F(S)+a$, it follows from \cite[Lemma 3.1 and Lemma 3.4]{BS} that $2K(T)=2K(S)$ and
\begin{equation*}
\begin{split}
&2K(S) \setminus K(S)=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}, \\
&2K(T) \setminus K(T)=\{\F(T)-(x_1+a), \dots, \F(T)-(x_r+a), \F(T)\}
\end{split}
\end{equation*}
for some $x_1, \dots, x_r \in M$.
Assume that $S$ is a GAS semigroup. Then, $x_i$ is a minimal generator of $S$ and it is straightforward to see that $x_i+a$ is a minimal generator of $T$. Moreover, if $(x_i+a)-(x_j+a) \in \PF(T)$, then for every $m \in M$ we have $x_i-x_j+m+a \in T$, i.e. $x_i-x_j+m \in M$, that is a contradiction, since $S$ is GAS.
Now assume that $T$ is GAS. Suppose by contradiction that $x_i$ is not a minimal generator of $S$, that is $x_i=s_1+s_2$ for some $s_1$, $s_2 \in M$. We have $\F(S)-(s_1+s_2) \in 2K(S)\setminus K(S)$ and so $\F(S)-s_1 \in 2K(S)\setminus K(S)$, since $2K(S)$ is a relative ideal. Hence, $s_1=x_j$ for some $j$ and $(x_i+a)-(x_j+a)=s_2 \in S$. Since $x_i+a$ is a minimal generator, we have that $s_2 \mathfrak notin T$. Moreover, for every $m+a \in M(T)$ we clearly have $s_2+m+a \in M(T)$, because $s_2 \in S$. This yields a contradiction because $(x_i+a)-(x_j+a)=s_2 \in \PF(T)$ and $T$ is GAS. Finally, if $x_i-x_j \in \PF(S)$, it is trivial to see that $x_i-x_j \in \PF(T)$.
\end{proof}
\begin{remark} \rm
Suppose $2K(S+a) \setminus K(S+a)=\{\F(S+a)-(x_1+a), \dots, \F(S+a)-(x_r+a), \F(S+a)\}$ with $x_1+a, \dots, x_r+a$ minimal generators of $S+a$, but $S+a$ is not GAS. Then $2K(S) \setminus K(S)=\{\F(S)-x_1, \dots, \F(S)-x_r, \F(S)\}$, but it is not necessarily true that $x_1, \dots, x_r$ are minimal generators of $S$. For instance, consider $S=\langle 7,9,11 \rangle$ and $S+7=\langle 14, 16, 18, 21, 23, 25, 27, 29, 38, 40 \rangle$. In this case $2K(S+7) \setminus K(S+7)=\{33-29,33-18,33\}$ and $2K(S) \setminus K(S)=\{26-22, 26-11, 26\}$.
\end{remark}
\end{document} |
\begin{document}
\newcommand\ket[1]{\ensuremath{|#1\rangle}}
\newcommand\bra[1]{\ensuremath{\langle#1|}}
\newcommand\iprod[2]{\ensuremath{\langle#1|#2\rangle}}
\newcommand\oprod[2]{\ensuremath{|#1\rangle\langle#2|}}
\title{Local cloning of two product states}
\author{Zhengfeng Ji}
\email{jizhengfeng98@mails.tsinghua.edu.cn}
\author{Yuan Feng}
\email{feng-y@tsinghua.edu.cn}
\author{Mingsheng Ying}
\email{yingmsh@tsinghua.edu.cn}
\affiliation{
State Key Laboratory of Intelligent Technology and Systems, Department of Computer Science and Technology, Tsinghua University, Beijing 100084, China
}
\date{\today}
\begin{abstract}
Local quantum operations and classical communication (LOCC) put considerable constraints on many quantum information processing tasks such as cloning and discrimination. Surprisingly however, discrimination of any two pure states survives such constraints in some sense. In this paper, we show that cloning is not that lucky; namely, conclusive LOCC cloning of two product states is strictly less efficient than global cloning.
\end{abstract}
\pacs{03.67.Hk, 03.67.-a}
\maketitle
\section{Introduction}
Of all the landmark discoveries in quantum computation and quantum information theory, the impossibility of universal cloning~\cite{WZ82,Diek82} has an important position since cloning is one of the most fundamental information processing tasks and the ``non-cloning'' principle has both theoretical and practical inferences. Fruitful results on cloning have also been obtained under the condition when some compromises are made, such as approximate cloning~\cite{BH96,BDE+98} and probabilistic cloning of a finite set of states~\cite{DG98a,DG98b,Pati99,FZY02}. For example, Duan and Guo considered the problem of probabilistic cloning of two non-orthogonal states in Ref.~\cite{DG98a} and later they solved the problem in a more general setting in Ref.~\cite{DG98b}. Since local operations and classical communication (LOCC) were introduced into the research of basic quantum information processing tasks such as cloning and discrimination, more interesting results have been obtained in the literature. These results enrich both the research of quantum information and that of the historical ``non-locality'' problem. Recently, some works on LOCC cloning have been brought up~\cite{GKR04,ACP04,OH04}, all showing that LOCC cloning is somewhat difficult to perform. In this paper, we study the LOCC version of optimal probabilistic cloning and by ``optimal'' we mean that the success probability is maximized.
One other widely studied quantum information processing task which is closely related to cloning is quantum state discrimination. Similar to cloning, perfect discrimination of non-orthogonal states is also impossible and we can only discriminate a finite set of non-orthogonal states probabilistically~\cite{Ivan87,Diek88,Pere88,JS95}. Moreover, discrimination under the constraint of LOCC has also been extensively studied in the literature with even more results obtained than LOCC cloning. For example, it has been shown that there exists a set of orthogonal product states which cannot be discriminated perfectly using LOCC~\cite{BDF+99}. Yet any two pure states, entangled or not, can always be optimally discriminated both conclusively~\cite{WSHV00,CY01,CY02,JCY04} and inconclusively~\cite{VSPM01}. A comparison of these two results leads us to conclude that the inefficiency of LOCC is evident only when a larger number of unknown states is involved in the task.
Keeping in mind the fact that discrimination (or identification) can be regarded as a special case of cloning in which the number of destination copies goes infinite~\cite{DG98b}, we may naturally expect LOCC cloning, as a generalization, to have similar properties of LOCC discrimination. The fact that LOCC can achieve global optimality in discrimination of any two pure states leads us to the question of whether it will remain true in LOCC cloning. Namely, is LOCC still powerful enough to clone pure states efficiently when the number of unknown states is small?
But a simple thought shows that it is almost impossible even if the state to be cloned is exactly known since LOCC cannot increase the entanglement between separated parties. In fact, with Vidal's formula on probabilistic entanglement transformation~\cite{Vida99}, we can see that the success probability of local cloning of a Bell state (or any other entangled pure state) is $0$ while globally we can always clone the state perfectly. One approach that many other authors take to deal with this is to assume that LOCC cloning is performed with the help of a maximally entangled state, or a so-called ``blank state'', acting as a slot where the cloned copy may reside~\cite{LNFJ04,GKR04,ACP04,OH04}. This assumption is natural especially when one of the states to be copied is a maximally entangled state. In this paper, however, we focus on a special case of the LOCC cloning problem where only product states are considered. Obviously, the entanglement constraint of LOCC does not apply any more and we can continue to discuss the question raised before nontrivially. Moreover, this separation of LOCC constraints on entanglement is helpful in that otherwise one might wrongly lay the blame on the ``entanglement non-increasing'' property which might be in fact only partly responsible for the inefficiency of LOCC.
As a complete answer to the question, we obtain a formula which precisely calculates how efficient LOCC cloning can be. This formula indicates that it is generally impossible to achieve global optimality in conclusive LOCC cloning even in the simplest case, cloning of two product states with equal prior probability. Namely, for any finite $n$, $m\!\to\!n$ cloning of two nonorthogonal product states cannot be globally optimal using only LOCC; while for infinitely large $n$, the LOCC cloning (which is then a discrimination) can be optimal. From the interesting result proved in Ref.~\cite{BDF+99}, we can see that perfect LOCC cloning of the nine orthogonal product states constructed there is also impossible since otherwise we can clone the state to infinite copies and discriminate them perfectly which has already been proved to be impossible. However, it is easy to see that LOCC can always clone two orthogonal product states perfectly. The inefficiency of LOCC is then further revealed in our paper by analyzing LOCC cloning of two product states when they are non-orthogonal.
Consider two separated parties, Alice and Bob, having $m$ copies of a product state secretly chosen from $\ket{\phi_1}_A \otimes \ket{\phi_2}_B$ and $\ket{\psi_1}_A \otimes \ket{\psi_2}_B$ with equal probability. They want to obtain $n(>m)$ copies of the chosen state, that is, $\ket{\phi_1}^{\otimes n}$ (or $\ket{\psi_1}^{\otimes n}$) on Alice's side and at the same time $\ket{\phi_2}^{\otimes n}$ (resp.\ $\ket{\psi_2}^{\otimes n}$ according to what state they initially possess) on Bob's side. Denote $\mu = |\iprod{\phi_1}{\psi_1}|$, $\nu = |\iprod{\phi_2}{\psi_2}|$ and assume that $\mu < 1$, $\nu < 1$. When Alice and Bob are able to perform arbitrary quantum operations on their joint system, the optimal conclusive cloning~\cite{DG98a,CB98} succeeds with the probability
\begin{equation}\label{eq:global_cloning_prob}
\eta_{c} = \frac{1-\mu^m\nu^m}{1-\mu^n\nu^n}.
\end{equation}
Our main result is that conclusive LOCC cloning of product states cannot achieve this when $\mu \nu$ is not zero which means that the secret states are non-orthogonal. We will revisit the global case problem which is first analyzed in Ref.~\cite{DG98a} and come back to our topic on LOCC cloning later.
Keep the number of initial copies $m$ unchanged and let $n$ tend to infinite. Eq.~\eqref{eq:global_cloning_prob} then gives the optimal success probability of identifying two unknown states when $m$ copies are provided~\cite{Ivan87,Diek88,Pere88,JS95}:
\begin{equation*}
\eta_{d} = 1-\mu^m \nu^m.
\end{equation*}
Moreover, we can have a more general result similar to Eq.~\eqref{eq:global_cloning_prob} in a quantum task called ``quantum state separation'' first introduced in Ref.~\cite{CB98}. Quantum separation generalizes both cloning and discrimination, and thus has a simple and general representation. Suppose we are given $\ket{\phi}$ (or \ket{\psi} as an equal probability alternative), and we want to obtain $\ket{\phi'}$ ($\ket{\psi'}$ respectively) without knowing what state it exactly is. Let $\mu = |\iprod{\phi}{\psi}|$, $\mu' = |\iprod{\phi'}{\psi'}|$ and $\mu \ge \mu'$ which is the key assumption in quantum state separation. It is easy to prove that the maximal probability of success is given by
\begin{equation}\label{eq:global_separation_prob}
\eta_s = \frac{1-\mu}{1-\mu'}.
\end{equation}
To be complete, we prove this formula in the following.
A unitary transformation $U$ on the system and the environment is supposed to be the optimal operation. We expand it in the following:
\begin{subequations}\label{eq:separation}
\begin{eqnarray}
U\ket{\phi}\ket{e} & = & \sqrt{s_1}\ket{\phi'}\ket{e_1} + \sum_{i=2}^n \sqrt{s_i}\ket{\alpha_i}\ket{e_i},\\
U\ket{\psi}\ket{e} & = & \sqrt{t_1}\ket{\psi'}\ket{e_1} + \sum_{i=2}^n \sqrt{t_i}\ket{\beta_i}\ket{e_i},
\end{eqnarray}
where $\ket{e}$ is the initial state of the ancillary system and $\ket{e_i}$ are orthogonal states.
\end{subequations}
Subsequent measurement on the subspace spanned by $\ket{e_i}$ tells whether the transformation is successful or not. It succeeds with probability $(s_1+t_1)/2$ when the outcome is $e_1$ and fails with probability $1 - (s_1+t_1)/2$ otherwise.
The unitary transformation $U$ preserves inner product, that is
\begin{equation}\label{eq:inner_product_preserving}
\iprod{\phi}{\psi} = \sqrt{s_1 t_1} \iprod{\phi'}{\psi'} + \sum_{i=2}^n \sqrt{s_i t_i} \iprod{\alpha_i}{\beta_i}.
\end{equation}
Using the triangle inequality, we have
\begin{eqnarray*}
\mu & \le & \sqrt{s_1 t_1} \mu' + \sum_{i=2}^n \sqrt{s_i t_i} |\iprod{\alpha_i}{\beta_i}|\\
& \le & \sqrt{s_1 t_1} \mu' + \sum_{i=2}^n \sqrt{s_i t_i}\\
& \le & \frac{s_1 + t_1}{2} \mu' + \sum_{i=2}^n \frac{s_i + t_i}{2}\\
& = & \frac{s_1 + t_1}{2} \mu' + 1 - \frac{s_1 + t_1}{2}.
\end{eqnarray*}
Thus
\begin{equation*}
\eta_s = \frac{s_1 + t_1}{2} \le \frac{1-\mu}{1-\mu'}
\end{equation*}
with equality when $|\iprod{\alpha_i}{\beta_i}| = 1$, $s_i = t_i$ and $\iprod{\phi}{\psi}$, $\iprod{\alpha_i}{\beta_i}$ has the same phase with $\iprod{\phi'}{\psi'}$. The last condition is easily fulfilled as one of the states can be multiplied by a global phase without altering the physical meaning. This completes the proof of Eq.~\eqref{eq:global_separation_prob}.
Then, we study quantum separation of two states with arbitrary prior probability. Generally, it is hard to give an analytical formula for this problem, but we can obtain an upper bound on the success probability. The secret state is now $\ket{\phi}$ with probability $s$ and $\ket{\psi}$ with probability $t$ where $s+t=1$. Again, denote $\mu = |\iprod{\phi}{\psi}|$ and $\mu' = |\iprod{\phi'}{\psi'}|$. Let us consider the expansion in Eq.~\eqref{eq:separation} which also holds though the prior distribution is now not uniform. Thus, we still have
\begin{equation*}
\iprod{\phi}{\psi} = \sqrt{s_1 t_1} \iprod{\phi'}{\psi'} + \sum_{i=2}^n \sqrt{s_i t_i} \iprod{\alpha_i}{\beta_i},
\end{equation*}
and
\begin{equation*}
\mu \le \sqrt{s_1 t_1} \mu' + \sum_{i=2}^n \sqrt{s_i t_i},
\end{equation*}
which gives
\begin{equation*}
\frac{\mu-\mu'}{1-\mu'} \le \frac{(\sqrt{s_1 t_1} - 1) \mu' + \sum_{i=2}^n \sqrt{s_i t_i}}{1-\mu'}.
\end{equation*}
We claim that the right hand side is less than or equal to
\begin{equation*}
\frac{1 - ss_1 - tt_1}{2\sqrt{s t}},
\end{equation*}
for the correctness of which we only need to check
\begin{eqnarray}\label{eq:upper_bound_temp}
& & (2\sqrt{ss_1 tt_1} - 2\sqrt{st} + 1 - ss_1 -tt_1) \mu'\nonumber\\
& \le & 1 - ss_1 - tt_1 - \sum_{i=2}^n 2\sqrt{ss_i tt_i}.
\end{eqnarray}
Notice that the RHS of Eq.~\eqref{eq:upper_bound_temp} is larger than or equal to
\begin{equation*}
1 - \sum_{i=1}^n (ss_i + tt_i) = 0,
\end{equation*}
Eq.~\eqref{eq:upper_bound_temp} holds if the LHS of Eq.~\eqref{eq:upper_bound_temp} is negative. And when the LHS of Eq.~\eqref{eq:upper_bound_temp} is nonnegative, its maximal value is attained when $\mu' = 1$, so we only need to prove
\begin{equation*}
2\sqrt{ss_1 tt_1} - 2\sqrt{st} + 1 - ss_1 -tt_1 \le 1 - ss_1 - tt_1 - \sum_{i=2}^n 2\sqrt{ss_i tt_i}
\end{equation*}
which can be further reduced to a Cauchy-Schwartz inequality
\begin{equation}\label{eq:cauchy_inequality}
\sum_{i=1}^n \sqrt{s_i t_i} \le \left( \sum_{i=1}^n s_i \sum_{i=1}^n t_i \right)^{1/2} = 1.
\end{equation}
Thus we have proved that
\begin{equation*}
\frac{\mu-\mu'}{1-\mu'} \le \frac{1 - ss_1 - tt_1}{2\sqrt{s t}},
\end{equation*}
from which our upper bound follows
\begin{eqnarray}\label{eq:separation_upper_bound}
\eta_s = s s_1 + t t_1 \le 1 - 2\sqrt{st}\frac{\mu - \mu'}{1 - \mu'}.
\end{eqnarray}
Having in hand the above results concerning the global case of quantum cloning, discrimination and quantum separation, we are now ready to prove our main result by an induction on the maximal possible number of rounds of a protocol. To enjoy the generality which simplifies our proof, we present our proof in the language of quantum separation.
Each one of our players, Alice and Bob, is now restricted to performing arbitrary quantum operations on their own systems but can communicate classically back and forth. Originally, Alice and Bob possess $\ket{\phi_1} \otimes \ket{\phi_2}$ (or $\ket{\psi_1} \otimes \ket{\psi_2}$ with equal probability) and they want to optimally separate it to $\ket{\phi_1'} \otimes \ket{\phi_2'}$ (or $\ket{\psi_1'} \otimes \ket{\psi_2'}$ respectively) using LOCC. Let $\mu = |\iprod{\phi_1}{\psi_1}|$, $\mu' = |\iprod{\phi_1'}{\psi_1'}|$, $\nu = |\iprod{\phi_2}{\psi_2}|$, $\nu'= |\iprod{\phi_2'}{\psi_2'}|$ and $1 > \mu \ge \mu'$, $1 > \nu \ge \nu'$. We will prove that LOCC separation cannot achieve the global separation efficiency
\begin{equation}
\eta_s = \frac{1 - \mu \nu}{1 - \mu' \nu'}
\end{equation}
when $\mu' \nu' \neq 0$ and at least one of $\mu \ge \mu'$ and $\nu \ge \nu'$ is strict. In fact, it is first proved that for any LOCC protocol $\mathcal{P}$,
\begin{equation}\label{eq:local_separation_upper_bound}
\eta_s^{\mathcal{P}} \le 1 - \mu \nu + \frac{(1 - \mu)(1 - \nu)}{(1 - \mu')(1 - \nu')} \mu' \nu',
\end{equation}
which is easily verified to be smaller than the global separation efficiency. On the other hand, we will construct a LOCC protocol that achieves the efficiency defined as the RHS of Eq.~\eqref{eq:local_separation_upper_bound}. Combining these two parts, we obtain the formula for LOCC separation
\begin{equation}\label{eq:local_separation_efficiency}
\eta_s^{L} = 1 - \mu \nu + \frac{(1 - \mu)(1 - \nu)}{(1 - \mu')(1 - \nu')} \mu' \nu'.
\end{equation}
In order to prove the upper bound in Eq.~\eqref{eq:local_separation_upper_bound}, we prove a more general bound that allows unequal initial distribution. Let $s$ and $t$ be the initial distribution and $\eta_s^{\mathcal{P}}(s,t)$ be the efficiency of LOCC separation protocol $\mathcal{P}$. We will show
\begin{equation}\label{eq:general_uppper_bound}
\eta_s^{\mathcal{P}}(s,t) \le 1 - 2\sqrt{st} \mu\nu + 2\sqrt{st} \frac{(1-\mu)(1-\nu)} {(1-\mu')(1-\nu')} \mu' \nu',
\end{equation}
for any protocol $\mathcal{P}$ and any $s$ and $t$. It is easily seen that when $s=t=1/2$, Eq.~\eqref{eq:general_uppper_bound} degenerates to Eq.~\eqref{eq:local_separation_upper_bound}.
Now, consider any LOCC protocol $\mathcal{P}$ that separates $\ket{\phi_1} \otimes \ket{\phi_2}$ with probability $s$ (or $\ket{\psi_1} \otimes \ket{\psi_2}$ with probability $t$) to $\ket{\phi_1'} \otimes \ket{\phi_2'}$ (or $\ket{\psi_1'} \otimes \ket{\psi_2'}$). In such a LOCC protocol, local operations and classical communication can be carried out repeatedly in arbitrarily many rounds. For example, Alice goes first by measuring her part of system and informing the outcome to Bob, then Bob performs a measurement corresponding to the information Alice tells him, and so on. If we define a round to be a measurement on one's side together with a notification of the result to the other side, any execution (a concrete experiment) of the LOCC protocol is just a sequence of many rounds. Different executions of a same protocol may consist of different number of rounds since each round, except the first one, depends highly on the outcomes of the previous rounds. We prove the upper bound in Eq.~\eqref{eq:general_uppper_bound} by induction on the maximal possible number of rounds of any LOCC protocol $\mathcal{P}$.
To see that Eq.~\eqref{eq:general_uppper_bound} holds when $\mathcal{P}$ contains at most one round, we will employ the upper bound on global separation in Eq.~\eqref{eq:separation_upper_bound} proved before. Without loss of generality, let Alice perform the only round in protocol $\mathcal{P}$. What Bob can do is then merely some unitary transformation on his system which preserves the inner product $\nu$. Thus, when $\nu$ and $\nu'$ are not equal, $\mathcal{P}$ fails definitely and Eq.~\eqref{eq:general_uppper_bound} is obvious. When $\nu = \nu'$, protocol $\mathcal{P}$ succeeds if and only if Alice successfully performs separation on her side. Eq.~\eqref{eq:separation_upper_bound} says that Alice's chance to make it is bounded by
\begin{equation*}
1 - 2\sqrt{st}\frac{\mu - \mu'}{1 - \mu'},
\end{equation*}
which is exactly what we want when substituting $\nu = \nu'$ into Eq.~\eqref{eq:general_uppper_bound}. This establishes the initial condition of our inductive proof.
Each protocol $\mathcal{P}$ with maximally $l$ rounds can be reduced to protocols that have at most $l-1$ rounds after the first round being performed by one of our players, say, Alice. Let $\{ M_i \}$ be the measurement carried out by Alice in the first round of protocol $\mathcal{P}$; let $s_i$ and $\ket{\phi_1^i}$ be the probability and the post-measurement state respectively when the observed result is $i$ and the secret state of her system is actually $\ket{\phi_1}$; let $t_i$ and $\ket{\psi_1^i}$ be the correspondences when her part of secret state is prepared in $\ket{\psi_1}$. That is,
\begin{subequations}\label{eq:measurement}
\begin{eqnarray}
M_i \ket{\phi_1} & = & \sqrt{s_i} \ket{\phi_1^i}\\
M_i \ket{\psi_1} & = & \sqrt{t_i} \ket{\psi_1^i},
\end{eqnarray}
where $\sum_i M_i^{\dagger} M_i = I$.
\end{subequations}
Let $p_i$ be the probability that result $i$ occurs, $p_{\phi | i}$ and $p_{\psi | i}$ be the new distributions of the secret state, then
\begin{eqnarray*}
p_i & = & s s_i + t t_i\\
p_{\phi | i} & = & \frac{s s_i}{p_i}\\
p_{\psi | i} & = & \frac{t t_i}{p_i}.
\end{eqnarray*}
The efficiency of protocol $\mathcal{P}$ then equals
\begin{equation*}
\eta_s^{\mathcal{P}} = \sum_i p_i \eta_s^{\mathcal{P}^i}(p_{\phi | i}, p_{\psi | i}),
\end{equation*}
where $\eta_s^{\mathcal{P}^i}(p_{\phi | i}, p_{\psi | i})$ is the efficiency of the further separation $\mathcal{P}^i$ when result $i$ occurs in the above measurement. Since $\mathcal{P}^i$ has at most $l-1$ rounds, by induction hypothesis, we can continue the last equation with
\begin{eqnarray*}
\eta_s^{\mathcal{P}} & = & \sum_i p_i \eta_s^{\mathcal{P}^i}(p_{\phi | i}, p_{\psi | i})\\
& \le & \sum_i p_i \left[1 - 2\sqrt{p_{\phi | i} p_{\psi | i}} \left( \mu_i \nu - \frac{(1-\mu_i)(1-\nu)}{(1-\mu')(1-\nu')} \mu' \nu' \right) \right]\\
& = & 1 - 2\sqrt{st} \left(\sum_i \sqrt{s_i t_i} \mu_i \right) \nu +\\
& & 2\sqrt{st} \left(\sum_i \sqrt{s_i t_i} - \sum_i \sqrt{s_i t_i} \mu_i \right) \frac{(1-\nu)\mu'\nu'}{(1-\mu')(1-\nu')},
\end{eqnarray*}
where $\mu_i = |\iprod{\phi_1^i}{\psi_1^i}|$.
Employing Eq.~\eqref{eq:measurement}, we have
\begin{equation*}
\mu = |\sum_i \sqrt{s_i t_i} \iprod{\phi_1^i}{\psi_1^i}| \le \sum_i \sqrt{s_i t_i} \mu_i.
\end{equation*}
From the above equation and Eq.~\eqref{eq:cauchy_inequality}, we obtain
\begin{equation*}
\eta_s^{\mathcal{P}} \le 1 - 2\sqrt{st} \mu \nu + 2\sqrt{st} \frac{(1-\mu) (1-\nu)}{(1-\mu') (1-\nu')} \mu' \nu',
\end{equation*}
which completes the proof.
Returning to the case when the initial distribution is uniform (that is, $s=t$), we have the upper bound
\begin{equation*}
\eta_s^{\mathcal{P}} \le 1 - \mu \nu + \frac{(1-\mu) (1-\nu)}{(1-\mu') (1-\nu')} \mu' \nu',
\end{equation*}
by setting $s = t = 1/2$. The interesting thing is that such an upper bound is also achievable. We construct a protocol $\mathcal{P'}$ to show this. In protocol $\mathcal{P'}$, Alice and Bob optimally separate their own part first. If both of them succeed, the protocol finishes, else if only one of them succeeds, he (she) then performs optimal discrimination of the separated state and tells the result to the other one if the discrimination is again successful. Otherwise, the procedure fails. Then the probability of success is
\begin{eqnarray*}
\eta_s^{\mathcal{P'}} & = & \frac{(1-\mu)(1-\nu)}{(1-\mu')(1-\nu')} + \left(1 - \frac{1-\mu}{1-\mu'} \right) \frac{1-\nu}{1-\nu'} (1-\nu') +\\
& & \frac{1-\mu}{1-\mu'} \left(1 - \frac{1-\nu}{1-\nu'} \right) (1-\mu')\\
& = & 1 - \mu\nu + \frac{(1-\mu)(1-\nu)}{(1-\mu')(1-\nu')} \mu'\nu',
\end{eqnarray*}
which coincides with our upper bound. Then we get the formula that calculates the efficiency of LOCC separation of two product states:
\begin{equation*}
\eta_s^{L} = 1 - \mu \nu + \frac{(1-\mu) (1-\nu)}{(1-\mu') (1-\nu')} \mu' \nu'.
\end{equation*}
It is easily seen to be strictly less than the global efficiency when $0 < \mu' < \mu < 1$ and $0 < \nu' < \nu < 1$.
Via a simple substitution, we get the efficiency of LOCC cloning
\begin{equation*}
\eta_c^{L} = 1 - \mu^m \nu^m + \frac{(1-\mu^m) (1-\nu^m)}{(1-\mu^n) (1-\nu^n)} \mu^n \nu^n,
\end{equation*}
which is also strictly less than the global efficiency since $0 < \mu^n < \mu^m < 1$ and $0 < \nu^n < \nu^m < 1$ is obvious in an $m\!\to\!n$ cloning. The corresponding optimal protocol $\mathcal{P'}$ becomes that Alice and Bob perform $m\!\to\!n$ cloning separately and if any one of them fails, they resort to the discrimination protocol to improve the efficiency.
In sum, we have analyzed the problem of LOCC cloning and LOCC separation of two product states. It is proved that, except some trivial cases, $m\!\to\!n$ LOCC cloning of product states is less efficient than global cloning. This result strongly contrasts with the fact that any two pure states can be locally discriminated with global efficiency. In other words, LOCC pose more constraints on cloning than on discrimination. This is accordant with some recent results of the related works~\cite{ACP04,OH04}. Since product state does not involve us in the entanglement restriction of LOCC, our result also shows that there is something else, other than the entanglement constraint, that obstructs the cloning procedure in LOCC.
Efficiency, the success probability in conclusive cloning and discrimination, is a natural and important measure. Then why do cloning and discrimination (a cloning with infinite destination copies) have such a difference in it? One observation is that discrimination is somewhat classical since the final result it cares about is classical while cloning is not. So in LOCC discrimination of two product states, if either of the two parties succeeds then the whole task is done by communicating the result while in LOCC cloning, both of them are required to succeed on their own. Alternatively, from the view of quantum separation, discrimination is a separation where $\mu'$ or $\nu'$ equals $0$, which makes it a ``trivial'' task while cloning is not trivial generally.
We are thankful to the colleagues in the Quantum Computation and Quantum Information Research Group for helpful discussions. This work was supported by the Natural Science Foundation of China (Grant Nos. 60273003, 60433050, and 60305005).
\end{document} |
\begin{document}
% NOTE(review): the macro names below were lost in extraction (each \newcommand's
% first argument had been replaced by its own expansion, which is invalid LaTeX);
% conventional names are assumed here -- confirm against the original source and
% against the usages in the document body.
\newcommand{\dd}{\,\mbox{d}}
\newcommand{\naive}{na\"{\i}ve }
\newcommand{\Naive}{Na\"{\i}ve }
\newcommand{\eg}{e.g.\xspace}
\newcommand{\ie}{i.e.\xspace}
\newcommand{\pdf}{pdf.\xspace}
\newcommand{\etc}{etc.\@\xspace}
\newcommand{\PhD}{Ph.D.\xspace}
\newcommand{\MSc}{M.Sc.\xspace}
\newcommand{\BA}{B.A.\xspace}
\newcommand{\MA}{M.A.\xspace}
\newcommand{\role}{r\^{o}le}
\newcommand{\signature}{\hspace*{\fill} Rose Baker \today}
\newenvironment{entry}[1]
{\begin{list}{}{\renewcommand{\makelabel}[1]{\textsf{##1:}\hfil}
\settowidth{\labelwidth}{\textsf{#1:}}
\setlength{\leftmargin}{\labelwidth}
\addtolength{\leftmargin}{\labelsep}
\setlength{\itemindent}{0pt}
}}
{\end{list}}
\title{A new measure of treatment effect for random-effects meta-analysis of comparative binary outcome data}
\author{Rose Baker\\School of Business\\University of Salford, UK\\Dan Jackson\\Statistical Innovation Group\\Advanced
Analytics Centre\\AstraZeneca, Cambridge, UK\footnote{This work was done while Dan was working at the MRC/BSU, Cambridge}}
\maketitle
\begin{abstract}
Comparative binary outcome data are of fundamental interest in statistics and are often pooled in meta-analyses.
Here we examine the simplest case where for each study there are two patient groups and a binary event of interest, giving rise to a series of $2 \times 2$ tables.
A variety of measures of treatment effect are then available and are conventionally used in meta-analyses, such as the odds ratio, the risk ratio and the risk difference.
Here we propose a new type of measure of treatment effect for this type of data that is very easily interpretable by lay audiences.
We give the rationale for the new measure and we present three contrasting methods for computing its within-study variance so that it can be used in conventional meta-analyses. We then develop three alternative methods for random-effects meta-analysis that use our measure and we apply our methodology to some real examples. We conclude that our new measure is a fully viable alternative to existing measures. It has the advantage that its interpretation is especially simple and direct, so that its meaning can be more readily understood by those with little or no formal statistical training. This may be especially valuable when presenting `plain language summaries', such as those used by Cochrane.
\end{abstract}
{\bf Keywords: Beta distribution; parallel trial; random-effect; Relative risk; Treatment effect }
\section{Introduction}
In medicine, we often want to measure the effect of a treatment, usually a drug or a medical intervention,
and in epidemiology, we often wish to measure the effect of exposure to some health hazard. Multiple studies that provide relevant data are often available which may then be pooled in meta-analyses. We consider the case of comparative binary outcome data, where for each study there are two patient groups and a binary event of interest, such as death. Interest then lies in determining which patient group is more likely to experience the event. Most studies of this type are parallel studies, with a control (or placebo) group and a treatment group. Occasionally, studies are paired, where each patient acts as their own control. Here we focus on parallel studies and the estimation of an appropriate treatment effect for the resulting series of $2 \times 2$ tables. We will use random-effects meta-analyses for this purpose, so that the possibility of between-study heterogeneity is included in our modelling. If the strong assumption of homogeneity is made then common-effect models may be used instead, which greatly simplifies the statistical methods required.
Although the $2 \times 2$ table is a particularly simple and common data structure, the issues relating to the analysis of this type of data are subtle. In particular, there are several issues that should be considered when determining an appropriate measure of treatment effect for this type of data. For example, there is an important distinction between relative and absolute measures (Deeks, 2002). The Cochrane handbook (Higgins and Green, 2011), in its section 9.7,
under the heading of sensitivity analyses, asks `for dichotomous outcomes, should odds ratios, risk ratios or risk differences be used?'. We therefore have three conventional measures of treatment effect for performing meta-analyses involving comparative binary data but all of these measures can be difficult for lay audiences to interpret. For example, according to Davies {\em et al} (1998), `odds ratios are hard to comprehend directly', and Grimes and Schulz (2008) state that `for most clinicians, odds ratios will remain ... well, odd'. Risk ratios and differences are probably easier to interpret but Schechtman (2002) explains that these measures also have their disadvantages, where the problems stem from the fact that the same risk difference or risk ratio might have very different implications depending on the baseline risk. In order to make the risk difference more interpretable its reciprocal, the number needed to treat (Nuovo, Melnikow and Chang, 2002), has been proposed; if the estimated risk difference indicates that the treatment is not beneficial relative to the control then this measure is interpreted as the number needed to harm. The number needed to treat is a very appealing and intuitive measure for non-statisticians to interpret but has serious statistical difficulties (Hutton, 2002 and 2010). The poor statistical properties of the number needed to treat are a consequence of the fact that it is undefined under the null hypothesis where the probability of an event is the same in both groups.
Our aim here is to develop a new measure of treatment effect for comparative binary outcome data that, like the risk difference, takes values in the interval [-1, 1], and is easily interpretable by non-statisticians. The proposed measure will have a very simple and intuitively appealing interpretation, along the lines of the number needed to treat.
The effect will be zero under the null.
Minus one will indicate that no patients in the treatment group experience the event (but some in the control do), and plus one will indicate that all patients in the treatment group experience the event (but some in the control group do not). The probability that patients in the treatment group experience the event will be a monotonically increasing function of the treatment effect, for a given probability in the control group.
A related idea to ours is the proposal of Mirzazadah, Malekinejad and Kahn (2015), the `relative risk reduction of an undesirable outcome'. This is a simple transformation of the relative risk, which our measure generalises. The disadvantages of our measure are that it will necessarily be unfamiliar, and so appear strange to statisticians, and that it is not differentiable (but is continuous) at the null. However this is not a serious statistical difficulty in practice. Our hope is that our ideas could be used to make meta-analyses and systematic reviews, and indeed statistical analyses more generally, more accessible to those with little or no formal statistical training. We return to this issue in the discussion.
The rest of the paper is set out as follows. In section 2 we summarise existing measures, develop our new measure, describe three methods to compute its variance and develop an accurate approximation to its distribution. In section 3 we develop three random-effects models for meta-analysis that use our new measure. The first of these models simply uses the conventional random-effects model to describe the outcome data but the second two models are novel and are motivated by the desire to make more accurate inferences using our new measure. In section 4 we apply our new methods to three real meta-analyses. We conclude in section 5 with a short discussion.
\section{A new measure of treatment effect and its properties}
In this section we summarise the most popular existing measures of treatment effect for analyzing $2 \times 2$ tables, develop our new measure and describe its properties.
\subsection{Existing measures}
In this section we describe methods for a single $2 \times 2$ table and in section three we will develop methods for the random-effects meta-analysis of multiple tables.
For comparative binary data, a variety of measures of treatment effect $\theta$ are currently available.
We will use $\theta$ to denote the treatment effect, where the type of treatment effect that this refers to will be obvious from the context, and ultimately we will use $\theta$ to denote our new measure.
Let $p$ denote the probability of an event in the control group and let $q$ denote the probability of an event in the treatment group. All the measures that follow are suitable functions of $p$ and $q$. We will see below that our new measure is another such function, but one where a simple causal explanation can be used to communicate its meaning.
In the context of meta-analysis, Hartung, Knapp and Sinha (2008) also give an account of many of the established measures that follow, to which the reader is referred for more details.
\subsubsection{The odds ratio.}
A very popular measure of the relative treatment effect is the odds ratio, $\frac{q/(1-q)}{p/(1-p)}$. This, and all the quantities that follow, are estimated by replacing $p$ and $q$ with their estimates, $\hat{p}$ and $\hat{q}$, which are the observed proportions of patients that experience the event of interest in each group. Analyses are usually performed on the log scale so that the log odds-ratio, $\ln\{\frac{q/(1-q)}{p/(1-p)}\}$ is used in analysis. Inferences may then be back-transformed to the odds scale. The log-odds ratio is undefined when $p=0,1$ or $q=0,1$ and halves or some other quantity are usually added to all entries of the $2 \times 2$ table prior to analysis to avoid this problem when there are zeros. This also applies to the relative risk when either probability is zero. The odds ratio may take any non-negative value, and the log odds ratio may take any value $(-\infty,\infty)$. As explained in the introduction, the odds ratio is not an easily interpretable quantity for many consumers of statistical analyses, and its use is usually motivated by its good statistical properties and connections with other standard statistical methods, such as logistic regression.
The odds ratio is label-invariant and the log-odds ratio simply changes sign when `good' and `bad' outcomes are switched; we use the shorthand $p\rightarrow 1-p, q \rightarrow 1-q$ to indicate this change. Similarly the log-odds ratio switches sign when treatment and control groups are interchanged, for which we use $p \leftrightarrow q$. The invariance property of the log-odds ratio is another reason why it is often preferred over some of the measures that follow.
\subsubsection{The relative risk.}
The relative risk of an event $q/p$, and the relative risk of not experiencing the effect $(1-q)/(1-p)$, are also commonly used relative measures of a treatment effect. These measures are more easily interpretable than the odds ratio. As with the odds ratio, analyses are usually performed on the log risk scale.
The relative risk of the event and not experiencing the event are not the same, so the relative risk is not label-invariant when $p \rightarrow 1-p$, $q \rightarrow 1-q$, but is
invariant under $ p \leftrightarrow q$.
\subsubsection{The risk difference.}
The risk difference $q-p$ is an absolute measure of treatment effect, that can take values in the interval [-1,1]. It has the problem that if $q=p+\theta$, for some values of $p$, $q$ will lie outside $[0,1]$ for $\theta \in [-1,1]$. In order to make this measure more interpretable,
its reciprocal $1/(q-p)$, the number needed to treat, has been proposed. However as explained in the introduction, the number needed to treat has been criticised because of its poor statistical properties which stem from the fact that it is undefined under the null where $p=q$.
\subsubsection{The arcsine difference.}
The arcsine difference $\sin^{-1}\sqrt{q}-\sin^{-1}\sqrt{p}$ is a risk difference with the variances of $p$ and $q$ stabilised using a variance stabilising transformation. This measure has been proposed by R\"{u}cker {\em et al} (2009), particularly in situations where the event of interest is rare. The arcsine can take values in the interval $[-\pi/2,\pi/2]$.
\subsubsection{Families of measures of treatment effects.}
Jackson, Baker and Bowden (2013) propose a family of treatment effects of the form $T(q)-T(p)$ that includes many of the measures described above and can be used in a sensitivity analysis. The transformation $T$ used by Jackson {\em et al}. was inspired by the one proposed by Aranda-Ordaz (1981) that can also be used for this purpose.
\subsection{New measure: The `GRRR'}
One observation from section 2.1 is that a wide variety of measures of treatment effect have been proposed and used in analysis. In particular, some of these are routinely used in meta-analyses. Other than the number needed to treat, which has poor statistical properties, all measures are, for one reason or another, hard for lay audiences to interpret. In this section we develop another measure, the `Generalised Relative Risk Reduction', which gives rise to the whimsical acronym `GRRR'.
Our new measure has a very simple interpretation, as is also the case for the number needed to treat, but the GRRR has more acceptable statistical properties.
The key concept is that as $\theta$ increases from $-1$ towards zero, an increasing (from zero) proportion of those who experience the event under the placebo would also experience this under treatment until $\theta=0$ when $q=p$. Also as $\theta$ subsequently increases towards unity, an increasing proportion of those not experiencing the event under the placebo would experience this under the treatment. When $\theta=1$, all patients in the treatment group experience the event.
We begin by considering the probability $q$ as a function of $p$ and the new measure $\theta$. We define $q=q(p, \theta)$ so as to ensure that $\theta$ represents a meaningful and easily interpretable quantity.
We will define our measure differently for $\theta < 0$ and $\theta > 0$ whilst ensuring that $\theta$ is easily interpreted in either case. We then put these definitions together to define our measure.
We require that $\theta=0$ is equivalent to $q=p$, and also that $\theta \in [-1,1]$ acts continuously on $q$, so that $\partial q/\partial \theta > 0$. Finally, we will ensure that $\theta= \pm 1$ represents the greatest possible treatment effects.
\subsubsection{The case where $q > p$.}
If $q > p$, so that the event is at least as likely in the treatment group as in the control (and is not certain in the control group) we define $q=p+\theta(1-p)$, where $0 < \theta<1$. This can be interpreted as meaning that, in addition to the proportion who experienced the event in the control group (and who would also have experienced the event if they were instead in the treatment group),
a further proportion $\theta$ of those who would not experience the event in the control group would have experienced this event if they were in the treatment group. For example, $\theta=0.6$ can be interpreted as meaning that 60\% of patients who do not experience the event in the control group would have experienced the event if they were in the treatment group.
This may be more easily understood as $1-q=(1-\theta)(1-p)$, so that the proportion $1-p$ not experiencing the event shrinks by a factor of $1-\theta$. When $\theta=1$ we have that $q=1>p$, so that the event is certain in the treatment group whilst not certain in the control group.
\subsubsection{The case where $q < p$.}
If $q < p$, so that the event is less likely in the treatment group (and is not certain in the treatment group) we define $q=(1+\theta)p$ where $-1<\theta<0$. This can be interpreted as meaning that, if $\theta$ is negative, a proportion $1+\theta$ of those who would experience the event in the control group would also experience the event if they were in the treatment group (and all those who would not experience the event in the control group would also not experience the event if they were in the treatment group). For example, $\theta=-0.6$ can be interpreted as meaning that 40\% of patients who experience the event in the control group would also experience this event if they were in the treatment group. When $q=0$ and $p>0$ we have $\theta=-1$, so that $\theta=-1$ indicates that the event is impossible in the treatment group whilst being possible in the control group.
\subsubsection{The case where $q = p$.}
If $p=q$ then $\theta=0$ and we could take $q=p+\theta(1-p)$, as in the case where $q>p$, or $q=p(1+\theta)$, as in the case where $q<p$. We take the former option but this makes no material difference. Hence we arbitrarily use the definition for the case where $q > p$ to apply slightly more generally to $q \ge p$.
\subsubsection{Putting these three cases together and defining our new measure.}
The direct and easily interpretable nature of our new measure $\theta$ is now apparent, because it may be interpreted as simply modifying the response of a subset of the control group patients in order to produce the treatment group probabilities. We have used the causal language that the `event would have been different' for some easily identified proportions of patients in the control group `if they had instead been in the treatment group' when motivating our measure. However we will see below that the measure $\theta$ is just another suitable function of $p$ and $q$ that can be used to measure the treatment effect. Other explanations of why $p$ and $q$ take their values, and so result in a particular value of $\theta$, are of course also possible and more likely than the simple minded causal explanations that we have used to motivate the measure. However our intention is to use this causal explanation for lay audiences to explain one reason (of many) for the treatment effect observed. We are also able to communicate the uncertainty in our estimates using this language, as we demonstrate for some of our meta-analyses below.
Putting the three cases together, our proposed new treatment effect is defined by the function
\begin{equation}
q=\left\{\begin{array}{ll}
(1+\theta)p & \text{if $q < p$}\\
p+\theta (1-p)& \text{if $q \ge p$}.
\end{array}
\right. \label{eq:qdef}\end{equation}
Succinctly, the probability of an event in the treatment group is
\begin{equation}q=(1+\min(\theta,0))p+\max(\theta,0)(1-p),\label{eq:theta}\end{equation}
or $q-p=\min(\theta,0)p+\max(\theta,0)(1-p)$.
Writing $\min(\theta,0)=(\theta-|\theta|)/2,\, \max(\theta,0)=(\theta+|\theta|)/2$, we have the alternative form
\[q-p=\theta/2-|\theta|(p-1/2).\]
Solving (\ref{eq:qdef}) for $\theta$ gives rise to the definition of the GRRR of
\begin{equation}
\label{eq2}
\theta=\left\{\begin{array}{ll}
(q/p)-1 & \text{if ${q} < {p}$}\\
1-(1-q)/(1-p) & \text{if ${q} \ge {p}$}.
\end{array}
\right. \end{equation}
The nature of our generalised relative risk ratio is most evident from (\ref{eq2}): if ${q} < {p}$ then it is the relative risk minus one, and if ${q} \ge {p}$ then it is one minus the relative risk of not experiencing the event. Our measure is therefore a type of generalised relative risk ratio, hence its name. Although we motivated it without reference to risk ratios we can see now that it can be expressed directly in terms of them. Therefore our measure is closely related to methods that all statisticians will be familiar with, and is not such a radical departure from conventional methods as it may at first appear. More succinctly we can write
\begin{equation}{\theta}=\frac{{q}-{p}}{{p}+(1-2{p})H({q}-{p})},\label{eq:true}\end{equation}
where $H$ is the Heaviside step function, such that $H(x)=1$ if $x > 0$, else zero if $x < 0$, and $H(0)=1/2$; the case $p=q$ has been correctly specified as $\theta=0$ for all $p,q \in [0,1]$. Equation (\ref{eq:true}) appears to be an unusual candidate for a measure of treatment effect but it is a convenient way of performing the calculation. We substitute the estimates $\hat{p}, \hat{q}$, which are just the observed proportions of events in each treatment group, into (\ref{eq:true}) to produce the estimated effect $\hat{\theta}$. Hence we can write
\begin{equation}
\label{eq2a}
\hat{\theta}=\left\{\begin{array}{ll}
(\hat{q}/\hat{p})-1 & \text{if ${\hat{q}} < {\hat{p}}$}\\
1-(1-\hat{q})/(1-\hat{p}) & \text{if ${\hat{q}} \ge {\hat{p}}$}.
\end{array}
\right. \end{equation}
and
\begin{equation}{\hat{\theta}}=\frac{\hat{q}-\hat{p}}{\hat{p}+(1-2\hat{p})H(\hat{q}-\hat{p})}.\label{eq:est}\end{equation}
Although $\theta$ was motivated using risk ratios, the numerators in (\ref{eq:true}) and (\ref{eq:est}) are the true and estimated risk differences, respectively. Hence these forms show how the proposed measure relates to this other well known measure of treatment effect.
A physical way to illustrate our measure, for those who think visually rather than numerically, could be to take a vessel such as a bottle with a long neck, the same length as the body, where the cross sectional area changes from $p$ in the body to $1-p$ in the neck.
The total capacity of the bottle is a unit volume.
The body of the bottle is sunk into the ground, with the bottom at $-1$ and the top of the body at ground level, so that the top is at $+1$. Water is poured into the bottle, and the height of the water level is $\theta$.
The volume of water is $q$, so below ground level $q=(1+\theta)p$. At ground level, $q=p$ and $\theta=0$, and in the neck, $q=p+\theta(1-p)$.
The measure is shown in figure \ref{figa}.
\begin{figure}
\caption{\label{figa}Illustration of the proposed measure $\theta$ as the water level in a bottle of unit capacity.}
\end{figure}
\subsubsection{Properties of the proposed measure }
The GRRR is `label-invariant', under $p\rightarrow 1-p, q \rightarrow 1-q$. This type of symmetry or label-invariance is thought desirable, because
without it conclusions would depend on whether we looked at the proportion of patients recovering, or the proportion not recovering. However, the widely used relative risk does not possess this property. Deeks (2002) however notes that the `natural' choice out of `good' or `bad' relative risk usually fits the data better, so this lack of invariance is not a serious problem in practice.
Our measure is not label invariant with regard to switching the treatment and control
labels, when $p \leftrightarrow q$. This is a consequence of motivating the measure by a consideration of simple causal implications of what would have happened to patients in the control group if they had instead have been in the treatment group: if we instead apply these causal implications to what would have happened to patients in the treatment group it is immediately obvious that
different inferences for $\theta$ will be made. In situations where one group is the `placebo' or `unexposed' group, as is the case for all the examples in this paper, then it is much more natural to ask the question of what would have happened if patients in this group were treated or exposed, rather than ask this question `the other way round'.
However in situations where two treatments `A' and `B' are compared there is no such natural treatment ordering. To help the lay audience understand the relative merits of two such treatments, it may therefore be useful to present two analyses, the first where `A' is the control and `B' is the treatment, and then vice versa. The results can then be phrased by explaining the implications of the treatments if some patients in group A were instead in group B, and then also if some patients were in group B instead of group A.
For showing that the treatments do not give identical results, a label-invariant significance test could be performed.
The GRRR is not differentiable at $\theta=0$. This is a direct consequence of the fact that it is defined differently for positive and negative values. This is an undesirable property but we have not found it to be a serious issue in practice. However this would be a problem for any further methods that, for example, involved taking a Taylor series expansion about $\theta=0$. In any case, this is a much less serious issue than the one presented by the GRRR's main competitor for an easily interpreted measure of treatment effect, the number needed to treat, which is undefined when there is no treatment effect.
\subsection{Computing the variance of the estimated treatment effect}
In order to make inferences using the proposed measure using a normal approximation (for example when using the conventional random-effects model for meta-analysis, see our method 1 below) we need methods for calculating the variance $\sigma^2$ of $\hat{\theta}$. We use the notation
in table \ref{tab:1}, so that for example we can write $\hat{p}=n_{11}/N_1$, $\hat{q}=n_{12}/N_2$. We propose three different ways to compute $\sigma^2$.
\begin{table}[h]
\begin{tabular}{|l|l|l|} \hline
event$\downarrow$,group$\rightarrow$& Control&Treatment \\ \hline
Yes & $n_{11} $ & $n_{12} $ \\ \hline
No & $n_{21} $ & $n_{22} $ \\ \hline
Total&$N_1$ & $N_2$ \\ \hline
\end{tabular}
\caption{\label{tab:1}Notation for $2 \times 2$ tables; columns are the group, rows the event, e.g.\xspace successful or unsuccessful.}
\end{table}
\subsubsection{Analytical calculation of the variance.}
For typical study sizes available,
complete enumeration is simplest and most accurate. R\"ucker {\it et al} (2009) also consider this possibility for the arcsine difference, and refer to this as `analytical calculation of the variance' and we adopt their terminology here. Writing $P_i$ to denote the binomial probability of $i$ control group responses, and $Q_j$ to denote the probability of $j$ treatment group responses, with
$P_i={N_1 \choose i}\hat{p}^i(1-\hat{p})^{N_1-i}$,
$Q_j={N_2 \choose j}\hat{q}^j(1-\hat{q})^{N_2-j}$, we have
\begin{equation}\text{E}(\theta^m)=\sum_{i=0}^{N_1}\sum_{j=0}^{N_2} \theta_{ij}^m P_iQ_j,\label{eq:var}\end{equation}
where $\theta_{ij}$ is the value of $\theta$ corresponding to $\hat{\hat{p}}=i/N_1$, $\hat{\hat{q}}=j/N_2$. From these calculations $\sigma^2=\text{E}(\theta^2)-(\text{E}(\theta))^2$.
For most sample sizes encountered in practice, it is currently perfectly feasible to compute (\ref{eq:var}). The most efficient and robust way is to compute probabilities recursively, starting at the mode and continuing both up and down to very low probabilities. The correct scale factor for the probabilities is found by requiring that they sum to unity.
As is the case with the next two methods, this approach provides only an approximate variance because it `plugs in' the point estimates $\hat{p}$ and $\hat{q}$ instead of using the true (unknown) values $p, q$.
\subsubsection{Monte Carlo calculation of the variance.}
An alternative method is to use parametric bootstrapping,
where we simulate many binomial realizations from $P_i$ and $Q_j$, calculate $\hat{\theta}$ and the
variance of these bootstrap replications gives the required variance.
This method is attractive in situations where $N_1$ and/or $N_2$ are large, so that the enumeration
required in the previous method is less feasible.
\subsubsection{An approximate formula for the variance.}
The appendix gives an approximate formula for the variance $\sigma^2$. This is based on the usual Taylor series expansion and normal approximation for the logged relative risk. The resulting formula requires use of the standard normal cumulative distribution but is very fast to compute and has been found to work very well when $0.1 < p < 0.9$, $0.1 < q < 0.9$, and $N_1 > 100, N_2 > 100$. The analysis in the appendix treats the cases where $q>p$ and $q<p$ separately, so that the non-differentiability at $\theta=0$ does not result in any difficulties for the approximations used.
\subsection{A split lognormal approximation for the distribution of $\hat{\theta}$}
A normal approximation for $\hat{\theta}$ can be used directly for making inferences about $\theta$ and can be anticipated to be adequate in situations where such an approximation for the risk difference is also reasonable (large samples, $p$ and $q$ both not close to zero or one). Any of the three methods for computing the variance of $\hat{\theta}$ described above could be used in this approximation, so that we approximate $\hat{\theta} \sim N(\theta, \sigma^2)$ where $\sigma^2$ is treated as if fixed and known.
Those who might be reluctant to use a normal approximation for $\hat{\theta}$ because it is also defined in terms of two estimated rate ratios in equation (\ref{eq2a}) (for which analysis is usually performed on the log scale) would probably be more willing to use normal approximations for sample proportions, and so be willing to approximate the distribution of $\hat{\theta}$ using ratios of two normal distributions. Then, as Marsaglia (2006) points out, `approximations show that many of the ratios of normal variates encountered in practice can themselves be taken as normally distributed'. Hence normal approximations for $\hat{\theta}$ are not necessarily immediately unacceptable.
However, we have found by simulating $2 \times 2$ tables that the distribution of $\hat{\theta}$ often contains a skew tail. Hence normal approximations made directly for $\hat{\theta}$ are only very crude and a better approximation for the sampling distribution of $\hat\theta$ is desirable. Here we develop such an approximation using a split lognormal distribution. This approximation will be used in our third model for random-effects meta-analysis (model 3) below. Our split lognormal approximation simply uses conventional normal approximations for the log risk ratios in (\ref{eq2a}) and then puts them together to approximate the distribution of $\hat{\theta}$.
Using the delta-method, we have approximately
\begin{equation}\ln(\hat{q}/\hat{p}) \sim N(\mu_1,\sigma_1^2)=N\left(\ln(q/p), \frac{1-q}{qN_2}+\frac{1-p}{pN_1}\right),\label{eq:a1}\end{equation}
using the notation in Table \ref{tab:1},
i.e.\xspace $\ln(\hat{q}/\hat{p})$ is normally distributed with known mean and variance. Similarly, take $\ln ((1-\hat{q})/(1-\hat{p}))$ as
\begin{equation}\ln ((1-\hat{q})/(1-\hat{p})) \sim N(\mu_2,\sigma_2^2)=N\left(\ln((1-q)/(1-p)), \frac{q}{(1-q)N_2}+\frac{p}{(1-p)N_1}\right).\label{eq:a2}\end{equation}
These approximations work surprisingly well, and we use them both, so that we approximate
\begin{equation}
\label{eq:app}
X=\left\{\begin{array}{cl}
~\ln(1+\hat{\theta}) \sim N\left(\mu_1,\sigma_1^2\right) & \text{if $\hat{\theta} < 0$}\\
-\ln(1-\hat{\theta}) \sim N\left(-\mu_2,\sigma_2^2\right) & \text{if $\hat{\theta} \ge 0$}.
\end{array}
\right. \end{equation}
Our definition of $X$ in (\ref{eq:app}) ensures that $X$ is increasing in $\hat{\theta}$, so that our parameterisation makes computation as easy as possible.
When using the approximation for $X$, and so $\hat{\theta}$, in (\ref{eq:app}), we follow the usual convention of taking the variances $\sigma_1^2$ and $\sigma_2^2$ as known but we estimate them using $\hat{p}$ and $\hat{q}$ in practice. To use (\ref{eq:app}) in order to specify the approximate distribution of $\hat{\theta}$ in terms of ${\theta}$, all that is then required is to write $\mu_1$ and $\mu_2$ as functions of $\theta$. It is straightforward to write $\mu_1 = \ln(1+{\theta})$ when $\theta <0$, and $\mu_2 = \ln(1-{\theta})$ when ${\theta} \ge 0$. However it is not so straightforward to write $\mu_1$ and $\mu_2$ as functions of $\theta$ when $\theta \ge 0$, and $\theta <0$, respectively, because then $\mu_1$ and $\mu_2$ are not directly specified by $\theta$. In the appendix we explain how to overcome this difficulty, so that the probability density function of $\hat{\theta}$ can be written in terms of $\theta$ when using the split normal approximation. We have found that our split lognormal approximation is able to capture the skew tail of the distribution of $\hat{\theta}$, and so is in general much more accurate than a crude normal approximation.
We also explain how a variety of other inferences can be made for a single $2 \times 2$ table using the split normal approximation in the appendix, including the computation of confidence intervals and p-values. However for the purposes of using the split normal approximation in meta-analysis (model 3, below) we require only the probability density function of $\hat{\theta}$ as a function of $\theta$, so that likelihoods can be computed and the usual asymptotic theory of maximum likelihood applied.
\section{Random-effects meta-analysis}
We now present three different methods for performing random-effects meta-analyses using our measure. These three contrasting methods are all 2-stage methods because we require the computation of study specific data in the first stage that are then pooled in the second stage. For method 1 (the conventional random-effects model), in the first stage we compute the $\hat{\theta}_i$ and $\sigma_i^2$ (where the $\sigma_i^2$ may be computed using any of the above three approaches) and we pool these outcome data in stage two in the usual way. For method 2 we use the same outcome data (the $\hat{\theta}_i$ and $\sigma_i^2$) but avoid using a normal distribution, in order to avoid concerns about using this distribution for our measure that is constrained to lie within [-1,1]. Finally for method 3 we use the split-normal distribution to describe the within-study distributions and we include non-normal random-effects. When using method 3 in the first stage we compute the $\hat{\theta_i}$, and the variances $\sigma_{1i}^2$ and $\sigma_{2i}^2$, required in the study specific split lognormal within-study approximations from (\ref{eq:app}). In principle 1-stage meta-analyses (e.g.\xspace Simmonds and Higgins 2006), that avoid within-study approximations and perform the analysis in a single stage, are possible and we return to this possibility in the discussion.
\subsection{Method one: The conventional random-effects method\label{meth1}}
This method is the simplest and most direct method: the study-specific outcome data, $\hat{\theta}_i$ and $\sigma^2_{i}$ are computed and used directly as outcome data in the random-effects model for meta-analysis $\hat{\theta}_i \sim N({\theta}, \sigma^2_{i} + \tau^2)$.
We refer to it as the `direct method' in the next section.
Any of the above three methods described above in section 2.3 for computing the $\sigma^2_{i}$ could be used in conjunction with this conventional approach. An advantage of this method is that, having computed the $\hat{\theta}_i$ and $\sigma^2_{i}$, standard meta-analysis software packages can be used to perform the analysis. A variety of methods for estimating $\tau^2$ are available when using this standard approach (Veroniki {\em et al}, 2016).
An advantage of using the new measure in this way is that there is no need to add $1/2$, or some other constant, to all counts to prevent infinities in $\hat{\theta}_i$ and $\sigma^2_{i}$. If both $p_i$ and $q_i$ are estimated as zero or unity, $\hat{\theta}_i$ would be zero as would its variance, and the study would be discarded; this is commonly done with 2-stage methods with conventional measures such as the odds ratio.
A potential problem here is that assuming a normal distribution for $\hat{\theta}_i$ is not especially appropriate, partly because $\hat{\theta} \in [-1,1]$, as discussed above. However standard random-effects meta-analyses are often performed using the risk difference, where this is also an issue but is not considered to be a sufficient concern to avoid this approach.
The next two methods address this problem.
\subsection{Method two: a random-effects model using the beta distribution\label{meth2}}
The $\hat{\theta}_i$ lie in the interval [-1,1]. In order to use the beta distribution to model these estimates, we model the transformed outcome data $\hat{\psi}_i=(1+\hat{\theta}_i)/2$ so that $\hat{\psi}_i \in [0,1]$. Specifically, the observed $\hat{\psi}_i$ is taken as a random variable from the beta distribution with mean $\psi=(1+\theta)/2$ and variance $(\sigma_i^2+\tau^2)/4$, where $\hat{\theta}_i$ and $\sigma_i^2$ are the same outcome data as in method one. Larger studies contribute more weight to the analysis via their smaller $\sigma_i^2$, as in the more conventional method above.
Thus the contribution to the likelihood from a study is
\[{\cal L}_i(\theta,\tau)=\hat{\psi}_i^{\alpha_i-1}(1-\hat{\psi}_i)^{\beta_i-1}/B(\alpha_i,\beta_i),\]
where $B$ denotes the beta function. Here $\alpha_i$ and $\beta_i$ are the parameters of the beta distribution that models $\hat{\psi}_i$, and so we must parameterise $(\alpha_i,\beta_i)$ in terms of the mean $\psi=(1+\theta)/2$ and variance $(\sigma_i^2+\tau^2)/4$ corresponding to the $i$th study. Hence we take $(\alpha_i,\beta_i)$ to be the values that correctly provide these two moments of $\hat{\psi}_i$. This reparameterisation of $(\alpha_i,\beta_i)$ to $\psi=(1+\theta)/2$ and $(\sigma_i^2+\tau^2)/4$ is easily performed and is described in the appendix.
The likelihood function is then the product of the study specific ${\cal L}_i(\theta, \tau)$ and approximate
inference is performed using the asymptotic theory of maximum likelihood.
Note that here we must add halves, or some other quantity, when there are zero counts or we
will obtain zero probabilities under the beta distribution.
The transformation $\hat{\psi}_i=(1+\hat{\theta}_i)/2$ is very simple so that inferences are easily back-transformed to the original scale.
\subsection{Method three: a random-effects method using the split-lognormal approximation\label{meth3}}
Here the split lognormal approximation, from equations (\ref{eq:a1}), (\ref{eq:a2}) and (\ref{eq:app}) is used to model the within-study distributions. As explained in section 2.4, we can specify the approximation in terms of the GRRR. We can therefore apply this approximation to each study, and in terms of its study specific true underlying effect $\theta_i$. If we assume a common-effect model ($\tau^2=0$), so that $\theta_i=\theta$ for all $i$, inference is easily performed using maximum likelihood as for the previous model where the likelihood function is the product of the ${\cal L}_i({\theta_i}) = {\cal L}_i({\theta})$. Larger studies contribute more weight to the analysis via their smaller $\sigma_{1i}^2$ and $\sigma_{2i}^2$ used in their study specific split lognormal approximation (equations \ref{eq:a1}, \ref{eq:a2} and \ref{eq:app}).
To include a random effect, and so fit a random-effects model, we take $\psi_i=(1+\theta_i)/2$ to have a beta distribution, with mean $\psi=(1+\theta)/2$ and variance $\tau^2/4$. Written in terms of $\psi_i$, the within-study likelihood ${\cal L}_i({\theta_i})$ is ${\cal L}_i(2\psi_i-1)$. This within-study likelihood is then integrated over the distribution of $\psi_i$ in order to integrate out the random-effects in the usual way, so that
\[{\cal L}(\theta,\tau)=\frac{\int_0^1 {\cal L}_i(2\psi_i-1)\psi_i^{\alpha-1}(1-\psi_i)^{\beta-1}\,\mbox{d} \psi_i}{B(\alpha,\beta)},\]
where this integration is performed numerically. As in method 2, approximate
inference is performed using the asymptotic theory of maximum likelihood. Here $\alpha$ and $\beta$ are chosen so that $\text{E}(\psi_i)=\psi=(1+\theta)/2$ and $\text{var}(\psi_i)=\tau^2/4$.
This reparameterisation is similar to the one used in method 2 and is also described in the appendix.
This is a similar approach to modelling the random effect as in the previous method. A conceptual difference is that in method 2 we used a beta distribution to model the $\hat{\psi}_i$ but
here we instead use this distribution to model the true underlying $\psi_i$; we assume that the $\psi_i$ follow a common distribution so that the same $\alpha$ and $\beta$ are used for all studies when computing the likelihood.
In this sense method 3 is computationally simpler, but it requires a separate numerical integration for every study when computing the likelihood. Hence method 3 is the most computationally expensive of the three methods that we propose,
but uses the most realistic model.
\section{Application to meta-analysis datasets}
In this section we will use 3 real examples to illustrate the use of our measure in practice. The first example involves thirteen randomized control trials from 1948 to 1976 on the prevention of tuberculosis using the BCG (Bacillus Calmette-Gu\'{e}rin) vaccine, with data given in Hartung, Knapp and Sinha (2008), but taken originally from Colditz {\em et al} (1994). The event of interest is contracting tuberculosis. The second example involves 22 trials of streptokinase following mycocardial infarction, given in Egger, Altman and Smith (2001).
Briefly, from 1959, 21 trials were carried out
to see whether streptokinase could reduce 6-month mortality from infarction; this was feasible because streptokinase can dissolve blood clots. Here the event of interest is death. The third example is one of the eleven randomised control trials of lamotrigine (Ramaratnam, Panabianco and Marson, 2016), from 1989 to 2007, as an adjunctive therapy for the treatment of drug-resistant partial epilepsy. The outcome is a 50\% or more reduction in seizure frequency.
Table \ref{tab:40} shows the results from using all three methods described in section 3 on our main three examples using maximum likelihood estimation and the asymptotic theory of maximum likelihood to make inferences.
The within-study variance was computed exactly (by enumeration) for methods 1 and 2.
Table \ref{tab:dsl} shows the results using the two stage method described in section \ref{meth1},
instead using the DerSimonian and Laird (1986) method. In Table \ref{tab:dsl} the quoted $\hat{\tau}$ is the square root of the corresponding DerSimonian and Laird estimate $\hat{\tau}^2$. It can be seen that the results using the more conventional DerSimonian and Laird method are very similar to those using our `direct' method.
\begin{table}[h]
\begin{tabular}{|l|c|l|l|l|l|} \hline
Analysis method&Dataset & $\hat\theta$ & s.e. & $\hat{\tau}$ & s.e. \\ \hline
2-stage Direct (sec \ref{meth1})&TB &-.496&.088&.292&.066\\ \hline
2-stage Beta (sec \ref{meth2})&TB & -.489&.083&.270&.061\\ \hline
2-stage Lognormal (sec \ref{meth3})&TB & -.505&.075&.239&.053\\ \hline
2-stage Direct (sec \ref{meth1})&Strept&-.170&.045&.149&.043\\ \hline
2-stage Beta (sec \ref{meth2})&Strept&-.174&.045&.152&.041\\ \hline
2-stage Lognormal (sec \ref{meth3})&Strept&-.200&.023&0&0\\ \hline
2-stage Direct (sec \ref{meth1})&Lamot&~.201&.036&.078&.031\\ \hline
2-stage Beta (sec \ref{meth2})&Lamot&~.200&.036&.031&.060\\ \hline
2-stage Lognormal (sec \ref{meth3})&Lamot&~.132&.030&.049&.046 \\ \hline
\end{tabular}
\caption{\label{tab:40}Results for the new measure applied to three examples, using all three methods.}
\end{table}
\begin{table}[h]
\begin{tabular}{|l|c|l|l|l|l|} \hline
Analysis method &Dataset & $\hat\theta$ & s.e. & $\hat{\tau}$ & $I^2$\% \\ \hline
2-stage Direct (sec \ref{meth1})&TB &-.493 & .102&.345&97.6\\ \hline
2-stage Direct (sec \ref{meth1})&Strept&-.168&.041&.133&63.0\\ \hline
2-stage Direct (sec \ref{meth1})&Lamot&.202&.033&0&0\\ \hline
\end{tabular}
\caption{\label{tab:dsl}Results for the new measure applied to parallel studies, using the DerSimonian and Laird method
and assuming a normal distribution for $\hat\theta$.}
\end{table}
The outcomes are harmful (death and contracting tuberculosis) in our first two examples and beneficial (reduction in seizure frequency) in our third example. Hence $\theta<0$ indicates treatment benefit in the first two examples and $\theta>0$ indicates treatment benefit in the third example. Using the results in Table \ref{tab:40}, and normal approximations for the maximum likelihood estimates, we infer that the treatment is beneficial in all three examples. Our use of the GRRR then allows us to communicate these findings to a lay audience in a very simple and direct way. For example, let us take $\hat{\theta}=-0.5$ from Table \ref{tab:40} for our first example (TB). We are then able to tell a lay audience that one way to explain the extent of the treatment efficacy is to say that we estimate that 50\% (i.e. half) of the population who do not take the vaccine and contract TB would also contract TB if they instead took the vaccine (where we assume that all those who would not contract TB without taking the vaccine also would not contract this if they
took the vaccine). In other words, we estimate that around half the population who do not take the vaccine and contract TB would instead avoid contracting this if they had taken the vaccine. Statements such as these nicely convey the notion that the vaccine has real benefit (but is not perfect) in a simple way, whilst being statistically principled. We can also quantify the uncertainty in this statement. From a 95\% confidence interval for $\theta$, the 50\% that we quoted could in fact be between around 30\%-70\%.
As another illustration, let us take $\hat{\theta}=0.2$ from Table \ref{tab:40} for our third example. We are then able to tell a lay audience that one way to explain the extent of the treatment efficacy is to say that, in addition to those who experience a notable reduction in seizure frequency without the lamotrigine (and would also experience this if they took this treatment), a further 20\% of those who do not experience a notable reduction in seizure frequency without lamotrigine would instead experience this if they took the treatment. However there is uncertainty in this estimate and (from a 95\% confidence interval for $\theta$) this percentage could be between around 10\%-30\%. Again, these statements clearly convey the potential benefit of taking the treatment in an especially direct and transparent manner.
A final point is that analysts may be reluctant to use our proposed measure in analysis because it is unconventional, but may wish to convert results using other measures to it, so that explanations such as these can be given. This conversion can be performed upon adopting a representative baseline risk $p$ for the control group and we give full details of the calculation required in the appendix for converting the odds ratio in this way. We return to this issue in the discussion.
\section{Discussion}
A new treatment effect measure (generalized relative risk reduction, or `GRRR'), based on relative risk, has been introduced.
The new measure gives a treatment effect on a scale from minus one to plus one, with zero indicating no treatment effect.
There is a clear causal interpretation that accompanies the new measure and that can be used to communicate the results to those with little or no formal statistical training. Those who are faced with explaining the findings from meta-analyses, and statistical analyses more generally, to the general public are likely to find our new measure especially useful.
This may include journalists and politicians as well as clinicians. Health economists may also find this measure useful,
as costs of using or not using a new intervention are straightforward to calculate. We suggest that interpretations using our measure could be included in `plain language summaries' that accompany Cochrane reviews and other information sources that are intended for a wide audience. For the `take-home' messages from statistical analyses to be fully appreciated by the general public we require methods such as those that we present here. We hope that, at the very least, our methods will provide further ideas for communicating statistical findings in a simple and direct, and yet still statistically principled, manner that is widely accessible.
We have developed three methods for performing 2-stage random-effects meta-analysis that use our new measure.
We therefore have proof of concept that it may be used in conjunction with quite sophisticated statistical models. Future work could focus on other types of models where binary outcome data are modelled, such as logistic regressions and generalised linear mixed models. For example, regression modelling could be carried out by allowing $\psi =(1+\theta)/2\in [0,1]$ to be
a logistic function of covariates. These possibilities for more complex modelling include 1-stage methods for random-effects meta-analysis and the authors have developed two further methods of this type. These 1-stage methods have been found to produce similar results to the 2-stage methods presented here and may form the subject of future work. We have also performed a small scale empirical investigation to determine if the proposed measure results in better model fits than the more conventional (log) odds ratio. Further investigation is needed but our preliminary investigation suggests that models using our measure describe real meta-analysis datasets just as well as more conventional measures of treatment effect.
Rather than motivate our new measure as providing better fitting models to data, we have proposed it primarily so that the resulting statistical inferences can be more easily communicated to general audiences. We suggest therefore that it is particularly suited to providing results that could be communicated in plain language summaries such as those used by Cochrane. It is most straightforward to report results using our measure after actually using our measure as the outcome in analysis, but we suspect that many analysts would object to this idea unless our measure becomes more widely used and accepted. We would encourage analysts who might be uncomfortable in using our new measure in analysis to consider converting their results using a measure of their choice to ours, so that conclusions using our measure can be reported despite the fact that an alternative measure was used in analysis.
A classical (frequentist) approach to statistical inference has been adopted here but the likelihood-based methods can also be used for Bayesian inference.
Prior distributions for all parameters would then be needed.
The main difficulty is determining a suitable prior distribution for $\theta$,
and the beta distribution for $\psi$ is an obvious candidate. It can be used when there is a lot of prior information, and also includes as special cases the uniform and Jeffreys priors.
Markov chain Monte Carlo (MCMC) would probably be used to perform analyses because the resulting posterior distributions are unlikely to be analytically tractable.
The only other candidate measure of treatment effect for $2 \times 2$ tables that is so easily interpretable is the number needed to treat. However this measure has unacceptable statistical properties. We suggest that the GRRR is suitable as a replacement for this measure as it is both easily interpretable and
has acceptable statistical properties. The GRRR has however two undesirable properties: it is not invariant when the treatment and control groups are interchanged, as we have explained, and furthermore $\theta$ is not differentiable when $\theta=0$. This latter property has not caused us any problems here but this could result in difficulties for the unwary. At the very least, our proposed measure has much better properties than the number needed to treat, which we regard as its main contender for an easily interpreted and intuitively appealing measure of treatment effect for comparative binary outcome data.
To summarise, we hope that meta-analysts, and indeed the statistics community more generally, will be convinced by the case for our new measure of treatment effect, and that they will find it to be a useful new way to measure and communicate the results from comparative trials that involve a binary event of interest. We also hope that our work will serve to stimulate debate about the best way to communicate statistical conclusions to those with little or no formal statistical training.
\section{Appendix: detailed formulae}
\subsection{The split-lognormal distribution}
In section 2.4 of the main paper we explain that it is possible to write the probability density function (pdf) of $\hat{\theta}$ in terms of $\theta$. In this section of the appendix we give full details about how this is done.
To derive an approximation for the sampling distribution of $\hat\theta$, using the delta method and assuming normality of the logged risk ratio, we take $\ln(\hat{q}/\hat{p})$ as
obeying (8) and $\ln((1-\hat{q})/(1-\hat{p}))$ as obeying (9), where these equations are in the main paper.
Then $\hat{q}/\hat{p}$ follows a lognormal distribution. Similarly, $(1-\hat{q})/(1-\hat{p})$ follows a lognormal distribution, and $\hat\theta$ is as defined in (10) via $X$.
From this approximation for $X$, and hence $\hat{\theta}$, the pdf and distribution function of $\hat{\theta}$ can be obtained. Hence
p-values, confidence intervals, and the moments of this distribution may also be computed.
\subsection{The probability density function and the cumulative distribution function of $\hat{\theta}$}
We can identify four cases from equation (10) and we evaluate the pdf for each case. The pdf is then defined for all cases. These four cases arise because $\hat{\theta}$ can take either sign (we use normal approximations and so the probability that $\hat{\theta}=0$ is zero), and $\theta$ can be of the same sign as
$\hat{\theta}$ or not. To see why we consider these four cases separately, consider the first line in the right hand side of (10), which applies to $\hat{\theta}<0$. If $\theta<0$, so that $\hat{\theta}$ and $\theta$ are both negative, then it is straightforward to write $\mu_1=\ln(1+\theta)$ and a normal approximation for $X$, and hence $\hat{\theta}$, is immediate for the combination of $\hat{\theta}<0$ and $\theta<0$. However if $\hat{\theta}<0$ and $\theta \ge 0$ then we no longer have $\mu_1=\ln(1+\theta)$ and the parameterisation of the normal approximation is not quite so immediate. Following a similar argument for the second line in the right hand side of (10), we can see that the difficulties occur when $\hat{\theta}$ and $\theta$ are not of the same sign, and so we adopt a `divide and conquer' approach of considering each case separately.
When $\hat{\theta} < 0$, $\theta < 0$, we have the
approximation $\ln(1+\hat{\theta}) \sim N[\mu_1=\ln(q/p)=\ln(1+\theta),\sigma_1^2]$, and so
the pdf
\[f(\hat{\theta})=\frac{\exp\{-(\ln(1+\hat{\theta})-\ln(1+\theta))^2/2\sigma_1^2\}}{\sqrt{2\pi\sigma_1^2}(1+\hat{\theta})}.\]
The corresponding distribution function is
\begin{equation}\Phi\{(\ln(1+\hat{\theta})-\ln(1+\theta))/\sigma_1\},\label{eq:phi1}\end{equation}
where $\Phi(\cdot)$ is the standard normal distribution function.
Here we have used the fact that $\mu_1=\ln(q/p)=\ln(1+\theta)$. However, when $\theta \ge 0$, $\mu_1$ is not specified by $\theta$.
This is because $\theta=1-(1-q)/(1-p)$ when $\theta \ge 0$ from which $q/p$ cannot be determined.
However, a substitute value can be found by requiring that the total probability is unity.
We therefore use the fact that
\[\text{Prob}(\hat{q}/\hat{p} > 1)=\Phi(\mu_1/\sigma_1)=\text{Prob}((1-\hat{q})/(1-\hat{p}) < 1)=\Phi(-\mu_2/\sigma_2)\]
to obtain the useful result
\begin{equation}\mu_1/\sigma_1=-\mu_2/\sigma_2\label{eq:trick}\end{equation}
from which, because $\mu_1=\ln(q/p)$, the approximate mean of $\ln(1+\hat{\theta})$ for $\hat{\theta} < 0$ can be found
from $\mu_2=\ln((1-q)/(1-p))$, which is also equal to $\ln(1-\theta)$ when $\theta \ge 0$.
Using (\ref{eq:trick}), when $\hat{\theta} < 0$, $\theta \ge 0$
\[f(\hat{\theta})=\frac{\exp\{-(\ln(1+\hat{\theta})+(\sigma_1/\sigma_2)\ln(1-\theta))^2/2\sigma_1^2\}}{\sqrt{2\pi\sigma_1^2}(1+\hat{\theta})}.\]
The corresponding distribution function is
\[\Phi\{\ln(1+\hat{\theta})/\sigma_1+\ln(1-\theta)/\sigma_2\}.\]
Similarly, when $\hat{\theta} \ge 0$, we have for $\theta \ge 0$ that
\[f(\hat{\theta})=\frac{\exp\{-(\ln(1-\hat{\theta})-\ln(1-\theta))^2/2\sigma_2^2\}}{\sqrt{2\pi\sigma_2^2}(1-\hat{\theta})}.\]
The corresponding distribution function is
\begin{equation}\Phi\{(-\ln(1-\hat{\theta})+\ln(1-\theta))/\sigma_2\},\label{eq:phi3}\end{equation}
where the minus sign arises because $\ln(1-\hat{\theta})$ is a decreasing function of $\hat\theta$.
Finally for $\hat{\theta} \ge 0 $, $\theta < 0$
\[f(\hat{\theta})=\frac{\exp\{-(\ln(1-\hat{\theta})+(\sigma_2/\sigma_1)\ln(1+\theta))^2/2\sigma_2^2\}}{\sqrt{2\pi\sigma_2^2}(1-\hat{\theta})}.\]
The corresponding distribution function is
\begin{equation}\Phi\{-\ln(1-\hat{\theta})/\sigma_2-\ln(1+\theta)/\sigma_1\}.\label{eq:phi4}\end{equation}
\subsubsection{P-values and confidence intervals}
At the end of section 2.4 of the main paper we explain that other inferences can be made for a single $2 \times 2$ table. In this section of the appendix we give full details of this.
First, p-values are given when $\theta=0$ so that $\mu_1=\mu_2=0$. The 1-sided p-value for obtaining $\hat\theta$ at least as large as observed when $\hat{\theta} \ge 0$ is $\Phi(\ln(1-\hat{\theta})/\sigma_2)$,
using (\ref{eq:phi3}) and the identity $1-\Phi(x)=\Phi(-x)$.
The 1-sided p-value for obtaining $\hat\theta$ at least as negative as observed when $\hat{\theta} < 0$ is $\Phi(\ln(1+\hat{\theta})/\sigma_1)$ from (\ref{eq:phi1}).
From (\ref{eq:phi1}) and (\ref{eq:phi3}) we obtain the formula for the corresponding 2-sided p-values, i.e.\xspace for $\hat\theta$ to exceed the observed $|\hat{\theta}|$ in either direction.
This is
\[p=\Phi(\ln(1-|\hat{\theta}|)/\sigma_1)+\Phi(\ln(1-|\hat{\theta}|)/\sigma_2)\]
from which we can see that the 2-sided p-value is 1 if $\hat{\theta}$ is exactly zero (this is impossible using normal approximations but could arise in real data).
Confidence intervals for $\theta$ can be computed by equating quantiles of the pdf to the required values.
When $\hat{\theta} \ge 0$, from (\ref{eq:phi3}) and the corresponding pdf we obtain the size $\alpha$ limits for $\ln(1-\theta)$ as $\ln(1-\hat{\theta}) \pm \sigma_2 z_{\alpha/2}$,
where $z_\alpha$ is the $100\alpha$ percentile of the normal distribution. From this the limits for $\theta$ are
$\theta=1-(1-\hat{\theta})\exp(\pm \sigma_2 z_{\alpha/2})$.
However this calculation assumes that
$\hat{\theta} \ge 0$ and $\theta \ge 0$, but it may happen that the lower limit is negative, so that we have $\hat{\theta} \ge 0$ and $\theta < 0$
at the lower end of the confidence interval. In this case, the lower limit must be recomputed as
\[\theta=(1-\hat{\theta})^{-\sigma_1/\sigma_2}\exp(-\sigma_1 z_{\alpha/2} )-1.\]
This follows by using (\ref{eq:trick}) to deal with the issue that the signs of $\hat{\theta}$ and ${\theta}$ are not the same and proceeding in a similar way as when deriving the pdf and cumulative distribution function.
Similarly, when $\hat{\theta} < 0$, the confidence interval is $\theta=(1+\hat{\theta})\exp(\pm \sigma_1 z_{\alpha/2})-1$, unless the upper limit is positive, in which case it should be recomputed as
$\theta=1-(1+\hat{\theta})^{-\sigma_2/\sigma_1}\exp(\sigma_2 z_{\alpha/2})$.
\subsection{The variance of $\hat{\theta}$}
In order to use standard methods for meta-analysis we require within-study variances. In section 2.3.3 of the main paper we discuss an approximate formula for this. In this section of the appendix we derive this formula.
A large-sample approximation for the variance of $\hat\theta$ has been developed, and works very well when $0.1 < p < 0.9$, $0.1 < q < 0.9$, and $N_1 > 100, N_2 > 100$.
Because of the tractability of the lognormal distribution, one can calculate $A_n=\text{E}_L\{(\hat{q}/\hat{p})^n\}$, where the integral for the expectation
is truncated at $\hat{q}/\hat{p}=1$, so that the expectation is calculated over the range where $\hat{q} < \hat{p}$, i.e.\xspace
\[A_n=\frac{1}{\sqrt{2\pi \sigma_1^2}}\int_{0}^1 x^{n-1}\exp(-(\ln(x)-\mu_1)^2/2\sigma_1^2)\,\mbox{d} x.\]
This integral can be evaluated analytically by changing variable to $y=\ln(x)$, so that the integration is now performed over $(-\infty, 0)$, and completing the square in the exponent.
Similarly $B_n=\text{E}_R\{((1-\hat{q})/(1-\hat{p}))^n\}$ can be calculated as
\[B_n=\frac{1}{\sqrt{2\pi \sigma_2^2}}\int_{0}^1 x^{n-1}\exp(-(\ln(x)-\mu_2)^2/2\sigma_2^2)\,\mbox{d} x\]
where the integral for the expectation is truncated at $(1-\hat{q})/(1-\hat{p}) =1$, so that the expectation is calculated over the range where $1-\hat{q} \le 1-\hat{p}$, or $\hat{q} \ge \hat{p}$. Evaluating the integrals analytically gives
\[A_n=(q/p)^n\exp(n^2 \sigma_1^2/2)\Phi(-\mu_1/\sigma_1-n\sigma_1),\]
\[B_n=((1-q)/(1-p))^n\exp(n^2 \sigma_2^2/2)\Phi(-\mu_2/\sigma_2-n\sigma_2).\]
Hence we have one approximation for $\hat{q} < \hat{p}$ (the `left' side of the distribution), and a different approximation for $\hat{q} \ge \hat{p}$ (the right side).
From the definition of $\hat{\theta}$ we then have
\[\text{E}(\hat{\theta})=\text{E}_L(\hat{q}/\hat{p}-1)+\text{E}_R(1-(1-\hat{q})/(1-\hat{p}))=A_1-A_0+B_0-B_1,\]
\[\text{E}(\hat{\theta}^2)=A_2-2A_1+A_0+B_2-2B_1+B_0,\]
from which the variance of $\hat\theta$ can be computed as $\text{E}(\hat{\theta}^2) - \text{E}(\hat{\theta})^2 $, on replacing $p, q$ in the formula by $\hat{p}, \hat{q}$ respectively. Note that we have evaluated the expectation of $\hat{\theta}$ from its definition, where we have evaluated this expectation by integrating over the two areas of the sample space separately.
This approximation to $\sigma^2$ is surprisingly accurate.
It requires the computation of the normal distribution function 6 times.
\section{Reparameterising the beta distribution}
As explained in sections 3.2 and 3.3, to use methods 2 and 3 we need to specify the mean $\psi$ and variance $\sigma^2$ of a beta distribution, and then compute the usual beta function parameters $\alpha, \beta$
within the section of computer code that computes the log-likelihood function. We give the details here. For method 2 this mean is $\psi=(1+\theta)/2$ and the variance is $\sigma^2= (\sigma_i^2+\tau^2)/4$; for method 3 the variance is instead $\sigma^2=\tau^2/4$.
We have the standard result
\[\psi=\alpha/(\alpha+\beta),\]
\[\sigma^2=\frac{\alpha\beta}{(\alpha+\beta)^2(\alpha+\beta+1)}.\]
Hence $\sigma^2=\frac{\psi(1-\psi)}{\alpha+\beta+1}$, so that
\begin{equation}\alpha=\psi\left\{\frac{\psi(1-\psi)}{\sigma^2}-1\right\},\label{eq:alpha}\end{equation}
\begin{equation}\beta=(1-\psi)\left\{\frac{\psi(1-\psi)}{\sigma^2}-1\right\}.\label{eq:beta}\end{equation}
We must constrain our model parameters so that $\alpha$ and $\beta$ are positive in this reparameterisation. This could be a problem if the function minimiser chooses very large values of $\sigma^2$ and further reparameterisations could be used to make the numerical methods more robust.
\subsection{Converting to our new measure}
As explained at the end of section 4, we anticipate that some applied analysts may not be convinced by the case for using our measure in statistical analysis, but despite this will find the new measure to be an attractive option for communicating their findings to those with little or no formal statistical training. To use our proposed measure to communicate findings when alternative measures of treatment effect have been used in analysis, we need ways to convert other measures to ours. In this section we explain how to convert the odds ratio to the GRRR. The methods and issues are similar when converting other measures of treatment effect.
When converting from one measure of treatment effect to another, for example the odds ratio to the risk difference, we need to take a representative baseline risk $p$. This can be the average value of $p$ for the studies in the meta-analysis, either unweighted or weighted. We can then use the implied $q$ from one measure of treatment effect to compute the other measure that we wish to convert to. By taking into account the uncertainty in the first of these measures, we can communicate the uncertainty in the conversion.
Let us start by converting the odds ratio to a finite range. This can be done in several ways,
the simplest arguably being
\begin{equation}\phi=\frac{OR-1}{OR+1}=\frac{q-p}{p+q-2pq}.\label{eq:ortheta}\end{equation}
The measure in (\ref{eq:ortheta}), which itself is easily derived from the definition of the odds ratio, can then be converted into $\theta$ defined by equation (2) of the main paper. On eliminating $q$,
\begin{equation}\theta=\left\{\begin{array}{ll}
\frac{2(1-p)\phi}{1-\phi+2p\phi} & \text{if $\phi < 0$}\\
\frac{2p\phi}{1-\phi+2p\phi} & \text{if $\phi \ge 0$}.
\end{array}
\right. \label{eq:back}\end{equation}
so that the OR can easily be converted to $\phi$, which can then be converted to our measure $\theta$.
\end{document} |
\begin{document}
\title{Off-Policy RL Algorithms Can be Sample-Efficient for Continuous Control via Sample Multiple Reuse}
\begin{abstract}
Sample efficiency is one of the most critical issues for online reinforcement learning (RL). Existing methods achieve higher sample efficiency by adopting model-based methods, Q-ensemble, or better exploration mechanisms. We, instead, propose to train an off-policy RL agent via updating on a fixed sampled batch multiple times, thus reusing these samples and better exploiting them within a single optimization loop. We name our method \textit{sample multiple reuse} (SMR). We theoretically show the properties of Q-learning with SMR, e.g., convergence. Furthermore, we incorporate SMR with off-the-shelf off-policy RL algorithms and conduct experiments on a variety of continuous control benchmarks. Empirical results show that SMR significantly boosts the sample efficiency of the base methods across most of the evaluated tasks without any hyperparameter tuning or additional tricks.
\end{abstract}
\section{Introduction}
In recent years, the success of reinforcement learning (RL) has been witnessed in fields like games \cite{Mnih2015HumanlevelCT, Silver2016MasteringTG, Vinyals2019GrandmasterLI}, neuroscience \cite{Dabney2020ADC}, fast matrix multiplication \cite{Fawzi2022DiscoveringFM}, and nuclear fusion control \cite{Degrave2022MagneticCO}.
Online RL, different from batch RL \cite{Lange2012BatchRL}, defines the task of learning an optimal policy via continual interactions with the environment. The agent can generally explore (discover unseen regions) and exploit (use what it already knows) \cite{Sutton2005ReinforcementLA} the data due to the accessibility to the environment. Prior work explores many exploration methods for both discrete \cite{Ecoffet2020FirstRT, Burda2018ExplorationBR} and continuous control \cite{lillicrap2015continuous, Colas2017GEPPGDE} domains. With respect to the exploitation, off-policy deep RL algorithms are known to be more sample-efficient than on-policy methods, as they usually store past experiences and reuse them during training. Unfortunately, most of the off-policy deep RL algorithms, especially on continuous control domains, still need a vast number of interactions to learn meaningful policies. Such a phenomenon undoubtedly hinders the wide application of RL algorithms in real-world problems, e.g., robotics.
\begin{figure}
\caption{\textbf{Left:}
\label{fig:example}
\end{figure}
In this paper, we set our focus on continuous control domains. There are many efforts in enhancing the exploration capability of the off-policy RL algorithms by adding extra bonus reward \cite{Tang2016ExplorationAS, Fu2017EX2EW, Houthooft2016CuriositydrivenEI, Achiam2017SurpriseBasedIM}, leveraging maximum entropy framework \cite{ziebart2010modeling, Haarnoja2018SoftAO, haarnoja2018softactorcritic}, etc. Another line of research focuses on better exploiting the data. They achieve this by alleviating the overestimation bias in value estimate \cite{Fujimoto2018AddressingFA, Lee2020SUNRISEAS, Kuznetsov2020ControllingOB, Efficient2022Lyu}, using high update-to-data (UTD) ratio \cite{Chen2021RandomizedED, Hiraoka2021DropoutQF}, adopting model-based methods \cite{Janner2019WhenTT, Lai2020BidirectionalMP, Pan2020TrustTM, wuplan2022}, etc. Nevertheless, these advances often involve complex components like ensemble. We wonder: \textit{is it possible to design a simple method that can universally better exploit data and improve sample efficiency?}
To this end, we propose \textit{sample multiple reuse} (SMR), where we update the actor and the critic network multiple times on the fixed sampled batch data, as shown in Figure \ref{fig:example}. By doing so, the networks can better fit and exploit the batch data (as depicted in Figure \ref{fig:smriteration}). We deem that every collected sample from online interaction is valuable and is worth being utilized more times during training. SMR is general and can be combined with \textit{any} off-the-shelf off-policy continuous control RL algorithms by modifying only a few lines of code.
To illustrate the rationality and benefits of SMR, we combine it with Q-learning and propose Q-SMR algorithm. We theoretically analyze the convergence property of Q-SMR in the tabular case. We empirically show that Q-SMR exhibits stronger sample efficiency than vanilla Q-learning. We then combine SMR with five typical continuous control RL algorithms and run experiments on four tasks from OpenAI Gym \cite{Brockman2016OpenAIG}. We combine SMR with SAC \cite{Haarnoja2018SoftAO} and extensively evaluate SAC-SMR on two additional continuous control benchmarks, yielding a total of 30 tasks. Across most of the evaluated tasks, we observe improvement in sample efficiency over the base algorithms, often by a large margin (as shown in Figure \ref{fig:example}). The empirical results reveal that SMR is very general and can improve the sample efficiency of different algorithms in a variety of tasks.
To ensure that our proposed method is reproducible \cite{Islam2017ReproducibilityOB, henderson2018deep}, we include the anonymous code in \href{https://anonymous.4open.science/r/SMR-F3F2}{https://anonymous.4open.science/r/SMR-F3F2}, and evaluate the experimental results across fair evaluation metrics.
\begin{figure}
\caption{The key idea illustration of sample multiple reuse. The {\color{blue}
\label{fig:smriteration}
\end{figure}
\section{Preliminaries}
Reinforcement learning (RL) aims at dealing with sequential decision-making tasks. It can be formulated as a Markov decision process (MDP) defined by a tuple $\langle \mathcal{S}, \mathcal{A}, r, p, \gamma \rangle$. $\mathcal{S}$ is the state space, $\mathcal{A}$ is the action space, $r:\mathcal{S}\times\mathcal{A}\mapsto\mathbb{R}$ is the scalar reward signal, $p(\cdot|s,a)$ is the dynamics transition probability, and $\gamma\in[0,1)$ is the discount factor. In online RL, the agent can continually interact with the environment by following a policy $\pi:\mathcal{S}\mapsto\mathcal{A}$. The goal of the agent is to maximize the expected discounted long-term rewards, i.e.,
\begin{equation}
\max J(\phi) = \mathbb{E} \left[ \sum_{t=0}^\infty \gamma^t r(s_t,a_t) \bigg| s_0, a_0; \pi \right].
\end{equation}
A policy is said to be \textit{stationary} if it is time-invariant. The state-action value function (also $Q$-function) $Q^\pi:\mathcal{S}\times\mathcal{A}\mapsto\mathbb{R}$ given a policy $\pi$ is defined by
\begin{equation}
Q^\pi(s,a) = \mathbb{E}_\pi \left[ \sum_{t=0}^\infty \gamma^t r(s_t,a_t) \bigg| s_0=s, a_0=a\right].
\end{equation}
The optimal $Q$-function $Q^*$ is the unique fixed point of the Bellman operator $\mathcal{T}Q$, which is given by:
\begin{equation}
\mathcal{T}Q(s,a) := r(s,a) + \gamma\mathbb{E}_{s^\prime\sim p(\cdot|s,a)} [ \max_{a^\prime\in\mathcal{A}}Q(s^\prime,a^\prime)].
\end{equation}
A typical off-policy RL algorithm is Q-learning \cite{Watkins1992Qlearning}. It aims at learning the optimal $Q$-function and updates its entry via the following rule:
\begin{equation}
\label{eq:qlearning}
Q_{t+1}(s,a) = (1-\alpha_t)Q_t(s,a) + \alpha_t (r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_t(s^\prime,a^\prime)),
\end{equation}
where $\alpha_t$ is the learning rate at timestep $t$.
\section{Why Not Reuse Your Data More?}
In online deep RL, it is a common practice that we sample a mini-batch in a bootstrapping way from the replay buffer, where the past experience is stored, for training the RL agent. However, existing off-policy RL methods update \textit{once} on the sampled transitions, which is a waste since they fail to better exploit the collected valuable samples.
We remedy existing off-policy RL algorithms by reusing the sampled batch data more times. Our key intuition and motivation lie in the fact that it is hard for the neural network to well-fit and well-evaluate the sampled batch with just one glance (check Figure \ref{fig:smriteration}). With more updates on the sampled batch, the network can better adapt to the sample distribution, in conjunction with a more reliable evaluation upon them. We name our method \textit{sample multiple reuse} (SMR), which can be combined with \textit{any} off-policy RL algorithms. We first combine our method with vanilla Q-learning \cite{Watkins1992Qlearning}, yielding the Q-SMR algorithm as depicted in Algorithm \ref{alg:q-smr}. We further define the number of iterations $M$ as the SMR ratio, which measures the fixed batch reusing frequency of the agent. Empirically, Figure \ref{fig:q-smrcliff} illustrates the superior sample efficiency of our proposed Q-SMR algorithm against vanilla Q-learning in the tabular case, where a fixed $M=10$ is utilized for the Q-SMR. In both the classical cliff-walking environment and a maze environment, Q-SMR is able to learn faster and converge faster.
\begin{minipage}{0.48\textwidth}
\begin{algorithm}[H]
\centering
\caption{Q-SMR}\label{alg:q-smr}
\begin{algorithmic}[1]
\STATE Set learning rate sequence $\{\alpha_t\}$, number of iterations $T$.
\STATE Initialize $Q(s,a)$ table with 0.
\FOR{$t$ = 1 to $T$}
\STATE Choose action $a$ derived from $Q$, e.g., $\epsilon$-greedy, and observe reward $r$ and next state $s^\prime$.
\color{red}
\FOR{$m$ = 1 to $M$}
\STATE Update $Q_t$ according to Equation \ref{eq:qlearning}.
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
\includegraphics[width=\linewidth]{figures/qlearning.pdf}
\captionof{figure}{Comparison of Q-SMR and Q-learning on CliffWalking-v0 and maze-random-20x20-plus-v0 tasks from Gym \cite{Brockman2016OpenAIG}. The results are averaged over 20 independent runs, and the shaded region is the standard deviation.}
\label{fig:q-smrcliff}
\end{minipage}
\begin{minipage}{0.47\textwidth}
\begin{algorithm}[H]
\centering
\caption{Off-policy actor-critic with SMR}\label{alg:ac-smr}
\begin{algorithmic}[1]
\STATE Initialize critic network parameter $\theta$, actor network parameter $\phi$ with random parameters.\\
\STATE Initialize target critic network parameter $\theta^\prime\leftarrow \theta$.
\STATE Initialize empty replay buffer $\mathcal{B}=\{\}$. \\
\STATE (Optional) Initialize target actor network parameter $\phi^\prime\leftarrow \phi$.
\FOR{$t$ = 1 to $T$}
\STATE Choose action $a$ and observe reward $r$, next state $s^\prime$.
\STATE Store the transition in the replay buffer, i.e., $\mathcal{B}\leftarrow \mathcal{B}\cup \{(s,a,r,s^\prime)\}$.
\STATE Sample $N$ transitions $\{(s_j,a_j,r_j,s^\prime_j)\}_{j=1}^{N}$ from $\mathcal{B}$.
\color{red}
\FOR{$m$ = 1 to $M$}
\STATE Update critic by minimizing Bellman error.
\STATE Update actor with policy gradient.
\STATE Update target network.
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
\end{minipage}
Moreover, our method can also be incorporated with any off-policy (deep) RL algorithms, and the experimental results in Figure \ref{fig:q-smrcliff} shed light on doing so. We detail the (abstracted) off-policy actor-critic with SMR in Algorithm \ref{alg:ac-smr}. Compared to typical actor-critic methods, our revised algorithm only enforces the agent to train on identical batch data multiple times. This requires a minimal change to the base algorithm, which can be completed by modifying a few lines of code. We defer the detailed pseudo-code of various off-policy algorithms with SMR in Appendix \ref{sec:pseudocodes}.
\section{Theoretical Analysis}
In this section, we aim at showing the theoretical properties of the Q-SMR algorithm in the tabular case. The theoretical guarantee of Q-SMR can pave the way for applying SMR in complex continuous control tasks. All missing proofs can be found in Appendix \ref{sec:missingproof}.
We consider asynchronous Q-learning \cite{EvenDar2004LearningRF, Li2020SampleCO} which follows the update rule:
\begin{equation}
\begin{aligned}
&Q_{t+1}(s_t,a_t) = (1-\alpha_t)Q_t(s_t,a_t)+\alpha_t \mathcal{T}_{t+1}Q_{t}(s_t,a_t), \\
&Q_{t+1}(s,a) = Q_t(s,a) \quad \forall\, (s,a)\neq (s_t,a_t),
\end{aligned}
\end{equation}
where $\mathcal{T}_{t+1}Q_{t}(s_t,a_t)=r_t+\gamma\max_{a^\prime\in\mathcal{A}}Q_t(s_{t+1},a^\prime)$. We have access to a sample trajectory $\{s_t,a_t,r_t\}_{t=0}^\infty$ from a behavior policy $\pi_b$, and we only update one $(s,a)$-entry each step here.
Given the SMR ratio $M$, we define $Q_t^{(i)}(s,a), i\in[1,M]$ as the intermediate $Q$-function at timestep $t$ and iteration $i$. The resulting $Q$-function after SMR iteration is $Q_t(s,a)$ where we omit superscript $(M)$ for $Q_t(s,a)$. We define $Q_{t+1}^{(0)}(s,a)=Q_t^{(M)}(s,a)$. We first give the update rule for Q-SMR that is equivalent to the loop (line 4-6 in Algorithm \ref{alg:q-smr}) in Theorem \ref{theo:updaterule}.
\begin{theorem}
\label{theo:updaterule}
The update rule of Q-SMR is equivalent to:
\begin{equation}
\begin{aligned}
&Q_{t+1}(s_t,a_t) = (1-\alpha_t)^M Q_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t), \\
&Q_{t+1}(s,a) = Q_t(s,a) \quad \forall\, (s,a)\neq (s_t,a_t),
\end{aligned}
\end{equation}
where $\mathcal{T}_{t+1}Q_{t+1}(s_t,a_t) = r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_{t+1}(s_{t+1},a^\prime)$ denotes the empirical Bellman operator w.r.t. timestep $t+1$.
\end{theorem}
\noindent\textbf{Remark:} The update rule of Q-SMR relies on the intermediate value during the SMR iteration. The influence of the current Q-value in Q-SMR is reduced (as $(1-\alpha_t)^M\le (1-\alpha_t)$), and hence the value estimate can change faster by querying the maximal value. This we believe can partly explain the superior sample efficiency of Q-SMR against vanilla Q-learning depicted in Figure \ref{fig:q-smrcliff}.
\begin{assumption}
\label{ass:mdp}
Assume that $\forall t$, the reward signal is bounded, $|r_t|\le r_{\rm max}$.
\end{assumption}
We note that this is a widely used assumption, which can also be easily satisfied in practice as many reward functions are hand-crafted. We then show in Theorem \ref{theo:stability} that Q-SMR outputs a bounded value estimate throughout its iteration.
\begin{theorem}[Stability]
\label{theo:stability}
Let Assumption \ref{ass:mdp} hold and assume the initial $Q$-function is set to be 0, then for any iteration $t$, the value estimate induced by Q-SMR, $\hat{Q}_t$, is bounded, i.e., $|\hat{Q}_t|\le \dfrac{r_{\rm max}}{1-\gamma},\forall t$.
\end{theorem}
We further show that the Q-SMR algorithm is guaranteed to converge to the optimal Q-value, which reveals the rationality of utilizing the Q-SMR algorithm in practice and paves the way for extending the Q-SMR algorithm into deep RL scenarios.
\begin{theorem}[Convergence]
\label{theo:convergence}
Under some mild assumptions that are similar to \cite{Fujimoto2018AddressingFA,melo2001convergence}, the Q-SMR algorithm converges to the optimal $Q$-function.
\end{theorem}
Interestingly, we can establish a connection between modified learning rate and SMR update rule by assuming that the underlying MDP is \textit{nonreturnable}, i.e., $s_{t+1}\neq s_t$. Then, the rule can be simplified.
\begin{corollary}
\label{coro:simpleupdaterule}
If the MDP is nonreturnable, the update rule of Q-SMR gives:
\begin{equation}
\label{eq:simpleupdaterule}
\begin{aligned}
&Q_{t+1}(s_t,a_t) = (1-\alpha_t)^M Q_{t}(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right] \mathcal{T}_{t+1}Q_t(s_t,a_t), \\
&Q_{t+1}(s,a) = Q_t(s,a) \quad \forall\, (s,a)\neq (s_t,a_t).
\end{aligned}
\end{equation}
\end{corollary}
\noindent\textbf{Remark:} Compared to vanilla Q-learning, this rule actually \textit{modifies} the learning rate from $\alpha_t$ to $1-(1-\alpha_t)^M$. Since $\alpha_t\in[0,1]$, it is easy to see $1 - (1-\alpha_t)^M\in[0,1],\forall\,t$.
Furthermore, we can derive the finite time error bound of the Q-SMR algorithm based on the above corollary, which improves over the prior results \cite{Szepesvari1997TheAC, EvenDar2004LearningRF,Qu2020FiniteTimeAO}. Please refer to Appendix \ref{sec:additionaltheory} for more details and discussions.
\section{Experiments}
\begin{figure*}
\caption{Experimental results of four typical continuous control algorithms with and w/o SMR on four OpenAI Gym \cite{Brockman2016OpenAIG}
\label{fig:gymresult}
\end{figure*}
In this section, we investigate the benefits of SMR upon off-the-shelf off-policy continuous control RL algorithms. We aim at answering the following questions: (1) is the SMR general enough to benefit wide off-policy RL algorithms? (2) how much performance gain can off-policy RL algorithms acquire by using SMR?
In order to show the strong data exploitation ability and the generality of SMR, we combine SMR with TD3 \cite{Fujimoto2018AddressingFA}, SAC \cite{haarnoja2018softactorcritic}, DARC \cite{Efficient2022Lyu}, TQC \cite{Kuznetsov2020ControllingOB}, and REDQ \cite{Chen2021RandomizedED}. We choose these methods as they typically represent different categories of continuous control algorithms, i.e., TD3 leverages clipped double Q-learning, SAC is based on the maximum entropy RL, DARC enhances the agent's exploration capability by using double actors, TQC addresses overestimation by incorporating distributional RL into the continuous setting, and REDQ is the state-of-the-art model-free RL method which trains critic ensemble and uses a high update-to-data (UTD) ratio.
Besides the loop of reusing samples (line 5-9 in Algorithm \ref{alg:ac-smr}), we do not make any additional modifications (e.g., parameter tuning) to the base algorithm. We run experiments on four continuous control tasks from OpenAI Gym \cite{Brockman2016OpenAIG} simulated by MuJoCo \cite{Todorov2012MuJoCoAP}. All methods are run for 300K online interactions where we adopt the SMR ratio $M=10$ by default except REDQ where we set $M=5$ (as REDQ already uses a large UTD ratio). We note that 300K is a typical interaction step adopted widely in prior work \cite{Chen2021RandomizedED,Janner2019WhenTT,Hansen2022TemporalDL} for examining sample efficiency.
Each algorithm is repeated with 6 random seeds and evaluated over 10 trials every 1000 timesteps. We find that SMR significantly improves the sample efficiency of the base algorithms on almost every task, often outperforming them by a large margin (see Figure \ref{fig:gymresult}). SAC-SMR achieves 4x, and TQC-SMR 3x, the sample efficiency of the corresponding base algorithm, as shown in Table \ref{tab:sampleefficiency}. Notably, SAC-SMR takes only 93K online interactions to reach 3000 in Hopper-v2, and TQC-SMR takes merely 34K online interactions. The results even match the performance of MBPO \cite{Janner2019WhenTT} (around 73K). We show in Appendix \ref{sec:missingexperiments} that other off-policy RL algorithms like DDPG, DrQ-v2 \cite{yarats2022mastering} can benefit from SMR as well. These altogether reveal that {\color{red} the advantage of SMR is algorithm-agnostic.}
\begin{figure*}
\caption{Experimental results of SAC-SMR against vanilla SAC on 8 state-based, 2 image-based DMC suite \cite{Tassa2018DeepMindCS}
\label{fig:sacdmc8}
\end{figure*}
\begin{table}[!htb]
\caption{Sample efficiency comparison. We choose SAC, TQC and DARC as examples. The numbers indicate the number of online interactions when the specified performance level is reached.}
\renewcommand\arraystretch{1.05}
\label{tab:sampleefficiency}
\setlength{\tabcolsep}{4pt}
\centering
\small
\begin{tabular}{l|ll|ll|ll}
\toprule
Score & SAC & SAC-SMR & TQC & TQC-SMR & DARC & DARC-SMR \\
\midrule
Hopper@3000 & 373K & \textbf{93K} & 160K & \textbf{34K} & 205K & \textbf{67K} \\
Ant@4000 & 982K & \textbf{211K} & 469K & \textbf{135K} & 324K & \textbf{305K} \\
HalfCheetah@10000 & 860K & \textbf{282K} & 576K & \textbf{185K} & 407K & \textbf{324K} \\
Walker2d@4000 & 656K & \textbf{164K} & 281K & \textbf{133K} & 292K & \textbf{264K} \\
\bottomrule
\end{tabular}
\end{table}
We further combine SMR with SAC and run SAC-SMR extensively on two additional continuous control benchmarks, DMC suite \cite{Tassa2018DeepMindCS} and PyBullet-Gym \cite{benelot2018}. We conduct experiments on 20 DMC suite tasks, 4 PyBullet-Gym tasks, and 6 image-based tasks from DMC suite, yielding a total of \textbf{30} tasks. For state-based tasks, we use $M=10$ and run for 500K interactions. For image-based tasks, as it is very time-consuming with $M=10$, we use $M=5$, which we find is sufficient to show the advantage of SMR. Both SAC and SAC-SMR are evaluated over 10 trials every 1000 timesteps. It can be seen in Figure \ref{fig:sacdmc8} that SMR significantly boosts the sample efficiency of SAC on the evaluated tasks. This can also be validated from Table \ref{tab:performance} where SAC-SMR achieves 2.5x the performance of SAC at 250K and 2.0x the performance of SAC at 500K when averaging the numbers.
Due to the space limit, we defer some results to Appendix \ref{sec:missingexperiments} and only report a small proportion of tasks here. These experimental results show that {\color{red} the advantage of SMR is task-agnostic.} In summary, we believe the above evidence is enough to verify the generality and effectiveness of SMR.
\begin{table}
\caption{Performance comparison of SAC, SAC-UTD (UTD ratio $G$=10) and SAC-SMR. We choose \texttt{cheetah-run} and \texttt{fish-swim} as examples. The numbers indicate the performance achieved when the specific number of data is collected. $\pm$ captures the standard deviation.}
\renewcommand\arraystretch{1.05}
\label{tab:performance}
\centering
\small
\begin{tabular}{l|lll}
\toprule
Amount of data & SAC & SAC-UTD & SAC-SMR \\
\midrule
cheetah-run@250K & 284.6$\pm$20.5 & 434.1$\pm$72.6 & \textbf{600.1}$\pm$49.2 \\
fish-swim@250K & 178.5$\pm$113.9 & 382.5$\pm$70.7 & \textbf{544.3}$\pm$184.4 \\
cheetah-run@500K & 452.1$\pm$47.7 & 633.9$\pm$99.1 & \textbf{725.4}$\pm$48.7 \\
fish-swim@500K & 324.8$\pm$213.9 & 712.0$\pm$41.9 & \textbf{756.3}$\pm$38.7 \\
\bottomrule
\end{tabular}
\end{table}
\noindent\textbf{Parameter Study.} The most critical hyperparameter in our method is the SMR ratio. It controls the frequency we reuse a fixed batch. Intuitively, we ought not to use too large $M$ to prevent potential overfitting in neural networks. For state-based tasks, we find that setting $M=10$ can incur very satisfying performance. In order to see the influence of the SMR ratio $M$, we conduct experiments on Ant-v2 and HalfCheetah-v2 from OpenAI Gym \cite{Brockman2016OpenAIG}. We sweep $M$ across $\{1,2,5,10,20\}$ and demonstrate in Figure \ref{fig:smr-ratio} that SMR can improve the sample efficiency of the base algorithm even with a small $M=2$, and the sample efficiency generally increases with larger $M$. We do not bother tuning $M$ and keep it fixed across our experiments.
\noindent\textbf{Computation Budget.} SMR consumes more computation budget than its base algorithm due to multiple updates on the fixed batch. Intuitively, our method will require more training time with a larger SMR ratio $M$. Typically, SMR ($M=10$) will take about 3--5 times more training time, e.g., SAC-SMR takes around 6 hours for 300K interactions on Walker2d-v2, while SAC takes around 1.5 hours. Such cost is tolerable for \textit{state-based} tasks considering the superior sample efficiency improvement with SMR.
\noindent\textbf{Clarification on the Asymptotic Performance.} As we focus on improving the sample efficiency, the asymptotic performance of SMR upon different base methods lies out of the scope of this work. Nevertheless, readers of interest can find that the asymptotic performance of SMR is quite good (please refer to Appendix \ref{sec:smrlonger} where we run SMR upon different algorithms for longer interactions).
\begin{figure}
\caption{The performance of SAC-SMR under different SMR ratios on two selected environments. The results are averaged over 6 runs and the shaded area captures the standard deviation.}
\label{fig:smr-ratio}
\end{figure}
\section{Discussions}
\subsection{Is SMR equivalent to enlarging learning rate?}
One may think that SMR is equivalent to amplifying learning rate $M$ times at first sight, i.e., $\alpha_t \rightarrow M\alpha_t$. However, we argue that they are \textit{quite different}. In the tabular case, we show in Theorem \ref{theo:updaterule} the update rule for Q-SMR, which is obviously not the rule that enlarges the original learning rate sequence $M$ times. In deep RL, suppose the (single) critic and actor are parameterized by $\theta$ and $\phi$, respectively. The objective function of the critic gives:
\begin{equation}
\label{eq:criticobjective}
\mathcal{L}_\theta = \mathbb{E}_{s,a,s^\prime\sim\rho}\left[ (Q_\theta(s,a) - r - \gamma Q_{\theta^\prime}(s^\prime, a^\prime))^2 \right],
\end{equation}
where $a^\prime\sim\pi_\phi$, $\rho$ is the sample distribution in the replay buffer, $\theta^\prime$ is the parameter of the target network. Deep neural networks are typically trained with stochastic gradient descent (SGD) \cite{LeCun2015DeepL, loshchilov2017sgdr, Bottou2010LargeScaleML}. The critic is optimized using the gradient information $\nabla \mathcal{L}_{\theta_t}$ obtained on the $t$-th batch, i.e., $\theta_{t+1} = \theta_t - \alpha_t \nabla \mathcal{L}_{\theta_t}$. We then show that, in deep RL, SMR is also not equivalent to enlarging learning rate.
\begin{theorem}
\label{theo:difference}
Denote $\theta_t^{(i)}$ as the intermediate parameter in the SMR loop at timestep $t$ and iteration $i$, then in deep RL, the parameter update using SMR satisfies:
\begin{equation}
\label{eq:smrlearningrate}
\theta_{t+1} = \theta_t - {\color{red} \alpha_t \sum_{i=0}^{M-1} \nabla\mathcal{L}_{\theta_{t+1}^{(i)}}} \neq \theta_t - {\color{red} M\alpha_t \nabla \mathcal{L}_{\theta_t}}.
\end{equation}
\end{theorem}
The inequality in the above theorem is due to the fact that $\theta_{t+1}^{(i+1)}\neq \theta_{t+1}^{(i)}$. A natural question is then raised: how does SMR compete against magnifying the learning rate?
\begin{minipage}{0.5\textwidth}
\includegraphics[width=0.95\linewidth]{figures/sac-lr.pdf}
\captionof{figure}{Comparison of SAC-SMR ($M=10$) against SAC-LR (i.e., amplify the learning rate 10 times). Each algorithm is repeated with 6 seeds and evaluated over 10 trials every 1000 timesteps. We report the mean performance and the standard deviation.}
\label{fig:enlargelr}
\end{minipage}
\hspace{0.03\linewidth}
\begin{minipage}{0.5\textwidth}
\includegraphics[width=0.95\linewidth]{figures/sac-bias.pdf}
\captionof{figure}{Normalized bias comparison of SAC and SAC-SMR on HalfCheetah-v2. SAC-SMR exhibits overfitting at first (with both larger average bias and std of bias) while it attains a smaller estimation bias very quickly.}
\label{fig:sac-bias}
\end{minipage}
We answer this by conducting experiments on two selected environments from OpenAI Gym \cite{Brockman2016OpenAIG}. Empirical results in Figure \ref{fig:enlargelr} show that enlarging the learning rate does aid performance gain, yet it still underperforms SMR in sample efficiency. Moreover, it is non-trivial to find the best learning rate. SMR, instead, can benefit the base algorithm with the default parameter (see more evidence in Appendix \ref{sec:missingexperiments}).
\subsection{Concerns on overfitting}
\label{sec:overfitting}
One may wonder whether the phenomenon of overfitting \cite{Dietterich1995OverfittingAU, Srivastava2014DropoutAS} will occur in SMR since we optimize the networks on fixed samples for multiple times. The networks may overfit the collected samples at first, but they can get rid of this dilemma and end up with better data exploitation later on with \textit{reasonable} $M$. We verify this by measuring the accuracy of $Q^\pi(s,a)$ over the state-action distribution of the current policy $\pi$ against its true value $Q(s,a)$ (i.e., discounted Monte Carlo return). Since the Monte Carlo return can change drastically during training, we adopt normalized estimation bias $\frac{Q^\pi(s,a)-Q(s,a)}{|\mathbb{E}_{\hat{s},\hat{a}\sim\pi}[Q(\hat{s},\hat{a})]|}$ for more meaningful comparison. We conduct experiments on HalfCheetah-v2. We run each algorithm with 6 seeds for 300K online interactions and evaluate them over 10 trials every 1000 timesteps. We adopt the same way of calculating the normalized estimation bias as REDQ \cite{Chen2021RandomizedED}. As illustrated in Figure \ref{fig:sac-bias}, SMR incurs slight overfitting at the beginning of training, while it can quickly escape from it and result in a smaller estimation bias afterwards.
This may be because the networks can well-fit new transitions from continual online interactions with multiple updates. Since SMR uses much fewer gradient steps per interaction with the environment compared with REDQ (with UTD $G=20$), we believe the concerns on overfitting can be mitigated to some extent. As a piece of evidence, we do not find any performance degradation with $M=10$ across a wide range of algorithms and tasks. The key for not overfitting is the appropriate choice of SMR ratio $M$. No wonder that it will be hard for the agent to get rid of overfitting with too large $M$ (e.g., $M=10^5$, also referred to as \emph{heavy priming} phenomenon in \cite{Nikishin2022ThePB}). For those who still worry about overfitting, we can remedy this by: (1) using a small $M$, e.g., $M=5$; (2) resetting the agent periodically \cite{Nikishin2022ThePB} such that it forgets past learned policy; (3) leveraging a larger batch size; etc. Note that one does not have to stick to adopting a high SMR ratio throughout the training process, and can use SMR as a \emph{starting point}, or a warm-up phase, e.g., one can use $M=10$ for 300K interactions and then resume vanilla training process (i.e., $M=1$), which can also relieve potential overfitting.
\subsection{Comparison with UTD (update-to-data)}
SMR focuses on boosting the sample efficiency of model-free algorithms by better exploiting collected samples. This is similar in spirit to model-based methods (e.g., MBPO \cite{Janner2019WhenTT}) and REDQ \cite{Chen2021RandomizedED} as they usually employ a large update-to-data (UTD) ratio, i.e., update the critic multiple times by sampling with bootstrapping (the sampled batch is different each time). However, SMR updates both actor and critic on the \textit{fixed} sampled batch multiple times to better fit the data (as Figure \ref{fig:smriteration} shows).
It is interesting to examine which way of reusing data can benefit the agent more. To answer this question, we compare SAC-SMR ($M=10$) against SAC-UTD (UTD $G=10$) and vanilla SAC on four DMC suite tasks. We run each algorithm for 1M online interactions. One can see that with the identical gradient steps per interaction with the environment, SMR achieves much better final performance and sample efficiency than UTD, as shown in Figure \ref{fig:replaycomp} and Table \ref{tab:performance}, indicating that SMR may be a better choice in practice. We remark here that the success of UTD in REDQ is attributed to a much higher UTD ratio and randomized critic ensemble. However, SMR does not rely on any specific component and can consistently improve the performance of the base algorithm. Meanwhile, we do not view SMR and UTD as contradictory methods, but rather orthogonal methods, e.g., one can find in Figure \ref{fig:gymresult} that SMR improves the sample efficiency of REDQ.
\begin{figure}
\caption{Comparison of SAC-SMR ($M=10$) against SAC-UTD ($G=10$) under identical update frequency. The results are averaged over 6 runs, and the standard deviation is also reported.}
\label{fig:replaycomp}
\end{figure}
\section{Related Work}
\label{sec:relatedwork}
\noindent\textbf{Off-policy RL algorithms.} Recently, we have witnessed the great success of off-policy algorithms in discrete settings since DQN \cite{Mnih2015HumanlevelCT}. There are many improvements upon it, including double Q-learning \cite{Hasselt2010DoubleQ, Hasselt2015DeepRL}, dueling structure \cite{Wang2015DuelingNA}, distributional perspective \cite{Bellemare2017ADP, Nam2021GMACAD, Dabney2017DistributionalRL}, experience replay techniques \cite{Schaul2015PrioritizedER, Hessel2017RainbowCI}, self-supervised learning \cite{Laskin2020ReinforcementLW, schwarzer2021dataefficient, Srinivas2020CURLCU}, model-based methods \cite{Kaiser2019ModelBasedRL, Ye2021MasteringAG, Schrittwieser2019MasteringAG, hamrick2021on, Hessel2021MuesliCI}, etc.
In the continuous control domain, off-policy RL algorithms are widely adopted, such as DDPG \cite{lillicrap2015continuous} and TD3 \cite{Fujimoto2018AddressingFA}. These methods are usually built upon the actor-critic framework \cite{prokhorov1997adaptive, konda2000actor}, accompanied with a replay buffer for storing past experiences. There are also many efforts in exploring off-policy training with image input \cite{Finn2015LearningVF, Dwibedi2018LearningAR, Sermanet2017TimeContrastiveNS, Nair2018VisualRL, Lee2019StochasticLA, yarats2021image, Hafner2020Dream, Hafner2018LearningLD, Yuan2022PreTrainedIE}.
\noindent \textbf{Sample-efficient continuous control algorithms.} How to improve the sample efficiency is one of the most critical issues to deploying the RL algorithms widely in the real world. Existing work realizes it via adding exploration noise \cite{lillicrap2015continuous, Plappert2017ParameterSN}, extra bonus reward \cite{Tang2016ExplorationAS, Fu2017EX2EW, Houthooft2016CuriositydrivenEI, Achiam2017SurpriseBasedIM}, multiple actors \cite{Zhang2018ACEAA, Efficient2022Lyu}, value estimate correction \cite{Pan2020SoftmaxDD, Wu2020ReducingEB, Kuznetsov2020ControllingOB, Kuznetsov2021AutomatingCO, Lyu2021ValueAF}, or by leveraging maximum entropy framework \cite{ziebart2010modeling, Haarnoja2018SoftAO, haarnoja2018softactorcritic}, incorporating uncertainty measurement \cite{Lee2020SUNRISEAS}, etc. \textit{SMR is orthogonal to all these advances} and can be easily combined with them.
Another line of research aiming at improving the sample efficiency in continuous control tasks sets their focus on learning a dynamics model of the environment \cite{Sutton1991dyna, Buckman2018SampleEfficientRL, Chua2018DeepRL, Janner2019WhenTT, DOro2020HowTL, li2022gradient, Hansen2022TemporalDL, voelcker2022value}. However, training an accurate model can be difficult \cite{Asadi2018TowardsAS, Asadi2018LipschitzCI, Lai2020BidirectionalMP} due to compounding errors \cite{Deisenroth2011PILCOAM, Venkatraman2015ImprovingMP, Talvitie2016SelfCorrectingMF}, and it is very time-consuming to run model-based RL codebase.
\noindent\textbf{Data replay methods.} There are many ways of utilizing data in deep RL scenario, e.g., replaying good transitions \cite{Schaul2015PrioritizedER,Liu2021RegretME,Zhang2017ADL,Kapturowski2018RecurrentER}, balancing synthetic data and real data in model-based RL \cite{Hasselt2019WhenTU,Pan2020TrustTM}, etc. Some studies \cite{Fedus2020RevisitingFO,oro2022sampleefficient,li2023efficient} explore and uncover the advantages of update frequency on the collected transitions in a bootstrapping way for sample efficiency. SMR, instead, reuses the fixed batch data for multiple times to aid sample efficiency, and is orthogonal to previous methods.
\section{Conclusion}
In this paper, we propose sample multiple reuse (SMR), a novel method for enhancing the exploitation ability of off-policy continuous control RL algorithms by optimizing the agent on the fixed sampled batch multiple times. We show the convergence property of Q-learning with SMR in the tabular case. SMR can be incorporated with \textit{any} off-policy RL algorithms to boost their sample efficiency. We empirically show that the benefits of SMR are both algorithm-agnostic and task-agnostic. We further show that SMR is different from amplifying learning rate and discuss the potential overfitting phenomenon when using SMR. We hope this work can provide some insights to the community and aid the design of more advanced off-policy RL algorithms.
The main limitation of our work lies in the fact that injecting a sample reuse loop for training neural networks takes extra time. Such cost is negligible for state-based tasks but not for image-based tasks (check Appendix \ref{sec:missingexperiments}). A promising solution may be dropout \cite{Srivastava2014DropoutAS}, which has been previously adopted to reduce the computation cost of REDQ in \cite{Hiraoka2021DropoutQF}. We leave it as future work.
\small
\appendix
\onecolumn
\section{Additional Theoretical Results}
\label{sec:additionaltheory}
In this section, we present additional theoretical results concerning the sample complexity and finite-time error bound for Q-SMR in nonreturnable MDPs. We first need to impose the following assumptions, which extend the assumption we made in the main text. All of the missing proofs can be found in Appendix \ref{sec:missingproof}.
\begin{assumption}[MDP Regularity]
\label{ass:mdpreg}
(1) $\forall t$, the reward signal is bounded, $|r_t|\le r_{\rm max}$; (2) The Markov chain induced by the stationary behavior policy $\pi_b$ is uniformly ergodic, and has a mixing time $t_{\rm mix}$,
\begin{equation*}
t_{\rm mix} := \min\left\{ t\bigg| \max_{(s_0,a_0)\in\mathcal{S}\times\mathcal{A}}D_{\rm TV}\left( P^t(\cdot|s_0,a_0)\| \mu_{\pi_b} \right) \le \dfrac{1}{4} \right\}.
\end{equation*}
\end{assumption}
$P^t(\cdot|s_0,a_0)$ is the distribution of $(s_t,a_t)$ conditioned on the initial state-action pair $(s_0,a_0)$ and $D_{\rm TV}(p\|q)$ denotes the total variation distance between two distributions $p,q$. Denote $\mu_{\pi_b}$ as the stationary distribution of the aforementioned Markov chain. We define $\mu_{\rm min}:=\inf_{(s,a)\in\mathcal{S}\times\mathcal{A}}\mu_{\pi_b}(s,a)$.
We are now interested in the sample complexity of the Q-SMR algorithm, which is built upon Corollary \ref{coro:simpleupdaterule}. Note that a general analysis on the sample complexity of Q-SMR is very hard as the target value keeps changing during SMR iteration. We thus resort to nonreturnable MDP and present the sample complexity results in the appendix. We introduce an important lemma on the learning rate sequence in Lemma \ref{lemma:learningrate}, which plays a critical role in proving Theorem \ref{theo:convergencerate}.
\begin{lemma}
\label{lemma:learningrate}
Denote $\hat{\alpha}_t = 1 - (1-\alpha_t)^M$, then we have
\begin{equation}
\alpha_t \le \hat{\alpha}_t \le \min\{1, M\alpha_t\}.
\end{equation}
\end{lemma}
We then formally present the sample complexity of Q-SMR.
\begin{theorem}[Finite time error bound]
\label{theo:convergencerate}
Assume that Assumption \ref{ass:mdpreg} holds and the SMR ratio is set to be $M$. Suppose the learning rate is taken to be $\alpha_t = \frac{h}{M(t+t_0)}$ with $t_0\ge\max(4h, \lceil \log_2 \frac{2}{\mu_{\rm min}} \rceil t_{\rm mix})$ and $h\ge \frac{4}{\mu_{\rm min}(1-\gamma)}$, then with probability at least $1-\delta$,
\begin{equation}
\begin{aligned}
\|\hat{Q}_T - Q^*\|_\infty \le \tilde{\mathcal{O}}\left( \dfrac{r_{\rm max}\sqrt{t_{\rm mix}}}{(1-\gamma)^{2.5}\mu_{\rm min}}\dfrac{1}{\sqrt{T}} + \dfrac{ r_{\rm max}t_{\rm mix}}{(1-\gamma)^3\mu_{\rm min}^2}\dfrac{1}{T} \right).
\end{aligned}
\end{equation}
\end{theorem}
As an immediate corollary, we have:
\begin{corollary}[Sample complexity]
\label{coro:samplecomplexity}
For any $0<\delta<1$ and $0<\epsilon<1$, with the Q-SMR algorithm we have:
\begin{equation}
\forall (s,a)\in\mathcal{S}\times\mathcal{A}: \|\hat{Q}_T - Q^*\|_\infty \le \epsilon,
\end{equation}
holds with probability at least $1-\delta$, provided the iteration number $T$ obeys:
\begin{equation}
T\stackrel{>}{\sim}\dfrac{r_{\rm max}^2 t_{\rm mix}}{(1-\gamma)^5\mu_{\rm min}^2}\dfrac{1}{\epsilon^2}.
\end{equation}
\end{corollary}
\noindent\textbf{Remark:} The above conclusion says that the sample complexity of Q-SMR gives $\tilde{\mathcal{O}}\left(\dfrac{ t_{\rm mix}}{(1-\gamma)^5\mu_{\rm min}^2}\dfrac{1}{\epsilon^2}\right)$. This result matches the recent theoretical analysis on the sample complexity of asynchronous Q-learning \cite{Qu2020FiniteTimeAO}, which improves over the previous bound \cite{Szepesvari1997TheAC, EvenDar2004LearningRF}. For a detailed comparison, we notice that the above sample complexity becomes $\tilde{\mathcal{O}}\left(\dfrac{ t_{\rm mix} (|\mathcal{S}||\mathcal{A}|)^2}{(1-\gamma)^5}\dfrac{1}{\epsilon^2}\right)$ by using that $\dfrac{1}{\mu_{\rm min}}$ scales with $(|\mathcal{S}||\mathcal{A}|)$. The prior bound in \cite{Szepesvari1997TheAC} gives a sample complexity of $\tilde{\mathcal{O}}\left(\dfrac{(|\mathcal{S}||\mathcal{A}|)^5}{(1-\gamma)^5\epsilon^{2.5}}\right)$ ($\omega=0.8$) and $\tilde{\mathcal{O}}\left(\dfrac{(|\mathcal{S}||\mathcal{A}|)^{3.3}}{(1-\gamma)^{5.2}\epsilon^{2.6}}\right)$ ($\omega=0.77$), where $\omega$ is the step size. Our results are sharper in terms of the dependence of $\dfrac{1}{1-\gamma}, \dfrac{1}{\epsilon},(|\mathcal{S}||\mathcal{A}|)$. The result can be extended to a constant learning rate (i.e., $\alpha_t \equiv \alpha,\forall\, t$) by following a similar analysis as \cite{Li2020SampleCO, Li2021IsQM}.
\section{Missing Proofs}
\label{sec:missingproof}
\subsection{Proof of Theorem \ref{theo:updaterule}}
\begin{theorem}
\label{apptheo:updaterule}
The update rule of Q-SMR is equivalent to:
\begin{equation}
\begin{aligned}
&Q_{t+1}(s_t,a_t) = (1-\alpha_t)^M Q_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t), \\
&Q_{t+1}(s,a) = Q_t(s,a) \quad \forall\, (s,a)\neq (s_t,a_t),
\end{aligned}
\end{equation}
where $\mathcal{T}_{t+1}Q_{t+1}(s_t,a_t) = r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_{t+1}(s_{t+1},a^\prime)$ denotes the empirical Bellman operator w.r.t. timestep $t+1$.
\end{theorem}
\begin{proof}
Note that we omit the superscript $^{(M)}$ for both the right $Q_t(s_t,a_t)$ and the left $Q_{t+1}(s_t,a_t)$ for clarity. We do $M$ iterations in SMR with intermediate $Q$ value labeled as $Q_t^{(i)}$ at timestep $t$ and iteration $i, i\in\{1,2,\ldots,M\}$. Set the current $Q$-function at timestep $t$ as $Q_t^{(0)}$, then with the SMR iteration, we have $Q_{t}^{(M)}(s_t,a_t)$ which is set to be the new $Q$-function at timestep $t+1$, $Q_{t+1}^{(0)}(s_t,a_t)=Q_{t}^{(M)}(s_t,a_t)$. Note that in SMR iteration, the timestep is fixed, only the superscript changes with iteration, using the rule that $Q_{t+1}^{(i)}(s_t,a_t) = (1-\alpha_t)Q_{t+1}^{(i-1)}(s_t,a_t) + \alpha_t \mathcal{T}_{t+1}Q_{t+1}^{(i-1)}(s_t,a_t),i\in\{1,2,\ldots,M\}$. Then run the loop till convergence. We will use induction to show the above conclusion.
If $M=1$, then the update rule becomes the vanilla Q-learning style (notice that $Q_t^{(0)}(\cdot,\cdot) = Q_t(\cdot,\cdot)$).
Now for $\forall\, M\ge 1$, let us assume the update rule holds, if $(s,a) = (s_t,a_t)$, then,
\begin{equation}
Q_{t+1}^{(M)}(s_t,a_t) = (1-\alpha_t)^M Q^{(0)}_{t+1}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t).
\end{equation}
Thus,
\begin{equation*}
\begin{aligned}
Q_{t+1}^{(M+1)}(s_t,a_t) &= (1-\alpha_t) Q^{(M)}_{t+1}(s_t,a_t) + \alpha_t \mathcal{T}_{t+1}Q_{t+1}^{(M)}(s_t,a_t). \quad \text{(by doing one iteration)} \\
&= (1-\alpha_t)\left[(1-\alpha_t)^M Q^{(0)}_{t+1}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t)\right] + \alpha_t \mathcal{T}_{t+1}Q_{t+1}^{(M)}(s_t,a_t). \\
&= (1-\alpha_t)^{M+1} Q^{(0)}_{t+1}(s_t,a_t) + \sum_{i=1}^{M} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-i)}(s_t,a_t) + \alpha_t \mathcal{T}_{t+1}Q_{t+1}^{(M)}(s_t,a_t). \\
&= (1-\alpha_t)^{M+1} Q^{(0)}_{t+1}(s_t,a_t) + \sum_{i=0}^{M} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-i)}(s_t,a_t) \\
&= (1-\alpha_t)^{M+1} Q^{(M+1)}_{t}(s_t,a_t) + \sum_{i=0}^{M} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-i)}(s_t,a_t) \\
\end{aligned}
\end{equation*}
Then by induction and omitting the superscript $^{(M+1)}$, we deduce that the update rule holds for $\forall\, M\ge 1$.
\end{proof}
\noindent\textbf{Remark:} It is quite hard to trace back the intermediate Bellman backup $\mathcal{T}_{t+1}Q_{t+1}^{(i)}(s_t,a_t)$ since it is taken over $r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_{t+1}^{(i)}(s_{t+1},a^\prime)$. Though $s_{t+1}$ is known, the maximal $Q$ value may change position with the iteration.
\subsection{Proof of Theorem \ref{theo:stability}}
\begin{theorem}[Stability]
Let Assumption \ref{ass:mdpreg} hold and assume that the initial $Q$-function is set to be 0, then for any iteration $t$, the value estimate induced by Q-SMR, $\hat{Q}_t$, is bounded, i.e., $|\hat{Q}_t|\le \dfrac{r_{\rm max}}{1-\gamma},\forall t$.
\end{theorem}
\begin{proof}
We also show this by induction. Obviously, $|\hat{Q}_0|=0\le\dfrac{r_{\rm max}}{1-\gamma}$. Now let us suppose for $\forall \, t\ge0, |\hat{Q}_t|\le \dfrac{r_{\rm max}}{1-\gamma}$, then by using the update rule from Theorem \ref{theo:updaterule}, we have
\begin{align*}
|\hat{Q}_{t+1}(s_t,a_t)| &= \left|(1-\alpha_t)^M \hat{Q}_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}\hat{Q}_{t+1}^{(M-1-i)}(s_t,a_t)\right| \\
&= \left|(1-\alpha_t)^M \hat{Q}_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \left[r_t + \gamma\max_{a^\prime\in\mathcal{A}}\hat{Q}_{t+1}^{(M-1-i)}(s_{t+1},a^\prime)\right]\right| \\
&\le (1-\alpha_t)^M \left| \hat{Q}_{t}(s_t,a_t) \right| + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \left|r_t + \gamma\max_{a^\prime\in\mathcal{A}}\hat{Q}_{t+1}^{(M-1-i)}(s_{t+1},a^\prime)\right| \\
&\le (1-\alpha_t)^M \dfrac{r_{\rm max}}{1-\gamma} + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \left[|r_t| + \gamma\left|\max_{a^\prime\in\mathcal{A}}\hat{Q}_{t+1}^{(M-1-i)}(s_{t+1},a^\prime)\right|\right] \\
&\le (1-\alpha_t)^M \dfrac{r_{\rm max}}{1-\gamma} + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \left[r_{\rm max} + \gamma\dfrac{r_{\rm max}}{1-\gamma}\right] \\
&= (1-\alpha_t)^M \dfrac{r_{\rm max}}{1-\gamma} + \left[\sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i\right] \dfrac{r_{\rm max}}{1-\gamma} \\
&= (1-\alpha_t)^M \dfrac{r_{\rm max}}{1-\gamma} + \left[1-(1-\alpha_t)^M\right] \dfrac{r_{\rm max}}{1-\gamma} \\
&= \dfrac{r_{\rm max}}{1-\gamma}.
\end{align*}
By using induction, we deduce that $\forall\, t\ge0$ the Q-SMR outputs stable $Q$ value, which satisfies $|\hat{Q}_t|\le\dfrac{r_{\rm max}}{1-\gamma}$.
\end{proof}
\subsection{Proof of Corollary \ref{coro:simpleupdaterule}}
\begin{corollary}
\label{appcoro:simpleupdaterule}
If the MDP is nonreturnable, i.e., $s_{t+1}\neq s_t$, the update rule of Q-SMR gives:
\begin{equation}
\begin{aligned}
&Q_{t+1}(s_t,a_t) = (1-\alpha_t)^M Q_{t}(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right] \mathcal{T}_{t+1}Q_t(s_t,a_t), \\
&Q_{t+1}(s,a) = Q_t(s,a) \quad \forall\, (s,a)\neq (s_t,a_t),
\end{aligned}
\end{equation}
\end{corollary}
\begin{proof}
If the MDP is nonreturnable, then it is easy to address the empirical Bellman backup $\mathcal{T}_{t+1}Q_{t+1}^{(i)}$. We have that $\mathcal{T}_{t+1}Q_{t+1}^{(i)}(s_t,a_t) = r_t + \gamma \max_{a^\prime\in\mathcal{A}}Q_{t+1}^{(i)}(s_{t+1},a^\prime)$. Since it is asynchronous Q-learning, only entry $(s_t,a_t)$ will be updated inside the SMR loop. That is to say, $\mathcal{T}_{t+1}Q_{t+1}^{(i)}(s_t,a_t) = r_t + \gamma \max_{a^\prime\in\mathcal{A}}Q_{t+1}^{(i)}(s_{t+1},a^\prime)$ is unchanged throughout the SMR iteration. Therefore, based on Theorem \ref{apptheo:updaterule}, we have that the update rule gives
\begin{align*}
Q_{t+1}(s_t,a_t) &= (1-\alpha_t)^M Q_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t), \\
&= (1-\alpha_t)^M Q_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_t(s_t,a_t), \\
&= (1-\alpha_t)^M Q_{t}(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right]\mathcal{T}_{t+1}Q_t(s_t,a_t).
\end{align*}
\end{proof}
\noindent\textbf{Remark:} If we also let $s_{t+1}=s_t$ follow the above update rule, then the analysis below (e.g., sample complexity) can be extended naturally. This, however, triggers a gap between the original SMR loop and this practical update rule. We thus enforce $s_{t+1}\neq s_t$. Our analysis is restricted to nonreturnable MDPs, while our empirical results remedy this and validate the effectiveness of our proposed method.
\subsection{Proof of Theorem \ref{theo:convergence}}
In order to show Theorem \ref{theo:convergence}, we first present a well-known result from \cite{Singh2000ConvergenceRF}, which is built upon a proposition from \cite{bertsekas2012dynamic}.
\begin{lemma}
\label{applemma:randomprocess}
Consider a stochastic process $(\zeta_t, \Delta_t, F_t),t\ge0$ where $\zeta_t, \Delta_t, F_t:X\mapsto \mathbb{R}$ satisfy the equation:
\begin{equation}
\Delta_{t+1}(x_t) = (1 - \zeta_t(x_t))\Delta_t(x_t) + \zeta_t(x_t)F_t(x_t),
\end{equation}
where $x_t\in X$ and $t=0,1,2,\ldots$. Let $P_t$ be a sequence of increasing $\sigma$-fields such that $\zeta_0$ and $\Delta_0$ are $P_0$-measurable and $\zeta_t, \Delta_t$ and $F_{t-1}$ are $P_t$-measurable, $t=1,2,\ldots$. Assume the following conditions hold: (1) The set $X$ is finite; (2) $\zeta_t(x_t)\in[0,1]$, $\sum_t\zeta_t(x_t) = \infty, \sum_t(\zeta_t(x_t))^2 < \infty$ with probability 1 and $\forall x\neq x_t:\zeta_t(x_t)=0$; (3) $\| \mathbb{E}[F_t|P_t] \| \le \kappa \|\Delta_t\| + c_t$, where $\|\cdot\|$ denotes maximum norm, $\kappa\in[0,1)$ and $c_t$ converges to 0 with probability 1; (4) Var$[F_t(x_t)|P_t]\le C(1 + \|\Delta_t\|)^2$, where $C$ is some constant. Then $\Delta_t$ converges to 0 with probability 1.
\end{lemma}
We also need the following lemma, which will be of great help.
\begin{lemma}
\label{applemma:learningratelemma}
If the learning rates satisfy $\alpha_t(s,a)\in[0,1], \sum_t\alpha_t(s,a) = \infty, \sum_t(\alpha_t(s,a))^2<\infty$ with probability 1, then the following holds with probability 1:
\begin{equation}
\sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \in[0,1], \quad \sum_t \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i = \infty, \quad \sum_t\left( \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \right)^2 < \infty.
\end{equation}
\end{lemma}
\begin{proof}
It is easy to find that $\sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i = 1 - (1-\alpha_t)^M$. Since $\alpha_t\in[0,1]$, we have $1-\alpha_t\in[0,1], (1-\alpha_t)^M\in[0,1]$ and therefore $\sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i = 1 - (1-\alpha_t)^M\in[0,1]$.
Meanwhile, $1 - \alpha_t \ge (1-\alpha_t)^M$, then $\sum_t \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \ge \sum_t \alpha_t = \infty$, thus $\sum_t \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i = \infty$.
Finally, $\sum_t\left( \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \right)^2 \le \sum_t\left( \sum_{i=0}^{M-1} \alpha_t \right)^2 = M^2 \sum_t\left(\alpha_t\right)^2 < \infty$.
\end{proof}
Then we formally give the convergence property of Q-SMR below.
\begin{theorem}[Formal, Convergence of Q-SMR]
\label{apptheo:convergence}
Given the following conditions: (1) each state-action pair is sampled an infinite number of times; (2) the MDP is finite; (3) $\gamma\in[0,1)$; (4) $Q$ values are stored in a look-up table; (5) the learning rates satisfy $\alpha_t(s,a)\in[0,1], \sum_t\alpha_t(s,a) = \infty, \sum_t(\alpha_t(s,a))^2<\infty$ with probability 1 and $\alpha_t(s,a)=0,\forall (s,a)\neq (s_t,a_t)$; (6) Var$[r(s,a)]<\infty, \forall s,a$, then the Q-SMR algorithm converges to the optimal $Q$-function.
\end{theorem}
\begin{proof}
To show the convergence of Q-SMR, we first show the convergence of the following update rule, which is exactly the rule of the simplified Q-SMR algorithm presented in the Corollary \ref{coro:simpleupdaterule}.
\begin{equation}
\label{eq:simplifiedrule}
\begin{aligned}
Q_{t+1}(s_t,a_t) = (1-\alpha_t)^M Q_{t}(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right] \mathcal{T}_{t+1}Q_t(s_t,a_t).
\end{aligned}
\end{equation}
Subtracting from both sides the quantity $Q^*(s_t,a_t)$, and letting $\Delta_t(s_t,a_t) = Q_t(s_t,a_t) - Q^*(s_t,a_t)$ yields:
\begin{equation*}
\Delta_{t+1}(s_t,a_t) = (1-\alpha_t)^M \Delta_t(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right] \left(r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_t(s_{t+1},a^\prime) - Q^*(s_t,a_t)\right).
\end{equation*}
Denote $\beta_t = 1 - (1-\alpha_t)^M$, and write $F_t(s_t,a_t) = r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_t(s_{t+1},a^\prime) - Q^*(s_t,a_t)$, we have
\begin{equation*}
\Delta_{t+1}(s_t,a_t) = (1-\beta_t) \Delta_t(s_t,a_t) + \beta_t F_t.
\end{equation*}
From Lemma \ref{applemma:learningratelemma}, we conclude that the new learning rate sequence obeys $\beta_t\in[0,1], \sum_t\beta_t = \infty$ and $\sum_t(\beta_t)^2<\infty$. Meanwhile, $\mathbb{E}[F_t(s_t,a_t)|P_t] = \mathcal{T}Q_t(s_t,a_t) - Q^*(s_t,a_t)$. Since the optimal $Q$-function is a fixed point of the Bellman operator, we have $\mathbb{E}[F_t(s_t,a_t)|P_t] = \mathcal{T}Q_t(s_t,a_t) - \mathcal{T}Q^*(s_t,a_t)$. Since the Bellman operator is a contraction, we have $\|\mathbb{E}[F_t(s_t,a_t)|P_t]\| = \|\mathcal{T}Q_t(s_t,a_t) - \mathcal{T}Q^*(s_t,a_t)\|\le \gamma\|\Delta_t\|$.
Finally, we check the variance of $F_t(s_t,a_t)$, it is easy to find:
\begin{align*}
{\rm Var}[F_t(s_t,a_t)|P_t] &= \mathbb{E}\left[ \left( r_t+\gamma\max_{a^\prime\in\mathcal{A}}Q_t(s_{t+1},a^\prime) - Q^*(s_t,a_t) - \left( \mathcal{T}Q_t(s_t,a_t) - Q^*(s_t,a_t) \right) \right)^2 \bigg|P_t \right] \\
&= \mathbb{E}\left[ \left( r_t+\gamma\max_{a^\prime\in\mathcal{A}}Q_t(s_{t+1},a^\prime) - \mathcal{T}Q_t(s_t,a_t) \right)^2 \bigg|P_t \right] \\
&= {\rm Var}\left[ r_t+\gamma\max_{a^\prime\in\mathcal{A}}Q_t(s_{t+1},a^\prime)\bigg|P_t \right].
\end{align*}
Due to the fact that $r_t$ is bounded, it clearly verifies that ${\rm Var}[F_t(s_t,a_t)|P_t]\le C(1 + \|\Delta_t\|)^2$ for some constant $C$. Combining these together, and by using Lemma \ref{applemma:randomprocess}, we conclude that $\Delta_t$ converges to 0 with probability 1. That is to say, the simplified Q-SMR algorithm with update rule in Equation \ref{eq:simplifiedrule} converges to the optimal $Q$-function. Then, for the formal Q-SMR update rule, we have
\begin{align}
Q_{t+1}(s_t,a_t) &= (1-\alpha_t)^M Q_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t) \\
&\le (1-\alpha_t)^M Q_{t}(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right] \max_{i\in[0,M-1]}\mathcal{T}_{t+1}Q_t^{(i)}(s_t,a_t).
\end{align}
It is easy to check that the right side converges to the optimal $Q$-function by following the same analysis above. Furthermore, we have
\begin{align}
Q_{t+1}(s_t,a_t) &= (1-\alpha_t)^M Q_{t}(s_t,a_t) + \sum_{i=0}^{M-1} \alpha_t (1-\alpha_t)^i \mathcal{T}_{t+1}Q_{t+1}^{(M-1-i)}(s_t,a_t) \\
&\ge (1-\alpha_t)^M Q_{t}(s_t,a_t) + \left[ 1-(1-\alpha_t)^M \right] \min_{i\in[0,M-1]}\mathcal{T}_{t+1}Q_t^{(i)}(s_t,a_t).
\end{align}
Similarly, the lower bound side converges to the optimal $Q$-function. Then by combining the results above, we naturally conclude that Q-SMR converges to the optimal $Q$-function.
\end{proof}
\subsection{Proof of Theorem \ref{theo:difference}}
\begin{proof}
If we amplify $\alpha_t$, then we have
\begin{equation}
\label{eq:largerlearningrate}
\theta_{t+1} = \theta_t - {\color{red} M\alpha_t \nabla \mathcal{L}_{\theta_t}}.
\end{equation}
This is the parameter update rule for the case of enlarging learning rate $M$ times.
Now we investigate SMR with SGD. Denote $\theta_t^{(i)}$ as the intermediate parameter in the SMR loop at timestep $t$ and iteration $i$, then it is easy to find $\theta_{t+1}^{(1)} = \theta_{t+1}^{(0)} - \alpha_t \nabla\mathcal{L}_{\theta_{t+1}^{(0)}}$, and $\theta_{t+1}^{(2)} = \theta_{t+1}^{(1)} - \alpha_t \nabla\mathcal{L}_{\theta_{t+1}^{(1)}} = \theta_{t+1}^{(0)} - \alpha_t \nabla\mathcal{L}_{\theta_{t+1}^{(0)}} - \alpha_t \nabla\mathcal{L}_{\theta_{t+1}^{(1)}}$. Finally, by doing iteration till $M$, using $\theta_{t+1}^{(0)}=\theta_t^{(M)}$ and omitting the superscript $(M)$, we have
\begin{equation}
\theta_{t+1} = \theta_t - {\color{red} \alpha_t \sum_{i=0}^{M-1} \nabla\mathcal{L}_{\theta_{t+1}^{(i)}}}.
\end{equation}
\end{proof}
\subsection{Proof of Lemma \ref{lemma:learningrate}}
\begin{lemma}
\label{applemma:learningrate}
Denote $\hat{\alpha}_t = 1 - (1-\alpha_t)^M$, then we have
\begin{equation}
\alpha_t \le \hat{\alpha}_t \le \min\{1, M\alpha_t\}.
\end{equation}
\end{lemma}
\begin{proof}
We write $f(x) = 1 - (1-x)^M - x$ and $g(x) = 1 - (1-x)^M - Mx, x\in[0,1], M\ge 1, M\in\mathbb{Z}$, then for $f(x)$, we have $f(x) = 1 - (1-x)^M - x = (1-x) - (1-x)^M = (1-x)\left[ 1 - (1-x)^{M-1} \right]\ge 0$. Therefore, $1 - (1-x)^M\ge x$.
For $g(x)$, we have
\begin{align*}
g^\prime(x) = M(1-x)^{M-1} - M = M\left[ (1-x)^{M-1} - 1 \right] \le 0.
\end{align*}
This indicates that $g(x)$ decreases in the region $[0,1]$, thus $g(x) \le g(0) = 0$, which implies $1 - (1-x)^M\le Mx,\forall\,x\in[0,1]$. Meanwhile, as $1 - (1-x)^M \le 1$, we thus have $1 - (1-x)^M \le \min\{1, Mx\}$.
By setting $x = \alpha_t$, we have the desired conclusions immediately.
\end{proof}
\subsection{Proof of Theorem \ref{theo:convergencerate}}
\begin{theorem}[Finite time error bound]
\label{apptheo:convergencerate}
Assume that Assumption \ref{ass:mdp} holds and the SMR ratio is set to be $M$. Suppose the learning rate is taken to be $\alpha_t = \frac{h}{M(t+t_0)}$ with $t_0\ge\max(4h, \lceil \log_2 \frac{2}{\mu_{\rm min}} \rceil t_{\rm mix})$ and $h\ge \frac{4}{\mu_{\rm min}(1-\gamma)}$, then with probability at least $1-\delta$,
\begin{equation}
\begin{aligned}
\|\hat{Q}_T - Q^*\|_\infty \le \tilde{\mathcal{O}}\left( \dfrac{r_{\rm max}\sqrt{t_{\rm mix}}}{(1-\gamma)^{2.5}\mu_{\rm min}}\dfrac{1}{\sqrt{T}} + \dfrac{r_{\rm max}t_{\rm mix}}{(1-\gamma)^3\mu_{\rm min}^2}\dfrac{1}{T} \right)
\end{aligned}
\end{equation}
\end{theorem}
The proof of this theorem is borrowed heavily from \cite{Qu2020FiniteTimeAO}. Throughout the proof, we denote $\|\cdot\|$ as the infinity norm. We also assume there exists some constant $C>0$ s.t. $\|F(x)\|\le \gamma\|x\| + C,\forall\,x\in\mathbb{R}^n$, where $F(\cdot)$ denotes the Bellman operator. This assumption can be generally satisfied with $C = (1+\gamma)\|x^*\|$ since $\|F(x)\|\le \|F(x) - F(x^*)\| + \|F(x^*)\|\le \gamma\|x - x^*\| + \|x^*\|\le\gamma \|x\| + (1+\gamma)\|x^*\|$.
\begin{proof}
The proof is generally divided into three steps. First, we decompose the error in a recursive form. Second, we bound the contribution of the noise sequence to the error decomposition. Third, we use the error decomposition and the bounds to prove the result. We let $\hat{\alpha}_t = 1 - (1-\alpha_t)^M$ and rewrite the update rule for Q-SMR below.
\begin{align*}
&x_i(t+1) = x_i(t) + \hat{\alpha}_t (F_i(x(t)) -x_i(t) + \omega(t)), i=i_t, \\
&x_i(t+1) = x_i(t), i\neq i_t,
\end{align*}
where we write $Q_t(s_t,a_t)$ as $x_i(t)$, $i_t\in\{1,2,\ldots,n\}$ is a stochastic process adapted to a filtration $P_t$, $F_i(x(t)) = r(s_t,a_t) + \gamma \mathbb{E}_{s^\prime\sim P(\cdot|s_t,a_t)}\max_{a^\prime\in\mathcal{A}}Q_t(s^\prime,a^\prime)$, $\omega(t) = r_t + \gamma\max_{a^\prime\in\mathcal{A}}Q_{t}(s_{t+1},a^\prime) - r(s_t,a_t) - \gamma \mathbb{E}_{s^\prime\sim P(\cdot|s_t,a_t)}\max_{a^\prime\in\mathcal{A}}Q_t(s^\prime,a^\prime)$.
Following the same way in \cite{Qu2020FiniteTimeAO} (Equation 7), we rewrite the update formula as follows:
\begin{align*}
x(t+1) = (I-\hat{\alpha}_tD_t)x(t) + \hat{\alpha}_t D_t F(x(t)) + \hat{\alpha}_t(\epsilon(t) + \phi(t)),
\end{align*}
where $\epsilon(t) = \left[ (e_{i_t}e_{i_t}^T - D_t)(F(x(t-\tau)) - x(t-\tau)) + \omega(t)e_{i_t} \right]$, and $e_i$ is the unit vector with its $i$-th entry 1 and others 0. Clearly, $x(t)$ is $P_t$ measurable, and as $\epsilon(t)$ depends on $\omega(t)$ which is $P_{t+1}$ measurable, $\epsilon(t)$ is $P_{t+1}$ measurable. Moreover, we have
\begin{equation}
\label{appeq:epsilon}
\mathbb{E}\epsilon(t)|P_{t-\tau} = \mathbb{E}[(e_{i_t}e_{i_t}^T-D_t)|P_{t-\tau}][F(x(t-\tau)) - x(t-\tau)]+\mathbb{E}[\mathbb{E}[\omega(t)|P_t]e_{i_t}|P_{t-\tau}] = 0,
\end{equation}
where $D_t = \mathbb{E}e_{i_t}e_{i_t}^T|P_{t-\tau}$, $\tau$ is a positive integer. Assume there exist $\tau$ and a $\sigma^\prime\in(0,1)$ such that for any $i\in\mathcal{N}, \mathcal{N}=\{1,2,\ldots,n\}$ and $t\ge\tau$, $P(i_t=i|P_{t-\tau})\ge \sigma^\prime= M\sigma$, i.e., exploration is sufficient. Such a requirement can be satisfied if we take $\sigma = \frac{1}{2}\mu_{\rm min}$ and $\tau = \lceil \log_2 (\frac{2}{\mu_{\rm min}}) \rceil t_{\rm mix}$, where $\lceil \cdot \rceil$ denotes the ceiling function (rounding up to the nearest integer), e.g., $\lceil 2.7 \rceil = 3, \lceil 5.1\rceil = 6$. $\phi(t) = \left[ (e_{i_t}e_{i_t}^T - D_t)(F(x(t)) - F(x(t-\tau)) - (x(t) - x(t-\tau))) \right]$.
We expand it recursively and have:
\begin{align*}
x(t+1) = \tilde{B}_{\tau-1,t}x(\tau) + \sum_{k=\tau}^t B_{k,t}F(x(k)) + \sum_{k=\tau}^t \hat{\alpha}_t \tilde{B}_{k,t}(\epsilon(k) + \phi(k)),
\end{align*}
where $B_{k,t} = \hat{\alpha}_k D_k\prod_{l=k+1}^t(I-\hat{\alpha}_l D_l), \tilde{B}_{k,t} = \prod_{l=k+1}^t (I-\hat{\alpha}_l D_l)$. It is easy to notice that $B_{k,t}$ and $\tilde{B}_{k,t}$ are $n$-by-$n$ diagonal random matrices, with their $i$-th diagonal entry given by $b_{k,t,i} = \hat{\alpha}_k d_{k,i}\prod_{l=k+1}^t (1-\hat{\alpha}_l d_{l,i})$ and $\tilde{b}_{k,t,i} = \prod_{l=k+1}^t (1 - \hat{\alpha}_l d_{l,i})$. For any $i$, the following holds almost surely,
\begin{align*}
b_{k,t,i}\le \beta_{k,t} := \hat{\alpha}_k \prod_{l=k+1}^t (1-\hat{\alpha}_l M\sigma), \quad \tilde{b}_{k,t,i}\le\tilde{\beta}_{k,t} := \prod_{l=k+1}^t (1-\hat{\alpha}_l M\sigma).
\end{align*}
Based on Lemma 8 in \cite{Qu2020FiniteTimeAO}, denote $a_t = \|x(t) - x^*\|$, then we have almost surely,
\begin{align}
\label{appeq:recursiveform}
a_{t+1} \le \tilde{\beta}_{\tau-1,t}a_\tau + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}a_k + \left\| \sum_{k=\tau}^t\hat{\alpha}_k \tilde{B}_{k,t}\epsilon(k) \right\| + \left\| \sum_{k=\tau}^t\hat{\alpha}_k \tilde{B}_{k,t}\phi(k) \right\|.
\end{align}
\begin{lemma}
\label{applemma:boundonepsilonphi}
The following bounds hold almost surely: (a) $\|\epsilon(t)\|\le \bar{\epsilon}:= \dfrac{4r_{\rm max}}{1-\gamma} + C$; (b) $\|\phi(t)\|\le \sum_{k = t-\tau+1}^t 2\bar{\epsilon}\hat{\alpha}_{k-1}$.
\end{lemma}
\begin{proof}
Replacing $\bar{x}$ with $\dfrac{r_{\rm max}}{1-\gamma}$, $\bar{\omega}$ with $\dfrac{2r_{\rm max}}{1-\gamma}$ and using $\underline{v}$ as 1 (since we use infinity norm), and replacing $\alpha_{k-1}$ with $\hat{\alpha}_{k-1}$ in Lemma 9 of \cite{Qu2020FiniteTimeAO} will induce the conclusion immediately.
\end{proof}
These are still not enough to bound $\|\sum_{k=\tau}^t\hat{\alpha}_k \tilde{B}_{k,t}\epsilon(k)\|$ and $\|\sum_{k=\tau}^t \hat{\alpha}_k\tilde{B}_{k,t}\phi(k)\|$. We provide in the following lemma some useful results of $\beta_{k,t}$ and $\tilde{\beta}_{k,t}$.
\begin{lemma}
\label{applemma:beta}
If $\alpha_t = \frac{h}{M(t+t_0)}$, where $h>\frac{2}{\sigma}$ and $t_0\ge \max(4h,\tau)$, then $\beta_{k,t}$ and $\tilde{\beta}_{k,t}$ satisfy the following relationships:
(a) $\beta_{k,t}\le \dfrac{h}{k+t_0}\left( \dfrac{k+1+t_0}{t+1+t_0} \right)^{\sigma h}$, $\tilde{\beta}_{k,t}\le \left(\dfrac{k+1+t_0}{t+1+t_0}\right)^{\sigma h}$; (b) $\sum_{k=1}^t \beta_{k,t}^2 \le \dfrac{2h}{\sigma}\dfrac{1}{t+1+t_0}$;
(c) $\sum_{k=\tau}^t \beta_{k,t}\sum_{l=k-\tau+1}^k\hat{\alpha}_{l-1}< \dfrac{8h\tau}{\sigma}\dfrac{1}{t+1+t_0}$.
\end{lemma}
\begin{proof}
For part (a), notice that $\log (1-x) \le -x, \forall \, x <1$, then
\begin{equation*}
(1-M\sigma \hat{\alpha}_t) \le (1-M\sigma \alpha_t) = e^{\log (1-\frac{\sigma h}{t+t_0})} \le e^{-\frac{\sigma h}{t+t_0}},
\end{equation*}
where we use $\hat{\alpha}_t \ge \alpha_t$ according to Lemma \ref{applemma:learningrate}. Therefore, we have
\begin{align*}
\prod_{l=k+1}^t (1-M\sigma \hat{\alpha}_l) &\le e^{-\sum_{l=k+1}^t \frac{\sigma h}{l+t_0} } \le e^{-\int_{k+1}^{t+1}\frac{\sigma h}{y+t_0}dy} = e^{-\sigma h \log(\frac{t+1+t_0}{k+1+t_0})} = \left( \dfrac{k+1+t_0}{t+1+t_0} \right)^{\sigma h}.
\end{align*}
This directly leads to the bound on $\tilde{\beta}_{k,t}$. We have $\beta_{k,t} = \hat{\alpha}_k \prod_{l=k+1}^t(1-\hat{\alpha}_l M\sigma) \le M \dfrac{h}{M(k+t_0)} \left(\dfrac{k+1+t_0}{t+1+t_0}\right)^{\sigma h} = \dfrac{h}{k+t_0}\left( \dfrac{k+1+t_0}{t+1+t_0} \right)^{\sigma h}$, where we use the fact that $\hat{\alpha}_k \le M\alpha_k$ based on Lemma \ref{applemma:learningrate}.
For part (b), we have
\begin{align*}
\beta^2_{k,t} \le \dfrac{h^2}{(k+t_0)^2}\left( \dfrac{k+1+t_0}{t+1+t_0} \right)^{2\sigma h} = \dfrac{h^2}{(t+1+t_0)^{2\sigma h}} \dfrac{(k+1+t_0)^{2\sigma h}}{(k+t_0)^2} \le \dfrac{2h^2}{(t+1+t_0)^{2\sigma h}}(k+t_0)^{2\sigma h-2},
\end{align*}
where we have used $(k+1+t_0)^{2\sigma h}\le 2(k+t_0)^{2\sigma h}$, which is true when $t_0\ge 4h$. Then, we have
\begin{align*}
\sum_{k=1}^t \beta^2_{k,t} &\le \dfrac{2h^2}{(t+1+t_0)^{2\sigma h}}\sum_{k=1}^t (k+t_0)^{2\sigma h -2} \le \dfrac{2h^2}{(t+1+t_0)^{2\sigma h}} \int_{1}^{t+1} (y+t_0)^{2\sigma h -2}dy \\
& < \dfrac{2 h^2}{(t+1+t_0)^{2\sigma h}}\dfrac{1}{2\sigma h-1}(t+1+t_0)^{2\sigma h-1} < \dfrac{2h}{\sigma} \dfrac{1}{t+1+t_0},
\end{align*}
where we have used the fact that $2\sigma h-1> \sigma h$ (as $h> \frac{2}{\sigma}$).
For part (c), notice that for $k-\tau\le l\le k-1$ where $k\ge \tau$, we have $\alpha_l \le \dfrac{h}{M(k-\tau+t_0)}$. Since $k\ge \tau$ and $t_0 > \max(4h,\tau)$ (by assumption), we have $k+t_0 > 2\tau$, which indicates that $kh - 2h\tau + ht_0 >0$, and thus $kh + ht_0 < 2kh - 2h\tau + 2ht_0$, which is to say $\dfrac{h}{k-\tau+t_0}< \dfrac{2h}{k+t_0}$. Therefore, we have $\alpha_l < \dfrac{2h}{M(k+t_0)}$. By using Lemma \ref{applemma:learningrate}, we have $\hat{\alpha}_l \le M\alpha_l < \dfrac{2h}{k+t_0}$.
Then, we have
\begin{align*}
\sum_{k=\tau}^t \beta_{k,t}\sum_{l=k-\tau+1}^k\hat{\alpha}_{l-1} &< \sum_{k=\tau}^t\beta_{k,t} \dfrac{2h\tau}{k+t_0} \le \sum_{k=\tau}^t\dfrac{h}{k+t_0}\left( \dfrac{k+1+t_0}{t+1+t_0} \right)^{\sigma h}\dfrac{2h\tau}{k+t_0} \le \sum_{k=\tau}^t\dfrac{4h^2\tau}{(t+1+t_0)^{\sigma h}}(k+t_0)^{\sigma h-2} \\
&\le \dfrac{4h^2\tau}{(t+1+t_0)^{\sigma h}}\int_{\tau}^{t+1}(y+t_0)^{\sigma h-2}dy \le \dfrac{4h^2\tau}{(t+1+t_0)^{\sigma h}}\dfrac{(t+1+t_0)^{\sigma h-1}}{\sigma h - 1} \\
&\le \dfrac{8h\tau}{\sigma} \dfrac{1}{t+1+t_0},
\end{align*}
where we have used $(k+1+t_0)^{\sigma h}\le 2(k+t_0)^{\sigma h}$ and $\sigma h - 1 > \frac{1}{2}\sigma h$.
\end{proof}
Now we are ready to bound $\|\sum_{k=\tau}^t \hat{\alpha}_k\tilde{B}_{k,t}\phi(k)\|$. It is easy to find that
\begin{align*}
\left\|\sum_{k=\tau}^t \hat{\alpha}_k\tilde{B}_{k,t}\phi(k)\right\| \le \sum_{k=\tau}^t \hat{\alpha}_k \|\tilde{B}_{k,t}\|\|\phi(k)\|\le \sum_{k=\tau}^t \beta_{k,t}\sum_{l=k-\tau+1}^k 2\bar{\epsilon} \hat{\alpha}_{l-1} < \dfrac{16\bar{\epsilon}h\tau}{\sigma(t+1+t_0)}:= C_\phi \dfrac{1}{t+1+t_0},
\end{align*}
where we have used the fact that each entry of $\tilde{B}_{k,t}$ is upper bounded by $\tilde{\beta}_{k,t}$, i.e., $\|\tilde{B}_{k,t}\|\le \tilde{\beta}_{k,t}$ and $\beta_{k,t} = \hat{\alpha}_k \tilde{\beta}_{k,t}$ by definition. We now move on to bound $\|\sum_{k=\tau}^t\hat{\alpha}_k \tilde{B}_{k,t}\epsilon(k)\|$. It is straightforward that we use Azuma Hoeffding inequality to show this, which is presented in the following lemma.
\begin{lemma}[Lemma 13 in \cite{Qu2020FiniteTimeAO}]
Let $X_t$ be a $P_t$-adapted stochastic process with $\mathbb{E}X_t|P_{t-\tau}=0$. Meanwhile, $|X_t|\le \bar{X}_t$ almost surely, then with probability at least $1-\delta$, we have $\left| \sum_{k=0}^t X_k \right|\le \sqrt{2\tau \sum_{k=0}^t \bar{X}_k^2\log(\frac{2\tau}{\delta})}$.
\end{lemma}
Recall that $\sum_{k=\tau}^t \hat{\alpha}_k\tilde{B}_{k,t}\epsilon(k)$ is a random vector with its $i$-th entry $\sum_{k=\tau}^t \hat{\alpha}_k\epsilon_i(k)\prod_{l=k+1}^t(1-\hat{\alpha}_l d_{l,i})$, $d_{l,i}\ge\sigma^\prime=M\sigma$. Fixing $i$, $\epsilon_i(k)$ is a $P_{k+1}$ adapted stochastic process satisfying $\mathbb{E}\epsilon_i(k)|P_{k-\tau}=0$ (see Equation \ref{appeq:epsilon}). However, $\prod_{l=k+1}^t(1-\hat{\alpha}_ld_{l,i})$ is not $P_{k-\tau}$ measurable. To erase the randomness in it, we introduce the following lemma.
\begin{lemma}[Adapted from Lemma 14 in \cite{Qu2020FiniteTimeAO}]
\label{applemma:azuma}
For each $i$, we have almost surely,
\begin{align*}
\left| \sum_{k=\tau}^t \hat{\alpha}_k\epsilon_i(k)\prod_{l=k+1}^t (1-\hat{\alpha}_l d_{l,i}) \right| \le \sup_{\tau\le k_0\le t}\left( \left| \sum_{k=k_0+1}^t \epsilon_i(k)\beta_{k,t} \right| + 2\bar{\epsilon}\beta_{k_0,t} \right).
\end{align*}
\end{lemma}
\begin{proof}
Replacing $\alpha_k$ with $\hat{\alpha}_k$ and setting $v_i=1$ (since we use standard infinity norm) in Lemma 14 of \cite{Qu2020FiniteTimeAO} conclude the proof.
\end{proof}
After that, we can proceed with the proof with the aid of the following lemma.
\begin{lemma}
\label{applemma:errorepsilon}
For each $t$, with probability at least $1-\delta$, we have
\begin{align*}
\left\|\sum_{k=\tau}^t \hat{\alpha}_k \tilde{B}_{k,t}\epsilon(k)\right\| \le 6\bar{\epsilon} \sqrt{\dfrac{(\tau+1)h}{\sigma(t+1+t_0)}\log\left( \dfrac{2(\tau+1)tn}{\delta} \right)}.
\end{align*}
\end{lemma}
\begin{proof}
Fix $i$ and $\tau\le k_0\le t$, we have $\epsilon_i(k)\beta_{k,t}$ is a $P_{k+1}$ adapted stochastic process satisfying $\mathbb{E}\epsilon_i(k)\beta_{k,t}|P_{k-\tau}=0$. We also have $|\epsilon_i(k)\beta_{k,t}|\le |\epsilon_i(k)|\beta_{k,t}\le \bar{\epsilon}\beta_{k,t}$ (by using Lemma \ref{applemma:boundonepsilonphi}). We then can use the Azuma-Hoeffding bound in Lemma \ref{applemma:azuma}. With probability at least $1-\delta$, we have
\begin{align*}
\left| \sum_{k=k_0+1}^t\epsilon_i(k)\beta_{k,t} \right| \le \bar{\epsilon}\sqrt{2(\tau+1)\sum_{k=k_0+1}^t\beta^2_{k,t}\log\left( \dfrac{2(\tau+1)}{\delta} \right)}.
\end{align*}
By a union bound on $\tau\le k_0\le t$, we have with probability at least $1-\delta$,
\begin{align*}
\sup_{\tau\le k_0\le t}\left| \sum_{k=k_0+1}^t \epsilon_i(k)\beta_{k,t} \right| \le \bar{\epsilon}\sqrt{2(\tau+1)\sum_{k=\tau+1}^t\beta^2_{k,t}\log\left( \dfrac{2(\tau+1)t}{\delta} \right)}.
\end{align*}
Notice that $\sigma h>2$ and hence $\dfrac{(k_0 + 1+t_0)^{\sigma h}}{k_0+t_0}$ monotonically increases with $k_0$. Therefore, we have $\dfrac{(k_0 + 1+t_0)^{\sigma h}}{k_0+t_0} \le \dfrac{(t + 1+t_0)^{\sigma h}}{t+t_0},\forall\, \tau\le k_0\le t$. Here, we assume that $h> \dfrac{2}{\sigma(1-\gamma)}$ (again, we set $\sigma=\dfrac{1}{2}\mu_{\rm min}$) which obviously satisfies the assumption that $h>\dfrac{2}{\sigma}$ we make in Lemma \ref{applemma:beta}.
Then, by using Lemma \ref{applemma:azuma} and Lemma \ref{applemma:beta}, we have with probability at least $1-\delta$,
\begin{align*}
\left| \sum_{k=\tau}^t \hat{\alpha}_k\epsilon_i(k)\prod_{l=k+1}^t(1-\hat{\alpha}_ld_{l,i}) \right| &\le \sup_{\tau\le k_0 \le t}\left( \left| \sum_{k=k_0+1}^t \epsilon_i(k) \beta_{k,t} \right| + 2\bar{\epsilon}\beta_{k_0,t} \right) \\
&\le \bar{\epsilon}\sqrt{2(\tau+1)\sum_{k=\tau+1}^t\beta^2_{k,t}\log\left( \dfrac{2(\tau+1)t}{\delta} \right)} + \sup_{\tau\le k_0\le t}2\bar{\epsilon}\beta_{k_0,t} \\
&\le 2\bar{\epsilon} \sqrt{\dfrac{(\tau+1)h}{\sigma (t+1+t_0)}\log\left( \dfrac{2(\tau+1)t}{\delta} \right)} + 2\bar{\epsilon} \sup_{\tau\le k_0 \le t} \dfrac{h}{k_0+t_0} \left( \dfrac{k_0+1+t_0}{t+1+t_0} \right)^{\sigma h} \\
&\le 2\bar{\epsilon}\sqrt{\dfrac{(\tau+1)h}{\sigma(t+1+t_0)}\log\left(\dfrac{2(\tau+1)t}{\delta}\right)} + 2\bar{\epsilon}\dfrac{h}{t+t_0} \\
&\le 6\bar{\epsilon}\sqrt{\dfrac{(\tau+1)h}{\sigma(t+1+t_0)}\log\left(\dfrac{2(\tau+1)t}{\delta}\right)}.
\end{align*}
The last inequality is due to the fact that $\dfrac{1}{t+t_0}$ is asymptotically smaller than $\sqrt{\dfrac{1}{t+1+t_0}}$. Finally, by using the union bound over $i\in\{1,2,\ldots,n\}$, we have
\begin{align*}
\left| \sum_{k=\tau}^t \hat{\alpha}_k\epsilon_i(k)\prod_{l=k+1}^t(1-\hat{\alpha}_ld_{l,i}) \right| &\le 6\bar{\epsilon}\sqrt{\dfrac{(\tau+1)h}{\sigma(t+1+t_0)}\log\left(\dfrac{2(\tau+1)tn}{\delta}\right)}.
\end{align*}
\end{proof}
By replacing $\delta$ with $\frac{\delta}{t}$, we can rewrite the conclusion in Lemma \ref{applemma:errorepsilon} as:
\begin{align*}
\left\|\sum_{k=\tau}^t \hat{\alpha}_k \tilde{B}_{k,t}\epsilon(k)\right\| \le 6\bar{\epsilon} \sqrt{\dfrac{(\tau+1)h}{\sigma(t+1+t_0)}\log\left( \dfrac{2(\tau+1)t^2n}{\delta} \right)} := C_\epsilon \sqrt{\dfrac{1}{t+1+t_0}},
\end{align*}
where $C_\epsilon = 6\bar{\epsilon}\sqrt{\frac{(\tau+1)h}{\sigma}\log\left( \frac{2(\tau+1)t^2n}{\delta} \right)}$, then by recalling Equation \ref{appeq:recursiveform}, we have for $\tau\le t\le T$, with probability at least $1-\delta$,
\begin{align*}
a_{t+1} &\le \tilde{\beta}_{\tau-1,t}a_\tau + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}a_k + \left\| \sum_{k=\tau}^t\hat{\alpha}_k \tilde{B}_{k,t}\epsilon(k) \right\| + \left\| \sum_{k=\tau}^t\hat{\alpha}_k \tilde{B}_{k,t}\phi(k) \right\| \\
&\le \tilde{\beta}_{\tau-1,t}a_\tau + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}a_k + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}} + \dfrac{C_\phi}{t+1+t_0}.
\end{align*}
We now want to show that
\begin{align}
\label{appeq:finalconclusion}
a_T \le \frac{C_a}{\sqrt{T+t_0}} + \frac{C_a^\prime}{T+t_0},
\end{align}
where $C_a = \frac{12\bar{\epsilon}}{1-\gamma}\sqrt{\frac{(\tau+1)h}{\sigma}\log\left(\frac{2(\tau+1)T^2n}{\delta}\right)}$, $C_a^\prime = \frac{4}{1-\gamma}\max(C_\phi, \frac{2(\tau+t_0)r_{\rm max}}{1-\gamma})$. We use induction to show Equation \ref{appeq:finalconclusion}. It is easy to see that when $t = \tau$, Equation \ref{appeq:finalconclusion} holds as $\frac{C_a^\prime}{\tau+t_0} \ge \frac{8r_{\rm max}}{(1-\gamma)^2}\ge a_\tau$, where $a_\tau = \|x(\tau) - x^*\|\le \|x(\tau)\| + \|x^*\|\le \frac{2r_{\rm max}}{1-\gamma}$. Assuming that Equation \ref{appeq:finalconclusion} holds for all $k\le t$, we have
\begin{align*}
a_{t+1} &\le \tilde{\beta}_{\tau-1,t}a_\tau + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}a_k + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}} + \dfrac{C_\phi}{t+1+t_0} \\
&\le \tilde{\beta}_{\tau-1,t}a_\tau + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\left( \dfrac{C_a}{\sqrt{k+t_0}} + \dfrac{C_a^\prime}{k+t_0} \right) + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}} + \dfrac{C_\phi}{t+1+t_0} \\
&= \tilde{\beta}_{\tau-1,t}a_\tau + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\dfrac{C_a}{\sqrt{k+t_0}} + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\dfrac{C_a^\prime}{k+t_0}+ \dfrac{C_\epsilon}{\sqrt{t+1+t_0}} + \dfrac{C_\phi}{t+1+t_0} \\
&\le \left( \dfrac{\tau+t_0}{t+1+t_0} \right)^{\sigma h}a_\tau + \dfrac{C_\phi}{t+1+t_0} + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\dfrac{C_a^\prime}{k+t_0} + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\dfrac{C_a}{\sqrt{k+t_0}} + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}}
\end{align*}
where we use the bound for $\tilde{\beta}_{k,t}$ in Lemma \ref{applemma:beta}. To finish the final step of the proof, we need the aid of the following lemma.
\begin{lemma}[Adapted from Lemma 15 in \cite{Qu2020FiniteTimeAO}]
\label{applemma:sqrtgamma}
Suppose $\sigma h(1-\sqrt{\gamma})\ge 1$, $t_0\ge 1$, and $\alpha_0 \le \frac{1}{2M}$. Then for any $i\in\mathcal{N}=\{1,2,\ldots,n\}$ and any $\omega\in(0,1]$, we have,
\begin{align*}
\sum_{k=\tau}^t b_{k,t,i}\dfrac{1}{(k+t_0)^\omega} \le \dfrac{1}{\sqrt{\gamma}(t+1+t_0)^\omega}.
\end{align*}
\end{lemma}
\begin{proof}
Denote $e_t = \sum_{k=\tau}^t b_{k,t,i}\dfrac{1}{(k+t_0)^\omega}$. We use induction to show that $e_t\le \dfrac{1}{\sqrt{\gamma}(t+1+t_0)^\omega}$. The conclusion is true for $t = \tau$ as $\hat{\alpha}_\tau \le M\alpha_\tau\le \dfrac{1}{2}$, then $e_\tau = b_{\tau,\tau,i}\dfrac{1}{(\tau+t_0)^\omega} = \hat{\alpha}_\tau d_{\tau,i}\dfrac{1}{(\tau+t_0)^\omega} \le \dfrac{1}{\sqrt{\gamma}(\tau+1+t_0)^\omega}$ due to $\left(1+\dfrac{1}{t_0}\right)^\omega \le 1 + \dfrac{1}{t_0} \le 2 \le \dfrac{2}{\sqrt{\gamma}}$, $t_0\ge1, \omega\le 1$. Then we assume the statement is true for $t-1$, then we have
\begin{align*}
e_t &= \sum_{k=\tau}^{t-1}b_{k,t,i}\dfrac{1}{(k+t_0)^\omega} + b_{t,t,i}\dfrac{1}{(t+t_0)^\omega} = (1-\hat{\alpha}_td_{t,i})\sum_{k=\tau}^{t-1}b_{k,t-1,i}\dfrac{1}{(k+t_0)^\omega} + \hat{\alpha}_td_{t,i}\dfrac{1}{(t+t_0)^\omega} \\
&= (1-\hat{\alpha}_td_{t,i})e_{t-1} + \hat{\alpha}_td_{t,i}\dfrac{1}{(t+t_0)^\omega} \le (1-\hat{\alpha}_td_{t,i}) \dfrac{1}{\sqrt{\gamma}(t+t_0)^\omega} + \hat{\alpha}_td_{t,i}\dfrac{1}{(t+t_0)^\omega} \\
&= \left[ 1-\hat{\alpha}_t d_{t,i}(1-\sqrt{\gamma}) \right] \dfrac{1}{\sqrt{\gamma}(t+t_0)^\omega} \le \left[ 1-\alpha_t M\sigma(1-\sqrt{\gamma}) \right] \dfrac{1}{\sqrt{\gamma}(t+t_0)^\omega} = \left[ 1-\dfrac{h}{t+t_0} \sigma(1-\sqrt{\gamma}) \right] \dfrac{1}{\sqrt{\gamma}(t+t_0)^\omega} \\
&= \left[ 1-\dfrac{\sigma h}{t+t_0} (1-\sqrt{\gamma}) \right]\left(\dfrac{t+1+t_0}{t+t_0}\right)^\omega \dfrac{1}{\sqrt{\gamma}(t+1+t_0)^\omega} = \left[ 1-\dfrac{\sigma h}{t+t_0} (1-\sqrt{\gamma}) \right]\left(1 + \dfrac{1}{t+t_0}\right)^\omega \dfrac{1}{\sqrt{\gamma}(t+1+t_0)^\omega},
\end{align*}
where we have used the fact that $\hat{\alpha}_k \ge \alpha_k$ and $d_{t,i}\ge M \sigma$. Using the fact that for any $x>-1$, $(1+x)\le e^x$, we have,
\begin{align*}
\left[ 1-\dfrac{\sigma h}{t+t_0} (1-\sqrt{\gamma}) \right]\left(1 + \dfrac{1}{t+t_0}\right)^\omega \le e^{-\frac{\sigma h}{t+t_0}(1-\sqrt{\gamma}) + \omega\frac{1}{t+t_0}} \le 1,
\end{align*}
where we have used $\omega \le 1$ and $\sigma h(1-\sqrt{\gamma})\ge 1$, therefore $\omega - \sigma h(1-\sqrt{\gamma})\le 0$. Thus, we have
\begin{align*}
e_t \le \left[ 1-\dfrac{\sigma h}{t+t_0} (1-\sqrt{\gamma}) \right]\left(1 + \dfrac{1}{t+t_0}\right)^\omega \dfrac{1}{\sqrt{\gamma}(t+1+t_0)^\omega} \le \dfrac{1}{\sqrt{\gamma}(t+1+t_0)^\omega}.
\end{align*}
This finishes the induction, and concludes the proof of this lemma.
\end{proof}
By using Lemma \ref{applemma:sqrtgamma} and setting $\omega = 1, \frac{1}{2}$, respectively, we have
\begin{align*}
a_{t+1} &\le \left( \dfrac{\tau+t_0}{t+1+t_0} \right)^{\sigma h}a_\tau + \dfrac{C_\phi}{t+1+t_0} + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\dfrac{C_a^\prime}{k+t_0} + \gamma \sup_{i\in\mathcal{N}}\sum_{k=\tau}^t b_{k,t,i}\dfrac{C_a}{\sqrt{k+t_0}} + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}} \\
&\le \left( \dfrac{\tau+t_0}{t+1+t_0} \right)^{\sigma h}a_\tau + \dfrac{C_\phi}{t+1+t_0} + \sqrt{\gamma} \dfrac{C_a^\prime}{t+1+t_0} + \sqrt{\gamma} \dfrac{C_a}{\sqrt{t+1+t_0}} + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}}.
\end{align*}
Denote $F_t = \sqrt{\gamma}\dfrac{C_a}{\sqrt{t+1+t_0}} + \dfrac{C_\epsilon}{\sqrt{t+1+t_0}}$ and $F_t^\prime = \sqrt{\gamma}\dfrac{C_a^\prime}{t+1+t_0} + \dfrac{C_\phi}{t+1+t_0}+\left( \dfrac{\tau+t_0}{t+1+t_0} \right)^{\sigma h}a_\tau$, then we have $a_{t+1}\le F_t+F_t^\prime$. It suffices to show $F_t\le \dfrac{C_a}{\sqrt{t+1+t_0}}, F_t^\prime\le\dfrac{C_a^\prime}{t+1+t_0}$.
Notice that $\dfrac{C_\epsilon}{C_a} = \dfrac{6\bar{\epsilon}\sqrt{\frac{(\tau+1)h}{\sigma}\log\left( \frac{2(\tau+1)t^2n}{\delta} \right)}}{\frac{12\bar{\epsilon}}{1-\gamma}\sqrt{\frac{(\tau+1)h}{\sigma}\log\left(\frac{2(\tau+1)T^2n}{\delta}\right)}}\le \dfrac{1-\gamma}{2}\le 1-\sqrt{\gamma}$. The last inequality is a direct result of the fact that $(\sqrt{\gamma}-1)^2\ge 0$. Thus $F_t\le \dfrac{C_a}{\sqrt{t+1+t_0}}$.
We also notice that $\dfrac{a_\tau(\tau+t_0)}{C_a^\prime} \le \dfrac{2r_{\rm max}}{1-\gamma} \dfrac{\tau+t_0}{C_a^\prime}\le\dfrac{1-\gamma}{4}\le \dfrac{1-\sqrt{\gamma}}{2}$. Furthermore, we have $\dfrac{C_\phi}{C_a^\prime} \le \dfrac{1-\gamma}{4} \le\dfrac{1-\sqrt{\gamma}}{2}$. Then, we have
\begin{align*}
F_t^\prime &= \sqrt{\gamma}\dfrac{C_a^\prime}{t+1+t_0} + \dfrac{C_\phi}{t+1+t_0}+\left( \dfrac{\tau+t_0}{t+1+t_0} \right)^{\sigma h}a_\tau \le \sqrt{\gamma}\dfrac{C_a^\prime}{t+1+t_0} + \dfrac{C_\phi}{t+1+t_0}+\dfrac{a_\tau(\tau+t_0)}{t+1+t_0} \\
&\le \sqrt{\gamma}\dfrac{C_a^\prime}{t+1+t_0} + \dfrac{1-\sqrt{\gamma}}{2}\dfrac{C_a^\prime}{t+1+t_0} + \dfrac{1-\sqrt{\gamma}}{2}\dfrac{C_a^\prime}{t+1+t_0} = \dfrac{C_a^\prime}{t+1+t_0}.
\end{align*}
This finishes the induction, and we have $a_T \le \frac{C_a}{\sqrt{T+t_0}} + \frac{C_a^\prime}{T+t_0}$, where $C_a = \frac{12\bar{\epsilon}}{1-\gamma}\sqrt{\frac{(\tau+1)h}{\sigma}\log\left(\frac{2(\tau+1)T^2n}{\delta}\right)}$, $C_a^\prime = \frac{4}{1-\gamma}\max(C_\phi, \frac{2(\tau+t_0)r_{\rm max}}{1-\gamma}), C_\phi = \frac{16\bar{\epsilon}h\tau}{\sigma}$. Based on Lemma \ref{applemma:boundonepsilonphi}, we have $\bar{\epsilon}:= \dfrac{4r_{\rm max}}{1-\gamma} + C$ where $C\le (1+\gamma)\|x^*\|\le 2\dfrac{r_{\rm max}}{1-\gamma}$. Therefore, we have $\bar{\epsilon}\le \dfrac{6r_{\rm max}}{1-\gamma}$. Taken together with $\tau = \lceil \log_2(\frac{2}{\mu_{\rm min}}) \rceil t_{\rm mix}$ and $\sigma = \dfrac{\mu_{\rm min}}{2}$, we have with probability at least $1-\delta$,
\begin{align*}
\|\hat{Q}_T - Q^*\| &\le \dfrac{72r_{\rm max}}{(1-\gamma)^2} \sqrt{\dfrac{2(\lceil \log_2(\frac{2}{\mu_{\rm min}}) \rceil t_{\rm mix} + 1)h}{\mu_{\rm min} (T+t_0)} \log\left( \dfrac{2(\lceil \log_2(\frac{2}{\mu_{\rm min}}) \rceil t_{\rm mix} + 1)T^2|\mathcal{S}||\mathcal{A}|}{\delta} \right)} \\
& + \dfrac{4r_{\rm max}}{(1-\gamma)^2} \max \left( \dfrac{192 h \lceil \log_2(\frac{2}{\mu_{\rm min}}) \rceil t_{\rm mix}}{\mu_{\rm min}}, 2\left( \lceil \log_2(\frac{2}{\mu_{\rm min}}) \rceil t_{\rm mix} + t_0\right) \right)\dfrac{1}{T+t_0} \\
&\simeq \tilde{\mathcal{O}}\left( \dfrac{r_{\rm max}\sqrt{t_{\rm mix}}}{(1-\gamma)^{2.5}\mu_{\rm min}}\dfrac{1}{\sqrt{T}} + \dfrac{ r_{\rm max}t_{\rm mix}}{(1-\gamma)^3\mu_{\rm min}^2}\dfrac{1}{T} \right).
\end{align*}
The above inequality holds when we take $h=\Theta(\frac{1}{\mu_{\rm min}(1-\gamma)}), t_0 = \tilde{\Theta}(\max(\frac{1}{\mu_{\rm min}(1-\gamma)}, t_{\rm mix}))$. The whole proof is thus completed.
\end{proof}
\subsection{Proof of Corollary \ref{coro:samplecomplexity}}
\begin{corollary}[Sample complexity]
For any $0<\delta<1$ and $0<\epsilon<1$, with Q-SMR algorithm we have:
\begin{equation}
\forall (s,a)\in\mathcal{S}\times\mathcal{A}: \|\hat{Q}_T - Q^*\|_\infty \le \epsilon,
\end{equation}
holds with probability at least $1-\delta$, provided the iteration number $T$ obeys:
\begin{equation}
T\stackrel{>}{\sim}\dfrac{r_{\rm max}^2 t_{\rm mix}}{(1-\gamma)^5\mu_{\rm min}^2}\dfrac{1}{\epsilon^2}.
\end{equation}
\end{corollary}
\begin{proof}
The proof is quite straightforward. Recall that $\|\hat{Q}_T - Q^*\| \le \tilde{\mathcal{O}}\left( \dfrac{r_{\rm max}\sqrt{t_{\rm mix}}}{(1-\gamma)^{2.5}\mu_{\rm min}}\dfrac{1}{\sqrt{T}} + \dfrac{r_{\rm max}t_{\rm mix}}{(1-\gamma)^3\mu_{\rm min}^2}\dfrac{1}{T} \right)$. Reaching an accuracy of $\epsilon$ means that $\dfrac{r_{\rm max}\sqrt{t_{\rm mix}}}{(1-\gamma)^{2.5}\mu_{\rm min}}\dfrac{1}{\sqrt{T}} + \dfrac{ r_{\rm max}t_{\rm mix}}{(1-\gamma)^3\mu_{\rm min}^2}\dfrac{1}{T} \le \epsilon$. Since the $\dfrac{1}{\sqrt{T}}$ term dominates the $\dfrac{1}{T}$ term for large $T$, it is sufficient to require $\dfrac{r_{\rm max}\sqrt{t_{\rm mix}}}{(1-\gamma)^{2.5}\mu_{\rm min}}\dfrac{1}{\sqrt{T}}\le \epsilon$, which leads to $T\ge \dfrac{r_{\rm max}^2 t_{\rm mix}}{(1-\gamma)^5\mu_{\rm min}^2}\dfrac{1}{\epsilon^2}$.
\end{proof}
\section{Missing Experimental Results and Details}
\label{sec:missingexperiments}
In this section, we provide some missing experimental results and details. We first demonstrate the experimental results of TD3-SMR and DDPG-SMR, and we also show how reducing SMR ratio and increasing batch size will affect them. We then list the full results of SAC-SMR on DMC suite \cite{Tassa2018DeepMindCS} and PyBullet-Gym \cite{benelot2018}, including state-based tasks and image-based tasks. Furthermore, we show that SMR can boost the sample efficiency of the base algorithm with longer online interactions (1M online interactions). We also conduct experiments on Arcade Learning Environment (Atari) where we combine SMR with DQN \cite{Mnih2015HumanlevelCT}. Finally, we show that SMR can improve sample efficiency regardless of the initial learning rate.
\subsection{Performance of TD3-SMR and DDPG-SMR}
\label{appsec:td3smrddpgsmr}
We summarize the full performance comparison of TD3-SMR against the vanilla TD3 as well as DDPG-SMR versus DDPG (here we use our DDPG from \cite{Fujimoto2018AddressingFA}) on four continuous control tasks from OpenAI Gym \cite{Brockman2016OpenAIG} in Figure \ref{fig:td3-smr-ddpg-smr}. We use $M=10$ by default. We notice that the sample efficiency of both TD3 and DDPG benefits greatly from SMR on many of the evaluated tasks. However, we do observe some performance instability on Ant-v2 for TD3-SMR, and find that DDPG-SMR underperforms the vanilla DDPG. For TD3, this may be because the neural networks encounter the phenomenon of overfitting in this environment. For DDPG, this may be due to the fact that \textit{SMR does not modify the way of value estimation}, indicating that the phenomenon of overestimation still exists in DDPG-SMR. The overestimation bias can be accumulated during the sample reuse loop on Ant-v2, resulting in poor performance. On other tasks, we find that SMR consistently aids the sample efficiency of the base algorithm for both TD3 and DDPG, often by a large margin. As mentioned in Section \ref{sec:overfitting}, the ways of remedying the overfitting phenomenon can be (1) using smaller $M$, e.g., $M=5$; (2) resetting the agent periodically; (3) leveraging a larger batch size; etc. We show below the effectiveness of some of them, including using a smaller SMR ratio and using a larger batch size.
\begin{figure}
\caption{Experimental results of TD3-SMR against TD3 and DDPG-SMR against DDPG. The results are averaged over 6 random seeds, and the shaded region denotes the standard deviation.}
\label{fig:td3-smr-ddpg-smr}
\end{figure}
\begin{figure}
\caption{Performance comparison of TD3-SMR and DDPG-SMR against their base algorithms on Ant-v2. (a) TD3-SMR with different batch sizes, where we sweep over $\{256,300,400\}$; (b) TD3-SMR with different SMR ratios; (c) DDPG-SMR with different batch sizes; (d) DDPG-SMR with different SMR ratios.}
\label{fig:td3batchsize}
\label{fig:td3ratio}
\label{fig:ddpgbatchsize}
\label{fig:ddpgratio}
\label{fig:alleviatingoverfitting}
\end{figure}
We summarize the empirical results in Figure \ref{fig:alleviatingoverfitting}, where we run for 300K online interactions and evaluate the agent every 1000 timesteps over 10 trials. We find that using a smaller SMR ratio or using a larger batch size is beneficial to the stability and performance of TD3-SMR, as shown in Figures \ref{fig:td3batchsize} and \ref{fig:td3ratio}. However, it can be seen that DDPG-SMR does not seem to benefit from either a smaller SMR ratio $M$ (even $M=2$) or a larger batch size. Using a smaller SMR ratio or larger batch size can help improve DDPG-SMR with $M=10$ to some extent, though they still underperform vanilla DDPG. This is because \textit{SMR only enforces more updates on the fixed batch data instead of dealing with overestimation bias}. As shown in Equation \ref{eq:smrlearningrate}, SMR tends to \textit{smooth the gradient for updating} by leveraging the gradient of intermediate values. On tasks like \texttt{HalfCheetah-v2} and \texttt{Walker2d-v2}, SMR can benefit DDPG by better exploiting collected data, while on some tasks like \texttt{Ant-v2}, DDPG-SMR does not seem to be able to escape from the curse of overestimation bias. We, therefore, recommend the application of SMR upon off-policy continuous control algorithms that can address the overestimation bias themselves, e.g., TD3 \cite{Fujimoto2018AddressingFA} by using clipped double Q-learning; TQC \cite{Kuznetsov2020ControllingOB} by truncating a small proportion of the estimated $Q$ distribution, etc.
\subsection{Omitted results from DMC suite and PyBullet-Gym}
We demonstrate in this subsection the missing experimental results of SAC-SMR on DMC suite \cite{Tassa2018DeepMindCS} and PyBullet-Gym \cite{benelot2018}. The performance comparison of SAC-SMR and SAC is available in Figure \ref{fig:missingstatebasedtasks}. As expected, we observe that SAC-SMR outperforms the vanilla SAC on all of the evaluated state-based tasks. These further show that SMR can benefit the sample efficiency of the base algorithm on a wide range of different tasks.
\begin{figure}
\caption{Experimental results of SAC-SMR against SAC on state-based tasks from DMC suite and PyBullet-Gym. The results are averaged over 6 random seeds with 500K online interactions, and the shaded region denotes the standard deviation.}
\label{fig:missingstatebasedtasks}
\end{figure}
We further demonstrate in Figure \ref{fig:dmc-imagesacsmr} the experimental results on 4 additional image-based tasks from DMC suite. Note that we run experiments on DMC suite 100K benchmarks for SAC-SMR. For image-based tasks, we use a comparatively small SMR ratio $M=5$ as it will be very time-consuming to adopt $M=10$ (image-based tasks already take much longer to run than state-based tasks). For example, it takes about 4 hours to run with SAC on \texttt{reacher-easy} while it takes about 15 hours to run with SAC-SMR ($M=5$) on this task. If we adopt $M=10$, it will take more than 24 hours. We observe that on some of the image-based tasks, our SMR can boost the sample efficiency of SAC, e.g., SAC-SMR learns faster than vanilla SAC on \texttt{cheetah-run} and beats SAC on \texttt{cup-catch}. However, we also see that SAC-SMR somewhat underperforms SAC on \texttt{cartpole-swingup} and \texttt{reacher-easy}. In fact, we do not observe as large a margin on image-based tasks as on the state-based tasks. We attribute the reason to \textit{bad representation}. We usually leverage an encoder to deal with image input, where we do representation learning to reduce the size of the input. However, the parameters of the encoder are also continually updated during the training process. The error in representations accumulates and may impede the agent from learning a better policy. For some of the tasks, SMR can benefit the agent, while on some tasks, things are different. SMR can benefit the state-based tasks as the states are \textit{precise representations} of the information that the agent needs to perform control. This phenomenon also exists on Atari tasks; one can refer to Appendix \ref{appsec:atari} for more details. Furthermore, as mentioned in \cite{yarats2022mastering}, the automatic entropy adjustment strategy in SAC is inadequate and in some cases may result in a premature \textit{entropy collapse}. 
This will in turn prevent the agent from finding more optimal behaviors. SMR can somewhat worsen this phenomenon due to multiple updates on the sampled batch. We believe these factors can explain the failure of SAC-SMR on some of the image-based tasks.
\begin{figure}
\caption{Experimental results on 4 image-based tasks from DMC suite 100K benchmarks. The results are averaged over 6 different random seeds, and the shaded region denotes the standard deviation.}
\label{fig:dmc-imagesacsmr}
\end{figure}
One interesting question is: can more advanced methods for image-based tasks benefit from SMR? To answer this question, we select the most recent DrQ-v2 \cite{yarats2022mastering} and combine it with our SMR. DrQ-v2 is built upon DrQ \cite{yarats2021image}, an actor-critic approach that uses data augmentation to learn directly from pixels. The improvements of DrQ-v2 over DrQ include: (1) switching from SAC to DDPG (to avoid entropy collapse); (2) incorporating $n$-step returns to estimate temporal difference (TD); (3) introducing a decaying schedule for exploration noise; (4) improving running speed; (5) finding better hyperparameters. We adopt the SMR ratio $M=5$ as we do for image-based tasks in SAC-SMR. We demonstrate in Figure \ref{fig:drqv2} that DrQ-v2-SMR can outperform DrQ-v2 on most of the evaluated tasks (e.g., \texttt{cup-catch}, \texttt{cartpole-swingup}) and is competitive to DrQ-v2 on other tasks (e.g., \texttt{cheetah-run}). We also compare the final performance of DrQ-v2 and DrQ-v2-SMR at 500K frames in Table \ref{tab:drqv2performance}, where we unsurprisingly find the advantage of DrQ-v2-SMR over DrQ-v2. The success of SMR upon DrQ-v2 may be due to (1) no entropy collapse and better exploration mechanism; (2) data augmentation to help alleviate the negative influence of initial bad representation.
Note that DrQ-v2-SMR requires about 3 times the training time of DrQ-v2. Thanks to the fast running speed of DrQ-v2, this cost is comparatively tolerable. For example, DrQ-v2 requires 7 hours on \texttt{finger-spin} while DrQ-v2-SMR takes 20 hours.
\begin{figure}
\caption{Experimental results of DrQ-v2-SMR against vanilla DrQ-v2 on six image-based tasks from DMC suite. Each algorithm is run for 500K frames and evaluated over 10 trials every 1000 frames. The results are averaged over 6 different random seeds. We report the mean performance and the standard deviation.}
\label{fig:drqv2}
\end{figure}
\begin{table}
\caption{Performance comparison of DrQ-v2 and DrQ-v2-SMR on six image-based tasks from DMC suite. The numbers indicate the performance achieved when the specific number of frames is seen. We \textbf{bold} the best mean results.}
\renewcommand\arraystretch{1.05}
\label{tab:drqv2performance}
\centering
\small
\begin{tabular}{l|ll}
\toprule
Tasks & DrQ-v2 & DrQ-v2-SMR \\
\midrule
cartpole-swingup@500K & 682.7$\pm$304.8 & \textbf{842.5}$\pm$25.2 \\
cheetah-run@500K & 605.5$\pm$12.3 & \textbf{626.5}$\pm$17.8 \\
cup-catch@500K & 965.6$\pm$5.9 & \textbf{970.2}$\pm$5.1 \\
finger-spin@500K & 867.7$\pm$55.0 & \textbf{872.0}$\pm$39.3 \\
reacher-easy@500K & \textbf{736.8}$\pm$185.5 & 736.2$\pm$182.1 \\
walker-walk@500K & 869.5$\pm$102.6 & \textbf{949.3}$\pm$8.9 \\
\bottomrule
\end{tabular}
\end{table}
\subsection{Can SMR still work with longer online interactions?}
\label{sec:smrlonger}
In the main text and the appendix above, we run most of the experiments with only 300K online interactions or 500K online interactions (100K for SAC-SMR on image-based tasks from the DMC suite). Though 300K or 500K (or even fewer) online interactions are widely adopted for examining sample efficiency in model-based methods \cite{Janner2019WhenTT, Pan2020TrustTM, Lai2020BidirectionalMP, wu2022plan} and REDQ \cite{Chen2021RandomizedED}, one may wonder whether our method can consistently improve sample efficiency with longer online interactions. To address this concern, we run SAC-SMR ($M=10$) on 16 tasks from the DMC suite for a typical 1M online interactions. Each algorithm is evaluated every 1000 timesteps over 10 trials. We summarize the empirical results in Figure \ref{fig:sacsmr1m} where SAC-SMR significantly outperforms vanilla SAC on all of the evaluated tasks by a remarkable margin. SAC-SMR can converge faster and learn faster with longer online interactions.
\begin{figure}
\caption{Experimental results of SAC-SMR against SAC on 16 tasks from DMC suite. All methods are run for 1M online interactions. The results are averaged over 6 different random seeds and the shaded region represents the standard deviation.}
\label{fig:sacsmr1m}
\end{figure}
Furthermore, we run TQC-SMR and TQC for 1M online interactions on 4 OpenAI Gym environments to show the generality of the above conclusion. We summarize the empirical results in Figure \ref{fig:tqcsmr1m}. It can be seen that SMR consistently improves the sample efficiency of TQC with longer interactions, often surpassing the base algorithm by a large margin. We believe the evidence above is sufficient to show that SMR does aid sample efficiency with longer interactions.
\begin{figure}
\caption{Experimental results of TQC-SMR against TQC on 4 tasks from OpenAI Gym. All methods are run for 1M online interactions. The results are averaged over 6 different random seeds and the shaded region represents the standard deviation.}
\label{fig:tqcsmr1m}
\end{figure}
The concern on whether SMR aids sample efficiency with longer interactions is strongly correlated with the concern on the asymptotic performance of SMR. One can find in Figures \ref{fig:sacsmr1m} and \ref{fig:tqcsmr1m} that the asymptotic performance of SMR upon different baseline algorithms is actually quite good. For example, on many tasks like \textit{finger-turn-hard}, \textit{reacher-hard}, SAC-SMR converges very fast and achieves the highest possible return on them. Meanwhile, as we emphasize in the main text, users do not always have to use a large SMR ratio if they are worried about overfitting. SMR can serve as a quite good warm-up strategy, i.e., utilizing SMR (with SMR ratio $M=10$) for some initial interactions (e.g., 300K) and then resuming the vanilla training process (with SMR ratio $M=1$). In this way, one can enjoy both good sample efficiency from SMR and good asymptotic performance from the vanilla algorithm.
\subsection{Experimental results on Atari}
\label{appsec:atari}
\begin{wrapfigure}{r}{0.4\textwidth}
\centering
\includegraphics[scale=0.6]{figures/pong.pdf}
\caption{Experimental results of DQN-SMR ($M=10$) against DQN on PongNoFrameSkip-v4 task. Each method is run for 500K frames. The results are averaged over 5 random seeds, and the shaded region captures the standard deviation.}
\label{fig:pongexample}
\end{wrapfigure}
We show in the main text that Q-SMR significantly outperforms Q-learning on two discrete control tasks. One may naturally ask: can SMR aid the sample efficiency of DQN on Arcade Learning Environment \cite{Bellemare2012TheAL}? To answer this question, we conduct experiments on one typical environment \texttt{PongNoFrameSkip-v4}. We adopt the original way of processing the Atari image input, i.e., map the image ($3\times 84 \times 84$) into an embedding of size $32\times 7 \times 7$ with convolutional networks. Then, this representation is passed into an MLP with a hidden layer size of 256 to get $Q$-value estimate. We keep the default hyperparameters of DQN unchanged and only incorporate a sample reuse loop in it to yield DQN-SMR. It can be found in Figure \ref{fig:pongexample} that DQN-SMR with SMR ratio $M=10$ remarkably outperforms DQN on Pong. However, it takes about \textbf{84 hours} for DQN-SMR to run 500K frames on Pong, which is 8.7 times slower than DQN (9.6 hours). The computation cost is due to the fact that the size of the image input is very large, and it takes time for the network to process it. Updating on the fixed batch (which SMR does) will inevitably worsen the situation and take longer to train the agent. Considering that there are many advanced methods for discrete control with image input like EfficientZero \cite{Ye2021MasteringAG} (which solves Atari within 2 hours of real-time game play), MuZero \cite{Schrittwieser2019MasteringAG}, Dreamer v2 \cite{hafner2021mastering}, SimPLe \cite{Kaiser2019ModelBasedRL}, it is \textbf{\color{red} STRONGLY NOT RECOMMENDED} to adopt SMR on image-based tasks like Atari.
However, to show that our method can also work in discrete control settings, we conduct experiments on four environments from Atari. To save training time, we only adopt a small SMR ratio $M=2$. We run \texttt{PongNoFrameSkip-v4} for 1M frames, and other tasks for 4M frames. Each algorithm is evaluated every 5000 timesteps over 10 trials. The results are summarized in Figure \ref{fig:dqn-4}.
It is interesting to see that DQN-SMR outperforms DQN on \texttt{PongNoFrameSkip-v4} and is slightly better than DQN on \texttt{BreakoutNoFrameSkip-v4}. However, DQN-SMR underperforms DQN on \texttt{BeamRiderNoFrameSkip-v4} and only exceeds DQN at the last few online interactions. DQN-SMR learns faster than DQN during the first several timesteps on \texttt{SpaceInvadersNoFrameSkip-v4} and underperforms DQN afterwards. Such a phenomenon is due to the fact that the encoder in the DQN network (convolutional layers) is also continually updated during training. During the first several steps, the encoder may output bad representations for the task, indicating that the resulting representations are actually biased and inaccurate. With the sample reuse loop on these bad representations, it will become harder for the network to learn the correct knowledge and policy for this control task. For some of the tasks, the agent may successfully get rid of this dilemma, while on some other tasks, the agent may get stuck and cannot escape from it. Also, DQN is known to incur overestimation bias \cite{Hasselt2015DeepRL, Sabry2019OnTR}, which is similar to DDPG. We observe DDPG-SMR underperforms DDPG on Ant-v2 in Appendix \ref{appsec:td3smrddpgsmr}, and the situation is similar here. Meanwhile, though we adopt a very small SMR ratio $M=2$, it still requires about twice the training time of vanilla DQN, e.g., it takes 18 hours for DQN to run 1M steps on \texttt{PongNoFrameSkip-v4}, while it takes 37 hours of training time for DQN-SMR with $M=2$; it takes 3 days for DQN to run 4M frames on \texttt{BreakoutNoFrameSkip-v4}, while it takes about 6 days of training time for DQN-SMR with SMR ratio $M=2$. We thus do not recommend using SMR loop on image-based tasks. Since we set our focus on the continuous control domain, we do not actively conduct extensive experiments on DQN and its variants (e.g. C51 \cite{Bellemare2017ADP}, Rainbow \cite{Hessel2017RainbowCI}) in discrete control tasks.
\begin{figure}
\caption{Empirical results of DQN-SMR against DQN on four tasks from Atari. The results are averaged over 5 different random seeds with the shaded region denoting standard deviation.}
\label{fig:dqn-4}
\end{figure}
\subsection{Can SMR benefit base algorithm with different learning rate?}
In the main text, we combine SMR with the base algorithm without tuning the hyperparameters. Considering the difference between magnifying learning rate and SMR loop, one may wonder whether SMR can boost the sample efficiency of the base algorithm with different initial learning rates. We answer this by comparing SAC-SMR ($M=10$) against SAC and conducting experiments on two typical environments from OpenAI Gym \cite{Brockman2016OpenAIG}, HalfCheetah-v2 and Walker2d-v2, under different initial learning rates for actor and critic networks. We sweep the learning rate over $\{1\times 10^{-3}, 1\times 10^{-4}, 3\times 10^{-4}, 3\times 10^{-5}\}$ (SAC uses a learning rate of $3\times 10^{-4}$ by default, one can check the detailed hyperparameter setup for SAC in Appendix \ref{appsec:continuouscontrolalgo}). We summarize the empirical results in Figure \ref{fig:sac-different-lr}. It is easy to find that SMR notably improves the sample efficiency of SAC upon different initial learning rates, even with a large learning rate $1\times 10^{-3}$. We believe this evidence can alleviate the concern, and validate the effectiveness and generality of SMR.
\begin{figure}
\caption{Performance comparison of SAC-SMR with SMR ratio $M=10$ and SAC on HalfCheetah-v2 and Walker2d-v2 under different initial (fixed) learning rates for actor and critic networks. We sweep the initial learning rate across $\{0.001, 0.0001, 0.0003, 0.00003\}$.}
\label{fig:lr1e-3}
\label{fig:lr1e-4}
\label{fig:lr3e-4}
\label{fig:lr3e-5}
\label{fig:sac-different-lr}
\end{figure}
\section{Pseudo Codes and Hyperparameters of Off-Policy Algorithms with SMR}
\label{sec:pseudocodes}
In this section, we list the missing details on pseudo codes and hyperparameter setup for off-policy algorithms we adopt in this paper. We first introduce the hyperparameters for Q-learning and Q-SMR (the pseudo codes are omitted, please check Algorithm \ref{alg:q-smr}). As we only present the general framework of SMR upon actor-critic architecture in Algorithm \ref{alg:ac-smr}, we further offer the detailed pseudo codes and hyperparameter setup for various continuous control algorithms.
\subsection{Q-learning and Q-SMR}
\label{appsec:qlearningandqsmr}
We conduct experiments using Q-SMR and Q-learning on two discrete control tasks, \texttt{CliffWalking-v0} from OpenAI Gym \cite{Brockman2016OpenAIG} and \texttt{maze-random-20x20-plus-v0} from Gym-Maze (please refer to Gym documentation (\href{https://gymnasium.farama.org/}{https://gymnasium.farama.org/}) and \href{https://github.com/MattChanTK/gym-maze}{https://github.com/MattChanTK/gym-maze}). \texttt{CliffWalking-v0} is a gridworld learning task adapted from Example 6.6 from \cite{Sutton2005ReinforcementLA}. It contains 4$\times$12 grids. The agent starts at the bottom-left and aims at reaching the bottom-right. There exists a cliff in the bottom-center, and the agent will return to the start position if it steps on the cliff. The agent can take four actions (move up, move down, move left, and move right). Each episode of game play contains 100 steps. The episode ends if the agent steps on the cliff. We run Q-SMR and Q-learning for 500 episodes and average their performance over 20 independent runs.
\texttt{maze-random-20x20-plus-v0} is a 2D maze environment where the agent is targeted to find its way from the top left corner to the goal at the bottom right corner. The objective is to find the shortest path from the start to the goal. The agent can also take four actions (move up, move down, move left, and move right) and the observation space is given by the coordinates of the agent. The agent receives a reward of 1 if it reaches the goal position. For every step in the maze, the agent receives an additional reward of $-\frac{0.1}{\# \rm cells}$, where $\# \rm cells$ denotes the number of cells. For \texttt{maze-random-20x20-plus-v0}, there are $20\times 20$ cells. Specially, the agent can \textit{teleport} from a portal to another portal of the same color. We run Q-SMR and Q-learning for 100 episodes where each episode contains 40000 steps. The maze will be reset if the episode terminates.
For both environments, we use a learning rate of $\alpha = 0.05$, discount factor $\gamma=0.99$, and exploration rate $\epsilon = 0.1$ ($\epsilon$-greedy) for the training process. Unlike DQN, we use a fixed exploration rate and learning rate instead of decaying them. During the evaluation, we use an exploration rate of $\epsilon=0$. We adopt random seeds of 0-19 for simplicity.
\subsection{Continuous control algorithms}
\label{appsec:continuouscontrolalgo}
In our experiments, we use \texttt{MuJoCo 2.0} with \texttt{Gym} version \texttt{0.18.3} and \texttt{PyTorch} \cite{Paszke2019PyTorchAI} version \texttt{1.8}. We conduct experiments on MuJoCo ``-v2" environments and PyBullet ``-MuJoCoEnv-v0" environments.
We present in Algorithm \ref{alg:algtd3smr} the pseudo code for TD3-SMR. We omit the pseudo code for SAC-SMR since it is very similar to that of TD3-SMR. Compared to the original TD3, the only modification in TD3-SMR is the injected sample reuse loop (see lines 7-15 of Algorithm \ref{alg:algtd3smr}). We list in Table \ref{tab:parametertd3sac} the hyperparameters for SAC, TD3, and SAC-SMR, TD3-SMR where SAC-SMR and TD3-SMR share identical parameters with their base algorithms. We keep the hyperparameters of all these algorithms fixed on all of the evaluated tasks. Our parameter setup generally resembles \cite{haarnoja2018softactorcritic, Haarnoja2018SoftAO}. It is worth noting that this hyperparameter setup is slightly different from the original TD3, where network size $(400,300)$, learning rate $1\times 10^{-3}$ and batch size $100$ are adopted (see \cite{Fujimoto2018AddressingFA}). As the authors mentioned (please see \href{https://github.com/sfujim/TD3}{https://github.com/sfujim/TD3}), the parameter setup for TD3 is now different from the original paper. We therefore choose to follow the current hyperparameter setup in the authors' official implementation.
\begin{algorithm}[tb]
\caption{TD3-SMR}
\label{alg:algtd3smr}
\begin{algorithmic}[1]
\STATE Initialize critic networks $Q_{\theta_1}, Q_{\theta_2}$ and actor network $\pi_{\phi}$ with random parameters
\STATE Initialize target networks $\theta_1^\prime \leftarrow \theta_1, \theta_2^\prime \leftarrow \theta_2, \phi^\prime \leftarrow \phi$ and replay buffer $\mathcal{B} = \{\}$
\FOR{$t$ = 1 to $T$}
\STATE Select action $a$ with exploration noise $a\sim \pi_\phi(s) + \epsilon$, where $\epsilon\sim \mathcal{N}(0,\sigma)$ and observe reward $r$, new state $s^\prime$
\STATE Store transitions in the replay buffer, i.e., $\mathcal{B}\leftarrow\mathcal{B}\bigcup \{(s,a,r,s^\prime)\}$
\STATE Sample $N$ transitions $\{(s_j,a_j,r_j,s_j^\prime)\}_{j=1}^N\sim\mathcal{B}$
\color{red} \FOR{$m$ = 1 to $M$}
\STATE $\tilde{a}\sim\pi_{\phi^\prime}(s^\prime) + \epsilon$, $\epsilon\sim$ clip($\mathcal{N}(0,\bar{\sigma}),-c,c$)
\STATE $y \leftarrow r + \gamma\min_{i=1,2}Q_{\theta^\prime_i}(s^\prime,\tilde{a})$
\STATE Update critic $\theta_i$ by minimizing $\frac{1}{N}\sum_s (Q_{\theta_i}(s,a)-y)^2$
\IF{$t\mod d$}
\STATE Update actor $\phi$ by deterministic policy gradient $\nabla_\phi J(\phi) = \frac{1}{N}\sum_s \nabla_a Q_{\theta_1}(s,a)|_{a=\pi_{\phi}(s)}\nabla_{\phi}\pi_{\phi}(s)$
\STATE Update target networks: $\theta_i^\prime \leftarrow \tau\theta_i + (1-\tau)\theta_i^\prime, \phi^\prime\leftarrow\tau\phi+(1-\tau)\phi^\prime$
\ENDIF
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
We list the pseudo code for DARC-SMR \cite{Efficient2022Lyu} in Algorithm \ref{alg:algdarc} and its hyperparameter setup in Table \ref{tab:parametertd3sac}. We follow the original hyperparameter setup of the DARC paper and adopt the network size $(400,300)$ for both the actor network and critic network. For the weighting coefficient $\nu$ in DARC (for balancing the underestimation bias and overestimation bias), we also follow the best recommended hyperparameter by the authors, where we adopt $\nu=0.15$ for Hopper-v2, $\nu=0.25$ for Ant-v2, and $\nu=0.1$ for HalfCheetah-v2 and Walker2d-v2. For the regularization parameter $\lambda$ in DARC, we use $\lambda=0.005$ by default. Other parameters are identical to the original paper and we keep them unchanged throughout our experiments. We use the official implementation of DARC (\href{https://github.com/dmksjfl/DARC}{https://github.com/dmksjfl/DARC}) when conducting experiments.
\begin{table*}
\centering
\caption{Hyperparameters setup for TD3 \cite{Fujimoto2018AddressingFA}, SAC \cite{haarnoja2018softactorcritic}, DARC \cite{Efficient2022Lyu}, TQC \cite{Kuznetsov2020ControllingOB}, and REDQ \cite{Chen2021RandomizedED} on continuous control benchmarks.}
\label{tab:parametertd3sac}
\begin{tabular}{lrr}
\toprule
\textbf{Hyperparameter} & \textbf{Value} \quad \\
\midrule
Shared & \\
\qquad Actor network & \qquad $(400,300)$ for DARC and $(256,256)$ for others \\
\qquad Batch size &\qquad $256$ \\
\qquad Learning rate & \qquad $1\times 10^{-3}$ for DARC and $3\times 10^{-4}$ for others \\
\qquad Optimizer & \qquad Adam \cite{KingmaB14adam} \\
\qquad Discount factor & \qquad $0.99$ \\
\qquad Replay buffer size & \qquad $10^6$ \\
\qquad Warmup steps & \qquad $256$ for TQC and $5\times 10^3$ for others \\
\qquad Nonlinearity & \qquad ReLU \\
\qquad Target update rate & \qquad $5\times 10^{-3}$ \\
\midrule
TD3 & \\
\qquad Target update interval & \qquad $2$ \\
\qquad Critic network & \qquad $(256,256)$ \\
\qquad Exploration noise &\qquad $\mathcal{N}(0,0.1)$ \\
\qquad Target noise & \qquad $0.2$ \\
\qquad Noise clip & \qquad $0.5$ \\
\midrule
DARC & \\
\qquad Regularization parameter $\lambda$ & \qquad $0.005$ \\
\qquad Critic network & \qquad $(400,300)$ \\
\midrule
SAC & \\
\qquad Critic network & \qquad $(256,256)$ \\
\qquad Target update interval & \qquad $1$ \\
\qquad Reward scale & \qquad $1$ \\
\qquad Entropy target & \qquad $-{\rm dim}(\mathcal{A})$ \\
\qquad Entropy auto-tuning & \qquad True \\
\qquad Maximum log std & \qquad $2$ \\
\qquad Minimum log std & \qquad $-20$ \\
\midrule
TQC & \\
\qquad Critic network & \qquad $(512,512,512)$ \\
\qquad Number of critic networks & \qquad $5$ \\
\qquad Number of atoms & \qquad $25$ \\
\qquad Huber loss parameter & \qquad $1$ \\
\midrule
REDQ & \\
\qquad Critic network & \qquad $(256,256)$ \\
\qquad Update-to-data (UTD) ratio & \qquad $20$ \\
\qquad Number of critic networks & \qquad $10$ \\
\qquad In-target minimization parameter & \qquad $2$ \\
\bottomrule
\end{tabular}
\end{table*}
\begin{algorithm}[tb]
\caption{DARC-SMR}
\label{alg:algdarc}
\begin{algorithmic}[1]
\STATE Initialize critic networks $Q_{\theta_1}, Q_{\theta_2}$ and actor networks $\pi_{\phi_1}, \pi_{\phi_2}$ with random parameters
\STATE Initialize target networks $\theta_1^\prime \leftarrow \theta_1, \theta_2^\prime \leftarrow \theta_2, \phi_1^\prime \leftarrow \phi_1, \phi_2^\prime \leftarrow \phi_2$ and replay buffer $\mathcal{B} = \{\}$
\FOR{$t$ = 1 to $T$}
\STATE Select action $a$ with $\max_i \max_j Q_{\theta_i}(s, \pi_{\phi_j}(s))$ added $\epsilon\sim \mathcal{N}(0,\sigma)$
\STATE Execute action $a$ and observe reward $r$, new state $s^\prime$ and done flag $d$
\STATE Store transitions in the replay buffer, i.e., $\mathcal{B}\leftarrow\mathcal{B}\bigcup \{(s,a,r,s^\prime,d)\}$
\FOR{$i = 1,2$}
\STATE Sample $N$ transitions $\{(s_j,a_j,r_j,s_j^\prime,d_j)\}_{j=1}^N\sim\mathcal{B}$
\color{red} \FOR{$m$ = 1 to $M$}
\STATE Get actions: $a_1\leftarrow \pi_{\phi_1^\prime}(s^\prime) + \epsilon$, $a_2 \leftarrow \pi_{\phi_2^\prime}(s^\prime) + \epsilon$, $\epsilon\sim$ clip($\mathcal{N}(0,\bar{\sigma}),-c,c$)
\STATE Calculate $\hat{V}(s^\prime) = (1 - \nu) \max_{k=1,2} \min_{j=1,2} Q_{\theta_j^\prime}(s^\prime,a_k) + \nu \min_{k=1,2} \min_{j=1,2} Q_{\theta_j^\prime}(s^\prime,a_k),$
\STATE $y \leftarrow r + \gamma(1-d) \hat{V}(s^\prime)$
\STATE Update critic $\theta_i$ by minimizing $\frac{1}{N}\sum_s \left \{ (Q_{\theta_i}(s,a)-y)^2 + \lambda [Q_{\theta_1}(s,a) - Q_{\theta_2}(s,a)]^2 \right \}$
\STATE Update actor $\phi_i$ by maximizing $\frac{1}{N}\sum_s \nabla_a Q_{\theta_i}(s,a)|_{a=\pi_{\phi_i}(s)}\nabla_{\phi_i}\pi_{\phi_i}(s)$
\STATE Update target networks: $\theta_i^\prime \leftarrow \tau\theta_i + (1-\tau)\theta_i^\prime, \phi_i^\prime\leftarrow\tau\phi_i+(1-\tau)\phi_i^\prime$
\ENDFOR
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
We further combine SMR with TQC \cite{Kuznetsov2020ControllingOB} and list the pseudo code for TQC-SMR in Algorithm \ref{alg:algtqc}, with its hyperparameter setup listed in Table \ref{tab:parametertd3sac}. Similarly, we follow the default hyperparameter recommended by the authors, e.g., the actor network has a network size of $(256,256)$, while the critic network has a size of $(512,512,512)$. The agent starts training when $256$ samples are collected. For the most critical hyperparameter, the number of dropped atoms $d$, we follow the original paper and adopt $d=5$ for Hopper-v2, $d=0$ for HalfCheetah-v2, and $d=2$ for Ant-v2 and Walker2d-v2. For TD3-SMR, SAC-SMR, and TQC-SMR, we adopt an SMR ratio $M=10$ by default for all of the evaluated state-based tasks. We use the official implementation of TQC (\href{https://github.com/SamsungLabs/tqc_pytorch}{https://github.com/SamsungLabs/tqc\_pytorch}) for all of the experimental evaluation.
\begin{algorithm}[tb]
\caption{TQC-SMR}
\label{alg:algtqc}
\begin{algorithmic}[1]
\STATE Initialize critic networks $Z_{\theta_n}, n\in\{1,2,\ldots,N\}$ and actor network $\pi_{\phi}$ with random parameters
\STATE Initialize target networks $\theta_n^\prime \leftarrow \theta_n, n\in\{1,2,\ldots,N\}$ and replay buffer $\mathcal{D} = \{\}$
\STATE Set target entropy $\mathcal{H}_T = -{\rm dim}(\mathcal{A})$, $\alpha=1$, number of quantiles $L$, left atom proportion $k$
\FOR{$t$ = 1 to $T$}
\STATE Execute action $a\sim\pi_\phi$ and observe reward $r$, new state $s^\prime$
\STATE Store transitions in the replay buffer, i.e., $\mathcal{D}\leftarrow\mathcal{D}\bigcup \{(s,a,r,s^\prime)\}$
\STATE Sample a mini-batch transitions $B = \{(s,a,r,s^\prime)\}\sim\mathcal{D}$
\color{red} \FOR{$m$ = 1 to $M$}
\STATE Update temperature parameter via $\nabla_\alpha J(\alpha) = \nabla_\alpha \mathbb{E}_{B,\pi_\phi}[\log\alpha \cdot (-\log\pi_\phi(a|s) - \mathcal{H}_T)]$
\STATE Update actor parameter $\phi$ via $\nabla_\phi \mathbb{E}_{B,\pi_\phi} \left[ \alpha\log\pi_{\phi}(a|s) - \dfrac{1}{NL}\displaystyle\sum_{l,n=1}^{N,L} \psi_{\theta_n}^l(s,a) \right]$ \\ // $\psi_{\theta_n}^l,l\in[1,L]$ is the atom at location $l$
\STATE $y_i = r + \gamma[z_{(i)}(s^\prime,a^\prime) - \alpha\log\pi_\phi(a^\prime|s^\prime)]$ \quad // $z_{(i)}$ is the sorted atoms in ascending order, $i\in[NL]$
\STATE Update critic parameter $\theta_n$ by $\nabla_{\theta_n} \mathbb{E}_{B,\pi_\phi}\left[ \dfrac{1}{kNL}\displaystyle \sum_{l=1}^L \sum_{i=1}^{kN} \rho_{\tau_l}(y_i - \psi_{\theta_n}^l) \right]$ \\
// $\rho_{\tau_l}$ is the Huber quantile loss with parameter 1
\STATE Update target networks: $\theta_n^\prime \leftarrow \beta\theta_n + (1-\beta)\theta_n^\prime, n\in\{1,2,\ldots,N\}$
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
For REDQ \cite{Chen2021RandomizedED}, we also keep the original hyperparameters unchanged when combining it with SMR, i.e., it uses a learning rate of $3\times 10^{-4}$ and a network size of $(256,256)$ for both the actor network and critic network, an ensemble size of $10$ for critics. REDQ also adopts a high update-to-data (UTD) ratio of $G=20$ and samples $2$ different indices from $10$ critics when calculating the target $Q$ value. We summarize the pseudo code for REDQ-SMR in Algorithm \ref{alg:algredq} and the hyperparameter setup in Table \ref{tab:parametertd3sac}. Inspired by the fact that model-based methods often attain higher sample efficiency by using a high UTD ratio (the number of updates taken by the agent compared to the number of actual interactions with the environment), REDQ explores the feasibility of a high UTD ratio without a dynamics model on continuous control tasks. As discussed in the main text (Section \ref{sec:relatedwork}), SMR is different from adopting a high UTD ratio. REDQ and model-based methods update the agent multiple times with bootstrapping, i.e., each time the agent sees different samples and updates on these different data multiple times. SMR, however, updates multiple times on the \textit{fixed} batch data. Since REDQ already leverages a high UTD ratio, we use an SMR ratio $M=5$ for REDQ-SMR. It is worth noting that our reported performance of REDQ is slightly different from the original paper. We have tried our best to reproduce the performance of REDQ on MuJoCo tasks. However, as the authors commented in \href{https://github.com/watchernyu/REDQ}{https://github.com/watchernyu/REDQ}, the performance of REDQ seems to differ across PyTorch \cite{Paszke2019PyTorchAI} versions and the reasons are not entirely clear.
We thus choose to run REDQ with its official implementation (\href{https://github.com/watchernyu/REDQ}{https://github.com/watchernyu/REDQ}) and PyTorch 1.8 and report the resulting learning curves.
\begin{algorithm}[tb]
\caption{REDQ-SMR}
\label{alg:algredq}
\begin{algorithmic}[1]
\STATE Initialize critic networks $Q_{\theta_i}, i=1,2,\ldots,N$ and actor network $\pi_{\phi}$ with random parameters
\STATE Initialize target networks $\theta_i^\prime \leftarrow \theta_i, i=1,2,\ldots,N$ and replay buffer $\mathcal{D} = \{\}$
\FOR{$t$ = 1 to $T$}
\STATE Take one action $a_t\sim\pi_{\phi}(\cdot|s_t)$ and observe reward $r_t$, new state $s^\prime_{t+1}$
\STATE Store transitions in the replay buffer, i.e., $\mathcal{D}\leftarrow\mathcal{D}\bigcup \{(s_t,a_t,r_t,s^\prime_{t+1})\}$
\FOR{$g$ = 1 to $G$}
\STATE Sample a mini-batch $B = \{(s,a,r,s^\prime)\}\sim\mathcal{D}$
\color{red} \FOR{$m$ = 1 to $M$}
\STATE Sample a set $\mathcal{K}$ of $K$ indices from $\{1,2,\ldots,N\}$
\STATE Compute the $Q$ target $y = r + \gamma \left( \min_{i\in\mathcal{K}}Q_{\theta_i^\prime}(s^\prime,\tilde{a}^\prime) - \alpha\log\pi_\phi(\tilde{a}^\prime|s^\prime) \right), \tilde{a}^\prime\sim\pi_\phi(\cdot|s^\prime)$
\FOR{$i=1,2,\ldots,N$}
\STATE Update $\theta_i$ with gradient descent using $\nabla_{\theta_i}\frac{1}{|B|}\sum_{(s,a,r,s^\prime)\sim B} (Q_{\theta_i}(s,a)-y)^2$
\STATE Update target networks: $\theta_i^\prime \leftarrow \tau\theta_i + (1-\tau)\theta_i^\prime$
\ENDFOR
\IF{$g=G$}
\STATE Update actor $\phi$ with gradient ascent using $\nabla_\phi \frac{1}{|B|} \sum_{s\in B} \left( \frac{1}{N}\sum_{j=1}^N Q_{\theta_j}(s,\tilde{a}) - \alpha\log\pi_\phi(\tilde{a}|s) \right),\tilde{a}\sim\pi_\phi(\cdot|s)$
\ENDIF
\ENDFOR
\ENDFOR
\ENDFOR
\end{algorithmic}
\end{algorithm}
For image-based tasks, we adopt the environment wrapper from TD-MPC \cite{Hansen2022TemporalDL} for SAC and SAC-SMR. The image is processed with a 4-layer CNN with kernel size $(7,5,3,3)$, stride $(2,2,2,2)$ and $32$ filters per layer. Then the representation is input into a 2-layer MLP with $512$ hidden units. We map the raw image input into an embedding of size $200$ and repeat the actions every two frames for six evaluated tasks. For DrQ-v2, we use its official implementation (\href{https://github.com/facebookresearch/drqv2}{https://github.com/facebookresearch/drqv2}) and keep its default hyperparameters setup fixed. For DQN experiments on Atari, we adopt the widely used RL playground implementation for DQN (\href{https://github.com/TianhongDai/reinforcement-learning-algorithms}{https://github.com/TianhongDai/reinforcement-learning-algorithms}).
\section{Compute Infrastructure}
In Table \ref{tab:computing}, we list the compute infrastructure that we use to run all of the algorithms.
\begin{table}[htb]
\caption{Compute infrastructure.}
\label{tab:computing}
\centering
\begin{tabular}{c|c|c}
\toprule
\textbf{CPU} & \textbf{GPU} & \textbf{Memory} \\
\midrule
AMD EPYC 7452 & RTX3090$\times$8 & 288GB \\
\bottomrule
\end{tabular}
\end{table}
\section{Licences}
We implement SAC on our own. Other codes are built upon source DDPG and TD3 codebases under MIT licence (\href{https://github.com/sfujim/TD3}{https://github.com/sfujim/TD3}), DARC codebase under MIT licence (\href{https://github.com/dmksjfl/DARC}{https://github.com/dmksjfl/DARC}), TQC codebase under MIT licence (\href{https://github.com/SamsungLabs/tqc\_pytorch}{https://github.com/SamsungLabs/tqc\_pytorch}), REDQ codebase under MIT licence (\href{https://github.com/watchernyu/REDQ}{https://github.com/watchernyu/REDQ}), DrQ-v2 codebase under MIT licence (\href{https://github.com/facebookresearch/drqv2}{https://github.com/facebookresearch/drqv2}).
\section{Broader Impacts}
This work mainly focuses on a simple and novel way of improving the sample efficiency of off-the-shelf off-policy RL algorithms. We do not foresee any potential negative social impact of this work.
\end{document} |
\begin{document}
\title{\textbf{A note on the coincidence of \\the projective and conformal Weyl tensors}}
\author[]{{ Christian L\"ubbe} \footnote{E-mail address:{\tt c.luebbe@ucl.ac.uk}}}
\affil[]{Department of Mathematics, University College London, Gower
Street, London WC1E 6BT, UK}
\maketitle
\begin{abstract}
This article examines the coincidence of the projective and conformal Weyl tensors associated to a given connection $\nabla $. The connection may be a general Weyl connection associated to a conformal class of metrics $[g]$. The main result for $n \ge 4$ is that the Weyl tensors coincide iff $\nabla $ is the Levi-Civita connection of an Einstein metric.
\end{abstract}
\section{Introduction}
In 1918 Hermann Weyl introduced what is now known as Weyl geometries \cite{Weyl1918}. He observed that the Riemann curvature has a conformally invariant component $C\tensor{ij}{k}{l}$, which he referred to as the conformal curvature. In \cite{Weyl1921} Weyl discussed both conformal and projective geometries and showed that analogously the Riemann curvature has a projectively invariant component $W\tensor{ij}{k}{l}$, referred to as the projective curvature. The idea has been extended to parabolic geometries (see e.g.\ \cite{BEG}, \cite{CSbook}) and in the modern literature the invariant curvature component is simply referred to as the Weyl tensor or the Weyl curvature, with the type of geometry typically implied by the context. In this article we will be dealing with $C\tensor{ij}{k}{l}$ and $W\tensor{ij}{k}{l}$ simultaneously and we will refer to them as the conformal and projective Weyl tensors respectively.
In \cite{Nur12} Nurowski investigated when a given projective class of connections $ [ \nabla ] $ on $M$ includes a Levi-Civita connection of some metric $g$ on $M$. An algorithm to check the metrisability of a chosen projective structure was given.
In proposition 2.5 of \cite{Nur12} it was shown that the projective and conformal Weyl tensors coincide if and only if the Ricci tensor of the Levi-Civita connection satisfies
\begin{equation}
\label{Nurowski condition}
M\tensor{abcd}{ef}{} R_{ef}=0
\end{equation}
where
$$
M\tensor{abcd}{ef}{} = 2 g_{a[c}\delta^e_{d]}\delta^f_{b} + 2 g_{a[d} g_{c]b}g^{ef} + 2(n-1) g_{b[d}\delta^f_{c]}\delta^e_{a}.
$$
Corollary 2.6 of \cite{Nur12} deduces that the projective and conformal Weyl tensors of an Einstein metric are equal. As a comment Nurowski raised the question whether there are non-Einstein metrics, which satisfy condition \eqref{Nurowski condition}.
This article proves that this is not the case. In particular, for a given connection $\nabla$ on an $n\ge 4$ dimensional manifold the projective and conformal Weyl tensors associated to $\nabla$ only agree if $\nabla$ is the Levi-Civita connection of an Einstein metric.
The problem is addressed in more generality by allowing for general Weyl connections. This generalisation is of interest, due to the fact that neither the Ricci curvature of a general Weyl connection nor the Ricci curvature of a projective connection need be symmetric. Hence the possibility exists that the two Weyl tensors agree when using a general Weyl connection that is not a Levi-Civita connection for a metric in $[g]$.
\section{Projective and conformal connection changes}
We define the tensors
\begin{equation*}
\Sigma^{kl}_{ij} = \delta^k_i \delta^l_j + \delta^l_i \delta^k_j ,
\quad\quad
S^{kl}_{ij} = \delta^k_i \delta^l_j + \delta^l_i \delta^k_j - g_{ij} g^{kl}
\end{equation*}
Two connections $\nabla$ and $\check{\nabla} $ are projectively related if there exists a 1-form $\check{b}_i $ such that the connection coefficients are related by
\begin{equation*}
\check{\Gamma}\tensor{i}{k}{j} = \Gamma\tensor{i}{k}{j} + \Sigma^{kl}_{ij} \check{b}_l
\end{equation*}
We denote the class of all connections projectively related to $\nabla$ by $[\nabla]_p $.
Suppose further that $\nabla$ is related to the conformal class $[g]$. By this we mean that there exists a 1-form $f_i$ such that
\begin{equation}
\label{Weyl connection metric condition}
\nabla_i g_{kl} = -2f_i g_{kl}
\end{equation}
This holds for $g_{ij}$ iff it holds for any representative in $[g]$. Connections that satisfy \eqref{Weyl connection metric condition} are referred to as general Weyl connections of $[g]$. Note that the Levi-Civita connection of any representative in $[g]$ satisfies \eqref{Weyl connection metric condition}. However $\nabla$ need not be the Levi-Civita connection for a metric in $[g]$.
The connections $\nabla$ and $\hat{\nabla} $ are conformally related if there exists a 1-form $\hat{b}_i $ such that the connection coefficients are related by
\begin{equation*}
\hat{\Gamma}\tensor{i}{k}{j} = \Gamma\tensor{i}{k}{j} + S^{kl}_{ij} \hat{b}_l
\end{equation*}
We denote the class of all connections conformally related to $\nabla$ by $[\nabla]_c $. Observe that all connections in $[\nabla]_c $ satisfy \eqref{Weyl connection metric condition}.
\section{Decomposition of the Riemann curvature}
Given a connection $\nabla$ the Riemann and Ricci tensors are defined as
\begin{equation*}
2 \nabla_{[i} \nabla_{j]} v^k = R\tensor{ij}{k}{l} v^l, \quad \quad R_{jl} = R\tensor{kj}{k}{l}
\end{equation*}
The projective and conformal Schouten tensor are related to the Ricci tensor of $\nabla$ by \cite{BEG}, \cite{Fri03}
\begin{eqnarray*}
\rho_{ij} &=& \frac{1}{n-1} R_{(ij)} + \frac{1}{n+1} R_{[ij]}\\
P_{ij} &=& \frac{1}{n-2} R_{(ij)} + \frac{1}{n} R_{[ij]} - \frac{R_{kl}g^{kl}}{2(n-2)(n-1)} g_{ij}
\end{eqnarray*}
\noindent The Schouten tensors can be used to decompose the Riemann curvature as follows
\begin{equation}
\label{Riemann decomposition}
R\tensor{ij}{k}{l} = W\tensor{ij}{k}{l} + 2\Sigma_{l[i}^{km} \rho_{j]m} = C\tensor{ij}{k}{l} + 2S_{l[i}^{km} P_{j]m} ,
\end{equation}
where $W\tensor{ij}{k}{l}$ and $C\tensor{ij}{k}{l}$ are the projective and conformal Weyl tensors respectively. Moreover the once contracted Bianchi identity $\nabla_k R\tensor{ij}{k}{l} =0 $ implies \cite{BEG} that
\begin{eqnarray}\label{proj_Bianchi}
\nabla_k W\tensor{ij}{k}{l} &=& 2(n-2) \nabla_{[i} \rho_{j]l} = (n-2)y_{ijl}\\
\label{conf_Bianchi}
\nabla_k C\tensor{ij}{k}{l} &=& 2(n-3) \nabla_{[i} P_{j]l} = (n-3)Y_{ijl}.
\end{eqnarray}
The tensor $y_{ijl}$ and $Y_{ijl}$ are known as the Cotton-York tensors.
Under a connection change $\check{\nabla} = \nabla + \check{b}$ respectively $\hat{\nabla} = \nabla + \hat{b}$ the Schouten tensors transform as
\begin{eqnarray*}
\rho_{ij} - \check{\rho}_{ij} &=& \nabla_i \check{b}_j + \half \Sigma^{kl}_{ij} \check{b}_k \check{b}_l \\
P_{ij} - \hat{P}_{ij} &=& \nabla_i \hat{b}_j + \half S^{kl}_{ij} \hat{b}_k \hat{b}_l
\end{eqnarray*}
In both cases the Schouten tensors absorb all terms that arise in the Riemann tensor under connection changes. It follows that the projective Weyl tensor $W\tensor{ij}{k}{l} $ and the conformal Weyl tensor $C\tensor{ij}{k}{l} $ are invariants of the projective class $[\nabla]_p$ and the conformal class $[\nabla]_c$, respectively. The question we wish to address is for which manifolds these two invariants coincide.
We note that for $n \le 2 \,$ $W\tensor{ij}{k}{l} =0$ and for $n \le 3 \,$ $C\tensor{ij}{k}{l} =0$. Therefore it follows trivially that:
\noindent \textit{In $n=2$ the Weyl tensors always agree. In $n=3$ they agree if and only if the manifold is projectively flat, i.e. the flat connection is contained in $[\nabla]_p $ }
Hence in the following we focus only on $n > 3$.
\section{Coincidence of the conformal and projective \\ Weyl tensors}
The Ricci tensor can be decomposed into its symmetric trace-free, skew and trace components with respect to the metric $g_{ij}$:
\begin{eqnarray}
\label{Riccidecomp}
R_{ij} &=& \Phi_{ij} + \varphi_{ij} + \frac{R}{n}g_{ij}
\end{eqnarray}
Hence the Schouten tensors can be rewritten as
\begin{eqnarray}
\label{projectiveSchoutentoRicci}
\rho_{ij}
&=& \frac{1}{n-1} \Phi_{ij} + \frac{1}{n+1} \varphi_{ij} + \frac{R}{n(n-1)}g_{ij}\\
\label{conformalSchoutentoRicci}
P_{ij}
&=& \frac{1}{n-2} \Phi_{ij} + \frac{1}{n} \varphi_{ij} + \frac{R}{2n(n-1)}g_{ij}
\end{eqnarray}
The condition $W\tensor{ij}{k}{l} = C\tensor{ij}{k}{l} $ is equivalent to
\begin{equation}
2\Sigma_{l[i}^{km} \rho_{j]m} = 2S_{l[i}^{km} P_{j]m}
\end{equation}
\noindent Substitutions of \eqref{projectiveSchoutentoRicci} and \eqref{conformalSchoutentoRicci} give
\begin{eqnarray*}
2\Sigma_{l[i}^{km} \rho_{j]m}
&=& \frac{2}{n-1}\delta_{[i}^k \Phi_{j]l} + \frac{2 R}{n(n-1)}\delta_{[i}^k g_{j]l} + \frac{2}{n+1}\delta_{[i}^k \varphi_{j]l} - \frac{2}{n+1} \delta_l^k \varphi_{ij}\\
2S_{l[i}^{km} P_{j]m}
&=& \frac{2}{n-2}\delta_{[i}^k \Phi_{j]l} - \frac{2}{n-2} g_{l[i}\Phi_{j]m}g^{km} + \frac{2 R}{n(n-1)}\delta_{[i}^k g_{j]l} \nonumber \\
&& + \frac{2}{n}\delta_{[i}^k \varphi_{j]l} - \frac{2}{n} g_{l[i}\varphi_{j]m}g^{km} - \frac{2}{n} \delta_l^k \varphi_{ij}
\end{eqnarray*}
We observe that the scalar curvature terms are identical on both sides and hence only $\Phi_{ij}$ and $\varphi_{ij}$ are involved in our condition. The scalar curvature can take arbitrary values.
\noindent Taking the trace over $il$ on both sides and equating the results gives
\begin{eqnarray*}
2\Sigma_{l[i}^{km} \rho_{j]m} g^{il}
&=& \frac{1}{n-1}\Phi\tensor{j}{k}{} - \frac{R}{n} \delta_{j}^k + \frac{3}{n+1}\varphi\tensor{j}{k}{} \\
2S_{l[i}^{km} P_{j]m} g^{il}
&=& - \Phi\tensor{j}{k}{} - \frac{R}{n} \delta_{j}^k + \frac{4-n}{n}\varphi\tensor{j}{k}{} = - R\tensor{j}{k}{} + \frac{4}{n}\varphi\tensor{j}{k}{}
\end{eqnarray*}
Comparing irreducible components we find that we require
\begin{eqnarray}
\frac{n}{n-1}\Phi\tensor{j}{k}{} = 0 \quad \mathrm{and} \quad
\frac{n^2-4}{n(n+1)}\varphi\tensor{j}{k}{} = 0
\end{eqnarray}
Thus under our assumption of $n > 3$, both $\Phi_{ij}$ and $\varphi_{ij}$ must vanish. It follows that the Ricci tensor is pure trace and hence $g$ is an Einstein metric. Note that the Bianchi identities \eqref{proj_Bianchi}, \eqref{conf_Bianchi} imply that $R$ is constant.
The result can be formulated as follows
\begin{theorem}
Let $\nabla$ be a connection related to the conformal class $[g]$.
\begin{itemize}
\item In $n=2$ the Weyl tensors always vanish and hence agree.
\item In $n=3$ the Weyl tensors agree if and only if the manifold is projectively flat, i.e. the flat connection is contained in $[\nabla]_p $
\item In $n \ge 4$ the Weyl tensors agree if and only if the connection $\nabla$ is the Levi-Civita connection of the metric $g$ and the manifold is an Einstein manifold.
\end{itemize}
\end{theorem}
\begin{corollary}
If the projective and conformal Weyl tensor for $n\ge 4$ coincide then the Cotton-York tensors coincide as well. In fact they vanish identically.
\end{corollary}
The result follows immediately from the fact that the connection is the Levi-Civita connection of an Einstein metric. Hence the Schouten tensors are proportional to the metric and both Cotton-York tensors vanish.
\section{Conclusion}
It has been shown that the coincidence of the projective and conformal Weyl tensors is closely linked to the concept of Einstein metrics. For metric connections in $[\nabla]_c$ one could have deduced the main result directly from \eqref{Nurowski condition} by using the above decomposition of the Ricci tensor and using suitable traces of \eqref{Nurowski condition}. However, the set-up given here allowed for a direct generalisation to Weyl connections without requiring a more general form of \eqref{Nurowski condition}. Moreover it was felt that the set-up provided more clarity on the role of the different types of curvatures involved.
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
We construct combinatorial model category structures on the categories of (marked) categories and (marked) pre-additive categories, and we characterize (marked) additive categories as fibrant objects in a Bousfield localization of pre-additive categories. These model category structures are used to present the corresponding $\infty$-categories obtained by inverting equivalences. We apply these results to explicitly calculate limits and colimits in these $\infty$-categories. The motivating application is a systematic construction of the equivariant coarse algebraic $K$-homology with coefficients in an additive category from its non-equivariant version.
\end{abstract}
\tableofcontents
\section{Introduction}
If $\cC$ is a category and $W$ is a set of morphisms in $\cC$, then one can consider the localization functor
\[ \ell_{\cC} \colon \cC\to\cC_{\infty}:=\cC[W^{-1}] \]
in $\infty$-categories \cite[Def.~1.3.4.1]{HA} \cite[Def.~7.1.2]{cisin}), where we consider $\cC$ as an $\infty$-category given by its nerve (which we will omit in the notation). If the relative category $(\cC,W)$ extends to a simplicial model category
in which all
objects are cofibrant, then we have an equivalence of $\infty$-categories
\[ \cC_{\infty}\simeq \mathrm{N}^{\mathrm{coh}}(\cC^{cf})\ ,\]
where the right-hand side is the nerve of the simplicial category of cofibrant/fibrant objects of $\cC$ \cite[Def.~1.3.4.15 \& Thm.~1.3.4.20]{HA}. This explicit description of $\cC_{\infty}$ is sometimes very helpful in order to calculate mapping spaces in $\cC_{\infty}$ or to identify limits or colimits of diagrams in $\cC_{\infty}$.
In the present paper we consider the case where $\cC$ belongs to the list
\[ \{{\mathbf{Cat}},{\mathbf{Cat}}^{+},\mathbf{preAdd},\mathbf{preAdd}^{+}\} \]
where ${\mathbf{Cat}}^{(+)}$ is the category of small (marked) categories (\cref{erboi33w23f234f2f}), and $\mathbf{preAdd}^{(+)}$ is the category of small (marked)
pre-additive categories (\cref{fbioerwwef32r23r23r,rbgeoirgergergergre2r4}), and $W$ are the
(marking preserving) morphisms (functors or $\Ab$-enrichment preserving functors, respectively) which admit inverses up to (marked) isomorphisms (\cref{gijoorgergerg}).
In order to fix set-theoretic issues we choose three Grothendieck universes \begin{equation}\label{rtboihgiuf4f43f34f3f3}
{\mathcal{U}}\subset \cV\subset {\mathcal{W}}\ .
\end{equation}
The objects of $\cC$ are categories in $\cV$ which are locally ${\mathcal{U}}$-small, while $\cC$ itself belongs to ${\mathcal{W}}$ and is locally $\cV$-small. We will shortly say that the objects of $\cC$ are small (as already done above), and correspondingly, that $\cC$ itself is large.
Our first main theorem is:
\begin{theorem}\label{ergioergergre34}
The pair $(\cC,W)$ extends to a combinatorial, simplicial model category structure.
\end{theorem}
We refer to \cref{vgioeoerberebg} for a more precise formulation and recall that the adjective
\emph{combinatorial} means cofibrantly generated as a model category, and locally presentable as a category. In this model category structure all objects of $\cC$ are cofibrant.
The assertion of \cref{ergioergergre34} in the case of ${\mathbf{Cat}}$ and $\mathbf{preAdd}$ is well-known or folklore.
In the proof, which closely follows the standard line of arguments, we therefore put the emphasis on checking that
all arguments work in the marked cases as well.
In order to describe the homotopy theory of (marked) additive categories,
we show the following.
\begin{prop}\label{bhergerger}
There exists a Bousfield localization $L\mathbf{preAdd}^{(+)}$ of $\mathbf{preAdd}^{(+)}$ whose fibrant objects are the (marked) additive categories.
\end{prop}
We refer to \cref{rigerogergergre} for a more precise statement.
Let $W_{\mathbf{Add}^{(+)}}$ denote the weak equivalences in $L\mathbf{preAdd}^{(+)}$.
\cref{bhergerger} then implies that we have an equivalence of $\infty$-categories
\begin{equation}\label{g5g45ggg3ff3f}
\mathbf{Add}^{(+)}_\infty := \mathbf{preAdd}^{(+)}[W_{\mathbf{Add}^{(+)}}^{-1}] \simeq \mathrm{N}^{\mathrm{coh}}(\mathbf{Add}^{(+)})\ ,
\end{equation}
where $\mathbf{Add}^{(+)}$ denotes the category of small (marked) additive categories (see \cref{rioehgjoifgregregegergeg,reiuheriververvec}).
For example, this allows us to calculate limits in $\mathbf{Add}^+_\infty$,
which is one of the motivating applications of the present paper (see \cref{ex:bc}).
Since in general an $\infty$-category modeled by a combinatorial model category is presentable, we get the following (see \cref{fiowefwefwfwf}).
\begin{kor}
The $\infty$-categories in the list
\[ \{{\mathbf{Cat}}_{\infty},{\mathbf{Cat}}^{+}_{\infty},\mathbf{preAdd}_{\infty},\mathbf{preAdd}^{+}_{\infty}, \mathbf{Add}_{\infty},\mathbf{Add}^{+}_{\infty}\} \]
are presentable.
\end{kor}
Presentability is a very useful property if one wants to show the existence of adjoint functors. For example the inclusion
$\cF_{\oplus} \colon \mathbf{Add}_{\infty}\to \mathbf{preAdd}_{\infty}$ preserves limits (by inspection) and therefore has a left-adjoint, the
additive completion functor
\[ L_{\oplus} \colon \mathbf{preAdd}_{\infty}\to \mathbf{Add}_{\infty} \]
(see \cref{fiowefwefwfwf}).
We demonstrate the utility of the model category structures, whose existence is asserted in \cref{ergioergergre34}, in a variety of examples.
\begin{enumerate}
\item In \cref{prop:2nerv}, we use relation \eqref{g5g45ggg3ff3f} in order to show an equivalence of $\infty$-categories
\[ \mathbf{Add}_{\infty}\simeq \Nerve_{2}(\mathbf{Add}_{(2,1)})\ ,\]
where the right-hand side is the $2$-categorical nerve of the strict two-category of small additive
categories. This is used in \cite{coarsetrans} to extend $K$-theory functors from $\mathbf{Add}$ to $\Nerve_{2}(\mathbf{Add}_{(2,1)})$.
\item In \cref{efweoifoewfewfewf3r323r2r} we verify that the localization functor $\ell_{\cC} \colon \cC\to \cC_{\infty}$ preserves arbitrary products, where $\cC$ belongs to the list
\[ \{ {\mathbf{Cat}},{\mathbf{Cat}}^{+},\mathbf{preAdd}_{\infty},\mathbf{preAdd}^{+}_{\infty}, \mathbf{Add}_{\infty},\mathbf{Add}^{+}_{\infty}\}\ ,\]
see \cref{wefiojewwefewf43t546466}.
\item In \cref{rgiuerhgweergergergeg} we consider additive categories of modules over rings. For example, we show in \cref{gueiurgrgerger} that
\[ L_{\oplus}(\ell_{\mathbf{preAdd}}(\bR))\simeq \ell_{\mathbf{Add}} (\Mod^{\mathrm{fg} ,\mathrm{free}}(R) )\ ,\]
i.e.\ that the additive completion of a ring (considered as an object $\ell_{\mathbf{preAdd}}(\bR)$ in $\mathbf{preAdd}_{\infty}$) is equivalent to the additive category of its finitely generated and free modules (considered in $\mathbf{Add}_{\infty}$).
We also discuss idempotent completions and their relation with the additive category of finitely generated projective modules along the same lines, see \cref{vgirejgoiergergergergregergergerg}.
\item The main result in \cref{erbgkioergergergegreg}, see \cref{weoijoijvu9bewewfewfwef}, is an explicit formula for the object \[ \colim_{BG} \ell_{\mathbf{preAdd}^{(+)},BG}(\underline{{\mathbf{A}}}) \]
in $\mathbf{preAdd}^{(+)}$, where $\underline{{\mathbf{A}}}$ is a (marked) pre-additive category with trivial action of a group $G$ and $\ell_{\mathbf{preAdd}^{(+)},BG}$ is induced from $\ell_{\mathbf{preAdd}^{(+)}}$.
\item In \cref{gijeriogjeroigergregeg} we consider $\cC$ in $ \{\mathbf{preAdd}_{\infty},\mathbf{preAdd}^{+}_{\infty}, \mathbf{Add}_{\infty},\mathbf{Add}^{+}_{\infty}\}$. In \cref{rgier9oger}, we provide an explicit formula for the object
\[ \lim_{BG} \ell_{\cC,BG} ({\mathbf{A}}) \ ,\]
where ${\mathbf{A}}$ is an object of $\cC$ with an action of $G$.
\end{enumerate}
In a parallel paper \cite{bunke} we consider model category structures on (marked) $*$-categories and a similar application to coarse homology theories including equivariant coarse topological $K$-homology.
\paragraph{Acknowledgements}
U.~Bunke and A.~Engel were supported by the SFB 1085 ``Higher Invariants''
funded by the Deutsche Forschungsgemeinschaft DFG. C.~Winges acknowledges support by the Max Planck Society and by Wolfgang L\"uck's ERC Advanced Grant ``KL2MG-interactions" (no.~662400). D.~Kasprowski and C.~Winges are members of the Hausdorff Center for Mathematics at the University of Bonn.
\section{Marked categories}\label{rgierogrg43rergerg4t3}
\subsection{Categories of marked categories and marked pre-additive categories}
In this section we introduce categories of marked categories, marked pre-additive categories and additive categories. We further describe various relations between these categories given by forgetful functors and their adjoints. We finally describe their enrichments in groupoids and simplicial sets.
Let $\bC$ be a category.
\begin{ddd}
A \emph{marking on $\bC$} is the choice of a wide
{subgroupoid} $\bC^{+}$ of the underlying groupoid of $\bC$.
\end{ddd}
\begin{ex}In this example, we name the two extreme cases of markings.
On the one hand, we can consider the minimal marking $\bC^{+}_{min}$ given by the identity morphisms of $\bC$. On the other hand, we have the maximal marking $\bC^{+}_{max}$ given by the underlying groupoid of $\bC$.
\end{ex}
\begin{ddd}\label{erboi33w23f234f2f}A \emph{marked category} is a pair $(\bC,\bC^{+})$ of a category and a marking.
A morphism between marked categories $(\bC,\bC^{+})\to (\bD,\bD^{+})$ is a functor
$\bC\to \bD$ which sends $\bC^{+}$ to $\bD^{+}$.
\end{ddd}
We let ${\mathbf{Cat}}^{+}$ denote the category of marked small categories and morphisms between marked categories. We have two functors
\begin{equation}
\label{eq_functor_Fplus}
\cF_{+} \colon {\mathbf{Cat}}^{+}\to {\mathbf{Cat}}\ , \quad (\bC,\bC^{+})\mapsto \bC
\end{equation}
and
\[ (-)^{+} \colon {\mathbf{Cat}}^{+}\to \mathbf{Groupoids}\ , \quad (\bC,\bC^{+})\mapsto \bC^{+}\ .\]
The functor $\cF_{+}$ (which forgets the markings) fits into adjunctions
\[ \mathrm{mi} \colon {\mathbf{Cat}}\leftrightarrows {\mathbf{Cat}}^{+} \colon \cF_{+}\ , \quad \cF_{+} \colon {\mathbf{Cat}}^{+}\leftrightarrows{\mathbf{Cat}} \colon \ma\ ,\]
where the functors $\mathrm{mi}$ (mark identities) and $\ma$ (mark all isomorphisms) are given (on objects) by
\[ \mathrm{mi}(\bC):=(\bC,\bC^{+}_{min}) \ ,\quad \ma(\bC):=(\bC,\bC^{+}_{max})\ , \]
and their definition on morphisms as well as the unit and counit of the adjunctions are the obvious ones.
\begin{ddd} \label{fbioerwwef32r23r23r}
A \emph{pre-additive category} is a category which is enriched over the category of abelian groups.
A \emph{morphism} between pre-additive categories is a functor which is compatible with the enrichment.
\end{ddd}
We let $\mathbf{preAdd}$ denote the category of small pre-additive categories and functors which are compatible with the enrichment.
The forgetful functor
(forgetting the enrichment) is the right-adjoint of an adjunction
\begin{equation}
\label{eq_functor_FZ}
\mathrm{Lin}_{\Z} \colon {\mathbf{Cat}}\leftrightarrows\mathbf{preAdd} \colon \cF_{\Z}
\end{equation}
whose left-adjoint is called the linearization functor.
For a pre-additive category ${\mathbf{A}}$ we call $\cF_{\Z}({\mathbf{A}})$ the underlying category.
\begin{rem} \label{vgeroihirovervbervevev}Let
${\mathbf{A}}$ be a pre-additive category. If $A$ and $B$ are two objects of ${\mathbf{A}}$ such that the product $A\times B$ and the coproduct $A\sqcup B$ exist, then the canonical morphism
$ A\sqcup B\to A\times B$ induced by the maps $(\id_{A},0) \colon A\to A\times B$ and $(0,\id_{B}) \colon B\to A\times B$ is an isomorphism. In this case we call the product or coproduct also the sum of $A$ and $B$ and use the notation $A\oplus B$.
\end{rem}
\begin{ddd} \label{rbgeoirgergergergre2r4}
We define the \emph{category of marked pre-additive categories} $\mathbf{preAdd}^{+}$
as the pull-back (in $1$-categories)
\[\xymatrix{ \mathbf{preAdd}^{+}\ar[r]\ar[d]&{\mathbf{Cat}}^{+}\ar[d]^-{\cF_{+}}\\
\mathbf{preAdd}\ar[r]^-{\cF_{\Z}}&{\mathbf{Cat}}}\]
{with the functors $\cF_{+}$ and $\cF_{\Z}$ from \eqref{eq_functor_Fplus} and \eqref{eq_functor_FZ}.}
\end{ddd}
Thus a marked pre-additive category is a pair $({\mathbf{A}},{\mathbf{A}}^{+})$ of a pre-additive category ${\mathbf{A}}$ and {a wide subgroupoid} ${\mathbf{A}}^{+}$ of the underlying groupoid of ${\mathbf{A}}$, and a morphism of marked pre-additive categories $({\mathbf{A}},{\mathbf{A}}^{+})\to ({\mathbf{B}},{\mathbf{B}}^{+})$ is a functor ${\mathbf{A}}\to {\mathbf{B}}$ which is compatible with the enrichment and sends ${\mathbf{A}}^{+}$ to ${\mathbf{B}}^{+}$.
We will denote the vertical arrow forgetting the markings, i.e., taking the underlying pre-additive category, also by $\cF_{+}$. We have adjunctions
\begin{equation}\label{f3rfkj34nfkjf3f3f3f43f}
\mathrm{mi} \colon \mathbf{preAdd}\leftrightarrows \mathbf{preAdd}^{+} \colon \cF_{+}\ , \quad \cF_{+} \colon \mathbf{preAdd}^{+}\leftrightarrows \mathbf{preAdd} \colon \ma\ ,
\end{equation}
and
\[ \mathrm{Lin}_{\Z} \colon {\mathbf{Cat}}^{+}\leftrightarrows \mathbf{preAdd}^{+} \colon \cF_{\Z}\ .\] The unit of the last adjunction provides an inclusion of categories
$\bC \to \cF_{\Z}(\mathrm{Lin}_{\Z}( \bC))$, and the subcategory of marked isomorphisms in
$\mathrm{Lin}_{\Z}(\bC )$ is exactly the image of $\bC^{+}$ under this inclusion.
\begin{rem}
Note that a sum of two addable marked isomorphisms in a marked pre-additive category need not be marked. So in general the subcategory of marked isomorphisms of a marked pre-additive category is not pre-additive.
\end{rem}
From now on we will usually shorten the notation and denote marked categories just by one symbol $\bC$ instead of $(\bC,\bC^{+})$.
The categories ${\mathrm{ma}thbf{Cat}}$, $ {\mathrm{ma}thbf{Cat}}^{+}$, $\mathrm{ma}thbf{preAdd}$ and $\mathrm{ma}thbf{preAdd}^{+}$ are enriched over themselves. For categories ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ we let $\Fun_{{\mathrm{ma}thbf{Cat}}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ in ${\mathrm{ma}thbf{Cat}}$ denote the category of functors from ${\mathrm{ma}thbf{A}}$ to ${\mathrm{ma}thbf{B}}$ and natural transformations.
Assume now that ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ are marked. Then we can consider the
functor category $\Fun_{{\mathrm{ma}thbf{Cat}}^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ in ${\mathrm{ma}thbf{Cat}}$ of functors preserving the marked subcategories and natural transformations.
\begin{ddd} \label{gwiogefwerfwefwefewfw}
We define the \emph{marked} functor category $\Fun_{{\mathrm{ma}thbf{Cat}}^+}^{+}({\mathrm{ma}thbf{A}}, {\mathrm{ma}thbf{B}})$ in ${\mathrm{ma}thbf{Cat}}^{+}$
by marking those natural transformations $(u_{a})_{a\in {\mathrm{ma}thbf{A}}}$ of $\Fun_{{\mathrm{ma}thbf{Cat}}^+}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ for which $u_{a}$ is a marked isomorphism for every $a$ in ${\mathrm{ma}thbf{A}}$.
\end{ddd}
Similarly, assume that ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ are pre-additive categories.
Then
the category of (enrichment preserving) functors $\Fun_{\mathrm{ma}thbf{preAdd}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ and natural transformations is itself naturally enriched in abelian groups,
and hence is an object of $\mathrm{ma}thbf{preAdd}$. If ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ are marked pre-additive categories, then the same applies to the category $\Fun_{\mathrm{ma}thbf{preAdd}^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ of functors preserving the enrichment and the marked subcategories.
\begin{ddd} \label{gwiogefwerfwefwefewfwadd}
We define the \emph{marked} functor category $\Fun^{+}_{\mathrm{ma}thbf{preAdd}^{+}}({\mathrm{ma}thbf{A}}, {\mathrm{ma}thbf{B}})$ in $\mathrm{ma}thbf{preAdd}^{+}$
by marking those natural transformations $(u_{a})_{a\in {\mathrm{ma}thbf{A}}}$ of $\Fun_{\mathrm{ma}thbf{preAdd}^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ for which $u_{a}$ is marked for every $a$ in ${\mathrm{ma}thbf{A}}$.
\end{ddd}
\begin{rem} This is a remark about notation.
For $\cC={\mathrm{ma}thbf{Cat}}$ or $\cC=\mathrm{ma}thbf{preAdd}$ and ${\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}}$ in $\cC^{+}$ we can consider the functor category
$\Fun_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ in $\cC$. The $+$-sign indicates that we only consider functors which preserve marked isomorphisms. In general we have a full inclusion of categories
$\Fun_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})\mathrm{ma}thrm{sub}seteq \Fun_{\cC}(\cF_{+}({\mathrm{ma}thbf{A}}),\cF_{+}({\mathrm{ma}thbf{B}}))$.
The upper index $+$ in $\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ indicates that we consider
the functor category as a marked category, i.e., as an object of $\cC^{+}$. The symbol $\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})^{+}$ denotes the subcategory of marked isomorphisms. In our longer pair notation for marked objects we thus have
\[\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})=(\Fun_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}}),\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})^{+})\ .\qedhere\]
\end{rem}
We now introduce enrichments of the categories over simplicial sets using the nerve functor
\[ \Nerve \colon {\mathrm{ma}thbf{Cat}}\to \sSet\ .\]
\begin{rem} The usual enrichment of ${\mathrm{ma}thbf{Cat}}$ over simplicial sets is given by setting
\[ \Map^{\mathrm{ma}thrm{standard}}_{{\mathrm{ma}thbf{Cat}}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}}) :=\Nerve(\Fun_{{\mathrm{ma}thbf{Cat}}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}}))\ .\]
In the present paper we will consider a different enrichment which only takes the invertible natural transformations between functors into account.
\end{rem}
For the rest of this section $\cC$ serves as a placeholder for either ${\mathrm{ma}thbf{Cat}}$ or $\mathrm{ma}thbf{preAdd}$.
We start with marked categories ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ in $\cC^{+}$.
\begin{ddd}\label{ergeiorge4tgergregergreg}
We define
\[\Map_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}}):=\Nerve(\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})^{+})\ .\qedhere\]
\end{ddd}
In other words, $\Map_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ is the nerve of the groupoid of marked isomorphisms in $\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$.
Let now ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ be categories in $\cC$.
\begin{ddd}
We define
\[\Map_{\cC}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}}):=\Nerve(\Fun^{+}_{\cC^{+}}(\mathrm{ma}({\mathrm{ma}thbf{A}}),\mathrm{ma}({\mathrm{ma}thbf{B}}))^{+})\ .\qedhere\]
\end{ddd}
In other words, $\Map_{\cC}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$ is the nerve of the groupoid of isomorphisms in $\Fun_{\cC}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})$.
The composition of functors and natural transformations naturally induces the composition law for these mapping spaces. In this way we have turned the categories ${\mathrm{ma}thbf{Cat}}$, ${\mathrm{ma}thbf{Cat}}^{+}$, $\mathrm{ma}thbf{preAdd}$ and $\mathrm{ma}thbf{preAdd}^{+}$ into simplicially enriched categories.
\begin{rem}
Since the mapping spaces are nerves of groupoids they are Kan complexes. Therefore these simplicial categories are fibrant in Bergner's model structure on simplicial categories \cite{bergner}. \end{rem}
\mathrm{ma}thrm{sub}section{The model categories \texorpdfstring{$\mathrm{ma}thbf{preAdd}^{+}$}{preAdd-plus} and \texorpdfstring{${\mathrm{ma}thbf{Cat}}^{+}$}{Cat-plus}}
\label{sec:marked}
In this section we describe the model category structures on the categories ${\mathrm{ma}thbf{Cat}}$, ${\mathrm{ma}thbf{Cat}}^{+}$, $\mathrm{ma}thbf{preAdd}$ and $\mathrm{ma}thbf{preAdd}^{+}$, see \cref{gijoorgergerg}.
The main result is \cref{vgioeoerberebg}.
As before, $\cC$ serves as a placeholder for either ${\mathrm{ma}thbf{Cat}}$ or $\mathrm{ma}thbf{preAdd}$.
We first introduce the data for the model category structure on $\cC$ or $\cC^{+}$.
\begin{ddd}\label{gijoorgergerg}\mbox{}\begin{enumerate} \item
A morphism $f \colon {\mathrm{ma}thbf{A}}\to {\mathrm{ma}thbf{B}}$ in $\cC$ (or $\cC^{+}$) is a weak equivalence if it admits an inverse $g \colon {\mathrm{ma}thbf{B}}\to {\mathrm{ma}thbf{A}}$ up to isomorphisms (or marked isomorphisms).
\item
A morphism in $\cC $ (or $\cC^{+}$) is called a cofibration if it is injective on objects.
\item
A morphism in $\cC $ (or $\cC^{+}$) is called a fibration, if it has the right-lifting property for trivial cofibrations. \qedhere\end{enumerate}
\end{ddd}
The following is the main theorem of the present section.
\begin{theorem}\label{vgioeoerberebg}
The simplicial category $\cC$ (or $\cC^{+}$) with the weak equivalences, cofibrations and fibrations as in \cref{gijoorgergerg} is a simplicial
and combinatorial model category. \end{theorem}
\begin{proof}
We refer to \cite[Def.~1.1.3 and Def.~1.1.4]{hovey} or \cite[Def.~7.1.3]{MR1944041} for the axioms (M1)-(M5) for a model category and \cite[Def.~9.1.6]{MR1944041} for the additional axioms {(M6) and (M7)} for a simplicial model category.
For the definition of cofibrant generation we refer to \cite[Def.~2.1.17]{hovey} or \cite[Def.~11.1.2]{MR1944041}.
Finally, a model category is called combinatorial if it is cofibrantly generated and locally presentable \cite{Dugger:aa},
\cite[Def.~A.2.6.1]{htt}.
\begin{enumerate}
\item In \cref{wrfwweffewf} we verify completeness and cocompleteness (M1).
\item Weak equivalences have the two-out-of-three property (M2) by \cref{fwiowowfefwefwef334}.
\item Weak equivalences, cofibrations and fibrations are closed under retracts (M3) by \cref{wfeoifjowefwefewfw}.
\item Lifting along trivial cofibrations holds by definition. Lifting along trivial fibrations (M4) holds by \cref{fweiowefwefewffewf}.
\item Existence of factorizations (M5) follows from {\cref{cor:factorization1} and \cref{cor:factorization2}}.
\item Simplicial enrichment (M6) is shown by \cref{efiuwehfiwefew23r23r32r}, and the pushout-product axiom (M7) is verified in \cref{foifjoewfefwefwef}.
\item The category is cofibrantly generated by \cref{fiowejofwefewfewf}.
\item It is locally presentable by \cref{ewdfoijfowefewfewfw}.\qedhere
\end{enumerate}
\end{proof}
\begin{rem}
The case of ${\mathrm{ma}thbf{Cat}}$ is well-known.
In the following, in order to avoid case distinctions, we will only consider the marked case in full detail.
In fact, the functor $\mathrm{ma} \colon \cC\to \cC^{+}$ is the inclusion of a full simplicial subcategory and
the model category structure is inherited. We will indicate the necessary modifications
(e.g., list the generating (trivial) cofibrations or the generators of the category in the unmarked case) in remarks at the appropriate places.
\end{rem}
Completeness and cocompleteness in the following mean admitting limits and colimits with indexing categories in the universe ${\mathrm{ma}thcal{U}}$, see \eqref{rtboihgiuf4f43f34f3f3}.
\begin{prop}\label{wrfwweffewf}
The category $\cC^{+}$ is complete and cocomplete.
\end{prop}
\begin{proof}
We will deduce the marked case from the unmarked one and use as a known fact that $ \cC$ is complete and cocomplete, see \cite[Prop.~5.1.7]{Borceux} for cocompleteness for $\cC={\mathrm{ma}thbf{Cat}}$.
Let $I$ be a category in ${\mathrm{ma}thcal{U}}$ (see \eqref{rtboihgiuf4f43f34f3f3}) and $X \colon I\to \cC^{+}$ be a diagram.
We form the object $\colim_{I} \cF_{+}(X)$ of $\cC$.
We have a canonical morphism $\cF_{+}(X)\to \underline{\colim_{I} \cF_{+}(X)}$, where $\underline{-}$ denotes the constant $I$-object. We define the marked subcategory of $\colim_{I} \cF_{+}(X)$ as the subcategory generated
by the images of marked isomorphisms under the canonical functors $\cF_{+}(X(i))\to\colim_{I} \cF_{+}(X) $ for all $i$ in $I$ and denote the resulting object of $\cC^{+}$ by $Y$.
We claim that the resulting morphism $X\to \underline{Y}$ represents the colimit of the diagram $X$.
If $Y\to T$ is a morphism in $ \cC^{+}$, then
the induced functor
$\cF_{+}(X)\to \underline{\cF_{+}(Y)}\to \underline{\cF_{+}(T)}$ preserves marked isomorphisms, i.e., refines to a morphism in $(\cC^{+})^{I}$.
Vice versa, if
$X\to \underline{T}$ is a morphism in $ (\cC^{+})^{I}$,
then we get an induced morphism
$ \cF_{+}(Y)\to \underline{\cF_{+}(T)}$.
It preserves marked isomorphisms and therefore refines to a morphism in $\cC^{+}$.
This shows that $\cC^{+}$ is cocomplete.
Let $X \colon I\to \cC^{+}$ again be a diagram.
We form the object $\lim_{I} \cF_{+}(X)$ of $\cC$.
We have a canonical morphism $ \underline{\lim_{I} \cF_{+}(X)} \to \cF_{+}(X)$.
We mark all isomorphisms in $\lim_{I} \cF_{+}(X)$ whose evaluations at every $i$ in $I$ are marked isomorphisms in $X(i)$. In this way we define an object $Y$ of $\cC^{+}$.
We claim that the resulting morphism $ \underline{Y}\to X$ represents the limit of the diagram $X$.
If $T\to Y$ is a morphism in $\cC^{+}$, then the induced
$\underline{\cF_{+}(T)} \to \underline{\cF_{+}(Y)} \to \cF_{+}(X)$ refines to a morphism in $(\cC^{+})^{I}$.
Vice versa, if
$\underline{T}\to X$ is a morphism in $(\cC^{+})^{I}$, then we get an induced morphism
$\underline{\cF_{+}(T)}\to \cF_{+}(Y)$ which again refines to a morphism in $\cC^{+}$.
This shows that $\cC^{+}$ is complete.
\end{proof}
We let
\[ \cF_{\mathrm{ma}thbf{All}} \colon \cC^{+}\to {\mathrm{ma}thbf{Cat}}\]
denote the functor which takes the underlying category, i.e., which forgets markings and enrichments (in the case of $\mathrm{ma}thbf{preAdd}^{+}$).
Recall further that we have the functor
\[ (-)^{+} \colon \cC^{+}\to \mathrm{ma}thbf{Groupoids} \]
taking the groupoid of marked isomorphisms.
Let $f\colon{\mathrm{ma}thbf{A}}\to{\mathrm{ma}thbf{B}}$ be a morphism in $\cC^{+}$.\begin{lem}
\label{lem:markedequivs}The following are equivalent. \begin{enumerate}
\item $f$ is a weak equivalence.
\item $\cF_{\mathrm{ma}thbf{All}}(f)$ and $f^{+}$ are equivalences in ${\mathrm{ma}thbf{Cat}}$ and $\mathrm{ma}thbf{Groupoids}$, respectively.
\end{enumerate}
\end{lem}
\begin{proof}
If $f$ is a weak equivalence, then by \cref{gijoorgergerg} there exists an inverse $g$ up to marked isomorphism.
Then $\cF_{\mathrm{ma}thbf{All}}(g)$ and $g^{+}$ are the required inverse equivalences of $\cF_{\mathrm{ma}thbf{All}}(f)$ and $f^{+}$.
We now show the converse. We can choose an inverse equivalence $g^+ \colon {\mathrm{ma}thbf{B}}^+ \to {\mathrm{ma}thbf{A}}^+$ of $f^{+}$ and a natural isomorphism $u\colon \id_{{\mathrm{ma}thbf{B}}^+} \xrightarrow{\cong} f^+g^+$. We then define a functor $g \colon {\mathrm{ma}thbf{B}} \to {\mathrm{ma}thbf{A}}$ as follows. \begin{enumerate} \item
On objects: For an object $B$ of ${\mathrm{ma}thbf{B}}$ we set $g(B) := g^+(B)$. \item On morphisms: On the set of morphisms $\mathrm{ma}thbf{Ho}m_{\mathrm{ma}thbf{B}}(B,B')$, we define $g$ as the composition
\[ \mathrm{ma}thbf{Ho}m_{\mathrm{ma}thbf{B}}(B,B') \xrightarrow{\cong} \mathrm{ma}thbf{Ho}m_{\mathrm{ma}thbf{B}}(fg(B),fg(B')) \xleftarrow{\cong} \mathrm{ma}thbf{Ho}m_{\mathrm{ma}thbf{A}}(g(B),g(B'))\ .\]
There the first isomorphism is induced by $u$ and the second isomorphism employs the fact that $\cF_{\mathrm{ma}thbf{All}}(f)$ is an equivalence. Since $u$ is given by marked isomorphisms and $ f$ induces a bijection on marked isomorphisms, this map also preserves marked isomorphisms. \end{enumerate}
Then $g$ is the required inverse of $f$ up to marked isomorphism. The natural transformations are $u$ and $v\colon \id_{{\mathrm{ma}thbf{A}}}\to gf$ determined by $f(v_A)=u_{f(A)}$. Note that both are by marked isomorphisms since $f$ is a bijection on marked isomorphisms.
\end{proof}
Note that a weak equivalence not only preserves marked isomorphisms, but also detects them.
Let $\bC$ and $\bD$ be two objects of $\cC^{+}$ and $a \colon \bC\to \bD$ be a morphism. \begin{ddd}\label{wfiojowefewfewfew} The morphism
$a$ is called a {\emph{marked isofibration}}, if for every object $d$ of $ \bD$, every object $c$ of $\bC$ and every marked isomorphism $u \colon a(c)\to d$ in $\bD$ there exists a marked isomorphism $v \colon c\to c^{\prime}$ in $\bC$ such that $a(v)=u$.
\end{ddd}
\begin{ex} \label{efuweifo24frergergreg}
The object classifier in ${\mathrm{ma}thbf{Cat}}$ is the category $\Delta_{{\mathrm{ma}thbf{Cat}}}^{0}$ with one object $*$ and one morphism $\id_{*}$.
The object classifier in ${\mathrm{ma}thbf{Cat}}^{+}$ is given by $\Delta^{0}_{{\mathrm{ma}thbf{Cat}}^{+}}:=\mathrm{ma}thrm{mi}(\Delta_{{\mathrm{ma}thbf{Cat}}}^{0})$.
Furthermore, the object classifiers in $\mathrm{ma}thbf{preAdd}$ and $\mathrm{ma}thbf{preAdd}^{+}$ are given by
$\Delta_{\mathrm{ma}thbf{preAdd}}^{0}:=\mathrm{Lin}_{\Z}(\Delta_{{\mathrm{ma}thbf{Cat}}}^{0})$ and
$\Delta_{\mathrm{ma}thbf{preAdd}^{+}}^{0}:=\mathrm{Lin}_{\Z}(\Delta_{{\mathrm{ma}thbf{Cat}}^{+}}^{0})$, respectively.
The morphism classifier in ${\mathrm{ma}thbf{Cat}}$ is the category $\Delta_{{\mathrm{ma}thbf{Cat}}}^{1}$ with two objects $0$ and $1$, and one non-identity morphism $0\to 1$.
The morphism classifier in ${\mathrm{ma}thbf{Cat}}^{+}$ is given by $\Delta^{1}_{{\mathrm{ma}thbf{Cat}}^{+}}:=\mathrm{ma}thrm{mi}(\Delta_{{\mathrm{ma}thbf{Cat}}}^{1})$.
Furthermore, the morphism classifiers in $\mathrm{ma}thbf{preAdd}$ and $\mathrm{ma}thbf{preAdd}^{+}$ are given by
$\Delta_{\mathrm{ma}thbf{preAdd}}^{1}:=\mathrm{Lin}_{\Z}(\Delta_{{\mathrm{ma}thbf{Cat}}}^{1})$ and
$\Delta_{\mathrm{ma}thbf{preAdd}^{+}}^{1}:=\mathrm{Lin}_{\Z}(\Delta_{{\mathrm{ma}thbf{Cat}}^{+}}^{1})$, respectively.
The invertible morphism classifier in ${\mathrm{ma}thbf{Cat}}$ is the category $\mathbb{I}_{{\mathrm{ma}thbf{Cat}}}$ with two objects $0$ and $1$, and non-identity morphisms $0\to 1$ and its inverse $1\to 0$.
The invertible morphism classifier in ${\mathrm{ma}thbf{Cat}}^{+}$ is given by $\mathbb{I}_{{\mathrm{ma}thbf{Cat}}^{+}}:=\mathrm{ma}thrm{mi}(\mathbb{I}_{{\mathrm{ma}thbf{Cat}}})$.
Furthermore, the invertible morphism classifiers in $\mathrm{ma}thbf{preAdd}$ and $\mathrm{ma}thbf{preAdd}^{+}$ are given by
$\mathbb{I}_{\mathrm{ma}thbf{preAdd}} :=\mathrm{Lin}_{\Z}(\mathbb{I}_{{\mathrm{ma}thbf{Cat}}})$ and
$\mathbb{I}_{\mathrm{ma}thbf{preAdd}^{+}} :=\mathrm{Lin}_{\Z}(\mathbb{I}_{{\mathrm{ma}thbf{Cat}}^{+}})$, respectively.
Finally, the marked isomorphism classifier in ${\mathrm{ma}thbf{Cat}}^{+}$ is given by $\mathbb{I}_{{\mathrm{ma}thbf{Cat}}^{+}}^{+}:=\mathrm{ma}(\mathbb{I}_{{\mathrm{ma}thbf{Cat}}})$, and the one in $\mathrm{ma}thbf{preAdd}^{+}$ is given by
$\mathbb{I}^{+}_{\mathrm{ma}thbf{preAdd}^{+}} :=\mathrm{Lin}_{\Z}(\mathbb{I}^{+}_{{\mathrm{ma}thbf{Cat}}^{+}})$.
\end{ex}
We have the following statement about morphisms in $\cC^{+}$.
\begin{lem}\label{hgionhbaiovnioaghiphn}\mbox{}
\begin{enumerate}
\item Trivial fibrations are surjective on objects.
\item Weak equivalences which are surjective on objects have the right lifting property with respect to all cofibrations.
\end{enumerate}
In particular, a weak equivalence is a trivial fibration if and only if it is surjective on objects.
\end{lem}
\begin{proof}
Let $f \colon \bC \to \bD$ be a trivial fibration and let $D$ in $\bD$ be an object. Since $f$ is a weak equivalence, there exists an object $C$ in $\bC$ and an isomorphism $d \colon f(C) \xrightarrow{\cong} D$.
Consider the commutative diagram
\[\xymatrix{
\Delta^0_{\cC^{+}}\ar[r]^-{C}\ar[d] & \bC\ar[d]^-{f} \\
\mathbb{I}_{\cC^{+}}\ar[r]^-{d} & \bD
}\]
Since $\Delta^0_{\cC^{+}} \to \mathbb{I}_{\cC^{+}}$ is a trivial cofibration, $d$ admits a lift $c$ to $\bC$ whose codomain is a preimage of $D$.
Let now $f \colon \bC \to \bD$ be a weak equivalence which is surjective on objects. Consider a commutative diagram
\[\xymatrix{
{\mathrm{ma}thbf{A}}\ar[r]^-{\alpha}\ar[d]_-{i} & \bC\ar[d]^-{f} \\
{\mathrm{ma}thbf{B}}\ar[r]^-{\beta} & \bD
}\]
in which $i$ is a cofibration.
We first define the lift $\gamma$ of $\beta$ on objects.
If $B$ in ${\mathrm{ma}thbf{B}}$ lies in the image of $i$, there exists a unique object $A$ in ${\mathrm{ma}thbf{A}}$ with $i(A) = B$, and we set $\gamma(B) = \alpha(A)$. Otherwise, pick any $C$ in $\bC$ such that $f(C) = \beta(B)$ and set $\gamma(B) = C$.
For a morphism $b$ in ${\mathrm{ma}thbf{B}}$,
define $\gamma(b)$ as the unique preimage of $\beta(b)$ under $f$.
Then $f \circ \gamma = \beta$ holds by definition, and $\gamma \circ i = \alpha$ also follows easily from the fact that $f$ is faithful.
\end{proof}
\begin{lem}\label{fweiojweoiffewfwefwef}
A morphism in $\cC^{+}$ is a marked isofibration if and only if it has the right lifting property with respect to the morphism
\[ \Delta^{0}_{\cC^{+}} \xrightarrow{0} \mathbb{I}^{+}_{\cC^{+}} \] classifying the object $0$ of $ \mathbb{I}^{+}_{\cC^{+}}$.
\end{lem}
\begin{proof}
In view of the universal properties of $\Delta^{0}_{\cC^{+}}$ and $\mathbb{I}_{\cC^{+}}^{+}$ this is just a reformulation of \cref{wfiojowefewfewfew}.
\end{proof}
Since $\Delta^{0}_{\cC^{+}} \xrightarrow{0} \mathbb{I}^{+}_{\cC^{+}}$ is a trivial cofibration we conclude that
fibrations are marked isofibrations.
\begin{prop}\label{fiuehfieufwfewfwef}
The marked isofibrations in $\cC^{+}$ have the right lifting property with respect to trivial cofibrations.
\end{prop}
\begin{proof}
We consider a diagram
\[\xymatrix{{\mathrm{ma}thbf{A}}\ar[r]^{\alpha}\ar[d]^{i}&\bC\ar[d]^{f}\\{\mathrm{ma}thbf{B}}\ar@{.>}[ur]^{\ell}\ar[r]^{\beta}&\bD} \]
in $\cC^{+}$,
where $f$ is a marked isofibration and $i$ is a trivial cofibration.
We can find now a morphism $j \colon {\mathrm{ma}thbf{B}}\to {\mathrm{ma}thbf{A}}$ such that $j\circ i=\id_{{\mathrm{ma}thbf{A}}}$ and such that there is a marked isomorphism $u \colon i\circ j\to \id_{{\mathrm{ma}thbf{B}}}$ which in addition satisfies $u\circ i=\id_{i}$.
{On objects we define $\ell$ as follows:
For every object $B$ of ${\mathrm{ma}thbf{B}}$ we get a marked isomorphism \[\beta(u_{B}) \colon f(\alpha(j(B)))=\beta(i(j(B)))\to \beta(B)\ .\] Using that $f$ is a marked isofibration we choose a marked isomorphism $v_B \colon \alpha(j(B))\to C$ such that $f(v_B)=\beta(u_{B})$. If $B$ is in the image of $i$, we can and will choose $v_B$ to be the identity. We then set $\ell(B):=C$. This makes both triangles commute.}
We now define the lift $\ell$ on a morphism $\phi \colon B\to B^{\prime}$ by
\[\ell(\phi):=v_{B'}\circ \alpha(j(\phi))\circ v_B^{-1}\ .\]
One can check that then both triangles commute and that this really defines a functor.
One further checks that $\ell$ is a morphism of marked categories (and preserves the enrichment in the case of pre-additive categories). Here we use that $i$ detects marked isomorphisms.
\end{proof}
\begin{kor}\label{wfeiweiofewfewfewf}
{The notions of marked isofibration and fibration in $\cC^{+}$ coincide.}
\end{kor}
\begin{rem}\label{fiowefwefewfewf}
We note that all objects in $\cC^{+}$ are fibrant and cofibrant.
Consequently, the model category $\cC^{+}$ is proper by \cite[Cor.~13.1.3]{MR1944041}.
\end{rem}
\begin{prop}\label{fweiowefwefewffewf}
The cofibrations in $\cC^{+}$ have the left-lifting property with respect to trivial fibrations.
\end{prop}
\begin{proof}
We consider a diagram
\[\xymatrix{{\mathrm{ma}thbf{A}}\ar[r]^{\alpha}\ar[d]^{i}&\bC\ar[d]^{f}\\{\mathrm{ma}thbf{B}}\ar@{.>}[ur]^{\ell}\ar[r]^{\beta}&\bD} \]
in $\cC^{+}$,
where $f$ is a trivial fibration and $i$ is a cofibration.
Since the map $i$ is injective on objects and the morphism $f$ is surjective on objects by \cref{hgionhbaiovnioaghiphn},
we can find a lift $\ell$ on the level of objects. Let now $B,B^{\prime}$ be objects in ${\mathrm{ma}thbf{B}}$.
Since $f$ is fully faithful we have a bijection
\[\xymatrix{\mathrm{ma}thbf{Ho}m_{\bC}(\ell(B),\ell(B^{\prime})) \ar[r]^f_\cong&\mathrm{ma}thbf{Ho}m_{\bD}(\beta(B),\beta(B^{\prime}) )} \ .\]
We can therefore define $\ell$ on $\mathrm{ma}thbf{Ho}m_{{\mathrm{ma}thbf{B}}}(B,B^{\prime})$ by
\[ \mathrm{ma}thbf{Ho}m_{{\mathrm{ma}thbf{B}}}(B,B^{\prime})\xrightarrow{\beta} \mathrm{ma}thbf{Ho}m_{\bD}(\beta(B),\beta(B^{\prime}))\cong \mathrm{ma}thbf{Ho}m_{\bC}(\ell(B),\ell(B^{\prime}))\ .\]
Since $f$ detects marked isomorphisms, $\ell$ preserves them.
The lower triangle commutes by construction. One can furthermore check that the upper triangle commutes. Finally one checks that this really defines a functor.
\end{proof}
\begin{lem}\label{fwiowowfefwefwef334}
The weak equivalences in $\cC^{+}$ satisfy the two-out-of-three axiom.
\end{lem}
\begin{proof}
It is clear that the composition of weak equivalences is a weak equivalence.
Assume that $f \colon {\mathrm{ma}thbf{A}}\to {\mathrm{ma}thbf{B}}$ and $g \colon {\mathrm{ma}thbf{B}}\to \bC$ are morphisms such that $f$ and $g\circ f$ are weak equivalences. Then we must show that $g$ is a weak equivalence. Let
$m \colon {\mathrm{ma}thbf{B}}\to {\mathrm{ma}thbf{A}}$ and $n \colon \bC\to {\mathrm{ma}thbf{A}}$ be inverse functors and let
$u \colon m\circ f\to \id_{{\mathrm{ma}thbf{A}}}$, $v \colon f\circ m\to \id_{{\mathrm{ma}thbf{B}}}$,
$x \colon n\circ g\circ f\to \id_{{\mathrm{ma}thbf{A}}}$ and $y \colon g\circ f\circ n\to \id_{\bC}$ be the corresponding marked
isomorphisms.
Then we consider the functor
$h:=f\circ n \colon \bC\to {\mathrm{ma}thbf{B}}$. We have marked isomorphisms
\[ h\circ g=f\circ n\circ g\xrightarrow{ v^{-1} } f\circ n\circ g\circ f\circ m\xrightarrow{x} f\circ m\xrightarrow{v} \id_{{\mathrm{ma}thbf{B}}}\]
and
\[g\circ h=g\circ f\circ n\xrightarrow{y} \id_{\bC}\ .\]
If $g$ and $g \circ f$ are weak equivalences, the argument is analogous.
\end{proof}
\begin{prop}\label{wfeoifjowefwefewfw}
The cofibrations, fibrations, and weak equivalences in $\cC^{+}$ are closed under retracts.
\end{prop}
\begin{proof}
Since fibrations are characterized by a right lifting property they are closed under retracts.
Cofibrations are closed under retracts since a retract diagram of marked categories induces a retract diagram on the level of sets of objects, and injectivity of maps between sets is closed under retracts.
It remains to consider weak equivalences. We consider a diagram
\[\xymatrix{{\mathrm{ma}thbf{A}}\ar[r]^{i}\ar[d]^{f}&{\mathrm{ma}thbf{A}}^{\prime}\ar[r]^{p}\ar[d]^{f^{\prime}}&{\mathrm{ma}thbf{A}}\ar[d]^{f}\\{\mathrm{ma}thbf{B}}\ar[r]^{j}&{\mathrm{ma}thbf{B}}^{\prime}\ar[r]^{q}&{\mathrm{ma}thbf{B}}}\]
in $\cC^{+}$ with $p\circ i=\id_{{\mathrm{ma}thbf{A}}}$ and $q\circ j=\id_{{\mathrm{ma}thbf{B}}}$, and where $f^{\prime}$ is a weak equivalence. Let $g^{\prime} \colon {\mathrm{ma}thbf{B}}^{\prime}\to {\mathrm{ma}thbf{A}}^{\prime}$ be an inverse of $f^{\prime}$ up to marked isomorphism.
Then $p\circ g^{\prime}\circ j \colon {\mathrm{ma}thbf{B}}\to {\mathrm{ma}thbf{A}}$ is an inverse of $f$ up to marked isomorphism.
\end{proof}
We have finished the verification of the basic model category axioms except the existence of factorizations. This follows from considerations about the simplicial structure which we do now.
{We define a functor \begin{equation}\label{rthoir3terhtrhrth}
Q\colon\mathrm{ma}thbf{Groupoids}\to \cC^{(+)}
\end{equation}
as follows. Let $i\colon\mathrm{ma}thbf{Groupoids}\to {\mathrm{ma}thbf{Cat}}$ be the inclusion.
\begin{enumerate}
\item In the case ${\mathrm{ma}thbf{Cat}}$, we define $Q:=i$.
\item In the case ${\mathrm{ma}thbf{Cat}}^+$, we define $Q:=\mathrm{ma}\circ i$.
\item In the case $\mathrm{ma}thbf{preAdd}$, we define $Q:=\mathrm{Lin}_\Z\circ i$.
\item In the case $\mathrm{ma}thbf{preAdd}^+$, we define $Q:=\mathrm{Lin}_\Z\circ\mathrm{ma}\circ i$.
\end{enumerate}
For (marked) preadditive categories we need a further symmetric monoidal product structure $\otimes$ (which differs from the cartesian structure) on $\mathrm{ma}thbf{preAdd}^{+}$ given as follows:
\begin{enumerate}
\item (objects) The objects of ${\mathrm{ma}thbf{A}}\otimes {\mathrm{ma}thbf{B}}$ are pairs $(A,B)$ of objects $A$ in ${\mathrm{ma}thbf{A}}$ and $B$ in ${\mathrm{ma}thbf{B}}$.
\item (morphisms) The abelian group of morphisms between $(A,B)$ and $(A^{\prime},B^{\prime})$ is given by
\[ \mathrm{ma}thbf{Ho}m_{{\mathrm{ma}thbf{A}}\otimes {\mathrm{ma}thbf{B}}}((A,B),(A^{\prime},B^{\prime})):=\mathrm{ma}thbf{Ho}m_{{\mathrm{ma}thbf{A}}}(A,A^{\prime})\otimes \mathrm{ma}thbf{Ho}m_{{\mathrm{ma}thbf{B}}}(B,B^{\prime})\ .\]
The composition is defined in the obvious way.
\item(marking) We mark tensor products of marked isomorphisms.
\end{enumerate}
We refrain from writing out the remaining data (unit, unit- and associativity constraints) explicitly.}
In order to define a tensor structure of $\cC^{(+)}$ over simplicial sets, we start with a tensor structure over groupoids.
\begin{ddd}\label{defn_functor_sharp}
In the case ${\mathrm{ma}thbf{Cat}}^{(+)}$ we define the functor \[-\sharp -\colon {\mathrm{ma}thbf{Cat}}^{(+)} \times \mathrm{ma}thbf{Groupoids} \to {\mathrm{ma}thbf{Cat}}^{(+)} \ ,\quad ({\mathrm{ma}thbf{A}},G)\mathrm{ma}psto {\mathrm{ma}thbf{A}}\sharp G:={\mathrm{ma}thbf{A}}\times Q(G).\]
In the case $\mathrm{ma}thbf{preAdd}^{(+)}$ we define the functor \[-\sharp -\colon \mathrm{ma}thbf{preAdd}^{(+)} \times \mathrm{ma}thbf{Groupoids} \to \mathrm{ma}thbf{preAdd}^{(+)} \ ,\quad ({\mathrm{ma}thbf{A}},G)\mathrm{ma}psto {\mathrm{ma}thbf{A}}\sharp G:={\mathrm{ma}thbf{A}}\otimes Q(G).\qedhere\]
\end{ddd}
Let ${\mathrm{ma}thbf{B}}$ be in $\cC^{+}$. In the following lemma, we will write $\otimes$ for the product in ${\mathrm{ma}thbf{Cat}}^+$, to avoid distinguishing between ${\mathrm{ma}thbf{Cat}}^+$ and $\mathrm{ma}thbf{preAdd}^+$.
\begin{lem}\label{gioergegergreg}
We have an adjunction
\[ -\otimes {\mathrm{ma}thbf{B}} \colon \cC^{+}\leftrightarrows \cC^{+} \colon \Fun^{+}_{\cC^{+}} ({\mathrm{ma}thbf{B}} ,-)\ ,\]
where we view $\cC^+$ as enriched over $\cC^{+}$.
\end{lem}
\begin{proof} We provide an explicit description of the unit and the counit of the adjunction.
For ${\mathrm{ma}thbf{A}}$ in $\cC^+$ they are given by morphisms
\[ \eta_{{\mathrm{ma}thbf{A}}} \colon {\mathrm{ma}thbf{A}} \to \Fun_{\cC^{+}}^+({\mathrm{ma}thbf{B}},{\mathrm{ma}thbf{A}} \otimes {\mathrm{ma}thbf{B}}) \quad \text{and} \quad \epsilon_{{\mathrm{ma}thbf{A}}} \colon \Fun_{\cC^{+}}^+({\mathrm{ma}thbf{B}},{\mathrm{ma}thbf{A}}) \otimes {\mathrm{ma}thbf{B}} \to {\mathrm{ma}thbf{A}} \]
defined as follows: \begin{enumerate} \item
The morphism $\eta_{{\mathrm{ma}thbf{A}}}$ takes an object $A$ in ${\mathrm{ma}thbf{A}}$ to the functor sending an object $B$ in ${\mathrm{ma}thbf{B}}$ to $(A,B)$ and a morphism $b$ in ${\mathrm{ma}thbf{B}}$ to $(\id_A,b)$.
A morphism $a \colon A \to A'$ is sent by $\eta_{{\mathrm{ma}thbf{A}}}$ to the natural transformation $\{ (a,\id_B) \colon (A,B) \to (A',B) \}_{B \in B}$.
\item The morphism $\epsilon_A$ is induced by evaluation of functors.
\end{enumerate}
One checks that $\eta$ and $\epsilon$ are natural transformations. One furthermore
checks the triangle identities by explicit calculations.
\end{proof}
Recall that for ${\mathrm{ma}thbf{A}}$ and ${\mathrm{ma}thbf{B}}$ in $\cC^{+}$ the category $\Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})^{+}$ is a groupoid.
Let $G$ be a groupoid. From \cref{gioergegergreg} we get natural isomorphisms
\begin{equation} \label{egiherigergregergr1}
\Fun_{\mathrm{ma}thbf{Groupoids}}(G, \Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},{\mathrm{ma}thbf{B}})^{+})\cong \Fun_{\cC^{+}}^{+}({\mathrm{ma}thbf{A}}\sharp G,{\mathrm{ma}thbf{B}})^{{+}}\cong \Fun^{+}_{\cC^{+}}({\mathrm{ma}thbf{A}},\Fun^{+}_{\cC^{+}}(Q(G),{\mathrm{ma}thbf{B}}))^{{+}}
\end{equation}
In order to define the tensor structure of $\cC^{+}$ with simplicial sets
we consider the fundamental groupoid functor.
\begin{ddd}\label{fewiohfwiof2323r23r} The \emph{fundamental groupoid} functor $\Pi$ is defined as the left-adjoint of the adjunction
\[\Pi \colon \sSet\leftrightarrows \mathrm{ma}thbf{Groupoids} \colon \Nerve\ ,\]
where $\Nerve$ takes the nerve of a groupoid.\end{ddd}
Explicitly, the fundamental groupoid $\Pi(K)$ of a simplicial set $K$ is the groupoid freely generated by the path category $P(K)$ of $K$. The category $P(K)$ in turn is given as follows:
\begin{enumerate}
\item The objects of $P(K)$ are the $0$-simplices.
\item The morphisms of $P(K)$ are generated by the $1$-simplices of $K$ subject to the relation
$g\circ f\sim h$ if there exists a $2$-simplex $\sigma$ in $K$ with
$d_{2} \sigma=f$, $d_{0}\sigma=g$ and $d_{1}\sigma=h$.
\end{enumerate}
Using the tensor and cotensor structure with groupoids we define the corresponding structures with simplicial sets by pre-composition with the {fundamental}-groupoid functor. Recall the definition \eqref{rthoir3terhtrhrth} of $Q$.
\begin{ddd}\label{grighergregregeree}We define \emph{tensor and cotensor structures} on $\cC^{+}$ with simplicial sets by
\begin{equation*}
\cC^{+}\times \sSet\to \cC^{+}\ , \quad ({\mathrm{ma}thbf{A}},K)\mathrm{ma}psto {\mathrm{ma}thbf{A}}\sharp \Pi(K)\ .
\end{equation*}
\begin{equation*}
\sSet^{op}\times \cC^{+} \to \cC^{+}\ , \quad (K,{\mathrm{ma}thbf{B}} )\mathrm{ma}psto \Fun_{\cC^{+}}^{+}(Q(\Pi(K)),{\mathrm{ma}thbf{B}})\ .\qedhere
\end{equation*}
In order to simplify notation, we will usually write ${\mathrm{ma}thbf{A}}\sharp K$ instead of $ {\mathrm{ma}thbf{A}}\sharp \Pi(K)$
and ${\mathrm{ma}thbf{B}}^{K}$ instead of $\Fun_{\cC^{+}}^{+}(Q(\Pi(K)),{\mathrm{ma}thbf{B}})$. \end{ddd}
\cref{gioergegergreg} has the following corollary obtained by applying the nerve functor and using \cref{ergeiorge4tgergregergreg} of the simplicial mapping sets in $\cC^{+}$.
\begin{kor}\label{efiuwehfiwefew23r23r32r}
For $K$ in $\sSet$ and ${\mathbf{A}}$, ${\mathbf{B}}$ in $\cC^{+}$
we have natural isomorphisms of simplicial sets
\[ \Map_{\sSet}(K,\Map_{\cC^{+}}({\mathbf{A}},{\mathbf{B}}))\cong \Map_{\cC^{+}}({\mathbf{A}}\sharp K,{\mathbf{B}})\cong \Map_{\cC^{+}}({\mathbf{A}}, {\mathbf{B}}^{K})\ .\]
\end{kor}
We consider a commutative square
\begin{equation}\label{2oihvwvwewv}
\xymatrix{
{\mathbf{A}}\ar[r]^{i}\ar[d]_{f} & {\mathbf{B}}\ar[d]^{g} \\
\bC\ar[r]^{j} & \bD
}
\end{equation}
in $\cC^{+}$.
\begin{lem}\label{lem:push.cofib.pull.fib}
\
\begin{enumerate}
\item \label{giorgergergerge99} If \eqref{2oihvwvwewv}
is a pushout and $i$ is a trivial cofibration, then $j$ is a trivial cofibration.
\item\label{giorgergergerge999} If \eqref{2oihvwvwewv}
is a pullback and $g$ is a trivial fibration, then $f$ is a trivial fibration.
\end{enumerate}
\end{lem}
\begin{proof}
We show Assertion \ref{giorgergergerge99}.
Because $i$ is a trivial cofibration, there exists a morphism $i' \colon {\mathbf{B}} \to {\mathbf{A}}$ such that $i' \circ i = \id_{\mathbf{A}}$ and a marked isomorphism $u \colon i \circ i' \to \id_{\mathbf{B}}$ satisfying $u \circ i = \id_i$.
By the universal property of the push-out, the morphism $f \circ i' \colon {\mathbf{B}} \to \bC$ induces a morphism $j' \colon \bD \to \bC$ such that $j' \circ j = \id_\bC$.
In particular, $j$ is a cofibration and it remains to show that it is a weak equivalence.
Moreover, $g \circ u$ provides a marked isomorphism $j \circ f \circ i' = g \circ i \circ i' \to g$.
The functor $-\sharp \mathbb{I}_{{\mathbf{Cat}}} \colon \cC^{+}\to \cC^{+}$ (see \cref{efuweifo24frergergreg} for $\mathbb{I}_{{\mathbf{Cat}}}$ in $\mathbf{Groupoids}$)
is a left-adjoint by \cref{gioergegergreg}. Therefore it preserves pushouts.
Using the first isomorphism in \eqref{egiherigergregergr1} and the fact that $\mathbb{I}_{{\mathbf{Cat}}}$ is the morphism classifier in $\mathbf{Groupoids}$,
we consider the natural transformation $g \circ u$ as a functor ${\mathbf{B}} \sharp \mathbb{I}_{{\mathbf{Cat}}} \to \bD$. Together with the functor $\bC\sharp \mathbb{I}_{{\mathbf{Cat}}}\to \bD$ corresponding to the identity natural transformation of $j$, by the universal property of the push-out diagram $ \eqref{2oihvwvwewv} \sharp \mathbb{I}_{{\mathbf{Cat}}}$ we obtain an induced functor $\bD \sharp \mathbb{I}_{{\mathbf{Cat}}} \to \bD$ which provides, by a converse application of the first isomorphism in \eqref{egiherigergregergr1},
a marked isomorphism $j \circ j' \to \id_\bD$. This proves that $j$ is a weak equivalence.
The proof of Assertion \ref{giorgergergerge999} can be obtained by dualizing the proof above.\end{proof}
The following proposition verifies the pushout-product axiom (M7).
\begin{prop}\label{foifjoewfefwefwef}Let $a \colon {\mathbf{A}} \to {\mathbf{B}}$ be a cofibration in $\cC^{+}$ and
$i \colon X\to Y$ be a cofibration in $\sSet$.
Then \begin{equation}\label{dquihiduqwdqwd}
({\mathbf{A}}\sharp Y )\sqcup_{{\mathbf{A}}\sharp X } ({\mathbf{B}}\sharp X )\to ({\mathbf{B}}\sharp Y )
\end{equation}
is a cofibration. Moreover, if $i$ or $a$ is in addition a weak equivalence, then
\eqref{dquihiduqwdqwd} is a weak equivalence.
\end{prop}
In the proof of this proposition we use the following two lemmas.
\begin{lem} \label{reiofweiofweewf}
For ${\mathbf{A}}$ in $ \cC$ the functor
\[ {\mathbf{A}}\sharp - \colon\sSet\to \cC^{+} \]
preserves (trivial) cofibrations.
\end{lem}
\begin{proof}
If $i \colon X\to Y$ is a cofibration, then $\Pi(i)$ is injective on objects. This implies that
${\mathbf{A}}\sharp i $ is injective on objects.
Assume now that $i $ is in addition a weak equivalence. Then
$\Pi(i)$ is an equivalence of groupoids. Let $j \colon \Pi(Y)\to \Pi(X)$ be an inverse equivalence and
$u \colon j\circ \Pi(i)\to \id_{\Pi(X)}$ and $v \colon \Pi(i)\circ j\to \id_{\Pi(Y)}$ be the corresponding isomorphisms.
Then we get a marked isomorphism
\[ {\mathbf{A}}\sharp u \colon ({\mathbf{A}}\sharp j)\circ ({\mathbf{A}}\sharp i )\to \id_{{\mathbf{A}}\sharp X }\]
by $({\mathbf{A}}\sharp u)_{(a,x)}:=(\id_{a},u_{x})$.
Similarly, we have a marked isomorphism
\[ {\mathbf{A}}\sharp v \colon ({\mathbf{A}}\sharp i )\circ ({\mathbf{A}}\sharp j) \to \id_{{\mathbf{A}}\sharp Y }\]
given by $({\mathbf{A}}\sharp v)_{(a,x)}:=(\id_{a},v_{x})$.
\end{proof}
\begin{lem}\label{efwiuhfuihfewihfweiufhewfewfewfewfwefwf}
For a simplicial set $K$, the functor
\[ -\sharp K \colon \cC^{+}\to \cC^{+} \]
preserves (trivial) cofibrations.
\end{lem}
\begin{proof}
If $a \colon {\mathbf{A}}\to {\mathbf{B}}$ is a cofibration, then it is injective on objects. Then
$a\sharp K$ is injective on objects and hence a cofibration.
If $a$ is in addition a marked equivalence, then $a\sharp K$ is a marked equivalence, too. The argument is similar to
the corresponding part of the argument in the proof of \cref{reiofweiofweewf}.
\end{proof}
\begin{proof}[Proof of \cref{foifjoewfefwefwef}]
Consider the diagram
\[\xymatrix{{\mathbf{A}}\sharp X \ar[rr]_{{\mathbf{A}}\sharp i}\ar[dd]_{a\sharp X }&&{\mathbf{A}}\sharp Y \ar[dd]^{a\sharp Y }\ar[dl]^-{b}\\&({\mathbf{A}}\sharp Y )\sqcup_{{\mathbf{A}}\sharp X } ({\mathbf{B}}\sharp X )\ar[dr]^-{?}&\\{\mathbf{B}}\sharp X \ar[rr]^{{\mathbf{B}}\sharp i}\ar[ur]&&{\mathbf{B}}\sharp Y }\]
The set of objects of the push-out is equal to the push-out of the object sets. Hence it is easy to check that $?$ is injective on objects and thus a cofibration.
Assume that $a$ is a weak equivalence.
By \cref{efwiuhfuihfewihfweiufhewfewfewfewfwefwf} the maps $a\sharp X $ and $a\sharp Y $ are trivial cofibrations.
{Since $b$ is a pushout of a trivial cofibration, it is a trivial cofibration by \cref{lem:push.cofib.pull.fib}.}
It follows from the two-out-of-three property, see \cref{fwiowowfefwefwef334}, that the morphism $?$ is a weak equivalence.
The case that $i$ is a weak equivalence is similar using \cref{reiofweiofweewf} instead of \cref{efwiuhfuihfewihfweiufhewfewfewfewfwefwf}.
\end{proof}
\begin{lem}\label{cor:factorization1}
Every morphism in $\cC^+$ can be factored into a cofibration followed by a trivial fibration.
\end{lem}
\begin{proof}
Let $a \colon {\mathbf{A}} \to {\mathbf{B}}$ be a morphism in $\cC^+$.
Denote by $i_1 \colon {\mathbf{A}} \cong {\mathbf{A}} \sharp \Delta^0 \to {\mathbf{A}} \sharp \partial\Delta^1$ the morphism induced by the map classifying the vertex $1$, and let $j \colon {\mathbf{A}} \sharp \partial\Delta^1 \to {\mathbf{A}} \sharp \Delta^1$ be the morphism induced by the inclusion $\partial\Delta^1 \to \Delta^1$. Consider the diagram
\[\xymatrix{
{\mathbf{A}}\ar[r]^-{i_1}\ar[d]_-{a} & {\mathbf{A}} \sharp \partial\Delta^1\ar[r]^-{j}\ar[d] & {\mathbf{A}} \sharp \Delta^1\ar[d] \\
{\mathbf{B}}\ar[r]^-{e_{\mathbf{B}}} & {\mathbf{A}} \sqcup {\mathbf{B}}\ar[r]^-{b} & Z(a)
}\]
in which $e_{\mathbf{B}}$ is the canonical morphism, and in which the right square is defined to be a push-out.
Since ${\mathbf{A}} \sharp \partial\Delta^1 \cong {\mathbf{A}} \sqcup {\mathbf{A}}$, it is easy to see that the left square is also a push-out.
Hence, the outer square is also a push-out.
By the universal property of the push-out, the composed morphism
\[ {\mathbf{A}}\sharp \Delta^{1} \xrightarrow{a\sharp\Delta^{1}}{\mathbf{B}}\sharp \Delta^{1} \xrightarrow{\pr_{{\mathbf{B}}}} {\mathbf{B}} \]
and the identity on ${\mathbf{B}}$ induce a morphism $q \colon Z(a) \to {\mathbf{B}}$ such that $q \circ b \circ e_{\mathbf{B}} = \id_{\mathbf{B}}$.
In particular, $q$ is surjective on objects.
Moreover, $b \circ e_{\mathbf{B}}$ is a trivial cofibration by \cref{reiofweiofweewf} and \cref{lem:push.cofib.pull.fib}.\ref{giorgergergerge99}.
The two-out-of-three property (\cref{fwiowowfefwefwef334}) implies that $q$ is a weak equivalence, and hence a trivial fibration by \cref{hgionhbaiovnioaghiphn}.
Since the structure morphism $e_{\mathbf{A}} \colon {\mathbf{A}} \to {\mathbf{A}} \sqcup {\mathbf{B}}$ is a cofibration, the morphism $a' := b \circ e_{\mathbf{A}} \colon {\mathbf{A}} \to Z(a)$ is also a cofibration.
Regarding $Z(a)$ as the push-out of the right square, it follows from the universal property that $q \circ a' = q \circ (b \circ e_{\mathbf{A}}) = a$,
and thus provides the required factorization.
\end{proof}
Let ${\mathbf{A}}$ be an object of $ \cC^{+}$. Recall the notation ${\mathbf{A}}^{K}$ for a simplicial set $K$ from \cref{grighergregregeree}.
\begin{lem}\label{lem:power.fib}
{The functor
\[ {\mathbf{A}}^{(-)} \colon \sSet^{op} \to \cC^{+} \]
sends (trivial) cofibrations to (trivial) fibrations.}
\end{lem}
\begin{proof}
This follows from \cref{gioergegergreg}, \cref{efwiuhfuihfewihfweiufhewfewfewfewfwefwf} and \cref{reiofweiofweewf} by explicitly checking lifting properties.
\end{proof}
\begin{lem}\label{cor:factorization2}
Every morphism in $\cC^+$ can be factored into a trivial cofibration followed by a fibration.
\end{lem}
\begin{proof}
Let $a \colon {\mathbf{A}} \to {\mathbf{B}}$ be a morphism in $\cC^+$.
Denote by $(\ev_0,\ev_1) \colon {\mathbf{B}}^{\Delta^1} \to {\mathbf{B}}^{\partial\Delta^1} \cong {\mathbf{B}} \times {\mathbf{B}}$ the morphism induced by the canonical inclusion $\partial\Delta^1 \to \Delta^1$.
Let $p_1 \colon {\mathbf{B}}^{\partial\Delta^1} \cong {\mathbf{B}} \times {\mathbf{B}} \to {\mathbf{B}}$ denote the projection on the second factor (which corresponds to the vertex $1$), and let $p_{\mathbf{A}} \colon {\mathbf{A}} \times {\mathbf{B}} \to {\mathbf{A}}$ be the projection.
Consider the diagram
\[\xymatrix@C=4em{
P(a)\ar[r]^-{q}\ar[d] & {\mathbf{A}} \times {\mathbf{B}}\ar[r]^-{p_{\mathbf{A}}}\ar[d]^-{a \times \id_{\mathbf{B}}} & {\mathbf{A}}\ar[d]^-{a} \\
{\mathbf{B}}^{\Delta^1}\ar[r]^-{(\ev_0,\ev_1)} & {\mathbf{B}} \times {\mathbf{B}}\ar[r]^-{p_1} & {\mathbf{B}}
}\]
in which the left square is defined to be a pull-back.
Since the right square is also a pull-back, the outer square is a pull-back, too.
By the universal property of the pull-back, the composed morphism
\[ {\mathbf{A}} \xrightarrow{a} {\mathbf{B}} \xrightarrow{{\mathtt{const}}} {\mathbf{B}}^{\Delta^1} \]
and the identity on ${\mathbf{A}}$ induce a morphism $i \colon {\mathbf{A}} \to P(a)$ such that $p_{\mathbf{A}} \circ q \circ i = \id_{\mathbf{A}}$.
In particular, $i$ is a cofibration.
Since $\ev_1 = p_1 \circ (\ev_0,\ev_1)$ is a trivial fibration by \cref{lem:power.fib}, it follows from \cref{lem:push.cofib.pull.fib}.\ref{giorgergergerge999} that $p_{\mathbf{A}} \circ q$ is a trivial fibration.
The two-out-of-three property (\cref{fwiowowfefwefwef334}) implies that $i$ is a weak equivalence, and thus a trivial cofibration.
Note that $q$ is a fibration since it is the pullback of a fibration (use again \cref{lem:power.fib} and \cref{lem:push.cofib.pull.fib}.\ref{giorgergergerge999}).
Since the structure morphism $p_{\mathbf{B}} \colon {\mathbf{A}} \times {\mathbf{B}} \to {\mathbf{B}}$ is a fibration, the morphism $p_{\mathbf{B}} \circ q \colon P(a) \to {\mathbf{B}}$ is also a fibration.
Regarding $P(a)$ as the pull-back of the left square, it follows from the universal property that $(p_{\mathbf{B}} \circ q) \circ i = a$,
and thus provides the required factorization.
\end{proof}
We thus have finished the verification of the model category axioms (M1) to (M7).
\begin{rem}
By considering the full embedding $\mathrm{ma} \colon \cC\to \cC^{+}$, we obtain a verification of the axioms in the unmarked case.
\end{rem}
We next describe the generating cofibrations and the generating trivial cofibrations.
Recall that by \cref{fweiojweoiffewfwefwef} and \cref{wfeiweiofewfewfewf} we can take \[J:=\{\Delta_{\cC^{+}}^{0}\to \mathbb{I}^{+}_{\cC^{+}} \}\]
as the generating trivial cofibrations for $\cC^{+}$.
\begin{rem}
The set of generating trivial cofibrations for $\cC$ is given by
\[J:=\{\Delta^{0}_{\cC}\to \mathbb{I}_{\cC} \}\ .\qedhere\]
\end{rem}
We furthermore define
\[ I:=J\cup \{U,V,V^{+},W,W^{+}\} \]
where $U,V,V^{+},W,W^{+}$ are cofibrations defined as follows (see \cref{efuweifo24frergergreg}):
\begin{enumerate}
\item $U \colon \emptyset\to \Delta_{\cC^{+}}^{0}$.
\item We let $V \colon \Delta_{\cC^{+}}^{0}\sqcup \Delta_{\cC^{+}}^{0}\to \Delta^{1}_{\cC^{+}}$ classify the pair of objects $(0,1)$ of $ \Delta^{1}_{\cC^{+}}$.
\item
We let $V^{+} \colon \Delta_{\cC^{+}}^{0}\sqcup \Delta_{\cC^{+}}^{0}\to \mathbb{I}^{+}_{\cC^{+}}$ classify the pair of objects $(0,1)$ of $\mathbb{I}^{+}_{\cC^{+}}$.
\item We define $P$ as the push-out
\[\xymatrix{\Delta_{\cC^{+}}^{0}\sqcup \Delta_{\cC^{+}}^{0}\ar[r]^-V\ar[d]^{V}& \Delta^{1}_{\cC^{+}}\ar[d]\\ \Delta^{1}_{\cC^{+}}\ar[r]&P}\]
and let $W \colon P\to \Delta^{1}_{\cC^{+}}$ be the obvious map induced by $\id_{ \Delta^{1}_{\cC^{+}}}$. \item We define $P^{+}$ as the push-out
\[\xymatrix{\Delta_{\cC^{+}}^{0}\sqcup \Delta_{\cC^{+}}^{0}\ar[r]^-{V^{+}}\ar[d]^{V^{+}}&\mathbb{I}^{+}_{\cC^{+}}\ar[d]\\\mathbb{I}^{+}_{\cC^{+}}\ar[r]&P^{+}}\]
and let $W^{+} \colon P^{+}\to \mathbb{I}^{+}_{\cC^{+}}$ be the obvious map induced by $\id_{\mathbb{I}^{+}_{\cC^{+}}}$.
\end{enumerate}
\begin{lem}
The trivial fibrations in $\cC^{+}$ are exactly the morphisms which have the right-lifting property with respect to $I$.
\end{lem}
\begin{proof}
A trivial fibration is a weak equivalence which is in addition surjective on objects by \cref{hgionhbaiovnioaghiphn}.
We first observe that lifting with respect to $U$ exactly corresponds to the surjectivity on objects.
We now use the characterization of weak equivalences given in \cref{lem:markedequivs}.
Lifting with respect to $V$ and $W$ corresponds to surjectivity and injectivity on morphisms, and
lifting with respect to $V^{+}$ and $W^{+}$ corresponds to surjectivity and injectivity on marked isomorphisms.
\end{proof}
\begin{lem}
The objects $\emptyset$, $\Delta_{\cC^{+}}^{0}$, $\Delta^{1}_{\cC^{+}}$, $\mathbb{I}_{\cC}^{+}$,
$P$ and $P^{+}$ are compact. \end{lem}
\begin{proof}
If $\cC={\mathbf{Cat}}$, then they are finite categories.
If $\cC=\mathbf{preAdd}$, then they have finitely many objects and finitely generated abelian morphism groups. This implies the assertion.
\end{proof}
\begin{rem}
In the unmarked case,
we can take the set of generating cofibrations
\[ I:=J\cup \{U,V,W\} \]
with the following definitions:
\begin{enumerate}
\item $U \colon \emptyset\to \Delta_{\cC}^{0}$.
\item We let $V \colon \Delta_{\cC}^{0}\sqcup \Delta_{\cC }^{0}\to \Delta^{1}_{\cC}$ classify the pair of objects $(0,1)$ of $ \Delta^{1}_{\cC}$.
\item We define $P$ as the push-out
\[\xymatrix{\Delta_{\cC}^{0}\sqcup \Delta_{\cC}^{0}\ar[r]^-V\ar[d]^{V}& \Delta^{1}_{\cC}\ar[d]\\ \Delta^{1}_{\cC}\ar[r]&P}\]
and let $W \colon P\to \Delta^{1}_{\cC}$ be the obvious map induced by $\id_{ \Delta^{1}_{\cC}}$.
\end{enumerate}
The objects $\emptyset$, $\Delta_{\cC}^{0}$, $\Delta^{1}_{\cC}$, $\mathbb{I}_{\cC}$
and $P$ are compact.
\end{rem}
\begin{kor}\label{fiowejofwefewfewf}
The model category $\cC^{+}$ is cofibrantly generated by finite sets of generating cofibrations and trivial cofibrations between {compact} objects.
\end{kor}
\begin{prop}\label{ewdfoijfowefewfewfw}
The category $\cC^{+}$ is locally presentable.
\end{prop}
\begin{proof}
Since we have already shown that $\cC^{+}$ is cocomplete, by \cite[Thm 1.20]{AR} it suffices to show that $\cC^{+}$ has a strong generator consisting of compact objects. For this it suffices to show that there exists a set of compact objects such that every other object of $\cC$ is isomorphic to a colimit of a diagram with values in this set, see \cite[Lemma 11.4]{bunke}. We will call such a set strongly generating.
We will first show that ${\mathbf{Cat}}^{+}$ is strongly generated by a finite set of compact objects.
We consider the category $\mathbf{DirGraph}^{+}$ of marked directed graphs. It consists of directed graphs with distinguished subsets of edges called marked edges. Morphisms in $\mathbf{DirGraph}^{+}$
must preserve marked edges.
The category $\mathbf{DirGraph}^{+}$ is locally presentable by \cite[Thm 1.20]{AR}. Indeed, it is cocomplete and strongly generated by the objects in the list
\[\{*\ , \bullet \to \bullet, \bullet\xrightarrow{+} \bullet\}\ .\] We have a forgetful functor from ${\mathbf{Cat}}^{+}$ to marked directed graphs which fits into an adjunction
\[\mathrm{Free}_{{\mathbf{Cat}}^{+}}\colon\mathbf{DirGraph}^{+}\leftrightarrows {\mathbf{Cat}}^{+}\colon\cF_{\circ}\ .\]
The left adjoint takes the free category on the marked directed graph and
localizes at the marked isomorphisms.
The counit of the adjunction provides
a canonical morphism
\[v_{{\mathbf{A}}} \colon {\mathbf{F}}({\mathbf{A}}):=\mathrm{Free}_{{\mathbf{Cat}}^{+}}(\cF_{\circ}({\mathbf{A}}))\to {\mathbf{A}}\]
of marked categories.
{Consider the pullback
\[\xymatrix{
{\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}})\ar[r]^-{p_1}\ar[d]_{p_2}&{\mathbf{F}}({\mathbf{A}})\ar[d]^{v_{\mathbf{A}}}\\
{\mathbf{F}}({\mathbf{A}})\ar[r]^-{v_{\mathbf{A}}}&{\mathbf{A}}
}\]
We claim that the diagram}
\begin{equation*}
\begin{tikzcd}
{\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}}) \ar[r, shift left, "p_1"]\ar[r, shift right, "p_2"']&{\mathbf{F}}({\mathbf{A}})\ar[r, "v_{\mathbf{A}}"]&{\mathbf{A}}
\end{tikzcd}
\end{equation*}
is a coequalizer. We have $v_{\mathbf{A}}\circ p_1=v_{\mathbf{A}}\circ p_2$ by definition. That every morphism $f\colon {\mathbf{F}}({\mathbf{A}})\to {\mathbf{B}}$ with $f\circ p_1=f\circ p_2$ factors uniquely through $v_{\mathbf{A}}$ follows from the fact that $v_{\mathbf{A}}$ is surjective on objects and full.
We know that ${\mathbf{F}}({\mathbf{A}})$ is isomorphic to a colimit of a small diagram involving the
list of finite categories
\begin{equation*}
\{ \mathrm{Free}_{{\mathbf{Cat}}^{+}} (*)\ , \mathrm{Free}_{{\mathbf{Cat}}^{+}}(\bullet \to \bullet), \mathrm{Free}_{{\mathbf{Cat}}^{+}}(\bullet\xrightarrow{+} \bullet)\}\ .
\end{equation*}
The fiber product over ${\mathbf{A}}$ is not a colimit. But we have a surjection
\[v'_{\mathbf{A}}=v_{{\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}})}\colon {\mathbf{F}}( {\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}}))\to {\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}})\]
and therefore a {coequalizer diagram}
\begin{equation*}
\begin{tikzcd}
{\mathbf{F}}({\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}})) \ar[r, shift left, "p_1\circ v'_{\mathbf{A}}"]\ar[r, shift right, "p_2\circ v'_{\mathbf{A}}"']&{\mathbf{F}}({\mathbf{A}})\ar[r, "v_{\mathbf{A}}"]&{\mathbf{A}}.
\end{tikzcd}
\end{equation*}
The marked category $ {\mathbf{F}}( {\mathbf{F}}({\mathbf{A}}) \times_{{\mathbf{A}}}{\mathbf{F}}({\mathbf{A}}))$ is again a colimit of a diagram involving the generators in the list above. Hence ${\mathbf{A}}$ itself is a colimit of a diagram built from this list.
A similar argument applies in the case $ \mathbf{preAdd}^{+}$.
In this case we must replace $\cF_{\circ}$ by $\cF_{\circ}\circ \cF_{\Z}$ and $\mathrm{Free}_{{\mathbf{Cat}}^{+}}$ by
$\mathrm{Lin}_{\Z}\circ \mathrm{Free}_{{\mathbf{Cat}}^{+}}$. The list of generators is
\begin{equation*}
\mathclap{
\{ \mathrm{Lin}_{\Z}(\mathrm{Free}_{{\mathbf{Cat}}} (*))\ , \mathrm{Lin}_{\Z}(\mathrm{Free}_{{\mathbf{Cat}}}(\bullet \to \bullet))\ , \mathrm{Lin}_{\Z}(\mathrm{Free}_{{\mathbf{Cat}}}(\bullet\xrightarrow{+} \bullet))\}\ .
}
\end{equation*}
These categories are again compact since they have finitely many objects and their morphism groups are finitely generated.
\end{proof}
\begin{rem}
In order to show that ${\mathbf{Cat}}$ and $\mathbf{preAdd}$ are locally presentable one argues similarly using the category of directed graphs $\mathbf{DirGraph}$ and the adjunctions
\[\mathrm{Free}_{{\mathbf{Cat}}} \colon \mathbf{DirGraph}\leftrightarrows {\mathbf{Cat}} \colon \cF_{\circ}\ , \quad \mathrm{Lin}_{\Z}\circ \mathrm{Free}_{{\mathbf{Cat}}} \colon \mathbf{DirGraph}\leftrightarrows \mathbf{preAdd} \colon \cF_{\Z}\circ \cF_{\circ}\ .\qedhere\]
\end{rem}
\subsection{(Marked) additive categories as fibrant objects}\label{gioegregreget34t34t34t34t}
In \cref{vgioeoerberebg} we have shown that the simplicial categories $\mathbf{preAdd}$ and $\mathbf{preAdd}^{+}$ are locally presentable and carry simplicial, cofibrantly generated model category structures. In the present section we introduce Bousfield localizations of these categories whose categories of fibrant objects are exactly the additive categories or marked additive categories, respectively.
Let ${\mathbf{A}}$ be a pre-additive category.
\begin{ddd}\label{rioehgjoifgregregegergeg}
We say that ${\mathbf{A}}$ is \emph{additive} if ${\mathbf{A}}$ has a zero object and the sum, see \cref{vgeroihirovervbervevev}, of any two objects of ${\mathbf{A}}$ exists.
\end{ddd}
We let $\mathbf{Add}$ denote the full subcategory of $\mathbf{preAdd}$ of additive categories.
\begin{rem}
In contrast to being a pre-additive category, being an additive category is a property of a category.
In the following we describe the conditions for an additive category just in terms of category language. First of all we require the existence of a zero object which by definition is an object which is both initial and final. Furthermore we require the existence of finite products and coproducts, and that the natural transformation
\[ -\sqcup- \to -\times-\]
of bifunctors (its definition uses the zero object) is an isomorphism. This leads naturally to an enrichment over commutative monoids. Finally we require that the morphism monoids are in fact abelian groups.
A morphism between additive categories can be characterized as a functor which preserves products.
It then preserves sums, zero objects, and the enrichment automatically. Here one can also interchange the roles of sums and products.
Therefore $\mathbf{Add}$ can be considered as a (non-full) subcategory of ${\mathbf{Cat}}$.
\end{rem}
Let $({\mathbf{A}},{\mathbf{A}}^{+})$ be a marked pre-additive category.
\begin{ddd}\label{reiuheriververvec}
$({\mathbf{A}},{\mathbf{A}}^{+})$ is a \emph{marked additive category} if the following conditions are satisfied:
\begin{enumerate}
\item The underlying category ${\mathbf{A}}$ is additive.
\item\label{fdblkgjklrgregergergerg} ${\mathbf{A}}^{+}$ is closed under sums. \qedhere \end{enumerate}
\end{ddd}
In detail, Condition \ref{fdblkgjklrgregergergerg}
means that
for every two morphisms $a \colon A\to A^{\prime}$ and $b \colon B\to B^{\prime}$ in ${\mathbf{A}}^{+}$ the induced isomorphism $a\oplus b \colon A\oplus B\to A^{\prime}\oplus B^{\prime}$ (for any choice of objects and structure maps representing the sums) also belongs to ${\mathbf{A}}^{+}$.
In \cref{rgvoihifowefwfwe} below we will discuss a natural example of a marked pre-additive category
in which the Condition \ref{fdblkgjklrgregergergerg} is violated.
\begin{ex}
A category $\bC$ with cartesian products can be refined to a symmetric monoidal category with the cartesian symmetric monoidal structure \cite[Sec.~2.4.1]{HA}. In particular we have a functor (uniquely defined up to unique isomorphism)
\[ -\times- \colon \bC\times \bC \to \bC\ .\]
This applies to an additive category ${\mathbf{A}}$ where the cartesian product is denoted by $\oplus$.
We therefore have a sum functor
\[ -\oplus - \colon {\mathbf{A}}\times {\mathbf{A}}\to {\mathbf{A}}\ .\]
Note that ${\mathbf{A}}\times {\mathbf{A}}$ (the product is taken in $\mathbf{preAdd}$) is naturally an additive category again, and that the sum functor is a morphism of additive categories.
If $({\mathbf{A}},{\mathbf{A}}^{+})$ is now a marked additive category, then $({\mathbf{A}},{\mathbf{A}}^{+})\times ({\mathbf{A}},{\mathbf{A}}^{+})$ (the product is taken in $\mathbf{preAdd}^{+}$) is a marked additive category again, and Condition \ref{reiuheriververvec}.\ref{fdblkgjklrgregergergerg}
implies that we also have a functor
\[ -\oplus - \colon ({\mathbf{A}},{\mathbf{A}}^{+})\times ({\mathbf{A}},{\mathbf{A}}^{+})\to({\mathbf{A}},{\mathbf{A}}^{+}) \]
between marked additive categories.
\end{ex}
We want to reformulate the characterization of (marked) additive categories from \cref{rioehgjoifgregregegergeg} and \cref{reiuheriververvec} as a right-lifting property.
To this end we introduce the pre-additive categories
$\bS_{\mathbf{preAdd}}$ and $\emptyset_{\mathbf{preAdd}}$ in $\mathbf{preAdd}$ given as follows:
\begin{enumerate}
\item The pre-additive category $\bS_{\mathbf{preAdd}}$ has three objects $1$, $2$, and $S$
and the morphisms are generated by the morphisms
\[\{ 1\xrightarrow{i_{1}}S\ ,\ 2\xrightarrow{i_{2}}S\ ,\ S\xrightarrow{p_{1}}1\ ,\ S\xrightarrow{p_{2}}2 \}\]
subject to the following relations:
\[ \quad p_{1}\circ i_{1}=\id_{1}\ , \quad p_{2}\circ i_{2}=\id_{2}\ , \quad i_{1}\circ p_{1} +i_{2}\circ p_{2}=\id_{S}\ .\]
\item $\emptyset_{\mathbf{preAdd}}$ has one object $0$ and $\mathrm{Hom}_{\emptyset_{\mathbf{preAdd}}}(0,0)=\{\id_0\}$. Note that $\id_0$ is the zero morphism.
\end{enumerate}
We further define the marked versions
\[ \bS_{\mathbf{preAdd}^{+}}:=\mathrm{mi}(\bS_{\mathbf{preAdd}})\ , \quad \emptyset_{\mathbf{preAdd}^{+}}:=\mathrm{mi}(\emptyset_{\mathbf{preAdd}}) \]
in $\mathbf{preAdd}^{+}$ by marking the identities.
In the following let $\cC$ be a placeholder for $\mathbf{preAdd}$ or $\mathbf{preAdd}^{+}$.
\begin{rem}\label{fiofjoiwefwfwefewfw} We consider the object $\bS_{\cC}$ of $\cC$.
Note that the relations $p_{1}\circ i_{2}=0$ and $p_{2}\circ i_{1}=0$ are implied. The morphisms $p_{1}$, $ p_{2}$ present $S$ as the product of $1$ and $2$, and the morphisms $i_{1}$ and $i_{2}$ present $S$ as a coproduct of $1$ and $2$. Consequently, $S$ is the sum of the objects $1$ and $2$, see \cref{vgeroihirovervbervevev}.
If ${\mathbf{A}}$ belongs to $\cC$ and $f \colon \bS_{\cC}\to {\mathbf{A}}$ is a morphism, then
the morphisms $f(p_{1})$, $ f(p_{2})$ present $f(S)$ as the product of $f(1)$ and $f(2)$, and the morphisms $f(i_{1})$, $f(i_{2})$ present $f(S)$ as a coproduct of $f(1)$ and $f(2)$. Hence again, $f(S)$ is the sum of the objects $f(1)$ and $f(2)$.
A functor $\bS_{\cC}\to {\mathbf{A}}$ is the same as the choice of two objects $A$, $B$ in ${\mathbf{A}}$ together with a representative of the sum $A\oplus B$ and the corresponding structure maps.
\end{rem}
\begin{rem}
The object $0$ of $\emptyset_{\cC}$ is a zero object. If ${\mathbf{A}}$ belongs to $\cC$ and $f \colon \emptyset_{\cC}\to {\mathbf{A}}$ is a morphism, then $f(0)$ is an object satisfying $\id_{f(0)} = 0$. Since ${\mathbf{A}}$ is enriched over abelian groups, every object in ${\mathbf{A}}$ admits a morphism to $f(0)$ and a morphism from $f(0)$, both of which are necessarily unique. Hence $f(0)$ is a zero object of ${\mathbf{A}}$.
In fact, $\emptyset_{\cC}$ is the zero-object classifier in $\cC$.
\end{rem}
Recall the notation introduced in \cref{efuweifo24frergergreg}.
We let \begin{equation}\label{g4pogkp40gggt}
w \colon \Delta^{0}_{\cC}\sqcup \Delta^{0}_{\cC}\to \bS_{\cC}
\end{equation} be the morphism which classifies
the two objects $1$ and $2$. We furthermore let \begin{equation}\label{g4pogkp40gggt1}v \colon \emptyset\to \emptyset_{\cC}
\end{equation} be the canonical morphism from the initial object of $\cC$.
We now use that $\cC$ is a left-proper (see \cref{fiowefwefewfewf}), combinatorial simplicial model category (see \cref{vgioeoerberebg}). By \cite[Prop.~A.3.7.3]{htt}, for every set $\cS$ of cofibrations in $\cC$ the left Bousfield localization $L_{\cS}\cC$ (see \cite[Def. 3.3.1]{MR1944041} or \cite[Sec.~A.3.7]{htt} for a definition) exists and is again a combinatorial simplicial model category. We will consider the set $\cS:=\{v,w\}$ consisting of the cofibrations
\eqref{g4pogkp40gggt} and \eqref{g4pogkp40gggt1}.
\begin{prop}\label{rigerogergergre}
The fibrant objects in $L_{\{v,w\}}\cC$ are exactly the (marked) additive categories.
\end{prop}
\begin{proof}
The fibrant objects in $L_{\{v,w\}}\cC$ are the fibrant objects ${\mathbf{A}}$ in $\cC$ which are local for $\{v,w\}$, i.e., for which the maps of simplicial sets $\Map_{\cC}(v,{\mathbf{A}})$ and $\Map_{\cC}(w,{\mathbf{A}})$ are trivial Kan fibrations, see \cite[Prop.~A.3.7.3(3)]{htt}.
Let ${\mathbf{A}}$ be in $\cC$ and consider the lifting problem \begin{equation}\label{ecwcknbweckjwecewcecwec}
\xymatrix{\partial \Delta^{n}\ar[r]\ar[d]&\Map_{\cC}(\bS_{\cC},{\mathbf{A}})\ar[d]^{\Map_{\cC}(w,{\mathbf{A}})}\\\Delta^{n}\ar[r]\ar@{..>}[ur]&\Map_{\cC}( \Delta^{0}_{\cC}\sqcup \Delta^{0}_{\cC},{\mathbf{A}})}\ .
\end{equation}
Since the mapping spaces in $\cC$ are nerves of groupoids they are $2$-coskeletal.
Hence the lifting problem is uniquely solvable for all $n\ge 3$ without any condition on ${\mathrm{ma}thbf{A}}$.
It therefore suffices to consider the cases $n=0,1,2$.
\begin{enumerate}\item[n=0] The outer part of the diagram reflects the choice of two objects in ${\mathbf{A}}$, and a lift corresponds to a choice of a sum of these objects together with the corresponding structure maps.
Therefore the lifting problem is solvable if ${\mathbf{A}}$ admits sums of pairs of objects.
\item[n=1] The outer part of the diagram reflects the choice of
(marked) isomorphisms $A\to A^{\prime}$ and $B\to B^{\prime}$ in ${\mathbf{A}}$
and choices of objects $A\oplus B$ and $A^{\prime}\oplus B^{\prime}$ together with structure maps (inclusions and projections) representing the sums.
The lift then corresponds to the choice of a (marked) isomorphism $A\oplus B\to A^{\prime}\oplus B^{\prime}$.
In fact such an isomorphism exists (and is actually uniquely determined). In the marked case the fact that the isomorphism is marked is equivalent to the compatibility condition between the sums and the marking required for a marked additive category.
\item[n=2] The outer part reflects the choice of six objects $A,A',A''$ and $B,B',B''$ together with the choice of objects representing the sums $A\oplus B$, $A'\oplus B'$ and $A''\oplus B''$ together with structure maps and (marked) isomorphisms $a \colon A\to A'$, $a' \colon A'\to A''$, $a'' \colon A\to A''$, and $b \colon B\to B'$, $b' \colon B'\to B''$, $b'' \colon B\to B''$ respectively, and compatible (with the structure maps and hence uniquely determined) (marked) isomorphisms $a\oplus b \colon A\oplus B\to A'\oplus B'$, $a'\oplus b' \colon A'\oplus B'\to A''\oplus B''$, $a''\oplus b'' \colon A\oplus B\to A''\oplus B''$.
Thereby we have the relations $a''=a'\circ a$ and $b''=b'\circ b$.
A lift corresponds to a witness of the fact that $a''\oplus b''=(a'\oplus b')\circ (a\oplus b)$.
Hence the lift exists and is unique by the universal properties of the sums.
\end{enumerate}
We have
\[ \Map_{\cC}(v,{\mathbf{A}}) \colon \Map_{\cC}(\emptyset_{\cC},{\mathbf{A}})\to *\ .\]
The domain of this map is the space of zero objects in ${\mathbf{A}}$ which is either empty or a contractible Kan complex.
Consequently, $\Map_{\cC}(v,{\mathbf{A}})$ is a trivial Kan fibration exactly if ${\mathbf{A}}$ admits a zero object.
\end{proof}
\subsection{\texorpdfstring{$\infty$}{infty}-categories of (marked) pre-additive and additive categories}
In the present paper we use the language of $\infty$-categories as developed in \cite{Joyal}, \cite{htt} and \cite{cisin}.
Let
$\bC$ be a simplicial model category. By \cite[Thm.~1.3.4.20]{HA}, we have an equivalence of $\infty$-categories \begin{equation}\label{g4r5g4f34rf3f4f}
\mathrm{N}^{\mathrm{coh}}(\bC^{cf}) \simeq \bC^{c}[W^{-1}]\ ,
\end{equation}
where $\mathrm{N}^{\mathrm{coh}}(\bC^{cf})$ is the coherent nerve of the simplicial category of cofibrant-fibrant objects in $\bC$, and $\bC^{c}[W^{-1}]$ is the $\infty$-category obtained from (the nerve of) $\bC^{c}$ by inverting the weak equivalences of the model category structure, where $\bC^{c}$ denotes the ordinary category of cofibrant objects of $\bC$.
If $\bC$ is in addition combinatorial, then
$\bC^{c}[W^{-1}]$
is a presentable $\infty$-category \cite[Prop.~1.3.4.22]{HA}.
For the following we assume that $\bC$ is a combinatorial simplicial model category.
If $L_{\cS} \bC$ is the Bousfield localization of the model category structure on $\bC$ at a set $\cS$ of morphisms in $\bC^{cf}$, and $\mathrm{N}^{\mathrm{coh}}(\bC^{cf})\to L_{\cS}\mathrm{N}^{\mathrm{coh}}(\bC^{cf})$ is the localization at the same set of morphisms in the sense of \cite[Def.~5.2.7.2]{htt}, then using \cite[Rem.~1.3.4.27]{HA}
we get an equivalence of $\infty$-categories
\[ L_{\cS}\mathrm{N}^{\mathrm{coh}}( \bC^{cf})\simeq \mathrm{N}^{\mathrm{coh}}((L_{\cS} \bC)^{cf})\ .\]
We let $W_{\mathbf{preAdd}^{(+)}}$ denote the weak equivalences in $\mathbf{preAdd}^{(+)}$. Note that in $\mathbf{preAdd}^{(+)}$ all objects are cofibrant and fibrant.
\begin{ddd}
We define the \emph{$\infty$-category of (marked) pre-additive} categories by
\[\mathbf{preAdd}^{(+)}_{\infty}:= \mathbf{preAdd}^{(+)}[W_{\mathbf{preAdd}^{(+)}}^{-1}]\ .\qedhere\]
\end{ddd}
By a specialization of \eqref{g4r5g4f34rf3f4f} we have an equivalence of $\infty$-categories
\begin{equation}\label{ivfou89f43fvfeferferferf}
\mathrm{N}^{\mathrm{coh}}( \mathbf{preAdd}^{(+)}) \simeq \mathbf{preAdd}_{\infty}^{(+)} \ .
\end{equation}
A weak equivalence between fibrant objects in a Bousfield localization is a weak equivalence in the original model category. Consequently, a morphism between (marked) additive categories is
a weak equivalence in $L_{\{v,w\}}\cC$ if and only if it is a weak equivalence in (marked) pre-additive categories.
We let $W_{\mathbf{Add}^{(+)}} $ denote the weak equivalences in the Bousfield localization $L_{\{v,w\}}\mathbf{preAdd}^{(+)}$.
\begin{ddd}
We define the \emph{$\infty$-category of (marked) additive categories} by
\[\mathbf{Add}^{(+)}_{\infty}:= \mathbf{preAdd}^{(+)}[W_{\mathbf{Add}^{(+)}}^{-1}]\ .\qedhere\]
\end{ddd}
By specialization of \eqref{g4r5g4f34rf3f4f}, we then have an equivalence of $\infty$-categories \begin{equation}\label{ivfou89f43fvfeferferferf1}
\mathrm{N}^{\mathrm{coh}}(\mathbf{Add}^{(+)})\simeq \mathbf{Add}^{(+)}_{\infty}\ .
\end{equation}
\begin{rem}The equivalences \eqref{ivfou89f43fvfeferferferf} and \eqref{ivfou89f43fvfeferferferf1} can be shown directly using
\cite[Prop.~1.3.4.7]{HA}. Indeed, the categories $\mathbf{preAdd}^{(+)}$ and $\mathbf{Add}^{(+)}$ are enriched in groupoids and therefore fibrant simplicial categories. The interval object of ${\mathbf{A}}$ is given by ${\mathbf{A}}^{\Delta^{1}}$.
In the case of (marked) additive categories we must observe that ${\mathbf{A}}^{\Delta^{1}}$ is again (marked) additive.
\end{rem}
\begin{kor} \label{fiowefwefwfwf} \begin{enumerate} \item
The $\infty$-categories $\mathbf{preAdd}^{(+)}_{\infty}$ and
$ \mathbf{Add}^{(+)}_{\infty}$ are presentable.
\item We have an adjunction
\begin{equation}\label{vevnkvnrekovefveveervv}
L_{\oplus} \colon \mathbf{preAdd}^{(+)}_{\infty}\leftrightarrows \mathbf{Add}^{(+)}_{\infty} \colon \cF_{\oplus}\ ,
\end{equation}
where $\cF_{\oplus}$ is the inclusion of a full subcategory. \end{enumerate}
\end{kor}
The functor $L_{\oplus}$ is the additive completion functor.
In the following $\cC$ is a placeholder for ${\mathbf{Cat}}^{(+)}$, $\mathbf{Add}^{(+)}$ or $\mathbf{preAdd}^{(+)}$.
The category $\cC$ can be considered as a category enriched in groupoids and therefore as a strict $(2,1)$-category which will be denoted by $\cC_{(2,1)}$. A strict $(2,1)$-category gives rise to an $\infty$-category as follows. We first apply the usual nerve functor to the morphism categories of $\cC_{(2,1)}$ and obtain a category enriched in Kan complexes. Then we apply the coherent nerve functor and get a quasi-category which we will denote by $\Nerve_{2}(\cC_{(2,1)})$. The obvious functor $\Nerve(\cC_{(1,1)})\to \Nerve_{2}(\cC_{(2,1)})$ (where $\cC_{(1,1)}$ denotes the underlying ordinary category of $\cC$) sends equivalences to equivalences and therefore descends to a functor
\begin{equation}\label{rgieogrgregegegerg}
\cC_{\infty}\to \Nerve_{2}(\cC_{(2,1)})\ .
\end{equation}
\begin{prop}
\label{prop:2nerv}
The functor \eqref{rgieogrgregegegerg} is an equivalence. \end{prop}
\begin{proof}
Note that $\Nerve_{2}(\cC_{(2,1)})$ and $\Nerve^{coh}(\cC)$ are isomorphic by the definition of the simplicial enrichment of $\cC$.
We consider the following commuting diagram of quasi-categories
\[ \xymatrix{&\Nerve(\cC_{(1,1)})\ar[ld]_{\ell_{\cC}}\ar[dr] \ar[d]^{!}&&\\\cC_{\infty}\ar[r]_-{\simeq}^-{!!}&\Nerve^{coh}(\cC)\ar[r]_-{\cong}&\Nerve_{2}(\cC_{(2,1)})}\ .\]
The left triangle commutes since the morphism marked by $!$ is an explicit model of the localization morphism,
where we use
\eqref{ivfou89f43fvfeferferferf} (or \eqref{ivfou89f43fvfeferferferf1}, depending on the case) for the equivalence marked by $!!$.
The lower composition is then an explicit model of \eqref{rgieogrgregegegerg}.
\end{proof}
\section{Applications}\label{erigjieorgergree34t34t34t}
\subsection{Localization preserves products} \label{efweoifoewfewfewf3r323r2r}
We show that the localizations
\[ \ell_{\cC^{(+)}} \colon \cC^{(+)}\to \cC^{(+)}_{\infty} \]
for $\cC$ in $\{{\mathbf{Cat}}\ ,\mathbf{preAdd}\ ,\mathbf{Add}\}$
preserve products.
Let $I$ be a set.
Then we consider the functor
\[ \ell_{I,\cC}\colon \cC^{I}\to \cC_{\infty}^{I} \]
defined by post-composition with $\ell_{\cC}$.
For every category $\bC$ with products we have a functor $\prod_{I} \colon \bC^{I}\to \bC$. We apply this to $\bC=\cC$ and $\bC=\cC_{\infty}$.
\begin{prop}\label{wefiojewwefewf43t546466}
We have an equivalence of functors
\[ \ell_{\cC}\circ \prod_{I} \xrightarrow{\simeq}
\prod_{I}\ell_{I,\cC} \colon \cC^{I}\to \cC_{\infty}\ . \]
\end{prop}
\begin{proof}
We start with the case $\cC=\mathbf{preAdd}^{(+)}$ or $\cC={\mathbf{Cat}}^{(+)}$. We use that $\cC$ has a combinatorial model category structure in which all objects are cofibrant and fibrant. It is a general fact that in this case the localization $\ell \colon \cC\to\cC_{\infty}$ preserves products. Here is the (probably much too complicated) argument.
We can
consider the injective model category structure on the diagram category $ \cC^{I}$. Since $I$ is discrete
one easily observes that all objects in this diagram category are fibrant again.
So we can take the identity as a fibrant replacement functor for $\cC^{I}$. This gives the equivalence
\[ \ell_{\cC} \circ \prod_{I} \xrightarrow{\simeq} \prod_{I}\ell_{I,\cC}\ ,\]
(e.g. by specializing \cite[Prop.~13.5]{bunke}).
In order to deduce the assertion for additive categories we consider the inclusion functor
$\cF_{\oplus,1} \colon \mathbf{Add}^{(+)}\to \mathbf{preAdd}^{(+)}$. This functor preserves weak equivalences and therefore descends essentially uniquely to the functor $\cF_{\oplus}$ in \eqref{vevnkvnrekovefveveervv} such that
\[ \cF_{\oplus}\circ \ell_{\mathbf{Add}^{(+)}}\simeq \ell_{\mathbf{preAdd}^{(+)}}\circ \cF_{\oplus,1}\ .\]
The functor $\cF_{\oplus}$ is a right-adjoint which preserves and detects limits.
We do not claim that $\cF_{\oplus,1}$ is a right-adjoint, but it clearly preserves products by inspection.
We let $\cF_{I,\oplus,1}$ and $\cF_{I,\oplus}$ be the factorwise application of $\cF_{\oplus,1}$ and $\cF_{\oplus}$.
With this notation we have an equivalence
\[ \cF_{\oplus,1}\circ \prod_{I}\cong \prod_{I} \circ \cF_{I,\oplus,1}\ . \]
The assertion in the case $\cC=\mathbf{Add}^{(+)}$ now follows from the chain of equivalences \begin{align*}
\cF_{\oplus}\circ \ell_{\mathbf{Add}^{(+)}}\circ \prod_{I}&\simeq
\ell_{\mathbf{preAdd}^{(+)}}\circ \cF_{\oplus,1}\circ \prod_{I}\\&\simeq
\ell_{\mathbf{preAdd}^{(+)}}\circ \prod_{I} \circ \cF_{I,\oplus,1}\\&\simeq
\prod_{I} \circ \ell_{I,\mathbf{preAdd}^{(+)}} \circ \cF_{I,\oplus,1}\\&\simeq
\prod_{I} \circ \cF_{I,\oplus}\circ \ell_{I,\mathbf{Add}^{(+)}} \\&\simeq
\cF_{\oplus} \circ \prod_{I} \circ \ell_{I,\mathbf{Add}^{(+)}}
\end{align*}
by removing $\cF_{\oplus}$.
\end{proof}
\subsection{Rings and Modules}\label{rgiuerhgweergergergeg}
A unital ring $R$ can be considered as a pre-additive category $\bR$ with one object $*$ and ring of endomorphisms $\mathrm{Hom}_{\bR}(*,*):=R$.
The category of finitely generated free $R$-modules $\Mod^{\mathrm{fg} ,\mathrm{free}}(R)$ is an additive category.
We have a canonical functor
$\bR\to \Mod^{\mathrm{fg} ,\mathrm{free}}(R)$ sending $*$ to $R$
which presents $\Mod^{\mathrm{fg} ,\mathrm{free}}(R)$ as the additive completion of $\bR$.
This fact is well-known, see e.g. \cite[Sec.~2]{davis_lueck}.
In the following we provide a precise formulation using the language of $\infty$-categories.
Recall the sum-completion functor $L_{\oplus}$ from \cref{fiowefwefwfwf}.
\begin{prop}\label{gueiurgrgerger}
The morphism of pre-additive categories $\bR\to \Mod^{\mathrm{fg} ,\mathrm{free}}(R) $ induces an equivalence
\[ L_{\oplus} (\ell_{\mathbf{preAdd}}(\bR))\simeq \ell_{\mathbf{Add}}(\Mod^{\mathrm{fg} ,\mathrm{free}}(R))\ .\]
\end{prop}
\begin{proof}
We must show that
\[ \Map_{\mathbf{preAdd}_{\infty}}(\ell_{\mathbf{preAdd}}(\Mod^{\mathrm{fg} ,\mathrm{free}}(R)),\ell_{\mathbf{preAdd}}({\mathbf{B}}))\to \Map_{\mathbf{preAdd}_{\infty}}(\ell_{\mathbf{preAdd}}(\bR),\ell_{\mathbf{preAdd}}({\mathbf{B}})) \]
is an equivalence for every additive category ${\mathbf{B}}$. In view of \eqref{ivfou89f43fvfeferferferf}, this is equivalent to the fact that
\[ \Map_{\mathbf{preAdd} }( \Mod^{\mathrm{fg} ,\mathrm{free}}(R) , {\mathbf{B}}) \to \Map_{\mathbf{preAdd} }( \bR , {\mathbf{B}} ) \]
is a trivial Kan fibration. Here we use that by \eqref{ivfou89f43fvfeferferferf} the mapping spaces in $\mathbf{preAdd}_{\infty}$ are represented by the simplicial mapping spaces in $\mathbf{preAdd}$, see
\cite[Sec.~2.2.2]{htt}. The proof is very similar to the proof of \cref{rigerogergergre}.
We must check the lifting property against the inclusions $\partial \Delta^{n}\to \Delta^{n}$. Again we must only consider the case $n\le 2$.
\begin{enumerate}
\item[n=0] A functor $\bR\to {\mathbf{B}}$ (sending $*$ to an $R$-module $B$) determines a functor
\[\Mod^{\mathrm{fg} ,\mathrm{free}}(R)\to {\mathbf{B}}\]
which sends $R^{k}$ to $B^{\oplus k}$.
\item[n=1] An isomorphism of functors $\bR\to {\mathbf{B}}$ given by an isomorphism of objects $f \colon B\to B^{\prime}$ which is compatible with the $R$-module structures induces an isomorphism of induced functors
$ \Mod^{\mathrm{fg} ,\mathrm{free}}(R)\to {\mathbf{B}}$ which on $R^{k}$ is given by $\oplus_{k} f \colon B^{\oplus k}\to B^{\prime,\oplus k}$.
\item[n=2] The existence of the lift expresses the naturality of the isomorphisms obtained in the case $n=1$.\qedhere
\end{enumerate}
\end{proof}
In order to understand the category of finitely generated projective modules $\Mod^{\mathrm{fg} ,\mathrm{proj}}(R)$ and the morphism $\bR\to \Mod^{\mathrm{fg} ,\mathrm{proj}}(R)$ in a similar manner we must consider idempotent-complete additive categories.
Let ${\mathbf{A}}$ be an additive category.
\begin{ddd}\label{vijoefvfbevev}
${\mathbf{A}}$ is \emph{idempotent complete} if for every object $A$ in ${\mathbf{A}}$ and projection $e$ in $\mathrm{End}_{{\mathbf{A}}}(A)$ there exists an isomorphism $A\cong e(A)\oplus e(A)^{\perp}$ such that $e(A)$ and $e(A)^{\perp}$ are images of $e$ and $\id_{A}-e$.
\end{ddd}
The last part of this definition more precisely means that there exist morphisms $e(A)\to A$ and $e(A)^{\perp}\to A$ such that the diagrams
\[\xymatrix{A\ar[d]^{e}&\ar[l]_(0.7){\cong}e(A)\oplus e(A)^{\perp}\ar[d]^{\pr_{e(A)}}&e(A)\oplus e(A)^{\perp}\ar[d]^{\pr_{e(A)^{\perp}} }\ar[r]^(0.7){\cong}&A\ar[d]^{\id_{A}- e} \\ A&\ar[l]_(0.7){\cong} e(A)\oplus e(A)^{\perp}&e(A)\oplus e(A)^{\perp}\ar[r]^(0.7){\cong}&A}\]
commute.
Let now ${\mathbf{A}}$ be a marked additive category.
\begin{ddd}
${\mathbf{A}}$ is \emph{idempotent complete} if the underlying additive category $\cF_{+}({\mathbf{A}})$ is idempotent complete (\cref{vijoefvfbevev}), and if in addition for every $A$ in ${\mathbf{A}}$, every projection $e$ on $A$, and every
marked isomorphism $f \colon A\to A^{\prime}$ the induced isomorphism $e(A)\to e^{\prime}(A')$ is marked, where $e^{\prime}:=f\circ e\circ f^{-1}$.
\end{ddd}
We let $\mathbf{Add}^{(+),\mathrm{idem} }$ be the full subcategory of $\mathbf{Add}^{(+)}$ of idempotent complete small (marked) additive categories.
We can characterize idempotent completeness of a marked additive category as a lifting property. To this end we consider the following pre-additive category $\bE_{\mathbf{preAdd}}$:
\begin{enumerate}
\item $\bE_{\mathbf{preAdd}}$ has the object $*$.
\item The morphisms of $\bE_{\mathbf{preAdd}}$ are generated by $\id_{*}$ and $e$ subject to the relation $e^{2}=e$.\end{enumerate}
We then consider the functor \begin{equation}\label{rev3rg3rgvervger43}
u \colon \bE_{\mathbf{preAdd}}\to \bS_{\mathbf{preAdd}}
\end{equation}
(see \cref{gioegregreget34t34t34t34t} for $\bS_{\mathbf{preAdd}}$)
which sends $*$ to $S$ and $e$ to $ i_{1}\circ p_{1}$. In the marked case we consider
\[ u \colon \bE_{\mathbf{preAdd}^{+}}\to \bS_{\mathbf{preAdd}^{+}} \]
obtained from \eqref{rev3rg3rgvervger43} by applying the functor $\mathrm{mi}$ marking the identities.
Then one checks:
\begin{lem}
A (marked) additive category ${\mathbf{A}}$ is idempotent complete if and only if it is local with respect to the map $u$.
\end{lem}
\begin{proof}
The proof is similar to the proof of \cref{rigerogergergre}.\end{proof}
\begin{kor}
The fibrant objects in the Bousfield localization $L_{\{u,v,w\}}\mathbf{preAdd}^{(+)}$ are exactly the idempotent-complete small (marked) additive categories.
\end{kor}
{We consider the equivalences
$W_{\mathbf{Add}^{(+),\mathrm{idem} }}$ in the Bousfield localization $L_{\{u,v,w\}}\mathbf{preAdd}^{(+)}$ and the $\infty$-category
\[\mathbf{Add}^{(+),\mathrm{idem} }_{\infty}:= \mathbf{preAdd}^{(+)}[W_{\mathbf{Add}^{(+),\mathrm{idem} }}^{-1}]\ .\]
Using \eqref{g4r5g4f34rf3f4f},} we have an equivalence \begin{equation}\label{ivfou89f43fvfeferferferf11}
\mathrm{N}^{\mathrm{coh}}({\mathbf{Add}^{(+),\mathrm{idem}}})\simeq\mathbf{Add}^{(+),\mathrm{idem} }_{\infty} \ .
\end{equation}
We obtain the analog of \cref{fiowefwefwfwf}.
\begin{kor}
\begin{enumerate}
\item The $\infty$-category $\mathbf{Add}^{(+),\mathrm{idem} }_{\infty}$ is presentable.
\item We have an adjunction
\[ L_{\mathrm{idem}} \colon \mathbf{Add}^{(+)}_{\infty}\leftrightarrows \mathbf{Add}^{(+),\mathrm{idem} }_{\infty} \colon\cF_{\mathrm{idem}} \]
where $ \cF_{\mathrm{idem}}$ is the inclusion and $L_{\mathrm{idem}}$ is the idempotent completion functor.
\item We have an adjunction
\[ L_{\oplus,\mathrm{idem} } \colon \mathbf{preAdd}_{\infty}^{(+)}\leftrightarrows \mathbf{Add}^{(+),\mathrm{idem} }_{\infty} \colon \cF_{\oplus,\mathrm{idem} } \]
where $\cF_{\oplus,\mathrm{idem} }\simeq \cF_{\oplus}\circ \cF_{\mathrm{idem}}$ and $L_{\oplus,\mathrm{idem} }\simeq L_{\mathrm{idem}}\circ L_{\oplus}$.
\end{enumerate}
\end{kor}
\begin{prop}\label{vgirejgoiergergergergregergergerg}The morphism of pre-additive categories $\bR\to \Mod^{\mathrm{fg} ,\mathrm{proj}}(R)$ induces an
equivalence
$L_{\oplus,\mathrm{idem} } (\ell_{\mathbf{preAdd}}(\bR))\simeq \ell_{\mathbf{Add}^{\mathrm{idem}}}(\Mod^{\mathrm{fg} ,\mathrm{proj}}(R))$.
\end{prop}
\begin{proof}
The proof is similar to \cref{gueiurgrgerger}.
\end{proof}The following is a precise version of the assertion that $\Mod^{\mathrm{fg} ,\mathrm{proj}}(R)$ is the idempotent completion of $\Mod^{\mathrm{fg} ,\mathrm{free}}(R)$.
\begin{kor}The morphism of additive categories $\Mod^{\mathrm{fg} ,\mathrm{free}}(R)\to \Mod^{\mathrm{fg} ,\mathrm{proj}}(R)$ induces
an equivalence
\[ \ell_{\mathbf{Add}^{\mathrm{idem}}}(\Mod^{\mathrm{fg} ,\mathrm{proj}}(R))\simeq L_{\mathrm{idem}}( \ell_{\mathbf{Add}}(\Mod^{\mathrm{fg} ,\mathrm{free}}(R)))\ .\]
\end{kor}
\subsection{\texorpdfstring{$G$}{G}-coinvariants}\label{erbgkioergergergegreg}
Let $G$ be a group. In this subsection we want to calculate explicitly the homotopy $G$-orbits of pre-additive categories with trivial $G$-action. The precise formulation of the result is \cref{weoijoijvu9bewewfewfwef}. We then discuss applications to group rings.
By $BG$ we denote the groupoid with one object $*$ and group of automorphisms $G$. The functor category $\Fun(BG,\bC)$ is the category of objects in $\bC$ with $G$-action and equivariant morphisms.
The underlying object or morphism of an object or morphism in $\Fun(BG,\bC)$ is the evaluation of the functor or morphism at $*$.
If $\bI$ is a category and $F \colon \bC\to \bD$ is a functor, then we will use the notation
\begin{equation}\label{f34iuh4fiuhrif894r43r34r3434r34r34r31}
F_{\bI} \colon \Fun(\bI,\bC)\to \Fun(\bI,\bD)\end{equation} for the functor defined by post-composition with $F$.
We consider a (marked) pre-additive category ${\mathbf{A}}$. It gives rise to a constant functor
$\underline{{\mathbf{A}}}$ in $\Fun(BG,\mathbf{preAdd}^{(+)})$ and hence to an object
$\ell_{\mathbf{preAdd}^{(+)},BG}(\underline{{\mathbf{A}}})$ in $\Fun(BG,\mathbf{preAdd}^{(+)}_{\infty})$.
Since the $\infty$-category $\mathbf{preAdd}^{(+)}_{\infty}$ is presentable, it is cocomplete and the colimit in the following theorem exists. Recall the functor $- \sharp -$ from \cref{defn_functor_sharp}.
\begin{theorem}\label{weoijoijvu9bewewfewfwef}
We have a natural equivalence
\[ \colim_{ BG}\ell_{\mathbf{preAdd}^{(+)},BG}(\underline{{\mathbf{A}}})\simeq \ell_{\mathbf{preAdd}^{(+)}} ({\mathbf{A}}\sharp BG)\ .\]
\end{theorem}
\begin{rem}
Note that the order of taking the colimit and the localization is relevant.
Indeed, we have $\colim_{BG} \underline{{\mathbf{A}}}\cong {\mathbf{A}}$ and therefore
$\ell_{\mathbf{preAdd}^{(+)}}(\colim_{BG} \underline{{\mathbf{A}}})\simeq \ell_{\mathbf{preAdd}^{(+)}}({\mathbf{A}})$.
\end{rem}
\begin{rem}\label{ergerg34t3t3434t3}
Note that the unmarked version of \cref{weoijoijvu9bewewfewfwef}
can be deduced from the marked version using the functor $\mathrm{ma}$ introduced in \eqref{f3rfkj34nfkjf3f3f3f43f}.
\end{rem}
In order to avoid case distinctions, we will formulate the details of the proof in the marked case. The unmarked case can be shown similarly, or alternatively deduced formally from the marked case as noted in \cref{ergerg34t3t3434t3}.
Since $\mathbf{preAdd}^{+}$ has a cofibrantly generated model category structure, the projective model category structure on $\Fun(BG,\mathbf{preAdd}^{+})$ exists \cite[Thm.~11.6.1]{MR1944041}.
For every cofibrant replacement functor $l \colon L \to \id_{\Fun(BG,\mathbf{preAdd}^{+})}$ for this projective model category structure we have an equivalence \begin{equation}\label{fwreoiuh24iufhi3rfwrfwefwef}
\ell_{\mathbf{preAdd}^{+}}\circ \colim_{BG}\circ L\simeq \colim_{BG}\circ \ell_{\mathbf{preAdd}^{+},BG}
\end{equation}
of functors from $\Fun(BG,\mathbf{preAdd}^{+})$ to $\mathbf{preAdd}^{+}_{\infty}$,
see e.g. \cite[Prop.~15.3]{bunke} for an argument.
We derive the formula asserted in \cref{weoijoijvu9bewewfewfwef} by considering a particular choice of a
cofibrant replacement functor.
\begin{ddd}\label{rwgioorgfgwergwf}Let $\tilde G$ in $\Fun(BG,\mathbf{Groupoids})$ be the groupoid with $G$-action given as follows:
\begin{enumerate}
\item The objects of $\tilde G$ are the elements of $G$.
\item For every pair of objects $g,g^{\prime}$ there is a unique morphism $g\to g^{\prime}$.
\item The group $G$ acts on $\tilde G$ by left-multiplication.
\end{enumerate}
The $G$-groupoid $\tilde G$ is often called the transport groupoid of $G$.\end{ddd}
We now define the functor
\[ L:=-\sharp \tilde G \colon \Fun(BG,\mathbf{preAdd}^{+}) \to \Fun(BG,\mathbf{preAdd}^{+}) \]
(more precisely $L(\bD)$ is the $G$-object obtained from the $G\times G$-object $\bD\sharp \tilde G$ in $\mathbf{preAdd}^{+}$ by restriction of the action along the diagonal $G\to G\times G$).
We have a natural transformation
$L\to \id$ induced by the morphism of $G$-groupoids $\tilde G\to \underline{\Delta^{0}_{{\mathbf{Cat}}}}$, where we use the canonical isomorphism $\bD\sharp \underline{\Delta^{0}_{{\mathbf{Cat}}}}\cong \bD$.
\begin{lem}\label{lem:cofibrant-replacement}
The functor $L$ together with the transformation $L\to \id$
is a cofibrant replacement functor for the projective model category structure on $\Fun(BG,\mathbf{preAdd}^{+})$. \end{lem}
\begin{proof} Since $\operatorname{Res}^{G}_{\{1\}}(\tilde G)\to \Delta^{0}_{{\mathbf{Cat}}}$ is a (non-equivariant) equivalence of groupoids and for every object ${\mathbf{A}}$ in $ \mathbf{preAdd}^{+} $ the functor ${\mathbf{A}}\sharp - \colon \mathbf{Groupoids}\to \mathbf{preAdd}^{+}$ preserves equivalences (see the proof of \cref{reiofweiofweewf}), the morphism $\bD\sharp \tilde G\to \bD$ is a weak equivalence in the projective model category structure on $\Fun(BG,\mathbf{preAdd}^{+})$ for every $\bD$ in $\Fun(BG,\mathbf{preAdd}^{+})$.
We must show that
$L(\bD)$ is cofibrant. To this end we consider the lifting problem
\[\xymatrix{\emptyset\ar[r]\ar[d]&{\mathbf{A}}\ar[d]^{f} \\ \bD\sharp \tilde G\ar[r]^{u}\ar@{-->}[ur]^{c}&{\mathbf{B}}} \]
where $f$ is a trivial fibration in $\mathbf{preAdd}^{+}$. Since $f$ is surjective on objects
we can find an inverse marked equivalence (possibly non-equivariant) $g \colon {\mathbf{B}}\to {\mathbf{A}}$ for $f$ such that $f\circ g=\id_{{\mathbf{B}}}$. The map
$\bD\sharp \{1\}\xrightarrow{u_{|\bD\sharp \{1\}}} {\mathbf{B}}\xrightarrow{g}{\mathbf{A}}$ can be uniquely extended to an equivariant morphism $c$ which is the desired lift. \end{proof}
\begin{proof}[Proof of \cref{weoijoijvu9bewewfewfwef}]
According to \eqref{fwreoiuh24iufhi3rfwrfwefwef} and \cref{lem:cofibrant-replacement}, we must calculate the object
\[ \colim_{BG} L(\underline{{\mathbf{A}}})\cong \colim_{BG} (\underline{{\mathbf{A}}}\sharp \tilde G) \]
for an object ${\mathbf{A}}$ of $\mathbf{preAdd}^{+}$.
To this end, we note that for a fixed marked pre-additive category $\bD$, we have by \eqref{egiherigergregergr1} an adjunction
\[ \bD\sharp- \colon \mathbf{Groupoids}\leftrightarrows \mathbf{preAdd}^{+} \colon \Fun^{+}_{\mathbf{preAdd}^{+}}(\bD,-)\ .\]
Since $\bD\sharp-$ is a left-adjoint, it commutes with colimits.
Consequently, we get \begin{equation}\label{rverv43rgfrrerg}
\colim_{BG} (\underline{{\mathbf{A}}}\sharp \tilde G)\simeq {\mathbf{A}}\sharp \colim_{BG} \tilde G\ .
\end{equation}
The assertion of \cref{weoijoijvu9bewewfewfwef} now follows from a combination of the relations \eqref{fwrefwfkj2nirkfrwefwfw}, \eqref{rverv43rgfrrerg} and \eqref{fwreoiuh24iufhi3rfwrfwefwef}.
\end{proof}
Let $R$ be a unital ring.
By $R[G]$ we denote the group ring of $G$ with coefficients in $R$.
Recall from \cref{rgiuerhgweergergergeg} that we can consider unital rings as pre-additive categories which will be denoted by the corresponding bold-face letters.
\begin{lem}\label{ergioergreger34t3t}
We have an equivalence
\[ \colim_{BG} \ell_{\mathbf{preAdd},BG}(\underline{\bR})\simeq \ell_{\mathbf{preAdd}}(\mathbf{R[G]})\ .\]
\end{lem}
\begin{proof}
By \cref{weoijoijvu9bewewfewfwef}, we have an equivalence
\[ \colim_{ BG} \ell_{\mathbf{preAdd},BG}(\underline{\bR})\simeq \ell_{\mathbf{preAdd}}(\bR\sharp BG)\ .\]
Unfolding the definitions (see e.g.~\cref{defn_functor_sharp}) we observe that
$\bR\sharp BG$ has one object, and its ring of endomorphisms is given by
$R\otimes_{\Z }\Z[G]\cong R[G]$.
\end{proof}
\begin{lem}\label{rierhigregegerg43t34t34t}
We have equivalences
\[ \colim_{BG}\ell_{\mathbf{preAdd},BG}( \underline{ \Mod^{\mathrm{fg} ,\mathrm{free}}(R)})\simeq \ell_{\mathbf{preAdd}}(\Mod^{\mathrm{fg} ,\mathrm{free}}(R[G])) \]
and
\[ \colim_{BG}\ell_{\mathbf{preAdd},BG} (\underline{ \Mod^{\mathrm{fg} ,\mathrm{proj}}(R)})\simeq \ell_{\mathbf{preAdd}}(\Mod^{\mathrm{fg} ,\mathrm{proj}}(R[G])) \]
\end{lem}
\begin{proof}
By \cref{gueiurgrgerger},
we have an equivalence
\[ \colim_{BG} \ell_{\mathbf{Add},BG}(\underline{\Mod^{\mathrm{fg} ,\mathrm{free}}(R)})\simeq\colim_{BG} L_{ \oplus,BG}( \ell_{\mathbf{preAdd},BG}(\underline{\bR }))\ .\]
Since $L_{\oplus}$ is a left-adjoint, it commutes with colimits.
Therefore,
\[ \colim_{BG} L_{ \oplus,BG}( \ell_{\mathbf{preAdd},BG}(\underline{\bR }))\simeq L_{\oplus}(\colim_{ BG}\ell_{\mathbf{preAdd},BG}(\underline{\bR})) \ .\]
By \cref{ergioergreger34t3t}, we have the equivalence
\[ L_{\oplus}(\colim_{BG}\ell_{\mathbf{preAdd},BG}(\underline{\bR})) \simeq L_{\oplus} (\ell_{\mathbf{preAdd}}(\mathbf{R[G]})) \ .\]
Finally, by \cref{gueiurgrgerger} again
\[ L_{\oplus} (\ell_{\mathbf{preAdd}}(\mathbf{R[G]}))\simeq \ell_{\mathbf{preAdd}}(\Mod^{\mathrm{fg} ,\mathrm{free}}(R[G]))\ .\]
The second equivalence is shown similarly, using \cref{vgirejgoiergergergergregergergerg} and $L_{\oplus,\mathrm{idem} }$ instead of \cref{gueiurgrgerger} and $L_{\oplus}$.
\end{proof}
\begin{ex}
A unital ring $R$ gives rise to two canonical marked pre-additive categories $\mathrm{mi}(\bR)$ (only the identity is marked) and $\mathrm{ma}(\bR)$ (all units are marked).
Then
\[ \colim_{BG}\ell_{\mathbf{preAdd}^{+},BG}(\mathrm{mi}(\bR))\simeq \ell_{\mathbf{preAdd}^{+}}(\mathbf{R[G]}^{\mathrm{can}_{G}})\ ,\]
where the marked isomorphisms in $\mathbf{R[G]}^{\mathrm{can}_{G}}$ are the elements of $G$ (canonically considered as elements in $R[G]$).
In contrast,
\[ \colim_{BG}\ell_{\mathbf{preAdd}^{+},BG}(\mathrm{ma}(\bR))=\ell_{\mathbf{preAdd}^{+}}(\mathbf{R[G]}^{\mathrm{can}})\ ,\]
where the marked isomorphisms in $\mathbf{R[G]}^{\mathrm{can}}$ are the canonical units in $R[G]$, i.e., the elements of the form $ug$ for a unit $u$ of $R$ and an element $g$ of $G$.
\end{ex}
Let us now use the general machine in order to construct interesting functors on the orbit category
$G\mathbf{Orb}$ of $G$. The group $G$ with the left action is an object of $G\mathbf{Orb}$.
Since the right action of $G$ on itself implements an isomorphism $\mathrm{End}_{G\mathbf{Orb}}(G)\cong G$, we get a fully faithful functor
\begin{equation}
\label{eq_BG_GOrb}
i \colon BG\to G\mathbf{Orb}\ .
\end{equation}
If $\bC$ is a presentable $\infty$-category, then we have an adjunction
\begin{equation}
\label{eq_BG_GOrb_Kan}
i_{!} \colon \Fun(BG,\bC)\leftrightarrows \Fun(G\mathbf{Orb},\bC) \colon i^{*}\ .
\end{equation}
The functor $i_{!}$ is the left Kan extension functor along $i$.
We now consider the composition
\begin{align}
\mathbf{preAdd} & \quad \xrightarrow{\quad\mathclap{\underline{(-)}}\quad} \quad \Fun(BG,\mathbf{preAdd})\notag\\
& \quad \xrightarrow{\quad\mathclap{\ell_{\mathbf{preAdd},BG}}\quad} \quad \Fun(BG,\mathbf{preAdd}_{\infty})\notag\\
& \quad \xrightarrow{\quad\mathclap{i_{!}}\quad} \quad \Fun(G\mathbf{Orb},\mathbf{preAdd}_{\infty})\label{revelrnjjkrnfkjervervverveverv}
\end{align}
which we denote by $J^G$.
We are interested in the calculation of the value $J^{G}({\mathbf{A}})(G/H)$ for
a subgroup $H$.
Let ${\mathbf{A}}$ be a pre-additive category.
\begin{lem}\label{vfuiheiwufewfewfefewfewf}
We have an equivalence
\[ J^{G}({\mathbf{A}})(G/H)\simeq \ell_{\mathbf{preAdd}}( {\mathbf{A}}\sharp BH)\ .\]
\end{lem}
\begin{proof}
The functor $S\mapsto (G\times_{H}S\to G/H)$ induces an equivalence of categories
$H\mathbf{Orb}\xrightarrow{\simeq} G\mathbf{Orb}/(G/H)$
which restricts to an equivalence
\begin{equation}\label{welrkerioerrfwfwefewf}
BH\simeq i/(G/H)\ ,
\end{equation}
where $i/(G/H)$ denotes the slice of $i \colon BG\to G\mathbf{Orb}$ over $G/H$.
Using the pointwise formula for the left Kan extension functor $i_{!}$ at the equivalence marked by $!$ we get
\begin{align*}
J^{G}({\mathbf{A}})(G/H)&\simeq i_{!}(\ell_{\mathbf{preAdd},BG}(\underline{{\mathbf{A}}}))(G/H)\\&\stackrel{!}{\simeq}
\colim_{(i(*)\to G/H)\in i/(G/H)} \ell_{\mathbf{preAdd},BG}(\underline{{\mathbf{A}}})(*)\\
&\stackrel{!!}{\simeq}\colim_{BH} \ell_{\mathbf{preAdd},BH}(\underline{{\mathbf{A}}}) \\
&\stackrel{\cref{weoijoijvu9bewewfewfwef}}{\simeq} \ell_{\mathbf{preAdd}}({\mathbf{A}}\sharp BH)\ ,
\end{align*}
where at $!!$ we use \eqref{welrkerioerrfwfwefewf} and
that the argument of the colimit is a constant functor.
\end{proof}
The case ${\mathbf{A}}:=\bR$ for a ring $R$ leads to a functor
\[ J^{G}(\bR) \colon G\mathbf{Orb} \to \mathbf{preAdd}_{\infty} \] whose value at
$G/H$ is given by $J^{G}(\bR)(G/H)\simeq \ell_{\mathbf{preAdd}}(\mathbf{R[H]})$.
If we postcompose by $L_{\oplus}$ and use \cref{gueiurgrgerger}, then we get a functor
\[ L_{\oplus,G\mathbf{Orb}}\circ
J^{G}(\bR) \colon G\mathbf{Orb} \to \mathbf{Add}_{\infty} \]
with values $L_{\oplus,G\mathbf{Orb}}\circ
J^{G}(\bR)(G/H)\simeq \ell_{\mathbf{Add}}(\Mod^{\mathrm{fg} ,\mathrm{free}}(R[H]))$.
The composition
\[ K_{G\mathbf{Orb}}\circ L_{\oplus,G\mathbf{Orb}}\circ
J^{G}(\bR) \colon G\mathbf{Orb}\to \Sp \]
therefore has the same values as the functor
representing the equivariant $K$-homology with $R$-coefficients constructed by \cite{davis_lueck}.
\subsection{\texorpdfstring{$G$}{G}-invariants}\label{gijeriogjeroigergregeg}
Let $G$ be a group.
In this section we calculate the homotopy $G$-invariants of marked pre-additive categories with $G$-action.
The precise formulation is \cref{rgier9oger}.
Let
${\mathbf{A}}$ be an object of $\Fun(BG,\mathbf{preAdd}^{(+)})$, i.e., a (marked) pre-additive category with $G$-action.
\begin{ddd}\label{ddd_hat_A_G}
We define a (marked) pre-additive category $\hat {\mathbf{A}}^{G}$ as follows:
\begin{enumerate}
\item The objects of $\hat {\mathbf{A}}^{G}$ are pairs $(A,\rho)$ of an object $A$ of ${\mathbf{A}}$
and a collection
$\rho:=(\rho(g))_{g\in G}$, where $\rho(g) \colon A\to g(A)$ is a (marked) isomorphism in ${\mathbf{A}}$ and the equality
\[g(\rho(h)) \circ \rho(g)=\rho(hg)\]
holds true
for all pairs $g,h$ in $G$.
\item\label{guerioggergerg} The morphisms $(A,\rho)\to (A^{\prime},\rho^{\prime})$ in $\hat {\mathbf{A}}^{G}$ are morphisms $a \colon A\to A^{\prime}$ in ${\mathbf{A}}$ such that the equality $g(a)\circ \rho(g)=\rho^{\prime}(g)\circ a$ holds true for all $g$ in $G$.
\item The enrichment of $\hat {\mathbf{A}}^{G}$ over abelian groups is inherited from the enrichment of ${\mathbf{A}}$.
\item (in the marked case) The marked isomorphisms in $\hat {\mathbf{A}}^{G}$ are those morphisms which are marked isomorphisms in ${\mathbf{A}}$.\qedhere
\end{enumerate}
\end{ddd}
\begin{ex}\label{giurhgergeg34gergerge}
If ${\mathbf{A}}$ is an object of $\mathbf{preAdd}^{(+)}$, then we will shorten the notation and write
$\widehat{{\mathbf{A}}}^{G}$ for $\widehat{\underline{{\mathbf{A}}}}^{G}$, where $\underline{{\mathbf{A}}}$ is ${\mathbf{A}}$ with the trivial $G$-action.
In this case
$\widehat{{\mathbf{A}}}^{G}$ is the category of objects of ${\mathbf{A}}$ with an action of $G$ by (marked) isomorphisms, and equivariant morphisms. In the marked case, the marked isomorphisms in $\widehat{{\mathbf{A}}}^{G}$ are those which are marked in ${\mathbf{A}}$.
\end{ex}
Recall the notation \eqref{f34iuh4fiuhrif894r43r34r3434r34r34r31}.
\begin{theorem}\label{rgier9oger}
We have an equivalence
\[ \lim_{BG}\ell_{\mathbf{preAdd}^{(+)},BG}({\mathbf{A}})\simeq \ell_{\mathbf{preAdd}^{(+)}}(\hat {\mathbf{A}}^{G})\ .\]
\end{theorem}
\
\begin{rem}\label{geroigergergerg}
If ${\mathrm{ma}thbf{A}}$ is a pre-additive category with $G$-action, then
the unmarked version of \cref{rgier9oger} can be obtained from the marked versions by
\begin{eqnarray*} \lim_{BG}\ell_{\mathrm{ma}thbf{preAdd}^{(+)},BG}({\mathrm{ma}thbf{A}})&\simeq& \cF_{+}(\mathrm{ma}(\lim_{BG}\ell_{\mathrm{ma}thbf{preAdd}^{(+)},BG}{\mathrm{ma}thbf{A}})))\\&\simeq&
\cF_{+}( \lim_{BG}\ell_{\mathrm{ma}thbf{preAdd}^{(+)},BG}(\mathrm{ma}_{BG}({\mathrm{ma}thbf{A}})))
\\&\simeq& \ell_{\mathrm{ma}thbf{preAdd}^{(+)}}(\cF_{+}(\widehat{\mathrm{ma}_{BG}({\mathrm{ma}thbf{A}})}^{G}))\end{eqnarray*}
using that $\mathrm{ma}$ (as a right-adjoint, see \eqref{f3rfkj34nfkjf3f3f3f43f}) preserves limits.
Note that
\[ \cF_{+}(\widehat{\mathrm{ma}_{BG}({\mathbf{A}})}^{G})= \hat {\mathbf{A}}^{G}\ ,\]
where on the left-hand side we use \cref{ddd_hat_A_G} in the marked case, and on the right-hand side we use it in the unmarked case.
\end{rem}
\begin{rem}
The order of taking the limit $\lim_{BG}$ and the localization $\ell_{...}$ matters.
For example, consider the additive category $\Mod(\Z)$ with the trivial $G$-action.
Then
\[ \lim_{BG} \Mod(\Z) \cong\Mod(\Z)\ .\]
On the other hand, $\widehat{\Mod(\Z)}^{G}$ is the category of representations of $G$ on $\Z$-modules. If $G$ is non-trivial, then it is not equivalent to $\Mod(\Z)$. \end{rem}
For simplicity (and in view of \cref{geroigergergerg}), we formulate the proof in the marked case, only.
Since the category $\mathbf{preAdd}^{+}$ has a combinatorial model category structure, the injective model category structure on $\Fun(BG,\mathbf{preAdd}^{+})$ exists. The proof of this fact involves Smith's theorem, see e.g.\ \cite[Thm.~1.7]{beke}, \cite[Sec.~A.2.6]{htt}. A textbook reference of the fact stated precisely in the form we need is \cite[Prop.~A.2.8.2]{htt}.
For every fibrant replacement functor $r \colon \id\to R$ in the injective model category structure on
$\Fun(BG,\mathbf{preAdd}^{+})$ we have an equivalence
\begin{equation}\label{eq:fibrant-replacement}
\ell_{\mathbf{preAdd}^{+}}\circ \lim_{BG}\circ R\simeq \lim_{BG}\circ \ell_{\mathbf{preAdd}^{+},BG}
\end{equation}
of functors from $\Fun(BG,\mathbf{preAdd}^{+})$ to $\mathbf{preAdd}^{+}_{\infty}$ (see e.g.\ \cite[Prop.~13.5]{bunke} for an argument).
In the following we use the notation introduced in \cref{gwiogefwerfwefwefewfwadd} and before \cref{gioergegergreg}. Furthermore, we consider the $G$-groupoid $\tilde G$ defined in \cref{rwgioorgfgwergwf}.
We define the functor
\begin{equation}\label{feoirufeoirfjerfref}
R:=\Fun^{+}_{\mathbf{preAdd}^{+}}(Q(\tilde G),-) \colon \Fun(BG,\mathbf{preAdd}^{+})\to \Fun(BG,\mathbf{preAdd}^{+})
\end{equation}
together with the natural transformation $r \colon \id\to R$ induced by $\tilde G\to \Delta^{0}_{{\mathbf{Cat}}}$ using the canonical isomorphism $\Fun^{+}_{\mathbf{preAdd}^{+}}(Q(\Delta^{0}_{{\mathbf{Cat}}}),-)\cong \id$.
\begin{lem}\label{wfeioweo9ffwefwf}
The functor \eqref{feoirufeoirfjerfref} together with the natural transformation $r$
is a fibrant replacement functor.
\end{lem}
\begin{proof}The morphism $\tilde G\to \Delta^{0}_{{\mathbf{Cat}}}$ is a non-equivariant equivalence of groupoids. An inverse equivalence is given by any map $\Delta_{{\mathbf{Cat}}}^{0}\to \tilde G$ classifying some object of $\tilde G$. Since this functor is injective on objects we conclude similarly as for
\cref{lem:power.fib} that the (non-equivariant) morphism $p \colon R({\mathbf{A}})\to {\mathbf{A}}$ it induces is a weak equivalence. Since $p\circ r=\id$ we conclude that $r \colon {\mathbf{A}}\to R({\mathbf{A}})$ is a (non-equivariant) weak equivalence, too. Hence
$r \colon {\mathbf{A}}\to R({\mathbf{A}}) $, now considered as a morphism in $\Fun(BG,\mathbf{preAdd}^{+})$, is an equivalence in the injective model category structure.
In order to finish the proof we must show that $ R({\mathbf{A}})$ is fibrant.
To this end we consider the following square in $\Fun(BG,\mathbf{preAdd}^{+})$, where $c\colon \bC\to \bD$ is a trivial cofibration in $\Fun(BG,\mathbf{preAdd}^{+})$:
\[\xymatrix{\bC\ar[r]\ar[d]^-{c}&R({\mathbf{A}})\ar[d]\\\bD\ar[r]\ar@{..>}[ur]&{*}}\]
We must show the existence of the diagonal lift.
We use the identification $\Fun_{\mathrm{ma}thbf{preAdd}^{+}}^{+}(Q(\tilde G),*)\simeq *$ and the adjunction of \cref{gioergegergreg} in order to rewrite the lifting problem as follows.
\[\xymatrix{\bC\sharp \tilde G\ar[r]^{\phi}\ar[d]& {\mathbf{A}}\ar[d]\\\bD\sharp \tilde G\ar[r]\ar@/^1cm/@{..>}[u]^{\tilde d}\ar@{..>}[ur] &{*}}\]
Since, after forgetting the $G$-action, the morphism $c \colon \bC\to \bD$ is a trivial cofibration, it is injective on objects.
We can therefore choose an inverse equivalence $d \colon \bD\to \bC$ (not necessarily $G$-invariant) up to marked isomorphism {with $d\circ c=\id_\bC$.}
We can extend the composition
\[ \bD\xrightarrow{d} \bC\to \bC\times \{1\}\to \bC\sharp \tilde G\] uniquely to a $G$-invariant morphism
\[ \tilde d \colon \bD\sharp \tilde G\to \bC\sharp \tilde G\]
by setting
\[ \tilde d(D,g):=(g(d(g^{-1}D)),g)\ , \quad \tilde d(f \colon D\to D^{\prime},g\to h):=g^{-1} d(g^{-1}f)\sharp (g\to h) \ . \]
The desired lift can now be obtained as the composition $\phi\circ \tilde d$.
\end{proof}
\begin{proof}[Proof of \cref{rgier9oger}]
By \eqref{eq:fibrant-replacement} and \cref{wfeioweo9ffwefwf}, we have an equivalence
\[ \lim_{BG}\ell_{\mathbf{preAdd}^{+},BG}({\mathbf{A}})\simeq \ell_{\mathbf{preAdd}^{+}}( \lim_{BG}R({\mathbf{A}}))\ . \]
In order to finish the proof of \cref{rgier9oger}, it remains to show that
\[ \lim_{BG}R({\mathbf{A}})\cong \hat {\mathbf{A}}^{G}\ .\]
We define a functor
\[ \Psi \colon \lim_{BG} R({\mathbf{A}})=\lim_{BG} \Fun_{\mathbf{preAdd}^{+}}^{+}(Q(\tilde G),{\mathbf{A}})\to \hat {\mathbf{A}}^{G} \]
as follows.
\begin{enumerate}
\item on objects:
\[ \Psi(\phi):=(\phi(1), (\phi(1 \to g))_{g\in G})\ . \]
Note that $\phi(g)=g\phi(1)$ by $G$-invariance of $\phi$.
\item on morphisms:
\[ \Psi((a_{h})_{h\in \tilde G} \colon \phi\to \psi):=a_{1} \colon \phi(1)\to \psi(1)\ .\]
One easily checks the relation \ref{guerioggergerg} using that $\phi$ and $\psi$ are $G$-invariant and that $(a_{h})_{h\in \tilde G}$ is a natural transformation.
\item We observe that $\Psi$ preserves marked isomorphisms.
\end{enumerate}
Finally we check that the
functor $\Psi$ is an isomorphism of categories. This finishes the proof of \cref{rgier9oger}.
\end{proof}
\cref{rgier9oger} implies an analogous statement for additive categories.
Let ${\mathbf{A}}$ be in $\Fun(BG,\mathbf{preAdd}^{(+)})$.
\begin{lem}\label{ergoiejrogregregeg}
If ${\mathbf{A}}$ belongs to the subcategory $\Fun(BG, \mathbf{Add}^{(+)})$, then $ \hat {\mathbf{A}}^{G}$ is a (marked) additive category.
\end{lem}
\begin{proof}
We must show that $\hat {\mathbf{A}}^{G}$ admits finite coproducts.
If $(M,\rho)$ and $(M^{\prime},\rho^{\prime})$ are two objects,
then $(M\oplus M^{\prime},\rho\oplus \rho^{\prime})$ together with the canonical inclusions represents the coproduct of $(M,\rho)$ and $(M^{\prime},\rho^{\prime})$.
In the marked case, one furthermore checks by inspection that condition \ref{fdblkgjklrgregergergerg} from \cref{reiuheriververvec} for ${\mathbf{A}}$ implies this condition for $\hat {\mathbf{A}}^{G}$. This condition also implies that $\rho\oplus \rho^{\prime}$ acts by marked isomorphisms, as required in the marked case.
\end{proof}
Let ${\mathbf{A}}$ be in $\Fun(BG, \mathbf{Add}^{(+)})$.
\begin{kor}
\label{cor:invariants}
We have an equivalence
\[ \lim_{BG} \ell_{\mathbf{Add}^{(+)},BG}({\mathbf{A}})\simeq \ell_{\mathbf{Add}^{(+)}}(\hat {\mathbf{A}}^{G})\ .\]
\end{kor}
\begin{proof}
The functor $\cF_{\oplus} \colon \mathbf{Add}^{+}_{\infty}\to \mathbf{preAdd}^{+}_{\infty}$ is a right-adjoint and hence preserves limits.
Using \cref{rgier9oger}, we obtain equivalences
\begin{align*}
\cF_{\oplus}(\lim_{BG}(\ell_{\mathbf{Add}^{(+)},BG} ({\mathbf{A}}))) & \simeq \lim_{BG} \ell_{\mathbf{preAdd}^{(+)},BG} (\cF_{\oplus,BG}({\mathbf{A}}))\\
& \simeq \ell_{\mathbf{preAdd}^{(+)}}(\widehat{\cF_{\oplus,BG}( {\mathbf{A}})}^{G})\\
& \simeq \cF_{\oplus}( \ell_{\mathbf{Add}^{(+)}}(\hat {\mathbf{A}}^{G}))
\end{align*}
Since $\hat {\mathbf{A}}^{G}$ is additive by \cref{ergoiejrogregregeg},
this implies the assertion by omitting $\cF_{\oplus}$ on both sides.
\end{proof}
\begin{ex}\label{rgvoihifowefwfwe}
Let $k$ be a complete normed field and let $\mathbf{Ban}$ denote the category of Banach spaces over $k$ and bounded linear maps.
This category is additive. Note that only the equivalence class of the norm on an object of $\mathbf{Ban}$ is an invariant of the isomorphism class of the object. We use the norms in order to define a marked pre-additive category $\mathbf{Ban}^{+}$ by marking isometries.
It is first interesting to observe that $\mathbf{Ban}^{+}$ is not a marked additive category. In fact,
Condition \ref{reiuheriververvec}.\ref{fdblkgjklrgregergergerg} is violated, since only the equivalence class of the norm on a direct sum is fixed by the norms on the summands.
We can now calculate the $G$-invariants:
By \cref{cor:invariants},
\[ \lim_{BG} \ell_{\mathbf{Add},BG} (\underline{\mathbf{Ban}})\simeq \ell_{\mathbf{Add}}(\widehat{\mathbf{Ban}}^{G})\ .\]
By \cref{giurhgergeg34gergerge}, $\widehat{\mathbf{Ban}}^{G}$
is the category of Banach spaces over $k$ with an action by $G$ and equivariant bounded linear maps.
On the other hand, by \cref{rgier9oger}
\[ \lim_{BG} \ell_{\mathbf{preAdd}^{+},BG} (\underline{\mathbf{Ban}^{+}})\simeq \ell_{\mathbf{preAdd}^{+}}(\widehat{\mathbf{Ban}^{+}}^{G})\ .\]
By \cref{giurhgergeg34gergerge}, $\widehat{\mathbf{Ban}^{+}}^{G}$
is the category of Banach spaces over $k$ with an \emph{isometric} action by $G$ and equivariant bounded linear maps which are marked if they are isometric.
Hence $\cF_{+}( \widehat{\mathbf{Ban}^{+}}^{G} )$ is contained properly in
$ \widehat{\mathbf{Ban}}^{G} $.
This shows that even if we forget the marking at the end, the marking matters when we form limits.
\end{ex}
\begin{ex}
Let $R$ be a unital ring. We consider the additive categories $\Mod^{?}(R)$ and $\Mod^{?}(R[G])$, where the decoration $?$ is a condition like \emph{free}, \emph{projective}, \emph{finitely generated} or some
combination of these.
By \cref{rgier9oger} and \cref{giurhgergeg34gergerge}, we get
\[ \lim_{BG}\ell_{\mathbf{Add},BG}(\underline{\Mod^{?}(R)})\simeq \ell_{\mathbf{Add}}( \Fun(BG,\Mod^{?}(R)))\ .\]
Note the difference between limits and colimits: By \cref{rierhigregegerg43t34t34t} we have an equivalence
\[ \colim_{BG}\ell_{\mathbf{Add},BG}(\underline{\Mod^{?}(R)})\simeq \ell_{\mathbf{Add}}( \Mod^{?}(R[G])) \]
for $?=(\mathrm{fg},\mathrm{proj}), (\mathrm{fg},\mathrm{free})$. If $G$ is infinite, then the interpretation of $?$ on the right-hand side leads to different categories (e.g.\ finitely generated free $R[G]$-modules are in general not finitely generated $R$-modules with a $G$-action).
\end{ex}
\begin{ex}
\label{ex:bc}
For the following example we assume familiarity with equivariant coarse homology theories and the example of equivariant coarse algebraic $K$-homology, see for example \cite[Sec.~2, 3 and 8]{equicoarse}. In particular, recall the definition of the functor $\bV_{\mathbf{A}}\colon \mathbf{BornCoarse}\to \mathbf{Add}$ of $X$-controlled ${\mathbf{A}}$-objects for a bornological coarse space $X$ and an additive category ${\mathbf{A}}$ from \cite[Sec.~8.2]{equicoarse}. We define the functor
\[\bV_{\mathbf{A}}^+\colon \mathbf{BornCoarse}\to \mathbf{Add}^+\]
by considering $\bV_{\mathbf{A}}$ and marking the $\diag(X)$-controlled isomorphisms.
Let $X$ be a $G$-bornological coarse space and let ${\mathbf{A}}$ be an additive category with a $G$-action. By functoriality the marked additive category $\bV_{\mathbf{A}}^{+}(X)$ then has an action of $G\times G$. We consider $\bV_{\mathbf{A}}^{+}(X)$ as a marked additive category with $G$-action by restricting the $G\times G$ action along the diagonal. As in \cref{ddd_hat_A_G} we can form the category $\widehat{\bV_{\mathbf{A}}^{+}(X)}^{G}$.
We define the functor
\[\bV_{\mathbf{A}}^{G}:=\cF_{+}\circ \widehat{\bV_{\mathbf{A}}^{+}}^{G}\colon G\mathbf{BornCoarse}\to \mathbf{Add}\ .\]
One checks that this definition agrees with the definition of $\bV_{\mathbf{A}}^G$ from \cite[Sec.~8.2]{equicoarse}.
By definition, equivariant coarse algebraic $K$-homology is the functor $K{\mathbf{A}}\cX^G:=K\circ\bV_{\mathbf{A}}^{G}$.
The functor $\cF_+\colon \mathbf{Add}^+\to\mathbf{Add}$ descends to a functor $\cF_+\colon \mathbf{Add}^+_\infty\to \mathbf{Add}_\infty$.
Using \cref{cor:invariants}, we now obtain
\begin{align*}
K{\mathbf{A}}\cX^G&=K\circ\bV_{\mathbf{A}}^{G}=K\circ \cF_{+}\circ \widehat{\bV_{\mathbf{A}}^{+}}^{G}\\
&\simeq K_\infty\circ \ell_{\mathbf{Add}}\circ \cF_{+}\circ \widehat{\bV_{\mathbf{A}}^{+}}^{G}\\
&\simeq K_\infty\circ \cF_{+}\circ \ell_{\mathbf{Add}^+}\circ \widehat{\bV_{\mathbf{A}}^{+}}^{G}\\
&\simeq K_\infty\circ \cF_{+}\circ \lim_{BG}\circ {\ell_{\mathbf{Add}^+,BG}} \circ \bV_{\mathbf{A}}^{+}\ .
\end{align*}
This shows that equivariant coarse algebraic $K$-homology can be computed from the non-equivariant version by taking $G$-invariants in marked additive categories.
\end{ex}
In addition to the adjunction \eqref{eq_BG_GOrb_Kan}, for a presentable $\infty$-category $\bC$ we also have an adjunction
\begin{equation}\label{vnvkjenvkjenvkevnkervccerf}
i^{op,*} \colon \Fun(G\mathbf{Orb}^{op},\bC)\leftrightarrows \Fun(BG^{op},\bC) \colon i^{op}_{*}\ .
\end{equation} In analogy to \eqref{revelrnjjkrnfkjervervverveverv} we consider the functor $C^{G}$ defined as the composition
\[
\mathclap{
\Fun(BG^{op},\mathbf{preAdd}^{(+)}) \xrightarrow{\ell_{\mathbf{preAdd}^{(+)},BG}} \Fun(BG^{op},\mathbf{preAdd}^{(+)}_{\infty})\xrightarrow{i^{op}_{*}} \Fun(G\mathbf{Orb}^{op},\mathbf{preAdd}^{(+)}_{\infty})\ .}
\]
For a (marked) pre-additive category with $G$-action
${\mathbf{A}}$ we are interested in the values $C^{G}({\mathbf{A}})(G/H)$ for subgroups $H$ of $G$.
\begin{lem}\label{guihiuwfewfwefwefwef}
We have an equivalence
\[ C^{G}({\mathbf{A}})(G/H)\simeq \ell_{\mathbf{preAdd}^{(+)}}(\widehat{ \operatorname{Res}^{G}_{H}({\mathbf{A}})}^{H})\ . \]
\end{lem}
\begin{proof}
The argument is very similar to the proof of \cref{vfuiheiwufewfewfefewfewf}. We use that the induction $S\mapsto G\times_{H}S$
induces an equivalence
\[ BH^{op}\simeq (G/H)/i^{op}\ ,\]
where $(G/H)/i^{op}$ denotes the slice of $i^{op} \colon BG^{op}\to G\mathbf{Orb}^{op}$ under $G/H$.
Further employing the point-wise formula for the right-Kan extension functor $i^{op}_{*}$ and the equivalence $BH\simeq BH^{op}$ given by inversion, we get
\begin{align*}
C^{G}({\mathbf{A}})(G/H)&\simeq i^{op}_{*}(\ell_{\mathbf{preAdd}^{(+)},BG^{op}}( {\mathbf{A}} ))(G/H)\\&\simeq
\lim_{(G/H \to i^{op}(*))\in (G/H)/i^{op}} \ell_{\mathbf{preAdd}^{(+)},BG^{op}}( {\mathbf{A}} )(*)\\
&\simeq\lim_{ BH^{op}} \ell_{\mathbf{preAdd}^{(+)},BH^{op}}( \operatorname{Res}^{G}_{H}({\mathbf{A}}) ) \\
&\simeq \ell_{\mathbf{preAdd}^{(+)}}(\widehat{ \operatorname{Res}^{G}_{H}({\mathbf{A}})}^{H})\qedhere
\end{align*}
\end{proof}
\end{document} |
\begin{document}
\fussy
\title{The direct sum of $q$-matroids}
\begin{abstract}
For classical matroids, the direct sum is one of the most straightforward methods to make a new matroid out of existing ones. This paper defines a direct sum for $q$-matroids, the $q$-analogue of matroids. This is a lot less straightforward than in the classical case, as we will try to convince the reader. With the use of submodular functions and the $q$-analogue of matroid union we come to a definition of the direct sum of $q$-matroids. As a motivation for this definition, we show it has some desirable properties. \\
\noindent \textbf{Keywords:} $q$-matroid, $q$-analogue, direct sum \\
\textbf{MSC2020:} 05B35, 05A30
\end{abstract}
\section{Introduction}
The study of $q$-matroids, introduced by Crapo \cite{crapo1964theory}, has recently attracted renewed attention because of its link to network coding. After the reintroduction of the object by Jurrius and Pellikaan \cite{JP18} and independently that of $(q,r)$-polymatroids by Shiromoto \cite{S19}, several other papers have studied these objects, often in relation to rank metric codes. See for example \cite{BCR21,GJ20,ghorpade_shellability_2022,gluesing2021qpolyindep,gluesing2021qpoly,gorla2019rank,panja2019some}. \\
Roughly speaking, a $q$-analogue in combinatorics is a generalisation from sets to finite dimensional vector spaces. So a $q$-matroid is a finite dimensional vector space with a rank function defined on its subspaces, satisfying certain properties. One can also view this generalisation from the point of view of the underlying lattice: where matroids have the Boolean lattice (of sets and subsets) as their underlying structure, $q$-matroids are defined over the subspace lattice. The work of finding a $q$-analogue often comes down to writing a statement about sets in such a lattice-theoretic way that the $q$-analogue is a direct rephrasing for the subspace lattice. However, this is often not a trivial task, for two reasons. First, there might be several equivalent ways to define something over the Boolean lattice, where the $q$-analogues of these statements are not equivalent. Secondly, some statements on the Boolean lattice do not have a $q$-analogue: the subspace lattice is, contrarily to the Boolean lattice, not distributive. \\
In this paper we consider the {\em direct sum} of two $q$-matroids. An option to do this is to extend to the realm of sum-matroids \cite{panja2019some}, but we are looking for a construction that gives a $q$-matroid. This is one of the cases as mentioned above where the $q$-analogue is a lot harder than the relatively simple procedure of taking the direct sum of two classical matroids. The latter is defined as follows. Let $E_1$ and $E_2$ be disjoint sets and let $E=E_1\cup E_2$. Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be two matroids. Then the direct sum $M_1\oplus M_2$ is a matroid with ground set $E$. For its rank function, note that we can write any $A\subseteq E$ as a disjoint union $A=A_1\sqcup A_2$ with $A_1\subseteq E_1$ and $A_2\subseteq E_2$. The rank function of the direct sum $M_1\oplus M_2$ is now given by $r(A)=r_1(A_1)+r_2(A_2)$. \\
If we try to mimic this procedure in the $q$-analogue, we run into trouble quite fast. Let $E_1$ and $E_2$ be disjoint subspaces and let $E=E_1\oplus E_2$. If we consider a subspace $A\subseteq E$, it might be that we cannot write it as a direct sum $A_1\oplus A_2$, with $A_1\subseteq E_1$ and $A_2\subseteq E_2$. In fact, most of the subspaces of $E$ can not be written in this way. Our goal is to define a rank function for these subspaces. \\
A naive try is to define a rank function in the $q$-analogue for all spaces $A\subseteq E$ that can be written as $A_1\oplus A_2$, and hope that the axioms for the rank function take care of the rest of the spaces. However, as we show with an example in Section \ref{ExDim4}, this procedure does not give us a unique direct sum. As a byproduct of this example, we find the smallest non-representable $q$-matroid. \\
Our solution for the direct sum of $q$-matroids is the following. We will first define the notion of {\em matroid union} for $q$-matroids in Section \ref{MatUn}. This notion is dual to matroid intersection, that we consider in Section \ref{IntDual}. Then we show in Section \ref{DirSum} that the direct sum of a $q$-matroid and a loop can be defined. Finally, we define the direct sum of two $q$-matroids by first adding loops to get two $q$-matroids on the same ground space, and then taking their matroid union. \\
To motivate this definition we show that this construction has several desirable properties. First of all, it generalises our naive attempt in Section \ref{ExDim4}. Also, taking the dual of a direct sum is isomorphic to first taking duals and then taking the direct sum. Lastly, restriction and contraction to $E_1$ and $E_2$ give back one of the original $q$-matroids. \\
We finish this paper by briefly considering what it would mean for a $q$-matroid to be connected (Section \ref{Connect}). As one might assume from the difficulty of the direct sum, this is also not an easy endeavour. We outline the problems that appear when trying to make a $q$-analogue of some of the several equivalent definitions of connectedness in classical matroids. \\
At the end of this paper (Appendix \ref{qcatalogue}) we give a catalogue of small $q$-matroids. In the paper, we will often refer to examples from this catalogue. Since the study of $q$-matroids is a relatively new one, we hope this catalogue to be useful for others learning about $q$-matroids.
\section{Preliminaries}\label{PrelimSec}
Following the notation of \cite{BCR21} we denote by $n$ a fixed positive integer and by $E$ a fixed $n$-dimensional vector space over an arbitrary field $\mathbb{F}.$ The notation $\mathcal{L}(E)$ indicates the \textbf{lattice of subspaces} of $E$. For any $A,B\in\mathcal{L}(E)$ with $A\subseteq B$ we denote by $[A,B]$ the interval between $A$ and $B$, that is, the lattice of all subspaces $X$ with $A\subseteq X\subseteq B$. For $A\subseteq E$ we use the notation $\mathcal{L}(A)$ to denote the interval $[\{0\},A]$. For more background on lattices, see for example Birkhoff \cite{birkhoff}. \\
We use the following definition of a $q$-matroid.
\begin{Definition}\label{rankfunction}
A $q$-matroid $M$ is a pair $(E,r)$ where $r$ is an integer-valued function defined on the subspaces of $E$ with the following properties:
\begin{itemize}
\item[(R1)] For every subspace $A\in \mathcal{L}(E)$, $0\leq r(A) \leq \dim A$.
\item[(R2)] For all subspaces $A\subseteq B \in \mathcal{L}(E)$, $r(A)\leq r(B)$.
\item[(R3)] For all $A,B\in \mathcal{L}(E)$, $r(A+ B)+r(A\cap B)\leq r(A)+r(B)$.
\end{itemize}
The function $r$ is called the \textbf{rank function} of the $q$-matroid.
\end{Definition}
Sometimes, we will need to deal with the rank functions of more than one $q$-matroid at a time, say $M,M'$, with ground spaces $E$, $E'$, respectively. In order to distinguish them (and emphasize the $q$-matroid in which we are computing the rank), we will write $r(M;A)$ for the rank in $M$ of a subspace $A\subseteq E$ and $r(M';A')$ for the rank in $M'$ of a subspace $A'\subseteq E'$. For a $q$-matroid $M$ with ground space $E$, we use $r(M)$ as notation for $r(M;E)$. \\
We will use the axioms of the rank functions repeatedly in our proofs, as well as the following lemma that follows by induction from the axiom (R2') in \cite[Theorem 31]{BCR21}.
\begin{Lemma}[Local semimodularity]\label{localSemimodularity}
If $A\subseteq B\subseteq E$ then $r(B)-r(A)\leq\dim B-\dim A$.
\end{Lemma}
A way to visualise a $q$-matroid is by taking the Hasse diagram of the underlying subspace lattice and colour all the covers: red if the rank goes up and green if the rank stays the same. This is done in Appendix \ref{qcatalogue}. More properties of this bi-colouring can be found in \cite{BCJ17}.
There are several important subspaces in $q$-matroids.
\begin{Definition}
Let $(E,r)$ be a $q$-matroid. A subspace $A$ of $E$ is called an \textbf{independent} space of $(E,r)$ if \[r(A)=\dim A.\]
An independent subspace that is maximal with respect to inclusion is called a \textbf{basis}.
A subspace that is not an independent space of $(E,r)$ is called a \textbf{dependent space} of the $q$-matroid $(E,r)$.
We call $C \in \mathcal{L}(E)$ a \textbf{circuit} if it is itself a dependent space and every proper subspace of $C$ is independent.
A \textbf{spanning space} of the $q$-matroid $(E,r)$ is a subspace $S$ such that $r(S)=r(E)$.
A subspace $A$ of a $q$-matroid $(E,r)$ is called a \textbf{flat} if for all $1$-dimensional subspaces $x \in \mathcal{L}(E)$ such that $x\nsubseteq A$ we have \[r(A+x)>r(A).\]
A subspace $H$ is called a \textbf{hyperplane} if it is a maximal proper flat, i.e., if $H \neq E$ and the only flat that properly contains $H$ is $E$.
A $1$-dimensional subspace $\ell$ is called a \textbf{loop} if $r(\ell)=0$. All loops together form a subspace (\cite[Lemma 11]{JP18}) that we call the \textbf{loop space} of $M$.
\end{Definition}
A $q$-matroid can be equivalently defined by its independent spaces, bases, circuits, spanning spaces, flats and hyperplanes. See \cite{BCR21} for an overview of these cryptomorphic definitions. We will explicitly use the axioms for circuits:
\begin{Definition}\label{circuit-axioms}
Let $\mathcal{C}\subseteq\mathcal{L}(E)$. We
define the following \textbf{circuit axioms}.
\begin{itemize}
\item[(C1)] $\{0\}\notin\mathcal{C}$.
\item[(C2)] For all $C_1,C_2\in\mathcal{C}$, if $C_1\subseteq C_2$, then $C_1=C_2$.
\item[(C3)] For distinct $C_1,C_2 \in \mathcal{C}$ and any $X\in \mathcal{L}(E)$ of codimension $1$ there is a circuit $C_3 \in \mathcal{C}$ such that $C_3 \subseteq (C_1+C_2)\cap X$.
\end{itemize}
If $\mathcal{C}$ satisfies the circuit axioms (C1)-(C3), we say that $(E,\mathcal{C})$ is a \textbf{collection of circuits}.
\end{Definition}
Recall that a lattice isomorphism between a pair of lattices $(\mathcal{L}_1,\leq_1,\vee_1,\wedge_1)$ and $(\mathcal{L}_2,\leq_2,\vee_2,\wedge_2)$ is a bijective function $\varphi:\mathcal{L}_1\longrightarrow\mathcal{L}_2$ that is order-preserving and preserves the meet and join, that is, for all $x,y\in\mathcal{L}_1$ we have that $\varphi(x\wedge_1 y)=\varphi(x)\wedge_2\varphi(y)$ and $\varphi(x\vee_1 y)=\varphi(x)\vee_2\varphi(y)$. A lattice anti-isomorphism between a pair of lattices is a bijective function $\psi:\mathcal{L}_1\longrightarrow\mathcal{L}_2$ that is order-reversing and interchanges the meet and join, that is, for all $x,y\in\mathcal{L}_1$ we have that $\psi(x\wedge_1 y)=\psi(x)\vee_2\psi(y)$ and $\psi(x\vee_1 y)=\psi(x)\wedge_2\psi(y)$.
We hence define a notion of equivalence and duality between $q$-matroids.
\begin{Definition}
Let $E_1,E_2$ be vector spaces over the same field $\mathbb{F}$. Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be $q$-matroids. We say that $M_1$ and $M_2$ are \textbf{lattice-equivalent} or \textbf{isomorphic} if there exists a lattice isomorphism $\varphi:\mathcal{L}(E_1)\longrightarrow \mathcal{L}(E_2)$ such that $r_1(A)=r_2(\varphi(A))$ for all $A\subseteq E_1$. In this case we write $M_1 \cong M_2$.
\end{Definition}
Fix an anti-isomorphism $\perp:\mathcal{L}(E)\longrightarrow\mathcal{L}(E)$ that is an involution. For any subspace $X \in \mathcal{L}(E)$ we denote by $X^\perp$ the \textbf{dual} of $X$ in $E$ with respect to $\perp$. Note that since an anti-isomorphism preserves the length of intervals, we have for any $X\in\mathcal{L}(E)$ that $\dim(X^\perp)=\dim(E)-\dim(X)$. \\
From a lattice point of view, if $B=B_1\oplus B_2$, then $B=B_1\vee B_2$ and $B_1\wedge B_2=0$. Since $\perp$ is an anti-isomorphism of $\mathcal{L}(E)$, we have that $B^\perp=B_1^\perp\wedge B_2^\perp$ and $B_1^\perp\vee B_2^\perp=1$. Important operations on $q$-matroids are restriction, contraction and duality. We give a short summary here and refer to \cite{BCIR21,JP18} for details.
\begin{Definition}\label{defdual}
Let $M=(E,r)$ be a $q$-matroid. Then $M^*=(E,r^*)$ is also a $q$-matroid, called the \textbf{dual $q$-matroid}, with rank function
\[ r^*(A)=\dim(A)-r(E)+r(A^\perp). \]
The subspace $B$ is a basis of $M$ if and only if $B^\perp$ is a basis of $M^*$. From the bi-colouring point of view, we get the dual $q$-matroid by turning the Hasse diagram upside down and interchanging all red and green covers.
\end{Definition}
\begin{Definition}\label{restr}
Let $M=(E,r)$ be a $q$-matroid. The \textbf{restriction} of $M$ to a subspace $X$ is the $q$-matroid $M|_X$ with ground space $X$ and rank function $r_{M|_X}(A)=r_M(A)$.
The \textbf{contraction} of $M$ by a subspace $X$ is the $q$-matroid $M/X$ with ground space $E/X$ and rank function $r_{M/X}(A)=r_M(A)-r_M(X)$. A $q$-matroid that is obtained by restriction and contraction of $M$ is called a \textbf{minor} of $M$.
\end{Definition}
\begin{Theorem}\label{DualRestrContr}
Restriction and contraction are dual operations, that is, $M^*/X\cong (M|_{X^\perp})^*$ and $(M/X)^* \cong M^*|_{X^\perp}$.
\end{Theorem}
Finally, we will define what it means for a $q$-matroid to be representable and give an example of an important class of $q$-matroids.
\begin{Definition}
Let $M=(E,r)$ be a $q$-matroid of rank $k$ over a field $K$. Let $A\subseteq E$ and let $Y$ be a matrix with column space $A$. We say that $M$ is \textbf{representable} if there exists a $k\times n$ matrix $G$ over an extension field $L/K$ such that $r(A)$ is equal to the matrix rank of $GY$ over $L$.
\end{Definition}
\begin{Example}
Let $k$ be a positive integer, $k \leq n$.
The \textbf{uniform $q$-matroid} is the $q$-matroid $M=(E,r)$ with rank function defined as follows:
\[ r(U):= \left\{ \begin{array}{cl}
\dim(U) & \text{ if } \dim(U) \leq k, \\
k & \text{ if } \dim(U) > k.
\end{array}\right.
\]
We denote this $q$-matroid by $U_{k,n}$.
\end{Example}
\section{Intuitive try for the direct sum}\label{ExDim4}
As stated in the introduction, the $q$-analogue of the direct sum is not straightforward. Let $E=E_1\oplus E_2$ be a direct sum of subspaces and let $A\subseteq E$. Then we cannot, in general, decompose $A\subseteq E$ as $A=A_1\oplus A_2$ with $A_1\subseteq E_1$ and $A_2\subseteq E_2$. \\
With other cryptomorphic definitions of $q$-matroids we run into similar problems. Look for example at the independent spaces. In the classical case, the independent sets of the direct sum $M_1\oplus M_2$ are the unions of an independent set in $M_1$ and an independent set in $M_2$. If we want to take the direct sum of the $q$-matroids $M_1=U_{1,1}$ and $M_2=U_{1,1}$, we expect all subspaces to be independent. However, not all such spaces can be written as the sum of an independent space in $M_1$ and an independent space in $M_2$. Similar problems arise when trying to construct the bases and circuits of the direct sum of the $q$-matroids $M_1$ and $M_2$.
\\
In this section we explore if we can define the rank function of a direct sum of $q$-matroids by simply defining $r(A)=r_1(A_1)+r_2(A_2)$ for all $A$ that can be written as $A=A_1\oplus A_2$, and hoping that the rank axioms will take care of the rest of the subspaces. (Spoiler alert: it will not work).
\subsection{First definition and properties}\label{FirstDef}
Let us make our first trial to define the direct sum. We start with a definition mimicking the classical case. We consider these properties desirable for the direct sum of $q$-matroids. We also prove some direct consequences of these properties. The properties from Definition \ref{def-directsum1} will turn out not to define a unique $q$-matroid, hence they are not sufficient for defining the direct sum of $q$-matroids. However, our final definition will satisfy these properties.
\begin{Definition}\label{def-directsum1}
Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be two $q$-matroids on trivially intersecting ground spaces. For a $q$-matroid $M=(E,r)$ on the ground space $E=E_1\oplus E_2$ we define the following properties:
\begin{itemize}
\item the minors $M|_{E_1}$ and $M/E_2$
are both isomorphic to $M_1$,
\item the minors $M|_{E_2}$ and $M/E_1$
are both isomorphic to $M_2$.
\end{itemize}
\end{Definition}
In particular, it follows from this construction that the rank of $M$ is the sum of the ranks of $M_1$ and $M_2$. The next theorem shows that this definition is equivalent to what we recognise as the $q$-analogue of the definition of direct sum in the classical case.
\begin{Theorem}\label{RangoSomma}
Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be two $q$-matroids on trivially intersecting ground spaces, and let $M=(E,r)$ be a $q$-matroid on the ground space $E=E_1\oplus E_2$. Then $M$ satisfies the properties of Definition \ref{def-directsum1} if and only if for each $A\subseteq E_1$ and $B\subseteq E_2$ it holds $r(A+B)=r_1(A)+r_2(B)$.
\end{Theorem}
\begin{proof}
First, assume $M$ satisfies the properties of Definition \ref{def-directsum1}. Note that for all $A\subseteq E_1$ we have $r(A)=r(M;A)=r(M|_{E_1};A)=r_1(A)$ and similarly, for all $B\subseteq E_2$ we have $r(B)=r_2(B)$. So we need to show that $r(A+B)=r(A)+r(B)$. We prove this by applying semimodularity multiple times. First we apply it to $A$ and $B$. Since $A\cap B=\{0\}$, we have $r(A\cap B)=0$ and (r3) gives us
\[ r(A+B)\leq r(A)+r(B). \]
We claim that $r(M;E_1+B)=r(M;E_1)+r(M;B)$.
Indeed, $r(M/E_1;(E_1+B)/E_1)=r(M_2;B)$ by Definition \ref{def-directsum1}.
Moreover, by Definition \ref{restr}, $r(M/E_1;(E_1+B)/E_1)=r(M; E_1+B)-r(M;E_1)$. Summing up
\[r(M;B)=r(M_2;B)=r(M/E_1;(E_1+B)/E_1)=r(M; E_1+B)-r(M;E_1),\]
so $r(M;E_1+B)=r(M;E_1)+r(M;B)$. Now we apply (r3) to $E_1$ and $A+B$.
\begin{align*}
r(E_1)+r(A+B) & \geq r(E_1+(A+B))+r(E_1\cap(A+B)) \\
& = r(B+E_1)+r(A) \\
& = r(B)+r(E_1)+r(A).
\end{align*}
This implies that
\[ r(A+B)\geq r(A)+r(B). \]
Combining the two inequalities gives the desired equality: $r(A+B)=r(A)+r(B)$. \\
For the other implication, suppose that $r(A+B)=r_1(A)+r_2(B)$. The two conditions in Definition \ref{def-directsum1} are symmetric, so we only need to prove the first one. We show that the rank function on $M|_{E_1}$ is equal to the rank function on $M_1$. Let $A\subseteq E_1$. Then
\[ r(M|_{E_1};A)=r(M;A)=r_1(A)+r_2(0)=r_1(A). \]
Now for $M/E_2$, let $C\subseteq E$ such that $E_2\subseteq C$. Then we can write $C=A+E_2$ with $A\subseteq E_1$. Then
\[ r(M/E_2;C/E_2)=r(M;C)-r(M;E_2)=r_1(A)+r_2(E_2)-r(M;E_2)=r_1(A). \]
It follows that $M|_{E_1}$ and $M/E_2$ are both isomorphic to $M_1$.
\end{proof}
As mentioned, the classical case of this last theorem is exactly the definition of the rank in the direct sum of matroids. This implies that Definition \ref{def-directsum1}, when applied to the classical case, completely determines the direct sum. We will see in the next subsection that this is not the case in the $q$-analogue. \\
We will close this section with some small results that show that Definition \ref{def-directsum1} determines the rank of all spaces of dimension and codimension $1$. Note that the next results only depend on Definition \ref{def-directsum1}, with the exception of Lemma \ref{allredontop}.
\begin{Proposition}\label{prop-noloops}
Let $M$ be a $q$-matroid satisfying the properties of Definition \ref{def-directsum1}. Suppose $M_1$ has loop space $L_1$ and $M_2$ has loop space $L_2$. Then the loop space of $M$ is $L_1\oplus L_2$.
\end{Proposition}
\begin{proof}
Since loops come in subspaces \cite[Lemma 11]{JP18}, $L_1\oplus L_2$ in $E$ only contains loops. We will show $M$ contains no other loops.
Suppose, towards a contradiction, that there is a loop $\ell$ in $M$ that is not in $L_1\oplus L_2$. By assumption, $\ell$ is not in $E_1$ or in $E_2$. First we apply the semimodular inequality to $E_1$ and $\ell$:
\begin{align*}
r(E_1+\ell)+r(E_1\cap\ell) & \leq r(E_1)+r(\ell) \\
r(E_1+\ell)+0 & \leq r(E_1)+0
\end{align*}
hence $r(E_1+\ell)=r(E_1)$. Now we consider the $1$-dimensional space $x=(E_1+\ell)\cap E_2$. We claim that this space has rank $1$. Towards a contradiction, suppose $r(x)=0$ hence $x\subseteq L_2$. Then $r(\ell+x)=0$. Let $y$ be the $1$-dimensional space $(\ell+x)\cap E_1$. It has rank $0$ because it is in $\ell+x$, hence $y\subseteq L_1$. Now we have $\ell\subseteq y+x\subseteq L_1\oplus L_2$, which is a contradiction to $\ell\not\subseteq L_1\oplus L_2$. We conclude that $r(x)=1$. \\
Now we apply the semimodular inequality to $E_1+\ell$ and $E_2$.
\begin{align*}
r((E_1+\ell)+ E_2)+r((E_1+\ell)\cap E_2) & \leq r(E_1+\ell)+r(E_2) \\
r(E_1+E_2)+1 & \leq r(E_1)+r(E_2)
\end{align*}
and this is a contradiction. So there are no loops outside $L_1\oplus L_2$ in $M$.
\end{proof}
In particular, since we know exactly what are the loops of the direct sum, we know that all other $1$-dimensional spaces have rank $1$. Dually, we can derive a similar result for the codimension-$1$ spaces.
The next Lemma holds for all $q$-matroids. It is the dual of the statement that loops come in subspaces.
\begin{Lemma}\label{allredontop}
Let $M=(E,r)$ be a $q$-matroid. Let $H$ be the intersection of all codimension $1$ spaces in $E$ of rank $r(M)-1$. Then the spaces $A$ such that $H\subseteq A\subseteq E$ are exactly all the elements of $\mathcal{L}(E)$ such that $r(E)-r(A)=\dim E-\dim A$.
\end{Lemma}
\begin{proof}
Let $X$ be a codimension $1$ space such that $r(X)=r(E)-1$. Consider the dual $q$-matroid $M^*$. Then $r^*(X^\perp)=\dim X^\perp-r(E)+r(X)=1-r(E)+r(E)-1=0$. Hence $X^\perp$ is a loop in $M^*$. This implies that $H^\perp$ is the sum of all loops in $M^*$, hence it is the loop space of $M^*$ and there are no other loops in $M^*$. For any $A$ such that $H\subseteq A\subseteq E$ we have that $A^\perp\subseteq H^\perp$, so $A^\perp$ has rank $0$ in $M^*$. This implies
\begin{align*} r(E)-r(A) & =r(E)-(r^*(A^\perp)+\dim A-r^*(E)) \\
& =r(E)+r^*(E)-\dim A \\
& =\dim E-\dim A.
\end{align*}
Conversely, if $A$ is a subspace such that $r(E)-r(A)=\dim E-\dim A$, then $r^*(A)=0$ by the same calculation as above. This implies the only spaces $A$ for which it holds that $r(E)-r(A)=\dim E-\dim A$, are the spaces such that $H\subseteq A\subseteq E$.
\end{proof}
The next result is the dual of Proposition \ref{prop-noloops}.
\begin{Proposition}\label{prop-nocoloops}
Let $M$ be a $q$-matroid satisfying the properties of Definition \ref{def-directsum1}. Suppose $M_1$ and $M_2$ do not have any codimension $1$ spaces of rank $r(M_1)-1$ and $r(M_2)-1$, respectively. Then $M$ does not have any codimension $1$ spaces of rank $r(M)-1$.
\end{Proposition}
\begin{proof}
Suppose, towards a contradiction, that there is a codimension $1$ space $H$ of rank $r(M)-1$ in $M$. By construction, $H$ does not contain $E_1$ or $E_2$. So $E_1\cap H$ is of codimension $1$ in $E_1$, and by construction it has rank $r(E_1)$. \\
Now we apply the semimodular inequality to $E_1$ and $H$.
\begin{align*}
r(E_1\cap H)+r(E_1+H) & \leq r(E_1)+r(H) \\
r(E_1)+r(M) & \leq r(E_1)+r(M)-1
\end{align*}
and this is a contradiction. So there are no codimension $1$ spaces of rank $r(M)-1$ in $M$.
\end{proof}
\subsection{Non-uniqueness of the first definition}\label{subsec-NotUnique}
In this section we show by example that Definition \ref{def-directsum1} does not uniquely define the direct sum of $q$-matroids. \\
Let $E=\mathbb{F}_2^4$ and let $M_1=M_2=U_{1,2}$. We will attempt to construct the direct sum $M=M_1\oplus M_2$. We assume that it has the properties from Definition \ref{def-directsum1}. So the $q$-matroid $M$ has at least two circuits: $E_1$ and $E_2$. Our goal is to determine $M$ completely. Note that Theorem \ref{RangoSomma} defines the rank for all subspaces of $E$ that can be written as a direct sum of a subspace of $E_1$ and a subspace of $E_2$. \\
All $1$-dimensional spaces in $E$ have rank $1$ because of Proposition \ref{prop-noloops} and by Proposition \ref{prop-nocoloops} all $3$-dimensional spaces in $E$ have rank $2$. This means that what is left to do is to decide for all $2$-dimensional spaces if they have rank $1$ or rank $2$, that is, whether they are a circuit or an independent space. We use the next lemma for this.
\begin{Lemma}\label{CircuitIntersection}
Let $M_1=M_2=U_{1,2}$ and let $M$ satisfy the conditions of Definition \ref{def-directsum1}. Let $C_1$ and $C_2$ be circuits of $M$ of dimension $2$. Then $\dim(C_1\cap C_2)\neq1$.
\end{Lemma}
\begin{proof}
If $C_1=C_2$, the result is clear. So let $C_1\neq C_2$. Suppose, towards a contradiction, that $\dim(C_1\cap C_2)=1$. Then $\dim(C_1+C_2)=3$. Now apply semimodularity to $C_1$ and $C_2$.
\begin{align*}
r(C_1+C_2)+r(C_1\cap C_2) & \leq r(C_1)+r(C_2) \\
2+1 & \leq 1+1
\end{align*}
This is a contradiction, hence $\dim(C_1\cap C_2)\neq1$.
\end{proof}
This means that every $2$-dimensional space that intersects with either $E_1$ or $E_2$ is independent. A counting argument shows that there are only six $2$-dimensional spaces that have trivial intersection with both $E_1$ and $E_2$. Denote by $A$, $B$, $C$, $D$, $F$, $G$ the six $2$-spaces of unknown rank. The following is independent of a choice of basis for $E$, but for convenience, we can coordinatize the spaces in the following way:
\[ E_1=\left\langle \begin{array}{cccc} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \end{array}\right\rangle, \qquad
E_2=\left\langle \begin{array}{cccc} 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \end{array}\right\rangle,
\]
\[ A=\left\langle \begin{array}{cccc} 1 & 0 & 0 & 1 \\ 0 & 1 & 1 & 0 \end{array}\right\rangle, \qquad
B=\left\langle \begin{array}{cccc} 1 & 0 & 1 & 0 \\ 0 & 1 & 1 & 1 \end{array}\right\rangle, \qquad
C=\left\langle \begin{array}{cccc} 1 & 0 & 1 & 1 \\ 0 & 1 & 0 & 1 \end{array}\right\rangle,
\]
\[ D=\left\langle \begin{array}{cccc} 1 & 0 & 1 & 0 \\ 0 & 1 & 0 & 1 \end{array}\right\rangle, \qquad
F=\left\langle \begin{array}{cccc} 1 & 0 & 0 & 1 \\ 0 & 1 & 1 & 1 \end{array}\right\rangle, \qquad
G=\left\langle \begin{array}{cccc} 1 & 0 & 1 & 1 \\ 0 & 1 & 1 & 0 \end{array}\right\rangle.
\]
Note that $\{E_1,E_2,A,B,C\}$ and $\{E_1,E_2,D,F,G\}$ form a \textbf{spread} in $E$ (a spread is a set of subspaces of the same dimension such that every $1$-dimensional space is in exactly one spread element \cite{segre}). The two spreads are isomorphic, in the sense that a change of basis of $E$ maps one to the other. Since $A$, $B$, and $C$ all intersect $D$, $F$, and $G$, deciding that at least one of $\{A,B,C\}$ is a circuit means $\{D,F,G\}$ are all independent, and vice versa. So, without loss of generality, we have completely determined the matroid $M$ if we have found which of the three $2$-dimensional spaces $A$, $B$, and $C$ are circuits and this implies that $D,F,G$ are all independent.
\begin{Lemma}\label{3ContainsSpread}
Every $3$-dimensional space $T$ contains an element of the spread \[\{E_1,E_2,A,B,C\}.\]
\end{Lemma}
\begin{proof}
This can be done via a counting argument and the pigeon hole principle. $T$ intersects all spread elements in dimension at least $1$, since $\dim E=4$. All $1$-dimensional subspaces of $E$ are by definition contained in exactly one spread element. There are five spread elements and seven $1$-dimensional subspaces in $T$, so there has to be a spread element that contains at least two $1$-dimensional subspaces of $T$, and hence intersects it in dimension $2$. But that means the whole spread element is contained in $T$.
\end{proof}
If $A$, $B$, and $C$ are all circuits, there are no other circuits because of Lemma \ref{CircuitIntersection} and axiom (C2). If not all of $A$, $B$, and $C$ are circuits, there have to be circuits of dimension $3$. These will be all the $3$-dimensional spaces that do not contain a circuit of dimension $2$. These circuits do, however, contain an element of the spread, by Lemma \ref{3ContainsSpread}. \\
We check the circuit axioms for this construction. (C1) and (C2) are clear. For (C3), notice that the sum of every pair of circuits is equal to $E$. Thus it is sufficient to show that every $3$-space contains a circuit. This is true by construction: a 3-space either contains a $2$-dimensional circuit, or it is a circuit itself. \\
We have seen that no matter what we decide for the independence of $A$, $B$, and $C$, we always get a $q$-matroid. This means that the properties of the direct sum as in Definition \ref{def-directsum1} are not enough to determine the direct sum completely: we can make a $q$-matroid with $2$, $3$, $4$ or $5$ circuits that all satisfy this definition.
\subsection{A small non-representable $q$-matroid}
As a byproduct of the example in the previous section, we find a non-representable $q$-matroid in dimension $4$. The existence of non-representable $q$-(poly)matroids was established and discussed in \cite{gluesing2021qpolyindep}. However, the example here is not included in their construction and it is also the smallest possible non-representable $q$-matroid. In the classical case, the smallest non-representable matroid is of size $8$ and rank $4$ (the V\'amos matroid). For $q$-matroids it is smaller: dimension $4$ and rank $2$.
\begin{Proposition}\label{MatrixAssoc}
Let $M$ be a representable $q$-matroid over $\mathbb{F}_2$ of rank $2$ and dimension $4$, with (at least) two circuits of dimension $2$ and no loops. Then the matrix representing $M$ has the shape
$$G:=\left[\begin{array}{cccc}
1 & \alpha & 0 & 0 \\
0 & 0 & 1 & \beta
\end{array}\right],$$
with $\alpha,\beta \in \mathbb{F}_{2^m}\setminus\mathbb{F}_2$, $m>1$.
\end{Proposition}
\begin{proof}
Since $M$ has rank $2$ and dimension $4$, the shape of the matrix is
\[ G:=\left[\begin{array}{cccc}
x_1 & x_2 & x_3 & x_4 \\
y_1 & y_2 & y_3 & y_4
\end{array}\right], \]
with all entries in $\mathbb{F}_{2^m}$.
Without loss of generality we apply row reduction and get $x_1=1, y_1=0$.
Since there are no loops, the columns of $G$ cannot be all zero.
Consider now the two circuits. They are, without loss of generality, $E_1:=\langle(1,0,0,0),(0,1,0,0) \rangle$ and
$E_2:=\langle(0,0,1,0),(0,0,0,1) \rangle$.
We have for $E_1$
$$\left[\begin{array}{cccc}
1 & x_2 & x_3 & x_4 \\
0 & y_2 & y_3 & y_4
\end{array}\right] \cdot \left[\begin{array}{cc}
1 & 0 \\
0 & 1\\
0 & 0 \\
0 & 0
\end{array}\right] = \left[\begin{array}{cc}
1 & x_2 \\
0 & y_2
\end{array}\right],$$
whose rank must be one, leading to $y_2=0$.
Similarly, for $E_2$ we have
$$\left[\begin{array}{cccc}
1 & x_2 & x_3 & x_4 \\
0 & y_2 & y_3 & y_4
\end{array}\right] \cdot \left[\begin{array}{cc}
0 & 0 \\
0 & 0\\
1 & 0 \\
0 & 1
\end{array}\right] = \left[\begin{array}{cc}
x_3 & x_4 \\
y_3 & y_4
\end{array}\right],$$
whose rank must be one, leading to the fact that $(x_3,x_4)$ and $(y_3,y_4)$ are scalar multiples. By row reduction we can conclude that $x_3=x_4=0$ and the absence of loops implies that $x_2,y_3,y_4\neq 0$. We can finally set, again by row reduction, $y_3=1$.
Note that column operations over the ground field $\mathbb{F}_2$ give an isomorphic $q$-matroid, so we have that $x_2$ and $y_4$ are elements of $\mathbb{F}_{2^m}$ but not of $\mathbb{F}_2$.
\end{proof}
\begin{Theorem}\label{NonReprDim4}
If the $q$-matroid from Section \ref{subsec-NotUnique} is representable, it cannot have $4$ circuits of dimension $2$. This gives an example of a non-representable $q$-matroid.
\end{Theorem}
\begin{proof}
We know the representation is of the form
\[ \left[\begin{array}{cccc}
1 & \alpha & 0 & 0 \\
0 & 0 & 1 & \beta
\end{array}\right], \]
with $\alpha,\beta \in \mathbb{F}_{2^m}\setminus\mathbb{F}_2$ by Proposition \ref{MatrixAssoc}.
Consider
\[ \left[\begin{array}{cccc}
1 & \alpha & 0 & 0 \\
0 & 0 & 1 & \beta
\end{array}\right] \cdot
\left[\begin{array}{cc}
a_0 & b_0 \\
a_1 & b_1 \\
a_2 & b_2 \\
a_3 & b_3
\end{array}\right] =
\left[\begin{array}{cc}
a_0+a_1\alpha & b_0+b_1\alpha \\
a_2+a_3\beta & b_2+b_3\beta
\end{array}\right].
\]
In order to have a circuit of dimension $2$, the determinant of this $2\times 2$ matrix should be zero. In particular, we need to have proportional columns.
This automatically tells us that $a_0=a_1=0$ implies $b_0=b_1=0$, and that $a_2=a_3=0$ implies $b_2=b_3=0$.
These two cases correspond to the two circuits $E_1$ and $E_2$ from Proposition \ref{MatrixAssoc}. Using the representations from Section \ref{subsec-NotUnique}, we found the determinants of all $2$-dimensional spaces $A,B,C,D,F,G$. They are the following:
\begin{itemize}
\item[$A$:] $\alpha \beta+1$
\item[$B$:] $\alpha+\beta +1$
\item[$C$:] $\alpha \beta+\beta+ \alpha$
\item[$D$:] $\alpha+\beta$
\item[$F$:] $\alpha \beta+\beta+1$
\item[$G$:] $\alpha\beta +\alpha+1$
\end{itemize}
Now, it is easy to see that if $A$ and $B$ vanish, then $C$ vanishes as well, and the same goes for $D$, $F$ and $G$. We already saw that circuits appear either in $\{A,B,C\}$ or $\{D,F,G\}$ and the other spaces are independent.
Therefore, the alternatives we have are:
\begin{itemize}
\item none of the six determinants above vanishes, so $E_1$ and $E_2$ are the only circuits of dimension $2$;
\item one determinant vanishes, so we have three circuits of dimension $2$;
\item the determinants of all the elements in a spread vanish, leading to five circuits of dimension $2$ (that are all circuits in the $q$-matroid). \qedhere
\end{itemize}
\end{proof}
\begin{Corollary}
The $q$-matroid over $\mathbb{F}_2$ of rank $2$ and dimension $4$ with four circuits, as described in Theorem \ref{NonReprDim4}, is the smallest non-representable $q$-matroid.
\end{Corollary}
\begin{proof}
See the appendix for a list of all $q$-matroids with a ground space of dimension at most $3$. All of these are representable. Hence, the $q$-matroid from Theorem \ref{NonReprDim4} is the smallest non-representable $q$-matroid.
\end{proof}
\begin{example}\label{SomeExamplesRepresentables}
Consider the finite field $\mathbb{F}_{64}$ and a primitive element $\alpha$ such that $\alpha^6=\alpha^4+\alpha^3+\alpha+1$. We give some examples of $q$-matroids of dimension $4$ and rank $2$, arising from our construction in Section \ref{subsec-NotUnique}, distinguishing them by the number of their circuits:
\begin{itemize}
\item $\left[\begin{array}{cccc}
1 & \alpha^2 & 0 & 0 \\
0 & 0 & 1 & \alpha^7
\end{array}\right]$ represents a $q$-matroid with two $2$-dimensional circuits;
\item $\left[\begin{array}{cccc}
1 & \alpha & 0 & 0 \\
0 & 0 & 1 & \alpha^8
\end{array}\right]$ represents a $q$-matroid with three $2$-dimensional circuits;
\item $\left[\begin{array}{cccc}
1 & \alpha^{42} & 0 & 0 \\
0 & 0 & 1 & \alpha^{21}
\end{array}\right]$ represents a $q$-matroid with five $2$-dimensional circuits.
\end{itemize}
\end{example}
\begin{Remark}
Example \ref{SomeExamplesRepresentables} above also tells us something about the direct sum of two representable $q$-matroids. Suppose we have two representable $q$-matroids $M_1$ and $M_2$ over the same field $K$. Suppose these $q$-matroids are representable by matrices $G_1$ and $G_2$ over an extension field $L$ of $K$. One would expect the direct sum $M_1\oplus M_2$ to be representable by
\[ G=\left[\begin{array}{cc} G_1 & 0 \\ 0 & G_2 \end{array}\right]. \]
However, this construction is not uniquely defined, in the sense that it depends on the representations of $M_1$ and $M_2$. Over $\mathbb{F}_{64}$, we can represent the $q$-matroid $U_{1,2}$ as $\left[\begin{array}{cc} 1 & \beta \end{array}\right]$ for any $\beta\in\mathbb{F}_{64}\backslash\mathbb{F}_2$. Then the $q$-matroids of Example \ref{SomeExamplesRepresentables} are all of the form of the matrix $G$ above, so we would expect all of them to represent $U_{1,2}\oplus U_{1,2}$. However, these are not isomorphic $q$-matroids.
\end{Remark}
\section{Submodular functions and associated $q$-matroids}
Our goal is to define the direct sum of $q$-matroids in terms of matroid union. Before we can define that, we need some background on integer-valued increasing submodular functions. A function $f$ on the subspaces of $E$ is submodular if the following hold for all $A,B\subseteq E$:
\[ f(A+B)+f(A\cap B) \leq f(A)+f(B). \]
Such a function can be viewed as the rank function of a $q$-polymatroid, and we refer to \cite{gluesing2021qpolyindep} for an extension of, and some overlap with, the results presented here.
The following proposition and corollary are the $q$-analogues of Proposition 11.1.1 and Corollary 11.1.2 in \cite{oxley}.
\begin{Proposition}\label{CircuitsOfMf}
Let $f$ be an integer-valued increasing submodular function on the subspaces of a finite-dimensional vector space $E$. Let
\[ \mathcal{C}(f)=\{C\subseteq E :C\text{ is non-trivial and minimal w.r.t. inclusion s.t. }f(C)<\dim(C)\}. \]
Then $\mathcal{C}(f)$ is the collection of circuits of a $q$-matroid $M(f)=(E,\mathcal{C}(f))$.
\end{Proposition}
\begin{proof}
We prove that $\mathcal{C}(f)$ satisfies the circuit axioms from Definition \ref{circuit-axioms}. The axiom (C1) holds by definition and, by minimality, we have (C2).\\
Let us now prove (C3). Let $C_1 \neq C_2$ be two elements of $\mathcal{C}(f)$ and let $X$ be a codimension $1$ space containing neither $C_1$ nor $C_2$ (otherwise the assertion holds vacuously).
We have $C_i \cap X \subsetneq C_i$ for $i=1,2$. Therefore,
$C_i \cap X \notin \mathcal{C}(f)$ by (C2) and so $\dim(C_i \cap X) \leq f(C_i \cap X)$ for $i=1,2$. Since $f$ is increasing,
\[
\dim(C_i \cap X) \leq f(C_i \cap X) \leq f(C_i) < \dim(C_i)
\]
and $\dim(C_i\cap X)=\dim(C_i)-1$, we have $\dim(C_i)-1 =f(C_i)$.
Since $f$ is increasing, it suffices to show $f((C_1+C_2)\cap X)<\dim((C_1+C_2)\cap X)$, because then $(C_1+C_2)\cap X$ contains a circuit. Now $f$ is increasing and submodular, so
\[
f((C_1+C_2)\cap X)\leq f(C_1+C_2)
\leq f(C_1)+f(C_2)-f(C_1 \cap C_2)\]
and because $C_1 \cap C_2 \subsetneq C_i$ for $i=1,2$, by minimality, $f(C_1 \cap C_2) \geq \dim(C_1 \cap C_2)$. Finally,
\begin{align*}
f((C_1+C_2)\cap X) & \leq f(C_1)+f(C_2)-f(C_1 \cap C_2) \\
& = \dim(C_1)+\dim(C_2)-2-f(C_1 \cap C_2) \\
& \leq \dim(C_1+C_2)-2 \\
& =\dim((C_1+C_2)\cap X)-1.
\end{align*}
This shows that $M(f)$ is a $q$-matroid defined by its circuits $\mathcal{C}(f)$.
\end{proof}
The following is a direct result of the definition of $\mathcal{C}(f)$ and the fact that every proper subspace of a circuit is independent.
\begin{Corollary}\label{IndepInMf}
A subspace $I\subseteq E$ is independent in $M(f)$ if and only if $\dim(I')\leq f(I')$ for all nontrivial subspaces $I'$ of $I$.
\end{Corollary}
The next theorem is the $q$-analogue of \cite[Chapter 8.1 Theorem 2]{welsh1976matroid}. We point out that Theorem \ref{thm-FunctionToMatroid} and Proposition \ref{IndepOfPoly} were already proven in \cite[Theorem 3.9]{gluesing2021qpolyindep}, but with the minimum taken over the subspaces of $A$ instead of all spaces in $E$. See also Remark \ref{r-incl-only}.
\begin{Theorem}\label{thm-FunctionToMatroid}
Let $f$ be a non-negative integer-valued increasing submodular function on the subspaces of $E$ with $f(0)=0$. Then
\[ r(A)=\min_{X\subseteq E}\{f(X)+\dim(A)-\dim(A\cap X)\} \]
is the rank function of a $q$-matroid.
\end{Theorem}
\begin{proof}
We will prove that the function $r$ satisfies the rank axioms. It is clear that $r$ is integer-valued. It is non-negative because both $f(A)$ and $\dim(A)-\dim(A\cap X)$ are non-negative. By taking $X=\{0\}$ in the definition, we get $f(\{0\})+\dim(A)-\dim(\{0\})=\dim(A)$ and therefore $r(A)\leq\dim(A)$. This proves (r1). \\
In order to prove (r2), let $A\subseteq B\subseteq E$. Then for any $X\subseteq E$, we have that $\dim(B)-\dim(A)\geq\dim(B\cap X)-\dim(A\cap X)$. It follows that
\[ f(X)+\dim(A)-\dim(A\cap X) \leq f(X)+\dim(B)-\dim(B\cap X) \]
for all $X\subseteq E$ and thus $r(A)\leq r(B)$. The proof of (r3) is rather technical, but essentially a lot of rewriting. We first claim that
\begin{align*}
& \dim(A)-\dim(A\cap X)+\dim(B)-\dim(B\cap Y) \geq \\
& \quad \dim(A+B)-\dim((A+B)\cap(X+Y))+\dim(A\cap B)-\dim((A\cap B)\cap(X\cap Y)).
\end{align*}
This statement will be used later on in the proof. By using that
\[ \dim(A)+\dim(B)=\dim(A+B)+\dim(A\cap B)\]
and multiplying by $-1$ we can rewrite our claim as
\[ \dim(A\cap X)+\dim(B\cap Y) \leq \dim((A+B)\cap(X+Y))+\dim((A\cap B)\cap(X\cap Y)). \]
Using the modular equality again, we get
\begin{align*}
\dim((A\cap B)\cap(X\cap Y)) &= \dim((A\cap X)\cap(B\cap Y)) \\
& = \dim(A\cap X)+\dim(B\cap Y)-\dim((A\cap X)+(B\cap Y))
\end{align*}
and thus our claim is equivalent to
\[ \dim((A\cap X)+(B\cap Y))\leq\dim((A+B)\cap(X+Y)).\]
To prove this, it is enough to show the inclusion of vector spaces $(A\cap X)+(B\cap Y)\subseteq(A+B)\cap(X+Y)$. Let $\mathbf{a}\in A\cap X$ and $\mathbf{b}\in B\cap Y$ be two nonzero vectors. Then $\mathbf{a}+\mathbf{b}\in(A\cap X)+(B\cap Y)$. We prove that $\mathbf{a}+\mathbf{b}\in(A+B)\cap(X+Y)$. Because $\mathbf{a}\in A$ also $\mathbf{a}\in A+B$ and because $\mathbf{a}\in X$ also $\mathbf{a}\in X+Y$. So $\mathbf{a}\in(A+B)\cap(X+Y)$. By a similar reasoning, $\mathbf{b}\in(A+B)\cap(X+Y)$. So $\mathbf{a}+\mathbf{b}\in(A+B)\cap(X+Y)$ as was to be shown. This finishes the proof of our claim. \\
We can now get back to proving axiom (r3). In the third step we use the claim together with the submodularity of $f$. In the fourth step we set $U=X+Y$ and $V=X\cap Y$. This will not produce all possible $U,V\subseteq E$, so the minimum is at least as big as the minimum over all $U,V\subseteq E$.
\begin{align*}
\lefteqn{r(A)+r(B)} \\
& = \min_{X\subseteq E}\{f(X)+\dim(A)-\dim(A\cap X)\} + \min_{Y\subseteq E}\{f(Y)+\dim(B)-\dim(B\cap Y)\} \\
& = \min_{X,Y\subseteq E}\{f(X)+f(Y)+\dim(A)-\dim(A\cap X)+\dim(B)-\dim(B\cap Y)\} \\
& \geq \min_{X,Y\subseteq E}\{ f(X+Y)+f(X\cap Y)+\dim(A+B)-\dim((A+B)\cap(X+Y)) \\
& \qquad +\dim(A\cap B)-\dim((A\cap B)\cap(X\cap Y))\} \\
& \geq \min_{U,V\subseteq E}\{f(U)+f(V)+\dim(A+B)-\dim((A+B)\cap U) \\
& \qquad +\dim(A\cap B)-\dim((A\cap B)\cap V)\} \\
& = \min_{U\subseteq E}\{f(U)+\dim(A+B)-\dim((A+B)\cap U)\} \\
& \qquad + \min_{V\subseteq E}\{f(V)+\dim(A\cap B)-\dim((A\cap B)\cap V)\} \\
& = r(A+B)+r(A\cap B).
\end{align*}
So the rank function $r$ satisfies all rank axioms (r1),(r2),(r3).
\end{proof}
\begin{Remark}\label{r-incl-only}
Note that the minimum in Theorem \ref{thm-FunctionToMatroid} is taken over all subspaces of $E$. This is convenient for some of the proofs, but not strictly necessary. Let $X\subseteq E$ and let $X'=A\cap X$. Then
\[ f(X')+\dim(A)-\dim(A\cap X') \leq f(X)+\dim(A)-\dim(A\cap X) \]
because $f(X')\leq f(X)$ and $\dim(A\cap X')=\dim(A\cap X)=\dim(X')$. This means that the minimum over all subspaces $X\subseteq E$ is the same as the minimum taken only over the subspaces $X'\subseteq A$. This makes calculating the rank function a lot faster in practice.
\end{Remark}
The next proposition shows that the $q$-matroids from Corollary \ref{IndepInMf} and Theorem \ref{thm-FunctionToMatroid} are the same.
\begin{Proposition}\label{IndepOfPoly}
Let $f$ be a non-negative integer-valued increasing submodular function with $f(0)=0$. Let $M(f)$ be the corresponding $q$-matroid as defined in Corollary \ref{IndepInMf} with independent spaces $\mathcal{I}$. Let $r$ be the rank function as defined in Theorem \ref{thm-FunctionToMatroid}. Then both give the same $q$-matroid because $r(I)=\dim(I)$ for all $I\in\mathcal{I}$.
\end{Proposition}
\begin{proof}
We have to prove that $r(I)=\dim(I)$ if and only if $\dim(I')\leq f(I')$ for all nontrivial subspaces $I'$ of $I$. Note that since $f(0)=0$, this holds for all subspaces $I'$ of $I$, also the trivial one. (Note that Proposition \ref{CircuitsOfMf} does not require $f(0)=0$, but Theorem \ref{thm-FunctionToMatroid} does.)
From the remark before we have that
\[ r(I)=\min_{I'\subseteq I}\{f(I')+\dim(I)-\dim(I')\}. \]
As already proven in Theorem \ref{thm-FunctionToMatroid}, $r(I)\leq\dim(I)$. For the other inequality, the following are equivalent:
\begin{align*}
I\in\mathcal{I}(M(f)) &\Leftrightarrow f(I')\geq\dim(I') \text{ for all }I'\subseteq I \\
&\Leftrightarrow f(I')+\dim(I)-\dim(I')\geq\dim(I) \text{ for all }I'\subseteq I \\
&\Leftrightarrow r(I)\geq\dim(I)
\end{align*}
This proves that $r(I)=\dim(I)$.
\end{proof}
\section{Matroid union}\label{MatUn}
In this section we define the $q$-analogue of matroid union by means of its rank function and we determine its independent spaces.
\begin{Definition}\label{DefUnione}
Let $M_1$ and $M_2$ be two $q$-matroids on the same ground space $E$, with rank functions $r_1$ and $r_2$, respectively. Then the \textbf{matroid union} $M_1\vee M_2$ is defined by the rank function
\[ r(A)=\min_{X\subseteq A}\{r_1(X)+r_2(X)+\dim A-\dim X\} \]
\end{Definition}
\begin{Theorem}\label{Union_q-Matr}
Let $M_1$ and $M_2$ be two $q$-matroids on the same ground space $E$, with rank functions $r_1$ and $r_2$, respectively. Then the matroid union $M_1\vee M_2$ is a $q$-matroid.
\end{Theorem}
\begin{proof}
For all $A\subseteq E$, define a function $f(A)=r_1(A)+r_2(A)$. We claim that $f$ is a non-negative integer-valued submodular function on the subspaces of $E$ with $f(0)=0$. \\
Note that $r_1$ and $r_2$ are non-negative integer valued submodular functions on the subspaces of $E$ with $r_1(\{0\})=r_2(\{0\})=0$. It follows directly that $f$ is a non-negative integer-valued function on the subspaces of $E$. It is increasing, because for all $A\subseteq B\subseteq E$ we have
\[ f(A)= r_1(A)+r_2(A)\leq r_1(B)+r_2(B)=f(B). \]
Furthermore, $f$ is submodular, because for all $A,B\subseteq E$ we have
\begin{align*}
f(A+B)+f(A\cap B) & = r_1(A+B)+r_2(A+B)+r_1(A\cap B)+r_2(A\cap B) \\
& \leq r_1(A)+r_1(B)+r_2(A)+r_2(B) \\
& = f(A)+f(B).
\end{align*}
Now we apply Theorem \ref{thm-FunctionToMatroid} and Remark \ref{r-incl-only} to the function $f$: this shows that the function $r$ of Definition \ref{DefUnione} is indeed the rank function of a $q$-matroid $(E,r)$.
\end{proof}
We gather some important properties of the matroid union.
\begin{Remark}\label{dependsOnCoordinates}
The matroid union is not always invariant under coordinatisation. That is, if $\varphi:\mathcal{L}(E)\longrightarrow\mathcal{L}(E)$ is a lattice isomorphism, then it is direct from the definition that $\varphi(M_1)\vee\varphi(M_2)=\varphi(M_1\vee M_2)$. However, $M_1\vee M_2$ is not necessarily isomorphic to $\varphi(M_1)\vee M_2$. We illustrate this with a small example. \\
Let $M_1$ and $M_2$ both be isomorphic to the mixed diamond, see \ref{qcd2}. That is: $\dim(E)=2$, $r(E)=1$ and $r(A)=1$ for all $1$-dimensional spaces except one loop. Suppose the loop is at the same coordinates for both $M_1$ and $M_2$, call this subspace $\ell$. Then the rank of $M_1\vee M_2$ is one, as we will show. Consider all $X\subseteq E$. If $\dim(X)=0$ or $\dim(X)=2$ then the expression inside the minimum of Definition \ref{DefUnione} is equal to $2$. If $\dim(X)=1$ we have to distinguish between $\ell$ and any other space. If $X=\ell$ the expression is $0+0+2-1=1$, otherwise it is $1+1+2-1=3$. Therefore, $r(E)=1$. \\
Consider now the case where the loop of $M_1$ is $\ell_1$ and the loop of $M_2$ is $\ell_2$, with $\ell_1\neq\ell_2$. Then the calculations are as before for $\dim(X)=0$ or $\dim(X)=2$. For $\dim(X)=1$ and $X\neq\ell_1,\ell_2$ we get $1+1+2-1=3$. If $X=\ell_1$ we get $0+1+2-1=2$, and similarly for $X=\ell_2$ we get $1+0+2-1=2$. So $r(E)=2$. \\
This example illustrates that we have to be careful to define $M_1$ and $M_2$ precisely, not just up to isomorphism.
\end{Remark}
We prove two straightforward lemmas concerning the matroid union.
\begin{Lemma}\label{AddGreenDoesNothing}
Let $M_1$ and $M_2$ be two $q$-matroids on the same ground space $E$ and let $r(M_2)=0$. Then $M_1\vee M_2=M_1$ and in particular, $M\vee U_{0,n}=M$.
\end{Lemma}
\begin{proof}
The rank function of the matroid union is equal to
\[ r(A)=\min_{X\subseteq A}\{r_1(X)+0+\dim A-\dim X\}. \]
By local semimodularity (Lemma \ref{localSemimodularity}), $r_1(X)-\dim(X)+\dim(A)\geq r_1(A)$ for all $X\subseteq A$ and equality is attained for $X=A$. Hence $r(A)=r_1(A)$ and $M_1\vee M_2=M_1$.
\end{proof}
\begin{Lemma}\label{IndipUnionFromM1M2}
Let $M_1$ and $M_2$ be $q$-matroids on the same ground space $E$. Let $I$ be independent in both $M_1$ and $M_2$. Then $I$ is independent in $M_1\vee M_2$.
\end{Lemma}
\begin{proof}
We have that $r(M_1;I)=r(M_2;I)=\dim I$ by definition. Also, all subspaces of $I$ are independent. This means that
\begin{align*}
r(M_1\vee M_2;I) &= \min_{X\subseteq I}\{r(M_1;X)+r(M_2;X)+\dim I-\dim X\} \\
&= \min_{X\subseteq I}\{\dim X+\dim X+\dim I-\dim X\} \\
&= \dim I.
\end{align*}
We conclude that $I$ is independent in $M_1\vee M_2$.
\end{proof}
The independent spaces of the matroid union can be found in the following way.
\begin{Theorem}\label{indipUnion}
Let $M_1=(E,\mathcal{I}_1)$ and $M_2=(E,\mathcal{I}_2)$ be two $q$-matroids defined by their independent spaces. Then $I\subseteq E$ is an independent space of the matroid union $M_1\vee M_2$ if and only if for all $J\subseteq I$ there exist $I_1\in\mathcal{I}_1$ and $I_2\in\mathcal{I}_2$ such that $J=I_1\oplus I_2$. We notate the collection of independent spaces of $M_1\vee M_2$ by $\mathcal{I}$.
\end{Theorem}
\begin{proof}
Let $f(A)=r_1(A)+r_2(A)$, as in the proof of Theorem \ref{Union_q-Matr}. According to Corollary \ref{IndepInMf} and Proposition \ref{IndepOfPoly}, we know that the independent spaces of the matroid union are exactly those $I\subseteq E$ such that for all nontrivial subspaces $J\subseteq I$ we have $\dim J\leq f(J)$. \\
First, let $I\subseteq E$ such that all nontrivial subspaces $J\subseteq I$ can be written as $J=I_1\oplus I_2$ with $I_1\in\mathcal{I}_1$ and $I_2\in\mathcal{I}_2$. We need to prove that $I$ is independent in $M_1\vee M_2$, that is, for all $J\subseteq I$ it holds that $\dim J\leq f(J)$. This follows from
\[ \dim(J)=\dim(I_1)+\dim(I_2) = r_1(I_1)+r_2(I_2) \leq r_1(J)+r_2(J)=f(J), \] the inequality coming from the axiom (r2). \\
For the other implication, let $I$ be independent in $M_1\vee M_2$. We need to show that we can write all $I$ as $I=I_1\oplus I_2$ with $I_1\in\mathcal{I}_1$ and $I_2\in\mathcal{I}_2$. Because all subspaces of an independent space are independent, this proves the statement. \\
First, note that if $I$ is independent in $M_1\vee M_2$, then its rank is equal to its dimension: $\dim I=r(I)=\min_{X\subseteq I}\{r_1(X)+r_2(X)+\dim I-\dim X\}$. Therefore, for each $X \subseteq I$ it holds
\[ r_1(X)+r_2(X)-\dim X\geq 0. \]
We will proceed by mathematical induction on the dimension of $I$. If $\dim I=0$ then $I=\{0\}$ and we can write $\{0\}=\{0\}\oplus\{0\}$ where $\{0\}\in\mathcal{I}_1$ and $\{0\}\in\mathcal{I}_2$. If $\dim I=1$, then $r_1(I)+r_2(I)\geq1$ so $I$ is independent in at least one of $M_1$ and $M_2$. Without loss of generality, let $I$ be independent in $M_1$, then we can write $I=I\oplus\{0\}$ with $I\in\mathcal{I}_1$ and $\{0\}\in\mathcal{I}_2$. \\
Let $I$ be independent in $M_1\vee M_2$ with $\dim I=h+1$. Let $J\subseteq I$ with $\dim J=h$ and $J=J_1\oplus J_2$ for some $J_1\in\mathcal{I}_1$ and $J_2\in\mathcal{I}_2$. We will show that there is a $1$-dimensional $x\subseteq I-J$ such that either $J_1\oplus x\in\mathcal{I}_1$ or $J_2\oplus x\in\mathcal{I}_2$. \\
Assume that for all $x\subseteq I-J$ the space $J_1\oplus x$ is dependent in $M_1$. This implies that $r_1(J_1\oplus x)=r_1(J_1)$ and by \cite[Prop. 7]{JP18} we have $r_1(I)=r_1(J_1)=\dim J_1$. Applying the inequality $r_1(X)+r_2(X)-\dim X\geq 0$ above with $X=I$, we have the following equivalent statements:
\begin{align*}
r_1(I)+r_2(I)-\dim I &\geq 0 \\
\dim J_1+r_2(I)-(\dim J_1+\dim J_2+1) &\geq 0 \\
r_2(I) &\geq \dim J_2+1
\end{align*}
and hence $r_2(I)\geq 1$. This implies that not all $x\subseteq I-J$ can be loops in $M_2$, because if they were, by semimodularity this would imply $r_2(I)=0$. So assume $x\subseteq I-J$ with $r_2(x)=1$. Then, by applying semimodularity in $M_2$ again, we get
\begin{align*}
r_2(J_2+x)+r_2(J_2\cap x) &\geq r_2(J_2)+r_2(x) \\
r_2(J_2\oplus x)+0 &\geq \dim J_2+1
\end{align*}
and it follows that $J_2\oplus x$ is independent in $M_2$. This gives the decomposition $I=J_1\oplus (J_2\oplus x)$ with $J_1\in\mathcal{I}_1$ and $J_2\oplus x\in\mathcal{I}_2$.
\end{proof}
\begin{Remark}
We want to point out that Theorem \ref{indipUnion} is indeed a $q$-analogue of the classical case. There the independent sets of the matroid union are defined by
\[ \mathcal{I}=\{I_1\cup I_2:I_1\in\mathcal{I}_1,I_2\in\mathcal{I}_2\}. \]
First of all, note that the union can be rewritten as a disjoint union. Let $I=J_1\cup J_2$ with $J_1\in\mathcal{I}_1$ and $J_2\in\mathcal{I}_2$. Take $I_1=J_1$ and $I_2=J_2-J_1$, then $I=I_1\sqcup I_2$. This procedure does not create a unique $I_1$ and $I_2$, there is a lot of choice involved. However, it does imply that every independent set $I$ of the matroid union is of the form $I=I_1\sqcup I_2$, and conversely, every $I=I_1\sqcup I_2$ is independent in the matroid union.
In the classical case, if $I=I_1\sqcup I_2$ then for all $\bar{J}\subseteq I$ we can write directly $\bar{J}=\bar{J}_1\sqcup \bar{J}_2$ with $\bar{J}_1=\bar{J}\cap I_1$ and $\bar{J}_2=\bar{J}\cap I_2$. Since $\bar{J}_1\subseteq I_1$ and $\bar{J}_2\subseteq I_2$, these are independent. This reasoning does not hold in the $q$-analogue (see also the Introduction), which is why we specifically have to state it in the definition. For a counterexample, see the example in Remark \ref{dependsOnCoordinates}: if $\ell_1=\ell_2=\ell$ we can write $E=I_1\oplus I_2$ for some $1$-dimensional $I_1$ and $I_2$ that are not equal to $\ell$, but we cannot write $\ell$ as the direct sum of independent spaces of $M_1$ and $M_2$.
\end{Remark}
\section{Matroid intersection and duality} \label{IntDual}
We complete our study of the matroid union for $q$-matroids by defining the dual operation, that is, matroid intersection. We follow \cite[p.123]{welsh1976matroid}.
\begin{Definition}
Let $M_1$ and $M_2$ be $q$-matroids on the same ground space $E$ with collection of spanning spaces $\mathcal{S}(M_1)$ and $\mathcal{S}(M_2)$. Define the \textbf{$q$-matroid intersection} of $M_1$ and $M_2$ by its spanning spaces:
\[ \mathcal{S}(M_1\wedge M_2)=\{S_1\cap S_2: S_1\in\mathcal{S}(M_1), S_2\in\mathcal{S}(M_2)\}. \]
\end{Definition}
We need to prove that it is a $q$-matroid. This can be done by checking the axioms for spanning spaces, but we can also do this by proving a more general result:
\begin{Theorem}\label{thm-intersection}
Let $M_1$ and $M_2$ be $q$-matroids on the same ground space $E$. Then
\[ M_1\wedge M_2=(M_1^*\vee M_2^*)^*. \]
\end{Theorem}
\begin{proof}
From \cite{BCR21} we know that the orthogonal complements of the spanning spaces of a $q$-matroid $M$ are the independent spaces of the dual $q$-matroid $M^*$. So we have to prove that the orthogonal complements of $\mathcal{S}(M_1\wedge M_2)$ are the independent spaces of $M_1^*\vee M_2^*$. \\
First, start with $S\in\mathcal{S}(M_1\wedge M_2)$. Then we can write $S=S_1\cap S_2$ for $S_1\in\mathcal{S}(M_1)$ and $S_2\in\mathcal{S}(M_2)$. Let $S'\supseteq S$ be a superspace of $S$. Then we can write $S'=T_1\cap T_2$ for $T_1\supseteq S_1$ and $T_2\supseteq S_2$. Note that $T_1$ and $T_2$ are also spanning spaces of $M_1$ and $M_2$, respectively. Now we take orthogonal complements.
The orthogonal complement is $S^\perp=S_1^\perp+S_2^\perp$. Now we can write $S_1^\perp=I_1^*$ with $I_1^*$ independent in $M_1^*$, and similarly, $S_2^\perp=I_2^*$ is independent in $M_2^*$. We need to prove that $I_1^*+I_2^*$ is independent in $M_1^*\vee M_2^*$, that is, we have to show that all $I'\subseteq I_1^*+I_2^*$ can be written as $J_1\oplus J_2$ with $J_1$ independent in $M_1^*$ and $J_2$ independent in $M_2^*$. Note that all $I'\subseteq I_1^*+I_2^*$ can be written as $I'=(S')^\perp$, with $S'=T_1\cap T_2$ as above. If we take orthogonal complements of $T_1$ and $T_2$, we get independent spaces of $M_1^*$ and $M_2^*$. So we can write $I'=J_1\oplus J_2$. (We can always make the sum a direct sum by taking a subspace of $J_1$ if necessary.) We conclude that $I_1^*+I_2^*$ is independent in $M_1^*\vee M_2^*$.
For the opposite inclusion, start with an independent space $I$ of $M_1^*\vee M_2^*$. Then by Theorem \ref{indipUnion} we can write $I=I_1+I_2$ with $I_1$ independent in $M_1^*$ and $I_2$ independent in $M_2^*$. Taking orthogonal complements gives that $I^\perp=I_1^\perp\cap I_2^\perp=S_1\cap S_2$ for spanning spaces $S_1$ in $M_1$ and $S_2$ in $M_2$. This implies that $I^\perp$ is in $\mathcal{S}(M_1\wedge M_2)$.
Since $(M_1^*\vee M_2^*)^*$ is a $q$-matroid, this shows that $M_1\wedge M_2$ is a $q$-matroid as well.
\end{proof}
We have the following corollary on intersection, union, and restriction and contraction.
\begin{Corollary}\label{DualUnionIntersection}
Let $M_1$ and $M_2$ be $q$-matroids on the same ground space $E$. Then, for $T\subseteq E$,
\[ (M_1\vee M_2)|_T = M_1|_T\vee M_2|_T \]
and also
\[ (M_1\wedge M_2)/T \cong (M_1/T)\wedge(M_2/T). \]
\end{Corollary}
\begin{proof}
The first part of the statement follows directly from Definition \ref{DefUnione} of the matroid union and the fact that the rank function of the restriction is $r_{M|_T}(A)=r(A)$. The second statement follows from the first by applying Theorem \ref{thm-intersection}, using $(M/T)^* \cong M^*|_{T^\perp}$ from Theorem \ref{DualRestrContr}, and then applying Theorem \ref{thm-intersection} again.
\begin{align*}
(M_1/T)\wedge(M_2/T) & = ((M_1/T)^*\vee(M_2/T)^*)^* \\
& \cong ((M_1^*|_{T^\perp})\vee(M_2^*|_{T^\perp}))^* \\
& = ((M_1^*\vee M_2^*)|_{T^\perp})^* \\
& \cong (M_1^*\vee M_2^*)^*/T \\
& = (M_1\wedge M_2)/T. \qedhere
\end{align*}
\end{proof}
We finish this section with the dual of Lemma \ref{AddGreenDoesNothing}.
\begin{Lemma}\label{IntersectAllRed}
Let $M$ be a $q$-matroid. Then $M= M\wedge U_{n,n}$.
\end{Lemma}
\begin{proof}
Applying Lemma \ref{AddGreenDoesNothing} to $M^*$ gives that $M^*= M^*\vee U_{0,n}=(M\wedge U_{n,n})^*$. Dualising both sides gives the desired result.
\end{proof}
\section{The direct sum}\label{DirSum}
In this section we will define the direct sum of two $q$-matroids. The idea will be to first add loops to $M_1$ and $M_2$, so they are on the same ground space, and then taking their matroid union. In the classical case, we can also write the direct sum like this: the idea for this construction comes from \cite[Proposition 7.6.13 part 2]{brylawski_1986}.
\subsection{Defining the direct sum} \label{AdL}
The next definition explains how to ``add a loop'' to a $q$-matroid.
\begin{Definition}\label{AggiungiLoop}
Let $M=(E,r)$ be a $q$-matroid. Then the direct sum of $M$ and a loop $\ell$ is denoted by $M'=M\oplus \ell$ and constructed in the following way. Let $E'=E+\ell$. Then for every $A'\subseteq E'$ we can write $A'+\ell=A\oplus\ell$ for a unique $A\subseteq E$. Then $r'(A')=r(A)$.
\end{Definition}
\begin{Remark}\label{caseSplitting}
The definition above divides the subspaces of $E'$ into three different kinds.
\begin{itemize}
\item If $A'\subseteq E$ then $A'=A$ and $\dim A'=\dim A$.
\item If $A'\supseteq \ell$ then $A=A'\cap E$ and $\dim A=\dim A'-1$.
\item If $A'$ is not contained in $E$ and does not contain $\ell$, then $\dim A'=\dim A$. There is a diamond with bottom $A'\cap A\subseteq E$, top $A'+\ell$ and with $A$ and $A'$ in between.
\end{itemize}
\end{Remark}
This construction is well defined, in the sense that it gives a $q$-matroid, as the next theorem shows.
\begin{Theorem}\label{AddLoop}
The direct sum $M'=M\oplus\ell$ as defined above is a $q$-matroid, that is, the rank function $r'$ satisfies (r1),(r2),(r3).
\end{Theorem}
\begin{proof}
(r1) Since $r(A)\geq0$ we have $r'(A')\geq0$ as well. We get that $r'(A')=r(A)\leq\dim A\leq\dim A'$ by Remark \ref{caseSplitting}.
(r2)
Let $A'\subseteq B'$. Since
$A=(A'+\ell)\cap E$,
$B=(B'+\ell)\cap E$ and
$A'+\ell \subseteq B'+\ell$, we have that $A \subseteq B$. Therefore $r'(A')=r(A)\leq r(B)=r'(B')$.
For (r3) let $A',B'\subseteq E'$. We first claim that $(A'+B')+\ell=(A+B)+\ell$ and $(A'\cap B')+\ell=(A\cap B)+\ell$, because this implies that
\begin{align*}
r'(A'+B') + r'(A'\cap B') & = r(A+B) + r(A\cap B) \\
& \leq r(A)+r(B) \\
& = r'(A')+r'(B').
\end{align*}
Now let us prove the claims. For addition, we see that
\[ (A'+B')+\ell=(A'+\ell)+(B'+\ell)=(A+\ell)+(B+\ell)=(A+B)+\ell. \]
For intersection we distinguish three cases depending on whether $A'$ and $B'$ contain $\ell$.
\begin{itemize}
\item Let $\ell\not\subseteq A',B'$. Then $(A'\cap B')+\ell=(A'+\ell)\cap(B'+\ell)=(A+\ell)\cap(B+\ell)=(A\cap B)+\ell$.
\item Let $\ell\subseteq A',B'$. Then $(A'\cap B')+\ell=A'\cap B'=(A+\ell)\cap(B+\ell)=(A\cap B)+\ell$.
\item Let $\ell\subseteq A'$ and $\ell\not\subseteq B'$. Then $(A'\cap B')+\ell=((A'\cap E)\cap B')+\ell=((A'\cap E)+\ell)\cap(B'+\ell)=(A+\ell)\cap(B+\ell)=(A\cap B)+\ell$.
\end{itemize}
The function $r'$ satisfies the axioms (r1),(r2),(r3), hence $M'$ is a $q$-matroid.
\end{proof}
We combine the adding of loops and the matroid union to define the direct sum.
\begin{Definition}\label{DirSumWithUnion}
Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be two $q$-matroids on trivially intersecting ground spaces. Let $n_1=\dim E_1$ and $n_2=\dim E_2$. We construct the direct sum $M_1\oplus M_2$ as follows.
\begin{itemize}
\item Let $E=E_1\oplus E_2$. This will be the ground space of $M$. By slight abuse of notation, we denote by $E_i$ both the ground space of $M_i$ and the embedding of $E_i$ in $E$.
\item In the lattice $\mathcal{L}(E)$ we have that the intervals $[0,E_1]$ and $[E_2,1]$ are isomorphic to $\mathcal{L}(E_1)$, and the intervals $[0,E_2]$ and $[E_1,1]$ are isomorphic to $\mathcal{L}(E_2)$. Fix the involution $\perp$ such that $E_1^\perp=E_2$.
\item Add $n_2$ times a loop to $M_1$, using Theorem \ref{AddLoop}. This gives the $q$-matroid $M_1'$ on ground space $E$. Assume that $M_1'|_{E_1}\cong M_1$ and $M_1'|_{E_2}\cong U_{0,n_2}$.
\item Add $n_1$ times a loop to $M_2$, using again Theorem \ref{AddLoop}. This gives the $q$-matroid $M_2'$ on ground space $E$. Assume that $M_2'|_{E_1}\cong U_{0,n_1}$ and $M_2'|_{E_2}\cong M_2$.
\end{itemize}
Now the direct sum is defined as $M_1\oplus M_2=M_1'\vee M_2'$, with the matroid union as in Theorem \ref{Union_q-Matr}.
\end{Definition}
Note that this procedure is well-defined, since we already showed that adding loops and taking the matroid union are well-defined constructions. We do, however, have to show that this procedure always defines the same $q$-matroid up to isomorphism, since it was observed in Remark \ref{dependsOnCoordinates} that matroid union is not invariant under coordinatisation.
\begin{Theorem}\label{IsomorphicIsLinearAlgebra}
Let $M_1=(E_1,r_1)$ and $M_2=(E_2,r_2)$ be two $q$-matroids on trivially intersecting ground spaces and let $M=M_1\oplus M_2$ be their direct sum as constructed in Definition \ref{DirSumWithUnion}. Let $\varphi_i$ be a lattice-isomorphism of $\mathcal{L}(E_i)$ for $i=1,2$. Then there is an isomorphism $\psi$ of $\mathcal{L}(E)$ such that $\varphi_1(M_1)\oplus\varphi_2(M_2)=\psi(M)$.
\end{Theorem}
\begin{proof}
Let $\psi$ be an isomorphism on $\mathcal{L}(E)$ such that $\psi|_{E_1}=\varphi_1$ and $\psi|_{E_2}=\varphi_2$. We can construct $\psi$ by its images of $n_1+n_2$ linearly independent $1$-dimensional spaces: we find these by taking the image under $\varphi_1$ of $n_1$ linearly independent $1$-spaces in $E_1$ and the image under $\varphi_2$ of $n_2$ linearly independent $1$-spaces in $E_2$. \\
Let $A\subseteq E$ and let $B\subseteq E_1$ such that $A+E_2=B\oplus E_2$. This means that $B=(A+E_2)\cap E_1$. Now we have that
\begin{align*}
(\psi(A)+E_2)\cap E_1 &= (\psi(A)+\psi(E_2))\cap E_1 \\
&= \psi(A+E_2)\cap E_1 \\
&= \psi(B\oplus E_2) \cap E_1 \\
&= (\psi(B)\oplus\psi(E_2))\cap E_1 \\
&= (\varphi_1(B)\oplus E_2)\cap E_1 \\
&= \varphi_1(B) \\
&= \varphi_1((A+E_2)\cap E_1).
\end{align*}
The rank function of $\varphi_1(M_1)'$ is equal to
\begin{align*}
r(\varphi_1(M_1)';A) & = r(\varphi_1(M_1);(A+E_2)\cap E_1) \\
& = r(M_1;\varphi_1((A+E_2)\cap E_1)) \\
& = r(M_1; (\psi(A)+E_2)\cap E_1) \\
& = r(M_1';\psi(A) ).
\end{align*}
We have a similar argument for $M_2$ and $\varphi_2$. Combining these gives that
\begin{align*}
\lefteqn{r(\varphi_1(M_1)\oplus\varphi_2(M_2);A)} \\
& = r(\varphi_1(M_1)'\vee\varphi_2(M_2)';A) \\
&= \min_{X\subseteq E}\{ r(\varphi_1(M_1)';X)+r(\varphi_2(M_2)';X)+\dim A-\dim(A\cap X)\} \\
&= \min_{\psi(X)\subseteq E}\{ r(M_1';\psi(X))+r(M_2';\psi(X))+\dim\psi(A)-\dim(\psi(A)\cap \psi(X))\} \\
&= r(M;\psi(A)) = r(\psi(M);A).
\end{align*}
This proves the theorem.
\end{proof}
The next lemma is a direct consequence of Theorem \ref{thm:directsumrank}, but we prove it now to make the calculations in the next section easier.
\begin{Lemma}\label{l-RankOfSum}
For two $q$-matroids $M_1$ and $M_2$ it holds that
\[r(M_1\oplus M_2)=r(M_1)+r(M_2).\]
\end{Lemma}
\begin{proof}
By applying Definitions \ref{AggiungiLoop} and \ref{DirSumWithUnion}, we get that
\[ r(M_1\oplus M_2)=r(M_1'\vee M_2';E)=\min_{X\subseteq E}\{ r(M_1';X)+r(M_2';X)+\dim E-\dim X\}. \]
If we take $X=E$, we get that
\[ r(M_1';X)+r(M_2';X)+\dim E-\dim X = r(M_1';E)+r(M_2';E)=r(M_1)+r(M_2). \]
Now let $Y_1\subseteq E_1$ such that $X+E_2=Y_1\oplus E_2$. Then $r(M_1';X)=r(M_1;Y_1)$. Similarly, let $Y_2\subseteq E_2$ such that $X+E_1=Y_2\oplus E_1$ so $r(M_2';X)=r(M_2;Y_2)$. We have that $\dim(Y_1)=\dim(X)-\dim(X\cap E_2)$ and $\dim(Y_2)=\dim(X)-\dim(X\cap E_1)$. Note that, by local semimodularity (Lemma \ref{localSemimodularity}), $r(M_1;Y_1)\geq r(M_1;E_1)-\dim(E_1)+\dim(Y_1)$ and similarly $r(M_2;Y_2)\geq r(M_2;E_2)-\dim(E_2)+\dim(Y_2)$. All together this gives
\begin{align*}
\lefteqn{r(M_1';X)+r(M_2';X)+\dim E-\dim X} \\
&= r(M_1;Y_1)+r(M_2;Y_2)+\dim E-\dim X \\
&\geq r(M_1;E_1)-\dim(E_1)+\dim(Y_1) \\
& \quad +r(M_2;E_2)-\dim(E_2)+\dim(Y_2)+\dim E-\dim X \\
&= r(M_1)+r(M_2)-\dim(X)+\dim(Y_1)+\dim(Y_2) \\
&= r(M_1)+r(M_2)-\dim(X)+\dim(X)-\dim(X\cap E_2)+\dim(X)-\dim(X\cap E_1) \\
&= r(M_1)+r(M_2)+\dim(X)-\dim(X\cap E_2)-\dim(X\cap E_1) \\
&\geq r(M_1)+r(M_2).
\end{align*}
This means that the minimum $\min_{X\subseteq E}\{ r(M_1';X)+r(M_2';X)+\dim E-\dim X\}$ is attained by $X=E$ and $r(M_1\oplus M_2)=r(M_1)+r(M_2)$.
\end{proof}
\subsection{Examples of the direct sum}
To get some feeling for this construction, we analyse some small examples. We refer to the Appendix for an overview of small $q$-matroids. \\
We start with the easiest examples possible, with $n_1=n_2=1$.
\begin{Example}
Let $M_1=M_2=U_{0,1}$. This is the sum of two loops. In fact, we could just use Theorem \ref{AddLoop} here, without Definition \ref{DirSumWithUnion}, but we do the whole procedure for clarity. For $M_1'=M_1\oplus\ell$, let $E_1=\langle(1,0)\rangle$. Then by Theorem \ref{AddLoop}, $M_1'$ is a $q$-matroid of rank $0$, so all its subspaces have rank zero. In fact, $M_1'\cong U_{0,2}$. Let $E_2=\langle(0,1)\rangle$. We also have that $M_2'\cong U_{0,2}$. Applying Theorem \ref{Union_q-Matr} we find that $M_1'\vee M_2'\cong U_{0,2}$. \\
Let $M_1=U_{0,1}$ and $M_2=U_{1,1}$. Then $M_1'=U_{0,2}$ as argued above. For $M_2'$, let $E_2=\langle(0,1)\rangle$ and apply Theorem \ref{AddLoop}. By construction, $r(\{0\})=0$. In dimension $1$ we have $r(\langle (0,1)\rangle)=r(E_2)=r_2(E_2)=1$, $r(\ell)=r_2(\{0\})=0$, and for all other spaces $A$ of dimension $1$ we have $r(A)=r_2(E_2)=1$. These are the three cases in Remark \ref{caseSplitting}. Note that $M_2'$ is a mixed diamond (see Section \ref{qcd2}). Finally, we have $r(E)=r_2(E_2)=1$. By Lemma \ref{AddGreenDoesNothing}, $M_1 \oplus M_2=M_1' \vee M_2' =M_2'$. \\
The last case to consider is $M_1=M_2=U_{1,1}$. We have seen that $M_1'$ and $M_2'$ are mixed diamonds. To get $M_1'\vee M_2'$, we first see that $r(\{0\})=0$. In dimension $1$, we have that $r(\langle(0,1)\rangle)=\min\{ 0+0+1-0, 1+0+1-1 \}=1$. For $r(\langle(1,0)\rangle)$ we get the same but in a different order, so the rank is again $1$. For a $1$-dimensional space not equal to $E_1$ or $E_2$ we get $r(A)=\min\{0+0+1-0, 1+1+1-1 \}=1$. Finally, for $E$ we get $r(E)=\min\{0+0+2-0,1+0+2-1,0+1+2-1,1+1+2-1,1+1+2-2\}=2$. So, $M_1\oplus M_2=U_{2,2}$.
\end{Example}
Note that it follows from this example that $U_{1,2}$ is connected: it cannot be written as a direct sum of two $q$-matroids of dimension $1$.
\begin{Example}\label{ex-PrimePlusRed}
We calculate the $q$-matroid $P_1^*$ (see Section \ref{P1star}), it is the sum of a prime diamond (see Section \ref{qcd2}) and an independent 1-dimensional space, that is, $M_1=U_{1,2}$ and $M_2=U_{1,1}$. Let $E_1=\langle (0,0,1),(0,1,0)\rangle$ and $E_2=\langle (1,0,0)\rangle$. We first have to make $M_1'$ and $M_2'$. \\
For $M_1'$ we take $\ell=E_2=\langle(1,0,0)\rangle$. We have that $r_1'(0)=0$ and $r_1'(E)=r(M_1)=1$. For a $1$-dimensional space inside $E_1$, the rank is $1$, while $r_1'(\ell)=0$. For any other $1$-dimensional space $A$, $r_1'(A)=r_1(A')$ for $A'\subseteq E_1$, so $r_1'(A)=1$. For the $2$-dimensional spaces $A$, $r_1'(E_1)=1$. If $\ell\subseteq A$, $r_1'(A)=r_1(A\cap E_1)=1$. For the other $2$-dimensional spaces we have $r_1'(A)=r_1(E_1)=1$. Together, we find that $M_1'$ is the $q$-matroid $P_1$ in the Section \ref{P1}. \\
For $M_2'$ we have to add a loop twice to $U_{1,1}$. The first loop gives the mixed diamond, as explained in the previous example. The second one gives a $q$-matroid isomorphic to $P_2$ (see Section \ref{P2}). \\
Now we take the union. We have $r(0)=0$ and also $r(E)=2$ by Lemma \ref{l-RankOfSum}. \\ There are three types of $1$-dimensional spaces, as well as three types of $2$-dimensional spaces. Let $\dim A=1$. If $A\subseteq E_1$ then $r(A)=\min\{0+0+1-0, 1+0+1-1\}=1$. \\If $A=E_2$ then $r(A)=\min\{0+0+1-0,0+1+1-1\}=1$. For the other $1$-dimensional spaces $A$, $r(A)=\min\{0+0+1-0,1+1+1-1\}=1$. Now let $\dim A=2$. If $A=E_1$ then $r(A)=\min\{0+0+2-0,1+0+2-1,1+0+2-2\}=1$. For the other 2-dimensional spaces $A$, note that any $1$-dimensional space has rank $1$ in either $M_1'$ or in $M_2'$, contributing $1+0+2-1=0+1+2-1=2$ to the minimum. The zero space also contributes $0+0+2-0=2$, and the space itself gives $1+1+2-2=2$. So $r(A)=2$. \\
In total, we see that $U_{1,2}\oplus U_{1,1}\cong P_1^*$.
\end{Example}
\subsection{Properties of the direct sum}
We will now show that the direct sum as defined here has some desirable properties. All of these results are also true for the classical case, motivating the `correctness' of the definition of the direct sum presented in the previous section. Further support of the definition is provided by \cite{GJ}, where it is shown that the direct sum is the coproduct in the category of $q$-matroids and linear weak maps.
\begin{Theorem}\label{thm:directsumrank}
Let $M_1$ and $M_2$ be two $q$-matroids with ground spaces $E_1$ and $E_2$, respectively. Let their direct sum be as defined in Definition \ref{DirSumWithUnion}. Then for any $A\subseteq E$ of the form $A=A_1\oplus A_2$ with $A_1\subseteq E_1$ and $A_2\subseteq E_2$ it holds that $r(M_1\oplus M_2;A)=r(M_1;A_1)+r(M_2;A_2)$.
\end{Theorem}
\begin{proof}
By definition of the direct sum we have that
\[ r(M_1\oplus M_2;A)=\min_{X\subseteq A}\{r(M_1';X)+r(M_2';X)+\dim A-\dim X\}. \]
We will show that the minimum is attained for $X=A$. First, note that $A+E_2=A_1\oplus E_2$ and $A+E_1=A_2\oplus E_1$. Then taking $X=A$ inside the minimum gives
\[ r(M_1';A)+r(M_2';A)+\dim A-\dim A=r(M_1;A_1)+r(M_2;A_2). \]
We have left to show that for any $X\subseteq A$, the quantity inside the minimum is at least $r(M_1;A_1)+r(M_2;A_2)$. To see this, take $B_1\subseteq E_1$ and $B_2\subseteq E_2$ such that $X+E_2=B_1\oplus E_2$ and $X+E_1=B_2\oplus E_1$. \\
For the dimension of $B_1$, we have that $\dim B_1=\dim(X+E_2)-\dim E_2=\dim X-\dim(X\cap E_2)$. Furthermore, $B_1\subseteq A_1$ and thus by local semimodularity (Lemma \ref{localSemimodularity}), $r(M_1;A_1)-\dim A_1\leq r(M_1;B_1)-\dim B_1$. Similar results hold for $B_2$. Finally, note that $\dim B_1+\dim B_2\leq\dim X$. \\
Combining this, we get that
\begin{align*}
\lefteqn{r(M_1';X)+r(M_2';X)+\dim A-\dim X} \\
&= r(M_1;B_1)+r(M_2;B_2)+\dim A-\dim X \\
&\geq r(M_1;A_1)-\dim A_1+\dim B_1+r(M_2;A_2)-\dim A_2+\dim B_2+\dim A-\dim X \\
&\geq r(M_1;A_1)+r(M_2;A_2).
\end{align*}
This completes the proof that $r(M_1\oplus M_2;A)=r(M_1;A_1)+r(M_2;A_2)$.
\end{proof}
From Theorem \ref{RangoSomma} the following is now immediate.
\begin{Corollary}\label{MinorTheorem}
Let $M_1$ and $M_2$ be two $q$-matroids with ground spaces $E_1$ and $E_2$, respectively. Then their direct sum, as defined in Definition \ref{DirSumWithUnion}, satisfies the properties of Definition \ref{def-directsum1}.
\end{Corollary}
Note that this implies that also the rest of the results in Section \ref{FirstDef} hold for our Definition \ref{DirSumWithUnion} of the direct sum. Another desirable property of our definition of the direct sum is that the dual of the direct sum is the direct sum of the duals. \\
In order to prove that direct sum commutes with duality, we need to define duality on $E_1$, $E_2$, and $E$ in a compatible way.
\begin{Definition}
Let $E=E_1\oplus E_2$ and let $\perp$ be an anti-isomorphism on $\mathcal{L}(E)$ such that $E_1^\perp=E_2$. Define an anti-isomorphism $\perp\!\!(E_1)$ on $E_1$ by
\[ A^{\perp(E_1)}:=(A+E_2)^\perp=A^\perp\cap E_2^\perp=A^\perp\cap E_1. \]
Similarly, we define the anti-isomorphism $A^{\perp(E_2)}=A^\perp\cap E_2$.
\end{Definition}
The map $\perp\!\!(E_1)$ (and, similarly, $\perp\!\!(E_2)$) is indeed an anti-isomorphism, because it is the composition of the isomorphism $[0,E_1]\to[E_2,E]$ given by $A\mapsto A\oplus E_2$ and the anti-isomorphism $\perp$ restricted to $[E_2,E]\to[0,E_1]$.
\begin{Theorem}\label{thm-DualDirect}
Let $M_1$ and $M_2$ be $q$-matroids on ground spaces $E_1$ and $E_2$, respectively. Then we have that $(M_1\oplus M_2)^*=M_1^*\oplus M_2^*$.
\end{Theorem}
\begin{proof}
Let $B$ be a basis of $M_1\oplus M_2$. We will prove that $B^\perp$ is a basis of $M_1^*\oplus M_2^*$. First, note that by Lemma \ref{l-RankOfSum} we have
\begin{align*}
r(M_1^*\oplus M_2^*) &= r(M_1^*)+r(M_2^*) \\
&= \dim E_1-r(M_1)+\dim E_2-r(M_2) \\
&= \dim E-r(M_1\oplus M_2) \\
&= \dim B^\perp.
\end{align*}
This means that if we show that $B^\perp$ is independent in $M_1^*\oplus M_2^*$, it is also a basis. The rank of $B^\perp$ in $M_1^*\oplus M_2^*$ is given by
\[ r(M_1^*\oplus M_2^*, B^\perp)=\min_{X\subseteq B^\perp}\{r((M_1^*)';X)+r((M_2^*)';X)+\dim B^\perp-\dim X\}. \]
We want this to be equal to $\dim B^\perp$, hence we need to show for all $X\subseteq B^\perp$ that
\[ r((M_1^*)';X)+r((M_2^*)';X) \geq \dim X. \]
This bound is tight: take $X=\{0\}$ for example. In order to rewrite the left hand side of this inequality, note that
\begin{align*}
((X+E_2)\cap E_1)^{\perp(E_1)} & = ((X+E_2)\cap E_1)^\perp\cap E_1 \\
&= ((X+E_2)^\perp + E_2)\cap E_1 \\
&= ((X^\perp\cap E_1)+E_2)\cap E_1 \\
&= X^\perp \cap E_1
\end{align*}
because for a space in $E_1$, first adding $E_2$ and then intersecting with $E_1$ is giving the same space we start with. With this in mind, we can rewrite one of the rank functions:
\begin{align*}
r((M_1^*)';X) &= r(M_1^*;(X+E_2)\cap E_1) \\
&= r(M_1;((X+E_2)\cap E_1)^{\perp(E_1)}) +\dim((X+E_2)\cap E_1)-r(M_1;E_1) \\
&= r(M_1;X^\perp \cap E_1) +\dim E_1-\dim(X^\perp\cap E_1)-r(M_1;E_1).
\end{align*}
We have a similar result for $r((M_2^*)';X)$. Applying this yields
\begin{align*}
\lefteqn{r((M_1^*)';X)+r((M_2^*)';X)} \\
&= r(M_1;X^\perp \cap E_1) +\dim E_1 -\dim(X^\perp\cap E_1)-r(M_1;E_1) \\
& \quad + r(M_2;X^\perp \cap E_2) +\dim E_2-\dim(X^\perp\cap E_2)-r(M_2;E_2) \\
&= \dim X +\dim X^\perp -\dim B + r(M_1;X^\perp \cap E_1) -\dim(X^\perp\cap E_1) \\
& \quad + r(M_2;X^\perp \cap E_2) -\dim(X^\perp\cap E_2).
\end{align*}
In order for this quantity to be greater than or equal to $\dim X$, we need to prove for all $X\subseteq B^\perp$ the following inequality:
\[ r(M_1;X^\perp\cap E_1) + r(M_2;X^\perp\cap E_2) + \dim X^\perp \geq \dim B+\dim(X^\perp\cap E_1)+\dim(X^\perp\cap E_2). \]
We proceed by mathematical induction on $\dim X^\perp$, so the base case is $X^\perp=B$. We claim that $r(M_1;B\cap E_1)=\dim(B\cap E_1)$. Since $B$ is a basis, it holds for all $Y\subseteq B$ that $r(M_1';Y)+r(M_2';Y) \geq \dim Y$ (by a reasoning as in the beginning of this proof). In particular, this holds for $Y=B\cap E_1\subseteq B$, so
\[ r(M_1';B\cap E_1)+r(M_2';B\cap E_1) = r(M_1;B\cap E_1) + 0 \geq \dim(B\cap E_1) \]
and thus by the rank axiom (r2) equality holds, which proves our claim. By the same reasoning, we have that $r(M_2;B\cap E_2)=\dim(B\cap E_2)$. This establishes the base case of our induction:
\[ r(M_1;B\cap E_1) + r(M_2;B\cap E_2) + \dim B = \dim B+\dim(B\cap E_1)+\dim(B\cap E_2). \]
Now assume the inequality holds for all $Y\supseteq B$ with $\dim Y\leq d$, where $\dim B\leq d\leq\dim E$. Consider a space $Y$ with $\dim Y=d+1$ and write $Y=Y'\oplus x$ for some $1$-dimensional subspace $x$. Since $x$ cannot be in both $E_1$ and $E_2$, we can assume without loss of generality that $x\not\subseteq E_1$ for any choice of $x$ such that $Y=Y'\oplus x$ (the case $x\not\subseteq E_2$ goes similarly). Then by rewriting and using the induction hypothesis we get
\begin{align*}
\lefteqn{r(M_1;(Y'\oplus x)\cap E_1) + r(M_2;(Y'\oplus x)\cap E_2) + \dim (Y'\oplus x)} \\
&= r(M_1;Y'\cap E_1) + r(M_2;(Y'\oplus x)\cap E_2) + \dim Y' +1 \\
&\geq r(M_1;Y'\cap E_1) + r(M_2;Y'\cap E_2) + \dim Y' +1 \\
&\geq \dim B+\dim(Y'\cap E_1)+\dim(Y'\cap E_2) + 1\\
&= \dim B+\dim((Y'\oplus x)\cap E_1)+\dim(Y'\cap E_2)+1 \\
&\geq \dim B+\dim((Y'\oplus x)\cap E_1)+\dim((Y'\oplus x)\cap E_2).
\end{align*}
This concludes the proof that $B^\perp$ is independent in $M_1^*\oplus M_2^*$, hence a basis, and we have proven that $(M_1\oplus M_2)^*=M_1^*\oplus M_2^*$.
\end{proof}
In the last example we will answer the question raised in Section \ref{ExDim4} about the direct sum of two copies of $U_{1,2}$. This direct sum is now uniquely defined.
\begin{Example}\label{2U12}
Let $M_1=M_2=U_{1,2}$. We will compute $M:=M_1 \oplus M_2$. This $q$-matroid is defined as $M=U_{1,2}'\vee U_{1,2}'$. \\
Let us coordinatize the ground space of $M_1$ as
$E_1=\langle (1,0,0,0), (0,1,0,0) \rangle$ and that of $M_2$ as $E_2=\langle (0,0,1,0), (0,0,0,1) \rangle$. Let $E=E_1\oplus E_2$. \\
We first compute $U_{1,2}'$. Since $n_1=n_2=2$, we need to add two loops to $U_{1,2}$ via Definition \ref{AggiungiLoop}. This gives a $q$-matroid with ground space $E$ and $r(A)=1$ for each $A\subseteq E$, unless $A\subseteq E_2$, in which case $r(A)=0$. \\
To determine $M=U_{1,2}'\vee U_{1,2}'$ we use Lemma \ref{l-RankOfSum} to get $r(M)=2$. By Proposition \ref{prop-noloops}, $M$ does not have any loops. So it suffices to decide for every $2$-dimensional space $A$ whether it is a basis or a circuit. First, note that
\[r(A)=\min_{X \subseteq A} \{r_1(X)+r_2(X)+\dim(A)-\dim(X)\}
=\min_{X \subseteq A} \{r_1(X)+r_2(X)+2-\dim(X)\}.\]
We distinguish between different types of $2$-spaces, depending on their intersection with $E_1$ and $E_2$.
\begin{itemize}
\item For $A=E_1$ and for $A=E_2$ we have $r(A)=1$ by Corollary \ref{MinorTheorem}.
\item Let $A\cap E_1 =A \cap E_2=\{0\}$, then
\begin{itemize}
\item if $\dim(X)=0$ then $r_1(X)+r_2(X)+2-\dim(X)=2$;
\item if $\dim(X)=1$ then $r_1(X)+r_2(X)+2-\dim(X)=3$;
\item if $\dim(X)=2$ then $r_1(X)+r_2(X)+2-\dim(X)=2$;
\end{itemize}
so we conclude that $r(A)=2$.
\item In the case $\dim(A\cap E_2)=1$ and $A\cap E_1 =\{0\}$ (or vice versa) we have
\begin{itemize}
\item if $\dim(X)=0$ then $r_1(X)+r_2(X)+2-\dim(X)=2$;
\item if $\dim(X)=1$ then $r_1(X)+r_2(X)+2-\dim(X)=3$ if $X$ is not contained in $E_2$, and $r_1(X)+r_2(X)+2-\dim(X)=2$ otherwise;
\item if $\dim(X)=2$ then $r_1(X)+r_2(X)+2-\dim(X)=2$;
\end{itemize}
so we conclude that $r(A)=2$.
\item Finally, if $\dim(A\cap E_2)=\dim(A\cap E_1)=1$ we have
that
\begin{itemize}
\item if $\dim(X)=0$ then $r_1(X)+r_2(X)+2-\dim(X)=2$;
\item if $\dim(X)=1$ then $r_1(X)+r_2(X)+2-\dim(X)=3$ if $X$ is not contained in $E_1$ nor in $E_2$, and $r_1(X)+r_2(X)+2-\dim(X)=2$ otherwise;
\item if $\dim(X)=2$ then $r_1(X)+r_2(X)+2-\dim(X)=2$;
\end{itemize}
so we conclude that $r(A)=2$.
\end{itemize}
We see that all $2$-spaces except $E_1$ and $E_2$ are bases. Since we have $E_1=E_2^\perp$, it follows that this $q$-matroid is self-dual. Because $U_{1,2}^*=U_{1,2}$, this example is in agreement with Theorem \ref{thm-DualDirect}.
\end{Example}
\section{Connectedness}\label{Connect}
In the classical case, every matroid is the direct sum of its connected components. It therefore makes sense to consider the notion of connectedness in the study of the direct sum of $q$-matroids. In this final section we collect some thoughts and examples concerning a possible $q$-analogue of connectedness. We will not be able to define the concept, but we hope to argue why it is not straightforward and give some possible paths for further investigation. \\
To define connectedness in classical matroids, we use the following relation on the elements of a matroid $M=(E,r)$.
\begin{quote}
Two elements $x,y\in E$ are related if either $x=y$ or if there is a circuit of $M$ that contains both $x$ and $y$.
\end{quote}
This relation is in fact an equivalence relation \cite[Theorem 3.36]{gordonmcnulty}. We call a matroid connected if it has only one equivalence class under this relation. If there are multiple equivalence classes $E_1,\ldots,E_k$ then we can write
\[ M=M|_{E_1}\oplus\cdots\oplus M|_{E_k}. \]
We will discuss some attempts to find a $q$-analogue of this equivalence relation. Note that we are looking for an equivalence relation on the $1$-dimensional spaces of $E$.
\subsection{First attempt}
The first obvious $q$-analogue for the relation is the following:
\begin{Definition}\label{def-failed1}
Two $1$-dimensional spaces $x,y\subseteq E$ are related if either $x=y$ or if there is a circuit of $M$ that contains both $x$ and $y$.
\end{Definition}
However, this is not an equivalence relation, because it is not transitive. Look at the matroid $P_1$ from the catalogue (Section \ref{P1}). The spaces $\langle(0,1,0)\rangle$ and $\langle(0,0,1)\rangle$ are in a circuit, and also $\langle (0,0,1)\rangle$ and $\langle(1,1,0)\rangle$ are in a circuit, but $\langle(0,1,0)\rangle$ and $\langle(1,1,0)\rangle$ are not in a circuit.
\subsection{Alternative attempt}
Assume we have a $q$-matroid $M=(E,r)$ with $\mathcal{H}$ its family of hyperplanes.
\begin{Definition}\label{relation_hyperplanes}
Let $x$ and $y$ be two $1$-dimensional spaces in $E$. We say $x$ and $y$ are related if $x=y$ or if there is a hyperplane $H\in\mathcal{H}$ such that $x,y\not\subseteq H$. We call this relation $R$.
\end{Definition}
\begin{Remark}
For classical matroids, consider the following relations:
\begin{itemize}
\item $x$ and $y$ are related if $x=y$ or if there is a circuit containing both $x$ and $y$.
\item $x$ and $y$ are related if $x=y$ or if there is a hyperplane containing neither $x$ nor $y$.
\end{itemize}
It is a well established result for classical matroids (see for example \cite[Theorem 3.36]{gordonmcnulty}) that the first relation is an equivalence relation. It is also a classical result \cite[Theorem 3.48]{gordonmcnulty} that both relations give the same equivalence classes. However, the $q$-analogues of these two relations are \emph{not} equivalent. Being in a circuit is equivalent to being in the orthogonal complement of a hyperplane, not being outside a hyperplane. So the relation defined in this subsection is not equivalent to the relation in the previous subsection. In fact, Definition \ref{relation_hyperplanes} is an equivalence relation, as the next theorem shows.
\end{Remark}
\begin{Theorem}
The relation $R$ from Definition \ref{relation_hyperplanes} is an equivalence relation.
\end{Theorem}
\begin{proof}
We follow the proof of \cite[Proposition 3.36]{gordonmcnulty}, replacing circuits with hyperplanes and reversing inclusion. $R$ is clearly reflexive and symmetric. So we only have to prove it is transitive. We will frequently use the following hyperplane axiom \cite{BCR21}:
\begin{itemize}
\item[(H3')] If $H_1,H_2\in\mathcal{H}$ with $y\not\subseteq H_1,H_2$ and $x\subseteq H_2$, $x\not\subseteq H_1$, then there is an $H_3\in\mathcal{H}$ such that $(H_1\cap H_2)+y\subseteq H_3$ and $x\not\subseteq H_3$.
\end{itemize}
Let $x,y,z$ be $1$-dimensional spaces in $E$. Let $x,y\not\subseteq H_1$ and $y,z\not\subseteq H_2$. We have to show there exists a hyperplane $H'$ not containing $x$ and $z$. If $x\not\subseteq H_2$ or $z\not\subseteq H_1$, we are done, so suppose $x\subseteq H_2$ and $z\subseteq H_1$. We will use induction on $\dim H_1-\dim(H_1\cap H_2)$. \\
Suppose $\dim H_1-\dim(H_1\cap H_2)=1$, then we can write $H_1$ as $(H_1\cap H_2)+z$. Applying (H3') yields an $H'\in\mathcal{H}$ such that $(H_1\cap H_2)+y\subseteq H'$ and $x\not\subseteq H'$. We need to have that $z\not\subseteq H'$, because otherwise $H_1\subsetneq H'$ and this violates axiom (H2). So $H'$ is a hyperplane not containing $x$ and $z$, as requested. \\
Now suppose $\dim H_1-\dim(H_1\cap H_2)=n>1$ and assume that $H'$ exists for all pairs of hyperplanes such that $\dim H_1-\dim(H_1\cap H_2)<n$. We will use (H3') twice to find a hyperplane $H_4\in\mathcal{H}$ such that $\dim H_1-\dim(H_1\cap H_4)<\dim H_1-\dim(H_1\cap H_2)$ and such that $x\subseteq H_4$, $x\not\subseteq H_1$ and $z\subseteq H_1$, $z\not\subseteq H_4$. Then we can apply the induction hypothesis to $H_1$ and $H_4$.
\[ \includegraphics[width=.8\textwidth]{hyperplanes.pdf} \]
First we apply (H3') to $H_1$ and $H_2$. This gives $H_3\in\mathcal{H}$ such that $(H_1\cap H_2)+y\subseteq H_3$ and $x\not\subseteq H_3$. If $z\not\subseteq H_3$ we are done, so let $z\subseteq H_3$. However, there is a $1$-dimensional space $z^*\subseteq H_1$, $z^*\not\subseteq H_2$ such that $z^*\not\subseteq H_3$: if not, $H_1\subsetneq H_3$ and this violates axiom (H2). \\
Now we apply (H3') again, to $H_2$ and $H_3$ with $z^*\not\subseteq H_2,H_3$ and $z\subseteq H_3$, $z\not\subseteq H_2$. This gives $H_4\in\mathcal{H}$ such that $(H_2\cap H_3)+z^*\subseteq H_4$ and $z\not\subseteq H_4$. If $x\not\subseteq H_4$ we are done, so let $x\subseteq H_4$. \\
By construction (see picture) we have that $(H_1\cap H_2)\subseteq(H_1\cap H_4)$. This inclusion is strict, because $z^*\subseteq H_1,H_4$ but $z^*\not\subseteq H_2$. This means we have $\dim H_1-\dim(H_1\cap H_4)<\dim H_1-\dim(H_1\cap H_2)$. By the induction hypothesis, we can now find an $H'\in\mathcal{H}$ such that $x,z\not\subseteq H'$. \\
This proves that the relation $R$ is transitive, and hence an equivalence relation.
\end{proof}
The good news is that we have found a relation that is in fact an equivalence relation. The bad news is that it does not work like we want to. The uniform $q$-matroids $U_{0,3}$ and $U_{3,3}$ only have one equivalence class, where we would want that $U_{0,3}$ is the sum of three copies of $U_{0,1}$ and $U_{3,3}$ is the sum of three copies of $U_{1,1}$. Also the $q$-matroid $P_1^*$ (Section \ref{P1star}) in the catalog has only one equivalence class, where we constructed it in Example \ref{ex-PrimePlusRed} as the direct sum $U_{1,1}\oplus U_{1,2}$. $P_1$ on the other hand (the dual of $P_1^*$) has more than one equivalence class: a signal that this attempt for an equivalence relation does not play nice with duality.
\subsection{Towards a well-defined definition}
As we saw, Definition \ref{def-failed1} is in general not an equivalence relation. However, in some $q$-matroids it is an equivalence relation. From our examples, we think the following statements could be true.
\begin{Conjecture}
The relation of Definition \ref{def-failed1} is an equivalence relation in at least one of $M$ and $M^*$.
\end{Conjecture}
\begin{Conjecture}
Let $M$ be a $q$-matroid with circuits $\mathcal{C}$ and cocircuits $\mathcal{C}^*$. Suppose $\dim(C\cap C^*)\neq1$ for all $C\in\mathcal{C}$ and $C^*\in\mathcal{C}^*$. Then Definition \ref{def-failed1} is an equivalence relation.
\end{Conjecture}
Both conjectures are of course true in the classical case. To see this for the last conjecture, note that it can be proven that the intersection between a circuit and a cocircuit can never be a single element. See for example \cite[Proposition 2.1.11]{oxley}. The $q$-analogue of this statement is not true in general: see for example the $q$-matroid $P_1^*$ of Section \ref{P1star}. It has one circuit, $\langle(0,1,0),(0,0,1)\rangle$, that intersects in dimension $1$ with the cocircuit $\langle(1,1,0),(0,0,1)\rangle$. \\
We welcome any further hints towards a better understanding of the $q$-analogues of the direct sum, connectedness, and their relation.
\appendix
\section{A catalogue of small $q$-matroids}\label{qcatalogue}
In this appendix we make a list of all $q$-matroids with a ground space up to dimension $3$. We hope that these explicit examples help the reader developing intuition on $q$-matroids. We represent the $q$-matroids as a colouring of the underlying subspace lattice: if a cover is red, the rank goes up; if a cover is green, the rank stays the same. See \cite{BCJ17} for more information on matroidal bicolourings.
When defining a space as the span of some vectors, for space reasons, we remove parentheses and commas. As an example, the space generated by $(0,1,0)$ will be denoted by $\langle 010 \rangle$.
\subsection{Dimension 0}\label{qcd0}
There is only one $q$-matroid of dimension zero: the uniform $q$-matroid $U_{0,0}$. This is independent of the field over which the ground space is defined.
\subsection{Dimension 1}\label{qcd1}
There are two $q$-matroids of dimension one: the uniform $q$-matroids $U_{0,1}$ and $U_{1,1}$. This is independent of the field over which the ground space is defined. Their representations are $[0]$ and $[1]$.
\subsection{Dimension 2}\label{qcd2}
It is proven in \cite{BCJ17} that a bicolouring is matroidal if and only if it is one of the following four options:
\[ \begin{array}{cc}
\hspace{-40pt} \xygraph{
[] *+{\dir{o}}
(-@[red][ddlll(.5)] *+{\dir{o}}
(-@[red][ddrrr(.5)] *+{\dir{o}}="0"
)
,-@[red][dl] *+{\dir{o}}
(-@[red]"0"
)
,[d] *[]{\ldots}
,-@[red][dr] *+{\dir{o}}
(-@[red]"0"
)
,-@[red][ddrrr(.5)] *+{\dir{o}}
(-@[red]"0"
)
)
} \hspace{-40pt} &
\hspace{-40pt} \xygraph{
[] *+{\dir{o}}
(-@[green][ddlll(.5)] *+{\dir{o}}
(-@[green][ddrrr(.5)] *+{\dir{o}}="0"
)
,-@[green][dl] *+{\dir{o}}
(-@[green]"0"
)
,[d] *[]{\ldots}
,-@[green][dr] *+{\dir{o}}
(-@[green]"0"
)
,-@[green][ddrrr(.5)] *+{\dir{o}}
(-@[green]"0"
)
)
} \hspace{-40pt} \\[-24pt]
\text{one} & \text{zero} \\[12pt]
\hspace{-40pt} \xygraph{
[] *+{\dir{o}}
(-@[green][ddlll(.5)] *+{\dir{o}}
(-@[red][ddrrr(.5)] *+{\dir{o}}="0"
)
,-@[green][dl] *+{\dir{o}}
(-@[red]"0"
)
,[d] *[]{\ldots}
,-@[green][dr] *+{\dir{o}}
(-@[red]"0"
)
,-@[red][ddrrr(.5)] *+{\dir{o}}
(-@[green]"0"
)
)
} \hspace{-40pt} &
\hspace{-40pt} \xygraph{
[] *+{\dir{o}}
(-@[green][ddlll(.5)] *+{\dir{o}}
(-@[red][ddrrr(.5)] *+{\dir{o}}="0"
)
,-@[green][dl] *+{\dir{o}}
(-@[red]"0"
)
,[d] *[]{\ldots}
,-@[green][dr] *+{\dir{o}}
(-@[red]"0"
)
,-@[green][ddrrr(.5)] *+{\dir{o}}
(-@[red]"0"
)
)
} \hspace{-40pt} \\[-24pt]
\text{mixed} & \text{prime}
\end{array} \]
This implies that there are also four $q$-matroids of dimension $2$. The \textbf{one} diamond is the $q$-matroid $U_{2,2}$ of rank $2$, represented by the identity matrix $I_2$. There are two $q$-matroids of rank $1$ and dimension $2$: the uniform $q$-matroid $U_{1,2}$ given by the \textbf{prime} diamond, represented by $\left[\begin{array}{cc}1&\alpha \end{array}\right]$ and the $q$-matroid given by the \textbf{mixed} diamond, represented as $\left[\begin{array}{cc} 1&0 \end{array}\right]$ where $1$ and $\alpha$ are algebraically independent. Finally, the only $q$-matroid with dimension $2$ and rank $0$ is $U_{0,2}$.
\subsection{Dimension 3}\label{qcd3}
One can argue that this is the first dimension where things get interesting.
\begin{Theorem}
There are $8$ $q$-matroids of dimension $3$.
\end{Theorem}
\begin{proof}
By duality, we only need to show that there are four $q$-matroids of dimension $3$ and rank $0$ or $1$. There is, as in any dimension, one $q$-matroid of rank $0$: $U_{0,3}$. For a rank $1$ $q$-matroid it suffices to say which $1$-dimensional subspaces of the ground space are independent (i.e., bases) and dependent (i.e., loops). Since loops come in subspaces \cite[Lemma 11]{JP18}, we determine the $q$-matroid completely by picking a dimension for the loop space. Since a loopspace in a $q$-matroid of rank $1$ has dimension at most $2$ (otherwise it would have rank $0$), the loopspace can have dimension $0$, $1$ or $2$. This gives three $q$-matroids of rank $1$.
\end{proof}
We will now explicitly list all eight $q$-matroids of dimension $3$. For convenience, we do this over the field $\mathbb{F}_2$, but the general construction of the theorem above holds for other fields as well.
\subsubsection{$U_{0,3}$}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[green][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[green][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[green][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[green][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[green]"0"
)
,-@[green][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[green]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[green]"100"
,-@[green][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[green]"0"
)
,-@[green][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[green]"0"
)
)
,(-@[green][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[green]"100"
,-@[green][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[green]"0"
)
,-@[green][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[green]"0"
)
)
,(-@[green][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[green]"010"
,-@[green]"011"
,-@[green]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[green]"010"
,-@[green]"111"
,-@[green]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[green]"110"
,-@[green]"011"
,-@[green]"101"
)
,(-@[green][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[green]"110"
,-@[green]"111"
,-@[green]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=0$ \\
Independent: $0$\\
Bases: $0$ \\
Circuits: all $1$-spaces \\
Hyperplanes: none\\
Cocircuits: none \\
Dual: $U_{3,3}$\\
Direct sum: $U_{0,1}\oplus U_{0,1}\oplus U_{0,1}$. \\
Representation: $\left[\begin{array}{ccc} 0 & 0 & 0 \end{array}\right]$
\end{minipage}
\subsubsection{$U_{1,3}$}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[green][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[green][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[red][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[green][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[red]"0"
)
,-@[green][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[red]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[green]"100"
,-@[green][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[green][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[green][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[green]"100"
,-@[green][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[red]"0"
)
,-@[green][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[green][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[green]"010"
,-@[green]"011"
,-@[green]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[green]"010"
,-@[green]"111"
,-@[green]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[green]"110"
,-@[green]"011"
,-@[green]"101"
)
,(-@[green][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[green]"110"
,-@[green]"111"
,-@[green]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=1$ \\
Independent: $0$, all $1$-spaces\\
Bases: all 1-spaces \\
Circuits: all $2$-spaces \\
Hyperplanes: $0$\\
Cocircuits: $E$ \\
Dual: $U_{2,3}$\\
Direct sum: no. \\
Representation: $\left[\begin{array}{ccc} 1 & \alpha & \alpha^2 \end{array}\right]_{\mathbb{F}_{2^3}}$
\end{minipage}
\subsubsection{$P_1$: rank 1, $1$-dimensional loopspace} \label{P1}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[green][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[red][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[green][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[green][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[red]"0"
)
,-@[green][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[red]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[red]"100"
,-@[green][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[green][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[green][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[red]"100"
,-@[green][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[red]"0"
)
,-@[green][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[green][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[green]"010"
,-@[green]"011"
,-@[green]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[green]"010"
,-@[green]"111"
,-@[green]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[green]"110"
,-@[green]"011"
,-@[green]"101"
)
,(-@[green][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[green]"110"
,-@[green]"111"
,-@[green]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=1$ \\
Independent: $0$, all $1$-spaces except $\langle100\rangle$\\
Bases: all 1-spaces except $\langle100\rangle$ \\
Circuits: $\langle100\rangle$, $\langle010,001\rangle$, $\langle101,010\rangle$, $\langle101,011\rangle$, $\langle110,001\rangle$ \\
Hyperplanes: $\langle100\rangle$\\
Cocircuits: $\langle010,001\rangle$ \\
Dual: $P_1^*$\\
Direct sum: $U_{0,1}\oplus U_{1,2}$ \\
Representation: $\left[\begin{array}{ccc} 0 & 1 & \alpha \end{array}\right]_{\mathbb{F}_{2^2}}$
\end{minipage}
\subsubsection{$P_2$: rank 1, $2$-dimensional loopspace} \label{P2}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[red][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[green][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[green][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[green][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[green]"0"
)
,-@[green][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[green]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[red]"100"
,-@[green][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[green][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[green][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[red]"100"
,-@[green][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[red]"0"
)
,-@[green][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[green][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[red]"010"
,-@[green]"011"
,-@[green]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[red]"010"
,-@[green]"111"
,-@[green]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[red]"110"
,-@[green]"011"
,-@[green]"101"
)
,(-@[green][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[red]"110"
,-@[green]"111"
,-@[green]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=1$ \\
Independent: $0$, all $1$-spaces except $\langle 100\rangle,\langle 010 \rangle,\langle 110 \rangle$\\
Bases: all independents except 0 \\
Circuits: the three loops $\langle 100\rangle,\langle 010 \rangle,\langle 110 \rangle$\\
Hyperplanes: $\langle 100 , 010 \rangle$\\
Cocircuits: $\langle 001 \rangle$ \\
Dual: $P_2^*$\\
Direct sum: $U_{0,1}\oplus U_{0,1}\oplus U_{1,1}$. \\
Representation: $\left[\begin{array}{ccc} 0 & 0 & 1 \end{array}\right]$
\end{minipage}
\subsubsection{$P_2^*$} \label{P2star}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[green][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[red][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[red][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[red][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[red]"0"
)
,-@[red][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[red]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[red]"100"
,-@[red][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[red][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[red][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[green]"100"
,-@[red][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[green]"0"
)
,-@[green][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[red][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[green]"010"
,-@[green]"011"
,-@[red]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[red]"010"
,-@[red]"111"
,-@[red]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[red]"110"
,-@[red]"011"
,-@[red]"101"
)
,(-@[red][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[green]"110"
,-@[green]"111"
,-@[red]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=2$ \\
Independent: $0$, all $1$-spaces except $\langle 001\rangle$, and as 2-spaces $\langle 100, 010\rangle$, $\langle 100, 011\rangle$, $\langle 101, 010\rangle$\\
Bases: $\langle 100, 010\rangle$, $\langle 100, 011\rangle$, $\langle 101, 010\rangle$ \\
Circuits: the loop $\langle 001\rangle$\\
Hyperplanes: $\langle 100 , 001 \rangle$, $\langle 010, 001\rangle$, $\langle 110, 001\rangle$ \\
Cocircuits: $\langle 010 \rangle$, $\langle 100\rangle$, $\langle 110 \rangle$ \\
Dual: $P_2$\\
Direct sum: $U_{1,1}\oplus U_{1,1}\oplus U_{0,1}$. \\
Representation: $\left[\begin{array}{ccc} 1 & 0&0 \\
0& 1 & 0 \end{array}\right]$
\end{minipage}
\subsubsection{$P_1^*$}\label{P1star}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[green][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[red][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[red][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[red][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[red]"0"
)
,-@[red][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[red]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[red]"100"
,-@[red][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[red][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[green][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[red]"100"
,-@[red][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[red]"0"
)
,-@[red][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[red][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[green]"010"
,-@[green]"011"
,-@[green]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[red]"010"
,-@[red]"111"
,-@[red]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[red]"110"
,-@[red]"011"
,-@[red]"101"
)
,(-@[green][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[red]"110"
,-@[red]"111"
,-@[red]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=2$ \\
Independent: $0$, all $1$-spaces, all $2$-spaces except $\langle010,001\rangle$\\
Bases: all 2-spaces except $\langle010,001\rangle$ \\
Circuits: $\langle010,001\rangle$ \\
Hyperplanes: $\langle100\rangle$, $\langle110\rangle$, $\langle111\rangle$, $\langle101\rangle$, $\langle010,001\rangle$ \\
Cocircuits: $\langle010,001\rangle$, $\langle110,001\rangle$, $\langle101,011\rangle$, $\langle101,010\rangle$, $\langle100\rangle$ \\
Dual: $P_1$\\
Direct sum: $U_{1,1}\oplus U_{1,2}$ \\
Representation: $\left[\begin{array}{ccc} 1 & 0 & 0 \\ 0 & \alpha & 1 \end{array}\right]_{\mathbb{F}_{2^2}}$
\end{minipage}
\subsubsection{$U_{2,3}$}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[green][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[red][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[red][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[red][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[red]"0"
)
,-@[red][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[red]"0"
)
)
,(-@[green][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[red]"100"
,-@[red][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[red][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[green][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[red]"100"
,-@[red][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[red]"0"
)
,-@[red][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[green][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[red]"010"
,-@[red]"011"
,-@[red]"001"
)
,(-@[green][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[red]"010"
,-@[red]"111"
,-@[red]"101"
)
,(-@[green][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[red]"110"
,-@[red]"011"
,-@[red]"101"
)
,(-@[green][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[red]"110"
,-@[red]"111"
,-@[red]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=2$ \\
Independent: all except $E$\\
Bases: all $2$-spaces \\
Circuits: $E$ \\
Hyperplanes: all $1$-spaces \\
Cocircuits: all $2$-spaces \\
Dual: $U_{1,3}$\\
Direct sum: no. \\
Representation: $\left[\begin{array}{ccc} 1 & 0 & \alpha \\ 0 & 1 & \alpha^2 \end{array}\right]_{\mathbb{F}_{2^3}}$
\end{minipage}
\subsubsection{$U_{3,3}$}
\begin{minipage}{.55\textwidth}
\resizebox{\textwidth}{!}{ \xygraph{
[] *+[]{\tiny{\txt{$E$}}}
(-@[red][dlll] *+[]{\tiny{\txt{100 \\ 010}}}
(-@[red][d] *+[]{\tiny{\txt{ 100}}}="100"
(-@[red][drrr] *+[]{\tiny{\txt{0}}}="0"
)
,-@[red][dr] *+[]{\tiny{\txt{010}}}="010"
(-@[red]"0"
)
,-@[red][drr] *+[]{\tiny{\txt{110}}}="110"
(-@[red]"0"
)
)
,(-@[red][dll] *+[]{\tiny{\txt{100 \\ 011}}}
(-@[red]"100"
,-@[red][drr] *+[]{\tiny{\txt{111}}}="111"
(-@[red]"0"
)
,-@[red][drrr] *+[]{\tiny{\txt{011}}}="011"
(-@[red]"0"
)
)
,(-@[red][dl] *+[]{\tiny{\txt{100 \\ 001}}}
(-@[red]"100"
,-@[red][drrr] *+[]{\tiny{\txt{001}}}="001"
(-@[red]"0"
)
,-@[red][drrrr] *+[]{\tiny{\txt{101}}}="101"
(-@[red]"0"
)
)
,(-@[red][d] *+[]{\tiny{\txt{010 \\ 001}}}
(-@[red]"010"
,-@[red]"011"
,-@[red]"001"
)
,(-@[red][dr] *+[]{\tiny{\txt{101 \\ 010}}}
(-@[red]"010"
,-@[red]"111"
,-@[red]"101"
)
,(-@[red][drr] *+[]{\tiny{\txt{101 \\ 011}}}
(-@[red]"110"
,-@[red]"011"
,-@[red]"101"
)
,(-@[red][drrr] *+[]{\tiny{\txt{110 \\ 001}}}
(-@[red]"110"
,-@[red]"111"
,-@[red]"001"
)
)
} }
\end{minipage}
\begin{minipage}{.4\textwidth}
Rank: $r(E)=3$ \\
Independent: all\\
Bases: $E$ \\
Circuits: no \\
Hyperplanes: $2$-spaces \\
Cocircuits: $1$-spaces \\
Dual: $U_{0,3}$\\
Direct sum: $U_{1,1}\oplus U_{1,1}\oplus U_{1,1}$. \\
Representation: identity matrix $I_3$
\end{minipage}
\end{document} |
\begin{document}
\title[On Wigner's theorem]{On Wigner's theorem in smooth normed spaces}
\author{Dijana Ili\v{s}evi\'{c}}
\address{Department of Mathematics,
University of Zagreb, Bijeni\v{c}ka 30, P.O. Box 335, 10002 Zagreb,
Croatia}
\email{ilisevic@math.hr}
\author{Aleksej Turn\v{s}ek}
\address{Faculty of Maritime Studies and Transport, University of Ljubljana, Pot pomor\-\v{s}\v{c}akov 4, 6320 Portoro\v{z}, Slovenia and Institute of Mathematics, Physics and Mechanics, Jadranska 19, 1000 Ljubljana, Slovenia}
\email{aleksej.turnsek@fmf.uni-lj.si}
\thanks{This research was supported in part by the Ministry of Science
and Education of Slovenia.}
\subjclass[2010]{39B05, 46C50, 47J05}
\keywords{Wigner's theorem, isometry, normed space}
\begin{abstract}
In this note we generalize the well-known Wigner's unitary-anti\-unitary theorem.
For $X$ and $Y$ smooth normed spaces and $f:X\to Y$ a surjective mapping such that $|[f(x),f(y)]|=|[x,y]|$, $x,y\in X$, where $[\cdot,\cdot]$ is the unique semi-inner product, we show that $f$ is phase equivalent to either a linear or an anti-linear surjective isometry. When $X$ and $Y$ are smooth real normed spaces and $Y$ strictly convex, we show that Wigner's theorem is equivalent to $\{\|f(x)+f(y)\|,\|f(x)-f(y)\|\}=\{\|x+y\|,\|x-y\|\}$, $x,y\in X$.
\end{abstract}
\maketitle
\newtheorem{theorem}{Theorem}[section]
\newtheorem{proposition}[theorem]{Proposition}
\newtheorem{lemma}[theorem]{Lemma}
\newtheorem{corollary}[theorem]{Corollary}
\newtheorem{definition}{Definition}
\theoremstyle{definition}
\newtheorem{example}[theorem]{Example}
\newtheorem{xca}[theorem]{Exercise}
\newtheorem{question}{Question}
\theoremstyle{remark}
\newtheorem{remark}{Remark}[section]
\section{Introduction}
Let $(H,(\cdot,\cdot))$ and $(K,(\cdot,\cdot))$ be inner product spaces over $\mathbb F\in\{\mathbb R,\mathbb C\}$ and suppose that $f:H\to K$ is a mapping satisfying
\begin{equation}| (f(x),f(y))|=|(x,y)|,\quad x,y\in H.\end{equation}
Then the famous Wigner's theorem says that $f$ is a solution of (1) if and only if it is phase equivalent to a linear or an anti-linear isometry, say $U$, that is, $$f(x)=\sigma(x) Ux,\quad x\in H,$$
where $\sigma: H\to\mathbb F$, $|\sigma(x)|=1$, $x\in H$, is a so-called phase function. This celebrated result plays a very important role in quantum mechanics and in representation theory in physics.
where $\sigma: H\to\mathbb F$, $|\sigma(x)|=1$, $x\in H$, is a so called phase function. This celebrated result plays a very important role in quantum mechanics and in representation theory in physics.
There are several proofs of this result, see \cite{Bargmann, Freed, Geher, Gyory, Lomont, Ratz, Sharma1, Sharma3} to list just some of them. For generalizations to Hilbert $C^*$-modules see \cite{Bakic, Molnar}.
On each normed space $X$ over $\mathbb{F}$ there exists at least one semi-inner product (s.i.p.), see \cite{Giles, Lumer}, on $X$ which is a function $[\, \cdot, \cdot \,] \colon X\times X\to\mathbb{F}$ with the following properties:
\begin{enumerate}
\item $[x+y,z]=[x,z]+[y,z]$, $[\lambda x,y]=\lambda[x,y]$, $[x,\lambda y]=\overline{\lambda}[x,y]$ for all $\lambda\in\mathbb{F}$ and $x,y \in X$,
\item $[x,x]>0$ when $x\ne0$,
\item $|[x,y]|\leq[x,x]^{1/2}[y,y]^{1/2}$ for all $x,y \in X,$
\end{enumerate}
and moreover, it is compatible with the norm in the sense that $[x,x]^{1/2}=\|x\|$.
Recall that $X$ is said to have a Gateaux differentiable norm at $x\ne0$ whenever
$$\lim_{t\to0,t\in\mathbb{R}}\frac{\|x+ty\|-\|x\|}{t}$$
exists for all $y\in X$.
Remember also that a support functional $\phi_x$ at $x\in X$ is a
norm-one linear functional in $X^*$ such that $\phi_x(x) = \|x\|$. By
the Hahn--Banach theorem there always exists at least one such
functional for every $x\in X$.
A normed space $X$ is said to be smooth at $x$ if there
exists a unique support functional at $x$.
If $X$ is smooth at each one of its points then $X$ is said to be
smooth. It is well known, see for instance \cite[Theorem 1, p.~22]{Diestel}, that a Banach space $X$ is smooth at $x$ if and only if the norm is Gateaux differentiable at $x$. Moreover, in this case, the real part $\text{Re}\,\phi_x$ of a unique support functional $\phi_x$ at $x$ is given by
\begin{equation}\label{smooth}
\text{Re}\,\phi_x(y)=\lim_{t\to0, t\in\mathbb R}\frac{\|x+ty\|-\|x\|}{t}.
\end{equation}
If $X$ is not smooth then there are many semi-inner products compatible with the norm. However, if $X$ is smooth then $[x,y]:=\|y\|\phi_y(x)$, where $\phi_y$ is the support functional at $y$, is the unique semi-inner product with $[x,x]^{1/2}=\|x\|$.
Now the following natural question arises: Let $X,Y$ be normed spaces and $f:X\to Y$ a mapping such that
\begin{equation}\label{normedWigner}
|[f(x),f(y)]|=|[x,y]|,\quad x,y\in X.
\end{equation}
Is it true that $f$ satisfies (\ref{normedWigner}) if and only if it is phase equivalent to either a linear or an anti-linear isometry? Let us first check that in general even not all linear isometries satisfy (\ref{normedWigner}).
\begin{example}
Let $T \colon (l_\infty^2,\mathbb R)\to(l_\infty^2,\mathbb R)$ be defined by $T(x,y)=(y,x)$ and let the semi-inner product for $x=(x_1,x_2)$ and $y=(y_1,y_2)$ be defined by
$$[x,y]=\begin{cases}
x_1y_1&\text{if}\quad |y_1|>|y_2|\\
x_2y_2&\text{if}\quad|y_1|<|y_2|\\
\frac{3}{4}x_1y_1+\frac{1}{4}x_2y_2&\text{if}\quad |y_1|=|y_2|.
\end{cases}
$$
Then for $x=(1,0)$ and $y=(1,1)$ we get $[x,y]=\frac{3}{4}$ and $[Tx,Ty]=\frac{1}{4}$.
\end{example}
However, if $X$ and $Y$ are smooth normed spaces, then a mapping phase equivalent to a linear or an anti-linear isometry satisfies (\ref{normedWigner}). Indeed, if $U$ is a linear or an anti-linear isometry, then $\|Uy+tUx\|=\|y+tx\|$, $t\in\mathbb R$, hence by (\ref{smooth})
$$\text{Re}\,\phi_{Uy}(Ux)=\text{Re}\,\phi_y(x)$$
and then also $[Ux,Uy]=[x,y]$. From
$$[f(x),f(y)]=[\sigma(x)Ux,\sigma(y)Uy]=\sigma(x)\overline{\sigma(y)}[Ux,Uy]$$
the claim follows. In our main result Theorem \ref{main} we show that the converse also holds.
\section{Results}
Throughout, for a normed space $(X, \Vert \cdot \Vert)$, by $[\, \cdot, \cdot \,]$ we denote a semi-inner product satisfying $\Vert x \Vert = [x,x]^{1/2}$. We denote by $\mathbb PX=\{\langle x\rangle : x \in X\}$ the set of all one-dimensional subspaces of
a normed space $X$. If $M\subset X$ then $\langle M\rangle$ will denote the subspace generated by the set $M$. If $L\subseteq X$ is a two-dimensional subspace then $L=\langle L\rangle$ is called a projective line. Recall also that $A:X\to Y$ is semilinear if $A(x+y)=Ax+Ay$ and $A(\lambda x)=h(\lambda)Ax$, $x,y\in X$, $\lambda \in\mathbb{F}$, where $h:\mathbb{F}\to\mathbb{F}$ is a homomorphism. Next we state the fundamental theorem of projective geometry in the form in which it will be needed, see \cite[Theorem 3.1]{Faure}.
\begin{theorem}[Fundamental theorem of projective geometry]\label{projective}
Let $X$ and $Y$ be vector spaces over $\mathbb{F}$ of dimensions at least three. Let $g: \mathbb PX\to\mathbb PY$ be a mapping such that
\begin{itemize}
\item[(i)] The image of $g$ is not contained in a projective line.
\item[(ii)] $0\ne c\in \langle a,b\rangle, a\ne 0\ne b,$ implies $g(\langle c\rangle)\in \langle g(\langle a\rangle), g(\langle b\rangle)\rangle$.
\end{itemize}
Then there exists an injective semilinear mapping $A:X\to Y$ such that
$$g(\langle x\rangle)=\langle Ax\rangle,\quad 0\ne x\in X.$$
Moreover, $A$ is unique up to a non-zero scalar factor.
\end{theorem}
In the proof of the next theorem we will also need the notion of orthogonality in normed spaces. Remember that $x\in X$ is Birkhoff-James orthogonal to $y\in X$,
$$x\perp y\quad \text{if}\quad \|x+\lambda y\|\geq\|x\|\quad \text{for all }\lambda\in\mathbb{F}.$$
When $x\in X$ is a point of smoothness, then $x\perp y$
if and only if $y$ belongs to the kernel of the unique support
functional at~$x$, see \cite[Proposition 1.4.4.]{Fleming-Jamison}.
An important consequence is that Birkhoff-James orthogonality is right additive in smooth spaces, that is, $x\perp y, x\perp z\Rightarrow x\perp y+z$.
Also note that in this case $x\perp y$ if and only if $[y,x]=0$.
\begin{theorem}\label{main}
Let $X$ and $Y$ be smooth normed spaces over $\mathbb{F}$ and suppose that $f \colon X\to Y$ is a surjective mapping satisfying
$$|[f(x),f(y)]|=|[x,y]|,\quad x,y\in X.$$
\begin{itemize}
\item[(i)] If $\dim X\geq2$ and $\mathbb{F}=\mathbb{R}$, then $f$ is phase equivalent to a linear surjective isometry.
\item[(ii)] If $\dim X\geq2$ and $\mathbb{F}=\mathbb{C}$, then $f$ is phase equivalent to a linear or conjugate linear surjective isometry.
\end{itemize}
\end{theorem}
\begin{proof}
Let $\lambda\in\mathbb{F}$ and $x\in X$. We will show that $f(\lambda x)=\gamma f(x)$, where $\gamma=\gamma(\lambda,x)$ depends on $\lambda$ and on $x$, and $|\gamma|=|\lambda|$. The function
\begin{equation}\label{min}
\xi\mapsto\|f(\lambda x)-\xi f(x)\|
\end{equation}
is continuous and tends to infinity when $|\xi|$ tends to infinity. Hence there is at least one point, say $\gamma$, such that the function in (\ref{min}) achieves its global minimum. Thus
$$\min_{\xi\in\mathbb{F}}\|f(\lambda x)-\xi f(x)\|=\|f(\lambda x)-\gamma f(x)\|.$$
Note that
$$\|f(\lambda x)-\gamma f(x)+\mu f(x)\|\geq\|f(\lambda x)-\gamma f(x)\|$$
for all $\mu\in\mathbb{F}$, hence $f(\lambda x)-\gamma f(x)\perp f(x)$. Since $f$ is surjective, there is $z\in X$ such that $f(z)=f(\lambda x)-\gamma f(x)$. Then from $f(z)\perp f(x)$ we get $z\perp x$, and then $z\perp \lambda x$ and $f(z)\perp f(\lambda x)$. Since $Y$ is smooth, Birkhoff-James orthogonality is right additive, so from $f(z)\perp f(\lambda x)$ and $f(z)\perp f(x)$ we conclude $f(z)\perp f(\lambda x)-\gamma f(x)=f(z)$. Thus $f(z)=0$ and we have $f(\lambda x)=\gamma f(x)$.
Furthermore,
$$\vert \lambda \vert \Vert x \Vert = \|\lambda x\|=\|f(\lambda x)\|=\|\gamma f(x)\|=\vert \gamma \vert \Vert f(x) \Vert = \vert \gamma \vert \Vert x \Vert,$$
which implies $|\gamma|=|\lambda|$.
Next, let $x,y\in X$ be linearly independent. We will show that
$f(x+y)=\alpha f(x)+\beta f(y)$, where $\alpha=\alpha(x,y)$, $\beta=\beta(x,y)$, and $|\alpha|=|\beta|=1$. Analogously as before we obtain $\alpha, \beta\in\mathbb F$ such that
$$\min_{\xi,\eta\in\mathbb F}\|f(x+y)-\xi f(x)-\eta f(y)\|=\|f(x+y)-\alpha f(x)-\beta f(y)\|.$$
Furthermore, it is easy to see that
$$f(x+y)-\alpha f(x)-\beta f(y)\perp f(x)\quad\text{and}\quad f(x+y)-\alpha f(x)-\beta f(y)\perp f(y).$$
Take $z\in X$ such that $f(z)=f(x+y)-\alpha f(x)-\beta f(y)$. Then $f(z)\perp f(x)$ implies $z\perp x$, $f(z)\perp f(y)$ implies $z\perp y$ and smoothness of $X$ implies $z\perp x+y$ and then $f(z)\perp f(x+y)$. Hence $f(z)\perp f(z)$ and $f(z)=0$. Let us show that $|\alpha|=1$. Let $\min_\lambda\|x+\lambda y\|=\|x+\lambda_0y\|$. Then $x+\lambda_0y\perp y$ and $x+\lambda_0y\not\perp x$. Indeed, suppose that $x+\lambda_0y\perp x$. Then by the right additivity we get $x+\lambda_0y\perp x+\lambda_0y$. This would mean that $x+\lambda_0y=0$, a contradiction because $x$ and $y$ are linearly independent.
Denote $w=x+\lambda_0y$.
Since $w \perp y$ we also have $f(w) \perp f(y)$.
Then
\begin{eqnarray}
[f(x+y), f(w)]&=&\alpha[f(x), f(w)]+\beta[f(y), f(w)]\nonumber \\
&=&\alpha[f(x), f(w)],\nonumber
\end{eqnarray}
which implies
\begin{eqnarray}
\vert \alpha \vert \vert[x, w]\vert &=&\vert \alpha \vert \vert[f(x), f(w)]\vert=\vert[f(x+y), f(w)]\vert\nonumber \\
&=&\vert[x+y, w]\vert = \vert[x, w]\vert,\nonumber
\end{eqnarray}
hence $|\alpha|=1$. Similarly we get $|\beta|=1$.
Let us prove that $f$ induces a surjective mapping $\tilde{f} \colon \mathbb{P}X\to\mathbb{P}Y$ defined by $\tilde{f}(\langle x\rangle)=\langle f(x)\rangle$. Suppose $\langle x\rangle=\langle y\rangle$, that is $y=\lambda x$. Then $f(y)=f(\lambda x)=\gamma f(x)$ for some $\gamma\in\mathbb F$ and then $\langle f(y)\rangle=\langle f(x)\rangle$. So $\tilde{f}$ is well defined and surjective because $f$ is surjective.
Now suppose that $\dim X\geq3$ and let $x\in X$ be a unit vector. Choose a unit vector $y\in\ker\phi_x$, where $\phi_x$ is the support functional at $x$, and then choose a unit vector $z\in\ker\phi_x\cap\ker\phi_y$, where $\phi_y$ is the support functional at $y$. Then from $x\perp y$, $x\perp z$ and $y\perp z$ follows that $x,y,z$ are linearly independent. Indeed, $y$ and $z$ are linearly independent because $y\perp z$. From $x\perp y$ and $x\perp z$ it follows, using homogeneity and right additivity of Birkhoff-James orthogonality, that $x\perp\langle y,z\rangle$, hence $x,y,z$ are linearly independent. Now $f(x), f(y), f(z)$ are unit vectors such that $f(x)\perp f(y)$, $f(x)\perp f(z)$ and $f(y)\perp f(z)$. As before we conclude that $f(x), f(y)$ and $f(z)$ are linearly independent. So the image of $f$ is not contained in a two-dimensional subspace, thus the image of $\tilde{f}$ is not contained in a projective line. This shows that $\tilde{f}$ satisfies condition (i) of Theorem \ref{projective}. Furthermore, from $f(\lambda x)=\gamma f(x)$ and $f(x+y)=\alpha f(x)+\beta f(y)$ it follows that condition (ii) of Theorem \ref{projective} is also satisfied.
Thus by Theorem \ref{projective} we conclude that $\tilde{f}$ is induced by a bijective semilinear mapping $A:X\to Y$, that is,
$$\tilde{f}(\langle x\rangle)=\langle Ax\rangle,\quad x\in X.$$
Fix a nonzero $x\in X$. Then $f(x)=\lambda Ax$ for some nonzero $\lambda\in\mathbb{F}$. Let $y\in X$ be such that $x$ and $y$ are linearly independent. Then $f(y)=\mu Ay$ and $f(x+y)=\nu A(x+y)$. Note also that $Ax$ and $Ay$ are linearly independent since $A$ is semilinear and bijective.
Thus from $f(x+y)=\alpha f(x)+\beta f(y)=\alpha\lambda Ax+\beta\mu Ay$ we get
$\alpha\lambda=\nu$ and $\beta\mu=\nu$. Since $|\alpha|=|\beta|=1$ we get $|\lambda|=|\mu|=|\nu|$.
Hence $f(z)=\lambda(z)Az$ with $\vert \lambda(z) \vert=\vert \lambda \vert$ for all $z\in X$.
Let $U=\lambda A$ and $\sigma(z)=\lambda(z)/\lambda$ for every $z \in X$.
Then $\sigma \colon X \to \mathbb{F}$ is a phase function and
$$f(z)=\lambda(z)Az=\sigma(z)Uz, \quad z \in X.$$
If $\mathbb{F}=\mathbb{R}$ then $A$ (hence also $U$) is linear, because any nontrivial homomorphism $h \colon \mathbb{R}\to\mathbb{R}$ is identity. Suppose $\mathbb{F}=\mathbb{C}$.
Let $\xi\in\mathbb{C}$. Then
$$f(\xi z)=\lambda(\xi z)A(\xi z)=\lambda(\xi z)h(\xi)Az,$$
and on the other hand $f(\xi z)=\xi'f(z)=\xi'\lambda(z)Az$. Because $|\lambda(\xi z)|=|\lambda(z)|$ and $|\xi'|=|\xi|$ we get $|h(\xi)|=|\xi|$.
Then $h$ is continuous at zero, hence continuous everywhere.
A continuous homomorphism $h \colon \mathbb{C}\to\mathbb{C}$ is either identity or conjugation.
Therefore $A$, and also $U$, is linear or conjugate linear.
It is now clear that $U$ is an isometry. It is surjective because $f$ is surjective.
This completes the proof in the case $\dim X\geq3$.
Let us now suppose that $\dim X=2$.
Let us fix linearly independent $x_0, y_0 \in X$.
Let $A(x_0)=f(x_0)$.
For every $\mu \in \mathbb{F}$ there exist $\omega_1, \omega_2 \in \mathbb{F}$ such that
$f(x_0+\mu y_0)=\omega_1 f(x_0)+\omega_2 f(y_0)$,
with $\vert \omega_1 \vert =1$, $\vert \omega_2 \vert = \vert \mu \vert$.
Let $h(\mu)=\omega_2 / \omega_1$ and $A(\mu y_0) = h(\mu)f(y_0)$.
Note that $\vert h(\mu) \vert = \vert \mu \vert$.
Furthermore, let us define $A(x_0+\mu y_0)=A(x_0)+A(\mu y_0)$.
For $\lambda, \mu \in \mathbb{F}$,
$$f(x_0+(\lambda+\mu)y_0)=\omega_1 f(x_0) + \omega_1 h(\lambda+\mu)f(y_0),$$
and also
\begin{eqnarray}
f(x_0+(\lambda+\mu)y_0) &=& f((x_0+\lambda y_0)+\mu y_0) = \omega_2 f(x_0+\lambda y_0) + \omega_3 f(y_0) \nonumber \\
&=& \omega_4 f(x_0)+ \omega_4 h(\lambda)f(y_0)+\omega_3f(y_0). \nonumber
\end{eqnarray}
Since $f(x_0)$ and $f(y_0)$ are also linearly independent, $\omega_4=\omega_1$ and $\omega_4 h(\lambda)+\omega_3 = \omega_1 h(\lambda + \mu)$, with $\vert \omega_1 \vert = 1$ and $\vert \omega_3 \vert = \vert \mu \vert$.
Then
\begin{eqnarray}\label{0A}
h(\lambda + \mu)=h(\lambda) + \frac{\omega_3}{\omega_1},
\end{eqnarray}
which implies
$$\vert \lambda + \mu \vert = \vert h(\lambda+\mu) \vert = \Big\vert h(\lambda) + \frac{\omega_3}{\omega_1} \Big\vert$$
with $\vert h(\lambda) \vert = \vert \lambda \vert$ and $\vert \omega_3 / \omega_1 \vert = \vert \mu \vert$.
This yields
$$\Big\vert \frac{\lambda}{\mu}+1 \Big\vert = \Big\vert h(\lambda) \frac{\omega_1}{\omega_3}+1 \Big\vert$$
with $\vert \frac{\lambda}{\mu} \vert = \vert h(\lambda) \frac{\omega_1}{\omega_3} \vert$.
It can be easily verified that
$$\frac{\lambda}{\mu} = h(\lambda) \frac{\omega_1}{\omega_3} \quad \textup{or} \quad \frac{\overline{\lambda}}{\overline{\mu}} = h(\lambda) \frac{\omega_1}{\omega_3},$$ that is,
\begin{eqnarray}\label{1A}
\frac{\omega_3}{\omega_1}=h(\lambda)\frac{\mu}{\lambda}
\end{eqnarray}
or
\begin{eqnarray}\label{2A}
\frac{\omega_3}{\omega_1}=h(\lambda)\frac{\overline{\mu}}{\overline{\lambda}}.
\end{eqnarray}
Let us fix $\eta \in \mathbb{F}$.
If \eqref{1A} holds for $\lambda=1$ and $\mu=\eta-1$ then \eqref{0A} implies
$$h(\eta)=h(1)\eta.$$
If \eqref{2A} holds for $\lambda=1$ and $\mu=\eta-1$ then \eqref{0A} implies
$$h(\eta)=h(1)\overline{\eta}.$$
If $\mathbb{F}=\mathbb{R}$ we are done. Suppose that $\mathbb{F} = \mathbb{C}$.
Note that \eqref{0A} becomes
$$h(\lambda+\mu)=h(\lambda)+h(\lambda)\frac{\mu}{\lambda},$$
or
$$h(\lambda+\mu)=h(\lambda)+h(\lambda)\frac{\overline{\mu}}{\overline{\lambda}}.$$
If for some $\lambda \in \mathbb{F} \setminus \mathbb{R}$ we have $h(\lambda)=h(1)\lambda$ and for some $\mu \in \mathbb{F} \setminus \mathbb{R}$ we have $h(\mu)=h(1)\overline{\mu}$ then
$$h(\mu)=h(\lambda+(\mu-\lambda))=h(\lambda)+h(\lambda)\frac{\mu-\lambda}{\lambda}=h(1)\mu$$
or
$$h(\mu)=h(\lambda+(\mu-\lambda))=h(\lambda)+h(\lambda)\frac{\overline{\mu}-\overline{\lambda}}{\overline{\lambda}}=h(1)\frac{\lambda}{\overline{\lambda}}\mu.$$
In both cases we arrive at a contradiction with $\lambda, \mu \notin \mathbb{R}$.
Hence $h(\lambda)=h(1)\lambda$ for every $\lambda \in \mathbb{R}$ or $h(\lambda)=h(1)\overline{\lambda}$ for every $\lambda \in \mathbb{R}$.
Let $k=h(1)$ and let $A(y_0)=kf(y_0)$.
Then $A(\mu y_0)=\mu A(y_0)$ or $\overline{\mu}A(y_0)$, and $A(x_0+\mu y_0)=A(x_0)+\mu A(y_0)$ or $A(x_0+\mu y_0)=A(x_0)+\overline{\mu}A(y_0)$, respectively.
In the first case we extend $A$ to $X$ by $A(\lambda x_0+\mu y_0)= \lambda A(x_0+\frac{\mu}{\lambda}y_0)$, and in the second case by $\overline{\lambda} A(x_0+\frac{\mu}{\lambda}y_0)$.
Such $A$ is linear or conjugate linear.
From
\begin{eqnarray}
\Vert \lambda x_0 + \mu y_0 \Vert &=& \vert \lambda \vert \, \Vert f(x_0+\frac{\mu}{\lambda} y_0) \Vert = \vert \lambda \vert \, \Vert f(x_0)+h(\frac{\mu}{\lambda})f(y_0) \Vert \nonumber \\
&=& \vert \lambda \vert \, \Vert A(x_0+\frac{\mu}{\lambda} y_0) \Vert = \Vert A(\lambda x_0+\mu y_0) \Vert, \nonumber
\end{eqnarray}
we conclude that $A$ is an isometry.
Finally,
\begin{eqnarray}
f(\lambda x_0 + \mu y_0) &=&\lambda' f(x_0+\frac{\mu}{\lambda} y_0) \nonumber \\
&=& \lambda' (\omega f(x_0) + \omega h(\frac{\mu}{\lambda}) f(y_0)) = \omega\frac{\lambda'}{\lambda} A(\lambda x_0 + \mu y_0) \nonumber
\end{eqnarray}
for some $\omega, \lambda' \in \mathbb{F}$ such that $\vert \omega \vert = 1$, $\vert \lambda' \vert = \vert \lambda \vert$.
It remains to define $\sigma(\lambda x_0 + \mu y_0) = \omega \frac{\lambda'}{\lambda}$.
\end{proof}
\begin{remark}
If $X$ is one-dimensional then $X$ is obviously smooth. Suppose that $Y$ is a smooth normed space and $f:X\to Y$ a mapping such that $|[f(x),f(y)]|=|[x,y]|$, $x,y\in X$. Let $\lambda\in\mathbb F$ and fix a unit vector $x\in X$. Analogously as in Theorem \ref{main}, we obtain $f(\lambda x)=\gamma f(x)$ for some $\gamma\in\mathbb F$, which depends on $\lambda$, and $|\gamma|=|\lambda|$. Now for $z=\lambda x$ define phase function $\sigma(z)=\gamma/\lambda$ and define a linear surjective isometry $U:X\to Y$ by $Uz=\lambda f(x)$. Then $f=\sigma U$ and we conclude that $f$ is phase equivalent to a linear surjective isometry.
\end{remark}
Maksa and P\'{a}les, see \cite{Maksa}, showed that for a mapping $f:H\to K$, where $H$ and $K$ are real inner product spaces, Wigner's theorem is equivalent to the requirement that $f$ satisfies the following condition:
\begin{equation}\label{phaseisometry}
\{\|f(x)+f(y)\|,\|f(x)-f(y)\|\}=\{\|x+y\|,\|x-y\|\},\quad x,y\in H.
\end{equation}
They asked for possible generalizations in the setting of real normed spaces, that is, if $X$ and $Y$ are real normed spaces and $f:X\to Y$ a mapping, is it true that $f$ satisfies (\ref{phaseisometry}) if and only if $f$ is phase equivalent to a linear isometry?
Recall that a normed space $X$ is said to be strictly convex whenever the unit sphere $S_X$ contains no non-trivial line segments, that is, each point of $S_X$ is an extreme point of a unit ball $B_X$.
The following proposition generalizes \cite[Theorem 2 (i) $\Leftrightarrow$ (iv) $\Leftrightarrow$ (v)]{Maksa}.
\begin{proposition}
Let $X$, $Y$ be real smooth normed spaces, $Y$ strictly convex, $f:X\to Y$ surjective. The following assertions are equivalent:
\begin{itemize}
\item[(i)] $|[f(x),f(y)]|=|[x,y]|$, $x,y\in X$.
\item[(ii)] $f$ is phase equivalent to a linear surjective isometry.
\item[(iii)] $\{\|f(x)+f(y)\|,\|f(x)-f(y)\|\}=\{\|x+y\|,\|x-y\|\}$, $x,y\in X$.
\end{itemize}
\end{proposition}
\begin{proof}
(i) $\Rightarrow$ (ii) is Theorem \ref{main}, and (ii) $\Rightarrow$ (iii) is obvious. It remains to prove (iii) $\Rightarrow$ (i). Let $x=y$. Then from $\{2\|f(x)\|,0\}=\{2\|x\|,0\}$ we get $\|f(x)\|=\|x\|$, $x\in X$. Insert $2x$ and $x$ in (iii) to get
$$\{\|f(2x)+f(x)\|,\|f(2x)-f(x)\|\}=\{3\|x\|,\|x\|\},\quad x\in X.$$
Hence for $x\in X$ either $\|f(2x)+f(x)\|=3\|x\|$ or $\|f(2x)+f(x)\|=\|x\|$.
Suppose that $x\in X$ is such that $\|f(2x)+f(x)\|=3\|x\|$. Then from
$$3\|x\|=\|f(2x)+f(x)\|\leq\|f(2x)\|+\|f(x)\|=3\|x\|$$
and strict convexity of $Y$ we get $f(2x)=2f(x)$. If $\|f(2x)-f(x)\|=3\|x\|$, then, analogously, we get $f(2x)=-2f(x)$. Therefore $f(2x)=\pm 2f(x)$, $x\in X$. Let $n=2^m$. Then from $f(nx)=\pm nf(x)$, $x\in X$, we have
\begin{eqnarray*}
n(\|f(x)+\tfrac{1}{n}f(y)\|-\|f(x)\|)= \|\pm f(nx)+f(y)\|-n\|f(x)\|\\
=\|nx\pm y\|-n\|x\|= n(\|x\pm\tfrac{1}{n}y\|-\|x\|), \quad y\in X.
\end{eqnarray*}
Thus $|[f(y),f(x)]|=|[y,x]|$, $x, y\in X$ and the proof is completed.
\end{proof}
In the last part of the paper we consider mappings $f:X\to Y$ satisfying
\begin{equation}
[f(x),f(y)]=[x,y],\quad x,y\in X.
\end{equation}
Namely, it is easy to see that in the setting of inner product spaces any such mapping is necessarily a linear isometry.
\begin{proposition}\label{isometry}
Let $X$ and $Y$ be normed spaces and $f \colon X\to Y$ a mapping such that $[f(x),f(y)]=[x,y]$, $x,y\in X$.
\begin{itemize}
\item[(i)] If $f$ is surjective then $f$ is a linear isometry.
\item[(ii)] If $X=Y$ is smooth Banach space then $f$ is a linear isometry.
\end{itemize}
\end{proposition}
\begin{proof}
(i). From
\begin{align*}
[f(\lambda x+\mu y),f(z)]&=[\lambda x+\mu y,z]=\lambda[x,z]+\mu[y,z]\\
&=\lambda[f(x),f(z)]+\mu[f(y),f(z)]=[\lambda f(x)+\mu f(y),f(z)]
\end{align*}
we conclude
\begin{equation}\label{orth}
[f(\lambda x+\mu y)-\lambda f(x)-\mu f(y),f(z)]=0
\end{equation}
for all $x,y,z\in X$ and all $\lambda, \mu\in \mathbb{F}$. Since $f$ is surjective, linearity of $f$ follows.
(ii). The proof is by contradiction. Let us denote $u=f(\lambda x+\mu y)-\lambda f(x)-\mu f(y)$ and suppose that $u\ne 0$. From (\ref{orth}) we get $f(z)\perp u$ for all $z\in X$ and because $X$ is smooth this is equivalent to $\phi_{f(z)}(u)=0$. Because of the homogeneity of orthogonality relation we may and do assume that $\|u\|=1$. From
$$\|\phi_u+\xi\phi_{f(z)}\|\geq|\phi_u(u)+\xi\phi_{f(z)}(u)|=|\phi_u(u)|=1=\|\phi_u\|$$
for all $\xi\in\mathbb{R}$ we conclude $\phi_u\perp\phi_{f(z)}$ for all $z\in X$.
Homogeneity of Birkhoff-James orthogonality implies $\phi_u\perp\xi\phi_{f(z)}$ for all $z\in X$, $\xi \in \mathbb{R}$.
Furthermore,
$$\|f(z)\|\phi_{f(z)}(f(w))=[f(w),f(z)]=[w,z]=\|z\|\phi_z(w)$$
shows
$$\phi_{f(z)}\circ f=\phi_z,\quad z\in X.$$
By the Bishop--Phelps theorem (see \cite{Bishop-Phelps} or a recent survey \cite{Aron-Lomonosov}), for given $\psi \in X^*$ and $\varepsilon > 0$ there exists $\theta \in X^*$, $\Vert \theta \Vert = \Vert \psi \Vert$ and $\Vert \psi - \theta \Vert < \varepsilon$, such that there exists $z \in S_X$ satisfying $\theta(z)=\Vert \theta \Vert$.
Then $\pm\frac{1}{\|\theta\|}\theta$ is the support functional at $z\in S_X$. Thus $\theta=\pm\|\theta\|\phi_z\in\{\xi\phi_z: z\in X, \xi\in\mathbb{R}\}$. Hence $X^*$ is contained in the norm closure of $\{\xi\phi_z: z\in X, \xi\in\mathbb{R}\}$. Since the reverse inclusion is trivial we conclude that $X^*$ is equal to the norm closure of $\{\xi\phi_z: z\in X, \xi\in\mathbb{R}\}$.
Then from
$$\{\xi\phi_{f(z)}: z\in X, \xi\in\mathbb{R}\}\supseteq \{\xi\phi_{f(z)}\circ f: z\in X, \xi\in\mathbb{R}\}$$
and $\phi_{f(z)}\circ f=\phi_z$ for all $z\in X$ we conclude that $X^*$ is equal to the norm closure of $\{\xi\phi_{f(z)}: z\in X, \xi\in\mathbb{R}\}$. Then $\phi_u\perp \xi\phi_{f(z)}$ for all $z\in X$ and $\xi \in \mathbb{R}$ implies $\phi_u=0$. This shows that our assumption $u\ne 0$ is false and $f$ must be linear. This completes the proof.
\end{proof}
\begin{corollary}
Let $X$ and $Y$ be normed spaces, $X$ smooth and $f:X\to Y$ a mapping. If $f$ is surjective or $X=Y$ then the following assertions are equivalent:
\begin{itemize}
\item[(i)] $[f(x),f(y)]=[x,y]$, $x,y\in X$.
\item[(ii)] $f$ is a linear isometry.
\end{itemize}
\end{corollary}
\begin{proof}
That (i)$\Rightarrow$(ii) follows by Proposition \ref{isometry}. Let us prove (ii)$\Rightarrow$(i). Take arbitrary $u,v\in Y$ and find $x,y\in X$ such that $u=f(x)$ and $v=f(y)$. Then from
$$\frac{1}{t}(\|u+tv\|-\|u\|)=\frac{1}{t}(\|f(x)+tf(y)\|-\|f(x)\|)=\frac{1}{t}(\|x+ty\|-\|x\|)$$
it follows that $Y$ is also smooth and $[f(x),f(y)]=[x,y]$.
\end{proof}
\end{document} |
\begin{document}
\title[Semi-classical {\em vs.} quantum description of the ground state of three-level atoms\dots ]{Semi-classical {\em vs.} quantum description of the ground state of three-level atoms interacting with a one-mode electromagnetic field}
\author{S Cordero, O Casta\~nos, R L\'opez-Pe\~na and E~Nahmad-Achar}
\address{
Instituto de Ciencias Nucleares, Universidad Nacional Aut\'onoma de M\'exico, Apartado Postal 70-543, 04510 M\'exico DF, Mexico \\ }
\ead{\mailto{sergio.cordero@nucleares.unam.mx},\,\mailto{ocasta@nucleares.unam.mx},\\ \mailto{lopez@nucleares.unam.mx},\,\mailto{nahmad@nucleares.unam.mx}}
\date{\today}
\begin{abstract}
We consider $N_a$ three-level atoms (or systems) interacting with a one-mode electromagnetic field in the dipolar and rotating wave approximations. The order of the quantum phase transitions is determined explicitly for each of the configurations $\Xi$, $\Lambda$ and $V$, with and without detuning. The semi-classical and exact quantum calculations for both the expectation values of the total number of excitations $\cal{M}=\langle \bm{M} \rangle$ and photon number $n=\langle \bm{n} \rangle$ have an excellent correspondence as functions of the control parameters. We prove that the ground state of the collective regime obeys sub-Poissonian statistics for the ${\cal M}$ and $n$ distribution functions. Therefore, their corresponding fluctuations are not well described by the semiclassical approximation. We show that this can be corrected by projecting the variational state to a definite value of ${\cal M}$.
\end{abstract}
\pacs{42.50.Ct,42.50.Nn,73.43.Nq,03.65.Fd}
\maketitle
\section{Introduction}
Interaction of $N_a$ two-level atoms with a quantized electromagnetic field, using dipolar and rotating wave approximations, is described by the {\em Tavis-Cummings Model} \cite{tavis68,tavis69}, having an extensive use in quantum optics \cite{dodonov03}. Recently this model has been
physically realized using a QED cavity with Bose-Einstein
condensates \cite{baumann10,nagy10}. Particularly interesting has
been the investigation of the phase transitions of the
system in the thermodynamic limit \cite{hepp73,wang73}, and at zero
temperature \cite{buzek05,castanos09a,castanos09b}.
The system of three-level atoms interacting with a one mode radiation field together with a dipole-dipole interaction between the atoms has been studied to determine the atomic squeezing~\cite{civitarese1,civitarese2}. They consider $\Xi$ and $\Lambda$ configurations under initial conditions of the matter and field parts associated to $SU(2)$ and Heisenberg-Weyl coherent states, respectively. Spin variances for the $V$ and $\Lambda$ configurations of an ensemble of atoms interacting with two light fields, a coherent pump state and a squeezed vacuum as a probe, have been calculated by means of the Langevin equations derived from the Bloch equations~\cite{dantan}. By using a Holstein-Primakoff mapping, two stable states, normal and superradiant (the latter in two colors), have been identified in the thermodynamic limit for the $\Lambda$ configuration~\cite{brandes}.
More recently, we have analytically obtained the localization of the quantum phase transitions from the normal to the collective regimes for three-level atoms interacting with a one-mode field for the $\Xi$, $\Lambda$, and $V$ configurations, in the rotating wave approximation (RWA). These transitions appear in the ground state energy surface ${\cal E}^c$ and the corresponding total number of excitations ${\cal M}^c$, when plotted as functions of their corresponding dipole coupling constants (control parameters), calculated using as test function the direct product of the Heisenberg-Weyl (field contribution) and Gelfand-Tsetlin (matter contribution) coherent states. We found that the agreement of these quantities with the corresponding exact quantum calculations (namely $E^q$ and $M^q$) is remarkable~\cite{clpcna13}.
In this paper we determine explicitly the order of the quantum phase transitions, and calculate the Mandel parameter of the ${\cal M}$ distribution function and of the photon number distribution function of the ground state of the system. We find that first- and second-order transitions appear for atoms in the $\Xi$ configuration, and only second-order transitions appear for atoms in the $V$ configuration. Atoms in the $\Lambda$ configuration, depending of the detuning parameter, mimic the behavior of the $\Xi$ or the $V$ configuration. We find that in the collective regime, i.e., where the ground state possesses ${\cal M} >0$, the state obeys sub-Poissonian statistics while in the normal regime it satisfies Poissonian statistics.
While both, the total number of excitations ${\cal M}$ and the expectation value of the number of photons $\langle \bm{n}\rangle$, are in agreement with their corresponding exact quantum calculation, we find that their fluctuations are not. This is because the semi-classical ground state has the contribution of an infinite number of photons in a Poissonian distribution. The above suggest a projection of the test function to a definite value of the total number of excitations. This we do by means of a discretization of ${\cal M}$, according to its expectation value with respect to the test function. We prove that this {\it projected state} provides the appropriate correction, where now ${\cal M}$, $\langle \bm{n}\rangle$, and their corresponding fluctuations are in excellent agreement with the exact quantum calculation.
The paper is organized as follows: Sec.~\ref{formalism} presents in general the problem for $N_a$ atoms of $N$-levels interacting with $L$-modes of a quantized electromagnetic field in the dipolar approximation. In Sec.~\ref{three.level.atoms} we restrict to the problem of three-level atoms interacting with a one-mode quantized electromagnetic field (QEMF) in the RWA approximation, and establish the corresponding constant of motion ${\cal M}$ (total number of excitations) for each atomic configuration. In \ref{GCS} the test function as a direct product of Heisenberg-Weyl (field contribution) and Gelfand-Tsetlin (matter contribution) coherent states is proposed for the semi-classical approximation. The corresponding semi-classical energy of the problem is calculated in \ref{energy_surface}. In \ref{order.trans} we provide an exact expression to evaluate the first order derivatives of the ground state energy surface (as a function of the control parameters), so that the first-order transitions for each atomic configuration can be calculated in analytical form. For every value of the total number of excitations, the corresponding Mandel parameter of the semi-classical ground state, providing the kind of statistics that it satisfies, is evaluated in \ref{fluc}. In \ref{numerical.results} we show the numerical results for both order transitions, the Mandel parameter and the photon expectation value, for all different atomic configurations. Sec.~\ref{QNS} presents the exact quantum calculations and compares them with the semi-classical ones. In Sec. \ref{quantum.proj} the calculations obtained by using the projected variational state with the corresponding exact quantum results are compared. Finally, we give in Section \ref{concluding} some concluding remarks.
\section{$N$-level atoms interacting with an $L$-mode QEMF}\label{formalism}
We consider, in the dipolar approximation, the Hamiltonian of $N_a$ identical atoms of $N$-levels interacting with $L$-modes of a quantized electromagnetic field. Let $\bm{A}_{ij}^{(k)}$ denote the atomic operator of the $k$-th atom. For each atom, these operators obey a unitary algebra ${\rm u}_k(N)$ in $N$ dimensions, i.e.,
\begin{eqnarray}
\sum_{i=1}^{N} \bm{A}_{ii}^{(k)} = 1,\\
\left[\bm{A}_{ij}^{(k)},\bm{A}_{lm}^{(k')}\right] = \delta_{kk'}\left(\delta_{jl}\bm{A}_{im}^{(k)} - \delta_{im}\bm{A}_{lj}^{(k)}\right).
\end{eqnarray}
Defining
\begin{eqnarray}
\bm{A}_{ij} &\equiv& \sum_{k=1}^{N_a} \bm{A}_{ij}^{(k)},\label{op.Aij}
\end{eqnarray}
one can see that the following relationships are fulfilled
\begin{eqnarray}
\bm{n}_a= \sum_{i=1}^{N} \bm{A}_{ii} \label{eq.Na}\\
\left[\bm{A}_{ij},\bm{A}_{lm}\right] = \delta_{jl}\bm{A}_{im} - \delta_{im}\bm{A}_{lj}. \label{eq.AijAlm}
\end{eqnarray}
We have here defined the operator $\bm{n}_a$ representing the total number of atoms, with eigenvalue $N_a$, and Eq. (\ref{eq.AijAlm}) shows that the set of operators $\bm{A}_{ij}$ obey the commutation relations of a unitary algebra in $N$ dimensions, ${\rm u}(N)= \oplus^{N_a}_{k=1} \, u_k(N)$.
Now, for $L$-modes of a quantized field and $N_a$ atoms, the free Hamiltonian may be written as ($\hbar = 1$)
\begin{eqnarray}
\bm{H}_0 = \sum_{\ell = 1}^L \Omega_\ell \bm{a}_\ell^\dag \bm{a}_\ell + \sum_{i=1}^N\omega_i\bm{A}_{ii},
\end{eqnarray}
where $\Omega_\ell$ and $\omega_i$ correspond, respectively, to the frequencies of the $\ell$-th field mode and $i$-th atomic level (we choose $\omega_1\leq\omega_2\leq\cdots\leq\omega_N$). Here $\bm{a}_\ell^\dag,\ \bm{a}_\ell$ are the usual creation and annihilation operators of the field obeying the boson algebra, i.e.,
\begin{eqnarray}
\left[\bm{a}_i,\bm{a}_j^\dag \right] = \delta_{ij},
\end{eqnarray}
and $\bm{A}_{ij}$ are the atomic operators of Eq. (\ref{op.Aij}).
The interaction Hamiltonian due to the dipole operator $\vec{\bm{d}}$ of the atoms with the electromagnetic field $\vec{\bm{E}}$, reads as \cite{haroche}
\begin{eqnarray}\label{eq.DE}
\bm{H}_{int} = -\vec{\bm{d}}\cdot\vec{\bm{E}}.
\end{eqnarray}
$\vec{\bm{d}}$ may be written as
\begin{eqnarray}\label{eq.D}
\vec{\bm{d}} = \sum_{i\neq j} \vec{d}_{ij} \bm{A}_{ij},
\end{eqnarray}
where $\vec{d}_{ij}$ represent the matrix elements of the vector dipole operator between the levels $j$ and $i$. Notice that $\vec{\bm{d}}$ has no diagonal contributions, because the dipolar interaction of a level with itself is zero. The corresponding quantized field may be written as
\begin{eqnarray}\label{eq.E}
\vec{\bm{E}} = \sum_{\ell=1}^L \left[\vec{{\cal E}}_\ell(\vec{r})\bm{a}_\ell + \vec{{\cal E}}^*_\ell(\vec{r})\bm{a}_\ell^\dag\right],
\end{eqnarray}
where $\vec{{\cal E}}_\ell(\vec{r})$ obeys the Helmholtz equation for the $\ell$-th field mode, providing the structure of the field into the cavity. Substituting Eqs. (\ref{eq.D}) and (\ref{eq.E}) into Eq. (\ref{eq.DE}), and reordering the different contributions, one may write the interaction Hamiltonian as
\begin{eqnarray}\label{eq.Hint.1.full}
\bm{H}_{int} &=& -\sum_{s=1}^{N-1}\sum_{\ell=1}^L \left(\bm{a}_\ell^\dag {\vec{g}_{s\ell}}\cdot\vec{\bm{\sigma}}_{s-} + \bm{a}_\ell \vec{\bm{\sigma}}_{s+}\cdot {\vec{g}_{s\ell}}^{\, *\textrm{\tiny T}} \right) \nonumber \\
&-& \sum_{s=1}^{N-1}\sum_{\ell=1}^L \left(\bm{a}_\ell {\vec{g}_{s\ell}}^{\, *}\cdot\vec{\bm{\sigma}}_{s-} + \bm{a}_\ell^\dag \vec{\bm{\sigma}}_{s+}\cdot {\vec{g}_{s\ell}}^{\, \textrm{\tiny T}} \right),\qquad
\end{eqnarray}
where we have defined the vector operators
\begin{eqnarray}
\vec{\bm{\sigma}}_{s+} = \left(\bm{A}_{1+s,1},\dots,\bm{A}_{j+s,j},\dots, \bm{A}_{(N-s)+s,N-s}\right)\quad
\label{sigmaop}
\end{eqnarray}
containing the set of operators $\bm{A}_{ij}$ with transitions from the $j$-th level of the atom to the $(j+s)$-th level. Also, $\vec{\bm{\sigma}}_{s-} = \vec{\bm{\sigma}}_{s+}^\dag$, and
\begin{eqnarray}
\vec{g}_{s\ell} = \frac{1}{\sqrt{N_a}}\left(\mu_{1,1+s}^{(\ell)}, \dots, \mu_{j,j+s}^{(\ell)}, \dots, \mu_{N-s,(N-s)+s}^{(\ell)}\right)\qquad \label{eq.gsk}
\end{eqnarray}
with $\mu_{ij}^{(\ell)}/\sqrt{N_a} = \vec{d}_{ij}\cdot \vec{{\cal E}}_\ell^*$, the coupling parameter between levels $i$ and $j$, and where we have taken $\vec{d}_{ji}=\vec{d}_{ij}$. Here, we have eliminated the dependence on $\vec{r}$ of $\vec{{\cal E}}_k^*$ by supposing that the $N_a$ atoms are stationary at the center of the cavity, and that the field is a smooth function in that region.
The second term on the rhs of equation (\ref{eq.Hint.1.full}) corresponds to the counter-rotating term, which is neglected when the RWA is considered. Thus the interaction term in the RWA is given by
\begin{eqnarray}\label{eq.Hint.1}
\bm{H}_{int} = -\sum_{s=1}^{N-1}\sum_{\ell=1}^L \left(\bm{a}_\ell^\dag {\vec{g}_{s\ell}}\cdot\vec{\bm{\sigma}}_{s-} + \bm{a}_\ell \vec{\bm{\sigma}}_{s+}\cdot {\vec{g}_{s\ell}}^{\, *\textrm{\tiny T}} \right). \quad
\end{eqnarray}
Finally, the full Hamiltonian in RWA reads as
\begin{eqnarray}\label{eq.H.full}
\bm{H} &=& \sum_{\ell = 1}^L \Omega_\ell \bm{a}_\ell^\dag \bm{a}_\ell + \sum_{j=1}^N\omega_j\bm{A}_{jj} \nonumber \\
&-&
\sum_{s=1}^{N-1}\sum_{\ell=1}^L \left(\bm{a}_\ell^\dag {\vec{g}_{s\ell}}\cdot\vec{\bm{\sigma}}_{s-} + \bm{a}_\ell \vec{\bm{\sigma}}_{s+}\cdot {\vec{g}_{s\ell}}^{\, *\textrm{\tiny T}} \right).
\end{eqnarray}
The Hamiltonian above shows the underlying structure of the unitary group in $N$ dimensions, $U(N)$, which makes natural the use of the Gelfand-Tsetlin states~\cite{gelfand}. This allows for the description, in general, of systems with any kind of symmetry, including distinguishable particles.
\section{Three-level atoms interacting with a one-mode QEMF}\label{three.level.atoms}
In what follows we consider $N_a$ three-level atoms interacting with a one-mode QEM field, i.e., we choose $N=3$ and $L=1$ in Eq. (\ref{eq.H.full}). Replacing the corresponding values of $\vec{\bm{\sigma}}_{s\pm}$ and $\vec{g}_{s\ell}$ into Eq. (\ref{eq.H.full}) one finds the Hamiltonian of the system as
\begin{eqnarray}\label{eq.H.3level}
\bm{H} &=& \Omega \bm{a}^\dag\bm{a} + \omega_1\bm{A}_{11} + \omega_2\bm{A}_{22} + \omega_3\bm{A}_{33}
- \frac{1}{\sqrt{N_a}}\mu_{12}\left(\bm{a}\bm{A}_{21} + \bm{a}^\dag\bm{A}_{12} \right)\nonumber \\
&-&\frac{1}{\sqrt{N_a}} \mu_{13}\left(\bm{a}\bm{A}_{31} + \bm{a}^\dag\bm{A}_{13}\right)
-\frac{1}{\sqrt{N_a}} \mu_{23}\left(\bm{a}\bm{A}_{32} + \bm{a}^\dag\bm{A}_{23}\right) \, ,
\end{eqnarray}
where the subscript on the field operators is no longer necessary, and without loss of generality we assume that the coupling constants obey $\mu_{ij} = \mu_{ij}^* = \mu_{ji}$. The only operator of the form $\bm{C} = \lambda\bm{a}^\dag\bm{a} + \lambda_1\bm{A}_{11} + \lambda_2\bm{A}_{22} + \lambda_3\bm{A}_{33}$ that commutes with the Hamiltonian Eq. (\ref{eq.H.3level}) is given by Eq. (\ref{eq.Na}), i.e., the total number of atoms is conserved. However, if one allows one coupling term $\mu_{ij}$ to be zero, it is possible to find another operator that commutes with the Hamiltonian Eq. (\ref{eq.H.3level}). This operator, for each atomic configuration, is given by
\begin{eqnarray}
\bm{M}_{\Xi} = \bm{a}^\dag\bm{a} + \bm{A}_{22} + 2\bm{A}_{33} \quad (\mu_{13} = 0)\, , \label{eq.M.Xi} \\
\bm{M}_\Lambda = \bm{a}^\dag\bm{a} + \bm{A}_{33} \quad (\mu_{12} = 0)\, , \label{eq.M.Lambda}\\
\bm{M}_V = \bm{a}^\dag\bm{a} + \bm{A}_{22} + \bm{A}_{33} \quad (\mu_{23} = 0) \, , \label{eq.M.V}
\end{eqnarray}
which may be written in general as
\begin{equation}
\label{generalM}
\bm{M} = \bm{a}^\dag\bm{a} + \lambda_2 \, \bm{A}_{22} + \lambda_3 \, \bm{A}_{33}
\end{equation}
with $\lambda_i$ as in Table~\ref{t1}.
\begin{table}
\caption{Values of $\lambda_{i}$, $i=2,\,3$, which determine the constant of motion $\bm{M}$.}
\begin{center}
\begin{tabular}{|c|cc|}
\hline
Configuration&$\lambda_{2}$&$\lambda_{3}$\\
\hline & &\\[-3mm]
$\Xi$&1&2\\
$\Lambda$&0&1\\
$V$&1&1\\
\hline
\end{tabular}
\end{center}
\label{t1}
\end{table}
The $\bm{M}$ operator corresponds to the total number of excitations for the different atomic configurations $\Xi,\ \Lambda$ and ${\rm V}$ \cite{yoo85}. The condition $\mu_{ij} = 0$ implies that transitions between level $i$ and level $j$ are forbidden; a visual inspection of the different configurations (cf. Fig. \ref{fig0}) immediately suggests the expressions (\ref{eq.M.Xi}-\ref{eq.M.V}).
\begin{figure}
\caption{Atomic configurations and dipolar coupling parameters.}
\label{fig0}
\end{figure}
\subsection{Semi-classical variational states}\label{GCS}
In the Hamiltonian that we have given above, for the description of three-level atoms interacting with an electromagnetic field, naturally appear matter operators that generate the unitary algebra in three dimensions, ${\rm U}(3)$. This lends itself to be described by the Gelfand-Tsetlin states~\cite{gelfand} which carry the irreducible representations of ${\rm U}(3)$ and are in general denoted by
\begin{eqnarray}\label{eq.G.state}
\left|\begin{array}{c c c c c} h_1 & & h_2 & & h_3 \\ & q_1 && q_2 & \\ && r && \end{array} \right\rangle = \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle \, ,
\end{eqnarray}
where the labels satisfy the inequalities $q_1\geq r \geq q_2$ and $h_i\geq q_i\geq h_{i+1}$, with $i=1,2$. The nine generators of $U(3)$ can be classified into weight, raising, and lowering operators. The weight generators $\bm{A}_{ii}$ satisfy the eigenvalue equations
\begin{eqnarray*}
\bm{A}_{11} \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle = r \, \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle \, , \\
\bm{A}_{22} \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle = (q_1 +q_2 -r ) \, \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle \, , \\
\bm{A}_{33} \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle = (N_a - q_1 -q_2) \, \vert h_1 \, h_2 \, h_3 \, q_1 \, q_2 \, r \rangle \, ,
\end{eqnarray*}
with $N_a=h_1+h_2 +h_3$.
For the values $q_1=h_1$, $q_2=h_2$, and $r=h_1$, one has the highest weight state, for which
\begin{equation}
\bm{A}_{ij} \vert h_1 \, h_2 \, h_3 \, h_1 \, h_2 \, h_1 \rangle = 0 \, , \quad \hbox{for} \ i < j \, ,
\end{equation}
where $\bm{A}_{i j}$ are the raising weight generators. For this state, the eigenvalues of the weight generators determine the irreducible representation of $U(3)$, i.e., $[h_1,h_2,h_3]$. Physically this means that we have $h_i$ atoms in the level $\omega_i$.
In this work we may consider as a variational test function the direct product of a Heisenberg-Weyl coherent state (field contribution) with a $U(3)$ coherent state (matter contribution), because these generate a basis of the Hilbert space and let us obtain analytic expressions for the expectation values of matter and field observables, as done for two-level systems in~\cite{castanos09a}.
For the one-mode field we use the states $\vert\alpha\rangle$, which satisfy $\bm{a}|\alpha\rangle=\alpha|\alpha\rangle$, while for the matter we follow the procedure established by Perelomov~\cite{perelomov}. The unnormalized ${\rm U}(3)$ coherent states can be defined as
\begin{equation}\label{eq.G}
|h_1h_2h_3,\vec{\gamma} \, \}= \bm{O}(\vec{\gamma})\vert h_1 \, h_2 \, h_3 \, h_1 \, h_2 \, h_1 \rangle \, ,
\end{equation}
with $\vec{\gamma} = (\gamma_1,\gamma_2,\gamma_3)$, and where we have introduced the product of exponentials of lowering weight generators
\begin{eqnarray}
\bm{O}(\vec{\gamma}) &\equiv&{\rm e}^{\gamma_3\bm{A}_{21}}{\rm e}^{\gamma_2\bm{A}_{31}}{\rm e}^{\gamma_1\bm{A}_{32}} \, .
\end{eqnarray}
Therefore, the variational test function is given by
\begin{equation*}
\vert h_1,h_2,h_3; \alpha\,\vec{\gamma}\rangle \equiv |\alpha\rangle\otimes|h_1,h_2,h_3;\vec{\gamma}\rangle\ .
\end{equation*}
For the evaluation of the energy surface of the system, i.e., the expectation value of the Hamiltonian (\ref{eq.H.3level}) with respect to the tensorial product $|\alpha\rangle\otimes|h_1,h_2,h_3;\vec{\gamma}\rangle$, we proceed as follows:
\begin{enumerate}
\item[i)] Determine the coherent state representations of the generators $\bm{A}_{ij}$, $\bm{a}$, and $\bm{a}^\dagger$.
\item[ii)] Evaluate the kernel of the Heisenberg-Weyl and ${\rm U}(3)$ coherent states,
\begin{equation*}
\{ \alpha \, \vert \, \alpha^\prime \} \ \hbox{and} \ \{ h_1h_2h_3,\vec{\gamma} \, \vert h_1h_2h_3,\vec{\gamma}^{\, \prime} \, \} \
\end{equation*}
\item[iii)] Apply the representation form of each operator of the Hamiltonian to the corresponding kernel evaluated at $\alpha^\prime=\alpha$ and $\vec{\gamma}^{\, \prime}=\vec{\gamma}$.
\end{enumerate}
For the Heisenberg-Weyl case, it is well known that
\begin{equation*}
\bm{a} \to \frac{\partial \phantom{\alpha^*} }{\partial \alpha^*} \, , \quad \bm{a}^\dagger \to \alpha^* \, , \quad
\{ \alpha \, \vert \, \alpha^\prime \} = \exp{ (\alpha^* \, \alpha^\prime) } \, .
\end{equation*}
Now, for the ${\rm U}(3)$ case, the first step is to determine the coherent state representation of the generators
\begin{eqnarray}\label{eq.bAijk}
\{ h_1h_2h_3;\vec{\gamma}|\bm{A}_{ij}| \psi\rangle,
\end{eqnarray}
where $|\psi\rangle$ is an arbitrary state of the matter. Substituting the definition of the coherent state (\ref{eq.G}), one
has
\begin{eqnarray}\label{eq.bGijk}
\langle h_1 \, h_2 \, h_3 \, h_1 \, h_2 \, h_1 \vert \bm{G}_{ij}\bm{O}^\dag(\vec{\gamma})|\psi\rangle,
\end{eqnarray}
where we define $\bm{G}_{ij} = \bm{O}^\dag(\vec{\gamma})\bm{A}_{ij}{\bm{O}^\dag}^{-1}(\vec{\gamma})$.
Using the expansion of ${\rm e}^{\bm{A}} \bm{B} {\rm e}^{-\bm{A}}$, it is straightforward that $\bm{G}_{ij} $ takes the form
\begin{eqnarray}\label{eq.Gij}
\bm{G}_{ij} &=& \bm{A}_{ij} + \delta_{3i}\gamma_1^* \bm{A}_{2j} + \left[\delta_{1j}\left(\gamma_1^*\gamma_3^*- \gamma_2^*\right)-\gamma_1^*\delta_{2j}\right] \bm{A}_{i3} \nonumber \\
&+&\delta_{i3}\left[\delta_{1j}\left({\gamma_1^*}^2\gamma_3^*- \gamma_1^*\gamma_2^*\right) - \delta_{2j}{\gamma_1^*}^2 \right]\bm{A}_{23} \nonumber \\
&+& \left(\delta_{i3}\gamma_2^*+\delta_{2i}\gamma_3^*\right) \bm{A}_{1j} -\delta_{1j}\gamma_3^* \bm{A}_{i2} - \delta_{i3}\delta_{1j}\gamma_1^*\gamma_3^* \bm{A}_{22} \nonumber \\
&+& \left\{\delta_{i3}\left[\delta_{1j}\left(\gamma_1^*\gamma_2^*\gamma_3^*- {\gamma_2^*}^2\right)-\delta_{2j}\gamma_1^*\gamma_2^*\right] \right. \nonumber \\
&+& \left.\delta_{i2}\left[\delta_{1j}\left(\gamma_1^*{\gamma_3^*}^2- \gamma_2^*\gamma_3^*\right)-\delta_{2j}\gamma_1^*\gamma_3^*\right]\right\} \bm{A}_{13} \nonumber \\
&-& \delta_{1j}\left(\delta_{i3}\gamma_2^*\gamma_3^* + \delta_{i2}{\gamma_3^*}^2\right) \bm{A}_{12}.
\end{eqnarray}
To apply $\bm{G}_{ij}$ to the bra associated to the highest weight state, we have to take into account that the weight generators are diagonal, the lowering generators yield zero, and the raising generators in (\ref{eq.Gij}) must be replaced by
\begin{eqnarray*}
\bm{A}_{23} \to \frac{\partial}{\partial \gamma_1^*} \, , \quad \bm{A}_{13} \to \frac{\partial}{\partial \gamma_2^*} \, , \quad
\bm{A}_{12} \to \left(\frac{\partial}{\partial \gamma_3^*}+\gamma_1^*\frac{\partial}{\partial \gamma_2^*}\right) \, .
\end{eqnarray*}
This yields the Gelfand-Tsetlin coherent representation of the ${\rm U}(3)$ generators as
\begin{equation*}
\mathcal{A}_{ij} \{h\,\vec{\gamma}\vert\psi\rangle = \{h\,\vec{\gamma}\vert\bm{A}_{ij}\vert\psi\rangle \, .
\end{equation*}
As an example, we give the ${\rm U}(3)$ coherent state representation of the ${\rm U}(2)$ subalgebra $\{ \bm{A}_{11},\, \bm{A}_{12},\, \bm{A}_{21},\, \bm{A}_{22}\}$: One writes, using (\ref{eq.Gij}),
\begin{eqnarray*}
\bm{G}_{11} &=& \bm{A}_{11} - \gamma_3^\ast\bm{A}_{12} + \left(\gamma_1^\ast \gamma_3^\ast - \gamma_2^\ast\right) \bm{A}_{13} \\
\bm{G}_{12} &=& \bm{A}_{12} - \gamma_1^\ast\bm{A}_{13} \\
\bm{G}_{21} &=& \bm{A}_{21} - \gamma_3^\ast\bm{A}_{12} + \gamma_3^\ast \left(\gamma_1^\ast \gamma_3^\ast - \gamma_2^\ast\right) \bm{A}_{13} \\
& +& \left(\gamma_1^\ast \gamma_3^\ast - \gamma_2^\ast\right) \bm{A}_{23} + \gamma_3^\ast(\bm{A}_{11} - \bm{A}_{22}) \\
\bm{G}_{22} &=& \bm{A}_{22} + \gamma_3^\ast\bm{A}_{12} - \gamma_1^\ast\gamma_3^\ast\bm{A}_{13} - \gamma_1^\ast\bm{A}_{23}
\end{eqnarray*}
Then we make the replacements indicated above, to get
\begin{eqnarray*}
\mathcal{A}_{11} &\to& h_1 - \gamma_2^\ast \frac{\partial}{\partial\gamma_2^\ast} - \gamma_3^\ast \frac{\partial}{\partial\gamma_3^\ast} \\
\mathcal{A}_{12} &\to& \frac{\partial}{\partial\gamma_3^\ast} \\
\mathcal{A}_{21} &\to& \gamma_3^\ast \left( h_1 - h_3 + \gamma_1^\ast \frac{\partial}{\partial\gamma_1^\ast} - \gamma_2^\ast \frac{\partial}{\partial\gamma_2^\ast} - \gamma_3^\ast \frac{\partial}{\partial\gamma_3^\ast} \right) - \gamma_2^\ast \frac{\partial}{\partial\gamma_1^\ast} \\
\mathcal{A}_{22} &\to& h_2 - \gamma_1^\ast \frac{\partial}{\partial\gamma_1^\ast} + \gamma_3^\ast \frac{\partial}{\partial\gamma_3^\ast}
\end{eqnarray*}
It is straightforward to prove that the operators $\mathcal{A}_{ij}\ (i,j=1,2)$ satisfy the commutation relations of a ${\rm U}(2)$ algebra.
The ${\rm U}(3)$ matter kernel is given by
\begin{equation*}
\langle h_1 \, h_2 \, h_3 \, h_1 \, h_2 \, h_1 \vert \bm{O}^\dagger (\vec{\gamma}) \bm{O}(\vec{\gamma}^{\, \prime})\vert h_1 \, h_2 \, h_3 \, h_1 \, h_2 \, h_1 \rangle \, .
\end{equation*}
To evaluate the expression it is convenient to rewrite the product of operators as
\begin{eqnarray}\label{eq.OdO}
\bm{O}^\dag(\vec{\gamma}) \, \bm{O}(\vec{\gamma}')=\bm{O}(\vec{\beta}) \, {\rm e}^{\lambda_1\bm{A}_{11} + \lambda_2\bm{A}_{22} + \lambda_3 \, \bm{A}_{33}}\bm{O}^\dag(\vec{\beta}') \, , \quad
\end{eqnarray}
because the matrix elements, with respect to the Gelfand-Tsetlin highest weight state, of the operators $\bm{O}(\vec{\beta})$ and $\bm{O}^\dag(\vec{\beta}' )$ yield a result equivalent to the identity operator, and the remaining exponential is diagonal.
To interchange the exponential operators, we use a faithful realization of the generators as $\bm{A}_{ij} = |i\rangle\langle j|$. One then finds the values of the $\beta$'s, $\beta'$'s and $\lambda$'s as functions of $\gamma$'s and $\gamma'$'s, in such a manner that the expression (\ref{eq.OdO}) is satisfied. Following this procedure, one obtains the ${\rm U}(3)$ matter kernel
\begin{eqnarray}\label{eq.Kp}
&& \{ h_1h_2h_3,\vec{\gamma} \, \vert h_1h_2h_3,\vec{\gamma}^{\, \prime} \, \} = \left(1+\gamma_2^*\gamma_2' + \gamma_3^*\gamma_3'\right)^{h_1-h_2} \times \nonumber\\ &&
\Big(1 + \gamma_2^* (\gamma^\prime_2 -\gamma^\prime_1 \, \gamma^\prime_3) + \gamma^*_1 ( \gamma^\prime_1 - \gamma^\prime_2 \, \gamma^*_3 + \gamma^\prime_1 \, \gamma^*_3 \, \gamma^\prime_3 ) \Big)^{h_2-h_3} \,.
\end{eqnarray}
The general case of distinguishable particles could be interesting in quantum information theory, for example, for the description of systems of qutrits. In our case study we restrict ourselves to the totally symmetric configuration, i.e., that of indistinguishable particles. For the symmetric basis the corresponding kernel of the matter contribution is obtained by taking $h_2=h_3=0$. From here on we simplify the notation by omitting the values of $h_2$ and $h_3$ in the Gelfand-Tsetlin states. Therefore the kernel of the tensorial product of coherent states is
\begin{eqnarray}\label{eq.K}
K(h_1;\alpha,\alpha',\vec{\gamma},\vec{\gamma}') = {\rm e}^{\alpha^*\alpha'}\left(1+\gamma_2^*\gamma_2' + \gamma_3^*\gamma_3'\right)^{h_1}.
\end{eqnarray}
\subsection{Energy surface}\label{energy_surface}
Applying the corresponding coherent state representation of the Hamiltonian (\ref{eq.H.3level}) on the kernel above, dividing by the scalar product of the coherent states, and replacing $\alpha'=\alpha$ and $\vec{\gamma}' = \vec{\gamma}$, the energy surface is
\begin{eqnarray}\label{eq.E.G}
{\cal E}^c &=& \Omega \rho^2 + h_1 \frac{\omega_1 +\omega_2\varrho_3^2 + \omega_3 \varrho_2^2}{1 + \varrho_2^2 + \varrho_3^2 }
- 2\sqrt{h_1}\mu_{12} \frac{\rho\varrho_3\cos(\vartheta_{3})}{1 + \varrho_2^2 + \varrho_3^2 }\nonumber \\ &-& 2\sqrt{h_1}\mu_{13} \frac{\rho\varrho_2\cos(\vartheta_{2})}{1 + \varrho_2^2 + \varrho_3^2 } - 2\sqrt{h_1}\mu_{23} \frac{\rho\varrho_2\varrho_3\cos(\vartheta_{1})}{1 + \varrho_2^2 + \varrho_3^2 },
\end{eqnarray}
where we have rewritten the parameters in their polar form, i.e., $\alpha = \rho {\rm e}^{i\phi},\ \gamma_j = \varrho_j {\rm e}^{i\varphi_j}$ and identified $\vartheta_3 = \phi-\varphi_3, \ \vartheta_2 = \phi-\varphi_2$ and $\vartheta_1 = \phi -\varphi_2+\varphi_3$.
Minimizing ${\cal E}^c$ with respect to the phases $\vartheta_i$, one finds that the critical values are given by $\vartheta_{ic} = 0,\ \pi$. The minimum is obtained when $\mu_{ij}\cos\left(\vartheta_{kc}\right)>0$ for cyclic indices $i,\ j$ and $k$. Since these values are independent of the $\rho$'s, one may impose this condition in Eq. (\ref{eq.E.G}), and hence the energy surface is rewritten as
\begin{eqnarray}\label{eq.E.G2}
{\cal E}^c &=& \Omega \rho^2 + h_1 \frac{\omega_1 +\omega_2\varrho_3^2 + \omega_3 \varrho_2^2}{1 + \varrho_2^2 + \varrho_3^2 } \nonumber \\
&-&
2\sqrt{h_1}\rho \frac{|\mu_{12}|\varrho_3 + |\mu_{13}|\varrho_2 + |\mu_{23}|\varrho_2\varrho_3}{1 + \varrho_2^2 + \varrho_3^2 }.
\end{eqnarray}
It is easy to see that the condition
\begin{eqnarray}
\frac{\partial}{\partial \rho}{\cal E}^c =0,
\end{eqnarray}
is satisfied when $\rho=\rho_c$ (critical value of the variable $\rho$) where $\rho_c$ is given by
\begin{eqnarray}\label{eq.rho.minG}
\rho_c = \frac{\sqrt{h_1}}{\Omega} \frac{|\mu_{12}|\varrho_{3c} + |\mu_{13}|\varrho_{2c} + |\mu_{23}|\varrho_{2c}\varrho_{3c}}{1 + \varrho_{2c}^2 + \varrho_{3c}^2} \, .
\end{eqnarray}
Here $\varrho_{2c}$ and $\varrho_{3c}$ stand for the critical values of $\varrho_{2}$ and $\varrho_{3}$, respectively.
It is worth stressing the fact that the energy surface given by Eq. (\ref{eq.E.G}) [or equivalently Eq. (\ref{eq.E.G2})] has no dependence on $\gamma_1 = \varrho_1 {\rm e}^{i\varphi_1}$, because we are taking $h_2=h_3=0$ in the definition of the Gelfand-Tsetlin coherent state.
For the semi-classical calculation of the ground state energy, it is worth referring to the intensive quantity $E^c = {\cal E}^c/ h_1$ which describes the energy per particle:
\begin{eqnarray}\label{eq.E.G3}
E^c &=& \Omega \, r^2 + \frac{\omega_1 +\omega_2 \varrho_3^2 + \omega_3 \varrho_2^2}{1 + \varrho_2^2 + \varrho_3^2 } \nonumber \\
&-&
2r \frac{|\mu_{12}|\varrho_3 + |\mu_{13}|\varrho_2 + |\mu_{23}|\varrho_2\varrho_3}{1 + \varrho_2^2 + \varrho_3^2 },
\end{eqnarray}
where $r=\rho/\sqrt{h_1}$. In a similar way we define the total number of excitations per particle $M^c = {\cal M}^c/N_a$.
An approximation to the ground state energy of the system is obtained by substituting the minima critical points into the energy surface. From (\ref{eq.rho.minG}) and (\ref{eq.E.G3}) we obtain $E^{c}=E^{c}(\varrho_{2c},\,\varrho_{3c})$, whose minimum in general has no analytic solutions for arbitrary points in parameter space ($\mu_{ij}$) and a particular atomic configuration.
The critical points satisfy $\varrho_{2c},\,\varrho_{3c}\geq0$. To find these critical points numerically, we proceed as follows, starting with the first quadrant in the $\varrho_{2c}$--$\varrho_{3c}$ plane:
\begin{itemize}
\item The area is divided into $N$ regions forming a lattice;
\item the energy surface is evaluated at the central point of each of these regions;
\item the region with minimum energy, together with its closest neighbors, is selected to build a new lattice;
\item this method is iterated until the desired precision is reached.
\end{itemize}
If the area of the first set is $S$, the method establishes the critical point with a precision of $3^{m-1} \sqrt{S/N^m}$, where $m$ is the number of iterations.
Recently~\cite{clpcna13} we found the minimum energy surface $E^c$ as a function of the control parameters $\mu_{ij}$. It changes value from $E^c=0$ to $E^c<0$, when a transition from $M^c=0$ (normal regime) to $M^c>0$ (collective regime) in the total number of excitations of the corresponding semi-classical approximation to the ground state of the system takes place. This leads to the existence of a separatrix in parameter space, for which we were able to propose the following ansatz:
For the $\Xi$ configuration,
\begin{eqnarray}\label{eq.trans.Xi}
\Omega\, \omega_{21} = \mu_{12}^2 + \left[|\mu_{23}|-\sqrt{\Omega \, \omega_{31}}\right]^2 \Theta\left[|\mu_{23}|-\sqrt{\Omega \, \omega_{31}}\right], \qquad
\end{eqnarray}
where the Bohr frequency $\omega_{ij}\equiv \omega_i-\omega_j$ is the energy shift between the atomic levels $i$ and $j$ and $\Theta\left[x\right]$ stands for the Heaviside theta function.
For the $\Lambda$ configuration,
\begin{eqnarray}\label{eq.trans.Lambda}
\Omega\,\omega_{31} =\mu_{13}^2 + \left[|\mu_{23}|-\sqrt{\Omega \, \omega_{21}}\right]^2 \Theta\left[|\mu_{23}|-\sqrt{\Omega \, \omega_{21}}\right]\, .
\end{eqnarray}
For the $V$ configuration,
\begin{eqnarray}\label{eq.trans.V}
\frac{\mu_{12}^2}{\Omega\,\omega_{21}} + \frac{\mu_{13}^2}{\Omega\,\omega_{31}}= 1.
\end{eqnarray}
The separatrices of the different configurations correspond to the thermodynamic limit, that is, when the number of atoms $N_a \to \infty$.
\subsection{Order of the transitions}\label{order.trans}
A phase transition is of order $j$, according to the Ehrenfest classification~\cite{gilmore93}, if $j$ is the lowest non-negative integer for which
\begin{equation*}
\lim_{\epsilon \to 0} \frac{\partial^j E^c}{\partial s^j} \Bigg |_{s=s_0+\epsilon} \neq \lim_{\epsilon \to 0} \frac{\partial^j E^c}{\partial s^j} \Bigg |_{s=s_0-\epsilon} \, ,
\end{equation*}
where $s$ represents here any of the control parameters $\mu_{ij}$.
In general we do not have analytical expressions for the critical points, so the order of the transitions must be obtained numerically. In the case of first-order transitions, however, we may use
\begin{eqnarray*}
dE^c = \ &&\left(\frac{\partial E^c}{\partial \rho}\right) d\rho + \left(\frac{\partial E^c}{\partial \varrho_2}\right) d\varrho_2 + \left(\frac{\partial E^c}{\partial \varrho_3}\right) d\varrho_3 + \sum_{i<j} \left(\frac{\partial E^c}{\partial \mu_{ij}}\right) d\mu_{ij}
\end{eqnarray*}
which evaluated at the critical points reduces to
\begin{equation*}
dE^c \Bigg|_{\rho_c,\varrho_{2c},\varrho_{3c}} = \ \sum_{i<j} \left(\frac{\partial E^c}{\partial \mu_{ij}}\right)_{\rho_c,\varrho_{2c},\varrho_{3c}} d\mu_{ij}
\end{equation*}
and this provides us with the following expressions:
\noindent For the $\Xi$ configuration
\begin{eqnarray}\label{eq.dE.X}
\frac{\partial}{\partial \mu_{12}} E^c_{\Xi} = - 2 \frac{r_c\ \varrho_{3c}}{1+\varrho_{2c}^2 + \varrho_{3c}^2}, \\
\frac{\partial}{\partial \mu_{23}} E^c_{\Xi} = - 2 \frac{r_c\ \varrho_{2c}\ \varrho_{3c}}{1+\varrho_{2c}^2 + \varrho_{3c}^2}\,;
\end{eqnarray}
for the $\Lambda$ configuration
\begin{eqnarray}\label{eq.dE.L}
\frac{\partial}{\partial \mu_{13}}E^c_{\Lambda} = - 2 \frac{r_c\ \varrho_{2c}}{1+\varrho_{2c}^2 + \varrho_{3c}^2}, \\
\frac{\partial}{\partial \mu_{23}} E^c_{\Lambda} = - 2 \frac{r_c\ \varrho_{2c}\ \varrho_{3c}}{1+\varrho_{2c}^2 + \varrho_{3c}^2}\,;
\end{eqnarray}
and for the ${\rm V}$ configuration
\begin{eqnarray}\label{eq.dE.V}
\frac{\partial}{\partial \mu_{12}} E^c_{V} = - 2 \frac{r_c\ \varrho_{3c}}{1+\varrho_{2c}^2 + \varrho_{3c}^2}, \\
\frac{\partial}{\partial \mu_{13}} E^c_{V} = - 2 \frac{r_c\ \varrho_{2c}}{1+\varrho_{2c}^2 + \varrho_{3c}^2}\,.
\end{eqnarray}
For the second-order transitions one has to infer them through numerical differentiation of the equations, or through derivatives of second order when analytical expressions are available.
\subsection{Statistics of semi-classical ground state}\label{fluc}
The statistics of the semi-classical ground state is given by the well-known $Q$-Mandel parameter \cite{mandel79}, defined for the field states as
\begin{eqnarray}
Q = \frac{(\Delta n)^2 - \langle \bm{n}\rangle}{\langle \bm{n}\rangle}.
\end{eqnarray}
The photon distribution obeys $(\Delta n)^2 = \langle \bm{n}\rangle$, and hence $Q=0$ for any value of the control parameters, i.e., the contribution of the photons in the semi-classical ground state obeys Poissonian statistics.
On the other hand, one may study the statistics of the ground state as a function of the total number of excitations ${\cal M}$, i.e., consider both field and matter contributions. So one may define, in a similar way, the $Q_M$-Mandel parameter as
\begin{eqnarray}\label{eq.QM.def}
Q_M = \frac{(\Delta M)^2 - \langle \bm{M}\rangle}{\langle \bm{M}\rangle}.
\end{eqnarray}
To evaluate the expression (\ref{eq.QM.def}) we use Eq.(\ref{generalM}) together with
\begin{eqnarray}
\bm{M}^2 &=& \bm{n}^2 + \lambda_2^2\bm{A}_{22}^2 + \lambda_3^2\bm{A}_{33}^2 \nonumber \\
&+& 2\bm{n}\left(\lambda_2\bm{A}_{22} + \lambda_3\bm{A}_{33}\right) + 2\lambda_2\lambda_3\bm{A}_{22}\bm{A}_{33} \, .
\end{eqnarray}
For the totally symmetric coherent variational test function one may establish the following relations between expectation values for matter and field observables:
\begin{eqnarray}\label{eq.relations.2}
\langle \bm{n}^2\rangle = \langle \bm{n}\rangle^2 + \langle \bm{n}\rangle,\\
\langle \bm{A}_{22}^2\rangle = \langle \bm{A}_{22}\rangle +\left(1-\frac{1}{N_a}\right) \langle \bm{A}_{22}\rangle^2,\\
\langle \bm{A}_{33}^2\rangle = \langle \bm{A}_{33}\rangle +\left(1-\frac{1}{N_a}\right) \langle \bm{A}_{33}\rangle^2,\\
\langle \bm{n}\bm{A}_{ii}\rangle = \langle \bm{n}\rangle \langle \bm{A}_{ii}\rangle, \\
\langle \bm{A}_{22}\bm{A}_{33}\rangle = \left(1-\frac{1}{N_a}\right)\langle \bm{A}_{22}\rangle \langle \bm{A}_{33}\rangle \,.
\end{eqnarray}
\begin{figure}
\caption{(Color online.) First derivative of the ground state energy with respect to its control parameters, for atoms in the $\Xi$ configuration under the double resonance condition $\Delta_{21}=\Delta_{32}=0$.}
\label{f1}
\end{figure}
\begin{figure*}
\caption{(Color online.) (a) $Q_M$-Mandel parameter as a function of the control parameters, for atoms in $\Xi$ configuration in double resonance. The separatrix is shown by a white line, as well as three points (dots) where the corresponding $M$ distribution of the ground state for $N_a=40$ atoms has been calculated (solid bars) and compared with its corresponding Poissonian distribution (dots). (b)--(d) Distribution of ${\cal M}$ in the semi-classical ground state for each of these points.}
\label{f2}
\end{figure*}
\begin{figure}
\caption{(Color online.) The average number of photons in units of the total number of atoms $r^2_c = \rho^2_c/N_a$ is shown, for atoms in $\Xi$ configuration in double resonance. Notice that for greater values of $\mu_{23}$ the average number of photons per atom increases.}
\label{f3}
\end{figure}
\noindent Hence, the fluctuation of the total number of excitations for the variational state, defined by $(\Delta M^c)^2 = \langle \bm{M}^2\rangle - \langle \bm{M}\rangle^2$, is given by
\begin{eqnarray}\label{eq.fluc.MG}
(\Delta M^c)^2 = \langle \bm{M}\rangle + \lambda_3\left(\lambda_3-1\right)\langle \bm{A}_{33}\rangle -\frac{1}{N_a}\left[\lambda_2\langle \bm{A}_{22}\rangle + \lambda_3\langle \bm{A}_{33}\rangle\right]^2,
\end{eqnarray}
where we have used the fact that $\lambda_2^2=\lambda_2$ and $\lambda_3^2 = \lambda_3$ or $2\lambda_3$ to identify the appropriate value of $\langle \bm{M}\rangle$. Then the $Q_M$-Mandel parameter for this state reads
\begin{eqnarray}\label{eq.QM}
Q_M{}^c = \frac{1}{\langle \bm{M}\rangle} \bigg[\lambda_3\left(\lambda_3-1\right)\langle \bm{A}_{33}\rangle -\frac{1}{N_a}\left[\lambda_2\langle \bm{A}_{22}\rangle + \lambda_3\langle \bm{A}_{33}\rangle\right]^2\bigg].
\end{eqnarray}
Note that the $Q_M$-Mandel parameter does not depend on the total number of atoms $N_a$, since both quantities $\langle \bm{M}\rangle$ and $\langle \bm{A}_{ii}\rangle$ are proportional to $N_a$.
Since $\lambda_3=1$ for the $\Lambda$ and ${\rm V}$ configurations, one finds from Eq. (\ref{eq.QM}) that in these cases $Q_M\leq 0$, and then the corresponding coherent state obeys only Poissonian ($Q_M=0$) and sub-Poissonian ($Q_M<0$) statistics. For the $\Xi$ configuration, however, $\lambda_3=2$ and hence the sign of $Q_M$ may be determined only via evaluation of the corresponding critical points.
\subsection{Numerical results}\label{numerical.results}
As pointed out in \cite{clpcna13}, the minimization of the semi-classical energy $E^c$ provides analytic expressions for the phases and $r_c=\rho_c/\sqrt{N_a}$. There is not, in general, an analytic solution available for the minimum value of the energy surface with respect to the other two independent variables $\varrho_2$ and $\varrho_3$. This suggests the use of a numerical method to evaluate the critical points $\varrho_{2c},\ \varrho_{3c}$, as functions of the control parameters $\mu_{ij}$.
To describe the levels of the atom we can use the detuning, defined by
\begin{eqnarray}\label{eq.detuning}
\Delta_{ij} = \omega_{ij} - \Omega, \quad \omega_{ij} = \omega_i-\omega_j\,.
\end{eqnarray}
Without loss of generality, we choose $\Omega=1$ and $\omega_1 = 0$. Thus the control parameters, the atomic levels, and the detuning are all measured in units of the field frequency.
\subsubsection{$\Xi$ configuration}\label{NR.Xi}
The $\Xi$ configuration forbids the transition $\omega_1\longleftrightarrow\omega_3$, and this is introduced in the Hamiltonian by taking $\mu_{13}=\nobreak0$. Then $\Delta_{21}$ and $\Delta_{32}$ are related to the energy levels by
\begin{eqnarray}
\omega_2 = \Delta_{21} + \omega_1 + \Omega\,,\\
\omega_3 = \Delta_{32} + \Delta_{21} + \omega_1 + 2\,\Omega\,.
\end{eqnarray}
Also, in the $\Xi$ configuration the condition $\omega_2\approx \omega_3/2$ is fulfilled, and the detuning should satisfy $\Delta_{21}\approx \Delta_{32}$ with $|\Delta_{ij}|< 1$ to be consistent with the RWA.
Figure \ref{f1} shows the first derivatives of the energy surface for the ground state in double resonance, i.e., when $\Delta_{21}=\Delta_{32}=0$. The corresponding separatrix Eq. (\ref{eq.trans.Xi}) is shown by a white line. One can observe that the derivative is continuous in the region $\mu_{23}\leq \sqrt{\Omega \, \omega_{31}}$, where the separatrix is given by $\mu_{12}= \sqrt{\Omega \, \omega_{21}}$; here a second-order transition occurs. For $\mu_{23}> \sqrt{\Omega \, \omega_{31}}$, the separatrix is given by $\left(|\mu_{23}|- \sqrt{\Omega \, \omega_{31}}\right)^2+{\mu_{12}}^2 = \Omega \, \omega_{21}$, the derivative is discontinuous and first-order transitions take place.
In Fig. \ref{f2}(a) the $Q_M$-Mandel parameter Eq. (\ref{eq.QM}) for this configuration is shown. One can observe that for
\begin{eqnarray}\label{eq.Xi.Poisson}
\Omega\,\omega_{21} &\geq& \mu_{12}^2 + \left(|\mu_{23}|-\sqrt{\Omega\,\omega_{31}}\right)^2 \Theta\left[|\mu_{23}|-\sqrt{\Omega\,\omega_{31}}\right]
\end{eqnarray}
we have $Q_M=0$, i.e., for this region in parameter space the semi-classical ground state has Poissonian statistics. On the other hand, when Eq. (\ref{eq.Xi.Poisson}) is not satisfied one finds $Q_M<0$, providing sub-Poissonian statistics.
Figures \ref{f2}(b), (c) and (d), respectively, show the distribution of ${\cal M}$ in the semi-classical ground state for $N_a=40$ atoms, using $(\mu_{12} = 1.01,\ \mu_{23}= 0.5)$,\ $(\mu_{12} = 0.05,\ \mu_{23}= 2.45)$ and $(\mu_{12} = 1.5,\ \mu_{23}=2.5)$ (solid bars) in comparison with the corresponding Poissonian distribution (dots). Notice that the first two points are very close to the separatrix, as shown in Fig.~\ref{f2}(a) [dotted data], but their corresponding $Q_M$-Mandel parameters are different. One may observe that in Fig. \ref{f2}(b) both distributions are practically indistinguishable, since the average value of the total number of excitations is $M^c_\Xi \approx 2.3 \times 10^{-2}$ (with the corresponding $Q_M$-Mandel parameter $Q_M \approx -4.7\times10^{-3}$), i.e., the contribution of the state with ${\cal M}=0$ dominates in the ground state, while, in the other cases [Figs. \ref{f2}(c), (d)] the average values are $M^c_\Xi \approx 2.87 \hbox{ and } 3$ (with $Q_M\approx-0.41 \hbox{ and } -0.17$, respectively), where the contribution of the state with ${\cal M}=0$ is negligible.
Fig. \ref{f3} shows the average number of photons in units of the total number of atoms, $r_c^2 = \rho_c^2/N_a = \langle \bm{n}\rangle/N_a$. Since the field is a coherent state, the fluctuation of the number of photons satisfies $(\Delta n)^2=\langle \bm{n}\rangle$.
\subsubsection{$\Lambda$ configuration}\label{NR.Lambda}
\begin{figure}
\caption{(Color online.) First derivative of the ground state energy with respect to its control parameters, for atoms in the $\Lambda$ configuration with a non-resonant condition $\Delta_{31}=0.3$ and $\Delta_{32}=-0.2$.}
\label{f4}
\end{figure}
\begin{figure*}
\caption{(Color online.) (a) $Q_M$-Mandel parameter as a function of the control parameters, for atoms in the $\Lambda$ configuration in a non-resonant condition $\Delta_{31}=0.3$ and $\Delta_{32}=-0.2$. The separatrix is shown by a white line. (b)--(d) ${\cal M}$ distribution of the ground state for $N_a=40$ atoms at three selected points (solid bars), compared with the corresponding Poissonian distribution (dots).}
\label{f5}
\end{figure*}
\begin{figure}
\caption{(Color online.) Average value of the photon distribution in units of the total number of atoms $r^2_c = \rho^2_c/N_a$ [cf. Eq. (\ref{eq.rho.minG})], for atoms in the $\Lambda$ configuration in a non-resonant condition $\Delta_{31}=0.3$ and $\Delta_{32}=-0.2$.}
\label{f6}
\end{figure}
For atoms in the $\Lambda$ configuration it is required that the transitions from $\omega_1\longleftrightarrow\omega_2$ be negligible, and so we take $\mu_{12}=0$. The detuning for the corresponding values of the frequencies $\omega_2$ and $\omega_3$ are
\begin{eqnarray}
\omega_2= \Delta_{31} - \Delta_{32} + \omega_1, \\
\omega_3 = \Delta_{31} + \omega_1 + \Omega.
\end{eqnarray}
Because of the convention $\omega_1\leq\omega_2\leq\omega_3$ used in the labeling of the energy levels, the condition $\omega_1\approx\omega_2$ requires $\Delta_{31}-\Delta_{32}\approx 0$ with $\Delta_{31}\geq\Delta_{32}$.
First we consider the case of equal detuning, i.e., $\Delta_{31}=\Delta_{32}$. In this case, the critical points may be calculated analytically as functions of the control parameters. These are given by $\varrho_{2c}=\varrho_{3c} = 0$ in the normal regime, with ${\mu_{13}}^2+{\mu_{23}}^2\leq\Omega\,\omega_3$; while in the collective regime we have
\begin{eqnarray}\label{eq.L.criticos}
\varrho_{2c} = \frac{1}{\mu_{13}}\sqrt{\frac{\left(\mu_{13}^2+\mu_{23}^2\right)
\left(\mu_{13}^2+\mu_{23}^2-\Omega \, \omega_3\right)}{ \mu_{13}^2+\mu_{23}^2+\Omega \, \omega_3}}, \quad \\
\varrho_{3c} = \frac{\mu_{23}}{\mu_{13}},
\end{eqnarray}
where states with ${\cal M}>0$ contribute to the ground state.
Substituting the critical points in the expression for the energy one finds that the minimum energy surface is given by $E_\Lambda^c=0$ for $\mu_{13}^2+\mu_{23}^2\leq\Omega \, \omega_3$ and
\begin{eqnarray}\label{eq.L.minE}
E^c_{\Lambda} = -\frac{1}{4\Omega}\frac{\left(\mu_{13}^2+\mu_{23}^2-\Omega \, \omega_3\right)^2}{ \mu_{13}^2+\mu_{23}^2},
\end{eqnarray}
in the collective region. Taking the first derivatives of the minimum energy surface and evaluating at the separatrix, one finds that only second-order transitions occur.
In the collective regime the $Q_M$-Mandel parameter reads
\begin{eqnarray}\label{eq.QM.L}
Q_M = - \frac{\Omega^2 \left(\mu_{13}^2 + \mu_{23}^2 - \Omega \, \omega_3\right)}{\left(\mu_{13}^2 + \mu_{23}^2 \right) \left(\mu_{13}^2 + \mu_{23}^2 + \Omega \, \omega_3\right)}.
\end{eqnarray}
One can show that, independently of the detuning values, $Q_M=0$ in the normal regime (${\cal M}=0$), yielding Poissonian statistics, while in the collective regime (${\cal M}>0$) we have sub-Poissonian statistics, $Q_M<0$. Also we notice that $Q_M\to 0 $ when the control parameters go to infinity.
We now consider atoms in the $\Lambda$ configuration with $\Delta_{31}\neq\Delta_{32}$; we choose $\Delta_{31} = 0.3$ and $\Delta_{32} =-0.2$. In this case the problem does not have an analytic solution and one needs to consider numerical solutions, as for the $\Xi$ configuration.
Figure \ref{f4} shows the first derivatives of the semi-classical energy surface for the ground state.
These present discontinuities along the separatrix where $|\mu_{23}|>\sqrt{\Omega \, \omega_{21}}$ indicating first-order transitions. In the region where $|\mu_{23}|<\sqrt{\Omega \, \omega_{21}}$ with $\mu_{13} = \sqrt{\Omega \, \omega_{31}}$ the derivatives are continuous, and second-order transitions occur. The corresponding $Q_M$-Mandel parameter and the $M$-distribution of the coherent state for three values with $N_a=40$ atoms is shown in Fig.~\ref{f5}. Here we compare the sub-Poissonian distribution of the state with its corresponding Poissonian distribution (dots). Finally the photon number distribution is shown in Fig.~\ref{f6}. One should compare the behavior of these quantities Figs. \ref{f4}, \ref{f5} and \ref{f6} with the corresponding ones for the $\Xi$ configuration, Figs. \ref{f1}, \ref{f2} and \ref{f3}, respectively. Notice that the behavior is very similar, i.e., for atoms in $\Lambda$ configuration with unequal detuning, the physical quantities and properties (order of the transitions) resemble those of the atoms in $\Xi$ configuration: they are both qualitatively equivalent.
\subsubsection{${\rm V}$ configuration}\label{NR.V}
\begin{figure}
\caption{(Color online.) First derivatives of the ground state energy with respect to its control parameters, for atoms in the $V$ configuration with a $\Delta_{21}
\label{f7}
\end{figure}
\begin{figure*}
\caption{(Color online.) (a) $Q_M$-Mandel parameter as a function of the control parameters, for atoms in the $V$ configuration in the non-resonant case $\Delta_{21}
\label{f8}
\end{figure*}
\begin{figure}
\caption{(Color online.) Average value of the photon number distribution in units of the total number of atoms $r^2_c = \rho^2_c/N_a$ [see Eq. (\ref{eq.rho.minG}
\label{f9}
\end{figure}
\begin{figure*}
\caption{(Color online.) Properties of atoms in the $\Xi$ configuration in a double resonance condition, with fixed value $\mu_{23}
\label{f10}
\end{figure*}
A system of atoms in the ${\rm V}$ configuration requires $\mu_{23}=0$, since transitions between the levels $\omega_3$ and $\omega_2$ are negligible. In this case, the detuning parameters $\Delta_{21}$ and $\Delta_{31}$ are given by
\begin{eqnarray}
\omega_2 = \Delta_{21}+\omega_1 +\Omega,\\
\omega_3 = \Delta_{31}+\omega_1 + \Omega.
\end{eqnarray}
Notice that the condition $\omega_2\approx\omega_3$ on $\omega_1 \leq\nobreak \omega_2\leq\omega_3$ reads, in terms of the detuning, as $\Delta_{21}\approx\Delta_{31}$ but satisfying $\Delta_{21}\leq\Delta_{31}$.
In a similar form to the atoms in the $\Lambda$ configuration, when the detuning parameters are equal, $\Delta_{21}=\Delta_{31}$, the problem has an analytic solution. The critical points are $\varrho_{2c}=\varrho_{3c}=\nobreak0$ for the normal regime, implying an energy surface for the ground state equal to zero. For the collective regime, $\varrho_{2c}$ and $\varrho_{3c}$ take the values
\begin{eqnarray}\label{eq.V.criticos}
\varrho_{2c} = \mu_{13} \sqrt{\frac{\mu_{12}^2+ \mu_{13}^2-\Omega \, \omega_3}{\left(\mu_{12}^2+ \mu_{13}^2\right) \left(\mu_{12}^2+\mu_{13}^2+\Omega \, \omega_3\right)}}, \quad\\
\varrho_{3c} =\mu_{12}\sqrt{\frac{\mu_{12}^2+ \mu_{13}^2 - \Omega \, \omega_3}{\left(\mu_{12}^2+\mu_{13}^2\right) \left(\mu_{12}^2+\mu_{13}^2 + \Omega \, \omega_3\right)}} \, . \quad
\end{eqnarray}
Substituting these into the expression for the energy Eq. (\ref{eq.E.G3}), one finds
\begin{eqnarray}\label{eq.V.minE}
E^c_{V} = -\frac{1}{4\Omega} \frac{\left(\mu_{12}^2 + \mu_{13}^2 - \Omega \, \omega_3\right)^2}{\mu_{12}^2 + \mu_{13}^2} \, .
\end{eqnarray}
This is similar to the case of atoms in the $\Lambda$ configuration; in fact, the expression is obtained by just replacing $\mu_{23}\to\mu_{12}$ in Eq. (\ref{eq.L.minE}). A similar situation occurs for the $Q_M$-Mandel parameter, which is given by
\begin{eqnarray}
Q_M = - \frac{\Omega^2\left(\mu_{12}^2 + \mu_{13}^2 - \Omega \, \omega_3\right)}{\left(\mu_{12}^2 + \mu_{13}^2 \right)\left(\mu_{12}^2 + \mu_{13}^2 + \Omega \, \omega_3\right)} \, .
\end{eqnarray}
Hence, atoms in both configurations $V$ and $\Lambda$ have similar properties under equal detuning considerations.
By considering the case of unequal detuning $\Delta_{21}\neq\Delta_{31}$, we choose to analyze the case $\Delta_{21} = 0.2$ and $\Delta_{31}=0.3$. Fig. \ref{f7} shows the first derivatives of the energy surface for the ground state as a function of the control parameters $\mu_{12},\ \mu_{13}$. In both cases the first derivative is continuous, and so second-order transitions are present.
Similarly the $Q_M$-Mandel parameter is continuous [Fig.~\ref{f8}(a)] in a vicinity of the separatrix (white line). The corresponding ${\cal M}$ distribution of the coherent state with $N_a=40$ for three different points are shown in Figs.~\ref{f8}(b), (c) and (d) (bars), and these are compared with their respective Poissonian distribution (dots). One can observe that the ${\cal M}$ distribution is very close to the corresponding Poissonian one, and this is due to the fact that $Q_M\sim 10^{-2}$ is close to zero for any considered value.
Finally, Fig.~\ref{f9} shows the corresponding photon number distribution in units of the total number of atoms $r_c^2 = \rho_c^2/N_a$. This quantity is a continuous smooth function around the separatrix, since this configuration presents only second-order transitions.
The same results are obtained for various values of the detuning parameters.
\begin{figure*}
\caption{(Color online.) Properties of atoms in the $\Xi$ configuration in a double resonance condition, with fixed value $\mu_{23}
\label{f11}
\end{figure*}
\section{Comparison with the quantum solution}\label{QNS}
The exact numerical calculation of the ground state energy may be evaluated using the uncoupled basis given by the direct product between the field $|n\rangle$ and matter states Eq. (\ref{eq.G.state}). Since we have chosen $h_1=N_a$ and $h_2=h_3=0$, one may simplify the Gelfand-Tsetlin notation as
\begin{eqnarray}\label{eq.uncoupled.basis}
|n q_1 r\rangle \equiv |n\rangle \otimes\left|\begin{array}{c c c} q_1 & & 0 \\ & r & \end{array}\right\rangle \, ,
\end{eqnarray}
Here, $q_2$ is zero because it must satisfy $h_2\geq q_2\geq h_3$. The corresponding matrix elements of the operators $\bm{A}_{ij}$ (for this particular basis) are given in Appendix \ref{ap.Aij}, which can be used to calculate the matrix elements of the Hamiltonian, Eq. (\ref{eq.H.3level}), and to evaluate numerically its eigenvalues.
For each particular atomic configuration ($\Xi,\ \Lambda$ or ${\rm V}$) there is an additional constant of motion $\bm{M}$, namely the total number of excitations (\ref{eq.M.Xi}-\ref{eq.M.V}). Taking a particular configuration, the Hamiltonian has a matrix representation as a {\em block diagonal matrix}, where the dimension of each block of the diagonal depends on $M^q$ and $N_a$. For large values of $M^q$, however, the dimension depends only on $N_a$ and is given by
\begin{equation*}
\frac{N_a(N_a +1)}{2} + N_a +1 \, ;
\end{equation*}
this occurs for $M^q_\Xi\geq 2 \, N_a$ ($\Xi$ configuration), $M^q_\Lambda\geq N_a$ ($\Lambda$ configuration) and $M^q_V\geq N_a$ (${\rm V}$ configuration), relationships provided by the condition $n\geq0$ in Eq. (\ref{eq.M.Xi}-\ref{eq.M.V}). For $M^q_\Xi< 2N_a, \ M^q_\Lambda<N_a$ or $M^q_V<N_a$ we could not find a simple relationship for the dimension of the matrix.
To find the quantum ground energy and its corresponding eigenstate, we proceed as follows. For each configuration of the atom, we take a value of $M^q$, and for fixed parameters $\Omega,\ \omega_1,\ \omega_2$ and $\omega_3$ the eigenvalues and their corresponding eigenstates are evaluated numerically as functions of the control parameters $\mu_{ij}$. This gives us the ground state energy for each corresponding total number of excitations.
It is worth mentioning that, for a fixed region of values of the interaction intensity, one may estimate the maximum value of $M^q$ that is required to find the minimum energy; this value is provided by semi-classical calculation.
In order to see how well the semi-classical results approximate the corresponding exact quantum ones, we consider atoms in the $\Xi$ configuration in a double resonance for $N_a=5$ atoms. Notice that the quantum calculation of the ground state depends on the number of atoms $N_a$ considered, and this is in contrast with the semi-classical one where this quantity plays the role of an extensive variable. Let us focus on the expectation values of the total number of excitations $\langle \bm{M}\rangle$, number of photons $\langle \bm{n}\rangle$ and its fluctuations $\left(\Delta n\right)^2=\langle \bm{n}^2\rangle-\langle \bm{n}\rangle^2$.
\begin{figure*}
\caption{(Color online.) Exact and projected solutions compared for atoms in the $\Xi$ configuration in double resonance $\Delta_{21}
\label{f12}
\end{figure*}
Fig.~\ref{f10} shows, respectively, the expectation values of the total number of excitations [Fig.~\ref{f10}(a)], number of photons [Fig.~\ref{f10}(b)] and photon fluctuations [Fig.~\ref{f10}(c)] as a function of the intensity $\mu_{12}$ for a fixed value $\mu_{23}=0.5$. In all cases, the semi-classical calculation is represented by a continuous line while the corresponding exact quantum calculation by dots. One may observe that in the case of the expectation values both calculations are in very good agreement [Figs.~\ref{f10} (a) and (b)]. The fluctuation in the number of photons, however, fails to render the quantum results [Fig.~\ref{f10}(c)], except in the normal regime where $\langle \bm{n}\rangle = 0$ in both cases. This difference is due to the fact that in the semi-classical ground state a coherent state for the photon contribution is considered, and hence, the fluctuations are equal to its expectation value, $\left(\Delta n\right)^2 = \langle \bm{n}\rangle$; in other words, it possesses a Poissonian distribution. However, the photon distribution of the exact ground state does not have this property, because the total number of excitations is fixed for this state.
The above comparison suggests that we should consider an additional correction to our semi-classical test state.
\section{Projected variational state}\label{quantum.proj}
The matter unnormalized ${\rm U}(3)$ coherent state for the totally symmetric representation, i.e., $h_2=h_3=0$, can be written as
\begin{equation}
|h_1, \, \vec{\gamma}\} = \sum^\infty_{n,m=0} \frac{\gamma_2{}^n}{n!} \, \frac{\gamma_3{}^m}{m!} \, (\bm{A}_{21})^m \, (\bm{A}_{31})^n \vert h_1,0,0\rangle_F
\end{equation}
because $\bm{A}_{32} \vert h_1,0,0\rangle_F =0$ and where $\vert h_1,0,0\rangle_F$ denotes the Gelfand-Tsetlin highest weight state (HWS). In this case, one can represent the ${\rm U}(3)$ generators as follows: $\bm{A}_{31} = \bm{b}_3^\dagger \, \bm{b}_1$ and $\bm{A}_{21} = \bm{b}_2^\dagger \, \bm{b}_1$. Then the HWS can be written as
\begin{equation}\label{hws}
\vert h_1,0,0\rangle_F = \frac{1}{\sqrt {h_1!}} (\bm{b}_1^{\dagger})^{h_1} \vert 0,0,0 \rangle_F \, ,
\end{equation}
where we are using the Fock vacuum state $\vert 0,0,0 \rangle_F$ defined by $\bm{b}_k \vert 0,0,0 \rangle_F =0$ with $k=1,2,3$. The action of $\bm{A}_{31}$, and $\bm{A}_{21}$ on (\ref{hws}) is straightforward and results in
\begin{equation*}
|h_1, \, \vec{\gamma}\} = \sum^{h_1}_{n=0} \, \sum^{h_1-n}_{m=0} {\textstyle\sqrt{\frac{h_1 !}{(h_1-n-m)! \, n! \, m!}}} \
\gamma_2{}^m \, \gamma_3{}^n\vert h_1-n-m,n,m \rangle_F \, .
\end{equation*}
Therefore, the semi-classical variational state constructed by the tensor product of matter and field components is given by
\begin{eqnarray}\label{test}
|\alpha;\, h_1, \, \vec{\gamma}\rangle &=& \frac{{\rm e}^{-|\alpha|^2/2}}{\{ h_1; \, \vec{\gamma} \, | \, h_1; \, \vec{\gamma}\} ^{1/2}} \ \sum_{\nu=0}^\infty \sum_{n=0}^{h_1} \sum_{m=0}^{h_1-n}\frac{\sqrt{h_1!}\ \alpha^\nu \gamma_3^{n}\ \gamma_2^{m} }{\sqrt{\nu!\ n!\ m!\ (h_1-n-m)!}} \nonumber \\[3mm]
&&\times |\nu\, , h_1-n-m\, , n\, , m\rangle_F,
\end{eqnarray}
where, by means of (\ref{eq.Kp}) with $h_2=h_3=0$ and $\vec \gamma^\prime = \vec \gamma$,
\begin{equation*}
\{ h_1, \, \vec{\gamma} \, | \, h_1, \, \vec{\gamma}\} = \left(1+|\gamma_2|^2+|\gamma_3|^2\right)^{h_1}
\end{equation*}
and one can thus write (\ref{test}) in the form
\begin{eqnarray}\label{eq.sc.coh}
|\alpha;\, h_1, \, \vec{\gamma}\rangle &=& \frac{{\rm e}^{-|\alpha|^2/2}}{\left(1+|\gamma_2|^2+|\gamma_3|^2\right)^{h_1/2}}\frac{1}{\sqrt{h_1!}}\ {\rm e}^{\alpha\bm{a}^\dag}\left(\bm{b}_1^\dag + \gamma_3 \, \bm{b}_2^\dag + \gamma_2 \, \bm{b}_3^\dag\right)^{h_1} \nonumber \\[3mm] &&\times |0,0, 0,0\rangle_F \, .
\end{eqnarray}
To have a variational state with a definite total number of excitations, we replace the eigenvalue of the number of photons by $\nu =M- \lambda_2 n - \lambda_3 m$. To select the atom configuration one uses the corresponding values of $\lambda_2$ and $\lambda_3$ in Table \ref{t1}. Then the unnormalized projected state is
\begin{eqnarray}\label{eq.proj}
|\alpha;\, h_1, \, \vec{\gamma}\}_M &=& \sum_{n=0}^{h_1} \sum_{m=0}^{h_1-n}\frac{\sqrt{h_1!}\ \alpha^{M- \lambda_2 n - \lambda_3 m} \gamma_3^{n}\ \gamma_2^{m} }{\sqrt{(M- \lambda_2 n - \lambda_3 m)!\ n!\ m!\ (h_1-n-m)!}} \nonumber \\[3mm] &&\times |M- \lambda_2 n - \lambda_3 m, \ h_1-n-m,\ n,\ m\rangle_F,
\end{eqnarray}
and contains only states with a fixed value of $M$, so that the semi-classical coherent state is written in simple form as
\begin{eqnarray}\label{eq.exp.proj}
|\alpha;\, h_1, \, \vec{\gamma}\rangle = \frac{{\rm e}^{-|\alpha|^2/2}}{\left(1+|\gamma_2|^2+|\gamma_3|^2\right)^{h_1/2}} \, \sum_{M=0}^\infty |\alpha;\, h_1, \, \vec{\gamma}\}_M \, . \quad
\end{eqnarray}
The state $|\alpha;\, h_1, \, \vec{\gamma}\}_M$ is the unnormalized projected state.
Since the expectation value of the total number of excitations is very close to the exact one [see Fig. \ref{f10}(a)], one may correct the semi-classical ground state by considering, for each value of $\langle \bm{M}\rangle$, the corresponding projected state $|\alpha;\, h_1, \, \vec{\gamma}\}_M$, but as the semi-classical calculation of $\langle \bm{M}\rangle$ is a continuous function of the control parameters, it is necessary to discretize it. We do this by defining $M_{dis} = \lceil\langle \bm{M}\rangle\rceil$, the ceiling of the expected $\bm{M}$ value. So, for particular values of the control parameters we define the {\it projected state} as $|\alpha;\, h_1, \, \vec{\gamma}\}_{M_{dis}}$.
We will use these projected states to calculate the expectation values of observables. To this end, the overlap is given by [from Eq. (\ref{eq.proj})]
\begin{eqnarray}\label{eq.norma.proj}
\{\alpha;\, h_1, \, \vec{\gamma} | \alpha;\, h_1, \, \vec{\gamma}\}_{M_{dis}} &=& \sum_{n=0}^{h_1} \sum_{m=0}^{h_1-n}\frac{h_1!\ \rho_c^{2(M_{dis}- \lambda_2 n - \lambda_3 m)} }{(M_{dis}- \lambda_2 n - \lambda_3 m)!} \nonumber \\[2mm]
&&\times \frac{ \varrho_{3c}^{2n}\ \varrho_{2c}^{2 m} }{ n!\, m!\, (h_1-n-m)!}\,,
\end{eqnarray}
where we have evaluated at the critical points of the semi-classical calculation. As an example, the unnormalized expectation value of the number of photons reads
\begin{eqnarray}\label{eq.nu.proj}
\{\alpha;\, h_1, \, \vec{\gamma} |\bm{n}| \alpha;\, h_1, \, \vec{\gamma}\}_{M_{dis}} &=& \sum_{n=0}^{h_1} \sum_{m=0}^{h_1-n} \frac{h_1!\ \rho_c^{2(M_{dis}- \lambda_2 n - \lambda_3 m)} }{(M_{dis}- \lambda_2 n - \lambda_3 m-1)!}
\nonumber \\
&&\times \frac{\varrho_{3c}^{2n}\ \varrho_{2c}^{2 m} }{ n!\, m!\, (h_1-n-m)!}\, .
\end{eqnarray}
Fig.~\ref{f11} shows, in similar form to Fig.~\ref{f10} and for the same parameters and atomic configuration, the expectation values of $\bm{M}$ [Fig.~\ref{f11}(a)], $\bm{n}$ [Fig.~\ref{f11}(b)] and its fluctuations $\left(\Delta n\right)^2$ [Fig.~\ref{f11}(c)], comparing the exact calculation (EQ, darker dots) with the corresponding one using the projected state (SC, lighter dots). Notice that now the photon fluctuations provided by the projected state are comparable with the exact calculation, showing that the projected state corrects the wrong behavior of the fluctuations of the standard coherent state.
Figure~\ref{f11} is shown for $N_a=5$ atoms; for larger values of $N_a$ both calculations will be indistinguishable.
\begin{figure*}
\caption{(Color online.) Exact and projected solutions compared for atoms in the $\Lambda$ configuration in a non-resonant condition $\Delta_{31}
\label{f13}
\end{figure*}
\begin{figure*}
\caption{
(Color online.) Exact and projected solutions compared for atoms in the $V$ configuration in double resonance $\Delta_{21}
\label{f14}
\end{figure*}
\subsection{$\Xi$ configuration}\label{quantum.proj.X}
For $N_a=40$ atoms in the $\Xi$ configuration, in double resonance, i.e., $\Delta_{21}=\Delta_{32}=0$, the expectation value of the number of photons and its fluctuations are compared for both the exact (mesh) and projected variational (continuous surface) states in Fig.~\ref{f12}. For $\langle\bm{n}\rangle_\Xi /N_a$ [Fig.~\ref{f12}(a)] there are no visual differences. In fact, this figure is identical to Fig.~\ref{f3}, where the expectation value of the number of photons is shown for the semi-classical coherent state. Table \ref{t2} shows the minimum and maximum values of the difference between the projected and exact results given by
\begin{table}
\caption{Maximum and minimum difference between projected and exact quantum results Eq. (\ref{eq.deltaNph}) for the three configurations of the atom. The maximum difference is reached close to the separatrix due to the finite number of atoms; this value diminishes as we move away from the separatrix or as $N_{a}$ is increased.}
\begin{center}
\begin{tabular}{|c|cc|}
\hline
Configuration&$\min\delta \langle{\bf n}\rangle/N_a$& $\max\delta \langle{\bf n}\rangle/N_a$\\
\hline & &\\[-3mm]
$\Xi$&0&$\sim3.3\times10^{-2}$\\
$\Lambda$&0&$\sim7.7\times10^{-1}$\\
$V$&0&$\sim2.4\times10^{-2}$\\
\hline
\end{tabular}
\end{center}
\label{t2}
\end{table}
\begin{eqnarray}\label{eq.deltaNph}
\frac{\delta\langle \bm{n}\rangle}{N_a} &\equiv& \left|\frac{\langle \bm{n}\rangle_{proj}-\langle \bm{n}\rangle_q}{N_a}\right|,
\end{eqnarray}
in absolute value and normalized by the number of atoms. In the normal regime the difference vanishes exactly, while in the collective regime it is of order $\sim 10^{-2}$. Finally, Fig.~\ref{f12}(b) shows the corresponding fluctuations presenting very small differences in the collective regime.
\subsection{$\Lambda$ configuration}\label{quantum.proj.L}
For the $\Lambda$ configuration we consider a non-resonant case $\Delta_{31}=0.3$ and $\Delta_{32}=-0.2$, and $N_a=40$ atoms. Under these conditions the behavior of the physical observables resembles that of the $\Xi$ configuration by showing both first- and second-order phase transitions.
Fig.~\ref{f13}(a) shows the comparison between the expectation values of the number of photons calculated with respect to the exact (mesh) and projected (continuous surface) states, where one may observe an excellent agreement between both surfaces. In the normal regime the difference $\delta\langle \bm{n}\rangle/N_a $ vanishes exactly, while in the collective regime the maximum value is of order $\sim 10^{-1}$. As in the previous case, this diminishes as we move away from the separatrix or as $N_{a}$ is increased. Fig.~\ref{f13}(b) compares the fluctuations in the number of photons. In contrast to the $\Xi$ configuration, here the fluctuations tend asymptotically to a constant value.
\subsection{$V$ configuration}\label{quantum.proj.V}
Finally, we consider the expectation value of the number of photons for atoms in the $V$ configuration, in a double resonance condition $\Delta_{21}=\Delta_{31}=0$, with $N_a=40$ atoms. As discussed in the semi-classical calculation of Sec.~\ref{NR.V}, the qualitative behavior of the physical quantities for this configuration is independent of the detuning considered.
Fig.~\ref{f14}(a) shows the comparison between the expectation value of the number of photons evaluated for the exact quantum (mesh) and projected (continuous surface) states. One may observe that there are no visual differences. Differences of order $\sim 10^{-2}$ appear in the collective regime, as shown in Table~\ref{t2}. The fluctuations are shown in Fig.~\ref{f14}(b), and once again these approach a constant in the collective regime, in a similar fashion to the $\Lambda$ configuration.
\section{Concluding remarks}\label{concluding}
The ground state of a system of $N_a$ three-level atoms interacting via dipole interactions with a one-mode quantized electromagnetic field was described, in the rotating wave approximation. The different atomic configurations $\Xi$,
$\Lambda$, and $V$ were considered.
The ground state was approximated by a test function (semi-classical state) constructed from the tensorial product of Heisenberg-Weyl and U(3) coherent states. There are two different behaviors called normal, where the ground state is given by all the atoms in the lower energy level and without photons (${\cal M}=0$), and collective, where the atoms are distributed amongst the three levels of the system, and with a corresponding number of excitations ${\cal M} \neq 0$ and average number of photons $\langle \bm n \rangle \neq 0$.
The ground state of the system in the $\Xi$ configuration exhibits first- and second-order transitions, independently of the detuning values (see Fig.~\ref{f1}). For atoms in the $\Lambda$ configuration, one finds for equal detuning values that it can only present second-order transitions; this is shown analytically in Eq.~(\ref{eq.L.minE}). For different detuning parameters, this configuration yields first- and second-order transitions (see Fig.~\ref{f4}). For atoms in the $V$ configuration, independently of the detuning, there are only second-order transitions, and this is shown analytically in Eq.~(\ref{eq.V.minE}) for equal detuning parameters and numerically in Fig.~\ref{f7} for other cases.
For all atomic configurations, we have found that in the normal regime the expectation value of the total number of excitations with respect to the ground state is zero and it follows a Poissonian distribution. In the collective regime the total number of excitations for the ground state has a sub-Poissonian distribution as shown in Figs.~\ref{f2}, \ref{f5} and \ref{f8}. The expectation values of the number of photons given in Figs.~\ref{f3} and \ref{f6} display discontinuities where first-order transitions take place.
For $N_a=5$ atoms in the $\Xi$ configuration, the exact quantum calculation for the expectation values of the total number of excitations and of the number of photons were compared with the corresponding semi-classical ones. Both calculations agree, as shown in Figs.~\ref{f10}(a) and (b). Similar results can be obtained for the other configurations. However, the fluctuations in the number of photons are very different [Fig.~\ref{f10}(c)], which suggests to consider a new test function. We proposed to project the semi-classical test function to a definite total number of excitations ${\cal M}$; this projected state was obtained by choosing the ceiling value of ${\cal M}^c$ together with the critical points for the semi-classical case. We showed that the photon fluctuations provided by the projected state are comparable with those of the exact calculation, so that the projected state corrects the wrong behavior of the standard coherent state (cf. Fig.~\ref{f11}).
Finally, for $N_a=40$ atoms the expectation values and fluctuations of the number of photons were calculated. In all cases, we have found that the results for the projected state are indistinguishable from those of the exact one as can be seen in Figs.~\ref{f12}(a), \ref{f13}(a) and \ref{f14}(a). To have a quantitative estimation of the differences between these calculations, we used Eq.~(\ref{eq.deltaNph}) observing the major differences along the separatrix [Table~\ref{t2}]. This is valid for all atomic configurations.
For the $\Xi$ configuration, in the double resonance case and any number of atoms, we found a fixed point in the parameter space $(\mu_{12}=1, \, \mu_{23}=\sqrt 2)$, in which there is coexistence between three different eigenstates associated to the same energy. They correspond to a total number of excitations of ${\cal M}=0$, ${\cal M}=1$, and ${\cal M}=2$, thus implying the presence of a triple point in the parameter space.
When more than one electromagnetic modes are present the physics can be much richer. Specific cases where each mode resonates with one and only one atomic energy transition, and where one considers only one atomic configuration, have been studied in the thermodynamic limit~\cite{brandes}. The general situation, however, is highly non-trivial and merits further study.
\subsection*{Acknowledgments}
This work was partially supported by CONACyT-M\'exico (under project
101541), and DGAPA-UNAM (under project IN102811).
\appendix
\section{Matrix elements of the ${\rm U}(3)$ operators}\label{ap.Aij}
The matrix elements of the generators of ${\rm U}(3)$, for a general irreducible representation $[h_1,h_2,h_3]$, can be found in~\cite{moshinsky67}. For the totally symmetric representation, $[h_1,0,0]$, the Gelfand-Tsetlin states take the form
\begin{eqnarray}
|qr\rangle&\equiv&\left|\begin{array}{c c c} q & & 0 \\ & r & \end{array} \right\rangle,
\end{eqnarray}
where $q$ and $r$ take values from $0$ to $h_1$.
In this representation, the matrix elements of the atomic operators $\bm{A}_{ij}$ are given by
\begin{eqnarray}
\langle q r|\bm{A}_{11}|q r\rangle = r,
\end{eqnarray}
\begin{eqnarray}
\langle q r|\bm{A}_{22}|q r\rangle = q-r,
\end{eqnarray}
\begin{eqnarray}
\langle q r|\bm{A}_{33}|q r\rangle = h_1-q,
\end{eqnarray}
\begin{eqnarray}
\langle q r+1|\bm{A}_{12}|q r\rangle = \sqrt{(q-r)(r+1)}, \qquad
\end{eqnarray}
\begin{eqnarray}
\langle q+1 r+1|\bm{A}_{13}|q r\rangle = \sqrt{(h_1-q)(r+1)}, \qquad
\end{eqnarray}
\begin{eqnarray}
\langle q+1 r|\bm{A}_{23}|q r\rangle = \sqrt{(h_1-q)(q-r+1)}, \qquad
\end{eqnarray}
and zero for other cases.
\section*{References}
\end{document} |
\begin{document}
\title{Test of Einstein-Podolsky-Rosen Steering Based on the All-Versus-Nothing Proof}
\author{Chunfeng~Wu\footnote{Correspondence to:
chunfeng\_wu@sutd.edu.sg}} \affiliation{Centre for Quantum
Technologies, National University of Singapore, 3 Science Drive 2,
Singapore 117543} \affiliation{Pillar of Engineering Product
Development, Singapore University of Technology and Design, 20 Dover
Drive, Singapore 138682}
\author{Jing-Ling~Chen\footnote{Correspondence to:
cqtchenj@nus.edu.sg}} \affiliation{Centre for Quantum Technologies,
National University of Singapore, 3 Science Drive 2, Singapore
117543} \affiliation{Theoretical Physics Division, Chern Institute
of Mathematics, Nankai University, Tianjin 300071, People's Republic
of China}
\author{Xiang-Jun~Ye}
\affiliation{Centre for Quantum Technologies, National
University of Singapore, 3 Science Drive 2, Singapore 117543}
\affiliation{Theoretical Physics Division, Chern Institute of
Mathematics, Nankai University, Tianjin 300071, People's Republic of
China}
\author{Hong-Yi~Su}
\affiliation{Centre for Quantum Technologies, National
University of Singapore, 3 Science Drive 2, Singapore 117543}
\affiliation{Theoretical Physics Division, Chern Institute of
Mathematics, Nankai University, Tianjin 300071, People's Republic of
China}
\author{Dong-Ling~Deng}
\affiliation{Department of Physics and Michigan Center for
Theoretical Physics, University of Michigan, Ann Arbor, Michigan
48109, USA}
\author{Zhenghan~Wang}
\affiliation{Microsoft Research,
Station Q, University of California, Santa Barbara, CA 93106, USA}
\author{C. H. Oh\footnote{Correspondence to:
phyohch@nus.edu.sg}}
\affiliation{Centre for Quantum Technologies, National
University of Singapore, 3 Science Drive 2, Singapore 117543}
\affiliation{Department of Physics, National University of
Singapore, 2 Science Drive 3, Singapore 117551}
\date{\today}
\maketitle
\textbf{In comparison with entanglement and Bell nonlocality,
Einstein-Podolsky-Rosen steering is a newly emerged research topic
and in its incipient stage. Although Einstein-Podolsky-Rosen
steering has been explored via violations of steering inequalities
both theoretically and experimentally, the known inequalities in the
literature are far from well-developed. As a result, it is not yet
possible to observe Einstein-Podolsky-Rosen steering for some steerable mixed
states. Recently, a simple approach was presented to identify
Einstein-Podolsky-Rosen steering based on all-versus-nothing
argument, offering a strong condition to witness the steerability of
a family of two-qubit (pure or mixed) entangled states. In this
work, we show that the all-versus-nothing proof of
Einstein-Podolsky-Rosen steering can be tested by measuring the
projective probabilities. Through the bound of probabilities imposed
by local-hidden-state model, the proposed test shows that steering
can be detected by the all-versus-nothing argument experimentally
even in the presence of imprecision and errors. Our test can be
implemented in many physical systems and we discuss the possible
realizations of our scheme with non-Abelian anyons and trapped
ions.}
\noindent In 1935, Einstein, Podolsky, and Rosen (EPR)
questioned the completeness of quantum mechanics (QM) based on local
realism~\cite{1935Einstein}. Many efforts have been devoted to a
deeper understanding of QM in the form of three types of quantum
nonlocalities: quantum entanglement, EPR steering, and Bell
nonlocality~\cite{WJD07}. Within the hierarchy of nonlocalities, the
set of EPR steerable states is a subset of entangled states and a
superset of Bell nonlocal states. Quantum entanglement and Bell
nonlocality have attained flourishing developments since 1964.
However, EPR steering is a newly emerged research topic and, to
date, is far from being completely understood. Steering inequalities
for EPR steering are the analog of Bell inequalities for Bell
nonlocality. Their violations, predicted by quantum mechanics,
reveal EPR steering. Such a violation rules out the existence of a
local-hidden-state (LHS) model, the same way the violation of a Bell
inequality rules out the existence of a local-hidden-variable (LHV)
model. In comparison to the development of Bell nonlocality, the
research on EPR steering is in its developing stages, even though
Schr{\" o}dinger discussed the concept in 1935~\cite{Schrsteering}.
A reason for this is the absence of a rigorous formulation of the
concept of EPR steering, which did not appear until the work of
Wiseman, Jones, and Doherty~\cite{WJD07} in 2007. Indeed, EPR
steering answers a question of fundamental quantum physics as well
as opens new possibilities for quantum communication, thus it has
inspired some recent research in quantum information
theory~\cite{SQKD}.
For a pure entangled state shared by two separated observers Alice
and Bob, Bob's qubit can be ``steered" into different states
although Alice has no access to the qubit. Schr\"{o}dinger adopted
the word \emph{steering} to describe this type of nonlocality. This
means that Alice has the ability to remotely prepare Bob's particle
in different states by measuring her particle using different settings,
and here we use $\tilde{\rho}^{A}_a$ to denote the conditional state Bob gets if Alice measures her particle with measurement $\hat{A}$ and obtains result $a$.
However, Bob suspects that Alice may send
him some non-entangled particles and fabricate the results based on her
knowledge of the LHS. If Bob's system admits a LHS model $\{ \wp_{\xi} \rho_{\xi} \}$, where $\rho_{\xi}$'s are states that Bob does not know (but Alice knows),
and $\wp_{\xi}>0$ is the probability of $\rho_{\xi}$, then Alice could attempt to fabricate the results
using her knowledge of $\xi$, in other words, $\tilde{\rho}^{A}_a=\sum_{\xi} \wp(a|\hat{A},\xi) \wp_{\xi} \rho_{\xi}$, with $\sum_{a} \wp(a|\hat{A},\xi)=1$.
If Bob finds that there is a LHS model which can
describe his conditional states after he asks Alice to perform the
measurement on her particle, then he is not convinced of the existence
of EPR steering.
Very recently many results have been achieved to show violations of
steering inequalities both theoretically and experimentally, thus
rendering the LHS model untenable
\cite{Reid1,Reid2,stin,NP2010,NC2012,He11,Loopholefree}. However,
the existing steering inequalities in the literature are far from
being well-developed, and therefore it is not yet possible to observe EPR
steering for some steerable mixed states~\cite{EPR-us}. Another elegant
approach to explore the contradiction between QM and LHS model is
the \emph{all-versus-nothing} (AVN) proof of the existence of EPR
steering. This can be considered as the steering analog of
Greenberger-Horne-Zeilinger (GHZ) argument without inequalities for
Bell nonlocality~\cite{GHZ89}. Currently such an AVN proof for EPR
steering has been shown to be a strong condition to witness the
steerability of a family of two-qubit (pure or mixed) entangled
states and has the ability to detect asymmetric
steering~\cite{EPR-us}. This also offers an effective way to detect
EPR steering for two qubits experimentally.
In this work, we investigate the test of EPR steering according to
its AVN argument and demonstrate directly the contradiction between
LHS model and QM. We show that by observing projective
probabilities, the existence of steering can be verified by defining
a probability bound imposed by LHS model. Our test is the first one
proposed to detect EPR steering based on the AVN proof and it is
suitable for all the two-qubit entangled states specified in
Ref.~\cite{EPR-us}, both pure and mixed. The possible implementation
of our test is discussed by using non-Abelian Fibonacci anyons and
trapped ions, but it is not limited to these systems. Our test is
also applicable to many other physical systems, such as photons,
atoms as well as superconductors, etc. In a system of non-Abelian Fibonacci
anyons, each logical qubit is encoded into a triplet of Fibonacci
anyons and the corresponding operations are carried out by braiding
the anyons. Since braids are performed by taking an anyon either around
another or not, small errors cannot arise from slight
imprecisions in the way that anyons are moved. Therefore, the test
is fault-tolerant to errors and offers high experimental precision.
In an ion-trap experiment, present experimental achievements on
high-fidelity state initialization, quantum gates and state readout
make our scheme of detecting steering possibly testable.
\noindent{\bf Results}
\noindent First let Alice and Bob share
a pure entangled state
$|\Psi\rangle_{AB}=\cos\theta|00\rangle_{AB}+\sin\theta|11\rangle_{AB}$.
In the steering scenario, Alice adopts the following settings:
$\hat{A} \in \{ \mathcal {P}^{\hat{z}}_a, \mathcal {P}^{\hat{x}}_a
\}$, where $\mathcal {P}^{\hat{z}}_a$ and $\mathcal {P}^{\hat{x}}_a$
denote Alice's projective measurements in $\hat{z}$- and
$\hat{x}$-directions, and $a$ (with $a=0, 1$) is measurement
result. After Alice's measurements, Bob's conditional states become
\begin{eqnarray}\label{crhoz}
\tilde{\rho}^{\hat{z},0}_B&=&\cos^{2}\theta \; |0\rangle_B \langle 0 |,\nonumber\\
\tilde{\rho}^{\hat{z},1}_B&=&\sin^{2}\theta \;|1\rangle_B \langle 1 |,\nonumber\\
\tilde{\rho}^{\hat{x},0}_B&=&\frac{1}{2} |\psi\rangle_B \langle \psi|,\nonumber\\
\tilde{\rho}^{\hat{x},1}_B&=&\frac{1}{2} | \varphi \rangle_B \langle
\varphi|,
\end{eqnarray}
where $\tilde{\rho}^{\hat{A},a}_B$ describes Bob's state after Alice
performs measurement $\hat{A}$ and obtains result $a$, and $|\psi
\rangle_B= \cos \theta |0\rangle_B + \sin \theta |1\rangle_B$,
$|\varphi \rangle_B= \cos \theta |0\rangle_B - \sin \theta
|1\rangle_B$. If there exists a LHS model that can fake the results
(\ref{crhoz}), i.e., there exists a suitable ensemble $\{ \wp_{\xi}
\rho_{\xi} \}$ and a stochastic map $\wp(a|\hat{A},\xi)$ satisfying
$\tilde{\rho}^{\hat{A},a}_B=\sum_\xi \wp(a|\hat{A},\xi) \wp_{\xi}
\rho_{\xi}$, then Bob is not convinced that Alice can \emph{steer}
his conditional states. Otherwise the LHS model contradicts QM.
According to the AVN proof \cite{EPR-us}, the entangled state
$|\Psi\rangle_{AB}$ cannot be described by any LHS model except when $\theta=0$ or $\theta=\pi/2$. The incisive contradiction between QM and LHS
model is due to different predicted projective probabilities as stated in the following. For QM, Bob obtains zero probabilities
after he performs some appropriate projective measurements on his
qubit
\begin{eqnarray}
P^{\rm QM}_{1}&=& {\rm Tr}[ |1\rangle_B \langle 1| \tilde{\rho}^{\hat{z},0}_B] = 0 ,\nonumber \\
P^{\rm QM}_{2}&=& {\rm Tr}[ |0\rangle_B \langle 0| \tilde{\rho}^{\hat{z},1}_B] = 0 ,\nonumber \\
P^{\rm QM}_{3}&=& {\rm Tr}[ |\psi^\perp \rangle_B \langle \psi^\perp | \tilde{\rho}^{\hat{x},0}_B] = 0 , \nonumber\\
P^{\rm QM}_{4}&=& {\rm Tr}[ |\varphi^\perp \rangle_B \langle
\varphi^\perp | \tilde{\rho}^{\hat{x},1}_B] = 0,\label{cF}
\end{eqnarray}
where
$|\psi^\perp\rangle_B=\sin\theta|0\rangle_B-\cos\theta|1\rangle_B$
and
$|\varphi^\perp\rangle_B=\sin\theta|0\rangle_B+\cos\theta|1\rangle_B$
are orthogonal to $|\psi\rangle_B$ and $|\varphi\rangle_B$,
respectively. However, for a LHS model, it predicts the corresponding probabilities as follows,
\begin{eqnarray}
&&P^{\rm LHS}_{1}={\rm Tr}[|1\rangle_B \langle 1 | \sum_\xi \wp(a=0|\hat{z},\xi) \wp_{\xi} \rho_{\xi} ], \nonumber\\
&&P^{\rm LHS}_{2}={\rm Tr}[|0\rangle_B \langle 0 | \sum_\xi \wp(a=1|\hat{z},\xi) \wp_{\xi} \rho_{\xi} ],\nonumber\\
&&P^{\rm LHS}_{3}={\rm Tr}[|\psi^\perp\rangle_B \langle \psi^\perp | \sum_\xi \wp(a=0|\hat{x},\xi) \wp_{\xi} \rho_{\xi} ], \nonumber\\
&&P^{\rm LHS}_{4}={\rm Tr}[|\varphi^\perp\rangle_B \langle
\varphi^\perp | \sum_\xi \wp(a=1|\hat{x},\xi) \wp_{\xi} \rho_{\xi}
].\label{qqcF}
\end{eqnarray}
From the AVN proof \cite{EPR-us}, we know that the state $|\Psi\rangle_{AB}$ possesses EPR steering if $\theta\neq 0$ and $\theta\neq\pi/2$, and this tells us there exists no LHS model of the state such that $\tilde{\rho}^{A}_a=\sum_{\xi} \wp(a|\hat{A},\xi) \wp_{\xi} \rho_{\xi}$. When $\theta=0$ or $\theta=\pi/2$, the state is separable, and hence it is possible to find a LHS model to describe it. Therefore, we know that the probabilities (\ref{qqcF}) cannot be zero simultaneously except when $\theta=0$ or $\theta=\pi/2$.
In an ideal test for EPR steering, after Alice performs projective measurement on her qubit of the state
$|\Psi\rangle_{AB}$, Bob then measures the probabilities by
projecting the states $|0\rangle_B$, $|1\rangle_B$,
$|\psi^\perp\rangle_B$ and $|\varphi^\perp\rangle_B$ on his qubits.
If he finds the four probabilities $P^B_{i}$ ($i=1, 2, 3, 4$)
are all zero, then EPR steering is demonstrated. Nevertheless, in
real experiments (Exp), measurement results are inevitably affected
by experimental precision and errors. It is possible that the
probabilities obtained experimentally may deviate from the
theoretical values slightly, i.e., $P^{\rm Exp}_{i}=P^{\rm
QM}_i+\varepsilon_i$ (here $\varepsilon_i$ are small numbers caused
by errors). We then investigate how closely a LHS model could
simulate Eq.~(\ref{cF}). We have shown that for the state
$|\Psi\rangle_{AB}$ the probabilities $P^{\rm LHS}_{i}=0 \;(i
=1,2,3,4)$ only if the state shared by Alice and Bob is not
steerable, otherwise, some of $P^{\rm LHS}_{i}$ cannot be zero. For
the sake of simplicity, considering the inevitable errors
$\varepsilon_i=\varepsilon$ for all $i$, we can detect steerability
if $P^{\rm LHS}_{i} > \varepsilon$ for some $i$. Therefore, the
optimal LHS model for this experiment is the one making $P^{\rm
LHS}_{i}$ approach $P^{\rm QM}_{i}$ as closely as possible for
all $i$. We define
\begin{eqnarray}
\Delta= \min_{\rm LHS} \{ \max_{i\in\{1,2,3,4\}} \{ |P^{\rm
LHS}_{i}-P^{\rm QM}_{i}|
\} \}, \label{qqDT}
\end{eqnarray}
where $\Delta$ describes the bound of probabilities imposed by the
optimal LHS model. In our test, EPR steering can be detected when
$\Delta >\varepsilon$. Fig.~\ref{LHS1}(a) shows the relation between
parameter $\theta$ and $\Delta$ obtained numerically (see the Methods section). We
find that $\Delta$ is of order $10^{-2}$ when $\theta$ is not close
to $0$ or $\pi/2$. This implies that the EPR steering of state
$|\Psi\rangle_{AB}$ can be revealed by the experiments with
precision $\varepsilon<10^{-3}$. In Fig.~\ref{LHS1}(a), it is
observed that $\Delta$ changes symmetrically with respect to
$\theta$ and approaches its maximal value when $\theta=\pi/4$.
This shows that the more entangled the state is, the easier it is to detect EPR
steering in the experiment.
We next consider a two-qubit mixed state
\begin{eqnarray}
\rho_{AB}=\cos^2\theta|\psi^+\rangle_{AB}\langle\psi^+|+\sin^2\theta|\varphi^+\rangle_{AB}\langle\varphi^+|.
\label{rhoL}
\end{eqnarray}
Here
$|\psi^+\rangle_{AB}=\frac{1}{\sqrt{2}}(|00\rangle_{AB}+|11\rangle_{AB})$
and
$|\varphi^+\rangle_{AB}=\frac{1}{\sqrt{2}}(|01\rangle_{AB}+|10\rangle_{AB})$
are two Bell states.
The measurement settings of Alice are still $\hat{A} \in \{ \mathcal
{P}^{\hat{z}}_a, \mathcal {P}^{\hat{x}}_a \}$, and after Alice
performs measurement $\hat{A}$ and obtains result $a$, Bob's
conditional states can be expressed as
$\tilde{\rho}^{\hat{A},a}_B=\sum_\xi \wp(a|\hat{A},\xi) \wp_{\xi}
\rho_{\xi}$ provided with a LHS model $\{ \wp_{\xi} \rho_{\xi} \}$
and a stochastic map $\wp(a|\hat{A},\xi)$. Similarly, for the state
$\rho_{AB}$, Bob obtains quantum probabilities of measuring his
qubit in the states $|0\rangle_B$, $|1\rangle_B$, $|+\rangle_B$ and
$|-\rangle_B$ as
\begin{eqnarray}\label{NESF}
&&P^{\rm QM}_{1}= {\rm Tr} [ |1\rangle_B \langle 1 | \tilde{\rho}^{\hat{z},0}_B ]=\frac{\sin^2\theta}{2}, \nonumber\\
&&P^{\rm QM}_{2}= {\rm Tr} [ |0\rangle_B \langle 0 | \tilde{\rho}^{\hat{z},1}_B ]=\frac{\sin^2\theta}{2},\nonumber\\
&&P^{\rm QM}_{3}= {\rm Tr} [ |-\rangle_B \langle - | \tilde{\rho}^{\hat{x},0}_B ]=0, \nonumber\\
&&P^{\rm QM}_{4}= {\rm Tr} [ |+\rangle_B \langle + |
\tilde{\rho}^{\hat{x},1}_B ]=0,
\end{eqnarray}
where $| \pm \rangle_B = \frac{1}{\sqrt{2}}(|0\rangle_{B} \pm
|1\rangle_{B})$, and $\tilde{\rho}^{\hat{z},a}_B$ is Bob's
conditional state after Alice performs projective measurement in
$\hat{z}$-direction, etc. It has been proved that there does not
exist any LHS model for $\rho_{AB}$ with $\theta \neq
\pm\frac{\pi}{4}$~\cite{EPR-us} such that probability equations
in~(\ref{NESF}) can be satisfied simultaneously. If Bob observes
experimentally these four probabilities $P^{\rm QM}_{i}$'s, then EPR
steering of the state is exhibited, or there exists no LHS model.
Considering experimental imprecision and errors, we also investigate
the condition to detect EPR steering of $\rho_{AB}$ by plotting the
variation of LHS bound $\Delta$ versus $\theta$, see Fig.~\ref{LHS1}
(b). It can be found that for the experiments with precision
$\varepsilon<10^{-3}$, the EPR steering of $\rho_{AB}$ can be
observed when $\theta$ is not close to $\pi/4$. It is worth
pointing out that our test of EPR steering is not limited to the
states $|\Psi\rangle_{AB}$ and $\rho_{AB}$, but is also applicable to
the family of two-qubit entangled states specified in Ref.
\cite{EPR-us}, whether pure or mixed.
\noindent{\bf Discussions}
\noindent Let us make some discussions
on the possible realization of our test in physical systems. We
first consider non-Abelian Fibonacci anyons which are shown to be
the simplest non-Abelian quasiparticles for universal topological
quantum computation~\cite{2008Nayak}. Following Freedman \textit{et
al.}'s work~\cite{2002Freedman}, we encode logical qubits into
triplets of anyons with total topological charge $1$:
$|0\rangle^L=|((\bullet,\bullet)_{\mathbf{I}},\bullet)_{\tau}\rangle$
and $|1\rangle^L=|((\bullet,\bullet)_{\tau},\bullet)_{\tau}\rangle$
(here $L$ denotes ``logical"). The so-called noncomputational state
$|\texttt{NC}\rangle=|((\bullet,\bullet)_{\tau},\bullet)_{\mathbf{I}}\rangle$
is the only state of three anyons that has total topological charge
$0$. Quantum operations can be constructed by using two elementary
braiding operations $R_1,R_2$ acting on the Hilbert space of three
Fibonacci anyons and their inverses~\cite{2005Bonesteel,2007Simon}. The obtained quantum gates, together with the controlled-NOT gate obtained in Refs.~\cite{2005Bonesteel,2007Simon,GHZ-us} are useful in the
construction of EPR steering test by preparing logical-qubit states and achieving required operations (see the Methods section).
Several candidates for realizing non-Abelian anyons have been suggested in physical systems,
such as fractional quantum Hall liquid~\cite{2004Xia}, rotating Bose-Einstein condensates~\cite{2001Cooper},
as well as quantum spin systems~\cite{2004Freedman,2005Fend}.
Another possible system to explore the realization of our test
experimentally is the trapped-ion system.
Refs.~\cite{ion2006,ion20081,ion20082} have reported experimental
results of high-fidelity state preparation, quantum gate operations,
and state measurement for optical qubits stored in $^{40}{\rm
Ca}^{+}$ held in a trap. State preparation is usually done by
precisely manipulating the internal levels of ion utilizing laser
pulses and the Blatt group realized state initialization with
fidelity more than $99.8\%$~\cite{ion20081}. By a
M{\o}lmer-S{\o}rensen-type gate
operation~\cite{SMentangling1,SMentangling2}, a Bell-type entangled
state of ions with a fidelity of $99.3(1)\%$ was realized in the
same work~\cite{ion20081}. The Blatt group also presented
single-qubit gates with fidelity exceeding $99.9\%$ in trapped
ions~\cite{ion2006}. As for state measurement capability in an
ion-trap experiment, A. H. Myerson {\it et al.}~\cite{ion20082}
achieved $99.991(1)\%$ readout fidelity, sufficient for
fault-tolerant quantum computation by measuring population of states
using time-resolved photon counting. For the entangled state
realized in Ref.~\cite{ion20081}, the probability bound is found to
be $\Delta=0.0732$, and this means that the EPR steering of the
entangled state can be verified experimentally with precision
$\varepsilon<0.0732$. The experimental achievements in the
literatures~\cite{ion2006,ion20081,ion20082} tell us that our test
of EPR steering based on the AVN proof is possibly realizable with
current techniques in ion-trap experiments.
To summarize, we have presented a test to
identify EPR steering based on the AVN argument by measuring
projective probabilities. Our test is applicable to the family of
two-qubit entangled states specified in Ref.~\cite{EPR-us},
whether pure or mixed. We have provided the condition on
experimental implementation of our scheme through expression
(\ref{qqDT}) that EPR steering can be observed in the presence of
experimental imprecision and errors. Our result is the first
experimental test presented to detect EPR steering by resorting to
the AVN proof, and it can be implemented in systems such as
non-Abelian anyons and trapped ions. The primary advantage of our
test based on non-Abelian anyons is that it is fault-tolerant, or
the logical quantum state used is robust against local
perturbations. Specifically, it has been proven that these logical
qubits might be robust to random perturbations~\cite{2010}. Our test
can also be realizable in ion-trap experiments based on current
experimental techniques as recent progress in trapped ion offers
high-fidelity state preparation, quantum gate operations, and state
measurement for optical qubits stored in it. Let us point out that
the realization of our test is not limited to the two systems but
also applicable to many other physical systems like photons, atoms
and superconductors, etc. We expect further investigations in this
direction, both theoretically and experimentally.
\noindent{\bf Methods}
\noindent \textbf{To find the optimal LHS model.}
We here present a theorem which is used to find the optimal LHS model for a given two-qubit state.
\emph{Theorem -- For any given two-qubit state $\rho_{AB}$ in a $N$-setting protocol, if there is a LHS model for the state, then there is a LHS model with the number of hidden states no larger than $2^N$}.
The proof of the Theorem needs two lemmas associated with the concept of a deterministic LHS (dLHS) model, which is a LHS model satisfying
$\wp(a|\hat{A},\xi) \in \{0,1\}, \forall \hat{A}, \xi, a$. We also briefly restate the notations to be used: $\tilde{\rho}^{\hat{A}}_{a}$ is the conditional state of Bob after Alice measures $\hat{A}$ and gets result $a\in\{0,1\}$; the tilde here denotes that this state is unnormalized and its norm is $P^{\hat{A}}_{a}$, the probability associated with the output $a$.
\noindent \emph{Lemma 1.} For any given two-qubit state $\rho_{AB}$, if there is a LHS model for $\rho_{AB}$ then there is a dLHS model for $\rho_{AB}.$
In a general $N$-setting protocol, we have $\hat{A}\in\{\hat{A_1},\hat{A_2},\hat{A_3},\ldots,\hat{A_N}\}$. Suppose $\rho_{AB}$ has a LHS description, thus there is an ensemble $\{\wp_{\xi}\rho_{\xi}\}$ and an associated probability $\wp(a|\hat{A},\xi)$ fulfilling $\tilde{\rho}^{A}_a=\sum_{\xi} \wp(a|\hat{A},\xi) \wp_{\xi} \rho_{\xi}$. We note that $\wp(1|\hat{A},\xi)=1-\wp(0|\hat{A},\xi)$. Now if $\forall \{\xi, \hat{A}\}, \wp(0|\hat{A},\xi)\in\{0,1\}$, then it is a dLHS model. We next check each $\xi$ to see whether $\wp(0|\hat{A},\xi)\in\{0,1\}$. For any $\xi$ with $\wp(0|\hat{A},\xi)\in\{0,1\}$, we keep these terms unchanged. For $\xi=k\;\mid$ $\wp(0|\hat{A},k)\notin\{0,1\}$, we decompose this term into $2^N$ separate terms as follows. First we define a new term $m_a=\sum_{i=1}^{N} 2^{N-i} a_i +1$, where $a_i$ denote the measurement results of $A_i$ ($a_i=0,1$). It is not difficult to find that $m_a$ ranges from $1$ to $2^N$ depending on $a_i$. We then do the decomposition by choosing
\begin{eqnarray}\label{dLHSM}
&&\rho_{k}^{(m_a)}= \rho_{k},\label{dLHSM1}\\
&&\wp_{k}^{(m_a)}=\prod_{i=1}^{N} \wp(a_i|\hat{A_i},k)\wp_k, \label{dLHSM2}\\
&&\wp(0|\hat{A_i},{k}^{(m_a)})= \left\{ \begin{aligned}
&1 \;\;\; \textrm{if} \;\;a_i=0\\
&0 \;\;\; \textrm{else}\end{aligned} \right.\label{dLHSM4}
\end{eqnarray}
where $\rho_{k}^{(m_a)}$ is the hidden state and $\wp_{k}^{(m_a)}$ is its weight. By direct calculations it can be verified that $\wp(a|\hat{A},k)\wp_{k}\rho_{k}=\sum^{2^N}_{m_a=1} \wp(a|\hat{A},{k}^{(m_a)}) \wp_{k}^{(m_a)} \rho_{k}^{(m_a)}$. Eq.~(\ref{dLHSM4}) shows the reconstructed stochastic maps are all deterministic. Thus by this way, we get a dLHS model that satisfies $\tilde{\rho}^{A}_a=\sum_{\xi} \wp(a|\hat{A},\xi) \wp_{\xi} \rho_{\xi}$. $\Box$
\noindent \emph{Lemma 2.} For a dLHS model, $\tilde{\rho}^{A}_a=\sum_{\xi} \wp(a|\hat{A},\xi) \wp_{\xi} \rho_{\xi}$ can be rewritten as $P^{\hat{A}}_{a} \rho^{\hat{A}}_{a}=\sum_{\xi \in H^{\hat{A}}_{a}} \wp_{\xi} \rho_{\xi}$, where $H^{\hat{A}}_{a}$ stands for the set of hidden states that contribute to $\rho^{{\hat{A}}}_{a}$ indicating the corresponding $\wp(a|\hat{A},\xi)=1$. The equality holds if and only if the following equalities are fulfilled,
\begin{equation}\label{MassCenter}
\left\{ \begin{aligned}
P^{{\hat{A}}}_{a} &= \sum_{\xi \in H^{\hat{A}}_{a}} \wp_{\xi} \\
P^{{\hat{A}}}_{a} \; \overrightarrow{r_{a}}^{{\hat{A}}} &= \sum_{\xi \in H^{\hat{A}}_{a}} \wp_{\xi} \; \overrightarrow{r_{\xi}}
\end{aligned} \right.
\end{equation}
where $\overrightarrow{r_{a}}^{{\hat{A}}}$and $\overrightarrow{r_{\xi}}$ are the Bloch vectors of $\rho^{{\hat{A}}}_{a}$ and $\rho_{\xi}$ respectively.
Let us look at the proof of the lemma. We have $\rho^{{\hat{A}}}_{a} = (\mathbf{1}+\overrightarrow{r_{a}}^{{\hat{A}}} \cdot \overrightarrow{\sigma} )/2 $ and $\rho_{\xi} =(\mathbf{1}+\overrightarrow{r_{\xi}} \cdot \overrightarrow{\sigma} )/2$, where $\mathbf{1}$ describes identity matrix. So the equality of $P^{{\hat{A}}}_{a} \rho^{{\hat{A}}}_{a}=\sum_{\xi \in H^{\hat{A}}_{a}} \wp_{\xi} \rho_{\xi}$ gives $P^{{\hat{A}}}_{a} \, \mathbf{1} + P^{{\hat{A}}}_{a} \; \overrightarrow{r_{a}}^{{\hat{A}}} \cdot \overrightarrow{\sigma} = \sum_{\xi \in H^{\hat{A}}_{a}} \wp_{\xi} \mathbf{1} + \sum_{\xi \in H^{\hat{A}}_{a}} \wp_{\xi} \overrightarrow{r_{\xi}} \cdot \overrightarrow{\sigma}$. Thus we obtain Eq.~(\ref{MassCenter}). $\Box$
We would like to point out that Eq. \eqref{MassCenter} is similar to the problem describing center of mass if we treat the probabilities $\wp_{\xi}$ and $P^{\hat{A}}_{a}$ as masses, as well as Bloch vectors ($\overrightarrow{r_{\xi}}$ and $\overrightarrow{r_{a}}^{{\hat{A}}}$) as the position vectors of various masses. Lemma 2 shows that the task to find a dLHS model for a state $\rho^{\hat{A}}_{a}$ with probability $P^{\hat{A}}_{a}$ is equivalent to finding a distribution of masses in the Bloch sphere with total mass $P^{\hat{A}}_{a}$ and center of mass being located at $\overrightarrow{r_{a}}^{{\hat{A}}}$. We show in the following that with the aid of Eq. (\ref{MassCenter}), we can impose constraints on measurement settings to find a dLHS model. If we cannot find a dLHS model for $\rho_{AB}$, Lemma 1 shows that we cannot find a LHS model either, and this thus affirms the steerability of $\rho_{AB}$. For any given $\rho_\xi$, we can always assign an $N$-length bit string constructed from $\wp(a|\hat{A_1},\xi)\wp(a|\hat{A_2},\xi)\cdots\wp(a|\hat{A_N},\xi)$ considering $\wp(a|\hat{A_i},\xi)\in\{0,1\}$. Next let us describe the LHS model by dividing hidden states $\{\rho_\xi\}$ into many subsets with each subset containing all of the $\rho_\xi$ that have the same $N$-length bit string. Thus in this way, each subset is unique, or not overlapping with others. We can take each of the subsets as one new hidden state by resorting to Lemma 2. We use the fact that a hidden state can be treated as a mass point, so we can consider the centre of mass of each subset as the new state and the weight of the new state is the corresponding total mass. It is not difficult to find that there are in total $2^N$ such new states, and thus the LHS model has only $2^N$ hidden states. This ends our proof of the Theorem.
Therefore we conclude that the optimal LHS model contains an
ensemble with four pure hidden states in the two-setting protocol, and more hidden states make no improvement.
The optimal LHS model is numerically obtained by minimizing
function $ F_{n}= \Sigma_{i=1}^{4} v_i^n $ for a large $n$, where $v_i=|P^{\rm LHS}_{i}-P^{\rm
QM}_{i}|$. In this approach, we utilize the knowledge of vector norms. First we have $(F_{n})^{1/n}$, which is the $l_n$-norm of the vector $\vec{v}=(v_1, v_2, v_3, v_4)^{T}$. We know that the $l_\infty$-norm of a vector is just its
maximum element, and hence by definition $\Delta$ equals the minimum of the
$l_\infty$-norm of the vector $\vec{v}=(v_1, v_2, v_3, v_4)^{T}$. So for a large enough
$n$ we can get a good approximation of $\Delta$ from minimizing $(F_{n})^{1/n}$ with varied $\rho_\xi$. In our calculations, we use $n=46$ since we find numerically that the improvement of $\Delta$ by choosing a number larger than $46$ is negligibly small. As shown in Fig. \ref{Dnum} (a) and (b), we find the values of $\Delta$ by choosing $\theta=\pi/8,\;\pi/6$ for $|\Psi\rangle_{AB}$ with different $n$ (ranging from $20$ to $120$). It is clear that the values of $\Delta$ do not change substantially and the change is in the ten-thousandths place when $n$ is greater than $45$. The results show us that $n=46$ is large enough to obtain a reasonable value of $\Delta$. Apparently we can choose other values of $n$ as long as $n>45$, and the choice will not affect the value of $\Delta$ much. We also plot the variation of $\Delta$ versus $\theta$ by choosing different $n$ for $|\Psi\rangle_{AB}$ in Fig. \ref{Dnum} (c). As seen from Fig. \ref{Dnum} (c), the three curves corresponding to $n=46$, $n=50$, $n=100$ respectively are almost overlapping. Hence we know that $n=46$ is large enough to obtain a reasonable value of $\Delta$.
\noindent\textbf{Approximation of quantum gates in non-Abelian
Fibonacci anyons.} Quantum operations can be constructed by using
two elementary braiding operations $R_1,R_2$ acting on the Hilbert
space of three Fibonacci anyons and their
inverses~\cite{2005Bonesteel,2007Simon}. In Fig.~\ref{Braid-Gates},
we plot the braids that approximate the quantum gate
\begin{eqnarray}\label{Ut}
U_{\theta}=\left(\begin{matrix}
\cos\theta&\sin\theta\\
\sin\theta&-\cos\theta
\end{matrix}\right)
\end{eqnarray}
with $U_1=U_{\pi/6}$ and $U_2=U_{-\pi/3}$. Any other quantum gate
$U_{\theta}$ can be obtained in a similar way.
The approximations are obtained by performing brute force searches and
the distance between two matrices $\mathscr{M}$ and
$\mathscr{M}^{\prime}$ is defined as the square root of the largest
eigenvalues of $(\mathscr{M}-\mathscr{M}^{\prime})^{\dagger}
(\mathscr{M}-\mathscr{M}^{\prime})$~\cite{2005Bonesteel,2007Simon}. The
distances between the required operations and the gates resulting
from actual braiding are about $5.7\times10^{-5}$ for $U_1$ and
$U_2$. In fact, these gates can be systematically improved to any
required accuracy due to the Solovay-Kitaev
theorem~\cite{1999Kitaev}. The above quantum gates, together with the controlled-NOT gate
obtained in Refs.~\cite{2005Bonesteel,2007Simon,GHZ-us} are useful in the
construction of EPR steering test by preparing
logical-qubit states. We apply the
operation $U_{\theta}$ (with $\theta\in (0, \pi/2)$) on the logical
qubit A of initial state $|\Psi\rangle^L_0=|0\rangle^L_A\otimes|0\rangle^L_B$ and a controlled-NOT gate is followed on the two logical
qubits, then we have the two-logical-qubit pure states $|\Psi\rangle^L_{AB}$.
To prepare mixed state, we need an ancilla logical qubit C, and
initially assume that the logical qubits are in the state
$|\Psi\rangle^L_0=|0\rangle^L_A\otimes|0\rangle^L_B\otimes|0\rangle^L_C$.
We apply Hadamard gate on logical qubit A, $U_{\theta}$ on logical qubit C, then a controlled-NOT gate on logical qubits A and
B, and finally a controlled-NOT gate on logical qubits C and B, we
then have
$|\Psi\rangle^L_{ABC}=\cos\theta|\psi^+\rangle^L_{AB}|0\rangle^L_C+\sin\theta|\varphi^+\rangle^L_{AB}|1\rangle^L_C$.
Looking at the first two qubits only, we obtain the
state $\rho^L_{AB}$ as in (\ref{rhoL}).
All the operations involved in our scheme, such as $\mathcal {P}^{\hat{z}}_a$
and $\mathcal {P}^{\hat{x}}_a$ for Alice, $|\psi^\perp \rangle^L_B
\langle \psi^\perp |$ and $|\varphi^\perp \rangle^L_B \langle
\varphi^\perp | $ for Bob, can be carried out by braiding the
Fibonacci anyons. For instance, the single-logical-qubit states
$|\psi^\perp\rangle^L_B$ and $|\varphi^\perp\rangle^L_B$ of Bob can
be realized by using the action of $U_{\pm\theta}$ on
$|1\rangle^L_B$ (up to a global phase).
{\bf Acknowledgements}
We thank V. Vedral for valuable discussions.
J.L.C. is supported by National Basic Research Program (973 Program)
of China under Grant No. 2012CB921900 and NSF of China (Grant No.
11175089). This work is also partly supported by National Research
Foundation and Ministry of Education, Singapore (Grant No. WBS:
R-710-000-008-271).
{\bf Author Contributions}
All authors contributed to this work including developing the scheme and preparing the manuscript.
C.W. and J.L.C. initiated the idea. C.W., J.L.C., X.J.Y., H.Y.S., and D.L.D. proposed the scheme.
C.W., J.L.C., Z.W. and C.H.O. wrote the main manuscript text. X.J.Y.
and H.Y.S. prepared the figures. All authors reviewed the
manuscript.
{\bf Supplementary Information} is linked to the online version of
the paper at www.nature.com/nature.
{\bf Additional Information}
{\bf Competing financial interests:} The authors declare no
competing financial interests.
\begin{figure}
\caption{\textbf{Numerical results of the bound $\Delta$ imposed by
the optimal LHS model versus $\theta$.}}
\label{LHS1}
\end{figure}
\begin{figure}
\caption{\textbf{Approximating quantum gates $U_{\theta}$.}}
\label{Braid-Gates}
\end{figure}
\begin{figure}
\caption{\textbf{Variations of $\Delta$ for $|\Psi\rangle_{AB}$.}}
\label{Dnum}
\end{figure}
\end{document} |
\begin{document}
\title{Outlier-Robust Group Inference via Gradient Space Clustering}
\begin{abstract}
Traditional machine learning models focus on achieving good performance on the overall training distribution, but they often underperform on minority groups.
Existing methods can improve the worst-group performance, but they can have several limitations: (i) they require group annotations, which are often expensive and sometimes infeasible to obtain, and/or (ii) they are sensitive to outliers. Most related works fail to solve these two issues simultaneously as they focus on conflicting perspectives of minority groups and outliers. We
address the problem of learning group annotations in the presence of outliers by clustering the data in the space of gradients of the model parameters. We show that data in the gradient space has a simpler structure while preserving information about minority groups and outliers, making it suitable for standard clustering methods like DBSCAN. Extensive experiments demonstrate that our method significantly outperforms state-of-the-art both in terms of group identification and downstream worst-group performance.
\end{abstract}
\section{Introduction}
\label{sec:intro}
Empirical Risk Minimization (ERM), i.e., the minimization of average training loss over the set of model parameters, is the standard training procedure in machine learning. It yields models with strong in-distribution performance\footnote{I.e. low loss on test data drawn from the same distribution as the training dataset.} but does not guarantee satisfactory performance on minority groups that contribute relatively few data points to the training loss function \citep{sagawa2019distributionally,koh2021wilds}.
This effect is particularly problematic when the minority groups correspond to socially-protected groups. For example, in the toxic text classification task, certain identities are overwhelmingly abused in online conversations that form data for training models detecting toxicity \citep{dixon2018measuring}.
Such data lacks sufficient non-toxic examples mentioning these identities, yielding
problematic and unfair spurious correlations -- as a result ERM learns to associate these identities with toxicity \citep{dixon2018measuring,garg2019counterfactual,yurochkin2020sensei}. A related phenomenon is \emph{subpopulation shift} \citep{koh2021wilds}, i.e., when the test distribution differs from the train distribution
in terms of group proportions. Under subpopulation shift, poor performance on the minority groups in the train data translates into poor overall test distribution performance, where these groups are more prevalent or more heavily weighted. Subpopulation shift occurs in many application domains \citep{tatman2017gender,beery2018recognition,oakden2020hidden,santurkar2020breeds,koh2021wilds}.
Prior work offers a variety of methods for training models robust to subpopulation shift and spurious correlations, including group distributionally robust optimization (gDRO) \citep{hu2018does,sagawa2019distributionally}, importance weighting \citep{shimodaira2000improving,byrd2019effect}, subsampling \citep{sagawa2020investigation,idrissi2022balancing,maity2022does}, and variations of tilted ERM \citep{li2020tilted,li2021tilted}. These methods are successful in achieving comparable performance across groups in the data, but they require group annotations. The annotations can be expensive to obtain, e.g., labeling spurious backgrounds in image recognition \citep{beery2018recognition} or labeling identity mentions
in the toxicity example. It also could be challenging to anticipate all potential spurious correlations in advance, e.g., it could be background, time of day, camera angle, or unanticipated identities subject to harassment.
Recently, methods have emerged for learning group annotations \citep{sohoni2020george,liu2021jtt,creager2021eiil} and variations of DRO that do not require groups \citep{hashimoto2018fairness,zhai2021doro}. One common theme is to treat data where an ERM model makes mistakes (i.e., high-loss points) as a minority group \citep{hashimoto2018fairness,liu2021jtt} and increase the weighting of these points. Unfortunately, such methods are at risk of overfitting to outliers (e.g., mislabeled data, corrupted images), which are also high-loss points. Indeed, existing methods for outlier-robust training propose to \emph{ignore} the high-loss points \citep{shen2019learning}, the direct opposite of the approach in \citep{hashimoto2018fairness,liu2021jtt}.
\begin{figure}
\caption{\textbf{An illustration of learning group annotations in the presence of outliers.}
\label{fig:motivating}
\end{figure}
In this paper, our goal is to learn group annotations in the presence of outliers. Rather than using loss values (which above were seen to create opposing tradeoffs), we propose to instead first represent data using gradients of a datum's loss w.r.t.\ the model parameters. Such gradients tell us how a specific data point wants the parameters of the model to change to fit it better. In this gradient space, we anticipate groups (conditioned on label) to correspond to gradients forming clusters. Outliers, on the other hand, largely correspond to isolated gradients: they are likely to want model parameters to change differently from any of the groups \emph{and} other outliers. See Figure~\ref{fig:motivating} for an illustration.
The gradient space structure allows us to separate out the outliers and learn the group annotations via traditional clustering techniques such as DBSCAN \citep{ester1996dbscan}. We use learned group annotations to train models with improved worst-group performance (measured w.r.t.\ the true group annotations).
We summarize our contributions below:
\begin{itemize}
\item We show that gradient space simplifies the data structure and makes it easier to learn group annotations via clustering.
\item We propose Gradient Space Partitioning (\textsc{GraSP}), a method for learning group annotations in the presence of outliers for training models robust to subpopulation shift.
\item We conduct extensive experiments on one synthetic dataset and three datasets from different modalities and demonstrate that our method achieves state-of-the-art both in terms of group identification quality and downstream worst-group performance.
\end{itemize}
\section{Preliminaries and Related Work}
In this section, we review the problem of training models in the presence of minority groups.
Denote $[N] = \set{1,\dots,N}$.
Consider a dataset ${\mathcal{D}} = \set{{\textnormal{v}}z}_{i=1}^{n} \subset {\mathcal{Z}}$ consisting of $n$ samples ${\textnormal{v}}z \in {\mathcal{Z}}$, ${\textnormal{v}}z = ({\textnormal{v}}x, {\textnormal{y}})$, where ${\textnormal{v}}x \in {\mathcal{X}} = {\mathbb{R}}^d$ is the input feature and ${\textnormal{y}} \in {\mathcal{Y}} = \{1,\ldots,C\}$ is the class label.
The samples from each class $y\in{\mathcal{Y}}$ are categorized into $K_y$ groups.
Denote $K$ to be the total number of
groups $\set{{\mathcal{G}}_1, \dots, {\mathcal{G}}_K} \triangleq P \subset {\mathcal{Z}}$, where $K = \sum_{y\in{\mathcal{Y}}} K_y$.
Denote the group membership of each point in the dataset as $\set{{\textnormal{g}}_i}_{i=1}^{n}$, where ${\textnormal{g}}_i \in [K]$ for all $i \in [n]$. For example, in toxicity classification, a group could correspond to a toxic comment mentioning a specific identity, or, in image recognition, a group could be an animal species appearing on an atypical background \citep{beery2018recognition,sagawa2019distributionally}.
The goal is to learn a model $h \in {\mathcal{H}}: {\mathcal{X}} \to {\mathcal{Y}}$ parameterized by ${\boldsymbol{\theta}} \in \Theta$ that performs well on all groups ${\mathcal{G}}_k$, where $k\in[K]$. Depending on the application, this model can alleviate fairness concerns \citep{dixon2018measuring}, remedy spurious correlations in the data \citep{sagawa2019distributionally}, and promote robustness to subpopulation shift \citep{koh2021wilds}, i.e., when the test data has unknown group proportions.
We divide the approaches for learning in the presence of minority groups into three categories: the \emph{group-aware} setting where the group annotations ${\textnormal{g}}_i$ are known, the \emph{group-oblivious} setting that does not use the group annotations, and the \emph{group-learning} setting where the group annotations are learned from data to be used as inputs to the group-aware methods.
\textbf{Group-aware setting.}
Many prior works
assume access to the minority group annotations. Among the state-of-the-art methods in this setting is group Distributionally Robust Optimization (gDRO) \citep{sagawa2019distributionally}.
Let $\ell: {\mathcal{Y}} \times {\mathcal{Y}} \to {\mathbb{R}}$ be a loss function.
The optimization problem of gDRO is
\begin{equation}\label{opt:gdro}
\min_{{\boldsymbol{\theta}}\in\Theta} \max_{k\in[K]} \frac{1}{|{\mathcal{G}}_{k}|}\sum_{{\textnormal{v}}z \in {\mathcal{G}}_{k}} \ell({\textnormal{y}}, h_{\boldsymbol{\theta}}({\textnormal{v}}x)), \tag{gDRO}
\end{equation}
which aims to minimize the maximum group loss.
In addition to assuming clean group annotations, another line of research under this setting considers noisy or partially available group annotations~\citep{jung2022learning,lamy2019noise,mozannar2020fair,celis2021fair}.
Methods in this class achieve meaningful improvements over ERM in terms of worst-group accuracy, but anticipating relevant minority groups and obtaining the annotations is often burdensome.
\textbf{Group-oblivious setting.} In contrast to the group-aware setting, the \emph{group-oblivious} setting attempts to improve worst-group performance without group annotations. Methods in this group rely on various forms of DRO \citep{hashimoto2018fairness,zhai2021doro} or adversarial reweighing \citep{lahoti2020fairnessarl}. Algorithmically, this results in up/down-weighing the contribution of the high/low-loss points. For example, \citet{hashimoto2018fairness} optimizes a DRO objective with respect to a chi-square divergence ball around the data distribution, which is equivalent to minimizing $\frac{1}{n}\sum_i [\ell({\textnormal{y}}, h_{\boldsymbol{\theta}}({\textnormal{v}}x)) - \eta]^2_+$, i.e., an ERM discounting low-loss points by a constant depending on the ball radius.
\textbf{Group-learning setting.}
The final category corresponds to a two-step procedure, wherein the data points are first assigned group annotations based on various criteria, followed by group-aware training typically using gDRO. In this category, Just Train Twice (JTT) \citep{liu2021jtt} trains an ERM model and designates high-loss points as the minority and low-loss points as the majority group;
George \citep{sohoni2020george} seeks to cluster the data to identify groups with a combination of dimensionality reduction, overclustering, and augmenting features with loss values, and Environment Inference for Invariant Learning (EIIL) \citep{creager2021eiil} finds group partition that maximizes the Invariant Risk criterion \citep{arjovsky2019invariant}.
Our method, Gradient Space Partitioning (GraSP), belongs to this category. GraSP differs from prior works in its ability to account for outliers in the data. In addition, prior methods in this and the group-oblivious categories typically require validation data with \emph{true} group annotations for model selection to achieve meaningful worst-group performance improvements over ERM, while GraSP does not need these annotations to achieve good performance. In our experiments, this can be attributed to GraSP's better recovery of the true group annotations, making them suitable for gDRO model selection (see Section~\ref{sec:exp}). We summarize properties of the most relevant methods in each setting in Table~\ref{tab:baseline}.
\begin{table}[t]
\caption{\textbf{Summary of methods for learning in the presence of minority groups. }
"-" indicates that there is no clear evidence in the prior works.
}
\label{tab:baseline}
\begin{center}
\begin{small}\tiny{
\setlength{\tabcolsep}{1.2pt}
\begin{tabularx}{\textwidth}{p{8em}|c|c|cc|cccc}
\toprule
Setting & & Group-aware & \multicolumn{2}{c}{Group-oblivious} & \multicolumn{4}{c}{Group-learning} \\ \midrule
Method & \multirow{2}{*}{ERM} & gDRO & $\chi^2$-DRO & DORO & JTT & EIIL & George & \textsc{GraSP}{} \\
& & \citep{sagawa2019distributionally} & \citep{hashimoto2018fairness} & \citep{zhai2021doro} & \citep{liu2021jtt} & \citep{creager2021eiil} & \citep{sohoni2020george} & (Ours) \\
\midrule
Improves worst-group performance? & \ding{55}& \ding{51}& \ding{51}& \ding{51} & \ding{51} & \ding{51} & \ding{51} & \ding{51} \\
No training group annotations? & \ding{51}& \ding{55} & \ding{51}& \ding{51}& \ding{51}& \ding{51} & \ding{51} & \ding{51} \\
No validation group annotations? & \ding{51} & \ding{55}& \ding{55} & \ding{55}& \ding{55}& \ding{55} & \ding{51} & \ding{51} \\
Group inference? & \ding{55} & \ding{55}& \ding{55} & \ding{55} & \ding{51} & \ding{51} & \ding{51} & \ding{51}\\
Robust to outliers? & \ding{55} & - & \ding{55} & \ding{55}& \ding{55} & - & - & \ding{51} \\
\bottomrule
\end{tabularx}
}
\end{small}
\end{center}
\vskip -0.15in
\end{table}
\textbf{The challenge of outliers.}
Outliers, e.g., mislabeled samples or corrupted images, are ubiquitous in applications \citep{singh2012outlier}, and outlier detection has long been a topic of inquiry in ML \citep{hodge2004survey,wang2019progress}. Outliers are especially challenging to detect when data has (unknown) minority groups, which could be hard to distinguish from outliers but require the opposite treatment:
Minority groups need to be upweighted while outliers must be discarded. \citet{hashimoto2018fairness} write, ``it is an open question whether it is possible to design algorithms which are both fair to unknown latent groups and robust [to outliers].''
We provide an illustration of a dataset with minority groups and an outlier in Figure~\ref{fig:motivating}(a). Figure~\ref{fig:motivating}(b) illustrates the problem with the methods relying on the loss values. Specifically, \citet{liu2021jtt} and \citet{hashimoto2018fairness} upweigh high-loss points, overfitting the outlier. \citet{zhai2021doro} optimize \citet{hashimoto2018fairness}'s objective function after discarding a fraction of points with the largest loss values to account for outliers. They assume that outliers will have higher loss values than the minority group samples, which can easily be violated leading to exclusion of the minority samples, as illustrated in Figure~\ref{fig:motivating}.
\textbf{Gradients as data representations.}
Given a model $h_{{\boldsymbol{\theta}}_0}(\cdot)$ and loss function $\ell(\cdot,\cdot)$,
one can consider an alternative representation of the data where each sample is mapped to the gradient with respect to the model parameters of the loss on this sample:
\begin{equation}
\label{eq:gradient}
{\bm{f}}_i = \left.\frac{\partial \ell ({\textnormal{y}}_i, h_{{\boldsymbol{\theta}}}({\textnormal{v}}x_i))}{\partial {\boldsymbol{\theta}}} \right|_{{\boldsymbol{\theta}} = {\boldsymbol{\theta}}_0}\text{ for }i=1,\ldots,n.
\end{equation}
We refer to \eqref{eq:gradient} as the \tilde{p}h{gradient representation}. Prior works considered gradient representations \citep{mirzasoleiman2020coresets}, as well as loss values \citep{shen2019learning}, for outlier-robust learning. Gradient representations have also found success in novelty detection~\citep{kwon2020novelty}, anomaly detection~\citep{kwon2020backpropagated}, and out-of-distribution inputs detection~\citep{huang2021importance}.
In this work, we show that, unlike loss values, gradient representations are suitable for simultaneously learning group annotations \emph{and} detecting outliers. Compared to the original feature space, gradient space simplifies the data structure, making it easier to identify minority groups. Figure~\ref{fig:motivating}(c) illustrates a failure of feature space clustering. Here the majority group for class $y=0$ is a mixture of three components with one of the components being close to the minority group in the feature space. In the gradient space, for a logistic regression model, representations of misclassified points remain similar to the original features, while the representations of correctly classified points are pushed towards zero. We illustrate the benefits of the gradient representations in Figure~\ref{fig:motivating}(d) and provide additional details in the subsequent section.
\section{\textsc{GraSP}{}: Gradient Space Partitioning}
In this section, we present our method for group inference and outlier detection, which we refer to as Gradient Space Partitioning (\textsc{GraSP}{}).
We first demonstrate that the gradient space is more suitable for using clustering methods to learn group annotations and identify outliers than the feature space.
We support this claim with an example using a logistic regression model and an empirical study of synthetic and semi-synthetic datasets.
We then present the details of \textsc{GraSP}{} in Sec.~\ref{sec:grasp_alg}.
\subsection{Gradient Space vs Feature Space}\label{sec:space}
\textbf{Logistic regression example.}
We present an example based on the logistic regression model to better understand how using the gradient space simplifies the data structure and aids clustering.
Consider a binary classification problem (${\textnormal{y}} \in \{0,1\}$) and logistic regression model ${\mathbb{P}}({\textnormal{y}} = 1|{\textnormal{v}}x) = \sigma({\bm{w}}^\top {\textnormal{v}}x + b)$ trained on the given dataset ${\mathcal{D}}$, where $\sigma(\cdot)$ denotes the sigmoid function, ${\bm{w}}$ are the coefficients and $b$ is the bias. Recall that the logistic regression loss is
\[
\ell({\textnormal{y}},\sigma({\bm{w}}^\top {\textnormal{v}}x + b)) = -{\textnormal{y}} \log(\sigma({\bm{w}}^\top {\textnormal{v}}x + b)) - (1-{\textnormal{y}})\log(1-\sigma({\bm{w}}^\top {\textnormal{v}}x + b)).
\]
The gradient of this loss at point ${\textnormal{v}}z$ w.r.t.\ $({\bm{w}}, b)$ is
\begin{equation}
{\bm{f}} =: \nabla_{[{\bm{w}},b]}\ell({\textnormal{y}},{\bm{w}}^\top {\textnormal{v}}x + b) = (\sigma({\bm{w}}^\top {\textnormal{v}}x + b) - {\textnormal{y}}) \left[\begin{array}{c}{\textnormal{v}}x\\ 1\end{array}\right].
\label{eq:grad}
\end{equation}
Note that this gradient is simply a scaling of the data vector ${\textnormal{v}}x$ by the error $(\sigma({\bm{w}}^\top {\textnormal{v}}x + b) - {\textnormal{y}}) \in [-1,1]$, padded by an additional element (the bias entry) consisting of the error alone. In particular, note that when ${\textnormal{v}}z$ is correctly classified, the scaling $(\sigma( {\bm{w}}^\top{\textnormal{v}}x + b)-{\textnormal{y}})$ will be close to zero, and when it is incorrectly classified, the magnitude of the scaling will approach 1.
\begin{wrapfigure}[25]{r}{0.3\textwidth}
\centering
\vspace{-.23in}
\includegraphics[width=0.3\textwidth]{iclr2023/figures/cosine.png}
\vspace{-.2in}
\caption{
\textbf{
Normalized representations of the data from Figure~\ref{fig:motivating}(a) in (a) feature space and (b) gradient space.
}
The green points are the means (before normalization) of the corresponding representations. Gradient space makes it easier to identify groups and detect outliers via clustering with centered cosine distances.
}
\vspace{-.3in}
\label{fig:cosine}
\end{wrapfigure}
We interpret this gradient (\eqref{eq:grad}) through the lens of Euclidean distance ($\|{\bm{f}}_i - {\bm{f}}_j\|_2$) and centered cosine distance ($1 - \frac{\langle{\bm{f}}_i - \mu_f, {\bm{f}}_j - \mu_f \rangle}{\|{\bm{f}}_i - \mu_f\|_2 \cdot \|{\bm{f}}_j - \mu_f\|_2}$) metrics,\footnote{Here $\mu_f$ refers to the class-conditional empirical mean of ${\bm{f}}$.}
respectively. Recall that we apply clustering to each class independently.
\begin{itemize}
\item \textbf{Euclidean distance.}
The scaling effect mentioned in the previous paragraph shrinks the correctly classified points towards the origin, while leaving the misclassified points almost unaffected. The error itself is included as an extra element (using loss as an additional feature was previously considered as a heuristic in feature clustering for learning group annotations \citep{sohoni2020george}).
Consequently, gradient clustering w.r.t.\ Euclidean distance should cluster the correctly classified samples into one ``majority'' group, and then divide the remaining points into minority groups and outliers based on the size of the error and their position in the feature space. For a visual example, see Figure~\ref{fig:motivating}(d).
\item \textbf{Centered cosine distance.}
We compare the (class conditioned; class dependency omitted for simplicity) centering terms in the gradient and feature spaces, $\mu_f$ and $\mu_{x}$ respectively:
\[
\mu_f = \frac{1}{n}\sum_{i:y_i=c} (\sigma( {\bm{w}}^\top{\textnormal{v}}x_i + b)-{\textnormal{y}}_i)\left[\begin{array}{c}{\textnormal{v}}x_i\\ 1\end{array}\right],
\quad \mu_x = \frac{1}{n}\sum_{i:y_i=c} {\textnormal{v}}x_i.
\]
Due to the underrepresentation of the minority group in the data, the feature space center will be heavily biased towards the majority group which could hinder the clustering as illustrated in Figure~\ref{fig:cosine}(a).
On the other hand, the expression of $\mu_f$ above implies that gradient space center upweighs high-loss points which are more representative of the minority groups, resulting in a center in-between minority and majority groups.
Thus, centering in the gradient space facilitates learning group annotations via clustering with the cosine distance as illustrated in Figure~\ref{fig:cosine}(b).
\end{itemize}
\begin{algorithm2e}[t]
\SetKwInOut{Input}{Input}
\SetKwInOut{Output}{Output}
\Input{DBSCAN hyperparameters ${\epsilon}$ and $m$}
Train the ERM classifier ${\boldsymbol{\theta}}_0 \leftarrow \arg\min_{{\boldsymbol{\theta}} \in \Theta'} \sum_{{\textnormal{v}}z \in {\mathcal{D}}} \ell ({\textnormal{y}}, h_{{\boldsymbol{\theta}}}({\bm{x}}))$ \;
\For{${\textnormal{v}}z \in {\mathcal{D}}$}{
Compute its gradient ${\bm{f}} \leftarrow \frac{\partial \ell ({\textnormal{y}}, h_{{\boldsymbol{\theta}}}({\textnormal{v}}x))}{\partial {\boldsymbol{\theta}}} \mid_{{\boldsymbol{\theta}} = {\boldsymbol{\theta}}_0}$\;
}{}
\For{$y \in {\mathcal{Y}}$}{
Consider all samples $\set{{\textnormal{v}}z_i} \subset {\mathcal{D}}$ with ${\textnormal{y}} = y$ and their corresponding gradients $\set{{\bm{f}}_i}$\;
$\mu_f \leftarrow \operatorname{mean}(\set{{\bm{f}}_i})$, compute the distance matrix $D$, where $D_{ij} = 1 - \frac{\langle {\bm{f}}_i -\mu_f, {\bm{f}}_j -\mu_f \rangle}{\norm{{\bm{f}}_i-\mu_f}\cdot\norm{{\bm{f}}_j-\mu_f}}$\;
Assign group annotations and identify outliers by performing DBSCAN clustering in gradient space: $\set{\hat{{\textnormal{g}}}_i} \leftarrow \operatorname{DBSCAN}(D,{\epsilon},m)$, where $\hat{{\textnormal{g}}}_i = -1$ indicates outliers\;
}{}
\Output{Dataset with predicted group annotations ${\mathcal{D}}' \leftarrow \set{({\textnormal{v}}x, \hat{{\textnormal{g}}}, {\textnormal{y}})}_{\set{\hat{{\textnormal{g}}} \neq -1, {\textnormal{v}}z \in {\mathcal{D}}}}$, where the detected outliers are removed}
\caption{\textsc{GraSP}{}}
\label{alg:grass}
\end{algorithm2e}
\textbf{Quantitative comparison.}
We compare the group identification quality of clustering in feature space and gradient space on two datasets consisting of 4 groups. We consider both clean and contaminated versions. The first dataset is Synthetic based on the Figure~\ref{fig:motivating} illustration. The second dataset is known as Waterbirds \citep{sagawa2019distributionally}. It is a semi-synthetic dataset of images of two types of birds placed on two types of backgrounds. We embed the images with a pre-trained ResNet50 \citep{he2016resnet} model.
To obtain gradient space representations, we trained logistic regression models. See Section~\ref{sec:exp} for additional details.
We consider three popular clustering methods: K-means, DBSCAN with Euclidean distance, and DBSCAN with centered cosine distance.
Group annotations quality is evaluated using the Adjusted Rand Index (ARI) \citep{hubert1985ari}, a measure of clustering quality.
Higher ARI indicates higher group annotations quality, and ARI~$=1$ implies the predicted group partition is identical to the true group partition.
The definition of ARI is provided in Appendix~\ref{app:background}. We summarize the results in Table~\ref{tab:representation}.
Clustering in the gradient space noticeably outperforms clustering in the feature space.
These results provide empirical evidence that gradient space facilitates learning of group annotations via clustering.
Visualizations of the feature space and gradient space of the Synthetic and Waterbirds datasets are provided in Appendix~\ref{sec:space_vis}.
\begin{table}[t]
\caption{
\textbf{Group identification quality of clustering methods in feature space and gradient space measured by Adjusted Rand Index (ARI). }
Higher ARI indicates higher group identification quality.
The results are reported on clean and contaminated versions of Synthetic and Waterbirds datasets.
Three different clustering methods are considered: K-means, DBSCAN w.r.t.\ {} Euclidean distance (DBSCAN/Euclidean), and DBSCAN w.r.t.\ {} centered cosine distance (DBSCAN/Cos).
We set $k=2$ for K-means, which is the number of groups per class in these datasets.
The gradient space clustering noticeably outperforms feature space clustering.}
\label{tab:representation}
\begin{center}
\begin{small}
\begin{sc}
\tiny{
\begin{tabularx}{\textwidth}{lc|ccc|ccc}
\toprule
\multirow{2}{*}{Dataset} & \multirow{2}{*}{Outliers?} & \multicolumn{3}{c}{Feature Space} & \multicolumn{3}{c}{Gradient Space} \\
& & K-means & DBSCAN/Euclidean & DBSCAN/Cos & K-means & DBSCAN/Euclidean & DBSCAN/Cos \\ \midrule
\multirow{2}{*}{Synthetic} & \ding{55} & .5505 & .5923 & .5133 & \textbf{.8409} & .7724 & .6943 \\
& \ding{51} & .3631 & .6042 & .4946 & .6436 & \textbf{.7237} & .6944\\ \midrule
\multirow{2}{*}{Waterbirds} & \ding{55} & .3932 & .0000 & .0418 & .7235 & .7304 & \textbf{.7453} \\
& \ding{51} & .3932 & .0000 & .0418 & .7171 & .7304 & \textbf{.7453} \\
\bottomrule
\end{tabularx} }
\end{sc}
\end{small}
\end{center}
\end{table}
\subsection{\textsc{GraSP}{} for Group Inference and Outlier Identification}\label{sec:grasp_alg}
Having motivated our choice of performing clustering in the gradient space, we now present \textsc{GraSP}{} in detail.
We then describe how to train a distributionally and outlier robust model using \textsc{GraSP}{}.
\textbf{Clustering method and distance measure.} Results in Table~\ref{tab:representation} indicate that both K-means and DBSCAN perform well in the gradient space. DBSCAN is a density-based clustering algorithm, where clusters are defined as areas of higher density, while the rest of the data is considered outliers. In this work, we choose to use DBSCAN for its ability to identify outliers, which is an important aspect of the problem we consider. As an additional benefit, unlike K-means, it does not require knowledge of the number of groups. See Appendix~\ref{app:background} for a detailed description of DBSCAN.
In terms of distance measure, we recommend cosine distance due to its better performance on the Waterbirds data, which more closely resembles real data. We note that the distance and clustering method choices could be reconsidered depending on the application. For example, for Gaussian-like data without outliers, K-means performed better in Table~\ref{tab:representation}.
\textbf{\textsc{GraSP}{}.} We present the pseudocode of \textsc{GraSP}{} in Algorithm~\ref{alg:grass}.
We first train an ERM classifier $h_{{\boldsymbol{\theta}}}(\cdot)$ and collect the gradients of sample's loss w.r.t.\ ~model parameters ${\boldsymbol{\theta}}$.
We then compute the pairwise centered cosine distances within each class $y\in{\mathcal{Y}}$ using gradient representations, as discussed in Sec.~\ref{sec:space}.
Lastly, to estimate the group annotations and identify outliers, we apply DBSCAN on these distance matrices for each class $y\in {\mathcal{Y}}$.
\textbf{Training models with improved worst-group performance in the presence of outliers using \textsc{GraSP}{}.}
We discard the identified outliers and then provide learned group annotations as inputs to a Group-aware method of choice. For concreteness, in this work, we will use \eqref{opt:gdro}. Specifically, we employ
the method of \citet{sagawa2019distributionally} to solve the gDRO problem, which is a stochastic optimization algorithm with convergence guarantees. We note that other choices could be appropriate. For example, methods accounting for noise in group annotations~\citep{lamy2019noise,mozannar2020fair,celis2021fair}
are interesting to consider as they could counteract mistakes in \textsc{GraSP}{} annotations.
\paragraph{Remark.} We note that
the model $h_\theta$ and parameter space
$\Theta$ used for computing gradient representations ${\bm{f}}$ and learning group annotations with \textsc{GraSP}{} can be different from the classifier and parameter space used for the final model training.
For example, one can train a logistic regression model (using features from a pre-trained model when appropriate) and collect the corresponding gradients for \textsc{GraSP}{}, and then train a deep neural network of choice with the estimated group annotations.
\section{Experiments}\label{sec:exp}
In this section, we conduct extensive experiments on both synthetic and benchmark datasets to evaluate the performance of \textsc{GraSP}{}.
\footnote{Our code is available in Github repository~\url{https://github.com/yzeng58/private_demographics}.}
Our results show that \textsc{GraSP}{} outperforms the state-of-the-art baselines in terms of group identification quality and downstream worst-group performance while providing robustness to outliers.
\subsection{Datasets and baselines}
\textbf{Synthetic.} We generate a synthetic dataset of 1,000 samples with two features ${\mathbf{x}}\in{\mathbb{R}}^2$, a group attribute ${\textnormal{g}}\in[4]$, and a binary label ${\textnormal{y}} \in \set{0,1}$, similar to the motivating example of Figure~\ref{fig:motivating}.
\textbf{(Clean):} The synthetic dataset consists of 10 Gaussian clusters with a variance of 0.01, and each Gaussian cluster contains 100 samples.
Class 0 is divided into two groups: group 3 consists of four Gaussian clusters with centers $(1,5),(1,3),(1,2),(1,1)$; group 2 consists of one Gaussian cluster with center $(0,4)$.
Similarly, Class 1 is divided into two groups: group 1 consists of four Gaussian clusters with centers $(0,5),(0,3),(0,2),(0,1)$; group 2 consists of one Gaussian cluster with center $(1,4)$.
\textbf{(Contaminated):} We contaminate the synthetic dataset by flipping randomly selected 5\% of labels.
The contaminated synthetic dataset is visualized in Appendix Figure~\ref{fig:syn_outlier_vis}.
\textbf{Waterbirds.} \textbf{(Clean):} Waterbirds \citep{sagawa2019distributionally,wah2011cub} is a semi-synthetic image dataset of land birds and water birds \citep{wah2011cub} placed on either land or water backgrounds using images from the Places dataset \citep{zhou2017places}. There are 11,788 images of birds on their typical (majority) and atypical (minority) backgrounds.
The task is to predict the types of birds and the background type is the group (2 background types per class, a total of 4 groups).
We follow an identical procedure to \citet{idrissi2022balancing} to pre-process the dataset.
\textbf{(Contaminated):} We contaminate the Waterbirds dataset by introducing outliers in the training and validation datasets.
We flip the class labels of 2\% of the data, transform 1\% of the images with Gaussian blurring, color dither (randomly change the brightness, saturation, and contrast of the images) 1\% of the images, and posterize 1\% of the images maintaining 4 bits per color channel.
We visualize a contaminated example in Appendix Figure~\ref{fig:waterbirds_process}.
\textbf{COMPAS \& CivilComments.} Both datasets are real and collected by humans, therefore likely to contain outliers.
\textbf{(COMPAS):} COMPAS \citep{compas} is a recidivism risk score prediction dataset consisting of 7,214 samples.
Each class $y\in\set{0,1}$ is divided into six groups: Caucasian males, Caucasian females, African-American males, African-American females, males of other races, and females of other races, making 12 groups in total.
\textbf{(CivilComments):} CivilComments \citep{dixon2018measuring,koh2021wilds} is a language dataset containing online forum comments.
The task is to predict whether comments are toxic or not.
We follow a procedure identical to \citet{idrissi2022balancing} to preprocess the dataset.
We divide comments in each class into two groups according to the presence or absence of identity terms pertaining to protected groups (LGBT, Black, White, Christian, Muslim, other religion).
\textbf{Experimental baselines.} We compare \textsc{GraSP}{} to four different types of baselines: (1) standard empirical risk minimization (ERM), (2) a group-aware method (gDRO~\citep{sagawa2019distributionally}), (3) a group-oblivious method (DORO, CVaR-DORO variation~\citep{zhai2021doro}), and (4) two group-learning methods (EIIL~\citep{creager2021eiil}, George~\citep{sohoni2020george}). We chose DORO among the methods relying on loss values to improve worst-group performance because it is the only method from this group designed to be robust to outliers. Recall that only the group-aware method (gDRO) has access to the true group annotations, thus it should be interpreted as an ``oracle'' baseline.
We also perform an ablation study by considering an additional group-learning baseline, Feature Space Partitioning (FeaSP). It is identical to GraSP except it performs DBSCAN clustering in the feature space. Comparison to FeaSP emphasizes the importance of clustering in the gradient space as opposed to other choices such as the clustering method and distance measure.
\begin{table}[t]
\caption{
\textbf{Group identification performance of group-learning methods measured by Adjusted Rand Index (ARI). }
Higher ARI indicates higher group identification quality.
The results are reported on clean and contaminated versions of Synthetic and Waterbirds datasets, COMPAS and CivilComments datasets.
\textsc{GraSP}{} significantly outperforms the other group-learning baselines on all the tested datasets.
Moreover, we observe that \textsc{GraSP}{} is robust to outliers.
}
\label{tab:ari}
\begin{center}
\begin{small}
\begin{sc}
\begin{tabularx}{.9\textwidth}{lcccccc}
\toprule
& \multicolumn{2}{c}{Synthetic} & \multicolumn{2}{c}{Waterbirds} & \multirow{2}{*}{COMPAS} & \multirow{2}{*}{CivilComments}\\
Outliers? & \ding{55} & \ding{51} & \ding{55} & \ding{51} & & \\
Method & ARI & ARI & ARI& ARI& ARI& ARI \\
\midrule
EIIL & -.0069& -.0043 & .0114 & .0078 & -.0025 & -.0001 \\
George&.6027& .4565 & .2832 & .2600 & .1962 & .1422 \\
FeaSP & .5133 & .4946 & .0418 & .0418 & .2956 & .2093 \\
\textsc{GraSP}{} (Ours) & \textbf{.6943} & \textbf{.6944} & \textbf{.7453} & \textbf{.7453} & \textbf{.5453} & \textbf{.2639}\\
\bottomrule
\end{tabularx}
\end{sc}
\end{small}
\end{center}
\vskip -0.15in
\end{table}
\subsection{Evaluation of \textsc{GraSP}{}}
In this section, we assess the performance of \textsc{GraSP}{} in terms of group identification and downstream tasks of training models with comparable performance across groups, both with and without outliers.
In all experiments, we consider true group annotations unknown in both train and validation data (except for ``oracle'' gDRO which has access to true group annotations in both train and validation data). Arguably, this setting is more practical due to group annotations often being expensive or infeasible to obtain, even for a smaller validation set. We note that this setting differs from the majority of prior works considering unknown group annotations (see Table~\ref{tab:baseline}). For example, inspecting Table~5 in Appendix~B.2 of \citet{zhai2021doro}, we notice that their DORO is unable to improve upon ERM without access to validation data with true group annotations (see results for non-oracle model selection). We report results with known validation group annotations in Appendix~\ref{sec:w_val}.
\textbf{Group annotations quality.}
The first experiment examines the quality of group annotations learned with \textsc{GraSP}{}.
To collect the gradients of the data's losses w.r.t.\ the model parameters, we train a logistic regression model on the Synthetic dataset, a three-layer ReLU neural network with 50 hidden neurons per layer on the COMPAS dataset, and a BERT~\citep{devlin2018bert} model on the CivilComments dataset (due to the large number of parameters in BERT, we only consider the last transformer and the subsequent prediction layer when extracting gradients).
For the Waterbirds dataset, we first featurize the images using a ResNet50 pre-trained on ImageNet \citep{deng2009imagenet}, and then train a logistic regression.
We then use DBSCAN clustering with centered cosine distance. We select DBSCAN hyperparameters using standard clustering metrics that do not require knowledge of the true group annotations, see Appendix~\ref{sec:hyperparameter}.
In Table~\ref{tab:ari}, we compare group identification quality of \textsc{GraSP}{} (measured with ARI) to three group learning baselines, EIIL, George, and FeaSP, across four datasets. There are two key observations supporting the claims made in this paper: (i) clustering in the gradient space (\textsc{GraSP}{}) outperforms clustering in the feature space (FeaSP and George), as well as other baselines (EIIL); (ii) \textsc{GraSP}{} is robust to outliers, i.e., it performs equally well in the presence and absence of outliers. To comment on the low ARI of EIIL, we note that the Invariant Risk criterion EIIL optimizes was designed primarily for invariant learning (i.e., learning environment labels) \citep{arjovsky2019invariant,creager2021eiil}, which may not be suitable for learning group annotations.
\textbf{Worst-group performance.} This is the standard metric when comparing methods for training ML models with comparable performance across groups (evaluated w.r.t.\ true group annotations) \citep{sagawa2019distributionally,koh2021wilds}. For the group-learning methods (\textsc{GraSP}{}, FeaSP, George, EIIL), we first discard identified outliers if applicable (\textsc{GraSP}{} and FeaSP), and then train gDRO with the corresponding learned group annotations. We also use the learned group annotations on the validation data to select the corresponding gDRO hyperparameters. In Appendix~\ref{sec:robust_cluster} we demonstrate that \textsc{GraSP}{} worst-group performance is fairly robust to the corresponding DBSCAN hyperparameters. For ERM and DORO we used the validation set overall performance for hyperparameter selection.
For all methods, on a given dataset, we train models with the same architecture and initialization. Recall that these models can be different from the models used in estimating group annotations with any of the group-learning methods. See Appendix~\ref{sec:hyperparameter} for details.
We summarize results in Table~\ref{tab:worst}. \textsc{GraSP}{} outperforms baselines on Synthetic, COMPAS, and CivilComments datasets. For the Waterbirds dataset, \textsc{GraSP}{} also performs relatively well. Interestingly, EIIL performs best on the contaminated Waterbirds dataset, despite the poor ARI discussed earlier. It is, however, failing on the COMPAS dataset. We also notice that \textsc{GraSP}{} outperforms ``oracle'' gDRO on Synthetic and COMPAS datasets. This could be due to the fact that gradient space clustering helps to focus on ``harder'' instances, as discussed in Section~\ref{sec:space}, while the available (``oracle'') group annotations (at least on COMPAS), might be noisy.
\begin{table}[t]
\caption{
\textbf{Downstream worst-group accuracy and average accuracy on the test data.}
The average test accuracy is a re-weighted average of the group-specific accuracies, where the weights are based on the training distribution.
The results are reported on clean and contaminated versions of Synthetic and Waterbirds datasets, COMPAS and CivilComments datasets.
We observe that \textsc{GraSP}{} significantly outperforms the group-oblivious (DORO) and other group-learning approaches (EIIL, George, FeaSP) methods on Synthetic, COMPAS, and CivilComments datasets, and performs relatively well on Waterbirds datasets, while being robust to outliers.
}
\label{tab:worst}
\begin{center}
\begin{small}
\tiny{
\begin{sc}
\setlength{\tabcolsep}{8pt}
\begin{tabularx}{\textwidth}{lcccccc}
\toprule
& \multicolumn{2}{c}{Synthetic} & \multicolumn{2}{c}{Waterbirds} & \multirow{2}{*}{COMPAS} & \multirow{2}{*}{CivilComments}\\
Outliers? & \ding{55} & \ding{51} & \ding{55} & \ding{51} & & \\
Method & Worst.(Avg.) & Worst.(Avg.) & Worst.(Avg.)& Worst.(Avg.)& Worst.(Avg.)& Worst.(Avg.) \\
\midrule
ERM & .6667(.8823) & .5333(.8273) & .6075(.9673) & .5249(.9621) & .4706(.6792) & .4659(.9213) \\ \midrule
DORO & .6667(.8823) & .6000(.8342) & .5888(.9694) & .6636(.9686) & .4706(.6801) & .4905(.9182)\\
EIIL & .6667(.8783) & .6000 (.8115) & .6916 (.9645) & \textbf{.7056(.9629)} & .0588 (.6046) & .6056(.9066) \\
George &.5333(.8732) & .6000(.8342) & \textbf{.7523}(.9612) & .5897(.9100) & .4416(.6232) & .5897(.9100)\\
FeaSP & .6667(.8823) & .6667(.8823)& .1417(.9346) & .1417(.9346) & .4416(.6232) & .6056(.9066) \\
\textsc{GraSP}{} (Ours) & \textbf{.8000}(.8926)& \textbf{.8000}(.8926) & .6854(.9654) & .6798(.9004) & \textbf{.4743}(.6717) & \textbf{.6798(.9004)} \\ \midrule
gDRO (oracle) & .7333(.8639) & .8000(.8755) & .8665(.9272) & .8545(.9081) & .4625(.6807) & .6941(.8767) \\
\bottomrule
\end{tabularx}
\end{sc}
}
\end{small}
\end{center}
\vskip -0.15in
\end{table}
\section{Conclusion}
In this work, we considered the problem of learning group annotations in the presence of outliers. Our method allows training models with comparable performance across groups to alleviate spurious correlations and accommodate subpopulation shifts when group annotations are not available and need to be estimated from data. We accomplished this by leveraging existing outlier-robust clustering approaches to estimate the group (and outlier) memberships of each point in the dataset. Key to our proposed approach is performing the clustering in the \emph{gradient space}, where the gradient is of the loss at each point with respect to model parameters. We provided strong intuitive and empirical justifications for using the gradient space over the feature space.
Finally, we provided a variety of synthetic and real-world experiments where \textsc{GraSP}{} consistently outperformed or nearly matched the performance of all comparable baselines in terms of both learned group annotations quality and downstream worst-group performance.
One advantage of the gradient space is the simplification of the structure of the correctly classified points (often the majority group), which is also a limitation if identifying \emph{subgroups} within the majority group is of interest. This does not affect the downstream worst-group performance, but may be undesirable from the exploratory data analysis perspective.
As a next step, when training models with \textsc{GraSP}{} group annotations, it would be interesting to consider alternatives to gDRO that are accounting for noise in group annotations \citep{lamy2019noise,mozannar2020fair,celis2021fair} to counteract \textsc{GraSP}{} estimation error. Alternatively, one can consider training with group-oblivious methods such as DORO \citep{zhai2021doro} and performing model selection on the validation data with \textsc{GraSP}{} group annotations.
\section*{Acknowledgments}
We thank Luann Jung for conducting a preliminary study of \textsc{GraSP}{} for her Master's thesis.
Kangwook Lee was supported by NSF/Intel Partnership on Machine Learning for Wireless Networking Program under Grant No. CNS-2003129 and by the Understanding and Reducing Inequalities Initiative of the University of Wisconsin-Madison, Office of the Vice Chancellor for Research and Graduate Education with funding from the Wisconsin Alumni Research Foundation.
The MIT Geometric Data Processing group acknowledges the generous support of Army Research Office grants W911NF2010168 and W911NF2110293, of Air Force Office of Scientific Research award FA9550-19-1-031, of National Science Foundation grants IIS-1838071 and CHS-1955697, from the CSAIL Systems that Learn program, from the MIT--IBM Watson AI Laboratory, from the Toyota--CSAIL Joint Research Center, and from a gift from Adobe Systems.
\appendix
\vspace{-.4in}
\section{Background}\label{app:background}
In this section, we provide the details of Adjusted Rand Index (ARI), and describe the complete algorithm of DBSCAN.
\paragraph{Adjusted Rand Index (ARI)}~\citep{hubert1985ari}
The Adjusted Rand Index (ARI) is a measure of the degree of agreement between two data partitions and accounts for the chance grouping of elements in the data sets.
In our case, consider true group partition $P$ and estimated group partition $\hat{P}$.
ARI can be computed by
\begin{equation}
\mathrm{ARI}(P,\hat{P})
= \frac{\sum_{k,k'}\binom{n_{kk'}}{2}
        - \left[\sum_{k}\binom{n_{k}}{2}\sum_{k'}\binom{n_{k'}}{2}\right]\Big/\binom{n}{2}}
       {\frac{1}{2}\left[\sum_{k}\binom{n_{k}}{2}+\sum_{k'}\binom{n_{k'}}{2}\right]
        - \left[\sum_{k}\binom{n_{k}}{2}\sum_{k'}\binom{n_{k'}}{2}\right]\Big/\binom{n}{2}},
\end{equation}
where $n_{kk'}$ is the number of data points belonging to ${\mathcal{G}}_k \in P$ assigned to group $\hat{{\mathcal{G}}}_{k'} \in \hat{P}$, $n_k = |{\mathcal{G}}_k|$, $n_{k'} = |\hat{{\mathcal{G}}}_{k'}|$, and $n$ is the total number of samples in the dataset.
\paragraph{DBSCAN}~\citep{ester1996dbscan}
DBSCAN is a clustering and outlier-detecting method that does not require the number of clusters to be known.
It operates on a distance matrix $D$.
We call a sample a ``core sample'' if there exist $m$ other samples within a distance of ${\epsilon}$ from this sample.
DBSCAN starts with a single cluster that contains an arbitrary core sample and adds core samples from the neighborhood of the cluster to the cluster until all core samples in the ${\epsilon}$-neighborhood of the cluster have been visited.
It then adds the remaining samples in the ${\epsilon}$-neighborhood of the cluster to the cluster.
Next, DBSCAN creates another cluster and expands that cluster by finding unvisited core samples.
It then repeats this process of creating and expanding clusters until all core samples have been visited.
Any remaining samples that are not added to a cluster are considered outliers.
Note that DBSCAN clustering requires two hyperparameters (${\epsilon}, m$) and a distance matrix $D$ as input.
\section{Visualization of Gradient Space and Feature Space}\label{sec:space_vis}
In this section, we visualize the gradient space and feature space of contaminated Synthetic (see Fig.~\ref{fig:grad_syn}) and Waterbirds dataset (see Fig.~\ref{fig:grad_waterbirds}).
\begin{figure}
\caption{Visualization of input features and 2D t-SNE results of gradients with $y=0$ on contaminated Synthetic dataset. (i) Input features. (ii) 2D t-SNE results of gradients.}
\label{fig:grad_syn}
\caption{3D t-SNE visualization of features and gradients with $y=1$ on contaminated Waterbirds dataset. Left: 3D t-SNE visualization of features extracted from ResNet-50 pretrained on ImageNet~\citep{deng2009imagenet}
\label{fig:grad_waterbirds}
\end{figure}
\section{Experiment}
\begin{figure}
\caption{(a) Scatter plot of contaminated Synthetic dataset.
(b) Original image of \texttt{010.Red\_winged\_Blackbird/Red\_Winged\_Blackbird\_0079\_4527.jpg}
\label{fig:syn_outlier_vis}
\label{fig:waterbirds_original}
\label{fig:waterbirds_process}
\end{figure}
\subsection{More Details of Experiment Setup}\label{sec:hyperparameter}
\begin{figure}
\caption{Group identification quality of \textsc{GraSP}
\label{fig:waterbirds_heatmap_ari_0}
\end{figure}
\begin{figure}
\caption{Group identification quality of \textsc{GraSP}
\label{fig:waterbirds_heatmap_ari_1}
\end{figure}
\paragraph{Datasets}
The batch sizes of the Synthetic, Waterbirds, COMPAS, and CivilComments datasets are 128, 128, 128, and 32, respectively, for both group inference and downstream DRO tasks.
We split the Synthetic and COMPAS datasets into training, validation, and test datasets at the ratio of 0.6:0.2:0.2.
We follow an identical procedure to \citet{idrissi2022balancing} to pre-process the Waterbirds and Civilcomments dataset.
Fig.~\ref{fig:syn_outlier_vis} visualizes the contaminated Synthetic dataset.
We provide an example of a contaminated sample in Fig.~\ref{fig:waterbirds_original} and Fig.~\ref{fig:waterbirds_process}, which present an image before and after Gaussian blurring.
\paragraph{Group annotations quality.}
To collect the gradients of the corresponding datum's loss w.r.t.\ the model parameters, we train a logistic regression model on 50 epochs on the Synthetic dataset, a three-layer ReLU neural network with 50 hidden neurons for
300 epochs on the COMPAS dataset, and a BERT~\citep{devlin2018bert} model for 10 epochs on the
CivilComments dataset. For the Waterbirds dataset, we first featurize the images using a ResNet50
pre-trained on ImageNet~\citep{deng2009imagenet}, and then train a logistic regression for 360 epochs.
We tune the DBSCAN clustering hyperparameters ${\epsilon}\in\set{.1,.2,.3,.5,.7}, m\in\set{10,20,30,50,70,100}$ for each $y\in {\mathcal{Y}}$, for both FeaSP and \textsc{GraSP}{}.
We tune the learning rate of EIIL in $\set{10^{-1}, 10^{-2}, 10^{-3}, 10^{-4}}$, run EIIL for 50 epochs on Synthetic, Waterbirds, and COMPAS datasets, three epochs on Civilcomments dataset.
We tune the overcluster factor of George in $\set{1,2,5,10}$, and employ the over-cluster Gaussian Mixture Model clustering for George.
Lastly, we select the best EIIL epoch and other hyperparameters based on the \emph{Silhouette Coefficient}, a measure assessing the clustering quality in terms of the degree to which a sample clusters with other similar samples.
\paragraph{Worst-group performance.}
We use Adam optimizer for all trainings.
We tune the outlier fraction ${\epsilon} \in \set{.005,.01,.02,.1,.2}$ and minimal group fraction $\in \set{.1,.2,.5}$ for (CVaR-)DORO on all datasets.
We tune the learning rate $\in \set{10^{-5}, 10^{-4}, 10^{-3}}$ and weight decay $\in \set{10^{-4}, 10^{-3}, 10^{-2}}$ for all methods.
We select the step size of the group weights $q$ in gDRO~\citep{sagawa2019distributionally} $\in \set{.001, .01, .1}$.
We train a three-layer ReLU neural network with 50 hidden neurons per layer for 50 and 300 epochs on the Synthetic and COMPAS datasets, respectively.
We train a logistic regression model with 360 epochs on the Waterbirds dataset, and a BERT model for 10 epochs on the Civilcomments dataset.
\begin{figure}
\caption{Worst-group accuracy of \textsc{GraSP}
\label{fig:waterbirds_heatmap_acc}
\end{figure}
\begin{figure}
\caption{Worst-group accuracy of \textsc{GraSP}
\label{fig:compas_heatmap_acc}
\end{figure}
\subsection{Robustness to DBSCAN Clustering Hyperparameters}\label{sec:robust_cluster}
In this experiment, we investigate the effect of clustering hyperparameters on group inference and downstream DRO task performances.
In doing so, we let ${\epsilon} \in \set{.1,.15,.2,.25,.3,.35,.4,.45,.5,.55,.6,.65,.7}$ and $m \in \set{5,10,20,30,40,50,60,100}$ and visualize how ARI varies with different choices of clustering hyperparameters on different classes of the Waterbirds dataset in Fig.~\ref{fig:waterbirds_heatmap_ari_0} and Fig.~\ref{fig:waterbirds_heatmap_ari_1}.
We observe that the group identification performance is robust to clustering hyperparameters.
For worst-group performance, we set the ${\epsilon}$ and $m$ to be the same for different classes on the datasets.
We visualize how it varies with clustering hyperparameters on the Waterbirds and COMPAS datasets in Fig.~\ref{fig:waterbirds_heatmap_acc} and Fig.~\ref{fig:compas_heatmap_acc}.
A similar phenomenon is observed for worst-group performance — we find that worst-group performance is fairly robust to DBSCAN clustering hyperparameters.
\subsection{Results with Known Validation Group Annotations}\label{sec:w_val}
\begin{table}[h]
\caption{
\textbf{Group identification performance of Group-learning methods measured by Adjusted Rand Index (ARI) when validation group annotations are available. }
Higher ARI indicates higher group identification quality.
The results are reported on clean and contaminated versions of Synthetic and Waterbirds datasets, COMPAS and Civilcomments datasets.
Our \textsc{GraSP}{} significantly outperforms the other Group-learning baselines on all the tested datasets.
Moreover, we observe that \textsc{GraSP}{} is robust to outliers.
}
\label{tab:ari_val}
\begin{center}
\begin{small}
\begin{sc}
\begin{tabularx}{.9\textwidth}{lcccccc}
\toprule
& \multicolumn{2}{c}{Synthetic} & \multicolumn{2}{c}{Waterbirds} & \multirow{2}{*}{COMPAS} & \multirow{2}{*}{Civilcomments}\\
Outliers? & \ding{55} & \ding{51} & \ding{55} & \ding{51} & & \\
Method & ARI & ARI & ARI& ARI& ARI& ARI \\
\midrule
EIIL & -.0069& -.0031 & .0114 & .0078 & -.0025 & -.0001 \\
George &.6027& .4565 & .3223 & .3822 & .2059 & .2218 \\
FeaSP & .5189 & .5276 & .5189 & .1069 & .2956 & .2072 \\
\textsc{GraSP}{} (Ours) & \textbf{.7497} & \textbf{.7241} & \textbf{.8137} & \textbf{.7531} & \textbf{.5453} & \textbf{.2863}\\
\bottomrule
\end{tabularx}
\end{sc}
\end{small}
\end{center}
\end{table}
\begin{table}[h]
\caption{
\textbf{Downstream DRO performance of various methods measured by worst-group accuracy and average accuracy on the test dataset when validation group annotations are available. }
The average test accuracy is a re-weighted average of the group-specific accuracies, where the weights are based on the training distribution.
The results are reported on clean and contaminated version of Synthetic and Waterbirds datasets, COMPAS, and Civilcomments datasets.
We observe that \textsc{GraSP}{} significantly outperforms the group-oblivious (DORO) and group-learning (EIIL, George, FeaSP) methods on Synthetic, COMPAS, and Civilcomments datasets, and performs relatively well on Waterbirds datasets, while being robust to outliers.
Note that \textsc{GraSP}{} sometimes outperforms gDRO (oracle), which can get access to the true group annotations.
This is because \textsc{GraSP}{} may focus on ``harder'' instances more, which potentially affect the results most.
}
\label{tab:worst_val}
\begin{center}
\begin{small}
\tiny{
\begin{sc}
\setlength{\tabcolsep}{8pt}
\begin{tabularx}{\textwidth}{lcccccc}
\toprule
& \multicolumn{2}{c}{Synthetic} & \multicolumn{2}{c}{Waterbirds} & \multirow{2}{*}{COMPAS} & \multirow{2}{*}{Civilcomments}\\
Outliers? & \ding{55} & \ding{51} & \ding{55} & \ding{51} & & \\
Method & Worst.(Avg.) & Worst.(Avg.) & Worst.(Avg.)& Worst.(Avg.)& Worst.(Avg.)& Worst.(Avg.) \\
\midrule
ERM & .6667(.8823) & .5333(.8273) & .6075(.9673) & .5249(.9621) & .4706(.6792) & .4659(.9213) \\ \midrule
DORO & .6667(.8823) & .7333(.8332) & .6604(.9669) & .6056(.9066) & .4387(.6696) & .6056(.9066)\\
EIIL & .6667(.8783) & .7333 (.8170) & .6927 (.9649) & \textbf{.7056(.9629)} & .0588 (.6046) & .6056(.9066) \\
George &.6667(.8823) & .7333(.8227) & \textbf{.8053}(.9511) & .6056(.9066) & .4664(.6219) & .6056(.9056)\\
FeaSP & .6667(.8823) & \textbf{.8000(.8391)}& .1417(.9346) & .1417(.9346) & .4545(.6386) & .6056(.9066) \\
\textsc{GraSP}{} (Ours) & \textbf{.8000}(.8926)& \textbf{.8000}(.8372) & .7274(.9541) & .6804(.8999) & \textbf{.4743}(.6681) & \textbf{.6804(.8999)} \\ \midrule
gDRO (oracle) & .7333(.8639) & .8000(.8755) & .8665(.9272) & .8545(.9081) & .4625(.6807) & .6941(.8767) \\
\bottomrule
\end{tabularx}
\end{sc}
}
\end{small}
\end{center}
\end{table}
Lastly, we report the experiment results on group inference and downstream DRO tasks where the hyperparameters of Group-oblivious (DORO) and Group-learning (EIIL, George, FeaSP, \textsc{GraSP}{}) are selected based on the corresponding metric computed with true validation group annotations.
The same phenomenon we observe under the setting of unavailable validation group annotations also holds under this setting.
Table~{\textnormal{e}}f{tab:ari_val} shows that \textsc{GraSP}{} also significantly outperforms the other Group-learning methods in terms of group learning when true validation group annotations are available.
For downstream DRO tasks, while the worst-group accuracy of all Group-oblivious and Group-Learning methods are improved, \textsc{GraSP}{} still achieves the highest worst-group performance on Synthetic, COMPAS, and Civilcomments datasets and performs relatively well on Waterbirds dataset.
\end{document} |
\begin{document}
\title{Entanglement detection with imprecise measurements}
\author{Simon Morelli}\thanks{S.M. and H.Y. contributed equally to this manuscript.}
\author{Hayata Yamasaki}\thanks{S.M. and H.Y. contributed equally to this manuscript.}
\author{Marcus Huber}
\author{Armin Tavakoli}
\affiliation{Institute for Quantum Optics and Quantum Information -- IQOQI Vienna, Austrian Academy of Sciences, Boltzmanngasse 3, 1090 Vienna, Austria}
\affiliation{Atominstitut, Technische Universit{\"a}t Wien, Stadionallee 2, 1020 Vienna, Austria}
\begin{abstract}
We investigate entanglement detection when the local measurements only nearly correspond to those intended. This corresponds to a scenario in which measurement devices are not perfectly controlled, but nevertheless operate with bounded inaccuracy. We formalise this through an operational notion of inaccuracy that can be estimated directly in the lab. To demonstrate the relevance of this approach, we show that small magnitudes of inaccuracy can significantly compromise several well-known entanglement witnesses. For two arbitrary-dimensional systems, we show how to compute tight corrections to a family of standard entanglement witnesses due to any given level of measurement inaccuracy. We also develop semidefinite programming methods to bound correlations in these scenarios.
\end{abstract}
\maketitle
\textit{Introduction.---} Deciding whether an initially unknown state is entangled is one of the central challenges of quantum information science \cite{Guhne2009, Horodecki2009, Friis2019}. The most common approach is the method of entanglement witnesses, in which one hypothesises that the state is close to a known target and then finds suitable local measurements that can reveal its entanglement \cite{Horodecki1996, Terhal1999, Lewenstein2000}. In principle, this allows for the detection of every entangled state. However, it crucially requires the experimenter to flawlessly perform the stipulated quantum measurements. This is an idealisation to which one may only aspire: even for the simplest system of two qubits, small alignment errors can cause false positives \cite{Seevinck2007, Rosset2012}. In contrast, by adopting a device-independent approach, any concerns about the modelling of the measurement devices can be dispelled. This entails viewing them as quantum black boxes and detecting entanglement through the violation of a Bell inequality \cite{Bancal2011, Moroder2013}. However, Bell experiments are practically demanding \cite{Brunner2014}. Also, many entangled states either cannot, or are not known to, violate any Bell inequality \cite{Werner1989, Augusiak2014}. In addition, for the common purpose of verifying that a non-malicious entanglement source operates as intended, a device-independent approach is to use a sledgehammer to crack a nut. In the interest of a compromise, entanglement detection has also been investigated in steering scenarios, in which some devices are assumed to be perfectly controlled and others are quantum black boxes \cite{Wiseman2007}. Nevertheless, such asymmetry is often not present in non-malicious scenarios, and the approach still suffers from drawbacks similar to both the device-independent case, albeit it milder, and the standard, fully controlled, scenario. 
A much less explored compromise route is to only assume knowledge of the Hilbert space dimension \cite{Moroder2012, Tavakoli2018}. This essentially adopts the view that the experimenter has no control over the relevant degrees of freedom. Such ideas have also been used to strengthen steering-based entanglement detection \cite{Moroder2016}.
Here, we introduce an approach to entanglement detection that neither assumes flawless control of the measurements nor views them as mostly uncontrolled operations. The main idea is that an experimenter can quantitatively estimate the accuracy of their measurement devices and then base entanglement detection on this benchmark. Such knowledge naturally requires a fixed Hilbert space dimension: the experimenter knows the degrees of freedom on which they operate.
To quantify the inaccuracy between the intended target measurement and the lab measurement, we use a simple fidelity-based notion that can handily be measured experimentally.
In what follows, we first establish the relevance of small inaccuracies by showcasing that the conclusions of well-known entanglement witnesses can be substantially compromised. We show that the magnitude of detrimental influence associated to a small inaccuracy does not have to decrease for higher-dimensional systems. This is important because higher-dimensional entangled systems are increasingly interesting for experiments \cite{Dada2011, Erhard2020, Ecker2019, Herrera2020, Hu2020} but typically cannot be controlled as precisely as qubits. Secondly, we develop entanglement criteria that explicitly take the degree of inaccuracy into account. For two-qubit scenarios, we provide this based on the simplest entanglement witness and the Clauser-Horne-Shimony-Holt (CHSH) quantity. For a pair of systems of any given local dimension, we show that such criteria can be analytically established as corrections to a simple family of standard entanglement witnesses.
Finally, we present semidefinite programming (SDP) relaxations for bounding the set of quantum correlations under measurement inaccuracies. We use this both to estimate the potentially constructive influence of measurement inaccuracy on entanglement-based correlations and to systematically place upper bounds for separable states on linear witnesses.
\textit{Framework.---} We consider sources of bipartite states $\rho=\rho_\text{AB}$ of local dimension $d$. The subsystems are measured individually with settings $x$ and $y$ respectively, producing outcomes $a,b\in\{1,\ldots,o\}$. The experimenter's aim is to measure the first (second) system using a set of projective measurements $\{\tilde{A}_{a|x}\}$ ($\{\tilde{B}_{b|y}\}$). These are called target measurements. However, the measurements actually performed in the lab do not precisely correspond to the targeted measurements, but instead to positive operator-valued measures (POVMs) $\{A_{a|x}\}$ ($\{B_{b|y}\}$). These are called lab measurements and do not need to be projective. The correlations in the experiment are given by the Born-rule
\begin{equation}\label{born}
p(a,b|x,y)=\Tr\left[A_{a|x}\otimes B_{b|y}\rho\right].
\end{equation}
We quantify the correspondence between each of the target measurements and the associated lab measurements through their average fidelity,
\begin{align}\label{fidelity}
& \mathcal{F}^\text{A}_x\equiv \frac{1}{d}\sum_{a=1}^{o}\Tr\left[A_{a|x} \tilde{A}_{a|x} \right],
& \mathcal{F}^{\text{B}}_y\equiv \frac{1}{d}\sum_{b=1}^{o}\Tr\left[B_{b|y} \tilde{B}_{b|y} \right].
\end{align}
The fidelity respects $\mathcal{F}\in[0,1]$ with $\mathcal{F}=1$ if and only if the lab measurement is identical to the target measurement. Importantly, the fidelity admits a simple operational interpretation: it is the average probability of obtaining outcome $a$ ($b$) when the lab measurement is applied to each of the orthonormal states spanning the eigenspace of the $a$-th ($b$-th) target projector. Thus, the fidelities $\{\mathcal{F}^\text{A}_x,\mathcal{F}^\text{B}_y\}$ can be directly determined by probing the lab measurements with single qudits from a well-calibrated, auxiliary, source. This requires no entanglement and can routinely be achieved, see e.g.~Ref.~\cite{Bouchard2018}. It motivates the assumption of a bounded inaccuracy, i.e.~a lower bound on each of the fidelities,
\begin{align}\label{assumption}
& \mathcal{F}^\text{A}_x\geq 1-\varepsilon^\text{A}_x, & \mathcal{F}^\text{B}_y\geq 1-\varepsilon^\text{B}_y,
\end{align}
where the parameter $\varepsilon\in[0,1]$ is the inaccuracy of the considered lab measurement. In the extreme case of $\varepsilon=0$, the lab measurement is identical to the target measurement and our scenario reduces to a standard entanglement witness. In the other extreme, $\varepsilon=1$, only the Hilbert space dimension of the measurement is known. Away from these extremes, one encounters the more realistic scenario, in which the experimenter knows the degrees of freedom, but is only able to control them up to a limited accuracy.
The simplest tests of entanglement use the minimal number of outcomes ($o=2$). In such scenarios the fidelity constraints \eqref{assumption} can be simplified into
\begin{align}\label{observable}
& \Tr\left(A_{x}\tilde{A}_{x}\right)\geq d\left(1-2\varepsilon^\text{A}_x\right), &&\Tr\left(B_{y}\tilde{B}_{y}\right)\geq d\left(1-2\varepsilon^\text{B}_y\right)
\end{align}
where we have defined observables $A_{x}\equiv A_{1|x}-A_{2|x}$ and $B_{y}\equiv B_{1|y}-B_{2|y}$. The observables can be arbitrary Hermitian operators whose extremal eigenvalue is bounded by unity, i.e.~$\norm{A_x}_\infty\leq 1$ and $\norm{B_y}_\infty\leq 1$.
Notice that the proposed framework immediately extends also to multipartite scenarios.
\textit{Impact of inaccuracies in entanglement witnessing.---} A crucial motivating question for our approach is whether, and to what extent, small inaccuracies in the measurement devices ($\varepsilon\ll 1$) impact the analysis of a conventional entanglement witness. We discuss this matter based on several well-known witnesses.
Firstly, consider the simplest entanglement witness for two qubits, involving two pairs of local Pauli observables: $\mathcal{W}=\expect{\sigma_X\otimes\sigma_X}+\expect{\sigma_Z\otimes\sigma_Z}$. For separable states we have $\mathcal{W}\leq \mathcal{W}_\text{sep}=1$ and for entangled states $\mathcal{W}\leq \mathcal{W}_\text{ent}=2$. Consider now that the lab observables $\{A_1,A_2\}$ and $\{B_1,B_2\}$ only nearly correspond \eqref{observable} to the target observables $\{\sigma_X,\sigma_Z\}$. Since $\mathcal{W}_\text{ent}=2$ is algebraically maximal, it remains unchanged, but such is not the case for the separable bound $\mathcal{W}_\text{sep}$. Thanks to the simplicity of $\mathcal{W}$, we can precisely evaluate $\mathcal{W}_\text{sep}$ in the prevalent scenario when all measurement devices are equally inaccurate, i.e.~$\varepsilon^\text{A}_x=\varepsilon^\text{B}_y=\varepsilon$. For a product state, we have $\mathcal{W}=\expect{A_1}\expect{B_1}+\expect{A_2}\expect{B_2}\leq
\sqrt{\expect{A_1}^2+\expect{A_2}^2} \sqrt{\expect{B_1}^2+\expect{B_2}^2}$. Since the target measurements are identical on both sites and the factors are independent, they are optimally chosen equal. Then, it is easily shown that the optimal choice of Bloch vectors corresponds to aligning $A_1$ and $A_2$ ($B_1$ and $B_2$) to the extent allowed by $\varepsilon$. This leads to the following tight condition for entanglement detection (see Supplementary Material)
\begin{equation}\label{qubit}
\mathcal{W}_\text{sep}(\varepsilon)= 1+4\left(1-2\varepsilon\right)\sqrt{\varepsilon\left(1-\varepsilon\right)},
\end{equation}
when $\varepsilon\leq \frac{1}{2}-\frac{1}{2\sqrt{2}}$ and $\mathcal{W}_\text{sep}=2$ otherwise. Importantly, the derivative diverges at $\varepsilon\rightarrow 0^+$. Hence, a small $\varepsilon$ induces a large perturbation in the ideal ($\varepsilon=0$) separable bound. In the vicinity of $\varepsilon=0$, it scales as $\mathcal{W}_\text{sep}\sim 1+4\sqrt{\varepsilon}$. For example, $\varepsilon=0.5\%$ leads to $\mathcal{W}_\text{sep}(\varepsilon)\approx 1.28$, which eliminates over a quarter of the range in which standard entanglement detection is possible, indicating the relevance of false positives.
Secondly, consider the CHSH quantity for entanglement detection, namely $\mathcal{W}=\expect{\sigma_X\otimes \left(\sigma_X+\sigma_Z\right)}+\expect{\sigma_Z\otimes \left(\sigma_X-\sigma_Z\right)}$. Here, we have targeted observables optimal for violating the CHSH Bell inequality \cite{CHSH1969}. One has $\mathcal{W}_\text{sep}=\sqrt{2}$ and $\mathcal{W}_\text{ent}=2\sqrt{2}$. In contrast to the previous example, the fact that all correlations from $d$-dimensional separable states constitute a subset of all correlations based on local hidden variables implies that entanglement can be detected for any value of $\varepsilon$. However, as we show in Supplementary Material through an explicit separable model that we conjecture to be optimal, this fact does not qualitatively improve the robustness of idealised ($\varepsilon=0$) entanglement detection to small inaccuracies. We obtain
\begin{align}
\mathcal{W}_\text{sep}=4\left(1-2\varepsilon\right)\sqrt{\varepsilon(1-\varepsilon)}+\sqrt{2-16\varepsilon\left(1-\varepsilon\right)\left(1-2\varepsilon\right)^2},
\end{align}
when $\varepsilon\leq \frac{1}{2}-\frac{1}{2\sqrt{2}}$ and $\mathcal{W}_\text{sep}=2$ otherwise. For small $\varepsilon$, we find $\mathcal{W}_\text{sep}\sim \sqrt{2}+4\sqrt{\varepsilon}$. An inaccuracy of $\varepsilon=0.5\%$ ensures $\mathcal{W}_\text{sep}\gtrsim 1.67 $, which eliminates nearly a fifth of the range in which standard entanglement detection is possible.
\begin{figure}
\caption{Numerically obtained lower bounds on the relative magnitude of the entangled-to-separable gap, $\Delta$, for entanglement witnessing based on two conjugate bases at different degrees of measurement inaccuracy $\varepsilon\in\{0.5\%, 1\%, 2\%, 3\%, 5\%, 10\%\}$.}
\label{FigNumerics}
\end{figure}
Interestingly, it is \textit{a priori} not clear how small $\varepsilon$ should impact standard entanglement witnessing as $d$ increases. On the one hand, the impact ought to increase due to the increasing number of orthogonal directions in Hilbert space. On the other hand, it ought to decrease due to the growing distances in Hilbert space. For instance, the $\varepsilon$ required to transform the computational basis into its Fourier transform scales as $\varepsilon=\frac{\sqrt{d}-1}{\sqrt{d}}$, which rapidly approaches unity. To investigate the trade-off between these two effects, we consider the $d$-dimensional generalisation of the simplest entanglement witness. Both subsystems are subject to the same pair of target measurements, namely the computational basis $\{\ket{e_i}\}_{i=1}^d$ and its Fourier transform $\{\ket{f_i}\}_{i=1}^d$, where $\ket{f_i}=\Omega\ket{e_i}$ with $\Omega_{jk}=\frac{1}{\sqrt{d}}e^{\frac{2\pi i}{d}jk}$. The witness is
$\mathcal{W}^{(d)}=\sum_{i=1}^d \bracket{e_i,e_i}{\rho}{e_i,e_i}+\bracket{f_i,f_i}{\rho}{f_i,f_i}$. Notice that for $d=2$ this only differs from the previous, simplest, witness by a normalisation term. One has $\mathcal{W}^{(d)}_\text{sep}=1+\frac{1}{d}$ and $\mathcal{W}^{(d)}_\text{ent}=2$ \cite{Spengler2012}. Allowing for measurement inaccuracy, we use an alternating convex search algorithm to numerically optimise over the lab measurements and shared separable states to obtain lower bounds on $\mathcal{W}^{(d)}_\text{sep}(\varepsilon)$. See Supplementary Material for details about the method. In order to compare the impact of measurement inaccuracy for different dimensions, we consider the following ratio between the entangled-to-separable gap in the inaccurate and ideal case, $\Delta\equiv \frac{\mathcal{W}^{(d)}_\text{ent}(0)-\mathcal{W}^{(d)}_\text{sep}(\varepsilon)}{\mathcal{W}^{(d)}_\text{ent}(0)-\mathcal{W}^{(d)}_\text{sep}(0)}=\frac{d}{d-1}\left[2-\mathcal{W}^{(d)}_\text{sep}(\varepsilon)\right]$. Notice that the numerator features $\mathcal{W}^{(d)}_\text{ent}(0)$ instead of $\mathcal{W}^{(d)}_\text{ent}(\varepsilon)$ because $\varepsilon$ is not in itself a resource for the experimenter. The results of the numerics are illustrated in Figure~\ref{FigNumerics} for some different choices of $\varepsilon$. We observe that $\Delta$ is not monotonic in $d$, but instead features a maximum, that shifts downwards in $d$ as $\varepsilon$ increases. Beyond this maximum point, the impact of measurement inaccuracies grows as the dimension becomes large.
Finally, for multipartite qubit states, it is natural to expect that the detrimental influence of small $\varepsilon$ grows with the number of qubits under consideration. The reason is that measurement inaccuracies can accumulate separately in the different subsystems. This intuition is confirmed by the models of Ref.~\cite{Rosset2012}, in which small alignment errors are used to spoof, with increasing magnitude, the standard fidelity-based witness of genuine multipartite entanglement for Greenberger-Horne-Zeilinger states \cite{Bourennane2004}.
This further confirms the need of considering measurement inaccuracies.
\textit{High-dimensional entanglement criterion.---} In view of the relevance of small measurement inaccuracies, it is natural to formulate entanglement criteria that take them explicitly into account beyond the simplest, two-qubit, scenario. Consider a pair of $d$-dimensional systems and $n\in\{1,\ldots,d^2-1\}$ measurements. For system A, the observables ideally correspond to (subsets of) a generalised Bloch basis $\{\lambda_i\}_{i=1}^{n}$ and for system B, the ideal observables are the complex conjugates $\{\bar{\lambda}_i\}_{i=1}^{n}$. Here, $\lambda_i$ is $d$-dimensional, traceless and satisfies $\Tr\left(\lambda_i\lambda_j^\dagger\right)=d\delta_{ij}$ \cite{Bertlmann2008}. Defining $\rho=\frac{1}{d}\left(\openone+\sum_{i=1}^{d^2-1}\mu_i\lambda_i\right)$, one has $\norm{\vec{\mu}}^2\leq d-1$. A simple standard entanglement witness, based on a total of $n$ measurements, is then given by
\begin{equation}\label{oldwitness}
\mathcal{W}^{(d)}=\sum_{i=1}^n \expect{\lambda_i\otimes \bar{\lambda}_i}.
\end{equation}
Using H\"older's inequality, one finds that separable states obey $\mathcal{W}^{(d)}_\text{sep}=d-1$. When the choice of Bloch basis is fixed, entangled states can achieve at most $\mathcal{W}_\text{ent}^{(d)}=\nu_\text{max}\left[\sum_{i=1}^n \lambda_i\otimes \bar{\lambda}_i\right]$, by choosing the state as the eigenvector corresponding to the largest eigenvalue ($\nu_\text{max}$). When the choice of Bloch basis is not fixed, a general upper bound for entangled states is $\mathcal{W}^{(d)}_\text{ent}\leq \min\left\{\sqrt{n\left(d^2-1\right)},n(d-1)\right\}$, as shown in Supplementary Material. Note that $n(d-1)$ is only relevant when $d=2$. Notice also that the maximally entangled state $\ket{\phi^+_d}=\frac{1}{\sqrt{d}}\sum_{i=0}^{d-1}\ket{ii}$ achieves $\mathcal{W}^{(d)}=n$ regardless of the choice of Bloch basis.
Consider now that the lab observables only nearly correspond to $\{\lambda_i\}$ and $\{\bar{\lambda}_i\}$ respectively. We write them as
$A_i=q \lambda_i+\sqrt{1-q^2}\lambda_{i}^{\perp}$ and $B_i=q \bar{\lambda}_i+\sqrt{1-q^2}\bar{\lambda}_{i}^{\perp}$, where $q\in[-1,1]$ is related to the inaccuracy through $q=1-2\varepsilon$ and $\lambda_{i}^{\perp}$ and $\bar{\lambda}_{i}^{\perp}$ are observables orthogonal to $\lambda_i$ and $\bar{\lambda}_i$, respectively, on the generalised Bloch sphere. In Supplementary Material, we prove that the witness $\mathcal{W}^{(d)}=\sum_{i=1}^n\expect{A_i\otimes B_i}$ for separable states obeys
\begin{equation}\label{bound}
\mathcal{W}^{(d)}_\text{sep}(\varepsilon)\leq \left(d-1\right) \left(q+\sqrt{n-1}\sqrt{1-q^2}\right)^2,
\end{equation}
when $q\geq \frac{1}{\sqrt{n}}$ and otherwise $\mathcal{W}^{(d)}_\text{sep}(\varepsilon)\leq n\left(d-1\right)$, which is algebraically maximal. As is intuitive, the window for detecting entanglement shrinks as $\varepsilon$ increases.
We investigate the tightness of the bound. To this end, choose the state as $\ket{\phi^\dagger}\otimes \ket{\phi^T}$, where the local Bloch vector is $\mu_i=\frac{\sqrt{d-1}}{\sqrt{n}}$ and where $\lambda_i\rightarrow \lambda_i^\dagger$ ($\lambda_i\rightarrow \lambda_i^T$) for $\ket{\phi^\dagger}$ ($\ket{\phi^T}$). Choose the observables as $A_i=q\lambda_i+\sum_{j\neq i}\frac{\sqrt{1-q^2}}{\sqrt{n-1}}\lambda_j$ and $B_i=q\bar{\lambda}_i+\sum_{j\neq i}\frac{\sqrt{1-q^2}}{\sqrt{n-1}}\bar{\lambda}_j$. This returns the separable bound \eqref{bound}. However, we need to check that the Bloch vector $\vec{\mu}$ corresponds to a valid state. Curiously, for the most powerful case, namely $n=d^2-1$, tightness would be implied by a positive answer to the long-standing open question of whether there exists a Weyl-Heisenberg covariant symmetric informationally complete (SIC) POVM in dimension $d$. To see the connection, simply choose the Bloch basis as the non-Hermitian Weyl-Heisenberg basis $\{X^uZ^v\}$ for $u,v\in\{0,\ldots,d-1\}$ and $u+v>0$, where $X=\sum_{k=0}^{d-1}\ketbra{k+1}{k}$ and $Z=\sum_{k=0}^{d-1}e^{\frac{2\pi ik}{d}}\ketbra{k}{k}$. It follows immediately that $|\bracket{\phi}{X^uZ^v}{\phi}|=\frac{1}{\sqrt{d+1}}$, which defines a SIC-POVM. Since these SIC-POVMs are conjectured to exist in all dimensions \cite{Zauner2011}, and are known to exist up to well above the first hundred dimensions \cite{Scott2017, Fuchs2017}, our bound is plausibly tight for any $d$.
\textit{SDP methods.---} We develop a hierarchy of SDP relaxations to bound the largest possible value of any linear witness, $\mathcal{W}=\sum_{a,b,x,y} c_{abxy}p(a,b|x,y)$, for some real coefficients $c_{abxy}$. The method applies both for correlations originating from entangled states and from separable states, under any given degree of measurement inaccuracy and arbitrary target measurements. Thus, we systematically establish upper bounds $\mathcal{W}^\uparrow_\text{ent}(\varepsilon)\geq \mathcal{W}_\text{ent}(\varepsilon)$ and $\mathcal{W}^\uparrow_\text{sep}(\varepsilon)\geq \mathcal{W}_\text{sep}(\varepsilon)$. This has a three-fold motivation. Firstly, $\mathcal{W}_\text{ent}$ will generally depend on $\varepsilon$; cases with $\mathcal{W}^{(d)}>\mathcal{W}^{(d)}_\text{ent}(0)$ can be observed when the inaccuracies accumulate in a constructive way (e.g.~a favourable systematic error in the local reference frames). It is relevant to bound such occurrences. Secondly, knowledge of $\mathcal{W}^\uparrow_\text{ent}(\varepsilon)$ allows an experimenter to give lower bounds on the inaccuracy of the measurement devices. Thirdly, and most importantly, this enables a general and systematic construction of entanglement witnesses of the form $\mathcal{W}\leq \mathcal{W}^\uparrow_\text{sep}(\varepsilon)$.
We discuss the main features of the method for computing $\mathcal{W}_\text{ent}^\uparrow(\varepsilon)$ and then see how it can be extended to also compute $\mathcal{W}_\text{sep}^\uparrow(\varepsilon)$. To this end, as is standard, the SDP relaxation method is based on the positivity of a moment matrix. This matrix consists of traces of monomials (in the spirit of e.g.~\cite{Burgdorf2012}) which are composed of products of the state, the lab measurements and the target measurements (see Supplementary Material for specifics). Moments corresponding to products of the first two can be used to build a generic linear witness $\mathcal{W}$ via Eq.~\eqref{born}. Moments corresponding to products of the final two can be used to build the constraints on the fidelities $\mathcal{F}^\text{A}_x$ and $\mathcal{F}^\text{B}_y$. Our construction draws inspiration from two established ideas. Firstly, one can capture the constraints of $d$-dimensional Hilbert space, on the level of the moment matrix, by numerically sampling states and measurements \cite{Navascues2015}. Secondly, in scenarios without entanglement, constraints capturing the fidelity of a quantum state with a target can be incorporated into the moment matrix \cite{Tavakoli2021}. We adapt the latter to entanglement-based scenarios and measurement fidelities as needed for Eq.~\eqref{assumption}. Details are given in Supplementary Material. We have applied this method, at low relaxation level, in several different case studies in low dimensions and frequently found that the obtained upper bounds coincide with those obtained from interior point optimisation routines. We note that the computational requirements for this tool can be much reduced since sampling-based symmetrisation methods of Ref.~\cite{Tavakoli2019} can straightforwardly be incorporated.
To extend this method for the computation of $\mathcal{W}_\text{sep}^\uparrow(\varepsilon)$, we must incorporate constraints on the set of quantum states. Since the set of separable states is generally difficult to characterise (see e.g.~\cite{DPS2002}), we instead adopt an approach in which we use the ideal entanglement witness condition, $\mathcal{W}\leq \mathcal{W}_\text{sep}(0)$, which we may realistically assume to possess, in place of the set of separable states. Then, since the probabilities associated to performing the target measurements on the state explicitly appear in our moment matrix, we can introduce it as an additional linear constraint in our SDP. Hence, the optimisation is effectively a relaxation of the subset of entangled states for which the original entanglement witness holds. In fact, since the set of separable states is characterised by infinitely many linear entanglement witnesses, one can in this way continue to introduce linear standard witnesses to constrain the effective state space in the SDP and thus further improve the accuracy of the bound $\mathcal{W}_\text{sep}^\uparrow(\varepsilon)$. In Supplementary Material we exemplify the use of this method, in its basic version, using only a single witness constraint $\mathcal{W}\leq \mathcal{W}_\text{sep}(0)$ on the state space, and show that it returns non-trivial, albeit not tight, bounds for two simple entanglement witnesses for relevant values of $\varepsilon$.
\textit{Discussion.---} We have introduced and investigated entanglement detection when the measurements only nearly correspond to those intended to be performed in the laboratory. We have shown the relevance of the concept, presented explicit entanglement witnesses that take measurement inaccuracy into account, and finally shown how SDP methods can be applied to these types of problems. These results are a step towards a theoretical framework for detecting entanglement based on devices that are quantitatively benchmarked in an operationally meaningful and experimentally accessible manner.
Our work leaves several natural open problems. If given an arbitrary standard entanglement witness, how can we compute corrections due to the introduction of measurement inaccuracies? Our SDP method is a first step towards addressing this problem but better methods are necessary both in terms of computational cost and in terms of the accuracy of the separable bound. Moreover, for a given $d$, what is the smallest number of auxiliary global measurement settings needed to eliminate the diverging derivative for optimal standard entanglement witnesses under small measurement inaccuracy? In addition, can one extend our entanglement witnesses to witnesses of genuine higher-dimensional entanglement, e.g.~by detecting the Schmidt number?
Also, in this first work, we have focused on bipartite entanglement. It would be interesting to identify useful entanglement witnesses for multipartite states at bounded measurement inaccuracy. Finally, the framework proposed here for entanglement detection draws inspiration from ideas proposed in semi-device-independent quantum communications. Given that several frameworks for semi-device-independence recently have been proposed \cite{VanHimbeeck2017, info1, info2, Wang2019, Tavakoli2021}, there may be other similarly inspired avenues for entanglement detection based on quantitative benchmarks.
\appendix
\section{Simplest entanglement witness}\label{AppSimple}
Consider the entanglement witness $\mathcal{W}=\expect{\sigma_X\otimes\sigma_X}+\expect{\sigma_Z\otimes\sigma_Z}$ on a pair of qubits. We allow the lab observables to have an $\varepsilon$-deviation with respect to the target measurements $\{\sigma_X,\sigma_Z\}$ on both sites. This corresponds to the constraints
\begin{align}
& \Tr\left(A_1\sigma_X\right)\geq 2-4\varepsilon, && \Tr\left(A_2\sigma_Z\right)\geq 2-4\varepsilon,\\
& \Tr\left(B_1\sigma_X\right)\geq 2-4\varepsilon, && \Tr\left(B_2\sigma_Z\right)\geq 2-4\varepsilon,
\end{align}
where we have chosen that all measurements are subject to the same magnitude of inaccuracy.
Due to the symmetry of $\mathcal{W}$ under a party swap, we can choose $A_1=B_1$ and $A_2=B_2$. Since the measurements are characterised by a pair of Bloch vectors, we can without loss of generality choose them in the $XZ$-plane of the Bloch sphere. We therefore write $A_k=B_k=\cos\theta_k \sigma_X+\sin\theta_k \sigma_Z$. In the relevant case of equality, the fidelity conditions then become
\begin{align}
&\theta_1=-\arccos\left(1-2\varepsilon\right),\\
&\theta_2=\arcsin\left(1-2\varepsilon\right).
\end{align}
Due to the party symmetry, we can choose a product state of the form $\ket{\phi}\otimes \ket{\phi}$ where $\ket{\phi}=\cos z\ket{0}+\sin z\ket{1}$. Then we obtain
\begin{equation}
\mathcal{W}=1+4\left(1-2\varepsilon\right)\sqrt{\varepsilon(1-\varepsilon)}\sin(4z),
\end{equation}
which is optimal at $z=\frac{\pi}{8}$ when $\varepsilon\leq \frac{1}{2}$. Hence
\begin{equation}
\mathcal{W}_\text{sep}=1+4\left(1-2\varepsilon\right)\sqrt{\varepsilon(1-\varepsilon)}.
\end{equation}
Notice that this is only valid for $\varepsilon\leq \frac{1}{2}-\frac{1}{2\sqrt{2}}$. For larger $\varepsilon$ we have $\mathcal{W}_\text{sep}=2$.
Moreover, we note that the immediate generalisation of this witness, namely $\mathcal{W}=\expect{\sigma_X\otimes\sigma_X}+\expect{\sigma_Y\otimes\sigma_Y}+\expect{\sigma_Z\otimes\sigma_Z}$, in the presence of measurement inaccuracies, can by similar means be shown to admit the separable bound
\begin{equation}
\mathcal{W}_\text{sep}=2+4\sqrt{2}\left(1-2\varepsilon\right)\sqrt{\varepsilon(1-\varepsilon)}-(1-2\varepsilon)^2,
\end{equation}
when $\varepsilon\leq \frac{3-\sqrt{3}}{6}$ and $\mathcal{W}_\text{sep}=3$ otherwise.
\section{Entanglement detection based on the CHSH quantity}\label{AppCHSH}
Consider a pair of qubits, each of which is subject to two measurements. The target observables on both sites are $\sigma_X$ and $\sigma_Z$. The lab observables all have the same inaccuracy bound $\varepsilon$. Thus we have
\begin{align}
& \Tr\left(A_1\sigma_X\right)\geq 2-4\varepsilon, && \Tr\left(A_2\sigma_Z\right)\geq 2-4\varepsilon,\\\label{cons}
& \Tr\left(B_1\sigma_X\right)\geq 2-4\varepsilon, && \Tr\left(B_2\sigma_Z\right)\geq 2-4\varepsilon.
\end{align}
In case of perfect measurements, the CHSH quantity acts as a conventional entanglement witness,
\begin{equation}
\mathcal{W}=\expect{A_1\otimes B_1}+\expect{A_1\otimes B_2}+\expect{A_2\otimes B_1}-\expect{A_2\otimes B_2}\leq \sqrt{2},
\end{equation}
which is respected by all separable states. Evidently, since $\mathcal{W}\leq 2$ for local hidden variable models, which in particular account for the statistics of any measurements performed on a separable state, it follows that entanglement can be detected for arbitrary $\varepsilon$.
We show the potential influence of small measurement inaccuracies through an explicit quantum model. Choose $A_1=B_1$ and associate it to a Bloch vector $\vec{n}_1=\left(\cos \alpha,0,\sin\alpha\right)$ in the XZ-plane. Similarly choose $A_2=B_2$ and associate it to the Bloch vector $\vec{n}_2=\left(\cos \beta,0,\sin\beta\right)$. Our strategy is to align the two Bloch vectors as much as possible under the constraints \eqref{cons}. This implies the choice of
\begin{align}
& \alpha=\arccos\left(1-2\varepsilon\right), && \beta=\arcsin\left(1-2\varepsilon\right).
\end{align}
Then, we choose the product state $\ket{\psi}=\ket{\phi}\otimes\ket{\phi}$ with $\ket{\phi}=\cos z\ket{0}+\sin z\ket{1}$, where
\begin{equation}
z=-\frac{\pi}{4}+\frac{1}{4}\arctan\left(\frac{1}{8\varepsilon-8\varepsilon^2-1}\right).
\end{equation}
The angle has been chosen so as to place the Bloch vector of $\ket{\phi}$ right in the middle of $\vec{n}_1$ and $\vec{n}_2$. This leads to the following value of the CHSH quantity,
\begin{align}
\mathcal{W}=4\left(1-2\varepsilon\right)\sqrt{\varepsilon(1-\varepsilon)}+\sqrt{2-16\varepsilon\left(1-\varepsilon\right)\left(1-2\varepsilon\right)^2},
\end{align}
when $\varepsilon\leq \frac{1}{2}-\frac{1}{2\sqrt{2}}$ and $\mathcal{W}=2$ otherwise. The derivative diverges as $\varepsilon\rightarrow 0^+$, indicating the first-order impact of small measurement inaccuracies. For small $\varepsilon$, the value scales as $\mathcal{W}\sim \sqrt{2}+4\sqrt{\varepsilon}-4\sqrt{2}\varepsilon$. For example, if we choose $\varepsilon=0.5\%$, the separable model achieves $\mathcal{W}=1.67$ which is a perturbation comparable to that obtained in the main text for the simplest two-qubit entanglement witness.
\section{Lower bounds: alternating convex search}\label{AppSeesaw}
Consider that we are given an arbitrary linear functional $\mathcal{W}$, arbitrary target measurements $\{\tilde{A}_{a|x}\}$ and $\{\tilde{B}_{b|y}\}$ and arbitrary measurement inaccuracies $\{\varepsilon^\text{A}_x,\varepsilon^\text{B}_y\}$. Consider a linear functional
\begin{equation}
\mathcal{W}=\sum_{a,b,x,y}c_{abxy} \Tr\left[A_{a|x}\otimes B_{b|y}\rho\right],
\end{equation}
with some real coefficients $c_{abxy}$. We describe a numerical method, based on alternating convex search, to systematically establish lower bounds on both $\mathcal{W}_\text{sep}$ and $\mathcal{W}_\text{ent}$. To this end we consider the latter case first.
In order to place a lower bound on $\mathcal{W}_\text{ent}$, we decompose the optimisation problem into three parts: one over the measurements on system A, one over the measurements on system B and one over the global shared state. To this end, we first choose a random set of measurements $\{B_{b|y}\}$ and a random pure state $\rho$. Then, we optimise $\mathcal{W}$ over the measurements $\{A_{a|x}\}$ under the constraint that $\mathcal{F}^\text{A}_x\geq 1-\varepsilon^\text{A}_x$. This optimisation is a semidefinite program and can therefore be efficiently solved. Using the returned measurements $\{A_{a|x}\}$, we optimise $\mathcal{W}$ over the measurements $\{B_{b|y}\}$ under the constraint that $\mathcal{F}^\text{B}_y\geq 1-\varepsilon^\text{B}_y$. This is again a semidefinite program. Finally, using the returned measurements $\{B_{b|y}\}$, we evaluate the Bell operator
\begin{equation}
\mathcal{B}=\sum_{a,b,x,y}c_{abxy}A_{a|x}\otimes B_{b|y}
\end{equation}
and compute its largest eigenvalue. The associated eigenvector is the optimal state, which corresponds to our choice of $\rho$. This routine of two semidefinite programs and one eigenvalue computation can then be iterated in order to find increasingly accurate lower bounds on $\mathcal{W}_\text{ent}$. The procedure depends on the initial starting point and ought therefore to be repeated several times independently.
To place a lower bound on $\mathcal{W}_\text{sep}$, we can proceed analogously to the above when treating the separate optimisations over the measurements $\{A_{a|x}\}$ and $\{B_{b|y}\}$. However, the optimisation over the state is now less straightforward since we require that $\rho=\ketbra{\phi}{\phi}\otimes \ketbra{\psi}{\psi}$. The optimisation over the state can be cast as another alternating convex search, treated as a sub-routine to the main alternating convex search. In other words, we sample a random $\ket{\phi}$ and evaluate the semidefinite program optimising $\mathcal{W}$ over $\ket{\psi}$. Then, using the returned $\ket{\psi}$, we run a semidefinite program optimising $\mathcal{W}$ over $\ket{\phi}$. This procedure is iterated until desired convergence is obtained.
\section{Bounds on witness}\label{AppWitness}
Let $\{\lambda_i\}_{i=1}^{d^2-1}$ be an orthonormal basis for the space of operators acting on $d$-dimensional Hilbert space, with $\Tr\left(\lambda_i\lambda_j^\dagger\right)=d\delta_{ij}$. Then, every qudit state can be written as
\begin{equation}\label{bloch}
\rho=\frac{1}{d}\left(\openone+\vec{\mu}\cdot\vec{\lambda}\right),
\end{equation}
where $\vec{\mu}$ is some complex-valued Bloch vector with entries $\mu_i=\expect{\lambda_i^\dagger}=\Tr\left(\rho \lambda_i^\dagger\right)$. By checking the purity $\Tr\left(\rho^2\right)$, one finds that $\norm{\vec{\mu}}^2=\sum_{i=1}^{d^2-1}\expect{\lambda_i^\dagger}^2\leq d-1$. In general, not every such Bloch vector corresponds to a valid density matrix.
Consider the witness
\begin{equation}
\mathcal{W}^{(d)}=\sum_{i=1}^n \expect{\lambda_i\otimes \bar{\lambda}_i}.
\end{equation}
For separable states, we can evaluate $\mathcal{W}^{(d)}_\text{sep}$ by restricting to product states. Then we have
\begin{align}\nonumber\label{C3}
\mathcal{W}^{(d)}&=\sum_{i=1}^n \expect{\lambda_i}_A \expect{\bar{\lambda}_i}_B\leq \sqrt{\sum_{i=1}^n \expect{\lambda_i}^2_A}\sqrt{\sum_{i=1}^n \expect{\bar{\lambda}_i}_B^2}\\
&\leq d-1 = \mathcal{W}^{(d)}_\text{sep}.
\end{align}
Notice that this is independent of $n$.
For entangled states, we have
\begin{align}
&\mathcal{W}^{(d)}\leq \sum_{i=1}^n \expect{\lambda_i\otimes \bar{\lambda}_i}=\nu_\text{max}\left[\sum_{i=1}^n \lambda_i\otimes \bar{\lambda}_i \right]\\
&\leq n \max_i \nu_\text{max}\left[\lambda_i\otimes \bar{\lambda}_i\right]=n \max_i \nu_\text{max}\left[\lambda_i\right]^2\leq n(d-1),
\end{align}
where we used that $\nu_\text{max}\left[\lambda_i\right]\leq \sqrt{d-1}$. However this, essentially trivial, bound is only tight for $d=2$, in which case it is algebraically maximal. To obtain a bound for $d>2$, we note that the entangled state lives in dimension $d^2$. Hence, its Bloch vector length is at most $\sqrt{d^2-1}$. In other words,
\begin{equation}
\sum_{i=1}^{n} \expect{\lambda_i\otimes \bar{\lambda}_i}^2\leq d^2-1.
\end{equation}
Taking the case of equality, we obtain a bound on the largest value of the witness when all entries in the sum are equal. Thus we require
\begin{equation}
\expect{\lambda_i\otimes \bar{\lambda}_i}=\sqrt{\frac{d^2-1}{n}},
\end{equation}
which gives
\begin{equation}
\mathcal{W}^{(d)}_\text{ent}\leq \sqrt{n}\sqrt{d^2-1}.
\end{equation}
This bound is not necessarily tight.
Consider now the case when we have separable states and inaccurate measurements. Expand $\mathcal{W}^{(d)}$ as follows,
\begin{align}\nonumber
\mathcal{W}^{(d)}=& \sum_{i=1}^n\expect{A_i\otimes B_i}=q^2\sum_{i=1}^n\expect{\lambda_i}_A\expect{\bar{\lambda}_i}_B\\\nonumber
&+q\sqrt{1-q^2}\sum_{i=1}^n\left(\expect{\lambda_i}_A\expect{\bar{\lambda}_{i}^\perp}_B+\expect{\lambda_{i}^\perp}_A\expect{\bar{\lambda}_i}_B\right)\\
&+\left(1-q^2\right)\sum_{i=1}^n\expect{\lambda_{i}^\perp}_A\expect{\bar{\lambda}_{i}^\perp}_B.
\end{align}
We examine these sums one by one. From \eqref{C3}, we see that the first sum is at most $d-1$. Next, we use the Cauchy-Schwarz inequality to write the second sum as
\begin{align}\nonumber
\sum_{i=1}^n& \expect{\lambda_i}_A\expect{\bar{\lambda}_{i}^\perp}_B\leq\sqrt{\sum_{i=1}^n\expect{\lambda_i}_A^2}\sqrt{\sum_{i=1}^n\expect{\bar{\lambda}_{i}^\perp}_B^2}\\
&\leq \sqrt{d-1}\sqrt{\sum_{i=1}^n\expect{\bar{\lambda}_{i}^\perp}_B^2}\leq \left(d-1\right)\sqrt{n-1}.
\end{align}
In the last step, we have used the following lemma. Let $\vec{u}\in\mathbb{R}^n$ and $\vec{v}^i\in\mathbb{R}^n$ be unit vectors such that the $i$'th component of $\vec{v}^i$ is zero, i.e.~$\vec{v}_i^i=0$. Then we have that
\begin{equation}
\sum_{i=1}^n \left(\vec{u}\cdot \vec{v}^i\right)^2\leq \sum_{i=1}^n 1-\vec{u}_i^2=n-1.
\end{equation}
Again using the Cauchy-Schwarz inequality and this lemma also leads to
\begin{align}
&\sum_{i=1}^n\expect{\lambda_{i}^\perp}_A\expect{\bar{\lambda}_{i}}_B\leq \left(d-1\right)\sqrt{n-1},\\
&\sum_{i=1}^n\expect{\lambda_{i}^\perp}_A\expect{\bar{\lambda}_{i}^\perp}_B \leq \left(d-1\right)\left(n-1\right).
\end{align}
Putting it together, we arrive at the bound
\begin{equation}\label{res}
\mathcal{W}_\text{sep}\leq \left(d-1\right)\left(n-1-q^2\left(n-2\right)+2q\sqrt{n-1}\sqrt{1-q^2}\right).
\end{equation}
\section{Semidefinite relaxations}\label{AppSDP}
Consider the task of optimising an arbitrary linear functional over the set of projective quantum strategies with a given inaccuracy to a set of target measurements:
\begin{align}\nonumber\label{optim}
& \qquad \qquad \mathcal{W}_\text{ent}=\max_{\{A_{a|x}\},\{B_{b|y}\},\rho} \mathcal{W}[p] \\\nonumber
& \text{subject to }\quad \Tr\left(\rho\right)=1, \qquad \rho\geq 0, \qquad \rho\in \mathcal{L}(\mathbb{C}^d)\\ \nonumber
& A_{a|x}A_{a'|x}=A_{a|x}\delta_{a,a'}, \qquad B_{b|y}B_{b'|y}=B_{b|y}\delta_{b,b'},\\\nonumber
& \sum_a A_{a|x}=\openone_d, \qquad \sum_b B_{b|y}=\openone_d\\\nonumber
& \mathcal{F}^\text{A}_x\geq 1-\varepsilon^\text{A}_x, \qquad \mathcal{F}^\text{B}_y\geq 1-\varepsilon^\text{B}_y\\
& p(a,b|x,y)=\Tr\left[A_{a|x}\otimes B_{b|y} \rho\right],
\end{align}
where $\mathcal{L}(\mathbb{C}^d)$ is the set of linear operators of dimension $d$. This is generally a difficult optimisation problem. However, it can be relaxed into a hierarchy of increasingly precise criteria, each of which can be evaluated as a semidefinite program.
To this end, define the operator list
\begin{equation}
S=\{\openone_{d^2}, \rho, \{A_{a|x}\}_{a,x},\{B_{b|y}\}_{b,y},\{\tilde{A}_{a|x}\}_{a,x},\{\tilde{B}_{b|y}\}_{b,y}\}.
\end{equation}
Here, the measurement operators are to be understood as spanning the full Hilbert space, e.g.~$A_{a|x}\rightarrow A_{a|x}\otimes \openone_d$. We let $M_k$ denote the set of all monomials, taken from the list $S$, of degree at most $k$. We let $n(k)$ denote the size of the set $M_k$. Then, we define the $n(k)\times n(k)$ tracial moment matrix as
\begin{equation}
\Gamma(u;v)=\Tr\left(uv^\dagger\right),
\end{equation}
for $u,v\in M_k$. A quantum model implies the positivity of $\Gamma$. Moreover, by including enough monomials, we can formulate the objective as a linear function in the moment matrix,
\begin{equation}\label{obj}
\mathcal{W}(\Gamma)=\sum_{a,b,x,y} c_{abxy} \Gamma(\rho A_{a|x}; B_{b|y}).
\end{equation}
Similarly, the inaccuracy constraints can be formulated as the linear constraints
\begin{align}\nonumber\label{sdpcons}
& \frac{1}{d^2}\sum_{a=1}^o \Gamma(A_{a|x}; \tilde{A}_{a|x})\geq 1-\varepsilon^\text{A}_x, \\
& \frac{1}{d^2}\sum_{b=1}^o \Gamma(B_{b|y}; \tilde{B}_{b|y})\geq 1-\varepsilon^\text{B}_y.
\end{align}
In order to capture the constraints of $d$-dimensional Hilbert space and to fix the target measurements in the optimisation, we proceed as follows \cite{Navascues2015, Tavakoli2021}. We randomly sample $\rho$, $\{A_{a|x}\}_{a,x}$ and $\{B_{b|y}\}_{b,y}$ from a $d$-dimensional Hilbert space and construct the list $S$. Note that the target measurements are fixed at all times. Then, we evaluate the moment matrix and label it $\Gamma^{(1)}$. This process is repeated, leading to a list of sampled moment matrices $\{\Gamma^{(1)},\ldots,\Gamma^{(m)}\}$. The sampling is terminated when the next moment matrix is found to be linearly dependent on all the previously sampled moment matrices. Thus, the sampled list constitutes a (non-orthonormal) basis of the space of moment matrices. We then define the total moment matrix as the affine combination
\begin{align}\label{sdpcons2}
& \Gamma=\sum_{i=1}^m s_i \Gamma^{(i)}, && \sum_{i=1}^m s_i=1,
\end{align}
where $\{s_i\}$ serve as optimisation variables.
We can now formulate our relaxation of the optimisation problem \eqref{optim} as $\mathcal{W}_\text{ent}(\varepsilon)\leq \mathcal{W}_\text{ent}^\uparrow(\varepsilon) $ where
\begin{align}
& \mathcal{W}_\text{ent}^\uparrow \equiv \max_{\{s_i\}} \mathcal{W}(\Gamma) \quad \text{ subject to } \quad \Gamma\geq 0
\end{align}
under the constraints \eqref{sdpcons} and \eqref{sdpcons2}. This can be evaluated as a semidefinite program. The relaxation becomes tighter as the list of monomials $M_k$ is extended.
\begin{figure}
\caption{\textbf{Solid lines.}
\label{Fig_SDP}
\end{figure}
In order to instead obtain bounds of the form $\mathcal{W}_\text{sep}(\varepsilon)\leq \mathcal{W}_\text{sep}^\uparrow(\varepsilon) $, we can add the constraint
\begin{equation}\label{ew}
\sum_{a,b,x,y} c_{abxy}\Gamma(\rho \tilde{A}_{a|x}; \tilde{B}_{b|y})\leq \mathcal{W}_\text{sep}(0),
\end{equation}
which corresponds to a standard entanglement witness. Note that we can introduce even more ``target'' measurements in the operator list $S$, thus extending the size $n(k)$ of the moment matrix, and then use them to build additional linear constraints like \eqref{ew} representing standard entanglement witnesses. The introduction of these shrinks the effective state space, thus improving the accuracy of the bound $ \mathcal{W}_\text{sep}^\uparrow(\varepsilon)$, at the price of a larger SDP.
We exemplify a simple version of this method for the case of the two witnesses considered in Appendix~\ref{AppSimple}, namely $\mathcal{W}_2=\expect{\sigma_X\otimes\sigma_X}+\expect{\sigma_Z\otimes\sigma_Z}\leq 1$ and $\mathcal{W}_3=\expect{\sigma_X\otimes\sigma_X}+\expect{\sigma_Y\otimes\sigma_Y}+\expect{\sigma_Z\otimes\sigma_Z}\leq 1$, at inaccuracy $\varepsilon$. These are evaluated with monomial lists of length $46$ and $89$ respectively. The results are illustrated in Figure~\ref{Fig_SDP}. As expected, the returned bounds are not tight, due to the basic relaxation of the separable set to all entangled states obeying $\mathcal{W}\leq 1$. Nevertheless, the bounds are non-trivial for relevant values of $\varepsilon$.
\end{document} |
\begin{document}
\preprint{APS/123-QED}
\title{Realistic simulation of quantum computation using\\ unitary and measurement channels}
\author{Ahmed Abid Moueddene}
\email{A.A.Moueddene@tudelft.nl}
\affiliation{QuTech, Delft University of Technology
Delft, The Netherlands}
\affiliation{Quantum Computer Engineering Dept, Delft University of Technology
Delft, The Netherlands.}
\author{Nader Khammassi}
\email{Nader.Khammassi@intel.com}
\affiliation{Intel Labs, Intel Corporation, Hillsboro, Oregon, USA
}
\author{Koen Bertels}
\email{ K.L.M.Bertels@tudelft.nl}
\affiliation{Quantum Computer Engineering Dept, Delft University of Technology
Delft, The Netherlands.}
\author{Carmen G. Almudever}
\email{C.GarciaAlmudever-1@tudelft.nl}
\affiliation{QuTech, Delft University of Technology
Delft, The Netherlands}
\affiliation{Quantum Computer Engineering dept, Delft University of Technology
Delft, The Netherlands.}
\date{\today}
\begin{abstract}
The implementation and practicality of quantum algorithms highly hinge on the quality of operations within a quantum processor. Therefore, including realistic error models in quantum computing simulation platforms is crucial for testing these algorithms. Existing classical simulation techniques of quantum information processing devices exhibit a trade-off between scalability (number of qubits that can be simulated) and accuracy (how close the simulation is to the target error model). In this paper, we introduce a new simulation approach that relies on approximating the density matrix evolution by a stochastic sum of unitary and measurement channels within a pure state simulation environment. This model shows an improvement of at least one order of magnitude in terms of accuracy compared to the best known stochastic approaches while allowing to simulate a larger number of qubits compared to the exact density matrix simulation. Furthermore, we used this approach to realistically simulate the Grover's algorithm and the surface code 17 using gate set tomography characterization of quantum operations as a noise model.
\end{abstract}
\maketitle
\section{Introduction}
Quantum computing relies on exploiting quantum phenomena such as superposition and entanglement to solve some complex computational tasks that are intractable for classical computers. To this purpose, quantum algorithms are implemented on systems of qubits in which a universal set of quantum operations is available. However, due to the unavoidable coupling with the environment and imperfect control, both qubits and operations are inherently noisy. Consequently, we are now entering the Noisy Intermediate Scale Quantum (NISQ) era, in which Quantum Processing Units (QPUs) consisting of a few tens of noisy qubits \cite{Preskill2018quantumcomputingin} are being demonstrated. Recently, quantum supremacy was achieved\cite{arute_quantum_2019}, that is, solving problems that no classical counterpart can solve. Before having such large chips widely available, there is a need for quantum platforms on which to test the functionality of quantum algorithms and their robustness against noise. In order to respond to this need, a small number of QPUs are available in the cloud \cite{IBMQX,rigettiqcs}. However, their limited accessibility and still relatively low number of qubits motivated the development of quantum computing simulation environments that incorporate realistic noise models based on characteristics of real devices.
When including realistic error models in quantum computing simulation platforms, there is a trade-off between accuracy, the closeness of the simulation to the real physical noise model, and scalability, the largeness of the quantum system that can be simulated. As a matter of fact, the exact simulation of density matrices using the superoperator representation has a major drawback of scalability in terms of the number of qubits possible to simulate \cite{Quantumsim,Quest}. Alternatively, there exist many stochastic approaches that approximate error channels by injecting errors from a cheaper to implement set of quantum channels, and therefore allowing the simulation of a larger number of qubits. These approaches include the depolarizing channel\cite{qiandqc}, the Pauli channel\cite{convex}, the Pauli Twirling Approximation (PTA)\cite{PhysRevA.88.012314,katabarawa,tomita}, the Pauli Measurement Channel (PMC), and the Clifford Measurement Channel (CMC) approximation\cite{PhysRevA.94.042338}. Some of these approximations were endowed by honesty constraints\cite{PhysRevA.89.022306,PhysRevA.87.012324}. These approaches have limited accuracy when used to simulate reasonably large circuits, which we refer to as the channel composition problem\cite{PhysRevA.95.062337}. In order to overcome this lack of accuracy, a quasistochastic version of the CMC was proposed\cite{PhysRevA.95.062337}, where negative probabilities of injecting errors were allowed. However, the stochastic noise models that can be incorporated in pure state simulation platforms are still poorly investigated.
To have a more scalable simulation approach compared to the exact density matrix simulation while limiting the loss in terms of accuracy, we propose a new simulation technique. It is based on the stochastic approximation of quantum channels by i) unitary channels and ii) measurements in arbitrary basis followed by conditional unitary gates depending on the measurement outcome. As a noise model, we use the Gate Set Tomography (GST) characterization of real devices. Our simulation includes single-qubit gates, two-qubit gates, and State Preparation And Measurement (SPAM) operations \cite{1310.4492,GST2q}. The main contributions of this work are the following:
\begin{itemize}
\item To improve the accuracy of the stochastic approaches, we approximate gate channels by convex sums of Unitary and Measurement Channels (UMC).
\item We introduce a stochastic approximation to realistically simulate SPAM operators.
\item We propose to adjust the fidelity of the operations by linearly tuning the Lindbladian of errors.
\item The UMC approximation is integrated in the QX simulator, a pure state simulation platform.
\item As a proof of concept, we simulate the 2-qubit Grover's algorithm and the surface code 17 under various mean fidelities.
\end{itemize}
This paper is structured as follows. In Section II, an overview of QPUs characterization protocols and simulation techniques is presented. In Section III, we introduce our simulation technique. In Section IV we describe the integration of error models in QX. Finally, our results and conclusion are shown in Sections V and VI, respectively.
\section{Quantum devices characterization and simulation: an overview }
A QPU can be modeled as a quantum system defined by its quantum state, a set of quantum gates and quantum measurements. Several approaches have been adopted to implement simulators for such systems with different trade-offs in terms of accuracy, simulation efficiency (including required computing power and memory requirements), and scalability to large qubit systems. Stabilizer-based simulations can be performed very efficiently on classical computers due to low memory and computing power requirements. However, this comes at the cost of restricting the supported quantum gates to the Clifford group and not supporting arbitrary qubit rotations. Examples of such simulators are CHP \cite{chp2} and one of the backends of QX \cite{7927034} and LIQUi$\ket{}$ \cite{liqui}. The lack of arbitrary quantum gate support in stabilizer-based simulators limits the number of algorithms that can be executed and the accuracy of implementable error models, which are often reduced to simple Pauli errors.
Universal quantum computer simulators include arbitrary quantum gates and operate on a pure quantum state $\ket{\psi}$ modeled by a state vector in the Hilbert space $H$ with unit norm. Each quantum gate is implemented as a unitary operator $U: H\rightarrow H$, mapping a state to another one with $UU^\dagger=1$. In addition, measuring a quantum state corresponds to a projection on a well-defined axis. Examples of such universal simulators are the QX simulator \cite{7927034}, qHipster\cite{Qhipster}, ProjectQ\cite{projectQ}, QuEST\cite{Quest}, and QCGPU\cite{QCGPU}. They allow simulating arbitrary quantum circuits but on a limited number of qubits compared to stabilizer-based simulators. Since universal quantum computer simulators can implement arbitrary qubit rotations, they also offer the opportunity to include more accurate error models that are no longer limited to basic Pauli errors. Therefore, they provide a better accuracy-scalability trade-off than much heavier simulation techniques such as the full density matrix approach. The latter operates on mixed quantum states and has significantly higher memory and computing power requirements that limit the simulation to a relatively small number of qubits.
When simulating an error-free QPU, operators describing state preparation, quantum gates and measurements are well known, since when they are assumed perfect, each operation corresponds by default to the desired one. However, it is known that isolating quantum systems from the environment is a major challenge for building a scalable QPU. This coupling with the environment makes qubits in any quantum technology be in mixed states. Accordingly, the output of a state preparation is a mixed state composed of the target state with a portion of other unwanted states and therefore, it can be described by its corresponding density matrix in a given QPU. Density matrices can be estimated using Quantum State Tomography (QST)\cite{PhysRevLett.77.4281,QSE}, in which a number of copies of a given state are measured in a tomographically complete basis to approximate its corresponding density matrix.
Furthermore, by representing quantum states as density matrices, noisy quantum gates should be regarded as quantum channels, which are completely positive trace preserving (CPTP) maps that map valid quantum states (unit trace hermitian) to other valid quantum states. Quantum channels are commonly described by their Kraus representation, and according to the Stinespring dilation theorem\cite{10.2307/2032342}, they come from the joint unitary evolution of qubits with their environment. This interaction with the environment together with imperfect control introduce errors during the implementation of quantum gates. In order to acquire some knowledge about operational errors \cite{qiandqc}, Standard Quantum Process Tomography (SQPT) \cite{qpt} was proposed\cite{doi:10.1080/09500349708231894}. It is based on estimating a quantum process by implementing the QST protocol on quantum states that are usually generated by applying the target process on a tomographically complete set of states. A more inclusive approach called the Linear Gate Set Tomography (LGST) was introduced to characterize gate errors together with SPAM errors\cite{gstintro,1310.4492}. In this work, we simulate QPUs given their Extended Gate Set Tomography (EGST) characterization. EGST is performed by sampling large sets of quantum circuits built as sequences taken from a target gate set. These sequences ensure 1) initializations and measurements in an informationally overcomplete set of initializations and measurements, and 2) the amplification of errors as the length of circuits increases. The target gate set is constructed via Maximum Likelihood Estimation (MLE), that is, estimating the set of operations that will most likely provide the measured frequencies. The EGST protocol certainly owes its accuracy to the use of a large number of sequences and the separation of SPAM errors from gate errors \cite{1310.4492,GST2q}.
In short, the EGST protocol takes as input the measurements observed via the implementation of a predefined set of circuits run on the target QPU and as output it provides the following:
i) Prepared states described as density matrices.
ii) Quantum gates described as quantum channels.
iii) Quantum measurements described as measurement operators that act on density matrices.
Based on such description, noisy quantum computation can be simulated accurately as quantum channels and measurements acting on density matrices. To this end, it is optimal to use the superoperator representation of quantum channels \cite{Quantumsim}. However, since the density matrix is stored in a $2^{2 \times n}$-entry vector, $n$ being the number of qubits, this approach has a major drawback of scalability due to the amount of memory required. Therefore, the depolarizing channel is commonly used as a noise model. This model introduces Pauli errors with homogeneous probability to each qubit at each step of the circuit. If the circuit is restricted to only include Clifford gates, this kind of computation can be efficiently simulated using the stabilizer formalism which is highly scalable\cite{chp1}. Error rates in this noise model are related to the randomized benchmarking protocol which in most cases gives a weak interpretation of errors faced in reality\cite{unitary2design}. To provide a more realistic approximation of errors, the Pauli Twirl Approximation was introduced \cite{PhysRevA.88.012314,katabarawa,tomita}. PTA consists in simulating the erroneous parts of each operation by Pauli gates with probabilities equal to the diagonal elements of the process matrix of the error channel. That is equivalent to replacing the error channel with another whose process matrix has only diagonal elements. Being oblivious to non-diagonal elements, PTA was updated to include the set of all possible operations that can be implemented using the stabilizer formalism, which is Clifford gates and Measurement followed by conditional gates Channels (CMC) \cite{PhysRevA.94.042338,PhysRevA.89.022306,PhysRevA.87.012324}.
It takes advantage of the convexity property; that is, given a set of $n$ quantum channels $\left\{ \Lambda_{i}\right\} _{i=1}^{n}$, and an n-entry probability vector $\left\{ p_{i}\right\} _{i=1}^{n}$ such that $\sum_{i=1}^n p_{i}=1$, the convex sum $\sum_{i=1}^n \Lambda_{i}p_{i}$ is also a quantum channel. The CMC approximation is done by injecting CMC channels according to the probability vector $\left\{ p_{i}\right\} _{i=1}^{n}$ that minimizes $||{\sum}_{i=1} ^{n}p_{i}\Lambda_{i}-\mathcal{E}||_{\diamond}$, where $\Lambda_i$'s are CMC channels and $\mathcal{E}$ is the target realistic error channel. Furthermore, these channels were endowed with honesty constraints so the CMC channel does not underestimate the effect of noise. But it turns out that this approximation has a drawback of channel composition\footnote{The channel composition problem means that a given approximation is accurate for a single channel, but when simulating large circuits using multiple approximate channels the errors accumulate in a way that the accuracy is substantially decreased.}, and the restriction on Clifford operations imposed by the use of the stabilizer formalism prevents the simulation of universal quantum computation.
In summary, some of the simulation approaches such as using density matrices are precise but not very scalable in terms of the number of qubits that can be simulated. Others, such as the CMC approximation, allow simulating a large number of qubits but with less accuracy. In order to overcome all these limitations and have a noise model that is more accurate than the CMC approximation while being more scalable than the exact density matrix simulation approach, we propose a new stochastic approach based on extending the CMC to include more general forms of channels $\Lambda_i$. It has the advantage of using a universal pure states simulation back-end where the states are stored in $2^{n}$ complex vectors, and hence, it requires the square root of memory compared to the exact density matrix simulation. Furthermore, we will show that it provides higher accuracy than the existing stochastic approaches since it uses more varied elements to approximate the targeted noisy operations.
\section{UMC approximation of quantum operations}
After running the EGST protocol on the target QPU, this work, as illustrated by the dashed box in Figure \ref{diag}, aims at introducing a method to make a pure state simulation platform, the QX simulator, mimic the behavior of a QPU given its EGST characterization. In order to define the specifications of the noisy operations that are implementable in QX, this section explains how to approximate quantum operations using UMC channels. We also introduce methods to simulate more reliable operations by linearly tuning the Lindbladian of errors.
\begin{figure}
\caption{The process diagram of our simulation approach.}
\label{diag}
\end{figure}
\subsection{UMC approximation of quantum channels}
We address the problem of the approximation of a noisy operation channel $\mathcal{E}$ by a convex sum of pure state operations. That is, unitary channels and measurement channels corresponding to measurements followed by unitary gates conditioned on the measurement outcome. In the absence of an algebraic decomposition, this is equivalent to solving the following constrained optimization problem:
Given the form of a finite set of channels $\left\{ \Lambda_{i}\right\} _{i=1}^{n}$ and the channel $ \mathcal{E}$. Minimize:
\begin{equation}
f( \textbf{p}, \bm{ \theta },\bm{ \beta})=|| \sum_{i=1} ^{n}p_{i}\Lambda_{i}( \textbf{p}, \bm{ \theta} , \bm{\beta)}-\mathcal{E}||_{\diamond}
\end{equation}
With the following linear constraints:
\begin{equation}
\sum_{i=1} ^{n}p_{i}=1 , \,p_{i}\geq0 \text{ and } 0\leq\theta_{j}<2\pi\,\,\,\forall i,j.
\end{equation}
Where the metric $||..||_{\diamond}$ refers to the diamond distance\cite{diamond}, $ \textbf{p}$ is a probability vector\footnote{$\textbf{p}$ is a vector where each entry $p_i$ corresponds to the probability of the channel $\Lambda_i$ being applied. Therefore, it should satisfy $\sum_{i=1} ^{n}p_{i}=1$ and $\,p_{i}\geq0$ }, $\mathcal{E}$ is the target channel and $ \Lambda_{i}$'s are unitary and measurement channels. $\bm{\theta}$ and $\bm{\beta}$ are matrices containing the angles that specify the unitary channels $U$ and the measurement channels $M$, respectively. For the single qubit case, we found it optimal to use a convex sum of four unitary channels and two measurement channels. Therefore, our approximate channel is specified by $p_i$'s, $\theta_{i}$'s, and $\beta_i$'s as follows:
\begin{eqnarray}
\sum_{i=1} ^{n}p_{i}\Lambda_{i}&=& {\sum}_{i=1} ^{4 }p_{i}U(\theta_{i,1},\theta_{i,2},\theta_{i,3})\nonumber \\
&+& \sum_{i=1} ^{2}p_{i+4} M(\beta_{i,1},..,\beta_{i,9})
\end{eqnarray}
Explicitly, $ M(\beta_{i,1},..,\beta_{i,9})$ are specified by the two Kraus operators $\ket{f_{1}}\bra{f}$ and $\ket{f_{2}}\bra{\bar{f}}$, corresponding to $\ket{f_{1}}=U(\beta_{i,1},\beta_{i,2},\beta_{i,3})\ket{0}$, $\ket{f_{2}}=U(\beta_{i,4} ,\beta_{i,5} ,\beta_{i,6})\ket{0}$, and $\bra{f}=\bra{0}U(\beta_{i,7},\beta_{i,8},\beta_{i,9})$. As we include four unitary channels and two measurement channels in the single qubit channel decomposition, $\textbf{p}$ is a 6-entry probability vector, $\bm{\theta}$ is a 4-by-3 angle matrix, and $\bm{\beta}$ is a 2-by-9 angle matrix. The entries of $\textbf{p}$, $\theta$, and $\beta$ are the degrees of freedom of our optimization problem.
For two-qubit channels, we use the following decomposition:
\begin{eqnarray}
\sum_{i=1} ^{n}p_{i}\Lambda_{i} &=& {\sum}_{i=1} ^{5}p_{i}U(\theta_{i,1},...,\theta_{i,15}) \nonumber \\
&+& p_{6}M(\theta_{6,1},..,\theta_{6,9})\otimes I \nonumber \\
&+& p_{7}I\otimes M(\theta_{7,1},..,\theta_{7,9}) \nonumber \\
&+& p_8M(\theta_{8,1},..\theta_{8,9})\otimes M(\theta_{9,1},..\theta_{9,9})
\end{eqnarray}
This decomposition includes five unitary channels, two uncorrelated measurement channels and a pair of correlated measurement channels.
\subsection{SPAM errors simulation}
Furthermore, SPAM errors are characterized by vectorized operators corresponding to a prepared state $\ket{\ket{\rho_{0}}}$ and a measurement generator $\bra{\bra{E}}$. However, in most of the quantum computing simulation platforms, qubits are usually initialized in the ground state $\ket{\ket{\rho_{perfect}}}=\ket{\ket{1/\sqrt{2},0,0,1/\sqrt{2}}}^t$ , and measured in the Pauli Z basis $\bra{\bra{E}}=\bra{\bra{1/\sqrt{2},0,0,-1/\sqrt{2}}}$. Therefore,
we use the channel $\Lambda_{prep}$ that maps a pure ground state $\ket{\ket{\rho_{perfect}}}$ to the noisy prepared state $\ket{\ket{\rho_{0}}}$, and a channel $\Lambda_{meas}$ that maps states to be measured via the faulty measurement $\bra{\bra{E}}$ to states having the same expectation values under a perfect measurement $\bra{{\bra{E_0}}}$. Hence:
\begin{eqnarray}
\ket{\ket{\rho_{0}}}&=&\Lambda_{prep}\ket{\ket{\rho_{perfect}}}\\
\bra{\bra{{E_0}}}&=&\bra{\bra{E_{perfect}}}\Lambda_{meas}
\end{eqnarray}
We obtain $\Lambda_{prep}$ and $\Lambda_{meas}$ by maximizing the following function:
\begin{eqnarray}
f_{prep}(\textbf{p}, \bm{ \theta} ,\bm{ \beta})&=& fidelity(\Lambda_{prep}(\rho_{perfect})\,,\,\rho_0)\\
f_{meas}(\textbf{p}, \bm{ \theta} , \bm{\beta})&=& fidelity(E\Lambda_{meas}()\,,\,E_0)
\end{eqnarray}
Where $(\textbf{p}, \bm{\theta} ,\bm{ \beta})$ are the parameters of $\Lambda_{prep}$ and $\Lambda_{meas}$ as a UMC convex sum and $E\Lambda()$ stands for measuring the operator $E$ after the application of a channel $\Lambda$. Note that the notion of fidelity holds also for the measurement operators. For this approximation, we achieved a $100\%$ fidelity in both $f_{prep}$ and $f_{meas}$ using the SQP algorithm from the Matlab optimization toolbox. Solving these optimization problems is faster and more precise compared to the UMC decomposition of quantum maps, as it has to satisfy a smaller system of equations. For instance, $f_{prep}$ can be solved by maximizing the fidelity between the upper left block of the Choi-Jamiolkowski representation \cite{Choi} of $\Lambda_{prep}$ and $\rho_0 $. Therefore, a system of three equations should be satisfied which makes it simpler than UMC decomposition single-qubit channels, where a system of twelve equations should be satisfied.
\subsection{Tuning the fidelity of operations}
Having SPAM channels together with single and two-qubit gate channels allows us to realistically simulate noisy quantum computations. These noisy operations have fixed fidelities often lower than the threshold of many QEC codes. Thus, in order to be able to evaluate a given QEC code or quantum circuit under different fidelities, we use the Lindbladian representation of error generator $\tilde{G}=G_{target}e^{\mathcal{L}}$, where $G_{target}$ is a perfect channel (no errors) and $\mathcal{L}$ is the Lindbladian of errors. The entries of the Lindbladian get close to zero when the channel is closer to the perfect one, and they get larger absolute values when the channel is noisier. Moreover, by tuning the Lindbladian of single qubit channels and computing the resulting channel's fidelity, we observed that if a given channel $\tilde{G}$ has infidelity $\bar{f}$, the gate $\tilde{G}'=G_{target}e^{\mathcal{L}\times n}$ has an infidelity $\bar{f}'=n\times\bar{f}$. By varying the parameter $n$, gates with different infidelities can be simulated.
As illustrated in the upper part of the dashed box in Figure \ref{diag}, by using the approximations introduced in this section and taking density matrices, quantum channels and measurement operators characterizing the target QPU as inputs, we can provide probabilities and angles that specify pure state operations. These probabilities and angles are fed to the QX simulator as will be described in the next section.
\section{Error Model Integration in QX}
The QX simulator, as shown in Figure \ref{qx_arch}, provides an abstract interface for implementing various error models and using them for injecting noise in arbitrary quantum circuits. The error model interface exposes an abstract noise injection function that can be implemented and customized for each new error model, allowing the extension and the integration of new error models in QX. Previously, several error models such as the Depolarizing Channel or the Pauli Twirling Approximation have been implemented. Those implementations use the user-provided Pauli errors parameters to inject noise in a perfect quantum circuit loaded in the QX simulator based on the specified error probabilities.
The simulation of the circuit can be executed efficiently compared to density matrix simulations due to lower requirements in terms of memory and computing power. However, if the circuit is composed by stochastic sums of pure state operations, a pure state simulation platform provides, up to sampling errors, the same results as the density matrix simulation. In other words, the measurement expectation values of the resulting density matrix can be reconstructed through the sampling of a large number of pure state simulation runs. The circuit of each run is constructed by picking from each operation's convex sum, a pure state operation according to its corresponding probability.
As a first step, the CMC approximation has been introduced in QX as a new error model that injects the errors from weighted combinations of the 24 single-qubit Clifford gates and the 6 Pauli resets. The probabilities of the different errors for a given quantum operation are computed from its GST characterization and expressed as a 30-entry probability vector where each entry is corresponding to a specific error type. A perfect circuit expressed in QX using the C++ API or the cQASM representation \cite{khammassi2018cqasm} is transformed into a noisy circuit through injecting errors based on that error probability vectors. The measurement expectation values are obtained by sampling noisy circuits.
Similarly, the UMC approximation has been implemented using the same interface to maintain the same plug-and-play error model interface and allow us to compare different error models using the same target quantum circuit. The UMC stores its parameters as a vector of error probabilities with their respective operators. Those operators are modeled as a set of arbitrary unitary gates and measurements in arbitrary basis followed by gates conditioned on the measurement outcome. Each of these operations is defined by a set of angles. These angles and the probabilities of injections are obtained via the optimization algorithm described in Section III. The UMC model is used to replace perfect gates by noisy ones when sampling a quantum circuit.
\begin{figure}
\caption{QX simulator architecture and error model integration.}
\label{qx_arch}
\end{figure}
\section{Results}
In order to evaluate our UMC error model, we first compare it to the CMC error model. Then, we use it to simulate the two-qubit Grover's algorithm using our model and the full density matrix simulation. In addition, to demonstrate the scalability potential of our approach, we simulate the 17 qubits distance 3 surface code using operations with tuned fidelities and infer the fidelity value beyond which the use of this code is beneficial.
\subsection{UMC vs. CMC}
In order to compare our UMC approach with the CMC approximation, we have approximated the GST-derived channels of 5 single-qubit gates corresponding to $R_x(90)$, $R_x(180)$, $R_y(90)$, $R_y(180)$, and the idling gate. In Figure \ref{diamond}, the diamond norm between the target and approximate channels using the UMC and CMC approaches are shown. Overall, our UMC allows a 2.73$\%$ closer approximation in diamond distance, which means a 36.6 times higher accuracy. Furthermore, we have achieved a diamond norm of 0.0225 between the UMC approximate and the target noisy Cphase gate. Note that our approach uses a smaller number of parameters to approximate two-qubit gates compared to CMC, which is generally impractical for two-qubit channels due to the largeness of the search space (number of two-qubit Cliffords). In addition, the achieved infidelities between the target and the approximate SPAM operators are of the order of $10^{-11}$.
\begin{figure}
\caption{The diamond norm for single-qubit gates using the CMC approximation (blue bars) and the UMC approximation (red bars).}
\label{diamond}
\end{figure}
These results were obtained using the SQP algorithm from the Matlab optimization toolbox. To compute the diamond norm we used QETLAB\cite{qetlab} and the CVX package\cite{cvx,cvx2}.
\subsection{UMC vs. a full density matrix simulation of the two-qubit Grover's algorithm }
To test the accuracy of our model, we have simulated the two-qubit Grover's algorithm using the UMC approximation and the exact density matrix simulation. As shown in Figure \ref{Fig2}, the two-qubit Grover's algorithm is a special case since its corresponding circuit lies in the two-qubit Clifford group and its theoretical success probability is $100\%$ (deterministic solution). Therefore, a failure of the algorithm is purely due to operational errors. Table \ref{table:1} shows the success rate of the algorithm using the mentioned approaches. In this case, the algorithm's success rate provided by our approach has an inaccuracy in the order of $10^{-3}$ compared to exact density matrix simulation.
\begin{figure}
\caption{Circuit of the two-qubit Grover's algorithm. The operator $O$ is the oracle operator and it inverts the amplitude of the target state. $cU_{00}$ is the inversion operator.}
\label{Fig2}
\end{figure}
\begin{table}[h]
\centering
\begin{tabular}{|l|l|l|l|l|}
\hline
Noise model & $f_{00}$ & $f_{01}$ & $f_{10}$ & $f_{11}$ \\ \hline
Exact & 0.7365 & 0.7490 & 0.7474 & 0.7661 \\ \hline
UMC & 0.7411 & 0.7473 & 0.7442 & 0.7652 \\ \hline
\end{tabular}
\label{tab1}
\caption{Success rate of the Grover's algorithm using the exact density matrix simulation and the stochastic approximate channels UMC.}
\label{table:1}
\end{table}
In our simulations, the Oracle operator $O$ and the inversion operator $cU_{00}$ are implemented by a Cphase gate and when needed, also single-qubit $R_x(180)\cdot R_y(180)$ rotations are applied. For instance, the $cU_{00}$ can be implemented as $R_x(180)\cdot R_y(180)$ acting on both qubits followed by a Cphase gate. Note that, although the diamond norm of the UMC approximation of the CPhase gate, which is the main source of mismatch, is about $10^{-2}$ (0.0225), the gap between the fidelities of the Grover's algorithm using the two simulation approaches is in the order of $10^{-3}$.
\subsection{The pseudo-threshold of the surface code 17}
The fidelity of single-qubit gates in the original gate set we are using is $0.9996$, which as we will show, is around the threshold of the surface code. However, the fidelities of the controlled-phase (C-Phase) gate (0.9266), state preparation (0.9296) and measurement (0.9603) are far below the threshold for this code. Therefore, we target gates that have higher fidelities by linearly decreasing the Lindbladian of errors as explained in Section II. The diamond norm of the approximation improves as the fidelity of the target gate increases. Figure \ref{Norms} shows the variation of the diamond norm of our approximation for single and two-qubit channels. It can be seen that for fidelities between 0.9992 and 0.9999, the diamond norm of the approximation of single-qubit gates and the controlled-phase gate goes from $1.15\times 10^{-4}$ to $1.44\times 10^{-5}$ and from $2.08\times 10^{-3}$ to $2.96\times 10^{-4}$, respectively.
\begin{figure}
\caption{The fidelity of the target gate versus the UMC approximation achieved distance of (red line) the controlled- phase gate and (blue line) single-qubit operations (average over the used single-qubit channels).}
\label{Norms}
\end{figure}
Using these approximations, we implemented the tiled version of the surface code 17 \cite{tomita} with various fidelities. As shown in Figure \ref{fig}, the implementation is done using single-qubit $R_y(\pm 90)$ rotations and C-Phase gate as a two-qubit entangling gate that are supported by superconducting transmon qubits \cite{DiCarlo2009}. We used the minimum-weight perfect matching decoder~\cite{decoder}. For the sake of optimality, we did not include idling errors to have a lower threshold which requires less sampling for higher accuracy. Figure \ref{sc7res} shows the logical error rate obtained for various mean fidelities of the physical operations. It can be observed that when using our proposed noise model, the pseudo-threshold for the surface code 17 resides within operations having mean fidelities around $0.9997$ (crossing point of dashed red and blue lines).
\begin{figure}
\caption{a) Surface code 17 lattice. Black dots correspond to data qubits, white (black) plaquettes are ancilla qubits used to measure Z (X) syndromes. b) Parity-check circuit for measuring X syndromes. c) Parity-check circuit for measuring the Z syndromes. Note that $-$ and $+$ correspond respectively to $R_y(-90)$ and $R_y(90)$.}
\label{fig}
\end{figure}
\begin{figure}
\caption{The logical error rate of the surface code 17 vs. the average fidelity of physical operations.}
\label{sc7res}
\end{figure}
\section{Conclusion}
This work bridges the gap between the stochastic channel approximations using the stabilizer formalism and the exact density matrix simulation. It tackles the channel composition problem in the former approach by
approximating the density matrix evolution by stochastic sum of unitary and measurement channels within a pure state simulation environment. This error model considerably reduces the diamond norm between the target and approximate channels. For instance, our UMC approximation of single-qubit gate channels derived via the GST protocol resulted in a diamond distance of $\sim 10^{-4}$ compared to $\sim10^{-3}$ provided by the best known stochastic approaches. We also introduced an accurate simulation of SPAM operators with an infidelity of $\sim 10^{-11}$.
Furthermore, to test the accuracy of our UMC model we simulated the Grover's algorithm using our approach and compared it with the exact density matrix simulation. We have shown that our approach provides an inaccuracy of $10^{-3}$. We have also shown that by linearly increasing/decreasing the Lindbladian of errors we can tune the fidelity of the quantum operations and the higher the fidelities are, the more accurate our approximation is. Therefore, we were able to simulate the surface code 17 using the QX simulator under various operation fidelities. This allowed us to estimate that this quantum error correction code would be effective if gate fidelities are beyond $0.9997$. The surface code simulations were performed on a cluster node with 2 $\times$ Xeon E5-2683 v3 CPU's (@ 2.00GHz = 28 cores / 56 threads) and 24 x 16GB DDR4 = 384GB memory. As the qubit register size is only 17, we could perform over 50 simulations simultaneously. Furthermore, using more nodes of our distributed system can increase significantly our sampling speed and therefore speedup the overall simulation time.
Although the distance 3 surface code is used as a use case to illustrate quantum circuit simulation using the UMC error model, larger circuits on larger qubit registers can be simulated: each node of our simulation platform allows the simulation of up to 34 fully entangled qubits in QX and therefore enable the simulation of a considerably larger number of qubits compared to exact density matrix simulations.
This work was done under the assumption of static noise in the absence of leakage errors, spatial ``crosstalk'' and temporal correlations. Therefore, including such noise models will be a step towards realism in the simulation of quantum computation.
\end{document} |
\begin{document}
\begin{abstract}
DNNs are becoming less and less over-parametrised due to recent advances in efficient model design, through careful hand-crafted or NAS-based methods. Relying on the fact that not all inputs require the same amount of computation to yield a confident prediction, adaptive inference is gaining attention as a prominent approach for pushing the limits of efficient deployment. Particularly, early-exit networks comprise an emerging direction for tailoring the computation depth of each input sample at runtime, offering complementary performance gains to other efficiency optimisations. In this paper, we decompose the design methodology of early-exit networks to its key components and survey the recent advances in each one of them. We also position early-exiting against other efficient inference solutions and provide our insights on the current challenges and most promising future directions for research in the field.
\end{abstract}
\title{Adaptive Inference through Early-Exit Networks:\\ Design, Challenges and Directions}
\ifacm
\else
\pagestyle{plain}
\fi
\section{Introduction}
During the past years, there has been an unprecedented surge in the adoption of Deep Learning in various tasks, ranging from computer vision \cite{resnet} to Natural Language Processing (NLP) \cite{transformers} and from activity recognition \cite{dl_activity_recognition} to health monitoring \cite{emotionsense_www}. A common denominator and, undoubtedly, a key enabler for this trend has been the significant advances in hardware design \cite{embench_2019,ai_benchmark_2019} (e.g. GPU, ASIC/FPGA accelerators, SoCs) along with the abundance of available data, both enabling the training of deeper and larger models.
While the boundaries of accuracy are pushed year by year, DNNs often come with significant workload and memory requirements, which make their deployment on smaller devices cumbersome, be it smartphones or other mobile and embedded devices found in the wild. Equally important is the fact that the landscape of deployed devices is innately heterogeneous \cite{wu2019machine}, both in terms of capabilities (computational and memory) and budget (energy or thermal).
To this direction, there has been substantial research focusing on minimising the computational and memory requirements of such networks for efficient inference.
Such techniques include architectural, functional or representational optimisations in DNNs \cite{Wang2019}, aiming at faster forward propagation at a minimal cost. These include custom -- hand or NAS-tuned -- blocks \cite{mobilenets,efficientnet}, model weights sparsification and pruning \cite{DBLP:journals/corr/HanMD15} as well as low-precision representation and arithmetics \cite{rastegari2016xnor,ternary_nets}.
Given there is no free lunch in Deep Learning, most of the aforementioned approaches trade off model accuracy for benefits in latency and memory consumption.
Moreover, while some of these approaches may work out-of-the-box, others do require significant effort during or post training to maintain performance or to target different devices \cite{DBLP:journals/corr/HanMD15}.
\begin{figure}
\caption{Early-exit network architecture}
\label{fig:ee-network}
\end{figure}
A complementary family of solutions further pushing the efficiency envelope exploits the accuracy-latency trade-off at runtime, by adapting the inference graph \cite{nestdnn_2018,horvath2021fjord,wang2018skipnet,wu2018blockdrop} or selecting the appropriate model \cite{han2016mcdnn,Lee_2019} for the device, input sample or deadline at hand. This category includes early-exiting (EE) \cite{branchynet2016}.
Early-exit networks
leverage the fact that not all input samples are equally difficult to process, and thus invest a variable amount of computation based on the input's difficulty and the DNN's prediction confidence; an approach resonating with the natural thinking mechanism of humans.
Specifically, early-exit networks consist of a backbone architecture, which has additional exit heads (or classifiers) along its depth (Fig.~\ref{fig:ee-network}). At inference time, when a sample propagates through the network, it flows through the backbone and each of the exits sequentially, and the result that satisfies a predetermined criterion (exit policy) is returned as the prediction output, circumventing the rest of the model. As a matter of fact, the exit policy can also reflect the target device capabilities and load and dynamically adapt the network to meet specific runtime requirements \cite{spinn2020mobicom,hapi2020iccad}.
\begin{figure*}
\caption{Early-exit networks workflow}
\label{fig:ee-workflow}
\end{figure*}
Reaping the benefits of early exiting upon deployment, however, is not as trivial as jointly training a backbone network with randomly placed exits. One needs to carefully design the network and the training sequence of the exits relative to the backbone before choosing the exit policy for the deployment at hand.
These decisions can be posed as a Design Space Exploration problem that can be efficiently traversed through a ``train-once, deploy-everywhere'' paradigm. This way, the training and deployment processes of early-exit networks can be detached from one another \cite{hapi2020iccad}.
This paper provides a thorough and up-to-date overview of the area of early-exit networks. Specifically, we first describe the typical architecture and major components of these networks across modalities. Next, we survey the state-of-the-art techniques
and bring forward the traits that make such models a compelling solution. Last, we conclude by discussing current challenges in existing systems, the most promising avenues for future research and the impact of such approaches on the next generation of smart devices.
\section{Early-Exit Networks}
DNNs can be thought of as complex feature extractors, which represent inputs into an embedded space and classify samples based on the separability of classes in the hyperplane manifold. Typically, shallow layers extract low-level features, such as edges, whereas deeper ones build upon higher level semantics.
Under that framework, early exits can be thought of as early decisions based on the shallower representations. The hypothesis behind their operation lies in the fact that such features on easier samples might be enough to offer the desired distinguishability between classes.
Several important decisions arise when designing, training and deploying such networks, however, as different designs affect the dynamics of the network's precision, performance and efficiency. In this and the following sections we go through the workflow of deploying an early-exit network (Fig.~\ref{fig:ee-workflow}).
\subsection{Designing the architecture}
\noindent
\textbf{Model \& Exit Architecture.}
Initially, one needs to pick the architecture of the early-exit model. There are largely two avenues followed in the literature: i) \textit{hand-tuned end-to-end designed networks} for early-exiting, such as MSDNet \cite{Huang2017}, and ii) \textit{vanilla backbone networks, enhanced with early exits} along their depth \cite{branchynet2016,hapi2020iccad,fang2020flexdnn,sdn_icml_2019}.
This design choice is crucial as it later affects the capacity and the learning process of the network, with different architectures offering varying scalability potential and convergence dynamics.
In the first case, networks are designed with progressive inference carved into their design. This means that the model and the architecture of its early exits are co-designed -- and potentially trained jointly. Such an approach allows for more degrees of freedom, but potentially restricts the design's performance across different circumstances and deployment scenarios, since this decision needs to be made early in the design process. For example, the existence of residual connections spanning across early exits can help generalisability of the network. On the other hand, some properties, such as maintaining multiple feature size representations, can prove detrimental in terms of model footprint \cite{Huang2017}.
On the other hand, when disentangling the backbone network's design from the early exits, one can have the flexibility of lazily selecting the architecture of the latter ones. Although this might not yield the best attainable accuracy, since the two components are not co-designed, it enables case-driven designs of early-exits that can be potentially trained separately to the main network and selected at deployment time \cite{hapi2020iccad}.
It is worth noting that early exits can adopt a uniform or non-uniform architecture, based on their placement. While the latter enlarges the design space of early-exit networks, it creates an interesting trade-off: the accuracy gained by the number (and type\footnote{Type can refer to the type of convolutions, such as regular vs. depthwise separable.}) of exit-specific layers vs. their overhead. While the adaptive and input-specific nature of early-exit networks is highly praised, when an early output does not meet the criteria for early-stopping, the runtime of the exit-specific layers is essentially an overhead to the inference computation. As such, the early exits need to be designed in comparison with the backbone network (i.e. relative cost) and with the exit policy at hand (i.e. frequency of paying that cost).
\noindent
\textbf{Number \& Position of Early-exits.}
In parallel with the architecture, one also needs to select the number and positioning of early exits along the depth of the network. This decision not only affects the granularity of early results, but also the overall overhead of early-exiting compared to the vanilla single-exit inference.
Too densely placed early exits can yield an extreme overhead without justifying the gains achieved by the extra classifiers, whereas too sparse placements can offer large refinement period until the next output is available. Moreover, having too many early classifiers can negatively impact convergence when training end-to-end.
With respect to positioning a given number of early exits, they can be placed equidistantly or at variable distances across the depth of the network. The decision depends on the use-case, the exit rate and the accuracy of each early exit. It is worth noting that this inter-exit distance is not actual ``depth'', but can be quantified by means of FLOPs or parameters in the network.
\subsection{Training the network}
After materialising its architecture, the early-exit model needs to be trained on a given dataset.
As hinted, there are largely two ways to train early-exit networks: i) \textit{end-to-end (E2E)} and ii) \textit{intermediate classifiers (IC) only}. Each approach presents different trade-offs in terms of achieved accuracy vs. flexibility for target-specific adjustments. Here, we discuss these trade-offs along with orthogonal training techniques that can boost the overall accuracy of the model.
\subsubsection{End-to-end vs. IC-only training}
\noindent
\textbf{\newline End-to-end training.}
The approach comprises jointly training the network and early exits. Normally, a joint loss function is shaped which sums intermediate and the last output losses ($L_{task}^{(i)}$) in a weighted manner (Eq.~\ref{eq:e2e_loss}) and then backpropagates the signals to the respective parts of the network. While the achieved accuracy of this approach can be higher both for the intermediate ($y_{i<N}$) and the last exit ($y_N$), this is not guaranteed due to cross-talk between exits \cite{Li_2019_ICCV}. Concretely, the interplay of multiple backpropagation signals and the relative weighting ($w_i$) of the loss components \cite{hu2019learning} needs to be carefully designed, to enable the extraction of reusable features across exits. As such, while offering a higher potential, E2E training requires manual tuning of the loss function as well as co-design of the network architecture and the populated exits \cite{sdn_icml_2019}.
\begin{equation}\label{eq:e2e_loss}
L_{e2e}(y_0, \dots, y_N, y) = \sum_{i=0}^N w_i * L_{task}^{(i)}(y_i,y)
\end{equation}
\noindent
\textbf{IC-only training.}
Alternatively, the backbone of the network and the early exits can be trained separately in two distinct phases. Initially, the backbone of the network, which may or may not be early-exit aware, is trained - or comes pretrained. In the subsequent phase, the backbone network is frozen\footnote{Meaning that the weights of this submodel are not updated through backpropagation.}, early-exits are attached at different points of the network and are trained separately (Eq.~\ref{eq:ic_loss}). This means that each exit is only fine-tuning its own layers and does not affect the convergence of the rest of the network. Therefore, the last exit is left intact, there is neither cross talk between classifiers nor need to hand-tune the loss function.
As such, more exit variants can be placed at arbitrary positions in the network and be trained in parallel, offering scalability in training while leaving the selection of exit heads for deployment time \cite{hapi2020iccad}. Thus, a \textit{``train-once, deploy-everywhere''} paradigm is shaped for multi-device deployment.
On the downside, this training approach is more restrictive in terms of degrees of freedom on the overall model changes, and thus can yield lower accuracy than an optimised jointly trained variant.
\begin{equation}\label{eq:ic_loss}
L_{ic\text{-}only}^{(i)}(y_i, y) = L_{task}^{(i)}(y_i, y)
\end{equation}
\subsubsection{Training with distillation}
An ensuing question that arises from the aforementioned training schemes is whether the early-exits differ in essence from the last one and whether there is knowledge to be distilled between them. To this direction, there has been a series of work \cite{zhang2019your, Phuong_2019_ICCV, always_personal_hotmobile, liu-etal-2020-fastbert,Li_2019_ICCV} that employ knowledge distillation \cite{Hinton2015} in a self-supervised way to boost the performance of early classifiers. In such a setting, the student $i$ is typically an early exit and the teacher $j$ can be a subsequent or the last exit ($j\geq i$). As such, the loss function for each exit is shaped as depicted in Eq.~\ref{eq:distillation_loss} and two important hyperparameters emerge, to be picked at design time; namely the distillation \textit{temperature} ($T$) and the \textit{alpha} ($\alpha$). The temperature effectively controls how ``peaky'' the teacher softmax (soft labels) should be while the alpha parameter balances the learning objective between ground truth ($y$) and soft labels ($y_j$).
\begin{equation}\label{eq:distillation_loss}
L_{distill}^{(i)}(y_i, y_j, y) = L_{task}^{(i)}(y_i, y) + \alpha L_{KL}(y_i, y_j, T)
\end{equation}
\subsubsection{Training personalised early-exits}
Hitherto, early exits have been trained for the same task uniformly across exits. However, when deploying a model in the wild, user data are realistically non-IID\footnote{Non Identically and Independently Distributed.} and may vary wildly from device to device. With this in mind, there has been a line of work \cite{reda_mm,always_personal_hotmobile} that personalises early exits on user data, while retaining the performance of the last exit in the source global domain.
In \cite{always_personal_hotmobile}, this is effectively accomplished through IC-only training, where the backbone network is trained on a global dataset and early-exits are then trained on user-specific datasets in a supervised or self-supervised manner. In the latter case, data labels are obtained from the prediction of the last exit. Orthogonally, knowledge distillation can still be employed for distilling knowledge from the source domain to the personalised exits, by treating the last exit as the teacher \cite{reda_mm,always_personal_hotmobile}.
\subsection{Deploying the network}
At this stage, an early-exit network has been trained and ready to be deployed for inference on a target device. There are largely three inference modes for early exits, each relevant to different use cases:
\begin{itemize}[noitemsep,label={},leftmargin=*,topsep=1pt]
\item \textbf{Subnet-based inference.} A single-exit submodel is selected (up to a specified early exit) and deployed on the target device. The main benefit here is the single training cycle for models of varying footprint, which, in turn, can target different devices or SLOs\footnote{Service Level Objective: e.g.~max latency or min accuracy.}.
\item \textbf{Anytime adaptive inference.} An adaptive inference model is deployed and each sample exits (early) based on its difficulty, the confidence of the network prediction and potential app-specific SLOs. This mode offers progressive refinement of the result through early-exiting and latency gains for easier samples.
\item \textbf{Budgeted adaptive inference.} Similar to anytime inference, but with a throughput-driven budget. Concretely, given a total latency budget, the goal is to maximise the throughput of correct predictions. This means that the model elastically spends more time on some examples at the expense of early-exiting on others.
\end{itemize}
Next, we are focusing on the last two use-cases and more specifically how the exit policy is shaped.
\subsubsection{Deploying for adaptive inference}
Exit policy is defined as the criterion upon which it is decided whether an input sample propagating through the network exits at a specified location or continues. Picking the appropriate depth to exit is important both for performance and to avoid ``overthinking\footnote{Overthinking refers to the non-monotonic accuracy of ICs; i.e.~later classifiers can misclassify a sample that was previously correctly classified.}'' \cite{sdn_icml_2019}. Overall, there are i)~\textit{rule-based} and ii)~\textit{learnable} exit policies.
\noindent
\textbf{Rule-based early-exiting.}
Most works in progressive inference have been employing the softmax of an exit to quantify the confidence of the network for a given prediction \cite{berestizshevsky2019dynamically}. On the one hand, we have approaches where the criterion is a threshold on the entropy of the softmax predictions \cite{branchynet2016}. High entropy indicates similar probabilities across classes and thus a non-confident output, whereas lower entropy hints towards a single-peak result. On the other hand, other approaches use the top-1 softmax value as a quantification of confidence.
An overarching critique for using confidence-based criteria, however, has been the need to manually define an arbitrary threshold, along with the overconfidence of certain deep models. Solutions include calibrating the softmax output values \cite{Guo2017} or moving to different exit schemes.
Alternative rule-based exit policies include keeping per class statistics at each layer \cite{class_means}, calculating classifiers' \textit{trust scores} based on sample distances to a calibration set \cite{trust_score_neurips} or exiting after $n$ exits agree on the result \cite{bert_loses_patience_neurips2020}.
\noindent
\textbf{Learnable exit policies.}
Expectedly, one may wonder why not to learn network weights and the exit policy jointly. In this direction, there has been work approaching the exit policy in differentiable \cite{chen2020learning, scardapane2020differentiable} and non-differentiable \cite{epnet} ways. In essence, instead of explicitly measuring the exit's confidence, the decision on whether to exit can be based on the feature maps of the exits themselves. The exit decision at a given classifier can be independent of the others (adhering to the Markov property) or can be modelled to also account for the outputs of adjacent exits.
\section{Early-Exits \& Target Hardware}
Early-exiting not only provides an elegant way to dynamically allocate compute to samples based on their difficulty, but also an elastic way to scale computation based on the hardware at hand.
Although we have presented so far design, training and deployment as three distinct stages, these can, in fact, be co-designed and co-optimised for targeting different devices and SLOs.
First, a considerable benefit of early-exit networks, as aforementioned, is their ``train-once, deploy-everywhere'' paradigm. Essentially, this means that an overprovisioned network -- e.g.~a network with densely placed early-exits -- can be trained and then different parts of it be deployed according to the device's computational budget, memory capacity or energy envelope and application's latency, throughput or accuracy objectives. In essence, tweaking i)~the classifier architecture, ii)~the number and positioning of early-exits and iii)~exit-policy to the hardware at hand can be posed as a Design Space Exploration (DSE) problem with the goal of (co-)optimising latency, throughput, energy or accuracy given a set of restrictions, posed in the form of execution SLOs \cite{hapi2020iccad}. Accurately modelling this optimisation objective subject to the imposed restrictions is important for yielding efficient valid designs for the use-case at hand and shaping the Pareto front of optimal solutions.
Traversing this search space efficiently is important, especially since it needs to be done once per target device. Therefore, end-to-end training is usually avoided in favor of the more flexible IC-only approach.
It should be noted, though, that the search is run prior to deployment, and its cost is amortised over multiple inferences.
Instead of searching for the optimal network configuration for fixed hardware, another set of approaches is to design the hardware specifically for early-exit networks \cite{9032146, farhadi2019novel,kim2020low} or co-design the network and hardware for efficient progressive inference \cite{8843626,9020551}.
\section{Adaptive Inference Landscape}
\noindent
\textbf{Offline accuracy-latency trade-off.}
DNNs have been getting deeper and wider in their pursuit of state-of-the-art accuracy. However, such models still have to be deployable on devices in the wild. As such, optimising DNNs for efficient deployment has been an extremely active area of research.
Approaches in the literature exploit various approximation and compression methods \cite{Wang2019} to reduce the footprint of these models, including quantisation of network weights and activations \cite{rastegari2016xnor,ternary_nets,gholami2021survey}
or weight sparsification and pruning \cite{deng2020model,liu2020pruning,DBLP:journals/corr/HanMD15}.
A common denominator amongst these techniques is that they inherently trade off latency or model size with accuracy. This trade-off is exploited offline in a device-agnostic or hardware-aware \cite{yang2018netadapt} manner.
Alongside, recent models tend to become less redundant, and thus more efficient, through careful hand-engineering \cite{mobilenets}, or automated NAS-based design \cite{efficientnet,liu2018darts}
of their architecture.
These approaches remain orthogonal to adaptive inference, thus offering complementary performance gains.
\noindent
\textbf{Dynamic Networks.} Techniques in this family take advantage of the fact that different samples may take varying computation paths during inference \cite{liu2018dynamic}, either based on their intricacy or the capacity of the target device.
Such methods include dynamically selecting specialised branches \cite{mullapudi2018hydranets}, skipping \cite{wang2018skipnet,wu2018blockdrop,veit2018convolutional} or ``fractionally executing'' (i.e.~with reduced bitwidth) \cite{shen2020fractional} layers during inference, and dynamically pruning channels \cite{horvath2021fjord,nestdnn_2018,gao2018dynamic,lin2017runtime} or selecting filters \cite{yu2018slimmable, yu2019universally, chen2019you}. These approaches typically exploit trainable gating/routing components in the network architecture. This, however, complicates the training procedure and restricts post-training flexibility for efficient deployment on different hardware.
\noindent
\textbf{Inference Offloading.}
Orthogonally, there has been a series of work on adaptive inference offloading, where part of the computational graph of a DNN is offloaded to a faster remote endpoint for accelerating inference to meet a stringent SLO \cite{Kang2017}. Some \cite{edgent_2020, spinn2020mobicom} even combine early-exit networks with offloading.
\noindent
\textbf{Model Selection \& Cascades.}
More closely related to early-exiting come approaches that train a family of models with different latency-accuracy specs, all deployed on the target device.
This is achieved by trading off precision \cite{kouris2018cascade}, resolution \cite{yang2020resolution} or model capacity \cite{han2016mcdnn,wang2017idk}
to gain speed, or by incorporating efficient specialised models \cite{wei2019self}. At inference, the most appropriate model for each input is selected through various identification mechanisms \cite{Lee_2019,taylor2018adaptive}, or by structuring the model as a cascade and progressively propagating to more complex models until a criterion is met \cite{kouris2020throughput}. Although seemingly similar to early-exiting, ``hard'' samples may propagate through numerous cascade stages without re-use of prior computation.
\noindent
\textbf{Early-exiting.}
Early-exiting has been applied for different intents and purposes.
Initially, single early-exits were devised as a mechanism to assist during training \cite{szegedy2015going}, as a means of enhancing the feedback signal during backpropagation and to avoid the problem of vanishing gradients. In fact, these exits were dropped during inference. Since then, however, early-exits have proven to be a useful technique of adaptive inference and have been applied successfully to different modalities and for different tasks. These previously discussed techniques are concisely presented in Table~\ref{tab:ee_related_work} and organised by their optimisation goal, input modality and trained task.
\setlength{\tabcolsep}{2pt}
\begin{table*}[t]
\caption{Work in early exiting.}
\centering
\scriptsize
\resizebox{0.9\linewidth}{!}{
\begin{tabular}{l l l l}
\toprule
\begin{tabular}{@{}c@{}} \textbf{Category} \\ \end{tabular} & \begin{tabular}{@{}c@{}} \textbf{Title} \\ \end{tabular} & \begin{tabular}{@{}c@{}} \textbf{Modality/Task} \\ \end{tabular}
& \begin{tabular}{@{}c@{}} \textbf{Description} \\ \end{tabular}
\\ \midrule
\multirow{14}{10em}{\textbf{Early-exit network-specific techniques}} &
MSDNet \cite{Huang2017,Li_2019_ICCV}
& Vision/Classification
& Hand-tuned multi-scale EE-network. \\
& Not all pixels are equal \cite{8100167}
& Vision/Segmentation
& Pixel-level EE based on difficulty for semantic segmentation.\\
& Phuong et al. \cite{Phuong_2019_ICCV}
& Vision/Classification
& Distillation-based EE from later to earlier MSDNet exits. \\
& RBQE \cite{xing2020early}
& Vision/Enhancement
& UNet-like network with EE, for Quality Enhancement.\\
& The Right Tool for the Job \cite{schwartz-etal-2020-right}
& NLP
& Jointly trained EE on BERT. \\
& DeeBERT \cite{xin-etal-2020-deebert}
& NLP/GLUE
& Jointly trained EE on Ro(BERT)a models.\\
& FastBERT \cite{liu-etal-2020-fastbert}
& NLP
& Distillation-based EE on BERT models.\\
& Depth-Adaptive Transformer \cite{Elbayad2020Depth-Adaptive}
& NLP/BLEU
& Transformer-based EE for translation.\\
& Bert Loses Patience \cite{bert_loses_patience_neurips2020}
& NLP/GLUE
& Patience-based EE on (AL)BERT models.\\
& Cascade Transformer \cite{soldaini-moschitti-2020-cascade}
& NLP/QA
& Transformer-based EE rankers for Question Answering. \\
& MonoBERT \cite{xin-etal-2020-early}
& IR/Document Ranking
& Asymmetric EE BERT for efficient Document Ranking. \\
& Chen et al. \cite{chen2020don}
& Speech
& Speech separation with EE-transformers.\\
\midrule
\multirow{9}{10em}{\textbf{Early-exiting network-agnostic techniques}}
& CDLN \cite{panda2016conditional}
& Vision/Classification
& Primary early-exit work based on linear classifiers.\\
& BranchyNet \cite{branchynet2016}
& Vision/Classification
& Entropy-based fixed classifier EE-technique. \\
& SDN \cite{sdn_icml_2019}
& Vision/Classification
& E2E \& IC-only overthinking-based training EE. \\
& HAPI \cite{hapi2020iccad}
& Vision/Classification
& Hardware-aware design of EE-networks via DSE. \\
& Edgent \cite{edgent_2020}
& Vision/Classification
& Submodel selection for offloading through EE training. \\
& SPINN \cite{spinn2020mobicom}
& Vision/Classification
& Partial inference offloading of EE-networks. \\
& FlexDNN \cite{fang2020flexdnn}
& Vision/Classification
& Footprint overhead-aware design of EE-networks.\\
& DDI \cite{wang2020dual}
& Vision/Classification
& Combines layer/channel skipping with early exiting. \\
& MESS \cite{mess_kouris}
& Vision/Segmentation
& Image-level EE based on difficulty for semantic segmentation. \\
\midrule
\multirow{3}{10em}{\textbf{Variable label distributions}}
& Bilal et al. \cite{bilal2017convolutional}
& Vision/Classification
& Hierarchy-aware ee-CNNs through confusion matrices. \\
& Bonato et al. \cite{bonato2021class}
& Vision/Classification
& Class prioritisation on EE. \\
& PersEPhonEE \cite{always_personal_hotmobile}
& Vision/Classification
& Personalised EE-networks under non-IID data \\
\midrule
\multirow{3}{10em}{\textbf{Learnable exit policies}}
& Scardapane et al. \cite{scardapane2020differentiable}
& Vision/Classification
& Differentiable jointly learned exit policy on EE-networks. \\
& EpNet \cite{epnet}
& Vision/Classification
& Non-differentiable exit policy for EE-networks. \\
& Chen et al. \cite{chen2020learning}
& Vision/\{Classification, Denoising\}
& Jointly learned variational exit policy for EE-networks.\\
\midrule
\multirow{2}{10em}{\textbf{Adversarial robustness}}
& Triple-wins \cite{Hu2020Triple}
& Vision/Classification
& Accuracy, robustness, efficiency multi-loss. \\
& DeepSloth \cite{hong2021a}
& Vision/Classification
& Adversarial slowdown-inducing attack. \\
\midrule
\multirow{5}{10em}{\textbf{EE-H/W (co-)design}}
& Kim et al. \cite{9032146}
& Vision/Identification
& Low-power \& robust EE model+H/W for continuous face recognition. \\
& Farhadi et al. \cite{farhadi2019novel}
& Vision/Classification
& FPGA partial reconfiguration for progressive inference.\\
& Kim et al. \cite{kim2020low}
& Vision/Classification
& Single-layer EE and H/W synthesis for thresholding. \\
& Paul et al. \cite{8843626}
& Vision/Classification
& Efficient EE inference on FPGA.\\
& DynExit \cite{9020551}
& Vision/Classification
& Trainable weights in joint EE loss and FPGA deployment.\\
\bottomrule
\end{tabular}
}
\label{tab:ee_related_work}
\end{table*}
\noindent
\textbf{Other surveys.} There have been certain previous surveys touching on the topic of early-exiting, either only briefly discussing it from the standpoint of dynamic inference networks \cite{han2021dynamic} or combining it with offloading \cite{matsubara2021split}. To the best of our knowledge, this is the first study that primarily focuses on early-exit networks and their design trade-offs across tasks, modalities and target hardware.
\section{Discussion \& Future Directions}
Having presented how early-exiting operates and what has been accomplished by prior work, here we discuss the main challenges and most prominent directions for future research in the field.
\subsection{Open Challenges.}
\noindent\textbf{Modalities.} A lot of research efforts in early exits have focused on the task of image classification through CNNs, and only most recently NLP through Transformer networks. However, a large variety of models (e.g.~RNN, GAN, seq2seq, VAE) are deployed in the wild, addressing different tasks including object detection, semantic segmentation, regression, image captioning and many more. Such models come with their own set of challenges and require special handling on one or more of the core components of early-exit networks, which remain largely unexplored to date.
\noindent\textbf{Early-exit Overhead.} Attaching early exits to a backbone network introduces a workload overhead for the samples where the exit at hand cannot yield a confident enough prediction. This overhead heavily depends on the architecture of the exit, its position in the network, the effectiveness of the exit policy and the task itself. Hence, instantiating the optimal configuration of early exits on a backbone network, which balances this overhead against the performance gains from exiting early, remains a challenging task.
\noindent\textbf{Architectural Search Space.} As previously established, there is a large interplay between the building blocks of early-exit networks. It is therefore desirable to co-optimise many design and configuration parameters such as exit number, placement, architecture and policy. This inflates the architectural search space and makes it computationally challenging to traverse, in search for optimal configurations. Structured or NAS-based approaches to explore this space could provide an efficient solution to this end.
\noindent\textbf{Training Strategy.} Training early-exit networks is inherently challenging. Normally, early layers in DNNs extract lower-level appearance features, whereas deeper ones extract higher-level semantics, important for the respective task. In the realm of early-exiting,
classifiers placed shallowly are natively pushing for the extraction of semantically strong features earlier in the network, which causes tension between the gradients of different exits, and may harm the overall accuracy in the case of e2e training. Conversely, IC-only trained early exits may lead to inferior accuracy or increased overheads. Developing a training strategy that can combine the best of both worlds remains an open question.
\noindent\textbf{Exit Policy.} Current hand-tuned exit policies treat prediction ``confidence'' as a proxy to accuracy. Exit placement and training strategy may cause these predictions to become naturally under- or over-confident, leading to a probability distribution over layers that does not reflect the network's innate uncertainty \cite{Guo2017}. Developing exit strategies that better reflect the network's readiness-to-exit and potential ability-to-improve its prediction by propagating to the next exit is a challenging area of research. Additionally, it is important to allow such methodologies to remain adaptable post-training, in order to facilitate efficient deployment to use-cases with varying requirements and devices with different computational capabilities.
\subsection{Additional future directions}
\noindent\textbf{Temporal-Awareness.} In video or mobile agent applications, strong correlations typically exist between temporally and spatially adjacent input samples, and hence their underlying predictive difficulty given previous predictions \cite{hu2020temporally}. There is therefore space to integrate historical or codec information to further optimise early-exiting.
\noindent\textbf{Hierarchical Inference.} In latency critical applications, having some higher-level actionable result from early on may be more important than waiting for an accurate finer-grained classification prediction. Early-exit networks can facilitate this paradigm, through hierarchical inference, with earlier exits providing more abstract -- and therefore easier to materialise -- predictions (e.g. ``dog"), before specialising their prediction in deeper exits (e.g. ``beagle'') \cite{bilal2017convolutional,zamir2017feedback}.
\noindent\textbf{Personalisation.} At deployment time, deep learning models often meet narrower distributions of input samples than what they have been originally trained for (i.e. detecting a single user or their relatively stationary environment).
In such cases, early exits can act as a self-acceleration mechanism, trained on the spot, e.g. through knowledge (self-)distillation from the final exit \cite{always_personal_hotmobile}, to maximise their performance by specialising to the target distribution.
\noindent\textbf{Heterogeneous Federated Learning (FL).} In FL deployments, participating devices can have very heterogeneous computational and network capabilities \cite{horvath2021fjord}. As such, submodels of varying depth may be distributed to clients to train on their local dataset, thus improving participation and fairness while avoiding stragglers.
\noindent\textbf{Probabilistic Inference.}
Probabilistic models (e.g. Bayesian Neural Networks) have a native way of quantifying the predictive uncertainty of the network across all stages of inference \cite{gal2016dropout}.
This property of stochastic models can be exploited by the exit policy, rendering BNNs a natural fit for early exiting methodologies.
\footnotesize
\end{document} |
\begin{document}
\pagestyle{headings}
\title{Branching Place Bisimilarity}
\author{Roberto Gorrieri\\
\institute{Dipartimento di Informatica --- Scienza e Ingegneria\\
Universit\`a di Bologna, \\Mura A. Zamboni 7,
40127 Bologna, Italy}
\email{{\small roberto.gorrieri@unibo.it}}
}
\maketitle
\begin{abstract}
Place bisimilarity $\sim_p$ is a behavioral equivalence for finite Petri nets, proposed in \cite{ABS91}
and proved decidable in \cite{Gor21}. In this paper we propose an extension to finite Petri nets with silent moves
of the place bisimulation idea, yielding {\em branching} place bisimilarity $\approx_p$, following the intuition
of branching bisimilarity \cite{vGW96} on labeled transition systems. We also propose a slightly coarser variant,
called branching {\em d-place} bisimilarity $\approx_d$, following the intuition of d-place bisimilarity in \cite{Gor21}.
We prove that $\approx_p$ and $\approx_d$ are decidable equivalence relations. Moreover, we prove that $\approx_d$ is strictly finer than
branching fully-concurrent bisimilarity \cite{Pin93,Gor20c}, essentially because $\approx_d$ does not consider
as unobservable those $\tau$-labeled net transitions
with pre-set size larger than one, i.e., those resulting from (multi-party) interaction.
\end{abstract}
\section{Introduction}
Place bisimilarity,
originating from an idea by Olderog \cite{Old} (under the name of strong bisimilarity) and then refined by Autant,
Belmesk and Schnoebelen \cite{ABS91},
is a behavioral equivalence over finite Place/Transition Petri nets (P/T nets, for short),
based on relations over the {\em finite set of net places}, rather than over the
(possibly infinite) set of net markings.
This equivalence does respect the expected causal behavior of Petri nets; in fact, van Glabbeek
proved in \cite{G15}
that place bisimilarity is slightly finer than
{\em structure preserving bisimilarity} \cite{G15}, in turn
slightly finer than {\em fully-concurrent bisimilarity} \cite{BDKP91}.
Place bisimilarity was proved decidable in \cite{Gor21} and it is the first {\em sensible} (i.e., fully respecting causality
and the branching structure)
behavioral equivalence
which was proved decidable over finite (possibly unbounded) Petri nets (with the exception of net isomorphism).
In \cite{Gor21}, a slightly coarser variant is proposed, called {\em d-}place bisimilarity, which allows to relate not only places
to places, but also
places to the empty marking. D-place bisimilarity was proved to be finer than fully-concurrent bisimilarity and, to date, it is
the coarsest sensible behavioral relation to be decidable on finite Petri nets (when all the transition labels are considered as observable).
This paper aims at extending the place bisimulation idea to Petri nets {\em with silent transitions}, i.e., transitions with
unobservable label, usually denoted by $\tau$.
To this aim, we take inspiration from {\em branching} bisimilarity, proposed in \cite{vGW96} over
labeled transition systems \cite{Kel76,GV15} (LTSs, for short),
a behavioral relation more appropriate than weak bisimilarity \cite{Mil89}, as it better respects the timing of choices.
The main problem we had to face was to properly understand if and when a silent net transition can
be really considered as potentially
unobservable. In fact, while in the theory of sequential, nondeterministic systems, modeled by means of LTSs,
all the $\tau$-labeled transitions can, to some extent, be abstracted away,
in the theory of Petri nets (and of distributed systems, in general), it is rather questionable whether this is the case.
For sure a silent net transition with pre-set and post-set of size 1 may be abstracted away, as it represents some internal
computation, local to a single sequential component of the distributed system. However, a $\tau$-labeled net transition
with pre-set of size 2 or more, which models a (possibly, multi-party) interaction, is really observable: since to
establish the synchronization
it is necessary to use some communication infrastructure, for sure one observer can see that such a synchronization takes place.
This is, indeed, what happens over the Internet: a communication via IP is an observable event,
even if the actual content of the message may be unobservable (in case it is encrypted).
For this reason, our definition of {\em branching place bisimulation} considers as potentially unobservable only the so-called
{\em $\tau$-sequential} transitions, i.e., those silent transitions whose pre-set and post-set have size 1.
We prove that branching place bisimilarity $\approx_p$ is an equivalence relation, where the crucial step in this proof
is to prove that the relational composition of two branching place bisimulations is a branching place bisimulation.
We also define a slightly coarser variant, called branching d-place bisimilarity $\approx_d$, that allows to relate
a place not only to another place, but also to the empty marking.
Of course, $\approx_d$ is rather discriminating if compared to other behavioral semantics; in particular, we prove
that it is strictly finer than {\em branching fully-concurrent bisimilarity} \cite{Pin93,Gor20c}, essentially because the latter
may also abstract w.r.t. silent transitions that are not $\tau$-sequential (and also may relate markings of different size).
The main contribution of this paper is to show that $\approx_p$ is decidable for finite P/T nets
(and, in a similar manner, that also $\approx_d$ is so).
The proof idea is as follows. As a place relation $R \subseteq S \times S$ is finite if the set $S$
of places is finite,
there are finitely many place relations for a finite net. We can list all these relations, say $R_1, R_2, \ldots R_n$.
It is decidable whether a place relation $R_i$ is a branching
place bisimulation by checking two {\em finite} conditions over
a {\em finite} number of marking pairs: this is a non-obvious observation, as a branching place bisimulation requires
that the place bisimulation game holds for the infinitely many pairs $m_1$ and $m_2$
which are {\em bijectively} related via $R_i$ (denoted by $(m_1, m_2) \in R_i^\oplus$).
Hence, to decide whether
$m_1 \approx_p m_2$, it is enough to check, for $i = $ $1, \ldots n$, whether $R_i$ is a branching place
bisimulation and, in such a case, whether $(m_1, m_2) \in R_i^\oplus$.
The paper is organized as follows. Section \ref{def-sec} recalls the basic definitions about Petri nets, their causal semantics and also the definition of branching fully-concurrent bisimilarity.
Section \ref{place-sec} recalls the main definitions and results about place bisimilarity and d-place bisimilarity from \cite{Gor21}.
Section \ref{br-place-sec} introduces branching place bisimulation and proves that the largest such relation is an equivalence relation.
Section \ref{decid-br-place-sec} shows that $\approx_p$ is decidable.
Section \ref{case-sec} presents a small case study about a producer-consumer system in order to show the real applicability of the approach.
Section \ref{br-d-place-sec} introduces branching d-place bisimilarity $\approx_d$, hints that it is a coarser, decidable equivalence relation
and proves that it is strictly finer than branching fully-concurrent bisimilarity.
Finally, in Section \ref{conc-sec} we discuss the pros and cons of branching (d-)place bisimilarity,
and describe related literature and some future research.
\section{Basic Definitions} \label{def-sec}
\begin{definition}\label{multiset}{\bf (Multiset)}\index{Multiset}
Let ${\mathbb N}$ be the set of natural numbers.
Given a finite set $S$, a {\em multiset} over $S$ is a function $m: S \rightarrow{\mathbb N}$.
The {\em support} set $dom(m)$ of $m$ is $\{ s \in S \;\;\big|\;\; m(s) \neq 0\}$.
The set of all multisets
over $S$, denoted by ${\mathcal M}(S)$, is ranged over by $m$.
We write $s \in m$ if $m(s)>0$.
The {\em multiplicity} of $s$ in $m$ is given by the number $m(s)$. The {\em size} of $m$, denoted by $|m|$,
is the number $\sum_{s\in S} m(s)$, i.e., the total number of its elements.
A multiset $m$ such
that $dom(m) = \emptyset$ is called {\em empty} and is denoted by $\theta$.
We write $m \subseteq m'$ if $m(s) \leq m'(s)$ for all $s \in S$.
{\em Multiset union} $\_ \oplus \_$ is defined as follows: $(m \oplus m')(s)$ $ = m(s) + m'(s)$;
it is commutative, associative and has $\theta$ as neutral element.
{\em Multiset difference} $\_ \ominus \_$ is defined as follows:
$(m_1 \ominus m_2)(s) = max\{m_1(s) - m_2(s), 0\}$.
The {\em scalar product} of a number $j$ with $m$ is the multiset $j \cdot m$ defined as
$(j \cdot m)(s) = j \cdot (m(s))$. By $s_i$ we also denote the multiset with $s_i$ as its only element.
Hence, a multiset $m$ over $S = \{s_1, \ldots, s_n\}$
can be represented as $k_1\cdot s_{1} \oplus k_2 \cdot s_{2} \oplus \ldots \oplus k_n \cdot s_{n}$,
where $k_j = m(s_{j}) \geq 0$ for $j= 1, \ldots, n$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{definition}\label{pt-net-def}{\bf (Place/Transition net)}
A labeled {\em Place/Transition} Petri net (P/T net for short) is a tuple $N = (S, A, T)$, where
\begin{itemize}
\item
$S$ is the finite set of {\em places}, ranged over by $s$ (possibly indexed),
\item
$A$ is the finite set of {\em labels}, ranged over by $\ell$ (possibly indexed), and
\item
$T \subseteq {(\mathcal M}(S) \setminus \{\theta\}) \times A \times {\mathcal M}(S)$
is the finite set of {\em transitions},
ranged over by $t$ (possibly indexed).
\end{itemize}
Given a transition $t = (m, \ell, m')$,
we use the notation:
\begin{itemize}
\item $\pre t$ to denote its {\em pre-set} $m$ (which cannot be empty) of tokens to be consumed;
\item $l(t)$ for its {\em label} $\ell$, and
\item $\post t$ to denote its {\em post-set} $m'$ of tokens to be produced.
\end{itemize}
Hence, transition $t$ can be also represented as $\pre t \deriv{l(t)} \post t$.
We also define the {\em flow function}
{\mbox flow}$: (S \times T) \cup (T \times S) \rightarrow {\mathbb N}$ as follows:
for all $s \in S$, for all $t \in T$,
{\mbox flow}$(s,t) = \pre{t}(s)$ and {\mbox flow}$(t,s) = \post{t}(s)$.
We will use $F$ to denote the {\em flow relation}
$\{(x,y) \;\;\big|\;\; x,y \in S \cup T \, \wedge \, ${\mbox flow}$(x,y) > 0\}$.
Finally, we define pre-sets and post-sets also for places as: $\pre s = \{t \in T \;\;\big|\;\; s \in \post t\}$
and $\post s = \{t \in T \;\;\big|\;\; s \in \pre t\}$. Note that while the pre-set (post-set) of a transition is, in general,
a multiset, the pre-set (post-set) of a place is a set.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Graphically, a place is represented by a little circle and a transition by a little box. These are
connected by directed arcs, which
may be labeled by a positive integer, called the {\em weight}, to denote the number of tokens
consumed (when the arc goes from a place to the transition) or produced (when the arc goes from the transition to a
place) by the execution of the transition;
if the number is omitted, then the weight default value is $1$.
\begin{definition}\label{net-system}{\bf (Marking, P/T net system)}
A multiset over $S$ is called a {\em marking}. Given a marking $m$ and a place $s$,
we say that the place $s$ contains $m(s)$ {\em tokens}, graphically represented by $m(s)$ bullets
inside place $s$.
A {\em P/T net system} $N(m_0)$ is a tuple $(S, A, T, m_{0})$, where $(S,A, T)$ is a P/T net and $m_{0}$ is
a marking over $S$, called
the {\em initial marking}. We also say that $N(m_0)$ is a {\em marked} net.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\subsection{Sequential Semantics}\label{seql-sem-sec}
\begin{definition}\label{firing-system}{\bf (Enabling, firing sequence, reachable marking, safe net)}
Given a P/T net $N = (S, A, T)$, a transition $t $ is {\em enabled} at $m$,
denoted by $m[t\rangle$, if $\pre t \subseteq m$.
The execution (or {\em firing}) of $t$ enabled at $m$ produces the marking $m' = (m \ominus \pre t) \oplus \post t$.
This is written $m[t\rangle m'$.
A {\em firing sequence} starting at $m$ is defined inductively as follows:
\begin{itemize}
\item $m[\epsilon\rangle m$ is a firing sequence (where $\epsilon$ denotes an empty sequence of transitions) and
\item if $m[\sigma\rangle m'$ is a firing sequence and $m' [t\rangle m''$, then
$m [\sigma t\rangle m''$ is a firing sequence.
\end{itemize}
If $\sigma = t_1 \ldots t_n$ (for $n \geq 0$) and $m[\sigma\rangle m'$ is a firing sequence, then there exist $m_1, \ldots, m_{n+1}$ such that
$m = m_1[t_1\rangle m_2 [t_2\rangle \ldots m_n [t_n\rangle m_{n+1} = m'$, and
$\sigma = t_1 \ldots t_n$ is called a {\em transition sequence} starting at $m$ and ending
at $m'$.
The definition of pre-set and post-set can be extended to transition sequences as follows:
$\pre{\epsilon} = \theta$, $\pre{(t \sigma)} = \pre{t} \oplus (\pre{\sigma} \ominus \post{t})$, $\post{\epsilon} = \theta$,
$\post{(t \sigma)} = \post{\sigma} \oplus (\post{t} \ominus \pre{\sigma})$.
The set of {\em reachable markings} from $m$ is
$[m\rangle = \{m' \;\;\big|\;\; \exists \sigma.$ $
m[\sigma\rangle m'\}$.
The P/T net system $N = $ $(S, A, T, m_0)$ is {\em safe} if for each
$m \in [m_0\rangle$ and for all $s \in S$, we have that $m(s) \leq 1$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Note that the reachable markings of a P/T net can be countably infinitely many when the net is not bounded, i.e.,
when the number of tokens on some places can grow unboundedly.
Now we recall a simple behavioral equivalence on P/T nets, defined directly over the markings of the net, which
compares two markings with respect to their sequential behavior.
\begin{definition}\label{def-int-bis}{\bf (Interleaving Bisimulation)}
Let $N = (S, A, T)$ be a P/T net.
An {\em interleaving bisimulation} is a relation
$R\subseteq {\mathcal M}(S) \times {\mathcal M}(S)$ such that if $(m_1, m_2) \in R$
then
\begin{itemize}
\item $\forall t_1$ such that $m_1[t_1\rangle m'_1$, $\exists t_2$ such that $m_2[t_2\rangle m'_2$
with $l(t_1) = l(t_2)$ and $(m'_1, m'_2) \in R$,
\item $\forall t_2$ such that $m_2[t_2\rangle m'_2$, $\exists t_1$ such that $m_1[t_1\rangle m'_1$
with $l(t_1) = l(t_2)$ and $(m'_1, m'_2) \in R$.
\end{itemize}
Two markings $m_1$ and $m_2$ are {\em interleaving bisimilar},
denoted by $m_1 \sim_{int} m_2$, if there exists an interleaving bisimulation $R$ such that $(m_1, m_2) \in R$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Interleaving bisimilarity was proved undecidable in \cite{Jan95} for P/T nets having at least two unbounded places,
with a proof based on the comparison of two {\em sequential} P/T nets,
where a P/T net is sequential if it does not offer any concurrent behavior. Hence, interleaving bisimulation equivalence is
undecidable even for the subclass of sequential finite P/T nets. Esparza observed in \cite{Esp98} that all the non-interleaving
bisimulation-based equivalences (in the spectrum ranging from interleaving bisimilarity to fully-concurrent bisimilarity \cite{BDKP91})
collapse to interleaving bisimilarity over sequential P/T nets. Hence, the proof in \cite{Jan95} applies to all these
non-interleaving bisimulation equivalences as well.
\begin{definition}\label{pt-silent-def}{\bf (P/T net with silent moves)}
A P/T net $N = (S, A, T)$ such that $\tau \in A$, where $\tau$
is the only invisible action that can be used to label transitions, is called a P/T net {\em with silent moves}.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
We now extend the behavioral equivalence above to P/T nets with silent transitions, following the intuition
of {\em branching} bisimulation \cite{vGW96} on LTSs.
\begin{definition}\label{br-int-bis}{\bf (Branching interleaving bisimulation)}
Let $N = (S, A, T)$ be a P/T net with silent moves.
A {\em branching} interleaving bisimulation is a relation
$R\subseteq {\mathcal M}(S) \times {\mathcal M}(S)$ such that if $(m_1, m_2) \in R$
then
\begin{itemize}
\item $\forall t_1$ such that $m_1[t_1\rangle m_1'$,
\begin{itemize}
\item[--] either $l(t_1) = \tau$ and $\exists \sigma_2$ such that $o(\sigma_2) = \epsilon$,
$m_2[\sigma_2\rangle m_2'$ with $(m_1, m_2') \in R$ and $(m_1', m_2') \in R$,
\item[--] or $\exists \sigma, t_2$ such that $o(\sigma) = \epsilon$, $l(t_1) = l(t_2)$,
$m_2[\sigma\rangle m [t_2\rangle m_2'$ with $(m_1, m) \in R$ and $(m_1', m_2') \in R$,
\end{itemize}
\item and, symmetrically, $\forall t_2$ such that $m_2[t_2 \rangle m_2'$.
\end{itemize}
Two markings $m_1$ and $m_2$ are {\em branching interleaving bisimilar},
denoted $m_1 \approx_{bri} m_2$, if there exists a branching interleaving bisimulation $R$
that relates them.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
This definition is not a rephrasing on nets of the original definition on LTSs in \cite{vGW96}, but rather of a slight variant
called {\em semi-branching bisimulation} \cite{vGW96,Bas96}, which gives rise to the same equivalence
as the original definition but has better mathematical properties; in particular it ensures \cite{Bas96} that the relational composition of
branching bisimulations is a branching bisimulation.
Note that a silent transition performed by one of the two markings may be matched by the other one also by idling:
this is due to the {\em either} case when $\sigma_2 = \epsilon$
(or $\sigma_1 = \epsilon$ for the symmetric case).
Branching interleaving bisimilarity $\approx_{bri}$, which is defined as the union of all the branching interleaving bisimulations,
is the largest branching interleaving bisimulation and also an equivalence relation.
Of course, also branching interleaving bisimilarity is undecidable for finite P/T nets.
\begin{remark}\label{stutt1-rem-int}{\bf (Stuttering property)}
It is not difficult to prove that, given a silent firing sequence
$m_1 [t_1\rangle m_2 [t_2\rangle m_3 \ldots m_n [t_n\rangle m_{n+1}$, with $l(t_i) = \tau$ for $i = 1, \ldots, n$,
if $m_1 \approx_{bri} m_{n+1}$,
then $m_i \approx_{bri} m_j$ for $i, j = 1, \ldots, n+1$.
This is sometimes called the {\em stuttering property}.
An important property holds for $\approx_{bri}$.
Consider the {\em either} case: since $(m_1, m_2) \in \approx_{bri}$
by hypothesis, and $m_2[\sigma_2\rangle m_2'$ with $(m_1, m_2') \in \approx_{bri}$ and $(m'_1, m_2') \in \approx_{bri}$,
it follows that $(m_2, m_2') \in \approx_{bri}$ because $\approx_{bri}$ is an equivalence relation. This implies that
all the markings in the silent path from $m_2$ to $m_2'$ are branching interleaving bisimilar (by the
{\em stuttering property}).
Similarly for the {\em or} case: if $m_1 [t_1\rangle m'_1$ (with $l(t_1)$ that can be $\tau$) and $m_2$ responds by performing
$m_2 [\sigma\rangle m [t_2\rangle m'_2$ with $m_1 \approx_{bri} m$, then, by transitivity, $m_2 \approx_{bri} m$;
hence, by the stuttering property,
$m_1$ is branching interleaving bisimilar to each marking in the path from $m_2$ to $m$.
This property will also hold for branching place bisimilarity, which we will introduce in Definition \ref{bpb-bis-def}.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{remark}
\subsection{Causality-based Semantics}\label{causal-sem-sec}
We outline some definitions, adapted from the literature
(cf., e.g., \cite{GR83,BD87,Old,G15,Gor22}).
\begin{definition}\label{acyc-def}{\bf (Acyclic net)}
A P/T net $N = (S, A, T)$ is
{\em acyclic} if its flow relation $F$ is acyclic (i.e., $\not \exists x$ such that $x F^+ x$,
where $F^+$ is the transitive closure of $F$).
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
The causal semantics of a marked P/T net is defined by a class of particular acyclic safe nets,
where places are not branched (hence they represent a single run) and all arcs have weight 1.
This kind of net is called {\em causal net}.
We use the name $C$ (possibly indexed) to denote a causal net, the set $B$ to denote its
places (called {\em conditions}), the set $E$ to denote its transitions
(called {\em events}), and
$L$ to denote its labels.
\begin{definition}\label{causalnet-def}{\bf (Causal net)}
A causal net is a finite marked net $C(\mathsf{m}_0) = (B,L,
E, \mathsf{m}_0)$ satisfying
the following conditions:
\begin{enumerate}
\item $C$ is acyclic;
\item $\forall b \in B \; \; | \pre{b} | \leq 1\, \wedge \, | \post{b} | \leq 1$ (i.e., the places are not branched);
\item $ \forall b \in B \; \; \mathsf{m}_0(b) = \begin{cases}
1 & \mbox{if $\; \pre{b} = \emptyset$}\\
0 & \mbox{otherwise;}
\end{cases}$\\
\item $\forall e \in E \; \; \pre{e}(b) \leq 1 \, \wedge \, \post{e}(b) \leq 1$ for all $b \in B$ (i.e., all the arcs have weight $1$).
\end{enumerate}
We denote by $Min(C)$ the set $\mathsf{m}_0$, and by $Max(C)$ the set
$\{b \in B \;\;\big|\;\; \post{b} = \emptyset\}$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Note that any reachable marking of a causal net is a set, i.e.,
this net is {\em safe}; in fact, the initial marking is a set and,
assuming by induction that a reachable marking $\mathsf{m}$ is a set and enables $e$, i.e.,
$\mathsf{m}[e\rangle \mathsf{m}'$,
then also
$\mathsf{m}' = (\mathsf{m} \ominus \pre{e}) \oplus \post{e}$ is a set,
as the net is acyclic and because
of the condition on the shape of the post-set of $e$ (weights can only be $1$).
As the initial marking of a causal net is fixed by its shape (according to item $3$ of
Definition \ref{causalnet-def}), in the following, in order to make the
notation lighter, we often omit the indication of the initial marking,
so that the causal
net $C(\mathsf{m}_0)$ is denoted by $C$.
\begin{definition}\label{trans-causal}{\bf (Moves of a causal net)}
Given two causal nets $C = (B, L, E, \mathsf{m}_0)$
and $C' = (B', L, E', \mathsf{m}_0)$, we say that $C$
moves in one step to $C'$ through $e$, denoted by
$C [e\rangle C'$, if $\; \pre{e} \subseteq Max(C)$, $E' = E \cup \{e\}$
and $B' = B \cup \post{e}$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{definition}\label{folding-def}{\bf (Folding and Process)}
A {\em folding} from a causal net $C = (B, L, E, \mathsf{m}_0)$ into a net system
$N(m_0) = (S, A, T, m_0)$ is a function $\rho: B \cup E \to S \cup T$, which is type-preserving, i.e.,
such that $\rho(B) \subseteq S$ and $\rho(E) \subseteq T$, satisfying the following:
\begin{itemize}
\item $L = A$ and $\mathsf{l}(e) = l(\rho(e))$ for all $e \in E$;
\item $\rho(\mathsf{m}_0) = m_0$, i.e., $m_0(s) = | \rho^{-1}(s) \cap \mathsf{m}_0 |$;
\item $\forall e \in E, \rho(\pre{e}) = \pre{\rho(e)}$, i.e., $\rho(\pre{e})(s) = | \rho^{-1}(s) \cap \pre{e} |$
for all $s \in S$;
\item $\forall e \in E, \, \rho(\post{e}) = \post{\rho(e)}$, i.e., $\rho(\post{e})(s) = | \rho^{-1}(s) \cap \post{e} |$
for all $s \in S$.
\end{itemize}
A pair $(C, \rho)$, where $C$ is a causal net and $\rho$ a folding from
$C$ to a net system $N(m_0)$, is a {\em process} of $N(m_0)$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{definition}\label{trans-process}{\bf (Moves of a process)}
Let $N(m_0) = (S, A, T, m_0)$ be a net system
and let $(C_i, \rho_i)$, for $i = 1, 2$, be two processes of $N(m_0)$.
We say that $(C_1, \rho_1)$
moves in one step to $(C_2, \rho_2)$ through $e$, denoted by
$(C_1, \rho_1) \deriv{e} (C_2, \rho_2)$, if $C_1 [e\rangle C_2$
and $\rho_1 \subseteq \rho_2$.
\noindent
If $\pi_1 = (C_1, \rho_1)$ and $\pi_2 = (C_2, \rho_2)$, we denote
the move as $\pi_1 \deriv{e} \pi_2$.
We can extend the definition of move to transition sequences as follows:
\begin{itemize}
\item $\pi \Deriv{\epsilon} \pi$, where $\epsilon$ is the empty transition sequence, is a move sequence
and
\item if $\pi \deriv{e} \pi'$ and
$\pi' \Deriv{\sigma} \pi''$, then
$\pi \Deriv{e \sigma} \pi''$ is a move sequence. \\[-1.1cm]
\end{itemize}
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{definition}\label{po-process-def}{\bf (Partial orders of events from a process)}
From a causal net $C = (B, L, E, \mathsf{m}_0)$, we can
extract the {\em partial order of its events}
$\mathsf{E}_{\mathsf{C}} = (E, \preceq)$,
where $e_1 \preceq e_2$ if there is a path in the net from $e_1$ to $e_2$, i.e., if $e_1 \mathsf{F}^* e_2$, where
$\mathsf{F}^*$ is the reflexive and transitive closure of
$\mathsf{F}$, which is the flow relation for $C$.
Given a process $\pi = (C, \rho)$, we denote $\preceq$ as $\leq_\pi$,
i.e. given $e_1, e_2 \in E$, $e_1 \leq_\pi e_2$ if and only if $e_1 \preceq e_2$.
We can also extract the
{\em abstract} partial order of its {\em observable} events $\mathsf{O}_{C} = (E', \preceq')$,
where $E' = \{e \in E \;\;\big|\;\; \mathsf{l}(e) \neq \tau\}$ and
$\preceq' = \preceq \upharpoonright E'$.
Two partial orders $(E_1, \preceq_1)$ and $(E_2, \preceq_2)$ are isomorphic
if there is a label-preserving, order-preserving bijection $g: E_1 \to E_2$, i.e., a bijection such that
$\mathsf{l}_1(e) = \mathsf{l}_2(g(e))$ and $e \preceq_1 e'$ if and only if $g(e) \preceq_2 g(e')$.
We also say that $g$ is an {\em abstract} (or {\em concrete})
{\em event isomorphism} between
$C_1$ and
$C_2$ if it is an isomorphism between their associated abstract (or concrete)
partial orders of events $\mathsf{O}_{C_1}$
and $\mathsf{O}_{C_2}$ (or $\mathsf{E}_{C_1}$
and $\mathsf{E}_{C_2}$).
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
In case of P/T nets without silent transitions, the coarsest behavioral equivalence fully respecting causality and the branching time is
the largest
{\em fully-concurrent bisimulation} (fc-bisimulation, for short) \cite{BDKP91}, whose definition
was inspired by previous notions of equivalence on other models of concurrency:
{\em history-preserving bisimulation}, originally defined in \cite{RT88} under the name of {\em behavior-structure bisimulation}, and
then elaborated on in \cite{vGG89} (who called it by this name) and also independently defined in \cite{DDM89}
(who called it {\em mixed ordering bisimulation}). If two markings are fully-concurrent bisimilar, then they generate
processes with isomorphic concrete partial orders. Its definition follows.
\begin{definition}\label{sfc-bis-def}{\bf (Fully-concurrent bisimulation)}
Given a P/T net $N = (S, A, T)$, a {\em fully-concurrent bisimulation}
is a relation $R$, composed of
triples of the form $(\pi_1, g, \pi_2) $, where, for $i = 1,2$,
$\pi_i = (C_i, \rho_i)$ is a process of $N(m_{0i})$ for some $m_{0i}$ and
$g$ is a concrete event isomorphism between $C_1$ and $C_2$, such that
if $(\pi_1, g, \pi_2) \in R$ then
\begin{itemize}
\item[$i)$]
$\forall t_1, \pi_1'$ such that $\pi_1 \deriv{e_1} \pi_1'$ with $\rho_1'(e_1) = t_1$, $\exists t_2, \pi_2', g'$ such that
\begin{enumerate}
\item $\pi_2 \deriv{e_2} \pi_2'$ with $\rho_2'(e_2) = t_2$;
\item $g' = g \cup \{(e_1, e_2)\}$, and finally,
\item $(\pi_1', g', \pi_2') \in R$;
\end{enumerate}
\item[$ii)$] and symmetrically, if $\pi_2$ moves first.
\end{itemize}
Two markings $m_1, m_2$ are fc-bisimilar,
denoted by $m_1 \sim_{fc} m_2$, if a fully-concurrent bisimulation $R$ exists,
containing a triple $(\pi^0_1, \emptyset, \pi^0_2)$ where
$\pi^0_i = (C^0_i, \rho^0_i)$ such that
$C^0_i$ contains no events and
$\rho^0_i(Min(C^0_i)) = \rho^0_i(Max(C^0_i))$ $ = m_i\;$ for $i = 1, 2$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Fully-concurrent bisimilarity $\sim_{fc}$ is an equivalence relation, which is strictly finer than interleaving bisimilarity $\sim_{int}$
and also undecidable for finite P/T nets.
An extension to P/T nets with silent transitions can be the following branching fully-concurrent bisimilarity \cite{Pin93,Gor20c}.
\begin{definition}\label{brfc-bis-def}{\bf (Branching fc-bisimulation)}
Given a net $N = (S, A, T)$, a {\em branching fully-concurrent bisimulation}
is a relation $R$, composed of
triples of the form $(\pi_1, g, \pi_2) $, where, for $i = 1,2$,
$\pi_i = (C_i, \rho_i)$ is a process of $N(m_{0i})$ for some $m_{0i}$,
and $g$ is an abstract event isomorphism between $C_1$ and $C_2$, such that
if $(\pi_1, g, \pi_2) \in R$ then
\begin{itemize}
\item[$i)$]
$\forall t_1, \pi_1'$ such that $\pi_1 \deriv{e_1} \pi_1'$ with $\rho_1'(e_1) = t_1$,
\begin{itemize}
\item {\em either} $l(e_1) = \tau$ and there exist $\sigma_2$ (with $o(\sigma_2) = \epsilon$) and $\pi_2'$
such that $\pi_2 \Deriv{\sigma_2} \pi_2'$, $(\pi_1, g, \pi_2') \in R$ and $(\pi_1', g, \pi_2') \in R$;
\item {\em or} $\exists \sigma$ (with $o(\sigma) = \epsilon$), $e_2, \pi_2', \pi_2'', g'$
such that
\begin{enumerate}
\item $\pi_2 \Deriv{\sigma} \pi_2' \deriv{e_2} \pi_2''$;
\item if $l(e_1) = \tau$, then $l(e_2) = \tau$ and $g' = g$;
otherwise, $l(e_1) = l(e_2)$ and
$g' = g \cup \{(e_1, e_2)\}$;
\item and finally, $(\pi_1, g, \pi_2') \in R$ and
$(\pi_1', g', \pi_2'') \in R$;
\end{enumerate}
\end{itemize}
\item[$ii)$] symmetrically, if $\pi_2$ moves first.
\end{itemize}
Two markings $m_{1}$ and $m_2$ of $N$ are bfc-bisimilar,
denoted by $m_{1} \approx_{bfc} m_{2}$,
if there exists a branching fully-concurrent bisimulation $R$ with a triple
$((C^0_1, \rho_1), g_0, (C^0_2, \rho_2))$, where
$C^0_i$ contains no transitions, $g_0$ is empty and
$\rho_i(Min( C^0_i)) = \rho_i(Max( C^0_i)) = m_i\;$ for $i = 1, 2$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Branching fully-concurrent bisimilarity $\approx_{bfc}$ is an equivalence relation \cite{Gor20c}, which is strictly finer than
branching interleaving bisimilarity $\approx_{bri}$ and also undecidable for finite P/T nets.
\section{Place Bisimilarity} \label{place-sec}
We now present place bisimulation, introduced in \cite{ABS91} as an
improvement of {\em strong bisimulation}, a behavioral relation proposed by Olderog in \cite{Old} on safe
nets which fails to induce an equivalence relation.
Our definition is formulated in a slightly different way, but it is coherent with the original one.
First, an auxiliary definition.
\begin{definition}\label{add-eq}{\bf (Additive closure)}
Given a P/T net $N = (S, A, T)$ and a {\em place relation} $R \subseteq S \times S$, we define a {\em marking relation}
$R^\oplus \, \subseteq \, {\mathcal M}(S) \times {\mathcal M}(S)$, called
the {\em additive closure} of $R$,
as the least relation induced by the following axiom and rule.
$\begin{array}{lllllllllll}
\bigfrac{}{(\theta, \theta) \in R^\oplus} & \; \; \; & \; \; \;
\bigfrac{(s_1, s_2) \in R \; \; \; (m_1, m_2) \in R^\oplus }{(s_1 \oplus m_1, s_2 \oplus m_2) \in R^\oplus } \\
\end{array}$
\\[-.2cm]
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Note that, by definition, two markings are related by $R^\oplus$ only if they have the same size;
in fact, the axiom states that
the empty marking is related to itself, while the rule, assuming by induction
that $m_1$ and $m_2$ have the same size, ensures that $s_1 \oplus m_1$ and
$s_2 \oplus m_2$ have the same size.
\begin{proposition}\label{fin-k-add}
For each relation $R \subseteq S \times S$, if $(m_1, m_2) \in R^\oplus$,
then $|m_1| = |m_2|$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
Note also that there may be several proofs of $(m_1, m_2) \in R^\oplus$,
depending on the chosen order of the elements
of the two markings and on the definition of $R$. For instance, if
$R = \{(s_1, s_3), (s_1, s_4), (s_2, s_3), (s_2, s_4)\}$,
then $(s_1 \oplus s_2, s_3 \oplus s_4) \in R^\oplus$ can be proved by
means of the pairs $(s_1, s_3)$ and $(s_2, s_4)$,
as well as by means of $(s_1, s_4), (s_2, s_3)$.
An alternative way to define that two markings $m_1$ and $m_2$
are related by $R^\oplus$ is to state that $m_1$ can be represented
as $s_1 \oplus s_2 \oplus \ldots \oplus s_k$,
$m_2$ can be represented as $s_1' \oplus s_2' \oplus \ldots \oplus s_k'$
and $(s_i, s_i') \in R$ for $i = 1, \ldots, k$.
In fact, a naive algorithm for checking
whether $(m_1, m_2) \in R^\oplus$ would simply consider
$m_1$ represented as $s_1 \oplus s_2 \oplus \ldots \oplus s_k$ and then scan all the possible permutations of
$m_2$, each represented as $s'_1 \oplus s'_2 \oplus \ldots \oplus s'_k$,
to check that $(s_i, s_i') \in R$ for $i = 1, \ldots, k$. Of course, this naive algorithm is in $O(k!)$.
\begin{example}\label{nsubtractive}
Consider $R = \{(s_1, s_3),$ $(s_1, s_4), (s_2, s_4)\}$, which is not an equivalence relation.
Suppose we want to check that $(s_1 \oplus s_2, s_4 \oplus s_3) \in R^\oplus$.
If we start by matching $(s_1, s_4) \in R$, then we fail because the residual $(s_2, s_3)$ is not in $R$.
However, if we permute the second marking to $s_3 \oplus s_4$, then we succeed because the required pairs
$(s_1, s_3)$ and $(s_2, s_4)$ are both in $R$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
Nonetheless, the problem of checking whether $(m_1, m_2) \in R^\oplus$ has polynomial time complexity
because it can be considered as an instance of
the problem of finding a perfect matching in a bipartite graph,
where the nodes of the two partitions are the tokens in the
two markings, and the edges
are defined by the relation $R$.
In fact,
the definition of the bipartite graph takes $O(k^2)$ time (where $k = |m_1| = |m_2|$) and, then,
the Hopcroft-Karp-Karzanov algorithm \cite{HK73,Kar73} for computing the maximum matching has
worst-case time complexity $O(h\sqrt{k})$, where $h$ is the number of the edges in the bipartite graph ($h \leq k^2$) and
to check whether the maximum matching is perfect can be done simply by checking that the size of the matching equals the number of nodes in each partition, i.e., $k$.
Hence, in evaluating the complexity of the algorithm in Section \ref{decid-br-place-sec}, we assume that the complexity of
checking whether $(m_1, m_2) \in R^\oplus$ is in $O(k^2 \sqrt{k})$.
A related problem is that of computing, given a marking $m_1$ of size $k$, the set of all the markings $m_2$ such that
$(m_1, m_2) \in R^\oplus$. This problem can be solved with a worst-case time complexity of $O(n^k)$ because each of the $k$
tokens in $m_1$ can be related via $R$ to $n$ places at most.
\begin{proposition}\label{add-prop1}\cite{Gor17b}
For each place relation $R \subseteq S \times S$, the following hold:
\begin{enumerate}
\item If $R$ is an equivalence relation, then $R^\oplus$ is an equivalence relation.
\item If $R_1 \subseteq R_2$, then $R_1^\oplus \subseteq R_2^\oplus$, i.e., the additive closure is monotone.
\item If $(m_1, m_2) \in R^\oplus$ and $(m_1', m_2') \in R^\oplus$,
then $(m_1 \oplus m_1', m_2 \oplus m_2') \in R^\oplus$, i.e., the additive closure is additive.\\[-1.1cm]
\end{enumerate}
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
Now we list some useful, and less obvious, properties of additively closed place relations (proof in \cite{Gor17b}).
\begin{proposition}\label{add-prop2}
For each family of place relations $R_i \subseteq S \times S$, the following hold:
\begin{enumerate}
\item $\emptyset^\oplus = \{(\theta, \theta)\}$, i.e., the additive closure of the empty place relation
is a singleton marking relation, relating the empty marking to itself.
\item $(\mathcal{I}_S)^\oplus = \mathcal{I}_M$, i.e., the additive closure of the
identity relation on places $\mathcal{I}_S = \{(s, s) \;\;\big|\;\; s \in S\}$ is the identity relation on markings
$\mathcal{I}_M = \{(m, m) \;\;\big|\;\; m \in {\mathcal M}(S)\}$.
\item $(R^\oplus)^{-1} = (R^{-1})^\oplus$, i.e., the inverse of an additively closed relation $R$ is the additive closure
of its inverse $R^{-1}$.
\item $(R_1 \circ R_2)^\oplus = (R_1^\oplus) \circ (R_2^\oplus)$, i.e., the additive closure of the composition of two
place relations is the composition of their additive closures.\\[-1.1cm]
\end{enumerate}
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
\begin{definition}\label{def-place-bis}{\bf (Place Bisimulation)}
Let $N = (S, A, T)$ be a P/T net.
A {\em place bisimulation} is a relation
$R\subseteq S \times S$ such that if $(m_1, m_2) \in R^\oplus$
then
\begin{itemize}
\item $\forall t_1$ such that $m_1[t_1\rangle m'_1$, $\exists t_2$ such that $m_2[t_2\rangle m'_2$
with $(\pre{t_1}, \pre{t_2}) \in R^\oplus$, $l(t_1) = l(t_2)$, $(\post{t_1}, \post{t_2}) \in R^\oplus$ and $(m'_1, m'_2) \in R^\oplus$,
\item $\forall t_2$ such that $m_2[t_2\rangle m'_2$, $\exists t_1$ such that $m_1[t_1\rangle m'_1$
with $(\pre{t_1}, \pre{t_2}) \in R^\oplus$, $l(t_1) = l(t_2)$, $(\post{t_1}, \post{t_2}) \in R^\oplus$ and $(m'_1, m'_2) \in R^\oplus$.
\end{itemize}
Two markings $m_1$ and $m_2$ are {\em place bisimilar}, denoted by
$m_1 \sim_p m_2$, if there exists a place bisimulation $R$ such that $(m_1, m_2) \in R^\oplus$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{proposition}\label{place-bis-eq}\cite{ABS91,Gor21}
For each P/T net $N = (S, A, T)$, relation $\sim_p \; \subseteq \mathcal{M}(S) \times \mathcal{M}(S)$ is an equivalence relation.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
Place bisimilarity $\sim_p$ is decidable \cite{Gor21} and also sensible, i.e., it fully respects causality and the branching structure
because it is slightly finer than {\em structure-preserving} bisimilarity \cite{G15}, in turn slightly finer than fully-concurrent bisimilarity (that is undecidable).
By Definition \ref{def-place-bis}, place bisimilarity can be defined as follows:
$\sim_p = \bigcup \{ R^\oplus \;\;\big|\;\; R \mbox{ is a place bisimulation}\}.$
\noindent
By monotonicity of the additive closure (Proposition \ref{add-prop1}(2)), if $R_1 \subseteq R_2$, then
$R_1^\oplus \subseteq R_2^\oplus$. Hence, we can restrict our attention to maximal place bisimulations only:
$\sim_p = \bigcup \{ R^\oplus \;\;\big|\;\; R \mbox{ is a {\em maximal} place bisimulation}\}.$
\noindent
However, it is not true that
$\sim_p = (\bigcup \{ R \;\;\big|\;\; R \mbox{ is a {\em maximal} place bisimulation}\})^\oplus$
\noindent
because the union of place bisimulations may not be a place bisimulation,
so that its definition is not coinductive. We illustrate this fact by means of the following example.
\begin{figure}
\caption{A simple net}
\label{net-tau1}
\end{figure}
\begin{example}\label{primo-tau-ex}
Consider the simple P/T net in Figure \ref{net-tau1}, with $S = \{s_1, s_2, s_3\}$. It is rather easy to realize that the following two are maximal place bisimulations:
$R_1 = \mathcal{I}_S = \{(s_1, s_1), (s_2, s_2), (s_3, s_3)\}$ and
$R_2 = (R_1 \setminus \mathcal{I}_{\{s_1, s_2\}}) \cup \{(s_1, s_2), (s_2, s_1)\} = \{(s_1, s_2), (s_2, s_1), (s_3, s_3)\}$.
\noindent However, note that
their union $R = R_1 \cup R_2$ is not a place bisimulation. In fact, on the one hand $(s_1 \oplus s_1, s_1 \oplus s_2) \in R^\oplus$,
but, on the other hand, these two markings do not satisfy the place bisimulation game, because
$s_1 \oplus s_1$ is stuck, while $s_1 \oplus s_2$ can fire
the $a$-labeled transition, reaching $s_3$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
\section{Branching Place Bisimilarity} \label{br-place-sec}
Now we define a variant of place bisimulation, which is insensitive, to some extent, to $\tau$-sequential transitions,
i.e., $\tau$-labeled transitions whose pre-set and post-set have size one. In order to properly define this relation,
called {\em branching} place bisimulation, we need some auxiliary definitions.
\begin{definition}\label{tau-seq-def}{\bf ($\tau$-sequential)}
Given a P/T net $N = (S, A, T)$ with silent moves,
a transition $t \in T$ is {\em $\tau$-sequential} if $l(t) = \tau$ and $|\post{t}| = 1 = |\pre{t}|$.
A P/T net $N$ with silent moves is {\em $\tau$-sequential} if
$\forall t \in T$ if $l(t) = \tau$, then $t$ is $\tau$-sequential.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{definition}\label{tr-seq-silent-def}{\bf (Idling transitions, $\tau$-sequential (acyclic) transition sequence)}
Given a P/T net $N = (S, A, T)$ with silent moves, the set of {\em idling transitions} is $I(S) = \{i(s) \;\;\big|\;\; $ $ s \in S, i(s) = (s, \tau, s)\}$.
In defining {\em silent transition sequences}, we take the liberty of using also the fictitious idling transitions, so that, e.g.,
if $\sigma = i(s_1) i(s_2)$, then $s_1 \oplus s_2 [\sigma \rangle s_1 \oplus s_2$.
Given a transition sequence $\sigma$, its {\em observable label} $o(\sigma)$ is computed inductively as:
$\begin{array}{lllllllll}
& o(\epsilon) & = & \epsilon \\
& o(t \sigma) & = & \begin{cases}
l(t) o(\sigma) & \mbox{if $l(t) \neq \tau$}\\
o(\sigma) & \mbox{otherwise.}
\end{cases}
\end{array}$
A transition sequence $\sigma = t_1 t_2 \ldots t_n$ (where $n \geq 1$ and some of the $t_i$ can be idling transitions)
is {\em $\tau$-1-sequential} if $l(t_i) = \tau$, $|\post{t_i}| = 1 = |\pre{t_i}|$ for $i = 1, \ldots, n$,
and $\post{t_i} = \pre{t_{i+1}} $ for $i = 1, \ldots, n-1$, so that
$o(\sigma) = \epsilon$ and $|\post{\sigma}| = 1 = |\pre{\sigma}|$.
A transition sequence
$\sigma = \sigma_1 \sigma_2 \ldots \sigma_k$ is {\em $\tau$-k-sequential} if $\sigma_i$ is $\tau$-1-sequential for $i = 1, \ldots, k$,
$\pre{\sigma} = \pre{\sigma_1} \oplus \pre{\sigma_2} \oplus \ldots \oplus \pre{\sigma_k}$ and
$\post{\sigma} = \post{\sigma_1} \oplus \post{\sigma_2} \oplus \ldots \oplus \post{\sigma_k}$, so that
$o(\sigma) = \epsilon$ and $|\post{\sigma}| = k = |\pre{\sigma}|$.
We say that $\sigma$ is $\tau$-sequential if it is $\tau$-k-sequential for some $k \geq 1$.
A $\tau$-1-sequential
$\sigma = t_1 t_2 \ldots t_n$ is {\em acyclic} if $\pre{\sigma} =$
$ m_0 [t_1\rangle m_1 [t_2\rangle m_2$ $ \ldots m_{n-1}[t_n\rangle
m_{n} = \post{\sigma}$ and $m_i \neq m_j$ for all $i \neq j$, with $i,j \in \{1, 2, \ldots, n\}$.
A $\tau$-k-sequential
$\sigma = \sigma_1 \sigma_2 \ldots \sigma_k$ is {\em acyclic}
if $\sigma_i$ is $\tau$-1-sequential and acyclic for $i = 1, \ldots, k$.
We say that $\sigma$ is an acyclic $\tau$-sequential transition sequence if it is $\tau$-k-sequential and acyclic
for some $k \geq 1$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
\begin{remark}\label{tr-seq-silent-rem}{\bf (Acyclic $\tau$-sequential transition sequence)}
The definition of acyclic $\tau$-1-sequential transition sequence is a bit non-standard as it may allow for a cycle
when the initial marking $m_0$ and the final one $m_n$ are the same.
For instance, $\sigma = i(s) i(s)$ is cyclic, while the apparently cyclic subsequence $\sigma' = i(s)$ is actually acyclic, according to our definition.
Note that, given a $\tau$-1-sequential transition sequence $\sigma$, it is always possible to find an acyclic
$\tau$-1-sequential transition sequence $\sigma'$ such that $\pre{\sigma} = \pre{\sigma'}$ and $\post{\sigma} = \post{\sigma'}$.
For instance, if $\pre{\sigma} = m_0 [t_1\rangle m_1 [t_2\rangle m_2 \ldots m_{n-1}[t_n\rangle
m_{n} = \post{\sigma}$ and the only cycle is given by $m_i [t_{i+1}\rangle m_{i+1} \ldots m_{j-1}[t_j\rangle m_j$ with $m_i = m_j$ and $i \geq 1$, then
$\sigma' = t_1 t_2 \ldots t_i t_{j+1} \ldots t_n$ is acyclic and $\pre{\sigma} = \pre{\sigma'}$ and $\post{\sigma} = \post{\sigma'}$.
Note that the definition of acyclic $\tau$-k-sequential transition sequence does not ensure the absence of cycles even if all the
$\tau$-1-sequential transition sequences composing it are acyclic. For instance, consider
$\sigma = \sigma_1 \sigma_2$, where $\sigma_1 = i(s_1)$
and $\sigma_2 = i(s_2)$. According to our definition, $\sigma$ is $\tau$-2-sequential and
acyclic because both $\sigma_1$ and $\sigma_2$ are $\tau$-1-sequential and acyclic
(according to our definition); however,
the execution of the two idling transitions generates a cycle.
Note also that, given a $\tau$-k-sequential transition sequence $\sigma = \sigma_1 \sigma_2 \ldots \sigma_k$, it is always
possible to find an acyclic
$\tau$-k-sequential transition sequence $\sigma' = \sigma'_1 \sigma'_2 \ldots \sigma'_k$,
where $\sigma_i'$ is the acyclic $\tau$-1-sequential transition sequence corresponding to $\sigma_i$ for $i = 1, 2, \ldots, k$,
in such a way that $\pre{\sigma} = \pre{\sigma'}$ and $\post{\sigma} = \post{\sigma'}$.
Finally, we remark that, given two markings $m_1$ and $m_2$ of equal size $k$, it is decidable whether there exists an acyclic $\tau$-k-sequential transition
$\sigma$ such that $\pre{\sigma} = m_1$ and $\post{\sigma} = m_2$, essentially because this is similar to the reachability problem
(limited by using only $\tau$-sequential transitions), which is decidable \cite{Mayr84}.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{remark}
Branching place bisimulation is inspired by (semi-)branching
bisimulation \cite{vGW96,Bas96}, a behavioral relation defined over LTSs. In its definition,
we use {\em $\tau$-sequential transition sequences}, usually denoted by $\sigma$, which are sequences composed of $\tau$-sequential
transitions in $T \cup I(S)$, i.e., $\tau$-sequential net transitions and also idling transitions.
\begin{definition}\label{bpb-bis-def}{\bf (Branching place bisimulation)}
Given a P/T net $N = (S, A, T)$, a {\em branching place bisimulation} is a relation
$R\subseteq S \times S$ such that if $(m_1, m_2) \in R^\oplus$
\begin{enumerate}
\item $\forall t_1$ such that $m_1[t_1\rangle m_1'$
\begin{itemize}
\item[$(i)$] either $t_1$ is $\tau$-sequential and
$\exists \sigma, m_2'$ such that $\sigma$ is $\tau$-sequential,
$m_2[\sigma\rangle m_2'$, and $(\pre{t_1}, \pre{\sigma}) \in R$,
$(\pre{t_1}, \post{\sigma}) \in R$, $(\post{t_1}, \post{\sigma}) \in R$ and
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$;
\item[$(ii)$] or there exist $\sigma, t_2, m, m_2'$ such that
$\sigma$ is $\tau$-sequential, $m_2[\sigma\rangle m [t_2\rangle m_2'$,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_2}$,
$(\pre{t_1}, \pre{\sigma}) \in R^\oplus$, $(\pre{t_1}, \pre{t_2}) \in R^\oplus$,
$(\post{t_1}, \post{t_2}) \in R^\oplus$, and moreover,
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$;
\end{itemize}
\item and, symmetrically, $\forall t_2$ such that $m_2[t_2\rangle m_2'$
\begin{itemize}
\item[$(i)$] either $t_2$ is $\tau$-sequential and
$\exists \sigma, m_1'$ such that $\sigma$ is $\tau$-sequential,
$m_1[\sigma\rangle m_1'$, and $(\pre{\sigma}, \pre{t_2}) \in R$,
$(\post{\sigma}, \pre{t_2}) \in R$, $(\post{\sigma}, \post{t_2}) \in R$ and
$(m_1 \ominus \pre{\sigma}, m_2 \ominus \pre{t_2}) \in R^\oplus$;
\item[$(ii)$] or there exist $\sigma, t_1, m, m_1'$ such that
$\sigma$ is $\tau$-sequential, $m_1[\sigma\rangle m [t_1\rangle m_1'$,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_1}$,
$(\pre{\sigma}, \pre{t_2}) \in R^\oplus$, $(\pre{t_1}, \pre{t_2}) \in R^\oplus$,
$(\post{t_1}, \post{t_2}) \in R^\oplus$, and moreover,
$(m_1 \ominus \pre{\sigma}, m_2 \ominus \pre{t_2}) \in R^\oplus$.
\end{itemize}
\end{enumerate}
Two markings $m_1$ and $m_2$ are branching place bisimulation equivalent,
denoted by $m_1 \approx_{p} m_2$,
if there exists a branching place bisimulation $R$ such that $(m_1, m_2) \in R^\oplus$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Note that, in the {\em either} case, by additivity of $R^\oplus$ (cf. Proposition \ref{add-prop1}(3)), from
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$ and
$(\pre{t_1}, \post{\sigma}) \in R$, we get $(m_1, m_2') \in R^\oplus$, as well as, from
$(\post{t_1}, \post{\sigma}) \in R$ we get $(m_1', m_2') \in R^\oplus$. Similarly, for the {\em or} case,
from $(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$, $\post{\sigma} = \pre{t_2}$ and
$(\pre{t_1}, \pre{t_2}) \in R^\oplus$, we get $(m_1, m) \in R^\oplus$, as well as, from $(\post{t_1}, \post{t_2}) \in R^\oplus$,
we get $(m_1', m_2') \in R^\oplus$.
Note also that a $\tau$-sequential transition performed by one of the two markings may be matched by the other one also by idling:
this is due to the {\em either} case when $\sigma = i(s_2)$ for a suitable token $s_2$ such that the required properties are satisfied
(i.e., such that $(\pre{t_1}, \pre{\sigma}) \in R$,
$(\pre{t_1}, \post{\sigma}) \in R$, $(\post{t_1}, \post{\sigma}) \in R$ and
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$, where $\pre{\sigma} = \post{\sigma} = s_2$).
\begin{proposition}\label{prop-bpb-bis1}
For each P/T net $N = (S, A, T)$, the following hold:
\begin{itemize}
\item[$(i)$] The identity relation ${\mathcal I}_S$ is a branching place bisimulation.
\item[$(ii)$] The inverse relation $R^{-1}$ of a branching place bisimulation $R$ is a branching place bisimulation.
\end{itemize}
\proof
The proof is almost standard, due to Proposition \ref{add-prop2}.
Case $(i)$ is obvious: If $(m_1, m_2) \in {\mathcal I}_S^\oplus$, then $m_1 = m_2$, so that the branching
place bisimulation game can be mimicked trivially: given $(m, m) \in {\mathcal I}_S^\oplus$,
for all $t$ such that $m[t\rangle m'$, the other instance of $m$ in the pair replies with $m[t\rangle m'$
(case 1($ii$), with $\sigma = \epsilon$) and all the required conditions are trivially satisfied.
For case $(ii)$, assume $(m_2, m_1) \in (R^{-1})^\oplus$ and $m_2 [t_2\rangle m_2'$.
By Proposition \ref{add-prop2}(3), we have that
$(m_2, m_1) \in (R^\oplus)^{-1}$ and so $(m_1, m_2) \in R^\oplus$.
Since $R$ is a branching place bisimulation, we have that
\begin{itemize}
\item[$(i)$] either $t_2$ is $\tau$-sequential and
$\exists \sigma, m_1'$ such that $\sigma$ is $\tau$-sequential,
$m_1[\sigma\rangle m_1'$, and $(\pre{\sigma}, \pre{t_2}) \in R$,
$(\post{\sigma}, \pre{t_2}) \in R$, $(\post{\sigma}, \post{t_2}) \in R$ and,
moreover, $(m_1 \ominus \pre{\sigma}, m_2 \ominus \pre{t_2}) \in R^\oplus$;
\item[$(ii)$] or there exist $\sigma, t_1, m, m_1'$ such that
$\sigma$ is $\tau$-sequential, $m_1[\sigma\rangle m [t_1\rangle m_1'$,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_1}$,
$(\pre{\sigma}, \pre{t_2}) \in R^\oplus$, $(\pre{t_1}, \pre{t_2}) \in R^\oplus$,
$(\post{t_1}, \post{t_2}) \in R^\oplus$, and
$(m_1 \ominus \pre{\sigma}, m_2 \ominus \pre{t_2}) \in R^\oplus$.
\end{itemize}
Summing up, if $(m_2, m_1) \in (R^{-1})^\oplus$ and $m_2 [t_2\rangle m_2'$ (the case when $m_1$
moves first is symmetric, and so omitted), then
\begin{itemize}
\item[$(i)$] either $t_2$ is $\tau$-sequential and
$\exists \sigma, m_1'$ such that $\sigma$ is $\tau$-sequential,
$m_1[\sigma\rangle m_1'$, and $(\pre{t_2}, \pre{\sigma}) \in R^{-1}$,
$(\pre{t_2}, \post{\sigma}) \in R^{-1}$, $(\post{t_2}, \post{\sigma}) \in R^{-1}$ and
$(m_2 \ominus \pre{t_2}, m_1 \ominus \pre{\sigma}) \in (R^{-1})^\oplus$;
\item[$(ii)$] or there exist $\sigma, t_1, m, m_1'$ such that
$\sigma$ is $\tau$-sequential, $m_1[\sigma\rangle m [t_1\rangle m_1'$,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_1}$,
$(\pre{t_2}, \pre{\sigma}) \in (R^{-1})^\oplus$, $(\pre{t_2}, \pre{t_1}) \in (R^{-1})^\oplus$,
$(\post{t_2}, \post{t_1}) \in (R^{-1})^\oplus$, and, moreover,
$(m_2 \ominus \pre{t_2}, m_1 \ominus \pre{\sigma}) \in (R^{-1})^\oplus$;
\end{itemize}
so that $R^{-1}$ is a branching place bisimulation, indeed.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
Much more challenging is to prove that the relational composition of two branching place bisimulations is a branching place bisimulation. We need a technical lemma first.
\begin{lemma}\label{tau-lemma}
Let $N = (S, A, T)$ be a P/T net, and $R$ be a branching place bisimulation.
\begin{enumerate}
\item For each $\tau$-sequential transition sequence $\sigma_1$,
for all $m_2$ such that $(\pre{\sigma_1}, m_2) \in R^\oplus$, a $\tau$-sequential transition sequence
$\sigma_2$ exists
such that $m_2 = \pre{\sigma_2}$ and
$(\post{\sigma_1}, \post{\sigma_2}) \in R^\oplus$;
\item and symmetrically, for each $\tau$-sequential transition sequence $\sigma_2$,
for all $m_1$ such that $(m_1, \pre{\sigma_2}) \in R^\oplus$, a $\tau$-sequential transition sequence
$\sigma_1$ exists
such that $m_1 = \pre{\sigma_1}$ and
$(\post{\sigma_1}, \post{\sigma_2}) \in R^\oplus$.
\end{enumerate}
\proof By symmetry, we prove only case $1$, by induction on the length of $\sigma_1$.
{\em Base case}: $\sigma_1 = \epsilon$. In this trivial case, $\pre{\sigma_1} = \theta$ and so the only possible
$m_2$ is $\theta$ as well. We just take $\sigma_2 = \epsilon$ and all the required conditions are trivially satisfied.
{\em Inductive case}: $\sigma_1 = \delta_1 t_1$, where $t_1 \in T \cup I(S)$.
Hence, by inductive hypothesis, for each $m_2$ such that $(\pre{\delta_1}, m_2) \in R^\oplus$, we know that there
exists a $\delta_2$ such that $m_2 = \pre{\delta_2}$ and
$(\post{\delta_1}, \post{\delta_2}) \in R^\oplus$.
\noindent
If $t_1 = i(s)$, then we have to consider two subcases:
\begin{itemize}
\item if $s \in \post{\delta_1}$, then $\pre{\delta_1 t_1} = \pre{\delta_1}$ and $\post{\delta_1 t_1} = \post{\delta_1}$.
Hence, we can take $\sigma_2 = \delta_2$ and all the required conditions are trivially satisfied;
\item if $s \not\in \post{\delta_1}$, then $\pre{\delta_1 t_1} = \pre{\delta_1} \oplus s$ and $\post{\delta_1 t_1} = \post{\delta_1} \oplus s$.
Then, $\forall s'$ such that $(s, s') \in R$, we can take $\sigma_2 = \delta_2 i(s')$, so that
$(\pre{\delta_1 t_1}, \pre{\delta_2 i(s')}) \in R^\oplus$,
$(\post{\delta_1 t_1}, \post{\delta_2 i(s')}) \in R^\oplus$, as required.
\end{itemize}
\noindent
Also if $t_1 \in T$, we have to consider two subcases:
\begin{itemize}
\item If $s_1 = \pre{t_1} \in \post{\delta_1}$, then, since $(\post{\delta_1}, \post{\delta_2}) \in R^\oplus$,
there exists $s_2 \in \post{\delta_2}$ such that $(s_1, s_2) \in R$ and
$(\post{\delta_1} \ominus s_1, \post{\delta_2} \ominus s_2) \in R^\oplus$.
Then, by
Definition \ref{bpb-bis-def}, it follows that to the move $t_1 = s_1 \deriv{\tau} s_1'$:
\begin{itemize}
\item[$(i)$] Either there exist
$\sigma, s_2'$ such that $\sigma$ is $\tau$-sequential,
$s_2[\sigma\rangle s_2'$,
$(s_1, s_2') \in R$ and $(s_1', s_2') \in R$.
In this case, we take $\sigma_2 = \delta_2 \sigma$, so that
$(\pre{\delta_1 t_1}, \pre{\delta_2 \sigma}) \in R^\oplus$
(because $\pre{\delta_1 t_1} = \pre{\delta_1}$ and $\pre{\delta_2 \sigma} = \pre{\delta_2}$), and
$(\post{\delta_1 t_1}, \post{\delta_2 \sigma}) \in R^\oplus$
(because $\post{\delta_1 t_1} = (\post{\delta_1} \ominus s_1)\oplus s_1'$
and $\post{\delta_2 \sigma} = (\post{\delta_2}\ominus s_2)\oplus s_2'$), as required.
\item[$(ii)$] Or there exist $\sigma, t_2, \overline{s}, s_2'$ such that
$\sigma t_2$ is $\tau$-sequential, $\post{\sigma} = \pre{t_2}$,
$s_2[\sigma\rangle \overline{s} [t_2\rangle s_2'$,
$(s_1, \overline{s}) \in R$ and
$(s_1', s_2') \in R$.
In this case, we take $\sigma_2 = \delta_2 \sigma t_2$, so that
$(\pre{\delta_1 t_1}, \pre{\delta_2 \sigma t_2}) \in R^\oplus$, and, moreover,
$(\post{\delta_1 t_1}, \post{\delta_2 \sigma t_2}) \in R^\oplus$, as required.
\end{itemize}
\item If $s_1 = \pre{t_1} \not\in \post{\delta_1}$, then, for each $s_2$ such that $(s_1, s_2) \in R$,
we follow the same step as above (by Definition \ref{bpb-bis-def}).
It follows that to the move $t_1 = s_1 \deriv{\tau} s_1'$:
\begin{itemize}
\item[$(i)$] Either there exist
$\sigma, s_2'$ such that $\sigma$ is $\tau$-sequential,
$s_2[\sigma\rangle s_2'$,
$(s_1, s_2') \in R$ and $(s_1', s_2') \in R$.
In this case, we take $\sigma_2 = \delta_2 \sigma$, so that
$(\pre{\delta_1 t_1}, \pre{\delta_2 \sigma}) \in R^\oplus$
(because $\pre{\delta_1 t_1} = \pre{\delta_1} \oplus s_1$ and
$\pre{\delta_2 \sigma} = \pre{\delta_2} \oplus s_2$), and
$(\post{\delta_1 t_1}, \post{\delta_2 \sigma}) \in R^\oplus$
(because $\post{\delta_1 t_1} = \post{\delta_1} \oplus s_1'$
and $\post{\delta_2 \sigma} = \post{\delta_2}\oplus s_2'$), as required.
\item[$(ii)$] Or there exist $\sigma, t_2, \overline{s}, s_2'$ such that
$\sigma t_2$ is $\tau$-sequential, $\post{\sigma} = \pre{t_2}$,
$s_2[\sigma\rangle \overline{s} [t_2\rangle s_2'$,
$(s_1, \overline{s}) \in R$ and
$(s_1', s_2') \in R$.
In this case, we take $\sigma_2 = \delta_2 \sigma t_2$, so that
$(\pre{\delta_1 t_1}, \pre{\delta_2 \sigma t_2}) \in R^\oplus$, and, moreover,
$(\post{\delta_1 t_1}, \post{\delta_2 \sigma t_2}) \in R^\oplus$, as required.
\end{itemize}
\end{itemize}
\noindent
And so the proof is complete.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{lemma}
\begin{proposition}\label{prop-bpb-bis2}
For each P/T net $N = (S, A, T)$, the relational composition $R_1 \circ R_2$ of
two branching place bisimulations $R_1$ and $R_2$ is a branching place bisimulation.
\proof
Assume $(m_1, m_3) \in (R_1 \circ R_2)^\oplus$ and $m_1 [t_1\rangle m'_1$.
By Proposition \ref{add-prop2}(4), we have that
$(m_1, m_3) \in (R_1)^\oplus \circ (R_2)^\oplus$, and so $m_2$ exists such that
$(m_1, m_2) \in R_1^\oplus$ and $(m_2, m_3) \in R_2^\oplus$.
As $(m_1, m_2) \in R_1^\oplus$ and $R_1$ is a branching place bisimulation,
if $m_1 [t_1\rangle m_1'$, then
\begin{itemize}
\item[$(i)$] either $t_1$ is $\tau$-sequential and
$\exists \sigma, m_2'$ such that $\sigma$ is $\tau$-sequential,
$m_2[\sigma\rangle m_2'$, and $(\pre{t_1}, \pre{\sigma}) \in R_1$,
$(\pre{t_1}, \post{\sigma}) \in R_1$, $(\post{t_1}, \post{\sigma}) \in R_1$ and
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R_1^\oplus$;
\item[$(ii)$] or there exist $\sigma, t_2, m, m_2'$ such that
$\sigma$ is $\tau$-sequential, $m_2[\sigma\rangle m [t_2\rangle m_2'$,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_2}$,
$(\pre{t_1}, \pre{\sigma}) \in R_1^\oplus$, $(\pre{t_1}, \pre{t_2}) \in R_1^\oplus$,
$(\post{t_1}, \post{t_2}) \in R_1^\oplus$, and moreover,
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R_1^\oplus$.
\end{itemize}
\begin{itemize}
\item
Let us consider case $(i)$, i.e., assume that to the move $m_1 [t_1\rangle m_1'$, $m_2$ replies with
$m_2 [\sigma \rangle m_2'$ such that $\sigma$ is $\tau$-sequential, $(\pre{t_1}, \pre{\sigma}) \in R_1$,
$(\pre{t_1}, \post{\sigma}) \in R_1$, $(\post{t_1}, \post{\sigma}) \in R_1$ and,
moreover, $(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R_1^\oplus$.
Since $(m_2, m_3) \in R_2^\oplus$, there exists a submarking $\overline{m} \subseteq m_3$
such that $(\pre{\sigma}, \overline{m}) \in R_2^\oplus$ and
$(m_2 \ominus \pre{\sigma}, m_3 \ominus \overline{m}) \in R_2^\oplus$.
By Lemma \ref{tau-lemma}, a $\tau$-sequential transition sequence $\sigma'$ exists
such that $\overline{m} = \pre{\sigma'}$ and
$(\post{\sigma}, \post{\sigma'}) \in R_2^\oplus$. Hence,
$m_3 [\sigma'\rangle m_3'$, where $m_3' = (m_3 \ominus \pre{\sigma'}) \oplus \post{\sigma'}$.
Summing up, to the move $m_1 [t_1\rangle m_1'$, $m_3$ can reply with $m_3 [\sigma'\rangle m_3'$, in such a way that
$(\pre{t_1}, \pre{\sigma'}) \in R_1 \circ R_2$,
$(\pre{t_1}, \post{\sigma'}) \in R_1 \circ R_2$, $(\post{t_1}, \post{\sigma'}) \in R_1 \circ R_2$ and,
moreover, $(m_1 \ominus \pre{t_1}, m_3 \ominus \pre{\sigma'}) \in (R_1 \circ R_2)^\oplus$,
(by Proposition \ref{add-prop2}(4)), as required.
\item
Let us consider case $(ii)$, i.e., assume that to the move $m_1 [t_1\rangle m_1'$, $m_2$ replies with
$m_2[\sigma\rangle m [t_2\rangle m_2'$, where $\sigma$ is $\tau$-sequential,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_2}$, and $(\pre{t_1}, \pre{\sigma}) \in R_1^\oplus$,
$(\pre{t_1}, \pre{t_2}) \in R_1^\oplus$, $(\post{t_1}, \post{t_2}) \in R_1^\oplus$, and moreover,
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R_1^\oplus$.
Since $(m_2, m_3) \in R_2^\oplus$, there exists a submarking $\overline{m} \subseteq m_3$
such that $(\pre{\sigma}, \overline{m}) \in R_2^\oplus$ and
$(m_2 \ominus \pre{\sigma}, m_3 \ominus \overline{m}) \in R_2^\oplus$.
By Lemma \ref{tau-lemma}, there exists a $\tau$-sequential transition sequence $\sigma'$
such that $\overline{m} = \pre{\sigma'}$ and
$(\post{\sigma}, \post{\sigma'}) \in R_2^\oplus$. Hence,
$m_3 [\sigma'\rangle m'$, where $m' = (m_3 \ominus \pre{\sigma'}) \oplus \post{\sigma'}$ and,
moreover, $(m, m') \in R_2^\oplus$.
Since $(m, m') \in R_2^\oplus$, $\post{\sigma} = \pre{t_2}$ and $(\post{\sigma}, \post{\sigma'}) \in R_2^\oplus$, there exists
$\underline{m} = \post{\sigma'} \subseteq m'$ such that
$(\pre{t_2}, \underline{m}) \in R_2^\oplus$ and $(m \ominus \pre{t_2}, m' \ominus \underline{m}) \in R_2^\oplus$.
Hence, by Definition \ref{bpb-bis-def},
to the move $\pre{t_2} [t_2\rangle \post{t_2}$,
$\underline{m}$ can reply as follows:
\begin{itemize}
\item[$(a)$] Either $t_2$ is $\tau$-sequential and
$\exists \overline{\sigma}$
such that $ \overline{\sigma}$ is $\tau$-sequential, $\underline{m}= \pre{\overline{\sigma}}$,
$\underline{m}[ \overline{\sigma}\rangle \post{\overline{\sigma}}$,
and $(\pre{t_2}, \pre{ \overline{\sigma}}) \in R_2$,
$(\pre{t_2}, \post{ \overline{\sigma}}) \in R_2$,
$(\post{t_2}, \post{ \overline{\sigma}}) \in R_2$ and
$(m \ominus \pre{t_2}, m' \ominus \pre{ \overline{\sigma}}) \in R_2^\oplus$.
In this case, to the move $m_1 [t_1\rangle m_1'$, $m_3$ can reply with
$m_3 [\sigma' \rangle m'[ \overline{\sigma}\rangle m_3'$, with $m_3' = (m' \ominus \pre{ \overline{\sigma}})
\oplus \post{ \overline{\sigma}}$,
such that $(\pre{t_1}, \pre{\sigma' \overline{\sigma}}) \in (R_1 \circ R_2)^\oplus$
(because $(\pre{t_1}, \pre{\sigma}) \in R_1^\oplus$, $\post{\sigma'} = \pre{\overline{\sigma}}$ and
$(\pre{\sigma}, \pre{\sigma'}) \in R_2^\oplus$),
$(\pre{t_1}, \post{\sigma' \overline{\sigma}}) \in (R_1 \circ R_2)^\oplus$
(because $(\pre{t_1}, \pre{t_2}) \in R_1$, $\post{\sigma'} = \pre{\overline{\sigma}}$
and $(\pre{t_2}, \post{ \overline{\sigma}}) \in R_2$),
$(\post{t_1}, \post{\sigma' \overline{\sigma}}) \in (R_1 \circ R_2)^\oplus$
(as $(\post{t_1}, \post{t_2}) \in R_1$ and $(\post{t_2}, \post{ \overline{\sigma}}) \in R_2$),
and, moreover,
$(m_1 \ominus \pre{t_1}, m_3 \ominus \pre{\sigma' \overline{\sigma}}) \in (R_1 \circ R_2)^\oplus$.
\item[$(b)$] or $\exists \overline{\sigma}, t_3, \overline{m}$ such that
$ \overline{\sigma}$ is $\tau$-sequential, $\underline{m} = \pre{ \overline{\sigma}}$,
$\underline{m}[ \overline{\sigma}\rangle \overline{m} [t_3\rangle \post{t_3}$,
$l(t_2) = l(t_3)$, $\overline{m} = \post{ \overline{\sigma}} = \pre{t_3}$,
$(\pre{t_2}, \pre{ \overline{\sigma}}) \in R_2^\oplus$, $(\pre{t_2}, \pre{t_3}) \in R_2^\oplus$,
$(\post{t_2}, \post{t_3}) \in R_2^\oplus$, and
$(m \ominus \pre{t_2}, m' \ominus \pre{ \overline{\sigma}}) \in R_2^\oplus$.
In this case, to the move $m_2 [\sigma \rangle m [t_2\rangle m_2'$, $m_3$ replies with
$m_3 [\sigma' \rangle m'[ \overline{\sigma}\rangle m'' [t_3 \rangle m_3'$, with
$m_3' = (m' \ominus \pre{ \overline{\sigma}}) \oplus \post{t_3}$,
such that
$\overline{\sigma}$ is $\tau$-sequential, $\pre{\overline{\sigma}} = \post{\sigma'}$,
and therefore
$(\pre{\sigma t_2}, \pre{\sigma' \overline{\sigma} t_3}) \in R_2^\oplus$
(because $\pre{\sigma t_2} = \pre{\sigma}$, $\pre{\sigma' \overline{\sigma} t_3} = \pre{\sigma'}$
and $(\pre{\sigma}, \pre{\sigma'}) \in R_2^\oplus$), and
$(\post{\sigma t_2},$ $ \post{\sigma' \overline{\sigma} t_3}) \in R_2^\oplus$ (because
$\post{\sigma t_2} = \post{t_2}$, $ \post{\sigma' \overline{\sigma} t_3} = \post{t_3}$
and $( \post{t_2}, \post{t_3}) \in R_2^\oplus$).
Summing up, to the move $m_1 [t_1\rangle m_1'$, $m_3$ can reply with
$m_3 [\sigma' \rangle m'[ \overline{\sigma}\rangle m'' [t_3 \rangle m_3'$,
such that $(\pre{t_1}, \pre{\sigma' \overline{\sigma}}) \in (R_1 \circ R_2)^\oplus$
(as $(\pre{t_1}, \pre{\sigma}) \in R_1^\oplus$,
$\pre{\sigma' \overline{\sigma}} = \pre{\sigma'}$ and $(\pre{\sigma}, \pre{\sigma'}) \in R_2^\oplus$),
$(\pre{t_1}, \pre{t_3}) \in (R_1 \circ R_2)^\oplus$ (as $(\pre{t_1}, \pre{t_2}) \in R_1^\oplus$,
and $(\pre{t_2}, \pre{t_3}) \in R_2^\oplus$),
$(\post{t_1}, \post{t_3}) \in (R_1 \circ R_2)^\oplus$ (because $(\post{t_1}, \post{t_2}) \in R_1^\oplus$,
and $(\post{t_2}, \post{t_3}) \in R_2^\oplus$),
and
$(m_1 \ominus \pre{t_1}, m_3 \ominus \pre{\sigma' \overline{\sigma}}) \in (R_1 \circ R_2)^\oplus$
(because $(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R_1^\oplus$ and
$(m_2 \ominus \pre{\sigma}, m_3 \ominus \pre{\sigma'}) \in R_2^\oplus$).
\end{itemize}
\end{itemize}
The case when $m_2$ moves first is symmetric, and so omitted. Hence, $R_1 \circ R_2$ is a branching place bisimulation, indeed.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
\begin{theorem}\label{ssb-bis-eq}
For each P/T net $N = (S, A, T)$, relation $\approx_{p} \; \subseteq \mathcal{M}(S) \times \mathcal{M}(S)$ is an equivalence relation.
\proof
As the identity relation ${\mathcal I}_S$ is a branching place bisimulation by Proposition \ref{prop-bpb-bis1}(i),
we have that ${\mathcal I}_S^\oplus \subseteq \; \approx_p$, and so $\approx_p$ is reflexive.
Symmetry derives from the following argument.
For any $(m, m') \in \; \approx_p$, there exists a branching place bisimulation $R$ such that $(m, m') \in R^\oplus$;
by Proposition \ref{prop-bpb-bis1}(ii), relation $R^{-1}$ is a branching place bisimulation, and by Proposition \ref{add-prop2}(3)
we have that $(m', m) \in (R^{-1})^\oplus$; hence,
$(m', m) \in \; \approx_p$.
Transitivity also holds for $\approx_p$. Let $(m, m') \in \; \approx_p$ and $(m', m'') \in \; \approx_p$; hence, there
exist two branching place bisimulations $R_1$ and $R_2$ such that $(m, m') \in R_1^\oplus$ and $(m', m'') \in R_2^\oplus$. By
Proposition \ref{prop-bpb-bis2}, $R_1 \circ R_2$ is a branching place bisimulation such that the
pair $(m, m'') \in (R_1 \circ R_2)^\oplus$
by Proposition \ref{add-prop2}(4); hence, $(m, m'') \in \; \approx_p$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{theorem}
\begin{proposition}{\bf (Branching place bisimilarity is finer than branching interleaving bisimilarity)}
For each P/T net $N = (S, A, T)$, $m_1 \approx_{p} m_2$ implies $m_1 \approx_{bri} m_2$.
\proof If $m_1 \approx_{p} m_2$, then $(m_1, m_2) \in R^\oplus$ for some branching place bisimulation $R$.
Note that $R^\oplus$ is a branching interleaving bisimilarity, so that $m_1 \approx_{bri} m_2$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{proposition}
Branching place bisimilarity $\approx_p$ is also finer than branching fully-concurrent bisimilarity $\approx_{bfc}$. The proof of this fact is postponed to Section \ref{br-d-place-sec}.
\begin{example}
Consider the nets in Figure \ref{tau-fig2}. Of course, $s_1 \approx_p s_2$, as well as $s_1 \approx_p s_4$.
However, $s_2 \not \approx_p s_5$, because $s_2$ cannot respond to the non-$\tau$-sequential move
$s_5 \deriv{\tau} \theta$. For the same reason, $s_2 \not \approx_p s_6$. Note that silent transitions that are not
$\tau$-sequential are not considered as unobservable.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
\begin{figure}
\caption{Some simple nets with silent moves}
\label{tau-fig2}
\end{figure}
\noindent
By Definition \ref{bpb-bis-def}, branching place bisimilarity can be defined as follows:
$\approx_p = \bigcup \{ R^\oplus \;\;\big|\;\; R \mbox{ is a branching place bisimulation}\}.$
\noindent
By monotonicity of the additive closure (Proposition \ref{add-prop1}(2)), if $R_1 \subseteq R_2$, then
$R_1^\oplus \subseteq R_2^\oplus$. Hence, we can restrict our attention to maximal branching place bisimulations only:
$\approx_p = \bigcup \{ R^\oplus \;\;\big|\;\; R \mbox{ is a {\em maximal} branching place bisimulation}\}.$
\noindent
However, it is not true that
$\approx_p = (\bigcup \{ R \;\;\big|\;\; R \mbox{ is a {\em maximal} branching place bisimulation}\})^\oplus$,
because the union of branching place bisimulations may be not a branching place bisimulation.
\begin{example}
Consider the nets in Figure \ref{tau-fig3}. It is easy to realize that $s_1 \oplus s_2 \approx_p s_3 \oplus s_5$,
because $R_1 = \{(s_1, s_3), (s_2, s_5), (s_1, s_4)\}$ is a branching place bisimulation.
In fact, to the move $t_1 = s_1 \oplus s_2 \deriv{a} s_1 \oplus s_2$, $s_3 \oplus s_5$ replies
with $s_3 \oplus s_5[\sigma \rangle s_4 \oplus s_5 [t_2\rangle s_3 \oplus s_5$,
where $\sigma = t \, i(s_5)$ (with $t = (s_3, \tau, s_4)$ and $i(s_5) = (s_5, \tau, s_5)$) and
$t_2 = (s_4 \oplus s_5, a, s_3 \oplus s_5)$, such that
$(\pre{t_1}, \pre{t_2}) \in R_1^\oplus$ and $(\post{t_1}, \post{t_2}) \in R_1^\oplus$.
Then, to the move $s_3 \oplus s_5[t\rangle s_4 \oplus s_5$, $s_1 \oplus s_2$ can reply by idling with
$s_1 \oplus s_2 [\sigma'\rangle s_1 \oplus s_2$, where $\sigma' = i(s_1)$, and
$(\pre{\sigma'}, \pre{t}) \in R_1^\oplus$, $(\post{\sigma'}, \pre{t}) \in R_1^\oplus$ and $(\post{\sigma'}, \post{t}) \in R_1^\oplus$.
Note that also the identity relation $\mathcal{I}_S$, where $S = \{s_1, s_2, s_3, s_4, s_5\}$ is a branching place bisimulation.
However, $R = R_1 \cup \mathcal{I}_S$ is not a branching place bisimulation, because, for instance,
$(s_1 \oplus s_2, s_3 \oplus s_2) \in R^\oplus$, but these two markings are clearly not equivalent, as $s_1 \oplus s_2$ can do $a$,
while $s_3 \oplus s_2$ cannot.
Similarly, one can prove that $s_1 \oplus s_2 \approx_p s_6 \oplus s_8$ because
$R_2 = \{(s_1, s_6), (s_2, s_8),$ $(s_1, s_7), (s_2, s_9)\}$ is a branching place bisimulation.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
\begin{figure}
\caption{Some branching place bisimilar nets}
\label{tau-fig3}
\end{figure}
\section{Branching Place Bisimilarity is Decidable} \label{decid-br-place-sec}
In order to prove that $\approx_p$ is decidable, we first need a technical lemma which states that it is decidable to check
if a place relation $R \subseteq S \times S$ is a branching place bisimulation.
\begin{lemma}\label{bpb-rel-dec-lem}
Given a P/T net $N = (S, A, T)$ and a place relation $R \subseteq S \times S$, it is decidable if $R$
is a branching place bisimulation.
\proof
We want to prove that $R$ is a branching place bisimulation if and only if the following two conditions are satisfied:
\begin{enumerate}
\item $\forall t_1 \in T$, $\forall m$ such that $(\pre{t_1}, m) \in R^\oplus$
\begin{itemize}
\item[$(a)$] either $t_1$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential
$\sigma$ such that $m = \pre{\sigma}$,
$(\pre{t_1}, \post{\sigma}) \in R$ and $(\post{t_1}, \post{\sigma}) \in R$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_2 \in T$, with
$\post{\sigma} = \pre{t_2}$, such that $m = \pre{\sigma}$, $l(t_1) = l(t_2)$, $(\pre{t_1}, \pre{t_2}) \in R^\oplus$
and $(\post{t_1}, \post{t_2}) \in R^\oplus$.
\end{itemize}
\item $\forall t_2 \in T$, $\forall m$ such that $(m, \pre{t_2}) \in R^\oplus$
\begin{itemize}
\item[$(a)$] either $t_2$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential $ \sigma$
such that $m = \pre{\sigma}$,
$(\post{\sigma}, \pre{t_2}) \in R$ and $(\post{\sigma}, \post{t_2}) \in R$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_1 \in T$, with
$\post{\sigma} = \pre{t_1}$, such that $m = \pre{\sigma}$, $l(t_1) = l(t_2)$, $( \pre{t_1}, \pre{t_2}) \in R^\oplus$
and $(\post{t_1}, \post{t_2}) \in R^\oplus$.
\end{itemize}
\end{enumerate}
The implication from left to right is obvious: if $R$ is a branching place bisimulation, then for sure conditions 1 and 2 are satisfied,
because, as observed in Remark \ref{tr-seq-silent-rem}, if there exists a suitable $\tau$-sequential transition
sequence $\sigma$, then there exists also a suitable {\em acyclic} $\tau$-sequential
$\sigma'$ such that $\pre{\sigma} = \pre{\sigma'}$ and $\post{\sigma} = \post{\sigma'}$.
For the converse implication, assume that conditions 1 and 2 are satisfied; then we have to prove that the branching
place bisimulation game for $R$ holds for all pairs $(m_1, m_2) \in R^\oplus$.
Let $ q = \{(s_1, s_1'), (s_2, s_2'), \ldots,$ $(s_k, s_k')\}$ be any multiset of associations
that can be used to prove that $(m_1, m_2) \in R^\oplus$. So this means that
$m_1 = s_1 \oplus s_2 \oplus \ldots \oplus s_k$, $m_2 = s_1' \oplus s_2' \oplus \ldots \oplus s_k'$
and that $(s_i, s_i') \in R$ for $i = 1, \ldots, k$.
If $m_1 [t_1 \rangle m_1'$, then $m_1' = m_1 \ominus \pre{t_1} \oplus \post{t_1}$.
Consider the multiset of associations $p = \{(\overline{s}_{1}, \overline{s}'_{1}),$ $\ldots, (\overline{s}_{h}, \overline{s}'_{h})\} \subseteq q$,
with $\overline{s}_{1} \oplus \ldots \oplus \overline{s}_{h}$ $= \pre{t_1}$.
Note that $(\pre{t_1}, \overline{s}'_{1} \oplus \ldots \oplus \overline{s}'_{h}) \in R^\oplus$.
Therefore, by condition 1, (by denoting by $m$ the multiset $\overline{s}'_{1} \oplus \ldots \oplus \overline{s}'_{h}$)
\begin{itemize}
\item[$(a)$] either $t_1$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential
$\sigma$ such that $m = \pre{\sigma}$,
$(\pre{t_1}, \post{\sigma}) \in R$ and $(\post{t_1}, \post{\sigma}) \in R$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_2 \in T$, with
$\post{\sigma} = \pre{t_2}$, such that $m = \pre{\sigma}$, $l(t_1) = l(t_2)$, $(\pre{t_1}, \pre{t_2}) \in R^\oplus$
and $(\post{t_1}, \post{t_2}) \in R^\oplus$.
\end{itemize}
In case $(a)$, since $\pre{\sigma} \subseteq m_2$, also $m_2 [\sigma \rangle m_2'$ is firable, where $m_2' = m_2 \ominus \pre{\sigma} \oplus \post{\sigma}$, so that
$(\pre{t_1}, \post{\sigma}) \in R$, $(\post{t_1}, \post{\sigma}) \in R$
and, finally, $(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$, as required.
Note that the last condition holds because, from the multiset $q$
of matching pairs for $m_1$ and $m_2$,
we have removed those in $p$.
In case $(b)$, since $\pre{\sigma} \subseteq m_2$, also $m_2 [\sigma \rangle m [t_2\rangle m_2'$ is firable,
where $m_2' = m_2 \ominus \pre{\sigma} \oplus \post{t_2}$, so that $l(t_1) = l(t_2)$,
$(\pre{t_1}, \pre{t_2}) \in R^\oplus$, $(\post{t_1}, \post{t_2}) \in R^\oplus$
and, finally, $(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\oplus$, as required.
If $m_2 [t_2 \rangle m_2'$, then we have to use an argument symmetric to the above, where condition 2 is used instead.
Hence, we have proved that conditions 1 and 2 are enough to prove that $R$ is a branching place bisimulation.
Finally, observe that the set $T$ is finite and, for each $t_1 \in T$,
the number of markings $m$ such that $(\pre{t_1}, m) \in R^\oplus$ and $(m, \pre{t_1}) \in R^\oplus$ is finite as well.
More precisely, this part of the procedure has worst-case time complexity
$O(q \cdot n^{p})$,
where $q = |T|$, $n = |S|$ and $p$ is the least number such that $| \pre{t}| \leq p$ for all $t \in T$, as the number
of markings $m$ related via $R$ to $\pre{t_1}$ is $n^{p}$ at most.
Moreover, for each pair $(t_1, m)$ satisfying the condition $(\pre{t_1}, m) \in R^\oplus$,
we have to check conditions $(a)$ and $(b)$, each checkable in a finite amount of time.
In fact, for case $(a)$, we have to check if there exists a place $s$ such that $(\pre{t_1}, s) \in R$ and $(\post{t_1}, s) \in R$,
which is reachable from $m$ by means of an acyclic
$\tau$-1-sequential transition sequence $\sigma$; this condition is decidable because we have at most $n$ places to examine and for each
candidate place $s$, we can check whether a suitable acyclic $\tau$-1-sequential $\sigma$ exists.
Similarly, in case (b) we have to consider all the transitions $t_2$ such that
$(\pre{t_1}, \pre{t_2}) \in R^\oplus$ and $(\post{t_1}, \post{t_2}) \in R^\oplus$ (and this can be checked with
worst-case time complexity $O(q \cdot (p^2\sqrt{p}))$,
where $q = |T|$, $n = |S|$ and $p$ is the least number such that $| \pre{t}| \leq p$
and $|\post{t}| \leq p$ for all $t \in T$)
and check whether at least one of these is reachable from
$m$ by means of an acyclic $\tau$-sequential transition sequence $\sigma$ such that $\pre{\sigma} = m$ and $\post{\sigma} = \pre{t_2}$
and, as observed in Remark \ref{tr-seq-silent-rem},
the existence of such a $\sigma$ is decidable.
Therefore, in a finite amount of time we can decide if a
given place relation $R$ is actually a branching place bisimulation.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{lemma}
\begin{theorem}\label{bpl-bis-decid-th}{\bf (Branching place bisimilarity is decidable)}
Given a P/T net $N = (S, A, T)$, for each pair of markings $m_1$ and $m_2$, it is decidable whether $m_1 \approx_p m_2$.
\proof
If $|m_1| \neq |m_2|$, then $m_1 \not \approx_p m_2$ by Proposition \ref{fin-k-add}.
Otherwise, we assume that $|m_1| = k = |m_2|$.
As $|S| = n$, the set of all the place relations over $S$ is of size $2^n$.
Let us list all the place relations as follows:
$R_1, R_2, \ldots, R_{2^n}$.
Hence, for $i = 1, \ldots, 2^n$, by Lemma \ref{bpb-rel-dec-lem} we can decide whether $R_i$ is a branching
place bisimulation and, in such a case,
we can check whether $(m_1, m_2) \in R_i^\oplus$ in $O(k^2 \sqrt{k})$ time.
As soon as we found a branching place bisimulation $R_i$ such that $(m_1, m_2) \in R_i^\oplus$,
we stop concluding that $m_1 \approx_p m_2$. If none of the $R_i$ is a branching
place bisimulation such that $(m_1, m_2) \in R_i^\oplus$, then
we can conclude that $m_1 \not\approx_p m_2$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{theorem}
\section{A Small Case Study}\label{case-sec}
In Figure \ref{upc-a-place} a producer-consumer system is outlined. The producer $P_1$ can unboundedly produce item $a$, each time
depositing one token on place $D_3$, or it can perform some internal work (e.g., preparation of the production lines) and then choose to produce
item $a$ or item $b$, depositing one token on $D_1$ or $D_2$, respectively, and then start again from place $P_1$.
The consumer $C$ can synchronize with the deposit processes $D_1, D_2, D_3$ to perform the delivery of the selected item to $C_1$. This
sequential system has the ability to directly perform $cons$ reaching $C_3$ or it needs some preparatory internal work before
performing $cons$ to reach the same place. Finally, $C_3$ can perform an internal transition reaching $C$. Note that the three silent transitions
are all $\tau$-sequential.
\begin{figure}
\caption{An unbounded producer-consumer system}
\label{upc-a-place}
\end{figure}
\begin{figure}
\caption{Another unbounded producer-consumer system}
\label{upc-b-place}
\end{figure}
In Figure \ref{upc-b-place} another unbounded producer-consumer system is outlined.
The producer $P_1'$ can choose to produce item $a$ or item $b$,
depositing one token on $D_1'$ or $D_2'$, respectively, and then become $P_2'$, which can unboundedly choose to produce $a$ or $b$.
The consumer $C'$ can synchronize with the deposit processes $D_1', D_2'$ to perform the delivery of the selected item to $C_1'$. This
sequential system first performs an internal transition and then it has the ability to perform $cons$ in two different ways:
either directly reaching $C'$ or
reaching $C_3'$, which performs an internal transition in order to reach $C'$. Note that the two silent transitions
are $\tau$-sequential.
It is not difficult to realize that the following place relation
$
\begin{array}{rrcl}
\quad &R & = & \{(P_1, P_1'), (P_2, P_1'), (P_1, P_2'), (P_2, P_2'), (D_1, D_1'), (D_2, D_2'), (D_3, D_1'),\\
& && (C, C'), (C_1, C_1'), (C_2, C_2'), (C_3, C_3'), (C_1, C_2'), (C_3, C')\}
\end{array}
$
\noindent
is a branching place bisimulation, so that $P_1 \oplus C \approx_p P_1' \oplus C'$ as $(P_1 \oplus C, P_1' \oplus C') \in R^\oplus$.
The fact that $R$ is a branching place bisimulation can be proved by exploiting Lemma \ref{bpb-rel-dec-lem}: it is enough to check that, for each transition $t_1$ of the first net and for each marking $m$ of the second net such that $(\pre{t_1}, m) \in R^\oplus$, the following hold:
\begin{itemize}
\item[$(a)$] either $t_1$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential
$\sigma$ such that $m = \pre{\sigma}$,
$(\pre{t_1}, \post{\sigma}) \in R$ and $(\post{t_1}, \post{\sigma}) \in R$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_2 \in T$, with
$\post{\sigma} = \pre{t_2}$, such that $m = \pre{\sigma}$, $l(t_1) = l(t_2)$, $(\pre{t_1}, \pre{t_2}) \in R^\oplus$
and $(\post{t_1}, \post{t_2}) \in R^\oplus$.
\end{itemize}
And the symmetric condition for each transition $t_2$ of the second net and for each marking $m$ of the first net such that $(m, \pre{t_2}) \in R^\oplus$.
For instance, consider the $\tau$-sequential transition $(P_1, \tau, P_2)$.
The only markings to consider are $P_1'$ and $P_2'$ and, by the either case $(a)$ above,
it is enough to consider $\sigma = i(P_1')$ or $\sigma = i(P_2')$, respectively, to get the thesis. Similarly, for transition
$(C_1, cons, C_3)$ we have to consider only the markings $C_1'$ and $C_2'$; the former can respond by first performing the silent transition
to $C_2'$ and then $(C_2', cons, C_3')$, so that, by case $(b)$ above, we get the thesis by choosing $\sigma = (C_1', \tau, C_2')$;
in the latter case, we simply choose $\sigma = i(C_2')$. As a final example for this side of the proof, consider transition
$(D_1 \oplus C, del_a, C_1)$, so that the only marking to consider is $D_1' \oplus C'$, that can respond with $(D_1' \oplus C', del_a, C_1')$ to satisfy the required conditions.
Symmetrically, in case of transition $(P_1', b, P_2' \oplus D_2')$, the only markings to consider are $P_1$ and $P_2$. In the latter case,
$P_2$ can respond with transition $(P_2, b, P_1 \oplus D_2)$ and, by the or case $(b)$, we get the thesis
by choosing $\sigma = i(P_2)$. In the former case, $P_1$ can respond
by first performing the internal $\tau$-sequential transition, reaching $P_2$, and then transition $(P_2, b, P_1 \oplus D_2)$; hence,
by the or case, we get the thesis by choosing $\sigma = (P_1, \tau, P_2)$.
Similarly, for transition $(C_2', cons, C')$ we have to consider markings $C_1$ and $C_2$. In the latter case, $C_2$ can respond
with $(C_2, cons, C_3)$ and the thesis is satisfied, by the or case, with $\sigma = i(C_2)$. In the former case, $C_1$ first performs
the silent transition to $C_2$ and then $(C_2, cons, C_3)$, and the thesis is satisfied by choosing $\sigma = (C_1, \tau, C_2)$.
As a final example for this side of the proof, consider transition
$(D_1' \oplus C', del_a, C_1')$, so that the two markings to consider are $D_1 \oplus C$ and $D_1 \oplus C_3$.
The former can simply respond by $(D_1 \oplus C, del_a, C_1)$, while the latter first performs $\sigma = i(D_1) (C_3, \tau, C)$.
\section{A Coarser Variant: Branching D-place Bisimilarity}\label{br-d-place-sec}
We first recall from \cite{Gor21} a coarser variant of place bisimulation, called {\em d-place bisimulation}.
Then, we introduce {\em branching d-place} bisimulation. Finally, we prove that branching d-place bisimilarity $\approx_d$
is finer than branching fully-concurrent bisimilarity.
\subsection{D-place Bisimilarity}\label{d-place-ssec}
A coarser variant of place bisimulation, introduced in \cite{Gor21} and called {\em d-place bisimulation}, may relate a
place $s$ also to the empty marking $\theta$.
In order to provide the definition of d-place bisimulation,
we need first to extend the domain of a place relation:
the empty marking $\theta$ is considered as an additional place, so that a place
relation is defined not on $S$, rather on $S \cup \{\theta\}$.
Hence,
the symbols $r_1$ and $r_2$ that occur in the following definitions,
can only denote either the empty marking $\theta$ or a
single place $s$.
Now we extend the idea of additive closure to these more general place relations,
yielding {\em d-additive closure}.
\begin{definition}\label{hadd-eq}{\bf (D-additive closure)}
Given a P/T net $N = (S, A, T)$ and a {\em place relation} $R \subseteq (S\cup \{\theta\}) \times (S \cup \{\theta\})$, we define
a {\em marking relation}
$R^\odot \, \subseteq \, {\mathcal M}(S) \times {\mathcal M}(S)$, called
the {\em d-additive closure} of $R$,
as the least relation induced by the following axiom and rule.
$\begin{array}{lllllllllll}
\bigfrac{}{(\theta, \theta) \in R^\odot} & \; & \;
\bigfrac{(r_1, r_2) \in R \; \; (m_1, m_2) \in R^\odot }{(r_1 \oplus m_1, r_2 \oplus m_2) \in R^\odot } \\
\end{array}$
\\[-.2cm]
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
Note that if two markings are related by $R^\odot$, then they
may have different size;
in fact, even if the axiom relates the empty marking to itself (so two markings with the same size),
as $R \subseteq (S\cup \{\theta\}) \times (S \cup \{\theta\})$, it may be the case that $(\theta, s) \in R$,
so that, assuming $(m_1', m_2') \in R^\odot$ with $|m_1'| = |m_2'|$, we get that the pair
$(m_1', s \oplus m_2')$ belongs to $R^\odot$, as $\theta$ is the identity for
the operator of multiset union.
Hence, Proposition \ref{fin-k-add}, which is valid for place relations defined over $S$, is not valid for place relations
defined over $S\cup \{\theta\}$.
However, the properties in Propositions \ref{add-prop1}
and \ref{add-prop2} hold also for these more general place relations.
Note that checking whether $(m_1, m_2) \in R^\odot$ has complexity $O(k^2\sqrt{k})$, where $k$ is the size of the largest marking.
\begin{definition}\label{def-dplace-bis}{\bf (D-place bisimulation)}
Let $N = (S, A, T)$ be a P/T net.
A {\em d-place bisimulation} is a relation
$R\subseteq (S\cup \{\theta\}) \times (S \cup \{\theta\})$ such that if $(m_1, m_2) \in R^\odot$
then
\begin{itemize}
\item $\forall t_1$ such that $m_1[t_1\rangle m'_1$, $\exists t_2$ such that $m_2[t_2\rangle m'_2$
with $(\pre{t_1}, \pre{t_2}) \in R^\odot$, $l(t_1) = l(t_2)$, $(\post{t_1}, \post{t_2}) \in R^\odot$ and, moreover,
$(m_1', m_2') \in R^\odot$,
\item $\forall t_2$ such that $m_2[t_2\rangle m'_2$, $\exists t_1$ such that $m_1[t_1\rangle m'_1$
with $(\pre{t_1}, \pre{t_2}) \in R^\odot$, $l(t_1) = l(t_2)$, $(\post{t_1}, \post{t_2}) \in R^\odot$ and, moreover,
$(m_1', m_2') \in R^\odot$.
\end{itemize}
Two markings $m_1$ and $m_2$ are {\em d-place bisimilar}, denoted by
$m_1 \sim_{d} m_2$, if there exists a d-place bisimulation $R$ such that $(m_1, m_2) \in R^\odot$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
D-place bisimilarity $\sim_d$ is a decidable equivalence relation \cite{Gor21}.
Moreover, in \cite{Gor21} it is proved that $\sim_d$ is
finer than fully-concurrent bisimilarity $\sim_{fc}$ \cite{BDKP91}. This implication is strict,
as illustrated by the following example.
\begin{example}\label{ex-dead}
Consider Figure \ref{net-d2-place}. Even if $s_1$ and $s_3 \oplus s_4$
are fc-bisimilar, we cannot find any d-place bisimulation relating these two markings. If we include the necessary
pairs $(s_1, s_3)$ and $(\theta, s_4)$, then we would fail immediately, because the pair $(s_1, s_3)$ does not satisfy
the d-place bisimulation game, as $s_1$ can move, while $s_3$ cannot.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
\begin{figure}
\caption{Two fc-bisimilar nets, but not d-place bisimilar}
\label{net-d2-place}
\end{figure}
\begin{figure}
\caption{Two d-place bisimilar nets}
\label{net-d-place}
\end{figure}
\begin{figure}
\caption{Relation $\{(s_1, s_2), (\theta, s_3)\}$ is a d-place bisimulation}
\label{net-d3-place}
\end{figure}
\begin{example}\label{ex-dplace}
Consider the net in Figure \ref{net-d-place}. It is easy to realize that $R = \{(s_1, s_4), (\theta, s_5),$ $
(s_2, s_6),$ $(s_3, \theta)\}$ is a d-place bisimulation. Hence, this example shows that d-place bisimilarity is strictly coarser
than place bisimilarity, and that it does not preserve the causal nets,
because $s_1$ and $s_4$ generate different causal nets.
The places that are related to $\theta$ (i.e., $s_3$ and $s_5$) are deadlocks, i.e., they have empty post-set.
However, it may happen that a d-place bisimulation can also relate a place with non-empty post-set to $\theta$.
In fact, consider the net in Figure \ref{net-d3-place}. It is easy to observe that the relation
$R = \{(s_1, s_2), (\theta, s_3)\}$ is a d-place bisimulation, as for all the pairs $(m_1, m_2) \in R^\odot$,
both markings are stuck, so that the d-place bisimulation game is vacuously satisfied.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
\begin{remark}{\bf (Condition on the pre-sets)}\label{pre-rem}
As a consequence of the observation of the previous examples, it is easy to note that
if a d-place bisimulation $R$ relates a place $s$ with non-empty post-set to $\theta$, then it is not possible
to find two transitions $t_1$ and $t_2$ such that for the proof of $(\pre{t_1}, \pre{t_2}) \in R^\odot$ it is
necessary to use the pair $(s, \theta)$ (cf. Example \ref{ex-dead}). In other words,
the condition $(\pre{t_1}, \pre{t_2}) \in R^\odot$ in Definition \ref{def-dplace-bis} is actually $(\pre{t_1}, \pre{t_2}) \in \overline{R}^\oplus$,
where $\overline{R} = \{(r_1, r_2) \in R \;\;\big|\;\; r_1 \in S \wedge r_2 \in S\}$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{remark}
\subsection{Branching D-place Bisimulation}\label{brd-place-ssec}
Branching d-place bisimulation is defined as branching place bisimulation (using {\em $\tau$-sequential transition sequences},
i.e., sequences composed of $\tau$-sequential net transitions and also idling transitions), where the additive closure $\oplus$ is replaced
by the d-additive closure $\odot$, except when considering the presets of the matched transitions where $R^\odot$
is actually $\overline{R}^\oplus$
(cf. Remark \ref{pre-rem}).
\begin{definition}\label{bdpb-bis-def}{\bf (Branching d-place bisimulation)}
Given a P/T net $N = (S, A, T)$, a {\em branching d-place bisimulation} is a relation
$R\subseteq (S\cup \{\theta\}) \times (S \cup \{\theta\})$ such that if $(m_1, m_2) \in R^\odot$
\begin{enumerate}
\item $\forall t_1$ such that $m_1[t_1\rangle m_1'$
\begin{itemize}
\item[$(i)$] either $t_1$ is $\tau$-sequential and
$\exists \sigma, m_2'$ such that $\sigma$ is $\tau$-sequential,
$m_2[\sigma\rangle m_2'$, and $(\pre{t_1}, \pre{\sigma}) \in \overline{R}$,
$(\pre{t_1}, \post{\sigma}) \in \overline{R}$, $(\post{t_1}, \post{\sigma}) \in \overline{R}$ and
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\odot$;
\item[$(ii)$] or there exist $\sigma, t_2, m, m_2'$ such that
$\sigma$ is $\tau$-sequential, $m_2[\sigma\rangle m [t_2\rangle m_2'$,
$l(t_1) = l(t_2)$, $\post{\sigma} = \pre{t_2}$,
$(\pre{t_1}, \pre{\sigma}) \in \overline{R}^\oplus$, $(\pre{t_1}, \pre{t_2}) \in \overline{R}^\oplus$,
$(\post{t_1}, \post{t_2}) \in R^\odot$, and moreover,
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\odot$;
\end{itemize}
\item and, symmetrically, $\forall t_2$ such that $m_2[t_2\rangle m_2'$
\end{enumerate}
Two markings $m_1$ and $m_2$ are branching d-place bisimilar,
denoted by $m_1 \approx_{d} m_2$,
if there exists a branching d-place bisimulation $R$ such that $(m_1, m_2) \in R^\odot$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{definition}
It is easy to observe that, in the either case, by additivity of $R^\odot$ (also w.r.t. $\overline{R}^\oplus$), from
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\odot$ and
$(\pre{t_1}, \post{\sigma}) \in \overline{R}$, we get $(m_1, m_2') \in R^\odot$, as well as, from
$(\post{t_1}, \post{\sigma}) \in \overline{R}$ we get $(m_1', m_2') \in R^\odot$. In a similar manner, for the or case,
from $(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\odot$, $\post{\sigma} = \pre{t_2}$ and
$(\pre{t_1}, \pre{t_2}) \in \overline{R}^\oplus$, we get $(m_1, m) \in R^\odot$, as well as, from $(\post{t_1}, \post{t_2}) \in R^\odot$,
we get $(m_1', m_2') \in R^\odot$.
Note also that a $\tau$-sequential transition performed by one of the two markings may be matched by the other one also by idling:
this is due to the {\em either} case when $\sigma = i(s_2)$ for a suitable token $s_2$ such that $(\pre{t_1}, \pre{\sigma}) \in \overline{R}$,
$(\pre{t_1}, \post{\sigma}) \in \overline{R}$, $(\post{t_1}, \post{\sigma}) \in \overline{R}$ and
$(m_1 \ominus \pre{t_1}, m_2 \ominus \pre{\sigma}) \in R^\odot$, where $\pre{\sigma} = \post{\sigma} = s_2$.
\begin{figure}
\caption{Two branching d-place bisimilar nets}
\label{net-bd-place}
\end{figure}
\begin{example}\label{ex-bdp-bis}
Consider the nets in Figure \ref{net-bd-place}. It is easy to realize that $s_1 \approx_d s_4 \oplus s_5$ because
$R = \{(s_1, s_4), (\theta, s_5), (\theta, s_6), (s_2, s_7), (s_2, s_8), (s_3, \theta)\}$ is a branching d-place bisimulation
such that $(s_1, s_4 \oplus s_5) \in R^\odot$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
Similarly to what done in Proposition \ref{prop-bpb-bis1} and Proposition \ref{prop-bpb-bis2}, we can also prove that the identity
relation is a branching d-place bisimulation, that the inverse of a branching d-place bisimulation is a branching d-place bisimulation and that
the relational composition of two branching d-place bisimulations is a branching d-place bisimulation.
As a consequence, $\approx_d$ is also an equivalence relation.
\noindent
By Definition \ref{bdpb-bis-def}, branching d-place bisimilarity can be defined as follows:
$\approx_d = \bigcup \{ R^\odot \;\;\big|\;\; R \mbox{ is a branching d-place bisimulation}\}.$
\noindent
By monotonicity of the d-additive closure, if $R_1 \subseteq R_2$, then
$R_1^\odot \subseteq R_2^\odot$. Hence, we can restrict our attention to maximal branching d-place bisimulations only:
$\approx_d = \bigcup \{ R^\odot \;\;\big|\;\; R \mbox{ is a {\em maximal} branching d-place bisimulation}\}.$
\noindent
However, it is not true that
$\approx_d = (\bigcup \{ R \;\;\big|\;\; R \mbox{ is a {\em maximal} branching d-place bisimulation}\})^\odot$,
because the union of branching d-place bisimulations may be not a branching d-place bisimulation.
Hence, its definition is not coinductive, so that we cannot adapt the well-known algorithms for computing
the largest bisimulation equivalence \cite{PT87,KS83}.
Nonetheless, we can adapt the decidability proof
in Section \ref{decid-br-place-sec}, to prove that also $\approx_d$ is decidable for finite P/T nets.
The key point is that we can prove, similarly to what done in Lemma \ref{bpb-rel-dec-lem}, that $R$ is a
branching d-place bisimulation if and only if the following two finite conditions are satisfied:
\begin{enumerate}
\item $\forall t_1 \in T$, $\forall m$ such that $(\pre{t_1}, m) \in \overline{R}^\oplus$
\begin{itemize}
\item[$(a)$] either $t_1$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential
$\sigma$ such that $m = \pre{\sigma}$,
$(\pre{t_1}, \post{\sigma}) \in \overline{R}$ and $(\post{t_1}, \post{\sigma}) \in \overline{R}$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_2 \in T$, with
$\post{\sigma} = \pre{t_2}$, such that $m = \pre{\sigma}$, $l(t_1) = l(t_2)$, $(\pre{t_1}, \pre{t_2}) \in \overline{R}^\oplus$
and $(\post{t_1}, \post{t_2}) \in R^\odot$.
\end{itemize}
\item $\forall t_2 \in T$, $\forall m$ such that $(m, \pre{t_2}) \in \overline{R}^\oplus$
\begin{itemize}
\item[$(a)$] either $t_2$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential $ \sigma$
such that $m = \pre{\sigma}$,
$(\post{\sigma}, \pre{t_2}) \in \overline{R}$ and $(\post{\sigma}, \post{t_2}) \in \overline{R}$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_1 \in T$, with
$\post{\sigma} = \pre{t_1}$, such that $m = \pre{\sigma}$, $l(t_1) = l(t_2)$, $( \pre{t_1}, \pre{t_2}) \in \overline{R}^\oplus$
and $(\post{t_1}, \post{t_2}) \in R^\odot$,
\end{itemize}
\end{enumerate}
\noindent
that are decidable in exponential time.
Hence, by considering all the finitely many place relations for a finite P/T net, we can check whether each of them is a branching
d-place bisimulation and, in such a case, whether the considered markings are related by its d-additive closure.
Summing up, also $\approx_d$ is decidable for finite P/T nets in exponential time.
Of course, $\approx_d$ is coarser than $\approx_p$ because a branching place bisimulation is also a branching d-place bisimulation, but the reverse is not true; for instance, relation $R$ in Example \ref{ex-bdp-bis} is not a branching place bisimulation.
\subsection{Sensible Behavioral Equivalence}\label{bdpb>bfc-ssec}
In this section we argue that $\approx_d$ is a sensible (i.e., fully respecting causality and the branching structure) behavioral
equivalence, by proving that it is finer than branching fully-concurrent bisimilarity.
\begin{theorem}\label{bdpbis>bfc-bis}{\bf (Branching d-place bisimilarity
is finer than branching fully concurrent bisimilarity)}
Let $N = (S, A, T)$ be a P/T net with silent moves.
If $m_1 \approx_{d} m_2$, then $m_1 \approx_{bfc} m_2$.
\proof
If $m_1 \approx_{d} m_2$, then there exists a branching d-place bisimulation $R_1$ such that
$(m_1, m_2) \in R_1^\odot$.
Let us consider
\begin{equation*} \label{R2}
\begin{split}
R_2 \overset{def}{=} \lbrace (\pi_1, g, \pi_2) | & \pi_1 = (C_1, \rho_1) \text{ is a process of $N(m_{1})$,} \\
& \pi_2 = (C_2, \rho_2) \text{ is a process of $N(m_{2})$},\\
& \text{$g$ is an abstract event isomorphism between $C_1$ and $C_2$},\\
& \text{and property } \Phi(\pi_1,g,\pi_2) \text{ holds}
\rbrace ,
\end{split}
\end{equation*}
\noindent
where property $\Phi(\pi_1, g, \pi_2)$ states that there exists a multiset
$q = \{(r_1, r_1'),$ $ (r_2, r_2'),$ $ \ldots,$ $(r_k, r_k')\}$
\noindent
of associations such that if $Max(C_1) = b_1 \oplus \ldots \oplus b_{k_1}$ and $Max(C_2) = b_1'
\oplus \ldots \oplus b'_{k_2}$ (with $k_1, k_2 \leq k$)
then we have that
\begin{enumerate}
\item $\rho_1(Max(C_1)) = r_1 \oplus \ldots \oplus r_k$ and $\rho_2(Max(C_2)) = r_1' \oplus \ldots \oplus r_k'$
(remember that some of the $r_i$ or $r_i'$ can be $\theta$),
\item for $i = 1, \ldots, k$, $(r_i, r_i') \in R_1$, so that $(\rho_1(Max(C_1)), \rho_2(Max(C_2))) \in R_1^\odot$,
\item and for $i = 1, \ldots, k$, if $r_i = \rho_1(b_j)$ for some $b_j \in Max(C_1) \cap \post{e_1}$
then either $r_i' = \theta$,
or $r_i' = \rho_2(b'_{j'})$ for some $b'_{j'} \in Max(C_2) \cap \post{e_2}$ for some event $e_2$ such that
\begin{itemize}
\item if $e_1$ is observable, then either $g(e_1) = e_2$ or $g(e_1) \leq_{\pi_2} e_2$
and all the events in the path from $g(e_1)$ (excluded) to $e_2$ (included) are $\tau$-sequential;
\item if $e_1$ is not observable, then for each observable $e_1'$ we have that $e_1' \leq_{\pi_1} e_1$ if and only if
$g(e_1') \leq_{\pi_2} e_2$.
\end{itemize}
And symmetrically, if $r_i' = \rho_2(b'_{j'})$ for some
$b'_{j'} \in Max(C_2) \cap \post{e_2}$ then
either $r_i = \theta$, or $r_i = \rho_1(b_j)$ for some $b_j \in Max(C_1) \cap \post{e_1}$ for some event $e_1$ such that
\begin{itemize}
\item if $e_2$ is observable, then either $g(e_1) = e_2$ or there exists $e_1' \leq_{\pi_1} e_1$ such that $g(e_1') = e_2$
and all the events in the path from $e_1'$ (excluded) to $e_1$ (included) are $\tau$-sequential;
\item if $e_2$ is not observable, then for each observable $e_2'$ we have that $e_2' \leq_{\pi_2} e_2$ if and only if
$g^{-1}(e_2') \leq_{\pi_1} e_1$.
\end{itemize}
\end{enumerate}
Note that such a multiset $q$ has the property that for each $(r_i, r_i') \in q$, we have that either one of the two elements in the pair is
$\theta$, or both places are the image of suitable conditions with no observable predecessor event, or
both places are the image of conditions generated by (or causally dependent on) events related by
the abstract event isomorphism $g$.
We want to prove that $R_2$ is a branching fully-concurrent bisimulation.
First of all, consider a triple of the form $(\pi_1^0, g^0, \pi_2^0)$,
where $\pi_i^0 = (C_i^0, \rho_i^0)$,
$C_i^0$ is the causal net without events and $\rho_1^0, \rho_2^0$ are
such that $\rho_i^0(Min(C_i^0)) = \rho_i^0(Max(C_i^0)) = m_i$ for $i= 1, 2$
and $g^0$ is the empty function.
Then $(\pi_1^0, g^0, \pi_2^0)$ must belong to $R_2$,
because $(C_i^0, \rho_i^0)$ is a process of $N(m_i)$, for $i=1, 2$ and $\Phi(\pi_1^0, g^0, \pi_2^0)$
trivially holds because,
by hypothesis, $(m_1, m_2) \in R_1^\odot$.
Hence, if $R_2$ is a branching fully-concurrent bisimulation, then the triple
$(\pi_1^0, g^0, \pi_2^0) \in R_2$ ensures that $m_1 \approx_{bfc} m_2$.
Let us check that $R_2$ is a branching fc-bisimulation.
Assume $(\pi_1, g, \pi_2) \in R_2$, where $\pi_i = (C_i, \rho_i)$ for $i = 1, 2$, so that
$\Phi(\pi_1, g, \pi_2)$ holds for some suitable multiset $q$ of associations.
In order to be a branching fc-bisimulation triple, it is necessary that
\begin{itemize}
\item[$i)$]
$\forall t_1, \pi_1'$ such that $\pi_1 \deriv{e_1} \pi_1'$ with $\rho_1'(e_1) = t_1$,
\begin{itemize}
\item {\em either} $l(e_1) = \tau$ and there exist $\sigma_2'$ (with $o(\sigma_2') = \epsilon$) and $\pi_2'$
such that $\pi_2 \Deriv{\sigma_2'} \pi_2'$, $(\pi_1, g, \pi_2') \in R_2$ and $(\pi_1', g, \pi_2') \in R_2$;
\item {\em or} $\exists \sigma'$ (with $o(\sigma') = \epsilon$), $e_2, \pi_2', \pi_2'', g'$
such that
\begin{enumerate}
\item $\pi_2 \Deriv{\sigma'} \pi_2' \deriv{e_2} \pi_2''$;
\item if $l(e_1) = \tau$, then $l(e_2) = \tau$ and $g' = g$;
otherwise, $l(e_1) = l(e_2)$ and
$g' = g \cup \{(e_1, e_2)\}$;
\item and finally, $(\pi_1, g, \pi_2') \in R_2$ and
$(\pi_1', g', \pi_2'') \in R_2$;
\end{enumerate}
\end{itemize}
\item[$ii)$] symmetrically, if $\pi_2$ moves first.
\end{itemize}
Assume $\pi_1 = (C_1, \rho_1) \deriv{e_1} (C_1', \rho_1') = \pi_1'$ with $\rho_1'(e_1) = t_1$.
Now, let $p = \{(\overline{r}_{1}, \overline{r}'_{1}),$ $\ldots,$ $ (\overline{r}_{h}, \overline{r}'_{h})\} \subseteq q$,
with $\overline{r}_{1}\oplus \ldots \oplus \overline{r}_{h}$ $= \pre{t_1}$.
Note that $(\pre{t_1}, \overline{r}'_{1} \oplus \ldots \oplus \overline{r}'_{h}) \in R_1^\odot$.
Now we remove from $\overline{r}_{1}\oplus \ldots \oplus \overline{r}_{h}$ those $\overline{r}_{i} = \theta$
to get $\overline{s}_{1}\oplus \ldots \oplus \overline{s}_{h'}$ $= \pre{t_1}$, with $h' \leq h$.
Similarly, we filter out from $\overline{r}'_{1} \oplus \ldots \oplus \overline{r}'_{h}$ only those related to places $\overline{s}_i$ in $\pre{t_1}$,
to get $\overline{m}_2 = \overline{s}'_{1} \oplus \ldots \oplus \overline{s}'_{h'}$, such that
$(\pre{t_1}, \overline{m}_{2}) \in \overline{R}_1^\oplus$.
By the characterization used in proving that a place relation is a branching d-place bisimulation,
since $R_1$ is a branching d-place bisimulation, from $(\pre{t_1},\overline{m}_2) \in \overline{R}_1^\oplus$ it follows that
\begin{itemize}
\item[$(a)$] either $t_1$ is $\tau$-sequential and there exists an acyclic $\tau$-sequential
$\sigma$ such that $\overline{m}_2 = \pre{\sigma}$,
$(\pre{t_1}, \post{\sigma}) \in \overline{R}_1$ and $(\post{t_1}, \post{\sigma}) \in \overline{R}_1$;
\item[$(b)$] or there exist an acyclic $\tau$-sequential $\sigma$ and $t_2 \in T$, with
$\post{\sigma} = \pre{t_2}$, such that $\overline{m}_2 = \pre{\sigma}$, $l(t_1) = l(t_2)$,
$(\pre{t_1}, \pre{t_2}) \in \overline{R}_1^\oplus$
and $(\post{t_1}, \post{t_2}) \in R_1^\odot$.
\end{itemize}
In the either case $(a)$, since $(\pre{t_1},\overline{m}_2) \in \overline{R}_1^\oplus$ and $\overline{m}_2 = \pre{\sigma}$,
we can really extend $\pi_2$ by performing a suitable $\sigma'$ (with $o(\sigma') = \epsilon$) to a suitable process $\pi_2'$
such that $\pi_2 \Deriv{\sigma'} \pi_2'$, $\rho_2'(\sigma') = \sigma$, $(\pi_1, g, \pi_2') \in R_2$ and $(\pi_1', g, \pi_2') \in R_2$,
where the last two conditions hold because property $\Phi(\pi_1, g, \pi_2')$ and $\Phi(\pi_1', g, \pi_2')$ trivially hold.
More precisely, $\Phi(\pi_1, g, \pi_2')$ holds because
from the multiset $q = \{(r_1, r_1'),$ $ (r_2, r_2'),$ $ \ldots,$ $(r_k, r_k')\}$ we remove the multiset
$p = \{(\overline{s}, \overline{s}')\} \subseteq q$ (such that
$ \pre{t_1} = \overline{s}$ and $\pre{\sigma} =
\overline{s}'$), and we
add the multiset $p' = \{(\overline{s}, \overline{s}'')\}$,
where $\post{\sigma} = \overline{s}''$, so that the resulting multiset
of associations satisfies the three conditions required by property $\Phi(\pi_1, g, \pi_2')$.
Similarly, $\Phi(\pi_1', g, \pi_2')$ holds because from the multiset $q = \{(r_1, r_1'),$ $ (r_2, r_2'),$ $ \ldots,$ $(r_k, r_k')\}$ we remove the multiset
$p = \{(\overline{s}, \overline{s}')\} \subseteq q$, and we
add the multiset $p'' = \{(\underline{s}, \overline{s}'')\}$,
where $\post{t_1} = \underline{s}$ and $\post{\sigma} = \overline{s}''$, so that the resulting multiset
of associations satisfies the three conditions required by property $\Phi(\pi_1', g, \pi_2')$.
In the or case $(b)$, we can really extend $\pi_2$ by performing a suitable $\sigma'$ (with $o(\sigma') = \epsilon$) to a suitable
process $\pi_2'$ such that $\pi_2 \Deriv{\sigma'} \pi_2'$, $\rho_2'(\sigma') = \sigma$ and $(\pi_1, g, \pi_2') \in R_2$; the last conditions
can be proved similarly as above; in particular, property $\Phi(\pi_1, g, \pi_2')$ holds
because
from the multiset $q = \{(r_1, r_1'),$ $ (r_2, r_2'),$ $ \ldots,$ $(r_k, r_k')\}$ we remove the multiset
$p = \{(\overline{s}_{1}, \overline{s}'_{1}),$ $\ldots,$ $ (\overline{s}_{h'}, \overline{s}'_{h'})\} \subseteq q$
(such that $ \pre{t_1} = \overline{s}_{1}\oplus \ldots \oplus \overline{s}_{h'}$ and $\pre{\sigma} = \overline{m}_2 =
\overline{s}'_{1} \oplus \ldots \oplus \overline{s}'_{h'}$) and we add
the multiset $p' = \{(\overline{s}_{1}, \overline{s}''_{1}), \ldots, (\overline{s}_{h'}, \overline{s}''_{h'})\}$,
where $\post{\sigma} = \overline{s}''_{1} \oplus \ldots \oplus \overline{s}''_{h'}$, so that the resulting multiset, say $q'$,
of associations satisfies the three conditions required by property $\Phi(\pi_1, g, \pi_2')$, indeed.
Moreover, because property $\Phi(\pi_1, g, \pi_2')$ holds for the resulting multiset $q'$
and $p' \subseteq q'$ is the multiset of associations ensuring that $(\pre{t_1}, \pre{t_2}) \in \overline{R}_1^\oplus$,
it is possible to single out an event $e_2$
such that $\pi_2' = (C_2', \rho_2') \deriv{e_2} (C_2'', \rho_2'') = \pi_2''$
(where $\rho_2''$ is such that $\rho_2''(e_2) = t_2$, with $l(t_1) = l(t_2)$) and such that
the set of observable events generating (or causing) the conditions of $\pre{e_1}$ (which are
mapped by $\rho_1$ to $\pre{t_1}$) are isomorphic, via $g$,
to the set of observable events generating (or causing) the conditions of $\pre{e_2}$ (which are
mapped by $\rho_2'$ to $\pre{t_2}$). Therefore,
the new generated events $e_1$ and $e_2$ have isomorphic observable predecessors via $g$.
So, by defining $g' = g \cup \{(e_1, e_2)\}$ (in case $l(t_1) \neq \tau$; otherwise $g' = g$),
we can conclude that $g'$ is an abstract event isomorphism between $C_1'$ and $C_2''$,
so that $(\pi_1', g', \pi_2'') \in R_2$. This last condition holds because property $\Phi(\pi_1', g', \pi_2'')$ holds.
In fact, from the multiset of associations $q'$ we remove the associations in $p'$ and
add any multiset $p''$ of associations
that can be used to prove that $(\post{t_1}, \post{t_2}) \in R_1^\odot$. The resulting multiset $q''$
satisfies property $\Phi(\pi_1', g', \pi_2'')$, as $q''$ can be used to prove that
$(\rho_1'(Max(C_1')), \rho_2''(Max(C_2''))) \in R_1^\odot$
and
for each $(r_i, r_i') \in q''$, we have that either one of the two elements in the pair is
$\theta$, or both places are the image of suitable conditions with no observable predecessor event, or
both places are the image of conditions generated by (or causally dependent on) events related by
the abstract event isomorphism $g'$.
The case when $\pi_2 = (C_2, \rho_2)$ moves first is symmetrical and so omitted.
Therefore, $R_2$ is a branching fully-concurrent bisimulation and, since $(\pi_1^0, g^0, \pi_2^0) \in R_2$, we have that
$m_1 \approx_{bfc} m_2$.
{\mbox{ }\nolinebreak
{$\Box$}}
\end{theorem}
However, the reverse implication of Theorem \ref{bdpbis>bfc-bis} does not hold in
general: it may happen that $m_1 \approx_{bfc} m_2$ but $m_1 \not \approx_{d} m_2$, as the following
example shows.
\begin{figure}
\caption{A P/T net with $s_1 \approx_{bfc} s_3$, but $s_1 \not\approx_{d} s_3$}
\label{stutt1-fig}
\end{figure}
\begin{example}\label{counter-ex1}
Consider the net in Figure \ref{stutt1-fig}. It is not difficult to realize that $s_1 \approx_{bfc} s_3$.
Informally, if $s_1 \deriv{\tau} s_2 \oplus s_3$,
$s_3$ can reply with $s_3 \deriv{\tau} s_3 \oplus s_4$ and $s_2 \oplus s_3 \approx_{bfc} s_3 \oplus s_4$, as required.
Symmetrically, besides the move above, $s_3$ can also do $s_3 \deriv{a} \theta$, and $s_1$ can reply with $s_1 \Deriv{\tau} s_3 \deriv{a} \theta$ with $s_3 \approx_{bfc} s_3$ and $\theta \approx_{bfc} \theta$.
However, $s_1 \not \approx_{d} s_3$: if $s_3 \deriv{a} \theta$, then $s_1$ can only respond with $s_1 \deriv{\tau} s_2 \oplus s_3 \deriv{\tau} s_3 \deriv{a} \theta$, but the silent path $s_1 \deriv{\tau} s_2 \oplus s_3 \deriv{\tau} s_3$ is not composed of $\tau$-sequential transitions only (actually, neither
of the two is $\tau$-sequential).
{\mbox{ }\nolinebreak
{$\Box$}}
\end{example}
\section{Conclusion and Future Research}\label{conc-sec}
Place bisimilarity \cite{ABS91} is the only decidable \cite{Gor21}
behavioral equivalence for P/T nets which respects the expected causal behavior,
as it is slightly finer than {\em structure preserving bisimilarity} \cite{G15},
in turn slightly finer than {\em fully-concurrent bisimilarity} \cite{BDKP91}.
Thus, it is the only equivalence for which it is possible (at least, in principle)
to verify algorithmically the (causality-preserving) correctness of an implementation by exhibiting a place bisimulation between its
specification and implementation.
It is sometimes argued that place bisimilarity is too discriminating. In particular, \cite{ABS91} and \cite{G15} argue
that a {\em sensible} equivalence should not distinguish markings whose behaviors are patently the same, such as
marked Petri nets that differ only in their unreachable parts. As an example, consider the net in Figure \ref{abs-net}, discussed in \cite{ABS91}.
Clearly, markings $s_1$ and $s_4$ are equivalent, also according to all the behavioral equivalences discussed in \cite{G15}, except
for place bisimilarity. As a matter of fact, a place bisimulation $R$ containing the pair $(s_1, s_4)$ would require also the pairs
$(s_2, s_5)$ and $(s_3, s_6)$, but then this place relation $R$ cannot be a place bisimulation because
$(s_2 \oplus s_3, s_5 \oplus s_6) \in R^\oplus$, but $s_2 \oplus s_3$ can perform $c$, while this is not possible for $s_5 \oplus s_6$.
Nonetheless, we would like to argue in favor of place bisimilarity, despite this apparent paradoxical example.
\begin{figure}
\caption{Two non-place bisimilar nets}
\label{abs-net}
\end{figure}
As a matter of fact, our interpretation of place bisimilarity is that this equivalence is
an attempt of giving semantics to {\em unmarked} nets, rather than to marked nets,
so that the focus shifts from the common (but usually undecidable) question {\em When are two markings equivalent?} to the more
restrictive (but decidable) question {\em When are two places equivalent?}
A possible (preliminary, but not accurate enough) answer to the latter question may be: two places are equivalent if,
whenever the same number of tokens are put on these two places,
the behavior of the marked nets is the same. If we reinterpret the example of Figure \ref{abs-net} in this perspective, we clearly see that
place $s_1$ and place $s_4$ cannot be considered as equivalent because, even if the markings $s_1$ and $s_4$
are equivalent, nonetheless the marking $2 \cdot s_1$ is not equivalent
to the marking $2 \cdot s_4$, as only the former can perform the trace $abc$.
A place bisimulation $R$ considers two places $s_1$ and $s_2$ as equivalent if $(s_1, s_2) \in R$, as, by definition of place bisimulation,
they must behave the same in any $R$-related context. Back to our example in Figure \ref{abs-net}, if $(s_1, s_4)$ would
belong to $R$, then also $(2 \cdot s_1, 2 \cdot s_4)$ should belong to
$R^\oplus$, but then we discover that the place bisimulation game does not hold for this pair of markings, so that $R$ cannot be a place bisimulation.
If we consider the duality between the process algebra FNM (a dialect of CCS, extended with multi-party interaction)
and P/T nets, proposed in \cite{Gor17}, we may find further arguments supporting this more restrictive interpretation of
net behavior. In fact, an {\em unmarked} P/T net $N$ can be described by an FNM system of equations, where each equation defines
a constant $C_i$ (whose body is a sequential process term $t_i$), representing place $s_i$.
Going back to the nets in Figure \ref{abs-net}, according to this duality, the constant $C_1$ for place $s_1$
is not equivalent (in any reasonable sense) to the constant $C_4$ for place $s_4$ because these two constants describe all the potential behaviors of these two places, which are clearly different!
Then, the marked net $N(m_0)$ is described by a parallel term composed of as many instances of $C_i$ as the tokens that are present in $s_i$
for $m_0$, encapsulated by a suitably defined restriction operator $\restr{L}-$. Continuing the example,
it turns out that $\restr{L}C_1$ is equivalent to $\restr{L}C_4$ because the markings $s_1$ and $s_4$ are equivalent,
but $\restr{L}(C_1 \mbox{$\,|\,$} C_1)$ is not equivalent to
$\restr{L}(C_4 \mbox{$\,|\,$} C_4)$ because the marking $2 \cdot s_1$ is not equivalent
to the marking $2 \cdot s_4$, as discussed above.
Moreover, there are at least the following three important technical differences between
place bisimilarity and other coarser, causality-respecting equivalences, such as fully-concurrent bisimilarity \cite{BDKP91}.
\begin{enumerate}
\item A fully-concurrent bisimulation is a complex relation --
composed of cumbersome triples of the form (process, bijection, process) --
that must contain infinitely many triples if the net system offers never-ending behavior. (Indeed, not even one single case study of a system with never-ending behavior
has been developed for this equivalence.)
On the contrary,
a place bisimulation is always a very simple finite relation over the finite set of places. (And a simple case study is described in \cite{Gor21}.)
\item A fully-concurrent bisimulation proving that $m_1$ and $m_2$ are equivalent
is a relation specifically designed for the initial markings $m_1$ and $m_2$. If we want to prove that,
e.g., $n \cdot m_1$ and $n \cdot m_2$ are fully-concurrent bisimilar (which
may not hold!), we have to construct a new fully-concurrent bisimulation to this aim.
Instead, a place bisimulation $R$
relates those places which are considered equivalent under all the possible
$R$-related contexts.
Hence, if $R$ justifies that $m_1 \sim_{p} m_2 $
as $(m_1, m_2) \in R^\oplus$, then
for sure the same $R$ justifies that $n \cdot m_1$ and $n \cdot m_2$ are
place bisimilar, as also
$(n \cdot m_1, n \cdot m_2) \in R^\oplus$.
\item Finally, while place bisimilarity is decidable \cite{Gor21}, fully-concurrent bisimilarity is undecidable on finite P/T nets \cite{Esp98}.
\end{enumerate}
The newly defined {\em branching place bisimilarity} is the only extension
of the place bisimilarity idea to P/T nets with silent moves
that has been proved decidable, even if the time complexity of the decision procedure we have proposed
is exponential in the size of the net.
Thus, it is the only equivalence for P/T nets with silent transitions for which it is possible (at least, in principle)
to verify algorithmically the correctness of an implementation by exhibiting a branching (d-)place bisimulation between its
specification and implementation, as we did for the small case study in Section \ref{case-sec}.
We have also proposed a slight weakening of branching place bisimilarity $\approx_p$, called {\em branching d-place bisimilarity} $\approx_d$, which may
relate places to the empty marking $\theta$
and which is still decidable. Actually, we conjecture that branching d-place bisimilarity is the coarsest,
sensible equivalence relation which is decidable on finite P/T nets with silent moves.
Of course, these behavioral relations may be subject to the same criticisms raised to place bisimilarity
and also its restrictive assumption that only $\tau$-sequential transitions can be abstracted away can be criticized,
as its applicability to real case studies may appear rather limited.
In the following, we try to
defend our point of view.
First, on the subclass of BPP nets, branching place bisimilarity coincides with
{\em branching team bisimilarity} \cite{Gor20c}, a very satisfactory equivalence which is actually coinductive and, for this reason,
also very efficiently decidable in polynomial time. Moreover, on the subclass of {\em finite-state machines}
(i.e., nets whose transitions have singleton pre-set and singleton, or empty, post-set), branching team bisimilarity
has been axiomatized \cite{Gor19b} on the process algebra CFM \cite{Gor17}, which can represent all (and only) the
finite-state machines, up to net isomorphism.
Second, branching (d-)place bisimilarity is a sensible behavioral equivalence relation, as it
does respect the causal behavior of P/T nets. In fact, we have proved that {\em branching fully-concurrent
bisimilarity} \cite{Pin93,Gor20c} (which is undecidable) is strictly coarser than $\approx_d$,
because it may equate nets whose silent transitions are not $\tau$-sequential (and also may relate markings of different size),
as illustrated in Example \ref{counter-ex1}.
As a further example,
consider the net in Figure \ref{net-bfc-place}. Of course, the markings $s_1 \oplus s_3$ and $s_5 \oplus s_6$
are branching fully-concurrent bisimilar: to the move $s_1 \oplus s_3 [t_1 \rangle s_2 \oplus s_3$, where
$t_1 = (s_1, \tau, s_2)$, $s_5 \oplus s_6$ can reply with $s_5 \oplus s_6[t_2\rangle s_7 \oplus s_8$,
where $t_2 = (s_5 \oplus s_6, \tau, s_7 \oplus s_8)$ and the reached markings are clearly equivalent.
However, $s_1 \oplus s_3 \not\approx_p s_5 \oplus s_6$ because $s_1 \oplus s_3$ cannot reply
to the move $s_5 \oplus s_6[t_2\rangle s_7 \oplus s_8$, as $t_2$ is not $\tau$-sequential (i.e., it can be seen as
the result of a synchronization), while $t_1$ is $\tau$-sequential.
\begin{figure}
\caption{Two branching fully-concurrent P/T nets}
\label{net-bfc-place}
\end{figure}
We already argued in the introduction that it is very much questionable whether a synchronization
can be considered as unobservable, even if this idea is rooted
in the theory of concurrency from the very beginning.
As a matter of fact, in CCS \cite{Mil89} and in the
$\pi$-calculus \cite{MPW,SW}, the result of
a synchronization is a silent, $\tau$-labeled (hence unobservable) transition.
However, the silent label $\tau$ is used in these
process algebras for two different purposes:
\begin{itemize}
\item
First, to ensure that a synchronization is strictly binary:
since the label $\tau$ cannot be used for synchronization, by labeling a synchronization transition by $\tau$
any further synchronization of the two partners with other parallel components is prevented (i.e.,
multi-party synchronization is disabled).
\item
Second, to describe that the visible effect of the transition is null: a $\tau$-labeled transition can be considered
unobservable and can be abstracted away, to some extent.
\end{itemize}
Nonetheless, it is possible to modify slightly these process algebras by introducing
two different actions for these different purposes.
In fact, the result of a binary synchronization can be some {\em observable} label, say $\lambda$
(or even $\lambda(a)$, if the name of the communication channel $a$ is
considered as visible), for which
no co-label exists, so that further synchronization is impossible.
The action $\tau$, which can be used as a prefix in the prefixing operator, is instead
used to denote some local, internal (hence unobservable) computation.
In this way, a net semantics for these process algebras (in the style of, e.g., \cite{Gor17}) would generate
$\tau$-sequential P/T nets, that are amenable to be compared by means of branching place bisimilarity.
As a final comment, we want to discuss an apparently insurmountable limitation of our approach.
In fact, the extension of the
place bisimulation idea to nets with silent transitions that are not $\tau$-sequential seems very hard, or even impossible.
Consider again the two P/T nets
in Figure \ref{net-bfc-place}. If we want that $s_1 \oplus s_3$ be related to $s_5 \oplus s_6$,
we need to include the pairs
$(s_1, s_5)$ and $(s_3, s_6)$. If the marking $s_5 \oplus s_6$ silently reaches
$s_7 \oplus s_8$, then $s_1 \oplus s_3$ can respond by idling
(and in such a case we have to include the pairs $(s_1, s_7)$ and $(s_3, s_8)$) or by performing
the transition $s_1 \deriv{\tau} s_2$ (and in such a case we have to include the pairs $(s_2, s_7)$ and $(s_3, s_8)$).
In any case, the candidate place relation $R$ should be of the form
$\{(s_1, s_5), (s_3, s_6), (s_3, s_8), \ldots\}$.
However, this place relation cannot be a place bisimulation of any sort because, on the one hand,
$(s_1 \oplus s_3, s_5 \oplus s_8) \in R^\oplus$
but, on the other hand, $s_1 \oplus s_3$ can eventually perform $a$, while $s_5 \oplus s_8$ is stuck.
Nonetheless, this negative observation is coherent with our intuitive interpretation of (branching) place bisimilarity as a way to give semantics to
{\em unmarked} nets. In the light of the duality between
P/T nets and the FNM process algebra discussed above \cite{Gor17},
a place is interpreted as a sequential process type (and each token
in this place as an instance of a sequential process of that type);
hence, a (branching) place bisimulation
essentially states which kinds of sequential processes (composing the distributed system represented by the Petri net)
are to be considered equivalent. In our example above, it makes no sense to consider place $s_1$ and place $s_5$ as equivalent,
because the corresponding FNM constants $C_1$ and $C_5$ have completely different behavior:
$C_5$ can interact (with $C_6$), while $C_1$ can only perform
some internal, local transition.
Future work will be devoted to find more efficient algorithms for checking branching place bisimilarity.
One idea could be to build directly the set of maximal branching place bisimulations, rather than to scan all the
place relations to check whether
they are branching place bisimulations, as we did in the proof of Theorem \ref{bpl-bis-decid-th}.
\end{document} |
\begin{document}
\title{\begin{LARGE}\textbf{Quantum stabilizer codes from Abelian and non-Abelian groups association schemes
}\end{LARGE}}
\date{20 July 2014}
\author{\textbf{A. Naghipour$^{1,2}$}\footnote{{\it Electronic addresses:} naghipour@ucna.ac.ir and a\_naghipour@tabrizu.ac.ir} \textbf{
M. A. Jafarizadeh$^{3}$} \footnote{{\it Electronic addresses:} jafarizadeh@tabrizu.ac.ir and mjafarizadeh@yahoo.com}
\textbf{
S. Shahmorad$^{2}$} \footnote{{\it Electronic address:} shahmorad@tabrizu.ac.ir}\\
[5pt]
{\it $^{1}$Department of Computer Engineering, University College of
Nabi Akram,}\\ {\it No. 1283 Rah Ahan Street,
Tabriz, Iran}\\
[2mm]
{\it $^{2}$Department of Applied Mathematics,
Faculty of Mathematical Sciences, University of Tabriz,}\\
{\it 29 Bahman Boulevard, Tabriz, Iran }\\
[2mm]
{\it $^{3}$Department of
Theoretical Physics and Astrophysics,
Faculty of Physics, University of Tabriz,}\\
{\it 29 Bahman Boulevard, Tabriz, Iran }}
\date{27 September 2014}
\maketitle
\leftskip=0pt \hrule\vskip 8pt
\begin{small}
\hspace{-.8cm}
{\bfseries Abstract}\\\\
A new method for the construction of the binary quantum stabilizer
codes is provided, where the construction is based on Abelian and
non-Abelian groups association schemes. The association schemes
based on non-Abelian groups are constructed by bases for the regular
representation from $U_{6n}$, $T_{4n}$, $V_{8n}$ and dihedral
$D_{2n}$ groups. By using Abelian group association schemes followed
by cyclic groups and non-Abelian group association schemes a list of
binary stabilizer codes up to $40$ qubits is given in tables $4$,
$5$, and $10$. Moreover, several binary stabilizer codes of
distances $5$ and $7$ with good quantum parameters are presented. The
preference of this method specially for Abelian group association
schemes is that one can construct any binary quantum stabilizer code
with any distance by using the commutative structure of association
schemes.
\\
\\
{\bf Keywords:} Stabilizer codes; Association schemes; Adjacency
matrices; Cyclic groups; Quantum Hamming bound; Optimal stabilizer
codes
\parindent 1em
\end{small}
\vskip 10pt\hrule
\medskip
\section{\hspace*{-.5cm}Introduction}
An important class of quantum codes is the class of stabilizer codes.
Stabilizer codes were first introduced by Gottesman [1]. These codes are
useful for building quantum fault tolerant circuits. Stabilizer code
encompasses large class of well-known quantum codes, including Shor
$9$-qubit code [6], CSS code [7], and toric code [3]. For stabilizer
codes, the error syndrome is identified by measuring the generators
of the stabilizer group. Several methods for constructing good
families of quantum codes have been proposed by numerous authors
over recent years. In [8]-[12] many binary quantum codes have been
constructed by using classical error-correcting codes, such as
Reed-Solomon codes, Reed-Muller codes, and algebraic-geometric
codes. The theory was later extended to the nonbinary case, which
authors in [13]-[15] have introduced nonbinary quantum codes for the
fault-tolerant quantum computation. Several new families of quantum
codes, such convolutional quantum codes, subsystem quantum codes
have been studied through algebraic and geometric tools and the
stabilizer method has been extended to these variations of quantum
code [16], [17].
\\
\hspace*{0.5cm} Wang et al. [21] studied the construction of
nonadditive AQCs as well as constructions of asymptotically good
AQCs derived from algebraic-geometry codes. Wang and Zhu [22]
presented the construction of optimal AQCs. Ezerman et al. [23]
presented so-called CSS-like constructions based on pairs of nested
subfield linear codes. They also employed nested codes (such as BCH
codes, circulant codes, etc.) over $\mathbb{F}_{4}$ to construct
AQCs in their earlier work [24]. The asymmetry was introduced into
topological quantum codes in [25]. Leslie [26] presented a new type
of sparse CSS quantum error-correcting code based on the homology of
hypermaps. Authors in [27] have studied the construction of AQCs
using a combination of BCH and finite geometry LDPC codes. Various
constructions of new AQCs have been studied in [28], [29]. Here in
this work the dominant underlying theme is that of constructing good
binary quantum stabilizer codes of distance $3$ and higher, e.g.,
codes with good quantum parameters based on Abelian and non-Abelian
groups association schemes. Using Abelian and non-Abelian groups
association schemes, we obtain many binary quantum stabilizer codes.
\\
\hspace*{0.5cm} An association scheme is a combinatorial object with
useful algebraic properties (see [30] for an accessible
introduction). This mathematical object has very useful algebraic
properties which enables one to employ them in algorithmic
applications such as the shifted quadratic character problem [31]. A
$d$-class symmetric association scheme ($d$ is called the diameter
of the scheme) has $d+1$ symmetric relations $R_i$ which satisfy
some particular conditions. Each non-diagonal relation $R_i$ can be
thought of as the network $(V,R_i)$, where we will refer to it as
the underlying graph of the association scheme ($V$ is the vertex
set of the association scheme which is considered as the vertex set
of the underlying graph). In [32], [33] algebraic properties of
association schemes have been employed in order to evaluate the
effective resistances in finite resistor networks, where the
relations of the corresponding schemes define the kinds of
resistances or conductances between any two nodes of networks. In
[34], a dynamical system with $d$ different couplings has been
investigated in which the relationships between the dynamical
elements (couplings) are given by the relations between the vertexes
according to the corresponding association schemes. Indeed,
according to the relations $R_i$, the so-called adjacency matrices
$A_i$ are defined which form a commutative algebra known as
Bose-Mesner (BM) algebra. Group association schemes are particular
schemes in which the vertices belong to a finite group and the
relations are defined based on the conjugacy classes of the
corresponding group. Working with these schemes is relatively easy,
since almost all of the needed information about the scheme is readily available. We will
employ the commutative structure of the association schemes in order
to the construction of binary quantum stabilizer codes, in terms of
the parameters of the corresponding association schemes such as the
valencies of the adjacency matrices $A_i$ for $i=1,...,d$. As it
will be said in Section 3, in order to construct the binary quantum
stabilizer codes, one needs a binary matrix $A=(A_1 \vert A_2)$,
such that by removing arbitrarily row or rows from $A$ one can
obtain $n-k$ independent generators. After finding the code distance
by $n-k$ independent generators one can then determine the
parameters of the associated code.
\\
\hspace*{0.5cm} The organization of the paper is as follows. In
section 2, we give preliminaries such as quantum stabilizer codes,
association schemes, group association schemes, finite Abelian
groups and finite non-Abelian groups. Section 3 is devoted to the construction of binary quantum
stabilizer codes based on Abelian group association schemes. In section 4,
we construct the binary quantum stabilizer codes based on non-Abelian group association schemes.
The paper ends with a brief conclusion.
\\
\section{\hspace*{-.5cm} Preliminaries }
In this section, we give some preliminaries such as quantum codes
and association schemes used through the paper.
\\
\subsection{\hspace*{-.5cm} Quantum stabilizer codes}
We recall quantum stabilizer codes. For material not covered in this
subsection, as well as more detailed information about quantum error
correcting codes, please refer to [20], [36]. We employ binary
quantum error correcting codes (QECCs) defined on the complex
Hilbert space $\mathcal{H}_{2}^{\otimes n}$ where $\mathcal{H}_{2}$
is the complex Hilbert space of a single qubit $\alpha \vert 0
\rangle + \beta \vert 1 \rangle$ with $\alpha , \beta \in
\mathbb{C}$ and $ \vert \alpha \vert^{2} + \vert \beta \vert^{2}=1$.
The fundamental element of stabilizer formalism is the Pauli group
$\mathcal{G}_n$ on $n$ qubits. The Pauli group for one qubit is
defined to consist of all Pauli matrices, together with
multiplicative factors $\pm 1$, $\pm i$:
\\
\begin{equation}\label{adm1}
\mathcal{G}_1 = \{\pm I, \pm iI, \pm X, \pm iX, \pm Y, \pm iY, \pm
Z, \pm iZ\}
\end{equation}
\\
where $X , Y$ and $Z$ are the usual Pauli matrices and I is the
identity matrix. The set of matrices $\mathcal{G}_1$ forms a group
under the operation of matrix multiplication. In general, the group
$\mathcal{G}_n$ consists of all tensor products of Pauli matrices on
$n$ qubits again with multiplicative factors $\pm 1$, $\pm i$.
\\
\hspace*{0.5cm} Suppose $S$ is a subgroup of $\mathcal{G}_n$ and
define $V_S$ to be the set of $n$ qubit states which are fixed by
every element of $S$. The $V_S$ is the vector \textit{space
stabilized} by $S$, and $S$ is said to be the \textit{stabilizer} of
the space $V_S$.
\\
Consider the stabilizer $ S=\langle g_1 , ... , g_l \rangle$. The
check matrix corresponding to $S$ is an $l \times 2n$ matrix whose
rows correspond to the generators $g_1$ through $g_l$; the left hand
side of the matrix contains $1$s to indicate which generators
contain $X$s, and the right hand side contains $1$s to indicate
which generators contain $Z$s; the presence of a $1$ on both sides indicates
a $Y$ in the generator. The $i$-th row of the check matrix is
constructed as follows: If $g_i$ contains $I$ on the $j$-th qubit
then the matrix contain 0 in $j$-th and $n+j$-th columns. If $g_i$
contains an $X$ on the $j$-th qubit then the element in $j$-th
column is 1 and in $n+j$-th column is 0. If it contains $Z$ on
$j$-th qubit then $j$-th column contains 0 and $n+j$-th element
contains 1. And in the last, if $g_i$ contains operator $Y$ on
$j$-th qubit then both $j$-th and $n+j$-th columns contain 1.
\\
The check matrix does not contain any information about overall
multiplicative factor of $g_i$. We denote by $r(g)$ a row vector
representation of operator $g$ from check matrix, which contains
$2n$ binary elements. Define $\Lambda$ as:
\\
\begin{equation}
\Lambda = \left[
\begin{array}{cc}
0 & I \\
I & 0 \\
\end{array}
\right]_{2n\times 2n}
\hspace*{3cm}
\end{equation}
\\
where the matrices $I$ on the off-diagonals are $n \times n$.
Elements $g$ and $g'$ of the Pauli group are easily seen to commute
if and only if \hspace*{1mm}$r(g) \Lambda r(g')^{T}=0$. Therefore
the generators of stabilizer $S=\langle g_1, ... ,g_l\rangle $ with
corresponding check matrix $M$ commute if and only if $M \Lambda
M^{T}=0$. Let $S=\langle g_1, ... ,g_l\rangle $ be such that $-I$ is
not an element of $S$. Then the generators $g_i$, $i \in \{1, ...
,l\}$ are independent if and only if the rows of the corresponding
check matrix are linearly independent.\\
Suppose $C(S)$ is a stabilizer code with stabilizer $S$. We denote
by $N(S)$ a subset of $\mathcal{G}_n$, which is defined to consist
of all elements $E \in \mathcal{G}_n$ such that $EgE^{\dag}\in S$
for all $g \in S$. The following theorem specifies the correction
power of $C(S)$.
\\
\\
\textbf{Theorem 2.1.} Let $S$ be the stabilizer for a stabilizer
code $C(S)$. Suppose $\{E_j\}$ is a set of operators in
$\mathcal{G}_n$ such that $E_{j}^{\dag} E_k \notin N(S) - S$ for all
$j$ and $k$. Then $\{E_j\}$ is a correctable set of errors for the
code $C(S)$.
\\
\\
\textit{Proof}. See [36].
\\
\\
\hspace*{0.5cm} Theorem 2.1 motivates the definition of a notion of
\textit{distance} for a quantum code in analogy to the distance for
a classical code. The \textit{weight} of an error
$E\in\mathcal{G}_n$ is defined to be the number of terms in the
tensor product which are not equal to the identity. For example, the
weight of $X_1 Z_4 Y_8$ is three. The distance of stabilizer code
$C(S)$ is given by the minimum weight of an element of $N(S)-S$. In
terms of the binary vector pairs $\textbf{(a,b)}$, this is
equivalent to a minimum weight of the bitwise OR $\textbf{(a,b)}$ of
all pairs satisfying the symplectic orthogonality condition,
\\
\begin{equation}
A_1 \textbf{b} + A_2 \textbf{a}=0,
\hspace*{3cm}
\end{equation}
\\
which are not linear combinations of the rows of the binary check
matrix $ A= ( A_1 \vert A_2 )$.
\\
\hspace*{0.5cm} A $2$-ary quantum stabilizer code $\mathcal{Q}$,
denoted by $[[n,k,d]]_{2}$, is a $2^{k}$-dimensional subspace of the
Hilbert space $\mathcal{H}^{\otimes n}_{2}$ stabilized by an Abelian
stabilizer group $\mathcal{S}$, which does not contain the operator
$-I$ [6], and can correct all errors of weight up to
$\lfloor\frac{d-1}{2}\rfloor$. Explicitly
\\
\begin{equation}
\mathcal{Q}=\{\vert \psi \rangle: s \vert \psi \rangle = \vert \psi
\rangle, \hspace*{1mm}\forall s \in \mathcal{S}\}.
\hspace*{3cm}
\end{equation}
\\
This code, encodes $k$ logical qubits into $n$ physical qubits. The
rate of such code is $\frac{k}{n}$. Since codespace has dimension
$2^{k}$ so that we can encode $k$ qubits into it. The stabilizer
$\mathcal{S}$ has a minimal representation in terms of $n-k$
independent generators $\{ g_1, ... , g_{n-k}\hspace*{1mm} \vert
\hspace*{1mm}\forall i \in \{1, ... , n-k\}, \hspace*{1mm}g_i \in
\mathcal{S}\}$. The generators are independent in the sense that
none of them is a product of any other two (up to a global phase).
\\
\\
\hspace*{0.5cm} As in classical coding theory, there are two bounds
which have been established as necessary conditions for quantum
codes.
\\
\\
\textbf{Lemma 2.2} (quantum Hamming bound for binary case). For any
pure quantum stabilizer code $[[n,k,d]]_{2}$, we have the following
inequality
\begin{equation}
\sum_{j=0}^{[\frac{d-1}{2}]}\binom{n}{j}3^j2^k\leq2^n.
\end{equation}
\\
\textit{Proof}. See [5].
\\
\\
For any pure quantum stabilizer code with distance $3$, the quantum
Hamming bound is written by
\begin{equation}
n-k\geq \lceil \log_{2}(3n+1)\rceil.
\end{equation}
It is also satisfied for degenerate codes of distances $3$ and $5$
[1].
\\
\\
\textbf{Lemma 2.3} (quantum Knill-Laflamme). For any quantum
stabilizer code $[[n,k,d]]_{q}$, we have
\begin{equation}
n\geq k+2d-2.
\end{equation}
\\
\textit{Proof}. See [2].
\\
\\
The class of quantum stabilizer codes is optimal in the sense that
its $k$ with fixed $n$ and $d$ is the largest.
\subsection{\hspace*{-.5cm} Association schemes}
The theory of association schemes has its origin in the design of
statistical experiments [18] and in the study of groups acting on
finite sets [35]. Besides, associations schemes are used in coding
theory [19], design theory and graph theory. One of the important
preferences of association schemes is their useful algebraic
structures that enable one to find the spectrum of the adjacency
matrices relatively easy; then, for different physical purposes, one
can define particular spin Hamiltonians which can be written in
terms of the adjacency matrices of an association scheme so that the
corresponding spectra can be determined easily. The reader is
referred to [4] for further information on association schemes.
\\
\\
\textbf{Definition \hspace*{1mm}2.2.1.} A d-class association scheme
$\Omega$ on a finite set $V$ is an ordered set $\{R_0,R_1, ... ,R_d\}$
of relations on the set $V$ which satisfies the following axioms:
\\
\\
(1)\hspace*{1mm}$\{R_0,R_1, ... ,R_d\}$ is a partition of $V\times
V$.
\\
\\
(2) $R_0$ is the identity relation, i.e., $(x,y)\in R_0$ if and only
if $x=y$, whenever $x,y \in V$.
\\
\\
(3) Every relation $R_i$ is symmetric, i.e., if $(x,y) \in R_i$ then
also $(y,x) \in R_i$, for every $x,y \in V$.
\\
\\
(4) Let $0\leq i,j,l \leq d$. Let $x,y \in V$ such that $(x,y) \in
R_l$, then the number
\\
$$ p_{ij}^{l}= \vert \{z \in V : (x,z) \in R_i \hspace*{1mm}\textrm{and}\hspace*{1mm} (z,y) \in R_j
\}\vert$$
\\
only depends on $i,j$ and $l$.
\\
\\
The relations $R_0,R_1, ... ,R_d$ are called the associate classes
of the scheme; two elements $x,y \in V$ are $i$-th associates if
$(x,y) \in R_i$. The numbers $p^{l}_{ij}$ are called the
\textit{intersection numbers} of $\Omega$. If
\\
\\
$(3)^{'} \hspace*{3mm} R_i^{t}=R_i \hspace*{3mm}\textrm{for}
\hspace*{3mm}0\leq i\leq d,\hspace*{3mm} \textrm{where}\hspace*{3mm}
R_{i}^{t}=\{(\beta,\alpha): (\alpha,\beta) \in R_i\}$
\\
\\
then the corresponding association scheme is called symmetric.
Further, if $p^{l}_{ij}=p^{l}_{ji}$ for all $ 0\leq i,j,l \leq d$,
then $\Omega =(V,\{R_i\}_{0\leq i \leq d})$ is called commutative.
Let $\Omega$ be a commutative symmetric association scheme of class
$d$; then the matrices $A_0,A_1,...,A_d$ defined by
\\
\begin{equation}
(A_{i})_{\alpha , \beta}= \left\{
\begin{array}{ll}
1 & \hbox{if}\hspace*{2mm}(\alpha , \beta) \in R_i, \\
0 & \hbox{otherwise}
\end{array}
\right.
\hspace*{4cm}
\end{equation}
\\
are adjacency matrices of $\Omega$ and are such that
\\
\begin{equation}
A_i A_j = \sum_{l=0}^{d}p_{ij}^{l}A_l. \hspace*{4cm}
\end{equation}
\\
From (2.9), it is seen that the adjacency matrices $A_0,A_1, ...
,A_d$ form a basis for a commutative algebra $\mathbf{A}$ known as
the Bose-Mesner algebra of $\Omega$. This algebra has a second basis
$E_0, \ldots , E_d$ of primitive idempotents,
\\
\begin{equation}
E_0 =\frac{1}{\nu}J,\hspace*{3mm}E_i E_j=\delta_{ij}
E_i,\hspace*{3mm}\sum_{i=0}^{d}E_i = I,
\hspace*{3cm}
\end{equation}
\\
where $\nu =\vert V\vert$ and $J$ is a $\nu \times \nu$ all-one
matrix in $\mathbf{A}$. In terms of the adjacency matrices $A_0,A_1, ...
,A_d$ the four defining axioms of a $d$-class association scheme
translate to the following four statements [39]:
\\
\begin{equation}
\sum_{l=0}^{d}A_l=J,\hspace*{3mm}A_0=I,\hspace*{3mm}A_i =A_i^{T}
\hspace*{3mm}\textrm{and} \hspace*{3mm}A_i A_j
=\sum_{l=0}^{d}p_{ij}^{l}A_l.
\hspace*{3cm}
\end{equation}
\\
\\
with $0\leq i,j \leq d$ and where $I$ denotes the $\nu \times \nu$
identity matrix and $A^{T}$ is the transpose of $A$. Consider the
cycle graph with $\nu$ vertices by $C_\nu$. It can be easily seen
that, for even number of vertices $\nu=2m$, the adjacency matrices
are given by
\\
\begin{equation}
A_i = S^{i}+ S^{-i},\hspace*{3mm} i=1,2, ... ,m-1,
\hspace*{3mm}A_m=S^{m},
\hspace*{3cm}
\end{equation}
\\
where $S$ is a $\nu \times \nu$ circulant matrix with period $\nu
(S^{\nu}= I_\nu)$ defined as follows:
\\
\begin{equation}
S=\left(
\begin{array}{ccccccc}
0 & 0 & 0 & \ldots & 0 & 0 & 1 \\
1 & 0 & 0 & \ldots & 0 & 0 & 0 \\
0 & 1 & 0 & \ldots & 0 & 0 & 0 \\
\vdots\\
0 & 0 & 0 & \ldots & 1 & 0 & 0 \\
0 & 0 & 0 & \ldots & 0 & 1 & 0 \\
\end{array}
\right).
\hspace*{3cm}
\end{equation}
\\
For odd number of vertices $\nu =2m+1$, we have
\\
\begin{equation}
A_i = S^{i}+ S^{-i},\hspace*{3mm} i=1,2, ... ,m-1,m. \hspace*{3cm}
\end{equation}
\\
One can easily check that the adjacency matrices in (2.12) together
with $A_0=I_{2m}$ (and also the adjacency matrices in (2.14)
together with $A_0=I_{2m+1}$) form a commutative algebra.
\\
\subsection{\hspace*{-.3cm}Group association schemes}
In order to construct quantum stabilizer codes, we need to study the
group association schemes. Group association schemes are particular
association schemes for which the vertex set contains elements of a
finite group $G$ and the relations $R_i$ are defined by
\begin{equation}
R_i=\{(\alpha,\beta):\alpha \beta^{-1} \in C_i\},
\hspace*{3cm}
\end{equation}
\\
where $C_{0}=\{e\},C_{1},\ldots,C_{d} $ are the set of conjugacy
classes of $G$. Then, $\Omega = (G,\{R_i\}_{0\leq i \leq d})$
becomes a commutative association scheme and it is called the group
association scheme of the finite group $G$. It is easy to show that
the $\textit{i}$\hspace*{0.3mm}th adjacency matrix is a summation
over elements of the $\textit{i}$\hspace*{0.3mm}th stratum group. In
fact by the action of $\bar{C}_{i}:=\Sigma_{g\in C_i}{g}$
($\bar{C}_i$ is called the $\textit{i}$\hspace*{0.3mm}th
$\textit{class sum}$) on group elements in the regular
representation we observe that $\forall \alpha , \beta ,
(\bar{C}_i)_{\alpha\beta}= (A_i)_{\alpha\beta}$, so
\\
\begin{equation}
A_i =\bar{C}_i = \sum_{g \in C_i}g,
\hspace*{3cm}
\end{equation}
\\
Thus due to (2.9),
\begin{equation}
\bar{C}_i \bar{C}_j= \sum^{d}_{l=0}p^{l}_{ij} \bar{C}_l,
\hspace*{3cm}
\end{equation}
\\
However the intersection numbers $p^{l}_{ij}, i,j,l =0,1,...,d$ are
given by [38]
\\
\begin{equation}
p^{l}_{ij}= \frac{\vert C_i \vert \vert C_j \vert}{\vert G \vert}
\sum^{d}_{m=0} \frac{\chi_m (g_i)\chi_m
(g_j)\overline{\chi_m(g_l)}}{\chi_m (1)},
\hspace*{3cm}
\end{equation}
where $n:= \vert G \vert $ is the total number of vertices.
\\
\subsection{\hspace*{-.3cm}Finite Abelian groups}
The classification of finite groups is extremely difficult, but the
classification of finite Abelian groups is not so difficult. It turns out
that a finite Abelian group is isomorphic to a product of cyclic
groups, and there's a certain uniqueness to this representation.
\\
\subsubsection{\hspace*{-.3cm}Cyclic groups and subgroups}
Let $G$ be a group and $a\in G$. The subset
\begin{equation}
\langle a \rangle = \{a^{n}\vert n \in \mathbb{Z}\}
\end{equation}
\\
is a subgroup of $G$. It is called a $\textit{cyclic subgroup}$ of
$G$, or the subgroup $\textit{generated}$ by $a$. If $G=\langle a
\rangle$ for some $a\in G$ then we call $G$ a cyclic group.
\\
\\
The \textit{order} of an element $a$ in a group is the least
positive integer $n$ such that $a^{n}=1$. It's denoted ord $a$.
\\
\\
We will often denote the abstract cyclic group of order $n$ by $C_n
= \{1,a,a^{2}, ... , a^{n-1}\}$ when the operation is written
multiplicatively. It is isomorphic to the underlying additive group
of the ring $\mathbb{Z}_n$, where an isomorphism
$f:\mathbb{Z}_n \rightarrow C_n$ is defined by
$f(k)=a^{k}$.
\\
\\
$\hspace*{3mm}$Note that cyclic groups are all Abelian, since $a^{n}
a^{m}=a^{m+n}= a^{m}a^{n}$. The integers $\mathbb{Z}$ under addition
form an infinite cyclic group, while $\mathbb{Z}_n$, the integers
modulo $n$, is a finite cyclic group of order $n$. Every cyclic
group is isomorphic either to $\mathbb{Z}$ or to $\mathbb{Z}_n$ for
some $n$.
\\
\subsubsection{\hspace*{-.3cm}Product of groups}
Using multiplicative notation, if $G$ and $H$ are two groups then $G
\times H$ is a group where the product operation $(a,b)(c,d)$ is
defined by $(ac,bd)$, for all $a,c\in G$ and all $b,d\in H$.
\\
\\
The product of two Abelian groups is also called their
\textit{direct sum}, denoted $G \oplus H$. Every cyclic group
of order $n$ is given by the modular integers $\mathbb{Z}_n$ under
addition mod $n$. Hence, to illustrate, an Abelian group of order
1200 may actually be isomorphic to, say, the group
$\mathbb{Z}_{40}\times \mathbb{Z}_{6}\times \mathbb{Z}_{5}$.
Furthermore, the Chinese remainder theorem, as we'll see, says
that if $m$ and $n$ are relatively prime, then $\mathbb{Z}_{_{mn}}
\cong \mathbb{Z}_{_{m}} \times \mathbb{Z}_{_{n}}$. In the preceding
example, we may then replace $\mathbb{Z}_{_{40}}$ by
$\mathbb{Z}_{_{2^{3}}}\times \mathbb{Z}_{5}$, and $\mathbb{Z}_{6}$
by $\mathbb{Z}_{2} \times \mathbb{Z}_{3}$. Therefore, we will state
the fundamental theorem like this: every finite Abelian group is the
product of cyclic groups of prime power orders. The collection of
these cyclic groups will be determined uniquely by the group $G$.
\\
\\
\textbf{Theorem 2.4} (Chinese remainder theorem for groups). Suppose
that $n=km$ where $k$ and $m$ are relatively prime. Then the cyclic
group $C_n$ is isomorphic to $C_k \times C_m$. More generally, if
$n$ is the product $k_1 \cdots k_r$ where the factors are pairwise
relatively prime, then
\begin{equation}
C_n \cong C_{k_{1}} \times ... \times C_{k_{r}}=
\prod^{r}_{i=1}C_{k_{_{i}}}.
\hspace*{3cm}
\end{equation}
\\
In particular, if $n= p^{e_{1}}_{1} ... p^{e_{r}}_{r}$, then the
cyclic group $C_n$ factors as the product of the cyclic groups
$C_{p_{i}^{e_{i}}}$, that is
\\
\begin{equation}
C_n \cong \prod^{r}_{i=1} C_{p_{i}^{e_{i}}}.
\hspace*{3cm}
\end{equation}
\\
\\
\textbf{Theorem 2.5} (Fundamental theorem of finite Abelian groups).
Every finite Abelian group is isomorphic to the direct product of a
unique collection of cyclic groups, each having a prime power order.
\\
\\
\textit{Proof}. See [37].
\\
\\
\hspace*{3mm}For the determination of the number of distinct Abelian
groups of order $n$ we need to study the partition function. In
number theory, a partition of a positive integer $n$ is a way of
writing $n$ as a sum of positive integers. The number of different
partitions of $n$ is given by the partition function $p(n)$ [40].
\\
\\
For instance $p(5)=7$, having seen the seven ways we can partition
5, i.e.,
\\
\\
\begin{equation}
\begin{array}{c}
5=5 \\
\hspace*{7mm}5=4+1 \\
\hspace*{7mm}5=3+2 \\
\hspace*{14mm}5=3+1+1 \\
\hspace*{14mm}5=2+2+1 \\
\hspace*{21mm}5=2+1+1+1\\
\hspace*{28mm}5=1+1+1+1+1
\end{array}
\hspace*{3cm}
\end{equation}
\\
\\
So, there are seven Abelian groups of order 32, i.e.,
\\
\\
\begin{equation}
\begin{array}{c}
G_1=\mathbb{Z}_{2^{5}}\\
\hspace*{9mm} G_2=\mathbb{Z}_{2^{4}}\times \mathbb{Z}_{2}\\
\hspace*{10.5mm} G_3=\mathbb{Z}_{2^{3}}\times \mathbb{Z}_{2^{2}} \\
\hspace*{17.7mm}G_4=\mathbb{Z}_{2^{3}}\times \mathbb{Z}_{2}\times \mathbb{Z}_{2} \\
\hspace*{19.2mm}G_5=\mathbb{Z}_{2^{2}}\times \mathbb{Z}_{2^{2}}\times \mathbb{Z}_{2} \\
\hspace*{26.7mm}G_6=\mathbb{Z}_{2^{2}}\times \mathbb{Z}_{2}\times \mathbb{Z}_{2}\times \mathbb{Z}_{2} \\
\hspace*{34mm}G_7=\mathbb{Z}_{2}\times \mathbb{Z}_{2}\times \mathbb{Z}_{2}\times
\mathbb{Z}_{2}\times \mathbb{Z}_{2}
\end{array}
\hspace*{2.2cm}
\end{equation}
\\
\\
The above function enables us to better express the number of
distinct Abelian groups of a given order, as follows.
\\
\\
\textbf{Theorem 2.6.} Let $n$ denote a positive integer which
factors into distinct prime powers, written $n=\prod p_{k}^{e_{k}}$.
Then there are exactly $\prod p(e_k)$ distinct Abelian groups of
order $n$.
\\
\\
\hspace*{3mm}In particular, when $n$ is square-free, i.e., all
$e_k=1$, then there is a unique Abelian group of order $n$ given by
$\mathbb{Z}_{p_{1}} \times \mathbb{Z}_{p_{2}}\times ... \times
\mathbb{Z}_{p_{k}}$, which is just the cyclic group
$\mathbb{Z}_{n}$, if we may borrow the Chinese remainder theorem again.
\\
\\
\subsection{\hspace*{-.3cm}Finite non-Abelian groups}
A non-Abelian group, also sometimes called a non-commutative group,
is a group $(G,*)$ in which there are at least two elements $a$ and
$b$ of $G$ such that $a*b\neq b*a$. Non-Abelian groups are pervasive
in mathematics and physics. Both discrete groups and continuous
groups may be non-Abelian. Most of the interesting Lie groups are
non-Abelian, and these play an important role in gauge theory.
\subsubsection{\hspace*{-.3cm}Non-Abelian group $U_{6n}$}
The group $U_{6n}$, where $n\geq 1$, is generated by two generators
$a$ and $b$ with the following relations:
\begin{equation}
U_{6n}=\{a,b: a^{2n}=b^3=1, a^{-1}ba=b^{-1}\}.
\hspace*{3cm}
\end{equation}
The group $U_{6n}$ has $3n$ conjugacy classes. The $3n$ conjugacy
classes are given by, for $0\leq r\leq n-1$,
\begin{equation}
\{a^{2r}\}, \{ba^{2r}, b^2a^{2r}\}, \{a^{2r+1}, ba^{2r+1},
b^2a^{2r+1}\}.
\hspace*{3cm}
\end{equation}
The number of group elements of $U_{6n}$ is $6n$ and the matrix
representations of $[a]$ and $[b]$ with respect to the basis
$\mathcal{B}=\{a^j, ba^j, b^2a^j\}$, for $0\leq j\leq 2n-1$, are
given by
\begin{equation}
[a]=\left(
\begin{array}{ccc}
S & 0 & 0 \\
0 & 0 & S \\
0 & S & 0 \\
\end{array}
\right), [b]=\left(
\begin{array}{ccc}
0 & I & 0 \\
0 & 0 & I \\
I & 0 & 0 \\
\end{array}
\right)
\end{equation}
where $I$ is an $2n \times 2n$ identity matrix and $S$ is an $2n
\times 2n$ circulant matrix with period $2n (S^{2n}= I_{2n})$. The
adjacency matrices $A_0$,$A_1$,...,$A_{3n-1}$ of this group are
given by
\begin{align}
A_r &=[a]^{2r}, \qquad r=0,1,...,n-1 \nonumber \\
A_{n+r} &=[b][a]^{2r}+[b]^2[a]^{2r}, \qquad r=0,1,...,n-1 \\
A_{2n+r} &=[a]^{2r+1}+[b][a]^{2r+1}+[b]^2[a]^{2r+1}, \qquad
r=0,1,...,n-1 \nonumber.
\hspace*{3cm}
\end{align}
One can easily prove that the adjacency matrices in (2.27) form a
commutative algebra [4].
\subsubsection{\hspace*{-.3cm}Non-Abelian group $T_{4n}$}
The group $T_{4n}$, where $n\geq 1$, with two generators $a$ and
$b$, obeys the following relations:
\begin{equation}
T_{4n}=\{a,b: a^{2n}=1, a^{n}=b^2, b^{-1}ab=a^{-1}\}.
\hspace*{3cm}
\end{equation}
The group $T_{4n}$ has $n+3$ conjugacy classes. The $n+3$ conjugacy
classes are given by
\begin{equation}
\{1\}, \{a^{n}\}, \{a^r, a^{-r}\}(1\leq r\leq n-1), \{ba^{2j}: 0\leq
j\leq n-1\}, \{ba^{2j+1}: 0\leq j\leq n-1\}.
\hspace*{3cm}
\end{equation}
The number of group elements of $T_{4n}$ is $4n$ and the matrix
representations of $[a]$ and $[b]$ with respect to the basis
$\mathcal{B}=\{a^j, ba^j, b^2a^j, b^3a^j\}$, for $0\leq j\leq n-1$,
are given by
\begin{equation}
[a]=\left(
\begin{array}{cc}
S & 0 \\
0 & S^{-1} \\
\end{array}
\right), [b]=\left(
\begin{array}{cccc}
0 & 0 & I & 0 \\
0 & 0 & 0 & I \\
0 & I & 0 & 0 \\
I & 0 & 0 & 0 \\
\end{array}
\right)
\end{equation}
where $I$ is an $n \times n$ identity matrix and $S$ is an $2n
\times 2n$ circulant matrix with period $2n (S^{2n}= I_{2n})$. The
adjacency matrices $A_0$,$A_1$,...,$A_{n+2}$ of this group are given
by
\begin{align}
A_{0} &=I_{4n}, \qquad n=2,3,... \nonumber \\
A_{1} &=[a]^n, \qquad n=2,3,... \nonumber \\
A_{j+1} &=[a]^{j}+[b]^2[a]^{n-j}, \qquad j=1,...,n-1 \nonumber \\
A_{n+1} &=\sum_{j=0}^{\lceil
\frac{n}{2}\rceil-1}([b][a]^{2j}+[b]^3[a]^{2j}), \qquad 2j<n \\
A_{n+2} &=\sum_{j=0}^{\lceil
\frac{n-1}{2}\rceil-1}([b][a]^{2j+1}+[b]^3[a]^{2j+1}), \qquad 2j+1<n.
\nonumber
\hspace*{3cm}
\end{align}
One can easily prove that the adjacency matrices in (2.31) form a
commutative algebra [4].
\subsubsection{\hspace*{-.3cm}Non-Abelian group $V_{8n}$}
The group $V_{8n}$, where $n$ is an odd integer number [38], is
generated by two generators $a$ and $b$ with the following
relations:
\begin{equation}
V_{8n}=\{a,b: a^{2n}=b^4=1, ba=a^{-1}b^{-1}, b^{-1}a=a^{-1}b\}.
\hspace*{3cm}
\end{equation}
The group $V_{8n}$ has $2n+3$ conjugacy classes. The $2n+3$ conjugacy
classes are given by
\begin{align}
\{1\}, \{b^2\}, \{a^{2r+1}, b^2a^{-2r-1}\}(0\leq r\leq
n-1),\nonumber \\
\{a^{2s}, a^{-2s}\}, \{b^2a^{2s}, b^2a^{-2s}\}(1\leq s\leq
\frac{n-1}{2}), \\
\{b^ka^j: j\; \text{even} ,\; k=1,3\}, \; \text{and} \; \{b^ka^j:
j\; \text{odd}, \;k=1,3\}\nonumber.
\end{align}
The number of group elements of $V_{8n}$ is $8n$ and the matrix
representations of $[a]$ and $[b]$ with respect to the basis
$\mathcal{B}=\{a^j, ba^j, b^2a^j, b^3a^j\}$, for $0\leq j\leq 2n-1$,
are given by
\begin{equation}
[a]=\left(
\begin{array}{cccc}
S & 0 & 0 & 0 \\
0 & 0 & 0 & S^{-1} \\
0 & 0 & S & 0 \\
0 & S^{-1} & 0 & 0 \\
\end{array}
\right), [b]=\left(
\begin{array}{cccc}
0 & I & 0 & 0 \\
0 & 0 & I & 0 \\
0 & 0 & 0 & I \\
I & 0 & 0 & 0 \\
\end{array}
\right)
\end{equation}
where $I$ is an $2n \times 2n$ identity matrix and $S$ is an $2n
\times 2n$ circulant matrix with period $2n (S^{2n}= I_{2n})$. The
adjacency matrices $A_0$,$A_1$,...,$A_{2n+3}$ of this group are
given by
\begin{align}
A_0 &=I_{8n}, \nonumber \\
A_{1} &=[b]^2, \nonumber \\
A_{2+j} &=[a]^{2j+1}+[b]^2[a]^{2n-2j-1}, \qquad j=0,1,...,n-1 \nonumber\\
A_{n+1+j} &=[a]^{2j}+[a]^{2n-2j}, \qquad j=1,2,...,\frac{n-1}{2}
\nonumber\\
A_{n+1+\frac{n-1}{2}+j} &=[b]^2[a]^{2j}+[b]^2[a]^{2n-2j}, \qquad
j=1,2,...,\frac{n-1}{2}\\
A_{2n+1} &=\sum_{j=0}^{n-1}([b][a]^{2j}+[b]^3[a]^{2j}), \nonumber\\
A_{2n+2} &=\sum_{j=0}^{n-1}([b][a]^{2j+1}+[b]^3[a]^{2j+1}).
\nonumber
\hspace*{3cm}
\end{align}
One can easily prove that the adjacency matrices in (2.35) form a
commutative algebra [4].
\subsubsection{\hspace*{-.3cm}The dihedral group $D_{2n}$}
The dihedral group $G=D_{2n}$ is generated by two generators $a$ and
$b$ with the following relations:
\begin{equation}
D_{2n}=\{a,b: a^{n}=b^2=1, b^{-1}ab=a^{-1}\}
\hspace*{3cm}
\end{equation}
We consider the case of $n=2m$; the case of odd $n$ can be
considered similarly. The dihedral group $G=D_{2n}$ with $n=2m$ has
$m+3$ conjugacy classes, which are given by
\begin{align}
\{1\}, \{a^{r}, a^{-r}\}(1\leq r\leq
m-1),\nonumber \\
\{a^{m}\}, \{a^{2j}b\}(0\leq j\leq m-1), \\
\{a^{2j+1}b\}(0\leq j\leq m-1).\nonumber
\end{align}
The adjacency matrices $A_0$,$A_1$,...,$A_{m+2}$ of this group with
$n=2m$ are given by
\begin{align}
A_{0} &=I_{2n}, \nonumber \\
A_{j} &=I_{2}\otimes (S^{j}+S^{-j}), \qquad j=1,2,...,m-1 \nonumber \\
A_{m} &=I_{2}\otimes S^{m}, \nonumber \\
A_{m+1} &=\sigma_{x}\otimes(\sum_{j=0}^{m-1}S^{2j}), \\
A_{m+2} &=\sigma_{x}\otimes(\sum_{j=0}^{m-1}S^{2j+1}). \nonumber
\hspace*{3cm}
\end{align}
where $S$ is an $n \times n$ circulant matrix with period $n (S^{n}=
I_{n})$ and $\sigma_{x}$ is the Pauli matrix. Also, the adjacency
matrices of this group with $n=2m+1$ are given by
\begin{align}
A_{0} &=I_{2n}, \nonumber \\
A_{j} &=I_{2}\otimes (S^{j}+(S^{-1})^j), \qquad j=1,2,...,m \\
A_{m+1} &=\sigma_{x}\otimes J_n. \nonumber
\hspace*{3cm}
\end{align}
where $S$ is an $n \times n$ circulant matrix with period $n (S^{n}=
I_{n})$ and $J_{n}$ is the $n \times n$ all-one matrix. One can
easily prove that the adjacency matrices in (2.38) and (2.39) form a
commutative algebra [4].
\section{\hspace*{-.3cm}Construction of stabilizer codes from Abelian group association schemes}
To construct a quantum stabilizer code of length $n$ based on the
Abelian group association schemes we need a binary matrix $A=(A_1
\vert A_2)$ which has $2n$ columns and two sets of rows, making up
two $n \times n$ binary matrices $A_1$ and $A_2$, such that by
removing arbitrarily row or rows from $A$ we can achieve $n-k$
independent generators. After finding the code distance by $n-k$
independent generators we can then determine the parameters of the
associated code. The parameters $[[ n,k,d ]]_{2}$ of the associated
quantum stabilizer are its length $n$, its dimension $k$, and its
minimum distance $d$.
\\
\\
Consider the cycle graph $C_\nu$ with $\nu$ vertices, as is
presented in section 2.2. By setting $m=2$ in view of (2.14), we
have
\\
\begin{equation}
A_0 = I_5,\hspace*{4mm} A_1 = S+S^{-1},\hspace*{4mm} A_2=S^{2}
+S^{-2} \hspace*{3cm}
\end{equation}
\\
where $S$ is a $5\times5$ circulant matrix with period 5 ($S^5=I_5$)
defined as follows:
\\
\begin{equation}
S=\left(
\begin{array}{ccccc}
0 & 0 & 0 & 0 & 1 \\
1 & 0 & 0 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 \\
0 & 0 & 1 & 0 & 0 \\
0 & 0 & 0 & 1 & 0 \\
\end{array}
\right)
\end{equation}
\\
One can see that $A_i$ for $i=1,2\hspace*{1mm}$ are symmetric and
$\sum_{i=0}^{2}A_i =J_5$. Also it can be verified that,
$\{A_i,\hspace*{2mm}i=1,2\}$ is closed under multiplication and
therefore, the set of matrices $A_0, A_1$ and $A_2$ form a symmetric
association scheme.
\\
\\
In view of $A_0, A_1$ and $A_2$ we can write the following cases:
\\
\begin{equation}
A_0,\hspace*{2mm}A_1,\hspace*{2mm}A_2,\hspace*{2mm}A_0
+A_1,\hspace*{2mm}A_0 +A_2,\hspace*{2mm}A_1 +A_2,\hspace*{2mm}A_0
+A_1 +A_2 \hspace*{3cm}
\end{equation}
\\
By examining the number of combinations of 2 cases selected from a set
of the above 7 distinct cases and considering $B_1=S + S^{-1}$ and
$B_2=S^{2} + S^{-2}$ the binary matrix $B=(B_1 \vert B_2)$ is
written as
\\
\begin{equation}
B= \left(
\begin{array}{ccccccccccc}
0 & 1 & 0 & 0 & 1 & | & 0 & 0 & 1 & 1 & 0 \\
1 & 0 & 1 & 0 & 0 & | & 0 & 0 & 0 & 1 & 1 \\
0 & 1 & 0 & 1 & 0 & | & 1 & 0 & 0 & 0 & 1 \\
0 & 0 & 1 & 0 & 1 & | & 1 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 1 & 0 & | & 0 & 1 & 1 & 0 & 0 \\
\end{array}
\right)
\hspace*{3cm}
\end{equation}
\\
By removing the last row from the binary matrix $B$ we can achieve
$n-k=4$ independent generators. The distance $d$ of the quantum code
is given by the minimum weight of the bitwise OR $\textbf{(a,b)}$ of
all pairs satisfying the symplectic orthogonality condition,
\\
\begin{equation}
B_1 \textbf{b} + B_2 \textbf{a}=0 \hspace*{3cm}
\end{equation}
\\
Let $\textbf{a}=(x_1,x_2,x_3,x_4,x_5)$ and
$\textbf{b}=(y_1,y_2,y_3,y_4,y_5) $. Then by using (3.5), we have
\\
\begin{equation}
\left\{
\begin{array}{ll}
x_3 + x_4 +y_2 +y_5 =0 \\
x_4 + x_5 +y_1 +y_3 =0 \\
x_1 + x_5 +y_2 +y_4 =0 \\
x_1 + x_2 +y_3 +y_5 =0
\end{array}
\right. \hspace*{3cm}
\end{equation}
\\
By using (3.6) we can get the code distance $d$ equal to $3$. Since
the number of independent generators is $n-k=4$, therefore $k=1$,
thus the $[[ 5,1,3 ]]_{2}$ optimal quantum stabilizer code is
constructed. It encodes $k=1$ logical qubit into $n=5$ physical
qubits and protects against an arbitrary single-qubit error. Its
stabilizer consists of $n-k=4$ Pauli operators in table 1.
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ &I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X \\
$g_2$ & X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\\
$g_3$ & Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z \\
$g_4$ & Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X \\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[5,1,3]]_{2}$
code.}} \label{table:1}
\newcommand{\m}{\hphantom{$-$}}
\newcommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
\\
Similar to case $m=2$ we obtain quantum stabilizer codes from
$C_\nu,\hspace*{2mm}\nu=6,7, ...\hspace*{1mm}. $ In the case of
$m=3$ from $C_6$ we can write
\\
\begin{equation}
A_0=I_6,\hspace*{4mm}A_1=S^{1}+S^{-1},\hspace*{4mm}A_2=S^{2}+S^{-2},\hspace*{4mm}A_3=S^{3}
\hspace*{3cm}
\end{equation}
\\
It can be easily seen that $A_i$ for $i=1,2,3$ are symmetric and
$\sum_{i=0}^{3} A_i=J_6$. By choosing $B_1=A_2 +A_3$ and $B_2=A_0
+A_1+A_2$ the binary matrix $B =(B_1 \vert B_2)$ will be in the form
\\
\begin{equation}
B=\left(
\begin{array}{ccccccccccccc}
0 & 0 & 1 & 1 & 1 & 0 & | & 1 & 1 & 1 & 0 & 1 & 1 \\
0 & 0 & 0 & 1 & 1 & 1 & | & 1 & 1 & 1 & 1 & 0 & 1 \\
1 & 0 & 0 & 0 & 1 & 1 & | & 1 & 1 & 1 & 1 & 1 & 0 \\
1 & 1 & 0 & 0 & 0 & 1 & | & 0 & 1 & 1 & 1 & 1 & 1 \\
1 & 1 & 1 & 0 & 0 & 0 & | & 1 & 0 & 1 & 1 & 1 & 1 \\
0 & 1 & 1 & 1 & 0 & 0 & | & 1 & 1 & 0 & 1 & 1 & 1 \\
\end{array}
\right)
\hspace*{3cm}
\end{equation}
\\
By removing the last row from $B$ and constructing the system of
linear equations analogous to the previous case, we can achieve
$d=3$. Since the number of independent generators is $n-k=5$,
the optimal quantum stabilizer code is of length $6$; it
encodes $k=1$ logical qubit, i.e., $[[6,1,3]]_{2}$ is constructed.
This code is generated by the $n-k=5$ independent generators in table
$2$.
\\
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ &Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z \\
$g_2$ & Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\\
$g_3$ & Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X \\
$g_4$ & X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y \\
$g_5$ & Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z \\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[ 6,1,3 ]]_{2}$
code.}} \label{table:2}
\newcommand{\m}{\hphantom{$-$}}
\newcommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
\\
To construct a quantum stabilizer code from $C_7$ by using $(2.14)$,
we have
\\
\begin{equation}
A_0=I_7,\hspace*{4mm}A_1=S+S^{-1},\hspace*{4mm}A_2=S^{2}+S^{-2},\hspace*{4mm}A_3=S^{3}+S^{-3}
\hspace*{2cm}
\end{equation}
\\
One can see that $A_i$ for $i=1,2,3$ are symmetric and
$\sum_{i=0}^{3}A_i=J_7$. Also it can be easily shown that,
$\{A_i,\hspace*{1mm}i=1,2,3\}$ is closed under multiplication and
therefore, the set of matrices $A_0, ... ,A_3$ form a symmetric
association scheme. By choosing $B_1$ and $B_2$ as follows:
\\
\begin{equation}
B_1=A_1,\hspace*{4mm}B_2=A_2+A_3
\hspace*{3cm}
\end{equation}
\\
It can be seen that $B_1 B_2^{T}+ B_2 B_1^{T}=0$, so all operators
commute. On the other hand, since
\\
\begin{equation}
B= \left(
\begin{array}{ccccccccccccccc}
0 & 1 & 0 & 0 & 0 & 0 & 1 & | & 0 & 0 & 1 & 1 & 1 & 1 & 0 \\
1 & 0 & 1 & 0 & 0 & 0 & 0 & | & 0 & 0 & 0 & 1 & 1 & 1 & 1 \\
0 & 1 & 0 & 1 & 0 & 0 & 0 & | & 1 & 0 & 0 & 0 & 1 & 1 & 1 \\
0 & 0 & 1 & 0 & 1 & 0 & 0 & | & 1 & 1 & 0 & 0 & 0 & 1 & 1 \\
0 & 0 & 0 & 1 & 0 & 1 & 0 & | & 1 & 1 & 1 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 0 & 1 & 0 & 1 & | & 1 & 1 & 1 & 1 & 0 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 & 1 & 0 & | & 0 & 1 & 1 & 1 & 1 & 0 & 0 \\
\end{array}
\right)
\hspace*{3cm}
\end{equation}
\\
By removing the last row from it, by $(3.5)$ the code distance is
$d=3$. Also, since the number of independent generators is
$n-k=6$, we can obtain the $[[7,1,3]]_{2}$ quantum
stabilizer code. This code is generated by the $6$ independent
generators in table $3$.
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ & I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\\
$g_2$ & X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\\
$g_3$ & Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\\
$g_4$ & Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\\
$g_5$ & Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\\
$g_6$ & Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[ 7,1,3 ]]_{2}$
code.}} \label{table:3}
\newcommand{\m}{\hphantom{$-$}}
\newcommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
Applying (2.12) and (2.14), we can obtain quantum stabilizer codes
from $C_\nu ({\nu}=8,9,...)$.
\\
\\
\textbf{Remark.} A list of binary quantum stabilizer codes from
$C_\nu ({\nu}=8,9,...)$ is given in tables 4 and 5. The first column
shows cyclic groups. The second column shows $B_1$ and $B_2$ in
terms of $A_i$, $i=0,1,...,m$. The third column shows the value of
the length of quantum stabilizer code. The fourth column shows the
value of $n-k$. The fifth column shows a list of the quantum
stabilizer codes. In this table $I_{n}$ is an $n\times n$ unit
matrix and $X$ is an Pauli matrix. Also, we will sometimes use
notation where we omit the tensor signs. For example $A_1I_2I_2$ is
shorthand for $A_1\otimes I_2\otimes I_2$. All the optimal quantum
stabilizer codes, i.e., codes with largest possible $k$ with fixed
$n$ and $d$ constructed in table $4$ lengths labeled by $l$ having
the best parameters known. The highest rate $\frac{k}{n}$ of
$[[n,k,d]]_{2}$ quantum stabilizer codes with minimum distance $d$
is labeled by $u$ in below tables.
\\
\\
$$
\begin{tabular}{|c|p{10.5cm}|c|c|l|}
\hline
\hline
Cyclic group & $B_i(i=1,2)$ & $n$ & $n-k$ & $[[ n,k,d ]]_{2}$\\
\hline
$C_{8}$ & $B_1=A_3+A_4$,\hspace*{2mm}$B_2=A_2+A_3$ & $8$ & $6$ & $[[ 8,2,3 ]]_{2}$ \\
$C_{2}\times C_{4} $ & $B_1=I_{2}A_2+XA_1$,
\hspace*{2mm}$B_2=I_{2}A_1+XA_1+XA_2$ & $8$ & $6$ & $[[ 8,2,3]]_{2}$ \\
$C_{2}\times C_{2}\times C_{2}$ &
$B_1=I_2I_2X+XI_2I_2+XI_2X+XXX$,\hspace*{2mm}$B_2=I_2I_2X+I_2XI_2+XXI_2+XXX$ & $^{l}8$ & $5$ & $[[ 8,3,3 ]]_{2}$ \\
$C_{9}$ & $B_1=A_1+A_2$,\hspace*{2mm}$B_2=A_2+A_4$ & $9$ & $6$ & $[[ 9,3,3 ]]_{2}$ \\
$C_{3}\times C_{3}$ & $B_1=I_3A_1+SS+S^2S^2$,
\hspace*{2mm}$B_2=I_3A_1+SS^2+S^2S$ & $9$ & $6$ & $[[ 9,3,3 ]]_{2}$ \\
$C_{10}$ & $B_1=A_2+A_4+A_5$,\hspace*{2mm}$B_2=A_0+A_2+A_3$ & $10$ & $6$ & $[[ 10,4,3 ]]_{2}$ \\
$C_{10}$ & $B_1=A_4$,\hspace*{2mm}$B_2=A_0+A_3+A_5$ & $10$ & $9$ & $[[ 10,1,4 ]]_{2}$ \\
$C_{11}$ & $B_1=A_1+A_3+A_4+A_5$,\hspace*{2mm}$B_2=A_2+A_5$ & $11$ & $7$ & $[[ 11,4,3 ]]_{2}$ \\
$C_{11}$ & $B_1=A_1+A_4+A_5$,\hspace*{2mm}$B_2=A_2+A_5$ & $11$ & $10$ & $[[ 11,1,5 ]]_{2}$ \\
$C_{12}$ & $B_1=A_2+A_4+A_5+A_6$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $^{l}12$ & $6$ & $[[ 12,6,3 ]]_{2}$ \\
$C_{12}$ & $B_1=A_2+A_4+A_5+A_6$,\hspace*{2mm}$B_2=A_2+A_3+A_5+A_6$ & $12$ & $7$ & $[[ 12,5,3 ]]_{2}$ \\
$C_{3}\times C_{4} $ & $B_1=I_{12}+I_3A_1+A_1I_4$,
\hspace*{2mm}$B_2=A_1A_1+A_1I_4$ & $12$ & $10$ & $[[ 12,2,3 ]]_{2}$ \\
$C_{3}\times C_{2}\times C_{2}$ & $B_1=A_1I_2I_2+A_1I_2X+A_1XX$,
\hspace*{2mm}$B_2=I_3XI_2+I_3XX+A_1I_2X$ & $12$ & $8$ & $[[ 12,4,3 ]]_{2}$ \\
$C_{13}$ & $B_1=A_1+A_3+A_4+A_5$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $13$ & $8$ & $[[ 13,5,3 ]]_{2}$ \\
$C_{13}$ & $B_1=A_1+A_3+A_4+A_5$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $13$ & $12$ & $[[ 13,1,5 ]]_{2}$ \\
$C_{14}$ & $B_1=A_0+A_3+A_4+A_6+A_7$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $14$ & $8$ & $[[ 14,6,3 ]]_{2}$ \\
$C_{14}$ & $B_1=A_0+A_3+A_4+A_6+A_7$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $14$ & $11$ & $[[ 14,3,4 ]]_{2}$ \\
$C_{15}$ & $B_1=A_3+A_4+A_6+A_7$,\hspace*{2mm}$B_2=A_1+A_2+A_3+A_5$ & $15$ & $9$ & $[[ 15,6,3 ]]_{2}$ \\
$C_{16}$ & $B_1=A_3+A_4+A_6$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $16$ & $11$ & $[[ 16,5,3 ]]_{2}$ \\
$C_{16}$ & $B_1=A_0+A_3+A_4+A_8$,\hspace*{2mm}$B_2=A_0+A_1+A_2+A_5$ & $16$ & $8$ & $[[ 16,8,3 ]]_{2}$ \\
$C_{2}\times C_{8}$ & $B_1=I_2A_2+XA_2+XA_4+I_2A_3+I_2A_4+XA_1$,
\hspace*{2mm}$B_2=I_2A_2+XA_3+I_2A_1+I_2A_3+I_2I_8$ & $16$ & $7$ & $[[ 16,9,3 ]]_{2}$ \\
$C_{2}\times C_{2}\times C_{4}$ & $B_1=I_2I_2A_2+A_1I_2A_1+A_1A_1I_4+I_2A_1I_4+I_2A_1A_1+A_1A_1A_1$,
\hspace*{0.1mm}$B_2=I_2I_2A_2+A_1I_2A_2+I_2I_2A_1+I_2A_1A_2+I_2A_1I_4+I_2A_1A_1+A_1A_1A_1$ & $16$ & $8$ & $[[ 16,8,3 ]]_{2}$ \\
$C_{4}\times C_{4}$ & $B_1=I_4A_1+A_1A_1+A_1A_2+A_2A_2$,
\hspace*{2mm}$B_2=I_4A_2+A_1I_4+A_1A_2+A_2I_4+A_2A_1$ & $16$ & $12$ & $[[ 16,4,3 ]]_{2}$ \\
$C_{2}\times C_{2}\times C_{2}\times C_{2}$ & $B_1=XI_2I_2X+XI_2XX+XXXX+I_2XXX$,
\hspace*{2mm}$B_2=I_2I_2I_2X+I_2I_2XI_2+I_2I_2XX+I_2XI_2X+I_2XXI_2+XI_2I_2X+XXI_2I_2+XXXI_2$ & $16$ & $9$ & $[[ 16,7,3 ]]_{2}$ \\
$C_{17}$ & $B_1=A_3+A_4+A_6+A_7+A_8$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $17$ & $10$ & $[[ 17,7,3 ]]_{2}$ \\
$C_{17}$ & $B_1=A_3+A_4+A_6+A_7+A_8$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $17$ & $14$ & $[[ 17,3,4 ]]_{2}$ \\
$C_{18}$ & $B_1=A_0+A_3+A_4+A_5+A_6$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8+A_9$ & $18$ & $10$ & $[[ 18,8,3 ]]_{2}$ \\
$C_{19}$ & $B_1=A_3+A_4+A_6+A_9$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7$ & $19$ & $10$ & $[[ 19,9,3 ]]_{2}$ \\
$C_{20}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $20$ & $8$ & $[[ 20,12,3 ]]_{2}$ \\
$C_{21}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $21$ & $8$ & $[[ 21,13,3 ]]_{2}$ \\
$C_{21}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $21$ & $11$ & $[[ 21,10,4 ]]_{2}$ \\
$C_{21}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $21$ & $12$ & $[[ 21,9,5 ]]_{2}$ \\
$C_{21}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $^{l}21$ & $16$ & $[[ 21,5,7 ]]_{2}$ \\
\hline \hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Quantum stabilizer codes $[[ n,k,d ]]_{2}$.}}
\label{table:4}
\newcommand{\m}{\hphantom{$-$}}
\newcommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
\\
$$
\begin{tabular}{|c|p{10.5cm}|c|c|l|}
\hline
\hline
Cyclic group & $B_i(i=1,2)$ & $n$ & $n-k$ & $[[ n,k,d ]]_{2}$\\
\hline
$C_{25}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $25$ & $8$ & $[[ 25,17,3 ]]_{2}$ \\
$C_{25}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8$ & $25$ & $12$ & $[[ 25,13,4 ]]_{2}$ \\
$C_{30}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}+A_{14}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8+A_{13}+A_{15}$
& $30$ & $8$ & $[[ 30,22,3 ]]_{2}$ \\
$C_{30}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}+A_{14}$,\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8+A_{13}+A_{15}$
& $30$ & $18$ & $[[ 30,12,5 ]]_{2}$ \\
$C_{40}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}+A_{12}+A_{14}+A_{15}+A_{16}+A_{18}+A_{19}$,
\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8+A_{12}+A_{15}+A_{16}+A_{17}+A_{18}+A_{20}$
& $40$ & $10$ & $^{u}[[ 40,30,3 ]]_{2}$ \\
$C_{40}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}+A_{12}+A_{14}+A_{15}+A_{16}+A_{18}+A_{19}$,
\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8+A_{12}+A_{15}+A_{16}+A_{17}+A_{18}+A_{20}$
& $40$ & $14$ & $^{u}[[ 40,26,5 ]]_{2}$ \\
$C_{40}$ & $B_1=A_3+A_4+A_6+A_9+A_{10}+A_{12}+A_{14}+A_{15}+A_{16}+A_{18}+A_{19}$,
\hspace*{0.5mm}$B_2=A_3+A_5+A_6+A_7+A_8+A_{12}+A_{15}+A_{16}+A_{17}+A_{18}+A_{20}$
& $^{l}40$ & $19$ & $^{u}[[ 40,21,7 ]]_{2}$ \\
\hline
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Quantum stabilizer codes $[[ n,k,d ]]_{2}$.}}
\label{table:5}
\newcommand{\m}{\hphantom{$-$}}
\newcommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\subsection{\hspace*{-.3cm}Construction of quantum stabilizer codes
of distances five and seven from Abelian group association schemes}
We can extend the stabilizers of the codes from section $3$ to get
distances five and seven codes. The parameters of these codes with
$d=5,7$ will be $[[ n,k,d ]]_{2}$. In the case of $m=5$ from
$C_{11}$ we can write
\\
\begin{equation}
A_0=I_{11},\hspace*{0.5mm}A_1=S^{1}+S^{-1},\hspace*{0.5mm}A_2=S^{2}+S^{-2},\hspace*{0.5mm}A_3=S^{3}+S^{-3},\hspace*{0.5mm}
A_4=S^{4}+S^{-4},\hspace*{0.5mm}A_5=S^{5}+S^{-5}
\end{equation}
\\
where $S$ is an $11\times 11$ circulant matrix with period $11$
$(S^{11}=I_{11})$. One can easily see that the above adjacency
matrices for $i=1,...,5$ are symmetric and
$\sum_{i=0}^{5}A_i=J_{11}$. Also, the set of matrices $A_0, ...
,A_5$ form a symmetric association scheme. By choosing
$B_1=A_1+A_4+A_5$ and $B_2=A_2 +A_5$ the binary matrix $B =(B_1
\vert B_2)$ will be in the form
\\
\begin{equation}
B=\left(
\begin{array}{ccccccccccccccccccccccc}
0 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & 0 & 1 & | & 0 & 0 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0\\
1 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & 0 & | & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1\\
0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & | & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 1 & 0 & 0\\
0 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & 1 & | & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 1 & 0\\
1 & 0 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 & 1 & | & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 1 & 1\\
1 & 1 & 0 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & 1 & | & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 1\\
1 & 1 & 1 & 0 & 0 & 1 & 0 & 1 & 0 & 0 & 1 & | & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0\\
1 & 1 & 1 & 1 & 0 & 0 & 1 & 0 & 1 & 0 & 0 & | & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0\\
0 & 1 & 1 & 1 & 1 & 0 & 0 & 1 & 0 & 1 & 0 & | & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1\\
0 & 0 & 1 & 1 & 1 & 1 & 0 & 0 & 1 & 0 & 1 & | & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0\\
1 & 0 & 0 & 1 & 1 & 1 & 1 & 0 & 0 & 1 & 0 & | & 0 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0\\
\end{array}
\right)
\end{equation}
\\
By removing the last row from $B$ and by considering
$\textbf{a}=(x_{01},...,x_{11})$ and
$\textbf{b}=(y_{01},...,y_{11})$, in view of (3.5) we can achieve
$d=5$.
\\
Since the number of independent generators is $n-k=10$,
the quantum stabilizer code is of length $11$; it encodes $k=1$
logical qubit, i.e., $[[11,1,5]]_{2}$ is constructed. This code is
generated by the $n-k=10$ independent generators in table $6$.
\\
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ &I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\\
$g_2$ &X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\\
$g_3$ &Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\\
$g_4$ &I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\\
$g_5$ &X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\\
$g_6$ &Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\\
$g_7$ &Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\\
$g_8$ &X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\\
$g_9$ &I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\\
$g_{10}$ &Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[ 11,1,5 ]]_{2}$
code.}} \label{table:6}
\newcommand{\m}{\hphantom{$-$}}
\newcommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
\\
For the construction of a distance-five quantum stabilizer code from
$C_{13}$ by using $(2.14)$, we have
\\
\\
\begin{equation}
A_0=I_{13},A_1=S+S^{-1},A_2=S^{2}+S^{-2},A_3=S^{3}+S^{-3},
A_4=S^{4}+S^{-4},A_5=S^{5}+S^{-5},A_6=S^{6}+S^{-6}
\end{equation}
\\
One can see that $A_i$ for $i=1,...,6$ are symmetric and
$\sum_{i=0}^{6}A_i=J_{13}$. Also it can be easily shown that,
$\{A_i,\hspace*{1mm}i=1,...,6\}$ is closed under multiplication and
therefore, the set of matrices $A_0, ... ,A_6$ form a symmetric
association scheme. By choosing $B_1$ and $B_2$ as follows:
\\
\begin{equation}
B_1=A_1+A_3+A_4+A_5,\hspace*{4mm}B_2=A_2+A_3+A_5
\hspace*{3cm}
\end{equation}
\\
It can be seen that $B_1 B_2^{T}+ B_2 B_1^{T}=0$, so all the operators
commute. On the other hand, since
\\
\begin{equation}
B=\left(
\begin{array}{ccccccccccccccccccccccccccc}
0 & 1 & 0 & 1 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0\\
1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1\\
0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 0 & 1 & 1 & 1 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 1\\
1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 0 & 1 & 1 \hspace*{2mm} |\hspace*{2mm} 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 1 & 0\\
1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 0 & 1 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0 & 1\\
1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0 & 0\\
0 & 1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1 & 0\\
0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 1\\
1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 & 1 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0\\
1 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 & 1 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 0 & 0 & 1 & 1\\
1 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 1 & 0 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 0 & 0 & 1\\
0 & 1 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 & 1 \hspace*{2mm} |\hspace*{2mm} 1 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 0 & 0\\
1 & 0 & 1 & 1 & 1 & 0 & 0 & 1 & 1 & 1 & 0 & 1 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 1 & 0 & 1 & 0 & 0 & 1 & 0 & 1 & 1 & 0 & 0\\
\end{array}
\right)
\end{equation}
\\
By removing the last row from $B$ and by considering
$\textbf{a}=(x_{01},...,x_{13})$ and
$\textbf{b}=(y_{01},...,y_{13})$, in view of (3.5) we can achieve
$d=5$.
\\
Since the number of independent generators is $n-k=12$,
the quantum stabilizer code is of length $13$, and encodes $k=1$
logical qubit, i.e., $[[13,1,5]]_{2}$ is constructed. This code is
generated by the $n-k=12$ independent generators in table $7$.
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ &I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\\
$g_2$ &X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\\
$g_3$ &Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\\
$g_4$ &Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\\
$g_5$ &X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\\
$g_6$ &Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\\
$g_7$ &I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\\
$g_8$ &I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\\
$g_9$ &Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}X\\
$g_{10}$ &X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\\
$g_{11}$ &Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\\
$g_{12}$ &Z\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}I\hspace*{2mm}X\\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[ 13,1,5 ]]_{2}$
code.}} \label{table:2}
\providecommand{\hp}{\hphantom{$-$}}
\providecommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
For the construction of distance five quantum stabilizer code from
$C_{21}$ we choose $B_1$ and $B_2$ as follows:
\\
\begin{equation}
B_1=A_3+A_4+A_6+A_9+A_{10},\hspace*{4mm}B_2=A_3+A_5+A_6+A_7+A_8
\end{equation}
\\
It can be seen that $B_1 B_2^{T}+ B_2 B_1^{T}=0$, so all the operators
commute. By removing the last nine rows from $B=(B_1 \vert B_2)$
and by considering $\textbf{a}=(x_{01},...,x_{21})$ and
$\textbf{b}=(y_{01},...,y_{21})$, in view of (3.5) we can achieve
$d=7$.
\\
Since the number of independent generators is $n-k=16$,
the optimal quantum stabilizer code is of length $21$, and encodes
$k=5$ logical qubits, i.e., $[[21,5,7]]_{2}$ is constructed. This
code is generated by the $n-k=16$ independent generators in table $8$.
The rate $\frac{k}{n}$ of $[[21,5,7]]_{2}$ code is $0.238$.
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ &I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\\
$g_2$ &I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\\
$g_3$ &I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\\
$g_4$ &Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\\
$g_5$ &X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\\
$g_6$ &Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\\
$g_7$ &Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\\
$g_8$ &Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\\
$g_9$ &Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\\
$g_{10}$ &X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\\
$g_{11}$ &X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\\
$g_{12}$ &X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\\
$g_{13}$ &X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}Z\\
$g_{14}$ &Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\\
$g_{15}$ &Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Y\\
$g_{16}$ &Y\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}Y\hspace*{2mm}Z\hspace*{2mm}X\hspace*{2mm}Y\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Y\hspace*{2mm}X\hspace*{2mm}Z\\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[ 21,5,7 ]]_{2}$
code.}} \label{table:2}
\providecommand{\hp}{\hphantom{$-$}}
\providecommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
\\
\section{\hspace*{-.3cm}Construction of stabilizer codes from non-Abelian group association schemes}
The construction of binary quantum stabilizer codes based on
non-Abelian group association schemes proceeds as in the case of
Abelian group association schemes. To do so, we choose a binary
matrix $A=(A_1 \vert A_2)$ such that, by arbitrarily removing a row
or rows from $A$, we can obtain $n-k$ independent generators. After
finding the code distance from the $n-k$ independent generators we
can then determine the parameters of the associated code.
\\
\\
Consider the group $U_{6n}$, as is presented in section 2.5. By
setting $n=2$ in view of (2.27), we have
\\
\begin{align}
A_0 &=I_{12},\nonumber\\
A_1 &=[a]^2,\nonumber\\
A_2 &=[b]+[b]^2, \\
A_3 &=[b][a]^2+[b]^2[a]^2,\nonumber\\
A_4 &=[a]+[b][a]+[b]^2[a],\nonumber\\
A_{5} &=[a]^3+[b][a]^3+[b]^2[a]^3 \nonumber
\hspace*{3cm}
\end{align}
\\
\\
One can see that $\sum_{i=0}^{5}A_i =J_{12}$, $A_i^{T}\in
\{A_0,A_1,...,A_5\}$ for $0\leq i \leq 5$, and $A_iA_j$ is a linear
combination of $A_0,A_1,...,A_5$ for $0\leq i,j\leq 5$ . Also it
can be verified that, $\{A_i,\hspace*{2mm}i=1,...,5\}$ is closed
under multiplication and therefore, the set of matrices
$A_0,A_1,...,A_5$ form an association scheme with $5$ classes.
\\
\\
By examining the number of combinations of 2 cases selected from a
set of 63 distinct cases, and considering $B_1=A_2$ and $B_2=A_3+A_5$,
the binary matrix $B=(B_1 \vert B_2)$ is written as
\\
\begin{equation}
B=\left(
\begin{array}{cccccccccccccccccccccccc}
0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 \\
0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 \\
0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 \\
0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 1 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 \\
1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 1 & 0 \\
0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 1 \\
0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 1 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 1 \hspace*{2mm} |\hspace*{2mm} 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 \\
1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 \\
0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 0 & 0 & 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 \\
0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 & 1 \\
0 & 0 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \hspace*{2mm} |\hspace*{2mm} 1 & 1 & 0 & 0 & 1 & 1 & 0 & 0 & 1 & 0 & 0 & 0 \\
\end{array}
\right)
\end{equation}
\\
By removing the last four rows from the binary matrix $B$ we can
achieve $n-k=8$ independent generators. The distance $d$ of the
quantum code is given by the minimum weight of the bitwise OR
$\textbf{(a,b)}$ of all pairs satisfying the symplectic
orthogonality condition,
\\
\begin{equation}
B_1 \textbf{b} + B_2 \textbf{a}=0 \hspace*{3cm}
\end{equation}
\\
Let $\textbf{a}=(x_{01},...,x_{12})$ and
$\textbf{b}=(y_{01},...,y_{12})$. Then by using (4.3), we have
\\
\begin{equation}
\left\{
\begin{array}{ll}
x_{02} + x_{06} + x_{07} + x_{10} + x_{11} + y_{05} + y_{09} =0 \\
x_{03} + x_{07} + x_{08} + x_{11} + x_{12} + y_{06} + y_{10} =0 \\
x_{04} + x_{05} + x_{08} + x_{09} + x_{12} + y_{07} + y_{11} =0 \\
x_{01} + x_{05} + x_{06} + x_{09} + x_{10} + y_{08} + y_{12} =0 \\
x_{02} + x_{03} + x_{06} + x_{10} + x_{11} + y_{01} + y_{09} =0 \\
x_{03} + x_{04} + x_{07} + x_{11} + x_{12} + y_{02} + y_{10} =0 \\
x_{01} + x_{04} + x_{08} + x_{09} + x_{12} + y_{03} + y_{11} =0 \\
x_{01} + x_{02} + x_{05} + x_{09} + x_{10} + y_{04} + y_{12} =0
\end{array}
\right.
\hspace*{3cm}
\end{equation}
\\
By using (4.4) we can get a code distance $d$ equal to $3$. Since
the number of independent generators is $n-k=8$, the
quantum stabilizer code is of length $12$, and encodes $k=4$
logical qubits, i.e., $[[ 12,4,3 ]]_{2}$ is constructed. This code is
generated by the $n-k=8$ independent generators in table $9$.
\\
$$
\begin{tabular}{|c|c|}
\hline
Name & Operator\\
\hline
$g_1$ &I\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I \\
$g_2$ &I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z \\
$g_3$ &I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z \\
$g_4$ &Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X \\
$g_5$ &X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I \\
$g_6$ &I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}Z \\
$g_7$ &Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z \\
$g_8$ &Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}I\hspace*{2mm}Z\hspace*{2mm}Z\hspace*{2mm}I\hspace*{2mm}X \\
\hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Stabilizer generators for the $[[12,4,3]]_{2}$
code.}} \label{table:1}
\providecommand{\hp}{\hphantom{$-$}}
\providecommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
Applying (2.27), (2.31), (2.35), (2.38) and (2.39) we can obtain
quantum stabilizer codes from $U_{6n}$, $T_{4n}$, $V_{8n}$ and
dihedral $D_{2n}$ groups. A list of quantum stabilizer codes is
given in table $10$.
\\
\\
\textbf{Remark.} Table $10$ is a list of quantum stabilizer codes
from $U_{6n}$, $T_{4n}$, $V_{8n}$ and dihedral $D_{2n}$ groups. The
first column shows non-Abelian groups. The second column shows $B_1$
and $B_2$ in terms of $A_i$, $i=0,1,...,m$, where $m$ denotes the
number of conjugacy classes of the group $G$. The third column shows
the value of the length of quantum stabilizer code. The fourth
column shows the value of $n-k$. The fifth column shows a list of
the quantum stabilizer codes.
\\
$$
\begin{tabular}{|c|p{10.5cm}|c|c|l|}
\hline
\hline
Group & $B_i(i=1,2)$ & $n$ & $n-k$ & $[[ n,k,d ]]_{2}$\\
\hline
$U_{12}$ & $B_1=A_1+A_2+A_4$,\hspace*{2mm}$B_2=A_3$ & $12$ & $8$ & $[[ 12,4,3 ]]_{2}$ \\
$U_{12}$ & $B_1=A_1+A_2+A_5$,\hspace*{2mm}$B_2=A_0+A_4$ & $12$ & $8$ & $[[ 12,4,3 ]]_{2}$ \\
$U_{12}$ & $B_1=A_2$,\hspace*{2mm}$B_2=A_3+A_5$ & $12$ & $8$ & $[[ 12,4,3 ]]_{2}$ \\
$U_{12}$ & $B_1=A_1+A_2+A_5$,\hspace*{2mm}$B_2=A_0+A_4$ & $12$ & $11$ & $[[ 12,1,4 ]]_{2}$ \\
$U_{18}$ & $B_1=A_1+A_2+A_3+A_7+A_8$,
\hspace*{2mm}$B_2=A_0+A_1+A_2+A_4+A_5$ & $18$ & $12$ & $[[ 18,6,3]]_{2}$ \\
$U_{18}$ & $B_1=A_1+A_2+A_3+A_7$,
\hspace*{2mm}$B_2=A_0+A_1+A_2+A_4$ & $18$ & $13$ & $[[ 18,5,3]]_{2}$ \\
$U_{18}$ & $B_1=A_1+A_2+A_3+A_7$,
\hspace*{2mm}$B_2=A_0+A_1+A_2+A_4$ & $18$ & $16$ & $[[ 18,2,4]]_{2}$ \\
$U_{24}$ & $B_1=A_0+A_1+A_2+A_3+A_4+A_8+A_{10}$,\hspace*{2mm}$B_2=A_0+A_3+A_5+A_6+A_{11}$ & $24$ & $12$ & $[[ 24,12,3 ]]_{2}$ \\
$U_{24}$ & $B_1=A_0+A_1+A_2+A_3+A_4+A_8+A_{10}$,\hspace*{2mm}$B_2=A_0+A_3+A_5+A_6+A_{11}$ & $24$ & $16$ & $[[ 24,8,5 ]]_{2}$ \\
$T_{12}$ & $B_1=A_2+A_4$,\hspace*{2mm}$B_2=A_0+A_5$ & $12$ & $9$ & $[[ 12,3,3 ]]_{2}$ \\
$T_{12}$ & $B_1=A_0+A_4$,
\hspace*{2mm}$B_2=A_1+A_2+A_5$ & $12$ & $10$ & $[[ 12,2,3 ]]_{2}$ \\
$T_{16}$ & $B_1=A_0+A_1+A_2+A_6$,\hspace*{2mm}$B_2=A_0+A_2+A_3$ & $16$ & $14$ & $[[ 16,2,3 ]]_{2}$ \\
$V_{24}$ & $B_1=A_0+A_3+A_6+A_7$,\hspace*{2mm}$B_2=A_0+A_2+A_4$ & $24$ & $20$ & $[[ 24,4,3 ]]_{2}$ \\
$D_{12}$ & $B_1=A_3+A_5$,\hspace*{2mm}$B_2=A_2+A_3+A_5$ & $12$ & $10$ & $[[ 12,2,3 ]]_{2}$ \\
\hline \hline
\end{tabular}
$$
\begin{table}[htb]
\caption{\small{Quantum stabilizer codes $[[ n,k,d ]]_{2}$.}}
\label{table:2}
\providecommand{\hp}{\hphantom{$-$}}
\providecommand{\cc}[1]{\multicolumn{1}{c}{#1}}
\setlength{\tabcolsep}{0pc}
\renewcommand{\arraystretch}{1.}
\end{table}
\\
\\
\section{\hspace*{-.5cm}Conclusion}
We have developed a new method of constructing binary quantum
stabilizer codes from Abelian and non-Abelian groups association
schemes. Using this method, we have constructed good binary quantum
stabilizer codes of distances $3$, $4$, $5$, and $7$ up to $40$.
Furthermore, binary quantum stabilizer codes of a large length $n$
with high distance can be constructed. We can see from tables 4 and
5 that the Abelian association schemes procedure for the
construction of the binary quantum stabilizer codes is superior to
non-Abelian group association schemes. Although we focused
specifically on Abelian and non-Abelian groups association schemes,
we expect that the introduced method might then be applied to other
association schemes, such as the association scheme defined over the
coset space $G/H$ (where $H$ is a normal subgroup of a finite group
$G$ with prime index), strongly regular graphs, distance regular
graphs, etc. These association schemes are under investigation.
\\
\end{document} |
\begin{document}
\maketitle
\begin{abstract}
We show that the discrete complex,
and numerous hypercomplex,
Fourier transforms defined and used
so far by a number of
researchers can be unified into a single framework
based on a matrix exponential version of Euler's formula $e^{j\theta}=\cos\theta+j\sin\theta$,
and a matrix root of $-1$ isomorphic to the imaginary root $j$.
The transforms thus defined can be computed
using standard matrix
multiplications and additions with no hypercomplex code,
the complex or hypercomplex algebra being represented
by the form of the matrix root of $-1$,
so that the matrix multiplications
are equivalent to multiplications in the appropriate algebra.
We present examples from the complex, quaternion and biquaternion algebras,
and from Clifford algebras \clifford{1}{1} and \clifford{2}{0}.
The significance of this result is both in the theoretical unification,
and also in the scope it affords for insight into the structure of the various transforms,
since the formulation is such a simple generalization of the classic complex case.
It also shows that hypercomplex discrete Fourier transforms may be {computed}
using standard matrix arithmetic packages {without the need for a hypercomplex library},
which is of importance in providing
a reference implementation for {verifying}
implementations based on hypercomplex code.
\end{abstract}
\section{Introduction}
The discrete Fourier transform
is widely known and used in signal and image processing,
and in many other fields where data is analyzed for frequency content \cite{Bracewell:2000}.
The discrete Fourier transform in one dimension is classically formulated as:
\begin{equation}
\label{eqn:classicdft}
\begin{aligned}
F[u] &= S\sum_{m=0}^{M-1}f[m]\exp\left( -j 2\pi\frac{mu}{M}\right)\\
f[m] &= T\sum_{u=0}^{M-1}F[u]\exp\left(\phantom{-}j 2\pi\frac{mu}{M}\right)
\end{aligned}
\end{equation}
where $j$ is the imaginary root of $-1$,
$f[m]$ is real or complex valued with $M$ samples,
$F[u]$ is complex valued, also with $M$ samples,
and the two scale factors $S$ and $T$ must multiply to $1/M$.
If the transforms are to be unitary then $S$ must equal $T$ also.
In this paper we {discuss the formulation of the transform}
using a matrix exponential form of Euler's formula in which the imaginary square root of
$-1$ is replaced by {an isomorphic} matrix root.
{This formulation works for the complex DFT, but more importantly,
it works for hypercomplex DFTs (reviewed in §\,\ref{hypercomplex}).}
The matrix exponential formulation is equivalent to all the known hypercomplex
generalizations of the DFT known to the authors, based on quaternion,
biquaternion or Clifford algebras,
through a suitable choice of matrix root of $-1$,
isomorphic to a root of $-1$ in the corresponding hypercomplex algebra.
{All associative hypercomplex algebras (and indeed the complex algebra)
are known to be isomorphic to matrix algebras over the reals or the complex numbers.
For example, Ward \cite[\S\,2.8]{Ward:1997} discusses isomorphism
between the quaternions and $4\times4$ real or $2\times2$ complex matrices
so that quaternions can be replaced by matrices, the rules of matrix
multiplication then being equivalent to the rules of quaternion
multiplication \emph{by virtue of the pattern of
the elements of the quaternion within the matrix}.
Also in the quaternion case, Ickes \cite{Ickes:1970} wrote an important paper
showing how multiplication of quaternions
could be accomplished using a matrix-vector or vector-matrix
product that could accommodate reversal of the product ordering
by a partial transposition within the matrix.
This paper, more than any other, led us to the {observations}
presented here.}
{The fact that a hypercomplex DFT may be formulated using a matrix
exponential may not be surprising.
Nevertheless, to our knowledge, those who have worked on hypercomplex
DFTs have not so far noted or exploited the observations made in this
paper, which is surprising, given the ramifications discussed later.}
\section{{Hypercomplex transforms}}
\label{hypercomplex}
{
The first published descriptions of hypercomplex transforms that we are
aware of date from the late 1980s, using quaternions. In all three known
earliest formulations, the transforms were defined for two-dimensional
signals (that is functions of two independent variables).
The two earliest formulations \cite[§\,6.4.2]{Ernst:1987} and
\cite[Eqn.~20]{Delsuc:1988} are almost equivalent
(they differ only in the placing of the exponentials and the signal
and the signs inside the exponentials)\footnote{{In comparing
the various formulations of hypercomplex transforms, we have changed
the symbols used by the original authors in order to make the comparisons
clearer. We have also made trivial changes such as the choice of basis
elements used in the exponentials.}}:
\[
F(\omega_1,\omega_2) = \int_{-\infty}^{\infty}\int_{-\infty}^{\infty}
f(t_1, t_2)
e^{\i\omega_1 t_1}e^{\j\omega_2 t_2}\textrm{d}\xspace t_1\textrm{d}\xspace t_2
\]
In a non-commutative algebra the ordering of exponentials within
an integral is significant, and of course, two exponentials with
different roots of -1 cannot be combined trivially. Therefore there
are other possible transforms that can be defined by positioning
the exponentials differently.
The first transform in which the exponentials were placed either
side of the signal function was that of Ell \cite{Ell:thesis,Ell:1993}:
\[
F(\omega_1,\omega_2) = \int_{-\infty}^{\infty}\int_{-\infty}^{\infty}
e^{\i\omega_1 t_1}
f(t_1, t_2)
e^{\j\omega_2 t_2}\textrm{d}\xspace t_1\textrm{d}\xspace t_2
\]
}
This style of transform was followed by Chernov, B\"{u}low and Sommer
\cite{Chernov:1995,Bulow:1999,BulowSommer:2001} and others since.
In 1998 the present authors described a single-sided hypercomplex
transform for the first time \cite{SangwineEll:1998} exactly as in
\eqref{eqn:classicdft} except that $f$ and $F$ were quaternion-valued
and $j$ was replaced by a general quaternion root of $-1$.
{Expressed in the same form as the transforms above, this would be:
\[
F(\omega_1,\omega_2) = \int_{-\infty}^{\infty}\int_{-\infty}^{\infty}
e^{\mu(\omega_1 t_1 + \omega_2 t_2)}
f(t_1, t_2) \textrm{d}\xspace t_1\textrm{d}\xspace t_2
\]
where $\mu$ is now an arbitrary root of -1, not necessarily a basis
element of the algebra. The realisation that an arbitrary root of -1
could be used meant that it was possible to define a hypercomplex
transform applicable to one dimension:
\[
F(\omega) = \int_{-\infty}^{\infty}e^{\mu\omega t} f(t) \textrm{d}\xspace t
\]
}
Pei \textit{et al} have studied efficient implementation of quaternion
FFTs and presented a transform based on commutative reduced
biquaternions \cite{PeiDingChang:2001,PeiChangDing:2004}.
Ebling and Scheuermann defined a Clifford Fourier transform
\cite[\S\,5.2]{10.1109/TVCG.2005.54},
but their transform used the pseudoscalar
(one of the basis elements of the algebra)
as the square root of $-1$.
{A}part from the works by the present authors
\cite{SangwineEll:1998,SangwineEll:2000b,10.1109/TIP.2006.884955},
the idea of using a root of $-1$ different to the basis elements of a
hypercomplex algebra was not developed {further until 2006},
with the publication of a
paper setting out the roots of $-1$ in biquaternions
(a quaternion algebra with complex numbers as the components of the quaternions)
\cite{10.1007/s00006-006-0005-8}.
This work prepared the ground for a biquaternion Fourier transform
\cite{10.1109/TSP.2007.910477}
based on the present authors' one-sided quaternion transform \cite{SangwineEll:1998}.
More recently, the idea of finding roots of $-1$ in other algebras
has been advanced in Clifford algebras by Hitzer and Ab{\l}amowicz
\cite{10.1007/s00006-010-0240-x} with the express intent of using them in
Clifford Fourier transforms, perhaps generalising the ideas of
Ebling and Scheuermann \cite{10.1109/TVCG.2005.54}.
Finally, in this very brief summary of prior work we mention
that the idea of applying hypercomplex algebras in signal
processing has been studied by other authors apart from those
referenced above.
For an overview see \cite{AlfsmannGocklerSangwineEll:2007}.
{In what follows we concentrate on DFTs in one dimension
for simplicity, returning to the two dimensional case in §\,\ref{twodimdft}.}
\section{Matrix formulation of the discrete Fourier transform}
\subsection{Matrix form of Euler's formula}
\label{sec:mateuler}
The transform presented in this paper depends on a generalization of
Euler's formula: $\exp i\theta = \cos\theta + i\sin\theta$,
in which the imaginary root of $-1$ is replaced by a matrix root,
that is, a matrix that squares to give a negated identity matrix.
Even among $2\times2$ matrices there is an infinite number of such
roots \cite[p16]{Nahin:2006}.
In the matrix generalization, the exponential must, of course,
be a matrix exponential \cite[\S\,11.3]{Golub:1996}.
{The following Lemma is not claimed to be original but we have not been
able to locate any published source that we could cite here.
Since the result is essential to Theorem~\ref{theorem:matrixdft},
we set it out in full.}
\begin{lemma}
\label{lemma:euler}
Euler's formula may be generalized as follows:
\begin{equation*}
e^{\mat{J}\theta} = \mat{I}\cos\theta + \mat{J}\sin\theta
\end{equation*}
where $\mat{I}$ is an identity matrix, and $\mat{J}^2 = -\mat{I}$.
\end{lemma}
\begin{proof}
The result follows from the series expansions of the matrix exponential
and the trigonometric functions.
From the definition of the matrix exponential \cite[\S\,11.3]{Golub:1996}:
\begin{align*}
e^{\mat{J}\theta} &= \sum_{k=0}^{\infty} \frac{\mat{J}^k\theta^k}{k!} =
\mat{J}^0 + \mat{J}\theta + \frac{\mat{J}^2\theta^2}{2!} +
\frac{\mat{J}^3\theta^3}{3!} + \cdots
\intertext{Noting that $\mat{J}^0=\mat{I}$ (see \cite[Index Laws]{CollinsDictMaths}),
and separating the series into even and odd terms:}
&= \mat{I} - \frac{\mat{I}\theta^2}{2!}
+ \frac{\mat{I}\theta^4}{4!}
- \frac{\mat{I}\theta^6}{6!} + \cdots\\
&\phantom{=} + \mat{J}\theta - \frac{\mat{J}\theta^3}{3!}
+ \frac{\mat{J}\theta^5}{5!}
- \frac{\mat{J}\theta^7}{7!} + \cdots\\
&= \mat{I}\cos\theta + \mat{J}\sin\theta
\end{align*}
\qed
\end{proof}
Note that matrix versions of the trigonometric functions are not
needed to compute the matrix exponential, because $\theta$ is a scalar.
In fact, if the exponential is evaluated numerically using a
matrix exponential algorithm or function, the trigonometric
functions are not even explicitly evaluated \cite[\S\,11.3]{Golub:1996}.
In practice, given that this is a special case of the matrix exponential,
(because $\mat{J}^2=-\mat{I}$),
it is likely to be numerically preferable to evaluate the
trigonometric functions and to sum scaled versions of $\mat{I}$ and $\mat{J}$.
{Notice that the matrix $e^{\mat{J}\theta}$ has a structure
with the cosine of $\theta$ on the diagonal and the (scaled) sine of $\theta$
where there are non-zero elements of $\mat{J}$.}
\subsection{Matrix form of DFT}
\label{sec:matdft}
The classic discrete Fourier transform of \eqref{eqn:classicdft} may be
generalized to a matrix form in which the signals are vector-valued with
$N$ components each and the root of $-1$ is replaced by an $N\times N$ matrix root
$\mat{J}$ such that {$\mat{J}^2=-\mat{I}$}.
In this form, subject to choosing the correct representation for the matrix
root of $-1$, we may represent a wide variety of complex and hypercomplex
Fourier transforms.
\begin{theorem}\label{theorem:matrixdft}
The following are a discrete Fourier transform pair\footnote{The colon notation used
here will be familiar to users of \textsc{matlab}\textregistered\xspace (an explanation may be found
in \cite[\S\,1.1.8]{Golub:1996}). Briefly, $\mat{f}[:,m]$ means the
$m\textsuperscript{th}$ column of the matrix \mat{f}.}:
\begin{align}
\label{eqn:forward}
\mat{F}[:,u] &= S\sum_{m=0}^{M-1}\exp\left( -\mat{J}\,2\pi\frac{mu}{M}\right)\mat{f}[:,m]\\
\label{eqn:inverse}
\mat{f}[:,m] &= T\sum_{u=0}^{M-1}\exp\left(\phantom{-}\mat{J}\,2\pi\frac{mu}{M}\right)\mat{F}[:,u]
\end{align}
where \mat{J} is an $N\times N$ matrix root of $-1$,
\mat{f} and \mat{F} are $N\times M$ matrices with one sample per column,
and the two scale factors $S$ and $T$ multiply to give $1/M$.
\end{theorem}
\begin{proof}
\newcommand{\m}{\mathcal{M}}
The proof is based on substitution of the forward transform \eqref{eqn:forward}
into the inverse \eqref{eqn:inverse} followed by algebraic reduction to a result
equal to the original signal $\mat{f}$.
We start by substituting \eqref{eqn:forward} into
\eqref{eqn:inverse}, replacing $m$ by $\m$ to keep the two indices distinct{,
and at the same time replacing the two scale factors by their product $1/M$}:
\begin{equation*}
\mat{f}[:,m] = {\frac{1}{M}}\sum_{u=0}^{M-1}\left[e^{\mat{J}2\pi\frac{mu}{M}}
\sum_{\m=0}^{M-1}e^{-\mat{J}2\pi\frac{\m u}{M}}\mat{f}[:,\m]\right]
\end{equation*}
The exponential of the outer summation can be moved inside the inner,
because it is constant with respect to the summation index $\m$:
\begin{equation*}
\mat{f}[:,m] = \frac{1}{M}\sum_{u=0}^{M-1}\sum_{\m=0}^{M-1}
e^{\mat{J}\,2\pi\frac{mu}{M}}
e^{-\mat{J}\,2\pi\frac{\m u}{M}}
\mat{f}[:,\m]
\end{equation*}
The two exponentials have the same root of $-1$, namely \mat{J},
and therefore they can be combined:
\begin{equation*}
\mat{f}[:,m] = \frac{1}{M}\sum_{u=0}^{M-1}\sum_{\m=0}^{M-1}
e^{\mat{J}\,2\pi\frac{(m-\m)u}{M}}
\mat{f}[:,\m]
\end{equation*}
We now isolate out from the inner summation the case where $m=\m$.
In this case the exponential reduces to an identity matrix, and we have:
\begin{align*}
\mat{f}[:,m] &= \frac{1}{M}\sum_{u=0}^{M-1}\mat{f}[:,m]\\
&+ \frac{1}{M}\sum_{u=0}^{M-1}\left[\left.
\sum_{\m=0}^{M-1}\right|_{\m\ne m}
e^{\mat{J}\,2\pi\frac{(m-\m)u}{M}}
\mat{f}[:,\m]\right]
\end{align*}
The first line on the right sums to $\mat{f}[:,m]$, which is the original
signal, as required.
To complete the proof,
we have to show that the second line on the right reduces to zero.
Taking the second line alone,
and changing the order of summation, we obtain:
\[
\left.\sum_{\m=0}^{M-1}\right|_{\m\ne m}
\left[\sum_{u=0}^{M-1}e^{\mat{J}\,2\pi\frac{(m-\m)u}{M}}\right]\mat{f}[:,\m]
\]
Using Lemma~\ref{lemma:euler} we now write the matrix exponential
as the sum of a cosine and sine term.
\begin{equation*}
\left.\sum_{\m=0}^{M-1}\right|_{m\ne\m}
\left[
\begin{aligned}
\mat{I}&\sum_{u=0}^{M-1}\cos\left(\!2\pi\frac{(m-\m)u}{M}\right)\\
+\mat{J}&\sum_{u=0}^{M-1}\sin\left(\!2\pi\frac{(m-\m)u}{M}\right)
\end{aligned}
\right]\mat{f}[:,\m]
\end{equation*}
Since both of the inner summations are sinusoids summed over an
integral number of cycles, they vanish, and this completes the
proof.
\qed
\end{proof}
Notice that the requirement for $\mat{J}^2=-\mat{I}$ is the only
constraint on \mat{J}.
It is not necessary to constrain elements of \mat{J} to be real.
Note that $\mat{J}^2=-\mat{I}$ implies that $\mat{J}^{-1}=-\mat{J}${,}
hence the inverse transform is obtained by negating or inverting the matrix
root of $-1$ (the two operations are equivalent).
The matrix dimensions must be consistent according to the ordering inside the summation.
As written above, for a complex transform represented in matrix
form,
\mat{f} and \mat{F} must have two rows and $M$ columns.
If the exponential were to be placed on the right,
\mat{f} and \mat{F} would have to be transposed,
with two columns and $M$ rows.
It is important to realize that \eqref{eqn:forward} is totally
different to the classical matrix formulation of the discrete Fourier
transform,
as given for example by Golub and Van Loan \cite[\S\,4.6.4]{Golub:1996}.
The classic DFT given in \eqref{eqn:classicdft} can be formulated as a matrix
equation in which a large $M\times M$ {Vandermonde} matrix containing
$n\textsuperscript{th}$ roots of unity
multiplies the signal $f$ expressed as a vector of real or complex values.
Instead,
in \eqref{eqn:forward} each matrix exponential multiplies
\emph{one column} of \mat{f},
corresponding to \emph{one sample} of the signal represented by \mat{f}
and the dimensions of the matrix exponential are set by the dimensionality
of the algebra (2 for complex, 4 for quaternions \textit{etc.}).
In \eqref{eqn:forward} it is the multiplication of the exponential and
the signal samples, dependent on the algebra involved,
that is expressed in matrix form,
not the structure of the transform itself.
Readers who are already familiar with hypercomplex Fourier transforms should note that
the ordering of the exponential within the summation \eqref{eqn:forward}
is not related to the ordering within the hypercomplex formulation of the transform
(which is significant because of non-commutative multiplication).
The hypercomplex ordering can be accommodated within the framework presented
here by changing the representation of the matrix root of $-1$,
in a non-trivial way, shown for the quaternion case by
Ickes \cite[Equation 10]{Ickes:1970} and called \emph{transmutation}.
We have studied the generalisation of Ickes' transmutation to the case
of Clifford algebras, and it appears {that there is a more general}
operation. {In the cases we have studied this can be}
described as negation of the off-diagonal elements of the
lower-right sub-matrix, excluding the first row and column\footnote{This
gives the same result as transmutation in the quaternion case.}.
We believe a more general result is known in Clifford algebra
but we have not been able to locate a clear statement that we could cite.
We therefore leave this for later work, as a full generalisation to Clifford algebras of
arbitrary dimension requires further work, and is more appropriate to
a more mathematical paper.
\section{Examples in specific algebras}
\label{sec:examples}
In this section we present the information necessary for \eqref{eqn:forward}
and \eqref{eqn:inverse} to be verified numerically.
In each of the cases below, we present an example root of $-1$ and
a matrix representation{\footnote{{The matrix representations of
roots of -1 are not unique -- a transpose of the matrix, for example, is
equally valid. The operations that leave the square of the matrix invariant
probably correspond to fundamental operations in the hypercomplex algebra, for
example negation, conjugation, reversion.}}}.
We include in the Appendix a short \textsc{matlab}\textregistered\xspace function for computing
the transform in \eqref{eqn:forward}.
The same code will compute the inverse by negating \mat{J}.
This may be used to verify the results in the next section and to
compare the results obtained with the classic complex FFT.
In order to verify the quaternion or biquaternion results,
the reader will need to install the QTFM toolbox \cite{qtfm}{,
or use some other specialised software for computing with quaternions.}
\subsection{Complex algebra}
\label{sec:complex}
The $2\times2$ real matrix
$\left(
\begin{smallmatrix}
0 & -1\\
1 & \phantom{-}0
\end{smallmatrix}
\right)$
can be easily verified by eye to be a square root of the negated identity matrix
$\left(\begin{smallmatrix}-1 & \phantom{-} 0\\ \phantom{-} 0 &-1\end{smallmatrix}\right)$,
and it is easy to verify numerically
that Euler's formula gives the same numerical results in the classic complex
case and in the matrix case for an arbitrary $\theta$.
This root of $-1$ is based on the well-known isomorphism between a complex number
$a+j b$ and the matrix representation
$\left(\begin{smallmatrix}a & -b\\b & \phantom{-}a\end{smallmatrix}\right)$
\cite[Theorem 1.6]{Ward:1997}\footnote{We have used the transpose of Ward's
representation for consistency with the quaternion and biquaternion representations
in the two following sections.}.
The structure of a matrix exponential $e^{\mat{J}\theta}$ using the above matrix
for \mat{J} is
$\left(
\begin{smallmatrix}
C & -S\\
S & \phantom{-}C
\end{smallmatrix}
\right)$ where $C=\cos\theta$ and $S=\sin\theta$.
\subsection{Quaternion algebra}
\label{sec:quaternion}
The quaternion roots of $-1$ were discovered by Hamilton
\cite[pp\,203, 209]{Hamiltonpapers:V3:7}, and consist of all
unit pure quaternions, that is quaternions of the form $x\i+y\j+z\k$
subject to the constraint $x^2+y^2+z^2=1$.
A simple example is the quaternion $\boldsymbol\mu=(\i+\j+\k)/\sqrt{3}$,
which can be verified by hand to be a square root of $-1$ using the usual
rules for multiplying the quaternion basis elements ($\i^2=\j^2=\k^2=\i\j\k=-1$).
Using the isomorphism with $4\times4$ matrices given by Ward \cite[\S\,2.8]{Ward:1997},
between the quaternion
$w+x\i+y\j+z\k$ and the matrix:
\begin{align*}
&
\begin{pmatrix}
w & -x & -y & -z\\
x & \phantom{-}w & -z & \phantom{-}y\\
y & \phantom{-}z & \phantom{-}w & -x\\
z & -y & \phantom{-}x & \phantom{-}w
\end{pmatrix}
\intertext{we have the following matrix representation:}
\boldsymbol\mu = \frac{1}{\sqrt{3}}
&
\begin{pmatrix}
\newcommand{\s}{{\color{white}+}}
0 & -1 & -1 & -1\\
1 & \phantom{-}0 & -1 & \phantom{-}1\\
1 & \phantom{-}1 & \phantom{-}0 & -1\\
1 & -1 & \phantom{-}1 & \phantom{-}0
\end{pmatrix}
\end{align*}
Notice the structure that is apparent in this matrix: the
$2\times2$ blocks on the leading diagonal {at the top left and bottom right}
can be recognised as roots of $-1$ in the complex algebra as shown in \S\,\ref{sec:complex}.
\begin{proposition}
\label{prop:quatroot}
Any matrix of the form:
\[
\begin{pmatrix}
0 & -x & -y & -z\\
x & \phantom{-}0 & -z & \phantom{-}y\\
y & \phantom{-}z & \phantom{-}0 & -x\\
z & -y & \phantom{-}x & \phantom{-}0
\end{pmatrix}
\]
with $x^2+y^2+z^2=1$ is the square root of a negated $4\times4$ identity matrix.
Thus the matrix representations of the quaternion roots of $-1$ are all
roots of the negated $4\times4$ identity matrix.
\end{proposition}
\begin{proof}
The matrix is anti-symmetric, and the inner product of the $i\textsuperscript{th}$
row and $i\textsuperscript{th}$ column is $-x^2-y^2-z^2$, which is $-1$
because of the stated constraint.
Therefore the diagonal elements of the square of the matrix are $-1$.
Note that the rows of the matrix have one or three negative values,
whereas the columns have zero or two.
The product of the $i\textsuperscript{th}$ row with the $j\textsuperscript{th}$
column, $i\ne j$, is the sum of two values of opposite sign and equal magnitude.
Therefore all off-diagonal elements of the square of the matrix are zero.
\qed
\end{proof}
The structure of a matrix exponential $e^{\mat{J}\theta}$ using a matrix as in
Proposition \ref{prop:quatroot} for \mat{J} is:
\[
\begin{pmatrix}
\phantom{x}C & -xS & -yS & -zS\\
xS & \phantom{-x}C & -zS & \phantom{-}yS\\
yS & \phantom{-}zS & \phantom{-x}C & -xS\\
zS & -yS & \phantom{-}xS & \phantom{-x}C
\end{pmatrix}
\]
where, as before, $C=\cos\theta$ and $S=\sin\theta$.
\subsection{Biquaternion algebra}
The biquaternion algebra \cite[Chapter 3]{Ward:1997}
(quaternions with complex elements)
can be handled exactly as in the previous section, except
that the $4\times4$ matrix representing the root of $-1$ must
be complex (and the signal matrix must have {four}
complex elements {per column}).
The set of square roots of $-1$ in the biquaternion algebra
is given in \cite{10.1007/s00006-006-0005-8}.
A simple example is $\i+\j+\k+\I(\j-\k)$ (where \I denotes the
classical complex root of $-1$, that is the biquaternion has
real part $\i+\j+\k$ and imaginary part $\j-\k$).
Again, this can be verified by hand to be a root of $-1$
and its matrix representation is:
\[
\begin{pmatrix}
0 & -1 & -1 - \I & -1 + \I\\
1 & \phantom{-}0 & -1 + \I & \phantom{-}1 + \I\\
1 +\I & \phantom{-}1 -\I & 0 & -1\\
1 -\I & -1 -\I & 1 & \phantom{-}0
\end{pmatrix}
\]
Again, sub-blocks of the matrix have recognizable structure.
The {upper left and lower right} diagonal $2\times2$ blocks are roots of
$-1$, while the {lower left and upper right} off-diagonal {$2\times2$}
blocks are nilpotent -- that is their square vanishes.
\subsection{Clifford algebras}
Recent work by Hitzer and Ab{\l}amowicz \cite{10.1007/s00006-010-0240-x}
has explored the roots of $-1$ in Clifford algebras \clifford{p}{q} up to those
with $p+q = 4$, which are 16-dimensional algebras\footnote{$p$ and $q$ are
non-negative integers such that $p+q=n$ and $n\ge1$. The dimension of the
algebra (strictly the dimension of the space spanned by the basis
elements of the algebra) is $2^n$ .}.
The derivations of
the roots of -1 for the 16-dimensional algebras are long and difficult.
Therefore, for the moment, we confine the discussion here to lower-order
algebras, noting that, since all Clifford algebras are isomorphic to a
matrix algebra, we can be assured that if roots of -1 exist, they must
have a matrix representation.
Using {the results obtained by Hitzer and Ab{\l}amowicz},
and by finding from first principles
the layout of a real matrix isomorphic to a Clifford
multivector in a given algebra, it has been possible to
verify that the transform formulation presented in this
paper is applicable to at least the lower order Clifford
algebras.
The quaternions and biquaternions are isomorphic to the
Clifford algebras \clifford{0}{2} and \clifford{3}{0}
respectively so this is not surprising. Nevertheless, it is
an important finding, because until now quaternion and
Clifford Fourier transforms were defined in different
ways, using different terminology, and it was difficult
to make comparisons between the two.
Now, with the matrix exponential formulation,
it is possible to handle quaternion and Clifford transforms
(and indeed transforms in different Clifford algebras) within
the same algebraic and/or numerical framework.
We present examples here from two of the 4-dimensional
Clifford algebras, namely \clifford{1}{1} and \clifford{2}{0}.
These results have been verified against the \textsc{CLICAL}\xspace
package \cite{CLICAL-User-manual} to ensure that the
multiplication rules have been followed correctly and
that the roots of $-1$ found by Hitzer and Ab{\l}amowicz
are correct.
Following the notation in \cite{10.1007/s00006-010-0240-x},
we write a multivector in \clifford{1}{1} as
$\alpha + b_1 e_1 + b_2 e_2 + \beta e_{12}$,
where $e_1^2=+1, e_2^2=-1, e_{12}^2=+1$ and $e_1 e_2 = e_{12}$.
A possible real matrix representation is as follows:
\[
\begin{pmatrix}
\alpha & \phantom{-}b_1 & -b_2 & \beta\\
b_1 & \phantom{-}\alpha & -\beta & b_2\\
b_2 & -\beta & \phantom{-}\alpha & b_1\\
\beta & -b_2 & \phantom{-}b_1 & \alpha
\end{pmatrix}
\]
In this algebra, the constraints on the coefficients
of a multivector for it to be a root of $-1$ are as
follows: $\alpha=0$ and $b_1^2-b_2^2+\beta^2=-1$
\cite[Table 1]{10.1007/s00006-010-0240-x}\footnote{We have
re-arranged the constraint compared to \cite[Table 1]{10.1007/s00006-010-0240-x}
to make the comparison with the quaternion case easier:
we see that the signs of the squares of the coefficients
match the signs of the squared basis elements.}.
Choosing $b_1=\beta=1$ gives $b_2=\sqrt{3}$ and thus
$e_1 + \sqrt{3}e_2 + e_{12}$ which can be verified
algebraically or in \textsc{CLICAL}\xspace to be a root of $-1$.
The corresponding matrix is then:
\[
\begin{pmatrix}
0 & \phantom{-}1 & -\sqrt{3} & 1\\
1 & \phantom{-}0 & -1 & \sqrt{3}\\
\sqrt{3} & -1 & \phantom{-}0 & 1\\
1 & -\sqrt{3} & \phantom{-}1 & 0
\end{pmatrix}
\]
Following the same notation in algebra \clifford{2}{0},
in which $e_1^2=e_2^2=+1, e_{12}^2=-1$, a possible
matrix representation is:
\[
\begin{pmatrix}
\alpha & \phantom{-}b_1 & b_2 & -\beta\\
b_1 & \phantom{-}\alpha & \beta & -b_2\\
b_2 & -\beta & \alpha & \phantom{-}b_1\\
\beta & -b_2 & b_1 & \phantom{-}\alpha
\end{pmatrix}
\]
The constraints on the coefficients are $\alpha=0$ and
$b_1^2+b_2^2-\beta^2=-1$,
and choosing $b_1=b_2=1$ gives $\beta=\sqrt{3}$ and thus
$e_1 + e_2 + \sqrt{3}e_{12}$ is a root of $-1$.
The corresponding matrix is then:
\[
\begin{pmatrix}
0 & \phantom{-}1 & 1 & -\sqrt{3}\\
1 & \phantom{-}0 & \sqrt{3} & -1\\
1 & -\sqrt{3} & 0 & \phantom{-}1\\
\sqrt{3} & -1 & 1 & \phantom{-}0
\end{pmatrix}
\]
Notice that in both of these algebras the matrix representation
of a root of $-1$ is very similar to that given for the quaternion
case in Proposition~\ref{prop:quatroot}, with zeros on the
leading diagonal, an odd number of negative values in each
row and an even number in each column.
It is therefore simple to see that minor modifications to
Proposition~\ref{prop:quatroot} would cover these algebras
and the matrices presented above.
\section{An example not based on a specific\\algebra}
\label{sec:mystery}
We show here using an arbitrary $2\times2$ matrix root of $-1$,
that it is possible to define a Fourier transform
{without a specific} algebra.
Let an arbitrary real matrix be given as
$J = \left(\begin{smallmatrix}a & b\\c & d\end{smallmatrix}\right)$,
then by brute force expansion of $J^2=-{I}$ we find
the original four equations reduce to but two independent equations.
Picking $(a,b)$ and solving for the remaining coefficients we find that
any matrix of the form:
\begin{equation*}
\begin{pmatrix}a & \phantom{-} b\\ -(1+a^2)/b &-a\end{pmatrix}
\end{equation*}
with finite $a$ and $b$, and $b\ne0$, is a root of $-1$. Choosing instead $(a,c)$ we get the transpose form:
\begin{equation*}
\begin{pmatrix}a & -(1+a^2)/c \\ c &-a\end{pmatrix}
\end{equation*}
where $c\ne0$.
Choosing the cross-diagonal terms $(b,c)$ yields:
\begin{equation}
\label{eqn:ellipseroot}
{
\begin{pmatrix}
\pm\kappa & b\\
c & \mp\kappa
\end{pmatrix}
}
\end{equation}
{where $\kappa=\sqrt{-1 - bc}$ and} $bc\leq-1$.
In all cases the resulting matrix has eigenvalues of $\lambda = \pm i$.
(This is a direct consequence of the fact that this matrix squares to $-1$.)
Each form, however, has different eigenvectors.
The standard matrix {representation} for the complex operator {$i$ is}
{$\left(\begin{smallmatrix}0 & -1\\1 &\phantom{-}0\end{smallmatrix}\right)$}
{with}
eigenvectors $v = [1,\pm\,i]$.
In the matrix with $(a,b)$ parameters the eigenvectors are $v = [1,-b/(a\,\pm\,i)]$
whereas the cross-diagonal form with $(b,c)$ parameters has eigenvectors
{$v = [1,(\kappa\,\pm\,i)/c]$}.
These forms suggest the interesting question:
which algebra, if any, applies here\footnote{It is possible that
there is no corresponding `algebra' in the usual sense.
Note that there are only two Clifford algebras of dimension 2,
one of which is the algebra of complex numbers.
The other has no multivector roots of -1 \cite[\S\,4]{10.1007/s00006-010-0240-x}
and therefore the roots of $-1$ given above cannot be a root of $-1$ in any
Clifford algebra.}; and how can the Fourier coefficients (the `spectrum') be interpreted?
{We are not able to answer the first question in this paper.
The `interpretation' of the spectrum is relatively simple.
Consider a spectrum $\mat{F}$ containing only one non-zero column at index $u_0$
with value $\left(\begin{smallmatrix}x\\y\end{smallmatrix}\right)$ and invert this
spectrum using \eqref{eqn:inverse}.
Ignoring the scale factor, the result will be the signal:
\[
\mat{f}[:,m] = \exp\left(\mat{J}\,2\pi\frac{mu_0}{M}\right)\begin{pmatrix}x\\y\end{pmatrix}
\]
The form of the matrix exponential depends on \mat{J}.
In the classic complex case, as given in \S\,\ref{sec:complex},
the matrix exponential, as already seen, takes the form:
\[
\begin{pmatrix}
\cos\theta & -\sin\theta\\
\sin\theta & \phantom{-}\cos\theta
\end{pmatrix}
\]
where $\theta=2\pi\frac{mu_0}{M}$.
}
This is a rotation matrix and it
maps a real unit vector $\left(\begin{smallmatrix}1\\0\end{smallmatrix}\right)$
to a point on a circle in the complex plane.
It embodies the standard \emph{phasor} concept associated with sinusoidal functions.
Using the same analysis, this time using the matrix in \eqref{eqn:ellipseroot} above,
one obtains for the matrix exponential the `phasor' operator:
\[
\left(
\begin{array}{@{}rr@{}}
\cos\theta + \kappa\sin\theta & b\sin\theta\\
c\sin\theta & \cos\theta - \kappa\sin\theta
\end{array}
\right)
\]
Instead of mapping a real unit vector
$\left(\begin{smallmatrix}1\\0\end{smallmatrix}\right)$
to a {point on a circle},
this matrix maps to an ellipse.
Thus, we see that a transform based on a matrix such as that
in \eqref{eqn:ellipseroot} has basis functions that are projections of an
elliptical, rather than a circular path in the complex plane, as in the
classical complex Fourier transform.
We refer the reader to a discussion on a similar point for the one-sided quaternion
discrete Fourier transform in our own 2007 paper \cite[\S\,VI]{10.1109/TIP.2006.884955},
in which we showed that the quaternion coefficients of the Fourier spectrum
also represent elliptical paths through the space of the signal samples.
{It is possible that the matrices discussed in this section could be
transformed by similarity transformations into matrices representing elements
of a Clifford algebra\footnote{{We are grateful to Dr Eckhard Hitzer for pointing
this out, in September 2010.}}.
Note that in the quaternion case, any root of -1
lies on the unit sphere in 3-space,
and can therefore be transformed into another root of -1 by a rotation.
It is possible that the same applies in other algebras,
the transformation needed being dependent on the geometry.}
Clearly there are interesting issues to be studied here,
and further work to be done.
\section{Non-existence of transforms in algebras with odd dimension}
\label{sec:nonexist}
In this section we show that there are no real matrix roots of $-1$
with odd dimension.
This is not unexpected, since the existence of such roots would {suggest}
the existence of a hypercomplex algebra of odd dimension.
The significance of this result is to show that there is no
discrete Fourier transform
as formulated in Theorem \ref{theorem:matrixdft}
for an algebra of dimension $3$, which is
of importance for the processing of signals representing physical 3-space
quantities, or the values of colour image pixels.
We thus conclude that the choice of quaternion Fourier transforms or
a Clifford Fourier transform of dimension $4$ is inevitable in these
cases.
This is not an unexpected conclusion, nevertheless,
in the experience of the authors, some researchers in signal and image
processing hesitate to accept the idea of using four dimensions to
handle three-dimensional samples or pixels. (This is despite the rather
obvious parallel of needing two dimensions -- complex numbers -- to
represent the Fourier coefficients of a real-valued signal or image.)
\begin{theorem}
There are no $N\times N$ matrices $\mat{J}$ with real elements such that
$\mat{J}^2=-\mat{I}$ for odd values of $N$.
\end{theorem}
\begin{proof}
The determinant of a diagonal matrix is the product of its diagonal entries.
Therefore $|-\mat{I}|=-1$ for odd $N$.
Since the product of two determinants is the determinant of the product,
$|\mat{J}^2|=-1$ requires $|\mat{J}|^2=-1$, which cannot
be satisfied if $\mat{J}$ has real elements.
\qed
\end{proof}
\section{Extension to two-sided DFTs}
\label{twodimdft}
There have been various definitions of two sided hypercomplex
Fourier transforms and DFTs.
We consider here only one case
to demonstrate that the approach presented in this
paper is applicable to two-sided as well as one-sided transforms: this is
a matrix exponential Fourier transform based on
Ell's original two-sided two-dimensional
quaternion transform
\cite[Theorem 4.1]{Ell:thesis}, \cite{Ell:1993}, \cite{10.1049/el:19961331}.
A more general formulation is:
\begin{equation}
\mat{F}[u, v] = S\!\sum_{m=0}^{M-1}\sum_{n=0}^{N-1}\!
e^{-\mat{J} 2\pi\frac{mu}{M}}\mat{f}[m,n]e^{-\mat{K} 2\pi\frac{nv}{N}}
\label{eqn:twodforward}
\end{equation}
\begin{equation}
\mat{f}[m, n] = T\!\sum_{u=0}^{M-1}\sum_{v=0}^{N-1}\!
e^{+\mat{J} 2\pi\frac{mu}{M}}\mat{F}[u,v]e^{+\mat{K} 2\pi\frac{nv}{N}}
\label{eqn:twodinverse}
\end{equation}
in which \emph{each element} of the two-dimensional arrays \mat{F}
and \mat{f} is a {square} matrix representing a
complex or hypercomplex number using
a matrix isomorphism for the algebra in use,
for example the representations already given in \S\,\ref{sec:quaternion}
in the case of the quaternion algebra;
the two scale factors multiply to give $1/MN$,
and \mat{J} and \mat{K} are matrix
representations of two \emph{arbitrary}
roots of $-1$ in the chosen algebra.
(In Ell's original formulation, the roots of $-1$
were $\j$ and $\k$, that is two of the \emph{orthogonal} quaternion basis
elements. The following theorem shows that there is no requirement for
the two roots to be orthogonal in order for the transform to invert.)
\begin{theorem}
\label{theorem:twodmatrixdft}
The transforms in \eqref{eqn:twodforward} and \eqref{eqn:twodinverse}
are a two-dimensional discrete Fourier transform pair,
{provided that $\mat{J}^2 = \mat{K}^2 = -\mat{I}$.}
\end{theorem}
\begin{proof}
\newcommand{\m}{\mathcal{M}}
\newcommand{\n}{\mathcal{N}}
The proof follows the same scheme as the proof of Theorem~\ref{theorem:matrixdft},
but we adopt a more concise presentation to fit the available column space.
We start by substituting \eqref{eqn:twodforward} into
\eqref{eqn:twodinverse}, replacing $m$ and $n$ by $\m$ and $\n$ respectively
to keep the indices distinct:
\begin{align*}
\mat{f}[m, n] &= {\frac{1}{M N}}\sum_{u=0}^{M-1}\sum_{v=0}^{N-1}
e^{\mat{J} 2\pi\frac{mu}{M}}\\
&\times\left[\sum_{\m=0}^{M-1}\sum_{\n=0}^{N-1}
e^{-\mat{J} 2\pi\frac{\m u}{M}}
\mat{f}[\m,\n]
e^{-\mat{K} 2\pi\frac{\n v}{N}}\right]\\
&\times
e^{\mat{K} 2\pi\frac{nv}{N}}
\end{align*}
The scale factors can be moved outside both summations,
and replaced with their product $1/M N$;
and the exponentials of the outer summations can be moved inside the inner,
because they are constant with respect to the summation indices $\m$ and $\n$.
At the same time, adjacent exponentials with the same root of $-1$ can be merged.
With these changes{, and omitting the scale factor to save space},
the right-hand side of the equation becomes:
\begin{equation*}
\sum_{u=0}^{M-1}\sum_{v=0}^{N-1}
\sum_{\m=0}^{M-1}\sum_{\n=0}^{N-1}
e^{\mat{J} 2\pi\frac{(m-\m)u}{M}}
\mat{f}[\m,\n]
e^{\mat{K} 2\pi\frac{(n-\n)v}{N}}
\end{equation*}
We now isolate out from the inner pair of summations the case where $\m=m$ and $\n=n$.
In this case the exponentials reduce to identity matrices, and we have:
\begin{equation*}
\frac{1}{MN}\sum_{u=0}^{M-1}\sum_{v=0}^{N-1}\mat{f}[m,n]
\end{equation*}
This sums to $\mat{f}[m,n]$, which is the original two-dimensional signal,
as required.
To complete the proof we have to show that the rest of the summation,
excluding the case $\m=m$ and $\n=n$, reduces to zero.
Dropping the scale factor, and changing the order of summation,
we have the following inner double summation:
\begin{equation*}
\sum_{u=0}^{M-1}\sum_{v=0}^{N-1}
e^{\mat{J} 2\pi\frac{(m-\m)u}{M}}
\mat{f}[\m,\n]
e^{\mat{K} 2\pi\frac{(n-\n)v}{N}}
\end{equation*}
Noting that the first exponential and \mat{f} are independent
of the second summation index $v$, we can move them outside
the second summation (we could do similarly with the exponential
on the right and the first summation):
\begin{equation*}
\sum_{u=0}^{M-1}
e^{\mat{J} 2\pi\frac{(m-\m)u}{M}}
\mat{f}[\m,\n]
\sum_{v=0}^{N-1}
e^{\mat{K} 2\pi\frac{(n-\n)v}{N}}
\end{equation*}
and, as in Theorem \ref{theorem:matrixdft},
the summation on the right is over an integral number of cycles
of cosine and sine, and therefore vanishes.
\qed
\end{proof}
Notice that it was not necessary to assume that \mat{J} and \mat{K}
were orthogonal: it is sufficient that each be a root of $-1$.
This has been verified numerically using the two-dimensional code
given in the Appendix.
\section{Discussion}
We have shown that any discrete Fourier transform in an algebra
that has a matrix representation, can be formulated in the way
shown here.
This includes the complex, quaternion, biquaternion, and Clifford
algebras (although we have demonstrated only certain cases of
Clifford algebras, we believe the result holds in general).
{This observation provides a theoretical unification of
diverse hypercomplex DFTs.}
Several immediate possibilities for further work, as well as
ramifications, now suggest themselves.
Firstly,
the study of roots of $-1$ is accessible from the matrix
representation as well as direct representation in whatever
algebra is employed for the transform.
All of the results obtained so far in hypercomplex algebras,
and known to the authors \cite[pp\,203, 209]{Hamiltonpapers:V3:7},
\cite{10.1007/s00006-006-0005-8,10.1007/s00006-010-0240-x},
were achieved by working \emph{in the algebra} in question,
that is by algebraic manipulation of quaternion,
biquaternion or Clifford multivector values.
An alternative approach would be to work in the equivalent
matrix algebra, but this seems difficult even for the lower
order cases.
Nevertheless, it merits further study
because of the possibility of finding a systematic approach
that would cover many algebras in one framework.
Following the reasoning in \S\,\ref{sec:mystery},
it is possible to define matrix roots of $-1$ that appear not
to be isomorphic to any Clifford or quaternion algebra,
and these merit further study.
Secondly, the matrix formulation presented here lends itself
to analysis of the structure of the transform, including
possible factorizations for fast algorithms, as well as
parallel or vectorized implementations for single-instruction,
multiple-data (\textsc{simd}) processors, and of course,
factorizations into multiple complex FFTs as has been done
for quaternion FFTs (see for example \cite{SangwineEll:2000b}).
In the case of matrix roots of $-1$ which do
not correspond to Clifford or quaternion algebras, analysis of
the structure of the transform may give insight into possible
applications of transforms based on such roots.
Finally, at a practical level, hypercomplex transforms implemented
{directly} in hypercomplex arithmetic are likely to be much
faster than any implementation based on matrices,
but the simplicity of the matrix exponential formulation
{discussed in this paper},
and the fact that it can be computed using standard real or complex
matrix arithmetic, {\emph{without using a hypercomplex library},}
means that the matrix exponential formulation provides a very
simple reference implementation which can be used for
verification of {the correctness of} hypercomplex code.
{This is an important point, because verification of the
correctness of hypercomplex FFT code is otherwise non-trivial.
Verification of inversion is simple enough, but establishing that
the spectral coefficients have the correct values is much less so.
}
\appendix
\section{\textsc{matlab}\textregistered\xspace code}
We include here two short \textsc{matlab}\textregistered\xspace functions
for computing the forward transform given in \eqref{eqn:forward},
and \eqref{eqn:twodforward},
\emph{apart from the scale factors}. The inverses
can be computed simply by interchanging the input and output
and negating the matrix roots of $-1$.
Neither function is coded for speed, on the contrary the coding
is intended to be simple and easily verified against the equations.
\begin{alltt}
{\color{blue}function} F = matdft(f, J)
% One-dimensional matrix-exponential DFT, apart from the scale factor.
% Input f holds the samples as columns; J is a matrix root of -1.
% expm is the MATLAB matrix exponential.
M = size(f, 2);
F = zeros(size(f));
{\color{blue}for} m = 0:M-1
{\color{blue}for} u = 0:M-1
% Accumulate output column u: the exponential multiplies on the left.
F(:, u + 1) = F(:, u + 1) {\color{blue}...}
+ expm(-J .* 2 .* pi .* m .* u./M) {\color{blue}...}
* f(:, m + 1);
{\color{blue}end}
{\color{blue}end}
\end{alltt}
\begin{alltt}
{\color{blue}function} F = matdft2(f, J, K)
% Two-dimensional matrix-exponential DFT, apart from the scale factor.
% J and K are matrix roots of -1; A is the size of their matrix
% representation, and f is stored as an M-by-N array of A-by-A blocks.
A = size(J, 1);
M = size(f, 1) ./ A;
N = size(f, 2) ./ A;
F = zeros(size(f));
{\color{blue}for} u = 0:M-1
{\color{blue}for} v = 0:N-1
{\color{blue}for} m = 0:M-1
{\color{blue}for} n = 0:N-1
% Accumulate output block (u, v): the J exponential multiplies
% each input block on the left, the K exponential on the right.
F(A*u+1:A*u+A, A*v+1:A*v+A) = {\color{blue}...}
F(A*u+1:A*u+A, A*v+1:A*v+A) + {\color{blue}...}
expm(-J .* 2*pi .* m .* u./M) {\color{blue}...}
* f(A*m+1:A*m+A, A*n+1:A*n+A) {\color{blue}...}
* expm(-K .* 2*pi .* n .* v./N);
{\color{blue}end}
{\color{blue}end}
{\color{blue}end}
{\color{blue}end}
\end{alltt}
\nocite{Hamilton:1848}
\end{document} |
\begin{document}
\title{Comparison of quantum discord and relative entropy in some bipartite quantum systems }
\author{M.~Mahdian}
\altaffiliation{
Author to whom correspondence should be addressed; electronic
mail: mahdian@tabrizu.ac.ir}
\affiliation{
Faculty of Physics, Theoretical and astrophysics department , University of Tabriz, 51665-163 Tabriz, Iran}
\author{M. B. ~Arjmandi}
\affiliation{
Faculty of Physics, Theoretical and astrophysics department , University of Tabriz, 51665-163 Tabriz, Iran}
\begin{abstract}
The study of quantum correlations in High-dimensional bipartite systems is crucial for the development of quantum computing.
We propose relative entropy as a distance measure of correlations, which may be measured by means of the distance from the quantum state to the closest classical-classical state. In particular, we establish relations between relative entropy and quantum discord quantifiers obtained by means
of orthogonal projection measurements. We show that for symmetric X-state density matrices the quantum discord is equal to the relative entropy. At the end of the paper, various examples of X-states, such as two-qubit and qubit-qutrit states, are demonstrated.
\end{abstract}
\pacs{03.67.Mn 03.65.Ta }
\maketitle
\section{Introduction}
The main goal of quantum information theory is quantifying and
describing quantum processes that rely on quantum correlations
[1,2,3,4]. These correlations are
essential resources in information and computation sciences and
different measures have been put forward for it. Entanglement as the
cornerstone of correlation measures has an effective role in
information processing and has applications in the quantum
computing, cryptography, superdense coding [5,6]
and has been used to quantify quantum teleportation
\cite{Bennett2,Oh}. Apart from entanglement, quantum states can
exhibit other correlations not present in classical systems and it could be a new resource for quantum computation. In order to quantify quantum correlations, the suitable measure is so called the
quantum discord, introduced by Olliver and Zurek \cite{zurek} and
also by Henderson and Vedral \cite{Vedral} independently. Over the past decade, quantum discord has received a lot of attention and many studies performed and articles written about that \cite{ Li, Lang,
Mahdian, Luo, Girolami1, Daki, Girolami2, Saif, Shunlong}. For pure states, quantum discord reduces to entanglement but has a nonzero
value for some mixed separable states and define as the discrepancy between total correlation and classical correlation. However, for mixed quantum states, evaluation of quantum discord is based on minimization procedures over all possible positive operator valued measures (POVM), or von Neumann measurements that can be performed on the subsystems and thus it is somewhat difficult to calculate even numerically.\\
Quite recently, a few analytical results of quantum discord including especially the case of two qubits states such as rank-2 states \cite{shi} and Bell-diagonal \cite{Luo} have been obtained. In addition, for a rather limited set of two-qubit states, the so-called X states, an analytical formula of quantum discord is proposed by Ali et al \cite{mazhar}. We know that quantum discord measures the amount of information that
cannot be obtained by performing the measurement on one subsystem alone, and the state after measurement is a conditional state. But, in this paper we consider some density matrices for which, after performing measurements over all the subsystems, the conditional density matrix of the bipartite system is a classical-classical state (which means that the right and left quantum discord are equal). So, for these kinds of quantum states the quantum discord is equal to the relative entropy, and the calculation of the relative entropy is quite easy.\\
\emph{Quantum discord}. The classical mutual information $I(A:B)$ for two discrete random variables A and B, is defined as $I(A:B)=H(A)+H(B)-H(A,B)$. Here, $H(p)=-\sum_{i}p_{i} \log p_{i} $ denotes the shannon entropy of the proper distribution \cite{Nielsen}. For a classical
probability distribution, Bayes' rule: $p(a_{i},b_{j})=p(a_{i}|b_{j})p(b_{j})=p(b_{j}|a_{i})p(a_{i})$,
leads to an equivalent definition of the mutual information as
$I(A:B)=H(A)-H(A|B)$.\\
For a given quantum density matrix of a composite system $\rho_{AB}$ , the total amount of correlations, including classical and quantum correlations, is quantified by the quantum mutual information as
\begin{equation}
I(\rho_{AB})=S(\rho_{A})+S(\rho_{B})-S(\rho_{AB}),
\end{equation}
where $S(\rho) = -\mathrm{Tr}(\rho \log \rho)$ denotes the von Neumann entropy of the relevant state and suppose A and B share a quantum state $\rho_{AB}\in
\cal{H}_{A}\otimes \cal{H}_{B}$.
Assume that we perform a set of local projective measurements (von Neumann measurements)
$\{\Pi^{(j)}_{B}=|j_B\rangle\langle j_B|\}$ on subsystem B .
The measurements will disturb subsystem B and the whole system AB simultaneously.
If the measurement is taken over all
possible complete set of von Neumann projective measurement (one-dimensional orthogonal projectors), described
by ${\{\Pi^{j}_{B}}\}$, corresponding to outcomes j, on subsystem B the resulting
state is given by the shared ensemble $\{\rho_{A|i}, P_{i}\}$, where $ \rho_{A|i} $ is conditional density matrix of bipartite system :
\begin{equation}
\rho_{A|i}=\frac{1}{p_{i}}{(I_A\otimes\Pi^{j}_{B}) \rho_{AB} (I_A\otimes\Pi^{j}_{B})},
\end{equation}
and after taking partial trace over subsystem B, the result is state of subsystem A in form :
\begin{equation}
\rho_{A|i}=\frac{1}{p_{i}}\texttt{Tr}_B\{(I_A\otimes\Pi^{j}_{B}) \rho_{AB} (I_A\otimes\Pi^{j}_{B})\},
\end{equation}
\begin{equation}
P_{i}=\texttt{Tr} (\Pi^{j}_{B}\rho_{AB}\Pi^{j}_{B}),
\end{equation}
with $I_A$ being the identity matrix of subsystem A
and $\texttt{Tr}_{B}$ denotes the partial trace of over subsystem B.
As an example for the set of measurment, for the state of two qubits
\begin{equation}
\Pi^{1}=\frac{1}{2} (I+\sum_{j} n_{j} \sigma_{j}),
\end{equation}
\begin{equation}
\Pi^{2}=\frac{1}{2} (I-\sum_{j} n_{j} \sigma_{j}),
\end{equation}
that $ \sigma_{j} $ are the Pauli matrices and $ \widehat{n} $ is the Bloch sphere eigen vectors :
\begin{equation}
\widehat{n}=(\widehat{n}_{x}, \widehat{n}_{y}, \widehat{n}_{z})=(\sin\theta \cos\phi, \sin\theta \sin\phi, \cos\theta) .
\end{equation}
A quantum analogue of the conditional entropy can then be defined as
$S_{\{\Pi^{j}_{B}\}}(A|B)\equiv\sum_{i}P_{i}S(\rho_{A|i})$ and an
alternative version of the quantum mutual information can now be defined as $J_{\{\Pi^{j}_{B}\}}(A|B)=
S(\rho_{A} )- S_{\{\Pi^{j}_{B}\}}(A|B)$ where $\rho_{A} = \texttt{Tr}_{B}(\rho)$ and $\rho_{B} = \texttt{Tr}_{A}(\rho)$
are the reduced density matrix of subsystem A and subsystem B. The
above quantity depends on the selected set of von Neumann measurements or a suitable set of orthogonal projectors ${\{\Pi^{j}_{B}}\}$ . To get all the classical correlations present
in $\rho_{AB}$, we maximize $J_{\{\Pi^{j}_{B}\}}(\rho_{AB})$, over
all ${\{\Pi^{j}_{B}}\}$
\begin{equation}\label{122}
J (\rho_{AB})= Max_{\{\Pi^{j}_{B}\}}\{S(\rho_{A})-S_{\{\Pi^{j}_{B}\}}(A|B)\}.
\end{equation}
Then, quantum discord on subsystem B is defined (right quantum
discord) as:
$$D_{R}(\rho_{AB})=I(\rho_{AB})-J(\rho_{AB})$$
\begin{equation}
=S(\rho_{B})-S(\rho_{AB})+Min_{{\{\Pi^{j}_{B}}\}}S_{\{\Pi_{j}\}}(A|B).
\end{equation}
If the measurement is taken over all possible POVMs
${\{\Pi^{j}_{A}}\}$ on subsystem A, the resulting state is given by
the shared ensemble $\{\rho_{B|i}, P_{i}\}$, where $ \rho_{B|i} $ is conditional density matrix of bipartite system :
\begin{equation}
\rho_{B|i}=\frac{1}{p_{i}}{(\Pi^{j}_{A}\otimes I_B) \rho_{AB} (\Pi^{j}_{A}\otimes I_B)},
\end{equation}
and by taking partial trace over subsystem A , the result is state of subsystem B :
\begin{equation}
\rho_{B|i}=\frac{1}{p_{i}}\texttt{Tr}_A\{(\Pi^{j}_{A}\otimes I_B) \rho_{AB} (\Pi^{j}_{A}\otimes I_B)\},
\end{equation}
\begin{equation}
P_{i}=\texttt{Tr}(\Pi^{j}_{A}\rho_{AB}\Pi^{j}_{A}),
\end{equation}
with $I_B$ is the identity matrix of subsystem B
and $\texttt{Tr}_{A}$ denotes the partial trace of over subsystem A and similar to above relation, classical correlation and quantum discord
on subsystem A (left quantum discord) defined as
\begin{equation}\label{222}
J (\rho_{AB})= Max_{\{\Pi^{j}_{A}\}}\{S(\rho_{B})-S_{\{\Pi^{j}_{A}\}}(A|B)\},
\end{equation}
and
\begin{equation}\label{QD}
D_{L}(\rho_{AB})=S(\rho_{A})-S(\rho_{AB})+Min_{{\{\Pi^{j}_{A}}\}}S_{\{\Pi^{j}_{A}\}}(B|A).
\end{equation}
It has been shown that $D_{R}(\rho)$ and $D_{L}(\rho)$ are always
non-negative, but quantum discord is not symmetric, i.e.
$D_{R}(\rho)\neq D_{L}(\rho)$ in general \cite{zurek}.\\
As mentioned above, for quantifying correlations we need to apply optimization over POVM measures to extract all of classical correlations which is a nontrivial task \cite{Nielsen, Brandt, Sandor}. Therefore, it is difficult to calculate quantum discord in the general case
since the optimization should be taken. In this article, for some bipartite density matrices in the SU(N) algebra (i.e. X-states), we will see that the density matrix of the composite subsystems A and B after von Neumann measurements is a conditional density matrix of the bipartite system that is a classical-classical state, and for these X-states the right and left quantum discord are equal. Therefore, the set of measurements will be complete and our states change into a classical state, so we just extract the classical correlation from it. Maximization of $J(\rho_{AB})$ captures the maximum classical correlation that can
be extracted from the system, and whatever extra correlation that may remain is the quantum correlation. So, for these bipartite quantum systems, we can use relative entropy of discord instead of quantum discord.\\
The organization of this paper is as follows. In Sec. II we explain
relative entropy of discord and reveal the relation between quantum
discord and relative entropy of discord. In Sec. III, we give an
explanation about SU(N) algebra and general form of the density
matrix are obtained. In Sec. IV, we perform our investigation on two-qubit states and established relations. In Sec. V, we perform our
inquiry on qubit-qutrit states and got the same results. Finally, we
summarize our results in Sec. VI.
\section{Relative Entropy Of Discord}
The relative entropy is a non-negative and appropriate measure of distance
between two arbitary states, which
is defined as \cite{Modi3}
\begin{equation}
S(\rho\|\gamma)=\texttt{Tr}(\rho \log_2\rho-\rho \log_2\gamma).
\end{equation}
By using the concept of relative entropy, we can define the geometric discord (GD) as the minimum distance between the closest classical-classical state and the state of the bipartite system
\begin{equation}
GD_{rel}(\rho_{AB})=Min_{\chi\in \cal{C}}S(\rho_{AB}\|\chi),
\end{equation}
which $\chi $ belongs to set of classical states ($\cal{C}$) and minimum is taken over all possible states $\chi$. \\
Modi et al. have shown in [29] that the definition of relative entropy represented by equation (15) can be replaced with
\begin{equation}
S(x\|y)=S(y)-S(x).
\end{equation}
So we will have
$$GD_{rel}(\rho_{AB})=S(\rho_{AB}\|\chi)=S(\chi_{\rho_{AB}})-S(\rho_{AB})=$$
\begin{equation}\label{REL}
\texttt{Tr}(\rho_{AB}\log\rho_{AB}-\rho_{AB}\log{\chi_{\rho_{AB}}}).
\end{equation}
Moreover the closest classical-classical state is defined by
\begin{equation}
\chi_{\rho_{AB}}=\sum_{j}(\Pi^j_{A}\otimes\Pi^j_{B})\rho_{AB}(\Pi^j_{A}\otimes\Pi^j_{B}),
\end{equation}
where $ \Pi^j $ is the von Neumann projective measurement which acts on subsystems A and B.
Now, it can be shown that after the von Neumann measurement the result will be a conditional state as in equations (2) and (10), and for this kind of density matrix, e.g. an X-state, it is a classical-classical state. So for these bipartite systems, the optimization problem over measurements that is used for computing the quantum discord in equation (14) can be turned into the minimization of the distance between the density matrix of the bipartite system and the closest classical-classical state. Then we can use the geometric discord instead of the quantum discord for these density matrices.
Let the projection measurements $\Pi^{1}_{A,B}$ and $\Pi^{2}_{A,B}$ act on subsystems A and B. After applying the measurements, the conditional states will be
\begin{equation}
\rho_{A|i}=\sum_{i}\frac{(\mathbb{I}\otimes \Pi^{i}_B)\rho_{AB}(\mathbb{I}\otimes \Pi^{i}_B)}{\texttt{Tr} (\Pi^{i}_B\rho_{AB}\Pi^{i}_{B})},
\end{equation}
\begin{equation}
\rho_{B|i}=\sum_{i}\frac{(\Pi^{i}_A\otimes \mathbb{I})\rho_{AB}(\Pi^{i}_A\otimes \mathbb{I})}{\texttt{Tr} (\Pi^{i}_{A}\rho_{AB}\Pi^{i}_{A})}.
\end{equation}
We have investigated for these bipartite systems that considered here, after projective measurements on subsystems we get conditional states
$$\rho_{A|i}=\rho_{A|\Pi^{1}_B}+\rho_{A|\Pi^{2}_B}=\chi_{\rho_{AB}},$$
and also
$$\rho_{B|i}=\rho_{B|\Pi^{1}_A}+\rho_{B|\Pi^{2}_A}=\chi_{\rho_{AB}},$$
and here
$$\texttt{Tr}(\Pi^{i}_{B}\rho_{AB}\Pi^{i}_{B})=\texttt{Tr}(\Pi^{i}_{A}\rho_{AB}\Pi^{i}_{A})=Tr(\chi_{\rho_{AB}})=1. $$
So, by comparison of equations (\ref{REL}) and (\ref{QD}), after some calculation for these bipartite quantum states (X-states) we get
\begin{equation}
S(\chi_{\rho_{AB}})=S(\rho_{B})+Min_{{\{\Pi^{j}}\}}S_{\{\Pi^{j}\}}(B|A).
\end{equation}
So we have
$$D_{R,L}=S(\rho_{B})+Min_{{\{\Pi^{j}}\}}S_{\{\Pi^{j}\}}(B|A)-S(\rho_{AB})$$
\begin{equation}\label{re-dis}
=S(\chi_{\rho_{AB}})-S(\rho_{AB})=GD_{rel}.
\end{equation}
We apply this method for various examples of X-states such as on two qubits and calculate the quantum discord for these quantum states and show that the result is equal with results of previous papers, especially with Mazhar Ali's work in reference [11].\\
One of the other measure of quantum correlation is the quantum deficit which is defined as difference between the work or information of total system and information of subsystems after effect the LOCC operations to localization of information \cite{Modi3}. It is categorized such zero, one and two-way deficit that are different in type of interaction between subsystems. The zero-way quantum deficit is quantified as minimum of distance between the state of system and classical-classical state
\begin{equation}
\Delta=Min_{\Pi_{a},\Pi_{b}}(S(\chi_{\rho_{AB}})-S(\rho_{AB})),
\end{equation}
where $ \chi_{\rho_{AB}} $ is classical-classical state that represented in equation (19). So by these explanations, the zero-way quantum deficit is equal to minimum of relative entropy.
\section{SU(N) Description}
In this section, we show Hermitian operator on a discrete
N-dimensional Hilbert space $\cal{H}$ versus generators of the
SU(N) algebra \cite{Schlienz}. To obtain the generators of the SU(N)
algebra, introduce a set of N projection operators as follows:
\begin{equation}
\widehat{P}_{jk}=|j\rangle\langle k|,
\end{equation}
where $|n\rangle$ are the orthonormalized eigenstates of the linear
Hermitian operator. We can make $N^{2}-1$ operators with
\begin{equation}
\widehat{U}_{jk}=\widehat{P}_{jk}+\widehat{P}_{kj},
\end{equation}
\begin{equation}
\widehat{V}_{jk}=-i(\widehat{P}_{jk}-\widehat{P}_{kj}),
\end{equation}
\begin{equation}
\widehat{W}_{l}=\sqrt{\frac{2}{l(l+1)}}(P_{11}+\cdots+P_{ll}-lP_{l+1,l+1}),
\end{equation}
where \,$1\leq j<k\leq N$\,\,\,,\,\,\,$1\leq l\leq N-1$,
the set of the resulting operators are given by
\begin{equation}
\{\widehat{\lambda}_{j}\}=\{\widehat{U}_{jk}\}\cup\{\widehat{V}_{jk}\}\cup\{\widehat{W}_{l}\},
\end{equation}
$$\{j=1,2,...,N^{2}-1\},$$
the matrices $\{\widehat{\lambda}_{j}\}$ are called generalized Pauli
matrices or SU(N) generators and density matrix for this algebra is
represented by
\begin{equation}
\rho=\frac{1}{N}\mathbb{I}+\frac{1}{2}\sum_{j=1}^{N^{2}-1}\lambda_{j}\widehat{\lambda}_{j}.
\end{equation}
They also satisfy the following relations:
$$Tr(\widehat{\lambda}_{i}\widehat{\lambda}_{j})=2\delta_{ij},$$
$$S_{j}=Tr\{\widehat{\lambda}_{j}\rho\},$$
$$Tr\{\widehat{\lambda}_{j}\}=0.$$
For a bipartite system with states $\rho_{AB}\in\cal{H_A}\otimes \cal{H_B},$ $dim H_A=d_{A}$ and $dim H_B= d_{B},$ density matrix is shown in
Fano form \cite{Fano} as
\begin{equation}
\rho_{AB}=\frac{1}{d_{A}d_{B}}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}+
\sum^{N^{2}-1}_{i=1}\alpha_{i}\widehat{\lambda}_{i}^{A}\otimes\mathbb{I}_{B}+
\sum_{j=1}^{N^{2}-1}\beta_{j}\mathbb{I}_{A}\otimes\widehat{\lambda}_{j}^{B}
\end{equation}
\begin{equation}
+\sum_{i=1}^{N^{2}-1}\sum_{j=1}^{N^{2}-1}\gamma_{ij}\widehat{\lambda}_{i}^{A}\otimes\widehat{\lambda}_{j}^{B}).
\end{equation}
Closest classical-classical state with projection operators $P_{k}=|k\rangle\langle k|$ is given with
$$\chi_{\rho_{(AB)}}=\sum_{k}(P_{k}^{A}\otimes P_{k}^{B})\rho_{AB}
(P_{k}^{A}\otimes
P_{k}^{B})=\frac{1}{d_{A}d_{B}}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}$$
$$+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\alpha_{i}(P_{k}^{A}\lambda_{i}^{A}P_{k}^{A})\otimes\mathbb{I}_{B}+
\sum_{k=1}^{N}\sum_{j=1}^{N^{2}-1}\beta_{j}\mathbb{I}_{A}\otimes(P_{k}^{B}\lambda_{j}^{B}P_{k}^{B})$$
\begin{equation}
+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\sum_{j=1}^{N^{2}-1}\gamma_{ij}
(P_{k}^{A}\lambda_{i}^{A}P_{k}^{A})\otimes(P_{k}^{B}\lambda_{j}^{B}P_{k}^{B}),
\end{equation}
where after calculation takes the form:
\begin{equation}
\chi_{\rho_{AB}}=\sum_{A,B}(|k_{A}k_{B}\rangle\langle
k_{A}k_{B}|)\rho_{AB}(|k_{A}k_{B}\rangle\langle k_{A}k_{B}|),
\end{equation}
by applying the projection operators we obtain
\begin{equation}
P_{k}\{\widehat{V}_{jk}\}P_{k}=0,
\end{equation}
\begin{equation}
P_{k}\{\widehat{U}_{jk}\}P_{k}=0,
\end{equation}
\begin{equation}
P_{k}\{\widehat{W}_{l}\}P_{k}\neq0.
\end{equation}
Density matrix of projective measurements on the subsystem A for
density matrix Eq. (21) is
$$\rho_{B|k_1}=\sum_{k}(P_{k}^{A}\otimes
\mathbb{I})\rho_{AB}(P_{k}^{A}\otimes\mathbb{I})=\frac{1}{d_{A}d_{B}}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}$$
$$+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\alpha_{i}(P_{k}^{A}\lambda_{i}^{A}P_{k}^{A})\otimes\mathbb{I}_{B}
+\sum_{k=1}^{N}\sum_{j=1}^{N^{2}-1}\beta_{j}\mathbb{I}_{A}\otimes
\lambda_{j}^{B}$$
\begin{equation}
+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\sum_{j=1}^{N^{2}-1}\gamma_{ij}(P_{k}^{A}\lambda_{i}^{A}P_{k}^{A})\otimes
\lambda_{j}^{B}),
\end{equation}
with considering Eqs. (25, 26, 27) we will gain
$$\rho_{B|k_1}=\frac{1}{d_{A}d_{B}}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}$$
$$+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\alpha_{i}(P_{k}^{A}\{\widehat{W}_{l}\}P_{k}^{A})\otimes\mathbb{I}_{B}
+\sum_{k=1}^{N}\sum_{j=1}^{N^{2}-1}\beta_{j}\mathbb{I}_{A}\otimes
\lambda_{j}^{B}$$
\begin{equation}
+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\sum_{j=1}^{N^{2}-1}\gamma_{ij}(P_{k}^{A}\{\widehat{W}_{l}\}P_{k}^{A})\otimes
\lambda_{j}^{B}).
\end{equation}
Density matrix of projective measurements on the subsystem B for
density matrix Eq. (22) is
$$\rho_{A|k_2}=\sum_{k}(\mathbb{I}\otimes P_{k}^{B})\rho_{AB}(\mathbb{I}\otimes
P_{k}^{B})=\frac{1}{d_{A}d_{B}}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}$$
$$+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\alpha_{i}\lambda_{i}^{A}\otimes\mathbb{I}_{B}+
\sum_{k=1}^{N}\sum_{j=1}^{N^{2}-1}\beta_{j}\mathbb{I}_{A}\otimes
(P_{k}^{B}\lambda_{j}^{B}P_{k}^{B})$$
\begin{equation}
+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\sum_{j=1}^{N^{2}-1}
\gamma_{ij}\lambda_{i}^{A}\otimes(P_{k}^{B}\lambda_{j}^{B}P_{k}^{B})),
\end{equation}
with considering Eqs. (25, 26, 27) we will get
$$\rho_{A|k_2}=\frac{1}{d_{A}d_{B}}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}$$
$$+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\alpha_{i}\lambda_{i}^{A}\otimes\mathbb{I}_{B}+
\sum_{k=1}^{N}\sum_{j=1}^{N^{2}-1}\beta_{j}\mathbb{I}_{A}\otimes
(P_{k}^{B}\{\widehat{W}_{l}\}P_{k}^{B})$$
\begin{equation}
+\sum_{k=1}^{N}\sum_{i=1}^{N^{2}-1}\sum_{j=1}^{N^{2}-1}\gamma_{ij}\lambda_{i}^{A}
\otimes(P_{k}^{B}\{\widehat{W}_{l}\}P_{k}^{B})).
\end{equation}
The computation of quantum discord is dependent to optimization of measurment. In the next sections, two examples of X-state density matrix as Two-Qubit and Qubit-Qutrit have been presented. We show that optimization of measurment can be replaced by minimum of distance between the state of bipartite system and its classical-classical state. Also we are following to extend this method to higher bipartite systems.
\section{Two-Qubit density matrices}
As the first example, we investigate two qubits state which we frequently encounter in condensed matter systems, quantum dynamic, etc. and apply our achievements. The general form of two qubits density matrix is given by
\begin{equation}\label{density}
\rho_{AB}=\frac{1}{4}(\mathbb{I}_{2}\otimes
\mathbb{I}_{2}+\sum_{i=1}^{3}\alpha_{i}\sigma_{i}\otimes
\mathbb{I}_{2}+\sum_{i=1}^{3}\beta_{i}\mathbb{I}_{2}\otimes
\sigma_{i}+\sum_{i,j=1}^{3}\gamma_{ij}\sigma_{i}\otimes\sigma_{j}),
\end{equation}
where $\alpha_{i}, \beta_{i}, \gamma_{ij}\in\mathbb{R},$\ and
$\sigma_{i}$ $(i=1,2,3)$ are three Pauli matrices and $\mathbb{I}$
is identity matrix. For this density matrix, closest
classical-classical state according Eq.(25) calculate as follow
$$\chi_{\rho_{AB}}=\frac{1}{4}(\mathbb{I}_{2}\otimes\mathbb{I}_{2}
+\sum_{k_{A}=1}^{2}\sum_{i=1}^{3}\alpha_{i}(|k_{A}\rangle\langle
k_{A}|\sigma_{i}^{A}|k_{A}\rangle\langle
k_{A}|)\otimes\mathbb{I}_{2}+$$
$$\sum_{k_{B}=1}^{2}\sum_{j=1}^{3}\beta_{j}\mathbb{I}_{2}\otimes
(|k_{B}\rangle\langle k_{B}|\sigma_{j}^{B}|k_{B}\rangle\langle
k_{B}|)+$$
\begin{equation}
\sum_{k_{A}=1}^{2}\sum_{k_{B}=1}^{2}\sum_{i,j=1}^{3}\gamma_{ij}(|k_{A}\rangle\langle
k_{A}|\sigma_{i}^{A}|k_{A}\rangle\langle
k_{A}|)\otimes(|k_{B}\rangle\langle
k_{B}|\sigma_{j}^{B}|k_{B}\rangle\langle k_{B}|).
\end{equation}
With refer to equations (2) and (10) and apply the measurments on $ \rho_{AB} $, the conditional states obtain as
$$
\rho_{B|k_1}=\frac{1}{4}(\mathbb{I}_{2}\otimes\mathbb{I}_{2}+
\sum_{k_{A}=1}^{2}\sum_{i=1}^{3}\alpha_{i} (|k_{A}\rangle\langle
k_{A}|\sigma_{i}^{A}|k_{A}\rangle\langle
k_{A}|)\otimes\mathbb{I}_{2}$$
\begin{equation}+\sum_{j=1}^{3}\beta_{j}\mathbb{I}_{2}\otimes
\sigma_{j}^{B}+\sum_{k_{A}=1}^{2}\sum_{i=1}^{3}\sum_{j=1}^{3}\gamma_{ij}(|k_{A}\rangle\langle
k_{A}|\sigma_{i}^{A}|k_{A}\rangle\langle k_{A}|)\otimes
\sigma_{j}^{B}),
\end{equation}
and
$$\rho_{A|k_2}=\frac{1}{4}(\mathbb{I}_{A}\otimes\mathbb{I}_{B}
+\sum_{i=1}^{3}\alpha_{i}\sigma_{i}^{A}\otimes\mathbb{I}_{B}$$
$$+\sum_{k_{B}=1}^{2}\sum_{j=1}^{3}\beta_{j}\mathbb{I}_{A}\otimes
(|k_{B}\rangle\langle k_{B}|\sigma_{j}^{B}|k_{B}\rangle\langle
k_{B}|)$$
\begin{equation}
+\sum_{k_{B}=1}^{2}\sum_{i=1}^{3}\sum_{j=1}^{3}\gamma_{ij}\sigma_{i}^{A}\otimes(|k_{B}\rangle\langle
k_{B}|\sigma_{j}^{B}|k_{B}\rangle\langle k_{B}|)).
\end{equation}
We choose the measurement in the eigenbasis of $ \sigma_z $, i.e.
$$|k_{A}\rangle\langle k_{A}|= |0\rangle\langle 0|,$$
and
$$|k_{B}\rangle\langle k_{B}|= |1\rangle\langle 1|.$$
In this paper, we consider X-state density matrices, so called because the visual appearance of these density matrices resembles the letter X. By applying the mentioned measurements in equation (40), it becomes an X-state under the following conditions
$$\alpha_{1}=\alpha_{2}=\beta_{1}=\beta_{2}=0,$$
$$\gamma_{31} =\gamma_{13}=\gamma_{32}=\gamma_{23}= 0$$
So density matrix for two qubits in form X-state
obtains as
\begin{equation}
\rho_{AB}=\left(
\begin{array}{cccc}
\rho_{11}&0&0&\rho_{14}\\
0&\rho_{22}&\rho_{23}&0\\
0&\rho_{32}&\rho_{33}&0\\
\rho_{41}&0&0&\rho_{44}\\
\end{array}
\right),
\end{equation}
where
$$\rho_{11}=1+\gamma_{33}+\alpha_{3}+\beta_{3},$$
$$\rho_{22}=1-\gamma_{33}+\alpha_{3}-\beta_{3},$$
$$\rho_{33}=1-\gamma_{33}-\alpha_{3}+\beta_{3},$$
$$\rho_{44}=1+\gamma_{33}-\alpha_{3}-\beta_{3},$$
$$\rho_{14}=\rho^\ast_{41}=\gamma_{11}-i\gamma_{12}-i\gamma_{21}-\gamma_{22},$$
$$\rho_{23}=\rho^\ast_{32}=\gamma_{11}+i\gamma_{12}-i\gamma_{21}+\gamma_{22},$$
and also $ \sum_{i}{\rho_{ii}}=1 $. By applying the measurement in equation (32), the closest classical-classical state will be
\begin{equation}
\chi_{\rho_{AB}}=\left(
\begin{array}{cccc}
\rho_{11}&0&0&0\\
0&\rho_{22}&0&0\\
0&0&\rho_{33}&0\\
0&0&0&\rho_{44}\\
\end{array}
\right).
\end{equation}
Moreover the conditional states represented by equations (42) and (43) are
\begin{equation}
\rho_{A|i}=\rho_{A|1}+\rho_{A|2}=\chi_{\rho_{AB}},
\end{equation}
and also
\begin{equation}
\rho_{B|i}=\rho_{B|1}+\rho_{B|2}=\chi_{\rho_{AB}}.
\end{equation}
The von Neumann entropy of $ \chi_{\rho_{AB}} $ is
\begin{equation}
S(\chi_{\rho_{AB}})=-\sum \rho_{ii}\log_{2}\rho_{ii},
\end{equation}
In the other hand $ S_{\Pi^j}(B|A) $ will be [11]
\begin{equation}
S_{\Pi^{j}}(B|A)=-\frac{1+\delta_{z}}{2} \log_{2} \frac{1+\delta_{z}}{2}
- \frac{1-\delta_{z}}{2} \log_{2} \frac{1-\delta_{z}}{2},
\end{equation}
where $ \delta_{z} = |(\rho_{11}+\rho_{44})-(\rho_{22}+\rho_{33})| $ and also the state of subsystems A and B respectively is
\begin{equation}
\rho_{A}=\left(
\begin{array}{cccc}
\rho_{11}+\rho_{22}&0\\
0&\rho_{33}+\rho_{44}\\
\end{array}
\right),
\end{equation}
\begin{equation}
\rho_{B}=\left(
\begin{array}{cccc}
\rho_{11}+\rho_{33}&0\\
0&\rho_{22}+\rho_{44}\\
\end{array}
\right),
\end{equation}
then von Neumann entropy of $ \rho_{B} $ obtains as
\begin{equation}
S(\rho_{B})=-((\rho_{11}+\rho_{33}) \log_{2} (\rho_{11}+\rho_{33}) + (\rho_{22}+\rho_{44}) \log_{2} (\rho_{22}+\rho_{44})).
\end{equation}
By simplifying these equations we get
\begin{equation}
S(\rho_B)+S_{\{\Pi^{j}_{B}\}}(B|A)=-\sum \rho_{ii}\log_{2}\rho_{ii}=S(\chi_{\rho_{AB}}).
\end{equation}
So
$$D(\rho_{AB})=S(\rho_B)+S_{\{\Pi^{j}_{B}\}}(B|A)-S(\rho_{AB})$$
\begin{equation}=S(\chi_{\rho_{AB}})-S(\rho_{AB})=GD(\rho_{AB}).
\end{equation}
It can be shown that if $ \delta_{x} $ is the optimal value, then we choose the von Neumann measurement in the eigenbasis of $ \sigma_{x} $, i.e. $ |k_{A}\rangle=\frac{|0\rangle+|1\rangle}{\sqrt{2}} $ , $|k_{B}\rangle=\frac{|0\rangle-|1\rangle}{\sqrt{2}} $, and similarly for $ \delta_{y} $, i.e. $ |k_{A}\rangle=\frac{|0\rangle+i|1\rangle}{\sqrt{2}} $ , $|k_{B}\rangle=\frac{|0\rangle-i|1\rangle}{\sqrt{2}} $,
and the results are the same as those of Mazhar Ali et al. [11].
\section{Qubit-Qutrit states}
As the second example, we have generalized our relations for Qubit-Qutrit,
including $\rho_{AB}\in\cal{H_A}\otimes \cal{H_B},$ $dim H_A=2$ and $dim H_B=3$. So,
density matrix versus SU(N) algebra can be represented as
\begin{equation}
\rho=\frac{1}{6}(\mathbb{I}_{2}\otimes\mathbb{I}_{3}+
\sum_{i=1}^{3}\alpha_{i}\sigma_{i}\otimes\mathbb{I}_{3}+\sum_{i}^{8}\sqrt{3}
\beta_{i}\mathbb{I}_{2}\otimes\lambda_{i}
+\sum_{i}^{3}\sum_{j}^{8}\gamma_{ij}\sigma_{i}\otimes\lambda_{j}),
\end{equation}
where $\alpha_{i}, \beta_{i}, \gamma_{ij} \in \mathbb{R},\,
\sigma_{i} (i=1,2,3)$, are three Pauli matrices and $\lambda_{i}
(i=1,\cdots,8)$, are Gell mann matrices and $\mathbb{I}$ is identity
matrix. We applied the conditions until matrix comes in the form
X-state. These conditions are
as $\{\alpha_{3};\beta_{3};\gamma_{33};\gamma_{38};\gamma_{24};\gamma_{14};\gamma_{25};\gamma_{15}\}\neq0,$ and other coefficients are equal to zero. So, density matrix can be written as
$$
\rho_{AB}=\frac{1}{6}(\mathbb{I}_{2}\otimes\mathbb{I}_{3}+
\alpha_{3}\sigma_{3}\otimes\mathbb{I}_{3}+\sqrt{3}
\beta_{3}\mathbb{I}_{2}\otimes\lambda_{3}+\gamma_{33}\sigma_{3}\otimes\lambda_{3}
$$
\begin{equation}\label{density2}
+\gamma_{38}\sigma_{3}\otimes\lambda_{8}+
\gamma_{24}\sigma_{2}\otimes\lambda_{4}+\gamma_{14}\sigma_{1}\otimes\lambda_{4}+\gamma_{25}\sigma_{2}\otimes\lambda_{5}+\gamma_{15}\sigma_{1}\otimes\lambda_{5}).
\end{equation}
By using Eq. (\ref{REL}) relative entropy of discord for this density matrix
Eq. (\ref{density2}) is equal to
\begin{equation}\label{relt1}
D_{rel}(\rho)=\sum_{i=1}^{6}-(\Phi_{i}\log_{2}\Phi_{i}+\Psi_{i}\log_{2}\Psi_{i}),
\end{equation}
where
$$\Phi_{1,2}=\frac{1}{6}(1-2\beta_{8}\pm \alpha_{3}\mp\frac{2\gamma_{38}}{\sqrt{3}}),$$
$$\Phi_{3,4}=\frac{1}{6}(1+\sqrt{3}\beta_{3}+\beta_{8}\mp \alpha_{3}\mp \gamma_{33}\mp\frac{\gamma_{38}}{\sqrt{3}}),$$
$$\Phi_{5,6}=\Psi_{5,6}=\frac{1}{6}
(1-\sqrt{3}\beta_{3}+\beta_{8}-\alpha_{3}\pm
\gamma_{33}\mp\frac{\gamma_{38}}{\sqrt{3}}),$$
$$\Psi_{1,2}=\frac{1}{36}(6+
3\sqrt{3}\beta_{3}-3\beta_{8}+3\gamma_{33}+3\sqrt{3}\gamma_{38}\pm$$
$$\sqrt{3}[9\beta_{3}^{2}+27\beta_{8}^{2}+36\beta_{8}\alpha_{3}+12\alpha_{3}^{2}+
12((\gamma_{15}+\gamma_{24})^{2}+(\gamma_{14}-\gamma_{25})^{2})$$
$$+3\gamma_{33}^{2}+6\beta_{3}
(3\sqrt{3}\beta_{8}+2\sqrt{3}\alpha_{3}+\sqrt{3}\gamma_{33}-\gamma_{38})$$
$$-6\sqrt{3}\beta_{8}\gamma_{38}-4\sqrt{3}\alpha_{3}\gamma_{38}+\gamma_{38}^{2}+2\gamma_{33}
(9\beta_{8}+6\alpha_{3}-\sqrt{3}\gamma_{38})]^{\frac{1}{2}}),$$
$$\Psi_{3,4}=\frac{1}{36}(6 +
3\sqrt{3}\beta_{3}-3\beta_{8}-3\gamma_{33}-3\sqrt{3}\gamma_{38}\pm$$
$$\sqrt{3}[9\beta_{3}^{2}+27\beta_{8}^{2}-36\beta_{8}\alpha_{3}+12\alpha_{3}^{2}+
12((\gamma_{15}-\gamma_{24})^{2}+(\gamma_{14}+\gamma_{25})^{2})$$
$$+3\gamma_{33}^{2}+6\beta_{3}
(3\sqrt{3}\beta_{8}-2\sqrt{3}\alpha_{3}-\sqrt{3}\gamma_{33}+\gamma_{38})$$
$$+6\sqrt{3}\beta_{8}\gamma_{38}-4\sqrt{3}\alpha_{3}\gamma_{38}+\gamma_{38}^{2}+2\gamma_{33}
(-9\beta_{8}+6\alpha_{3}-\sqrt{3}\gamma_{38})]^{\frac{1}{2}}).$$
It can be seen that the result in Eq.~(\ref{relt1}) is equal to the result
obtained for the quantum discord of the qubit--qutrit density matrix. Here we consider the set of measurements in the eigenbasis of $S_{z}$. To
better illustrate the results for $2\times3$ matrices, we consider
the following example \cite{Karpat,kapil,Mazhar}:
\begin{equation}
\begin{split}
\rho=\frac{p}{2}(|00\rangle\langle00|+|01\rangle\langle01|+|00\rangle\langle12|+
|11\rangle\langle11|+|12\rangle\langle12|+|12\rangle\langle00|)\\
+\frac{1-2p}{2}(|02\rangle\langle02|+|02\rangle\langle10|+
|10\rangle\langle02|+|10\rangle\langle10|),
\end{split}
\end{equation}
where classical correlation $\chi_{\rho}$ are obtained as follows.
\begin{equation}
\chi_{\rho}=\frac{1}{2} \left(
\begin{array}{cccccc}
p&0&0&0&0&0\\
0&p&0&0&0&0\\
0&0&1-2p&0&0&0\\
0&0&0&1-2p&0&0\\
0&0&0&0&p&0\\
0&0&0&0&0&p\\
\end{array}
\right),
\end{equation}
and we have
\begin{equation}
S(\chi_{\rho_{AB}})=1-2p\log_{2}p-(1-2p)\log_{2}(1-2p).
\end{equation}
\section{Conclusions}
In this paper, we have investigated an analytical method for computing the quantum
discord of some bipartite quantum systems. We have shown that, with suitable
orthogonal projective measurements on the subsystems, the resulting matrix is a classical-classical state and the set of measurements is complete. Thus, for the states obtained after measurement, the optimization over orthogonal
projective measurements can be turned into the minimization of the distance between
the state of the bipartite system and its closest
classical-classical state. This
means that the relative entropy of discord can be replaced with
the quantum discord, and we have justified our claim with the examples
mentioned above. We plan to extend this method to the case of higher-dimensional bipartite systems in future work.
\section{Acknowledgments}
This work is published as a part of research project supported by the university of
Tabriz research affairs office.
\end{document} |
\begin{document}
\title[Identities involving Stirling numbers of types $B$ and $D$]{Some identities involving second kind Stirling numbers of types $B$ and $D$}
\thanks{This research was supported by a grant from the Ministry of Science and Technology, Israel, and the France's Centre National pour la Recherche Scientifique (CNRS)}
\author{Eli Bagno, Riccardo Biagioli and David Garber}
\address{Eli Bagno, Jerusalem College of Technology\\
21 Havaad Haleumi St. Jerusalem, Israel}
\email{bagnoe@g.jct.ac.il}
\address{Riccardo Biagioli\\
Institut Camille Jordan, Universit\'e Claude Bernard Lyon 1 \\
69622 Villeurbanne Cedex, France}
\email{biagioli@math.univ-lyon1.fr}
\address{David Garber \\
Department of Applied Mathematics, Holon Institute of Technology,
52 Golomb St., PO Box 305, 58102 Holon, Israel, and (sabbatical:) Einstein Institute of Mathematics, Hebrew University of Jerusalem, Jerusalem, Israel }
\email{garber@hit.ac.il}
\date{\today}
\maketitle
\begin{abstract}
Using Reiner's definition of Stirling numbers of the second kind in types $B$ and $D$, we generalize two well-known identities concerning the classical Stirling numbers of the second kind. The first identity relates them with Eulerian numbers and the second identity interprets them as entries in a transition matrix between the elements of two standard bases of the polynomial ring $\mathbb{R}[x]$. Finally, we generalize these identities to the group of colored permutations $G_{m,n}$.
\end{abstract}
\section{Introduction}
The {\em Stirling number of the second kind}, denoted $S(n,k)$, is defined as the number of partitions of the set $[n]:=\{1,\dots,n\}$ into $k$ non-empty subsets (see \cite[page 81]{EC1}). Stirling numbers of the second kind arise in a variety of problems in enumerative combinatorics; they have many combinatorial interpretations, and have been generalized in various contexts and in different ways.
In the geometric theory of Coxeter groups they appear as follows. For any finite Coxeter group $W$, there is a corresponding hyperplane arrangement $\mathcal{W}$, whose elements are the reflecting hyperplanes of $W$. Associated with $\mathcal{W}$, there is the set of all the intersections of these hyperplanes, ordered by reverse inclusion, called the {\em intersection lattice}, and denoted $L(\mathcal{W})$ (see e.g. \cite{BS,Stanley-arr}). It is well-known that in the Coxeter group of type $A$, $L(\mathcal{A}_n)$ is isomorphic to the lattice of the set partitions of $[n]$. By this identification, the subspaces of dimension $n-k$ are counted by $S(n,k)$. In this geometric context, Stirling numbers of the second kind are usually called Whitney numbers (see \cite{Su,Za} for more details).
For Coxeter groups of types $B$ and $D$, Zaslavsky \cite{Za} gave a description of $L(\mathcal{B}_n)$ and $L(\mathcal{D}_n)$ by using the general theory of signed graphs. Then, Reiner~\cite{R} gave a different combinatorial representation of $L(\mathcal{B}_n)$ and $L(\mathcal{D}_n)$ in terms of new types of set partitions, called $B_n$- and {\em $D_n$-partitions}. We call the number of $B_n$- (resp. $D_n$-) partitions with $k$ pairs of nonzero blocks the {\em Stirling number of the second kind of type} $B$ (resp. {\em type} $D$).
The posets of $B_n$- and $D_n$-partitions, as well as their isomorphic intersection lattices, have been studied in several papers~\cite{BjS, BjW, BS, CW1, CW2, Su}, from algebraic, topological and combinatorial points of view. However, to our knowledge, two famous identities concerning the classical Stirling numbers of the second kind (see e.g. Bona~\cite[Theorems 1.8 and 1.17]{Bo}) have not been generalized to types $B$ and $D$ in a combinatorial way: the first identity relates the Stirling numbers to the Eulerian numbers, and the second one formulates a change of bases in ${\mathbb{R}}[x]$; both will be described below.
The original definition of the {\em Eulerian numbers} was given by Euler in an analytic context \cite[\S 13]{Eu}. Later, they began to appear in combinatorial problems, as the Eulerian number $A(n,k)$ counts the number of permutations in the symmetric group $S_n$ having $k-1$ descents, where a {\it descent} of $\sigma \in S_n$ is an element of the {\em descent set} of $\sigma$, defined by :
\begin{equation}\label{des_typeA}
{\rm Des}(\sigma):=\{i \in [n-1]\mid \sigma(i)>\sigma(i+1)\}.
\end{equation}
We denote by ${{\rm{des}}}(\sigma):=|{\rm{Des}}(\sigma)|$ the {\em descent number}.
The first above-mentioned identity relating Stirling numbers of the second kind and Eulerian numbers is the following one, see e.g. \cite[Theorem 1.17]{Bo}:
\begin{thm}\label{thm:typeA}
For all non-negative integers $n \geq r$, we have
\begin{equation}\label{eq:Stirling_Eulerian_typeA}
S(n,r) = \frac{1}{r!}\sum_{k=0}^r A(n, k) \binom{n-k}{r-k}.
\end{equation}
\end{thm}
The second identity arises when one expresses the standard basis of the polynomial ring ${\mathbb{R}}[x]$ as a linear combination of the basis consisting of the falling factorials (see e.g. the survey of Boyadzhiev \cite{Boy}):
\begin{thm}\label{thm:typeA_falling}
Let $x \in \mathbb{R}$ and let $n \in \mathbb{N}$. Then we have
\begin{equation}\label{reg}
x^n=\sum\limits_{k=0}^n{S(n,k)[x]_k},
\end{equation}
\noindent
where $[x]_k:=x(x-1) \cdots (x-k+1)$ is the {\em falling factorial of degree $k$} and $[x]_0:=1$.
\end{thm}
There are some known proofs for the last identity. A combinatorial one, realizing $x^n$ as the number of functions from the set $\{1,\dots,n \}$ to the set $\{1,\dots,x \}$ (for a positive integer $x$), is presented in \cite[Eqn. (1.94d)]{EC1}.
The first geometric proof is due to Knop \cite{K}.
In this paper, we use Stirling numbers of the second kind of types $B$ and $D$, in order to generalize the identities stated in Equations \eqref{eq:Stirling_Eulerian_typeA} and \eqref{reg}. Theorems~\ref{main_thm_B} and \ref{main_thm_D} below are generalizations of the first identity for types $B$ and $D$: they will be proven by providing explicit procedures to construct ordered set partitions starting from the elements of the corresponding Coxeter groups.
Theorems~\ref{thm_bala} and~\ref{thm_bala_D} generalize the second identity. We present here a geometric approach, suggested to us by Reiner~\cite{R1}, which is based on some geometric characterizations of the intersection lattices of types $B$ and $D$.
Moreover, we show how to generalize these two classical identities to the colored permutations group $G_{m,n}$.
The rest of the paper is organized as follows. Sections \ref{Eulerian numbers} and \ref{Set partitions} present the known generalizations of the Eulerian numbers and the set partitions, respectively, to the Coxeter groups of types $B$ and $D$. In Sections \ref{connections between Stirling and Euler} and \ref{Falling polynomials for Coxeter groups}, we state our generalizations of the two identities and prove them.
Finally, in Section \ref{Possible generalizations}, we present some possible extensions of the main results.
\section{Eulerian numbers of types $B$ and $D$} \label{Eulerian numbers}
We start with some notations. For $n\in \mathbb{N}$, we set $[\pm n]:=\{\pm 1,\ldots,\pm n\}$. For a subset $B \subseteq [\pm n]$, we denote by $-B$ the set obtained by negating all the elements of $B$, and by $\pm B$ we denote the unordered pair of sets $B,-B$.
Let $(W,S)$ be a Coxeter system. As usual, denote by $\ell(w)$ the {\em length} of $w \in W$, which is the minimal integer $k$ satisfying $w=s_1\cdots s_k$ with $s_i \in S$.
The {\em (right) descent set} of $w \in W$ is defined to be
$${\rm Des}(w):=\{s \in S \mid \ell(ws)<\ell(w)\}.$$
A combinatorial characterization of ${\rm Des}(w)$ in type $A$ is given by Equation \eqref{des_typeA} above. Now we recall analogous descriptions in types $B$ and $D$.
We denote by $B_{n}$ the group of all bijections ${\beta}$ of the set
$[\pm n]$ onto itself such that
\[{\beta}(-i)=-{\beta}(i)\]
for all $i \in [\pm n]$, with composition as the
group operation. This group is usually known as the group of {\em
signed permutations} on $[n]$.
If ${\beta} \in B_{n}$, then we write ${\beta}=[{\beta}(1),\dots,{\beta}(n)]$ and we call this the
{\em window} notation of ${\beta}$.
As a set of generators for $B_n$ we
take $S_B:=\left\{ s_0^B, s_1^B,\ldots,s_{n-1}^B \right\}$ where for $i \in[n-1]$
\[s_i^B:=[1,\ldots,i-1,i+1,i,i+2,\ldots,n] \;\; {\rm and} \;\; s_0^B:=[-1,2,\ldots,n].\]
It is well-known that $(B_n,S_B)$ is a Coxeter system of type $B$ (see e.g. \cite[\S 8.1]{BB}). The following characterization of the (right) descent set of ${\beta} \in B_n$ is well-known \cite{BB}.
\begin{prop} Let $\beta \in B_n$. Then
\begin{eqnarray*}
{\rm{Des}}_B(\beta)=\{i \in [0,n-1] \mid \beta(i) > \beta(i+1)\},
\end{eqnarray*}
where $\beta(0):=0$ (we use the usual order on the integers). In particular, $0 \in {\rm{Des}}_B(\beta)$ is a descent if and only if $\beta(1) < 0$. We set ${\rm{des}}_B(\beta):=|{\rm{Des}}_B(\beta)|.$
\end{prop}
For all non-negative integers $n\geq k$, we set
\begin{equation}\label{def:Eulerian_B}
A_B(n,k):=|\{\beta \in B_n \mid {\rm{des}}_B(\beta)= k \}|,
\end{equation}
and we call them the {\em Eulerian numbers of type} $B$.
Note that in our context $A_B(n,k)$ counts permutations in $B_n$ having $k$ descents rather than $k-1$, like in type $A$, since this produces nicer formulas.
We denote by $D_{n}$ the subgroup of $B_{n}$ consisting of all the
signed permutations having an {\em even} number of negative entries in
their window notation.
It is usually called the {\em even-signed permutation group}. As a set of generators for $D_n$ we take
$S_D:=\left\{ s_{0}^D,s_{1}^D,\dots,s_{n-1}^D \right\}$ where for $i \in [n-1]$:
\[s_i^D:=s_i^B \;\; {\rm and} \;\; s_{0}^D:=[-2,-1,3,\ldots,n].\]
It is well-known that $(D_n,S_D)$ is a Coxeter system of type $D$, and there is a direct combinatorial way to compute the (right) descent set of ${\gamma} \in D_{n}$ (see e.g. \cite[\S 8.2]{BB}):
\begin{prop}Let $\gamma \in D_n$. Then
\begin{eqnarray*}\label{lD}
{\rm{Des}}_D(\gamma)=\{i \in [0,n-1] \mid \gamma(i)>\gamma(i+1)\},
\end{eqnarray*}
where $\gamma(0):=-\gamma(2)$. In particular, $0 \in {\rm{Des}}_D(\gamma)$ if and only if\break $\gamma(1)+\gamma(2)<0$. We set ${\rm{des}}_D(\gamma):=|{\rm{Des}}_D(\gamma)|$.
\end{prop}
For all non-negative integers $n\geq k$, we set:
\begin{equation}\label{def:Eulerian_D}
A_D(n,k):=|\{\gamma \in D_n \mid {\rm{des}}_D(\gamma)=k\}|,
\end{equation}
and we call them the {\em Eulerian numbers of type} $D$.
For example, if ${\gamma}=[1,-3,4,-5,-2,-6]$, then:
$${\rm{Des}}_D({\gamma})=\{0,1,3,5\} \mbox{, but } {\rm{Des}}_B({\gamma})=\{1,3,5\}.$$
\section{Set partitions of types $B$ and $D$}\label{Set partitions}
In this section, we introduce the concepts of set partitions of types $B$ and $D$ as defined by Reiner \cite{R}.
As mentioned above, we denote by $L(\mathcal{W})$
the intersection lattice corresponding to the Coxeter hyperplane arrangement $\mathcal{W}$ of a finite Coxeter group $W$.
We will focus only on the hyperplane arrangements of types $A$, $B$ and
$D$. In terms of the coordinate functions $x_1,\dots,x_n$ in ${\mathbb{R}}^n$, they can be defined as follows:
\begin{eqnarray*}
\mathcal{A}_n &:=& \{\ \{ x_i = x_j\} \mid 1 \leq i < j \leq n \},\\
\mathcal{B}_n &:=& \{\ \{ x_i = \pm x_j\} \mid 1 \leq i < j \leq n \} \cup \{\ \{ x_i = 0\} \mid 1 \leq i \leq n\},\\
\mathcal{D}_n &:=& \{\ \{ x_i = \pm x_j\} \mid 1 \leq i < j \leq n \}.
\end{eqnarray*}
It is well-known that in type $A$, the intersection lattice $L(\mathcal{A}_n)$ is isomorphic to the lattice of set partitions of $[n]$.
In type $B$, let us consider the following element of $L(\mathcal{B}_9)$:
$$\{x_1=-x_3=x_6=x_8=x_9, x_2=x_4=0, x_5=-x_7\}.$$
It can be easily presented as the following set partition of $[\pm 9]$:
$$\{\{1, -3,6,8,-9\},\{-1,3,-6,-8,9\},\{2,-2,4,-4\},\{5,-7\},\{-5, 7\}\}.$$
This probably was Reiner's motivation to define the set partitions of type $B$, as follows:
\begin{defn}
A {\it $B_n$-partition} is a set partition of $[\pm n]$ into blocks such that the following conditions are satisfied:
\begin{itemize}
\item There exists at most one block satisfying $-C=C$, called the {\em zero-block}. It is a subset of $[\pm n]$ of the form $\{\pm i \mid i \in S\}$ for some $S \subseteq [n]$.
\item If $C$ appears as a block in the partition, then $-C$ also appears in that partition.
\end{itemize}
\end{defn}
A similar definition holds for set partitions of type $D$:
\begin{definition}
A {\em $D_n$-partition} is a $B_n$-partition such that the zero-block, if exists, contains at least two positive elements.
\end{definition}
We denote by $S_B(n,r)$ ({\em resp.} $S_D(n,r)$) the number of $B_n$- ({\em resp.}\break $D_n$-) partitions having exactly $r$ pairs of nonzero blocks. These numbers are called {\em Stirling numbers (of the second kind) of type $B$} ({\em resp. type} $D$). They correspond, respectively, to the sequences oeis.org/A039755 and oeis.org/A039760 in the OEIS. Tables~\ref{Table1} and~\ref{Table2} record these numbers for small values of $n$ and $r$.
We now define the concept of an ordered set partition:
\begin{defn}
A $B_n$-partition (or $D_n$-partition) is {\em ordered} if the set of blocks is totally ordered and the following conditions are satisfied:
\begin{itemize}
\item If the zero-block exists, then it appears as the first block.
\item For each block $C$ which is not the zero-block, the blocks $C$ and $-C$ are adjacent.
\end{itemize}
\end{defn}
\begin{exa} The following partitions
\begin{eqnarray*}P_1 & = &\{ \{ \pm 3\}, \pm\{ -2,1 \}, \pm\{ -4,5 \} \}, \\
P_2 & = &\{ \pm\{ 1 \},\pm\{ 2 \}, \pm\{ -4,3 \} \},\\
P_3 &= &\left[ \{ \pm 1, \pm 3\}, \{ -2 \}, \{ 2 \},\{ -4,5 \},\{ -5,4 \} \right],
\end{eqnarray*}
are respectively, a $B_5$-partition which is not a $D_5$-partition, a $D_4$-partition with no zero-block, and an ordered $D_5$-partition having a zero-block.
\end{exa}
\begin{table}
\begin{center}
\begin{tabular}{r||r|r|r|r|r|r|r}
$n/r$ & 0 & 1 & 2 & 3 & 4 & 5 & 6 \\
\hline\hline
0 & 1 & & & & & & \\
1 & 1 & 1 & & & & & \\
2 & 1 & 4 & 1 & & & & \\
3 & 1 & 13 & 9 & 1 & & & \\
4 & 1 & 40 & 58 & 16 & 1 & & \\
5 & 1 & 121 & 330 & 170 & 25 & 1 & \\
6 & 1 & 364 & 1771 & 1520 & 395 & 36 & 1 \\
\end{tabular}
\end{center}
\caption{Stirling numbers $S_B(n,r)$ of the second kind of type $B$.}\label{Table1}
\end{table}
\begin{table}
\begin{center}
\begin{tabular}{r||r|r|r|r|r|r|r}
$n/r$ & 0 & 1 & 2 & 3 & 4 & 5 & 6 \\
\hline\hline
0 & 1 & & & & & & \\
1 & 0 & 1 & & & & & \\
2 & 1 & 2 & 1 & & & & \\
3 & 1 & 7 & 6 & 1 & & & \\
4 & 1 & 24 & 34 & 12 & 1 & & \\
5 & 1 & 81 & 190 & 110 & 20 & 1 & \\
6 & 1 & 268 & 1051 & 920 & 275 & 30 & 1 \\
\end{tabular}
\end{center}
\caption{Stirling numbers $S_D(n,r)$ of the second kind of type $D$.}\label{Table2}
\end{table}
\section{Connections between Stirling and Eulerian numbers of types $B$ and $D$}\label{connections between Stirling and Euler}
In this section, we present two generalizations of Theorem \ref{thm:typeA} for Coxeter groups of types $B$ and $D$.
\begin{thm}\label{main_thm_B} For all non-negative integers $n \geq r$, we have:
$$ S_{B}(n,r)= \frac{1}{2^{r}r!} \sum\limits_{k=0}^r {A_B(n,k){\binom{n-k}{r-k}}}.$$
\end{thm}
\begin{thm}\label{main_thm_D} For all non-negative integers $n \geq r$, with $n\neq 1$, we have:
$$ S_D(n,r) = \frac{1}{2^{r}r!} \left[ \sum\limits_{k=0}^r {A_D(n,k){\binom{n-k}{r-k}}} + n \cdot 2^{n-1}(r-1)! \cdot S(n-1,r-1)\right],$$
where $S(n-1,r-1)$ is the usual Stirling number of the second kind.
\end{thm}
Now, by inverting these formulas, similarly to the known equality in type $A$, mentioned in \cite[Corollary 1.18]{Bo}:
\begin{equation}\label{Ank}
A(n,k)= \sum\limits_{r=1}^k {(-1)^{k-r} \cdot r! \cdot S(n,r) \cdot \binom{n-r}{k-r}},
\end{equation}
we get the following expressions for the Eulerian numbers of type $B$ ({\em resp.} type $D$) in terms of the Stirling numbers of type $B$ ({\em resp.} type $D$):
\begin{cor}\label{inverse_main_thm_star} For all non-negative integers $n \geq k$, we have:
$$A_B(n,k)= \sum\limits_{r=0}^k {(-1)^{k-r} \cdot 2^{r}r! \cdot S_B(n,r) \cdot \binom{n-r}{k-r}}.$$
\end{cor}
\begin{cor}\label{inverse_main_thm_star_D} For all non-negative integers $n\geq k$, with $n\neq 1$, we have:
\begin{small}
\begin{eqnarray*}\label{eq_D} \nonumber
A_D(n,k)
= \left[ \sum\limits_{r=0}^k (-1)^{k-r} \cdot 2^{r}r! \cdot S_D(n,r) \cdot \binom{n-r}{k-r} \right] - n \cdot 2^{n-1} \cdot A(n-1,k-1).
\end{eqnarray*}
\end{small}
\end{cor}
\subsection{Proof for type $B$}
The proofs in this and in the next subsections use arguments similar to Bona's proof for the corresponding identity for type $A$, see \cite[Theorem 1.17]{Bo}.
\begin{proof}[Proof of Theorem~\ref{main_thm_B}]\label{section4.1}
We have to prove the following equality:
$$ 2^{r}r!S_{B}(n,r)= \sum\limits_{k=0}^r {A_B(n,k){\binom{n-k}{r-k}}}.$$
The number $2^r r!S_B(n,r)$ in the left-hand side is the number of ordered $B_n$-partitions having $r$ pairs of nonzero blocks. Now, let us show that the right-hand side counts the same set of partitions in a different way.
Let $\beta \in B_n$ be a signed permutation with ${\rm{des}}_B(\beta)=k$, written in its window notation. We start by adding a separator after each descent of $\beta$ and after $\beta(n)$. If $0 \in {\rm{Des}}_B(\beta)$, this means that a separator is added before $\beta(1)$. If $r>k$, we add extra $r-k$ {\em artificial separators} in some of the $n-k$ empty spots, where by a {\em spot} we mean a gap between two consecutive entries of $\beta$ or the gap before the first entry $\beta(1)$.
This splits ${\beta}$ into a set of $r$ blocks,
where the block $C_i$ is defined as the set of entries between the $i$th and the $(i+1)$th separators for $1 \leq i \leq r$.
Now, this set of blocks is transformed into the ordered $B_n$-partition with $r$ pairs of nonzero blocks:
$$[C_0,C_1,-C_1,\ldots,C_r,-C_r],$$
where the (optional) zero-block $C_0$ equals to $\{\pm \beta(1),\ldots, \pm \beta(j)\}$ if the first separator is after $\beta(j)$, for $j \geq 1$, and it does not exist if the first separator is before $\beta(1)$.
For example, if $\beta=[-2,3,5,1,-4] \in B_5$, then after adding the separators induced by descents, we get the sequence
$[ \ | \ -2,3,5\ | \ 1 \ | \ -4 \ | \ ]$, which is transformed into the ordered partition $[\{-2,3,5\},\{2,-3,-5\},\{1\},\{-1\},\{-4\},\{4\}]$.
On the other hand, if $\beta'=[2,3,5,-1,-4] \in B_5$, then after adding the separators induced by the descents, we have
$\beta'=[ \ 2,3,5 \ | \ -1 \ | \ -4 \ | \ ]$, which gives rise to the ordered partition $[\{ \pm 2, \pm 3,\pm 5 \},\{-1\},\{1\},\{-4\},\{4\}]$, with a zero-block, and two nonzero blocks.
There are exactly $\binom{n-k}{r-k}$ ordered $B_n$-partitions obtained from ${\beta}$ in this way. From now on, we refer to this process of creating $B_n$-partitions starting from a single signed permutation ${\beta}$, as the {\em $B$-procedure}.
It is easy to see that the $B$-procedure applied to different signed permutations produces disjoint sets of ordered $B_n$-partitions; therefore,
one can create in this way $\sum_{k=0}^r {A_B(n,k){\binom{n-k}{r-k}}}$ distinct ordered $B_n$-partitions with $r$ pairs of nonzero blocks.
Let us show that any ordered $B_n$-partition $\lambda=[C_0,C_1,-C_1,\ldots,C_r,-C_r],$ can be obtained through the $B$-procedure.
If $\lambda$ contains a zero-block $C_0$, then put the positive elements of $C_0$ in increasing order at the beginning of a sequence $\mathbf{S}$, and add a separator after them. Then, order increasingly the elements in each of the blocks $C_1,\ldots,C_r$, and write them sequentially in $\mathbf{S}$ (after the first separator if exists), by adding a separator after the last entry coming from each block. Reading the formed sequence $\mathbf{S}$ from the left to the right, one obtains the window notation of a signed permutation $\beta$. Note that the number of descents in $\beta$ is smaller than or equal to $r$, since the elements in each block are ordered increasingly. Now, it is clear that $\lambda$ can be obtained by applying the $B$-procedure to $\beta$, where the artificial separators are easily recovered.
\end{proof}
\begin{exa}
The signed permutation
$${\beta}=[ \ 1,4 \mid -5,-3,2 \ | \ ] \in B_5$$
has $2$ as a descent. It produces the following ordered $B_5$-partition with one pair of nonzero blocks
$$[ \{\pm 1,\pm 4\}, \{-5,-3,2\}, \{5,3,-2\}],$$
and exactly ${\binom{4}{1}}$ ordered $B_5$-partitions with two pairs of nonzero blocks, namely:
\begin{eqnarray*}
&[ \{1,4\},\{-1,-4\}, \{-5,-3,2\}, \{5,3,-2\}],\\
&[ \{\pm 1\},\{4\},\{-4\}, \{-5,-3,2\}, \{5,3,-2\}],\\
&[ \{ \pm 1, \pm 4\}, \{-5\}, \{5\},\{-3,2\}, \{3,-2\}], \\
&[ \{ \pm 1,\pm 4\}, \{-5,-3\},\{5,3\},\{2\}, \{-2\}],
\end{eqnarray*}
obtained by placing one artificial separator before entries $1,2,4$ and $5$, respectively. The other ordered partitions coming from ${\beta}$ with more blocks are obtained similarly.
Conversely, let
$$\lambda=[\{ \pm 1, \pm 4\}, \{5\},\{-5\}, \{-3,2\}, \{3,-2\}]$$
be an ordered $B_5$-partition. The corresponding signed permutation with the added separators is
${\beta}=[ \ 1,4 \parallel 5 \ | - 3,2 \ | \ ] \in B_5$.
Note that although $C_1=\{5\}$ is a separate block, there is no descent between $4$ and $5$, meaning that $\lambda$ is obtained by adding an artificial separator in the spot between these two entries, denoted $\|$.
\end{exa}
\subsection{Proof for type $D$}
The proof of Theorem \ref{main_thm_D} is a bit more tricky. The basic idea is the same as before: obtaining the whole set of ordered $D_n$-partitions with $r$ pairs of nonzero blocks from elements in $D_n$ with at most $r$ descents. We will use the $B$-procedure presented in the previous subsection, with the addition of an extra step, to take care of the special structure of the $D_n$-partitions.
First of all, we recall that we might have ${\rm{Des}}_D({\gamma}) \neq {\rm{Des}}_B({\gamma})$ for ${\gamma} \in D_n$, see an example at the end of Section \ref{Eulerian numbers}.
Let ${\gamma} \in D_n$ be such that ${\rm{des}}_D({\gamma})=k$. We start by adding the separators after the $D$-descents of ${\gamma}$ and the artificial ones in case that $k<r$. Using the $B$-procedure, we transform ${\gamma}$, equipped with the set of separators, into a $B_n$-partition. The result is also a $D_n$-partition, except in the case when there is a separator (either induced by a $D$-descent or by an artificial addition) between ${\gamma}(1)$ and ${\gamma}(2)$, but not before ${\gamma}(1)$. In fact, in this case, we obtain an ordered $B_n$-partition with a zero-block containing exactly one pair of elements, which is not a $D_n$-partition.
Hence, only in this case, we slightly modify the algorithm as follows. First we toggle the sign of ${\gamma}(1)$ and move the separator from after ${\gamma}(1)$ to before it. We call this action the {\em switch operation}.
Then, we transform this new sequence of entries and separators into a $D_n$-partition by applying the $B$-procedure. We refer to this process of associating a permutation ${\gamma} \in D_n$ with the obtained set of ordered $D_n$-partitions, as the {\em $D$-procedure}.
Before proving that this procedure indeed creates ordered $D_n$-partitions, we give an example of an element ${\gamma} \in D_n$, for which the application of the switch operation is required.
\begin{exa}
Let ${\gamma}=[ \ -1 \ \| \ 3, 4 \mid -2 \mid -6,-5 \mid \ ] \in D_6$ be a permutation equipped with the separators induced by the $D$-descents and one artificial separator added after position $1$. The $B$-procedure, applied to ${\gamma}$, results in an illegal ordered $D_6$-partition, since the zero-block $B_0=\{\pm 1\}$ consists of only one pair. Toggling the sign of ${\gamma}(1)$, and moving the artificial separator before position $1$, we obtain:
$${\gamma}'=[ \ \| \ 1,3,4 \mid -2 \mid -6,-5\mid \ ] \in B_6\setminus D_6,$$
that is transformed into the ordered $D_6$-partition:
$$[\{1,3,4\},\{-1,-3,-4\},\{-2\},\{2\},\{-6,-5\},\{6,5\}].$$
\end{exa}
As in type $B$, it is easy to see that by applying the $D$-procedure to all the permutations in $D_n$, we obtain disjoint sets of ordered $D_n$-partitions, though, in this case we do not obtain all of them.
The next lemma specifies exactly which $D_n$- partitions are not reached:
\begin{lem}\label{structure of odd partitions}
The ordered $D_n$-partitions with $r$ pairs of nonzero blocks, which cannot be obtained by the $D$-procedure are exactly those of the form
\begin{equation}\label{odd_partitions}
\lambda = [C_1=\{*\}, -C_1=-\{*\}, C_2, -C_2,\ldots,C_r,-C_r],
\end{equation}
where $*$ stands for a single element of $[\pm n]$, and such that the total number
of negative entries in the blocks $C_1=\{*\},C_2, \dots, C_r$ is odd.
\end{lem}
\begin{proof}
First of all, we remark that when the $D$-procedure is applied to a permutation (equipped with separators) without the use of the switch operation, it produces ordered $D_n$-partitions $[C_0, C_1,-C_1,\dots, C_r,-C_r]$ with an even number of negative entries in the union $C_1 \cup C_2 \cup \cdots \cup C_r$. Let us call an ordered $D_n$-partition {\em even} ({\it resp.} {\em odd}) if it satisfies ({\it resp}. does not satisfy) the latter condition.
In contrast, if the switch operation is applied, only odd partitions of the form $[C_1,-C_1,\dots, C_r,-C_r]$ without a zero-block are obtained, and the first block $C_1$ contains at least the two entries ${\gamma}(1)$ and ${\gamma}(2)$.
From this it follows that the partitions in \eqref{odd_partitions} cannot be reached.
Let us show, that all other ordered $D_n$-partitions can be obtained using the $D$-procedure.
Let $\lambda=[C_0,C_1,-C_1,\dots,C_r,-C_r]$ be an ordered $D_n$-partition with a non-empty zero-block $C_0$.
We look for the preimage ${\gamma} \in D_n$ of $\lambda$. Since the switch operation on a permutation ${\gamma} \in D_n$ produces $D_n$-partitions without a zero-block, in our case the switch operation has not been applied to ${\gamma}$.
We start by defining a sequence ${\mathbf S}$ as follows: we first put the positive entries of $C_0$ in their natural increasing order as the first elements of $\mathbf{S}$, followed by a separator. If $\lambda$ is odd, we change the sign of the first entry of $\mathbf{S}$ to be negative.
Now, as described in the proof of Theorem~\ref{main_thm_B}, we complete $\mathbf{S}$ by concatenating the $r$ sequences composed by the elements of the blocks $C_1,\ldots,C_r$, where in each block the elements are ordered increasingly and followed by a separator.
We now consider the obtained sequence $\mathbf{S}$ as a permutation $\gamma \in D_n$. Note that $0 \notin {\rm Des}_D(\gamma)$, since by construction $|\gamma(1)| < \gamma(2)$ and so $\gamma(1) + \gamma(2) > 0$. Moreover, it is clear that applying the $D$-procedure to $\gamma$ yields the partition $\lambda$.
Now assume that $\lambda=[C_1,-C_1,\dots,C_r,-C_r]$ is an ordered $D_n$-partition without a zero-block.
If $\lambda$ is even, it is easy to see that the above construction without the initial step of reordering $C_0$, produces ${\gamma} \in D_n$ which is the preimage of $\lambda$.
Finally, if $\lambda$ is odd and is not listed in Equation (\ref{odd_partitions})
it means that the first block $C_1$ has at least two elements,
and that the switch operation is necessary (due to the parity).
As before, we define a sequence $\mathbf{S}$ by reordering increasingly all the blocks $C_i$. Since $C_1$ has at least two elements, we have that $\mathbf{S}(1)<\mathbf{S}(2)$.
Since the partition is odd with no zero-block, we have applied a switch operation on its preimage. Therefore, the sign of $\mathbf{S}(1)$ is negative. Now consider $\mathbf{S}$ as a permutation $\gamma \in D_n$. It is easy to see that the obtained permutation $\gamma \in D_n$ is indeed the preimage of $\lambda$.
\end{proof}
\begin{comment}
We deal with it case-by-case: in the three cases $0 \notin {\rm Des}_D(\pi)$.
\begin{enumerate}
\item If $\pi(1)>0$ and $\pi(2)>0$, then the separator between $\pi(1)$ and $\pi(2)$ is an artificial one.
\item If $\pi(1)<0$ and $\pi(2)<0$, then $-a_1>a_2$, so there is a separator induced by a descent of type $D$ between $\pi(1)$ and $\pi(2)$ .
\item If $a_1<0$ and $a_2>0$, then the separator between $\pi(1)$ and $\pi(2)$ is either induced by a descent or an artificial one depending if $a_1>a_2$ or not.
\end{enumerate}
\end{comment}
We give now two examples of the reverse procedure: both examples are ordered odd $D_5$-partitions, but one has a zero-block, while the other has no zero-block, so the latter requires the switch operation.
\begin{exa}\label{example for recovery}
(a) Let
$$\lambda_1=\left[ C_0=\{ \pm 1, \pm 4\}, \{3\}, \{-3\}, \{-5,2\}, \{5,-2\} \right]$$
be an ordered odd $D_5$-partition with a zero-block $C_0$ which is odd since we have one negative sign in $\{3\}\cup \{-5,2\}$.
For recovering its preimage ${\gamma}_1 \in D_5$, we choose the negative sign for the smallest positive entry in the zero-block, which is $1$. After inserting the other positive entry of $C_0$ and a separator, we insert the other blocks, where each block is ordered increasingly followed by a separator, to obtain the permutation:
$${\gamma}_1=[ \ -1,4\ |\ 3 \ | -5,2\ | \ ] \in D_5,$$
which is the preimage of the partition $\lambda_1$ using the $D$-procedure.
\noindent
(b) Let $$\lambda_2= \left[ \{ -4, 3 \},\{4,-3\},\{2\},\{-2\},\{-5,-1\},\{5,1\} \right]$$
be an ordered odd $D_5$-partition without a zero-block. Hence, it is created by the switch operation. First, by the standard reverse procedure, we get the element:
$${\gamma}_2'=\left[\ | -4,3 \ |\ 2 \ | \ -5,-1 \ | \ \right] \in B_5 \setminus D_5.$$
Then, after performing the toggling of the sign of the first digit, we obtain:
$${\gamma}_2=[ \ 4 \mid 3 \mid 2 \mid -5,-1 \ ]\in D_5,$$ that is the permutation from which the partition $\lambda_2$ is obtained.
Note that in this case, artificial separators are not needed.
\end{exa}
We can now complete the proof of Theorem~\ref{main_thm_D}.
\begin{proof}[Proof of Theorem~\ref{main_thm_D}]
The equation in the statement of the theorem is equivalent to the following:
$$2^{r}r! S_D(n,r) = \sum\limits_{k=0}^r {A_D(n,k){\binom{n-k}{r-k}}}+ n \cdot 2^{n-1}(r-1)! \cdot S(n-1,r-1).$$
The left-hand side of the above equation counts the number of ordered $D_n$-partitions with $r$ pairs of nonzero blocks.
The right-hand side counts the same set of partitions divided in two categories: those coming from the $D$-procedure, that are induced by permutations counted in $A_D(n,k)$, and those that are not, which are listed in Lemma \ref{structure of odd partitions}.
It is easy to see that the latter can be enumerated in the following way: first choose the absolute value of the unique element in $C_1=\{ * \}$, which can be done in $n$ ways. Then, choose and order the $r-1$ remaining blocks, which can be done in $(r-1)! \cdot S(n-1,r-1)$ ways. Finally, choose the sign of each entry in the blocks $C_1,C_2,\dots, C_r$, in such a way that an odd number of entries will be signed, and this can be done in $2^{n-1}$ ways.
This completes the proof.
\end{proof}
\section{Falling factorials for Coxeter groups \\of types $B$ and $D$}\label{Falling polynomials for Coxeter groups}
In this section, we present generalizations of Theorem \ref{thm:typeA_falling} for Coxeter groups of types $B$ and $D$ and provide combinatorial proofs for them.
\subsection{Type $B$}
The following theorem is a natural generalization of Theorem~\ref{thm:typeA_falling} for the Stirling numbers of type $B$, and it is a particular case of an identity appearing in Bala ~\cite{Bala}, where the numbers $S_B(n,k)$ correspond to the sequence denoted there by $S_{(2,0,1)}$. Bala uses generating functions techniques for proving this identity.
\begin{thm}[Bala]\label{thm_bala}
Let $x \in \mathbb{R}$ and let $n \in \mathbb{N}$. Then we have
\begin{equation}\label{B}
x^n=\sum\limits_{k=0}^n{S_B(n,k)[x]^B_k},
\end{equation}
where $[x]^B_k:=(x-1)(x-3)\cdots (x-2k+1)$ and $[x]^B_0:=1$.
\end{thm}
A combinatorial interpretation of $S_{B}(n,k)$ using the model of $k$-attacking rooks was given by Remmel and Wachs \cite{RW} (specifically, this is $S_{n,k}^{0,2}(1,1)$ in their notation).
More information on the rook interpretation of this and other factorization theorems can be found in Miceli and Remmel \cite{MR}.
Here we provide a kind of a geometric proof, suggested to us by Reiner, which is related to a method used by Blass and Sagan~\cite{BS} to compute the characteristic polynomial of the poset $L(\mathcal{B}_n)$.
\begin{proof}
Being a polynomial identity, it is sufficient to prove it only for odd integers $x=2m+1$ where $m \in \mathbb{N}$.
The left-hand side of Equation (\ref{B}) counts the number of lattice points in the $n$-dimensional cube $\{-m,-m+1,\dots,-1,0,1,\dots,m\}^n$. We show that the right-hand side of Equation (\ref{B}) counts the same set of points using the maximal intersection subsets of hyperplanes the points lay on.
More precisely, let $\lambda=\{C_0,\pm C_1,\dots,\pm C_k\}$ be a $B_n$-partition with $k$ pairs of nonzero blocks, with
$0\leq k \leq n$. We associate to this partition the set of lattice points of the form $(x_1,\dots,x_n)$, where $x_j=0$ for all $j \in C_0$, and $x_{j_1}= x_{j_2}\neq 0$ ({\em resp.} $x_ {j_1}=-x_{j_2}\neq 0$) whenever $j_1,j_2$ ({\em resp.} $j_1,-j_2$) belong to the same block $C_i$ ({\em resp.} $-C_i$).
For the first pair of nonzero blocks $\pm C_1$ of the set partition $\lambda$, if $j_1 \in C_1 \cup -C_1$ then there are $x-1$ possibilities (excluding the value $0$) to choose the value of $x_{j_1}$. For the second pair of blocks $\pm C_2$ of the partition $\lambda$, we have $x-3$ possibilities (excluding the value $0$ and the value $x_{j_1}$ chosen for $\pm C_1$ and its negative). We continue in this way until we get $x-(2k-3)$ possibilities for the last pair of blocks $\pm C_k$.
In particular, for $k=0$, $\lambda$ consists of only the zero-block $\{\pm 1,\dots,\pm n\}$, and is associated with the single lattice point $(0,\dots,0)$; for $k=n$, the only $B_n$-partition having $n$ pairs of nonzero blocks is
$$\{ \pm\{1\},\dots,\pm\{n\}\}$$
which corresponds to the lattice points $(x_1,\dots,x_n)$ such that
$x_i \neq \pm x_j\neq 0$ for all $i\neq j$.
Note that these are the $(x-1)(x-3)\cdots (x-(2n-1))$ lattice points that do not lie on any hyperplane.
\end{proof}
\begin{exa}
Let $n=2$ and $m=3$, so we have that $x=2m+1=7$. The lattice $([-3,3] \times [-3,3]) \cap \mathbb{Z}^2$ is presented in Figure \ref{fig_typeB}.
\begin{figure}[!ht]
\centering
\includegraphics[scale=0.5]{typeB_lattice.eps}
\caption{Lattice points for type $B$}
\label{fig_typeB}
\end{figure}
For $k=0$, we have exactly one $B_2$-partition $\lambda_0$ consisting only of the zero-block: $\lambda_0=\{\{\pm 1,\pm 2\}\}$. The corresponding subspace is $\{x_1=x_2=0\}$, which counts only the lattice point $(0,0)$.
For $k=1$, we have four $B_2$-partitions, two of them contain a zero-block:
$$\lambda_1 = \{\{\pm 1\}, \pm\{2\} \}; \qquad \lambda_2 = \{\{\pm 2\},\pm\{1\} \},$$
and two of them do not:
$$\lambda_3 = \{ \pm\{1,2\} \}; \qquad \lambda_4 = \{ \pm\{1,-2\} \}.$$
The partitions $\lambda_1$ and $\lambda_2$ correspond to the axes $x_1=0$ and $x_2=0$, respectively.
The second pair $\lambda_3$ and $\lambda_4$ corresponds to the diagonals $x_1=x_2$ and $x_1=-x_2$ respectively. Each of these hyperplanes contains $6$ lattice points (since the origin is excluded).
For $k=2$, the single $B_2$-partition:
$$\lambda_5 = \{ \pm \{1\},\pm\{2\}\}$$
corresponds to the set of lattice points $(x_1,x_2)$ with $x_1\neq \pm x_2 \neq 0$, which are those not lying on any hyperplane.
\end{exa}
\begin{rem}
Note that Blass and Sagan \cite[Theorem 2.1]{BS} show that, when $x$ is an odd number, the cardinality of the set of lattice points not lying on any hyperplane is counted by the characteristic polynomial $\chi(\mathcal{B}_n,x)$ of the lattice $L(\mathcal{B}_n)$.
\end{rem}
\subsection{Type $D$}
The {\em falling factorial in type} $D$ is defined as follows: (see \cite{BS})
$$[x]_k^D:=\left\{ \begin{array}{ll}
1, & k=0 ;\\
(x-1)(x-3)\cdots (x-(2k-1)), & 1 \leq k <n ;\\
(x-1)(x-3)\cdots (x-(2n-3))(x-(n-1)),& k=n.\end{array}\right.$$
We have found no generalization of Equation (\ref{reg}) for type $D$ in the literature, so we supply one here.
\begin{thm}\label{thm_bala_D}
For all $n \in \mathbb{N}$ and $x \in \mathbb{R}$:
\begin{equation}\label{D}
x^n=\sum\limits_{k=0}^n{S_D(n,k)[x]_k^D} + n \left((x-1)^{n-1} -[x]_{n-1}^D\right).
\end{equation}
\end{thm}
\begin{proof}
For $D_n$-partitions having $0 \leq k< n$ pairs of nonzero blocks the proof goes verbatim as in type $B$, so let $k=n$.
In this case, we have only one possible $D_n$-partition having $n$ pairs of nonzero blocks: $\{\pm\{1\}, \dots, \pm\{n\} \}$.
We associate this $D_n$-partition with the lattice points of the form $(x_1,\dots,x_n)$ such that $x_i \neq \pm x_j$ for $i \neq j$, having at most one appearance of the value $0$.
Note that the points with exactly one appearance of $0$ cannot be obtained by any $D_n$-partition having $k<n$ blocks, since the zero-block cannot consist of exactly one pair.
If $0$ does appear, then we have to place it in one of the $n$ coordinates and then we are left with $(x-1)(x-3)\cdots (x-(2n-3))$ possibilities for the rest, while if $0$ does not exist, then we have $(x-1)(x-3)\cdots (x-(2n-1))$ possibilities. These two values sum up to a total of $$[x]_n^D=(x-1)(x-3)\cdots (x-(2n-3))(x-(n-1)).$$
As in type $B$, this number is equal to the evaluation of the characteristic polynomial $\chi(\mathcal{D}_n,x)$ of $L(\mathcal{D}_n)$, where $x$ is odd.
Note that during the above process of collecting lattice points of the $n$-dimensional cube, the points containing exactly one appearance of $0$ and at least two nonzero coordinates sharing the same absolute value are not counted, since the zero-block (if it exists) must contain at least two elements. This phenomenon happens when $n>2$, and the number of such points is $n((x-1)^{n-1} - [x]_{n-1}^D)$. This concludes the proof.\end{proof}
\begin{exa}
As in the previous example, let $n=2$ and $m=3$, so we have: $x=2m+1=7$. The lattice $([-3,3] \times [-3,3]) \cap \mathbb{Z}^2$ is presented in Figure \ref{fig_typeD}.
\begin{figure}[!ht]
\centering
\includegraphics[scale=0.5]{typeD_lattice.eps}
\caption{Lattice points for type $D$}
\label{fig_typeD}
\end{figure}
For $k=0$, as in type $B$ we have exactly one $D_2$-partition
$\lambda_0=\{\{\pm 1,\pm 2\}\}$ which counts only the lattice point $(0,0)$.
For $k=1$, we have only two $D_2$-partitions:
$\{\pm\{1,2\} \}$ and $\{\pm\{1,-2\} \}$, which correspond, as in the previous example,
to the diagonals $x_1=x_2$ and $x_1=-x_2$ (without the origin), respectively.
For $k=2$, as before, there is a single $D_n$-partition with two pairs of nonzero blocks:
$ \lambda = \{ \pm \{1\},\pm \{2\}\}.$
The lattice points corresponding to this set partition are those with different values in their coordinates, i.e. $x_1\neq x_2$, but in the case of type $D$ (in contrast to type $B$) the value $0$ can also appear. In the figure, these are all the lattice points which do not lie on the diagonals.
Note that in the case $n=2$ the second term in Equation \eqref{D} is $0$ and hence does not count any missing lattice points, since we have already counted all the points. The missing points start to appear from $n=3$, as presented in the next example.
\end{exa}
\begin{exa}
Let $n=m=3$, so that $x=2m+1=7$. The lattice points which are not counted have the form $(x_1,x_2,x_3)$, such that exactly one of their coordinates is $0$ and the other two share the same absolute value, e.g. the lattice points $(0,2,2)$ and $(0,2,-2)$ are not counted. In this case, the number of such missing lattice points (which is the second summand in the right-hand side of Equation \eqref{D}) is: $3\cdot 6^2-3\cdot 6 \cdot 4 =36$.
\end{exa}
\section{Some generalizations} \label{Possible generalizations}
In this section, we present some generalizations and variants related to our main results in some different directions. In Section \ref{grn_section}, we start with a short introduction to the colored permutations group and we generalize Theorems \ref{main_thm_B} and \ref{thm_bala} to this case. In Section \ref{flag_des_section}, we provide a version of Theorem \ref{main_thm_B} for the flag descent parameter in type $B$.
\subsection{The colored permutations group} \label{grn_section}
\begin{defn}
Let $m$ and $n$ be positive integers. {\it The group of
colored permutations of $n$ digits with $m$ colors} is the wreath product
$$G_{m,n}=\mathbb{Z}_m \wr S_n=\mathbb{Z}_m^n \rtimes S_n,$$
consisting of all the pairs $(\vec{z},\tau)$, where
$\vec{z}$ is an $n$-tuple of integers between $0$ and $m-1$ and $\tau \in S_n$.
\end{defn}
A convenient way to look at $G_{m,n}$ is to consider the alphabet
$$\Sigma:=\left\{1,\dots,n, 1^{[1]},\dots,n^{[1]}, \dots, 1^{[m-1]},\dots,n^{[m-1]} \right\},$$
as the set $[n]$ colored by the colors
$0,\dots,m-1$. Then, an element of $G_{m,n}$ is a bijection $\pi: \Sigma \rightarrow \Sigma$
satisfying the following condition:
$$\mbox{if } \pi (i^{[\alpha]})=j^{[\beta]}\mbox{, then }
\pi ( i^{[\alpha+1]})=j^{[\beta+1]},$$
where the exponent $[\cdot]$ is computed modulo $m$. The elements of $G_{m,n}$ are usually called {\em colored permutations}.
In particular, $G_{1,n}=S_n$ is the symmetric group, while $G_{2,n}=B_n$ is the group of signed permutations.
\begin{defn}
The {\it color order} on $\Sigma$ is defined to be
$$1^{[m-1]} \prec \cdots \prec n^{[m-1]} \prec \cdots \prec 1^{[1]} \prec 2^{[1]} \prec \cdots \prec n^{[1]} \prec 1 \prec \cdots \prec n.$$
\end{defn}
\begin{defn}
Let $\pi \in G_{m,n}$. Assume that $\pi(1)=a_1^{[z_1]}$. We define $${\rm des}_G(\pi):={\rm des}_A(\pi)+\varepsilon(\pi),$$
where
\begin{equation}\label{desA}
{\rm des}_A(\pi):=|\{i \in [n-1] \mid \pi(i) \succ \pi(i+1)\}|,
\end{equation}
where `$\succ$' refers to the color order, and
\begin{equation}\label{epsilon}
\varepsilon(\pi):=\left\{\begin{array}{cc}
1, & {\rm if} \ z_1 \not \equiv 0 \ {\rm mod} \ m;\\
0, & {\rm if} \ z_1 \equiv 0 \ {\rm mod} \ m.
\end{array}\right.
\end{equation}
For example, if $\pi=[3,\bar{1},\bar{\bar{2}}] \in G_{3,3}$, we have ${\rm des}_G(\pi)= 2+0=2.$ Note that for $m=1$, ${\rm{des}}_G={\rm{des}}$ and for $m=2$, ${\rm{des}}_G={\rm{des}}_B$.
Moreover, we define the {\it Eulerian number of type $G_{m,n}$} to be:
$$A_m(n,k):=|\{\pi \in G_{m,n} \mid {\rm des}_G(\pi)=k\}|.$$
\end{defn}
Let $C\subseteq \Sigma$. Denote $C^{[t]}=\{x^{[i+t]}\mid x^{[i]} \in C\}$.
\begin{defn}
A {\it $G_{m,n}$-partition} is a set partition of $\Sigma$
into blocks such that the following conditions are satisfied:
\begin{itemize}
\item There exists at most one block satisfying $C^{[1]}=C$.
This block will be called the {\it zero-block}.
\item If $C$ appears as a block in the partition, then $C^{[1]}$ also appears in that partition.
\end{itemize}
Two blocks $C_1$ and $C_2$ will be called {\it equivalent} if there is a natural number $t \in \mathbb{N}$ such that $C_1=C_2^{[t]}$.
The number of $G_{m,n}$-partitions with $r$ non-equivalent nonzero blocks is denoted by $S_{m}(n,r)$.
\end{defn}
For example, the following is a $G_{3,4}$-partition:
$$\{\{1,\bar{1},\bar{\bar{1}},2,\bar{2},\bar{\bar{2}}\},\{3,\bar{4}\},\{\bar{3},\bar{\bar{4}}\},\{\bar{\bar{3}},4\}\},$$ with a zero-block: $\{1,\bar{1},\bar{\bar{1}},2,\bar{2},\bar{\bar{2}}\}$.
We define now the concept of an {\it ordered} $G_{m,n}$-partition:
\begin{defn}
A $G_{m,n}$-partition is {\em ordered} if the set of blocks is totally ordered and the following conditions are satisfied:
\begin{itemize}
\item If the zero-block exists, then it appears as the first block.
\item For each nonzero block $C$, the blocks $C^{[i]}$ for $1 \leq i \leq m-1$ appear consecutively right after $C$, i.e. $C,C^{[1]}, C^{[2]},\dots, C^{[m-1]}$.
\end{itemize}
\end{defn}
The generalization of Theorem \ref{thm:typeA} in this setting is as follows.
\begin{thm}\label{main_thm_2}
For all positive integers $n,m$ and $r$, we have:
$$ S_m(n,r)= \frac{1}{m^r r!} \sum\limits_{k=0}^{r} {A_m(n,k){\binom{n-k}{r-k}}}.$$
\end{thm}
The proof is similar to that of Theorem \ref{main_thm_B}, so it is omitted.
In order to generalize Theorem~\ref{thm:typeA_falling}, we define the {\em falling factorial of type} $G_{m,n}$ as follows: (see Equation 15 in \cite{Bala})
$$[x]_k^m:=\left\{ \begin{array}{ll}
1, & k=0 ;\\
(x-1)(x-1-m)\cdots (x-1-(k-1)m), & 1 \leq k \leq n.
\end{array}\right.$$
We have:
\begin{thm}\label{thm Balla for grn} Let $x \in {\mathbb{R}}$ and $n\in \mathbb{N}$. Then we have:
$$x^n=\sum\limits_{k=0}^n{S_m(n,k)[x]_k^m}.$$
\end{thm}
We present here the idea of the proof.
\begin{proof}[Sketch of the proof]
Divide the unit circle $S^1$ in the plane into $m$ parts according to the $m$th roots of unity: $1,\rho_m,\rho_m^2,\dots,\rho_m^{m-1}$, see Figure \ref{S1}, where $m=3$ and the roots are represented by small bullets. This divides the circle into $m$ arcs. Now, in each arc, locate $t$ points in equal distances from each other (see Figure \ref{S1} where $t=5$ and the points are represented by small lines). Including the point $(1,0)$, we get $x=mt+1$ points on the unit circle.
Consider now the $n$-dimensional torus $(S^1)^n=S^1 \times \cdots \times S^1 $ with $x^n$ {\em lattice points on it}. The same arguments we presented in the proof of Theorem \ref{thm_bala} will apply now to Theorem \ref{thm Balla for grn}, when we interpret the $G_{m,n}$-partitions as intersections of subsets of hyperplanes in $\mathcal{G}_{m,n}$, where by $\mathcal{G}_{m,n}$ we mean the following generalized hyperplane arrangement for the colored permutations group:
\begin{eqnarray*}
\mathcal{G}_{m,n}&:=&\{ \ \{x_i=\rho_m^k x_j\} \mid 1 \leq i < j \leq n ,0 \leq k <m\} \\
&\cup& \{ \ \{x_i=0\} \mid 1 \leq i \leq n\},
\end{eqnarray*}
see, e.g., \cite[p.~244]{OT}.
\end{proof}
\begin{figure}[!ht]
\centering
\begin{tikzpicture}[cap=round,line width=2pt]
\draw (0,0) circle (2cm);
\foreach \angle in
{20, 40, 60, 80, 100, 140, 160 , 180,
200, 220, 260,280, 300, 320, 340}
{
\draw[line width=2pt] (\angle:1.9cm) -- (\angle:2.1cm);
}
\foreach \angle in {0,120,240}
\draw[line width=2pt,fill] (\angle:2cm) circle [radius=0.1];
\draw[->, line width=1pt] (-2,0) -- (3,0);
\draw[->, line width=1pt] (0,-3) -- (0,3);
\node[above right] at (2cm,0){\tiny $\rho_3^0=1$};
\node[right] at (20:2.1cm){\tiny $1$};
\node[right] at (40:2.1cm){\tiny $2$};
\node[above right] at (60:2cm){\tiny $3$};
\node[above] at (80:2.1cm){\tiny $4$};
\node[above] at (100:2.1cm){\tiny $5$};
\node[above left] at (120:2cm){\tiny $\rho_3^1$};
\node[left] at (140:2.1cm){\tiny $\rho_3^1 \cdot 1$};
\node[left] at (160:2.1cm){\tiny $\rho_3^1 \cdot 2$};
\node[left] at (180:2.1cm){\tiny $\rho_3^1 \cdot 3$};
\node[left] at (200:2.1cm){\tiny $\rho_3^1 \cdot 4$};
\node[below left] at (220:2cm){\tiny $\rho_3^1 \cdot 5$};
\node[below left] at (240:2cm){\tiny $\rho_3^2$};
\node[below] at (260:2.1cm){\tiny \hspace{-5pt}$\rho_3^2 \cdot 1$};
\node[below] at (280:2.1cm){\tiny \hspace{5pt}$\rho_3^2 \cdot 2$};
\node[below right] at (300:2cm){\tiny $\rho_3^2 \cdot 3$};
\node[right] at (320:2.1cm){\tiny $\rho_3^2 \cdot 4$};
\node[right] at (340:2.1cm){\tiny $\rho_3^2 \cdot 5$};
\end{tikzpicture}
\caption{The 16 lattice points on $S^1$, representing the first coordinate for $m=3$ and $t=5$.}
\label{S1}
\end{figure}
\subsection{The flag descent parameter for the Coxeter group of type~$B$}\label{flag_des_section}
Another possibility to generalize these results is to consider the {\em flag descent statistics} defined on group of signed permutations.
Such parameters produce, in this context, similar expressions of those presented in the previous sections, but less elegant.
As an example, we show here only one of these possible extensions.
This is a different generalization of Theorem~\ref{main_thm_B} by using the {\em flag-descent number} fdes, that is defined in~\cite{AR} for a signed permutation $\beta \in B_n$:
$${\rm fdes}(\beta):=2 \cdot {{\rm{des}}_A}(\beta)+\varepsilon(\beta),$$
where ${\rm{des}}_A(\beta)$ is defined as in Equation \eqref{desA}, and $\varepsilon(\beta)$ as in Equation \eqref{epsilon}.
We denote by $A^*_B(n,k)$ the number of permutations $\beta \in B_n$ satisfying ${\rm fdes}(\beta)=k-1$, and by
$S^*_{B}(n,r)$ the number of $B_n$-partitions having exactly $r$ blocks. Here, differently from $S_B(n,r)$, every block counts: the zero-block is counted once, and any pair $\pm C_i$ is counted twice.
These two new parameters satisfy the identity stated below:
\begin{thm}\label{thm_flag}
For all nonnegative integers $n,r$ where $n {\gamma}eq r$, we have:
$$2^{\left\lfloor \frac{r}{2}\right\rfloor} \left\lfloor \frac{r}{2} \right\rfloor ! \ S^*_{B}(n,r)= \sum\limits_{k=1}^r {A^*_B(n,k)\binom{n- \left\lceil\frac{k}{2}\right\rceil}{ \left\lfloor\frac{r-k}{2}\right\rfloor}}.$$
\end{thm}
The proof uses arguments similar to those in the proof of Theorem~\ref{main_thm_B}, and is therefore omitted.
\begin{thebibliography}{Sta}
\bibitem{AR} R.M. Adin and Y. Roichman, {\it The flag major index and group actions on polynomial rings}, Europ. J. Combin. {\bf 22} (2001), 431--446.
\bibitem{Bala} P. Bala, {\it A 3-parameter family of generalized Stirling numbers} (2015). Electronic version: https://oeis.org/A143395/a143395.pdf
\bibitem{BB} A. Bj\"orner and F. Brenti, Combinatorics of Coxeter Groups, Graduate Texts in Math. {\bf 231}, Springer-Verlag, New York, 2005.
\bibitem{BjS} A. Bj\"orner and B. Sagan, {\em Subspace arrangements of type $B_n$ and $D_n$}, J. Alg. Combin. {\bf 5} (1996), 291--314.
\bibitem{BjW} A. Bj\"orner and M. Wachs, {\em Geometric constructed bases for homology of partition lattices of types $A$, $B$ and $D$}, Electron. J. Combin. {\bf 11} (2004), \#R3.
\bibitem{BS} A. Blass and B.E. Sagan, {\it Characteristic and Ehrhart polynomials}, J. Alg. Combin., {\bf 7}(2) (1998), 115--126.
\bibitem{Bo} M. Bona, Combinatorics of Permutations, Chapman \& Hall /CRC, 2004.
\bibitem{Boy} K.N. Boyadzhiev, {\it Close encounters with the Stirling numbers of the second kind}, Math. Magazine {\bf 85}(4) (2012), 252--266.
\bibitem{CW1} W.Y.C. Chen and D.G.L. Wang, {\em Minimally intersecting set partitions of type B}, Electron. J. Combin. {\bf 17} (2010), \#R22.
\bibitem{CW2} W.Y.C. Chen and D.G.L. Wang, {\em Singletons and adjacencies of set partitions of type B}, Discr. Math. {\bf 311}(6) (2011), 418--422.
\bibitem{Eu} L. Eulero, {\it Methodus universalis series summandi ulterius promota}. Commentarii academi\ae{} scientiarum imperialis Petropolitan\ae{}, {\bf 8} (1736), 147--158. Reprinted in his Opera Omnia, Series 1, Vol. 14, 124--137.
\bibitem{K} R. E. Knop, {\it A note on hyper cube partitions}, J. Combin. Theory (A) {\bf 15} (1973), 338--342.
\bibitem{MR} B.K. Miceli and J.B. Remmel, {\it Augmented rook boards and general product formulas}, Electron. J. Combin. {\bf 15} (2008), \#R85.
\bibitem{OT} P. Orlik and H. Terao, {\it Arrangements of Hyperplanes}, Grundlehren der mathematischen Wissenschaften {\bf 300}, Springer-Verlag, Berlin-Heidelberg-New York, 1992.
\bibitem{R} V. Reiner, {\it Non-crossing partitions for classical reflection groups}, Discrete Math. {\bf 177}(1--3) (1997), 195--222.
\bibitem{R1} V. Reiner, {\it Personal communication}, 2018.
\bibitem{RW} J.B. Remmel and M.L. Wachs, {\it Rook Theory, Generalized Stirling numbers and $(p,q)$-analogues}, Electron. J. Combin. {\bf 11} (2004), \#R84.
\bibitem{Stanley-arr} R.P. Stanley, {\it An introduction to hyperplane arrangements}, in: {\it Geometric Combinatorics} (E. Miller, V. Reiner, and B. Sturmfels, eds.), IAS/Park City Mathematics Series, vol. 13, American Mathematical Society, Providence, RI, 2007, 389--496.
\bibitem{EC1} R.P. Stanley, Enumerative Combinatorics, Vol. 1, Second edition. Cambridge University Press, 2012.
\bibitem{Su} R. Suter, {\it Two analogues of a classical sequence}. J. Int. Seq. {\bf 3} (2000), Article 00.1.8, 18 pages.
\bibitem{W} D.G.L. Wang, {\it The limiting distribution of the number of block pairs in Type B set partitions}, arXiv:1108.1264v1.
\bibitem{Za} T. Zaslavsky, {\it The geometry of root systems and signed graphs}. Amer. Math. Monthly {\bf 88} (1981), 88--105.
\end{thebibliography}
\end{document} |
\begin{document}
\baselineskip = 13.5pt
\title{\bf On weak-strong uniqueness and singular limit for the compressible Primitive Equations }
\author{ Hongjun Gao$^{1}$ \footnote{Email:gaohj@njnu.edu.cn}\ \ \
\v{S}\'{a}rka Ne\v{c}asov\'{a}$^2$
\footnote{Email: matus@math.cas.cz} \ \ \ Tong Tang$^{3,2}$ \footnote{Email: tt0507010156@126.com}\\
{\small 1.Institute of Mathematics, School of Mathematical Sciences,}\\
{\small Nanjing Normal University, Nanjing 210023, P.R. China}\\
{\small 2. Institute of Mathematics of the Academy of Sciences of the Czech Republic,} \\
{\small \v Zitn\' a 25, 11567, Praha 1, Czech Republic}\\
{\small 3. Department of Mathematics, College of Sciences,}\\
{\small Hohai University, Nanjing 210098, P.R. China}\\
\date{}}
\maketitle
\begin{abstract}
The paper addresses the weak-strong uniqueness property and singular limit for the compressible Primitive Equations (PE). We show that a weak solution coincides with the strong solution emanating from the same initial data. On the other hand, we prove that the compressible PE will approach the incompressible inviscid PE equations in the regime of low Mach number and large Reynolds number in the case of well-prepared initial data. To the best of the authors' knowledge, this is the first work to bridge the link between the compressible PE and the incompressible inviscid PE.
{{\bf Key words:} compressible Primitive Equations, singular limit, low Mach number, weak-strong uniqueness.}
{ {\bf 2010 Mathematics Subject Classifications}: 35Q30.}
\end{abstract}
\maketitle
\section{Introduction}\setcounter{equation}{0}
The earth is surrounded and occupied by atmosphere and ocean, which play an important role in human's life. From the mathematical point of view and numerical perspective, it is very complicated to use the full hydrodynamical and thermodynamical equations to describe the motion and fascinating phenomena of atmosphere and ocean. In order to simplify model, scientists introduce the Primitive Equations (PE) model in meteorology and geophysical fluid dynamics, which helps us to predict the long-term weather and detect the global climate changes. In this paper, we study the following Compressible Primitive Equations (CPE):
\begin{eqnarray}
\left\{
\begin{array}{llll} \partial_{t}\rho+\text{div}_x(\rho \mathbf{u})+\partial_z(\rho w)=0, \\
\partial_t(\rho \mathbf{u})+\textrm{div}_x(\rho\mathbf{u}\otimes\mathbf{u})+\partial_z(\rho\mathbf uw)+\nabla_x p(\rho)=\mu\Delta_x\mathbf u+\lambda\partial^2_{zz}\mathbf u,\\
\partial_zp(\rho)=0,
\end{array}\right.\label{a}
\end{eqnarray}
in $(0,T)\times\Omega$. Here $\Omega=\{(x,z)|x\in\mathbb{T}^2,0<z<1\}$, $x$ denotes the horizontal direction and $z$ denotes the vertical direction. $\rho=\rho(t,x)$, $\mathbf{u}(t,x,z)\in\mathbb{R}^2$ and $w(t,x,z)\in\mathbb{R}$
represent the density, the horizontal velocity and the vertical velocity, respectively.
From the hydrostatic balance equation $(1.1)_3$, it follows that {\bf the density $\rho$ is independent of $z$}.
$\mu>0$, $\lambda\geq0$ are the constant viscosity coefficients. The system is supplemented by the boundary conditions
\begin{eqnarray}
w|_{z=0}=w|_{z=1}=0,\hspace{4pt}\partial_z\mathbf u|_{z=0}=\partial_z\mathbf u|_{z=1}=0,
\end{eqnarray}
and initial data
\begin{eqnarray}
\rho\mathbf u|_{t=0}=\mathbf m_0(x,z),\hspace{3pt}\rho|_{t=0}=\rho_0(x).
\end{eqnarray}
The pressure $p(\rho)$ satisfies the barotropic pressure law where the pressure and the density are related by the following formula:
\begin{eqnarray}
p(\rho)=\rho^\gamma\hspace{5pt}(\gamma>1).
\end{eqnarray}
The PE model is widely used in meteorology and geophysical fluid dynamics, due to its accurate theoretical analysis and practical numerical computing. Concerning geophysical fluid dynamics we can refer to work by Chemin, Desjardins, Gallagher and Grenier \cite{ch} or Feireisl, Gallagher, Novotn\'{y} \cite{e}. There is a great number of results about PE, such as \cite{bg,b1,c2,c4,c5,l3,l4,s,t,ws}. We just mention some of results. Guill\'{e}n-Gonz\'{a}lez, Masmoudi and Rodr\'{\i}guez-Bellido \cite{gu} proved the local existence of strong solutions. The celebrated breakthrough result was made by Cao and Titi \cite{c1}. They were first who proved the global well-posedness of PE. After that a lot of scientists were focused on the dynamics and regularity of PE e.g. \cite{g1,g2,ju,kukavica}. Recently in \cite{c2,c4,c5}, the authors considered the strong solution for PE with vertical eddy diffusivity and only horizontal dissipation. About random perturbations of PE, the local and global strong
solution of PE can be referred to \cite{d1, d2, gao}, large deviation principles, see \cite{dong} and diffusion limit, see \cite{g3}. On the other hand, regarding the inviscid PE (hydrostatic incompressible Euler equations), the existence and uniqueness is an outstanding open problem. Only a few results are available. Under the convex horizontal velocity assumptions, Brenier \cite{b} proved the existence of smooth solutions in two dimensions. Then, Masmoudi and Wong \cite{m} utilized the weighted $H^s$ a priori estimates and obtained the existence, uniqueness and weak-strong uniqueness. Removing the convex horizontal velocity assumptions, they extended Brenier's result. By virtue of the Cauchy-Kowalewski theorem, the authors \cite{k} constructed a locally, unique and real-analytic solution. Notably, Brenier \cite{by} suggested that the existence problem may be ill-posed in Sobolev spaces. Further, Cao et al. \cite{c3} established the blow-up of a certain class of smooth solutions in finite time.
In order to show that the atmosphere and ocean have compressible properties, Ersoy et al. \cite{er1} consider that the vertical scale of atmosphere is significantly smaller than the horizontal scales and they derive the CPE from the compressible Navier-Stokes equations. To be precise, the CPE system is obtained by replacing the vertical velocity momentum equation with the hydrostatic balance equation. Compared with compressible Navier-Stokes equations, the regularity of vertical velocity is less regular than horizontal velocity in the CPE system. In the absence of sufficient information about the vertical velocity, it inevitably leads to difficulty for obtaining the existence of solutions. \emph{Lions, Temam and Wang \cite{l1,l2} were first to study the CPE and received fundamental results in this field.} Under smart $P$-coordinates, they reformulated the system into the classical PE with the incompressible condition. Later on, Gatapov and Kazhikhov \cite{g}, Ersoy and Ngom \cite{er2} proved the global existence of weak solutions in 2D case. Liu and Titi \cite{liu1} used the classical methods to prove the local existence of strong solutions in 3D case. Ersoy et al. \cite{er1}, Tang and Gao \cite{tang} showed the stability of weak solutions with the viscosity coefficients depending on the density. The stability means that a subsequence of weak solutions will converge to another weak solution if it satisfies some uniform bounds. Recently, based on the work \cite{b1,b2,b3,li,v}, Liu and Titi \cite{liu2} and independently Wang et al. \cite{w} used the B-D entropy to prove the global existence of weak solutions in the case where the viscosity coefficients depend on the density.
Our paper is divided into two parts. The first part concerns the weak-strong uniqueness of CPE. Recently, Liu and Titi \cite{liu3} studied the zero Mach number limit of CPE, proving it converges to incompressible PE, which is a breakthrough result to bridge the link between CPE and PE system. In the second part, inspired by \cite{liu3}, we investigate the singular limit of CPE, showing it converges to incompressible inviscid PE system. \emph{This is the first attempt to use the relative entropy method to study asymptotic limit for CPE.} Let us mention that the corner-stone analysis of our results is based on the relative energy inequality which was invented by Dafermos, see \cite{D}. It was introduced by Germain \cite{ge} and generalized by Feireisl \cite{e2} for compressible fluid model. Feireisl and his co-authors \cite{e3,e4} used the versatile tool to solve various problems. However, compared with the previous classical results, there is significant difference in the process of using relative energy inequality to CPE model due to the absence of the information on the vertical velocity. Therefore, it is not straightforward to apply the method from Navier-Stokes to CPE. We utilize the special structure of CPE to find the deeper relationship and reveal the important feature of CPE.
The paper is organized as follows. In Section 2, we introduce the dissipative weak solutions, relative energy and state our first theorem. In Section 3, we prove the weak-strong uniqueness. We recall the target system, state the singular limit theorem and derive the necessary uniform bounds in Section 4. Section 5 is devoted to proof of the convergence in the case of well-prepared initial data.
\vskip 0.5cm
\noindent {\bf Part I: Weak-Strong uniqueness}
\vskip 0.2cm
In this part, we focus on the weak-strong uniqueness of the CPE system.
\section{Preliminaries and main result}
First of all, we should point out that a proper notion of weak solution to CPE has not been well understood. Recently, Bresch and Jabin \cite{br} considered a compactness method, different from those of Lions or Feireisl, which can similarly be applied to anisotropic stress tensors. They obtain the global existence of weak solutions if $|\mu-\lambda|$ is not too large. Let us state one of the possible definitions here.
\subsection{Dissipative weak solutions}
\begin{definition}\label{def1}
We say that $[\rho,\mathbf u,w]$ is a dissipative weak solution to the system \eqref{a}, supplemented with the initial data (1.3) and the pressure law (1.4), if $\rho=\rho(x,t)$ and
\begin{align}
\mathbf u\in L^2(0,T;H^1(\Omega)),\hspace{3pt} \rho|\mathbf u|^2\in L^\infty(0,T; L^1(\Omega)),
\hspace{3pt}\rho\in L^\infty(0,T;L^\gamma(\Omega)\cap L^1(\Omega)).
\end{align}
\noindent
$\bullet$ the continuity equation
\begin{align}
[\int_\Omega\rho\varphi dxdz]^{t=\tau}_{t=0}=\int^\tau_0\int_{\Omega}\rho\partial_t\varphi+\rho\mathbf{u}\nabla_x\varphi+\rho w\partial_z\varphi dxdzdt,
\end{align}
holds for all $\varphi\in C^\infty_c([0,T)\times\Omega)$;
\noindent
$\bullet$
the momentum equation
\begin{align}
[\int_\Omega\rho\mathbf u\varphi dxdz]^{t=\tau}_{t=0}&=\int^\tau_0\int_{\Omega}\rho\mathbf{u}\partial_t\varphi+ \rho\mathbf{u}\otimes\mathbf{u}:\nabla_x\varphi+\rho\mathbf uw\partial_z\varphi+ p(\rho)\text{div}\varphi dxdzdt\nonumber\\
&\hspace{8pt}-\int^\tau_0\int_{\Omega}[\mu\nabla_x\mathbf u:\nabla_x\varphi+\lambda\partial_z\mathbf u\partial_z\varphi]dxdzdt,
\end{align}
holds for all $\varphi\in C^\infty_c([0,T)\times\Omega)$,
\noindent
$\bullet$
the energy inequality
\begin{align}
[\int_{\Omega}\frac{1}{2}\rho|\mathbf{u}|^2+P(\rho)-P'(\overline{\rho})(\rho-\overline{\rho})-P(\overline{\rho})dxdz]|^{t=\tau}_{t=0}
+\int^\tau_0\int_\Omega(\mu|\nabla_x\mathbf u|^2+\lambda|\partial_z\mathbf u|^2)dxdzdt\leq 0,
\end{align}
holds for a.a.\ $\tau\in(0,T)$ and an arbitrary constant $\overline{\rho}$, where $P(\rho)=\rho\int^\rho_1\frac{p(z)}{z^2}dz$.
Moreover, since there is no information about $w$, we need the following equation:
\begin{align}
\rho w(x,z,t)=-\rm{div}_x(\rho\widetilde{\mathbf u})+z\rm{div}_x(\rho\overline{\mathbf u}), \hspace{4pt}
\text{in the sense of} \hspace{4pt}H^{-1}(\Omega),
\label{b1}
\end{align}
where
\begin{align*}
\widetilde{\mathbf u}(x,z,t)=\int^z_0\mathbf u(x,s,t)ds,\hspace{5pt}\overline{\mathbf u}(x,t)=\int^1_0\mathbf u(x,z,t)dz.
\end{align*}
\end{definition}
We should emphasize that \eqref{b1} is the key step to obtain the existence of weak solution in \cite{liu2,w}, which is inspired by incompressible case.
\subsection{Relative entropy inequality}
Motivated by \cite{e2,e3}, for any finite weak solution $(\rho,\mathbf u,w)$ to the CPE system, we introduce the relative energy functional
\begin{align}
\mathcal{E}(\rho,\mathbf{u}|r, \mathbf{U})&=\int_{\Omega}[\frac{1}{2}\rho|\mathbf u-\mathbf U|^2+P(\rho)-P'(r)(\rho-r)-P(r)]dxdz\nonumber\\
&=\int_\Omega(\frac{1}{2}\rho|\mathbf u|^2+P(\rho))dxdz-\int_\Omega\rho\mathbf u\cdot\mathbf Udxdz
+\int_\Omega\rho[\frac{|\mathbf U|^2}{2}-P'(r)]dxdz+\int_\Omega p(r)dxdz\nonumber\\
&=\sum^4_{i=1}I_i,\label{a1}
\end{align}
where $r>0$, $\mathbf U$ are smooth ``test'' functions, $r$, $\mathbf U$ compactly supported in $\Omega$.
\begin{lemma}\label{relativeentropy}
Let $(\rho,\mathbf{u}, w)$ be a dissipative weak solution introduced in Definition \ref{def1}. Then $(\rho,\mathbf{u}, w)$
satisfies the relative entropy inequality
\begin{align}
\mathcal{E}&(\rho,\mathbf{u}|r,\mathbf U)|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega\big{(}\mu\nabla_x\mathbf u\cdot(\nabla_x\mathbf u-\nabla_x\mathbf U)+\lambda\partial_z\mathbf u(\partial_z\mathbf u-\partial_z\mathbf U)\big{)}dxdzdt\nonumber\\
&\leq\int^\tau_0\int_{\Omega}\rho(\mathbf U-\mathbf u)\partial_t\mathbf U+\rho\mathbf u(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf U
+\rho w(\mathbf U-\mathbf u)\cdot\partial_z\mathbf U-p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&\hspace{15pt}-\int^\tau_0\int_{\Omega}P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt
+\int^\tau_0\int_{\Omega}\partial_tp(r)dxdzdt.
\end{align}
\end{lemma}
\noindent\textbf{Proof:}
From the weak formulation and energy inequality (2.2)-(2.4) we deduce
\begin{align}
&I_1|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega(\mu|\nabla_x\mathbf u|^2+\lambda|\partial_z\mathbf u|^2)dxdzdt\leq0,\\
&I_2|^{t=\tau}_{t=0}=-\int^\tau_0\int_\Omega\rho\mathbf u\partial_t\mathbf U+\rho\mathbf u\otimes\mathbf u:\nabla_x\mathbf U+
\rho\mathbf uw\partial_z\mathbf U+p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&\hspace{40pt}+\int^\tau_0\int_\Omega\mu\nabla_x\mathbf u:\nabla_x\mathbf U+\lambda\partial_z\mathbf u\partial_z\mathbf Udxdzdt,\\
&I_3|^{t=\tau}_{t=0}=\int^\tau_0\int_\Omega\rho\partial_t\frac{|\mathbf U|^2}{2}+\rho\mathbf u\cdot\nabla_x\frac{|\mathbf U|^2}{2}+\rho w\partial_z\frac{|\mathbf U|^2}{2}dxdzdt\nonumber\\
&\hspace{40pt}-\int^\tau_0\int_\Omega\rho\partial_tP'(r)+\rho\mathbf u\cdot\nabla_xP'(r)+\rho w\partial_zP'(r)dxdzdt\nonumber\\
&\hspace{20pt}=\int^\tau_0\int_\Omega\rho\mathbf U\partial_t\mathbf U+\rho\mathbf u\mathbf U\cdot\nabla_x\mathbf U+\rho w\mathbf U\partial_z\mathbf Udxdzdt\nonumber\\
&\hspace{30pt}-\int^\tau_0\int_\Omega\rho P''(r)\partial_tr+P''(r)\rho\mathbf u\cdot\nabla_xr dxdzdt,\\
&I_4|^{t=\tau}_{t=0}=[\int_\Omega p(\rho)dxdz]|^{t=\tau}_{t=0}.
\end{align}
Summing (2.6)-(2.10) together, we obtain
\begin{align}
\mathcal{E}&(\rho,\mathbf{u}|r,\mathbf U)|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega\big{(}\mu\nabla_x\mathbf u\cdot(\nabla_x\mathbf u-\nabla_x\mathbf U)+\lambda\partial_z\mathbf u(\partial_z\mathbf u-\partial_z\mathbf U)\big{)}dxdzdt\nonumber\\
&\leq\int^\tau_0\int_{\Omega}\rho(\mathbf U-\mathbf u)\partial_t\mathbf U+\rho\mathbf u(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf U
+\rho w(\mathbf U-\mathbf u)\cdot\partial_z\mathbf U-p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&\hspace{15pt}-\int^\tau_0\int_{\Omega}P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt
+\int^\tau_0\int_{\Omega}\partial_tp(r)dxdzdt.
\end{align}
\subsection{Main result}
We say that $(r,\mathbf U,W)$ is a strong solution to the CPE system (1.1)--(1.4) in $(0,T)\times\Omega$, if
\begin{align*}
&r^\frac{1}{2}\in L^\infty(0,T;H^2(\Omega)),\hspace{3pt}\partial_tr^\frac{1}{2}\in L^\infty(0,T;H^1(\Omega)),\hspace{3pt}r>0\hspace{3pt}\text{for all}\hspace{3pt}(t,x),\\
&\mathbf U\in L^\infty(0,T;H^3(\Omega))\cap L^2(0,T;H^4(\Omega)),\hspace{3pt} \partial_t\mathbf U\in L^2(0,T; H^2(\Omega)),
\end{align*}
with initial data $r^\frac{1}{2}_0\in H^2(\Omega)$, $r_0>0$ and $\mathbf U_0\in H^3(\Omega)$.
Now, we are ready to state our first result.
\begin{theorem}
Let $\gamma>6$ and let $(\rho,\mathbf u,w)$ be a dissipative weak solution to the CPE system (1.1)--(1.4) in $(0,T)\times\Omega$. Let $(r,\mathbf U, W)$ be a strong solution to the same problem, emanating from the same initial data. Then,
\begin{align*}
\rho=r,\hspace{5pt}\mathbf u=\mathbf U,\hspace{4pt}\text{in}\hspace{3pt}(0,T)\times\Omega.
\end{align*}
\end{theorem}
\begin{remark}
Liu and Titi \cite{liu1} obtained the local existence of strong solutions to CPE. It is important to point out that our result holds under more regularity than the strong solutions obtained in \cite{liu1}.
\end{remark}
Section 3 is devoted to the proof of the above theorem.
\section{Weak-strong uniqueness}
The proof of Theorem 2.1 relies on the relative entropy inequality of Lemma \ref{relativeentropy}, obtained by taking the strong solution $[r,\mathbf U, W]$ as test functions in the relative energy functional \eqref{a1}.
\subsection{Step 1}
We write
\begin{align*}
\int_\Omega\rho\mathbf u(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf Udxdz=
\int_\Omega\rho(\mathbf u-\mathbf U)(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf Udxdz
+\int_\Omega\rho\mathbf U(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf Udxdz.
\end{align*}
As $[r,\mathbf U, W]$ is a strong solution, it is easy to obtain that
\begin{align}
\int_\Omega\rho(\mathbf u-\mathbf U)(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf Udxdz
\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U).
\end{align}
Moreover, the momentum equation reads as
\begin{align*}
(r\mathbf U)_t+\text{div}(r\mathbf U\otimes\mathbf U)+\partial_z(r\mathbf UW)+\nabla_xp(r)=\mu\Delta_x\mathbf U+\lambda\partial_{zz}\mathbf U,
\end{align*}
implying that
\begin{align*}
\mathbf U_t+\mathbf U\cdot\nabla_x\mathbf U+W\partial_z\mathbf U=-\frac{1}{r}\nabla_xp(r)+\frac{\mu}{r}\Delta_x\mathbf U
+\frac{\lambda}{r}\partial_{zz}\mathbf U.
\end{align*}
So we rewrite
\begin{align*}
\int_\Omega\rho(\mathbf U-\mathbf u)\cdot\partial_t\mathbf U+
\rho\mathbf U(\mathbf U-\mathbf u)\cdot\nabla_x\mathbf U
+\rho W(\mathbf U-\mathbf u)\cdot\partial_z\mathbf U+\rho(w-W)(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz\\
=\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)(-\nabla_xp(r)+\mu\Delta_x\mathbf U+\lambda\partial_{zz}\mathbf U)dxdz
+\int_\Omega\rho(w-W)(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz.
\end{align*}
Thus, we obtain that
\begin{align*}
\mathcal{E}&(\rho,\mathbf{u}|r,\mathbf U)|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega\big{(}\mu\nabla_x\mathbf u\cdot(\nabla_x\mathbf u-\nabla_x\mathbf U)+\lambda\partial_z\mathbf u(\partial_z\mathbf u-\partial_z\mathbf U)\big{)}dxdzdt\nonumber\\
&\leq C\int^\tau_0\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)dt
-\int^\tau_0\int_{\Omega}P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt\nonumber\\
&\hspace{8pt}+\int^\tau_0\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)(\mu\Delta_x\mathbf U+\lambda\partial_{zz}\mathbf U)dxdz
-\int^\tau_0\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)\nabla_xp(r)dxdz\nonumber\\
&\hspace{8pt}+\int^\tau_0\int_\Omega\rho(w-W)(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdzdt+\int^\tau_0\int_\Omega\partial_tp(r)dxdzdt
-\int^\tau_0\int_\Omega p(\rho)\text{div}_x\mathbf Udxdzdt.\nonumber\\
\end{align*}
Before estimating, we should recall the following useful inequality from \cite{e2}:
\begin{equation} \label{pres}
P(\rho)-P'(r)(\rho-r)-P(r)\geq
\begin{cases}
C|\rho-r|^2, & \text{when } \frac{r}{2}<\rho<r,\\
C(1+\rho^\gamma), & \text{otherwise},
\end{cases}\nonumber
\end{equation}
Moreover, from \cite{e2}, we learn that
\begin{align}
&\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)(t)\in L^\infty(0,T),\hspace{3pt}
\int_\Omega\chi_{\rho\geq r}\rho^{\gamma}dxdz\leq C\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)(t),\nonumber\\
&\int_\Omega\chi_{\rho\leq \frac{r}{2}}1dxdz\leq C\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)(t),\hspace{3pt}
\int_\Omega\chi_{\frac{r}{2}<\rho<r}(\rho-r)^2dxdz\leq C\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)(t).\label{a3}
\end{align}
The main difficulty is to estimate the complicated nonlinear term $\int_\Omega\rho(w-W)(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz$, we rewrite it as
\begin{align}
\int_\Omega\rho(w-W)(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz
=\int_\Omega\rho w(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz-\int_\Omega\rho W(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz.\label{b}
\end{align}
According to \cite{e2,kr}, we divide the second term on the right side of (3.3) into three parts
\begin{align}
\int_\Omega&\rho W(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz\nonumber\nonumber\\
&=\int_\Omega\chi_{\rho\leq \frac{r}{2}}\rho W(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz
+\int_\Omega\chi_{\frac{r}{2}<\rho<r}\rho W(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz+\int_\Omega\chi_{\rho\geq r}\rho W(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz\nonumber\nonumber\\
&\leq \|\chi_{\rho\leq \frac{r}{2}}1\|_{L^2(\Omega)}\|r\|_{L^\infty}\|W\partial_z\mathbf U\|_{L^3}\|\mathbf U-\mathbf u\|_{L^6(\Omega)}
+\int_\Omega\chi_{\rho\geq r}\rho^{\frac{\gamma}{2}}W\partial_z\mathbf U\cdot(\mathbf U-\mathbf u)dxdz\nonumber\\
&\hspace{8pt}+C\|\chi_{\frac{r}{2}<\rho< r}(\rho-r)\|_{L^2(\Omega)}\|W\partial_z\mathbf U\|_{L^3}\|\mathbf U-\mathbf u\|_{L^6(\Omega)}\nonumber\\
&\leq C\int_\Omega\chi_{\rho\leq \frac{r}{2}}1dxdz +C\int_\Omega\chi_{\frac{r}{2}<\rho<r}(\rho-r)^2dxdz
+C\int_\Omega\chi_{\rho\geq r}\rho^\gamma dxdz
+\delta\|\mathbf U-\mathbf u\|^2_{L^6(\Omega)}\nonumber\\
&\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U)+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}
+\delta\|\partial_z\mathbf U-\partial_z\mathbf u\|^2_{L^2(\Omega)},\label{33c}
\end{align}
where in the last inequality, we have used the following celebrated inequality from Feireisl \cite{e1}:
\begin{lemma}
Let $2\leq p\leq6$, and $\rho\geq0$ be such that $0<\int_\Omega\rho dx\leq M$ and $\int_\Omega\rho^\gamma dx\leq E_0$ for some $\gamma>1$. Then
\begin{align*}
\|f\|_{L^p(\Omega)}\leq C\|\nabla f\|_{L^2(\Omega)}+\|\rho^{\frac{1}{2}}f\|_{L^2(\Omega)},
\end{align*}
where $C$ depends on $M$ and $E_0$.
\end{lemma}
On the other hand, we insert \eqref{b1} into the first term on the right-hand side of \eqref{b} and get
\begin{align}
\int_\Omega\rho w&(\mathbf U-\mathbf u)\cdot\partial_z\mathbf Udxdz\nonumber\\
&=\int_\Omega[-\text{div}_x(\rho\widetilde{\mathbf u})+z\text{div}_x(\rho\overline{\mathbf u})]\partial_z\mathbf U
\cdot(\mathbf U-\mathbf u)dxdz\nonumber\\
&=\int_\Omega(\rho\widetilde{\mathbf u}-z\rho\overline{\mathbf u})\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz+\int_\Omega(\rho\widetilde{\mathbf u}-z\rho\overline{\mathbf u})\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz.\label{c}
\end{align}
In the following, we estimate the terms on the right-hand side of \eqref{c}. We choose the most complicated terms as examples; the remaining terms can be analyzed similarly. Firstly, we deal with $\int_\Omega\rho\widetilde{\mathbf u}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz$:
\begin{align*}
\int_\Omega\rho\widetilde{\mathbf u}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz
&=\int_\Omega\rho(\widetilde{\mathbf u}-\widetilde{\mathbf U})\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz
+\int_\Omega\rho\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz\\
&=J_1+J_2,
\end{align*}
where $\widetilde{\mathbf U}=\int^z_0\mathbf U(x,s,t)ds$.
Similar to the above analysis, we divide the term $J_2$ into three parts
\begin{align*}
J_2&=\int_\Omega\rho\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz\\
&=\int_\Omega\chi_{\rho\leq \frac{r}{2}}\rho\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz
+\int_\Omega\chi_{\frac{r}{2}<\rho<r}\rho\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz\\
&\hspace{5pt}+\int_\Omega\chi_{\rho\geq r}\rho\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz\\
&\leq \|\chi_{\rho\leq \frac{r}{2}}1\|_{L^2(\Omega)}\|r\|_{L^\infty}\|\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\|_{L^3}\|\mathbf U-\mathbf u\|_{L^6(\Omega)}
+\|\chi_{\rho\geq r}\rho^{\frac{\gamma}{2}}\|_{L^2(\Omega)}\|\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\|_{L^3(\Omega)}\|\mathbf U-\mathbf u\|_{L^6(\Omega)}\\
&\hspace{8pt}+C\|\chi_{\frac{r}{2}<\rho<r}(\rho-r)\|_{L^2(\Omega)}\|\widetilde{\mathbf U}\partial_z\nabla_x\mathbf U\|_{L^3(\Omega)}\|\mathbf U-\mathbf u\|_{L^6(\Omega)}\\
&\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t)+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}
+\delta\|\partial_z\mathbf U-\partial_z\mathbf u\|^2_{L^2(\Omega)}.
\end{align*}
On the other hand, by virtue of Cauchy inequality, we obtain
\begin{align}
J_1&=\int_\Omega\rho(\widetilde{\mathbf u}-\widetilde{\mathbf U})\partial_z\nabla_x\mathbf U\cdot(\mathbf U-\mathbf u)dxdz\nonumber\\
&\leq\|\partial_z\nabla_x\mathbf U\|_{L^\infty}\int_\Omega\rho|\widetilde{\mathbf u}-\widetilde{\mathbf U}|^2dxdz+\int_\Omega\rho|\mathbf u-\mathbf U|^2dxdz\nonumber\\
&\leq C\int_\Omega\rho|\int^z_0(\mathbf u(s)-\mathbf U(s))ds|^2dxdz+\mathcal{E}(\rho,\mathbf u|r,\mathbf U)\nonumber\\
&\leq C\int_\Omega\rho\big{(}\int^1_0|\mathbf u-\mathbf U|^2ds\big{)}dxdz+\mathcal{E}(\rho,\mathbf u|r,\mathbf U)\nonumber\\
&\leq C\int^1_0\int_\Omega \rho|\mathbf u-\mathbf U|^2dxdzds+\mathcal{E}(\rho,\mathbf u|r,\mathbf U)\nonumber\\
&\leq C\int_\Omega \rho|\mathbf u-\mathbf U|^2dxdz+\mathcal{E}(\rho,\mathbf u|r,\mathbf U)\nonumber\\
&\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U).\label{3aaa}
\end{align}
Secondly, we will tackle another complicated nonlinear term, $\int_\Omega\rho\widetilde{\mathbf u}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz$. It is easy to rewrite it as
\begin{align}
\int_\Omega\rho\widetilde{\mathbf u}&\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\nonumber\\
&=\int_\Omega\chi_{\rho< r}\rho\widetilde{\mathbf u}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz
+\int_\Omega\chi_{\rho\geq r}\rho\widetilde{\mathbf u}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz,\label{2a}
\end{align}
where
\begin{align*}
\int_\Omega\chi_{\rho< r}&\rho\widetilde{\mathbf u}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\\
&=\int_{\Omega}\chi_{\rho<r}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf U})\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz
+\int_{\Omega}\chi_{\rho<r}\rho\widetilde{\mathbf U}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\\
&=\int_{\Omega}\chi_{\rho<r}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf U})\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz
+\int_{\Omega}\chi_{\frac{r}{2}<\rho<r}\rho\widetilde{\mathbf U}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\\
&\hspace{20pt}+\int_{\Omega}\chi_{\rho\leq \frac{r}{2}}\rho\widetilde{\mathbf U}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\\
&\leq \|\chi_{\rho<r}\rho^{\frac{1}{2}}\|_{L^\infty(\Omega)}\|\sqrt{\rho}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|_{L^2(\Omega)}\|\partial_z\mathbf U\|_{L^\infty(\Omega)}\|\nabla_x\mathbf U-\nabla_x\mathbf u\|_{L^2(\Omega)}\\
&\hspace{10pt}+\|\chi_{\frac{r}{2}<\rho< r}\rho\|_{L^2(\Omega)}\|\widetilde{\mathbf U}\partial_z\mathbf U\|_{L^\infty(\Omega)}\|\nabla_x\mathbf U-\nabla_x\mathbf u\|_{L^2(\Omega)}\\
&\hspace{10pt}+\|\chi_{\rho\leq \frac{r}{2}}1\|_{L^2(\Omega)}\|r\|_{L^\infty(\Omega)}
\|\widetilde{\mathbf U}\partial_z\mathbf U\|_{L^\infty(\Omega)}\|\nabla_x\mathbf U-\nabla_x\mathbf u\|_{L^2(\Omega)}\\
&\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t)+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}.
\end{align*}
Then we will deal with the second term on the right side of \eqref{2a}:
\begin{align}
\int_\Omega\chi_{\rho\geq r}&\rho\widetilde{\mathbf u}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\nonumber\\
&=\int_\Omega\chi_{\rho\geq r}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf U})\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz
+\int_\Omega\chi_{\rho\geq r}\rho\widetilde{\mathbf U}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\nonumber\\
&=K_1+K_2,\label{333}
\end{align}
where
\begin{align}
K_2&\leq \int_\Omega\chi_{\rho\geq r}\rho^{\frac{\gamma}{2}}\widetilde{\mathbf U}\partial_z\mathbf U\cdot(\nabla_x\mathbf U-\nabla_x\mathbf u)dxdz\nonumber\\
&\leq \|\chi_{\rho\geq r}\rho^{\frac{\gamma}{2}}\|_{L^2(\Omega)}
\|\widetilde{\mathbf U}\partial_z\mathbf U\|_{L^\infty(\Omega)}
\|\nabla_x\mathbf U-\nabla_x\mathbf u\|_{L^2(\Omega)}\nonumber\\
&\leq C\|\chi_{\rho\geq r}\rho^{\frac{\gamma}{2}}\|^2_{L^2(\Omega)}
+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}\nonumber\\
&\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t)+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}.
\end{align}
Next, by virtue of H\"{o}lder's inequality, we get
\begin{align*}
K_1&\leq \|\chi_{\rho\geq r}\rho\|_{L^\gamma(\Omega)}
\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|_{L^3(\Omega)}
\|\partial_z\mathbf U\|_{L^\frac{6\gamma}{\gamma-6}(\Omega)}
\|\nabla_x\mathbf U-\nabla_x\mathbf u\|_{L^2(\Omega)}\nonumber\\
&\leq C\|\chi_{\rho\geq r}\rho\|^2_{L^\gamma(\Omega)}
\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|^2_{L^3(\Omega)}
+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}\nonumber\\
&\leq C\|\chi_{\rho\geq r}\rho\|^2_{L^\gamma(\Omega)}
\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|_{L^2(\Omega)}
\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|_{H^1(\Omega)}
+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}\nonumber\\
&\leq C\|\chi_{\rho\geq r}\rho\|^4_{L^\gamma(\Omega)}
\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|^2_{L^2(\Omega)}
+\delta\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|^2_{L^2(\Omega)}
+\delta\|\nabla_x\widetilde{\mathbf U}-\nabla_x\widetilde{\mathbf u}\|^2_{L^2(\Omega)}\nonumber\\
&\hspace{10pt}+\delta\|\partial_z\widetilde{\mathbf U}-\partial_z\widetilde{\mathbf u}\|^2_{L^2(\Omega)}
+\delta\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)},
\end{align*}
where we have used the interpolation inequality
\begin{align*}
\|f\|_{L^3}\leq\|f\|^{\frac{1}{2}}_{L^2}\|f\|^{\frac{1}{2}}_{H^1}.
\end{align*}
According to \eqref{a3} and \eqref{3aaa}, we have
\begin{align*}
\|\chi_{\rho\geq r}\rho\|^4_{L^\gamma(\Omega)}
=(\int_{\rho\geq r}\rho^\gamma dxdz)^{\frac{4}{\gamma}}
\leq\mathcal{E}(\rho,\mathbf u|r,\mathbf U)^{\frac{4}{\gamma}}(t),
\end{align*}
and
\begin{align*}
\|\chi_{\rho\geq r}(\widetilde{\mathbf u}-\widetilde{\mathbf U})\|^2_{L^2(\Omega)}
=\int_{\rho\geq r}|\widetilde{\mathbf u}-\widetilde{\mathbf U}|^2dxdz
=\int_{\rho\geq r}\frac{1}{\rho}\rho|\widetilde{\mathbf u}-\widetilde{\mathbf U}|^2dxdz
\leq \Big\|\frac{1}{r}\Big\|_{L^\infty(\Omega)}\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t).
\end{align*}
Similar to the estimate of \eqref{3aaa}, we obtain
\begin{align*}
\|\nabla_x\widetilde{\mathbf U}-\nabla_x\widetilde{\mathbf u}\|^2_{L^2(\Omega)}
\leq \|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)},
\hspace{5pt}\|\partial_z\widetilde{\mathbf U}-\partial_z\widetilde{\mathbf u}\|^2_{L^2(\Omega)}
\leq\|\partial_z\mathbf U-\partial_z\mathbf u\|^2_{L^2(\Omega)}.
\end{align*}
Combining the above estimates, we get
\begin{align*}
\int^\tau_0K_1dt\leq C\int^\tau_0h(t)\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t)dt
+\delta\int^\tau_0\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^2(\Omega)}+
\|\partial_z\mathbf U-\partial_z\mathbf u\|^2_{L^2(\Omega)}dt,
\end{align*}
where $h(t)\in L^1(0,T)$.
Using the same method we estimate the remaining terms. Therefore, we conclude that
\begin{align*}
\mathcal{E}&(\rho,\mathbf{u}|r,\mathbf U)|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega\big{(}\mu\nabla_x\mathbf u\cdot(\nabla_x\mathbf u-\nabla_x\mathbf U)+\lambda\partial_z\mathbf u(\partial_z\mathbf u-\partial_z\mathbf U)\big{)}dxdzdt\nonumber\\
&\leq C\int^\tau_0h(t)\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)dt+\delta\int^\tau_0\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^{2}(\Omega)}
+\|\partial_z\mathbf U-\partial_z\mathbf u\|^2_{L^{2}(\Omega)}dt\nonumber\\
&\hspace{8pt}+\int^\tau_0\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)(\mu\Delta_x\mathbf U+\lambda\partial_{zz}\mathbf U)dxdzdt
-\int^\tau_0\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)\nabla_xp(r)dxdzdt\nonumber\\
&\hspace{8pt}-\int^\tau_0\int_{\Omega}P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt
+\int^\tau_0\int_\Omega\partial_tp(r)dxdzdt-\int^\tau_0\int_\Omega p(\rho)\text{div}_x\mathbf Udxdzdt.
\end{align*}
Then we deduce that
\begin{align}
\mathcal{E}&(\rho,\mathbf{u}|r,\mathbf U)|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega\big{(}\mu(\nabla_x\mathbf u-\nabla_x\mathbf U):(\nabla_x\mathbf u-\nabla_x\mathbf U)+\lambda(\partial_z\mathbf u-\partial_z\mathbf U)^2\big{)}dxdzdt\nonumber\\
&\leq C\int^\tau_0h(t)\mathcal{E}(\rho,\mathbf{u}|r,\mathbf U)dt+\delta\int^\tau_0\|\nabla_x\mathbf U-\nabla_x\mathbf u\|^2_{L^{2}(\Omega)}
+\|\partial_z\mathbf U-\partial_z\mathbf u\|^2_{L^{2}(\Omega)}dt\nonumber\\
&\hspace{8pt}+\int^\tau_0\int_\Omega(\frac{\rho}{r}-1)(\mathbf U-\mathbf u)(\mu\Delta_x\mathbf U+\lambda\partial_{zz}\mathbf U)dxdzdt
-\int^\tau_0\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)\nabla_xp(r)dxdzdt\nonumber\\
&\hspace{8pt}-\int^\tau_0\int_{\Omega}P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt
+\int^\tau_0\int_\Omega\partial_tp(r)dxdzdt-\int^\tau_0\int_\Omega p(\rho)\text{div}_x\mathbf Udxdzdt.
\end{align}
It is easy to check that
\begin{align}
-\int^\tau_0&\int_\Omega\frac{\rho}{r}(\mathbf U-\mathbf u)\nabla_xp(r)+p(\rho)\text{div}_x\mathbf U+P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt
+\int^\tau_0\int_\Omega\partial_tp(r)dxdzdt\nonumber\\
&=-\int^\tau_0\int_\Omega(\rho-r)P''(r)\partial_tr+P''(r)\rho\mathbf u\cdot\nabla_xr+\rho P''(r)(\mathbf U-\mathbf u)\cdot\nabla_xr+p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&=-\int^\tau_0\int_\Omega(\rho-r)P''(r)\partial_tr+P''(r)\rho\mathbf U\cdot\nabla_xr+p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&=-\int^\tau_0\int_\Omega\rho P''(r)(\partial_tr+\mathbf U\cdot\nabla_xr)
-rP''(r)\partial_tr+p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&=-\int^\tau_0\int_\Omega\rho P''(r)(-r\text{div}_x\mathbf U-r\partial_zW)
-rP''(r)\partial_tr+p(\rho)\text{div}_x\mathbf Udxdzdt\nonumber\\
&=-\int^\tau_0\int_\Omega\text{div}_x\mathbf U\big{(}p(\rho)-p'(r)(\rho-r)-p(r)\big{)}dxdzdt
+\int^\tau_0\int_\Omega p'(r)(\rho-r)\partial_zWdxdzdt,
\end{align}
where we have used the fact that $\partial_tr+\text{div}_x\mathbf Ur+\mathbf U\cdot\nabla_xr+r\partial_zW=0$.
Recalling the boundary condition $W|_{z=0,1}=0$, we have
\begin{align}
\int^\tau_0\int_\Omega p'(r)(\rho-r)\partial_zWdxdzdt
=\int^\tau_0dt\int_{\mathbb{T}^2}(\int^1_0\partial_zWdz)p'(r)(\rho-r)dx=0.
\end{align}
Moreover, we can use the method of \cite{kr}, Section 6.3, to get
\begin{align}
\int_\Omega&(\frac{\rho}{r}-1)(\mathbf U-\mathbf u)(\mu\Delta_x\mathbf U+\lambda\partial_{zz}\mathbf U)dxdz\nonumber\\
&\leq C\mathcal{E}(\rho,\mathbf u|r,\mathbf U)+\delta\|\nabla_x\mathbf u-\nabla_x\mathbf U\|^2_{L^2}
+\delta\|\partial_z\mathbf u-\partial_z\mathbf U\|^2_{L^2}.
\end{align}
Putting (3.10)--(3.13) together, we have
\begin{align}
\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(\tau)\leq C\int^\tau_0h(t)\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t)dt.
\end{align}
Then, applying Gronwall's inequality, we finish the proof of Theorem 2.1.
\vskip 0.5cm
\noindent {\bf Part II: Singular limit of CPE}
\vskip 0.2cm
This part is devoted to studying the singular limit of the CPE in the case of well-prepared initial data.
\section{Preliminaries and main result}
As shown in the notable survey paper by Klein \cite{Klein}, singular limits of fluids play an important role in mathematics, physics and meteorology. We consider the following scaled CPE system with Coriolis force:
\begin{equation}
\left\{
\begin{array}{llll} \partial_{t}\rho_\epsilon+\text{div}_x(\rho_\epsilon \mathbf{u}_\epsilon)+\partial_z(\rho_\epsilon w_\epsilon)=0, \\
\partial_t(\rho_\epsilon\mathbf{u}_\epsilon)+\textrm{div}_x(\rho_\epsilon\mathbf{u}_\epsilon\otimes\mathbf{u}_\epsilon)
+\partial_z(\rho_\epsilon\mathbf u_\epsilon w_\epsilon)
+\rho_\epsilon\mathbf u_\epsilon\times\omega+\frac{1}{\epsilon^2}\nabla_x p(\rho_\epsilon)=\mu\Delta_x\mathbf u_\epsilon+\lambda\partial^2_{zz}\mathbf u_\epsilon,\\
\partial_zp(\rho_\epsilon)=0,
\end{array}\right.\label{4a}
\end{equation}
where $\epsilon$ represents the Mach number, $\omega=(0,0,1)$ is the rotation axis. The boundary conditions and pressure are the same as (1.2) and (1.4). Problem \eqref{4a} is supplemented with initial data
\begin{align}
\rho_\epsilon (0, \cdot) = \rho_{0, \epsilon} =\overline{\rho} + \epsilon \rho^{(1)}_{0, \epsilon},\hspace{5pt}
\mathbf{u}_\epsilon (0, \cdot) = \mathbf{u}_{0, \epsilon},
\end{align}
where the constant $\overline{\rho}$ in (4.2) can be chosen arbitrarily.
There is a quite broad consensus that the compressible flows become incompressible in the low Mach number limit. In the following sections, we assume $\rho=\rho_\epsilon$ and $\mathbf u=\mathbf u_\epsilon$. In this part, our goal is to study system \eqref{4a} in the case of singular limit $\epsilon\rightarrow0$, meaning the inviscid, incompressible limit. Precisely speaking, we want to show that the weak solutions of CPE converge to the incompressible PE system.
\subsection{Target equation}
The expected limit problem reads
\begin{align}
&\partial_t\mathbf{V}+(\mathbf{V}\cdot\nabla_x)\mathbf{V}+\partial_z\mathbf VW+\mathbf V^{\perp}+\nabla_x\Pi=0,\nonumber\\
&\text{div}_x\mathbf{V}+\partial_zW=0,\nonumber\\
&\partial_z\Pi=0,\label{4b}
\end{align}
where $\mathbf V^{\perp}=(v_2,-v_1)$ and $\Pi$ denotes the pressure. We supplement the system with the initial condition
\begin{align*}
\mathbf V|_{t=0}=\mathbf V_0.
\end{align*}
As shown by Kukavica et al. \cite{kukavica}, the problem \eqref{4b} possesses a unique local analytic solution $\mathbf V$ and $\Pi$ on some time interval $(0,T)$, $T>0$, for any initial data
\begin{align}
\mathbf V_0\in C^{\infty}(\Omega),\hspace{3pt}\int^1_0\text{div}\mathbf V_0dz=0.
\end{align}
\subsection{Relative energy inequality}
According to the previous definition, we define the relative entropy functional,
\begin{align}
\mathcal{E}(\rho,\mathbf{u}|r, \mathbf{V})=\int_{\Omega}[\frac{1}{2}\rho|\mathbf u-\mathbf V|^2+\frac{1}{\epsilon^2}(P(\rho)-P'(r)(\rho-r)-P(r))]dxdz,
\end{align}
where $r>0$, $\mathbf V$ are continuously differentiable ``test'' functions. The following relation can be deduced
\begin{align}
\mathcal{E}&(\rho,\mathbf{u}|r,\mathbf V)|^{t=\tau}_{t=0}+\int^\tau_0\int_\Omega\big{(}\mu\nabla_x\mathbf u\cdot(\nabla_x\mathbf u-\nabla_x\mathbf V)+\lambda\partial_z\mathbf u(\partial_z\mathbf u-\partial_z\mathbf V)\big{)}dxdzdt\nonumber\\
&\leq\int^\tau_0\int_{\Omega}\rho(\mathbf V-\mathbf u)\partial_t\mathbf V+\rho\mathbf u(\mathbf V-\mathbf u)\cdot\nabla_x\mathbf V
+\rho w(\mathbf V-\mathbf u)\partial_z\mathbf V-\frac{1}{\epsilon^2}p(\rho)\text{div}_x\mathbf Vdxdzdt\nonumber\\
&\hspace{15pt}-\frac{1}{\epsilon^2}\int^\tau_0\int_{\Omega}P''(r)(\rho\partial_tr+\rho\mathbf u\nabla_xr)dxdzdt
+\frac{1}{\epsilon^2}\int^\tau_0\int_{\Omega}\partial_tp(r)dxdzdt\nonumber\\
&\hspace{15pt}-\int^\tau_0\int_\Omega(\rho\mathbf u\times\omega)\cdot(\mathbf V-\mathbf u)dxdzdt,\label{4c}
\end{align}
for any $r,\mathbf V\in C^1([0,T]\times\Omega)$ with $r>0$.
\subsection{Main result}
The second result concerns the singular limit.
\begin{theorem}
Let $\gamma>6$, and $(\rho,\mathbf u,w)$ be a weak solution of the scaled system \eqref{4a} on a time interval $(0,T)$ with well-prepared initial data satisfying the following assumptions
\begin{align}
&\|\rho^{(1)}_{0,\epsilon}\|_{L^\infty(\Omega)}+\|\mathbf u_{0,\epsilon}\|_{L^\infty(\Omega)}\leq D,\nonumber\\
&\frac{\rho_{0,\epsilon}-\overline{\rho}}{\epsilon}\rightarrow 0\hspace{3pt}\text{in}\hspace{3pt}L^{1}(\Omega),\hspace{8pt}
\mathbf{u}_{0,\epsilon}\rightarrow \mathbf{V}_0\hspace{3pt}\text{in}\hspace{3pt}L^{2}(\Omega).
\end{align}
Let $\mathbf V$ be the unique analytic solution of the target problem \eqref{4b}. Suppose that $T<T_{\max}$, where $T_{\max}$ denotes the maximal life-span of the regular solution to the incompressible PE system \eqref{4b} with initial data $\mathbf V_0$, then
\begin{align}
\sup_{t\in[0,T]}&\int_\Omega[\rho|\mathbf u-\mathbf V|^2+\frac{1}{\epsilon^2}(P(\rho)-P'(\overline{\rho})(\rho-\overline{\rho})-P(\overline{\rho}))]\nonumber\\
&\leq C[\epsilon+\mu+\lambda+\int_\Omega|\mathbf u_{0,\epsilon}-\mathbf V_0|^2],
\end{align}
where the constant $C$ depends on the initial data $\rho_{0}$, $\mathbf u_{0}$, $\mathbf V_0$ and $T$, and on the size $D$ of the initial data perturbation. The constant $\overline{\rho}$ can be chosen arbitrarily.
\end{theorem}
\begin{remark}
Theorem 4.1 yields that $\rho_\epsilon$ and $\mathbf u_\epsilon$ converge to the solution of the target system in the regime $\epsilon\rightarrow0$ and $\mu,\lambda\rightarrow0$ for well-prepared initial data; in other words, the right-hand side of (4.8) tends to zero.
\end{remark}
\subsection{Uniform bounds}
Before proving Theorem 4.1, we derive uniform bounds for weak solutions $(\rho,\mathbf u)$. Here and hereafter, the constant $C$ denotes a positive constant, independent of $\epsilon$, which may take different values in different parts of the text. The following uniform bounds are derived from the relative energy inequality \eqref{4c}, if we take $r=\overline{\rho}$ and $\mathbf U=0$:
\begin{align}
&ess\sup_{t\in(0,T)}||\frac{\rho-\overline{\rho}}{\epsilon}||_{L^2(\Omega)+L^\gamma(\Omega)}\leq C,\nonumber\\
&ess\sup_{t\in(0,T)}||\sqrt{\rho}\mathbf u||_{L^2(\Omega)}\leq C,\hspace{5pt}
\sqrt{\mu}||\nabla_x\mathbf u||_{L^2((0,T)\times\Omega)}
+\sqrt{\lambda}||\partial_z\mathbf u||_{L^2((0,T)\times\Omega)}\leq C.
\end{align}
\section{Convergence of well-prepared initial data}
The proof of convergence is based on the ansatz
\begin{equation}
r=\overline{\rho},\hspace{5pt} \mathbf{U}=\mathbf{V},
\end{equation}
in the relative energy inequality \eqref{4c}, where $\mathbf V$ is the analytic solution of the target problem \eqref{4b}. The corresponding relative energy inequality reads as:
\begin{align}
\mathcal{E}&(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(\tau)+\int^\tau_0\int_\Omega\mu(\nabla_x\mathbf u-\nabla_x\mathbf V):(\nabla_x\mathbf u -\nabla_x\mathbf V)
+\lambda(\partial_z\mathbf u-\partial_z\mathbf V)^2dxdzdt\nonumber\\
&\leq\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(0)
+\int^\tau_0\int_{\Omega}\rho(\mathbf V-\mathbf u)\partial_t\mathbf V+\rho\mathbf u(\mathbf V-\mathbf u)\cdot\nabla_x\mathbf V+\rho w(\mathbf V-\mathbf u)\partial_z\mathbf Vdxdzdt\nonumber\\
&\hspace{10pt}-\frac{1}{\epsilon^2}\int^\tau_0\int_\Omega p(\rho)\text{div}_x\mathbf Vdxdzdt
-\int^\tau_0\int_\Omega(\rho\mathbf u\times\omega)\cdot(\mathbf V-\mathbf u)dxdzdt\nonumber\\
&\hspace{10pt}+\int^\tau_0\int_\Omega\mu\nabla_x\mathbf V(\nabla_x\mathbf u-\nabla_x\mathbf V)dxdzdt
+\int^\tau_0\int_\Omega\lambda\partial_z\mathbf V(\partial_z\mathbf u-\partial_z\mathbf V)dxdzdt.
\end{align}
First we deal with the initial data and the viscous terms. It is easy to compute the initial relative energy:
\begin{align}
\mathcal{E} (\rho_{},\mathbf{u}_{}|\overline{\rho}, \mathbf V)|_{t=0}\leq C\int_\Omega[|\mathbf u_{0,\epsilon}-\mathbf V_0|^2+|\rho_{0,\epsilon}-\overline{\rho}|^2]dx,
\end{align}
and viscous term
\begin{align}
&\mu\int^\tau_0\int_\Omega\nabla_x\mathbf V(\nabla_x\mathbf u-\nabla_x\mathbf V)dxdzdt\leq
\int^\tau_0\frac{\mu}{2}\|\nabla_x\mathbf u-\nabla_x\mathbf V\|^2_{L^2(\Omega)}
+\frac{\mu}{2}\|\nabla_x\mathbf V\|^2_{L^2(\Omega)}dt,\nonumber\\
&\lambda\int^\tau_0\int_\Omega\partial_z\mathbf V(\partial_z\mathbf u-\partial_z\mathbf V)dxdzdt\leq
\int^\tau_0\frac{\lambda}{2}\|\nabla_x\mathbf u-\nabla_x\mathbf V\|^2_{L^2(\Omega)}
+\frac{\lambda}{2}\|\partial_z\mathbf V\|^2_{L^2(\Omega)}dt.
\end{align}
Next, we consider the remaining terms. Utilizing $(4.3)_1$, we get that
\begin{align}
\int^\tau_0\int_{\Omega}\rho&(\mathbf V-\mathbf u)\partial_t\mathbf V+\rho\mathbf u(\mathbf V-\mathbf u)\cdot\nabla_x\mathbf V+\rho w(\mathbf V-\mathbf u)\partial_z\mathbf Vdxdzdt\nonumber\\
&=\int^\tau_0\int_{\Omega}\rho(\mathbf V-\mathbf u)(\partial_t\mathbf V+(\mathbf V\cdot\nabla_x)\mathbf V+W\partial_z\mathbf V)+\rho(\mathbf u-\mathbf V)(\mathbf V-\mathbf u)\nabla_x\mathbf V\nonumber\\
&\hspace{30pt}+\rho(\mathbf V-\mathbf u)(w-W)\partial_z\mathbf Vdxdzdt\nonumber\\
&=\int^\tau_0\int_{\Omega}\rho(\mathbf u-\mathbf V)(\nabla_x\Pi+\mathbf V^\bot)+\rho(\mathbf u-\mathbf V)(\mathbf V-\mathbf u)\nabla_x\mathbf V
+\rho(\mathbf V-\mathbf u)(w-W)\partial_z\mathbf Vdxdzdt.
\end{align}
It is easy to check that
\begin{align*}
\int^\tau_0\int_\Omega\rho(\mathbf u-\mathbf V)(\mathbf V-\mathbf u)\nabla_x\mathbf Vdxdzdt\leq C\int^\tau_0\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(t)dt.
\end{align*}
Next, we estimate the term $\int^\tau_0\int_{\Omega}\rho\mathbf V\cdot\nabla_x\Pi dxdzdt$, and rewrite in the form
\begin{align}
\int^\tau_0\int_{\Omega}\rho\mathbf V\cdot\nabla_x\Pi dxdzdt=
\epsilon\int^\tau_0\int_{\Omega}\frac{\rho-\overline{\rho}}{\epsilon}\mathbf V\cdot\nabla_x\Pi dxdzdt+
\overline{\rho}\int^\tau_0\int_{\Omega}\mathbf V\cdot\nabla_x\Pi dxdzdt,
\end{align}
The second term on the right side of (5.6) is estimated as:
\begin{align*}
\int^\tau_0\int_{\Omega}\mathbf V\nabla_x\Pi dxdzdt
=-\int^\tau_0\int_{\Omega}\text{div}_x\mathbf V\Pi dxdzdt
=\int^\tau_0\int_{\Omega}\partial_zW\Pi dxdzdt=0,
\end{align*}
where we have used the fact that $\Pi$ is independent of $z$. We deduce from the energy inequality that
\begin{align}
\int_{\Omega}\frac{1}{\epsilon^2}(P(\rho)-P'(r)(\rho-r)-P(r))dxdz\leq C, \hspace{5pt}\text{uniformly as}
\hspace{3pt}\epsilon\rightarrow0.
\end{align}
Similar to the previous analysis, it is enough to establish a uniform bound
\begin{align*}
\int_\Omega\frac{\rho-\overline{\rho}}{\epsilon}dxdz\leq C.
\end{align*}
Since the pressure $\Pi$ is analytic, the rightmost integral of (5.6) vanishes as $\epsilon\rightarrow0$.
From the previous definition of dissipative weak solutions, we choose $\Pi$ as the test function, so that
\begin{align*}
\int^\tau_0\int_{\Omega}&\rho\mathbf u\nabla_x\Pi dxdzdt\\
&=[\int_\Omega\rho\Pi dxdz]|^{t=\tau}_{t=0}-\int^\tau_0\int_\Omega\rho\partial_t\Pi dxdzdt
-\int^\tau_0\int_\Omega\rho w\partial_z\Pi dxdzdt\\
&=\epsilon[\int_\Omega\frac{\rho-\overline{\rho}}{\epsilon}\Pi dxdz]|^{t=\tau}_{t=0}
-\epsilon\int^\tau_0\int_\Omega\frac{\rho-\overline{\rho}}{\epsilon}\partial_t\Pi dxdzdt\rightarrow0,
\hspace{3pt}\text{as}\hspace{3pt}\epsilon\rightarrow0.
\end{align*}
Compared with Navier-Stokes equations, the pressure term in PE system is easy to estimate. By virtue of incompressible condition and $(4.3)_3$, we have that
\begin{align*}
-\frac{1}{\epsilon^2}\int^\tau_0\int_\Omega p(\rho)\text{div}_x\mathbf Vdxdzdt=\frac{1}{\epsilon^2}\int^\tau_0\int_\Omega p(\rho)\partial_zWdxdzdt=0.
\end{align*}
Moreover, we find that
\begin{align*}
\int_{\Omega}\rho(\mathbf u-\mathbf V)\cdot\mathbf V^\bot dxdz+\int_\Omega(\rho\mathbf u\times\omega)\cdot(\mathbf V-\mathbf u)dxdz=0.
\end{align*}
Now, utilizing \eqref{b1}, we deal with the complex nonlinear term
\begin{align*}
\int^\tau_0\int_{\Omega}&\rho(\mathbf V-\mathbf u)(w-W)\partial_z\mathbf V dxdzdt\\
&=\int^\tau_0\int_{\Omega}(\mathbf V-\mathbf u)\partial_z\mathbf V\big{(}
-\text{div}_x(\rho\widetilde{\mathbf u})+z\text{div}_x(\rho\overline{\mathbf u})
-\rho W \big{)}dxdzdt.
\end{align*}
These nonlinear terms are estimated one by one
\begin{align}
-\int_{\Omega}(\mathbf V-\mathbf u)\partial_z\mathbf V
\text{div}_x(\rho\widetilde{\mathbf u})dxdz
=\int_{\Omega}\rho\widetilde{\mathbf u}(\nabla_x\mathbf V-\nabla_x\mathbf u)\cdot\partial_z\mathbf Vdxdz
+\int_{\Omega}\rho\widetilde{\mathbf u}(\mathbf V-\mathbf u)\cdot\partial_z\nabla_x\mathbf Vdxdz.\label{55a}
\end{align}
From the incompressible condition, it follows that $W=-\int^z_0\text{div}_x\mathbf V(x,s,t)ds$. We define $\widetilde{\mathbf V}=\int^z_0\mathbf V(x,s,t)ds$ and get
\begin{align}
\int_{\Omega}&\rho\widetilde{\mathbf u}(\nabla_x\mathbf V-\nabla_x\mathbf u)\cdot\partial_z\mathbf Vdxdz\nonumber\\
&=\int_{\Omega}\chi_{\rho\leq\overline{\rho}}\rho\widetilde{\mathbf u}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz
+\int_{\Omega}\chi_{\rho\geq\overline{\rho}}\rho\widetilde{\mathbf u}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&=\int_{\Omega}\chi_{\rho\leq\overline{\rho}}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf V})(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz
+\int_{\Omega}\chi_{\rho\leq\overline{\rho}}\rho\widetilde{\mathbf V}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&\hspace{5pt}+\int_{\Omega}\chi_{\rho\geq\overline{\rho}}\rho\widetilde{\mathbf u}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\label{5a}
\end{align}
The first two terms on the right side of \eqref{5a} can be handled as in \eqref{2a}
\begin{align}
\int_{\Omega}&\chi_{\rho\leq\overline{\rho}}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf V})(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz
+\int_{\Omega}\chi_{\rho\leq\overline{\rho}}\rho\widetilde{\mathbf V}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&=\int_{\Omega}\chi_{\rho\leq\overline{\rho}}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf V})(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz
+\int_{\Omega}\chi_{\frac{\overline{\rho}}{2}<\rho\leq\overline{\rho}}\rho\widetilde{\mathbf V}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&\hspace{5pt}+\int_{\Omega}\chi_{\rho\leq\frac{\overline{\rho}}{2}}\rho\widetilde{\mathbf V}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&\leq\delta\|\nabla_x\mathbf V-\nabla_x\mathbf u\|_{L^2(\Omega)}^2
+C\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(t).
\end{align}
On the other hand, following \eqref{333}, we have
\begin{align}
\int^\tau_0\int_{\Omega}&\chi_{\rho\geq\overline{\rho}}\rho\widetilde{\mathbf u}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&=\int^\tau_0\int_{\Omega}\chi_{\rho\geq\overline{\rho}}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf V})(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz
+\int^\tau_0\int_{\Omega}\chi_{\rho\geq\overline{\rho}}\rho\widetilde{\mathbf V}(\nabla_x\mathbf V-\nabla_x\mathbf u)\partial_z\mathbf Vdxdz\nonumber\\
&\leq C\int^\tau_0h(t)\mathcal{E}(\rho,\mathbf u|r,\mathbf U)(t)dt
+\delta\int^\tau_0\|\nabla_x\mathbf V-\nabla_x\mathbf u\|^2_{L^2(\Omega)}
+\|\partial_z\mathbf V-\partial_z\mathbf u\|^2_{L^2(\Omega)}dt.
\end{align}
Similarly, the second nonlinear term on the right side of \eqref{55a} is divided into two parts:
\begin{align}
\int_{\Omega}\rho\widetilde{\mathbf u}(\mathbf V-\mathbf u)\cdot\partial_z\nabla_x\mathbf Vdxdz
&=\int_{\Omega}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf V})(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz
+\int_{\Omega}\rho\widetilde{\mathbf V}(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz.\nonumber
\end{align}
Utilizing the similar estimates in (3.6), we have
\begin{align}
\int_{\Omega}\rho(\widetilde{\mathbf u}-\widetilde{\mathbf V})(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz
\leq C\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(t).
\end{align}
Moreover, similar to \eqref{33c}, we get
\begin{align}
\int_{\Omega}&\rho\widetilde{\mathbf V}(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz\nonumber\\
&=\int_{\Omega}\chi_{\rho\leq\frac{\overline{\rho}}{2}}\rho\widetilde{\mathbf V}(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz
+\int_{\Omega}\chi_{\frac{\overline{\rho}}{2}<\rho<\overline{\rho}}\rho\widetilde{\mathbf V}(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz\nonumber\\
&\hspace{5pt}+\int_{\Omega}\chi_{\rho\geq\overline{\rho}}\rho\widetilde{\mathbf V}(\mathbf V-\mathbf u)\partial_z\nabla_x\mathbf Vdxdz\nonumber\\
&\leq \delta\|\nabla_x\mathbf V-\nabla_x\mathbf u\|_{L^2(\Omega)}^2
+\delta\|\partial_z\mathbf V-\partial_z\mathbf u\|_{L^2(\Omega)}^2
+C\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(t),
\end{align}
and
\begin{align}
\int_{\Omega}(\mathbf V-\mathbf u)\partial_z\mathbf Vz\text{div}_x(\rho\overline{\mathbf u})dxdz
\leq \delta\|\nabla_x\mathbf V-\nabla_x\mathbf u\|_{L^2(\Omega)}^2
+\delta\|\partial_z\mathbf V-\partial_z\mathbf u\|_{L^2(\Omega)}^2
+C\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(t).
\end{align}
The last term can be estimated as
\begin{align*}
\int_{\Omega}&\rho(\mathbf V-\mathbf u)\partial_z\mathbf VWdxdz\\
&=\int_\Omega\chi_{\rho\leq\frac{\overline{\rho}}{2}}\rho(\mathbf V-\mathbf u)\partial_z\mathbf VWdxdz+
\int_\Omega\chi_{\rho\geq\overline{\rho}}\rho(\mathbf V-\mathbf u)\partial_z\mathbf VWdxdz\\
&\hspace{8pt}+\int_\Omega\chi_{\frac{\overline{\rho}}{2}<\rho<\overline{\rho}}\rho(\mathbf V-\mathbf u)\partial_z\mathbf VWdxdz\\
& \leq\delta\|\nabla_x\mathbf V-\nabla_x\mathbf u\|_{L^2(\Omega)}^2
+\delta\|\partial_z\mathbf V-\partial_z\mathbf u\|_{L^2(\Omega)}^2
+C\mathcal{E}(\rho,\mathbf{u}|\overline{\rho}, \mathbf V)(t).
\end{align*}
Combining the above estimates and using Gronwall's inequality, we prove Theorem 4.1.
\vskip 0.5cm
\noindent {\bf Acknowledgements}
\vskip 0.1cm
We are very much indebted to an anonymous referee for many helpful suggestions. The research of H. G is partially supported by the NSFC Grant No. 11531006. The research of \v S.N. leading to these results has received funding from the Czech Sciences Foundation (GA\v CR), GA19-04243S and RVO 67985840. The research of T.T. is supported by the NSFC Grant No. 11801138. The paper was written when Tong Tang was visiting the Institute of Mathematics of the Czech Academy of Sciences which {hospitality and support} is gladly acknowledged.
\end{document} |
\begin{document}
\newtheorem{lem}{Lemma}
\newtheorem{lemma}[lem]{Lemma}
\newtheorem{prop}{Proposition}
\newtheorem{thm}{Theorem}
\newtheorem{theorem}[thm]{Theorem}
\def\,\, \substack{+\\ E}\,\,{\,\, \substack{+\\ E}\,\,}
\def\,\, \substack{-\\ E}\,\,{\,\, \substack{-\\ E}\,\,}
\def\,\, \substack{*\\ E}\,\,{\,\, \substack{*\\ E}\,\,}
\def\,\, \substack{-\\ G}\,\,{\,\, \substack{-\\ G}\,\,}
\def\,\, \substack{*\\ G}\,\,{\,\, \substack{*\\ G}\,\,}
\title{\sc On the size of the set $A(A+1)$}
\author{M. Z. Garaev and Ch.-Y. Shen}
\author{
{\sc Moubariz Z. Garaev} \\
{Instituto de Matem{\'a}ticas}\\
{Universidad Nacional Aut\'onoma de M{\'e}xico} \\
{C.P. 58089, Morelia, Michoac{\'a}n, M{\'e}xico} \\
{\tt garaev@matmor.unam.mx}\\
\and
{\sc Chun-Yen Shen} \\
{Department of Mathematics}\\
{Indiana University} \\
{Rawles Hall, 831 East Third St.}\\
{Bloomington, IN 47405, USA}\\
{\tt shenc@indiana.edu}}
\maketitle
\begin{abstract}
Let $F_p$ be the field of a prime order $p.$ For a subset $A\subset
F_p$ we consider the product set $A(A+1).$ This set is an image of
$A\times A$ under the polynomial mapping $f(x,y)=xy+x:F_p\times
F_p\to F_p.$ In the present note we show that if $|A|<p^{1/2},$ then
$$
|A(A+1)|\ge |A|^{106/105+o(1)}.
$$
If $|A|>p^{2/3},$ then we prove that
$$
|A(A+1)|\gg \sqrt{p\, |A|}
$$
and show that this is the optimal in general settings bound up to
the implied constant. We also estimate the cardinality of $A(A+1)$
when $A$ is a subset of real numbers. We show that in this case one
has the Elekes type bound
$$
|A(A+1)|\gg |A|^{5/4}.
$$
\end{abstract}
\footnotetext[1]{{\it 2000 Mathematics Subject Classification:}\,
11B75.} \footnotetext[2]{{\it Key words and phrases.}\, sums,
products and expanding maps.}
\section {Introduction}
Let $F_p$ be the field of residue classes modulo a prime number $p$
and let $A$ be a non-empty subset of $F_p.$ It is known
from~\cite{BGK, BKT} that if $|A|<p^{1-\delta},$ where $\delta>0,$
then one has the sum-product estimate
$$
|A+A|+|AA|\gg |A|^{1+\varepsilon}; \qquad
\varepsilon=\varepsilon(\delta)>0.
$$
This estimate and its proof consequently have been quantified and
simplified in~\cite{BG},~\cite{Gar1}--\cite{HIS},~\cite{KS}--\cite{Sh},~\cite{TV}. From the
sum-product estimate and Ruzsa's triangle inequalities (see,~\cite{R1} and~\cite{R2}) it follows
that the polynomial $f(x,y,z)=xy+z:F_p^3\to F_p$ possesses an
expanding property, in a sense that for any subsets $A,B,C$ with
$|A|\sim |B|\sim |C|\sim p^{\alpha},$ where $0<\alpha<1$ is fixed,
the set $f(A,B,C)$ has cardinality greater than $p^{\beta}$ for
some $\beta=\beta(\alpha)>\alpha.$ The problem raised by Widgerson
asks to explicitly write a polynomial with two variables which
would satisfy the expanding condition. This problem was solved by
Bourgain~\cite{B1}, showing that one can take $f(x,y)=x^2+xy.$
Now consider the polynomial $f(x,y)=xy+x.$ This polynomial, of
course, does not possess the expanding property in the way defined
above. Nevertheless, from Bourgain's work~\cite{B1} it is known that
if $|A|\sim p^{\alpha},$ where $0<\alpha<1,$ then
$$
|f(A,A)|=|A(A+1)|\ge p^{\beta};\qquad \beta=\beta(\alpha)>\alpha.
$$
In the present note we deal with explicit lower bounds for the size
of the set $A(A+1).$ Our first result addresses the most nontrivial
case $|A|<p^{1/2}.$
\begin{theorem}
\label{thm:106/105} Let $A\subset F_p$ with $|A|<p^{1/2}.$ Then
$$
|A(A+1)| \ge |A|^{106/105+o(1)}.
$$
\end{theorem}
Theorem~\ref{thm:106/105} will be derived from the
Balog-Szemer\'edi-Gowers type estimate and a version of the
sum-product estimate given in~\cite{BG}. We remark that the
statement of Theorem~\ref{thm:106/105} remains true in a slightly
wider range than $|A|<p^{1/2}.$ On the other hand, if $|A|>p^{2/3},$ then we
have the optimal in general settings bound.
\begin{theorem}
\label{thm:optimal} For any subsets $A, B, C\subset F_p^*$ the
following bound holds:
$$
|AB|\cdot|(A+1)C|\gg \min\Bigl\{p\,|A|,\,
\frac{|A|^2\cdot|B|\cdot|C|}{p}\Bigr\}.
$$
\end{theorem}
Theorem~\ref{thm:optimal} can be compared with the following
estimate from~\cite{Gar2}:
$$
|A+B|\cdot |AC|\gg \min\Bigl\{p\, |A|,\,
\frac{|A|^2\cdot|B|\cdot|C|}{p}\Bigr\}.
$$
Taking $B=A+1,\, C=A,$ Theorem~\ref{thm:optimal} implies
$$
|A(A+1)|\gg \min\Bigl\{\sqrt{p\,|A|},\,
\frac{|A|^2}{p^{1/2}}\Bigr\}.
$$
In particular, if $|A|>p^{2/3},$ then
$$
|A(A+1)|\gg \sqrt{p\, |A|}.
$$
Let us show that this bound is optimal, in general settings, up to the implied
constant. Let $N<0.1p$ be a positive integer,
$M=[2\sqrt{Np}]$ and let $g$ be a generator of $F_p^*.$ Consider the
set
$$
X=\{g^{n}-1:\, n=1,2,\ldots, M\}.
$$
From the pigeon-hole principle, there is a number $L$ such that
$$
|X\cap\{g^{L+1},\ldots, g^{L+M}\}|\ge \frac{M^2}{2p}\ge N.
$$
Take
$$
A=X\cap\{g^{L+1},\ldots, g^{L+M}\}.
$$
Then we have $|A|\ge N$ and
$$
|A(A+1)|\le 2M\le 2\sqrt{pN}.
$$
Thus, it follows that for any positive integer $N<p$ there exists a
set $A\subset F_p$ with $|A|=N$ such that
$$
|A(A+1)|\ll\sqrt{p|A|}.
$$
This observation illustrates the precision of our result for large
subsets of $F_p.$
When $|A|\cdot|B|\cdot|C|\approx p^2,$ Theorem~\ref{thm:optimal}
implies that
$$
|AB|\cdot|(A+1)C|\gg \sqrt{|A|^3\cdot |B|\cdot|C|}.
$$
This coincides with the bound that one can get when $A,B,C$ are
subsets of the set of real numbers $\mathbb{R}.$
\begin{theorem}
\label{thm:5/4} Let $A,B,C$ be finite subsets of \,
$\mathbb{R}\setminus\{0,-1\}.$ Then
$$
|AB|\cdot|(A+1)C|\gg \sqrt{|A|^3\cdot |B|\cdot |C|}.
$$
\end{theorem}
In particular, taking $B=A+1,\, C=A,$ we obtain the bound
$$
|A(A+1)| \gg |A|^{5/4}.
$$
We mention Elekes' sum-product estimate~\cite{El} in the case of
real numbers:
$$
|A+A|+|AA|\gg |A|^{5/4}.
$$
More generally Elekes' work implies that if $A,B,C$ are finite
subsets of the set
$\mathbb{R}\setminus\{0\},$ then
$$
|AB|\cdot |A+C|\gg \sqrt{|A|^3\cdot |B|\cdot|C|}.
$$
The best known bound up to date in the ``pure" sum-product problem
for real numbers is $|A+A|+|AA|\gg |A|^{4/3+o(1)},$ due to Solymosi~\cite{Sol}.
\section{Proof of Theorem~\ref{thm:106/105}}
For $E\subset A\times B$ we write
$$
A\,\, \substack{-\\ E}\,\, B=\{a-b: (a,b)\in E\}.
$$
A basic tool in the proof of Theorem~\ref{thm:106/105} is the
following explicit Balog-Szemer\'edi-Gowers type estimate given by
Bourgain and Garaev~\cite{BG}.
\begin{lemma}
\label{lem:BG1} Let $A\subset F_p, \, B\subset F_p,\, E\subset
A\times B$ be such that $|E|\ge |A||B|/K.$ There exists a subset
$A'\subset A$ such that $|A'|\ge 0.1 |A|/K$ and
$$
|A\,\, \substack{-\\ E}\,\, B|^4\ge \frac{|A'-A'|\cdot|A|\cdot|B|^{2}}{10^4K^{5}}.
$$
\end{lemma}
Theorem~\ref{thm:106/105} will be derived from the combination of
Lemma~\ref{lem:BG1} with the following specific variation of the
sum-product estimate from~\cite{BG}.
\begin{lemma}
\label{lem:BG2} Let $A\subset F_p,\, |A|<p^{1/2}.$ Then,
$$
|A-A|^8\cdot|A(A+1)|^4\ge |A|^{13+o(1)}
$$
\end{lemma}
The proof of Lemma~\ref{lem:BG2} follows from straightforward
modification of the proof of Theorem 1.1 of~\cite{BG}, so we only
sketch it. It suffices to show that
$$
|A-A|^5\cdot|2A-2A|\cdot|A(A+1)|^4\ge |A|^{11+o(1)}.
$$
Indeed, having this estimate established, one can apply it to large
subsets of $A,$ iterate the argument of Katz and Shen~\cite{KS}
several times and finish the proof; for more details, see~\cite{BG}.
We can assume that $A\cap \{0, -1\}=\emptyset$ and $|A|\ge 10.$
There exists a fixed element $b_0\in A$ such that
$$
\sum_{a\in A}|(a+1)A\cap (b_0+1)A|\ge \frac{|A|^3}{|A(A+1)|}.
$$
Decomposing into level sets, we get a positive integer $N$ and a
subset $A_1\subset A$ such that
\begin{equation}
\label{eqn:aAcapbAge} N\le |(a+1)A\cap (b_0+1)A|< 2N \quad {\rm for
\quad any} \quad a\in A_1,
\end{equation}
\begin{equation}
\label{eqn:N|A1|} N|A_1|\ge \frac{|A|^3}{2|A(A+1)|\cdot\log|A|}.
\end{equation}
In particular,
\begin{equation}
\label{eqn:boundA1} N\ge
\frac{|A|^2}{2|A|\cdot|A(A+1)|\cdot\log|A|}.
\end{equation}
We can assume that $|A_1|>1.$ Due to the observation of Glibichuk and Konyagin~\cite{GK}, either
$$
\frac{A_1-A_1}{A_1-A_1}=F_p
$$
or we can choose elements $b'_1,b'_2,b'_3,b'_4\in A_1$ such that
$$
\frac{b'_1-b'_2}{b'_3-b'_4}-1\not\in \frac{A_1-A_1}{A_1-A_1}.
$$
Using the step of Katz and Shen~\cite{KS}, we deduce that in either case there exist elements $b_1,b_2,b_3,b_4\in A_1$
such that
\begin{equation}
\label{eqn:length4} \Bigl|(b_1-b_2)A+(b_3-b_4)A\Bigr|\gg
\frac{|A_1|^3}{|A-A|}.
\end{equation}
To each element $x\in (b_1-b_2)A+(b_3-b_4)A$ we attach one fixed
representation
\begin{equation}
\label{eqn:attach x} x=(b_1-b_2)a(x)+(b_3-b_4)a'(x),\quad a(x),
a'(x)\in A.
\end{equation}
Denote
$$
S=(b_1-b_2)A+(b_3-b_4)A,\quad S_i=(b_i+1)A\cap (b_0+1)A; \quad
i=1,2,3,4.
$$
As in~\cite{BG}, we consider the mapping
$$
f: S\times S_1\times S_2 \times S_3\times S_4 \to (2A-2A)\times
(A-A)\times(A-A)\times(A-A)\times(A-A)
$$
defined as follows. Given
$$
x\in S, \quad x_i\in S_i; \quad i=1,2,3,4,
$$
we represent $x$ in the form~\eqref{eqn:attach x}, represent $x_i$
in the form
$$
x_i=(b_i+1)a_i(x_i)=(b_0+1)a_i'(x_i),\quad a_i(x_i)\in A,\quad
a_i'(x_i)\in A,\quad (i=1,2,3,4),
$$
and define
$$
f(x,x_1,x_2,x_3,x_4)=(u,u_1,u_2,u_3, u_4),
$$
where
$$
u=a_1'(x_1)-a_2'(x_2)+a_3'(x_3)-a_4'(x_4),
$$
$$
u_1=a(x)-a_1(x_1), \quad u_2=a(x)-a_2(x_2),
$$
$$
u_3=a'(x)-a_3(x_3),\quad u_4=a'(x)-a_4(x_4).
$$
From the construction we have
$$
x=(b_1+1)u_1-(b_2+1)u_2+(b_3+1)u_3-(b_4+1)u_4+(b_0+1)u.
$$
Therefore, the vector $(u,u_1,u_2,u_3,u_4)$ determines $x$ and thus
determines $a(x), a'(x)$ and consequently determines $a_1(x_1),
a_2(x_2), a_3(x_3), a_4(x_4)$ which determines $x_1,x_2,x_3,x_4.$
Hence, since $|(b_i+1)A\cap (b_0+1)A|\ge N,$ we get that
$$
|(b_1-b_2)A+(b_3-b_4)A|N^4\le |A-A|^4\cdot |2A-2A|.
$$
Taking into account~\eqref{eqn:length4}, we get
$$
|A-A|^4\cdot |2A-2A|\gg \frac{|A_1|^3N^4}{|A-A|}.
$$
Using~\eqref{eqn:aAcapbAge}--\eqref{eqn:boundA1}, we conclude the
proof of Lemma~\ref{lem:BG2}.
We proceed to prove Theorem~\ref{thm:106/105}. Denote
$$
E=\{(x, x+xy):\,\, x\in A, \, y\in A\}\subset A\times A(A+1),
$$
Then,
$$
|E|=|A|^2=\frac{|A|\cdot|A(A+1)|}{K},\quad K=\frac{|A(A+1)|}{|A|}.
$$
Let $B=A(A+1).$ Observe that
$$
-AA=A\,\, \substack{-\\ E}\,\, B.
$$
According to Lemma~\ref{lem:BG1} there exists $A'\subset A$ with
\begin{equation}
\label{eqn:A'end} |A'|\gg \frac{|A|}{K}=\frac{|A|^2}{|A(A+1)|}
\end{equation}
such that
$$
|AA|^4|A(A+1)|^3\gg |A'-A'||A|^6.
$$
Raising to the eighth power and multiplying by $|A(A+1)|^4\ge
|A'(A'+1)|^4,$ we get
$$
|AA|^{32}\cdot|A(A+1)|^{28}\gg |A'-A'|^8|A'(A'+1)|^4|A|^{48}.
$$
Combining this with Lemma~\ref{lem:BG2} (applied to $A'$), we obtain
$$
|AA|^{32}\cdot|A(A+1)|^{28}\gg |A'|^{13}|A|^{48+o(1)}.
$$
Taking into account the inequality~\eqref{eqn:A'end}, we get
$$
|AA|^{32}\cdot|A(A+1)|^{41}\ge |A|^{74+o(1)}.
$$
From Ruzsa's triangle inequalities in multiplicative form, we have
$$
|AA|\le\frac{|A(A+1)|\cdot|(A+1)A|}{|A+1|}=\frac{|A(A+1)|^2}{|A|}.
$$
Putting the last two inequalities together, we conclude that
$$
|A(A+1)|^{105}\ge |A|^{106+o(1)}.
$$
\section{Proof of Theorem~\ref{thm:optimal}}
Let $J$ be the number of solutions of the equation
$$
x^{-1}y(z^{-1}t-1)=1, \quad (x,y,z,t)\in
AB\times B\times C\times (A+1)C.
$$
Observe that for any given triple $(a,b,c)\in A\times B\times C$ the
quadruple
$(x,y,z,t)=(ab, \, b, \, c, \, (a+1)c)$
is a solution of this equation. Thus,
\begin{equation}
\label{eqn:Jlower} J\ge |A|\cdot|B|\cdot|C|.
\end{equation}
On the other hand for any nonprincipal character $\chi$ modulo $p$
we have
$$
\Bigl|\sum_{z\in C}\,\,\sum_{t\in (A+1)C}\chi(z^{-1}t-1)\Bigr|\le
\sqrt{p\,|C|\cdot |(A+1)C|},
$$
see, for example, the solution to exercise 8 of~\cite[Chapter
V]{Vin}. Therefore, the method of solving multiplicative ternary
congruences implies that
$$
J=\frac{1}{p-1}\sum_{\chi}\sum_{x,y,z,t}\chi\Bigl(x^{-1}y(z^{-1}t-1)\Bigr)=
$$
$$
=\frac{1}{p-1}\sum_{x,y,z,t}\chi_0\Bigl(x^{-1}y(z^{-1}t-1)\Bigr)+
\frac{1}{p-1}\sum_{\chi\not=\chi_0}\sum_{x,y,z,t}\chi(x^{-1})\chi(y)\chi(z^{-1}t-1)
$$
$$
\le \frac{|AB|\cdot|B|\cdot |C|\cdot |(A+1)C|}{p-1}+\sqrt{p\,
|C|\cdot|(A+1)C|\cdot|AB|\cdot |B|}.
$$
Comparing this with~\eqref{eqn:Jlower}, we conclude the proof.\\
{\bf Remark}. In Karatsuba's survey paper ~\cite{Kar} the interested reader will find many applications of character sums to multiplicative congruences.
\section{Proof of Theorem~\ref{thm:5/4}}
Since $A\cap \{0, -1\}=\emptyset,$ we can assume that $|A|$ is
large. We will use the Szemer\'edi-Trotter incidence theorem, which
claims that if $\mathcal{P}$ is a finite set of points $(x,y)\in
\mathbb{R}^2$ and $\mathcal{L}$ is a finite set of lines
$\ell\subset \mathbb{R}^2,$ then
$$
\#\Bigl\{\Bigl((x,y),\ell\Bigr)\in \mathcal{P}\times \mathcal{L}:\,
(x,y)\in \ell\Bigr\}\ll
|\mathcal{P}|+|\mathcal{L}|+(|\mathcal{P}||\mathcal{L}|)^{2/3}.
$$
We mention that this theorem was applied by Elekes in the above mentioned
work~\cite{El} to the sum-product problem for subsets of $\mathbb{R}.$ In application to our problem,
we let
$$
\mathcal{P}=\{(x,y):\, x\in AB,\, y\in (A+1)C\}
$$
and let $\mathcal{L}$ to be the family of lines $\{\ell=\ell(z,t):
z\in C,\, t\in B\}$ given by the equation
$$
y-\frac{z}{t}\,x-z=0.
$$
In particular,
$$
|\mathcal{P}|=|AB|\cdot|(A+1)C|,\quad |\mathcal{L}|=|B||C|.
$$ Each line $\ell(z,t)\in \mathcal{L}$ contains $|A|$ distinct points
$(x,y)\in \mathcal{P}$ of the form
$$
(x,y)=(at,\,(a+1)z);\quad a\in A.
$$
Thus,
$$
\#\Bigl\{\Bigl((x,y),\ell\Bigr)\in \mathcal{P}\times \mathcal{L}:\,
(x,y)\in \ell\Bigr\}\ge |A||\mathcal{L}|=|A|\cdot |B|\cdot |C|.
$$
Therefore, the Szemer\'edi-Trotter incidence theorem implies that
$$
|A|\cdot |B|\cdot |C| \ll
|AB|\cdot|(A+1)C|+|B||C|+\Bigl(|AB|\cdot|(A+1)C|\cdot
|B|\cdot|C|\Bigr)^{2/3}.
$$
Since $|A|$ is large and $|AB|\cdot|(A+1)C|\ge |A|^2,$ the result
follows.
\end{document} |
\begin{document}
\begin{abstract}
We prove a formula for the orbifold Chow ring of semi-projective toric DM stacks, generalizing the orbifold Chow ring formula of projective toric DM stacks by Borisov-Chen-Smith. We also consider a special kind of semi-projective toric DM stacks, the Lawrence toric DM stacks. We prove that the orbifold Chow ring of a Lawrence toric DM stack is isomorphic to the orbifold Chow ring of its associated hypertoric DM stack studied in \cite{JT}.
\end{abstract}
\maketitle
\section{Introduction}
The main goal of this paper is to generalize the orbifold Chow ring formula of Borisov-Chen-Smith for
projective toric DM stacks to the case of semi-projective toric DM stacks.
In the paper \cite{BCS}, Borisov, Chen, and Smith developed the theory of toric DM stacks using stacky fans. A stacky fan is a triple $\mathbf{\Sigma}=(N,\Sigma,\beta)$, where $N$ is a finitely generated abelian group, $\Sigma$ is a simplicial fan in the lattice $\overline{N}:=N/\text{torsion}$ and $\beta: \mathbb{Z}^n\rightarrow N$ is a map given by a collection of vectors $\{b_1,\cdots,b_n\}\subset N$ such that the images $\{\overline{b}_{1},\cdots,\overline{b}_{n}\}$ generate the fan $\Sigma$. A toric DM stack $\mathcal{X}(\mathbf{\Sigma})$ is defined using $\mathbf{\Sigma}$; it is a quotient stack whose coarse moduli space is the toric variety $X(\Sigma)$ corresponding to the simplicial fan $\Sigma$.
The construction of toric DM stacks was slightly generalized later in \cite{Jiang}, in which the notion of extended stacky fans was introduced. This new notion is based on that of stacky fans plus some extra data. Extended stacky fans yield toric DM stacks in the same way as stacky fans do. The main point is that extended stacky fans provide presentations of toric DM stacks not available from stacky fans.
When $X(\Sigma)$ is projective, it is found in \cite{BCS} that the orbifold Chow ring (or Chen-Ruan cohomology ring) of $\mathcal{X}(\mathbf{\Sigma})$ is isomorphic to a deformed ring of the group ring of $N$. We call a toric DM stack $\mathcal{X}(\mathbf{\Sigma})$ {\em semi-projective} if its coarse moduli space $X(\Sigma)$ is semi-projective. Hausel and Sturmfels \cite{HS} computed the Chow ring of semi-projective toric varieties. Their answer is also known as the ``Stanley--Reisner'' ring of a fan. Using their result, we prove a formula of the orbifold Chow ring of semi-projective toric DM stacks.
Consider an extended stacky fan $\mathbf{\Sigma}=(N,\Sigma,\beta)$, where $\Sigma$ is the simplicial fan of the semi-projective toric variety $X(\Sigma)$. Let $N_{tor}$ be the torsion subgroup of $N$, then $N=\overline{N}\oplus N_{tor}$. Let $N_{\Sigma}:=|\Sigma|\oplus N_{tor}$. Note that $|\Sigma|$ is convex, so $|\Sigma|\oplus N_{tor}$ is a subgroup of $N$. Define the deformed ring $\mathbb{Q}[N_{\Sigma}]:=\bigoplus_{c\in N_{\Sigma}}\mathbb{Q}y^{c}$ with the product structure given by
\begin{equation}\label{productA}
y^{c_{1}}\cdot y^{c_{2}}:=\begin{cases}y^{c_{1}+c_{2}}&\text{if
there is a cone}~\sigma\in\Sigma~\text{such that}~\overline{c}_{1}\in\sigma, \overline{c}_{2}\in\sigma\,;\\
0&\text{otherwise}\,.\end{cases}
\end{equation}
Note that if $\mathcal{X}(\mathbf{\Sigma})$ is projective, then $N_{\Sigma}=N$ and $\mathbb{Q}[N_{\Sigma}]$ is the deformed ring
$\mathbb{Q}[N]^{\mathbf{\Sigma}}$ in \cite{BCS}.
Let $A^{*}_{orb}(\mathcal{X}(\mathbf{\Sigma}))$ denote the orbifold Chow ring of the toric DM stack $\mathcal{X}(\mathbf{\Sigma})$.
\begin{thm}\label{main}
Assume that $\mathcal{X}(\mathbf{\Sigma})$ is semi-projective. There is an isomorphism of rings
$$A^{*}_{orb}(\mathcal{X}(\mathbf{\Sigma}))\cong \frac{\mathbb{Q}[N_{\Sigma}]}{\{\sum_{i=1}^{n}e(b_{i})y^{b_{i}}:e\in N^{\star}\}}.$$
\end{thm}
The strategy of proving Theorem \ref{main} is as follows. We use a formula in \cite{HS} for the ordinary Chow ring of semi-projective toric varieties. We prove that each twisted sector is also a semi-projective toric DM stack. With this, we use a method similar to that in \cite{BCS} and \cite{Jiang} to prove the isomorphism as modules. The argument to show the isomorphism as rings is the same as that in \cite{BCS}, except that we only take elements in the support of the fan.
An interesting class of examples of semi-projective toric DM stack is the Lawrence toric DM stacks. We discuss the properties of such stacks. We prove that each 3-twisted sector or twisted sector is again a Lawrence toric DM stack. This allows us to draw connections to hypertoric DM stacks studied in \cite{JT}. We prove that the orbifold Chow ring of a Lawrence toric DM stack is isomorphic to the orbifold Chow ring of its associated hypertoric DM stack. This is an analog of Theorem 1.1 in \cite{HS} for orbifold Chow rings.
The rest of this text is organized as follows. In Section \ref{semi-pro} we define semi-projective toric DM stacks and prove Theorem 1.1. Results on Lawrence toric DM stacks are discussed in Section \ref{Lawrence}.
\subsection*{Conventions}
In this paper we work entirely algebraically over the field of
complex numbers. Chow rings and orbifold Chow rings are taken with
rational coefficients. By an orbifold we mean a smooth
Deligne-Mumford stack with trivial generic stabilizer.
For a simplicial fan $\Sigma$, we use $|\Sigma|$ to represent the lattice points in $\Sigma$.
Note that if $\Sigma$ is convex, $|\Sigma|$ is a free abelian subgroup of $N$.
We write $N^{\star}$ for $Hom_{\mathbb{Z}}(N,\mathbb{Z})$ and $N\to \overline{N}$ for the natural map of modding out torsion. We refer to \cite{BCS} for the construction of the Gale dual
$\beta^{\vee}: \mathbb{Z}^{m}\to DG(\beta)$ of $\beta: \mathbb{Z}^{m}\to N$.
\subsection*{Acknowledgments}
We thank Kai Behrend and Nicholas Proudfoot for valuable discussions.
\section{Semi-projective toric DM stacks and their orbifold Chow rings}\label{semi-pro}
In this section we define semi-projective toric DM stacks and discuss their properties.
\subsection{Semi-projective toric DM stacks}
\begin{defn}[\cite{HS}]
A toric variety $X$ is called semi-projective if the natural map
$$\pi: X\rightarrow X_{0}=\text{Spec}(H^{0}(X,\mathcal{O}_{X}))$$
is projective and $X$ has at least one torus-fixed point.
\end{defn}
\begin{defn}[\cite{Jiang}]\label{stackyfan}
An extended stacky fan $\mathbf{\Sigma}$ is a triple $(N,\Sigma,\beta)$,
where $N$ is a finitely generated abelian group, $\Sigma$ is a
simplicial fan in $N_{\mathbb{R}}$ and $\beta:
\mathbb{Z}^{m}\to N$ is the map determined by the
elements $\{b_{1},\cdots,b_{m}\}$ in $N$ such that
$\{\overline{b}_{1},\cdots,\overline{b}_{n}\}$ generate the
simplicial fan $\Sigma$ (here $m\geq n$).
\end{defn}
Given an extended stacky fan $\mathbf{\Sigma}=(N,\Sigma,\beta)$,
we have the following exact sequences:
\begin{equation}\label{exact1}
0\longrightarrow DG(\beta)^{\star}\longrightarrow
\mathbb{Z}^{m}\stackrel{\beta}{\longrightarrow} N\longrightarrow
Coker(\beta)\longrightarrow 0,
\end{equation}
\begin{equation}\label{exact2}
0\longrightarrow N^{\star}\longrightarrow
\mathbb{Z}^{m}\stackrel{\beta^{\vee}}{\longrightarrow}
DG(\beta)\longrightarrow Coker(\beta^{\vee})\longrightarrow 0,
\end{equation}
where $\beta^{\vee}$ is the Gale dual of $\beta$ (see \cite{BCS}). Applying $Hom_\mathbb{Z}(-,\mathbb{C}^{*})$ to (\ref{exact2}) yields
\begin{equation}\label{exact3}
1\longrightarrow \mu\longrightarrow
G\stackrel{\alpha}{\longrightarrow}
(\mathbb{C}^{*})^{m}\longrightarrow
(\mathbb{C}^{*})^{d}\longrightarrow 1.
\end{equation}
The toric
DM stack $\mathcal{X}(\mathbf{\Sigma})$ is the quotient stack $[Z/G]$, where
$Z:=(\mathbb{C}^{n}\setminus V(J_{\Sigma}))\times (\mathbb{C}^{*})^{m-n}$, $J_{\Sigma}$ is the
irrelevant ideal of the fan $\Sigma$ and $G$ acts on $Z$ through the map $\alpha$ in (\ref{exact3}). The
coarse moduli space of $\mathcal{X}(\mathbf{\Sigma})$ is the simplicial toric variety
$X(\Sigma)$ corresponding to the simplicial fan $\Sigma$, see \cite{BCS} and \cite{Jiang}.
\begin{defn}\label{semi-toric}
A toric DM stack $\mathcal{X}(\mathbf{\Sigma})$ is {\em semi-projective} if the coarse
moduli space $X(\Sigma)$ is semi-projective.
\end{defn}
\begin{thm}\label{semitor}
The following notions are equivalent:
\begin{enumerate}
\item A semi-projective toric DM stack $\mathcal{X}(\mathbf{\Sigma})$;
\item A toric DM stack $\mathcal{X}(\mathbf{\Sigma})$ such that the simplicial fan $\Sigma$
is a regular triangulation of $\mathcal{B}=\{\overline{b}_{1},\cdots,\overline{b}_{n}\}$
which spans the lattice $\overline{N}$.
\end{enumerate}
\end{thm}
\begin{pf}
Since the toric DM stack is semi-projective if its coarse moduli space is semi-projective,
the theorem follows from results in \cite{HS}.
\end{pf}
\subsection{The inertia stack}
Let $\mathbf{\Sigma}$ be an extended stacky fan and $\sigma\in\Sigma$ a cone. Define $link(\sigma):=\{\tau:\sigma+\tau\in \Sigma, \sigma\cap \tau=0\}$. Let $\{\widetilde{\rho}_{1},\ldots,\widetilde{\rho}_{l}\}$ be the rays in $link(\sigma)$. Consider the quotient extended stacky fan $\mathbf{\Sigma/\sigma}=(N(\sigma),\Sigma/\sigma,\beta(\sigma))$, with $\beta(\sigma): \mathbb{Z}^{l+m-n}\to N(\sigma)$ given by the images of $b_{1},\ldots,b_{l}$ and $b_{n+1},\ldots,b_{m}$
under $N\to N(\sigma)$. By the construction of toric Deligne-Mumford stacks,
if $\sigma$ is contained in a top dimensional cone in $\Sigma$, we have $\mathcal{X}(\mathbf{\Sigma/\sigma}):=[Z(\sigma)/G(\sigma)]$,
where $Z(\sigma)=(\mathbb{A}^{l}\setminus\mathbb{V}(J_{\Sigma/\sigma}))\times (\mathbb{C}^{*})^{m-n}$ and $G(\sigma)=Hom_{\mathbb{Z}}(DG(\beta(\sigma)),\mathbb{C}^{*})$.
\begin{lem}
If $\mathcal{X}(\mathbf{\Sigma})$ is semi-projective, so is $\mathcal{X}(\mathbf{\Sigma/\sigma})$.
\end{lem}
\begin{pf}
Semi-projectivity of the stack $\mathcal{X}(\mathbf{\Sigma})$ means the simplicial fan $\Sigma$ is a fan coming from a regular triangulation of $\mathcal{B}=\{\overline{b}_{1},\cdots,\overline{b}_{n}\}$ which spans the lattice $\overline{N}$. Let $pos(\mathcal{B})$ be the convex polyhedral cone generated by $\mathcal{B}$. Then from \cite{HS}, the triangulation is supported on $pos(\mathcal{B})$ and is determined by a simple polyhedron whose normal fan is $\Sigma$. So $\sigma$ is contained in a top-dimensional
cone $\tau$ in $\Sigma$. The image $\widetilde{\tau}$ of $\tau$ under the quotient by $\sigma$ is a top-dimensional cone in the quotient fan $\Sigma/\sigma$. So the toric variety $X(\Sigma/\sigma)$ is semi-projective by Theorem \ref{semitor}, and the stack $\mathcal{X}(\mathbf{\Sigma/\sigma})$ is semi-projective by definition.
\end{pf}
Recall from \cite{BCS} that for each top-dimensional cone $\sigma$ in $\Sigma$, $Box(\sigma)$ is defined to be the set of elements $v\in N$ such that $\overline{v}=\sum_{\rho_{i}\subseteq \sigma}a_{i}\overline{b}_{i}$ for some $0\leq a_{i}<1$. Elements in $Box(\sigma)$ are in one-to-one correspondence with elements in the finite group $N(\sigma)=N/N_{\sigma}$, where $N(\sigma)$ is a local group of the stack
$\mathcal{X}(\mathbf{\Sigma})$. In fact, we write $\overline{v}=\sum_{\rho_{i}\subseteq \sigma(\overline{v})}a_{i}\overline{b}_{i}$ for some $0<a_{i}<1$, where $\sigma(\overline{v})$ is the minimal cone containing $\overline{v}$. Denote by $Box(\mathbf{\Sigma})$ the union of $Box(\sigma)$ over all top-dimensional cones $\sigma$.
\begin{prop}
The $r$-inertia stack is given by
\begin{equation}\label{inertia}
\mathcal{I}_{r}\left(\mathcal{X}(\mathbf{\Sigma})\right)=\coprod_{(v_{1},\cdots,v_{r})\in Box(\mathbf{\Sigma})^{r}}
\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\cdots,\overline{v}_{r})),
\end{equation}
where $\sigma(\overline{v}_{1},\cdots,\overline{v}_{r})$ is the minimal cone in
$\Sigma$ containing $\overline{v}_{1},\cdots,\overline{v}_{r}$.
\end{prop}
\begin{pf}
Since $G$ is an abelian group, we have
$$\mathcal{I}_{r}\left(\mathcal{X}(\mathbf{\Sigma})\right)=[(\coprod_{(v_{1},\cdots,v_{r})\in (G)^{r}}
Z^{(v_{1},\cdots,v_{r})})\slash G],$$
where $Z^{(v_{1},\cdots,v_{r})}\subset Z$ is the subvariety fixed by $v_{1},\cdots,v_{r}$, and
$\sigma(\overline{v}_{1},\cdots,\overline{v}_{r})$ is contained in a top-dimensional cone in
$\Sigma$. We use the same method as in Lemma 4.6 and Proposition 4.7 of \cite{BCS} to prove that
$[Z^{(v_{1},\cdots,v_{r})}\slash G]\cong \mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\cdots,\overline{v}_{r}))$.
\end{pf}
Note that in (\ref{inertia}) each component is semi-projective.
\subsection{The orbifold Chow ring}\label{ring}
In this section we compute the orbifold Chow ring of semi-projective toric DM stacks and prove Theorem \ref{main}.
\subsubsection{The module structure}
Let $\mathbf{\Sigma}=(N,\Sigma,\beta)$ be an extended stacky fan such that the toric DM stack $\mathcal{X}(\mathbf{\Sigma})$ is semi-projective. Since the fan $\Sigma$ is convex, $|\Sigma|$ is an abelian subgroup of $N$. We put $N_{\Sigma}:=|\Sigma|\oplus N_{tor}$, where $N_{tor}$ is the torsion
subgroup of $N$. Define the deformed ring $\mathbb{Q}[N_{\Sigma}]:=\bigoplus_{c\in N_{\Sigma}}\mathbb{Q}y^{c}$ with the product structure given by (\ref{productA}).
Let $\{\rho_{1},\ldots,\rho_{n}\}$ be the rays of $\Sigma$, then each $\rho_{i}$ corresponds to a line
bundle $L_{i}$ over the toric Deligne-Mumford stack $\mathcal{X}(\mathbf{\Sigma})$ given by the trivial line bundle $\mathbb{C}\times Z$ over $Z$ with the $G$ action on $\mathbb{C}$ given by the $i$-th component $\alpha_{i}$ of $\alpha: G\to (\mathbb{C}^{*})^{m}$ in (\ref{exact3}). The first Chern classes of the line bundles $L_{i}$, which we identify with $y^{b_{i}}$, generate the cohomology ring of the simplicial toric variety $X(\Sigma)$.
Let $S_{\mathbf{\Sigma}}$ be the quotient ring $\frac{\mathbb{Q}[y^{b_{1}},\cdots,y^{b_{n}}]}{I_{\Sigma}}$, where $I_{\Sigma}$ is the square-free ideal of the fan $\Sigma$ generated by the monomials
$$\{y^{b_{i_{1}}}\cdots y^{b_{i_{k}}}: \overline{b}_{i_{1}},\cdots, \overline{b}_{i_{k}} \text{ do not generate a cone in }\Sigma\}.$$
It is clear that $S_{\mathbf{\Sigma}}$ is a subring of the deformed ring $\mathbb{Q}[N_{\Sigma}]$.
\begin{lem}
Let $A^{*}(\mathcal{X}(\mathbf{\Sigma}))$ be the ordinary Chow ring of a semi-projective toric DM stack $\mathcal{X}(\mathbf{\Sigma})$. Then there is a ring isomorphism:
$$A^{*}(\mathcal{X}(\mathbf{\Sigma}))\cong \frac{S_{\mathbf{\Sigma}}}
{\{\sum_{i=1}^{n}e(b_{i})y^{b_{i}}: e\in N^{\star}\}}.$$
\end{lem}
\begin{pf}
The Lemma is easily proven from the fact that the Chow ring of a DM stack is isomorphic to the Chow ring of its coarse moduli space (\cite{V}) and Proposition 2.11 in \cite{HS}.
\end{pf}
Now we study the module structure on $A_{orb}^{*}\left(\mathcal{X}(\mathbf{\Sigma})\right)$. Because $\Sigma$ is a simplicial fan, we have:
\begin{lem}\label{smalllemma}
For any $c\in N_{\Sigma}$, let $\sigma$ be the minimal cone in $\Sigma$ containing $\overline{c}$. Then there is a unique expression $c=v+\sum_{\rho_{i}\subset\sigma}m_{i}b_{i}$ where $m_{i}\in \mathbb{Z}_{\geq 0}$, and $v\in Box(\sigma)$.
\end{lem}
\begin{prop}\label{vectorspace}
Let $\mathcal{X}(\mathbf{\Sigma})$ be a semi-projective toric DM stack associated to an extended stacky fan $\mathbf{\Sigma}$. We have an isomorphism of $A^{*}(\mathcal{X}(\mathbf{\Sigma}))$-modules:
$$\bigoplus_{v\in Box(\mathbf{\Sigma})}A^{*}\left(\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}))\right)[deg(y^{v})]\cong \frac{\mathbb{Q}[N_{\Sigma}]}{\{\sum_{i=1}^{n}e(b_{i})y^{b_{i}}: e\in N^{\star}\}}.$$
\end{prop}
\begin{pf}
From the definition of $\mathbb{Q}[N_{\Sigma}]$ and Lemma \ref{smalllemma}, we see that $\mathbb{Q}[N_{\Sigma}]=\bigoplus_{v\in Box(\mathbf{\Sigma})}y^{v}\cdot S_{\mathbf{\Sigma}}$. The rest is similar to the proof of Proposition 4.7 in \cite{Jiang}; we leave it to the reader.
\end{pf}
\subsubsection{The Chen-Ruan product structure}
The orbifold cup product on a DM stack $\mathcal{X}$ is defined using genus zero, degree zero 3-pointed orbifold Gromov-Witten invariants on $\mathcal{X}$. The relevant moduli space is the disjoint union of all 3-twisted sectors (i.e. the double inertia stack). By (\ref{inertia}), the 3-twisted sectors of a semi-projective toric DM stack $\mathcal{X}(\mathbf{\Sigma})$ are
\begin{equation}\label{3-sector}
\coprod_{(v_{1},v_{2},v_{3})\in Box(\mathbf{\Sigma})^{3},
v_{1}v_{2}v_{3}=1}
~\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3})).
\end{equation}
Let $ev_{i}:
\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3}))\to
\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{i}))$ be the evaluation maps. The obstruction bundle (see \cite{CR2}) $Ob_{(v_{1},v_{2},v_{3})}$ over the 3-twisted sector
$\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3}))$ is defined by
\begin{equation}\label{obstruction}
Ob_{(v_{1},v_{2},v_{3})}:=\left(e^{*}T\left(\mathcal{X}(\mathbf{\Sigma})\right)\otimes
H^{1}(C,\mathcal{O}_{C})\right)^{H},
\end{equation}
where $e: \mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3}))\to
\mathcal{X}(\mathbf{\Sigma})$ is the embedding, $C\to \mathbb{P}^{1}$ is the $H$-covering branched over three marked points $\{0,1,\infty\}\subset \mathbb{P}^{1}$, and $H$ is the group generated by $v_{1},v_{2},v_{3}$.
A general result in \cite{CH} and \cite{JKK} about the obstruction bundle implies the following.
\begin{prop}\label{obstructionbdle}
Let
$\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3}))$ be a 3-twisted sector of the stack $\mathcal{X}(\mathbf{\Sigma})$. Suppose $v_{1}+v_{2}+v_{3}=\sum_{\rho_{i}\subset
\sigma(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3})}a_{i}b_{i}$, $a_{i}=1$ or $2$. Then the Euler class of the obstruction bundle $Ob_{(v_{1},v_{2},v_{3})}$ on
$\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3}))$
is
$$\prod_{a_{i}=2}c_{1}(L_{i})|_{\mathcal{X}(\mathbf{\Sigma/\sigma}(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3}))},$$
where $L_{i}$ is the line bundle over $\mathcal{X}(\mathbf{\Sigma})$ corresponding to the ray $\rho_{i}$.
\end{prop}
Let $v\in Box(\mathbf{\Sigma})$, say $v\in N(\sigma)$ for some top-dimensional cone $\sigma$. Let $\check{v}\in Box(\mathbf{\Sigma})$ be the inverse of $v$ as an element in the group $N(\sigma)$. Equivalently, if $v=\sum_{\rho_{i}\subseteq \sigma(\overline{v})}\alpha_{i}b_{i}$ for $0<\alpha_{i}<1$, then $\check{v}=\sum_{\rho_{i}\subseteq \sigma(\overline{v})}(1-\alpha_{i})b_{i}$. Then for $\alpha_{1},\alpha_{2}\in A^{*}_{orb}(\mathcal{X}(\mathbf{\Sigma}))$, the orbifold cup product is defined by
\begin{equation}\label{cupproduct}
\alpha_{1}\cup_{orb}\alpha_{2}=\widehat{ev}_{3*}(ev_{1}^{*}\alpha_{1}\cup ev_{2}^{*}\alpha_{2}\cup e(Ob_{(v_{1},v_{2},v_{3})})),
\end{equation}
where $\widehat{ev}_{3}=I\circ ev_{3}$, and $I: \mathcal{I}\mathcal{X}(\mathbf{\Sigma}) \rightarrow \mathcal{I}\mathcal{X}(\mathbf{\Sigma})$ is the natural map given by $(x,g)\mapsto (x,g^{-1})$.
\subsubsection*{Proof of Theorem \ref{main}}
By Proposition \ref{vectorspace}, it remains to consider the cup product. In this case, for any
$v_{1},v_{2}\in Box(\mathbf{\Sigma})$, we also have
$$v_{1}+v_{2}=\check{v}_{3}+\sum_{a_{i}=2}b_{i}+\sum_{i\in J}b_{i},$$
where $J$ represents the set of $j$
such that $\rho_{j}$ belongs to
$\sigma(\overline{v}_{1},\overline{v}_{2})$, but does not belong to
$\sigma(\overline{v}_{3})$. Then the proof is the same as the proof in \cite{BCS}.
We omit the details.
\section{Lawrence Toric DM stacks}\label{Lawrence}
In this section we study a special type of semi-projective toric DM stacks called the Lawrence toric DM stacks. Their orbifold Chow rings are shown to be isomorphic to the orbifold Chow rings of their associated hypertoric DM stacks studied in \cite{JT}.
\subsection{Stacky hyperplane arrangements}
Let $N$, $\{b_{1},\cdots,b_{m}\}\in N$, $\beta:\mathbb{Z}^{m}\to N$, and $\{\overline{b}_{1},\cdots,\overline{b}_{m}\}\subset \overline{N}$ be as in Definition \ref{stackyfan}. We assume that $\{b_{1},\cdots,b_{m}\}\in N$ are nontorsion integral vectors. We still have the exact sequences (\ref{exact1}) and (\ref{exact2}). The Gale dual map $\beta^{\vee}$ of $\beta$ is given by a collection of integral vectors $\beta^{\vee}=(a_1,\cdots,a_m)$. Choose a generic element $\theta\in DG(\beta)$ and let $\psi:=(r_{1},\cdots,r_{m})$ be a lifting of $\theta$ in $\mathbb{Z}^{m}$ such that $\theta=-\beta^{\vee}\psi$. Note that $\theta$ is generic if and only if it is not in any hyperplane of the configuration determined by $\beta^{\vee}$ in $DG(\beta)_{\mathbb{R}}$. Associated to $\theta$ there is a hyperplane arrangement $\mathcal{H}=\{H_{1},\cdots,H_{m}\}$ defined as follows: let $H_{i}$ be the hyperplane
\begin{equation}\label{arrangement}
H_{i}:=\{v\in M_{\mathbb{R}}\,|\,\langle\overline{b}_{i},v\rangle+r_{i}=0\}\subset M_{\mathbb{R}}.
\end{equation}
This determines a hyperplane arrangement in $M_{\mathbb{R}}$, up to translation.
It is well-known that hyperplane arrangements determine the topology of hypertoric varieties (\cite{BD}).
We call $\mathcal{A}:=(N,\beta,\theta)$ a {\em stacky hyperplane arrangement}.
The toric variety $X(\Sigma)$ is defined by the weighted polytope $\mathbf{\Gamma}:=\bigcap_{i=1}^{m}F_{i}$, where $F_{i}=\{v\in M_{\mathbb{R}}\,|\,\langle b_{i},v\rangle+r_{i}\geq 0\}$. Suppose that $\mathbf{\Gamma}$ is bounded; then the fan $\Sigma$ is the normal fan of $\mathbf{\Gamma}$ in $M_{\mathbb{R}}=\mathbb{R}^{d}$ with one dimensional rays generated by $\overline{b}_{1},\cdots,\overline{b}_{n}$. By reordering, we may assume that $H_{1},\cdots,H_{n}$ are the hyperplanes that bound the polytope $\mathbf{\Gamma}$, and $H_{n+1},\cdots,H_{m}$ are the other hyperplanes. Then we have an extended stacky fan
$\mathbf{\Sigma}=(N,\Sigma,\beta)$ as in Definition \ref{stackyfan}, with $\Sigma$ the normal fan of $\mathbf{\Gamma}$, $\beta:\mathbb{Z}^{m}\to N$ given by $\{b_{1},\cdots,b_{n},b_{n+1},\cdots,b_{m}\}\subset N$, and $\{b_{n+1},\cdots,b_{m}\}$ the extra data. We define the hypertoric DM stack $\mathcal{M}(\mathcal{A})$ using this $\mathcal{A}$; see \cite{JT} for more details.
\subsection{Lawrence toric DM stacks}
Applying Gale dual to the map
\begin{equation}\label{betaL}
\mathbb{Z}^{m}\oplus \mathbb{Z}^{m}\to DG(\beta),
\end{equation}
given by $(\beta^{\vee},-\beta^{\vee})$, we obtain
$$\beta_{L}: \mathbb{Z}^{m}\oplus \mathbb{Z}^{m}\longrightarrow N_{L},$$
which is given by integral vectors $\{b_{L,1},\cdots,b_{L,m},b'_{L,1},\cdots,b'_{L,m}\}$ in $N_{L}$. The natural images $\{\overline{b}_{L,1},\cdots,\overline{b}_{L,m},\overline{b}'_{L,1},\cdots,\overline{b}'_{L,m}\}\subset \overline{N}_{L}$
are called the Lawrence lifting of $\{\overline{b}_{1},\cdots,\overline{b}_{m}\}\subset \overline{N}$.
Associated to the generic element $\theta$, let $\overline{\theta}$ be its natural image under the map $DG(\beta)\rightarrow \overline{DG(\beta)}$. Then the map $\overline{\beta}^{\vee}: \mathbb{Z}^{m}\rightarrow \overline{DG(\beta)}$ is given by $\overline{\beta}^{\vee}=(\overline{a}_1,\cdots,\overline{a}_m)$. For any column basis of the form $C=\{\overline{a}_{i_{1}},\cdots,\overline{a}_{i_{m-d}}\}$, there exist unique $\lambda_{1},\cdots,\lambda_{m-d}$ such that
$$a_{i_{1}}\lambda_{1}+\cdots+a_{i_{m-d}}\lambda_{m-d}=\overline{\theta}.$$
Let $\mathbb{C}[z_{1},\cdots,z_{m},w_{1},\cdots,w_{m}]$ be the coordinate ring of $\mathbb{C}^{2m}$. Let
$\sigma(C,\theta)=\{\overline{b}_{i_{j}}~|~\lambda_{j}>0\}\sqcup\{\overline{b}'_{i_{j}}~|~\lambda_{j}<0\},$
and
$C(\theta)=\{z_{i_{j}}~|~\lambda_{j}>0\}\sqcup\{w_{i_{j}}~|~\lambda_{j}<0\}$.
We set
\begin{equation}\label{irrelevant}
\mathbf{\mathcal{I}}_{\theta}:=\langle\prod
C(\theta)~|~C~\text{is a column basis of}~\overline{\beta}^{\vee}\rangle,
\end{equation}
and
\begin{equation}\label{fan}
\Sigma_{\theta}:=\{\overline{\sigma}(C,\theta):~C~\text{is a column basis of}~\overline{\beta}^{\vee}\},
\end{equation}
where $\overline{\sigma}(C,\theta)=
\{\overline{b}_{L,1},\cdots,\overline{b}_{L,m},\overline{b}'_{L,1},\cdots,\overline{b}'_{L,m}\}\setminus\sigma(C,\theta)$
is the complement of $\sigma(C,\theta)$ and corresponds to the maximal cones in $\Sigma_{\theta}$.
According to \cite{HS}, $\Sigma_{\theta}$ is the fan of the
Lawrence toric variety $X(\Sigma_{\theta})$ corresponding to
$\theta$ in the lattice $\overline{N}_{L}$.
The ideal
$\mathcal{I}_{\theta}$ is the irrelevant ideal of the fan $\Sigma_{\theta}$.
Then we have the Lawrence stacky fan $\mathbf{\Sigma_{\theta}}=(N_{L},\Sigma_{\theta},\beta_{L})$ introduced in \cite{JT}.
Applying the $Hom_\mathbb{Z}(-,\mathbb{C}^{*})$ functor to (\ref{betaL}), we get
\begin{equation}\label{Lawrencemap}
\alpha_{h}: G\rightarrow (\mathbb{C}^{*})^{2m}.
\end{equation}
So $G$ acts on $\mathbb{C}^{2m}$ through $\alpha_{h}$. From Section 2,
$\mathcal{X}(\mathbf{\Sigma_{\theta}})=[(\mathbb{C}^{2m}\setminus V(\mathcal{I}_{\theta}))\slash G]$, whose coarse moduli space is the Lawrence toric variety $X(\Sigma_{\theta})=(\mathbb{C}^{2m}\setminus V(\mathcal{I}_{\theta}))\slash G$. Let $Y\subset \mathbb{C}^{2m}\setminus V(\mathcal{I}_{\theta})$
be the subvariety defined by the ideal:
\begin{equation}\label{ideal1}
I_{\beta^{\vee}}:=\langle\sum_{i=1}^{m}(\beta^{\vee})^{\star}(x)_{i}a_{ij}z_{i}w_{i}~|~\forall x\in DG(\beta)^{\star}\rangle,
\end{equation}
where $(\beta^{\vee})^{\star}: DG(\beta)^{\star}\rightarrow \mathbb{Z}^{m}$ is the dual map of $\beta^{\vee}$ and $(\beta^{\vee})^{\star}(x)_{i}$ is the $i$-th component
of the vector $(\beta^{\vee})^{\star}(x)$.
From \cite{JT}, the hypertoric DM stack is $\mathcal{M}(\mathcal{A})=[Y/G]$, whose coarse moduli space is the hypertoric variety $Y(\beta^{\vee},\theta)=Y\slash G$.
\begin{defn}(\cite{JT})
The Lawrence toric DM stack is the toric DM stack $\mathcal{X}(\mathbf{\Sigma_{\theta}})$ corresponding to the Lawrence stacky fan $\mathbf{\Sigma_{\theta}}$.
\end{defn}
By \cite{HS}, $X(\Sigma_{\theta})$ is semi-projective. So the Lawrence toric DM stack $\mathcal{X}(\mathbf{\Sigma_{\theta}})$ is semi-projective by definition.
\subsection{Comparison of inertia stacks}
Next we compare the orbifold Chow ring of the hypertoric DM stack and the orbifold Chow ring of the Lawrence toric DM stack. First we compare the inertia stacks. Consider the map
$\beta: \mathbb{Z}^{m}\rightarrow N$, which is given by the vectors $\{b_{1},\cdots,b_m\}$. Let $Cone(\beta)$ be the partially ordered finite set of cones generated by $\overline{b}_{1},\cdots,\overline{b}_{m}$. The partial order is defined by: $\sigma\prec\tau$ if $\sigma$ is a face of $\tau$, and we have the minimum element $\hat{0}$ which is the cone consisting of the origin. Let $Cone(\overline{N})$ be the set of all convex polyhedral cones in the lattice $\overline{N}$. Then we have a map
$$C: Cone(\beta)\longrightarrow Cone(\overline{N}),$$ such that for any $\sigma\in Cone(\beta)$, $C(\sigma)$ is the corresponding cone in $\overline{N}$. Then $\Delta_{\mathbf{\beta}}:=(C,Cone(\beta))$ is a simplicial {\em multi-fan} in the sense of \cite{HM}.
For the multi-fan $\Delta_{\mathbf{\beta}}$, let $Box(\Delta_{\mathbf{\beta}})$ be the set of pairs $(v,\sigma)$, where $\sigma$ is a cone in $\Delta_{\mathbf{\beta}}$ and $v\in N$ is such that $\overline{v}=\sum_{\rho_{i}\subseteq \sigma}\alpha_{i}b_{i}$ for $0<\alpha_{i}<1$. (Note that $\sigma$ is the minimal cone in $\Delta_{\mathbf{\beta}}$ satisfying the above condition.) From \cite{JT}, an element $(v,\sigma)\in Box(\Delta_{\mathbf{\beta}})$ gives a component of the inertia stack $\mathcal{I}(\mathcal{M}(\mathcal{A}))$.
Also consider the set $Box(\mathbf{\Sigma_{\theta}})$ associated to the stacky fan $\mathbf{\Sigma_{\theta}}$; see Section 2.2 for its definition. An element $v\in Box(\mathbf{\Sigma_{\theta}})$ gives a component of the inertia stack $\mathcal{I}(\mathcal{X}(\mathbf{\Sigma_{\theta}}))$.
By the Lawrence lifting property, a vector $\overline{b}_{i}$ in $\overline{N}$ lifts to two vectors
$\overline{b}_{L,i},\overline{b}'_{L,i}$ in $\overline{N}_{L}$. Let $\{\overline{b}_{L,i_{1}},\cdots,\overline{b}_{L,i_{k}}, \overline{b}'_{L,i_{1}},\cdots,\overline{b}'_{L,i_{k}}\}$ be the Lawrence lifting of $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}}\}$.
\begin{lem}\label{conemulti}
$\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}}\}$ generate a cone $\sigma$ in $\Delta_{\mathbf{\beta}}$ if and only if $\{\overline{b}_{L,i_{1}},\cdots,\overline{b}_{L,i_{k}}, \overline{b}'_{L,i_{1}},\cdots,\overline{b}'_{L,i_{k}}\}$ generate a cone $\sigma_{\theta}$ in $\Sigma_{\theta}$.
\end{lem}
\begin{pf}
Suppose $\sigma$ is a cone in $\Delta_{\mathbf{\beta}}$ generated by $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}}\}$; it is contained in a top-dimensional cone $\tau$. Assume that $\tau$ is generated by $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}}, \overline{b}_{i_{k+1}},\cdots,\overline{b}_{i_{d}}\}$. Let $C$ be the complement $\{\overline{b}_{1},\cdots,\overline{b}_{m}\}\setminus \tau$. Then $C$ corresponds to a column basis of $\overline{\beta}^{\vee}$ in the map $\overline{\beta}^{\vee}: \mathbb{Z}^{m}\rightarrow \overline{DG(\beta)}$. By the definition of $\Sigma_{\theta}$ in (\ref{fan}), $C$ corresponds to a maximal cone $\tau_{\theta}$ in $\Sigma_{\theta}$ which contains
the rays generated by $\{\overline{b}_{L,i_{1}},\cdots,\overline{b}_{L,i_{k}},
\overline{b}'_{L,i_{1}},\cdots,\overline{b}'_{L,i_{k}}\}$. Thus these rays generate a cone $\sigma_{\theta}$
in $\Sigma_{\theta}$.
Conversely, suppose $\sigma_{\theta}$ is a cone in $\Sigma_{\theta}$ generated by $\{\overline{b}_{L,i_{1}},\cdots,\overline{b}_{L,i_{k}}, \overline{b}'_{L,i_{1}},\cdots,\overline{b}'_{L,i_{k}}\}$. Using a similar method as above we prove that $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}}\}$ must be contained in a top-dimensional cone of $\Delta_{\mathbf{\beta}}$. So $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}}\}$ generate a cone $\sigma$ in $\Delta_{\mathbf{\beta}}$.
\end{pf}
\begin{lem}\label{box}
There is a one-to-one correspondence between the elements in $Box(\mathbf{\Sigma_{\theta}})$ and the elements in $Box(\Delta_{\mathbf{\beta}})$. Moreover, their degree shifting numbers coincide.
\end{lem}
\begin{pf}
First, the torsion elements in $Box(\mathbf{\Sigma_{\theta}})$ and $Box(\Delta_{\mathbf{\beta}})$ are both
isomorphic to $\mu=ker(\alpha)=ker(\alpha_{h})$ in (\ref{exact3}) and (\ref{Lawrencemap}).
Let $(v,\sigma)\in Box(\Delta_{\mathbf{\beta}})$ with $\overline{v}=\sum_{\rho_i\subseteq \sigma}\alpha_{i}\overline{b}_{i}$.
Then $v$ may be identified with an element (which we ambiguously denote by) $v\in G:=Hom_\mathbb{Z}(DG(\beta),\mathbb{C}^*)$. Certainly $v$ fixes a point in $\mathbb{C}^{m}$. Consider the map $\alpha$ in (\ref{exact3}), and put $\alpha(v)=(\alpha^{1}(v),\cdots,\alpha^{m}(v))$. Then $\alpha^{i}(v)\neq 1$ if $\rho_i\subseteq\sigma$, and $\alpha^{i}(v)= 1$ otherwise. By Lemma \ref{conemulti}, let $\{\overline{b}_{L,i},\overline{b}'_{L,i}:i=1,\cdots,|\sigma|\}$ be the Lawrence lifting of $\{\overline{b}_{i}\}_{\rho_i\subseteq\sigma}$. Since the action of $v$ on $\mathbb{C}^{2m}$ is given by $(v,v^{-1})$, $v$ fixes a point in $\mathbb{C}^{2m}$ and yields an element $v_{\theta}$ in $Box(\mathbf{\Sigma_{\theta}})$.
From the map (\ref{Lawrencemap}), let
\begin{equation}\label{vtheta}
\alpha_{h}(v_{\theta})=(\alpha^{1}_{h}(v_{\theta}),\cdots,\alpha^{m}_{h}(v_{\theta}),
\alpha^{m+1}_{h}(v_{\theta}),\cdots,\alpha^{2m}_{h}(v_{\theta})).
\end{equation}
Then $\alpha_{h}^{i}(v_{\theta})\neq 1$ and $\alpha_{h}^{i+m}(v_{\theta})\neq 1$ if $\rho_i\subseteq\sigma$; $\alpha_{h}^{i}(v_{\theta})= \alpha_{h}^{i+m}(v_{\theta})= 1$ otherwise. So $\sigma_{\theta}(\overline{v}_{\theta})=\{\overline{b}_{L,i},\overline{b}'_{L,i}:i=1,\cdots,|\sigma|\}$ is the minimal cone in $\Sigma_{\theta}$ containing $\overline{v}_{\theta}$. Furthermore, $\overline{v}_{\theta}=\sum_{\rho_i\subseteq \sigma}\alpha_{i}\overline{b}_{L,i}+ \sum_{\rho_i\subseteq \sigma}(1-\alpha_{i})\overline{b}'_{L,i}$.
Conversely, given an element $v_{\theta}\in Box(\mathbf{\Sigma_{\theta}})$, let $\sigma_{\theta}(\overline{v}_{\theta})$ be the minimal cone in $\Sigma_{\theta}$ containing $\overline{v}_{\theta}$.
Then from the action of $G$ on $\mathbb{C}^{2m}$ and (\ref{vtheta}), we have $\alpha^{i}_{h}(v_{\theta})=(\alpha^{i+m}_{h}(v_{\theta}))^{-1}$. If $\alpha^{i}_{h}(v_{\theta})\neq 1$, then $\alpha^{i+m}_{h}(v_{\theta})\neq 1$, which means that $\overline{b}_{L,i}, \overline{b}_{L,i+m}\in \sigma_{\theta}(\overline{v}_{\theta})$. The cone $\sigma_{\theta}(\overline{v}_{\theta})$ is the one in $\Sigma_{\theta}$ containing the $\overline{b}_{L,i}, \overline{b}_{L,i+m}$'s satisfying this condition.
Then $\overline{v}_{\theta}=\sum_{i}(\alpha_{i}\overline{b}_{L,i}+(1-\alpha_{i})\overline{b}^{'}_{L,i})$.
By Lemma \ref{conemulti},
$\sigma_{\theta}(\overline{v}_{\theta})$ is the Lawrence lifting of a cone $\sigma$ generated by the $\{\overline{b}_{i}\}$'s in $\Delta_{\mathbf{\beta}}$. Let $v=\sum_{\rho_i\subseteq\sigma}\alpha_{i}b_{i}$. So it also determines an element $(v,\sigma)\in Box(\Delta_{\mathbf{\beta}})$.
\end{pf}
For $(v_{1},\sigma_{1}),(v_{2},\sigma_{2}),(v_{3},\sigma_{3})\in Box(\Delta_{\mathbf{\beta}})$, let $\sigma(\overline{v}_{1}, \overline{v}_{2},\overline{v}_{3})$ be the minimal cone containing $\overline{v}_{1}, \overline{v}_{2},\overline{v}_{3}$ in $\Delta_{\mathbf{\beta}}$ such that $\overline{v}_{1}+\overline{v}_{2}+\overline{v}_{3}=\sum_{\rho_i\subseteq \sigma(\overline{v}_{1}, \overline{v}_{2},\overline{v}_{3})}a_{i}\overline{b}_{i}$ and $a_{i}=1,2$. Let $v_{\theta,1}, v_{\theta,2},v_{\theta,3}$ be the corresponding elements in $Box(\mathbf{\Sigma_{\theta}})$ and $\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3})$ the minimal cone containing $\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3}$ in $\mathbf{\Sigma_{\theta}}$. Then by Lemmas \ref{conemulti} and \ref{box},
$\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3})$ is the Lawrence lifting of $\sigma(\overline{v}_{1}, \overline{v}_{2},\overline{v}_{3})$. Suppose that $\sigma$
is generated by $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{s}}\}$; then $\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3})$ is generated by $\{\overline{b}_{L,i_{1}},\cdots,\overline{b}_{L,i_{s}},\overline{b}^{'}_{L,i_{1}},\cdots,\overline{b}^{'}_{L,i_{s}}\}$, the Lawrence lifting
of $\{\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{s}}\}$. Let $\{\overline{b}_{j_{1}},\cdots,\overline{b}_{j_{m-l-s}}\}$ be the rays
not in $\sigma\cup link(\sigma)$; we have the Lawrence lifting
$\{\overline{b}_{L,j_{1}},\cdots,\overline{b}_{L,j_{m-l-s}},\overline{b}^{'}_{L,j_{1}},\cdots,\overline{b}^{'}_{L,j_{m-l-s}}\}$. Then
from the definition of the Lawrence fan $\Sigma_{\theta}$ in (\ref{fan}), we have the following lemma:
\begin{lem}\label{keycone}
There exist $m-l-s$ vectors in $\{\overline{b}_{L,j_{1}},\cdots,\overline{b}_{L,j_{m-l-s}},\overline{b}^{'}_{L,j_{1}},\cdots,\overline{b}^{'}_{L,j_{m-l-s}}\}$
such that the rays they generate, together with the rays in $\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3})$,
generate a cone $\sigma_{\theta}$ in $\Sigma_{\theta}$. $\square$
\end{lem}
\begin{prop}\label{3-twisted-sector}
The stack $\mathcal{X}(\mathbf{\Sigma_{\theta}}/\sigma_{\theta})$ is also a Lawrence toric DM stack.
\end{prop}
\begin{pf}
For simplicity, put $\sigma:=\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})$. Suppose there are
$l$ rays in $link(\sigma)$. Then by Lemma \ref{conemulti} there
are $2l$ rays in $link(\sigma_{\theta})$, the Lawrence lifting of $link(\sigma)$.
Let $s:=|\sigma|$; then $2s+m-l-s=|\sigma_{\theta}|$. Applying Gale duality to the diagrams
\[
\begin{CD}
0 @ >>>\mathbb{Z}^{s}@ >>> \mathbb{Z}^{l+s}@ >>> \mathbb{Z}^{l} @
>>> 0\\
&& @VV{\beta_{\sigma}}V@VV{\widetilde{\beta}}V@VV{\beta(\sigma)}V \\
0@ >>>N_{\sigma} @ >{}>>N@ >>> N(\sigma) @>>> 0,
\end{CD}
\]
and
\[
\begin{CD}
0 @ >>>\mathbb{Z}^{l+s}@ >>> \mathbb{Z}^{m}@ >>> \mathbb{Z}^{m-l-s} @
>>> 0\\
&& @VV{\widetilde{\beta}}V@VV{\beta}V@VV{}V \\
0@ >>>N @ >{\cong}>>N@ >>> 0 @>>> 0
\end{CD}
\]
yields
\begin{equation}\label{3-sector2}
\begin{CD}
0 @ >>>\mathbb{Z}^{l}@ >>> \mathbb{Z}^{l+s}@ >>> \mathbb{Z}^{s} @
>>> 0\\
&& @VV{\beta(\sigma)^{\vee}}V@VV{\widetilde{\beta}^{\vee}}V@VV{\beta_{\sigma}^{\vee}}V \\
0@ >>>DG(\beta(\sigma)) @ >{\varphi_{1}}>>DG(\widetilde{\beta})@ >>> DG(\beta_{\sigma})
@>>> 0,
\end{CD}
\end{equation}
and
\begin{equation}\label{3-sector22}
\begin{CD}
0 @ >>>\mathbb{Z}^{m-l-s}@ >>> \mathbb{Z}^{m}@ >>> \mathbb{Z}^{l+s} @
>>> 0\\
&& @VV{\cong}V@VV{\beta^{\vee}}V@VV{\widetilde{\beta}^{\vee}}V \\
0@ >>>\mathbb{Z}^{m-l-s} @ >{}>>DG(\beta)@ >{\varphi_{2}}>> DG(\widetilde{\beta})
@>>> 0.
\end{CD}
\end{equation}
Since $\mathbb{Z}^{s}\cong N_{\sigma}$, we have that $DG(\beta_{\sigma})=0$.
We add the two exact sequences
$$0\longrightarrow \mathbb{Z}^{l}\longrightarrow\mathbb{Z}^{m}\longrightarrow\mathbb{Z}^{m-l}\longrightarrow 0,$$
and
$$0\longrightarrow 0\longrightarrow\mathbb{Z}^{m}\longrightarrow\mathbb{Z}^{m}\longrightarrow 0,$$
to the rows of the diagrams (\ref{3-sector2}), (\ref{3-sector22}) and, making suitable maps to the
Gale duals, we get
\begin{equation}\label{3-sectors}
\begin{CD}
0 @ >>>\mathbb{Z}^{2l}@ >>> \mathbb{Z}^{l+s+m}@ >>>
\mathbb{Z}^{s+m-l} @
>>> 0\\
&& @VV{(\beta(\sigma)^{\vee},-\beta(\sigma)^{\vee})}V@VV{(\widetilde{\beta}^{\vee},-\beta^{\vee})}V@VV{0}V \\
0@ >>>DG(\beta(\sigma)) @ >{\cong}>>DG(\widetilde{\beta})@ >>> 0
@>>> 0,
\end{CD}
\end{equation}
and
\begin{equation}\label{3-sectors2}
\begin{CD}
0 @ >>>\mathbb{Z}^{m-l-s}@ >>> \mathbb{Z}^{2m}@ >>>
\mathbb{Z}^{l+s+m} @
>>> 0\\
&& @VV{\cong}V@VV{(\beta^{\vee},-\beta^{\vee})}V@VV{(\widetilde{\beta}^{\vee},-\beta^{\vee})}V \\
0@ >>>\mathbb{Z}^{m-l-s} @ >{}>>DG(\beta)@ >>> DG(\widetilde{\beta})
@>>> 0.
\end{CD}
\end{equation}
Applying Gale dual to (\ref{3-sectors}), (\ref{3-sectors2}) we get
\[
\begin{CD}
0 @ >>>\mathbb{Z}^{s+m-l}@ >>> \mathbb{Z}^{l+s+m}@ >>> \mathbb{Z}^{2l} @ >>> 0\\
&& @VV{\cong}V@VV{\widetilde{\beta}_{L}}V@VV{\beta_{L}(\sigma_{\theta})}V \\
0@ >>>\mathbb{Z}^{s+m-l} @ >{}>>\widetilde{N}_{L}@ >>> N_{L}(\sigma_{\theta})
@>>> 0,
\end{CD}
\]
and
\[
\begin{CD}
0 @ >>>\mathbb{Z}^{l+s+m}@ >>> \mathbb{Z}^{2m}@ >>> \mathbb{Z}^{m-l-s} @ >>> 0\\
&& @VV{\widetilde{\beta}_{L}}V@VV{\beta_{L}}V@VV{0}V \\
0@ >>>\widetilde{N}_{L} @ >{\cong}>>N_{L}@ >>> 0
@>>> 0.
\end{CD}
\]
For the generic element
$\theta$, from the map $\varphi_{2}$ in (\ref{3-sector22}),
$\theta$ induces $\widetilde{\theta}\in DG(\widetilde{\beta})$, and from the isomorphism
$\varphi_{1}$ in (\ref{3-sector2}), $\widetilde{\theta}=\theta(\sigma)\in DG(\beta(\sigma))$.
So we get a quotient stacky hyperplane arrangement $\mathcal{A}(\sigma)=(N(\sigma),\beta(\sigma),\theta(\sigma))$.
From the above diagrams we see that the quotient fan
$\Sigma_{\theta}/\sigma_{\theta}$ in
$\overline{N}_{L}(\sigma_{\theta})$ also comes from a Lawrence
construction of the map $\beta(\sigma)^{\vee}:
\mathbb{Z}^{l}\rightarrow DG(\beta(\sigma))$. Let
$X(\sigma)=\mathbb{C}^{2l}\setminus
V(\mathcal{I}_{\theta(\sigma)})$, where
$\mathcal{I}_{\theta(\sigma)}$ is the irrelevant ideal of the
quotient fan $\Sigma_{\theta}\slash \sigma_{\theta}$. Let
$G(\sigma)=Hom_\mathbb{Z}(DG(\beta(\sigma)),\mathbb{C}^{*})$. The
stack
$\mathcal{X}(\mathbf{\Sigma_{\theta}}/\sigma_{\theta})=[X(\sigma)/G(\sigma)]$
is a Lawrence toric Deligne--Mumford stack.
\end{pf}
\begin{cor}\label{sectors}
$\mathcal{M}(\mathcal{A}(\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})))$ is the hypertoric DM stack associated to the quotient Lawrence toric DM stack
$\mathcal{X}(\mathbf{\Sigma_{\theta}}/\sigma_{\theta})$.
\end{cor}
\begin{pf}
$\mathcal{M}(\mathcal{A}(\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})))$ is constructed in \cite{JT} as a quotient stack
$[Y(\sigma)/G(\sigma)]$, where $Y(\sigma)\subset X(\sigma)$ is defined by $I_{\beta(\sigma)^{\vee}}$, which is
the ideal in (\ref{ideal1}) corresponding to the map $\beta(\sigma)^{\vee}$ in (\ref{3-sector2}). So the
stack $\mathcal{M}(\mathcal{A}(\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})))$ is the associated hypertoric DM stack in
the Lawrence toric DM stack $\mathcal{X}(\mathbf{\Sigma_{\theta}}/\sigma_{\theta})$.
\end{pf}
\begin{rmk}
For any $v_{\theta}\in Box(\mathbf{\Sigma_{\theta}})$, let $v^{-1}_{\theta}$ be its inverse. We have
the quotient Lawrence toric stack $\mathcal{X}(\mathbf{\Sigma_{\theta}}/\sigma_{\theta})$.
Let $(v,\sigma)$ be the corresponding element in $Box(\Delta_{\mathbf{\beta}})$; then
$$\mathcal{M}(\mathcal{A}(\sigma(\overline{v},
\overline{v}^{-1},1)))\cong \mathcal{M}(\mathcal{A}(\sigma)).$$
By Proposition \ref{3-twisted-sector} and Corollary \ref{sectors}, the twisted sector
$\mathcal{M}(\mathcal{A}(\sigma))$ is the associated hypertoric DM stack
of the Lawrence toric DM stack $\mathcal{X}(\mathbf{\Sigma_{\theta}}/\sigma_{\theta})$.
\end{rmk}
\begin{rmk}
From Lemma \ref{keycone}, the cone $\sigma_{\theta}$ is not the minimal cone
$\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3})$ containing
$\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3}$ in $\Sigma_{\theta}$.
So $\mathcal{X}(\Sigma_{\theta}/\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3}))$
is not a Lawrence toric DM stack. But from the construction of Lawrence toric DM stacks,
the quotient stack $\mathcal{X}(\Sigma_{\theta}/\sigma(\overline{v}_{\theta,1}, \overline{v}_{\theta,2},\overline{v}_{\theta,3}))$
is homotopy equivalent to the quotient stack $\mathcal{X}(\Sigma_{\theta}/\sigma_{\theta})$.
Since we do not need this to compare the orbifold Chow rings, we omit the details.
\end{rmk}
\subsection{Comparison of orbifold Chow rings}
Recall that $N_{L}=\overline{N}_{L}\oplus N_{L,tor}$, where $N_{L,tor}$ is the torsion subgroup
of $N_{L}$. Let $N_{\Sigma_{\theta}}=N_{L,tor}\oplus |\Sigma_{\theta}|$. By Theorem 1.1, we have
\begin{prop}\label{orbifoldlawrence}
The orbifold Chow ring $A^{*}_{orb}(\mathcal{X}(\mathbf{\Sigma}_{\theta}))$ of the
Lawrence toric DM stack $\mathcal{X}(\mathbf{\Sigma_{\theta}})$ is isomorphic to the ring
\begin{equation}\label{chowringlawrence}
\frac{\mathbb{Q}[N_{\Sigma_{\theta}}]}
{\{\sum_{i=1}^{m}e(b_{L,i})y^{b_{L,i}}+\sum_{i=1}^{m}e(b'_{L,i})y^{b'_{L,i}}:e\in N_{L}^{\star}\}}.
\end{equation}~ $\square$
\end{prop}
Recall from \cite{JT} that for any $c\in N$, there is a cone
$\sigma\in \Delta_\mathbf{\beta}$ such that
$\overline{c}=\sum_{\rho_{i}\subseteq \sigma}\alpha_{i}\overline{b}_{i}$, where the
$\alpha_{i}>0$ are rational numbers. Let $N^{\Delta_\mathbf{\beta}}$ denote the set of
all such pairs $(c,\sigma)$. Then $N^{\Delta_\mathbf{\beta}}$ gives rise to a
group ring
$$\mathbb{Q}[\Delta_\mathbf{\beta}]=\bigoplus_{(c,\sigma)\in N^{\Delta_\mathbf{\beta}}}\mathbb{Q}\cdot y^{(c,\sigma)},$$
where $y$ is a formal variable.
For any $(c,\sigma)\in N^{\Delta_\mathbf{\beta}}$, there exists a unique element
$(v,\tau)\in Box(\Delta_\mathbf{\beta})$ such that $\tau\subseteq\sigma$ and
$c=v+\sum_{\rho_{i}\subseteq \sigma}m_{i}b_{i}$,
where the
$m_{i}$ are nonnegative integers. We call $(v,\tau)$ the fractional part of $(c,\sigma)$. We define the \emph{ceiling function} for fans.
For $(c,\sigma)$ define the ceiling function $\lceil c \rceil_{\sigma}$ by
$\lceil c \rceil_{\sigma}=\sum_{\rho_{i}\subseteq \tau}b_{i}+\sum_{\rho_{i}\subseteq \sigma}m_{i}b_{i}$. Note that
if $\overline{v}=0$, then $\lceil c \rceil_{\sigma}=\sum_{\rho_{i}\subseteq \sigma}m_{i}b_{i}$.
For two pairs $(c_1,\sigma_1)$, $(c_2,\sigma_2)$, if $\sigma_1\cup\sigma_2$ is a cone in $\Delta_\mathbf{\beta}$, define
$\epsilon(c_1,c_2):=\lceil c_1 \rceil_{\sigma_{1}}+\lceil c_2 \rceil_{\sigma_{2}}-\lceil c_1+c_2 \rceil_{\sigma_{1}\cup\sigma_2}$.
Let $\sigma_{\epsilon}\subseteq\sigma_1\cup\sigma_2$ be the minimal cone in $\Delta_\mathbf{\beta}$ containing $\epsilon(c_1,c_2)$, so that
$(\epsilon(c_1,c_2),\sigma_{\epsilon})\in N^{\Delta_\mathbf{\beta}}$. We define the grading on $\mathbb{Q}[\Delta_{\mathbf{\beta}}]$ as follows.
For any $(c,\sigma)$, write $c=v+\sum_{\rho_{i}\subseteq \sigma}m_{i}b_{i}$; then
$deg(y^{(c,\sigma)})=|\tau|+\sum_{\rho_{i}\subseteq\sigma}m_{i}$, where
$|\tau|$ is the dimension of $\tau$.
By abuse of notation, we write $y^{(b_{i},\rho_i)}$ as $y^{b_{i}}$.
The multiplication
is defined by
\begin{equation}\label{product1}
y^{(c_{1},\sigma_{1})}\cdot y^{(c_{2},\sigma_{2})}:=
\begin{cases}
(-1)^{|\sigma_{\epsilon}|}y^{(c_{1}+c_{2}+\epsilon(c_1,c_2),\sigma_1\cup\sigma_2)}&\text{if
$\sigma_{1}\cup\sigma_{2}$ is a cone in $\Delta_{\mathbf{\beta}}$}\,,\\
0&\text{otherwise}\,.
\end{cases}
\end{equation}
From the properties of the \emph{ceiling function} we can check that the multiplication is commutative and associative. So $\mathbb{Q}[\Delta_\mathbf{\beta}]$ is
a unital associative commutative ring.
In \cite{JT}, it is shown that
\begin{equation}\label{chowringhyper}
A^{*}_{orb}(\mathcal{M}(\mathcal{A}))\cong \frac{\mathbb{Q}[\Delta_{\mathbf{\beta}}]}{\{\sum_{i=1}^{m}e(b_{i})y^{b_{i}}: e\in N^{\star}\}}.
\end{equation}
Consider the map $\beta: \mathbb{Z}^{m}\rightarrow N$ which is given by the vectors
$\{b_{1},\cdots,b_{m}\}$.
We take $\{1,\cdots,m\}$ as the vertex set of the {\em matroid complex}
$M_{\beta}$, defined from $\beta$ by requiring that $F\in
M_{\beta}$ iff the vectors
$\{\overline{b}_{i}\}_{i\in F}$ are linearly independent in $\overline{N}$.
A face $F\in M_{\beta}$ corresponds
to a cone in $\Delta_{\mathbf{\beta}}$ generated by $\{\overline{b}_{i}\}_{i\in F}$. By
\cite{S}, the ``Stanley--Reisner'' ring of the matroid
$M_{\beta}$ is
$$\mathbb{Q}[M_{\beta}]=\frac{\mathbb{Q}[y^{b_{1}},\cdots,y^{b_{m}}]}{I_{M_{\beta}}},$$
where $I_{M_{\beta}}$ is the matroid ideal generated by the set
of square-free monomials
$$\{y^{b_{i_{1}}}\cdots y^{b_{i_{k}}}|
\overline{b}_{i_{1}},\cdots,\overline{b}_{i_{k}} ~\text{linearly
dependent in}~\overline{N}\}.$$
It is proved in \cite{JT} that
$$\mathbb{Q}[\Delta_{\mathbf{\beta}}]\cong\bigoplus_{(v,\sigma)\in Box(\Delta_{\mathbf{\beta}})}y^{(v,\sigma)}\cdot \mathbb{Q}[M_{\beta}].$$
For any $(v_{1},\sigma_{1}),(v_{2},\sigma_{2})\in
Box(\Delta_{\mathbf{\beta}})$,
let $(v_{3},\sigma_{3})$ be the unique element in $Box(\Delta_{\mathbf{\beta}})$
such that $v_1+v_2+v_3\equiv 0$ in the local group given by $\sigma_1\cup\sigma_2$, where
$\equiv 0$ means that there exists a cone $\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})$ in
$\Delta_{\mathbf{\beta}}$ such that
$\overline{v}_{1}+\overline{v}_{2}+\overline{v}_{3}=\sum_{\rho_{i}\subseteq\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})}a_{i}\overline{b}_{i}$,
where $a_{i}=1 ~\text{or}~ 2$. Let $\overline{v}_1=\sum_{\rho_j\subseteq\sigma_1}\alpha_{j}^{1}\overline{b}_{j}$,
$\overline{v}_2=\sum_{\rho_j\subseteq\sigma_2}\alpha_{j}^{2}\overline{b}_{j}$,
$\overline{v}_3=\sum_{\rho_j\subseteq\sigma_3}\alpha_{j}^{3}\overline{b}_{j}$ with
$0<\alpha_{j}^{1},\alpha_{j}^{2},\alpha_{j}^{3}<1$. Let $I$ be the set of $i$ such that
$a_{i}=1$ and $\alpha_{j}^{1},\alpha_{j}^{2},\alpha_{j}^{3}$ exist, and $J$ the set of $j$ such that $\rho_{j}$ belongs
to $\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})$ but not $\sigma_{3}$.
If $(v,\sigma)\in Box(\Delta_\mathbf{\beta})$, let
$(\check{v},\sigma)$ be the inverse of $(v,\sigma)$. Except for torsion elements, equivalently, if $\overline{v}=\sum_{\rho_{i}\subseteq \sigma}\alpha_{i}\overline{b}_{i}$ for
$0<\alpha_{i}<1$, then $\check{\overline{v}}=\sum_{\rho_{i}\subseteq \sigma}(1-\alpha_{i})\overline{b}_{i}$.
By abuse of notation, we write $y^{(b_{i},\rho_i)}$ as $y^{b_{i}}$.
We have that $v_{1}+v_{2}=\check{v}_{3}+\sum_{a_{i}=2}b_{i}+\sum_{j\in J}b_{j}$. From (\ref{product1}), Lemma 5.11
and Lemma 5.12 in \cite{JT},
if $\overline{v}_{1},\overline{v}_{2}\neq 0$, we have
$$
\lceil v_1\rceil_{\sigma_1}+\lceil v_2\rceil_{\sigma_2}-\lceil v_1+v_2\rceil_{\sigma_1\cup\sigma_2}=
\begin{cases}
\sum_{i\in I}b_{i}+\sum_{j\in J}b_{j}&\text{if
$\overline{v}_{1}\neq\check{\overline{v}}_{2}$}\,,\\
\sum_{j\in J}b_{j}&\text{if
$\overline{v}_{1}=\check{\overline{v}}_{2}$}\,.\\
\end{cases}
$$
So it is easy to check that the multiplication
$y^{(v_{1},\sigma_{1})}\cdot y^{(v_{2},\sigma_{2})}$ can be written as
\begin{equation}\label{product}
\begin{cases}
(-1)^{|I|+|J|}y^{(\check{v}_{3},\sigma_{3})}\cdot\prod_{a_{i}=2}
y^{b_{i}}\cdot\prod_{i\in I}
y^{b_{i}}\cdot \prod_{j\in J}y^{2b_{j}}&\text{if
$\overline{v}_{1},\overline{v}_{2}\in\sigma$ for $\sigma \in\Delta_{\mathbf{\beta}}$ and $\overline{v}_{1}\neq \check{\overline{v}}_{2}$}\,,\\
(-1)^{|J|} \prod_{j\in J}y^{2b_{j}}&\text{if
$\overline{v}_{1},\overline{v}_{2}\in\sigma$ for $\sigma \in\Delta_{\mathbf{\beta}}$ and $\overline{v}_{1}=\check{\overline{v}}_{2}$}\,,\\
0&\text{otherwise}\,.
\end{cases}
\end{equation}
The following is the main result of this section.
\begin{thm}\label{main2}
There is an isomorphism of orbifold Chow rings $A_{orb}^{*}(\mathcal{X}(\mathbf{\Sigma_{\theta}}))\cong A_{orb}^{*}(\mathcal{M}(\mathcal{A}))$.
\end{thm}
\begin{pf}
The ring $\mathbb{Q}[N_{\Sigma_{\theta}}]$ is generated by $\{y^{b_{L,i}},y^{b'_{L,i}}: i=1,\cdots,m\}$
and $y^{v_{\theta}}$ for $v_{\theta}\in Box(\mathbf{\Sigma_{\theta}})$ by the definition. By Lemma \ref{box}, define a morphism
$$\phi: \mathbb{Q}[N_{\Sigma_{\theta}}]\rightarrow\mathbb{Q}[\Delta_{\mathbf{\beta}}] $$
by $y^{b_{L,i}}\mapsto y^{b_{i}}, y^{b'_{L,i}}\mapsto -y^{b_{i}}$ and $y^{v_{\theta}}\mapsto y^{(v,\sigma)}$. By \cite{HS}, the ideal $\mathcal{I}_{\theta}$ goes to the ideal $I_{M_{\beta}}$ and the relation
$\{\sum_{i=1}^{m}e(b_{L,i})y^{b_{L,i}}+\sum_{i=1}^{m}e(b'_{L,i})y^{b'_{L,i}}:e\in N_{L}^{\star}\}$
goes to the relation $\{\sum_{i=1}^{m}e(b_{i})y^{b_{i}}:e\in N^{\star}\}$. Thus the two rings are isomorphic
as modules.
It remains to check the multiplications. For any $y^{v_{\theta}}$ and $y^{b_{L,i}}$ or $y^{b'_{L,i}}$, let
$y^{(v,\sigma)}$ be the corresponding element in $\mathbb{Q}[\Delta_{\mathbf{\beta}}]$.
By the property of $v_{\theta}$ and Lemma \ref{box}, the minimal cone in
$\Sigma_{\theta}$ containing $\overline{v}_{\theta}, \overline{b}_{L,i}$ must contain $\overline{b}'_{L,i}$.
By Lemma \ref{conemulti}, there is a cone in $\Delta_{\mathbf{\beta}}$ containing $\overline{v}, \overline{b}_{i}$.
In this way, $y^{v_{\theta}}\cdot y^{b_{L,i}}$ goes to $y^{(v,\sigma)}\cdot y^{b_{i}}$ and
$y^{v_{\theta}}\cdot y^{b'_{L,i}}$ goes to $-y^{(v,\sigma)}\cdot y^{b_{i}}$.
If there is no cone in $\Sigma_{\theta}$
containing $\overline{v}_{\theta}, \overline{b}_{L,i},\overline{b}'_{L,i}$, then by Lemma \ref{conemulti} there is no cone in $\Delta_{\mathbf{\beta}}$ containing $\overline{v}, \overline{b}_{i}$. So
$y^{v_{\theta}}\cdot y^{b_{L,i}}=0$ goes to $y^{(v,\sigma)}\cdot y^{b_{i}}=0$ and
$y^{v_{\theta}}\cdot y^{b'_{L,i}}=0$ goes to $-y^{(v,\sigma)}\cdot y^{b_{i}}=0$.
For any $y^{v_{\theta,1}},y^{v_{\theta,2}}$, let $y^{(v_{1},\sigma_{1})},y^{(v_{2},\sigma_{2})}$ be the corresponding
elements in $\mathbb{Q}[\Delta_{\mathbf{\beta}}]$. If there is no cone
in $\Sigma_{\theta}$ containing $\overline{v}_{\theta,1},\overline{v}_{\theta,2}$, then by Lemmas \ref{conemulti}
and \ref{box}, there is no cone in $\Delta_{\mathbf{\beta}}$ containing $\overline{v}_{1},\overline{v}_{2}$. So
$y^{v_{\theta,1}}\cdot y^{v_{\theta,2}}=0$ goes to $y^{(v_{1},\sigma_{1})}\cdot y^{(v_{2},\sigma_{2})}=0$.
Suppose there
is a cone containing $\overline{v}_{\theta,1},\overline{v}_{\theta,2}$, and let
$v_{\theta,3}\in Box(\mathbf{\Sigma}_{\theta})$ be such that $v_{\theta,1}+v_{\theta,2}+v_{\theta,3}\equiv 0$.
Let $\sigma(\overline{v}_{\theta,1},
\overline{v}_{\theta,2},\overline{v}_{\theta,3})$ be the minimal cone containing $\overline{v}_{\theta,1},
\overline{v}_{\theta,2},\overline{v}_{\theta,3}$ in $\Sigma_{\theta}$. Then by Lemmas \ref{conemulti}
and \ref{box},
$\sigma(\overline{v}_{\theta,1},
\overline{v}_{\theta,2},\overline{v}_{\theta,3})$ is the Lawrence lifting of $\sigma(\overline{v}_{1},
\overline{v}_{2},\overline{v}_{3})$ for $(v_{1},\sigma_{1}),(v_{2},\sigma_{2}),(v_{3},\sigma_{3})\in Box(\Delta_{\mathbf{\beta}})$.
So we may write
$\overline{v}_{\theta,1}+\overline{v}_{\theta,2}+\overline{v}_{\theta,3}=\sum_{\rho_{i}\subseteq \sigma(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3})}a_{i}\overline{b}_{L,i}
+\sum_{\rho_{i}\subseteq \sigma(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3})}a'_{i}\overline{b}'_{L,i}$.
Correspondingly, $\overline{v}_{1}+\overline{v}_{2}+\overline{v}_{3}=
\sum_{\rho_{i}\subseteq \sigma(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3})}a_{i}\overline{b}_{i}$.
Let
$(\check{v},\sigma)$ be the inverse of $(v,\sigma)$ in
$Box(\Delta_{\mathbf{\beta}})$, i.e. if $v$ is nontorsion and
$\overline{v}=\sum_{\rho_{i}\subseteq \sigma}\alpha_{i}\overline{b}_{i}$
for $0<\alpha_{i}<1$, then $\check{\overline{v}}=\sum_{\rho_{i}\subseteq
\sigma}(1-\alpha_{i})\overline{b}_{i}$. The element $\check{v}_{\theta}$ is defined similarly
in $Box(\mathbf{\Sigma_{\theta}})$. The notation $J$ represents the set of $j$ such that $\rho_{j}$ belongs
to $\sigma(\overline{v}_{1},\overline{v}_{2},\overline{v}_{3})$ but not $\sigma_{3}$; the
corresponding $\rho_{L,j},\rho'_{L,j}$ belong
to $\sigma(\overline{v}_{\theta,1},\overline{v}_{\theta,2},\overline{v}_{\theta,3})$ but not $\sigma(\overline{v}_{\theta,3})$.
Suppose some $\overline{v}_{\theta,i}=0$, which means that $v_{\theta,i}$ is a torsion element. Then
from Lemma (\ref{box}) the corresponding
$v$ is also a torsion element. In this case we know that the orbifold cup product
$y^{v_{\theta,1}}\cdot y^{v_{\theta,2}}$ is the usual product, and under the
map $\phi$, is equal to
$y^{(v_{1},\sigma_{1})}\cdot y^{(v_{2},\sigma_{2})}$.
If $\overline{v}_{\theta,1}=\check{\overline{v}}_{\theta,2}$, then $\overline{v}_{\theta,3}=0$ and the obstruction bundle over the corresponding 3-twisted sector is zero.
The set $J$ is the set of $j$ such that $\rho_j$ belongs to $\sigma(\overline{v}_{\theta,1})$.
So from \cite{BCS}, we have
$$y^{v_{\theta,1}}\cdot y^{v_{\theta,2}}=
\prod_{j\in J}y^{b_{L,j}}\cdot y^{b'_{L,j}}.$$
Under the map $\phi$ we see that $y^{(v_{1},\sigma_{1})}\cdot y^{(v_{2},\sigma_{2})}$ is equal to the second line in the product
(\ref{product}).
If $\overline{v}_{\theta,1}\neq\check{\overline{v}}_{\theta,2}$, then $\overline{v}_{\theta,3}\neq 0$ and the obstruction bundle
is given by Proposition \ref{obstructionbdle}. If all $\alpha_{j}^{1},\alpha_{j}^{2},\alpha_{j}^{3}$ exist, the coefficients $a_{i}$ and $a'_{i}$ satisfy that if $a_{i}=1$ then
$a'_{i}=2$, and if $a_{i}=2$ then
$a'_{i}=1$. So
from \cite{BCS},
$$y^{v_{\theta,1}}\cdot y^{v_{\theta,2}}=
y^{\check{v}_{\theta,3}}\cdot\prod_{a_{i}=2}
y^{b_{L,i}}\cdot\prod_{i\in I}
y^{b'_{L,i}}\cdot \prod_{j\in J}y^{b_{L,j}}\cdot y^{b'_{L,j}}.$$
Under the map $\phi$ we see that $y^{(v_{1},\sigma_{1})}\cdot y^{(v_{2},\sigma_{2})}$ is equal to the first line in the product
(\ref{product}). By Lemma \ref{box}, the box elements have the same orbifold degrees.
By Corollary \ref{sectors} and the definition of the orbifold cup product in (\ref{cupproduct}),
the products $y^{v_{\theta,1}}\cdot y^{v_{\theta,2}}$ and $y^{(v_{1},\sigma_{1})}\cdot y^{(v_{2},\sigma_{2})}$ have
the same degrees in both Chow rings.
So $\phi$ induces a ring isomorphism
$A_{orb}^{*}(\mathcal{X}(\mathbf{\Sigma_{\theta}}))\cong A_{orb}^{*}(\mathcal{M}(\mathcal{A}))$.
\end{pf}
\begin{rmk}
The presentation (\ref{chowringhyper}) of the orbifold Chow ring only depends on the matroid complex corresponding to the map
$\beta: \mathbb{Z}^{m}\rightarrow N$, not on $\theta$. Note that the presentation (\ref{chowringlawrence}) depends on
the fan $\Sigma_{\theta}$. We cannot see explicitly from this presentation that the ring is independent of the
choice of generic element $\theta$.
\end{rmk}
\end{document} |
\begin{document}
\title {\bf Explicit Relations between Kaneko--Yamamoto Type Multiple Zeta Values and Related Variants}
\author{
{Ce Xu${}^{a,}$\thanks{Email: cexu2020@ahnu.edu.cn}\quad and Jianqiang Zhao${}^{b,}$\thanks{Email: zhaoj@ihes.fr}}\\[1mm]
\small a. School of Mathematics and Statistics, Anhui Normal University, Wuhu 241000, PRC\\
\small b. Department of Mathematics, The Bishop's School, La Jolla, CA 92037, USA \\
[5mm]
Dedicated to Professor Masanobu Kaneko on the occasion of his 60th birthday}
\date{}
\maketitle \noindent{\bf Abstract.} In this paper we first establish several integral identities. These integrals are of the form
\[\int_0^1 x^{an+b} f(x)\,dx\quad (a\in\{1,2\},\ b\in\{-1,-2\})\]
where $f(x)$ is a single-variable multiple polylogarithm function or $r$-variable multiple polylogarithm function or Kaneko--Tsumura A-function (this is a single-variable multiple polylogarithm function of level two). We find that these integrals can be expressed in terms of multiple zeta (star) values and their related variants (multiple $t$-values, multiple $T$-values, multiple $S$-values etc.), and multiple harmonic (star) sums and their related variants (multiple $T$-harmonic sums, multiple $S$-harmonic sums etc.), which are closely related to some special types of Schur multiple zeta values and their generalization. Using these integral identities, we prove many explicit evaluations of Kaneko--Yamamoto multiple zeta values and their related variants. Further, we derive some relations involving multiple zeta (star) values and their related variants.
\noindent{\bf Keywords}: Multiple zeta (star) values, multiple $t$-values, multiple $T$-values, multiple $M$-values, Kaneko--Yamamoto multiple zeta values, Schur multiple zeta values.
\noindent{\bf AMS Subject Classifications (2020):} 11M06, 11M32, 11M99, 11G55.
\section{Introduction and Notations}
\subsection{Multiple zeta values (MZVs) and Schur MZVs}
We begin with some basic notations. A finite sequence $\bfk \equiv {\bfk_r}:= (k_1,\dotsc, k_r)$ of positive integers is called a \emph{composition}. We put
\[|\bfk|:=k_1+\dotsb+k_r,\quad {\rm dep}(\bfk):=r,\]
and call them the weight and the depth of $\bfk$, respectively.
For $0\leq j\leq i$, we adopt the following notations:
\begin{align*}
&{\bfk}_i^j:=(\underbrace{k_{i+1-j},k_{i+2-j},\dotsc,k_i}_{j\ \text{components}})
\end{align*}
and
\begin{align*}
&{\bfk}_i\equiv{\bfk}_i^i:=(k_1,k_2,\dotsc,k_i),
\end{align*}
where ${\bfk}_i^0:=\emptyset$\quad $(i\geq 0)$. If $i<j$, then ${\bfk}_i^j:=\emptyset$.
For a composition $\bfk_r=(k_1,\dotsc,k_r)$ and positive integer $n$, the multiple harmonic sums (MHSs) and multiple harmonic star sums (MHSSs) are defined by
\begin{align*}
\zeta_n(k_1,\dotsc,k_r):=\sum\limits_{0<m_1<\cdots<m_r\leq n } \frac{1}{m_1^{k_1}m_2^{k_2}\cdots m_r^{k_r}}
\end{align*}
and
\begin{align*}
\zeta^\star_n(k_1,\dotsc,k_r):=\sum\limits_{0<m_1\leq \cdots\leq m_r\leq n} \frac{1}{m_1^{k_1}m_2^{k_2}\cdots m_r^{k_r}},
\end{align*}
respectively. If $n<r$ then ${\zeta_n}(\bfk_r):=0$ and ${\zeta_n}(\emptyset )={\zeta^\star_n}(\emptyset ):=1$.
The multiple zeta values (abbr. MZVs) and the multiple zeta-star value (abbr. MZSVs) are defined by
\begin{equation*}
\zeta(\bfk):=\lim_{n\to\infty } \zeta_n(\bfk)
\qquad\text{and}\qquad
\zeta^\star(\bfk):=\lim_{n\to\infty } \zeta_n^\star(\bfk),
\end{equation*}
respectively. These series converge if and only if $k_r\ge2$ so we call a composition $\bfk_r$ \emph{admissible} if this is the case.
The systematic study of multiple zeta values began in the early 1990s with the works of Hoffman \cite{H1992} and Zagier \cite{DZ1994}. Due to their surprising and sometimes mysterious appearance in the study of many branches of mathematics and theoretical physics, these special values have attracted a lot of attention and interest in the past three decades (for example, see the book by the second author \cite{Z2016}). A common generalization of the MZVs and MZSVs is given
by the Schur multiple zeta values \cite{MatsumotoNakasuji2020,NPY2018}, which are defined using the skew Young tableaux.
For example, for integers $a,b,d,e,f\geq 1$ and $c,g \geq 2$, the following sum is an example of a Schur multiple zeta value:
\begin{equation}\label{equ:SchurEg}
\zeta\left(\ {\ytableausetup{centertableaux, boxsize=1.2em}
\begin{ytableau}
\none & a& b & c \\
d& e & \none\\
f & g & \none
\end{ytableau}}\ \right)
= \sum_{{\scriptsize
\arraycolsep=1.4pt\def\arraystretch{0.8}
\begin{array}{ccccccc}
&&m_a&\leq&m_b&\leq& m_c \\
&&\vsmall&& && \\
m_d&\leq&m_e&&&& \\
\vsmall&&\vsmall&&& \\
m_f&\leq&m_g&&&&
\end{array} }} \frac{1}{m_a^{\,a} \,\, m_b^b \,\, m_c^c \,\, m_d^d \,\, m_e^e \,\, m_f^f \,\, m_g^g} \,.
\end{equation}
In this paper, we shall study some families of variations of MZVs. First, consider the following special form of
the Schur multiple zeta values.
\begin{defn} (cf. \cite{KY2018})
For any two compositions of positive integers $\bfk=(k_1,\dotsc,k_r)$ and $\bfl=(l_1,\dotsc,l_s)$, define
\begin{align}\label{equ:KYMZVs}
\zeta(\bfk\circledast{\bfl^\star})
:=&\sum\limits_{0<m_1<\cdots<m_r=n_s\geq \cdots \geq n_1 > 0} \frac{1}{m_1^{k_1}\cdots m_r^{k_r}n_1^{l_1}\cdots n_s^{l_s}} \\
=& \sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(k_1,\dotsc,k_{r-1})\zeta^\star_n(l_1,\dotsc,l_{s-1})}{n^{k_r+l_s}}. \notag
\end{align}
\end{defn}
The K--Y MZVs defined by \eqref{equ:KYMZVs} are the special cases of the Schur multiple zeta values
$\zeta_\gl({\boldsymbol{\sl{s}}})$ given by the following skew Young tableaux of anti-hook type
\[
{\boldsymbol{\sl{s}}}={\footnotesize \ytableausetup{centertableaux, boxsize=1.8em}
\begin{ytableau}
\none & \none & \none & \tikznode{a1}{\scriptstyle k_1} \\
\none & \none & \none & \vdots \\
\none & \none & \none & \scriptstyle k_{r-1} \\
\tikznode{a2}{\scriptstyle l_1} & \cdots & \scriptstyle l_{s-1} & \tikznode{a3}{\scriptstyle x}
\end{ytableau}}
\]
\subsection{Variations of MZVs with even/odd summation indices}
One may modify the definition of MZVs by restricting the summation indices to even/odd numbers.
These values are apparently NOT in the class of Schur multiple zeta values.
For instance, in recent papers \cite{KanekoTs2018b,KanekoTs2019}, Kaneko and Tsumura introduced a new kind of
multiple zeta values of level two, called \emph{multiple T-values} (MTVs), defined
for admissible compositions $\bfk=(k_1,\dotsc,k_r)$ by
\begin{align}
T(\bfk):&=2^r \sum_{0<m_1<\cdots<m_r\atop m_i\equiv i\ {\rm mod}\ 2} \frac{1}{m_1^{k_1}m_2^{k_2}\cdots m_r^{k_r}}\nonumber\\
&=2^r\sum\limits_{0<n_1<\cdots<n_r} \frac{1}{(2n_1-1)^{k_1}(2n_2-2)^{k_2}\cdots (2n_r-r)^{k_r}}.
\end{align}
This is in contrast to Hoffman's \emph{multiple $t$-values} (MtVs) defined in \cite{H2019} as follows: for
admissible compositions $\bfk=(k_1,\dotsc,k_r)$,
\begin{align*}
t(\bfk):=\sum_{0<m_1<\cdots<m_r\atop \forall m_i: odd} \frac{1}{m_1^{k_1}m_2^{k_2}\cdots m_r^{k_r}}
=\sum\limits_{0<n_1<\cdots<n_r} \frac{1}{(2n_1-1)^{k_1}(2n_2-1)^{k_2}\cdots (2n_r-1)^{k_r}}.
\end{align*}
Moreover, in \cite{H2019} Hoffman also defined its star version, called \emph{multiple $t$-star values} (MtSVs), as follows:
\begin{align*}
t^\star(\bfk):=\sum_{0<m_1\leq \cdots\leq m_r\atop \forall m_i: odd} \frac{1}{m_1^{k_1}m_2^{k_2}\cdots m_r^{k_r}}
=\sum\limits_{0<n_1\leq \cdots\leq n_r} \frac{1}{(2n_1-1)^{k_1}(2n_2-1)^{k_2}\cdots (2n_r-1)^{k_r}}.
\end{align*}
Very recently, the authors have defined another variant of multiple zeta values in \cite{XZ2020}, called \emph{multiple mixed values} or \emph{multiple $M$-values} (MMVs for short). For $\bfeps=(\varepsilon_1, \dots, \varepsilon_r)\in\{\pm 1\}^r$
and admissible compositions $\bfk=(k_1,\dotsc,k_r)$,
\begin{align}
M(\bfk;\bfeps):&=\sum_{0<m_1<\cdots<m_r} \frac{(1+\varepsilon_1(-1)^{m_1}) \cdots (1+\varepsilon_r(-1)^{m_r})}{m_1^{k_1} \cdots m_r^{k_r}}\nonumber\\
&=\sum_{0<m_1<\cdots<m_r \atop 2\mid m_j\ \text{if}\ \varepsilon_j=1\ \text{and}\ 2\nmid m_j\ \text{if}\ \varepsilon_j=-1} \frac{2^r}{m_1^{k_1}m_2^{k_2} \cdots m_r^{k_r}}.
\end{align}
For brevity, we put a check on top of the component $k_j$ if $\varepsilon_j=-1$. For example,
\begin{align*}
M(1,2,\check{3})=&\, \sum_{0<m_1<m_2<m_3} \frac{(1+(-1)^{m_1}) (1+(-1)^{m_2}) (1-(-1)^{m_3})}{m_1 m_2^{2}m_3^{3}}\\
=&\, \sum_{0<\ell<m<n} \frac{8}{(2\ell) (2m)^{2} (2n-1)^{3}}.
\end{align*}
It is obvious that MtVs satisfy the series stuffle relation; however, it is nontrivial to see that MTVs can be expressed using iterated integrals and satisfy both the duality relations (see \cite[Theorem 3.1]{KanekoTs2019}) and the integral shuffle relations (see \cite[Theorem 2.1]{KanekoTs2019}). Similar to MZVs, MMVs satisfy both series stuffle relations and the integral shuffle relations. Moreover, in \cite{XZ2020}, we also introduced and studied
a class of MMVs that is opposite to MTVs, called \emph{multiple S-values} (MSVs). For admissible compositions $\bfk=(k_1,\dotsc,k_r)$,
\begin{align}
S(\bfk):&=2^r \sum_{0<m_1<\cdots<m_r \atop m_i\equiv i-1\ {\rm mod}\ 2} \frac{1}{m_1^{k_1}m_2^{k_2}\cdots m_r^{k_r}}\nonumber\\
&=2^r \sum_{0<n_1<\cdots<n_r} \frac{1}{(2n_1)^{k_1}(2n_2-1)^{k_2}\cdots (2n_r-r+1)^{k_r}}.
\end{align}
It is clear that every MMV can be written as a linear combination of alternating MZVs
(also referred to as Euler sums or colored multiple zeta values) defined as follows.
For $\bfk\in\mathbb{N}^r$ and $\bfeps\in\{\pm 1\}^r$, if $(k_r,\eps_r)\ne(1,1)$ (called \emph{admissible} case) then
\begin{equation*}
\zeta(\bfk;\bfeps):=\sum\limits_{0<m_1<\cdots<m_r} \frac{\eps_1^{m_1}\cdots \eps_r^{m_r} }{ m_1^{k_1}\cdots m_r^{k_r}}.
\end{equation*}
We may compactly indicate the presence of an alternating sign as follows.
Whenever $\eps_j=-1$, we place a bar over the corresponding integer exponent $k_j$. For example,
\begin{equation*}
\zeta(\bar 2,3,\bar 1,4)=\zeta( 2,3,1,4;-1,1,-1,1).
\end{equation*}
Similarly, a star-version of alternating MZVs (called \emph{alternating multiple zeta-star values}) is defined by
\begin{equation*}
\zeta^\star(\bfk;\bfeps):=\sum\limits_{0<m_1\leq\cdots\leq m_r} \frac{\eps_1^{m_1}\cdots \eps_r^{m_r} }{ m_1^{k_1}\cdots m_r^{k_r}}.
\end{equation*}
Deligne showed that the rational space generated by alternating MZVs of weight $w$ is bounded by the Fibonacci number $F_w$
where $F_0=F_1=1$. In \cite[Theorem 7.1]{XZ2020} we showed that the rational space generated by MMVs of weight $w$
is bounded by $F_w-1$. The missing piece is the one-dimensional space generated by $\ln^w 2$.
\subsection{Variations of Kaneko--Yamamoto MZVs with even/odd summation indices}
Now, we introduce the $T$-variant of Kaneko--Yamamoto MZVs.
For positive integers $m$ and $n$ such that $n\ge m$, we define
\begin{align*}
&D_{n,m} :=
\left\{
\begin{array}{ll}
\Big\{(n_1,n_2,\dotsc,n_m)\in\mathbb{N}^{m} \mid 0<n_1\leq n_2< n_3\leq \cdots \leq n_{m-1}<n_{m}\leq n \Big\},\phantom{\frac12}\ & \hbox{if $2\nmid m$;} \\
\Big\{(n_1,n_2,\dotsc,n_m)\in\mathbb{N}^{m} \mid 0<n_1\leq n_2< n_3\leq \cdots <n_{m-1}\leq n_{m}<n \Big\},\phantom{\frac12}\ & \hbox{if $2\mid m$,}
\end{array}
\right. \\
&E_{n,m} :=
\left\{
\begin{array}{ll}
\Big\{(n_1,n_2,\dotsc,n_{m})\in\mathbb{N}^{m}\mid 1\leq n_1<n_2\leq n_3< \cdots< n_{m-1}\leq n_{m}< n \Big\},\phantom{\frac12}\ & \hbox{if $2\nmid m$;} \\
\Big\{(n_1,n_2,\dotsc,n_{m})\in\mathbb{N}^{m}\mid 1\leq n_1<n_2\leq n_3< \cdots \leq n_{m-1}< n_{m}\leq n \Big\}, \phantom{\frac12}\ & \hbox{if $2\mid m$.}
\end{array}
\right.
\end{align*}
\begin{defn} (\cite[Defn. 1.1]{XZ2020}) For positive integer $m$, define
\begin{align}
&T_n({\bfk_{2m-1}}):= \sum_{\bfn\in D_{n,2m-1}} \frac{2^{2m-1}}{(\prod_{j=1}^{m-1} (2n_{2j-1}-1)^{k_{2j-1}}(2n_{2j})^{k_{2j}})(2n_{2m-1}-1)^{k_{2m-1}}},\label{MOT}\\
&T_n({\bfk_{2m}}):= \sum_{\bfn\in D_{n,2m}} \frac{2^{2m}}{\prod_{j=1}^{m} (2n_{2j-1}-1)^{k_{2j-1}}(2n_{2j})^{k_{2j}}},\label{MET}\\
&S_n({\bfk_{2m-1}}):= \sum_{\bfn\in E_{n,2m-1}} \frac{2^{2m-1}}{(\prod_{j=1}^{m-1} (2n_{2j-1})^{k_{2j-1}}(2n_{2j}-1)^{k_{2j}})(2n_{2m-1})^{k_{2m-1}}},\label{MOS}\\
&S_n({\bfk_{2m}}):= \sum_{\bfn\in E_{n,2m}} \frac{2^{2m}}{\prod_{j=1}^{m} (2n_{2j-1})^{k_{2j-1}}(2n_{2j}-1)^{k_{2j}}},\label{MES}
\end{align}
where $T_n({\bfk_{2m-1}}):=0$ if $n<m$, and $T_n({\bfk_{2m}})=S_n({\bfk_{2m-1}})=S_n({\bfk_{2m}}):=0$ if $n\leq m$. Moreover, for convenience sake, we set $T_n(\emptyset)=S_n(\emptyset):=1$. We call \eqref{MOT} and \eqref{MET} \emph{multiple $T$-harmonic sums} ({\rm MTHSs} for short), and call \eqref{MOS} and \eqref{MES} \emph{multiple $S$-harmonic sums} ({\rm MSHSs} for short).
\end{defn}
In \cite{XZ2020}, we used the MTHSs and MSHSs to define the convoluted $T$-values $T({\bfk}\circledast {\bfl})$, which can be regarded as a $S$- or $T$-variant of K--Y MZVs.
\begin{defn} (\cite[Defn. 1.2]{XZ2020}) For positive integers $m$ and $p$, the \emph{convoluted $T$-values} are defined by
\begin{align}\label{equ:schur1}
T({\bfk_{2m}}\circledast{\bfl_{2p}})=&\,2\sum\limits_{n=1}^\infty \frac{T_n({\bfk_{2m-1}})T_n({\bfl_{2p-1}})}{(2n)^{k_{2m}+l_{2p}}},\\
T({\bfk_{2m-1}}\circledast{\bfl_{2p-1}})=&\,2\sum\limits_{n=1}^\infty \frac{T_n({\bfk_{2m-2}})T_n({\bfl_{2p-2}})}{(2n-1)^{k_{2m-1}+l_{2p-1}}},\\
T({\bfk_{2m}}\circledast{\bfl_{2p-1}})=&\,2\sum\limits_{n=1}^\infty \frac{T_n({\bfk_{2m-1}})S_n({\bfl_{2p-2}})}{(2n)^{k_{2m}+l_{2p-1}}},\\
T({\bfk_{2m-1}}\circledast{\bfl_{2p}})=&\,2\sum\limits_{n=1}^\infty \frac{T_n({\bfk_{2m-2}})S_n({\bfl_{2p-1}})}{(2n-1)^{k_{2m-1}+l_{2p}}}.\label{equ:schur4}
\end{align}
We may further define the \emph{convoluted $S$-values} by
\begin{align}\label{equ:schur5}
S({\bfk_{2m}}\circledast{\bfl_{2p}})=&\,2\sum\limits_{n=1}^\infty \frac{S_n({\bfk_{2m-1}})S_n({\bfl_{2p-1}})}{(2n-1)^{k_{2m}+l_{2p}}},\\
S({\bfk_{2m-1}}\circledast{\bfl_{2p-1}})=&\,2\sum\limits_{n=1}^\infty \frac{S_n({\bfk_{2m-2}})S_n({\bfl_{2p-2}})}{(2n)^{k_{2m-1}+l_{2p-1}}}.\label{equ:schur6}
\end{align}
\end{defn}
In view of the interpretation of K--Y MZVs as special Schur MZVs, one may wonder if
Schur MZVs can be generalized so that the convoluted $S$- and $T$-values are special cases.
\subsection{Schur MZVs modulo $N$}
We now generalize the concept of Schur multiple zeta functions (resp. values)
to Schur multiple zeta functions (resp. values) modulo any positive integer $N$,
the case $N=2$ of which contain all the MMVs as special cases.
It turns out that when $N=2$ the only difference between these values and the Schur MZVs is
that each box in the Young diagram is decorated by either ``0'' or ``1'' at upper left corner so that the running
index appearing in that box must be either even or odd. For example, a variation of
the example in \eqref{equ:SchurEg} can be given as follows:
\begin{equation*}
\zeta\left(\ {\ytableausetup{centertableaux, boxsize=1.2em}
\begin{ytableau}
\none & {}^{\text{0}} a& {}^{\text{0}}b & {}^{\text{1}}c \\
{}^{\text{1}}d& {}^{\text{1}}e & \none\\
{}^{\text{0}}f & {}^{\text{1}}g & \none
\end{ytableau}}\ \right)
:= \sum_{{\scriptsize
\arraycolsep=1.4pt\def\arraystretch{0.8}
\begin{array}{cccccccl}
&&m_a&\leq&m_b&\leq& m_c \quad &\ 2|m_a,2|m_b,2\nmid m_c\\
&&\vsmall&& &&&\ \\
m_d&\leq&m_e&&&& &\ 2\nmid m_d,2\nmid m_e\\
\vsmall&&\vsmall&&&& \\
m_f&\leq&m_g&&&& &\ 2|m_f,2\nmid m_g
\end{array} }} \frac{2^7}{m_a^{\,a} \,\, m_b^b \,\, m_c^c \,\, m_d^d \,\, m_e^e \,\, m_f^f \,\, m_g^g} \,.
\end{equation*}
We now briefly describe this idea in general. For a skew Young diagram
$\gl$ with $n$ boxes (denoted by $\sharp(\gl)=n$),
let $T(\gl, X)$ be the set of all Young tableaux of shape $\gl$ over a set $X$.
Let $D(\gl)=\{(i,j): 1\le i\le r, \ga_i\le j\le \gb_i\}=\{(i,j): 1\le j\le s, a_j\le i\le b_j\}$ be the skew Young diagram of $\gl$
so that $(i,j)$ refers to the box on the $i$th row and $j$th column of $\gl$.
Fixing any positive integer $N$, we may decorate $D(\gl)$ by putting a residue class $\pi_{ij}$ mod $N$ at
the upper left corner of $(i,j)$-th box. We call such a decorated diagram a Young diagram modulo $N$,
denoted by $\gl^\pi$. Further, we define the set of semi-standard skew Young tableaux of shape $\gl^\pi$ by
$$
\mathrm{SSYT}(\gl^\pi):=\left\{(m_{i,j})\in T(\gl, \mathbb{N})\left|
\aligned
& m_{i,\ga_i}\le m_{i,\ga_i+1}\le \dotsm \le m_{i,\gb_i},\ \ m_{a_j,j}< m_{a_j+1,j}<\dotsm<m_{b_j,j},\\
& m_{i,j}\equiv \pi_{i,j} \pmod{N} \ \ \forall 1\le i\le r, \ga_i\le j\le \gb_i
\endaligned
\right.\right\}.
$$
For ${\boldsymbol{\sl{s}}} = (s_{i,j} )\in T(\gl,\mathbb{C})$, the \emph{Schur multiple zeta function} \emph{modulo} $N$
associated with $\gl^\pi$ is defined by the series
\begin{equation*}
\zeta_{\gl^\pi}({\boldsymbol{\sl{s}}}):=\sum_{M\in \mathrm{SSYT}(\gl^\pi)} \frac{2^{\sharp(\gl)} }{M^{\boldsymbol{\sl{s}}}}
\end{equation*}
where $M^{\boldsymbol{\sl{s}}}=(m_{i,j})^{\boldsymbol{\sl{s}}}:=\prod_{(i,j)\in D(\gl)} m_{i,j}^{s_{i,j}}$.
Similar to \cite[Lemma 2.1]{NPY2018}, it is not too hard to prove that the above series converges
absolutely whenever ${\boldsymbol{\sl{s}}}\in W_\gl$ where
$$
W_\gl:=\left\{{\boldsymbol{\sl{s}}}=(s_{i,j})\in T(\gl, \mathbb{C}) \left|
\aligned
& \Re(s_{i,j})\ge 1 \ \forall (i,j)\in D(\gl)\setminus C(\gl) \\
& \Re(s_{i,j})> 1 \ \forall (i,j)\in C(\gl)
\endaligned
\right.\right\},
$$
where $C(\gl)$ is the set of all corners of $\gl$. But this domain of convergence is not ideal. To define
the most accurate domain of convergence we need the following terminology. Given any two boxes $B_1$ and $B_2$
in $\gl$ we define $B_1\preceq B_2$ if $B_1$ is to the left or above $B_2$, namely, $B_1$
must be in the gray area in the picture
\begin{tikzpicture}[scale=0.05]
\filldraw [gray!30!white] (0,1) -- (3,1) -- (3,4) -- (8,4) -- (8,6) -- (0,6) -- (0,1);
\fill[black] (3,3) -- (4,3) -- (4,4) -- (3,4) -- (3,3);
\end{tikzpicture}
where the $B_2$ is the black box.
An \emph{allowable move} along a path from box $B$ is a move to a box $C$ such that $B\preceq C$ and all boxes above $C$ and to the left of $C$, if there are any, are already covered by the previous moves along the path.
An \emph{allowable path} in a skew Young diagram is a sequence of allowable moves covering all
the boxes without backtracking.
Then the domain of convergence of $\zeta_{\gl^\pi}({\boldsymbol{\sl{s}}})$ is the subset of $W_\gl$
defined by the condition that $\Re\bigl( \sum_{s_{ij}\in\mathcal{P}_\ell} s_{ij}\bigr)>\ell$ for all allowable paths
$\mathcal{P}$, where $\mathcal{P}_\ell$ is the sub-path of $\mathcal{P}$ covering the last $\ell$ boxes ending at a corner.
For example, the graph
$
{ \ytableausetup{centertableaux, boxsize=.5em}\begin{ytableau}
\none & \none & \scriptscriptstyle 1 & \scriptscriptstyle 4 & \scriptscriptstyle 6 \\
\scriptscriptstyle 2 & \scriptscriptstyle 5 & \scriptscriptstyle 7 & \none & \none \\
\scriptscriptstyle 3 & \none & \none & \none & \none \\
\end{ytableau}}
$
($1\to 2\to \cdots \to 7$) shows an allowable path in a skew Young diagram.
Similar to K--Y MZVs, the above convoluted $S$- and $T$-values are all special cases of Schur MZVs modulo 2 corresponding to anti-hook type Young diagrams. The six convoluted $S$- or $T$-values in
\eqref{equ:schur1}-\eqref{equ:schur6} are all given by mod 2 Schur MZVs
$\zeta_{\gl_j^{\pi_j}}$ ($1\le j\le 6$) below, respectively:
$$
\aligned
& \gl_1^{\pi_1}={ \ytableausetup{centertableaux, boxsize=1.8em}\begin{ytableau}
\none & \none & \none & \none & \tikznode{a1}{~} \\
\none & \none & \none & \none & \tikznode{a2}{~} \\
\none & \none & \none & \none & \vdots \\
\none & \none & \none & \none & \tikznode{a3}{~} \\
\tikznode{a5}{~} &\tikznode{a4}{~}& \cdots & \tikznode{a7}{~} & \tikznode{a6}{~}
\end{ytableau}},\qquad
\gl_2^{\pi_2}={ \ytableausetup{centertableaux, boxsize=1.8em}\begin{ytableau}
\none & \none & \none & \none & \tikznode{b1}{~} \\
\none & \none & \none & \none & \tikznode{b2}{~} \\
\none & \none & \none & \none & \vdots \\
\none & \none & \none & \none & \tikznode{b8}{~} \\
\tikznode{b5}{~} &\tikznode{b4}{~}& \cdots & \tikznode{b6}{~} & \tikznode{b7}{~}
\end{ytableau}},\qquad
\gl_3^{\pi_3}={ \ytableausetup{centertableaux, boxsize=1.8em}\begin{ytableau}
\none & \none & \none & \none & \tikznode{c1}{~} \\
\none & \none & \none & \none & \tikznode{c2}{~} \\
\none & \none & \none & \none & \vdots \\
\none & \none & \none & \none & \tikznode{c3}{~} \\
\tikznode{c4}{~} &\tikznode{c5}{~} & \cdots & \tikznode{c7}{~}& \tikznode{c6}{~}
\end{ytableau}},\\
&\gl_4^{\pi_4}={ \ytableausetup{centertableaux, boxsize=1.8em}\begin{ytableau}
\none & \none & \none & \none & \tikznode{d1}{~} \\
\none & \none & \none & \none & \tikznode{d2}{~} \\
\none & \none & \none & \none & \vdots \\
\none & \none & \none & \none & \tikznode{d8}{~} \\
\tikznode{d4}{~} &\tikznode{d5}{~} & \cdots &\tikznode{d6}{~} & \tikznode{d7}{~}
\end{ytableau}},\qquad
\gl_5^{\pi_5}={ \ytableausetup{centertableaux, boxsize=1.8em}\begin{ytableau}
\none & \none & \none & \none & \tikznode{e1}{~} \\
\none & \none & \none & \none & \tikznode{e2}{~} \\
\none & \none & \none & \none & \vdots \\
\none & \none & \none & \none & \tikznode{e3}{~} \\
\tikznode{e5}{~} &\tikznode{e4}{~}& \cdots & \tikznode{e7}{~} & \tikznode{e6}{~}
\end{ytableau}},\qquad
\gl_6^{\pi_6}={ \ytableausetup{centertableaux, boxsize=1.8em}\begin{ytableau}
\none & \none & \none & \none & \tikznode{f1}{~} \\
\none & \none & \none & \none & \tikznode{f2}{~} \\
\none & \none & \none & \none & \vdots \\
\none & \none & \none & \none & \tikznode{f8}{~} \\
\tikznode{f4}{~} &\tikznode{f5}{~} & \cdots & \tikznode{f6}{~}& \tikznode{f7}{~}
\end{ytableau}}
\endaligned
$$
\tikz[overlay,remember picture]{
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a1.north west) -- ([yshift=0.5em,xshift=-0.5em]a1.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a1.center) -- ([yshift=0mm,xshift=0mm]a1.center) node[midway]{$k_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a2.north west) -- ([yshift=0.5em,xshift=-0.5em]a2.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a2.center) -- ([yshift=0mm,xshift=0mm]a2.center) node[midway]{$k_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a3.north west) -- ([yshift=0.5em,xshift=-0.5em]a3.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a3.center) -- ([yshift=0mm,xshift=0mm]a3.center) node[midway]{$k_{m'}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a4.north west) -- ([yshift=0.5em,xshift=-0.5em]a4.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a4.center) -- ([yshift=0mm,xshift=0mm]a4.center) node[midway]{$l_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a5.north west) -- ([yshift=0.5em,xshift=-0.5em]a5.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a5.center) -- ([yshift=0mm,xshift=0mm]a5.center) node[midway]{$l_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a6.north west) -- ([yshift=0.5em,xshift=-0.5em]a6.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a6.center) -- ([yshift=0mm,xshift=0mm]a6.center) node[midway]{$x_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]a7.north west) -- ([yshift=0.5em,xshift=-0.5em]a7.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]a7.center) -- ([yshift=0mm,xshift=0mm]a7.center) node[midway]{$l_{p'}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b1.north west) -- ([yshift=0.5em,xshift=-0.5em]b1.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b1.center) -- ([yshift=0mm,xshift=0mm]b1.center) node[midway]{$k_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b2.north west) -- ([yshift=0.5em,xshift=-0.5em]b2.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b2.center) -- ([yshift=0mm,xshift=0mm]b2.center) node[midway]{$k_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b8.north west) -- ([yshift=0.5em,xshift=-0.5em]b8.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b8.center) -- ([yshift=0mm,xshift=0mm]b8.center) node[midway]{$k_{m''}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b4.north west) -- ([yshift=0.5em,xshift=-0.5em]b4.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b4.center) -- ([yshift=0mm,xshift=0mm]b4.center) node[midway]{$l_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b5.north west) -- ([yshift=0.5em,xshift=-0.5em]b5.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b5.center) -- ([yshift=0mm,xshift=0mm]b5.center) node[midway]{$l_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b6.north west) -- ([yshift=0.5em,xshift=-0.5em]b6.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b6.center) -- ([yshift=0mm,xshift=0mm]b6.center) node[midway]{$l_{p''}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]b7.north west) -- ([yshift=0.5em,xshift=-0.5em]b7.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]b7.center) -- ([yshift=0mm,xshift=0mm]b7.center) node[midway]{$x_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c1.north west) -- ([yshift=0.5em,xshift=-0.5em]c1.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c1.center) -- ([yshift=0mm,xshift=0mm]c1.center) node[midway]{$k_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c2.north west) -- ([yshift=0.5em,xshift=-0.5em]c2.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c2.center) -- ([yshift=0mm,xshift=0mm]c2.center) node[midway]{$k_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c3.north west) -- ([yshift=0.5em,xshift=-0.5em]c3.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c3.center) -- ([yshift=0mm,xshift=0mm]c3.center) node[midway]{$k_{m'}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c4.north west) -- ([yshift=0.5em,xshift=-0.5em]c4.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c4.center) -- ([yshift=0mm,xshift=0mm]c4.center) node[midway]{$l_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c5.north west) -- ([yshift=0.5em,xshift=-0.5em]c5.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c5.center) -- ([yshift=0mm,xshift=0mm]c5.center) node[midway]{$l_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c6.north west) -- ([yshift=0.5em,xshift=-0.5em]c6.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c6.center) -- ([yshift=0mm,xshift=0mm]c6.center) node[midway]{$x_3$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]c7.north west) -- ([yshift=0.5em,xshift=-0.5em]c7.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]c7.center) -- ([yshift=0mm,xshift=0mm]c7.center) node[midway]{$l_{p''}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d1.north west) -- ([yshift=0.5em,xshift=-0.5em]d1.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d1.center) -- ([yshift=0mm,xshift=0mm]d1.center) node[midway]{$k_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d2.north west) -- ([yshift=0.5em,xshift=-0.5em]d2.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d2.center) -- ([yshift=0mm,xshift=0mm]d2.center) node[midway]{$k_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d8.north west) -- ([yshift=0.5em,xshift=-0.5em]d8.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d8.center) -- ([yshift=0mm,xshift=0mm]d8.center) node[midway]{$k_{m''}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d4.north west) -- ([yshift=0.5em,xshift=-0.5em]d4.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d4.center) -- ([yshift=0mm,xshift=0mm]d4.center) node[midway]{$l_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d5.north west) -- ([yshift=0.5em,xshift=-0.5em]d5.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d5.center) -- ([yshift=0mm,xshift=0mm]d5.center) node[midway]{$l_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d6.north west) -- ([yshift=0.5em,xshift=-0.5em]d6.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d6.center) -- ([yshift=0mm,xshift=0mm]d6.center) node[midway]{$l_{p'}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]d7.north west) -- ([yshift=0.5em,xshift=-0.5em]d7.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]d7.center) -- ([yshift=0mm,xshift=0mm]d7.center) node[midway]{$x_4$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e1.north west) -- ([yshift=0.5em,xshift=-0.5em]e1.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e1.center) -- ([yshift=0mm,xshift=0mm]e1.center) node[midway]{$k_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e2.north west) -- ([yshift=0.5em,xshift=-0.5em]e2.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e2.center) -- ([yshift=0mm,xshift=0mm]e2.center) node[midway]{$k_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e3.north west) -- ([yshift=0.5em,xshift=-0.5em]e3.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e3.center) -- ([yshift=0mm,xshift=0mm]e3.center) node[midway]{$k_{m'}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e4.north west) -- ([yshift=0.5em,xshift=-0.5em]e4.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e4.center) -- ([yshift=0mm,xshift=0mm]e4.center) node[midway]{$l_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e5.north west) -- ([yshift=0.5em,xshift=-0.5em]e5.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e5.center) -- ([yshift=0mm,xshift=0mm]e5.center) node[midway]{$l_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e6.north west) -- ([yshift=0.5em,xshift=-0.5em]e6.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e6.center) -- ([yshift=0mm,xshift=0mm]e6.center) node[midway]{$x_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]e7.north west) -- ([yshift=0.5em,xshift=-0.5em]e7.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]e7.center) -- ([yshift=0mm,xshift=0mm]e7.center) node[midway]{$l_{p'}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f1.north west) -- ([yshift=0.5em,xshift=-0.5em]f1.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f1.center) -- ([yshift=0mm,xshift=0mm]f1.center) node[midway]{$k_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f2.north west) -- ([yshift=0.5em,xshift=-0.5em]f2.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f2.center) -- ([yshift=0mm,xshift=0mm]f2.center) node[midway]{$k_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f8.north west) -- ([yshift=0.5em,xshift=-0.5em]f8.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f8.center) -- ([yshift=0mm,xshift=0mm]f8.center) node[midway]{$k_{m''}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f4.north west) -- ([yshift=0.5em,xshift=-0.5em]f4.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f4.center) -- ([yshift=0mm,xshift=0mm]f4.center) node[midway]{$l_2$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f5.north west) -- ([yshift=0.5em,xshift=-0.5em]f5.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f5.center) -- ([yshift=0mm,xshift=0mm]f5.center) node[midway]{$l_1$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f6.north west) -- ([yshift=0.5em,xshift=-0.5em]f6.north west) node[midway]{${}^\text{1}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f6.center) -- ([yshift=0mm,xshift=0mm]f6.center) node[midway]{$l_{p''}$};
\draw[decorate,decoration={brace},thick] ([yshift=0.5em,xshift=-0.5em]f7.north west) -- ([yshift=0.5em,xshift=-0.5em]f7.north west) node[midway]{${}^\text{0}$};
\draw[decorate,decoration={brace},thick] ([yshift=0mm,xshift=0mm]f7.center) -- ([yshift=0mm,xshift=0mm]f7.center) node[midway]{$x_2$};
}
where $m'=2m-1,m''=2m-2,p'=2p-1,p''=2p-2$, $x_1=k_{2m}+l_{2p}$, $x_2=k_{2m-1}+l_{2p-1}$, $x_3=k_{2m}+l_{2p-1}$, and $x_4=k_{2m-1}+l_{2p}$.
The primary goals of this paper are to study the explicit relations of K--Y MZVs $\zeta(\bfk\circledast{\bfl^\star})$ and their related variants, such as $T$-variants $T(\bfk\circledast{\bfl})$. Then using these explicit relations, we establish some explicit formulas of multiple zeta (star) values and their related variants.
The remainder of this paper is organized as follows. In Section \ref{sec2}, we first establish the explicit evaluations of integrals $\int_0^1 x^{n-1}{\rm Li}_{\bfk}(x)\,dx$ and $\int_0^1 x^{2n+b}{\rm A}(\bfk;x)\,dx$ for all positive integers $n$ and $b\in\{-1,-2\}$, where ${\rm Li}_{\bfk}(x)$ is the single-variable multiple polylogarithm (see \eqref{equ:singleLi})
and ${\rm A}(\bfk;x)$ is the Kaneko--Tsumura A-function (see \eqref{equ:defnA}). Then, for all compositions $\bfk$ and $\bfl$, using the explicit formulas obtained and by considering the two kinds of integrals
\[I_L(\bfk;\bfl):=\int_0^1 \frac{{\rm Li}_{\bfk}(x){\rm Li}_{\bfl}(x)}{x}\,dx\quad\text{and}\quad I_A(\bfk;\bfl):=\int_0^1 \frac{{\rm A}(\bfk;x){\rm A}(\bfl;x)}{x} \, dx,\]
we establish some explicit relations of $\zeta(\bfk\circledast{\bfl^\star})$ and $T(\bfk\circledast{\bfl})$. Further, we express the integrals $I_L(\bfk;\bfl)$ and $I_A(\bfk;\bfl)$ in terms of multiple integrals associated with 2-labeled posets following the idea of Yamamoto \cite{Y2014}.
In Section \ref{sec3}, we first define a variation of the classical multiple polylogarithm function with $r$-variable
$\gl_{\bfk}(x_1,x_2,\dotsc,x_r)$ (see \eqref{equ:gl}), and give the explicit evaluation of the integral
$$ \int_0^1 x^{n-1} \gl_{\bfk}(\sigma_1x,\sigma_2x,\dotsc,\sigma_rx)\,dx, \quad \sigma_j\in\{\pm 1\}.$$
Then we will consider the integral
\[I_\gl((\bfk;\bfsi),(\bfl;\bfeps)):=
\int_0^1 \frac{\gl_{\bfk_r}(\sigma_1x,\dotsc,\sigma_rx)\gl_{\bfl_s}(\varepsilon_1x,\dotsc,\varepsilon_sx)}{x}\,dx
\]
to find some explicit relations of alternating Kaneko--Yamamoto MZVs $\zeta((\bfk;\bfsi)\circledast(\bfl;\bfeps)^\star)$. Further, we will find some relations involving alternating MZVs. Finally, we express the integrals $I_\gl((\bfk;\bfsi),(\bfl;\bfeps))$ in terms of multiple integrals associated with 3-labeled posets.
In Section \ref{sec4}, we define the multiple $t$-harmonic (star) sums and the function $t(\bfk;x)$ related to multiple $t$-values. Further, we establish some relations involving multiple $t$-star values.
\section{Formulas of Kaneko--Yamamoto MZVs and $T$-Variants}\label{sec2}
In this section we will prove several explicit formulas of Kaneko--Yamamoto MZVs and $T$-variants, and find some explicit relations among MZ(S)Vs and MTVs.
\subsection{Some relations of Kaneko--Yamamoto MZVs}
\begin{thm}\label{Thm1} Let $r,n\in \mathbb{N}$ and ${\bfk}_r:=(k_1,\dotsc,k_r)\in \mathbb{N}^r$. Then
\begin{align}\label{a1}
\int_0^1 x^{n-1}{\rm Li}_{{\bfk}_r}(x)dx&=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{n^j}\zeta\left({\bfk}_{r-1},k_r+1-j\right)+\frac{(-1)^{|{\bfk}_r|-r}}{n^{k_r}}\zeta^\star_n\left(1,{\bfk}_{r-1}\right)\nonumber\\
&\quad+\sum_{l=1}^{r-1} (-1)^{|{\bfk}_r^l|-l} \sum_{j=1}^{k_{r-l}-1}\frac{(-1)^{j-1}}{n^{k_r}} \zeta^\star_n\left(j,{\bfk}_{r-1}^{l-1}\right)\zeta\left({\bfk}_{r-l-1},k_{r-l}+1-j\right),
\end{align}
where ${{\rm Li}}_{{{k_1},\dotsc,{k_r}}}(z)$ is the single-variable multiple polylogarithm function defined by
\begin{align}\label{equ:singleLi}
&{{\rm Li}}_{{{k_1},\dotsc,{k_r}}}(z): = \sum\limits_{0< {n_1} < \cdots < {n_r}} {\frac{{{z^{{n_r}}}}}{{n_1^{{k_1}}\cdots n_r^{{k_r}}}}},\quad z \in \left[ { - 1,1} \right).
\end{align}
\end{thm}
\begin{proof}
It is well known that multiple polylogarithms can be expressed by the iterated integral
\begin{align*}
{\rm Li}_{k_1,\dotsc,k_r}(x)=\int_0^x \frac{dt}{1-t}\left(\frac{dt}{t}\right)^{k_1-1} \dotsm\frac{dt}{1-t}\left(\frac{dt}{t}\right)^{k_r-1},
\end{align*}
where for 1-forms $\ga_1(t)=f_1(t)\, dt,\dotsc,\ga_\ell(t)=f_\ell(t)\, dt$, we define iteratively
\begin{equation*}
\int_a^b \ga_1(t) \cdots \ga_\ell(t) = \int_a^b \left(\int_a^y \ga_1(t)\cdots\ga_{\ell-1}(t)\right) f_\ell(y)\, dy.
\end{equation*}
Using integration by parts, we deduce the recurrence relation
\[
\int_0^1 x^{n-1}{\rm Li}_{{\bfk}_r}(x)dx=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{n^j}\zeta({\bfk}_{r-1},k_r+1-j)+\frac{(-1)^{k_r-1}}{n^{k_r}}\sum_{j=1}^n \int_0^1 x^{j-1}{\rm Li}_{{\bfk}_{r-1}}(x)dx.
\]
Thus, we arrive at the desired formula by a direct calculation.
\end{proof}
For any string $\{s_1,\dots,s_d\}$ and $r\in \mathbb{N}$, we denote by $\{s_1,\dots,s_d\}_r$
the concatenated string obtained by repeating $\{s_1,\dots,s_d\}$ exactly $r$ times.
\begin{cor}\label{cor-I2}\emph{(cf. \cite{Xu2017})} For positive integers $n$ and $r$,
\begin{align*}
\int_0^1 x^{n-1}\log^r(1-x)dx=(-1)^rr!\frac{\zeta^\star_n(\{1\}_{r})}{n}.
\end{align*}
\end{cor}
For any nontrivial compositions $\bfk$ and $\bfl$,
we consider the integral
\[
I_L(\bfk;\bfl):=\int_0^1 \frac{{\rm Li}_{\bfk}(x){\rm Li}_{\bfl}(x)}{x}\,dx
\]
and use \eqref{a1} to find some explicit relations of K--Y MZVs. We prove the following theorem.
\begin{thm}\label{thm-KY} For compositions ${\bfk}_r=(k_1,\dotsc,k_r)\in \mathbb{N}^r$ and ${\bfl}_s=(l_1,l_2,\dotsc,l_s)\in \mathbb{N}^s$,
\begin{align}\label{a2}
&\sum_{j=1}^{k_r-1} (-1)^{j-1}\zeta\left({\bfk}_{r-1},k_r+1-j \right)\zeta\left({\bfl}_{s-1},l_s+j\right)+(-1)^{|{\bfk}_r|-r}\zeta\left({\bfl}_s\circledast\Big(1,{\bfk}_r\Big)^\star\right)\nonumber\\
&+\sum_{i=1}^{r-1} (-1)^{|{\bfk}_r^i|-i}\sum_{j=1}^{k_{r-i}-1}(-1)^{j-1} \zeta\left({\bfk}_{r-i-1},k_{r-i}+1-j\right)\zeta\left({\bfl}_s\circledast\Big(j,{\bfk}_r^i\Big)^\star\right)\nonumber\\
&=\sum_{j=1}^{l_s-1} (-1)^{j-1}\zeta\left({\bfl}_{s-1},l_s+1-j \right)\zeta\left({\bfk}_{r-1},k_r+j\right)+(-1)^{|{\bfl}_s|-s}\zeta\left({\bfk}_r\circledast\Big(1,{\bfl}_s\Big)^\star\right)\nonumber\\
&\quad+\sum_{i=1}^{s-1} (-1)^{|{\bfl}_s^i|-i}\sum_{j=1}^{l_{s-i}-1}(-1)^{j-1} \zeta\left({\bfl}_{s-i-1},l_{s-i}+1-j\right)\zeta\left({\bfk}_r\circledast\Big(j,{\bfl}_s^i\Big)^\star\right).
\end{align}
\end{thm}
\begin{proof}
According to the definition of multiple polylogarithm, we have
\begin{align*}
\int_0^1 \frac{{\rm Li}_{\bfk_r}(x){\rm Li}_{\bfl_s}(x)}{x}\,dx&= \sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(\bfl_{s-1})}{n^{l_s}} \int_0^1 x^{n-1} {\rm Li}_{\bfk_r}(x)dx\\
&= \sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(\bfk_{r-1})}{n^{k_r}} \int_0^1 x^{n-1} {\rm Li}_{{\bfl}_s}(x)dx.
Then using \eqref{a1} with a direct calculation, we may deduce the desired evaluation.
\end{proof}
The formula in Theorem \ref{thm-KY} seems to be related to the harmonic product of Schur MZVs of anti-hook type
in \cite[Theorem 3.2]{MatsumotoNakasuji2020} and the general harmonic product
formula in \cite[Lemma 2.2]{BachmannYamasaki2018}. However, it does not seem to follow
from them easily.
As a special case, setting $r=2,s=1$ in \eqref{a2} and noting the fact that
\[\zeta(l_1\circledast(1,k_1,k_2)^\star)=\zeta^\star(1,k_1,k_2+l_1)\]
and \[\zeta(l_1\circledast(j,k_2)^\star)=\zeta^\star(j,l_1+k_2)\]
we find that
\begin{align}\label{a3}
&\sum\limits_{j=1}^{k_2-1}(-1)^{j-1} \zeta(k_1,k_2+1-j)\zeta(l_1+j)+(-1)^{k_1+k_2}\zeta^\star (1,k_1,k_2+l_1)\nonumber\\
&+(-1)^{k_2-1}\sum\limits_{j=1}^{k_2-1} (-1)^{j-1} \zeta(k_1+1-j)\zeta^\star(j,l_1+k_2)\nonumber\\
&=\sum\limits_{j=1}^{l_1-1}(-1)^{j-1}\zeta(l_1+1-j)\zeta(k_1,k_2+j)+(-1)^{l_1-1}\zeta((k_1,k_2)\circledast(1,l_1)^\star).
\end{align}
On the other hand, from the definition of K--Y MZVs, it is easy to find that
\[\zeta((k_1,k_2)\circledast(1,l_1)^\star)=\zeta^\star(k_1,1,k_2+l_1)+\zeta^\star(1,k_1,k_2+l_1)-\zeta^\star(k_1+1,k_2+l_1)-\zeta^\star(1,k_1+k_2+l_1).\]
Hence, we can get the following corollary.
\begin{cor} For positive integers $k_1,k_2$ and $l_1$,
\begin{align}\label{a4}
&((-1)^{l_1-1}+(-1)^{k_1+k_2-1}) \zeta^\star(1,k_1,k_2+l_1)+(-1)^{l_1-1}\zeta^\star(k_1,1,k_2+l_1)\nonumber\\
&=\sum\limits_{j=1}^{k_2-1}(-1)^{j-1} \zeta(k_1,k_2+1-j)\zeta(l_1+j)-(-1)^{k_2}\sum\limits_{j=1}^{k_2-1} (-1)^{j-1} \zeta(k_1+1-j)\zeta^\star(j,l_1+k_2)\nonumber\\
&\quad-\sum\limits_{j=1}^{l_1-1}(-1)^{j-1}\zeta(l_1+1-j)\zeta(k_1,k_2+j)+(-1)^{l_1-1}\zeta^\star(k_1+1,k_2+l_1)\nonumber\\&\quad+(-1)^{l_1-1}\zeta^\star(1,k_1+k_2+l_1).
\end{align}
\end{cor}
Next, we establish an identity involving
\emph{Arakawa--Kaneko zeta function} (see \cite{AM1999}) which is defined by
\begin{align}
\xi(k_1,\dotsc,k_r;s):=\frac{1}{\Gamma(s)} \int\limits_{0}^\infty \frac{t^{s-1}}{e^t-1}{\rm Li}_{k_1,\dotsc,k_r}(1-e^{-t})dt\quad (\Re(s)>0).
\end{align}
Setting variables $1-e^{-t}=x$ and $s=p+1\in \mathbb{N}$, we deduce
\begin{align*}
\xi(k_1,\dotsc,k_r;p+1)&=\frac{(-1)^{p}}{p!}\int\limits_{0}^1 \frac{\log^{p}(1-x){\mathrm{Li}}_{{{k_1},{k_2}, \cdots ,{k_r}}}\left( x \right)}{x}dx\\
&=\sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(k_1,\dotsc,k_{r-1})\zeta^\star_n(\{1\}_{p})}{n^{k_r+1}}=\zeta({\bfk}\circledast (\{1\}_{p+1})^\star),
\end{align*}
where we have used Corollary \ref{cor-I2}. Clearly, the Arakawa--Kaneko zeta value is a special case of integral $I_L(\bfk;\bfl)$.
Further, setting $l_1=l_2=\cdots=l_s=1$ in Theorem \ref{thm-KY} yields
\begin{align*}
&\xi(k_1,\dotsc,k_r;s+1)=\zeta({\bfk}\circledast (\{1\}_{s+1})^\star)\\
&=\sum\limits_{j=1}^{k_r-1} (-1)^{j-1}\zeta\left({\bfk}_{r-1},k_r+1-j \right)\zeta\left(\{1\}_{s-1},1+j\right)+(-1)^{|{\bfk}_r|-r}\zeta\left(\{1\}_s\circledast\Big(1,{\bfk}\Big)^\star\right)\nonumber\\
&\quad+\sum\limits_{i=1}^{r-1} (-1)^{|{\bfk}_r^i|-i}\sum\limits_{j=1}^{k_{r-i}-1}(-1)^{j-1} \zeta\left({\bfk}_{r-i-1},k_{r-i}+1-j\right)\zeta\left(\{1\}_s\circledast\Big(j,{\bfk}_r^i\Big)^\star\right).
\end{align*}
We end this section by the following theorem and corollary.
\begin{thm} For any positive integer $m$ and composition $\bfk=(k_1,\dotsc,k_r)$,
\begin{align}\label{czt}
&2\sum\limits_{j=0}^{m-1} {\bar \zeta}(2m-1-2j) \sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(\bfk_{r-1})T_n(\{1\}_{2j+1})}{n^{k_r+1}}+\sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(\bfk_{r-1})S_n(\{1\}_{2m})}{n^{k_r+1}}\nonumber\\
&=\sum\limits_{j=1}^{k_r-1} (-1)^{j-1}2^j \zeta(\bfk_{r-1},k_r+1-j)T(\{1\}_{2m-1},j+1)+(-1)^{|\bfk|-r}\sum\limits_{n=1}^\infty \frac{T_n(\{1\}_{2m-1})\zeta^\star_n(1,\bfk_{r-1})}{n^{k_r+1}}\nonumber\\
&\quad+\sum\limits_{l=1}^{r-1} (-1)^{|\bfk_r^l|-l}\sum\limits_{j=1}^{k_{r-l}-1}(-1)^{j-1} \zeta(\bfk_{r-l-1},k_{r-l}+1-j)\sum\limits_{n=1}^\infty \frac{T_n(\{1\}_{2m-1})\zeta^\star_n\Big(j,\bfk_{r-1}^{l-1}\Big)}{n^{k_r+1}}.
\end{align}
\end{thm}
\begin{proof}
On the one hand, in \cite[Theorem 3.6]{XZ2020}, we proved that
\begin{align*}
\int_{0}^1 \frac{1}{x}\cdot {\rm Li}_{\bfk}(x^2)\log^{2m}\left(\frac{1-x}{1+x} \right)\, dx=\frac{(2m)!}{2}\times[\text{The left-hand side of \eqref{czt}}].
\end{align*}
On the other hand, we note that
\begin{align*}
\int_{0}^1 \frac{1}{x}\cdot {\rm Li}_{\bfk}(x^2)\log^{2m}\left(\frac{1-x}{1+x} \right)\, dx&=(2m)!\sum\limits_{n=1}^\infty \frac{T_n(\{1\}_{2m-1})}{n} \int_0^1 x^{2n-1} {\rm Li}_{\bfk}(x^2)dx\\
&=(2m)!\sum\limits_{n=1}^\infty \frac{T_n(\{1\}_{2m-1})}{2n} \int_0^1 x^{n-1} {\rm Li}_{\bfk}(x)dx.
\end{align*}
Then using (\ref{a1}) with an elementary calculation, we have
\begin{align*}
\int_{0}^1 \frac{1}{x}\cdot {\rm Li}_{\bfk}(x^2)\log^{2m}\left(\frac{1-x}{1+x} \right)\, dx=\frac{(2m)!}{2}\times[\text{The right-hand side of \eqref{czt}}].
\end{align*}
Thus, formula \eqref{czt} holds.
\end{proof}
In particular, setting $\bfk=(\{1\}_{r-1},k)$ we obtain \cite[Theorem 3.9]{XZ2020}. Setting $\bfk=(\{2\}_{r-1},k)$ we get the following corollary.
\begin{cor} For any positive integers $k,m$ and $r$,
\begin{multline} \label{cztb}
2\sum\limits_{j=0}^{m-1} {\bar \zeta}(2m-1-2j) \sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(\{2\}_{r-1})T_n(\{1\}_{2j+1})}{n^{k+1}}+\sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(\{2\}_{r-1})S_n(\{1\}_{2m})}{n^{k+1}} \\
=\sum\limits_{j=1}^{k-1} (-1)^{j-1}2^j \zeta(\{2\}_{r-1},k+1-j)T(\{1\}_{2m-1},j+1) \\
+\sum\limits_{l=1}^{r} (-1)^{l+k} \zeta(\{2\}_{r-l})\sum\limits_{n=1}^\infty \frac{T_n(\{1\}_{2m-1})\zeta^\star_n(j,\{2\}_{l-1})}{n^{k+1}}.
\end{multline}
\end{cor}
\subsection{Some relations of $T$-variant of Kaneko--Yamamoto MZVs}
Recall that the Kaneko--Tsumura A-function ${{\rm A}}(k_1,\dotsc,k_r;z)$ (see \cite{KanekoTs2018b}) is defined by
\begin{align}\label{equ:defnA}
&{{\rm A}}(k_1,\dotsc,k_r;z): = 2^r\sum\limits_{1 \le {n_1} < \cdots < {n_r}\atop n_i\equiv i\ {\rm mod}\ 2} {\frac{{{z^{{n_r}}}}}{{n_1^{{k_1}} \cdots n_r^{{k_r}}}}},\quad z \in \left[ { - 1,1} \right).
\end{align}
In this subsection, we present a series of results concerning this function.
\begin{thm} For positive integers $m$ and $n$,
\begin{align}
&\int_0^1 x^{2n-2} {\rm A}(\bfk_{2m};x)\, dx=\sum\limits_{j=1}^{k_{2m}-1} \frac{(-1)^{j-1}}{(2n-1)^j} T(\bfk_{2m-1},k_{2m}+1-j)+\frac{(-1)^{|\bfk_{2m}|}}{(2n-1)^{k_{2m}}}T_n(1,\bfk_{2m-1})\nonumber\\
&\quad+\frac{1}{(2n-1)^{k_{2m}}}\sum\limits_{i=1}^{m-1} (-1)^{|\bfk_{2m}^{2i}|} \sum\limits_{j=1}^{k_{2m-2i}-1} (-1)^{j-1} T(\bfk_{2m-2i-1},k_{2m-2i}+1-j)T_n(j,\bfk_{2m-1}^{2i-1})\nonumber\\
&\quad-\frac{1}{(2n-1)^{k_{2m}}}\sum\limits_{i=0}^{m-1} (-1)^{|\bfk_{2m}^{2i+1}|} \sum\limits_{j=1}^{k_{2m-2i-1}-1} (-1)^{j-1} T(\bfk_{2m-2i-2},k_{2m-2i-1}+1-j)S_n(j,\bfk_{2m-1}^{2i})\nonumber\\
&\quad -\frac{1}{(2n-1)^{k_{2m}}} \sum\limits_{i=0}^{m-1} (-1)^{|\bfk_{2m}^{2i+1}|} \left(\int_0^1 {\rm A}(\bfk_{2m-2i-1},1;x)dx\right) T_n(\bfk_{2m-1}^{2i}),\label{a5}\\
&\int_0^1 x^{2n-1} {\rm A}(\bfk_{2m+1};x)\, dx=\sum\limits_{j=1}^{k_{2m+1}-1} \frac{(-1)^{j-1}}{(2n)^j} T(\bfk_{2m},k_{2m+1}+1-j)-\frac{(-1)^{|\bfk_{2m+1}|}}{(2n)^{k_{2m+1}}}T_n(1,\bfk_{2m})\nonumber\\
&\quad-\frac{1}{(2n)^{k_{2m+1}}}\sum\limits_{i=0}^{m-1} (-1)^{|\bfk_{2m+1}^{2i+1}|} \sum\limits_{j=1}^{k_{2m-2i}-1} (-1)^{j-1} T(\bfk_{2m-2i-1},k_{2m-2i}+1-j)T_n(j,\bfk_{2m}^{2i})\nonumber\\
&\quad+\frac{1}{(2n)^{k_{2m+1}}}\sum\limits_{i=0}^{m-1} (-1)^{|\bfk_{2m+1}^{2i+2}|} \sum\limits_{j=1}^{k_{2m-2i-1}-1} (-1)^{j-1} T(\bfk_{2m-2i-2},k_{2m-2i-1}+1-j)S_n(j,\bfk_{2m}^{2i+1})\nonumber\\
&\quad +\frac{1}{(2n)^{k_{2m+1}}} \sum\limits_{i=0}^{m-1} (-1)^{|\bfk_{2m+1}^{2i+2}|} \left(\int_0^1 {\rm A}(\bfk_{2m-2i-1},1;x)dx\right) T_n(\bfk_{2m}^{2i+1}),\label{a6}\\
&\int_0^1 x^{2n-2} {\rm A}(\bfk_{2m+1};x)\,dx=\sum\limits_{j=1}^{k_{2m+1}-1} \frac{(-1)^{j-1}}{(2n-1)^j} T(\bfk_{2m},k_{2m+1}+1-j)-\frac{(-1)^{|\bfk_{2m+1}|}}{(2n-1)^{k_{2m+1}}}S_n(1,\bfk_{2m})\nonumber\\
&\quad+\frac{1}{(2n-1)^{k_{2m+1}}}\sum\limits_{i=1}^{m} (-1)^{|\bfk_{2m+1}^{2i}|} \sum\limits_{j=1}^{k_{2m+1-2i}-1} (-1)^{j-1} T(\bfk_{2m-2i},k_{2m+1-2i}+1-j)T_n(j,\bfk_{2m}^{2i-1})\nonumber\\
&\quad-\frac{1}{(2n-1)^{k_{2m+1}}}\sum\limits_{i=0}^{m-1} (-1)^{|\bfk_{2m+1}^{2i+1}|} \sum\limits_{j=1}^{k_{2m-2i}-1} (-1)^{j-1} T(\bfk_{2m-2i-1},k_{2m-2i}+1-j)S_n(j,\bfk_{2m}^{2i})\nonumber\\
&\quad -\frac{1}{(2n-1)^{k_{2m+1}}} \sum\limits_{i=0}^{m} (-1)^{|\bfk_{2m+1}^{2i+1}|} \left(\int_0^1 {\rm A}(\bfk_{2m-2i},1;x)dx\right) T_n(\bfk_{2m}^{2i}),\label{a7}\\
&\int_0^1 x^{2n-1} {\rm A}(\bfk_{2m};x)\, dx=\sum\limits_{j=1}^{k_{2m}-1} \frac{(-1)^{j-1}}{(2n)^j} T(\bfk_{2m-1},k_{2m}+1-j)+\frac{(-1)^{|\bfk_{2m}|}}{(2n)^{k_{2m}}}S_n(1,\bfk_{2m-1})\nonumber\\
&\quad-\frac{1}{(2n)^{k_{2m}}}\sum\limits_{i=1}^{m} (-1)^{|\bfk_{2m}^{2i-1}|} \sum\limits_{j=1}^{k_{2m+1-2i}-1} (-1)^{j-1} T(\bfk_{2m-2i},k_{2m+1-2i}+1-j)T_n(j,\bfk_{2m-1}^{2i-2})\nonumber\\
&\quad+\frac{1}{(2n)^{k_{2m}}}\sum\limits_{i=1}^{m-1} (-1)^{|\bfk_{2m}^{2i}|} \sum\limits_{j=1}^{k_{2m-2i}-1} (-1)^{j-1} T(\bfk_{2m-2i-1},k_{2m-2i}+1-j)S_n(j,\bfk_{2m-1}^{2i-1})\nonumber\\
&\quad +\frac{1}{(2n)^{k_{2m}}} \sum\limits_{i=1}^{m} (-1)^{|\bfk_{2m}^{2i}|} \left(\int_0^1 {\rm A}(\bfk_{2m-2i},1;x)dx\right) T_n(\bfk_{2m-1}^{2i-1}),\label{a8}
\end{align}
where we allow $m=0$ in \eqref{a6} and \eqref{a7}.
\end{thm}
\begin{proof}
It is easy to see that the A-function can be expressed by an iterated integral:
\begin{align*}
{\rm A}(k_1,\dotsc,k_r;x)=\int_0^x \frac{2dt}{1-t^2}\left(\frac{dt}{t}\right)^{k_1-1}
\cdots\frac{2dt}{1-t^2}\left(\frac{dt}{t}\right)^{k_r-1}.
\end{align*}
Using integration by parts, we deduce the recurrence relation
\begin{align*}
\int_0^1 x^{2n-2} {\rm A}(\bfk_r;x)\, dx&=\sum\limits_{j=1}^{k_r-1}\frac{(-1)^{j-1}}{(2n-1)^j} T(\bfk_{r-1},k_r+1-j)+\frac{(-1)^{k_r-1}}{(2n-1)^{k_r}} \int_0^1 {\rm A}(\bfk_{r-1},1;x)\, dx\\
&\quad+\frac{(-1)^{k_r-1}}{(2n-1)^{k_r}}2\sum\limits_{k=1}^{n-1} \int_0^1 x^{2k-1} {\rm A}(\bfk_{r-1};x)\, dx,
\end{align*}
and
\begin{align*}
\int_0^1 x^{2n-1} {\rm A}(\bfk_r;x) \, dx&=\sum\limits_{j=0}^{k_r-2}\frac{(-1)^{j}}{(2n)^{j+1}} T(\bfk_{r-1},k_r-j)+\frac{(-1)^{k_r-1}}{(2n)^{k_r}}2\sum\limits_{k=1}^{n} \int_0^1 x^{2k-2} {\rm A}(\bfk_{r-1};x)\, dx.
\end{align*}
Hence, using the recurrence formulas above, we may deduce the four desired evaluations
after an elementary but rather tedious computation, which we leave to the interested reader.
\end{proof}
\begin{lem}\label{equ:Aones}
For any positive integer $r$ we have
\begin{equation*}
\int_0^1 {\rm A}(\{1\}_{r};x) \, dx = -2^{1-r} \zeta(\bar r)=
\left\{
\begin{array}{ll}
\phantom{\frac12} \log 2, & \hbox{if $r=1$;} \\
2^{1-r}(1-2^{1-r}) \zeta(r), \qquad \ & \hbox{if $r\ge 2$.}
\end{array}
\right.
\end{equation*}
\end{lem}
\begin{proof}
Consider the generating function
\begin{equation*}
G(u):=1+\sum\limits_{r=1}^\infty \left(\int_0^1 {\rm A}(\{1\}_{r};x) \, dx \right) (-2u)^r.
\end{equation*}
By definition
\begin{align*}
G(u) =\, & 1+\sum\limits_{r=1}^\infty (-2u)^r \int_0^1 \int_0^{x} \left(\frac{dt}{1-t^2} \right)^r \, dx \\
=\, & 1+\sum\limits_{r=1}^\infty \frac{(-2u)^r}{r!} \int_0^1 \left( \int_0^{x} \frac{dt}{1-t^2} \right)^r \, dx \\
=\, & 1+\int_0^1 \left( \sum\limits_{r=1}^\infty \frac{1}{r!} \left(-u\log \left(\frac{1+x}{1-x}\right)\right)^r \right) \, dx \\
=\, & \int_0^1 \left(\frac{1-x}{1+x}\right)^{u} \, dx .
\end{align*}
Taking $a=u,b=1,c=u+2$ and $t=-1$ in the formula
\begin{equation*}
{}_2F_1\left(\left.{
a,b \atop c}\right|t \right)=\frac{\Gamma(c)}{\Gamma(b)\Gamma(c-b)}
\int_0^1 v^{b-1} (1-v)^{c-b-1} (1-vt)^{-a} \,dv,
\end{equation*}
we obtain
\begin{align*}
G(u)=\, &\frac{1}{u+1} \sum\limits_{k\ge 0} \frac{u(u+1)}{(u+k)(u+k+1)} (-1)^k \\
=\, &u\sum\limits_{k\ge0} (-1)^k \left(\frac{1}{u+k}-\frac{1}{u+k+1}\right)\\
=\, & 1+\sum\limits_{k\ge 1} 2 (-1)^k \frac{u}{u+k} \\
=\, & 1-\sum\limits_{k\ge 1} 2 (-1)^k \sum\limits_{r\ge 0} \left(\frac{-u}{k}\right)^{r+1} \\
=\, & 1-2 \sum\limits_{r\ge 1} \zeta(\bar r)(-u)^r.
\end{align*}
The lemma follows immediately.
\end{proof}
\begin{thm}\label{thm-IA}
For composition $\bfk=(k_1,\dotsc,k_r)$, the integral
\[\int_0^1 {\rm A}(k_1,\dotsc,k_r,1;x)dx\]
can be expressed as a $\mathbb{Q}$-linear combination of alternating MZVs.
\end{thm}
\begin{proof}
It suffices to prove the integral can be expressed in terms of $\log 2$ and MMVs since these values
generate the same $\mathbb{Q}$-vector space as that by alternating MZVs as shown in \cite{XZ2020}.
Suppose $k_r>1$. Then
\begin{align*}
\,& \int_0^1 {\rm A}(k_1,\dotsc,k_r,1;x)\, dx\\
=\,& 2^{r+1}\sum\limits_{\substack{ 0<n_1<\cdots<n_r<n_{r+1} \\ n_i\equiv i \pmod{2} }}
\frac{1}{n_1^{k_1}\cdots n_r^{k_r}} \left(\frac1{n_{r+1}}- \frac1{n_{r+1}+1} \right) \\
=\,&
\left\{
\begin{array}{ll}
M_*(\breve{k_1},k_2,\breve{k_3},\dotsc, \breve{k_r},1) - M_*(\breve{k_1},k_2,\dotsc, \breve{k_r},\breve{1} ),& \qquad \hbox{if $2\nmid r$;} \\
M_*(\breve{k_1},k_2,\breve{k_3},\dotsc, k_r,\breve{1}) - M_*(\breve{k_1},k_2,\dotsc, k_r,1), & \qquad \hbox{if $2\mid r$,}
\end{array}
\right. \\
=\,& \left\{
\begin{array}{ll}
M(\breve{k_1},k_2,\breve{k_3},\dotsc, \breve{k_r})\big(M_*(1)-M_*(\breve{1})\big) & \pmod{MMV}, \qquad \hbox{if $2\nmid r$;} \\
M(\breve{k_1},k_2,\breve{k_3},\dotsc, k_r)\big(M_*(\breve{1})-M_*(1)\big) & \pmod{MMV},\qquad \hbox{if $2\mid r$,}
\end{array}
\right. \\
=\,& \left\{
\begin{array}{ll}
-2M(\breve{k_1},k_2,\breve{k_3},\dotsc, \breve{k_r})\log 2 & \pmod{MMV} ,\qquad \hbox{if $2\nmid r$;} \\
2M(\breve{k_1},k_2,\breve{k_3},\dotsc, k_r)\log 2 & \pmod{MMV},\qquad \hbox{if $2\mid r$.}
\end{array}
\right.
\end{align*}
which can be expressed as a $\mathbb{Q}$-linear combination of MMVs by \cite[Theorem 7.1]{XZ2020}.
In general, we may assume $k_r>1$ and consider $\int_0^1 {\rm A}(k_1,\dotsc,k_r,\{1\}_\ell;x)\, dx$.
By induction on $\ell$, we see that
\begin{align*}
&\, \int_0^1 {\rm A}(k_1,\dotsc,k_r,\{1\}_\ell;x)\, dx\\
=&\, \left\{
\begin{array}{ll}
M(\breve{k_1},k_2,\breve{k_3},\dotsc, \breve{k_r})\big(M_*(\bfu,1)-M_*(\bfu,\breve{1})\big) & \pmod{MMV} , \qquad \hbox{if $2\nmid r$, $2\nmid \ell$;} \\
M(\breve{k_1},k_2,\breve{k_3},\dotsc, \breve{k_r})\big(M_*(\bfu',1,\breve{1})-M_*(\bfu',1,1)\big) &\pmod{MMV} , \qquad \hbox{if $2\nmid r$, $2\mid\ell$;} \\
M(\breve{k_1},k_2,\breve{k_3},\dotsc, k_r)\big(M_*(\bfv,\breve{1})-M_*(\bfv,1)\big) & \pmod{MMV},\qquad \hbox{if $2\mid r$, $2\nmid\ell$;}\\
M(\breve{k_1},k_2,\breve{k_3},\dotsc, k_r)\big(M_*(\bfv',\breve{1},1)-M_*(\bfv',\breve{1},\breve{1})\big) & \pmod{MMV}, \qquad\hbox{if $2\mid r$, $2\mid\ell$,}
\end{array}
\right.
\end{align*}
where $\bfu=\{1,\breve{1}\}_{(\ell-1)/2},\bfu'=\{1,\breve{1}\}_{(\ell-2)/2}, \bfv=\{\breve{1},1\}_{(\ell-1)/2}, \bfv'=\{\breve{1},1\}_{(\ell-2)/2}.$ By Lemma \ref{equ:Aones} we see that
$M_*(\cdots,1)-M_*(\cdots,\breve{1})=\mp 2\zeta(\bar\ell)$. This finishes the proof of the theorem.
\end{proof}
\begin{exa}\label{exa-A} Applying the idea in the proof of Theorem~\ref{thm-IA} we can find that for any positive integer $k$,
\begin{align*}
\int_0^1 {\rm A}(k,1;x)\,dx&=M_*(\breve{k},1)-M_*(\breve{k},\breve{1})\\
&=M(\breve{k})(M_*(1)-M_*(\breve{1}))+M(\breve{1},\breve{k})-M(1,\breve{k})+2M\big((k+1)\breve{\, }\big).
\end{align*}
Observing that $M_*(\breve{1})-M_*(1)=2\log(2),\ M(\breve{k})=T(k),\ M(\breve{1},\breve{k})=4t(1,k)$ and $M(1,\breve{k})=S(1,k)$, we obtain
\begin{align*}
\int_0^1 {\rm A}(k,1;x)\,dx=-2\log(2)T(k)+2T(k+1)+4t(1,k)-S(1,k).
\end{align*}
\end{exa}
{}From Lemma \ref{equ:Aones} we can get the following corollary, which was proved in \cite{XZ2020}.
\begin{cor}\label{cor-II}\emph{(\cite[Theorem 3.1]{XZ2020})} For positive integers $m$ and $n$, the following identities hold.
\begin{align}
&\begin{aligned}
\int_{0}^1 t^{2n-2} \log^{2m}\left(\frac{1-t}{1+t}\right) dt&= \frac{2(2m)!}{2n-1} \sum\limits_{j=0}^m {\bar \zeta}(2j)T_n(\{1\}_{2m-2j}),\label{ee}
\end{aligned}\\
&\begin{aligned}
\int_{0}^1 t^{2n-2} \log^{2m-1}\left(\frac{1-t}{1+t}\right) dt&= -\frac{(2m-1)!}{2n-1} \left(2\sum\limits_{j=1}^{m} {\bar \zeta}(2j-1)T_n(\{1\}_{2m-2j}) + S_n(\{1\}_{2m-1}) \right),\label{eo}
\end{aligned}\\
&\begin{aligned}
\int_{0}^1 t^{2n-1} \log^{2m}\left(\frac{1-t}{1+t}\right) dt&=\frac{(2m)!}{n} \left(\sum\limits_{j=1}^{m} {\bar \zeta}(2j-1)T_n(\{1\}_{2m-2j+1})+ S_n(\{1\}_{2m})\right),\label{oe}
\end{aligned}\\
&\begin{aligned}
\int_{0}^1 t^{2n-1} \log^{2m-1}\left(\frac{1-t}{1+t}\right) dt&= -\frac{(2m-1)!}{n} \sum\limits_{j=0}^{m-1} {\bar \zeta}(2j-2)T_n(\{1\}_{2m-2j-1}),\label{oo}
\end{aligned}
\end{align}
where ${\bar \zeta}(m):=-\zeta(\overline{ m})$, and ${\bar \zeta}(0)$ should be interpreted as $1/2$ wherever it occurs.
\end{cor}
We now derive some explicit relations about $T$-variant of K--Y MZV $T(\bfk\circledast\bfl)$ by considering the integral
\[
I_A(\bfk;\bfl):=\int_0^1 \frac{{\rm A}(\bfk;x){\rm A}(\bfl;x)}{x} \, dx.
\]
\begin{thm} \label{thm:S2Ts}
For positive integers $k$ and $l$, we have
\begin{multline*}
((-1)^l-(-1)^k)S(1,k+l)
=\sum\limits_{j=1}^l (-1)^{j-1} T(l+1-j)T(k+j)+\sum\limits_{j=1}^k (-1)^{j} T(k+1-j)T(l+j),
\end{multline*}
where $T(1):=2\log(2)$.
\end{thm}
\begin{proof} One may deduce the formula by a straightforward calculation of the integral
\begin{align*}
\int_0^1 \frac{{\rm A}(k;x){\rm A}(l;x)}{x}\, dx.
\end{align*}
We leave the details to the interested reader.
\end{proof}
For example, setting $k=1$ and $l=2p\ (p\in\mathbb{N})$ in Theorem \ref{thm:S2Ts} yields
\begin{align*}
S(1,2p+1)=\sum\limits_{j=0}^{p-1} (-1)^{j-1} T(2p+1-j)T(j+1)-\frac{(-1)^p}{2}T^2(p+1).
\end{align*}
\begin{thm}\label{thm-TT2} For positive integers $k_1,k_2$ and $l$,
\begin{align}\label{b17}
&(-1)^{l-1}T((k_1,k_2)\circledast(1,l))+(-1)^{k_1+k_2-1}T(1,k_1,k_2+l)\nonumber\\
&=\sum\limits_{j=1}^{k_2-1} (-1)^{j-1}T(k_1,k_2+1-j)T(l+j)-\sum\limits_{j=1}^{l-1} (-1)^{j-1} T(l+1-j)T(k_1,k_2+j)\nonumber\\
&\quad-(-1)^{k_2}\sum\limits_{j=1}^{k_1-1}(-1)^{j-1} T(k_1+1-j)S(j,k_2+l)-(-1)^{k_2}T(k_2+l)\int_0^1 {\rm A}(k_1,1;x) \, dx,
\end{align}
where $\int_0^1 {\rm A}(k,1;x) \, dx$ is given by Example \ref{exa-A}.
\end{thm}
\begin{proof}
From \eqref{a5} and \eqref{a6}, we deduce
\begin{align*}
\int_0^1 x^{2n-1}{\rm A}(k;x)\,dx=\sum\limits_{j=1}^{k-1} \frac{(-1)^{j-1}}{(2n)^j}T(k+1-j)+\frac{(-1)^{k-1}}{(2n)^k}T_n(1)
\end{align*}
and
\begin{multline*}
\int_0^1 x^{2n-2}{\rm A}(k_1,k_2;x)\,dx=\sum\limits_{j=1}^{k_2-1} \frac{(-1)^{j-1}}{(2n-1)^j}T(k_1,k_2+1-j)+\frac{(-1)^{k_1+k_2}}{(2n-1)^{k_2}}T_n(1,k_1)\\
+\frac{(-1)^{k_2-1}}{(2n-1)^{k_2}}\sum\limits_{j=1}^{k_1-1} (-1)^{j-1} T(k_1+1-j)S_n(j)
+\frac{(-1)^{k_2-1}}{(2n-1)^{k_2}}\int_0^1 {\rm A}(k_1,1;x)\, dx.
\end{multline*}
According to the definitions of A-functions, MTVs and MSVs, on the one hand, we have
\begin{align*}
&\int_0^1 \frac{{\rm A}(k_1,k_2;x){\rm A}(l;x)}{x}\, dx=2\sum\limits_{n=1}^\infty\frac{1}{(2n-1)^l} \int_0^1 x^{2n-2}{\rm A}(k_1,k_2;x)\,dx\\
&=\sum\limits_{j=1}^{k_2-1} (-1)^{j-1}T(k_1,k_2+1-j)T(l+j)-(-1)^{k_2}\sum\limits_{j=1}^{k_1-1}(-1)^{j-1} T(k_1+1-j)S(j,k_2+l)\\
&\quad-(-1)^{k_2}T(k_2+l)\int_0^1 {\rm A}(k_1,1;x) \, dx+(-1)^{k_1+k_2}T(1,k_1,k_2+l).
\end{align*}
On the other hand,
\begin{multline*}
\int_0^1 \frac{{\rm A}(k_1,k_2;x){\rm A}(l;x)}{x}\, dx=2\sum\limits_{n=1}^\infty\frac{T_n(k_1)}{(2n)^{k_2}} \int_0^1 x^{2n-1}{\rm A}(l;x)\,dx\\
=\sum\limits_{j=1}^{l-1} (-1)^{j-1} T(l+1-j)T(k_1,k_2+j)+(-1)^{l-1} T((k_1,k_2)\circledast(1,l)).
\end{multline*}
Hence, combining two identities above, we obtain the desired evaluation.
\end{proof}
\begin{thm}\label{thm-TT3} For positive integers $k_1,k_2$ and $l_1,l_2$, we have
\begin{align*}
&(-1)^{k_1+k_2}T((l_1,l_2)\circledast(1,k_1,k_2)) -(-1)^{l_1+l_2}T((k_1,k_2)\circledast(1,l_1,l_2))\\
&=\sum\limits_{j=1}^{k_2-1} (-1)^{j} T(k_1,k_2+1-j)T(l_1,l_2+j)-\sum\limits_{j=1}^{l_2-1} (-1)^{j} T(l_1,l_2+1-j)T(k_1,k_2+j) \\
&\quad-(-1)^{k_2}\sum\limits_{j=1}^{k_1} (-1)^{j} T(k_1+1-j)T((l_1,l_2)\circledast(j,k_2)) \\
&\quad+(-1)^{l_2}\sum\limits_{j=1}^{l_1} (-1)^{j} T(l_1+1-j)T((k_1,k_2)\circledast(j,l_2)),
\end{align*}
where $T(1):=2\log(2).$
\end{thm}
\begin{proof}
Consider the integral
\[\int_0^1 \frac{{\rm A}(k_1,k_2;x){\rm A}(l_1,l_2;x)}{x}\, dx.\]
By a similar argument used in the proof of Theorem \ref{thm-TT2}, we can prove Theorem \ref{thm-TT3}.
\end{proof}
Moreover, according to the definitions of Kaneko--Tsumura $\psi$-function and Kaneko--Tsumura A-function (which is a single-variable multiple polylogarithm function of level two) \cite{KanekoTs2018b,KanekoTs2019},
\begin{align}\label{a14}
\psi(k_1,\dotsc,k_r;s):=\frac{1}{\Gamma(s)} \int\limits_{0}^\infty \frac{t^{s-1}}{\sinh(t)}{\rm A}({k_1,\dotsc,k_r};\tanh(t/2))dt\quad (\Re(s)>0)
\end{align}
and
\begin{align}\label{a15}
&{\rm A}(k_1,\dotsc,k_r;z): = 2^r\sum\limits_{1 \le {n_1} < \cdots < {n_r}\atop n_i\equiv i\ {\rm mod}\ 2} {\frac{{{z^{{n_r}}}}}{{n_1^{{k_1}} \cdots n_r^{{k_r}}}}},\quad z \in \left[ { - 1,1} \right).
\end{align}
Setting $\tanh(t/2)= x$ and $s =p+1\in\mathbb{N}$, we have
\begin{align}\label{cc8}
\psi(k_1,\dotsc,k_r;p+1)&=\frac{(-1)^{p}}{p!}\int\limits_{0}^1 \frac{\log^{p}\left(\frac{1-x}{1+x}\right){\rm A}(k_1,\dotsc,k_r;x)}{x}dx\nonumber\\
&=\int\limits_{0}^1 \frac{{\rm A}(\{1\}_p;x){\rm A}(k_1,\dotsc,k_r;x)}{x}dx,
\end{align}
where we have used the relation
\begin{align*}
{\rm A}({\{1\}_r};x)=\frac{1}{r!}({\rm A}(1;x))^r=\frac{(-1)^r}{ r!}\log^r\left(\frac{1-x}{1+x}\right).
\end{align*}
We remark that the Kaneko--Tsumura $\psi$-values can be regarded as a special case of the integral $I_A(\bfk;\bfl)$. So, one can prove \cite[Theorem 3.3]{XZ2020} by considering the integrals $I_A(\bfk;\bfl)$.
\subsection{Multiple integrals associated with 2-labeled posets}
According to iterated integral expressions, we know that ${\rm Li}_{\bfk}(x)$ and ${\rm A}(\bfk;x)$ satisfy the shuffle product relation. In this subsection, we will express the integrals $I_L(\bfk;\bfl)$ and $I_A(\bfk;\bfl)$ in terms of multiple integrals associated with 2-labeled posets, which implies that the integrals $I_L(\bfk;\bfl)$ and $I_A(\bfk;\bfl)$ can be expressed as linear combinations of MZVs (or MTVs). The key properties of these integrals were first studied by Yamamoto in \cite{Y2014}.
\begin{defn}
A \emph{$2$-poset} is a pair $(X,\delta_X)$, where $X=(X,\leq)$ is
a finite partially ordered set and $\delta_X$ is a map from $X$ to $\{0,1\}$.
We often omit $\delta_X$ and simply say ``a 2-poset $X$''.
The $\delta_X$ is called the \emph{label map} of $X$.
A 2-poset $(X,\delta_X)$ is called \emph{admissible} if
$\delta_X(x)=0$ for all maximal elements $x\in X$ and
$\delta_X(x)=1$ for all minimal elements $x\in X$.
\end{defn}
\begin{defn}
For an admissible 2-poset $X$, we define the associated integral
\begin{equation}\label{4.1}
I_j(X)=\int_{\Delta_X}\prod_{x\in X}\om^{(j)}_{\delta_X(x)}(t_x), \qquad j=1,2,
\end{equation}
where
\[\Delta_X=\bigl\{(t_x)_x\in [0,1]^X \bigm| t_x<t_y \text{ if } x<y\bigr\}\]
and
\[\om^{(1)}_0(t)=\om^{(2)}_0(t)=\frac{dt}{t}, \quad \om^{(1)}_1(t)=\frac{dt}{1-t}, \quad \om^{(2)}_1(t)=\frac{2dt}{1-t^2}. \]
\end{defn}
For the empty 2-poset, denoted $\emptyset$, we put $I_j(\emptyset):=1\ (j=1,2)$.
\begin{pro}\label{prop:shuffl2poset}
For non-comparable elements $a$ and $b$ of a $2$-poset $X$, $X^b_a$ denotes the $2$-poset that is obtained from $X$ by adjoining the relation $a<b$. If $X$ is an admissible $2$-poset, then the $2$-poset $X^b_a$ and $X^a_b$ are admissible and
\begin{equation}\label{4.2}
I_j(X)=I_j(X^b_a)+I_j(X^a_b)\quad (j=1,2).
\end{equation}
\end{pro}
Note that the admissibility of a 2-poset corresponds to
the convergence of the associated integral. We use Hasse diagrams to indicate 2-posets, with vertices $\circ$ and $\bullet$ corresponding to $\delta(x)=0$ and $\delta(x)=1$, respectively. For example, the diagram
\[\begin{xy}
{(0,-4) \ar @{{*}-o} (4,0)},
{(4,0) \ar @{-{*}} (8,-4)},
{(8,-4) \ar @{-o} (12,0)},
{(12,0) \ar @{-o} (16,4)}
\end{xy} \]
represents the 2-poset $X=\{x_1,x_2,x_3,x_4,x_5\}$ with order
$x_1<x_2>x_3<x_4<x_5$ and label
$(\delta_X(x_1),\dotsc,\delta_X(x_5))=(1,0,1,0,0)$.
This 2-poset is admissible.
To describe the corresponding diagram, we introduce an abbreviation:
For a sequence $\bfk_r=(k_1,\dotsc,k_r)$ of positive integers,
we write
\[\begin{xy}
{(0,-3) \ar @{{*}.o} (0,3)},
{(1,-3) \ar @/_1mm/ @{-} _{\bfk_r} (1,3)}
\end{xy}\]
for the vertical diagram
\[\begin{xy}
{(0,-24) \ar @{{*}-o} (0,-20)},
{(0,-20) \ar @{.o} (0,-14)},
{(0,-14) \ar @{-} (0,-10)},
{(0,-10) \ar @{.} (0,-4)},
{(0,-4) \ar @{-{*}} (0,0)},
{(0,0) \ar @{-o} (0,4)},
{(0,4) \ar @{.o} (0,10)},
{(0,10) \ar @{-{*}} (0,14)},
{(0,14) \ar @{-o} (0,18)},
{(0,18) \ar @{.o} (0,24)},
{(1,-24) \ar @/_1mm/ @{-} _{k_1} (1,-14)},
{(4,-3) \ar @{.} (4,-11)},
{(1,0) \ar @/_1mm/ @{-} _{k_{r-1}} (1,10)},
{(1,14) \ar @/_1mm/ @{-} _{k_r} (1,24)}
\end{xy}.\]
Hence, for admissible composition $\bfk$, using this notation of multiple associated integral, one can verify that
\begin{equation*}
\zeta(\bfk)=I_1\left(\ \begin{xy}
{(0,-3) \ar @{{*}.o} (0,3)},
{(1,-3) \ar @/_1mm/ @{-} _\bfk (1,3)}
\end{xy}\right)\quad\text{and}\quad T(\bfk)=I_2\left(\ \begin{xy}
{(0,-3) \ar @{{*}.o} (0,3)},
{(1,-3) \ar @/_1mm/ @{-} _\bfk (1,3)}
\end{xy}\right).
\end{equation*}
Therefore, according to the definitions of $I_L(\bfk;\bfl)$ and $I_A(\bfk;\bfl)$, and using this notation of multiple associated integral, we can get the following theorem.
\begin{thm}\label{thm-ILA} For compositions $\bfk$ and $\bfl$, we have
\begin{equation*}
I_L(\bfk;\bfl)=I_1\left(\xybox{
{(0,-9) \ar @{{*}-o} (0,-4)},
{(0,-4) \ar @{.o} (0,4)},
{(0,4) \ar @{-o} (5,9)},
{(10,-9) \ar @{{*}-o} (10,-4)},
{(10,-4) \ar @{.o} (10,4)},
{(10,4) \ar @{-} (5,9)},
{(-1,-9) \ar @/^1mm/ @{-} ^\bfk (-1,4)},
{(11,-9) \ar @/_1mm/ @{-} _{\bfl} (11,4)},
}\ \right)\quad\text{\rm and}\quad I_A(\bfk;\bfl)=I_2\left(\xybox{
{(0,-9) \ar @{{*}-o} (0,-4)},
{(0,-4) \ar @{.o} (0,4)},
{(0,4) \ar @{-o} (5,9)},
{(10,-9) \ar @{{*}-o} (10,-4)},
{(10,-4) \ar @{.o} (10,4)},
{(10,4) \ar @{-} (5,9)},
{(-1,-9) \ar @/^1mm/ @{-} ^\bfk (-1,4)},
{(11,-9) \ar @/_1mm/ @{-} _{\bfl} (11,4)},
}\ \right).
\end{equation*}
\end{thm}
\begin{proof}This follows immediately from the definitions of $I_L(\bfk;\bfl)$ and $I_A(\bfk;\bfl)$.
We leave the detail to the interested reader.
\end{proof}
It is clear that using Theorem \ref{thm-ILA}, the integrals $I_L(\bfk;\bfl)$ (or $I_A(\bfk;\bfl)$) can be expressed in terms of MZVs (or MTVs). In particular, for any positive integer $s$ the integrals $I_L(\bfk;\{1\}_s)$ and $I_A(\bfk;\{1\}_s)$ become the Arakawa--Kaneko zeta values and Kankeo--Tsumura $\psi$-values, respectively. Moreover, Kawasaki--Ohno \cite{KO2018} and Xu--Zhao \cite{XZ2020} have used the multiple integrals associated with 2-posets to prove explicit formulas for all Arakawa--Kaneko zeta values and Kankeo--Tsumura $\psi$-values.
Now, we end this section by the following duality relations. For any $n\in\mathbb{N}$ and composition $\bfk=(k_1,\dotsc,k_r)$, set
\begin{equation*}
\bfk_{+n}:=(k_1,\dotsc,k_{r-1},k_r+n).
\end{equation*}
\begin{thm}\label{thmDFILA} For any $p\in\mathbb{N}$ and compositions of positive integers $\bfk$, $\bfl$, we have
\begin{equation*}
I_L(\bfk_{+(p-1)};\bfl)+(-1)^p I_L(\bfk;\bfl_{+(p-1)})
=\sum\limits_{j=1}^{p-1} (-1)^{j-1} \zeta(\bfk_{+(p-j)})\zeta(\bfl_{+j})
\end{equation*}
and
\begin{equation*}
I_A(\bfk_{+(p-1)};\bfl)+(-1)^p I_A(\bfk;\bfl_{+(p-1)})
=\sum\limits_{j=1}^{p-1} (-1)^{j-1} T(\bfk_{+(p-j)})T(\bfl_{+j}).
\end{equation*}
\end{thm}
\begin{proof}
This follows easily from the definitions of $I_L(\bfk;\bfl)$ and $I_A(\bfk;\bfl)$ by using integration by parts.
We leave the detail to the interested reader.
\end{proof}
Setting $\bfk=(\{1\}_r)$ and $\bfl=(\{1\}_s)$ in Theorem \ref{thmDFILA} and noting the duality relations $\zeta(\{1\}_{r-1},s+1)=\zeta(\{1\}_{s-1},r+1)$ and $T(\{1\}_{r-1},s+1)=T(\{1\}_{s-1},r+1)$ , we obtain the following two well-known duality formulas for Arakawa--Kaneko zeta values and Kankeo--Tsumura $\psi$-values (see \cite{AM1999,KanekoTs2018b})
\begin{align*}
&\xi(\{1\}_{r-1},p;s+1)+(-1)^p\xi(\{1\}_{s-1},p;r+1)=\sum\limits_{j=0}^{p-2} (-1)^j \zeta(\{1\}_{r-1},p-j) \zeta(\{1\}_j,s+1)
\end{align*}
and
\begin{align*}
&\psi(\{1\}_{r-1},p;s+1)+(-1)^p\psi(\{1\}_{s-1},p;r+1)=\sum\limits_{j=0}^{p-2} (-1)^j T(\{1\}_{r-1},p-j) T(\{1\}_j,s+1).
\end{align*}
\section{Alternating variant of Kaneko--Yamamoto MZVs}\label{sec3}
\subsection{Integrals of multiple polylogarithm function with $r$-variable}
For any composition $\bfk_r=(k_1,\dotsc,k_r)\in\mathbb{N}^r$, we define the \emph{classical multiple polylogarithm function} with $r$-variable by
\begin{align*}
{\rm Li}_{\bfk_r}(x_1,\dotsc,x_r):=\sum\limits_{0<n_1<n_2<\dotsb<n_r} \frac{x_1^{n_1}\dotsm x_r^{n_r}}{n_1^{k_1}\dotsm n_r^{k_r}}
\end{align*}
which converges if $|x_j\cdots x_r|<1$ for all $j=1,\dotsc,r$. It can be analytically continued to a multi-valued meromorphic function on $\mathbb{C}^r$ (see \cite{Zhao2007d}). We also consider the following two variants. The first one is
the star version:
\begin{align*}
{\rm Li}^\star_{\bfk_r}(x_1,\dotsc,x_r):=\sum\limits_{0<n_1\leq n_2\leq \dotsb\leq n_r} \frac{x_1^{n_1}\dotsm x_r^{n_r}}{n_1^{k_1}\dotsm n_r^{k_r}}.
\end{align*}
The second is the most useful when we need to apply the technique of iterated integrals:
\begin{align}
\gl_{\bfk_r}(x_1,\dotsc,x_{r-1},x_r):=&\, {\rm Li}_{\bfk_r}(x_1/x_2,\dotsc,x_{r-1}/x_r,x_r) \notag\\
=&\,\sum\limits_{0<n_1<n_2<\dotsb<n_r} \frac{(x_1/x_2)^{n_1}\dotsm (x_{r-1}/x_r)^{n_{r-1}}x_r^{n_r}}{n_1^{k_1}\dotsm n_{r-1}^{k_{r-1}}n_r^{k_r}}\label{equ:gl}
\end{align}
which converges if $|x_j|<1$ for all $j=1,\dotsc,r$. Namely,
\begin{equation}\label{equ:glInteratedInt}
\gl_{\bfk_r}(x_1,\dotsc,x_r)= \int_0^1 \left(\frac{x_1\, dt}{1-x_1t}\right)\left(\frac{dt}{t}\right)^{k_1-1}\cdots
\left(\frac{x_r\, dt}{1-x_r t}\right)\left(\frac{dt}{t}\right)^{k_r-1}.
\end{equation}
Similarly, we define the parametric multiple harmonic sums and parametric multiple harmonic star sums with $r$-variable are defined by
\begin{align*}
\zeta_n(k_1,\dotsc,k_r;x_1,\dotsc,x_r):=\sum\limits_{0<m_1<\cdots<m_r\leq n } \frac{x_1^{m_1}\cdots x_r^{m_r}}{m_1^{k_1}\cdots m_r^{k_r}}
\end{align*}
and
\begin{align*}
\zeta^\star_n(k_1,\dotsc,k_r;x_1,\dotsc,x_r):=\sum\limits_{0<m_1\leq \cdots\leq m_r\leq n} \frac{x_1^{m_1}\cdots x_r^{m_r}}{m_1^{k_1}\cdots m_r^{k_r}},
\end{align*}
respectively. Obviously,
\begin{align*}
\lim_{n\rightarrow \infty} \zeta_n(k_1,\dotsc,k_r;x_1,\dotsc,x_r)={\rm Li}_{k_1,\dotsc,k_r}(x_1,\dotsc,x_r)
\end{align*}
and
\begin{align*}
\lim_{n\rightarrow \infty} \zeta^\star_n(k_1,\dotsc,k_r;x_1,\dotsc,x_r)={\rm Li}^\star_{k_1,\dotsc,k_r}(x_1,\dotsc,x_r).
\end{align*}
\begin{defn}
For any two compositions of positive integers $\bfk=(k_1,\dotsc,k_r)$, $\bfl=(l_1,\dotsc,l_s)$, $\boldsymbol{\sigma}:=(\sigma_1,\dotsc,\sigma_r)\in\{\pm 1\}^r$ and $\bfeps:=(\varepsilon_1,\dotsc,\varepsilon_s)\in\{\pm 1\}^s$, define
\begin{align}
&\zeta((\bfk;\boldsymbol{\sigma})\circledast(\bfl;\bfeps)^\star)\equiv\zeta((k_1,\dotsc,k_r;\sigma_1,\dotsc,\sigma_r)\circledast (l_1,\dotsc,l_s;\varepsilon_1,\dotsc,\varepsilon_s)^\star)\nonumber\\
&:=\sum_{n=1}^\infty \frac{\zeta_{n-1}(k_1,\dotsc,k_{r-1};\sigma_1,\dotsc,\sigma_{r-1})
\zeta^\star_n(l_1,\dotsc,l_{s-1};\varepsilon_1,\dotsc,\varepsilon_{s-1})}{n^{k_r+l_s}}(\sigma_r\varepsilon_s)^n.
\end{align}
We call them \emph{alternating Kaneko--Yamamoto MZVs}.
\end{defn}
\begin{thm}
For $n\in\mathbb{N}$, $\bfk_r=(k_1,\dotsc,k_r)\in\mathbb{N}^r$ and $\boldsymbol{\sigma}_r:=(\sigma_1,\dotsc,\sigma_r)\in\{\pm 1\}^r$, we have
\begin{align*}
&\int_0^1 x^{n-1} \gl_{k_1,\dotsc,k_r}(\sigma_1x,\dotsc,\sigma_rx)\,dx\nonumber\\
&=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{n^{j-1}} \gl_{\bfk_{r-1},k_r+1-j}(\boldsymbol{\sigma}_r)
+\frac{(-1)^{k_r}}{n^{k_r}}(\sigma_r^n-1)\gl_{\bfk_{r-1},1}(\boldsymbol{\sigma}_r)\nonumber\\
&\quad-\frac{\sigma_r^n}{n^{k_r}} \sum_{l=1}^{r-1} (-1)^{|\bfk_r^l|}\sum_{j=1}^{k_{r-l}-1}(-1)^{j}\zeta^\star_n\Big(j,\bfk_{r-1}^{l-1};\sigma_{r-l+1},(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{l-1}\Big) \gl_{\bfk_{r-l-1},k_{r-l}+1-j}(\boldsymbol{\sigma}_{r-l})\nonumber\\
&\quad-\frac{\sigma_r^n}{n^{k_r}} \sum_{l=1}^{r-1} (-1)^{|\bfk_{r}^{l+1}|-l}\gl_{\bfk_{r-l-1},1}(\boldsymbol{\sigma}_{r-l})\Big(\zeta^\star_n\big(\bfk_{r-1}^l;\sigma_{r-l+1},(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{l-1}\big)-\zeta^\star_n\big(\bfk_{r-1}^l;(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{l}\big) \Big)\nonumber\\
&\quad +(-1)^{|\bfk|-r}\frac{\sigma_r^n}{n^{k_r}} \zeta^\star_n\Big(1,\bfk_{r-1};\sigma_1,(\boldsymbol{\sigma}_r\boldsymbol{\sigma}_{r-1})^{r-1}\Big),
\end{align*}
where $(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{l}:=(\sigma_{r-l+1}\sigma_{r-l},\sigma_{r-l+2}\sigma_{r-l+1},\dotsc,\sigma_r\sigma_{r-1})$ and $(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{0}:=\emptyset$. If $\sigma_r=1$ then $(\sigma_r^n-1)\gl_{\bfk_{r-1},1}(\boldsymbol{\sigma}_r):=0$, and if $\sigma_{r-l}=1$ then
\[\gl_{\bfk_{r-l-1},1}(\boldsymbol{\sigma}_{r-l})\Big(\zeta^\star_n\big(\bfk_{r-1}^l;\sigma_{r-l+1},(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{l-1}\big)-\zeta^\star_n\big(\bfk_{r-1}^l;(\boldsymbol{\sigma}_{r}\boldsymbol{\sigma}_{r-1})^{l}\big)\Big):=0.\]
\end{thm}
\begin{proof}
According to definition,
\begin{align*}
\frac{d}{dx}\gl_{k_1,\dotsc,k_r}(\sigma_1x,\dotsc,\sigma_{r-1}x,\sigma_rx)=
\left\{
\begin{array}{ll} \frac{1}{x} \gl_{k_1,\dotsc,k_{r-1},k_r-1}(\sigma_1x,\dotsc,\sigma_{r-1}x,\sigma_rx),
&\quad \hbox{if $k_r\geq 2$}; \\
\frac{\sigma_r}{1-\sigma_rx}\gl_{k_1,\dotsc,k_{r-1}}(\sigma_1x,\dotsc,\sigma_{r-1}x), &\quad\hbox{if $k_r = 1$}. \\
\end{array}
\right.
\end{align*}
Hence, using the identity above, we can get the following recurrence relation
\begin{align*}
&\int_0^1 x^{n-1} \gl_{k_1,\dotsc,k_r}(\sigma_1x,\sigma_2x,\dotsc,\sigma_rx)\,dx\\
&=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{n^j} \gl_{k_1,\dotsc,k_{r-1},k_r+1-j}(\sigma_1,\sigma_2,\dotsc,\sigma_r)
+\frac{(-1)^{k_r}}{n^{k_r}}(\sigma_r^n-1)\gl_{k_1,\dotsc,k_{r-1},1}(\sigma_1,\dotsc,\sigma_r)\\
&\quad-\frac{(-1)^{k_r}}{n^{k_r}}\sigma_r^n\sum_{k=1}^{n}\sigma_r^k \int_0^1 x^{k-1} \gl_{k_1,k_2,\dotsc,k_{r-1}}(\sigma_1x,\sigma_2x,\dotsc,\sigma_{r-1}x)\,dx.
\end{align*}
Thus, we obtain the desired formula by using the recurrence relation above.
\end{proof}
Letting $r=1$ and $2$, we can get the following two corollaries.
\begin{cor}\label{cor-IL} For positive integers $n,k$ and $\sigma\in\{\pm 1\}$,
\begin{align*}
\int_0^1 x^{n-1}\gl_k(\sigma x)\,dx=\frac{(-1)^{k}}{n^k}(\sigma^n-1)\gl_1(\sigma)
-(-1)^{k}\frac{\sigma^n}{n^k}\zeta^\star_n(1;\sigma)-\sum_{j=1}^{k-1} \frac{(-1)^{j}}{n^j}\gl_{k+1-j}(\sigma).
\end{align*}
\end{cor}
\begin{cor}\label{cor-IIL} For positive integers $n,k_1,k_2$ and $\sigma_1,\sigma_2\in\{\pm 1\}$,
\begin{align*}
&\int_0^1 x^{n-1}\gl_{k_1,k_2}(\sigma_1 x,\sigma_2 x)\,dx\\
&=\sum_{j=1}^{k_2-1}\frac{(-1)^{j-1}}{n^j} \gl_{k_1,k_2+1-j}(\sigma_1,\sigma_2)
+(-1)^{k_2}\frac{\sigma_2^n}{n^{k_2}} \sum_{j=1}^{k_1-1}(-1)^{j}\gl_{k_1+1-j}(\sigma_1)\zeta^\star_n(j;\sigma_2)\\
&\quad+(-1)^{k_2}\frac{\sigma_2^n-1}{n^{k_2}}\gl_{k_1,1}(\sigma_1,\sigma_2)+(-1)^{k_1+k_2}\gl_1(\sigma_1)\frac{\sigma_2^n}{n^{k_2}}\Big(\zeta^\star_n(k_1;\sigma_2)-\zeta^\star_n(k_1;\sigma_2\sigma_1) \Big)\\
&\quad+(-1)^{k_1+k_2}\frac{\sigma_2^n}{n^{k_2}}\zeta^\star_n(1,k_1;\sigma_1,\sigma_2\sigma_1).
\end{align*}
\end{cor}
Clearly, setting $\sigma_1=\sigma_2=\cdots=\sigma_r=1$ gives the formula \eqref{a1}.
\subsection{Explicit formulas for alternating Kaneko--Yamamoto MZVs}
Obviously, we can consider the integral
\[I_\gl((\bfk;\boldsymbol{\sigma}),(\bfl;\bfeps)):=\int_0^1 \frac{\gl_{\bfk_r}(\sigma_1x,\dotsc,\sigma_rx)\gl_{\bfl_s}(\varepsilon_1x,\dotsc,\varepsilon_sx)}{x}\,dx\]
to find some explicit relations of $\zeta((\bfk;\boldsymbol{\sigma})\circledast(\bfl;\bfeps)^\star)$. We have the following theorems.
\begin{thm} For positive integers $k,l$ and $\sigma,\varepsilon\in\{\pm 1\}$,
\begin{align}
&(-1)^k{\rm Li}^\star_{1,k+l}(\sigma,\sigma\varepsilon)-(-1)^l{\rm Li}^\star_{1,k+l}(\varepsilon,\sigma\varepsilon)\nonumber\\
&=\sum_{j=1}^{k-1} (-1)^{j-1} \gl_{k+1-j}(\sigma)\gl_{l+j}(\varepsilon)-\sum_{j=1}^{l-1} (-1)^{j-1} \gl_{l+1-j}(\varepsilon)\gl_{k+j}(\sigma)\nonumber\\
&\quad+(-1)^l\gl_1(\varepsilon)(\gl_{k+l}(\sigma)-\gl_{k+l}(\sigma\varepsilon))
-(-1)^k\gl_1(\sigma)(\gl_{k+l}(\varepsilon)-\gl_{k+l}(\sigma\varepsilon)),
\end{align}
where if $\sigma=1$ then $\gl_1(\sigma)(\gl_{k+l}(\varepsilon)-\gl_{k+l}(\sigma\varepsilon)):=0$. Similarly, if $\varepsilon=1$ then $\gl_1(\varepsilon)(\gl_{k+l}(\sigma)-\gl_{k+l}(\sigma\varepsilon)):=0$.
\end{thm}
\begin{proof}
Considering the integral $\int_0^1 \frac{\gl_k(\sigma x)\gl_l(\varepsilon x)}{x}\,dx$ and using Corollary \ref{cor-IL} with an elementary calculation, we obtain the formula.
\end{proof}
\begin{thm} For positive integers $k_1,k_2,l$ and $\sigma_1,\sigma_2,\varepsilon\in\{\pm 1\}$,
\begin{align}\label{c7}
&\sum_{j=1}^{l-1} (-1)^{j-1} \gl_{l+1-j}(\varepsilon){\rm Li}_{k_1,k_2+j}(\sigma_1\sigma_2,\sigma_2)
-(-1)^l \zeta((k_1,k_2;\sigma_1\sigma_2,\sigma_2)\circledast(1,l;\varepsilon,\varepsilon)^\star)\nonumber\\
&\quad-(-1)^l\gl_{1}(\varepsilon)\Big({\rm Li}_{k_1,k_2+l}(\sigma_1\sigma_2,\sigma_2)-{\rm Li}_{k_1,k_2+l}(\sigma_1\sigma_2,\sigma_2\varepsilon)\Big)\nonumber\\
&=\sum_{j=1}^{k_2-1} (-1)^{j-1} \gl_{k_1,k_2+1-j}(\sigma_1,\sigma_2)\gl_{l+j}(\varepsilon)
-(-1)^{k_2}\sum_{j=1}^{k_1-1}(-1)^{j-1} \gl_{k_1+1-j}(\sigma_1){\rm Li}^\star_{j,k_2+l}(\sigma_2,\varepsilon\sigma_2)\nonumber\\
&\quad-(-1)^{k_2}\gl_{k_1,1}(\sigma_1,\sigma_2)\Big(\gl_{k_2}(\varepsilon)-\gl_{k_2}(\varepsilon\sigma_2) \Big)
+(-1)^{k_1+k_2} {\rm Li}^\star_{1,k_1,k_2+l}(\sigma_1,\sigma_2\sigma_1,\sigma_2\varepsilon)
\nonumber\\
&\quad+(-1)^{k_1+k_2} \gl_1(\sigma_1)\Big({\rm Li}^\star_{k_1,k_2+l}(\sigma_2,\varepsilon\sigma_2)-{\rm Li}^\star_{k_1,k_2+l}(\sigma_2\sigma_1,\varepsilon\sigma_2) \Big)\end{align}
where if $\varepsilon=1$ then $\gl_{1}(\varepsilon)\Big({\rm Li}_{k_1,k_2+l}(\sigma_1\sigma_2,\sigma_2)-{\rm Li}_{k_1,k_2+l}(\sigma_1\sigma_2,\sigma_2\varepsilon)\Big):=0$; if $\sigma_1=1$ then $\gl_1(\sigma_1)\Big({\rm Li}^\star_{k_1,k_2+l}(\sigma_2,\varepsilon\sigma_2)-{\rm Li}^\star_{k_1,k_2+l}(\sigma_2\sigma_1,\varepsilon\sigma_2) \Big):=0$ and if $\sigma_2=1$ then $\gl_{k_1,1}(\sigma_1,\sigma_2)\Big(\gl_{k_2}(\varepsilon)-\gl_{k_2}(\varepsilon\sigma_2) \Big):=0$.
\end{thm}
\begin{proof}
Similarly, considering the integral $\int_0^1 \frac{\gl_{k_1,k_2}(\sigma_1 x,\sigma_1 x)\gl_l(\varepsilon x)}{x}\,dx$ and using Corollary \ref{cor-IIL} with an elementary calculation, we prove the formula.
\end{proof}
On the other hand, according to definition, we have
\begin{align*}
&\zeta((k_1,k_2;\sigma_1\sigma_2,\sigma_2)\circledast(1,l;\varepsilon,\varepsilon)^\star)=\sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(k_1;\sigma_1\sigma_2)\zeta^\star_n(1;\varepsilon)}{n^{k_2+l}}(\sigma_2\varepsilon)^n\\
&={\rm Li}^\star_{k_1,1,k_2+l}(\sigma_1\sigma_2,\varepsilon,\sigma_2\varepsilon)+{\rm Li}^\star_{1,k_1,k_2+l}(\varepsilon,\sigma_1\sigma_2,\sigma_2\varepsilon)-{\rm Li}^\star_{k_1+1,k_2+l}(\sigma_1\sigma_2\varepsilon,\sigma_2\varepsilon)
-{\rm Li}^\star_{1,k_1+k_2+l}(\varepsilon,\sigma_1\varepsilon).
\end{align*}
Substituting it into \eqref{c7} yields the following corollary.
\begin{cor}\label{cor:c8}
For positive integers $k_1,k_2,l$ and $\sigma_1,\sigma_2,\varepsilon\in\{\pm 1\}$,
\begin{align*}
&(-1)^{l}{\rm Li}^\star_{k_1,1,k_2+l}(\sigma_1\sigma_2,\varepsilon,\sigma_2\varepsilon)
+(-1)^{l}{\rm Li}^\star_{1,k_1,k_2+l}(\varepsilon,\sigma_1\sigma_2,\sigma_2\varepsilon) +(-1)^{k_1+k_2} {\rm Li}^\star_{1,k_1,k_2+l}(\sigma_1,\sigma_2\sigma_1,\sigma_2\varepsilon) \\
&=\sum_{j=1}^{k_2-1} (-1)^{j} \gl_{k_1,k_2+1-j}(\sigma_1,\sigma_2)\gl_{l+j}(\varepsilon)
-(-1)^{k_2}\sum_{j=1}^{k_1-1}(-1)^{j} \gl_{k_1+1-j}(\sigma_1){\rm Li}^\star_{j,k_2+l}(\sigma_2,\varepsilon\sigma_2) \\
&\quad-\sum_{j=1}^{l-1} (-1)^{j} \gl_{l+1-j}(\varepsilon){\rm Li}_{k_1,k_2+j}(\sigma_1\sigma_2,\sigma_2)
+(-1)^{k_2}\gl_{k_1,1}(\sigma_1,\sigma_2)\Big(\gl_{k_2}(\varepsilon)-\gl_{k_2}(\varepsilon\sigma_2) \Big) \\
&\quad-(-1)^{k_1+k_2} \gl_1(\sigma_1)\Big({\rm Li}^\star_{k_1,k_2+l}(\sigma_2,\varepsilon\sigma_2)-{\rm Li}^\star_{k_1,k_2+l}(\sigma_2\sigma_1,\varepsilon\sigma_2) \Big) \\
&\quad-(-1)^l\gl_{1}(\varepsilon)\Big({\rm Li}_{k_1,k_2+l}(\sigma_1\sigma_2,\sigma_2)-{\rm Li}_{k_1,k_2+l}(\sigma_1\sigma_2,\sigma_2\varepsilon)\Big) \\
&\quad+(-1)^{l}{\rm Li}^\star_{k_1+1,k_2+l}(\sigma_1\sigma_2\varepsilon,\sigma_2\varepsilon)
+(-1)^{l}{\rm Li}^\star_{1,k_1+k_2+l}(\varepsilon,\sigma_1\varepsilon).
\end{align*}
\end{cor}
Clearly, setting $\sigma_1=\sigma_2=\varepsilon=1$ in Corollary \ref{cor:c8} gives the formula \eqref{a4}. We also find numerous explicit relations involving alternating MZVs. For example, letting $k_1=k_2=l=2$ and $\sigma_1=\eps=-1, \sigma_2=1$, we have
\begin{align*}
\zeta^\star(\bar 2,\bar 1,\bar 4)+2\zeta^\star(\bar 1,\bar 2,\bar 4)
&=3 {\rm Li}_4\left(\frac{1}{2}\right) \zeta (3)-\frac{7 \pi ^4 \zeta (3)}{128}+\frac{61 \pi ^2 \zeta (5)}{192}-\frac{105 \zeta (7)}{128}+\frac{1}{8} \zeta (3) \log ^4(2)\\&\quad-\frac{1}{8} \pi ^2 \zeta (3) \log ^2(2)+\frac{63}{16} \zeta (3)^2 \log (2)-\frac{61 \pi ^6 \log (2)}{10080},
\end{align*}
where we used Au's Mathematica package \cite{Au2020}.
\subsection{Multiple integrals associated with 3-labeled posets}
In this subsection, we introduce the multiple integrals associated with 3-labeled posets, and express the integrals $I_\gl((\bfk;\boldsymbol{\sigma}),(\bfl;\bfeps))$ in terms of multiple integrals associated with 3-labeled posets.
\begin{defn}
A \emph{$3$-poset} is a pair $(X,\delta_X)$, where $X=(X,\leq)$ is
a finite partially ordered set and $\delta_X$ is a map from $X$ to $\{-1,0,1\}$.
We often omit $\delta_X$ and simply say ``a 3-poset $X$''.
The $\delta_X$ is called the \emph{label map} of $X$.
Similar to 2-posets, a 3-poset $(X,\delta_X)$ is called \emph{admissible}
if $\delta_X(x) \ne 1$ for all maximal
elements and $\delta_X(x) \ne 0$ for all minimal elements $x \in X$.
\end{defn}
\begin{defn}
For an admissible $3$-poset $X$, we define the associated integral
\begin{equation}
I(X)=\int_{\Delta_X}\prod_{x\in X}\omega_{\delta_X(x)}(t_x),
\end{equation}
where
\[\Delta_X=\bigl\{(t_x)_x\in [0,1]^X \bigm| t_x<t_y \text{ if } x<y\bigr\}\]
and
\[\omega_{-1}(t)=\frac{dt}{1+t},\quad \omega_0(t)=\frac{dt}{t}, \quad \omega_1(t)=\frac{dt}{1-t}.\]
\end{defn}
For the empty 3-poset, denoted $\emptyset$, we put $I(\emptyset):=1$.
\begin{pro}\label{prop:shuffl3poset}
For non-comparable elements $a$ and $b$ of a $3$-poset $X$, $X^b_a$ denotes the $3$-poset that is obtained from $X$ by adjoining the relation $a<b$. If $X$ is an admissible $3$-poset, then the $3$-poset $X^b_a$ and $X^a_b$ are admissible and
\begin{equation}
I(X)=I(X^b_a)+I(X^a_b).
\end{equation}
\end{pro}
Note that the admissibility of a $3$-poset corresponds to
the convergence of the associated integral. We use Hasse diagrams to indicate $3$-posets, with vertices $\circ$ and ``$\bullet\ \sigma$" corresponding to $\delta(x)=0$ and $\delta(x)=\sigma\ (\sigma\in\{\pm 1\})$, respectively. For convenience, if $\sigma=1$, replace ``$\bullet\ 1$" by $\bullet$ and if $\sigma=-1$, replace ``$\bullet\ -1$" by ``$\bullet\ {\bar1}$". For example, the diagram
\[\begin{xy}
{(0,-4) \ar @{{*}-o} (4,0)},
{(4,0) \ar @{-{*}} (8,-4)},
{(8,-4) \ar @{-o}_{\bar 1} (12,0)},
{(12,0) \ar @{-o} (16,4)},
{(16,4) \ar @{-{*}} (24,-4)},
{(24,-4) \ar @{-o}_{\bar 1} (28,0)},
{(28,0) \ar @{-o} (32,4)}
\end{xy} \]
represents the $3$-poset $X=\{x_1,x_2,x_3,x_4,x_5,x_6,x_7,x_8\}$ with order
$x_1<x_2>x_3<x_4<x_5>x_6<x_7<x_8$ and label
$(\delta_X(x_1),\dotsc,\delta_X(x_8))=(1,0,-1,0,0,-1,0,0)$. For composition $\bfk=(k_1,\dotsc,k_r)$ and $\boldsymbol{\sigma}\in\{\pm 1\}^r$ (admissible or not),
we write
\[\begin{xy}
{(0,-3) \ar @{{*}.o} (0,3)},
{(1,-3) \ar @/_1mm/ @{-} _{(\bfk,\boldsymbol{\sigma})} (1,3)}
\end{xy}\]
for the `totally ordered' diagram:
\[\begin{xy}
{(0,-24) \ar @{{*}-o}_{\sigma_1} (4,-20)},
{(4,-20) \ar @{.o} (10,-14)},
{(10,-14) \ar @{-} (14,-10)},
{(14,-10) \ar @{.} (20,-4)},
{(20,-4) \ar @{-{*}} (24,0)},
{(24,0) \ar @{-o}_{\sigma_{r-1}}(28,4)},
{(28,4) \ar @{.o} (34,10)},
{(34,10) \ar @{-{*}} (38,14)},
{(38,14) \ar @{-o}_{\sigma_r} (42,18)},
{(42,18) \ar @{.o} (48,24)},
{(0,-23) \ar @/^2mm/ @{-}^{k_1} (9,-14)},
{(24,1) \ar @/^2mm/ @{-}^{k_{r-1}} (33,10)},
{(38,15) \ar @/^2mm/ @{-}^{k_r} (47,24)}
\end{xy} \]
If $k_i=1$, we understand the notation $\begin{xy}
{(0,-5) \ar @{{*}-o}_{\sigma_i} (4,-1)},
{(4,-1) \ar @{.o} (10,5)},
{(0,-4) \ar @/^2mm/ @{-}^{k_i} (9,5)}
\end{xy}$ as a single $\bullet\ {\sigma_i}$.
We see from \eqref{equ:glInteratedInt}
\begin{align}\label{5.19}
I\left(\ \begin{xy}
{(0,-3) \ar @{{*}.o} (0,3)},
{(1,-3) \ar @/_1mm/ @{-} _{(\bfk,\boldsymbol{\sigma})} (1,3)}
\end{xy}\right)=\frac{\gl_{k_1,\dotsc,k_r}(\sigma_1,\sigma_2,\dotsc,\sigma_r)}{\sigma_1\sigma_2\cdots \sigma_r}.
\end{align}
Therefore, according to the definition of $I_\gl((\bfk;\boldsymbol{\sigma}),(\bfl;\bfeps))$, and using this notation of multiple associated integrals, we can get the following theorem.
\begin{thm}\label{thm-ILA-} For compositions $\bfk\equiv \bfk_r$ and $\bfl\equiv\bfl_s$ with $\boldsymbol{\sigma}\in\{\pm 1\}^r$ and $\bfeps\in\{\pm 1\}^s$,
\begin{equation*}
I_\gl((\bfk;\boldsymbol{\sigma}),(\bfl;\bfeps))=I\left(\xybox{
{(0,-9) \ar @{{*}-o} (0,-4)},
{(0,-4) \ar @{.o} (0,4)},
{(0,4) \ar @{-o} (5,9)},
{(10,-9) \ar @{{*}-o} (10,-4)},
{(10,-4) \ar @{.o} (10,4)},
{(10,4) \ar @{-} (5,9)},
{(-1,-9) \ar @/^1mm/ @{-} ^{(\bfk,\boldsymbol{\sigma})} (-1,4)},
{(11,-9) \ar @/_1mm/ @{-} _{(\bfl,\bfeps)} (11,4)},
}\ \right).
\end{equation*}
\end{thm}
\begin{proof}This follows immediately from the definition of $I_\gl((\bfk;\boldsymbol{\sigma}),(\bfl;\bfeps))$.
We leave the detail to the interested reader.
\end{proof}
Finally, we end this section with the following theorem, which extends \cite[Theorem~4.1]{KY2018} to level two.
\begin{thm} For any non-empty compositions $\bfk_r$, $\bfl_s$ and $\boldsymbol{\sigma}_r\in\{\pm 1\}^r$, we have
\begin{align}\label{5.21}
I\left( \raisebox{16pt}{\begin{xy}
{(-3,-18) \ar @{{*}-}_{\sigma_1'} (0,-15)},
{(0,-15) \ar @{{o}.} (3,-12)},
{(3,-12) \ar @{{o}.} (9,-6)},
{(9,-6) \ar @{{*}-}_{\sigma_r'} (12,-3)},
{(12,-3) \ar @{{o}.} (15,0)},
{(15,0) \ar @{{o}-} (18,3)},
{(18,3) \ar @{{o}-} (21,6)},
{(21,6) \ar @{{o}.} (24,9)},
{(24,9) \ar @{{o}-} (27,3)},
{(27,3) \ar @{{*}-} (30,6)},
{(30,6) \ar @{{o}.} (33,9)},
{(33,9) \ar @{{o}-} (35,5)},
{(37,6) \ar @{.} (41,6)},
{(42,3) \ar @{{*}-} (45,6)},
{(45,6) \ar @{{o}.{o}} (48,9)},
{(-3,-17) \ar @/^1mm/ @{-}^{k_1} (2,-12)},
{(9,-5) \ar @/^1mm/ @{-}^{k_r} (14,0)},
{(18,4) \ar @/^1mm/ @{-}^{l_s} (23,9)},
{(28,3) \ar @/_1mm/ @{-}_{l_{s-1}} (33,8)},
{(43,3) \ar @/_1mm/ @{-}_{l_1} (48,8)},
\end{xy}}
\right)=\frac{\zeta((\bfk_r;\boldsymbol{\sigma}_r)\circledast(\bfl_s;\{1\}_s)^\star)}{\sigma_1'\sigma_2'\cdots\sigma_r'},
\end{align}
where $\sigma_j'=\sigma_j\sigma_{j+1}\cdots\sigma_r$, and $\bullet\ \sigma_j'$ corresponding to $\delta(x)=\sigma_j'$.
\end{thm}
\end{thm}
\begin{proof} The proof is done straightforwardly by computing the multiple integral
on the left-hand side of \eqref{5.21} as a repeated integral ``from left to right'' using the key
ideas in the proof of \eqref{equ:glInteratedInt} and \cite[Corollary 3.1]{Y2014}.
\end{proof}
If letting all $\sigma_i=1\ (i=1,2,\dotsc,r)$, then we obtain the ``integral-series" relation of Kaneko--Yamamoto \cite{KY2018}.
From Proposition \ref{prop:shuffl3poset} and (\ref{5.19}), it is clear that the left-hand side of (\ref{5.21}) can be expressed in terms of a linear combination of alternating multiple zeta values. Hence, we can find many linear relations of alternating multiple zeta values from (\ref{5.21}). For example,
\begin{align}
&2\gl_{1,1,3}(\sigma_1',\sigma_2',1) +2\gl_{1,1,3}(\sigma_1',1,\sigma_2')+2\gl_{1,1,3}(1,\sigma_1',\sigma_2')\nonumber\\&\quad+\gl_{1,2,2}(\sigma_1',1,\sigma_2')+\gl_{1,2,2}(1,\sigma_1',\sigma_2')+\gl_{2,1,2}(1,\sigma_1',\sigma_2')\nonumber\\
&=\zeta(2,1,2;1,\sigma_1,\sigma_2)+\zeta(1,2,2;\sigma_1,1,\sigma_2)+\zeta(3,2;\sigma_1,\sigma_2)+\zeta(1,4;\sigma_1,\sigma_2).
\end{align}
If $(\sigma_1,\sigma_2)=(1,1)$ and $(1,-1)$, then we get the following two cases
\begin{align*}
&6\zeta(1,1,3)+2\zeta(1,2,2)+\zeta(2,1,2)=\zeta(1,2,2)+\zeta(2,1,2)+\zeta(3,2)+\zeta(1,4),
\end{align*}
and
\begin{align*}
&2\zeta(1,{\bar 1},3)+2\zeta({\bar 1},{\bar 1},{\bar 3})+2\zeta({\bar 1},{ 1},{\bar 3})+\zeta({\bar 1},{\bar 2},{\bar 2})+\zeta({\bar 1},{ 2},{\bar 2})+\zeta({\bar 2},1,{\bar 2})\\
&\quad=\zeta(2,1,{\bar 2})+\zeta(1,2,{\bar 2})+\zeta(3,{\bar 2})+\zeta(1,{\bar 4}).
\end{align*}
\section{Integrals about multiple $t$-harmonic (star) sums}\label{sec4}
Similar to MHSs and MHSSs, we can define the following their $t$-versions.
\begin{defn} For any $n, r\in\mathbb{N}$ and composition $\bfk:=(k_1,\dotsc,k_r)\in\mathbb{N}^r$,
\begin{align}
&t_n(k_1,\dotsc,k_r):=\sum_{0<n_1<n_2<\dotsb<n_r\leq n} \frac{1}{(2n_1-1)^{k_1}(2n_2-1)^{k_2}\dotsm (2n_r-1)^{k_r}},\label{t-1}\\
&t^\star_n(k_1,\dotsc,k_r):=\sum_{0<n_1\leq n_2\leq \dotsb\leq n_r\leq n} \frac{1}{(2n_1-1)^{k_1}(2n_2-1)^{k_2}\dotsm (2n_r-1)^{k_r}},\label{t-2}
\end{align}
where \eqref{t-1} and \eqref{t-2} are called multiple $t$-harmonic sums and multiple $t$-harmonic star sums, respectively. If $n<r$ then ${t_n}(\bfk):=0$ and ${t_n}(\emptyset )={t^\star _n}(\emptyset ):=1$.
\end{defn}
For composition $\bfk:=(k_1,\dotsc,k_r)$, define
\begin{align*}
L(k_1,\dotsc,k_r;x):=\frac{1}{2^{k_1+\dotsb+k_r}} {\rm Li}_{k_1,\dotsc,k_r}(x^2)
\end{align*}
where $L(\emptyset;x):=1$. Set $L(k_1,\dotsc,k_r):=L(k_1,\dotsc,k_r;1)$. Similarly, define
\begin{align*}
t(k_1,\dotsc,k_r;x):&=\sum_{0<n_1<n_2<\dotsb<n_r} \frac{x^{2n_r-1}}{(2n_1-1)^{k_1}(2n_2-1)^{k_2}\dotsm (2n_r-1)^{k_r}}\\
&=\sum_{n=1}^\infty \frac{t_{n-1}(k_1,\dotsc,k_{r-1})}{(2n-1)^{k_r}}x^{2n-1},
\end{align*}
where $t(\emptyset;x):=1/x$. Note that $t(k_1,\dotsc,k_r;1)=t(k_1,\dotsc,k_r)$.
\begin{thm} For composition $\bfk:=(k_1,\dotsc,k_r)$ and positive integer $n$,
\begin{align}\label{d3}
&\int_0^1 x^{2n-2} L(\bfk_r;x)\,dx=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{(2n-1)^j}L(\bfk_{r-1},k_r+1-j)
+ \frac{(-1)^{|\bfk|-r}}{(2n-1)^{k_r}} t^\star_n(1,\bfk_{r-1})\nonumber\\
&\quad+\frac{1}{(2n-1)^{k_r}} \sum_{l=1}^{r-1} (-1)^{|\bfk_r^l|-l}\sum_{j=1}^{k_{r-l}-1} (-1)^{j-1}L(\bfk_{r-l-1},k_{r-l}+1-j)t^\star_n(j,\bfk_{r-1}^{l-1})\nonumber\\
&\quad-\frac{1}{(2n-1)^{k_r}}\sum_{l=0}^{r-1} (-1)^{|\bfk_r^{l+1}|-l-1}\left(\int_0^1 \frac{L(\bfk_{r-l-1},1;x)}{x^2}\,dx\right)t^\star_n(\bfk_{r-1}^l).
\end{align}
\end{thm}
\begin{proof}
By the simple substitution $t\to t^2/x^2$ in \eqref{equ:glInteratedInt} we see quickly that
\begin{align*}
L(k_1,\dotsc,k_r;x)=\int_{0}^x \frac{tdt}{1-t^2}\left(\frac{dt}{t}\right)^{k_1-1}\dotsm \frac{tdt}{1-t^2}\left(\frac{dt}{t}\right)^{k_r-1}.
\end{align*}
By an elementary calculation, we deduce the recurrence relation
\begin{align*}
\int_0^1 x^{2n-2} L(\bfk_r;x)\,dx&=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{(2n-1)^j}L(\bfk_{r-1},k_r+1-j) -\frac{(-1)^{k_r-1}}{(2n-1)^{k_r}} \int_0^1 \frac{L(\bfk_{r-1},1;x)}{x^2}dx\\
&\quad+\frac{(-1)^{k_r-1}}{(2n-1)^{k_r}}\sum_{l=1}^n \int_0^1 x^{2l-2}L(\bfk_{r-1};x)dx.
\end{align*}
Hence, using the recurrence relation, we obtain the desired evaluation by direct calculations.
\end{proof}
\begin{thm} For composition $\bfk:=(k_1,\dotsc,k_r)$ and positive integer $n$,
\begin{align}
&\int_0^1 x^{2n-2} t(\bfk_r;x)\,dx=\sum_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{(2n-1)^j}t(\bfk_{r-1},k_r+1-j) + \frac{(-1)^{|\bfk|-r}}{(2n-1)^{k_r}} s^\star_n(1,\bfk_{r-1})\nonumber\\
&\quad+\frac{1}{(2n-1)^{k_r}} \sum_{l=1}^{r-1} (-1)^{|\bfk_r^l|-l}\sum_{j=1}^{k_{r-l}-1} (-1)^{j-1}t(\bfk_{r-l-1},k_{r-l}+1-j)\widehat{t}^\star_n(j,\bfk_{r-1}^{l-1})\nonumber\\
&\quad+\frac{1}{(2n-1)^{k_r}}\sum_{l=0}^{r-1} (-1)^{|\bfk_r^{l+1}|-l-1}\left(\int_0^1t(\bfk_{r-l-1},1;x)\,dx\right)\widehat{t}^\star_n(\bfk_{r-1}^l),
\end{align}
where
\begin{align*}
&\widehat{t}^\star_n(k_1,\dotsc,k_r):=\sum_{2\leq n_1\leq n_2\leq \dotsb\leq n_r\leq n} \frac{1}{(2n_1-1)^{k_1}(2n_2-1)^{k_2}\cdots(2n_r-1)^{k_r}},\\
&s^\star_n(k_1,\dotsc,k_r):=\sum_{2\leq n_1\leq n_2\leq \dotsb\leq n_r\leq n} \frac{1}{(2n_1-2)^{k_1}(2n_2-1)^{k_2}\cdots(2n_r-1)^{k_r}}.
\end{align*}
\end{thm}
\begin{proof}
By definition we have
\begin{align*}
\frac{d}{dx}t({{k_1}, \cdots ,k_{r-1},{k_r}}; x)= \left\{ {\begin{array}{*{20}{c}} \frac{1}{x} t({{k_1}, \cdots ,{k_{r-1}},{k_r-1}};x)
{\ \ (k_r\geq 2),} \\
{\frac{x}{1-x^2}t({{k_1}, \cdots ,{k_{r-1}}};x)\;\;\;\ \ \ (k_r = 1),} \\
\end{array} } \right.
\end{align*}
where $t(\emptyset;x):=1/x$. Hence, we obtain the iterated integral
\begin{align*}
t(k_1,\dotsc,k_r;x)=\int_{0}^x \frac{dt}{1-t^2}\left(\frac{dt}{t}\right)^{k_1-1}\frac{tdt}{1-t^2}\left(\frac{dt}{t}\right)^{k_{2}-1} \cdots \frac{tdt}{1-t^2}\left(\frac{dt}{t}\right)^{k_r-1}.
\end{align*}
By an elementary calculation, we deduce the recurrence relation
\begin{align*}
\int_0^1 x^{2n-2} t(\bfk_r;x)\,dx&=\sum\limits_{n=1}^\inftym_{j=1}^{k_r-1} \frac{(-1)^{j-1}}{(2n-1)^j}t(\bfk_{r-1},k_r+1-j) +\frac{(-1)^{k_r-1}}{(2n-1)^{k_r}} \int_0^1 t(\bfk_{r-1},1;x)\,dx\\
&\quad+\frac{(-1)^{k_r-1}}{(2n-1)^{k_r}}\sum\limits_{n=1}^\inftym_{l=2}^n \int_0^1 x^{2l-2}t(\bfk_{r-1};x)dx.
\end{align*}
Hence, using the recurrence relation, we obtain the desired evaluation by direct calculations.
\end{proof}
\begin{thm}\label{thm:L1111}
For any positive integer $r$, $\int_0^1 \frac{L(\{1\}_{r};x)}{x^2} \, dx$ can be expressed as a $\mathbb{Q}$-linear combinations of
products of $\log 2$ and Riemann zeta values. More precisely, we have
\begin{equation*}
1-\sum_{r\ge 1} \left(\int_0^1 \frac{L(\{1\}_{r};x)}{x^2} \, dx\right) u^r
=\exp\left( \sum_{n=1}^\infty \frac{\zeta(\bar n)}{n}u^n\right)
=\exp\left(-\log(2)u-\sum_{n=2}^\infty \frac{1-2^{1-n}}{n}\zeta(n)u^n\right).
\end{equation*}
\end{thm}
\begin{proof}
Consider the generating function
\begin{equation*}
F(u):=1-\sum_{r=1}^\infty 2^r\left(\int_0^1 \frac{L(\{1\}_{r};x)}{x^2} \, dx\right) u^r.
\end{equation*}
By definition
\begin{align*}
F(u) =\, & 1-\sum_{r=1}^\infty u^r \int_0^1 \int_0^{x^2} \left(\frac{dt}{1-t} \right)^r \frac{dx}{x^2} \\
=\, & 1-\sum_{r=1}^\infty \frac{u^r}{r!} \int_0^1 \left( \int_0^{x^2} \frac{dt}{1-t} \right)^r \frac{dx}{x^2} \\
=\, & 1- \int_0^1 \left( \sum_{r=1}^\infty \frac{(-u\log(1-x^2))^r}{r!} \right) \frac{dx}{x^2} \\
=\, & 1+\int_0^1 \Big( (1-x^2)^{-u}-1\Big) d(x^{-1})
=
\frac{\Gamma(1-u) \Gamma(1/2)}{\Gamma(1/2-u)}
\end{align*}
by integration by parts followed by the substitution $x=\sqrt{t}$.
Using the expansion
\begin{align*}
\Gamma(1-u)=\exp\left(\gamma u+\sum_{n=2}^\infty \frac{\zeta(n)}{n}u^n\right)\qquad(|u|<1).
\end{align*}
and setting $x=1/2-u$ in the duplication formula $\Gamma(x)\Gamma(x+1/2)=2^{1-2x}\sqrt{\pi}\Gamma(2x)$, we obtain
\begin{align*}
\log\Gamma(1/2-u)=\frac{\log\pi}{2}+\gamma u+2u\log(2)+\sum_{n=2}^\infty \frac{(2^n-1)\zeta(n)}{n}u^n\qquad(|u|<1/2).
\end{align*}
Therefore
\begin{equation*}
F(u)=\exp\left(-2\log(2)u-\sum_{n=2}^\infty \frac{2^n-2}{n}\zeta(n)u^n\right)=
\exp\left( \sum_{n=1}^\infty \frac{2^n}{n}\zeta(\bar n)u^n\right)
\end{equation*}
by the facts that $\zeta(\bar1)=-\log 2$ and $2^n \zeta(\bar n)=(2-2^n)\zeta(n)$ for $n\ge 2$.
The theorem follows immediately.
\end{proof}
Clearly, $\int_0^1 \frac{L(\{1\}_{r};x)}{x^2}\,dx\in\mathbb{Q}[\log(2),\zeta(2),\zeta(3),\zeta(4),\ldots]$.
For example,
\begin{align} \label{equ:Lxdepth=1}
&\int_0^1 \frac{L(1;x)}{x^2}\,dx=\log(2),\\
&\int_0^1 \frac{L(1,1;x)}{x^2}\,dx=\frac{1}{4}\zeta(2)-\frac1{2}\log^2(2), \notag\\
&\int_0^1 \frac{L(1,1,1;x)}{x^2}\,dx=\frac1{4}\zeta(3)+\frac1{6}\log^3(2)-\frac1{4}\zeta(2)\log(2). \notag
\end{align}
More generally, using a similar argument as in the proof of Theorem \ref{thm-IA}, we can prove the following more general results.
\begin{thm}\label{thm:LandtIntegrals}
Let $r,n$ be two non-negative integers and $\bfk_r=(k_1,\dotsc,k_r)\in\mathbb{N}^r$ with $\bfk_0=\emptyset$. Then
one can express all of the integrals
\begin{equation*}
\int_0^1 \frac{L(k_1,\dotsc,k_r,1;x)}{x^n}\,dx\ \quad (0\le n\le 2r+2)
\end{equation*}
and
\begin{equation*}
\int_0^1 \frac{t(k_1,\dotsc,k_r,1;x)}{x^n}\,dx\ \quad(0\le n\le 2r+1)
\end{equation*}
as $\mathbb{Q}$-linear combinations of alternating MZVs (and number $1$ for $\int_0^1 L(\bfk_r,1;x)\,dx$).
\end{thm}
\begin{proof} The case $n=1$ is trivial as both integrals are clearly already MMVs after the integration.
If $n=0$ then we have
\begin{equation*}
\int_0^1 t(k_1,\dotsc,k_r,1;x) \,dx
=\int_0^1 \sum_{0<n_1<\dots<n_r<m} \frac{x^{2m-1} \,dx}{(2n_1-1)^{k_1}\cdots (2n_r-1)^{k_r}(2m-1)}
=\frac{\lim_{N\to \infty} c_N}{2^{r+1}}
\end{equation*}
where
\begin{align}
c_N =\, &\sum_{0<n_1<\dots<n_r<m\le N} \frac{2^{r+1}}{(2n_1-1)^{k_1}\cdots (2n_r-1)^{k_r}(2m-1)(2m)} \notag\\
=\,& \sum_{0<n_1<\dots<n_r<m\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\left(\frac{1-(-1)^m}{m}-\frac{1-(-1)^m}{m+1}\right) \notag\\
=\,&- 2\sum_{0<n_1<\dots<n_r<m\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m}\notag\\
+\,&\sum_{0<n_1<\dots<n_r\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\sum_{m=n_r+1}^{2N} \left(\frac{1+(-1)^m}{m}-\frac{1-(-1)^m}{m+1}\right) \notag\\
=\,&- 2\sum_{0<n_1<\dots<n_r<m\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m}\notag\\
+\,&\sum_{0<n_1<\dots<n_r\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\left(\frac{1-(-1)^{n_r}}{n_r+1}\right). \label{equ:tintInductionStep}
\end{align}
By partial fraction decomposition
\begin{equation*}
\frac1{n^k(n+1)}=\sum_{j=2}^k \frac{(-1)^{k-j}}{n^j}-(-1)^k\left(\frac{1}{n}-\frac{1}{n+1}\right)
\end{equation*}
setting $n=n_r$, $k=k_r+1$ and taking $N\to\infty$ in the above, we may assume $k_r=1$ without loss of generality.
Now if $r=1$ then by \eqref{equ:tintInductionStep}
\begin{equation*}
c_N = 2\sum_{0<n<m\le 2N}\frac{(-1)^{n+m}-(-1)^m}{nm} + \sum_{0<n\le 2N} \frac{(1-(-1)^n)^2}{n(n+1)}.
\end{equation*}
Hence
\begin{equation}\label{equ:tIntr=1}
\int_0^1 t(1,1;x)\,dx=\frac14 \big(2\zeta(\bar1,\bar1)-2\zeta(1,\bar1)+4\log 2\big)=\log 2-\frac14 \zeta(2)
\end{equation}
by \cite[Proposition 14.2.5]{Z2016}. If $r>1$ then
\begin{align*}
\sum_{n=m}^{2N} \frac{1-(-1)^{n}}{n(n+1)}
= \sum_{n=m}^{2N} \frac{1}{n(n+1)} - \sum_{n=m}^{2N} \frac{(-1)^{n}}{n}+\sum_{n=m}^{2N} \frac{(-1)^{n}}{n+1}
= \frac{1+(-1)^{m}}{m} - 2\sum_{n=m}^{2N} \frac{(-1)^{n}}{n}.
\end{align*}
Taking $m=n_{r-1}+1$ and $n=n_r$ in \eqref{equ:tintInductionStep} we get
\begin{align*}
c_N =\,&- 2\sum_{0<n_1<\dots<n_r<m\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m} \\
+\,&2\sum_{j=2}^{k_r} (-1)^{k_r-j} \sum_{0<n_1<\dots<n_r<m\le 2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})}{n_1^{k_1}\cdots n_{r-1}^{k_{r-1}}n_r^{j}} \\
-\,& 4(-1)^{k_r} \sum_{0<n_1<\dots<n_r<2N} \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_{r-1}})}{n_1^{k_1}\cdots n_{r-1}^{k_{r-1}}}
\left(\frac{1}{n_{r-1}+1} - \sum_{n_r=n_{r-1}+1}^{2N} \frac{(-1)^{n_r}}{n_r} \right).
\end{align*}
Here, when $r=1$ the last line above degenerates to $4(-1)^{k_r} \sum_{n_1=1}^{2N} \frac{(-1)^{n_1}}{n_1}$.
Taking $N\widetilde{t}o\infty$ and using induction on $r$, we see that the claim for
$\int_0^1 t(\bfk_r,1;x)\,dx$ in the theorem follows.
The computation of $\int_0^1 L(\bfk_r,1;x)\,dx$ is completely similar to that of $\int_0^1 t(\bfk_r,1;x)\,dx$. Thus we can get
\begin{align*}
\int_0^1 L(\bfk_r,1;x)\,dx=
\,& \frac{1}{2^r} \sum_{0<n_1<\dots<n_r<m} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m} \\
+\,&\frac{1}{2^r} \sum_{j=2}^{k_r} (-1)^{k_r-j} \sum_{0<n_1<\dots<n_r } \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})}{n_1^{k_1}\cdots n_{r-1}^{k_{r-1}} n_r^{j}} \\
-\,& \frac{2(-1)^{k_r}}{2^r} \sum_{0<n_1<\dots<n_r} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_{r-1}})(-1)^{n_r}}{n_1^{k_1}\cdots n_{r-1}^{k_{r-1}} n_r}\\
-\,& \frac{2(-1)^{k_r}}{2^r} \sum_{0<n_1<\dots<n_r} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_{r-1}})}{n_1^{k_1}\cdots n_{r-1}^{k_{r-1}}(n_{r-1}+1)}.
\end{align*}
Here, when $r=1$ the last line above degenerates to $-(-1)^{k_1}$.
So by induction on $r$ we see that the claim for $\int_0^1 L(\bfk_r,1;x)\,dx$ is true.
Similarly, if $n=2$ then we can apply the same technique as above to get
\begin{equation*}
\int_0^1 \frac{L(k_1,\dotsc,k_r,1;x)}{x^2}\,dx
=\int_0^1 \frac{1}{2^{k_1+\dots+k_r+1}} \sum\limits_{0<n_1<\dots<n_r<m} \frac{x^{2m-2}\,dx}{n_1^{k_1}\cdots n_r^{k_r}m}
=\frac{\lim_{N\to \infty} d_N}{2^{r+1}}
\end{equation*}
where
\begin{align*}
d_N=\,& \sum\limits_{0<n_1<\dots<n_r<m\le N} \frac{2^{r+1}}{(2n_1)^{k_1}\cdots (2n_r)^{k_r}2m(2m-1)} \\
=\,& \sum\limits_{0<n_1<\dots<n_r<m\le 2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\left(\frac{1+(-1)^m}{m-1}-\frac{1+(-1)^m}{m}\right) \\
=\,& \sum\limits_{0<n_1<\dots<n_r<m\le 2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\left[\left(\frac{1-(-1)^m}{m}-\frac{1+(-1)^m}{m}\right) \right. \\
\,& \hskip7cm + \left. \left(\frac{1+(-1)^m}{m-1}-\frac{1-(-1)^m}{m}\right)\right]\\
=\,&- 2\sum\limits_{0<n_1<\dots<n_r<m\le 2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m}\\
+\,&\sum\limits_{0<n_1<\dots<n_r<2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\sum\limits_{m=n_r+1}^{2N} \left(\frac{1+(-1)^m}{m-1}-\frac{1-(-1)^m}{m}\right) \\
=\,& -2\sum\limits_{0<n_1<\dots<n_r<m\le 2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m}\\
+ \,&\sum\limits_{0<n_1<\dots<n_r<2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})}{n_1^{k_1}\cdots n_r^{k_r}}
\left(\frac{1-(-1)^{n_r}}{n_r}\right) \\
=\,& -2\sum\limits_{0<n_1<\dots<n_r<m\le 2N} \frac{(1+(-1)^{n_1})\cdots (1+(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m}\\
\to \,& -2\sum\limits_{\eps_j=\pm1,1\le j\le r} \zeta(k_1,\dotsc,k_r,1;\eps_1,\dotsc,\eps_r,-1)
\end{align*}
as $N\to\infty$. Hence,
\begin{equation} \label{equ:LintInductionStep}
\int_0^1 \frac{L(k_1,\dotsc,k_r,1;x)}{x^2}\,dx
=\frac{-1}{2^r}\sum\limits_{\eps_j=\pm1,1\le j\le r} \zeta(k_1,\dotsc,k_r,1;\eps_1,\dotsc,\eps_r,-1).
\end{equation}
By exactly the same approach as above, we find that
\begin{align} \notag
\int_0^1 \frac{t(k_1,\dotsc,k_r,1;x)}{x^2}\,dx
=&\,\frac{-1}{2^r}\sum\limits_{0<n_1<\dots<n_r<m } \frac{(1-(-1)^{n_1})\cdots (1-(-1)^{n_r})(-1)^m}{n_1^{k_1}\cdots n_r^{k_r}m}\\
=&\,\frac{-1}{2^r}\sum\limits_{\eps_j=\pm1,1\le j\le r} \eps_1\cdots\eps_r\zeta(k_1,\dotsc,k_r,1;\eps_1,\dotsc,\eps_r,-1). \label{equ:tintoverx2}
\end{align}
More generally, for any larger values of $n$ we may use the partial fraction technique and similar argument as above to express
the integrals in Theorem~\ref{thm:LandtIntegrals} as $\mathbb{Q}$-linear combinations of alternating MZVs.
So we leave the details to the interested reader.
This finishes the proof of the theorem.
\end{proof}
\begin{exa}
For positive integer $k>1$, by the computation in the $n=0$ case in the proof of Theorem~\ref{thm:LandtIntegrals} we get
\begin{align*}
\int_0^1 t(k,1;x)\,dx
=\, & \frac12 \big(\zeta(\bar{k},\bar1)-\zeta(k,\bar1) \big) -(-1)^k \log 2+\frac12\sum\limits_{j=2}^k (-1)^{k-j}\big(\zeta(j)-\zeta(\bar{j})\big),\\
\int_0^1 L(k,1;x)\,dx =\, & \frac12 \big(\zeta(\bar{k},\bar1)+\zeta(k,\bar1) \big)-(-1)^k +(-1)^k \log 2+\frac12\sum\limits_{j=2}^k (-1)^{k-j}\big(\zeta(j)+\zeta(\bar{j})\big) \\
=\, & \frac12 \big(\zeta(\bar{k},\bar1)+\zeta(k,\bar1) \big)-(-1)^k +(-1)^k \log 2+\sum\limits_{j=2}^k \frac{(-1)^{k-j}}{2^j} \zeta(j) .
\end{align*}
\end{exa}
\begin{exa}
For positive integer $k>1$, we see from \eqref{equ:LintInductionStep} and \eqref{equ:tintoverx2} that
\begin{align}\label{equ:Lr=1}
\int_0^1 \frac{L(k,1;x)}{x^2}\,dx=&\, -\frac12\big( \zeta(k,\bar1)+\zeta(\bar{k},\bar1)\big),\\
\int_0^1 \frac{t(k,1;x)}{x^2}\,dx=&\, \frac12\big( \zeta(k,\bar1)-\zeta(\bar{k},\bar1)\big). \notag
\end{align}
Taking $r=2$ in \eqref{equ:LintInductionStep} and \eqref{equ:tintoverx2} we get
\begin{align*}
\int_0^1 \frac{L(k_1,k_2,1;x)}{x^2}\,dx=-\frac14\big(\zeta(k_1,k_2,\bar1)+
\zeta(k_1,\bar{k_2},\bar1)+\zeta(\bar{k_1},k_2,\bar1)+\zeta(\bar{k_1},\bar{k_2},\bar1)\big),\\
\int_0^1 \frac{t(k_1,k_2,1;x)}{x^2}\,dx=-\frac14\big(\zeta(k_1,k_2,\bar1)-
\zeta(k_1,\bar{k_2},\bar1)-\zeta(\bar{k_1},k_2,\bar1)+\zeta(\bar{k_1},\bar{k_2},\bar1)\big).
\end{align*}
\end{exa}
\begin{re}
It is possible to give an induction proof of Theorem~\ref{thm:LandtIntegrals} using the regularized values of MMVs as
defined by \cite[Definition 3.2]{YuanZh2014b}. However,
the general formula for the integral of $L(\bfk,1;x)$ would be implicit. To illustrate the idea for computing $\int_0^1 t(\bfk,1;x)\,dx$, we consider the case $\bfk=k\in \mathbb{N}$.
Notice that
\begin{align*}
\sum\limits_{0<m<n<N} \frac{1}{(2m-1)(2n-1)2n}=&\, \sum\limits_{\substack{0<m<n<2N\\ m,n\ \text{odd}}}
\frac{1}{m}\left(\frac{1}{n}-\frac{1}{n+1} \right) \\
=&\, \sum\limits_{\substack{0<m<n<2N\\ m,n\ \text{odd}}} \frac{1}{mn} -
\sum\limits_{\substack{0<m<n\le 2N\\ m \ \text{odd}, n \ \text{even}}} \frac{1}{mn}+
\sum\limits_{\substack{0<m<2N\\ m \ \text{odd} }} \frac{1}{m(m+1)}.
\end{align*}
By using regularized values, we see that
\begin{align*}
\int_0^1 t(1,1;x)\,dx =\, & \sum\limits_{0<m<n} \frac{1}{(2m-1)(2n-1)2n}=\frac14\big(M_*(\breve{1},\breve{1})-M_*(\breve{1},1)\big)
+\log 2.
\end{align*}
We have
\begin{equation*}
M_*(\breve{1},\breve{1})=\frac12\big(M_*(\breve{1})^2-2M_*(\breve{2})\big)=\frac12\big((T+2\log 2)^2-2M(\breve{2})\big).
\end{equation*}
Since $2M(\breve{2})=3\zeta(2)$,
\begin{equation*}
M_\shuffle(\breve{1},\breve{1})= \frac12\rho\big((T+2\log 2)^2-3\zeta(2)\big)=\frac12\big((T+\log 2)^2-2\zeta(2)\big)
\end{equation*}
by \cite[Theorem 2.7]{Z2016}. On the other hand,
\begin{equation*}
\rho\big(M_*(\breve{1},1)\big)=M_\shuffle(\breve{1},1)= \frac12 M_\shuffle(\breve{1})^2=\frac12(T+\log 2)^2.
\end{equation*}
Since $\rho$ is an $\mathbb{R}$-linear map,
\begin{equation*}
\int_0^1 t(1,1;x)\,dx =\log 2+\frac14\rho\big(M_*(1,\breve{1})-M_*(1,1)\big)=\log 2-\frac14\zeta(2),
\end{equation*}
which agrees with \eqref{equ:tIntr=1}.
\end{re}
Similarly, by considering some related integrals we can establish many relations
involving multiple $t$-star values. For example, from \eqref{d3} we have
\begin{align*}
\int_0^1 x^{2n-2} L(k_1,k_2;x)dx&=\sum\limits_{j=1}^{k_2-1} \frac{(-1)^{j-1}}{(2n-1)^j}L(k_1,k_2+1-j)+\frac{(-1)^{k_2}}{(2n-1)^{k_2}} \int_0^1 \frac{L(k_1,1;x)}{x^2}dx\\
&\quad-\frac{(-1)^{k_2}}{(2n-1)^{k_2}} \sum\limits_{j=1}^{k_1-1} (-1)^{j-1} L(k_1+1-j)t^\star_n(j)\\
&\quad-\frac{(-1)^{k_1+k_2}}{(2n-1)^{k_2}} \log(2)t^\star_n(k_1)+\frac{(-1)^{k_1+k_2}}{(2n-1)^{k_2}}t^\star_n(1,k_1).
\end{align*}
Hence, considering the integral $\int_0^1 \frac{{\rm A}(l;x)L(k_1,k_2;x)}{x}\,dx$ or $\int_0^1 \frac{t(l;x)L(k_1,k_2;x)}{x}\,dx$, we can get the following theorem.
\begin{thm} For positive integers $k_1,k_2$ and $l$,
\begin{align}
&\sum\limits_{j=1}^{k_2-1} (-1)^{j-1} L(k_1,k_2+1-j)T(l+j)+(-1)^{k_2}T(k_2+l) \int_0^1 \frac{L(k_1,1;x)}{x^2}\,dx\nonumber\\
&-(-1)^{k_2} 2\sum\limits_{j=1}^{k_1-1}(-1)^{j-1} L(k_1+1-j)t^\star(j,k_2+l)\nonumber\\
&-(-1)^{k_1+k_2}2\log(2)t^\star(k_1,k_2+l)+(-1)^{k_1+k_2}2t^\star(1,k_1,k_2+l)\nonumber\\
&=\frac{1}{2^{k_1+k_2}} \sum\limits_{j=1}^{l-1} \frac{(-1)^{j-1}}{2^j} T(l+1-j)\zeta(k_1,k_2+j)-\frac{(-1)^l}{2^{k_1+k_2+l}} \sum\limits_{n=1}^\infty \frac{\zeta_{n-1}(k_1)T_n(1)}{n^{k_2+l}},
\end{align}
where $\int_0^1 \frac{L(k_1,1;x)}{x^2}\,dx$ is given by \eqref{equ:Lr=1}.
\end{thm}
{\bf Acknowledgments.} The first author is supported by the Scientific Research
Foundation for Scholars of Anhui Normal University and the University Natural Science Research Project of Anhui Province (Grant No. KJ2020A0057).
{\small
}
\end{document} |
\begin{document}
\title{Quantum Correlations in Cavity QED Networks}
\author{Miguel Orszag$^{1,*}$, Nellu Ciobanu$^{1}$, Raul Coto$^{1}$, Vitalie Eremeev$^{2}$ }
\affiliation
{$^{1}$Facultad de F\'{i}sica, Pontificia Universidad Cat\'{o}lica de Chile, Casilla 306, Santiago, Chile\\
$^{2}$Facultad de Ingenier\'{i}a, Universidad Diego Portales, Santiago, Chile}
\email{* Email:morszag@fis.puc.cl}
\begin{abstract}
We present a Review of the dynamical features such as generation, propagation, distribution, sudden transition and freezing of the various quantum correlation measures, such as Concurrence, Entanglement of Formation, Quantum Discord, as well as their geometrical measure counterparts within the models of Cavity Quantum Electrodynamics Networks.
Dissipation and thermal effects are discussed both in the generation of quantum correlations as well as their effect on the sudden changes and freezing of the classical and quantum correlations in a cavity quantum electrodynamical network.
For certain initial conditions, double transitions in the Bures geometrical discord are found. One of these transitions tends to disappear at a critical temperature.
\end{abstract}
\pacs{03.67.Bg, 03.65.Yz, 03.67.Lx, 03.67.Mn}
\maketitle
\section{Introduction}
Quantum correlations play a fundamental role in quantum computation and quantum information processing \cite{Nielsen, Modi12, Horodecki}, where entanglement is usually considered a popular measure of such correlations. Entanglement (verschr\"ankung) introduced in physics originally by Schr\"odinger \cite{Schrodinger} and considered a native feature of the quantum world, is the most outstanding and studied phenomenon to test the fundamentals of quantum mechanics, as well as an essential engineering tool for the quantum communications. However,
entanglement is a property that is hard to reach technologically and even when achieved, it is a very unstable quantum state, vulnerable
under the effects of decoherence, any dissipative process as a result of the coupling to environment. Conventionally these effects
are considered mainly destructive for entanglement, nevertheless some recent studies of this subject attest results different from the
common conviction, even appearing as counterintuitive at first glance \cite{Cirac11, Sorensen, Memarzadeh}.
An alternative approach to measure the entire correlations in a quantum system was suggested originally in Refs. \cite{Vedral, Zurek}. By using the concepts of mutual information and quantum discord (QD) the quantum correlations may be distinguished from the classical ones.
Further, the QD could be compared to the entanglement of formation (E) \cite{Wootters} or relative entropy of entanglement (REE) \cite{Vedral97} in order to find out if the system is in a quantum inseparable state (entangled), or in a separable state with quantum correlations, such as QD \cite{Luo, Alber, Lu, Fanchini}. In the last few years, some alternative measures of the QD were proposed and studied intensively. These measures are related basically to the entropic and geometric quantities of QD-like correlations, for an exhaustive review see Ref. \cite {Modi12}, where the most common non-classical correlations are: quantum dissonance proposed in \cite{Modi10}; geometric QD (GQD) based on the trace norm as Hilbert-Schmidt distance \cite{Dakic10}, Schatten p-norm \cite{Debara} and Bures distance \cite{Spehner, Aaronson2013}, \textit{etc.}. The QD has shown to capture non-classical correlations including the completely separable systems, e.g., deterministic quantum computation with one quantum bit (DQC1) model \cite{Datta08}.
In this Review we propose to analyze broadly the phenomena such as generation, propagation, distribution and measurement of quantum and classical correlations in a particular kind of quantum systems known as cavity quantum electrodynamics (CQED) networks which deal with atoms placed in cavities interconnected by fibers, in the framework of the
physical model suggested in Ref. \cite{Cirac97}, which attracted a high interest for quantum information applications and subsequently
developed from different aspects \cite{Pellizzari, Mancini, Serafini, Zheng, Ritter}. The inclusion of the interaction of the quantum system (atoms + fields) with the environment plays an important role in physics, implying a more realistic picture because the dissipation is always present in the real devices. The entire system is considered open because of the leakage of the electromagnetic field from the cavities and fiber into their own reservoirs. We initiated our investigations few years ago by proposing a 2-node CQED network: two atoms (qubits) each one trapped in a cavity and interconnected by fiber, with coupling of each quantum subsystem to the individual Markovian thermal environment, see Fig.1 in \cite{Mont}. In this system, in the approximation of one excitation, for all the reservoirs at zero temperature we found the effect of generation and oscillation in time of the entanglement measured by the concurrence as shown in Figs. 3-4 in \cite{Mont} for an initially separable state and sharing one excitation between the two qubits. Also, in the same work, the possibility of preservation of the entanglement at its maximal value for a period of time even in the presence of losses was proposed, by managing the atom-cavity detuning as shown in Figs. 5-8 of \cite{Mont}. Next we investigated the influence of the reservoirs' temperature on the classical and quantum correlations. For example, in the paper \cite{MEO} we have shown that for the initial states similar to the ones considered in \cite{Mont} it is possible to stimulate the enhancement of the maximal entanglement by the thermal reservoirs up to a particular "critical" temperature, beyond which the entanglement starts decreasing, as shown in Fig. 3 in \cite{MEO}. Inspired by this effect of entanglement gain by the thermal environments in the given CQED network, we developed our study further. 
So, in \cite{EMO} we demonstrated that it is possible, for a two-qubit system, initially in zero-excitation state, to generate long-lived quantum correlations as entanglement and quantum discord with the assistance of thermal environments, see Figs. 2-5 in \cite{EMO}, and the optimal situation is found using the fiber thermal reservoir, as shown in Fig. 6, ibid. Hence, we came to the conclusion that it is possible to generate atomic quantum correlations in CQED networks with dissipation channels by the processes of absorption and emission, i.e. exchanging excitations with the thermal reservoirs.
The propagation of quantum correlations, over the past decades, has captured the attention of many researchers due to its powerful applications in a wide range of physics \cite{Nielsen}. Cavity QED networks are particularly convenient for the creation and propagation of these correlations.
There are several ways to build a system for quantum computation or communication, depending on the distribution of the cavities, the way these are coupled together, boundary conditions, etc. The most typical is a chain of cavities, see (Fig. \ref{fig1}).
\begin{figure}
\caption{Array of three Cavity-Atom Systems.}
\label{fig1}
\end{figure}
We can in principle, couple neighboring cavities in two ways, either via an optical fiber \cite{Pellizzari, Mancini, Serafini, Mont, EMO, zhong, Zhang, mancini} or by tunnel effect \cite{cavities,angelakis1,angelakis2}. In the latter, the cavities need to be close enough so that photon hopping can occur due to the overlap of the spatial profile of the cavity modes. The former type of coupling becomes important mainly when large distance needs to be covered, e.g., quantum communication between two distant nodes in the network, for experiment see \cite{Ritter}. Also, it may be useful in generating photon phases when going from one cavity to the other \cite{Serafini}.
Multipartite Quantum Correlations is one of the most relevant indicators of the quantumness in many-body systems. This remarkable feature is in general difficult to characterize and the known definitions are hard to measure. Despite the efforts dedicated to solving this problem, the question of which is the best approach remains open.
Many attempts at extending the bipartite correlations to the multipartite case have been made \cite{wootters, multipostulates,woottersGeneral,coefficientmatrix,zambrini,fanchini,witness,sarandy}, but still questions remain about these generalizations. The first approach was the \textit{Tangle} \cite{wootters}, which is related to the entanglement. In that paper, the authors argue that unlike classical correlations, quantum entanglement cannot be freely shared among many objects. For example, in the case of a three-partite system, labeled as ``$1$'', ``$2$'' and ``$3$'', the amount of entanglement that the first system can share with the two others must satisfy the inequality:
\begin{equation}\label{relation}
\mathit{C}_{12}^2 + \mathit{C}_{13}^2\leq 4\det[\rho_1]
\end{equation}
with $\rho_1=tr_{23}[\rho_{123}]$. The above equation can be rewritten as $\mathit{C}_{12}^2 + \mathit{C}_{13}^2\leq \mathit{C}_{1(23)}^2$, for the case of pure states. Then, it is defined the quantity,
\begin{equation}\label{tangle}
\tau_{123}=\mathit{C}_{1(23)}^2-\mathit{C}_{12}^2 - \mathit{C}_{13}^2
\end{equation}
This residual entanglement represents a collective property of the three qubits that is unchanged by permutations; it
measures an essential three-qubit entanglement. In words, the entanglement of ``$1$" with ``$23$" can be manifested
in three forms, entanglement with ``$2$", entanglement with ``$3$", and an essential three-way entanglement.
These three forms must share the total entanglement.
For the case of mixed states $\rho$, $\mathit{C}_{1(23)}(\rho)$ is not defined. However, one can consider all possible pure-state decompositions of the state $\rho$, that is, all sets $\lbrace (\psi_i,p_i)\rbrace$ such that $\rho=\sum_i p_i\vert \psi_i\rangle\langle\psi_i\vert$. For each of these decompositions, one can compute the average value $\langle\mathit{C}_{1(23)}^2\rangle(\rho)=\sum_i p_i\mathit{C}_{1(23)}^2(\psi_i)$. Then, with the minimum of this average over all decompositions of $\rho$, the analogue of Eq.(\ref{tangle}) for mixed state will be;
\begin{equation}\label{tanglemix}
\tau_{123}(\rho)=(\mathit{C}_{1(23)}^2)^{min}-\mathit{C}_{12}^2 - \mathit{C}_{13}^2
\end{equation}
This task is usually computationally expensive, but there are some good approximations \cite{upperbound,lowerbound,quasipure,Davidovich}. The first two references correspond to an upper and lower bound respectively. The main idea is to narrow down the values of the tangle with these bounds to get closer to the real value. Before explaining both methods, it is important to notice that the term on the right side of Eq.(\ref{relation}) can be rewritten such that $\mathit{C}_{1(23)}^2=2(1-tr[\rho_1^2])$ \cite{buzek}. This form is more convenient as we will see next. The upper bound is found by taking just a pure state, which means using Eq.(\ref{tangle}) as if the system was pure. The lower bound comes from computing $\mathit{C}_{1(23)}^2(\rho)=2(tr[\rho^2]-tr[\rho_1^2])$, where $tr[\rho^2]$ is the purity of the total system.
In the reference \cite{quasipure}, the authors did a quasi-pure approximation, but their procedure is not as simple as the one used in \cite{upperbound,lowerbound}. In order to find an exact solution, a conservation law for the distributed entanglement of formation and quantum discord has been found \cite{winter,caldeira}. This method, which works fine only for small dimensional systems, it is well explained in reference \cite{fanchini}.
Other kinds of interesting effects observed in quantum open systems are related to the unusual dynamics of the classical and quantum decoherence originally reported in \cite{Maziero2009, Mazzola2010} and confirmed experimentally in \cite{Xu, Auccaise2011}, hence stimulating a high interest in the investigation of the phenomena of sudden changes in the correlations for different physical systems. During the last years, intensive efforts have focused on explaining the nature of the sudden transitions and freezing effects of the quantum correlations and the conditions under which such transitions occur. Also, from the perspective of the applications, how efficiently one could engineer these phenomena in quantum technologies. As has been shown in the studies \cite{Maziero2009, Auccaise2011, Xu, Pinto2013, He2013, Mazzola2010, Lo Franco2012, You2012, Aaronson2013}, the puzzling peculiarities of the sudden transitions and freezing phenomena are hidden in the structure of the density operator during the whole evolution of a bipartite quantum system for particular decoherence processes. Nevertheless, important questions remain open --- how these fascinating effects are affected by the presence of the noisy environments and if there are efficient mechanisms to control them in both non-dissipative or dissipative decoherence models. The state-of-the-art research of CQED networks has shown so far a modest progress on the influence of the environments on the sudden transitions and freezing phenomena \cite{He2013}, with very little research on the influence of thermal baths in such quantum open systems. Motivated by this interest in the field, very recently in \cite{ECO} we presented some novel results concerning the sudden transitions and freezing effects of the quantum correlations for the same CQED network as in Fig. \ref{fig1ol}, but developed for many excitations in the whole system and including the environments.
We have shown that the detrimental effect of the thermal reservoirs on the freezing of correlations can be compensated via an efficient coupling of the fiber connecting the two cavities. Furthermore, for certain initial conditions, a double sudden transition in the dynamics of the Bures geometrical quantum discord was observed. Similar double transitions were reported for Schatten one-norm geometric quantum correlations (GQD-1) in \cite{Montealegre2013, Paula2013}. In our system, the second transition tends to disappear at a critical temperature, hence freezing the discord. We concluded that by controlling the dissipation mechanisms it is possible to engineer sudden changes and freezing periods in the temporal evolution of the quantum correlations with multiple effects which can find practical applications. This kind of critical thermal effect appears in CQED networks as well as other systems \cite{Werlang, Hu}.
\section{Classical and Quantum Correlations}
\textbf{Entanglement of formation}
\newline For a given ensemble of pure states $\left\{ p_{i},\mid \psi _{i}\rangle \right\} $, the entanglement
of formation is the average entropy of entanglement over a set of states that minimizes this average over all possible decompositions of $\rho$,
\cite{BENNETT96}.
\begin{equation}
E(\rho )=\min{\sum_{i}p_{i}E(\psi _{i})},
\end{equation}
where the entanglement $E(\psi )$ is defined as the von Neumann entropy of
either one of the subsystems $E(\psi _{A/B} )=S(\rho _{A/B})$, with $
S(\rho )=-tr[\rho \log _{2}\rho ]$. However, it is very difficult to know which ensemble $\{p_{i},\psi _{i}\}$
is the one that minimizes the average. A concept closely related to the
entanglement of formation is the concurrence \cite{Wootters, WOOTTERS97}.
\newline
For a general mixed state $\rho _{AB}$ of two qubits, we define $\widetilde{
\rho }$ to be the spin-flipped state $\widetilde{\rho }_{AB}=(\sigma
_{y}\otimes \sigma _{y})\rho _{AB}^{\ast }(\sigma _{y}\otimes \sigma _{y})$
where $\rho ^{\ast }$ is the complex conjugate of $\rho $ and $\sigma _{y}$
is the Pauli matrix. The concurrence is defined as
\begin{equation}
C(\rho )=\text{max}\{0,\lambda _{1}-\lambda _{2}-\lambda _{3}-\lambda
_{4}\},
\end{equation}
where $\{\lambda _{i}\}$ are the square roots in decreasing order of the
eigenvalues of the non-hermitian matrix $\rho \widetilde{\rho }$. \newline
Finally, the entanglement of formation is related to concurrence as follows
\begin{equation}
E(\rho )=H \left( \frac{1}{2}+\frac{1}{2}\sqrt{1-C^2} \right)
\label{enta}
\end{equation}
with $H(x)=-x\log _{2}x-(1-x)\log _{2}(1-x)$. \newline The entanglement vanishes for a \emph{separable} state, defined as
\begin{equation}
\rho =\sum_{\scriptscriptstyle{i}}p_{\scriptscriptstyle{i}}{\rho _{\scriptscriptstyle{i}}}^{\scriptscriptstyle{A}}\otimes {\rho _{\scriptscriptstyle{i}}}^{\scriptscriptstyle{B}}
\end{equation}
and it is equal to one for maximally entangled states.
\textbf{Quantum Discord}
The total correlations of a quantum system are
quantified by the quantum mutual information $I(\rho )=S(\rho ^{A})+S(\rho ^{B})-S(\rho )$. The total amount of correlations can be separated into
classical and quantum correlations $I(\rho )=C(\rho )+Q(\rho )$. In search of a formula for measuring the classical correlations, Henderson and Vedral proposed a list
of conditions that the measure of classical correlations must satisfy \cite{Vedral}. Correspondingly they proposed a quantifier that fulfilled all the conditions, so the classical correlations are measured as
\begin{equation}
C(\rho ^{AB})=\displaystyle\max_{\{B_{k}\}}{[S(\rho ^{A})-S(\rho^{AB} |\{B_{k}\})]
} \label{clasi}
\end{equation}
with the quantum conditional entropy of A defined as $S(\rho^{AB} |\{B_{k}\})=
\displaystyle\sum_k{p_{k}S(\rho _{k})}$, where $\{\rho _{k},p_{k}\}$
is the ensemble of all possible results for the outcome from the set of von
Neumann measurements $\{B_{k}\}$ made in the subsystem $B$. Also $\rho _{k}=(I\otimes B_{k})\rho (I\otimes B_{k})/p_{k}$ is the state of the system after a measurement, where $p_{k}=tr(I\otimes B_{k})\rho (I\otimes
B_{k})$ is the probability for obtaining the outcome $k$ after the measurement. The maximization in Eq(\ref{clasi}) is done over all possible
measurements of B, which implies to look for the measurement that disturbs the least the overall quantum state. \newline With this definition for
the classical correlation, we get the Quantum Discord as $QD(\rho )=I(\rho )-C(\rho )$. For pure states, this formula coincides with entanglement
of formation. \newline A problem with the QD is that it is asymmetrical with respect to which part of the bi-partite system is measured. However
it becomes symmetrical for particular systems with $S(\rho ^{A})=S(\rho ^{B})$.
\newline
\textbf{Geometric quantum discord (GQD) and geometric entanglement (GE)}
In this Review we will use the calculations of GQD for two-qubit states with maximally mixed marginals (Bell-diagonal) as in Eq.(\ref{rhoBD}) measured by the Bures distance, which is the minimal geometric distance of a quantum state of a bipartite system AB to the set of classical states for subsystem A \cite{Spehner}. Based on this reference, we present here the main formulas used in our computations of the Bures GQD quantified by the normalized quantity $\tilde{D}_A$ as follows
\begin{equation}
\widetilde{D}_A(\rho) = \left(1-\frac{1} {\sqrt{2}}\right)^{-1} \left(1-\sqrt{\frac{1+b_{\text{max}}}{2}}\right)
\label{BuresQD}
\end{equation}
with
\begin{eqnarray}
b_{\text{max}}=\frac{1}{2} \text{max} \big \{ \sqrt{(1+c_1)^2-(c_2-c_3)^2}+\sqrt{(1-c_1)^2-(c_2+c_3)^2}, \nonumber \\
\sqrt{(1+c_2)^2-(c_1-c_3)^2}+\sqrt{(1-c_2)^2-(c_1+c_3)^2}, \nonumber \\
\sqrt{(1+c_3)^2-(c_1-c_2)^2}+\sqrt{(1-c_3)^2-(c_1+c_2)^2} \big \},
\label{bmax}
\end{eqnarray}
and the Bell-diagonal (BD) density matrix is defined as
\begin{equation}
\rho_{BD}=[I\otimes I+\vec{c} \cdot (\vec{\sigma} \otimes \vec{\sigma})]/4=\frac{1}{4}\begin{pmatrix} 1+c_3 &0&0&c_1-c_2\\ 0& 1-c_3& c_1+c_2&0\\ 0& c_1+c_2& 1-c_3&0\\ c_1-c_2&0&0&1+c_3 \end{pmatrix},
\label{rhoBD}
\end{equation}
where $\vec{\sigma}=(\sigma_1, \sigma_2, \sigma_3)$ is the vector given by Pauli matrices, $I$ is the identity matrix, and the vector $\vec{c}=(c_1, c_2, c_3)$ defines completely the state with $-1 \le c_i \le1$.
Similarly to the entropic entanglement such as entanglement of formation (E) and relative entropy of entanglement (REE) \cite {Modi12, Wootters, Vedral97}, which are used often for comparison to the entropic QD, one can define the geometric measure of entanglement (GE), that for the two qubits case is given by \cite{Spehner}
\begin{equation}
GE(\rho) = 2-\sqrt{2}\left(1+\sqrt{1-C(\rho)^2}\right)^{1/2},
\label{GE}
\end{equation}
where $C(\rho)=\text{max} \{ \vert{c_1-c_2}\vert-1+c_3, \vert{c_1+c_2}\vert-1-c_3, 0 \}/2$ is the Wooters concurrence \cite{Wootters} computed here for the BD matrix. The normalized geometric entanglement is $\widetilde{GE}(\rho) =GE(\rho)/(2-\sqrt{2})$ whose dynamics will be compared to Bures GQD, $\widetilde{D}_A(\rho)$, in the next section.
\section{Generation and criticallity of Correlations}
\subsection{2-node CQED network with dissipations to thermal reservoirs}
We present here the model schematically shown in Fig.\ref{fig1ol}, where each of the two remote qubits (two-level atoms) interacts with an individual cavity, the cavities being coupled
by a transmission line (e.g., fiber, waveguide). For simplicity we consider the short fiber limit: only one mode of the fiber
interacts with the cavity modes \cite{Serafini}.
The Hamiltonian of the system under the rotating-wave approximation (RWA) in units of $\hbar$ reads
\begin{align}
H_s &= \omega_f a_3^{\dag} a_3+\sum\nolimits_{j=1}^2 \left( \omega_a S_{j,z}+\omega_0 a^{\dag}_j a_j \right) \nonumber \\
&+ \sum\nolimits_{j=1}^2 \left( g_j S^+_j a_j + J a_3 a^{\dag}_j + H.c.\right),
\label{Ham}
\end{align}
where $a_1(a_2)$ and $a_3$ is the boson operator for the cavity 1(2) and the fiber mode, respectively;
$\omega_0$, $\omega_f$ and $\omega_a$ are the cavity, fiber and atomic frequencies, respectively; $g_j (J)$ the atom(fiber)-cavity coupling constants; $S_{z}$, $S^{\pm}$ are the atomic inversion and ladder operators, respectively.
\begin{figure}
\caption{Two atoms trapped in distant coupled cavities. The cavities and transmission line exchange the energy at the rates
$\gamma_1$, $\gamma_2$ and $\gamma_3$ with their baths having the temperatures $T_1$, $T_2$ and $T_3$, respectively.}
\label{fig1ol}
\end{figure}
One of the important advances in our proposal of the CQED network model is based on the generalization to a large number of excitations in the whole system. To the best of our knowledge, this approach of many excitations in similar systems \cite{Cirac97, Pellizzari, Mancini, Serafini, Zheng} is not common, and may be one of the few existing studies. To describe the evolution of an open quantum-optical system usually the approach of the Kossakowski-Lindblad phenomenological master equation is considered with the system Hamiltonian decomposed on the eigenstates of the field-free subsystems. However, sometimes a CQED system is much more realistically modeled based on the microscopic master equation (MME), developed in \cite{Scala, Breuer} where the system-reservoir interactions are described by a master equation with the system Hamiltonian mapped on the atom-field eigenstates, known as dressed states. In our case the system consists of two atoms within their own cavities connected by a fiber and we consider the leakage of the two cavities and the fiber via a coupling to individual external environments, thus identifying three independent dissipation channels. Commonly, in CQED the main sources of dissipation originate from the leakage of the cavity photons due to the imperfect reflectivity of the cavity mirrors. Another mechanism of dissipation corresponds to the spontaneous emission of photons by the atom, however this kind of loss is negligibly small in the CQED regime considered in our model, and consequently is neglected. Hence, it is straightforward to bring the Hamiltonian $H_s$ in Eq. (\ref{Ham}) to a matrix representation in the atom-field eigenstates basis. To define a general state of the whole system we use the notation: $\ket{i}=\ket{A_1}\otimes\ket{A_2}\otimes\ket{C_1}\otimes\ket{C_2}\otimes\ket{F} \equiv \ket{A_1A_2C_1C_2F}$, where
${A}_{1,2}$ correspond to the atomic states, that can be $e(g)$ for excited (ground) state, while ${C}_{1,2}$ and ${F}$ define the cavities and fiber states, respectively, which may correspond to $0$, $1$, ..., $n$ photon states. Because the quantum system is dissipative, the excitations may leak to the reservoirs' degrees of freedom, hence the ground state of the system, $\ket{0}=\ket{gg000}$, should be also considered in the basis of the states. Therefore, in the case of $N$ excitations in our system, the number of dressed states, $\ket{i}$, having at least one excitation, i.e.\ excluding the ground state $\ket{0}$, is computed by a simple relation: $d_N=N+2\sum_{k=1}^N k(k+1)$. For example, in case of $N=2$ excitations the Hamiltonian $H_s$ in Eq. (\ref{Ham}) is decomposed in a state-basis of the dimension $1+d_2$, i.e. is a $19\times19$ matrix; for 6 excitations $H_s$ is represented by a $231\times231$ matrix, and so on. Hence it is evident that for large $N$ the general problem becomes hard to solve even numerically. In our work \cite{Mont,MEO,EMO,ECO} we developed the calculations from the simplest case of $N=1$ up to 6 excitations, which is an improvement as compared to some previous works on a similar subject, e.g. with two excitations \cite{Serafini}. In the present Review we present some results with $N=9$ excitations.
Considering the above assumptions and following the approach of \cite{Scala, Breuer}, the MME for the reduced density operator $\rho(t)$ of the system is derived
\begin{equation}
\frac {\partial \rho}{\partial t}=-i\left[ H_s,\rho \right]+\mathcal{L}(\bar{\omega}) \rho+\mathcal{L}(-\bar{\omega}) \rho,
\label{MME}
\end{equation}
where the dissipation terms are defined as follows (with $\bar{\omega}>0$)
\begin{equation}
\mathcal{L}(\bar{\omega}) \rho = \sum_{j=1}^3 \gamma_j(\bar{\omega})\bigg(A_j(\bar{\omega})\rho A_j^ \dag (\bar{\omega})- \frac{1}{2} \left[ A_j^
\dag (\bar{\omega}) A_j(\bar{\omega}),\rho \right] _{+} \bigg).
\label{Lind}
\end{equation}
In the above equations the following definitions are considered: $A_j(\bar{\omega}) = \sum_{\bar{\omega}_{\alpha, \beta}} \aket{\alpha}
\abra{\alpha} (a_j + a_j^{\dag}) \aket{\beta}\abra{\beta}$ fulfilling the properties $A_j(-\bar{\omega})= A_j^{\dag}(\bar{\omega})$,
where $\bar{\omega}_{\alpha, \beta} = \Omega_{\beta} - \Omega_{\alpha}$ with $\Omega_k$ as an eigenvalue of Hamiltonian $H_s$ and its corresponding eigenvector $\aket{k}$, denoting the \textit{k}-th dressed-state (see Fig.\ref{figDS}).
\begin{figure}
\caption{The schematic representation of the transitions in the space of dressed states of the system Hamiltonian, Eq.~(\ref{Ham}).}
\label{figDS}
\end{figure}
In order to solve such an MME we use numerical simulations, because in the most general case it is almost impossible to find the analytic solution for the eigenvalue equation based on Hamiltonian $H_s$ (\ref{Ham}). In the following we develop the equation for the density operator $\rho(t)$ mapped on the eigenstates basis, $\bra{\phi_m}\rho(t)\ket{\phi_n} = \rho_{mn}$ for the case of $N$ excitations in the system
\begin{eqnarray}
\dot{\rho}_{mn}= - i \bar{\omega}_{n,m} \rho_{mn} + \sum_{k=1}^{d_N} \big[ \frac{\gamma_{k \to 0}}{2} \big( 2\delta_{m0}\delta_{0n} \rho_{kk} - \delta_{mk}\rho_{kn} - \delta_{kn} \rho_{mk} \big)\nonumber \\
+\frac{\gamma_{0 \to k}}{2} \big( 2\delta_{mk}\delta_{kn} \rho_{00} - \delta_{m0}\rho_{0n} - \delta_{0n}\rho_{m0} \big) \big],
\label{rhosys}
\end{eqnarray}
here $\delta_{mn}$ is the Kronecker delta; the physical meaning of the damping coefficients $\gamma_{k \to 0}$ and $\gamma_{0 \to k}$ refer to the rates of the transitions between the eigenfrequencies $\Omega_k$ and $\Omega_0$ downward and upward, respectively, defined as follows $\gamma_{k \to 0}=\sum_{j=1}^3 c_i^2\gamma_j(\bar{\omega}_{0,k}) \left[\langle n(\bar{\omega}_{0,k})\rangle_{T_j} + 1\right] $ and by the Kubo-Martin-Schwinger (KMS) condition we have $\gamma_j(-\bar{\omega})=\mathrm{exp}\left(-\bar{\omega}/ T_j\right) \gamma_j({\bar{\omega}})$, where $c_i$ are the elements of the transformation matrix from the states $\{\ket{0}, \ket{1}, ... , \ket{d_N}\}$ to the states $\{\aket{0}, \aket{1}, ... , \aket{d_N} \}$ (similar to Eq. (14) and Appendix A in \cite{Mont}). The KMS condition ensures that the system tends to a thermal equilibrium for $t \to \infty$. Here $\langle n(\bar{\omega}_{\alpha, \beta})\rangle_{T_j} = \left ( \mathrm{e}^{(\Omega_\beta- \Omega_\alpha) / T_j} - 1\right )^{-1}$ corresponds to the average number of the thermal photons (with $k_B=1$). The damping coefficients play a very important role in our model because their dependence on the reservoirs temperatures imply a complex exchange mechanism between the elements of the system and the baths. Further, one solves numerically the coupled system of the first-order differential equations (\ref{rhosys}) and compute the evolution of different kind of correlations between the two distant atoms, given some finite temperature of the reservoirs. In order to get the reduced density matrix for the atoms one performs a measurement on the cavities and the fiber vacuum states, $\ket{000} = \ket{0}_{C1}\otimes \ket{0}_{C2} \otimes \ket{0}_{F}$. Later, we will explain how this task can be realized experimentally. 
We find that, after the projection, the reduced atomic density matrix has an X-form and the quantum and classical correlations can be computed easily as developed in \cite{Luo, Alber, Fanchini, Spehner}.
\begin{figure*}
\caption{Evolution of the concurrence for $g=J=5\gamma$ and different atom-cavity
detunings: (a) $\Delta=0$, (b) $\Delta=10^{-4}\omega_a$.}
\label{fig2pra}
\end{figure*}
In the following, we present the results obtained recently using this model. Beginning with the first explorations in \cite{Mont}, we observed a few interesting effects. Considering the baths at zero temperature and the initial two-qubit state as separable sharing one excitation, the generation and oscillation in time of the entanglement was found. Additionally, the conservation of the maximal value of the entanglement for a period of time in the presence of losses was obtained with specific atom-cavity detunings, as can be seen in Figs.~5--8 of \cite{Mont}. Inspired by these findings, we proceeded further \cite{EMO}, exploring the time evolution of the atomic entanglement measured via concurrence and entanglement of formation, as well as the classical correlations and quantum discord, all these quantities as functions of the temperature of the reservoirs.
The system under numerical study in \cite{EMO} considered atoms with long radiative lifetimes, each coupled to its own cavity. The two cavities are connected by a fiber with the damping rates $\gamma_1=\gamma_2=\gamma_3 \equiv \gamma=2 \pi$ MHz, respectively, which are within the current technology \cite{Serafini}. The transition frequency of the atom is chosen to be mid-infrared (MIR), i.e. $\omega_a/2 \pi=4$THz and hence, for experimental purposes the coupling between the distant cavities can be realized by using the modern resources of IR fiber optics, e. g. hollow glass waveguides \cite{Harrington}, plastic fibers \cite{Chen}, etc. We choose the range of MIR frequencies in order to limit the thermal reservoir only up to room temperature (300K), which corresponds to one thermal photon and so satisfy the approximation of maximum one excitation ($N=1$) in the system during the evolution. The values of the coupling constants and the atom-cavity detuning were varied in order to search for the optimal result. We mention that to satisfy the RWA we should have $2g\gg \gamma_{max}(\bar{\omega})$ \cite{Scala}. We take the values $g_1=g_2\equiv g=J=5\gamma$, considering all the reservoirs at the same temperature and study how the atomic entanglement evolves as a function of the atom-cavity detuning, $\Delta$. The result is shown in Fig. \ref{fig2pra} from which we conclude that the atom-cavity detuning facilitates in this case the generation of a quasi-stationary atomic entanglement and for $\Delta=0.1\omega_a$ the system reaches a long-lived entanglement state. Of course, in the asymptotic limit the concurrence will vanish and the atoms eventually disentangle themselves due to the damping action of the reservoirs.
\begin{figure}
\caption{Evolution of the quantum discord (QD), entanglement of formation (E) and classical correlations (CC) for one thermal
excitation and the parameters chosen as in Fig.~\ref{fig2pra}.}
\label{fig5pra}
\end{figure}
Nowadays researchers tend to use quantum discord rather than entanglement as a good measure of the quantum correlations \cite{Vedral, Zurek, Luo, Alber, Lu,
Fanchini}. Since in our case the two-qubit density matrix has a simplified $X$ form (see Eq. 4 in \cite{EMO}), we easily compute the quantum and classical correlations in the system by using a particular case for the algorithm discussed in \cite{Alber}, as well we checked by the approach proposed in \cite{Fanchini} and got the same results. So, in Fig. \ref{fig5pra} one finds the time evolution of the QD is very similar to that of the entanglement, but the initial growth of the discord is steeper which implies the appearance of the quantum correlations, quantified by QD, prior to the entanglement. For a better illustration of the thermal effect under discussion, we show in the inset the temperature dependence of the steady values (flat time plateau) of the quantum and classical correlations.
Next, interesting results are also found in \cite{ECO}, which are related to the phenomena of sudden transitions and freezing of the classical and quantum correlations, observed and analyzed in quite different physical systems \cite{{Maziero2009}, {Auccaise2011}, {Xu}, {Pinto2013}, {He2013}, {Mazzola2010}, {Lo Franco2012}, {You2012}, {Aaronson2013}}. To search for similar effects in our system we improved considerably the approximation by increasing the number of the excitations from $N=1$ to $N=9$, and therefore we had sufficient degrees of freedom to engineer the desired initial state of the two qubits and consider many thermal excitations from the baths. For the following analysis, we consider the initial state of the two qubits (atoms) in a Bell-diagonal (BD) state, described by an X-type density matrix defined in Eq. (\ref{rhoBD}).
As a result, we find, in the dynamics, the sudden transitions between the classical and quantum correlations. In the Fig. \ref{fig1bol} we show the time evolution of the classical and quantum correlations for the case of two excitations with the qubits initially prepared in a BD state and all the reservoirs at zero temperature. One observes the quantum-classical sudden transition in our model similar to other studied systems like \cite{{Auccaise2011}, {Xu}, {He2013}, {Mazzola2010}, {You2012}, {Lo Franco2012}, {Aaronson2013}} and others. Besides the classical correlations (CC), entropic quantum discord (QD) and relative entropy of entanglement (REE), we also studied two geometrical measures, the geometric entanglement (GE) and geometric quantum discord (GQD) defined with Bures distance \cite{Spehner}. We evidence here that the Bures GQD and QD show similar behaviors, having flat regions not affected by the dissipation processes during a particular time period, effect known as freezing of the correlations. At the same time CC decay and meet QD in a point where a sudden change occurs. After this point the CC remains constant during another time period until other sudden change follows and so we observe periodic revival of the correlations, found in other systems too \cite{{Xu}, {Lo Franco2012}}. On the other hand, the entanglement shows a different dynamics, evidencing oscillations and no flat regions.
\begin{figure}
\caption{Time evolution of the correlations: CC (blue solid), QD (red solid), GQD (green solid), REE (magenta dotted) and GE (brown dashed) for the reservoirs at zero temperatures. The parameters are the same as in Fig.~\ref{fig2ol}.}
\label{fig1bol}
\end{figure}
Next, we study the dynamics of the various correlation measures for $\omega_a/2 \pi=10$ GHz (for many experimental data see ref \cite{Haroche}). The atom-cavity couplings satisfy the constraint of the MME in a Markovian environment, i.e. $2g \gg \gamma$ and we set the values $g_1=g_2=g=10\gamma$ in the Figs. \ref{fig1bol}-\ref{fig2ol}. The values of $\gamma$ and J are tuned to evidence the effects of the thermal baths. We find that the detunings do not have an important impact on the effects of sudden transitions and freezing. We set the values $\omega_f=\omega_a$ and $\omega_a-\omega_0=0.1\omega_a$. We compute the time evolution of the atomic correlations keeping in mind the main objective of our explorations is to find the influence of the thermal baths on these correlations. In order to compute the general correlations - classical and quantum for the given system, we consider the concepts of mutual information, classical correlations and entropic quantum discord \cite{Vedral, Zurek}, as well the geometric quantum discord with Bures distance, recently developed by one of us \cite{Spehner} and independently in \cite{Aaronson2013}.
\begin{figure}
\caption{The dynamics of the correlations: CC (blue line), QD (red line) and Bures GQD (green line) for $\gamma=0.008 \omega_a$ and (a) varying the temperature of the fiber's reservoir given by the average number of the thermal photons, i.e. $\bar{n}=4$, and (b) increasing the fiber-cavity coupling at the same temperature.}
\label{fig2ol}
\end{figure}
Figure \ref{fig1bol} shows the effects of sudden change and freezing of the correlations for the reservoirs at zero temperature.
In our numerical analysis we find that the freezing effects of the QD and GQD decay by increasing individually or collectively the temperatures of the cavities or the fiber. In the Fig. \ref{fig2ol}(a) we show the effect of heating the fiber to four thermal photons and observe that the thermal effects act destructively on the freezing of both the entropic and geometric discords. However, the sudden transitions persist.
Now, could one recover from the damaging effects of the system being coupled to the thermal reservoirs? Exploring this task, we find that it is possible to engineer such a recovery by a suitable increase in the fiber-cavity coupling. Hence, in Fig. \ref{fig2ol}(b) we show that keeping the fiber's bath temperature at four thermal excitations, such recovery of the correlations is feasible if we increase the fiber-cavity coupling. Through this effect, we understand the important role of the photon as the carrier of the quantum correlations between the remote qubits in such a network.
\begin{figure}
\caption{Dynamics of the classical and quantum correlations: (a) GQD (green line) evidencing double sudden transitions, and (b) CC (blue line) and QD (red line) with only one sudden change. The parameters considered here are $\gamma=0.1 \omega_a$, $J=g=5\gamma$ for $\bar{n}=0$.}
\label{fig3ol}
\end{figure}
To finish with the analysis of this model, we point out a different effect of multiple sudden transitions in the correlations. Very recent results reported in \cite{Montealegre2013} show theoretically another interesting class of sudden transitions and freezing of the quantum correlations, which later was observed experimentally in NMR setups \cite{Paula2013}. They found the formation of an environment induced double transition of Schatten one-norm geometric quantum correlations (GQD-1), which is not observed in the classical correlations, thus a truly quantum effect. So, stimulated by these recent findings we compute the dynamics of the Bures GQD for our model and find a type of double sudden transitions somewhat different from the ones observed in \cite{{Montealegre2013}, {Paula2013}}. By this result we come to the conclusion that both GQD-1 and Bures GQD evidence similar quantum effects. In Fig. \ref{fig3ol} we see the double sudden changes in the dynamics of Bures GQD for the reservoirs at zero temperatures, meanwhile the CC and QD suffer only one sudden change. Next, if we increase the temperature of the fiber's bath, there is a peculiar tendency to freeze the GQD and the second transition tends to disappear, at a critical temperature. Even though we do not have an adequate explanation of the physics of these effects, they still remain attractive for further fundamental and applied research.
The experimental realization of the 2-node CQED network hinges on the possibility of realizing quantum non-demolition (QND) measurements of the photon states in the fiber-cavities system. There is an extensive literature on QND measurements in CQED, for review see \cite{Grangier}. In our scheme we propose to measure the two-qubit density matrix under the condition that all the fields are in the vacuum state, so it is feasible to monitor the probability of this state during the temporal evolution of the system, similarly to the results presented in Fig.~6 of \cite{EMO}.
\subsection{Other types of QED networks studied in literature}
A reliable quantum network scheme that implies the transfer of information between two particles in the presence of decoherence was proposed by Pellizzari \cite{Pellizzari} more than a decade ago.
As was shown, even when cavities and fibers are lossy, that is, connected to reservoirs, one can still transfer successfully the quantum information between locally distinct nodes (atoms) of the quantum network. The idea of a quantum network with optical fibers was developed for arrays of QED cavities \cite{Zhang}, where the same protocol of coupling is used. The phenomenon of entanglement sudden death (ESD) was observed, where the influence of the initial state preparation of the system plays an important role.
Another interesting model that involves the effective realization of quantum gates between two qubits in a QED network was suggested by Serafini \textit{et al} in \cite{Serafini}. The influence of the spontaneous emission and losses on the propagation of entanglement was investigated. It was observed that the coupling strengths (between the cavity mode and atoms, as well as atoms and fiber mode) can serve as the main parameters for the interaction control and distribution of quantum information.
To realize this idea in a quantum computer seems more complicated, as the number of trapped qubits should be extended and the manipulation and control between multiple particles need additional requirements. However, a many atoms interaction scheme in quantum state transfer and logical gates for two remote cavities connected via an optical fiber was investigated \cite{Yin}. In the absence of collective effects between the atoms, the coupling between the fiber and the cavity modes is sufficiently strong as compared to the atom-cavity coupling strength, and the system can be considered as two qubits resonantly coupled to a harmonic oscillator. The state transfer operation and the evolution of entangled multi-qubits are discussed in detail.
Diamond nitrogen-vacancy centers (NV), are considered promising candidates for qubits in the quantum applications nowadays since they have a long electronic spin coherence \cite{Togan}. A model of entanglement generation between two separated NV centers coupled to a photonic molecule has been proposed recently \cite{Liu}. A strong dependence on the hopping strength of the photonic crystal cavities and NV qubit-cavity detuning in the entanglement dynamics was remarked. Controlling with the NV the qubit-cavity coupling constants, a long time entanglement plateau was achieved.
Several interesting schemes that are based on two NV centers coupled to a common whispering-gallery-mode microresonator (WGM) have been investigated \cite{{Wu, Schietinger}}. The dynamics of entanglement generation was studied as function of the coupling strengths of NV centers with WGM, distance between the NV centers and the state preparation of the system. The defect centers in diamond are sufficiently stable at room temperature that allow a good control with WGM interaction. Manipulating the distance between two NV centers, a maximum entanglement can be achieved.
\section{Propagation of Quantum Correlations}
The Hamiltonian of an $N$-atom-cavity system in the rotating wave approximation, joined by optical fibers, can be written as follows
\begin{equation}
\mathit{H}=\mathit{H}^{\textit{free}}+\mathit{H}^{\textit{int}},
\end{equation}
with
\begin{equation}
\mathit{H}^{\textit{free}}=\sum_{i=1}^N \omega_i^a|e\rangle_i\langle e| + \sum_{i=1}^N \omega_i^c a_i^{\dagger}a_i + \sum_{i=1}^{N-1}\omega_i^{f}b_i^{\dagger}b_i
\end{equation}
\begin{equation}
\mathit{H}^{\textit{int}}=\sum_{j=1}^N g_j(a_j^{\dagger}|g\rangle_j\langle e|+a_j|e\rangle_j\langle g|) + \sum_{j=1}^{N-1}J_j[(a_j^{\dagger}+e^{i\phi}a_{j+1}^{\dagger})b_j + H.c.],
\end{equation}
where $|g\rangle_j$ and $|e\rangle_j$ are the ground and excited states of the two-level atom with transition frequency $\omega^a$, and $a_i^{\dagger}$($a_i$) and $b_i^{\dagger}$($b_i$) are the creation(annihilation) operators of the cavity and fiber mode, respectively. The first, second and third term in $\mathit{H}^{\textit{free}}$ are the free Hamiltonian of the atom, cavity field and fiber field, respectively. In addition, the first term in the $\mathit{H}^{\textit{int}}$ describes the interaction between the cavity mode and the atom inside the cavity with the coupling strength $g_j$, and the second term is the interaction between the cavity and the fiber modes with the coupling strength $J_j$. Notice that the phase $\phi$ is due to the propagation of the field through the fiber of length $l$: $\phi=2\pi\omega l/c$ \cite{Serafini}. Also we assume that $2l\mu/2\pi c\lesssim 1$(short fiber limit), with $\mu$ being the decay rate of the cavity fields into a continuum of fiber modes.
The first two terms of $\mathit{H}^{\textit{free}}$ and the first term of $\mathit{H}^{\textit{int}}$ can be jointly diagonalized in the basis of polaritons. These states are given by,
\begin{eqnarray}
\vert n-\rangle &=& \sin(\theta)\vert n,g\rangle -\cos(\theta)\vert n-1,e\rangle\nonumber\\
\vert n+\rangle &=& \cos(\theta)\vert n,g\rangle + \sin(\theta)\vert n-1,e\rangle \nonumber\\
E_{n\pm} &=& \omega^c n+\frac{\Delta}{2}\pm \frac{\sqrt{\Delta^2 +4g^2 n}}{2}
\end{eqnarray}
with $\Delta=\omega^a-\omega^c$, $\theta=\dfrac{1}{2}\arctan(\frac{g\sqrt{n}}{\Delta/2})$ and $n$ representing the number of photons. For simplicity we consider the resonance between atom and cavity $\omega_i^a=\omega_i^c=\omega_i$. Then, we can only have one photon per cavity, at most, because of the photon blockade, thus double or higher occupancy of the polaritonic states is prohibited \cite{blockade1,blockade2}.
In the rotating wave-approximation and interaction picture, the hopping terms between different polaritons $\mathit{L}_{i}^{{-}^{\dagger}}\mathit{L}_{i+1}^{+}$ and $\mathit{L}_{i}^{{+}^{\dagger}}\mathit{L}_{i+1}^{-}$, with $\mathit{L}^{{\pm}^{\dagger}}=\vert 1\pm\rangle\langle 0\vert$ defined as the creation operator, are fast rotating and they average zero. This implies that if we started creating a polariton with $\mathit{L}^{{-}^{\dagger}}$, the state $\vert 1 +\rangle$ will never show up. Finally, we restrict the subsystem to only two states, $\vert G\rangle=\vert 0\rangle$(ground state) and $\vert E\rangle =\vert 1 -\rangle =\frac{1}{\sqrt{2}}(\vert 1 g \rangle-\vert 0 e \rangle)$(excited state), and from now on we will omit label ``$-$" on $\mathit{L}$.
\begin{equation}
\mathit{H}=\sum_{i=1}^N(\omega_i-g_i)|E\rangle_i\langle E|+ \sum_{i=1}^{N-1}\omega_i^{f}b_i^{\dagger}b_i+\sum_{i=1}^{N-1}\frac{J_i}{\sqrt{2}}[(L_i^{\dagger}+L_{i+1}^{\dagger})b_i + (L_i+L_{i+1})b_i^{\dagger}]
\end{equation}
In the present model we are not interested in the fiber, so we want to eliminate it. One way is solving the complete Hamiltonian and finally trace or measure with a projecting operator on the fiber \cite{Mont, EMO}. Another way is to eliminate the fiber first, and then analyze the time evolution of the system. We propose to discuss two different approaches in the latter case.
On the one hand, we can eliminate adiabatically the fiber mode and obtain the effective Hamiltonian \cite{mancini}
\begin{equation}\label{hamilt_a}
\mathit{H}_{\textit{a}}^{\textit{eff}}= \sum_{i=1}^{N}\omega_{i}^{\prime}\mathit{L}_{i}^{\dagger}\mathit{L}_{i} + \sum_{i=1}^{N-1}J_i^{\prime}(\mathit{L}_{i+1}^{\dagger}\mathit{L}_{i} + \mathit{L}_{i}^{\dagger}\mathit{L}_{i+1}),
\end{equation}
where $\omega_{i}^{\prime}=\omega_i -g_i-\frac{2J_i^2}{\omega_i^f} + \frac{J_i^2}{\omega_i^f}\delta_{i,1} + \frac{J_i^2}{\omega_i^f}\delta_{i,N}$ and $J_i^{\prime}=-\frac{J_i^2}{\omega_i^f}$.
On the other hand, we can use perturbation theory to eliminate the fiber \cite{cohen,raul1}. We assume first that all cavities and fibers, and their corresponding coupling constants are equal, i.e., $\omega_i=\omega$, $\omega_i^f=\omega^f$, $g_i=g$ and $J_i=J$, and second, that the total detuning $\delta=(\omega-g)-\omega^f\gg J$. Then, we project the fiber state into the zero photon mode, generating an effective Hamiltonian given by
\begin{equation}\label{hamilt_p}
\mathit{H}_{\textit{p}}^{\textit{eff}}= \sum_{i=1}^{N-1}\lambda (|E\rangle_i\langle E| + |E\rangle _{i+1}\langle E|) + \sum_{i=1}^{N-1}\lambda (\mathit{L}_i^{\dagger}\mathit{L}_{i+1} + \mathit{L}_{i+1}^{\dagger}\mathit{L}_i),
\end{equation}
where $\lambda=\frac{J^2}{2\delta}$. For this approach it is important that the fibers be weakly coupled to the cavities. Also, when obtaining this effective Hamiltonian we allow just one excitation in the chain.
A different model, still having an optical fiber, was proposed by Zhong \cite{zhong}. In this work, he used a configuration where one cavity (e.g. the central cavity) is connected through optical fibers to several cavities, which are not connected between them.
In this proposal, it is possible to generate entangled states for multiple atoms trapped in distant cavities, connected by optical fibers. Resonant interaction between atoms, cavities and fibers is also considered, so the interaction time is short, which is an important factor when dealing with decoherence.
A straight generalization of the one chain model was studied by Zhang \textit{et al.} \cite{Zhang}, proposing a system with two non-interacting chains, see Fig. \ref{fig2}. This idea is quite interesting, since it opens a new series of applications, different from the one chain models.
However, this model does not include losses and it is limited to the analysis of the propagation of one kind of quantum correlation (entanglement). In an endeavour to improve the model, we added losses and did a more exhaustive study \cite{raul1}, including the propagation of entanglement and quantum discord, the distribution of the entanglement, and we discussed a possible application for quantum communications \cite{raul2}.
\begin{figure}
\caption{Array of two rows of three cavity-atom systems.}
\label{fig2}
\end{figure}
We are interested in sending information through both chains. This implies starting with two correlated qubits, corresponding, for example, to the $11'$ cavities. We then study the dynamics of our system such that after some time, the $33'$ pair becomes correlated. We used the following notation, $|\Psi\rangle=|X_1X_{1'}X_2X_{2'}X_3X_{3'}\rangle$, where $X$ could be $G$ or $E$.
The time evolution of the whole system is given by the Hamiltonian $\mathit{H}=\mathit{H}_1\otimes\mathit{H}_2$, with $\mathit{H}_{\lbrace 1,2\rbrace}$ defined in Eq.(\ref{hamilt_p}). We took the parameters: $J=2\pi\cdot30\,GHz$, $\delta=2\pi\cdot300\,GHz$ and studied the dynamics for two different initial states:
\begin{eqnarray}\label{initialstate1}
&|\Psi\rangle_a=\sin(\theta)|GEGGGG\rangle + \cos(\theta)|EGGGGG\rangle\nonumber\\
&|\Psi\rangle_b=\sin(\theta)|GGGGGG\rangle + \cos(\theta)|EEGGGG\rangle
\end{eqnarray}
We found that the transmission properties of the entanglement depend strongly on the initial conditions. For example, we observed that for the initial state $|\Psi\rangle_a$, $74.2\%$ of the concurrence in the cavity-pair $11'$ is transmitted to the $33'$ pair, independent of the angle $\theta$. On the other hand, for $|\Psi\rangle_b$ the transmission depends strongly on $\theta$. For example, for $\theta=\pi/3$ we get $63\%$ and for $\theta=\pi/8$, $28\%$. The final concurrence $33'$ is shown in Fig.\ref{fig3}, for the initial state $|\Psi\rangle_a$.
\begin{figure}
\caption{Concurrence, C in case of the cavities $33'$ for the initial condition $|\Psi\rangle_a$ with constant $\gamma=0.01$ and varying the angle as: $\theta =\pi/4$ (solid line), $\theta =\pi/3$ (red-dotted) and $\theta =\pi/8$ (blue-dashed).}
\label{fig3}
\end{figure}
Now, we compare the quantum discord and the entanglement to enquire which of this quantifiers is more robust against decoherence in this system. In some of the following calculations, when comparing the various measures of correlations, it will be more convenient to calculate the entanglement of formation ($\mathit{E}$), rather than the concurrence. Next, we plot both, for the cavity $33'$, and study the time evolution of the system.
\begin{figure}
\caption{Quantum discord, QD (red solid) is more robust than entanglement of formation, E (blue-dashed) in case of the cavities $33'$ for the initial condition $|\Psi\rangle_b$ with constant $\theta=\pi/4$ and varying $\gamma$ as: $\gamma=0.05$ (left panel) and $\gamma=0.5$ (right panel).}
\label{fig4}
\end{figure}
From Fig.~\ref{fig4} it can be seen that the quantum discord remains bigger than the entanglement of formation. Notice that near the origin, QD grows earlier than E, acting as a precursor of the generation of quantum correlations. Asymptotically for all $\gamma$, QD tends to be larger than E, even when the latter vanishes, which is in agreement with previous work in cavity QED \cite{bellomo}.
As another example, we extend the comparison changing the previous pure initial condition for a Werner state (W) for the cavities $11'$, with the rest of the cavities being in the ground state ($|G\rangle$)
\begin{equation}\label{initialstate2}
\rho (0)=(\dfrac{1-a}{4}\mathit{I}+a|\psi\rangle\langle\psi|)\otimes|G_2G_{2'}G_3G_{3'}\rangle\langle G_2G_{2'}G_3G_{3'}|
\end{equation}
where $\mathit{I}$ is the identity operator for two qubits and $|\psi\rangle =\frac{1}{\sqrt{2}}(|E_1G_{1'}\rangle +|G_1E_{1'}\rangle)$.
In Fig.~\ref{fig5}, we plot the $QD$ and $E$, and see the behavior arising from varying the mixedness of the initial state. When the system is nearly a pure state ($a=0.9$), there is not a big difference between the two curves (Fig.~\ref{fig6}). However, when the system becomes more mixed ($a=0.6$), there is a substantial difference between $\mathit{QD}$ and $\mathit{E}$ and obviously the $\mathit{QD}$ is the better option for the propagation of the information (Fig.~\ref{fig5}).
\begin{figure}
\caption{Quantum discord, QD (red solid) and entanglement of formation, E (blue-dashed) for the cavities $33'$ with Werner initial state, where $a=0.9$ (left panel) and $a=0.6$ (right panel).}
\label{fig5}
\end{figure}
Next, we analyze the effect on QD of $33'$ if a third person (an eavesdropper) performs a measurement on cavity $2$ for example. Let's choose a projective measure such as $\Pi=|G_2\rangle\langle G_2|$. The main idea behind this, is to compute the QD after the measurement($\mathit{QDM}$) at the $33'$ pair and compare it with the undisturbed case. From Fig.$\ref{fig6}$ left, we can see that for a nearly pure maximally entangled state, the curve corresponding to the Quantum Discord after the measurement is reduced to almost zero (blue-dashed) as compared with the undisturbed Quantum Discord without any measurement (red-dotted).
It is quite apparent that in this case we have a very good instrument to detect any external measurement. However if the state becomes more mixed ($a=0.6$), the discrimination becomes inconclusive, since in Fig.$\ref{fig6}$ right, we do not observe relevant differences anymore between the two curves.
\begin{figure}
\caption{Quantum discord after the measure, QDM (blue-dashed) varies considerably for highly pure states, as compared with quantum discord, QD (red-dotted) for the cavities $33'$ considering: $a=0.9$ (left panel) and $a=0.6$ (right panel).}
\label{fig6}
\end{figure}
\section{Distribution of Quantum Correlations}
As we discussed in the Introduction, Coffman et al.~\cite{wootters} consider ``distributed entanglement'' or the fact that quantum correlations cannot be freely shared among several particles. They go even further and define the ``tangle'' as a measure to describe the multipartite (beyond the bipartite) entanglement.
In particular, in a three-particle system, $\tau_{123}$ represents the collective entanglement of the three qubits, or the ``three-qubit entanglement''.
Unfortunately, these concepts are only valid for pure states.
For mixed states, which is the more realistic case of open systems, governed by Master Equations, if the losses are not too large, we can only estimate the lower and upper bounds of the tangle.
In the present section, we study the tangle for pure states as well as the upper and lower bounds for mixed states, under various initial conditions.
We take relatively moderate losses, in order to have reasonable good approximate bounds.
Going back to the previous models, for one chain, with the system evolving with the Hamiltonian defined in Eq.~(\ref{hamilt_p}), no tangle was found. A possible reason is the restriction to only one excitation in the chain, since the tangle is a collective effect and needs more than one excitation in the system. We can see this more clearly in the two-chain model. Here, for the initial state $|\Psi_a\rangle$ in Eq.~(\ref{initialstate1}), the tangle is always zero, independent of $\theta$. On the other hand, the state $|\Psi_b\rangle$ has a non-vanishing tangle, see Fig.~\ref{fig7}. Then, the presence of two excitations in the last case is responsible for the multipartite correlation.
\begin{figure}
\caption{Tangle for the initial condition $|\Psi\rangle_b$ with $\gamma=0$ for different angles: $\theta =\pi/4$ (black solid), $\theta =\pi/3$ (red-dotted), $\theta =\pi/8$ (blue-dashed) }
\label{fig7}
\end{figure}
In Fig.~\ref{fig7} we plotted the tangle, taking cavity $1$ as the reference one, $\tau=\mathit{C}^2_{1(23\dots)}-\mathit{C}_{12}^2-\mathit{C}_{13}^2-\mathit{C}_{11^{\prime}}^2 -\mathit{C}_{12^{\prime}}^2-\mathit{C}_{13^{\prime}}^2$. We set the cavity losses to zero, such that the whole system remains pure and Eq.~(\ref{tangle}) is correct. In all cases, the initial tangle is zero, since we start with a bi-partite entanglement between the first pair of cavities and therefore there are no higher-order correlations.
If we now turn on the interaction with the individual reservoirs, the situation becomes more involved, and in principle it would require a complex convex roof optimization procedure. Nevertheless, when the system experiences losses, if these are moderate, we can still estimate lower and upper bounds to the tangle, in the case where the mixedness of the system, measured through $tr[\rho^2]$, varies slowly between $1$ and $0.89$ for $\gamma=0.01$. For higher losses, like $\gamma=0.5$, the gap between the bounds is significantly bigger and the above approximations fail. In Fig. \ref{fig8} we observe the upper and lower bounds of the tangle. In the lower bound approximation, we need to guarantee that the system is weakly mixed and strongly entangled. In particular for $\lambda t\approx 9$, the lower bound becomes negative. On the other hand, we notice that in this region, $\mathit{C}_{1(23...)}^2$ is comparatively small, thus violating the assumptions made by the lower bound approximation, and therefore the results are unreliable. However, for $\lambda t\in \lbrace 0,6 \rbrace $, the area between the upper and lower bound is rather small, giving us a good estimation of the tangle.
\begin{figure}
\caption{Tangle for the initial condition $|\Psi\rangle_b$ with $\theta =\pi/4$, $\gamma=0.01$ and two different bounds: upper bound, UB (solid) and lower bound, LB (red-dotted);}
\label{fig8}
\end{figure}
Finally, if the system is used as a channel, our best option is to use states like $|\Psi_a\rangle$ as the initial condition, since the distribution or multipartite entanglement vanishes, finding only bi-partite quantum correlations and as a consequence we get higher values of the concurrence at the extreme of the chains. For low losses, the entanglement of formation is a good measure of the quantum correlations. However, as previously shown, the quantum discord is more robust against decoherence, thus is a better measure for higher loss rates. If our purpose is to distribute the quantum correlations among the various elements of our system, we choose $|\Psi_b\rangle$ as our initial condition, since we have a considerable multipartite entanglement or tangle. Furthermore, we observed from Fig. \ref{fig7} that the tangle deteriorates rapidly, as we depart from the Bell states ($\theta=\pi/4$).
\section{Summary and Discussion}
In this Review we have dealt with various aspects of the quantum correlations using different measures such as the Entanglement of Formation, Concurrence, Quantum Discord, Relative Entropy of Entanglement and Geometric Quantum Discord with Bures Distance.
The studies we have covered are related to the phenomena as generation, propagation, distribution, thermal and critical effects of these correlations in the models of cavity QED networks.
We have discussed the possibility of generating atomic entanglement with atoms located at distant cavities and connected via an optical fiber, and finding a wide time plateau of the concurrence between the atoms, even when the system is connected to various reservoirs, implying lossy cavities and fiber.
Dissipation and thermal effects are normally considered destructive from the quantum correlations viewpoint. However, examples are shown that under certain conditions, these effects may contribute to the generation of these correlations. These effects are found in cavity QED networks as well as in other physical systems.
The way the quantum correlations propagate and distribute themselves between various components of a given physical system is still an open problem.
We explore analytically and numerically the propagation and distribution of quantum correlations through two chains of atoms inside cavities joined by optical fibers. This particular system can be used as a channel of quantum communication or a network of quantum computation. One can readily select the appropriate initial condition in order to optimize the performance for the former or latter application.
Finally, we discuss the thermal effects on the sudden changes and freezing of the classical and quantum correlations in a cavity quantum electrodynamic network with losses. For certain initial conditions, double transitions in the Bures Geometrical discord are found. One of these transitions tend to disappear at a critical temperature, hence freezing the discord.
We acknowledge the financial support of the Fondecyt projects no.~100039 and no.~1140994, the project Conicyt-PIA Anillo ACT-112, ``Red de analisis estocastico y aplicaciones'', as well as Pontificia Universidad Catolica de Chile and Conicyt doctoral fellowships.
\end{document} |
\begin{document}
\title{$k$-Universal Finite Graphs}
\author{Eric Rosen}
\address{Department of Computer Science, Technion--Israel Institute of
Technology, Technion City, Haifa 32000, Israel}
\email{erosen@csa.cs.technion.ac.il}
\thanks{The first author was supported in part by a postdoctoral fellowship
from
the Israel Council for Higher Education and by a research grant of the German
Israeli Foundation (GIF)}
\author{Saharon Shelah}
\address{Institute of Mathematics, The Hebrew University of Jerusalem, 91904
Jerusalem, Israel {\rm and} Department of Mathematics, Rutgers University, New
Brunswick, New Jersey 08854}
\email{shlhetal@sunset.huji.ac.il}
\thanks{The second author's research was supported by the United States-Israel
Binational Science Foundation and by DIMACS. Publication No.~611.}
\author{Scott Weinstein}
\address{Department of Philosophy, University of Pennsylvania, Philadelphia,
Pennsylvania 19104}
\email{weinstein@cis.upenn.edu}
\thanks{The third author was supported in part by NSF CCR-9403447.
We would like to thank John Baldwin and an anonymous referee for useful
comments on an earlier draft of this paper.}
\subjclass{Primary 03C13, 03C75, 05C80}
\date{April 9, 1996}
\begin{abstract}
This paper investigates
the class of $k$-universal finite graphs,
a local analog of the class of universal graphs,
which arises naturally in the study of finite variable logics.
The main results of the paper, which are due to Shelah, establish that the
class of $k$-universal graphs is not definable by an infinite disjunction of
first-order existential
sentences with a finite number of variables and that there exist
$k$-universal graphs with no $k$-extendible induced subgraphs.
\end{abstract}
\maketitle
\section{Introduction}
This paper continues the investigation of the existential fragment of \mbox{$L^{\omega}_{\infty\omega}$}\
from the point of view of finite model theory initiated in \cite{rnw} and
\cite{Rosen-diss}.
In particular, we further study an analog of universal structures, namely,
$k$-universal structures, which arise
naturally in the context of finite variable logics.
The main results of this paper, Theorems \ref{main-thm} and
\ref{univnotextend-thm},
which are due to Shelah,
apply techniques from the theory of sparse
random graphs as developed in \cite{shelah-spencer} and \cite{baldwin-shelah}
to
answer some
questions about $k$-universal structures left open in these earlier works.
In order to make the current paper more or less self-contained, we recall some
notions and notations
from the papers cited above, which may be consulted for further
background and references.
We restrict our attention to languages which contain only relation symbols.
We let \mbox{\rm FO}k\ denote the fragment of first-order logic consisting of those
formulas
all of whose variables both free and bound are among $x_1, \ldots, x_k,$
and similarly, \mbox{$L^k_{\infty\omega}$}\ is the $k$-variable fragment of the infinitary language
\mbox{$L_{\infty\omega}$}.
We let \mbox{\rm FO}ke\ denote the collection of existential formulas of \mbox{\rm FO}k,
that is, those
formulas
obtained by closing the set of atomic
formulas and negated atomic formulas of \mbox{\rm FO}k\ under the operations of
conjunction, disjunction, and existential quantification, and we let \mbox{$L^k_{\infty\omega}$}e\ be
the existential fragment of \mbox{$L^k_{\infty\omega}$}.
The fragments \mbox{$\bigwedge L^{k}(\exists)$}\ and \mbox{$\bigvee L^{k}(\exists)$}\ of \mbox{$L^k_{\infty\omega}$}e\ consist of the countable
conjunctions and the countable disjunctions of formulas of \mbox{\rm FO}ke\
respectively. We write \qr{\theta}\ for the quantifier rank of the formula
$\theta,$ which is defined as usual.
\begin{definition}\label{compatible-def}
Let $A$ and $B$ be structures of the same relational signature. $A \mbox{$\preceq^{k}$} B$ ($A
\mbox{$\preceq^{k,n}$} B$) ($A \mbox{$\preceq_{\infty\omega}^k$} B$), if and only if,
for all $\theta\in \mbox{\rm FO}ke$ (with $\qr{\theta} \leq n$) (for all $\theta \in
\mbox{$L^k_{\infty\omega}$}e$), if $A \models \theta,$
then $B \models \theta.$
\end{definition}
These relations may be usefully characterized in terms
of the following non-alternating, local variants of the
Ehrenfeucht-Fraisse
game.
The {\em $n$-round}, \mbox{$\exists^k$-game}\ from $A$ to
$B$ is played between two players, Spoiler and Duplicator, with $k$
pairs of pebbles, $(\alpha_{1}, \beta_{1}), \ldots ,$
$(\alpha_{k}, \beta_{k})$.
The Spoiler begins each round by choosing a pebble $\alpha_{i}$
that may or may not be in play and placing it on an element of $A.$
The Duplicator then plays $\beta_i$ onto an element of $B.$
The Spoiler wins the game if after any round $m \leq n$ the function $f$ from $A$ to
$B,$ which sends the element pebbled by $\alpha_{i}$ to the
element pebbled by
$\beta_i$ is not a partial
isomorphism; otherwise, the Duplicator wins the game.
The {\em eternal} \mbox{$\exists^k$-game}\ is an infinite version of the $n$-round game in
which
the
play continues through a sequence of rounds of order type $\omega.$ The Spoiler
wins the game, if and only if, he wins at the $n^{\rm th}$-round for some $n
\in \omega$ as above; otherwise, the Duplicator wins.
The following proposition provides the link between the \mbox{$\exists^k$-game}\ and logical
definability.
\begin{proposition}[\cite{KV-datalog}] \label{ekgame-prop}
\begin{enumerate}
\item For all structures $A$ and $B,$ the following conditions are equivalent.
\begin{enumerate}
\item $A \mbox{$\preceq^{k,n}$} B.$
\item The Duplicator has a winning strategy for the
$n$-round \mbox{$\exists^k$-game}\ from $A$ to $B.$
\end{enumerate}
\item For all structures $A$ and $B,$ the following conditions are equivalent.
\begin{enumerate}
\item $A \mbox{$\preceq_{\infty\omega}^k$} B.$
\item The Duplicator has a winning strategy for the
eternal \mbox{$\exists^k$-game}\ from $A$ to $B.$
\end{enumerate}
\item For all structures $A$ and finite structures $B,$ the following
conditions are equivalent.
\begin{enumerate}
\item $A \mbox{$\preceq_{\infty\omega}^k$} B.$
\item $A \mbox{$\preceq^{k}$} B.$
\end{enumerate}
\end{enumerate}
\end{proposition}
In this paper, we will focus our attention on the class of finite simple
graphs, that
is, finite structures with one binary relation which is irreflexive and
symmetric. We will use the term graph to refer to such structures.
In general, we let $A, B, \ldots$ refer both to graphs and to their underlying
vertex sets and we let $|A|$ denote the cardinality of $A.$ We use
$E$ for the edge relation of a graph. \edges{A} is the edge set of the graph
$A,$ that is, $\edges{A} = \{ \{a,b\}\subseteq A : E(a,b) \}.$
\section{$k$-Universal Graphs: Definability and Structure}
We say that a graph $G$ is {\em k-universal}, if and only if, for all graphs
$H, H \mbox{$\preceq^{k}$} G.$ By Proposition \ref{ekgame-prop}, this is equivalent to $G$
satisfying every sentence of \mbox{$L^k_{\infty\omega}$}e\ which is satisfied by some (possibly
infinite)
graph.
We say that a graph $G$ is {\em k-extendible}, if
and only if, $k \leq |G|$ and
for each $1 \leq l \leq k$
\begin{equation*}
\begin{split}
G \models \forall x_1 \ldots \forall x_{k-1}\exists x_k
&( \bigwedge_{1 \leq i
< j \leq
k-1} x_i \neq x_j \rightarrow \\
&(\bigwedge_{1 \leq i \leq
k-1} x_i \neq x_k \wedge
\bigwedge_{1 \leq i < l} E(x_i,x_k) \wedge \bigwedge_{l \leq i < k}
\neg E(x_i,x_k))).
\end{split}
\end{equation*}
It is easy to verify, by applying Proposition \ref{ekgame-prop}, that
every $k$-extendible graph is $k$-universal.
The class of $k$-extendible graphs plays an important role in
the study of $0-1$ laws for certain infinitary logics and logics with
fixed point operators (see \cite{KV-IC}).
Indeed, the existence of $k$-universal
finite graphs follows immediately from the fact that for every $k,$
the random graph $G = G(n,p)$ with constant edge probability $0 < p < 1$ is
almost surely $k$-extendible (see, for example, \cite{Bollobas-text}).
Let \mbox{$\mathcal{U}^k$}\ be the class of $k$-universal graphs and let
\[\Xi^k = \{ \theta \in
\mbox{\rm FO}ke : \exists G(G\ {\rm is\ a\ graph\ and\ } G \models \theta) \}.\]
Note that
for all graphs $G, G \in \mbox{$\mathcal{U}^k$},$ if and only if, $G \models \bigwedge \Xi^k.$
Thus, \mbox{$\mathcal{U}^k$}\ is definable in \mbox{$\bigwedge L^{k}(\exists)$}\ over the class of graphs.
In \cite{rnw}, we established via an explicit construction
that for all $2 \leq k,$ \mbox{$\mathcal{U}^k$}\ is not definable
in \mbox{$\bigvee L^{k}(\exists)$}. The following theorem
significantly strengthens this result for large
enough $k$; its proof involves a probabilistic construction employing
techniques
from the theory of sparse random graphs.
\begin{theorem}\label{main-thm}
For all $k \geq 7$ and $k'\in \omega,$
\mbox{$\mathcal{U}^k$}\ is not definable in \dke{k'} over
the class of graphs.
\end{theorem}
We call a class of structures $\mathcal{C}$ finitely based, if and only if, there
is a finite set of structures $\{A_1, \ldots , A_n \} \subseteq \mathcal{C}$ such
that for every structure $B \in \mathcal{C}, A_i \subseteq B$ for some $ 1 \leq i
\leq n.$ We obtain the following result as a corollary to the proof of Theorem
\ref{main-thm}.
\begin{corollary}\label{finbase-cor}
For all $k \geq 7,$
\begin{enumerate}
\item \mbox{$\mathcal{U}^k$}\ is not finitely based, and
\item the class of $k$-extendible graphs is not finitely based.
\end{enumerate}
\end{corollary}
In \cite{rnw}, we observed that for all $k,$ \mbox{$\mathcal{U}^k$}\ is decidable in
deterministic polynomial time. The following theorem gives a stronger
``descriptive complexity'' result.
\begin{theorem}\label{univinlfp-thm}
For all $k,$ \mbox{$\mathcal{U}^k$}\ is definable in least fixed point logic.
\end{theorem}
It is clear that if $G$ is $k$-extendible and $G \subseteq H,$ then $H$ is
$k$-universal. The question naturally arises whether
there are $k$-universal graphs which contain no $k$-extendible subgraph.
The following theorem answers this question
affirmatively.
\begin{theorem}\label{univnotextend-thm}
For each $k \geq 4,$ there is a graph $G$ such that
\begin{enumerate}
\item $G$ is $k$-universal, and
\item $\forall H \subseteq G, H$ is not $k$-extendible.
\end{enumerate}
\end{theorem}
The next theorem is a strengthening of the first part of Corollary
\ref{finbase-cor}. The proof of this theorem expands on the construction
developed to prove Theorem \ref{univnotextend-thm}. We say a graph $G$ is a
{\em minimal} $k$-universal graph just in case $G$ is $k$-universal and
contains no proper induced subgraph which is $k$-universal.
\begin{theorem} \label{kineqminkuniv-thm}
For all $k \geq 6$, there is an infinite set of pairwise
$L^k$-inequivalent minimal $k$-universal graphs.
\end{theorem}
We proceed to prove the above results.
Theorem \ref{main-thm} is an immediate corollary of the following lemma
which
is due to Shelah.
\begin{lemma}\label{central-lemma}
For all $k \geq 7$ and $k' \in \omega,$
there is a
graph
$N$ such that
\begin{enumerate}
\item $N$ is $k$-extendible and
\item for every $\theta \in \ffoe{k'},$ if $N \models \theta,$ then there is a
structure $M$ such that $M \models \theta$ and $M$ is not $k$-universal.
\end{enumerate}
\end{lemma}
We approach the proof of Lemma \ref{central-lemma} through a sequence of
sublemmas. We first introduce some graph-theoretic concepts
which play a central role in the argument.
\begin{definition}\label{coloring-number-def}
Let $A$ be a finite graph.
\begin{enumerate}
\item We say
$\overline{a}=\langle a_1, \ldots , a_n\rangle$ is a {\em t-witness} for $A$,
if and only
if, $\overline{a}$ is an injective enumeration of $A$ and
for each $i
\leq n, |\{ j < i : E(a_j,a_i) \}| \leq t.$
\item $\cn{A} = $\ the least $t$ such that there is a $t$-witness for $A.$
(\cn{A} is the {\em coloring number} of $A.$)
\item $\ki{t} = \{ A : \cn{A} \leq t \}.$
\item \speciall{t}{A}{B}, if and only if, $A \subseteq B,
B \in \ki{t}$ and every $t$-witness for
$A$ can be extended to a $t$-witness for $B,$ that is, if $\overline{a}$ is a
$t$-witness for $A,$ then there is a $\overline{b}$ such that
$\overline{a}\overline{b}$ is a $t$-witness for $B.$
\end{enumerate}
\end{definition}
The coloring number was
introduced and extensively studied in \cite{erdos-hajnal-66}.
The following sublemma states a free amalgamation property of
$\speciall{t}{}{}.$
\begin{definition}
Let $A$ and $B$ be finite graphs.
\begin{enumerate}
\item $A$ is {\em compatible} with $B$, if and only if, the subgraph of $A$
induced by $A \cap B$ is identical to the subgraph of $B$ induced by $A \cap
B.$
\item Suppose $A$ is compatible with $B$ and let $C$ be the subgraph of $A$
induced by $A \cap B.$
The {\em free join} of $A$ and $B$ over $C,$
denoted by $\freejoin{A}{B}{C},$ is the graph whose vertex set is $A \cup B$
and whose edge set is $\edges{A} \cup \edges{B}.$
\end{enumerate}
\end{definition}
\begin{sublemma}\label{freeamalgam-sublemma}
Suppose $A,B \in \ki{t},$
$A$ is compatible with $B,$ $C$ is the subgraph of $A$
induced by $A \cap B,$ $\speciall{t}{C}{A},$ and
$\speciall{t}{C}{B}.$ Then, $\freejoin{A}{B}{C} \in \ki{t},$
$\speciall{t}{A}{\freejoin{A}{B}{C}},$ and
$\speciall{t}{B}{\freejoin{A}{B}{C}}.$
\end{sublemma}
\begin{proof}
The sublemma follows immediately from the definitions.
\end{proof}
The next sublemma establishes a lower bound on \cn{G}\ when $G$ is
$k$-universal. For the proof of the sublemma we extend the definition of
$k$-universality to apply also to tuples.
We also introduce a
refinement of the concept that will be used in the
proof of Theorem 2. An $m$-tuple $\overline{a}
= (a_1, \ldots , a_m )$ is {\em proper} iff
for all $i < j \leq m, a_i \neq a_j$. For all models $A$ and
$B$, and $j$-tuples $\overline{a} \subseteq A, \overline{b}
\subseteq B$, we write $(A, \overline{a})\mbox{$\preceq^{k}$} (B, \overline{b})
((A, \overline{a})\mbox{$\preceq^{k,n}$} (B, \overline{b}))$ iff for all formulas
$\theta(\overline{x}) \in \mbox{\rm FO}ke$ (with $qr(\theta) \leq n$),
with $j$ free variables,
if $A \models \theta[\overline{a}]$, then $B \models \theta[\overline{b}]$.
\begin{definition}
For $j \leq k$, a proper $j$-tuple $\overline{a} \subseteq A$ is
{\em $k$-universal in $A$} ({\em $k,n$-universal in $A$}) iff for all $B$, and
proper $j$-tuples
$\overline{b} \subseteq B$ such that the partial function
$f(x)$ from $A$ to $B$ that maps $a_i$ to $b_i$ is a partial
isomorphism,
$(B,\overline{b}) \mbox{$\preceq^{k}$} (A, \overline{a})$
($(B,\overline{b}) \mbox{$\preceq^{k,n}$} (A, \overline{a})$). The {\em rank} of
$\overline{a} \subseteq A$ is $\omega$ if it is $k$-universal, and
the greatest $n$ such that it is $k,n$-universal, otherwise.
\end{definition}
\begin{sublemma}\label{cnuniv-sublemma}
If $\cn{G} < 2^{k-2},$
then $G$ is not $k$-universal.
\end{sublemma}
\begin{proof}
Suppose $\cn{G} < 2^{k-2},$ and, for
{\em reductio}, that $G$
is $k$-universal.
Suppose $G = \{ a_i : i < n \},$ and
let $$I = \{ \langle i_1, \ldots , i_k \rangle : i_1 < i_2
< \ldots
< i_k < n\ {\rm and}\ \langle a_{i_1}, \ldots, a_{i_k}\rangle\ {\rm is}\ k{\rm-universal\ in}\ G \}.$$ Since $G$ is $k$-universal, it follows that $I \neq
\emptyset.$ Let $\langle i_1, \ldots , i_k \rangle \in I$ with $i_k$
maximal. Let $w = \{j <
i_k : E(a_j,a_{i_k}) \},$ and for each $j \in w,$ let $u_j = \{ l : l \in
\{1, \ldots , k-1 \}\ {\rm and}\ E(a_j,a_{i_l}) \}.$ Choose $l^{\ast} \in \{1, \ldots ,
k-1 \}.$ As $ |w| < 2^{k-2},$ there is $u \subseteq \{1, \ldots, k-1 \}
- \{ l^{\ast} \}$ such that for every $j \in w, u \neq u_j - \{ l^{\ast} \}.$
Now, let $H$ be a $k$-extendible graph with edge relation $E'.$ Since $\langle
a_{i_1},
\ldots,
a_{i_k}\rangle$ is $k$-universal
in $G,$ we may choose $b_1, \ldots , b_k \in H$ such that the
Duplicator
has a winning strategy for the \mbox{$\exists^k$-game}\ played from $H$ to $G$ with the $j^{\rm
th}$ pair of pebbles placed on $b_j$ and $a_{i_j}.$ We show that, in fact, the
Spoiler can force a win from this position, which yields the desired
contradiction. The Spoiler picks up the pebble resting on $b_{l^{\ast}}$ and places
it on a point $b \in H - \{ b_1 , \ldots , b_k \}$ such that $E'(b,b_k)$ and
$E'(b,b_l)$ for each $l \in u$
while $\neg E'(b,b_l)$ for each $l \in \{1, \ldots , k-1\} - (u \cup \{ l^{\ast} \}).$
In order to successfully answer the Spoiler's move, the Duplicator must move the
pebble now resting on $a_{i_{l^{\ast}}}$ and place it on a point $a_m \in G$ such
that $E(a_m,a_{i_k})$ and $a_m \neq a_{i_k}.$ In order to achieve this, she must
choose $a_m$ so that either $i_k < m$ or $m \in w.$ But in the first case we
would have that the position $\langle \ldots, \langle b_j,a_{i_j}\rangle , \ldots ,
\langle b_k,a_{i_k}\rangle,\langle b,a_m\rangle : j \neq l^{\ast}\rangle$ is a winning position for the
Duplicator in the \mbox{$\exists^k$-game}\ from $H$ to $G.$ This implies that $\langle \ldots ,
a_{i_j} , \ldots , a_{i_k} , a_m : j \neq l^{\ast}\rangle$ is $k$-universal
in $A.$ But then, since $i_k < m,$ we have
$\langle \ldots, i_j , \ldots, i_k , m : j
\neq l^{\ast} \rangle \in I.$ But, this contradicts the choice of $i_k$ to be maximal with
this property. Therefore, it suffices to show that $m \not\in w.$ But this
follows immediately from the fact that $m < i_k$ and the
construction of $u.$
\end{proof}
The next sublemmas deal with the theory of the random graph $G=\mbox{$G(n,n^{-\alpha})$}$,
$\alpha$ an irrational between $0$ and $1,$ as
developed in \cite{shelah-spencer} (see also
\cite{baldwin-shelah} for connections with model theory).
We say a property holds almost surely (abbreviated \mbox{\rm a.s.}) in \mbox{$G(n,n^{-\alpha})$}, if and
only if, its probability approaches $1$ as $n$ increases. Shelah and Spencer
showed (see \cite{shelah-spencer}) that for any first-order property $\theta$
and any irrational $\alpha$ between $0$ and $1,$ either $\theta$ holds \mbox{\rm a.s.}\ in
\mbox{$G(n,n^{-\alpha})$}\ or $\neg\theta$ holds \mbox{\rm a.s.}\ in \mbox{$G(n,n^{-\alpha})$}. For each such $\alpha,$ we
let $T^{\alpha} = \{ \theta : \theta\ {\rm holds\ \mbox{\rm a.s.}\ in\ } \mbox{$G(n,n^{-\alpha})$}
\}$ and we let \mbox{$K^{\alpha}_{\infty}$}\ be the set of finite graphs each of which is
embeddable in every model of $T^{\alpha}.$ We will suppress the superscripts on
these notations, when no confusion is likely to result; in general, we will use
notations which leave reference to a particular $\alpha$ implicit, as in the
following definition.
\begin{definition}[\cite{shelah-spencer}]\label{sparse-def}
Let $G$ and $H$ be graphs with $G \subseteq H,$ and let $\alpha$ be a fixed
irrational between $0$ and $1.$
\begin{enumerate}
\item $(G,H)$ is {\em sparse}, if and only if, $|\edges{H} - \edges{G}|/|H - G|
< 1/\alpha.$
\item $(G,H)$ is {\em dense}, if and only if, $|\edges{H} - \edges{G}|/|H - G|
> 1/\alpha.$
\item \safesub{G}{H}, if and only if, for every $I,$ if $G \subset I \subseteq
H,$ then $(G,I)$ is sparse.
\item \rigidsub{G}{H}, if and only if, for every $I,$ if $G \subseteq I \subset
H,$ then $(I,H)$ is dense.
\end{enumerate}
We say $G$ is {\em sparse} ({\em dense}), if and only if, $(\emptyset,G)$ is
sparse (dense).
\end{definition}
Note that since $\alpha$ is irrational every $(G,H)$ as above is either sparse
or dense.
\begin{sublemma}\label{finmod-sparse-sublemma}
If $G \in \mbox{$K_{\infty}$},$ then $\safesub{\emptyset}{G}.$
\end{sublemma}
\begin{proof}
The reader may find a proof of this sublemma in
\cite{spencer-90}.
\end{proof}
\begin{sublemma}\label{finmod-cn-sublemma}
If $\alpha$ is irrational and $1/(k+1) < \alpha < 1,$ then
\begin{enumerate}
\item $\mbox{$K_{\infty}$} \subseteq \ki{(2k+1)}$ and
\item if \safesub{A}{B}, then \speciall{2k+1}{A}{B}.
\end{enumerate}
\end{sublemma}
\begin{proof}
1. By Sublemma \ref{finmod-sparse-sublemma}, it suffices to show
that if $\safesub{\emptyset}{G},$
then $G \in \ki{2k+1}.$
So suppose $\safesub{\emptyset}{G}.$ We inductively define a $2k+1$-witness for
$G$ proceeding from the top down. Since $G$ is sparse,
$|\edges{G}|/|G| < k+1,$ from which it follows immediately that there is a
point $a \in G$ whose degree is $< 2k+2.$ We let $a = a_{|G|}$ be the last
element of our $2k+1$-witness for $G.$ Now, since
$\safesub{\emptyset}{G},$ $G' = G - \{a\}$ is sparse, so we may find an $a' \in
G'$ whose degree (in $G'$) is $< 2k+2$ as before. We let $a' = a_{|G|-1}$ be
the next to last element of our $2k+1$-witness for $G.$ Proceeding in this way,
we may complete the construction of a $2k+1$-witness for $G.$
2. Suppose \safesub{A}{B} and suppose $\overline{a}$ is a $2k+1$-witness for
$A.$ Just as above we may inductively construct an enumeration $\overline{b}$
of $B-A$ so that $\overline{a}\overline{b}$ is a $2k+1$-witness for $B.$
\end{proof}
The following closure operator plays an important role in the proof of Lemma
\ref{central-lemma}.
\begin{definition}\label{closure-def}
We define for graphs $G, H$ with $G \subseteq H$ and natural numbers $l,$ a
closure operator $\cl{l}{m}{G}{H}$\ by recursion on $m.$
\begin{enumerate}
\item $\cl{l}{0}{G}{H} = G;$
\item $\cl{l}{m+1}{G}{H} = \bigcup \{ B : B \subseteq H\ {\rm and}\ |B| \leq l\
{\rm and}\ B \cap
\cl{l}{m}{G}{H} \mbox{$\leq_{i}$} B \}.$
\end{enumerate}
We let $\cl{l}{\infty}{G}{H} = \bigcup_{m \in \omega}\cl{l}{m}{G}{H}.$
We say that $H$ is $l$-{\em small}, if and
only if, there is a $G \subseteq H$ such that $|G| \leq l$ and
$\cl{l}{\infty}{G}{H} = H.$
\end{definition}
The following lemma gives the crucial property of closures we will
exploit -- for a fixed $l$ there is almost surely in \mbox{$G(n,n^{-\alpha})$}\ a uniform
bound on the cardinality of the closure of a set of size at most $l.$
\begin{sublemma}\label{closure-sublemma}
For every $l$ there
is an
$l^{\ast}$ such that \mbox{\rm a.s.}\
for every $A \subseteq G (= \mbox{$G(n,n^{-\alpha})$})$, if $|A| \leq l,$ then
$|\cl{l}{\infty}{A}{G}| \leq l^{\ast}.$
\end{sublemma}
\begin{proof}
Note that if \rigidsub{B}{B'} and $B \subseteq C \subseteq B',$
then \rigidsub{C}{B'}. It follows that we may represent \cl{l}{\infty}{A}{G} as
$A \cup \bigcup_{i < i^{\ast}} B_i$ where $|B_i| \leq l$ and
$(A \cup \bigcup_{j < i} B_j) \cap B_i \leq_i B_i.$
Moreover, we may suppose,
without loss of generality, that this last extension is strict, for otherwise
$B_i$ could be omitted from the representation. Next we argue that there is an
$m$ (depending on $l$) which \mbox{\rm a.s.}\ uniformly bounds $i^{\ast},$ that is, there
is
an $m$ such that
\begin{quote}
$(\dagger)$ \mbox{\rm a.s.}\ in $G = \mbox{$G(n,n^{-\alpha})$}$ for all $A \subseteq G, |A| \leq l,$
there is an $i^{\ast} \leq m$ such that
\cl{l}{\infty}{A}{G} may be represented as
$A \cup \bigcup_{i < i^{\ast}} B_i$ where $|B_i| \leq l$ and
$\rigidsub{(A \cup \bigcup_{j < i} B_j) \cap B_i}{B_i}.$
\end{quote}
The sublemma follows
immediately from this, for then $l^{\ast} = m\cdot l$ is an \mbox{\rm a.s.}\ uniform bound
on $|\cl{l}{\infty}{A}{G}|.$
Let
\begin{equation*}
\begin{split}
\varepsilon = {\rm Min}(\{& (\alpha \cdot |\edges{B}-\edges{C}|)
-(|B-C|): \\
&B \subseteq G, |B| \leq l, \rigidsub{A \cap B}{B}, A \cap B \subseteq C \subset
B \}).
\end{split}
\end{equation*}
It follows from the definition of $\leq_i$ that
$\varepsilon >
0.$
Let $m = 1 + l/\varepsilon.$ We claim that $m$ satisfies condition $(\dagger).$
Let $$w_i = |A \cup \bigcup_{j < i} B_j| - \alpha \cdot
|\edges{A \cup \bigcup_{j < i} B_j}|.$$
Then, by hypothesis, $w_0 \leq |A| \leq l.$ Moreover, $w_{i+1} \leq (w_i -
\varepsilon).$ To see this, let $C = B_i \cap (A \cup \bigcup_{j < i} B_j)).$
Then, $A \cap B_i \subseteq C \subset B_i.$ Hence,
$w_{i+1} = |(A \cup \bigcup_{j < i} B_j) \cup B_i| - \alpha \cdot |\edges{(A
\cup \bigcup_{j < i} B_j) \cup B_i}| \leq (|A \cup \bigcup_{j < i} B_j| + |B_i
- C|) - \alpha \cdot (|\edges{A
\cup \bigcup_{j < i} B_j}| +(|\edges{B_i}| - |\edges{C}|)) \leq (w_i -
\varepsilon).$
It follows, by induction, that $w_i \leq l - i\cdot\varepsilon.$ Therefore,
if $i > l/\varepsilon,$ then $w_i < 0.$ So, by Sublemma
\ref{finmod-sparse-sublemma},
if $i^{\ast} \geq m,$
then $\cl{l}{\infty}{A}{G} =
A \cup \bigcup_{i < i^{\ast}}B_i \not\in \mbox{$K_{\infty}$}.$
Therefore, \mbox{\rm a.s.}\
$i^{\ast} < m.$
\end{proof}
For the purposes of the next sublemma and beyond, we introduce the
following notational convention:
we write \speciallns{A}{B}\ for \speciall{t}{A}{B}, when $t = 2^{k-2}-1.$
\begin{sublemma}\label{cn-closure-sublemma}
If $\alpha$ is irrational, $1/(k+1) < \alpha < 1, k \geq 7$ and $k+1 < k'$ then
the following condition holds \mbox{\rm a.s.}\ in $G = \mbox{$G(n,n^{-\alpha})$}.$
For all $a_1,
\ldots, a_{k'} \in G,$ if $A = \cl{k'}{\infty}{\{a_1,\ldots,a_{k'-1}\}}{G}$
and $B = \cl{k'}{\infty}{\{a_1,\ldots,a_{k'}\}}{G},$ then
\begin{enumerate}
\item $B \in \mbox{$K_{\infty}$}$ and
\item \speciallns{A}{B}.
\end{enumerate}
\end{sublemma}
\begin{proof}
1. This is an immediate consequence of the preceding Sublemma. By
the first-order 0-1 law for \mbox{$G(n,n^{-\alpha})$}, given any fixed bound $l^{\ast},$ \mbox{\rm a.s.}\
for all $A \subseteq G,$ if $|A| \leq l^{\ast},$ then $A \in \mbox{$K_{\infty}$}.$ \\
2. First observe that our closure operator is monotone in $\subseteq,$ hence $A
\subseteq B$ and also, by the definition of the closure operator, that for no
$C \subseteq B, C \not\subseteq A,
|C| \leq k'$ do we have $\rigidsub{A \cap C}{C}.$ We argue that
\speciallns{A}{B} as follows. Suppose $\overline{a} = \langle a_1, \ldots, a_{|A|}\rangle$
is a
$2^{k-2}-1$-witness for $A,$ and let $\overline{b} = \langle b_1, \ldots, b_{|B|}\rangle$ be
a
$2k+1$-witness for $B.$ The latter exists by Sublemma
\ref{finmod-cn-sublemma} since $B \in
\mbox{$K_{\infty}$}.$ Now, for every $b \in B-A, |\{a\in A : E(a,b)\}| \leq k,$ for otherwise
we could find a set $C \subseteq B, C \not\subseteq A, |C| = k+2,$ such that
\rigidsub{A \cap
C}{C}. Let $w =\{ i : 1 \leq i \leq |B|\ {\rm and}\ b_i \not\in A \},$ and let
$\overline{b'} = \langle b_i : i \in w\rangle$ be the restriction of $\overline{b}$ to an
enumeration of $B-A.$ By hypothesis, $k \geq 7,$ so $(2k+1)+k \leq
2^{k-2}-1;$ hence, we may
conclude that $\overline{a}\overline{b'}$ is a $2^{k-2}-1$-witness for $B.$
\end{proof}
\begin{sublemma}\label{alpha-kextendible-sublemma}
If $0 < \alpha < 1/k,$ then \mbox{$G(n,n^{-\alpha})$} is \mbox{\rm a.s.}\ $k$-extendible.
\end{sublemma}
\begin{proof}
The reader may find a proof of this sublemma in
\cite{mcarthur-diss}.
\end{proof}
We are now in a position to proceed to the proof of Lemma \ref{central-lemma}.
\begin{proof}[Proof of Lemma \ref{central-lemma}]
Let $k \geq 7$ and, without
loss
of
generality, let $k' > k+1.$
Fix $\alpha$ to be an irrational
number between $1/(k+1)$ and $1/k.$ It then follows from Sublemmas
\ref{cn-closure-sublemma} and
\ref{alpha-kextendible-sublemma} that there is a finite graph $N$ such that
\begin{enumerate}
\item[(N1)] $N$ is $k$-extendible;
\item[(N2)] for all $a_1,
\ldots, a_{k'} \in N,$ if $A = \cl{k'}{\infty}{\{a_1,\ldots,a_{k'-1}\}}{N}$
and \\
$B = \cl{k'}{\infty}{\{a_1,\ldots,a_{k'}\}}{N},$ then $B \in \mbox{$K_{\infty}$}$ and
\speciallns{A}{B}.
\end{enumerate}
To complete the proof we must construct for each $\theta \in \ffoe{k'},$ a
graph $M$ such that $M$ is not $k$-universal and if $N \models \theta,$ then $M
\models \theta.$ By Sublemma \ref{cnuniv-sublemma} and Proposition
\ref{ekgame-prop}, it suffices to construct for each $d \in \omega$ a graph $M$
such that
\begin{enumerate}
\item[(M1)] $\cn{M} < 2^{k-2},$ and
\item[(M2)] the Duplicator has a winning strategy for the $d$-move \mbox{$\exists^{k'}$-game}\
from $N$ to
$M.$
\end{enumerate}
We proceed to construct a structure $M$ that satisfies conditions (M1) and
(M2).
We first define chains of structures
$\langle M_i : i \leq d+1\rangle$ and
$\langle M_{i,j} : i \leq d, j
\leq j_i \rangle ,$ satisfying the
following conditions.
\begin{enumerate}
\item If $A \subseteq M_i, \speciallns{A}{B}, B \in \mbox{$K_{\infty}$},$ and $B$ is
$k'$-small, then for some $j < j_i, A = A_{i,j}$ and $B$ and $B_{i,j}$ are
isomorphic over $A.$
\item $M_0 = \emptyset.$
\item For all $i \leq d+1, \cn{M_i} < 2^{k-2}.$
\item For each $i \leq d, M_{i,0} = M_i$ and $M_{i,j_i} = M_{i+1}.$
\item For each $j<j_i,$ there are $A_{i,j}, B_{i,j}$ with
\begin{enumerate}
\item $B_{i,j}$ is $k'$-small;
\item $B_{i,j} \in \mbox{$K_{\infty}$}$;
\item $A_{i,j} \subseteq M_i$;
\item $\speciallns{A_{i,j}}{B_{i,j}}$;
\item $B_{i,j}$ is compatible with $M_{i,j}$ and $A_{i,j}$ is the subgraph
of $M_{i,j}$ induced by $B_{i,j} \cap M_{i,j}$;
\item $M_{i,j+1} = \freejoin{M_{i,j}}{B_{i,j}}{A_{i,j}}$;
\end{enumerate}
\end{enumerate}
By Sublemma \ref{closure-sublemma}, there are only
finitely many $k'$-small $B \in \mbox{$K_{\infty}$}.$
The existence of chains satisfying the above conditions then follows
immediately from the free amalgamation property for \speciallns{}{}\
stated in Sublemma \ref{freeamalgam-sublemma}.
We now let $M = M_{d+1}.$ It follows immediately from the construction
that $M$ satisfies condition (M1)
above. Thus, it only remains to show that $M$ satisfies condition (M2). In
order to do so, it suffices to verify the following claim which
supplies a
winning strategy for the Duplicator in the $d$-move \mbox{$\exists^{k'}$-game}\ from $N$ to
$M.$
\begin{quote}
{\em Claim}: Suppose $A = \{a_1, \ldots, a_{k'}\} \subseteq N, A' =
\cl{k'}{\infty}{A}{N}$
and $f$ is an embedding of $A'$ (the subgraph of $N$ induced
by $A'$) into $M_{(d+1)-i}.$ Then the pebble position with $\alpha_r$ on $a_r$
and
$\beta_r$ on $f(a_r),$ for $1\leq r \leq k'$ is a winning position for the
Duplicator in the $i$-move \mbox{$\exists^{k'}$-game}\ from $N$ to $M.$
\end{quote}
We proceed to establish the claim by induction. Given $1 \leq i \leq d,$
suppose that $A, A',f,$ and the pebble position are as
described. It suffices to show that given any move by the Spoiler, the
Duplicator can respond with a move into $M_{(d+1) - (i-1)}$ which will allow
the
conditions of the claim to be preserved. Suppose, without loss of generality,
that the Spoiler moves $\alpha_{k'}$ onto a vertex $a \in N.$
Let $A'' = \cl{k'}{\infty}{\{a_1, \ldots, a_{k'-1}\}}{N}$ and
let $A''' = \cl{k'}{\infty}{\{a_1, \ldots, a_{k'-1},a\}}{N}.$ Then, by
condition
(N2), $A''' \in \mbox{$K_{\infty}$}$ and \speciallns{A''}{A'''}.
Then, by condition 5 on the construction of our chains defining $M,$ there is a
$B \subseteq M_{(d+1) - (i-1)}$ and an isomorphism $f'$ from $A'''$ onto $B$
with $f'$ and $f$ having identical restrictions to $A''.$ Therefore, the
conditions of the claim will be preserved, if the Duplicator plays pebble
$\beta_{k'}$ onto $f'(a).$
\end{proof}
\begin{proof}[Proof
of Corollary \ref{finbase-cor}]
Let $k \geq 7.$ 1. Suppose,
for
{\em reductio}, that \mbox{$\mathcal{U}^k$}\ is finitely based with ``basis'' $\{ A_1, \ldots
, A_n \}.$ Let $k'$ be the maximum of the cardinalities of the $A_i.$ Then,
there is a sentence of \ffoe{k'} which defines \mbox{$\mathcal{U}^k$}, contradicting Theorem
\ref{main-thm}.
2. Suppose for {\em reductio} that the class of
$k$-extendible structures is finitely based and choose $k'$ as above with
respect to a ``basis'' for this class. As in the proof of Lemma
\ref{central-lemma}, there is a $k$-extendible graph $N$ such that each
\ffoe{k'} sentence true in $N$ has a model which is not $k$-universal and hence
not $k$-extendible. This implies that every submodel of $N$ of size at most
$k'$ is not $k$-extendible, which yields the desired contradiction.
\end{proof}
\begin{proof}[Proof
of Theorem \ref{univinlfp-thm}]
We show that the complement of \mbox{$\mathcal{U}^k$}\ is definable in
least fixed point logic, which is
sufficient since the language is closed under negation.
In fact, it is defined by a purely universal sentence.
The main idea
is to show that for all $A, A \not\in \mbox{$\mathcal{U}^k$}$ iff either $card(A) < k-1$
or for all
proper $k-1$-tuples $\overline{a} \subseteq A$, $\overline{a}$ is not
$k,m$-universal for some $m \in \omega$.
Equivalently, every proper $k-1$-tuple has finite rank.
This follows easily from the
following sequence of observations.
\begin{enumerate}
\item For all $A$,
$A$ is $k$-universal iff there is a proper $k-1$-tuple $\overline{a}
\subseteq A$ such that $\overline{a}$ is $k$-universal in $A$.
\item For all $A$, and every proper $k-1$-tuple $\overline{a}
\subseteq A$, $\overline{a}$ is $k$-universal in $A$ iff
$\overline{a}$ is $k, m$-universal in $A$, for all $m \in \omega$.
\item For every $A$ and proper $k-1$-tuple $\overline{a}$,
if $\overline{a}$
has rank $m+1$ in $A,$ then there is some
set $S \subseteq \{1, \ldots , k-1 \}$ and
formula $\varphi(x_1, \ldots , x_k) =
\bigwedge_{i < k} x_i \neq x_k \wedge \bigwedge_{i \in S}
E(x_i, x_k) \wedge \bigwedge_{i \not\in S} \neg E(x_i, x_k)$,
such that for all $a' \in A$, if $A \models \varphi(\overline{a}a')$,
then $\overline{a}a'$ has rank $\leq m$.
\end{enumerate}
Observations 1 and 2 essentially follow immediately from the definitions.
Observation 3 may be verified by considering the $k$-extendible models.
The above conditions yield an easy inductive definition of all
the proper $k-1$-tuples that are not $k$-universal. Call a formula
of the form of $\varphi$ above a {\em $k$-extension formula}.
Let $\varphi_1, \ldots , \varphi_t$ be the set of $k$-extension
formulas.
By observation 3, a proper $k-1$-tuple $\overline{a}$ has
rank 0 iff there is some $k$-extension formula $\varphi$ such that
there is no $a'$ such that $A \models \varphi(\overline{a}a')$;
and $\overline{a}$ has rank $\leq m+1$
iff there is some $k$-extension formula $\varphi$
such that for all $a'$,
if $A \models \varphi(\overline{a}a')$, then $\overline{a}a'$
has rank $\leq m$.
We now show how to express this definition by a least fixed point formula. Let
$\theta(x_1, \ldots , x_{k-1})$ be the following formula:
$$
\bigvee_{i < j \leq k-1} x_i =x_j \vee
\bigvee_{s \leq t}\forall x_k (\neg
\varphi_s(\overline{x}x_k) \vee \bigvee_{j \leq k} R(x_1, \ldots , x_{j-1},
x_{j+1}, \ldots , x_k)).$$
$R$ appears positively in the
formula, so that $\theta$ defines an inductive operator on each
graph $G, \Theta_G(X)$, that maps $k-1$-ary relations $P$ to
$k-1$-ary relations $\Theta_G(P)$. Let $\Theta^0_G = \Theta_G
(\emptyset)$, and let $\Theta^{n+1}_G = \Theta_G(\Theta^n_G)$.
If $\Theta^{n+1}_G = \Theta^n_G$, then $\Theta^n_G$ is a fixed point of
the operator. In fact, it is the least fixed point, which we
denote $\Theta^{\infty}_G$. Observe that for all proper $k-1$-tuples
$\overline{a}, \overline{a} \in \Theta^{n+1}_G -\Theta^n_G$
iff the rank of $\overline{a}$ is $n$.
By the above observation, $G$ is $k$-universal iff $\Theta^{\infty}_G
=A^{k-1}$. Therefore, the following formula defines the
class of graphs that are not in \mbox{$\mathcal{U}^k$}.
$$\forall x_1 \ldots x_{k-1} \bigvee_{i < j \leq k-1} x_i =x_j
\vee \forall x_1 \ldots x_{k-1} \Theta^{\infty}_G(x_1, \ldots , x_{k-1})$$
This completes the proof.
\end{proof}
\begin{proof}[Proof
of Theorem \ref{univnotextend-thm}]
Let $k \geq 4.$ We construct
$G$
as follows. Let $V$ be the set of
binary sequences of length $k,$ that is, $V$ is the set of $0,1$-valued
functions with domain $\{1, \ldots, k \}.$ For each $1 \leq i \leq k,$ let
$V_i = V \times \{i\}$ and let $U = \bigcup_{1 \leq i \leq k} V_i.$
$U$
is the set of vertices of the graph $G.$ The edge relation $E$ of $G$ is
defined
as follows:
$$E((f,i),(g,j)) \longleftrightarrow (i \neq j \wedge f(j) = g(i)).$$
We proceed to verify that $G$ satisfies the conditions of the theorem.
First we show that $G$ is $k$--universal. Let $H$ be an arbitrary graph. We
describe a winning strategy for
the Duplicator in the \mbox{$\exists^k$-game}\ from $H$ to $G.$
At each round the Duplicator plays so as to pebble at
most one element of each $V_i.$ We may suppose without loss of generality that
all $k$ pebbles are on the board at round $s,$ that the Duplicator has played
$\beta_i$ on an element of $V_i,$ and that the map from the elements
pebbled in $H$ to the corresponding elements pebbled in $G$ is a partial
isomorphism. Suppose the Spoiler plays $\alpha_j$ onto an element $b
\in H$ at round $s+1$
and let $X$ be the set of $i$ such that there is an edge between $b$ and
the vertex of $H$ pebbled by $\alpha_i.$ Let $(f_i,i)$ be the
vertex of $G$ pebbled by $\beta_i$ at round $s.$ We must
show that the Duplicator may play $\beta_j$ at round $s+1$ onto a
vertex $(g,j) \in V_j$ such that for all $1 \leq i \leq k,$
$$E((g,j),(f_i,i)) \longleftrightarrow i \in X.$$
It is clear that $(g,j)$ satisfies this condition when $g$ is defined as
follows: $g(i) = f_i(j),$ if $i \in X$; $g(i) = 1 - f_i(j),$ if $i \not\in X.$
This completes the proof that $G$ is $k$-universal.
Let $H \subseteq G$, and suppose, for {\em reductio}, that $H$ is
$k$-extendible. It is easy to verify that any graph $H$ is
$k$-extendible iff
for all $j$-tuples $\overline{a}$ in $H$, $j \leq k$,
$\overline{a}$ is $k$-universal in $H$\@. To establish the
contradiction, we show that there are $a_1, a_2 \in H$ such
that $(a_1, a_2)$ is not $k$-universal in $G$, which
immediately implies that $(a_1,a_2)$ is not $k$-universal in $H$ either.
The cardinality of any $k$-extendible graph is $\geq k+1$, so there
is an $l \leq k$ such that $H$ contains two vertices,
$(f_1, l), (f_2, l)$, in $V_l$. Let $w' = \{j \mid
j \neq l \mbox{ and } f_1(j) \neq f_2(j)\}$
and let $w'' = \{j \mid j \neq l \mbox{ and } f_1(j) = f_2(j)\}$.
Let $w =w'$, if $|w'| \leq |w''|$, and let $w = w''$, otherwise.
Observe that $|w| \leq (k-1)/2,$ which is $< k-2$ for all $k \geq 4.$
We now show that $(f_1, l), (f_2, l)$ is not $k, |w| +1$-universal
in $G$\@. Suppose that $w=w'$.
Let $\theta(x_1, \ldots , x_{|w| +3}) = $
$$\bigwedge_{1 \leq i<j\leq |w|+3} x_i \neq x_j
\wedge \bigwedge_{3\leq i \leq |w|+3} (E(x_1, x_i) \wedge \neg E(x_2, x_i))
\wedge \bigwedge_{3\leq i < j \leq |w| + 3}E(x_i,x_j).$$
(Note that $|w|+3 \leq k$, since $k\geq 4$.) Observe that
for any $|w|+3$-tuple $\overline{a} = (a_1, \ldots , a_{|w|+3})$
such that $a_1 = (f_1, l)$ and $a_2 = (f_2, l)$,
$G \not\models \theta(\overline{a})$. If we let $$\varphi(x_1,x_2)
= \exists x_3 \ldots x_{|w|+3}\theta(x_1, \ldots , x_{|w|+3}),$$
then it follows that $G \not\models \varphi((f_1,l),(f_2,l))$.
Therefore $((f_1,l),(f_2,l))$ is not $k,|w|+1$-universal in $G$\@.
The argument for $w = w''$ is similar.
\end{proof}
The above construction may be extended to arbitrary finite relational
signatures.
\begin{proof}[Proof
of Theorem \ref{kineqminkuniv-thm}]
Let $k \geq 6.$ For all $n \geq 4k$, we construct
graphs $G_n$ such that:
\begin{enumerate}
\item $G_n$ is $k$-universal.
\item For all $H \subseteq G_n$, if $H$ is $k$-universal, then
the diameter of $H$ is $\geq\lfloor (n-1)/2\rfloor /(k-1)$.
\end{enumerate}
(Recall that the diameter of a graph is the maximum distance
between any two vertices if it is connected, and $\omega$ otherwise.
It is an easy exercise to show that for $k \geq 3$, every minimal
$k$-universal graph is connected.)
This immediately yields the fact that there are
minimal $k$-universal models of arbitrarily large finite diameter.
It is easy to check that the property of having finite diameter
$=d$ is expressible in $L^3$, which implies that any two graphs
with different diameters are $L^k$-inequivalent.
The graphs $G_n$ are based on a modification of the construction
from the proof of Theorem 3. Let $V$ be the set of functions from
the interval $\{-(k-2), \ldots , k-2\}$ into $\{0, 1\}$.
For each $m, 0 \leq m \leq n-1$, let $V_{m} = \{0, 1\}
\times V \times \{ m\}$.
The set of vertices of $G_n$ is $\bigcup_m V_{m}$. The edge
relation on $G_n$ is defined as follows. For all
$m,m'$, $a \in V_m, a' \in V_{m'}$, if $m= m'$ or
$k \leq m-m' \leq n-k(mod\ n)$,
then $\neg E(a,a').$ If $0 < m-m' <k-1 (mod\ n)$,
and $a = (\delta , f, m), a' =(\delta ', f', m')$, with $\delta ,
\delta ' \in \{0, 1\}$ and $f,f' \in V$, then $E(a,a')$
iff $f'(m-m')=f(m'-m)$. (Here, subtraction is $modulo\ n$.)
Finally, if $m-m' = k-1 (mod\ n)$, then
$E(a,a')$ iff $\delta = 1$. In this case, each $a \in V_m$
is either adjacent to every vertex in $V_{m'}$ or to none of them.
If $m' = m + \lfloor (n-1)/2\rfloor$, then the distance $d(a,a') \geq
\lfloor(n-1)/2\rfloor /(k-1)$.
Observe also that for all $l \leq n-1$,
there is an automorphism of $G_n$ taking each $V_m$
to $V_{m+l}$. (All indices are $modulo\ n$.)
First we show that $G_n$ is $k$-universal. Let $G'$ be an arbitrary
graph. It suffices to prove that the D wins the $\exists^k$-game
from $G'$ to $G_n$.
By an argument similar to the one given in the proof of
Theorem 3, it is easy to see that the D can play so that in each
round $i \leq k$, she plays a pebble on a vertex in $V_i $.
We now argue by induction that in each subsequent round $j > k$,
she can maintain the following condition: there is some $l \leq n$
such that there is exactly one pebble on each $V_m$, for $m$ such that
$0 \leq m-l \leq k-1(mod\ n)$. The basis step is already taken care of.
Suppose that in round $j$, the D has a single pebble in each
vertex set $V_l, \ldots , V_{l+(k-1)}$. We consider
two cases. One, the S replays the pebble $\alpha_i$ whose pair
$\beta_i$ in $G_n$
is on an element of $V_l$. It is easy to verify that the
D can respond by playing $\beta_i$ on a vertex in $V_{l+k}$.
Observe that the D's pebbles are now on $V_{l+1}, \ldots ,
V_{l+k}$, as desired.
Two, the S replays any other pebble $\alpha_{i'}$, whose pair
$\beta_{i'}$ is on some element
of $V_{l'}, l \neq l'$. The D can respond by replaying
the pebble on some other element of $V_{l'}$. Again,
that this is possible essentially follows from the proof of Theorem 3.
Next we argue that any $k$-universal $H \subseteq G_n$ has diameter
$\geq \lfloor (n-1)/2\rfloor /(k-1)$. In particular, it is
sufficient to prove $H$ must contain a vertex from each $V_m, m\leq
n-1$. Let $A$ be any $k$-extendible graph. The argument proceeds
by establishing that, in the $\exists^k$-game from $A$ to $H$,
the S can eventually force the D to play a pebble on a vertex in
each $V_m \cap H$. If $V_m \cap H = \emptyset$, for some $m$,
then the D loses.
In rounds 1 through $k$, the S plays on a $k$-clique in $A$.
For every $k$-clique in $G_n$, and hence also in $H$, there is an
$m \leq n-1$ such that each $V_{m'}, 0 \leq m'-m \leq k-1(mod\ n)$, contains
exactly one element from the clique. Therefore, after $k$ rounds,
the D must have a single pebble on each of $V_m, \dots ,
V_{m+(k-1)}$, for some $m$. It suffices to show that the S
can force the D to play so that exactly one pebble occupies a
vertex in each set $V_{m+1},
\ldots , V_{m+k}$, since by iterating this strategy, he can
force the D to play onto each $V_{l}$.
To simplify the notation, we assume $m=0$ and that each pebble $\beta_i,
0 \leq i \leq k-1$, is on a vertex in $V_{i}$. Let
$b_i = (\delta_i, f_i, i), \delta_i \in \{0, 1\}, f_i \in V$,
be the element pebbled by $\beta_i$.
In round $k+1$, the S replays
pebble $\alpha_0$ and places it on an element $a \in A$ such
that $E(a, \alpha_1)$ and
for $i \in \{2, \ldots , k-1\}$, $E(a, \alpha_i)$ iff
$\delta_i = 0$. (Here we abuse notation
and use
$\alpha_j$ to refer also to the element on which the pebble is
located.)
Since $\alpha_0$ and $\alpha_1$ are now adjacent in $A$, the D has to
play $\beta_0$ on some element in a set $V_l$, for
$-(k-2) \leq l \leq k(mod\ n)$, so that it is adjacent to $\beta_1$.
By the condition that for $i \in \{2, \ldots ,
k-1\}$, $E(a, \alpha_i)$ iff $\delta_i = 0$, the D cannot play
in $V_l$, for $-(k-3) \leq l \leq 0(mod\ n)$.
If the D plays the pebble in $V_k$, then
the S has succeeded. Suppose that the D plays
$\beta_0$ on an element of $V_{-(k-2)}$. We now claim that
there is no 3-clique in $G_n$ [$H$] each of whose elements is
adjacent to both $\beta_{k-1}$ and $\beta_0$.
This is because $(i)$ the only elements of $G_n$ that are adjacent
to vertices in both $V_{-(k-2)}$ and $V_{k-1}$ are members
of either $V_{0}$ or $V_{1}$, and $(ii)$ there is no 3-clique
in $V_{0} \cup V_{1}$. Thus the S can force a win in 3 moves
by replaying pebbles $\alpha_1, \alpha_2, \alpha_3$ so that they
occupy a 3-clique each of whose elements are adjacent to $\alpha_0$
and $\alpha_{k-1}$.
The remaining case occurs when the D plays $\beta_0$ on a vertex
in $V_{j}$, for $1 \leq j \leq k-1$. Without loss of generality,
let $j = k-2$, and let $b'$ be the vertex now occupied by
$\beta_0$. Let $w'= \{i \mid 1 \leq i \leq 3 \mbox{ and }
E(b_{k-2}, b_i) \mbox{ iff } E(b', b_i)\}$ and
$w''= \{i \mid 1 \leq i \leq 3 \mbox{ and }
E(b_{k-2}, b_i) \mbox{ iff } \neg E(b', b_i)\}$.
Again without loss of generality, suppose that
$|w'| \geq 2$ and $w' =\{1, 2\}$.
By exploiting the fact that $\beta_0$ and $\beta_{k-2}$ both
occupy vertices in $V_{k-2}$, the S can now force
the D to play $\beta_2$ onto $V_k$.
The S first places $\alpha_2$
on a vertex such that for all $j, 1 \leq j \leq k-1, j\neq 2$,
$E(\alpha_2, \alpha_j)$, and $\neg E(\alpha_2, \alpha_0)$.
It is easy to see that the D must put $\beta_2$ on either $V_0$ or
$V_k$.
In the first case, the S responds by playing
$\alpha_1$ so that for all $j, 2 \leq j
\leq k-1$, $E(\alpha_1, \alpha_j)$ and $\neg E(\alpha_1, \alpha_0)$.
The D now loses immediately. The only vertices adjacent to each
$\beta_j, 2 \leq j \leq k-1$, are elements of $V_1$ or $V_2$,
but for each $b \in V_1$ or $V_2,$ $E(b, \beta_{k-2})$ iff
$E(b, \beta_0)$.
In the second case, the S then plays
$\alpha_0$ onto a vertex such that for all $j, 1 \leq j \leq k-1$,
$E(\alpha_0, \alpha_j)$. This compels the D to play $\beta_0$ in
$V_{2}$, so that there is a now a single pebble in each
$V_1, \ldots , V_k$,
as desired.
\end{proof}
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Steady-state simulation of reflected Brownian motion and related
stochastic networks\thanksref{T1}}
\runtitle{Steady-state simulation of reflected Brownian motion}
\thankstext{T1}{Supported in part by the Grants NSF-CMMI-0846816
and NSF-CMMI-1069064.}
\begin{aug}
\author[A]{\fnms{Jose} \snm{Blanchet}\corref{}\ead[label=e1]{jose.blanchet@columbia.edu}}
\and
\author[B]{\fnms{Xinyun} \snm{Chen}\ead[label=e2]{xinyun.chen@stonybrook.edu}}
\runauthor{J. Blanchet and X. Chen}
\affiliation{Columbia University and Stony Brook University}
\address[A]{Industrial Engineering\\
\quad and Operations Research\\
Columbia University\\
340 S. W. Mudd Building\\
500 W. 120 Street\\
New York, New York 10027\\
USA\\
\printead{e1}}
\address[B]{Department of Applied Mathematics\\
\quad and Statistics\\
Stony Brook University\\
Math Tower B148\\
Stony Brook, New York 11794-3600\\
USA\\
\printead{e2}}
\end{aug}
\received{\smonth{1} \syear{2012}}
\revised{\smonth{9} \syear{2014}}
\begin{abstract}
This paper develops the first class of algorithms that enable unbiased
estimation of steady-state expectations for multidimensional reflected
Brownian motion. In order to explain our ideas, we first consider the
case of compound Poisson (possibly Markov modulated) input. In this
case, we analyze the complexity of our
procedure as the dimension of the network increases and show that, under
certain assumptions, the algorithm has polynomial-expected termination
time. Our methodology includes procedures that are of
interest beyond steady-state simulation and reflected processes. For instance,
we use wavelets to construct a piecewise linear function that can be
guaranteed to be within $\varepsilon$ distance (deterministic) in the uniform
norm to Brownian motion in any compact time interval.
\end{abstract}
\begin{keyword}[class=AMS]
\kwd{60J65}
\kwd{65C05}
\end{keyword}
\begin{keyword}
\kwd{Reflected Brownian motion}
\kwd{steady-state simulation}
\kwd{dominated coupling from the past}
\kwd{wavelet representation}
\end{keyword}
\end{frontmatter}
\section{Introduction}\label{sec1}
This paper studies simulation methodology that allows estimation, without
any bias, of steady-state expectations of multidimensional reflected
processes. Our algorithms are presented with companion rates of
\mbox{convergence}. Multidimensional reflected processes, as we shall explain,
are very
important for the analysis of stochastic queueing networks. However, in
order to
motivate the models that we study, let us quickly review a formulation
introduced by \citet{Kella1996}.
Consider a network of $d$ queueing stations indexed by $\{1,2,\ldots,d\}$.
Suppose that jobs arrive to the network according to a Poisson process with
rate $\lambda$, denoted by $(N ( t ) \dvtx t\geq0)$. Specifically,
the $
k $th arrival brings a vector of job requirements $\mathbf{W} (
k ) = ( W_{1} ( k ),\ldots,W_{d} ( k )
) ^{T}$
which are nonnegative random variables (r.v.'s), and they add to the
workload at each station right at the moment of arrival. So if the $k$th
arrival occurs at time $t$, the workload of the $i$th station (for $
i\in\{1,\ldots,d\}$) increases by $W_{i} ( k ) $ units right
at time $
t $. We assume that $\mathbf{W}=(\mathbf{W} ( k ) \dvtx k\geq1$) is a
sequence of i.i.d. (independent and identically distributed) nonnegative
r.v.'s. For fixed $k$, the coordinates of $\mathbf{W} ( k ) $ are
not necessarily independent; however, $\mathbf{W}$ is assumed to be
independent of $N ( \cdot ) $.
Throughout the paper we shall use boldface to write vector quantities, which
are encoded as columns. For instance, we write $\mathbf{y}= (
y_{1},\ldots,y_{d} ) ^{T}$.
The total amount of external work that arrives to the $i$th station up
to (and
including) time $t$ is denoted by
\[
J_{i} ( t ) =\sum_{k=1}^{N ( t ) }W_{i}
( k ).
\]
Now, assume that the workload at the $i$th station is processed as a fluid
by the server at a rate $r_{i}$, continuously in time. This means that if
the workload in the~$i$th station remains strictly positive during the time
interval $[t,t+dt]$, then the output from station $i$ during this time
interval equals $r_{i}\,dt$. In addition, suppose that a proportion $
Q_{i,j}\geq0$ of the fluid processed by the $i$th station is circulated to
the $j$th server. We have that $\sum_{j=1}^{d}Q_{i,j}\leq1$, $Q_{i,i}=0$,
and we define $Q_{i,0}=1-\sum_{j=1}^{d}Q_{i,j}$. The proportion $Q_{i,0}$
corresponds to the fluid that goes out of the network from station $i$.
The dynamics stated in the previous paragraph are expressed formally by a
differential equation as follows. Let $Y_{i} ( t ) $ denote the
workload content of the $i$th station at time $t$. Then for given
$Y_{i} (
0 ) $, we have
\begin{eqnarray}
\label{S1b} dY_{i} ( t ) & =&dJ_{i} ( t ) -
r_{i}I \bigl( Y_{i} ( t ) >0 \bigr) \,dt+\sum
_{j:j\neq i}Q_{j,i}r_{j}I \bigl(
Y_{j} ( t ) >0 \bigr) \,dt
\nonumber
\\
& =&dJ_{i} ( t ) -r_{i}\,dt+\sum
_{j:j\neq i}Q_{j,i}r_{j}\,dt
\\
&&{} +r_{i}I \bigl( Y_{i} ( t ) =0 \bigr) \,dt-\sum
_{j:j\neq
i}Q_{j,i}r_{j}I \bigl(
Y_{j} ( t ) =0 \bigr) \,dt\nonumber
\end{eqnarray}
for $i\in\{1,\ldots,d\}$. It is well known that the resulting vector-valued
workload process, $\mathbf{Y} ( t ) = ( Y_{1} ( t ),\ldots,Y_{d} ( t ) ) ^{T}$, is Markovian. The differential
equation (\ref{S1b}) admits a unique piecewise linear solution that is
right-continuous and has left limits (RCLL). This can be established by
elementary methods, and we shall comment on far-reaching extensions shortly.
The equations given in (\ref{S1b}) take a neat form in matrix notation. This
notation is convenient when examining stability issues and other topics which
are related to the steady-state simulation problem we investigate.
In particular, let $\mathbf{r}= ( r_{1},\ldots,r_{d} ) ^{T}$ be
the column vector corresponding to the service rates, write $R= (
I-Q ) ^{T}$ and define
\[
\mathbf{X} ( t ) =\mathbf{J} ( t ) -R\mathbf{r}t,
\]
where $\mathbf{J} ( t ) $ is a column vector with its $i$th
coordinate equal to $J_{i} ( t ) $. Then
equation~(\ref{S1b}) can be expressed as
\begin{equation}
\mathbf{Y} ( t ) =\mathbf{Y} ( 0 ) +\mathbf{X} ( t ) +R\mathbf{L} ( t ),
\label{SP1}
\end{equation}
where $\mathbf{L} ( t ) $ is a column vector with its $i$th
coordinate equal to
\[
L_{i} ( t ) =\int_{0}^{t}r_{i}I
\bigl( Y_{i} ( s ) =0 \bigr) \,ds.
\]
As mentioned earlier, $\mathbf{Y=(Y} ( t ) \dvtx t\geq0)$ is a Markov
process. Let us assume that $Q^{n}\rightarrow0$ as $
n\rightarrow\infty$. This assumption is synonymous with the assumption
that the network
is open. In detail, for each $i$ such that $\lambda_{i}>0$, there
exists a path $(i_{1},i_{2},\ldots,i_{k})$
satisfying that $\lambda
_{i}Q_{i,i_{1}}Q_{i_{1},i_{2}}\cdots Q_{i_{k-1},i_{k}}>0$ with $i_{k}=0$
and $k\leq d$. In addition, under this assumption the matrix $R^{-1}$ exists and has
nonnegative coordinates. To ensure stability, we assume that
$R^{-1}E\mathbf{X} (1 ) <0$---inequalities involving vectors are understood
coordinate-wise throughout the paper. It follows from Theorem~2.4 of
\citet{KellaRamasubramanian2012} that $\mathbf{Y} ( t )$
converges in distribution to $\mathbf{Y}
( \infty ) $ as $t\rightarrow\infty$, where $\mathbf{Y} (
\infty ) $ is an r.v. with the (unique) stationary distribution of
$
\mathbf{Y} ( \cdot ) $.
The first contribution of this paper is that we develop an exact sampling
algorithm (i.e., simulation without bias) for $\mathbf{Y} ( \infty
) $. This algorithm is developed in Section~\ref{SecCompPoi}
of this paper under the assumption that $\mathbf{W} ( k ) $
has a finite moment-generating function. In addition, we analyze the
order of computational complexity
(measured in terms of expected random numbers generated) of our
algorithm as $d$ increases, and we show that it is
polynomially bounded.
Moreover, we extend our exact sampling algorithm to the case in
which there is an independent Markov chain driving the arrival rates, the
service rates, and the distribution of job sizes at the time of arrivals.
This extension is discussed in Section~\ref{SectionExtension}.
The workload process $ ( \mathbf{Y} ( t ) \dvtx t\geq0 )
$ is
a particular case of a reflected (or constrained) stochastic network.
Although the models introduced in the previous paragraphs are
interesting in
their own right, our main interest is the steady-state
simulation techniques for reflected Brownian motion. These techniques
are obtained by
abstracting the construction formulated in (\ref{SP1}). This
abstraction is
presented in terms of a Skorokhod problem, which we describe as
follows. Let $\mathbf{X=} ( \mathbf{X}
( t ) \dvtx t\geq0 ) $ with $\mathbf{X}(0)\geq0$, and $R$
be an $M$-matrix so that the inverse $R^{-1}$ exists and has
nonnegative coordinates. To solve the Skorokhod problem requires
finding a pair of processes $ (
\mathbf{Y,L} ) $ satisfying equation (\ref{SP1}), subject to:
\begin{longlist}[(iii)]
\item[(i)] $\mathbf{Y} ( t ) \geq0$ for each $t$,
\item[(ii)] $L_{i} ( \cdot ) $ nondecreasing for each $i\in\{
1,\ldots, d\}$ and
$L_{i} ( 0 ) =0$,
\item[(iii)] $\int_{0}^{t}Y_{i} ( s ) \,dL_{i} ( s ) =0$ for
each $t$.
\end{longlist}
Eventually we shall take the input process $\mathbf{X} ( \cdot
) $ as a
Brownian motion with constant drift $\mathbf{v}=E\mathbf{X} (
1 ) $
and nondegenerate covariance matrix $\Sigma$. There
then exists a strong solution (i.e., path-by-path and not only in law)
to the stochastic differential equation (SDE) (\ref{SP1}) subject to the
Skorokhod problem constraints (i) to (iii), and the initial condition
$\mathbf{
Y} ( 0 ) $. This was proved by \citet{HarrisonReiman1981}, who
introduced the notion of reflected Brownian motion (RBM). When $R$ is
an $M$-matrix, $R^{-1}\mathbf{v}<0$ is a necessary and
sufficient condition for the stability of an RBM; see \citet
{HarrisonWilliams1987}.
Our algorithm for the RBM is motivated by the
fact that in great generality (i.e., only requiring the existence of
variances of
service times and inter-arrival times), the so-called generalized Jackson
networks (which are single-server queues connected with Markovian routing)
converge weakly to a reflected Brownian motion in a heavy traffic asymptotic
environment as in \citet{Reiman1984}. Moreover, recent papers from
\citet{GamarnikZeevi2006} and \citet{BudhirajaLee2009} have
shown that convergence occurs also at the level of steady-state
distributions. Therefore, reflected Brownian motion
(RBM) plays a central role in queueing theory.
The second contribution of this paper is the development of an
algorithm that allows estimation with no bias of $E [g ( \mathbf
{Y} (
\infty ) ) ] $ for positive and continuous functions $
g ( \cdot ) $. Moreover, given $\varepsilon>0$,
we provide a simulation algorithm that outputs a random variable
$\mathbf{Y}
_{\varepsilon} ( \infty ) $ that can be guaranteed to be
within $\varepsilon$ distance (say in the Euclidian norm) from an
unbiased sample $\mathbf{Y} ( \infty ) $ from the
steady-state distribution of RBM. This contribution is developed in
Section~\ref{SecRBM} of this paper. We show that the number of Gaussian random
variables generated to produce $\mathbf{Y}_{\varepsilon} ( \infty
) $ is of order $O(\varepsilon^{-a_{C}-2}\log(1/\varepsilon))$
as $\varepsilon\searrow0$, where $a_{C}$ is a
constant only depending on the covariance matrix of the Brownian
motion; see
Section~\ref{SubSecCCRBM}. In the special case when the $d$-dimensional
Brownian motion has nonnegative correlations, the number of random
variables generated is of order $O(\varepsilon^{-d-2}\log
(1/\varepsilon
)) $.
Our methods allow estimation without bias of $E [g ( \mathbf
{Y} (
t_{1} ),\mathbf{Y} ( t_{2} ),\ldots,\break \mathbf{Y} (
t_{m} ) ) ]$ for a positive function $g ( \cdot
) $
continuous almost everywhere and for any $0<t_{1}<t_{2}<\cdots<t_{m}$.
Simulation of RBM has been studied in the literature. In the one-dimensional
setting it is not difficult to sample RBM exactly; this follows, for
instance, from the methods in \citet{Devroye2009}. The paper of
\citet{Asmussenetal1995} also studies the one-dimensional case and
provides an enhanced Euler-type scheme with an improved convergence rate.
The work of \citet{BurdzyChen2008} provides approximations of reflected
Brownian motion with orthogonal reflection (the case in which $R=I$).
With regard to steady-state computations, the work of
\citet{DaiHarrison1992} provides numerical methods for approximating the
steady-state expectation by numerically evaluating the density of
$\mathbf{Y}
( \infty ) $. In contrast to our methods, Dai and Harrison's
procedure is based on projections in mean-squared norm with respect to a
suitable reference measure. Since such an algorithm is nonrandomized,
it is
therefore, in some sense, preferable to simulation approaches, which are
necessarily randomized. However, the theoretical justification of Dai and
Harrison's algorithm relies on a conjecture that is believed to be true but
has not been rigorously established; see \citet{DaiDieker2011}. In addition,
no rate of convergence is known for this procedure, even assuming that the
conjecture is true.
Finally, we briefly discuss some features of our procedure and our strategy
at a high level. There are two sources of bias that arise in the
setting of
steady-state simulation of RBM. First, discretization error in the
simulation of the process $\mathbf{Y}$ is inevitable due to the
continuous nature of Brownian motion, especially when the reflection
matrix $R$ is not the identity. This issue is present even in finite
time horizon. The
second issue is, naturally, that we are concerned with steady-state
expectations which inherently involve, in principle, an infinite time
horizon.
In order to concentrate on removing the bias issues arising from the
infinite horizon, we first consider the reflected compound Poisson case
where we can simulate the solution of the
Skorokhod problem in any finite interval exactly and without any bias.
Our strategy is based on the dominated coupling from the past
(DCFTP). This technique was proposed by \citet{Kendall2004}, following the
introduction of coupling from the past by \citet{ProppWilson1996}. The
idea behind DCFTP is to construct suitable upper- and lower-bound
processes that can be simulated in stationarity and backward in time.
We take the lower bound to be the process identically equal to zero. We use
results from \citet{HarrisonWilliams1987} (for the RBM) and \citet
{Kella1996} (for the reflected compound Poisson process), to construct
an upper bound process based on the solution of the Skorokhod
problem with reflection matrix $R=I$. It turns out that simulation of the
stationary upper-bound process backward involves sampling the infinite
horizon maximum (coordinate-wise) from $t$ to infinity of a $d$-dimensional compound Poisson process with negative drift. We use sequential
acceptance/rejection techniques (based on exponential tilting
distributions used in rare-event simulation) to simulate from an infinite
horizon maximum process.
Then we turn to RBM. A problem that arises, in addition to the
discretization error given the continuous nature of Brownian motion, is the
fact that in dimensions higher than one (as in our setting) RBM never
reaches the origin. Nevertheless, it will be arbitrarily close to the origin,
and we shall certainly leverage off this property to obtain simulation that
is guaranteed to be $\varepsilon$-close to a genuine steady-state sample. Now
in order to deal with the discretization error we use wavelet-based
techniques. We take advantage of a well-known wavelet construction of
Brownian motion; see \citet{Steele2001}.
Instead of simply simulating Brownian motion using the wavelets,
which is the standard practice, we simulate the wavelet coefficients jointly
with suitably defined random times. Consequently, we are able to guarantee
with probability one that our wavelet approximation is $\varepsilon
$-close in
the uniform metric to Brownian motion in any compact time interval (note
that $\varepsilon$ is deterministic and defined by the user; see Section~\ref
{SubWave}).
Finally, we use the following fact. Let process $\mathbf{Y}$ be the
solution to the Skorokhod problem. Then the process $\mathbf{Y}$, as a
function of the input process $
\mathbf{X}$, is Lipschitz continuous with a computable Lipschitz constant,
under the uniform topology. These observations combined with an
additional randomization, in the spirit of \citet{Beskosetal2012}, allow
estimation with no bias of the steady-state expectation.
We strongly believe that the use of tolerance-enforced coupling based on
wavelet constructions, as we illustrate here, can be extended more broadly
in the numerical analysis of the Skorokhod and related problems.
We perform some numerical experiments to validate
our algorithms. Our results are reported in Section~\ref
{SectionNumerics}. Further numerical experiments are pursued in a companion
paper, in which we also discuss further implementation issues and some
adaptations, which are especially important in the case of RBM.
The rest of the paper is organized as follows: in Section~\ref{SecCompPoi},
we consider the problem of exact simulation from the steady-state
distribution of the reflected compound Poisson process discussed
earlier; we then
show how our procedure is adapted without major complications to
Markov-modulated input in Section~\ref{SectionExtension}; in
Section~\ref{SecRBM}, we continue explaining the main strategy to be
used for the
reflected Brownian motion case; finally, the numerical
experiments are given in Section~\ref{SectionNumerics}.
\section{Exact simulation of reflected compound Poisson processes}\label
{SecCompPoi}
The model that we consider has been explained at the beginning of the
\hyperref[sec1]{Introduction}. We summarize the assumptions that we shall impose next.
\emph{Assumptions}:
(A1) the matrix $R$ is an $M$-matrix;
(A2) $R^{-1}E\mathbf{X} ( 1 ) <0$ (recall that inequalities apply
coordinate-wise for vectors);
(A3) there exists $\bolds{\theta}>0$, $\bolds{\theta}\in
\mathbb{R}^{d}$ such that
\[
E\bigl[\exp \bigl( \bolds{\theta}^{T}\mathbf{W} ( k ) \bigr)
\bigr] <\infty.
\]
We have commented on (A1) and (A2) in the \hyperref[sec1]{Introduction}. Assumption (A3) is
important in order to do exponential tilting when we simulate a
stationary version of the upper-bound process.
In addition to (A1) to (A3), we shall assume that one can simulate from
exponential tilting distributions associated to the marginal
distribution of
$\mathbf{W} ( k ) $. That is, we can simulate from $
P_{\theta_{i}} ( \cdot ) $ such that
\begin{eqnarray*}
&& P_{\theta_{i}} \bigl( W_{1} ( k ) \in dy_{1},
\ldots,W_{d} ( k ) \in dy_{d} \bigr)
\\
&&\qquad= \frac{\exp ( \theta_{i}y_{i} ) }{E\exp (
\theta_{i}W_{i} ( k ) ) }P \bigl( W_{1} ( k ) \in dy_{1},
\ldots,W_{d} ( k ) \in dy_{d} \bigr),
\end{eqnarray*}
where $\theta_{i}\in\mathbb{R}$ and $E\exp ( \theta_{i}W_{i} (
k ) ) <\infty$. We will determine the value of $\theta_i$ through
assumption (A3b), as given below.
Let us briefly explain our program, which is based on DCFTP. First, we will
construct a \textit{stationary} dominating process $ ( \mathbf{Y}
^{+} ( s ) \dvtx -\infty<s\leq0 ) $ that is \emph{coupled}
with our
target process, that is, a stationary version of the process $ (
\mathbf{Y} ( s ) \dvtx -\infty<s\leq0 ) $ satisfying the Skorokhod
problem (\ref{SP1}). Under coupling, the dominating process satisfies
\begin{equation}
R^{-1}\mathbf{Y} ( s ) \leq R^{-1}\mathbf{Y}^{+}
( s ), \label{Dom}
\end{equation}
for each $s\leq0$. We then simulate the process $\mathbf{Y}^{+} (
\cdot ) $ backward up to a time $-\tau\leq0$ such that $\mathbf{Y}
^{+} ( -\tau ) =0$. Following the tradition of the CFTP literature,
we call a time $-\tau$ such that $\mathbf{Y}^{+} ( -\tau ) =0$ a
coalescence time. Since $\mathbf{Y} ( s ) \geq0$, inequality
(\ref
{Dom}) yields $\mathbf{Y} ( -\tau ) =0$. The next and final
step in our strategy is to evolve the solution $\mathbf{Y}(s)$ of the
Skorokhod problem (\ref{SP1}) forward from $s=-\tau$ to $s=0$ with
$\mathbf{
Y}(-\tau)=0$, \textit{using the same input that drives the
construction of}
$\mathbf{(Y}^+ ( s ) \dvtx -\tau\leq s\leq0)$ so that $\mathbf{Y}$
and $
\mathbf{Y}^+$ are coupled. The output is therefore $\mathbf{Y} (
0 ) $, which is stationary. The precise algorithm will be
summarized in Section~\ref{Subsec_Main1}.
So, a crucial part of the whole plan is the construction of $\mathbf{Y}
^{+} ( \cdot ) $ together with a coupling that guarantees
inequality (\ref{Dom}). In addition, the coupling must be such that one can
use the driving randomness that defines $\mathbf{Y}^{+} ( \cdot
) $
directly as an input to the Skorokhod problem (\ref{SP1}) that is then used
to evolve $\mathbf{Y} ( \cdot ) $. We shall first start by
constructing a time reversed stationary version of a suitable dominating
process~$\mathbf{Y}^{+}$.
\subsection{Construction of the dominating process}\label{sec2.1}
In order to construct the dominating process $\mathbf{Y}^{+} (
\cdot )$, we first need the following result attributed to
\citet{Kella1996} (Lemma~3.1).
\begin{lemma}
\label{LmK_W_Comp}There exists $\mathbf{z}$ such that $E\mathbf{X} (
1 ) <\mathbf{z}$ and $R^{-1}\mathbf{z}<0$. Moreover, if
\[
\mathbf{Z} ( t ) =\mathbf{X} ( t ) -\mathbf{z}t,
\]
and $\mathbf{Y}^{+} ( \cdot ) $ is the solution to the
Skorokhod problem
\begin{eqnarray}\label{SP_Bnd}
d\mathbf{Y}^{+} ( t ) & =&d\mathbf{Z} ( t ) +d\mathbf
{L}
^{+} ( t ),\qquad \mathbf{Y}^{+} ( 0 ) =
\mathbf{y}_{0},
\nonumber
\\[-8pt]
\\[-8pt]
\nonumber
\mathbf{Y}^{+} ( t ) & \geq&0,\qquad Y_{j}^{+}
( t ) \,dL_{j}^{+} ( t )=0,\qquad L _{j}^{+}
( 0 ) =0,
\qquad dL_{j}^{+} ( t ) \geq0,
\end{eqnarray}
then $0\leq R^{-1}\mathbf{Y} ( t ) \leq R^{-1}\mathbf
{Y}^{+} (
t ) $ for all $t\geq0$ where $\mathbf{Y} ( \cdot ) $ solves
the Skorokhod problem
\begin{eqnarray*}
d\mathbf{Y} ( t ) & =&d\mathbf{X} ( t ) +R\,d\mathbf {L} ( t ),\qquad \mathbf{Y} (
0 ) =\mathbf{y}_{0},
\\
\mathbf{Y} ( t ) & \geq& 0,\qquad Y_{j} ( t ) \,dL_{j} ( t )
=0,\qquad L_{j} ( 0 ) =0,\qquad dL_{j} ( t ) \geq0.
\end{eqnarray*}
\end{lemma}
We note that computing $\mathbf{z}$ from the previous lemma is not
difficult. One can simply pick $\mathbf{z}=E\mathbf{X} ( 1 )
+\delta\mathbf{1}$, where $\mathbf{1}= ( 1,\ldots,1 ) ^{T}$
and with $\delta$
chosen so that $0<\delta R^{-1}\mathbf{1}<-R^{-1}E\mathbf{X} (
1 ) $. In what follows we shall assume that $\mathbf{z}$ has been selected in
this form, and we shall assume without loss of generality that
$E[\mathbf{Z}
(1)]<0$.
The Skorokhod problem corresponding to the dominating process can be solved
explicitly. It is not difficult to verify [see, e.g.,
\citet{HarrisonReiman1981}] that if $\mathbf{Y}^{+} ( 0 ) =0$,
the solution of the Skorokhod problem (\ref{SP_Bnd}) is given by
\begin{equation}
\mathbf{Y}^{+} ( t ) =\mathbf{Z} ( t ) -\min_{0\leq
u\leq
t}
\mathbf{Z} ( u ) =\max_{0\leq u\leq t}\bigl(\mathbf{Z} ( t ) -
\mathbf{Z} ( u ) \bigr), \label{SKR_solution}
\end{equation}
where the running maximum is obtained coordinate-by-coordinate.
In order to construct a stationary version of $\mathbf{Y}^{+} (
\cdot ) $ backward in time, we first extend $\mathbf{Z} (
\cdot ) $ to a two-sided compound Poisson process with $\mathbf{Z}
( 0 ) =0$. We define a time-reversal of $\mathbf{Z}(\cdot)$
as $
\mathbf{Z}^{\leftarrow} ( t ) =-\mathbf{Z} ( -t ) $.
It is
easy to check that $\mathbf{Z}^{\leftarrow} ( \cdot ) $ has
stationary and independent increments that are identically distributed as
those of~$\mathbf{Z}(\cdot)$.
For any given $T\leq0$, we define a process $\mathbf{Z}_{T}^{\leftarrow}$
via $\mathbf{Z}_{T}^{\leftarrow} ( t ) =\mathbf{Z}
^{\leftarrow} ( T+t ) $ for $0\leq t\leq|T|$. And for any
given $
\mathbf{y}\geq0$ we define $\mathbf{Y}_{T}^{+} ( t,\mathbf{y} )$
for $0\leq t\leq|T|$ to be the solution to the Skorokhod problem with input
process $\mathbf{Z}_T^{\leftarrow}$, initial condition $\mathbf
{Y}_T^+(0,
\mathbf{y})=\mathbf{y}$ and reflection matrix $R=I$. In detail, $\mathbf
{Y}
_{T}^{+} ( \cdot,\mathbf{y} )$ solves
\begin{eqnarray}\label{Ypls_def}
d\mathbf{Y}_{T}^{+} ( t,\mathbf{y} ) &=&d
\mathbf{Z}
_{T}^{\leftarrow} ( t ) +d\mathbf{L}_{T}^{+}
( t,\mathbf{y}
),\qquad \mathbf{Y}_{T}^{+}( 0,\mathbf{y}) =\mathbf{y},
\nonumber
\\
\mathbf{Y}_T^{+} ( t,\mathbf{y} ) & \geq&0,\qquad Y
_{T,j}^{+} ( t,\mathbf{y} ) \,dL_{T,j}^{+}
( t,\mathbf{y}
) = 0,\\
L_{T,j}^{+} ( 0,
\mathbf{y} ) &=&0,\qquad d{L}_{T,j}^{+} ( t,\mathbf{y} )
\geq0.\nonumber
\end{eqnarray}
According to (\ref{SKR_solution}), if $\mathbf{y}=0$,
\begin{equation}
\mathbf{Y}_{T}^{+} ( t,0 ) =\max_{0\leq u\leq t}
\bigl(\mathbf{Z}
_{T}^{\leftarrow} ( t ) -
\mathbf{Z}_{T}^{\leftarrow} ( u ) \bigr). \label{Exp1_Y_pls}
\end{equation}
Since $E[\mathbf{Z}(1)]<0$, the process $\mathbf{Y}^{+}$ satisfying the
Skorokhod problem (\ref{SP_Bnd}) with orthogonal reflection ($R=I$)
possesses a unique stationary distribution. So, we can construct a
stationary version of $ ( \mathbf{Y}^{+} ( s )
\dvtx -\infty<s\leq0 ) $ as
\begin{equation}
\mathbf{Y}_*^{+} ( s ) =\lim_{T\rightarrow-\infty}
\mathbf{Y}
_{T}^{+} ( -T-s,0 ). \label{Ypls_stat}
\end{equation}
The following representation of $\mathbf{Y}_*^{+}(\cdot)$ is known in
the queueing literature; still we include a short proof to make the
presentation self-contained.
\begin{proposition}\label{pr1}
Given any $t\geq0$,
\begin{equation}
\mathbf{Y}_*^{+}(-t)=-\mathbf{Z}(t)+\max_{t\leq u<\infty}
\mathbf{Z}(u). \label{Stat_version}
\end{equation}
\end{proposition}
\begin{pf}
Expression (\ref{Exp1_Y_pls}) together with the definition of $\mathbf
{Z}
_{T}^{\leftarrow} ( \cdot ) $ yields
\begin{eqnarray*}
\mathbf{Y}_{T}^{+} ( -T+s,0 ) & =&\max_{0\leq u\leq-T+s}
\bigl(\mathbf{Z}^{\leftarrow} ( s ) -\mathbf{Z}^{\leftarrow} ( T+u ) \bigr) =
\max_{T\leq r\leq s}\bigl(\mathbf{Z}^{\leftarrow} ( s ) -
\mathbf{Z}^{\leftarrow} ( r ) \bigr)
\\
&=&\max_{T\leq r\leq s}\bigl(-\mathbf{Z} ( -s ) +\mathbf{Z} ( -r )
\bigr) =-\mathbf{Z} ( -s ) +\max_{T\leq r\leq s}\mathbf{Z} ( -r ).
\end{eqnarray*}
Let $-s=t\geq0$ and $-r=u\geq0$, and we obtain
$\mathbf{Y}_{T}^{+} ( -T-t,0 ) =-\mathbf{Z} (
t ) +\max_{t\leq u\leq-T}\mathbf{Z} ( u )$.
Now send $-T\rightarrow\infty$ and arrive at (\ref{Stat_version}), thereby
obtaining the result.
\end{pf}
\subsection{The structure of the main simulation procedure}\label
{Subsec_Main1}
We now are ready to explain our main algorithm to simulate unbiased samples
from the steady-state distribution of $\mathbf{Y}$. For this purpose,
let us
first define
\[
\mathbf{M} ( t ) =\max_{t\leq u<\infty}\mathbf{Z}(u),
\]
for $t\geq0$ so that $\mathbf{Y}_*^+(-t)=\mathbf{M}(t)-\mathbf{Z}(t)$.
Since $
E[\mathbf{Z} ( 1 ) ] <0$, it follows that $\mathbf{M}(0)<\infty
$, and hence $ ( \mathbf{M} (
t ) \dvtx t\geq0 ) $ is a stochastic process with finite value. We
assume that we can simulate $\mathbf{M} ( \cdot ) $
jointly with $\mathbf{Z}(\cdot)$ until the coalescence time $\tau$, and
we shall
explain how to perform such simulation procedures in Section~\ref{sec2.3}.
\begin{algorithm}[{[Exact sampling of $\mathbf{Y} ( \infty )$]}]\label{alg1}
\textit{Step} 1: Simulate $(\mathbf{M}(t),\mathbf{Z}(t))$ jointly until time
$\tau\geq0$ such that $\mathbf{Z}(\tau)=\mathbf{M}(\tau)$.
\textit{Step} 2: Set $\mathbf{X}_{-\tau}^{\leftarrow} ( t ) =
\mathbf{Z}(\tau)-\mathbf{Z} ( \tau-t ) +\mathbf{z}\times t$, and
compute $\mathbf{Y}_{-\tau} ( t,0 ) $ for $0\leq t\leq
\tau$ that solves the Skorokhod problem with input process $\mathbf{X}
_{-\tau}^{\leftarrow} ( t )$ and initial value $\mathbf{Y}
_{-\tau}(0,0)=0$. In detail, $\mathbf{Y}_{-\tau} ( t,0
) $ solves
\begin{eqnarray*}
d\mathbf{Y}_{-\tau} ( t,0 ) & =&d\mathbf{X}_{-\tau
}^{\leftarrow}
( t ) +R\,d\mathbf{L}_{-\tau} ( t,0 ),
\\
\mathbf{Y}_{-\tau} ( t,0 ) & \geq& 0,\qquad
Y_{-\tau,j}
( t,0 ) \,dL_{-\tau,j} ( t,0 ) =0,\\
L_{-\tau,j} ( 0,0 ) &=&0,\qquad
dL_{-\tau,j} ( t,0 ) \geq0,
\end{eqnarray*}
for $\tau$ units of time.
\textit{Step} 3: Output $\mathbf{Y}_{-\tau} ( \tau,0 ) $
which has the distribution of $\mathbf{Y} ( \infty ) $.
In step 2, the constant $\mathbf{z}$ is chosen according to Lemma~\ref{LmK_W_Comp} such
that $\mathbf{Z}(t)=\mathbf{X}(t)-\mathbf{z}t$. The time $-\tau$ is
precisely the coalescence time as in a DCFTP algorithm. The following
proposition summarizes the validity of this algorithm.
\end{algorithm}
\begin{proposition}\label{pr2}
The previous algorithm terminates with probability one, and its output
is an
unbiased sample from the distribution of $\mathbf{Y} ( \infty
) $.
\end{proposition}
\begin{pf}The argument is similar to the classic Loynes construction.
Let us start by first noting that
\[
\mathbf{Y}_*^{+}(0)=\mathbf{M}(0)=0\vee\bigl(-U_1\bolds{\mu}+
\mathbf {W}(1)+\mathbf{M}'\bigr).
\]
Here $U_1$ is the arrival time of the first job and follows an
exponential distribution. $\mathbf{M}'=\max_{0\leq t<\infty} \mathbf
{Z}(t+U_1)-\mathbf{Z}(U_1)<\infty$ is equal in distribution to $\mathbf
{M}(0)$. Then $P(\mathbf{Y}_*^{+}(0)=0)=P(U_1\geq\max_i (W_i(1)+M'_i)/\mu
_i)>0$ since $U_1$ has infinite support and is independent of both
$\mathbf{W}(1)$ and $\mathbf{M}'$. Therefore, $\mathbf{Y
}^{+} ( \infty ) $ has an atom at zero.
This implies that $
\tau<\infty$ with probability one. Actually, we will show later that
$E[\exp(\delta\tau)]<\infty$ for some $\delta>0$ in Theorem~\ref{th1}. Let
$T<0$, and note that, thanks to
Lemma~\ref{LmK_W_Comp}, for $t\in(0,\vert T\vert ]$
\begin{equation}
R^{-1}\mathbf{Y}_{T}(t,0)\leq R^{-1}
\mathbf{Y}_{T}^{+}(t,0).\label{Comparison}
\end{equation}
In addition, by monotonicity of the solution to the Skorokhod problem
in terms
of its initial condition [see \citet{KellaWhitt1996}], we also have
[using the definition of $\mathbf{Y}^+_T(t,\mathbf{y})$ from (\ref
{Ypls_def}) and $\mathbf{Y}^{+}_*(T)$ from (\ref{Ypls_stat})] that
\begin{equation}
\mathbf{Y}_{T}^{+}(t,0)\leq\mathbf{Y}_{T}^{+}
\bigl(t,\mathbf{Y}
^{+}_*(T)\bigr)=\mathbf{Y}_*^{+}(T+t).\label{Comparison_2}
\end{equation}
So $\mathbf{Y}_*^{+}(T+t)=0$ implies $\mathbf{Y}_{T}^{+}(t,0)=0$. One
step further, as $R^{-1}$ has nonnegative coordinates, equations (\ref
{Comparison}) and (\ref{Comparison_2}) imply that $\mathbf{Y}_{T}(t,0)=0$.
Consequently, if $-T>\tau\geq0$,
\[
\mathbf{Y}_{T}\bigl(\vert T\vert -\tau,0\bigr)=0,
\]
which in particular yields that $\mathbf{Y}_{T}(-T,0)=\mathbf{Y}
_{-\tau}(\tau,0)$. We then obtain that
\[
\lim_{T\to-\infty}\mathbf{Y}_{T}(-T,0)=\mathbf{Y}
_{-\tau}(
\tau,0),
\]
thereby concluding that $\mathbf{Y}_{-\tau}(\tau,0)$ follows the distribution
of $\mathbf{Y} ( \infty ) $ as claimed.
\end{pf}
Step 2 in Algorithm \ref{alg1} is straightforward to implement because the
process $
\mathbf{X}_{-\tau}^{\leftarrow} ( \cdot ) $ is piecewise linear,
and the solution to the Skorokhod problem, namely $\mathbf{Y}_{-\tau
} (
\cdot,0 )$, is also piecewise linear. The gradients are simply
obtained by solving a sequence of linear system of equations which are
dictated by evolving the ordinary differential equations given in (\ref
{S1b}
). Therefore, the most interesting part is the simulation of the stochastic
object $ ( \mathbf{M} ( t ) \dvtx 0\leq t\leq\tau ) $ in
step 1,
as we will discuss in Section~\ref{sec2.3}.
\subsection{Simulation of the stationary dominating process}\label{sec2.3}
As is customary, we use the notation $E_{0} ( \cdot ) $ or $
P_{0} ( \cdot ) $ to indicate the conditioning $\mathbf{Z} (
0 ) =0$. We define $\phi_i(\theta)=E_0[\exp(\theta Z_i(1))]$ to be the
moment-generating function of $Z_i(1)$, and let $\psi_i(\theta)=\log
(\phi_i(
\theta))$. In order to simplify the explanation of the simulation procedure
to sample $ ( \mathbf{M} ( t ) \dvtx t\geq0 ) $, we introduce
the following assumption:
\emph{Assumption}: (A3b) Suppose that in every dimension $i$ there
exists $\theta_{i}^{\ast}\in ( 0,\infty ) $ such that
\[
\psi_{i} \bigl( \theta^*_{i} \bigr) =\log E_{0}
\exp \bigl( \theta _{i}^*Z_{i} ( 1 ) \bigr) = 0.
\]
This assumption is a strengthening of assumption (A3), and it is known as
Cramer's condition in the large deviations literature. As we shall explain
at the end of Section~\ref{sec2.3}, it is possible to dispense with this assumption and
only work under assumption (A3). For the moment, we continue under assumption
(A3b).
We wish to simulate $ ( \mathbf{Z} ( t ) \dvtx 0\leq t\leq
\tau ) $ where $\tau$ is a time such that
\[
\mathbf{Z}(\tau)=\mathbf{M}(\tau)=\max_{s\geq\tau}\mathbf{Z}(s)\quad
\mbox{and hence}\quad
\forall0\leq t\leq\tau, \qquad\mathbf{M}(t)=\max_{t\leq s\leq\tau
}
\mathbf{Z
}(s).
\]
Recall that $-\tau$ is precisely the coalescence time since $\mathbf{Y}
^+_*(-\tau)=0$. We also keep in mind that our formulation at the
beginning of the \hyperref[sec1]{Introduction} implies that
\[
\mathbf{Z}(t)=\mathbf{J} ( t ) -R\mathbf{r}t-\mathbf{z}
t=\sum
_{k=1}^{N ( t ) }\mathbf{W} ( k ) -R\mathbf{r}t-
\mathbf{z}t,
\]
where $\mathbf{z}$ is selected according to Lemma~\ref{LmK_W_Comp}. Define
\[
\bolds{\mu}=R\mathbf{r}+\mathbf{z},
\]
and let $\mu_{i}>0$ be the $i$th coordinate of $\bolds{\mu}$. In addition,
we assume that we can choose a constant $m>0$ large enough such that
\begin{equation}
\sum_{i=1}^{d}\exp \bigl( -
\theta^*_{i}m \bigr) <1. \label{L1}
\end{equation}
Define
\begin{equation}
T_{m}=\inf\bigl\{t\geq0\dvtx Z_{i}(t)\geq m,\mbox{for
some } i\bigr\}. \label{T_m}
\end{equation}
Now we are ready to propose the following procedure to simulate $\tau$:
\renewcommand{\thealgorithm}{1.1}
\begin{algorithm}[(Simulating the coalescence time)]\label{alg1.1}
The output of this algorithm is $ ( \mathbf{Z} ( t ) \dvtx 0\leq
t\leq\tau ) $, and the coalescence time $\tau$. Choose the
constant $m$
according to (\ref{L1}):
\begin{longlist}[(1)]
\item[(1)] Set $\tau=0$, $\mathbf{Z} ( 0 ) =0$.
\item[(2)] Generate an inter-arrival time $U$ distributed Exp$(\lambda
)$, and
sample $\mathbf{W}= ( W_{1},\ldots,W_{d} ) $ independent of $U$.
\item[(3)] Let $\mathbf{Z} ( \tau+t ) =\mathbf{Z} ( \tau
) -t
\bolds{\mu}$ for $0\leq t<U$ and $\mathbf{Z} ( \tau+U )
=\mathbf{Z}
( \tau ) +\mathbf{W}-U\bolds{\mu}$.
\item[(4)] If there exists an index $i$, such that $W_{i}-U\mu_{i}\geq-m$,
then return to step~2 and reset $\tau\longleftarrow\tau+U$. Otherwise,
sample a Bernoulli $I $ with parameter $p=P_{0}(T_{m}<\infty)$.
\item[(5)] If $I=1$, simulate a new \textit{conditional path} $ (
\mathbf{C}
( t ) \dvtx 0\leq t\leq T_{m} ) $ following the conditional
distribution of $ ( \mathbf{Z} ( t ) \dvtx 0\leq t\leq
T_{m} ) $
given that $T_{m}<\infty$ and $\mathbf{Z} ( 0 ) =0$. Let $
\mathbf{Z} ( \tau+t ) =\mathbf{Z} ( \tau ) +\mathbf{C}
( t ) $ for $0\leq t\leq T_{m}$, and reset $\tau\longleftarrow
\tau+$
$T_{m}$. Return to step~2.
\item[(6)] Else, if $I=0$, stop and return $\tau$ along with the feed-in
path $
( \mathbf{Z}(t)\dvtx 0\leq t\leq\tau ) $.
\end{longlist}
\end{algorithm}
We shall now explain how to execute the key steps in the previous algorithm,
namely, steps 4 and 5.
\subsubsection{Simulating a path conditional on reaching a positive
level in
finite time}\label{sec2.3.1}
The procedure that we shall explain now is an extension of the
one-dimensional procedure given in \citet{BlanchetSigman2011}; see also the
related one-dimensional procedure by \citet{EnsorGlynn2000}. The strategy
is to use acceptance/rejection. The proposed distribution is based on
importance sampling by means of exponential tilting. In order to describe
our strategy, we need to introduce some notation.
We think of the probability measure $P_{0} ( \cdot ) $ as defined
on the canonical space of right-continuous with left-limits $\mathbb
{R}^{d}$-valued functions, namely, the ambient space of ($\mathbf{Z} (
t )
\dvtx t\geq0)$ which we denote by $\Omega=D_{[0,\infty)} ( \mathbb{R}
^{d} ) $. We endow the probability space with the Borel $\sigma$-field
generated by the Skorokhod $J_{1}$ topology; see \citet{Billingsley1999}.
Our goal is to simulate from the conditional law of $(\mathbf
{Z}(t)\dvtx 0\leq
t\leq T_m)$ given that $T_m<\infty$ and $\mathbf{Z}(0)=0$, which we
shall denote by $P^*_0$ in the rest of this part.
Now let us introduce our proposed distribution, $P_{0}^{\prime} (
\cdot ) $, defined on the space $\Omega^{\prime}=D_{[0,\infty
)} (
\mathbb{R}^{d} ) \times\{1,2,\ldots,d\}$. We endow the probability space
with the product $\sigma$-field induced by the Borel $\sigma$-field
generated by the Skorokhod $J_{1}$ topology and all the subsets of $
\{1,2,\ldots,d\}$. So, a typical element $\omega^{\prime}$ sampled
under $
P_{0}^{\prime} ( \cdot ) $ is of the form $\omega^{\prime
}=((
\mathbf{Z} ( t ) \dvtx t\geq0),\operatorname{Index})$, where $\operatorname{Index}\in\{
1,2,\ldots,d\}$.
The distribution of $\omega^{\prime}$ induced by $P_{0}^{\prime} (
\cdot ) $ is described as follows. First, set
\begin{equation}
P_{0}^{\prime} ( \operatorname{Index}=i ) =w_{i}:=
\frac{\exp (
-\theta_{i}^{\ast}m ) }{\sum_{j=1}^{d}\exp (
-\theta_{j}^{\ast}m ) }. \label{DisK}
\end{equation}
Now, given $\operatorname{Index}=i$, for every set $A\in\sigma(\mathbf{Z} (
s )
\dvtx 0\leq s\leq t)$,
\[
P_{0}^{\prime} ( A|\operatorname{Index}=i ) =E_{0}\bigl[ \exp
\bigl( \theta_{i}^{\ast}Z_{i} ( t ) \bigr)
I_{A}\bigr].
\]
So, in particular, the Radon--Nikodym derivative (i.e., the likelihood ratio)
between the distribution of $\omega=(\mathbf{Z} ( s ) \dvtx 0\leq
s\leq
t)$ under $P_{0}^{\prime} ( \cdot ) $ and $P_{0} ( \cdot
)
$ is given by
\[
\frac{dP_{0}^{\prime}}{dP_{0}} ( \omega ) =\sum_{i=1}^{d}w_{i}
\exp \bigl( \theta_{i}^{\ast}Z_{i} ( t ) \bigr) .
\]
\textit{The distribution of $(\mathbf{Z} ( s ) \dvtx s\geq0)$ under
$
P_{0}^{\prime} ( \cdot ) $ is precisely the proposed distribution
that we shall use to apply acceptance/rejection.} It is straightforward to
simulate under $P_{0}^{\prime} ( \cdot ) $. First, sample $\operatorname{Index}$
according to the distribution (\ref{DisK}). Then, conditional on $\operatorname{Index}=i$,
the process $\mathbf{Z} ( \cdot ) $ also follows a compound Poisson
process. Given $\operatorname{Index}=i$, under $P_{0}^{\prime} ( \cdot ) $,
it follows that $\mathbf{J} ( t ) $ can be represented as
\begin{equation}
\mathbf{J} ( t ) =\sum_{k=1}^{\hat{N} ( t ) }\mathbf
{W}
^{\prime} ( k ), \label{J_prime}
\end{equation}
where $\hat{N} ( \cdot ) $ is a Poisson process with rate $
\lambda E[\exp(\theta^*_iW_i)]$. In addition, the distribution of
$\mathbf{W}^{\prime
}$ is obtained by exponential tilting such that for all $A\in\sigma
(\mathbf{W
})$,
\begin{equation}
P^{\prime}\bigl(\mathbf{W}^{\prime}\in A\bigr)=E\bigl[\exp\bigl(
\theta_i^*W_i\bigr)I_A\bigr]. \label{J_P2}
\end{equation}
In sum, conditional on $\operatorname{Index}=i$, we simply let
\begin{equation}
\mathbf{Z} ( t ) =\sum_{k=1}^{\hat{N} ( t ) }\mathbf
{W}
^{\prime} ( k ) -\bolds{\mu}t. \label{J_P3}
\end{equation}
Now, note that we can write
\begin{eqnarray*}
E_{0}^{\prime} \bigl( Z_{\operatorname{Index}} ( t ) \bigr) & =&\sum
_{i=1}^{d}E_{0}
\bigl(Z_{i}(t)\exp \bigl( \theta_{i}^{\ast}Z_{i}
( t ) \bigr) \bigr)P^{\prime} ( \operatorname{Index}=i )
\\
& =&\sum_{i=1}^{d}\frac{d\phi_{i} ( \theta_{i}^{\ast} )
}{d\theta}
w_{i}>0,
\end{eqnarray*}
where the last inequality follows by convexity of $\psi_{k} (
\cdot ) $ and by definition of $\theta_{k}^{\ast}$. So, we have
that $
Z_{\operatorname{Index}} ( t ) \nearrow\infty$ as $t\nearrow\infty$ with
probability one under $P_{0}^{\prime} ( \cdot ) $ by the law of
large numbers. Consequently $T_{m}<\infty$ a.s. under $P_{0}^{\prime
} (
\cdot ) $.
Recall that $P_{0}^{\ast} ( \cdot ) $ is the conditional law
of $
( \mathbf{Z} ( t ) \dvtx 0\leq t\leq T_{m} ) $ given that
$
T_{m}<\infty$ and $\mathbf{Z} ( 0 ) =0$. In order to
assure that we can indeed apply acceptance/rejection theory to simulate
from $P^*_0(\cdot)$, we need to show that the likelihood ratio $
dP_{0}/dP_{0}^{\prime}$ is bounded:
\begin{eqnarray}
\label{ARB} && \frac{dP_{0}^{\ast}}{dP_{0}^{\prime}} \bigl( \mathbf{Z} ( t ) \dvtx 0\leq t\leq
T_{m} \bigr)
\nonumber
\\
&&\qquad =\frac{1}{P_{0} ( T_{m}<\infty ) }\times\frac{dP_{0}}{
dP_{0}^{\prime}} \bigl( \mathbf{Z} ( t ) \dvtx 0\leq
t\leq T_{m} \bigr)
\\
&&\qquad =\frac{1}{P_{0} ( T_{m}<\infty ) }\times\frac{1}{
\sum_{i=1}^{d}w_{i}\exp ( \theta_{i}^{\ast}Z_{i} ( T_{m} )
) }.\nonumber
\end{eqnarray}
At time $T_m$, there is an index $L$ ($L$ may be different from $\operatorname{Index}$) such
that $\exp ( \theta_{L}^{\ast}Z_{L} ( T_{m} ) ) \geq
\exp ( \theta_{L}^{\ast}m ) $, therefore
\begin{equation}
\frac{1}{\sum_{i=1}^{d}w_{i}\exp ( \theta_{i}^{\ast}Z_{i} (
T_{m} ) ) } \leq\frac{1 }{w_{L}\exp (
\theta_{L}^{\ast}m )}=\sum_{i=1}^{d}
\exp \bigl( -\theta _{i}^{\ast}m \bigr) < 1, \label{B_1}
\end{equation}
where the last inequality follows by (\ref{L1}). Consequently, plugging
(\ref
{B_1}) into (\ref{ARB}) we obtain that
\begin{equation}
\frac{dP_{0}^{\ast}}{dP_{0}^{\prime}} \bigl( \mathbf{Z} ( t ) \dvtx 0\leq t\leq T_{m}
\bigr) \leq\frac{1}{P_{0} ( T_{m}<\infty ) }. \label{ARB_1}
\end{equation}
We now are ready to summarize our acceptance/rejection procedure and the
proof of its validity.
\renewcommand{\thealgorithm}{1.1.1}
\begin{algorithm}[(Simulation of paths conditional on
$T_{m}<\infty$)]\label{alg1.1.1}
\textit{Step} 1: Sample $ ( \mathbf{Z} ( t ) \dvtx 0\leq t\leq
T_{m} ) $ according to $P_{0}^{\prime} ( \cdot ) $ as
indicated via equations~(\ref{DisK}), (\ref{J_prime}) and (\ref{J_P3}).
\textit{Step} 2: Given $ ( \mathbf{Z} ( t ) \dvtx 0\leq t\leq
T_{m} ) $, simulate a Bernoulli $I$ with probability
\[
\frac{1}{\sum_{i=1}^{d}w_{i}\exp ( \theta_{i}^{\ast}Z_{i} (
T_{m} ) ) }.
\]
[Note that the previous quantity is less than unity due to (\ref{B_1}
).]
\textit{Step} 3: If $I=1$, output $ ( \mathbf{Z} ( t )
\dvtx 0\leq
t\leq T_{m} ) $ and Stop, otherwise go to step 1.
\end{algorithm}
\begin{proposition}\label{pr3}
The probability that $I=1$ at any given call of step 3 in Algorithm
\ref{alg1.1.1} is
$P_0(T_m<\infty)$. Moreover, the output of Algorithm \ref{alg1.1.1} follows the
distribution $P^*_0$.
\end{proposition}
\begin{pf}
The result follows directly from the theory of acceptance/rejection; see
\citet{AsmussenGlynn2007}, pages 39--42. According to it, since the two
probability measures $P_0^*$ and $P'_0$ satisfy
\[
\frac{dP^*_0}{dP'_0}\leq c = \frac{1}{P_0(T_m<\infty)},
\]
as indicated by (\ref{ARB}) and (\ref{ARB_1}), one can sample exactly
from $P_0^*$ by the so-called acceptance/rejection procedure:
\begin{longlist}[(1)]
\item[(1)] Generate i.i.d. samples $\{\omega_i\}$ from $P'_0$ and
i.i.d. random numbers $U_i\sim U[0,1]$ independent of $\{\omega_i\}$.
\item[(2)] Define $N=\inf\{n\geq1\dvtx U_n\leq c^{-1}\frac{dP^*_0}{
dP'_0}(\omega_i)\}$.
\item[(3)] Output $\omega_N$.
\end{longlist}
The output $\omega_N$ follows exactly the law $P^*_0$, and $N$ is a
geometric random variable with mean $c$; in other words, the
probability of accepting a proposal is $c^{-1}$. In our specific case, we
have $c=1/P_0(T_m<\infty)$, and according to (\ref{ARB}) the likelihood
ratio divided by the constant $c$ is
\[
c^{-1}\frac{dP^*_0}{ dP'_0}(\omega)=\frac{1}{\sum_{i=1}^{d}w_{i}\exp
( \theta_{i}^{\ast}Z_{i} (
T_{m} ) )}.
\]
Therefore, Algorithm \ref{alg1.1.1} has acceptance probability
$P(I=1)=P_0(T_m<\infty)$, and it generates a path exactly from $P^*_0$
upon acceptance.
\end{pf}
As the previous result shows, the output of the previous procedure
follows exactly the distribution of $ ( \mathbf{Z} ( t )
\dvtx 0\leq t\leq
T_{m} ) $ given that $T_{m}<\infty$ and $\mathbf{Z} ( 0 )
=
0$. Moreover, the Bernoulli random variable $I$ has probability $
P_{0} ( T_{m}<\infty ) $ of success. So this procedure actually
allows both steps 4 and 5 in Algorithm \ref{alg1.1} to be executed
simultaneously. In
detail, one simulates a path following the law of $P^{\prime}_0$ until
$T_m$, and then, if the proposed path is accepted, it can be concluded that
$T_m$ is
finite and the proposed path is exactly a sample path following the law
of $
P^*_0$; otherwise one can conclude that $T_m=\infty$.
\begin{remark*}
As mentioned earlier, assumption (A3b) is a
strengthening of
assumption (A3). We can carry out our ideas under assumption (A3) as follows.
First, instead of $(\mathbf{M} ( t ) \dvtx t\geq0)$, we consider
the following
process $\mathbf{Z}_{\mathbf{a}}(\cdot)$ and $\mathbf{M}_{\mathbf
{a}} (
\cdot ) $ defined by
\[
\mathbf{Z}_{\mathbf{a}} ( t ):=\mathbf{Z} ( t ) +\mathbf{a
}t,\qquad
\mathbf{M}_{\mathbf{a}} ( t ) =\max_{s\geq t} \bigl(
\mathbf{Z}
_{\mathbf{a}} ( s ) \bigr).
\]
We shall explain how to choose the nonnegative vector $
\mathbf{a}= ( a_{1},a_{2},\ldots,a_{d} ) ^{T}$ in a moment.
Note that we can simulate $ ( \mathbf{M} ( t ) \dvtx t\geq
0 ) $
jointly with $ ( \mathbf{Z} ( t ) \dvtx t\geq0 ) $ if we are
able to simulate $ ( \mathbf{M}_{\mathbf{a}} ( t )
\dvtx t\geq0 ) $ jointly with $(\mathbf{Z}_{\mathbf{a}} ( t )
\dvtx t\geq0)$. Now note that $\psi_{i} ( \cdot ) $ is strictly convex
and that $\dot{\psi}_{i} ( 0 ) <0$, so there exists $a_{i}>0$
large enough to force the existence of $\theta_{i}^{\ast}>0$ such that $
E\exp ( \theta_{i}^{\ast}Z_{i} ( 1 )
+a_{i}\theta_{i}^{\ast} ) =1$, but at the same time small enough to
keep $E ( Z_{i} ( 1 ) +a_{i} ) <0$; again, this
follows by
strict convexity of $\psi_{i} ( \cdot ) $ at the origin. So, if
assumption (A3b) does not hold, but assumption (A3) holds, one can then
execute Algorithm \ref{alg1.1} based on the process $\mathbf{Z_a}(\cdot)$.
\end{remark*}
\subsection{Computational complexity}\label{sec2.4}
In this section we provide a complexity analysis of our algorithm. We first
make some direct observations assuming the dimension of the network remains
fixed. In particular, we note that the expected number of random variables
simulated has a finite moment-generating function in a neighborhood of the
origin.
\begin{theorem}\label{th1}
Suppose that \textup{(A1)} to \textup{(A3)} are in force. Let $\tau$ be the coalescence
time, and
$N$ be the number of random variables generated to terminate the overall
procedure to sample $\mathbf{Y} ( \infty ) $. Then there
exists $
\delta>0$ such that
\[
E\exp ( \delta\tau+\delta N ) <\infty.
\]
\end{theorem}
\begin{pf}
This follows directly from classical results about random walks; see
\citet{Gut2009}. In particular it follows that $E_{0}^{\prime}(\exp (
\delta T_{m} ) )<\infty$. The rest of the proof follows from elementary
properties of compound geometric random variables arising from the
acceptance/rejection procedure.
\end{pf}
We are more interested, however, in complexity properties as the network
increases. We shall impose some regularity conditions that allow us to
consider a sequence of systems indexed by the number of dimensions $d$. We
shall grow the size of the network in a meaningful way; in particular, we
need to make sure that the network remains stable as the dimension $d$
increases. Additional regularity will also be imposed.
\emph{Assumptions}:
There exist two constants $0<\delta<1<H<\infty$ independent of $d$
satisfying the following conditions:
(C1) $R^{-1}E[\mathbf{X}(1)]<-2\delta R^{-1}\mathbf{1}$ in each network.
(C2) Let $\theta^*_i$ for $i=1,\ldots,d$ be the tilting parameters as
defined in
assumption~(A3b), then
\[
E\exp \bigl[ \bigl(\delta+\theta_i^*\bigr) W_{i} \bigr]
\leq H <\infty
\]
and
\[
H> \delta+\theta^{*}_{i}\qquad\mbox{for all }1\leq i\leq d.
\]
(C3) The arrival rate $\lambda\in(\delta, H)$.
\begin{remark*}
Assumption (C1) implies that $\bolds{\mu}=R\mathbf
{r}+\mathbf{
z}>\delta\mathbf{1}$, where $\mathbf{z}$ is defined according to Lemma~\ref{LmK_W_Comp}. In
detail, we choose $\mathbf{z}=E[\mathbf{X}(1)]+\delta\mathbf{1}$ and
therefore, $R\mathbf{r}+\mathbf{z}=E[\mathbf{J}(1)]+\delta\mathbf
{1}>\delta
\mathbf{1}$.
Note that $x\leq\exp(ax)/(ae)$ for any $a>0$ and $x\geq0$. Plugging in
$a=\theta^*_i+\delta$, we have $
E[W_i]\leq E[\exp((\theta^*_i+\delta)W_i)]/(e(\delta+\theta
^*_i))<H/(e\delta)$ and therefore
\[
\bolds{\mu}=\lambda E[\mathbf{W}]+\delta\mathbf{1}<\bigl(H^2/(e
\delta )+\delta\bigr)\mathbf{1}=H'\mathbf{1},
\]
where $H'=H^2/(e\delta)+\delta$.
Similarly, we also have that $E[W_i^2]\leq E[4\exp((\theta^*_i+\delta
)W_i)]/(e^2(\theta^*_i+\delta)^2)\leq4H/(e^2\delta^2)$, and then we
can compute
\begin{eqnarray*}
E\bigl[Z_i(1)^2\bigr]&=&E\Biggl[\Biggl(\sum
_{k=1}^{N(1)}W_i(k)-\mu_i
\Biggr)^2\Biggr]\leq2E\Biggl[\mu_i^2+\Biggl(
\sum_{k=1}^{N(1)}W_i(k)
\Biggr)^2\Biggr]
\\
&\leq&2\mu_i^2+2\bigl(\lambda+\lambda^2\bigr)
\frac{4H}{e^2\delta^2}\leq 2{H'}^2+\frac{8(H^2+H^3)}{e^2\delta^2}:=H''.
\end{eqnarray*}
In sum, we can conclude that
\[
\max_{1\leq i\leq d}E_0\bigl[Z_{i}(1)^2
\bigr]\leq H''.
\]
In the complexity analysis, we shall only use the fact that $H$, $H'$
and $H''$ are constants independent of $d$. As a result, for the
simplicity of notation, we shall write $H$ for $H$, $H'$ and $H''$
in the rest of this section and assume, without loss of generality, that
\[
\bolds{\mu}\leq H\mathbf{1}\quad\mbox{and}\quad\max_{1\leq i\leq
d}E_0
\bigl[Z_{i}(1)^2\bigr]\leq H.
\]
\end{remark*}
As discussed in Section~\ref{sec2.3.1}, in Algorithm \ref{alg1.1}, we actually do steps~4 and~5 simultaneously. Therefore, we can rewrite Algorithm \ref{alg1.1} as follows:
\renewcommand{\thealgorithm}{1.1$'$}
\begin{algorithm}[(Simulate the coalescence time)]\label{alg1.1'}
\begin{longlist}[(1)]
\item[(1)] Set $\tau=0$, $\mathbf{Z}(0)=0$, $N=0$.
\item[(2)] Simulate a sample from $\mathbf{W}-U\bolds{\mu}$. Here
$U$ is
exponentially distributed with mean $1/\lambda$ and independent of
$\mathbf{W
}$. Record the value of $\mathbf{Z}(t)$ for $\tau\leq t\leq\tau+U$.
Reset $
N\leftarrow N+1$, $\mathbf{Z}(\tau+U)\leftarrow\mathbf{Z}(\tau)+\mathbf
{W}-U
\bolds{\mu}$, $\tau\leftarrow\tau+U$.
\item[(3)] If there exists some index $i$, such that $W_{i}-Ur_{i}\geq-m$,
return to step 2.
\item[(4)] Otherwise, simulate a random walk $\{\mathbf{C}(n)\}$ such
that $
\mathbf{C}(0)=0$ and $\mathbf{C}(n)=\mathbf{C}(n-1)+\mathbf{W}^{\prime
}(n)-U^{\prime}(n)\bolds{\mu}$, where $\mathbf{W}^{\prime
}(n)-U^{\prime
}(n) \bolds{\mu}$ are independent and identically distributed as
$\mathbf{W}
^{\prime}-U^{\prime}\bolds{\mu}$ under the tilted measure
$P^{\prime}$
defined in Section~\ref{sec2.3.1} through (\ref{J_prime}) to (\ref{J_P3}). Perform
the simulation until $N_{m}=\inf\{n\geq0\dvtx C_{i}(n)>m\mbox{ for some }i\}$.
\item[(5)] Reset $N\leftarrow N+N_{m}$. Compute $p=1/\sum_{k=1}^{d}
w_{k}\exp
(\theta^{*}_{k} C_{k}(N_{m}))$, and sample a Bernoulli $I$ with
probability $
p $. If $I=1$, $\mathbf{Z}(\tau+\sum_{k=1}^{N_{m}}U^{\prime
}(k))=\mathbf{Z}
(\tau)+\mathbf{C}(N_{m})$ and $\tau=\tau+\sum_{k=1}^{N_{m}}U^{\prime}(k)$.
Return to step 2.
\item[(6)] If $I=0$, stop and output $\tau$ with $ ( \mathbf
{Z}(t)\dvtx 0\leq
t\leq\tau ) $.
\end{longlist}
\end{algorithm}
In this algorithm, the total number of random variables required to generate
is $d\cdot N$. We write $N(d)$ instead of $N$ to emphasize the dependence on the
number of dimensions $d$. The following result shows that our algorithm has
polynomial complexity with respect to $d$:
\begin{theorem}\label{th2}
Under assumptions \textup{(C1)} to \textup{(C3)},
\[
E\bigl[N(d)\bigr]=O\bigl(d^{\gamma}\bigr)\qquad\mbox{as }d\to\infty,
\]
for some $\gamma$ depending on $\delta$ and $H$.
\end{theorem}
Denote the number of Bernoulli's generated in step 5 by $N_{b}$ and the
number of random variables generated before executing step 4 in a single
iteration by $N_{a}$. By Wald's identity, we can conclude
\[
E\bigl[N(d)\bigr]=E[N_{b}]\bigl(E[N_{a}]+E[N_{m}]
\bigr).
\]
The following proposition gives an estimate for $E[N_{m}]$.
\begin{proposition}\label{pr4}
Under assumptions \textup{(C1)} to \textup{(C3)},
\[
E[N_{m}]=O(\log d),
\]
and the coefficient in the bound depends only on $\delta$ and $H$.
\end{proposition}
\begin{pf}
First, let us consider the cases in which $W_{i}$ are uniformly bounded from
above by some constant $B$.
Recall that $\phi_i(\theta)=E_0[\exp(\theta Z_i(1))]$. Given $\operatorname{Index}=i$,
one can check that $E_0'[C_i(1)]= \dot{\phi}_i(\theta_i^*)/(\lambda
E[\exp(\theta^*_iW_i)])\geq\dot{\phi}_i(\theta_i^*)/(\lambda H)$.
$N_{m}$ is a stopping time and $C_{i}(N_{m})<m+B$. By the optional
sampling theorem, we have
\[
E[N_{m}]=\sum_{i=1}^d
\omega_i\frac{ E_0'[C_{i}(N_m)]}{E_0'[C_i(1)]}\leq \sum_{i=1}^d
\omega_i\frac{\lambda H(m+B)}{\dot{\phi}_i(\theta_i^*)}.
\]
For each $1\leq i\leq d$, we are going to estimate a lower bound for
$\dot{\phi}_i(\theta_i^*)$. Using Taylor's expansion around 0, we have
\[
\phi_{i}\bigl(\theta_{i}^{*}\bigr)=
\phi_{i}(0)+\theta_{i}^{*}\dot{
\phi}_{i}
(0)+\frac{(\theta^{*}_{i})^{2}}{2}\ddot{\phi}_{i}
\bigl(u_1\theta^{*}
_{i}\bigr),
\]
for some $u_1\in[0,1]$. As $\phi_{i}(\theta_{i}^{*})=\phi_{i}(0)=1$, we have
\[
\theta_{i}^{*}\dot{\phi}_{i}(0)+
\frac{(\theta^{*}_{i})^{2}}{2}
\ddot{\phi}_{i}\bigl(u_1
\theta^{*}_{i}\bigr)=0.
\]
As $\theta^{*}_{i}>0$,
\begin{equation}
\dot{\phi}_{i}(0)+\frac{\theta^{*}_{i}}{2}\ddot{\phi} _{i}
\bigl(u_1\theta^{*}_{i}\bigr)=0.\label{theta}
\end{equation}
Under assumption (C1), $\dot{\phi}_{i}(0)=E_0[Z_{i}(1)]<-\delta$. Under
assumption (C2), we have that
\begin{eqnarray*}
E_0\bigl[\exp\bigl(\bigl(\delta+\theta_i^*
\bigr)Z_{i}(1)\bigr)\bigr]&\leq&\exp\bigl(\lambda\log\bigl(E\bigl[\exp
\bigl(\bigl(\delta+\theta_i^*\bigr) W_i\bigr)\bigr]\bigr)
\bigr)\\
&\leq& H^\lambda\leq H^H\triangleq H_1<\infty.
\end{eqnarray*}
As a result,
\begin{eqnarray*}
\ddot{\phi}_{i}\bigl(u_1\theta^{*}_{i}
\bigr) & =&E\bigl[Z_{i}(1)^{2}\exp\bigl(u_1\theta
^{*}_{i}Z_{i}(1)\bigr)\bigr]
\\
& \leq& E\bigl[Z_{i}(1)^{2}I\bigl(Z_{i}(1)\leq0\bigr)\bigr]
+E\bigl[Z_{i}(1)^{2}\exp\bigl(\theta^{*}_{i}Z_{i}(1)\bigr)I
\bigl(Z_{i}(1)>0\bigr)\bigr]
\\
& \leq& E\bigl[Z_{i}(1)^{2}\bigr]+E\bigl[Z_{i}(1)^{2}
\exp\bigl(\theta^{*}_{i}Z_{i}(1)\bigr)I
\bigl(Z_{i}(1)>0\bigr)\bigr]
\\
&\leq& E\bigl[Z_i(1)^2\bigr]+ E\bigl[Z_{i}(1)^{2}
\exp\bigl(-\delta Z_{i}(1)\bigr)\cdot\exp\bigl(\bigl(\delta+
\theta_i^*\bigr) Z_{i}(1)\bigr)\bigr].
\end{eqnarray*}
Besides, one can check that for any $x>0$, $x^{2}\exp(-\delta x)\leq
4e^{-2}/\delta^2$. Therefore,
\begin{eqnarray*}
\ddot{\phi}_{i}\bigl(u_1\theta^{*}_{i}\bigr) &
\leq
& E\bigl[Z_{i}(1)^{2}\bigr]+\frac
{4}{\delta^2}e^{-2}E
\bigl[\exp\bigl(\bigl(\delta+\theta_i^*\bigr) Z_{i}(1)
\bigr)\bigr]
\\
& \leq& H+\frac{4}{\delta^2}e^{-2}H_1.
\end{eqnarray*}
Plug this result into equation (\ref{theta}) and use that $\dot{\phi
}_i(0)<-\delta$ to complete the inequality
\begin{equation}
\theta^{*}_{i}\geq\frac{2\delta}{H+4e^{-2}H_1/\delta^2}.\label{thetastar}
\end{equation}
On the other hand, by a Taylor expansion of $\phi_i(\cdot)$ around
$\theta_i^*$, we can conclude that
\begin{equation}
\dot{\phi}_i\bigl(\theta^*_i\bigr)=\frac{\theta^*_i}{2}
\ddot{\phi}\bigl(u_2\theta _i^*\bigr),\label{phi_prime}
\end{equation}
for some $u_2\in[0,1]$.
Note that
\begin{eqnarray*}
\ddot{\phi}_{i}\bigl(u_2\theta^{*}_{i}
\bigr) & =&E_0\bigl[Z_{i}(1)^{2}\exp
\bigl(u_2\theta ^{*}_{i}Z_{i}(1)
\bigr)\bigr] \geq E_0\bigl[Z_{i}(1)^{2}\exp
\bigl(u_2\theta ^*_iZ_i(1)\bigr)I(U>1)\bigr]
\\
&\geq& E\bigl[\mu_i^2\exp\bigl(-\theta^*_i
\mu_i\bigr)I(U>1)\bigr]\geq\mu_i^2\exp(-H\mu
_i)\exp(-\lambda)\\
&\geq&\delta^2\exp\bigl(-H^2-H
\bigr).
\end{eqnarray*}
Thus (\ref{thetastar}) together with (\ref{phi_prime}) imply
\begin{equation}
\dot{\phi}_{i}\bigl(\theta^{*}_{i}\bigr)\geq
\frac{1}{2}\theta^{*}_{i} \delta^{2}
e^{-H^2-H}
\geq\frac{\delta^3 e^{-H^2-H}}{H+4e^{-2}H_1/\delta^2}.\label{phi_prime_bound}
\end{equation}
Note that for lower bound (\ref{phi_prime_bound}) to hold, we do not
require $W_i$ to be bounded.
Therefore,
\[
E[N_{m}]\leq\sum_{i=1}^d
\omega_i\frac{
\lambda H(m+B)}{\dot{\phi}_i(\theta_i^*)} \leq\frac{\lambda H(m+B)(H+4e^{-2}H_1/\delta^2)}{\delta^3 e^{-H^2-H}
},
\]
as $\omega_i>0$ and $\sum_i \omega_i=1$.
By (\ref{thetastar}), we have that $\theta^*_i$ are all uniformly
bounded away from 0, so we can choose $m=O(\log d/\min_{i}
\theta^{*}_{i})=O(\log d)$ to satisfy equation (\ref{L1}). Now we can
conclude that $E[N_{m}]=O(\log d)$ as $B$, $H$ and $\delta$ are all
constants independent of~$d$.
Now, let us consider the more general cases when the $W_{i}$'s are not
bounded from
above. Recall that $\mathbf{W}'$ is derived from $\mathbf{W}$ by
exponential tilting; see (\ref{J_P2}). For any $B>0$, define $\tilde
{\mathbf{W}}'$ by $\tilde{W}'_{i}=W'_{i}I(W'_i\leq B)$ as the
truncation of $\mathbf{W}'$, and define the random walk $\tilde
{C}_{i}(n)=\tilde{C}
_{i}(n-1)+\tilde{W}'_{i}(n)-U'(n)\mu_{i}$. Let $\tilde{N}_{m}=\inf\{n\dvtx \tilde{C}_{i}(n)>m\mbox{ for some }i\}$. Since $\tilde{C}_{i}(n)\leq
C_{i}
(n)$, we have $N_{m}\leq\tilde{N}_{m}$. Our goal is to show that one
can choose a proper value for $B$ such that $E[\tilde{N}_m]=O(\log d)$
and hence so is $E[N_m]$.
Since $\tilde{W}'_{i}$ is
bounded from above by $B$, by the optional stopping theorem, we have
\[
E[\tilde{N}_{m}]\leq\sum_{i=1}^d
\omega_i\frac{m+B}{E[\tilde{C}_i(1)]}.
\]
By definition,
\[
E\bigl[\tilde{C}_i(1)\bigr]= E\bigl[\bigl(W_iI(W_i
\leq B)-U\mu_i\bigr)\exp\bigl(\theta _i^*
\bigl(W_iI(W_i\leq B)-U\mu_i\bigr)\bigr)
\bigr].
\]
Since $U\mu_{i}\geq0$, we have
\begin{eqnarray*}
&& E\bigl[\bigl(W_{i}I(W_i\leq B)-U\mu_{i}
\bigr)\exp\bigl(\theta^{*}_{i} \bigl(W_{i}I(W_i
\leq B)-U\mu_{i}\bigr)\bigr)\bigr]
\\
&&\qquad\geq E\bigl[(W_{i}-U\mu_i)\exp\bigl(\theta^{*}_{i}(W_{i}-U
\mu_{i})\bigr)\bigr]-E\bigl[W_{i}\exp \bigl(\theta^{*}_{i}W_{i}
\bigr)I(W_{i}>B)\bigr].
\end{eqnarray*}
By assumption (C2), $\delta$ and $H>0$ are constants independent of $d$
such that
\[
E \bigl[\exp\bigl(\bigl(\delta+\theta^*_i\bigr) W_i
\bigr)\bigr]\leq H<\infty.
\]
As a consequence,
\begin{eqnarray*}
E\bigl[W_{i}\exp\bigl(\theta^{*}_{i}W_{i}
\bigr)I(W_{i}>B)\bigr]&\leq& E\bigl[W_i\exp(-\delta
W_i)I(W_i>B)\exp\bigl(\bigl(\delta+\theta_i^*
\bigr) W_i\bigr)\bigr]
\\
&\leq&\max_{w>B}\bigl\{w\exp(-\delta w)\bigr\}E\bigl[\exp
\bigl(\bigl(\delta+\theta_i^*\bigr) W_i\bigr)\bigr]\\
&\leq& B
\exp(-\delta B)H
\end{eqnarray*}
for all $B>1/\delta$.
Recall that by (\ref{phi_prime_bound}),
\begin{eqnarray*}
E\bigl[(W_{i}-U\mu_i)\exp\bigl(\theta^{*}_{i}(W_{i}-U
\mu_i)\bigr)\bigr]&=&E\bigl[C_i(1)\bigr]\geq\dot {
\phi}_i\bigl(\theta_i^*\bigr)/(\lambda H)\\
&\geq&
\frac{\delta^3 e^{-H^2-H}}{\lambda
H(H+4e^{-2}H_1/\delta^2)},
\end{eqnarray*}
where $H_1=H^H$. Therefore, we can take $B=O(-\frac{1}{\delta}\log(\frac
{\delta^3 e^{-H^2-H}}{2\lambda H^2(H+4e^{-2}H_1/\delta^2)}))$
independent
of $d$ such that
\begin{eqnarray*}
B\exp(-\delta B)H&<&\frac{\delta^3 e^{-H^2-H}}{2\lambda
H(H+4e^{-2}H_1/\delta^2)}\quad \mbox{and hence}\\
E\bigl[
\tilde{C}_i(1)\bigr]&\geq& \frac{\delta^3 e^{-H^2-H}}{2\lambda H(H+4e^{-2}H_1/\delta^2)}.
\end{eqnarray*}
In the end, since $m=O(\log(d))$, we have
\[
E[N_{m}]\leq E[\tilde{N}_{m}]\leq\frac{ 2\lambda
H(m+B)(2H+8e^{-2}H_1/\delta^2)}{\delta^3 e^{-H^2-H}}=O(\log
d).
\]
\upqed\end{pf}
Now we give the proof of the main result in this subsection.
\begin{pf*}{Proof of Theorem~\ref{th2}}
Recall that
\[
E[N]=E[N_b]\bigl(E[N_{a}]+E[N_{m}]\bigr).
\]
Since $N_b$ is the number of trials required to obtain $I=0$, $E[N_b]=
1/P(I=0)$. As discussed in Section~\ref{sec2.3.1}, $P(I=0)\geq1-\sum_{i=1}^{d}
\exp(-\theta^{*}_{i} m)$ and hence
\[
E[N_b]\leq\frac{1}{1-\sum_{i=1}^{d} \exp(-\theta^{*}_{i} m)}\leq\frac
{1}{1-{1}/{d}}
\]
if we take $m=2\log d/\min_{i}\theta^{*}_{i}$.
Similarly, we have $E[N_{a}]=1/P(U>(m+W_{i})/\mu_{i}, \forall i)$. For
any $K>0$,
\[
P\biggl(U>\frac{m+W_{i}}{\mu_{i}}, \forall i\biggr)\geq P\biggl(U>\frac{m+K}{\min_{i} \mu_{i}};
W_{i}\leq K\mbox{ for all }i\biggr).
\]
Under assumption (C2), we have
\[
P(W_{i}\leq K\mbox{ for all }i)\geq1-\sum
_{i=1}^{d}P(W_{i}>K)\geq 1-dH\exp{(- K
\delta)}.
\]
Under assumption (C3), we have
\[
P\biggl(U>\frac{m+K}{\min_i\mu_i}\biggr)\geq\exp\biggl(-\frac{H(m+K)}{\min_i\mu_i}\biggr).
\]
As $U$ and $\mathbf{W}$ are independent,
\[
P\biggl(U>\frac{m+W_{i}}{\mu_{i}}, \forall i\biggr)\geq\exp \biggl( -
\frac
{H(m+K)}{\min_{i}
\mu_{i}} \biggr) \bigl(1-dH\exp(-K\delta)\bigr).
\]
Choosing
$K=(2\log{d}+\log{H})/\delta$ and plugging in $m=2\log d/\min_i\theta
_i^*$, we get
\[
E[N_{a}]\leq\frac{1}{1-{1}/{d}} d^{(2H/(\min_{i} \mu_{i}\min
_{i}\theta^{*}_{i})+2H/(\delta\min_i\mu_i))} H^{H/(\delta
\min_i\mu_i)}.
\]
By Proposition~\ref{pr4} we
have $E[N_{m}]=O(\log d)$. In summary, we have
\begin{eqnarray*}
E[N]&=&E[N_b]\bigl(E[N_{a}]+E[N_{m}]\bigr)=O
\biggl(\biggl(\frac{1}{1-{1}/{d}}\biggr)^2 \log d \cdot d^{{2H}/{(\min_{i}
\mu_{i}\min_{i}\theta^{*}_{i})}}
\biggr)
\\
&=&O\bigl(d^{1+{2H}/{(\min_{i} \mu_{i}\min
_{i}\theta^{*}_{i})}}\bigr).
\end{eqnarray*}
As discussed in the proof of Proposition~\ref{pr4}, $\theta^{*}_{i}\geq
\delta/(H+4e^{-2}H_1/\delta^2)$ and $\mu_{i}\geq\delta$ are
uniformly bounded away from $0$, therefore,
\[
E[N]=O\bigl(d^{1+{2H(H+4e^{-2}H_1/\delta^2)}/{\delta^2}}\bigr).
\]
\upqed\end{pf*}
\section{Extension to Markov-modulated processes}
\label{SectionExtension}
We shall briefly explain how our development in Section~\ref{SecCompPoi}, specifically
Algorithm \ref{alg1}, can be implemented beyond input with stationary and
independent increments. As an example, we shall concentrate on
Markov-modulated stochastic fluid networks. Our extension to Markov-modulated
networks is first explained in the one-dimensional case, and later we will
indicate how to treat the multidimensional setting.
Let $ ( \hat{I} ( t ) \dvtx t\geq0 ) $ be an irreducible
continuous-time Markov chain taking values on the set $\{1,\ldots,n\}$. We
assume that, conditional on $\hat{I} ( \cdot ) $, the number of
arrivals, $\hat{N} ( \cdot ) $, follows a time-inhomogeneous
Poisson process with rate $\lambda_{\hat{I} ( \cdot ) }$. We
further assume that $\int_{0}^{t}\lambda_{\hat{I} ( s ) }\,ds>0$
with positive probability. The process $\hat{N} ( \cdot ) $ is
said to be a Markov-modulated Poisson process with intensity $\lambda
_{\hat{I} ( \cdot ) }$. Define $\hat{A}_k$ to be the time of
the $k$th arrival, for $k\geq1$; that is, $\hat{A}_k=\inf\{t\geq0\dvtx \hat{N
} ( t ) =k\}$.
We assume that the $k$th arrival brings a job requirement equal to $
\hat{W} ( k ) $. We also assume that the $\hat{W} (
k ) $'s are conditionally independent given the process $\hat
{I} ( \cdot ) $. Moreover, we assume that the moment-generating
function $\phi_{i} ( \cdot ) $ defined via
\[
\phi_{i} ( \theta ) =E \bigl( \exp \bigl( \theta\hat{W} ( k ) \bigr)
|\hat{I} ( \hat{A}_k ) =i \bigr),
\]
is finite in a neighborhood of the origin. In simple words, the job
requirement of the $k$th arrival might depend upon the environment, $
\hat{I} ( \cdot ) $, at the time of arrival. But, conditional on
the environment, the job sizes are independent. Finally, we assume that the
service rate at time $t$ is equal to $\mu_{\hat{I} ( t ) }\geq0$.
Let $\hat{X}(t)=\sum_{k=1}^{\hat{N}(t)}\hat{W}({k})-\int_{0}^{t}\mu
_{\hat{I} (s)}\,ds$. Then the workload process, $(Y(t)\dvtx t\geq0)$, can
be expressed as
\[
Y(t)=\hat{X}(t)-\inf_{0\leq s\leq t}\hat{X}(s),
\]
assuming that $Y(0)=0$. In order for the process $Y (
\cdot ) $ to be stable, in the sense of having a stationary
distribution, we assume that $\sum_i\pi_i(\lambda_iE[
\hat{W}|\hat{I}=i]-\mu_i)<0$, where $\pi_i$ is the stationary
distribution of the Markov chain $\hat{I}$. Following the same argument
as in Section~\ref{SecCompPoi}, we can
construct a stationary version of the process $Y ( \cdot ) $
by a
time reversal argument.
Since $\hat{I}(\cdot)$ is irreducible, one can define its associated
\textit{stationary} time-reversed Markov chain ${I(\cdot)}$ with transition
rate matrix $\mathcal{A}$; for the existence and detailed description of
such reversed chain, see Chapter~2.5 of \citet{Asmussen2003}. Let us
write $
N ( \cdot ) $ to denote a Markov-modulated Poisson process with
intensity $\lambda_{I ( \cdot ) }$, and let $A_{k}=\inf
\{t\geq0\dvtx N ( t ) =k\}$. We consider a sequence $ ( W (
k ) \dvtx k\geq1 ) $ of conditionally independent random variables
representing the service requirements (backward in time) such that $
\phi_{i} ( \theta ) =E ( \exp ( \theta W ( k )
) |I ( A_{k} ) =i ) $.
We then can define $Z(t)=\sum_{k=1}^{N(t)}W ( k )
-\int_{0}^{t}\mu_{I(s)}\,ds$. Following the same arguments as in
Section~\ref{SecCompPoi}, we
can run a stationary version $Y^{\ast}$ of $Y$ backward via the process
\[
Y^{\ast}(-t)=\sup_{s\geq t} \bigl( Z(s)-Z(t) \bigr).
\]
Therefore, $Y^{\ast}(-t)$ can be simulated exactly as long as a convenient
change of measure can be constructed for the process $ ( I (
\cdot ),Z(\cdot) ) $, so that a suitable adaptation of Algorithm
\ref{alg1.1.1} can be applied. Once the adaptation of Algorithm \ref{alg1.1.1} is in place,
the adaptation of Algorithms~\ref{alg1.1} and~\ref{alg1} is straightforward.
In order to define such change of measure, let us define the matrix $
\mathcal{M}(\theta,t)\in\mathbb{R}^{n\times n}$, for $t\geq0$, via
\[
\mathcal{M}_{ij}(\theta,t)=E_{i}\bigl[\exp\bigl(\theta
Z(t)\bigr);I(t)=j\bigr],
\]
where the notation $E_{i} ( \cdot ) $ means that $I (
0 )
=i$. Note that $\mathcal{M}(\cdot,t)$ is well defined in a neighborhood of
the origin. In what follows we assume that $\theta$ is such that all
coordinates of $\mathcal{M}
(\theta,t)$ are finite.
It is known [see, e.g., Chapters~11.2 and~13.8 of
\citet{Asmussen2003} and the references therein] that $\mathcal{M}
(\theta,t)=\exp(tG(\theta))$ where the matrix $G$ is
defined by
\[
G_{ij}(\theta)=
\cases{\mathcal{A}_{ij}, &\quad $\mbox{if }i\neq j$, \vspace*{2pt}
\cr
\mathcal{A}_{ii}-
\mu_{i}\theta+\lambda_{i}\phi_{i}(\theta), &\quad $\mbox{if }i=j.$}
\]
Besides, $G(\theta)$ has a unique eigenvalue $\beta(\theta)$
corresponding to a strictly positive eigenvector $(u ( i,\theta
)
\dvtx 1\leq i\leq n)$. The eigenvalue $\beta(\theta)$ has the following
properties which follow from Propositions~2.4 and~2.10 in
Chapter~11.2 of \citet{Asmussen2003}:
\begin{lemma}
\label{PropSummaryMM}
\begin{longlist}[(1)]
\item[(1)] $\beta(\theta)$ is convex in $\theta$ and $\dot{\beta}(\theta)$ is well defined.
\item[(2)] $\lim_{t\rightarrow\infty}Z(t)/t=\dot{\beta}(0)=\lim_{t\rightarrow
\infty}\hat{X} ( t ) /t<0$.
\item[(3)] $ ( M ( t,\theta ) \dvtx t\geq0 ) $ defined via
\[
M ( t,\theta ) =\frac{u(I ( t ),\theta)}{u(I (
0 ),\theta)}\exp\bigl(\theta Z(t)-t\beta(\theta)\bigr)
\]
is a martingale.
\end{longlist}
\end{lemma}
As explained in Chapter~13.8 of \citet{Asmussen2003}, the martingale $
M ( \cdot ) $ induces a change of measure for the process
$ (
I ( \cdot ),Z(\cdot) ) $ as we shall explain. Let $P$ be the
probability law of $(I(\cdot),Z(\cdot))$, and define a new probability
measure $\tilde{P}$ for $(I(s),Z(s)\dvtx s\leq t)$ as $d\tilde{P}=M (
t,\theta ) \,dP$.
We now describe the law of $ ( I ( \cdot ),Z (
\cdot ) ) $ under $\tilde{P}$. The process $I(\cdot)$ is a
continuous time Markov chain with rate matrix $\widetilde{\mathcal
{A}}
_{ij}=\mathcal{A}_{ij}u(j,\theta)/u ( i,\theta ) $ for $i\neq j$
(and $\widetilde{\mathcal{A}}_{ii}=-\sum_{j\neq i}\widetilde{\mathcal
{A}}
_{ij}$). In addition,
\[
Z(t)\stackrel{d} {=}\sum_{k=1}^{\tilde{N}(t)}
\tilde{W} ( k ) -\int_{0}^{t}\mu_{I ( s ) }\,ds,
\]
where $\tilde{N}$ is a Markov-modulated Poisson process with rate at
time $t
$ equal to $\phi_{I ( t ) } ( \theta ) \lambda
(I(t))$, and
the $\tilde{W} ( k ) $'s are conditionally independent given $
I ( \cdot ) $ with moment generating function $\widetilde{\phi}
_{i} ( \cdot ) $ defined via
\[
\widetilde{\phi}_{i} ( \eta;\theta ) =\widetilde{E}\bigl(\exp\bigl(
\eta \tilde{W} ( k ) \bigr)|I ( A_{k} ) =i\bigr)=\phi_{i} ( \eta+
\theta ) /\phi_{i} ( \theta ),
\]
which is finite in a neighborhood of the origin. In addition, $
Z(t)/t\rightarrow\dot{\beta}(\theta)$ under~$\tilde{P}$.
Because of the stability condition of the system, we have that $\dot
{\beta}(0)<0$. Then, following the same argument as in the remark given
at the end
of Section~\ref{sec2.3}, we may assume the existence of the Cramer root $\theta
^{\ast
}>0$ such that $\beta(\theta^{\ast})=0$ and $\dot{\beta} (
\theta^{\ast} ) >0$. The change of measure that allows adaption of
Algorithm \ref{alg1.1.1} is given by selecting $\theta^{\ast}>0$ as indicated. Now,
select $m>0$ such that
\begin{equation}
K:=\exp \bigl( -\theta^{\ast}m \bigr) \max_{i,j}
\frac{u ( i,\theta
^{\ast} ) }{u ( j,\theta^{\ast} ) }\leq1. \label{SelectionK}
\end{equation}
We will use the notation $P_{0,i} ( \cdot ) $ to denote the
law $
P ( \cdot ) $ conditional on $Z ( 0 ) =0$ and $I (
0 ) =i$. Let us write $P_{0,i}^{\ast} ( \cdot ) $ to denote
the law of $(Z ( t ) \dvtx 0\leq t\leq T_{m})$ [under $P_{0,i} (
\cdot ) $] conditional on $T_{m}<\infty$. Further, we write $\tilde
{P}
_{0,i} ( \cdot ) $ to denote the law of $\tilde{P} (
\cdot ) $, selecting $\theta=\theta^{\ast}$, conditional on $Z (
0 ) =0$ and $I ( 0 ) =i$. Then we have that $\tilde{P}
_{0,i} ( T_{m}<\infty ) =1 $ [by Lemma~\ref{PropSummaryMM}
since $
\dot{\beta} ( \theta^{\ast} ) >0$], and therefore [by (\ref
{SelectionK})], we have
\begin{eqnarray*}
&&\frac{dP_{0,i}^{\ast}}{d\tilde{P}_{0,i}} \bigl( \bigl( I ( t ) ,Z ( t ) \bigr) \dvtx 0\leq t\leq
T_{m} \bigr) \\
&&\qquad=\frac{u (
i,\theta^{\ast} ) }{u ( I ( T_{m} ),\theta^{\ast
} ) }
\times\frac{\exp ( -\theta^{\ast}Z ( T_{m} ) ) I (
T_{m}<\infty ) }{P_{0,i} ( T_{m}<\infty ) }
\\
&&\qquad\leq\frac{K}{P_{0,i} ( T_{m}<\infty ) }\leq\frac
{1}{P_{0,i} (
T_{m}<\infty ) }.
\end{eqnarray*}
It is clear from this identity, which is completely analogous to
identities (
\ref{ARB}) and~(\ref{ARB_1}), which are the basis for Algorithm \ref{alg1.1.1}, that
the corresponding adaptation to our current setting follows.
For the $d$-dimensional case ($d>1$), we first assume the existence of the
Cramer root $\theta_{j}^{\ast}>0$ for each dimension $j\in\{1,\ldots,d\}
$. In
this setting we also must compute the corresponding positive
eigenvector $(
u_{j}( i,\theta_{j}^{\ast}) \dvtx 1\leq i\leq n) $ for each $j\in\{1,\ldots
,d\}$.
The desired change of measure that allows the adaptation of Algorithm \ref{alg1.1.1}
is just a mixture of changes of measures such as those described above
induced by $M( \cdot,\theta_{j}^{\ast}) $ in each direction, just as
discussed in Section~\ref{sec2.3.1}, with weight $w_{j}=\exp( -\theta_{j}^{\ast}m)
/\sum_{k=1}^{d}\exp( -\theta_{k}^{\ast}m) $. The corresponding likelihood
ratio is then
\begin{eqnarray*}
&&\frac{dP_{0,i}^{\ast}}{d\tilde{P}_{0,i}} \bigl( \bigl( I ( t ) ,Z ( t ) \bigr) \dvtx 0\leq t\leq
T_{m} \bigr)
\\
&&\qquad=\frac{1}{\sum_{j=1}^{d}w_{j}\exp ( \theta_{j}^{\ast}Z_{j} (
T_{m} ) ) u_{j} ( I ( T_{m} )
,\theta_{j}^{\ast} ) /u_{j} ( i,\theta_{j}^{\ast} ) },
\end{eqnarray*}
and $m$ must be selected so that
\[
\sum_{j=1}^{d}\exp \bigl( -
\theta_{j}^{\ast}m \bigr) \sup_{j,i,k}
\frac{
u_{j} ( i,\theta_{j}^{\ast} ) }{u_{j} ( k,\theta_{j}^{\ast
} ) }\leq1.
\]
\section{Algorithm for reflected Brownian motion}
\label{SecRBM}
In this section, we revise our algorithm and explain how we can apply
it to
the case of reflected Brownian motion. Consider a multidimensional Brownian
motion
\[
\mathbf{X} ( t ) =\mathbf{v}t+A\mathbf{B} ( t ),
\]
where $\mathbf{v}\in\mathbb{R}^{d}$ is the drift vector, and $A\cdot
A^{T}\triangleq\Sigma\in\mathbb{R}^{d\times d}$ is the positive definite
covariance matrix. Our target process $\mathbf{Y}(t)$ is the solution
to the
following Skorokhod problem with input process $\mathbf{X}(\cdot)$ and
initial value $\mathbf{Y}(0)=\mathbf{y}_0$:
\begin{eqnarray*}
d\mathbf{Y} ( t ) & =&d\mathbf{X} ( t ) +R\,d\mathbf {L} ( t ), \qquad\mathbf{Y} (
0 ) =\mathbf{y}_{0},
\\
\mathbf{Y} ( t ) & \geq&0,\qquad Y_{j} ( t ) \,d L_{j} ( t )
\geq0,\qquad L_{j} ( 0 ) =0,\qquad
dL_{j} ( t
) \geq0.
\end{eqnarray*}
We assume that the reflection matrix $R$ is an $M$-matrix of the form $
R=I-Q^T$, where $Q$ has nonnegative coordinates and a spectral radius equal
to $\alpha<1$ so that $R^{-1}$ has only nonnegative elements; see page 304
of \citet{HarrisonReiman1981}. We also assume the stability condition $
R^{-1}\mathbf{v<0}$ for the existence of the steady-state distribution. As
discussed in \citet{HarrisonReiman1981}, there is a unique solution
pair $(\mathbf{Y},\mathbf{L})$ to the Skorokhod problem associated with
$
\mathbf{X}$, and the process $\mathbf{Y}$ is called a reflected Brownian
motion (RBM). We wish to sample $\mathbf{Y} ( \infty ) $ (at least
approximately, with a pre-defined controlled error).
The stochastic dominance result for reflected Brownian motions that is
analogous to Lemma~\ref{LmK_W_Comp} was first developed in the proof of
Lemma 12 in \citet{HarrisonWilliams1987}. In detail, we can construct
a dominating process $\mathbf{Y}
^+(\cdot)$ as follows. First, we can choose $\mathbf{z}\in\mathbb{R}^d $
such that $\mathbf{v<z}$ and $R^{-1}\mathbf{z<0}$. Define a process
\begin{equation}
\mathbf{Z} ( t ) =\mathbf{X} ( t ) -\mathbf {z}t:=A\mathbf{B
} ( t )-
\bolds{\mu}t, \label{BMinput}
\end{equation}
where $\bolds{\mu}=\mathbf{v-z}$, and let $\mathbf{Y}^+(\cdot)$ be
the RBM
corresponding to the Skorokhod problem (\ref{SP_Bnd}), which has orthogonal
reflection. Then $R^{-1}\mathbf{Y} ( t ) \leq R^{-1}\mathbf{Y}
^{+} ( t ) $. As a result, we can assume without loss of generality
that the input Brownian motion has strictly negative drift
coordinatewise. In sum, the following assumption is in force
throughout this
section:
\renewcommand{\theass}{(D)}
\begin{ass}\label{assD}
The input process $\mathbf{Z}(\cdot)$
satisfies (\ref
{BMinput}) with $\mu_i>\delta_0>0$ for all $1\leq i\leq d$, and we assume
that $A$ is nondegenerate so that $A^TA$ is positive definite.
\end{ass}
Since $\mathbf{Z}(\cdot)$ has strictly negative drift, following the same
argument given for Proposition~\ref{pr1}, we can construct a stationary version of
the dominating process as
\begin{equation}
\mathbf{Y}^+(-t)=-\mathbf{Z}(t)+\max_{u\geq t}\mathbf{Z}(u)
\triangleq \mathbf{M}(t)-\mathbf{Z}(t)\qquad \mbox{for all }t\geq0. \label{domBM}
\end{equation}
In order to apply the same strategy as in Algorithm \ref{alg1} to the RBM,
we need to address two problems. First, the input process $\mathbf{Z}$
requires a continuous path description while the computer can only encode
and generate discrete objects. Second, the dominating process is a reflected
Brownian motion with orthogonal reflection. Therefore the hitting time
$\tau$
to the origin is almost surely infinity [see \citet
{VaradhanWilliams1985}
], which means that Algorithm \ref{alg1} will not terminate in finite time,
in this case. To solve the first problem, we take advantage of a wavelet
representation of Brownian motion and use it to simulate a piecewise linear
approximation with uniformly small (deterministic) error. To solve the
second problem, we define an approximated coalescent time $
\tau_{\varepsilon} $ as the first passage time to a small ball around the
origin so that $E[\tau_{\varepsilon}]<\infty$ and the error caused by
replacing $\tau$ with $\tau_{\varepsilon}$ is bounded by $\varepsilon$. In
sum, we concede to an algorithm that is not exact but one that could give
any user-defined $\varepsilon$ precision. Nevertheless, at the end of
Section~\ref{sec4.1} we will show that we can actually use this $\varepsilon$-biased
algorithm to estimate without any bias the steady-state expectation of
continuous functions of RBM by introducing an extra randomization step.
Section~\ref{SecRBM} is organized as follows. In Section~\ref{sec4.1}, we will describe the main
strategy of our algorithm. In Section~\ref{SubWave}, we use a wavelet representation
to simulate a piecewise linear approximation of Brownian motion. In
Section~\ref{sec4.3}, we will discuss the details in simulating jointly $\tau
_{\varepsilon}$
and the stationary dominating process based on the techniques we have
already used for the compound Poisson cases. In the end, in
Section~\ref{SubSecCCRBM}, we
will give an estimate of the computational complexity of our algorithm.
\subsection{The structure of the main simulation procedure}\label{sec4.1}
The main strategy of the algorithm is almost the same as Algorithm \ref{alg1},
except for two modifications due to the two issues discussed above: first,
instead of simulating the input process $\mathbf{Z}$ exactly, we
simulate a
piecewise linear approximation $\mathbf{Z}^{\varepsilon}$ such that $
|Z^{\varepsilon}_{i}(t)-Z_{i}(t)|<\varepsilon$ for all indices $i$ and $t\geq
0$; second,
instead of sampling the coalescence time $\tau$ such that $\mathbf
{M}(\tau)=
\mathbf{Z}(\tau)$, we simulate an approximation coalescence time, $
\tau_{\varepsilon}$, such that $\mathbf{M}(\tau_{\varepsilon})\leq\mathbf{Z}
(\tau_{\varepsilon})+\bolds{\varepsilon}$.
With this notation, we now give the structure of our algorithm. The
details will be given later in Sections \ref{SubWave} and \ref{sec4.3}:
\renewcommand{\thealgorithm}{2}
\begin{algorithm}[{[Sampling with controlled error of $\mathbf{Y} (
\infty )$]}]\label{alg2}
\textit{Step} 1: Let $\tau_{\varepsilon}\geq0$ be any time for which
$\mathbf{M}
( \tau_{\varepsilon} ) \leq\mathbf{Z} ( \tau_{\varepsilon}
) +
\bolds{\varepsilon} $, and simulate, jointly with $\tau_{\varepsilon}$,
$\mathbf{Z}
_{-\tau_{\varepsilon}}^{\leftarrow} ( t ) =-\mathbf{Z}
^{\varepsilon} ( \tau_{\varepsilon}-t ) $ for $0\leq t\leq\tau
_{\varepsilon}$
.
\textit{Step} 2: Define $\mathbf{X}_{-\tau_{\varepsilon}}^{\leftarrow} (
t ) =\mathbf{Z}^{\varepsilon}(\tau_{\varepsilon})-\mathbf{Z}^{\varepsilon
} (
\tau_{\varepsilon}-t ) +\mathbf{z}t$, and compute $\mathbf{Y}
^{\varepsilon}_{-\tau_{\varepsilon}} ( \tau_{\varepsilon},0 ) $
which is obtained by evolving the solution $\mathbf{Y}^{\varepsilon}_{-\tau
_{
\varepsilon}} ( \cdot,0 ) $ to the Skorokhod problem
\begin{eqnarray*}
d\mathbf{Y}^{\varepsilon}_{-\tau_{\varepsilon}} ( t,0 ) & =&d
\mathbf{X}_{-\tau_{\varepsilon} }^{\leftarrow} ( t ) +R\,d\mathbf{L}
_{-\tau}
( t,0 ),
\\
\mathbf{Y}^{\varepsilon}_{-\tau_{\varepsilon}} ( t,0 ) & \geq&0,
\qquad Y^{\varepsilon}_{-\tau_{\varepsilon},j} ( t,0 ) \,dL_{-\tau_{\varepsilon},j}
( t,0 ) \geq0, \\
L_{-\tau_{\varepsilon},j} ( 0,0 )& =&0,\qquad dL_{-\tau
_{\varepsilon},j} ( t,0 )
\geq0,
\end{eqnarray*}
for $\tau_{\varepsilon}$ units of time.
\textit{Step} 3: Output $\mathbf{Y}^{\varepsilon}_{-\tau_{\varepsilon}}(\tau_{
\varepsilon},0)$.
\end{algorithm}
First, we show that there exists a stationary version $\{\mathbf{Y}
^{*}(t)\dvtx t\leq0\}$ that is coupled with the dominating stationary
process $\{
\mathbf{Y}^{+}(t)\dvtx t\leq0\}$ as given by (\ref{domBM}).
\begin{lemma}\label{le3}
There exists a stationary version $\{\mathbf{Y}^*(t)\dvtx t\leq0\}$ of
$\mathbf{Y}
$ such that $R^{-1}\mathbf{Y}^{*}(t)\leq R^{-1}\mathbf{Y}^{+}(t)$ for
all $
t\leq0$.
\end{lemma}
\begin{pf}
The proof follows the same argument as that of Proposition~\ref{pr2}.
\end{pf}
The following proposition shows that the error of the above algorithm
has a
small and deterministic bound.
\begin{proposition}\label{pr5}
Suppose $\mathbf{X}\in\mathbb{R}^{d}$. Let
$r=\max_{i,j}R^{-1}_{ij}/\min_{i,j}\{R_{ij}^{-1}\dvtx\break R^{-1}_{ij}>0\}$. Then there exists a stationary version
$\mathbf{Y}^{*}$ of $\mathbf{Y}$ such that in each index $i$,
\[
\bigl|Y_{i}^{*}(0)-Y^{\varepsilon}_{-\tau_{\varepsilon},i}(
\tau_{\varepsilon},0)\bigr|\leq\biggl(
\frac{1}{1-\alpha}+dr\biggr)
\varepsilon.
\]
Here $0\leq \alpha <1$ is the spectral radius of the matrix $Q$.
\end{proposition}
\begin{pf}
Consider three processes on $[-\tau_{\varepsilon},0]$. The first is the coupled
stationary process $\mathbf{Y}^{*}(\cdot)$ as constructed in Lemma~\ref{le3},
which is
the solution to the Skorokhod problem with initial value $\mathbf{Y}^{*}
(-\tau_{\varepsilon})$ at time $-\tau_{\varepsilon}$ and input process
$\tilde{\mathbf{X}}(\cdot)=\mathbf{X}(\tau_{\varepsilon})-\mathbf{X(-\cdot
)}$ on
$[-\tau_{\varepsilon},0]$; the second is a process $\tilde{\mathbf
{Y}}(\cdot)$,
which is the solution to the Skorokhod problem with initial value $0$
at time
$-\tau_{\varepsilon}$ and input process $\tilde{\mathbf{X}}(\cdot)$; the
third is
the process $\mathbf{Y}^{\varepsilon}_{-\tau_{\varepsilon}}(t,0)$ as we
described in
the algorithm, which is the solution to the Skorokhod problem with initial
value $0$ at time $-\tau_{\varepsilon}$ and input process $\mathbf{X}
_{-\tau_{\varepsilon} }^{\leftarrow} ( t ) $ as defined in step 2
of Algorithm \ref{alg2}.\vadjust{\goodbreak}
By definition, we know that for each index $i$, $|Y_{i}^{+}
(-\tau_{\varepsilon})|<\varepsilon$. Since\break $R^{-1}\mathbf{Y}(\tau_{\varepsilon
})\leq
R^{-1}\mathbf{Y}^{+}(\tau_{\varepsilon})$, the coupled process $Y_{i}^{*}
(-\tau_{\varepsilon})<dr\,\varepsilon$. Note that $\mathbf{Y}^{*}(\cdot)$ has the
same input data as $\tilde{\mathbf{Y}}(\cdot)$ except for their initial values.
According to the comparison theorem of \citet{Ramasubramanian2000}, the
difference between these two
processes is uniformly bounded by the difference of their initial
values coordinate-wise. Therefore, we can conclude $|Y^{*}_{i}(0)-\tilde
{Y}
_{i}(0)|<dr\,\varepsilon$.
On the other hand, $\tilde{\mathbf{Y}}(\cdot)$ and $\mathbf{Y}^{\varepsilon
}_{-\tau_{\varepsilon}}(\cdot,0)$ have common initial value 0 and input processes
whose difference is uniformly bounded by $\varepsilon$. It was proved in
\citet{HarrisonReiman1981} that the Skorokhod mapping is Lipschitz\break
continuous under the uniform metric $d_T(Y^1(\cdot),Y^2(\cdot
))\triangleq\break \max_{1\leq i\leq d}\sup_{0\leq t\leq
T}|Y_i^1(t)-Y_i^2(t)|$ for all $0<T<\infty$, and the Lipschitz constant
is equal to $1/(1-\alpha)$, where $0\leq\alpha<1$ is the spectral
radius of $Q$. Therefore, we have that $|\tilde{Y}_{i}(0)-Y^{\varepsilon
}_{-\tau_{\varepsilon},i}
(\tau_{\varepsilon},0)|<\varepsilon/(1-\alpha)$.
Simply applying the triangle inequality, we obtain that
\[
\bigl|Y_{i}^{*}(0)-Y^{\varepsilon}_{\tau_{\varepsilon},i}(
\tau_{\varepsilon},0)\bigr|\leq \biggl(\frac{1}{1-\alpha}+dr\biggr)\varepsilon.
\]
\upqed\end{pf}
We conclude this subsection by explaining how to remove the $\varepsilon$
-bias induced by Algorithm \ref{alg2}. Let $T$ be any positive random variable with
positive density $\{f(t)\dvtx t\geq0\}$ independent of $\mathbf{Y}^*(0)$.
Let $g\dvtx
\mathbb{R}^d\to\mathbb{R}$ be any positive Lipschitz continuous function
such that there exists a constant $K>0$ with
$|g(\mathbf{x})-g(\mathbf{y})|\leq K\max_{1\leq i\leq d}|x_i-y_i|$
for all $\mathbf{x},\mathbf{y}\in\mathbb{R}^d$. As illustrated in \citet{Beskosetal2012},
\begin{eqnarray*}
E \bigl[g\bigl(\mathbf{Y}^*(0)\bigr) \bigr]&=&E \biggl[\int_0^{g(\mathbf
{Y}^*(0))}\,dt
\biggr]
=E \biggl[\int_0^{g(\mathbf{Y}^*(0))}
\frac{f(t)}{f(t)}\,dt \biggr]
\\
&=&E \biggl[\frac{1(g(\mathbf{Y}^*(0))>T)}{f(T)} \biggr].
\end{eqnarray*}
Since $|Y_{i}^{*}(0)-Y^{\varepsilon}_{\tau_{\varepsilon},i}(\tau_{\varepsilon
},0)|
\leq(\frac{1}{1-\alpha}+dr)\varepsilon$, we can sample $T$ first, and then select $\varepsilon>0$
small enough, output $1(g(\mathbf{Y}^{\varepsilon}_{\tau_{\varepsilon}}(\tau_{
\varepsilon},0))>T)/f(T)$ as an unbiased estimator of $E[g(\mathbf{Y}^*(0))]$
without the need for computing $\mathbf{Y}^*(0)$ exactly. It is
important to
have $ (\mathbf{Y}^{\varepsilon}_{\tau_{\varepsilon}}(\tau_{\varepsilon},0)\dvtx
\varepsilon>0 ) $ coupled as $\varepsilon\to0$, and this can be achieved
thanks to the wavelet construction that we will discuss next.
\subsection{Wavelet representation of Brownian motion}
\label{SubWave}
In this part, we give an algorithm to generate piecewise linear
approximations to a Brownian motion path-by-path, with uniform
precision on
any finite time interval. The main idea is to use a wavelet representation
for Brownian motion.
By the Cholesky decomposition, any multidimensional Brownian motion can be
expressed as a linear combination of independent one-dimensional Brownian
motions. Our goal is to give a piecewise linear approximation to a $d$
-dimensional Brownian motion $\mathbf{Z}$ with uniform precision
$\varepsilon$
on $[0,1]$. Suppose that we can write $\mathbf{Z}=A\mathbf{B}$, where
$A$ is
the Cholesky decomposition of the covariance matrix, and the $B_{i}$'s are
independent standard Brownian motions. If we are able to give a piecewise
linear approximation $\tilde{B}_{i}$ to each $B_{i}$ on $[0,1]$ with
precision $\varepsilon/(d\cdot a)$ where $a=\max_{i,j}|A_{ij}|$, then
$A\tilde{
\mathbf{B}}$ is a piecewise linear approximation to $\mathbf{Z}$ with
uniform error~$\varepsilon$. Therefore, in the rest of this part, we only need
to work with a standard one-dimensional Brownian motion.
Now let us introduce the precise statement of a wavelet representation of
Brownian motion; see \citet{Steele2001}, pages 34--39. First we need to define
step function $H(\cdot)$ on $[0,1]$ by
\[
H(t)=
\cases{1, &\quad $\mbox{for }0\leq t<\tfrac{1}{2},$ \vspace*{2pt}
\cr
-1, &\quad $\mbox{for }\tfrac{1}{2}\leq t\leq1,$ \vspace*{2pt}
\cr
0, & \quad $\mbox{otherwise.}$}
\]
Then define a family of functions
\[
H_{k}(t)=2^{j/2}H\bigl(2^{j}t-l\bigr)
\]
for $k=2^{j}+l$ where $j\geq0$ and $0\leq l<2^{j}$. Set $H_{0}(t)=1$. The
following wavelet representation theorem can be seen in \citet{Steele2001}:
\begin{theorem}\label{th3}
If $\{W^{k}\dvtx 0\leq k<\infty\}$ is a sequence of independent standard normal
random variables, then the series defined by
\[
B_{t}=\sum_{k=0}^{\infty} \biggl(
W^{k}\int_{0}^{t}H_{k}(s)
\,ds \biggr)
\]
converges uniformly on $[0,1]$ with probability one. Moreover, the
process $
\{B_{t}\}$ defined by the limit is a standard Brownian motion on $[0,1]$.
\end{theorem}
Choose $\eta_{k}=4\cdot\sqrt{\log k}$, and note that $P(|W^{k}|>\eta
_{k})=O(k^{-4})$, so\break $\sum_{k=0}^{\infty}P(|W^{k}|>\eta_{k})<\infty$.
Therefore, $P(|W^{k}|>\eta_{k},\mathrm{i.o.})=0$. The simulation strategy will
be to
sample $\{W^{k}\}$ jointly with the finite set $\{k\dvtx |W^{k}|\geq\eta
_{k}\}$.
Note that if we take $j=\lceil\log_{2}{k}\rceil$, as shown in
\citet{Steele2001},
\[
\sum_{k=1}^{\infty} \biggl( \bigl|W^{k}\bigr|
\int_{0}^{t}H_{k}(s) \,ds \biggr) \leq
\sum_{j=0}^{\infty} \Bigl(2^{-j/2}\cdot
\max_{2^{j}\leq k\leq2^{j+1}-1} \bigl|W^{k}\bigr| \Bigr).
\]
Since $\sum_{j=0}^{\infty}2^{-j/2}\sqrt{j+1}<\infty$, for any $\varepsilon>0$ there
exists $K_{0}>0$, such that
\begin{equation}
\sum_{j=\lceil\log_{2}{K_{0}}\rceil}^{\infty}2^{-j/2}\sqrt{j+1}<\varepsilon.
\label{definition of K_0}
\end{equation}
As a result, define
\begin{equation}
K=\max\bigl\{k\dvtx \bigl|W^{k}\bigr|>\eta_{k}\bigr\}\vee
K_{0}<\infty, \label{definiation of K}
\end{equation}
then $\sum_{k=K+1}^{\infty}|W^{k}|\int_{0}^{t}H_{k}(s)\,ds\leq\varepsilon$.
If we
can simulate $\{(W^{k})_{k=1}^{K},K\}$ jointly,
\begin{equation}
B^{\varepsilon}(t)=\sum_{k=0}^{K}W^{k}
\int_{0}^{t}H_{k}(s)\,ds \label{EQ-BE}
\end{equation}
will be a piecewise linear approximation to a standard Brownian motion
within precision $\varepsilon$ in $C[0,1]$.
Now we show how to simulate $K$ jointly with $\{W^k\dvtx 1\leq k\leq K\}$. The
algorithm is as below with $\rho=4$ as we have chosen $\eta_k=4\cdot
\sqrt{
\log k}$:
\renewcommand{\thealgorithm}{2\normalfont{w}}
\begin{algorithm}[(Simulate $K$ jointly with $\{W^k\}$)]\label{alg2w}
\textit{Step} 0: Initialize $G=K_0$ and $S$ to be an empty array.
\textit{Step} 1: Set $U=1$, $D=0$. Simulate $V\sim \operatorname{Uniform}(0,1)$.
\textit{Step} 2: While $U>V>D$, set $G\leftarrow G+1$ and $U\leftarrow
P(|W^{G}|\leq
\rho\sqrt{\log{G}})\times U$ and $D\leftarrow$ $(1-G^{1-\rho
^{2}/2})\times U$.
\textit{Step} 3: If $V\geq U$, add $G$ to the end of $S$, that is, $S=[S,G]$,
and return
to step~1.
\textit{Step} 4: If $V\leq D$, $K=\max(S,K_0)$.
\textit{Step} 5: For every $k\in S$, generate $W^{k}$ according to the conditional
distribution of $Z$ given $\{|W|>\rho\sqrt{\log{k}}\}$; for other
$1\leq
k\leq K$, generate $W^{k}$ according to the conditional distribution of $W$
given $\{|W|\leq\rho\sqrt{\log{k}}\}$.
\end{algorithm}
In this algorithm, we keep an array $S$, which is used to record the indices
such that $|W^{k}|>\rho\sqrt{\log k}$, and a number $G$ which is the next
index to be added into $S$. Precisely speaking, given that the last element
in array $S$ is $N$, say, $\max(S)=N$, $G=\inf\{k\geq N+1\dvtx |W^{k}|>\rho
\sqrt{\log k}\}$. The key part of the algorithm is to simulate a Bernoulli
with success parameter $P(G<\infty)$ and to sample $G$ given $G<\infty$.
For this purpose, we keep updating two constants $U$ and $D$ such that $
U>P(G=\infty)>D$ and $(U-D)\to0$ as the number of iterations grows. To
illustrate this point, denote the value of $U$ and $D$ in the $m$th
iteration by $U_m$ and $D_m$, respectively. Then for all $m>0$,
\[
P(G=\infty)=\prod_{k=N+1}^{\infty}P
\bigl(\bigl|W^k\bigr|\leq\rho\sqrt{\log{k}}
\bigr)<\prod
_{k=N+1}^{N+m}P\bigl(\bigl|W^k\bigr|\leq\rho\sqrt{
\log{k}}\bigr)=U_m.
\]
On the other hand, for all $\rho>\sqrt{2}$ and $N$ large enough,
\begin{eqnarray*}
\prod_{k=N+m+1}^{\infty}P\bigl(\bigl|W^k\bigr|
&\leq&\rho\sqrt{\log{k}}\bigr)>1-\sum_{k=N+m+1}^{
\infty}P
\bigl(\bigl|W^k\bigr|>\rho\sqrt{\log k}\bigr)\\
&\geq&1-(N+m+1)^{1-\rho^2/2},
\end{eqnarray*}
and hence we conclude that $D_m=(1-(N+m+1)^{1-\rho
^{2}/2})U_m<P(G=\infty)$.
Because $(1-(N+m+1)^{1-\rho^2/2})\to1$ as $m\to\infty$, the algorithm
proceeds to steps 3 or 4 after a finite number of iterations, and
we can
decide whether $G<\infty$ or not.
Now we show that we can actually sample $G$ simultaneously as the\break Bernoulli
with success probability $P(G<\infty)$ is generated. If $V<D$, we conclude
that $V<P(G=\infty)$ and hence $G=\infty$ and $K=\max(S)$. Otherwise, we
have $G<\infty$. In this case, suppose step 2 ends in the $(m+1)$th
iteration and $V>U$. Since $U_m=P(|W^k|\leq\rho\sqrt{\log k}\mbox{ for
}
k=N+1,\ldots,N+m )$, $U_{m+1}\leq V<U_m$ implies nothing but that $
N+m+1=\inf\{k\geq N+1\dvtx |W^{k}|>\rho\sqrt{\log k}\}$. Therefore, by
definition, $G=N+m+1$ and should be added into array $S$. Once $S$ and $K$
are generated, $\{W^k\dvtx 1\leq k\leq K\}$ can be generated jointly with
$S$ and
$K$ according to step 5.
Also we note that $B^{\varepsilon}(t)$ has the following nice property:
\begin{proposition}\label{pr6}
\[
B^{\varepsilon}(1)=B(1).
\]
\end{proposition}
\begin{pf}
The equality follows from the fact that $\int_{0}^{1}
H_{n}(s)\,ds=0$ for any $n\geq1$.
\end{pf}
As a consequence of this property, for any compact time interval $[0,T]$
(without loss of generality, assume $T$ is an integer), in order to
give an
approximation for $B(t)$ on $[0,T]$ with guaranteed $\varepsilon$ precision
uniformly in $[0,T]$, we only need to run the above algorithm $T$ times to
get $T$ i.i.d. sample paths $\{B^{\varepsilon,(i)}(t)\dvtx t\in [0,1]\}$ for $
i=1,2,\ldots,T$, and define recursively
\[
B^{\varepsilon}(t)=\sum_{i=1}^{\lfloor t\rfloor}B^{\varepsilon
,(i)}(1)+B_{\lfloor
t\rfloor}^{\varepsilon}
\bigl(t-\lfloor t\rfloor\bigr).
\]
\subsection{A conceptual framework for the joint simulation of \texorpdfstring{$\tau_{\varepsilon}$}{tauvarepsilon} and
\texorpdfstring{$\mathbf{Z}^{\varepsilon}$}{Zvarepsilon}}\label{sec4.3}
Our goal now is to develop an algorithm for simulating $\tau_{\varepsilon
}$ and
$ ( \mathbf{Z}^{\varepsilon}(t)\dvtx 0\leq t\leq\tau_{\varepsilon} ) $
jointly. In detail, we want to simulate $\mathbf{Z}^{\varepsilon}(t)$ forward
in time and stop at a random time $\tau_{\varepsilon}$ such that for any
time $
s>\tau_{\varepsilon}$, $Z_{i}(s)\leq Z_{i}(\tau_{\varepsilon})+\varepsilon$ for $
1\leq i\leq d$.
Because of the special structure of the wavelet representation used in
simulating the process $\mathbf{Z}^\varepsilon(\cdot)$, the time $
T_m\triangleq\inf\{t\geq0\dvtx Z^{\varepsilon}_i(t)>m\mbox{ for some }1\leq
i\leq
d\}$ is no longer a stopping time with respect to the filtration generated
by $\mathbf{Z}(\cdot)$. As a consequence, we cannot directly carry out
importance sampling as in Algorithm~\ref{alg1.1.1}. To remedy this problem, we
decompose the process $\mathbf{Z}^{\varepsilon}(t)$ into two parts: a
random walk $\{\mathbf{Z}^{\varepsilon}(n)\dvtx n\geq0\}$ with Gaussian increment
and a series of independent Brownian bridges $\{\bar{\mathbf{B}}
_n(s)\triangleq\mathbf{Z}^{\varepsilon}(n+s)-\mathbf{Z}^{\varepsilon}(n)\dvtx s\in
[0,1]
,n\geq0\}$. Our strategy is to first carry out the importance sampling
as in
Algorithm \ref{alg1.1.1} to the random walk $\{\mathbf{Z}^{\varepsilon}(n)\dvtx n\geq0\}
$ to
find its upper bound, and next develop a new scheme to control the upper
bounds attained in the intervals $\{(n,n+1)\dvtx n\geq0\}$ for the i.i.d.
Brownian bridges $\{\bar{\mathbf{B}}_n(s)\dvtx s\in[0,1],n\geq0\}$.
The whole procedure is based on the wavelet representation of Brownian
motion. Let $\{W_{n}^{k}(i)\dvtx n,k\in\mathbb{N}, i=1,2,\ldots,d\}$ be a sequence
of i.i.d. standard normal random variables. According to the expression
given in Theorem~\ref{th3}, for any $t=n+s$, $s\in[0,1]$,
\begin{eqnarray}\label{wv}
Z_{i}(t)&=&Z_{i}(n)+s\bigl(Z_i(n+1)-Z_i(n)
\bigr)
\nonumber
\\[-8pt]
\\[-8pt]
\nonumber
&&{}+\sum_{j=1}^{d} A_{ij}
\Biggl(\sum_{k=1}^{\infty}W_{n}^{k}(j)
\int_{0}^{s} H_{k}(u)\,du\Biggr).
\end{eqnarray}
Let us put (\ref{wv}) in matrix form,
\[
\mathbf{Z}(t)=\mathbf{Z}(n)+s\bigl(\mathbf{Z}(n+1)-\mathbf{Z}(n)\bigr)+A\sum
_{k=1}^{
\infty}\mathbf{W}_{n}^{k}
\cdot\int_{0}^{s} H_{k}(u)\,du.
\]
For all $n\geq0$ and $s\in[0,1]$, $\bar{\mathbf{B}}_{n}(s)=
A\sum_{k=1}^{\infty}\mathbf{W}_{n}^{k}\cdot\int_{0}^{s}
H_{k}(u)\,du$. Then the sequence $\{\bar{\mathbf{B}}_{n}(\cdot)\dvtx n\geq0\}$
is i.i.d.
Note that $(Z_i(n+1)-Z_i(n))$ is independent of $\{W_{n}^{k}(i)\dvtx k\geq1\}$.
We can split the simulation into two independent parts:
\begin{longlist}[(1)]
\item[(1)] Simulate the discrete-time random walk $\{\mathbf
{Z}(n)\dvtx n\geq0\}$
with i.i.d. Gaussian increments and $\mathbf{Z}(0)=0$. That is, $Z_{i}(0)=0$
and $Z_i(n+1)=Z_i(n)+\sum_{j=1}^{d} A_{ij}W_{n+1}^{0}(j)-\mu_i$, where
$
\{W_n^0(j)\dvtx n\geq0\}$ are i.i.d. standard normals.
\item[(2)] For each $n$, simulate $\bar{\mathbf{B}}_{n}(s)$ to do bridging
between $\mathbf{Z}(n)$ and $\mathbf{Z}(n+1)$.
\end{longlist}
Now, any time $t_0>0$ is an approximate coalescence time $\tau_\varepsilon
$ if
there exists some positive constant $\zeta>0$ such that the following two
conditions hold for all $n\geq t_0$: Condition (1), $\mathbf{Z}(n)\leq
\mathbf{Z}(t_0)-\zeta(n-\lceil t_0\rceil)\mathbf{1}+\bolds{\varepsilon
}$, and
condition (2), $\max\{\bar{\mathbf{B}}_{n}(s)\dvtx s\in[0,1]\}\leq\zeta
(n-\lceil
t_0\rceil)\mathbf{1}$. Based on these observations, we develop an algorithm
to simulate the approximate coalescence time $\tau_{\varepsilon}$ jointly
with $
\{ \mathbf{Z}^{\varepsilon}(t)\dvtx 0\leq t\leq\tau_{\varepsilon}\} $.
By Assumption \ref{assD}, $\mu_{i}>\delta_{0}$ for some $\delta_{0}>0 $. Let $
\zeta=\delta_{0}/2$, and define $\mathbf{S}(n)=\mathbf{Z}(n)+n\bolds
{\zeta}
\mathbf{1}$ such that $\{\mathbf{S}(n)\dvtx n\geq0\}$ is a random walk with
strictly negative drift. Therefore, condition (1) can be checked by carrying
out the importance sampling procedure as in Algorithm \ref{alg1.1.1} for the random
walk $\{\mathbf{S}(n)\dvtx n\geq0\}$. More precisely, since $S_i(n)$ has
Gaussian increments, we can compute explicitly that $\theta^*_i=2(\mu_i-
\zeta)/\sigma_i$ and choose $m>0$ satisfying (\ref{L1}) in order to carry
out the importance sampling procedure for the random walk $\{\mathbf{S}
(n)\dvtx n\geq0\}$. Suppose we use the importance sampling procedure and
find $
t_0$ such that $\mathbf{S}(n)\leq\mathbf{S}(t_0)$ for all $n\geq t_0$, and
hence condition (1) is satisfied for $t_0$.
About condition (2), recall that $\bar{\mathbf{B}}_{n}(\cdot)$'s are i.i.d.
linear combinations of Brownian bridges, and let $M$ be a random time, finite
almost surely, such that
\begin{equation}
M\geq\max\Bigl\{n\geq t_0\dvtx \max_{0\leq s\leq1}\bigl(
\bar{B}_{n,i}(s)-\zeta (n-t_0)\bigr)>0
\mbox{ for
some }i\Bigr\}. \label{Def_M}
\end{equation}
Observe that for $t_0$ to be an approximate coalescence time, conditions (1)
and~(2) must hold simultaneously. If for time $t_0$, for example,
condition~(1) is satisfied while condition (2) is not, we need to
continue the
testing procedure and simulation of the process for $t>t_0$. Then, however,
the random walk $\{\mathbf{S}(n)\dvtx n\geq\lceil t_0\rceil\}$ should be
conditioned on that $\mathbf{S}(n)\leq\mathbf{S}(t_0)$ for the fact that
condition (1) holds for $t_0$ reveals ``additional information'' on the random
walk for $n\geq t_0$. Therefore, such ``additional information'' or
``conditioning event'' must be incorporated and tracked when conditions
(1) and
(2) are sequentially tested. All of these conditioning events are described
and accounted for in Section~\ref{sec4.3.2}, which also includes the overall
procedure to sample $\tau_{\varepsilon}$ jointly with $\mathbf{Z}^{\varepsilon}$.
Now, let us first provide a precise description of $M$ and explain the
simulation algorithm for $M$ in Section~\ref{sec4.3.1}.
\subsubsection{Simulating $M$ and \texorpdfstring{$\{\bar{\mathbf{B}}^{\protect\varepsilon}_{n}(\cdot)\dvtx 1\leq n\leq M\}$}
{\{bar{{B}}{varepsilon}{n}(cdot):1<=n<=M\}}}\label{sec4.3.1}
Recall that $\bar{\mathbf{B}}_{n}(t)= \break A\sum_{k=1}^{\infty}\mathbf{W}
_{n}^{k}\cdot\int_{0}^{t} H_{k}(u)\,du$, where $\{W_{n}^{k}(i)\dvtx n\geq
0,k\geq
1, 1\leq i\leq d\}$ are i.i.d. standard normals. Note that
\[
\sum_{n=0}^{\infty}\sum_{k=1}^{\infty} P
\bigl(\bigl|W_{n}^{k}(i)\bigr|\geq4\sqrt{\log(n+1)}+4\sqrt {\log
k
} \bigr) \leq\sum_{n=0}^{\infty} \sum
_{k=1}^{\infty}\frac{1}{((n+1)k)^{4}} <\infty.
\]
By the Borel--Cantelli lemma, we can conclude that for each $i\in\{
1,\ldots,d\}$
there exists $M^{i}<\infty$ such that for all $(n+1)k>M^{i} $, $
|W_{n}^{k}(i)|\leq4\sqrt{\log(n+1)}+4\sqrt{\log k}$. Clearly, $\sqrt
{\log t}
=o(t)$ as $t\rightarrow\infty$, so we can select an $m_{0}$ large enough such
that for any $n>m_{0}$,
\[
(n+1)\zeta-ad\Biggl(4\sqrt{\log(n+1)}+\sum_{j=1}^{\infty}2^{-j/2}
\sqrt{j}\Biggr)\geq0.
\]
Note that $M^i$ can be simulated jointly with $(W_n^k(i)\dvtx n\geq0,k\geq1,
1\leq i\leq d, (n+1)k\leq M^i)$ by adapting Algorithm \ref{alg2w} in Section~\ref{SubWave} and $M^i$'s are independent of each other. Then, for any $
n>\max_{i=1}^{d}M^{i}\vee m_{0}$,
\begin{eqnarray*}
\bar{\mathbf{B}}_{n}(t) & =&A\sum_{k=1}^{\infty}
\mathbf{W}_{n}^{k}\cdot \int_{0}^{t}H_{k}(u)\,du
\\
& \leq& ad\Biggl(4\sqrt{\log(n+1)}+\sum_{j=1}^{\infty}2^{-j/2}
\sqrt{j}\Biggr)\leq (n+1)\zeta,
\end{eqnarray*}
where $j=\lceil\log_{2}{k}\rceil$. Therefore, we can choose $
M=\max_{i}M^{i}\vee m_{0}$.
Now we introduce a variation of Algorithm \ref{alg2w} that will be used in the
procedure to simulate $M$ and $\{\bar{B}_n^{\varepsilon}(\cdot)\dvtx 1\leq
n\leq M\}$
jointly. In the following algorithm, a~sequence of ``conditioning
events'' of
the form $|W^k|\leq\beta^k$, for some given constants $\{\beta^k\dvtx \beta
^k>4
\sqrt{\log k}\}$, is in force. Let $\Phi(a)=P(|W|<a)$ for all $a>0$,
where $W
$ is a standard normal. The random number $K$ to be simulated is
defined as in~(
\ref{definiation of K}).
\renewcommand{\thealgorithm}{2\normalfont{w}$'$}
\begin{algorithm}[(Simulate $K$ jointly with $\{W^k\dvtx 1\leq k\leq K\}$
conditional on $|W^k|\leq\beta^k$ for all $k\geq1$)]\label{alg2w'}
\textit{Step} 0: Initialize $G=K_0$ as defined in (\ref{definition of K_0}) and $S$
to be an empty array.
\textit{Step} 1: Set $U=1$, $D=0$. Simulate $V\sim \operatorname{Uniform}(0,1)$.
\textit{Step} 2: While $U>V>D$, set $G\leftarrow G+1$ and $U\leftarrow\frac{\Phi
(4
\sqrt{\log G})}{\Phi(\beta^{G})}\times U$ and $D\leftarrow$
$(1-G^{-7})\times
U$.
\textit{Step} 3: If $V\geq U$, add $G$ to the end of $S$, that is, $S=[S,G]$,
and return
to step~1.
\textit{Step} 4: If $V\leq D$, $K=\max(S,K_0)$.
\textit{Step} 5: For every $k\in S$, generate $W^{k}$ according to the conditional
distribution of $Z$ given $\{4\sqrt{\log{k}}<|W|\leq\beta^k\}$; for
other $
1\leq k\leq K$, generate $W^{k}$ according to the conditional distribution
of $W$ given $\{|W|\leq4\sqrt{\log{k}}\}$.
\end{algorithm}
The main difference between Algorithm \ref{alg2w'} and the original Algorithm
\ref{alg2w} is
that $U$ and $V$ are now computed from the conditional probability; however,
the relations $U>V>D$ and $U-D\to0$ still hold, and hence Algorithm \ref{alg2w'}
is valid. Based on this, we can now give the main procedure to simulate $M$
and $\{\bar{B}_n^{\varepsilon}(\cdot)\dvtx 1\leq n\leq M\}$ jointly:
\renewcommand{\thealgorithm}{2\normalfont{m}}
\begin{algorithm}[(Simulating of $M$ and $\{\bar{\mathbf{B}}
_{n}^{\varepsilon}(\cdot)\dvtx 1\leq n\leq M\}$ jointly)]\label{alg2m}
\begin{longlist}[(1)]
\item[(1)] For each index $i$, simulate $M^{i}$ and $(W_{n}^{k}(i)\dvtx n\geq0,
k\geq1, nk<M)$. Compute $M=\max_{i} M^{i}\vee m_{0}$. (As discussed
earlier, $
M^i$'s are simulated by adapting Algorithm \ref{alg2w}.)
\item[(2)] For each $0\leq n\leq M$ and each index $i$, $\{
W_{n}^{k}(i)\dvtx k<
M^i/n\}$ are already given in step 1. For $k\geq M^i/n$, use
Algorithm \ref{alg2w'}
to simulate $K_n^i$ jointly with $\{W_n^k(i)\dvtx M^i/n\leq k\leq K\}$
conditional on $|W_n^k(i)|\leq4(\sqrt{\log(n+1)}+\sqrt{\log
k})\triangleq
\beta^k>4\sqrt{\log k}$.
\item[(3)] For any $0\leq n\leq M$, compute and output
\begin{equation}
\bar{B}_{n,i}^{\varepsilon}(t)= \sum_{j=1}^{d}
A_{ij}\Biggl(\sum_{k=1}^{K_n^{j}}
W_{n}^{k}(j)\int_{0}^{t}
H_{k}(u)\,du\Biggr). \label{bridge}
\end{equation}
\end{longlist}
\end{algorithm}
In step 1 of Algorithm \ref{alg2m}, we can use a similar procedure as in
Algorithm~\ref{alg2w'}
to impose conditioning events of form $|W_n^k(i)|\leq\beta_n^k(i)$ while
simulating $M_i$'s jointly with $W_n^k(i)$'s. In this way, we derive an
algorithm that is able to simulate $M$ jointly with $\{\bar{\mathbf{B}}
_{n}^{\varepsilon}(\cdot)\dvtx 1\leq n\leq M\}$ conditional on $|W_n^k(i)|\leq
\beta_n^k(i)$ for all $n\geq0$, $k\geq1$ and $1\leq i\leq d$ for any given
sequence of $\{\beta_n^k(i)\}$ such that $\beta_n^k(i)>4(\sqrt{\log
(n+1)}+
\sqrt{\log k})$.
\renewcommand{\thealgorithm}{2\normalfont{m}$'$}
\begin{algorithm}[(Simulating of $M$ and $\{\bar{\mathbf{B}}
_{n}^{\varepsilon}(\cdot)\dvtx 1\leq n\leq M\}$ jointly conditional on $
|W_n^k(i)|\leq\beta_n^k(i)$ for all $n\geq0$, $k\geq1$ and $1\leq
i\leq d$)]\label{alg2m'}
\begin{longlist}[(1)]
\item[(1)] For each index $i$, simulate $M_{i}$ and $(W_{n}^{k}(i)\dvtx n\geq0,
k\geq1, nk<M)$ conditional on $|W_n^k(i)|\leq\beta_n^k(i)$ using a similar
procedure as in Algorithm \ref{alg2w'}. Compute $M=\max_{i} M^{i}\vee m_{0}$.
\item[(2)] For each $0\leq n\leq M$ and each index $i$, $\{
W_{n}^{k}(i)\dvtx k<
M^i/n\}$ are already given in step 1. For $k\geq M^i/n$, use Algorithm \ref{alg2w'}
to simulate $K_n^i$ jointly with $\{W_n^k(i)\dvtx M^i/n\leq k\leq K\}$
conditional on $|W_n^k(i)|\leq4(\sqrt{\log(n+1)}+\sqrt{\log k})$. [Note
that $\beta_n^k(i)>4(\sqrt{\log(n+1)}+\sqrt{\log k})>4\sqrt{\log k}$, and
hence this step is well defined.]
\item[(3)] For any $0\leq n\leq M$, compute and output
\[
\bar{B}_{n,i}^{\varepsilon}(t)= \sum_{j=1}^{d}
A_{ij}\Biggl(\sum_{k=1}^{K_n^{j}}
W_{n}^{k}(j)\int_{0}^{t}
H_{k}(u)\,du\Biggr).
\end{longlist}
\end{algorithm}
Algorithm \ref{alg2m'} will be used in the next section in order to keep track of
``conditioning events'' corresponding to condition (2).
\subsubsection{Keeping track of the conditioning events}\label{sec4.3.2}
As we have discussed just prior to the beginning of Section~\ref{sec4.3.1}, we need
to keep track of several conditioning events introduced by conditions
(1) and
(2). First, let us explain how to deal with the conditioning event
corresponding to condition (1). These conditioning events involve only the
random walk $\mathbf{S}(\cdot)$. Now we split $\mathbf{S}(\cdot)$ according
to the sequences of $\{\Gamma_{l}\dvtx l\geq1\}$ and $\{\Delta_{l}\dvtx l\geq1\}$ of
random times defined as follows:
\begin{longlist}[(1)]
\item[(1)] Set $\Delta_{1}=\min\{n\dvtx S_{i}(n)\leq-2m\mbox{ for every }i\}$.
\item[(2)] Define $\Gamma_{l}=\min\{n\geq\Delta
_{l}\dvtx S_{i}(n)>S_{i}(\Delta_{l})+m
\mbox{ for some }i\}$.
\item[(3)] Put $\Delta_{l+1}=\min\{n\geq\Gamma_{l} I(\Gamma_{l}<\infty
)\vee
\Delta_{l}\dvtx S_{i}(n)<S_{i}( \Delta_{l})-2m\mbox{ for every }i\}$.
\end{longlist}
\begin{figure}
\caption{Illustration for the random times $\{\Delta_n\}$ and $\{\Gamma_n\}$.}
\label{fig1}
\end{figure}
Figure~\ref{fig1} illustrates a sample path of the random walk with the sequence of
random times $\{\Gamma_{l}\dvtx l\geq1\}$ and $\{\Delta_{l}\dvtx l\geq1\}$ in one
dimension. The message is that the joint simulation of $\{\mathbf{S}
(n)\dvtx n\geq0\}$ with $\{\Gamma_{l}\dvtx l\geq1\}$ and $\{\Delta_{l}\dvtx l\geq1\}$
allows us to keep track of the process $\{\max_{m\geq n}\mathbf
{S}(m)\dvtx n\geq
0\}$, which includes the ``additional information'' introduced by condition
(1). The main steps in the simulation of $\{\mathbf{S}(n)\dvtx n\geq0\}$ jointly
with $\{\Gamma_l\dvtx l\geq1\}$ and $\{\Delta_l\dvtx l\geq1\}$ are explained in
Lemma~2 through Lemma~4 in \citet{BlanchetSigman2011}. The approach of
\citet{BlanchetSigman2011}, which works in one dimension, could be
modified for multidimensional cases using the change-of-measure as described
in Section~\ref{sec2.3.1}.
Regarding the verification of condition (2) involving $M$ and the
Brownian bridges, as per the discussion in Section~\ref{sec4.3.1}, we just need to
keep track of certain deterministic $\beta_n^k(i)$ for each
$|W_{n}^{k}(i)|$, in order to condition on the events of the form
$|W_n^k(i)|\leq\beta_n^k(i)
$. These events are related to the sequential construction of the
random variable $M$ when testing condition (2) as described in
Section~\ref{sec4.3.1}. Now, we
can write down the integrated version of our algorithm for sampling $
\tau_\varepsilon$ and $\{\mathbf{Z}^{\varepsilon}(t)\dvtx 0\leq t\leq\tau_{\varepsilon
}\}$
jointly.
\renewcommand{\thealgorithm}{2.1}
\begin{algorithm}[(Simulating $\tau_{\varepsilon}$ and $\{\mathbf{Z}
^{\varepsilon}(t)\dvtx 0\leq t\leq\tau_{\varepsilon}\}$)]\label{alg2.1}
The output of this algorithm is $\{\mathbf{Z}^{\varepsilon}(t)\dvtx 0\leq
t\leq\tau_{\varepsilon}\}$, and the approximation coalescence time $
\tau_{\varepsilon}$.
\begin{longlist}[(1)]
\item[(1)] Set $\beta_{n}^{k}(i)=\infty$ for all $n\geq1$, $k\geq1$ and
$1\leq
i\leq d $. Set $L=0$ and $\tau_{\varepsilon}=0$.
\item[(2)] Simulate $\mathbf{S}(n)$ until $\Delta_{l}$, where $l=\min\{
j\dvtx \Gamma
_{j}=\infty,\Delta_{j}>\tau_{\varepsilon}\}$. Compute $\mathbf{Z}^{\varepsilon
}(n)=
\mathbf{S}(n)-n\bolds{\zeta}$.
\item[(3)] For each $n\in[\tau_{\varepsilon},\Delta_{l}]\cap\mathbb
{Z}_+$ and
each index $1\leq i\leq d$, compute the i.i.d. bridges $\{\bar{\mathbf
{B}
}^\varepsilon_n(\cdot)\}$ using (\ref{bridge}), in which $K_n^i$ is\vspace*{1pt} jointly
simulated with $(W_n^k(i)\dvtx 1\leq k\leq K_n^i )$ conditional on that $
|W_n^k(i)|\leq\beta_n^k(i)$ for all $k\geq1$ using Algorithm \ref{alg2w'}.
Given $
\bar{\mathbf{B}}^\varepsilon_n(\cdot)$ and $\mathbf{S}(n)$ for $n\in[
\tau_{\varepsilon},\Delta_{l}]\cap\mathbb{Z}_+$, the process $\mathbf
{Z}
^{\varepsilon}(t)$ for $t\in[\tau_{\varepsilon},\Delta_{l}]$ can be directly
computed. If there exists some $t\geq\Gamma_{l-1}$ such that for all
$t\leq
s\leq\Delta_{l}$, $Z_{i}^{\varepsilon}(t)\geq Z^{\varepsilon}_{i}(s)-2\varepsilon$
and $Z^{\varepsilon}_{i}(t)\geq Z^{\varepsilon}_{i}(\Delta_{l})+m-2\varepsilon
$, set
$\tau_{\varepsilon}\leftarrow t$, and go to step 4. Otherwise, set $
\tau_{\varepsilon}\leftarrow\Delta_{l}$ and return to step 2.
\item[(4)] Use Algorithm \ref{alg2m'} to simulate $M$ jointly with $(\bar{\mathbf
{B}}
^\varepsilon_{\tau_{\varepsilon}+n}(\cdot)\dvtx 0\leq n\leq M)$ conditional on $
|W_{\tau_{\varepsilon}+n}^{k}(i)|\leq\beta_{\tau_\varepsilon+n}^{k}(i)$ for
all $
n\geq0$, $k\geq1$ and $1\leq i\leq d$. Update $\beta_{\tau_{
\varepsilon}+n}^{k}(i)\leftarrow4\sqrt{\log{(n+1)}}+4\sqrt{\log{k}}$ for
all $
n\cdot k\geq M^{i}$. Keep simulating $\mathbf{S}(n)$ until $n=\Delta_l+M$,
and compute $\{\mathbf{Z}^{\varepsilon}(t)\dvtx t\in[\Delta_l,\Delta_l+M]\}$. If
there exist some $t$ and $i$ such that $Z_{i}^{\varepsilon}(t)>Z^{\varepsilon
}_i(
\tau_{\varepsilon})+\varepsilon$, set $\tau_{\varepsilon} \leftarrow t$ and
return to
step 2.
\item[(5)] Otherwise, stop and output $\tau_{\varepsilon}$ as the approximation
coalescence time along with $(\mathbf{Z}^{\varepsilon}(t)\dvtx 0\leq
t\leq\tau_{\varepsilon})$.
\end{longlist}
\end{algorithm}
\subsection{Computational complexity}
\label{SubSecCCRBM}
In this part, we will discuss the complexity of our algorithm when $d$ and
the other parameters $\bolds{\mu}$ and $A$ are fixed while the precision
parameter $\varepsilon$ is sent to 0. Denote the total number of random variables
needed by $N(\varepsilon)$ when the precision parameter for the algorithm
is $
\varepsilon$.
According to Assumption \ref{assD}, the input process $\mathbf{Z}(t)$ equals $-
\bolds{\mu}t+A\mathbf{B}(t)$ with $\mu_{i}>\delta_0>0$. Let $\max_{i,j}
|A_{ij}|=a$. The following result shows that our algorithm's running
time is
polynomial in $1/\varepsilon$:
\begin{theorem}\label{th4}
\label{ThmMain2}Under Assumption \textup{\ref{assD}},
\[
E\bigl[N(\varepsilon)\bigr]=O\biggl(\varepsilon^{-a_C-2}\log\biggl(
\frac{1}{\varepsilon}\biggr)\biggr)\qquad\mbox{as }
\varepsilon\rightarrow0,
\]
where $a_C$ is a computable constant depending only on $A$.
\end{theorem}
The random variables we need to simulate in the algorithm can be divided
into two parts: first, the random variables used to construct the discrete
random walk $\mathbf{Z}(n)$ for $n\leq T$ and second, the conditional
normals used for bridging between $\mathbf{Z}(n-1)$ and $\mathbf{Z}(n)$.
Since $1(|W|>\eta)$ and $1(|W|\leq\beta)$ are negatively correlated, it
follows that
\[
P\bigl(|W|>\eta \mid |W|\leq\beta\bigr)\leq P\bigl(|W|>\eta\bigr).
\]
Therefore, the expected number of conditional Gaussian random variables used
for Brownian bridges between $\mathbf{Z}(n-1)$ and $\mathbf{Z}(n)$ is
smaller than the expected number that we would obtain if we use standard
Gaussian random variables instead in steps~3 and 4 in Algorithm
\ref{alg2.1}. Let
$K=\max\{k\dvtx |W_{k}|>\eta_{k}\}\vee K_{0}$ as defined in (\ref
{definiation of
K}). As discussed above, the expected number of truncated Gaussian random
variables needed for each bridge $\bar{B}^{\varepsilon}_{n,i}(\cdot)$ is
bounded by $E[K]$.
Therefore,
\[
E\bigl[N(\varepsilon)\bigr]\leq\bigl(d E[K]+1\bigr) \bigl(E[T]+1\bigr).
\]
To prove Theorem~\ref{th4}, we first need to study $E[K]$ and $E[T]$.
\begin{proposition}\label{pr7}
\[
E[K]=O\biggl(\varepsilon^{-2}\log{ \biggl( \frac{1}{\varepsilon} \biggr) }
\biggr).
\]
\end{proposition}
\begin{pf}
Recall that $\eta_{k}=4\sqrt{\log k}$, and let $p_{k}=P(|W^{k}|>\eta
_{k})$. Then
$p_{k}=O(k^{-4})$. Therefore
\begin{eqnarray*}
E[K] & =&\sum_{n=1}^{\infty}P(K>n)\leq
K_{0}+\sum_{n=K_{0}+1}^{\infty}
\sum_{k=n}^{\infty}p_{k}
\\
& =&K_{0}+\sum_{k=K_{0}+1}^{\infty}k\cdot
p_{k}\leq K_{0}+O\biggl(\sum_{k=1}
^{\infty}k^{-3}
\biggr).
\end{eqnarray*}
The second term of the right-hand side is finite and independent of
$\varepsilon$
and $K_{0}$.
On the other side,
\[
\sum_{j=\log_{2}{K_{0}}}^{\infty}2^{-j/2}\sqrt{j+1}\leq
\frac{2}{\log2}(\sqrt {K_{0}
})^{-1}\biggl(
\sqrt{\log_{2}{K_{0}}}+\frac{2}{\log{2}}\biggr).
\]
Therefore, we can choose $K_{0}=O(\varepsilon^{-2}\log{(\frac{1}{\varepsilon})})$
such that $\sum_{j=\log_{2}{K_{0}}}^{\infty}2^{-j/2}\times\break \sqrt{j+1}<\varepsilon$.
In order to get the approximation within error at most $\varepsilon$ for
the $d$-dimensional process, according to the Cholesky decomposition as
discussed in
Section~\ref{SubWave}, we should replace $\varepsilon$ by $\frac{\varepsilon}{da}$.
Therefore,
\[
E[K]=O\biggl( \biggl( \frac{\varepsilon}{da} \biggr) ^{-2}\log{ \biggl(
\frac
{da}{\varepsilon
} \biggr) }\biggr)=O\biggl(\varepsilon^{-2}\log{ \biggl(
\frac{1}{\varepsilon} \biggr) }\biggr).
\]
\upqed\end{pf}
What remains is to estimate $E[T]$. Let $T_{a}$ be the time before
the algorithm executes step 4 in a single iteration. Using the same notation
as in Algorithm \ref{alg2.1} and a similar argument as in Section~\ref{sec2.4}, we have
\[
E[T]=\frac{E[T_{a}]+E[T_{m}|T_{m}<\infty]+E[M]}{P(T_{m}<\infty)p},
\]
where
\[
p=P\Bigl(\max_{i}Z_{i}^{\varepsilon}(t)<m+
\varepsilon,\forall 0\leq t\leq M |
\mathbf{Z}(0)=0;\mathbf{S}(n)<m
\Bigr).
\]
As $\mathbf{Z}^{\varepsilon}(t)=\mathbf{S}(n)-n\zeta\mathbf{1}+A\bar
{\mathbf{B}}_n(t-n)$
and the Brownian bridge $\bar{\mathbf{B}}_n(\cdot)$ is independent of $
\mathbf{S}(\cdot)$, it follows that
\[
p\geq P\Bigl(\max_{i}\max_{t\geq0}Z_{i}(t)<m|
\mathbf{Z}(0)=0\Bigr).
\]
Since $\mathbf{S}(1)$ is a multidimensional Gaussian random vector with
strictly negative drift, assumptions (C1) to (C3) are satisfied. Applying
Proposition~\ref{pr4}, we can get upper bounds for $E[T_{m}|T_{m}<\infty]$, $
1/P(T_{m}<\infty)$ and $1/P(\max_{i}\max_{t} Z_{i}(t)<m|\mathbf{Z}(0)=0)$,
which depend only on $d, a$ and $\delta$ and thus are independent of $
\varepsilon$. Besides, the bound for $E[M]$ can be estimated by the same method
as in Proposition~\ref{pr7} in terms of $\zeta=\delta/2$; hence such a bound is also
independent of $\varepsilon$. Therefore, we only need to estimate $E[T_{a}]$.
\begin{proposition}
\label{PropCorThmMain2}$E[T_{a}]=O(\varepsilon^{-a_{C}})$ as $\varepsilon
\rightarrow0$. Here $a_{C}$ only depends on the matrix $A$. Moreover,
in the
special cases where $A_{ij}\geq0$, $a_{C}=d$.
\end{proposition}
\begin{pf}
Recall that $\mathbf{Z}(t)=-\bolds{\mu}t+A\mathbf{B}(t)$ and $\mu
_i>\delta=2\zeta>0$ as given in Assumption \ref{assD}. We divide the path
of $\mathbf{Z}(t)$ into segments with length $2(m+\varepsilon)/\zeta$,
\[
\biggl\{ \biggl(Z\biggl(k\cdot\frac{2(m+\varepsilon)}{\zeta}+s\biggr)\dvtx 0\leq s\leq
\frac
{2(m+\varepsilon
)}{\zeta} \biggr)\dvtx k\geq0 \biggr\}.
\]
Let
\begin{eqnarray*}
&&N_{b}=\min\biggl\{k\dvtx A\mathbf{B}\biggl(k\cdot
\frac{2(m+\varepsilon)}{\zeta}+s\biggr)-A\mathbf {B}\biggl(k\cdot\frac{2(m+\varepsilon)}{\zeta}\biggr)\leq
\bolds{\varepsilon}\\
&&\hspace*{170pt}\mbox{for all } 0\leq s\leq\frac{2(m+\varepsilon
)}{\zeta} \biggr\}.
\end{eqnarray*}
By independence and stationarity of the increments of Brownian motion, $N_{b}$
is a geometric random variable with parameter
\[
p=P \biggl( A\mathbf{B}(s)\leq \bolds{\varepsilon}\mbox{ for all } 0\leq s\leq
\frac{2(m+\varepsilon
)}{\zeta} \biggr).
\]
On the other hand, since $-\mu_{i}<-2\zeta$, we have:
\begin{longlist}[(1)]
\item[(1)] $Z_{i}(N_{b}\cdot\frac{2(m+\varepsilon)}{\zeta}+s)\leq
Z_{i}(N_{b}
\cdot\frac{2(m+\varepsilon)}{\zeta})+\varepsilon$, for all $0\leq s\leq
\frac{2(m+\varepsilon)}{\zeta}$.
\item[(2)] $Z_{i}((N_{b}+1)\cdot\frac{2(m+\varepsilon)}{\zeta})\leq
Z_{i}(N_{b}
\cdot\frac{2(m+\varepsilon)}{\zeta})-m$.
\end{longlist}
Therefore, Algorithm \ref{alg2.1} should execute step 4 after at most $\frac
{2(m+\varepsilon)}{\zeta}(N_{b}+1)$ units of time in a single iteration,
\[
E[T_{a}]\leq\frac{2(m+\varepsilon)}{\zeta}E[N_{b}+1]=
\frac{2(m+\varepsilon)}
{\zeta}\biggl(1+\frac{1}{p}\biggr).
\]
From this inequality, it is now sufficient to show that $p=O(\varepsilon
^{a_{C}
})$.
Note that the set $C=\{\mathbf{y}\in\mathbb{R}^{d}\dvtx A\mathbf{y}\leq
\bolds{\varepsilon}\}$ forms a cone with vertex $A^{-1}\bolds
{\varepsilon}$ in
$\mathbb{R}^{d}$ since $A$ is of full rank under Assumption \ref{assD}. Define
$\tau_{C}=\inf\{t\geq0\dvtx \mathbf{B}(t)\notin C\}$ given $\mathbf
{B}(0)=0$, then
\[
p=P\biggl(\tau_{C}>\frac{2(m+\varepsilon)}{\zeta}\biggr).
\]
If $d=2$, it is proved by \citet{Burkholder1977} that $a_{C}=\frac{\pi
}{\theta}$ where $\theta\in{}[0,\pi)$ is the angle formed by the column
vectors of $A^{-1}$. Therefore, we can compute explicitly that
\[
\theta=\arccos \biggl(-\frac{A_{11}A_{21}+A_{12}A_{22}}{\sqrt
{(A_{11}^{2}+A_{12}^{2})(A_{21}
^{2}+A_{22}^{2})}} \biggr),
\]
which only depends on $A$.
On the other hand, if $d\geq3$, applying the results on exit times for
Brownian motions given by Corollary~1.3 in \citet{DeBlassie},
\[
P\biggl(\tau_{C}>\frac{2(m+\varepsilon)}{\zeta}\biggr)\sim u\cdot\bigl\Vert
A^{-1}
\bolds{\varepsilon}\bigr\Vert^{a_{C}}
\]
as $\varepsilon\rightarrow0$. Here $\Vert\cdot\Vert$ represents the
Euclidean norm,
and $u$ is some constant independent of $\varepsilon$. The rate $a_{C}$ is
determined by the principal eigenvalue of the Laplace--Beltrami
operator on
$(\mathbf{S}^{d-1}\cap C)$, where $\mathbf{S}^{d-1}$ is a unit sphere centered
at the vertex of $C$, namely $A^{-1}\varepsilon$. The principal eigenvalue only
depends on the geometric features of $C$, and it is independent of
$\varepsilon$;
hence so is $a_{C}$. Since $A$ is given, we have
\[
P\biggl(\tau_{C}>\frac{2(m+\varepsilon)}{\zeta}\biggr)=O\bigl(
\varepsilon^{a_{C}}\bigr)\qquad\mbox{as }\varepsilon\rightarrow0.
\]
Computing $a_{C}$ for $d\geq3$ is not straightforward in general. However,
when $A_{ij}\geq0$, we can estimate $a_{C}$ from first principles.
Indeed, if
$A_{ij}\geq0$ and we let $a=\max A_{ij}$, we have that
\[
C=\bigl\{\mathbf{y}\in\mathbb{R}^{d}\dvtx A\mathbf{y}\leq\bolds{
\varepsilon}
\bigr\}\subset\biggl\{\mathbf{y}\in\mathbb{R}^{d}
\dvtx y_{i}\leq\frac{\varepsilon}{ad}\biggr\}.
\]
As the coordinates of $\mathbf{B}(t)$ are independent,
\[
p\geq P \biggl( \max_{0\leq t\leq{2(m+\varepsilon)}/{\zeta}}B(t)\leq \frac{\varepsilon}{ad} \biggr)
^{d},
\]
where $B(\cdot)$ is a standard Brownian motion on real line.
Applying the reflection principle, we have
\begin{eqnarray*}
&&P \biggl( \max_{0\leq t\leq{2(m+\varepsilon)}/{\zeta} }B(t)\leq\frac{\varepsilon
}{ad} \biggr) \\
&&\qquad=\int
_{-{\varepsilon}/{(ad)}}^{{\varepsilon}/{(ad)}}\frac{1}
{\sqrt{2\pi({2(m+\varepsilon)}/{\zeta})}}\exp{ \biggl( -
\frac{x^{2}}
{2({2(m+\varepsilon)}/{\zeta})} \biggr) }\,dx\\
&&\qquad=O(\varepsilon).
\end{eqnarray*}
As a result, $p=O(\varepsilon^{d})$ when the correlations are all nonnegative.
\end{pf}
Given these propositions, we can now prove the main result in this part.
\begin{pf*}{Proof of Theorem~\ref{ThmMain2}}
As we have discussed,
\[
E\bigl[N(\varepsilon)\bigr]\leq\bigl(dE[K]+1\bigr) \bigl(E[T]+1
\bigr).
\]
First, by Proposition~\ref{pr7},
$
E[K]=O(\varepsilon^{-2}\log{(\frac{1}{\varepsilon})})$.
Besides, as discussed above,
\[
E[T]\leq\frac{E[T_{a}]+E[T_{m}|T_{m}<\infty]+E[M]}
{P(T_{m}<\infty)P(\max_{i}\max_{t\geq0} Z_{i}(t)<m|\mathbf{Z}(0)=0)}.
\]
According to Proposition~\ref{PropCorThmMain2}, $E[T_{a}]=O(\varepsilon^{-a_{C}})$, and
$a_{C}$ is a
constant when $A$ is fixed. In the end, as we have discussed,
$E[T_{m}|T_{m}<\infty]$,
$P(T_{m}<\infty)$, $P(\max_{i}\max_{t} Z_{i}(t)<m|\mathbf{Z}(0)=0)$ and $E[M]$
are independent of $\varepsilon$. Therefore,
\[
E[T]=O\bigl(\varepsilon^{-a_{C}}\bigr).
\]
In sum, we have
\[
E\bigl[N(\varepsilon)\bigr]=O\biggl(\varepsilon^{-a_{C}-2}\log{ \biggl(
\frac{1}{\varepsilon} \biggr) }\biggr).
\]
\upqed\end{pf*}
\section{Numerical results}
\label{SectionNumerics}
We first implemented Algorithm \ref{alg1} in order to generate exact samples
from the
steady-state distribution of stochastic fluid networks, and then we implemented
Algorithm \ref{alg2}. Our implementations were performed in Matlab. In all the
experiments we simulated 10,000 independent replications, and we
displayed our
estimates with a margin of error obtained using a 95\% confidence interval
based on the central limit theorem.
For the case of stochastic fluid networks, we considered a 10-station system
in tandem. So, $Q_{i,i+1}=1$ for $i=1,2,\ldots,9$ and $Q_{10,j}=0$ for
all $
j=1,\ldots,10$. We assume the arrival rate $\lambda=1$ and the job
sizes are
exponentially distributed with unit mean. The service rates $ ( \mu
_{1},\ldots,\mu_{10} ) ^{T}$ are given by $
(1.55,1.5,1.45,1.4,1.35,1.3,1.25,1.2,1.15,1.1)$. We are interested in
computing the steady-state mean and the second moment of the workload at
each station (i.e., $E[Y_{i} ( \infty ) ]$ and $E[Y_{i} (
\infty ) ^{2}]$ for $i=1,2,\ldots,10$). For a network of this
type, it
turns out that the true values of the quantities we are interested in
can be
computed from the corresponding Laplace transforms as given in
\citet{Debickietal2007}.
Both the simulation results and the true values are reported in
Table~\ref{tab1}. The
procedure took a few minutes (less than 5) on a desktop, which is quite a
reasonable time.
\begin{table}
\caption{Unbiased estimates of $E[Y_{i} ( \infty ) ]$ and $
E[Y_{i}^{2} ( \infty ) ]$ for a network with ten stations in
tandem}\label{tab1}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lcd{1.4}cd{2.4}@{}}
\hline
& \multicolumn{2}{c}{$\bolds{E[Y_{i} ( \infty ) ]}$} & \multicolumn{2}{c@{}}{$\bolds{E[Y_{i}^{2} ( \infty )
]}$} \\[-4pt]
& \multicolumn{2}{c}{\hrulefill} & \multicolumn{2}{c@{}}{\hrulefill} \\
\textbf{Station} & \textbf{Simulation result} & \multicolumn{1}{c}{\textbf{True value}} &
\textbf{Simulation result} &\multicolumn{1}{c@{}}{\textbf{True
value}}\\
\hline
\phantom{0}1 & 1.7919${}\pm{}$0.0521& 1.8182 & 10.2755${}\pm{}$0.5289& 10.2479 \\
\phantom{0}2 & 0.1761${}\pm{}$0.0068& 0.1818 & 0.1511${}\pm{}$0.0170& 0.1642 \\
\phantom{0}3 & 0.2171${}\pm{}$0.0083& 0.2222 & 0.2242${}\pm{}$0.0224& 0.2382 \\
\phantom{0}4 & 0.2706${}\pm{}$0.0102& 0.2778 & 0.3462${}\pm{}$0.0339& 0.3610 \\
\phantom{0}5 & 0.3516${}\pm{}$0.0131& 0.3571 & 0.5717${}\pm{}$0.0590& 0.5778 \\
\phantom{0}6 & 0.4737${}\pm{}$0.0171& 0.4762 & 0.9840${}\pm{}$0.0871& 0.9921\\
\phantom{0}7 & 0.6632${}\pm{}$0.0233& 0.6667 & 1.8472${}\pm{}$0.1513& 1.8715\\
\phantom{0}8 & 1.0033${}\pm{}$0.0345& 1.0000 & 4.1004${}\pm{}$0.3377& 4.0300\\
\phantom{0}9 & 1.6497${}\pm{}$0.0542& 1.6667 & 10.3734${}\pm{}$0.7823& 10.6065\\
10 & 3.3200${}\pm{}$0.1040& 3.3333 & 39.2015${}\pm{}$2.9950& 39.3631\\
\hline
\end{tabular*}
\end{table}
We then implemented a two-dimensional RBM example. Let us denote the RBM
by $
\mathbf{Y}(t)$. The parameters to specify $\mathbf{Y}$ are as follows: drift
vector $\mu=(-1,-1)$, covariance matrix $\Sigma=[1,0;0,1]$ and reflection
matrix $R=[1,-0.2;-0.2,1]$. For this so-called symmetric RBM, one can
compute in closed form that $E[Y_{1}(\infty)]=E[Y_{2}(\infty)]=5/12\simeq
0.4167$; see, for instance, \citet{DaiHarrison1992}. The output of our
simulation
algorithm is reported in Table~\ref{tab2}.
\begin{table}[b]
\caption{Estimates of $E[Y_{i} ( \infty ) ]$ for a 2-dimensional
RBM with precision $\protect\varepsilon=0.01$}\label{tab2}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lcc@{}}
\hline
& \textbf{Simulation result} & \textbf{True value}\\
\hline
$E[Y_1(\infty)]$ & 0.4164${}\pm{}$0.0137 & 0.4167\\
$E[Y_2(\infty)]$ & 0.4201${}\pm{}$0.0131 & 0.4167\\
\hline
\end{tabular*}
\end{table}
Our implementations here are given with the objective of verifying
empirically the validity of the algorithms proposed. We stress that a direct
implementation of Algorithm \ref{alg2}, although capable of ultimately producing
unbiased estimations of the expectations of RBM, might not be
practical. The
simulations took substantially more time to be produced than those reported
for the stochastic fluid models. This can be explained by the
dependence on $
\varepsilon$ in Theorem~\ref{ThmMain2}. The bottleneck in the
algorithm is finding a time at which both stations are close to
$\varepsilon
$. An efficient algorithm based on suitably trading a strongly controlled
bias with variance can be used to produce faster running times; we
expect to
report this algorithm in the future.
\section*{Acknowledgments}
The authors thank Offer Kella for pointing out Lem\-ma~\ref{LmK_W_Comp} and
thank Amy Biemiller for her editorial assistance. The authors thank the
Editor and referees for their useful comments and suggestions.
\printaddresses
\end{document} |
\begin{document}
\title{Memory-based control with recurrent neural networks}
\begin{abstract}
Partially observed control problems are a challenging aspect of
reinforcement learning. We extend two related, model-free algorithms for
continuous control -- deterministic policy gradient and stochastic value gradient
-- to solve partially observed domains using recurrent neural networks trained with
backpropagation through time.
We demonstrate that this approach, coupled with long short-term memory, is able
to solve a variety of physical control problems exhibiting an
assortment of memory requirements. These include the short-term
integration of information from noisy sensors and the identification of system
parameters, as well as long-term memory problems that require preserving
information over many time steps. We also demonstrate success on a
combined exploration and memory problem in the form of a simplified version of
the well-known Morris water maze task. Finally, we show that our approach can
deal with high-dimensional observations by learning directly from
pixels.
We find that recurrent deterministic and stochastic policies are able to
learn similarly good solutions to these tasks, including the water maze where
the agent must learn effective search strategies.
\end{abstract}
\section{Introduction}
The use of neural networks for solving continuous control problems has a long
tradition. Several recent papers successfully apply model-free,
direct policy search methods to the problem of learning neural network control
policies for challenging continuous domains with many degrees of freedoms
\cite{balduzzi2015compatible, heess2015svg,
lillicrap2015continuous,schulman2015trust,schulman2015advantage,levine2015end}.
However, all of this work assumes fully observed state.
Many real world control problems are
partially observed. Partial observability can arise from different sources
including the need to remember information that is only temporarily available
such as a way sign in a navigation task, sensor limitations or noise, unobserved
variations of the plant under control (system identification), or state-aliasing
due to function approximation. Partial observability also arises naturally in
many tasks that involve control from vision: a static image of a dynamic scene
provides no information about velocities, occlusions occur as a consequence of
the three-dimensional nature of the world, and most vision sensors are
bandwidth-limited and only have a restricted field-of-view.
Resolution of partial observability is non-trivial. Existing methods
can roughly be divided into two broad classes:
On the one hand there are approaches that
explicitly maintain a belief state that corresponds to the
distribution over the world state given the observations so far. This approach
has two major disadvantages: The first is the need for a model, and the second
is the computational cost that is typically associated with the update of the
belief state \cite{kaelbling1998planning, shani2013survey}.
On the other hand there are model free approaches that learn to form
memories based on interactions with the world. This is challenging
since it is a priori unknown which features of the observations will be
relevant later, and associations may have to be formed over many steps.
For this reason, most model free approaches tend to assume the fully-observed
case.
In practice, partial observability is often solved by hand-crafting
a solution such as providing multiple-frames at each timestep
to allow velocity estimation
\cite{mnih2015human, lillicrap2015continuous}.
In this work we investigate a natural extension of
two recent, closely related policy gradient algorithms
for learning continuous-action policies to handle partially observed problems.
We primarily consider the Deterministic Policy Gradient algorithm (DPG) \cite{silver2014deterministic}, which
is an off-policy policy gradient algorithm that has recently produced promising
results on a broad range of difficult, high-dimensional continuous control
problems, including direct control from pixels \cite{lillicrap2015continuous}.
DPG is an actor-critic algorithm that uses a learned approximation of the action-value (Q) function to obtain
approximate action-value gradients. These are then used
to update a deterministic policy via the chain-rule. We also consider DPG's
stochastic counterpart, SVG(0) (\cite{heess2015svg}; SVG stands for ``Stochastic
Value Gradients'') which similarly updates the policy via backpropagation of
action-value gradients from an action-value critic but learns a
stochastic policy.
We modify both algorithms to use
recurrent networks trained with backpropagation through time.
We demonstrate that the resulting algorithms, Recurrent
DPG (RDPG) and Recurrent SVG(0) (RSVG(0)), can be applied to a number of
partially observed physical control problems with diverse memory
requirements. These problems include: short-term integration of sensor
information to estimate the system state (pendulum and cartpole
swing-up tasks without velocity
information); system identification (cart pole swing-up with
variable and unknown pole-length); long-term memory (a robot arm
that needs to reach out and grab a payload to move it to the position the arm
started from); as well as a simplified version of the water maze task which
requires the agent to learn an exploration strategy to find a hidden platform
and then remember the platform's position in order to return to it subsequently.
We also demonstrate successful control directly from pixels.
Our results suggest that actor-critic algorithms that rely on bootstrapping for
estimating the value function can be a viable option for learning control
policies in partially observed domains. We further find that, at least in the
setup considered here, there is little performance difference between stochastic
and deterministic policies, despite the former being typically presumed to be
preferable in partially observed domains.
\section{Background}
We model our environment as discrete-time,
partially-observed Markov decision process (POMDP). A POMDP is described by
a set of environment states $\mathcal{S}$ and a set of actions $\mathcal{A}$,
an initial state distribution $p_0(s_0)$,
a transition function $p(s_{t + 1} | s_{t}, a_{t})$ and reward function
$r(s_t, a_t)$. This underlying MDP is partially observed when the agent
is unable to observe the state $s_t$ directly and instead receives observations
from the set $\mathcal{O}$ which are conditioned on the underlying state
$p(o_t | s_{t} )$.
The agent only indirectly observes the underlying state of the MDP
through the observations. An optimal agent may, in principle, require
access to the entire history
$h_t = (o_1, a_1, o_2, a_2, ... a_{t-1}, o_t)$.
The goal of the agent is thus to learn a policy $\pi(h_t)$ which maps from the
history to a distribution over actions $P(\mathcal{A})$ which
maximizes the expected discounted reward (below we consider both stochastic and
deterministic policies).
For stochastic policies we want to maximise
\begin{equation}
J = \E_{\tau}\left[ \sum_{t=1}^\infty \gamma^{t - 1} r(s_t, a_t) \right],
\end{equation}
where the trajectories $\tau = (s_1, o_1, a_1, s_2, \dots )$ are drawn from the trajectory distribution induced by the policy $\pi$: $p(s_1)p(o_1 | s_1) \pi(a_1|h_1) p(s_2 | s_1,a_1) p(o_2 | s_2 )\pi(a_2 | h_2) \dots$ and where $h_t$ is defined as above.
For deterministic policies we replace $\pi$ with a deterministic function
$\mu$ which maps directly from states $\mathcal{S}$ to actions $\mathcal{A}$ and
we replace $a_t\sim \pi(\cdot | h_t) $
with $a_t = \mu(h_t)$.
In the algorithms below we make use of the action-value function $Q^\pi$.
For a fully observed MDP, when we have access to $s$, the action-value function is defined as the expected future discounted
reward when in state $s_t$ the agent takes action $a_t$ and thereafter follows policy $\pi$.
Since we are interested in the partially observed case where the agent does not
have access to $s$ we instead define $Q^\pi$ in terms of $h$:
\begin{equation}
Q^\pi(h_t, a_t) = \E_{s_t | h_t} \left [ r_t(s_t,a_t) \right ]+ \E_{ \tau_{>t} |
h_t, a_t } \left [ \sum_{i=1}^\infty \gamma^{i} r(s_{t + i}, a_{t +i} ) \right]
\end{equation}
where $\tau_{> t} = (s_{t+1}, o_{t+1}, a_{t+1} \dots)$ is the
future trajectory and the two expectations are taken with respect to the
conditionals $p(s_t | h_t)$ and $p(\tau_{> t} | h_t, a_t)$ of the trajectory
distribution associated with $\pi$. Note that this is equivalent to defining $Q^\pi$ in terms of the
belief state since $h$ is a sufficient statistic.
Obviously, for most POMDPs of interest, it is not tractable to condition on the
entire sequence of observations. A central challenge is to learn how to summarize
the past in a scalable way.
\section{Algorithms}
\subsection{Recurrent DPG} We extend the Deterministic Policy Gradient (DPG)
algorithm for MDPs introduced in \cite{silver2014deterministic} to deal with
partially observed domains and pixels.
The core idea of the DPG algorithm for the \emph{fully observed} case is that for a deterministic policy
$\mu^\theta$ with parameters $\theta$, and given access to the true action-value
function associated with the current policy $Q^\mu$, the policy can be updated
by backpropagation:
\begin{equation}
\partialAt{J(\theta)}{\theta} =
\expectationE{ \left. \partialAt{ Q^\mu(s,a)}{a} \right \rvert_{a = \mu^\theta(s)} \partialAt{\mu^\theta(s)}{\theta} }
{s \sim \rho^\mu},
\label{eq:DPGpolicyUpdate}
\end{equation}
where the expectation is taken with respect to the (discounted) state visitation distribution $\rho^\mu$ induced by the
current policy $\mu^\theta$ \cite{silver2014deterministic}. Similar ideas had previously been exploited in
NFQCA \cite{hafner2011reinforcement} and in the ADP \cite{lewis2009reinforcement}
community.
In practice the exact action-value function $Q^\mu$ is replaced by an
approximate (critic) $Q^\omega$ with parameters $\omega$ that is
differentiable in $a$ and which can be learned e.g.\ with Q-learning.
In order to ensure the applicability of our approach to
large observation spaces (e.g.\ from pixels), we use
neural networks for all function approximators. These networks, with
convolutional layers have proven effective at many sensory processing tasks
\cite{krizhevsky2012imagenet, razavian2014cnn},
and been demonstrated to be effective for scaling reinforcement learning to
large state spaces \cite{lillicrap2015continuous,mnih2015human}.
\cite{lillicrap2015continuous} proposed modifications to DPG necessary in order
to learn effectively with deep neural networks which we make use of here (cf.\
sections \ref{sec:Algorithms:ER}, \ref{sec:Algorithms:Target}).
Under partial observability the optimal policy and the associated action-value
function are both functions of the entire preceding observation-action history
$h_t$. The primary change we introduce is
the use of recurrent neural networks, rather than feedforward networks,
in order to allow the network to learn to preserve (limited) information about
the past which is needed in order to solve the POMDP. Thus, writing $\mu(h)$ and
$Q(h,a)$ rather than $\mu(s)$ and $Q(s,a)$ we obtain the following policy
update:
\begin{equation}
\partialAt{J(\theta)}{\theta} =
\expectationE{
\sum_t \gamma^{t-1} \left. \partialAt{ Q^\mu(h_t,a)}{a} \right \rvert_{a = \mu^\theta(h_t)} \partialAt{\mu^\theta(h_t)}{\theta}
}
{\tau},
\label{eq:RDPGpolicyUpdate}
\end{equation}
where we have written the expectation now explicitly over entire trajectories $\tau = (s_1, o_1, a_1,
s_2, o_2, a_2, \dots )$ which are drawn from the trajectory distribution induced by the current policy
and $h_t = (o_1, a_1, \dots, o_{t-1}, a_{t-1}, o_t) $ is the observation-action trajectory prefix at time step $t$, both as introduced above\footnote{ A
discount factor $\gamma^t$ appears implicitly in the update which is absorbed in
the discounted state-visitation distribution in eq.\ \ref{eq:DPGpolicyUpdate}.
In practice we ignore this term, as is often done in policy gradient
implementations (e.g.\ \cite{thomas2014bias}). }. In
practice, as in the fully observed case, we replace $Q^\mu$ by a learned
approximation $Q^\omega$ (which is also a recurrent
network with parameters $\omega$).
Thus, rather than directly conditioning on the entire observation history,
we effectively train recurrent neural networks to summarize this history in
their recurrent state using backpropagation through time (BPTT).
For long episodes or continuing tasks it is possible to use
truncated BPTT, although we do not use this here.
The full algorithm is given below (Algorithm \ref{algo:rdpg}).
RDPG is an algorithm for learning deterministic policies. As discussed in the
literature \cite{singh94learningwithout,sallans2002reinforcement}
it is possible to construct examples where deterministic
policies perform poorly under partial observability. In RDPG the policy is
conditioned on the entire history but since we are using function approximation
state aliasing may still occur, especially early in
learning. We therefore also investigate a recurrent version of the stochastic
counterpart to DPG: SVG(0) \cite{heess2015svg} (DPG can be seen as the deterministic limit of
SVG(0)). In addition to learning stochastic policies SVG(0) also admits on-policy
learning whereas DPG is inherently off policy (see below).
Similar to
DPG, SVG(0) updates the policy by backpropagation $\partial Q / \partial a$
from the action-value function, but does so for stochastic policies. This is
enabled through a ``re-parameterization'' (e.g.\ \cite{kingma2013autoencoding,rezende2014stochastic})
of the stochastic policy: The
stochastic policy is represented in terms of a fixed, independent noise source
and a parameterized deterministic function that transforms a draw from that
noise source, i.e., in our case, $a = \pi^\theta(h, \nu)$ with $\nu \sim \beta(\cdot)$ where
$\beta$ is some fixed distribution.
For instance, a Gaussian policy $\pi^\theta(a | h) = N(a | \mu^\theta(h), \sigma^2)$ can be re-parameterized as follows:
$a = \pi^\theta(h, \nu) = \mu^\theta(h) + \sigma \nu$ where $\nu \sim N(\cdot | 0,1)$. See \cite{heess2015svg} for more details.
The stochastic policy is updated as follows:
\begin{equation}
\partialAt{J(\theta)}{\theta} =
\expectationE{ \sum_t \gamma^{t-1} \left. \partialAt{ Q^{\pi^\theta}(h_t,a)}{a} \right \rvert_{a = \pi^\theta(h_t, \nu_t)} \partialAt{\pi^\theta(h_t,\nu_t)}{\theta} }{\tau, \nu},
\label{eq:SVGpolicyUpdate}
\end{equation}
with $\tau$ drawn from the trajectory distribution which is conditioned on IID draws of $\nu_t$ from $\beta$ at each time step.
The full algorithm is provided in the supplementary (Algorithm \ref{algo:rsvg0}).
\subsubsection{Off-policy learning and experience replay}
\label{sec:Algorithms:ER}
DPG is typically used in an off-policy setting
due to the fact that the policy is deterministic but exploration is
needed in order to learn the gradient of $Q$ with respect to the actions.
Furthermore, in practice, data efficiency and stability can also be greatly
improved by using experience replay (e.g.\
\cite{hafner2011reinforcement,hausknecht2015deep,lillicrap2015continuous,mnih2015human,heess2015svg})
and we use the same approach here (see Algorithms \ref{algo:rdpg},
\ref{algo:rsvg0}). Thus, during learning we store experienced trajectories in a
database and then replace the expectation in eq.\ (\ref{eq:RDPGpolicyUpdate})
with trajectories sampled from the database.
One consequence of this is a bias in the state distribution in eqs.
(\ref{eq:DPGpolicyUpdate}, \ref{eq:SVGpolicyUpdate}), which no longer corresponds
to the state distribution induced by the current policy. With function
approximation this can lead to a bias in the learned policy, although
this is typically ignored in practice.
RDPG and RSVG(0) may similarly be affected;
in fact since policies (and Q) are not just a function of the state but of an entire
action-observation history (eq.\ \ref{eq:RDPGpolicyUpdate}) the bias might be
more severe.
One potential advantage of (R)SVG(0) in this context is that it allows
on-policy learning although we do not explore this possibility
here. We found that off-policy
learning with experience replay remained effective in the partially observed
case.
\subsubsection{Target networks}
\label{sec:Algorithms:Target}
A second algorithmic feature that has been found to greatly improve the
stability of neural-network based reinforcement learning algorithms that rely on
bootstrapping for learning value functions is the use of \emph{target networks}
\cite{hafner2011reinforcement,lillicrap2015continuous,mnih2015human,heess2015svg}:
The algorithm maintains two copies of the value function $Q$ and of the policy
$\pi$ each, with parameters $\theta$ and $\theta'$, and $\omega$ and $\omega'$
respectively. $\theta$ and $\omega$ are the parameters that are being updated by
the algorithm; $\theta'$ and $\omega'$ track them with some delay and are used
to compute the ``target values'' for the $Q$ function update.
Different authors have explored different approaches to updating $\theta'$ and
$\omega'$. In this work we use ``soft updates'' as in
\cite{lillicrap2015continuous} (see Algorithms \ref{algo:rdpg} and
\ref{algo:rsvg0} below).
\begin{algorithm}[h]
\caption{RDPG algorithm \label{algo:rdpg}}
\begin{algorithmic}
\STATE Initialize critic network $Q^\omega(a_t, h_t )$ and actor $\mu^\theta(h_t )$ with parameters $\omega$ and $\theta$.
\STATE Initialize target networks $Q^{\omega'}$ and $\mu^{\theta'}$ with weights $\omega' \leftarrow \omega$, $\theta' \leftarrow \theta$.
\STATE Initialize replay buffer $R$.
\FOR{episodes = 1, M}
\STATE initialize empty history $h_0$
\FOR{t = 1, T}
\STATE receive observation $o_t$
\STATE $h_t \leftarrow h_{t-1}, a_{t-1}, o_t$ (append observation and previous action to history)
\STATE select action $a_t = \mu^\theta(h_t) + \epsilon$ (with $\epsilon$: exploration noise)
\ENDFOR
\STATE Store the sequence $(o_1, a_1, r_1 ... o_T, a_T, r_T)$ in $R$
\STATE Sample a minibatch of $N$ episodes $(o_1^i, a_1^i, r_1^i, ... o_T^i, a_T^i, r_T^i)_{i=1, \dots ,N}$ from $R$
\STATE Construct histories $h_t^i = (o_1^i, a_1^i, \dots a_{t-1}^i, o_t^i)$
\STATE Compute target values for each sample episode
$(y_1^i, ... y_T^i)$ using the recurrent target networks
\begin{equation*}
y^i_t = r^i_t + \gamma Q^{\omega'}( h^i_{t+1}, \mu^{\theta'}(h^i_{t+1}) )
\end{equation*}
\STATE Compute critic update (using BPTT)
\begin{equation*}
\Delta \omega = \frac{1}{NT} \sum_i \sum_t \left (y^i_t - Q^\omega(h_t^i, a^i_t ) \right ) \partialAt{Q^\omega(h_t^i, a^i_t )}{\omega}
\end{equation*}
\STATE Compute actor update (using BPTT)
\begin{equation*}
\Delta \theta = \frac{1}{NT} \sum_i \sum_t \partialAt{Q^\omega(h_t^i, \mu^\theta(h_t^i) )}{a} \partialAt{\mu^\theta(h_t^i)}{\theta}
\end{equation*}
\STATE Update actor and critic using Adam \cite{kingma2014adam}
\STATE Update the target networks
\begin{align*}
\omega' & \leftarrow \tau \omega + (1 - \tau) \omega' \\
\theta' & \leftarrow \tau \theta + (1 - \tau) \theta'
\end{align*}
\ENDFOR
\end{algorithmic}
\end{algorithm}
\section{Results}
We tested our algorithms on a variety of partially observed environments,
covering different types of memory problems. Videos of the learned policies for all
the domains are included in our supplementary
videos\footnote{Video of all the learned policies is available at \url{https://youtu.be/V4_vb1D5NNQ}}; we encourage viewing them, as
these may provide a better intuition for the environments.
All physical control problems except the simulated water maze
(section \ref{sec:Results:Watermaze}) were simulated in MuJoCo
\cite{todorov2012mujoco}. We tested both standard recurrent networks as well as
LSTM networks.
\subsection{Sensor integration and system identification}
Physical control problems with noisy sensors are one of the
paradigm examples of partially-observed environments.
A large amount of research has focused on how to efficiently integrate
noisy sensory information over multiple timesteps in order to derive accurate
estimates of the system state, or to estimate derivatives of important properties
of the system \cite{thrun2005probabilistic}.
Here, we consider two simple, standard
control problems often used in reinforcement learning, the under-actuated
pendulum and cartpole swing up.
We modify these standard benchmark tasks such that in both cases the agent
receives no direct information of the velocity of any of the components, i.e.\
for the pendulum swing-up task the observation comprises only the angle of the
pendulum, and for cartpole swing-up it is limited to the angle of the pole and
the position of the cart. Velocity is crucial for solving the task
and thus it must be estimated from the history of the system. Figure
\ref{fig:pendulum}a shows the learning curves for pendulum swing-up. Both RDPG
and RSVG(0) were tested on the pendulum task, and are able to learn good solutions
which bring the pole to the upright position.
For the cartpole swing-up task, in addition to not providing the agent with
velocity information, we also
varied the length of the pole from episode to episode. The pole length is
invisible to the agent and needs to be inferred from the response of the
system. In this task the sensor integration problem is thus paired with the need
for system identification. As can be seen in figure \ref{fig:pendulum}b, the RDPG
agent with an LSTM network reliably solves this task every time while a simple
feedforward agent (DDPG) fails entirely. RDPG with a simple RNN performs
considerably less well than the LSTM agent, presumably due to relatively long
episodes (T=350 steps) and the failure to backpropagate gradients effectively
through the plain RNN. We found that a feedforward agent that does receive velocity
information can solve the variable-length swing-up task partly but does so less
reliably than the recurrent agent as it is unable to identify the relevant
system parameters (not shown).
\begin{SCfigure}
\subfloat[][]{\includegraphics[width=3.5cm]{poPendulum} \label{sb:pendulum}}
\subfloat[][]{\includegraphics[width=3.5cm]{POcartpole} \label{sb:cartpole}}
\caption{
\protect\subref{sb:pendulum} The reward curve for the partially-observed pendulum
task. Both RDPG and RSVG(0) are able to learn
policies which bring the pendulum to an upright position.
\protect\subref{sb:cartpole} The reward curve for the cartpole with no velocity and
varying cartpole lengths. RDPG with LSTM is able to reliably learn a good solution for
this task; a purely feedforward agent (DDPG),
which will not be able to estimate velocities nor to infer the pole length, is not able
to solve the problem.
}
\label{fig:pendulum}
\end{SCfigure}
\subsection{Memory tasks}
Another type of partially-observed task, which has been less studied in the
context of reinforcement learning, involves the need to remember explicit
information over a number of steps. We constructed two tasks like this. One was
a 3-joint reacher which must reach for a randomly positioned target, but the
position of the target is only provided to the agent in the initial
observation (the entire episode is 80 timesteps). As a harder variant of this task, we
constructed a 5-joint gripper which must reach for a (fully-observed) payload
from a randomized initial configuration and then return the payload to the
initial position of its ``hand'' (T=100). Note that this is a challenging control problem
even in the fully observed case. The results for both tasks are shown in figure
\ref{fig:memory}, RDPG agents with LSTM networks solve both tasks reliably
whereas purely feedforward agents fail on the memory components of the task as
can be seen in the supplemental video.
\begin{figure}
\caption{ Reward curves for the \protect\subref{sb:reacher}
\label{sb:reacher}
\label{sb:gripper}
\label{sb:3ReacherExample}
\label{sb:gripperExample}
\label{fig:memory}
}
\end{figure}
\subsection{Water maze}
\label{sec:Results:Watermaze}
The Morris water maze has been used extensively in rodents for the study of
memory \cite{dhooge2001applications}. We tested our algorithms on a
simplified version of the task. The agent moves in a 2-dimensional circular
space where a small region of the space is an invisible ``platform'' where the
agent receives a positive reward. At the beginning of the episode the agent and
platform are randomly positioned in the tank. The platform position is not
visible to the agent but it ``sees'' when it is on platform. The agent needs to
search for and stay on the platform to receive reward by controlling
its acceleration. After 5 steps on the
platform the agent is reset randomly to a new position in the tank but the
platform stays in place for the rest of the episode (T=200). The agent
needs to remember the position of the platform to return to it quickly.
It is sometimes presumed that a stochastic policy is required in order to solve
problems like this, which require learning a search strategy.
Although there is some variability in the results, we found that both RDPG
and RSVG(0) were able to find similarly
good solutions (figure \ref{sb:wmLearningCurve}), indicating RDPG is able to
learn reasonable, deterministic search strategies. Both solutions were able to
make use of memory to return to the platform more quickly after discovering it
during the initial search (figure \ref{sb:wmBar}). A non-recurrent agent (DDPG)
is able to learn a limited search strategy but fails to exploit memory to return to the
platform after having been reset to a random position in the tank.
\begin{figure}
\caption{
\protect\subref{sb:wmLearningCurve}
\label{sb:wmLearningCurve}
\label{sb:wmBar}
\label{sb:wmRDPG}
\label{sb:wmRSVG}
\label{sb:wmDDPG}
\label{fig:watermaze}
}
\end{figure}
\subsection{High-dimensional observations}
We also tested our agents, with convolutional networks, on solving tasks
directly from high-dimensional pixel spaces. We tested on the pendulum
task (but now the agent is given only a static rendering of the pendulum at each timestep),
and a two-choice reaching task, where the target disappears after 5 frames
(and the agent is not allowed to move during the first 5 frames to prevent it from encoding the target position in its initial trajectory).
We found that RDPG was able to learn effective policies from high-dimensional
observations which integrate information from multiple timesteps to estimate velocity and remember the
visually cued target
for the full length of the episode (in the reacher task). Figure \ref{fig:pixels}
shows the results.
\begin{figure}
\caption{
RDPG was able to learn good policies directly from high-dimensional
renderings for pendulum
\protect\subref{sb:pixelsPendulum}
\label{sb:pixelsPendulum}
\label{sb:pixelsReacher}
\label{sb:reacherPixelsExample}
\label{fig:pixels}
}
\end{figure}
\section{Discussion}
\subsection{Variants}
In the experiments presented here, the actor and critic networks are entirely
disjoint. However, particularly when learning deep, convolutional networks
the filters required in the early layers may be similar between the policy and
the actor. Sharing these early layers could improve computational efficiency
and learning speed.
Similar arguments apply to the recurrent part of the network,
which could be shared between the actor and the critic.
Such sharing, however, can also result in instabilities as updates to one network
may unknowingly damage or shift the other network. For this reason, we have not
used any sharing here, although it is a potential topic for further investigation.
\subsection{Related work}
There is a large body of literature on solving partially observed control
problems. We focus on the most closely related work that aims to solve such problems with
learned memory.
Several groups \cite{lin1993hidden,bakker2002reinforcement,hausknecht2015deep}
have studied the use of model-free algorithms with
recurrent networks to solve POMDPs with discrete action spaces.
\cite{bakker2002reinforcement} focused on relatively long-horizon (``deep'') memory
problems in small state-action spaces.
In contrast, \cite{hausknecht2015deep}
modified the Atari DQN architecture \cite{mnih2015human} (i.e.\ they perform
control from high-dimensional pixel inputs) and demonstrated that recurrent Q learning \cite{lin1993hidden}
can perform the
required information integration to resolve short-term partial observability (e.g.\ to estimate velocities) that is
achieved via stacks of frames in the original DQN architecture.
Continuous action problems with relatively low-dimensional observation spaces
have been considered e.g.\ in
\cite{wierstra2007deep,wierstra2007critics,utsunomiya2009contextual,zhang2015memory}.
\cite{wierstra2007deep} trained LSTM-based stochastic policies using Reinforce;
\cite{wierstra2007critics,utsunomiya2009contextual,zhang2015memory} used
actor-critic architectures.
The algorithm of \cite{wierstra2007critics} can be seen as a special case of DPG
where the deterministic policy produces the
parameters of an action distribution from which the actions are then sampled.
This requires suitable exploration at the level of distribution parameters
(e.g.\ exploring in terms of means and variances of a Gaussian distribution); in
contrast, SVG(0) also learns stochastic policies but allows
exploration at the action level only.
All works mentioned above, except for \cite{zhang2015memory}, consider the memory
to be internal to the policy and learn the RNN parameters using BPTT,
back-propagating either TD errors or policy gradients. \cite{zhang2015memory}
instead take the view of \cite{peshkin1999external} and consider memory as extra
state dimensions that can be read and set by the policy. They optimize the
policy using guided policy search \cite{levine2015end} which performs explicit
trajectory optimization along reference trajectories and, unlike our approach,
requires a well defined full latent state and access to this latent state during
training.
\section{Conclusion}
We have demonstrated that two related model-free approaches can be extended
to learn effectively with recurrent neural networks on a variety of partially-observed
problems, including directly from pixel observations. Since these algorithms
learn using standard backpropagation through time, we are able to benefit
from innovations in supervised recurrent neural networks, such as
long-short term memory networks \cite{hochreiter1997long}, to solve challenging
memory problems such as the Morris water maze.
{
}
\newpage
\section{Supplementary}
\begin{algorithm}[h]
\caption{RSVG(0) algorithm \label{algo:rsvg0}}
\begin{algorithmic}
\STATE Initialize critic network $Q^\omega(a_t, h_t )$ and actor $\pi^\theta(h_t )$ with parameters $\omega$ and $\theta$.
\STATE Initialize target networks $Q^{\omega'}$ and $\pi^{\theta'}$ with weights $\omega' \leftarrow \omega$, $\theta' \leftarrow \theta$.
\STATE Initialize replay buffer $R$.
\FOR{episodes = 1, M}
\STATE initialize empty history $h_0$
\FOR{t = 1, T}
\STATE receive observation $o_t$
\STATE $h_t \leftarrow h_{t-1}, a_{t-1}, o_t$ (append observation and previous action to history)
\STATE select action $a_t = \pi^\theta(h_t, \nu)$ with $\nu \sim \beta$
\ENDFOR
\STATE Store the sequence $(o_1, a_1, r_1 ... o_T, a_T, r_T)$ in $R$
\STATE Sample a minibatch of $N$ episodes $(o_1^i, a_1^i, r_1^i, ... o_T^i, a_T^i, r_T^i)_{i=1, \dots ,N}$ from $R$
\STATE Construct histories $h_t^i = (o_1^i, a_1^i, \dots a_{t-1}^i, o_t^i)$
\STATE Compute target values for each sample episode
$(y_1^i, ... y_T^i)$ using the recurrent target networks
\begin{equation*}
y^i_t = r^i_t + \gamma Q^{\omega'}( h^i_{t+1}, \pi^{\theta'}(h^i_{t+1}, \nu) ) ~~~~\mathrm{with} ~~\nu \sim \beta
\end{equation*}
\STATE Compute critic update (using BPTT)
\begin{equation*}
\Delta \omega = \frac{1}{NT} \sum_i \sum_t (y^i_t - Q^\omega(h_t^i, a^i_t ))\partialAt{Q^\omega(h_t^i, a^i_t )}{\omega}
\end{equation*}
\STATE Compute actor update (using BPTT)
\begin{equation*}
\Delta \theta = \frac{1}{NT} \sum_i \sum_t \partialAt{Q^\omega(h_t^i, \pi^\theta(h_t^i, \nu) )}{a}
\partialAt{\pi^\theta(h_t^i, \nu)}{\theta} ~~~~~~ \mathrm{with} ~~\nu \sim \beta
\end{equation*}
\STATE Update actor and critic using Adam \cite{kingma2014adam}
\STATE Update the target networks
\begin{align*}
\omega' & \leftarrow \tau \omega + (1 - \tau) \omega' \\
\theta' & \leftarrow \tau \theta + (1 - \tau) \theta'
\end{align*}
\ENDFOR
\end{algorithmic}
\end{algorithm}
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Stochastic Navier-Stokes equations with Caputo derivative driven by fractional noises}
\author[author1]{Guang-an Zou}
\author[author1]{Guangying Lv}
\author[author2]{Jiang-Lun Wu\corref{cor1}}
\cortext[cor1]{Corresponding author}
\ead{j.l.wu@swansea.ac.uk}
\address[author1]{School of Mathematics and Statistics, Henan University, Kaifeng 475004, P. R. China}
\address[author2]{Department of Mathematics, Swansea University, Swansea SA2 8PP, United Kingdom}
\begin{abstract}
In this paper, we consider the extended stochastic Navier-Stokes equations with Caputo derivative driven by fractional
Brownian motion. We firstly derive the pathwise spatial and temporal regularity of the generalized Ornstein-Uhlenbeck process. Then we discuss the existence, uniqueness, and H\"{o}lder regularity of mild solutions to the given problem under certain sufficient conditions, which depend on the fractional order $\alpha$ and Hurst parameter $H$. The results obtained in this study improve some results in existing literature.
\end{abstract}
\begin{keyword}
Caputo derivative, stochastic Navier-Stokes equations, fractional Brownian motion, mild solutions.
\end{keyword}
\end{frontmatter}
\section{Introduction}
Stochastic Navier-Stokes equations (SNSEs) are widely regarded as one of the most fascinating problems in fluid mechanics, in particular, stochasticity could even lead to a better understanding of physical phenomenon and mechanisms of turbulence in fluids. Furthermore, the presence of noises could give rise to some statistical features and important phenomena, for example, a unique invariant measure and ergodic behavior for the SNSEs driven by degenerate noise have been established, which can not be found in deterministic Navier-Stokes equations \cite{Flandoli-Maslowski-1995,Hairer-2006}. Since the seminal work of Bensoussan and Temam \cite{Bensoussan-1973}, the SNSEs have been intensively investigated in the literature. The existence and uniqueness of solutions for the SNSEs with multiplicative Gaussian noise were proved in \cite{Da-2002,Mikulevicius-2005,Taniguchi-2011}. The large deviation principle for SNSEs with multiplicative noise had been established in \cite{Wang-2015,Xu-2009}. The study of random attractors of SNSEs can be found in \cite{Brzezniak-2013, Flandoli-1995}, just mention a few.
On the other hand, fractional calculus has gained considerable popularity during the past decades owing to its demonstrated ability to describe physical systems possessing long-term memory and long-range spatial interactions, which play important roles in diverse areas of science and engineering. Some theoretical analysis and experimental data have shown that fractional derivative can be recognized as one of the best tools to model anomalous diffusion processes \cite{Podlubny-1999,Srivastava-2006,Zhou-Wang-2016}. Consequently, the generalized Navier-Stokes equations with fractional derivative can be introduced to simulate anomalous diffusion in fractal media \cite{Momani-2006,Zhou-2017}. Recently, time-fractional Navier-Stokes equations have been initiated from the perspective of both analytical and numerical solutions, see \cite{De-2015,Ganji-2010,Kumar-2015,Li-2016,Zhou-Peng-2017,Zou-Zhou-2017} for more details.
We would like to emphasize that it is natural and also important to study the generalized SNSEs with time-fractional derivatives, which might be useful to model reasonably the phenomenon of anomalous diffusion with intrinsic random effects.
In this paper, we are concerned with the following generalized stochastic Navier-Stokes equations with time-fractional derivative on a finite time interval $[0,T]$ driven by fractional noise, defined on a domain $D\subset\mathbb{R}^d,d\ge1,$ with regular boundary $\partial D$
\begin{align*}
^{C}D_{t}^{\alpha}u=\nu\Delta u-(u\cdot\nabla)u-\nabla p+f(u)+\dot{B}^{H},~x\in D,~t>0, \tag{1.1}
\end{align*}
with the incompressibility condition:
\begin{align*}
\mathrm{div} u=0,~x\in D,t\geq0, \tag{1.2}
\end{align*}
subject to the initial condition:
\begin{align*}
u(x,0)=u_{0}(x),~x\in D,t=0, \tag{1.3}
\end{align*}
and the Dirichlet boundary conditions:
\begin{align*}
u(x,t)=0,~x\in \partial D,t\geq0, \tag{1.4}
\end{align*}
in which $u=u(x,t)$ represents the velocity field of the fluid; $\nu>0$ is the viscosity coefficient; $p=p(x,t)$ is the associated pressure field; $f(u)$ stands for the deterministic external forces; the term $\dot{B}^{H}=\frac{d}{dt}B^{H}(t)$, where $B^{H}(t)$ is a cylindrical fractional Brownian motion (fBm) with Hurst parameter $H\in(0,1)$, describes a state-dependent random noise. Here, $^{C}D_{t}^{\alpha}$ denotes the Caputo-type derivative of order $\alpha$ ($0<\alpha<1$) for a function $u(x,t)$ with respect to time $t$ defined by
\begin{align*}
^{C}D_{t}^{\alpha}u(x,t)=\begin{cases}
\frac{1}{\Gamma(1-\alpha)}\int_{0}^{t}\frac{\partial u(x,s)}{\partial s}\frac{ds}{(t-s)^{\alpha}},~0<\alpha<1,\\
\frac{\partial u(x,t)}{\partial t}, ~~~~~~~~~~~~~~~~~~~~~~ \alpha =1,\\
\end{cases} \tag{1.5}
\end{align*}
where $\Gamma(\cdot)$ stands for the gamma function $\Gamma(x)=\int_{0}^{\infty}t^{x-1}e^{-t}dt$. Define the Stokes operator subject to the no-slip homogeneous Dirichlet boundary condition (1.4) as the formula
\begin{align*}
Au:=-\nu P_{H}\Delta u,
\end{align*}
where $P_{H}$ is the Helmhotz-Hodge projection operator, we also define the nonlinear operator $B$ as
\begin{align*}
B(u,v)=-P_{H}(u\cdot\nabla)v,
\end{align*}
with a slight abuse of notation $B(u):=B(u,u)$.
By applying the Helmholtz-Hodge operator $P_{H}$ to each term of time-fractional SNSEs, we can rewrite the Eqs.(1.1)-(1.4) as follows in the abstract form:
\begin{align*}
\begin{cases}
^{C}D_{t}^{\alpha}u=-Au+B(u)+f(u)+\dot{B}^{H},t>0,\\
u(0)=u_{0},
\end{cases} \tag{1.6}
\end{align*}
where we shall also use the same notations $f(u)$ instead of $P_{H}f$, and the solutions of problem (1.6) is also the solutions of Eqs.(1.1)-(1.4).
Note that the totality of fractional Brownian motions (fBms) forms a subclass of Gaussian processes, which are positively correlated for $H\in(1/2,1)$ and negatively correlated for $H\in(0,1/2)$, while $H=1/2$ corresponds to the standard Gaussian process. So it is interesting to consider stochastic differential equations with fBm, and the subject of stochastic calculus with respect to fBm has attracted much attention \cite{Biagini-2008,Duncan-2009,Jiang-2012,Mishura-2008}. In recent years, the existence and uniqueness of solutions for stochastic Burgers equations with fBm have been examined in \cite{Jiang-2012,Wang-Zeng-2010}. In addition, Zou and Wang investigated the time-fractional stochastic Burgers equation with standard Brownian motion \cite{Zou-2017}. However, to the best of our knowledge, the study of time-fractional SNSEs with fBm has not been addressed yet, which is indeed a fascinating and more interesting (and also practical) problem. The objective of the present paper is to establish the existence and uniqueness of mild solutions by the Banach fixed point theorem and Mainardi's Wright-type function; the key difficulty of the problem is how to deal with the stochastic convolution. We also prove the H\"{o}lder regularity of mild solutions to time-fractional SNSEs. Our consideration extends and improves the existing results carried out in previous studies \cite{De-2015,Flandoli-Maslowski-1995,Mikulevicius-2005,Wang-Zeng-2010,Zhou-2017}.
The rest of the paper is organized as follows. In the next section, we introduce several notions and we give certain preliminaries needed in our later analysis. In Section 3, we establish the pathwise spatial and temporal regularity of the generalized Ornstein-Uhlenbeck process. In Section 4, we show the existence and uniqueness of mild solutions to time-fractional SNSEs. We end our paper by proving the H\"{o}lder regularity of the mild solution.
\section{Notations and preliminaries}
In this section, we give some notions and certain important preliminaries, which will be used in the subsequent discussions.
Let $(\Omega,\mathcal{F},\mathds{P},\{\mathcal{F}_{t}\}_{t\geq0})$ be a filtered probability space with a normal filtration $\{\mathcal{F}_{t}\}_{t\geq0}$. We assume that the operator $A$ is self-adjoint and there exist the eigenvectors $e_{k}$ corresponding to eigenvalues $\gamma_{k}$ such that
\begin{align*}
Ae_{k}=\gamma_{k}e_{k},e_{k}=\sqrt{2}\sin(k\pi x),\gamma_{k}=\pi^{2}k^{2},k\in N^{+}.
\end{align*}
For any $\sigma>0$, $A^{\frac{\sigma}{2}}e_{k}=\gamma_{k}^{\frac{\sigma}{2}}e_{k}, k=1,2,\ldots$, and let $\dot{H}^{\sigma}$ be the domain of the fractional power defined by
\begin{align*}
\dot{H}^{\sigma}=\mathcal{D}(A^{\frac{\sigma}{2}})=\{v\in L^{2}(D),s.t.~\|v\|_{ \dot{H}^{\sigma}}^{2}=\sum\limits_{k=1}^{\infty}\gamma_{k}^{\sigma}v_{k}^{2}<\infty\},
\end{align*}
where $v_{k}=(v,e_{k})$ and the norm $\|v\|_{ \dot{H}^{\sigma}}=\|A^{\frac{\sigma}{2}}v\|$. Let $L^{2}(\Omega,H)$ be a Hilbert space of $H$-valued random variables equipped with the inner product $\mathbb{E}(\cdot,\cdot)$ and norm $\mathbb{E}\|\cdot\|$, it is given by
\begin{align*}
L^{2}(\Omega,H)=\{\chi:\mathbb{E}\|\chi\|_{H}^{2}=\int_{\Omega}\|\chi(\omega)\|_{H}^{2}d\mathds{P}(\omega)<\infty,\omega\in \Omega\}.
\end{align*}
\textbf{Definition 2.1.} For $H\in(0,1)$, a continuous centered Gaussian process $\{\beta^{H}(t),t\in[0,\infty)\}$ with covariance function
\begin{align*}
R_{H}(t,s)=\mathbb{E}[\beta^{H}(t)\beta^{H}(s)]=\frac{1}{2}(t^{2H}+s^{2H}-|t-s|^{2H}),~t,s\in [0,\infty)
\end{align*}
is called a one-dimensional fractional Brownian motion (fBm), and $H$ is the Hurst parameter. In particular when $H=\frac{1}{2}$, $\beta^{H}(t)$ represents a standard Brownian motion.
Now let us introduce the Wiener integral with respect to the fBm. To begin with, we represent $\beta^{H}(t)$ as following (see \cite{Biagini-2008})
\begin{align*}
\beta^{H}(t)=\int_{0}^{t}K_{H}(t,s)dW(s),
\end{align*}
where $W=\{W(t),t\in[0,T]\}$ is a Wiener process on the space $(\Omega,\mathcal{F},\mathds{P},\{\mathcal{F}_{t}\}_{t\geq0})$ and the kernel $K_{H}(t,s), 0\le s< t\le T$, is given by
\begin{align*}
K_{H}(t,s):=c_{H}(t-s)^{H-\frac{1}{2}}+c_{H}(\frac{1}{2}-H)\int_{s}^{t}(u-s)^{H-\frac{3}{2}}(1-(\frac{s}{u})^{\frac{1}{2}-H})du, \tag{2.1}
\end{align*}
for $0<H<\frac{1}{2}$ and $c_{H}=(\frac{2H\Gamma(\frac{3}{2}-H)}{\Gamma(H+\frac{1}{2})\Gamma(2-2H)})^{\frac{1}{2}}$ is a constant. When $\frac{1}{2}<H<1$, there holds
\begin{align*}
K_{H}(t,s)=c_{H}(H-\frac{1}{2})s^{\frac{1}{2}-H}\int_{s}^{t}(u-s)^{H-\frac{3}{2}}u^{H-\frac{1}{2}}du.\tag{2.2}
\end{align*}
It is easy to verify that
\begin{align*}
\frac{\partial K_{H}}{\partial t}(t,s)=c_{H}(H-\frac{1}{2})(\frac{s}{t})^{\frac{1}{2}-H}(t-s)^{H-\frac{3}{2}}. \tag{2.3}
\end{align*}
We denote by $\mathcal{H}$ the reproducing kernel Hilbert space of the fBm. Let $K_{\tau}^{*}:\mathcal{H}\rightarrow L^{2}([0,T])$ be the linear map given by
\begin{align*}
(K_{\tau}^{*}\psi)(s)=\psi(s)K_{H}(\tau,s)+\int_{s}^{\tau}(\psi(t)-\psi(s))\frac{\partial K_{H}}{\partial t}(t,s)dt \tag{2.4}
\end{align*}
for $0<H<\frac{1}{2}$, and if $\frac{1}{2}<H<1$, we denote
\begin{align*}
(K_{\tau}^{*}\psi)(s)=\int_{s}^{\tau}\psi(t)\frac{\partial K_{H}}{\partial t}(t,s)dt. \tag{2.5}
\end{align*}
We refer the reader to \cite{Mishura-2008} for the proof of the fact that $K_{\tau}^{*}$ is an isometry between $\mathcal{H}$ and $L^{2}([0,T])$. Moreover, for any $\psi\in \mathcal{H}$, we have the following relation between the Wiener integral with respect to fBm and the It\^{o} integral with respect to Wiener process
\begin{align*}
\int_{0}^{t}\psi(s)d\beta^{H}(s)=\int_{0}^{t}(K_{\tau}^{*}\psi)(s)dW(s),~t\in [0,T].
\end{align*}
Generally, following the standard approach for $H=\frac{1}{2}$, we consider a $Q$-Wiener process with linear bounded covariance operator $Q$ such that $\mathrm{Tr} (Q)<\infty$. Furthermore, there exist eigenvalues $\lambda_{k}$ and corresponding eigenfunctions $e_{k}$ satisfying $Q e_{k}=\lambda_{k}e_{k},k=1,2,\ldots$, then we define the infinite dimensional fBm with covariance $Q$ as
\begin{align*}
B^{H}(t):=\sum\limits_{k=1}^{\infty}\lambda^{1/2}_{k}e_{k}\beta_{k}^{H}(t),
\end{align*}
where $\beta_{k}^{H}$ are real-valued independent fBm's. In order to define Wiener integrals with respect to $Q$-fBm, we introduce $\mathcal{L}_{2}^{0}:=\mathcal{L}_{2}^{0}(Y,X)$ of all $Q$-Hilbert-Schmidt operators $\psi:Y\rightarrow X$, where $Y$ and $X$ are two real separable Hilbert spaces. We associate the $Q$-Hilbert-Schmidt operators $\psi$ with the norm
\begin{align*}
\|\psi\|_{\mathcal{L}_{2}^{0}}^{2}=\sum\limits_{k=1}^{\infty}\|\lambda^{1/2}_{k}\psi e_{k}\|^{2}<\infty.
\end{align*}
As a consequence, for $\psi\in \mathcal{L}_{2}^{0}(Y,X)$, the Wiener integral of $\psi$ with respect to $B^{H}(t)$ is defined by
\begin{align*}
\int_{0}^{t}\psi(s)dB^{H}(s)=\sum\limits_{k=1}^{\infty}\int_{0}^{t}\lambda^{1/2}_{k}\psi(s)e_{k}d\beta_{k}^{H}(s)=\sum\limits_{k=1}^{\infty}\int_{0}^{t}\lambda^{1/2}_{k}(K_{\tau}^{*}\psi e_{k})(s)d\beta_{k}(s), \tag{2.6}
\end{align*}
where $\beta_{k}$ is the standard Brownian motion.
\textbf{Definition 2.2.} An $\mathcal{F}_{t}$-adapted stochastic process $(u(t),t\in[0,T])$ is called a mild solution to (1.6) if
the following integral equation is satisfied
\begin{align*}
u(t)&=E_{\alpha}(t)u_{0}+\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)[B(u(s))+f(u(s))]ds\\
&~~~+\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)dB^{H}(s), \tag{2.7}
\end{align*}
where the generalized Mittag-Leffler operators $E_{\alpha}(t)$ and $E_{\alpha,\alpha}(t)$ are defined, respectively, by
\begin{align*}
E_{\alpha}(t):=\int_{0}^{\infty}\xi_{\alpha}(\theta)T(t^{\alpha}\theta)d\theta,
\end{align*}
and
\begin{align*}
E_{\alpha,\alpha}(t):=\int_{0}^{\infty}\alpha\theta\xi_{\alpha}(\theta)T(t^{\alpha}\theta)d\theta,
\end{align*}
where $T(t)=e^{-tA},t\geq0$ is an analytic semigroup generated by the operator $-A$, and the Mainardi's Wright-type function with $\alpha\in (0,1)$ is given by
\begin{align*}
\xi_{\alpha}(\theta)=\sum_{k=0}^{\infty}\frac{(-1)^{k}\theta^{k}}{k!\Gamma(1-\alpha(1+k))}.
\end{align*}
Furthermore, for any $\alpha\in (0,1)$ and $-1<\nu<\infty$, it is not difficult to verify that
\begin{align*}
\xi_{\alpha}(\theta)\geq0 ~and~ \int_{0}^{\infty}\theta^{\nu}\xi_{\alpha}(\theta)d\theta=\frac{\Gamma(1+\nu)}{\Gamma(1+\alpha\nu)}, \tag{2.8}
\end{align*}
for all $\theta\geq0$. The derivation of mild solution (2.7) can refer to \cite{Zou-2017}.
The operators $\{E_{\alpha}(t)\}_{t\geq0}$ and $\{E_{\alpha,\alpha}(t)\}_{t\geq0}$ in (2.7) have the following properties \cite{Zou-2017}:
\textbf{Lemma 2.1.} For any $t>0$, $E_{\alpha}(t)$ and $E_{\alpha,\alpha}(t)$ are linear and bounded operators. Moreover, for $0<\alpha<1$ and $0\leq\nu<2$, there exists a constant $C>0$ such that
\begin{align*}
\|E_{\alpha}(t)\chi\|_{\dot{H}^{\nu}}\leq Ct^{-\frac{\alpha\nu}{2}}\|\chi\|,~\|E_{\alpha,\alpha}(t)\chi\|_{\dot{H}^{\nu}}\leq Ct^{-\frac{\alpha\nu}{2}}\|\chi\|.
\end{align*}
\textbf{Lemma 2.2.} For any $t>0$, the operators $E_{\alpha}(t)$ and $E_{\alpha,\alpha}(t)$ are strongly continuous. Moreover, for $0<\alpha<1$ and $0\leq\nu<2$ and $0\leq t_{1}< t_{2}\leq T$, there exists a constant $C>0$ such that
\begin{align*}
\|(E_{\alpha}(t_{2})-E_{\alpha}(t_{1}))\chi\|_{\dot{H}^{\nu}}\leq C(t_{2}-t_{1})^{\frac{\alpha\nu}{2}}\|\chi\|,
\end{align*}
and
\begin{align*}
\|(E_{\alpha,\alpha}(t_{2})-E_{\alpha,\alpha}(t_{1}))\chi\|_{\dot{H}^{\nu}}\leq C(t_{2}-t_{1})^{\frac{\alpha\nu}{2}}\|\chi\|.
\end{align*}
Throughout the paper, we assume that the mapping $f: \Omega\times H \rightarrow H$ satisfies
the following global Lipschitz and growth conditions
\begin{align*}
\|f(u)-f(v)\|^{2}\leq C\|u-v\|^{2},~\|f(u)\|^{2}\leq C(1+\|u\|^{2})\tag{2.9}
\end{align*}
for any $u,v\in H$.
\section{Regularity of the stochastic convolution}
In this section, we state and prove the basic properties of stochastic convolution. Firstly, we introduce the following generalized Ornstein-Uhlenbeck process
\begin{equation*}
Z(t):=\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)dB^{H}(s). \tag{3.1}
\end{equation*}
Obviously, it is very important to establish the basic properties of the stochastic integrals (3.1)
in the study of the problem (1.6). For the sake of convenience, we introduce the following operator and show some properties.
\textbf{Lemma 3.1.} Let $\mathcal{S}_{\alpha}(t)=t^{\alpha-1}E_{\alpha,\alpha}(t)$, for $0\leq\nu< 2$ and $0<\alpha<1$, there exists a constant $C>0$ such that
\begin{align*}
\|\mathcal{S}_{\alpha}(t)\chi\|_{\dot{H}^{\nu}}\leq Ct^{\frac{(2-\nu)\alpha-2}{2}}\|\chi\|,~\|[\mathcal{S}_{\alpha}(t_{2})-\mathcal{S}_{\alpha}(t_{1})]\chi\|_{\dot{H}^{\nu}}\leq C(t_{2}-t_{1})^{\frac{ 2-(2-\nu)\alpha}{2}}\|\chi\|
\end{align*}
for any $0\leq t_{1}<t_{2}\leq T$.
\textbf{Proof.} By Lemma 2.1, we get
\begin{align*}
\|\mathcal{S}_{\alpha}(t)\chi\|_{\dot{H}^{\nu}}=\|t^{\alpha-1}E_{\alpha,\alpha}(t)\chi\|_{\dot{H}^{\nu}}\leq Ct^{\frac{(2-\nu)\alpha-2}{2}}\|\chi\|.
\end{align*}
Next, utilizing the property of semigroup $\|A^{\sigma}e^{-tA}\|\leq Ct^{-\sigma}$ for $\sigma\geq0$, we have
\begin{align*}
\|\frac{d}{dt}\mathcal{S}_{\alpha}(t)\chi \|_{\dot{H}^{\nu}}&=\|(\alpha-1)t^{\alpha-2}E_{\alpha,\alpha}(t)-\int_{0}^{\infty}\alpha^{2}t^{2\alpha-2}\theta^{2}\xi_{\alpha}(\theta)AT(t^{\alpha}\theta)d\theta\|_{\dot{H}^{\nu}}\\
&\leq (1-\alpha)t^{\alpha-2}\|E_{\alpha,\alpha}(t)\chi\|_{\dot{H}^{\nu}}+\int_{0}^{\infty}\alpha^{2}t^{2\alpha-2}\theta^{2}\xi_{\alpha}(\theta)\|A^{1+\frac{\nu}{2}}e^{-t^{\alpha}\theta A}\chi\|d\theta\\
&\leq C(1-\alpha)t^{\frac{(2-\nu)\alpha-4}{2}}\|\chi\|+\frac{\alpha^{2}\Gamma(2-\frac{\nu}{2})}{\Gamma(1+\alpha(1-\frac{\nu}{2}))}t^{\frac{(2-\nu)\alpha-4}{2}}\|\chi\|\\
&\leq Ct^{\frac{(2-\nu)\alpha-4}{2}}\|\chi\|.
\end{align*}
Hence, we have the following
\begin{align*}
\|[\mathcal{S}_{\alpha}(t_{2})-\mathcal{S}_{\alpha}(t_{1})]\chi\|_{\dot{H}^{\nu}}&=\|\int_{t_{1}}^{t_{2}}\frac{d}{dt}\mathcal{S}_{\alpha}(t)\chi dt\|_{\dot{H}^{\nu}}\\
&\leq\int_{t_{1}}^{t_{2}}Ct^{\frac{(2-\nu)\alpha-4}{2}}\|\chi\|dt\\
&=\frac{2C}{2-(2-\nu)\alpha}[t_{1}^{\frac{(2-\nu)\alpha-2}{2}}-t_{2}^{\frac{(2-\nu)\alpha-2}{2}}]\\
&\leq \frac{2C}{[2-(2-\nu)\alpha]T_{0}^{2-(2-\nu)\alpha}}(t_{2}-t_{1})^{\frac{2-(2-\nu)\alpha}{2}},
\end{align*}
where $0<T_{0}\leq t_{1}<t_{2}\leq T$, and we have used $t_{2}^{\omega}-t_{1}^{\omega}\leq C(t_{2}-t_{1})^{\omega}$ for $0\leq \omega\leq1$, in the above derivation.
In what follows, let us establish the pathwise spatial-temporal regularity of the stochastic convolution (3.1).
\textbf{Theorem 3.1.} For $0\leq\nu<2$ and $0<\alpha<1$, the generalized Ornstein-Uhlenbeck process $(Z(t))_{t\geq0}$ with the Hurst parameter $\frac{1}{4}<H<1$ is well defined. Moreover, there holds
\begin{align*}
\sup\limits_{t\in[0,T]}\mathbb{E}\|Z(t)\|_{\dot{H}^{\nu}}^{2}\leq C(H,Q)t^{\sigma}<\infty,~
0\leq t\leq T,
\end{align*}
where the index should satisfy $\sigma=\min\{(2-\nu)\alpha+4H-3,(2-\nu)\alpha+2H-1\}>0$.
\textbf{Proof.} Using the Wiener integral with respect to fBm and noticing the expression of $K_{t}^{*}$ and the properties of It\^{o} integral, for $0<H<\frac{1}{2}$, we get
\begin{align*}
\mathbb{E}\|Z(t)\|_{\dot{H}^{\nu}}^{2}&=\mathbb{E}\|\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)dB^{H}(s)\|_{\dot{H}^{\nu}}^{2}\\
&=\sum\limits_{k=1}^{\infty}\mathbb{E}\|\int_{0}^{t}\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t-s)e_{k})(s)d\beta_{k}(s)\|_{\dot{H}^{\nu}}^{2}\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t-s)e_{k})(s)\|_{\dot{H}^{\nu}}^{2}ds\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\lambda^{1/2}_{k}\mathcal{S}_{\alpha}(t-s)K_{H}(t,s)e_{k}\\
&\hspace{2mm}+\int_{s}^{t}\lambda^{1/2}_{k}[\mathcal{S}_{\alpha}(t-r)-\mathcal{S}_{\alpha}(t-s)]\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq 2\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\lambda^{1/2}_{k}\mathcal{S}_{\alpha}(t-s)K_{H}(t,s)e_{k}\|_{\dot{H}^{\nu}}^{2}ds\\
&\hspace{2mm}+2\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\int_{s}^{t}\lambda^{1/2}_{k}[\mathcal{S}_{\alpha}(t-r)-\mathcal{S}_{\alpha}(t-s)]\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&=:I_{1}+I_{2}. \tag{3.2}
\end{align*}
With the help of the following inequality (see \cite{Wang-Zeng-2010})
\begin{align*}
K_{H}(t,s)\leq C(H)(t-s)^{H-\frac{1}{2}}s^{H-\frac{1}{2}},
\end{align*}
and further combining Lemma 3.1 and the H\"{o}lder inequality, we obtain
\begin{align*}
I_{1}&=2\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\lambda^{1/2}_{k}\mathcal{S}_{\alpha}(t-s)K_{H}(t,s)e_{k}\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq 2C(H)(\int_{0}^{t}(t-s)^{(2-\nu)\alpha+2H-3}s^{2H-1}\sum\limits_{k=1}^{\infty}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}ds)\\
&\leq 2C(H)Tr(Q)(\int_{0}^{t}(t-s)^{2[(2-\nu)\alpha+2H-3]}ds)^{\frac{1}{2}}(\int_{0}^{t}s^{2(2H-1)}ds)^{\frac{1}{2}}\\
&\leq C(H,Q)t^{(2-\nu)\alpha+4H-3}, \tag{3.3}
\end{align*}
and on the other hand, utilizing the expression (2.3), we get
\begin{align*}
I_{2}&=2\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\int_{s}^{t}\lambda^{1/2}_{k}[\mathcal{S}_{\alpha}(t-r)-\mathcal{S}_{\alpha}(t-s)]\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq 2\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}(\int_{s}^{t}\|[\mathcal{S}_{\alpha}(t-r)-\mathcal{S}_{\alpha}(t-s)]\frac{\partial K_{H}}{\partial r}(r,s)\|_{\dot{H}^{\nu}}^{2}dr)(\int_{s}^{t}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}dr)ds\\
&\leq2C_{H}^{2}(H-\frac{1}{2})^{2}Tr(Q)\int_{0}^{t}(t-s)(\int_{s}^{t}|(s-r)^{\frac{(2-\nu)\alpha}{2}}(\frac{s}{r})^{\frac{1}{2}-H}(r-s)^{H-\frac{3}{2}}|^{2}dr)ds\\
&\leq C(H,Q)(\int_{0}^{t}(t-s)^{(2-\nu)\alpha+4H-3}s^{1-2H}ds)\\
&\leq C(H,Q)t^{(2-\nu)\alpha+2H-1}. \tag{3.4}
\end{align*}
When $\frac{1}{4}<H<\frac{1}{2}$ and $\sigma=\min\{(2-\nu)\alpha+4H-3,(2-\nu)\alpha+2H-1\}>0$, by combining (3.2)-(3.4), one can easily get that
\begin{align*}
\mathbb{E}\|Z(t)\|_{\dot{H}^{\nu}}^{2}\leq C(H,Q)t^{\sigma}\leq C(H,Q)T^{\sigma}<\infty.
\end{align*}
Similarly, for $\frac{1}{2}<H<1$, one can derive that
\begin{align*}
&\mathbb{E}\|Z(t)\|_{\dot{H}^{\nu}}^{2}\\
&=\mathbb{E}\|\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)dB^{H}(s)\|_{\dot{H}^{\nu}}^{2}\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t-s)e_{k})(s)\|_{\dot{H}^{\nu}}^{2}ds\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t}\mathbb{E}\|\int_{s}^{t}\lambda^{1/2}_{k}\mathcal{S}_{\alpha}(t-r)\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq C_{H}^{2}(H-\frac{1}{2})^{2}\int_{0}^{t}\mathbb{E}(\int_{s}^{t}\|\mathcal{S}_{\alpha}(t-r)(\frac{s}{r})^{\frac{1}{2}-H}(r-s)^{H-\frac{3}{2}}\|_{\dot{H}^{\nu}}^{2}dr)(\int_{s}^{t}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}dr)ds\\
&\leq C(H,Q)(\int_{0}^{t}(t-s)^{(2-\nu)\alpha+4H-3}s^{1-2H}ds)\\
&\leq C(H,Q)t^{(2-\nu)\alpha+2H-1}.
\end{align*}
Thus, if $\frac{1}{2}<H<1$ and $(2-\nu)\alpha+2H-1>0$, one can directly obtain $\mathbb{E}\|Z(t)\|_{\dot{H}^{\nu}}^{2}<C(H,Q)T^{(2-\nu)\alpha+2H-1}<\infty$. When $H=\frac{1}{2}$, $B^{H}(t)$ is a standard Brownian motion and it is easy to see that $Z(t)$ is well defined. This completes the proof. $\square$
\textbf{Theorem 3.2.} For $0\leq\nu<2$ and $0<\alpha<1$, the stochastic process $(Z_{t})_{t\geq0}$ with $\frac{1}{4}<H<1$ is continuous and it satisfies
\begin{align*}
\mathbb{E}\|Z(t_{2})-Z(t_{1})\|_{\dot{H}^{\nu}}^{2}\leq C(H,Q)(t_{2}-t_{1})^{\gamma},~
0\leq t_{1}<t_{2}\leq T,
\end{align*}
where the index $\gamma=\min\{2-(2-\nu)\alpha,(2-\nu)\alpha+4H-3,(2-\nu)\alpha+2H-1\}>0$.
\textbf{Proof.} From (2.7), according to the relation between the Wiener integral and fBm, we have
\begin{align*}
Z(t_{2})-Z(t_{1})&=\int_{0}^{t_{2}}(t_{2}-s)^{\alpha-1}E_{\alpha,\alpha}(t_{2}-s)dB^{H}(s)-\int_{0}^{t_{1}}(t_{1}-s)^{\alpha-1}E_{\alpha,\alpha}(t_{1}-s)dB^{H}(s)\\
&=\int_{0}^{t_{1}}(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))dB^{H}(s)+\int_{t_{1}}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)dB^{H}(s)\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t_{1}}\lambda^{1/2}_{k}(K_{t}^{*}(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))e_{k})(s)d\beta_{k}(s)\\
&\hspace{2mm}+\sum\limits_{k=1}^{\infty}\int_{t_{1}}^{t_{2}}\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t_{2}-s)e_{k})(s)d\beta_{k}(s)\\
&=:J_{1}+J_{2}. \tag{3.5}
\end{align*}
For the term $J_{1}$, we get
\begin{align*}
&\mathbb{E}\|J_{1}\|_{\dot{H}^{\nu}}^{2}\\
&=\mathbb{E}\|\sum\limits_{k=1}^{\infty}\int_{0}^{t_{1}}\lambda^{1/2}_{k}(K_{t}^{*}(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))e_{k})(s)d\beta_{k}(s)\|_{\dot{H}^{\nu}}^{2}\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t_{1}}\mathbb{E}\|\lambda^{1/2}_{k}(K_{t}^{*}(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))e_{k})(s)\|_{\dot{H}^{\nu}}^{2}ds\\
&=\sum\limits_{k=1}^{\infty}\int_{0}^{t_{1}}\mathbb{E}\|\lambda^{1/2}_{k}(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))K_{H}(t,s)e_{k}\\
&\hspace{2mm}+\int_{s}^{t}\lambda^{1/2}_{k}[(\mathcal{S}_{\alpha}(t_{2}-r)-\mathcal{S}_{\alpha}(t_{1}-r))-(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))]\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq 2 (t_{2}-t_{1})^{2-(2-\nu)\alpha}\int_{0}^{t}(\|K_{H}(t,s)\|^{2}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}+2\mathbb{E}(\int_{s}^{t}\|\frac{\partial K_{H}}{\partial r}(r,s)\|^{2}dr)(\int_{s}^{t}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}dr))ds\\
&\leq C(H)Tr(Q)(t_{2}-t_{1})^{2-(2-\nu)\alpha}\int_{0}^{t}[(t-s)^{2H-1}s^{2H-1}+(t-s)(\int_{s}^{t}(\frac{s}{r})^{1-2H}(r-s)^{2H-3}dr)]ds\\
&\leq C(H,Q)t^{2H}(t_{2}-t_{1})^{2-(2-\nu)\alpha}. \tag{3.6}
\end{align*}
Applying Lemma 3.1 and the H\"{o}lder inequality, we obtain
\begin{align*}
\mathbb{E}\|J_{2}\|_{\dot{H}^{\nu}}^{2}&=\mathbb{E}\|\sum\limits_{k=1}^{\infty}\int_{t_{1}}^{t_{2}}\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t_{2}-s)e_{k})(s)d\beta_{k}(s)\|_{\dot{H}^{\nu}}^{2}\\
&=\sum\limits_{k=1}^{\infty}\int_{t_{1}}^{t_{2}}\mathbb{E}\|\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t_{2}-s)e_{k})(s)\|_{\dot{H}^{\nu}}^{2}ds\\
&=\sum\limits_{k=1}^{\infty}\int_{t_{1}}^{t_{2}}\mathbb{E}\|\lambda^{1/2}_{k}\mathcal{S}_{\alpha}(t_{2}-s)K_{H}(t,s)e_{k}\\
&\hspace{2mm}+\int_{s}^{t}\lambda^{1/2}_{k}[\mathcal{S}_{\alpha}(t_{2}-r)-\mathcal{S}_{\alpha}(t_{2}-s)]\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq 2\int_{t_{1}}^{t_{2}}\|\mathcal{S}_{\alpha}(t_{2}-s)K_{H}(t,s)\|_{\dot{H}^{\nu}}^{2}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}ds\\
&\hspace{2mm}+2\sum\limits_{k=1}^{\infty}\int_{t_{1}}^{t_{2}}\mathbb{E}\|\int_{s}^{t}\lambda^{1/2}_{k}[\mathcal{S}_{\alpha}(t_{2}-r)-\mathcal{S}_{\alpha}(t_{2}-s)]\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq C(H)[(t_{2}-t_{1})^{(2-\nu)\alpha+4H-3}+(t_{2}-t_{1})^{(2-\nu)\alpha+2H-1}]. \tag{3.7}
\end{align*}
In a similar manner, for $\frac{1}{2}<H<1$, we have
\begin{align*}
&\mathbb{E}\|Z(t_{2})-Z(t_{1})\|_{\dot{H}^{\nu}}^{2}\\
&\leq 2\sum\limits_{k=1}^{\infty}\mathbb{E}\|\int_{0}^{t_{1}}\lambda^{1/2}_{k}(K_{t}^{*}(\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s))e_{k})(s)d\beta_{k}(s)\|_{\dot{H}^{\nu}}^{2}\\
&\hspace{2mm}+2\sum\limits_{k=1}^{\infty}\mathbb{E}\|\int_{t_{1}}^{t_{2}}\lambda^{1/2}_{k}(K_{t}^{*}\mathcal{S}_{\alpha}(t_{2}-s)e_{k})(s)d\beta_{k}(s)\|_{\dot{H}^{\nu}}^{2}\\
&= 2\sum\limits_{k=1}^{\infty}\int_{0}^{t_{1}}\mathbb{E}\|\int_{s}^{t}\lambda^{1/2}_{k}(\mathcal{S}_{\alpha}(t_{2}-r)-\mathcal{S}_{\alpha}(t_{1}-r))\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\hspace{2mm}+2\sum\limits_{k=1}^{\infty}\int_{t_{1}}^{t_{2}}\|\int_{s}^{t}\lambda^{1/2}_{k}\mathcal{S}_{\alpha}(t_{2}-r)\frac{\partial K_{H}}{\partial r}(r,s)e_{k}dr\|_{\dot{H}^{\nu}}^{2}ds\\
&\leq 2 (t_{2}-t_{1})^{2-(2-\nu)\alpha}\int_{0}^{t_{1}}(\int_{s}^{t}\|\frac{\partial K_{H}}{\partial r}(r,s)\|^{2}dr)(\int_{s}^{t}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}dr)ds\\
&\hspace{2mm}+2\int_{t_{1}}^{t_{2}}(\int_{s}^{t}\|\mathcal{S}_{\alpha}(t_{2}-r)\frac{\partial K_{H}}{\partial r}(r,s)\|_{\dot{H}^{\nu}}^{2}dr)(\int_{s}^{t}\mathbb{E}\|\lambda^{1/2}_{k}e_{k}\|^{2}dr)ds\\
&\leq C(H,Q)[t^{2H}(t_{2}-t_{1})^{2-(2-\nu)\alpha}+(t_{2}-t_{1})^{(2-\nu)\alpha+2H-1}]. \tag{3.8}
\end{align*}
When $H=\frac{1}{2}$, we can deduce that
\begin{align*}
\mathbb{E}\|Z(t_{2})-Z(t_{1})\|_{\dot{H}^{\nu}}^{2}\leq C(H,Q)(t_{2}-t_{1})^{2-(2-\nu)\alpha}. \tag{3.9}
\end{align*}
Therefore, when we set $\gamma=\min\{2-(2-\nu)\alpha,(2-\nu)\alpha+4H-3,(2-\nu)\alpha+2H-1\}>0$ with $\frac{1}{4}<H<1$, taking expectation on both sides of (3.5) and combining (3.6)-(3.9) in turn then leads to the completion of the proof. $\square$
\section{Existence and regularity of mild solution}
In this section, the existence and uniqueness of the mild solution to (1.6) will be proved by the Banach fixed point theorem. Let $K>0$ be a constant to be determined later. We define the following space
\begin{align*}
B_{R}^{T}:=\{u:u\in C([0,T];\dot{H}^{\sigma}),\sup\limits_{t\in[0,T]}\|u(t)\|_{ \dot{H}^{\sigma}}\leq K,~\forall t\in[0,T],\sigma\geq0\},
\end{align*}
where we denote $\dot{H}^{0}:=L^{2}(D)$. The following statement holds.
\textbf{Theorem 4.1.} For $0\leq\nu<2$ and $0<\alpha<1$, there exists a stopping time $T^{*}>0$ such that (1.6) has a unique mild solution in $L^{2}(\Omega,B_{R}^{T^{*}})$.
\textbf{Proof.} We first define a map $\mathcal{F}:B_{R}^{T}\rightarrow C([0,T];\dot{H}^{\sigma})$ in the following manner: for any $u\in B_{R}^{T}$,
\begin{align*}
(\mathcal{F}u)(t)&=E_{\alpha}(t)u_{0}+\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)[B(u(s))+f(u(s))]ds\\
&~~~+\int_{0}^{t}(t-s)^{\alpha-1}E_{\alpha,\alpha}(t-s)dB^{H}(s),~t\in [0,T]. \tag{4.1}
\end{align*}
To begin with, we need to show that $\mathcal{F}u\in B_{R}^{T}$ for $u\in B_{R}^{T}$. Making use of Lemma 3.1 and Theorem 3.1 and H\"{o}lder inequality, and based on $\|B(u)\|\leq C\|u\|\|A^{\frac{1}{2}}u\|$, we get
\begin{align*}
\mathbb{E}\|\mathcal{F}u\|_{\dot{H}^{\nu}}^{2}&\leq 3\mathbb{E}\|E_{\alpha}(t)u_{0}\|_{\dot{H}^{\nu}}^{2}+3\mathbb{E}\|\int_{0}^{t}\mathcal{S}_{\alpha}(t-s)[B(u)+f(u)]ds\|_{\dot{H}^{\nu}}^{2}+3\mathbb{E}\|Z(t)\|_{\dot{H}^{\nu}}^{2}\\
&\leq C\mathbb{E}\|u_{0}\|_{\dot{H}^{\nu}}^{2}+Ct^{(2-\nu)\alpha-1}\mathbb{E}(\int_{0}^{t}(\|B(u)\|^{2}+\|f(u)\|^{2})ds)+C(H,Q)t^{\sigma}\\
&\leq C\mathbb{E}\|u_{0}\|_{\dot{H}^{\nu}}^{2}+Ct^{(2-\nu)\alpha}(1+K^{2}+K^{4})+C(H,Q)t^{\sigma}, \tag{4.2}
\end{align*}
which implies that $\mathcal{F}u\in B_{R}^{T}$ as $T>0$ is sufficiently small and $K$ is sufficiently large. By a similar calculation as showing (4.2), we get the continuity of $\mathcal{F}u$.
Given any $u,v\in B_{R}^{T}$, it follows from Lemma 3.1 that
\begin{align*}
\mathbb{E}\|\mathcal{F}u-\mathcal{F}v\|_{\dot{H}^{\nu}}^{2}&\leq 2\mathbb{E}\|\int_{0}^{t}\mathcal{S}_{\alpha}(t-s)[B(u)-B(v)]ds\|_{\dot{H}^{\nu}}^{2}+2\mathbb{E}\|\int_{0}^{t}\mathcal{S}_{\alpha}(t-s)[f(u)-f(v)]ds\|_{\dot{H}^{\nu}}^{2}\\
&\leq Ct^{2\alpha-1}\mathbb{E}(\int_{0}^{t}K^{2}\|u-v\|_{\dot{H}^{\nu}}^{2}ds)+Ct^{2\alpha-1}\mathbb{E}(\int_{0}^{t}\|u-v\|_{\dot{H}^{\nu}}^{2}ds),\tag{4.3}
\end{align*}
which further implies
\begin{align*}
\sup\limits_{t\in[0,T]}\mathbb{E}\|\mathcal{F}u-\mathcal{F}v\|_{\dot{H}^{\nu}}^{2}\leq C(T^{*})^{2\alpha}(1+K^{2})\sup\limits_{t\in[0,T]}\mathbb{E}\|u-v\|_{\dot{H}^{\nu}}^{2}.\tag{4.4}
\end{align*}
Next, let us take $T^{*}$ such that
\begin{align*}
0<C(T^{*})^{2\alpha}(1+K^{2})<1,
\end{align*}
so that $\mathcal{F}$ is a strict contraction mapping on $B_{R}^{T}$. By the Banach fixed point theorem, there exists a unique fixed point $u\in L^{2}(\Omega,B_{R}^{T^{*}})$, which is a mild solution of (1.6). This completes the proof. $\square$
Our final main result is devoted to the H\"{o}lder regularity of the mild solution and is stated as follows.
\textbf{Theorem 4.2.} For $0\leq\nu<2$, $\frac{1}{4}<H<1$ and $0<\alpha<1$, there exists a unique mild solution $u(t)$ satisfying
\begin{align*}
\mathbb{E}\|u(t_{2})-u(t_{1})\|_{\dot{H}^{\nu}}^{2}< (t_{2}-t_{1})^{\beta}, ~0\leq t_{1}<t_{2}\leq T,
\end{align*}
where $\beta=\min\{\alpha\nu,(2-\nu)\alpha,2-(2-\nu)\alpha,(2-\nu)\alpha+4H-3,(2-\nu)\alpha+2H-1\}>0$.
\textbf{Proof.} From (2.7) we have
\begin{align*}
u(t_{2})-u(t_{1})&=E_{\alpha}(t_{2})u_{0}-E_{\alpha}(t_{1})u_{0}+\int_{0}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)B(u(s))ds-\int_{0}^{t_{1}}\mathcal{S}_{\alpha}(t_{1}-s)B(u(s))ds\\
&\hspace{2mm}+\int_{0}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)f(u(s))ds-\int_{0}^{t_{1}}\mathcal{S}_{\alpha}(t_{1}-s)f(u(s))ds+Z(t_{2})-Z(t_{1})\\
&=:J_{1}+J_{2}+J_{3}+J_{4}, \tag{4.5}
\end{align*}
where we define
\begin{align*}
J_{1}:=E_{\alpha}(t_{2})u_{0}-E_{\alpha}(t_{1})u_{0},~J_{4}:=Z(t_{2})-Z(t_{1}),
\end{align*}
and
\begin{align*}
J_{2}:&=\int_{0}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)B(u(s))ds-\int_{0}^{t_{1}}\mathcal{S}_{\alpha}(t_{1}-s)B(u(s))ds\\
&=\int_{0}^{t_{1}}[\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s)]B(u(s))ds+\int_{t_{1}}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)B(u(s))ds\\
&=:J_{21}+J_{22},
\end{align*}
and
\begin{align*}
J_{3}&:=\int_{0}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)f(u(s))ds-\int_{0}^{t_{1}}\mathcal{S}_{\alpha}(t_{1}-s)f(u(s))ds\\
&=\int_{0}^{t_{1}}[\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s)]f(u(s))ds+\int_{t_{1}}^{t_{2}}\mathcal{S}_{\alpha}(t_{2}-s)f(u(s))ds\\
&=:J_{31}+J_{32}.
\end{align*}
The application of Lemma 2.2 yields that
\begin{align*}
\mathbb{E}\|J_{1}\|_{\dot{H}^{\nu}}^{2}=\mathbb{E}\|E_{\alpha}(t_{2})u_{0}-E_{\alpha}(t_{1})u_{0}\|_{\dot{H}^{\nu}}^{2}\leq (t_{2}-t_{1})^{\alpha\nu}\mathbb{E}\|u_{0}\|^{2}. \tag{4.6}
\end{align*}
Applying the Lemma 3.1 and H\"{o}lder inequality, we get
\begin{align*}
\mathbb{E}\|J_{2}\|_{\dot{H}^{\nu}}^{2}&\leq 2\mathbb{E}\|J_{21}\|_{\dot{H}^{\nu}}^{2}+2\mathbb{E}\|J_{22}\|_{\dot{H}^{\nu}}^{2}\\
&\leq2\mathbb{E}(\int_{0}^{t_{1}}\|\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s)\|_{\dot{H}^{\nu}}^{2}ds)(\int_{0}^{t_{1}}\|B(u(s))\|^{2}ds)\\
&\hspace{2mm}+2\mathbb{E}(\int_{t_{1}}^{t_{2}}\|\mathcal{S}_{\alpha}(t_{2}-s)\|_{\dot{H}^{\nu}}^{2}ds)(\int_{t_{1}}^{t_{2}}\|B(u(s))\|^{2}ds)\\
&\leq CK^{4}T^{2}(t_{2}-t_{1})^{2-(2-\nu)\alpha}+CK^{4}(t_{2}-t_{1})^{(2-\nu)\alpha}, \tag{4.7}
\end{align*}
and
\begin{align*}
\mathbb{E}\|J_{3}\|_{\dot{H}^{\nu}}^{2}&\leq 2\mathbb{E}\|J_{31}\|_{\dot{H}^{\nu}}^{2}+2\mathbb{E}\|J_{32}\|_{\dot{H}^{\nu}}^{2}\\
&\leq2\mathbb{E}(\int_{0}^{t_{1}}\|\mathcal{S}_{\alpha}(t_{2}-s)-\mathcal{S}_{\alpha}(t_{1}-s)\|_{\dot{H}^{\nu}}^{2}ds)(\int_{0}^{t_{1}}\|f(u(s))\|^{2}ds)\\
&\hspace{2mm}+2\mathbb{E}(\int_{t_{1}}^{t_{2}}\|\mathcal{S}_{\alpha}(t_{2}-s)\|_{\dot{H}^{\nu}}^{2}ds)(\int_{t_{1}}^{t_{2}}\|f(u(s))\|^{2}ds)\\
&\leq C(1+K^{2})T^{2}(t_{2}-t_{1})^{2-(2-\nu)\alpha}+C(1+K^{2})(t_{2}-t_{1})^{(2-\nu)\alpha}.\tag{4.8}
\end{align*}
By Theorem 3.2, we have
\begin{align*}
\mathbb{E}\|J_{4}\|_{\dot{H}^{\nu}}^{2}=\mathbb{E}\|Z(t_{2})-Z(t_{1})\|_{\dot{H}^{\nu}}^{2}\leq C(H,Q)(t_{2}-t_{1})^{\gamma}.\tag{4.9}
\end{align*}
Taking expectation on both sides of (4.5) and combining (4.6)-(4.9), the proof of Theorem 4.2 is then completed. $\square$
\end{document} |
\begin{document}
\title[A study on multiple zeta values]{A study on multiple zeta values from the viewpoint of zeta-functions of root systems}
\date{}
\begin{abstract}
We study multiple zeta values (MZVs) from the viewpoint of zeta-functions associated with the root systems which we have studied in our previous papers. In fact, the $r$-ple zeta-functions of Euler-Zagier type can be regarded as the zeta-function associated with a certain sub-root system of type $C_r$. Hence, by the action of the Weyl group, we can find new aspects of MZVs which imply that the well-known
formula for MZVs given by Hoffman and Zagier coincides with Witten's volume formula
associated with the above sub-root system of type $C_r$. Also, from this observation,
we can prove some new formulas which especially include the parity results of double and triple zeta values. As another important application, we give certain refinement of restricted sum formulas, which gives restricted sum formulas among MZVs of an arbitrary depth $r$ which were previously known only in the cases of depth $2,3,4$.
Furthermore, considering a sub-root system of type $B_r$ analogously, we can give relevant analogues of the Hoffman-Zagier formula, parity results and restricted sum formulas.
\end{abstract}
\maketitle
\section{Introduction}\label{sec-1}
Let $\mathbb{N}$, $\mathbb{N}_0$, $\mathbb{Z}$, $\mathbb{Q}$, $\mathbb{R}$,
$\mathbb{C}$ be the set of positive integers, non-negative integers, rational integers,
rational numbers, real numbers, and complex numbers, respectively.
We define the Euler-Zagier $r$-ple zeta-function (simply called the Euler-Zagier sum) by
\begin{equation}
\zeta_r(s_1,\ldots,s_r)=\sum_{0<n_1<\cdots<n_r}\frac{1}{n_1^{s_1}n_2^{s_2}
\cdots n_r^{s_r}},\label{e-1-1}
\end{equation}
where $s_1,\ldots,s_r$ are complex variables. When $(s_1,s_2,\ldots,s_r)\in \mathbb{N}^r$ $(s_r>1)$, this is called the multiple zeta value (MZV) of depth $r$ first studied by Hoffman \cite{Hoff} and Zagier \cite{Za}.
Though the opposite order of summation in the
definition of $\zeta_{r}(s_1,\ldots,s_r)$ is also used recently, we use the order in \eqref{e-1-1} throughout this paper because it is natural in our study.
In the research of MZVs, the main target is to give non-trivial relations
among them, in order to investigate the structure of the algebra generated by them
(for the details, see Kaneko \cite{Ka}).
In our previous papers \cite{KMT}-\cite{KM4} and \cite{MT1}, as more general multiple series,
we defined and studied multi-variable zeta-functions associated with root systems of
type $X_r$ ($X=A,B,C,D,E,F,G$) denoted by
$\zeta_r(s_1,\ldots,s_n;X_r)$ where $n$ is the number of positive roots of type $X_r$
(see definition \eqref{def-zeta}). In particular when $s_1=\cdots=s_r=s$,
$\zeta_r(s,\ldots,s;X_r)$ essentially coincides with the Witten zeta-function
(see Witten \cite{Wi} and Zagier \cite{Za}). An important fact is
\begin{equation}
\zeta_r(2k,2k,\ldots,2k;X_r)\in \mathbb{Q}\cdot \pi^{2kn}\quad (k\in \mathbb{N}),
\label{Witten-VF}
\end{equation}
which is a consequence of Witten's volume formula given in \cite{Wi}. Since we
considered a multi-variable version of the Witten zeta-function, we were able to determine
the rational coefficients in \eqref{Witten-VF} explicitly in a generalized form
(see \cite[Theorem 4.6]{KM3}).
Recently, in our previous paper \cite{KMT-MZ}, we regarded MZVs as special values of
zeta-functions of root systems of type $A_r$, and clarified the structure of the
shuffle product
procedure for MZVs from this viewpoint. In fact, we showed that the shuffle product
procedure can be described in terms of partial fraction decompositions of
zeta-functions of root systems of type $A_r$.
The main idea in the present paper is to regard \eqref{e-1-1} as a specialization of
zeta-functions of root
systems of type $C_r$ (see below).
It is essential in our theory that $C_r$ is not simply-laced.
In fact, there exists a subset of the root system of
type $C_r$ so that the Euler-Zagier sum \eqref{e-1-1} is the zeta-function
associated with this subset (see Section \ref{sec-4}).
This subset itself is a root system, and hence the Weyl group
naturally acts on \eqref{e-1-1}. General fundamental results will be stated in Section
\ref{sec-3}, and their proofs will be given in Section \ref{sec-proof1}.
As a consequence, it can be shown that a kind of formula \eqref{Witten-VF}
corresponding to this sub-root system implies the well-known result given by Hoffman \cite[Section 2]{Hoff} and Zagier \cite[Section 9]{Za} independently:
\begin{equation}
\zeta_r(2k,2k,\ldots,2k)\in \mathbb{Q}\cdot \pi^{2kr}\quad (k\in \mathbb{N})\label{H-Z}
\end{equation}
(see Corollary \ref{Cor-Z}).
Furthermore, based on this observation in the cases when $r=2,3$, we will give explicit
formulas for double series and for triple series (see Proposition \ref{Pr-1} and
Theorem \ref{T-5-1}) which include what is called the parity results for double and
triple zeta values (see Corollary \ref{C-5-3}).
Similarly we can consider analogues of those results corresponding to a sub-root system of type $B_r$. In fact, we can define a $B_r$-type analogue of $\zeta_{r}({\bf s})$ by
\begin{align}
& \zeta_{r}^\sharp({\bf s}) =\sum_{m_1,\ldots,m_r=1}^\infty
\prod_{i=1}^{r}
\frac{1}{(2\sum_{j=r-i+1}^{r-1}m_j+m_r)^{{s_i}}}, \label{def-Br-Zeta}
\end{align}
which is a ``partial sum'' of the series of $\zeta_{r}({\bf s})$ (see Section \ref{sec-6}). From the viewpoint of root systems, we see that this has some properties similar to those of $\zeta_{r}({\bf s})$, because the root system of type $B_r$ is a dual of that of type $C_r$. Actually we can obtain an analogue of \eqref{H-Z} for this series (see Corollary \ref{C-6-2}). We also prove a formula between the values of $\zeta_{2}^\sharp({\bf s})$ and the Riemann zeta values (see Theorem \ref{T-B2-EZ}), which gives the parity result corresponding to type $B_r$ (see Theorem \ref{T-B2-EZ}). This result plays an important role in a recent study on the dimension of the linear space spanned by double zeta values of level $2$ given by Kaneko and Tasaka (see \cite{Ka-Ta}).
The fact that parity results hold in those classes implies that those are ``nice''
classes. In Section \ref{sec-acs} we will study those classes from the analytic
point of view, and prove that those classes, as well as the subclass of
zeta-functions of root systems of type $A_r$ introduced in \cite{KMT-MZ},
are ``closed'' in a certain analytic sense.
Another important consequence of our fundamental theorem in Section \ref{sec-3} is
the ``refined restricted sum formulas'' for the values of $\zeta_{r}({\bf s})$
and $\zeta_{r}^\sharp({\bf s})$, which are embodied in Corollaries \ref{Cor-Cr-Sr}
and \ref{Cor-Br-Sr}.
One of the famous formulas among MZVs is the sum formula, which is, in the case of
double zeta values, written as
\begin{equation}
\sum_{j=2}^{K-1}\zeta_2(K-j,j)=\zeta(K)\quad (K\in \mathbb{Z}_{\geq 3}). \label{sumformula}
\end{equation}
Gangl, Kaneko and Zagier \cite{GKZ} obtained the following formulas, which
``divide'' \eqref{sumformula} for even $K$ into two parts:
\begin{equation}
\begin{split}
& \sum_{a,b \in \mathbb{N}\atop a+b=N} \zeta_2(2a,2b)=\frac{3}{4}\zeta(2N)\in \mathbb{Q}\cdot \pi^{2N}\quad (N\in \mathbb{Z}_{\geq 2}), \\
& \sum_{a,b \in \mathbb{N}\atop a+b=N} \zeta_2(2a-1,2b+1)=\frac{1}{4}\zeta(2N)\in \mathbb{Q}\cdot \pi^{2N}\quad (N\in \mathbb{Z}_{\geq 2}),
\end{split}
\label{F-GKZ}
\end{equation}
which are sometimes called the restricted sum formulas.
More recently, Shen and Cai \cite{Shen-Cai} gave restricted sum formulas for triple and fourth zeta values (see \eqref{sumf-triple} and \eqref{sumf-fourth}).
As we will discuss in Section \ref{sumf},
our Corollaries \ref{Cor-Cr-Sr} and \ref{Cor-Br-Sr} give more refined restricted
sum formulas for $\zeta_r({\bf s})$ and for $\zeta_r^\sharp({\bf s})$ of an
arbitrary depth $r$. From these refined formulas we can deduce the restricted
sum formulas for an arbitrary depth $r$,
actually in a generalized form involving a parameter $d$
(see Theorems \ref{sumf-EZ-Cr} and \ref{sumf-EZ-Br}).
A part of the results in the present paper has been announced in \cite{KMT-PJA}.
\section{Zeta-functions of root systems and root sets}\label{sec-2}
In this section, we recall the definition of zeta-functions of root systems studied in our papers \cite{KMT}-\cite{KM3}.
For the details of basic facts about root systems and Weyl groups, see
\cite{Bourbaki,Hum72,Hum}.
Let $V$ be an $r$-dimensional real vector space equipped with an inner product $\langle \cdot,\cdot\rangle$.
The dual space $V^*$ is identified with $V$ via the inner product of $V$.
Let $\Delta$ be a finite irreducible reduced root system, and
$\Psi=\{\alpha_1,\ldots,\alpha_r\}$ its fundamental system.
We fix
$\Delta_+$ and $\Delta_-$ as the set of all positive roots and negative roots respectively.
Then we have a decomposition of the root system $\Delta=\Delta_+\coprod\Delta_-$.
Let $Q=Q(\Delta)$ be the root lattice, $Q^\vee$ the coroot lattice,
$P=P(\Delta)$ the weight lattice, $P^\vee$ the coweight lattice,
and
$P_{++}$ the set of integral strongly dominant weights
respectively defined by
\begin{align*}
& Q=\bigoplus_{i=1}^r\mathbb{Z}\,\alpha_i,\qquad
Q^\vee=\bigoplus_{i=1}^r\mathbb{Z}\,\alpha^\vee_i,\\
& P=\bigoplus_{i=1}^r\mathbb{Z}\,\lambda_i, \qquad
P^\vee=\bigoplus_{i=1}^r\mathbb{Z}\,\lambda^\vee_i,\qquad P_{++}=\bigoplus_{i=1}^r\mathbb{N}\,\lambda_i,
\end{align*}
where the fundamental weights $\{\lambda_j\}_{j=1}^r$
and
the fundamental coweights $\{\lambda_j^\vee\}_{j=1}^r$
are the dual bases of $\Psi^\vee$ and $\Psi$
satisfying $\langle \alpha_i^\vee,\lambda_j\rangle=\delta_{ij}$ (Kronecker's delta)
and $\langle \lambda_i^\vee,\alpha_j\rangle=\delta_{ij}$ respectively.
Let $\sigma_\alpha :V\to V$ be the reflection with respect to a root $\alpha\in\Delta$ defined by
$$\sigma_\alpha:v\mapsto v-\langle \alpha^\vee,v\rangle\alpha.$$
For a subset $A\subset\Delta$, let
$W(A)$ be the group generated by reflections $\sigma_\alpha$ for all $\alpha\in A$. In particular, $W=W(\Delta)$ is the Weyl group, and
$\{\sigma_j:=\sigma_{\alpha_j}\,|\,1\leq j \leq r\}$ generates $W$.
For $w\in W$, denote
$\Delta_w=\Delta_+\cap w^{-1}\Delta_-$.
The zeta-function associated with $\Delta$ is defined by
\begin{equation}
\zeta_r(\mathbf{s},\mathbf{y};\Delta)
=
\sum_{\lambda\in P_{++}}
e^{2\pi i\langle\mathbf{y},\lambda\rangle}\prod_{\alpha\in\Delta_+}
\frac{1}{\langle\alpha^\vee,\lambda\rangle^{s_\alpha}}, \label{def-zeta}
\end{equation}
where $\mathbf{s}=(s_{\alpha})_{\alpha\in\Delta_+}\in \mathbb{C}^{|\Delta_+|}$
and $\mathbf{y}\in V$.
This can be regarded as a
multi-variable version of Witten zeta-functions formulated by Zagier \cite{Za}
based on the work of Witten \cite{Wi}.
Let $\Delta^*$ be a subset of $\Delta_+$. We call $\Delta^*$ a
{\it root set} (or a {\it root subset} of $\Delta_+$)
if, for any $\lambda_j$ ($1\leq j\leq r$), there exists an element
$\alpha\in\Delta^*$ for which $\langle\alpha,\lambda_j \rangle\neq 0$ holds.
We define the zeta-function associated with a root set $\Delta^*$ by
\begin{equation}
\zeta_r(\mathbf{s},\mathbf{y};\Delta^*)
=
\sum_{\lambda\in P_{++}}
e^{2\pi i\langle\mathbf{y},\lambda\rangle}\prod_{\alpha\in\Delta^*}
\frac{1}{\langle\alpha^\vee,\lambda\rangle^{s_\alpha}}. \label{def-zeta-root-set}
\end{equation}
In the case $\mathbf{y}=\mathbf{0}$, this zeta-function was introduced in \cite{KM2}.
When the root system is of type $X_r$, we write $\Delta=\Delta(X_r)$,
$\Delta^*=\Delta^*(X_r)$, and so on.
\begin{remark}
The notion of $\zeta_r(\mathbf{s},\mathbf{y};\Delta^*)$ depends not only on
$\Delta^*$, but also on $\Delta_+$, because the summation in
\eqref{def-zeta-root-set} runs over all strongly dominant weights associated
with $\Delta_+$.
\end{remark}
\section{Fundamental formulas}\label{sec-3}
In this section, we state several fundamental formulas which are certain extensions
of our previous results given in \cite{KM2,KM5,KM3}.
Proofs of theorems stated in this section will be given in Section \ref{sec-proof1}.
Let
$\mathscr{V}$ be the set of all bases $\mathbf{V}\subset\Delta_+$.
Let
$\mathbf{V}^*=\{\mu_\beta^{\mathbf{V}}\}_{\beta\in\mathbf{V}}$ be the dual basis of $\mathbf{V}^\vee=\{\beta^\vee\}_{\beta\in\mathbf{V}}$.
Let
$L(\mathbf{V}^\vee)
=\bigoplus_{\beta\in\mathbf{V}}\mathbb{Z}\beta^\vee$. Then
we have $\abs{Q^\vee/L(\mathbf{V}^\vee)}<\infty$.
Fix $\phi\in V$ such that $\langle\phi,\mu^{\mathbf{V}}_\beta\rangle\neq 0$ for all $\mathbf{V}\in\mathscr{V}$ and $\beta\in\mathbf{V}$.
If the root system $\Delta$ is of $A_1$ type, then we choose $\phi=\alpha_1^\vee$.
We define a multiple generalization of the fractional part as
\begin{equation*}
\{\mathbf{y}\}_{\mathbf{V},\beta}=
\begin{cases}
\{\langle\mathbf{y},\mu^{\mathbf{V}}_\beta\rangle\}\quad&(\langle\phi,\mu^{\mathbf{V}}_\beta\rangle>0),\\
1-\{-\langle\mathbf{y},\mu^{\mathbf{V}}_\beta\rangle\}&(\langle\phi,\mu^{\mathbf{V}}_\beta\rangle<0),
\end{cases}
\end{equation*}
where the notation $\{x\}$ on the right-hand sides stands for the usual fractional
part of $x\in\mathbb{R}$.
Let $\mathbf{T}=\{t\in\mathbb{C}\;|\;|t|<2\pi\}^{|\Delta_+|}$.
\begin{definition}
\label{thm:exp_F}
For $\mathbf{t}=(t_{\alpha})_{\alpha\in\Delta_+}\in\mathbf{T}$ and
$\mathbf{y}\in V$,
we define
\begin{equation}\label{def-F}
\begin{split}
F&(\mathbf{t},\mathbf{y};\Delta)=
\sum_{\mathbf{V}\in\mathscr{V}}
\Bigl(
\prod_{\gamma\in\Delta_+\setminus\mathbf{V}}
\frac{t_\gamma}
{t_\gamma-\sum_{\beta\in\mathbf{V}}t_\beta\langle\gamma^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\Bigr)
\\
&\times
\frac{1}{\abs{Q^\vee/L(\mathbf{V}^\vee)}}
\sum_{q\in Q^\vee/L(\mathbf{V}^\vee)}
\Bigl(
\prod_{\beta\in\mathbf{V}}\frac{t_\beta\exp
(t_\beta\{\mathbf{y}+q\}_{\mathbf{V},\beta})}{e^{t_\beta}-1}
\Bigr)
\\
&=
\sum_{\mathbf{k}\in\mathbb{N}_{0}^{|\Delta_+|}}
\mathcal{P}(\mathbf{k},\mathbf{y};\Delta)
\prod_{\alpha\in\Delta_+} \frac{t_\alpha^{k_\alpha}}{k_\alpha!}
\end{split}
\end{equation}
which is independent of the choice of $\phi$.
\end{definition}
\begin{remark}
In \cite{KM5}, $F(\mathbf{t},\mathbf{y};\Delta)$ is defined in a different way.
The above is \cite[Theorem 4.1]{KM5}. In particular when $\Delta=\Delta(A_1)$, we see that
$$F(\mathbf{t},\mathbf{y};\Delta(A_1))=\frac{te^{t\{y\}}}{e^t-1},$$
which is the generating function of ordinary Bernoulli periodic functions $\{B_k(\{y\})\}$.
\end{remark}
Let
\begin{equation}
\label{eq:def_S}
S(\mathbf{s},\mathbf{y};\Delta)
=\sum_{\lambda\in P\setminus H_{\Delta^\vee}}
e^{2\pi i\langle \mathbf{y},\lambda\rangle}
\prod_{\alpha\in\Delta_+}
\frac{1}{\langle\alpha^\vee,\lambda\rangle^{s_\alpha}},
\end{equation}
where $H_{\Delta^\vee}=\{v\in V~|~\langle \alpha^\vee,v\rangle=0\text{
for some }\alpha\in\Delta\}$ is the set of all walls of Weyl
chambers. For $\mathbf{s}\in\mathbb{C}^{|\Delta_+|}$, we define
$(w\mathbf{s})_{\alpha}=s_{w^{-1}\alpha}$,
where if $w^{-1}\alpha\in\Delta_-$ we use the convention $s_{-\alpha}=s_\alpha$.
\begin{prop}[{\cite[Theorem 4.4]{KM3},\cite[Proposition 3.2]{KM5}}]
\label{prop:ZP}
\begin{equation}
\label{eq:formula1}
\begin{split}
S(\mathbf{k},\mathbf{y};\Delta)
&=
\sum_{w\in W}
\Bigl(\prod_{\alpha\in\Delta_+\cap w\Delta_-}
(-1)^{k_{\alpha}}\Bigr)
\zeta_r(w^{-1}\mathbf{k},w^{-1}\mathbf{y};\Delta)\\
&=(-1)^{\abs{\Delta_+}}
\mathcal{P}(\mathbf{k},\mathbf{y};\Delta)
\biggl(\prod_{\alpha\in\Delta_+}
\frac{(2\pi i)^{k_\alpha}}{k_\alpha!}\biggr)
\end{split}
\end{equation}
for $k_\alpha\in\mathbb{Z}_{\geq2}$ ($\alpha\in\Delta_+$).
\end{prop}
\begin{remark}
It should be noted that
the formula \eqref{eq:formula1} holds in the cases
$k_\alpha=1$ for some $\alpha\in\Delta_+$, while it does not hold in the cases
$k_\alpha=0$ for any $\alpha\in\Delta_+$.
\end{remark}
For $\mathbf{v}\in V$, and a differentiable function $f$ on $V$,
let
\begin{equation*}
(\partial_{\mathbf{v}}f)(\mathbf{y})=\lim_{h\to 0}\frac{f(\mathbf{y}+h\mathbf{v})-f(\mathbf{y})}{h}
\end{equation*}
and for $\alpha\in\Delta_+$,
\begin{equation*}
\mathfrak{D}_\alpha=
\frac{\partial}{\partial t_\alpha}
\biggr\rvert_{t_\alpha=0}\partial_{\alpha^\vee}.
\end{equation*}
Let $\Delta^*\subset\Delta_+$ be a root set and
let $A=\Delta_+\setminus\Delta^*=\{\nu_1,\ldots,\nu_N\}\subset\Delta_+$, and define
\begin{equation*}
\mathfrak{D}_A=\mathfrak{D}_{\nu_N}\cdots \mathfrak{D}_{\nu_1}.
\end{equation*}
Similarly we define
\begin{gather}
\mathfrak{D}_{\alpha,2}=
\frac{1}{2}\frac{\partial^2}{\partial t_\alpha^2}
\biggr\rvert_{t_\alpha=0}\partial^2_{\alpha^\vee},\\
\mathfrak{D}_{A,2}=\mathfrak{D}_{\nu_N,2}\cdots \mathfrak{D}_{\nu_1,2}.
\end{gather}
Further, let $A_j=\{\nu_1,\ldots,\nu_j\}$ ($1\leq j\leq N-1$), $A_0=\emptyset$, and
\begin{equation*}
\mathscr{V}_A=\{\mathbf{V}\in\mathscr{V}~|~
\text{$\nu_{j+1}\notin{\rm L.h.}[\mathbf{V}\cap A_j]\;(0\leq j \leq N-1)$} \},
\end{equation*}
where ${\rm L.h.}[\;\cdot\;]$ denotes the linear hull (linear span).
Let $\mathscr{R}$ be the set of all linearly independent subsets
$\mathbf{R}=\{\beta_1,\ldots,\beta_{r-1}\}\subset\Delta$
and
\begin{equation}
\label{eq:def_H_R}
\mathfrak{H}_{\mathscr{R}}:=
\bigcup_{\substack{\mathbf{R}\in\mathscr{R}\\ q\in Q^\vee}}({\rm L.h.}
[\mathbf{R}^\vee]+q).
\end{equation}
\begin{remark}\label{tsuika}
It is to be noted that $\mathbf{y}\in\mathfrak{H}_{\mathscr{R}}$ if and only if
$\langle\mathbf{y}+q,\mu^{\mathbf{V}}_\beta\rangle\in\mathbb{Z}$ for some
$\mathbf{V}\in\mathscr{V},\mathbf{\beta}\in\mathbf{V},q\in Q^\vee$.
In fact, if $\mathbf{y}\in\mathfrak{H}_{\mathscr{R}}$ then we can write
$\mathbf{y}=\sum_{j=1}^{r-1}a_j \beta_j^{\vee}+q$ ($a_j\in\mathbb{R}$).
We can find an element $\beta_r\in\Delta$ such that
$\mathbf{V}=\{\beta_1,\ldots,\beta_r\}\in\mathscr{V}$.
Then $\langle\mathbf{y}-q,\mu_{\beta_r}^{\mathbf{V}}\rangle=0\in\mathbb{Z}$.
Conversely, assume $\langle\mathbf{y}+q,\mu^{\mathbf{V}}_\beta\rangle=c\in\mathbb{Z}$.
Write $\mathbf{V}=\{\beta_1,\ldots,\beta_{r-1},\beta\}$. Since this is a basis, we
may write $\mathbf{y}+q=\sum_{j=1}^{r-1}a_j \beta_j^{\vee}+a\beta^{\vee}$ with
$a_j,a\in\mathbb{R}$. Then
$c=\langle\mathbf{y}+q,\mu^{\mathbf{V}}_\beta\rangle=a$, especially $a\in\mathbb{Z}$.
Therefore $a\beta^{\vee}-q\in Q^{\vee}$, which implies
$\mathbf{y}\in \mathfrak{H}_{\mathscr{R}}$.
\end{remark}
\begin{definition}
For $\Delta_+\setminus \Delta^*=A=\{\nu_1,\ldots,\nu_N\}\subset\Delta_+$, $\mathbf{t}_{\Delta^*}=\{t_\alpha\}_{\alpha\in\Delta^*}$ and $\mathbf{y}\in V$,
we define
\begin{equation}
\begin{split}
F_{\Delta^*}(\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)
&=
\sum_{\mathbf{V}\in\mathscr{V}_A}
(-1)^{\abs{A\setminus\mathbf{V}}}\\
& \times\Bigl(
\prod_{\gamma\in\Delta_+\setminus(\mathbf{V}\cup A)}
\frac{t_\gamma}
{t_\gamma-\sum_{\beta\in\mathbf{V}\setminus A}t_\beta\langle\gamma^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\Bigr)
\\
&\times
\frac{1}{\abs{Q^\vee/L(\mathbf{V}^\vee)}}
\sum_{q\in Q^\vee/L(\mathbf{V}^\vee)}
\Bigl(
\prod_{\beta\in\mathbf{V}\setminus A}\frac{t_\beta\exp
(t_\beta\{\mathbf{y}+q\}_{\mathbf{V},\beta})}{e^{t_\beta}-1}
\Bigr).
\end{split}
\end{equation}
\end{definition}
\begin{thrm}
\label{thm:main0}
For $\Delta_+\setminus \Delta^*=A=\{\nu_1,\ldots,\nu_N\}\subset\Delta_+$, $\mathbf{t}_{\Delta^*}=\{t_\alpha\}_{\alpha\in\Delta^*}$ and $\mathbf{y}\in V\setminus\mathfrak{H}_{\mathscr{R}}$,
we have
\begin{equation}
\label{eq:main0}
\bigl(\mathfrak{D}_A F\bigr) (\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)=
\bigl(\mathfrak{D}_{A,2} F\bigr) (\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)=
F_{\Delta^*}(\mathbf{t}_{\Delta^*},\mathbf{y};\Delta),
\end{equation}
and hence is independent of choice of the order of $A$.
The function $F_{\Delta^*}(\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)$
is the continuous extension
of $\bigl(\mathfrak{D}_A F\bigr) (\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)$
in $\mathbf{y}$ in the sense that
$\bigl(\mathfrak{D}_A F\bigr) (\mathbf{t}_{\Delta^*},\mathbf{y}+c\phi;\Delta)$
tends continuously to $F_{\Delta^*}(\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)$
when $c\to 0+$,
and is holomorphic with respect to $\mathbf{t}_{\Delta^*}$ around the origin.
\begin{definition}
For $\Delta^*\subset\Delta_+$ and $\mathbf{t}_{\Delta^*}=\{t_\alpha\}_{\alpha\in\Delta^*}$, we define $\mathcal{P}_{\Delta^*}(\mathbf{k}_{\Delta^*},\mathbf{y};\Delta)$ by
\begin{align*}
&
F_{\Delta^*}(\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)
\\
&=
\sum_{\mathbf{k}_{\Delta^*}\in \mathbb{N}_{0}^{\abs{\Delta^*}}}\mathcal{P}_{\Delta^*}(\mathbf{k}_{\Delta^*},\mathbf{y};\Delta)
\prod_{\alpha\in\Delta^*}
\frac{t_{\alpha}^{k_\alpha}}{k_\alpha!}.
\end{align*}
\end{definition}
\begin{thrm}
\label{thm:main1}
For
$\mathbf{s}=\mathbf{k}=(k_\alpha)_{\alpha\in\Delta_+}$ with
$k_\alpha\in\mathbb{Z}_{\geq2}$ ($\alpha\in \Delta^*$),
$k_\alpha=0$ ($\alpha\in \Delta_+\setminus \Delta^*$),
we have
\begin{align}
&\sum_{w\in W}
\Bigl(\prod_{\alpha\in\Delta_+\cap w\Delta_-}
(-1)^{k_{\alpha}}\Bigr)
\zeta_r(w^{-1}\mathbf{k},w^{-1}\mathbf{y};\Delta) \label{Th-main}\\
& =(-1)^{\abs{\Delta_+}}
\mathcal{P}_{\Delta^*}(\mathbf{k}_{\Delta^*},\mathbf{y};\Delta)
\biggl(\prod_{\alpha\in\Delta^*}
\frac{(2\pi i)^{k_\alpha}}{k_\alpha!}\biggr)
\notag
\end{align}
provided all the series on the left-hand side absolutely converge.
Assume that $\Delta$ is not simply-laced. Then we have the disjoint
union $\Delta=\Delta_l\cup\Delta_s$, where $\Delta_l$ is the set of
all long roots and $\Delta_s$ is the set of all short roots.
Note that if there is an odd $k_i$, then both hand sides vanish in \eqref{Th-main}. On the other hand, when all $k_i$'s are even,
by applying Theorem \ref{thm:main1} to $\Delta^*=\Delta_l$ or $\Delta_s$, we obtain
the following theorem immediately, which is a generalization of the explicit
volume formula proved in \cite[Theorem 4.6]{KM3}.
\begin{thrm}
\label{thm:main2}
Let $\Delta_1=\Delta_l$ (resp.~$\Delta_s$), $\Delta_2=\Delta_s$ (resp.~$\Delta_l$),
and $\Delta_{j+}=\Delta_j\cap\Delta_+$ ($j=1,2$).
Then $\Delta_{j+}$ ($j=1,2$) is a root subset of $\Delta_+$.
For
$\mathbf{s}_1=\mathbf{k}_1=(k_\alpha)_{\alpha\in\Delta_{1+}}$ with
$k_\alpha=k\in 2\mathbb{N}$ (for all $\alpha\in\Delta_{1+}$)
and $\nu\in P^\vee/Q^\vee$, we have
\begin{align}
&\zeta_r(\mathbf{k}_1,\nu;\Delta_{1+}) =\frac{(-1)^{\abs{\Delta_+}}}{|W|}
\mathcal{P}_{\Delta_{1+}}(\mathbf{k}_1,\nu;\Delta)
\biggl(\prod_{\alpha\in\Delta_{1+}}
\frac{(2\pi i)^{k_\alpha}}{k_\alpha!}\biggr).\label{main2}
\end{align}
\end{thrm}
\begin{remark}
Let $\mathbf{s}=\mathbf{k}=(k_\alpha)_{\alpha\in\Delta_{+}}$ with
$k_{\alpha}=k\in 2\mathbb{N}$ ($\alpha\in\Delta_{1+}$) and
$k_\alpha=0$ ($\alpha\in\Delta_{2+}$). Then obviously
$\zeta_r(\mathbf{k}_1,\nu;\Delta_{1+})=\zeta_r(\mathbf{k},\nu;\Delta)$.
Our proof of Theorem \ref{thm:main2} is actually based on the latter viewpoint.
\end{remark}
\section{Multiple zeta values and zeta-functions of root system of type $C_r$}
\label{sec-4}
Now we study MZVs from the viewpoint of zeta-functions of root systems of type $C_r$.
For $\Delta=\Delta(C_r)$, we have the disjoint union $\Delta_+^\vee=(\Delta_{l+})^\vee\cup(\Delta_{s+})^\vee$,
where $\Delta_{l+}=\Delta_{l+}(C_r)=\Delta_l(C_r)\cap\Delta_+(C_r)$,
$\Delta_{s+}=\Delta_{s+}(C_r)=\Delta_s(C_r)\cap\Delta_+(C_r)$, and
\begin{align*}
(\Delta_{l+})^\vee&
=\{
\alpha_r^\vee,\
\alpha_{r-1}^\vee+\alpha_r^\vee,\
\alpha_{r-2}^\vee+\alpha_{r-1}^\vee+\alpha_r^\vee,\
\ldots,
\alpha_1^\vee+\cdots+\alpha_r^\vee
\}.
\end{align*}
Since $P^\vee/Q^\vee=\{\mathbf{0},\lambda_r^\vee\}$,
for $\mathbf{s}_l=(s_{\alpha})_{\alpha\in\Delta_{l+}}$, we have
\begin{align*}
\zeta_r(\mathbf{s}_l,\mathbf{0};\Delta_{l+}(C_r))&=\sum_{m_1,\ldots,m_r=1}^\infty
\prod_{i=1}^{r}
\frac{1}{(\sum_{j=r-i+1}^{r-1}m_j+m_r)^{{s_i}}},\\
\zeta_r(\mathbf{s}_l,\lambda_r^\vee;\Delta_{l+}(C_r))&=\sum_{m_1,\ldots,m_r=1}^\infty
\prod_{i=1}^{r}
\frac{(-1)^{m_r}}{(\sum_{j=r-i+1}^{r-1}m_j+m_r)^{{s_i}}},
\end{align*}
where the first equation is exactly the Euler-Zagier sum $\zeta_{r}(s_1,\ldots,s_r)$ (see \eqref{e-1-1}).
In order to apply Theorems \ref{thm:main1} and \ref{thm:main2} to MZVs,
we rewrite the root system of type $C_r$ in terms of standard orthonormal basis $\{e_1,\ldots,e_r\}$.
We put $\alpha_i^\vee=e_{i}-e_{i+1}$ for $1\leq i\leq r-1$ and $\alpha_r^\vee=e_r$.
Then we have
\begin{equation*}
(\Delta_{l+})^\vee
=\{
\alpha_r^\vee=e_r,\
\alpha_{r-1}^\vee+\alpha_r^\vee=e_{r-1},\
\alpha_{r-2}^\vee+\alpha_{r-1}^\vee+\alpha_r^\vee=e_{r-2},\
\ldots,
\alpha_1^\vee+\cdots+\alpha_r^\vee=e_1
\}.
\end{equation*}
In this realization, we see that
$W(C_r)=(\mathbb{Z}/2\mathbb{Z})^r\rtimes \mathfrak{S}_r$, where
$\mathfrak{S}_r$ is the symmetric group of degree $r$ which permutes
bases, and the $j$-th $\mathbb{Z}/2\mathbb{Z}$ flips the sign of
$e_j$. Since the sign flips act trivially on the variables
$\mathbf{s}_l$, from Theorem \ref{thm:main1} we obtain
the following formulas. These are the ``refined restricted sum formulas'' for
$\zeta_r(\mathbf{s})$, which we will discuss in Section \ref{sumf}.
\begin{cor} \label{Cor-Cr-Sr}
Let $\Delta=\Delta(C_r)$. For $(2{\bf k})_l=(2k_{\alpha})_{\alpha\in\Delta_{l+}}=(2k_1,\ldots,2k_r)\in \left(2\mathbb{N}\right)^r$ and
$\mathbf{y}=\nu\in P^\vee/Q^\vee$,
\begin{equation} \label{EZ-Sr-1}
\sum_{\sigma\in\mathfrak{S}_r}
\zeta_r(\sigma^{-1}(2\mathbf{k})_l,\nu;\Delta_{l+})
=\frac{(-1)^{r}}{2^r}\mathcal{P}_{\Delta_{l+}}
((2\mathbf{k})_l,\nu;\Delta)
\prod_{j=1}^{r}\frac{(2\pi i)^{2k_j}}{(2k_j)!} \in\mathbb{Q}\cdot \pi^{2\sum_{j=1}^{r}k_j}.
\end{equation}
In particular when $\nu={\bf 0}$,
\begin{equation} \label{EZ-Sr-11}
\sum_{\sigma\in\mathfrak{S}_r}
\zeta_r(2k_{\sigma^{-1}(1)},\ldots,2k_{\sigma^{-1}(r)})
=\frac{(-1)^{r}}{2^r}\mathcal{P}_{\Delta_{l+}}
((2\mathbf{k})_l,{\bf 0};\Delta)
\prod_{j=1}^{r}\frac{(2\pi i)^{2k_j}}{(2k_j)!} \in\mathbb{Q}\cdot \pi^{2\sum_{j=1}^{r}k_j}.
\end{equation}
\end{cor}
Also Theorem \ref{thm:main2} in the case of type $C_r$ immediately gives the following.
\begin{cor}\label{Cor-Z}
Let $\Delta=\Delta(C_r)$. For $({\bf 2k})_l=(2k,\ldots,2k)$ with any $k\in \mathbb{N}$,
\begin{align}
& \zeta_{r}(2k,2k,\ldots,2k)=\frac{(-1)^{r}}{2^r r!}\mathcal{P}_{\Delta_{l+}}
((\mathbf{2k})_l,{\bf 0};\Delta)
\frac{(2\pi i)^{2kr}}{\{ (2k)! \}^r}
\in\mathbb{Q}\cdot \pi^{2kr}. \label{Zagier-F2}
\end{align}
\end{cor}
\begin{remark}\label{Rem-Hof}
The fact
$$\sum_{\sigma\in\mathfrak{S}_r} \zeta_r(2k_{\sigma^{-1}(1)},\ldots,2k_{\sigma^{-1}(r)})\in\mathbb{Q}\cdot \pi^{2\sum_{j=1}^{r}k_j}$$
was first proved by Hoffman \cite[Theorem 2.2]{Hoff}. This gives the well-known result
$$\zeta_{r}(2k,\ldots,2k)\in\mathbb{Q}\cdot \pi^{2kr},$$
which was also given by Zagier \cite[Section 9]{Za} independently.
Broadhurst, Borwein and Bradley gave explicit formulas for these values in \cite[Section 2]{BBB}.
Also it is known that
\begin{equation}
\zeta_{r}(2k,\ldots,2k)=\cc_r^{(k)}\frac{(2\pi i)^{2kr}}{(2kr)!},\label{Zagier-F}
\end{equation}
where
$$\cc_0^{(k)}=1,\ \ \cc_{n}^{(k)}=\frac{1}{2n}\sum_{j=1}^{n}(-1)^j \binom{2nk}{2jk}B_{2jk}\cc_{n-j}^{(k)}\ \ (n \geq 1).$$
Formula \eqref{Zagier-F} was first published in the lecture notes \cite{AK1}, \cite{AK2}
written in Japanese (Exercise 5, Section 1.1 of those lecture notes).
See also Muneta \cite{Mun}.
We emphasize that \eqref{Zagier-F} can be regarded as a kind of Witten's volume formula \eqref{Zagier-F2}.
Because \eqref{Zagier-F2} and \eqref{Zagier-F} in the case $r=1$ are both Euler's
well-known formula
\begin{equation}
\zeta(2k)=-B_{2k}\frac{(2\pi i)^{2k}}{2(2k)!}\qquad (k\in \mathbb{N}), \label{Euler-F}
\end{equation}
we can see that
$\mathcal{P}_{\Delta_{l+}}((\mathbf{2k})_l,{\bf 0};\Delta)$ and $\cc_r^{(k)}$ are different types of generalizations of the ordinary Bernoulli number $B_{2k}$.
\end{remark}
\begin{example} \label{Exam-C2}
Let $\Delta=\Delta(C_2)$ be the root system of type $C_2$.
By Theorem \ref{thm:main0}, we have
\begin{align*}
\ (\mathfrak{D}_{\Delta_{s+}}F)(t_1,t_2,y_1,y_2;\Delta)
& =1+\frac{t_1 t_2 e^{\{y_2\}t_1}}{(e^{t_1}-1)(t_1-t_2)}\\
& \quad +\frac{t_1 t_2 e^{\{y_2\} t_2}}{(e^{t_2}-1) (-t_1+t_2)}
+\frac{t_1 t_2 e^{(1-\{y_1-y_2\}) t_1+\{y_1\} t_2}}{(e^{t_1}-1) (e^{t_2}-1)}
\\
&\quad
-\frac{t_1 t_2 e^{(1-\{2 y_1-y_2\}) t_1}}{(e^{t_1}-1) (t_1+t_2)}
-\frac{t_1 t_2 e^{\{2 y_1-y_2\} t_2}}{(e^{t_2}-1) (t_1+t_2)}
\\
& =\sum_{k_1,k_2=1}^\infty \mathcal{P}_{\Delta_{l+}}(k_1,k_2,y_1,y_2;\Delta)\frac{t_1^{k_1}t_2^{k_2}}{k_1!k_2!}.
\end{align*}
Set $(y_1,y_2)=(0,0)$ and $\mathbf{k}=(0,k_1,k_2,0)$. Then $\zeta_2(0,k_1,k_2,0;y_1,y_2;\Delta)=\zeta_2(k_1,k_2)$ for $\Delta=\Delta(C_2)$. Hence it follows from \eqref{Th-main} that
\begin{align}
& (1+(-1)^{k_1})(1+(-1)^{k_2}) \zeta_2(k_1,k_2)+(1+(-1)^{k_2})(1+(-1)^{k_1}) \zeta_2(k_2,k_1) \label{exam-C2} \\
& = (-1)^4\mathcal{P}_{\Delta_{l+}}(k_1,k_2,0,0;\Delta)\frac{(2\pi i)^{k_1+k_2}}{k_1!k_2!} \notag
\end{align}
for $k_1,k_2\geq 2$.
For example, we can compute
$$\mathcal{P}_{\Delta_{l+}}(4,4,0,0;\Delta)=\frac{1}{6300}$$
from the above expansion. Hence we obtain
$$\zeta_2(4,4)=\frac{(-1)^4}{8}\frac{1}{6300} \frac{(2\pi i)^8}{(4!)^2}=\frac{\pi^8}{113400}.$$
Similarly we can compute $\zeta_{2}(2k,2k)$ for
$k\in \mathbb{N}$, though in this case we can also compute $\zeta_{2}(2k,2k)$
by using the well-known harmonic product formula for double zeta values
\begin{equation}
\zeta(s)\zeta(t)=\zeta_{2}(s,t)+\zeta_{2}(t,s) +\zeta(s+t). \label{harm}
\end{equation}
In the next section, we introduce a slight generalization of Corollary \ref{Cor-Z} which gives evaluation formulas of $\zeta_{2}(k,l)$ for odd $k+l$ in terms of $\zeta(s)$ (see Proposition \ref{Pr-1}).
\end{example}
\begin{remark}
In the general $C_r$ case, considering the expansion of
$$(\mathfrak{D}_{\Delta_{s+}}F)({\bf t}_{\Delta_{l+}},{\bf 0};\Delta(C_r))$$
similarly, we can systematically compute $\zeta_{r}(2k,\ldots,2k)$. Moreover,
considering the case $\nu\not={\bf 0}$ for $\zeta_r(\mathbf{s},\nu;\Delta(C_r))$,
we can give character analogues of Corollary \ref{Cor-Z} for multiple $L$-values,
which were first proved by Yamasaki \cite{Ya}.
\end{remark}
\section{Some relations and parity results for double and triple zeta values}\label{sec-5}
In Theorem \ref{thm:main1}, we considered the sum over $W$ on the left-hand side of \eqref{Th-main}.
Here, more generally, we consider the sum over a certain set of minimal coset
representatives on the left-hand side of \eqref{Th-main}.
In this case, it is not easy to execute its computation directly.
Hence we use a more technical method which was already introduced in \cite{KMT-CJ}. First we show the following result for double zeta values corresponding to a sub-root system of type $C_2$, where the number of the terms on the left-hand side is just the half of that on the left-hand side of \eqref{exam-C2}.
\begin{prop}\label{Pr-1}
For $p,q \in \mathbb{N}_{\geq 2}$,
\begin{align*}
& \left( 1+(-1)^p\right)\zeta_{2}(p,q)+\left( 1+(-1)^q\right) \zeta_{2}(q,p) \\
& \ =2\sum_{j=0}^{[p/2]}\binom{p+q-2j-1}{q-1}\zeta(2j)\zeta(p+q-2j) \\
& \quad +2\sum_{j=0}^{[q/2]}\binom{p+q-2j-1}{p-1}\zeta(2j)\zeta(p+q-2j) -\zeta(p+q).
\end{align*}
\end{prop}
\begin{proof}
The proof was essentially stated in \cite[Theorem 3.1]{KMT-CJ} which is a simpler form of a previous result for zeta-functions of type $A_2$ given by the third-named author \cite[Theorem 4.5]{Ts-Cam}. In fact, setting $(k,l,s)=(p,q,0)$ in \cite[Theorem 3.1]{KMT-CJ}, we have
\begin{align*}
& \zeta(p)\zeta(q)+(-1)^p\zeta_{2}(p,q)+(-1)^q \zeta_{2}(q,p) \\
& \ =2\sum_{j=0}^{[p/2]}\binom{p+q-2j-1}{q-1}\zeta(2j)\zeta(p+q-2j) \\
& \quad +2\sum_{j=0}^{[q/2]}\binom{p+q-2j-1}{p-1}\zeta(2j)\zeta(p+q-2j).
\end{align*}
Combining this and \eqref{harm},
we have the assertion.
\end{proof}
In particular when $p$ and $q$ are of different parity, we see that $\zeta_{2}(p,q)\in \mathbb{Q}[\{\zeta(j+1)\,|\,j\in \mathbb{N}\}]$ which was first proved by Euler. For example, we have
$$\zeta_2(2,3)=3\zeta(2)\zeta(3)-\frac{11}{2}\zeta(5).$$
Next we consider triple zeta values. From the viewpoint of the root system of $C_3$ type, we have the following theorem. Note that, unlike the case of double zeta values, this result seems not to be led from the result on the case of type $A_3$ (cf. \cite[Theorems 5.9 and 5.10]{MT1}).
\begin{thrm} \label{T-5-1}\ For $a,b,c\in \mathbb{N}_{\geq 2}$,
\begin{align*}
&(1+(-1)^a)\zeta_3(a,b,c)+(1+(-1)^b)\{ \zeta_3(b,a,c)+\zeta_3(b,c,a)\}+(-1)^b(1+(-1)^c)\zeta_3(c,b,a)\\
& =2\bigg\{ \sum_{\xi=0}^{[a/2]}\zeta(2\xi)\sum_{\omega=0}^{a-2\xi}\binom{\omega+b-1}{\omega}\binom{a+c-2\xi-\omega-1}{c-1}\zeta_2(b+\omega,a+c-2\xi-\omega)\\
& +\sum_{\xi=0}^{[b/2]}\zeta(2\xi)\sum_{\omega=0}^{a-1}\binom{\omega+b-2\xi}{\omega}\binom{a+c-\omega-2}{c-1}\zeta_2(b-2\xi+\omega+1,a+c-1-\omega)\\
& +(-1)^b\sum_{\xi=0}^{[c/2]}\zeta(2\xi)\sum_{\omega=0}^{c-2\xi}\binom{\omega+b-1}{\omega}\binom{a+c-2\xi-\omega-1}{a-1}\zeta_2(b+\omega,a+c-2\xi-\omega)\\
& +(-1)^b\sum_{\xi=0}^{[b/2]}\zeta(2\xi)\sum_{\omega=0}^{c-1}\binom{\omega+b-2\xi}{\omega}\binom{a+c-\omega-2}{a-1}\zeta_2(b-2\xi+\omega+1,a+c-1-\omega)\bigg\}\\
& -\zeta_2(a+b,c)-(1+(-1)^b)\zeta_2(b,a+c)-(-1)^b\zeta_2(b+c,a).
\end{align*}
\end{thrm}
The proof of this theorem will be given in Section \ref{sec-proof2}.
This theorem especially implies the following result which was proved by Borwein and Girgensohn (see \cite{BG}).
\begin{cor} \label{C-5-3} \ Let
$$\mathfrak{X}=\mathbb{Q}\left[ \left\{ \zeta(j+1),\zeta_2(k,l+1)\right\}_{j,k,l\in \mathbb{N}}\right],$$
namely the $\mathbb{Q}$-algebra generated by Riemann zeta values and double zeta values at positive integers except singularities.
Suppose $a,b,c\in \mathbb{N}_{\geq 2}$ satisfy that $a+b+c$ is even. Then $\zeta_3(a,b,c)\in \mathfrak{X}$.
\end{cor}
\begin{proof}
We recall the harmonic product formula
\begin{align}
& \zeta_3(a,b,c)+\zeta_3(b,a,c)+\zeta_3(b,c,a) =\zeta(a)\zeta_2(b,c)-\zeta_2(b,c+a)-\zeta_2(a+b,c) \label{harmonic}
\end{align}
for $a,b,c\in \mathbb{N}_{\geq 2}$ (see \cite{Ka}).
Let $a,b,c \in \mathbb{N}_{\geq 2}$ satisfy that $a+b+c$ is even. First we assume that $a,b,c$ are all even. Then, combining Theorem \ref{T-5-1} and \eqref{harmonic}, we see that $\zeta_3(c,b,a)\in \mathfrak{X}$.
Next we assume that $a$ is even and $b,c$ are odd. Then, by Theorem \ref{T-5-1}, we see that $\zeta_3(a,b,c)\in \mathfrak{X}$.
As for other cases, we can similarly obtain the assertions by using Theorem \ref{T-5-1} and \eqref{harmonic}. Thus we complete the proof.
\end{proof}
\begin{remark}
The following property of the multiple zeta value is sometimes called the parity result:
\it The multiple zeta value $\zeta_r(k_1,k_2,\ldots,k_r)$ of depth $r$ can be expressed as a rational linear combination of products of MZVs of lower depth than $r$, when its depth $r$ and its weight $\sum_{j=1}^{r}k_j$ are of different parity.
\rm
The fact in case of depth 2 was proved by Euler, and that of depth $3$ was proved by Borwein and Girgensohn (see \cite{BG}). Further they conjectured the above assertion in the case of an arbitrary depth. This conjecture was proved by the third-named author \cite{TsActa04} and by Ihara, Kaneko and Zagier \cite{IKZ} independently.
It should be stressed that our Corollary \ref{C-5-3} gives an explicit expression of the parity result for the triple zeta value under the condition $a,b,c\in \mathbb{N}_{\geq 2}$.
Therefore it seems important to generalize Theorem \ref{T-5-1} in order to give an explicit expression of the parity result of an arbitrary depth.
\end{remark}
\begin{example}
Putting $(a,b,c)=(2,2,4)$ in Theorem \ref{T-5-1}, we have
\begin{align*}
& 2\zeta_3(2,2,4)+2\{\zeta_3(2,2,4)+\zeta_3(2,4,2)\}+2\zeta_3(4,2,2)\\
& =2\zeta(4)\zeta_2(2,2)+\zeta(2)\{8\zeta_2(4,2)+ 12\zeta_2(3,3)+16\zeta_2(2,4)+16\zeta_2(1,5)\} \\
& \quad -16\zeta_2(6,2) - 20\zeta_2(5,3)-25\zeta_2(4,4)-24\zeta_2(3,5)-17\zeta_2(2,6).
\end{align*}
Therefore, using \eqref{harmonic}, we obtain
\begin{align*}
\zeta_3(4,2,2)& =\zeta(4)\zeta_2(2,2)+\zeta(2)\{4\zeta_2(4,2)+ 6\zeta_2(3,3)+7\zeta_2(2,4)+8\zeta_2(1,5)\}\\
& \quad -8\zeta_2(6,2) - 10\zeta_2(5,3)-\frac{23}{2}\zeta_2(4,4)-12\zeta_2(3,5)-\frac{15}{2}\zeta_2(2,6) \in \mathfrak{X}.
\end{align*}
Note that this formula can be proved by combining known results for MZVs given by the double shuffle relations and harmonic product formulas (see, for example, \cite[Section 5]{MP}).
\end{example}
\begin{remark}
If we replace \eqref{5-1-0} (in Section \ref{sec-proof2}) by
$$\sum_{l\in \mathbb{N}}\sum_{m\in \mathbb{Z}^*} (-1)^{l+m}x^l y^m e^{i(l+m)\theta}, $$
and argue along the same line as in the proof of Theorem \ref{T-5-1}, then we can obtain
\begin{align*}
& \left( 1+(-1)^a\right)\left( 1+(-1)^c\right)\left\{ \zeta_3(a,b,c)+\zeta_3(a,c,b)+\zeta_3(c,a,b)\right\} \\
& \ +\left( 1+(-1)^b\right)\left( 1+(-1)^c\right)\left\{ \zeta_3(c,b,a)+\zeta_3(b,c,a)+\zeta_3(b,a,c)\right\}\\
& \qquad \in \mathbb{Q}[\{\zeta(j+1)\,|\,j\in \mathbb{N}\}]
\end{align*}
for $a,b,c\in \mathbb{N}_{\geq 2}$.
In particular when $a,b,c$ are all even, we have \eqref{Zagier-F} for the triple zeta value which can be regarded as a kind of Witten's volume formula \eqref{Zagier-F2} (see Section \ref{sec-4}). Furthermore, when
$a$ is odd and both $b$ and $c$ are even, then
\begin{align*}
& {\zeta_3(c,b,a)+\zeta_3(b,c,a)+\zeta_3(b,a,c)} \in \mathbb{Q}\left[ \left\{\zeta(j+1)\,|\,j\in \mathbb{N}\right\}\right].
\end{align*}
Note that this result can also be deduced by combining \eqref{harmonic} and Proposition \ref{Pr-1}.
\end{remark}
\section{Multiple zeta values associated with the root system of type $B_r$}
\label{sec-6}
In this section we discuss the $B_r$-analogue of our theory developed in the preceding
two sections.
As for the root system of type $B_r$, namely for $\Delta=\Delta(B_r)$,
we see that
\begin{align*}
(\Delta_{s+})^\vee&
=\{\alpha_r^\vee,\ 2\alpha_{r-1}^\vee+\alpha_r^\vee,
2\alpha_{r-2}^\vee+2\alpha_{r-1}^\vee+\alpha_r^\vee,
\ldots, 2\alpha_1^\vee+\cdots+2\alpha_{r-1}^\vee+\alpha_r^\vee \}.
\end{align*}
Therefore for $\mathbf{s}_s=(s_{\alpha})_{\alpha\in\Delta_{s+}}$ we have
\begin{align}
& \zeta_r({\bf s}_s,{\bf 0};\Delta_{s+}(B_r))=\sum_{m_1,\ldots,m_r=1}^\infty
\prod_{i=1}^{r}
\frac{1}{(2\sum_{j=r-i+1}^{r-1}m_j+m_r)^{{s_i}}}, \label{B2-zeta}
\end{align}
which is a partial sum of $\zeta_{r}({\bf s})$. For example, we have
\begin{align}
& \zeta_2({\bf s}_s,{\bf 0};\Delta_{s+}(B_2))=\sum_{l,m=1}^\infty \frac{1}{m^{s_1}(2l+m)^{s_2}},\label{6-1}\\
& \zeta_3({\bf s}_s,{\bf 0};\Delta_{s+}(B_3))=\sum_{l,m,n=1}^\infty \frac{1}{n^{s_1}(2m+n)^{s_2}(2l+2m+n)^{s_3}},\label{6-2}
\end{align}
where $s_j=s_{\alpha_j}$ corresponding to $\alpha_j\in \Delta_{s+}$.
From the viewpoint of zeta-functions
of root systems, values of \eqref{B2-zeta} at positive integers can be regarded as the
objects dual to MZVs, in the sense that $B_r$ and $C_r$ are dual of each other.
Hence we denote \eqref{B2-zeta} by $\zeta_r^\sharp(s_1,\ldots,s_r)$.
Since $W(B_r)\simeq W(C_r)$, just like Corollary \ref{Cor-Cr-Sr},
from Theorem \ref{thm:main1} we can obtain
the following result, which gives the ``refined restricted sum formulas'' for
$\zeta_r^{\sharp}(\mathbf{s})$.
\begin{cor}\label{Cor-Br-Sr}
Let $\Delta=\Delta(B_r)$. For $(2{\bf k})_s=(2k_{\alpha})_{\alpha\in\Delta_{s+}}=(2k_1,\ldots,2k_r)\in \left(2\mathbb{N}\right)^r$ and
$\mathbf{y}=\nu\in P^\vee/Q^\vee$,
\begin{equation}
\sum_{\sigma\in\mathfrak{S}_r}
\zeta_r (\sigma^{-1}(2\mathbf{k})_s,\nu;\Delta_{s+})
=\frac{(-1)^{r}}{2^r}\mathcal{P}_{\Delta_{s+}}
((2\mathbf{k})_s,\nu;\Delta)
\prod_{j=1}^{r}\frac{(2\pi i)^{2k_j}}{(2k_j)!} \in\mathbb{Q}\cdot \pi^{2\sum_{j=1}^{r}k_j}.
\end{equation}
In particular when $\nu={\bf 0}$,
\begin{equation} \label{EZ-Sr-1-2}
\sum_{\sigma\in\mathfrak{S}_r}
\zeta_r^\sharp(2k_{\sigma^{-1}(1)},\ldots,2k_{\sigma^{-1}(r)})
=\frac{(-1)^{r}}{2^r}\mathcal{P}_{\Delta_{s+}}
((2\mathbf{k})_s,{\bf 0};\Delta)
\prod_{j=1}^{r}\frac{(2\pi i)^{2k_j}}{(2k_j)!} \in\mathbb{Q}\cdot \pi^{2\sum_{j=1}^{r}k_j}.
\end{equation}
\end{cor}
From Theorem \ref{thm:main2}, we obtain an analogue of Corollary \ref{Cor-Z}, which is a kind of Witten's volume formula and also a $B_r$-type analogue of \eqref{Zagier-F}.
\begin{cor} \label{C-6-2}
Let $\Delta=\Delta(B_r)$. For $({\bf 2k})_s=(2k,\ldots,2k)$ with any $k\in \mathbb{N}$,
\begin{align*}
& \zeta_r^\sharp(2k,\ldots,2k)=\frac{(-1)^{r}}{2^r r!}\mathcal{P}_{\Delta_{s+}}
((\mathbf{2k})_s,{\bf 0};\Delta)\frac{(2\pi i)^{2kr}}{\{ (2k)! \}^r}
\in\mathbb{Q}\cdot \pi^{2kr}.
\end{align*}
\end{cor}
\begin{example}\label{B-EZ-Exam}
\begin{align*}
\zeta_2^\sharp(2,2)&=\sum_{m,n=1}^\infty \frac{1}{n^{2} (2m+n)^{2}}=\frac{1}{320}\pi^4,\\
\zeta_2^\sharp(4,4)&=\sum_{m,n=1}^\infty \frac{1}{n^{4} (2m+n)^{4}}=\frac{23}{14515200}\pi^8,\\
\zeta_2^\sharp(6,6)&=\sum_{m,n=1}^\infty \frac{1}{n^{6} (2m+n)^{6}}=\frac{1369}{871782912000}\pi^{12}.
\end{align*}
These formulas can be obtained by calculating the generating function of type $B_2$
similarly to the case of type $C_2$ in Example \ref{Exam-C2} (see Section \ref{sec-4}). Also we can obtain these formulas by Theorem \ref{T-B2-EZ} in the case $(p,q)=(2k,2k)$ for $k\in \mathbb{N}$.
However, unlike the ordinary
double zeta value,
these cannot be easily deduced from \eqref{harm}.
Similarly, calculating the generating function of type $B_3$, we have explicit examples of Corollary \ref{C-6-2}:
\begin{align*}
\zeta_3^\sharp(2,2,2)&=\sum_{l,m,n=1}^\infty \frac{1}{n^{2} (2m+n)^{2}(2l+2m+n)^2}=\frac{1}{40320}\pi^{6},\\
\zeta_3^\sharp(4,4,4)&=\sum_{l,m,n=1}^\infty \frac{1}{n^{4} (2m+n)^{4}(2l+2m+n)^4}=\frac{23}{697426329600}\pi^{12},\\
\zeta_3^\sharp(6,6,6)&=\sum_{l,m,n=1}^\infty \frac{1}{n^{6} (2m+n)^{6}(2l+2m+n)^6}=\frac{1997}{17030314057236480000}\pi^{18}.
\end{align*}
\end{example}
Also, similarly to Proposition \ref{Pr-1}, we can obtain the following result whose proof will be given in Section \ref{sec-proof2}.
\begin{thrm}\label{T-B2-EZ}
For $p,q\in \mathbb{N}_{\geq 2}$,
\begin{align}
& \ (1+(-1)^p)\zeta_2^\sharp (p,q) +(1+(-1)^q)\zeta_2^\sharp(q,p)\label{Pr-2-1}\\
& = 2 \sum_{j=0}^{[p/2]} \frac{1}{2^{p+q-2j}}\binom{p+q-1-2j}{q-1}\zeta(2j)\zeta(p+q-2j)\notag\\
& + 2\sum_{j=0}^{[q/2]} \frac{1}{2^{p+q-2j}}\binom{p+q-1-2j}{p-1}\zeta(2j)\zeta(p+q-2j) -\zeta(p+q). \notag
\end{align}
\end{thrm}
Theorem \ref{T-B2-EZ} in the case that $p$ and $q$ are of different parity implies the following.
\begin{cor} \label{parity-B2}
Let $p,q \in \mathbb{N}_{\geq 2}$. Suppose $p$ and $q$ are of different parity, then
$$\zeta_2^\sharp(p,q)\in \mathbb{Q}\left[ \left\{\zeta(j+1)\,|\,j\in \mathbb{N}\right\}\right],$$
which is a parity result for $\zeta_2^\sharp$.
\end{cor}
\begin{remark}
This parity result for $\zeta_2^\sharp(p,q)$ is important in a recent study of the dimension of the linear space spanned by double zeta values of level $2$ given by Kaneko and Tasaka (see \cite{Ka-Ta}).
For example, setting $(p,q)=(3,2)$ in \eqref{Pr-2-1}, we have
\begin{align*}
\zeta_2^\sharp(2,3)&=\sum_{m,n=1}^\infty \frac{1}{n^{2} (2m+n)^{3}}=-\frac{21}{32}\zeta(5)+\frac{3}{8}\zeta(2)\zeta(3).
\end{align*}
It should be noted that this property can be given by combining the known facts for double zeta values and for their alternating series
$$\varphi_2(s_1,s_2)=\sum_{m,n=1}^\infty \frac{(-1)^m}{n^{s_1}(m+n)^{s_2}}.$$
Actually we see that
$$\zeta_2^\sharp(s_1,s_2)=\frac{1}{2}\left\{\zeta_2(s_1,s_2)+\varphi_2(s_1,s_2)\right\}.$$
When $p$ and $q$ are of different parity ($p,q \in \mathbb{N}$ and $q\geq 2$), Euler proved that
$$\zeta_2(p,q)\in \mathbb{Q}\left[ \left\{\zeta(j+1)\,|\,j\in \mathbb{N}\right\}\right],$$
and Borwein et al. proved that
$$\varphi_2(p,q)\in \mathbb{Q}\left[ \left\{\zeta(j+1)\,|\,j\in \mathbb{N}\right\}\right]$$
(see \cite{BBG}), from which Corollary \ref{parity-B2} follows.
However \eqref{Pr-2-1} gives more explicit information on the parity result for $\zeta_2^\sharp(p,q)$.
\end{remark}
Furthermore we can obtain the following result which can be regarded as an analogue of Theorem \ref{T-5-1} for type $B_3$. This can be proved similarly to Theorem \ref{T-5-1}, hence we omit its proof here.
\begin{thrm}\label{T-5-2}
For $a,b,c\in \mathbb{N}_{\geq 2}$,
\begin{align*}
&(1+(-1)^a)\zeta_3^\sharp(a,b,c)+(1+(-1)^b)\{ \zeta_3^\sharp(b,a,c)+\zeta_3^\sharp(b,c,a)\}+(-1)^b(1+(-1)^c)\zeta_3^\sharp(c,b,a)\\
& =2^{1-a-b-c}\bigg\{ \sum_{\xi=0}^{[a/2]}2^\xi \zeta(2\xi)\sum_{\omega=0}^{a-2\xi}\binom{\omega+b-1}{\omega}\binom{a+c-2\xi-\omega-1}{c-1}\zeta_2(b+\omega,a+c-2\xi-\omega)\\
& +\sum_{\xi=0}^{[b/2]}2^\xi\zeta(2\xi)\sum_{\omega=0}^{a-1}\binom{\omega+b-2\xi}{\omega}\binom{a+c-\omega-2}{c-1}\zeta_2(b-2\xi+\omega+1,a+c-1-\omega)\\
& +(-1)^b\sum_{\xi=0}^{[c/2]}2^\xi\zeta(2\xi)\sum_{\omega=0}^{c-2\xi}\binom{\omega+b-1}{\omega}\binom{a+c-2\xi-\omega-1}{a-1}\zeta_2(b+\omega,a+c-2\xi-\omega)\\
& +(-1)^b\sum_{\xi=0}^{[b/2]}2^\xi\zeta(2\xi)\sum_{\omega=0}^{c-1}\binom{\omega+b-2\xi}{\omega}\binom{a+c-\omega-2}{a-1}\zeta_2(b-2\xi+\omega+1,a+c-1-\omega)\bigg\}\\
& -\zeta_2^\sharp(a+b,c)-(1+(-1)^b)\zeta_2^\sharp(b,a+c)-(-1)^b\zeta_2^\sharp(b+c,a).
\end{align*}
\end{thrm}
\begin{remark}
In \cite{KMT-Lie}, we study zeta-functions of weight lattices of
semisimple compact connected Lie groups. We can prove analogues of Theorem \ref{thm:main1}
for those zeta-functions by a method similar to the above.
We will give the details in a forthcoming paper.
\end{remark}
\section{Certain restricted sum formulas for $\zeta_r({\bf s})$ and for $\zeta_r^\sharp({\bf s})$} \label{sumf}
In this section, we give certain restricted sum formulas for $\zeta_r({\bf s})$ and for $\zeta_r^\sharp({\bf s})$ of an arbitrary depth $r$ which essentially include known results.
As we stated in Section \ref{sec-1},
Gangl, Kaneko and Zagier \cite{GKZ} obtained the restricted sum formulas \eqref{F-GKZ} for double zeta values. Recently
Nakamura \cite{Na-Sh} gave certain analogues of \eqref{F-GKZ}.
More recently, Shen and Cai \cite{Shen-Cai} gave the following restricted sum formulas for triple and fourth zeta values:
\begin{align}
& \sum_{a_1,a_2,a_3 \in \mathbb{N}\atop a_1+a_2+a_3=N}
\zeta_3(2a_1,2a_2,2a_3)= \frac{5}{8}\zeta(2N) - \frac{1}{4}\zeta(2)\zeta(2N - 2)\in \mathbb{Q}\cdot \pi^{2N}\quad (N\in \mathbb{Z}_{\geq 3}), \label{sumf-triple}\\
& \sum_{a_1,a_2,a_3,a_4 \in \mathbb{N}\atop a_1+a_2+a_3+a_4=N}
\zeta_4(2a_1,2a_2,2a_3,2a_4)\label{sumf-fourth}\\
& \quad = \frac{35}{64}\zeta(2N) - \frac{5}{16}\zeta(2)\zeta(2N - 2)\in \mathbb{Q}\cdot \pi^{2N}(N\in \mathbb{Z}_{\geq 4}). \notag
\end{align}
Also Machide \cite{Mach} gave certain restricted sum formulas for triple zeta values.
Now recall our Corollaries \ref{Cor-Cr-Sr} and \ref{Cor-Br-Sr}.
In the above restricted sum formulas, the summations are taken over all tuples
$(a_1,\ldots,a_r)$ satisfying $a_1+\cdots+a_r=N$. On the other hand, the summations
in the formulas of Corollaries \ref{Cor-Cr-Sr} and \ref{Cor-Br-Sr} are running over
much smaller range, that is, just all the permutations of one fixed
$(a_1,\ldots,a_r)$ with $a_1+\cdots+a_r=N$. Therefore our Corollaries give
subdivisions, or refinements, of known restricted sum formulas.
Summing our formulas for all tuples $(a_1,\ldots,a_r)$ satisfying $a_1+\cdots+a_r=N$,
we can obtain the $r$-ple generalization of \eqref{F-GKZ}, \eqref{sumf-triple} and
\eqref{sumf-fourth}.
Moreover we can show the following further generalization, which gives a new type of
restricted sum formulas.
For $d\in \mathbb{N}$ and $N\in \mathbb{N}$, let
$$I_{r}(d,N)=\left\{ (2da_1,\ldots,2da_r)\in (2d\mathbb{N})^r\,|\,a_1+\cdots+a_r=N\right\}.$$
Denote by $P_r$ the set of all partitions of $r$, namely
$$P_r=\bigcup_{\nu=1}^{r}\{(j_1,\cdots,j_\nu)\in \mathbb{N}^\nu\,|\,j_1+\cdots+j_\nu=r\}.$$
For $J=(j_1,\cdots,j_\nu)\in P_r$, we set
$$\mathcal{A}_r(d,N,J)=\left\{ ((2dh_1)^{[j_1]},\ldots,(2dh_\nu)^{[j_\nu]})\in I_{r}(d,N)\,|\, h_1<\cdots<h_\nu\right\},$$
where $(2h)^{[j]}=(2h,\ldots,2h)\in (2\mathbb{N})^j$.
Then we have the following restricted sum formulas of depth $r$.
\begin{thrm} \label{sumf-EZ-Cr}
For $d\in \mathbb{N}$ and $N\in \mathbb{N}$ with $N\geq r$,
\begin{align}
& \sum_{a_1,\ldots,a_r \in \mathbb{N} \atop a_1+\cdots+a_r=N}\zeta_r(2da_1,\ldots,2da_r)\label{R-sumf}\\
& =\frac{(-1)^{r}}{2^r}\sum_{J=(j_1,\cdots,j_\nu)\in P_r}\frac{1}{j_1!\cdots j_\nu !}\notag\\
& \qquad \times \sum_{(2d\mathbf{k})_l \in \mathcal{A}_r(d,N,J)}\mathcal{P}_{\Delta_{l+}}
((2d\mathbf{k})_l,{\bf 0};\Delta(C_r)) \prod_{\rho=1}^{r}\frac{(2\pi i)^{2dk_\rho}}{(2k_\rho)!} \in\mathbb{Q}\cdot \pi^{2dN}.\notag
\end{align}
\end{thrm}
\begin{remark}
In the case $d=1$ and $r=2,3,4$, we essentially obtain \eqref{F-GKZ}, \eqref{sumf-triple}, \eqref{sumf-fourth}. Also, in the case $N=r$, we obtain \eqref{Zagier-F2} stated in Corollary \ref{Cor-Z}. More generally,
in the case $d=1$ and $r\geq 2$, Muneta \cite{Mu} already conjectured an explicit expression of the left-hand side of \eqref{R-sumf} in terms of $\{\zeta(2k)\,|\,k\in \mathbb{N}\}$.
\end{remark}
\begin{proof}[Proof of Theorem \ref{sumf-EZ-Cr}]
Let $(2da_1,\ldots,2da_r)\in I_{r}(d,N)$.
Denote a set of different elements in $\{a_1,\ldots,a_r\}$ by
$\{h_1,\ldots,h_\nu\}$, and put
$j_\mu=\sharp \{ a_m\,|\, a_m=h_\mu\}$ $(1\leq \mu\leq \nu)$.
We may assume $h_1<\cdots<h_{\nu}$.
We can easily see that there exist $\sigma\in \mathfrak{S}_r$ and
$((2dh_1)^{[j_1]},\ldots,(2dh_\nu)^{[j_\nu]}) \in \mathcal{A}_r(d,N,J)$ with
$J=(j_1,\cdots,j_\nu)\in P_r$ such that
$$(2da_1,\ldots,2da_r)=((2dh_1)^{[j_1]},\ldots,(2dh_\nu)^{[j_\nu]})^\sigma,$$
where we use the notation
$$(k_1,\ldots,k_r)^\sigma=(k_{\sigma(1)},\ldots,k_{\sigma(r)}).$$
On the other hand, the set
$\{\left( (2dh_1)^{[j_1]},\ldots,(2dh_\nu)^{[j_\nu]}\right)^\tau\,|\,\tau\in \mathfrak{S}_r\}$ contains $j_1!\cdots j_\nu!$-copies of each element.
In fact, if we denote by $\frak{S}(1,...,j_1)$ the set of all permutations among
$\{1,...,j_1\}$, then
$$\mathfrak{X}(J):=\frak{S}(1,\dots,j_1) \times \frak{S}(j_1+1,\ldots,j_1+j_2)
\times\cdots\times\frak{S}(\sum_{\rho=1}^{\nu-1}j_\rho+1, \ldots,\sum_{\rho=1}^{\nu}j_\rho) \subset \frak{S}_r
$$
forms the stabilizer subgroup of $((2dh_1)^{[j_1]},\ldots,(2dh_\nu)^{[j_\nu]})$,
and hence $\sharp \mathfrak{X}(J)=j_1!\cdots j_\nu!$.
Therefore, using Corollary \ref{Cor-Cr-Sr}, we have
\begin{align*}
& \sum_{a_1,\ldots,a_r \in \mathbb{N} \atop a_1+\cdots+a_r=N}
\zeta_r(2da_1,\ldots,2da_r)=\sum_{(2da_1,\ldots,2da_r)\in I_{r}(d,N)}\zeta_r(2da_1,\ldots,2da_r) \\
& =\sum_{J=(j_1,\cdots,j_\nu)\in P_r}\frac{1}{j_1!\cdots j_\nu !}\sum_{(2dk_1,\ldots,2dk_r) \atop \in \mathcal{A}_r(d,N,J)}\sum_{\sigma\in \mathfrak{S}_r}\zeta_r(2dk_{\sigma(1)},\ldots,2dk_{\sigma(r)})\\
& =\frac{(-1)^{r}}{2^r}\sum_{J=(j_1,\cdots,j_\nu)\in P_r}\frac{1}{j_1!\cdots j_\nu !}\sum_{(2d\mathbf{k})_l \in \mathcal{A}_r(d,N,J)}\mathcal{P}_{\Delta_{l+}(C_r)}
((2d\mathbf{k})_l,{\bf 0};\Delta) \prod_{\rho=1}^{r}\frac{(2\pi i)^{2dk_\rho}}{(2dk_\rho)!}.
\end{align*}
This completes the proof.
\end{proof}
Similarly, using Corollary \ref{Cor-Br-Sr}, we obtain the following.
\begin{thrm} \label{sumf-EZ-Br}
For $d\in \mathbb{N}$ and $N\in \mathbb{N}$ with $N\geq r$,
\begin{align*}
& \sum_{a_1,\ldots,a_r \in \mathbb{N} \atop a_1+\cdots+a_r=N}\zeta_r^\sharp (2da_1,\ldots,2da_r)\\
& =\frac{(-1)^{r}}{2^r}\sum_{J=(j_1,\cdots,j_\nu)\in P_r}\frac{1}{j_1!\cdots j_\nu !}\notag\\
& \quad \times \sum_{(2d\mathbf{k})_s \in \mathcal{A}_r(d,N,J)}\mathcal{P}_{\Delta_{s+}}
((2d\mathbf{k})_s,{\bf 0};\Delta(B_r)) \prod_{\rho=1}^{r}\frac{(2\pi i)^{2dk_\rho}}{(2k_\rho)!} \in\mathbb{Q}\cdot \pi^{2dN}.
\end{align*}
\end{thrm}
\section{Analytically closed subclass}\label{sec-acs}
In this section we observe our theory from the analytic point of view.
First consider the case of type $C_r$. In Section \ref{sec-4} we have shown that
the zeta-functions corresponding to the sub-root system of type $C_r$ consisting
of all long roots are exactly the family of Euler-Zagier sums. On the other hand,
it is known that the Euler-Zagier $r$-fold sum can be expressed as an integral
involving the Euler-Zagier $(r-1)$-fold sum in the integrand. In fact, it holds
that
\begin{align}\label{acs-1}
\zeta_r(s_1,\ldots,s_r)=\frac{1}{2\pi i}\int_{(\kappa)}\frac{\Gamma(s_r+z)\Gamma(-z)}
{\Gamma(s_r)}\zeta_{r-1}(s_1,\ldots,s_{r-2},s_{r-1}+s_r+z)\zeta(-z)dz
\end{align}
for $r\geq 2$, where $-\Re s_r<\kappa<-1$ and the path of integral is the vertical line
from $\kappa-i\infty$ to $\kappa+i\infty$ (see \cite[Section 12]{Mat-NMJ},
\cite[Section 3]{Mat-JNT}). This formula is proved by applying the classical
Mellin-Barnes integral formula (\eqref{acs-2} below),
so we may call \eqref{acs-1} the Mellin-Barnes
integral expression of $\zeta_r(s_1,\ldots,s_r)$.
Formula \eqref{acs-1} implies that the family of Euler-Zagier sums is closed
under the Mellin-Barnes integral operation. (Note that the Riemann zeta-function,
also appearing in the integrand, is the Euler-Zagier sum with $r=1$.)
When some family of zeta-functions is closed in this sense, we call the family
{\it analytically closed}.
The aim of this section is to prove that the subclasses of type $B_r$
and of type $A_r$ discussed in our theory are both analytically closed.
\begin{prop}\label{prop-acs-1}
The family of zeta-functions $\zeta_r({\bf s}_s,{\bf 0};\Delta_{s+}(B_r))$ defined by
\eqref{B2-zeta} is analytically closed.
\end{prop}
\begin{proof}
Recall the Mellin-Barnes formula
\begin{align}\label{acs-2}
(1+\lambda)^{-s}=\frac{1}{2\pi i}\int_{(\kappa)}\frac{\Gamma(s+z)\Gamma(-z)}
{\Gamma(s)}\lambda^z dz,
\end{align}
where $s,\lambda\in\mathbb{C}$ with $\Re s>0$, $\lambda\neq 0$,
$|\arg\lambda|<\pi$, $\kappa$ is real with $-\Re s<\kappa<0$.
Dividing the factor $(2(m_1+\cdots+m_{r-1})+m_r)^{-s_r}$ as
$$
(2(m_2+\cdots+m_{r-1})+m_r)^{-s_r}
\left(1+\frac{2m_1}{2(m_2+\cdots+m_{r-1})+m_r}\right)^{-s_r}
$$
and applying \eqref{acs-2} to the second factor with
$\lambda=2m_1/(2(m_2+\cdots+m_{r-1})+m_r)$, we obtain
\begin{align}
&\zeta_r((s_1,\ldots,s_r),{\bf 0};\Delta_{s+}(B_r))\label{acs-3}\\
&=\frac{1}{2\pi i}\int_{(\kappa)}\frac{\Gamma(s_r+z)\Gamma(-z)}{\Gamma(s_r)}
\sum_{m_1,\ldots,m_r=1}^{\infty}\prod_{i=1}^{r-1}\frac{1}
{(2\sum_{j=r-i+1}^{r-1}m_j+m_r)^{s_i}}\notag\\
&\qquad\times(2(m_2+\cdots+m_{r-1})+m_r)^{-s_r}
\left(\frac{2m_1}{2(m_2+\cdots+m_{r-1})+m_r}\right)^z dz\notag\\
&=\frac{1}{2\pi i}\int_{(\kappa)}\frac{\Gamma(s_r+z)\Gamma(-z)}{\Gamma(s_r)}
\sum_{m_1=1}^{\infty}(2m_1)^z\notag\\
&\;\times\sum_{m_2,\ldots,m_r=1}^{\infty}
\prod_{i=1}^{r-2}\frac{1}{(2\sum_{j=r-i+1}^{r-1}m_j+m_r)^{s_i}}
(2(m_2+\cdots+m_{r-1})+m_r)^{-s_{r-1}-s_r-z}dz\notag\\
&=\frac{1}{2\pi i}\int_{(\kappa)}\frac{\Gamma(s_r+z)\Gamma(-z)}{\Gamma(s_r)}
2^z \zeta(z)
\zeta_{r-1}((s_1,\ldots,s_{r-2},s_{r-1}+s_r+z),{\bf 0};\Delta_{s+}(B_{r-1}))dz.\notag
\end{align}
This implies the assertion.
\end{proof}
Next we consider the subclass of type $A_r$ which we studied in \cite{KMT-MZ},
and prove that it is also analytically closed. This part may be regarded as
a supplement of \cite{KMT-MZ}.
The explicit form of the zeta-function of the root system of type $A_r$ is given by
\begin{align}\label{acs-4}
\zeta_r(\mathbf{s},\mathbf{0};\Delta(A_r))=\sum_{m_1,\ldots,m_r=1}^{\infty}\prod_{h=1}^r
\prod_{j=h}^r\left(\sum_{k=h}^{r+h-j}m_k\right)^{-s_{hj}}
\end{align}
(where $\mathbf{s}=(s_{hj})_{h,j}$; see \cite[formula (13)]{KMT-MZ}).
Let $a,b\in\mathbb{N}$, $c\in\mathbb{N}_0$ with $a+b+c=r$.
The main result in \cite{KMT-MZ} asserts
that the shuffle product procedure can be completely described by the partial
fraction decomposition of zeta-functions \eqref{acs-4} at special values
$\mathbf{s}=\mathbf{d}=(d_{hj})_{h,j}$, where $d_{hj}$ for
\begin{align}\label{acs-5}
\left\{
\begin{array}{lll}h=1,\;1\leq j\leq c\\
h=1,\;b+c+1\leq j\leq a+b+c\\
h=a+1,\;a+c+1\leq j\leq a+b+c
\end{array}
\right.
\end{align}
are all positive integers, and all other $d_{hj}$ are equal to 0.
Let $\Delta_+^{(a,b,c)}=\Delta_+^{(a,b,c)}(A_r)$ be the set of all positive roots
corresponding to $s_{hj}$
with $(h,j)$ in the list \eqref{acs-5}. Then this is a root set, and the
above special values can be interpreted as special values of zeta-functions of
$\Delta_+^{(a,b,c)}$.
\begin{thrm}\label{Th-A}
The family of zeta-functions $\zeta_r(\mathbf{s}^{(a,b,c)},\mathbf{0};
\Delta_+^{(a,b,c)}(A_r))$ is analytically closed, where
$\mathbf{s}^{(a,b,c)}=(s_{hj})_{h,j}$ with $(h,j)$ in the list \eqref{acs-5}.
\end{thrm}
\begin{proof}
We prove that zeta-functions $\zeta_{r+1}$ belonging to the above family can be
expressed as a Mellin-Barnes integral, or multiple integrals, involving
$\zeta_r$ also belonging to the above family.
Let $a,b\in\mathbb{N}$, $c\in\mathbb{N}_0$ with $a+b+c=r$.
We show that all of the zeta-functions $\zeta_{r+1}$ associated with
(i) $\Delta_+^{(a+1,b,c)}$, (ii) $\Delta_+^{(a,b+1,c)}$, (iii) $\Delta_+^{(a,b,c+1)}$
have integral expressions involving the zeta-function of $\Delta_+^{(a,b,c)}$.
From \eqref{acs-4} we see that
\begin{align}
\zeta_r(\mathbf{s}^{(a,b,c)},\mathbf{0};
\Delta_+^{(a,b,c)}(A_r))
&=\sum_{m_1,\ldots,m_{a+b+c}=1}^{\infty}
\prod_{j=1}^c(m_1+m_2+\cdots+m_{a+b+c+1-j})^{-s_{1j}}\label{acs-6}\\
&\times\prod_{j=b+c+1}^{a+b+c}(m_1+m_2+\cdots+m_{a+b+c+1-j})^{-s_{1j}}\notag\\
&\times\prod_{j=a+c+1}^{a+b+c}(m_{a+1}+m_{a+2}+\cdots+m_{2a+b+c+1-j})
^{-s_{a+1,j}},
\notag
\end{align}
which is, by renaming the variables,
\begin{align}
=&\sum_{m_1,\ldots,m_{a+b+c}=1}^{\infty}(m_1+\cdots+m_{a+b+1})^{-s_{11}}\cdots
(m_1+\cdots+m_{a+b+c})^{-s_{1c}}\label{acs-7}\\
&\times m_1^{-s_{21}}(m_1+m_2)^{-s_{22}}\cdots(m_1+\cdots+m_a)^{-s_{2a}}\notag\\
&\times m_{a+1}^{-s_{31}}(m_{a+1}+m_{a+2})^{-s_{32}}\cdots
(m_{a+1}+\cdots+m_{a+b})^{-s_{3b}}.\notag
\end{align}
Now we consider the above three cases (i), (ii) and (iii) separately.
The simplest case is (iii). When we replace $c$ by $c+1$ in \eqref{acs-7},
the differences are that the summation is now with respect to
$m_1,\ldots,m_{a+b+c+1}$, and a new factor
$(m_1+\cdots+m_{a+b+c+1})^{-s_{1,c+1}}$ appears.
Dividing this factor as
\begin{align*}
\lefteqn{(m_1+\cdots+m_{a+b+c+1})^{-s_{1,c+1}}}\\
&=(m_1+\cdots+m_{a+b+c})^{-s_{1,c+1}}
\left(1+\frac{m_{a+b+c+1}}{m_1+\cdots+m_{a+b+c}}\right)^{-s_{1,c+1}}
\end{align*}
and apply \eqref{acs-2} as in the argument of \eqref{acs-3},
we find that the sum with respect to $m_{a+b+c+1}$ is separated, which produces
a Riemann zeta factor, and hence the zeta-function of $\Delta_+^{(a,b,c+1)}$
can be expressed as an integral of Mellin-Barnes type, involving gamma factors,
a Riemann zeta factor, and the zeta-function of $\Delta_+^{(a,b,c)}$.
Next consider the case (ii). When we replace $b$ by $b+1$, \eqref{acs-7} is
changed to
\begin{align}
=&\sum_{m_1,\ldots,m_{a+b+c+1}=1}^{\infty}(m_1+\cdots+m_{a+b+2})^{-s_{11}}\cdots
(m_1+\cdots+m_{a+b+c+1})^{-s_{1c}}\label{acs-8}\\
&\times m_1^{-s_{21}}(m_1+m_2)^{-s_{22}}\cdots(m_1+\cdots+m_a)^{-s_{2a}}\notag\\
&\times m_{a+1}^{-s_{31}}(m_{a+1}+m_{a+2})^{-s_{32}}\cdots
(m_{a+1}+\cdots+m_{a+b})^{-s_{3b}}\notag\\
&\times(m_{a+1}+\cdots+m_{a+b+1})^{-s_{3,b+1}}. \notag
\end{align}
The last factor is
\begin{align}
&=(m_{a+1}+\cdots+m_{a+b})^{-s_{3,b+1}}\left(1+\frac{m_{a+b+1}}
{m_{a+1}+\cdots+m_{a+b}}\right)^{-s_{3,b+1}}\label{acs-9}\\
&=(m_{a+1}+\cdots+m_{a+b})^{-s_{3,b+1}}\notag\\
&\qquad\times\frac{1}{2\pi i}\int_{(\kappa)}\frac{\Gamma(s_{3,b+1}+z)
\Gamma(-z)}{\Gamma(s_{3,b+1})}\left(\frac{m_{a+b+1}}{m_{a+1}+\cdots+m_{a+b}}
\right)^z dz.\notag
\end{align}
The factors $(m_1+\cdots+m_{a+b+n})^{-s_{1,n-1}}$ ($2\leq n\leq c+1$) also
include the term $m_{a+b+1}$. We divide these factors as
\begin{align*}
\lefteqn{(m_1+\cdots+m_{a+b}+m_{a+b+2}+\cdots+m_{a+b+n})^{-s_{1,n-1}}}\\
&\times\left(1+\frac{m_{a+b+1}}{m_1+\cdots+m_{a+b}+m_{a+b+2}+\cdots+m_{a+b+n}}
\right)^{-s_{1,n-1}}
\end{align*}
and apply \eqref{acs-2} to obtain
\begin{align}
\lefteqn{(m_1+\cdots+m_{a+b+n})^{-s_{1,n-1}}}\label{acs-10}
\\
&=(m_1+\cdots+m_{a+b}+m_{a+b+2}+\cdots+m_{a+b+n})^{-s_{1,n-1}}\notag\\
&\times\frac{1}{2\pi i}\int_{(\kappa_n)}\frac{\Gamma(s_{1,n-1}+z_n)
\Gamma(-z_n)}{\Gamma(s_{1,n-1})}\left(\frac{m_{a+b+1}}
{m_1+\cdots+m_{a+b}+m_{a+b+2}+\cdots+m_{a+b+n}}\right)^{z_n}dz_n \notag
\end{align}
for $2\leq n\leq c+1$. Substituting \eqref{acs-9} and \eqref{acs-10} into
\eqref{acs-8}, we find that the sum with respect to $m_{a+b+1}$ is separated
and gives a Riemann zeta factor
$\zeta(-z_2-\cdots-z_{c+1}-z)$.
Since the remaining sum produces the zeta-function of
$\Delta_+^{(a,b,c)}$, we obtain that the zeta-function of
$\Delta_+^{(a,b+1,c)}$ can be expressed as a $(c+1)$-ple integral of
Mellin-Barnes type involving $\zeta(-z_2-\cdots-z_{c+1}-z)$ and the
zeta-function of $\Delta_+^{(a,b,c)}$.
The case (i) is similar; we omit the details, only noting that in this case the
variable to be separated is $m_{a+1}$. The proof of Theorem \ref{Th-A} is now
complete.
\end{proof}
\section{Proof of fundamental formulas}\label{sec-proof1}
In this section we prove fundamental formulas stated in Section \ref{sec-3}.
\begin{lem}
For $B\subset \Delta_+$ and $\mathbf{V}\in\mathscr{V}$, we have
\begin{equation}
{\rm L.h.}[\mathbf{V}\cap B]
=\{v\in V~|~\text{$\langle v,\mu^{\mathbf{V}}_\beta\rangle=0$ for all $\beta\in\mathbf{V}\setminus B$}\}.
\end{equation}
\end{lem}
\begin{proof}
Let $v$ be an element of the right-hand side.
We write $v=\sum_{\beta\in\mathbf{V}}c_\beta \beta$
and have $c_\beta=0$ for all $\beta\in\mathbf{V}\setminus B$
and hence
\begin{equation}
v=\sum_{\beta\in\mathbf{V}\cap B}c_\beta \beta\in{\rm L.h.}[\mathbf{V}\cap B].
\end{equation}
The converse is shown similarly.
\end{proof}
\begin{proof}[Proof of Theorem \ref{thm:main0}]
For $\mathbf{t}=(t_{\alpha})_{\alpha\in\Delta_+}\in\mathbf{T}$, $\mathbf{y}\in V$,
$\mathbf{V}\in\mathscr{V}$, $B\subset \Delta_+$ and $q\in Q^\vee/L(\mathbf{V}^\vee)$, let
\begin{equation}
\begin{split}
F(\mathbf{t},\mathbf{y};\mathbf{V},B,q)&=
(-1)^{\abs{B\setminus\mathbf{V}}}
\Bigl(
\prod_{\gamma\in\Delta_+\setminus(\mathbf{V}\cup B)}
\frac{t_\gamma}
{t_\gamma-\sum_{\beta\in\mathbf{V}\setminus B}t_\beta\langle\gamma^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\Bigr)
\\
&\qquad\times
\Bigl(
\prod_{\beta\in\mathbf{V}\setminus B}\frac{t_\beta\exp
(t_\beta\{\mathbf{y}+q\}_{\mathbf{V},\beta})}{e^{t_\beta}-1}
\Bigr),
\end{split}
\end{equation}
so that
\begin{equation}\label{termwise}
F(\mathbf{t},\mathbf{y};\Delta)=
\sum_{\mathbf{V}\in\mathscr{V}}
\frac{1}{\abs{Q^\vee/L(\mathbf{V}^\vee)}}
\sum_{q\in Q^\vee/L(\mathbf{V}^\vee)}
F(\mathbf{t},\mathbf{y};\mathbf{V},\emptyset,q).
\end{equation}
Assume
$\mathbf{y}\in V\setminus\mathfrak{H}_{\mathscr{R}}$,
and let
\begin{equation}
F_j=F(\mathbf{t},\mathbf{y};\mathbf{V},A_j,q).
\end{equation}
We calculate $\mathfrak{D}_{\nu_{j+1}} F_j$.
First, since $\mathbf{y}\notin\mathfrak{H}_{\mathscr{R}}$, noting Remark \ref{tsuika}
we find that
\begin{equation}
\partial_{\nu_{j+1}^\vee} F_j
=
\Bigl(\sum_{\beta\in\mathbf{V}\setminus A_j}t_\beta\langle\nu_{j+1}^\vee,\mu^{\mathbf{V}}_\beta\rangle\Bigr)
F_j.
\end{equation}
Consider the case $\nu_{j+1}\in\mathbf{V}$.
Then $\langle\nu_{j+1}^\vee,\mu^{\mathbf{V}}_\beta\rangle=\delta_{\nu_{j+1},\beta}$ and
\begin{equation}
\sum_{\beta\in\mathbf{V}\setminus A_j}t_\beta\langle\nu_{j+1}^\vee,
\mu^{\mathbf{V}}_\beta\rangle=t_{j+1},
\end{equation}
where we write $t_{\nu_{j+1}}=t_{j+1}$ for brevity.
Hence we have $\partial_{\nu_{j+1}^\vee} F_j =t_{j+1} F_j$.
Therefore we obtain
\begin{equation}
\begin{split}
\mathfrak{D}_{\nu_{j+1}} F_j
&=
(-1)^{\abs{A_j\setminus\mathbf{V}}}
\Bigl(
\prod_{\gamma\in\Delta_+\setminus(\mathbf{V}\cup A_j)}
\frac{t_\gamma}
{t_\gamma-\sum_{\beta\in\mathbf{V}\setminus(A_j\cup\{\nu_{j+1}\})}t_\beta\langle\gamma^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\Bigr)
\\
&
\qquad \times
\Bigl(
\prod_{\beta\in\mathbf{V}\setminus(A_j\cup\{\nu_{j+1}\})}\frac{t_\beta\exp
(t_\beta\{\mathbf{y}+q\}_{\mathbf{V},\beta})}{e^{t_\beta}-1}
\Bigr)
\end{split}
\end{equation}
which is equal to $F_{j+1}$ because
$\Delta_+\setminus(\mathbf{V}\cup (A_j\cup\{\nu_{j+1}\}))=\Delta_+
\setminus(\mathbf{V}\cup A_j)$ and
$\abs{(A_j\cup\{\nu_{j+1}\})\setminus\mathbf{V}}=\abs{A_j\setminus\mathbf{V}}$.
Next consider the case $\nu_{j+1}\notin\mathbf{V}$.
If $\langle\nu_{j+1}^\vee,\mu^{\mathbf{V}}_\beta\rangle=0$ for all $\beta\in\mathbf{V}\setminus A_j$, then
\begin{equation}
\partial_{\nu_{j+1}^\vee} F_j
=
\Bigl(\sum_{\beta\in\mathbf{V}\setminus A_j}t_\beta\langle\nu_{j+1}^\vee,\mu^{\mathbf{V}}_\beta\rangle\Bigr)
F_j
=0
\end{equation}
and hence $\mathfrak{D}_{\nu_{j+1}} F_j=0$.
Otherwise, since
\begin{equation}
\frac{\partial}{\partial t_{j+1}}\biggr\rvert_{t_{j+1}=0}
\Bigl(\frac{t_{j+1}}
{t_{j+1}-\sum_{\beta\in\mathbf{V}\setminus A_j}t_\beta\langle\nu_{j+1}^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\Bigr)
=
-\frac{1}{\sum_{\beta\in\mathbf{V}\setminus A_j}t_\beta\langle\nu_{j+1}^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\end{equation}
we have
\begin{equation}
\begin{split}
\mathfrak{D}_\nu F_j&=
(-1)^{\abs{A_j\setminus\mathbf{V}}+1}
\Bigl(
\prod_{\gamma\in\Delta_+\setminus(\mathbf{V}\cup A_j\cup\{\nu_{j+1}\})}
\frac{t_\gamma}
{t_\gamma-\sum_{\beta\in\mathbf{V}\setminus A_j}t_\beta\langle\gamma^\vee,\mu^{\mathbf{V}}_\beta\rangle}
\Bigr)
\\
&\qquad\times
\Bigl(
\prod_{\beta\in\mathbf{V}\setminus A_j}\frac{t_\beta\exp
(t_\beta\{\mathbf{y}+q\}_{\mathbf{V},\beta})}{e^{t_\beta}-1}
\Bigr).
\end{split}
\end{equation}
By noting
$\mathbf{V}\setminus (A_j\cup\{\nu_{j+1}\})=
\mathbf{V}\setminus A_j$ and
$\abs{(A_j\cup \{\nu_{j+1}\})\setminus \mathbf{V}}=
\abs{A_j\setminus \mathbf{V}}+1$
we find that the right-hand side is equal to $F_{j+1}$.
We see that
the condition
$\langle\nu_{j+1},\mu^{\mathbf{V}}_\beta\rangle=0$ for all $\beta\in\mathbf{V}\setminus A_j$
is equivalent to
the condition
$\nu_{j+1}\in{\rm L.h.}[\mathbf{V}\cap A_j]$.
Therefore the above results can be summarized as
\begin{equation}
\mathfrak{D}_{\nu_{j+1}} F_j
=
\begin{cases}
0\quad &(\nu_{j+1}\in{\rm L.h.}[\mathbf{V}\cap A_j]),\\
F_{j+1}\quad &(\nu_{j+1}\notin{\rm L.h.}[\mathbf{V}\cap A_j]).
\end{cases}
\end{equation}
Hence
\begin{equation}
\label{eq:DAF0}
\mathfrak{D}_A F_0
=
\begin{cases}
0\quad &(\mathbf{V}\notin\mathscr{V}_A),\\
F_N\quad &(\mathbf{V}\in\mathscr{V}_A).
\end{cases}
\end{equation}
Similarly to the above calculations, we see that $\mathfrak{D}_{A,2} F_0$ gives the
same result as \eqref{eq:DAF0}.
Thus, since $F_0=F(\mathbf{t},\mathbf{y};\mathbf{V},\emptyset,q)$, from
\eqref{termwise} we obtain \eqref{eq:main0}.
The continuity
follows from the limit
\begin{equation}
\lim_{c\to0+}\{\mathbf{y}+q+c\phi\}_{\mathbf{V},\beta}
=\{\mathbf{y}+q\}_{\mathbf{V},\beta}
\end{equation}
(see the last part of the proof of \cite[Theorem 4.1]{KM5}.)
Finally, since $F(\mathbf{t},\mathbf{y};\Delta)$ is holomorphic with respect to $\mathbf{t}$ around the origin,
so is $\bigl(\mathfrak{D}_A F\bigr) (\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)$
with respect to $\mathbf{t}_{\Delta^*}$.
The proof of Theorem \ref{thm:main0} is thus complete.
\end{proof}
\begin{proof}[Proof of Theorem {\mathbb{R}}ef{thm:main1}]
First assume $\mathbf{y}\in V\setminus\mathfrak{H}_{\mathscr{R}}$.
Let
$\mathbf{k}'=(k'_\alpha)_{\alpha\in\Delta_+}$ with
$k'_\alpha=k_\alpha$ ($\alpha\in \Delta^*$),
$k'_\alpha=2$ ($\alpha\in \Delta_+\setminus\Delta^*=A$).
Then by Proposition {\mathbb{R}}ef{prop:ZP}, we have
\begin{equation}
\begin{split}
S(\mathbf{k}',\mathbf{y};\Delta)
&=
\sum_{{\lambda}mbda\in P\setminus H_{\Delta^\vee}}
e^{2\pi i{\lambda}ngle \mathbf{y},{\lambda}mbda{\mathbb{R}}angle}
\prod_{\alpha\in\Delta_+}
\frac{1}{{\lambda}ngle\alpha^\vee,{\lambda}mbda{\mathbb{R}}angle^{k'_\alpha}}
\\
&=
\sum_{w\in W}
\Bigl(\prod_{\alpha\in\Delta_+{\mathbb{C}}ap w\Delta_-}
(-1)^{k'_{\alpha}}\Bigr)
{\mathbb{Z}}eta_r(w^{-1}\mathbf{k}',w^{-1}\mathbf{y};\Delta)
\\
&=(-1)^{\abs{\Delta_+}}
\mathcal{P}(\mathbf{k}',\mathbf{y};\Delta)
\biggl(\prod_{\alpha\in\Delta_+}
\frac{(2\pi i)^{k'_\alpha}}{k'_\alpha!}\biggr).
\end{split}
\end{equation}
Applying $\prod_{\alpha\in A}\partial_{\alpha^\vee}^2$ to the above.
From the first line we observe that each $\partial_{\alpha^\vee}^2$ produces
the factor $(2\pi i{\lambda}ngle\alpha^{\vee},{\lambda}mbda{\mathbb{R}}angle)^2$.
Hence the factor
${\mathbb{Z}}eta_r(w^{-1}\mathbf{k}',w^{-1}\mathbf{y};\Delta)$ on the second line
is transformed into
$(2\pi i)^{2|A|}{\mathbb{Z}}eta_r(w^{-1}\mathbf{k},w^{-1}\mathbf{y};\Delta)$.
Therefore we have
\begin{multline}
{\lambda}bel{eq:key1}
(2\pi i)^{2|A|}
\sum_{w\in W}
\Bigl(\prod_{\alpha\in\Delta_+{\mathbb{C}}ap w\Delta_-}
(-1)^{k_{\alpha}}\Bigr)
{\mathbb{Z}}eta_r(w^{-1}\mathbf{k},w^{-1}\mathbf{y};\Delta)
\\
=(-1)^{\abs{\Delta_+}}
\Bigl(\prod_{\alpha\in A}\partial_{\alpha^\vee}^2 \Bigr)
\mathcal{P}(\mathbf{k}',\mathbf{y};\Delta)
\biggl(\prod_{\alpha\in\Delta_+}
\frac{(2\pi i)^{k'_\alpha}}{k'_\alpha!}\biggr).
\end{multline}
Since
\begin{align*}
\biggl(\prod_{\alpha\in\Delta_+}
\frac{(2\pi i)^{k'_\alpha}}{k'_\alpha!}\biggr)=
\biggl(\prod_{\alpha\in\Delta^*}
\frac{(2\pi i)^{k'_\alpha}}{k'_\alpha!}\biggr)
\biggl(\prod_{\alpha\in A}
\frac{(2\pi i)^{2}}{2!}\biggr),
\end{align*}
we have
\begin{equation} \label{eq:key1bis}
\begin{split}
&\sum_{w\in W}
\Bigl(\prod_{\alpha\in\Delta_+\cap w\Delta_-}
(-1)^{k_{\alpha}}\Bigr)
\zeta_r(w^{-1}\mathbf{k},w^{-1}\mathbf{y};\Delta)
\\
&=(-1)^{\abs{\Delta_+}}
\Bigl(\prod_{\alpha\in A}\frac{1}{2}\partial_{\alpha^\vee}^2 \Bigr)
\mathcal{P}(\mathbf{k}',\mathbf{y};\Delta)
\biggl(\prod_{\alpha\in\Delta^*}
\frac{(2\pi i)^{k'_\alpha}}{k'_\alpha!}\biggr).
\end{split}
\end{equation}
From \eqref{def-F} it follows that
\begin{multline}
\label{eq:key2}
\Bigl(\prod_{\alpha\in A}\frac{1}{2}\frac{\partial^2 }{\partial t_\alpha^2 }\biggr\rvert_{t_\alpha=0}\partial_{\alpha^\vee}^2 \Bigr)
F(\mathbf{t},\mathbf{y};\Delta)
\\
=
\sum_{\substack{\mathbf{m}=(m_\alpha)_{\alpha\in\Delta_+}\\m_\alpha\in
\mathbb{N}_{0}(\alpha\in\Delta^*)\\m_\alpha=2(\alpha\in A)}}
\Bigl(\prod_{\alpha\in A}\frac{1}{2}\partial_{\alpha^\vee}^2 \Bigr)
\mathcal{P}(\mathbf{m},\mathbf{y};\Delta)
\prod_{\alpha\in\Delta^*} \frac{t_\alpha^{m_\alpha}}{m_\alpha!}.
\end{multline}
By Theorem \ref{thm:main0}, we see that
the left-hand side of \eqref{eq:key2} is equal to
\begin{equation}
\label{eq:key3}
F_{\Delta^*}(\mathbf{t}_{\Delta^*},\mathbf{y};\Delta)
=
\sum_{\mathbf{m}_{\Delta^*}\in \mathbb{N}_{0}^{\abs{\Delta^*}}}\mathcal{P}_{\Delta^*}(\mathbf{m}_{\Delta^*},\mathbf{y};\Delta)
\prod_{\alpha\in\Delta^*}
\frac{t_{\alpha}^{m_\alpha}}{m_\alpha!}.
\end{equation}
Comparing \eqref{eq:key2} with \eqref{eq:key3} we find that
\begin{align*}
\Bigl(\prod_{\alpha\in A}\frac{1}{2}\partial_{\alpha^\vee}^2 \Bigr)
\mathcal{P}(\mathbf{k}',\mathbf{y};\Delta)
=\mathcal{P}_{\Delta^*}(\mathbf{k}_{\Delta^*},\mathbf{y};\Delta).
\end{align*}
Therefore \eqref{eq:key1bis} implies the desired result when
$\mathbf{y}\in V\setminus\mathfrak{H}_{\mathscr{R}}$.
By the continuity with respect to $\mathbf{y}$,
the result is also valid in the case when
$\mathbf{y}\in\mathfrak{H}_{\mathscr{R}}$.
\end{proof}
\begin{remark}
It is possible to prove Theorem \ref{thm:main1}
by use of $\mathfrak{D}_A$ instead of $\mathfrak{D}_{A,2}$.
In this method, we need to consider the case $k_\alpha=1$ for some $\alpha\in A$
and such an argument is indeed valid. (See \cite[Remark 3.2]{KM3}.)
\end{remark}
\section{Proofs of Theorems \ref{T-5-1} and \ref{T-B2-EZ}}\label{sec-proof2}
In this final section we prove Theorems \ref{T-5-1} and \ref{T-B2-EZ}.
The basic principle of the proofs of these theorems is similar to that of the
argument developed in \cite[Section 7]{KMT-CJ}.
We first state the following lemma.
\begin{lem} \label{L-5-2} \ For an
arbitrary function $f\colon \mathbb{N}_{0} \to \mathbb{C}$ and $d\in \mathbb{N}$, we have
\begin{align}
&\sum_{k=0}^{d}\phi(d-k)\varepsilon_{d-k}\sum_{\nu=0}^{k}f(k-\nu)\frac{(i\pi)^{\nu}}{\nu!}
=-\frac{i\pi}{2}f(d-1)+\sum_{\xi=0}^{[d/2]} \zeta(2\xi)f(d-2\xi), \label{MNOT}
\end{align}
where we denote the integer part of $x\in \mathbb{R}$ by $[x]$,
$\varepsilon_j=(1+(-1)^j)/2$ $(j\in \mathbb{Z})$
and $\phi(s)=\sum_{m\geq 1}(-1)^m m^{-s}=\left(2^{1-s}-1\right)\zeta(s)$.
\end{lem}
\begin{proof}
This can be immediately obtained by combining (2.6) and (2.7) (with the choice
$g(x)=i\pi f(x-1)$\,)
in \cite[Lemma 2.1]{MNOT}.
\end{proof}
\end{proof}
\begin{proof}[Proof of Theorem \ref{T-5-1}]
From \cite[(4.31) and (4.32)]{KMT-CJ}, we have
\begin{align}
& \sum_{n\in \mathbb{Z}^*} \frac{(-1)^{n}e^{in\theta}}{n^a}-2\sum_{j=0}^{a}\
\phi(a-j)\varepsilon_{a-j} \frac{(i\theta)^{j}}{j!}=0 \label{e-5-1}
\end{align}
for $a\geq 2$ and $\theta \in [-\pi,\pi]$, where
$\mathbb{Z}^*=\mathbb{Z}\smallsetminus \{0\}$. For $x,y \in \mathbb{R}$ with
$|x|<1$
and $|y|<1$, multiply the above by
\begin{equation}
\sum_{l,m\in \mathbb{N}} (-1)^{l+m}x^l y^m e^{i(l+m)\theta}. \label{5-1-0}
\end{equation}
Separating the terms corresponding to $l+m+n=0$, we obtain
\begin{align*}
& \sum_{l,m\in \mathbb{N}}\sum_{n\in \mathbb{Z}^*\atop l+m+n\not=0}
\frac{(-1)^{l+m+n}x^l y^m e^{i(l+m+n)\theta}}{n^a}\\
& \ -2\sum_{j=0}^{a}\ \phi(a-j)\varepsilon_{a-j}\sum_{l,m\in \mathbb{N}}(-1)^{l+m}x^l y^m
e^{i(l+m)\theta} \frac{(i\theta)^{j}}{j!}\\
& \ =-(-1)^a\sum_{l,m\in \mathbb{N}} \frac{x^l y^m}{(l+m)^a}
\end{align*}
for $\theta \in [-\pi,\pi]$.
The right-hand side of the above is constant with respect to $\theta$.
Therefore we can apply \cite[Lemma 6.2]{KMT-CJ} with
$h=1$, $a_1=a$, $d=c\geq 2$,
\[ C(N)=\sum_{l,m\in\mathbb{N},n\in\mathbb{Z}^*\atop l+m+n=N}\frac{x^l y^m}{n^a}, \]
\begin{align*}
D(N;r;1)=
\begin{cases}
\sum_{l,m\in\mathbb{N}\atop l+m=N}x^l y^m & (N\geq 2,r=0),\\
0 & (\text{otherwise})
\end{cases}
\end{align*}
in the notation of \cite{KMT-CJ}. The result is
\begin{align*}
& \sum_{l,m\in \mathbb{N}}\sum_{n\in \mathbb{Z}^*\atop {l+m+n\not=0}}
\frac{(-1)^{l+m+n}x^l y^m e^{i(l+m+n)\theta}}{n^a(l+m+n)^c} \\
& \ -2\sum_{j=0}^{a}\ \phi(a-j)\varepsilon_{a-j}\sum_{\xi=0}^{j} \binom{j-\xi+c-1}{j-\xi}
(-1)^{j-\xi}\sum_{l,m\in \mathbb{N}}\frac{(-1)^{l+m}x^l y^m e^{i(l+m)\theta}}
{(l+m)^{c+j-\xi}} \frac{(i\theta)^{\xi}}{\xi!}\\
& \ +2\sum_{j=0}^{c}\ \phi(c-j)\varepsilon_{c-j}\sum_{\xi=0}^{j} \binom{j-\xi+a-1}{a-1}
(-1)^{a-1}\sum_{l,m\in \mathbb{N}}\frac{x^l y^m }{(l+m)^{a+j-\xi}} \frac{(i\theta)^{\xi}}
{\xi!}=0.
\end{align*}
Replace $x$ by $-xe^{-i\theta}$ and separate the term corresponding to $m+n=0$ in the
first member on the left-hand side, and apply \cite[Lemma 6.2]{KMT-CJ} again with
$d=b\geq 2$. Then we can obtain
\begin{align}
& \sum_{l,m\in \mathbb{N}}\sum_{n\in \mathbb{Z}^*\atop {m+n\not=0 \atop l+m+n\not=0}}
\frac{(-1)^{m+n}x^l y^m e^{i(m+n)\theta}}{n^a(m+n)^b (l+m+n)^c} \label{triple-1} \\
& \ =2\sum_{j=0}^{a}\ \phi(a-j)\varepsilon_{a-j}\sum_{\xi=0}^{j} \sum_{\omega=0}^{j-\xi}
\binom{\omega+b-1}{\omega}(-1)^\omega \binom{j-\xi-\omega+c-1}{c-1}(-1)^{j-\xi-\omega} \notag\\
& \qquad \qquad \times \sum_{l,m\in \mathbb{N}}\frac{(-1)^{m}x^l y^m e^{im\theta}}
{m^{b+\omega}(l+m)^{c+j-\xi-\omega}} \frac{(i\theta)^{\xi}}{\xi!} \notag\\
& \ -2\sum_{j=0}^{b}\ \phi(b-j)\varepsilon_{b-j}\sum_{\xi=0}^{j} \sum_{\omega=0}^{a-1}
\binom{\omega+j-\xi}{\omega}(-1)^\omega \binom{a-1-\omega+c-1}{c-1}(-1)^{a-1-\omega} \notag\\
& \qquad \qquad \times \sum_{l,m\in \mathbb{N}}\frac{x^l y^m}{m^{j-\xi+\omega+1}
(l+m)^{a+c-1-\omega}} \frac{(i\theta)^{\xi}}{\xi!} \notag\\
& \ -2\sum_{j=0}^{c}\ \phi(c-j)\varepsilon_{c-j}\sum_{\xi=0}^{j} \sum_{\omega=0}^{j-\xi}
\binom{\omega+b-1}{\omega}(-1)^\omega \binom{j-\xi-\omega+a-1}{a-1}(-1)^{a-1} \notag\\
& \qquad \qquad \times \sum_{l,m\in \mathbb{N}}\frac{(-1)^{l}x^l y^m e^{-il\theta}}
{(-l)^{b+\omega}(l+m)^{a+j-\xi-\omega}} \frac{(i\theta)^{\xi}}{\xi!} \notag\\
& \ +2\sum_{j=0}^{b}\ \phi(b-j)\varepsilon_{b-j}\sum_{\xi=0}^{j} \sum_{\omega=0}^{c-1}
\binom{\omega+j-\xi}{\omega}(-1)^\omega \binom{a-1-\omega+c-1}{a-1}(-1)^{a-1} \notag\\
& \qquad \qquad \times \sum_{l,m\in \mathbb{N}}\frac{x^l y^m}{(-l)^{j-\xi+\omega+1}
(l+m)^{a+c-1-\omega}} \frac{(i\theta)^{\xi}}{\xi!}. \notag
\end{align}
Since $a,b,c \geq 2$, we can let $x,y \to 1$ on both sides because of
absolute convergence. Then set $\theta=\pi$, and consider
the left-hand side of the resulting formula first.
The contribution of the terms corresponding to $m+2n=0$ is obviously
$(-1)^a\zeta_2(a+b,c)$.
The contribution of the terms corresponding to $l+m+2n=0$ is (with rewriting
$-n$ by $n$)
\begin{align*}
(-1)^a\sum_{m,n\in\mathbb{N}\atop m\neq n, m<2n}\frac{1}{n^{a+c}(m-n)^b},
\end{align*}
which is, by separating into two parts according to $n<m<2n$ and $0<m<n$,
equal to $(-1)^a(1+(-1)^b)\zeta_2(b,a+c)$. We can also see that the
contribution of the terms corresponding to $l+2m+2n=0$ is
\begin{align*}
(-1)^a \sum_{m,n\in\mathbb{N}\atop n>m}\frac{1}{n^a(m-n)^b(n-m)^c}
=(-1)^{a+b}\zeta_2(b+c,a).
\end{align*}
The remaining part of the left-hand side is
\begin{align*}
& \sum_{l,m\in \mathbb{N}}\sum_{n\in \mathbb{Z}^*\atop {m+n\not=0 \atop {m+2n\not=0
\atop {l+m+n\not=0 \atop {l+m+2n\not=0 \atop l+2m+2n\not=0}}}}} \frac{1}
{n^a(m+n)^b (l+m+n)^c} \notag\\
& =\zeta_3(a,b,c)+(-1)^a\sum_{l,m\in \mathbb{N}}
\sum_{n\in \mathbb{N}\atop {m\not=n \atop
{m\not=2n \atop {l+m\not=n \atop {l+m\not=2n \atop l+2m\not=2n}}}}} \frac{1}
{n^a(m-n)^b (l+m-n)^c}.
\end{align*}
On the above double sum,
replace $j=m-n$ and $k=n-m$ correspondingly to $m>n$ and $m<n$, respectively.
On the part corresponding to $m>n$, we further divide the sum into three parts
according to $l+j<n$, $j<n<l+j$, $n<j$ and find that the contribution of this part is
\[
(-1)^a\left\{\zeta_3(b,c,a)+\zeta_3(b,a,c)+\zeta_3(a,b,c)\right\}.
\]
Similarly we treat the part $m<n$.
Collecting the above results, we obtain that the left-hand side is
\begin{align*}
(-1)^a&\bigg\{(1+(-1)^a)\zeta_3(a,b,c)+(1+(-1)^b)\left( \zeta_3(b,a,c)+
\zeta_3(b,c,a)\right)\\
& \qquad +(-1)^b(1+(-1)^c)\zeta_3(c,b,a)+\zeta_2(a+b,c)\\
& \qquad +(1+(-1)^b)\zeta_2(b,a+c)+(-1)^b\zeta_2(b+c,a)\bigg\}.
\end{align*}
On the other hand, applying Lemma \ref{L-5-2}, we can rewrite the right-hand side
to
\begin{align*}
& 2(-1)^a\bigg\{ \sum_{\xi=0}^{[a/2]}\zeta(2\xi)\sum_{\omega=0}^{a-2\xi}\binom{\omega+b-1}
{\omega}\binom{a+c-2\xi-\omega-1}{c-1}\zeta_2(b+\omega,a+c-2\xi-\omega)\\
& \ +\sum_{\xi=0}^{[b/2]}\zeta(2\xi)\sum_{\omega=0}^{a-1}\binom{\omega+b-2\xi}{\omega}
\binom{a+c-\omega-2}{c-1}\zeta_2(b-2\xi+\omega+1,a+c-1-\omega)\\
& \ +(-1)^b\sum_{\xi=0}^{[c/2]}\zeta(2\xi)\sum_{\omega=0}^{c-2\xi}\binom{\omega+b-1}{\omega}
\binom{a+c-2\xi-\omega-1}{a-1}\zeta_2(b+\omega,a+c-2\xi-\omega)\\
& \ +(-1)^b\sum_{\xi=0}^{[b/2]}\zeta(2\xi)\sum_{\omega=0}^{c-1}\binom{\omega+b-2\xi}{\omega}
\binom{a+c-\omega-2}{a-1}\zeta_2(b-2\xi+\omega+1,a+c-1-\omega)\bigg\}.
\end{align*}
This completes the proof of Theorem \ref{T-5-1}.
\end{proof}
Finally we give the proof of Theorem \ref{T-B2-EZ}.
\begin{proof}[Proof of Theorem \ref{T-B2-EZ}]
Let $p\in \mathbb{N}_{\geq 2}$ and $s\in \mathbb{R}_{>1}$. It follows from
\cite[Equation (4.7)]{KMT-Pala} that
\begin{equation*}
\begin{split}
& \sum_{l\in \mathbb{Z}^*, m\in\mathbb{N}\atop l+m\not=0} \frac{(-1)^{l+m}x^m e^{i(l+m)\theta}}{l^{p}m^{s}}-2\sum_{j=0}^{p}\ \phi(p-j)\varepsilon_{p-j}\left\{ \sum_{m=1}^\infty \frac{(-1)^{m}x^m e^{im\theta}}{m^s}\right\} \frac{(i\theta)^{j}}{j!}\\
& \ \ \ \ +(-1)^{p}\sum_{m=1}^\infty \frac{x^m}{m^{s+p}}=0
\end{split}
\end{equation*}
for $\theta \in [-\pi,\pi]$ and $x\in \mathbb{C}$ with $|x|\leq 1$. Setting $x=-e^{i\theta}$ on both sides and separating the term corresponding to $l+2m=0$ of the first term on the left-hand side, we have
\begin{align*}
& \sum_{l\in \mathbb{Z}^*,m\in\mathbb{N}\atop {l+m\not=0 \atop l+2m\not=0}} \frac{(-1)^{l} e^{i(l+2m)\theta}}{l^{p}m^{s}} -2\sum_{j=0}^{p}\ \phi(p-j)\varepsilon_{p-j}\left\{ \sum_{m=1}^\infty \frac{ e^{2im\theta}}{m^s}\right\} \frac{(i\theta)^{j}}{j!}\\
& \ \ \ \ +(-1)^{p}\sum_{m=1}^\infty \frac{(-1)^me^{im\theta}}{m^{s+p}}=-\sum_{m=1}^\infty \frac{1}{(-2m)^p m^s}.
\end{align*}
By \cite[Lemma 6.2]{KMT-CJ} with $d=q\geq 2$, we obtain
\begin{align}
& \sum_{l\in \mathbb{Z}^*,m\in\mathbb{N}\atop{l+m\not=0 \atop l+2m\not=0}} \frac{(-1)^{l} e^{i(l+2m)\theta}}{l^{p}m^{s}(l+2m)^q} \label{eq-9-2}\\
& \quad =2\sum_{j=0}^{p}\ \phi(p-j)\varepsilon_{p-j}\sum_{\xi=0}^{j}\binom{j-\xi+q-1}{j-\xi}\frac{(-1)^{j-\xi}}{2^{q+j-\xi}}\sum_{m=1}^{\infty}\frac{e^{2im\theta}}{m^{s+q+j-\xi}}\frac{(i\theta)^{\xi}}{\xi!}\notag\\
& \quad -2\sum_{j=0}^{q}\ \phi(q-j)\varepsilon_{q-j}\sum_{\xi=0}^{j}\binom{j-\xi+p-1}{j-\xi}\frac{(-1)^{p-1}}{2^{p+j-\xi}}\sum_{m=1}^{\infty}\frac{1}{m^{s+p+j-\xi}}\frac{(i\theta)^{\xi}}{\xi!}\notag\\
& \ \ \ \ -(-1)^{p}\sum_{m=1}^{\infty}\frac{(-1)^m e^{im\theta}}{m^{s+p+q}}. \notag
\end{align}
Set $\theta=\pi$ and use Lemma~\ref{L-5-2}. Then the right-hand side of \eqref{eq-9-2} is equal to
\begin{align}
& 2(-1)^{p}\sum_{\xi=0}^{[p/2]}\ \frac{1}{2^{p+q-2\xi}}\binom{p+q-1-2\xi}{q-1}\zeta(2\xi)\zeta(s+p+q-2\xi)\label{eq-9-3} \\
& +2(-1)^{p}\sum_{\xi=0}^{[q/2]}\ \frac{1}{2^{p+q-2\xi}}\binom{p+q-1-2\xi}{p-1}\zeta(2\xi)\zeta(s+p+q-2\xi) \notag\\
& -(-1)^{p}\zeta(s+p+q). \notag
\end{align}
On the other hand, we can see that the left-hand side can be written in terms of
the zeta-function of $B_2$. Recall that
\begin{align*}
\zeta_2(s_1,s_2,s_3,s_4;B_2)&=\zeta_2((s_1,s_2,s_3,s_4),\mathbf{0};\Delta(B_2))\\
&=\sum_{m_1=1}^{\infty}\sum_{m_2=1}^{\infty}\frac{1}{m_1^{s_1}m_2^{s_2}
(m_1+m_2)^{s_3}(2m_1+m_2)^{s_4}}.
\end{align*}
The contribution of the terms with $l>0$ to the left-hand side is obviously
$\zeta_2(s,p,0,q;B_2)$. As for the terms with $l<0$, we rewrite $-l$ by $l$,
divide the sum into three parts according to the conditions $l<m$, $m<l<2m$
and $l>2m$, and evaluate each part in terms of the zeta-function of $B_2$.
The conclusion is that the left-hand side is
\begin{align}
& \zeta_2(s,p,0,q;B_2)+(-1)^p\zeta_2(0,p,s,q;B_2) +(-1)^p\zeta_2(0,q,s,p;B_2)\label{eq-9-4}\\
& \qquad +(-1)^{p+q}\zeta_2(s,q,0,p;B_2).\notag
\end{align}
We combine \eqref{eq-9-3} and \eqref{eq-9-4} and multiply by $(-1)^p$. Then we can set $s=0$ because \eqref{eq-9-3} and \eqref{eq-9-4} are absolutely convergent for $s>-1$.
Noting $\zeta_2(0,p,0,q;B_2)=\zeta_2^\sharp(p,q)$, we complete the proof of Theorem \ref{T-B2-EZ}.
\end{proof}
\
\proof[Acknowledgements]
The authors would like to express their sincere gratitude to
Professor Mike Hoffman for pointing out that symmetric sums for MZVs in \eqref{EZ-Sr-11} can be written in terms of products of Riemann's zeta values at even positive integers and giving related valuable comments (see Remark \ref{Rem-Hof}).
\
\end{document} |
\begin{document}
\title[Homotopy category of exact quasi-category]{External triangulation of the homotopy category of exact quasi-category}
\author{Hiroyuki Nakaoka}
\address{Graduate School of Mathematics, Nagoya University, Furocho, Chikusaku, Nagoya 464-8602, Japan}
\email{nakaoka.hiroyuki@math.nagoya-u.ac.jp}
\author{Yann Palu}
\address{LAMFA, Universit\'e de Picardie Jules Verne, 33 rue Saint-Leu, Amiens, France}
\email{yann.palu@u-picardie.fr}
\urladdr{http://www.lamfa.u-picardie.fr/palu/}
\thanks{}
\thanks{The first author is supported by JSPS KAKENHI Grant Numbers JP19K03468, JP20K03532. Both authors were partially supported by the French ANR grant SC3A~(15\,CE40\,0004\,01). Both authors wish to thank Gustavo Jasso for his interest, and for introducing them a possible relation with a future work of his student.}
\begin{abstract}
Extriangulated categories axiomatize extension-closed subcategories of triangulated categories. We show that the homotopy category of an exact quasi-category can be equipped with a natural extriangulated structure.
\end{abstract}
\maketitle
\tableofcontents
\section{Introduction}
For triangulated categories, several possible higher-categorical enhancements are known, one of which is the celebrated notion of a \emph{stable $\infty$-category} by Lurie (\cite{L2}).
Recently, as a wider class of $\infty$-categories, the notion of an \emph{exact $\infty$-category} has appeared in Barwick's \cite{B1}, where it is used in order to prove that the $K$-theory of an idempotent complete stable quasi-category endowed with a bounded t-structure is weakly equivalent to the $K$-theory of its heart. A non-additive version of exact quasi-categories also appeared in \cite{DK} as a source of examples of 2-Segal spaces, via Waldhausen's S-construction. Instances of exact quasi-categories are given by
\begin{itemize}
\item the nerve of any ordinary exact category,
\item any extension-closed full additive subcategory of a stable $\infty$-category
\end{itemize}
as stated in \cite[Examples~3.3 and 3.5]{B1}.
Regarding its importance and naturality, it should be natural to expect that the homotopy category of an exact $\infty$-category would have a nice structure very close to triangulation.
In this article, we give a positive answer to this expectation, with the notion of an \emph{extriangulated category} which has been introduced as a unification of exact categories and triangulated categories in our previous article \cite{NP}. It is defined as an additive category equipped with a structure called an \emph{external triangulation}, which is more flexible than triangulations in the sense that it is naturally inherited by relative theories, extension-closed subcategories, and ideal quotients by some projective-injectives (\cite[Section~3.2]{HLN}, \cite[Remark~2.18, Proposition~3.30]{NP}). In particular, extension-closed subcategories of triangulated categories are typical examples of extriangulated categories, which dovetails the above-mentioned feature of exact $\infty$-categories.
Let $\mathscr{C}$ be an exact quasi-category (the definition is recalled in \cref{Section_ExactSequences}).
We say that two exact sequences in $\mathscr{C}$
\[
\xy
(-12,0)*+{\mathbf{S} =};
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{O}="4";
(6,-6)*+{C}="6";
{\ar@{>->}^i "0"+(3.5,0);"2"};
{\ar "0";"4"};
{\ar@{->>}^p "2";"6"};
{\ar "4";"6"};
(24,0)*+{\mathbf{S}^{\prime} =};
(30,6)*+{A}="10";
(42,6)*+{B^{\prime}}="12";
(30,-6)*+{O^{\prime}}="14";
(42,-6)*+{C}="16";
{\ar@{>->} "10"+(3.5,0);"12"};
{\ar "10";"14"};
{\ar@{->>} "12";"16"};
{\ar "14";"16"};
\endxy
\]
are equivalent if there is a cube in $\mathscr{C}$ from $\mathbf{S}$ to $\mathbf{S}^{\prime}$
\[
\mathscr{C}f=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A}="10";
(8,-7)*+{B^{\prime}}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{C}="16";
{\ar "0";"2"};
{\ar "0";"4"};
{\ar "2";"6"};
{\ar "4";"6"};
{\ar_{a} "0";"10"};
{\ar "2";"12"};
{\ar "4";"14"};
{\ar^{c} "6";"16"};
{\ar "10";"12"};
{\ar "10";"14"};
{\ar "12";"16"};
{\ar "14";"16"};
\endxy
\]
where $\overline{a}=\mathrm{id}_A$ and $\overline{c}=\mathrm{id}_C$ in the homotopy category $h\mathscr{C}$.
This is an equivalence relation (\cref{PropEquivExSeq}), and we define $\mathbb{E}(C,A)$ to be the set of equivalence classes of exact sequences with fixed end terms $A$ and $C$.
Then $\mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times(h\mathscr{C})\to\mathit{Ab}$ is an additive bifunctor (\cref{CorEAddFtr}).
We also define an additive realization $\mathfrak{s}$ (\cref{LemET}) by sending the equivalence class of $\mathbf{S}$ to the equivalence class (defined similarly as for short exact sequences) of $A\overset{\overline{i}}{\longrightarrow}B\overset{\overline{p}}{\longrightarrow}C$ in $h\mathscr{C}$.
\begin{thm}[\cref{ThmET,propExactFunctors}]
Let $\mathscr{C}$ be an exact quasi-category. Then
\begin{enumerate}
\item its homotopy category $(h\mathscr{C},\mathbb{E},\mathfrak{s})$ is extriangulated.
\item Moreover, the functor $\mathscr{C}\to h\mathscr{C}$ sends exact sequences $\mathbf{S}$ as above to extriangles
$A\overset{\overline{i}}{\rightarrowtail}B\overset{\overline{p}}{\twoheadrightarrow}C\overset{\underline{\mathbf{S}}}{\dashrightarrow}$.
\item If $F\colon\mathscr{C}\to\mathscr{D}$ is an exact functor of exact quasi-categories, then the induced functor $hF\colon h\mathscr{C}\to h\mathscr{D}$ is an exact functor of extriangulated categories.
\end{enumerate}
\end{thm}
When $\mathscr{C}$ is stable, this recovers the usual triangulated structure on $h\mathscr{C}$ (\cref{PropCompatStable}).
In Section~\ref{Section_NotationsAndTerminology}, we organize notations and terminology which will be used in the subsequent sections. In Section~\ref{Section_ExactSequences} we recall the definition of an exact $\infty$-category in \cite{B1}, and summarize properties of exact sequences.
In Section~\ref{Section_ExtriangulatedStructure}, after a brief introduction of the definition of an extriangulated category, we proceed to construct an external triangulation of the homotopy category. As the main theorem (Theorem~\ref{ThmET}), indeed we will show that the homotopy category of any exact $\infty$-category becomes an extriangulated category. In the last Subsection~\ref{Subsection_StableCase}, in the stable case we will also see that this extriangulated structure is compatible with the triangulation given in \cite{L2}.
This work came out of our exploration for an existing higher-categorical notion appropriate to enhance extriangulated categories. We hope that the existence of such a notion in turn guarantees the validity and naturality of the notion of an extriangulated category.
\section{Notations and terminology}\label{Section_NotationsAndTerminology}
Basically we follow the notations and terminology in \cite{L1} and \cite{B1}, while we use the term \emph{quasi-category} as in Joyal's \cite{J}, instead of $\infty$-category. Besides, we will also employ some ad hoc ones which we will introduce in this section, to facilitate the later argument dealing with exact sequences.
Throughout this article, $\mathscr{C}$ denotes an essentially small quasi-category (\cite[Notation~1.3]{B2}). By definition it is a simplicial set which admits all inner horn extensions (\cite[Definition~1.1.2.4.]{L1}). Being a simplicial set, it has the \emph{face maps} $d_k\colon \mathscr{C}_{n+1}\to\mathscr{C}_n$ $(k=0,1,\ldots,n+1)$, and the \emph{degeneracy maps} $s_k\colon\mathscr{C}_n\to\mathscr{C}_{n+1}$ $(k=0,1,\ldots,n)$ for any $n\ge0$, where each $\mathscr{C}_n$ denotes the set of its $n$-simplices. In particular $0$-simplices and $1$-simplices in $\mathscr{C}$ are called \emph{objects} and \emph{morphisms}. For any pair of objects $A,B\in\mathscr{C}_0$, we write $f\in\mathscr{C}_1(A,B)$ to indicate that $f$ is a morphism satisfying $d_1(f)=A$ and $d_0(f)=B$.
For an object $A\in\mathscr{C}_0$, we abbreviate $s_0(A)\in\mathscr{C}_1(A,A)$ to $1_A$ for simplicity.
The homotopy category of $\mathscr{C}$ will be denoted by $h\mathscr{C}$. For a morphism $a\in\mathscr{C}_1(A,B)$, its image in the homotopy category will be denoted by $\overline{a}\in(h\mathscr{C})(A,B)$. We use the symbol $\mathrm{id}_A=\overline{1}_A$ exclusively for the identity morphisms in the homotopy category.
\subsection{Squares, rectangles and cubes}
As in \cite[Section~4.4.2]{L1}, a \emph{square} in $\mathscr{C}$ is a map $\mathbf{S}\colon\Delta^1\times\Delta^1\to\mathscr{C}$, namely an object in the quasi-category of functors $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$ (see \cite[Notation~1.2.7.2, Proposition~1.2.7.3]{L1}). We will often denote $\mathbf{S}$ as a diagram in $\mathscr{C}$
\[
\xy
(-8,7)*+{\mathbf{S}(0,0)}="0";
(8,7)*+{\mathbf{S}(0,1)}="2";
(-8,-7)*+{\mathbf{S}(1,0)}="4";
(8,-7)*+{\mathbf{S}(1,1)}="6";
{\ar^{} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy,
\]
or by labelling some of its simplices
\begin{equation}\label{Sq}
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\ \ \text{or}\ \
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar_{z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
\endxy
\ \ \text{or just}\ \
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
\endxy.
\end{equation}
It can be also identified with a pair of $2$-simplices $\xi$ and $\eta$ satisfying $d_1(\xi)=d_1(\eta)$, which correspond to
\[ \xi=\mathbf{S}|_{(\{0\}\times\Delta^1)\ast\{(1,1)\}}\quad\text{and}
\quad
\eta=\mathbf{S}|_{\{(0,0)\}\ast(\{1\}\times\Delta^1)}. \]
In this article we will also express a square as above by $\mathbf{S}=\SQ{\eta}{\xi}$. For a square $\mathbf{S}=\SQ{\eta}{\xi}$, its transpose $\mathbf{S}^t=\SQ{\xi}{\eta}$ is given as below.
\[
\xy
(-7,7)*+{A}="0";
(7,7)*+{D}="2";
(-7,-7)*+{B}="4";
(7,-7)*+{C}="6";
{\ar^{i} "0";"2"};
{\ar_{x} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{j} "2";"6"};
{\ar_{y} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\eta}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\xi}} "10";"11"};
\endxy
\]
If we fix a map $F\colon\Lambda^2_0\to\mathscr{C}$ denoted by a diagram
\begin{equation}\label{FixBD}
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{D}="4";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
\endxy
\end{equation}
in $\mathscr{C}$, then squares
\[
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\quad\text{and}\quad
\mathbf{S}^{\prime}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{C^{\prime}}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_{z^{\prime}}} "0";"6"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi^{\prime}}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta^{\prime}}} "10";"11"};
\endxy
\]
can be regarded as objects in the quasi-category under $(\ref{FixBD})$ (\cite[Remark 1.2.9.5.]{L1}).
In this undercategory, a morphism from $\mathbf{S}$ to $\mathbf{S}^{\prime}$ is nothing but a pair of $3$-simplices $\varphi,\psi$ in $\mathscr{C}$ satisfying
\[ d_1(\varphi)=d_1(\psi),d_2(\varphi)=\eta^{\prime},d_2(\psi)=\xi^{\prime},d_3(\varphi)=\eta,d_3(\psi)=\xi. \]
In this article, we denote this morphism by $\roundup{\varphi}{\psi}\colon\mathbf{S}\to\mathbf{S}^{\prime}$. We refer to such a pair simply as a morphism in ``the'' undercategory, when $(\ref{FixBD})$ is clear from the context.
A \emph{rectangle} in this article means a map $\mathbf{R}\colon\Delta^1\times\Delta^2\to\mathscr{C}$ which will be denoted as a diagram
\[
\xy
(-16,7)*+{\mathbf{R}(0,0)}="0";
(0,7)*+{\mathbf{R}(0,1)}="2";
(16,7)*+{\mathbf{R}(0,2)}="4";
(-16,-7)*+{\mathbf{R}(1,0)}="10";
(0,-7)*+{\mathbf{R}(1,1)}="12";
(16,-7)*+{\mathbf{R}(1,2)}="14";
{\ar^{} "0";"2"};
{\ar_{} "2";"4"};
{\ar^{} "0";"10"};
{\ar_{} "2";"12"};
{\ar_{} "4";"14"};
{\ar^{} "10";"12"};
{\ar_{} "12";"14"};
\endxy
\]
often by labelling some of its simplices similarly as for squares.
It can be identified with a triplet of $3$-simplices $\mathcal{X},\mathcal{Y},\mathcal{Z}$ satisfying $d_1(\mathcal{X})=d_1(\mathcal{Y})$ and $d_2(\mathcal{Y})=d_2(\mathcal{Z})$,
which correspond to
\[ \mathcal{X}=\mathbf{R}|_{\{(0,0)\}\ast(\{1\}\times\Delta^2)},\ \ \mathcal{Y}=\mathbf{R}|_{(\{0\}\times [0,1])\ast(\{1\}\times [1,2])},\ \ \mathcal{Z}=\mathbf{R}|_{(\{0\}\times\Delta^2)\ast\{(1,2)\}}. \]
In this article, we express such a rectangle by $\mathbf{R}=\RT{\mathcal{X}}{\mathcal{Y}}{\mathcal{Z}}$. The left, right, and outer squares in $\mathbf{R}$ will be denoted by $\mathbf{R}_{\mathrm{left}}=\mathbf{R}|_{\Delta^1\times [0,1]}$, $\mathbf{R}_{\mathrm{right}}=\mathbf{R}|_{\Delta^1\times [1,2]}$ and $\mathbf{R}_{\mathrm{out}}=\mathbf{R}|_{\Delta^1\times \Lambda^2_1}$, respectively.
If $\mathbf{R}_{\mathrm{left}}$ is a push-out, then $\mathbf{R}_{\mathrm{out}}$ is a push-out if and only if $\mathbf{R}_{\mathrm{right}}$ is a push-out (\cite[Lemma~4.4.2.1]{L1}). Dually for pull-backs.
Remark that a square $(\ref{Sq})$ can be thought of as a morphism in the quasi-category $\mathrm{Fun}(\Delta^1,\mathscr{C})$ from $A\overset{x}{\longrightarrow}B$ to $D\overset{j}{\longrightarrow}C$. In this view, a consecutive pair of squares can be filled into a $2$-simplex in $\mathrm{Fun}(\Delta^1,\mathscr{C})$. Slightly more strictly, we may also designate the 2-simplices in $\mathscr{C}$ used as guides for this pasting operation, as follows.
\begin{prop}\label{PropComposeSquares}
Let
\[
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{A^{\prime}}="4";
(7,-7)*+{B^{\prime}}="6";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{b} "2";"6"};
{\ar_{x^{\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\quad\text{and}\quad
\mathbf{S}^{\prime}=\
\xy
(-7,7)*+{A^{\prime}}="0";
(7,7)*+{B^{\prime}}="2";
(-7,-7)*+{A^{\prime}r}="4";
(7,-7)*+{B^{\prime}r}="6";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{a^{\prime}} "0";"4"};
{\ar|*+{_{z^{\prime}}} "0";"6"};
{\ar^{b^{\prime}} "2";"6"};
{\ar_{x^{\prime}r} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi^{\prime}}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta^{\prime}}} "10";"11"};
\endxy
\]
be any pair of squares such that $\mathbf{S}|_{\{1\}\times\Delta^1}=\mathbf{S}^{\prime}|_{\{0\}\times\Delta^1}=x^{\prime}$, and let
\[
\TwoSP{A}{A^{\prime}}{A^{\prime}r}{a}{a^{\prime}}{a^{\prime}r}{\alpha}
\ , \
\TwoSP{B}{B^{\prime}}{B^{\prime}r}{b}{b^{\prime}}{b^{\prime}r}{\beta}
\]
be arbitrarily taken pair of $2$-simplices. Then there exists a rectangle
\[
\mathbf{R}=
\xy
(-16,7)*+{A}="0";
(0,7)*+{A^{\prime}}="2";
(16,7)*+{A^{\prime}r}="4";
(-16,-7)*+{B}="10";
(0,-7)*+{B^{\prime}}="12";
(16,-7)*+{B^{\prime}r}="14";
{\ar_{a} "0";"2"};
{\ar_{a^{\prime}} "2";"4"};
{\ar@/^1.0pc/^{a^{\prime}r} "0";"4"};
{\ar_{x} "0";"10"};
{\ar_{x^{\prime}} "2";"12"};
{\ar^{x^{\prime}r} "4";"14"};
{\ar^{b} "10";"12"};
{\ar^{b^{\prime}} "12";"14"};
{\ar@/_1.0pc/_{b^{\prime}r} "10";"14"};
\endxy
\]
such that $\mathbf{R}_{\mathrm{left}}=\mathbf{S}^t$, $\mathbf{R}_{\mathrm{right}}=\mathbf{S}^{\prime t}$ and $\mathbf{R}|_{\{0\}\times\Delta^2}=\alpha$, $\mathbf{R}|_{\{1\}\times\Delta^2}=\beta$.
Moreover, if $\mathbf{S}=\SQ{s_0(x^{\prime})}{\xi}$ and $\alpha=s_0(a^{\prime})$ (hence consequently $A^{\prime}=A, a=1_A, z=x^{\prime}, a^{\prime}r=a^{\prime}$), then $\mathbf{R}$ can be chosen to be of the form $\mathbf{R}=\RT{\mathcal{X}}{s_0(\xi^{\prime})}{s_0(\eta^{\prime})}$.
\end{prop}
\begin{proof}
This is just rephrasing the fact that $(\Delta^1\times\Lambda^2_1)\cup(\partial\Delta^1\times\Delta^2)\hookrightarrow\Delta^1\times\Delta^2$ is an inner anodyne map. (\cite{J}. See also \cite[Proposition~2.3.2.1]{L1}, \cite[Proposition~3.2.3]{C}).
We also remark that as for the latter part, we only have to take a $3$-simplex
\[
\mathcal{X}=\
\xy
(-2,11)*+{A}="0";
(-12,-2)*+{B}="2";
(10,0)*+{B^{\prime}r}="4";
(2,-12)*+{B^{\prime}}="6";
{\ar_{x} "0";"2"};
{\ar^{z^{\prime}} "0";"4"};
{\ar_(0.3){z} "0";"6"};
{\ar_(0.35){b^{\prime}r}|!{"0";"6"}\hole "2";"4"};
{\ar_{b} "2";"6"};
{\ar_{b^{\prime}} "6";"4"};
\endxy
\]
such that $d_0(\mathcal{X})=\beta, d_1(\mathcal{X})=\xi^{\prime}, d_3(\mathcal{X})=\xi$.
\end{proof}
If we are given a rectangle $\mathbf{R}\colon\Delta^1\times\Delta^2\to\mathscr{C}$ and a morphism $f\in\mathscr{C}_1(\mathbf{R}((1,2)),C)$ to some $C\in\mathscr{C}_0$, then we can extend them to obtain a map $F\colon(\Delta^1\times\Delta^2)^{\vartriangleright}\to\mathscr{C}$. More strictly, we may designate some of the simplices involved, as below.
This ad hoc lemma will be used later in the proof of Lemma~\ref{LemExSeqPO}.
\begin{lem}\label{LemRectProl}
Let
\[
\mathbf{R}=\RT{\psi}{\varphi}{\theta}=\
\xy
(-16,7)*+{A}="0";
(0,7)*+{A^{\prime}}="2";
(16,7)*+{A^{\prime\prime}}="4";
(-16,-7)*+{B}="10";
(0,-7)*+{B^{\prime}}="12";
(16,-7)*+{B^{\prime\prime}}="14";
{\ar_{a} "0";"2"};
{\ar_{a^{\prime}} "2";"4"};
{\ar@/^1.0pc/^{a^{\prime\prime}} "0";"4"};
{\ar_{x} "0";"10"};
{\ar_{x^{\prime}} "2";"12"};
{\ar^{x^{\prime\prime}} "4";"14"};
{\ar^{b} "10";"12"};
{\ar^{b^{\prime}} "12";"14"};
{\ar@/_1.0pc/_{b^{\prime\prime}} "10";"14"};
\endxy
\]
be any rectangle, let
\[ \roundup{\zeta}{\zeta^{\prime}}\colon\mathbf{R}_{\mathrm{out}}\to
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{A^{\prime\prime}}="2";
(-7,-7)*+{B}="4";
(7,-7)*+{C}="6";
{\ar^{a^{\prime\prime}} "0";"2"};
{\ar_{x} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy
\]
be any morphism in the undercategory, and let
\[
\wp=
\xy
(-2,11)*+{A}="0";
(-12,-2)*+{A^{\prime}}="2";
(10,0)*+{C}="4";
(2,-12)*+{A^{\prime\prime}}="6";
{\ar_{a} "0";"2"};
{\ar^{} "0";"4"};
{\ar_(0.3){a^{\prime\prime}} "0";"6"};
{\ar_(0.35){}|!{"0";"6"}\hole "2";"4"};
{\ar_{a^{\prime}} "2";"6"};
{\ar_{} "6";"4"};
\endxy
\]
be any $3$-simplex in $\mathscr{C}$ satisfying $d_1(\wp)=d_2(\zeta^{\prime})$ and $d_3(\wp)=d_3(\theta)$. Then we can extend this to a map $(\Delta^1\times\Delta^2)^{\vartriangleright}\to\mathscr{C}$. Namely, there is a triplet of $4$-simplices
\[
\Psi=\Penta{A}{B}{B^{\prime}}{B^{\prime\prime}}{C}{x}{b}{b^{\prime}}{},\
\Phi=\Penta{A}{A^{\prime}}{B^{\prime}}{B^{\prime\prime}}{C}{a}{x^{\prime}}{b^{\prime}}{},\
\Theta=\Penta{A}{A^{\prime}}{A^{\prime\prime}}{B^{\prime\prime}}{C}{a}{a^{\prime}}{x^{\prime\prime}}{}
\]
such that $d_1(\Psi)=d_1(\Phi)$, $d_2(\Phi)=d_2(\Theta)$, $\roundup{\zeta}{\zeta^{\prime}}=\roundup{d_2(\Psi)}{d_1(\Theta)}$, $d_3(\Theta)=\wp$, and $\RT{\psi}{\varphi}{\theta}=\RT{d_4(\Phi)}{d_4(\Psi)}{d_4(\Theta)}$.
\end{lem}
\begin{proof}
Take a $2$-simplex
\[ \TwoSP{B^{\prime}}{B^{\prime\prime}}{C}{b^{\prime}}{}{}{\varpi} \]
such that $d_0(\varpi)=d_0d_1(\zeta)=d_0d_1(\zeta^{\prime})$ and $d_2(\varpi)=b^{\prime}$. Take $3$-simplices
\[
\alpha=\ThreeSP{A^{\prime}}{A^{\prime\prime}}{B^{\prime\prime}}{C},\
\beta=\ThreeSP{A}{B^{\prime}}{B^{\prime\prime}}{C},\
\gamma=\ThreeSP{A^{\prime}}{B^{\prime}}{B^{\prime\prime}}{C},\
\delta=\ThreeSP{B}{B^{\prime}}{B^{\prime\prime}}{C}
\]
such that
\begin{eqnarray*}
&d_0(\alpha)=d_0(\zeta^{\prime}),\ d_2(\alpha)=d_0(\wp),\ d_3(\alpha)=d_0(\theta),&\\
&d_0(\beta)=\varpi,\ d_1(\beta)=d_1(\zeta),\ d_3(\beta)=d_1(\varphi)=d_1(\psi),&\\
&d_0(\gamma)=\varpi,\ d_1(\gamma)=d_1(\alpha),\ d_3(\gamma)=d_0(\varphi),&\\
&d_0(\delta)=\varpi,\ d_1(\delta)=d_0(\zeta),\ d_3(\delta)=d_0(\psi).&
\end{eqnarray*}
Then we can take $\Theta,\Phi,\Psi$ so that
\begin{eqnarray*}
&d_0(\Theta)=\alpha,\ d_1(\Theta)=\zeta^{\prime},\ d_3(\Theta)=\wp,\ d_4(\Theta)=\theta,&\\
&d_0(\Phi)=\gamma,\ d_1(\Phi)=\beta,\ d_2(\Phi)=d_2(\Theta),\ d_4(\Phi)=\varphi,&\\
&d_0(\Psi)=\delta,\ d_1(\Psi)=\beta,\ d_2(\Psi)=\zeta,\ d_4(\Psi)=\psi&
\end{eqnarray*}
hold, as desired.
\end{proof}
A \emph{cube} in $\mathscr{C}$ is a map $\mathscr{C}f\colon\Delta^1\times\Delta^1\times\Delta^1\to\mathscr{C}$,
namely an object in $\mathrm{Fun}(\Delta^1\times\Delta^1\times\Delta^1,\mathscr{C})$, which we denote by a diagram
\[
\xy
(-11,15)*+{\mathscr{C}f(0,0,0)}="0";
(11,15)*+{\mathscr{C}f(0,1,0)}="2";
(0,5)*+{\mathscr{C}f(1,0,0)}="4";
(22,5)*+{\mathscr{C}f(1,1,0)}="6";
(-11,-5)*+{\mathscr{C}f(0,0,1)}="10";
(11,-5)*+{\mathscr{C}f(0,1,1)}="12";
(0,-15)*+{\mathscr{C}f(1,0,1)}="14";
(22,-15)*+{\mathscr{C}f(1,1,1)}="16";
{\ar^{} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
{\ar^{} "0";"10"};
{\ar_{}|!{(9,5);(13,5)}\hole "2";"12"};
{\ar^{} "4";"14"};
{\ar_{} "6";"16"};
{\ar^{}|!{(0,-3);(0,-7)}\hole "10";"12"};
{\ar_{} "10";"14"};
{\ar^{} "12";"16"};
{\ar_{} "14";"16"};
\endxy
\]
in $\mathscr{C}$ or by labelling some of its simplices for example as follows,
\begin{equation}\label{Cb}
\mathscr{C}f=\
\xy
(-11,12)*+{A}="0";
(11,12)*+{B}="2";
(0,2)*+{D}="4";
(22,2)*+{C}="6";
(-11,-8)*+{A^{\prime}}="10";
(11,-8)*+{B^{\prime}}="12";
(0,-18)*+{D^{\prime}}="14";
(22,-18)*+{C^{\prime}}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.7){j} "4";"6"};
{\ar_(0.4){z} "0";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.7){b}|!{(9,2);(13,2)}\hole|!{(9,6.2);(13,4.2)}\hole "2";"12"};
{\ar_(0.3){d} "4";"14"};
{\ar^{c} "6";"16"};
{\ar^(0.7){x^{\prime}}|!{(0,-6);(0,-10)}\hole "10";"12"};
{\ar_{i^{\prime}} "10";"14"};
{\ar^{y^{\prime}} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
{\ar_{z^{\prime}} "10";"16"};
\endxy,
\end{equation}
similarly as for squares. We may identify it with a pair of rectangles
\[
\xy
(-28,4)*+{\mathbf{R}_{\mathrm{f}}=}="-";
(-16,7)*+{A}="0";
(0,7)*+{B}="2";
(16,7)*+{C}="4";
(-16,-7)*+{A^{\prime}}="10";
(0,-7)*+{B^{\prime}}="12";
(16,-7)*+{C^{\prime}}="14";
{\ar_{x} "0";"2"};
{\ar_{y} "2";"4"};
{\ar@/^1.0pc/^{z} "0";"4"};
{\ar_{a} "0";"10"};
{\ar_{b} "2";"12"};
{\ar^{c} "4";"14"};
{\ar^{x^{\prime}} "10";"12"};
{\ar^{y^{\prime}} "12";"14"};
{\ar@/_1.0pc/_{z^{\prime}} "10";"14"};
\endxy
\quad\text{and}\quad
\xy
(-28,4)*+{\mathbf{R}_{\mathrm{b}}=}="-";
(-16,7)*+{A}="0";
(0,7)*+{D}="2";
(16,7)*+{C}="4";
(-16,-7)*+{A^{\prime}}="10";
(0,-7)*+{D^{\prime}}="12";
(16,-7)*+{C^{\prime}}="14";
{\ar_{i} "0";"2"};
{\ar_{j} "2";"4"};
{\ar@/^1.0pc/^{z} "0";"4"};
{\ar_{a} "0";"10"};
{\ar_{d} "2";"12"};
{\ar^{c} "4";"14"};
{\ar^{i^{\prime}} "10";"12"};
{\ar^{j^{\prime}} "12";"14"};
{\ar@/_1.0pc/_{z^{\prime}} "10";"14"};
\endxy
\]
sharing the outer square in common.
Each of these rectangles is expressed as $\mathbf{R}_{\mathrm{f}}=\RT{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}$ and $\mathbf{R}_{\mathrm{b}}=\RT{\mathcal{X}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}$. In summary, a cube $\mathscr{C}f$ can be expressed by a $6$-tuple of $3$-simplices in $\mathscr{C}$
\begin{equation}\label{Exp_Cube}
\mathscr{C}f=\mathscr{C}B{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}
\end{equation}
satisfying $d_1(\mathcal{X}_{\mathrm{f}})=d_1(\mathcal{Y}_{\mathrm{f}}), d_2(\mathcal{Y}_{\mathrm{f}})=d_2(\mathcal{Z}_{\mathrm{f}}), d_2(\mathcal{X}_{\mathrm{f}})=d_2(\mathcal{X}_{\mathrm{b}}), d_1(\mathcal{Z}_{\mathrm{f}})=d_1(\mathcal{Z}_{\mathrm{b}})$, $d_1(\mathcal{X}_{\mathrm{b}})=d_1(\mathcal{Y}_{\mathrm{b}}), d_2(\mathcal{Y}_{\mathrm{b}})=d_2(\mathcal{Z}_{\mathrm{b}})$.
\begin{rem}
From a cube $\mathscr{C}f$ as above, we may obtain cubes
\[
\mathscr{C}B{\mathcal{Y}_{\mathrm{b}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{X}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{A^{\prime}}="2";
(1,3)*+{B}="4";
(17,3)*+{B^{\prime}}="6";
(-8,-7)*+{D}="10";
(8,-7)*+{D^{\prime}}="12";
(1,-13)*+{C}="14";
(17,-13)*+{C^{\prime}}="16";
{\ar^{a} "0";"2"};
{\ar_{x} "0";"4"};
{\ar^{x^{\prime}} "2";"6"};
{\ar_(0.3){b} "4";"6"};
{\ar_{i} "0";"10"};
{\ar^(0.7){i^{\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){y} "4";"14"};
{\ar^{y^{\prime}} "6";"16"};
{\ar^(0.3){d}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{j} "10";"14"};
{\ar^{j^{\prime}} "12";"16"};
{\ar_{c} "14";"16"};
\endxy
\ ,\
\mathscr{C}B{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{D}="2";
(1,3)*+{A^{\prime}}="4";
(17,3)*+{D^{\prime}}="6";
(-8,-7)*+{B}="10";
(8,-7)*+{C}="12";
(1,-13)*+{B^{\prime}}="14";
(17,-13)*+{C^{\prime}}="16";
{\ar^{i} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{d} "2";"6"};
{\ar_(0.3){i^{\prime}} "4";"6"};
{\ar_{x} "0";"10"};
{\ar^(0.7){j}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){x^{\prime}} "4";"14"};
{\ar^{j^{\prime}} "6";"16"};
{\ar^(0.3){y}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{b} "10";"14"};
{\ar^{c} "12";"16"};
{\ar_{y^{\prime}} "14";"16"};
\endxy \]
by transpositions, and also
$\mathscr{C}B{\mathcal{X}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}$,
$\mathscr{C}B{\mathcal{Y}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}$,
$\mathscr{C}B{\mathcal{Z}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{X}_{\mathrm{f}}}$.
\end{rem}
We often regard a cube $(\ref{Cb})$ as a morphism from $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{0\}}$ to $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{1\}}$ in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$. In this view, we have the following.
\begin{ex}\label{ExTrivCubes}
Let
\[
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\]
be any square.
\begin{enumerate}
\item There is a cube consisting of degenerated $3$-simplices
\[
\mathbf{I}_{\mathbf{S}}=\mathscr{C}B{s_0(\xi)}{s_1(\xi)}{s_2(\xi)}{s_0(\eta)}{s_1(\eta)}{s_2(\eta)}
\]
such that $\mathbf{I}_{\mathbf{S}}|_{\Delta^1\times\Delta^1\times\{0\}}=\mathbf{I}_{\mathbf{S}}|_{\Delta^1\times\Delta^1\times\{1\}}=\mathbf{S}$.
This is nothing but $1_{\mathbf{S}}=s_0(\mathbf{S})$ in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$.
\item Suppose that $\mathscr{C}$ is pointed (\cite[Definition~2.1]{L2}). Then any square
\[
\mathbf{O}=\
\xy
(-7,7)*+{O_A}="0";
(7,7)*+{O_B}="2";
(-7,-7)*+{O_D}="4";
(7,-7)*+{O_C}="6";
{\ar^{x_o} "0";"2"};
{\ar_{i_o} "0";"4"};
{\ar|*+{_{z_o}} "0";"6"};
{\ar^{y_o} "2";"6"};
{\ar_{j_o} "4";"6"};
\endxy
\]
in which $O_A,O_B,O_C,O_D$ are zero objects in $\mathscr{C}$, gives a zero object in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$. In particular, there exists a cube $\mathscr{C}f$ such that $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{0\}}=\mathbf{S}$ and $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{1\}}=\mathbf{O}$. Indeed, by using the definition of zero objects, such a cube can be taken compatibly with arbitrarily given $5$-tuple of squares
\[
\xy
(-5,5)*+{A}="0";
(5,5)*+{B}="2";
(-5,-5)*+{O_A}="4";
(5,-5)*+{O_B}="6";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{b} "2";"6"};
{\ar_{x_o} "4";"6"};
\endxy\ ,\
\xy
(-5,5)*+{A}="0";
(5,5)*+{D}="2";
(-5,-5)*+{O_A}="4";
(5,-5)*+{O_D}="6";
{\ar^{i} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{d} "2";"6"};
{\ar_{i_o} "4";"6"};
\endxy\ ,\
\xy
(-5,5)*+{B}="0";
(5,5)*+{C}="2";
(-5,-5)*+{O_B}="4";
(5,-5)*+{O_C}="6";
{\ar^{y} "0";"2"};
{\ar_{b} "0";"4"};
{\ar^{c} "2";"6"};
{\ar_{y_o} "4";"6"};
\endxy\ ,\
\xy
(-5,5)*+{D}="0";
(5,5)*+{C}="2";
(-5,-5)*+{O_D}="4";
(5,-5)*+{O_C}="6";
{\ar^{j} "0";"2"};
{\ar_{c} "0";"4"};
{\ar^{d} "2";"6"};
{\ar_{j_o} "4";"6"};
\endxy\ ,\
\xy
(-5,5)*+{A}="0";
(5,5)*+{C}="2";
(-5,-5)*+{O_A}="4";
(5,-5)*+{O_C}="6";
{\ar^{z} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{c} "2";"6"};
{\ar_{z_o} "4";"6"};
\endxy
\]
sharing the morphisms with the same labels in common.
Similarly for the existence of a cube $\mathscr{C}f^{\prime}$ with $\mathscr{C}f^{\prime}|_{\Delta^1\times\Delta^1\times\{0\}}=\mathbf{O}$ and $\mathscr{C}f^{\prime}|_{\Delta^1\times\Delta^1\times\{1\}}=\mathbf{S}$.
\end{enumerate}
\end{ex}
Similarly as in Proposition~\ref{PropComposeSquares}, any consecutive pair of cubes can be filled into a $2$-simplex in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$. More strictly, the following holds.
\begin{prop}\label{PropComposeCubes}
Let
\[
\mathscr{C}f=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{D}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A^{\prime}}="10";
(8,-7)*+{B^{\prime}}="12";
(1,-13)*+{D^{\prime}}="14";
(17,-13)*+{C^{\prime}}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.7){b}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){d} "4";"14"};
{\ar^{c} "6";"16"};
{\ar^(0.3){x^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime}} "10";"14"};
{\ar^{y^{\prime}} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy
\quad,\quad
\mathscr{C}f^{\prime}=\
\xy
(-8,9)*+{A^{\prime}}="0";
(8,9)*+{B^{\prime}}="2";
(1,3)*+{D^{\prime}}="4";
(17,3)*+{C^{\prime}}="6";
(-8,-7)*+{A^{\prime\prime}}="10";
(8,-7)*+{B^{\prime\prime}}="12";
(1,-13)*+{D^{\prime\prime}}="14";
(17,-13)*+{C^{\prime\prime}}="16";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_(0.3){j^{\prime}} "4";"6"};
{\ar_{a^{\prime}} "0";"10"};
{\ar^(0.7){b^{\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){d^{\prime}} "4";"14"};
{\ar^{c^{\prime}} "6";"16"};
{\ar^(0.3){x^{\prime\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime\prime}} "10";"14"};
{\ar^{y^{\prime\prime}} "12";"16"};
{\ar_{j^{\prime\prime}} "14";"16"};
\endxy
\]
be a pair of cubes satisfying $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{1\}}=\mathscr{C}f^{\prime}|_{\Delta^1\times\Delta^1\times\{0\}}$, and let
\[
\TwoSP{A}{A^{\prime}}{A^{\prime\prime}}{a}{a^{\prime}}{a^{\prime\prime}}{\alpha},\
\TwoSP{B}{B^{\prime}}{B^{\prime\prime}}{b}{b^{\prime}}{b^{\prime\prime}}{\beta},\
\TwoSP{C}{C^{\prime}}{C^{\prime\prime}}{c}{c^{\prime}}{c^{\prime\prime}}{\gamma},\
\TwoSP{D}{D^{\prime}}{D^{\prime\prime}}{d}{d^{\prime}}{d^{\prime\prime}}{\delta}
\]
be arbitrarily taken $2$-simplices.
Then there exists $F\colon\Delta^1\times\Delta^1\times\Delta^2\to\mathscr{C}$ such that
$F|_{\Delta^1\times\Delta^1\times [0,1]}=\mathscr{C}f, F|_{\Delta^1\times\Delta^1\times [1,2]}=\mathscr{C}f^{\prime}$ and
\[ F|_{\{(0,0)\}\times\Delta^2}=\alpha, F|_{\{(0,1)\}\times\Delta^2}=\beta, F|_{\{(1,1)\}\times\Delta^2}=\gamma, F|_{\{(1,0)\}\times\Delta^2}=\delta. \]
Especially we obtain a cube
\[
F|_{\Delta^1\times\Delta^1\times \Lambda^2_1}=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{D}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A^{\prime\prime}}="10";
(8,-7)*+{B^{\prime\prime}}="12";
(1,-13)*+{D^{\prime\prime}}="14";
(17,-13)*+{C^{\prime\prime}}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{a^{\prime\prime}} "0";"10"};
{\ar^(0.7){b^{\prime\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){d^{\prime\prime}} "4";"14"};
{\ar^{c^{\prime\prime}} "6";"16"};
{\ar^(0.3){x^{\prime\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime\prime}} "10";"14"};
{\ar^{y^{\prime\prime}} "12";"16"};
{\ar_{j^{\prime\prime}} "14";"16"};
\endxy
\]
compatible with the given data.
\end{prop}
\begin{proof}
Similarly as Proposition~\ref{PropComposeSquares}, this is just rephrasing the fact that $((\Delta^1\times\Delta^1)\times\Lambda^2_1)\cup(\{(0,0),(0,1),(1,0),(1,1)\}\times\Delta^2)\hookrightarrow\Delta^1\times\Delta^1\times\Delta^2$ is an inner anodyne map (\cite{J}, \cite[Proposition~2.3.2.1]{L1}, \cite[Proposition~3.2.3]{C}).
\end{proof}
\begin{cor}\label{CorComposeCubes}
Let $\mathbf{S}, \mathbf{S}^{\prime}$ be any pair of squares such that $\mathbf{S}((0,0))=\mathbf{S}^{\prime}((0,0))=A$ and $\mathbf{S}((1,1))=\mathbf{S}^{\prime}((1,1))=C$. Assume that there is a cube $\mathscr{C}f$ such that
\[ \mathscr{C}f|_{\Delta^1\times\Delta^1\times\{0\}}=\mathbf{S},\ \mathscr{C}f|_{\Delta^1\times\Delta^1\times\{1\}}=\mathbf{S}^{\prime} \]
and that $a=\mathscr{C}f|_{\{(0,0)\}\times\Delta^1}$ and $c=\mathscr{C}f|_{\{(1,1)\}\times\Delta^1}$ satisfy $\overline{a}=\mathrm{id}_A$ and $\overline{c}=\mathrm{id}_C$.
Then there exists a cube $\mathscr{C}f^{\prime}$ such that
\[ \mathscr{C}f^{\prime}|_{\Delta^1\times\Delta^1\times\{0\}}=\mathbf{S},\ \mathscr{C}f^{\prime}|_{\Delta^1\times\Delta^1\times\{1\}}=\mathbf{S}^{\prime} \]
and $\mathscr{C}f^{\prime}|_{\{(0,0)\}\times\Delta^1}=1_A$, $\mathscr{C}f^{\prime}|_{\{(0,1)\}\times\Delta^1}=\mathscr{C}f|_{\{(0,1)\}\times\Delta^1}$, $\mathscr{C}f^{\prime}|_{\{(1,0)\}\times\Delta^1}=\mathscr{C}f|_{\{(1,0)\}\times\Delta^1}$, $\mathscr{C}f^{\prime}|_{\{(1,1)\}\times\Delta^1}=1_C$.
\end{cor}
\begin{proof}
This follows from Proposition~\ref{PropComposeCubes} applied to the cubes $\mathscr{C}f,\mathbf{I}_{\mathbf{S}^{\prime}}$ and $2$-simplices of the following form,
\[
\TwoSP{A}{A}{A}{a}{1_A}{1_A}{},\
\TwoSP{B}{B^{\prime}}{B^{\prime}}{b}{1_{B^{\prime}}}{b}{s_1(b)},\
\TwoSP{C}{C}{C}{c}{1_C}{1_C}{},\
\TwoSP{D}{D^{\prime}}{D^{\prime}}{d}{1_{D^{\prime}}}{d}{s_1(d)},\
\]
where we denote $\mathscr{C}f|_{\{(0,1)\}\times\Delta^1}$ and $\mathscr{C}f|_{\{(1,0)\}\times\Delta^1}$ by $B\overset{b}{\longrightarrow}B^{\prime}$ and $D\overset{d}{\longrightarrow}D^{\prime}$, respectively.
\end{proof}
\section{Exact sequences in exact quasi-categories}\label{Section_ExactSequences}
First we recall the definition of an exact quasi-category. For details, see \cite{B1},\cite{B2}.
A quasi-category $\mathscr{C}$ is called \emph{additive} (\circte[Definition~2.2]{B1}) if it is pointed, has all finite products and finite coproducts, and moreover if the homotopy category $h\mathscr{C}$ is additive as an ordinary category. In particular, for any pair of objects $X_1,X_2\in h\mathscr{C}$, the unique morphism $\mathfrak{v}\in (h\mathscr{C})(X_1\amalg X_2,X_1\times X_2)$ which satisfies
\[ p_{k^{\prime}}\circ \mathfrak{v}\circ i_k\colon X_k\to X_{k^{\prime}}=
\begin{cases}
\mathrm{id}& \text{if}\ k=k^{\prime}\\
0 & \text{if}\ k\ne k^{\prime}
\end{cases}
\]
should become an isomorphism. Here, $X_1\overset{p_1}{\longleftarrow}X_1\times X_2\overset{p_2}{\longrightarrow}X_2$ and $X_1\overset{i_1}{\longrightarrow}X_1\amalg X_2\overset{i_2}{\longleftarrow}X_2$ are product and coproduct of $X_1,X_2$ in $h\mathscr{C}$, respectively.
Let $\mathscr{C}$ be a quasi-category, and let $\mathscr{C}_{\dag},\mathscr{C}^{\dag}$ be two subcategories of $\mathscr{C}$ containing all homotopy equivalences. A morphism in $\mathscr{C}_{\dag}$ is called \emph{ingressive}, and a morphism in $\mathscr{C}^{\dag}$ is called \emph{egressive}.
A square
\begin{equation}\label{AmbPB'}
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{D}="4";
(6,-6)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
\endxy
\end{equation}
is called an \emph{ambigressive pull-back} if it is a pull-back square in which $j$ is ingressive and $y$ is egressive.
Dually, $(\ref{AmbPB'})$ is an \emph{ambigressive push-out} if it is a push-out square in which $x$ is ingressive and $i$ is egressive.
\begin{dfn}\label{DefExactQuasiCat}(\cite[Definition~3.1]{B1})
The triplet $(\mathscr{C},\mathscr{C}_{\dag},\mathscr{C}^{\dag})$ is an \emph{exact quasi-category} ($=$ exact $\infty$-category) if it satisfies the following conditions.
\begin{itemize}
\item[{\rm (Ex0)}] $\mathscr{C}$ is additive.
\item[{\rm (Ex1)}] If $O\in \mathscr{C}_0$ is a zero object, then any morphism $O\to X$ in $\mathscr{C}$ is ingressive. Dually, any morphism $X\to O$ in $\mathscr{C}$ is egressive.
\item[{\rm (Ex2)}] Push-outs of ingressive morphisms exist and are ingressive. Dually, pull-backs of egressive morphisms exist and are egressive.
\item[{\rm (Ex3)}] A square in $\mathscr{C}$ is an ambigressive pull-back if and only if it is an ambigressive push-out.
\end{itemize}
\end{dfn}
In the rest of this article, let $(\mathscr{C},\mathscr{C}_{\dag},\mathscr{C}^{\dag})$ be an exact quasi-category defined as above. We also simply say that $\mathscr{C}$ is an exact quasi-category.
\begin{dfn}\label{DefExSeq}(\cite[Definition~3.1]{B1})
$\ \ $
\begin{enumerate}
\item
An \emph{exact sequence} starting from $A$ and ending in $C$ is an ambigressive pull-back (hence an ambigressive push-out)
\begin{equation}\label{ExSeq}
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\end{equation}
in which $O$ is a zero object in $\mathscr{C}$. When we emphasize the end-objects $A$ and $C$, we write ${}_A\mathbf{S}_C$ in this article.
\end{enumerate}
\end{dfn}
\begin{ex}\label{ExTrivExSeq}
If a morphism $x\in\mathscr{C}_1(X,X)$ satisfies $\overline{x}=\mathrm{id}_X$, then any square
\[
\xy
(-6,6)*+{X}="0";
(6,6)*+{X}="2";
(-6,-6)*+{O}="4";
(6,-6)*+{O^{\prime}}="6";
{\ar^{x} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy,\ \
\Bigg(\text{similarly,}\
\xy
(-6,6)*+{O}="0";
(6,6)*+{X}="2";
(-6,-6)*+{O^{\prime}}="4";
(6,-6)*+{X}="6";
{\ar^{} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{x} "2";"6"};
{\ar_{} "4";"6"};
\endxy
\Bigg)
\]
in which $O,O^{\prime}$ are zero objects, is an exact sequence.
\end{ex}
\subsection{Morphisms of exact sequences}
\begin{dfn}\label{DefMorphExSeq}
Let $\mathbb{E}sc\subseteq\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$ be the full subcategory spanned by exact sequences. In particular, for a pair of exact sequences
\begin{equation}\label{TwoExSeq}
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\quad
\text{and}
\quad
\mathbf{S}^{\prime}=\
\xy
(-7,7)*+{A^{\prime}}="0";
(7,7)*+{B^{\prime}}="2";
(-7,-7)*+{O^{\prime}}="4";
(7,-7)*+{C^{\prime}}="6";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar|*+{_{z^{\prime}}} "0";"6"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi^{\prime}}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta^{\prime}}} "10";"11"};
\endxy,
\end{equation}
a \emph{morphism of exact sequences from $\mathbf{S}$ to $\mathbf{S}^{\prime}$} means a cube
\begin{equation}\label{MorphExSeq}
\mathscr{C}f=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A^{\prime}}="10";
(8,-7)*+{B^{\prime}}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{C^{\prime}}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.7){b}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){o} "4";"14"};
{\ar^{c} "6";"16"};
{\ar^(0.3){x^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime}} "10";"14"};
{\ar^{y^{\prime}} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy
\end{equation}
which satisfies $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{0\}}=\mathbf{S}$ and $\mathscr{C}f|_{\Delta^1\times\Delta^1\times\{1\}}=\mathbf{S}^{\prime}$. We abbreviate this to $\mathscr{C}f\colon\mathbf{S}\to\mathbf{S}^{\prime}$. When we indicate morphisms $a$ and $c$, we will also write ${}_a\mathscr{C}f_c\colon\mathbf{S}\to\mathbf{S}^{\prime}$ in this article.
\end{dfn}
\begin{prop}\label{PropWIsom}
Let ${}_A\mathbf{S}_C,{}_A\mathbf{S}^{\prime}_C$ be any pair of exact sequences starting from $A$ and ending in $C$, and let ${}_a\mathscr{C}f_c\colon\mathbf{S}\to\mathbf{S}^{\prime}$ be a morphism such that $\overline{a}=\mathrm{id}_A$ and $\overline{c}=\mathrm{id}_C$, as depicted in $(\ref{CbPair})$ below.
Suppose that there also exists a morphism in the opposite direction ${}_{a^{\prime}}\mathscr{C}f^{\prime}_{c^{\prime}}\colon\mathbf{S}^{\prime}\to\mathbf{S}$ as below, such that $\overline{a^{\prime}}=\mathrm{id}_A$ and $\overline{c^{\prime}}=\mathrm{id}_C$.
Then $b$ is a homotopy equivalence.
\begin{equation}\label{CbPair}
\mathscr{C}f=\ \xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A}="10";
(8,-7)*+{B^{\prime}}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{C}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.7){b}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){} "4";"14"};
{\ar^{c} "6";"16"};
{\ar^(0.3){x^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime}} "10";"14"};
{\ar^{y^{\prime}} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy
\quad,\quad
\mathscr{C}f^{\prime}=\ \xy
(-8,9)*+{A}="0";
(8,9)*+{B^{\prime}}="2";
(1,3)*+{O^{\prime}}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A}="10";
(8,-7)*+{B}="12";
(1,-13)*+{O}="14";
(17,-13)*+{C}="16";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_(0.3){j^{\prime}} "4";"6"};
{\ar_{a^{\prime}} "0";"10"};
{\ar^(0.7){b^{\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){} "4";"14"};
{\ar^{c^{\prime}} "6";"16"};
{\ar^(0.3){x}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i} "10";"14"};
{\ar^{y} "12";"16"};
{\ar_{j} "14";"16"};
\endxy
\end{equation}
\end{prop}
\begin{proof}
It suffices to show that $\overline{b}$ is an isomorphism in $h\mathscr{C}$.
Remark that $\mathscr{C}f$ and $\mathscr{C}f^{\prime}$ yield commutative diagrams
\[
\xy
(-12,6)*+{A}="2";
(0,6)*+{B}="4";
(12,6)*+{C}="6";
(-12,-6)*+{A}="12";
(0,-6)*+{B^{\prime}}="14";
(12,-6)*+{C}="16";
{\ar^{\overline{x}} "2";"4"};
{\ar^{\overline{y}} "4";"6"};
{\ar@{=} "2";"12"};
{\ar^{\overline{b}} "4";"14"};
{\ar@{=} "6";"16"};
{\ar_{\overline{x^{\prime}}} "12";"14"};
{\ar_{\overline{y^{\prime}}} "14";"16"};
{\ar@{}|\circlearrowright "2";"14"};
{\ar@{}|\circlearrowright "4";"16"};
\endxy
\ \ ,\ \
\xy
(-12,6)*+{A}="2";
(0,6)*+{B^{\prime}}="4";
(12,6)*+{C}="6";
(-12,-6)*+{A}="12";
(0,-6)*+{B}="14";
(12,-6)*+{C}="16";
{\ar^{\overline{x^{\prime}}} "2";"4"};
{\ar^{\overline{y^{\prime}}} "4";"6"};
{\ar@{=} "2";"12"};
{\ar^{\overline{b^{\prime}}} "4";"14"};
{\ar@{=} "6";"16"};
{\ar_{\overline{x}} "12";"14"};
{\ar_{\overline{y}} "14";"16"};
{\ar@{}|\circlearrowright "2";"14"};
{\ar@{}|\circlearrowright "4";"16"};
\endxy
\]
in $h\mathscr{C}$, whose rows are both weak kernel and weak cokernel. For $\mathfrak{b}=\overline{b^{\prime}}\circ\overline{b}\in(h\mathscr{C})(B,B)$, since we have $(\mathrm{id}_B-\mathfrak{b})\circ\overline{x}=0$ and $\overline{y}\circ(\mathrm{id}_B-\mathfrak{b})=0$, there are $\mathfrak{d}\in(h\mathscr{C})(B,A)$, $\mathfrak{e}\in(h\mathscr{C})(C,B)$ such that $\overline{x}\circ\mathfrak{d}=\mathrm{id}_B-\mathfrak{b}$ and $\mathfrak{e}\circ\overline{y}=\mathrm{id}_B-\mathfrak{b}$.
It follows that $(\mathrm{id}_B-\mathfrak{b})\circ(\mathrm{id}_B-\mathfrak{b})=\mathfrak{e}\circ\overline{y}\circ\overline{x}\circ\mathfrak{d}=0$, equivalently $\mathfrak{b}\circ(2\cdot\mathrm{id}_B-\mathfrak{b})=(2\cdot\mathrm{id}_B-\mathfrak{b})\circ\mathfrak{b}=\mathrm{id}_B$. In particular $\mathfrak{b}=\overline{b^{\prime}}\circ\overline{b}$ is an isomorphism.
The same argument shows that $\overline{b}\circ\overline{b^{\prime}}$ is also an isomorphism. Hence so is $\overline{b}$.
\end{proof}
\begin{rem}
Later in Proposition~\ref{PropInvert} we will see (in combination with Corollary~\ref{CorComposeCubes}) that the existence of $\mathscr{C}f^{\prime}$ is not necessary to assume, and that it always holds automatically.
\end{rem}
\begin{prop}\label{PropMorphExSeq}
Let $\mathbf{S}=\SQ{\eta}{\xi},\mathbf{S}^{\prime}=\SQ{\eta^{\prime}}{\xi^{\prime}}$ be any pair of exact sequences as in $(\ref{TwoExSeq})$, and let
\begin{equation}\label{Square_AB}
\mathbf{T}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{A^{\prime}}="4";
(7,-7)*+{B^{\prime}}="6";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{b} "2";"6"};
{\ar_{x^{\prime}} "4";"6"};
\endxy
\end{equation}
be any square. Then there exists a morphism of exact sequences $\mathscr{C}f\colon\mathbf{S}\to\mathbf{S}^{\prime}$ such that $\mathscr{C}f|_{\Delta^1\times\{0\}\times\Delta^1}=\mathbf{T}$. More precisely, the following holds.
\begin{enumerate}
\item There exist rectangles of the form
\begin{eqnarray*}
&\mathbf{R}=\RT{s_2(\xi)}{s_2(\eta)}{\mathcal{Z}}=\
\xy
(-16,7)*+{A}="0";
(0,7)*+{O}="2";
(16,7)*+{O^{\prime}}="4";
(-16,-7)*+{B}="10";
(0,-7)*+{C}="12";
(16,-7)*+{C}="14";
{\ar_{i} "0";"2"};
{\ar_{} "2";"4"};
{\ar@/^1.0pc/^{i_p} "0";"4"};
{\ar_{x} "0";"10"};
{\ar_{j} "2";"12"};
{\ar^{} "4";"14"};
{\ar^{y} "10";"12"};
{\ar^{1_C} "12";"14"};
{\ar@/_1.0pc/_{y} "10";"14"};
\endxy,&\\
&\mathbf{R}^{\prime}=\RT{\mathcal{X}^{\prime}}{\mathcal{Y}^{\prime}}{\mathcal{Z}^{\prime}}=\
\xy
(-16,7)*+{A}="0";
(0,7)*+{A^{\prime}}="2";
(16,7)*+{O^{\prime}}="4";
(-16,-7)*+{B}="10";
(0,-7)*+{B^{\prime}}="12";
(16,-7)*+{C^{\prime}}="14";
{\ar_{a} "0";"2"};
{\ar_{i^{\prime}} "2";"4"};
{\ar@/^1.0pc/^{i_p} "0";"4"};
{\ar_{x} "0";"10"};
{\ar_{x^{\prime}} "2";"12"};
{\ar^{j^{\prime}} "4";"14"};
{\ar^{b} "10";"12"};
{\ar^{y^{\prime}} "12";"14"};
{\ar@/_1.0pc/_{} "10";"14"};
\endxy&
\end{eqnarray*}
sharing some common morphism $i_p\in\mathscr{C}_1(A,O^{\prime})$,
such that $\mathbf{S}=(\mathbf{R}_{\mathrm{left}})^t$, $\mathbf{S}^{\prime}=(\mathbf{R}^{\prime}_{\mathrm{right}})^t$ and $\mathbf{T}=(\mathbf{R}^{\prime}_{\mathrm{left}})^t$.
Moreover, if $A=A^{\prime},a=1_A$ and $\mathbf{T}=\SQ{s_0(x^{\prime})}{\xi}$, then we may take $i_p=i^{\prime}$, and $\mathbf{R}^{\prime}$ can be chosen to be of the form $\RT{\mathcal{X}^{\prime}}{s_0(\xi^{\prime})}{s_0(\eta^{\prime})}$.
\item For any $\mathbf{R}=\RT{s_2(\xi)}{s_2(\eta)}{\mathcal{Z}}$ and $\mathbf{R}^{\prime}=\RT{\mathcal{X}^{\prime}}{\mathcal{Y}^{\prime}}{\mathcal{Z}^{\prime}}$ as in {\rm (1)}, there exist three $3$-simplices $\Phi,\Psi_{\mathrm{f}},\Psi_{\mathrm{b}}$ which gives a morphism
\[ \mathscr{C}f=\mathscr{C}B{\mathcal{Y}^{\prime}}{\mathcal{X}^{\prime}}{\Psi_{\mathrm{f}}}{\mathcal{Z}^{\prime}}{\Phi}{\Psi_{\mathrm{b}}}\colon\mathbf{S}\to\mathbf{S}^{\prime} \]
satisfying $d_3(\Phi)=d_3(\mathcal{Z})$.
\end{enumerate}
\end{prop}
\begin{proof}
{\rm (1)} By using the definition of a zero object, we may take a $3$-simplex
\[
\mathcal{Z}=\
\xy
(-2,11)*+{A}="0";
(-12,-2)*+{O}="2";
(10,0)*+{O^{\prime}}="4";
(2,-12)*+{C}="6";
{\ar_{i} "0";"2"};
{\ar^{i_p} "0";"4"};
{\ar_(0.3){z} "0";"6"};
{\ar_(0.35){}|!{"0";"6"}\hole "2";"4"};
{\ar_{j} "2";"6"};
{\ar_{} "4";"6"};
\endxy
\]
with arbitrarily chosen morphisms $A\overset{i_p}{\longrightarrow}O^{\prime}$, $O\to O^{\prime}$, $O^{\prime}\to C$, such that $d_2(\mathcal{Z})=\eta$. This gives $\mathbf{R}$ as desired. By the definition of a zero object, there also exists a $2$-simplex $\alpha$ as below. Also, we may take a $2$-simplex $\beta$ arbitrarily as below, with some $y_p\in\mathscr{C}_1(B,C^{\prime})$.
\[ \TwoSP{A}{A^{\prime}}{O^{\prime}}{a}{i^{\prime}}{i_p}{\alpha},\ \ \TwoSP{B}{B^{\prime}}{C^{\prime}}{b}{y^{\prime}}{y_p}{\beta} \]
Then the existence of $\mathbf{R}^{\prime}$ follows from Proposition~\ref{PropComposeSquares} applied to $\mathbf{T},\mathbf{S}^{\prime},\alpha,\beta$. The latter part is also obvious from the construction, in which case we use $\alpha=s_0(i^{\prime})$.
{\rm (2)} We may take a $3$-simplex
\[
\Phi=\
\xy
(-2,11)*+{A}="0";
(-12,-2)*+{O}="2";
(10,0)*+{C^{\prime}}="4";
(2,-12)*+{O^{\prime}}="6";
{\ar_{i} "0";"2"};
{\ar^{} "0";"4"};
{\ar_(0.3){i_p} "0";"6"};
{\ar_(0.35){}|!{"0";"6"}\hole "2";"4"};
{\ar_{} "2";"6"};
{\ar_{j^{\prime}} "6";"4"};
\endxy
\]
such that $d_1(\Phi)=d_1(\mathcal{Z}^{\prime})$ and $d_3(\Phi)=d_3(\mathcal{Z})$, with arbitrarily taken compatible $d_0(\Phi)$ by using the definition of a zero object.
Put $\mathbf{S}^p=(\mathbf{R}_{\mathrm{out}})^t=\SQ{d_1(\mathcal{Z})}{\xi}$ and
\[ \mathbf{S}^q=\SQ{d_1(\mathcal{Z}^{\prime})}{d_2(\mathcal{X}^{\prime})}=\
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{O^{\prime}}="4";
(6,-6)*+{C^{\prime}}="6";
{\ar^{x} "0";"2"};
{\ar_{i_p} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
\endxy.
\]
Since $(\mathbf{R}_{\mathrm{left}})^t,(\mathbf{R}_{\mathrm{right}})^t$ are exact sequences, so is $\mathbf{S}^p$. Thus by the definition of push-out, there exists a morphism $\roundup{\Gamma}{\Psi_{\mathrm{f}}}$ from $\mathbf{S}^p$ to $\mathbf{S}^q$ in the undercategory, namely a pair of $3$-simplices $\Psi_{\mathrm{f}},\Gamma$ in $\mathscr{C}$ satisfying
$d_2(\Psi_{\mathrm{f}})=d_2(\mathcal{X}^{\prime})$, $d_3(\Psi_{\mathrm{f}})=\xi$, $d_2(\Gamma)=d_1(\mathcal{Z}^{\prime})$, $d_3(\Gamma)=d_1(\mathcal{Z})$, and $d_1(\Gamma)=d_1(\Psi_{\mathrm{f}})$.
We may take a $3$-simplex $\nu$
such that $d_0(\nu)=d_0(\Gamma)$, $d_2(\nu)=d_0(\Phi)$, $d_3(\nu)=d_0(\mathcal{Z})$ and then a $4$-simplex
\[
\Theta=\Penta{A}{O}{O^{\prime}}{C}{C^{\prime}}{i}{}{}{}
\]
such that $d_0(\Theta)=\nu$, $d_1(\Theta)=\Gamma$, $d_3(\Theta)=\Phi$, $d_4(\Theta)=\mathcal{Z}$. If we put $\Psi_{\mathrm{b}}=d_2(\Theta)$, then $\Phi,\Psi_{\mathrm{f}},\Psi_{\mathrm{b}}$ satisfy the required conditions.
\end{proof}
The following will be used later to replace the zero objects appearing in exact sequences.
\begin{lem}\label{LemReplZero}
Let
\[
\nu=\ \xy
(-1,7)*+{A}="0";
(-8,-2)*+{O}="1";
(1,-10)*+{O^{\prime}}="2";
(9,-1)*+{C}="3";
{\ar_{} "0";"1"};
{\ar^{} "0";"2"};
{\ar_{} "0";"3"};
{\ar_{} "1";"2"};
{\ar^{}|!{"0";"2"}\hole "1";"3"};
{\ar_{} "2";"3"};
\endxy
\quad\text{and}\quad
\nu^{\prime}=\
\xy
(-1,7)*+{A}="0";
(-8,-2)*+{O}="1";
(1,-10)*+{O^{\prime}r}="2";
(9,-1)*+{C}="3";
{\ar_{} "0";"1"};
{\ar^{} "0";"2"};
{\ar_{} "0";"3"};
{\ar_{} "1";"2"};
{\ar^{}|!{"0";"2"}\hole "1";"3"};
{\ar_{} "2";"3"};
\endxy
\]
be any pair of $3$-simplices satisfying $d_2(\nu)=d_2(\nu^{\prime})$, in which $O,O^{\prime},O^{\prime}r$ are zero objects. Let $o\in\mathscr{C}_1(O^{\prime}r,O^{\prime})$ be an arbitrarily taken morphism. Then there exists a $3$-simplex
\[
\mu=\
\xy
(-1,7)*+{A}="0";
(-8,-2)*+{O^{\prime}r}="1";
(1,-10)*+{O^{\prime}}="2";
(9,-1)*+{C}="3";
{\ar_{} "0";"1"};
{\ar^{} "0";"2"};
{\ar_{} "0";"3"};
{\ar_{o} "1";"2"};
{\ar^{}|!{"0";"2"}\hole "1";"3"};
{\ar_{} "2";"3"};
\endxy
\]
such that $d_1(\mu)=d_1(\nu)$ and $d_2(\mu)=d_1(\nu^{\prime})$.
\end{lem}
\begin{proof}
We can show the existence of a $4$-simplex
\[ \Phi=\Penta{A}{O}{O^{\prime}r}{O^{\prime}}{C}{}{}{o}{} \]
such that $d_2(\Phi)=\nu$ and $d_3(\Phi)=\nu^{\prime}$, with arbitrarily chosen compatible $3$-simplices $d_0(\Phi)$ and $d_4(\Phi)$ by using the definition of zero objects. This gives $\mu=d_1(\Phi)$ as desired.
\end{proof}
\begin{prop}\label{PropReplZero}
Let
\[
{}_A\mathbf{S}_C=\SQ{\eta}{\xi}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\quad\text{and}\quad
{}_A\mathbf{S}^{\prime}_C=\SQ{\eta^{\prime}}{\xi}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O^{\prime}}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta^{\prime}}} "10";"11"};
\endxy
\]
be any pair of squares sharing common $\xi$ in the upper triangles. Suppose that there is a $3$-simplex $\nu$ such that $d_1(\nu)=\eta^{\prime}$ and $d_2(\nu)=\eta$.
Then the following holds.
\begin{enumerate}
\item There exists a $3$-simplex $\nu^{\prime}$ such that $d_1(\nu^{\prime})=\eta$ and $d_2(\nu^{\prime})=\eta^{\prime}$.
\item $\mathbf{S}$ is an exact sequence if and only if $\mathbf{S}^{\prime}$ is an exact sequence.
\end{enumerate}
We also remark that in case {\rm (2)}, the given $\nu$ and $\nu^{\prime}$ obtained in {\rm (1)} are nothing but morphisms of exact sequences
\[ \mathscr{C}B{s_0(\xi)}{s_1(\xi)}{s_2(\xi)}{s_0(\eta^{\prime})}{\nu}{s_2(\eta)}\colon\mathbf{S}\to\mathbf{S}^{\prime}
\quad\text{and}\quad
\mathscr{C}B{s_0(\xi)}{s_1(\xi)}{s_2(\xi)}{s_0(\eta)}{\nu^{\prime}}{s_2(\eta^{\prime})}\colon\mathbf{S}^{\prime}\to\mathbf{S}. \]
\end{prop}
\begin{proof}
{\rm (1)} is immediate from Lemma~\ref{LemReplZero}, applied to $\nu$ and $s_1(\eta)$. {\rm (2)} follows from the existence of a rectangle $\mathbf{R}=\RT{\nu}{s_0(\eta^{\prime})}{s_0(\xi)}$, in which $\mathbf{R}_{\mathrm{left}}$ is an exact sequence and $\mathbf{R}_{\mathrm{out}}=\mathbf{S}$, $\mathbf{R}_{\mathrm{right}}=\mathbf{S}^{\prime}$.
\end{proof}
\subsection{Decompositions of morphisms}
\begin{lem}\label{LemExSeqPO}
Let ${}_A\mathbf{S}_C=\SQ{\eta}{\xi}$ be any exact sequence as in $(\ref{ExSeq})$, and let $a\in\mathscr{C}_1(A,A^{\prime})$ be any morphism. Let
\begin{equation}\label{SquareABPO}
\mathbf{P}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{A^{\prime}}="4";
(7,-7)*+{B_0}="6";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{b_0} "2";"6"};
{\ar_{x_0} "4";"6"};
\endxy
\end{equation}
be an arbitrarily chosen push-out square, which always exists by the definition of exact quasi-category. Then, we have the following.
\begin{enumerate}
\item There exists an exact sequence
\begin{equation}\label{ExSeqPO}
\mathbf{S}_0=\
\xy
(-7,7)*+{A^{\prime}}="0";
(7,7)*+{B_0}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x_0} "0";"2"};
{\ar_{i_0} "0";"4"};
{\ar|*+{_{z_0}} "0";"6"};
{\ar^{y_0} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi_0}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta_0}} "10";"11"};
\endxy
\end{equation}
and a morphism of exact sequences $\mathscr{D}f\colon \mathbf{S}\to\mathbf{S}_0$ of the form
\[
\mathscr{D}f=\mathscr{C}B{\varphi}{\psi}{s_2(\xi)}{\theta}{s_1(\eta)}{s_2(\eta)}\ =
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A^{\prime}}="10";
(8,-7)*+{B_0}="12";
(1,-13)*+{O}="14";
(17,-13)*+{C}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.7){b_0}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){1_O} "4";"14"};
{\ar^{1_C} "6";"16"};
{\ar^(0.3){x_0}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i_0} "10";"14"};
{\ar^{y_0} "12";"16"};
{\ar_{j} "14";"16"};
\endxy
\]
such that $\mathscr{D}f|_{\Delta^1\times\{0\}\times\Delta^1}=\mathbf{P}$. We denote any such square $\mathbf{S}_0$ admitting $\mathscr{D}f$ by $a_{\ast}\mathbf{S}$.
\item Let $\mathscr{D}f\colon \mathbf{S}\to\mathbf{S}_0=a_{\ast}\mathbf{S}$ be arbitrarily as in {\rm (1)}. For any morphism of exact sequences
\[ {}_a\mathscr{C}f_c=\mathscr{C}B{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}\colon {}_A\mathbf{S}_C\to {}_{A^{\prime}}\mathbf{S}^{\prime}_{C^{\prime}}=\SQ{\eta^{\prime}}{\xi^{\prime}} \]
as in $(\ref{MorphExSeq})$, there exists a morphism of exact sequences ${}_{1_{A^{\prime}}}\mathscr{C}f^{\prime}_{c}\colon a_{\ast}\mathbf{S}\to\mathbf{S}^{\prime}$ of the form
\begin{equation}\label{CubeFactorPO}
\mathscr{C}f^{\prime}=\mathscr{C}B{s_0(\xi^{\prime})}{\mathcal{Y}^{\prime}_{\mathrm{f}}}{\mathcal{Z}^{\prime}_{\mathrm{f}}}{s_0(\eta^{\prime})}{\mathcal{Y}^{\prime}_{\mathrm{b}}}{\mathcal{Z}^{\prime}_{\mathrm{b}}}=\
\xy
(-8,9)*+{A^{\prime}}="0";
(8,9)*+{B_0}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A^{\prime}}="10";
(8,-7)*+{B^{\prime}}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{C^{\prime}}="16";
{\ar^{x_0} "0";"2"};
{\ar_{i_0} "0";"4"};
{\ar^{y_0} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{1_{A^{\prime}}} "0";"10"};
{\ar^(0.7){b^{\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){o} "4";"14"};
{\ar^{c} "6";"16"};
{\ar^(0.3){x^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime}} "10";"14"};
{\ar^{y^{\prime}} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy
\end{equation}
for some $\mathcal{Y}^{\prime}_{\mathrm{f}},\mathcal{Y}^{\prime}_{\mathrm{b}},\mathcal{Z}^{\prime}_{\mathrm{f}},\mathcal{Z}^{\prime}_{\mathrm{b}}$, such that $\overline{b}=\overline{b^{\prime}}\circ\overline{b}_0$ holds in $h\mathscr{C}$.
\end{enumerate}
Dually, for any ${}_A\mathbf{S}_C=\SQ{\eta}{\xi}$ as above and any $c\in\mathscr{C}_1(C^{\prime},C)$, we may obtain a morphism of exact sequences of the form
\[ {}_{1_A}\mathscr{D}f^{\prime}_c=\mathscr{C}B{s_0(\xi)}{s_1(\xi)}{\mathcal{Z}_{\mathrm{f}}}{s_0(\eta)}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}\colon\mathbf{S}_0^{\prime}\to\mathbf{S} \]
in which $\mathscr{D}f^{\prime}|_{\Delta^1\times\{1\}\times\Delta^1}$ is a pull-back. In this case, we denote such $\mathbf{S}_0^{\prime}$ by $c^{\ast}\mathbf{S}$. It has a property dual to {\rm (2)}.
\end{lem}
\begin{proof}
{\rm (1)} By using the definition of a zero object, we can take a $3$-simplex
\[
\theta=\
\xy
(-3,7)*+{A}="0";
(-12,-6)*+{A^{\prime}}="2";
(10,-4)*+{C}="4";
(2,-16)*+{O}="6";
{\ar_{a} "0";"2"};
{\ar^{z} "0";"4"};
{\ar_(0.3){i} "0";"6"};
{\ar_(0.7){z_0}|!{"0";"6"}\hole "2";"4"};
{\ar_{i_0} "2";"6"};
{\ar_{j} "6";"4"};
\endxy
\]
such that $d_1(\theta)=\eta$, with some morphisms $i_0\in\mathscr{C}_1(A^{\prime},O)$ and $z_0\in\mathscr{C}_1(A^{\prime},C)$. Put $\nu=d_2(\theta)$. Since $\mathbf{P}$ is a push-out, we obtain a morphism
\[ \roundup{\varphi}{\psi}\colon\mathbf{P}\to
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{A^{\prime}}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{z_0} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\nu}} "10";"11"};
\endxy
\]
in the undercategory.
Then this gives a rectangle $\mathbf{R}=\RT{\psi}{\varphi}{\theta}$ with
$\mathbf{R}_{\mathrm{left}}=\mathbf{P}^t$, $\mathbf{R}_{\mathrm{out}}=\mathbf{S}^t$ and $\mathbf{R}_{\mathrm{right}}=(\mathbf{S}_0)^t$.
Since $\mathbf{P}$, $\mathbf{S}^t$ are push-outs, it follows that $x_0$ is ingressive and that $\mathbf{S}_0=(\mathbf{R}_{\mathrm{right}})^t$ is also a push-out. Thus $\mathbf{S}_0$ is an exact sequence, and $\mathscr{D}f=\mathscr{C}B{\varphi}{\psi}{s_2(\xi)}{\theta}{s_1(\eta)}{s_2(\eta)}\colon\mathbf{S}\to\mathbf{S}_0$ indeed gives a morphism with the stated property.
{\rm (2)} We may take a $4$-simplex
\[ \Omega=\Penta{A}{A^{\prime}}{O}{O^{\prime}}{C^{\prime}}{}{}{}{}{} \]
such that $d_1(\Omega)=\mathcal{Y}_{\mathrm{b}}$ and $d_2(\Omega)=\mathcal{X}_{\mathrm{b}}$, for arbitrarily chosen compatible $d_0(\Omega)$ and $d_4(\Omega)$ satisfying $d_3d_4(\Omega)=d_3(\theta)$ using the definition of zero object.
Applying Lemma~\ref{LemRectProl} to
\[ \mathbf{R}=\RT{\psi}{\varphi}{\theta},\ \ \roundup{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}\colon\mathbf{R}_{\mathrm{out}}\to\xy
(-6,6)*+{A}="0";
(6,6)*+{O}="2";
(-6,-6)*+{B}="4";
(6,-6)*+{C^{\prime}}="6";
{\ar^{i} "0";"2"};
{\ar_{x} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy,\ \ \wp=d_3(\Omega), \]
we obtain $4$-simplices $\Psi,\Phi,\Theta$ such that
$d_1(\Psi)=d_1(\Phi)$, $d_2(\Phi)=d_2(\Theta)$, $\roundup{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}=\roundup{d_2(\Psi)}{d_1(\Theta)}$, $d_3(\Theta)=\wp$, and $\RT{\psi}{\varphi}{\theta}=\RT{d_4(\Phi)}{d_4(\Psi)}{d_4(\Theta)}$.
If we put
\begin{eqnarray*}
&\mathbf{T}=\mathscr{C}f|_{\{0\}\times\Delta^1\times\Delta^1}=\SQ{d_3(\mathcal{X}_{\mathrm{f}})}{d_3(\mathcal{Y}_{\mathrm{f}})},&\\
&\mathbf{T}^{\prime}=\SQ{d_2(\mathcal{X}_{\mathrm{b}})}{d_2(\mathcal{Z}_{\mathrm{f}})}=\SQ{d_2(\mathcal{X}_{\mathrm{f}})}{d_2(\mathcal{Y}_{\mathrm{f}})},&
\end{eqnarray*}
then $\mathbf{T},\mathbf{T}^{\prime}$ and $\mathbf{P}$ can be regarded as objects in the quasi-category under
\[
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{A^{\prime}}="4";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
\endxy
\]
and there is a morphism $\roundup{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}\colon\mathbf{T}\to\mathbf{T}^{\prime}$.
By the construction so far, we also have $\roundup{d_3(\Phi)}{d_3(\Psi)}\colon\mathbf{P}\to\mathbf{T}^{\prime}$ in this undercategory.
Since $\mathbf{P}$ is a push-out, we obtain another morphism $\roundup{\kappa}{\lambda}\colon\mathbf{P}\to\mathbf{T}$.
Then, again by the definition of push-out, we obtain a $2$-simplex in this undercategory, namely a pair of $4$-simplices in $\mathscr{C}$
\[
\Gamma=\Penta{A}{A^{\prime}}{B_0}{B^{\prime}}{C^{\prime}}{a}{x_0}{}{}\ ,\ \
\Xi=\Penta{A}{B}{B_0}{B^{\prime}}{C^{\prime}}{x}{b_0}{}{}
\]
such that
\begin{eqnarray*}
&d_1(\Gamma)=d_1(\Xi), d_2(\Gamma)=\mathcal{X}_{\mathrm{f}}, d_2(\Xi)=\mathcal{Y}_{\mathrm{f}},&\\
&d_3(\Gamma)=d_3(\Phi), d_3(\Xi)=d_3(\Psi), d_4(\Gamma)=\kappa, d_4(\Xi)=\lambda.&
\end{eqnarray*}
Now
\[ \mathscr{C}f^{\prime}=\mathscr{C}B{s_0(\xi^{\prime})}{d_0(\Gamma)}{d_0(\Phi)}{s_0(\eta^{\prime})}{d_0(\Omega)}{d_0(\Theta)}\colon\mathbf{S}_0\to\mathbf{S}^{\prime} \]
gives a morphism of exact sequences with the stated properties.
\end{proof}
\begin{lem}\label{LemForSym}
Let ${}_A\mathbf{S}_C,{}_{A^{\prime}}\mathbf{S}^{\prime}_{C^{\prime}}$ be two exact sequences as in $(\ref{TwoExSeq})$, and let
\[ {}_a\mathscr{C}f_c=\mathscr{C}B{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}\colon\mathbf{S}\to\mathbf{S}^{\prime} \]
be a morphism as in $(\ref{MorphExSeq})$. If $C=C^{\prime}$ and $\overline{c}=\mathrm{id}_C$, then the following holds.
\begin{enumerate}
\item The square
\begin{equation}\label{CPB}
\mathscr{C}f|_{\{0\}\times\Delta^1\times\Delta^1}=\
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{A^{\prime}}="4";
(6,-6)*+{B^{\prime}}="6";
{\ar^{x} "0";"2"};
{\ar_{a} "0";"4"};
{\ar^{b} "2";"6"};
{\ar_{x^{\prime}} "4";"6"};
\endxy
\end{equation}
is a pull-back.
\item If moreover $b$ is egressive, then $(\ref{CPB})$ is an ambigressive push-out.
\end{enumerate}
\end{lem}
\begin{proof}
{\rm (1)} $\mathscr{C}f$ contains rectangles
\begin{eqnarray*}
&\mathbf{R}=\RT{\mathcal{Y}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}=\
\xy
(-12,6)*+{A}="0";
(0,6)*+{A^{\prime}}="2";
(12,6)*+{O^{\prime}}="4";
(-12,-6)*+{B}="10";
(0,-6)*+{B^{\prime}}="12";
(12,-6)*+{C}="14";
{\ar^{a} "0";"2"};
{\ar^{i^{\prime}} "2";"4"};
{\ar_{x} "0";"10"};
{\ar_{x^{\prime}} "2";"12"};
{\ar^{j^{\prime}} "4";"14"};
{\ar_{b} "10";"12"};
{\ar_{y^{\prime}} "12";"14"};
\endxy,
&\\
&\mathbf{R}^{\prime}=\RT{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}=\
\xy
(-12,6)*+{A}="0";
(0,6)*+{O}="2";
(12,6)*+{O^{\prime}}="4";
(-12,-6)*+{B}="10";
(0,-6)*+{C}="12";
(12,-6)*+{C}="14";
{\ar^{i} "0";"2"};
{\ar^{o} "2";"4"};
{\ar_{x} "0";"10"};
{\ar_{j} "2";"12"};
{\ar^{j^{\prime}} "4";"14"};
{\ar_{y} "10";"12"};
{\ar_{c} "12";"14"};
\endxy&
\end{eqnarray*}
which satisfy $(\mathbf{R}_{\mathrm{right}})^t=\mathbf{S}^{\prime}$, $\mathbf{R}_{\mathrm{out}}=\mathbf{R}^{\prime}_{\mathrm{out}}$ and $(\mathbf{R}^{\prime}_{\mathrm{left}})^t=\mathbf{S}$. Since $(\mathbf{R}_{\mathrm{right}})^t$ is an exact sequence as in Example~\ref{ExTrivExSeq}, it follows that $\mathscr{C}f|_{\{0\}\times\Delta^1\times\Delta^1}=(\mathbf{R}_{\mathrm{left}})^t$ is a pull-back.
{\rm (2)} is immediate from the definition of exact quasi-category.
\end{proof}
\begin{prop}\label{PropForSym}
Let
\[
\mathbf{S}=\SQ{\eta}{\xi}=
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\quad\text{and}\quad
\mathbf{S}^{\prime}=\SQ{\eta^{\prime}}{\xi^{\prime}}=
\xy
(-7,7)*+{A}="0";
(7,7)*+{B^{\prime}}="2";
(-7,-7)*+{O^{\prime}}="4";
(7,-7)*+{C}="6";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar|*+{_{z^{\prime}}} "0";"6"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi^{\prime}}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta^{\prime}}} "10";"11"};
\endxy
\]
be two exact sequences starting from $A$ and ending in $C$. Let
\begin{equation}\label{CubeForSym}
{}_{1_A}\mathscr{C}f_{1_C}=\mathscr{C}B{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{X}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A}="10";
(8,-7)*+{B^{\prime}}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{C}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{1_A} "0";"10"};
{\ar^(0.7){b}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){o} "4";"14"};
{\ar^{1_C} "6";"16"};
{\ar^(0.3){x^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime}} "10";"14"};
{\ar^{y^{\prime}} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy\ \ \colon\mathbf{S}\to\mathbf{S}^{\prime}
\end{equation}
be any morphism.
If moreover $b=\mathscr{C}f|_{\{(0,1)\}\times\Delta^1}$ is a homotopy equivalence in $\mathscr{C}$, then there also exists a morphism in the opposite direction ${}_{1_A}(\mathscr{C}f^{\prime})_{1_C}\colon\mathbf{S}^{\prime}\to\mathbf{S}$. In addition, $b^{\prime}=\mathscr{C}f^{\prime}|_{\{(0,1)\}\times\Delta^1}$ becomes a homotopy equivalence in $\mathscr{C}$.
\end{prop}
\begin{proof}
Put $\xi^{(1)}=d_2(\mathcal{Y}_{\mathrm{f}}),\eta^{(1)}=d_2(\mathcal{Y}_{\mathrm{b}}),\eta^{(2)}=d_1(\mathcal{X}_{\mathrm{b}})$, and put
\[
\mathbf{S}^{(1)}=\SQ{\eta^{(1)}}{\xi^{(1)}}=
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{O}="4";
(6,-6)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy,\ \
\mathbf{S}^{(2)}=\SQ{\eta^{(2)}}{\xi^{(1)}}=
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{O^{\prime}}="4";
(6,-6)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
\endxy.
\]
In the following rectangle, since $(\mathbf{R}_{\mathrm{left}})^t=\mathbf{S}$ and $(\mathbf{R}_{\mathrm{right}})^t$ are exact sequences,
\[
\mathbf{R}=\RT{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}{s_1(\eta^{(1)})}=\
\xy
(-12,6)*+{A}="0";
(0,6)*+{O}="2";
(12,6)*+{O}="4";
(-12,-6)*+{B}="10";
(0,-6)*+{C}="12";
(12,-6)*+{C}="14";
{\ar^{i} "0";"2"};
{\ar^{1_O} "2";"4"};
{\ar_{x} "0";"10"};
{\ar_{j} "2";"12"};
{\ar^{} "4";"14"};
{\ar_{y} "10";"12"};
{\ar_{1_C} "12";"14"};
\endxy
\]
so is $\mathbf{S}^{(1)}=(\mathbf{R}_{\mathrm{out}})^t$. Moreover,
\[ {}_{1_A}\mathscr{C}f^{(1)}_{1_C}=
\mathscr{C}B{s_0(\xi^{(1)})}{s_1(\xi^{(1)})}{\mathcal{Z}_{\mathrm{f}}}{s_0(\eta^{(1)})}{s_1(\eta^{(1)})}{\mathcal{Z}_{\mathrm{b}}}
\colon {}_A\mathbf{S}_C\to {}_A\mathbf{S}^{(1)}_C \]
is a morphism of exact sequences. By Proposition~\ref{PropReplZero}, the existence of the $3$-simplex $\mathcal{Y}_{\mathrm{b}}$ shows that $\mathbf{S}^{(2)}$ is also an exact sequence, and that
\[ {}_{1_A}\mathscr{C}f^{(2)}_{1_C}=
\mathscr{C}B{s_0(\xi^{(1)})}{s_1(\xi^{(1)})}{s_2(\xi^{(1)})}{s_0(\eta^{(2)})}{\mathcal{Y}_{\mathrm{b}}}{s_2(\eta^{(1)})}
\colon {}_A\mathbf{S}^{(1)}_C\to {}_A\mathbf{S}^{(2)}_C \]
is a morphism of exact sequences. Besides, there is another morphism
\[ {}_{1_A}\mathscr{C}f^{(3)}_{1_C}=
\mathscr{C}B{\mathcal{X}_{\mathrm{f}}}{\mathcal{Y}_{\mathrm{f}}}{s_2(\xi^{(1)})}{\mathcal{X}_{\mathrm{b}}}{s_1(\eta^{(2)})}{s_2(\eta^{(2)})}
\colon {}_A\mathbf{S}^{(2)}_C\to {}_A\mathbf{S}^{\prime}_C, \]
hence we have a sequence of morphisms
$\mathbf{S}\overset{\mathscr{C}f^{(1)}}{\longrightarrow}\mathbf{S}^{(1)}\overset{\mathscr{C}f^{(2)}}{\longrightarrow}\mathbf{S}^{(2)}\overset{\mathscr{C}f^{(3)}}{\longrightarrow}\mathbf{S}^{\prime}$.
By Lemma~\ref{LemForSym}, it follows that $\mathscr{C}f^{(3)}|_{\{0\}\times\mathscr{D}elta^1\times\mathscr{D}elta^1}$ is an ambigressive push-out. Thus $\mathscr{C}f^{(3)}$ satisfies the conditions required in Lemma~\ref{LemExSeqPO} {\rm (1)}. Thus by {\rm (2)} of the same lemma applied to $\mathscr{C}f^{(3)}$ and $\mathbf{I}_{\mathbf{S}^{(2)}}\colon\mathbf{S}^{(2)}\to\mathbf{S}^{(2)}$, we obtain a morphism ${}_{1_A}(\mathscr{C}f^{(3)\prime})_{1_C}\colon\mathbf{S}^{\prime}\to\mathbf{S}^{(2)}$. Similarly, by using the duals of Lemmas~\ref{LemExSeqPO} and \ref{LemForSym}, we obtain a morphism ${}_{1_A}(\mathscr{C}f^{(1)\prime})_{1_C}\colon\mathbf{S}^{(1)}\to\mathbf{S}$.
By Proposition~\ref{PropReplZero}, we also have a morphism ${}_{1_A}(\mathscr{C}f^{(2)\prime})_{1_C}\colon\mathbf{S}^{(2)}\to\mathbf{S}^{(1)}$.
Now by Proposition~\ref{PropComposeCubes} applied iteratively to the sequence of morphisms
\[ \mathbf{S}\overset{\mathscr{C}f^{(1)\prime}}{\longleftarrow}\mathbf{S}^{(1)}\overset{\mathscr{C}f^{(2)\prime}}{\longleftarrow}\mathbf{S}^{(2)}\overset{\mathscr{C}f^{(3)\prime}}{\longleftarrow}\mathbf{S}^{\prime}, \]
we obtain a morphism $\mathscr{C}f^{\prime}\colon\mathbf{S}^{\prime}\to\mathbf{S}$ as desired.
The last assertion for $b^{\prime}$ follows from Proposition~\ref{PropWIsom}.
\end{proof}
\begin{rem}\label{RemCi}
Proposition~\ref{PropForSym} also follows immediately from \cite[Corollary~3.5.12]{C}. In fact, the morphism $\mathscr{C}f\in\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})(\mathbf{S},\mathbf{S}^{\prime})$ satisfying the assumptions in Proposition~\ref{PropForSym} becomes a homotopy equivalence in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})(\mathbf{S},\mathbf{S}^{\prime})$ (hence in $\mathbb{E}sc$).
\end{rem}
\begin{rem}\label{RemForSym}
The assumption on $b$ in Proposition~\ref{PropForSym} is redundant. Indeed in Proposition~\ref{PropInvert} we will see that it holds automatically.
\end{rem}
\subsection{Splitting exact sequences}
\begin{dfn}\label{DefForSplit}
Let $A,C\in\mathscr{C}_0$ be any pair of objects, and let
\begin{equation}\label{2SimplexETA}
\TwoSP{A}{O}{C}{i}{j}{z}{\eta}
\end{equation}
be any $2$-simplex in $\mathscr{C}$, in which $O\in\mathscr{C}_0$ is a zero object. Let $A\overset{p_A}{\longleftarrow}A\times C\overset{p_C}{\longrightarrow}C$ and $A\overset{i_A}{\longrightarrow}A\amalg C\overset{i_C}{\longleftarrow}C$ be a product and a coproduct of $A$ and $C$, respectively.
\begin{enumerate}
\item By the definition of product, we obtain a pair of $2$-simplices as below.
\begin{equation}\label{ProdAC}
\xy
(0,7)*+{A}="0";
(-18,-7)*+{A}="2";
(0,-7)*+{A\times C}="4";
(18,-7)*+{C}="6";
{\ar_{1_A} "0";"2"};
{\ar^{u_A} "0";"4"};
{\ar^{z} "0";"6"};
{\ar^{p_A} "4";"2"};
{\ar_{p_C} "4";"6"};
(-13,-2)*+{}="10";
(-11,-7.5)*+{}="11";
{\ar@/^0.2pc/@{-}^{_{\mu_{A,C}}} "10";"11"};
(13,-2)*+{}="20";
(11,-7.5)*+{}="21";
{\ar@/_0.2pc/@{-}_{_{\pi_{A,C}}} "20";"21"};
\endxy
\end{equation}
\item Dually, we obtain a pair of $2$-simplices as below.
\[
\xy
(0,-7)*+{C}="0";
(-18,7)*+{A}="2";
(0,7)*+{A\amalg C}="4";
(18,7)*+{C}="6";
{\ar_{z} "2";"0"};
{\ar^{j_C} "4";"0"};
{\ar^{1_C} "6";"0"};
{\ar^(0.4){i_A} "2";"4"};
{\ar_(0.4){i_C} "6";"4"};
(-13,2)*+{}="10";
(-11,7.5)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\iota_{A,C}}} "10";"11"};
(13,2)*+{}="20";
(11,7.5)*+{}="21";
{\ar@/^0.2pc/@{-}^{_{\nu_{A,C}}} "20";"21"};
\endxy
\]
\end{enumerate}
We will use these symbols in what follows, often abbreviating $\pi_{A,C}$ to $\pi$ and so on.
\end{dfn}
\begin{prop}\label{PropForSplit}
Let $(\ref{2SimplexETA})$ be any $2$-simplex, in which $O\in\mathscr{C}_0$ is a zero object. Then the squares
\begin{equation}\label{SplitExSeq}
{}_A\mathbf{N}_C=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{A\times C}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^(0.4){u_A} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{p_C} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\pi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\ \ ,\ \
{}_A\mathbf{N}^{\prime}_C=\ \xy
(-7,7)*+{A}="0";
(7,7)*+{A\amalg C}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^(0.4){i_A} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{j_C} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\iota}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy
\end{equation}
are exact sequences.
\end{prop}
\begin{proof}
By using the definition of a zero object we may obtain a rectangle $\mathbf{R}$ of the following form
\[
\xy
(-16,7)*+{A}="0";
(0,7)*+{A\times C}="2";
(16,7)*+{A}="4";
(-16,-7)*+{O}="10";
(0,-7)*+{C}="12";
(16,-7)*+{O}="14";
{\ar_(0.4){u_A} "0";"2"};
{\ar_(0.6){p_A} "2";"4"};
{\ar@/^1.0pc/^{1_A} "0";"4"};
{\ar_{i} "0";"10"};
{\ar_{p_C} "2";"12"};
{\ar^{i} "4";"14"};
{\ar^{j} "10";"12"};
{\ar^{} "12";"14"};
{\ar@/_1.0pc/_{1_O} "10";"14"};
\endxy
\]
in which $\mathbf{R}_{\mathrm{left}}=\mathbf{N}$,
and $\mathbf{R}_{\mathrm{right}}$ is a pull-back. As in Example~\ref{ExTrivExSeq} we know that $\mathbf{R}_{\mathrm{right}}$ is an exact sequence, thus it follows that $\mathbf{N}$ is also a pull-back and $p_C$ is egressive. This shows that $\mathbf{N}$ is an exact sequence. Dually for $\mathbf{N}^{\prime}$.
\end{proof}
\begin{rem}
The above definition of ${}_A\mathbf{N}_C$ and ${}_A\mathbf{N}^{\prime}_C$ depends on the choice of $(\ref{2SimplexETA})$ and simplices taken. Later we will introduce an equivalence relation for exact sequences in Section~\ref{Section_EquivalenceRelation}, to show that their equivalence classes ${}_A\underline{\mathbf{N}}_C, {}_A\underline{\mathbf{N}^{\prime}}_C$ do not depend on these choices, and moreover satisfy ${}_A\underline{\mathbf{N}}_C={}_A\underline{\mathbf{N}^{\prime}}_C$.
\end{rem}
\begin{lem}\label{LemRelSplit}
Let ${}_A\mathbf{S}_C=\SQ{\eta}{\xi}$ be any exact sequence as in $(\ref{ExSeq})$. Let ${}_A\mathbf{N}_C=\SQ{\eta}{\pi},{}_A\mathbf{N}^{\prime}_C=\SQ{\eta}{\iota}$ be as in Proposition~\ref{PropForSplit}, obtained by using the $2$-simplex $\eta$. The following holds.
\begin{enumerate}
\item If $\overline{x}$ is a split monomorphism in $h\mathscr{C}$, then there is a $3$-simplex
\[
\varphi=\
\xy
(-3,7)*+{B}="0";
(-12,-6)*+{A}="2";
(10,-4)*+{C}="4";
(2,-16)*+{A\times C}="6";
{\ar^{x} "2";"0"};
{\ar^{y} "0";"4"};
{\ar_{} "0";"6"};
{\ar_{}|!{"0";"6"}\hole "2";"4"};
{\ar_{u_A} "2";"6"};
{\ar_{p_C} "6";"4"};
\endxy \]
such that $d_1(\varphi)=\pi_{A,C}$ and $d_2(\varphi)=\xi$.
\item Dually, if $\overline{y}$ is a split epimorphism in $h\mathscr{C}$, then there is a $3$-simplex $\psi$ such that $d_1(\psi)=\xi$ and $d_2(\psi)=\iota_{A,C}$.
\end{enumerate}
\end{lem}
\begin{proof}
Since {\rm (2)} can be shown dually, it is enough to show {\rm (1)}. Suppose that $\overline{x}$ is a split monomorphism in $h\mathscr{C}$. This means that there is a $2$-simplex $\omega$ of the following form.
\[ \TwoSP{A}{B}{A}{x}{r}{1_A}{\omega} \]
Then by the definition of product, we obtain a pair of $2$-simplices as below.
\begin{equation}\label{PAeKS1}
\xy
(0,7)*+{B}="0";
(-18,-7)*+{A}="2";
(0,-7)*+{A\times C}="4";
(18,-7)*+{C}="6";
{\ar_{r} "0";"2"};
{\ar^{b} "0";"4"};
{\ar^{y} "0";"6"};
{\ar^{p_A} "4";"2"};
{\ar_{p_C} "4";"6"};
(-13,-2)*+{}="10";
(-11,-7.5)*+{}="11";
{\ar@/^0.2pc/@{-}^{} "10";"11"};
(13,-2)*+{}="20";
(11,-7.5)*+{}="21";
{\ar@/_0.2pc/@{-}_{} "20";"21"};
\endxy
\end{equation}
Also, $\omega$ and $\xi$ give the following pair of $2$-simplices.
\begin{equation}\label{PAeKS2}
\xy
(0,7)*+{A}="0";
(-18,-7)*+{A}="2";
(0,-7)*+{B}="4";
(18,-7)*+{C}="6";
{\ar_{1_A} "0";"2"};
{\ar^{x} "0";"4"};
{\ar^{z} "0";"6"};
{\ar^{r} "4";"2"};
{\ar_{y} "4";"6"};
(-13,-2)*+{}="10";
(-11,-7.5)*+{}="11";
{\ar@/^0.2pc/@{-}^{\omega} "10";"11"};
(13,-2)*+{}="20";
(11,-7.5)*+{}="21";
{\ar@/_0.2pc/@{-}_{\xi} "20";"21"};
\endxy
\end{equation}
By the definition of product, $(\ref{PAeKS1})$ and $(\ref{PAeKS2})$ give a pair of $3$-simplices as below,
\[
\varphi^{\prime}=\
\xy
(-2,7)*+{A}="0";
(-12,-6)*+{A}="2";
(10,-4)*+{B}="4";
(2,-16)*+{A\times C}="6";
{\ar_{1_A} "0";"2"};
{\ar^{x} "0";"4"};
{\ar_(0.7){u_A} "0";"6"};
{\ar_(0.3){r}|!{"0";"6"}\hole "4";"2"};
{\ar^{p_A} "6";"2"};
{\ar^{b} "4";"6"};
\endxy\ \ ,\ \ \varphi=\
\xy
(2,7)*+{A}="0";
(12,-6)*+{C}="2";
(-10,-4)*+{B}="4";
(-2,-16)*+{A\times C}="6";
{\ar^{z} "0";"2"};
{\ar_{x} "0";"4"};
{\ar^(0.7){u_A} "0";"6"};
{\ar^(0.3){y}|!{"0";"6"}\hole "4";"2"};
{\ar_{p_C} "6";"2"};
{\ar_{b} "4";"6"};
\endxy
\]
such that $d_3(\varphi)=d_3(\varphi^{\prime})$ holds and the pairs $(d_k(\varphi^{\prime}),d_k(\varphi))$ $(k=0,1,2)$ agree with $(\ref{PAeKS1}),(\ref{ProdAC}),(\ref{PAeKS2})$, respectively. This $\varphi$ satisfies the stated properties.
\end{proof}
\begin{prop}\label{PropRelSplit}
Let $(\ref{2SimplexETA})$ be any $2$-simplex in which $O\in\mathscr{C}_0$ is a zero object, and let ${}_A\mathbf{N}_C,{}_A\mathbf{N}^{\prime}_C$ be as in Proposition~\ref{PropForSplit}.
Then there exists a $3$-simplex
\[
\aleph_{A,C}=\
\xy
(-3,8)*+{A\amalg C}="0";
(-15,-9)*+{A}="2";
(13,-5)*+{C}="4";
(2,-19)*+{A\times C}="6";
{\ar^{i_A} "2";"0"};
{\ar^{j_C} "0";"4"};
{\ar_(0.37){\upsilon_{A,C}} "0";"6"};
{\ar_(0.4){z}|!{"0";"6"}\hole "2";"4"};
{\ar_{u_A} "2";"6"};
{\ar_{p_C} "6";"4"};
\endxy
\]
such that $d_1(\aleph_{A,C})=\pi_{A,C}$, $d_2(\aleph_{A,C})=\iota_{A,C}$ and that $\overline{\upsilon_{A,C}}$ is an isomorphism in $h\mathscr{C}$.
\end{prop}
\begin{proof}
Take any $2$-simplex as below.
\[
\xy
(-7,7)*+{C}="0";
(-7,-7)*+{O}="4";
(7,-7)*+{A}="6";
{\ar_{i^{\prime}} "0";"4"};
{\ar^{z^{\prime}} "0";"6"};
{\ar_{j^{\prime}} "4";"6"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta^{\prime}}} "10";"11"};
\endxy
\]
By the definition of a coproduct, we obtain a pair of $2$-simplices
\[
\xy
(0,-7)*+{A}="0";
(-18,7)*+{A}="2";
(0,7)*+{A\amalg C}="4";
(18,7)*+{C}="6";
{\ar_{1_A} "2";"0"};
{\ar^{j_A} "4";"0"};
{\ar^{z^{\prime}} "6";"0"};
{\ar^(0.4){i_A} "2";"4"};
{\ar_(0.4){i_C} "6";"4"};
(-13,2)*+{}="10";
(-11,7.5)*+{}="11";
{\ar@/_0.2pc/@{-}_{} "10";"11"};
(13,2)*+{}="20";
(11,7.5)*+{}="21";
{\ar@/^0.2pc/@{-}^{} "20";"21"};
\endxy
\]
similarly as in Definition~\ref{DefForSplit} {\rm (2)}.
Then we have $\overline{j_A}\circ\overline{i_A}=\mathrm{id}_A$ in $h\mathscr{C}$, hence by Lemma~\ref{LemRelSplit} we obtain a $3$-simplex $\aleph_{A,C}$ with $d_1(\aleph_{A,C})=\pi_{A,C},d_2(\aleph_{A,C})=\iota_{A,C}$. As in the proof of Lemma~\ref{LemRelSplit}, the morphism $\upsilon_{A,C}=d_0d_3(\aleph_{A,C})$ is obtained as a morphism appearing in the following
\[
\xy
(0,8)*+{A\amalg C}="0";
(-18,-7)*+{A}="2";
(0,-7)*+{A\times C}="4";
(18,-7)*+{C}="6";
{\ar_{j_A} "0";"2"};
{\ar_{\upsilon_{A,C}} "0";"4"};
{\ar^{j_C} "0";"6"};
{\ar^{p_A} "4";"2"};
{\ar_{p_C} "4";"6"};
(-13,-2)*+{}="10";
(-11,-7.5)*+{}="11";
{\ar@/^0.2pc/@{-}^{} "10";"11"};
(13,-2)*+{}="20";
(11,-7.5)*+{}="21";
{\ar@/_0.2pc/@{-}_{} "20";"21"};
\endxy
\]
by construction. In the homotopy category $h\mathscr{C}$, the morphism $\mathfrak{v}=\overline{\upsilon_{A,C}}$ is the unique one which makes
\begin{equation}\label{UniqueComm}
\xy
(0,8)*+{A\amalg C}="0";
(-18,-7)*+{A}="2";
(0,-7)*+{A\times C}="4";
(18,-7)*+{C}="6";
(12,2)*+{}="3";
(-12,2)*+{}="5";
{\ar_{\overline{j_A}} "0";"2"};
{\ar_{\mathfrak{v}} "0";"4"};
{\ar^{\overline{j_C}} "0";"6"};
{\ar^{\overline{p_A}} "4";"2"};
{\ar_{\overline{p_C}} "4";"6"};
{\ar@{}|\circlearrowright "3";"4"};
{\ar@{}|\circlearrowright "4";"5"};
\endxy
\end{equation}
commutative, since the bottom row gives a product of $A$ and $C$ in $h\mathscr{C}$. By the additivity of $h\mathscr{C}$, this forces $\mathfrak{v}$ to be an isomorphism.
\end{proof}
\begin{rem}\label{Remv}
In the sequel we will continue to use the symbol $\upsilon_{A,C}$ for a morphism which gives the unique (iso)morphism $\mathfrak{v}=\overline{\upsilon_{A,C}}$ in $h\mathscr{C}$ making $(\ref{UniqueComm})$ commutative.
\end{rem}
\begin{cor}\label{CorRelSplit}
Let ${}_A\mathbf{S}_C=\SQ{\eta}{\xi}, {}_A\mathbf{N}_C=\SQ{\eta}{\pi}$ and ${}_A\mathbf{N}^{\prime}_C=\SQ{\eta}{\iota}$ be as in Lemma~\ref{LemRelSplit}. Then, the following are equivalent.
\begin{itemize}
\item[{\rm (i)}] There exists a morphism
\begin{equation}\label{MorphSN1}
{}_a\mathscr{C}f_c=\
\xy
(-9,10)*+{A}="0";
(9,10)*+{B}="2";
(2,4)*+{O}="4";
(20,4)*+{C}="6";
(-9,-8)*+{A}="10";
(9,-8)*+{A\times C}="12";
(2,-14)*+{O}="14";
(20,-14)*+{C}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.7){b}|!{(10,4);(14,4)}\hole "2";"12"};
{\ar_(0.3){o} "4";"14"};
{\ar^{c} "6";"16"};
{\ar^(0.3){u_A}|!{(2,-4);(2,-8)}\hole "10";"12"};
{\ar_{i} "10";"14"};
{\ar^(0.6){p_C} "12";"16"};
{\ar_{j} "14";"16"};
\endxy\ \ \colon\mathbf{S}\to\mathbf{N}
\end{equation}
such that $\overline{a}=\mathrm{id}_A$, $\overline{c}=\mathrm{id}_C$.
\item[{\rm (i)$^{\prime}$}] There exists a morphism ${}_{a^{\prime}}\mathscr{C}f^{\prime}_{c^{\prime}}\colon\mathbf{N}^{\prime}\to\mathbf{S}$ such that $\overline{a^{\prime}}=\mathrm{id}_A$, $\overline{c^{\prime}}=\mathrm{id}_C$.
\item[{\rm (ii)}] There exists a morphism ${}_{1_A}\mathscr{C}f_{1_C}\colon\mathbf{S}\to\mathbf{N}$ in which $b=\mathscr{C}f|_{\{(0,1)\}\times\Delta^1}$ is a homotopy equivalence.
\item[{\rm (ii)$^{\prime}$}] There exists a morphism ${}_{1_A}\mathscr{C}f^{\prime}_{1_C}\colon\mathbf{N}^{\prime}\to\mathbf{S}$ in which $b^{\prime}=\mathscr{C}f^{\prime}|_{\{(0,1)\}\times\Delta^1}$ is a homotopy equivalence.
\item[{\rm (iii)}] $\overline{x}$ is a split monomorphism in $h\mathscr{C}$.
\item[{\rm (iii)$^{\prime}$}] $\overline{y}$ is a split epimorphism in $h\mathscr{C}$.
\end{itemize}
Remark that the conditions {\rm (iii)} and {\rm (iii)$^{\prime}$} are independent of the choices of $\mathbf{N}$ and $\mathbf{N}^{\prime}$.
\end{cor}
\begin{proof}
Equivalence $\mathrm{(ii)}\Leftrightarrow\mathrm{(ii)^{\prime}}$ follows from Proposition~\ref{PropComposeCubes}, Propositions~\ref{PropForSym} and \ref{PropRelSplit}. Since $\mathrm{(i)^{\prime}}\Leftrightarrow\mathrm{(ii)^{\prime}}\Leftrightarrow\mathrm{(iii)^{\prime}}$ can be shown dually, it is enough to show $\mathrm{(i)}\Leftrightarrow\mathrm{(ii)}\Leftrightarrow\mathrm{(iii)}$.
Remark that $\mathrm{(ii)}\Rightarrow\mathrm{(i)}$ is trivial, and that $\mathrm{(iii)}\Rightarrow\mathrm{(i)}$ follows from Lemma~\ref{LemRelSplit}.
Also, $\mathrm{(i)}\Rightarrow\mathrm{(iii)}$ is immediate. Indeed if there is a morphism $(\ref{MorphSN1})$ as in {\rm (i)}, then we have $\overline{p_A}\circ\overline{b}\circ\overline{x}=\overline{p_A}\circ\overline{u_A}\circ\overline{a}=\mathrm{id}_A$, which in particular means that $\overline{x}$ is a split monomorphism.
It remains to show $\mathrm{(i)}\Rightarrow\mathrm{(ii)}$.
Suppose that we have a morphism $(\ref{MorphSN1})$ satisfying $\overline{a}=\mathrm{id}_A$ and $\overline{c}=\mathrm{id}_C$. By Corollary~\ref{CorComposeCubes}, we may assume $a=1_A$ and $c=1_C$ from the beginning. Let us show that $\overline{b}$ becomes an isomorphism in $h\mathscr{C}$. Remark that the $2$-simplex $\pi_{A,C}$ in $(\ref{ProdAC})$ induces a split short exact sequence
\[ A\overset{\overline{u_A}}{\longrightarrow}A\times C\overset{\overline{p_C}}{\longrightarrow}C \]
in $h\mathscr{C}$. Replacing it by the isomorphic one
$A\overset{\left[\begin{smallmatrix}1\\0\end{smallmatrix}\right]}{\longrightarrow}A\oplus C\overset{[0\ 1]}{\longrightarrow}C$,
we see that $\mathscr{C}f$ induces the following commutative diagram in $h\mathscr{C}$,
\begin{equation}\label{PAeKSComm}
\xy
(-14,6)*+{A}="2";
(0,6)*+{B}="4";
(14,6)*+{C}="6";
(-14,-6)*+{A}="12";
(0,-6)*+{A\oplus C}="14";
(14,-6)*+{C}="16";
{\ar^{\overline{x}} "2";"4"};
{\ar^{\overline{y}} "4";"6"};
{\ar@{=} "2";"12"};
{\ar^{\left[\begin{smallmatrix} \mathfrak{r}\\\overline{y}\end{smallmatrix}\right]} "4";"14"};
{\ar@{=} "6";"16"};
{\ar_(0.4){\left[\begin{smallmatrix}1\\0\end{smallmatrix}\right]} "12";"14"};
{\ar_(0.6){[0\ 1]} "14";"16"};
{\ar@{}|\circlearrowright "2";"14"};
{\ar@{}|\circlearrowright "4";"16"};
\endxy
\end{equation}
where we put $\mathfrak{r}=\overline{p_A}\circ\overline{b}\in (h\mathscr{C})(B,A)$. It suffices to show that $\left[\begin{smallmatrix}\mathfrak{r}\\\overline{y}\end{smallmatrix}\right]$ is an isomorphism.
Since the top row of $(\ref{PAeKSComm})$ is a weak cokernel sequence, there is some $\mathfrak{w}\in(h\mathscr{C})(C,B)$ such that $\mathrm{id}_B-\overline{x}\circ\mathfrak{r}=\mathfrak{w}\circ\overline{y}$. This shows that $[\overline{x}\ \mathfrak{w}]\colon A\oplus C\to B$ gives a left inverse of $\left[\begin{smallmatrix}\mathfrak{r}\\\overline{y}\end{smallmatrix}\right]$. On the other hand, since the right square of $(\ref{PAeKSComm})$ is a weak push-out by the dual of Lemma~\ref{LemForSym} {\rm (1)}, it can be verified that $\left[\begin{smallmatrix}\mathfrak{r}\\\overline{y}\end{smallmatrix}\right]$ is an epimorphism, by a straightforward argument in the ordinary additive category $h\mathscr{C}$. Thus $\left[\begin{smallmatrix}\mathfrak{r}\\\overline{y}\end{smallmatrix}\right]$ is an isomorphism.
\end{proof}
\begin{dfn}\label{DefSplit}
An exact sequence $\mathbf{S}$ is said to \emph{split}, if it satisfies condition {\rm (iii)} (or other equivalent conditions) in Corollary~\ref{CorRelSplit}.
\end{dfn}
\begin{cor}\label{CorSplitEachOther}
Let $A,C\in\mathscr{C}_0$ be any pair of objects, and let ${}_A\mathbf{S}_C,{}_A\mathbf{S}^{\prime}_C$ be exact sequences starting from $A$ and ending in $C$. Suppose that there exists a morphism ${}_{1_A}\mathscr{C}f_{1_C}\colon\mathbf{S}\to\mathbf{S}^{\prime}$. If one of ${}_A\mathbf{S}_C,{}_A\mathbf{S}^{\prime}_C$ splits, then so does the other.
\end{cor}
\begin{proof}
This can be easily checked by using {\rm (iii)} and {\rm (iii)$^{\prime}$} in Corollary~\ref{CorRelSplit}.
\end{proof}
\subsection{Equivalence of exact sequences}\label{Section_EquivalenceRelation}
\begin{lem}\label{LemInvert}
Let ${}_A\mathbf{S}_C$ be any exact sequence as in $(\ref{ExSeq})$. Then as $x_{\ast}\mathbf{S}$ of Lemma~\ref{LemExSeqPO}, we may take a splitting one. Dually, $y^{\ast}\mathbf{S}$ can be chosen to split.
\end{lem}
\begin{proof}
By \cite[Lemma~4.6]{B1}, we have a pull-back of the following form.
\[
\xy
(-7,7)*+{A\times B}="0";
(7,7)*+{B}="2";
(-7,-7)*+{B}="4";
(7,-7)*+{C}="6";
{\ar^(0.7){p_B} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_{y} "4";"6"};
\endxy
\]
If we use this pull-back, the resulting $y^{\ast}\mathbf{S}$ becomes of the form
\[
\xy
(-7,7)*+{A}="0";
(7,7)*+{A\times B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{B}="6";
{\ar^{} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{p_B} "2";"6"};
{\ar_{} "4";"6"};
\endxy,
\]
which obviously satisfies {\rm (iii)$^{\prime}$} of Corollary~\ref{CorRelSplit}. Dually for $x_{\ast}\mathbf{S}$.
\end{proof}
\begin{rem}\label{RemInvert}
Later in Corollary~\ref{CorFunctoriality} {\rm (1)} we will see that all $x_{\ast}\mathbf{S}$ become \emph{equivalent}, and hence in particular always split, independently of the choices made. Similarly for $y^{\ast}\mathbf{S}$.
\end{rem}
\begin{prop}\label{PropInvert}
For any morphism of exact sequences ${}_{1_A}\mathscr{C}f_{1_C}\colon\mathbf{S}\to\mathbf{S}^{\prime}$, the morphism $b=\mathscr{C}f|_{\{(0,1)\}\times\Delta^1}$ becomes a homotopy equivalence.
Thus the assumption on $b$ in Proposition~\ref{PropForSym} is always satisfied, hence there exists a morphism in the opposite direction ${}_{1_A}\mathscr{C}f^{\prime}_{1_C}\colon\mathbf{S}^{\prime}\to\mathbf{S}$.
\end{prop}
\begin{proof}
Label $\mathscr{C}f$ as in $(\ref{CubeForSym})$.
Take $x_{\ast}\mathbf{S}$ and $x_{\ast}\mathbf{S}^{\prime}$. By Lemma~\ref{LemInvert}, we may choose $x_{\ast}\mathbf{S}$ to split. There are morphisms
\[ {}_x\mathscr{D}f_{1_C}\colon\mathbf{S}\to x_{\ast}\mathbf{S}
\quad\text{and}\quad
{}_x\mathscr{D}f^{\prime}_{1_C}\colon\mathbf{S}^{\prime}\to x_{\ast}\mathbf{S}^{\prime} \]
as in Lemma~\ref{LemExSeqPO} {\rm (1)}. By Proposition~\ref{PropComposeCubes} applied to $\mathbf{S}\overset{{}_{1_A}\mathscr{C}f_{1_C}}{\longrightarrow}\mathbf{S}^{\prime}\overset{{}_x(\mathscr{D}f^{\prime})_{1_C}}{\longrightarrow}x_{\ast}\mathbf{S}^{\prime}$, we obtain a morphism ${}_{x}\mathscr{C}f^{\prime}_{1_C}\colon\mathbf{S}\to x_{\ast}\mathbf{S}^{\prime}$.
Thus by Lemma~\ref{LemExSeqPO} {\rm (2)}, we obtain a morphism ${}_{1_B}\mathscr{C}f^{\prime\prime}_{1_C}\colon x_{\ast}\mathbf{S}\to x_{\ast}\mathbf{S}^{\prime}$.
Since $x_{\ast}\mathbf{S}$ splits, it follows that $x_{\ast}\mathbf{S}^{\prime}$ also splits by Corollary~\ref{CorSplitEachOther}.
Let us show that $\overline{b}$ is an isomorphism in $h\mathscr{C}$. If we label simplices of $\mathscr{D}f^{\prime}$ as below,
\[
\xy
(-8,9)*+{A}="0";
(8,9)*+{B^{\prime}}="2";
(1,3)*+{O^{\prime}}="4";
(17,3)*+{C}="6";
(-8,-7)*+{B}="10";
(8,-7)*+{M}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{C}="16";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_(0.3){j^{\prime}} "4";"6"};
{\ar_{x} "0";"10"};
{\ar^(0.7){b_M}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){1_{O^{\prime}}} "4";"14"};
{\ar^{1_C} "6";"16"};
{\ar^(0.3){x_M}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{} "10";"14"};
{\ar^{y_M} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy
\]
then we have the following commutative diagram
\[
\xy
(-12,6)*+{A}="2";
(0,6)*+{B^{\prime}}="4";
(12,6)*+{C}="6";
(-12,-6)*+{B}="12";
(0,-6)*+{M}="14";
(12,-6)*+{C}="16";
{\ar^{\overline{x^{\prime}}} "2";"4"};
{\ar^{\overline{y^{\prime}}} "4";"6"};
{\ar_{\overline{x}} "2";"12"};
{\ar^{\overline{b_M}} "4";"14"};
{\ar@{=} "6";"16"};
{\ar_{\overline{x_M}} "12";"14"};
{\ar_{\overline{y_M}} "14";"16"};
{\ar@{}|\circrclearrowright "2";"14"};
{\ar@{}|\circrclearrowright "4";"16"};
\endxy
\]
in $h\mathscr{C}$. Since $x_{\ast}\mathbf{S}^{\prime}$ splits, there is some $\mathfrak{r}\in(h\mathscr{C})(M,B)$ such that $\mathfrak{r}\circ\overline{x_M}=\mathrm{id}_B$. Since $A\overset{\overline{x}}{\longrightarrow}B\overset{\overline{y}}{\longrightarrow}C$ is a weak cokernel sequence, there exists $\mathfrak{d}\in(h\mathscr{C})(C,B)$ such that
$\mathrm{id}_B-\mathfrak{r}\circ\overline{b_M}\circ\overline{b}=\mathfrak{d}\circ\overline{y}$.
If we put $\mathfrak{b}=\mathfrak{r}\circ\overline{b_M}+\mathfrak{d}\circ\overline{y^{\prime}}\in(h\mathscr{C})(B^{\prime},B)$, then it satisfies
\[ \mathfrak{b}\circ\overline{b}=\mathfrak{r}\circ\overline{b_M}\circ\overline{b}+\mathfrak{d}\circ\overline{y^{\prime}}\circ\overline{b}=\mathrm{id}_B, \]
which shows that $\mathfrak{b}$ is a left inverse of $\overline{b}$.
A dual argument using $y^{\prime\ast}\mathbf{S}$ and $y^{\prime\ast}\mathbf{S}^{\prime}$ shows that $\overline{b}$ has also a right inverse, and thus it is an isomorphism.
\end{proof}
\begin{dfn}\label{DefEquivExSeq}
Let $A,C\in\mathscr{C}_0$ be any pair of objects. Let
\begin{equation}\label{TwoExSeqEquiv}
{}_A\mathbf{S}_C=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
\endxy
\ ,\ \
{}_A\mathbf{S}^{\prime}_C=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B^{\prime}}="2";
(-7,-7)*+{O^{\prime}}="4";
(7,-7)*+{C}="6";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
\endxy
\end{equation}
be two exact sequences starting from $A$ and ending in $C$. We say \emph{$\mathbf{S}$ is equivalent to $\mathbf{S}^{\prime}$} if there is a morphism of exact sequences ${}_{1_A}\mathscr{C}f_{1_C}\colon\mathbf{S}\to\mathbf{S}^{\prime}$. In this case we write $\mathbf{S}\sim\mathbf{S}^{\prime}$.
By Corollary~\ref{CorComposeCubes}, this is equivalent to the existence of a morphism ${}_a\mathscr{C}f^{\prime}_c\colon \mathbf{S}\to\mathbf{S}^{\prime}$ satisfying $\overline{a}=\mathrm{id}_A$ and $\overline{c}=\mathrm{id}_C$.
\end{dfn}
\begin{prop}\label{PropEquivExSeq}
For any $A,C\in\mathscr{C}_0$, relation $\sim$ is an equivalence relation.
\end{prop}
\begin{proof}
Reflexive law is obvious, since there is always $\mathbf{I}_{\mathbf{S}}\colon\mathbf{S}\to\mathbf{S}$ for any exact sequence $\mathbf{S}$ as in Example~\ref{ExTrivCubes}. Symmetric law and transitive law follow from Propositions~\ref{PropInvert} and \ref{PropComposeCubes}, respectively.
\end{proof}
\subsection{(Co)products of exact sequences}
We begin with the following consequence of \cite[Corollary~5.1.2.3]{L1}.
Let
\begin{equation}\label{PairOfExSeqForProd}
\mathbf{S}_k=\
\xy
(-7,7)*+{A_k}="0";
(7,7)*+{B_k}="2";
(-7,-7)*+{O_k}="4";
(7,-7)*+{C_k}="6";
{\ar^{x_k} "0";"2"};
{\ar_{i_k} "0";"4"};
{\ar|*+{_{z_k}} "0";"6"};
{\ar^{y_k} "2";"6"};
{\ar_{j_k} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi_k}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta_k}} "10";"11"};
\endxy
\quad(k=1,2)
\end{equation}
be any pair of squares. By \cite[Corollary~5.1.2.3]{L1}, a product $\mathbf{S}_1\times\mathbf{S}_2$ of $\mathbf{S}_1,\mathbf{S}_2$ in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$ can be obtained by taking products on each vertex of $\Delta^1\times\Delta^1$ as follows,
\begin{equation}\label{ProdExSeqPair}
\mathbf{S}_1\times\mathbf{S}_2=\
\xy
(-11,11)*+{A_1\times A_2}="0";
(11,11)*+{B_1\times B_2}="2";
(-11,-11)*+{O_1\times O_2}="4";
(11,-11)*+{C_1\times C_2}="6";
{\ar^{x_1\times x_2} "0";"2"};
{\ar_{i_1\times i_2} "0";"4"};
{\ar|*+{_{z_1\times z_2}} "0";"6"};
{\ar^{y_1\times y_2} "2";"6"};
{\ar_{j_1\times j_2} "4";"6"};
(3,11)*+{}="00";
(11,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi_1\times \xi_2}} "00";"01"};
(-3,-11)*+{}="10";
(-11,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta_1\times \eta_2}} "10";"11"};
\endxy
\end{equation}
together with projections $\mathbf{Q}_k\in\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})_1(\mathbf{S}_1\times\mathbf{S}_2,\mathbf{S}_k)$ $(k=1,2)$. In particular, for any square $\mathbf{S}$ and any pair of morphisms $\mathscr{C}f_k\in\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})_1(\mathbf{S},\mathbf{S}_k)$ $(k=1,2)$, we obtain a morphism $(\mathscr{C}f_1,\mathscr{C}f_2)$ and a pair of $2$-simplices
\[
\xy
(0,12)*+{\mathbf{S}}="0";
(-22,-7)*+{\mathbf{S}_1}="2";
(0,-7)*+{\mathbf{S}_1\times \mathbf{S}_2}="4";
(22,-7)*+{\mathbf{S}_2}="6";
{\ar_{\mathscr{C}f_1} "0";"2"};
{\ar|*+{_{(\mathscr{C}f_1,\mathscr{C}f_2)}} "0";"4"};
{\ar^{\mathscr{C}f_2} "0";"6"};
{\ar^{\mathbf{Q}_1} "4";"2"};
{\ar_{\mathbf{Q}_2} "4";"6"};
(-16,-0.5)*+{}="10";
(-13,-7.5)*+{}="11";
{\ar@/^0.2pc/@{-}^{} "10";"11"};
(16,-0.5)*+{}="20";
(13,-7.5)*+{}="21";
{\ar@/_0.2pc/@{-}_{} "20";"21"};
\endxy
\]
in $\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})$.
Dually, we have a coproduct
\[
\mathbf{S}_1\amalg\mathbf{S}_2=\
\xy
(-11,11)*+{A_1\amalg A_2}="0";
(11,11)*+{B_1\amalg B_2}="2";
(-11,-11)*+{O_1\amalg O_2}="4";
(11,-11)*+{C_1\amalg C_2}="6";
{\ar^{x_1\amalg x_2} "0";"2"};
{\ar_{i_1\amalg i_2} "0";"4"};
{\ar|*+{_{z_1\amalg z_2}} "0";"6"};
{\ar^{y_1\amalg y_2} "2";"6"};
{\ar_{j_1\amalg j_2} "4";"6"};
(3,11)*+{}="00";
(11,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi_1\amalg \xi_2}} "00";"01"};
(-3,-11)*+{}="10";
(-11,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta_1\amalg \eta_2}} "10";"11"};
\endxy.
\]
For any square $\mathbf{S}$ and any $\mathscr{C}f_k\in\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})_1(\mathbf{S}_k,\mathbf{S})$ $(k=1,2)$, we have $\mathscr{C}f_1\cup\mathscr{C}f_2\in\mathrm{Fun}(\Delta^1\times\Delta^1,\mathscr{C})_1(\mathbf{S}_1\amalg\mathbf{S}_2,\mathbf{S})$ with a property dual to the above.
Again by \cite[Corollary~5.1.2.3]{L1}, if $\mathbf{S}_1,\mathbf{S}_2$ are pull-backs, then so is $\mathbf{S}_1\times\mathbf{S}_2$. Similarly, if $\mathbf{S}_1,\mathbf{S}_2$ are push-outs, then so is $\mathbf{S}_1\amalg\mathbf{S}_2$.
\begin{prop}\label{PropPCrod}
If $\mathbf{S}_1,\mathbf{S}_2$ are exact sequences, then so are $\mathbf{S}_1\times\mathbf{S}_2$ and $\mathbf{S}_1\amalg\mathbf{S}_2$.
\end{prop}
\begin{proof}
Let $\mathbf{S}_1,\mathbf{S}_2$ be as in $(\ref{PairOfExSeqForProd})$. Since egressive morphisms are stable under taking pull-backs, it follows that $y_1\times 1_{A_2}$ and $1_{B_1}\times y_2$ are egressive morphisms. Since there is a $2$-simplex
\[
\xy
(-12,11)*+{A_1\times A_2}="0";
(-12,-6)*+{B_1\times A_2}="4";
(12,-6)*+{B_1\times B_2}="6";
{\ar_{y_1\times 1_{A_2}} "0";"4"};
{\ar_{1_{B_1}\times y_2} "4";"6"};
{\ar^{y_1\times y_2} "0";"6"};
(-4,-6.2)*+{}="10";
(-12.2,1.8)*+{}="11";
{\ar@/_0.3pc/@{-}_{_{}} "10";"11"};
\endxy
\]
in $\mathscr{C}$, it follows that $y_1\times y_2$ is also egressive. Since $j_1\times j_2$ in $(\ref{ProdExSeqPair})$ is ingressive because $O_1\times O_2$ is a zero object, we see that $\mathbf{S}_1\times\mathbf{S}_2$ is an exact sequence. Dually for $\mathbf{S}_1\amalg\mathbf{S}_2$.
\end{proof}
\begin{ex}\label{ExProdExSeq}
Let ${}_A\mathbf{S}_C$ be any exact sequence as in $(\ref{ExSeq})$. From $\mathbf{I}_{\mathbf{S}}\colon\mathbf{S}\to\mathbf{S}$, we obtain the following morphisms of exact sequences, which we denote by $\nabla_{\mathbf{S}}=\mathbf{I}_{\mathbf{S}}\cup\mathbf{I}_{\mathbf{S}}\colon\mathbf{S}\amalg\mathbf{S}\to\mathbf{S}$ and $\Delta_{\mathbf{S}}=(\mathbf{I}_{\mathbf{S}},\mathbf{I}_{\mathbf{S}})\colon\mathbf{S}\to\mathbf{S}\times\mathbf{S}$, respectively.
\[
\xy
(-11,18)*+{A\amalg A}="0";
(11,18)*+{B\amalg B}="2";
(0,8)*+{O\amalg O}="4";
(22,8)*+{C\amalg C}="6";
(-11,-2)*+{A}="10";
(11,-2)*+{B}="12";
(0,-12)*+{O}="14";
(22,-12)*+{C}="16";
{\ar^{x\amalg x} "0";"2"};
{\ar^(0.6){i\amalg i} "0";"4"};
{\ar^(0.6){y\amalg y} "2";"6"};
{\ar_{} "4";"6"};
{\ar_{\nabla_A} "0";"10"};
{\ar^(0.75){\nabla_B}|!{(9,8);(13,8)}\hole "2";"12"};
{\ar_(0.3){\nabla_O} "4";"14"};
{\ar^{\nabla_C} "6";"16"};
{\ar_(0.7){x}|!{(0,0);(0,-4)}\hole "10";"12"};
{\ar_{i} "10";"14"};
{\ar_(0.4){y} "12";"16"};
{\ar_{j} "14";"16"};
\endxy
\quad,\quad
\xy
(-11,18)*+{A}="0";
(11,18)*+{B}="2";
(0,8)*+{O}="4";
(22,8)*+{C}="6";
(-11,-2)*+{A\times A}="10";
(11,-2)*+{B\times B}="12";
(0,-12)*+{O\times O}="14";
(22,-12)*+{C\times C}="16";
{\ar^{x} "0";"2"};
{\ar^(0.6){i} "0";"4"};
{\ar^(0.6){y} "2";"6"};
{\ar^(0.3){j} "4";"6"};
{\ar_{\Delta_A} "0";"10"};
{\ar^(0.75){\Delta_B}|!{(9,8);(13,8)}\hole "2";"12"};
{\ar_(0.25){\Delta_O} "4";"14"};
{\ar^{\Delta_C} "6";"16"};
{\ar_(0.6){}|!{(0,0);(0,-4)}\hole "10";"12"};
{\ar_{i\times i} "10";"14"};
{\ar_(0.4){y\times y} "12";"16"};
{\ar_{j\times j} "14";"16"};
\endxy
\]
\end{ex}
\begin{prop}\label{PropProdExSeq1}
Let
\[
\mathscr{C}f_k=\ \xy
(-8,9)*+{A_k}="0";
(8,9)*+{B_k}="2";
(1,3)*+{O_k}="4";
(17,3)*+{C_k}="6";
(-8,-7)*+{A_k^{\prime}}="10";
(8,-7)*+{B_k^{\prime}}="12";
(1,-13)*+{O_k^{\prime}}="14";
(17,-13)*+{C_k^{\prime}}="16";
{\ar^{x_k} "0";"2"};
{\ar_{i_k} "0";"4"};
{\ar^{y_k} "2";"6"};
{\ar_(0.3){j_k} "4";"6"};
{\ar_{a_k} "0";"10"};
{\ar^(0.7){b_k}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){o_k} "4";"14"};
{\ar^{c_k} "6";"16"};
{\ar^(0.3){x_k^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i_k^{\prime}} "10";"14"};
{\ar^{y_k^{\prime}} "12";"16"};
{\ar_{j_k^{\prime}} "14";"16"};
\endxy
\ \ \colon\mathbf{S}_k\to\mathbf{S}_k^{\prime}\quad(k=1,2)
\]
be any pair of morphisms of exact sequences.
Then we have the following morphism of exact sequences $\mathbf{S}_1\amalg\mathbf{S}_2\to\mathbf{S}_1^{\prime}\times\mathbf{S}_2^{\prime}$
\begin{equation}\label{MorphUps}
\xy
(-12,20)*+{A_1\amalg A_2}="0";
(12,20)*+{B_1\amalg B_2}="2";
(3,10)*+{O_1\amalg O_2}="4";
(27,10)*+{C_1\amalg C_2}="6";
(-12,-2)*+{A_1^{\prime}\times A_2^{\prime}}="10";
(12,-2)*+{B_1^{\prime}\times B_2^{\prime}}="12";
(3,-12)*+{O_1^{\prime}\times O_2^{\prime}}="14";
(27,-12)*+{C_1^{\prime}\times C_2^{\prime}}="16";
{\ar^{x_1\amalg x_2} "0";"2"};
{\ar^(0.6){i_1\amalg i_2} "0";"4"};
{\ar^(0.6){y_1\amalg y_2} "2";"6"};
{\ar_(0.6){j_1\amalg j_2} "4";"6"};
{\ar_{\upsilon_{a_1,a_2}} "0";"10"};
{\ar^(0.75){\upsilon_{b_1,b_2}}|!{(9,10);(13,10)}\hole "2";"12"};
{\ar_(0.25){\upsilon_{o_1,o_2}} "4";"14"};
{\ar^{\upsilon_{c_1,c_2}} "6";"16"};
{\ar^(0.4){x_1^{\prime}\times x_2^{\prime}}|!{(4,0);(4,-4)}\hole "10";"12"};
{\ar_{i_1^{\prime}\times i_2^{\prime}} "10";"14"};
{\ar^{y_1^{\prime}\times y_2^{\prime}} "12";"16"};
{\ar_{j_1^{\prime}\times j_2^{\prime}} "14";"16"};
\endxy
\end{equation}
in which $\upsilon_{a_1,a_2}$ gives the unique morphism $\overline{\upsilon_{a_1,a_2}}=\overline{a_1}\oplus\overline{a_2}\in(h\mathscr{C})(A_1\oplus A_2,A_1^{\prime}\oplus A_2^{\prime})$ in the homotopy category, and similarly for $\upsilon_{b_1,b_2},\upsilon_{c_1,c_2},\upsilon_{o_1,o_2}$.
\end{prop}
\begin{proof}
As in Example~\ref{ExTrivCubes}, we have morphisms
\[ \mathbf{I}_k^{\prime}=\mathbf{I}_{\mathbf{S}^{\prime}_k}\colon\mathbf{S}^{\prime}_k\to\mathbf{S}^{\prime}_k,\quad\ \mathscr{C}f_k^{\prime}\colon\mathbf{S}^{\prime}_k\to\mathbf{O},\quad\mathscr{C}f^{\prime\prime}_k\colon\mathbf{O}\to\mathbf{S}^{\prime}_k. \]
By Proposition~\ref{PropComposeCubes} applied to $\mathbf{S}^{\prime}_1\overset{\mathscr{C}f_1^{\prime}}{\longrightarrow}\mathbf{O}\overset{\mathscr{C}f_2^{\prime\prime}}{\longrightarrow}\mathbf{S}^{\prime}_2$, we obtain a morphism $\mathbf{Z}_1^{\prime}\colon\mathbf{S}_1^{\prime}\to\mathbf{S}_2^{\prime}$. Similarly for $\mathbf{Z}_2^{\prime}\colon\mathbf{S}_2^{\prime}\to\mathbf{S}_1^{\prime}$.
Then we have morphisms
\[ (\mathbf{I}_1^{\prime},\mathbf{Z}_1^{\prime})\colon\mathbf{S}_1^{\prime}\to\mathbf{S}_1^{\prime}\times\mathbf{S}_2^{\prime}\quad\text{and}\quad(\mathbf{Z}_2^{\prime},\mathbf{I}_2^{\prime})\colon\mathbf{S}_2^{\prime}\to\mathbf{S}_1^{\prime}\times\mathbf{S}_2^{\prime}. \]
By Proposition~\ref{PropComposeCubes} applied to $\mathbf{S}_1\overset{\mathscr{C}f_1}{\longrightarrow}\mathbf{S}^{\prime}_1\overset{(\mathbf{I}_1^{\prime},\mathbf{Z}_1^{\prime})}{\longrightarrow}\mathbf{S}^{\prime}_1\times\mathbf{S}^{\prime}_2$ and $\mathbf{S}_2\overset{\mathscr{C}f_2}{\longrightarrow}\mathbf{S}^{\prime}_2\overset{(\mathbf{Z}_2^{\prime},\mathbf{I}_2^{\prime})}{\longrightarrow}\mathbf{S}^{\prime}_1\times\mathbf{S}^{\prime}_2$ respectively, we obtain morphisms $\mathbf{F}_1\colon\mathbf{S}_1\to\mathbf{S}^{\prime}_1\times\mathbf{S}^{\prime}_2$ and $\mathbf{F}_2\colon\mathbf{S}_2\to\mathbf{S}^{\prime}_1\times\mathbf{S}^{\prime}_2$.
Then we obtain a morphism $\mathbf{F}_1\cup\mathbf{F}_2\colon\mathbf{S}_1\amalg\mathbf{S}_2\to\mathbf{S}^{\prime}_1\times\mathbf{S}^{\prime}_2$ as in $(\ref{MorphUps})$ with the stated property.
\end{proof}
\begin{cor}\label{CorProdExSeq1}
For any pair of squares $\mathbf{S}_k$ $(k=1,2)$ as in $(\ref{PairOfExSeqForProd})$, we obtain the following morphism $\mathbf{S}_1\amalg\mathbf{S}_2\to\mathbf{S}_1\times\mathbf{S}_2$
\[
\xy
(-12,20)*+{A_1\amalg A_2}="0";
(12,20)*+{B_1\amalg B_2}="2";
(3,10)*+{O_1\amalg O_2}="4";
(27,10)*+{C_1\amalg C_2}="6";
(-12,-2)*+{A_1\times A_2}="10";
(12,-2)*+{B_1\times B_2}="12";
(3,-12)*+{O_1\times O_2}="14";
(27,-12)*+{C_1\times C_2}="16";
{\ar^{x_1\amalg x_2} "0";"2"};
{\ar^(0.6){i_1\amalg i_2} "0";"4"};
{\ar^(0.6){y_1\amalg y_2} "2";"6"};
{\ar_(0.6){j_1\amalg j_2} "4";"6"};
{\ar_{\upsilon_{A_1,A_2}} "0";"10"};
{\ar^(0.75){\upsilon_{B_1,B_2}}|!{(9,10);(13,10)}\hole "2";"12"};
{\ar_(0.25){\upsilon_{O_1,O_2}} "4";"14"};
{\ar^{\upsilon_{C_1,C_2}} "6";"16"};
{\ar^(0.4){x_1\times x_2}|!{(4,0);(4,-4)}\hole "10";"12"};
{\ar_{i_1\times i_2} "10";"14"};
{\ar^{y_1\times y_2} "12";"16"};
{\ar_{j_1\times j_2} "14";"16"};
\endxy
\]
in which, the vertical morphisms are homotopy equivalences as in Remark~\ref{Remv}.
\end{cor}
\begin{proof}
This follows from Proposition~\ref{PropProdExSeq1} applied to $\mathbf{I}_{\mathbf{S}_1}\colon\mathbf{S}_1\to\mathbf{S}_1$ and $\mathbf{I}_{\mathbf{S}_2}\colon\mathbf{S}_2\to\mathbf{S}_2$.
\end{proof}
\section{Extriangulated structure on the homotopy category}\label{Section_ExtriangulatedStructure}
By definition, an extriangulated category is an additive category equipped with a structure $(\mathbb{E},\mathfrak{s})$ satisfying some conditions. For the convenience of the reader, we write them down in the case of the homotopy category $h\mathscr{C}$. For the detail, see \circte{NP}.
\begin{dfn}\label{DefEquivSeq}$($\cite[Definition~2.7]{NP}$)$
For a pair of objects $A,C\in h\mathscr{C}$, two consecutive morphisms $A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C$ and $A\overset{\mathfrak{x}^{\prime}}{\longrightarrow}B^{\prime}\overset{\mathfrak{y}^{\prime}}{\longrightarrow}C$ are said to be \emph{equivalent} if there is an isomorphism $\mathfrak{b}\in(h\mathscr{C})(B,B^{\prime})$ such that $\mathfrak{b}\circ\mathfrak{x}=\mathfrak{x}^{\prime}$ and $\mathfrak{y}^{\prime}\circ\mathfrak{b}=\mathfrak{y}$.
In the sequel, the equivalence class to which a sequence $A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C$ belongs will be denoted by $[A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C]$.
\end{dfn}
\begin{dfn}\label{DefAddReal}$($\cite[Definitions~2.9 and 2.10]{NP}$)$
Suppose that there is a biadditive functor $\mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times(h\mathscr{C})\to\mathit{Ab}$ to the category of abelian groups $\mathit{Ab}$. Let $\mathfrak{s}$ be a correspondence which associates an equivalence class $\mathfrak{s}(\delta)=[A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C]$ to each element $\delta\in\mathbb{E}(C,A)$. Such $\mathfrak{s}$ is called an \emph{additive realization of $\mathbb{E}$} if it satisfies the following conditions.
\begin{itemize}
\item[{\rm (i)}] Let $\delta\in\mathbb{E}(C,A)$ and $\delta^{\prime}\in\mathbb{E}(C^{\prime},A^{\prime})$ be any pair of elements, with
\begin{equation}\label{PairReal}
\mathfrak{s}(\delta)=[A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C],\ \ \mathfrak{s}(\delta^{\prime})=[A^{\prime}\overset{\mathfrak{x}^{\prime}}{\longrightarrow}B^{\prime}\overset{\mathfrak{y}^{\prime}}{\longrightarrow}C^{\prime}].
\end{equation}
Then, for any pair of morphisms $\mathfrak{a}\in(h\mathscr{C})(A,A^{\prime}),\mathfrak{c}\in(h\mathscr{C})(C,C^{\prime})$ satisfying $\mathbb{E}(C,\mathfrak{a})(\delta)=\mathbb{E}(\mathfrak{c},A^{\prime})(\delta^{\prime})$, there exists a morphism $\mathfrak{b}\in(h\mathscr{C})(B,B^{\prime})$ such that $\mathfrak{b}\circ\mathfrak{x}=\mathfrak{x}^{\prime}\circ\mathfrak{a}$ and $\mathfrak{y}^{\prime}\circ\mathfrak{b}=\mathfrak{c}\circ\mathfrak{y}$.
\item[{\rm (ii)}] For any $A,C\in h\mathscr{C}$, the zero element $0\in\mathbb{E}(C,A)$ satisfies
\[ \mathfrak{s}(0)=[A\overset{\left[\begin{smallmatrix} 1\\0\end{smallmatrix}\right]}{\longrightarrow}A\oplus C\overset{[0\ 1]}{\longrightarrow}C]. \]
\item[{\rm (iii)}] For any pair of elements $\delta\in\mathbb{E}(C,A)$ and $\delta^{\prime}\in\mathbb{E}(C^{\prime},A^{\prime})$ with $(\ref{PairReal})$, we have
\[ \mathfrak{s}(\delta\oplus\delta^{\prime})=[A\oplus A^{\prime}\overset{\mathfrak{x}\oplus\mathfrak{x}^{\prime}}{\longrightarrow}B\oplus B^{\prime}\overset{\mathfrak{y}\oplus\mathfrak{y}^{\prime}}{\longrightarrow}C\oplus C^{\prime}]. \]
Here $\delta\oplus\delta^{\prime}\in\mathbb{E}(C\oplus C^{\prime},A\oplus A^{\prime})$ denotes the element which corresponds to $(\delta,0,0,\delta^{\prime})$ through the natural isomorphism
\[ \mathbb{E}(C\oplus C^{\prime},A\oplus A^{\prime})\cong\mathbb{E}(C,A)\oplus\mathbb{E}(C,A^{\prime})\oplus\mathbb{E}(C^{\prime},A)\oplus\mathbb{E}(C^{\prime},A^{\prime}) \]
induced from the biadditivity of $\mathbb{E}$.
\end{itemize}
If $\mathfrak{s}$ is an additive realization, any pair $(A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C,\delta)$ of a sequence $A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C$ and an element $\delta\in\mathbb{E}(C,A)$ satisfying $\mathfrak{s}(\delta)=[A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C]$ is called an \emph{$\mathfrak{s}$-triangle} (or simply an \emph{extriangle} if $\mathfrak{s}$ is obvious from the context), and abbreviately expressed by $A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C\overset{\delta}{\dashrightarrow}$.
\end{dfn}
\begin{dfn}\label{DefExtriangulation}$($\cite[Definition~2.12]{NP}$)$
A triplet $(h\mathscr{C},\mathbb{E},\mathfrak{s})$ is called an \emph{extriangulated category} if it satisfies the following conditions. In this case, the pair $(\mathbb{E},\mathfrak{s})$ is called an \emph{external triangulation} of $h\mathscr{C}$.
\begin{itemize}
\item[{\rm (ET1)}] $\mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times(h\mathscr{C})\to\mathit{Ab}$ is a biadditive functor.
\item[{\rm (ET2)}] $\mathfrak{s}$ is an additive realization of $\mathbb{E}$.
\item[{\rm (ET3)}] Let $\delta\in\mathbb{E}(C,A)$ and $\delta^{\prime}\in\mathbb{E}(C^{\prime},A^{\prime})$ be any pair of elements with $(\ref{PairReal})$.
Then, for any commutative square
\begin{equation}\label{SquareForET3}
\xy
(-12,6)*+{A}="0";
(0,6)*+{B}="2";
(12,6)*+{C}="4";
(-12,-6)*+{A^{\prime}}="10";
(0,-6)*+{B^{\prime}}="12";
(12,-6)*+{C^{\prime}}="14";
{\ar^{\mathfrak{x}} "0";"2"};
{\ar^{\mathfrak{y}} "2";"4"};
{\ar_{\mathfrak{a}} "0";"10"};
{\ar^{\mathfrak{b}} "2";"12"};
{\ar_{\mathfrak{x}^{\prime}} "10";"12"};
{\ar_{\mathfrak{y}^{\prime}} "12";"14"};
{\ar@{}|{\circlearrowright} "0";"12"};
\endxy
\end{equation}
in $h\mathscr{C}$, there exists $\mathfrak{c}\in(h\mathscr{C})(C,C^{\prime})$ such that $\mathfrak{c}\circ\mathfrak{y}=\mathfrak{y}^{\prime}\circ\mathfrak{b}$ and $\mathbb{E}(C,\mathfrak{a})(\delta)=\mathbb{E}(\mathfrak{c},A^{\prime})(\delta^{\prime})$.
\item[{\rm (ET3)$^\mathrm{op}$}] Dual of {\rm (ET3)}.
\item[{\rm (ET4)}] Let $\delta\in\mathbb{E}(D,A)$ and $\delta^{\prime}\in\mathbb{E}(F,B)$ be any pair of elements, with
\[ \mathfrak{s}(\delta)=[A\overset{\mathfrak{f}}{\longrightarrow}B\overset{\mathfrak{f}^{\prime}}{\longrightarrow}D]\ \ \text{and}\ \ \mathfrak{s}(\delta^{\prime})=[B\overset{\mathfrak{g}}{\longrightarrow}C\overset{\mathfrak{g}^{\prime}}{\longrightarrow}F]. \]
Then there exist an object $E\in h\mathscr{C}$, a commutative diagram
\begin{equation}\label{DiagET4}
\xy
(-18,6)*+{A}="0";
(-6,6)*+{B}="2";
(6,6)*+{D}="4";
(-18,-6)*+{A}="10";
(-6,-6)*+{C}="12";
(6,-6)*+{E}="14";
(-6,-18)*+{F}="22";
(6,-18)*+{F}="24";
{\ar^{\mathfrak{f}} "0";"2"};
{\ar^{\mathfrak{f}^{\prime}} "2";"4"};
{\ar@{=} "0";"10"};
{\ar_{\mathfrak{g}} "2";"12"};
{\ar^{\mathfrak{d}} "4";"14"};
{\ar_{\mathfrak{h}} "10";"12"};
{\ar_{\mathfrak{h}^{\prime}} "12";"14"};
{\ar_{\mathfrak{g}^{\prime}} "12";"22"};
{\ar^{\mathfrak{e}} "14";"24"};
{\ar@{=} "22";"24"};
{\ar@{}|{\circlearrowright} "0";"12"};
{\ar@{}|{\circlearrowright} "2";"14"};
{\ar@{}|{\circlearrowright} "12";"24"};
\endxy
\end{equation}
in $h\mathscr{C}$, and an element $\delta^{\prime\prime}\in\mathbb{E}(E,A)$ such that $\mathfrak{s}(\delta^{\prime\prime})=[A\overset{\mathfrak{h}}{\longrightarrow}C\overset{\mathfrak{h}^{\prime}}{\longrightarrow}E]$, which satisfy the following compatibilities.
\begin{itemize}
\item[{\rm (i)}] $\mathfrak{s}(\mathbb{E}(F,\mathfrak{f}^{\prime})(\delta^{\prime}))=[D\overset{\mathfrak{d}}{\longrightarrow}E\overset{\mathfrak{e}}{\longrightarrow}F]$,
\item[{\rm (ii)}] $\mathbb{E}(\mathfrak{d},A)(\delta^{\prime\prime})=\delta$,
\item[{\rm (iii)}] $\mathbb{E}(E,\mathfrak{f})(\delta^{\prime\prime})=\mathbb{E}(\mathfrak{e},B)(\delta^{\prime})$.
\end{itemize}
\item[{\rm (ET4)$^\mathrm{op}$}] Dual of {\rm (ET4)}.
\end{itemize}
\end{dfn}
In the rest of this section, we will show that $h\mathscr{C}$ can be equipped with an external triangulation $(\mathbb{E},\mathfrak{s})$, for any exact quasi-category $\mathscr{C}$.
\subsection{Construction of the functor $\mathbb{E}$}
Firstly, in this subsection we construct a functor $\mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times(h\mathscr{C})\to\mathit{Set}$ to the category $\mathit{Set}$ of sets.
\begin{dfn}\label{DefEofCA}
Let $A,C\in\mathscr{C}_0$. Define $\mathbb{E}(C,A)$ to be the set of equivalence classes of exact sequences $\mathbf{S}$ starting from $A$ and ending in $C$, modulo the equivalence relation $\sim$ defined in Definition~\ref{DefEquivExSeq}.
For each exact sequence ${}_A\mathbf{S}_C$, let $\delta=\underline{\mathbf{S}}$ denote its equivalence class with respect to $\sim$. When we emphasize the end-objects $A$ and $C$, we write ${}_A\delta_C={}_A\underline{\mathbf{S}}_C$.
\end{dfn}
We make use of the notation $a_{\ast}\mathbf{S}$ from Lemma~\ref{LemExSeqPO}.
\begin{prop}\label{PropFunctoriality}
Suppose that we have ${}_A\mathbf{S}_C\sim {}_A\mathbf{S}^{\prime}_C$, and let
\[
\xy
(-7,7)*+{A}="0";
(-7,-7)*+{A^{\prime}}="4";
(7,-7)*+{A^{\prime\prime}}="6";
{\ar_{a_1} "0";"4"};
{\ar^{a_3} "0";"6"};
{\ar_{a_2} "4";"6"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\alpha}} "10";"11"};
\endxy
\]
be any $2$-simplex in $\mathscr{C}$. Then we have $a_{2\ast}a_{1\ast}\mathbf{S}^{\prime}\sim a_{3\ast}\mathbf{S}$.
\end{prop}
\begin{proof}
By the assumption, there is a morphism ${}_{1_A}\mathscr{C}f_{1_C}\colon\mathbf{S}\to\mathbf{S}^{\prime}$.
By the definition of $a_{1\ast}\mathbf{S}^{\prime}$, $a_{2\ast}a_{1\ast}\mathbf{S}^{\prime}$ and $a_{3\ast}\mathbf{S}$, there are morphisms of exact sequences
\[
{}_{a_1}\mathscr{D}f^{(1)}_{1_C}\colon\mathbf{S}^{\prime}\to a_{1\ast}\mathbf{S}^{\prime},\ \
{}_{a_2}\mathscr{D}f^{(2)}_{1_C}\colon a_{1\ast}\mathbf{S}^{\prime}\to a_{2\ast}a_{1\ast}\mathbf{S}^{\prime},\ \
{}_{a_3}\mathscr{D}f^{(3)}_{1_C}\colon\mathbf{S}\to a_{3\ast}\mathbf{S}
\]
in which $\mathscr{D}f^{(k)}|_{\{0\}\times\Delta^1\times\Delta^1}$ $(k=1,2,3)$ are push-outs.
By Proposition~\ref{PropComposeCubes} applied iteratively to $\mathbf{S}\overset{\mathscr{C}f}{\longrightarrow}\mathbf{S}^{\prime}\overset{\mathscr{D}f^{(1)}}{\longrightarrow}a_{1\ast}\mathbf{S}^{\prime}\overset{\mathscr{D}f^{(2)}}{\longrightarrow}a_{2\ast}a_{1\ast}\mathbf{S}^{\prime}$, we obtain a morphism ${}_{a_3}\mathscr{C}f^{\prime}_{1_C}\colon\mathbf{S}\to a_{2\ast}a_{1\ast}\mathbf{S}^{\prime}$.
Then by Lemma~\ref{LemExSeqPO} {\rm (2)}, we obtain a morphism ${}_{1_{A^{\prime\prime}}}\mathscr{C}f^{\prime\prime}_{1_C}\colon a_{3\ast}\mathbf{S}\to a_{2\ast}a_{1\ast}\mathbf{S}^{\prime}$.
This shows $a_{3\ast}\mathbf{S}\sim a_{2\ast}a_{1\ast}\mathbf{S}^{\prime}$.
\end{proof}
\begin{cor}\label{CorFunctoriality}
We have the following.
\begin{enumerate}
\item If morphisms $a,a^{\prime}\in\mathscr{C}_1(A,A^{\prime})$ and exact sequences ${}_A\mathbf{S}_C,{}_A\mathbf{S}^{\prime}_C$ satisfy $\overline{a}=\overline{a^{\prime}}$ and $\mathbf{S}\sim\mathbf{S}^{\prime}$, then $a_{\ast}\mathbf{S}\sim a^{\prime}_{\ast}\mathbf{S}^{\prime}$. Thus for any $\mathfrak{a}=\overline{a}\in(h\mathscr{C})(A,A^{\prime})$ and any $C\in\mathscr{C}_0$, the map
\[ \mathfrak{a}_{\ast}\colon\mathbb{E}(C,A)\to\mathbb{E}(C,A^{\prime})\ ;\ \delta=\underline{\mathbf{S}}\mapsto\mathfrak{a}_{\ast}\delta=\underline{a_{\ast}\mathbf{S}} \]
is well-defined.
\item The maps obtained in {\rm (1)} satisfy the following.
\begin{itemize}
\item[{\rm (i)}] For any $A,C\in\mathscr{C}_0$, we have $(\mathrm{id}_A)_{\ast}=\mathrm{id}_{\mathbb{E}(C,A)}\colon\mathbb{E}(C,A)\to\mathbb{E}(C,A)$.
\item[{\rm (ii)}] For any $\mathfrak{a}\in (h\mathscr{C})(A,A^{\prime}),\mathfrak{a}^{\prime}\in (h\mathscr{C})(A^{\prime},A^{\prime\prime})$ and any $C\in\mathscr{C}_0$, we have $(\mathfrak{a}^{\prime}\circ\mathfrak{a})_{\ast}=\mathfrak{a}^{\prime}_{\ast}\circ\mathfrak{a}_{\ast}\colon\mathbb{E}(C,A)\to\mathbb{E}(C,A^{\prime\prime})$.
\end{itemize}
\end{enumerate}
\end{cor}
\begin{proof}
{\rm (1)} and {\rm (ii)} of {\rm (2)} are immediate from Proposition~\ref{PropFunctoriality}. {\rm (i)} of {\rm (2)} is also obvious by the existence of $\mathbf{I}_{\mathbf{S}}$ for any ${}_A\mathbf{S}_C$.
\end{proof}
\begin{lem}\label{LemET2}
Let ${}_A\mathbf{S}_C,{}_{A^{\prime}}\mathbf{S}^{\prime}_{C^{\prime}}$ be any pair of exact sequences, and put $\delta=\underline{\mathbf{S}},\delta^{\prime}=\underline{\mathbf{S}^{\prime}}$. Let $\mathfrak{a}\in (h\mathscr{C})(A,A^{\prime})$ and $\mathfrak{c}\in (h\mathscr{C})(C,C^{\prime})$ be any pair of morphisms in the homotopy category. The following are equivalent.
\begin{enumerate}
\item $\mathfrak{a}_{\ast}\delta=\mathfrak{c}^{\ast}\delta^{\prime}$.
\item There exists a morphism ${}_a\mathscr{C}f_c\colon\mathbf{S}\to\mathbf{S}^{\prime}$
such that $\overline{a}=\mathfrak{a}$ and $\overline{c}=\mathfrak{c}$.
\end{enumerate}
\end{lem}
\begin{proof}
$(1)\Rightarrow(2)$.
Take any $a\in\mathscr{C}_1(A,A^{\prime})$ and $c\in\mathscr{C}_1(C,C^{\prime})$ such that $\mathfrak{a}=\overline{a}$ and $\mathfrak{c}=\overline{c}$. By definition, there exist morphisms $\mathbf{S}\overset{{}_a\mathscr{D}f_{1_C}}{\longrightarrow}a_{\ast}\mathbf{S}\overset{{}_{1_{A^{\prime}}}\mathscr{C}f^{(1)}_{1_C}}{\longrightarrow}c^{\ast}\mathbf{S}^{\prime}\overset{{}_{1_{A^{\prime}}}\mathscr{D}f^{\prime}_c}{\longrightarrow}\mathbf{S}^{\prime}$ of exact sequences. By Proposition~\ref{PropComposeCubes}, we obtain a morphism ${}_a\mathscr{C}f_c\colon\mathbf{S}\to\mathbf{S}^{\prime}$ as desired.
$(2)\Rightarrow(1)$. Let ${}_a\mathscr{C}f_c\colon\mathbf{S}\to\mathbf{S}^{\prime}$ be a morphism as in $(\ref{MorphExSeq})$ such that $\overline{a}=\mathfrak{a}$, $\overline{c}=\mathfrak{c}$. By Lemma~\ref{LemExSeqPO}, we obtain a morphism ${}_{1_{A^{\prime}}}\mathscr{C}f^{\prime}_{c}\colon a_{\ast}\mathbf{S}\to\mathbf{S}^{\prime}$.
Then by the dual of Lemma~\ref{LemExSeqPO} applied to $\mathscr{C}f^{\prime}$ and a pull-back along $c$, we obtain a morphism ${}_{1_{A^{\prime}}}\mathscr{C}f^{\prime\prime}_{1_C}\colon a_{\ast}\mathbf{S}\to c^{\ast}\mathbf{S}^{\prime}$.
Thus $a_{\ast}\mathbf{S}\sim c^{\ast}\mathbf{S}^{\prime}$ follows.
\end{proof}
\begin{prop}\label{PropEFtr}
For any $\mathfrak{a}\in(h\mathscr{C})(A,A^{\prime})$ and $\mathfrak{c}\in(h\mathscr{C})(C^{\prime},C)$, we have
\[ \mathfrak{a}_{\ast}\mathfrak{c}^{\ast}=\mathfrak{c}^{\ast}\mathfrak{a}_{\ast}\colon\mathbb{E}(C,A)\to\mathbb{E}(C^{\prime},A^{\prime}). \]
\end{prop}
\begin{proof}
Take any $a\in\mathscr{C}_1(A,A^{\prime})$ and $c\in\mathscr{C}_1(C^{\prime},C)$ so that $\mathfrak{a}=\overline{a}$ and $\mathfrak{c}=\overline{c}$ hold. Let ${}_A\mathbf{S}_{C}$ be any exact sequence starting from $A$ and ending in $C$. By Lemma~\ref{LemExSeqPO} {\rm (1)} and its dual, we obtain morphisms of exact sequences
\[ c^{\ast}\mathbf{S}\overset{{}_{1_A}\mathscr{D}f^{\prime}_{c}}{\longrightarrow}\mathbf{S}\overset{{}_{a}\mathscr{D}f_{1_{C}}}{\longrightarrow}a_{\ast}\mathbf{S}. \]
By Proposition~\ref{PropComposeCubes}, we obtain a morphism ${}_{a}\mathscr{C}f^{\prime\prime}_{c}\colon c^{\ast}\mathbf{S}\to a_{\ast}\mathbf{S}$. Lemma~\ref{LemET2} shows
$\mathfrak{a}_{\ast}(\mathfrak{c}^{\ast}\underline{\mathbf{S}})=\mathfrak{c}^{\ast}(\mathfrak{a}_{\ast}\underline{\mathbf{S}})$.
\end{proof}
The argument so far gives the following functor.
\begin{dfn}\label{DefEFtr}
Functor $\mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times h\mathscr{C}\to\mathit{Set}$ is defined by the following.
\begin{itemize}
\item For an object $(C,A)\in(h\mathscr{C})^\mathrm{op}\times(h\mathscr{C})$, the set $\mathbb{E}(C,A)$ is the one defined in Definition~\ref{DefEofCA}.
\item For a morphism $(\mathfrak{c},\mathfrak{a})\in\big((h\mathscr{C})^\mathrm{op}\times(h\mathscr{C})\big)\big((C,A),(C^{\prime},A^{\prime})\big)$, the map
\[ \mathbb{E}(\mathfrak{c},\mathfrak{a})\colon\mathbb{E}(C,A)\to\mathbb{E}(C^{\prime},A^{\prime}) \]
is the one defined by $\mathbb{E}(\mathfrak{c},\mathfrak{a})=\mathfrak{c}^{\ast}\mathfrak{a}_{\ast}=\mathfrak{a}_{\ast}\mathfrak{c}^{\ast}$.
\end{itemize}
\end{dfn}
\subsection{Biadditivity of $\mathbb{E}$}
In this subsection we will show that the functor $\mathbb{E}$ obtained in Definition~\ref{DefEFtr} indeed factors through $\mathit{Ab}$.
\begin{dfn}\label{DefZeroElement}
By Corollary~\ref{CorSplitEachOther}, the equivalence class of the splitting exact sequences forms one element in $\mathbb{E}(C,A)$ for each $A,C\in h\mathscr{C}$. We denote this particular element by ${}_A0_C$. Especially we have ${}_A0_C={}_A\underline{\mathbf{N}}_C= {}_A\underline{\mathbf{N}^{\prime}}_C$.
\end{dfn}
\begin{rem}\label{RemZeroElement}
It is not hard to see that $\mathbb{E}(O,O)$ consists of only one object for each zero object $O\in\mathscr{C}_0$, which necessarily agrees with ${}_O0_O$. Remark that a morphism $a\in\mathscr{C}_1(A,A^{\prime})$ satisfies $\overline{a}=0$ in $h\mathscr{C}$ if and only if it factors through some (equivalently, any) zero object. For any $\delta\in\mathbb{E}(C,A)$ and any $a\in\mathscr{C}_1(A,A^{\prime})$ such that $\overline{a}=0$, we have $a_{\ast}\delta={}_{A^{\prime}}0_C$ by Lemma~\ref{LemInvert}. Similarly we have $0^{\ast}\delta={}_A0_{C^{\prime}}$ for the zero morphism $0\in (h\mathscr{C})(C^{\prime},C)$.
\end{rem}
\begin{prop}\label{PropSumExtension}
The following holds.
\begin{enumerate}
\item For any $A,A^{\prime}\in\mathscr{C}_0$, there is a natural isomorphism
\[ (\upsilon_{A,A^{\prime}})_{\ast}\colon\mathbb{E}(-,A\amalg A^{\prime})\overset{\cong}{\Longrightarrow}\mathbb{E}(-,A\times A^{\prime}) \]
of functors $(h\mathscr{C})^\mathrm{op}\to\mathit{Set}$.
\item For any $C,C^{\prime}\in\mathscr{C}_0$, there is a natural isomorphism
\[ (\upsilon_{C,C^{\prime}})^{\ast}\colon\mathbb{E}(C\times C^{\prime},-)\overset{\cong}{\Longrightarrow}\mathbb{E}(C\amalg C^{\prime},-) \]
of functors $h\mathscr{C}\to\mathit{Set}$.
\item For any $A,A^{\prime},C,C^{\prime}\in\mathscr{C}_0$, there is an isomorphism
\[ \vartheta=\vartheta_{A^{\prime},C^{\prime}}^{A,C}\colon\mathbb{E}(C\amalg C^{\prime},A\amalg A^{\prime})\overset{\cong}{\Longrightarrow}\mathbb{E}(C\times C^{\prime},A\times A^{\prime}) \]
given by $\vartheta_{A^{\prime},C^{\prime}}^{A,C}=((\upsilon_{C,C^{\prime}})^{\ast})^{-1}\circ(\upsilon_{A,A^{\prime}})_{\ast}$. This sends $\delta\amalg\delta^{\prime}=\underline{\mathbf{S}\amalg\mathbf{S}^{\prime}}$ to $\delta\times\delta^{\prime}=\underline{\mathbf{S}\times\mathbf{S}^{\prime}}$ for any $\delta=\underline{\mathbf{S}}\in\mathbb{E}(C,A)$ and $\delta^{\prime}=\underline{\mathbf{S}^{\prime}}\in\mathbb{E}(C^{\prime},A^{\prime})$.
\end{enumerate}
\end{prop}
\begin{proof}
Since $\overline{\upsilon_{A,A^{\prime}}}$ is an isomorphism in $h\mathscr{C}$, {\rm (1)} is a formal consequence of the functoriality of $\mathbb{E}$. Dually for {\rm (2)}.
The former part of {\rm (3)} is immediate from {\rm (1)} and {\rm (2)}. The latter part follows from Corollary~\ref{CorProdExSeq1}, since it implies $\underline{(\upsilon_{A,A^{\prime}})_{\ast}(\mathbf{S}\amalg\mathbf{S}^{\prime})}=\underline{(\upsilon_{C,C^{\prime}})^{\ast}(\mathbf{S}\times\mathbf{S}^{\prime})}$ by Lemma~\ref{LemET2}.
\end{proof}
\begin{rem}\label{RemIdentify}
In the rest we will identify $A\oplus A^{\prime}=A\amalg A^{\prime}$ with $A\times A^{\prime}$ through the isomorphism $\overline{\upsilon_{A,A^{\prime}}}\colon A\oplus A^{\prime}\overset{\cong}{\longrightarrow}A\times A^{\prime}$ in $h\mathscr{C}$.
Also we put $\delta\oplus\delta^{\prime}=\delta\amalg\delta^{\prime}\in\mathbb{E}(C\oplus C^{\prime},A\oplus A^{\prime})$. By the above identification and Proposition~\ref{PropSumExtension} {\rm (3)}, we will often identify $\delta\oplus\delta^{\prime}$ with $\delta\times \delta^{\prime}=\vartheta(\delta\oplus\delta^{\prime})$.
This agrees with the one in {\rm (iii)} of Definition~\ref{DefAddReal} via the above identification.
\end{rem}
\begin{lem}\label{LemForAddExtension}
We have the following.
\begin{enumerate}
\item For any $\delta\in\mathbb{E}(C,A)$, we have
\begin{equation}
(\nabla_A)_{\ast}(\delta\oplus\delta)=(\nabla_C)^{\ast}\delta
\quad\text{and}\quad
(\Delta_A)_{\ast}\delta=(\Delta_C)^{\ast}(\delta\oplus\delta).
\end{equation}
\item For any $\delta_k\in\mathbb{E}(C_k,A_k),\mathfrak{a}_k\in(h\mathscr{C})(A_k,A_k^{\prime}),\mathfrak{c}_k\in(h\mathscr{C})(C_k,C_k^{\prime})$ $(k=1,2)$, we have
\[ (\mathfrak{a}_1\oplus \mathfrak{a}_2)_{\ast}(\delta_1\oplus\delta_2)=\mathfrak{a}_{1\ast}\delta_1\oplus \mathfrak{a}_{2\ast}\delta_2\ \ \text{and}\ \
(\mathfrak{c}_1\oplus \mathfrak{c}_2)^{\ast}(\delta_1\oplus\delta_2)=\mathfrak{c}_1^{\ast}\delta_1\oplus \mathfrak{c}_2^{\ast}\delta_2.
\]
\end{enumerate}
\end{lem}
\begin{proof}
{\rm (1)}
This follows from Lemma~\ref{LemET2} applied to the morphisms in Example~\ref{ExProdExSeq}.
{\rm (2)}
Take representatives $\delta_k=\underline{\mathbf{S}_k}$, $\mathfrak{a}_k=\overline{a_k}$ $(k=1,2)$. By the definition of $a_{k\ast}\mathbf{S}_k$, there are morphisms of exact sequences
\[ {}_{a_k}(\mathscr{D}f_k)_{1_{C_k}}\colon {}_{A_k}(\mathbf{S}_k)_{C_k}\to {}_{A_k^{\prime}}(a_{k\ast}\mathbf{S}_k)_{C_k} \]
for $k=1,2$. By Proposition~\ref{PropProdExSeq1}, we have a morphism ${}_{\upsilon}\mathscr{C}f_{\upsilon^{\prime}}\colon\mathbf{S}_1\amalg\mathbf{S}_2\to\mathbf{S}_1\times\mathbf{S}_2$ in which, under the identifications in Remark~\ref{RemIdentify}, the morphisms $\upsilon\in\mathscr{C}_1(A_1\amalg A_2,A_1^{\prime}\times A_2^{\prime})$ and $\upsilon^{\prime}\in\mathscr{C}_1(C_1\amalg C_2,C_1\times C_2)$ induce
\begin{eqnarray*}
\overline{\upsilon}=\overline{a_1}\oplus\overline{a_2}=\mathfrak{a}_1\oplus\mathfrak{a}_2&\in&(h\mathscr{C})(A_1\oplus A_2,A_1^{\prime}\oplus A_2^{\prime}),\\
\overline{\upsilon^{\prime}}=\overline{1_{C_1}}\oplus\overline{1_{C_2}}=\mathrm{id}_{C_1\oplus C_2}&\in&(h\mathscr{C})(C_1\oplus C_2,C_1\oplus C_2)
\end{eqnarray*}
in the homotopy category, thus implies
\[
(\mathfrak{a}_1\oplus\mathfrak{a}_2)_{\ast}(\delta_1\oplus\delta_2)
=\underline{\upsilon_{\ast}(\mathbf{S}_1\amalg\mathbf{S}_2)}
=\underline{\upsilon^{\prime\ast}(a_{1\ast}\mathbf{S}_1\times a_{2\ast}\mathbf{S}_2)}
=\mathfrak{a}_{1\ast}\delta_1\oplus\mathfrak{a}_{2\ast}\delta_2
\]
by Lemma~\ref{LemET2}. Similarly for $(\mathfrak{c}_1\oplus\mathfrak{c}_2)^{\ast}(\delta_1\oplus\delta_2)=\mathfrak{c}_1^{\ast}\delta_1\oplus\mathfrak{c}_2^{\ast}\delta_2$.
\end{proof}
\begin{dfn}\label{DefAddExtension}
For any $\delta_1,\delta_2\in\mathbb{E}(C,A)$, define $\delta_1+\delta_2\in\mathbb{E}(C,A)$ by
\[ \delta_1+\delta_2=(\nabla_A)_{\ast}(\Delta_C)^{\ast}(\delta_1\oplus\delta_2). \]
Obviously this operation is commutative.
\end{dfn}
\begin{prop}\label{PropAddExtMor}
Let $\delta\in\mathbb{E}(C,A)$ be any element.
\begin{enumerate}
\item For any pair of endomorphisms $\mathfrak{a},\mathfrak{a}^{\prime}\in\mathrm{End}_{h\mathscr{C}}(A)$ of $A$ in $h\mathscr{C}$, we have $(\mathfrak{a}+\mathfrak{a}^{\prime})_{\ast}\delta=\mathfrak{a}_{\ast}\delta+\mathfrak{a}^{\prime}_{\ast}\delta$.
\item For any pair of endomorphisms $\mathfrak{c},\mathfrak{c}^{\prime}\in\mathrm{End}_{h\mathscr{C}}(C)$ of $C$ in $h\mathscr{C}$, we have $(\mathfrak{c}+\mathfrak{c}^{\prime})^{\ast}\delta=\mathfrak{c}^{\ast}\delta+\mathfrak{c}^{\prime\ast}\delta$.
\end{enumerate}
\end{prop}
\begin{proof}
{\rm (1)} By Lemma~\ref{LemForAddExtension}, we have
\begin{eqnarray*}
\mathfrak{a}_{\ast}\delta+\mathfrak{a}^{\prime}_{\ast}\delta&=&(\nabla_A)_{\ast}(\Delta_C)^{\ast}(\mathfrak{a}_{\ast}\delta\oplus\mathfrak{a}^{\prime}_{\ast}\delta)
\ =\ (\nabla_A)_{\ast}(\Delta_C)^{\ast}(\mathfrak{a}\oplus\mathfrak{a}^{\prime})_{\ast}(\delta\oplus\delta)\\
&=&(\nabla_A)_{\ast}(\mathfrak{a}\oplus\mathfrak{a}^{\prime})_{\ast}(\Delta_C)^{\ast}(\delta\oplus\delta)
\ =\ (\nabla_A)_{\ast}(\mathfrak{a}\oplus\mathfrak{a}^{\prime})_{\ast}(\Delta_A)_{\ast}\delta\\
&=&(\nabla_A\circ (\mathfrak{a}\oplus\mathfrak{a}^{\prime})\circ\Delta_A)_{\ast}\delta\ =\ (\mathfrak{a}+\mathfrak{a}^{\prime})_{\ast}\delta.
\end{eqnarray*}
Similarly for {\rm (2)}.
\end{proof}
In the following corollary, remark that $h\mathscr{C}$ is an additive category by definition, hence there are $-\mathrm{id}_A\in (h\mathscr{C})(A,A)$ and $-\mathrm{id}_C\in (h\mathscr{C})(C,C)$.
\begin{cor}\label{CorAddExtMor}
For any $\delta\in\mathbb{E}(C,A)$, the following holds in $\mathbb{E}(C,A)$.
\begin{enumerate}
\item $\delta+{}_A0_C=\delta$.
\item $\delta+(-\mathrm{id}_C)^{\ast}\delta=\delta+(-\mathrm{id}_A)_{\ast}\delta={}_A0_C$.
\end{enumerate}
In particular it follows that $(-\mathrm{id}_C)^{\ast}\delta=(-\mathrm{id}_A)_{\ast}\delta$ gives the additive inverse of $\delta$.
\end{cor}
\begin{proof}
This follows from Remark~\ref{RemZeroElement} and Proposition~\ref{PropAddExtMor}.
\end{proof}
\begin{prop}\label{PropEAddFtr}
The following holds.
\begin{enumerate}
\item For any $A_1,A_2,C\in\mathscr{C}_0$,
\begin{equation}\label{CanBij}
([1\ 0]_{\ast},[0\ 1]_{\ast})\colon\mathbb{E}(C,A_1\oplus A_2)\to\mathbb{E}(C,A_1)\times\mathbb{E}(C,A_2)
\end{equation}
is bijective.
\item For any $A,C_1,C_2\in\mathscr{C}_0$,
\[ (\spmatrix{1}{0}^{\ast},\spmatrix{0}{1}^{\ast})\colon\mathbb{E}(C_1\oplus C_2,A)\to\mathbb{E}(C_1,A)\times\mathbb{E}(C_2,A) \]
is bijective.
\end{enumerate}
\end{prop}
\begin{proof}
{\rm (1)} Let us denote the map $(\ref{CanBij})$ by $F$ for simplicity. It suffices to show that
\[ G\colon\mathbb{E}(C,A_1)\times\mathbb{E}(C,A_2)\to\mathbb{E}(C,A_1\oplus A_2)\ ;\ (\delta_1,\delta_2)\mapsto \spmatrix{1}{0}_{\ast}\delta_1+\spmatrix{0}{1}_{\ast}\delta_2 \]
gives its inverse. $G\circ F=\mathrm{id}$ follows from Proposition~\ref{PropAddExtMor}, since we have
\[ \spmatrix{1}{0}_{\ast}[1\ 0]_{\ast}\delta+\spmatrix{0}{1}_{\ast}[0\ 1]_{\ast}\delta=(\spmatrix{1}{0}[1\ 0]+\spmatrix{0}{1}[0\ 1])_{\ast}\delta=(\mathrm{id}_{A_1\oplus A_2})_{\ast}\delta=\delta \]
for any $\delta\in\mathbb{E}(C,A_1\oplus A_2)$.
For any $(\delta_1,\delta_2)\in\mathbb{E}(C,A_1)\times\mathbb{E}(C,A_2)$, we have
\[ [1\ 0]_{\ast}(\delta_1\oplus\delta_2)=[1\ 0]^{\ast}\delta_1 \]
by Proposition~\ref{PropProdExSeq1} applied to $\mathbf{I}\colon\mathbf{S}_1\to\mathbf{S}_1$ and $\mathbf{S}_2\to\mathbf{O}$. Thus by Lemma~\ref{LemForAddExtension}, we obtain
\begin{eqnarray*}
[1\ 0]_{\ast}(\spmatrix{1}{0}_{\ast}\delta_1+\spmatrix{0}{1}_{\ast}\delta_2)
&=&[1\ 0]_{\ast}(\nabla_{A_1\oplus A_2})_{\ast}(\Delta_C)^{\ast}(\spmatrix{1}{0}_{\ast}\delta_1\oplus\spmatrix{0}{1}_{\ast}\delta_2)\\
&=&[1\ 0]_{\ast}(\nabla_{A_1\oplus A_2})_{\ast}(\Delta_C)^{\ast}(\spmatrix{1}{0}\oplus\spmatrix{0}{1})_{\ast}(\delta_1\oplus\delta_2)\\
&=&(\Delta_C)^{\ast}\Big([1\ 0]\circ\nabla_{A_1\oplus A_2}\circ(\spmatrix{1}{0}\oplus\spmatrix{0}{1})\Big)_{\ast}(\delta_1\oplus\delta_2)\\
&=&(\Delta_C)^{\ast}[1\ 0]_{\ast}(\delta_1\oplus\delta_2)
\ = (\Delta_C)^{\ast}[1\ 0]^{\ast}\delta_1\ =\ \delta_1.
\end{eqnarray*}
Similarly we have $[0\ 1]_{\ast}(\spmatrix{1}{0}_{\ast}\delta_1+\spmatrix{0}{1}_{\ast}\delta_2)=\delta_2$, and thus $F\circ G((\delta_1,\delta_2))=(\delta_1,\delta_2)$.
{\rm (2)} can be shown dually.
\end{proof}
\begin{cor}\label{CorEAddFtr}
$\mathbb{E}(C,A)$ has a natural structure of additive group, whose addition is given by Definition~\ref{DefAddExtension}, with the zero element ${}_A0_C$.
Moreover,
\[ \mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times h\mathscr{C}\to\mathit{Ab} \]
becomes a biadditive functor.
\end{cor}
\begin{proof}
This is a formal consequence of the functoriality of $\mathbb{E}\colon(h\mathscr{C})^\mathrm{op}\times h\mathscr{C}\to\mathit{Set}$, Corollary~\ref{CorAddExtMor} and Proposition~\ref{PropEAddFtr}.
\end{proof}
\subsection{External triangulation of $h\mathscr{C}$}
\begin{dfn}\label{DefReal}
Let $A,C\in\mathscr{C}_0$ be any pair of objects. For each $\delta\in\mathbb{E}(C,A)$, take any
\[
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
\endxy
\]
such that $\delta=\underline{\mathbf{S}}$, and put $\mathfrak{s}(\delta)=[A\overset{\overline{x}}{\longrightarrow}B\overset{\overline{y}}{\longrightarrow}C]$, where the equivalence class of the sequences is the one in Definition~\ref{DefEquivSeq}. Well-definedness follows from Proposition~\ref{PropInvert}.
\end{dfn}
\begin{lem}\label{LemET}
$\mathfrak{s}$ is an additive realization of $\mathbb{E}$.
\end{lem}
\begin{proof}
Let us confirm the conditions {\rm (i),(ii),(iii)} in Definition~\ref{DefAddReal}.
{\rm (i)} This follows from Lemma~\ref{LemET2}.
{\rm (ii)}
Let $A,C\in\mathscr{C}_0$ be any pair of objects. Since ${}_A0_C={}_A\underline{\mathbf{N}^{\prime}}_C$
, we have
\[ \mathfrak{s}({}_A0_C)=[A\overset{\overline{i}_A}{\longrightarrow}A\amalg C\overset{\overline{j}_C}{\longrightarrow}C]=[A\overset{\left[\begin{smallmatrix} 1\\0\end{smallmatrix}\right]}{\longrightarrow}A\amalg C\overset{[0\ 1]}{\longrightarrow}C]. \]
{\rm (iii)}
Let $\delta=\underline{\mathbf{S}}\in\mathbb{E}(C,A),\delta^{\prime}=\underline{\mathbf{S}^{\prime}}\in\mathbb{E}(C^{\prime},A^{\prime})$ be any pair of elements, with
\[
\mathbf{S}=\
\xy
(-6,6)*+{A}="0";
(6,6)*+{B}="2";
(-6,-6)*+{O}="4";
(6,-6)*+{C}="6";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar_{j} "4";"6"};
\endxy
\quad
\text{and}
\quad
\mathbf{S}^{\prime}=\
\xy
(-6,6)*+{A^{\prime}}="0";
(6,6)*+{B^{\prime}}="2";
(-6,-6)*+{O^{\prime}}="4";
(6,-6)*+{C^{\prime}}="6";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar^{y^{\prime}} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
\endxy.
\]
By definition we have
\[ \mathfrak{s}(\delta)=[A\overset{\mathfrak{x}}{\longrightarrow}B\overset{\mathfrak{y}}{\longrightarrow}C],\ \ \mathfrak{s}(\delta^{\prime})=[A^{\prime}\overset{\mathfrak{x}^{\prime}}{\longrightarrow}B^{\prime}\overset{\mathfrak{y}^{\prime}}{\longrightarrow}C^{\prime}], \]
and $\delta\oplus\delta^{\prime}=\underline{\mathbf{S}\amalg\mathbf{S}^{\prime}}$ as in Remark~\ref{RemIdentify}.
It follows
\[ \mathfrak{s}(\delta\oplus\delta^{\prime})=[A\amalg A^{\prime}\overset{\overline{x\amalg x^{\prime}}}{\longrightarrow}B\amalg B^{\prime}\overset{\overline{y\amalg y^{\prime}}}{\longrightarrow}C\amalg C^{\prime}]=[A\oplus A^{\prime}\overset{\overline{x}\oplus\overline{x^{\prime}}}{\longrightarrow}B\oplus B^{\prime}\overset{\overline{y}\oplus\overline{y^{\prime}}}{\longrightarrow}C\oplus C^{\prime} ]. \]
\end{proof}
\begin{thm}\label{ThmET}
$(h\mathscr{C},\mathbb{E},\mathfrak{s})$ is an extriangulated category.
\end{thm}
\begin{proof}
{\rm (ET1)} and {\rm (ET2)} are shown in Corollary~\ref{CorEAddFtr} and Lemma~\ref{LemET}, respectively.
{\rm (ET3)} follows from Proposition~\ref{PropMorphExSeq} and Lemma~\ref{LemET2}. Dually for {\rm (ET3)$^\mathrm{op}$}. It remains to show {\rm (ET4)} and {\rm (ET4)$^\mathrm{op}$}.
Since {\rm (ET4)$^\mathrm{op}$} can be shown dually, it is enough to show {\rm (ET4)}.
Let $\delta=\underline{\mathbf{S}}\in\mathbb{E}(D,A)$ and $\delta^{\prime}=\underline{\mathbf{S}^{\prime}}\in\mathbb{E}(F,B)$ be any pair of elements given by the following exact sequences.
\[
\mathbf{S}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{O}="4";
(7,-7)*+{D}="6";
{\ar^{f} "0";"2"};
{\ar_{i} "0";"4"};
{\ar|*+{_z} "0";"6"};
{\ar^{f^{\prime}} "2";"6"};
{\ar_{j} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\xi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\eta}} "10";"11"};
\endxy,\quad
\mathbf{S}^{\prime}=\
\xy
(-7,7)*+{B}="0";
(7,7)*+{C}="2";
(-7,-7)*+{O^{\prime}}="4";
(7,-7)*+{F}="6";
{\ar^{g} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar|*+{_{z^{\prime}}} "0";"6"};
{\ar^{g^{\prime}} "2";"6"};
{\ar_{j^{\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.4){^{\xi^{\prime}}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.4){_{\eta^{\prime}}} "10";"11"};
\endxy
\]
By definition, we have $\mathfrak{s}(\delta)=[A\overset{\overline{f}}{\longrightarrow}B\overset{\overline{f^{\prime}}}{\longrightarrow}D]$ and $\mathfrak{s}(\delta^{\prime})=[B\overset{\overline{g}}{\longrightarrow}C\overset{\overline{g^{\prime}}}{\longrightarrow}F]$.
Since $f,g$ are ingressive, we may take a $2$-simplex
\[
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="4";
(7,-7)*+{C}="6";
{\ar^{f} "0";"4"};
{\ar_{h} "0";"6"};
{\ar^{g} "4";"6"};
(3,7)*+{}="10";
(7,3)*+{}="11";
{\ar@/_0.2pc/@{-}_{_{\chi}} "10";"11"};
\endxy
\]
in which $h$ is ingressive by the definition of exact quasi-category.
As in \cite[Remark~3.2]{B1}, $h$ should appear as the fiber of some exact sequence as below.
\[
\mathbf{S}^{\prime\prime}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{C}="2";
(-7,-7)*+{O^{\prime\prime}}="4";
(7,-7)*+{E}="6";
{\ar^{h} "0";"2"};
{\ar_{i^{\prime\prime}} "0";"4"};
{\ar|*+{_{z^{\prime\prime}}} "0";"6"};
{\ar^{h^{\prime}} "2";"6"};
{\ar_{j^{\prime\prime}} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_(0.3){^{\xi^{\prime\prime}}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.3){_{\eta^{\prime\prime}}} "10";"11"};
\endxy
\]
Put $\delta^{\prime\prime}=\underline{\mathbf{S}^{\prime\prime}}$. We have $\mathfrak{s}(\delta^{\prime\prime})=[A\overset{\overline{h}}{\longrightarrow}C\overset{\overline{h^{\prime}}}{\longrightarrow}E]$ by definition.
Applying Proposition~\ref{PropMorphExSeq} to $\mathbf{S},\mathbf{S}^{\prime\prime}$ and
\[
\mathbf{T}=\SQ{s_0(h)}{\chi}=\
\xy
(-7,7)*+{A}="0";
(7,7)*+{B}="2";
(-7,-7)*+{A}="4";
(7,-7)*+{C}="6";
{\ar^{f} "0";"2"};
{\ar_{1_A} "0";"4"};
{\ar|*+{_h} "0";"6"};
{\ar^{g} "2";"6"};
{\ar_{h} "4";"6"};
(3,7)*+{}="00";
(7,3)*+{}="01";
{\ar@/_0.2pc/@{-}_{^{\chi}} "00";"01"};
(-3,-7)*+{}="10";
(-7,-3)*+{}="11";
{\ar@/_0.2pc/@{-}_(0.3){_{s_0(h)}} "10";"11"};
\endxy
\]
we obtain a morphism of the form
\[
{}_{1_A}\mathscr{C}f_d=\mathscr{C}B{s_0(\xi^{\prime\prime})}{\mathcal{Y}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{f}}}{s_0(\eta^{\prime\prime})}{\mathcal{Y}_{\mathrm{b}}}{\mathcal{Z}_{\mathrm{b}}}=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{D}="6";
(-8,-7)*+{A}="10";
(8,-7)*+{C}="12";
(1,-13)*+{O^{\prime\prime}}="14";
(17,-13)*+{E}="16";
{\ar^{f} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{f^{\prime}} "2";"6"};
{\ar_(0.3){j} "4";"6"};
{\ar_{1_A} "0";"10"};
{\ar^(0.7){g}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){o} "4";"14"};
{\ar^{d} "6";"16"};
{\ar^(0.3){h}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i^{\prime\prime}} "10";"14"};
{\ar^{h^{\prime}} "12";"16"};
{\ar_{j^{\prime\prime}} "14";"16"};
\endxy\ \ \colon\mathbf{S}\to\mathbf{S}^{\prime\prime}.
\]
By the dual of Lemma~\ref{LemForSym} {\rm (1)}, the square
\[
\mathbf{P}=\big(\mathscr{C}f|_{\Delta^1\times\{1\}\times\Delta^1}\big)^t=\
\xy
(-7,7)*+{B}="0";
(7,7)*+{C}="2";
(-7,-7)*+{D}="4";
(7,-7)*+{E}="6";
{\ar^{g} "0";"2"};
{\ar_{f^{\prime}} "0";"4"};
{\ar^{h^{\prime}} "2";"6"};
{\ar_{d} "4";"6"};
\endxy
\]
is a push-out. By Lemma~\ref{LemExSeqPO}, using this push-out we obtain a morphism of exact sequences
\[
{}_{f^{\prime}}\mathscr{D}f_{1_F}=\mathscr{C}B{\mathcal{X}_{\mathrm{f}}^{\prime}}{\mathcal{Y}_{\mathrm{f}}^{\prime}}{s_2(\xi^{\prime})}{\mathcal{X}_{\mathrm{b}}^{\prime}}{s_1(\eta^{\prime})}{s_2(\eta^{\prime})}=\
\xy
(-8,9)*+{B}="0";
(8,9)*+{C}="2";
(1,3)*+{O^{\prime}}="4";
(17,3)*+{F}="6";
(-8,-7)*+{D}="10";
(8,-7)*+{E}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{F}="16";
{\ar^{g} "0";"2"};
{\ar_{i^{\prime}} "0";"4"};
{\ar^{g^{\prime}} "2";"6"};
{\ar_(0.3){j^{\prime}} "4";"6"};
{\ar_{f^{\prime}} "0";"10"};
{\ar^(0.7){h^{\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){1_{O^{\prime}}} "4";"14"};
{\ar^{1_F} "6";"16"};
{\ar^(0.3){d}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i_0} "10";"14"};
{\ar^{e} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy\ \ \colon\mathbf{S}^{\prime}\to f^{\prime}_{\ast}\mathbf{S}^{\prime}.
\]
By the construction so far, we have a commutative diagram in $h\mathscr{C}$
\[
\xy
(-21,14)*+{A}="0";
(-7,14)*+{B}="2";
(7,14)*+{D}="4";
(21,14)*+{}="6";
(-21,0)*+{A}="10";
(-7,0)*+{C}="12";
(7,0)*+{E}="14";
(21,0)*+{}="16";
(-7,-14)*+{F}="22";
(7,-14)*+{F}="24";
{\ar^{\overline{f}} "0";"2"};
{\ar^{\overline{f^{\prime}}} "2";"4"};
{\ar@{=} "0";"10"};
{\ar_{\overline{g}} "2";"12"};
{\ar^{\overline{d}} "4";"14"};
{\ar_{\overline{h}} "10";"12"};
{\ar_{\overline{h^{\prime}}} "12";"14"};
{\ar_{\overline{g^{\prime}}} "12";"22"};
{\ar^{\overline{e}} "14";"24"};
{\ar@{=} "22";"24"};
{\ar@{}|\circlearrowright "0";"12"};
{\ar@{}|\circlearrowright "2";"14"};
{\ar@{}|\circlearrowright "12";"24"};
\endxy
\]
satisfying $\mathfrak{s}(\overline{f^{\prime}}_{\ast}\delta^{\prime})=[D\overset{\overline{d}}{\longrightarrow}E\overset{\overline{e}}{\longrightarrow}F]$.
Besides, the existence of $\mathscr{C}f$ shows $\delta=\overline{d}^{\ast}\delta^{\prime\prime}$ by Lemma~\ref{LemET2}.
It remains to show $\overline{f}_{\ast}\delta^{\prime\prime}=\overline{e}^{\ast}\delta^{\prime}$. By Lemma~\ref{LemET2}, it suffices to show the existence of a morphism ${}_{f}\mathscr{C}f^{\prime}_{e}\colon\mathbf{S}^{\prime\prime}\to\mathbf{S}^{\prime}$. Remark that we have cubes
\begin{eqnarray*}
&\mathscr{C}B{\mathcal{Y}_{\mathrm{f}}}{s_0(\xi^{\prime\prime})}{s_0(\eta^{\prime\prime})}{\mathcal{Z}_{\mathrm{f}}}{\mathcal{Z}_{\mathrm{b}}}{\mathcal{Y}_{\mathrm{b}}}=\
\xy
(-8,9)*+{A}="0";
(8,9)*+{A}="2";
(1,3)*+{O}="4";
(17,3)*+{O^{\prime\prime}}="6";
(-8,-7)*+{B}="10";
(8,-7)*+{C}="12";
(1,-13)*+{D}="14";
(17,-13)*+{E}="16";
{\ar^{1_A} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{i^{\prime\prime}} "2";"6"};
{\ar_(0.3){o} "4";"6"};
{\ar_{f} "0";"10"};
{\ar^(0.7){h}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){j} "4";"14"};
{\ar^{j^{\prime\prime}} "6";"16"};
{\ar^(0.3){g}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{f^{\prime}} "10";"14"};
{\ar^{h^{\prime}} "12";"16"};
{\ar_{d} "14";"16"};
\endxy&,\\
&\mathscr{C}B{s_2(\eta^{\prime})}{s_2(\xi^{\prime})}{\mathcal{Y}_{\mathrm{f}}^{\prime}}{s_1(\eta^{\prime})}{\mathcal{X}_{\mathrm{b}}^{\prime}}{\mathcal{X}_{\mathrm{f}}^{\prime}}=\
\xy
(-8,9)*+{B}="0";
(8,9)*+{C}="2";
(1,3)*+{D}="4";
(17,3)*+{E}="6";
(-8,-7)*+{O^{\prime}}="10";
(8,-7)*+{F}="12";
(1,-13)*+{O^{\prime}}="14";
(17,-13)*+{F}="16";
{\ar^{g} "0";"2"};
{\ar_{f^{\prime}} "0";"4"};
{\ar^{h^{\prime}} "2";"6"};
{\ar_(0.3){d} "4";"6"};
{\ar_{i^{\prime}} "0";"10"};
{\ar^(0.7){g^{\prime}}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar_(0.3){i_0} "4";"14"};
{\ar^{e} "6";"16"};
{\ar^(0.3){j^{\prime}}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{1_{O^{\prime}}} "10";"14"};
{\ar^{1_F} "12";"16"};
{\ar_{j^{\prime}} "14";"16"};
\endxy&
\end{eqnarray*}
as transpositions of $\mathscr{C}f$ and $\mathscr{D}f$.
As in Proposition~\ref{PropComposeCubes}, we obtain a map $\Delta^1\times\Delta^1\times\Delta^2\to\mathscr{C}$
\[
\xy
(-8,16)*+{A}="0";
(8,16)*+{A}="2";
(1,10)*+{O}="4";
(17,10)*+{O^{\prime\prime}}="6";
(-8,0)*+{B}="10";
(8,0)*+{C}="12";
(1,-6)*+{D}="14";
(17,-6)*+{E}="16";
(-8,-16)*+{O^{\prime}}="20";
(8,-16)*+{F}="22";
(1,-22)*+{O^{\prime}}="24";
(17,-22)*+{F}="26";
{\ar^{1_A} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{i^{\prime\prime}} "2";"6"};
{\ar_(0.3){o} "4";"6"};
{\ar_{f} "0";"10"};
{\ar^(0.7){h}|!{(9,10);(13,10)}\hole "2";"12"};
{\ar_(0.3){j} "4";"14"};
{\ar^{j^{\prime\prime}} "6";"16"};
{\ar^(0.3){g}|!{(1,4);(1,0)}\hole "10";"12"};
{\ar_{f^{\prime}} "10";"14"};
{\ar^{h^{\prime}} "12";"16"};
{\ar_(0.7){d} "14";"16"};
{\ar_{i^{\prime}} "10";"20"};
{\ar^(0.7){g^{\prime}}|!{(9,-6);(13,-6)}\hole "12";"22"};
{\ar_(0.3){i_0} "14";"24"};
{\ar^{e} "16";"26"};
{\ar^(0.3){j^{\prime}}|!{(1,-12);(1,-16)}\hole "20";"22"};
{\ar_{1_{O^{\prime}}} "20";"24"};
{\ar^{1_F} "22";"26"};
{\ar_{j^{\prime}} "24";"26"};
\endxy
\]
compatibly with these cubes. Especially it contains $4$-simplices
\begin{eqnarray*}
\Theta=\ \Penta{A}{B}{C}{E}{F}{f}{g}{h^{\prime}}{e}&,&
\Xi=\ \Penta{A}{O}{D}{O^{\prime}}{F}{i}{j}{i_0}{j^{\prime}},\\
\Phi=\ \Penta{A}{B}{D}{O^{\prime}}{F}{f}{f^{\prime}}{i_0}{j^{\prime}}&,&
\Psi=\ \Penta{A}{O}{O^{\prime\prime}}{E}{F}{i}{o}{j^{\prime\prime}}{e}
\end{eqnarray*}
such that $\mathbf{S}^{\prime\prime}=\SQ{d_1d_4(\Psi)}{d_1d_4(\Theta)}, \mathbf{S}^{\prime}=\SQ{d_0d_2(\Phi)}{d_0d_3(\Theta)}$ and
\[ d_2d_3(\Theta)=d_2d_2(\Phi),\ d_1d_1(\Theta)=d_1d_1(\Psi),\ d_1d_2(\Xi)=d_1d_2(\Phi). \]
Applying Lemma~\ref{LemReplZero} to $d_2(\Xi)$ and $d_3(\Psi)$, we obtain a $3$-simplex $\mu$ such that
$d_1(\mu)=d_1d_2(\Xi)$
and
$d_2(\mu)=d_1d_3(\Psi)$.
If we put
\[ \mathscr{C}f^{\prime}=\mathscr{C}B{d_3(\Theta)}{s_1d_1d_3(\Theta)}{d_1(\Theta)}{d_2(\Phi)}{\mu}{d_1(\Psi)}, \]
then this gives a morphism ${}_f\mathscr{C}f^{\prime}_e\colon\mathbf{S}^{\prime\prime}\to\mathbf{S}^{\prime}$ as desired.
\end{proof}
\begin{dfn}
Inspired from~\cite{S}, we call \emph{topological} any extriangulated category which is equivalent to the homotopy category of an exact quasi-category.
\end{dfn}
\begin{lem}\label{LemExtClosed}
Let $\mathscr{C}$ be an exact quasi-category, and let $\mathscr{D}$ be a full subcategory of $\mathscr{C}$ closed by homotopy equivalences of objects in $\mathscr{C}$.
Assume that $\mathscr{D}$ is extension-closed in $\mathscr{C}$, meaning that, $\mathscr{D}$ contains the zero objects of $\mathscr{C}$ and that, for any exact sequence $_A\mathbf{S}_C$ in $\mathscr{C}$ with $A,C\in\mathscr{D}_0$, the middle term $B$ also belongs to $\mathscr{D}_0$.
Define $(\mathscr{D}_{\dag})_1$ by considering those morphisms in $\mathscr{D}$ that are ingressive in $\mathscr{C}$ and for which some cofiber belongs to $\mathscr{D}$. (For $n\ge 2$, we define $(\mathscr{D}_{\dag})_n\subseteq(\mathscr{C}_{\dag})_n\cap\mathscr{D}_n$ to be the subset consisting of $n$-simplices whose edges are in $(\mathscr{D}_{\dag})_1$. The face and degeneracy maps in $\mathscr{D}_{\dag}$ are restrictions of those in $\mathscr{D}$.)
Define $\mathscr{D}^{\dag}$ dually.
Then $(\mathscr{D},\mathscr{D}_{\dag},\mathscr{D}^{\dag})$ is an exact quasi-category.
\end{lem}
\begin{proof}
Stability under (split) extensions implies that $\mathscr{D}$ is additive.
To see that $\mathscr{D}_{\dag}$ is a subcategory, let
\[
\TwoSP{X}{Y}{Z}{f}{g}{h}{}\]
any $2$-simplex in $\mathscr{D}$ in which $f,g$ belong to $(\mathscr{D}_{\dag})_1$.
Consider the diagram of ambigressive push-outs
\[
\xy
(-18,12)*+{X}="0";
(-6,12)*+{Y}="2";
(6,12)*+{Z}="4";
(-18,0)*+{O}="10";
(-6,0)*+{Z^{\prime}}="12";
(6,0)*+{Y^{\prime}}="14";
(-6,-12)*+{O^{\prime}}="22";
(6,-12)*+{X^{\prime}}="24";
{\ar^{f} "0";"2"};
{\ar^{g} "2";"4"};
{\ar "0";"10"};
{\ar "2";"12"};
{\ar "4";"14"};
{\ar "10";"12"};
{\ar "12";"14"};
{\ar "12";"22"};
{\ar "14";"24"};
{\ar "22";"24"};
\endxy
\]
where $Z^{\prime}$ and $X^{\prime}$ belong to $\mathscr{D}$ by assumption.
The bottom square is an exact sequence, hence $Y^{\prime}$ also belongs to $\mathscr{D}$.
The outer upper rectangle shows that $X\overset{h}{\longrightarrow} Z$ belongs to $\mathscr{D}_{\dag}$.
Dually, $\mathscr{D}^{\dag}$ is a subcategory of $\mathscr{D}$.
The remaining axioms readily follow from those for $\mathscr{C}$.
\end{proof}
\begin{prop}
Every extension-closed full subcategory, containing zero, of a topological extriangulated category is topological.
\end{prop}
\begin{proof}
Let $\mathscr{C}$ be an exact quasi-category and let $\mathcal{D}$ be a full subcategory of $h\mathscr{C}$ that we assume extension-closed.
By \cite[Remark 2.18]{NP}, $\mathcal{D}$ inherits an extriangulated structure from that of $h\mathscr{C}$.
Define $\mathscr{D}$ to be the full subcategory of $\mathscr{C}$ given by $\mathcal{D}$.
More explicitly, $\mathscr{D}$ is given by the pull-back, in the category of simplicial sets:
\[
\xy
(-10,6)*+{\mathscr{D}}="0";
(10,6)*+{\mathscr{C}}="2";
(-10,-6)*+{N(\mathcal{D})}="4";
(10,-6)*+{N(h\mathscr{C})}="6";
{\ar "0";"2"};
{\ar "0";"4"};
{\ar "2";"6"};
{\ar "4";"6"};
\endxy
\]
Then $\mathscr{D}$ is extension-closed in $\mathscr{C}$.
Indeed, any exact sequence $X\to Y\to Z$ in $\mathscr{C}$ with $X,Z\in\mathscr{D}_0$ induces, by \cref{ThmET}, an extriangle $X\rightarrowtail Y\twoheadrightarrow Z\dashrightarrow$ in $h\mathscr{C}$.
By assumption, we have $Y\in\mathcal{D}$.
We can thus apply \cref{LemExtClosed}: $\mathscr{D}$ is an exact quasi-category.
The claim follows since $h\mathscr{D}=\mathcal{D}$.
\end{proof}
We plan to investigate the following
\begin{question}\label{QuestionTopoExtricat}
Is every topological extriangulated category equivalent to some extension-closed full subcategory of a triangulated category?
\end{question}
Our next result is an elementary tool that might help answering \cref{QuestionTopoExtricat}.
Let $F\colon\mathscr{C}\to\mathscr{D}$ be an exact functor of exact quasi-categories \cite[Definition 4.1]{B1}.
In other words, $F$ is a map of simplicial sets that preserves zero objects, ingressive morphisms, egressive morphisms, push-outs along ingressive morphisms and pull-backs along egressive morphisms.
Because $F$ is a map of simplicial sets, it commutes with faces and degeneracies and thus induces a functor $hF\colon h\mathscr{C}\to h\mathscr{D}$ which coincides with $F$ on objects.
It is defined on morphisms by $hF\, \overline{a} = \overline{Fa}$.
\begin{dfn}[{\cite[Definition 2.31]{BTS}}]
A functor $G\colon(\mathcal{C},\mathbb{E},\mathfrak{s})\to(\mathcal{D},\mathbb{E}^{\prime},\mathfrak{s}^{\prime})$ between extriangulated categories is \emph{exact} (or \emph{extriangulated}) if it is additive, and if there is a natural transformation $\Gamma\colon\mathbb{E}\Rightarrow\mathbb{E}^{\prime}(G^\mathrm{op}-,G-)$ such that, for any $\mathfrak{s}$-triangle
$A\overset{i}{\rightarrowtail}B\overset{p}{\twoheadrightarrow}C\overset{\delta}{\dashrightarrow}$ in $\mathcal{C}$, its image
$GA\overset{Gi}{\rightarrowtail}GB\overset{Gp}{\twoheadrightarrow}GC\overset{\Gamma_{C,A}(\delta)}{\dashrightarrow}$ is an $\mathfrak{s}^{\prime}$-triangle in $\mathcal{D}$.
\end{dfn}
\begin{prop}\label{propExactFunctors}
Let $F\colon\mathscr{C}\to\mathscr{D}$ be an exact functor of exact quasi-categories.
Then $hF\colon h\mathscr{C}\to h\mathscr{D}$ is an exact functor of extriangulated categories.
\end{prop}
\begin{proof}
Because $F$ preserves push-outs of ingressive morphisms, it preserves coproducts, and similarly for products.
It follows that $hF$ is additive.
Define $\Gamma$ as follows: For any $A,C\in\mathscr{C}_0$, and any $\delta=\underline{\mathbf{S}}\in\mathbb{E}(C,A)$, let $\Gamma_{C,A}(\delta) = \underline{F\mathbf{S}}$. The map $\Gamma_{C,A}$ is well-defined since if $_a\mathscr{C}f_c$ is a cube in $\mathscr{C}$ from $\mathbf{S}$ to $\mathbf{S}^{\prime}$ with $\overline{a} = \mathrm{id}_A$ and $\overline{c}=\mathrm{id}_C$ in $h\mathscr{C}$, then $_{Fa}\mathscr{C}f^{\prime}_{Fc}=F\mathscr{C}f$ is a cube in $\mathscr{D}$ from $F\mathbf{S}$ to $F\mathbf{S}^{\prime}$ with $\overline{Fa}=\mathrm{id}_{FA}$ and $\overline{Fc}=\mathrm{id}_{FC}$ in $h\mathscr{D}$.
It remains to show that $\Gamma = (\Gamma_{C,A})$ is natural.
Let $\delta=\underline{\mathbf{S}}\in\mathbb{E}(C,A)$ and let $a\in\mathscr{C}_1(A,A^{\prime})$. Consider a cube
\[
\mathscr{D}f=
\xy
(-8,9)*+{A}="0";
(8,9)*+{B}="2";
(1,3)*+{O}="4";
(17,3)*+{C}="6";
(-8,-7)*+{A^{\prime}}="10";
(8,-7)*+{B_0}="12";
(1,-13)*+{O}="14";
(17,-13)*+{C}="16";
{\ar^{x} "0";"2"};
{\ar_{i} "0";"4"};
{\ar^{y} "2";"6"};
{\ar^(0.3){j} "4";"6"};
{\ar_{a} "0";"10"};
{\ar^(0.6){b_0}|!{(9,3);(13,3)}\hole "2";"12"};
{\ar^(0.4){1_O} "4";"14"};
{\ar^{1_C} "6";"16"};
{\ar^(0.3){x_0}|!{(1,-3);(1,-7)}\hole "10";"12"};
{\ar_{i_0} "10";"14"};
{\ar^{y_0} "12";"16"};
{\ar_{j} "14";"16"};
\endxy
\]
defining $a_{\ast}\mathbf{S}$ as in \cref{LemExSeqPO}.
In particular, the back square is a push-out, with $x$ ingressive.
Since $F$ preserves push-outs along ingressive morphisms, the cube $F\mathscr{D}f$ witnesses the fact that $F(a_{\ast}\mathbf{S})=(Fa)_{\ast} F\mathbf{S}$.
Dually, for any $C^{\prime}\overset{c}{\longrightarrow}C$, we have $F(c^{\ast}\mathbf{S})=(Fc)^{\ast} F\mathbf{S}$.
Whence $\Gamma_{C^{\prime},A^{\prime}}\circ \overline{c}^{\ast} \overline{a}_{\ast} = (hF\overline{c})^{\ast}(hF\overline{a})_{\ast} \circ \Gamma_{C,A}$.
\end{proof}
\subsection{Compatibility with the triangulation in stable case}\label{Subsection_StableCase}
In this subsection, let $\mathscr{C}$ be a stable quasi-category, which is
a particular case of an exact quasi-category, as in \cite[Example~3.3]{B1}. In this case it is known that the homotopy category $h\mathscr{C}$ can be equipped with a structure of a triangulated category (\cite{L2}).
Let us show that the structure of an extriangulated category on $h\mathscr{C}$ obtained in Theorem \ref{ThmET} is compatible with it.
First let us briefly review the construction of the shift functor. For any object $A\in\mathscr{C}_0$, we choose an exact sequence
\[
\mathbf{U}_A=\SQ{\eta_A}{\zeta_A}=\
\xy
(-6,6)*+{A}="0";
(6,6)*+{O_A}="2";
(-6,-6)*+{O_A^{\prime}}="4";
(6,-6)*+{\Sigma A}="6";
{\ar^{} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy
\quad\Bigg(\text{respectively,}\
\mathbf{V}_A=\
\xy
(-6,6)*+{\Omega A}="0";
(6,6)*+{O_A^{\prime\prime}}="2";
(-6,-6)*+{O_A^{\prime\prime\prime}}="4";
(6,-6)*+{A}="6";
{\ar^{} "0";"2"};
{\ar_{} "0";"4"};
{\ar^{} "2";"6"};
{\ar_{} "4";"6"};
\endxy\Bigg)
\]
in which $O_A,O_A^{\prime}$ (resp. $O_A^{\prime\prime},O_A^{\prime\prime\prime}$) are zero objects.
If we put $\mu_A=\underline{\mathbf{U}_A}\in\mathbb{E}(\Sigma A,A)$ (resp. $\nu_A=\underline{\mathbf{V}_A}\in\mathbb{E}(A,\Omega A)$), then we have an $\mathfrak{s}$-triangle
\begin{equation}\label{ShiftsTria}
A\to0\to\Sigma A\overset{\mu_A}{\dashrightarrow},\quad (\text{resp.}\ \ \Omega A\to0\to A\overset{\nu_A}{\dashrightarrow}).
\end{equation}
Especially, as in \cite[Proposition~3.3]{NP} we obtain natural isomorphisms of functors induced by the Yoneda lemma
\[ (\mu_A)_\sharp\colon(h\mathscr{C})(-,\Sigma A)\overset{\cong}{\Longrightarrow}\mathbb{E}(-,A) \]
which assigns $(\mu_A)_\sharp(\mathfrak{c})=\mathfrak{c}^{\ast}(\mu_A)$ to each $\mathfrak{c}\in(h\mathscr{C})(C,\Sigma A)$. Thus for any $\mathfrak{a}\in(h\mathscr{C})(A,A^{\prime})$, there exists a morphism $\Sigma\mathfrak{a}\in(h\mathscr{C})(\Sigma A,\Sigma A^{\prime})$ uniquely so that
\[
\xy
(-14,6)*+{(h\mathscr{C})(-,\Sigma A)}="0";
(14,6)*+{\mathbb{E}(-,A)}="2";
(-14,-6)*+{(h\mathscr{C})(-,\Sigma A^{\prime})}="4";
(14,-6)*+{\mathbb{E}(-,A^{\prime})}="6";
{\ar@{=>}^(0.56){(\mu_A)_\sharp} "0";"2"};
{\ar@{=>}_{(\Sigma\mathfrak{a})\circ-} "0";"4"};
{\ar@{=>}^{\mathfrak{a}_{\ast}} "2";"6"};
{\ar@{=>}_(0.56){(\mu_{A^{\prime}})_\sharp} "4";"6"};
{\ar@{}|\circlearrowright "0";"6"};
\endxy
\]
becomes commutative. Then the correspondence
\begin{itemize}
\item for any object $A\in h\mathscr{C}$, associate $\Sigma A\in h\mathscr{C}$ using the chosen $\mathbf{U}_A$,
\item for any morphism $\mathfrak{a}\in(h\mathscr{C})(A,A^{\prime})$, associate the morphism $\Sigma\mathfrak{a}\in(h\mathscr{C})(\Sigma A,\Sigma A^{\prime})$ by the above
\end{itemize}
gives the endofunctor $\Sigma\colon h\mathscr{C}\to h\mathscr{C}$. By Lemma~\ref{LemET2}, the morphism $\Sigma\overline{a}$ for $a\in\mathscr{C}_1(A,A^{\prime})$ is equal to the one given by $\Sigma\overline{a}=\overline{s}$ where $s\in\mathscr{C}_1(\Sigma A,\Sigma A^{\prime})$ admits some morphism ${}_a\mathscr{C}f_{s}\colon\mathbf{U}_A\to\mathbf{U}_{A^{\prime}}$ of exact sequences. Similarly for the functor $\Omega\colon h\mathscr{C}\to h\mathscr{C}$, which gives a quasi-inverse of $\Sigma$.
Using these structures, we can complete any morphism $\overline{z}\in(h\mathscr{C})(C,\Sigma A)$ into a triangle diagram of the form $A\to B\to C\overset{\overline{z}}{\longrightarrow}\Sigma A$ compatibly with the external triangulation in the following way. Indeed, this is how to relate triangulations with external triangulations (\cite[Proposition~3.22]{NP}).
\begin{itemize}
\item For each $\overline{z}\in(h\mathscr{C})(C,\Sigma A)$, take
\begin{equation}\label{DistTriET}
\mathfrak{s}((\mu_A)_\sharp(\overline{z}))=[A\overset{\overline{x}}{\longrightarrow}B\overset{\overline{y}}{\longrightarrow}C]
\end{equation}
to obtain $A\overset{\overline{x}}{\longrightarrow}B\overset{\overline{y}}{\longrightarrow}C\overset{\overline{z}}{\longrightarrow}\Sigma A$.
\end{itemize}
\begin{rem}
In fact, the existence of $\mathfrak{s}$-triangles of the form $(\ref{ShiftsTria})$ is equivalent to that the extriangulated category $(h\mathscr{C},\mathbb{E},\mathfrak{s})$ admits a compatible triangulation. Indeed, existence of such $\mathfrak{s}$-triangles is equivalent to that $(h\mathscr{C},\mathbb{E},\mathfrak{s})$ is \emph{Frobenius} with trivial projective-injectives in the sense of \cite[Definition~7.1]{NP} and \cite[Subsection~3.3]{ZZ}, and thus $(h\mathscr{C},\Sigma,\triangle)$ becomes a triangulated category by \cite[Corollary~7.6]{NP} or \cite[Example~3.15]{ZZ}, in which $\triangle$ is defined to be the class of triangles isomorphic to those $A\overset{\overline{x}}{\longrightarrow}B\overset{\overline{y}}{\longrightarrow}C\overset{\overline{z}}{\longrightarrow}\Sigma A$ obtained in the above way.
\end{rem}
On the other hand, the distinguished triangles in the triangulation given in \cite{L2} are those isomorphic to
$A\overset{\overline{x^{\prime}}}{\longrightarrow}B^{\prime}\overset{\overline{y^{\prime}}}{\longrightarrow}C\overset{\overline{z}}{\longrightarrow}\Sigma A$ which fits in some rectangle
\begin{equation}\label{Recta}
\mathbf{R}=\RT{\mathcal{X}}{\mathcal{Y}}{\mathcal{Z}}=\
\xy
(-16,7)*+{A}="0";
(0,7)*+{B^{\prime}}="2";
(16,7)*+{O_A}="4";
(-16,-7)*+{O_A^{\prime}}="10";
(0,-7)*+{C}="12";
(16,-7)*+{\Sigma A}="14";
{\ar^{x^{\prime}} "0";"2"};
{\ar_{} "2";"4"};
{\ar_{} "0";"10"};
{\ar_{y^{\prime}} "2";"12"};
{\ar^{} "4";"14"};
{\ar^{} "10";"12"};
{\ar^{z} "12";"14"};
\endxy
\end{equation}
such that $\mathbf{R}_{\mathrm{out}}=\mathbf{U}_A$ and that $\mathbf{R}_{\mathrm{right}}$ is a pull-back.
Thus to see the compatibility, it is enough to show the following.
\begin{prop}\label{PropCompatStable}
For any morphism $z\in\mathscr{C}_1(C,\Sigma A)$ and any rectangle $(\ref{Recta})$ in which $\mathbf{R}_{\mathrm{out}}=\mathbf{U}_A$ holds and $\mathbf{R}_{\mathrm{right}}$ is a pull-back, we have $\mathfrak{s}((\mu_A)_\sharp(\overline{z}))=[A\overset{\overline{x^{\prime}}}{\longrightarrow}B^{\prime}\overset{\overline{y^{\prime}}}{\longrightarrow}C]$.
\end{prop}
\begin{proof}
In the rectangle $\mathbf{R}$, we see that $\mathbf{S}^{\prime}=\mathbf{R}_{\mathrm{left}}$ is an exact sequence, and satisfies $\mathfrak{s}(\underline{\mathbf{S}^{\prime}})=[A\overset{\overline{x^{\prime}}}{\longrightarrow}B^{\prime}\overset{\overline{y^{\prime}}}{\longrightarrow}C]$ by definition. Moreover, $\mathbf{R}$ gives a morphism of exact sequences
\[ {}_{1_A}\mathscr{C}f_{z}=\mathscr{C}B{s_0(\zeta_A)}{\mathcal{Z}}{\mathcal{Y}}{s_0(\eta_A)}{s_1(\eta_A)}{\mathcal{X}}\colon\mathbf{S}^{\prime}\to\mathbf{U}_A, \]
which shows $(\mu_A)_\sharp(\overline{z})=(\overline{z})^{\ast}\underline{\mathbf{U}_A}=\underline{\mathbf{S}^{\prime}}$ by Lemma~\ref{LemET2}.
\end{proof}
\begin{thebibliography}{B-TS}
\bibitem[B1]{B1} Barwick, C.: \emph{On exact $\infty$-categories and the theorem of the heart}. Compos. Math. \textbf{151} (2015), no. 11, 2160--2186.
\bibitem[B2]{B2} Barwick, C.: \emph{On the algebraic $K$-theory of higher categories}. J. Topol. \textbf{9} (2016), no. 1, 245--347.
\bibitem[B-TS]{BTS} Bennett-Tennenhaus, R.; Shah, A.: \emph{Transport of structure in higher homological algebra}. arXiv:2003.02254
\bibitem[C]{C} Cisinski, D-C.: \emph{Higher categories and homotopical algebra}. Cambridge Studies in Advanced Mathematics, \textbf{180}. Cambridge University Press, Cambridge, 2019. xviii+430 pp.
\bibitem[DK]{DK} Dyckerhoff, T.; Kapranov, M.: \emph{Higher {S}egal spaces}. Lecture Notes in Mathematics, \textbf{2244}. Springer, Cham, 2019. xv+218 pp.
\bibitem[HLN]{HLN} Herschend, M.; Liu, Y.; Nakaoka, H.: \emph{$n$-exangulated categories}. arXiv:1709.06689v2.
\bibitem[J]{J} Joyal, A.: \emph{Notes on quasi-categories}.
\bibitem[L1]{L1} Lurie, J.: \emph{Higher topos theory}. Annals of Mathematics Studies, \textbf{170}. Princeton University Press, Princeton, NJ, 2009. xviii+925 pp.
\bibitem[L2]{L2} Lurie, J.: \emph{Stable infinity categories}. arXiv:math/0608228.
\bibitem[NP]{NP} Nakaoka, H.; Palu, Y.: \emph{Extriangulated categories, Hovey twin cotorsion pairs and model structures}. Cah. Topol. G\'{e}om. Diff\'{e}r. Cat\'{e}g. \textbf{60} (2019), no. 2, 117--193.
\bibitem[S]{S} Schwede, S.: \emph{Algebraic versus topological triangulated categories}. In Cambridge Univ. Press, Cambridge, \emph{Triangulated categories}, London Math. Soc. Lecture Note Ser. \textbf{375} (2010), 389--407.
\bibitem[ZZ]{ZZ} Zhou, P.; Zhu, B.: \emph{Triangulated quotient categories revisited}. J. Algebra \textbf{502} (2018), 196--232.
\end{thebibliography}
\end{document} |
\begin{document}
\begin{abstract}
In this note, we give a rigorous proof that the NLS periodic Akhmediev breather is unstable. The proof follows the ideas in \cite{Munoz1}, in the sense that a suitable modification of the Stokes wave is the global attractor of the local Akhmediev dynamics for sufficiently large time, and therefore the latter cannot be stable in any suitable finite energy periodic Sobolev space.
\end{abstract}
\maketitle
\section{Introduction}
Let $a\in (0,\frac12)$. The Akhmediev breather \cite{Akhmediev}
\begin{equation}\label{Ak}
\begin{aligned}
A(t,x):=& ~ e^{it}\bigg[ 1+ \frac{\alpha^2 \cosh( \beta t) +i\beta \sinh(\beta t) }{ \sqrt{2a} \cos(\alpha x) -\cosh(\beta t)}\bigg], \qquad t,x\in\R,\\
\beta=&~ (8a(1-2a))^{1/2}, \quad \alpha=(2(1-2a))^{1/2},
\end{aligned}
\end{equation}
is a {\bf $\frac{2\pi}{a}$-periodic in space, localized in time} smooth solution to the \emph{focusing} cubic nonlinear Schr\"odinger equation (NLS) in one dimension:
\begin{equation}\label{NLS}
i\partial_t u + \partial_x^2 u +|u|^{2} u=0, \quad u=u(t,x)\in \mathbb{C}, \quad t,x\in \R.
\end{equation}
See Fig.~\ref{Fig:1}--\ref{Fig:2} for details. This equation appears as a model of propagation of light in nonlinear optical fibers (with different meanings for time and space variables), as well as in small-amplitude gravity waves on the surface of deep inviscid water. Additionally, this equation is completely integrable, as shown by Zakharov and Shabat \cite{Zakharov0}.
\begin{minipage}{\linewidth}
\centering
\begin{minipage}{0.45\linewidth}
\includegraphics[scale=0.317]{A1.pdf}
\captionof{figure}{\small $|A|$ with $a=0.2$.}\label{Fig:1}
\end{minipage}
\hspace{0.05\linewidth}
\begin{minipage}{0.45\linewidth}
\includegraphics[width=\linewidth]{A2.pdf}
\captionof{figure}{\small $|A|$ with $a=0.4$.}\label{Fig:2}
\end{minipage}
\end{minipage}
A particular feature of $A$ above is its nonzero boundary value at infinity in time and space. Indeed, $A$ converges, as $t\to\pm\infty$, to the Stokes wave $e^{it}$, also solution of \eqref{NLS}:
\begin{equation}\label{Asymptotic}
\lim_{t\to\pm \infty} \|A(t,x) - e^{\pm i \theta}e^{it}\|_{H^1_\sharp}=0, \quad e^{i\theta}=1-\alpha^2 -i\beta .
\end{equation}
Here, $H^s_\sharp:= H^s_\sharp((0,\frac{2\pi}{a}))$ denotes the standard Sobolev space $H^s$ of $\frac{2\pi}{a}$-space periodic functions. Consequently, $A(t,x)$ exemplifies the \emph{modulational instability} phenomenon, which---roughly speaking---says that small perturbations of the Stokes wave are unstable and grow quickly. This unstable growth leads to a nontrivial competition with the (focusing) nonlinearity, time at which the solution is apparently stabilized. The Akhmediev breather is also a candidate to explain the famous \emph{rogue waves}. An alternative explanation to the rogue wave phenomena is given by the notion of \emph{dispersive blow-up}, see Bona and Saut \cite{Bona_Saut}.
Two standard conserved quantities for \eqref{NLS} in the periodic setting are
\begin{equation}\label{Mass}
M[u]:=\int_{0}^{\frac{2\pi}{a}} (|u|^2 -1), \qquad \hbox{(Mass)}
\end{equation}
and
\begin{equation}\label{Energy}
E[u]:= \int_{0}^{\frac{2\pi}{a}} \left( |u_x|^2 - \frac12 (|u|^2-1)^2 \right), \qquad \hbox{(Energy).}
\end{equation}
A third one is given by \cite{AFM}
\begin{equation}\label{F_NLS}
F[u]:= \int_{0}^{\frac{2\pi}{a}} \big(|u_{xx}|^2 -3 (|u|^2-1)|u_x|^2 -\frac12((|u|^2)_x)^2 + \frac12 (|u|^2-1)^3 \big).
\end{equation}
This third conserved quantity appears from the integrability of the equation.
In this paper, we continue the work started by one of us in \cite{Munoz1}, where we proved that the Peregrine \cite{Peregrine,Akhmediev2} and Kuznetsov-Ma \cite{Kuznetsov,Ma} breathers are unstable under finite energy perturbations in any Sobolev space $H^s(\R)$, $s>\frac12$. Previously, the Peregrine soliton was showed to be numerically unstable under small perturbations by Klein and Haragus \cite{KH}. See \cite{Munoz1} for more details on those breathers, as well as a more or less accurate account of the current literature.
However, the stability analysis of \eqref{Ak} was left open because of its spatial periodic behavior. Our first and main result is the following:
\begin{thm}\label{Insta_A}
The Akhmediev breather \eqref{Ak} is unstable under small perturbations in $H^s_\sharp$, $s>\frac12$.
\end{thm}
By stability, we mean the following concept \cite{Munoz1}. Fix $s>\frac12$, and $t_0\in \R$. We say that a particular $\frac{2\pi}{a}$-periodic globally defined solution $U= e^{it}(1+W)$ of \eqref{NLS} is \emph{orbitally stable} in $H^s_\sharp(\frac{2\pi}{a})$ if there are constants $C_0,\varepsilon_0>0$ such that, for any $0<\varepsilon<\varepsilon_0$,
\begin{equation}\label{Stability}
\begin{aligned}
\| u_0 - & ~{} U(t_0)\|_{H^s_\sharp} < \varepsilon\\
& \Downarrow \\
\exists ~ x_0(t),\gamma_0(t)\in \R ~ \hbox{such that }~ & \sup_{t\in\R} \|u(t) - e^{i\gamma_0(t)}U(t,x-x_0(t)) \|_{H^s_\sharp} <C_0\, \varepsilon.
\end{aligned}
\end{equation}
\noindent
Here $u(t)$ is the solution to the IVP \eqref{NLS} with initial datum $u(t_0)=u_0$ (see Proposition \ref{MT2}), and $x_0(t),\gamma_0(t)$ can be assumed continuous because the IVP is well-posed in a continuous-in-time Sobolev space. If \eqref{Stability} is not satisfied, we will say that $U$ is {\bf unstable}. Note additionally that condition \eqref{Stability} requires $w$ globally defined, otherwise $U$ is trivially unstable, since $U$ is globally defined.
The proof of Theorem \ref{Insta_A} uses \eqref{Asymptotic} in a crucial way: a modified Stokes wave is an attractor of the dynamics around the Akhmediev breather for large time. See also \cite{AMP1,AMP2} for
numerical studies of the stability of mKdV and Sine-Gordon breathers in the periodic and nonperiodic settings.
Other rigorous stability results for breathers can be found in \cite{AM1,AM2, MP,Munoz,Alejo}.
No NLS \eqref{NLS} breather seems to be stable. In fact, Peregrine, Kuznetsov-Ma and now Akhmediev were shown to be unstable.
This is not necessarily consequence of the nonzero background. Indeed, even breathers on zero background \cite{AFM},
called Satsuma-Yajima breathers, are unstable.
Being $A$ unstable, it does not mean that it has no structure at all. In this paper we advance,
following the ideas introduced in \cite{AFM}, that indeed, $A$ has a very rich (unstable) variational structure. In particular,
\begin{thm}\label{MT}
The Akhmediev breather $A$ \eqref{Ak} is a critical point of the functional
\[
\mathcal H[u]:= F[u] - \alpha^2 E[u],
\]
that is to say, $\mathcal H'[A][w]=0$ for all $w\in H^2_\sharp$. In particular, for each $t\in\R$ $A(t,x)$ satisfies the nonlinear ODE
\begin{equation}\label{Ec_A}
\begin{aligned}
& A_{(4x)} + 3A_x^2 \bar A +(4 |A|^2-3) A_{xx}+ A^2 \bar A_{xx} + 2 |A_x|^2 A \\
&\qquad + \frac32 (|A|^2-1)^2 A + \alpha^2(A_{xx} + (|A|^2-1) A) =0.
\end{aligned}
\end{equation}
\end{thm}
The proof of this result follows easily from the methods in \cite{AFM}, in which one expands $\mathcal H[A+w]$. We get
\[
\mathcal H[A+w] =\mathcal H[A] + \mathcal H'[A][w] + O(\|w\|_{H^2_\sharp}^2).
\]
Then, performing some lengthy computations, one proves that $\mathcal H'[A][w] =0$ independently of $w$. See Section \ref{3} for the proof.
We believe that the variational structure appearing in breather solutions is independent of the well-posed character of the equation. In particular, we claim that the explicit breather of the strongly ill-posed bad Boussinesq equation
\begin{equation}\label{Bad}
u_{tt} - u_{xx} - \left( u_{xx} + \frac{3}{2} u^2 \right)_{xx}=0
\end{equation}
has an associated rich variational structure \cite{ACM}.
\noindent
{\bf Acknowledgments.} We thank the referee for his/her fruitful comments and suggestions which helped to improve this paper.
\section{Proof of Theorem \ref{Insta_A}}\label{2}
The proof is not difficult at all. We just need a preliminary well-posedness result. Set
\begin{equation}\label{deco}
u(t,x) = A(t,x) + w(t,x), \quad w~ \hbox{ unknown}.
\end{equation}
Then \eqref{NLS} becomes a modified NLS equation
\begin{equation}\label{mNLS}
\begin{aligned}
i\partial_t w + \partial_x^2 w & =~ - G[w] , \\
G[w]:= & ~ {} |A + w|^2(A+w) -|A|^2 A .
\end{aligned}
\end{equation}
\begin{prop}\label{MT2}
The NLS equation \eqref{NLS} is locally well-posed for any initial data at time $t=t_0$ of the form $A(t_0,x) + w_0(x)$, with $w_0\in H^s_\sharp$, $s>\frac12$.
\end{prop}
\begin{proof}
See Appendix \ref{A}.
\end{proof}
Note that there is always a local solution $u$ of \eqref{NLS} such that $u(t)= A(t) +w(t)$, with $w\in H^s_\sharp$. In particular, given time dependent parameters $x_0(t),\gamma_0(t)\in\R$, if the decomposition $u(t)= e^{i\gamma_0(t)}A(t,x-x_0(t)) +\tilde w(t)$ holds, then $\tilde w(t)$ still belongs to $H^s_\sharp$. This is not true in the non periodic case, see \cite{Munoz1}.
We did not try to improve the local well-posedness result for \eqref{mNLS} because the flow contains a non oscillatory bad component in the case of small frequencies, see \cite{Munoz1} for details. In particular, Strichartz estimates are not available in this case. Also, the global well-posedness of \eqref{mNLS} is an open question.
\subsection{End of proof} We only treat the case $t\to+\infty$, the other being very similar. Fix $s>\frac12$. Let us assume that the Akhmediev breather $A$ in \eqref{Ak} is orbitally stable, as in \eqref{Stability}. Write (see \eqref{Asymptotic})
\begin{equation}\label{Q_def}
\begin{aligned}
A(t,x)= &~ {}e^{it} (e^{i\theta} + Q(t,x)),\\
Q(t,x):= &~{} \frac{\alpha^2 \cosh( \beta t) +i\beta \sinh(\beta t) }{ \sqrt{2a} \cos(\alpha x) -\cosh(\beta t)} +(\alpha^2+i\beta) .
\end{aligned}
\end{equation}
Now consider, as a perturbation of the Akhmediev breather, the $\frac{2\pi}{a}$-periodic \emph{Stokes wave $ e^{i\theta}e^{it}$}. Indeed, we have (see \eqref{Q_def}),
\[
\lim_{t\to +\infty} \| A(t) -e^{i\theta}e^{it} \|_{H^s_\sharp}= \lim_{t\to +\infty} \| Q(t) \|_{H^s_\sharp} =0.
\]
Indeed, this follows from the identity
\begin{equation}\label{Q_new}
Q(t,x)= \alpha^2 \left(1 - \frac{1}{ 1-\sqrt{2a} \frac{\cos(\alpha x)}{\cosh(\beta t)}} \right) +i\beta \left( 1- \frac{\tanh(\beta t) }{ 1-\sqrt{2a} \frac{\cos(\alpha x)}{\cosh(\beta t)}} \right).
\end{equation}
Therefore, we have two solutions to \eqref{NLS} that converge to the same profile as $t\to +\infty$. This fact contradicts the orbital stability, since for $x_0(t),\gamma_0(t)\in \R$ given in \eqref{Stability}, and the definition of $A$ in \eqref{Ak},
\[
\begin{aligned}
\| e^{i\theta} - e^{i\gamma_0(0)} A(0,x-x_0(0)) \|_{H^s_\sharp} = &~{} \norm{ e^{i\theta} - e^{i\gamma_0(0)} \bigg[ 1+ \frac{\alpha^2 }{ \sqrt{2a} \cos(\alpha (x-x_0(0))) -1}\bigg] }_{H^s_\sharp} \\
= &~{}c_s>0,
\end{aligned}
\]
is a fixed number, but if $t_0=T$ is taken large enough, $ \| Q(T) \|_{H^s_\sharp}$ can be made arbitrarily small. Indeed, by classical interpolation ($\|u\|_{H^s_\sharp}^2:= \sum_{n\geq0} n^{2s}|\hat u(n)|^2$, and $n^{2s} = n^{2(0+s.1)}$ and H\"older)
\begin{equation}\label{interpolation}
\|Q(T)\|_{H^s_\sharp} \lesssim_s \|Q(T)\|_{L^2_\sharp}^{1-s}\|Q(T)\|_{H^1_\sharp}^{s},\quad s\in (0,1).
\end{equation}
Now, to evaluate $\lim_{t\to +\infty}\|Q(t)\|_{L^2_\sharp}$ requires some care. Clearly from \eqref{Q_new} we have $Q(t,x)\to 0$ as $t\to +\infty$, for all $x\in [0,\frac{2\pi}{\alpha})$. Also,
\[
|Q(t,x)| \lesssim \frac{\alpha^2\sqrt{2a}}{(1-\sqrt{2a})\cosh(\beta t)} + \frac{\beta}{(1-\sqrt{2a})}\left((1-\tanh(\beta t)) + \frac{\sqrt{2a}}{\cosh(\beta t)}\right).
\]
Therefore, by using dominated convergence we conclude. As for the derivative, note that
\begin{equation}\label{Q_new_x}
\partial_x Q(t,x)= \frac{\alpha^3 \sqrt{2a} \sin(\alpha x)}{\cosh(\beta t) \left( 1-\sqrt{2a} \frac{\cos(\alpha x)}{\cosh(\beta t)} \right)^2 } +i\frac{ \alpha \beta\sqrt{2a} \tanh(\beta t) \sin(\alpha x)}{\left( 1-\sqrt{2a} \frac{\cos(\alpha x)}{\cosh(\beta t)} \right)^2}.
\end{equation}
Proceeding in a similar fashion as in the $L^2$ norm, we have $\lim_{t\to +\infty}\|\partial_x Q(t)\|_{L^2_\sharp}=0.$ Therefore, we conclude from \eqref{interpolation} that $ \| Q(T) \|_{H^s_\sharp}$ can be made arbitrarily small if $T$ is sufficiently large.
Note finally that the Cauchy problem for \eqref{NLS} with initial data at time $T$ given by $u_0=e^{iT}e^{i\theta} = A(T)-e^{iT}Q(T)$ is well-defined from Proposition \ref{MT2}, since $e^{iT}Q(T) \in H^s_\sharp$. This proves Theorem \ref{Insta_A}.
\begin{rem}
We conjecture that any soliton solution constructed using B\"acklund transformations, with attached Akhmediev breathers, must be unstable.
\end{rem}
\section{Proof of Theorem \ref{MT}}\label{3}
Explicitly, we have from \eqref{Energy} and \eqref{F_NLS}, integration by parts, and the periodic character at the boundaries of $A$ and its spatial derivatives, and of $w$ and its first and second spatial derivatives,
\[
\begin{aligned}
&\mathcal H[A+w]= F[A+w] - \alpha^2 E[A+w]\\
&\quad =\int_{0}^{\frac{2\pi}{a}}\!\! \big(|A_{xx} + w_{xx}|^2 -3 (|A+w|^2-1)|A_x+w_x|^2 -\frac12((|A+w|^2)_x)^2 + \frac12 (|A+w|^2-1)^3 \big)\\
&\quad \quad -\alpha^2\int_{0}^{\frac{2\pi}{a}} \left( |A_x+w_x|^2 - \frac12 (|A+w|^2-1)^2 \right)\\
&\quad = \mathcal H[A] + \int_{0}^{\frac{2\pi}{a}} \big(2\operatorname{Re}(A_{4x}\bar{w}) - 3(|A|^2-1)2\operatorname{Re}(A_x\bar{w}_x) - 3(2\operatorname{Re}(A\bar{w}))|A_x|^2\\
&\quad \quad \qquad \qquad \qquad - (|A|^2)_x(2\operatorname{Re}(A\bar{w}))_x + \frac32(|A|^2-1)^22\operatorname{Re}(A\bar{w})\big)\\
& \quad \quad -\alpha^2\int_{0}^{\frac{2\pi}{a}} \big(-2\operatorname{Re}(A_{xx}\bar{w}) -(|A|^2-1)2\operatorname{Re}(A\bar{w}) \big) + O(\|w\|_{H^2_\sharp}^2)\\
&\quad = \mathcal H[A] + 2\operatorname{Re}\int_{0}^{\frac{2\pi}{a}} \big(A_{4x}\bar{w} - 3(|A|^2-1)A_x\bar{w}_x - 3A\bar{w}|A_x|^2 + (|A|^2)_{xx}A\bar{w}\\
&\quad \quad \qquad \qquad \qquad\qquad + \frac32(|A|^2-1)^2A\bar{w} -\alpha^2\big[-A_{xx}\bar{w} -(|A|^2-1)A\bar{w} \big]\big) + O(\|w\|_{H^2_\sharp}^2)\\
&\quad = \mathcal H[A] \\
&\quad \quad + 2\operatorname{Re}\int_{0}^{\frac{2\pi}{a}}\bar{w} \big(A_{4x} + 3(|A|^2-1)A_{xx} + 3(A_x^2\bar{A}+A|A_{x}|^2) - 3A|A_x|^2\\
&\quad \quad \qquad \qquad \qquad + A_{xx}|A|^2 + A^2\bar{A}_{xx} + 2A|A_x|^2 + \frac32(|A|^2-1)^2A +\alpha^2(A_{xx} + (|A|^2-1)A)\big) \\
&~ \quad \quad + O(\|w\|_{H^2_\sharp}^2)\\
&\quad = \mathcal H[A] +2\operatorname{Re}\int_{0}^{\frac{2\pi}{a}}\bar{w} \big(A_{4x} + 3A_x^2 \bar A +(4 |A|^2-3) A_{xx}+ A^2 \bar A_{xx} + 2 |A_x|^2 A \\
&\quad \quad \qquad \qquad \qquad\qquad + \frac32 (|A|^2-1)^2 A + \alpha^2(A_{xx} + (|A|^2-1) A)\big) + O(\|w\|_{H^2_\sharp}^2),
\end{aligned}
\]
and therefore we get
\[
\mathcal H[A+w] =\mathcal H[A] + \mathcal H'[A][w] + O(\|w\|_{H^2_\sharp}^2).
\]
Then, performing some lengthy computations (see Appendix \ref{A0}) one proves that $\mathcal H'[A][w] =0$ independently of $w$. This proves Theorem \ref{MT}.
\appendix
\section{Proof of \eqref{Ec_A}}\label{A0}
Following \cite{AFM}, let us use the notation for the Akhmediev breather solution \eqref{Ak}:
\begin{equation}\label{AppA}
\begin{aligned}
&A=e^{it}\left(1 + \frac{M}{N}\right), \quad\text{with}\\
& M:=\alpha^2 \cosh( \beta t) +i\beta \sinh(\beta t) ,\\
& N:= \sqrt{2a} \cos(\alpha x) -\cosh(\beta t).
\end{aligned}
\end{equation}
Now, we rewrite the identity \eqref{Ec_A} in terms of $M,N$ in the following way
\begin{equation}\label{AppKAAA}
\eqref{Ec_A} = \frac{e^{it}}{N^5}\sum_{i=1}^{6}R_i,
\end{equation}
\noindent
with $R_i$ given explicitly by:
\begin{equation}\label{AppAs1}
\begin{aligned}
R_1:= & ~{} \frac{1}{2}N\big(6iMN_tN_x^2-2iN(N_x(M_tN_x+M(iN_x+2N_{xt})) + N_t(2M_xN_x+MN_{xx}))\\
& ~ {} \qquad +N^3(M_{xx}-iM_{xxt})+N^2(-2M_x(N_x-iN_{xt}) \\
& ~ {} \qquad +i(2N_xM_{xt}+N_tM_{xx}+iMN_{xx}+M_tN_{xx}+MN_{xxt}))\big),
\end{aligned}
\end{equation}
\begin{equation}\label{AppAs2}
\begin{aligned}
R_2:=&~{}\frac12\big(2M(\bar{M} + N) + N(2\bar{M} + (\alpha^2-1)N)\big)\\
&\cdot\big(2MN_x^2 + N^2M_{xx}-N(2M_xN_x+MN_{xx})\big),
\end{aligned}
\end{equation}
\begin{equation}\label{AppAs3}
\begin{aligned}
&R_3:=~{}(M+N)(-NM_x+MN_x)(N\bar{M}_x - \bar{M}N_x),
\end{aligned}
\end{equation}
\begin{equation}\label{AppAs4}
\begin{aligned}
&R_4:=~{}\frac12(\bar{M}+N)(NM_x-MN_x)^2,
\end{aligned}
\end{equation}
\begin{equation}\label{AppAs5}
\begin{aligned}
&R_5:=~{}\frac{1}{2}N^2(M+N)\big((\frac32 - \alpha^2)N^2 + (-3 + \alpha^2)(\bar{M}+N)(M+N)\big),
\end{aligned}
\end{equation}
and
\begin{equation}\label{AppAs6}
\begin{aligned}
R_6:=&~{} \frac34(M+N)^3(\bar{M}+N)^2.
\end{aligned}
\end{equation}
Now substituting the explicit functions $M,N$ \eqref{AppA} in $R_i,~i=1,\dots,6$ and collecting terms, we get
\[
\begin{aligned}
&\sum_{i=1}^{6}R_i \\
&~{} = a_1\cosh(t\beta) + a_2\cosh^3(\beta t) + a_3\cosh^5(\beta t) + a_4\sinh(t\beta) + a_5\cosh^2(\beta t)\sinh(\beta t) \\
&\quad + a_6\cosh^4(\beta t)\sinh(\beta t) + a_7\cos( \alpha x) + a_8\cosh^2(\beta t)\cos(\alpha x) + a_9\cosh^4(\beta t)\cos(\alpha x)\\
&\quad + a_{10} \cosh(\beta t)\sinh(\beta t)\cos(\alpha x) + a_{11}\cosh^3(\beta t)\sinh(\beta t)\cos(\alpha x) + a_{12}\cosh(\beta t)\cos^2(\alpha x) \\
& \quad + a_{13}\cosh^3(\beta t)\cos^2(\alpha x) + a_{14}\cosh^2(\beta t)\cos^3(\alpha x) + a_{15}\cosh(\beta t)\cos^4(\alpha x),
\end{aligned}
\]
with coefficients $a_i,~i=1,\dots,15$ given as follows
\[
\begin{aligned}
&a_1=\frac32 (-1 + \alpha^2) \beta^2 (-4 a \alpha^2 + \beta^2),\\
&a_2=(-(-1 + \alpha^2) \beta^2 (-5 \alpha^2 + 3 \alpha^4 + 3 \beta^2) + 2 a (-5 \alpha^6 + 3 \alpha^8 - \alpha^2 \beta^2 + 3 \alpha^4 \beta^2)) ,\\%R^3
&a_3=\frac12(-1 + \alpha^2) (-10 \alpha^6 + 3 \alpha^8 - 10 \alpha^2 \beta^2 + 3 \beta^4 + \alpha^4 (8 + 6 \beta^2)),\\% R^5
&a_4=\frac32 i \beta^3 (-4 a \alpha^2 + \beta^2),\\% S
&a_5=i \beta (\beta^2 (5 \alpha^2 - 3 \alpha^4 - 3 \beta^2) + a (-8 \alpha^4 + 6 \alpha^6 + 6 \alpha^2 \beta^2)),\\% R^2 S
&a_6=\frac12i \beta (-10 \alpha^6 + 3 \alpha^8 - 10 \alpha^2 \beta^2 + 3 \beta^4 + \alpha^4 (8 + 6 \beta^2)),\\% R^4 S
&a_7=\frac32 \sqrt{2a}\beta^2 (-4 a \alpha^2 + \beta^2),\\% X
&a_8=-\sqrt{2a}(\beta^2 (-7 \alpha^2 + 5 \alpha^4 + 3 \beta^2) + a (-6 \alpha^6 + 2 \alpha^2 \beta^2)),\\% R^2 X
&a_9=\frac12\sqrt{2a}(-24 \alpha^6 + 7 \alpha^8 - 16 \alpha^2 \beta^2 + 3 \beta^4 + 10 \alpha^4 (2 + \beta^2)),\\% R^4 X
&a_{10}=2 i \sqrt{2a}\alpha^2 \beta (4 a \alpha^2 - \beta^2),\\% R S X
&a_{11}=2 i \sqrt{2a}\alpha^2 \beta (-2 \alpha^2 + \alpha^4 + \beta^2),\\% R^3 S X
&a_{12}=4 a \alpha^2 (-\beta^2 + 2 a (\alpha^4 + \beta^2)),\\% R X^2
&a_{13}=6 a \alpha^2 (-2 \alpha^2 + \alpha^4 + \beta^2),\\% R^3 X^2
&a_{14}=2 \sqrt{2a}a\alpha^2 (-2 \alpha^2 + \alpha^4 + \beta^2),\\% R^2 X^3
&a_{15}=-4 a^2 \alpha^2 (-2 \alpha^2 + \alpha^4 + \beta^2).
\end{aligned}
\]
Finally, using that $\alpha =\sqrt{2 (1-2 a)}$ and $\beta =\sqrt{8a (1-2 a)}$, we have that all $a_i$ vanish, and we conclude.
\section{Sketch of Proof of Proposition \ref{MT2}}\label{A}
First of all, we have from \eqref{mNLS} that
\[
G[w]= 2A\operatorname{Re}(A \bar w) + |A|^2 w + A |w|^2 + 2\operatorname{Re}(A \bar w)w +|w|^2w.
\]
By scaling and the subcritical character of \eqref{mNLS}, we can assume that the linear term in $G[w]$ above is small. We can also assume the initial time $t_0=0$. By Duhamel's formula, we have
\[
w(t) = e^{it\partial_x^2} w_0 - \int_0^t e^{i(t-s)\partial_x^2}G[w](s)ds.
\]
Hence, applying the standard Sobolev estimates in $H^s_\sharp$, with $s>\frac12$, we readily obtain the contraction principle required. Note that no use of Strichartz estimates is needed. See \cite{Cazenave} or \cite{LP} for additional details on the fixed point argument. We skip the details.
\subsection*{Conflict of interest statement:} The authors certify that there is no conflict of interest, of any possible type, related to this article.
\providecommand{\bysame}{\leavevmode\hbox to3em{\hrulefill}\thinspace}
\providecommand{\MR}{\relax\ifhmode\unskip\space\fi MR }
\providecommand{\MRhref}[2]{
\href{http://www.ams.org/mathscinet-getitem?mr=#1}{#2}
}
\providecommand{\href}[2]{#2}
\begin{thebibliography}{99}
\bibitem{Akhmediev2} N. Akhmediev, A. Ankiewicz, and M. Taki, \emph{Waves that appear from nowhere and disappear without a trace}, Phys. Lett. A 373 (2009) 675--678.
\bibitem{Akhmediev} N. Akhmediev, and V. I. Korneev, \emph{Modulation instability and periodic solutions of the nonlinear Schr\"odinger equation}. Theor. Math. Phys. 69, 1089--1093 (1986).
\bibitem{Alejo} M. A. Alejo, \emph{Nonlinear stability of Gardner breathers}, J. Diff. Eqns. 264/2, (2018) pp. 1192--1230.
\bibitem{ACM} M. A. Alejo, F. Cortez, and C. Mu\~noz, \emph{Variational structure of breathers in strongly ill-posed equations}, under preparation 2018.
\bibitem{AFM} M. A. Alejo, L. Fanelli, and C. Mu\~noz, \emph{Stability and instability of breathers in the $U(1)$ Sasa-Satsuma and Nonlinear Schr\"odinger models}, preprint arXiv:1901.10381.
\bibitem{AM1} M. A. Alejo, and C. Mu\~noz, \emph{Nonlinear stability of mKdV breathers}, Comm. Math. Phys. (2013), Vol. 324, Issue 1, pp. 233--262.
\bibitem{AM2} M. A. Alejo, and C. Mu\~noz, \emph{Dynamics of complex-valued modified KdV solitons with applications to the stability of breathers}, Anal. and PDE. \textbf{8} (2015), no. 3, 629--674.
\bibitem{AMP1} M. A. Alejo, C. Mu\~noz, and J. M. Palacios, \emph{On the Variational Structure of Breather Solutions I: Sine-Gordon equation}, J. Math. Anal. Appl. Vol. 453 2, 1111--1138.
\bibitem{AMP2} M. A. Alejo, C. Mu\~noz, and J. M. Palacios, \emph{On the Variational Structure of Breather Solutions II: periodic mKdV equation}. EJDE Vol. 2017 (2017), No. 56, pp. 1--26.
\bibitem{Bona_Saut} J. L. Bona, and J.-C. Saut, \emph{Dispersive Blowup of solutions of generalized Korteweg-de Vries equations}, J. Diff. Eqns. 103 no. 1 (1993) 3--57.
\bibitem{Cazenave} T. Cazenave, \emph{Semilinear Schr\"odinger equations}, Courant Lecture Notes in Mathematics, 10. New York University, Courant Institute of Mathematical Sciences, New York; American Mathematical Society, Providence, RI, 2003. xiv+323 pp. ISBN: 0-8218-3399-5.
\bibitem{KH} C. Klein, and M. Haragus, \emph{Numerical study of the stability of the Peregrine breather}, preprint arXiv:1507.06766. Annals of Math. Sciences and Appl. Vol. 2, 2, 217--239, (2017).
\bibitem{Kuznetsov} E. Kuznetsov, \emph{Solitons in a parametrically unstable plasma}, Sov. Phys. Dokl. 22, 507--508 (1977).
\bibitem{LP} F. Linares, and G. Ponce, \emph{Introduction to nonlinear dispersive equations}, Second edition. Universitext. Springer, New York, 2015. xiv+301 pp.
\bibitem{Ma} Y. C. Ma, \emph{The perturbed plane-wave solutions of the cubic Schr\"odinger equation}, Stud. Appl. Math. 60, 43--58 (1979).
\bibitem{Munoz} C. Mu\~noz, \emph{Stability of integrable and nonintegrable structures}, Adv. Differential Equations 19 (2014), no. 9-10, 947--996.
\bibitem{Munoz1} C. Mu\~noz, \emph{Instability in nonlinear Schr\"odinger breathers}. Proyecciones (Antofagasta) (2017), vol. 36, n. 4, pp. 653--683.
\bibitem{MP} C. Mu\~noz, and J. M. Palacios, \emph{Nonlinear stability of 2-solitons of the Sine-Gordon equation in the energy space}, Ann. IHP Analyse Nonlin\'eaire Volume 36, Issue 4, July 2019, Pages 977--1034.
\bibitem{Peregrine} D. H. Peregrine, \emph{Water waves, nonlinear Schr\"odinger equations and their solutions}, J. Austral. Math. Soc. Ser. B 25, 16--43 (1983).
\bibitem{Zakharov0} V. E. Zakharov, and A. B. Shabat, \emph{Exact theory of two-dimensional self-focusing and one-dimensional self-modulation of waves in nonlinear media}, JETP, 34 (1): 62--69.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Communicating without shared reference frames}
\author{Alexander R. H. Smith}
\email[]{alexander.r.smith@dartmouth.edu}
\affiliation{Department of Physics and Astronomy, Dartmouth College, Hanover, New Hampshire 03755, USA}
\date{\today}
\begin{abstract}
We generalize a quantum communication protocol introduced by Bartlett \emph{et al.} [New. J. Phys. 11, 063013 (2009)] in which two parties communicating do not share a classical reference frame, to the case where changes of their reference frames form a one-dimensional noncompact Lie group. Alice sends to Bob the state $\rho_R \otimes \rho_S$, where $\rho_S$ is the state of the system Alice wishes to communicate and $\rho_R$ is the state of an ancillary system serving as a token of her reference frame. Because Bob is ignorant of the relationship between his reference frame and Alice's, he will describe the state $\rho_R \otimes \rho_S$ as an average over all possible reference frames. Bob measures the reference token and applies a correction to the system Alice wished to communicate conditioned on the outcome of the measurement. The recovered state $\rho_S'$ is decohered with respect to $\rho_S$, the amount of decoherence depending on the properties of the reference token $\rho_R$. We present an example of this protocol when Alice and Bob do not share a reference frame associated with the one-dimensional translation group and use the fidelity between $\rho_S$ and $\rho_S'$ to quantify the success of the recovery operation.
\end{abstract}
\maketitle
\section{Introduction}
\label{Introduction}
Most quantum communication protocols assume that the parties communicating share a classical background reference frame. For example, suppose Alice wishes to communicate to Bob the state of a qubit using a teleportation protocol \cite{Nielsen:2010}. Alice begins by having the qubit she wishes to communicate to Bob interact with one half of an entangled pair of qubits shared by her and Bob. Alice then measures the two qubits in her possession and picks up the phone and informs Bob of the measurement result. Bob uses this information to apply an appropriate gate to his half of the entangled pair to recover the state Alice wished to send to him.
The success of this protocol depends on Alice's ability to classically communicate to Bob which gates he should apply to his half of the entangled state. This can only be done if Alice and Bob share a reference frame. As an example, suppose Alice informs Bob that he needs to apply the Pauli $z$ operator to the qubit in his possession. If Bob is ignorant of the orientation of his lab with respect to Alice's, he does not know in which direction to orient the magnetic field in his Stern-Gerlach apparatus to implement the Pauli $z$ operator to recover the state sent by Alice. In this case the teleportation protocol is unable to be carried out perfectly \cite{Giulio-Chiribella:2012, Verdon:2018,*Verdon2:2018}.
This motivates the study of quantum communication without a shared reference frame \cite{Bartlett:2007}. One way Alice can communicate to Bob, despite not sharing a reference frame with him, is to encode information into degrees of freedom that are invariant under a change of Alice's reference frame. Without knowing his relation to Alice's reference frame, Bob is able to extract both classical and quantum information encoded in these degrees of freedom \cite{Rudolph:2003}. However, in practice such communication schemes may be challenging to implement since they require highly entangled states of many qubits.
Another possibility for Alice and Bob to communicate without a shared reference frame is for Alice to send Bob a quantum system $\rho_R$ to serve as a token of her reference frame, together with the state $\rho_S$ she wishes to communicate to him. Since Bob does not know the relation between his reference frame and Alice's, with respect to his reference frame he will see the joint state $\rho_R \otimes \rho_S$ averaged over all possible orientations of his lab with respect to Alice's; this averaging operation is referred to as the \mbox{$G$-twirl} and the averaged state denoted as $\mathcal{G} [\rho_R \otimes \rho_S]$. Bob can apply a recovery operation to this $G$-twirled state by measuring the reference token and applying an appropriate correction to the system Alice wishes to send to him, allowing him to recover a state $\rho_S'$ that is close to $\rho_S$. This recovery operation was first constructed by Bartlett \emph{et al.} \cite{Bartlett:2009}, and its success was found to depend on the size of the reference token, which is necessarily bounded if the reference token is described by a finite dimensional Hilbert space.
However, this communication protocol is based on Bob assigning the $G$-twirled state $\mathcal{G} [\rho_R \otimes \rho_S]$ to the system and reference token, and the $G$-twirl does not yield normalizable states when the group of reference frames being averaged over is noncompact \cite{Smith:2016}. This begs the question: Can an analogous communication protocol involving a reference token sent by Alice and a recovery operation implemented by Bob be constructed given that changes of their reference frames form a noncompact group? Furthermore, if the Hilbert space of the reference token is infinite dimensional, for example $\mathcal{H}_R \simeq L^2(\mathbb{R})$, what physical aspect of the reference token acts as its effective size?
The purpose of this article is to examine these questions. Considerations of noncompact groups within the theory of quantum reference frames is important if one hopes to apply the theory to the physically relevant Galilean and Poincar\'{e} groups, which are both noncompact.
We begin in Sec.~\ref{Communication with our a shared reference frame} by describing the encoding and recovery operations introduced by Bartlett \emph{et al.} \cite{Bartlett:2009}. In Sec.~\ref{A recovery operation for noncompact groups} we introduce a $G$-twirl over a compact subset of a noncompact group and a complementary recovery operation, such that in the limit when this $G$-twirl becomes an average over the entire noncompact group, the composition of the recovery operation with this $G$-twirl results in properly normalized states. We then apply this construction in Sec.~\ref{Application to reference frames with the translation group} to the case when Alice and Bob do not share a reference frame associated with the one-dimensional translation group, which is relevant for parties communicating without a shared positional reference frame. In this case, we identify the inverse of the width in position space of the reference token's state as the effective size of the reference token and demonstrate that in the limit when this width goes to zero Alice and Bob are able to communicate perfectly without a shared reference frame. We conclude in Sec.~\ref{SummaryCh7} with a summary of our results and an outlook to future questions.
\section{Communication without a shared classical reference frame}
\label{Communication with our a shared reference frame}
Consider two parties, Alice and Bob, each employing their own classical reference frame to describe the state of a single quantum system associated with the Hilbert space $\mathcal{H}_S$. Suppose that this system transforms via a unitary representation of the group $G$ when changing the reference frame used to describe the system; for the time being we will assume $G$ is a compact Lie group.
Let $g\in G$ label the group element which describes the transformation from Alice's to Bob's reference frame. If Alice prepares the system in the state $\rho_S \in \mathcal{S}(\mathcal{H}_S)$ with respect to her reference frame, where $\mathcal{S}(\mathcal{H}_S)$ is the space of states on $\mathcal{H}_S$, and $g$ is completely unknown to Bob, then the state with respect to his reference frame will be given by a uniform average over all possible $g \in G$; that is, by the $G$-twirl
\begin{align}
\mathcal{G}[\rho_S] \colonequals \int_G d g \, U_S(g) \, \rho_S \, U_S(g)^\dagger, \label{firstGtwirl}
\end{align}
where $d g$ denotes the Haar measure associated with $G$ and $U_S(g)\in \mathcal{U}(\mathcal{H}_S)$ is the unitary representation of the group element $g\in G$ on $\mathcal{H}_S$, with $\mathcal{U}(\mathcal{H}_S)$ denoting the space of unitary operators on $\mathcal{H}_S$. If instead Bob has some partial information about the relation between his reference frame and Alice's, the uniform average over all possible $g\in G$ in Eq.~\eqref{firstGtwirl} would be replaced with a weighted average encoding Bob's partial information~\cite{Miatto:2012, Ahmadi:2015}.
In general, the $G$-twirl results in decoherence, not from the system interacting with an environment and information being lost to the environment, but from Bob's lack of knowledge about the relationship between his reference frame and Alice's. To combat this decoherence, Alice may prepare another quantum system, described by the Hilbert space $\mathcal{H}_R$, to serve as a token of her reference frame (a good representative of her reference frame). Suppose Alice prepares the token in the state $\ket{e} \in \mathcal{H}_R$, then the reference token and system relative to Bob's frame will be given by the encoding operation
\begin{align}
\mathcal{E}[\rho_S] &\colonequals \mathcal{G} \big[ \ket{e} \! \bra{e} \otimes \rho_S \big] \nonumber \\
&= \int_G dg \, \mathcal{U}_R(g)\!\left[ \ket{e}\! \bra{e} \right] \otimes \mathcal{U}_S(g)\!\left[ \rho_S \right],
\label{encoding}
\end{align}
where $\mathcal{U}_i(g) \! \left[ \rho \right] \colonequals U_i(g) \, \rho \, U_i(g)^\dagger$ denotes the adjoint representation of the action of the group element $g\in G$ on $\rho \in \mathcal{S} \left(\mathcal{H}_i \right)$ for $i \in \{R, S\}$.
Bob's task is now to best recover the state of the system $\rho_S$ given the encoded state $\mathcal{E}[\rho_S]$. In other words, he must construct a recovery operation
\begin{align}
\mathcal{R}: \mathcal{S}(\mathcal{H}_R\otimes \mathcal{H}_S) \to \mathcal{S}(\mathcal{H}_S),
\end{align}
that when applied to $\mathcal{E}[\rho_S]$ results in a state $\rho_S' \in \mathcal{S}(\mathcal{H_S})$ that is as close as possible to $\rho_S$. A recovery operation
$\mathcal{R}$ was constructed by Bartlett \emph{et al.} \cite{Bartlett:2009} with such properties, and its action on the encoded state $\mathcal{E}[\rho_S]$ yields
\begin{align}
\rho_S' \colonequals \mathcal{R} \circ \mathcal{E}[\rho_S] = \int_G d g \, p\!\left(g\right) \mathcal{U}_S(g) \!\left[ \rho_S \right],
\label{composition}
\end{align}
where $p\!\left(g\right) \propto \abs{\braket{e| U_R(g) | e}}^2$ with $U_R(g)\in \mathcal{U}(\mathcal{H}_R)$ being the unitary representation of $g\in G$ on $\mathcal{H}_R$. We will explicitly construct a similar recovery operation in the next section for the case when $G$ is noncompact.
\begin{figure}
\caption{
The communication channel $\mathcal{R}_\tau \circ \mathcal{E}_\tau$ formed by composing the encoding operation $\mathcal{E}_\tau$ with the recovery operation $\mathcal{R}_\tau$.}
\label{CommunicationChannel}
\end{figure}
\section{A recovery operation for noncompact groups}
\label{A recovery operation for noncompact groups}
The action of the $G$-twirl over a noncompact group on a state results in a non-normalizable density matrix \cite{Smith:2016}. For example, consider the $G$-twirl over the non-compact group of translations in one dimension $T_1$ of the state $\rho \in \mathcal{S} \left( L^2(\mathbb{R})\right)$. The unitary representation of $g\in T_1$ is $U(g) = e^{-iPg}$, where $P$ is the momentum operator on $L^2(\mathbb{R})$, and the $G$-twirl over $T_1$ is
\begin{align}
\mathcal{G}_{T_1}\!\left[\rho \right] &= \int d{g} \, e^{-i g {P}} \left(\int dp dp' \, \rho\! \left(p, p' \right) \ket{p}\!\bra{p'} \right) e^{ig {P}} \nonumber \\
& = 2\pi \int d{p} \, \rho \!\left(p,p \right) \ket{p}\!\bra{p}, \label{TranslationTwirl}
\end{align}
where $\ket{p}$ denote the eigenkets of the momentum operator $P$, $\rho\! \left(p, p' \right) \colonequals \braket{p' | \rho | p}$, and $dg$ is the Haar measure\footnote{Even though $G$ is a noncompact Lie group, it is still locally compact, and thus possesses a nontrivial left invariant Haar measure that is unique up to a positive constant~\cite{Nachbin:1965}. In the case of the translations group considered here, $dg$ corresponds to the Lebesgue measure on the real line.} associated with $T_1$; in going from the first to the second equality we have used the definition of the Dirac delta function $2 \pi \delta(p-p') \colonequals \int d g \, e^{ig(p-p')}$. From Eq.~\eqref{TranslationTwirl} it is clear that $\mathcal{G}_{T_1}\!\left[\rho \right] \notin \mathcal{S}\left( \mathcal{H} \right)$, which can be verified by computing the norm of $\mathcal{G}_{T_1}\!\left[\rho \right]$ which is infinite.
Given that the codomain of the $G$-twirl over a noncompact group does not necessarily correspond to the state space $\mathcal{S}(\mathcal{H})$, it is not clear whether the encoding operation $\mathcal{E}$ or the recovery operation $\mathcal{R}$ discussed above are applicable to reference frames associated with noncompact groups. We now demonstrate that despite this fact, the composition of an encoding operation associated with a noncompact group with a suitably defined recovery operation results in a properly normalized state.
The approach we will take is to define a compact $G$-twirl over a compact subset of the noncompact group $G$ associated with the reference frame, which corresponds to Bob having partial information that the relation between his reference frame and Alice's is described by $g \in [-\tau, \tau] \subset G$. This compact $G$-twirl will be used in an encoding operation analogous to Eq.~\eqref{encoding}. We will then construct a complementary recovery operation, compose it with this encoding operation (similar to Eq.~\eqref{composition}), and finally take a limit in which the compact $G$-twirl corresponds to a uniform average over the entire noncompact group $G$. We will show that in this limit the recovered state is properly normalized and contained in $\mathcal{S} ( \mathcal{H}_S )$.
\subsubsection{The encoding map}
Consider all possible transformations of Alice's and Bob's classical reference frames to form a strongly continuous one-parameter noncompact Lie group $G$. Suppose that the unitary representation of a group element $g\in G$ on the Hilbert space $\mathcal{H}_R$ describing the reference token is $U_R(g)\in \mathcal{U}(\mathcal{H}_R)$. By Stone's theorem~\cite{Stone:1930}, $U_R(g) = e^{igA_R}$ is generated by a self-adjoint operator ${A}_R$, the spectrum of which we denote by $\sigma(A_R)$ and assume to be continuous\footnote{This is true of the group generated by either the position or momentum operator on $L^2(\mathbb{R})$. We note that the following construction does not rely on $\sigma(A_R)$ being continuous. }. For each element of the spectrum $f(a_R) \in \sigma(A_R)$ there corresponds an eigenket $\ket{a_R}$ such that
\begin{align}
{A}_R \ket{a_R} =f(a_R) \ket{a_R},
\label{eigenEquation}
\end{align}
with eigenvalue $f(a_R) \in \mathbb{R}$. Since $\sigma(A_R)$ is continuous and $A_R$ is self-adjoint, these eigenkets are normalized with the Dirac delta function
\begin{align}
\braket{a_R|a_R'} = \delta\big(a_R-a_R'\big).
\end{align}
From the above normalization condition we see that $\ket{a_R} \not\in \mathcal{H}_R$, as these eigenkets are not square integrable and therefore do not represent physical states\footnote{More precisely~\cite{Ballentine:1998}, when dealing with operators with continuous spectrum the theory is defined on a rigged Hilbert space defined by the triplet $\Phi \subset \mathcal{H}_R \subset \Phi'$, where $\Phi$ is a proper subset dense in $\mathcal{H}_R$ and $\Phi'$ is the dual of $\Phi$, defined through the inner product on $\mathcal{H}_R$. In our case, $\Phi$ is the Schwarz space of smooth rapidly decreasing functions on $\mathbb{R}$ and $\Phi'$ is the space of tempered distributions on $\mathbb{R}$. The eigenkets $\ket{a_R}$ are in $\Phi'$.}.
Our first step is to construct a well defined encoding operation analogous to Eq.~\eqref{encoding}. To do so, we suppose the state of Alice's reference token $\ket{e} \in \mathcal{H}_R$, expressed in the basis furnished by the eigenkets of ${A}_R$, is
\begin{align}
\ket{e} \colonequals \int d a_R \, \psi_R(a_R) \ket{a_R},\label{ReferenceTokenState}
\end{align}
where $\psi_R(a_R) \colonequals \braket{a_R | e}$. Next, let us introduce the set of states
\begin{align}
\big\{ \ket{e(g)} \colonequals U_R(g) \ket{e} \, \big| \, \forall g \in G \big\},
\label{SetOfReferenceTokens}
\end{align}
where each $\ket{e(g)}$ corresponds to a different orientation of Alice's reference frame. The state of the reference token $\ket{e}$ should be chosen such that each $\ket{e(g)}$ defined in Eq.~\eqref{SetOfReferenceTokens} is distinct, that is, the state of the reference token should not be invariant with respect to $G$. Furthermore, for the states $\ket{e(g)}$ to imitate a classical reference frame, they must be orthogonal
so that they are perfectly distinguishable.
Now suppose Alice prepares her reference token in the state $\rho_R \in \mathcal{S}(\mathcal{H}_R)$
and wishes to send Bob the state $\rho_S \in \mathcal{S}(\mathcal{H}_S)$ of a system associated with the Hilbert space $\mathcal{H}_S$. If Bob knows the relation between his reference frame and Alice's is given by a group element $g \in [-\tau,\tau] \subset G$, but within this interval he is completely ignorant of which group element corresponds to this relation, he will describe the joint state of the reference token and system by the output of the encoding operation
\begin{align}
\mathcal{E}_\tau : \ \mathcal{S}(\mathcal{H}_S) &\to \mathcal{S}(\mathcal{H}_R \otimes \mathcal{H_S}) \nonumber \\
\rho_S &\mapsto \mathcal{E}_\tau[\rho_S] \colonequals \mathcal{G}_\tau \big[ \rho_R \otimes \rho_S \big],
\label{tauEncoding}
\end{align}
where the map $\mathcal{G}_\tau$ is a uniform average of $\rho_S$ over the compact interval $[-\tau, \tau] \subset G$,
\begin{align}
\mathcal{G}_\tau \left[ \rho_R \otimes \rho_S \right] \colonequals \frac{1}{2\tau} \int^\tau_{-\tau} d g \ \mathcal{U}_R(g) \! \left[ \rho_R \right] \otimes \mathcal{U}_S(g) \! \left[ \rho_S \right],
\end{align}
where $dg$ is the Haar measure associated with $G$.
\subsubsection{The recovery operation}
As demonstrated by Bartlett \emph{et al.} \cite{Bartlett:2009}, Bob may perform a recovery operation $\mathcal{R}$ by first making a measurement of the reference token, followed by a reorientation of the system conditioned on the outcome of the measurement, and then discarding both the reference token and measurement result. We follow this procedure in constructing the recovery operation to be applied to the encoded state $\mathcal{E}_\tau(\rho_S)$.
Bob will make a measurement $R$ of the reference token described by the POVM elements
\begin{align}
R \colonequals \big\{ dg \, E(g) , \ \forall g \in [-\tau, \tau] \subset G\big\} \cup \big\{ {E}_\tau \big\},
\end{align}
where
\begin{align}
{E}_\tau \colonequals I_R -\int_{-\tau}^\tau d g \,E(g),
\label{Edef}
\end{align}
$dg \, E(g)$ is the POVM element associated with outcome $g \in G$, and $I_R$ is the identity operator on $\mathcal{H}_R$. We assume\footnote{To the best of the authors' knowledge the question of whether such a measurement exists for any $G$ is an open problem, as suggested by the remarks in Sec. III.4.4 of Ref. \cite{Busch:1997}. However, it is suggested in this reference that it seems plausible that such a measurement can be constructed, although there does not seem to be an easy general procedure for its construction. Nonetheless, such measurements exist for physically relevant groups like the translation group considered in the following section.} that these POVM elements satisfy the covariance relation
\begin{align}
\mathcal{U}_R(g')\!\left[E(g)\right] = E(g + g') \quad \forall g \in G. \label{covariance}
\end{align}
If the outcome of the measurement of ${R}$ is $g \in [-\tau,\tau] $, associated with the POVM element $ dg \, E(g)$, then Bob will reorient the system by implementing the unitary map $\mathcal{U}_{S}(g^{-1})$, which corresponds to the transformation of the reference token by an amount indicated by the measurement result (1st term in Eq.~\eqref{rhoStau}). If the outcome of the measurement is associated with the operator ${E}_\tau$, Bob will do nothing (2nd term in Eq.~\eqref{rhoStau}). After this measurement and reorientation, Bob will discard (trace out) the reference token and measurement result. This entire procedure will constitute the recovery operation $\mathcal{R}_\tau$.
The action of the recovery operation $\mathcal{R}_\tau$ on the encoded state $\mathcal{E}_\tau[\rho_S]$ is given by
\begin{widetext}
\begin{align}
\rho_S'(\tau) &= \mathcal{R}_\tau \circ \mathcal{E}_\tau[\rho_S] \nonumber \\
&= \frac{1}{2\tau} \int_{-\tau}^\tau d g' \int_{-\tau}^\tau d g \, \tr \big( E\!\left(g'\right) \mathcal{U}_R(g) \left[ \rho_R \right] \big) \, \mathcal{U}_S(g'^{-1}) \circ \mathcal{U}_S(g) \left[ \rho_S \right] +\frac{1}{2\tau} \int_{-\tau}^\tau d g \, \tr \big( {E}_\tau \, \mathcal{U}_R(g) \left[ \rho_R \right] \big) \, \mathcal{U}_S(g) \left[ \rho_S \right].
\label{rhoStau}
\end{align}
\end{widetext}
\subsubsection{Taking the limit $\tau \to \infty$}
The limit of Eq.~\eqref{rhoStau} in which $\tau$ becomes infinite corresponds to the scenario in which Bob knows nothing about the orientation of his reference frame with respect to Alice's\,---\,the $G$-twirl appearing in the encoding map in Eq.~\eqref{tauEncoding} is an average over the entire group $G$.
As is clear from Eq.~\eqref{Edef}, in the limit $\tau \to \infty$ the operator $E_\tau$ vanishes, and thus the second term in Eq.~\eqref{rhoStau} goes to zero. Taking this into account, the $\tau\to\infty$ limit of Eq.~\eqref{rhoStau} is
\begin{align}
\rho_S' &= \lim_{\tau\to \infty} \frac{1}{2\tau} \int_{-\tau}^\tau d g' \int_{-\tau}^\tau d g \, \tr \big( E(g' - g) \rho_R \big) \nonumber \\
&\qquad \qquad \qquad \qquad \qquad \qquad \times \mathcal{U}_S(g-g') \! \left[ \rho_S \right],
\label{rhoStau2}
\end{align}
where we have used the covariance property of the POVM elements expressed in Eq.~\eqref{covariance}.
Changing the integration variables to $u \colonequals g' - g$ and $v\colonequals g'$, the recovered state becomes
\begin{align}
\rho_S' &= \lim_{\tau\to \infty} \frac{1}{2\tau} \int_{-\tau}^\tau d v \int_{v-\tau}^{v+\tau} d u \, \tr \big( E(u) \rho_R \big) \, \mathcal{U}^\dagger_S(u) \! \left[ \rho_S \right].
\label{rhoStau3}
\end{align}
Denoting the antiderivative of the above integrand as
\begin{align}
F(x) \colonequals \int_0^x du \,\tr \big( E(u) \rho_R \big) \, \mathcal{U}^\dagger_S(u) \! \left[ \rho_S \right],
\end{align}
Eq.~\eqref{rhoStau3} takes the form
\begin{align}
\rho_S' &= \lim_{\tau\to \infty} \frac{1}{2\tau} \int_{-\tau}^\tau d v \,\big(F(v+\tau) -F(v-\tau) \big).
\end{align}
Making the substitution $h\colonequals\tau+v$ and $h\colonequals\tau-v$ in the first and second terms, respectively, the recovered state simplifies to
\begin{align}
\rho_S' &= \lim_{\tau\to \infty} \frac{1}{2\tau} \int_{0}^{2\tau} d h \, \big(F(h) -F(-h) \big).
\end{align}
Taking the limit by applying L'H\^{o}pital's rule\footnote{Suppose $f(x)$ and $g(x)$ are real differentiable functions in \mbox{$(a,b) \subset \mathbb{R}$}, and $g'(x) \neq 0$ for all $x \in (a,b)$. Further, suppose that $f'(x)/g'(x) \to A$ as $x \to a$. Then L'H\^{o}pital's rule states that if $f(x) \to 0$ and $g(x) \to 0$ as $x \to a$ or if $g(x) \to \infty$ as $x \to a$, then $f(x) / g(x) \to A$ as $x \to a$ \cite{Rudin:1976}.} yields
\begin{align}
\rho_S' &= \frac{1}{2} \lim_{\tau \to \infty} \frac{\partial}{\partial \tau} \int_{0}^{2\tau} d h \, \big(F(h) -F(-h) \big) \nonumber \\
&= \lim_{\tau \to \infty} \big(F(\tau) -F(-\tau) \big) \nonumber \\
&= \int_G d g \, \tr \big( E(g) \rho_R \big) \, \mathcal{U}_S(g) \! \left[ \rho_S \right],
\end{align}
where the integration is carried out over the entire group~$G$.
This brings us to our main result: even though the action of the $G$-twirl over a noncompact group yields non-normalizable states, the composition of the encoding operation, which makes use of the $G$-twirl, with the recovery operation applied to $\rho_S$ results in a properly normalized state in $\mathcal{S}(\mathcal{H}_S)$. Explicitly
\begin{align}
\rho_S' &= \lim_{\tau\to\infty} \mathcal{R}_\tau \circ \mathcal{E}_\tau [\rho_S]
\nonumber \\
&= \int_G d g \, p\!\left(g\right) \mathcal{U}_S(g) \! \left[ \rho_S \right] \in \mathcal{S}(\mathcal{H}_S),
\label{LimitOfCompostion}
\end{align}
where $p\!\left(g\right)\colonequals \tr \big( E(g) \rho_R \big)$ is a normalized probability distribution on $G$.
Equation \eqref{LimitOfCompostion} is identical to the expression for the composition of the recovery and encoding map defined for compact groups given in Eq.~\eqref{composition}. From Eq. \eqref{LimitOfCompostion} we see that if $p\!\left(g\right)$ is highly peaked around the identity group element then the only unitary that will contribute significantly is the identity operator, and the state recovered by Bob will be close to the state sent by Alice, $\rho_S' \approx \rho_S$. Thus, the success of the recovery operation, and consequently the quality of the reference token, can be quantified in terms of the width of $p\!\left(g\right)$, analogous to the compact case \cite{Bartlett:2009}.
By expressing $\rho_S$ in the basis furnished by the eigenkets of the generator $A_S$ of the group $G$, we find the recovered state to be
\begin{align}
\rho_S' &= \int_G dg \, p\!\left(g\right) \int da_S da_S' \, \rho_S(a_S,a_S') \nonumber \\
&\qquad \qquad \qquad \qquad \qquad \qquad \times e^{i A_S g} \ket{a_S}\!\bra{a_S'} e^{-i A_S g} \nonumber \\
&= \int da_S da_S' \, \left[ \int_G dg \, p\!\left(g\right) e^{i g (a_S -a_S')} \right] \nonumber \\
&\qquad \qquad \qquad \qquad \qquad \qquad \times\rho_S(a_S,a_S') \ket{a_S}\!\bra{a_S'} \nonumber \\
&= \int da_S da_S' \, \tilde{p}(a_S - a_S') \rho_S(a_S,a_S') \ket{a_S}\!\bra{a_S'},
\end{align}
where in the last equality we have defined the Fourier transform of $p\!\left(g\right)$
\begin{align}
\tilde{p}(a_S - a_S') \colonequals \int_G dg \, p\!\left(g\right) e^{i g (a_S -a_S')}.
\end{align}
From the definition of the characteristic function \mbox{$\tilde{p}(a_S -a_S')$} above, we see that if $a_S = a_S'$, then \mbox{$\tilde{p}(a_S - a_S') =1$}, and consequently the diagonal elements of $\rho_S$ are unaffected by the action of the communication channel $ \lim_{\tau\to\infty} \mathcal{R}_\tau \circ \mathcal{E}_\tau$. On the other hand, since the characteristic function is bounded, $\abs{\tilde{p}(a_S - a_S') }\leq 1$, when $a_S \neq a_S'$ the off diagonal elements of $\rho_S'$ are equal to those of $\rho_S$ multiplied by a factor whose magnitude is less than or equal to unity. From this observation we see that the decoherence induced by $\lim_{\tau\to\infty} \mathcal{R}_\tau \circ \mathcal{E}_\tau$ occurs in the basis furnished by the eigenkets associated with the generator $A_S$ of the group $G$.
To quantify the success of the recovery operation\,---\,how close the recovered state $\rho_S'$ is to the initial state $\rho_S$\,---\,we will make use of the fidelity $F(\rho_S', \rho_S)$ between the recovered state $\rho_S'$ and the state $\rho_S=\ket{\psi_S}\!\bra{\psi_S} \in \mathcal{S}(\mathcal{H}_S)$ that Alice sent, which we will take to be pure \begin{align}
\ket{\psi_S} = \int da_S \, \psi_S(a_S) \ket{a_S},
\end{align}
where $\psi_S(a_S) \colonequals \braket{a_S | \psi_S}$. The fidelity $F(\rho_S',\rho_S)$ is then given by
\begin{align}
F(\rho_S',\rho_S) &\colonequals \braket{\psi_S | \rho_S' |\psi_S} \nonumber \\
&= \int_G dg \, p\!\left(g\right) \abs{\braket{\psi_S| U_S(g) | \psi_S } }^2 \nonumber \\
&= \int da_S da_S' \, \tilde{p}(a_S - a_S') \abs{\psi_S(a_S)}^2 \abs{\psi_S(a_S')}^2. \label{FidelityResult}
\end{align}
\section{Reference frames associated with the translation group}
\label{Application to reference frames with the translation group}
We now examine the recovered state $ \rho_S'= \lim_{\tau\to\infty} \mathcal{R}_\tau \circ \mathcal{E}_\tau [\rho_S] $ when the relevant reference frame is associated with the one-dimensional translation group.
Consider Alice and Bob being completely ignorant of the relation between the spatial origins of their labs, i.e., the relation between their positional reference frames. The group formed by all possible changes of Alice's reference frame is the one-dimensional translation group $T_1$. The unitary representation of the group element $g\in T_1$ on the system is $U_S(g) \in \mathcal{U}_S ( \mathcal{H}_S)$ and on the reference token is $U_R(g) \in \mathcal{U}_R ( \mathcal{H}_R)$. These representations are generated by their respective momentum operators ${A}_S = {P}_S$ and ${A}_R ={P}_R$.
Suppose as a token of Alice's reference frame she prepares the state $\ket{e_\sigma} \in \mathcal{H}_R\simeq L^2(\mathbb{R})$, which we take to be a Gaussian state
\begin{align}
\ket{e_\sigma} &= \frac{1}{\pi^{1/4} \sqrt{\sigma} } \int dx_R \, e^{- x_R^2/2\sigma^2} \ket{x_R},
\end{align}
where we have expressed $\ket{e_\sigma}$ in the basis furnished by the eigenkets $\ket{x_R}$ of the position operator $X_R$ on $\mathcal{H}_R$ and $\sigma >0$ is the spread of this state with respect to this basis. Note that the different orientations of this token state $\ket{e_\sigma(g)}\colonequals U(g) \ket{e_\sigma}$ are orthogonal in the limit that $\sigma$ vanishes, $\lim_{\sigma \to 0} \braket{e_\sigma(g)|e_\sigma(g')}= \delta_{g,g'}$, imitating a classical reference frame as discussed in the previous section. In this limit token states corresponding to different positional reference frames are completely distinguishable from each other.
We must now construct the recovery measurement $R$ for which the associated set of POVM elements satisfy the covariance relation in Eq.~\eqref{covariance} with respect to the translation group $T_1$. One such set is given by the PVM elements associated with the position operator $X_R$, namely, $E(x) \colonequals \ket{x_R}\! \bra{x_R}$ for all $x_R\in \mathbb{R} \simeq T_1$, where $\ket{x_R}$ denotes the eigenket of $X_R$ associated with the eigenvalue $x_R$. This follows from the fact that the position and momentum operators acting on $\mathcal{H}_R$ satisfy the canonical commutation relation $[X_R, P_R]=i$, which implies that $P_R$ generates translations of the operator $X_R$, or equivalently $U_R(g) \ket{x_R} = \ket{x_R + g}$. However, there is a more general set of POVM elements corresponding to unsharp measurements of the position operator constructed by the convolution of $E(x)$ with some confidence measure $\mu$
\begin{align}
E^{\mu}(x) \colonequals \int d\mu(q) \, E(x+q).
\end{align}
Direct substitution of $E^{\mu}(x)$ into Eq.~\eqref{covariance} shows that indeed these unsharp POVM elements are covariant with respect to the translation group. In what follows we consider the family of unsharp POVM elements $E^{\mu}_{\delta}(x)$ defined by choosing a Gaussian measure parametrized by $\delta >0$,
\begin{align}
E^{\mu}_{\delta}(x) \colonequals \int dq \, \frac{e^{-q^2/\delta^2}}{\sqrt{\pi} \delta} E(x+q). \label{POVMdelta}
\end{align}
In the limit $\delta \to 0$, we have $E^{\mu}_{\delta}(x) \to E(x)$.
Given that Alice prepared the reference token in the state $\rho_R = \ket{e_\sigma}\! \bra{e_\sigma} \in \mathcal{S}(\mathcal{H}_R)$, the probability distribution $p\!\left(g\right)$ appearing in Eq.~\eqref{LimitOfCompostion} is
\begin{align}
p\!\left(g\right)\colonequals \tr \big( E^{\mu}_{\delta}(g) \rho_R\big) = \frac{ e^{- \frac{g^2}{\sigma^2 + \delta^2} }}{\sqrt{\pi} \sqrt{\sigma^2 + \delta^2 }} .
\end{align}
We note that $p\!\left(g\right)$ is peaked around $g=0$ with a width of $\sqrt{\sigma^2 + \delta^2 }$. From Eq.~\eqref{LimitOfCompostion}, and the discussion that immediately follows, we see that the parameter $\sqrt{\sigma^2 + \delta^2 }$ determines the quality of the recovery operation: the smaller $\sigma$ and $\delta$ are, the more peaked $p\!\left(g\right)$ is around the identity element and the closer Bob's recovered state will be to the state sent by Alice.
\begin{figure}
\caption{
The fidelity $F(\rho_S',\rho_S)$ between the state sent by Alice $\rho_S$ and the state recovered by Bob $\rho_S'$ as a function of $\sqrt{\sigma^2 + \delta^2}$.}
\label{FidelityPlot}
\end{figure}
As a concrete example, suppose Alice wishes to send Bob the state $\rho_S = \ket{\psi_S}\! \bra{\psi_S}$, where $\ket{\psi_S} \in \mathcal{H}_S \simeq L^2(\mathbb{R})$ is a Gaussian state
\begin{align}
\ket{\psi_S} &= \frac{1}{\pi^{1/4} \sqrt{\Delta}} \int dx_S \, e^{i \mu_p x_S} e^{- \left(x_S-\mu_x\right)^2/2\Delta^2} \ket{x_S}, \label{GaussSystemState}
\end{align}
and $\Delta$ is the width of the Gaussian state in the position basis $\ket{x_S}$ for $\mathcal{H}_S$, and $\mu_x$ and $\mu_p$ are its average position and momentum. Using Eq.~\eqref{FidelityResult}, the fidelity between $\rho_S$ and the state recovered by Bob $\rho_S'$ is
\begin{align}
F(\rho_S',\rho_S)
&= \frac{\Delta}{\sqrt{ \Delta^2 + \frac{1}{2} \left(\sigma^2 + \delta^2\right)}}. \label{FidelityTranslation}
\end{align}
As might be expected, in the limit where $\sigma$ and $\delta$ vanish the fidelity $F(\rho_S',\rho_S)$ is equal to unity and the recovered state is exactly equal to the state Alice wished to send to Bob, $\rho_S' = \rho_S$. This limit corresponds to different orientations of the reference token described by Eq.~\eqref{SetOfReferenceTokens} being orthogonal, thus imitating a classical reference frame, and the measurement of the token's position being carried out perfectly.
From Eq.~\eqref{FidelityTranslation} we also observe that states less localized in the position basis (larger~$\Delta$) are better recovered by Bob, as illustrated in Fig.~\ref{FidelityPlot} in which the fidelity is plotted as a function of $\sqrt{\sigma^2 + \delta^2 }$ for different $\Delta$. Note that the expression for the fidelity is independent of $\mu_x$ and $\mu_p$, implying that for Gaussian states the success of the recovery operation is independent of where the state is localized in phase space.
As a second example, suppose Alice prepares her token in a superposition of two Gaussian wave packets
\begin{align}
\ket{e} = \frac{1}{\sqrt{N}} \big( \ket{\psi(\bar{x},\bar{p},\sigma)} + \ket{\psi(-\bar{x},-\bar{p},\sigma)} \big) \in \mathcal{H}_R,
\label{GaussSuper}
\end{align}
where $N$ is an appropriate normalization constant and $\ket{\psi(\bar{x},\bar{p},\sigma)}$ denotes the state of a Gaussian wave packet of width $\sigma$ in position space with average position and momentum $\bar{x}$ and $\bar{p}$, respectively. As they appear in Eq.~\eqref{GaussSuper}, $\bar{x}$ and $\bar{p}$ quantify the size of the superposition in position and momentum space, respectively. Further, suppose that Bob is able to make a perfect measurement of the position of the reference token as described by the POVM elements $\lim_{\delta \to 0} E^{\mu}_{\delta}(x)$. And again, suppose Alice wishes to communicate the Gaussian state given in Eq.~\eqref{GaussSystemState}.
\begin{figure}
\caption{
For a reference token prepared in a superposition of two Gaussian states described by Eq.~\eqref{GaussSuper}, the maximum fidelity $F_{\rm max}$ and the optimal superposition size $\bar{p}_{\rm max}/\sigma$ as functions of the width $\Delta/\sigma$ of the state Alice wishes to send.}
\label{plot2}
\end{figure}
Given the above, the fidelity expressed in Eq.~\eqref{FidelityResult} yields
\begin{align}
F(\rho_S',\rho_S) &= \beta \, \frac{e^{ \beta^2 \bar{x}^2 /\sigma^2 }
+e^{ -\beta^2 \bar{p}^2 \sigma^2 }
}{e^{\bar{x}^2 / \sigma^2 } + e^{ -\bar{p}^2 \sigma^2 }},\label{supFidelity}
\end{align}
where $\beta \colonequals \Delta/ \sqrt{\Delta^2 + \sigma^2/2}$; note that $\beta \in (0,1)$ and is equal to Eq.~\eqref{FidelityTranslation} when $\delta \to 0$. Further, $\beta$ takes its maximum (minimum) value when $\Delta \gg \sigma$ ($\Delta \ll \sigma$). Observe that the fidelity in Eq.~\eqref{supFidelity} is independent of $\mu_x$ and $\mu_p$ appearing in Eq.~\eqref{GaussSystemState}, implying that the success of the recovery operation is independent of where $\ket{\psi_S}$ is localized in phase space.
The fidelity in Eq.~\eqref{supFidelity} is a monotonically decreasing function of $\bar{x}$, which implies that Alice should prepare the size of the superposition in position space to be as small as possible (i.e., small $\bar{x}$) in order to maximize the fidelity. A second observation can be made by inspection of Fig.~\ref{plot2}, which is a plot of both the maximum fidelity, $F_{\rm max} \colonequals \max \left[F(\rho_S',\rho_S)\ | \ \bar{x}, \bar{p}, \sigma >0 \right]$, and the value $\bar{p}_{\rm max}/\sigma$ which realizes this maximum as a function of the width $\Delta/\sigma$ of the state $\ket{\psi_S}$ Alice wishes to send to Bob; since the fidelity is monotonically decreasing in $\bar{x} \sigma$, this maximum occurs when $\bar{x} \sigma=0$. From Fig.~\ref{plot2} we see that depending on the value of $\Delta/\sigma$, Alice can adjust the state of the reference token by choosing the size of the superposition in momentum space $\bar{p}/\sigma$ so that the fidelity is maximized. That is, having the ability to create different sizes of superposition in momentum space can act as a resource to improve the communication channel specific to the state Alice wishes to send to Bob.
\section{Conclusions and Outlook}
\label{SummaryCh7}
We began by introducing a communication protocol between two parties, Alice and Bob, that do not share a reference frame associated with a compact group. Alice sends to Bob a token of her reference frame along with a system she wishes to communicate to him, and then Bob performs an appropriate recovery operation that enables him to recover a state close to the one Alice wished to communicate.
In Sec.~\ref{A recovery operation for noncompact groups} we showed that this communication protocol can be applied when Alice's and Bob's reference frames are associated with a noncompact group, even though averaging states over the entire group leads to non-normalizable states. Furthermore, we demonstrated that this communication channel induces decoherence in the basis furnished by the eigenkets of the generator of the group. In Sec.~\ref{Application to reference frames with the translation group} we applied this result to the study of communication between two parties who do not share a reference frame associated with the translation group. We introduced a sequence of Gaussian states $\ket{e_\sigma}$ of the reference token with spatial width $\sigma$, and saw that in the limit $\sigma \to 0$, $\ket{e_\sigma}$ imitates a classical reference frame. This suggests that the parameter $1/\sigma$ acts as the effective size of the reference token, since as $1/\sigma$ becomes large the two parties are able to communicate perfectly (assuming Bob is able to measure the reference token perfectly, $\delta \to 0$). We also demonstrated that for finite size reference tokens, i.e., when $1/\sigma$ is finite, states less localized in the position basis are better communicated to Bob and examined the case when the reference token is prepared in a superposition.
We note that the group of time translations generated by a Hamiltonian is a strongly continuous one-dimensional noncompact Lie group. Thus, provided a covariant measurement of the reference token corresponding to a time observable can be constructed \cite{Busch:1997}, the above communication scheme can be employed. This will be fruitful for communication between parties who do not share a temporal reference frame, that is, their clocks are not synchronized. Furthermore, it will be interesting to see how the above construction can be applied to noncompact Lie groups of higher dimension, such as the Euclidean group in two and three dimensions, the Galilean group, and ultimately the Poincar\'{e} group.
The intended application of the results in this article, as well as one of the primary motivations for this investigation, is to study the act of changing quantum reference frames\footnote{See Refs. \cite{Giacomini:2018, Vanrietvelde:2018, Hoehn:2018} for a different approach.}. Palmer \emph{et al.} \cite{Palmer:2013} have constructed an operational protocol for changing quantum reference frames associated with compact groups. They used the state $\mathcal{G}[\rho_A \otimes \rho_S]$ as a relational description of the state $\rho_S$ with respect to a quantum reference frame $\rho_A$, and then considered the operation of changing the quantum reference frame from the state $\rho_A$ to $\rho_B$. They found that this operation could not be done perfectly and that the best one could do is
\begin{align}
\mathcal{G}[\rho_A \otimes \rho_S] \to \mathcal{G}[\rho_B \otimes \rho_S'],
\end{align}
where $\rho_S' = \mathcal{R} \circ \mathcal{E} [\rho_S]$. In other words, one is not able to change quantum reference frames without affecting the state of the system described with respect to the reference frame\,---\,$\rho_S$ changes to $\rho_S'$ when the reference frame is changed. This results in a fundamental decoherence mechanism associated with the act of changing quantum reference frames.
This is in stark contrast to the classical case, in which the act of changing reference frames does not affect the system being described with respect to the reference frames. This decoherence is described by the composition of the encoding and recovery operations $\mathcal{R} \circ \mathcal{E}$ discussed in this article. Having generalized the operation $\mathcal{R} \circ \mathcal{E}$ to reference frames associated with noncompact groups, we hope to study the effect of changing quantum reference frames associated with the Galilean and Poincar\'{e} groups. Understanding the process of changing quantum reference frames is an essential step in the construction of a relational quantum theory, in which all objects, including reference frames, are treated quantum mechanically.
\begin{acknowledgments}
I wish to thank Marco Piani, Robert B. Mann, and Urgje~\cite{219979} for useful discussions and Mehdi Ahmadi and Lorenza Viola for a careful reading of this manuscript. This work was supported by the Natural Sciences and Engineering Research Council of Canada and the Dartmouth College Society of Fellows.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{Local probe for connectivity and coupling strength in quantum complex networks}
\author{Johannes Nokkala}
\email{jsinok@utu.fi}
\affiliation{Turku Centre for Quantum Physics, Department of Physics and Astronomy,
University of Turku, FI-20014, Turun Yliopisto, Finland}
\author{Sabrina Maniscalco}
\affiliation{Turku Centre for Quantum Physics, Department of Physics and Astronomy,
University of Turku, FI-20014, Turun Yliopisto, Finland}
\affiliation{Centre for Quantum Engineering, Department of Applied Physics, School of Science, Aalto University, P.O. Box 11000, FIN-00076 Aalto, Finland}\author{Jyrki Piilo}
\affiliation{Turku Centre for Quantum Physics, Department of Physics and Astronomy,
University of Turku, FI-20014, Turun Yliopisto, Finland}
\date{\today}
\begin{abstract}
We develop a local probe to estimate the connectivity of complex quantum networks.
Our results show how global properties of different classes of complex networks can be estimated -- in a quantitative manner with high accuracy -- by coupling a probe to a single node of the network. Here, our interest is focused on probing the connectivity, i.e. the degree sequence, and the value of the coupling constant within the complex network. The scheme combines results on classical graph theory with the ability to develop quantum probes for networks of quantum harmonic oscillators. Whilst our results are proof-of-principle type, within the emerging field of quantum complex networks they may have potential applications for example to the efficient transfer of quantum information or energy or possibly to shed light on the connection between network structure and dynamics.
\end{abstract}
\maketitle
\section{Introduction}
While the study of classical complex networks has enjoyed considerable interest throughout the last 20 years \cite{BarabasiRMP,Newman,Barabasi17}, the study of interacting quantum systems as quantum complex networks has only recently started to emerge \cite{Bianconi15,Biamonte17}. The topics range from state \cite{quanttransport} and energy transfer \cite{quanttransportenergy} as well as random quantum walks \cite{Faccin13} on such networks to modeling structured finite environments \cite{complexnetworkenvironments} and investigating the possible quantum effects in photosynthesis \cite{quantphoto}. Quantum networks are also important in development of more complicated quantum communication schemes \cite{qcn1,qcn2}. Experimental platforms that could be used to implement the quantum complex networks in the near future include arrays of micromechanical resonators cooled near to their ground state \cite{resonatorarrays}, cold atoms in lattices \cite{coldatoms} and cluster states or networks of bosonic modes \cite{paris1,paris2,paris3}.
Broadly speaking, networks are any systems that can be thought of as being composed of many interacting or otherwise related subsystems or entities. This includes an immense variety of large complex systems such as acquaintance networks \cite{socialnetworks}, the global shipping network \cite{gsn} and food webs in an ecosystem \cite{foodweb1,foodweb2}, but also microscopic ones like metabolic processes in a cell \cite{metabolic1,metabolic2} and light-harvesting complexes \cite{FMOcomplex}. The ability to capture the essential features of so many different systems of interest makes network theory a powerful tool. Much of its power stems from reducing a complicated system into an abstract graph composed of nodes connected by links. This can then be studied independently of what the physical network is, revealing, e.g., important information on mechanisms influencing the construction and evolution of these complex systems. This is expected to hold true even if the constituents of the complex network are quantum physical objects.
An important problem in network theory is the extraction of information about the network when only a small subset of its constituents can be accessed. This has also been considered in the quantum case, and it has been shown that, provided one has suitable prior knowledge of the network, it is possible to determine several of its properties indirectly using a probe system, such as the network state \cite{probeState}, temperature \cite{qnt1,qnt2}, and coupling strengths between nodes \cite{Daniel2009,qubitchains}. In particular, in the case of full access, the structure of the network can in principle be determined exactly \cite{complexnetworkenvironments,Daniel2011}. The developed theoretical tools are crucial on the one hand for understanding how the structure of a nontrivial quantum environment is encoded in the dynamics of an open quantum system, and on the other hand for identifying and measuring the key properties of different quantum networks.
In this work, we consider the estimation of connectivity given by the number of links, or degree, of each node in the case of a simple and connected abstract graph. This choice is motivated by the fact that the degree sequence and corresponding distribution is one of the most important and commonly used concepts in characterising complex networks.
By simple, we mean that between any two nodes there is at most one link and no node has a link with itself, and by connected that any node can be reached from any other by following the links. We also assume that the links are undirected, meaning that the interactions or relations modeled by the links are taken to be symmetric.
Our results are general in the sense that the only assumption one must make about a physical network is that we know the number of nodes within the network and it is possible to perform measurements with results that are in a known relationship with the eigenvalues of the Laplace matrix of the corresponding graph. As an example of this type of system, we use a network of identical quantum harmonic oscillators interacting with spring-like couplings of constant magnitude \cite{complexnetworkenvironments,PalmaScirep}.
Earlier work for quantum networks has been done in the case of networks of spins, based on a continuous-measurement-based approach of small networks up to $5$ nodes with uniform or approximately uniform couplings \cite{Kato2014}, as well as for quantum oscillator networks where the mutual information between a node and the rest of the network was shown to be characteristic of the topology when the network is at or near its ground state \cite{infosharingnetworks}. In contrast, our approach can in principle be applied to any kind of classical or quantum networks as long as the Laplace eigenvalues can be extracted. In practice, the amount of available computational power will limit the size of the networks.
Our main result is that it is indeed possible to obtain an accurate estimate for the degree sequence of the network by using only a single probe that is coupled to one of the nodes of the complex network. This result is based on
exploiting known mathematical relations between the Laplace eigenvalues and the connectivity, and using the possibilities that quantum probing provides. The numerical evidence shows that the scheme works very well for different classes of network structures and is robust to small errors in the probed quantities. We also consider the case where the coupling strength in an oscillator network is uniform but a priori unknown. It turns out that for some classes of networks the coupling strength can always be correctly deduced, and numerical evidence suggests that the estimation succeeds with high probability in the general case.
For the sake of simplicity, we show first - in terms of classical graph theory - how the degree sequence of complex networks can be estimated once the eigenvalues of the Laplace matrix are known. After this, we turn our attention to quantum networks and develop a scheme to probe locally these eigenvalues and the corresponding eigenfrequencies within the network of quantum harmonic oscillators.
\section{Connectivity estimation}
Once the nodes of a simple and connected graph have been labeled, its structure may be encoded into a matrix in many ways. In particular, the Laplace matrix $\textbf{L}$ of the graph has elements
\begin{equation}
\label{eq.1}
L_{ij}=\delta_{ij}d_i - (1 - \delta_{ij})l_{ij},
\end{equation}
\noindent
where $d_i$ is the degree of node $i$ and $l_{ij}=1$ if there is a link between nodes $i$ and $j$ and $0$ otherwise; notice that $l_{ij}=l_{ji}$. Given the eigenvalues $\lambda_i$ of the Laplace matrix, the objective is to estimate the degrees $d_i$. This can be done by combining several results from spectral graph theory, which studies the relationship between graphs and the eigenvalues of their matrices.
In addition to bounds on minimum and maximum degree by eigenvalues $\lambda_i$, the following relations must be fulfilled \cite{spectraofgraphs,grone}
\begin{align}
\text{Tr}\textbf{L}=\sum\nolimits_i^N d_i&=\sum\nolimits_i^N \lambda_i, \\
\text{Tr}\textbf{L}^2-\text{Tr}\textbf{L}= \sum\nolimits_i^N d_i^2&=\sum\nolimits_i^N (\lambda_i^2-\lambda_i), \\
1+\sum\nolimits_i^{m<N} d_i &\leqslant \sum\nolimits_i^{m<N} \lambda_i.
\end{align}
The above restrictions are illustrated using a small example in Fig.~\ref{fig0}. We use a method to construct sequences $\mathbf{d}'$ of $N$ positive integers that simultaneously satisfy the degree bounds and restrictions $(2)$, $(3)$ and $(4)$, and call them solutions.
\begin{figure}
\caption{Illustration, on a small example graph, of the degree bounds and the restrictions of Eqs.~(2)--(4) imposed on candidate degree sequences by the Laplace eigenvalues.}
\label{fig0}
\end{figure}
There are special cases where there is only one solution, and consequently the degree sequence $\mathbf{d}$ is unambiguously determined by the eigenvalues. It is straightforward to see that these include a simple chain, completely connected network and all regular graphs, i.e. graphs for which all degrees coincide. For the first two this follows from the fact that the squared sum in Eq.~$(3)$ attains its minimum or maximum value for a given number of nodes, while a direct application of the Cauchy-Schwarz inequality shows that only regular graphs have equality in $(\sum_i^N d_i)^2 \leqslant N \sum_i^N d_i^2$. There is also an important class of graphs called threshold graphs \cite{trg} that are uniquely determined by their degree sequence and their degree sequence is in turn determined by the Laplace eigenvalues, however the eigenvalues will typically be degenerate.
More generally, Eqs.~$(2)$ and $(3)$ for a given $N$ fix the mean and variance of the bounded solutions and Eq.~$(4)$ further refines them by ruling out cases where deviations from $\mathbf{d}$ are bunched together. For any solution, the deviations must cancel out because the correct sum of degrees is enforced; similarly also deviations between any element-wise squared solution and the element-wise squared $\mathbf{d}$ must cancel out. Since the possible values of degrees are integers, the number of solutions is finite. Given enough computational power and time, all of them can be found; this is feasible on a normal tabletop computer when the number of solutions is in the lower end of hundreds of thousands, limiting $N$ to tens of nodes. We stress that different classes of networks will have different scaling between $N$ and the number of solutions.
To find the solutions, we consider Eq.~$(3)$ as an integer partitioning problem, where the sum of squared elements must be partitioned into $N$ integers. The allowed integers are square numbers with bounds determined from the eigenvalues. Taking the element-wise square roots of each found partition and filtering the results according to equations $(2)$ and $(4)$ will provide the solutions. Alternatively, one could start from Eq.~$(2)$ and then filter, but we found that this is more wasteful and consequently uses more memory and computation time.
We tested our estimation scheme on Erd\H{o}s-R\'{e}nyi random graphs \cite{ER}, Barab\'{a}si-Albert graphs \cite{BA}, Watts-Strogatz graphs \cite{WS} and tree graphs. An Erd\H{o}s-R\'{e}nyi random graph refers to either of two closely related models of generating random graphs. In both, the number of nodes is fixed. Using the so-called $G(N,L)$ model, one chooses uniformly among all possible graphs with $N$ nodes and exactly $L$ links, while using the $G(N,p)$ model, one starts from a completely connected graph and includes each link in the final graph with probability $p$. Here we use the former model unless otherwise stated. A Barab\'{a}si-Albert random graph $G(N,K)$ is constructed starting from a cyclic graph of three nodes and iteratively adding a new node with $K$ links until the graph has $N$ nodes, connecting the new links randomly but favoring nodes with higher degree. It can be shown that graphs constructed like this have a degree distribution that follows a powerlaw. Watts-Strogatz graphs $G(N,k,p)$ are constructed by starting from a circular graph where each node is connected to up to $k$-th nearest neighbors. Then each link is rewired with probability $p$, creating a graph with small world properties. Finally, a tree of $N$ nodes is any connected graph with exactly $N-1$ links; this gives them the property that they have no cycles, i.e. closed walks without repetitions of links or nodes other than the starting and ending node.
As a figure of merit of a solution $\mathbf{d}'$ we chose the $\ell_1$ distance from $\mathbf{d}$ normalized by the total degree of the graph, i.e.
\begin{equation}
f(\mathbf{d}')=\Vert \mathbf{d}-\mathbf{d}' \Vert_1/\Vert \mathbf{d}\Vert_1= \sum\nolimits_i^N |d_i-d'_i|/\sum\nolimits_i^N d_i.
\end{equation}
This choice is motivated by the fact that this quantity can be interpreted as the average deviation from the real degree per link. We found that, for all considered cases, $f(\mathbf{d}')<1/2$. By choosing as final estimate the solution that has the smallest $\ell_1$ distance from the mean of solutions it is possible to single out a solution particularly close to $\mathbf{d}$, since the deviations, that must cancel out for any particular solution as explained previously, will then be partly averaged out. By mean of solutions, we indicate the sequence where each element is the corresponding mean degree calculated from all solutions. On the other hand, the set of all solutions always contains $\mathbf{d}$ while the estimate is typically not a perfect match.
The results, averaged over 1000 realizations with network size fixed to $N=30$, are shown in Fig.~\ref{fig1}. Besides the parameter values considered here, we have also checked other values and found similar results. For Erd\H{o}s-R\'{e}nyi random graph, we used $L=87$. This would be the expected number of links for $G(N,p)$ of same size with $p=1/5$. For Barab\'{a}si-Albert graph, we used $K=2$. In the latter case the estimation performs worst, and in particular none of the estimates coincided with the real degree sequence. This is caused by the high variance of $\mathbf{d}$ for this class of random graphs: higher variance allows the solutions to deviate more from $\mathbf{d}$ and consequently the estimation is less accurate.
\begin{figure*}
\caption{\label{fig1}}
\end{figure*}
Compared to the other two graphs which had typically thousands of solutions, Watts-Strogatz graphs and trees had far fewer solutions, with the former having tens and the latter only a handful with the used parameter values. Consequently, a significant fraction of estimates were a perfect match with $\mathbf{d}$. The plots are not smooth, indicating that certain values are much less likely than others, a feature not present for the other two graphs. For the former, we used $k=2$ and $p=0.2$. Unlike for the other graphs, more than half of the solutions had the same distance from $\mathbf{d}$. We believe this to be because this class of random graphs had the smallest variance of $\mathbf{d}$ since they are generated from regular graphs. Trees had the biggest fraction of perfect matches out of all graphs, but this is mostly because the number of solutions was so small to begin with. This is essentially caused by any tree having the smallest possible number of links for a given number of nodes, greatly restricting also the solutions.
While the solutions $\mathbf{d}'$ close to the mean of solutions are alike, the outliers are different from both them and $\mathbf{d}$. This is because there are many relatively smooth sequences that satisfy the constraints, but only a few jagged ones that pass. Indeed, the estimation works poorly on graphs with jagged degree sequences since the majority of solutions will be much smoother. We stress that choosing an outlier and realizing it as a network will in general not yield the same solutions since degree bounds and restrictions imposed by Eq.~$(4)$ can change even between different realizations of a fixed $\mathbf{d}$.
\section{Application to quantum networks}
To exploit the previous results for quantum probing and networks, we consider networks of uniformly coupled quantum harmonic oscillators \cite{complexnetworkenvironments}. We will use units as referred to an arbitrary (but fixed) frequency unit and give coupling strengths, times and temperatures in terms of this unit. We will also set $\hbar=1$ and $k_B=1$. The network is composed of $N$ unit mass quantum harmonic oscillators coupled by springs, each having the same bare frequency $\omega_0$. The couplings between network oscillators are assumed to be uniform with the strength given by $g$. We can express the network Hamiltonian in a compact way as
\begin{equation}
H_{E}=\textbf{p}^{T}\textbf{p}/2+\textbf{q}^{T}(\omega_0^2\textbf{I}+g\textbf{L})\textbf{q}/2,
\end{equation}
\noindent where $\textbf{p}=\left\lbrace p_{1}, p_{2}, ..., p_{N}\right\rbrace ^T $ and $\textbf{q}=\left\lbrace q_{1}, q_{2}, ..., q_{N}\right\rbrace ^T $ are the vectors of momentum and position operators, $\textbf{I}$ is the identity matrix and $\textbf{L}$ is the Laplace matrix of the underlying graph. We will assume that $g$ and $N$ are known, but make no assumptions on $\textbf{L}$. Since the row sums of any Laplace matrix are zero, the eigenvalues $\lambda_i$ are non-negative. This, together with a positive coupling constant $g$, ensures the positivity of Hamiltonian $H_E$.
Since the network Hamiltonian is quadratic in position and momentum operators for any configuration given by $\textbf{L}$, it can be diagonalized with an orthogonal transformation. This allows us to move into an equivalent picture of noninteracting eigenmodes of the network. In this picture, $H_E=\sum\nolimits_{i=1}^N (P_{i}^2+\Omega_{i}^2Q_{i}^2)/2$, where $P_{i}$ and $Q_{i}$ are the position and momentum operators of the network eigenmodes and $\Omega_{i}$ are their frequencies, related to the eigenvalues $\lambda_i$ of the Laplace matrix $\textbf{L}$ as
\begin{equation}
\label{eq.2}
\lambda_i=(\Omega_{i}^2-\omega_0^2)/g.
\end{equation}
This is the key equation which allows us to use the previously described estimation procedure for the degree sequence.
In other words, if we can probe the eigenfrequencies $\Omega_i$ of the network, this gives us direct information about the eigenvalues of the Laplace matrix and therefore a way to estimate the connectivity of the network.
It is also worth mentioning that, since $\omega_0$ coincides with the smallest eigenfrequency, it is not necessary to know it beforehand.
Assuming that the network is in a thermal state of known temperature $T$, the detection of eigenfrequencies can be done by measuring the mean excitations $\braket{n(t)}{}$ of a bosonic probe weakly coupled to a node in the network and doing a frequency sweep across the range that covers the spectrum \cite{complexnetworkenvironments}. The probe is assumed to be a quantum harmonic oscillator with the Hamiltonian $H_S=(p_{S}^2+\omega_{S}^2q_{S}^2)/2$, where $p_{S}$ and $q_{S}$ are its momentum and position operators and $\omega_{S}$ is its frequency, while the interaction Hamiltonian is of the form $H_I=-kq_Sq_j$, where $k$ is the strength of the coupling and $q_j$ is the position operator of the node interacting with the probe. By fixing the states of the probe and the network, the reduced dynamics of the probe can be determined exactly by diagonalizing the total Hamiltonian, solving the Heisenberg equations of motion for the decoupled oscillators, and returning to old operators. While here we fix the state of the probe and the network to be vacuum and thermal state of temperature $T$, respectively, the accuracy is largely insensitive to the state of the probe as long as there is an energy difference between the probe and the network \cite{complexnetworkenvironments}.
When coupled strongly to the network, the probe will exchange energy with all eigenmodes and the reduced dynamics depends on the structure of the network in a complex way. On the other hand, with a sufficiently weak coupling the interaction becomes limited to only the few closest modes in the vicinity of system frequency $\omega_{S}$, and this makes the reduced dynamics very sensitive to the resonance condition in the sense that when $\omega_{S}$ matches an eigenfrequency, a significantly larger amount of energy can flow between the network and the probe before finite size effects cause the flow to be reversed. An example is shown in Fig. \ref{fig2}, which demonstrates that even a small difference in frequencies can lead to a very different value of $\braket{n(t)}{}$, for sufficiently long interaction times and a weak coupling, provided that there is an energy difference between the probe and the network. While this behaviour is universal to finite networks, the number of nodes $N$ is assumed to be known in the probing protocol because otherwise one does not know when all eigenfrequencies have been found.
\begin{figure}
\caption{\label{fig2}}
\end{figure}
The probe must interact with an eigenmode to detect its frequency. The spectrum should also be nondegenerate because any degenerate eigenfrequencies are interpreted as a single frequency. This is typically the case, and it can be seen by considering the oscillators in terms of the eigenmodes: any $q_i$ can be expressed as a weighted sum of eigenmode position operators where the weights are given by the elements of the $i$th eigenvector of the matrix $(\omega_0^2\textbf{I}+g\textbf{L})/2$. For a generic $\textbf{L}$, all eigenvalues are distinct and the eigenvectors will not have zero elements, which means that the probe will interact with and resolve all eigenmodes from any node.
In the non-ideal case, there might be some errors in the values of eigenfrequencies or the coupling strengths might be only approximately uniform. We checked the robustness against both for all four classes of networks. For all of them, a $1\,\%$ unbiased error in either eigenfrequencies or coupling strengths will typically not cause any errors in the detected sum of degrees while perturbing the probed sum of squared degrees, degree bounds, and bounds on partial sums very little if at all. With larger errors, the worst case accuracy of results averaged over many realizations deteriorates slowly, but the differences between individual realizations grow. We also found that the number of solutions had a large impact on the robustness of the best case accuracy, as this was affected very quickly for trees and Watts-Strogatz networks while the other two classes of networks were much more resilient. Sometimes the affected bounds on partial sums did not provide any solutions at all for trees or Watts-Strogatz networks, in which case we considered the accuracy of solutions without this restriction.
In the case of nonuniform coupling strengths, the eigenvalues of a weighted Laplace matrix $\textbf{L}$ can be recovered from $\Omega_i^2-\omega_0^2$. Now the off-diagonal elements of $\textbf{L}$ are the coupling strengths between the oscillators and the diagonal has the sums of coupling strengths to each oscillator. While other restrictions still apply as before, the eigenfrequencies only upper bound the sum of the squares of diagonal elements of $\textbf{L}$ and conversely, their variance can only be bounded from above, reducing the accuracy of the estimation considerably. The number of possible solutions can still be finite if the coupling strengths in the network are divisible by the same number, for instance if there is a weakest coupling and others are its integer multiples.
\section{Estimation of an unknown coupling constant}
If the coupling strengths are known to be uniform but the value of the coupling constant is not known, one can estimate it from the probed eigenfrequencies using the relation $g \lambda _i=\Omega_i^2-\omega_0^2$ obtained from Eq. $(7)$. The estimation procedure uses general properties of the eigenvalues $\lambda_i$ of an unweighted connected graph. We stress that the success or failure of the estimation depends only on the structure of the graph, rather than on a particular value of $g$. As will be seen below, for generic degree sequences it succeeds.
\begin{figure}
\caption{\label{fig3}}
\end{figure}
Because the graph is connected and simple, we know that $2(N-1)\leq \sum_i^N d_i \leq N(N-1)$. Since $\sum_i^N d_i = \sum_i^N \lambda_i$, this leads to $\frac{\sum_i^N(\Omega_i^2-\omega_0^2)}{N(N-1)}\leq g \leq \frac{\sum_i^N(\Omega_i^2-\omega_0^2)}{2(N-1)}$. We can reduce this range to a finite set of values by demanding that both $\sum_i^N d_i$ and $\sum_i^N d_i^2$ are even integers, as they must be for a connected graph. This set can be further refined by using results related to regular graphs and the largest eigenvalue $\lambda_N$. As mentioned before, for any connected graph $N \sum_i^N d_i^2-(\sum_i^N d_i)^2 \geq 0$ with equality iff the graph is regular. This property can be violated for values of the coupling constant larger than $g$, which can be used to rule them out. On the other hand, values smaller than $g$ can violate the property $\lambda_N \leq N$ \cite{largesteig}. Typically several values pass these tests, however as we will argue below, they are not equally likely to be correct.
Clearly, if some $g'$ satisfies the condition that both $\sum_i^N d_i$ and $\sum_i^N d_i^2$ are even, then so does any $g'/x$ where $x=2,3,4,...$. This suggests that $g$ is more likely to be among the larger values satisfying the constraints. In fact, for trees and regular networks, the largest possible value coincides with $g$. In the former case this follows directly from the fact that the sum of degrees attains its minimum value, and hence any $g'>g$ will violate the assumption that the network is connected. In the latter case this can be seen by letting $g'=ag$ and noticing that then $N \sum_i^N d_i^2-(\sum_i^N d_i)^2=\frac{N^2\Delta(1-a)}{a^2}<0$ for all $a>1$, where $\Delta$ is the constant degree of the network.
More generally, for some $g'>g$ to lead to even sum of degrees and squared degrees, it has to be the case that $\sum_i^N (d_i^2+d_i)d_i^2/D'^2$ is even, where $D'<\sum_i^N d_i$ is the wrong sum of degrees corresponding to $g'$. While such a $g'$ might still be ruled out by the other constraints, this implies that without prior knowledge of the structure of the network, $g$ can be determined unambiguously only when no other value passes the tests. We studied how well the estimation works in the case of the Erd\H{o}s-R\'{e}nyi random network as a function of connection probability $p$, as shown in Fig.~\ref{fig3} -- unlike in the previous section, here we use the $G(N,p)$ model since for prime values of the total degree the estimation almost always succeeds, and consequently the $G(N,L)$ model leads to a discontinuous plot. The results confirm that the largest value coincides with $g$ with high probability, success rate improving for larger values of $p$. Also shown is the fraction of conclusive cases, i.e. when $g$ is the only possible value. The curve shows an interesting behaviour, with a sudden transition from most cases being inconclusive to most being conclusive, between $p=0.2$ and $p=0.4$. This is essentially because then $\lambda_N>N/2$, which will rule out any $g'\leq g/2$. This does not guarantee conclusiveness since some $g>g'>g/2$ might still pass, but this requires special values of $\sum_i^N d_i$ and $\sum_i^N d_i^2$.
\section{Discussion and conclusion}
Connectivity is an important structural property of complex networks. We considered simple connected graphs and showed how connectivity can be estimated from the eigenvalues of the Laplace matrix. Our estimation scheme is applicable to any network, quantum or classical, amenable to the extraction of Laplace eigenvalues from measurement results. While the accuracy is best for networks with a degree sequence having small variance, the estimation performs well also for, e.g., networks where the degrees follow a power law. In practice, a network can be too large for completing the entire estimation procedure in a reasonable amount of time; however, since the mean and variance of connectivity can be easily and exactly determined from the eigenvalues, the connectivity of these networks can still be classified accordingly.
We applied our results to networks of identical uniformly coupled quantum harmonic oscillators and showed how not only the connectivity but also the uniform coupling strength can be estimated with local probing of any of the oscillators in the network, making such quantum networks universally suited to the extraction of global properties from locally extractable information.
In this work, we have demonstrated how even in the quantum case, graph theory can be highly useful in elucidating the properties of coupled many-body systems. While here we used information extractable from a quantum network with minimal access, it would be interesting to study the case where a small subset of nodes could be accessed, or investigate how knowing also some of the eigenvectors of the graph could be exploited.
\begin{acknowledgments}
The authors acknowledge financial support from the Horizon 2020 EU collaborative project QuProCS (Grant Agreement No.~641277). J. N. acknowledges the Wihuri foundation for financing his graduate studies.
\end{acknowledgments}
\end{document} |
\begin{document}
\title{Correlation Clustering with Local Objectives}
\author{Sanchit Kalhan \and Konstantin Makarychev \and Timothy Zhou}
\date{Northwestern University}
\maketitle
\begin{abstract}
Correlation Clustering is a powerful graph partitioning model that aims to cluster items based on the notion of similarity between items. An instance of the Correlation Clustering problem consists of a graph $G$ (not necessarily complete) whose edges are labeled by a binary classifier as
``similar'' and ``dissimilar''. An objective which has received a lot of attention in literature is that of minimizing the number of disagreements: an edge is in disagreement if it is a ``similar'' edge and is present across clusters or if it is a ``dissimilar'' edge and is present within a cluster. Define the disagreements vector to be an $n$ dimensional vector indexed by the vertices, where the $v$-th index is the number of disagreements at vertex $v$.
Recently, Puleo and Milenkovic (ICML '16) initiated
the study of the Correlation Clustering framework in which the objectives were more general functions of the disagreements vector. In this paper, we study algorithms for minimizing $\ell_q$ norms $(q \geq 1)$ of the disagreements vector for both arbitrary and complete graphs. We present the first known algorithm for minimizing the $\ell_q$ norm of the disagreements vector on arbitrary graphs and also provide an improved algorithm for minimizing the $\ell_q$ norm $(q \geq 1)$ of the disagreements vector on complete graphs. We also study an alternate cluster-wise local objective introduced by Ahmadi, Khuller and Saha (IPCO '19), which aims to minimize the maximum number of disagreements associated with a cluster. We present an improved ($2 + \varepsilon$) approximation algorithm for this objective. Finally, we complement our algorithmic results for minimizing the $\ell_q$ norm of the disagreements vector with some hardness results.
\end{abstract}
\section{Introduction}
A basic task in machine learning is that of clustering items based on the similarity between them. This task can be elegantly captured by Correlation Clustering, a clustering framework first introduced by \citet{BBC04}. In this model, we are given access to items and the \textit{similarity/dissimilarity} between them in the form of a graph $G$ on $n$ vertices. The edges of $G$ represent whether the items are \textit{similar} or \textit{dissimilar} and are labelled as (``$+$'') and (``$-$'') respectively. The goal is to produce a clustering that agrees with the labeling of the edges as much as possible, i.e., to group positive edges in the same cluster and place negative edges across different clusters (a positive edge that is present across clusters or a negative edge that is present within the same cluster is said to be in disagreement). The Correlation Clustering problem can be viewed as an agnostic learning problem, where we are given noisy examples and the task is to fit a hypothesis as best as possible to these examples. Co-reference resolution (see e.g., \citet*{CR01, CR02}), spam detection (see e.g., \citet{RFV07,BGL14}) and image segmentation (see e.g., \citet*{Wirth17}) are some of the applications to which Correlation Clustering has been applied in practice.
This task is made trivial if the labeling given is consistent (transitive): if $(u,v)$ and $(v,w)$ are similar, then $(u,w)$ is similar for
all vertices $u,v,w$ in $G$ (the connected components on similar edges would give an optimal clustering). Instead, it is assumed that the given labeling is inconsistent, i.e., it is possible that $(u,w)$ are dissimilar even though $(u,v)$ and $(v,w)$ are similar. For such a triplet $u,v,w$, every possible clustering incurs a disagreement on at least one edge and thus, no perfect clustering exists. The optimal clustering is the one which minimizes the disagreements. Moreover, as the number of clusters is not predefined, the optimal clustering can use anywhere from $1$ to $n$ clusters.
Minimizing the total weight of edges in disagreement is the objective that has received the most consideration in the literature. Define the disagreements vector to be an $n$-dimensional vector indexed by the vertices where the $v$-th coordinate equals the number of disagreements at $v$. Thus, minimizing the total number of disagreements is equivalent to minimizing the $\ell_1$ norm of the disagreements vector. \citet*{PM16} initiated the study of local objectives in the Correlation Clustering framework. They focus on complete graphs and study the minimization of $\ell_q$ norms $(q \geq 1)$ of the disagreements vector -- for which they provided a $48$ approximation algorithm. \citet*{CGS17} gave an improved $7$ approximation algorithm for minimizing $\ell_q$ disagreements on complete graphs. They also studied the problem of minimizing the $\ell_\infty$ norm of the disagreements vector
(also known as Min Max Correlation Clustering) for arbitrary graphs, for which they provided a $O(\sqrt{n})$ approximation.
For higher values of $q$ (particularly $q=\infty$), a clustering optimized for minimizing the $\ell_q$ norm prioritizes reducing the
disagreements at vertices that are worst off. Thus, such metrics are very unforgiving in most cases as it is possible that in
the optimal clustering there is only one vertex with high disagreements while every other vertex has low disagreements. Hence, one is forced to infer the most pessimistic picture about the overall clustering. The $\ell_2$ norm is a solution to this tension between the $\ell_1$ and $\ell_\infty$ objectives. The $\ell_2$ norm of the disagreements vector takes into account the disagreements at each vertex while also penalizing the vertices with high disagreements more heavily. Thus, a clustering optimized for the minimum $\ell_2$ norm gives a more balanced clustering as it takes into consideration both the global and local picture.
Recently, \cite*{AKS} introduced an alternative min max objective for correlation clustering (which we call AKS min max objective).
For a cluster $C \subseteq V$, let us refer to similar edges with exactly one endpoint in $C$ and dissimilar edges with both endpoints in $C$
as edges in disagreements with respect to $C$. We call the weight of all edges in disagreement with $C$ the cost of $C$. Then,
the AKS min max objective asks to find a clustering $C_1,\dots, C_T$ that minimizes the maximum cost $C_i$.
\cite{AKS} give an $O(\log n)$ approximation algorithm for this objective.
\textbf{Our contributions. } In this paper, we provide positive and negative results for Correlation Clustering with the $\ell_q$ objective. We first study the problem of minimizing disagreements on arbitrary graphs. We present the first approximation algorithm minimizing any $\ell_q$ norm $(q \geq 1)$ of the disagreements vector.
\begin{theorem}\label{Main.Thm.}
There exists a polynomial time $O(n^{\frac{1}{2} - \frac{1}{2q}} \cdot \log^{\frac{1}{2} + \frac{1}{2q}} n)$ approximation algorithm for the minimum $\ell_q$ disagreements problem on general weighted graphs.
\end{theorem}
For the $\ell_2$ objective, the above algorithm leads to an approximation ratio of $\tilde{O}(n^{\nicefrac{1}{4}})$, thus providing the first known approximation ratio for optimizing the clustering for this version of the objective. Note that the above algorithm matches the best approximation guarantee of $O(\log n)$ for the classical objective of minimizing the $\ell_1$ norm of the disagreements vector. For the $\ell_\infty$ norm, our algorithm matches the guarantee of the algorithm by \citet*{CGS17} up to $\log$ factors. Fundamental combinatorial optimization problems like \textit{Multicut, Multiway Cut} and \textit{s-t Cut} can be framed as special cases of Correlation Clustering. Thus, Theorem \ref{Main.Thm.} leads
to the first known algorithms for \textit{Multicut, Multiway Cut} and \textit{s-t Cut} with the $\ell_q$ objective when
$q\neq 1$ and $q \neq \infty$. We can also
use the algorithm from Theorem~\ref{Main.Thm.} to obtain $O(n^{\frac{1}{2} - \frac{1}{2q}} \cdot \log^{\frac{1}{2} + \frac{1}{2q}} n)$
bi-criteria approximation for Min $k$-Balanced Partitioning with the $\ell_q$ objective (we omit details here).
Next, we study the case of complete graphs. For this case, we present an improved $5$ approximation algorithm for minimizing any $\ell_q$ norm $(q \geq 1)$ of the disagreements vector.
\begin{theorem}
There exists a polynomial time $5$ approximation algorithm for the minimum $\ell_q$ disagreements problem on complete graphs.
\end{theorem}
We also study the case of complete bipartite graphs where disagreements need to be bounded for only one side of the bipartition, and not the whole vertex set. We give an improved $5$ approximation algorithm for minimizing any $\ell_q$ norm $(q \geq 1)$ of the disagreements vector.
\begin{theorem}\label{thm:complete}
There exists a polynomial time $5$ approximation algorithm for the minimum $\ell_q$ disagreements problem on complete bipartite graphs where disagreements are measured for only one side of the bipartition.
\end{theorem}
In this paper, we also consider the AKS min max objective. For this objective, we give a $(2 + \varepsilon)$ approximation algorithm,
which improves the approximation ratio of $O(\log n)$ given by~\cite*{AKS}.
\begin{theorem}
There exists a polynomial time $(2 + \varepsilon)$ approximation algorithm for the AKS min max problem on arbitrary graphs.
\end{theorem}
\confversionOnly{
Finally, in the full version of this paper (see supplemental materials), we present an integrality gap of $\Omega(n^{\frac{1}{2} - \frac{1}{2q}})$
for minimum $\ell_q$ $s-t$ cut and
prove a hardness of approximation of 2 for minimum $\ell_\infty$ $s-t$ cut.
}
\yesOnly{
Our algorithm for the minimum $\ell_q$ disagreements problem is based on rounding the natural convex programming relaxation for this problem. We show that our result is best possible according to this relaxation by providing an almost matching integrality gap. Our integrality gap example is for the minimum $\ell_q$ $s-t$ cut problem (a special case of correlation clustering) and establishes the following result.
\begin{theorem}
The natural convex programming relaxation for the minimum $\ell_q$ disagreements problem has an integrality gap of $\Omega(n^{\frac{1}{2} - \frac{1}{2q}})$ on arbitrary graphs.
\end{theorem}
Finally, we present a hardness of approximation result for minimum $\ell_\infty$ $s-t$ cut.
\begin{theorem}
There is no $\alpha$-approximation algorithm for the min $\ell_\infty$ \textit{s-t cut problem} for $\alpha<2$ unless P = NP.
\end{theorem}
}
\textbf{Previous work.} \citet*{BBC04} showed that it is NP-hard to find a clustering that minimizes the total disagreements, even on complete graphs. They give a constant-factor approximation algorithm to minimize disagreements and a PTAS to maximize agreements on complete graphs. For complete graphs, \citet*{ACN08} presented a randomized algorithm with an approximation guarantee of $3$ to minimize total disagreements. They also gave a $2.5$ approximation algorithm based on LP rounding. This factor was improved to slightly less than $2.06$ by \citet*{CMSY15}. Since the natural LP is known to have an integrality gap of $2$, the problem of optimizing the classical objective is almost settled with respect to the natural LP. For arbitrary graphs, the best known approximation ratio is $O(\log n)$ (see \citet*{CGW03, DEFI06}). Assuming the Unique Games Conjecture, there is no constant-factor approximation algorithm for minimizing $\ell_1$ disagreements on arbitrary graphs (see~\citet{CKKRS06}). \citet*{PM16} first studied Correlation Clustering with more local objectives. For minimizing $\ell_q$ $(q \geq 1)$ norms of the disagreements vector on complete graphs, their algorithm achieves an approximation guarantee of $48$. This was improved to $7$ by \citet*{CGS17}. \citet{CGS17} also studied the problem of minimizing the $\ell_\infty$ norm of the disagreements vector on general graphs. They showed that the natural LP/SDP has an integrality gap of $\nicefrac{n}{2}$ for this problem and provided a $O(\sqrt{n})$ approximation algorithm for minimum $\ell_\infty$ disagreements. \citet*{PM16} also initiated the study of minimizing the $\ell_q$ norm of the disagreements vector (for one side of the bipartition) on complete bipartite graphs. They presented a $10$ approximation algorithm for this problem, which was improved to $7$ by \citet*{CGS17}. Recently, \cite{AKS} studied an alternative objective for the correlation clustering problem. 
Motivated by creating balanced communities for problems such as image segmentation and community detection in social networks, they propose a new cluster-wise min-max objective. This objective minimizes the maximum weight of edges in disagreement associated with a cluster, where an edge is in disagreement with respect to a cluster if it is a similar edge and has exactly one end point in the cluster or if it is a dissimilar edge and has both its endpoints in the cluster. They gave an $O(\log n)$ approximation algorithm for this objective.
\section{Preliminaries}\label{sec:prelim}
We now formally define the Correlation Clustering with $\ell_q$ objective problem. We will need the following
definition. Consider a set of points $V$ and two disjoint sets of edges on $V$: positive edges $E^+$ and negative edges
$E^-$. We assume that every edge has a weight $w_{uv}$. For every partition $\mathcal{P}$ of $V$, we say that a positive
edge is in disagreement with $\mathcal{P}$ if the endpoints $u$ and $v$ belong to different parts of $\mathcal{P}$; and
a negative edge is in disagreement with $\mathcal{P}$ if the endpoints $u$ and $v$ belong to the same part of $\mathcal{P}$.
The vector of disagreements, denoted by $\disagree(\mathcal{P}, E^+, E^-)$, is a $|V|$ dimensional vector
indexed by elements of $V$. Its coordinate $u$ equals
$$
\disagree_u(\mathcal{P}, E^+, E^-) = \smashoperator{\sum_{v:(u,v)\in E^+\cup E^-}} w_{uv} \mathds{1}((u,v) \text{ is in disagreement with }\mathcal{P}).
$$
That is, $\disagree_u(\mathcal{P}, E^+, E^-)$ is the weight of disagreeing edges incident to $u$. We similarly define a cut vector
for a set of edges $E$:
$$
\cut_u(\mathcal{P}, E) = \smashoperator{\sum_{v:(u,v)\in E}} w_{uv} \mathds{1}(u \text{ and } v \text{ are separated by }\mathcal{P}).
$$
We use the standard definition for the $\ell_q$ norm of a vector $x$: $\|x\|_q= (\sum_u x_u^q)^{\frac{1}{q}}$ and
$\|x\|_{\infty}= \max_u x_u$. For a partition $\mathcal{P}$, we denote by $\mathcal{P}(u)$ the piece that contains vertex $u$.
\begin{definition}
In the Correlation Clustering problem with $\ell_q$ objective, we are given a graph $G$ on a set $V$ with
two disjoint sets of edges $E^+$ and $E^-$ and a set of weights $w_{uv}$. The goal is to find a partition $\mathcal{P}$ that minimizes the $\ell_q$
norm of the disagreements vector, $\|\disagree(\mathcal{P}, E^+, E^-)\|_q$.
\end{definition}
In our algorithm for Correlation Clustering on arbitrary graphs, we will use a powerful technique of padded
metric space decompositions~(see e.g., \citet*{Bartal96, Rao99, FT03, GKL03}).
\begin{definition}[Padded Decomposition]
Let $(X, d)$ be a metric space on $n$ points, and let $\Delta > 0$. A probabilistic distribution of partitions $\mathcal{P}$ of $X$ is called a padded decomposition if it satisfies the following properties:
\begin{itemize}
\item Each cluster $C \in \mathcal{P}$ has diameter at most $\Delta$.
\item For every $u \in X$ and $\delta > 0$,
$\mathrm{Pr}(\Ball(u, \delta) \not\subset \mathcal{P}(u)) \leq D\cdot \frac{\delta}{\Delta}$,
where $\Ball(u, \delta) = \{v \in X : d(u,v) \leq \delta\}$.
\end{itemize}
\end{definition}
\begin{theorem}[\citet*{FRT03}]\label{prelim:thm:padded-decomposition}
Every metric space $(X,d)$ on $n$ points admits a $D=O(\log n)$ separating padded decomposition. Moreover, there is a polynomial-time algorithm that samples
a partition from this distribution.
\end{theorem}
\section{Convex Relaxation}\label{sec:lp}
\begin{figure*}
\caption{Convex relaxation for Correlation Clustering with min $\ell_q$ objective for $q < \infty$.}
\label{figure:LPRelaxation}
\end{figure*}
In our algorithms for minimizing $\ell_q$ disagreements in arbitrary and complete graphs, we use a convex relaxation given in Figure~\ref{figure:LPRelaxation}. Our convex relaxation for
Correlation Clustering is fairly standard. It is similar to relaxations used in the papers by~\citet*{GVY96, DEFI06, CGW03}. For every pair
of vertices $u$ and $v$, we have a variable $x_{uv}$ that is equal to the distance between $u$ and $v$ in the ``multicut metric''.
Variables $x_{uv}$ satisfy the triangle inequality constraints~(P3). They are also symmetric~(P4) and $x_{uv}\in [0,1]$~(P5). Thus, the set of
vertices $V$ equipped with the distance function $d(u,v)= x_{uv}$ is a metric space.
Additionally, for every vertex $u\in V$, we have variables $y_u$ and $z_u$ (see constraints~(P1) and (P2)) that lower bound the number of disagreeing edges
incident to $u$. The objective of our convex program is to minimize $\max(\|y\|_q, (\sum_{u} z_u)^{\frac{1}{q}})$. Note that all constraints in
the program (P) are linear; however, the objective function of (P) is not convex as is. So in order to find the optimal solution, we raise
the objective function to the power of $q$ and find feasible $x,y,z$ that minimizes the objective $\max(\|y\|^q_q, \sum_{u} z_u)$.
Let us verify that program (P) is a relaxation for Correlation Clustering. Consider an arbitrary partitioning $\mathcal{P}$ of $V$.
In the integral solution corresponding to $\mathcal{P}$, we set $x_{uv} = 0$ if $u$ and $v$ are in the same cluster in $\mathcal{P}$; and $x_{uv} = 1$ if $u$ and $v$ are
in different clusters in $\mathcal{P}$. In this solution, distances $x_{uv}$ satisfy triangle inequality constraints~(P3) and
$x_{uv} = x_{vu}$ (P4). Observe that a positive edge $(u,v)\in E^+$ is in disagreement with $\mathcal{P}$ if $x_{uv} = 1$; and a negative edge
$(u,v)\in E^-$ is in disagreement if $x_{uv} = 0$. Thus, in this integral solution, $y_u = \disagree_u(\mathcal{P}, E^+,E^-)$ and
moreover, $z_u \leq y^q_u$. Therefore, in the integral solution corresponding to $\mathcal{P}$, the objective function of (P) equals
$\|\disagree_u(\mathcal{P}, E^+,E^-)\|_q$. Of course, the cost of the optimal fractional solution to the problem may be
less than the cost of the optimal integral solution. Thus, (P) is a relaxation for our problem. Below, we
denote the cost of the optimal fractional solution to (P) by $LP$.
We remark that we can get a simpler relaxation by removing variables $z$ and changing the objective function to $\|y\|_q$.
This relaxation also works for $\ell_{\infty}$ norm. We use it in our 5-approximation algorithm.
\section{Correlation Clustering on Arbitrary Graphs}
In this section, we describe our algorithm for minimizing $\ell_q$ disagreements on arbitrary graphs. We will prove the following main theorem.
\begin{theorem}\label{thm:arbit-Graphs}
There exists a randomized polynomial-time $O(n^{\frac{q-1}{2q}}\log^{\frac{q+1}{2q}} n)$ approximation algorithm for Correlation Clustering with the $\ell_q$ objective ($q\geq 1$).
\end{theorem}
We remark that the same algorithm gives $O(\sqrt{n\log n})$ approximation for the $\ell_{\infty}$ norm. We omit the details in the conference version of the paper.
\noindent Our algorithm relies on a procedure for partitioning arbitrary metric spaces into pieces of small diameter, which we describe first.
\input{padded-decomp}
\subsection{Proof of Theorem~\ref{thm:arbit-Graphs}}
We now show how to use the above metric space partitioning scheme to obtain an approximation algorithm for Correlation Clustering on arbitrary graphs.
\begin{proof}[Proof of Theorem~\ref{thm:arbit-Graphs}]
Our algorithm first finds the optimal solution $x,y,z$ to the convex relaxation (P) presented in Section~\ref{sec:lp}. Then, it defines a metric $d(u,v)= x_{uv}$ on
the vertices of the graph. Finally, it runs the metric space partitioning algorithm with $\Delta = 1/2$ from Section~\ref{sec:partition-metric-spaces}
(see Theorem~\ref{thm:part-metric-spaces}) and outputs the obtained partitioning $\mathcal{P}$.
Let us analyze the performance of this algorithm. Denote the cost of the optimal solution $x,y,z$ by $LP$. We know that the cost of the optimal solution
$OPT$ is lower bounded by $LP$ (see Section~\ref{sec:lp} for details). By Theorem~\ref{thm:part-metric-spaces}, applied to the graph $G=(V,E^+)$
(note: we ignore negative edges for now),
\begin{equation}\label{eq:thm:part-metric-spaces:approx-alg}
\mathbb{E}\Big[\|\cut(\mathcal{P}, E^+)\|_q\Big] \leq \frac{C}{\Delta} n^{\frac{q-1}{2q}}\log^{\frac{q+1}{2q}} n \cdot \Big(\big(\sum_{u\in V} y_u^q\big)^{\frac{1}{q}} +
\big(\sum_{u\in V} z_u\big)^{\frac{1}{q}}\Big)\leq 4C n^{\frac{q-1}{2q}}\log^{\frac{q+1}{2q}} n \cdot LP.
\end{equation}
Recall that a positive edge is not in agreement if and only if it is cut. Hence, $\disagree(\mathcal{P},E^+,\varnothing) = \cut(\mathcal{P}, E^+)$, and the bound above holds
for $\mathbb{E} \|\disagree(\mathcal{P}, E^+,\varnothing)\|_q $. By the triangle inequality,
$\mathbb{E}\|\disagree(\mathcal{P}, E^+,E^-)\|_q \leq \mathbb{E}\|\disagree(\mathcal{P}, E^+,\varnothing)\|_q + \mathbb{E}\|\disagree(\mathcal{P}, \varnothing, E^-)\|_q$.
Hence, to finish the proof, it remains to upper bound $\mathbb{E}\|\disagree(\mathcal{P}, \varnothing, E^-)\|_q$.
Observe that the diameter of every cluster returned by the algorithm is at most $\Delta = 1/2$. For all disagreeing
negative edges $(u,v)\in E^-$, we have $x_{uv}\leq 1/2$ and $1-x_{uv}\geq 1/2$. Thus, $\disagree_u(\mathcal{P}, \varnothing, E^-)\leq 2y_u$ for
every $u$, and $\mathbb{E}\|\disagree(\mathcal{P}, \varnothing, E^-)\|_q\leq 2\|y\|_q\leq 2LP$.
This completes the proof.
\end{proof}
\section{Correlation Clustering on Complete Graphs}\label{sec:cor-clust-complete}
In this section, we present our algorithm for Correlation Clustering on complete graphs and its analysis. Our algorithm
achieves an approximation ratio of $5$ and is an improvement over the approximation ratio of $7$ by \citet*{CGS17}.
\subsection{The Algorithm}
Our algorithm is based on rounding an optimal solution to the convex relaxation~(P). Recall that for complete graphs, we can get a simpler relaxation by removing the variables $z$ in our convex programming formulation. We start with considering the entire vertex set of
unclustered vertices. At each step $t$ of the algorithm, we select a subset of vertices as a cluster $C_t$ and remove it
from unclustered vertices. Thus, each vertex is assigned to a cluster exactly once and is never removed from a cluster once it is assigned.
For each vertex $w \in V$, let $\Ball(w,\rho) = \{u \in V : x_{uw} \leq \rho\}$ be the set of vertices within a distance of $\rho$ from $w$.
For $r = 1/5$, the quantity $r - x_{uw}$ where $u \in \Ball(w,r)$ represents the distance from $u$ to the boundary of the ball of
radius $1/5$ around $w$. Let $V_t \subseteq V$ be the set of unclustered vertices at step $t$, and define
$$L_t(w) = \sum_{u \in \Ball(w,r) \cap V_t} r - x_{uw}.$$
At each step $t$, we select the vertex $w_t$ that maximizes the quantity $L_t(w)$ over all unclustered vertices $w\in V_t$ and select the set $\Ball(w_t,2r)$ as a cluster.
We repeat this step until all the nodes have been clustered. A pseudo-code for our algorithm is given in Figure~\ref{fig:Alg2}.
\begin{figure}
\caption{Algorithm for Correlation Clustering on complete graphs.}
\label{fig:Alg2}
\label{alg:corelation-complete}
\end{figure}
\subsection{Analysis}
In this section, we present an analysis of our algorithm.
\begin{theorem}\label{thm:5-apx-main}
Algorithm 2 gives a $5$-approximation for Correlation Clustering on complete graphs.
\end{theorem}
For an edge $(u,v) \in E$, let $\lp{u,v}$ be the LP cost of the edge $(u,v)$:
$\lp{u,v} = x_{uv} $ if $(u,v) \in E^+$ and $\lp{u,v} = 1 - x_{uv}$ if $(u,v) \in E^-$. Let $\alg{u,v} = \mathds{1}((u,v) \text{ is in disagreement})$.
Define
$$\pft{u} = \sum_{(u,v) \in E} \lp{u,v} - r \sum_{(u,v) \in E} \alg{u,v},$$
where $r=1/5$. We show that for each vertex $u \in V$, we have $\pft{u} \geq 0$ (see Lemma~\ref{lem:pft} below) and,
therefore, the number of disagreeing edges incident to $u$ is upper bounded by $5y(u)$:
$$ALG(u) = \smashoperator[r]{\sum_{v:(u,v) \in E}} \alg{u,v} \leq \frac{1}{r} \smashoperator[r]{\sum_{v:(u,v) \in E}} \lp{u,v} = 5y(u).$$
Thus, $\|ALG\|_q \leq 5 \|y\|_q$ for any $q\geq 1$. Consequently, the approximation ratio of the algorithm is at most $5$ for any norm $\ell_q$.
\begin{lemma}\label{lem:pft}
For every $u\in V$, we have $\pft{u} \geq 0$.
\end{lemma}
At each step $t$ of the algorithm, we create a new cluster $C_t$ and remove it from the graph. We also remove all
edges with at least one endpoint in $C_t$. Denote this set of edges by
$$\Delta E_t=\{(u,v): u\in C_t \text{ or } v \in C_t\}.$$
Now let
$$
\Prft{u,v}{t} = \begin{cases}\lp{u,v} - r \alg{u,v},& \text{if } (u,v)\in \Delta E_t\\0,&\text{otherwise}\end{cases}.
$$
\begin{equation}\label{eq:for-profit-u}
\Prft{u}{t} = \sum_{v\in V_t}\Prft{u,v}{t}
= \smashoperator[r]{\sum_{(u,v) \in \Delta E_t}} \lp{u,v} - r \smashoperator[r]{\sum_{(u,v) \in \Delta E_t}}\alg{u,v}.
\end{equation}
As all sets $\Delta E_t$ are disjoint, $\pft{u} = \sum_t \Prft{u}{t}$. Thus, to prove Lemma~\ref{lem:pft},
it is sufficient to show that $\Prft{u}{t}\geq 0$ for all $t$. Note that we only need to consider $u\in V_t$ as $\Prft{u}{t} = 0$ for
$u\notin V_t$.
\iffalse
yesOnly{
\begin{figure}
\caption{Illustration for Algorithm 2.}
\end{figure}}
\fi
Consider a step $t$ of the algorithm and vertex $u\in V_t$. Let $w = w_t$ be the center of the cluster chosen at this step.
First, we show that since the diameter of the cluster $C_t$ is $4r$, for all negative edges $(u,v) \in E^-$ with $u,v \in C_t$, we can charge the cost of
disagreement to the edge itself, that is, $\Prft{u,v}{t}$ is nonnegative for $(u,v)\in E^-$ (see Claim~\ref{cl:neg-edge-profit-nenneg}). We then consider two cases:
$x_{uw}\in [0, r]\cup [3r,1]$ and $x_{uw}\in (r,3r]$.
The former case is fairly simple since disagreeing positive edges $(u,v)\in E^+$ (with $x_{uw}\in [0, r]\cup [3r,1]$) have a ``large'' LP cost. In Lemma~\ref{lem:0r} and Lemma~\ref{lem:r1},
we prove that the cost of disagreement can be charged to the edge itself and hence $\Prft{u}{t} \geq 0$.
We then consider the latter case. For vertices $u$ with $x_{uw} \in (r, 3r]$,
$\Prft{u,v}{t}$ for some disagreeing positive edges $(u,v)$ might be negative. Thus, we split the profit at step $t$ for such vertices $u$ into the profit
they get from edges $(u,v)$ with $v$ in $\Ball(w,r)\cap V_t$ and from edges with $v$ in $V_t\setminus \Ball(w,r)$. That is,
$$
\Prft{u}{t} = \underbrace{\sum_{v\in \Ball(w,r)}\Prft{u,v}{t}}_{P_{high}(u)} + \underbrace{\sum_{v\in V_t\setminus \Ball(w,r)}\Prft{u,v}{t}}_{P_{low}(u)}.
$$
Denote the first term by $P_{high}(u)$ and the second term by $P_{low}(u)$. We show that $P_{low}(u)\geq -L_t(u)$ (see Lemma~\ref{lem:PLow-Ltu})
and $P_{high}(u)\geq L_t(w)$ (see Lemma~\ref{lem:PHigh-Ltw}) and
conclude that $\Prft{u}{t} = P_{high}(u) + P_{low}(u)\geq L_t(w)-L_t(u)\geq 0$ since $L_t(w) = \max_{w'\in V_t} L_t(w') \geq L_t(u)$.
In the following claim, we show that we can charge the cost of disagreement of a negative edge to the edge itself.
\begin{claim}\label{cl:neg-edge-profit-nenneg}
For a negative edge $(u,v)\in E^-$, $\Prft{u,v}{t}$ is always nonnegative.
\end{claim}
\begin{proof}
The only case when $(u,v)$ is in disagreement is when both $u$ and $v$ belong to the new cluster. In this
case, they lie in the ball of radius $2r$ around $w$ (and thus $x_{uw}, x_{vw} \leq 2r$). Thus the distance $x_{uv}$ between them is at most $4r$ (because $x_{uv} \leq x_{uw} + x_{vw} \leq 4r$). The LP cost of the edge $(u,v)$ is $\lp{u,v} = 1 - x_{uv} \geq 1- 4r = r$. Thus, $ \Prft{u,v}{t} = \lp{u,v}-r \alg{u,v} = \lp{u,v}- r \geq 0$.
\end{proof}
In Lemma~\ref{lem:0r} and Lemma~\ref{lem:r1}, we consider the case when $x_{uw} \in [0,r] \cup [3r, 1]$.
\begin{lemma}\label{lem:0r}
If $x_{uw}\leq r$, then $\Prft{u,v}{t}\geq 0$ for all $v\in V_t$.
\end{lemma}
\begin{proof}
If $(u,v)\in E^-$, then $\Prft{u,v}{t}\geq 0$ by Claim~\ref{cl:neg-edge-profit-nenneg}. Assume that $(u,v)\in E^+$. Since
$x_{uw}\leq r$, $u$ belongs to the cluster $C_t$. Thus, $(u,v)$ disagrees only if $v$ does not belong to that cluster.
In this case, $x_{wv}\geq 2r$ and by the triangle inequality $x_{uv}\geq x_{vw} - x_{uw}\geq r$. Therefore,
$\Prft{u,v}{t} = x_{uv}-r \geq 0$.
\end{proof}
\begin{lemma}\label{lem:r1}
If $x_{uw}\geq 3r$, then $\Prft{u,v}{t}\geq 0$ for all $v \in V_t$.
\end{lemma}
\begin{proof}
As in the previous lemma, we can assume that $(u,v)\in E^+$. If $x_{uw}\geq 3r$,
then $u$ does not belong to the new cluster $C_t$. Thus, $(u,v)$ disagrees only if $v$ belongs to $C_t$.
In this case, $x_{wv}\leq 2r$ and by the triangle inequality $x_{uv}\geq x_{uw} - x_{vw}\geq r$. Therefore,
$\Prft{u,v}{t} = x_{uv}-r \geq 0$.
\end{proof}
We next consider $u$ such that $x_{uw} \in (r, 3r]$. First, we show that the profit we obtain from every edge $(u,v)$ with $v \in \Ball(w,r)$ is
at least $r - x_{vw}$, regardless of whether the edge is positive or negative.
\begin{claim}\label{claim:prof-from-core-v}
If $x_{uw} \in (r,3r]$ and $v \in \Ball(w,r)\cap V_t$, then $\Prft{u,v}{t}\geq r-x_{vw}$.
\end{claim}
\begin{proof}
First consider $u$ such that $x_{uw} \in (r, 2r]$. Note that $x_{uv} \geq x_{uw} - x_{vw} \geq r - x_{vw}$. Moreover, $x_{uv} \leq x_{uw} + x_{vw} \leq 2r + x_{vw}$. Thus, if $(u,v) \in E^+$, then $\Prft{u,v}{t} \geq r - x_{vw}$. Otherwise, $\Prft{u,v}{t} \geq (1 - 2r - x_{vw}) - r \geq 2r - x_{vw}$.
For $x_{uw} \in (2r, 3r]$, note that $x_{uv} \geq x_{uw} - x_{vw} \geq 2r - x_{vw}$. Moreover, $x_{uv} \leq x_{uw} + x_{vw} \leq 3r + x_{vw}$.
Thus, if $(u,v) \in E^+$, then $\Prft{u,v}{t} \geq (2r - x_{vw}) - r \geq r - x_{vw}$. Otherwise, $\Prft{u,v}{t} \geq (1 - 3r - x_{vw}) \geq 2r - x_{vw}$.
\end{proof}
Using the above claim, we can sum up the profits from all vertices $v$ in $\Ball(w, r)$ and lower bound $P_{high}(u)$ as follows.
\begin{lemma}\label{lem:PHigh-Ltw}
If $x_{uw}\in (r,3r]$, then $P_{high}(u) \geq L_t(w)$.
\end{lemma}
\begin{proof}
By Claim~\ref{claim:prof-from-core-v}, we have $\Prft{u,v}{t}\geq r-x_{vw}$ for all $v\in \Ball(w,r)\cap V_t$. Thus,
$$
P_{high}(u) = \sum_{v\in \Ball(w,r)\cap V_t}\Prft{u,v}{t}\geq \sum_{v\in \Ball(w,r)\cap V_t}(r-x_{vw}) = L_t(w).
$$
\end{proof}
We now lower bound $P_{low}(u)$. To this end, we estimate each term $\Prft{u,v}{t}$ in the definition of $P_{low}$.
\begin{claim}\label{claim:prof-uv-lower-bound}
If $x_{uw} \in (r,3r]$ and $v \in V_t \setminus \Ball(w,r)$, then $\Prft{u,v}{t}\geq \min(x_{uv} - r, 0)$.
\end{claim}
\begin{proof}
By Claim~\ref{cl:neg-edge-profit-nenneg}, if $(u,v)$ is a negative edge, then $\Prft{u,v}{t} \geq 0$. The profit is $0$ if $(u,v)\notin \Delta E_t$ (i.e., neither $u$ nor $v$ belong
to the new cluster). So let us assume that $(u,v)$ is a positive edge in $\Delta E_t$. Then, the profit obtained from $(u,v)$ is $x_{uv}$ if $(u,v)$ is in agreement
and $x_{uv} - r$ if $(u,v)$ is in disagreement. In any case, $\Prft{u,v}{t} \geq x_{uv} - r \geq \min(x_{uv} - r, 0)$.
\end{proof}
Lemma~\ref{lem:PLow-Ltu} is an immediate corollary of Claim~\ref{claim:prof-uv-lower-bound}.
\begin{lemma}\label{lem:PLow-Ltu}
If $x_{uw}\in (r,3r]$, then $P_{low}(u) \geq -L_t(u)$.
\end{lemma}
\begin{proof}
By Claim~\ref{claim:prof-uv-lower-bound}, we have $\Prft{u,v}{t}\geq \min(x_{uv} - r,0)$ for all $v\in V_t\setminus \Ball(w,r)$. Thus,
\begin{align*}
P_{low}(u) &= \sum_{v\in V_t\setminus \Ball(w,r)}\Prft{u,v}{t}\\
&\geq \sum_{v\in V_t\setminus \Ball(w,r)} \min(x_{uv} - r,0)\\
&\overset{a}{\geq} \;\;\;\;\;\sum_{v\in V_t} \min(x_{uv} - r,0) \\
&\overset{b}{=} \sum_{v\in \Ball(u,r) \cap V_t} (x_{uv} - r) \\
&= - L_t(u).
\end{align*}
Here we used that (a) all terms $\min(x_{uv} - r,0)$ are nonpositive, and (b) $\min(x_{uv} - r, 0) = 0$ if $v\notin \Ball(u,r)$.
\end{proof}
This finishes the proof of Lemma~\ref{lem:pft}.
\pagebreak
\section{Correlation Clustering with AKS Min Max Objective}
In this section, we present our improved algorithm for Correlation Clustering with AKS Min Max Objective. Our algorithm produces a clustering of cost at most $(2 + \varepsilon) OPT$, which improves upon the bound of $O(\log n)$ given by~\citet*{AKS}.
For a subset $S \subseteq V$ of vertices, we use $\cost^+(S)$ to refer to the weight of positive edges ``associated'' with $S$ that
are in disagreement. These are the edges with exactly one end point in $S$. Thus, $\cost^+(S) = \sum_{(u,v) \in E^+, u \in S, v \not\in S} w_{uv}$. Similarly, we use $\cost^-(S)$ to refer to the weight of dissimilar edges ``associated'' with $S$ that are in disagreement. These are the edges with both endpoints in $S$. Thus, $\cost^-(S) = \sum_{(u,v) \in E^-, u,v \in S} w_{uv}$. The total cost of the set $S$ is $\cost(S) = \cost^+(S) + \cost^-(S)$.
Similar to the algorithm of~\citet{AKS}, our algorithm works in two phases. In the first phase,
the algorithm covers all vertices of the graph with (possibly overlapping) sets $S_1,\dots, S_k$ such that the cost of each set $S_i$ is at most $2OPT$ (i.e., $\cost(S_i)\leq 2OPT$ for each $i \in \{1,\dots, k\}$).
In the second phase, the algorithm finds sets $P_1,\dots, P_k$ such that:
(1) $P_1,\dots, P_k$ are disjoint and cover the vertex set;
(2) $P_i \subseteq S_i$ (and, consequently, $\cost^-(P_i)\leq \cost^-(S_i)$);
(3) $\cost^+(P_i)\leq (1+\varepsilon)\cost^+(S_i)$.
The sets $P_1,\dots, P_k$ are obtained from $S_1, \ldots,S_k$ using an uncrossing procedure of~\cite{BFKMNS}. Hence the clustering that is output is $\mathcal{P}=(P_1,\dots, P_k)$. The improvement in the approximation factor comes from the first phase of the algorithm.
\subsection{The algorithm}\label{alg:mincostcover}
At the core of our algorithm is a simple subproblem:
For a given vertex $z \in V$, find a subset $S \subseteq V$ containing $z$ such that $\cost(S)$ is minimized.
We solve this subproblem using a linear programming relaxation, which is formulated as follows:
The LP has a variable $x_u$ for each vertex $u\in V$. In the intended integral
solution, we have $x_u = 1$ if $u$ is in the set $S$, and $x_u = 0$, otherwise. That is, $x_u$ is the indicator of the event
``$u\in S$''. The LP has only one constraint: $x_z = 1$. A complete description of the LP can be found in Figure~\ref{fig:cover-z}. In Claim~\ref{clm:valid-lp-subproblem} we show that this LP is indeed a valid relaxation for our subproblem.
\begin{figure}
\caption{LP relaxation for covering $z$ with a low cost set $S$.}
\label{fig:cover-z}
\end{figure}
\begin{claim}\label{clm:valid-lp-subproblem}
The LP relaxation described in Figure~\ref{fig:cover-z} is a valid relaxation for the subproblem.
\end{claim}
\begin{proof}
Let us verify that this is a valid relaxation for the problem. As we discussed above,
in the intended integral solution, we have $x_u = 1$ if $u$ is in the set $S$, and $x_u = 0$, otherwise.
That is, $x_u$ is the indicator of the event ``$u\in S$''.
Consider a positive edge $(u,v)\in E^+$. In the integral solution, $|x_u-x_v|=1$ if and only if one of the vertices $u$ or $v$ is in $S$ and the other one is not.
In this case, the edge $(u,v)$ is in disagreement with $S$. Now, consider
a negative edge $(u,v)\in E^-$. In the integral solution, $(x_u + x_v -1)^+ = 1$ if and only if both $u$ and $v$ are
in $S$. Again, in this case, the edge $(u,v)$ is in disagreement with $S$. Thus, this LP is a relaxation for our
problem.
Note that we can linearize the $|\cdot|$ and $(\cdot)^+$ terms in the objective as follows. We can replace terms of the type $|x_u - x_v|$ with variables $\mu_{uv}$ and introduce the constraints $\mu_{uv} \geq (x_u - x_v)$ and $\mu_{uv} \geq (x_v - x_u)$. Similarly, we can replace terms of the type $(x_u + x_v -1)^+$ with variables $\eta_{uv}$ and introduce the constraints
$\eta_{uv} \geq (x_u + x_v -1)$ and $\eta_{uv} \geq 0$. It is easy to see that the minimum values for the variables $\mu_{uv}$ and $\eta_{uv}$ is attained at $|x_u - x_v|$ and $(x_u + x_v -1)^+$ respectively.
\end{proof}
We are now ready to present our algorithm.
\noindent \textbf{Algorithm (Find minimum cost set):} For each $t\in [0,1]$, define a threshold
set $S_t$ as $S_t=\{u:x_u \geq t\}$. There are at most $n$ such distinct sets $S_t$ (since the set $\{x_u:u\in V\}$
contains at most $n$ elements). Our algorithm picks a set $S_t$ that minimizes
$\cost(S_t)$ and outputs it.
\begin{lemma}\label{lm:2approx-round}
The algorithm described above finds a set of cost at most $2LP$, where $LP$ is the
cost of the $LP$ solution.
\end{lemma}
\begin{proof}
We show that if we pick $t$ uniformly at random in $[1/2,1]$
then the expected cost of a random set $S_t$ is at most $2LP$. Consequently,
the minimum cost of set $S_t$ for $t\in[0,1]$ is at most $2LP$ and, hence,
the algorithm returns a solution of cost at most $2 LP$.
The probability that a positive edge $(u,v)\in E^+$ is in disagreement
with $S$ equals the probability that random $t$ lies between $x_u$ and $x_v$,
which is at most $2|x_u-x_v|$ (since $|x_u-x_v|$ is the length of the
interval $[x_u,x_v]$ and $2$ is the density of the random variable $t$
on the interval $[1/2,1]$). That is, this probability is upper bounded by
twice the LP cost of the edge $(u,v)$.
The probability that a negative edge $(u,v)\in E^-$ is
in disagreement with $S$ equals the probability that $t\leq \min(x_u,x_v)$
which is $2(\min(x_u,x_v) - 1/2)^+ = (2\min(x_u,x_v) -1)^+$.
This probability is upper bounded by the LP cost
of the negative edge $(u,v)$ (i.e., $(x_u+x_v-1)^+$). This concludes the
proof.
\end{proof}
Thus, to obtain a cover of all the vertices, we pick yet uncovered vertices $z\in V$ one by one and for each $z$, find a set $S(z)$ as described above.
Then, we remove those sets $S(z)$ that are completely covered by other sets. The obtained family of sets $\mathcal{S}=\{S(z)\}$ satisfies the following
properties: (1) Sets in $\mathcal{S}$ cover the entire set $V$; (2) $\cost(S) \leq 2OPT$ for each $S\in \mathcal{S}$; (3) Each set $S\in \mathcal{S}$ is not covered by the other
sets in $\mathcal{S}$ (that is, for each $S \in \mathcal{S}$, $S \not\subset \cup_{S' \in (\mathcal{S} \setminus \{S\})} S'$). However, sets $S$ in $\mathcal{S}$ are not necessarily disjoint.
Following~\cite{AKS}, we then apply an uncrossing procedure developed by~\cite{BFKMNS} to the sets $S_i$ in $\mathcal{S}$ and obtain
disjoint sets $P_i$. Each set $P_i$ is a subset of $S_i$ and therefore $\cost^-(P_i)\leq \cost^-(S_i)$. Moreover, $\cost^+(P_i)\leq \cost^+(S_i) + \varepsilon OPT$ (see Section~\ref{sec:aks-uncrossing}).
Hence, $P_1,\dots, P_k$ is a $2(1+\varepsilon)$ approximation for Correlation Clustering with the AKS Min Max objective.
\appendix
\section{Uncrossing Overlapping Sets}\label{sec:aks-uncrossing}
For completeness, we present here a proof of the following lemma from~\cite{BFKMNS}. Denote by $\delta(S)$ the set of all positive edges leaving set
$S$ in graph $G$. Then, $\cost^+(S)=w(\delta(S))$.
\begin{lemma}[Uncrossing argument in~\cite{BFKMNS}]
There exists a polynomial-time algorithm that given a weighted graph $G=(V,E)$, a family of sets $S_1,\dots S_k$ that covers all
vertices in $G$, and a parameter $\varepsilon = 1/poly(n)$, finds disjoint sets $P_1,\dots,P_k$ covering $V$ such that for each $i$:
\begin{enumerate}
\item $P_i\subset S_i$; and
\item $w(\delta(P_i))\leq w(\delta(S_i)) + \varepsilon \max_j w(\delta(S_j))$.
\end{enumerate}
\end{lemma}
\begin{proof}
Let us first describe the uncrossing algorithm from the paper~\cite{BFKMNS}. Initially, the algorithm
sets $P^0_i = S_i \setminus \cup_{j< i}S_j$ for each $i\in\{1,\dots, k\}$. Then, at every step $t$, it finds a set $P_i^t$ violating the
desired bound
\begin{equation}\label{eq:bound-uncrossing}
w(\delta(P^t_i))\leq w(\delta(S_i)) + \varepsilon \max_j w(\delta(S_j))
\end{equation}
and updates all sets as follows: $P^{t+1}_i = S_i$; and $P^{t+1}_j = P^t_j \setminus S_i$ for $j \neq i$.
The algorithm terminates and outputs sets $P^t_i$ when bound~(\ref{eq:bound-uncrossing}) holds for all sets $P^t_i$.
It is easy to see that the following loop invariants hold at every step of the algorithm: (1) each $P^t_i$ is a subset of $S_i$; (2) sets $P^t_i$ are disjoint; (3) sets $P^t_i$ cover all vertices in $V$. It is also immediate that when the algorithm terminates
sets $P^t_i$ satisfy~(\ref{eq:bound-uncrossing}). We only need to check that the algorithm stops in polynomial time.
Let $B = \max_j w(\delta(S_j))$. Define a potential function $\varphi(t) = \sum_{i=1}^k w(\delta(P^t_i))$. Observe that initially
$\varphi(0)\leq 2\sum_i w(\delta(S_i))$, since every edge cut by the partition $(P_1,\dots, P_k)$ belongs to some $S_i$.
Since, $w(\delta(S_i))\leq B$ for all $i$, we have $\varphi(0)\leq 2kB$. We will show that at every step of the algorithm
$\varphi(t)$ decreases by at least $2\varepsilon B$ and thus the algorithm terminates in at most $k/\varepsilon$ steps.
Consider step $t$ of the algorithm. Suppose that at this step of the algorithm, set $P^t_i$ violated the constraint and thus it was replaced by
$S_i$. Write,
\begin{align*}
\varphi(t+1) - \varphi(t) &= \Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + \sum_{j\neq i}(w(\delta(P^{t+1}_j)) - w(\delta(P^{t}_j)))\\
&=\Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + \sum_{j\neq i}(w(\delta(P^{t}_j\setminus S_i)) - w(\delta(P^{t}_j))).
\end{align*}
Observe that for every two subsets of vertices $P$ and $S$ we have the following inequality:
\begin{align*}
w(\delta(P\setminus S)) - w(\delta(P)) &= \phantom{-} \Big(w(E(P \setminus S, V\setminus P)) + w(E(P \setminus S, P\cap S))\Big) \\
&\phantom{=} - \Big(w(E(P \setminus S, V\setminus P)) + w(E(P\cap S, V \setminus P))\Big)\\
&= \phantom{-} w(E(P\cap S, P \setminus S)) - w(E(P\cap S, V \setminus P))\\
&\leq \phantom{-} w(E(P\cap S, P \setminus S)) - w(E(P\cap S, S \setminus P))\\
&=\phantom{-}\Big(w(E(P\cap S, P \setminus S)) + w(E(S \setminus P, P \setminus S))\Big) \\ &\phantom{=} -
\Big(w(E(P\cap S, S \setminus P)) + w(E(P \setminus S, S \setminus P))\Big)\\
&=\phantom{-}w(E(S, P \setminus S)) -w(E(P, S \setminus P)).
\end{align*}
Also, note that $P^t_i \subset S_i \setminus P_j^t$ (since $P^t_i \subset S_i$ and all $P^t_j$ are disjoint). Consequently,
$w(E(P^t_i, P^t_j)) \leq w(E(S_i \setminus P_j^t, P^t_j))$.
Therefore,
\begin{align*}
\varphi(t+1) - \varphi(t)
&= \Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + \sum_{j\neq i} w(E(S_i, P_j^t \setminus S_i)) -w(E(P_j^t, S_i \setminus P_j^t))\\
&\leq \Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + \sum_{j\neq i} w(E(S_i, P_j^t \setminus S_i)) -w(E(P_j^t, P_i^t)).
\end{align*}
Using again that the sets $P_j^t$ partition $V$ into disjoint pieces, we get
\begin{align*}
\varphi(t+1) - \varphi(t)
&\leq \Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + \sum_{j\neq i} w(E(S_i, P_j^t \setminus S_i)) -w(E(P_j^t, P_i^t))\\
&= \Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + w(E(S_i, \cup_{j\neq i} P_j^t \setminus S_i)) -w(E( \cup_{j\neq i} P_j^t, P_i^t))\\
&=\Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) + \underbrace{w(E(S_i, V \setminus S_i))}_{=\delta(S_i)} - \underbrace{w(E(V\setminus P_i^t, P_i^t))}_{=\delta(P_i^t)}\\
&=2 \Big(w(\delta(S_i)) - w(\delta(P^t_i))\Big) \leq -2\varepsilon B.
\end{align*}
This concludes the proof.
\end{proof}
\section{Integrality gap}
In this section, we present an integrality gap example for the convex program (P). We describe an instance of the $\ell_q$ $s-t$ cut problem on $\Theta(n)$ vertices that has an integrality gap of $\Omega(n^{\frac{1}{2} - \frac{1}{2q}})$. In our integrality gap example, we describe a layered graph with $\Theta(n^\frac{1}{2})$ layers, with each layer consisting of a complete bipartite graph on $\Theta(n^\frac{1}{2})$ vertices. Between each layer $i$ and $i+1$, there is a terminal $s_i$ which connects these two layers. Finally, the terminals $s$ and $t$ are located at opposite ends of this layered graph. We will observe that for any integral cut separating $s$ and $t$, there will be at least one vertex such that a large fraction of the edges incident to it are cut. We will show that there is a corresponding fractional solution that is cheaper compared to any integral cut as the fractional solution can ``spread'' the cut equally across the layers, thus not penalizing any individual layer too harshly. In doing so, we will prove the following theorem,
\begin{theorem}
The integrality gap for the convex relaxation (P) is $\Omega(n^{\frac{1}{2}-\frac{1}{2q}})$.
\end{theorem}
\begin{proof}
We now give a more formal description of the layered graph discussed above. The construction has two parameters $a$ and $b$, so we will call such a graph $G_{a,b}$. The graph consists of $b$ layers with each layer consisting of the complete bipartite graph $K_{a,a}$. We refer to layer $i$ of the graph as $G^i_{a,b}$ and refer to the left and right hand of the bipartition as $L(G^i_{a,b})$ and $R(G^i_{a,b})$ respectively. In addition to these layers, the graph consists of $b+1$ terminals $\{s, t, s_1, \ldots, s_{b-1}\}$ (we will refer to $s$ as $s_0$ and $t$ as $s_b$ interchangeably). For each $i \in \{1, \ldots, b-1\}$, the vertex $s_i$ is connected to all the vertices in $R(G^i_{a,b})$ and $L(G^{i+1}_{a,b})$. Finally, $s$ is connected to all the vertices in $L(G^1_{a,b})$ and $t$ is connected to all the vertices in $R(G^b_{a,b})$.
Consider any integral cut separating $s$ and $t$ in the graph $G_{a,b}$. Any such cut must disconnect at least one pair of consecutive terminals (if all pairs of consecutive terminals are connected, then $s$ is still connected to $t$). Thus let $j \in \{1, \ldots, b\}$ be such that $s_{j-1}$ is disconnected from $s_{j}$ and consider the subgraph induced on $\{s_{j-1} \cup s_{j} \cup G^j_{a,b}\}$. We will show that this induced subgraph contains a vertex such that $\Omega(a^\frac{1}{2})$ of its incident edges are cut. Intuitively, since $s_{j-1}$ is separated from $s_j$, if the majority of the edges incident to $s_{j-1}$ and $s_j$ are not cut, then $s_{j-1}$ and $s_{j}$ have many neighbors in $L(G^{j}_{a,b})$ and $R(G^{j}_{a,b})$ respectively. As $G^{j}_{a,b}$ is highly connected, in order for $s_{j-1}$ to be separated from $s_j$, there must be a vertex in $G^j_{a,b}$ with many incident edges which are cut. If $cut(s_{j-1})$ or $cut(s_{j})$ is at least $a/2$, then we are done. Otherwise, $s_j$ is connected to at least $a/2$ vertices in $R(G^{j}_{a,b})$, so every $u$ adjacent to $s_{j-1}$ must have at least $a/2$ incident edges which are cut. Therefore, $OPT^q \geq \Omega(a^q)$.
We now present a fractional cut separating $s$ and $t$. If an edge $e$ connects $s_i$ to a vertex in $R(G^i_{a,b})$ for some $i \in \{1, \ldots, b\}$, set the length of the edge to be $1/b$; otherwise set the edge length to be $0$. We let $x_{uv}$ be the shortest path metric in this graph. It is easy to see that such a solution is feasible. We now analyze the quality of this solution.
For each $i \in \{1, \ldots, b\}$, we have $y_{s_i} = a/b$ and for each $u \in R(G^i_{a,b})$, we have $y_u = 1/b$. Thus
$$LP^q = ab\Big(\frac{1}{b}\Big)^q + b\Big(\frac{a}{b}\Big)^q.$$
If $b>a$, then
$$LP^q \leq ab\Big(\frac{1}{b}\Big) + b\Big(\frac{a}{b}\Big) = 2a$$
and if $b \leq a$, then
$$LP^q \leq ab\Big(\frac{1}{b}\Big) + b\Big(\frac{a}{b}\Big)^q \leq a^q\Big(a^{-(q-1)} + b^{-(q-1)}\Big).$$
Setting $a = b = \Theta({n^\frac{1}{2}})$ gives
$$\frac{OPT^q}{LP^q} = \Omega\Big(n^{\frac{q}{2}-\frac{1}{2}}\Big),$$
so the integrality gap is $\frac{OPT}{LP} = \Omega(n^{\frac{1}{2}-\frac{1}{2q}})$.
\end{proof}
\begin{figure*}
\caption{Integrality gap example.}
\label{fig:int-gap}
\end{figure*}
\section{Hardness of approximation}
In this section, we prove the following hardness result.
\begin{theorem}
It is NP-hard to approximate the min $\ell_\infty$ s-t cut problem within a factor of $2 - \varepsilon$ for every positive $\varepsilon$.
\end{theorem}
\begin{proof}
The proof follows a reduction from $3$SAT. We will describe a procedure that reduces every instance of a $3$CNF formula $\phi$ to a graph $G_\phi$ such that the minimum $\ell_\infty$ \textit{s-t} cut for $G_\phi$ has a certain value if and only if the formula $\phi$ is satisfiable.
\noindent \textbf{Reduction from 3SAT:} Given a $3$CNF instance $\phi$ with $n$ literals and $m$ clauses, we describe a graph $G_\phi$ with $(2 + 4n + 5m)$ vertices
and $(6n + 8m)$ edges. We refer to the vertex and edge set of $G_\phi$ as $V(G_\phi)$ and $E(G_\phi)$. For every literal $x_i, i \in \{1,\dots, n\}$, we have four nodes,
$x^T_i$, $x^F_i$, $x^{\dagger}_i$ and $\br{x}_i^\dagger$. Additionally, we have a ``False'' and a ``True'' node. For every $i \in \{1,\dots,n\}$, we connect
``False'' with $x^F_i$ and ``True'' with $x^T_i$ using an infinite weight edge. Both $x^F_i$ and $x^T_i$ are connected to $x^{\dagger}_i$ and $\br{x}_i^\dagger$ using edges
of weight $1$.
For every clause $C$ in $\phi$, we will create a gadget in $G_\phi$ consisting of five nodes. We will refer to the subgraph induced by these nodes as $G_\phi[C]$. Let the
clause $C = (y_1 \lor y_2 \lor y_3)$. We have a node in the gadget for each $y_i, i \in \{1,2,3\}$, and two additional nodes $C_a$ and $C_b$. We connect $y_2$ and $y_3$ to $C_b$,
and $y_1$ and $C_b$ to $C_a$, all using unit weight edges.
We connect the gadget $G_\phi[C]$ for clause $C = (y_1 \lor y_2 \lor y_3)$ to the main graph as follows. For each $i \in \{1,2,3\}$, connect the vertex for the
literal $y_i$ to the vertex $y^\dagger_i$ with a unit weight edge. Finally connect the node $C_a$ to the ``True'' vertex using an infinite weight edge. An example of
a 3CNF formula $\phi$ and the corresponding $G_\phi$ is given in Figure~\ref{figure:hardness}.
\textbf{Fact 1.} Consider the gadget $G_\phi[C]$ for the clause $C = (y_1 \lor y_2 \lor y_3)$. If all three nodes $y_1, y_2$, and $y_3$ need to be disconnected
from $C_a$, then either $\cut_{C_a} = 2$ or $\cut_{C_b} = 2$. If at most two of the three nodes $y_1, y_2$ and $y_3$ need to be disconnected from $C_a$, then there
is a cut that separates those nodes from $C_a$ such that both $\cut_{C_a}$ and $\cut_{C_b}$ are at most $1$.
\begin{lemma}
Given a 3CNF formula $\phi$, consider the graph $G_\phi$ constructed according to the reduction described above. The formula $\phi$ is satisfiable
if and only if the minimum $\ell_\infty$ True-False cut $\mathcal{P}$ for the graph $G_\phi$ has value $1$, that is, $||\cut_\mathcal{P}||_\infty = 1$.
\end{lemma}
\begin{proof}
\textbf{3SAT $\Rightarrow$ minimum $\ell_\infty$ \textit{True-False cut}}: If the 3CNF formula $\phi$ is satisfiable, then the graph $G_\phi$ has a minimum $\ell_\infty$ \textit{s-t} cut of value exactly $1$. This can be seen as follows. Given a satisfying assignment $x^*$, we will construct a cut $E_\mathcal{P}$ (and corresponding partition $\mathcal{P}$) such that for every vertex $u \in V(G_\phi)$, $\cut_\mathcal{P}(u) \leq 1$. For every $i \in \{1,\dots, n\}$, if $x^*_i$ is True, then include $(x^{\dagger}_i, x_i^F)$ and $(\br{x}_i^\dagger, x_i^T)$ as part of the cut $E_\mathcal{P}$, else include $(x^{\dagger}_i, x_i^T)$ and $(\br{x}_i^\dagger, x_i^F)$ as part of the cut $E_\mathcal{P}$. Note that this cuts exactly one edge incident to each vertex $x^{\dagger}_i, x_i^F, \br{x}_i^\dagger$ and $x_i^T$ for $i \in \{1,\dots, n\}$. Since $\phi$ has a satisfiable assignment, each clause $C$ has at least one literal which is True, and hence the node corresponding to this literal is not connected to the vertex False in $G_\phi - E_\mathcal{P}$. Thus, each clause $C$ has at most two literals that are False, and thus there are at most two False-True paths that go through this gadget. From Fact 1, we can know that we can include edges from $E(G_\phi[C])$ in $E_\mathcal{P}$ such that both $\cut_\mathcal{P}(C_a)$ and $\cut_\mathcal{P}(C_b)$ are at most $1$ and the False-True paths through this gadget are disconnected. Thus, cut $E_\mathcal{P}$ disconnects True from False such that $||\cut_\mathcal{P}(G_\phi)||_\infty = 1$.
\textbf{minimum $\ell_\infty$ \textit{True-False cut} $\Rightarrow$ 3SAT}: Let $G_\phi$ be the graph constructed for the 3CNF formula $\phi$ such that there is a cut $E_\mathcal{P} \subseteq E(G_\phi)$ (and corresponding partition $\mathcal{P}$) such that $\mathcal{P}$ separates True from False and $||\cut_\mathcal{P}(G_\phi)||_\infty = 1$. We will construct a satisfying assignment $x^*$ from the formula $\phi$. Since $\cut_\mathcal{P}(u) \leq 1$ for every $u \in V(G_\phi)$, none of the $(True, x^T_i)$, $(x^F_i, False)$ edges are part of the cut $\mathcal{P}$ for $i \in \{1,\dots, n\}$. In order for True to be separated from False, either the edges $(x^{\dagger}_i, x_i^F)$ and $(\br{x}_i^\dagger, x_i^T)$ are part of the cut $E_\mathcal{P}$, or the edges $(x^{\dagger}_i, x_i^T)$ and $(\br{x}_i^\dagger, x_i^F)$ are part of the cut $E_\mathcal{P}$. This gives us our assignment; for each $i \in \{1,\dots, n\}$, if $(x^T_i, x^{\dagger}_i) \in E \setminus E_\mathcal{P}$, then assign $x^*_i$ as True and $\br{x}^*_i$ as False. Otherwise $(x^F_i, x^{\dagger}_i) \in E \setminus E_\mathcal{P}$, so assign $x^*_i$ as False and $\br{x}^*_i$ as True. Now, we show that $x^*$ is a satisfying assignment for $\phi$. To see this, note that for each clause $C$, there exists at least one literal $y_i$ such that the node corresponding to $y_i$ is still connected to $C_a$. As the cut $E_\mathcal{P}$ separates True and False, $(y^\dagger_i, y^T_i) \in E \setminus E_\mathcal{P}$ and hence $y^*_i = $ True. Thus, the assignment $x^*$ is satisfying for $\phi$.
\end{proof}
Thus, we can conclude Theorem 5.1 from the reduction procedure provided and Lemma 5.2.
\end{proof}
\begin{figure*}
\caption{$G_\phi$ for the 3CNF formula $\phi = (x_1 \lor \br{x}_2 \lor x_3)$.}
\label{figure:hardness}
\end{figure*}
\section{Correlation Clustering on Complete Bipartite Graphs}
Let $(V = L \cup R, E)$ be a complete bipartite graph with $L$ and $R$ being the bipartition of the vertex set. In this section, we provide and analyze an algorithm for correlation clustering on complete graphs with an approximation guarantee of $5$ for minimizing the mistakes on one side of the bipartition (which without loss of generality will be $L$). The algorithm and analysis for complete bipartite graphs is very similar to the algorithm and analysis for complete graphs. At each step $t$ of our algorithm, we select a cluster center $w_t \in L$ and a cluster $C_t \subseteq (L \cup R)$ and remove it from the graph. This clustering step is repeated until all vertices in $L$ are part of some cluster. If there are any remaining vertices in $R$ which are unclustered, we put them in a single cluster.
Similar to the definition of $\Ball(w, \rho)$ in Section~\ref{sec:cor-clust-complete}, we define $\Ball_S(w, \rho) = \{u \in S : x_{uw} \leq \rho\}$. We select the cluster centers $w_t$ in step $t$ as follows. Let $V_t \subseteq V$ be the set of unclustered vertices at the start of step $t$. We redefine $L^S_t(w) = \sum_{u \in \Ball_{V_t \cap S}(w, r)} r - x_{uw}$. We select $w_t$ as the vertex $w \in L$ that maximizes $L^R_t(w)$. We then select $\Ball_{L \cup R}(w, 2r)$ as our cluster and repeat. A pseudocode for the above algorithm is provided in Figure~\ref{fig:Alg3}.
\begin{figure}
\caption{Algorithm for Correlation Clustering on complete bipartite graphs.}
\label{alg:corelation-bipartite-complete}
\label{fig:Alg3}
\end{figure}
\subsection{Analysis}
In this section, we present an analysis of our algorithm.
\begin{theorem}
Algorithm 3 gives a $5$-approximation for Correlation Clustering on complete biparite graphs where disagreements are measured on only one side of the bipartition.
\end{theorem}
The proof of this theorem is almost identical to the proof of Theorem~\ref{thm:5-apx-main}. We define
$\lp{u,v}$, $\alg{u,v}$, $\mathrm{Pr}ft{u,v}{t}$ for every edge $(u,v)$ and $\pft{u}$, $\mathrm{Pr}ft{u}{t}$ for every vertex $u$
as in Section~\ref{sec:cor-clust-complete}. We then
show that for each vertex $u \in L$, we have $\pft{u} \geq 0$ and, therefore, the number of
disagreeing edges incident to $u$ is upper bounded by $5y(u)$:
$$ALG(u) = \smashoperator[r]{\sum_{v:(u,v) \in E}} \alg{u,v} \leq \frac{1}{r} \smashoperator[r]{\sum_{v:(u,v) \in E}} \lp{u,v} = 5y(u).$$
Thus, $\|ALG\|_q \leq 5 \|y\|_q$ for any $q\geq 1$. Consequently, the approximation ratio of the algorithm is at most $5$ for any norm $\ell_q$.
\begin{lemma}
For every $u\in L$, we have $\pft{u} \geq 0$.
\end{lemma}
As in Lemma~\ref{lem:pft}, we need to show that $\mathrm{Pr}ft{u}{t}\geq 0$ for all $t$. Note that we only need
to consider $u\in V_t\cap L$ as $\mathrm{Pr}ft{u}{t} = 0$ for $u\notin V_t$.
Consider a step $t$ of the algorithm and vertex $u\in V_t \cap L$. Let $w = w_t$ be the center of the cluster chosen at this step.
First, we show that since the diameter of the cluster $C_t$ is $4r$, for all negative edges $(u,v) \in E^-$ with $u,v \in C_t$, we can charge the cost of
disagreement to the edge itself, that is, $\mathrm{Pr}ft{u,v}{t}$ is nonnegative for $(u,v)\in E^-$ (see Lemma~\ref{cl:neg-edge-profit-nenneg}). We then consider two cases:
$x_{uw}\in [0, r]\cup [3r,1]$ and $x_{uw}\in (r,3r]$.
The former case is fairly simple since disagreeing positive edges $(u,v)\in E^+$ (with $x_{uw}\in [0, r]\cup [3r,1]$) have a ``large'' LP cost. In Lemma~\ref{lem:0r} and Lemma~\ref{lem:r1},
we prove that the cost of disagreement can be charged to the edge itself and hence $\mathrm{Pr}ft{u}{t} \geq 0$.
We then consider the latter case. Similarly to Lemma~\ref{lem:pft}, we split the profit at step $t$ for vertices $u$ with $x_{uw} \in (r, 3r]$
into the profit they get from edges $(u,v)$ with $v$ in $\Ball_R(w,r)\cap V_t$ and from edges with $v$ in $V_t \setminus \Ball_R(w,r)$. That is,
\begin{multline*}
\mathrm{Pr}ft{u}{t} =\\= \underbrace{\sum_{v\in \Ball_R(w,r) \cap V_t}\mathrm{Pr}ft{u,v}{t}}_{P_{high}(u)} + \underbrace{\sum_{v\in V_t\setminus \Ball_R(w,r)}\mathrm{Pr}ft{u,v}{t}}_{P_{low}(u)}.
\end{multline*}
Denote the first term by $P_{high}(u)$ and the second term by $P_{low}(u)$. We show that $P_{low}(u)\geq -L^R_t(u)$ (see Lemma~\ref{lem:PLow-LtRu})
and $P_{high}(u)\geq L^R_t(w) = \sum_{v \in \Ball_R(w, r) \cap V_t} r - x_{vw}$ (see Lemma~\ref{lem:PHigh-LtRu}) and
conclude that $\mathrm{Pr}ft{u}{t} = P_{high}(u) + P_{low}(u)\geq L^R_t(w)-L^R_t(u)\geq 0$ since $L^R_t(w) = \max_{w'\in V_t} L^R_t(w') \geq L^R_t(u)$.
Consider $u$ such that $x_{uw} \in (r, 3r]$. First, we show that the profit we obtain from every edge $(u,v)$ with $v \in \Ball_R(w,r)$ is
at least $r - x_{vw}$, regardless of whether the edge is positive or negative.
\begin{claim}
If $x_{uw} \in (r,3r]$ and $v \in \Ball_R(w,r)\cap V_t$, then $\mathrm{Pr}ft{u,v}{t}\geq r-x_{vw}$.
\end{claim}
\begin{proof}
First consider $u$ such that $x_{uw} \in (r, 2r]$. Note that $x_{uv} \geq x_{uw} - x_{vw} \geq r - x_{vw}$. Moreover, $x_{uv} \leq x_{uw} + x_{vw} \leq 2r + x_{vw}$. Thus, if $(u,v) \in E^+$, then $\mathrm{Pr}ft{u,v}{t} \geq r - x_{vw}$. Otherwise, $\mathrm{Pr}ft{u,v}{t} \geq (1 - 2r - x_{vw}) - r \geq 2r - x_{vw}$.
For $u$ such that $x_{uw} \in (2r, 3r]$, note that $x_{uv} \geq x_{uw} - x_{vw} \geq 2r - x_{vw}$. Moreover, $x_{uv} \leq x_{uw} + x_{vw} \leq 3r + x_{vw}$.
Thus, if $(u,v) \in E^+$, then $\mathrm{Pr}ft{u,v}{t} \geq (2r - x_{vw}) - r \geq r - x_{vw}$. Otherwise, $\mathrm{Pr}ft{u,v}{t} \geq (1 - 3r - x_{vw}) \geq 2r - x_{vw}$.
\end{proof}
Using the above claim, we can sum up the profits from all vertices $v$ in $\Ball_R(w, r)$ and lower bound $P_{high}(u)$ as follows.
\begin{lemma}\label{lem:PHigh-LtRu}
If $x_{uw}\in (r,3r]$, then $P_{high}(u) \geq L^R_t(w)$.
\end{lemma}
\begin{proof}
By the claim above, we have $\mathrm{Pr}ft{u,v}{t}\geq r-x_{vw}$ for all $v\in \Ball_R(w,r) \cap V_t$. Thus,
\begin{align*}
P_{high}(u) &= \sum_{v\in \Ball_R(w,r)\cap V_t}\mathrm{Pr}ft{u,v}{t}\\ &\geq \sum_{v\in \Ball_R(w,r)\cap V_t}r-x_{vw} = L^R_t(w).
\end{align*}
\end{proof}
We now lower bound $P_{low}(u)$. To this end, we estimate each term $\mathrm{Pr}ft{u,v}{t}$ in the definition of $P_{low}$.
\begin{claim}\label{claim:lb-bp}
If $x_{uw} \in (r,3r]$ and $v \in V_t \setminus \Ball_R(w,r)$, then $\mathrm{Pr}ft{u,v}{t}\geq \min(x_{uv} - r, 0)$.
\end{claim}
\begin{proof}
By Claim~\ref{cl:neg-edge-profit-nenneg}, if $(u,v)$ is a negative edge, then $\mathrm{Pr}ft{u,v}{t} \geq 0$. The profit is $0$ if $x_{uv}\notin \Delta E_t$ (i.e., neither $u$ nor $v$ belong
to the new cluster). So let us assume that $(u,v)$ is a positive edge in $\Delta E_t$. Then, the profit obtained from $(u,v)$ is $x_{uv}$ if $(u,v)$ is in agreement
and $x_{uv} - r$ if $(u,v)$ is in disagreement. In any case, $\mathrm{Pr}ft{u,v}{t} \geq x_{uv} - r \geq \min(x_{uv} - r, 0)$.
\end{proof}
Lemma~\ref{lem:PLow-LtRu} is an immediate corollary of Claim~\ref{claim:lb-bp}.
\begin{lemma}\label{lem:PLow-LtRu}
If $x_{uw}\in (r,3r]$, then $P_{low}(u) \geq -L^R_t(u)$.
\end{lemma}
\begin{proof}
By Claim~\ref{claim:lb-bp}, we have $\mathrm{Pr}ft{u,v}{t}\geq \min(x_{uv} - r,0)$ for all $v\in V_t \setminus \Ball_R(w,r)$. Thus,
\begin{align*}
P_{low}(u) &= \sum_{v\in V_t\setminus \Ball_R(w,r)}\mathrm{Pr}ft{u,v}{t}\\
&\geq \sum_{v\in V_t\setminus \Ball_R(w,r)} \min(x_{uv} - r,0)\\
&\overset{a}{\geq} \;\;\;\;\;\sum_{v\in V_t} \min(x_{uv} - r,0) \\
&\overset{b}{=} \sum_{v\in \Ball_R(u,r) \cap V_t} x_{uv} - r \\
&= - L_t^R(u).
\end{align*}
Here we used that (a) all terms $\min(x_{uv} - r,0)$ are nonpositive, and (b) $\min(x_{uv} - r, 0) = 0$ if $v\notin \Ball(u,r)$.
\end{proof}
\end{document} |
\begin{document}
\title[Persistence of Diophantine flows...]{Persistence of Diophantine flows for quadratic nearly-integrable Hamiltonians under slowly decaying aperiodic time dependence}
\author{Alessandro Fortunati}
\thanks{This research was supported by ONR Grant No.~N00014-01-1-0769 and MINECO: ICMAT Severo Ochoa project SEV-2011-0087.}
\address{School of Mathematics, University of Bristol, Bristol BS8 1TW, United Kingdom}
\email{alessandro.fortunati@bristol.ac.uk}
\keywords{Hamiltonian systems, Kolmogorov Theorem, Aperiodic time dependence.}
\subjclass[2010]{Primary: 70H08. Secondary: 37J40, 37J25}
\author{Stephen Wiggins}
\email{s.wiggins@bristol.ac.uk}
\maketitle
\begin{abstract}
The aim of this paper is to prove a Kolmogorov-type result for a nearly-integrable Hamiltonian, quadratic in the actions, with an aperiodic time dependence. The existence of a torus with a prefixed Diophantine frequency is shown in the forced system, provided that the perturbation is real-analytic and (exponentially) decaying with time. The advantage consists of the possibility to choose an arbitrarily small decaying coefficient, consistently with the perturbation size.
\\
The proof, based on the Lie series formalism, is a generalization of a work by A. Giorgilli.
\end{abstract}
\section{Introduction}
The celebrated Kolmogorov Theorem, stated in \cite{kolm} with a guideline for the proof, has been for years a fruitful
source of ideas, culminating in the collection of tools and techniques nowadays known as KAM theory. As undisputed members of the acronym, Arnold \cite{arn1} and Moser \cite{mos62}, \cite{moser67} proposed complete proofs of
Kolmogorov's result. The two approaches exhibited some technical differences, but were both based on the concepts of
\emph{super-convergent method} and \emph{implicit function theorem} over the complexified phase space (see e.g.
\cite{chierchia09} for a detailed exposition). The applicability of these tools to certain infinite dimensional
problems were investigated in \cite{moser1966}, giving rise to the modern theory of Nash-Moser arguments (see
\cite{zehnder76} and \cite{berbolpro} for an advanced setting).\\
The proof based on the Lie formalism proposed in \cite{nuovocimento} then continued in \cite{giorgloca97}, \cite{giorgmorbi97} and \cite{giorgloca99}, makes use of the well known class of canonical change in \emph{explicit form}. This has the remarkable advantage to avoid the inversion and the difficulties related to implicit function arguments. Furthermore, this feature has been widely and profitably used for the computer implementation of normalization algorithms.\\
In a substantially different direction, the approach developed in \cite{chierchiafalco94}, \cite{chierchiafalco96} and by the Gallavotti's school \cite{gallavottitwistlesskam}, \cite{gallagent95}, \cite{gentmastro95} and subsequent papers, is based on \emph{renormalization group} tools and \emph{diagrammatic} analysis of the Lindstedt's series convergence due to cancellation phenomena. The analysis is an extensive improvement of the pioneering challenge of the small divisors problem faced in \cite{eliasson88}.\\
The historical legacy between the Kolmogorov Theorem and problems arising from Celestial Mechanics, has led to a development in the treatment of quasi-periodic perturbations of integrable Hamiltonians, mainly in the presence of weaker regularity hypothesis.\\
Our aim is to proceed in a slightly different direction, investigating the possibility of obtaining the conservation of (strongly) non-resonant tori in the case of an analytic perturbation (quadratic in the actions), but with an \emph{aperiodic} time dependence. For this purpose we shall follow the exposition \cite{gior}, a revisited essay of the techniques used in \cite{nuovocimento}. The case of a quadratic Hamiltonian, has been chosen for simplicity of discussion. On the other hand, this choice allows substantial simplification of the ``known'' technical part, emphasizing the differences introduced by the non-quasi-periodic time dependence. As we shall discuss, the exponential rate of the perturbation decay, say $\exp(-at)$, is a simplified choice as well. \\
The philosophy behind the present analysis is very close to the Nekhoroshev stability result for aperiodically perturbed system of \cite{forwig}, but some substantial differences arise. Mainly, the Nekhoroshev normal form can be constructed by modifying the original normalization scheme, with the sole hypothesis that the perturbation depends $\mu-$slowly
on time. Hence the technical part consists in giving an estimate of the extra-terms arising from the aperiodic dependence. The key point is that, as it is clear from the normal form statement (see \cite[Thm 2]{forwig}), this is possible only because the number $r$ of normalization steps is \emph{finite} and the threshold for $\mu$ is actually a function of $r$.\\
The same phenomenon,
even in the presence of a different normalization scheme, can be found if the Kolmogorov construction is extended
\emph{tout-court} to the case of aperiodic perturbations, and the slow dependence hypothesis would inevitably degenerate to a trivial case
i.e. $\mu = 0$.\\
The above described difficulty, has required the modification of the transformation suggested by Kolmogorov in a way to annihilate certain time dependent terms arising in the normalization algorithm. The standard homological equation is modified, in this way, into a linear PDE involving time. The
apparently ``cheating'' hypothesis of time decaying perturbation (asymptotically the problem is trivial)
turns out to be a technical ingredient in order to ensure the resolvability of this equation at each step
of the normal form construction. Nevertheless, as a feature behind the \emph{slow decay}, the whole argument does not impose lower bounds on $a$. Consistently, the slower the decay, the smaller the perturbation size.\\
The self-contained exposition is closely carried along the lines of \cite{gior}. The same notational setting is used for a more efficient comparison.
\section{Preliminaries and statement of the result}
Let us consider the following Hamiltonian
\beq{eq:hamtempo}
\mathcal{H}(Q,P,t)=\frac{1}{2}\langle \Gamma P,P \rangle +
\ep f(Q,P,t) \mbox{,}
\end{equation}
where $\Gamma$ is a $n \times n$ real symmetric matrix, $(Q,P) \in \TT^n \times \RR^n$ is a set of action-angle variables, $t \in \RR^+$ is an additional variable (time) and $\ep>0$ is a small parameter. The perturbing function $f$ is assumed to be quadratic in $P$. \\
The Kolmogorov approach to (\ref{eq:hamtempo}) begins by considering a given $\hat{P} \in \RR^n$ and then expanding the first term of $\mathcal{H}$ around it. The canonical change (translation) $(q,p):=(Q,P-\hat{P})$, and the definition of $\eta \in \RR$ as the momentum conjugate to $\xi:=t$, yields (up to a constant) the following autonomous Hamiltonian
\beq{eq:ham}
H(q,p,\xi,\eta):=
\langle \omega, p \rangle + \frac{1}{2}\langle \Gamma p,p \rangle + \eta + \ep f(q,p,\xi) \mbox{,}
\end{equation}
where $\omega:=\Gamma \hat{P}$.\\
In order to use the standard tools concerning analytic functions, we consider a complex extension of the ambient space. More precisely, define $\mathcal{D}:=\Delta_{\rho} \times \TT_{2 \sigma}^n \times \mathcal{S}_{\rho} \times \mathcal{R}_{\zeta}$ where
$$
\begin{array}{rclrcl}
\Delta_{\rho}&:=&\{p \in \CC^n:|p|<\rho\},
& \qquad
\TT_{2 \sigma}^n&:=&\{q \in \CC^n: |\Im q| < 2 \sigma\},\\
\mathcal{S}_{\rho}&:=&\{\eta \in \CC: |\Im \eta| <\rho\}, & \qquad
\mathcal{R}_{\zeta}&:=&\{\xi =:x+iy \in \CC:|x| <\zeta\ ; \,
y > -\zeta\} \mbox{,}
\end{array}
$$
and $\rho,\sigma,\zeta \in (0,1)$. Similarly to \cite{gior}, we consider the usual \emph{supremum norm}
$$
\snorm{g}{[\rho,\sigma;\zeta]}:=\sup_{(p,q) \in \mathcal{D}} |g(q,p,\xi)| \mbox{,}
$$
and the \emph{Fourier norm}, defined for all $\nu \in(0,1/2]$,
\beq{eq:fouriernorm}
\norm{g}{[\rho,\sigma;\zeta]}:=\sum_{k \in \ZZ^n} \snorm{g_k(p,\xi)}{(\rho,\sigma)}
e^{2|k|(1-\nu)\sigma} \mbox{,}
\end{equation}
where $g_k(p,\xi)$ are the coefficients of the Fourier expansion $g=\sum_{k \in \ZZ^n} g_k(p,\xi) e^{i \langle k , q \rangle}$. For all vector-valued functions $w:\mathcal{D} \rightarrow \CC^n $ we shall set $\norm{w}{[\rho,\sigma;\zeta]}:=\sum_{l=1}^n \norm{w_l}{[\rho,\sigma;\zeta]}$.\\
System (\ref{eq:ham}) will be studied under the following
\begin{hyp}\label{hyp}
\begin{itemize}
\item There exists $m\in (0,1)$ such that, for all $ v \in \CC^n$
\beq{eq:hypongamma}
|\Gamma v| \leq m^{-1}|v| \mbox{.}
\end{equation}
\item (Slow decay): The perturbation is an analytic function on $\mathcal{D}$ satisfying
\beq{eq:slowdecay}
\norm{f(q,p,\xi)}{[\rho,\sigma;\zeta]} \leq M_f e^{-a |\xi|} \mbox{,}
\end{equation}
for some $M_f>0$ and $a \in (0,1)$.
\end{itemize}
\end{hyp}
We specify that the assumption $a<1$ (which includes, of course, the ``interesting'' case of $a$ small) is not of technical nature, but it is often useful to obtain more compact estimates. As a difference with \cite{forwig}, hypothesis (\ref{eq:slowdecay}) is not a slow time dependence assumption: in principle, the constant $M_f$ could be the bound of an arbitrary (analytic) function of $\ph$ and of $\xi$.\\
In this framework, the main result is stated as follows
\begin{satz}[Aperiodic Kolmogorov]\label{thm}
Consider Hamiltonian (\ref{eq:ham}) under the Hypothesis \ref{hyp} and suppose that $\hat{P}$ is such that $\omega$ is a $\gamma-\tau$ Diophantine vector\footnote{Namely, there exist $\gamma$ and $\tau>n-1$ such that $|\langle \omega, k \rangle | \geq \gamma |k|^{-\tau}$, for all $k \in \ZZ^{n}\setminus \{0\}$, understood $|k|:=|k_1|+\ldots + |k_n|$.}.\\
Then, for all $a \in (0,1)$ there exists\footnote{See (\ref{eq:finalvalueep}) for an explicit estimate.} $\ep_a>0$ such that, for all $\ep \in (0,\ep_a]$,
it is possible to find a canonical, $\ep-$close to the identity, analytic change of variables $(q,p,\timesi,\eta) = \mathcal{K}(q^{(\infty)},p^{(\infty)},\timesi,\eta^{(\infty)})$, $\mathcal{K}:\mathcal{D}_* \rightarrow \mathcal{D}$ with $\mathcal{D}_* \subset \mathcal{D}$,
casting Hamiltonian (\ranglef{eq:ham}) into the \emph{Kolmogorov normal form}
\beq{eq:kolnormal}
H_{\infty}(q^{(\infty)},p^{(\infty)},\timesi,\eta^{(\infty)})=\langle \omega, p^{(\infty)} \rangle + \eta^{(\infty)}+
\mathcal{Q}(q^{(\infty)},p^{(\infty)},\timesi;\ep) \mbox{,}
\end{equation}
with $\partial_p^{\alpha} \mathcal{Q}(\cdot,0,\cdot;\ep)=0$ for all $\alpha \in \NN^n$ such that $\alpha_i \leq 1$ ($\mathcal{Q}$ is a homogeneous polynomial of degree $2$ in $p$).
\end{satz}
Hamiltonian (\ranglef{eq:kolnormal}) is defined up to a function of $\timesi$ that is not relevant for the $(q,p)-$ flow we are interested in. The normal form (\ranglef{eq:kolnormal}) clearly implies the persistence of the (lower dimensional for (\ranglef{eq:ham}) i.e. maximal for (\ranglef{eq:hamtempo})) invariant torus with frequency $\omega$ under perturbations satisfying (\ranglef{eq:slowdecay}) and for sufficiently small $\ep$. \\
The rest of the paper is devoted to the proof of Theorem \ranglef{thm}. As usual, it has the structure of an iterative statement divided into a formal part (Lemma \ranglef{lem:iterativeformal}) and a quantitative part (Lemma \ranglef{lem:iterative}). In the first part we modify the Kolmogorov scheme in order to build a suitable normalization algorithm for the problem at hand. The homological equation on $\TT_{2 \sigma}^n \times \mathcal{R}_{\zeta}$ arising in this case requires a substantially different treatment of the bounds on the small divisors as described in Prop. \ranglef{prop:small}.\\
In the second, quantitative part, the well established tools of the Lie series theory (recalled in Sec. \ranglef{sec:technical}), are used to control the size of the unwanted terms during the normalization process, proving that the constructed Kolmogorov transformation has the feature to make them smaller and smaller.\\
The final part consists in showing that the described iterative argument can be iterated infinitely many times, and the contribution of the unwanted terms completely removed: once more, the choice of a particular torus $P=\hat{P}$ suggested by Kolmogorov, is required for the convergence of this particular scheme.
\section{The formal perturbative setting}
Following \cite{gior} we construct a perturbative scheme in which the $j-$th step is based on the canonical transformation
\[
\mathcal{K}_j:=\exp(\mathcal{L}_{\chi^{(j)}}) \circ \exp(\mathcal{L}_{\phi^{(j)}}) \mbox{,}
\]
where the \emph{Lie series operator} is formally defined by
\[
\exp(\mathcal{L}_{G}):=\id+\sum_{s \geq 1} \frac{1}{s!}\mathcal{L}_G^s \mbox{,}
\]
and $\mathcal{L}_G \cdot :=\{G,\cdot\}=(\partial_q G \partial_p + \partial_{\xi}G \partial_{\eta}-\partial_p G \partial_q-\partial_{\eta}G \partial_{\xi})\cdot $ is the \emph{Lie derivative}. The \emph{generating functions} will be chosen of the form $\phi^{(j)}=\phi^{(j)}(q,\xi)$ and $\chi^{(j)}=\chi^{(j)}(q,p,\xi):=\langle Y^{(j)}(q,\xi),p \rangle $. The latter is the equivalent of the classical case.
\begin{lem}\label{lem:iterativeformal}
Suppose that for some $j \in \NN$, Hamiltonian (\ref{eq:ham}) can be written in the form
\beq{eq:hamricorsiva}
H_j=\langle \omega,p \rangle + \eta + A^{(j)}(q,\xi) + \langle B^{(j)}(q,\xi),p \rangle
+ \frac{1}{2} \langle C^{(j)}(q,\xi)p,p \rangle \mbox{,}
\end{equation}
with $C^{(j)}$ a symmetric matrix. Then it is possible to determine $\phi^{(j)}$ and $Y^{(j)}$ such that
$H_{j+1}:=\mathcal{K}_j H_j$ has the structure (\ref{eq:hamricorsiva}) for suitable $A^{(j+1)},B^{(j+1)}$ and $C^{(j+1)}$ symmetric matrix as well.
\end{lem}
The possibility to write the Hamiltonian (\ref{eq:ham}) in the form (\ref{eq:hamricorsiva}), and then to complete an iterative scheme, will be discussed in Sec. \ref{seq:inducbasis}.
\begin{rem}
The change of variables casting $H_j$ into $H_{j+1}$ follows directly from the Gr{\"o}bner \emph{exchange} Theorem\footnote{Namely, let for simplicity $H=H(q,p)$ and $\chi$ be a generating function; one has
\[
H(q,p)|_{(q,p)=\exp(\mathcal{L}_{\chi})(q',p')}=[\exp(\mathcal{L}_{\chi})H(q,p)]_{(q,p)=(q',p')} \mbox{,}
\]
understood $\exp(\mathcal{L}_{\chi})(q',p')=(\exp(\mathcal{L}_{\chi})q',\exp(\mathcal{L}_{\chi}) p')$.
} and reads as
\beq{eq:change}
(q^{(j)},p^{(j)},\eta^{(j)},\xi^{(j)})=\mathcal{K}_j
(q^{(j+1)},p^{(j+1)},\eta^{(j+1)},\xi^{(j+1)}) \mbox{.}
\end{equation}
As a basic feature of this method, the variables superscript is not relevant in order to deal with the Hamiltonian transformation, and it will be omitted throughout the proof.
\end{rem}
The perturbative feature of this result is not transparent until a quantitative control of the action of $\mathcal{K}_j$ is established. Indeed, the subsequent step is to show that the ``size'' (in a sense that will be made precise later) of the terms $A^{(j)},B^{(j)}$ is infinitesimal as $j$ tends to infinity, obtaining in this way the desired \emph{Kolmogorov normal form}.
\proof
It is convenient to discuss separately the action of the two transformations.
\subsubsection*{First transformation} Firstly we examine the action of $\exp(\mathcal{L}_{\phi^{(j)}})$ on $H_j$.
A key feature of $\mathcal{L}_{\phi^{(j)}}$ is that the degree of the polynomials in $p$ on which it acts is decreased by one. This implies that $\exp(\mathcal{L}_{\phi^{(j)}})H_j$ turns out to be simply
\begin{eqnarray*}
\exp(\mathcal{L}_{\phi^{(j)}})H_j & = & \displaystyle \langle \omega , p \rangle + \partial_{\omega} \phi^{(j)} + \eta + \partial_{\xi} \phi^{(j)} + A^{(j)}+ \langle B^{(j)}, p \rangle+ \langle B^{(j)}, \partial_q \phi^{(j)} \rangle\\
& + & \displaystyle
\frac{1}{2} \langle C^{(j)} p,p \rangle + \langle C^{(j)} \partial_q \phi^{(j)}, p \rangle +
\frac{1}{2}\langle C^{(j)} \partial_q \phi^{(j)} ,\partial_q \phi^{(j)} \rangle \mbox{,}
\end{eqnarray*}
where $\partial_{\omega} \cdot :=\langle \omega , \partial_q \cdot \rangle$. Note that the symmetry of $C^{(j)}$ has been repeatedly used.
\begin{rem} The finite number of terms in the previous expression is clearly one of the main simplifications introduced by a $p-$quadratic Hamiltonian. By considering the remainder of degree $\geq 3$ in $p$, the Lie series operator would have produced an infinite number of terms.
\end{rem}
The first generating function $\phi^{(j)}(q,\xi)$ is determined as the solution of the following \emph{time dependent homological equation}
\beq{eq:firsthomological}
\partial_{\xi} \phi^{(j)} (q,\xi)+ \partial_{\omega} \phi^{(j)} (q,\xi) + A^{(j)} (q,\xi)=0\mbox{.}
\end{equation}
This equation can be formally solved on the Fourier space, giving rise to an infinite set of decoupled ODEs, see Prop. \ref{prop:small} for more details. In spite of this difficulty, the presence of the term $\partial_{\xi} \phi^{(j)}$ allows the resolvability of the equation also for the $0-$th Fourier coefficient ($q-$average\footnote{We shall denote also with $\overline{f(q,\xi)}:=(2 \pi)^{-n}\int_{\TT^n} f(q,\xi) dq$ the $q-$average of $f$.}). This feature, not necessary in this case ($\overline{A(q,\xi)}$ could be removed from this equation and kept in the Hamiltonian without affecting the normal form), will play a key role in the determination of $Y^{(j)}(q,\xi)$. Now, defining
\begin{subequations}
\begin{align}
\hat{A}^{(j)}(q,\xi)& := \displaystyle
\langle B^{(j)}, \partial_q \phi^{(j)} \rangle + \frac{1}{2} \langle C^{(j)} \partial_q \phi^{(j)} ,\partial_q \phi^{(j)} \rangle \mbox{,}\label{eq:ahat}\\
\hat{B}^{(j)}(q,\xi)& := \displaystyle B^{(j)}+ C^{(j)} \partial_q \phi^{(j)} \mbox{,}\label{eq:bhat}
\end{align}
\end{subequations}
we obtain
\beq{eq:hamhalstep}
\hat{H}_j:=\exp(\mathcal{L}_{\phi^{(j)}})H_j=\langle \omega , p \rangle + \eta + \hat{A}^{(j)}(q,\xi)+
\langle \hat{B}^{(j)}(q,\xi),p \rangle + \frac{1}{2} \langle C^{(j)}(q,\xi)p,p \rangle \mbox{.}
\end{equation}
\subsubsection*{Second transformation} Our aim is now to determine $Y^{(j)}(q,\xi)$. Explicitly we have
\begin{eqnarray*}
\exp(\mathcal{L}_{\chi^{(j)}}) \hat{H}_j & = & \id \hat{H}_j + \displaystyle \mathcal{L}_{\chi^{(j)}} \langle \omega , p \rangle + \mathcal{L}_{\chi^{(j)}} \eta+ \sum_{s \geq 2} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \langle \omega , p \rangle+ \sum_{s \geq 1} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \hat{A}^{(j)} \\
& + & \displaystyle \sum_{s \geq 1} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \langle \hat{B}^{(j)},p \rangle + \displaystyle \sum_{s \geq 1} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \langle C^{(j)} p,p \rangle
+ \sum_{s \geq 2} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \eta \mbox{.}
\end{eqnarray*}
The function $\chi^{(j)}(q,p,\xi)$ is determined in such a way that
\beq{eq:secondhompre}
\mathcal{L}_{\chi^{(j)}} \eta+\mathcal{L}_{\chi^{(j)}} \langle \omega,p \rangle + \langle \hat{B}^{(j)}(q,\xi),p \rangle=0 \mbox{.}
\end{equation}
Noting that
\begin{eqnarray*}
\displaystyle \sum_{s \geq 2} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \langle \omega , p \rangle
+ \sum_{s \geq 1} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \langle \hat{B}^{(j)},p \rangle
&=& \displaystyle \sum_{s \geq 1} \frac{1}{(s+1)!} \mathcal{L}_{\chi^{(j)}}^s
[\mathcal{L}_{\chi^{(j)}} \langle \omega , p \rangle + (s+1) \langle \hat{B}^{(j)},p \rangle ] \\
& \Heq{\ref{eq:secondhompre}}{=} & \displaystyle \sum_{s \geq 1} \frac{s}{(s+1)!} \mathcal{L}_{\chi^{(j)}}^s \langle \hat{B}^{(j)},p \rangle
- \sum_{s \geq 2} \frac{1}{s!} \mathcal{L}_{\chi^{(j)}}^s \eta \mbox{,}
\end{eqnarray*}
the transformed Hamiltonian simplifies as follows
\[
\exp(\mathcal{L}_{\chi^{(j)}}) \hat{H}_j = \displaystyle \langle \omega , p \rangle +\eta + \exp(\mathcal{L}_{\chi^{(j)}})\hat{A}^{(j)}+ \sum_{s \geq 1} \frac{s}{(s+1)!} \mathcal{L}_{\chi^{(j)}}^s \langle \hat{B}^{(j)},p \rangle + \frac{1}{2}\exp(\mathcal{L}_{\chi^{(j)}}) \langle C^{(j)} p,p \rangle\mbox{.}
\]
It is sufficient to define
\begin{subequations}
\begin{align}
A^{(j+1)}(q,\xi) &:= \displaystyle \exp(\mathcal{L}_{\chi^{(j)}})\hat{A}^{(j)} \mbox{,} \label{eq:ajpuno}\\
\langle B^{(j+1)}(q,\xi),p \rangle & := \displaystyle
\sum_{s \geq 1} \frac{s}{(s+1)!} \mathcal{L}_{\chi^{(j)}}^s
\langle \hat{B}^{(j)}, p \rangle \mbox{,} \label{eq:bjpuno}\\
\langle C^{(j+1)}(q,\xi) p,p \rangle & := \exp(\mathcal{L}_{\chi^{(j)}}) \langle C^{(j)} p,p \rangle \mbox{,}
\label{eq:cjpuno}
\end{align}
\end{subequations}
in order to obtain
\beq{eq:transformed}
H_{j+1}:=\exp(\mathcal{L}_{\chi^{(j)}}) \hat{H}_j= \displaystyle \langle \omega , p \rangle + \eta +
A^{(j+1)}(q,\xi)+ \langle B^{(j+1)}(q,\xi),p \rangle + \frac{1}{2} \langle C^{(j+1)}(q,\xi)p,p \rangle \mbox{,}
\end{equation}
which has the structure (\ref{eq:hamricorsiva}). The symmetry of $C^{(j+1)}$ follows from its definition.\\
It is immediate to check that (\ref{eq:secondhompre}) is equivalent to $\langle (\partial_{\xi} Y^{(j)}+ \partial_{\omega} Y^{(j)} + \hat{B}^{(j)}), p \rangle=0$, i.e.,
\beq{eq:secondhom}
\partial_{\xi} Y^{(j)}(q,\xi)+ \partial_{\omega} Y^{(j)}(q,\xi) + \hat{B}^{(j)}(q,\xi)=0 \mbox{,}
\end{equation}
which has the same form of (\ref{eq:firsthomological}) if considered component-wise. The necessity to solve (\ref{eq:secondhom}) also for the $0-$th Fourier mode is now clear: any ``residual'' term would imply a frequency correction and the failure of the program.
\section{Technical tools}\label{sec:technical}
From this section on, we shall profitably use the tools of complex analysis in order to show the convergence of the Kolmogorov scheme. Let us firstly recall a well known property of analytic functions: if $g=g(q,p,\xi)$ is analytic on $\mathcal{D}$, one has $|g_k| \leq \snorm{g}{[\rho,\sigma;\zeta]} e^{-2|k|\sigma}$ then, by (\ref{eq:fouriernorm}), $\norm{g}{[\rho,\sigma;\zeta]}< \infty$ for all $\nu>0$. Vice-versa, if $\norm{g}{[\rho,\sigma;\zeta]}< \infty$ for all $\nu>0$ (no matter how small), then the Fourier coefficients of $g$ decay as $e^{-2|k|\sigma}$, hence the corresponding series defines an analytic function\footnote{I.e. the finiteness of the Fourier norm characterizes analytic functions on $\mathcal{D}$, see e.g. \cite[Chap. $4$]{giorgilli02}. The choice of $\nu$ will be tacitly understood in what follows as sufficiently small in order to ensure that the function at hand is analytic in a domain that is as large as possible.} on $\mathcal{D}$. \\
As in \cite{gior} we collect some basic inequalities in the following
\begin{prop} Let $v(q,\xi)$ and $C(q,\xi)$ be, respectively, a vector and a matrix defined on $\mathcal{D}$. Then the following properties hold
\begin{itemize}
\item
\beq{eq:inequno}
\norm{\langle v(q,\xi),p \rangle}{[\rho,\sigma;\zeta]} \leq \rho \norm{v}{[\sigma;\zeta]} \mbox{.}
\end{equation}
Vice-versa, if for some $\tilde{M}>0$
\beq{eq:inequnocontrary}
\norm{\langle v(q,\xi),p \rangle}{[\rho,\sigma;\zeta]} \leq \tilde{M} \rho, \qquad
\mbox{then} \qquad \norm{v(q,\xi)}{[\sigma;\zeta]} \leq \tilde{M} \mbox{.}
\end{equation}
\item If, for some $\hat{M}>0$
\beq{eq:ineqdue}
\norm{\langle C(q,\xi)p,p \rangle }{[\rho,\sigma;\zeta]} \leq \hat{M} \rho^2, \qquad
\mbox{then} \qquad \norm{C_{kl}(q,\xi)}{[\sigma;\zeta]} \leq \hat{M} \mbox{.}
\end{equation}
\end{itemize}
\end{prop}
\proof It can be extended without difficulties to our case, by following the sketch proposed in \cite[Pag. 160]{gior} \endproof
It will be also useful to recall the bound below, valid in particular on $\mathcal{R}_{\zeta}$
\beq{eq:ineq}
e^{-a|x|} \leq e^{a \zeta} e^{-a|\xi|} \mbox{.}
\end{equation}
\subsection{Solution of the time dependent homological equation}
Let us consider the following P.D.E.
\beq{eq:hompde}
\partial_{\xi} \ph + \partial_{\omega} \ph = \psi \mbox{,}
\end{equation}
where $\psi=\psi(q,\xi):\mathcal{D} \rightarrow \CC$ is a given function. It is possible to state the following
\begin{prop}\label{prop:small} Let $\delta \in [0,1)$ and suppose that $\psi$ is analytic on $\TT_{2 (1 - \delta) \sigma}^n \times \mathcal{R}_{\zeta} $ and \emph{exponentially decaying} with $|\xi|$, i.e.
\beq{eq:expdec}
\norm{\psi}{[(1-\delta)\sigma;\zeta]} \leq K e^{-a |\xi|} \mbox{,}
\end{equation}
where $a$ has been defined in (\ref{eq:slowdecay}).\\
Then for all $d \in (0, 1-\delta)$ and for all $\zeta$ such that
\beq{eq:sceltazeta}
2 |\omega| \zeta \leq d \sigma \mbox{,}
\end{equation}
the solution of (\ref{eq:hompde}) exists and satisfies
\begin{subequations}
\begin{align}
\norm{\ph}{[(1-\delta-d)\sigma;\zeta]} & \leq
\frac{K S_1}{a(d \sigma)^{\tau}} e^{-a |\xi|} \mbox{,} \label{eq:stimehomuno}
\\
\norm{\partial_{q_m} \ph }{[(1-\delta-d)\sigma;\zeta]} &
\leq \frac{K S_2}{a (d \sigma)^{\tau+1}}
e^{-a |\xi|},\qquad m=1,\ldots,n \mbox{,}
\label{eq:stimehomdue}
\end{align}
\end{subequations}
where $S_{1,2}>0$ are constants defined for all sufficiently small $\nu>0$.
\end{prop}
\proof
By expanding $\ph=\ph(q,\xi)$ we have that equation (\ref{eq:hompde}), in terms of Fourier coefficients, reads as
\[
i \lambda \ph_k(\xi) + \ph_k'(\xi)= \psi_k(\xi) \mbox{,}
\]
with $\lambda:=\langle \omega , k \rangle$. We firstly discuss the case $k \neq 0$, hence $\lambda \neq 0$ by assumption. The solution in this case is
\[
\ph_k(\xi)=e^{-i \lambda \xi} \left[ \ph_k(0) + \int_0^\xi \psi_k(s) e^{i \lambda s} ds \right] \mbox{.}
\]
The integral is meant to be computed along an arbitrary path ($\mathcal{R}_{\zeta}$ is simply connected) joining the origin and $\xi \in \CC$. More precisely, we shall choose
\beq{eq:integralasymp}
\int_0^\xi \psi_k(s) e^{i \lambda s} ds=\int_0^x \psi_k(x') e^{i \lambda x'} dx'
+ i e^{i \lambda x} \int_0^y \psi_k(x+i y') e^{-\lambda y'} dy' \mbox{.}
\end{equation}
The complex number $\ph_k(0)$ denotes the value of the solution at the origin of the complex plane and it will be determined in such a way that
$\lim_{\Re(\xi) \rightarrow \infty} \ph_k(\xi)=0$, i.e. taking into account the hypothesis (\ref{eq:expdec})
\[
\ph_k(0)=-\int_0^{+\infty} \psi_k(x) e^{i \lambda x} dx
\mbox{.}
\]
As a consequence, the solution satisfies
\[
|\ph_k(\xi)| \leq e^{\lambda y} \left[
\int_0^y |\psi_k(x+i y')| e^{-\lambda y'} dy'
+
\int_x^{+\infty} |\psi_k(x')| dx'
\right] \mbox{.}
\]
By hypothesis (\ref{eq:expdec}) it follows that $|\psi_k(\xi)| \leq K e^{-[a|\xi|+2|k|(1-\delta)\sigma]} $, hence the integrals appearing in the previous formula can be bounded on the strip $\mathcal{R}_{\zeta}$ as follows
\begin{eqnarray*}
\displaystyle \int_0^y |\psi_k(x+i y')| e^{-\lambda y'} dy' & \leq & \displaystyle K e^{-[a |x| +2|k|(1-\delta)\sigma] } \int_0^y e^{|\lambda|y'} dy' \\ [8pt]
& \leq & \displaystyle |\lambda|^{-1}K e^{-[a |x| +2|k|(1-\delta)\sigma-|\lambda|\zeta]} \mbox{,}\\ [5pt]
\displaystyle \int_x^{\infty} |\psi_k(x')| dx' & \leq & \displaystyle K e^{-2|k|(1-\delta)\sigma} \int_x^{\infty} e^{-a |x'|} dx'\\ [8pt]
& \leq & \displaystyle 2 K a^{-1} e^{a \zeta} e^{-[a |x| +2|k|(1-\delta)\sigma]} \mbox{.}
\end{eqnarray*}
The obtained estimates imply
\beq{eq:stimafinalephk}
|\ph_k(\xi)| \leq
K e^{-[a |x| +2|k|(1-\delta)\sigma-2|\lambda|\zeta]} \left[ \frac{1}{|\lambda|}+\frac{2e^{a \zeta}}{a}\right] \leq
2 K \frac{(a \gamma+e^{a \zeta})}{a} |k|^{\tau} e^{-[a |x| +2|k|(1-\delta)\sigma-2|\lambda|\zeta]} \mbox{,}
\end{equation}
where we used the Diophantine condition. Now using the inequalities $|\lambda| \leq |k||\omega|$,
\[
|k|^{\tau} e^{-d |k| \sigma} \leq \left( \frac{ \tau}{e d \sigma}\right)^{\tau} \mbox{,}
\]
and finally hypothesis (\ref{eq:sceltazeta}), one has
\beq{eq:phk}
|\ph_k(\xi)| \leq 2 K \frac{(a \gamma+e^{a \zeta})}{a}
\left( \frac{ \tau}{e d \sigma}\right)^{\tau}
e^{-a|x|} e^{-2|k|(1-\delta-d)\sigma} \mbox{.}
\end{equation}
Hence the series $\sum_{k \in \ZZ^n \setminus \{0\}} \ph_k(\xi)$ defines an analytic function on $\TT_{2 (1 - \delta- d) \sigma}^n \times \mathcal{R}_{\zeta} $.\\ The simpler case $k=0$, yielding the equation $\partial_{\xi} \ph_0(\xi)=\psi_0(\xi)$, can be treated in a similar way. More precisely, by determining $\ph_0(0)$ as in (\ref{eq:integralasymp}) and bounding the two resulting integrals of the path we get
\beq{eq:phz}
|\ph_0(\xi)| \leq \zeta K e^{-a|x|} + \frac{2Ke^{a \zeta}}{a} e^{-a|x|} \leq \frac{4Ke^{a \zeta}}{a} e^{-a|x|} \mbox{.}
\end{equation}
Now recall definition (\ref{eq:fouriernorm}). By (\ref{eq:phk}) and (\ref{eq:phz}), the use of (\ref{eq:ineq}) (recalling $a,\zeta<1$), and finally by setting
\[
S_1:=4 e^2+2 (\gamma+e)(\tau/e)^{\tau} \sum_{k \in \ZZ^n \setminus \{0\}} e^{-2 \nu|k|(1-\delta-d)\sigma}
\]
we get (\ref{eq:stimehomuno}). Note that, as long as $d+\delta<1$, the upper bound for $S_1$ is independent of $d,\delta$, being $\nu$ arbitrarily small. As for $\partial_{q_m}\ph$, directly from the Fourier expansion we find
$
\partial_{q_m} \ph(q,\xi)=i \sum_{k \in \ZZ^n\setminus\{0\}} k_m \ph_k(\xi)e^{i \langle k , q \rangle}$. By using bound (\ref{eq:stimafinalephk}) (the average term is not relevant in such case) and proceeding in a similar way we get (\ref{eq:stimehomdue}), where $S_2:=[(\tau+1)/e]^{(\tau+1)}\sum_{k \in \ZZ^n \setminus \{0\}} e^{-2 \nu|k|(1-\delta-d)\sigma}$.
\endproof
\subsection{Convergence of the Lie series operator}
\begin{lem}\label{lem:two}
Let $d',d'' \in \RR^+$ such that $d'+d'' <1$ and $F,G$ be two functions on $\mathcal{D}$ such that $\norm{G}{[(1-d')(\rho,\sigma);\zeta]}$ and $\norm{F}{[(1-d'')(\rho,\sigma);\zeta]}$ are bounded for all $\xi \in \mathcal{R}_{\zeta}$.\\
Then, for all $0<d<1-d'-d''$ and all $\nu \in (0,1/2]$, the following inequality holds at each point of $\mathcal{R}_{\zeta}$
\beq{eq:twoparameter}
\norm{\mathcal{L}_{G} F}{[(1-d-d'-d'')(\rho,\sigma);\zeta]} \leq C \norm{G}{[(1-d')(\rho,\sigma);\zeta]}
\norm{F}{[(1-d'')(\rho,\sigma);\zeta]}\mbox{,}
\end{equation}
where $C=2[e \rho \sigma (d+d')(d+d'')]^{-1}$.
\end{lem}
\proof
Straightforward\footnote{The different norm used in this paper does not imply substantial differences.} from \cite{gz92}.
\endproof
\begin{prop}\label{prop:chipsi}
Let $d_1,d_2 \in [0,1/2]$ and $\chi$ and $\psi$ be two functions on $\mathcal{D}$ such that $\norm{\chi}{[(1-d_1)(\rho,\sigma);\zeta]}$ and $\norm{\psi}{[(1-d_2)(\rho,\sigma);\zeta]}$ are bounded for all $\xi \in \mathcal{R}_{\zeta}$.\\
Then for all $\tilde{d} \in (0,1-\hat{d})$, where $\hat{d}:=\max\{d_1,d_2\}$, and for all $s \geq 1$ one has the following estimate
\beq{eq:iterativo}
\norm{\mathcal{L}_{\chi}^s \psi}{[(1-\tilde{d}-\hat{d})(\rho,\sigma);\zeta]} \leq
\frac{s!}{e^2} \left( \frac{8e}{\rho \sigma \tilde{d}^2}\right)^s
\norm{\chi}{[(1-d_1)(\rho,\sigma);\zeta]}^s
\norm{\psi}{[(1-d_2)(\rho,\sigma);\zeta]} \mbox{.}
\end{equation}
\end{prop}
\proof
Straightforward going along the lines of Lemma 4.2 of \cite{giorgilli02} and by using\footnote{The factor $8$, in place of the $2$ obtained in \cite{giorgilli02}, follows from a rescaling $(\rho,\sigma)\leftarrow (1-\hat{d})(\rho,\sigma)$ and from $\hat{d} \leq 1/2$.} Lemma \ref{lem:two}.
\endproof
\begin{prop}\label{prop:exp}
In the same hypotheses of Prop. \ref{prop:chipsi}, suppose that, in addition,
\beq{eq:convergence}
\mathfrak{L}=\frac{8 e}{\tilde{d}^2 \rho \sigma} \norm{\chi}{[(1-d_1)(\rho,\sigma);\zeta]} \leq \frac{1}{2} \mbox{.}
\end{equation}
Then the operator $\exp(\mathcal{L}_{\chi}) \psi$ is well defined and for all $\tilde{d} \in (0,1-\hat{d})$ the following estimate holds
\beq{eq:estimatelieuno}
\norm{\sum_{s \geq 1} \frac{1}{s!} \mathcal{L}_{\chi}^s \psi}{[(1-\tilde{d}-\hat{d})(\rho,\sigma);\zeta]} \leq \frac{2 \mathfrak{L}}{e^2} \norm{\psi}{[(1-d_2)(\rho,\sigma);\zeta]} \mbox{,}
\end{equation}
in particular
\beq{eq:estimateliedue}
\norm{\exp(\mathcal{L}_{\chi}) \psi}{[(1-\tilde{d}-\hat{d})(\rho,\sigma);\zeta]} \leq 2 \norm{\psi}{[(1-d_2)(\rho,\sigma);\zeta]} \mbox{.}
\end{equation}
\end{prop}
\proof
It is sufficient to recall the definition of $\exp(\mathcal{L}_{\chi})$, apply Prop. \ref{prop:chipsi}, and then use $\mathfrak{L} \leq 1/2$.
\endproof
Note that the previous result holds also if an arbitrary domain restriction $\zeta \rightarrow (1-d)\zeta$ is considered, for all $d \in [0,1)$.
\section{Quantitative estimates on the formal scheme}
Consider the following set of parameters by setting $u_j \equiv (u_j^1,\ldots,u_j^6):=(d_j,\epsilon_j,\zeta_j,m_j,\rho_j,\sigma_j)$ with $u_j^l \in [0,1)$ for all $l=1,\ldots, 6$ and all $j \geq 0$. The vector $u_0$ will be chosen later (see Sec. \ref{seq:inducbasis}).\\ Set, in addition, $u_*:=(0,0,0,m_*,\rho_*,\sigma_*)$ for some $m_*,\rho_*,\sigma_*>0$ to be determined (Sec. \ref{sec:controlseq}). As well as for $a$, the property $u_j^l \in [0,1)$ will be repeatedly used in what follows (without an explicit mention), allowing to obtain simpler estimates.
\begin{lem}\label{lem:iterative}
Under the same assumptions of Lemma \ref{lem:iterativeformal}, suppose, in addition, the existence of $u_j$ with $u_j>u_*$, satisfying
\begin{enumerate}
\item
\beq{eq:iterativeitemone}
\max\left\{\norm{A^{(j)}}{[\sigma_j;\zeta_j]},\norm{B^{(j)}}{[\sigma_j;\zeta_j]}\right\} \leq \epsilon_j e^{-a|\xi|}
\mbox{,}
\end{equation}
\item for all vector-valued functions $w=w(q,\xi)$ the following holds
\beq{eq:iterativeitemthree}
\norm{C^{(j)}(q,\xi) w(q,\xi)}{[\sigma_j;\zeta_j]} \leq m_j^{-1} \norm{w(q,\xi)}{[\sigma_j;\zeta_j]} \mbox{,}
\end{equation}
\item $d_j \leq 1/6$ holds and $\zeta_j$ is set as
\beq{eq:zetaj}
2 |\omega| \zeta_j=d_j \sigma_j \mbox{.}
\end{equation}
\end{enumerate}
Then there exists a constant $D$ such that: if
\beq{eq:piccolaunmezzo}
\epsilon_j \frac{D}{a^3 m_j^4 d_j^{4(\tau+1)}} \leq \frac{1}{2} \mbox{,}
\end{equation}
then it is possible to choose $u_{j+1} < u_j$ under the constraint (\ref{eq:zetaj})\footnote{I.e. satisfying $2 |\omega| \zeta_{j+1}=d_{j+1} \sigma_{j+1}$. As well as in what follows, the indices should be changed in $j+1$ where necessary.}, for which (\ref{eq:iterativeitemone}) and (\ref{eq:iterativeitemthree}) are satisfied by $A^{(j+1)},B^{(j+1)}$ and $C^{(j+1)}$ given by (\ref{eq:ajpuno}), (\ref{eq:bjpuno}) and (\ref{eq:cjpuno}), respectively.
\end{lem}
\proof This result is the quantitative counterpart of Lemma \ref{lem:iterativeformal} and this proof is split for the sake of clarity, depending on the considered objects. In order to simplify the notation, the index $j$ will be dropped from all the iterative objects depending on $j$, being restored only in the final estimates.
\subsubsection{Estimates on the generating functions}
Let us consider equation (\ref{eq:firsthomological}). Due to the assumptions, we can apply Prop. \ref{prop:small} with $\delta=0$ and $K=\epsilon$, obtaining
\begin{subequations}
\begin{align}
\norm{\phi}{[(1-d)\sigma; \zeta]} & \leq
\displaystyle \epsilon \frac{M_0}{a d^{\tau} } e^{-a |\xi|} \label{eq:homx} \mbox{,} \\
\norm{\partial_q \phi}{[(1-d)\sigma; \zeta]} & \leq \displaystyle \epsilon \frac{M_1}{a d^{\tau+1} } e^{-a |\xi|} \label{eq:homxfirst} \mbox{,}
\end{align}
\end{subequations}
where $M_0:=S_1\sigma_*^{-\tau}$ and $M_1:=nS_2 \sigma_*^{-(\tau+1)}$.\\
Recalling the definition (\ref{eq:bhat}) then using (\ref{eq:iterativeitemone}), (\ref{eq:homxfirst}) and (\ref{eq:iterativeitemthree}), one gets
\begin{eqnarray}
\norm{\hat{B}}{[(1-d)\sigma;\zeta]} & \leq &
\displaystyle \epsilon e^{-a |\xi|} + \frac{1}{m} \norm{\partial_q \phi}{[(1-d)\sigma;\zeta]} \leq
\displaystyle \epsilon \frac{(1+M_1)}{a m d^{\tau+1}} e^{-a |\xi|} \label{eq:bhatest}\mbox{,} \\
\displaystyle \norm{\partial_{\xi} \phi}{[(1-d)\sigma;(1-d)\zeta]} & \leq &
\displaystyle \frac{1}{d \zeta} \norm{\phi}{[(1-d)\sigma;\zeta]}
\Heq{\ref{eq:homx}}{\leq} \displaystyle \epsilon \frac{M_0}{a d^{\tau+1} \zeta} e^{-a |\xi|} \label{eq:xxi}\mbox{.}
\end{eqnarray}
As for equation (\ref{eq:secondhom}), Prop. \ref{prop:small} used component-wise with $\delta=d$, similarly yields by (\ref{eq:bhatest})
\begin{subequations}
\begin{align}
\norm{Y}{[(1-2d)\sigma;\zeta]} & \leq
\displaystyle \epsilon \frac{M_2 \sigma_*}{a^2 m d^{2 \tau +1}} e^{-a |\xi|} \label{eq:homy} \mbox{,} \\
\norm{\partial_q Y}{[(1-2d)\sigma;\zeta]} & \leq \epsilon \frac{M_3}{a^2 m d^{2 \tau +2}} e^{-a |\xi|} \label{eq:homyfirst} \mbox{,}
\end{align}
\end{subequations}
where
\begin{eqnarray}
M_2&:=& \displaystyle n S_1 (1+M_1) \sigma_*^{-(\tau+1)} \mbox{,}
\label{eq:cdue}\\
M_3&:=& \displaystyle n^2 S_2 (1+M_1) \sigma_*^{-(\tau+1)}
\label{eq:ctre}
\mbox{.}
\end{eqnarray}
As a consequence we have, by using (\ref{eq:inequno})
\begin{eqnarray}
\displaystyle \norm{\langle Y,p \rangle}{[\rho,(1-2d)\sigma;\zeta]} & \leq &
\displaystyle \epsilon \frac{M_2 \rho \sigma_*}{a^2 m d^{2 \tau +1}} e^{-a |\xi|} \mbox{,} \label{eq:ydotp}\\
\displaystyle \norm{Y_{\xi}}{[(1-2d)\sigma;(1-d)\zeta]} & \leq &
\displaystyle \frac{1}{d \zeta} \norm{Y}{[(1-2d)\sigma;\zeta]}
\leq
\displaystyle \epsilon \frac{M_2}{a^2 m d^{2 \tau +2} \zeta} e^{-a |\xi|} \label{eq:yxi}\mbox{.}
\end{eqnarray}
By (\ref{eq:ydotp}), Prop. \ref{prop:exp} and setting $\mathfrak{L}:=Q_1 e^{-a|\xi|}$, we have that $\exp(\mathcal{L}_{\langle Y,p \rangle})$ converges uniformly on $\mathcal{R}_{\zeta}$ provided\footnote{In this case $d_1:=2d$, while $d_2 \leq 2d$ as used below, so it is possible to set $\tilde{d} \equiv d <1-2d$ by hypothesis $(3)$. Moreover, the latter implies $d_1,d_2 \leq 1/2$ as required by Prop. \ref{prop:chipsi}.}
\beq{eq:convergenceliesectre}
Q_1:=\epsilon \frac{8 e M_2}{a^2 m d^{2 \tau+3}} \leq \frac{1}{2} \mbox{.}
\end{equation}
\subsubsection{Estimates on the transformed Hamiltonian}
Firstly, by (\ref{eq:ahat}), using (\ref{eq:iterativeitemthree}) and (\ref{eq:homxfirst}) one gets
\[ \norm{\hat{A}}{[(1-d)\sigma;\zeta]}
\leq \epsilon^2 \frac{M_1(1+M_1)}{a^2 m d^{2 \tau+2}} e^{-2a |\xi|}\mbox{.}
\]
Hence by (\ref{eq:ajpuno}), Prop. \ref{prop:exp} with $d_2=d$ and after an arbitrary restriction in $\rho$ and $\zeta$, we have
\beq{eq:stimaaprimo}
\norm{A^{(j+1)}}{[(1-3d_j)(\rho_j,\sigma_j;\zeta_j)]} \leq \displaystyle \epsilon_j^2 \frac{M_4}{a^2 m_j d_j^{2 \tau+2}} e^{-2 a|\xi|} \mbox{,}
\end{equation}
where
\beq{eq:cquattro}
M_4:= 2 M_1(1+M_1) \mbox{.}
\end{equation}
On the other hand, by (\ref{eq:estimatelieuno}), (\ref{eq:bhatest}) and (\ref{eq:inequno})
\[
\displaystyle \norm{\sum_{s \geq 1} \frac{s}{(s+1)!} \mathcal{L}_{\langle Y,p \rangle}^s \langle \hat{B}, p \rangle}{[(1-3d)(\rho,\sigma);\zeta]} \leq \displaystyle \frac{2 \mathfrak{L}}{e^2} \norm{\langle \hat{B},p \rangle}{[(1-d)(\rho,\sigma);\zeta]} \leq
\epsilon \frac{2 \rho (1+M_1)Q_1}{a m e^2 d^{\tau+1}} e^{-2a|\xi|} \mbox{.}
\]
Recalling (\ref{eq:bjpuno}), the definition in (\ref{eq:convergenceliesectre}) and (\ref{eq:inequnocontrary}),
\beq{eq:stimabprimo}
\displaystyle \norm{ B^{(j+1)}}{[(1-3d_j)(\rho_j,\sigma_j;\zeta_j)]} \leq \epsilon_j^2 \frac{M_5}{a^3 m_j^2 d_j^{3 \tau+4}}e^{-2 a |\xi|} \mbox{,}
\end{equation}
with
\beq{eq:ccinque}
M_5:=16 n (1+M_1) M_2(e \sigma_*)^{-1} \mbox{.}
\end{equation}
Let us set $C':=C^{(j+1)}$. Directly from (\ref{eq:cjpuno}), Prop. \ref{prop:exp} and (\ref{eq:iterativeitemthree}) one has
\beq{eq:stimaccprimo}
\norm{\langle (C'-C)p,p \rangle}{[(1-3d)(\rho,\sigma);\zeta]} \leq \frac{2 \mathfrak{L}}{e^2} \norm{\langle Cp,p \rangle}{[(1-2d)(\rho,\sigma);\zeta]} \leq \epsilon \frac{16 M_2 }{a m^3 e d^{2 \tau+3}} \rho^2 e^{-a|\xi|} \mbox{,}
\end{equation}
implying, by (\ref{eq:ineqdue})
\beq{eq:estimateckl}
\norm{C_{kl}'-C_{kl}}{[(1-3d)\sigma;\zeta]} \leq \epsilon \frac{M_6 }{a^2 m^3 n d^{2 \tau+3}} e^{-a|\xi|} \mbox{,}
\end{equation}
with
\beq{eq:csei}
M_6:=16 n M_2(e \sigma_*)^{-1} \mbox{.}
\end{equation}
Now set
\beq{eq:mprimo}
m':=m- \epsilon \frac{M_6 }{a^2 m^3 d^{2 \tau+3}} e^{-a|\xi|}\mbox{,}
\end{equation}
which is well defined provided that, e.g.,
\beq{eq:limcsei}
\epsilon \frac{M_6 }{a^2 m^4 d^{2 \tau+3}} \leq \frac{1}{2} \mbox{,}
\end{equation}
giving, in particular, $m' \in [m/2,m]$. In this way we have for all $w=w(q,\xi)$
\beq{eq:stimacprimo}
\begin{array}{rcl}
\displaystyle \norm{C'w}{[(1-3d)\sigma;\zeta]} & \Heq{\ref{eq:iterativeitemthree}),(\ref{eq:stimaccprimo}}{\leq} &
\displaystyle \left(\frac{1}{m}+ \epsilon \frac{M_6 }{a^2 m^3 d^{2 \tau+3}} e^{-a|\xi|} \right) \norm{w}{[(1-3d)\sigma;\zeta]} \\
& \leq & \displaystyle \frac{1}{m'} \norm{w}{[(1-3d)\sigma;\zeta]} \mbox{,}
\end{array}
\end{equation}
where the inequality $a^{-1}+b<(a-b)^{-1}$, valid for all $0<b<a<1$, then (\ref{eq:mprimo}) have been used in the last passage.
\subsubsection*{Determination of parameters} Let us set
\beq{eq:epjpuno}
\epsilon_{j+1}:=\frac{D}{a^3 m_j^4 d_j^{4(\tau+1)}} \epsilon_j^2 \mbox{.}
\end{equation}
In this way, conditions (\ref{eq:convergenceliesectre}), (\ref{eq:limcsei}) and those obtained by comparing (\ref{eq:stimaaprimo}) and (\ref{eq:stimabprimo}) with (\ref{eq:iterativeitemone}), are implied \emph{a fortiori} by hypothesis (\ref{eq:piccolaunmezzo}), provided that $
D:=\max\{8 e M_2 ,M_4 , M_5, M_6\}$. The property $\epsilon_{j+1}<\epsilon_j$ is an easy consequence of (\ref{eq:piccolaunmezzo}) and of $\epsilon_j<1$.\\
By taking into account the estimates (\ref{eq:stimaaprimo}) and (\ref{eq:stimabprimo}), we have that the domain on which these hold requires the restriction described by the following choices
\beq{eq:sigmarhojpuno}
\sigma_{j+1}:=(1-3 d_j) \sigma_j, \qquad \rho_{j+1}:=(1-3 d_j) \rho_j \mbox{.}
\end{equation}
As for $\zeta_{j+1}$, condition (\ref{eq:sceltazeta}) is valid at the $(j+1)$-th step if
$
\zeta_{j+1}=(2 |\omega|)^{-1} \min \{ (1-3 d_j)d_j \sigma_j, d_{j+1} \sigma_{j+1}\}
$.
As $d_j \leq 1/6$ by hypothesis, by the first of (\ref{eq:sigmarhojpuno}) the previous condition is of the form
(\ref{eq:zetaj}) provided that $d_{j+1} < d_j$ is chosen. This implies $\zeta_{j+1}<\zeta_j$.\\
The only parameter left is $m_j$. Note that (\ref{eq:piccolaunmezzo}) implies, in particular,
$\epsilon M_6/(a^2 m^3 d^{2 \tau+3}) \leq m d^{2 \tau+1} $, and then
\[
m':=m-\epsilon \frac{M_6}{a^2 m^3 d^{2 \tau+3} \zeta} e^{-a|\xi|} \geq m(1-d^{2\tau+1}) \mbox{.}
\]
In conclusion, inequality (\ref{eq:stimacprimo}), hence (\ref{eq:iterativeitemthree}), are satisfied by setting
\beq{eq:mjpuno}
m_{j+1}:=m_j(1-d_j^{2\tau+1}) \mbox{.}
\end{equation}
The choice of $u_{j+1}$ is now complete\footnote{The freedom in the choice of $d_{j+1}$ (subject only to the constraint $d_{j+1}<d_j$) will be profitably used later.}.
\endproof
\subsection{Estimates on the transformation of variables}
\begin{prop}\label{prop:trasf} Assume the validity of Lemma \ref{lem:iterative}. Then, for all $j \in \NN$, the transformation (\ref{eq:change}) is a symplectic transformation
\[
\mathcal{K}_j: \mathcal{D}_{j+1} \longrightarrow \mathcal{D}_j \mbox{,}
\]
where $\mathcal{D}_j:=\Delta_{\rho_j}(0)\times \TT_{2 \sigma_j}^n \times \mathcal{S}_{\rho_j} \times \mathcal{R}_{\zeta_j} \ni (q^{(j)},p^{(j)},\eta^{(j)},\xi^{(j)})$, for which there exists a constant $T$ such that
\begin{subequations}
\begin{align}
|q^{(j+1)}-q^{(j)}| & \leq T \sigma_j d_j e^{-a|\xi|} \mbox{,}\\
|p^{(j+1)}-p^{(j)}| & \leq T \rho_j d_j e^{-a|\xi|} \mbox{,}\\
|\eta^{(j+1)}-\eta^{(j)}| & \leq T \rho_j d_j e^{-a|\xi|} \mbox{,}
\end{align}
\end{subequations}
while $|\xi^{(j+1)}-\xi^{(j)}|=0$, i.e. $\xi^{(j)}=:\xi$ for all $j$. Moreover $\mathcal{K}_j$ is $\epsilon_0$-``close to the identity'', i.e. $\lim_{\epsilon_0 \rightarrow 0} \mathcal{K}_j=\id$ for all $j$.
\end{prop}
\proof
Once more it is convenient to examine separately the transformations realising $\mathcal{K}_j$
\begin{align*}
(\hat{q}^{(j)},\hat{p}^{(j)},\hat{\eta}^{(j)},\hat{\xi}^{(j)})&:=
\exp(\mathcal{L}_{\phi^{(j)}}) (q^{(j+1)},p^{(j+1)},\eta^{(j+1)},\xi^{(j+1)}) \mbox{,}\\
(q^{(j)},p^{(j)},\eta^{(j)},\xi^{(j)})&:=
\exp(\mathcal{L}_{\chi^{(j)}})
(\hat{q}^{(j)},\hat{p}^{(j)},\hat{\eta}^{(j)},\hat{\xi}^{(j)}) \mbox{.}
\end{align*}
Due to the structure of $\phi^{(j)}$ the action of the first operator reduces to the first term for the momenta,
\begin{align*}
\hat{p}^{(j)}&=p^{(j+1)}+ [\partial_{q} \phi^{(j)}]_{(q,\xi)=(q^{(j+1)},\xi^{(j+1)})} \mbox{,}\\
\hat{\eta}^{(j)}&=\eta^{(j+1)}+[\partial_{\xi} \phi^{(j)}]_{(q,\xi)=(q^{(j+1)},\xi^{(j+1)})} \mbox{,}
\end{align*}
while it is the identity in the other variables: $\hat{q}^{(j)}=q^{(j+1)}$ and $\hat{\xi}^{(j)}=\xi^{(j+1)}$. Quantitatively we find
\[
|\hat{p}^{(j)}-p^{(j+1)}| \Heq{\ref{eq:homxfirst}}{\leq}
\epsilon_j \frac{M_1}{a d_j^{\tau+1}} e^{-a |\xi^{(j+1)}|},\qquad
|\hat{\eta}^{(j)}-\eta^{(j+1)}| \Heq{\ref{eq:xxi}}{\leq}
\epsilon_j
\frac{M_0}{a d_j^{\tau+1} \zeta_j} e^{-a |\xi^{(j+1)}|} \mbox{.}
\]
As for the second transformation, first note that
\beq{eq:four}
\mathcal{L}_{\chi^{(j)}} q = Y^{(j)}, \quad
\mathcal{L}_{\chi^{(j)}} p = \langle \partial_{q} Y^{(j)} ,p \rangle, \quad
\mathcal{L}_{\chi^{(j)}} \xi = 0 ,\quad
\mathcal{L}_{\chi^{(j)}} \eta = \langle \partial_{\xi} Y^{(j)}, p \rangle \mbox{,}
\end{equation}
where the expressions above are meant to be evaluated at $(q,p,\eta,\xi)=
(\hat{q}^{(j)},\hat{p}^{(j)},\hat{\eta}^{(j)},\hat{\xi}^{(j)})$. Now consider bound (\ref{eq:iterativo}) for $s-1$, setting $\chi:=\chi^{(j)}$ and $\psi$ as the objects in the right-hand sides of (\ref{eq:four}), one by one. We get, e.g., for the first of them
\[
\norm{\mathcal{L}_{\chi^{(j)}}^s q}{[(1-3d_j)(\rho_j,\sigma_j;\zeta_j)]} \leq
\frac{s!}{e^2} \mathfrak{L}^{s-1}
\norm{Y^{(j)}}{[\rho_j,(1-2d_j)\sigma_j;(1-d_j)\zeta_j]} \leq
s! \frac{d^2 \sigma_* }{8 e^3} \mathfrak{L}^s \mbox{.}
\]
Repeating this computation also for the other variables we get (recall $\sum_{s \geq 1} \mathfrak{L}^s \leq 2 \mathfrak{L}$)
\begin{subequations}
\begin{align}
\displaystyle |q^{(j+1)}-\hat{q}^{(j)}| & \leq \displaystyle \frac{d_j^2 \sigma_*}{4 e^3} \mathfrak{L} = \epsilon_j \frac{2 M_2}{a^2 e^2 m_j d_j^{2 \tau+1}} e^{-a |\xi^{(j)}|} \label{eq:qhatal} \mbox{,}\\
\displaystyle |p^{(j+1)}-\hat{p}^{(j)}| & \leq \displaystyle \frac{ d_j \rho_j M_3}{4 e^3 M_2} \mathfrak{L}
= \epsilon_j \frac{2 M_3 \rho_j}{a^2 e^2 m_j d_j^{2 \tau+2} } e^{-a |\xi^{(j)}|} \mbox{,}\\
\displaystyle |\eta^{(j+1)}-\hat{\eta}^{(j)}| & \leq \displaystyle \frac{d_j \rho_j}{4 e^3 \zeta_j} \mathfrak{L}
= \epsilon_j \frac{2 M_2 \rho_j}{a^2 e^2 m_j d_j^{2 \tau+2} \zeta_j} e^{-a |\xi^{(j)}|} \mbox{,}
\end{align}
\end{subequations}
and clearly $\displaystyle |\xi^{(j+1)}-\hat{\xi}^{(j)}| = 0$, implying $\xi^{(j+1)} \equiv \xi^{(j)}$.
\begin{rem} It is finally evident that the transformation $\mathcal{K}_j$ does not act on time, hence we can set $\xi^{(j)} \equiv \xi$ for all $j \in \NN$ as in the statement. On the other hand, this is a necessary property in order to obtain a meaningful result.
\end{rem}
Collecting the obtained estimates we get that $|q^{(j+1)}-q^{(j)}|$ is given by (\ref{eq:qhatal}), while
\beq{eq:pieta}
\begin{array}{rcl}
\displaystyle |p^{(j+1)}-p^{(j)}| & \leq &
\displaystyle \epsilon_j \frac{(M_1 e^2+2 M_3) \rho_j}{a^2 e^2 m_j d_j^{2\tau+2} \rho_*} e^{-a|\xi|} \mbox{,}
\\
\displaystyle |\eta^{(j+1)}-\eta^{(j)}| & \leq & \epsilon_j \displaystyle \frac{(M_0 e^2+2 M_2) \rho_j}{a^2 e^2 m_j d_j^{2\tau+2} \rho_* \zeta_j} e^{-a|\xi|} \mbox{,}
\end{array}
\end{equation}
having used $\rho_j>\rho_*$. Hence it is possible to find\footnote{Precisely $T:=(D e^2 \rho_* \sigma_*)^{-1}\max\{ M_2 \rho_*,(M_1 e^2+2 M_3)\sigma_*,2|\omega|(M_0 e^2+2M_2)\}$, by (\ref{eq:qhatal}), (\ref{eq:pieta}) and using (\ref{eq:piccolaunmezzo}) and (\ref{eq:zetaj}).} $T$, obtaining the desired estimates.\\
The $\epsilon_0$-closeness to the identity easily follows from (\ref{eq:qhatal}), (\ref{eq:pieta}) and from the monotonicity of $\{\epsilon_j\}$.
\endproof
\section{Convergence of the formal scheme}
\subsection{Construction of the control sequence}\label{sec:controlseq}
\begin{lem} In the assumptions of Lemma \ref{lem:iterative}, it is possible to determine $u_*$ and construct the sequence $\{u_j\}_{j \in \NN}$ such that
\beq{eq:limit}
\lim_{j \rightarrow \infty} u_j=u_* \mbox{.}
\end{equation}
\end{lem}
\proof
Let us choose in (\ref{eq:epjpuno}) $\epsilon_j=\epsilon_0 j^{-8(\tau+1)}$, obtaining
\beq{eq:dj}
d_j = \left(\frac{D \epsilon_0}{a^3 m_j^4} \right)^{\frac{1}{4(\tau+1)}} \frac{(j+1)^2}{j^4} \mbox{.}
\end{equation}
The following bound is immediate for all $j \geq 1$
\beq{eq:bounddk}
d_j \leq \displaystyle 2 \frac{\mathcal{A}}{j^2},\qquad \mathcal{A}:=\left(\frac{D \epsilon_0}{a^3 m_*^4} \right)^{\frac{1}{4(\tau+1)}} \mbox{.}
\end{equation}
Imposing condition $d_{j} \geq d_{j+1}$ in (\ref{eq:dj}) one gets $(1-d_j^{2 \tau+3})^{\frac{1}{\tau+1}} \geq j^4(j+2)^2/(j+1)^6$. By using (\ref{eq:bounddk}), it takes the stronger form
\[
1-2 \mathcal{A} j^{-2} \geq \frac{j^4(j+2)^2}{(j+1)^6} \mbox{.}
\]
The latter is true for all $j$ provided that it holds for $j=1$. This is achieved if $\mathcal{A} \leq 55/128$, a condition that can be enforced by requiring $\mathcal{A} \leq 1/12$. In this way we obtain $d_j \leq d_1 \leq 1/6$, as required by Lemma \ref{lem:iterative}, item ($3$). This immediately implies
\beq{eq:seriesdj}
\sum_{j \geq 1} d_j \leq \frac{1}{6} \sum_{j \geq 1} j^{-2} < \left(\frac{\pi}{6}\right)^2 \mbox{.}
\end{equation}
In this way, the range of the admissible values for $\epsilon_0$ is determined once and for all; more explicitly
\beq{eq:limep}
\frac{D \epsilon_0}{a^3 m_*^4} \leq \frac{1}{12^{4(\tau+1)}} \mbox{.}
\end{equation}
We only need to prove the limit (\ref{eq:limit}). Let us start from $\rho_j$. By (\ref{eq:sigmarhojpuno}) we have that if $\prod_{j \geq 1} (1-3 d_j)$
is lower bounded by a constant, say $M_{\rho}$, then $\rho_0 M_{\rho} $ is a lower bound for $\rho_j$ for all $j$.\\
Consider
\[
\log \prod_{j \geq 1} (1-3 d_j) = \sum_{j \geq 1} \log (1-3 d_j) \geq - 6 \log 2 \sum_{j \geq 1} d_j > -\log 4 \mbox{,}
\]
in which we have used the inequality $0 \geq \log(1-x) \geq -2 x \log 2$, valid for $x \in [0,1/2]$. Hence $\prod_{j \geq 1} (1-3 d_j) \geq 1/4$. This implies that the required lower bound holds for $\rho_*=\rho_0/4$ and then $\sigma_*=\sigma_0/4$.
A similar argument applies for $m_j$, yielding $m_*=m_0/2$.
\endproof
\subsection{Induction basis and conclusion of the proof}\label{seq:inducbasis}
In this final part we check that the inductive hypotheses described in Lemmas \ref{lem:iterativeformal} and \ref{lem:iterative} hold at the initial step, i.e. $j=0$, fixing in this way $u_0$.\\
First of all, we see that $H$ is of the form (\ref{eq:hamricorsiva}), in a way that we can set $H_0:=H$. It is sufficient to consider the (finite) Taylor expansion of $f$ around $p=0$ in (\ref{eq:ham}), then define
\[
A^{(0)}:=\ep f(q,0,\xi),\qquad
B^{(0)}:=\ep \partial_p f(q,0,\xi),\qquad
C^{(0)}:=\Gamma+\ep \partial_p^2 f(q,0,\xi) \mbox{.}
\]
Note that $C^{(0)}$ is symmetric. Now set $\rho_0:=\rho/2$ and $\sigma_0:=\sigma$. By a Cauchy estimate and (\ref{eq:slowdecay}) we have
\beq{eq:lastcauchy}
\norm{\partial_p f}{[\rho_0,\sigma_0;\zeta_0]} \leq
M_f \rho_0^{-1} e^{-a |\xi|},\qquad \norm{\partial_p^2 f}{[\rho_0,\sigma_0;\zeta_0]} \leq
M_f \rho_0^{-2}e^{-a |\xi|} \mbox{,}
\end{equation}
for all $\zeta_0$ (determined below). Hence (\ref{eq:iterativeitemone}) is satisfied for $j=0$ by setting $\epsilon_0:=\ep M_f / \rho_0$. By Prop. \ref{prop:trasf}, this shows that the sequence $\{\mathcal{K}_j\}$ and then the composition
\beq{eq:composition}
\mathcal{K}:=\lim_{j \rightarrow \infty} \mathcal{K}_j \circ \mathcal{K}_{j-1} \circ \ldots \circ \mathcal{K}_0 \mbox{,}
\end{equation}
is $\ep$-close to the identity.\\It follows that (\ref{eq:iterativeitemthree}) holds by virtue of (\ref{eq:hypongamma}) for sufficiently small $\ep$. From the quantitative point of view one can ask $ |C^{(0)} v| \leq m_0^{-1}|v|$ for all $v \in \CC^n$ with $m_0:=m/2$. This is true for all $\ep \leq \tilde{\ep}$ where
\beq{eq:limitepzero}
\tilde{\ep}:=\rho^2 (16 M_f n)^{-1}(\sqrt{m^2 \norm{\Gamma}{\infty}^2+12}-m \norm{\Gamma}{\infty}) \mbox{,}
\end{equation}
having denoted\footnote{This bound follows from a straightforward check. By the second of (\ref{eq:lastcauchy}) we have $C^{(0)}=\Gamma+\ep h E$ where $h:= M_f \rho_0^{-2}$ and $E_{kl} \in [-1,1]$ for all $k,l=1,\ldots,n$. It is now sufficient to use the (exact) Mac Laurin expansion $|C^{(0)}(\ep)v|^2=|\Gamma v|^2+2 \ep h \langle \Gamma v, E v \rangle + \ep^2 |E v|^2$ and (\ref{eq:hypongamma}) to get (\ref{eq:limitepzero}).}
$\norm{\Gamma}{\infty}:=\max_i \sum_{j=1}^n |\Gamma_{ij}|$.\\
The choice of $u_0$ is now complete by choosing $d_0=1/6$ and $\zeta_0$ as determined by (\ref{eq:sceltazeta}). By using (\ref{eq:limep}) and recalling the choices for $\epsilon_0$ and $m_*$ above, we finally obtain the limitation for $\ep_a$
\beq{eq:finalvalueep}
\ep_a = \min\{ \rho a^3 m^4 (2^9 12^{4(\tau+1)} D M_f)^{-1},\tilde{\ep}\} \mbox{.}
\end{equation}
The validity\footnote{The allowed range for $\ep$ found above exploits a well-known issue in the KAM theory: the numerical coefficient in
(\ref{eq:finalvalueep}) is smaller than $10^{-9}$ (and rapidly decreasing as the number $n$, hence $\tau$, increases). This value is practically unsuitable for interesting physical applications (such as Celestial Mechanics problems). A relevant branch of the KAM theory is devoted to the development of tools capable of increasing this threshold. See \cite{celgiorloc} for an example or \cite{cellettikam} for a comprehensive application of the computer-assisted proofs approach.} of condition (\ref{eq:piccolaunmezzo}) for $j=0$ follows from (\ref{eq:limep}).\\
The very last step consists in showing the convergence of the composition (\ref{eq:composition}). By Prop. \ref{prop:trasf} and recalling (\ref{eq:seriesdj}) we find
\[
|q_{\infty}-q| \leq T \sum_{k \geq 0} |q_{k+1}-q_k| < 2 \sigma T \mbox{.}
\]
Analogously, we find $|p_{\infty}-p|,|\eta_{\infty}-\eta| < 2 \rho T $. Hence, by the Weierstra{\ss} Theorem (see, e.g. \cite{dettman}), the transformation (\ref{eq:composition}) converges uniformly in all compact subsets of $\mathcal{E}_*:=\Delta_{\rho_*} \times \TT_{2 \sigma_*}^n \times \mathcal{S}_{\rho_*}$. Note that the degeneration of $\mathcal{R}_{\zeta_j}$ is not an issue as the transformation is trivial in the $\xi$ variable. The proof is completed by setting $\mathcal{D}_*= \mathcal{E}_* \times \RR^+$.
\end{document} |
\begin{document}
\begin{frontmatter}
\title{Simulation paradoxes related to a fractional Brownian motion
with small Hurst index}
\author{\inits{V.}\fnm{Vitalii}\snm{Makogin}}\email{makoginv@ukr.net}
\address{Department of Probability Theory, Statistics and Actuarial Mathematics,
Taras Shevchenko National University of Kyiv,
64, Volodymyrska St., 01601 Kyiv, Ukraine}
\markboth{V. Makogin}{Simulation paradoxes related to a fractional
Brownian motion with small Hurst index}
\begin{abstract}
We consider the simulation of sample paths of a fractional Brownian
motion with small values of the Hurst index and estimate the behavior
of the expected maximum. We prove that, for each fixed $N$, the error
of approximation $\mathbf{E}\max_{t \in[0,1]}B^H(t) - \mathbf{E}\max_{i =
\overline{1,N}} B^H (i/N )$ grows rapidly to $\infty$ as the
Hurst index tends to 0.
\end{abstract}
\begin{keyword}
\xch{Fractional}{fractional} Brownian motion\sep
Monte Carlo simulations \sep
expected maximum\sep
discrete approximation
\MSC[2010]
65C50 \sep60G22
\end{keyword}
\received{31 May 2016}
\revised{19 June 2016}
\accepted{20 June 2016}
\publishedonline{4 July 2016}
\end{frontmatter}
\section{Introduction}\label{}
A fractional Brownian motion $\{B^H(t),t\geq0\}$ is a centered
Gaussian stochastic process with covariance function
\[
\mathbf{E}\bigl[B^H(t)B^H (u)\bigr] = \frac{1}2
\bigl(t^{2H} +u^{2H} -|t - u|^{2H}\bigr),\quad t,u\geq0,
\]
where $H\in(0,1)$ is the Hurst index. The fractional Brownian motion
is a~self-similar process with index $H$, that is, for any $a > 0$,
\[
\bigl\{B^H(t),t\geq0\bigr\}\stackrel{d} {=}\bigl
\{a^{-H}B^H(at), t \geq0\bigr\},
\]
where $\stackrel{d}{=}$ means the equality of finite-dimensional distributions.
Due to self-similarity, we have that, for all $T>0$,
\[
\bigl\{B^H(t),t\in[0,T]\bigr\}\stackrel{d} {=}\bigl
\{T^{H}B^H\bigl(T^{-1}t\bigr), t \in[0,T]\bigr\}=
\bigl\{ T^{H}B^H(s), s \in[0,1]\bigr\}.
\]
Based on such an invariance of distributions, it is appropriate to
investigate the properties of the fractional Brownian motion only over
the time interval $[0,1]$.
In this paper, we consider the behavior of the maximum functional\break
$\max_{t \in[0,1]}B^H(t)$ with small values of Hurst index.
It should be noted that the fractional Brownian motion process with
$H=1/2$ is the Wiener process $\{W(t),t\geq0\}$. The distribution of
$\max_{t \in[0,1]}W(t)$ is known. Namely,
\[
\mathbf{P} \Bigl(\max_{t \in[0,1]}W(t)\leq x \Bigr)=\sqrt{\frac{2}{\pi}}
\int_{0}^x e^{-y^2/2}dy,\quad x\ge0,
\]
and, therefore,
\[
\mathbf{E} \Bigl[\max_{t \in[0,1]}W(t) \Bigr]=\sqrt{\frac{2}{\pi}}.
\]
Many papers are devoted to the distribution of the maximum functional
of the fractional Brownian motion, where usually asymptotic properties
for large values of time horizon $T$ are considered. For example,
Molchan \cite{Mol} has found an asymptotic behavior of small-ball
probabilities for the maximum of the fractional Brownian motion.
Talagrand \cite{Tal} obtained lower bounds for the expected maximum of
the fractional Brownian motion. In several works, the distribution of
the maximum is investigated when the Hurst index $H$ is close to $1/2$.
In particular, this case was considered by Sinai \cite{Sinai} and
recently by Delorme and Weise \cite{Del}.
Currently, an analytical expression for the distribution of the maximum
of the fractional Brownian motion remains unknown. Moreover, the exact
value of the expectation of such a functional is unknown too.
From the paper of Borovkov et al.\ \cite{Borovkov} we know the
following bounds:
\begin{equation}
\label{maxbounds} \frac{1}{2\sqrt{H \pi e \ln2}} \leq\mathbf{E}\max_{t \in
[0,1]}B^H(t)
< \frac{16.3}{\sqrt{H}}.
\end{equation}
On the other hand, we may get an approximate value of the expected
maximum using Monte Carlo simulations. That is, for sufficiently large
$N $,
\begin{equation}
\label{appr} \mathbf{E}\max_{t \in[0,1]}B^H(t) \approx\mathbf{E}\max
_{i = \overline{1,N}} B^H(i/N).
\end{equation}
The authors of \cite{Borovkov} obtain an upper bound for the error
$\varDelta_N$ of approximation~\eqref{appr}. Namely, for $N\geq2^{1/H}$,
\begin{align}
\label{apprbounds1} 0\leq\varDelta_N:={}& \mathbf{E}\max_{t \in[0,1]}B^H(t)
- \mathbf{E}\max_{i = \overline{1,N}} B^H (i/N )
\\[4pt]
\label{apprbounds2}\leq{}&\frac{2 \sqrt{\ln N}}{N^H} \biggl(1+\frac{4}{N^H}+
\frac{0.0074}{(\ln N)^{3/2}} \biggr).
\end{align}
The implementation of approximation \eqref{appr} has technical
limitations. Due to modern computer capabilities, we assume that $N
\leq2^{20} \approx10^6$. Under such conditions, inequality \eqref
{apprbounds2} is true when $H \geq0.05$, and $\varDelta_N < 11.18$.
In this article, we make Monte Carlo simulations and estimate \linebreak
$\mathbf{E}\max_{i = \overline{1,N}} B^H(i/N)$. Also, we investigate the
behavior of $\varDelta_N$ with small values of the Hurst index $H$ and
show that, for a fixed $N$, the approximation error $\varDelta_N\to+\infty
$ as $H\to0$. For the rate of this convergence, when $N=2^{20}$, we
prove the inequality $\varDelta_N>c_1 H^{-1/2} - c_2,\ H\in(0,1)$, where
the constants $c_1=0.2055$ and $c_2=3.4452$ are calculated numerically.
Thus, when the values of $H$ are small, approximation \eqref{appr} is
not appropriate for evaluation of $\mathbf{E}\max_{t \in[0,1]}B^H(t)$.
The article is organized as follows. The first section presents the
methodology of computing. The second section presents the results of
computing of the expected maximum of the fractional Brownian motion.
In the third section, we obtain a lower bound for the error $ \varDelta_N$
and calculate the constants $c_1 $ and~$c_2 $.
\section{Methods of approximate calculations}
\subsection{Simulation of a vector $(B^H(i/N))_{1\leq i \leq N}$}
Let us consider briefly the method proposed by Wood and Chan \cite
{W-Ch}. Let $G$ be the autocovariance matrix of $(B^H(1/N),\ldots
,B^H(N/N))$. Embed $G$ in a~circulant $m\times m$ matrix $C$ given by
\begin{equation*}
C=
\begin{pmatrix}
c_0 & c_1 & \cdots& c_{m-1}\\
c_{m-1} & c_0 & \cdots& c_{m-2}\\
\vdots& \vdots& \ddots& \vdots\\
c_1 & c_2 & \cdots& c_{0}
\end{pmatrix}
,
\end{equation*}
where
\begin{equation*}
c_j=
\begin{cases}
\frac{1}{N^{2H}} (|j-1|^{2H}-2 j^{2H}+(j+1)^{2H} ), & 0\le j\le\frac
{m}{2}, \\[3pt]
\frac{1}{N^{2H}} ((m-j-1)^{2H}-2(m-j)^{2H}+(m-j+1)^{2H} ), & \frac
{m}{2}< j\le m-1.
\end{cases}
\end{equation*}
\begin{proposition}
Let $m=2^{1+\nu}$, where
$ 2^\nu$ is the minimum power of $2$ not less than~$N$. Then the
matrix $C$ allows a representation $ C = QJQ^T$, where $J$ is a
diagonal matrix of eigenvalues of the matrix $C$, and $Q$ is the
unitary matrix with elements
\begin{equation*}
(Q)_{j,k}=\frac{1}{\sqrt{m}}c_j\exp \biggl(-2 i \pi
\frac{jk}{m} \biggr),\quad j,k=\overline{0,m-1}.
\end{equation*}
The eigenvalues $\lambda_k$ of the matrix $C$ are equal to
\begin{equation*}
\lambda_k=\sum_{j=0}^{m-1}\exp
\biggl(2 i \pi\frac{jk}{m} \biggr),\quad k=\overline{0,m-1}.
\end{equation*}
\end{proposition}
Since $Q$ is unitary, we can set $Y=Q J^{1/2}Q^{T} Z$, where $Z\sim
N(0,I_m)$. Therefore, we get $Y\sim N(0,C)$. Thus, the distributions of
the vectors $(Y_0$, $Y_0+Y_1,\ldots,Y_0+\cdots+Y_{N-1})$ and
$(B^H(1/N),\ldots,B^H(N/N))$ coincide.
The method of Wood and Chan is exact and has complexity $ O (N \log
(N))$. A more detailed description of the algorithm, a comparison with
other methods of simulation of the fractional Brownian motion, and a
program code are contained in the paper \cite{Coe}. For reasons of
optimization of calculations, simulations in the present paper are made
by the method of Wood and Chan.
The estimate of the mean value $\mathbf{E}\max_{i = \overline{1,N}} B^H(i/N)$
is a sample mean over the sample of size $n$. That is why
the total complexity of the algorithm is $O(n N\log(N))$.
\subsection{Clark's method}
Instead of generating samples and computing sample means, there exists
a~method of Clark \cite{clark} for approximating the expected maximum.
Due to this method, the first four moments of the random variable $\max
\{\xi_1, \ldots,\allowbreak\xi_N\}$, where $(\xi_1,\ldots,\xi_N)$ is a
Gaussian vector, are calculated approximately. Since the fractional
Brownian motion is a Gaussian process, we put $(\xi _1,\ldots,\xi_N)
=\break(B^H(1/N),\ldots,B^H(N/N))$ and apply Clark's method for
approximate computing of $\mathbf{E}\max_{i=\overline{1,N}}B^H (i/N )$.
Let us illustrate the basic idea of Clark's method of calculating
\linebreak$\mathbf{E}\max\{\xi, \eta, \tau\}$, where $ \xi, \eta, \tau$ are
Gaussian distributed.
\begin{proposition}
Let $\xi,\eta,\tau$ be Gaussian random variables. Put $a=\mathbf{Var}(\xi)+\mathbf{Var}
(\eta)-\mathbf{Cov}(\xi,\eta)$ and let $a>0$.
Denote $\alpha:=(\mathbf{E}\xi-\mathbf{E}\eta)/a$. Then we have
\begin{align}
\mathbf{E}\max\{\xi,\eta\}&=\varPhi(\alpha) \mathbf{E}\xi+ \varPhi (-\alpha) \mathbf{E}
\eta+ a \varphi(\alpha);
\nonumber\\
\mathbf{E} \bigl(\max\{\xi,\eta\} \bigr)^2&=\varPhi(\alpha) \mathbf{E}
\xi^2 + \varPhi(-\alpha) \mathbf{E}\eta^2 + a \varphi(\alpha) ( \mathbf{E}
\xi+\mathbf{E}\eta),\label{Clark-exp}
\end{align}
where
$\varphi(x)=\frac{1}{\sqrt{2\pi}}\exp(-\frac{x^2}{2} )$ and $\varPhi
(x)=\int_{-\infty}^x \phi(t)dt$.
\end{proposition}
So, the exact value of $\mathbf{E}\max\{\xi,\eta\}$ is obtained from the
previous proposition.
\begin{proposition}
Let $\xi,\eta,\tau$ be Gaussian random variables. Let $\mathbf{Corr}(\tau,\xi)$
and\break $\mathbf{Corr}(\tau,\eta)$ be known. Then
\begin{equation*}
\mathbf{Corr} \bigl(\tau,\max\{\xi,\eta\} \bigr)=\frac{\sqrt{\mathbf{Var}(\xi)}\mathbf{Corr}(\tau
,\xi)\varPhi(\alpha)+\sqrt{\mathbf{Var}(\eta)}\mathbf{Corr}(\tau,\eta)\varPhi(-\alpha
)}{\sqrt{\mathbf{E}(\max\{\xi,\eta\})^2-(\mathbf{E}\max\{\xi,\eta\})^2}}.
\end{equation*}
\end{proposition}
For approximate computing $\mathbf{E}\max\{\xi,\eta,\tau\}=\mathbf{E}\max\{\tau,\max\{
\xi,\eta\}\}$, we assume that $\max\{\xi,\eta\}$ has a Gaussian
distribution. In fact, this is not true, but it allows us to apply
formula \eqref{Clark-exp} for random variables $\tau$ and $\max\{\xi
,\eta\}$.
Thus, iteratively, we can calculate the approximate mean for any finite
number of Gaussian random variables.
\section{Computing the expected maximum}
In this section, we present results of approximate computing
$\mathbf{E}\max_{i=\overline{1,N}}B^H (i/N )$
by generating random samples and applying Clark's method. Also, we
compare the computational results obtained by these two methods.
The values of the Hurst index are taken from the set $\{
10^{-4}(1+4i),\, i=\overline{0,24}\}\cup\{10^{-2}i,\,i=\overline{1,9}\}
$. The values of $N$ are chosen from the set $\{2^j,j=\overline{8,19}\}$.
The values of $(B^H(1/N),B^H(2/N)$, $\ldots, B^H(N/N))$ are simulated
by the method of Wood and Chan for each pair $N,H$ with the sample size
$n=1000$. For each element in the sample, we calculate the following
functionals:
\begin{align}
\label{maxf} &\max_{i=\overline{1,N}}B^H (i/N ),
\\
\label{intf} &\frac{1}{N}\sum_{i=1}^N
B^H (i/N ).
\end{align}
\subsection{Approximation error of $ \frac{1}{N}\sum_{i=0}^N B^H (i/N)$}
We compute the sample mean and variance of \eqref{intf}.
The values of theoretical moments of \eqref{intf} are known:
\[
\mathbf{E}\frac{1}{N}\sum_{i=0}^N
B^H(i/N)=0,
\]
\[
\quad\mathbf{E} \Biggl(\frac{1}{N}\sum_{i=0}^N
B^H(i/N) \Biggr)^2=\frac{1}{N^{2H+2}}\sum
_{i=1}^{N}{i^{2H+1}}\to\frac{1}{(2H+2)},\quad N
\to\infty.
\]
\begin{figure}
\caption{Sample means of $\frac{1}
\label{integrE}
\end{figure}
\begin{figure}
\caption{Sample variances of $\frac{1}
\label{integrV}
\end{figure}
The sample moments of \eqref{intf} when $H=\{10^{-4}(1+4i),\,i=\overline
{0,24}\}$ and $N=\{2^j,\,j=\overline{8,19}\}$ are presented in
Figs.~\ref{integrE} and \ref{integrV}.
In the figures, the lines indicate the theoretical moments and
confidence intervals corresponding to the reliability of 95\%.
The data confirm the correctness of calculations of \eqref{intf} with
the reliability of 95\% even for small values of $H$.
\subsection{Computing functional $\max_{i = \overline{1, N}} B^H (i/N) $}
For each pair $ N, H$, we obtain the sample of values of the maximum
functional~\eqref{maxf}
with sample size $1000$. For some values of $ H $, the sample means
and approximate values of the expected maximum, obtained by Clark's
method, are presented in Table~\ref{tableWC}.
\begin{table}
\caption{\xch{The approximate values of the expected maximum}{}} \label{tableWC}
\renewcommand{1.05}{1.05}
\tabcolsep=4pt
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}l@{}cccc@{}cccc@{}}
\hline
&\multicolumn{4}{c}{Sample means of \eqref{maxf}} & \multicolumn{4}{c}{Values due to Clark's method}\\
\cline{2-5}
\cline{6-9}
$N\diagdown H$ & 0.0900 & 0.0100 & 0.0013 & 0.0001 & 0.0900 & 0.0100 & 0.0013 & 0.0001 \\
\hline
$2^8$ & 1.7017 & 2.0019 & 1.9897 & 1.9769 & 1.1738 & 1.8691 & 1.9696 & 1.9839 \\
$2^9$ & 1.7693 & 2.0875 & 2.1602 & 2.1360 & 1.1903 & 1.9991 & 2.1194 & 2.1366 \\
$2^{10}$ & 1.9487 & 2.2504 & 2.3047 & 2.2854 & 1.1971 & 2.1193 & 2.2604 & 2.2806 \\
$2^{11}$ & 2.0138 & 2.4203 & 2.4446 & 2.4184 & 1.1966 & 2.2310 & 2.3939 & 2.4174 \\
$2^{12}$ & 2.0886 & 2.5086 & 2.5948 & 2.5334 & 1.1910 & 2.3351 & 2.5208 & 2.5476 \\
$2^{13}$ & 2.1938 & 2.6396 & 2.6934 & 2.6885 & 1.1822 & 2.4327 & 2.6420 & 2.6723 \\
$2^{14}$ & 2.2591 & 2.7612 & 2.7829 & 2.7940 & 1.1714 & 2.5242 & 2.7579 & 2.7919 \\
$2^{15}$ & 2.3327 & 2.8837 & 2.9452 & 2.9258 & 1.1586 & 2.6104 & 2.8693 & 2.9070 \\
$2^{16}$ & 2.4050 & 2.9973 & 3.0526 & 3.0464 & 1.1436 & 2.6917 & 2.9765 & 3.0181 \\
$2^{17}$ & 2.4620 & 3.0791 & 3.1386 & 3.1121 & 1.1263 & 2.7685 & 3.0798 & 3.1256 \\
$2^{18}$ & 2.5328 & 3.1900 & 3.2102 & 3.2421 & 1.1068 & 2.8412 & 3.1798 & 3.2297 \\
$2^{19}$ & 2.5597 & 3.3481 & 3.3487 & 3.3663 & 1.0855 & 2.9101 & 3.2766 & 3.3307 \\
\hline
\end{tabular*}
\end{table}
Within the data obtained by the different methods, we get that the
approximate values obtained by Clark's algorithm differ from the sample
means at most by 57\xch{.}{,}6\% when $H=0.09$, by 13\xch{.}{,}08\% when $H=0.01$, by
2\xch{.}{,}85\% when $H=0.0013$, and by 1.06\% when $H=0.0001$.
Thus, when $H\leq0.0013$,
the values of the expected maximum, obtained by these completely
different methods, are numerically identical. This indicates that the
sample mean is approximately equal to $\mathbf{E}\max_{i=\overline{1,N}}B^H(i/N)$.
\section{Bounds for the approximation error}
In this section, we find bounds for the error of approximation \eqref
{appr}. As noted before, $\mathbf{E}\max_{t \in[0,1]}B^H(t)$ $\ge(4H \pi e
\ln2)^{-1/2}$.
It is expected that the obtained sample means of the maximum functional
\eqref{maxf} also satisfy this constraint.
In Fig.~\ref{boundsPIC}, the sample means and the values of $(4H \pi e
\ln2)^{-1/2}$ are presented.
\begin{figure}
\caption{Sample means of the maximal functional}
\label{boundsPIC}
\end{figure}
As one can see, the inequality $\mathbf{E}\max_{i =\overline{1,N}}B^H(i/N) \ge
(4H \pi e \ln2)^{-1/2}$ is false for small values of $ H$.
There are two possible explanations of this fact: either there is a
significant error in calculations, or the approximation error $\varDelta
_N$ grows rapidly as $ H \to0$. Let us verify these two explanations.
From \cite[Theorem 4.2]{Borovkov} we get that the expectation of the
maximal functional \eqref{maxf} grows as $H\to0$ and has the limit
\begin{equation}
\label{Eqlim} \lim_{H\to0}\mathbf{E}\max_{i=\overline{1,N}}B^{H}(i/N)=
\frac{1}{\sqrt{2}}\mathbf{E} \Bigl(\max_{i=\overline{1,N}}\xi_i
\Bigr)^+,
\end{equation}
where $\xi_1,\ldots,\xi_N$ are i.i.d. r.v.s, $\xi_1\sim N(0,1)$, and
$x^+:=\max\{0,x\}$.\vadjust{\eject}
Moreover, the rate of convergence in \eqref{Eqlim} is also obtained in
\cite{Borovkov}:
\begin{equation}
\label{Evel} 0 \leq\frac{1}{\sqrt{2}}\mathbf{E} \Bigl(\max_{i=\overline{1,N}}
\xi_i \Bigr)^+ - \mathbf{E}\max_{i=\overline{1,N}}B^{H}(i/N)
\leq1-\frac{1}{N^{2H}}.
\end{equation}
The right-hand side of \eqref{Evel} does not exceed 0.1 when $N=2^{20}$
and $H<0.0038$.
We apply two approaches to calculate $\frac{1}{\sqrt{2}}\mathbf{E} (\max_{i=\overline{1,N}}\xi_i )^+$. The first one is Monte Carlo simulations.
The sample means of $\frac{1}{\sqrt{2}} (\max_{i=\overline{1,N}}\xi_i
)^+$ are presented in Table~\ref{table4} for several sample sizes $n$.
\begin{table}
\caption{\xch{Values of limit \eqref{Eqlim}}{}}
\label{table4}
\tabcolsep=3pt
\renewcommand{1.05}{1.05}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}lccccc@{}c@{}}
\hline
&
\multicolumn{5}{c}{\rule{0pt}{10pt}Sample means of $\frac{1}{\sqrt{2}} (\max_{i=\overline{1,N}}\xi_i )^+$}&
$N \int_{0.5}^{1}\mathrm{erf}^{(-1)}(2z-1) z^{N-1}d z$\\
\noalign{\vspace*{1pt}}\cline{2-6}
$N\diagdown n$ & 1000 & 5000 & 10000 & 15000 & 20000 &\\
\hline
$2^8$ & 1.9908 & 1.9908 & 1.9957 & 1.9965 & 1.9961 & 1.9989\\
$2^9$ & 2.1462 & 2.1506 & 2.1520 & 2.1526 & 2.1525 &2.1524\\
$2^{10}$ & 2.3071 & 2.3033 & 2.3006 & 2.3004 & 2.2994 &2.2969\\
$2^{11}$ & 2.4409 & 2.4362 & 2.4360 & 2.4371 & 2.4351 &2.4337\\
$2^{12}$ & 2.5712 & 2.5657 & 2.5648 & 2.5635 & 2.5643 &2.5640\\
$2^{13}$ & 2.6824 & 2.6847 & 2.6877 & 2.6874 & 2.6867 &2.6887\\
$2^{14}$ & 2.8150 & 2.8066 & 2.8065 & 2.8060 & 2.8078 &2.8082\\
$2^{15}$ & 2.9190 & 2.9259 & 2.9235 & 2.9248 & 2.9244 &2.9232\\
$2^{16}$ & 3.0301 & 3.0372 & 3.0353 & 3.0340 & 3.0348 &3.0343\\
$2^{17}$ & 3.1387 & 3.1372 & 3.1424 & 3.1418 & 3.1414 &3.1417\\
$2^{18}$ & 3.2394 & 3.2456 & 3.2469 & 3.2460 & 3.2461 &3.2458\\
$2^{19}$ & 3.3402 & 3.3442 & 3.3450 & 3.3458 & 3.3460 &3.3469\\
\hline
\end{tabular*}
\end{table}
As we see, with increasing sample size 20 times, the sample means
differ at most by 0.33\% for each $N$. Therefore, to ensure the
accuracy of calculations, it suffices to put $n = 1000$.
Under such conditions, technical resources allow us to calculate the
sample means for larger values of $ N$. In Table~\ref{table5}, the
values of the sample means are presented for $ N =\{2 ^ {20}, 2 ^ {21},
2 ^ {22}, 2 ^ {23}, 2 ^ {24}, 2 ^ {25} \} $ .\vadjust{\eject}
\begin{table}[h]
\caption{\xch{Values of limit \eqref{Eqlim} for $N\geq 2^{20}$}{}}
\label{table5}
\begin{tabular}{lcccccc}
\hline
$N$ & \rule{0pt}{10pt}$2^{20}$ & $2^{21}$ & $2^{22}$ & $2^{23}$ & $2^{24}$ & $2^{25}$ \\\hline
Sample means of $\frac{1}{\sqrt{2}} (\max_{i=\overline{1,N}}\xi_i )^+$ & \rule{0pt}{10pt} 3.4516 & 3.536 & 3.627 & 3.724 & 3.816 & 4.073 \\
$N \int_{0.5}^{1}\mathrm{erf}^{(-1)}(2z-1) z^{N-1}d z$ & 3.4452 & 3.541 & 3.634 & 3.726 & 3.815 & 3.902 \\\hline
\end{tabular}
\end{table}
Instead of generating random samples, we may calculate the value of
\linebreak $\frac{1}{\sqrt{2}}\mathbf{E} (\max_{i=\overline{1,N}}\xi_i )^+$
as an integral.
\begin{proposition}
Let $\xi_1,\ldots,\xi_N$ be i.i.d. r.v.s, $\xi_1\sim N(0,1)$. Then
\begin{align}
\nonumber
\frac{1}{\sqrt{2}}\mathbf{E} \Bigl(\max_{i=\overline{1,N}}
\xi_i \Bigr)^+&= \frac{N}{\sqrt{2}} \int_{1/2}^{1}
\varPhi^{(-1)}(z) z^{N-1}d z
\\
\label{intmax}&=N \int_{1/2}^{1}
\mathrm{erf}^{(-1)}(2z-1) z^{N-1}d z,
\end{align}
where $\varPhi^{(-1)}$ is the inverse function of $\varPhi(x)=\int_{-\infty}^{x}\frac{e^{-y^2/2}}{\sqrt{2\pi}}dy$, $x\in\mathbb{R}$, and $\mathrm
{erf}^{(-1)}$ is the inverse function of the error function $\mathrm{erf}$.
\end{proposition}
\begin{proof}
The proposition follows straightforwardly by quantile transformation.
\end{proof}
We immediately get the following corollary.
\begin{corollary}For any $H\in(0,1)$ and $N\geq1$, we have
\begin{equation}
\label{ErB} \mathbf{E}\max_{i=\overline{1,N}}B^{H}(i/N)\leq N \int
_{1/2}^{1}\mathrm{erf}^{(-1)}(2z-1)
z^{N-1}d z.
\end{equation}
\end{corollary}
The integrand in \eqref{ErB} is not an elementary function, but its
values are tabulated, and there exist methods for its numerical
computing. For the present paper, the integral $N \int_{0.5}^{1}\mathrm
{erf}^{(-1)}(2z-1) z^{N-1}d z$ is calculated numerically, and the
corresponding values are presented in Tables~\ref{table4} and \ref{table5}.
By maintaining the accuracy of calculations, the maximum possible value
of $N$ is $2^{31}$, and the value of the integral reaches 4.390.
The values of $\frac{1}{\sqrt{2}}\mathbf{E} (\max_{i=\overline{1,N}}\xi_i
)^+$, obtained by the two methods, differ at most by 0.44 \% when
$N\leq2^{24}$. When $N=2^{20}$, the absolute error of numerical
computing of \eqref{intmax} is less than $1.3\times10^{-5}$.
Thereafter, for $N=2^{20}$, inequality \eqref{ErB} becomes
\begin{equation}
\label{ErightB} \mathbf{E}\max_{i=\overline{1,N}}B^H (i/N ) \leq 3.4452,
H\in(0,1).
\end{equation}
Let us return to the lower bound for $ \mathbf{E}\max_ {i = \overline{1, N}}
B^H (i / N )$. By Sudakov's inequality \cite{Borovkov,Sudakov} we have
\begin{equation}
\label{EleftB} \mathbf{E}\max_{i=\overline{1,N}}B^H (i/N )\ge\sqrt{
\frac{\ln(N+1)}{ N^{2H}2\pi\ln2}}.
\end{equation}
Moreover, the maximum of the right-hand side of \eqref{EleftB} equals
$(4H \pi e \ln2)^{- 1/2} $ and is reached when $ N = [e ^ {1 / {2H}} ] $.
The values of the lower bound are presented in Table~\ref{values}.
\begin{table}
\caption{Lower bounds}
\label{values}
\begin{tabular*}{\textwidth}{@{\extracolsep{\fill}}clcccccc@{}}
\hline
&& $H$ & 0.5000 & 0.0900 & 0.0100 & 0.0013 & 0.0001 \\
\hline
\rule{0pt}{9pt}$(2\sqrt{H \pi e \ln2})^{-1}$ & && 0.5811 & 1.3696 & 4.1089 & 11.396 & 41.089 \\
$e^{1/2H}$ & && 2.7183 & 258.67 & 5.18 $\times10^{21}$ & 1.1 $\times 10^{167}$ & $2.97\times10^{2171}$ \\\hline
& $N$ && & & & & \\\hline
& $2^8$ && 0.0705 & 0.6853 & 1.0679 & 1.1207 & 1.1282 \\
& $2^9$ && 0.0529 & 0.6828 & 1.1246 & 1.1873 & 1.1963 \\
& $2^{10}$ && 0.0394 & 0.6761 & 1.1772 & 1.2503 & 1.2608 \\
& $2^{11}$ && 0.0292 & 0.6662 & 1.2260 & 1.3101 & 1.3222 \\
& $2^{12}$ && 0.0216 & 0.6537 & 1.2717 & 1.3671 & 1.3808 \\
$ (\frac{\ln(N+1)}{ N^{2H}2\pi\ln2} )^{1/2}$ & $2^{13}$ && 0.0159 & 0.6393 & 1.3145 & 1.4217 & 1.4371 \\
& $2^{14}$ && 0.0117 & 0.6233 & 1.3547 & 1.4740 & 1.4913 \\
& $2^{15}$ && 0.0085 & 0.6061 & 1.3925 & 1.5244 & 1.5435 \\
& $2^{16}$ && 0.0062 & 0.5881 & 1.4283 & 1.5729 & 1.5940 \\
& $2^{17}$ && 0.0045 & 0.5696 & 1.4620 & 1.6199 & 1.6429 \\
& $2^{18}$ && 0.0033 & 0.5507 & 1.4940 & 1.6653 & 1.6905 \\
& $2^{19}$ && 0.0024 & 0.5315 & 1.5244 & 1.7094 & 1.7367 \\
\hline
\end{tabular*}
\end{table}
Combining Tables~\ref{tableWC}, \ref{table4}, and \ref{values}, we get
that all obtained sample means for\break $\mathbf{E}\max_{i=\overline
{1,N}}B^{H}(i/N)$ satisfy the constraint
\[
\biggl(\frac{\ln(N+1)}{ N^{2H}2\pi\ln2} \biggr)^{1/2} \leq\mathbf{E}\max_{i=\overline
{1,N}}B^{H}(i/N)
\leq N \int_{1/2}^{1}\mathrm{erf}^{(-1)}(2z-1)
z^{N-1}d z.
\]
Therefore, even with small values of the parameter $H$, the simulation
does not lead to contradiction.
Now let us find a lower bound for the approximation error $ \varDelta_N$.
We prove the following proposition.
\begin{proposition} Let $\varDelta_N$ be defined by \eqref{apprbounds1}.
Then, for any $H\in(0,1)$ and $N\geq1$, we have
\begin{equation}
\label{DeltLeft} \varDelta_N \geq\frac{1}{2\sqrt{H \pi e \ln2}} - N \int
_{1/2}^{1}\mathrm{erf}^{(-1)}(2z-1)
z^{N-1}d z.
\end{equation}
\end{proposition}
\begin{proof}
The statement follows from inequalities \eqref{maxbounds} and \eqref{ErB}.
\end{proof}
From this it follows that, for a fixed $N$, the approximation error
$\varDelta_N \to+ \infty$ as $ H \to0$. We also have the following
evident corollaries.
\begin{corollary}
Let $N=2^{20}$. Then
\begin{equation}
\label{upb} \varDelta_N \geq\frac{0.2055}{\sqrt{H }} - 3.4452,\quad H\in
(0,1).
\end{equation}
\end{corollary}
\begin{proof}
The statement follows from inequalities \eqref{EleftB} and \eqref{DeltLeft}.
\end{proof}
\begin{corollary}Let $N=2^{20}$. Then for the relative error, we have
\begin{equation}
\label{deltLeft} \delta_H:=\frac{\varDelta_N} { \mathbf{E}\max_{t \in
[0,1]}B^H(t)}\geq1- 16.765 \sqrt{H},
\quad H\in(0,1).
\end{equation}
\end{corollary}
\begin{proof}
The statement follows from inequalities \eqref{maxbounds} and \eqref{ErightB}.
\end{proof}\goodbreak
When $N=2^{20}$, from inequalities \eqref{upb} and \eqref{deltLeft} we
get the following conclusions:
\begin{itemize}
\item if $H<0.00022$, then the relative error $\delta_H\geq75\%$, and
$\varDelta_N>10.34$;
\item if $H<0.00089$, then the relative error $\delta_H\geq50\%$, and
$\varDelta_N>3.45$;
\item if $H<0.0020$, then the relative error $\delta_H\geq25\%$, and
$\varDelta_N>1.15$;
\item if $H<0.0028$, then the relative error $\delta_H\geq10\%$, and
$\varDelta_N>0.38$;
\item if $H<0.0032$, then the relative error $\delta_H\geq5\%$, and
$\varDelta_N>0.18$;
\item if $H<0.0035$, then the relative error $\delta_H\geq1\%$, and
$\varDelta_N>0.03$.
\end{itemize}
Thus, we conclude that the estimation of $ \mathbf{E}\max_{t \in[0,1]} B^H
(t) $ by Monte Carlo simulations leads to significant errors for small
values of the parameter $H$.
\section*{Acknowledgments}
The author is grateful to prof. Yu. Mishura for numerous interesting
discussions and active support.
\end{document} |
\begin{document}
\begin{abstract}
Let $X$, $Y$ and $Z$ be Banach spaces and let $U$ be a subspace of $\mathcal{L}(X^*,Y)$, the Banach
space of all operators from $X^*$ to~$Y$. An operator $S: U \to Z$
is said to be $(\ell^s_p,\ell_p)$-summing (where $1\leq p <\infty$) if there is a constant $K\geq 0$ such that
$$
\Big( \sum_{i=1}^n \|S(T_i)\|_Z^p \Big)^{1/p}
\le K
\sup_{x^* \in B_{X^*}} \Big(\sum_{i=1}^n \|T_i(x^*)\|_Y^p\Big)^{1/p}
$$
for every $n\in \mathbb{N}$ and every $T_1,\dots,T_n \in U$.
In this paper we study this class of operators, introduced by Blasco and Signes
as a natural generalization of the $(p,Y)$-summing operators of Kislyakov.
On one hand, we discuss Pietsch-type domination results for $(\ell^s_p,\ell_p)$-summing operators. In this direction,
we provide a negative answer to a question raised by Blasco and Signes, and we also
give new insight on a result by Botelho and Santos.
On the other hand, we extend to this setting the classical theorem of Kwapie\'{n} characterizing those
operators which factor as $S_1\circ S_2$, where $S_2$ is absolutely $p$-summing and
$S_1^*$ is absolutely $q$-summing ($1<p,q<\infty$ and $1/p+1/q \leq 1$).
\end{abstract}
\title{A class of summing operators acting in spaces of operators}
\section{Introduction}
Summability of series in Banach spaces is a classical central topic in the field of mathematical analysis.
This study is faced from an abstract point of view as a part of the general analysis of the summability properties of operators,
using some remarkable results of the theory of operator ideals. Pietsch's Factorization Theorem is nowadays the central tool in this topic,
and different versions of this result adapted to other contexts are currently known. This theorem establishes that operators that transform weakly $p$-summable sequences
into absolutely $p$-summable ones can always be dominated by an integral, and factored through a subspace of an $L_p$-space. Some related relevant
results can also be formulated in terms of integral domination and factorization of operators. For example, recall that an operator
between Banach spaces $S:X \to Y$ is said to be
$(p,q)$-dominated (where $1<p,q<\infty$ and $1/p+1/q=1/r\leq 1$) if for every couple of finite sequences $(x_i)_{i=1}^n$ in~$X$ and $(y_i^*)_{i=1}^n$ in~$Y^*$, the
strong $\ell_r$-norm of the sequence $(\langle S(x_i), y_i^* \rangle )_{i=1}^n$ is bounded above by the product of the
weak $\ell_p$-norm of $(x_i)_{i=1}^n$ and the weak $\ell_q$-norm of $(y_i^*)_{i=1}^n$
(up to a multiplying constant independent of both sequences and their length). Kwapie\'{n}'s Factorization
Theorem~\cite{kwa} states that an operator is $(p,q)$-dominated
if and only if it can be written as the composition $S_1\circ S_2$ of operators such that $S_2$ is absolutely $p$-summing
and the adjoint $S_1^*$ is absolutely $q$-summing (cf. \cite[\S 19]{def-flo}).
The aim of this paper is to continue with the specific study of the summability properties of operators defined on spaces of operators.
Throughout this paper $X$, $Y$ and $Z$ are Banach spaces.
\begin{definition}[Blasco-Signes, \cite{bla-sig}]\label{definition:pPettisSumming}
Let $1\leq p<\infty$ and let $U$ be a subspace of $\mathcal L(X^*,Y)$. An operator $S: U \to Z$ is said to be {\em $(\ell^s_p,\ell_p)$-summing} if
there is a constant $K\geq 0$ such that
\begin{equation}\label{eqn:psumming}
\Big( \sum_{i=1}^n \|S(T_i)\|_Z^p \Big)^{1/p}
\le K
\sup_{x^* \in B_{X^*}} \Big(\sum_{i=1}^n \|T_i(x^*)\|_Y^p\Big)^{1/p}
\end{equation}
for every $n\in \mathbb{N}$ and every $T_1,\dots,T_n \in U$.
\end{definition}
Some fundamental properties of this type of operators are already known, as well as the main picture of their summability properties.
The works of Blasco and Signes~\cite{bla-sig}
and Botelho and Santos \cite{bot-san} fixed the framework and solved a great part of the natural problems appearing in this context. In
the particular case when $U$ is the injective tensor product $X \hat{\otimes}_\varepsilon Y$
(naturally identified as a subspace of~$\mathcal{L}(X^*,Y)$),
$(\ell^s_p,\ell_p)$-summing operators had been studied earlier by Kislyakov~\cite{kis}
as ``$(p,Y)$-summing'' operators. In particular, he gave a Pietsch-type domination theorem
for $(\ell^s_p,\ell_p)$-summing operators defined on $X \hat{\otimes}_\varepsilon Y$
(see \cite[Theorem~1.1.6]{kis}). This led to the natural question of whether
a Pietsch-type domination theorem holds for arbitrary $(\ell^s_p,\ell_p)$-summing operators, see \cite[Question~5.2]{bla-sig}.
Botelho and Santos extended Kislyakov's result by showing that this is the case when $U$ is Schwartz's $\varepsilon$-product $X\varepsilon Y$, i.e.
the subspace of all operators from~$X^*$ to~$Y$ which are ($w^*$-to-norm) continuous when restricted to~$B_{X^*}$
(see \cite[Theorem~3.1]{bot-san}).
This paper is organized as follows.
In Section~\ref{section:Pietsch} we give new insight on the Botelho-Santos theorem
and we provide a negative answer to the aforementioned question, see Example~\ref{example:counterBS}.
To this end, we characterize those $(\ell^s_p,\ell_p)$-summing operators admitting
a Pietsch-type domination by means of the strong operator topology (Theorem~\ref{theorem:equiv}).
All of this is naturally connected with a discussion on measurability properties of operators which might be
of independent interest.
In Section~\ref{section:Kwapien} we start a general analysis of the summability properties of operators defined on spaces of operators
that imply similar properties for the adjoint maps. Our main result along this way is a Kwapie\'{n}-type theorem
involving the special summation that arises in this setting related to the strong operator topology, see Theorem~\ref{theorem:equiv2}.
\subsection*{Notation and terminology}
All our Banach spaces are real and all our topological spaces are Hausdorff. By a {\em subspace} of a Banach space we mean a norm-closed linear subspace.
By an {\em operator} we mean a continuous linear map between Banach spaces.
The norm of a Banach space~$X$ is denoted by $\|\cdot\|_X$ or simply $\|\cdot\|$. We write
$B_X=\{x\in X:\|x\|\leq 1\}$ (the closed unit ball of~$X$). The topological dual of~$X$ is denoted by~$X^*$ and
we write $w^*$ for its weak$^*$-topology. The evaluation of a functional $x^*\in X^*$
at $x\in X$ is denoted by either $\langle x,x^*\rangle$ or $\langle x^*,x\rangle$.
We write $X\not \supseteq \ell_1$ to say that $X$ does not contain subspaces isomorphic to~$\ell_1$.
We denote by $\mathcal{L}(X^*,Y)$ the Banach space of all operators from~$X^*$ to~$Y$, equipped
with the operator norm. The {\em strong operator topology} ({\em SOT} for short) on $\mathcal{L}(X^*,Y)$
is the locally convex topology for which the sets
$$
\{T\in \mathcal{L}(X^*,Y): \, \|T(x^*)\|_Y<\varepsilon\},
\quad x^*\in X^*,
\quad \varepsilon>0,
$$
are a subbasis of open neighborhoods of~$0$. That is, a net $(T_\alpha)$ in $\mathcal{L}(X^*,Y)$ is SOT-convergent to~$0$
if and only if $\|T_\alpha(x^*)\|_Y\to 0$ for every $x^*\in X^*$. Given a compact topological space~$L$, we denote by
$C(L)$ the Banach space of all real-valued continuous functions on~$L$, equipped with the supremum norm.
Thanks to Riesz's representation theorem, the elements of $C(L)^*$ are identified with regular Borel signed measures on~$L$.
We denote by $P(L) \subseteq C(L)^*$ the convex $w^*$-compact set of all regular Borel probability measures on~$L$.
For each $t\in L$, we write $\delta_t\in P(L)$ to denote the evaluation functional at~$t$, i.e. $\delta_t(h):=h(t)$
for all $h\in C(L)$. A function defined on~$L$ with values in a Banach space is said to be {\em universally strongly measurable} if it is
strongly $\mu$-measurable for all $\mu \in P(L)$. We will mostly consider the case when $L$ is the dual closed unit ball $B_{X^*}$
equipped with the weak$^*$-topology.
\section{Pietsch-type domination of $(\ell^s_p,\ell_p)$-summing operators}\label{section:Pietsch}
Throughout this section we fix $1\leq p<\infty$.
The aforementioned Pietsch-type domination theorem for $(\ell^s_p,\ell_p)$-summing operators proved in~\cite[Theorem~3.1]{bot-san} reads as follows:
\begin{theorem}[Botelho-Santos]\label{theorem:BS}
Let $U$ be a subspace of $X\varepsilon Y$ and let $S:U\to Z$ be an $(\ell^s_p,\ell_p)$-summing operator. Then
there exist a constant $K\geq 0$ and $\mu \in P(B_{X^*})$ such that
\begin{equation}\label{eqn:BotelhoSantos}
\|S(T)\|_Z \leq K \Big(\int_{B_{X^*}}\|T(\cdot)\|_{Y}^p \, d\mu\Big)^{1/p}
\end{equation}
for every $T\in U$.
\end{theorem}
A first comment is that the integral of inequality~\eqref{eqn:BotelhoSantos} is always well-defined for any $T\in X\varepsilon Y$ and $\mu\in P(B_{X^*})$. Indeed,
the restriction $T|_{B_{X^*}}$ is ($w^*$-to-norm) continuous, so
it is universally strongly measurable. Since in addition $T|_{B_{X^*}}$ is bounded, it belongs to the Lebesgue-Bochner space $L_p(\mu,Y)$.
\begin{remark}\label{remark:BS}
Actually, Theorem~\ref{theorem:BS} is proved in~\cite[Theorem~3.1]{bot-san} for operators~$S$ defined on
a subspace~$U$ contained in
$$
\mathcal{L}_{w^*,\|\cdot\|}(X^*,Y)=\{T\in \mathcal L(X^*,Y):\, T \text{ is ($w^*$-to-norm) continuous}\}.
$$
The proof given there is based on the abstract Pietsch-type domination theorem of Botelho, Pellegrino and Rueda~\cite{bot-pel-rue},
and the argument works for subspaces of $X\varepsilon Y$ as well. We stress that $\mathcal{L}_{w^*,\|\cdot\|}(X^*,Y)$
consists of finite rank operators, one has
$$
\overline{\mathcal{L}_{w^*,\|\cdot\|}(X^*,Y)}^{\|\cdot\|}=X\hat{\otimes}_\varepsilon Y \subseteq X \varepsilon Y
$$
and, in general, $\mathcal{L}_{w^*,\|\cdot\|}(X^*,Y)\neq X\varepsilon Y$.
\end{remark}
We next provide a more direct proof of Theorem~\ref{theorem:BS}. Yet another approach will be presented at the end of this section.
\begin{proof}[Proof of Theorem~\ref{theorem:BS}] For any $n\in \mathbb{N}$ and $\bar{T}=(T_1,\dots,T_n)\in U^n$, we define
$$
\Delta_{\bar{T}}: P(B_{X^*}) \to \mathbb{R}, \quad
\Delta_{\bar{T}}(\mu):=\sum_{i=1}^n \|S(T_i)\|_Z^p
- K^{p}
\int_{B_{X^*}} \sum_{i=1}^n \|T_i(\cdot)\|_{Y}^p \, d\mu,
$$
where $K\geq 0$ is a constant as in Definition~\ref{definition:pPettisSumming}.
Clearly, $\Delta_{\bar{T}}$ is convex and $w^*$-continuous, because the real-valued function
$$
x^*\mapsto \sum_{i=1}^n \|T_i(x^*)\|_Y^p
$$
is $w^*$-continuous on~$B_{X^*}$. This function attains its supremum at some $x_{\bar{T}}^*\in B_{X^*}$. Bearing
in mind that $S$ is $(\ell^s_p,\ell_p)$-summing, we get $\Delta_{\bar{T}}(\delta_{x^*_{\bar{T}}})\leq 0$.
Note also that the collection of all functions
of the form $\Delta_{\bar{T}}$ is a convex cone in~$\mathbb{R}^{P(B_{X^*})}$. Indeed, given $\bar{T}=(T_1,\dots,T_n)\in U^n$,
$\bar{R}=(R_1,\dots,R_m)\in U^m$, $\alpha\geq 0$ and $\beta \geq 0$, we have
$\alpha\Delta_{\bar{T}}+\beta\Delta_{\bar{R}}=\Delta_{\bar{H}}$, where
$$
\bar{H}=(\alpha^{1/p}T_1,\dots,\alpha^{1/p}T_n,\beta^{1/p}R_1,\dots,\beta^{1/p}R_m).
$$
Therefore, by Ky Fan's Lemma (see e.g. \cite[Lemma~9.10]{die-alt}),
there is $\mu \in P(B_{X^*})$ such that $\Delta_{\bar{T}}(\mu)\leq 0$ for all functions of the form $\Delta_{\bar{T}}$.
In particular, inequality~\eqref{eqn:BotelhoSantos} holds for all $T\in U$.
\end{proof}
Clearly, in order to extend the statement of Theorem~\ref{theorem:BS} to other subspaces $U$ of $\mathcal{L}(X^*,Y)$,
the real-valued map $\|T(\cdot )\|_Y$ needs to be $\mu$-measurable for every $T\in U$. This holds automatically if $U$ is a subspace of
$$
\mathcal{UM}(X^*,Y):=\{T\in \mathcal{L}(X^*,Y): \, T|_{B_{X^*}} \mbox{ is universally strongly measurable}\}.
$$
Note that $\mathcal{UM}(X^*,Y)$ is a SOT-sequentially closed subspace of $\mathcal{L}(X^*,Y)$.
\begin{example}\label{example:UM1}
\begin{enumerate}
\item[(i)] We have $X\varepsilon Y \subseteq \mathcal{UM}(X^*,Y)$ according to the comment preceding Remark~\ref{remark:BS}.
\item[(ii)] More generally, {\em every ($w^*$-to-weak) continuous operator from~$X^*$ to~$Y$
belongs to $\mathcal{UM}(X^*,Y)$.} Indeed, just bear in mind that any weakly continuous function from a compact topological space to a Banach space is universally strongly measurable,
see \cite[Proposition~4]{ari-alt}. We stress that, by the Banach-Dieudonn\'{e} theorem, an operator $T:X^*\to Y$ is ($w^*$-to-weak) continuous
if and only if the restriction $T|_{B_{X^*}}$ is ($w^*$-to-weak) continuous.
\item[(iii)] In particular, {\em if $X$ is reflexive, then $\mathcal{L}(X^*,Y)=\mathcal{UM}(X^*,Y)$}.
\end{enumerate}
\end{example}
\begin{example}\label{example:UM2}
{\em If $X \not \supseteq \ell_1$, then every $T\in \mathcal{L}(X^*,Y)$ with separable range
belongs to~$\mathcal{UM}(X^*,Y)$.} Indeed,
a result of Haydon~\cite{hay-J} (cf. \cite[Theorem~6.9]{van})
states that $X^{**}=\mathcal{UM}(X^*,\mathbb{R})$ if and only if $X\not\supseteq \ell_1$. The conclusion now follows from
Pettis' measurability theorem applied to $T|_{B_{X^*}}$ and each $\mu\in P(B_{X^*})$,
see e.g. \cite[p.~42, Theorem~2]{die-uhl-J}.
\end{example}
So, we will look for conditions ensuring that an $(\ell^s_p,\ell_p)$-summing operator
defined on a subspace of $\mathcal{UM}(X^*,Y)$ is $(\ell^s_p,\ell_p)$-controlled, according to the following:
\begin{definition}\label{definition:dominated}
Let $U$ be a subspace of $\mathcal{UM}(X^*,Y)$. An operator $S: U \to Z$ is said to be {\em $(\ell^s_p,\ell_p)$-controlled}
if there exist a constant $K\geq 0$ and $\mu \in P(B_{X^*})$ such that
\begin{equation}\label{eqn:domi}
\|S(T)\|_Z \leq K \Big(\int_{B_{X^*}} \|T(\cdot)\|_{Y}^p\, d\mu\Big)^{1/p}
\end{equation}
for every $T\in U$.
\end{definition}
\begin{proposition}\label{proposition:facto}
Let $U$ be a subspace of $\mathcal{UM}(X^*,Y)$ and let $S: U \to Z$ be an operator.
Then $S$ is $(\ell^s_p,\ell_p)$-controlled if and only if there exist $\mu\in P(B_{X^*})$, a subspace $W \subseteq L_p(\mu,Y)$ and an operator $\tilde{S}:W \to Z$ such that $S$ factors as
$$
\xymatrix@R=3pc@C=3pc{U
\ar[r]^{S} \ar[d]_{i_\mu|_U} & Z\\
W \ar@{->}[ur]_{\tilde{S}} & \\
}
$$
where $i_\mu:\mathcal{UM}(X^*,Y)\to L_p(\mu,Y)$ is the operator that maps each $T\in \mathcal{UM}(X^*,Y)$ to the equivalence
class of $T|_{B_{X^*}}$ in~$L_p(\mu,Y)$.
\end{proposition}
\begin{proof}
It is clear that such factorization implies that $S$ is $(\ell^s_p,\ell_p)$-controlled. Conversely, inequality~\eqref{eqn:domi} in Definition~\ref{definition:dominated}
allows us to define a linear continuous map $\tilde{S}_0: i_\mu(U) \to Z$ by declaring $\tilde{S}_0(i_\mu(T)):=S(T)$ for all $T\in U$.
Now, we can extend $\tilde{S}_0$ to an operator $\tilde{S}$ from $W:=\overline{i_\mu(U)}$ to~$Z$. Clearly, we have $\tilde{S}\circ i_\mu|_U=S$.
\end{proof}
We next give a couple of applications of Proposition~\ref{proposition:facto} related to topological properties
of $(\ell^s_p,\ell_p)$-controlled operators.
The class of Banach spaces~$X$ such that $L_1(\mu)$ is separable for every $\mu \in P(B_{X^*})$
is rather wide. It contains, for instance, all weakly compactly generated Banach spaces
(cf. \cite[Theorem~13.20 and Corollary~14.6]{fab-ultimo})
as well as all Banach spaces not containing subspaces isomorphic to~$\ell_1$ (see \cite[Proposition~B.1]{avi-mar-ple}).
For such spaces we have:
\begin{corollary}\label{corollary:SeparableRange}
Suppose that $L_1(\mu)$ is separable for every $\mu \in P(B_{X^*})$ and that $Y$ is separable.
Let $U$ be a subspace of $\mathcal{UM}(X^*,Y)$ and let $S:U \to Z$ be an $(\ell^s_p,\ell_p)$-controlled operator. Then
$S$ has separable range.
\end{corollary}
\begin{proof}
Under such assumptions, $L_p(\mu,Y)$ is separable for any $\mu\in P(B_{X^*})$. The result
now follows from Proposition~\ref{proposition:facto}.
\end{proof}
A subset of a Banach space is said to be {\em weakly precompact} if every sequence in it
admits a weakly Cauchy subsequence. Rosenthal's $\ell_1$-theorem~\cite{ros} (cf. \cite[Theorem~5.37]{fab-ultimo})
characterizes weakly precompact sets as those which are bounded and contain no sequence equivalent to the unit basis of~$\ell_1$.
An operator between Banach spaces is said to be {\em weakly precompact} if it maps bounded sets to
weakly precompact sets; this is equivalent to saying that it factors through a Banach space not containing subspaces isomorphic to~$\ell_1$.
For more information on weakly precompact operators we refer the reader to~\cite{gon-abe}.
\begin{corollary}\label{corollary:IdealProperties}
Let $U$ be a subspace of $\mathcal{UM}(X^*,Y)$ and let $S:U \to Z$ be an $(\ell^s_p,\ell_p)$-controlled operator. Then:
\begin{enumerate}
\item[(i)] $S$ is weakly compact whenever $Y$ is reflexive.
\item[(ii)] $S$ is weakly precompact whenever $Y \not\supseteq \ell_1$.
\end{enumerate}
\end{corollary}
\begin{proof} We consider a factorization of~$S$ as in Proposition~\ref{proposition:facto} and we distinguish two cases:
{\em Case $1<p<\infty$.} If $Y$ is reflexive, then so is $L_p(\mu,Y)$ (see e.g. \cite[p.~100, Corollary~2]{die-uhl-J}) and the same holds for~$W$, hence $S$ is weakly compact.
On the other hand, if $Y \not\supseteq \ell_1$, then $L_p(\mu,Y) \not\supseteq\ell_1$ (see e.g. \cite[Theorem~2.2.2]{cem-men})
and so $W\not\supseteq\ell_1$, hence $S$ is weakly precompact.
{\em Case $p=1$.} Let $j: L_2(\mu,Y)\to L_1(\mu,Y)$ be the identity operator.
Since
$$
i_\mu(B_U) \subseteq j(B_{L_2(\mu,Y)}),
$$
we deduce that $i_\mu(B_U)$ is relatively weakly compact
(resp. weakly precompact) whenever $Y$ is reflexive (resp. $Y \not\supseteq \ell_1$), and the same holds
for $S(B_U)=\tilde{S}(i_\mu(B_U))$.
\end{proof}
The following result shows the link between $(\ell^s_p,\ell_p)$-controlled
and $(\ell^s_p,\ell_p)$-summing operators.
\begin{theorem}\label{theorem:equiv}
Let $U$ be a subspace of~$\mathcal{UM}(X^*,Y)$ and let $S:U\to Z$ be an operator. Let us consider the following statements:
\begin{enumerate}
\item[(i)] $S$ is $(\ell^s_p,\ell_p)$-controlled.
\item[(ii)] $S$ is $(\ell^s_p,\ell_p)$-summing and (SOT-to-norm) sequentially continuous.
\end{enumerate}
Then (i)$\Rightarrow$(ii). Moreover, both statements are equivalent whenever $U \cap X\varepsilon Y$ is SOT-sequentially dense in~$U$.
\end{theorem}
\begin{proof}
Suppose first that $S$ is $(\ell^s_p,\ell_p)$-controlled and consider a factorization of~$S$ as in Proposition~\ref{proposition:facto}.
We will deduce that $S$ is $(\ell^s_p,\ell_p)$-summing and (SOT-to-norm) sequentially continuous by checking that so is~$i_\mu$.
On one hand, $i_\mu$ is $(\ell^s_p,\ell_p)$-summing, because
for every $n\in \mathbb{N}$ and $T_1,\dots,T_n\in \mathcal{UM}(X^*,Y)$ we have
$$
\sum_{i=1}^n \|i_\mu(T_i)\|_{L_p(\mu,Y)}^p=
\int_{B_{X^*}}\sum_{i=1}^n \|T_i(\cdot)\|_Y^p \, d\mu
\leq \sup_{x^*\in B_{X^*}} \sum_{i=1}^n \|T_i(x^*)\|_Y^p.
$$
On the other hand, $i_\mu$ is (SOT-to-norm) sequentially continuous. Indeed, let $(T_n)$ be a sequence in~$\mathcal{UM}(X^*,Y)$
which SOT-converges to~$0$, i.e. $\|T_n(x^*)\|_Y\to 0$ for every $x^*\in X^*$.
By the Banach-Steinhaus theorem,
$\sup\{\|T_n\|:\, n\in\mathbb{N}\}<\infty$.
From Lebesgue's dominated convergence theorem it follows that $(i_\mu(T_n))$ converges to~$0$ in the norm topology of~$L_p(\mu,Y)$.
Suppose now that (ii) holds and that $U \cap X\varepsilon Y$ is SOT-sequentially dense in~$U$.
The restriction $S|_{U \cap X\varepsilon Y}$ is $(\ell^s_p,\ell_p)$-summing
and so Theorem~\ref{theorem:BS} and Proposition~\ref{proposition:facto} ensure the existence of $\mu \in P(B_{X^*})$, a
subspace $W \subseteq L_p(\mu,Y)$ and an operator $\tilde{S}:W\to Z$ such that $i_\mu(U\cap X\varepsilon Y)\subseteq W$ and
$$
\tilde{S}\circ i_\mu|_{U\cap X\varepsilon Y}=S|_{U \cap X\varepsilon Y}.
$$
Then we have $i_\mu(U)\subseteq W$ and $\tilde{S}\circ i_\mu|_{U}=S$, because
$S$ and $i_\mu$ are (SOT-to-norm) sequentially continuous
and $U \cap X\varepsilon Y$ is SOT-sequentially dense in~$U$.
Therefore, $S$ is $(\ell^s_p,\ell_p)$-controlled.
\end{proof}
We are now ready to present a negative answer to \cite[Question 5.2]{bla-sig}:
\begin{example}\label{example:counterBS}
Suppose that $X$ is not reflexive and $X^*$ is separable (e.g. $X=c_0$).
Then $X^{**}=\mathcal{UM}(X^*,\mathbb{R})$, every $S\in X^{***}$ is $(\ell^s_p,\ell_p)$-summing, but
no $S\in X^{***}\setminus X^*$ is $(\ell^s_p,\ell_p)$-controlled (as operators from $X^{**}$ to~$\mathbb{R}$).
\end{example}
\begin{proof} The equality $X^{**}=\mathcal{UM}(X^*,\mathbb{R})$
follows from the fact that $X\not\supseteq\ell_1$, according to
Haydon's result which we already mentioned in Example~\ref{example:UM2}.
Every $S\in X^{***}$ is easily seen to be $(\ell^s_p,\ell_p)$-summing as an operator from $X^{**}$ to~$\mathbb{R}$
(use that $B_{X^*}$ is $w^*$-dense in~$B_{X^{***}}$, by Goldstine's theorem).
On the other hand, if $S\in X^{***}$ is $(\ell^s_p,\ell_p)$-controlled, then it is $w^*$-sequentially continuous
by Theorem~\ref{theorem:equiv} (bear in mind that SOT$=w^*$ on~$X^{**}$).
Since $(B_{X^{**}},w^*)$ is metrizable (because $X^*$ is separable),
the restriction $S|_{B_{X^{**}}}$ is $w^*$-continuous and so, by the Banach-Dieudonn\'{e} theorem,
$S$ is $w^*$-continuous, i.e. $S\in X^*$.
\end{proof}
In order to apply Theorem~\ref{theorem:equiv}, there are many examples of subspaces $U$ of $\mathcal{UM}(X^*,Y)$
for which $U \cap X\varepsilon Y$ is SOT-sequentially dense in~$U$.
An operator $T:X^*\to Y$ is said to be {\em affine Baire-1} (we write $T\in \mathcal{AB}(X^*,Y)$ for short)
if there is a sequence in $X\varepsilon Y$ which SOT-converges to~$T$.
Affine Baire-1 operators were studied by Mercourakis and Stamati~\cite{mer-sta} and
Kalenda and Spurn\'{y}~\cite{kal-spu}. We present below some examples. Recall first that a Banach space
$Y$ has the {\em approximation property} ({\em AP}) if for each norm-compact set $C \subseteq Y$ and each $\varepsilon>0$
there is a finite rank operator $R:Y \to Y$ such that $\|R(y)-y\|_Y\leq \varepsilon$ for all $y\in C$. If in addition $R$ can be chosen in such a way that
$\|R\| \leq \lambda$ for some constant $\lambda\geq 1$ (independent of~$C$ and~$\varepsilon$), then $Y$ is said to have
the {\em $\lambda$-bounded approximation property} ({\em $\lambda$-BAP}). A Banach space is said to have the {\em bounded
approximation property} ({\em BAP}) if it has the $\lambda$-BAP for some $\lambda\geq 1$. For instance,
every Banach space with a Schauder basis has the BAP. In general, the AP and the BAP are different. However,
a separable dual Banach space has the AP if and only if it has the $1$-BAP. For more information
on these properties we refer the reader to~\cite{casazza}.
\begin{example}\label{example:weak}
{\em Suppose that $Y$ has the BAP. If $T \in \mathcal{L}(X^*,Y)$ is ($w^*$-to-weak) continuous and has separable range, then
$T\in \mathcal{AB}(X^*,Y)$.}
\end{example}
\begin{proof}
Let $\lambda\geq 1$ be a constant such that $Y$ has the $\lambda$-BAP.
Given any countable set $D \subseteq Y$, there is a sequence $(R_n)$ of finite rank operators on~$Y$ such that
$\|R_n\|\leq \lambda$ for all $n\in \mathbb{N}$ and $\|R_n(y)-y\|_Y \to 0$ for every $y\in D$. Therefore,
$\|R_n(y)-y\|_Y \to 0$ for every $y\in \overline{D}$ (the norm-closure of~$D$). In particular, if this argument is applied to
any countable set $D$ such that $D\subseteq T(X^*) \subseteq \overline{D}$, we get that the sequence $(R_n\circ T)$ is SOT-convergent to~$T$
in~$\mathcal{L}(X^*,Y)$. Note that each $R_n\circ T$ is ($w^*$-to-weak) continuous (because so is~$T$) and has finite rank, hence it
belongs to~$\mathcal{L}_{w^*,\|\cdot\|}(X^*,Y)\subseteq X\varepsilon Y$.
\end{proof}
\begin{example}\label{example:MS}
{\em Suppose that $X^*$ is separable and that either $X^*$ or $Y$ has the BAP. Then}
$$
\mathcal{L}(X^*,Y) = \mathcal{AB}(X^*,Y),
$$
see \cite[Theorems~2.18 and~2.19]{mer-sta}. The proofs of these results contain a gap which
was commented and corrected in \cite[Remark~4.4]{kal-spu}. Note that the separability assumption on~$Y$ that appears
in the statement of \cite[Theorem~2.19]{mer-sta} can be removed by using the arguments of~\cite{kal-spu}.
\end{example}
Clearly, $\mathcal{AB}(X^*,Y)$ is a linear subspace of $\mathcal{L}(X^*,Y)$. It is norm-closed whenever $Y$ has the BAP, as we next show.
To this end, we use an argument similar to the usual proof that the uniform limit of
a sequence of real-valued Baire-1 functions is Baire-1 (see e.g. \cite[Proposition~A.126]{luk-alt}). However,
some technicalities arise since we need to approximate with operators instead of arbitrary continuous maps.
\begin{lemma}\label{lem:closed}
\itshape If $Y$ has the BAP, then $\mathcal{AB}(X^*,Y)$ is norm-closed in $\mathcal{L}(X^*,Y)$.
\end{lemma}
\begin{proof}
Fix $\lambda\geq 1$ such that $Y$ has the $\lambda$-BAP.
Let $T \in \overline{\mathcal{AB}(X^*,Y)}^{\|\cdot\|}$ with $\|T\|=1$. Let $(U_k)$ be a sequence
in $\mathcal{AB}(X^*,Y)$ such that $\|U_k\|\leq 2^{-k+1}$ for all $k\in \mathbb{N}$ and $T=\sum_{k\in \mathbb{N}}U_k$ in the operator norm.
Given $k\in \mathbb{N}$, we can apply to~$U_k$ the vector-valued version of Mokobodzki's theorem proved in \cite[Theorem~2.2]{kal-spu} to obtain
a sequence $(S_{k,n})_{n\in \mathbb{N}}$ in $X\varepsilon Y$ such that
\begin{itemize}
\item $(S_{k,n})_{n\in \mathbb{N}}$ SOT-converges to~$U_k$;
\item $\|S_{k,n}\|\leq \lambda 2^{-k+1}$ for all $n\in \mathbb{N}$.
\end{itemize}
Define a sequence $(T_n)$ in $X \varepsilon Y$ by
$$
T_n:=\sum_{k=1}^n S_{k,n}
\quad\mbox{for all }n\in \mathbb{N}.
$$
It is easy to check that $(T_n)$ SOT-converges to~$T$, hence $T\in \mathcal{AB}(X^*,Y)$.
\end{proof}
As usual, we denote by $\mathcal{K}(X^*,Y)$ the subspace of $\mathcal{L}(X^*,Y)$ consisting of all compact operators from~$X^*$ to~$Y$.
Clearly, we have $X\varepsilon Y \subseteq \mathcal{K}(X^*,Y)$.
\begin{example}\label{example:MScompact}
\it Suppose that $X$ is separable and $X\not \supseteq \ell_1$.
\begin{enumerate}
\item[(i)] Every finite rank operator $T:X^* \to Y$ is affine Baire-1.
\item[(ii)] If $Y$ has the BAP, then
$$
\mathcal{K}(X^*,Y) \subseteq \mathcal{AB}(X^*,Y).
$$
\end{enumerate}
\end{example}
\begin{proof} (i) It suffices to check it for rank one operators. Fix $x^{**}\in X^{**}$ and $y\in Y$
in such a way that $T(x^*)=\langle x^{**},x^*\rangle y$ for all $x^*\in X^*$.
Since $X$ is $w^*$-sequentially dense in~$X^{**}$
(by the Odell-Rosenthal theorem~\cite{ode-ros}, cf. \cite[Theorem~4.1]{van}), there is
a sequence $(x_n)$ in~$X$ which $w^*$-converges to~$x^{**}$. For each $n\in \mathbb{N}$
we define $T_n\in \mathcal{L}_{w^*,\|\cdot\|}(X^*,Y) \subseteq X\varepsilon Y$
by declaring $T_n(x^*):=\langle x_n,x^*\rangle y$ for all $x^*\in X^*$.
Clearly, $(T_n)$ is SOT-convergent to~$T$.
(ii) Take any $T\in \mathcal{K}(X^*,Y)$. Since $Y$ has the AP,
there is a sequence $(T_n)$ of finite rank operators from~$X^*$ to~$Y$
converging to~$T$ in the operator norm. Each $T_n$ is affine Baire-1 by~(i). An appeal to Lemma~\ref{lem:closed}
ensures that $T\in \mathcal{AB}(X^*,Y)$.
\end{proof}
The proof of Theorem~\ref{theorem:BS} makes essential use of the $w^*$-continuity on~$B_{X^*}$
of the real-valued map $\|T(\cdot)\|_Y$ for $T\in X\varepsilon Y$. We next present an abstract Pietsch-type domination theorem
for $(\ell^s_p,\ell_p)$-summing operators that does not require that continuity assumption, at the price
of dominating with a {\em finitely additive} measure. As a consequence of this result, we will obtain another proof of Theorem~\ref{theorem:BS}.
Given a measurable space~$(\Omega,\Sigma)$, we denote by
$B(\Sigma)$ the Banach space of all bounded $\Sigma$-measurable real-valued functions on~$\Omega$, equipped with the supremum norm.
The dual $B(\Sigma)^*$ can be identified with the Banach space ${\rm ba}(\Sigma)$ of all
bounded finitely additive real-valued measures on~$\Sigma$, equipped with the variation norm. The duality is given
by integration, that is, $\langle h,\nu \rangle=\int_\Omega h \, d\nu$ for every $h\in B(\Sigma)$ and $\nu\in {\rm ba}(\Sigma)$, see e.g.
\cite[p.~77, Theorem~7]{die-J}.
\begin{theorem}\label{theorem:FA}
Let $\Sigma$ be a $\sigma$-algebra on~$B_{X^*}$ and let $U$ be a subspace of $\mathcal{L}(X^*,Y)$
such that the restriction of $\|T(\cdot)\|_Y$ to~$B_{X^*}$ is $\Sigma$-measurable for every $T\in U$.
Let $S:U \to Z$ be an $(\ell^s_p,\ell_p)$-summing operator.
Then there exist a constant $K\geq 0$ and a finitely additive probability $\nu$ on~$\Sigma$ such that
\begin{equation}\label{eqn:FA}
\|S(T)\|_Z \leq K \Big(\int_{B_{X^*}} \|T(\cdot)\|_{Y}^p \, d\nu \Big)^{1/p}
\end{equation}
for every $T\in U$.
\end{theorem}
\begin{proof}
For each $T\in U$ we define $\psi_T\in B(\Sigma)$ by
$$
\psi_T(x^*):=\|T(x^*)\|^p_Y \quad
\mbox{for all }x^*\in B_{X^*}.
$$
Let $L \subseteq {\rm ba}(\Sigma)=B(\Sigma)^*$ be the convex $w^*$-compact set of all finitely additive probabilities on~$\Sigma$.
For any $n\in \mathbb{N}$ and $\bar{T}=(T_1,\dots,T_n)\in U^n$, we define
$$
\Delta_{\bar{T}}: L \to \mathbb{R}, \quad
\Delta_{\bar{T}}(\nu):=\sum_{i=1}^n \|S(T_i)\|_Z^p
- K^{p}
\int_{B_{X^*}} \sum_{i=1}^n \psi_{T_i} \, d\nu,
$$
where $K\geq 0$ is a constant as in Definition~\ref{definition:pPettisSumming}.
Clearly, $\Delta_{\bar{T}}$ is convex and $w^*$-continuous. Moreover,
by the Hahn-Banach theorem there is $\eta_{\bar{T}} \in{\rm ba}(\Sigma)$
with $\|\eta_{\bar{T}}\|_{{\rm ba}(\Sigma)}=1$ such that
$$
\Big\langle \sum_{i=1}^n \psi_{T_i},\eta_{\bar{T}} \Big\rangle=
\Big\|\sum_{i=1}^n \psi_{T_i}\Big\|_{B(\Sigma)}.
$$
Bearing in mind that $\sum_{i=1}^n \psi_{T_i}\geq 0$, it follows that the variation $|\eta_{\bar{T}}| \in L$
satisfies
$$
\Big\langle \sum_{i=1}^n \psi_{T_i},|\eta_{\bar{T}}| \Big\rangle=\sup_{x^*\in B_{X^*}}\sum_{i=1}^n \psi_{T_i}(x^*).
$$
Therefore, inequality~\eqref{eqn:psumming} in Definition~\ref{definition:pPettisSumming} yields
$$
\Delta_{\bar{T}}\big(|\eta_{\bar{T}}|\big) = \sum_{i=1}^n \|S(T_i)\|_Z^p
- K^p \Big\langle \sum_{i=1}^n \psi_{T_i},|\eta_{\bar{T}}| \Big\rangle \leq 0.
$$
The collection of all functions of the form $\Delta_{\bar{T}}$ is easily seen to be a convex cone in~$\mathbb{R}^{L}$.
By Ky Fan's Lemma (see e.g. \cite[Lemma~9.10]{die-alt}),
there is $\nu \in L$ such that $\Delta_{\bar{T}}(\nu)\leq 0$ for all functions of the form $\Delta_{\bar{T}}$.
In particular, \eqref{eqn:FA} holds for every $T\in U$.
\end{proof}
\begin{proof}[Another proof of Theorem~\ref{theorem:BS}]
Let $\Sigma:={\rm Borel}(B_{X^*},w^*)$.
Let $K$ and $\nu$ be as in Theorem~\ref{theorem:FA}. Define $\varphi\in B(\Sigma)^*$ by $\langle h,\varphi\rangle:=\int_{B_{X^*}}h \, d\nu$
for all $h\in B(\Sigma)$. Let $\mu\in C(B_{X^*})^*$ be the restriction of $\varphi$ to~$C(B_{X^*})$ (as a subspace of $B(\Sigma)$).
Then $\mu\in P(B_{X^*})$ and~\eqref{eqn:FA} now reads as
$$
\|S(T)\|_Z \leq K \Big(\int_{B_{X^*}} \|T(\cdot)\|_{Y}^p \, d\mu \Big)^{1/p}
$$
for every $T\in U \subseteq X\varepsilon Y$.
\end{proof}
\section{Kwapie\'{n}-type theorem for $(\ell^s_p,\ell^s_q)$-dominated operators}\label{section:Kwapien}
Throughout this section we fix $1< p, q< \infty$ such that $1/p + 1/q \leq 1$. Let $1\leq r < \infty$ be defined by $1/p + 1/q =1/r$.
An operator
$S:X\to Y$ is said to be {\em $(p,q)$-dominated} if there is a constant $K\geq 0$ such that
\begin{multline*}
\Big( \sum_{i=1}^n | \langle S(x_i),y^*_i \rangle|^r \Big)^{1/r}
\\ \le K
\sup_{x^* \in B_{X^*}} \Big(\sum_{i=1}^n |\langle x_i,x^*\rangle|^p\Big)^{1/p} \cdot
\sup_{y \in B_{Y}} \Big(\sum_{i=1}^n | \langle y,y^*_i \rangle|^q\Big)^{1/q}
\end{multline*}
for every $n\in \mathbb{N},$ every $x_1,\dots,x_n \in X$ and every $y^*_1, \dots, y^*_n \in Y^*$.
The classical result of Kwapie\'{n}~\cite{kwa} mentioned in the introduction says that an operator between Banach spaces
is $(p,q)$-dominated if and only if it can be written as $S_1\circ S_2$ for some operators $S_1$ and $S_2$ such that $S_2$ is absolutely $p$-summing
and $S_1^*$ is absolutely $q$-summing (cf. \cite[\S 19]{def-flo}). Our aim in this section is
to extend Kwapie\'{n}'s result to the framework of $(\ell^s_p,\ell_p)$-summing operators, see Theorem~\ref{theorem:equiv2} below.
From now on we assume that $Z^*$ is a subspace of~$\mathcal{UM}(E^*,F)$ for some fixed Banach spaces $E$ and $F$.
Accordingly, the adjoint of any operator taking values in~$Z$ is defined on a subspace of~$\mathcal{UM}(E^*,F)$
and we can discuss whether it is $(\ell^s_q,\ell_q)$-summing or $(\ell^s_q,\ell_q)$-controlled.
\begin{definition}\label{definition:pqdom}
Let $U$ be a subspace of $\mathcal L(X^*,Y)$.
An operator $S: U \to Z$ is said to be {\em $(\ell^s_p,\ell^s_q)$-dominated} if there is a constant $K\geq 0$ such that
\begin{multline}\label{eqn:pqdom}
\Big( \sum_{i=1}^n | \langle S(T_i),z^*_i \rangle|^r \Big)^{1/r}
\\ \le K
\sup_{x^* \in B_{X^*}} \Big(\sum_{i=1}^n \|T_i(x^*)\|_Y^p\Big)^{1/p} \cdot
\sup_{e^* \in B_{E^*}} \Big(\sum_{i=1}^n \| z^*_i (e^*)\|_{F}^q\Big)^{1/q}
\end{multline}
for every $n\in \mathbb{N},$ every $T_1,\dots,T_n \in U$ and every $z^*_1, \dots, z^*_n \in Z^*$.
\end{definition}
\begin{theorem}\label{theorem:equiv2}
Let $U$ be a subspace of~$\mathcal{UM}(X^*,Y)$ and let $S:U\to Z$ be an operator. Consider the following statements:
\begin{enumerate}
\item[(i)] $S$ is $(\ell^s_p,\ell^s_q)$-dominated.
\item[(ii)] There exist a constant $K\geq 0$ and measures $\mu \in P(B_{X^*})$ and $\eta \in P(B_{E^{*}})$ such that
\begin{equation}\label{eqn:intpqdom}
| \langle S(T), z^*\rangle| \leq K \Big(\int_{B_{X^*}}\|T(\cdot)\|^p_{Y} \, d\mu\Big)^{1/p}
\cdot
\Big(\int_{B_{E^*}}\| z^*(\cdot)\|^q_{F} \, d\eta \Big)^{1/q}
\end{equation}
for every $T \in U\cap X\varepsilon Y$ and every $z^* \in Z^*\cap E \varepsilon F$.
\item[(iii)] There exist a constant $K\geq 0$ and measures $\mu \in P(B_{X^*})$ and $\eta \in P(B_{E^{*}})$ such that
\eqref{eqn:intpqdom} holds for every $T \in U$ and every $z^* \in Z^*$.
\item[(iv)] There exist a Banach space $W$, an
$(\ell^s_p,\ell_p)$-controlled operator $S_2:U\to W$ and an operator
$S_1:W\to Z$ with $(\ell^s_q,\ell_q)$-controlled adjoint such that $S$ factors as $S= S_1 \circ S_2$.
\item[(v)] There exist a Banach space $W$, an
$(\ell^s_p,\ell_p)$-summing operator $S_2:U\to W$ and an operator
$S_1:W\to Z$ with $(\ell^s_q,\ell_q)$-summing adjoint such that $S$ factors as~$S= S_1 \circ S_2$.
\end{enumerate}
Then (iii)$\Rightarrow$(iv)$\Rightarrow$(v)$\Rightarrow$(i)$\Rightarrow$(ii). All statements are equivalent if, in addition, we assume that:
\begin{enumerate}
\item[(a)] the identity map on~$Z^*$ is (SOT-to-$w^*$) sequentially continuous;
\item[(b)] $Z^* \cap E\varepsilon F$ is SOT-sequentially dense in~$Z^*$;
\item[(c)] $U \cap X\varepsilon Y$ is SOT-sequentially dense in~$U$;
\item[(d)] $S$ is (SOT-to-norm) sequentially continuous.
\end{enumerate}
\end{theorem}
For the sake of brevity it is convenient to introduce the following:
\begin{definition}\label{definition:admissible}
We say that the triple $(Z,E,F)$ is {\em admissible}
if conditions~(a) and~(b) above hold.
\end{definition}
Before embarking on the proof of Theorem~\ref{theorem:equiv2} we present some examples of
admissible triples. Recall that the {\em weak operator topology} ({\em WOT} for short) on $\mathcal{L}(E^*,F)$
is the locally convex topology for which the sets
$$
\{R\in \mathcal{L}(E^*,F): \, |\langle R(e^*),f^*\rangle|<\varepsilon\},
\quad e^*\in E^*, \quad f^*\in F^*,
\quad \varepsilon>0,
$$
are a subbasis of open neighborhoods of~$0$. So, a net $(R_\alpha)$ in $\mathcal{L}(E^*,F)$ is WOT-convergent to~$0$
if and only if $(R_\alpha(e^*))$ is weakly null in~$F$ for every $e^*\in E^*$.
\begin{example}\label{example:wot-vs-weak}
{\em If $Z^* \subseteq E \varepsilon F$, then $(Z,E,F)$ is admissible.} Indeed, (b) holds trivially, while (a) follows from the fact that
a sequence in $E\varepsilon F$ is WOT-convergent to~$0$ if and only if it is weakly null in~$E\varepsilon F \subseteq \mathcal{L}(E^*,F)$
(see e.g. \cite[Theorem~1.3]{col-rue}).
\end{example}
\begin{example}\label{example:triple-l1}
Suppose that $E \not\supseteq \ell_1$. Take $Z:=E^*$ and $F:=\mathbb{R}$.
Then we have $Z^{*} = E^{**} = \mathcal{UM}(E^{*},F)$ (see Example~\ref{example:UM2}) and, of course, SOT $=w^*$ on~$Z^*$,
so that (a) holds. If in addition $E$ is separable, then (b) also holds, i.e. $E\varepsilon F = E$
is $w^*$-sequentially dense in $E^{**}$, by the Odell-Rosenthal theorem~\cite{ode-ros} (cf. \cite[Theorem~4.1]{van}).
\end{example}
\begin{example}\label{example:projective}
Suppose that $F:=X_0^*$ for a Banach space~$X_0$. Take $Z:=E^* \hat{\otimes}_\pi X_0$ (the projective tensor product of~$E^*$ and~$X_0$).
Then:
\begin{enumerate}
\item[(i)] $Z^*=\mathcal{L}(E^*,F)$ in the natural way (see e.g. \cite[p.~230, Corollary~2]{die-uhl-J}).
\item[(ii)] The identity map on~$Z^*$ is (WOT-to-$w^*$) sequentially continuous.
\item[(iii)] If $E^*$ is separable and either $E^*$ or~$F$ has the BAP, then $Z^*=\mathcal{UM}(E^*,F)$ and
$(Z,E,F)$ is admissible.
\end{enumerate}
\end{example}
\begin{proof} (ii) Let $(\varphi_n)$ be a sequence in~$Z^*=\mathcal{L}(E^*,F)$ which WOT-converges to~$0$. Then it is bounded
(by the Banach-Steinhaus theorem) and
$$
\langle e^*\otimes x_0,\varphi_n \rangle=\langle x_0,\varphi_n(e^*)\rangle \to 0
\quad\mbox{for all }e^*\in E^*\mbox{ and }x_0\in X_0,
$$
hence $(\varphi_n)$ is $w^*$-null.
(iii) Under such assumptions $E\varepsilon F$ is SOT-sequentially dense in
$\mathcal{L}(E^*,F)$ (see Example~\ref{example:MS}). In particular, we have $\mathcal{L}(E^*,F)=\mathcal{UM}(E^*,F)$.
Bearing in mind~(ii) it follows that $(Z,E,F)$ is admissible.
\end{proof}
\begin{proof}[Proof of Theorem~\ref{theorem:equiv2}]
(iii)$\Rightarrow$(iv) By assumption we have
$$
|\langle S(T),z^*\rangle| \leq K \|i_\mu(T)\|_{L_p(\mu,Y)} \|z^*\|_{Z^*}
\quad\mbox{for every }T\in U \mbox{ and }z^*\in Z^*,
$$
hence
$$
\|S(T)\|_Z \leq K \|i_\mu(T)\|_{L_p(\mu,Y)}
\quad \mbox{for every }T\in U.
$$
Write $W:=\overline{i_\mu(U)}$. By the previous inequality, there is an operator
$S_1: W \to Z$ such that $S_1\circ i_\mu|_U=S$
(cf. the proof of Proposition~\ref{proposition:facto}).
Of course, $S_2:=i_\mu|_U$ is $(\ell^s_p,\ell_p)$-controlled.
We claim that $S_1^*:Z^* \to W^*$
is $(\ell^s_q,\ell_q)$-controlled.
Indeed, inequality~\eqref{eqn:intpqdom} reads as
$$
|\langle i_\mu(T), S_1^* (z^*) \rangle | \le K \, \|i_\mu(T)\|_{L_p(\mu,Y)} \, \| i_\eta(z^*)\|_{L_q(\eta,F)}
$$
for every $T\in U$ and $z^*\in Z^*$.
Thus, $\|S_1^*(z^*)\|_{W^*} \leq K \| i_\eta(z^*)\|_{L_q(\eta,F)}$
for every $z^*\in Z^*$, so that $S_1^*$
is $(\ell^s_q,\ell_q)$-controlled.
(iv)$\Rightarrow$(v) This follows from Theorem~\ref{theorem:equiv}.
(v)$\Rightarrow$(i) Fix $n\in \mathbb{N}$ and take $T_1,\dots,T_n \in U$ and $z^*_1, \dots, z^*_n \in Z^*$. Then H\"{o}lder's inequality
and the fact that $S_2$ (resp.~$S_1^*$) is $(\ell^s_p,\ell_p)$-summing
(resp. $(\ell^s_q,\ell_q)$-summing) yield
\begin{multline*}
\Big(\sum_{i=1}^n | \langle S(T_i),z^*_i \rangle|^r\Big)^{1/r}=
\Big(\sum_{i=1}^n | \langle S_2(T_i),S_1^*(z^*_i) \rangle|^r\Big)^{1/r}
\\\leq \Big(\sum_{i=1}^n \|S_2(T_i)\|_W^r \cdot\|S_1^*(z^*_i)\|_{W^*}^r\Big)^{1/r}
\leq \Big( \sum_{i=1}^n \|S_2(T_i)\|_W^p\Big)^{1/p}
\cdot \Big( \sum_{i=1}^n \|S_1^*(z_i^*)\|_{W^*}^q\Big)^{1/q}
\\ \leq K \sup_{x^* \in B_{X^*}} \Big(\sum_{i=1}^n \|T_i(x^*)\|_Y^p\Big)^{1/p} \cdot
\sup_{e^* \in B_{E^*}} \Big(\sum_{i=1}^n \| z^*_i (e^*)\|_{F}^q\Big)^{1/q}
\end{multline*}
for some constant $K\geq 0$ independent of the $T_i$'s and $z_i^*$'s. This shows that
$S$ is $(\ell^s_p,\ell^s_q)$-dominated.
(i)$\Rightarrow$(ii) Observe that $L:=P(B_{X^*}) \times P(B_ {E^{*}})$ is a compact convex set of the locally convex space
$C(B_{X^*})^* \times C(B_{E^*})^*$, equipped with the product of the corresponding $w^*$-topologies. Fix $n\in \mathbb{N}$,
$$
\bar{T}=(T_1,\dots,T_n ) \in (U\cap X\varepsilon Y)^n
\ \ \mbox{and}
\ \ \bar{z^*}=(z^*_1,\dots, z^*_n) \in (Z^*\cap E\varepsilon F)^n.
$$
Consider the function $\Delta_{\bar{T}, \bar{z^*} }: L \to \mathbb{R}$
given by
$$
\Delta_{\bar{T},\bar{z^*} }(\mu,\eta):=
$$
$$
\sum_{i=1}^n |\langle S(T_i), z^*_i \rangle|^r
- K^r \frac{r}{p}
\int_{B_{X^*}} \sum_{i=1}^n \|T_i(\cdot)\|_{Y}^p \, d\mu - K^r \frac{r}{q} \int_{{B_{E^{*}}}} \sum_{i=1}^n \|z^*_i(\cdot)\|_{F}^q \, d\eta,
$$
where $K\geq 0$ is a constant as in Definition~\ref{definition:pqdom}.
Clearly, $\Delta_{\bar{T}, \bar{z^*} }$ is convex and continuous,
because $T_i\in X\varepsilon Y$ and $z_i^*\in E\varepsilon F$ for every~$i=1,\dots,n$.
We claim that $\Delta_{\bar{T}, \bar{z^*}}(\mu,\eta)\leq 0$ for some $(\mu,\eta)\in L$.
Indeed, since the functions
$$
x^*\mapsto \sum_{i=1}^n \|T_i(x^*)\|_Y^p \quad\mbox{and}\quad e^{*} \mapsto \sum_{i=1}^n \|z^*_i(e^*)\|_F^q
$$
are $w^*$-continuous on~$B_{X^*}$ and $B_{E^{*}}$, they attain their suprema at some $x_{\bar{T}}^*\in B_{X^*}$
and $e^{*}_{\bar{z^*}} \in B_{E^{*}}$, respectively. By taking into account Young's inequality, we have
\begin{multline}\label{eqn:ultima}
\sum_{i=1}^n | \langle S(T_i),z^*_i \rangle|^r
\stackrel{\eqref{eqn:pqdom}}{\le} K^r
\Big(\sum_{i=1}^n \|T_i(x_{\bar{T}}^*)\|_Y^p\Big)^{r/p} \cdot
\Big(\sum_{i=1}^n \|z^*_i (e^{*}_{\bar{z^*}})\|_{F}^q\Big)^{r/q}
\\
\leq
K^r\frac{r}{p}\sum_{i=1}^n \|T_i(x_{\bar{T}}^*)\|_Y^p+K^r\frac{r}{q}\sum_{i=1}^n \|z^*_i (e^{*}_{\bar{z^*}})\|_{F}^q.
\end{multline}
If we write $\mu:=\delta_{x^*_{\bar{T}}}\in P(B_{X^*})$
and $\eta:=\delta_{e^{*}_{\bar{z^*}}}\in P(B_{E^*})$, then \eqref{eqn:ultima} yields $\Delta_{\bar{T}, \bar{z^*}}(\mu,\eta)\leq 0$, as required.
The collection $\mathcal{C}$ of all functions
$\Delta_{\bar{T},\bar{z^*}}$ as above is a convex cone in $\mathbb{R}^{L}$. Indeed, $\mathcal{C}$
is obviously closed under sums and we have
$$
\alpha\Delta_{\bar{T},\bar{z^*}}=\Delta_{(\alpha^{1/p}T_1,\dots,\alpha^{1/p}T_n),(\alpha^{1/q}z_1^*,\dots,\alpha^{1/q}z_n^*)}
$$
for all $\alpha\geq 0$.
By Ky Fan's Lemma (see e.g. \cite[Lemma~9.10]{die-alt}), there is
$(\mu, \eta) \in L$ such that $\Delta_{\bar{T},\bar{z^*}}(\mu,\eta) \leq 0$ for
every $\Delta_{\bar{T},\bar{z^*}}\in \mathcal{C}$.
In particular,
\begin{multline}\label{eqn:fromKF}
|\langle S(T), z^*\rangle|^r \le
K^r \frac{r}{p}
\int_{B_{X^*}} \|T(\cdot)\|_{Y}^p \, d\mu + K^r \frac{r}{q} \int_{B_{E^{*}}} \|z^*(\cdot)\|_{F}^q \, d\eta
\\ \quad\mbox{for every }T \in U\cap X \varepsilon Y
\mbox{ and }z^*\in Z^*\cap E \varepsilon F.
\end{multline}
Fix $T \in U\cap X \varepsilon Y$ and $z^*\in Z^*\cap E \varepsilon F$. We will check that \eqref{eqn:intpqdom} holds. Write
$$
a:= \Big(\int_{B_{X^*}} \|T(\cdot)\|_{Y}^p \, d\mu \Big)^{1/p}
\quad\mbox{and}\quad
b:= \Big(\int_{B_{E^{*}}} \|z^*(\cdot)\|_{F}^q \, d\eta \Big)^{1/q}.
$$
If either $a=0$ or $b=0$, then $\langle S(T), z^*\rangle=0$. Indeed, if $a=0$, then
for each $n\in \mathbb{N}$ inequality~\eqref{eqn:fromKF} applied to the pair $(nT,z^*)$ yields
$$
|\langle S(T), z^*\rangle|^r =\frac{1}{n^r}\cdot |\langle S(nT), z^*\rangle|^r \leq \frac{1}{n^r} \cdot \frac{K^rrb^q}{q},
$$
hence $\langle S(T), z^*\rangle=0$.
A similar argument works for the case $b=0$. On the other hand, if $a\neq 0$ and $b\neq 0$, then
inequality~\eqref{eqn:fromKF} applied to the pair $(\frac{1}{a}T,\frac{1}{b}z^*)$ yields
$$
|\langle S(T), z^*\rangle|^r = a^r \, b^r \, \Big|\Big\langle S\Big(\frac{1}{a}T\Big),\frac{1}{b}z^*\Big\rangle\Big|^r
$$
$$
\le
K^r \, a^r \, b^r \left( \frac{r}{p \, a^p}
\int_{B_{X^*}} \|T(\cdot)\|_{Y}^p \, d\mu + \frac{r}{q \, b^q} \int_{B_{E^{*}}} \|z^*(\cdot) \|_{F}^q \, d\eta \right)
=
K^r \, a^r \, b^r.
$$
This proves~\eqref{eqn:intpqdom} when $T \in U\cap X \varepsilon Y$ and $z^*\in Z^*\cap E \varepsilon F$.
Finally, we prove the implication (ii)$\Rightarrow$(iii) under the additional assumptions. Fix $T \in U$ and $z^*\in Z^*$.
By~(c) (resp.~(b)), we can take a sequence $(T_n)$ (resp. $(z_n^*)$) in
$U\cap X \varepsilon Y$ (resp. $Z^*\cap E \varepsilon F$)
which SOT-converges to~$T$ (resp.~$z^*$). For each $n\in \mathbb{N}$ we have
\begin{equation}\label{eqn:tothelimit}
|\langle S(T_n),z_n^*\rangle|
\leq
K \Big(\int_{B_{X^*}}\|T_n(\cdot)\|_Y^p \, d\mu\Big)^{1/p} \cdot \Big(\int_{B_{E^*}}\|z_n^*(\cdot)\|_F^q \, d\eta\Big)^{1/q}.
\end{equation}
Since the operators $i_\mu$ and $i_\eta$ are (SOT-to-norm) sequentially continuous (see the proof of Theorem~\ref{theorem:equiv}), we have
$$
\lim_{n\to \infty} \Big(\int_{B_{X^*}}\|T_n(\cdot)\|_Y^p \, d\mu\Big)^{1/p} = \Big(\int_{B_{X^*}}\|T(\cdot)\|_Y^p \, d\mu\Big)^{1/p}
$$
and
$$
\lim_{n\to \infty} \Big(\int_{B_{E^*}}\|z_n^*(\cdot)\|_F^q \, d\eta\Big)^{1/q} = \Big(\int_{B_{E^*}}\|z^*(\cdot)\|_F^q \, d\eta\Big)^{1/q}.
$$
Moreover, $S$ is (SOT-to-norm) sequentially continuous by assumption~(d), so the sequence $(S(T_n))$ converges to~$S(T)$ in the norm topology.
Since $(z_n^*)$ is $w^*$-convergent to~$z^*$ (by~(a)), we conclude that
\begin{multline*}
|\langle S(T),z^*\rangle| =\lim_{n\to \infty}
|\langle S(T_n),z_n^*\rangle|
\\ \stackrel{\eqref{eqn:tothelimit}}{\leq}
K \Big(\int_{B_{X^*}}\|T(\cdot)\|_Y^p \, d\mu\Big)^{1/p} \cdot \Big(\int_{B_{E^*}}\|z^*(\cdot)\|_F^q \, d\eta\Big)^{1/q},
\end{multline*}
as we wanted. The proof is finished.
\end{proof}
\begin{remark}\label{remark:SOT-norm}
Statement (iv) in Theorem~\ref{theorem:equiv2} implies that $S_2$ is (SOT-to-norm) sequentially continuous (by Theorem~\ref{theorem:equiv}) and so is~$S$.
\end{remark}
\begin{corollary}\label{corollary:Kwapien}
Suppose that $Z^* \subseteq E \varepsilon F$. Let $U$ be a subspace of~$X\varepsilon Y$ and let $S:U\to Z$ be an operator.
Then the following statements are equivalent:
\begin{enumerate}
\item[(i)] $S$ is $(\ell^s_p,\ell^s_q)$-dominated.
\item[(ii)] There exist a constant $K\geq 0$ and measures $\mu \in P(B_{X^*})$ and $\eta \in P(B_{E^{*}})$ such that
$$
| \langle S(T), z^*\rangle| \leq K \Big(\int_{B_{X^*}}\|T(\cdot)\|^p_{Y} \, d\mu\Big)^{1/p}
\cdot
\Big(\int_{B_{E^*}}\| z^*(\cdot)\|^q_{F} \, d\eta \Big)^{1/q}
$$
for every $T \in U$ and every $z^* \in Z^*$.
\item[(iii)] There exist a Banach space $W$, an
$(\ell^s_p,\ell_p)$-summing operator $S_2:U\to W$ and an operator
$S_1:W\to Z$ with $(\ell^s_q,\ell_q)$-summing adjoint such that $S$ factors as~$S= S_1 \circ S_2$.
\end{enumerate}
\end{corollary}
\subsection*{Acknowledgements}
The authors thank J.M. Calabuig and P. Rueda for valuable discussions at the early stage of this work.
Research partially supported by {\em Agencia Estatal de Investigaci\'{o}n} [MTM2017-86182-P to J.R.
and MTM2016-77054-C2-1-P to E.A.S.P., both grants cofunded by ERDF, EU]; and {\em Fundaci\'on S\'eneca} [20797/PI/18 to J.R.]
\end{document} |
\begin{document}
\newcommand{1707.05216}{1707.05216}
\renewcommand{035}{035}
\FirstPageHeading
\ShortArticleName{On Basic Fourier--Bessel Expansions}
\ArticleName{On Basic Fourier--Bessel Expansions}
\Author{Jos\'e Luis CARDOSO}
\AuthorNameForHeading{J.L.~Cardoso}
\Address{Mathematics Department, University of Tr\'as-os-Montes e Alto Douro (UTAD),\\ Vila Real, Portugal}
\Email{\href{mailto:jluis@utad.pt}{jluis@utad.pt}}
\ArticleDates{Received September 27, 2017, in final form April 11, 2018; Published online April 17, 2018}
\Abstract{When dealing with Fourier expansions using the third Jackson (also known as Hahn--Exton) $q$-Bessel function, the corresponding positive zeros $j_{k\nu}$ and the ``shifted'' zeros,~$qj_{k\nu}$, among others, play an essential role. Mixing classical analysis with $q$-analysis we were able to prove asymptotic relations between those zeros and the ``shifted'' ones, as well as the asymptotic behavior of the third Jackson $q$-Bessel
function when computed on the ``shifted'' zeros. A version of a~$q$-analogue of the Riemann--Lebesgue theorem within the scope of basic Fourier--Bessel expansions is also exhibited.}
\Keywords{third Jackson $q$-Bessel function; Hahn--Exton $q$-Bessel function; basic Fourier--Bessel expansions; basic hypergeometric function; asymptotic behavior; Riemann--Lebesgue theorem}
\Classification{42C10; 33D45; 33D15}
\section{Introduction}
When dealing with basic Fourier--Bessel expansions, due to convergence issues, it is crucial to know the asymptotic behavior of the third Jackson $q$-Bessel function when computed in its own shifted zeros. For this purpose, in the sequel of Rahman, as pointed out by Koelink and Swarttouw \cite[p.~696]{KS}, ``the intermingling of (ordinary) analysis and $q$-analysis may be fruitful''.
In the literature, the function $J_{\nu}^{(3)}(z;q)\equiv J_{\nu}(z;q)$, where $\nu$ and $q$ are parameters satisfying $\nu>-1$ and $0<q<1$, is usually identified by the third Jackson $q$-Bessel function or by the Hahn--Exton $q$-Bessel function:
\begin{gather}\label{def}
J_{\nu }^{(3)}(z;q)\equiv J_{\nu }(z;q):=z^{\nu }\frac{\big(q^{\nu +1};q\big)_{\infty }}{(q;q)_{\infty }} \sum\limits_{k=0}^{+\infty}(-1)^{k}\frac{q^{\frac{k(k+1)}{2}}}{\big(q^{\nu +1};q\big)_{k}(q;q)_{k}}z^{2k} .
\end{gather}
Using the basic hypergeometric representation \cite[p.~4]{GR} for ${_r}\phi_s$, it is very well known that (\ref{def}) can be written as
\begin{gather}\label{def2}
J_{\nu }(z;q):=z^{\nu }\frac{\big(q^{\nu +1};q\big)_{\infty }}{(q;q)_{\infty }} {_1}\phi_1\big(0;q^{\nu+1};q,qz^2\big) .
\end{gather}
In \cite{A2} it was shown, under some restrictions, that these functions are the only ones that satisfy a $q$-analogue of the Hardy result~\cite{Ha} about functions \emph{orthogonal with respect to their own zeros}.
We have the following limit
\begin{gather*}\lim_{q\rightarrow1}J_{\nu}\left(\frac{1-q}{2}x;q\right)=J_{\nu}(x) ,\end{gather*}
where $J_{\nu}(x)$ is the (classical) Bessel function of the first kind~\cite{Watson} of order~$\nu$,{\samepage
\begin{gather*} J_{\nu}(x):= \left(\frac{x}{2}\right)^{\nu} \sum_{k=0}^{+\infty}\frac{(-1)^k\big(\frac{x}{2}\big)^{2k}}{k!\Gamma(\nu+k+1)} ,\end{gather*}
which shows that $J_{\nu }(z;q)$ is a $q$-analogue of the Bessel function $J_{\nu}(z)$.}
Exton originally in \cite{E1978,E} and later Koelink and Swarttouw in \cite[Proposition~3.5, p.~696]{KS}, proved that the function~(\ref{def}) satisfies an orthogonality of the form
\begin{gather}\label{ort}
\int_{0}^{1}xJ_{\nu }\big(qj_{n\nu}x;q^{2}\big)J_{\nu}\big(qj_{m\nu}x;q^{2}\big){\rm d}_{q}x=\eta_{n,\nu}\delta _{n,m},\\
\eta_{n,\nu}\equiv\eta_{n,\nu}(q)= \int_{0}^{1}tJ_{\nu}^2\big(qj_{n\nu}t;q^{2}\big){\rm d}_{q}t ,\nonumber
\end{gather}
where $j_{n\nu}\equiv j_{n\nu}\big(q^2\big)$, with $j_{1\nu }(q^{2})<j_{2\nu }(q^{2})<\cdots$, represent the (ordered) positive zeros of $J_{\nu}\big(z;q^2\big)$ and the $q$-integral in the interval $[0,1]$ is the one introduced by Thomae (in~1869 and in~1870)
\begin{gather}\label{qintegral-0a}
\int_{0}^{1}f(t){\rm d}_{q}t:= (1-q ) \sum_{k=0}^{+\infty }f\big(q^{k}\big)q^{k} ,
\end{gather}
which was later generalized by Jackson to any interval $[a,b]$ (see \cite[p.~19]{GR}).
Using the definition~(\ref{qintegral-0a}) we may consider an inner product by setting
\begin{gather}\label{inner-product}
\langle f,g\rangle :=\int_{0}^{1}f(t)\overline{g(t)}{\rm d}_{q}t .
\end{gather}
The resulting Hilbert space is commonly denoted by $L_{q}^{2}[0,1]$; it is a separable Hilbert space \cite{Ann,CP}, consisting of the (quotient) set of functions $f$ such that
\begin{gather*} \int_{0}^{1}|f(t)|^2{\rm d}_{q}t<+\infty .\end{gather*}
Abreu and Bustoz showed in \cite{AB} that the sequence $\{u_{k}\}_{k}$, where
\begin{gather*}
u_{k}(x)=\frac{x^{\frac{1}{2}}J_{\nu }\big(j_{k\nu }qx;q^{2}\big)}{\big\Vert x^{\frac{1}{2}}J_{\nu }\big(j_{k\nu }qx;q^{2}\big)\big\Vert } ,
\end{gather*}
defines a \emph{complete} system in $L_{q}^{2}[0,1]$, meaning that, whenever a function $f$ is in $L_{q}^{2}[0,1]$, if
\begin{gather*} \int_{0}^{1}f(x)u_{k}(x){\rm d}_{q}x=0 , \qquad k=1,2,3,\ldots ,\end{gather*}
then $f \big(q^{k}\big)=0$, $k=0,1,2,\ldots$.
Regarding criteria for completeness in $L_{q}[0,1]$ see \cite{A}, with an interesting application to a~$q$-version of the uncertainty principle via the $q$-Hankel transform and a~completeness property of the third Jackson $q$-Bessel function.
Basic Fourier expansions were studied in \cite{BC,BS}, with respect to quadratic grids and to linear grids, respectively. For an overview of basic Fourier expansions see~\cite{S2003}. Results regarding convergence issues concerning basic Fourier expansions involving the basic sine and cosine functions considered by Suslov~\cite{S}, which are equivalent to the ones introduced by Exton~\cite{E}, were presented in~\cite{JLC,JLC2}. For properties connected to these or related functions we refer to~\cite{St}. For new definitions of $q$-exponential and $q$-trigonometric functions see~\cite{C}. Since we are using and proving some asymptotic results, we highlight~\cite{D} on this subject, where a~complete asymptotic expansion for the $q$-Pochhammer symbol (or, the infinite $q$-shifted factorial $(z;q)_{\infty}$) was exhibited. We also point out~\cite{SS2016}, with an appendix where, among others, asymptotic results for the theta function, for the ${_1}\phi_1(0;\omega;q,z)$ function and for its derivative were presented. These last two, with a useful separation of the terms that \emph{increase} from the terms that \emph{decrease}, were crucial to establish our results on the third Jackson $q$-Bessel function.
Other publications also show estimates or inequalities involving the third Jackson $q$-Bessel function: for instance, equation~(3.2.14) of \cite{S-PhD} when $\nu=n$, equation~(2.4) of Proposition~2.1 from~\cite{KooS}, Lemma~1 from~\cite{BBEB} and~\cite{ESF} for the particular case of third Jackson $q$-Bessel function of order zero.
In our judgement the main results of this work are Theorems~\ref{asymptotic-derivativeJ3-at-zeros},~\ref{shiftedzero},
\ref{AsymptoticJ3-at-shifted-zeros} and~\ref{Riemann--Lebesgue-2}. We also emphasize Theorem~\ref{signderivative-J3-2} since it was decisive for the proof of Theorem~\ref{shiftedzero} and mostly because of its important Corollaries~\ref{monotonyJ3-1} and \ref{monotonyJ3-2}. We believe that all the results stated in Section~\ref{asymptotic-properties-J3} and Theorem \ref{Riemann--Lebesgue-2} of Section~\ref{Riemann--Lebesgue-theorem} are original.
Knowing the asymptotic behaviors given by Theorems \ref{asymptotic-derivativeJ3-at-zeros} and \ref{AsymptoticJ3-at-shifted-zeros} is decisive to study convergence properties of the basic Fourier--Bessel expansions. This is an important issue and, in our opinion, it is the most relevant contribution of the present work.
Many questions concerning basic Fourier or basic Fourier--Bessel expansions can be raised: analogues of Dirichlet's kernel, Riemann--Lebesgue theorem, Dini's condition or summability (Fej\'er's theorem) and many other topics are open problems since some of the nice properties used in the corresponding proofs are no longer valid in the context of basic expansions. Regarding this, we approach these difficulties and push a little further towards a $q$-analogue of the Riemann--Lebesgue theorem.
The paper is organized as follows: in Section~\ref{def-pre} we collect the main definitions and preliminary results that were taken from other publications; in Section~\ref{asymptotic-properties-J3} we present some asymptotic behavior of the third Jackson $q$-Bessel function and of its derivative when computed at certain points. We also study the asymptotic behavior of the zeros $j_{m\nu}$ and their relations with the ``shifted'' zeros $qj_{m\nu}$ or $\frac{j_{m\nu}}{q}$, for large values of $m=1,2,3,\ldots$ and explore its consequences to obtain other results; we finish with Section~\ref{Riemann--Lebesgue-theorem} where an analogue of the Riemann--Lebesgue theorem concerning basic Fourier--Bessel expansion is proved.
\section{Definitions and preliminary results}\label{def-pre}
Fixing $0<q<1$ and following the standard notations of \cite{AAR,GR}, the $q$-shifted factorial for a~finite positive integer~$n$ is defined by
\begin{gather*}
(a;q)_{n}= ( 1-q ) ( 1-aq )\cdots\big(1-aq^{n-1}\big)
\end{gather*}
and the zero and infinite cases as
\begin{gather*}
(a;q)_{0}=1 ,\qquad (a;q)_{\infty }=\lim\limits_{n\rightarrow \infty }(a;q)_{n} .
\end{gather*}
The third Jackson $q$-Bessel function has a countably infinite number of real and simple zeros, as was shown in~\cite{KS}. In \cite[Theorem~2.3]{ABC} the following theorem was proved:
\begin{Theorem}\label{TheoremA} For every $q\in {}]0,1[$, there exists $k_{0}\in \mathbb{N}$ such that, if $k\geq k_{0}$ then
\begin{gather*}
j_{k\nu }=q^{-k+\epsilon _{k}^{(\nu )}(q^{2})} ,
\end{gather*}
with
\begin{gather*}
0<\epsilon _{k}^{(\nu )}\big(q^{2}\big)<\alpha _{k}^{(\nu )}\big(q^{2}\big) ,
\qquad \text{where} \quad
\alpha _{k}^{(\nu )}\big(q^{2}\big)=\frac{\log {\big( 1-q^{2(k+\nu)}/\big(1-q^{2k}\big)\big) }}{2\log q} .
\end{gather*}
\end{Theorem}
On this subject see \cite{AM} and \cite{SS2016}. The latter one improved the accuracy of the asymptotic expression for the zeros of the basic hypergeometric function ${_1}\phi_1\big(0;\omega;q,z\big)$, which appears in the definition~(\ref{def2}) of the Hahn--Exton $q$-Bessel function.
See also~\cite{H} where Hayman obtained an expression for the asymptotic behavior of the zeros of a certain class of entire functions, which perhaps may be extended to the third Jackson $q$-Bessel function.
Using Taylor expansion it can be shown that, as $k\rightarrow \infty$,
\begin{gather}\label{asymptoticbehavior}
\alpha _{k}^{(\nu )}\big(q^{2}\big)=\mathcal{O}\big(q^{2k}\big) .
\end{gather}
Formally, the $q$-Fourier Bessel series associated with a function $f$, by the orthogonal relation~(\ref{ort}), is defined by
\begin{gather*}
S_{q}^{\nu}[f](x):=\sum_{k=1}^{\infty }b_{k}^{(\nu)}\left(f\right)x^{\frac{1}{2}} J_{\nu}\big(qj_{k\nu }x;q^{2}\big) ,
\end{gather*}
with the coefficients $b_{k}^{(\nu)}$ given by
\begin{gather*}
b_{k}^{(\nu)}(f) =\frac{1}{\eta_{k,\nu}} \int_{0}^{1}t^{\frac{1}{2}}f(t)J_{\nu }\big(qj_{k\nu}t;q^{2}\big){\rm d}_{q}t ,
\end{gather*}
or, which we rather prefer,
\begin{gather}\label{q-FourierSeries}
S_{q}^{(\nu)}[f](x):=\sum_{k=1}^{+\infty }a_{k}^{(\nu)} (f ) J_{\nu}\big(qj_{k\nu }x;q^{2}\big) ,
\end{gather}
with the coefficients $a_{k}^{(\nu)}$ given by
\begin{gather}\label{q-FCoefficient}
a_{k}^{(\nu)}(f) =\frac{1}{\eta_{k,\nu}}\int_{0}^{1}tf(t)J_{\nu }\big(qj_{k\nu}t;q^{2}\big){\rm d}_{q}t
\end{gather}
and $\eta _{k,\nu}$ by
\begin{gather}
\int_{0}^{1}tJ_{\nu}^2\big(qj_{k\nu}t;q^{2}\big){\rm d}_{q}t = \frac{q-1}{2}q^{\nu -1}J_{\nu +1}\big(qj_{k\nu
};q^{2}\big)J_{\nu}^{\prime}\big(j_{k\nu };q^{2}\big) \nonumber\\
\hphantom{\int_{0}^{1}tJ_{\nu}^2\big(qj_{k\nu}t;q^{2}\big){\rm d}_{q}t }{} = \frac{q-1}{2j_{k\nu}}q^{\nu-2}J_{\nu}\big(qj_{k\nu};q^{2}\big)J_{\nu}^{\prime}\big(j_{k\nu};q^{2}\big) ,\label{eta}
\end{gather}
where the last equality can be derived from \cite[Proposition~3.5]{KS} or \cite[Proposition~5]{JLC3}.
The asymptotic behavior of the $q$-integral that appears in the Fourier coefficient~(\ref{q-FCoefficient}), as well as the asymptotic behavior (as $k\to\infty$) of the factors $J_{\nu}\big(qj_{k\nu};q^{2}\big)$ and $J_{\nu}^{\prime}\big(j_{k\nu};q^{2}\big)$ appearing in~$\eta _{k,\nu}$, are crucial for further developments related with convergence issues of the Fourier--Bessel expansion~(\ref{q-FourierSeries}). Studying those behaviors will be the main purpose of this work.
Using the expansion obtained by Olde Daalhuis \cite[equation~(3.13), p.~905]{D} for the (infinite) $q$-shifted factorial (or $q$-Pochhammer symbol), \v{S}tampach and \v{S}\v{t}ov\'i\v{c}ek~\cite{SS2016} rewrote it in the following clearer form: considering the notation
\begin{gather}\label{A.1}
\tilde{q}=e^{\frac{4\pi^2}{\ln{(q)}}} ,\qquad\beta(z)=\frac{\pi\ln{(z)}}{\ln{(q)}}
\end{gather}
and
\begin{gather}\label{A.2}
A(z)=2q^{-\frac{1}{12}}\sqrt{z}e^{-\frac{\ln^2{(z)}}{2\ln{(q)}}+\frac{\pi^2}{3\ln{(q)}}}
\big|\big(\tilde{q}e^{2i\beta(z)};\tilde{q}\big)_{\infty}\big|^2
\end{gather}
then
\begin{gather*}
(z;q)_{\infty}=\frac{A(z)}{\big(\frac{q}{z};q\big)_{\infty}}\sin{(\beta(z))},
\end{gather*}
where $z>0$.
Using a symmetric relation \cite[equation~(2.3), p.~448]{KooS} satisfied by the basic function ${_1}\phi_1(0;\omega;q,z)$, Olde Daalhuis \cite[pp.~907--908]{D} describes briefly how to obtain an asymptotic expansion for the function
\begin{gather*}J_{\nu}\big(z;q^{2}\big)=z^{\nu}\frac{(q^{\nu+1};q)_{\infty}}{(q;q)_{\infty}}
{_1}\phi_1\big(0;q^{2(\nu+1)};q,qz^2\big).\end{gather*}
Later, \v{S}tampach and \v{S}\v{t}ov\'i\v{c}ek \cite{SS2016}, proved the following theorem which displays an asymptotic behavior for the function ${_1}\phi_1(0;\omega;q,z)$ and for its derivative, as $z\to\infty$.
\begin{Theorem} \label{TheoremB} Let $K(z):=\big[\frac 1 2-\frac{\ln{(z)}}{\ln{(q)}}\big]$ where $[x]$ represents the integer part of $x\in\mathbb{R}$. With the notation \eqref{A.1}, \eqref{A.2} and assuming that $0\leq \omega<1$, there exist functions $B(\omega,z)$ and $C(\omega,z)$ such that
\begin{gather*}{_1}\phi_1(0;\omega;q,z)=\frac{B(\omega,z)}{(\omega;q)_{\infty}}\\
\qquad{} \times \left(A(z)\sin{(\beta(z))}+(-1)^{K(z)+1}q^{\frac{(K(z)+1)K(z)}{2}}
\omega^{K(z)+1}\frac{\big(q^{K(z)+1}z;q\big)_{\infty}}{(q;q)_{\infty}}C(\omega,z)\right),
\end{gather*}
where, for $\omega$ fixed, $B(\omega,z)=1+O\big(z^{-1}\big)$, $C(\omega,z)=1+O\big(z^{-1}\big)$ as $z\to+\infty$.
\end{Theorem}
\begin{Theorem} \label{TheoremC} Under the same assumptions of the previous theorem,
\begin{gather*}
\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z} = \frac{A(z)}{(\omega;q)_{\infty}z} \left(\left(-\frac{\beta(z)}{\pi}+\frac{1}{2}\right)\sin{(\beta(z))}+ \frac{\pi}{\ln{(q)}}\cos{(\beta(z))} \right. \\
\left.\hphantom{\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z} =}{} +\frac{8\pi}{\ln{(q)}}\sum_{k=1}^{\infty} \frac{\tilde{q}^k}{\big|1-\tilde{q}^ke^{-2i\beta(z)}\big|^2} \sin^2{(\beta(z))}\cos{(\beta(z))}+O\left(\frac{\ln{(z)}}{z}\right)\right),
\end{gather*}
as $z\to+\infty$.
\end{Theorem}
\section[Asymptotic properties of the function $J_{\nu }\big(z;q^2\big)$ and its derivative]{Asymptotic properties of the function $\boldsymbol{J_{\nu }\big(z;q^2\big)}$\\ and its derivative}\label{asymptotic-properties-J3}
Theorem~2 of \cite[p.~12]{JLC3},
\begin{gather*}\big| J_{\nu }\big(qj_{k\nu};q\big)\big|\leq A_{\nu}(q)
q^{-\big(k+\frac{\nu-2}{2}-\epsilon_k^{(\nu)}\big)^2} , \qquad \text{where} \quad A_{\nu}(q)>0 ,\end{gather*}
establishes an upper bound for the asymptotic behavior of $J_{\nu}\big(qj_{k\nu};q^{2}\big)$ as $k\to\infty$.
We notice that this bound can be extended to the cases{\samepage
\begin{gather*}
\big| J_{\mu }\big(qj_{k\nu};q\big)\big|\leq B_{\mu}(q)
q^{-\big(k+\frac{\mu-3}{2}-\epsilon_k^{(\nu)}\big)^2} , \qquad \text{where} \quad B_{\mu}(q)
=\frac{q^{\frac{\mu}{2}\big(\frac{\mu}{2}-1\big)}}{\big(1-q^2\big)\big(q^2;q^2\big)^2_{\infty}} .
\end{gather*}
Its proof essentially coincides with the corresponding one in \cite{JLC3}, so we omit it.}
However, at least when $\mu=\nu$ or $\mu=\nu+1$, the above estimate for $J_{\mu}\big(qj_{k\nu};q\big)$ does not seem accurate. As a matter of fact, by Theorem~\ref{TheoremA} and (\ref{asymptoticbehavior}), as $k\to\infty$, the product $qj_{k\nu}$ is ``close'' to the positive zero $ j_{k-1,\nu}$ of the function $J_{\nu }\big(z;q^2\big)$ and, as a consequence, we expect $J_{\nu}\big(qj_{k\nu};q^{2}\big)$ to approach zero. So we look for a better bound when $\mu=\nu$ or $\mu=\nu+1$.
Because of the basic hypergeometric representation~(\ref{def2}), in order to keep the results more general and applicable to other situations, at the end of this section we present a subsection with the corresponding main results for the function ${_1}\phi_1(0;\omega;q,z)$.
\subsection[Asymptotic properties of $J_{\nu}^{\prime}\big(z;q^2\big)$]{Asymptotic properties of $\boldsymbol{J_{\nu}^{\prime}\big(z;q^2\big)}$}
During this subsection and to avoid any confusion, most of the times we prefer to use $\frac{\partial J_{\nu }(z;q^2)}{\partial z}$ rather than $J_{\nu}^{\prime}\big(z;q^2\big)$.
\begin{Theorem}\label{signderivative-J3-2}
Let $\{\theta_m\}_m$ be a sequence such that $0\leq\theta_m<1$ for $m=1,2,3,\ldots$:
\begin{enumerate}\itemsep=0pt
\item[$(i)$] if $\lim\limits_{m\to\infty}m\theta_m=0$ then $\operatorname{sgn}\Big(\frac{\partial J_{\nu}(z;q^2)}{\partial z}_{\big|z=q^{-m+\theta_m}}\Big)=(-1)^{m}$;
\item[$(ii)$] if $\lim\limits_{m\to\infty}m\theta_m=\infty$ then $\operatorname{sgn}\Big(\frac{\partial J_{\nu }(z;q^2)}{\partial z}_{\big|z=q^{-m+\theta_m}}\Big)=(-1)^{m-1}$, being both signs valid for large values of~$m$.
\end{enumerate}
\end{Theorem}
\begin{proof} By the definition of the Hahn--Exton $q$-Bessel function~(\ref{def}) we may write
\begin{gather*}
J_{\nu}(z;q^2)= \frac{\big(q^{2(\nu+1)};q^2\big)_{\infty}}{\big(q^2;q^2\big)_{\infty}}z^{\nu}{_1}\phi_1\big(0;q^{2(\nu+1)};q^2,q^2z^2\big).
\end{gather*}
Computing its derivative one gets
\begin{gather*}
\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}= \frac{\big(q^{2(\nu+1)};q^2\big)_{\infty}}{\big(q^2;q^2\big)_{\infty}} \\
\hphantom{\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}=}{}\times
\left(\nu z^{\nu-1}{_1}\phi_1\big(0;q^{2(\nu+1)};q^2,q^2z^2\big)+ 2q^2z^{\nu+1}\frac{\partial {_1}\phi_1\big(0;q^{2(\nu+1)};q^2,y\big)}{\partial y}_{\big|y=q^{2}z^2}\right).
\end{gather*}
Now, by Theorems~\ref{TheoremB} and~\ref{TheoremC}, with the notation $\omega=q^{2(\nu+1)}$ and~(\ref{A.1}), (\ref{A.2}) with $q$ shifted to $q^2$, as $z\to+\infty$, we have
\begin{gather}
\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}\equiv J_{\nu}^{\prime}\big(z;q^2\big)
=\frac{z^{\nu-1}}{(q^2;q^2)_{\infty}}\Bigg\{A\big(q^2z^2\big)\Bigg\{\left(\nu B\big(\omega,q^2z^2\big)-
\frac{2}{\pi}\beta\big(q^2z^2\big)+1\right)\nonumber\\
\hphantom{\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}\equiv}{} \times \sin \big(\beta\big(q^2z^2\big)\big)+\frac{\pi}{\ln{q}}\cos{\big(\beta\big(q^2z^2\big)\big)} \nonumber\\
\hphantom{\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}\equiv}{} +\frac{8\pi}{\ln{q}}\!\sum_{k=1}^{\infty}\!\frac{
\tilde{q}^k}{\big|1-\tilde{q}^ke^{-2i\beta(q^2z^2)}\big|^2}
\sin^2{\big(\beta\big(q^2z^2\big)\big)}\cos{\big(\beta\big(q^2z^2\big)\big)}+
O\!\left(\frac{\ln\big(q^2z^2\big)}{q^2z^2}\right)\!\Bigg\}\!\nonumber \\
\hphantom{\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}\equiv}{}
+(-1)^{K\big(q^2z^2\big)+1} q^{\big(K\big(q^2z^2\big)+1\big)K\big(q^2z^2\big)} \omega^{K\big(q^2z^2\big)+1}\nu B\big(\omega,q^2z^2\big)\nonumber\\
\hphantom{\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}\equiv}{}
\times \frac{\big(q^{2K(q^2z^2)+4}z^2;q^2\big)_{\infty}}{\big(q^2;q^2\big)_{\infty}}C\big(\omega,q^2z^2\big)\Bigg\} .\label{derivative-J3}
\end{gather}
Taking into account that, shifting $q$ to $q^2$ and putting $z=q^{-m+\theta_m}$ for $m\in\mathbb{N}$, $\beta\big(q^2z^2\big)=(-m+1+\theta_m)\pi$, $K\big(q^2z^2\big)=\big[m-\frac{1}{2}-\theta_m\big]$, identity~(\ref{derivative-J3}) gives, as $m\to\infty$,
\begin{gather}
\frac{\partial J_{\nu }\big(z;q^2\big)}{\partial z}_{\big|z=q^{-m+\theta_m}}=
\frac{q^{(-m+\theta_m)(\nu-1)}}{\big(q^2;q^2\big)_{\infty}}\Bigg\{A\big(q^{2-2m+2\theta_m}\big)(-1)^{m-1} \nonumber\\
\qquad {} \times
\Bigg\{\big(\nu B\big(\omega,q^{2-2m+2\theta_m}\big)+2m-1-2\theta_m\big)
\sin{(\pi\theta_m)}+\frac{\pi}{\ln{q}}\cos{(\pi\theta_m)}\nonumber\\
\qquad{} +\frac{8\pi}{\ln{q}} \sum_{k=1}^{\infty}\frac{\tilde{q}^k}{\big|1-\tilde{q}^ke^{-2i\theta_m\pi}\big|^2}
\sin^2{ (\pi\theta_m )}\cos{ (\pi\theta_m )}
+O\left(\frac{\ln\big(q^{2-2m+2\theta_m}\big)}{q^{2-2m+2\theta_m}}\right)\Bigg\}\nonumber\\
\qquad {} +(-1)^{[m-\frac{1}{2}-\theta_m]+1}q^{([m-\frac{1}{2}-\theta_m]+1)[m-\frac{1}{2}-\theta_m]}
\omega^{[m-\frac{1}{2}-\theta_m]+1}\nonumber \\
\qquad{} \times\nu B\big(\omega,q^{2-2m+2\theta_m}\big)\frac{\big(q^{2[m-\frac{1}{2}-\theta_m]+4-2m+2\theta_m};q^2\big)_{\infty}}{\big(q^2;q^2\big)_{\infty}}
C\big(\omega,q^{2-2m+2\theta_m}\big)\Bigg\} .\label{derivativeJ3-at-q}
\end{gather}
We notice that, for large values of $m$,
\begin{gather*} A\big(q^{-m+\theta_m}\big)=2 q^{-\frac{(m+1)m}{2}+m\theta_m+\frac{\theta_m(1-\theta_m)}{2}+\frac{\pi^2}{3\ln{(q)}}-\frac{1}{12}}
\big|\big(e^{2i\pi\theta_m}e^{\frac{4\pi^2}{\ln{(q)}}}; e^{\frac{4\pi^2}{\ln{(q)}}}\big)_{\infty}\big|^2,
\end{gather*} hence
\begin{gather}
A\big(q^{2-2m+2\theta_m}\big) = 2q^{-m(m-1)+2(m-1)\theta_m+\theta_m(1-\theta_m)+\frac{2\pi^2}{3\ln{(q)}}-\frac{1}{6}}
\big|\big(e^{2i\pi\theta_m}e^{\frac{2\pi^2}{\ln{(q)}}};
e^{\frac{2\pi^2}{\ln{(q)}}}\big)_{\infty}\big|^2>0\!\!\!\!\label{Aq2}
\end{gather}
and, by Theorem~\ref{TheoremB}, $B(\omega,z)=1+O\big(z^{-1}\big)$, $C(\omega,z)=1+O\big(z^{-1}\big)$ as $z\to+\infty$. We also
note that $\big[m-\frac{1}{2}-\theta_m\big]$ equals $m-1$ or $m-2$.
Now, from (\ref{derivativeJ3-at-q}), we conclude the following:
On one hand, if $\lim\limits_{m\to\infty}m\theta_m=0$ then, as $m\to\infty$, the dominant term of the sign of~(\ref{derivativeJ3-at-q}) is
$(-1)^{m-1}\frac{\pi}{\ln{(q)}}\cos{(\pi\theta_m)}$, with $\frac{\pi}{\ln{(q)}}<0$. This proves part~(i) of the theorem.
On the other hand, if $\lim\limits_{m\to\infty}m\theta_m=\infty$ then, as $m\to\infty$, the dominant term for the sign turns out to be
$(-1)^{m-1}(2m-1-2\theta_m)\sin{(\pi\theta_m)}$, which proves part~(ii).
\end{proof}
\begin{Remark}The assumption of Theorem \ref{signderivative-J3-2} requiring that $0\leq\theta_m<1$ for $m=1,2,3,\ldots$, can be weakened
(with minor changes in the corresponding proof) to $0\leq\theta_m<1$ for sufficient large values of~$m$.
Also, when the sequence $\{m\theta_m\}_{m}$ converges to a strictly positive real number, or when it is a bounded but not convergent sequence, then it is also possible to state conditions in order to obtain conclusions.
\end{Remark}
We notice that if $\{\theta_m^*\}_m$ is any sequence which satisfies $0<\theta_m^*<1$ for all $m=1,2,3,\ldots$ and $\lim\limits_{m\to\infty}m\theta_m^*=0$ then, by (\ref{derivativeJ3-at-q}), we conclude that part~(i) of the Theorem~\ref{signderivative-J3-2} remains true for every other sequence $\{\gamma_m\}_m$ such that $0<\gamma_m\leq\theta_m^*$. This implies the next result.
\begin{Corollary}\label{monotonyJ3-1} Let $\{\theta_m^*\}_m$, with $0<\theta_m^*<1$, be a sequence such that $\lim\limits_{m\to\infty}m\theta_m^*=0$. Then, for large values of $m$, the sign of $\frac{\partial J_{\nu }(z;q^2)}{\partial z}$ remains constant in each interval $\big]q^{-m+\theta_m^{*}},q^{-m}\big[$.
\end{Corollary}
In particular, because of (\ref{asymptoticbehavior}), the following corollary follows immediately.
\begin{Corollary}\label{monotonyJ3-2}
Taking $\theta_m^*=\alpha_m^{(\nu)}$, for $m=1,2,3,\ldots$, as in Theorem~{\rm \ref{TheoremA}}, then, for large values of~$m$,
the sign of $\frac{\partial J_{\nu }(z;q^2)}{\partial z}$ remains constant in each interval $\big]q^{-m+\alpha_m^{(\nu)}},q^{-m}\big[$.
\end{Corollary}
We end this subsection with the following theorem.
\begin{Theorem}\label{asymptotic-derivativeJ3-at-zeros}
For large values of $m$,
\begin{gather*}\frac{\partial J_{\nu}\big(z;q^2\big)}{\partial z}_{\big|z=j_{m\nu}}
\equiv J_{\nu}^{\prime}\big(j_{m\nu};q^{2}\big)=
O\big(q^{-m(m+\nu-2)}\big) ,\qquad \text{as}\quad m\to\infty .\end{gather*}
\end{Theorem}
\begin{proof}
By Theorem~\ref{TheoremA}, consider $j_{m\nu}=q^{-m+\epsilon_m^{(\nu)}}$, where $0<\epsilon_m^{(\nu)}<\alpha_m^{(\nu)}$, and replace $\theta_m$ by~$\epsilon_m^{(\nu)}$ in~(\ref{derivativeJ3-at-q}) and~(\ref{Aq2}).
By (\ref{asymptoticbehavior}) we have $\lim\limits_{m\to\infty}m\alpha_m^{(\nu)}=0$ hence, by Theorem~\ref{TheoremA}, we also have $\lim\limits_{m\to\infty}m\epsilon_m^{(\nu)}=0$. Furthermore, taking into consideration that
\begin{gather}\label{Aq2epsilon}
A\big(q^{2-2m+2\epsilon_m^{(\nu)}}\big)=2q^{-m(m-1)+2(m-1)\epsilon_m^{(\nu)}+\epsilon_m^{(\nu)}(1-\epsilon_m^{(\nu)})+
\frac{2\pi^2}{3\ln{(q)}}-\frac{1}{6}}\big|\big(e^{2i\pi\epsilon_m^{(\nu)}}e^{\frac{2\pi^2}{\ln{(q)}}};
e^{\frac{2\pi^2}{\ln{(q)}}}\big)_{\infty}\big|^2
\end{gather}
then, for large values of $m$, the resulting dominant term of $J_{\nu}^{\prime}\big(j_{m\nu};q^{2}\big)$ from~(\ref{derivativeJ3-at-q}) is
\begin{gather}\label{dominant-term}
\frac{q^{(-m+\epsilon_m^{(\nu)})(\nu-1)}}{\big(q^2;q^2\big)_{\infty}}A\big(q^{2-2m+2\epsilon_m^{(\nu)}}\big)
(-1)^{m-1}\frac{\pi}{\ln{q}}\cos{\big(\pi\epsilon_m^{(\nu)}\big)} .
\end{gather}
Introducing (\ref{Aq2epsilon}) into (\ref{dominant-term}) and, again, using $\lim_{m\to\infty}m\alpha_m^{(\nu)}=0$, then we immediately conclude that
\begin{gather*}
\frac{\partial J_{\nu}\big(z;q^2\big)}{\partial z}_{\big|z=j_{m\nu}}\equiv
J_{\nu}^{\prime}\big(j_{m\nu};q^{2}\big)=O\big(q^{-m(m+\nu-2)}\big) ,\qquad \text{as}\quad m\to\infty .\tag*{\qed}
\end{gather*}\renewcommand{\qed}{}
\end{proof}
\subsection[Behavior of $J_{\nu }\big(qj_{k\nu};q^2\big)$]{Behavior of $\boldsymbol{J_{\nu }\big(qj_{k\nu};q^2\big)}$}
We begin this subsection by quoting the following theorem, where $j_{k\nu}$, $\epsilon_{k}^{(\nu)}\equiv\epsilon_{k}^{(\nu)}\big(q^2\big)$ and $\alpha_{k}^{(\nu)}\equiv\alpha_{k}^{(\nu)}\big(q^2\big)$ respects the notations of~(\ref{ort}) and Theorem~\ref{TheoremA}.
\begin{Theorem}\label{shiftedzero}
For large values of $k$,
\begin{gather*}qj_{k\nu}\in {}\big ] j_{k-1,\nu} , q^{-k+1} \big[ .\end{gather*}
\end{Theorem}
\begin{proof}From (\ref{ort}) with $m=n=k$ and by~(vii) of Proposition~5 \cite[p.~330]{JLC3}, we get
\begin{gather}\label{Ineq1}
J_{\nu}\big(qj_{k\nu};q^2\big)J_{\nu}^{\prime}\big(j_{k\nu};q^2\big)<0 .
\end{gather}
However, by (\ref{asymptoticbehavior}),
$\lim\limits_{k\to\infty}k\alpha_k^{(\nu)}=0$, hence one may
conclude, by Theorem \ref{signderivative-J3-2} and Corollary
\ref{monotonyJ3-2}, that the sign of
\begin{gather*}
\frac{\partial J_{\nu}\big(z;q^2\big)}{\partial z}= J_{\nu}^{\prime}\big(z;q^2\big)
\end{gather*}
in the interval $\big]q^{-k+\alpha_k^{(\nu)}},q^{-k}\big[$ is the opposite to the sign in $\big]q^{-k+1+\alpha_{k-1}^{(\nu)}},q^{-k+1}\big[$, for large values of~$k$.
Thus, for large values of $k$, by Theorem~\ref{TheoremA},
\begin{gather}\label{Ineq2}
J_{\nu}^{\prime}\big(j_{k\nu};q^2\big)J_{\nu}^{\prime}\big(j_{k-1,\nu};q^2\big)<0 .
\end{gather}
Using, now, (\ref{Ineq1}) and (\ref{Ineq2}) we may write, for large values of $k$,
\begin{gather*}
J_{\nu}\big(qj_{k\nu};q^2\big)J_{\nu}^{\prime}\big(j_{k-1,\nu};q^2\big)>0 .
\end{gather*}
This guarantees that, for large values of $k$,
\begin{gather*}
qj_{k,\nu}>j_{k-1,\nu} ,
\end{gather*}
which proves the theorem since, trivially by Theorem~\ref{TheoremA},
$qj_{k,\nu}=q^{1-k+\epsilon_k^{(\nu)}}<q^{1-k}$.
\end{proof}
The following corollaries are immediate consequences of the previous theorem.
\begin{Corollary} For large values of $k$, the sequence $\big\{\epsilon_{k}^{(\nu)}\big\}_{k}$ that appears in Theorem~{\rm \ref{TheoremA}} is strictly decreasing, i.e., there exists a positive integer $k_0$ such that $\epsilon_{k+1}^{(\nu)}<\epsilon_{k}^{(\nu)}$ whenever $k\geq k_0$.
\end{Corollary}
\begin{proof} The previous theorem guarantees that, for large values of $k$, $qj_{k\nu}>j_{k-1,\nu}$, which is equivalent to $q^{1-k+\epsilon_k^{(\nu)}}>q^{1-k+\epsilon_{k-1}^{(\nu)}}$, hence $\epsilon_k^{(\nu)}<\epsilon_{k-1}^{(\nu)}$ for large values of $k$.
\end{proof}
\begin{Corollary} For large values of $k$,
\begin{gather*}\frac{j_{k\nu}}{q}\in {}\big] q^{-k-1+\alpha_{k+1}^{(\nu)}} , j_{k+1,\nu} \big[ .\end{gather*}
\end{Corollary}
\begin{proof}
According to Theorem~\ref{TheoremA} we have both $j_{k\nu}= q^{-k+\epsilon_k^{(\nu)}}$ and $j_{k+1,\nu} = q^{-k-1+\epsilon_{k+1}^{(\nu)}}$. Therefore, $\frac{j_{k\nu}}{q}= q^{-k-1+\epsilon_{k}^{(\nu)}}$. However, since by the previous corollary, there exists a positive integer $k_0$ such that $\epsilon_{k+1}^{(\nu)}<\epsilon_{k}^{(\nu)}$ whenever $k\geq k_0$, then $q^{\epsilon_k^{(\nu)}}<q^{\epsilon_{k+1}^{(\nu)}}$ whenever $k\geq k_0$, hence it follows that $\frac{j_{k\nu}}{q}<j_{k+1,\nu}$ and, by Theorem~\ref{TheoremA}, $\frac{j_{k\nu}}{q}>q^{-k-1+\alpha_k^{(\nu)}}$, both for large values of $k$.
\end{proof}
We now prove the following theorem.
\begin{Theorem}\label{AsymptoticJ3-at-shifted-zeros} For large values of $k$,
\begin{gather*}\big|J_{\nu}\big(qj_{k\nu};q^2\big)\big|\leq
\frac{\big({-}q^2,-q^{2(\nu+1)};q^2\big)_{\infty}}{\big(q^2;q^2\big)_{\infty}} q^{(k+\nu)(k-1)} .\end{gather*}
\end{Theorem}
\begin{proof}On one hand, being $j_{k\nu}$, for $k=1,2,3,\ldots$, the positive zeros of the Hahn--Exton $q$-Bessel function, we have
\begin{gather}\label{1}
J_{\nu}\big(j_{k-1,\nu};q^2\big)=0, \qquad k=2,3,4,\ldots .
\end{gather}
On the other hand, by \cite[equation~(12), p.~1205]{BBEB},
\begin{gather}\label{2}
\big|J_{\nu}\big(q^{-k+1};q^2\big)\big|\leq \frac{\big({-}q^2,-q^{2(\nu+1)};q^2\big)_{\infty}}{\big(q^2;q^2\big)_{\infty}} q^{(k+\nu)(k-1)} .
\end{gather}
This last result was first presented in \cite{KooS} and it can also be obtained in an equivalent form using directly Theorem~\ref{TheoremB}.
Notice that, by Theorem~\ref{TheoremA}, $j_{k-1,\nu}=q^{-k+1+\epsilon_{k-1}^{(\nu)}}$ where, as a~consequence of~(\ref{asymptoticbehavior}), $\lim\limits_{k\to\infty}(k-1)\epsilon_{k-1}^{(\nu)}=0$. Thus, by Corollary~\ref{monotonyJ3-1}, $J_{\nu}\big(z;q^2\big)$ is strictly monotone in each interval $]j_{k-1,\nu} , q^{-k+1}[$, for large values of~$k$. Now, since by Theorem~\ref{shiftedzero}, $qj_{k,\nu}\in {}]j_{k-1,\nu} , q^{-k+1}[$, then, using~(\ref{1}) and~(\ref{2}), the theorem follows.
\end{proof}
\subsection[Corresponding properties for the function ${_1}\phi_1(0;\omega;q,z)$]{Corresponding properties for the function $\boldsymbol{{_1}\phi_1(0;\omega;q,z)}$}
\begin{Theorem}Let $\omega$ be fixed in $[0,1[$ and $\{\tau_m\}_m$ be a sequence such that $0\leq\tau_m<1$ for $m=1,2,3,\ldots$:
\begin{enumerate}\itemsep=0pt
\item[$(i)$] if $\lim\limits_{m\to\infty}m\tau_m=0$ then $\operatorname{sgn}\Big(\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial
z}_{\big|z=q^{-m+\tau_m}}\Big)=(-1)^{m+1}$;
\item[$(ii)$] if $\lim\limits_{m\to\infty}m\tau_m=\infty$ then $\operatorname{sgn}\Big(\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z}_{\big|z=q^{-m+\tau_m}}\Big)=(-1)^{m}$, being both signs valid for large values of~$m$.
\end{enumerate}
\end{Theorem}
\begin{proof}
Considering $z=q^{-m+\tau_m}$ in (\ref{A.1}) and (\ref{A.2}) one obtains, respectively,
\begin{gather*}
\beta\big(q^{-m+\tau_m}\big)=(-m+\tau_m)\pi
\end{gather*}
and
\begin{gather*}
A\big(q^{-m+\tau_m}\big)=2 q^{-\frac{(m+1)m}{2}+m\tau_m+\frac{\tau_m(1-\tau_m)}{2}+\frac{\pi^2}{3\ln{(q)}}-\frac{1}{12}}
\big|\big(e^{2i\pi\tau_m}e^{\frac{4\pi^2}{\ln{(q)}}};e^{\frac{4\pi^2}{\ln{(q)}}}\big)_{\infty}\big|^2.
\end{gather*}
Then, Theorem~\ref{TheoremC} enables one to write
\begin{gather*}
\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z}_{\big|z=q^{-m+\tau_m}} =C_q(\omega)q^{-\frac{m(m-1)}{2}+(m-1)\tau_m+\frac{(\tau_m+1)\tau_m}{2}}\\
\qquad{} \times\Bigg\{\left(m-\tau_m+\frac{1}{2}\right)\sin{(\pi(-m+\tau_m))}+\frac{\pi}{\ln{(q)}}\cos{(\pi(-m+\tau_m))} \\
\qquad{} +\frac{8\pi}{\ln{(q)}} \sum_{k=1}^{\infty}
\frac{\tilde{q}^k}{\big|1-\tilde{q}^ke^{-2i (\pi(-m+\tau_m) )}\big|^2}\sin^2{ (\pi(-m+\tau_m) )}\cos{ (\pi(-m+\tau_m) )} \\
\qquad{} +O\left(\frac{\ln{(q^{-m+\tau_m})}}{q^{-m+\tau_m}}\right)\Bigg\}
\end{gather*}
as $m\to+\infty$, or equivalently,
\begin{gather}
\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z}_{\big|z=q^{-m+\tau_m}} =
C_q(\omega)q^{-\frac{m(m-1)}{2}+(m-1)\tau_m+\frac{(\tau_m+1)\tau_m}{2}}(-1)^m \nonumber\\
\hphantom{\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z}_{\big|z=q^{-m+\tau_m}} =}{} \times\Bigg\{\left(m-\tau_m+\frac{1}{2}\right) \sin{(\pi\tau_m)}+ \frac{\pi}{\ln{(q)}}\cos{(\pi\tau_m)} \nonumber\\
\hphantom{\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z}_{\big|z=q^{-m+\tau_m}} =}{}
+\frac{8\pi}{\ln{(q)}}\sum_{k=1}^{\infty}\frac{\tilde{q}^k}{\big|1-\tilde{q}^ke^{-2i\pi\tau_m}\big|^2}
\sin^2{(\pi\tau_m)}\cos{(\pi\tau_m)} \nonumber\\
\hphantom{\frac{\partial {_1}\phi_1(0;\omega;q,z)}{\partial z}_{\big|z=q^{-m+\tau_m}} =}{} +O\left(\frac{\ln{\big(q^{-m+\tau_m}\big)}}{q^{-m+\tau_m}}\right)\Bigg\}\label{B1}
\end{gather}
as $m\to+\infty$, where $C_q(\omega)>0$.
On one hand we have that, if $\lim\limits_{m\to\infty}m\tau_m=0$ then, as $m\to\infty$, the dominant term determining the sign of (\ref{B1}) is $(-1)^m\frac{\pi}{\ln{(q)}}\cos{(\pi\tau_m)}$, with $\frac{\pi}{\ln{(q)}}$ negative. This proves part (i) of the theorem.
On the other hand, if $\lim\limits_{m\to\infty}m\tau_m=\infty$ then, as $m\to\infty$, the dominant term turns out to be $(-1)^m\big(m-\tau_m+\frac{1}{2}\big)\sin{(\pi\tau_m)}$. This proves part (ii) of the theorem.
\end{proof}
We notice that if $\{\tau_m\}_m$ is any sequence satisfying the condition (i) of the previous theorem then, by (\ref{B1}), the same conclusion of part~(i) remains true for any other sequence~$\{\gamma_m\}_m$ such that $0\leq \gamma_m\leq\tau_m$. This implies the next result.
\begin{Corollary}
Let $\{\tau_m\}_m$ be a sequence such that $\lim\limits_{m\to\infty}m\tau_m=0$. Then, for large values of~$m$, the sign of
\begin{gather*}\frac{\partial
{_1}\phi_1(0;\omega;q,z)}{\partial z}\end{gather*} remains constant in each interval $]q^{-m+\tau_m},q^{-m}[$.
\end{Corollary}
\section[$q$-analogue of the Riemann--Lebesgue theorem]{$\boldsymbol{q}$-analogue of the Riemann--Lebesgue theorem}\label{Riemann--Lebesgue-theorem}
Following the framework of \cite{CP}, we rewrite the system $\{u_m\}_{m}$ mentioned in the introduction as
\begin{gather*}u_{m}(x)=\frac{x^{\frac{1}{2}}J_{\nu }\big(j_{m\nu }qx;q^{2}\big)}{\big\Vert x^{\frac{1}{2}}J_{\nu }\big(j_{m\nu}qx;q^{2}\big)\big\Vert_{L_q^{2}[0,1]}} ,
\end{gather*}
where, by (\ref{eta}),
\begin{gather*}
\eta_{m,\nu} = \big\Vert x^{\frac{1}{2}}J_{\nu}\big(j_{m\nu}qx;q^{2}\big)\big\Vert_{L_q^{2}[0,1]}^2
= \int_{0}^{1}xJ_{\nu}^2\big(qj_{m\nu}x;q^{2}\big){\rm d}_{q}x \\
\hphantom{\eta_{m,\nu}}{} = \frac{q-1}{2j_{m\nu}}q^{\nu-2}J_{\nu}\big(qj_{m\nu};q^{2}\big)J_{\nu}^{\prime}\big(j_{m\nu};q^{2}\big).
\end{gather*}
The sequence $\{u_m\}_{m}$ defines a system of functions which is orthonormal with respect to the inner product defined in the $L_q^{2}[0,1]$ space by~(\ref{inner-product}) and with the norm $\Vert \cdot\Vert_{L_q^{2}[0,1]}$ induced by it.
In this context, we are able to state the following analogue of the Riemann--Lebesgue theorem, based on an indirect proof within the scope of the inner product spaces: \emph{if $f\in L_q^2[0,1]$ then}
\begin{gather*}\lim_{m\to\infty}\int_{0}^{1}tf(t)J_{\nu}\big(qj_{m\nu}t;q^{2}\big){\rm d}_{q}t=0 .\end{gather*}
This is true since the sequence $\{u_m\}_{m}$ is orthonormal with respect to the inner product space $L_q^{2}[0,1]$, thus the corresponding proof can be carried out as in the classical case \cite[Corollary~36.4, p.~118]{WM}, being a consequence of Bessel's inequality.
Alternatively, with a direct approach, we can extend the set of functions which satisfy the above property and state the following $q$-analogue of the Riemann--Lebesgue theorem.
\begin{Theorem}\label{Riemann--Lebesgue-2}
If $t^{\frac 1 2}f(t)\in L_q^2[0,1]$ then
\begin{gather*}\lim_{m\to\infty}\int_{0}^{1}tf(t)J_{\nu}\big(qj_{m\nu}t;q^{2}\big){\rm d}_{q}t=0 .\end{gather*}
\end{Theorem}
\begin{proof} Starting from the inner product (\ref{inner-product}) and then using the $q$-type H\"older inequality of \cite[Theorem~3.4, p.~346]{CP} with $p=2$, i.e., a $q$-type Cauchy--Schwartz inequality, we may write
\begin{gather}
\left|\int_{0}^{1}tf(t)J_{\nu}\big(qj_{m\nu}t;q^{2}\big){\rm d}_{q}t\right|
\leq \left(\int_{0}^{1}t|f(t)|^2{\rm d}_{q}t\right)^{\frac 1 2} \left(\int_{0}^{1}tJ_{\nu}^2\big(qj_{m\nu}t;q^{2}\big){\rm d}_{q}t\right)^{\frac
1 2}\nonumber \\
\hphantom{\left|\int_{0}^{1}tf(t)J_{\nu}\big(qj_{m\nu}t;q^{2}\big){\rm d}_{q}t\right|}{}
= \left(\int_{0}^{1}t|f(t)|^2{\rm d}_{q}t\right)^{\frac 1 2}\eta_{m\nu}^{\frac 1 2},\label{C-S}
\end{gather}
where, by (\ref{eta}), $\eta_{m\nu}=\frac{q-1}{2j_{m\nu}}q^{\nu-2} J_{\nu}\big(qj_{m\nu};q^{2}\big)J_{\nu}^{\prime}\big(j_{m\nu};q^{2}\big)$.
In the expression for $\eta_{m\nu}$, we already control the asymptotic behavior, as $m\to\infty$, of all its factors.
Thus, joining Theorem~\ref{TheoremA} and~(\ref{asymptoticbehavior}), together with Theorems~\ref{AsymptoticJ3-at-shifted-zeros} and~\ref{asymptotic-derivativeJ3-at-zeros}, we obtain
\begin{gather}\label{asymptotic-eta}
\eta_{m\nu}=O\big(q^{2m}\big) ,\qquad \text{as} \quad m\to\infty .
\end{gather}
Finally, using in (\ref{C-S}) the hypothesis $t^{\frac 1 2}f(t)\in L_q^2[0,1]$ and the asymptotic relation~(\ref{asymptotic-eta}), it follows
\begin{gather*}
\int_{0}^{1}tf(t)J_{\nu}\big(qj_{m\nu}t;q^{2}\big){\rm d}_{q}t=O\big(q^{m}\big) ,\qquad \text{as}\quad m\to\infty ,
\end{gather*}
which proves this version of the Riemann--Lebesgue theorem.
\end{proof}
\begin{Remark} We emphasize that (\ref{asymptotic-eta}) implies that
\begin{gather*}\big\Vert x^{\frac{1}{2}}J_{\nu }\big(j_{m\nu }qx;q^{2}\big)\big\Vert_{L_q^{2}[0,1]}
=\sqrt{\eta_{m\nu}}=O\big(q^{m}\big) ,\qquad \text{as}\quad m\to\infty ,\end{gather*} hence
\begin{gather*}\lim_{m\to\infty} \big\Vert x^{\frac{1}{2}}J_{\nu }\big(j_{m\nu }qx;q^{2}\big)\big\Vert_{L_q^{2}[0,1]}=0 .\end{gather*}
\end{Remark}
However, for the following version of the classical Riemann--Lebesgue theorem: \emph{if $f$ is Riemann or Lebesgue integrable in $[a,b]$ then}
\begin{gather*}
\lim_{\mu\to\infty}\int_{a}^{b}f(t)\sin{(\mu t)}{\rm d}t=0 ,\qquad
\lim_{\mu\to\infty}\int_{a}^{b}f(t)\cos{(\mu t)}{\rm d}t=0 ,
\end{gather*}
we do not expect to prove a similar version for the case of the basic Fourier--Bessel expansions since we do not have, in this context, the nice properties and the formulary that the classical trigonometric functions satisfy, i.e., we do not expect to prove that
\begin{gather*}\lim_{\mu\to\infty}\int_{0}^{1}tf(t)J_{\nu}\big(\mu t;q^{2}\big){\rm d}_{q}t=0, \end{gather*}
when $f\in L_q[0,1]$ or $t^{\frac 1 2}f(t)\in L_q[0,1]$.
Some of the main reasons for that possible failure rely on the fact that, in the proof of the classical Riemann--Lebesgue theorem involving the classical trigonometric functions, one uses the fact that these functions are bounded, as well as some other known properties of the sine and cosine functions.
In this last direction, for the classical Bessel function $J_{\nu}(x)$, since it fails to satisfy the properties of the trigonometric functions, an analogue of the Riemann--Lebesgue theorem \cite[p.~589]{Watson} was proved, not for the Bessel function $J_{\nu}(x)$ itself but for the function
\begin{gather*} T_n(t,x)=\sum_{m=1}^{n}\frac{2J_{\nu}\left(j_m x\right)J_{\nu}(j_m t)}{J_{\nu+1}^2(j_m)} ,\end{gather*}
where $j_m$, $m=1,2,3,\dots$, denote the positive zeros of the Bessel function $J_{\nu}(x)$ arranged in ascending order of magnitude and $0<x\leq 1$, $0\leq t\leq 1$, $\nu\geq -\frac 1 2$:
\textit{if $\int_a^bt^{\frac 1 2}f(t){\rm d}t$ exists and is absolutely convergent then}
\begin{gather*}\lim_{n\to\infty}\int_a^btf(t)T_n(t,x){\rm d}t=0 ,\qquad 0<x\leq 1\quad \text{with} \quad a<b \quad \text{and} \quad a,b\in (0,1) .
\end{gather*} With this regard see also \cite[Remark~4, p.~13]{JLC3}.
\LastPageEnding
\end{document} |
\begin{document}
\title{Biquandles with structures related to \\ virtual links and twisted links}
\author{Naoko Kamada \\
Graduate School of Natural Sciences, Nagoya City University, \\
Aichi, 467-8501, Japan\\
kamada@nsc.nagoya-cu.ac.jp\\
\\
and \\
\\
Seiichi Kamada \\
Department of Mathematics, Hiroshima University, \\
Hiroshima 739-8526, Japan\\
kamada@math.sci.hiroshima-u.ac.jp}
\maketitle
\abstract{We introduce two kinds of structures, called v-structures and t-structures, on biquandles. These structures are used for colorings of diagrams of virtual links and twisted links such that the numbers of colorings are invariants.
Given a biquandle or a quandle, we give a method of constructing a biquandle with these structures. Using the numbers of colorings, we show that Bourgoin's twofoil and non-orientable virtual $m$-foils
do not represent virtual links.}
{\it Keywords}: Biquandles; virtual links; twisted links.
Mathematics Subject Classification 2000: 57M25
\section{Introduction}
A {\em virtual link diagram} is an oriented link diagram possibly with encircled crossings, called virtual crossings, that are neither positive crossings nor negative crossings. Two diagrams are {\em equivalent} if there is a sequence of the generalized Reidemeister moves defined in \cite{Kauf99}, which are generated by moves R1, \dots, R3, V1, \dots, V4 in Figure~\ref{fgtwistmoves}. The equivalence class of a virtual link diagram is called a {\em virtual link}. Virtual links correspond to stable equivalence classes of oriented links in the trivial $I$-bundles over closed orientable surfaces \cite{CKS02, KK00, Ku03}. A {\em twisted link diagram} is a virtual link diagram which may have some bars on edges. Two diagrams are {\em equivalent} if there is a sequence of the extended Reidemeister moves defined in \cite{Bo08},
which are generated by all moves in Figure~\ref{fgtwistmoves}. The equivalence class of a twisted link diagram is called a {\em twisted link}.
(In \cite{Bo08} the extended Reidemeister moves are illustrated without orientations. Note that all moves with possible orientations are obtained by combining the moves in Figure~\ref{fgtwistmoves}. For example, see Figure~\ref{fgtwistmovesB}.)
Twisted links correspond to stable equivalence classes of oriented links in oriented $3$-manifolds that are orientation $I$-bundles over closed but not necessarily orientable surfaces \cite{Bo08}.
In this paper we define two kinds of structures on biquandles which are related to virtual links and twisted links.
{\begin{figure}
\caption{The extended Reidemeister moves}
\label{fgtwistmoves}
\end{figure}}
{\begin{figure}
\caption{Another T1 move}
\label{fgtwistmovesB}
\end{figure}}
A {\em biquandle} is a pair $(X,R)$ consisting of a set $X$ and a bijection $R: X^2 \to X^2$ satisfying certain conditions corresponding to Reidemeister moves for classical link diagrams \cite{FJK04, FRS93, St06} (Section~\ref{sect:basicsbiquandles}).
In Section~\ref{sect:vtbiquandles} we introduce the notions of a v-structure $V: X^2 \to X^2$ and a t-structure $T: X \to X$
which are additional structures on a biquandle $(X,R)$ related to virtual links and twisted links. The pair $(V, T)$ is called a vt-structure of $(X,R)$.
A coloring of a virtual link diagram by a v-structured biquandle $(X, R, V)$ or
a coloring of a twisted link diagram by a vt-structured biquandle $(X, R, V, T)$ is defined as follows: Let $D$ be a diagram of a virtual link or a twisted link. The {\em edges} of $D$ mean the connected arcs obtained when all the real crossings, virtual crossings and bars are removed.
\begin{definition}\label{def:coloring}{\rm
A {\em coloring} of $D$ by $(X, R, V)$ or $(X, R, V, T)$
is a map from the set of edges of $D$ to $X$ such that for each crossing or bar, say $v$, of $D$, if $x_1, x_2, x_3, x_4$ are elements of $X$ assigned to the edges around $v$ as in Figure~\ref{fglabelreal} then
\begin{itemize}
\item[(1)] $R(x_1, x_2) = (x_3, x_4)$ when $v$ is a positive crossing,
\item[(2)] $R^{-1}(x_1, x_2) = (x_3, x_4)$ when $v$ is a negative crossing,
\item[(3)] $V(x_1, x_2) = (x_3, x_4)$ when $v$ is a virtual crossing, and
\item[(4)] $T(x_1) = x_2$ when $v$ is a bar.
\end{itemize}
We also call a coloring by $(X, R, V)$ an {\em $(X, R, V)$-coloring}, and a coloring by $(X, R, V, T)$ an {\em $(X, R, V, T)$-coloring}.
}\end{definition}
The concept of a coloring by $(X, R, V)$ in Definition~\ref{def:coloring} and the following theorem (Theorem~\ref{thm:coloringvirtual}) were considered in \cite{BaF11}.
Refer to \cite{BaF11} for examples.
{\begin{figure}
\caption{Colorings}
\label{fglabelreal}
\end{figure}}
\begin{theorem}[\cite{BaF11}]\label{thm:coloringvirtual}
If $D$ and $D'$ are virtual link diagrams representing the same virtual link, then there is a bijection between the set of colorings of $D$ by a v-structured biquandle $(X, R, V)$ and that of $D'$.
\end{theorem}
This is generalized to twisted link diagrams.
\begin{theorem}\label{thm:coloring}
If $D$ and $D'$ are twisted link diagrams representing the same twisted link, then there is a bijection between the set of colorings of $D$ by a vt-structured biquandle $(X, R, V, T)$ and that of $D'$.
\end{theorem}
Therefore the number of colorings by a vt-structured biquandle $(X, R, V, T)$ is an invariant of a twisted link.
Given a biquandle $(X_0, R_0)$ and
automorphisms $f$
and $g$ with $f^2=1$ and $fg = gf$,
we give a method of constructing a vt-structured biquandle $(X, R, V_f, T_g)$, which we call a
{\em twisted product} of $(X_0, R_0)$.
Let $(X_0, R_0)$ be a biquandle.
We use the notation due to \cite{FJK04} such that for $a, b \in X_0$,
$$R_0 (a,b) = (b_a, a^b),$$
namely, $b_a = p_1 R_0(a,b)$ and $a^b = p_2 R_0(a,b)$, where $p_i : X_0 \times X_0 \to X_0$ is the $i$th factor projection.
\begin{theorem}\label{thm:twistedproduct}
Let $(X_0, R_0)$ be a biquandle.
Let $X = X_0 \times X_0$. Define a map $R : X^2 \to X^2$ by
$$ R( (a_1, b_1), (a_2, b_2)) = (( {a_2}_{a_1}, {b_2}^{b_1}), ({a_1}^{a_2}, {b_1}_{b_2})). $$
For automorphisms $f$ and $g$ of $(X_0, R_0)$,
define maps $V_f: X^2 \to X^2$ and $T_g: X \to X$ by
\begin{eqnarray*}
V_f ( (a_1, b_1), (a_2, b_2) ) &=& ( (f^{-1}a_2, f^{-1} b_2), ( f a_1, f b_1 )), \mbox {and} \\
T_g(a, b) &=& (g^{-1} b, g a).
\end{eqnarray*}
Then the following holds.
$(1)$ $(X, R)$ is a biquandle.
$(2)$ $V_f$ is a v-structure of $(X, R)$.
$(3)$ Suppose that $f^2 =1$ and $fg =gf$. Then $(V_f, T_g)$ is a vt-structure of $(X, R)$.
\end{theorem}
\begin{definition} {\rm
In the situation of Theorem~\ref{thm:twistedproduct}, we call the biquandle $(X, R)$ the {\em twisted product biquandle} of $(X_0, R_0)$, and the vt-structured biquandle
$(X, R, V_f, T_g)$ a {\em twisted product} of $(X_0, R_0)$. When $f=g=1$, we call the quadruplet the {\em standard twisted product} of $(X_0, R_0)$.
}\end{definition}
A {\em quandle} is a pair $(Q, \ast)$ consisting of a set $Q$ and a binary operation $\ast: Q \times Q \to Q$, $(a, b) \mapsto a \ast b$, such that (i) for any $a \in Q$, $a \ast a=a$, (ii) for any $a, b \in Q$, there exists a unique element $c$ with $c \ast b =a$, and (iii) for any $a, b , c \in Q$, $(a \ast b) \ast c = (a \ast c) \ast (b \ast c)$ \cite{FR92, Joyce1982, Matveev82}. The {\em dual} operation $\overline{\ast}$ of $\ast$ is a binary operation $\overline{\ast}: Q \times Q \to Q$ such that $a \,\overline{\ast}\, b = c \Longleftrightarrow c \ast b =a $. In Section~\ref{sect:proofs}, we use Fenn and Rourke's notation \cite{FR92}: $a \ast b$ and $a \,\overline{\ast}\, b$ are denoted by $a^b$ and $a^{\overline b}$ (or $a^{b^{-1}}$), respectively, and
$a^{bc}$ means $(a^b)^c$, etc.
When $(X_0, R_0)$ is the {\em biquandle derived from a quandle $(Q, \ast)$}, i.e.,
$X_0=Q$ and $ R_0 (x, y) = (y, x \ast y)$, we call the twisted product biquandle $(X, R)$ of $(X_0, R_0)$ the {\em twisted product biquandle of $(Q, \ast)$}.
If $f$ and $g$ are quandle automorphisms of $(Q, \ast)$, then they are biquandle automorphisms of $(X_0, R_0)$. Suppose that $f^2=1$ and $fg = gf$.
A {\em twisted product of $(Q, \ast)$} means a twisted product $(X, R, V_f, T_g)$ of $(X_0, R_0)$.
The operations $R, V_f, T_g$ on $X=Q^2$ are given as follows:
\begin{eqnarray*}
R( (a_1, b_1), (a_2, b_2) ) &=& (( a_2, {b_2} \ast {b_1}), ( {a_1} \ast {a_2}, b_1)), \\
V_f ( (a_1, b_1), (a_2, b_2) ) &=& ( (f a_2, f b_2), ( f a_1, f b_1 )), \mbox { and} \\
T_g(a, b) &=& (g^{-1} b, g a).
\end{eqnarray*}
When $f=g=1$, we have the following definition.
\begin{definition}\label{def:twistedproductquandleA} {\rm
The {\em standard twisted product} of a quandle $(Q, \ast)$, which we denote by ${\cal B}(Q, \ast)$, is a vt-structured biquandle $(X, R, V, T)$ such that
$X= Q^2$ and
\begin{eqnarray*}
R( (a_1, b_1), (a_2, b_2) ) &=& (( a_2, {b_2} \ast {b_1}), ( {a_1} \ast {a_2}, b_1)), \\
V ( (a_1, b_1), (a_2, b_2) ) &=& ( (a_2, b_2), ( a_1, b_1 )), \mbox {and} \\
T(a, b) &=& (b, a).
\end{eqnarray*}
}\end{definition}
For a quandle $(Q, \ast)$, the number of upper $(Q, \ast)$-colorings
of a virtual link diagram is an invariant of a virtual link, and so is
that of lower $(Q, \ast)$-colorings \cite{Kauf99}. (A geometric interpretation of the upper/lower knot quandles of a virtual link is given in \cite{KK00}. The upper/lower $(Q, \ast)$-colorings correspond to the homomorphisms from these geometric quandles to $(Q, \ast)$ as in the classical case \cite{FR92, Joyce1982, Matveev82}.)
Let ${\cal B}(Q, \ast)$ be the standard twisted product of a quandle $(Q, \ast)$.
\begin{theorem} \label{thm:bourgoinproductcoloring}
If $D$ is a twisted link diagram which is equivalent to a virtual link diagram $D'$, then
the number of ${\cal B}(Q, \ast)$-colorings of $D$ is the product of the number of upper $(Q, \ast)$-colorings
of $D'$ and that of lower $(Q, \ast)$-colorings
of $D'$.
\end{theorem}
\begin{corollary}\label{cor:bourgoinproductcoloring}
Let $(Q, \ast)$ be a finite quandle with $n$ elements.
If the number of ${\cal B}(Q, \ast)$-colorings of a twisted link diagram $D$ is less than $n^2$, then $D$ does not represent a virtual link.
\end{corollary}
\proof \quad Every virtual link diagram has at least
$n$ trivial upper $(Q, \ast)$-colorings and at least $n$ trivial lower $(Q, \ast)$-colorings. If $D$ is equivalent to a virtual link diagram, by Theorem~\ref{thm:bourgoinproductcoloring} there are at least $n^2$ colorings of $D$ by ${\cal B}(Q, \ast)$. \qed
Using an argument due to \cite{Do90}, Bourgoin \cite{Bo08} showed that the twisted link diagram illustrated in Figure~\ref{fgtwistedtrefoil}, which we call {\em Bourgoin's twofoil}, does not represent a classical link. He also defined a twisted link invariant, called the {\em twisted Jones polynomial}, and
showed that Bourgoin's twofoil has the same twisted Jones polynomial with a certain virtual link diagram (Figures 6 and 7 of \cite{Bo08}). Thus, as mentioned in \cite{Bo08}, one cannot distinguish it from virtual links by use of the twisted Jones polynomial.
{\begin{figure}
\caption{Bourgoin's twofoil}
\label{fgtwistedtrefoil}
\end{figure}}
We call a diagram on the left-hand side of Figure~\ref{fgtwistedmfoil} {\em a non-orientable virtual $m$-foil} and denote it by $F_m$,
where $m$ is the number of the real crossings $(m \geq 1)$. When $m=2$, it is Bourgoin's twofoil.
It has a realization as a link diagram on a projective plane depicted
on the right-hand side of the figure, where the unit disk is made into a projective plane.
By a calculation using induction on $m$, we see that the twisted Jones polynomial of $F_m$ is
$$ A^{-2m} ( A^{-4m} + (-1)^{m+1} ( 1 + A^2 + A^{-2})) \in {\bf Z}[A^{\pm 1}, M]. $$
This polynomial is also the twisted Jones polynomial of a virtual link diagram obtained when the two bars are removed from $F_m$. Thus one cannot distinguish it from virtual links by use of the twisted Jones polynomial.
{\begin{figure}
\caption{non-orientable virtual $m$-foil}
\label{fgtwistedmfoil}
\end{figure}}
In Section~\ref{sect:proofs} we study colorings of the diagram $F_m$ by the standard twisted product ${\cal B}(Q, \ast)$, and show the following.
\begin{theorem}\label{thm:twofoil}
\begin{itemize}
\item[$(1)$] For $m > m' \geq 1$, $F_m$ and $F_{m'}$ represent distinct twisted links.
\item[$(2)$]
For $m \geq 1$, $F_m$ does not represent a virtual link.
\end{itemize}
\end{theorem}
The first assertion of this theorem is also seen by the twisted Jones polynomials.
Now we have an infinite family of twisted links which are not virtual links, but they are not distinguished from virtual links by the twisted Jones polynomials. This example was suggested to the authors by Roger Fenn.
\begin{remark}\label{remark:twistedproductquandleB} {\rm
Given a quandle $(Q, \ast)$, let
$X= Q^2$ and
\begin{eqnarray*}
R( (a_1, b_1), (a_2, b_2) ) &=& (( a_2, {b_2} \, \overline{\ast}\, {b_1}), ( {a_1} \ast {a_2}, b_1)), \\
V ( (a_1, b_1), (a_2, b_2) ) &=& ( (a_2, b_2), ( a_1, b_1 )), \mbox {and} \\
T(a, b) &=& (b, a).
\end{eqnarray*}
Then $(X, R)$ is a biquandle, and $V$ is a v-structure of $(X, R)$. Since
\begin{eqnarray*}
(T \times T)R(T\times T) ( (a_1, b_1), (a_2, b_2) ) &=& ( (a_2 \, \overline{\ast}\, a_1, b_2), (a_1, b_1 \ast b_2) ) \quad \mbox{ and } \\
VRV ( (a_1, b_1), (a_2, b_2) ) &=& ( (a_2 \ast a_1, b_2), (a_1, b_1 \, \overline{\ast}\, b_2) ),
\end{eqnarray*}
the operation $T: X^2 \to X^2$ does not satisfy $(T \times T)R(T\times T) = VRV$ unless $(Q, \ast)$ is an {\em involutory} quandle, i.e., $\ast = \overline \ast$.
Thus the quadruplet
$(X, R, V, T)$ is not a vt-structured biquandle and one should not use this for colorings of twisted links.
}\end{remark}
We recall the notion of a biquandle, and prepare some lemmas in
Section~\ref{sect:basicsbiquandles}. In Section~\ref{sect:vtbiquandles} the definitions of a t-structure and a v-structure are given, and Theorem~\ref{thm:twistedproduct} is proved.
Section~\ref{sect:proofs} is devoted to proofs of Theorems~\ref{thm:coloring}, \ref{thm:bourgoinproductcoloring} and \ref{thm:twofoil}.
\section{Biquandles}\label{sect:basicsbiquandles}
For a set $X$ we denote by $X^n$ the $n$-fold Cartesian product of $X$, and denote by $p_i: X^n \to X$ the $i$th factor projection for each $i =1, \dots, n$. The composition $g \circ f$ of two maps $f$ and $g$ is also denoted by $g \cdot f$ or $gf$. The identity map $x \mapsto x$ on $X$ is denoted by $1_X$ or $1$, and
the transposition map $(x, y) \mapsto (y,x)$ on $X^2$ is denoted by $\tau_{X^2}$ or $\tau$.
The basic idea of a birack was given in \cite{FRS93}.
The following is the definition of a (strong) birack and a (strong) biquandle introduced by R.~Fenn, M.~Jordan-Santana and L.~Kauffman (Definitions~4.2 and 4.6 of \cite{FJK04}).
\begin{definition}\label{def:biquandlefenn}{\rm (\cite{FJK04})
A pair $(X, R)$ of a set $X$ and a bijection $R: X^2 \to X^2$ is a {\em birack} if the following conditions
(B1) and (B2)
are satisfied. It is a {\em biquandle} if (B1), (B2) and (B3) are satisfied.
\begin{itemize}
\item[(B1)] $R$ satisfies the set-theoretic Yang-Baxter equation, i.e.,
$$ (R \times 1) (1 \times R) (R \times 1) = (1 \times R) (R \times 1) (1 \times R) : X^3 \to X^3.$$
\item[(B2)] For $a, b \in X$, let $f_a: X \to X$ and $f^b: X \to X$ be maps defined by
$$f_a(x) = p_1 R(a,x) \quad \mbox{and} \quad f^b(x) = p_2 R(x,b).$$
Then both $f_a$ and $f^b$ are bijections for every $a$ and $ b$ of $X$.
\item[(B3)] For every $a$ and $b$ of $X$,
$$ (f_a)^{-1}(a) = f^{(f_a)^{-1}(a)} (a) \quad \mbox{and} \quad (f^b)^{-1}(b) = f_{(f^b)^{-1}(b)} (b). $$
\end{itemize}
}\end{definition}
As in Section 3 of \cite{FJK04} a birack/biquandle operation $R: X^2 \to X^2$ defines two binary operations on $X$; $(a,b) \mapsto b_a$ and $(a,b) \mapsto a^b$ such that $R(a,b) =(b_a, a^b)$. Refer to \cite{BaF08, BuF04, CSWEM09, F09, FJK04, HK07, KauMan05} for examples of biracks and biquandles.
Let $(X, R)$ and $(X', R')$ be biracks or biquandles. A map $h: X \to X'$ is called a {\em homomorphism} if
$( h \times h) R = R' (h \times h) : X^2 \to X^2$. We denote it by $h: (X, R) \to (X', R')$. An {\em isomorphism} is a bijection which is a homomorphism.
Using the notion of a sideways operation due to Fenn, et al. \cite{BuF04, F09, FJK04}, we can restate Definition~\ref{def:biquandlefenn} as follows.
\begin{definition}\label{def:biquandle} {\rm
A pair $(X, R)$ consisting of a set $X$ and a bijection $R: X^2 \to X^2$ is a {\em birack} if the following conditions
(B1) and (B2$'$)
are satisfied. It is a {\em biquandle} if (B1), (B2$'$) and (B3$'$) are satisfied.
\begin{itemize}
\item[(B1)]
$ (R \times 1) (1 \times R) (R \times 1) = (1 \times R) (R \times 1) (1 \times R) : X^3 \to X^3$.
\item[(B2$'$)] There is a unique bijection $S : X^2 \to X^2$ such that for any $x_1, \dots, x_4 \in X$,
$$ S(x_1, x_3) = (x_2, x_4) \Longleftrightarrow R(x_1, x_2) = (x_3, x_4). $$
\item[(B3$'$)] There is a bijection $s: X \to X$ such that for any $x \in X$,
$$ R(x, s(x) ) = (x, s(x) ). $$
\end{itemize}
}\end{definition}
We call the bijections $S: X^2 \to X^2$ and $s: X\to X$ above the {\em sideways operation} of $R$ and the {\em shift operation} of $R$,
and denote them by ${\rm side} R$ and ${\rm shift} R$, respectively. (Note that if $({\rm B}2')$ holds then
a bijection $s: X \to X$ in $({\rm B}3')$ is unique if it exists, since $S$ is unique.) Refer to \cite{BuF04, F09, FJK04} for sideways operations.
First we observe that Definitions~\ref{def:biquandlefenn} and \ref{def:biquandle} are equivalent, and give some lemmas on biquandles.
\begin{theorem}
$(1)$
For a bijection $R: X^2 \to X^2$, the conditions $({\rm B}2)$ and $({\rm B}2')$ are equivalent.
$(2)$
For a bijection $R: X^2 \to X^2$ satisfying $({\rm B}2)$, the conditions $({\rm B}3)$ and $({\rm B}3')$ are equivalent.
\end{theorem}
\proof \quad
(1)
Suppose $({\rm B}2)$.
Define maps $S: X^2 \to X^2$ and $S^{-1}: X^2 \to X^2$ by
\begin{eqnarray*}
S(x, y ) &=& ( (f_x)^{-1}(y), p_2 R(x, (f_x)^{-1}(y)) ) \quad \mbox{and} \quad \\
S^{-1}(x,y) &=& ( (f^x)^{-1}(y), p_1 R((f^x)^{-1}(y), x) ).
\end{eqnarray*}
Then $S S^{-1} = S^{-1} S= 1$, and $ S(x_1, x_3) = (x_2, x_4) \Longleftrightarrow R(x_1, x_2) = (x_3, x_4)$.
Let $S': X^2 \to X^2$ be another bijection such that $ S'(x_1, x_3) = (x_2, x_4) \Longleftrightarrow R(x_1, x_2) = (x_3, x_4)$. For any $x, y \in X$, since $f_{x}$ is bijective, we have $p_1 S(x,y) = p_1 S'(x,y)$. Then
$p_2 S(x,y) = p_2 R(x, p_1 S(x,y)) = p_2 R(x, p_1 S'(x,y)) = p_2 S'(x,y) $. Thus $S=S'$.
Suppose $({\rm B}2')$.
The inverse maps of $f_a$ and $f^b$ are obtained by
$$(f_a)^{-1}(x) = p_1 S(x,a) \quad \mbox{and} \quad (f^b)^{-1}(x) = p_1 S^{-1}(b,x).$$
(2)
Suppose $({\rm B}3)$.
Let $s: X \to X$ and $s^{-1}: X \to X$ be maps defined by
$$s(x) = (f_x)^{-1}(x) \quad \mbox{and} \quad s^{-1}(y) = (f^y)^{-1}(y). $$
Since $s(x) = (f_x)^{-1}(x) = f^{(f_x)^{-1}(x)}(x) =
f^{s(x)}(x)$, we have $R(x, s(x)) = (x, s(x))$. Since
$s^{-1}(y) = (f^y)^{-1}(y) = f_{(f^y)^{-1}(y)} (y) = f_{s^{-1}(y)}(y) $, we have
$R(s^{-1}(y), y) = (s^{-1}(y), y)$. Then $s: X \to X$ and $s^{-1}: X \to X$ are the inverse maps of each other.
Suppose $({\rm B}3')$.
Since $R(a, s(a)) = (a, s(a))$, we have
$ (f_a)^{-1}(a) = s(a) = f^{(f_a)^{-1}(a)} (a)$. Since $R(s^{-1}(b) , b ) =(s^{-1}(b) ,b )$, we have
$ (f^b)^{-1}(b) = s^{-1}(b) = f_{(f^b)^{-1}(b)} (b)$.
\qed
Now one may use Definition~\ref{def:biquandle} instead of Definition~\ref{def:biquandlefenn}.
We give some lemmas on biquandles, which are also valid for biracks.
\begin{lemma}
$(1)$ If $(X, R)$ is a biquandle, then $(X, R^{-1})$ is a biquandle with
${\rm side} R^{-1} = \tau \cdot {\rm side} R \cdot \tau$ and
${\rm shift} R^{-1} = {\rm shift} R $.
\end{lemma}
\proof \quad
Since $R^{-1} \times 1 = ( R \times 1)^{-1} : X^3 \to X^3$, the map $R^{-1}$ satisfies the set-theoretic Yang-Baxter equation.
Put $S' = \tau \cdot {\rm side} R \cdot \tau$. Then
$S'(x_3, x_1) = (x_4, x_2)$ $\Longleftrightarrow$ ${\rm side} R(x_1, x_3) = (x_2, x_4)$ $\Longleftrightarrow$ $R(x_1, x_2) = (x_3, x_4)$
$\Longleftrightarrow$ $(x_1, x_2) = R^{-1}(x_3, x_4)$. The uniqueness of $S'$ follows from that of ${\rm side} R$.
Let $s={\rm shift} R$. Since $R(x, s(x)) = (x, s(x))$, we have $R^{-1}(x, s(x)) = (x, s(x))$.
\qed
\begin{lemma}\label{lem:tauRtau}
If $(X, R)$ is a biquandle, then $(X, \tau R \tau )$ is a biquandle with
${\rm side} (\tau R \tau) = ({\rm side} R)^{-1}$ and
${\rm shift} (\tau R \tau) = ({\rm shift} R)^{-1} $.
\end{lemma}
\proof \quad
It is obvious that $ \tau R \tau$ is bijective.
For simplicity we denote by $\tau_1, \tau_2, R_1$ and $R_2$ the maps
$\tau \times 1,
1 \times \tau, R\times 1$ and $1 \times R$, respectively. Noting that
$\tau_1 \tau_2 \tau_1 = \tau_2 \tau_1 \tau_2$
and $R_1 \tau_2 \tau_1 = \tau_2 \tau_1 R_2$, we see that
$(\tau_1 R_1 \tau_1) (\tau_2 R_2 \tau_2) (\tau_1 R_1 \tau_1) =
\tau_1 \tau_2 \tau_1 R_1 R_2 R_1 \tau_1 \tau_2 \tau_1$ and
$(\tau_2 R_2 \tau_2) (\tau_1 R_1 \tau_1) (\tau_2 R_2 \tau_2) =
\tau_2 \tau_1 \tau_2 R_2 R_1 R_2 \tau_2 \tau_1 \tau_2$.
Thus $R_1 R_2 R_1 = R_2 R_1 R_2$ implies that
$$(\tau_1 R_1 \tau_1) (\tau_2 R_2 \tau_2) (\tau_1 R_1 \tau_1) = (\tau_2 R_2 \tau_2) (\tau_1 R_1 \tau_1) (\tau_2 R_2 \tau_2). $$
So $\tau R \tau$ satisfies the set-theoretic Yang-Baxter equation.
$\tau R \tau (x_1, x_2) = (x_3, x_4)$ $\Longleftrightarrow$
$R(x_2, x_1)= (x_4, x_3)$ $\Longleftrightarrow$
${\rm side} R (x_2, x_4) = (x_1, x_3)$ $\Longleftrightarrow$
$({\rm side} R)^{-1} (x_1, x_3) = (x_2, x_4)$. Thus ${\rm side}(\tau R \tau) = ({\rm side} R)^{-1}$. (The uniqueness of ${\rm side}(\tau R \tau)$ follows from that of ${\rm side} R$.)
Let $s={\rm shift} R$. Since $R(x, s(x))= (x, s(x))$ for every $x \in X$, we have $\tau R \tau (s(x), x) = (s(x), x)$. Thus ${\rm shift} (\tau R \tau) = ({\rm shift} R)^{-1} $. \qed
\begin{lemma}\label{tauhomo}
Let $(X, R)$ and $(X', R')$ be biquandles, and let $f: X \to X'$ be a map. The following three conditions are mutually equivalent.
\begin{itemize}
\item[${\rm (i)}$] $f$ is a homomorphism from $(X, R)$ to $(X', R')$.
\item[${\rm (ii)}$] $f$ is a homomorphism from $(X, R^{-1})$ to $(X', R'^{-1})$.
\item[${\rm (iii)}$] $f$ is a homomorphism from $(X, \tau R \tau)$ to $(X', \tau R' \tau)$.
\end{itemize}
\end{lemma}
\proof \quad
Since $( f \times f) R = R' (f \times f)$ $\Longleftrightarrow$ $R'^{-1} ( f \times f) = (f \times f) R^{-1}$, we have (i) $\Longleftrightarrow$ (ii).
Since $( f \times f) R = R' (f \times f)$ $\Longleftrightarrow$ $ \tau ( f \times f) R \tau = \tau (f \times f) R' \tau $ $\Longleftrightarrow$ $ ( f \times f) \tau R \tau = (f \times f) \tau R' \tau $, we have (i) $\Longleftrightarrow$ (iii).
\qed
\begin{lemma}\label{lem:prod}
Let $(X_1, R_1)$ and $(X_2, R_2)$ be biquandles.
Let $R: (X_1 \times X_2)^2 \to (X_1 \times X_2)^2$ be a map defined by
$$R( (a_1, b_1), (a_2, b_2) ) = ( (p_1 R_1(a_1, a_2), p_1 R_2(b_1, b_2)), (p_2 R_1(a_1, a_2), p_2 R_2(b_1, b_2)) ).$$
Then $(X_1 \times X_2, R)$ is a biquandle. Moreover, if $f_i: (X_i, R_i) \to (X_i, R_i)$ $(i=1,2)$ are homomorphisms, then
$f_1 \times f_2: (X_1 \times X_2, R) \to (X_1 \times X_2, R)$ is a homomorphism.
\end{lemma}
\proof \quad
The operation $R$ satisfies the set-theoretic Yang-Baxter equation, since $R_1$ and $R_2$ do.
The sideways operation $S$ of $R$ is given by
$$S( (a_1, b_1), (a_3, b_3) ) = ( (p_1 S_1(a_1, a_3), p_1 S_2(b_1, b_3)), (p_2 S_1(a_1, a_3), p_2 S_2(b_1, b_3)) ), $$
where $S_1$ and $S_2$ are the sideways operations of $R_1$ and $R_2$.
The shift operation $s$ of $R$ is given by
$$s( (a, b) ) = (s_1(a), s_2(b)), $$
where $s_1$ and $s_2$ are the shift operations of $R_1$ and $R_2$.
It is obvious that $f_1 \times f_2$ is a homomorphism.
\qed
We call the biquandle $(X_1 \times X_2, R)$ in Lemma~\ref{lem:prod}
the {\em direct product} of $(X_1, R_1)$ and $(X_2, R_2)$.
\section{v- and t-Structures on biquandles}\label{sect:vtbiquandles}
First we introduce the notion of a v-structure.
\begin{definition}\label{def:tstructure}{\rm (cf. \cite{BaF11})
Let $(X, R)$ be a biquandle.
A bijection $V: X^2 \to X^2$ is a {\em v-structure} of $(X, R)$ if the following conditions are satisfied.
\begin{itemize}
\item[(1)] $(X, V)$ is a biquandle.
\item[(2)] $V^2 =1: X^2 \to X^2$.
\item[(3)] $(V \times 1) (1 \times V) (R \times 1) = (1 \times R) (V \times 1) (1 \times V) : X^3 \to X^3$.
\end{itemize}
We call $(X, R, V)$ a {\em v-structured biquandle}.
}\end{definition}
A v-structure is used for colorings of virtual link diagrams.
\begin{example} {\rm
Let $(X, R)$ be a biquandle. Let $V$ be the transposition $\tau: X^2 \to X^2, (x,y) \mapsto (y,x)$. It is a v-structure.
A biquandle with this v-structure is used for colorings of virtual link diagrams in \cite{CSWEM09, FJK04, HK07, KR03}, etc.
}\end{example}
\begin{example}\label{example:virtualquandle} {\rm
Let $(X, R)$ be a biquandle. Let $f: (X, R) \to (X, R)$ be an automorphism.
Let $V: X^2 \to X^2$ be a map defined by $V(x_1,x_2) = (f^{-1} x_2, f x_1)$. It is a v-structure.
A biquandle with this structure is called a {\em virtual biquandle}.
See \cite{KauMan05} and Definition~3.3 of \cite{CN09}.
}\end{example}
We introduce the notion of a t-structure, or a
vt-structure, which is related to twisted links.
\begin{definition}\label{def:vtstructure}{\rm
Let $(X, R, V)$ be a v-structured biquandle.
A bijection $T: X \to X$ is a {\em t-structure} of $(X, R, V)$ if the following conditions are satisfied.
\begin{itemize}
\item[(1)] $T^2 =1$.
\item[(2)] $V (T \times 1) = ( 1 \times T) V$.
\item[(3)] $(T \times T) R (T \times T) = V R V$.
\end{itemize}
We call $(X, R, V, T)$ a {\em vt-structured biquandle}, and $(V, T)$ a {\em vt-structure} of $(X, R)$.
}\end{definition}
Since $V^2=1$, the condition (2) of Definition~\ref{def:vtstructure} is equivalent to the condition that $V (1 \times T) = (T \times 1) V$.
\indent
{\it Proof of Theorem~\ref{thm:twistedproduct}}.
(1) By Lemma~\ref{lem:tauRtau}, when $(X_0, R_0)$ is a biquandle, so is $(X_0, \tau R_0 \tau)$. By Lemma~\ref{lem:prod},
$(X, R)$ is a biquandle, since it is the direct product of $(X_0, R_0)$ and $(X_0, \tau R_0 \tau)$. (2) It follows from Lemmas~\ref{tauhomo} and \ref{lem:prod} that $f \times f: X =X_0 \times X_0 \to X =X_0 \times X_0$ is an automorphism of $(X, R)$. By Example~\ref{example:virtualquandle} we have (2).
The assertion (3) is verified by direct calculation, which is left to the reader. \qed
\section{Proof of Theorems} \label{sect:proofs}
In this section we prove Theorems~\ref{thm:coloring}, \ref{thm:bourgoinproductcoloring} and \ref{thm:twofoil}.
\indent
{\it Proof of Theorem~\ref{thm:coloring}}. Let $\Delta$ be a $2$-disk where a move in Figure~\ref{fgtwistmoves} transforms $D$ to $D'$. For each move, there is a bijection of the colorings of $D$ and $D'$ such that the corresponding colorings are identical outside $\Delta$. When the move is of type R1, R2 or R3, it follows from the fact that $(X, R)$ is a biquandle \cite{FJK04}.
When the move is of type V1, V2 or V3, it follows from the fact that
$(X, V)$ is a biquandle and $V^2=1$. The case of V4 follows from the equality $(V \times 1)(1 \times V)(R \times 1)= (1 \times R)(V \times 1) (1 \times V)$.
The case of type T1 follows from $V (T \times 1) = (1 \times T)V$. The case of type T2 follows from $T^2=1$. The case of type T3 follows from $(T \times T)R (T \times T) = VRV$.
\qed
\indent
{\it Proof of Theorem~\ref{thm:bourgoinproductcoloring}}.
Let ${\cal B}(Q, \ast) = (X, R, V, T)$.
By Theorem~\ref{thm:coloring} it is sufficient to show that for a virtual link diagram $D$ there is a bijection between the set of $(X, R, V, T)$-colorings of $D$, denoted by ${\rm Col}(D, (X, R, V, T))$, and the Cartesian product of ${\rm Col^u}(D, (Q, \ast))$ and ${\rm Col^l}(D, (Q, \ast))$, where ${\rm Col^u}(D, (Q, \ast))$ is the set of upper $(Q, \ast)$-colorings of $D$ and ${\rm Col^l}(D, (Q, \ast))$ is the set of lower $(Q, \ast)$-colorings of $D$. Since $D$ is a virtual link diagram, an $(X, R, V, T)$-coloring of $D$ is nothing more than an $(X, R, V)$-coloring of $D$.
Let $c: E(D) \to X$ be an $(X, R, V)$-coloring of $D$, where $E(D)$ is the set of edges of $D$. Let $c^{\rm u}: E(D) \to Q$ and $c^{\rm l}: E(D) \to Q$ be the maps defined by $ c^{\rm u}= p_1 c$ and $c^{\rm l} = p_2 c$ where $p_i$ is the $i$th factor projection $X=Q^2 \to Q$. Then $c^{\rm u}$ is an upper $(Q, \ast)$-coloring of $D$, and $c^{\rm l}$ is a lower $(Q, \ast)$-coloring of $D$. Conversely for any upper $(Q, \ast)$-coloring $c^{\rm u}$ and any lower $(Q, \ast)$-coloring $c^{\rm l}$, the map $c=(c^{\rm u}, c^{\rm l}): E(D) \to X$ is an $(X, R, V)$-coloring of $D$. \qed
In order to prove Theorem~\ref{thm:twofoil}, we give a proposition on ${\cal B}(Q, \ast)$-colorings of the non-orientable virtual $m$-foil $F_m$.
Let $(Q, \ast)$ be a quandle and ${\cal B}(Q, \ast)$ the standard twisted quandle.
Consider a ${\cal B}(Q, \ast)$-coloring of the diagram $F_m$ and let $x, y, z$ and $w$ be elements of $Q^2$ given the edges as depicted in Figure~\ref{fgtwistedmfoil}. Put $x=(x_1, x_2)$ and $y=(y_1, y_2) \in Q^2$.
\begin{proposition}\label{prop:coloringFm}
In this situation, $x_1$ and $y_1$ satisfy
\begin{equation}
x_1 = x_1^{(y_1 x_1)^m} \quad \mbox{and} \quad
y_1 = y_1^{(x_1 y_1)^m}.
\end{equation}
And $x_2$ and $y_2$ are determined by
\begin{equation}
\left\{
\begin{array}{l}
x_2 = y_1^{(x_1 y_1)^n} \\
y_2 = x_1^{(y_1 x_1)^{n-1}y_1}
\end{array}
\right.
\quad \mbox{if $m=2n$, and }
\left\{
\begin{array}{l}
x_2 = x_1^{(y_1 x_1)^n y_1} \\
y_2 = y_1^{(x_1 y_1)^n}
\end{array}
\right.
\quad \mbox{if $m=2n+1$.}
\end{equation}
Conversely, for any elements $x_1$ and $y_1$ of $Q$ satisfying $(1)$, there exists a unique ${\cal B}(Q, \ast)$-coloring of $F_m$.
\end{proposition}
\indent
{\it Proof}.
The standard twisted product of a quandle is given by
\begin{equation*}
R( (x_1, x_2), (y_1, y_2)) = ((y_1, y_2^{x_2}), (x_1^{y_1}, x_2)).
\end{equation*}
Applying that $m$ times, we have
\begin{equation}
(z, w) = (( x_1^{(y_1 x_1)^{n-1}y_1}, x_2^{(y_2 x_2)^n} ), ( y_1^{(x_1 y_1)^n}, y_2^{ (x_2 y_2)^{n-1} x_2}))
\quad \mbox{if $m=2n$,}
\end{equation}
and
\begin{equation}
(z, w) = (( y_1^{(x_1 y_1)^n}, y_2^{ (x_2 y_2)^n x_2}), ( x_1^{(y_1 x_1)^n y_1}, x_2^{(y_2 x_2)^n} ))
\quad \mbox{if $m=2n+1$.}
\end{equation}
Since $(x, y)= V (T\times T)(z,w)$, we have
\begin{equation}
\left\{
\begin{array}{ll}
x_1 = y_2^{(x_2 y_2)^{n-1}x_2}, & x_2 = y_1^{(x_1 y_1)^n} \\
y_1 = x_2^{(y_2 x_2)^n}, & y_2 = x_1^{(y_1 x_1)^{n-1}y_1}
\end{array}
\right.
\quad \mbox{if $m=2n$,}
\end{equation}
and
\begin{equation}
\left\{
\begin{array}{ll}
x_1 = x_2^{(y_2 x_2)^n}, & x_2 = x_1^{(y_1 x_1)^n y_1} \\
y_1 = y_2^{(x_2 y_2)^n x_2}, & y_2 = y_1^{(x_1 y_1)^n}
\end{array}
\right.
\quad \mbox{if $m=2n+1$.}
\end{equation}
From (5) and (6) we have (2), and eliminating $x_2$ and $y_2$ we have (1). Conversely, for any elements $x_1$ and $y_1$ of $Q$ satisfying $(1)$, let $x_2$ and $y_2$ be as in (2). Then $x=(x_1, x_2)$ and $y=(y_1, y_2)$ satisfy (5) and (6). Thus we have a coloring. \qed
For a quandle $(Q, \ast)$, let $\Delta_m(Q, \ast)$ denote a subset
\begin{equation}
\{ (a,b) \in Q^2 \mid a= a^{(ba)^m}, \, b= b^{(ab)^m} \}
\end{equation}
of $Q^2$.
\begin{corollary}\label{cor:colorFm}
For any quandle $(Q, \ast)$, assigning $(x_1, y_1)$ to a ${\cal B}(Q, \ast)$-coloring of $F_m$ as in Proposition~\ref{prop:coloringFm}
is a bijection from ${\rm Col}(F_m, {\cal B}(Q, \ast))$, the set of ${\cal B}(Q, \ast)$-colorings of $F_m$, to $\Delta_m(Q, \ast)$.
\end{corollary}
\indent
{\it Proof of Theorem~\ref{thm:twofoil}}.
(1) Let $(Q_{2m}, \ast)$ be the dihedral quandle of order $2m$, i.e., $Q_{2m}= {\mathbb Z} / 2m{\mathbb Z}$ and $a \ast b = 2b -a$.
It is easily seen that $\Delta_m(Q_{2m}, \ast) = (Q_{2m})^2$. Thus, by Corollary~\ref{cor:colorFm}, $\# {\rm Col}(F_m, {\cal B}(Q_{2m}, \ast)) = \# \Delta_m(Q_{2m}, \ast) = 4m^2$. On the other hand, if $m > m' \geq 1$, then $\Delta_{m'}(Q_{2m}, \ast) \neq (Q_{2m})^2$, for $(1,0)\in (Q_{2m})^2$ does not belong to $ \Delta_{m'}(Q_{2m}, \ast)$.
Hence, by Corollary~\ref{cor:colorFm} again, $\# {\rm Col}(F_{m'}, {\cal B}(Q_{2m}, \ast)) = \# \Delta_{m'}(Q_{2m}, \ast) < 4m^2$. Therefore $F_m$ and $F_{m'}$ represent distinct twisted links.
(2) Let $(Q_{2m+1}, \ast)$ be the dihedral quandle of order $2m+1$. Since $(1,0)\in (Q_{2m+1})^2$ does not belong to $ \Delta_{m}(Q_{2m+1}, \ast)$, we have $\Delta_m(Q_{2m+1}, \ast) \neq (Q_{2m+1})^2$.
By Corollary~\ref{cor:colorFm}, $\# {\rm Col}(F_{m}, {\cal B}(Q_{2m+1}, \ast)) = \# \Delta_{m}(Q_{2m+1}, \ast) <
\# (Q_{2m+1})^2$.
By Corollary~\ref{cor:bourgoinproductcoloring}, $F_m$ does not represent a virtual link. \qed
{\it Acknowledgements.}
The authors would like to express their gratitude to Roger Fenn for his valuable suggestions.
The first author is partially supported by JSPS KAKENHI 22540093 and Grant-in-Aid for Research in Nagoya City University. The second author is partially supported by JSPS KAKENHI 21340015.
\end{document} |
\begin{document}
\title{Tilted Cone and Cylinder, Cone and Tilted Sphere}
\author{Mehmet Kirdar}
\maketitle
\begin{abstract}
In this note, we will consider two classical volume problems related to
elliptic integrals. The first problem has a neat formula by means of
elliptic integrals. We remade it with details. In the second problem, we
found a messy formula. On the other hand, it seems to be useful to find a
good approximation for the volume.
\textit{Key Words. Cone, cylinder, sphere, elliptic, integral.}
\textit{Mathematics Subject Classification. [2020] 51M25, 33E05.}
\end{abstract}
\section{Introduction}
In this note, I discuss two classical volume problems. The first problem
which, I saw in [2], dates back to 1932, has a neat solution formula by
means of elliptic integrals. I reproduced the formula for the case $k<1$
with some details for elliptic integrals. There is a key identity which also
appeared in the second problem but is not available in [2]. I believe that
Rhodes did these computations somewhere else. His purpose in this article was
Landen transformations but I believe that they are sometimes complications.
The solution for the second problem I found is not very neat. I used
Maclaurin's series expansions of elliptic integrals of the first kind and
the second kind and wrote the solution as an infinite series of
trigonometric integrals. It seems to be useful to find a good approximation
for the volume. I do not know whether this formula was known before. I have
not seen it.
I must also mention the beautiful book of Harris Hancock, [1], which helped
me to understand the tricky identities about elliptic integrals.
WolframAlpha helped me a lot during my research. Its abilities are amazing.
\section{Tilted cone and cylinder}
Consider the cylinder\textbf{\ }$x^{2}+y^{2}=1$ and the cone $z=\cot \alpha
\sqrt{(x-k)^{2}+y^{2}}$, $0\leq k\leq 1.$ We want to find the volume of the
bounded region inside the cylinder, under the cone and above $z=0$. Here $
\alpha $ is the fixed angle of the cone, the angle between the cone and its
axis, $0\leq \alpha \leq \frac{\pi }{2}.$
Let the origin be $O=(0,0,0),$ the vertex of the cone be $T=(k,0,0)$ and let
$P=(\cos \theta ,\sin \theta ,0)$, $0\leq \theta \leq 2\pi ,$ be a point on
the unit circle of the $xy$-plane. Let the angle between $TP$ and positive
side of the $x$-axis be\ $\phi $, $0\leq \phi \leq 2\pi $. See [2] for some
figures about this problem. If $TP=R$ then by law of cosines, $R=\sqrt{
1-k^{2}\sin ^{2}\phi }-k\cos \phi $. The perpendicular from $P$ to $xy$
-plane cuts the cone with height $R\cot \alpha $. Therefore, the
parameterization of the region in tilted cylindrical coordinates is $0\leq
r\leq R,0\leq \phi \leq 2\pi $ and $0\leq z\leq r\cot \alpha $. And in
tilted coordinates volume differential is $dV=rdrd\phi dz$. With two
successive integrations, the volume integral can be reduced to $V=\dfrac{
2\cot \alpha }{3}\dint\limits_{0}^{\pi }R^{3}d\phi $.
Now, by putting $R^{3}$ and observing that
\[
\dint\limits_{0}^{\pi }(-k^{3}\cos ^{3}\phi -3k\cos \phi +3k^{3}\allowbreak
\cos \phi \sin ^{2}\phi )d\phi =\allowbreak 0
\]
we have
\[
V=\dfrac{4\cot \alpha }{3}\dint\limits_{0}^{\dfrac{\pi }{2}
}(3k^{2}+1-4k^{2}\sin ^{2}\phi )\sqrt{1-k^{2}\sin ^{2}\phi }d\phi .
\]
By the definition of the elliptic integral of the second kind $E(k)$, $V$
becomes
\[
V=\dfrac{4(3k^{2}+1)\cot \alpha }{3}E(k)-\dfrac{16k^{2}\cot \alpha }{3}
\dint\limits_{0}^{\dfrac{\pi }{2}}\sin ^{2}\phi \sqrt{1-k^{2}\sin ^{2}\phi }
d\phi .
\]
Next, $E_{2}(k)=$ $\dint\limits_{0}^{\dfrac{\pi }{2}}\sin ^{2}\phi \sqrt{
1-k^{2}\sin ^{2}\phi }d\phi $ must be computed in terms of elliptic
integrals. Let $\Delta =\sqrt{1-k^{2}\sin ^{2}\phi }$, $S=\sin \phi $ and $
C=\cos \phi $. The tricky identity is
\[
S^{2}\Delta =\frac{2k^{2}-1}{3k^{2}}\Delta +\frac{1-k^{2}}{3k^{2}}\dfrac{1}{
\Delta }+\left[ -\frac{1}{3}(1-2S^{2})\Delta +\frac{k^{2}}{3}S^{2}C^{2}\frac{
1}{\Delta }\right] .
\]
Integrating from $0$ to $\dfrac{\pi }{2}$, since
\[
\dint\limits_{0}^{\pi /2}\left[ -\frac{1}{3}(1-2S^{2})\Delta +\frac{k^{2}}{3}
S^{2}C^{2}\frac{1}{\Delta }\right] d\phi =\left[ -\frac{1}{3}SC\Delta \right]
_{0}^{\pi /2}=0,
\]
we find
\[
E_{2}(k)=\frac{2k^{2}-1}{3k^{2}}E(k)+\frac{1-k^{2}}{3k^{2}}K(k)\text{ }
(\star )
\]
and
\[
V=\frac{4}{9}\cot \alpha \left[ (k^{2}+7)E(k)+4(k^{2}-1)K(k)\right]
\]
where $K(k)=\dint\limits_{0}^{\dfrac{\pi }{2}}\left( 1-k^{2}\sin ^{2}\phi
\right) ^{-\frac{1}{2}}d\phi .$ This formula is obtained in [2]. There,
formula for $k>1$ case is also obtained and then they are combined with a
Landen transformation interpretation.
\section{Cone and tilted sphere}
Let us find the volume of the bounded region between the tilted sphere $
(x+k)^{2}+y^{2}+z^{2}=1,$ $0\leq k\leq 1$ and the cone $z=\cot \alpha \sqrt{
x^{2}+y^{2}}.$ The set-up of the volume integral is easier than that of the
first problem. So, we can skip figures. The sphere in spherical coordinates
is $\rho ^{2}+2k\rho \cos \theta \sin \phi +k^{2}-1=0$ and the cone is $\phi
=\alpha $. Thus, the volume of the region
\[
0\leq \rho \leq -k\cos \theta \sin \phi +\sqrt{1-k^{2}+k^{2}\cos ^{2}\theta
\sin ^{2}\phi },\text{ }0\leq \theta \leq 2\pi ,\text{ }0\leq \phi \leq
\alpha
\]
is found as
\[
V=\dint\limits_{0}^{2\pi }\dint\limits_{0}^{\alpha }\left\{
\begin{array}{c}
(k^{3}\cos \theta \sin ^{2}\phi -k\cos \theta \sin ^{2}\phi -\frac{4}{3}
k^{3}\allowbreak \cos ^{3}\theta \sin ^{4}\phi )+ \\
\left( \frac{4}{3}k^{2}\cos ^{2}\theta \sin ^{3}\phi +\frac{1-k^{2}}{3}\sin
\phi \right) \sqrt{1-k^{2}+k^{2}\cos ^{2}\theta \sin ^{2}\phi }
\end{array}
\right\} d\phi d\theta .
\]
Since
\[
\dint\limits_{0}^{2\pi }(k^{3}\cos \theta \sin ^{2}\phi -k\cos \theta \sin
^{2}\phi -\frac{4}{3}k^{3}\allowbreak \cos ^{3}\theta \sin ^{4}\phi )d\theta
=\allowbreak 0
\]
and due to symmetry, we find
\[
V=\dint\limits_{0}^{\alpha }\dint\limits_{0}^{\dfrac{\pi }{2}}\left( \frac{16
}{3}k^{2}\cos ^{2}\theta \sin ^{3}\phi +\frac{4}{3}(1-k^{2})\sin \phi
\right) \sqrt{1-k^{2}+k^{2}\cos ^{2}\theta \sin ^{2}\phi }d\theta d\phi .
\]
Let us define
\[
K=\dfrac{k\sin \phi }{\sqrt{1-k^{2}\cos ^{2}\phi }}
\]
and thus,
\begin{eqnarray*}
V &=&\dint\limits_{0}^{\alpha }\left( \frac{16}{3}k^{2}\sin ^{3}\phi +\frac{4
}{3}(1-k^{2})\sin \phi \right) \sqrt{1-k^{2}\cos ^{2}\phi }E(K)d\phi \\
&&-\dint\limits_{0}^{\alpha }\text{ }\frac{16}{3}k^{2}\sin ^{3}\phi \sqrt{
1-k^{2}\cos ^{2}\phi }\left( \dint\limits_{0}^{\dfrac{\pi }{2}}\sin
^{2}\theta \sqrt{1-K^{2}\sin ^{2}\theta }d\theta \right) d\phi .
\end{eqnarray*}
By using the star identity, $(\star )$ of the first problem, it can be
written as
\[
V=\frac{4}{9}\dint\limits_{0}^{\alpha }(8k^{2}\sin ^{3}\phi +7(1-k^{2})\sin
\phi )\sqrt{1-k^{2}\cos ^{2}\phi }E(K)d\phi -\frac{16}{9}\dint\limits_{0}^{
\alpha }\sin \phi \sqrt{1-k^{2}\cos ^{2}\phi }K(K)d\phi .
\]
We can now insert infinite series of $E(K)$ and $K(K)$ and do term by term
integration to obtain a formula which involves trigonometric integrals.
Let us recall that $E(K)=\dfrac{\pi }{2}\dsum\limits_{n=0}^{\infty }\dfrac{c_{n}}{1-2n}
K^{2n}$ and $K(K)=\dfrac{\pi }{2}\dsum\limits_{n=0}^{\infty }c_{n}K^{2n}$ where $
c_{n}=\left( \dfrac{(2n)!}{2^{2n}(n!)^{2}}\right) ^{2}.$ Putting these and $
K $ in the last equation we obtain
\[
V=\frac{2\pi }{9}\dsum\limits_{n=0}^{\infty }\frac{c_{n}k^{2n}}{1-2n}
\dint\limits_{0}^{\alpha }\frac{8k^{2}\sin ^{2n+3}\phi +(3-7k^{2}+8n)\sin
^{2n+1}\phi }{(1-k^{2}\cos ^{2}\phi )^{n-\frac{1}{2}}}d\phi .
\]
The zeroth term of the series gives the following approximation of the
volume for small $k$:
\[
\frac{2\pi }{9}\left( (1+2k^{2})\sqrt{1-k^{2}}-\cos \alpha
(1+4k^{2}-2k^{2}\cos \alpha )\sqrt{1-k^{2}\cos ^{2}\alpha }+(2-3k^{2})\frac{
\arcsin k-\arcsin (k\cos \alpha )}{k}\right) .
\]
This gives the exact value $\dfrac{2\pi }{3}(1-\cos \alpha )$ in the limit $
k\rightarrow 0.$
\end{document} |
\begin{document}
\title{Pythagorean Triples, B\'{e}zout Coefficients and the Erd\H{o}s-Straus Conjecture
}
\author{Kyle Bradford
}
\institute{K. Bradford \at
1332 Southern Dr, Statesboro, GA 30458 \\
Tel.: +1-703-314-8920\\
\email{kbradford@georgiasouthern.edu}
}
\date{Received: date / Accepted: date}
\maketitle
\begin{abstract}
\
\keywords{}
\end{abstract}
\section{Introduction} \label{sec: intro}
The Erd\H{o}s-Straus conjecture was introduced in 1948 by Paul Erd\H{o}s and Ernst Straus. It is one of many conjectures that involve Egyptian fractions. These are sums of positive rational numbers with unit numerator. As outlined in \cite{Bra1}, to prove the Erd\H{o}s-Straus conjecture, it suffices to show that for each prime $p$ there exist positive integers $x \leq y \leq z$ so that the following diophantine equation is satisfied:
\begin{equation} \label{eq: one}
\frac{4}{p} = \frac{1}{x} + \frac{1}{y} + \frac{1}{z}.
\end{equation}
\
\noindent If a solution exists for a given prime $p$, then by insisting that $x \leq y \leq z$ it was shown in \cite{Bra1} that $p$ cannot divide $x$, $p$ must divide $z$ and $p$ sometimes divides $y$. This necessarily means that for every prime $p$, it is impossible to have $x=y=z$ simultaneously. Also all solutions to (\ref{eq: one}) with either $x=y$ or $y=z$ are of the following form:
\begin{alignat*}{4}
\bullet &\quad p=2 &&\qquad x =1 &&\qquad y=2& &\qquad z=2 \\
\bullet &\quad p \equiv 3 \text{ mod } 4 &&\qquad x = \frac{p+1}{2} &&\qquad y = \frac{p+1}{2} &&\qquad z = \frac{p(p+1)}{4} \\
\bullet &\quad p \equiv 3 \text{ mod } 4 &&\qquad x = \frac{p+1}{4} &&\qquad y = \frac{p(p+1)}{2} &&\qquad z = \frac{p(p+1)}{2}. \\
\end{alignat*}
\
\noindent I will call these trivial solutions to (\ref{eq: one}) and focus on finding non-trivial solutions where $x<y<z$. For each odd prime $p$ it is unknown whether or not a non-trivial solution to (\ref{eq: one}) exists. If one exists, it was shown in \cite{Bra1} that
\begin{equation} \label{eq: two}
z = \frac{xyp}{\gcd(p,y) \gcd(xy, x+y)}.
\end{equation}
\
\noindent Similarly it can be shown for an odd prime $p$ that if a non-trivial solution to (\ref{eq: one}) exists, then
\begin{equation} \label{eq: three}
y = \frac{xz \gcd(p,y)}{p \gcd(xz, x+z)}
\end{equation}
\
\noindent and
\begin{equation} \label{eq: four}
x = \frac{yz}{p \gcd(yz, y+z)}.
\end{equation}
\
\noindent Manipulating (\ref{eq: one}), (\ref{eq: two}), (\ref{eq: three}) and (\ref{eq: four}) leads to necessary equations for solutions to the Erd\H{o}s-Straus conjecture. For a given odd prime $p$ and positive integers $x < y$, it is necessary that
\begin{equation} \label{eq: five}
2 \left( \frac{2xy}{\gcd(xy, x+y)} \right) - \frac{p}{y-x} \left( \frac{y^{2} - x^{2}}{\gcd(xy, x+y)} \right) = \gcd(p,y),
\end{equation}
\
\noindent for a given odd prime $p$ and positive integers $x < z$, it is necessary that
\begin{equation} \label{eq: six}
2 \left( \frac{2xz}{\gcd(xz, x+z)} \right) - \frac{p}{z-x} \left( \frac{z^{2} - x^{2}}{\gcd(xz, x+z)} \right) = \frac{p^{2}}{\gcd(p,y)},
\end{equation}
\
\noindent and for a given odd prime $p$ and positive integers $y < z$, it is necessary that
\begin{equation} \label{eq: seven}
2 \left( \frac{2yz}{\gcd(yz, y+z)} \right) - \frac{p}{z-y} \left( \frac{z^{2} - y^{2}}{\gcd(yz, y+z)} \right) = p^{2}.
\end{equation}
\
\noindent The reason to focus on the expressions in (\ref{eq: five}), (\ref{eq: six}) and (\ref{eq: seven}) is to highlight a connection to the work in \cite{Gue1}. For example, by letting
\begin{equation} \label{eq: twelve}
\begin{split}
A &= \frac{2xy}{\gcd(xy, x+y)} \\
B &= \frac{y^{2} - x^{2}}{\gcd(xy, x+y)}
\end{split}
\end{equation}
\
\noindent we have the two smaller legs of a Pythagorean triple. I will outline these observations in greater detail in the following section.
\section{Pythagorean Triples} \label{sec: main}
\noindent A non-trivial Pythagorean triple $(A,B,C)$ has $A,B,C \in \mathbb{Z}^{+}$ so that $A^{2}+B^{2}=C^{2}$. Either all three terms are even or only one of $A$ or $B$ is even, so let $A$ be an even term. Note that this does not necessarily imply that $A<B$. By insisting that $A$ is even, some examples of Pythagorean triples under this convention are $(4,3,5), (12,5,13), (6,8,10)$ and $(8,6,10)$. Here I am treating $(6,8,10)$ and $(8,6,10)$ as different Pythagorean triples. \\
\noindent A primitive Pythagorean triple has $A$ and $B$ coprime. All primitive Pythagorean triples can be connected through a Berggren Tree \cite{Ber1}. Figure \ref{fig: one} shows the history of the development of Pythagorean triples. In this tree they do not insist that $A$ is even, rather that $A<B<C$. For example, you can see that Pythagoras' branch has triples with C=B+1, Plato's branch has $C=B+2$, and the middle branch has $B=A+1$. \\
\noindent This tree has a root that can be expressed as a column vector $(3,4,5)^{T}$ and you can find the coefficients of any other Pythagorean triple through repeated left-hand multiplication of combinations of the following matrices:
$$
\begin{bmatrix}
1 & -2 & 2 \\
2 & -1 & 2 \\
2 & -2 & 3
\end{bmatrix} \qquad
\begin{bmatrix}
1 & 2 & 2 \\
2 & 1 & 2 \\
2 & 2 & 3
\end{bmatrix} \qquad
\begin{bmatrix}
-1 & 2 & 2 \\
-2 & 1 & 2 \\
-2 & 2 & 3
\end{bmatrix}.
$$
\begin{figure}
\caption{This image of a Berggren Tree is thanks to Luis Teia \cite{Tei1}}
\label{fig: one}
\end{figure}
\noindent One goal is to find a relationship between the odd primes and specific triples in this tree. If a pattern can be found, then a solution to the Erd\H{o}s-Straus conjecture can be found.
\subsection{\underline{Pythagorean triples of the first type}}
\noindent Given an odd prime $p$ and a non-trivial solution to (\ref{eq: one}) the formulas in (\ref{eq: twelve}) provide the smaller legs of a Pythagorean triple. These will be called Pythagorean triples of the first type. Figure \ref{fig: two} gives these values for the non-trivial solutions for primes less than $19$. These triples are not necessarily primitive. In fact it can be shown that
$$ \gcd(A,B) = \gcd(x,y,z) \gcd \left( 2, \frac{y^{2} - x^{2}}{\gcd^{2}(x,y)} \right).$$
\
\noindent Manipulating (\ref{eq: five}) and (\ref{eq: twelve}) leads to the following expressions
\begin{equation} \label{eq: thirteen}
\begin{split}
x &= \frac{A-B+C}{2 \left( \frac{(2A- \gcd(p,y))}{p} \right)} \\
y &= \frac{A+B+C}{2 \left( \frac{(2A-\gcd(p,y))}{p} \right)} \\
z &= \frac{Ap}{2 \gcd(y,p)}.
\end{split}
\end{equation}
\
\noindent The equations in (\ref{eq: thirteen}) help us in the following way: given an odd prime $p$ and a Pythagorean triple $(A,B,C)$, depending on whether or not we are seeking solutions with $p$ dividing $y$, we have two sets of possible equations to find non-trivial solutions to (\ref{eq: one}).
\subsection{\underline{Pythagorean triples of the second type}}
\noindent Letting
\begin{equation} \label{eq: fourteen}
\begin{split}
A &= \frac{2xz}{\gcd(xz, x+z)} \\
B &= \frac{z^{2} - x^{2}}{\gcd(xz, x+z)}
\end{split}
\end{equation}
\
\noindent we also have the two smaller legs of a Pythagorean triple. These will be called Pythagorean triples of the second type. Figure \ref{fig: three} gives these values for the non-trivial solutions for primes less than $19$. These triples are also not necessarily primitive. In fact it can be shown that
$$ \gcd(A,B) = \gcd(x,y,z) \gcd \left( 2, \frac{z^{2} - x^{2}}{\gcd^{2}(x,z)} \right).$$
\
\noindent Manipulating (\ref{eq: six}) and (\ref{eq: fourteen}) leads to the following expressions
\begin{equation} \label{eq: seventeen}
\begin{split}
x &= \frac{A-B+C}{2 \left( \frac{\left(2A- \frac{p^{2}}{\gcd(p,y)} \right)}{p} \right)} \\
y &= \frac{A\gcd(y,p)}{2 p} \\
z &= \frac{A+B+C}{2 \left( \frac{\left(2A-\frac{p^{2}}{\gcd(p,y)} \right)}{p} \right)}.
\end{split}
\end{equation}
\
\noindent The equations in (\ref{eq: seventeen}) help us in the following way: given an odd prime $p$ and a Pythagorean triple $(A,B,C)$, depending on whether or not we are seeking solutions with $p$ dividing $y$, we have two sets of possible equations to find non-trivial solutions to (\ref{eq: one}).
\subsection{\underline{Pythagorean triples of the third type}}
Letting
\begin{equation} \label{eq: fifteen}
\begin{split}
A &= \frac{2yz}{\gcd(yz, y+z)} \\
B &= \frac{z^{2} - y^{2}}{\gcd(yz, y+z)}
\end{split}
\end{equation}
\
\noindent we also have the two smaller legs of a Pythagorean triple. These will be called Pythagorean triples of the third type. Figure \ref{fig: four} gives these values for the non-trivial solutions for primes less than $19$. These triples are also not necessarily primitive. In fact it can be shown that
$$ \gcd(A,B) = \gcd(p,y) \gcd(x,y,z) \gcd \left( 2, \frac{z^{2} - y^{2}}{\gcd^{2}(y,z)} \right).$$
\
\noindent Manipulating (\ref{eq: seven}) and (\ref{eq: fifteen}) leads to the following expressions
\begin{equation} \label{eq: sixteen}
\begin{split}
x &= \frac{A}{2p} \\
y &= \frac{A-B+C}{2 \left( \frac{(2A- p^{2})}{p} \right)} \\
z &= \frac{A+B+C}{2 \left( \frac{(2A-p^{2})}{p} \right)}.
\end{split}
\end{equation}
\
\noindent The equations in (\ref{eq: sixteen}) help us in the following way: given an odd prime $p$ and a Pythagorean triple $(A,B,C)$, we have a set of equations to find non-trivial solutions to (\ref{eq: one}).
\section{B\'{e}zout Coefficients} \label{sec: Bezout}
\noindent Given a prime $p$ a solution to the conjecture requires finding positive integers $x<y$ that satisfy (\ref{eq: five}). Making the substitutions allows us to find the smaller legs $A$ and $B$ of a Pythagorean triple instead. There is one more fundamental way to decompose this problem and it is discussed in this section. Ultimately we will be looking for two other integer values instead. While the complexity is not reduced, it gives insight to the solution values as roots of polynomials. \\
\noindent Let $p$ be an odd prime. Solutions to (\ref{eq: one}) with $\gcd(p,y)=1$ were called type I solutions in \cite{Bra1}. There are necessary conditions for these types of solutions. To further understand these necessary conditions, the first goal is to find an expression for all values $b,c \in \mathbb{N}$ such that
\begin{equation} \label{eq: nine}
4c - pb = \gcd( c,b).
\end{equation}
\
\noindent Notice that if we can find $x,y \in \mathbb{N}$ so that $b=x+y$ and $c=xy$, then we have type I solutions to (\ref{eq: three}). \\
\noindent Let $b_{1} = 4 \left\lceil p \slash 4 \right\rceil - p$ and let $c_{1} = (b_{1} p+1) \slash 4$. \\
\noindent Notice that both $b_{1}$ and $c_{1}$ are positive integers and $$ 4 c_{1} + (-p)b_{1} = 1.$$
\
\noindent This by definition shows that $b_{1}$ and $c_{1}$ are coprime. Indeed we see that $$4 c_{1} - p b_{1} = \gcd( c_{1}, b_{1})$$
\
\noindent where $| b_{1}| \leq 4$ and $|c_{1}| \leq p$. This guarantees that these are the minimal B\'{e}zout coefficients. \\
\noindent For $k \in \mathbb{N}$ let $b_{k} = 4(k-1) + b_{1}$ and let $c_{k} = p(k-1) + c_{1}$. \\
\noindent Notice that $b_{k}$ and $c_{k} = (b_{k}p + 1) \slash 4$ are always positive integers. Also notice that $b_{k}$ and $c_{k}$ are by definition coprime for each $k \in \mathbb{N}$. \\
\noindent It should also be clear that these are the only values so that $$4 c_{k} - p b_{k} = \gcd( c_{k}, b_{k}).$$
\
\noindent B\'{e}zout's identity tells us that multiples of these coefficients are the only solutions to (\ref{eq: four}). In other words the solution set for possible type I solutions have $x,y \in \mathbb{N}$ so that $$(x+y, xy) \in \{ (mb_{k}, mc_{k}) | m,k \in \mathbb{N}, \gcd(b_{k},c_{k}) =1 \}. $$
\
\noindent Now let p be an any prime. Solutions to (\ref{eq: one}) with $\gcd(p,y)=p$ were called type II solutions in \cite{Bra1}. There are also necessary conditions for these types of solutions. To further understand these necessary conditions, the next goal is to find an expression for all values $b,c \in \mathbb{N}$ such that
\begin{equation} \label{eq: ten}
4c - pb = p \gcd( c,b).
\end{equation}
\
\noindent Similarly notice that if we can find $x,y \in \mathbb{N}$ so that $b=x+y$ and $c=xy$, then we have type II solutions to (\ref{eq: three}). \\
\noindent Let $b_{1} = 3$ and let $c_{1} = p$. \\
\noindent For $k \in \mathbb{N}$ let $b_{k} = 4(k-1)+b_{1}$ and let $c_{k} = p(k-1) + c_{1}$. \\
\noindent Notice that $b_{k}$ and $c_{k} = (b_{k} + 1)p \slash 4$ are positive integers for all $k \in \mathbb{N}$. Also notice that $b_{k}$ and $c_{k}$ are coprime when $p \neq 3$. If $p = 3$, then $b_{k}$ and $c_{k}$ are coprime for $k \not\equiv 1 \mod 3$. \\
\noindent It should be clear that these are the only values so that $$4 c_{k} - p b_{k} = p \gcd( c_{k}, b_{k}).$$
\
\noindent Using a similar reasoning we can conclude that multiples of these coefficients are the only solutions to (\ref{eq: five}). The solution set for possible type II solutions have $x,y \in \mathbb{N}$ so that $$ (x+y, xy) \in \{ (mb_{k}, mc_{k}) | m,k \in \mathbb{N}, \gcd(b_{k},c_{k}) =1 \}. $$
\
\noindent In general, to solve the Erd\H{o}s-Straus conjecture we are looking for solutions like the ones outlined above where $x+y = mb_{k}$ and $xy = mc_{k}$ for some $m,k \in \mathbb{N}$ with $\gcd(b_{k},c_{k})=1$. As these two equations are symmetric with respect to $x$ and $y$, we see that these two values are going to be the roots of a polynomial $T^2 - mb_{k}T +mc_{k}$. For each prime $p$, if (\ref{eq: one}) has a non-trivial solution, then $x$ and $y$ arise as roots of such a polynomial. \\
\noindent The roots of this polynomial are going to be integers if and only if $$m^{2} b_{k}^{2} - 4mc_{k}$$
\
\noindent is a square number. \\
\noindent For type I solutions this reduces to finding $m,k \in \mathbb{N}$ so that $$ m^{2} (4k-(4-4 \left\lceil p \slash 4 \right\rceil + p))^{2} - pm(4k-(4-4 \left\lceil p \slash 4 \right\rceil + p)) -m$$
\
\noindent is a square number. \\
\noindent For type II solutions this reduces to finding $m,k \in \mathbb{N}$ so that $$ m^{2} (4k-1)^{2} - 4pmk$$
\
\noindent is a square number, where $k \not\equiv 1 \mod 3$ for $p =3$. \\
\noindent Consider a type II solution. Let $y^{*} = y \slash p$ and $z^{*} = z \slash p$. \\
\noindent One can show that $$ 4y^{*}z^{*} - (y^{*}+z^{*}) = p \gcd(y^{*}z^{*}, y^{*}+z^{*}).$$
\
\noindent Let $A^{*} = y^{*}z^{*} \slash \gcd(y^{*}z^{*}, y^{*}+z^{*})$ and let $B^{*} = (y^{*}+z^{*}) \slash \gcd(y^{*}z^{*}, y^{*}+z^{*})$. \\
\noindent Notice that $B^{*} = 4A^{*} - p$. Because $p = 4\left( \left\lceil p \slash 4 \right\rceil \right) - \left( 4 \left\lceil p \slash 4 \right\rceil - p \right)$, we see that $B^{*} = 4\left(A^{*} - \left\lceil p \slash 4 \right\rceil \right) + \left( 4 \left\lceil p \slash 4 \right\rceil - p \right)$. Letting $m= \gcd(y^{*}z^{*}, y^{*}+z^{*})$ and letting $A = (k-1) + \left\lceil p \slash 4 \right\rceil$, if we can similarly show that $$m^{2} \left(4k - \left( 4 - 4 \left\lceil p \slash 4 \right\rceil + p \right) \right)^{2} - m\left( 4k - \left(4 - 4 \left\lceil p \slash 4 \right\rceil + p \right) \right)- mp$$
\
\noindent is a square number, then we would be able to find our solution. As $A^{*}$ is an expression for $x$ for type II solutions, we see that $(k-1) \leq \left\lceil p \slash 4 \right\rceil$.
\noindent There are many other second degree polynomials in $m$ and $k$ that can be found and the ultimate goal will be to show that the expressions are square. In \cite{Bra1} a functional relationship between $x$ and $y$ was found for type I solutions, but no such relationship has been found between the smaller legs of the Pythagorean triples or the values of $m$ and $k$. The point of considering these different techniques is to develop a deeper understanding of the problem and to find creative ways to approach the problem. Hopefully this expository paper will bring more attention to the problem and the work in \cite{Gue1}.
\section{Appendix} \label{sec: appendix}
\begin{figure}
\caption{This table shows related Pythagorean triples of the first type for some non-trivial solutions to the Erd\H{o}s-Straus conjecture}
\label{fig: two}
\end{figure}
\begin{figure}
\caption{This table shows related Pythagorean triples of the second type for some non-trivial solutions to the Erd\H{o}s--Straus conjecture.}
\label{fig: three}
\end{figure}
\begin{figure}
\caption{This table shows related Pythagorean triples of the third type for some non-trivial solutions to the Erd\H{o}s--Straus conjecture.}
\label{fig: four}
\end{figure}
\end{document} |
\begin{document}
\title{Dimensions of Prym Varieties}
\author{Amy E. Ksir\\
Mathematics Department\\
State University of New York at Stony Brook \\
Stony Brook, NY, 11794}
\date{July 26, 2000}
\email{ksir@math.sunysb.edu}
\begin{abstract}
Given a tame Galois branched cover of curves $\pi: X \to Y$ with any finite
Galois group $G$ whose representations are rational, we compute the dimension
of the (generalized) Prym variety $\Prym_{\rho}(X)$ corresponding to any
irreducible representation $\rho$ of $G$. This formula can be applied to the
study of algebraic integrable systems using Lax pairs, in particular systems
associated with Seiberg-Witten theory. However, the formula is much
more general and its computation and proof are entirely algebraic.
\end{abstract}
\maketitle
\section{Introduction}
The most familiar Prym variety arises from a (possibly
branched) double cover $\pi: X \to Y$ of curves. In this
situation, there is a surjective norm map $\Nm: \Jac(X) \to
\Jac(Y)$, and the Prym (another abelian variety) is a connected
component of its kernel. Another way to think of this is that the
involution $\sigma$ of the double cover induces an action of $\mathbb{Z}/2\mathbb{Z}$ on
the vector space $H^{0}(X, \omega_{X})$, which can then be
decomposed as a representation of $\mathbb{Z}/2\mathbb{Z}$. The Jacobian of the base
curve Y and the Prym correspond to the trivial and sign
representations, respectively. The Prym variety can
be defined as the component containing the identity of
$(\Jac(X) \otimes_{\mathbb{Z}} \varepsilon)^{\sigma}$, where $\varepsilon$ denotes
the sign representation of $\mathbb{Z}/2\mathbb{Z}$.
The generalization of this construction that we will study in this
paper is as follows. Let $G$ be a finite group, and $\pi: X \to Y$ be a
tame Galois branched cover, with Galois group $G$, of smooth projective
curves over an algebraically closed field. The action of $G$ on $X$ induces
an action on the vector space of differentials $H^0(X,\omega_{X})$,
and on the Jacobian $\Jac(X)$. For any representation $\rho$ of
$G$, we define $\Prym_{\rho}(X)$ to be the connected component
containing the identity of $(\Jac(X) \otimes_{\mathbb{Z}} \rho^{*})^{G}$.
The vector space $H^0(X,\omega_{X})$ will decompose as a $\mathbb{Z}[G]$-module into a
direct sum of isotypic pieces
\begin{equation}
H^0(X,\omega_{X}) = \bigoplus_{j=1}^{N} \rho_j \otimes V_j
\end{equation}
where $\rho_1, \ldots , \rho_N$ are the irreducible representations of $G$.
If $G$ is such that all of its representations are rational, then the
Jacobian will also decompose, up to isogeny, into a direct sum of
Pryms [D2]:
\begin{equation}
\Jac(X) \sim \bigoplus_{j=1}^{N} \rho_j \otimes \Prym_{\rho_j}(X).
\end{equation}
In particular, if $G$ is the Weyl group of a semisimple Lie algebra, then it
will satisfy this property.
The goal of this paper is to compute the dimension of such a Prym
variety. This formula is given in section 2, with a proof that uses
only the Riemann-Hurwitz theorem and some character theory. Special
cases of this formula relevant to integrable systems have appeared
previously [A, Me, S, MS].
One motivation for this work comes from the study of algebraically integrable
systems. An algebraically integrable system is a Hamiltonian system
of ordinary differential equations, where
the phase space is an algebraic variety with an algebraic (holomorphic,
over $\mathbb{C}$)
symplectic structure. The complete integrability of the system means
that there are $n$ commuting Hamiltonian functions on the
$2n$-dimensional phase
space. For an algebraically integrable system, these functions should
be algebraic, in which case they define a morphism to an
$n$-dimensional space of states for the system. The flow of the
system will be linearized on the fibers of this morphism, which, if
they are compact, will be $n$-dimensional abelian varieties.
Many such systems can be solved by expressing the system as a Lax
pair depending on a parameter $z$. The equations can be written in the
form $\frac{d}{dt}A = [A,B]$, where $A$ and $B$ are elements of a Lie
algebra $\mathfrak{g}$, and depend both on time $t$ and on a
parameter $z$, which is thought of as a coordinate on a curve $Y$.
In this case, the flow of the system is linearized on a subtorus of
the Jacobian of a Galois cover of $Y$. If it can be shown that this
subtorus is isogenous to a Prym of the correct dimension, then the
system is completely integrable.
In section 3, we will
briefly discuss two examples of such systems, the periodic Toda lattice
and Hitchin systems. Both of these are important in Seiberg-Witten
theory, providing solutions to $\mathcal{N}=2$ supersymmetric
Yang-Mills gauge theory in four dimensions.
This work appeared as part of a Ph.D. thesis at the University of
Pennsylvania. The author would like to thank her thesis advisor, Ron Donagi,
for suggesting this project and for many helpful discussions. Thanks
are also due to David Harbater, Eyal Markman, and Leon Takhtajan.
\section{Dimensions}
We can start by using the Riemann-Hurwitz formula to find the genus $g_X$ of
$X$, which will be the dimension of the whole space $H^0(X,\omega_{X})$ and of
$\Jac (X)$. Since $\pi: X \to Y$ is a cover of degree $|G|$,
we get
\begin{equation}
g_X = 1 + |G|(g-1) + \frac{\deg R}{2}
\end{equation}
where $g$ is the genus of the base curve $Y$ and $R$ is the ramification
divisor.
The first isotypic piece we can find the dimension of is $V_1$, corresponding
to the trivial representation. The subspace where $G$ acts trivially is
the subspace of differentials which are pullbacks by $\pi$
of differentials on $Y$. This tells us that $\dim V_1 = \dim
H^0(Y,\omega_{Y}) = g$.
In the case of classical Pryms, where $G = \mathbb{Z}/2$, there is only one
other isotypic piece, $V_{\varepsilon}$ corresponding to the sign
representation $\varepsilon$. Thus we have
\begin{equation}
\dim V_{\varepsilon} = g_X - g = g-1 + \frac{\deg R}{2}.
\end{equation}
For larger groups $G$, there are more isotypic pieces, but we also have
more information: we can look at intermediate
curves, i.e. quotients of $X$ by subgroups $H$ of $G$. Differentials
on $X/H$ pull back to differentials on $X$ where $H$ acts trivially. Thus
\begin{equation}
H^0(X/H,\omega_{X/H}) = \bigoplus_{j=1}^{N} (\rho_j)^{H} \otimes V_j.
\end{equation}
The map $\pi_H: X/H \to Y$ will be a cover of degree $\frac{|G|}{|H|}$, so
Riemann-Hurwitz gives us the following formula for the genus $g_H$ of
$X/H$, which is the dimension of $H^0(X/H,\omega_{X/H})$:
\begin{equation}
g_H = 1 + \frac{|G|}{|H|}(g-1) + \frac{\deg R_H}{2}.
\end{equation}
where again $R_H$ is the ramification divisor.
We can further analyze the ramification divisor, by classifying the branch
points according to their inertial groups. Since
$\pi: X \to Y$ is a tame Galois cover of curves, all of the
inertial groups must be cyclic.
\begin{lemma}
Let $G$ be a finite group all of whose characters are defined over $\mathbb{Q}$.
If two elements $x,y \in G$ generate conjugate cyclic subgroups, then they are
conjugate.
\end{lemma}
Proof (adapted from [BZ]): We want to show that for any character $\chi$ of
$G$, $\chi(x) = \chi(y)$. Then the properties of characters will tell us
that $x$ and $y$ must be in the same conjugacy class.
We may assume that $x$ and $y$ generate the same subgroup, $H$. Then
$y = x^k$ for some integer $k$ relatively prime to $|H|$. Let $\chi$
be a character of $G$, and $\rho: G \to GL(n,\mathbb{C})$
a representation with character $\chi$. Then $\rho(x)$ will be a matrix
with eigenvalues $\lambda_1, \ldots, \lambda_n$, and $\rho(y)$ will
have eigenvalues $\lambda_1^k, \ldots, \lambda_n^k$. Since $x^{|H|} = 1$,
we have $\lambda_1^{|H|} = \ldots = \lambda_n^{|H|} = 1$. Let $\xi$ be
a primitive $|H|$th root of unity. Then we can write $\lambda_1 =
\xi^{\nu_1}, \ldots, \lambda_n = \xi^{\nu_n}$ for some integers $\nu_i$.
Now $\chi(x) = \operatorname{Trace}(\rho(x)) = \lambda_1 + \ldots + \lambda_n$,
and $\chi(y) = \chi(x^k) = \lambda_1^k + \ldots + \lambda_n^k$.
Thus $\chi(y)$ will be the image of $\chi(x)$ under the element of
Gal($\mathbb{Q}(\xi)/\mathbb{Q}$) which sends $\xi \mapsto \xi^k$.
Since the values of $\chi$ are rational, this element will act trivially,
so $\chi(y) = \chi(x)$.
$\square$
From now on, we will suppose that $G$ is such that all of its characters are
rational. (This will be true, for instance, if $G$ is a Weyl group). Pick
representative
elements $h_1 \ldots h_N$ for each conjugacy class in $G$,
and let $H_1 \ldots H_N$ be the cyclic groups that each of them generates.
By Lemma 1, this will be the whole set (up to conjugacy) of cyclic
subgroups of $G$. We can partially order this set of cyclic subgroups
by their size, so that $H_1$ is the trivial subgroup. Now we can classify
the branch points: let $R_k, k=2 \ldots N$ be the degree of the branch
locus with inertial group conjugate to $H_k$ (ignoring the trivial group).
Over each point of the branch
locus where the inertial group is conjugate to $H_k$, there will be $|G|/|H_k|$
points in the fiber. Thus the degree of the ramification
divisor $R$ of $\pi: X \to Y$ will be
\begin{equation}
\deg R = \sum_{k=1}^{N} (|G| - \frac{|G|}{|H_k|}) R_k
\end{equation}
For each quotient curve $X/H$, each point in the fiber of $\pi_H: X/H \to Y$
over a point with inertial group $H_k$ will correspond to a double coset
$H_k \backslash G / H$. Thus the degree of the ramification divisor $R_H$
will be
\begin{equation}
\deg R_H = \sum_{k=1}^{N} (\frac{|G|}{|H|} - \#(H_k \backslash G / H)) R_k.
\end{equation}
Combining these formulas with the earlier Riemann-Hurwitz computations, we get:
\begin{equation}
g_X = 1 + |G|(g-1) + \sum_k(|G| - \frac{|G|}{|H_k|} ) \frac{R_k}{2}
\end{equation}
\begin{equation}
g_H = 1 + \frac{|G|}{|H|}(g-1) + \sum_k(\frac{|G|}{|H|} -
\#(H_k \backslash G / H) ) \frac{R_k}{2}
\end{equation}
Since the genera $g_H$ are exactly the dimensions $\dim
H^0(X/H,\omega_{X/H})$,
we also have
\begin{equation}
g_H = \sum_{j=1}^{N} \dim \rho_j^{H} \dim V_j.
\end{equation}
For each subgroup $H$, this is a linear equation for
the unknown dimensions $\dim V_j$ in terms of the genus $g_{H}$.
Thus by taking quotients by the set of all cyclic subgroups $H_1 \ldots H_N$,
we get a system of $N$ equations. We wish to
invert the matrix $\dim\rho_j^{H_i}$ and find the $N$ unknowns $\dim V_j$.
\begin{lemma}
The matrix $\dim\rho_j^{H_i}$ is invertible.
\end{lemma}
Proof: We show that the rows of the matrix are linearly independent, using
the fact that rows of the character table are linearly independent.
First, note that $\dim \rho_j^{H_i}$, the dimension of the subspace of $\rho_j$
invariant under $H_i$, is equal to the inner product of characters
$\langle \Res^G_{H_i} \rho_j, \mathbf{1} \rangle$,
which we can read off from the character table of $G$ as
\begin{equation}
\dim \rho_j^{H_i} = \frac{1}{|H_i|} \sum_{a_i \in H_i} \chi_{\rho_j}(a_i).
\end{equation}
Compare this matrix to the matrix of the character table $\chi_{\rho_j}(a_i)$.
From (12) we see that each row is a sum of multiples of rows of the
character table. Since each element of a subgroup has order less than or
equal to the order of the subgroup, the rows of the
character table being added to get row $i$ appear at or below row $i$ in
the character table. Thus if we write the matrix $\dim\rho_j^{H_i}$
in terms of the basis of the character table, we will get a lower triangular
matrix with non-zero entries on the diagonal. By row reduction, we see
that the linear independence of the rows of $\dim\rho_j^{H_i}$ is equivalent
to the linear independence of the rows of the character table.
$\square$
\begin{theorem}
For each nontrivial irreducible representation $\rho_j$ of $G$,
$V_j$ has dimension
\begin{equation}
(\dim \rho_j) (g-1) + \sum_{k=1}^{N} \Bigl((\dim \rho_j) - (\dim \rho_j^{H_k})
\Bigr) \frac{R_{H_k}}{2}
\end{equation}
\end{theorem}
Proof: Since the matrix $\dim\rho_j^{H_i}$ is invertible, there is a
unique solution to the system of equations (11), so we only need to show
that this is a solution. Namely, given this formula for $\dim V_j$,
and combining (10) and (11),
we wish to show that for each cyclic subgroup $H_i$,
\begin{equation}
\sum_{j=1}^{N} \dim \rho_j^{H_i} \dim V_j = 1 + \frac{|G|}{|H_i|}(g-1)
+ \sum_k(\frac{|G|}{|H_i|} - \#(H_k \backslash G / H_i) ) \frac{R_k}{2}.
\end{equation}
Note that on the left side we are summing over all representations, not just
the nontrivial ones, so our notation will be simpler if we write
$\dim V_1 = g$ in a similar form to (11). For the trivial representation
$\rho_1$, $(\dim \rho_1) - (\dim \rho_1^{H_k}) = 0$ (since $\rho_1$ is fixed by
any subgroup $H_k$), so
\begin{equation}
\dim V_1 = 1 + (\dim \rho_1) (g-1) + \sum_{k=1}^{N} \Bigl((\dim \rho_1) -
(\dim \rho_1^{H_k}) \Bigr) \frac{R_{H_k}}{2}.
\end{equation}
The sum on the left hand side of (14) will be
\begin{equation}
1 + \sum_{j=1}^{N} \dim \rho_j^{H_i} \Bigl((\dim \rho_j) (g-1)
+ \sum_{k=1}^{N} \bigl((\dim \rho_j) - (\dim \rho_j^{H_k}) \bigr)
\frac{R_{H_k}}{2} \Bigr).
\end{equation}
Let us look at the $(g-1)$ term and the $R_{H_k}$ terms separately.
For the $(g-1)$ coefficient, we can write both $\dim \rho_j^{H_i}$ and
$\dim \rho_j$ in terms of characters of $G$ (as in (12)) and exchange the
order of summation to get
\begin{equation}
\sum_{j=1}^{N} \dim \rho_j^{H_i} \dim \rho_j =
\frac{1}{|H_i|} \sum_{a_i \in H_i} \sum_{j=1}^N \chi_{\rho_j}(a_i)
\chi_{\rho_j}(e)
\end{equation}
where $e$ is the identity element of $G$. The inner sum amounts to taking
the inner product of two columns of the character table of $G$. The
orthogonality of characters tells us that this inner product will be zero
unless the two columns are the same, which here happens only if $a_i = e$.
Thus the sum over elements in $H_i$ disappears, and we get the sum of the
squares of the dimensions of the characters:
\begin{equation}
\frac{1}{|H_i|} \sum_{j=1}^N \chi_{\rho_j}(e)^2 = \frac{|G|}{|H_i|}.
\end{equation}
which is what we want.
The $R_{H_k}$ term looks like
\begin{equation}
\sum_{j=1}^{N} \dim \rho_j^{H_i} \sum_{k=1}^{N} \bigl((\dim \rho_j) -
(\dim \rho_j^{H_k}) \bigr) \frac{R_{H_k}}{2}.
\end{equation}
We can distribute and rearrange the sums to get:
\begin{equation}
\sum_{k=1}^{N} \Bigl(\sum_{j=1}^{N} \dim \rho_j^{H_i} \dim \rho_j -
\sum_{j=1}^{N} \dim \rho_j^{H_i} \dim \rho_j^{H_k}
\Bigr)\frac{R_{H_k}}{2}.
\end{equation}
As in (17) and (18), the first term becomes $\frac{|G|}{|H_i|}$. The
second term is also the inner product of columns of the character table:
\begin{equation}
\sum_{j=1}^{N} \dim \rho_j^{H_i} \dim \rho_j^{H_k} =
\frac{1}{|H_i|} \frac{1}{|H_k|} \sum_{a_i \in H_i} \sum_{a_k \in H_k}
\sum_{j=1}^{N} \chi_{\rho_j}(a_i) \chi_{\rho_j}(a_k).
\end{equation}
This will be
zero unless $a_i$ and $a_k$ are conjugate, in which case $\chi_{\rho_j}(a_i)
= \chi_{\rho_j}(a_k)$ and character theory tells us (see for example [FH],
p. 18) that
\begin{equation}
\sum_{j=1}^N \chi_{\rho_j}(a_i)^2 = \frac{|G|}{c(a_i)},
\end{equation}
where $c(a_i)$ is the number of elements in the conjugacy class of $a_i$.
Now the second term has become
\begin{equation}
\frac{|G|}{|H_i||H_k|} \sum_{\{a_i, a_k\}} \frac{1}{c(a_i)}
\end{equation}
where the sum is taken over pairs of elements $a_i \in H_i, a_k \in H_k$
such that $a_i$ and $a_k$ are conjugate. This is exactly the number of
double cosets $\#(H_k \backslash G / H_i)$.
Adding up all of the terms, the sum on the left hand side becomes
\begin{equation}
1 + \frac{|G|}{|H_i|}(g-1) + \sum_k \Bigl(\frac{|G|}{|H_i|} - \#(H_k \backslash G / H_i)\Bigr)
\frac{R_k}{2}
\end{equation}
which is exactly the right hand side.
$\square$
\begin{corollary}
For each nontrivial irreducible representation $\rho_j$ of $G$,
$\Prym_{\rho_j}(X)$ has dimension
\begin{equation}
(\dim \rho_j) (g-1) + \sum_{k=1}^{N} \Bigl((\dim \rho_j) - (\dim \rho_j^{H_k})
\Bigr) \frac{R_{H_k}}{2}.
\end{equation}
$\square$
\end{corollary}
\section{Integrable Systems.}
\textbf{Periodic Toda lattice.}
The periodic Toda system is a Hamiltonian system of differential
equations with Hamiltonian
\begin{equation*}
H(p,q) = \frac{|p|^{2}}{2} + \sum_{\alpha} e^{\alpha(q)}
\end{equation*}
where $p$ and $q$ are elements of the Cartan subalgebra $\mathfrak{t}$
of a semisimple Lie algebra $\mathfrak{g}$, and the sum is over the
simple roots of $\mathfrak{g}$ plus the highest root. This system
can be expressed in Lax form [AvM] $\frac{d}{dt}A = [A,B]$, where $A$
and $B$ are elements of the loop algebra $\mathfrak{g}^{(1)}$, and
can be thought of as elements of $\mathfrak{g}$ which depend on a
parameter $z \in \mathbb{P}^{1}$. For $\mathfrak{sl}(n)$, $A$ is of
the form
\begin{equation*}
\begin{pmatrix}
y_{1} & 1 & & x_{0}z \\
x_{1} & y_{2} & \ddots & \\
& \ddots & \ddots & 1 \\
z & & x_{n-1} & y_{n}
\end{pmatrix}
\end{equation*}
For any representation $\varrho$ of
$\mathfrak{g}$, the spectral curve $S_{\varrho}$ defined by the
equation $\det (\varrho(A(z)) - \lambda I) = 0$ is independent of
time (i.e. is a conserved quantity of the system). The spectral curve is a
finite cover of $Y$ which for generic $z$ parametrizes the eigenvalues of
$\varrho(A(z))$. While the eigenvalues are conserved by the system,
the eigenvectors are not. The eigenvectors of $\varrho(A)$ determine
a line bundle on the spectral cover, so an element of
$\Jac(S_{\varrho})$. The flow of the system is linearized on this Jacobian.
Since the original system of equations didn't depend on a choice of
representation $\varrho$, the flow is actually linearized on an abelian
variety which is a subvariety of $\Jac(S_{\varrho})$ for every
$\varrho$.
In fact, instead of considering each spectral cover we can look at
the cameral cover $X \to \mathbb{P}^{1}$. This is constructed as a pullback to
$\mathbb{P}^{1}$ of the cover
$\mathfrak{t} \to \mathfrak{t}/G$, where $G$ is the Weyl group of
$\mathfrak{g}$. This cover is pulled back by the rational map
$\mathbb{P}^{1} \dashrightarrow \mathfrak{t}/G$ defined by the class of $A(z)$
under the adjoint action of the corresponding Lie group. (For $A(z)$
a regular semisimple element of $\mathfrak{sl}(n)$, this map sends
$z$ to the unordered set of eigenvalues of $A(z)$.) Thus the cameral
cover is a finite Galois cover of $\mathbb{P}^{1}$ whose Galois group $G$
is the Weyl group of $\mathfrak{g}$. The flow of the Toda system is
linearized on the Prym of this cover corresponding to the representation
of $G$ on $\mathfrak{t}^{*}$. This is an $r$-dimensional representation,
where $r$ is the rank, so the dimension of this Prym is
\begin{equation*}
r(-1) + \sum_{k=1}^{N} \Bigl(r - (\dim \mathfrak{t}^{H_k})
\Bigr) \frac{R_{H_k}}{2}.
\end{equation*}
The ramification of this cover has been analyzed in [D1] and [MS].
There are $2r$ branch points where the inertial group $H$ is $\mathbb{Z}/2\mathbb{Z}$
generated by one reflection, so for each of these $\dim
\mathfrak{t}^{H}$ is $r-1$. There are also two points ($z=0$ and
$\infty$) where the inertial group $H$ is generated by the Coxeter
element, the product of the reflections corresponding to the simple
roots. This element of $G$ doesn't fix any element of
$\mathfrak{t}$, so for these two points $\dim \mathfrak{t}^{H} = 0$.
Thus the dimension of the Prym is
\begin{eqnarray*}
-r + (r-(r-1)) \frac{2r}{2} + (r-0) \frac{2}{2} \\
= r.
\end{eqnarray*}
Since the original system of equations had a $2r$-dimensional phase
space, this is the answer that we want.
\textbf{Hitchin systems.} Hitchin showed [H] that the cotangent
bundle to the moduli space of semistable
vector bundles on a curve $Y$ has the structure of an algebraically
completely integrable system. His proof, later extended to
principal $\mathcal{G}$ bundles with any reductive Lie group $\mathcal{G}$ [F,S],
uses the fact that this moduli space is equivalent (by deformation
theory) to the space of \emph{Higgs pairs}, pairs $(P,\phi)$ of a
principal bundle and an endomorphism $\phi \in H^{0}(Y, ad(P) \otimes
\omega_{Y})$.
As in the case of the Toda system, the key construction is of a
cameral cover of $Y$. The eigenvalues of $\phi$, which are sections
of the line bundle $\omega_{Y}$, determine a spectral cover of $Y$ in the
total space bundle. The eigenvectors determine a line bundle on this
spectral cover. The Hitchin map sends a Higgs pair $(P,\phi)$ to
the set of coefficients of the characteristic polynomial. Each coefficient is a
section of a power of $\omega_{Y}$, so the image of the Hitchin map is
$B := \bigoplus_{i=1}^{r}H^{0}(Y,\omega_{Y}^{\otimes d_i})$, where
the $d_{i}$ are the degrees of the basic invariant polynomials of the Lie
algebra $\mathfrak{g}$.
Again, we can consider instead the cameral cover $X_{b} \to Y$,
which is obtained as a pullback to $Y$ via $\phi$ of $\mathfrak{t} \otimes
\omega_{Y} \to \mathfrak{t} \otimes \omega_{Y}/G$.
The generic fiber of the Hitchin map is isogenous to
$\Prym_{\mathfrak{t}}(X)$, which has dimension
\begin{equation*}
r(g-1) + \sum_{k=1}^{N} \Bigl(r - (\dim \mathfrak{t}^{H_k})
\Bigr) \frac{R_{H_k}}{2}
\end{equation*}
By looking at the generic fiber,
we can restrict our attention to cameral covers where the only
ramification is of order two, with inertial group $H$ generated by one
reflection. The last piece of information we need to compute the
dimension is the degree of the branch divisor of $X \to Y$.
The cover $\mathfrak{t} \otimes
\omega_{Y} \to \mathfrak{t} \otimes \omega_{Y}/G$ is ramified
where any of the roots, or their product, is equal to zero.
There are $(\dim \mathcal{G} - r)$ roots, so this defines a
hypersurface of degree $(\dim \mathcal{G} - r)$ in the total space of
$\omega_{Y}$. The ramification divisor of $X \to Y$ is the
intersection of this hypersurface with the section $\phi$,
which is the divisor
corresponding to the line bundle $\omega_{Y}^{\otimes(\dim \mathcal{G} - r)}.$
Thus the degree of the branch divisor will be $(\dim \mathcal{G} - r)(2g-2)$.
Combining all of this information, we see that the dimension of the
Prym is
\begin{eqnarray*}
\dim \Prym_{\mathfrak{t}} (X)
& = & r (g - 1) + (r-(r-1)) \frac{(\dim \mathcal{G} - r)(2g-2)}{2} \\
& = & r(g-1) + (\dim \mathcal{G} - r)(g-1) \\
& = & \dim \mathcal{G}(g-1).
\end{eqnarray*}
By comparison, the dimension of the base space is
\begin{equation*}
\sum_{i=1}^{r} h^{0}(Y,\omega_{Y}^{\otimes d_i})
\end{equation*}
The sum of the degrees $d_{i}$ of the basic invariant polynomials of
$\mathfrak{g}$ is the dimension of a Borel subalgebra, $(\dim
\mathcal{G} + r)/2$. For $g>1$, Riemann-Roch gives
\begin{eqnarray*}
\sum_{i=1}^{r} h^{0}(Y,\omega_{Y}^{\otimes d_i})
& = & \sum_{i=1}^{r} (2d_{i}-1) (g-1) \\
& = & (\dim \mathcal{G} + r - r)(g-1) \\
& = & \dim \mathcal{G}(g-1).
\end{eqnarray*}
This, as Hitchin said, ``somewhat miraculously'' turns out to be the same thing.
Markman [Ma] and Bottacin [B] generalized the Hitchin system by twisting
the line bundle $\omega_{Y}$ by an effective divisor $D$. The effect of
this is to create a family of integrable systems, parametrized by
the residue of the Higgs field $\phi$ at $D$. The base space of each
system is a fiber of the map
\begin{gather*}
B := \bigoplus_{i=1}^{r}H^{0}(Y,\omega_{Y}(D)^{\otimes d_i}) \\
\downarrow \\
\bar{B} := \text{the space of possible residues at } D
\end{gather*}
which sends the set of $r$ sections in $B$
to its set of residues at $D$. At each point of $D$, there are $r$ independent
coefficients, so the dimension of $\bar B$ is $r(\deg D)$. Thus the base space
of each system has dimension
\begin{eqnarray*}
\dim B - \dim \bar B
& = & \sum_{i=1}^{r}h^{0}(Y,\omega_{Y}(D)^{\otimes d_i}) - r(\deg D) \\
& = & \sum_{i=1}^{r}( d_{i}(2g-2 + \deg D) - (g-1)) -r(\deg D) \\
& = & (1/2)(\dim \mathcal{G} + r)(2g - 2 + \deg D) -r(g-1) -r(\deg D) \\
& = & (\dim \mathcal{G})(g-1) + \frac{\dim \mathcal{G} -r}{2} \deg D
\end{eqnarray*}
Markman showed that the generic fiber of this system is again isogenous to
$\Prym_{\mathfrak{t}}(X)$, where $X$ is a cameral cover of the base curve $Y$.
The construction of the cameral cover is similar to the case of the
Hitchin system, except that $\phi$ is a section of $ad(P) \otimes
\omega_{Y}(D)$. Thus the ramification divisor is
$(\omega_{Y}(D))^{\otimes(\dim \mathcal{G} -r)}$, and the dimension is
\begin{eqnarray*}
\dim \Prym_{\mathfrak{t}} (X)
& = & r (g - 1) + \frac{(\dim \mathcal{G} - r)(2g-2 +
\deg D)}{2} \\
& = & \dim \mathcal{G}(g-1) + \frac{(\dim \mathcal{G} -r)}{2} \deg D.
\end{eqnarray*}
Again, this is the same dimension as the base of the system.
\end{document} |
\begin{document}
\title{Floquet Theory for Quaternion-valued Differential Equations}
\begin{abstract}
\normalsize
This paper describes the Floquet theory for quaternion-valued differential equations (QDEs). The Floquet normal form of fundamental matrix for linear QDEs with periodic coefficients is presented and the stability of quaternionic periodic systems is accordingly studied. As an important application of Floquet theory, we give a discussion on the stability of quaternion-valued Hill's equation. Examples are presented to illustrate the proposed results.
\end{abstract}
\begin{keywords}
Floquet theory, periodic systems, quaternion, non-commutativity, Hill's equation.
\end{keywords}
\begin{msc}
34D08, 34B30, 20G20.
\end{msc}
\section{Introduction}\label{S1}
The theory of quaternion-valued differential equations (QDEs) has gained prominent attention in recent years due to its applications in many fields, including spatial kinematic modelling and attitude dynamics \cite{chou1992quaternion,gupta1998linear}, fluid mechanics \cite{gibbon2002quaternionic,gibbon2006quaternions}, quantum mechanics \cite{alder1986quaternionic,adler1995quaternionic}, etc. A feature of the quaternion skew field is that the multiplication of quaternion numbers is noncommutative; this property brings challenges to the study of QDEs. Therefore, although QDEs appear in many fields, mathematical research on QDEs is relatively scarce. Leo and Ducati \cite{de2003solving} solved some simple second order quaternionic differential equations by using the real matrix representation of left/right acting quaternionic operators. Applying the topological degree methods, Campos and Mawhin \cite{campos2006periodic} initiated a study of the $T$-periodic solutions of quaternion-valued first order differential equations. Later, Wilczynski \cite{wilczynski2009quaternionic,wilczynski2012quaternionic} presented some sufficient conditions for the existence of at least two periodic solutions of the quaternionic Riccati equation and the existence of at least one periodic solution of the quaternionic polynomial equations. The existence of periodic orbits, homoclinic loops, invariant tori for 1D autonomous homogeneous QDE $\dot{q}=a q^n, (n=2,3)$ was proposed by Gasull \emph{et al.} \cite{gasull2009one}. The study of Zhang \cite{zhang2011global} is devoted to the global structure of 1D quaternion Bernoulli equations. Recently, the basic theory and fundamental results of linear QDEs were established by Kou and Xia \cite{kouxialinear2018,kou2015linear2,xia2016algorithm}. They proved that the algebraic structure of the solutions to QDEs is different from the classical case. 
Moreover, for lack of basic theory such as fundamental theorem of algebra, Vieta's formulas of quaternions, it is difficult to solve QDEs. In \cite{kouxialinear2018,kou2015linear2,xia2016algorithm,cheng2018unified}, the authors proposed several new methods to construct the fundamental matrices of linear QDEs.
As a generalization, QDEs have many properties similar to ODEs. At the same time, for the relatively complicated algebraic structure of quaternion, one may encounter various new difficulties when studying QDEs.
\begin{enumerate}
\item Factorization theorem and Vieta's formulas (relations between the roots and the coefficients) for quaternionic polynomials are not valid (see e. g. \cite{eilenberg1944fundamental, serodio2001zeros,pogorui2004structure}).
\item A quaternion matrix usually has infinite number of eigenvalues. Besides, the set of all eigenvectors corresponding to a non-real eigenvalue is not a module (see e. g. \cite{zhang1997quaternions,rodman2014topics}).
\item The study of quaternion matrix equations is of intricacy (see e. g. \cite{wang2008common,wang2009ranks}).
\item Even the quaternionic polynomials are not ``regular'' (an analogue concept of holomorphic). This fact leads to noticeable difficulties for studying analytical properties of quaternion-valued functions (see e. g. \cite{sudbery1979quaternionic,wilczynski2009quaternionic}).
\end{enumerate}
Up to present, the theory of QDEs remains far from systemic. To the best of authors' knowledge, there was virtually nonexistent study about the stability theory of QDEs. Based on this fact, we are motivated to investigate the stability of the linear QDEs
\begin{equation}\label{homo linear systems}
\dot{x}=A(t)x
\end{equation}
where $A$ is a smooth $n\times n$ quaternion-matrix-valued function. In particular, we will focus on the important special cases where $A$ is a quaternionic constant or periodic quaternion-valued function. In the real-valued systems, the well-known Floquet theory indicates that the case where $A$ is a periodic matrix-valued function is reducible to the constant case (see e. g. \cite{chicone2006ordinary,hale2009ordinary}). Floquet theory is an effective tool for analyzing the periodic solutions and the stability of dynamic systems. Owing to its importance, Floquet theory has been extended in different directions. Johnson \cite{johnson1980floquet} generalized the Floquet theory to the almost-periodic systems. In \cite{chow1994floquet,kuchment1993floquet,kuchment1994behavior}, the authors extended the Floquet theory to the partial differential equations. Recently, the Floquet theory has been extensively explored for dynamic systems on time scales (see e. g. \cite{ahlbrandt2003floquet,dacunha2011unified,agarwal2014floquet,adivar2016floquet}).
As a continuation of \cite{kouxialinear2018,kou2015linear2,xia2016algorithm}, we generalize the Floquet theory to QDEs in this paper. Specifically, the contributions of this paper are summarized as follows.
\begin{enumerate}
\item We show that the stability of constant coefficient homogeneous linear QDEs is determined by the standard eigenvalues of its coefficient matrix.
\item Floquet normal form of the fundamental matrix for linear QDEs with periodic coefficients is presented.
\item The monodromy matrix, characteristic multiplier and characteristic exponent for QDEs are defined. Moreover, the stability of quaternionic periodic systems is discussed.
\item We propose some sufficient conditions for the existence of periodic solution of quaternionic periodic systems.
\item Without question, there are some results of ODEs that are inevitably invalid for QDEs. We will discuss some of these results. Specifically, we will discuss the stability of quaternion-valued Hill's equation.
\end{enumerate}
The rest of the paper is organized as follows. In Section \ref{S2}, some basic concepts of quaternion algebra are reviewed. In addition, several lemmas on quaternion matrices are derived. Section \ref{S3} is devoted to the stability of constant coefficient linear homogeneous QDEs. In Section \ref{S4}, we establish the Floquet theory for QDEs. Specifically, the Floquet normal form of the fundamental matrix for quaternionic periodic systems is presented. Some important concepts such as monodromy matrix, characteristic multiplier and characteristic exponent for QDEs are defined and the stability of quaternionic periodic systems is accordingly studied. The stability of quaternion-valued Hill's equation is discussed in Section \ref{S5}. Finally, conclusions are drawn at the end of the paper.
\section{Preliminaries}\label{S2}
\subsection{Quaternion algebra}\label{S2.1}
The quaternions were first described by Hamilton in 1843 \cite{sudbery1979quaternionic}. The algebra of quaternions is denoted by
\begin{equation*}
\H:= \{q=q_0+q_1 \bm {i} +q_2 \qj+ q_3\qk\}
\end{equation*}
where $q_0,q_1,q_2,q_3$ are real numbers and the elements $\qi$, $\qj$ and $\qk$ obey Hamilton's multiplication rules:
\begin{equation*}
\qi\qj=-\qj\qi=\qk,~~\qj\qk=-\qk\qj=\qi,~~\qk\qi=-\qi\qk=\qj,~~\qi^2=\qj^2=\qi\qj\qk=-1.
\end{equation*}
For every quaternion $q=q_0+ \bm {i}q_1+ \bm{j}q_2+ \bm{k}q_3$, the scalar and vector parts of $q$ are defined as $\mathcal{R}(q)=q_0$ and $\mathcal{V}(q)=q_1 \bm {i} +q_2 \qj+ q_3\qk$, respectively. If $q= \mathcal{V}(q)$, then $q$ is called a pure imaginary quaternion.
The quaternion conjugate is defined by $\overline{q}= q_0- \bm {i}q_1- \bm{j}q_2- \bm{k}q_3$, and the norm $|q|$ of $q$ defined as
$|q|^2={q\overline{q}}={\overline{q}q}=\sum_{m=0}^{m=3}{q_m^2}$.
Using the conjugate and norm of $q$, one can define the inverse of $q\in\H\backslash\{0\}$ by $q^{-1}=\overline{q}/|q|^2$.
For each fixed unit pure imaginary quaternion $\bm \varsigma$, the quaternions contain the subset $\mathbb{C}_{\bm \varsigma}:=\{a+b \bm \varsigma :a,b\in\mathbb{R}\}$. The complex number field $\mathbb{C}$ can be viewed as a subset of $\H$ since it is isomorphic to $\mathbb{C}_{\qi}$. Therefore we will denote $\mathbb{C}_{\qi}$ by $\mathbb{C}$ for simplicity.
\subsection{Matrices of quaternions}\label{S2.2}
The quaternion exponential function $ \exp(A)$ for $A\in \H^{n\times n}$ is defined by means of an infinite series as
\begin{equation*}
\exp({A}):=\sum_{n=0}^\infty \frac{A^n}{n!}.
\end{equation*}
When $n=1$ and $A=q\in \H$, analogous to the complex case one may derive a closed-form representation:
\begin{equation*}
e^{q}=\exp(q)= e^{q_0}\left(\cos| \mathcal{V}(q)|+\frac{ \mathcal{V}(q)}{| \mathcal{V}(q)|}\sin| \mathcal{V}(q)|\right).
\end{equation*}
Every quaternion
matrix $A\in\H^{m\times n}$
can be expressed uniquely in the form of
\begin{equation*}
A=A_1+ A_2\qj, ~~~\text{where}~~ A_1, A_2 \in \C^{m\times n}.
\end{equation*}
Then the \emph{complex adjoint matrix} \cite{aslaksen1996quaternionic,zhang1997quaternions} of the quaternion matrix $A$ is defined as
\begin{equation}\label{complex adjoint matrix}
\chi_A=\begin{pmatrix}
A_1&A_2\\
-\overline{A_2} & \overline{A_1}
\end{pmatrix}.
\end{equation}
By using the complex adjoint matrix, the $q$-determinant of $A$ is defined by
\begin{equation}\label{q-det}
\abs{A}_q:=\abs{\chi_A},
\end{equation}
where $\abs{\cdot}$ is the conventional determinant for complex matrices.
By direct computations, it is easy to see that $\abs{A}_q=\abs{A}^2$ when $A$ is a complex matrix.
From \cite{kouxialinear2018}, we know that $\H^n$ over the division ring $\H$ is a right $\H$-module (a similar concept to linear space) and $\bmeta_1,\bmeta_2,\cdot\cdot\cdot,\bmeta_k\in \H^n$ are right linearly independent if
\begin{equation*}
\bmeta_1\alpha_1+\bmeta_2\alpha_2+\cdot\cdot\cdot+\bmeta_k\alpha_k=0,\alpha_i\in \H~~ \text{implies that} ~~\alpha_1=\alpha_2=\cdot\cdot\cdot=\alpha_k=0.
\end{equation*}
Let $A\in\H^{n\times n}$, a nonzero $\bmeta\in \H^{n\times 1}$ is said to be a \emph{right eigenvector} of $A$ corresponding to the \emph{right eigenvalue} $\lambda\in \H$ provided that
\begin{equation*}
A\bmeta=\bmeta \lambda
\end{equation*}
holds. A matrix $A_1$ is said to be similar to a matrix $A_2$ if $A_2=S^{-1}A_1S$ for some nonsingular matrix $S$. In particular, we say that two quaternions $p,q$ are similar if $p=\alpha^{-1}q\alpha$ for some nonzero quaternion $\alpha$. By Theorem 2.2 in \cite{zhang1997quaternions}, we know that the similarity of quaternions defines an equivalence relation. The set
\begin{equation*}
[q]:= \{ p= \alpha^{-1}q\alpha: \alpha \in \mathbb{H}\setminus \{0\}\}
\end{equation*}
is called an equivalence class of $q$. It is easy to see that $[q]$ can also be recognized by
\begin{equation*}
[q]:= \{ p\in \mathbb{H}: \mathcal{R}(p)=\mathcal{R}(q), \abs{\mathcal{V}(p)}=\abs{\mathcal{V}(q)}\}.
\end{equation*}
It follows that any equivalence class $[q]$ has one and only one complex-valued element with nonnegative imaginary part.
We recall some basic results about quaternion matrices which can be found, for instance, in \cite{zhang1997quaternions,baker1999right,rodman2014topics}.
\begin{theorem}\label{thm of q matrix}
Let $A\in\H^{n\times n}$, then the following statements hold.
\begin{enumerate}
\item $A$ has exactly $n$ right eigenvalues (including multiplicity) which are complex numbers with nonnegative imaginary parts. These eigenvalues are called standard eigenvalues of $A$.
\item If $A$ is a complex matrix whose eigenvalues are $\lambda_1=\alpha_1+ \bm {i}\beta_1, \lambda_2=\alpha_2+ \bm {i}\beta_2, \cdots,\lambda_n=\alpha_n+ \bm {i}\beta_n$ (repeated according to their multiplicity), then the standard eigenvalues of $A$ are $\widetilde{\lambda}_1=\alpha_1+ \bm {i}\abs{\beta_1}, \widetilde{\lambda}_2=\alpha_2+ \bm {i}\abs{\beta_2}, \cdots, \widetilde{\lambda}_n=\alpha_n+ \bm {i}\abs{\beta_n}$. In particular, $\abs{\widetilde{\lambda}_j}=\abs{{\lambda}_j}$ for $j=1,2,\cdots,n$.
\item $A$ is invertible if and only if $\chi_A$ is invertible.
\item If $A$ is (upper or lower) triangular, then the only eigenvalues are the diagonal elements (and the quaternions similar to them).
\end{enumerate}
\end{theorem}
Let $\Omega$ be the totality of all $2n\times 2n$ partitioned complex matrices which have form of (\ref{complex adjoint matrix}). It has been shown in \cite{zhang2001jordan,rodman2014topics} that $\Omega$ is closed under addition, multiplication and inversion. Furthermore, each $A\in \H^{n\times n}$ has a Jordan form in $\C^{n\times n}$.
\begin{lemma}\label{expmultiply}
\cite{zhang2001jordan} Let $A,B\in \H^{n\times n}$. Then $\chi_A+\chi_B =\chi_{A+B}\in \Omega$ and $\chi_A\chi_B = \chi_{AB} \in \Omega$. Moreover, if $A$ is invertible, then $\chi_A^{-1}=\chi_{A^{-1}} \in \Omega$.
\end{lemma}
\begin{lemma}\label{Jordanform}
\cite{zhang2001jordan,rodman2014topics} Let $A \in \H^{n\times n}$. Then there exists a $P\in \H^{n\times n}$ such that
\begin{equation*}
\chi_P^{-1}\chi_A\chi_P=\begin{pmatrix}
J&0\\
0 & \overline{J}
\end{pmatrix}
\end{equation*}
is a Jordan canonical form of $\chi_A$, where $J\in\C^{n\times n}$ has all its diagonal entries with nonnegative imaginary parts. Consequently, $P^{-1}A P=J$ is a Jordan canonical form of $A$ in $\C^{n\times n}$.
\end{lemma}
\begin{remark}
The diagonal entries of $J$ are actually the standard eigenvalues of $A$.
\end{remark}
If $\lambda$ is a standard eigenvalue of $A \in \H^{n\times n}$, its algebraic multiplicity is defined by the number of its occurrences in the Jordan canonical form $J$. Since the totality of solutions of
$A\bmeta=\bmeta \lambda$ is not an $\H$-module, we cannot use the dimensionality of an `eigenspace' to define the geometric multiplicity of $\lambda$. Note that $\lambda$ is an eigenvalue of $\chi_A$; motivated by Lemma \ref{Jordanform}, we may define the geometric multiplicity for the standard eigenvalues of quaternion matrices as follows.
\begin{definition}
Let $\lambda$ be a standard eigenvalue of $A \in \H^{n\times n}$, the geometric multiplicity for $\lambda$ is defined as the dimensionality of the (complex) linear space $\{\bx\in \C^n : (J-\lambda I)\bx=0\}$, where $J$ is the Jordan canonical form of $A$ in $\C^{n\times n}$.
\end{definition}
Employing the above lemmas, it is not difficult to verify that $\Omega$ is also closed under the exponential.
\begin{lemma}\label{exp q-matrix eqn}
Let $A,C\in \H^{n\times n}$, where $C$ is invertible. Then $e^{\chi_A}=\chi_{e^A}\in \Omega$ and there is a $B \in \H^{n\times n}$ such that $e^B=C$.
\end{lemma}
\begin{proof}
By Lemma \ref{Jordanform}, there is a $P\in \H^{n\times n}$ such that $P^{-1}A P=J \in\C^{n\times n}$. Observe that $\exp({\overline{J}})=\overline{\exp(J)}$ and therefore
\begin{equation*}
\begin{split}
\chi_P^{-1}e^{\chi_A}\chi_P & =e^{\chi_P^{-1}\chi_A\chi_P}\\
& =e^{\begin{pmatrix}
J&0\\
0 & \overline{J}
\end{pmatrix}}=\begin{pmatrix}
e^J&0\\
0 & e^{\overline{J}}
\end{pmatrix}=\chi_{e^J}.
\end{split}
\end{equation*}
Hence $e^{\chi_A}=\chi_P\chi_{e^J}\chi_P^{-1}=\chi_{Pe^JP^{-1}}=\chi_{e^{PJP^{-1}}}=\chi_{e^A}$.
For quaternion matrix $C$, there is a $S \in \H^{n\times n}$ such that $S^{-1}CS=K\in \C^{n\times n}$. Since $C$ is invertible, then $K$ is nonsingular. Moreover, there exists a complex matrix $D$ such that $K=e^D$ by Theorem 2.82 in \cite{chicone2006ordinary}. Therefore
\begin{equation*}
\begin{split}
\chi_C & =\chi_S\chi_K\chi_S^{-1} \\
& =\chi_S\chi_{e^D}\chi_S^{-1}=\chi_S e^{\chi_D}\chi_S^{-1}=e^{\chi_S \chi_D \chi_S^{-1}}=e^{\chi_{SDS^{-1}}}=\chi_{e^{SDS^{-1}}}.
\end{split}
\end{equation*}
Thus
$C=e^{SDS^{-1}}$. Setting $B=SDS^{-1}$, we complete the proof.
\end{proof}
By Lemma \ref{exp q-matrix eqn} and Theorem \ref{thm of q matrix}, we obtain the following spectral mapping theorem.
\begin{theorem}\label{spectral exponetial map}
If $A\in\H^{n\times n}$ and $\lambda_1,\lambda_2,\cdots,\lambda_n$ are the standard eigenvalues of $A$ repeated according to their multiplicity, then $e^{\widetilde{\lambda}_1}, e^{\widetilde{\lambda}_2}, \cdots, e^{\widetilde{\lambda}_n}$ are the standard eigenvalues of $e^A$, where $\widetilde{\lambda}_j$ ($j=1,2,\cdots,n$) is defined by
\begin{equation*}
\widetilde{\lambda}_j:=
\begin{cases}
\lambda_j,& \mathrm{if}~ e^{\lambda_j} ~\mathrm{has~ nonnegative ~imaginary~ part};\\
\overline{\lambda_j}, &\mathrm{otherwise}.
\end{cases}
\end{equation*}
\end{theorem}
\begin{proof}
If $\lambda_1,\lambda_2,\cdots,\lambda_n$ are the standard eigenvalues of $A$, then
$\lambda_1,\lambda_2,\cdots,\lambda_n,\overline{\lambda_1},\overline{\lambda_2},\cdots,\overline{\lambda_n}$ are the eigenvalues of $\chi_A$. By the spectral mapping theorem for complex-valued matrices, we conclude that $\sigma=\{e^{\lambda_1},e^{\lambda_2},\cdots,e^{\lambda_n},e^{\overline{\lambda_1}},e^{\overline{\lambda_2}},\cdots,e^{\overline{\lambda_n}}\}$ is the spectrum of $e^{\chi_A}$. Since $e^{\chi_A}=\chi_{e^A}$ by Lemma
\ref{exp q-matrix eqn}, we know that
$\sigma$ is the spectrum of $\chi_{e^A}$. From Theorem \ref{thm of q matrix}, all elements of $\sigma$ are complex-valued eigenvalues of $e^A$; in particular, the complex numbers possessing the nonnegative imaginary parts in $\sigma$ are the standard eigenvalues of $e^A$.
\end{proof}
\section{Stability of linear homogeneous QDEs with constant coefficients}\label{S3}
Analogous to ODEs, we can define the concept of stability (in Lyapunov sense) for QDEs.
\begin{definition}
Let $\bfunc: [t_0,\infty)\times\H ^n\to \H ^n$. Consider $\dot{\bx}=\bfunc(t,\bx), ~t\in [t_0,\infty) $. The solution $\bphi(t,t_0,\bx_0)$ (satisfying initial condition $\bx(t_0)=\bx_0$) is called stable if for any $\epsilon>0$, there is a $\delta>0$ such that $\norm{\bx-\bx_0}<\delta$ implies $\norm{\bphi(t,t_0,\bx)-\bphi(t,t_0,\bx_0)}<\epsilon$ for all $t\geq t_0$. The solution $\bphi(t,t_0,\bx_0)$ is called asymptotically stable if there is a $\delta>0$ such that $\lim_{t\to \infty}\norm{\bphi(t,t_0,\bx)-\bphi(t,t_0,\bx_0)}=0$ whenever $\norm{\bx-\bx_0}<\delta$.
\end{definition}
For any $A=(a_{ij})_{n\times n}\in\H^{n\times n}$ and $\bmeta=(\eta_1,\eta_2,\cdots,\eta_n)^T\in \H^n$, the norm of $A$ and $\bmeta$ are respectively defined by
\begin{equation*}
\norm{A}=\sum_{i,j=1}^n | a_{ij}|,~~~\norm{\bmeta}=\sum_{k=1}^n |\eta_k|.
\end{equation*}
The norm $\norm{\cdot}$ defined for $A$ is a matrix norm. It is easy to verify that for any $A,B\in \H^{n\times n}$, the submultiplicativity holds, that is
\begin{equation*}
\norm{AB}\leq \norm{A} \norm{B}.
\end{equation*}
By similar arguments to Theorem 1.1 in \cite{afanasiev2013mathematical}, we see that the stability of zero solution of (\ref{homo linear systems}) implies the stability of any other solutions. Thus it is permissible to simply say that system (\ref{homo linear systems}) is stable (or unstable).
\begin{theorem}\label{judging theorem}
Let $M(t)$ be a fundamental matrix of (\ref{homo linear systems}). Then the system
(\ref{homo linear systems}) is stable if and only if $\norm{M(t)}$ is bounded. The system (\ref{homo linear systems}) is asymptotically stable if and only if $\lim_{t\to \infty}\norm{M(t)}=0$.
\end{theorem}
\begin{proof}
Let $L$ be an upper bound for $\norm{M(t)}$, $L_1=\norm{M^{-1}(t_0)}$ and $\bphi(t,t_0,\bxi)$ be the solution of (\ref{homo linear systems}) with $\bphi(t_0,t_0,\bxi)=\bxi=(\xi_1,\xi_2,\cdots,\xi_n)^T$. Then $\bphi(t,t_0,\bxi)=M(t)M^{-1}(t_0) \bxi$.
For any $\epsilon>0$, let $\delta=\frac{\epsilon}{L L_1}$, then
$\norm{\bphi(t,t_0,\bxi)-0}=\norm{M(t)M^{-1}(t_0) \bxi}\leq L L_1 \norm{\bxi}<\epsilon$ whenever $\norm{\bxi}<\delta$.
Conversely, suppose that for any $\epsilon > 0$ there is a $\delta>0$ such that
$\norm{M(t)M^{-1}(t_0) \bxi }<\epsilon$ whenever $\norm{\bxi} <\delta$. Then
\begin{equation*}
\begin{split}
\norm{M(t)M^{-1}(t_0)} & =n \norm{M(t)M^{-1}(t_0)(\frac{1}{n}, \frac{1}{n}, \cdots, \frac{1}{n})^T} \\
&\leq n \sup_{\norm{\bmeta}\leq 1} \norm{M(t)M^{-1}(t_0)\bmeta}\\
&= n \sup_{\norm{\bxi}\leq \delta} \norm{M(t)M^{-1}(t_0) \delta^{-1} \bxi}\\
&< n \epsilon \delta^{-1}
\end{split}
\end{equation*}
Therefore $\norm{M(t)}\leq \norm{M(t)M^{-1}(t_0)}\norm{M(t_0)}< n \epsilon \delta^{-1} \norm{M(t_0)}$ is bounded.
If $\lim_{t\to \infty}\norm{M(t)}=0$, then $\norm{\bphi(t,t_0,\bxi)-0}=\norm{M(t)M^{-1}(t_0) \bxi}\leq\norm{M(t)} L_1 \norm{\bxi}$ tends to $0$ as $t\to \infty$ whenever $\norm{\bxi}<\delta$. Conversely, it is easy to see that if the zero solution is asymptotically stable, then $\norm{M(t)}$ has to be convergent to $0$ as $t\to \infty$.
\end{proof}
By using the Jordan canonical form of $A \in \H^{n\times n}$, we can obtain a matrix representation for $e^{tA}$. Let $P$ be a quaternion matrix such that $P^{-1}AP=J \in C^{n\times n}$, then
$P^{-1}e^{tA} P = e^{tP^{-1}AP}=e^{tJ}$. Let $\lambda_1, \lambda_2, \cdots, \lambda_k$ be the distinct standard eigenvalues of $A$ that correspond to multiplicities $n_1, n_2, \cdots, n_k$, respectively. Then $J=\mathrm{diag}(J_1, J_2, \cdots, J_k)$ where $J_i =\lambda_i I + N_i$ with $N_i^{n_i}=0$. Thus we have that
\begin{equation*}
e^{tJ_i}=e^{t(\lambda_i I+N_i)}=e^{t\lambda_i}e^{tN_i}=e^{t\lambda_i}\left(I+t N_i+\frac{t^2}{2!}N_i^2+ \cdots + \frac{t^{n_i-1}}{(n_i-1)!} N_i^{n_i-1}\right).
\end{equation*}
Note that $e^{tJ}=\mathrm{diag}(e^{t J_1}, e^{t J_2}, \cdots, e^{t J_k})$, then we obtain an explicit matrix representation for $e^{tA}=Pe^{tJ}P^{-1}$. Moreover, this representation has a similar form with the cases where $A$ is a real or complex matrix. Hence by similar arguments to Theorem 4.2 in \cite{hale2009ordinary}, we have the following theorem.
\begin{theorem}\label{stablility of constant system}
The system $ \dot{\bx}=A\bx$
\begin{enumerate}
\item is stable if and only if the standard eigenvalues of $A$ all have non-positive real parts and the algebraic multiplicity equals the geometric multiplicity of each standard eigenvalue with zero real part;
\item is asymptotically stable if and only if all the standard eigenvalues of $A$ have negative real parts.
\end{enumerate}
\end{theorem}
\begin{remark}
Since any two similar quaternions possess the same scalar part, the word ``standard'' in Theorem \ref{stablility of constant system} can be removed.
\end{remark}
\begin{example}\label{ex1}
Consider the system $\dot{\bx}=A\bx$, where
\begin{equation*}
A=\begin{pmatrix}
\qi&\qj&\qj\\
\qk&1&\qk\\
0&0&1
\end{pmatrix}
\end{equation*}
The principal fundamental matrix at $t=0$ ($M(0)=I$) is given by
\begin{equation*}\label{fundamental matrix 1}
M(t) = \begin{pmatrix}
\frac{1-\qi}{2}+ \frac{1+\qi}{2}\gamma_1& \frac{\qk-\qj}{2}+\gamma_2& \bm{j}\gamma_3+\gamma_4-e^t\\
\frac{\qj-\qk}{2}+\frac{\qk-\qj}{2}\gamma_1 &\frac{1-\qi}{2}-\qj\gamma_2& \bm {i}\gamma_3-\qj\gamma_4-(1-\qj-\qk)e^t\\
0&0&e^{ t}
\end{pmatrix},
\end{equation*}
where $\gamma_1=e^{(1+\qi)t}$, $\gamma_2= \frac{\qj-\qk}{2}e^{(1-\qi)t}$, $\gamma_3=\frac{\qk-1-\qi-\qj}{2} $, $\gamma_4=e^{(1+\qi)t}\frac{1-\qi+\qj-\qk}{2}$. By straightforward computations, we have the result shown in Table \ref{table-ex-1}.
\begin{table}[ht]
\centering
\tabcolsep7.5pt
\caption{Description of Example \ref{ex1}}{
\begin{tabular}{ |@{}c | c | c@{}|}
\hline
Fundamental & The standard & \multirow{2}*{\centering Stability~}\\
matrix & eigenvalues of $A$ & \\ \hline
\multirow{3}*{\centering $~\displaystyle\lim_{t\to \infty}\norm{M(t)}=\infty$ } & $\lambda_1=0$, $\mathcal{R}(\lambda_1)=0$; & \multirow{3}*{\centering unstable~} \\
& $\lambda_2=1$, $\mathcal{R}(\lambda_2)>0$; & \\
& $\lambda_3=1+\qi$, $\mathcal{R}(\lambda_3)>0$ & \\ \hline
\end{tabular}}
\label{table-ex-1}
\end{table}
\end{example}
\begin{example}\label{ex2}
Consider the system $\dot{\bx}=A\bx$, where
\begin{equation*}
A= \begin{pmatrix}
\qi&1&0\\
0&\qj&0\\
0&1&\qk
\end{pmatrix}
\end{equation*}
The principal fundamental matrix at $t=0$ is given by
\begin{equation*}\label{fundamental matrix 2}
M(t) = \begin{pmatrix}
e^{ \bm {i}t}& \frac{t}{2} \left(e^{ \bm {i}t}- \bm{k}e^{- \bm {i}t} \right)+\frac{1+\qk}{2}\sin t&0\\
0&e^{ \bm{j}t}&0\\
0& \frac{t}{2} \left(e^{ \bm{j}t}+ \bm {i}e^{ \bm{j}t} \right)+\frac{1-\qi}{2}\sin t&e^{ \bm{k}t}
\end{pmatrix}.
\end{equation*}
By straightforward computations, we have the result shown in Table \ref{table-ex-2}.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex2}}{
\begin{tabular}{ |@{} c | c | c@{}| }
\hline
~Fundamental & The standard & \multirow{2}*{\centering Stability~}\\
matrix & eigenvalues of $A$ & \\ \hline
$\displaystyle \norm{M(t)}$ is & $\lambda_1=\lambda_2=\lambda_3=\qi$; & \multirow{2}*{\centering unstable~} \\
unbounded & $\mathcal{R}(\lambda_1)=0$; & \\
\hline
\end{tabular}}
\label{table-ex-2}
\end{table}
Notice that the standard eigenvalue $\lambda=\qi$ has zero real part; we need to show that its geometric multiplicity is less than its algebraic multiplicity $3$. By some basic calculations, we find a quaternion matrix
\begin{equation*}
P= \begin{pmatrix}
-1+\qi&-2 \qi&-\qk\\
0 & 0 &-2\qi-2\qj\\
0&1-\qi-\qj-\qk&-1+\qi
\end{pmatrix}
\end{equation*}
such that
\begin{equation*}
P^{-1}AP= \begin{pmatrix}
\qi&0&0\\
0& \bm {i}& 1\\
0&0& \qi
\end{pmatrix}.
\end{equation*}
This implies that the geometric multiplicity of $\lambda=\qi$ is $2$.
\end{example}
\begin{example}\label{ex3}
Consider the system $\dot{\bx}=A\bx$, where
\begin{equation*}
A= \begin{pmatrix}
-1+2\qj-\qk&-1+2\qi+\qj\\
-\qi+\qj+2\qk&-2-\qi+\qk
\end{pmatrix}
\end{equation*}
The principal fundamental matrix $ M(t) $ at $t=0$ is given by
\begin{equation*}\label{fundamental matrix 3}
\begin{pmatrix}
\frac{3+\qi+\qj-\qk}{6}+\frac{2-\qj-\qk}{6}\gamma_1+\frac{1-\qi+2\qk}{6}\gamma_2& \frac{-1+3\qi-\qj-\qk}{6}+\frac{2\qi+\qj-\qk}{6}\gamma_1+\frac{1-\qi+2\qk}{6}\gamma_2 \\
\frac{-1-3\qi+\qj+\qk}{6}+\frac{2\qi+\qj-\qk}{6}\gamma_1+\frac{1+\qi-2\qj}{6}\gamma_2& \frac{3- \qi-\qj-\qk}{6}+\frac{2 +\qj+\qk}{6}\gamma_1+\frac{1+\qi-2\qj}{6}\gamma_2
\end{pmatrix}
\end{equation*}
where $\gamma_1=e^{-(3+3\qi)t}$ and $\gamma_2=e^{(3\qi-3)t}$.
By straightforward computations, we have the result shown in Table \ref{table-ex-3}.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex3}}{
\begin{tabular}{| @{}c | c | c@{} |}
\hline
~Fundamental & The standard & \multirow{2}*{\centering Stability}\\
matrix & eigenvalues of $A$ & \\ \hline
$\displaystyle \norm{M(t)}$ & $\lambda_1=0$, $\mathcal{R}(\lambda_1)=0$; & stable but not \\
is bounded & $ \lambda_2 =-3+3\qi$, $\mathcal{R}(\lambda_2)<0$ & asymptotically~ \\ \hline
\end{tabular}}
\label{table-ex-3}
\end{table}
\end{example}
\begin{example}\label{ex4}
Consider the system $\dot{\bx}=A\bx$, where
\begin{equation*}
A= \begin{pmatrix}
-1+\qi-\qk&-\qi\\
1+\qi-\qj+\qk&-2-\qk
\end{pmatrix}
\end{equation*}
The principal fundamental matrix $ M(t) $ at $t=0$ is given by
\begin{equation*}\label{fundamental matrix 4}
\begin{pmatrix}
(\frac{1-\qi}{2}e^{ \bm {i}t}+\frac{\qk-\qj}{2}e^{- \bm {i}t})e^{-2t} + \frac{1+\qi+\qj-\qk}{2}e^{-t}& \frac{1-\qi}{2}e^{(\qi-2)t}+\frac{1-\qi}{2}e^{-t} \\
(1+\qi)(1-e^{(\qj-1)t})e^{-t} & \frac{1-\qi-\qj-\qk}{2}e^{-t}+\frac{1+\qi+\qj+\qk}{2}e^{(\qi-2)t}
\end{pmatrix}
\end{equation*}
By straightforward computations, we have the result shown in Table \ref{table-ex-4}.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex4}}{
\begin{tabular}{ |@{} c | c | c@{} |}
\hline
Fundamental & The standard & \multirow{2}*{\centering Stability}\\
matrix & eigenvalues of $A$ & \\ \hline
\multirow{2}*{\centering $~\displaystyle\lim_{t\to \infty}\norm{M(t)}=0$ } & $\lambda_1=-1$, $\mathcal{R}(\lambda_1)<0$; & asymptotically~ \\
& $ \lambda_2 =-1+ \frac{\qi}{2}$, $\mathcal{R}(\lambda_2)<0$ & stable \\ \hline
\end{tabular}}
\label{table-ex-4}
\end{table}
\end{example}
\section{Floquet theory for QDEs}\label{S4}
We consider the quaternionic periodic systems
\begin{equation}\label{q-periodic-systems}
\dot{\bx}=A(t) \bx
\end{equation}
where $A(t)$ is a $T$-periodic continuous quaternion-matrix-valued function. The following Floquet's theorem gives a canonical form for fundamental matrices of (\ref{q-periodic-systems}).
\begin{theorem}
If $M(t)$ is a fundamental matrix of (\ref{q-periodic-systems}). Then
\begin{equation*}
M(t+T)=M(t)M^{-1}(0)M(T).
\end{equation*}
In addition, it has the form
\begin{equation}\label{floquetnormalform}
M(t)=P(t)e^{tB}
\end{equation}
where $P(t)$ is a $T$-periodic quaternion-matrix-valued function and $B$ is a constant quaternion matrix satisfying
\begin{equation*}
e^{TB}=M^{-1}(0)M(T).
\end{equation*}
\end{theorem}
\begin{proof}
Since $M(t)$ is a fundamental matrix of (\ref{q-periodic-systems}) and $A(t+T)=A(t)$, then
\begin{equation*}
\dot{M}(t+T)=A(t+T)M(t+T)=A(t)M(t+T).
\end{equation*}
That means $M(t+T)$ is also a fundamental matrix. Therefore, there is a nonsingular quaternion matrix $C$ such that $M(t+T)=M(t)C$. By Lemma \ref{exp q-matrix eqn}, there is a quaternion matrix $B$ such that $C=e^{TB}$. Let $P(t):=M(t)e^{-tB}$, then
\begin{equation*}
P(t+T)=M(t+T)e^{-TB-tB}=M(t)C e^{-TB}e^{-tB}=M(t)e^{-tB}=P(t)
\end{equation*}
and $M(t)=P(t)e^{tB}$. By letting $t=0$, we have $ e^{TB}=C=M^{-1}(0)M(T)$ which completes the proof.
\end{proof}
\begin{remark}
In the above proof, we used the fact that if $A_1, A_2 \in \H^{n\times n}$ are commutable then $e^{A_1}e^{A_2}=e^{A_1+A_2}$. We know that this assertion is true for complex matrices. We now verify that this result is also valid for quaternion matrices.
If $A_1, A_2$ are commutable, so are $\chi_{A_1}$ and $\chi_{A_2}$. By applying Lemmas \ref{expmultiply} and \ref{exp q-matrix eqn}, we have that
\begin{equation*}
\chi_{e^{A_1}e^{A_2}}=\chi_{e^{A_1}}\chi_{e^{A_2}}=e^{\chi_{A_1}}e^{\chi_{A_2}}=e^{\chi_{A_1}+\chi_{A_2}}=e^{\chi_{(A_1+A_2)}}=\chi_{e^{A_1+A_2}}.
\end{equation*}
It follows that $e^{A_1}e^{A_2}=e^{A_1+A_2}$.
\end{remark}
\begin{corollary}\label{monodromy matrix}
Suppose that $M_1(t)$, $M_2(t)$ are fundamental matrices of (\ref{q-periodic-systems}) and $e^{TB_1}=M_1^{-1}(0)M_1(T)$, $e^{TB_2}=M_2^{-1}(0)M_2(T)$. Then $e^{TB_1}$, $e^{TB_2}$ are similar and therefore they have the same standard eigenvalues.
\end{corollary}
\begin{proof}
Let $M_0(t)$ be the fundamental matrix such that $M_0(0)=I$, then $M_1(t)=M_0(t)M_1(0)$ and $M_2(t)=M_0(t)M_2(0)$ for every $t\in \mathbb{R}$. Therefore $M_1(T)M_1^{-1}(0)=M_2(T)M_2^{-1}(0)=M_0(T)$. Note that both $M_1^{-1}(0)M_1(T)$ and $M_2^{-1}(0)M_2(T)$ are similar to $M_0(T)$. Thus $e^{TB_1}$, $e^{TB_2}$ are similar and they possess the same standard eigenvalues.
\end{proof}
The representation (\ref{floquetnormalform}) is called a Floquet normal form for the fundamental matrix $M(t)$. From this normal form, we accordingly define several concepts for quaternionic periodic system (\ref{q-periodic-systems}) as follows.
\begin{itemize}
\item For any fundamental matrix $M(t)$, $ e^{TB}=M^{-1}(0)M(T)$ is called a monodromy matrix of (\ref{q-periodic-systems}). By Corollary \ref{monodromy matrix}, we see that any two monodromy matrices are similar.
\item The standard eigenvalues of any monodromy matrix are called characteristic multipliers of (\ref{q-periodic-systems}). The totality of characteristic multipliers is denoted by ${CM}$.
\item A complex number $\mu$ is called a characteristic exponent of (\ref{q-periodic-systems}), if $\rho$ is a characteristic multiplier and $e^{\mu T}=\rho$. The totality of characteristic exponents is denoted by ${CE}$.
\end{itemize}
\begin{theorem}\label{eigenvalue of B}
Consider system (\ref{q-periodic-systems}) and suppose that $M(t)=P(t)e^{tB}$ is a Floquet normal form for the fundamental matrix $M(t)$. Let $\mu_1,\mu_2,\cdots,\mu_n$ be the standard eigenvalues of $B$. Then $\widetilde{\mu}_1,\widetilde{\mu}_2,\cdots,\widetilde{\mu}_n$ are characteristic exponents, where $\widetilde{\mu}_j$ ($j=1,2,\cdots,n$) is defined by
\begin{equation*}
\widetilde{\mu}_j:=
\begin{cases}
\mu_j,& \mathrm{if}~ e^{\mu_j T} ~\mathrm{has~ nonnegative ~imaginary~ part};\\
\overline{\mu_j}, &\mathrm{otherwise}.
\end{cases}
\end{equation*}
If $\mu$ is a characteristic exponent of (\ref{q-periodic-systems}), then there exists $1\leq k\leq n$ such that $\{e^{\mu T}\}\cap \{e^{\mu_k T}, e^{ \overline{\mu_k} T}\}\neq \emptyset$ and $\mathcal{R}(\mu)=\mathcal{R}(\mu_k)$.
\end{theorem}
\begin{proof}
If $\mu_1,\mu_2,\cdots,\mu_n$ are the standard eigenvalues of $B$, from Theorem \ref{spectral exponetial map}, $e^{\widetilde{\mu}_jT}$ ($j=1,2,\cdots,n$) is a standard eigenvalue of $e^{TB}$. That is, $\{e^{\widetilde{\mu}_jT} :j=1,2,\cdots,n\}={CM}$. Therefore $\widetilde{\mu}_1,\widetilde{\mu}_2,\cdots,\widetilde{\mu}_n$ are characteristic exponents. If $\mu$ is a characteristic exponent, then $\rho=e^{\mu T}$ is a standard eigenvalue of $e^{TB}$. Hence there exists $1\leq k\leq n$, such that $\rho=e^{\widetilde{\mu}_k T}$. It follows that
\begin{equation*}
\{e^{\mu T}\}\cap \{e^{\mu_k T}, e^{ \overline{\mu_k} T}\}\neq \emptyset,
\end{equation*}
and
$$e^{\mathcal{R}(\mu)T}=\abs{e^{\mu T}}=\abs{e^{\widetilde{\mu}_k T}}=e^{\mathcal{R}(\widetilde{\mu}_k)T}=e^{\mathcal{R}({\mu}_k)T}.$$
Thus $\mathcal{R}(\mu)=\mathcal{R}(\mu_k)$.
\end{proof}
As an immediate consequence of Theorem \ref{eigenvalue of B}, we have the following result.
\begin{corollary}
Consider system (\ref{q-periodic-systems}) and let $M(t)=P(t)e^{tB}$ be a Floquet normal form for the fundamental matrix $M(t)$. Then
\begin{equation*}
\{ \mathcal{R}(\mu): \mu \in {CE}\}=\{ \mathcal{R}(\mu): \mu \in \sigma(B)\}
\end{equation*}
where $\sigma(B)$ is the totality of the standard eigenvalues of $B$.
\end{corollary}
\begin{theorem}
If $\rho_j=e^{\mu_j T}$, $j=1,2,\cdots,n$, are the characteristic multipliers of (\ref{q-periodic-systems}), then
\begin{align}
&\prod_{j=1}^{n} \abs{\rho_j}= \exp\left(\int_0^T \mathcal{R}(\mathrm{tr} A(\tau))d\tau\right), \label{characteristicmultipliers} \\
& \mathcal{R}\left(\sum_{j=1}^n\mu_j\right)=
\frac{1}{T}\left(\int_0^T\mathcal{R}\left( \mathrm{tr}A(\tau)\right)d\tau\right). \label{characteristicexponent}
\end{align}
\end{theorem}
\begin{proof}
Let $M(t)$ be a fundamental matrix of (\ref{q-periodic-systems}), by Liouville's formula of QDEs (see \cite{kou2015linear2}), we have
\begin{equation}\label{Liouville}
\abs{M(t)}_q=\exp\left(2\int_{t_0}^t \mathcal{R}(\mathrm{tr} A(\tau))d\tau\right)\abs{M(t_0)}_q.
\end{equation}
Note that $\rho_j$, $j=1,2,\cdots,n$, are the standard eigenvalues of $M(T)M^{-1}(0)$, by the definition of $q$-determinant, we have
\begin{equation*}
\abs{M(T)}_q\abs{M(0)}_q^{-1}=\abs{M(T)M^{-1}(0)}_q=\prod_{j=1}^{n} \abs{\rho_j}^2.
\end{equation*}
Taking $t=T, t_0=0$ in (\ref{Liouville}), we obtain
\begin{equation*}
\prod_{j=1}^{n} \abs{\rho_j}^2= \exp\left(2\int_{0}^T \mathcal{R}(\mathrm{tr} A(\tau))d\tau\right),
\end{equation*}
and therefore (\ref{characteristicmultipliers}) holds. Observe that $\abs{\rho_j}=\abs{e^{\mu_j T}}=e^{\mathcal{R}(\mu_j) T}$, then (\ref{characteristicmultipliers}) implies that
\begin{equation*}
\exp\left(\mathcal{R}\left(\sum_{j=1}^n\mu_j\right)T\right)=\exp\left(\int_0^T \mathcal{R}(\mathrm{tr} A(\tau))d\tau\right).
\end{equation*}
This proves the theorem.
\end{proof}
Let $\rho=e^{\mu T}$, where $\rho, \mu$ are complex numbers. Since $\abs{\rho}=\abs{e^{\mu T}}=e^{\mathcal{R}(\mu) T}$, it is easy to see that the following assertions hold.
\begin{itemize}
\item $\abs{\rho}=1$ if and only if $\mathcal{R}(\mu)=0$.
\item $\abs{\rho}<1$ if and only if $\mathcal{R}(\mu)<0$.
\item $\abs{\rho}>1$ if and only if $\mathcal{R}(\mu)>0$.
\end{itemize}
The next result demonstrates that the stability of (\ref{q-periodic-systems}) is equivalent to the stability of the linear system with constant coefficients $\dot{\by}=B \by$, where $B$ stems from the Floquet normal form (\ref{floquetnormalform}).
\begin{theorem}\label{stablility of quaternionic periodic system}
Let $M(t)=P(t)e^{tB}$ be a Floquet normal form for the fundamental matrix $M(t)$ of (\ref{q-periodic-systems}). Then the following assertions hold.
\begin{enumerate}
\item The system (\ref{q-periodic-systems}) is stable if and only if the standard eigenvalues of $B$ all have non-positive real parts and the algebraic multiplicity equals the geometric multiplicity of each standard eigenvalue with zero real part; or equivalently, the characteristic multipliers of (\ref{q-periodic-systems}) all have modulus not larger than $1$ ($\leq1$) and the algebraic multiplicity equals the geometric multiplicity of each characteristic multiplier with modulus one.
\item The system (\ref{q-periodic-systems}) is asymptotically stable if and only if the standard eigenvalues of $B$ all have negative real parts; or equivalently, the characteristic multipliers of (\ref{q-periodic-systems}) all have modulus less than $1$.
\end{enumerate}
\end{theorem}
\begin{theorem}\label{periodic sol}
If $\mu$ is a characteristic exponent and $\rho=e^{\mu T}$ is a characteristic multiplier of (\ref{q-periodic-systems}), then there is a nontrivial solution of the form
\begin{equation*}
\bx(t)=\bp (t)e^{\mu t}.
\end{equation*}
Moreover $\bp(t+T)=\bp(t)$ and $\bx(t+T)=\bx(t)\rho$.
\end{theorem}
\begin{proof}
Let $M(t)=P(t)e^{tB}$ be a Floquet normal form for the principal fundamental matrix $M(t)$ at $t=0$. By Theorem \ref{eigenvalue of B}, there is a standard eigenvalue $\mu_1$ of $B$ such that $$\{e^{\mu T}\}\cap \{e^{\mu_1 T}, e^{ \overline{\mu_1} T}\}\neq \emptyset.$$
Without loss of generality, we assume that $\rho=e^{\mu T}=e^{\mu_1 T}$. Then there exists a $k\in\mathbb{Z}$ such that $\mu_1=\mu+ \frac{2k\pi\qi}{T}$. Let $\bmeta\neq 0$ be an eigenvector of $B$ corresponding to $\mu_1$. It follows that $B\bmeta=\bmeta \mu_1$ and therefore $e^{tB}\bmeta=\bmeta e^{\mu_1 t}$. Thus the solution $\bx(t):=M(t)\bmeta$ can also be represented in the form
\begin{equation*}
\bx(t)=P(t)e^{tB}\bmeta=P(t)\bmeta e^{\frac{2k\pi \bm {i}t}{T}}e^{\mu t}.
\end{equation*}
Let $\bp(t)=P(t)\bmeta e^{\frac{2k\pi \bm {i}t}{T}}$. It is easy to see that
$\bp(t)$ is a $T$-periodic function. Moreover
\begin{equation*}
\bx(t+T)=\bp(t+T)e^{\mu(t+T)}=\bp(t)e^{\mu t}e^{\mu T}=\bx(t) \rho.
\end{equation*}
This completes the proof.
\end{proof}
\begin{theorem}\label{periodic sol inverse}
If $\mu$ is a complex number, $\bp(t+T)=\bp(t)$, and $\bx(t)=\bp(t)e^{\mu t}\neq 0$ is a nontrivial solution of (\ref{q-periodic-systems}), then one of $\mu, \overline{\mu}$ is a characteristic exponent.
\end{theorem}
\begin{proof}
Let $M(t)=P(t)e^{tB}$ be a Floquet normal form for the principal fundamental matrix $M(t)$ at $t=0$ and set $\bmeta=\bp(0)$; then $\bmeta\neq 0$. Otherwise, $\bx(t)\equiv 0$ is the trivial solution by uniqueness of solution. Note that both $\bp(t)e^{\mu t}$ and $P(t)e^{t B}\bmeta$ are solutions of (\ref{q-periodic-systems}) with the same initial value at $t=0$, therefore
\begin{equation}\label{same-initial}
\bp(t)e^{\mu t}=P(t)e^{t B}\bmeta
\end{equation}
Taking $t=T$ in (\ref{same-initial}) and noting that
$\bp(T)=\bp(0)=\bmeta$ and $P(T)=P(0)=I$ by periodicity, it follows that
\begin{equation*}
\bmeta e^{\mu T}=e^{TB}\bmeta.
\end{equation*}
Hence $e^{\mu T}$ is a complex-valued eigenvalue of $e^{TB}$. Thus, one of $e^{\mu T}$, $e^{\overline{\mu} T}$ is a characteristic multiplier of (\ref{q-periodic-systems}). Therefore, one of $\mu, \overline{\mu}$ is a characteristic exponent of (\ref{q-periodic-systems}).
\end{proof}
The next result is a direct consequence of Theorems \ref{periodic sol} and \ref{periodic sol inverse}.
\begin{corollary}\label{T-periodic-or-2T-periodic}
There is a $T$-periodic solution of (\ref{q-periodic-systems})
if and only if there is a zero characteristic exponent; or equivalently, there is a characteristic multiplier $\rho=1$. If there is a characteristic exponent of the form $\mu=\frac{2k+1}{T}\pi \qi$ for some $k\in \mathbb{Z}$, or equivalently, there is a characteristic multiplier $\rho=-1$, then there is a $2T$-periodic solution of (\ref{q-periodic-systems}).
\end{corollary}
The following result shows that different characteristic multipliers will generate linearly independent solutions.
\begin{corollary}
Assume that $\mu_1, \mu_2$ are characteristic exponents of (\ref{q-periodic-systems}) satisfying $\rho_1=e^{\mu_1 T}$, $\rho_2=e^{\mu_2 T}$. If the characteristic multipliers $\rho_1$, $\rho_2$ are not equal, then there are $T$-periodic functions $\bp_1(t)$, $\bp_2(t)$ such that
\begin{equation*}
\bx_1(t)=\bp_1(t)e^{\mu_1 t}
\end{equation*}
and
\begin{equation*}
\bx_2(t)=\bp_2(t)e^{\mu_2 t}
\end{equation*}
are linearly independent solutions of (\ref{q-periodic-systems}).
\end{corollary}
\begin{proof}
Let $M(t)=P(t)e^{tB}$ be a Floquet norm form for the principal fundamental matrix $M(t)$ at $t=0$ and $\bmeta_1=\bx_1(0)$, $\bmeta_2=\bx_2(0)$. By arguments similar to those in the proof of Theorem
\ref{periodic sol inverse}, we conclude that $\bmeta_1$, $\bmeta_2$ are eigenvectors of $e^{TB}$ corresponding to the eigenvalues $\rho_1$, $\rho_2$ respectively. Note that $\rho_1\neq\rho_2$. It follows that $\bx_1(0)$ and $\bx_2(0)$ are linearly independent and therefore $\bx_1(t)$ and $\bx_2(t)$ are linearly independent solutions of (\ref{q-periodic-systems}).
\end{proof}
\begin{example}\label{ex5}
Consider the system (\ref{q-periodic-systems}), where $A(t)$ is a $\pi$-periodic function given by
\begin{equation*}
A(t)= \begin{pmatrix}
1&1\\
0 &\qi+2e^{2 \bm {i}t}\qj
\end{pmatrix}
\end{equation*}
Then the principal fundamental matrix is
\begin{equation*}
M(t)= \begin{pmatrix}
e^t& \frac{-1+\qi-\qj-\qk}{4}e^{ \bm{j}t}+\frac{-1-3\qi-3\qj+\qk}{20}e^{3 \bm{j}t}+\frac{3-\qi+4\qj+2\qk}{10}e^t\\
0 &e^{ \bm {i}t}e^{2 \bm{j}t}
\end{pmatrix}.
\end{equation*}
By straightforward computations, we have $\lim_{t\to \infty}\norm{M(t)}=\infty$. That is, $\norm{M(t)}$ is unbounded. Thus this system is unstable by Theorem \ref{judging theorem}.
Observe that $M(0)=I$ and
\begin{equation*}
M(\pi)= \begin{pmatrix}
e^{\pi}& \frac{3-\qi+4\qj+2\qk}{10}(1+e^{\pi})\\
0 &-1
\end{pmatrix}.
\end{equation*}
Therefore the characteristic multipliers are $\rho_1=e^{\pi}$, $\rho_2=-1$.
From Lemma \ref{exp q-matrix eqn}, there is a quaternion-valued matrix
\begin{equation*}
B= \begin{pmatrix}
1& \frac{1-2\qi+\qj+ 3\qk}{5}\\
0 &\qi
\end{pmatrix}
\end{equation*}
such that $M(\pi)=e^{\pi B}$. Applying the definition of the exponential function, we have
\begin{equation*}
e^{tB}= \begin{pmatrix}
e^t& \frac{3- \qi+ 4 \qj+ 2\qk}{10} (e^t-e^{ \bm {i}t})\\
0 &e^{ \bm {i}t}
\end{pmatrix}
\end{equation*}
Then we obtain the Floquet norm form $P(t)e^{tB}$ for $M(t)$, where $P(t)$ is given by
\begin{equation*}
P(t)= \begin{pmatrix}
1& \frac{3- \qi+ 4 \qj+ 2\qk}{10} +\frac{-1+\qi-\qj-\qk}{4}e^{ \bm{j}t}e^{- \bm {i}t}+\frac{-1-3\qi-3\qj+\qk}{20} e^{3 \bm{j}t}e^{- \bm {i}t}\\
0 &\cos 2t +e^{2 \bm {i}t}\qj\sin 2t
\end{pmatrix}
\end{equation*}
It is easy to see that $P(t)$ is $\pi$-periodic as required. The standard eigenvalues of $B$ are $\mu_1=1$, $\mu_2=\qi$ and the corresponding eigenvectors are
\begin{equation*}
\bmeta_1= \begin{pmatrix}
1 \\
0
\end{pmatrix}~~~\mathrm{and}~~~ \bmeta_2= \begin{pmatrix}
-\frac{7+\qi+10\qj}{10} \\
2+\qi
\end{pmatrix}.
\end{equation*}
Note that $\mu_1, \mu_2$ are characteristic exponents. By Theorem \ref{periodic sol}, there are two nontrivial solutions
\begin{equation*}
\bx_1(t)=
M(t)\bmeta_1=\bp_1(t)e^t
~~\mathrm{and}~~ \bx_2(t)=M(t)\bmeta_2=\bp_2(t)e^{ \bm {i}t},
\end{equation*}
where $\bp_1(t), \bp_2(t)$ are $\pi$-periodic functions given by
\begin{equation*}
\bp_1(t)= \begin{pmatrix}
1 \\
0
\end{pmatrix}~~\mathrm{and}~~ \bp_2(t)= \begin{pmatrix}
\frac{-1+\qi-\qj-\qk}{4}e^{ \bm{j}t}e^{- \bm {i}t}(2+\qi)+\frac{-1-3\qi-3\qj+\qk}{20} e^{3 \bm{j}t}e^{- \bm {i}t}(2+\qi) \\
(2+\qi)\cos2t+2 e^{2 \bm {i}t}(\qj+\qk)\sin2t
\end{pmatrix}.
\end{equation*}
By Corollary \ref{T-periodic-or-2T-periodic}, $\bx_2(t)$ is a $2\pi$-periodic solution. To provide a direct description of the system, Table \ref{table-ex-5} is presented to visualize its properties.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex5}}{
\begin{tabular}{| @{} c | c | c| c@{}|}
\hline
~Fundamental & Characteristic& The standard & \multirow{2}*{\centering Stability~}\\
matrix & multipliers & eigenvalues of $B$ & \\ \hline
$\norm{M(t)}$ is & $\rho_1=e^{\pi}$, $\abs{\rho_1}>1$;& $\mu_1=1$, $\mathcal{R}(\mu_1)>0$; & \multirow{2}*{\centering unstable~} \\
unbounded &$\rho_2=-1$, $\abs{\rho_2}=1$& $\mu_2=\qi$, $\mathcal{R}(\mu_2)=0$ & \\ \hline
\end{tabular}}
\label{table-ex-5}
\end{table}
\end{example}
\begin{example}\label{ex6}
Consider the system (\ref{q-periodic-systems}), where $A(t)$ is a $\pi$-periodic function given by
\begin{equation*}
A(t)= \begin{pmatrix}
\qk&1\\
0 &\qi+2e^{2 \bm {i}t}\qj
\end{pmatrix}
\end{equation*}
Then the principal fundamental matrix $M(t)$ is
\begin{equation*}
\begin{pmatrix}
e^{ \bm{k}t}& \frac{1-\qi-\qj+\qk}{4}\sin t +\frac{1+\qi+\qj+\qk}{4}e^{ \bm{j}t} t +\frac{1+\qi-\qj-\qk}{4}e^{2 \bm{j}t}\sin t + \frac{1-\qi-\qj+\qk}{16}(e^{3 \bm{j}t}-e^{- \bm{j}t})\\
0 &e^{ \bm {i}t}e^{2 \bm{j}t}
\end{pmatrix}.
\end{equation*}
By straightforward computations, $\norm{M(t)}$ is unbounded. Thus this system is unstable by Theorem \ref{judging theorem}.
Observe that $M(0)=I$ and
\begin{equation*}
M(\pi)= \begin{pmatrix}
-1& - \frac{1+\qi+ \qj+ \qk}{4} \pi\\
0 &-1
\end{pmatrix}.
\end{equation*}
Therefore the characteristic multipliers are $\rho_1= \rho_2=-1$.
There is a quaternion-valued matrix
\begin{equation*}
B= \begin{pmatrix}
-\qi& \frac{1+\qi+ \qj+ \qk}{4}\\
0 &-\qk
\end{pmatrix}
\end{equation*}
such that $M(\pi)=e^{\pi B}$. The standard eigenvalues of $B$ are $\mu_1= \mu_2=\qi$. To provide a direct description of the system, Table \ref{table-ex-6} is presented to visualize its properties. By some basic calculations, we obtain the Jordan canonical form of $B$:
\begin{equation*}
J=\begin{pmatrix}
\qi& 1\\
0 &\qi
\end{pmatrix}.
\end{equation*}
This implies that the geometric multiplicity of $\mu=\qi$ is $1$, which is less than its algebraic multiplicity.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex6}}{
\begin{tabular}{ |@{} c | c | c| c @{}|}
\hline
~Fundamental & Characteristic& The standard & \multirow{2}*{\centering Stability~}\\
matrix & multipliers & eigenvalues of $B$ & \\ \hline
$\norm{M(t)}$ is & $\rho_1=\rho_2=-1$;& $\mu_1=\mu_2=\qi$; & \multirow{2}*{\centering unstable~} \\
unbounded &$\abs{\rho_1}=\abs{\rho_2}=1 $ & $\mathcal{R}(\mu_1)=\mathcal{R}(\mu_2)=0$ & \\ \hline
\end{tabular}}
\label{table-ex-6}
\end{table}
\end{example}
\begin{example}\label{ex7}
Consider the system (\ref{q-periodic-systems}), where $A(t)$ is a $\pi$-periodic function given by
\begin{equation*}
A(t)= \begin{pmatrix}
\frac{\qk}{2}&e^{-2 \bm {i}t}\\
0 &\qi+2\qj\cos 2t+2\qk\sin 2t
\end{pmatrix}
\end{equation*}
Then the principal fundamental matrix $M(t)$ is
\begin{equation*}
\begin{pmatrix}
e^{\frac{\qk}{2} t}& \frac{-2+2\qi+5\qj-5\qk}{21}e^{-\frac{\qj}{2}t} +\frac{2+2\qi+3\qj+3\qk}{5}e^{ \frac{\qj}{2} t} -\frac{1+2\qi+2\qj-\qk}{4}e^{ \bm{j}t} + \frac{1+6\qi-6\qj-\qk}{35} e^{3 \bm{j}t} \\
0 &e^{ \bm {i}t}e^{2 \bm{j}t}
\end{pmatrix}.
\end{equation*}
It is easy to see that $\norm{M(t)}$ is bounded but is not convergent to zero as $t$ tends to infinity. Thus this system is stable (but not asymptotically) by Theorem \ref{judging theorem}.
Observe that $M(0)=I$ and
\begin{equation*}
M(\pi)= \begin{pmatrix}
\qk& -\frac{2}{35}-\frac{12}{35} \bm {i}+ \frac{4}{3} \bm{j}+ \frac{2}{3}\qk\\
0 &-1
\end{pmatrix}.
\end{equation*}
Therefore the characteristic multipliers are $\rho_1=\qi$, $\rho_2=-1$.
There is a quaternion-valued matrix
\begin{equation*}
B= \begin{pmatrix}
\frac{\qk}{2}& \frac{33-76\qi-12\qj+104 \bm{k}}{105}\\
0 &\qi
\end{pmatrix}
\end{equation*}
such that $M(\pi)=e^{\pi B}$. The standard eigenvalues of $B$ are $\mu_1= \frac{\qi}{2}$, $\mu_2=\qi$. To provide a direct description of the system, Table \ref{table-ex-7} is presented to visualize its properties.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex7}}{
\begin{tabular}{| @{} c | c | c| c @{}|}
\hline
Fundamental & Characteristic& The standard & \multirow{2}*{\centering Stability}\\
matrix & multipliers & eigenvalues of $B$ & \\ \hline
$\norm{M(t)}$ is bounded & $\rho_1=\qi\neq -1= \rho_2$;& $\mu_1= \frac{\qi}{2} \neq \qi=\mu_2$; & stable but not \\
~but not convergent to $0$ &$\abs{\rho_1}=\abs{\rho_2}=1$& $\mathcal{R}(\mu_1)= \mathcal{R}(\mu_2)=0$ & asymptotically~ \\ \hline
\end{tabular}}\label{table-ex-7}
\end{table}
\end{example}
\begin{example}\label{ex8}
Consider the system (\ref{q-periodic-systems}), where $A(t)$ is a $\pi$-periodic function given by
\begin{equation*}
A(t)= \begin{pmatrix}
\frac{\qi}{2}-1 &e^{2 \bm{j}t}e^{- \bm{k}\sin 2t}\\
0 &2 \bm{k}\cos 2t -1
\end{pmatrix}
\end{equation*}
Then the principal fundamental matrix $M(t)$ is
\begin{equation*}
\begin{pmatrix}
e^{\frac{\qi}{2} t}e^{-t}& \frac{ 1}{5}(e^{-(1+2\qi)t} - e^{(\frac{\qi}{2}-1)t})(\qi-\qj) + \frac{1 }{3}(e^{(\frac{\qi}{2}-1)t}-e^{(2\qi-1)t})(\qi+\qj) \\
0 & e^{-t}e^{ \bm{k}\sin 2t}
\end{pmatrix}.
\end{equation*}
It is easy to see that $\lim_{t\to \infty}\norm{M(t)}=0$. Thus this system is asymptotically stable by Theorem \ref{judging theorem}.
Observe that $M(0)=I$ and
\begin{equation*}
M(\pi)= \begin{pmatrix}
\bm {i}e^{-\pi}& \frac{-2-2\qi-8\qj+8\qk}{15}e^{-\pi}\\
0 & e^{-\pi}
\end{pmatrix}.
\end{equation*}
Therefore the characteristic multipliers are $\rho_1= \bm {i}e^{-\pi}$, $\rho_2= e^{-\pi}$.
There is a quaternion-valued matrix
\begin{equation*}
B= \begin{pmatrix}
\frac{\qi}{2}-1& \frac{-1+4 \bm{k}}{15}\\
0 & -1
\end{pmatrix}
\end{equation*}
such that $M(\pi)=e^{\pi B}$. The standard eigenvalues of $B$ are $\mu_1= \frac{\qi}{2}-1$, $\mu_2=-1$. To provide a direct description of the system, Table \ref{table-ex-8} is presented to visualize its properties.
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex8}}{
\begin{tabular}{ |@{} c | c | c| c @{}|}
\hline
Fundamental & Characteristic& The standard & \multirow{2}*{\centering Stability}\\
matrix & multipliers & eigenvalues of $B$ & \\ \hline
\multirow{2}*{\centering $~\displaystyle\lim_{t\to \infty}\norm{M(t)}=0$ }& $\rho_1= \bm {i}e^{-\pi}$, $\abs{\rho_1}<1$;& $\mu_1=\frac{\qi}{2}-1$, $\mathcal{R}(\mu_1)<0$; & asymptotically~ \\
&$\rho_2=e^{-\pi}$, $\abs{\rho_2}<1$& $\mu_2=-1$, $\mathcal{R}(\mu_2)<0$ & stable\\ \hline
\end{tabular}}\label{table-ex-8}
\end{table}
\end{example}
\begin{remark}
Thanks to assertion 2 of Theorem \ref{thm of q matrix}, the above results coincide with the traditional results when $A(t)$ is complex-valued.
\end{remark}
\section{Quaternion-valued Hill's equations}\label{S5}
For real-valued systems, the Floquet theory effectively depicts the stability of Hill's equation (see, e.g., \cite{chicone2006ordinary})
\begin{equation*}\label{Hill eqn}
\ddot{u} +a(t)u=0,~~a(t)=a(t+T).
\end{equation*}
We will consider the quaternion case where $a(t)$ is a quaternion-valued function. Let $\bx=(u,u')^T$; then the quaternion-valued Hill's equation is equivalent to the quaternionic periodic system
(\ref{q-periodic-systems}) with
\begin{equation*}
A(t)=\begin{pmatrix}
0& 1\\
-a(t) &0
\end{pmatrix}.
\end{equation*}
Let $M(t)$ be the principal fundamental matrix at $t=0$. By Liouville's formula of QDEs, we have
\begin{equation*}
\abs{M(t)}_q=\exp\left(2\int_{t_0}^t \mathcal{R}(\mathrm{tr} A(\tau))d\tau\right)\abs{M(0)}_q=1.
\end{equation*}
If $a(t)$ is real-valued, then $M(T)$ is a real-valued matrix. Suppose $\alpha=\alpha_1+ \bm {i}\alpha_2$ and $\beta=\beta_1+ \bm {i}\beta_2$ are roots of the equation
\begin{equation}\label{character eqn}
\lambda^2-(\mathrm{tr}M(T))\lambda+\abs{M(T)}_q= \lambda^2-(\mathrm{tr}M(T))\lambda+1=0.
\end{equation}
Then $\rho_1=\alpha_1+ \bm {i}\abs{\alpha_2}$ and $\rho_2=\beta_1+ \bm {i}\abs{\beta_2}$ are characteristic multipliers of (\ref{q-periodic-systems}) and $\abs{\rho_1} =\abs{\alpha}$, $\abs{\rho_2} =\abs{\beta}$. It is well-known that the stability of real-valued Hill's equation depends on the value of $\mathrm{tr}M(T)$ (see, e.g., \cite{chicone2006ordinary}).
\begin{table}[ht]
\centering
\caption{Description of Real-valued Hill's equation}{
\begin{tabular}{| @{} c | c | c @{} |}
\hline
The value &The roots & Stability of real-valued \\
of $\mathrm{tr}M(T)$ & of (\ref{character eqn}) & Hill's equation \\ \hline
$\mathrm{tr}M(T)<-2$ & $\alpha<-1<\beta<0$; & unstable \\ \hline
$-2<\mathrm{tr}M(T)<2$ &$\beta=\overline{\alpha}$, $\abs{\alpha}=1$, $\Im(\alpha)\neq 0$; & stable but not asymptotically \\ \hline
$ \mathrm{tr}M(T)=2$ &$\beta= \alpha=1$; & stable if and only if $M(T)=I$ \\ \hline
$ \mathrm{tr}M(T)>2$ & $0<\alpha<1<\beta$; & unstable \\ \hline
$ \mathrm{tr}M(T)=-2$ &$\beta= \alpha=-1$; & stable if and only if $M(T)=-I$~ \\ \hline
\end{tabular}}\label{real Hill}
\end{table}
If $a(t)$ is quaternion-valued, then $M(T)$ is a quaternion matrix. Therefore we can not use (\ref{character eqn}) to find the characteristic multipliers (the standard eigenvalues of $M(T)$). In this case, $\mathrm{tr}M(T)$ is a quaternion. The structure of the set of zeros of quaternionic polynomials is more complicated than complex polynomials. It is natural to modify (\ref{character eqn}) to be
\begin{equation}\label{character eqn 2}
\lambda^2-\mathcal{R}(\mathrm{tr}M(T))\lambda+\abs{M(T)}_q=0.
\end{equation}
This raises the question of whether the roots of (\ref{character eqn 2}) and characteristic multipliers possess the same absolute value. The answer is negative. This implies that even if we add $ \mathcal{R}$ to the front of $\mathrm{tr}M(T)$, the stability of quaternion-valued Hill's equation can not be determined by Table \ref{real Hill}.
\begin{example}\label{ex-5-1}
Consider the quaternion-valued Hill's equation with $a(t)=2+\qj\cos^2 2t+ \bm{k}\sin 2t$. Note that $a(t)$ is a quaternion-valued $\pi$-periodic function. Based on the numerical methods, we obtain $M(\pi)\approx\begin{pmatrix} m_1&m_2\\ m_3&m_4 \end{pmatrix}$, where
\begin{equation*}
\begin{cases}
m_1= -0.131186+0.037757\qi+0.584454\qj-0.418119\qk, \\
m_2= -0.607206+0.255374\qi-0.025292\qj, \\
m_3= 1.900430+0.005637\qi+0.173381\qj, \\
m_4= -0.131186+0.037757\qi+0.584454\qj+ 0.418119\qk.
\end{cases}
\end{equation*}
Therefore, by direct computations, we have $ \mathcal{R}(\mathrm{tr}M(\pi))\approx -0.262372\in (-2,2)$. The characteristic multipliers are $\rho_1\approx -0.197803+1.73905\qi$ and $\rho_2\approx -0.064569 +0.567682\qi$. Note that $\abs{\rho_1}>1$, thus this equation is unstable. On the other hand, the roots of $\lambda^2- \mathcal{R}(\mathrm{tr}M(\pi))\lambda+\abs{M(\pi)}_q\approx\lambda^2+0.262372\lambda+1=0$ are $\alpha\approx 0.131186 +0.991358 \qi$ and $\beta=\overline{\alpha}$.
\end{example}
In fact, if $\rho_1$, $\rho_2$ are characteristic multipliers, we only have
\begin{equation}\label{character eqn 3}
\begin{cases}
\mathcal{R}(\rho_1)+\mathcal{R}(\rho_2) = \mathcal{R}(\mathrm{tr}M(T)),\\
\abs{\rho_1 } \abs{\rho_2} =\abs{M(T)}_q=1.
\end{cases}
\end{equation}
If $\abs{ \mathcal{R}(\mathrm{tr}M(T))}>2$, then one of $\abs{\mathcal{R}(\rho_1)}$ and $\abs{ \mathcal{R}(\rho_2)}$ has to be larger than $1$. In this case, the equation is unstable. By similar arguments, we can determine the stability of the quaternion-valued Hill's equation when $\abs{ \mathcal{R}(\mathrm{tr}M(T))}=2$.
In summary, Table \ref{quaternion Hill} is presented to visualize the stability of quaternion-valued Hill's equation.
\begin{table}[ht]
\centering
\caption{Description of quaternion-valued Hill's equation}{
\begin{tabular}{ |@{} c | c @{} |}
\hline
The value & Stability of quaternion-valued \\
of $ \mathcal{R}(\mathrm{tr}M(T))$ & Hill's equation \\ \hline
$\abs{ \mathcal{R}(\mathrm{tr}M(T))}>2$ & unstable \\ \hline
$\abs{\mathcal{R}(\mathrm{tr}M(T))}<2$ & undetermined \\ \hline
$ \mathcal{R}(\mathrm{tr}M(T))=2$ & stable if and only if $M(T)=I$ \\ \hline
$ \mathcal{R}(\mathrm{tr}M(T))=-2$ & stable if and only if $M(T)=-I$~ \\ \hline
\end{tabular}}\label{quaternion Hill}
\end{table}
We use the following example to illustrate (\ref{character eqn 3}) and Table \ref{quaternion Hill}.
\begin{example}\label{ex-5-2}
Consider the quaternion-valued Hill's equation with $a(t)=-1+\qj\cos 2t+ \bm{k}\sin 2t$. Based on the numerical methods, we obtain $M(\pi)\approx\begin{pmatrix} m_1&m_2\\ m_3&m_4 \end{pmatrix}$, where
\begin{equation*}
\begin{cases}
m_1=13.6488 -2.9075 \bm {i}-1.1093 \bm{j}-2.3529\qk, \\
m_2=12.3192 -2.2187 \qi+2.3529\qj, \\
m_3=14.6721-2.2187\qi-5.2605 \qj, \\
m_4=13.6488-2.9075 \bm {i}-1.1093 \bm{j}+ 2.3529\qk.
\end{cases}
\end{equation*}
Therefore, by direct computation, we have the following result (Table \ref{table-ex-10}).
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex-5-2}}{
\begin{tabular}{ | @{} c | c | c @{} | }
\hline
The value & Characteristic & \multirow{2}*{\centering Stability~}\\
of $ \mathcal{R}(\mathrm{tr}M(\pi))$ & multipliers & \\ \hline
\multirow{2}*{\centering $ \mathcal{R}(\mathrm{tr}M(\pi))\approx 27.2976 >2$ }& $\rho_1\approx 27.2621 + 4.96756 \qi$, $\abs{\rho_1}>1$; & \multirow{2}*{\centering unstable~}\\
&$\rho_2 \approx 0.0355 + 0.0065 \qi$, $\abs{\rho_1}\abs{\rho_2}\approx 1$ & \\ \hline
\end{tabular}}\label{table-ex-10}
\end{table}
\end{example}
For the case of $\abs{\mathcal{R}(\mathrm{tr}M(T))}<2$, the scalar part of $\mathrm{tr}M(T)$ is not enough to determine the stability of the quaternion-valued Hill's equation. Even taking the vector part of $\mathrm{tr}M(T)$ into account, we still cannot determine the stability of the quaternion-valued Hill's equation at this moment. This raises the question: can we determine the stability of the quaternion-valued Hill's equation by $\mathrm{tr}M(T)$ (including scalar and vector parts)? If so, how can we determine the stability of the quaternion-valued Hill's equation from $\mathrm{tr}M(T)$?
Multiplying $M(T)$ by its conjugate transpose $M(T)^{\dag}$ we construct a positive semidefinite matrix $K(T):=M(T)M(T)^{\dag}$. It is easy to see that the eigenvalues of $K(T)$ are $\kappa_1=\abs{\rho_1}^2, \kappa_2=\abs{\rho_2}^2$. Note that $\abs{\rho_1}\abs{\rho_2} = 1$ and $\mathrm{tr}K(T)=\norm{M(T)}_F^2$ where $\norm{\cdot}_F$ is the Frobenius norm. It follows that
$\kappa_1, \kappa_2$ are solutions of $\lambda^2- \norm{M(T)}_F^2 \lambda+1=0$. Then we have the following result.
\begin{theorem}\label{Fnorm}
If $\norm{M(T)}_F^2>2$, then quaternion-valued Hill's equation is unstable.
\end{theorem}
By direct computation, we have that $\abs{\mathcal{R}(\mathrm{tr}M(T))}< \norm{M(T)}_F^2$. It turns out that $\abs{\mathcal{R}(\mathrm{tr}M(T))}>2$ implies $\norm{M(T)}_F^2>2$. In Example \ref{ex-5-1}, $\abs{\mathcal{R}(\mathrm{tr}M(T))}\approx 0.262372<2$ and $\norm{M(T)}_F^2\approx 5.14637>2$. It means that the stability of Example \ref{ex-5-1} can be determined by Theorem \ref{Fnorm}. In fact, for some quaternion-valued Hill's equations with $\abs{\mathcal{R}(\mathrm{tr}M(T))}<2$, the corresponding $\norm{M(T)}_F^2$ can be very large.
\begin{example}\label{ex-5-4}
Consider the quaternion-valued Hill's equation with $a(t)=-1+ \bm{j}e^{\cos 2t}+ \bm{k}\sin 2t$.
Based on the numerical methods, we have the following result (Table \ref{table-ex-5-4}).
\begin{table}[ht]
\centering
\caption{Description of Example \ref{ex-5-4}}{
\begin{tabular}{ | @{} c | c | c @{} | }
\hline
The value of & Characteristic & \multirow{2}*{\centering Stability~}\\
~ $\abs{\mathcal{R}(\mathrm{tr}M(\pi))}$ and $\norm{M(T)}_F^2$ & multipliers & \\ \hline
$\abs{\mathcal{R}(\mathrm{tr}M(\pi))}\approx 1.0394<2$ & $\rho_1\approx -1.03876 + 40.196 \qi$, $\abs{\rho_1}>1$; & \multirow{2}*{\centering unstable~}\\
$\norm{M(T)}_F^2\approx 1942.77>2$ &$\rho_2 \approx -0.0006425 + 0.024862 \qi$, $\abs{\rho_1}\abs{\rho_2}\approx 1$ & \\ \hline
\end{tabular}}\label{table-ex-5-4}
\end{table}
\end{example}
\section{Conclusions}\label{S6}
The Floquet theory for QDEs is developed, which coincides with the classical Floquet theory when considering ODEs. The concepts of characteristic multipliers and characteristic exponents for QDEs are introduced. The newly obtained results are useful to determine the stability of quaternionic periodic systems. As an important example of applications of Floquet theory for QDEs, we discuss the stability of quaternion-valued Hill's equation in detail.
It is shown that some results of real-valued Hill's equation are invalid for the quaternion-valued Hill's equation. Throughout the paper, adequate examples are provided to support the results.
\end{document} |
\begin{document}
\begin{nouppercase}
\maketitle
\end{nouppercase}
\begin{abstract}
Let $A$ be an abelian variety defined over a number field $F$. Suppose its dual abelian variety $A'$ has good non-ordinary reduction at the primes above $p$. Let $F_{\infty}/F$ be a $\Zp$-extension, and for simplicity, assume that there is only one prime $\mfp$ of $F_{\infty}$ above $p$, and $F_{\infty, \mfp}/\Qp$ is totally ramified and abelian. (For example, we can take $F=\Q(\zeta_{p^N})$ for some $N$, and $F_{\infty}=\Q(\zeta_{p^{\infty}})$.) As Perrin-Riou did in \cite{Perrin-Riou-1}, we use Fontaine's theory (\cite{Fontaine}) of group schemes to construct series of points over each $F_{n, \mfp}$ which satisfy norm relations associated to the Dieudonne module of $A'$ (in the case of elliptic curves, simply the Euler factor at $\mfp$), and use these points to construct characteristic power series $\bfL_{\alpha} \in \Qp[[X]]$ analogous to Mazur's characteristic polynomials in the case of good ordinary reduction. By studying $\bfL_{\alpha}$, we obtain a weak bound for $\rank E(F_n)$.
In the second part, we establish a more robust Iwasawa Theory for elliptic curves, and find a better bound for their ranks under the following conditions: Take an elliptic curve $E$ over a number field $F$. The conditions for $F$ and $F_{\infty}$ are the same as above. Also as above, we assume $E$ has supersingular reduction at $\mfp$. We discover that we can construct series of local points which satisfy finer norm relations under some conditions related to the logarithm of $E/F_{\mfp}$. Then, we apply Sprung's (\cite{Sprung}) and Perrin-Riou's insights to construct \textit{integral} characteristic polynomials $\bfLalg^{\sharp}$ and $\bfLalg^{\flat}$. One of the consequences of this construction is that if $\bfLalg^{\sharp}$ and $\bfLalg^{\flat}$ are not divisible by a certain power of $p$, then $E(F_{\infty})$ has a finite rank modulo torsions.
\end{abstract}
\tableofcontents
\begin{section}{Introduction}
A good place to start our discussion is Mazur's influential work on the rational points of abelian varieties over towers of number fields (\cite{Mazur}). Suppose $A$ is an abelian variety over a number field $F$, $A$ has good ordinary reduction at every prime above $p$, and $F_{\infty}$ is a $\Zp$-extension of $F$ (i.e., $\operatorname{Gal}(F_{\infty}/F) \cong \Zp$). First, he established the Control Theorem for $\operatorname{Sel}_p(A[p^{\infty}]/F_n)$'s (meaning he showed that the natural map $\operatorname{Sel}_p(A[p^{\infty}]/F_n)\to \operatorname{Sel}_p(A[p^{\infty}]/F_{\infty})^{\operatorname{Gal}(F_{\infty}/F_n)}$ has bounded kernel and cokernel as $n$ varies), and second, he demonstrated the existence of the characteristic polynomial $f(F_{\infty}/F, A)$ of $\operatorname{Sel}_p(A[p^{\infty}]/F_{\infty})$. (Any attempt to reduce his immense work to two sentences should be resisted, and readers should understand that the author is only trying to describe how his work has influenced this paper.)
It means that we can use powerful tools of Iwasawa Theory. For example, if $f(F_{\infty}/F, A)\not=0$ (which is true if $A(F_n)^{\chi_n}$ and the $\chi_n$-part of the Shafarevich-Tate group $\Sha(A/F_n)[p^{\infty}]^{\chi_n}$ are finite for any $n\geq 0$ and any character $\chi_n$ of $\operatorname{Gal}(F_n/F)$), then $A(F_{\infty})$ has a finite rank modulo torsions. (Torsions over $F_{\infty}$ are often finite.)
Regarding the rank of $A(F_{\infty})$, now we have a stronger result for elliptic curves over $\Q$ by Kato (\cite{Kato}). However, we want to emphasize that Mazur's work and Kato's work have different goals and strengths.
Can we establish a result analogous to Mazur's for abelian varieties with good \textit{non-ordinary} reduction at primes above $p$? (See Section~\ref{Reduction} for the discussion about reduction types. We will not treat bad reduction primes, which seem to require a very different approach except for multiplicative reduction primes.)
The answer is that it is not easy to do Mazur's work directly for non-ordinary reduction primes. The main problem seems to be that the local universal norms are trivial when the primes are non-ordinary.
One of the more successful strategies to overcome this difficulty is to construct a series of local points which satisfy certain norm relations associated with the Euler factor $X^2-a_p(E)X+p$. Rubin introduced the idea of $\pm$-Selmer groups of elliptic curves (\cite{Rubin}). His method was to use the Heegner points as local points. Perrin-Riou (\cite{Perrin-Riou-1}) invented a way to construct such local points purely locally using Fontaine's theory of formal group schemes (\cite{Fontaine}). Her brilliant idea was all but forgotten for a long time, but are getting more influential recently. (And, this paper owes much to her work.)
More recently, Kobayashi (\cite{Kobayashi}) also constructed such local points of elliptic curves using a more explicit method, and demonstrated the potential that the theory for supersingular reduction primes can be as good as the theory for ordinary reduction primes.
Kobayashi assumed $a_p(E)=0$ for an elliptic curve $E$ defined over $\Q$ (which is automatically true by the Hasse inequality if $E$ has good supersingular reduction at $p$ and $p>3$). Sprung introduced a new idea, what he calls $\sharp/\flat$-Selmer groups for elliptic curves, which does not require $a_p(E)=0$ (\cite{Sprung}). His work has particular relevance to this paper because we are interested in abelian varieties and elliptic curves over ramified fields. Even when we assume $a_p(E)=0$ or an equivalent condition, the associated formal groups behave as if $a_p$ is not 0 because the fields are ramified. We will make much use of his idea of the $\sharp/\flat$-decomposition in the second part.
Whereas our predecessors were concerned with abelian varieties over $\Q$ (and therefore formal groups defined over $\Qp$), we are concerned with abelian varieties defined over fields whose primes above $p$ are ramified, which present new difficulties.
First (Section~\ref{Case 1}), we take an abelian variety $A$ over a number field $F$, and let $A'$ be its dual abelian variety. For simplicity, we assume there is only one prime $\mfp$ of $F$ above $p$, and it is totally ramified over $F/\Q$. We assume $A'$ has good reduction at $\mfp$. Suppose $F_{\infty}$ is a $\Zp$-extension of $F$ such that $\mfp$ is totally ramified over $F_{\infty}/F$, and $F_{\infty, \mfp}/\Qp$ is abelian. For example, take $F =\Q(\zeta_{p^N})$ for some $N$, and $F_{\infty}=\Q (\zeta_{p^{\infty}})$.
Suppose $A'/F_{\mfp}$ has dimension $1$. (Generalizing to higher dimensions may not be very hard.) Let $H^{\vee}(X)=X^d+pb_1X^{d-1}+p^2b_2X^{d-2}+\cdots+p^db_d$ be the characteristic polynomial of the Verschiebung ${\bf V}$ acting on the Dieudonne module. For example, for an elliptic curve, that is simply $X^2-a_p(E)X+p$. Suppose that $A'(F_{\infty, \mfp})_{tor}$ is annihilated by some $M'>0$. Then, we construct points $Q(\pi_{N+n}) \in A'(F_{n, \mfp})$ such that we have
$$ \operatorname{Tr}_{F_{n, \mathfrak p}/F_{n-d, \mathfrak p}} Q(\pi_{N+n}) = \sum_{i=1}^d -p^i\cdot b_i \operatorname{Tr}_{F_{n-i, \mathfrak p}/F_{n-d, \mathfrak p}} Q(\pi_{N+n-i}).
$$
Fontaine's theory of finite group schemes (\cite{Fontaine}) is instrumental in our construction, as it is in Perrin-Riou's work (\cite{Perrin-Riou-1}). As Perrin-Riou does, for each root $\alpha$ of $H^{\vee}(X)$ with $v_p(\alpha)<1$, we can construct a characteristic power series $\bfL_{\alpha}(X)\in \Qp[[X]]$ which is analogous to Mazur's characteristic polynomial $f(F_{\infty}/F, A)$ except that it is not an integral power series unless $v_p(\alpha)=0$.
Then, we can obtain the following bound for the coranks of the Selmer groups (and thus, for the ranks of $A(F_n)$):
\begin{theorem}[Proposition~\ref{ZeroGo}]
Let $\lambda=v_p(\alpha)$.
\begin{enumerate}
\item If $\bfL_{\alpha}\not=0$, then
\[ \corank_{\Zp} \operatorname{Sel}_p(A[p^{\infty}]/F_n) \leq e(p-1) \times \left\{ p^{n-1}+p^{n-2}+ \cdots+ p^m \right\}+O(1)\]
where $n-m = \lambda n +O(1)$.
\item
If any root $\alpha$ of $H^{\vee}(X)$ has valuation 0 (i.e., if $A'$ has ``in-between'' reduction or ordinary reduction), then $\corank_{\Zp}(\operatorname{Sel}_p(A[p^{\infty}]/F_n))$ is bounded by the number of roots of $\bfL_{\alpha}$.
\end{enumerate}
\end{theorem}
We have $\bfL_{\alpha}\not=0$ if $\operatorname{Sel}_p(A[p^{\infty}]/F_n)^{\chi_n}$ is finite for any $n$ and any character $\chi_n$ of $\operatorname{Gal}(F_n/F)$. Also note that $\rank(A(F_n))$ is bounded by $\corank_{\Zp} \operatorname{Sel}_p(A[p^{\infty}]/F_n)$.
In addition, we construct similar local points over the extensions $F_{\mfp}(\sqrt[p^n]{\pi})$ ($n\geq 0$) for any uniformizer $\pi$ of $F_{\mfp}$ (Section~\ref{Kummer}). On one hand, this construction is fully general. On the other hand, since $\cup_n F_{\mfp}(\sqrt[p^n]{\pi})$ is not abelian over $F_{\mfp}$, it is not clear what we can do with it. (For instance, we cannot apply Iwasawa Theory to the points.)
Furthermore, assuming additional hypotheses, and with the crucial help of Sprung's insight, we can establish an Iwasawa Theory that is more closely aligned with Mazur's theory. In Section~\ref{Case 2}, we take an elliptic curve $E$ over $F$, and suppose $E$ has \textit{good supersingular reduction} at $\mfp$ (i.e., $a_{\mfp}(E)$ is not prime to $p$).
We choose a logarithm $\bfl$ of $E$ over $F_{\mfp}$ and a generator $\bfm$ of the Dieudonne module of $E$, and write
\[ \bfl=\alpha_1 \bfm+\alpha_2 {\bf F}\bfm\]
for some $\alpha_1, \alpha_2 \in F_{\mfp}$. We assume $p| \frac{\alpha_2}{\alpha_1}$ (Assumption~\ref{Assumption K}). Also we assume Assumption~\ref{Assumption L}, which is too technical to explain here, but is probably true in most cases.
One crucial step is that we modify our construction so that the resulting local points satisfy a finer norm relation (Proposition~\ref{Mark IV}). Another crucial step is that like Perrin-Riou, we construct $p$-adic characteristic power series, but this time, by applying an idea inspired by Sprung's insight of $\sharp/\flat$ (\cite{Sprung}), we construct integral $p$-adic characteristic polynomials $\bfLalg^{\sharp}(E), \bfLalg^{\flat}(E) \in \Lambda$. Since these are integral, they are more analogous to Mazur's characteristic polynomial $f(F_{\infty}/F, A)$, and it is likely that they have nice properties. They may not necessarily satisfy a control theorem in a literal sense, but nonetheless we manage to prove Proposition~\ref{DDT}, by which we can obtain the following.
\begin{theorem}[Theorem~\ref{DDR}]
Suppose $a_p$ and $\alpha$ are divisible by $p^T$ for some $T$, and neither $\bfLalg^{\sharp}(E)$ nor $\bfLalg^{\flat}(E)$ is divisible by $p^S$ for some $S$ with $S+\frac{[F:\Q]\times p}{(p-1)^2}<T$. Then, $E(F_{\infty})$ has a finite rank modulo torsions, and $\Sha(E/F_n)[p^{\infty}]^{\chi_n}$ is finite for all sufficiently large $n$ and primitive characters $\chi_n$ of $\operatorname{Gal}(F_n/F)$.
\end{theorem}
\end{section}
\begin{section}{Reduction Types} \Label{Reduction}
In this short section, we discuss reduction types.
For elliptic curves, what good reduction, good ordinary reduction, and good supersingular reduction mean is clear. Suppose an elliptic curve $E$ is defined over a local field $K$. Then, we may suppose it has a minimal model over $\OO_K$. Let $\tilde E$ denote the reduced curve of the minimal model modulo $\mm_{\OO_K}$. We say $E$ has good reduction if $\tilde E$ is non-singular (i.e., smooth). Furthermore, we say $E$ has good ordinary reduction if $\tilde E$ is non-singular, and $\tilde E[p]$ is non-trivial, and has good supersingular reduction if $\tilde E$ is non-singular, and $\tilde E[p]$ is trivial. There are other equivalent definitions.
For general abelian varieties, it may be advantageous to use the Dieudonne modules to define reduction types. (There are other definitions, but the one using Dieudonne modules seems relatively simple.) Suppose $G$ is a formal group scheme over $\OO_K$ where $K$ is a local field. Let $G_{/k}$ be its reduction over the residue field $k$. If $G_{/k}$ is smooth, then we say $G$ has good reduction. Assume $G$ has good reduction, and let $M$ be its Dieudonne module $\hat{CW}(R_{G_{/k}})$ where $R_{G_{/k}}$ is the affine algebra that defines $G_{/k}$, and $\hat{CW}$ denotes the completion of the co-Witt vectors. (See \cite{Dieudonne-1}, \cite{Dieudonne-2}, \cite{Fontaine}, or Section~\ref{Fontaine}.) The Frobenius ${\bf F}$ and the Verschiebung ${\bf V}$ act on $M$ through $\hat{CW}$ with ${\bf F}{\bf V}={\bf V}{\bf F}=p$.
Let $H(X)$ be the characteristic polynomial of ${\bf F}$ as action on $M$, i.e., $H(X)=\det (X\cdot 1_M-{\bf F}|M)$. Write
\[ H(X)=X^d+a_{d-1}X^{d-1}+\cdots+a_0.\]
Then, ${\bf F}$ is a topological nilpotent if and only if the roots of $H(X)$ are non-units.
Since ${\bf F}{\bf V}=p$,
$$H^{\vee}(X) \stackrel{def}=X^d+p\frac{a_1}{a_0}X^{d-1}+p^2 \frac{a_2}{a_0}X^{d-2}+\cdots+p^{d-1} \frac{a_{d-1}}{a_0} X+p^d \frac 1{a_0}$$
is the characteristic polynomial of ${\bf V}$ as action on $M$. We define the following terminology we will use in this paper.
\begin{definition} \Label{Calais}
Assume $G$ has good reduction. Also assume ${\bf F}$ is a topological nilpotent. Recall that ${\bf V}$ is a topological nilpotent if all the roots of $H^{\vee}(X)$ are non-units.
\begin{enumerate}
\item If all the roots of $H^{\vee}$ are units, then we say $G$ has ordinary reduction.
\item If ${\bf V}$ is a topological nilpotent (i.e., all the roots of $H^{\vee}$ are non-units), then we say $G$ has supersingular reduction.
\item If some roots of $H^{\vee}$ are units and some are not, then we say $G$ has in-between reduction.
\end{enumerate}
\end{definition}
The last terminology is our own ad-hoc invention.
Definition~\ref{Calais} makes it clear that in this paper, we assume ${\bf F}$ is a topological nilpotent, but this condition is used only in a minor way, and when we use that assumption, we will mention it.
\end{section}
\begin{section}{Fontaine's functor for ramified extensions} \Label{Fontaine}
Our primary reference is \cite{Fontaine}~Chapter~4. We will keep his notation wherever possible. Fontaine's book is out of print, and not many libraries have a copy. So, we will explain his work briefly.
Let
\begin{enumerate}[(a)]
\item $K'$: an extension over $\Qp$ (possibly ramified),
\item $\mathcal O_{K'}$: its ring of integers,
\item $\mathfrak m$: its maximal ideal,
\item $e$: the ramification index of $K'$.
\end{enumerate}
Let $k$ be the residue field of $\OO_{K'}$, and let $K$ be the fraction field of $W=W(k)$, the ring of Witt vectors of $k$. In other words, it is the maximal unramified extension of $\Qp$ contained in $K'$. Then, there is the $p$-th Frobenius $\sigma$ on $K$. We let
\[ \mathbf D_k\stackrel{def}=W[{\bf F}, {\bf V}] \]
where
\begin{enumerate}[(a)]
\item ${\bf F}$ acts $\sigma$-linearly, and ${\bf V}$ acts $\sigma^{-1}$-linearly on $W$. In other words, ${\bf F} a=\sigma(a)$ and ${\bf V} a=\sigma^{-1}(a)$ where $a\in W$.
\item ${\bf F}{\bf V}={\bf V}{\bf F}=p$
\end{enumerate}
If $K'$ is totally ramified so that $k=\mathbb F_p$, we drop $k$ from $\mathbf D_k$.
Suppose $G$ is a smooth finite-dimensional (commutative) formal group scheme over $\OO_{K'}$ such that $G_{/k}$ is smooth. Fontaine found a way to describe $G$ by linear algebra. More specifically, he can describe $G$ completely up to isogeny (or, up to isomorphism if $e<p-1$) by the Dieudonne module $M$, and the set $L$ of its ``logarithms'', and his description is given by expressing the points of $G$ by the linear algebra of $L$ and $M$. Together, $(L, M)$ is called the Honda system of $G$.
We briefly summarize Fontaine's work: Let $R$ be the affine algebra of $G$ (i.e., $G(g)\cong \operatorname{Hom}(R, g)$ for any algebra $g$ over $\OO_{K'}$ where $\operatorname{Hom}$ is the set of ring homomorphisms). Then, $R_k=R/\mm R$ is the affine algebra of the special fiber $G_{/k}$. Set
\[ M\stackrel{def}=\operatorname{Hom}(G_{/k}, \hat{CW}) \]
where $\hat{CW}$ is the functor of completed co-Witt vectors. Then, $M=\operatorname{Hom}(G_{/k}, \hat{CW})\cong \hat{CW}(R_k)$. Since the Frobenius ${\bf F}$ and the Verschiebung ${\bf V}$ act on $CW$ by
\[ {\bf F}(\ldots, a_{-n},\ldots)=(\ldots, a_{-n}^p,\ldots),\]
\[ {\bf V}(\ldots, a_{-2}, a_{-1}, a_0)=(\ldots, a_{-2}, a_{-1}),\]
${\bf F}$ and ${\bf V}$ also act on $M$ accordingly.
For any algebra $A$ over $k$,
\[ G_{/k}(A)\cong \operatorname{Hom}_{\mathbf D_k}(M, A). \]
Suppose $N$ is a $\mathbf D_k$-module. Let $N^{(j)}$ denote the $\mathbf D_k$-module with the same underlying set $N$ and action twisted by $\sigma^j$. In other words, for $n \in N^{(j)}$ and $\lambda \in W$,
\[ \lambda\circ n=\sigma^{-j}(\lambda)n.\]
We note that ${\bf F}$ induces a $\mathbf D_k$-linear isomorphism ${\bf F}: M^{(j)} \to M^{(j-1)}$, and ${\bf V}$ induces a $\mathbf D_k$-linear isomorphism ${\bf V}: M^{(j)} \to M^{(j+1)}$. So, we can define the following maps:
\begin{enumerate}[(a)]
\item
$$\varphi_{i, j}:\mm^i\otimes_{\OO_{K'}} N^{(j)} \to \mm^{i-1}\otimes_{\OO_{K'}}N^{(j)}$$
is a natural map induced by the inclusion $\mathfrak m^i \to \mathfrak m^{i-1}$,
\item
$$f_{i,j}: \mm^i \otimes_{\OO_{K'}} N^{(j)} \to \mm^i \otimes_{\OO_{K'}} N^{(j-1)}$$
induced by ${\bf F}: N^{(j)} \to N^{(j-1)}$, and
\item
$$v_{i,j}: \mm^i \otimes_{\OO_{K'}} N^{(j)} \to \mm^{i-e} \otimes_{\OO_{K'}} N^{(j+1)}$$
given by $v_{i,j}(\lambda \otimes m)= p^{-1}\lambda \otimes {\bf V} m$.
\end{enumerate}
For a subset $I$ of $\Z\times \Z$, we let $\mathcal D_I(N)$ denote the system of diagrams (in the category of $\OO_{K'}$-modules) of the objects $\mm^i\otimes N^{(j)}$ where $(i,j) \in I$ and the maps $\varphi_{i,j}, f_{i,j}, v_{i,j}$ between the objects of $\mathcal D_I(N)$. (See \cite{Fontaine}~p.189.)
We define
\[ I_0=\{ (i,j) \in \Z\times \Z, \; (j\geq 0) \; | \; i\geq 0 \text{ if }j=0, i \geq p^{j-1}-je \text{ if }j \geq 1\}, \]
and let
$$N_{\OO_{K'}}\stackrel{def}=\varinjlim\mathcal D_{I_0}(N).$$
For $j'>0$, we also define
\[ I_{j'}=\{ (i,j) \in \Z\times \Z, \; (j \geq j') \; | i \geq p^{j-1}-je \}, \]
and let
$$N_{\mathcal O_{K'}}[j']\stackrel{def}=\varinjlim\mathcal D_{I_{j'}}(N).$$
When $M$ is a $\mathbf D_k$-module without ${\bf F}$-torsion, it is well-known that $M_{\mathcal O_{K'}}[1]\to M_{\mathcal O_{K'}}$ is injective, and
\[ M/{\bf F} M\cong M_{\mathcal O_{K'}}/ M_{\mathcal O_{K'}}[1] \]
(\cite{Fontaine}~5.2.5, Corollaire~1).
\begin{definition}
\begin{enumerate}[(a)]
\item
For an algebra $g$ over $\OO_{K'}$, we can define
\begin{eqnarray*} \omega_g: \hat{CW}(g) &\to & \Qp\otimes g \\
(\cdots, a_{-n},\cdots, a_{-1},a_0) &\mapsto & \sum_{n=0}^{\infty} p^{-n}a_{-n}^{p^n}.
\end{eqnarray*}
\item
We define $P'(g)$ as the $\mathcal O_{K'}$-submodule of $\Qp\otimes g$ generated by $p^{-n} a^{p^n}$ for all $n\geq 0$ and all $a \in \mathfrak m \cdot g$.
\end{enumerate}
We will drop $g$ from $\omega_g$ if it does not cause confusion.
\end{definition}
This group $P'(g)$ is not indefinitely large. In fact, we have
\[ \mathfrak m \cdot g \subset P'(g) \subset \mathfrak m^{v} \cdot g \]
where $v=\min(p^n-ne)$ (in particular, if $e\leq p-1$, $P'(g)=\mathfrak m \cdot g$). See \cite{Fontaine}~p.197.
It is easy to see $\omega_g$ naturally extends to
\[ \omega'_g: \mathcal O_{K'} \otimes \hat{CW}_k(g/\mm \cdot g) \to \Qp \otimes g/P'(g)\]
by choosing a lifting $(\tilde a_{-n})\in \hat{CW}_k(g)$ of $(a_{-n})\in \hat{CW}_k(g/\mm \cdot g)$.
\begin{proposition}[\cite{Fontaine}~Proposition~2.5] \Label{Atlantic}
Let $N$ be a $\mathbf D_k$-module so that ${\bf V} N=N$. Then, the canonical map $\OO_{K'}\otimes N \to N_{\OO_{K'}}$ is surjective, and its kernel is $\sum_{j=1}^{\infty} \mm^{p^{j-1}}\otimes \operatorname{Ker} {\bf V}^j$.
\end{proposition}
\begin{proposition}[\cite{Fontaine}~Lemme~3.1] \Label{Pacific}
The kernel of $\omega_g'$ contains $\sum_{j=1}^{\infty} \mm^{p^{j-1}}\otimes \operatorname{Ker} {\bf V}^j$.
\end{proposition}
There is a natural map $\mathcal O_{K'} \otimes \hat{CW}_k(g/\mm g) \to \hat{CW}_{k}(g/\mm g)_{\OO_{K'}}$. Note ${\bf V} \hat{CW}_k(g/\mm g)= \hat{CW}_k(g/\mm g)$. Thus, by Propositions~\ref{Atlantic} and \ref{Pacific}, $\omega_g'$ factors through
\[ \omega_g: \hat{CW}_{k}(g/\mm g)_{\OO_{K'}} \to \Qp \otimes g/P'(g) \]
(\cite{Fontaine}~p.197).
Recall that $R$ is the affine algebra of $G$. Then, there is the coproduct map $\delta: R\to R\hat\otimes_{\OO_{K'}} R$ which induces the group operation of $G$.
Let $P_R$ be the $R$-module generated by $a^{p^n}/p^n$ for every $a \in R$ and $n\geq 0$. Let $L$ be the set of $a \in P_R$ so that $a\otimes 1 - \delta(a)+1\otimes a=0$. In other words, $L$ is the set of logarithms. It naturally satisfies
\[ L/\mathfrak m L \stackrel{\sim}\to M_{\mathcal O_{K'}}/ M_{\mathcal O_{K'}}[1] \cong M/{\bf F} M. \]
Fontaine defined the following functor $G(L, M)$:
\begin{definition}[\cite{Fontaine}~Section~4.4] \Label{Fontaine-Nara}
For an algebra $g$ over $\OO_{K'}$ (i.e., $g$ is a ring containing $\OO_{K'}$), $G(L, M)(g)$ is the set of points $(\mathbf y, \mathbf x)$ with $\mathbf x \in \operatorname{Hom}_{\mathbf D_k}( M, CW_k(g/\mm \cdot g))$, and $\mathbf y \in \operatorname{Hom}_{\mathcal O_{K'}}(L, \Qp\otimes g)$ satisfying the following: $\bfx$ naturally induces a map
$$\bfx_{\OO_{K'}}: M_{\mathcal O_{K'}} \to CW_k(g/\mm \cdot g)_{\mathcal O_{K'}}.$$
Then, $(\bfy, \bfx)$ is a fiber product in the sense that $\bfx_{\OO_{K'}}$ and $\bfy$ are identical through
\begin{eqnarray*}
\operatorname{Hom}_{\mathcal O_{K'} \otimes \mathbf D_k}( M_{\mathcal O_{K'}}, CW_k(g/\mm g)_{\mathcal O_{K'}}) \to & \operatorname{Hom}_{\mathcal O_{K'}}( L, \Qp\otimes g/ P'(g))\\
&\uparrow\\
&\operatorname{Hom}_{\mathcal O_{K'}}(L, \Qp\otimes g).
\end{eqnarray*}
\end{definition}
There is a natural map $i_G:G\to G(L, M)$, and also we can find a map in the reverse direction $j_G:G(L, M)\to G$. These maps are not necessarily isomorphisms unless $e<p-1$. Rather, $i_G\circ j_G=p^t$, $j_G\circ i_G=p^t$ for some $t$ which depends on the ramification index $e$.
\end{section}
\begin{section}{Perrin-Riou's insight, and weak bounds for ranks} \Label{Constructing}
In this section, we construct points of formal group schemes over local fields satisfying certain norm relations. The local points we construct are analogous to the points that Perrin-Riou constructed (\cite{Perrin-Riou-1}), and indeed, this section is an effort to find a way to make her idea work for group schemes defined over ramified fields. As in her work, Fontaine's functor (\cite{Fontaine}, and also \cite{Dieudonne-1}, \cite{Dieudonne-2}) plays a central role, but we need a functor defined for group schemes over ramified fields. There is a brief discussion about the functor in the previous section (Section~\ref{Fontaine}). And then, again following Perrin-Riou, we construct power series analogous to Mazur's characteristic polynomials of the Selmer groups. Our power series have limited utility unlike Mazur's characteristics because they are not integral. Nonetheless, they give a bound for the coranks of the Selmer groups (thus a bound for the ranks of the Mordell-Weil groups).
\begin{subsection}{Constructing the Perrin-Riou local points} \Label{Some special}
Suppose $k_{\infty}/\Qp$ is a totally ramified normal extension with $\operatorname{Gal}(k_{\infty}/\Qp)\cong \Z_p^{\times}$. By local class field theory, it is given by a Lubin-Tate group of height $1$ over $\Zp$. In other words, there is $\varphi(X)=X^p+\alpha_{p-1} X^{p-1}+\cdots+\alpha_1X \in \Zp[X]$ with $p|\alpha_i$, $v_p(\alpha_1)=1$ so that
\[ k_{\infty}=\cup_n \Qp(\pi_n) \]
where $\varphi(\pi_n)=\pi_{n-1}$ ($\pi_n\not=0$ for $n > 0$, $\pi_0=0$).
\begin{remark} \Label{Edward the Confessor}
We can also study a more general case where $k_{\infty}/\Qp$ is ``merely'' ramified (rather than totally ramified). It can certainly be done as the author did in a different context and for a different problem in \cite{Kim-1}. The notation will become much more complicated.
\end{remark}
Suppose $K'=\Qp(\pi_N)$ for some $N>0$. Let $\mm=\mm_{\OO_{K'}}$, and $k$ be $\OO_{K'}/\mm_{\OO_{K'}}$ (which is simply $\mathbb F_p$).
We let $G$ be a formal group scheme over $\mathcal O_{K'}$ such that its reduced group scheme $G_{/k}$ (i.e., the special fiber) is smooth (therefore, $G$ has good reduction).
As in section~\ref{Fontaine}, we set $M=\operatorname{Hom}(G_{/k}, \hat{CW})$, which is a $\mathbf D$-module, and define $L$ as we did in Section~\ref{Fontaine}. In addition, we assume
\begin{assumption}
The dimension of $G$ is $1$ (i.e., $L$ is rank $1$ over $\OO_{K'}$).
\end{assumption}
This assumption will make our work much simpler.
\begin{remark}
Even though the author has not thought much about it, the case where the dimension of $G$ is not $1$ may not be so difficult. We only need to consider multiple logarithms.
\end{remark}
Also, we assume
\begin{assumption} \Label{Assumption-1}
Recall that we assume $G$ has good reduction. Also we assume $G$ does not have ordinary reduction. (See Definition~\ref{Calais}.)
\end{assumption}
Clearly, the case where $G$ has good ordinary reduction is covered well by Mazur's work (\cite{Mazur}).
Since we always assume that ${\bf F}$ acts on $M$ as a topological nilpotent, $M$ can be considered as a $\Zp[[{\bf F}]]$-module.
We set
\[ d=\rank_{\Zp} M. \]
Since we assume $G_{/k}$ is of dimension $1$, $\dim_{\mathbb F_p} M/{\bf F} M=1$, thus we may choose $\bfm \in M$ so that it generates $M$ over $\Zp[[{\bf F}]]$. More specifically,
\[ \bfm, {\bf F}\bfm, \cdots, {\bf F}^{d-1} \bfm,\]
are $\Zp$-linearly independent, and generate $M$ over $\Zp$.
\begin{remark}
In fact, this seems to be the only place in this section where we use the condition that ${\bf F}$ is a topological nilpotent.
\end{remark}
We may also choose an $\OO_{K'}$-generator $\bfl$ of $L$. Since $L \subset M_{\OO_{K'}}$, we may write
\begin{eqnarray*}
\bfl &=& (\bfl_{ij})_{(i,j)\in I_0},\\
\bfl_{ij} &=& \sum_{k=0}^{d-1} \alpha_k^{(ij)} {\bf F}^k \bfm \in \mm^i \otimes M^{(j)}
\end{eqnarray*}
for some $\alpha_k^{(ij)} \in \mm^i$.
We set
\[ H(X)={\det}_{\Zp} (X\cdot 1_M-{\bf F}|M)=X^d+a_{d-1}X^{d-1}+\cdots+a_0 \in \Zp[X], \]
then
\[ \bar H(X)\stackrel{def}= \displaystyle \frac{H(X)}{a_0} = 1+ \displaystyle \frac{a_1}{a_0}X+\cdots+\frac{a_{d-1}}{a_0}X^{d-1}+\frac1{a_0}X^d. \]
We let
\[J(X)\stackrel{def}=\bar H(X) -1=b_1X+b_2X^{2}+\cdots+b_dX^{d} \]
then formally we have
\[ \bar H(X)^{-1}=1-J(X)+J(X)^2-\cdots .\]
\begin{notation}
\begin{enumerate}
\item Recall $H(X)=X^d+a_{d-1}X^{d-1}+\cdots+ a_0$, and $\varphi(X)=X^p+\alpha_{p-1} X^{p-1}+\cdots+\alpha_1X$. Define
\[ \epsilon\stackrel{def}=\displaystyle \frac{a_0 \alpha_{p-1}}{p\cdot (a_0+a_1+\cdots+a_{d-1}+1)}. \]
(Note that $\epsilon \in p\Zp$ because $a_0+a_1+\cdots+a_{d-1}+1 \in 1+p\Zp$.)
\item Let $\mathcal P$ be the $\Zp[[X]]$-submodule of $\Qp[[X]]$ which is generated by $\frac{X^{p^n}}{p^n}$ for $n=0,1,2,\cdots$. And, let $\bar{\mathcal P}=\mathcal P/p\Zp[[X]]$, which is isomorphic to $\hat{CW}(\mathbb F_p[[X]])$ through
\begin{eqnarray*} \omega: \hat{CW}(\mathbb F_p[[X]]) &\to & \bar{\mathcal P} \\
(\cdots, a_1, a_0) &\mapsto & \sum \displaystyle \frac{\tilde a_n^{p^n}}{p^n}
\end{eqnarray*}
($\tilde a_n \in \Zp[[X]]$ is a lifting of $a_n$).
\item
Let $\varphi$ be an operator on $\mathcal P$ given by
\[ \varphi(X^n):=\varphi(X)^n \]
which is equivalent to ${\bf F}$ on $\bar{\mathcal P} \cong \hat {CW}(\mathbb F_p[[X]])$.
(More precisely, for $a\in W$,
\[ \varphi(a)=\sigma(a) \]
where $\sigma$ is the $p$-th Frobenius map on $W$, and thus $\varphi$ is a $\sigma$-linear operator. But, here we have $W=\Zp$, so we can safely ignore this.)
Then, we define
\[ l(X)=\left[ 1-J(\varphi)+J(\varphi)^2-\cdots \right] \circ X. \]
\item Define $\bfx \in G_{/k}(\mathbb F_p[[X]]) \cong \operatorname{Hom}_{\mathbf D}(M, \hat {CW}(\mathbb F_p[[X]])) \cong \operatorname{Hom}_{\mathbf D}(M, \bar{\mathcal P})$ by
\[ \bfx(\bfm)= l(X) \pmod{p\Zp[[X]]}\]
and extend $\mathbf D$-linearly. (Note that $\operatorname{Hom}_{\mathbf D}(M, \bar{\mathcal P}) \cong \operatorname{Hom}_{\Zp[{\bf F}]}(M, \bar{\mathcal P})$ by \cite{Perrin-Riou-1}~Section~3.1~p.261.)
\end{enumerate}
\end{notation}
\begin{proposition} \Label{Sino-Japanese War}
$\bfx$ is well-defined.
\end{proposition}
\begin{proof}
First, we need to show $l(X)$ is well-defined. Because $G$ has supersingular reduction, $p^i b_i \in p\Zp$ ($i=1,2,\cdots, d$). Thus, $l(X)$ is well-defined (i.e., the infinite summation which defines $l(X)$ is convergent). Then, we check
\[ (1+J(\varphi))\circ \left\{ 1-J(\varphi)+J(\varphi)^2-\cdots \right\} \circ X=1\circ X.\]
Since ${\bf F}$ is a topological nilpotent, $p|a_0$, thus $H(\varphi)\circ l(X)=a_0X \in p\Zp[[X]]$, in other words, $H({\bf F})\circ l(X)=0 \in \hat{CW}(\mathbb F_p [[X]])$. Since $H(X)$ is irreducible, $\bfx$ extends to the entire $M$ $\mathbf D$-linearly.
\end{proof}
\begin{notation} \Label{Moscow}
\begin{enumerate}
\item Define a lifting $\tilde \bfx \in \operatorname{Hom}_{\Zp}(M, \mathcal P)$ of $\bfx$ by
\[ \tilde \bfx({\bf F}^k \bfm)=\epsilon+\varphi^k \circ l(X)=\epsilon+ l(\varphi^{(k)}(X)), \quad k=0,1,\cdots,d-1\]
where $\varphi^{(k)}=\varphi(\varphi(\cdots(X)))$ ($k$-times).
\item Recall
\begin{eqnarray*}
\bfl &=& (\bfl_{ij})_{(i,j)\in I_0},\\
\bfl_{ij} &=& \sum_{k=0}^{d-1} \alpha_k^{(ij)} {\bf F}^k \bfm \in \mm^i \otimes M^{(j)}.
\end{eqnarray*}
We can write
\[ {\bf F}^j \bfl_{ij} = \sum_{k=0}^{d-1} \beta_k^{(ij)} {\bf F}^k \bfm\]
for some $\beta_k^{(ij)}\in \mm^i$.
\item
Define $\bfy \in \operatorname{Hom}_{\OO_{K'}}(L, K'[[X]])$ explicitly as follows:
We set
\[ \bfy(\bfl)=\sum_{(i,j)\in I_0} \sum_{k=0}^{d-1} \beta_k^{(ij)} \tilde\bfx ({\bf F}^k \bfm) \]
and extend to $L$ $\OO_{K'}$-linearly.
\item Then, we set $P=(\bfy, \bfx) \in G(L, M)(\OO_{K'}[[X]])$.
\end{enumerate}
\end{notation}
\begin{proposition}
$P=(\bfy, \bfx)$ is well-defined.
\end{proposition}
\begin{proof}
We need to show it is a fiber product in the sense of Definition~\ref{Fontaine-Nara}. We let $\bfx$ also denote the extended map $\bfx: M_{\OO_{K'}}\to \hat{CW}(\mathbb F_p[[X]])_{\OO_{K'}}$.
For each $\bfl_{ij}\in \mm^i \otimes M^{(j)}$,
$$\bfx(\bfl_{ij})=\bfx(\sum_{k=0}^{d-1} \alpha_k^{(ij)} {\bf F}^k \bfm)=\sum_{k=0}^{d-1} \alpha_k^{(ij)} \bfx({\bf F}^k \bfm) \in \mm^i \otimes \hat{CW}(\mathbb F_p[[X]])^{(j)}.$$
Because $\omega$ on $\hat{CW}(\mathbb F_p[[X]])_{\OO_{K'}}$ is deduced from $\omega: \OO_{K'}\otimes \hat{CW}(\mathbb F_p[[X]]) \to K'[[X]]/P'(\OO_{K'}[[X]])$ through $\OO_{K'}\otimes \hat{CW}(\mathbb F_p[[X]]) \to \hat{CW}(\mathbb F_p[[X]])_{\OO_{K'}}$, to evaluate $\omega$ on $\sum_{k=0}^{d-1} \alpha_k^{(ij)} \bfx({\bf F}^k \bfm) \in \mm^i \otimes \hat{CW}(\mathbb F_p[[X]])^{(j)}$, we need to send it to $p^j \cdot \mm^i \otimes \hat{CW}(\mathbb F_p[[X]])$ by ${\bf F}^j$, and obtain
\begin{eqnarray*}
\omega(\bfx(\bfl_{ij})) &=& \omega \left( {\bf F}^j\sum_{k=0}^{d-1} \alpha_k^{(ij)} \bfx({\bf F}^k \bfm) \right) \\
&=& \omega \left( \sum_{k=0}^{d-1} \beta_k^{(ij)} \bfx({\bf F}^k \bfm) \right) \\
&=& \sum_{k=0}^{d-1} \beta_k^{(ij)} l(\varphi^k(X)) \pmod{P'(\OO_{K'}[[X]])}.
\end{eqnarray*}
Thus, $\omega(\bfx(\bfl))=\bfy(\bfl) \pmod{P'(\OO_{K'}[[X]])}$, and by extending $\OO_{K'}$-linearly, $\bfx=\bfy$ as elements of $\operatorname{Hom}_{\OO_{K'}}(L, K'[[X]]/P'(\OO_{K'}[[X]]))$, and our claim follows.
\end{proof}
For simplicity, let $\operatorname{Tr}_{n/m}$ denote $\operatorname{Tr}_{K'(\pi_n)/K'(\pi_m)}$.
\begin{proposition} \Label{Despicable-Laundry-Machine}
Modulo torsions, we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} P(\pi_n) &=& -p\cdot b_1\cdot \operatorname{Tr}_{n-1/n-d} P(\pi_{n-1} ) - p^2 \cdot b_2 \cdot \operatorname{Tr}_{n-2/n-d} P(\pi_{n-2} ) \\
&& - \quad \cdots \quad - p^d \cdot b_d\cdot P(\pi_{n-d} )
\end{eqnarray*}
for every $n\geq N+d$.
\end{proposition}
\begin{proof}
Note that $(0, \mathbf z) \in G(L, M)(g)$ is a torsion point for any $\mathbf z \in G_{/k}(g/\mm g)$. Thus, we only need to show the identity of the $L$-parts.
First, we find
\begin{eqnarray*} \operatorname{Tr}_{n/n-1} l(\pi_n) &=& \operatorname{Tr}_{n/n-1} \left. \left[ 1-J(\varphi)+J(\varphi)^2-\cdots \right] \circ X \right|_{X=\pi_n} \\
&=&\operatorname{Tr}_{n/n-1} \pi_n - \operatorname{Tr}_{n/n-1} J(\varphi)\circ \left. \left[ 1-J(\varphi)+J(\varphi)^2-\cdots \right]\circ X \right|_{X=\pi_n} \\
&=& -\alpha_{p-1}-\operatorname{Tr}_{n/n-1} \left[ b_1l(\varphi(X))+\cdots+b_{d}l(\varphi^{(d)}(X))\right]_{X=\pi_n} \\
&=& -\alpha_{p-1}-p\cdot \left[b_1 l(\pi_{n-1})+\cdots+b_d l(\pi_{n-d}) \right].
\end{eqnarray*}
Then, we can also find
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} l(\pi_n) &=& -p^{d-1}\alpha_{p-1}-p\cdot b_1 \cdot \operatorname{Tr}_{n-1/n-d} l(\pi_{n-1}) \\
&& -\quad \cdots \quad -p^{d-1}\cdot b_{d-1} \cdot \operatorname{Tr}_{n-d+1/n-d} l(\pi_{n-d+1})-p^d\cdot b_d \cdot l(\pi_{n-d}).
\end{eqnarray*}
We recall that $b_1=\frac{a_1}{a_0},\cdots, b_{d-1}=\frac{a_{d-1}}{a_0}, b_d=\frac 1{a_0}$, thus from the definition of $\epsilon$, we have
\[ p^d\cdot \left( 1+\displaystyle \frac{a_1}{a_0}+\cdots+\frac{a_{d-1}}{a_0}+\frac1{a_0} \right) \cdot \epsilon=p^{d-1} \cdot \alpha_{p-1}. \]
Thus, we have
\begin{multline} \Label{Tokyo}
\operatorname{Tr}_{n/n-d}(\epsilon+l(\pi_n)) =-p\cdot \displaystyle \frac{a_1}{a_0} \cdot \operatorname{Tr}_{n-1/n-d} (\epsilon+l(\pi_{n-1})) \\
\qquad \qquad \qquad - \quad \cdots \quad - p^{d-1}\cdot \displaystyle \frac{a_{d-1}}{a_0} \cdot \operatorname{Tr}_{n-d+1/n-d} (\epsilon+ l(\pi_{n-d+1})) -p^d\cdot \frac1{a_0} \cdot (\epsilon+ l(\pi_{n-d})).
\end{multline}
Similarly, we check the following: For $0<i<d$,
\begin{eqnarray*} (\varphi^i\circ l)(\pi_n) &=& \varphi^{(i)}(\pi_n) -\varphi^i\circ J(\varphi)\circ [1-J(\varphi)+J(\varphi)^2-\cdots ] \circ X |_{\pi_n} \\
&=& \pi_{n-i} - \left[ b_1 l(\pi_{n-i-1})+\cdots+b_d l(\pi_{n-i-d}) \right].
\end{eqnarray*}
Then, we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} (\varphi^i\circ l) (\pi_n) &=& -p^{d-1}\alpha_{p-1}-p\cdot b_1 \cdot \operatorname{Tr}_{n-1/n-d} l(\pi_{n-i-1}) \\
&& -\quad \cdots \quad -p^{d-1}\cdot b_{d-1} \cdot \operatorname{Tr}_{n-d+1/n-d} l(\pi_{n-i-d+1})-p^d\cdot b_d \cdot l(\pi_{n-i-d}) \\
&=& -p^{d-1}\alpha_{p-1}-p\cdot b_1 \cdot \operatorname{Tr}_{n-1/n-d} (\varphi^i\circ l)(\pi_{n-1}) \\
&& -\quad \cdots \quad -p^{d-1}\cdot b_{d-1} \cdot \operatorname{Tr}_{n-d+1/n-d} (\varphi^i\circ l) (\pi_{n-d+1}) \\
&&-p^d\cdot b_d \cdot (\varphi^i\circ l) (\pi_{n-d}),
\end{eqnarray*}
and by repeating the argument used above, we obtain an identity analogous to (\ref{Tokyo}).
Recall
\[ \bfy(\bfl)=\sum_{(i,j)\in I_0} \sum_{k=0}^{d-1} \beta_k^{(ij)} \left(\epsilon+l(\varphi^{(k)}(X))\right) \]
from Notation~\ref{Moscow}. By the above discussion, we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} \bfy(\bfl)|_{X=\pi_n} &=& -p\cdot b_1\cdot \operatorname{Tr}_{n-1/n-d} \bfy(\bfl) |_{X=\pi_{n-1}} - p^2 \cdot b_2 \cdot \operatorname{Tr}_{n-2/n-d} \bfy(\bfl) |_{X=\pi_{n-2}} \\
&& - \quad \cdots \quad - p^d \cdot b_d\cdot \bfy(\bfl) |_{X=\pi_{n-d}}
\end{eqnarray*}
and by extending it to $L$ $\OO_{K'}$-linearly, we obtain our claim.
\end{proof}
\end{subsection}
\begin{subsection}{Construction for Kummer Extensions} \Label{Kummer}
Now we define a slightly different operator $\varphi$ on $\Zp[[X]]$ by
\[ \varphi(X)=X^p, \quad \varphi(a)=\sigma(a), a \in \Zp \]
where $\sigma$ is the $p$-th Frobenius map mentioned earlier (actually, $\sigma$ acts trivially on $\Zp$, so the action of $\varphi$ on $\Zp$ is purely symbolic.)
\begin{notation}
\begin{enumerate}
\item $K'$ is a totally ramified extension of $\Qp$, and $\zeta_p \not\in K'$. Let $\mm$ denote $\mm_{\OO_{K'}}$.
\item Set $e=[K':\Qp]$. Assume $e<p$.
\item
Choose a uniformizer $\pi$ of $K'$, and choose $\pi_n$ for every $n \geq 0$ such that
\[ \pi_0=\pi,\quad \pi_{n+1}^p=\pi_n \quad \text{ for every }\quad n \geq 0.\]
\item For any $n\geq m\geq 0$, we let $\operatorname{Tr}_{n/m}$ denote $\operatorname{Tr}_{K'(\pi_n)/K'(\pi_m)}$.
\end{enumerate}
\end{notation}
Suppose $G$ is a formal group scheme of dimension $1$ over $\OO_{K'}$, its reduced scheme $G_{/k}$ over $k=\OO_{K'}/\mm$ is smooth (thus $G$ has good reduction), and $G$ has supersingular reduction. We recall from Section~\ref{Fontaine} that a Honda system $(M, L)$ is attached to $G$.
Like Section~\ref{Some special}, we choose an $\OO_{K'}$-generator $\bfl$ of $L$ and a $\Zp[{\bf F}]$-generator $\bfm$ of $M$. Then,
\begin{eqnarray*}
\bfl = (\bfl_{ij})_{(i,j) \in I_0}, \quad \bfl_{ij}= \sum_{k=0}^{d-1} \alpha_k^{(ij)} {\bf F}^k \bfm \in \mm^i \otimes M^{(j)}
\end{eqnarray*}
for some $\alpha_k^{(ij)} \in K'$.
Again, similar to Section~\ref{Some special}, we define
\begin{notation}
\[ H(X)={\det}_{\Zp} (X\cdot 1_M-{\bf F}|M)=X^d+a_{d-1}X^{d-1}+\cdots+a_0 \in \Zp[X], \]
\[ \bar H(X)\stackrel{def}= \displaystyle \frac{H(X)}{a_0},\]
\[ J(X)\stackrel{def}=\bar H(X) -1=b_1X+b_2X^{2}+\cdots+b_dX^{d} \]
\[ l(X)\stackrel{def}= \left\{ 1-J(\varphi)+J(\varphi)^2-\cdots \right\} \circ X. \]
\end{notation}
\begin{proposition}
Recall $G(k[[X]])\cong \operatorname{Hom}_{\Zp[{\bf F}]}(M, \bar {\mathcal P}$) (\cite{Perrin-Riou-1}~Section~3.1 p.261). We define $\bfx \in G(k[[X]])$ by
\[ \bfx(\bfm)=l(X) \pmod{p\Zp[[X]]},\]
and expand $\Zp[{\bf F}]$-linearly. Then, $\bfx$ is well-defined.
\end{proposition}
\begin{proof}
See Proposition~\ref{Sino-Japanese War}.
\end{proof}
Now, we choose a lifting $\bfy \in \operatorname{Hom}_{\OO_{K'}}(L, K'[[X]])$ of $\bfx$ as follows:
\begin{notation}
\begin{enumerate}
\item Define a lifting $\tilde \bfx \in \operatorname{Hom}_{\Zp}(M, \mathcal P)$ of $\bfx$ by
\[ \tilde \bfx({\bf F}^i \bfm)=\varphi^i \circ l(X)=l(X^{p^i}), \quad i=0,1,\cdots,d-1.\]
Then, define $\bfy \in \operatorname{Hom}_{\OO_{K'}}(L, K'[[X]])$ explicitly as follows:
Write ${\bf F}^j \bfl_{ij}=\sum_{k=0}^{d-1} \beta_k^{(ij)} {\bf F}^k \bfm$ for some $\beta_k^{(ij)} \in K'$. We set
\[ \bfy(\bfl)=\sum_{(i,j) \in I_0} \sum_{k=0}^{d-1} \beta_k^{(ij)} \tilde\bfx ({\bf F}^k\bfm)= \sum_{(i,j) \in I_0} \sum_{k=0}^{d-1} \beta_k^{(ij)} l(X^{p^k}) \]
and expand $\bfy$ $\OO_{K'}$-linearly.
\item Then, we set $P=(\bfy, \bfx) \in G(L, M)(\Zp[[X]] \otimes \OO_{K'})$.
\end{enumerate}
\end{notation}
We note
\begin{eqnarray} \Label{Note}
\operatorname{Tr}_{n/n-1} \pi_n^i =0 \quad \text{for all }n >0
\end{eqnarray}
for $i\leq e$ because $e<p$.
\begin{proposition} \Label{Chicken-Burger}
For $n>d$ and $i=1,2,\cdots, e$, modulo torsions, we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} P(\pi_n^i) &=& -p\cdot b_1\cdot \operatorname{Tr}_{n-1/n-d} P(\pi_{n-1}^i) - p^2 \cdot b_2 \cdot \operatorname{Tr}_{n-2/n-d} P(\pi_{n-2}^i) \\
&& - \quad \cdots \quad -b_d\cdot p^d \cdot P(\pi_{n-d}^i).
\end{eqnarray*}
\end{proposition}
\begin{proof}
This is similar to Proposition~\ref{Despicable-Laundry-Machine} in Section~\ref{Some special}, so we will provide only a brief proof.
\begin{eqnarray*} \operatorname{Tr}_{n/n-1}l(\pi_n^i)&=& \operatorname{Tr}_{n/n-1}\left\{ \pi_n^i -[J(\varphi)-J(\varphi)^2+\cdots]\circ X|_{X=\pi_n^i} \right\} \\
&=& \operatorname{Tr}_{n/n-1} \left\{-[J(\varphi)-J(\varphi)^2+\cdots]\circ X|_{X=\pi_n^i} \right\} \\
&=& -p\cdot (J(\varphi)\circ l)(\pi_n^i).
\end{eqnarray*}
The last line is equal to
\begin{eqnarray*} -p\cdot (J(\varphi)\circ l)(\pi_n^i)&=& -p \left\{ b_1 \cdot l(\pi_n^{i\cdot p})+b_2 \cdot l(\pi_n^{i\cdot p^2})+\cdots+b_d \cdot l(\pi_n^{i\cdot p^d}) \right\} \\
&=& -p \left\{ b_1 \cdot l(\pi_{n-1}^i) + b_2 \cdot l(\pi_{n-2} ^i)+ \cdots + b_d \cdot l(\pi_{n-d}^i) \right\}.
\end{eqnarray*}
Thus by applying $\operatorname{Tr}_{n-1/n-d}$ to it, we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} l(\pi_n^i) &=& -p \cdot b_1 \cdot \operatorname{Tr}_{n-1/n-d} l(\pi_{n-1}^i)-p^2 \cdot b_2 \cdot \operatorname{Tr}_{n-2/n-d}l(\pi_{n-2}^i) \\
&&-\quad \cdots\quad -p^d \cdot b_d \cdot l(\pi_{n-d}^i).
\end{eqnarray*}
Also similar to Proposition~\ref{Despicable-Laundry-Machine}, for $j=1,\cdots, d-1$ we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} l((\pi_n^i)^{p^j})
&=& -b_1 \cdot p \cdot \operatorname{Tr}_{n-1/n-d} l((\pi_{n-1}^i)^{p^j} )- b_2 \cdot p^2\cdot \operatorname{Tr}_{n-2/n-d} l((\pi_{n-2}^i)^{p^j}) \\
&&- \quad \cdots \quad - b_d \cdot p^d \cdot l((\pi_{n-d}^i)^{p^j}) .
\end{eqnarray*}
Thus, we have
\begin{eqnarray*} \operatorname{Tr}_{n/n-d} \bfy (\bfl)|_{X=\pi_n^i} &=& -p \cdot b_1 \cdot \operatorname{Tr}_{n-1/n-d} \bfy (\bfl)|_{X=\pi_{n-1}^i} \\
&&-p^2 \cdot b_2 \cdot \operatorname{Tr}_{n-2/n-d} \bfy (\bfl)|_{X=\pi_{n-2}^i} \\
&&-\quad \cdots\quad \\
&&-p^d \cdot b_d \cdot \bfy (\bfl)|_{X=\pi_{n-d}^i}.
\end{eqnarray*}
Similar to Proposition~\ref{Despicable-Laundry-Machine}, we obtain our claim.
\end{proof}
The problem is that we do not know whether these points are useful or not. The extension $K'(\pi_{\infty})/K'$ is not even normal. Its normal closure $K'(\pi_{\infty}, \zeta_{p^{\infty}})/K'$ is not abelian. So, it seems impossible to use Iwasawa Theory, and the author cannot see any other use for them.
\end{subsection}
\begin{subsection}{The Perrin-Riou characteristics, and weak bounds for ranks} \Label{Case 1}
In this section, we apply the construction in Section~\ref{Some special}. As in that section, we suppose $k_{\infty}/\Qp$ is a totally ramified normal extension with $\operatorname{Gal}(k_{\infty}/\Qp)\cong \Z_p^{\times}$. By local class field theory, it is given by a Lubin-Tate group of height $1$ over $\Zp$. In other words, there is $\varphi(X)=X^p+\alpha_{p-1} X^{p-1}+\cdots+\alpha_1X \in \Zp[X]$ with $p|\alpha_i$, $v_p(\alpha_1)=1$ so that
\[ k_{\infty}=\cup_n \Qp(\pi_n) \]
where $\varphi(\pi_n)=\pi_{n-1}$ ($\pi_n\not=0$ for $n > 0$, $\pi_0=0$).
We let $F$ be a number field, $F_{\infty}$ be a $\Zp$-extension of $F$ (i.e., $\operatorname{Gal}(F_{\infty}/F) \cong \Zp$), $A$ be an abelian variety over $F$, and $A'$ be its dual abelian variety over $F$ so that there is a non-degenerate Weil pairing $e_n: A[n]\times A'[n]\to \Z/n\Z$ for every integer $n$, which is non-degenerate and commutative with the action of $G_F$. Let $\mathbf T=T_pA$, and let $\mathbf A\stackrel{def}=\varinjlim \mathbf T/p^n \mathbf T$.
In this section, we suppose there is only one prime $\mathfrak p$ of $F$ above $p$, $\mathfrak p$ is totally ramified over $F/\Q$, $\mathfrak p$ is totally ramified over $F_{\infty}/F$, $F_{\infty, \mathfrak p}=k_{\infty}$, and $F_{\mathfrak p}=\Qp(\pi_N)$ for some $N\geq 1$.
Let $G/\OO_{F_{\mathfrak p}}$ denote the formal completion of $A'/F_{\mathfrak p}$. As in Section~\ref{Some special}, we assume $G$ has dimension $1$, which means that the group of its logarithms has rank $1$ over $\OO_{F_{\mathfrak p}}$.
\begin{example}
An obvious example that satisfies all these conditions is an elliptic curve $E$ defined over $\rat(\zeta_{p^N})$ with good supersingular reduction at the unique prime $\mathfrak p$ above $p$.
\end{example}
We recall the points $P(\pi_n) \in G(L, M)(\mm_{\Qp(\pi_n)})$ constructed in Section~\ref{Some special}.
\begin{assumption} There is $M'>0$ so that $M' \cdot G(\OO_{k_{\infty}})_{tors}=0$.
\end{assumption}
This assumption is obviously true if $G(\OO_{k_{\infty}})_{tors}$ is finite.
\begin{definition}
\begin{enumerate}[(a)]
\item
Let $M$ be the Dieudonne module $\operatorname{Hom}(G_{/\mathbb F_p}, \hat{CW})$, and $L$ be the set of logarithms of $G$ as defined in Section~\ref{Fontaine}.
\item As in Section~\ref{Some special}, we set
\[ H(X)={\det}_{\Zp} (X\cdot 1_M-{\bf F}|M)=X^d+a_{d-1}X^{d-1}+\cdots+a_0 \in \Zp[X], \]
and
\begin{eqnarray*} \bar H(X) &\stackrel{def}=& \displaystyle \frac{H(X)}{a_0} = 1+ \displaystyle \frac{a_1}{a_0}X+\cdots+\frac{a_{d-1}}{a_0}X^{d-1}+\frac1{a_0}X^d \\
&=&1+b_1X+b_2X^{2}+\cdots+b_dX^{d}.
\end{eqnarray*}
\end{enumerate}
\end{definition}
\begin{definition} From Section~\ref{Fontaine}, recall that there is a natural map $i_G:G\to G(L, M)$, and a map $j_G:G(L, M)\to G$ so that $i_G\circ j_G=p^t$, $j_G\circ i_G=p^t$ for some $t$ which depends on the ramification index $e$. Also, let $i':G\to A'$ be the natural injection from the formal group scheme $G$ to the abelian variety $A'$. We define
\begin{enumerate}[(a)]
\item Where $e=[\Qp(\pi_N):\Qp]=[F_{\mfp}:\Qp]$, let
$$\{ \pi_{N,1},\cdots, \pi_{N, e}\}=\{ \pi_N^{\sigma} \}_{\sigma \in\operatorname{Gal}(\Qp(\pi_N)/\Qp)}.$$
\item Then, for every $n> N$ and for each $i=1,\cdots,e$, choose $\pi_{n,i}$ so that $\varphi(\pi_{n,i})=\pi_{n-1,i}$.
\item For $i=1,\cdots, e$,
\[ Q(\pi_{N+n, i})=M'\cdot i'\circ j_G\left( P(\pi_{N+n, i}) \right) \in A'(F_{n, \mfp}). \]
\end{enumerate}
\end{definition}
\begin{proposition} \Label{Lunch}
For every $n\geq d$, we have
\begin{eqnarray*} \operatorname{Tr}_{F_{n, \mathfrak p}/F_{n-d, \mathfrak p}} Q(\pi_{N+n, i}) &=& -p\cdot b_1\cdot \operatorname{Tr}_{F_{n-1, \mathfrak p}/F_{n-d, \mathfrak p}} Q(\pi_{N+n-1, i}) \\
&&- p^2 \cdot b_2 \cdot \operatorname{Tr}_{F_{n-2, \mathfrak p}/F_{n-d, \mathfrak p}} Q(\pi_{N+n-2, i}) \\
&& - \quad \cdots \quad - p^d \cdot b_d\cdot Q(\pi_{N+n-d, i}).
\end{eqnarray*}
\end{proposition}
\begin{proof}
Note that $M'$ annihilates every torsion of $G(\OO_{F_{n-d, \mfp}})$. Thus, the claim follows immediately from Proposition~\ref{Despicable-Laundry-Machine}.
\end{proof}
\begin{definition}[Relaxed Selmer groups] \Label{Relaxed Selmer}
Where $L$ is a number field,
\[ \operatorname{Sel}r(\mathbf A/L)\stackrel{def}= \ker \left( H^1(L, \mathbf A)\to \prod_{v\nmid p} \displaystyle \frac{H^1(L_v, \mathbf A)}{H^1_f(L_v, \mathbf A)} \right)\]
where
\[ H^1_f (L_v, \mathbf A)\stackrel{def}=H^1_{un}(L_v, \mathbf A)\stackrel{def}=H^1(L_v^{un}/L_v, \mathbf A^{G_{L_v^{un}}}).\]
\end{definition}
In fact, when ${G_{L_v^{un}}}$ acts trivially on $\mathbf A$ (i.e., good reduction at $v$), $H^1_{un}(L_v, \mathbf A)$ is the standard definition for a local condition $H^1_f(L_v, A)$. (Local conditions for a finite number of primes not above $p$ do not affect our result.)
Set
\[ \Gamma\stackrel{def}=\operatorname{Gal}(F_{\infty}/F),\]
\[ \Lambda\stackrel{def}=\Zp[[\Gamma]]\cong \Zp[[X]] \]
where the last isomorphism is (non-canonically) given by choosing a topological generator $\gamma$ of $\Gamma$, and set $\gamma=X+1$.
\begin{assumption} \Label{Fries}
Let $M^{\vee}$ denote the Pontryagin dual $\operatorname{Hom}(M,\rat/\Z)$. We assume
\[ \rank_{\Lambda} \operatorname{Sel}r(\mathbf A/ F_{\infty})^{\vee}=[F_{\mfp}:\Qp]=e. \]
\end{assumption}
If $\dim G$ is not $1$, then we probably need to replace $e$ by $e \cdot \dim G$ in Assumption~\ref{Fries}.
We can show Assumption~\ref{Fries} is true if $\operatorname{Sel}(\mathbf A/F)$ or $\operatorname{Sel}(\mathbf A/F_n)^{\chi}$ for some primitive character $\chi$ of $\operatorname{Gal}(F_n/F)$ is finite. Although there are some notable counterexamples to this assumption (for instance, when $F_{\infty}$ is the anti-cyclotomic extension), for all intents and purposes, it is a safe assumption.
Let
\[ S_{tor}=\left( \operatorname{Sel}r(\mathbf A/ F_{\infty})^{\vee} \right)_{\Lambda-torsion}.\]
If we assume Assumption~\ref{Fries}, then there is a short exact sequence
\begin{eqnarray} \Label{Ice-Cream} 0 \to \operatorname{Sel}r(\mathbf A/F_{\infty})^{\vee}/S_{tor} \to \Lambda^e \to C \to 0
\end{eqnarray}
for a finite group $C$.
\begin{notation}
\begin{enumerate}
\item For each $n\geq 0$,
\[ \Gamma_n=\Gamma/\Gamma^{p^n}, \quad \Lambda_n=\Zp[\Gamma_n]. \]
\item For a group $M$ on which $\Gamma$ acts,
\[ M_{/\Gamma^{p^n}}=M/\{ (1-a)\cdot m \; |\; a \in \Gamma^{p^n}, m \in M\}. \]
Equivalently, where $\gamma$ is a topological generator of $\Gamma$,
\[ M_{/\Gamma^{p^n}}=M/(1-\gamma^{p^n})\cdot M. \]
\end{enumerate}
\end{notation}
\begin{lemma} Suppose there is an exact sequence of $\Lambda$-modules
\[ 0 \to A_1\to A_2 \to A_3 \to A_4 \to 0, \]
and $A_1$ and $A_4$ are finite. Then, for every $n$, the orders of the kernel and cokernel of
\[ \left( A_2 \right)_{/\Gamma^{p^n}} \to \left( A_3 \right)_{/\Gamma^{p^n}} \]
are bounded by $|A_1|\cdot|A_4|$.
\end{lemma}
\begin{proof}
The exact sequence induces two short exact sequences
\[ 0 \to A_1 \to A_2 \to A_2/A_1 \to 0,\]
\[ 0 \to A_2/A_1 \to A_3 \to A_4 \to 0, \]
which in turn induce
\[ (A_1)_{/\Gamma^{p^n}} \to (A_2)_{/\Gamma^{p^n}} \to (A_2/A_1)_{/\Gamma^{p^n}} \to 0,\]
\[ (A_4)^{\Gamma^{p^n}} \to (A_2/A_1)_{/\Gamma^{p^n}} \to (A_3)_{/\Gamma^{p^n}} \to (A_4)_{/\Gamma^{p^n}} \to 0.\]
Our claim follows immediately.
\end{proof}
It is not difficult to show $\operatorname{Sel}r(\mathbf A/F_n) \to \operatorname{Sel}r(\mathbf A/F_{\infty})^{\Gamma^{p^n}}$ has bounded kernel and cokernel for every $n$. For the sake of argument, we assume it is an isomorphism, which will not hurt the integrity of our argument.
The map in (\ref{Ice-Cream}) induces the following:
\[
\alpha_n:
(\operatorname{Sel}r(\mathbf A/F_{\infty})^{\vee}/S_{tor})_{/\Gamma^{p^n}} \to \Lambda_n^e
\]
which induces
\[ \alpha_n': \operatorname{Sel}r(\mathbf A/F_n)^{\vee} \to \Lambda_n^e \]
by the above assumption. We note that there is a map
\[ \beta_n: A'(F_{n, \mathfrak p}) \to \operatorname{Sel}r(\mathbf A/F_n)^{\vee} \]
given by the local Tate duality which states that $A'(F_{n, \mathfrak p})$ is the Pontryagin dual of $H^1(F_{n, \mathfrak p}, \mathbf A)/A(F_{n, \mathfrak p})\otimes \Qp/\Zp$.
\begin{definition}
\begin{enumerate}[(a)]
\item Let $R(\pi_{N+n, i}) \in \Lambda_n^e$ be the image of $Q(\pi_{N+n, i})$ under $\alpha_n' \circ \beta_n$.
\item Let $\Proj_n^m$ be the natural projection from $\Lambda_m$ to $\Lambda_n$ ($m\geq n$).
\end{enumerate}
\end{definition}
Let $H^{\vee}(X)= X^d+pb_1X^{d-1}+p^2b_2X^{d-2}+\cdots+p^db_d$. By Proposition~\ref{Lunch} we have
\begin{eqnarray} \Label{Smolensk} \Proj_{n-d}^n R(\pi_{N+n, i}) +\sum_{k=1}^d p^k b_k \Proj_{n-d}^{n-k} R(\pi_{N+n-k, i})=0
\end{eqnarray}
for each $i$.
Here we recall Perrin-Riou's lemma: In the following $\Lambda_{\alpha}$ is the set of power series $f(T) \in \overline{\Q}_p[[T]]$ satisfying $|f(x)| < C |1/\alpha^n|$ for some fixed $C>0$ for every $n \geq 1$ and $x \in \C_p$ with $|x| < |1/\sqrt[p^n]p|$.
\begin{lemma}[\cite{Perrin-Riou-1}~Lemme~5.3.] \Label{Perrin-Riou-Lemma}
Let $R(T)=\sum a_kT^k$ be a monic polynomial of $\Zp[T]$ whose roots are simple, non-zero, and have $p$-adic valuation strictly less than $1$. Suppose $f^{(n)}$'s are elements of $\Lambda$ satisfying the recurrence relation
\[ \sum_k a_k f^{(n+k)} \equiv 0 \pmod {(T+1)^{p^n}-1}. \]
Then, for every root $\alpha$ of $R(T)$, there is a unique $f_{\alpha} \in \Lambda_{\alpha}$ so that for some fixed constant $c$,
\[ f^{(n)}\equiv \sum_{\alpha} f_{\alpha} \alpha^{n+1} \pmod{c^{-1}((T+1)^{p^n}-1) \Lambda} \]
for every $n$.
\end{lemma}
\begin{proof} Simple linear algebra. See \cite{Perrin-Riou-1}.
\end{proof}
Since we assume ${\bf F}$ is a topological nilpotent on $M$, all the roots of $H^{\vee}(X)=0$ have $p$-adic valuation less than $1$.
Thus, by Lemma~\ref{Perrin-Riou-Lemma} and (\ref{Smolensk}), for each root $\alpha$ of $H^{\vee}(X)$, there is $f_{\alpha, i} \in \Lambda_{\alpha}^e$ associated to $\{ R(\pi_{N+n, i}) \}_n$.
\begin{definition}
Choose a generator $g_{tor}\in \Lambda$ of the characteristic ideal of $(\operatorname{Sel}r(\mathbf A/F_{\infty})^{\vee})_{\Lambda-torsion}$. Then we let
\[ \bfL_{\alpha} \stackrel{def}= g_{tor}\times \det [f_{\alpha, 1}, \cdots, f_{\alpha, e}].\]
\end{definition}
Suppose $\chi_n$ is a primitive character of $\operatorname{Gal}(F_n/F)$, and $\zeta_{p^n}=\chi_n (\gamma)$. Suppose $g_{tor}(\zeta_{p^n}-1)\not=0$ (true if $n$ is large enough). Then, we can see that
\begin{center}
``$\operatorname{Sel}_p(\mathbf A/F_n)^{\chi_n}$ is infinite $\leftrightarrow$ the $\chi_n$-part of the cokernel of $\alpha_n'\circ\beta_n$ is infinite
$\leftrightarrow$ $\{ R(\pi_{N+n,i})^{\chi_n} \}_{i=1,\cdots,e}$ generates a subgroup of $(\Lambda_n^e)^{\chi_n}$ of infinite index
$\longrightarrow$ $\left. \det [f_{\alpha, 1}, \cdots, f_{\alpha, e}] \right|_{\gamma=\zeta_{p^n}} =0$.''
\end{center}
And, in such a case,
\begin{eqnarray} \Label{Three Go}
\corank_{\Zp[\zeta_{p^n}]} \operatorname{Sel}_p(\mathbf A/F_n)^{\chi_n} \leq e.
\end{eqnarray}
Consider the following lemma of Perrin-Riou.
\begin{lemma}[\cite{Perrin-Riou-1}~Lemme~5.2.] Let $\lambda=v_p(\alpha)$.
Suppose $f \in \Lambda_{\alpha}$. Let $s_m$ be the number of positive integers $n$ ($n\leq m$) such that $f(\zeta_{p^n}-1)=0$ for every $p^n$-th primitive root of unity $\zeta_{p^n}$. If $s_m -\lambda m \to \infty$ as $m\to\infty$, then $f=0$.
\end{lemma}
In other words, $s_m =\lambda m +O(1)$ if $f \not=0$.
She assumed $0\leq \lambda <1$. But, in fact, since we assume ${\bf F}$ is a topological nilpotent, $\lambda<1$, so that condition is unnecessary.
We can modify Perrin-Riou's proof slightly, and obtain the following:
\begin{proposition} \Label{ZeroGo}
\begin{enumerate}[(a)]
\item If $\bfL_{\alpha}\not=0$, then for some fixed $C$
\[ \corank_{\Zp} \operatorname{Sel}_p(\mathbf A/F_n) \leq e(p-1) \times \left\{ p^{n-1}+p^{n-2}+ \cdots+ p^m \right\}+C\]
where $n-m = \lambda n +O(1)$.
\item If any root $\alpha$ is a unit, then $\corank_{\Zp} \operatorname{Sel}_p(\mathbf A/F_n)$ is bounded by the number of roots of $\bfL_{\alpha}$ (counting multiplicity).
\end{enumerate}
\end{proposition}
\begin{proof}
Let $t_n$ be the number of the primitive $p^n$-th roots of unity which are roots of $\bfL_{\alpha}$.
By applying Perrin-Riou's proof for the above lemma, we get
\[ \sum_{m\leq n} t_m < e\lambda n +O(1). \]
Then we obtain our claim by (\ref{Three Go}).
If $\alpha$ is a unit, then $\bfL_{\alpha}$ is integral, so it has a finite number of roots. Thus, (b) is clear.
\end{proof}
This is a rough bound unless $H^{\vee}(X)$ has a unit root (i.e., unless the abelian variety has good ``in-between'' reduction). Probably it is possible to obtain a slightly better bound (ideally, something like ``$e(p-1)\times \{ p^{n-1}-p^{n-2}+\cdots \}$''), but not a substantially better one from $\bfL_{\alpha}$ alone, because any power series in $\Lambda_{\alpha}$ has an infinite number of roots. (For example, see R. Pollack's $\log_p^{\pm}$, \cite{Pollack}).
Thus, we need a new tool, and perhaps a new Selmer group. There is precisely such a tool in Sprung's $\sharp/\flat$-decomposition theory (\cite{Sprung}), and we will present our result in that direction in the next section.
Lastly, we want to discuss how Perrin-Riou obtained the result that $\rank E(\Q(\mu_{\infty}))$ is bounded. As stated above, it does not seem possible to obtain a finite bound from $\bfL_{\alpha}$ alone. However, she noted that her points $P_n \in E(\Q_{p, n})$ satisfy
\begin{eqnarray} \Label{PR-Relations}
\operatorname{Tr}_{\Q_{p, n+1}/\Q_{p, n}}P_{n+1}-a_p P_n+P_{n-1}=0.
\end{eqnarray}
This is more sophisticated than the relation $\operatorname{Tr}_{\Q_{p, n}/\Q_{p, n-2}}P_{n}-a_p \operatorname{Tr}_{\Q_{p, n-1}/\Q_{p, n-2}}P_{n-1}+p P_{n-2}=0$. She used these relations skilfully to obtain her result. Indeed, with the benefit of hindsight, we now know that recognizing such relations is the first step of the $\pm$-Iwasawa Theory, the $\sharp/\flat$-Iwasawa Theory, and so on.
In fact, in the next section, we will construct points satisfying relations analogous to (\ref{PR-Relations}), and use them to find a finite bound for the rank of $E(F_{\infty})$ where $F$ is ramified under some conditions. But, because the field is ramified, the relation will be given by matrices which vary depending on $n$.
\end{subsection}
\end{section}
\begin{section}{Refined Local Points, Sprung's $\sharp/\flat$-Decomposition, and Finiteness of Ranks} \Label{Case 2}
In this section, we consider only elliptic curves for simplicity. Take an elliptic curve $E$ over a number field $F$. Apart from that, our setting is the same as in Section~\ref{Case 1}. But for readers' convenience, we will repeat our conditions and assumptions.
As in that section, we suppose
\begin{enumerate}
\item $k_{\infty}/\Qp$ is a totally ramified normal extension with $\operatorname{Gal}(k_{\infty}/\Qp)\cong \Z_p^{\times}$. By local class field theory, it is given by a Lubin-Tate group of height $1$ over $\Zp$. In other words, there is $\varphi(X)=X^p+\alpha_{p-1} X^{p-1}+\cdots+\alpha_1X \in \Zp[X]$ with $p|\alpha_i$, $v_p(\alpha_1)=1$ so that
\[ k_{\infty}=\cup_n \Qp(\pi_n) \]
where $\varphi(\pi_n)=\pi_{n-1}$ ($\pi_n\not=0$ for $n > 0$, $\pi_0=0$).
\item We let $F$ be a number field, and $F_{\infty}$ be a $\Zp$-extension of $F$. Since $E$ is an elliptic curve, its dual abelian variety is itself. Let $\mathbf T=T_pE$, and let $\mathbf A\stackrel{def}=\cup_n E[p^n]$.
\item We suppose there is only one prime $\mathfrak p$ of $F$ above $p$, $\mathfrak p$ is totally ramified over $F_{\infty}/F$, $F_{\infty, \mathfrak p} = k_{\infty}$, and $F_{\mathfrak p}=\Qp(\pi_N)$ for some $N\geq 1$.
\item We set
\[ H(X)={\det}_{\Zp} (X\cdot 1_M-{\bf F}|M)=X^2-a_pX+p. \]
(Then, $a_p=1+N\mfp - \# \tilde E(\OO_{F_{\mfp}}/\mm_{\OO_{F_{\mfp}}})$). And, we set
\begin{eqnarray*} \bar H(X) \stackrel{def}= \displaystyle \frac{H(X)}p &=& 1- \displaystyle \frac{a_p}p X+\frac1p X^2 \\
&=&1+b_1X+b_2X^{2}.
\end{eqnarray*}
\item We assume $E$ has \textbf{good supersingular reduction} at $\mathfrak p$.
\end{enumerate}
\begin{subsection}{Fontaine's functor (revisited), and our assumptions.}
Let $G$ be the formal group scheme given by the formal completion of $E/F_{\mfp}$, and let $M$ be the Dieudonne module of $G_{/\mathbb F_p}$, and $L$ be the set of logarithms of $G$. As in earlier sections, we choose a $\Zp[[{\bf F}]]$-generator $\bfm$ of $M$, and an $\OO_{F_{\mfp}}$-generator $\bfl$ of $L$.
Let $A'$ denote $\OO_{F_{\mfp}}$, and let $\mm$ denote its maximal ideal. Let $e$ be the ramification index of $F_{\mfp}$. (Since it is totally ramified, $e=[F_{\mfp}:\Qp]$.) Recall that $M_{A'}$ is the direct (i.e., injective) limit of
$$ \{ \mm^i \otimes M^{(j)} \}_{I_0}$$
where $I_0$ is the set of $(i,j) \in \Z\times \Z$ so that $j\geq 0$, and
$$ \left\{
\begin{array}{ll}
i\geq 0 & \text{if } j=0,\\
i\geq p^{j-1}-je & \text{if }j\geq 1.
\end{array} \right.
$$
with maps $\varphi_{i,j}, f_{i,j}$, and $v_{i,j}$ between $\mm^i \otimes M^{(j)}$'s. Note that there is $s$ so that $p^s-(s+1)e \leq p^{j-1}-je$ for every $j\geq 1$.
\begin{proposition} \Label{CA}
Let $E=p^s-(s+1)e$. There is a map
\[ \iota: M_{A'} \to \mm^E \otimes M \]
which is well-defined, and its cokernel is finite. If $M_{A'}$ is torsion-free, $\iota$ is injective.
\end{proposition}
\begin{proof}
For each $\mm^i \otimes M^{(j)}$, we have a map $\mm^i \otimes M^{(j)}\to \mm^i \otimes M$ given by $f_{i,1}\circ f_{i,2}\circ \cdots \circ f_{i,j}$. Since $i \geq p^s-(s+1)e$, there is a map $\mm^i \otimes M\to \mm^{E} \otimes M$ given by $\varphi_{E+1,0} \circ \varphi_{E+2,0} \circ \cdots \circ \varphi_{i,0}$. The rest is clear.
\end{proof}
Then we can write
\[ \iota( \bfl)=\alpha_1 \bfm+\alpha_2 {\bf F} \bfm \]
for some $\alpha_1, \alpha_2 \in \mm^E$. We assume
\begin{assumption} \Label{Assumption K}
\[ p|\frac{\alpha_2}{\alpha_1}. \]
\end{assumption}
Doubtlessly, some formal groups associated to elliptic curves satisfy this condition, and many others do not. In fact, $\frac{\alpha_2}{\alpha_1}$ can have a negative $p$-adic valuation, although it is bounded below, and the bound depends on $e$.
Also we assume
\begin{assumption} \Label{Assumption Torsions}
The group of torsions of $E(F_{\infty, \mfp})$ is finite.
\end{assumption}
This is a reasonable assumption. In fact, we can often show that $E[p]$ is irreducible as a $G_{F_{\mfp}}$-module.
\end{subsection}
\begin{subsection}{Finite bounds for ranks} \Label{AlphaGo}
\begin{notation} \Label{BetaGo}
\begin{enumerate}[(a)]
\item Where $e=[\Qp(\pi_N):\Qp]$, let $\{ \pi_{N,1},\cdots, \pi_{N, e}\}=\{ \pi_N^{\sigma} \}_{\sigma \in\operatorname{Gal}(\Qp(\pi_N)/\Qp)}$.
\item Then, for every $n> N$ and for each $i=1,\cdots,e$, choose $\pi_{n,i}$ so that $\varphi(\pi_{n,i})=\pi_{n-1,i}$. (Then, $\pi_{n,i}$ is a uniformizer of $\Qp(\pi_n)$.)
\end{enumerate}
\end{notation}
Similar to Section~\ref{Some special} but slightly differently, we define the following.
\begin{definition} \Label{MI}
\begin{enumerate}
\item
\[ J(X)=\bar H(X)-1=b_1X+b_2X^2=-\displaystyle \frac {a_p}pX+\frac 1p X^2, \]
\item
\[ \epsilon=\displaystyle \frac{\alpha_{p-1}}{p-a_p+1}, \]
\item
\[ l(X)=[1-J(\varphi)+J(\varphi)^2-\cdots ]\circ X \]
where $\varphi\circ X^n=\varphi(X)^n$,
\item Define $\tilde \bfx \in \operatorname{Hom}_{\Zp}(M, \mathcal P)$ given by
\[ \tilde \bfx(\bfm)=\epsilon+l(X)\]
\[ \tilde \bfx({\bf F} \bfm)=\varphi \circ l(X).\]
\end{enumerate}
\end{definition}
We define the following functor.
\begin{definition}
We let $M'=\mm^E \otimes M$, and let $L'$ denote the maximal $A'$-submodule of $M'$ which contains $\iota(L)$ and is such that $L'/\iota(L)$ has finite index.
\begin{enumerate}
\item For an $A'$-algebra $g$, we define $G'(L',M)(g)$ as the set of $(u_{L'}, u_M)$ where
\[ u_{L'} \in \operatorname{Hom}_{A'}(L', \Q \otimes g),\]
\[ u_M \in \operatorname{Hom}_{\mathbf D} (M, CW(g/\mm g))\]
which naturally induces $u_M': M'(=\mm^E \otimes M) \to \mm^E \otimes CW(g/\mm g)$, so that $u_{L'}$ and $u_M'$ are identical under
\begin{eqnarray*}
\operatorname{Hom}_{A'\otimes \mathbf D}(M', \mm^E \otimes CW (g/\mm g))& \stackrel{\omega_g'}\longrightarrow & \operatorname{Hom}_{A'}(L', (\Q \otimes g)/\mm^E \cdot P'(g)) \\
& & \qquad \uparrow \\
& & \operatorname{Hom}_{A'} (L', \Q \otimes g).
\end{eqnarray*}
\item Similarly, but slightly differently, define $G'(L', M)(A'[[X]])$ as the set of $(u_{L'}, u_M)$ where (in the following, $K'$ denotes $Frac(A')$)
\[ u_{L'} \in \operatorname{Hom}_{A'}(L', K'[[X]]),\]
\[ u_M \in \operatorname{Hom}_{\mathbf D} (M, CW(\mathbb F_p[[X]]))\]
which naturally induces $u_M': M' \to \mm^E \otimes CW(\mathbb F_p[[X]])$, so that $u_{L'}$ and $u_M'$ are identical under
\begin{eqnarray*}
\operatorname{Hom}_{A'\otimes \mathbf D}(M', \mm^E \otimes CW (\mathbb F_p[[X]]))& \longrightarrow & \operatorname{Hom}_{A'}(L', K'[[X]] /\mm^E \cdot P'(A'[[X]])) \\
& & \qquad \uparrow \\
& & \operatorname{Hom}_{A'} (L', K'[[X]]).
\end{eqnarray*}
\item And, choose $M \in \Z_{\geq 0}$ such that $p^M \cdot \mm^E \subset A'$.
\end{enumerate}
\end{definition}
\begin{definition}
\begin{enumerate}
\item Recall $\tilde\bfx$ from Definition~\ref{MI}. Modulo $p\Zp[[X]]$, it induces $\bfx \in \operatorname{Hom}_{\mathbf D}(M, \overline{\mathcal P})$ satisfying
\[ \bfx(\bfm)=l(X) \pmod{p\Zp[[X]]}.\]
Then, define
\begin{enumerate}
\item We can choose an $A'$-generator $\bfl'$ of $L'$, and write it as
\[ \bfl'=\beta_1 \bfm+\beta_2 {\bf F} \bfm \]
where $\displaystyle \frac{\beta_2}{\beta_1}=\frac{\alpha_2}{\alpha_1}$. Then, define $\bfy \in \operatorname{Hom}_{A'}(L', K'[[X]])$ by
\[ \bfy(\bfl')= \beta_1 \tilde \bfx(\bfm)+\beta_2 \tilde \bfx({\bf F} \bfm)= \beta_1 \left( \epsilon + l(X) \right)+\beta_2 l(\varphi(X)) \]
and extend $A'$-linearly.
\item Then, we set
\[ P'=(\bfy, \bfx) \in G'(L', M)(A'[[X]]). \]
\end{enumerate}
\item Then, for every $n \geq N$ and $i=1,2,\cdots, e$, we obtain points $P'(\pi_{n,i}) \in G'(L', M)(\Zp[\pi_n])$ by substituting $X=\pi_{n,i}$.
\end{enumerate}
\end{definition}
We make the following assumption analogous to \cite{Kobayashi}~Proposition~8.12~(ii).
\begin{assumption} \Label{Assumption L}
$\{ P'(\pi_{n,1}), \cdots, P'(\pi_{n,e}) , P'(\pi_{n-1,1}), \cdots, P'(\pi_{n-1,e}) \}$ generates $G'(L', M)(\Zp[\pi_n])$ over $\Zp[\operatorname{Gal}(\Qp(\pi_n)/\Qp(\pi_N))]$ modulo torsions for every $n > N$.
\end{assumption}
The proof of \cite{Kobayashi} certainly applies to this assumption in some cases, and we hope it applies in most cases.
\begin{definition}We define a map $\xi:G'(L',M)\to G(L,M)$ as follows:
\begin{enumerate}[(a)]
\item First, recall that $G(L, M)(g)$ is the set of $(u_L, u_M)$ where $u_L:L\to \Qp\otimes g$ and $u_{M_{A'}}: M_{A'} \to CW_{k}(g/\mm g)_{A'}$ are identical through the diagram
\begin{eqnarray*}
\operatorname{Hom}_{A'\otimes \mathbf D}(M_{A'}, CW_k (g/\mm g)_{A'} ) & \longrightarrow & \operatorname{Hom}_{A'}(L, (\Qp\otimes g) /P'(g)) \\
& & \qquad \uparrow \\
& & \operatorname{Hom}_{A'} (L, \Qp\otimes g).
\end{eqnarray*}
We also recall that $G'(L',M)(g)$ is the set of $(u_{L'}, u_M)$ where
$u_{L'} \in \operatorname{Hom}_{A'}(L', F)$, and $u_M \in \operatorname{Hom}_{\mathbf D} (M, CW(g/\mm g))$
which naturally induces $u_M': M'(=\mm^E \otimes M) \to \mm^E \otimes CW(g/\mm g)$ satisfy that $u_{L'}=u_M'$ through the diagram
\begin{eqnarray*}
\operatorname{Hom}_{A'\otimes \mathbf D}(M', \mm^E \otimes CW (g/\mm g)) & \longrightarrow & \operatorname{Hom}_{A'}(L', (\Qp\otimes g)/\mm^E P'(g)) \\
& & \qquad \uparrow \\
& & \operatorname{Hom}_{A'} (L', \Qp\otimes g).
\end{eqnarray*}
\item We recall that $\iota: M_{A'} \to M'(=\mm^E \otimes M)$ (which is identity on $M$) also induces $\iota:L\to L'(\supset \iota(L))$. Then, $p^M\cdot\iota^*$ induces
\begin{eqnarray*}
p^M\cdot\iota^* &:& \operatorname{Hom}_{A'\otimes \mathbf D}(M', \mm^E \otimes CW (g/\mm g)) \to \operatorname{Hom}_{A'\otimes \mathbf D}(M_{A'}, CW_{k} (g/\mm g)_{A'} ), \\
p^M\cdot\iota^* &:& \operatorname{Hom}_{A'} (L', \Qp\otimes g) \to \operatorname{Hom}_{A'} (L, \Qp\otimes g), \\
p^M\cdot\iota^* &:& \operatorname{Hom}_{A'} (L', (\Qp\otimes g)/ \mm^E P'(g)) \to \operatorname{Hom}_{A'} (L, (\Qp\otimes g)/P'(g)),
\end{eqnarray*}
because $p^M\cdot \mm^E \subset A'$.
\end{enumerate}
Thus, $p^M\cdot \iota^*$ induces a map $\xi: G'(L', M)\to G(L, M)$.
\end{definition}
We also define:
\begin{definition}
Recall the isogeny $j_G: G(L, M) \to G$, and an embedding $i:G \to E$. We define
\[ P(\pi_{n,i})=i \circ j_G \circ \xi (P'(\pi_{n,i})) \]
for every $n \geq N$ and $i=1,2,\cdots, e$.
\end{definition}
\begin{proposition} \Label{Mark IV}
For now, let $\operatorname{Tr}_{n/m}$ denote $\operatorname{Tr}_{\Qp(\pi_n)/\Qp(\pi_m)}$. For $n>N$, we have
\[ \operatorname{Tr}_{n/n-1} \begin{bmatrix} P(\pi_{n, 1}) \\ \vdots \\ P(\pi_{n, e}) \end{bmatrix} = pA_{n-1} \begin{bmatrix} P(\pi_{n-1, 1}) \\ \vdots \\ P(\pi_{n-1, e}) \end{bmatrix}- A'_{n-1} \begin{bmatrix} P(\pi_{n-2, 1}) \\ \vdots \\ P(\pi_{n-2, e}) \end{bmatrix} \]
where $A_{n-1}$ is an $e\times e$ matrix with entries in $\Zp[\operatorname{Gal}(\Qp(\pi_{n-1})/\Qp(\pi_N))]$, and $A'_{n-1}$ is an $e\times e$ matrix also with entries in $\Zp[\operatorname{Gal}(\Qp(\pi_{n-1})/\Qp(\pi_N))]$ so that
\[ A'_{n-1}\equiv I_e \pmod p. \]
\end{proposition}
\begin{proof} As in the proof of Proposition~\ref{Despicable-Laundry-Machine},
\begin{eqnarray*} \operatorname{Tr}_{n/n-1} l(\pi_{n,i}) = -\alpha_{p-1}-p\cdot \left[b_1 l(\pi_{n-1, i})+b_2 l(\pi_{n-2, i}) \right]
\end{eqnarray*}
thus
\begin{eqnarray} \Label{Falcon}
\operatorname{Tr}_{n/n-1} (\epsilon+l(\pi_{n, i}))-a_p(\epsilon+l(\pi_{n-1, i}))+(\epsilon+l(\pi_{n-2, i}))=0.
\end{eqnarray}
On the other hand, again as in the proof of Proposition~\ref{Despicable-Laundry-Machine},
\[ l(\varphi(\pi_{n, i}))=\pi_{n-1, i}-\left( \displaystyle \frac {-a_p}p l(\pi_{n-2, i})+\frac 1p l(\pi_{n-3, i}) \right) \]
thus
\begin{multline} \operatorname{Tr}_{n/n-1} l(\varphi(\pi_{n, i})) = p \pi_{n-1, i}+a_pl(\pi_{n-2, i})-l(\pi_{n-3, i}) \\
= \Label{Eagle} p\pi_{n-1, i} + a_p l(\varphi(\pi_{n-1, i}))-l(\varphi(\pi_{n-2, i})).
\end{multline}
Since $\displaystyle \frac{\beta_2}{\beta_1} \pi_{n-1, i}$ is divisible by $p$, there is $d_{n-1,i} \in \mm_{\Zp[\pi_{n-1}]}$ so that
\[ (\epsilon+l(d_{n-1,i}))+\displaystyle \frac{\beta_2}{\beta_1} l(\varphi(d_{n-1,i}))=\frac{\beta_2}{\beta_1} \pi_{n-1, i}. \]
In other words, $\bfy(\bfl)(d_{n-1,i})=\beta_2 \pi_{n-1, i}$.
Let $D_{n-1, i}=P'(d_{n-1,i}) \in G'(L', M)(\Zp[\pi_{n-1}])$.
By Assumption~\ref{Assumption L},
\begin{multline} \Label{ABCD}
\begin{bmatrix} D_{n-1, 1}\\ \vdots \\ D_{n-1, e} \end{bmatrix}=
\begin{bmatrix} a_{n-1, 11} &\cdots & a_{n-1, 1e} \\ \vdots & \ddots & \vdots \\ a_{n-1, e1} &\cdots & a_{n-1, ee} \end{bmatrix} \cdot \begin{bmatrix} P'(\pi_{n-1, 1}) \\ \vdots \\ P'(\pi_{n-1, e}) \end{bmatrix} \\
+ \begin{bmatrix} a_{n-1, 11}' &\cdots & a_{n-1, 1e}' \\ \vdots & \ddots & \vdots \\ a_{n-1, e1}' &\cdots & a_{n-1, ee}' \end{bmatrix} \cdot \begin{bmatrix} P'(\pi_{n-2, 1}) \\ \vdots \\ P'(\pi_{n-2, e}) \end{bmatrix}
\end{multline}
modulo torsions for some $a_{n-1, ij}, a_{n-1, ij}' \in \Zp[\operatorname{Gal}(\Qp(\pi_{n-1})/\Qp(\pi_N))]$.
For $Q=(y_Q, x_Q) \in G'(L', M)(g)$ with $y_Q \in \operatorname{Hom}_{A'}(L', \Qp\otimes g)$, we let $\bfl(Q)$ denote $y_Q(\bfl)$. For example, $\bfl(P'(\pi_{n,i}))=\beta_1(\epsilon+l(\pi_{n,i}))+\beta_2l(\varphi(\pi_{n,i}))$.
Then, by (\ref{Falcon}), (\ref{Eagle}), and (\ref{ABCD}),
\begin{eqnarray*} \operatorname{Tr}_{n/n-1} \begin{bmatrix} \bfl(P'(\pi_{n, 1})) \\ \vdots \\ \bfl(P'(\pi_{n, e})) \end{bmatrix} &=& a_p \begin{bmatrix} \bfl(P'(\pi_{n-1, 1})) \\ \vdots \\ \bfl(P'(\pi_{n-1, e})) \end{bmatrix}- \begin{bmatrix} \bfl(P'(\pi_{n-2, 1})) \\ \vdots \\ \bfl(P'(\pi_{n-2, e})) \end{bmatrix} \\
&& +pB_{n-1}\cdot \begin{bmatrix} \bfl(P'(\pi_{n-1, 1})) \\ \vdots \\ \bfl(P'(\pi_{n-1, e})) \end{bmatrix}+ p B_{n-1}' \cdot \begin{bmatrix} \bfl(P'(\pi_{n-2, 1})) \\ \vdots \\ \bfl(P'(\pi_{n-2, e})) \end{bmatrix}
\end{eqnarray*}
where $B_{n-1}, B_{n-1}'$ are the matrices that appear in (\ref{ABCD}). Since $L'$ is one-dimensional, this implies an analogous identity for the $y$-part of $P'(\pi_{n,i})$'s, therefore an analogous identity for $P'(\pi_{n,i})$'s holds modulo torsions.
By taking $ i\circ j_G \circ (p^M\cdot \iota^*)$, we obtain our claim because $p^M$ annihilates the torsions.
\end{proof}
This relation is finer than the one used in Section~\ref{Case 1}, and we will adopt Sprung's insight of $\sharp/\flat$-decomposition to produce the characteristics $\bfL^{\sharp}, \bfL^{\flat}$ which are integral power series, which make a big difference between this section and the previous one.
Recall that $e=[F_{\mfp}:\Qp]=[F:\Q]$. As in Section~\ref{Case 1}, we assume
\[ \rank_{\Lambda} \operatorname{Sel}r(E[p^{\infty}]/F_{\infty})^{\vee}=e. \]
It is not difficult to prove this assumption when $\operatorname{Sel}_p(E[p^{\infty}]/F_n)^{\chi_n}$ is finite for some $n$ and a character $\chi_n$.
Also as in Section~\ref{Case 1}, we let
\[ S_{tor}=\left( \operatorname{Sel}r(E[p^{\infty}]/F_{\infty})^{\vee} \right)_{\Lambda-torsion}. \]
As Section~\ref{Case 1}, there is
\begin{eqnarray} 0 \to \operatorname{Sel}r(E[p^{\infty}]/F_{\infty})^{\vee}/S_{tor} \to \Lambda^e \to C \to 0
\end{eqnarray}
for a finite group $C$.
It is often true that $\operatorname{Sel}r(E[p^{\infty}] /F_n) \to \operatorname{Sel}r(E[p^{\infty}] /F_{\infty})^{\Gamma^{p^n}}$ is an isomorphism, and even when it is not, its kernel and cokernel are bounded, so are easy to deal with. In this section, for convenience, assume it is an isomorphism for each $n$. The above short exact sequence induces
\[ \alpha_n': \operatorname{Sel}r(E[p^{\infty}]/F_n)^{\vee} \to \left( \operatorname{Sel}r(E[p^{\infty}]/F_{\infty})^{\vee}/S_{tor} \right)_{/\Gamma^{p^n}} \to \Lambda_n^e.\]
\begin{definition}
\begin{enumerate}[(a)]
\item
Recall $P(\pi_{N+n, i})$ is a point of $E(\Qp(\pi_{N+n}))=E(F_{n, \mfp})$.
Recall $\Lambda_n=\Zp[\Gamma_n]$. Let $R(\pi_{N+n, i})$ be the image of $P(\pi_{N+n, i})$ under the map
\[ E(F_{n,\mfp}) \to \operatorname{Sel}r(E[p^{\infty}]/F_n)^{\vee} \stackrel{\alpha_n'}\to \Lambda_n^e. \]
\item
Choose a lifting $\tilde R(\pi_{N+n, i}) \in \Lambda^e$ of $ R(\pi_{N+n, i})$ for each $n$. Our result will not depend on the choice of $\tilde R(\pi_{N+n, i})$. Let
\[ \mathbf R_{N+n}=\begin{bmatrix} \tilde R(\pi_{N+n, 1})^t \\ \vdots \\ \tilde R(\pi_{N+n, e})^t \end{bmatrix} \in M_{e}(\Lambda) .\]
\item Let $\Phi_n \in \Lambda$ be the minimal polynomial of $\zeta_{p^n}-1$, i.e., $\Phi_n=\displaystyle \frac{(1+X)^{p^n}-1}{(1+X)^{p^{n-1}}-1}$ if $n \geq 1$, and $\Phi_0=X$. And, let $\omega_n=(1+X)^{p^n}-1$. We consider $\Phi_n$ and $\omega_n$ as elements of $\Lambda$ under the identification $\Lambda=\Zp[[X]]$.
\end{enumerate}
\end{definition}
We note $\Phi_n=\sum _{\sigma \in \operatorname{Ker}(\Gamma_n \to \Gamma_{n-1})} \sigma \pmod{\omega_n}$.
\begin{proposition} \Label{Montana}
\[ \begin{bmatrix} \mathbf R_{N+n+1} \\ \mathbf{ R}_{N+n} \end{bmatrix} =
\begin{bmatrix} pA_{N+n} & -A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix} \cdot
\begin{bmatrix} \mathbf R_{N+n} \\
\mathbf{ R}_{N+n-1}
\end{bmatrix}
\pmod{\omega_n}.
\]
\end{proposition}
\begin{proof}
This follows immediately from Proposition~\ref{Mark IV}.
\end{proof}
\begin{definition} \Label{Mateo}
We choose liftings $\tilde A_{N+n}, \tilde A'_{N+n} \in M_e(\Lambda)$ of $A_{N+n}, A'_{N+n}$ for every $n$. We set
\begin{eqnarray*} \begin{bmatrix} \tilde \bfL^{\sharp}(E) \\ \tilde \bfL^{\flat}(E) \end{bmatrix}
&\stackrel{def}=&
\varprojlim_n
\begin{bmatrix} p\tilde A_{N+1} & -\tilde A'_{N+1} \Phi_1 \\
I_e & 0 \end{bmatrix}^{-1}
\cdot
\begin{bmatrix} p \tilde A_{N+2} & -\tilde A'_{N+2} \Phi_2 \\
I_e & 0 \end{bmatrix} ^{-1}
\cdot \\
&& \cdots\quad \cdot
\begin{bmatrix} p \tilde A_{N+n} & -\tilde A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix}^{-1} \cdot
\begin{bmatrix} \mathbf R_{N+n+1} \\ \mathbf{ R}_{N+n} \end{bmatrix}. \\
\bfLalg^{\sharp}(E) &\stackrel{def}=& \det(\tilde \bfL^{\sharp}(E)), \\
\bfLalg^{\flat}(E) &\stackrel{def}=& \det(\tilde \bfL^{\flat}(E)).
\end{eqnarray*}
\end{definition}
\begin{proposition} \Label{GagConcert}
(a) $\tilde \bfL^{\sharp}(E)$ and $\tilde \bfL^{\flat}(E)$ are well-defined (i.e., the projective limits exist), and (b) their entries are in $\Lambda$.
\end{proposition}
\begin{proof}
First, we show the following:
Let $c_{n+1}=\mathbf R_{N+n+1}, d_{n+1}=\mathbf R_{N+n}$, and
\[ \begin{bmatrix} c_i \\ d_i \end{bmatrix} =\begin{bmatrix} p \tilde A_{N+i} & -\tilde A'_{N+i} \Phi_i \\
I_e & 0 \end{bmatrix}^{-1} \cdots \begin{bmatrix} p \tilde A_{N+n} & -\tilde A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix}^{-1} \begin{bmatrix} \mathbf R_{N+n+1} \\ \mathbf R_{N+n} \end{bmatrix} \]
for every $1 \leq i \leq n$. We will show that
\begin{enumerate}[(1)]
\item $c_i, d_i \in M_e(\Lambda)$,
\item $c_i \equiv \mathbf R_{N+i} \pmod{\omega_i}, d_i \equiv \mathbf R_{N+i-1} \pmod{\omega_{i-1}}$ for every $1\leq i\leq n+1$.
\end{enumerate}
We prove it inductively as follows:
\emph{Step 1.} By the definition of $c_{n+1}$ and $d_{n+1}$, the claim is true for $i=n+1$.
\emph{Step 2.} Suppose the claim is true for $c_{i+1}, d_{i+1}$. Then,
\begin{eqnarray*} \begin{bmatrix} c_i \\ d_i \end{bmatrix} &=& \begin{bmatrix} p \tilde A_{N+i} & -\tilde A'_{N+i} \Phi_i \\
I_e & 0 \end{bmatrix}^{-1} \begin{bmatrix} c_{i+1} \\ d_{i+1} \end{bmatrix} \\
&=& \displaystyle \frac1{\Phi_i} (\tilde A_{N+i}')^{-1}
\begin{bmatrix} 0& \tilde A_{N+i}' \Phi_i \\
-I_e & p\tilde A_{N+i} \end{bmatrix}
\begin{bmatrix} c_{i+1} \\ d_{i+1}
\end{bmatrix} \\
&=& \begin{bmatrix} d_{i+1} \\
\displaystyle \frac1{\Phi_i} (\tilde A_{N+i}')^{-1} ( -c_{i+1}+p \tilde A_{N+i} d_{i+1} )
\end{bmatrix}
\end{eqnarray*}
By the induction hypothesis and Proposition~\ref{Montana}, we have
\begin{eqnarray*} -c_{i+1}+p \tilde A_{N+i} d_{i+1} &=& -\mathbf R_{N+i+1}+p\tilde A_{N+i} \mathbf R_{N+i} \pmod{\omega_i} \\
&=& \tilde A_{N+i}' \Phi_i \mathbf R_{N+i-1} \pmod{\omega_i}.
\end{eqnarray*}
Thus,
\[ \displaystyle \frac1{\Phi_i} (\tilde A_{N+i}')^{-1} \left[ -c_{i+1}+p \tilde A_{N+i} d_{i+1} \right] \equiv \mathbf R_{N+i-1} \pmod{\omega_{i-1}} .\]
Thus, $c_i=d_{i+1}\equiv \mathbf R_{N+i} \pmod{\omega_i}$, and $d_i\equiv \mathbf R_{N+i-1} \pmod{\omega_{i-1}}$, and $c_i, d_i \in \Lambda^e$. Inductively, $c_1,d_1 \in M_e( \Lambda)$.
Second, we show the following:
By the above, for any $m \geq n$,
\[ \begin{bmatrix} p\tilde A_{N+n+1}& -\tilde A_{N+n+1}' \Phi_{n+1} \\
I_e & 0
\end{bmatrix}^{-1} \cdots \begin{bmatrix} p\tilde A_{N+m}& -\tilde A_{N+m}' \Phi_m \\
I_e & 0
\end{bmatrix}^{-1}
\begin{bmatrix} \mathbf R_{N+m+1} \\ \mathbf R_{N+m}
\end{bmatrix}
= \begin{bmatrix} r_{N+n+1} \\ s_{N+n+1}
\end{bmatrix} \]
where $r_{N+n+1} \equiv \mathbf R_{N+n+1} \pmod {\omega_{n+1}}$, $s_{N+n+1} \equiv \mathbf R_{N+n} \pmod {\omega_{n}}$. Let
\[ \begin{bmatrix} e_{n+1} \\ e_n \end{bmatrix}= \begin{bmatrix} r_{N+n+1} \\ s_{N+n+1}
\end{bmatrix} - \begin{bmatrix} \mathbf R_{N+n+1} \\ \mathbf R_{N+n}
\end{bmatrix}, \]
then $e_{n+1} \equiv 0 \pmod {\omega_{n+1}}$, $e_n \equiv 0 \pmod{\omega_n}$.
Let
\[ \begin{bmatrix} e_i \\ e_{i-1} \end{bmatrix} =\begin{bmatrix} p \tilde A_{N+i} & -\tilde A'_{N+i} \Phi_i \\
I_e & 0 \end{bmatrix}^{-1} \cdots \begin{bmatrix} p \tilde A_{N+n} & -\tilde A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix}^{-1} \begin{bmatrix} e_{n+1} \\ e_n \end{bmatrix} \]
for every $1 \leq i \leq n$.
For our immediate purpose, we devise the following way of counting the number of divisors of elements of $M_e(\Lambda)$. If $f=p$ or $f=\Phi_i$ for some $i$, and $f|a \in M_e(\Lambda)$, we say $f$ is a divisor of $a$. Any other irreducible polynomial that divides $a$ is ignored in our way of counting. To define the number of divisors of $a$, we count $p$ any number of times that $p$ divides $a$ (for example, if $p^k|a$, then $p$ is counted $k$ times towards the number of divisors), but we count each $\Phi_i$ that divides $a$ only once (for example, if $\Phi_i^k|a$, then $\Phi_i$ is counted only once towards the number of divisors). For example, if $p^3 (X^2+2)| a \in M_e(\Lambda)$, then $a$ has at least $3$ divisors ($p$ is counted $3$ times, and $X^2+2$ is not counted), and if $p\Phi_2^2 \Phi_3^2 |b$, then $b$ has at least $3$ divisors ($\Phi_2$ and $\Phi_3$ are each counted only once).
If $a=\sum a_i$ for some $a_i \in M_e(\Lambda)$ with each $a_i$ having at least $k$ divisors, we say $a$ is a sum of elements, each of which has at least $k$ divisors.
Suppose $e_{i+1}$ is a sum of elements, each of which has at least $n_{i+1}$ divisors, and suppose $e_i$ is a sum of elements, each of which has at least $n_{i}$ divisors. And, suppose $\omega_{i+1}|e_{i+1}$ and $\omega_i | e_i$. Then,
\[ e_{i-1}=\displaystyle \frac1{\Phi_i} \tilde A_{N+i}'^{-1} (-e_{i+1}+p\tilde A_{N+i} e_i) = \tilde A_{N+i}'^{-1}(- \displaystyle \frac 1{\Phi_i} e_{i+1}+\frac p{\Phi_i} \tilde A_{N+i} e_i)\]
and $\frac 1{\Phi_i} e_{i+1}$ and $\frac p{\Phi_i} \tilde A_{N+i} e_i$ are respectively a sum of elements, each of which has at least $n_{i+1}-1$ divisors, and a sum of elements, each of which has at least $n_{i}$ divisors. Both are divisible by $\omega_{i-1}$. Thus, $e_{i-1}$ is a sum of elements, each of which has at least $\operatorname{min}(n_{i+1}-1, n_i)$ divisors, and is divisible by $\omega_{i-1}$.
Since $\omega_{n+1}|e_{n+1}$ and $\omega_n|e_n$, it is not difficult to see that $e_1$ and $e_0$ are sums of elements, each of which has at least $n/2$ divisors.
For $i\geq 1$, $\Phi_i\equiv 0 \pmod{(p, X^{p^{i-1}})}$, so when $0\leq \alpha_1 < \cdots < \alpha_{n'}$ for some $n'$,
\begin{eqnarray*} p^j \Phi_{\alpha_1}\cdots \Phi_{\alpha_{n'}} &\equiv & p^j p^{n'-i} * \pmod{X^{p^{i-1}}} \\
&=& p^{n'-i+j} * \pmod{X^{p^{i-1}}}
\end{eqnarray*}
($*$ indicates any element). Thus, it follows that
\[ \begin{bmatrix} e_1 \\ e_0 \end{bmatrix}
\equiv \begin{bmatrix} 0\\ 0 \end{bmatrix}
\pmod{p^{n/2-i}, X^{p^{i-1}}}. \]
In other words,
\begin{eqnarray*}
\bfL_{n,m}& \stackrel{def}= & \begin{bmatrix} p\tilde A_{N+1}& -\tilde A_{N+1}' \Phi_{1} \\
I_e & 0
\end{bmatrix}^{-1}
\cdots \begin{bmatrix} p\tilde A_{N+m}& -\tilde A_{N+m}' \Phi_m \\
I_e & 0
\end{bmatrix}^{-1}
\begin{bmatrix} \mathbf R_{N+m+1} \\ \mathbf R_{N+m}
\end{bmatrix} \\
&&
-\begin{bmatrix} p\tilde A_{N+1}& -\tilde A_{N+1}' \Phi_{1} \\
I_e & 0
\end{bmatrix}^{-1}
\cdots \begin{bmatrix} p\tilde A_{N+n}& -\tilde A_{N+n}' \Phi_n \\
I_e & 0
\end{bmatrix}^{-1}
\begin{bmatrix} \mathbf R_{N+n+1} \\ \mathbf R_{N+n}
\end{bmatrix} \\
&=&
\begin{bmatrix} 0 \\ 0 \end{bmatrix}
\pmod{p^{n/2-i}, X^{p^{i-1}}},
\end{eqnarray*}
so $\bfL_{n,m}$ converges to $0$ uniformly as $n,m \to \infty$.
Thus, we obtain our claim.
\end{proof}
In the proof of Proposition~\ref{GagConcert}, we see that there are $\mathbf R_{N+n}^{(m)}, \mathbf R_{N+n-1}^{(m)} \in M_e(\Lambda)$ so that $\mathbf R_{N+n}^{(m)} \equiv \mathbf R_{N+n} \pmod{\omega_n}, \mathbf R_{N+n-1}^{(m)}\equiv \mathbf R_{N+n-1} \pmod{\omega_{n-1}}$, and
\[ \begin{bmatrix} \mathbf R_{N+n}^{(m)} \\ \mathbf R_{N+n-1}^{(m)} \end{bmatrix} =\begin{bmatrix} p \tilde A_{N+n} & -\tilde A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix}^{-1} \cdots \begin{bmatrix} p \tilde A_{N+m} & -\tilde A'_{N+m} \Phi_m \\
I_e & 0 \end{bmatrix}^{-1} \begin{bmatrix} \mathbf R_{N+m+1} \\ \mathbf R_{N+m} \end{bmatrix}. \]
From Definition~\ref{Mateo},
\begin{multline*} \begin{bmatrix} p \tilde A_{N+n-1} & -\tilde A'_{N+n-1} \Phi_{n-1} \\
I_e & 0 \end{bmatrix}
\cdot \cdots \cdot
\begin{bmatrix} p \tilde A_{N+2} & - \tilde A'_{N+2} \Phi_2 \\
I_e & 0 \end{bmatrix}
\cdot
\begin{bmatrix} p \tilde A_{N+1} & -\tilde A'_{N+1} \Phi_1 \\
I_e & 0 \end{bmatrix}
\cdot
\begin{bmatrix} \tilde \bfL^{\sharp}(E) \\ \tilde \bfL^{\flat}(E) \end{bmatrix} \\
= \varprojlim_m \begin{bmatrix} \mathbf R_{N+n}^{(m)} \\ \mathbf{ R}_{N+n-1}^{(m)} \end{bmatrix} ,
\end{multline*}
and for a primitive $p^n$-th root of unity $\zeta_{p^n}$, $\varprojlim \mathbf R_{N+n}^{(m)}|_{X=\zeta_{p^n}-1}= \mathbf R_{N+n}|_{X=\zeta_{p^n}-1}$.
Then, naturally we would hope for the following:
Let $\chi$ be a finite character of $\Gamma$ satisfying $\chi(\gamma)=\zeta_{p^{n}}$. We may also consider it as a character of $\operatorname{Gal}(F_n/F)$.
It is not hard to see that assuming $S_{tor}^{\chi}$ is finite, $\det(\mathbf R_{N+n}|_{X=\zeta_{p^n}-1})=0$ if and only if $\operatorname{Sel}_p(E[p^{\infty}]/F_n)^{\chi}$ is infinite. Since $\bfLalg^{\sharp}=\det(\tilde\bfL^{\sharp}(E))$ and $\bfLalg^{\flat}=\det(\tilde\bfL^{\flat}(E))$ are in $\Lambda$, and therefore have a finite number of roots, we would hope that it implies that $\operatorname{Sel}_p(E[p^{\infty}]/F_n)^{\chi}$ is infinite for a finite number of characters $\chi$. But, the author finds it a little difficult to show that because we may have $\det R_{N+n}|_{X=\zeta_{p^n}-1}=0$ even when $\bfLalg^{\sharp}(\zeta_{p^n}-1)\not=0$ and $\bfLalg^{\flat}(\zeta_{p^n}-1)\not=0$.
Instead, we make a more modest claim:
\begin{proposition} \Label{DDT}
Suppose $\bfLalg^{\sharp}$ and $\bfLalg^{\flat}$ are not 0, and $a_p$ and $\frac{\beta_2}{\beta_1}$ are divisible by $p^T$ for some $T>0$. Suppose $\chi$ is a primitive character of $\Gamma_n$ for sufficiently large $n$. Also, suppose that
\begin{enumerate}[(a)]
\item if $n$ is odd, $p^S\nmid \bfLalg^{\sharp}$ for some $S$ with $S+\displaystyle \frac{ep}{(p-1)^2}<T$, or
\item if $n$ is even, $p^{S'} \nmid \bfLalg^{\flat}$ for some $S'$ with $S'+\displaystyle \frac{ep}{(p-1)^2}<T$.
\end{enumerate}
Then, $E(F_n)^{\chi}$ and $\Sha(E/F_n)[p^{\infty}]^{\chi}$ are finite.
\end{proposition}
\begin{proof}
First, we note $p^{T-1}|B_{i}$ and $p^{T-1}|B_{i}'$ for each $i$ where $B_{i}, B_{i}'$ are the matrices in the proof of Proposition~\ref{Mark IV}, thus $p^{T-1}|A_{i}$ and $A_{i}'\equiv I_e \pmod{p^T}$. Then, we can choose $\tilde A_i, \tilde A_i'$ so that $p^{T-1}|\tilde A_i, \tilde A_i' \equiv I_e \pmod{p^T}$.
Thus, if $n$ is odd, for $\zeta_{p^n}=\chi(\gamma)$,
\begin{multline*}
\left. \begin{bmatrix} p \tilde A_{N+n-1} & -\tilde A'_{N+n-1} \Phi_{n-1} \\
I_e & 0 \end{bmatrix}
\cdot \cdots \cdot
\begin{bmatrix} p \tilde A_{N+2} & -\tilde A'_{N+2} \Phi_2 \\
I_e & 0 \end{bmatrix}
\cdot
\begin{bmatrix} p \tilde A_{N+1} & -\tilde A'_{N+1} \Phi_1 \\
I_e & 0 \end{bmatrix} \right|_{X=\zeta_{p^n}-1}\\
\equiv
\begin{bmatrix} 0 & -\Phi_{n-1}(\zeta_{p^n}-1) I_e \\ I_e&0 \end{bmatrix} \cdot \cdots \cdot \begin{bmatrix} 0 & -\Phi_1(\zeta_{p^n}-1) I_e \\ I_e&0 \end{bmatrix}
=
\begin{bmatrix} a I_e&0 \\ 0&bI_e \end{bmatrix} \pmod{p^T}
\end{multline*}
for some $a,b$ with $v_p(a), v_p(b) < p/(p-1)^2$, and if $n$ is even, it is congruent to $\begin{bmatrix} 0&a I_e \\ b I_e &0 \end{bmatrix}$.
Then, in case (a), $a \tilde \bfL^{\sharp}(\zeta_{p^n}-1)\equiv \mathbf R_{N+n}(\zeta_{p^n}-1) \pmod {p^T}$, and in case (b), $a\tilde \bfL^{\flat}(\zeta_{p^n}-1)\equiv \mathbf R_{N+n}(\zeta_{p^n}-1) \pmod {p^T}$. If $n$ is sufficiently large, $v_p(a^e \bfLalg^{\sharp}(\zeta_{p^n}-1))<T$ and $v_p(a^e \bfLalg^{\flat}(\zeta_{p^n}-1))<T$ respectively by our assumption, thus $\det(\mathbf R_{N+n}(\zeta_{p^n}-1)) \not\equiv 0 \pmod{p^T}$, and also $S_{tor}^{\chi}$ is finite for a sufficiently large $n$. Thus our claim follows.
\end{proof}
Then we immediately have:
\begin{theorem} \Label{DDR}
Suppose
\begin{enumerate}
\item $a_p$ and $\frac{\beta_2}{\beta_1}$ are divisible by $p^T$ for some $T>0$,
\item $p^S\nmid \bfLalg^{\sharp}, p^{S} \nmid \bfLalg^{\flat}$ for some $S$ with $S+\displaystyle \frac{ep}{(p-1)^2}<T$.
\end{enumerate}
Then, $E(F_{\infty})/E(F_{\infty})_{tor}$ is a group of finite rank, and $\Sha(E/F_n)[p^{\infty}]^{\chi}$ is finite for all sufficiently large $n$, and every primitive character $\chi$ of $\operatorname{Gal}(F_n/F)$.
\end{theorem}
We note that it is often relatively easy to show that $E(F_{\infty})$ has a finite number of $p$-power torsions.
\end{subsection}
\begin{subsection}{Appendix: Sprung's $\sharp/\flat$-Selmer groups}
\Label{Appendix}
Even though we do not use them in this paper, using the points constructed in Section~\ref{AlphaGo}, we can construct $\operatorname{Sel}_p^{\sharp}(E/F_{\infty})$ and $\operatorname{Sel}_p^{\flat}(E/F_{\infty})$ as Sprung did (\cite{Sprung}).
\begin{definition}[Perrin-Riou map]
\begin{enumerate}
\item Let $(\cdot, \cdot)_{N+n}$ denote the following pairing given by the local class field theory:
\[ (\cdot, \cdot)_{N+n}: H^1(\Qp(\pi_{N+n}), T_p) \times H^1(\Qp(\pi_{N+n}), T_p) \to \Zp.\]
Recall $\Gamma_n=\operatorname{Gal}(F_n/F) \cong \operatorname{Gal}(\Qp(\pi_{N+n})/ \Qp(\pi_N))$, $\Gamma=\varprojlim \Gamma_n$, and $\Lambda=\Zp[[\Gamma]]\cong \Zp[[X]]$ (non-canonically).
For $z \in H^1(\Qp(\pi_{N+n}), T_p)$ and $x = [x_1,\cdots, x_e]^t \in E(\Qp(\pi_{N+n}))^e$,
\[ \bfP_{N+n, x}(z)\stackrel{def}= \begin{bmatrix} \sum_{\sigma \in \Gamma_n} (z, x_1^{\sigma})_{N+n} \cdot \sigma \\
\sum_{\sigma \in \Gamma_n} (z, x_2^{\sigma})_{N+n} \cdot \sigma \\
\vdots \\
\sum_{\sigma \in \Gamma_n} (z, x_e^{\sigma})_{N+n} \cdot \sigma
\end{bmatrix}
\in \Zp[\Gamma_n]^e.\]
\item Also, let $\tilde \bfP_{N+n, x}(z)$ denote its lifting to $\Zp[\Gamma_{n+1}]^e$.
\end{enumerate}
\end{definition}
\begin{notation}
\begin{enumerate}
\item Let $x_{N+n}$ denote
\[ x_{N+n}=[ P(\pi_{N+n, 1}), \cdots, P(\pi_{N+n, e}) ]^t.
\]
\item Let $\Proj_{n/m}$ denote the natural projection from $\Zp[\Gamma_n]$ to $\Zp[\Gamma_m]$.
\end{enumerate}
\end{notation}
By Proposition~\ref{Mark IV}, for any $z=(z_n) \in \varprojlim_{n \geq N} H^1(\Qp(\pi_n), T_p)$,
\[ \Proj_{n+1/n} \begin{bmatrix} \bfP_{N+n+1, x_{N+n+1}} (z_{N+n+1}) \\ \tilde \bfP_{N+n, x_{N+n}} (z_{N+n}) \end{bmatrix} =
\begin{bmatrix} pA_{N+n} & -A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix} \cdot
\begin{bmatrix} \bfP_{N+n, x_{N+n}} (z_{N+n}) \\
\tilde\bfP_{N+n-1, x_{N+n-1}} (z_{N+n-1})
\end{bmatrix}
\]
Following Sprung (\cite{Sprung}), we can define the following:
\begin{definition}
From the previous section we recall the liftings $\tilde A_{N+n}, \tilde A'_{N+n} \in M_e(\Lambda)$ of $A_{N+n}, A'_{N+n} \in M_e(\Lambda_n)$ for every $n$.
For $z=(z_n) \in \varprojlim_{n \geq N} H^1(\Qp(\pi_n), T_p)$,
\begin{eqnarray*} \begin{bmatrix} \Col^{\sharp}(z) \\ \Col^{\flat}(z) \end{bmatrix}
&\stackrel{def}=& \varprojlim_n
\begin{bmatrix} p \tilde A_{N+1} & -\tilde A'_{N+1} \Phi_1 \\
I_e & 0 \end{bmatrix}^{-1}
\cdot
\begin{bmatrix} p \tilde A_{N+2} & -\tilde A'_{N+2} \Phi_2 \\
I_e & 0 \end{bmatrix}^{-1}
\cdot \\
&& \cdots\quad \cdot
\begin{bmatrix} p \tilde A_{N+n} & -\tilde A'_{N+n} \Phi_n \\
I_e & 0 \end{bmatrix}^{-1} \cdot
\begin{bmatrix} \bfP_{N+n+1, x_{N+n+1}}(z_{N+n+1}) \\ \tilde \bfP_{N+n, x_{N+n}}(z_{N+n}) \end{bmatrix}.
\end{eqnarray*}
\end{definition}
Similar to Proposition~\ref{GagConcert}, we can show $\Col^{\sharp}(z), \Col^{\flat}(z) \in \Lambda^e$. We omit its proof.
\begin{definition} \Label{Sharp Distinction}
We recall the definition of the relaxed Selmer group $\operatorname{Sel}r$ from Definition~\ref{Relaxed Selmer}.
We define
\[ \operatorname{Sel}_p^{\sharp}(E[p^{\infty}] /F_{\infty}) \stackrel{def}= \ker\left( \operatorname{Sel}r(E[p^{\infty}] /F_{\infty}) \to \displaystyle \frac {H^1(F_{\infty, \mfp}, E[p^{\infty}])}{\left( \ker \Col^{\sharp}\right)^{\perp}} \right) \]
where $\left( \ker \Col^{\sharp}\right)^{\perp}$ denotes the orthogonal complement of $\ker \Col^{\sharp}$ with respect to the local pairing $\varprojlim_n H^1(\Qp(\pi_n), T_p) \times H^1(\Qp(\pi_{\infty}), E[p^{\infty}])\to \Qp/\Zp$.
Similarly, we define $\operatorname{Sel}_p^{\flat}(E/F_{\infty})$.
\end{definition}
It seems likely that $\operatorname{Sel}_p^{\sharp}(E[p^{\infty}]/F_{\infty})$ and $\operatorname{Sel}_p^{\flat}(E[p^{\infty}]/F_{\infty})$ are $\Lambda$-cotorsion under some suitable assumptions. In fact, we can imagine
\begin{eqnarray} \Label{Speculation One}
\operatorname{char}(\operatorname{Sel}_p^{\sharp}(E[p^{\infty}] /F_{\infty})^{\vee})=\operatorname{char}(S_{tor})\cdot (\bfLalg^{\sharp}),
\end{eqnarray}
\begin{eqnarray} \Label{Speculation Two}
(\text{resp.} \quad \operatorname{char}(\operatorname{Sel}_p^{\flat}(E[p^{\infty}] /F_{\infty})^{\vee})=\operatorname{char}(S_{tor}) \cdot (\bfLalg^{\flat}).)
\end{eqnarray}
But, the ways that $\operatorname{Sel}_p^{\sharp/\flat}$ and $\bfLalg^{\sharp/\flat}$ are defined seem to be dual to each other. Thus, we suspect that to prove such an equality, we may need some kind of self-duality (similar to the Tate local duality) of the local conditions such as the one proven in \cite{Kim-1}. The author cannot say with certainty that such self-duality exists for the local conditions in Definition~\ref{Sharp Distinction}, but, an analogous result has been proven for a different but related Selmer group (\cite{Lei-Ponsinet}), and the author is hopeful that equalities such as (\ref{Speculation One}) and (\ref{Speculation Two}) will be proven soon.
\end{subsection}
\end{section}
\end{document} |
\begin{document}
\pagenumbering{arabic}
\title{A primal-dual majorization-minimization method \\
for large-scale linear programs
}
\author{
Xin-Wei Liu,\thanks{Institute of Mathematics, Hebei University of Technology, Tianjin 300401, China. E-mail:
mathlxw@hebut.edu.cn. The research is supported by the Chinese NSF grants (nos. 12071108 and 11671116).} \
Yu-Hong Dai,\thanks{Academy of Mathematics and Systems Science, Chinese Academy of Sciences, Beijing 100190, China \& School of Mathematical Sciences, University of Chinese Academy of Sciences, Beijing 100049, China. This author is supported by the NSFC grants (nos. 12021001, 11991021, 11991020 and 11971372), the National Key R\&D Program of China (nos. 2021YFA 1000300 \& 2021YFA 1000301),
the Strategic Priority Research Program of Chinese Academy of Sciences (no. XDA27000000).} \
and\
Ya-Kui Huang\thanks{Institute of Mathematics, Hebei University of Technology, Tianjin 300401, China.
This author is supported by the Chinese NSF grant (no. 11701137).}
}
\maketitle
\noindent\underline{\hspace*{6.3in}}
\par
\vskip 10 true pt \noindent{\small{\bf Abstract.}
We present a primal-dual majorization-minimization method for solving large-scale linear programs. A smooth barrier augmented Lagrangian (SBAL) function with strict convexity for the dual linear program is derived. The majorization-minimization approach is naturally introduced to develop the smoothness and convexity of the SBAL function. Our method only depends on a factorization of the constant matrix independent of iterations and does not need any computation on step sizes, thus can be expected to be particularly appropriate for large-scale linear programs. The method shares some similar properties to the first-order methods for linear programs, but its convergence analysis is established on the differentiability and convexity of our SBAL function. The global convergence is analyzed without prior requiring either the primal or dual linear program to be feasible. Under the regular conditions, our method is proved to be globally linearly convergent, and a new iteration complexity result is given.
\noindent{\bf Key words:} linear programming, majorization-minimization method, augmented Lagrangian, global convergence, linear convergence
\noindent{\bf AMS subject classifications.} 90C05, 90C25
\noindent\underline{\hspace*{6.3in}}
\vfil\eject
}
\sect{Introduction}
We consider solving the linear program in the dual form \begin{equation}
\min_y~-b^Ty\quad\hbox{s.t.}~A^Ty\le c, \label{prob1}\end{equation}
where $y\in\Re^m$ is the unknown, $b\in\Re^m$, $A\in\Re^{m\times n}$ and $c\in\Re^n$ are given data. Corresponding to the dual problem \reff{prob1}, the primal linear program has the form \begin{equation}
\min_x~c^Tx\quad\hbox{s.t.}~Ax=b,~x\ge 0,\label{prob2}\end{equation}
where $x\in\Re^n$. Problem \reff{prob2} is called the standard form of linear programming. In the literature, most of the methods and theories for linear programming are developed with the standard form (see, for example, \cite{NocWri99,SunYua06,wright97,ye}). Moreover, it is often assumed that $m<n$, $\hbox{rank}(A)=m$.
The simplex methods were the most efficient and important methods for linear programming before the 1980s. These methods search for the optimal solution among vertices of a polyhedral set along the boundary of the feasible region of linear programming. The initial point should be a so-called basic feasible solution corresponding to a vertex of the polyhedron, which may be obtained by solving some auxiliary linear programming problem with a built-in starting point. The main computation for a new iteration point is the solution of the linear systems \begin{equation}
Bu=a,\quad B^Tv=d, \label{sec1f1}\end{equation}
where $u\in\Re^m$ and $v\in\Re^m$ are the unknowns, $B\in\Re^{m\times m}$ is a nonsingular sub-matrix of $A$, one column of which is exchanged in every iteration, and $a\in\Re^m$ and $d\in\Re^m$ are some given vectors. The simplex methods are popular since the systems in \reff{sec1f1} are thought to be easily solved.
It was discovered in \cite{KleeM72}, however, that the simplex approach could be inefficient for certain pathological problems since the number of iterations (also known as the worst-case time complexity) was exponential in the sizes of problems. In contrast, the interior-point approach initiated in 1984 by Karmarkar \cite{karmar} has been proved to be of the worst-case polynomial time complexity, a much better theoretical property than that for the simplex methods. Up to now, the best worst-case polynomial time complexity of interior-point methods is $\mathcal{O}(\sqrt{n}\log\frac{1}{\epsilon})$ (see, for example, \cite{wright97,ye}).
In general, interior-point methods converge to the optimal solution along a central path of the feasible polytope. The central path is usually defined by a parameter-perturbed Karush-Kuhn-Tucker (KKT) system. The system can be induced by the KKT conditions of the logarithmic-barrier problem \begin{equation}\min~c^Tx-\mu\sum_{i=1}^n\ln x_i\quad \hbox{s.t.}~Ax=b, \label{sec1f2}\end{equation}
where $\mu>0$ is the barrier parameter, $x_i>0$ for $i=1,\ldots,n$ (that is, $x$ should be an interior-point). It is known that the well-definedness of the central path depends on the nonemptiness of the set of the primal-dual interior-points \[
\mathcal{F}:=\{(x,y,s)\mid Ax=b,~A^Ty+s=c,~x>0,~s>0\}. \]
Although there are various interior-point methods, such as the affine-scaling methods, the logarithmic-barrier methods, the potential-reduction methods, the path-following methods, etc., all these methods share some common features that distinguish them from the simplex methods.
Distinct from the simplex methods, which start from a feasible point, the interior-point methods require the initial point to be an interior-point, which may not be feasible to the problem. While the simplex methods usually require a larger number of relatively inexpensive iterations, every interior-point iteration needs to solve a system of the form \begin{equation}
AS^{-1}XA^Tv=d, \label{sec1f3}\end{equation}
where $S=\hbox{diag}(s)$ and $X=\hbox{diag}(x)$. This is generally more expensive to compute than \reff{sec1f1} but can make significant progress towards the solution. In particular, as the primal and dual iterates tend to the solutions of the primal and dual problems, some components of $x$ and $s$ can be very close to zero, which can bring about both huge and tiny values of the elements of $S^{-1}X$ and an ill-conditioned Jacobian matrix of the system \reff{sec1f3} (see \cite{NocWri99}). Some advanced methods for improving classic interior-point methods have been proposed, including the sparse matrix factorization, the Krylov subspace method and the preconditioned conjugate gradient method (see, for example, \cite{BYZ00,CMTH19,Davis06,FGZ14,Gon12a,Gon12b}).
Recently, some first-order methods for solving linear programs and linear semidefinite programming have been presented, see \cite{LMYZ,WZW} and the references therein. These methods are mainly the alternating direction augmented Lagrangian methods of multipliers (ADMM)-based methods, and can be free of solving systems \reff{sec1f1} and \reff{sec1f3}. Since the solved problems may be reformulated in different ways which result in various augmented Lagrangian function, these methods may be distinct in the augmented Lagrangian subproblems. For example, Lin et al. \cite{LMYZ} proposed their ADMM-based interior-point method based on the well-behaved homogeneous self-dual embedded linear programming model \cite{ye}, while the method in \cite{WZW} is established on using the classic augmented Lagrangian function and the projection on the cone of positive semidefinite matrices.
\subsection{Our contributions.}
We present a primal-dual majorization-minimization method on basis of solving linear programs in dual form \reff{prob1}. In our method, $y_i~(i=1,\ldots,m)$ are the primal variables, and $x_j~(j=1,\ldots,n)$ the dual variables. The method is originated from a combination of the Fiacco-McCormick logarithmic-barrier method and the Hestenes-Powell augmented Lagrangian method (see \cite{LDHmc} for more details on general nonlinear inequality-constrained optimization). A smooth barrier augmented Lagrangian (SBAL) function with strict convexity for the dual linear program is derived. Based on the smoothness and convexity of SBAL function, a majorization surrogate function is naturally designed to find the approximate minimizer of the augmented Lagrangian on primal variables, and the dual estimates are derived by a step for maximizing a minorization surrogate function of the augmented Lagrangian on dual variables. Our method can avoid the computation on the ill-conditioned Jacobian matrix like \reff{sec1f3} and does not solve some iteration-varying system \reff{sec1f1} or \reff{sec1f3} like the simplex methods and interior-point methods.
Our method originates from the logarithmic-barrier reformulation of problem \reff{prob1}, thus can be thought of as an interior-point majorization-minimization method, and shares some features similar to \cite{LMYZ}. It can also be taken as a smooth version of \cite{WZW} for linear programs, but it does not depend on any projection and computes more steps on primal iterates. Differing from the fixed-point framework for proving convergence in \cite{WZW}, based on the smoothness and convexity of our augmented Lagrangian, we can establish the global convergence and prove the results on convergence rate and iteration complexity based on the well-developed theories on convex optimization \cite{Nest18}.
Our proposed method only needs the factorization of the constant matrix $AA^T$, which is distinguished from the existing simplex methods and interior-point methods for linear programs necessary to solve either \reff{sec1f1} or \reff{sec1f3} varied in every iteration. Since the factorization is independent of iterations and can be done in preprocessing, our method can be implemented easily with very cheap computations, thus is especially suitable for large-scale linear programs. In addition, our method does not need any computation on step sizes, which is the other outstanding feature of our method in contrast to the existing interior-point methods for linear programs. Similar to \cite{LMYZ}, the global convergence is analyzed without prior requiring either the primal or dual linear program to be feasible. Moreover, under the regular conditions, we prove that our method can be of globally linear convergence, and a new iteration complexity result is obtained.
\subsection{Some related works.}
The augmented Lagrangian methods minimize an augmented Lagrangian function approximately and circularly with update of multipliers. The augmented Lagrangian function has been playing a very important role in the development of effective numerical methods and theories for convex and nonconvex optimization problems (see some recent references, such as \cite{BM14,CDZ15,DLS17,GKR20,GR12,GP99,HL17,HFD16,LMYZ,LiuDai18,LDHmm,WZW}). The augmented Lagrangian was initially proposed by Hestenes \cite{hesten} and Powell \cite{powell} for solving optimization problems with only equality constraints. The Hestenes-Powell augmented Lagrangian method was then generalized by Rockafellar \cite{rockaf1} to solve the optimization problems with inequality constraints. Since most of the augmented Lagrangian functions for inequality-constrained optimization depend on some kind of projection, the subproblems on the augmented Lagrangian minimization are generally solved by the first-order methods.
The majorization-minimization (MM) algorithm operates on a simpler surrogate function that majorizes the objective in minimization \cite{Lange}. Majorization can be understood to be a combination of tangency and domination. Similarly, we have the minorization-maximization algorithm when we want to maximize an objective. The MM principle can be dated to Ortega and Rheinboldt
\cite{OR70} in 1970, where the majorization idea has been stated clearly in the context of line searches. The famed expectation-maximization (EM) principle \cite{MK08} of computational statistics is a special case of the MM principle. So far, MM methods have been developed and applied efficiently for imaging and inverse problems, computer vision problems, and so on (for example, see \cite{AIG06,cohen96,FBN06,Lange,QBP15}).
Recently, by combining the Hestenes-Powell augmented Lagrangian and the interior-point logarithmic-barrier technique (\cite{LiuSun01,LiuYua07,NocWri99,SunYua06}), the authors of \cite{LDHmc} introduce a novel barrier augmented Lagrangian function for nonlinear optimization with general inequality constraints. Distinct from the classic augmented Lagrangian function for inequality constrained optimization only first-order differentiable, the newly proposed one shares the same-order differentiability with the objective and constraint functions and is convex when the optimization is convex. In order to distinguish the new barrier augmented Lagrangian function to those proposed in \cite{GKR20,GP99}, we refer to it as the smooth barrier augmented Lagrangian (SBAL for short). For linear problems \reff{prob1} and \reff{prob2}, the SBAL functions are strictly convex and concave, respectively, with respect to the primal and dual variables. In particular, the SBAL functions are well defined without requiring either primal or dual iterates to be interior-points. These outstanding features of the SBAL functions provide natural selections for the majorization-minimization methods.
\subsection{Organization and notations.}
Our paper is organized as follows. In section 2, we describe the application of our augmented Lagrangian method in \cite{LDHmc} to the linear programs and present the associated preliminary results. The majorized functions and our primal-dual majorization-minimization method are proposed in section 3. The analysis on the global convergence and the convergence rates is done, respectively, in sections 4 and 5. We conclude our paper in the last section.
Throughout the paper, all vectors are column vectors. We use capital letters to represent matrices, and a capital letter with a subscript such as $A_i$ means the $i$th column of matrix $A$. The small letters are used to represent vectors, and a small letter with a subscript such as $s_i$ means the $i$th component of vector $s$. The capital letter $S$ means the diagonal matrix of which the components of vector $s$ are the diagonal elements. In general, we use the subscripts $k$ and $\ell$ to illustrate the letters to be related to the $k$th and $\ell$th iterations, and $i$ and $j$ the $i$th and $j$th components of a vector or the $i$th and $j$th sub-vectors of a matrix. In other cases, it should be clear from the context. To quantify the convergence of sequences, we introduce the weighted norm $\|y\|_M=\sqrt{y^TMy}$, where $y$ is a column vector, $M$ is either a positive semi-definite or positive definite symmetric matrix with the same order as $y$. The symbol $e$ is the all-one vector, for which the dimension may be varying and can be known by the context. For the symmetric positive definite matrix $B$, we use $\lambda_{\min}(B)$ and $\lambda_{\max}(B)$ to represent the minimum and maximum of eigenvalues of $B$, respectively. As usual, we use the capital letters in calligraphy to represent the index sets, $\|\cdot\|$ is the Euclidean norm, $x\circ s$ is the Hadamard product of vectors $x$ and $s$, and $x\in\Re_{++}^n$ means $x\in\Re^n$ and $x>0$ componentwise.
\sect{The SBAL function and some preliminary results}
Recently, the authors in \cite{LDHmm,LDHmc} presented a novel barrier augmented Lagrangian function for nonlinear optimization with general inequality constraints. For problem \reff{prob1}, we reformulate it as \begin{equation}
\min_{y,s}~-b^Ty \quad\hbox{s.t.}~A^Ty+s=c,~s\ge 0, \label{sec2f1}\end{equation}
where $s\in\Re^n$ is a slack vector. The logarithmic-barrier problem associated with \reff{sec2f1} has the form \begin{equation}
\min_{y,s}~-b^Ty-\mu\sum_{i=1}^n\ln s_i\quad\hbox{s.t.}~s-c+A^Ty=0, \label{sec2f2}\end{equation}
where $s=(s_i)>0$, $\mu>0$ is the barrier parameter. Noting that problem \reff{sec2f2} is one with only equality constraints, we can use the Hestenes-Powell augmented Lagrangian function to reformulate it into an unconstrained optimization problem as follows, \begin{equation}
\min_{y,s}~F_{(\mu,\rho)}(y,s;x):=-\rho b^Ty-\rho\mu\sum_{i=1}^n\ln s_i+\rho x^T(s-c+A^Ty)+\frac{1}{2}\|s-c+A^Ty\|^2, \label{sec2f3}\end{equation}
where $\rho>0$ is the penalty parameter which may be reduced adaptively if necessary, $x\in\Re^n$ is an estimate of the Lagrange multiplier vector.
Since $\frac{\partial^2 F_{(\mu,\rho)}(y,s;x)}{\partial s_i^2}=\frac{\rho\mu}{s_i^2}+1>0$, no matter what $(y,s)$ and $x$ are, $F_{(\mu,\rho)}(y,s;x)$ is a strictly convex function with respect to $s_i$. Therefore, $F_{(\mu,\rho)}(y,s;x)$ attains its minimizer when \begin{eqnarray}
\frac{\partial F_{(\mu,\rho)}(y,s;x)}{\partial s_i}=-\frac{\rho\mu}{s_i}+\rho x_i+(s_i-c_i+A_i^Ty)=0, \nonumber\end{eqnarray}
where $A_i\in\Re^m$ is the $i$th column vector of $A$. Equivalently, one has
\begin{eqnarray} s_i=\frac{1}{2}(\sqrt{(\rho x_i-c_i+A_i^Ty)^2+4\rho\mu}-(\rho x_i-c_i+A_i^Ty)). \nonumber\end{eqnarray}
Based on the observation that $s_i$ varies with $y$ and $x$ and depends on the parameters $\mu$ and $\rho$, and for simplicity of statement, we define $s=s(y,x;\mu,\rho)$ and $z=z(y,x;\mu,\rho)$ componentwise as \begin{eqnarray}
&\!\!\!\!&\!\!\!\! s_i(y,x;\mu,\rho)=\frac{1}{2}(\sqrt{(\rho x_i-c_i+A_i^Ty)^2+4\rho\mu}-(\rho x_i-c_i+A_i^Ty)), \label{sdf}\\
&\!\!\!\!&\!\!\!\! z_i(y,x;\mu,\rho)=\frac{1}{2}(\sqrt{(\rho x_i-c_i+A_i^Ty)^2+4\rho\mu}+(\rho x_i-c_i+A_i^Ty)), \label{zdf}\end{eqnarray}
where $i=1,\ldots,n$. By \reff{sdf} and \reff{zdf}, $z=s-c+A^Ty+\rho x$.
Correspondingly, the objective function $F_{(\mu,\rho)}(y,s;x)$ of the unconstrained optimization problem \reff{sec2f3} can be written as \begin{eqnarray}
L_B(y,x;\mu,\rho)=-\rho b^Ty+\sum_{i=1}^nh_i(y,x;\mu,\rho), \label{Ldf}\end{eqnarray}
where $y\in\Re^m$ and $x\in\Re^n$ are the primal and dual variables of problem \reff{prob1}, $\mu>0$ and $\rho>0$ are, respectively, the barrier parameter and the penalty parameter, \begin{eqnarray}
&\!\!\!\!&\!\!\!\! h_i(y,x;\mu,\rho)=-\rho\mu\ln s_i(y,x;\mu,\rho)+\frac{1}{2} z_i(y,x;\mu,\rho)^2-\frac{1}{2}\rho^2x_i^2. \label{hdf}\end{eqnarray}
We may write $s$ and $z$ for simplicity in the sequel when their dependence on $(y,x)$ and $(\mu,\rho)$ is clear from the context.
Similar to \cite{LDHmc}, we can prove the differentiability of the functions $s$, $z$ defined by \reff{sdf}, \reff{zdf}, and the barrier augmented Lagrangian function $L_B(y,x;\mu,\rho)$ defined by \reff{Ldf}.
\begin{lem}\label{lemzp} For given $\mu>0$ and $\rho>0$, let $L_B(y,x;\mu,\rho)$ be defined by \reff{Ldf}, $s=(s_i(y,x;\mu,\rho))\in\Re^n$ and $z=(z_i(y,x;\mu,\rho))\in\Re^n$, $S=\hbox{diag}\,(s)$ and $Z=\hbox{diag}\,(z)$.
(1) Both $s$ and $z$ are differentiable with respect to $y$ and $x$, and \begin{eqnarray}
&&\nabla_ys=-A(S+Z)^{-1}S, \quad \nabla_yz=A(S+Z)^{-1}Z, \label{20140327a}\\
&&\nabla_{x}s=-{\rho}(S+Z)^{-1}S, \quad
\nabla_{x}z={\rho}(S+Z)^{-1}Z. \label{20140327b} \end{eqnarray}
(2) The function $L_B(y,x;\mu,\rho)$ is twice continuously differentiable with respect to $y$, and
\begin{eqnarray} &\!\!\!\!&\!\!\!\! \nabla_y L_B(y,x;\mu,\rho)=Az(y,x;\mu,\rho)-\rho b,\nonumber\\
&\!\!\!\!&\!\!\!\! \nabla_{yy}^2L_B(y,x;\mu,\rho)=A(S+Z)^{-1}ZA^T. \nonumber \end{eqnarray}
Thus, $L_B(y,x;\mu,\rho)$ is strictly convex with respect to $y$.
(3) The function $L_B(y,x;\mu,\rho)$ is twice continuously differentiable and strictly concave with respect to $x$, and
\begin{eqnarray} &\!\!\!\!&\!\!\!\!\nabla_x L_B(y,x;\mu,\rho)=\rho (s(y,x;\mu,\rho)-c+A^Ty),\nonumber\\
&\!\!\!\!&\!\!\!\! \nabla_{xx}^2L_B(y,x;\mu,\rho)=-{\rho}^2(S+Z)^{-1}S. \nonumber \end{eqnarray}
\end{lem}\noindent {\sl Proof.} \rm (1) By \reff{sdf} and \reff{zdf}, $s-z=c-A^Ty-\rho x$ and $$s_i+z_i=\sqrt{(\rho x_i-c_i+A_i^Ty)^2+4\rho\mu}.$$ Thus, one has \begin{eqnarray}
&\!\!\!\!&\!\!\!\!\nabla_y s-\nabla_y z=-A, \nonumber\\[2pt]
&\!\!\!\!&\!\!\!\!\nabla_y s+\nabla_y z=A(S+Z)^{-1}\hbox{diag}\,(\rho x-c+A^Ty)=A(I-2(S+Z)^{-1}S). \nonumber\end{eqnarray}
Thus, by doing summation and subtraction, respectively, on both sides of the preceding equations, we have
\begin{eqnarray} &\!\!\!\!&\!\!\!\! 2\nabla_y s=-2A(S+Z)^{-1}S, \nonumber\\[2pt]
&\!\!\!\!&\!\!\!\! -2\nabla_y z=-2A(I-(S+Z)^{-1}S)=-2A(S+Z)^{-1}Z. \nonumber \end{eqnarray}
Therefore, \reff{20140327a} follows immediately. The results in \reff{20140327b} can be derived in the same way by differentiating with respect to $x$.
(2) Let $h(y,x;\mu,\rho)=(h_i(y,x;\mu,\rho))\in\Re^n$. Due to \reff{hdf} and noting that $SZ=\rho\mu I$,
\begin{eqnarray} \nabla_y h(y,x;\mu,\rho)=-{\rho\mu}\nabla_y sS^{-1}+\nabla_y zZ=A(S+Z)^{-1}(\rho\mu I+Z^2)=AZ. \nonumber
\end{eqnarray} Thus, $\nabla_yL_B(y,x;\mu,\rho)=-\rho b+\nabla_y h(y,x;\mu,\rho)e=Az-\rho b$. Furthermore, by (1), \begin{eqnarray}
\nabla_{yy}^2L_B(y,x;\mu,\rho)=\nabla_y zA^T=A(S+Z)^{-1}ZA^T. \nonumber \end{eqnarray}
(3) Note that \begin{eqnarray}&\!\!\!\!&\!\!\!\!\nabla_{x} h(y,x;\mu,\rho)=-\rho\mu S^{-1}\nabla_{x} s+Z\nabla_{x} z-{\rho}^2X=\rho (Z-{\rho}X), \nonumber\\[2pt]
&\!\!\!\!&\!\!\!\!\nabla_{xx}^2 h(y,x;\mu,\rho)={\rho}(\nabla_x Z-\rho\nabla_x X), \nonumber\end{eqnarray}
and $\nabla_{x}L_B(y,x;\mu,\rho)=\nabla_{x} h(y,x;\mu,\rho)e$, $\nabla_{xx}^2L_B(y,x;\mu,\rho)={\rho}(\nabla_x z-\rho\nabla_x x)$.
The desired formulae in (3) can be derived immediately from the equation $s-c+A^Ty=z-\rho x$ and the results of (1). \eop
The next result gives the relation between the SBAL function and the logarithmic-barrier problem.
\begin{thm} For given $\mu>0$ and $\rho>0$, let $L_B(y,x;\mu,\rho)$ be defined by \reff{Ldf}. Then $((y^*,s^*),x^*)$ is a KKT pair of the logarithmic-barrier problem \reff{sec2f2} if and only if $s^*-c+A^Ty^*=0$ and
\begin{eqnarray} L_B(y^*,x;\mu,\rho)\le L_B(y^*,x^*;\mu,\rho)\le L_B(y,x^*;\mu,\rho), \label{220530b}\end{eqnarray}
i.e., $(y^*,x^*)$ is a saddle point of the SBAL function $L_B(y,x;\mu,\rho)$. \end{thm}\noindent {\sl Proof.} \rm
Due to \refl{lemzp} (3), for any $y$ such that $c_i-A_i^Ty>0$, $L_B(y,x;\mu,\rho)$ reaches its maximum with respect to $x_i$ at $x^*_i=\frac{\mu}{c_i-A_i^Ty}$ since $\frac{\partial{L_B(y,x;\mu,\rho)}}{\partial x_i}|_{x_i=x_i^*}=0$.
If $c_i-A_i^Ty\le 0$, then $\frac{\partial L_B(y,x;\mu,\rho)}{\partial x_i}>0$, which means that $L_B(y,x;\mu,\rho)$ is strictly monotonically increasing to $\infty$ as $x_i\rightarrow \infty$.
Thus, \begin{eqnarray} \hbox{argmax}_{x_i\in\Re} L_B(y,x;\mu,\rho)=\left\{\begin{array}{ll}
\frac{\mu}{c_i-A_i^Ty}, & \hbox{if}\quad c_i-A_i^Ty>0; \\
\infty, & \hbox{otherwise.}\end{array}\right. \label{220530a}\end{eqnarray}
If $((y^*,s^*),x^*)$ is a KKT pair of the logarithmic-barrier problem \reff{sec2f2}, then $s^*>0$ and \begin{eqnarray}
Ax^*=b,~s^*-c+A^Ty^*=0,~\hbox{and}~x_i^*s_i^*=\mu,~i=1,\ldots,n. \nonumber\end{eqnarray}
Thus, $s_i^*=c_i-A_i^Ty^*>0$ and $x_i^*=\frac{\mu}{c_i-A_i^Ty^*},~i=1,\ldots,n$.
Therefore, by \reff{220530a}, \begin{eqnarray} L_B(y^*,x^*;\mu,\rho)=-\rho b^Ty^*-\rho\mu\sum_{i=1}^n\ln (c_i-A_i^Ty^*)\ge L_B(y^*,x;\mu,\rho). \nonumber\end{eqnarray}
Furthermore, the condition $x_i^*s_i^*=\mu$ implies $z_i(y^*,x^*;\mu,\rho)-\rho x_i^*=0.$ Thus, $Az(y^*,x^*;\mu,\rho)=\rho b.$
It follows from \refl{lemzp} (2), $y^*$ is the minimizer of $L_B(y,x^*;\mu,\rho)$. That is, the right-hand-side inequality in \reff{220530b} holds.
In reverse, if $(y^*,x^*)$ satisfies \reff{220530b}, then $y^*$ is a minimizer of $L_B(y,x^*;\mu,\rho)$ and $x^*$ is a maximizer of $L_B(y^*,x;\mu,\rho)$. Thus, due to \refl{lemzp} (2) and (3), one has \begin{eqnarray}
Az(y^*,x^*;\mu,\rho)=\rho b,\quad s(y^*,x^*;\mu,\rho)-c+A^Ty^*=0. \nonumber\end{eqnarray}
The second equation further implies $z(y^*,x^*;\mu,\rho)-\rho x^*=0$ and $x_i^*(c_i-A_i^Ty^*)=\mu,~i=1,\ldots,n$.
Let $s^*=s(y^*,x^*;\mu,\rho)$. Then $s^*=c-A^Ty^*$, and $((y^*,s^*),x^*)$ is a KKT pair of the logarithmic-barrier problem \reff{sec2f2}.
\eop
The following result shows that, under suitable conditions, a minimizer of problem \reff{prob1} is an approximate minimizer of the SBAL function.
\begin{thm}\label{a} Let $y^*$ be a minimizer of the problem \reff{prob1} and $x^*$ the associated Lagrange multiplier vector. If the Slater constraint qualification holds, then for $\mu>0$ sufficiently small and for $\rho>0$, there exists a neighborhood of $x^*$ such that $y^*$ is a $\sqrt{\rho\mu}$-approximate strict global minimizer of the augmented Lagrangian $L_B(y,x;\mu,\rho)$ (that is, there is a scalar $\delta>0$ such that $\|\nabla_y L_B(y^*,x;\mu,\rho)\|\le\delta\sqrt{\rho\mu}$).
\end{thm}\noindent {\sl Proof.} \rm Under the conditions of the theorem, $x^*$ is a KKT point of problem \reff{prob1}. Thus, \begin{eqnarray}
Ax^*=b,~A^Ty^*\le c,~x^*\ge 0,\ (x^*)^T(c-A^Ty^*)=0. \label{20210603a}\end{eqnarray}
Let $z^*_i=z_i(y^*,x^*;\mu,\rho)$. Note that $x_i^*(c_i-A_i^Ty^*)=0$ for $i=1,\ldots,n$. Then \begin{eqnarray}
z_i^*=\left\{\begin{array}{ll}
\frac{1}{2} (\sqrt{(\rho x_i^*)^2+4\rho\mu}+\rho x_i^*), & \hbox{if}\ c_i-A_i^Ty^*=0,~x_i^*>0; \\[5pt]
\frac{1}{2} (\sqrt{(c_i-A_i^Ty^*)^2+4\rho\mu}-(c_i-A_i^Ty^*)), & \hbox{if}\ c_i-A_i^Ty^*>0,~x_i^*=0;\\[5pt]
\sqrt{\rho\mu}, &\hbox{otherwise.} \end{array}\right.\nonumber\end{eqnarray}
Since $\sqrt{(\rho x_i^*)^2+4\rho\mu}\le\rho x_i^*+2\sqrt{\rho\mu}$ and $\sqrt{(c_i-A_i^Ty^*)^2+4\rho\mu}\le (c_i-A_i^Ty^*)+2\sqrt{\rho\mu}$, one has \begin{eqnarray}
\rho x^*\le z^*\le\rho x^*+\sqrt{\rho\mu},\quad \|z^*-\rho x^*\|_{\infty}\le \sqrt{\rho\mu}. \label{20210603b}\end{eqnarray}
We will prove the result by showing $\|\nabla_y L_B(y^*,x^*;\mu,\rho)\|\le\delta\sqrt{\rho\mu}$ for some scalar $\delta$ and that $\nabla^2_{yy} L_B(y^*,x^*;\mu,\rho)$ is positive definite for $\rho>0$. By using \refl{lemzp} (2), and \reff{20210603a}, \reff{20210603b}, we have \begin{eqnarray}
\|\nabla_y L_B(y^*,x^*;\mu,\rho)\|=\|Az^*-\rho b\|=\|A(z^*-\rho x^*)\|\le\sqrt{\rho\mu}\|A\|_1, \nonumber \end{eqnarray}
which verifies the first part of the result.
Now we prove the second part of the result by showing that $d^T\nabla^2_{yy} L_B(y^*,x^*;\mu,\rho)d>0$ for all nonzero $d\in\Re^m$ and $\rho>0$. Let $s_i^*=s_i(y^*,x^*;\mu,\rho)$. Then \begin{eqnarray}
\frac{z_i^*}{s_i^*+z_i^*}=\left\{\begin{array}{ll}
\frac{1}{2} (1+\frac{\rho x_i^*}{\sqrt{(\rho x_i^*)^2+4\rho\mu}}), & \hbox{if}\ c_i-A_i^Ty^*=0,~x_i^*>0; \\[5pt]
\frac{1}{2} (1-\frac{(c_i-A_i^Ty^*)}{\sqrt{(c_i-A_i^Ty^*)^2+4\rho\mu}}), & \hbox{if}\ c_i-A_i^Ty^*>0,~x_i^*=0;\\[5pt]
\frac{1}{2}, &\hbox{otherwise.} \end{array}\right.\nonumber\end{eqnarray}
Therefore, by \refl{lemzp} (2), \begin{eqnarray}
&\!\!\!\!&\!\!\!\!\nabla^2_{yy} L_B(y^*,x^*;\mu,\rho) \nonumber\\
&\!\!\!\!&\!\!\!\!=\sum_{i=1}^n \frac{z_i^*}{s_i^*+z_i^*}A_iA_i^T\quad (A_i~\hbox{is the}~i\hbox{th column of}~A) \nonumber\\
&\!\!\!\!&\!\!\!\!=\frac{1}{2}(\sum_{i\in {\cal I}_1}(1+\frac{\rho x_i^*}{\sqrt{(\rho x_i^*)^2+4\rho\mu}})A_iA_i^T+\sum_{i\in {\cal I}_2}A_iA_i^T) \nonumber\\
&\!\!\!\!&\!\!\!\!\quad+\frac{1}{2}\sum_{i\in {\cal I}_3}(1-\frac{(c_i-A_i^Ty^*)}{\sqrt{(c_i-A_i^Ty^*)^2+4\rho\mu}})A_iA_i^T \nonumber\\
&\!\!\!\!&\!\!\!\!\ge\frac{1}{2}(1-\max\{\frac{(c_i-A_i^Ty^*)}{\sqrt{(c_i-A_i^Ty^*)^2+4\rho\mu}},~i=1,\ldots,n\})AA^T, \nonumber\end{eqnarray}
where ${\cal I}_1=\{i|c_i-A_i^Ty^*=0,~x_i^*>0\}$, ${\cal I}_2=\{i|c_i-A_i^Ty^*=0,~x_i^*=0\}$, ${\cal I}_3=\{i|c_i-A_i^Ty^*>0,~x_i^*=0\}$. The result follows easily because of the positive definiteness of $AA^T$. \eop
Based on the newly proposed barrier augmented Lagrangian function, \cite{LDHmc} presented a novel augmented Lagrangian method of multipliers for optimization with general inequality constraints. The method alternately updates the primal and dual iterates by \begin{eqnarray}
&\!\!\!\!&\!\!\!\! y_{k+1}=\hbox{argmin}_y L_B(y,x_k;\mu_k,\rho_k), \label{subp1}\\
&\!\!\!\!&\!\!\!\! x_{k+1}=\frac{1}{\rho_k} z(y_{k+1},x_k;\mu_k,\rho_k). \label{subp2}\end{eqnarray}
The update of the parameters $\mu_{k+1}$ and $\rho_{k+1}$ depends on the residual $\|s(y_{k+1},x_{k+1};\mu_k,\rho_k)-c+A^Ty_{k+1}\|$ and the norm $\|x_{k+1}\|$ of the dual multiplier vector.
To end this section, we show some monotone properties of our defined functions $L_B(y,x;\mu,\rho)$, $s_i(y,x;\mu,\rho)$ and $z_i(y,x;\mu,\rho)$ with respect to the parameters.
\begin{lem}\label{lem24an} Denote $L_B(y,x;\mu,\rho)=\rho\phi(y,x;\mu,\rho)+\frac{1}{2} R^2(y,x;\mu,\rho)$, where \begin{eqnarray}
&\!\!\!\!&\!\!\!\! \phi(y,x;\mu,\rho)=-b^Ty-\mu\sum_{i=1}^n\ln s_i(y,x;\mu,\rho)+x^T(s(y,x;\mu,\rho)-c+A^Ty), \nonumber\\
&\!\!\!\!&\!\!\!\! R(y,x;\mu,\rho)=\|s(y,x;\mu,\rho)-c+A^Ty\|. \nonumber\end{eqnarray}
Let $\hat y_{k+1}=\hbox{\rm argmin}_y L_B(y,x_k;\mu_k,\hat\rho_k)$ and $\tilde y_{k+1}=\hbox{\rm argmin}_y L_B(y,x_k;\mu_k,\tilde\rho_k)$ be attained. If $\hat\rho_k>\tilde\rho_k$, then \begin{eqnarray} \phi(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)<\phi(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k),~
R(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)>R(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k). \nonumber
\end{eqnarray}\end{lem}\noindent {\sl Proof.} \rm Let $\hat s_{k+1}=s(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)$ and $\tilde s_{k+1}=s(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k)$. Then, by \reff{sec2f3}, \begin{eqnarray} (\hat y_{k+1},\hat s_{k+1})=\hbox{\rm argmin}_{y,s}F_{(\mu_k,\hat\rho_k)}(y,s;x_k),~(\tilde y_{k+1},\tilde s_{k+1})=\hbox{\rm argmin}_{y,s}F_{(\mu_k,\tilde\rho_k)}(y,s;x_k). \nonumber \end{eqnarray}
Thus, if we denote $\psi_{\mu}(y,s;x)=-b^Ty-\mu\sum_{i=1}^n\ln s_i+x^T(s-c+A^Ty)$ and $W(y,s;x)=\|s-c+A^Ty\|$, then
$F_{(\mu,\rho)}(y,s;x)=\rho\psi_{\mu}(y,s;x)+\frac{1}{2} W^2(y,s;x)$, and
\begin{eqnarray} &\!\!\!\!&\!\!\!\! \phi(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)=\psi_{\mu_k}(\hat y_{k+1},\hat s_{k+1};x_k),~\phi(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k)=\psi_{\mu_k}(\tilde y_{k+1},\tilde s_{k+1};x_k), \nonumber\\
&\!\!\!\!&\!\!\!\! R(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)=W(\hat y_{k+1},\hat s_{k+1};x_k),~R(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k)=W(\tilde y_{k+1},\tilde s_{k+1};x_k). \nonumber \end{eqnarray}
Moreover, \begin{eqnarray} &\!\!\!\!&\!\!\!\! F_{(\mu_k,\hat\rho_k)}(\hat y_{k+1},\hat s_{k+1};x_k)< F_{(\mu_k,\hat\rho_k)}(\tilde y_{k+1},\tilde s_{k+1};x_k), \nonumber\\
&\!\!\!\!&\!\!\!\! F_{(\mu_k,\tilde\rho_k)}(\tilde y_{k+1},\tilde s_{k+1};x_k)< F_{(\mu_k,\tilde\rho_k)}(\hat y_{k+1},\hat s_{k+1};x_k). \nonumber\end{eqnarray}
It follows that \begin{eqnarray}
&\!\!\!\!&\!\!\!\! F_{(\mu_k,\hat\rho_k)}(\tilde y_{k+1},\tilde s_{k+1};x_k)-F_{(\mu_k,\hat\rho_k)}(\hat y_{k+1},\hat s_{k+1};x_k) \nonumber\\
&\!\!\!\!&\!\!\!\! +F_{(\mu_k,\tilde\rho_k)}(\hat y_{k+1},\hat s_{k+1};x_k)-F_{(\mu_k,\tilde\rho_k)}(\tilde y_{k+1},\tilde s_{k+1};x_k) \nonumber\\
&\!\!\!\!&\!\!\!\! =(\hat\rho_k-\tilde\rho_k)(\psi_{\mu_k}(\tilde y_{k+1},\tilde s_{k+1};x_k)-\psi_{\mu_k}(\hat y_{k+1},\hat s_{k+1};x_k))>0. \nonumber\end{eqnarray}
That is, $\phi(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k)=\psi_{\mu_k}(\tilde y_{k+1},\tilde s_{k+1};x_k)>\psi_{\mu_k}(\hat y_{k+1},\hat s_{k+1};x_k)=\phi(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)$. Therefore, \begin{eqnarray}
&\!\!\!\!&\!\!\!\! \frac{1}{2} R^2(\hat y_{k+1},x_k;\mu_k,\hat\rho_k)=\frac{1}{2} W^2(\hat y_{k+1},\hat s_{k+1};x_k) \nonumber\\
&\!\!\!\!&\!\!\!\! >\frac{1}{2} W^2(\hat y_{k+1},\hat s_{k+1};x_k)+\tilde\rho_k (\psi_{\mu_k}(\hat y_{k+1},\hat s_{k+1};x_k)-\psi_{\mu_k}(\tilde y_{k+1},\tilde s_{k+1};x_k)) \nonumber\\
&\!\!\!\!&\!\!\!\! >\frac{1}{2} W^2(\tilde y_{k+1},\tilde s_{k+1};x_k)=\frac{1}{2} R^2(\tilde y_{k+1},x_k;\mu_k,\tilde\rho_k), \nonumber\end{eqnarray}
which completes our proof. \eop
\begin{lem}\label{lem24n} For given parameters $\mu>0$ and $\rho>0$, the following results are true.
(1) Both $s_i(y,x;\mu,\rho)$ and $z_i(y,x;\mu,\rho)$ are monotonically increasing with respect to $\mu$.
(2) If $(s_i(y,x;\mu,\rho)-c_i+A_i^Ty)\ne 0$, then $(s_i(y,x;\mu,\rho)-c_i+A_i^Ty)^2$ will be decreasing as $\rho$ is decreasing.
(3) If $\|s(y,x;\mu,\rho)-c+A^Ty\|\ne 0$, then the function $\frac{1}{\rho}L_B(y,x;\mu,\rho)$ is monotonically decreasing with respect to $\rho$.
(4) The function $\frac{1}{\rho}L_B(y,x;\mu,\rho)$ is strictly convex with respect to $\rho$.
\end{lem}\noindent {\sl Proof.} \rm It should be noted that all related functions are differentiable with respect to $\mu$ and $\rho$. In addition, due to \begin{eqnarray}
&\!\!\!\!&\!\!\!\!\frac{\partial s_i(y,x;\mu,\rho)}{\partial\mu}=\frac{\partial z_i(y,x;\mu,\rho)}{\partial\mu}=\frac{\rho}{s_i+z_i}>0, \nonumber\\[2pt]
&\!\!\!\!&\!\!\!\!\frac{\partial ((s_i(y,x;\mu,\rho)-c_i+A_i^Ty)^2)}{\partial\rho}=\frac{2}{\rho}\frac{s_i}{s_i+z_i}(s_i-c_i+A_i^Ty)^2>0, \nonumber\\[2pt]
&\!\!\!\!&\!\!\!\!\frac{\partial \frac{1}{\rho}L_B(y,x;\mu,\rho)}{\partial\rho}=-\frac{1}{2}\frac{1}{\rho^2}\|s-c+A^Ty\|^2<0, \nonumber\\[2pt]
&\!\!\!\!&\!\!\!\!\frac{\partial^2 (\frac{1}{\rho}L_B(y,x;\mu,\rho))}{\partial\rho^2}=\frac{1}{\rho^3}(s-c+A^Ty)^T(S+Z)^{-1}Z(s-c+A^Ty), \nonumber\end{eqnarray}
the desired results are obtained immediately. \eop
\sect{Our primal-dual majorization-minimization method}
Our method in this paper focuses on how to solve the subproblem \reff{subp1} efficiently. Noting the strict convexity of the SBAL function $L_B(y,x;\mu,\rho)$ with respect to $y$ and the special structure of the Hessian matrix $\nabla_{yy}^2L_B(y,x;\mu,\rho)$, the introduction of the majorization-minimization method is a natural choice. In particular, we will see that the dual update is precisely a step which can be derived by the minorization-maximization.
Let $(y_k,x_k)$ be the current iteration point, $\mu_k>0$ and $\rho_k>0$ the current values of the parameters. For any given $x\in\Re^n$, we consider the quadratic surrogate function $Q_k(\cdot,x): \Re^m\to\Re$, \begin{eqnarray}
Q_k(y,x)=&\!\!\!\!&\!\!\!\! L_B(y_k,x;\mu_k,\rho_k)+(Az(y_k,x;\mu_k,\rho_k)-\rho_k b)^T(y-y_k) \nonumber\\
&\!\!\!\!&\!\!\!\! +\frac{1}{2}(y-y_k)^TAA^T(y-y_k), \label{Qf}\end{eqnarray}
which is an approximate function of the objective in \reff{subp1} and majorizes the objective function with respect to $y$.
\begin{lem}\label{lem21} For any given $x=\hat x$ and the parameters $\mu_k>0$ and $\rho_k>0$, there holds $Q_k(y_k,\hat x)=L_B(y_k,\hat x;\mu_k,\rho_k)$ and $L_B(y,\hat x;\mu_k,\rho_k)\le Q_k(y,\hat x)$ for all $y\in\Re^m$. \end{lem}\noindent {\sl Proof.} \rm
The equation $Q_k(y_k,\hat x)=L_B(y_k,\hat x;\mu_k,\rho_k)$ is obtained from \reff{Qf}.
By Taylor's theorem with remainder, \begin{eqnarray}
&\!\!\!\!&\!\!\!\! L_B(y,\hat x;\mu_k,\rho_k)=L_B(y_k,\hat x;\mu_k,\rho_k)+\nabla_y L_B(y_k,\hat x;\mu_k,\rho_k)^T(y-y_k) \nonumber\\
&\!\!\!\!&\!\!\!\! \quad+\int_0^1\left(\nabla_yL_B(y_k+\tau (y-y_k),\hat x;\mu_k,\rho_k)-\nabla_y L_B(y_k,\hat x;\mu_k,\rho_k)\right)^T(y-y_k)d\tau. \label{lem21f1} \end{eqnarray}
Due to \refl{lemzp} (2), one has \begin{eqnarray}
&\!\!\!\!&\!\!\!\!\nabla_yL_B(y_k+\tau (y-y_k),\hat x;\mu_k,\rho_k)-\nabla_y L_B(y_k,\hat x;\mu_k,\rho_k) \nonumber\\
&\!\!\!\!&\!\!\!\!=\int_{0}^1\tau\nabla_{yy}^2L_B(y_k+\alpha\tau (y-y_k),\hat x;\mu_k,\rho_k)(y-y_k)d\alpha \nonumber\\
&\!\!\!\!&\!\!\!\!=\int_{0}^1\tau A(\hat S_k+\hat Z_k)^{-1}\hat Z_kA^T(y-y_k)d\alpha \nonumber\\
&\!\!\!\!&\!\!\!\!=\tau AA^T(y-y_k)-\int_{0}^1\tau A(\hat S_k+\hat Z_k)^{-1}\hat S_kA^T(y-y_k)d\alpha, \nonumber \end{eqnarray}
where $\hat S_k=\hbox{diag}\,(s(y_k+\tau (y-y_k),\hat x;\mu_k,\rho_k))$ and $\hat Z_k=\hbox{diag}\,(z(y_k+\tau (y-y_k),\hat x;\mu_k,\rho_k))$.
Noting $$\int_0^1\int_{0}^1\tau (y-y_k)^TA(\hat S_k+\hat Z_k)^{-1}\hat S_kA^T(y-y_k)d\alpha d\tau\ge 0,$$
the inequality $L_B(y,\hat x;\mu_k,\rho_k)\le Q_k(y,\hat x)$ follows from \refl{lemzp} (2) and \reff{lem21f1} immediately. \eop
In a similar way, if for given $y\in\Re^m$ and the parameters $\mu_k>0$ and $\rho_k>0$, we define $P_k(y,\cdot): \Re^n\to\Re$ to be the function \begin{eqnarray}
P_k(y,x)=&\!\!\!\!&\!\!\!\! L_B(y,x_k;\mu_k,\rho_k)+\rho_k(s(y,x_k;\mu_k,\rho_k)-c+A^Ty)^T(x-x_k) \nonumber\\
&\!\!\!\!&\!\!\!\! -\frac{1}{2}{\rho_k}^2(x-x_k)^T(x-x_k), \label{Df}\end{eqnarray}
then $P_k(y,x_k)=L_B(y,x_k;\mu_k,\rho_k)$ and $L_B(y,x;\mu_k,\rho_k)\ge P_k(y,x)$ for all $x\in\Re^n$. That is, $P_k(y,x)$ is an approximate surrogate function of the objective in the optimization problem \begin{eqnarray}
\max_x L_B(y,x;\mu_k,\rho_k) \nonumber\end{eqnarray}
and minorizes the objective function with respect to $x$ (i.e., majorizes the negative objective function).
By the strict convexity of $Q_k(\cdot,x)$ and the strict concavity of $P_k(y,\cdot)$, there are a unique minimizer of $Q_k(y,\hat x)$ and a unique maximizer of $P_k(\hat y,x)$, where $\hat x\in\Re^n$ and $\hat y\in\Re^m$ are any given vectors.
\begin{lem}\label{lem22} Given $\mu_k>0$ and $\rho_k>0$. Let $Q_k(\cdot,x): \Re^m\to\Re$ and $P_k(y,\cdot): \Re^n\to\Re$ be functions defined by \reff{Qf} and \reff{Df}, respectively.
(1) For any given $\hat x$, $Q_k(y,\hat x)$ has a unique minimizer $y^*_k$. Moreover, $y_k^*$ satisfies the equation \begin{eqnarray}
AA^T(y-y_k)=-(Az(y_k,\hat x;\mu_k,\rho_k)-\rho_k b).\end{eqnarray}
(2) For any given $\hat y$, $P_k(\hat y,x)$ has a unique maximizer $x_k^*$, and \begin{eqnarray}
x_k^*=x_k+\frac{1}{\rho_k} (s(\hat y,x_k;\mu_k,\rho_k)-c+A^T\hat y). \label{220601a}\end{eqnarray}
(3) For any given $\hat x$ and $\hat y$, one has \begin{eqnarray}
&\!\!\!\!&\!\!\!\! L_B(y_k^*,\hat x;\mu_k,\rho_k)-L_B(y_k,\hat x;\mu_k,\rho_k)\le-\frac{1}{2}\|Az(y_k,\hat x;\mu_k,\rho_k)-\rho_k b\|_{(AA^T)^{-1}}^2, \label{lem22f3}\\
&\!\!\!\!&\!\!\!\! L_B(\hat y,x_k^*;\mu_k,\rho_k)-L_B(\hat y,x_k;\mu_k,\rho_k)\ge\frac{1}{2}\|s(\hat y,x_k;\mu_k,\rho_k)-c+A^T\hat y\|^2. \end{eqnarray}
\end{lem}\noindent {\sl Proof.} \rm Since \begin{eqnarray}
&\!\!\!\!&\!\!\!\!\nabla_yQ_k(y,\hat x)=AA^T(y-y_k)+(Az(y_k,\hat x;\mu_k,\rho_k)-{\rho_k} b), \nonumber\\
&\!\!\!\!&\!\!\!\!\nabla_xP_k(\hat y,x)=-{\rho_k}^2(x-x_k)+\rho_k(s(\hat y,x_k;\mu_k,\rho_k)-c+A^T\hat y), \nonumber\end{eqnarray}
and noting the strict convexity of $Q_k(y,\hat x)$ with respect to $y$, and the strict concavity of $P_k(\hat y,x)$ with respect to $x$, the results (1) and (2) are obtained immediately from the optimality conditions of general unconstrained optimization (see \cite{NocWri99,SunYua06}).
By the preceding results, one has \begin{eqnarray}
&\!\!\!\!&\!\!\!\! Q_k(y_k^*,\hat x)=Q_k(y_k,\hat x)-\frac{1}{2}\|Az(y_k,\hat x;\mu_k,\rho_k)-\rho_kb\|_{(AA^T)^{-1}}^2, \nonumber\\
&\!\!\!\!&\!\!\!\! P_k(\hat y,x_k^*)=P_k(\hat y,x_k)+\frac{1}{2}\|s(\hat y,x_k;\mu_k,\rho_k)-c+A^T\hat y\|^2. \nonumber\end{eqnarray}
Due to \refl{lem21}, there hold \begin{eqnarray}
&\!\!\!\!&\!\!\!\! L_B(y_k^*,\hat x;\mu_k,\rho_k)-L_B(y_k,\hat x;\mu_k,\rho_k)\le Q_k(y_k^*,\hat x)-Q_k(y_k,\hat x), \nonumber\\
&\!\!\!\!&\!\!\!\! L_B(\hat y,x_k^*;\mu_k,\rho_k)-L_B(\hat y,x_k;\mu_k,\rho_k)\ge P_k(\hat y,x_k^*)-P_k(\hat y,x_k), \nonumber\end{eqnarray}
which complete our proof. \eop
Because of \reff{sdf} and \reff{zdf}, \reff{220601a} is equivalent to $x_k^*=\frac{1}{\rho_k} z(\hat y,x_k;\mu_k,\rho_k)$, which is consistent with \reff{subp2}. This fact shows that the dual update $x_{k+1}$ in \reff{subp2} can be obtained from maximizing the minorized function $P_k(y_{k+1},x)$. In the following, we describe our algorithm for linear programming.
\noindent\underline{\hspace*{6.3in}}\\[-10pt]
\begin{algorithm}\label{alg1}(A primal-dual majorization-minimization method for problem \reff{prob1})
{\small \alglist
\item[{\bf Step}] {\bf 0}. Given $(y_0,x_0)\in\Re^{m}\times\Re^n$, $\mu_0>0$, $\rho_0>0$, $\delta>0$, $\gamma\in(0,1)$, $\epsilon>0$.
Set $k:=0$.
\item[{\bf Step}] {\bf 1}. Approximately minimize $L_B(y,x_k;\mu_k,\rho_k)$ by the majorization-minimization method starting from $y_k$.
Set $\hat y_{0}=y_k$, $\hat\rho_{0}=\rho_k$, $\ell:=0$.
{\bf Step 1.1}. Solve the equation \begin{eqnarray}
AA^T(y-\hat y_{\ell})=-(Az(\hat y_{\ell},x_k;\mu_{k},\hat\rho_{\ell})-\hat\rho_{\ell}b) \label{yiter}\end{eqnarray}
\hspace{0.5cm} to obtain the solution $\hat y_{\ell+1}$. Evaluate \begin{eqnarray}
E_{k+1}^{primal}=\|A z(\hat y_{\ell+1},x_{k};\mu_k,\hat\rho_{\ell})-\hat\rho_{\ell}b\|. \nonumber\end{eqnarray}
\hspace{0.5cm} If $E_{k+1}^{primal}>\mu_{k}$,
set $\hat\rho_{\ell+1}=\hat\rho_{\ell}$, $\ell:=\ell+1$ and repeat Step 1.1. Otherwise, compute \begin{eqnarray}
E_{k+1}^{dual}=\|s(\hat y_{\ell+1},x_{k};\mu_{k},\hat\rho_{\ell})-c+A^T\hat y_{\ell+1}\|. \nonumber\end{eqnarray}
\hspace{0.5cm} If $E_{k+1}^{dual}>\max\{\hat\rho_{\ell},\mu_k\}$, set $\hat\rho_{\ell+1}\le 0.5\hat\rho_{\ell}$, $\ell:=\ell+1$ and repeat Step 1.1; else set
$y_{k+1}=\hat y_{\ell+1}$,
\hspace{0.5cm} $\rho_{k+1}=\hat\rho_{\ell}$, end.
\item[{\bf Step}] {\bf 2}. Update $x_k$ to \begin{eqnarray}
x_{k+1}=x_{k}+\frac{1}{\rho_{k+1}}(s(y_{k+1},x_{k};\mu_{k},\rho_{k+1})-c+A^Ty_{k+1}). \label{xiter}\end{eqnarray}
\item[{\bf Step}] {\bf 3}. If $\mu_k<\epsilon$, stop the algorithm. Otherwise,
set $\mu_{k+1}\le\gamma\mu_k$, $\rho_{k+1}=\min\{\rho_{k+1}, \frac{\delta}{\|x_{k+1}\|_{\infty}}\}$, $k:=k+1$. End (while)
\end{list}}
\end{algorithm}
\noindent\underline{\hspace*{6.3in}}
The initial point for our algorithm can be arbitrary, which is different from both the simplex methods and the interior-point methods starting from either a feasible point or an interior-point. Theoretically, since the augmented Lagrangian function is an exact penalty function, we can always select the initial penalty parameter $\rho_0$ sufficiently small such that, under desirable conditions, $E_{k+1}^{dual}$ is sufficiently small. The initial barrier parameter $\mu_0$ can be selected to be small without affecting the well-definedness of the algorithm, but it may impact the strict convexity of the SBAL function and bring about more iterations for solving the subproblem \reff{subp1}.
The Step 1 is the core and the main computation of our algorithm. For fixed $x_k$, $\mu_k$ and $\rho_k$, we attempt to find a new estimate $y_{k+1}$, which is an approximate minimizer of the SBAL function $L_B(y,x_k;\mu_k,\rho_k)$ with respect to $y$. The main computation is in solving the system \reff{yiter}, which depends on the decomposition of $AA^T$. Since $AA^T$ is independent of the iteration, its decomposition can be fulfilled in preprocessing. If $L_B(y,x_k;\mu_k,\rho_k)$ is lower bounded, then the Step 1 will terminate in a finite number of iterations.
By Step 2 of \refal{alg1}, we have $x_{k+1}=\frac{1}{\rho_{k+1}} z(y_{k+1},x_{k};\mu_{k},\rho_{k+1})$, thus $x_{k+1}>0$ for all $k\ge 0$. Due to \refl{lem22} (3) and the strict concavity, one has \begin{eqnarray} \|s(y_{k+1},x_{k+1};\mu_{k},\rho_{k+1})-c+A^Ty_{k+1}\|<\|s(y_{k+1},x_{k};\mu_{k},\rho_{k+1})-c+A^Ty_{k+1}\|.
\label{220612c}\end{eqnarray}
Due to Step 3, $\mu_k\to 0$ as $k\to\infty$, and $\rho_{k+1}\|x_{k+1}\|_{\infty}\le\delta$ for all $k>0$.
\sect{Global convergence}
We analyze the convergence of \refal{alg1} in this section. Firstly, we prove that, if the original problem has a minimizer, then Step 1 will always terminate in a finite number of iterations and $\{y_k\}$ will be obtained. After that, we prove that, without requiring a priori either the primal or the dual linear problem to be feasible, our algorithm can recognize a KKT point of problem \reff{prob1}, or show that either its dual problem \reff{prob2} is unbounded when problem \reff{prob1} is feasible, or a minimizer with least violations of constraints is found when problem \reff{prob1} is infeasible.
\begin{lem}\label{le41n} If problem \reff{prob1} has a solution, then for any given $x_k\in\Re^n_{++}$ and any given parameters $\mu_k>0$ and $\rho_k>0$, the SBAL function $L_B(y,x_k;\mu_k,\rho_k)$ is bounded below, and Step 1 will terminate in a finite number of iterations. \end{lem}\noindent {\sl Proof.} \rm If problem \reff{prob1} has a solution, then the logarithmic-barrier problem \reff{sec2f2} is feasible when the original problem is strictly feasible (that is, the Slater constraint qualification holds), otherwise problem \reff{sec2f2} is infeasible. Correspondingly, the objective
$-b^Ty-\mu\sum_{i=1}^n\ln s_i$ of problem \reff{sec2f2} either takes its minimizer at an interior-point of problem \reff{prob1} (in this case the minimizer is attained) or is $+\infty$. It is noted that $A_i^Ty\rightarrow-\infty$ for any $i=1,\ldots,n$ if and only if $A_i^Ty<c_i$, the strict feasibility of the corresponding constraint of problem \reff{prob1}. The preceding result shows that no matter when $y$ is such that $A_i^Ty\to-\infty$ for any $i=1,\ldots,n$, the minimizer of $-b^Ty-\mu\sum_{i=1}^n\ln s_i$ with $s_i=\max\{c_i-A_i^Ty,0\}$ will be lower bounded away from $-\infty$.
If $L_B(y,x_k;\mu_k,\rho_k)$ is not lower bounded, then $L_B(y,x_k;\mu_k,\rho_k)\to-\infty$ as $A_i^Ty\rightarrow-\infty$ for some $i=1,\ldots,n$. Let ${\cal I}(y)=\{i|A_i^Ty\to-\infty\}$. Since \begin{eqnarray}
&\!\!\!\!&\!\!\!\! L_B(y,x_k;\mu_k,\rho_k)\ge-b^Ty-\mu_k\sum_{i=1}^n\ln s_i(y,x_k;\mu_k,\rho_k) \nonumber\\
&\!\!\!\!&\!\!\!\!=-b^Ty-\mu_k\sum_{i\in{\cal I}(y)}\ln (c_i-A_i^Ty)-\mu_k\sum_{i\in{\cal I}(y)}\ln\frac{s_i(y,x_k;\mu_k,\rho_k)}{c_i-A_i^Ty}-\mu_k\sum_{i\not\in{\cal I}(y)}\ln s_i(y,x_k;\mu_k,\rho_k) \nonumber\\
&\!\!\!\!&\!\!\!\!>-\infty, \nonumber\end{eqnarray}
it shows that $L_B(y,x_k;\mu_k,\rho_k)$ is lower bounded away from $-\infty$.
Now we prove that for any fixed $\hat\rho_{\ell}$, if Step 1 of \refal{alg1} does not terminate finitely, then $E_{k+1}^{primal}\to 0$ as $\ell\to\infty$. By \refl{lem22}, $\{L_B(\hat y_{\ell},x_k;\mu_k,\hat\rho_{\ell})\}$ is monotonically non-increasing as $\ell\to\infty$. Thus either there is a finite limit for the sequence $\{L_B(\hat y_{\ell},x_k;\mu_k,\hat\rho_{\ell})\}$ or the whole sequence tends to $-\infty$. Since $L_B(y,x_k;\mu_k,\hat\rho_{\ell})$ is bounded below, due to \reff{lem22f3}, one has \begin{eqnarray}
\lim_{\ell\to\infty} \|Az(\hat y_{\ell},x_k;\mu_k,\hat\rho_{\ell})-\hat\rho_{\ell} b\|_{(AA^T)^{-1}}=0, \end{eqnarray}
which shows that the condition $E_{k+1}^{primal}\le\mu_k$ will be satisfied in a finite number of iterations.
Since problem \reff{prob1} is supposed to be feasible, for every $s>0$ one has $s-c+A^Ty>0$. It follows from \refl{lem24an} that there is a scalar $\rho_{k+1}>0$ such that for given $\mu_k>0$ and for all $\hat\rho_{\ell}\le\rho_{k+1}$, $E_{k+1}^{dual}\le\mu_k$ as $\ell$ is large enough. Thus, Step 1 will terminate in a finite number of iterations.
\eop
The next result shows that, if Step 1 does not terminate finitely, then either problem \reff{prob1} is unbounded or a point with least constraint violations will be found.
\begin{lem}\label{le42n} For given $x_k\in\Re_{++}^n$ and parameters $\mu_k>0$ and $\rho_k>0$, if Step 1 of \refal{alg1} does not terminate finitely and an infinite sequence $\{\hat y_{\ell}\}$ is generated, then either problem \reff{prob1} is unbounded or any cluster point of $\{\hat y_{\ell}\}$ is an infeasible stationary point $y^*$ satisfying \begin{eqnarray}
A\max\{A^Ty^*-c,0\}=0. \label{s4f2a}\end{eqnarray}
The point $y^*$ is also a solution for minimizing the $\ell_2$-norm of constraint violations of problem \reff{prob1}, and shows that problem \reff{prob2} is unbounded.\end{lem}\noindent {\sl Proof.} \rm If the failure of Step 1 of \refal{alg1} to terminate finitely results from $E_{k+1}^{primal}$ not being small enough for given $\rho_k$, then $\{\hat y_{\ell}\}$ is unbounded and $L_B(\hat y_{\ell},x_k;\mu_k,\hat\rho_{\ell})\to-\infty$ as $\ell\to\infty$, which by the arguments in the proof of \refl{le41n} implies that problem \reff{prob1} is feasible and unbounded.
Now we consider the case that $\{\hat y_{\ell}\}$ is bounded for given $\rho_k$. Suppose that ${\cal L}$ is a subset of indices such that $\hat y_{\ell}\to{\hat y_{\ell}}^*$ as $\ell\in{\cal L}$ and $\ell\to\infty$ for given $\rho_k$. Then \begin{eqnarray}
Az({\hat y_{\ell}}^*,x_k;\mu_k,\rho_k)-\rho_k b=0. \label{220725a}\end{eqnarray}
Due to $z({\hat y_{\ell}}^*,x_k;\mu_k,\rho_k)>0$, \reff{220725a} shows that problem \reff{prob2} is feasible. Furthermore, considering the fact that Step 1 of \refal{alg1} does not terminate finitely, one has $\hat\rho_{\ell}\to 0$. Thus,
the result \reff{s4f2a} follows since $z({\hat y_{\ell}}^*,x_k;\mu_k,\hat\rho_{\ell})\to\max\{A^Ty^*-c,0\}$ as $\hat\rho_{\ell}\to 0$.
In addition, since $s({\hat y_{\ell}}^*,x_k;\mu_k,\hat\rho_{\ell})-c+A^T{\hat y_{\ell}}^*>\mu_k$ for given $\mu_k>0$ and $\ell\in\{\ell|\hat\rho_{\ell+1}\le0.5\hat\rho_{\ell}\}$, and \begin{eqnarray}
s({\hat y_{\ell}}^*,x_k;\mu_k,\hat\rho_{\ell})-c+A^T{\hat y_{\ell}}^*\to\max\{A^Ty^*-c,0\}~\hbox{as}~\hat\rho_{\ell}\to 0, \nonumber\end{eqnarray}
then $\max\{A^Ty^*-c,0\}\ge\mu_k>0$. That is, $y^*$ is infeasible to problem \reff{prob1}, which by \cite{NocWri99,SunYua06,wright97,ye} implies that problem \reff{prob2} is unbounded. Noting that \reff{s4f2a} suggests that $y^*$ satisfies the stationary condition of the linear least square problem \begin{eqnarray}
\min_y~\frac{1}{2}\|\max\{A^Ty-c,0\}\|^2, \nonumber\end{eqnarray}
$y^*$ is a point with the least $\ell_2$-norm of constraint violations of problem \reff{prob1}. \eop
In the following analysis of this section, we suppose that Step 1 of \refal{alg1} terminates finitely for every $k$. In order to analyze the convergence of \refal{alg1}, we also suppose that \refal{alg1} does not terminate finitely, and an infinite sequence $\{y_k\}$ is generated.
Corresponding to the sequence $\{y_k\}$, we also have the sequence $\{\mu_k\}$ of barrier parameters, the sequence $\{\rho_k\}$ of penalty parameters, and the sequence $\{x_k\}$ of the estimates of multipliers. In particular, $\{\mu_k\}$ is a monotonically decreasing sequence and tends to $0$, $\{\rho_k\}$ is a monotonically non-increasing sequence which either keeps unchanged after a finite number of steps or tends to $0$, and \begin{eqnarray}&\!\!\!\!&\!\!\!\! x_{k+1}=x_k+\frac{1}{\rho_{k+1}}(s(y_{k+1},x_{k};\mu_{k},\rho_{k+1})-c+A^Ty_{k+1}) \nonumber\\
&\!\!\!\!&\!\!\!\!=x_{k-1}+\frac{1}{\rho_{k}}(s(y_{k},x_{k-1};\mu_{k-1},\rho_{k})-c+A^Ty_{k})+\frac{1}{\rho_{k+1}}(s(y_{k+1},x_{k};\mu_{k},\rho_{k+1})-c+A^Ty_{k+1}) \nonumber\\
&\!\!\!\!&\!\!\!\!=x_0+\sum_{\ell=0}^k\frac{1}{\rho_{\ell+1}}(s(y_{\ell+1},x_{\ell};\mu_{\ell},\rho_{\ell+1})-c+A^Ty_{\ell+1}). \nonumber\end{eqnarray}
If the sequence $\{x_k\}$ is bounded, then $\frac{1}{\rho_{k+1}}(s(y_{k+1},x_{k};\mu_{k},\rho_{k+1})-c+A^Ty_{k+1})\to 0$ as $k\to\infty$, and $\{\rho_k\}$ is bounded away from zero.
\begin{lem}\label{lem41} If $\rho_k\to 0$, then any cluster point of $\{y_k\}$ is a Fritz-John point of problem \reff{prob1}. In particular, there exists an infinite subset ${\cal K}$ of indices such that for $k\in{\cal K}$ and $k\to\infty$, $y_k\to y^*$, $z_k\to z^*\ge 0$, $s_k\to s^*\ge 0$ and \begin{eqnarray} s^*-c+A^Ty^*=0,\quad Az^*=0,\quad z^*\circ s^*=0, \label{s4f1}\end{eqnarray}
which shows that problem \reff{prob1} is feasible but problem \reff{prob2} is unbounded.
\end{lem}\noindent {\sl Proof.} \rm Without loss of generality, we assume that $\{y_k\}$ is bounded.
Because of the boundedness of $\{{\rho_k}{x_k}\}$, both $\{s_k\}$ and $\{z_k\}$ are bounded. Without loss of generality, we let $y_k\to y^*$, $z_k\to z^*$, $s_k\to s^*$ for $k\in{\cal K}$ and $k\to\infty$. Then $z^*\ge0$ and $s^*\ge 0$. Therefore, \reff{s4f1} follows immediately from \begin{eqnarray}
\mu_k\to 0,~E_k^{primal}\le\mu_k,~E_k^{dual}\le\max\{\rho_{k},\mu_k\},~\hbox{and}~z_k\circ s_k={\rho_k}{\mu_k} e. \nonumber\end{eqnarray}
That is, $y^*$ is a Fritz-John point of problem \reff{prob1}.
The equations in \reff{s4f1} show that, if $\rho_k\to 0$, \refal{alg1} will converge to a feasible point $y^*$ of \reff{prob1}.
By the first part of the proof of \refl{le42n}, the finite termination of Step 1 implies that problem \reff{prob2} is strictly feasible. Thus, its set of solutions is unbounded since for any feasible point $x$ of problem \reff{prob2}, due to \reff{s4f1}, $x+\alpha z^*$ is feasible to problem \reff{prob2} and $c^T(x+\alpha z^*)=c^Tx$ for all $\alpha\ge 0$. \eop
In what follows, we prove the convergence of \refal{alg1} to a KKT point.
\begin{lem}\label{lem42} If $\rho_k$ is bounded away from zero, then $\{x_k\}$ is bounded, and every cluster point of $\{(y_k,x_k)\}$ is a KKT pair of problem \reff{prob1}. \end{lem}\noindent {\sl Proof.} \rm
Suppose that $\rho_{k}\ge\rho^*>0$ for all $k\ge 0$ and for some scalar $\rho^*$, then by Step 3 of \refal{alg1}, $\frac{1}{\|x_k\|_{\infty}}\ge\rho^*$. Thus, $\|x_k\|_{\infty}\le\frac{1}{\rho^*}$.
Since $\|x_k\|$ is bounded, $\lim_{k\to\infty}E_{k}^{dual}=0$. Thus, $E_{k}^{dual}\le\rho^*$ for all $k$ sufficiently large. Together with the facts $\mu_k\to 0$ and $$E_k^{primal}=\|\rho_kAx_{k}-\rho_k b\|_{(AA^T)^{-1}}\le\mu_k,$$
one has the result immediately. \eop
In summary, we have the following global convergence results on \refal{alg1}.
\begin{thm} One of the following three cases will arise when implementing \refal{alg1}.
(1) Step 1 does not terminate finitely for some $k\ge 0$, $\hat\rho_{\ell}\to 0$, either problem \reff{prob1} is unbounded, or problem \reff{prob1} is infeasible and problem \reff{prob2} is unbounded, and a point minimizing the $\ell_2$ norm of constraint violations is found.
(2) Step 1 terminates finitely for all $k\ge 0$, $\mu_k\to 0$ and $\rho_k\to 0$ as $k\to\infty$, problem \reff{prob2} is unbounded, problem \reff{prob1} is feasible and every cluster point of $\{y_k\}$ is a Fritz-John point of problem \reff{prob1}.
(3) Step 1 terminates finitely for all $k\ge 0$, $\mu_k\to 0$ as $k\to\infty$, and $\rho_k$ is bounded away from zero, both problems \reff{prob1} and \reff{prob2} are feasible and every cluster point of $\{y_k\}$ is a KKT point of problem \reff{prob1}.
\end{thm}\noindent {\sl Proof.} \rm The results can be obtained straightforwardly from the preceding results, Lemmas \ref{le41n}, \ref{le42n}, \ref{lem41}, and \ref{lem42}, in this section. \eop
For the reader's convenience, we summarize our global convergence results in Table \ref{tb1}.
\renewcommand{\arraystretch}{2}
\begin{table}[ht!b]
\centering
\caption{The overview on the global convergence results of \refal{alg1}.}\label{tb1}
\begin{tabular}{|c|c|c|>{\centering\arraybackslash}p{0.35\textwidth}|}
\hline
\multicolumn{1}{|c|}{\multirow{2}{*}{\refal{alg1}}} &\multicolumn{3}{c|}{Results}\\
\cline{2-4}
&\multicolumn{1}{c|}{Dual LP \reff{prob1}} &\multicolumn{1}{c|}{Primal LP \reff{prob2}} &\multicolumn{1}{c|}{Solution obtained}
\\
\hline
\multirow{2}{*}{$\hat\rho_{\ell}\to 0$, $\mu_k> 0$}
& unbounded & - &- \\
\cline{2-4}
& infeasible & unbounded &\multirow{1}{0.35\textwidth}{A point for minimizing
constraint violations of LP \reff{prob1}}\\
\hline
$\mu_k\to 0$, $\rho_k\to 0$
& feasible
& unbounded
& A Fritz-John point of LP \reff{prob1}\\
\hline
$\mu_k\to 0$, $\rho_k>0$
& feasible
& feasible
& A KKT point\\
\hline
\end{tabular}
\end{table}
\sect{Convergence rates and the complexity}
In this section, we are concerned with the convergence rate of \refal{alg1} under the situation that both problems \reff{prob1} and \reff{prob2} are feasible, which corresponds to the result (3) of the preceding global convergence theorem. Firstly, without any additional assumption, based on the theory of convex optimization \cite{Nest18}, we prove that for a given penalty parameter $\rho_k$, the convergence rate of the sequence of objective function values on the SBAL minimization subproblem is ${O}(\frac{1}{\ell})$, where $\ell>0$ is a positive integer which is also the number of iterations of Step 1. Secondly, under the regular conditions on the solution, we show that the iterative sequence $\{{\hat y}_{\ell}\}$ on the SBAL minimization subproblem is globally linearly convergent. Finally, without loss of generality, by assuming that $\rho_k$ is small enough such that in Step 1, $E^{dual}_{k+1}\le\max\{\rho_k,\mu_k\}$ for given $\rho_k$, and using the preceding global linear convergence result, we can establish the iteration complexity of our algorithm.
\begin{thm} For given $x_k$ and parameters $\mu_k$ and $\rho_k$, let $F_k(y)=L_B(y,x_k;\mu_k,\rho_k)$, $\{\hat y_{\ell}\}$ be a sequence generated by Step 1 of \refal{alg1} for minimizing $F_k(y)$, and $F_k^*=\inf_y F_k(y)$, $y_k^*=\hbox{\rm argmin}_y F_k(y)$. Then \begin{eqnarray}
F_k(\hat y_{\ell})-F_k^*\le\frac{1}{2{\ell}}\|\hat y_0-y_k^*\|_{AA^T}^2, \end{eqnarray}
where $\hat y_0$ is an arbitrary starting point. \end{thm}\noindent {\sl Proof.} \rm It follows from \refl{lem22} that \begin{eqnarray}
F_k(\hat y_{\ell+1})&\!\!\!\!\le&\!\!\!\! F_k(\hat y_{\ell})-\frac{1}{2}\|Az(\hat y_{\ell},x_k;\mu_k,\rho_k)-\rho_k b\|^2_{(AA^T)^{-1}} \nonumber\\
&\!\!\!\!\le&\!\!\!\! F_k^*+\nabla F_k(\hat y_{\ell})^T(\hat y_{\ell}-y_k^*)-\frac{1}{2}\|Az(\hat y_{\ell},x_k;\mu_k,\rho_k)-\rho_k b\|^2_{(AA^T)^{-1}} \nonumber\\
&\!\!\!\!=&\!\!\!\! F_k^*+\frac{1}{2}(\|\hat y_{\ell}-y_k^*\|_{AA^T}^2-\|\hat y_{\ell}-y_k^*-(AA^T)^{-1}\nabla F_k(\hat y_{\ell})\|_{AA^T}^2) \nonumber\\
&\!\!\!\!=&\!\!\!\! F_k^*+\frac{1}{2}(\|\hat y_{\ell}-y_k^*\|_{AA^T}^2-\|\hat y_{\ell+1}-y_k^*\|_{AA^T}^2), \nonumber\end{eqnarray}
where the second inequality follows from the convexity of $F_k(y)$, and the last equality is obtained by \reff{yiter}.
Thus, \begin{eqnarray}
\sum_{t=1}^{\ell}(F_k(\hat y_{t})-F_k^*)&\!\!\!\!\le&\!\!\!\!\sum_{t=1}^{\ell}\frac{1}{2}(\|\hat y_{t-1}-y_k^*\|_{AA^T}^2-\|\hat y_{t}-y_k^*\|_{AA^T}^2) \nonumber\\
&\!\!\!\!=&\!\!\!\!\frac{1}{2}(\|\hat y_{0}-y_k^*\|_{AA^T}^2-\|\hat y_{\ell}-y_k^*\|_{AA^T}^2), \nonumber \end{eqnarray}
which implies $F_k(\hat y_{\ell})-F_k^*\le\frac{1}{2\ell}\|\hat y_{0}-y_k^*\|_{AA^T}^2$.
\eop
In order to derive the convergence rate of the iterative sequence $\{\hphantom{a}t y_{\ell}\}$ of our method for the subproblem, we need to prove some lemmas.
\begin{lem}\label{lem0614a} For given $x_k$ and parameters $\mu_k$ and $\rho_k$, let $F_k(y)=L_B(y,x_k;\mu_k,\rho_k)$.
Then for any $u,v\in\Re^m$, \begin{eqnarray} (\nabla F_k(u)-\nabla F_k(v))^T(u-v)\ge\|\nabla F_k(u)-\nabla F_k(v)\|_{(AA^T)^{-1}}^2. \label{lem52f1}
\end{eqnarray}\end{lem}\noindent {\sl Proof.} \rm
For proving \reff{lem52f1}, we consider the auxiliary function \begin{eqnarray} G_u(v)=F_k(v)-\nabla F_k(u)^Tv, \nonumber\end{eqnarray}
where $v$ is the variable and $u$ is any given vector. Then $\nabla G_u(u)=0$ and $\nabla^2 G_u(v)=\nabla^2 F_k(v)$, which means that $G_u(v)$ is convex as $F_k(v)$ and $u$ is precisely a global minimizer of $G_u(v)$. Therefore, we have a result similar to \refl{lem21} (1), that is, for every $w,v\in\Re^m$, \begin{eqnarray}
G_u(w)\le G_u(v)+\nabla G_u(v)^T(w-v)+\frac{1}{2}(w-v)^TAA^T(w-v), \nonumber \end{eqnarray}
which implies $G_u(v)-G_u(u)\ge\frac{1}{2}\|\nabla G_u(v)\|_{(AA^T)^{-1}}^2$ for every $v\in\Re^m$. Because of $\nabla G_u(v)=\nabla F_k(v)-\nabla F_k(u)$, the preceding inequality is equivalent to \begin{eqnarray}
F_k(v)-F_k(u)-\nabla F_k(u)^T(v-u)\ge\frac{1}{2}\|\nabla F_k(v)-\nabla F_k(u)\|_{(AA^T)^{-1}}^2. \label{lem52f2}\end{eqnarray}
Similarly, one can prove \begin{eqnarray}
F_k(u)-F_k(v)-\nabla F_k(v)^T(u-v)\ge\frac{1}{2}\|\nabla F_k(v)-\nabla F_k(u)\|_{(AA^T)^{-1}}^2. \label{lem52f3}\end{eqnarray}
Summing the two sides of \reff{lem52f2} and \reff{lem52f3} brings about our desired result. \eop
In the subsequent analysis, let $y^*$ be the solution of problem \reff{prob1} and $x^*$ be the associated Lagrange multiplier vector, and $s^*=c-A^Ty^*$. Thus, $x^*\circ s^*=0$. We need the following blanket assumption.
\begin{ass}\label{ass2} Denote ${\cal I}=\{i=1,\ldots,n| x_i^*>0\}$. Suppose that the strict complementarity holds, and the columns of $A$ corresponding to the positive components of $x^*$ are linearly independent. That is, $x^*+s^*>0$ and $|{\cal I}|=m$, $B=A_{{\cal I}}A_{{\cal I}}^T$ is positive definite, where $|\cdot|$ is the cardinality of the set, $A_{{\cal I}}$ is a submatrix of $A$ consisting of $A_i,~i\in{\cal I}$. \end{ass}
Under \refa{ass2}, there exists a scalar $\delta>0$ such that, for $i\in{\cal I}$ and for all $\ell\ge 0$, $(s_{\ell i}+z_{\ell i})^{-1}z_{\ell i}\ge\delta>0$. Thus, for any $y\in\Re^m$, \begin{eqnarray}
y^TA(S+Z)^{-1}ZA^Ty&\!\!\!\!&\!\!\!\!\ge y^T(A_{{\cal I}}(S_{{\cal I}}+Z_{{\cal I}})^{-1}Z_{{\cal I}}A_{{\cal I}}^T)y \nonumber\\
&\!\!\!\!&\!\!\!\!\ge\delta y^T(A_{{\cal I}}A_{{\cal I}}^T)y\ge\delta^{'}y^Ty\ge\delta^{''}y^TAA^Ty, \nonumber \end{eqnarray}
where $\delta^{'}\le\delta\lambda_{\min}(AA^T)$ and $\delta^{''}\le\frac{\delta^{'}}{\lambda_{\max}(AA^T)}<1$.
\begin{lem} For given $x_k$ and parameters $\mu_k$ and $\rho_k$, let $F_k(y)=L_B(y,x_k;\mu_k,\rho_k)$. Under \refa{ass2}, there exists a scalar $\delta^{''}\in (0,1)$ such that, for any $u, v\in\Re^m$, \begin{eqnarray}
&\!\!\!\!&\!\!\!\! (\nabla F_k(u)-\nabla F_k(v))^T(u-v) \nonumber\\
&\!\!\!\!&\!\!\!\!\ge\frac{1}{1+\delta^{''}}\|\nabla F_k(u)-\nabla F_k(v)\|_{(AA^T)^{-1}}^2+\frac{\delta^{''}}{1+\delta^{''}}\|u-v\|_{AA^T}^2. \end{eqnarray}
\end{lem}\noindent {\sl Proof.} \rm Let $G_k(y)=F_k(y)-\frac{1}{2}\delta^{''} y^TAA^Ty$. Then $G_k(y)$ and $\frac{1}{2} (1-\delta^{''})y^TAA^Ty-G_k(y)$ are convex, which suggests that $G_k(y)$ shares similar properties with $F_k(y)$. Thus, the result of \refl{lem0614a} still holds for $G_k(y)$, i.e.,
for any $u,v\in\Re^m$, \begin{eqnarray} (\nabla G_k(u)-\nabla G_k(v))^T(u-v)\ge\|\nabla G_k(u)-\nabla G_k(v)\|_{(AA^T)^{-1}}^2. \nonumber\end{eqnarray}
Due to $\nabla G_k(y)=\nabla F_k(y)-\delta^{''}AA^Ty$, the preceding inequality can be rewritten as \begin{eqnarray}
&\!\!\!\!&\!\!\!\! (\nabla F_k(u)-\nabla F_k(v))^T(u-v) \nonumber\\
&\!\!\!\!&\!\!\!\! \ge\frac{1}{1-\delta^{''}}\|\nabla F_k(u)-\nabla F_k(v)-\delta^{''}AA^T(u-v)\|_{(AA^T)^{-1}}^2+\delta^{''}\|u-v\|_{AA^T}^2. \nonumber\end{eqnarray}
Thus, one has \begin{eqnarray} &\!\!\!\!&\!\!\!\! (\nabla F_k(u)-\nabla F_k(v))^T(u-v) \nonumber\\
&\!\!\!\!&\!\!\!\! \ge\frac{1}{1+\delta^{''}}\|\nabla F_k(u)-\nabla F_k(v)\|_{(AA^T)^{-1}}^2+\frac{\delta^{''}}{1+\delta^{''}}\|u-v\|_{AA^T}^2, \nonumber\end{eqnarray}
which completes our proof. \eop
Set $u=\hat y_{\ell}$ and $v=y_k^*$. Due to $\nabla F_k(y^*)=0$, \begin{eqnarray}\nabla F_k(\hat y_{\ell})^T(\hat y_{\ell}-y_k^*)\ge\frac{1}{1+\delta^{''}}\|\nabla F_k(\hat y_{\ell})\|_{(AA^T)^{-1}}^2+\frac{\delta^{''}}{1+\delta^{''}}\|\hat y_{\ell}-y_k^*\|_{AA^T}^2. \nonumber\end{eqnarray}
The next result shows that the sequence $\{\hat y_{\ell}\}$ can be of global linear convergence for the SBAL minimization subproblem.
\begin{thm}\label{th8n} Let $y_k^*=\hbox{\rm argmin} F_k(y)$. Under \refa{ass2}, there is a scalar $\tau\in (0,1)$ such that \begin{eqnarray}
\|\hat y_{\ell}-y_k^*\|_{AA^T}^2\le\tau^{\ell}\|\hat y_{0}-y_k^*\|_{AA^T}^2. \nonumber\end{eqnarray}
That is, $\{\hat y_{\ell}\}$ is of global linear convergence to $y_k^*$.
\end{thm}\noindent {\sl Proof.} \rm Note that
\begin{eqnarray} &\!\!\!\!&\!\!\!\!\|\hat y_{\ell+1}-y_k^*\|_{AA^T}^2 \nonumber\\
&\!\!\!\!&\!\!\!\!=\|\hat y_{\ell}-(AA^T)^{-1}\nabla F_k(\hat y_{\ell})-y_k^*\|_{AA^T}^2 \nonumber\\
&\!\!\!\!&\!\!\!\!=\|\hat y_{\ell}-y_k^*\|_{AA^T}^2-{2}\nabla F_k(\hat y_{\ell})^T(\hat y_{\ell}-y_k^*)+\|\nabla F_k(\hat y_{\ell})\|_{(AA^T)^{-1}}^2 \nonumber\\
&\!\!\!\!&\!\!\!\!\le(1-\frac{2\delta^{''}}{1+\delta^{''}})\|\hat y_{\ell}-y_k^*\|_{AA^T}^2+(1-\frac{2}{1+\delta^{''}})\|\nabla F_k(\hat y_{\ell})\|_{(AA^T)^{-1}}^2 \nonumber\\
&\!\!\!\!&\!\!\!\!=\frac{1-\delta^{''}}{1+\delta^{''}}\|\hat y_{\ell}-y_k^*\|_{AA^T}^2-\frac{1-\delta^{''}}{1+\delta^{''}}\|\nabla F_k(\hat y_{\ell})\|_{(AA^T)^{-1}}^2 \nonumber\\
&\!\!\!\!&\!\!\!\!\le\frac{1-\delta^{''}}{1+\delta^{''}}\|\hat y_{\ell}-y_k^*\|_{AA^T}^2. \nonumber\end{eqnarray}
By setting $\tau=\frac{1-\delta^{''}}{1+\delta^{''}}$, the result follows immediately. \eop
Finally, based on the preceding global linear convergence result, we can obtain a new iteration complexity result on the algorithms for linear programs.
\begin{thm} Suppose that both problems \reff{prob1} and \reff{prob2} are feasible, and \refa{ass2} holds. For $\rho_0$ sufficiently small, if \refal{alg1} is terminated when $\mu_k<\epsilon$, where $\epsilon>0$ is a pre-given tolerance, then the iteration complexities of the MM methods for the subproblem and for problem \reff{prob1} are respectively \begin{eqnarray}
T_{\rm MM}=O\left(\frac{1}{\ln\sqrt{\frac{\kappa_A+1}{\kappa_A-1}}}\ln \left(\frac{1}{\epsilon}\right)\right),\quad T_{\rm PDMM}=O\left(\frac{1}{\ln\sqrt{\frac{\kappa_A+1}{\kappa_A-1}}}\left(\ln \left(\frac{1}{\epsilon}\right)\right)^2\right).
\end{eqnarray}
\end{thm}\noindent {\sl Proof.} \rm Due to \refl{lemzp} (2), one has \begin{eqnarray} &\!\!\!\!&\!\!\!\!\|Az(\hat y_{\ell+1},x_k;\mu_k,\rho_0)-\rho_0 b\| \nonumber\\
&\!\!\!\!&\!\!\!\!=\|\nabla_y L(\hat y_{\ell+1},x_k;\mu_k,\rho_0)-\nabla_y L(y_{k}^*,x_k;\mu_k,\rho_0)\| \nonumber\\
&\!\!\!\!&\!\!\!\!\le\|\hat y_{\ell+1}-y_k^*\|_{AA^T}. \nonumber \end{eqnarray}
In order to obtain $\|Az(\hat y_{\ell+1},x_k;\mu_k,\rho_0)-\rho_0 b\|\le\mu_k\le\epsilon$, by \reft{th8n}, $T_{\rm MM}$ should satisfy \begin{eqnarray}
\sqrt{\tau}^{T_{\rm MM}}\|y_k-y_k^*\|_{AA^T}\le\epsilon, \nonumber\end{eqnarray}
where $y_k=\hat y_0$, $\tau$ is denoted in \reft{th8n} and can be replaced by $\tau=\frac{\kappa_A-1}{\kappa_A+1}$ ($\kappa_A=\lambda_{\max}(AA^T)/\lambda_{\min}(AA^T)$).
Thus, \begin{eqnarray} T_{\rm MM}\ln\frac{1}{\sqrt{\tau}}\ge\ln\frac{\|y_k-y_k^*\|_{AA^T}}{\epsilon}. \nonumber\end{eqnarray}
That is, \begin{eqnarray} T_{\rm MM}=O\left(\frac{1}{\ln\sqrt{\frac{\kappa_A+1}{\kappa_A-1}}}\ln\left(\frac{1}{\epsilon}\right)\right). \nonumber\end{eqnarray}
In addition, similarly, the number of iterations needed for driving $\mu_k<\epsilon$ is \begin{eqnarray}
T_{\rm out}\ge\frac{1}{\ln\frac{1}{\gamma}}\ln\frac{\mu_0}{\epsilon}. \nonumber\end{eqnarray} Thus, we have the estimate on the total number of iterations \begin{eqnarray} T_{\rm PDMM}=\sum_{k=1}^{T_{\rm out}}T_{\rm MM}=T_{\rm out}T_{\rm MM}=O\left(\frac{1}{\ln\sqrt{\frac{\kappa_A+1}{\kappa_A-1}}}\left(\ln \left(\frac{1}{\epsilon}\right)\right)^2\right), \end{eqnarray}
which completes our proof. \eop
\sect{Conclusion}
The simplex methods and the interior-point methods are two kinds of main and effective methods for solving linear programs. Relatively, the former is more inexpensive for every iteration but may require more iterations to find the solution, while the latter is more expensive for one iteration but the number of iterations may not be changed greatly with different problems. Theoretically, the iteration complexity of the simplex methods can be exponential on the sizes of linear programs, while the interior-point methods can be polynomial.
In this paper, we present a primal-dual majorization-minimization method for linear programs. The method originates from the application of the Hestenes-Powell augmented Lagrangian method to the logarithmic-barrier problems. A novel barrier augmented Lagrangian (SBAL) function with second-order smoothness and strict convexity is proposed. Based on the SBAL function, a majorization-minimization approach is introduced to solve the augmented Lagrangian subproblems. Distinct from the existing simplex methods and interior-point methods for linear programs, but similar to some alternate direction methods of multipliers (ADMM), the proposed method only depends on a factorization of the constant matrix independent of iterations which can be done in the preprocessing, and does not need any computation on step sizes; thus it is much more inexpensive per iteration and can be expected to be particularly appropriate for large-scale linear programs. The global convergence is analyzed without assuming a priori that either the primal or the dual problem is feasible. Under the regular conditions, based on the theory of convex optimization, we prove that our method can be of globally linear convergence. The results show that the iteration complexity of our method is dependent on the condition number of the product matrix of the coefficient matrix and its transpose.
\
\begin{thebibliography}{999}
\small
\bibitem{AIG06}
\sc M. Allain, J. Idier, and Y. Goussard, \it On global and local convergence of half-quadratic algorithms, \rm
IEEE Trans. Im. Proc., 15(2006), 1130--1142.
\bibitem{BYZ00}
\sc S.J. Benson, Y. Ye, and X. Zhang, \it Solving large-scale sparse semidefinite programs for combinatorial
optimization, \rm SIAM J. Optim. 10(2000), 443--461.
\bibitem{BM14}
\sc E.G. Birgin and J.M. Martinez, \it Practical Augmented Lagrangian Methods for Constrained
Optimization, \rm Book Series: Fundamentals of Algorithms, SIAM, PA 19104-2688 USA, 2014.
\bibitem{CDZ15}
\sc N. Chatzipanagiotis, D. Dentcheva and M.M. Zavlanos, \it An augmented Lagrangian method for distributed optimization, \rm Math. Program., 152(2015), 405--434.
\bibitem{cohen96}
\sc L.D. Cohen, \it Auxiliary variables and two-step iterative algorithms in computer vision problems, \rm J.
Math. Im. Vision, 6(1996), 59--83.
\bibitem{CMTH19}
\sc Y. Cui, K. Morikuni, T. Tsuchiya, and K. Hayami, \it Implementation of interior-point methods for LP based on Krylov subspace iterative solvers with inner-iteration preconditioning, \rm Comput. Optim. Appl. 74(2019), 143--176.
\bibitem{DLS17}
\sc Y.-H. Dai, X.-W. Liu and J. Sun, \it A primal-dual interior-point method capable of rapidly
detecting infeasibility for nonlinear programs, \rm J. Ind. Manag. Optim., 2018, doi: 10.3934/jimo.2018190.
\bibitem{Davis06}
\sc T.A. Davis, \it Direct Methods for Sparse Linear Systems, SIAM Fundamentals of Algorithms, \rm
SIAM, Philadephia, 2006.
\bibitem{FBN06}
\sc M. Figueiredo, J. Bioucas-Dias and R. Nowak, \it Majorization-minimization algorithms for wavelet-based image restoration, \rm IEEE Trans. Image. Process., 16(2006), 2980--2991.
\bibitem{FGZ14}
\sc K. Fountoulakis, J. Gondzio, and P. Zhlobich, \it Matrix-free interior point method for compressed
sensing problems, \rm Math. Program. Comput. 6(2014), 1--31.
\bibitem{GKR20}
\sc P.E. Gill, V. Kungurtsev and D.P. Robinson, \it A shifted primal-dual penalty-barrier method for
nonlinear optimization, \rm SIAM J. Optim., 30 (2020), 1067--1093.
\bibitem{GR12}
\sc P.E. Gill and D.P. Robinson, \it A primal-dual augmented Lagrangian, \rm Comput. Optim. Appl., 51 (2012), 1--25.
\bibitem{GP99}
\sc D. Goldfarb, R. A. Polyak, K. Scheinberg and I. Yuzefovich, \it A modified barrier-augmented
Lagrangian method for constrained minimization, \rm Comput. Optim. Appl., 14 (1999), 55--74.
\bibitem{Gon12a}
\sc J. Gondzio, \it Interior point methods 25 years later, \rm Eur. J. Oper. Res. 218(2012), 587--601.
\bibitem{Gon12b}
\sc J. Gondzio, \it Matrix-free interior point method, \rm Comput.Optim.Appl. 51(2012), 457--480.
\bibitem{hesten}
\sc M.R. Hestenes, \it Multiplier and gradient methods. \rm J. Optim. Theory Appl. 4(1969), 303--320.
\bibitem{HL17}
\sc M.Y. Hong and Z.Q. Luo, \it On the linear convergence of the alternating direction method of multipliers, \rm Math. Program., 162 (2017), 165--199.
\bibitem{HFD16}
\sc B. Houska, J. Frasch and M. Diehl, \it An augmented Lagrangian based algorithm for distributed nonconvex optimization, \rm SIAM J. Optim., 26(2016), 1101--1127.
\bibitem{karmar}
\sc N. Karmarkar, \it A new polynomial-time algorithm for linear programming, \rm
Combinatorics, 4 (1984), 373--395.
\bibitem{KleeM72}
\sc V. Klee and G.J. Minty, \it How good is the simplex algorithm?\rm in Inequalities, O. Shisha, ed.,
Academic Press, New York, 1972, 159--175.
\bibitem{Lange}
\sc K. Lange, \it MM Optimization Algorithms, \rm SIAM, 2016.
\bibitem{LMYZ}
\sc T. Lin, S. Ma, Y. Ye and S. Zhang, \it An ADMM-based
interior-point method for large-scale linear programming, \rm Optim. Methods Soft.,
36 (2021), 389--424.
\bibitem{LiuDai18}
\sc X.-W. Liu and Y.-H. Dai, \it A globally convergent primal-dual interior-point relaxation
method for nonlinear programs, \rm Math. Comput., 89 (2020), 1301--1329.
\bibitem{LDHmm}
\sc X.-W. Liu, Y.-H. Dai and Y.-K. Huang, \it A primal-dual interior-point relaxation method with
global and rapidly local convergence for nonlinear programs, \rm Optimization Online, April 2020. To appear in Math. Meth. Oper. Res.
\bibitem{LDHmc}
\sc X.-W. Liu, Y.-H. Dai, Y.-K. Huang and J. Sun, \it A novel augmented Lagrangian
method of multipliers for optimization with general inequality constraints, \rm arXiv:2106.15044 [math.OC], June 2021. To appear in Math. Comput.
\bibitem{LiuSun01}
\sc X.-W. Liu and J. Sun, \it A robust primal-dual interior point
algorithm for nonlinear programs, \rm SIAM J. Optim., {14} (2004),
1163--1186.
\bibitem{LiuYua07}
\sc X.-W. Liu and Y.-X. Yuan, \it A null-space primal-dual
interior-point algorithm for nonlinear optimization with nice
convergence properties, \rm Math. Program., 125(2010), 163--193.
\bibitem{MK08}
\sc G.J. McLachlan and T. Krishnan, \it The EM Algorithm and Extensions, \rm 2nd ed. Wiley, Hoboken, NJ, 2008.
\bibitem{Nest18}
\sc Y. Nesterov, \it Lectures on Convex Optimization, \rm 2nd ed., Berlin: Springer, 2018.
\bibitem{NocWri99}
\sc J. Nocedal and S. Wright, \it Numerical Optimization, \rm Springer-Verlag, New York, 1999.
\bibitem{OR70}
\sc J.M. Ortega and W.C. Rheinboldt, \it Iterative Solution of Nonlinear Equations in Several Variables, \rm
Academic Press, New York, 1970.
\bibitem{powell}
\sc M.J.D. Powell, \it A method for nonlinear constraints in minimization problems. In Fletcher, R. (ed.)
Optimization, \rm pp. 283--298. Academic, New York (1972).
\bibitem{QBP15}
\sc T. Qiu, P. Babu and D.P. Palomar, \it PRIME: phase retrieval via majorization-minimization, \rm
arXiv preprint:1511.01669, 2015.
\bibitem{rockaf1}
\sc R.T. Rockafellar, \it A dual approach to solving nonlinear programming problems by unconstrained
optimization, \rm Math. Program. 5 (1973), 354--373.
\bibitem{SunYua06}
\sc W. Sun and Y. Yuan, \it Optimization Theory and Methods: Nonlinear Programming, \rm Springer, New York, 2006.
\bibitem{wright97}
\sc S.J. Wright, \it Primal-Dual Interior-Point Methods, \rm SIAM Publications, Philadelphia, Pa, 1997.
\bibitem{ye}
\sc Y. Ye, \it Interior Point Algorithms: Theory and Analysis, \rm John Wiley \& Sons, Inc., 1997.
\bibitem{WZW}
\sc Z. Wen, D. Goldfarb and W. Yin, \it Alternating direction augmented Lagrangian methods
for semidefinite programming, \rm Math. Prog. Comp., 2 (2010), 203--230.
\end{thebibliography}
\end{document} |
\begin{document}
\title{Revisiting Over-smoothing in BERT from the Perspective of Graph}
\begin{abstract}
Recently over-smoothing phenomenon of Transformer-based models is observed in both vision and language fields.
However, no existing work has delved deeper to further investigate the main cause
of this phenomenon.
In this work, we make the attempt to analyze the over-smoothing problem from the perspective of graph, where such problem was first discovered and explored. Intuitively,
the self-attention matrix can be seen as a normalized adjacent matrix of a corresponding graph. Based on the above connection, we provide some theoretical analysis and find that layer normalization plays a key role in the over-smoothing issue of Transformer-based models. Specifically, if the standard deviation of layer normalization is sufficiently large, the output of Transformer stacks will converge to a specific low-rank subspace and result in over-smoothing.
To alleviate the over-smoothing problem, we consider hierarchical fusion strategies, which combine the representations from different layers adaptively to make the output more diverse. Extensive experiment results on various data sets illustrate the effect of our fusion method.
\end{abstract}
\section{Introduction}
Over the past few years, Transformer \citep{vaswani2017attention} has been widely used
in various natural language processing (NLP) tasks,
including
text classification \citep{wang2018glue}, text translation \citep{ott2018scaling},
question answering \citep{rajpurkar2016squad,rajpurkar2018know} and text generation \citep{brown2020language}. The recent application of Transformer in the computer vision (CV) field also demonstrates the potential capacity of the Transformer architecture. For instance, Transformer variants have been successfully used for image classification \citep{dosovitskiy2020image}, object detection \citep{carion2020end} and semantic segmentation \citep{strudel2021segmenter}.
Three fundamental descendants from Transformer include
BERT \citep{devlin2019bert}, RoBERTa \citep{liu2019roberta} and ALBERT \citep{lan2019albert},
which
achieve state-of-the-art performance
on a wide range of NLP tasks.
Recently, \cite{dong2021attention} observes the ``token uniformity'' problem,
which reduces the capacity of
Transformer-based architectures
by making all token representations identical.
They claim that pure self-attention (SAN) modules cause token uniformity,
but they do not discuss whether the token uniformity problem still exists in Transformer blocks.
On the other hand,
\cite{gong2021improve} observe
the ``over-smoothing'' problem
for ViT \citep{dosovitskiy2020image},
in
that different input patches are mapped to a similar latent representation.
To prevent loss of information, they introduce additional loss functions to encourage diversity and successfully improve model performance by suppressing over-smoothing.
Moreover, the ``overthinking'' phenomenon, indicating that shallow representations are better than deep representations, is also observed in \citep{zhou2020bert,kaya2019shallow}. As discussed in Section~\ref{sec:exist}, this phenomenon has some inherent connection with over-smoothing.
In this paper, we use ``over-smoothing'' to unify the above issues, and refer
this as the phenomenon that the model performance is deteriorated because different inputs are mapped to a similar representation.
As the over-smoothing problem is first studied in the graph neural network (GNN) literature
\citep{li2018deeper,xu2018representation,zhao2019pairnorm}, in this paper, we attempt to explore the cause of such problem by building
a relationship between Transformer blocks and graphs. Specifically, we consider
the self-attention matrix as the normalized adjacency matrix of a weighted graph, whose nodes are the tokens in a sentence.
Furthermore, we consider the inherent connection between BERT and graph convolutional networks \citep{kipf2017semi}. Inspired by the over-smoothing problem in GNN,
we study over-smoothing in BERT from a theoretical view via matrix projection.
As opposed to
\cite{dong2021attention}, where the authors claim that layer normalization is irrelevant to over-smoothing, we
find that layer normalization \citep{ba2016layer} plays an important role in
over-smoothing. Specifically, we theoretically prove that, if the standard deviation in layer normalization is sufficiently
large, the outputs of the Transformer stacks will converge to a low-rank subspace,
resulting in over-smoothing. Empirically, we verify that the conditions hold for a certain number of samples for a pre-trained and fine-tuned BERT model \citep{devlin2019bert}, which is consistent with our above observations.
To alleviate the over-smoothing problem, we propose a hierarchical fusion strategy
that adaptively fuses representations from different layers.
Three fusion approaches are used: (i) Concat Fusion, (ii) Max Fusion, and (iii) Gate Fusion.
The proposed method reduces the similarity between tokens and outperforms BERT baseline on the GLUE \citep{wang2018glue}, SWAG \citep{zellers2018swag} and SQuAD \citep{rajpurkar2016squad,rajpurkar2018know} data sets.
In summary, the contributions of this paper are as follows: (i) We develop the
relationship between self-attention and graphs for a better understanding of
over-smoothing in BERT. (ii) We provide a theoretical analysis of over-smoothing in
the BERT model, and empirically verify the theoretical results. (iii) We propose
hierarchical fusion strategies that adaptively combine different layers to alleviate
over-smoothing. Extensive experimental results verify our methods' effectiveness.
\section{Related Work}
\subsection{Transformer Block and Self-Attention \label{sec:trans_block}}
Transformer block is a basic component in Transformer model \citep{vaswani2017attention}.
Each Transformer block consists of a self-attention layer and a feed-forward layer.
Let $\boldsymbol{X}\in\mathbb{R}^{n\times d}$ be the input to a Transformer block, where $n$ is the number of input tokens and $d$ is the embedding size.
The self-attention
layer output
can be written
as:
\begin{align}
Attn(\boldsymbol{X}) &= \boldsymbol{X}+\sum_{k=1}^{h}\sigma(\boldsymbol{X}\boldsymbol{W}_{k}^{Q}(\boldsymbol{X}\boldsymbol{W}_{k}^{K})^{\top} )\boldsymbol{XW}_{k}^{V}\boldsymbol{W}_{k}^{O\top}
=\boldsymbol{X}+\sum_{k=1}^{h}\hat{\boldsymbol{A}}_{k}\boldsymbol{X}\boldsymbol{W}_{k}^{VO}, \label{eq:attn}
\end{align}
where $h$ is the number of heads,
$\sigma$ is
the softmax function, and
$\boldsymbol{W}_{k}^{Q},\boldsymbol{W}_{k}^{K},\boldsymbol{W}_{k}^{V},\boldsymbol{W}_{k}^{O}\in\mathbb{R}^{d\times
d_{h}}$ (where $d_{h}=d/h$ is the dimension of a single-head output) are weight matrices for the query, key, value, and output,
respectively of the $k$th head. In particular,
the self-attention matrix
\begin{equation} \label{eq:matrix}
\hat{\boldsymbol{A}}=\sigma(\boldsymbol{XW}^{Q}(\boldsymbol{XW}^{K})^{\top})=\sigma(\boldsymbol{QK}^{\top})
\end{equation}
in (\ref{eq:attn})
plays a key role in the self-attention layer
\citep{park2019sanvis,gong2019efficient,kovaleva2019revealing}.
As in \citep{yun2020n,shi2021sparsebert,dong2021attention}, we drop the scaling
factor $1/\sqrt{d_h}$ to simplify the analysis.
The feed-forward layer usually has
two fully-connected (FC) layers
with residual connection:
\[
FF(\boldsymbol{X})=Attn(\boldsymbol{X})+ReLU(Attn(\boldsymbol{X})\boldsymbol{W}_{1}+\boldsymbol{b}_{1})\boldsymbol{W}_{2}+\boldsymbol{b}_{2},
\]
where
$\boldsymbol{W}_{1}\in\mathbb{R}^{d\times
d_{\text{ff}}},\boldsymbol{W}_{2}\in\mathbb{R}^{d_{\text{ff}}\times d}$ ($d_{\text{ff}}$ is the size of the intermediate layer)
are the weight matrices, and
$\boldsymbol{b}_{1},\boldsymbol{b}_{2}$
are the biases. Two layer normalization \citep{ba2016layer} operations are
performed after the self-attention layer and fully-connected layer, respectively.
\subsection{Over-smoothing}\label{sec:over-smooth}
In graph neural networks, over-smoothing refers to the problem that the performance deteriorates as representations of all the nodes become similar
\citep{li2018deeper,xu2018representation,huang2020tackling}. Its main cause is
the stacked aggregation layer using the same adjacency matrix. Recently, several approaches have been proposed to alleviate the over-smoothing problem.
\cite{xu2018representation} propose a jumping knowledge network for better structure-aware representation, which flexibly leverages different neighborhood ranges.
ResGCN
\citep{li2019deepgcns}
adapts the residual connection and dilated convolution in
the graph
convolutional network (GCN), and successfully scales the GCN to $56$ layers.
\cite{zhao2019pairnorm}
propose
PairNorm,
a novel normalization layer, that prevents node embeddings from becoming too similar.
DropEdge
\citep{rong2019dropedge,huang2020tackling}
randomly removes edges from the input graph at each training epoch,
and reduces the effect of over-smoothing.
Unlike graph neural networks, over-smoothing in Transformer-based architectures has not been discussed in detail.
\cite{dong2021attention}
introduce
the
``token-uniformity''
problem
for self-attention, and show that skip
connections and multi-layer perceptron can mitigate this problem. However,
\cite{gong2021improve} still observe over-smoothing
on the Vision Transformers \citep{dosovitskiy2020image}.
\section{Does Over-smoothing Exist in BERT?} \label{sec:exist}
\begin{figure}
\caption{Over-smoothing in BERT models.}
\label{subfig:similarity}
\label{subfig:overthinking}
\label{subfig:motivation}
\label{fig:1}
\end{figure}
In this section, we first explore the
existence of
over-smoothing
in BERT, by
measuring the similarity between tokens
in each
Transformer layer.
Specifically, we use the token-wise cosine similarity \citep{gong2021improve} as our similarity measure:
\[ \text{CosSim}=\frac{1}{n(n-1)}\sum_{i\neq j}\frac{\boldsymbol{h}_i^\top
\boldsymbol{h}_j}{\Vert \boldsymbol{h}_i\Vert_2\Vert
\boldsymbol{h}_j\Vert_2}, \]
where $n$ is the number of tokens, $\boldsymbol{h}_i$ and $\boldsymbol{h}_j$ are
two representations of different tokens, and $\Vert\cdot\Vert_2$ is the Euclidean norm.
Following \cite{dong2021attention}, we use
WikiBio \citep{lebret2016neural} as input to the following Transformer-based models fine-tuned on the SQuAD data set \citep{rajpurkar2018know}: (i) BERT
\citep{devlin2019bert}, (ii) RoBERTa \citep{liu2019roberta} and (iii) ALBERT
\citep{lan2019albert}.\footnote{Our implementation is based on the HuggingFace’s
Transformers library \citep{wolf2020Transformers}.}
For comparison,
all three models are stacked with $12$ blocks.
We calculate each \textit{CosSim} for each data sample and
show
the average and standard derivation of \textit{CosSim} values over all WikiBio data.
In the figures, layer $0$ represents original input token representation, and layer $1$-$12$ represents the corresponding transformer layers.
As shown in Figure~\ref{subfig:similarity},
the original token representations
are different from each
other, while token similarities are high in the last layer. For instance, the average token-wise
cosine similarity of the last layer of ALBERT and RoBERTa are both larger than $90\%$.
To illustrate the relationship between ``over-thinking''
and ``over-smoothing'',
we compare the token-wise cosine similarity
at each layer
with the corresponding
error rate.
As for the corresponding
error rate of layer $i$, we use the representations from layer $i$ as the final output and fine-tune the classifier.
Following \cite{zhou2020bert}, we experiment with ALBERT \citep{lan2019albert} fine-tuned on the MRPC
data set \citep{dolan2005automatically} and use their error rate results for convenience. As shown in
Figure~\ref{subfig:overthinking},
layer $10$ has the lowest cosine similarity and error rate. At layers $11$ and
$12$,
the tokens
have larger cosine similarities,
making them harder to distinguish and resulting in the performance drop.
Thus, ``over-thinking'' can be explained by ``over-smoothing''.
A direct consequence of over-smoothing is that the performance cannot be improved
when the model gets deeper,
since the individual tokens are no longer distinguishable.
To illustrate this, we increase the number of layers in BERT to 24
while keeping the other settings. As shown in
Figure~\ref{subfig:motivation},
the performance of vanilla BERT
cannot improve as
the model gets deeper. In contrast,
the proposed hierarchical fusion
(as will be discussed in Section~\ref{sec:method})
consistently
outperforms
the baseline,
and
has better and better performance
as the
model
gets
deeper.
Based on these observations,
we conclude that the
over-smoothing problem still exists in BERT.
\section{Relationship between Self-Attention and Graph} \label{sec:relationship}
Since over-smoothing is first discussed in the graph neural network
literature \citep{li2018deeper,zhao2019pairnorm},
we attempt to understand its cause from a graph perspective
in this section.
\subsection{Self-Attention vs ResGCN} \label{sec:formulation}
Given a Transformer block,
construct a weighted graph $\mathcal{G}$ with
the input tokens
as nodes
and
$\exp(\boldsymbol{Q}_i^\top \boldsymbol{K}_j)$ as the $(i,j)$th entry of its
adjacency matrix $\boldsymbol{A}$.
By
rewriting
the self-attention matrix
$\hat{\boldsymbol{A}}$ in (\ref{eq:matrix})
as
$\hat{A}_{i,j}=\sigma(\boldsymbol{QK}^{\top})_{i,j}=\exp(\boldsymbol{Q}_i^\top
\boldsymbol{K}_{j})/\sum_l\exp(\boldsymbol{Q}_i^\top \boldsymbol{K}_{l})$,
$\hat{\boldsymbol{A}}$
can thus be viewed as
$\mathcal{G}$'s
normalized adjacency matrix
\citep{von2007tutorial}. In other words,
$\hat{\boldsymbol{A}}=\boldsymbol{D}^{-1}\boldsymbol{A}$, where
$\boldsymbol{D}=\text{diag}(d_1, d_2, \dots, d_n)$ and $d_i=\sum_j A_{i,j}$.
Figure~\ref{fig:relationship} shows an example for
the sentence ``worth the effort to watch." from the SST-2 data set
\citep{socher2013recursive} processed by BERT.
Note that graph convolutional network
combined with residual connections (ResGCN) \citep{kipf2017semi} can be expressed as follows.
\begin{equation}
ResGCN(\boldsymbol{X}) = \boldsymbol{X}+ ReLU(\boldsymbol{D}^{-1/2}\boldsymbol{AD}^{-1/2}\boldsymbol{XW})=\boldsymbol{X}+ ReLU(\hat{\boldsymbol{A}}\boldsymbol{XW}),
\end{equation}
which has a similar form to the self-attention layer in
Eq.~(\ref{eq:attn}).
By comparing self-attention module with ResGCN,
we have the following observations:
(i) Since
$A_{i,j}\neq A_{j,i}$ in general, $\mathcal{G}$ in self-attention is a directed graph;
(ii)
$\hat{\boldsymbol{A}}=\boldsymbol{D}^{-1}\boldsymbol{A}$ in self-attention is the random walk
normalization
\citep{chung1997spectral}, while
GCN usually uses
the symmetric normalization version
$\hat{\boldsymbol{A}}=\boldsymbol{D}^{-1/2}\boldsymbol{AD}^{-1/2}$; (iii) The
attention matrices
constructed
at different Transformer layers
are different, while in
typical graphs,
the adjacency matrices are usually
static.
\begin{figure}
\caption{Illustration of self-attention and the corresponding graph
$\mathcal{G}
\label{subfig:graph}
\label{subfig:A}
\label{subfig:hat_A}
\label{fig:relationship}
\end{figure}
\subsection{Unshared Attention Matrix vs Shared Attention Matrix \label{sec:ashare}}
As discussed in Section~\ref{sec:over-smooth},
over-smoothing in graph neural networks is
mainly due to
the repeated aggregation operations using the
same adjacency matrix.
To compare the
self-attention matrices ($\hat{\boldsymbol{A}}$'s) at different Transformer layers,
we first flatten the multi-head attention and then measure the cosine similarity
between $\hat{\boldsymbol{A}}$'s
at successive
layers.
Experiment
is performed
with BERT \citep{devlin2019bert}, RoBERTa
\citep{liu2019roberta} and ALBERT \citep{lan2019albert} on the WikiBio data
set
\citep{lebret2016neural}.
Figure~\ref{fig:sim1}
shows
the cosine similarities obtained. As can be seen,
the similarities
at the last few
layers are high,\footnote{For example,
in BERT,
the attention matrices
$\hat{\boldsymbol{A}}$'s for the last $8$ layers are very similar.
}
while those at the first few layers are different from each other.
In other words,
the attention patterns at the first few layers are changing, and become stable at
the upper layers.
\begin{wrapfigure}{r}{7cm}
\includegraphics[width=0.48\columnwidth]{figures/attn_sim.png}
\caption{Consine similarity between the attention matrices
$\hat{\boldsymbol{A}}$'s
at layer $i$ and its next higher
layer.\label{fig:sim1}}
\end{wrapfigure}
In the following, we
focus on BERT and
explore how many layers can
share the same
self-attention matrix.
Note that this is different from
ALBERT,
which shares model parameters
instead of attention matrices.
Results are shown in
Table~\ref{tab:ashare}.
As can be seen, sharing attention matrices among the last $8$ layers (i.e.,
layers 5-12) does not harm
model performance. This is consistent with the observation in
Figure~\ref{fig:sim1}.
Note that sharing attention matrices not only reduces
the number of parameters
in the self-attention module, but also
makes the model more efficient by
reducing the
computations
during training and inference.
As shown in Table~\ref{tab:ashare}, BERT (5-12) reduces
$44.4\%$ FLOPs in
the self-attention modules compared with the vanilla BERT, while still achieving
comparable
average GLUE scores.
\begin{table}[t]
\caption{Performance
(\%)
on the GLUE development set
by the original BERT (top row) and various BERT variants with different degrees of
self-attention matrix
sharing.
Numbers in parentheses are the
layers
that share
the self-attention matrix
(e.g., BERT (1-12) means that
the $\hat{\boldsymbol{A}}$'s from
layers
1-12
are shared).
The last column shows
the FLOPs in the self-attention modules. \label{tab:ashare}}
\centering
\resizebox{0.99\textwidth}{!}{
\begin{tabular}{lcccccccccc}
\hline
& MNLI (m/mm) & QQP & QNLI & SST-2 & COLA & STS-B & MRPC & RTE & Average & FLOPs \tabularnewline
\hline
BERT & 85.4/85.8 & 88.2 & 91.5 & 92.9 & 62.1 & 88.8 & 90.4 & 69.0 & 83.8 & $2.7$G\tabularnewline
BERT (11-12) & 84.9/85.0 & 88.1 & 91.0 & 93.0 & 62.3 & 89.7 & 91.1 & 70.8 & 84.0 & $2.4$G \tabularnewline
BERT (9-12) & 85.3/85.1 & 88.1 & 90.1 & 92.9 & 62.6 & 89.3 & 91.2 & 68.5 & 83.7 & $2.1$G\tabularnewline
BERT (7-12) & 84.2/84.8 & 88.0 & 90.6 & 92.1 & 62.7 & 89.2 & 90.5 & 68.2 & 83.4 & $1.8$G \tabularnewline
BERT (5-12) & 84.0/84.3 & 88.0 & 89.7 & 92.8 & 64.1 & 89.0 & 90.3 & 68.2 & 83.4 & $1.5$G \tabularnewline
BERT (3-12) & 82.5/82.4 & 87.5 & 88.6 & 91.6 & 57.0 & 87.9 & 88.4 & 65.7 & 81.3 & $1.2$G\tabularnewline
BERT (1-12) & 81.3/81.7 & 87.3 & 88.5 & 92.0 & 57.7 & 87.4 & 87.5 & 65.0 & 80.9 & $1.1$G \tabularnewline
\hline
\end{tabular}}
\end{table}
\section{Over-smoothing in BERT}
In this section, we
analyze the over-smoothing problem in BERT
theoretically,
and then verify the result empirically.
\subsection{Theoretical Analysis}
Our analysis is based on matrix projection.
We define a subspace $\mathcal{M}$, in which each row vector of the element in
this subspace is identical.
\begin{definition}
Define $\mathcal{M}:=\{\boldsymbol{Y}\in\mathbb{R}^{n\times d}|\boldsymbol{Y}=\boldsymbol{eC}, \boldsymbol{C}\in\mathbb{R}^{1\times d}\}$ as a subspace in $\mathbb{R}^{n\times d}$, where $\boldsymbol{e}=[1, 1, \dots, 1]^\top\in\mathbb{R}^{n\times1}$, $n$ is the number of tokens and $d$ is the dimension of token representation.
\end{definition}
Each $\boldsymbol{Y}$ in subspace $\mathcal{M}$ suffers from the over-smoothing issue since the representation of each token is $\boldsymbol{C}$, which is the same with each other.
We define the distance between matrix $\boldsymbol{H}\in\mathbb{R}^{n\times d}$
and $\mathcal{M}$ as $d_\mathcal{M}(H):=\min_{\boldsymbol{Y}\in\mathcal{M}} \Vert
\boldsymbol{H}-\boldsymbol{Y}\Vert_F$, where $\Vert\cdot\Vert_F$ is the Frobenius norm. Next, we investigate the distance between the output of layer $l$ and subspace $\mathcal{M}$.
We have the following Lemma.
\begin{lemma} \label{lemma:1}
For self-attention matrix $\hat{\boldsymbol{A}}$, any $\boldsymbol{H},\boldsymbol{B}\in \mathbb{R}^{n\times d}$ and $\alpha_1, \alpha_2 \geq 0$, we have:
\begin{align}
d_\mathcal{M}(\boldsymbol{HW}) &\leq sd_\mathcal{M}(\boldsymbol{H}), \\
d_\mathcal{M}(\text{ReLU}(\boldsymbol{H})) &\leq d_\mathcal{M}(\boldsymbol{H}), \\
d_\mathcal{M}(\alpha_1 \boldsymbol{H}+\alpha_2 \boldsymbol{B}) &\leq \alpha_1 d_\mathcal{M}(\boldsymbol{H}) + \alpha_2 d_\mathcal{M}(\boldsymbol{B}), \\
d_\mathcal{M}(\hat{\boldsymbol{A}}\boldsymbol{H}) &\leq \sqrt{\lambda_{\max}} d_\mathcal{M}(\boldsymbol{H}) \label{eq:novel},
\end{align}
where $\lambda_{\max}$ is the
largest eigenvalue of
$\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$
and $s$ is the largest singular value of $\boldsymbol{W}$.
\end{lemma}
\begin{figure}
\caption{The illustration of over-smoothing problem. Recursively, $\boldsymbol{H}
\label{fig:illustration}
\end{figure}
Using Lemma~\ref{lemma:1}, we have the following Theorem.
\begin{theorem} \label{theorem:v}
For a BERT block with $h$ heads, we have
\begin{equation}
d_\mathcal{M}(\boldsymbol{H}_{l+1})\leq vd_\mathcal{M}(\boldsymbol{H}_l),
\end{equation}
where $v=(1+s^2)(1+\sqrt{\lambda} hs)/(\sigma_1\sigma_2)$, $s>0$ is the largest element of all singular values of all $\boldsymbol{W}_l$,
$\lambda$ is the largest eigenvalue of all
$\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$
for each self-attention matrix $\hat{\boldsymbol{A}}$, and $\sigma_{1}$,
$\sigma_{2}$ are the minimum standard deviation
for two layer normalization operations.
\end{theorem}
The proof is in Appendix~\ref{app:a}.
Theorem~\ref{theorem:v}
shows that
if $v < 1$ (i.e., $\sigma_1\sigma_2>(1+s^2)(1+\sqrt{\lambda} hs)$), the output of layer $l+1$ will be closer to $\mathcal{M}$ than the output of layer $l$.
An illustration of Theorem~\ref{theorem:v}
is shown
in Figure~\ref{fig:illustration}. $\boldsymbol{H}_0$ is the graph
corresponding to the input layer.
Initially,
the
token representations are very different (indicated by the different colors of
the nodes).
Recursively, $\boldsymbol{H}_l$ will converge
to the subspace $\mathcal{M}$ if $v<1$, and all representations become the same, resulting in over-smoothing.
\textbf{Remark}
Though we only focus on the case $v<1$,
over-smoothing may still exist if $v\geq 1$.
As can be seen, layer normalization plays an important role for the convergence rate $v$.
Interestingly, \cite{dong2021attention} claim that layer normalization plays no
role in token uniformity,
which seems to conflict with the conclusion in Theorem~\ref{theorem:v}. However,
note that the matrix rank cannot indicate similarity between tokens completely because matrix rank is discrete while similarity is continuous. For instance, given two token embeddings $\boldsymbol{h}_i$ and $\boldsymbol{h}_j$, the
matrix $[\boldsymbol{h}_i, \boldsymbol{h}_j]^\top$ has
rank
$2$ only if $\boldsymbol{h}_i \neq \boldsymbol{h}_j$. In contrast, the cosine similarity between tokens is $\frac{\boldsymbol{h}_i^\top \boldsymbol{h}_j}{\Vert\boldsymbol{h}_i\Vert_2\Vert\boldsymbol{h}_j\Vert_2}$.
As discussed in Section~\ref{sec:formulation}, GCN uses the symmetric normalization version $\hat{\boldsymbol{A}}=\boldsymbol{D}^{-1/2}\boldsymbol{AD}^{-1/2}$, so that the target subspace $\mathcal{M}':=\{\boldsymbol{Y}\in\mathbb{R}^{n\times d}|\boldsymbol{Y}=\boldsymbol{D^{1/2}eC}, \boldsymbol{C}\in\mathbb{R}^{1\times d}\}$ depends on the adjacency matrix \citep{huang2020tackling}.
In contrast, our subspace $\mathcal{M}$ is
independent of $\hat{\boldsymbol{A}}$ thanks to its random walk normalization.
Thus, Theorem~\ref{theorem:v} can be applied to the vanilla BERT
even though its attention matrices $\hat{\boldsymbol{A}}$ are not similar across layers.
\subsection{Empirical Verification \label{sec:emp}}
Theorem~\ref{theorem:v} illustrates that the magnitude of $\sigma_1\sigma_2$ is important for the over-smoothing issue. If $\sigma_1\sigma_2>(1+s^2)(1+\sqrt{\lambda} hs)$, the output will be closer to the subspace $\mathcal{M}$ and thus suffer from over-smoothing.
Since $s$ is usually small due to the $\ell_2$-penalty during training
\citep{huang2020tackling}, we neglect the effect of $s$ and compare
$\sigma_1\sigma_2$ with $1$ for simplicity.
To verify the theoretical results, we visualize $\sigma_1\sigma_2$ in different
fine-tuned BERT models. Specifically, we take the development set data of STS-B \citep{cer2017semeval}, CoLA \citep{warstadt2019neural}, SQuAD \citep{rajpurkar2016squad} as input
to the fine-tuned models and visualize the distribution of $\sigma_1\sigma_2$ at the last layer using kernel density estimation \citep{rosenblatt1956remarks}.
\begin{figure}
\caption{The estimated distribution of $\sigma_1\sigma_2$ for different fine-tuned models.\label{fig:var}
\label{subfig:var_stsb}
\label{subfig:var_cola}
\label{subfig:var_squad}
\label{fig:var}
\end{figure}
Results are shown in Figure~\ref{fig:var}.
As can be seen, the distributions of $\sigma_1\sigma_2$ can be very different
across data sets. For STS-B \citep{cer2017semeval}, $\sigma_1\sigma_2$ of all
data is larger than $1$, which means that over-smoothing is serious for this data set. For CoLA \citep{warstadt2019neural} and SQuAD
\citep{rajpurkar2016squad}, there also exists a fraction of samples satisfying
$\sigma_1\sigma_2>1$.
\section{Method} \label{sec:method}
From our proof in Appendix~\ref{app:a}, we figure out that the main reason is
the post-normalization scheme in BERT. In comparison, to train a 1000-layer GCN, \cite{li2021training} instead apply pre-normalization with skip connections to ensure $v>1$.
However, the performance of pre-normalization is not better than post-normalization for layer normalization empirically \citep{he2021realformer}. In this section, we preserve the post-normalization scheme and propose a hierarchical fusion strategy to alleviate the over-smoothing issue.
Specifically,
since only deep layers suffer from the over-smoothing issue, we allow the model select representations from
both shallow layers and deep layers as final output.
\subsection{Hierarchical Fusion Strategy}
\textbf{Concat Fusion}
We first consider a simple and direct layer-wise Concat Fusion approach. Considering a $L$-layer model, we first concatenate the representations $\boldsymbol{H}_k$ from each layer $k$ to generate a matrix $[\boldsymbol{H}_1, \boldsymbol{H}_2, \dots, \boldsymbol{H}_L]$ and then apply a linear mapping to generate the final representation $\sum_{k=1}^L\alpha_k\boldsymbol{H}_k$.
Here $\{\alpha_k\}$ are model parameters independent with inputs.
Since this scheme requires preserving feature maps from all layers, the memory cost will be huge as the model gets deep.
\textbf{Max Fusion}
Inspired by the idea of the widely adopted max-pooling mechanism, we construct the final output by taking the maximum value across all layers for each dimension of the representation.
Max Fusion is an adaptive fusion mechanism since the model can dynamically decide the important layer for each element in the representation.
Max Fusion is the most flexible strategy, since it does not require learning any additional parameters and is more efficient in terms of speed and memory.
\textbf{Gate Fusion} Gate mechanism is commonly used for information propagation in natural language processing field \citep{cho2014learning}. To exploit the advantages from different semantic levels, we propose a vertical gate fusion module, which predicts the respective importance of token-wise representations from different layers and
aggregate
them adaptively.
Given token representations $\{\boldsymbol{H}^{t}_{k}\}$, where $t$ denotes the token index and $k$ denotes the layer index, the final representation for token $t$ is calculated by
$\sum_{k=1}^L I^{t}_k \cdot \boldsymbol{H}^{t}_k$, where
$I^{t}_1, I^{t}_2, \dots, I^{t}_L = \text{softmax}(g(\boldsymbol{H}^{t}_1), g(\boldsymbol{H}^{t}_2), \dots, g(\boldsymbol{H}^{t}_L))$.
Here $L$ is the number of layers and the gate function $g(\cdot)$ is a fully-connected (FC) layer, which relies on the word representation itself in respective layers to predict its importance scores.
The weights of the gate function $g(\cdot)$ are shared across different layers.
Even though Concat Fusion and Max Fusion have been investigated in the graph field \citep{xu2018representation}, their effectiveness for pre-trained language model have not yet been explored. Besides, since the \textit{layer-wise} Concat Fusion and \textit{element-wise} Max Fusion lack the ability to generate token representations according to each token's specificity, we further propose the \textit{token-wise} Gate Fusion for adapting fusion to the language scenario.
\subsection{Experiment Results}
The BERT model is
stacked with $12$ Transformer blocks (Section~\ref{sec:trans_block}) with the
following hyper-parameters: number of tokens $n=128$, number of self-attention heads $h=12$, and hidden layer size $d=768$.
As for the feed-forward layer, we set
the filter size $d_{\text{ff}}$ to 3072 as in \cite{devlin2019bert}.
All experiments are performed on NVIDIA Tesla V100 GPUs.
\begin{table}[t]
\vspace{-1em}
\caption{Performance
(in \%)
of the various
BERT variants
on the GLUE development data set. \label{tab:glue}}
\centering
\resizebox{0.99\textwidth}{!}{
\begin{tabular}{lccccccccc}
\hline
& MNLI (m/mm) & QQP & QNLI & SST-2 & COLA & STS-B & MRPC & RTE & Average\tabularnewline
\hline
BERT & 85.4/85.8 & 88.2 & 91.5 & 92.9 & 62.1 & 88.8 & 90.4 & 69.0 & 83.8 \tabularnewline
BERT (concat) & 85.3/85.4 & 87.8 & 91.8 & 93.8 & 65.1 & 89.8 & 91.3 & 71.1& 84.6 \tabularnewline
BERT (max) & 85.3/85.6 & 88.5 & 92.0 & 93.7 & 64.6 & 90.3 & 91.7 & 71.5 & 84.7 \tabularnewline
BERT (gate) & 85.4/85.7 & 88.4 & 92.3 & 93.9 & 64.0 & 90.3 & 92.0 & 73.9 & \textbf{85.1} \tabularnewline
\hline
ALBERT & 81.6/82.2 &85.6 & 90.7 & 90.3 & 50.8 & 89.4 & 91.3 & 75.5 & 81.8 \tabularnewline
ALBERT (concat) & 82.8/82.8 & 86.7 & 90.9 & 90.7 & 48.7 & 89.7 & 91.5 & 76.5 & 82.3 \tabularnewline
ALBERT (max)& 82.5/82.8 &86.9& 91.1& 90.7& 50.5 & 89.6 & 92.6 & 77.3& 82.6 \tabularnewline
ALBERT (gate) & 83.0/83.7 & 87.0& 90.9 & 90.4 & 51.3 & 90.0 & 92.4 & 76.2 & \textbf{82.7} \tabularnewline
\hline
\end{tabular}}
\end{table}
\subsubsection{Data and settings}
\textbf{Pre-training}
For the setting in pre-training phase, we mainly follows BERT paper~\citep{devlin2019bert}. Our pre-training tasks are vanilla masked language modeling (MLM) and next sentence prediction (NSP). The pre-training datasets are English BooksCorpus \citep{zhu2015aligning} and Wikipedia \citep{devlin2019bert} ($16$G in total). The WordPiece embedding \citep{wu2016google} and the dictionary containing $30,000$ tokens in \citep{devlin2019bert} are still used in our paper. To pre-process text, we use the special token {\tt [CLS]} as the first
token of each sequence and {\tt [SEP]} to separate sentences in a sequence.
The pre-training is performed for $40$ epochs.
\textbf{Fine-tuning} In the fine-tuning phase, we perform downstream experiments on the GLUE \citep{wang2018glue}, SWAG \citep{zellers2018swag} and SQuAD \citep{rajpurkar2016squad,rajpurkar2018know} benchmarks. GLUE is a natural language understanding benchmark, which includes
three categories of tasks:
(i) single-sentence tasks (CoLA and SST-2); (ii) similarity and paraphrase tasks (MRPC, QQP and STS-B); (iii) inference tasks (MNLI, QNLI and RTE).
For MNLI task, we experiment on both the matched (MNLI-m) and mismatched (MNLI-mm) versions. The SWAG data set is for grounded commonsense inference, while SQuAD is a task for question answering.
In SQuAD v1.1 \citep{rajpurkar2016squad}, the answers are included in the context. SQuAD v2.0 \citep{rajpurkar2018know} is more challenging than SQuAD v1.1, in that some answers are not included in the context.
Following BERT \citep{devlin2019bert}, we report accuracy for MNLI, QNLI, RTE, SST-2 tasks, F1 score for QQP and MRPC, Spearman correlation for STS-B, and Matthews correlation for CoLA.
For SWAG task,
we use accuracy for evaluation.
For SQuAD v1.1 and v2.0,
we report the Exact Match (EM) and F1 scores.
Descriptions of the data sets and details of other
hyper-parameter settings are in Appendix~\ref{app:dataset} and Appendix~\ref{app:hyper}, respectively.
\subsubsection{results and analysis}
\begin{wraptable}{r}{7cm}
\vspace{-1.5em}
\caption{Performance (in \%) on the SWAG and SQuAD development sets.
\label{tab:squad}}
\resizebox{0.49\textwidth}{!}{
\begin{tabular}{lccccc}
\hline
& SWAG & \multicolumn{2}{c}{SQuAD v1.1} & \multicolumn{2}{c}{SQuAD v2.0} \tabularnewline
& acc & EM & F1 & EM & F1 \\
\hline
BERT & 81.6 & 79.7 & 87.1 & 72.9 & 75.5 \tabularnewline
BERT (concat) & 82.0 & 80.2 & 87.8& \textbf{74.1} & 77.0 \tabularnewline
BERT (max) & 81.9 &80.1 & 87.6 & 73.6 & 76.6 \tabularnewline
BERT (gate) & \textbf{82.1} & \textbf{80.7} & \textbf{88.0} & 73.9 & \textbf{77.3} \tabularnewline
\hline
\end{tabular}
\label{tab:predictor}}
\vspace{-0.5em}
\end{wraptable}
Since BERT \citep{devlin2019bert} and RoBERTa \citep{liu2019roberta} share the same architecture and the only difference is data resource and training steps, here we mainly evaluate our proposed method on BERT and ALBERT \citep{lan2019albert}.
Results on the GLUE benchmark are shown in Table~\ref{tab:glue}, while results on SWAG and SQuAD are illustrated in Table~\ref{tab:squad}.
For the SQuAD task, in contrast to BERT \citep{devlin2019bert}, which utilizes augmented training data during the fine-tuning phase, we only fine-tune our model on the standard SQuAD data set. As can be seen, our proposed fusion strategies also consistently perform better than the baselines on various tasks.
Following the previous over-smoothing measure, we visualize the token-wise cosine similarity in each layer.
Here we perform visualization on the same data sets as in Section~\ref{sec:emp} and the results are shown in Figure~\ref{fig:sim}.
For all three data sets, the cosine similarity drops in the last layer compared with the baseline. Notably, the similarity drop is most obvious on STS-B \citep{cer2017semeval}, which is consistent with our empirical verification in Section~\ref{sec:emp} that STS-B's $\sigma_1\sigma_2$ is the largest. Since the token representations from earlier layers are not similar to each other, our fusion method alleviates the over-smoothing issue and improves model performance at the same time.
To study the dynamic weights of the gate fusion strategy, we visualize the importance weight $I_k^t$ for each token $t$ and each layer $k$. We randomly select three samples and the visualization results are illustrated in Figure~\ref{fig:weight}.
Note that our gate strategy will reduce to vanilla model if representation from the last layer is selected for each token.
As can be seen, the weight distribution of different tokens is adaptively decided, illustrating that the vanilla BERT stacks model is not the best choice for all tokens. The keywords which highly affect meaning of sentences (i.e. \textit{``women'', ``water'', ``fish''}) are willing to obtain more semantic representations from the deep layer, while for some simple words which appear frequently (i.e. \textit{``a'', ``is''}), the features in shallow layers are preferred.
\begin{figure}
\caption{The token-wise similarity comparison between BERT and BERT with gate fusion. Here F means the final output, which is the fusion result for our approach.\label{fig:sim}}
\label{subfig:stsb}
\label{subfig:cola}
\label{subfig:squad2}
\end{figure}
\begin{figure}
\caption{Visualization of importance weights of gate fusion on different layers.\label{fig:weight}}
\end{figure}
\section{Conclusion}
In this paper, we revisit the over-smoothing problem in BERT models. Since this issue has been discussed in detail in the graph learning field, we first establish the relationship between BERT and graphs for inspiration, and find that the self-attention matrix can be shared among the last few blocks without a performance drop. Inspired by the over-smoothing discussion in graph convolutional networks, we provide some theoretical analysis for BERT models and figure out the importance of layer normalization. Specifically, if the standard deviation of layer normalization is sufficiently large, the output will converge towards a low-rank subspace. To alleviate the over-smoothing problem, we also propose a hierarchical fusion strategy to combine representations from different layers adaptively. Extensive experimental results on various data sets illustrate the effectiveness of our fusion methods.
\appendix
\section{Proof} \label{app:a}
\begin{customlemma}{1}
For self-attention matrix $\hat{\boldsymbol{A}}$, any $\boldsymbol{H},\boldsymbol{B}\in \mathbb{R}^{n\times d}$ and $\alpha_1, \alpha_2 \geq 0$, we have:
\begin{align}
d_\mathcal{M}(\boldsymbol{HW}) &\leq sd_\mathcal{M}(\boldsymbol{H}), \tag{4} \\
d_\mathcal{M}(\text{ReLU}(\boldsymbol{H})) &\leq d_\mathcal{M}(\boldsymbol{H}), \tag{5} \\
d_\mathcal{M}(\alpha_1 \boldsymbol{H}+\alpha_2 \boldsymbol{B}) &\leq \alpha_1 d_\mathcal{M}(\boldsymbol{H}) + \alpha_2 d_\mathcal{M}(\boldsymbol{B}), \tag{6} \\
d_\mathcal{M}(\hat{\boldsymbol{A}}\boldsymbol{H}) &\leq \sqrt{\lambda_{\max}} d_\mathcal{M}(\boldsymbol{H}) \tag{7} \label{eq:novel},
\end{align}
where $\lambda_{\max}$ is the
largest eigenvalue of
$\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$
and $s$ is the largest singular value of $\boldsymbol{W}$.
\end{customlemma}
\begin{proof}
Here we only prove the last inequality \eqref{eq:novel}, as this inequality differs from the theory for GCNs since $\hat{\boldsymbol{A}}$ is not symmetric and is shared in the Transformer architecture. For the first three inequalities, we refer to \cite{oono2020graph} and \cite{huang2020tackling}.
Write $\boldsymbol{HH}^\top=\boldsymbol{Q\Omega Q}^\top$ for the eigen-decomposition of $\boldsymbol{HH}^\top$, where $\boldsymbol{Q}=[\boldsymbol{q}_1,\boldsymbol{q}_2, \dots, \boldsymbol{q}_n]$ is orthogonal and $\boldsymbol{\Omega}=\text{diag}(\omega_1, \dots, \omega_n)$ with all $\omega_i\geq 0$. Recall $\boldsymbol{e}=n^{-1/2}[1,1, \dots, 1]^\top\in\mathbb{R}^{n\times 1}$.
Note that
\begin{align*}
d_\mathcal{M}(\hat{\boldsymbol{A}}\boldsymbol{H})^2
&=\Vert(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}\boldsymbol{H} \Vert^2_F \\
&=tr\{(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}\boldsymbol{HH}^\top\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\} \\
&=\sum_{i=1}^n \omega_i\boldsymbol{q}_i^\top\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}\boldsymbol{q}_i.
\end{align*}
Since the matrix $\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$ is positive semidefinite, all its eigenvalues are non-negative. Let $\lambda_{\max}$ be the largest eigenvalue of $\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$. Consider
\begin{equation*}
\lambda_{\max}d_\mathcal{M}(\boldsymbol{H})^2-d_\mathcal{M}(\hat{\boldsymbol{A}}\boldsymbol{H})^2=\sum_{i=1}^n\omega_i\boldsymbol{q}_i^\top\{\lambda_{\max}(\boldsymbol{I}-\boldsymbol{ee}^\top)-\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}\}\boldsymbol{q}_i.
\end{equation*}
Let $\boldsymbol{\Sigma}=\lambda_{\max}(\boldsymbol{I}-\boldsymbol{ee}^\top)-\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$.
Note that $\hat{\boldsymbol{A}}=\boldsymbol{D}^{-1}\boldsymbol{A}$ is a stochastic matrix, so we have $\hat{\boldsymbol{A}}\boldsymbol{e}=\boldsymbol{e}$. Thus, $\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$ has an eigenvalue $0$ with corresponding eigenvector $\boldsymbol{e}$. Let $\boldsymbol{f}_i$ be a normalised eigenvector of $\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$ orthogonal to $\boldsymbol{e}$, and $\lambda$ be its corresponding eigenvalue. Then we have
\begin{align*}
\boldsymbol{e}^\top\boldsymbol{\Sigma e} &= 0, \\
\boldsymbol{f}_i^\top\boldsymbol{\Sigma f}_i &= \lambda_{\max}-\lambda \geq 0.
\end{align*}
It follows that $d_\mathcal{M}(\hat{\boldsymbol{A}}\boldsymbol{H})^2\leq\lambda_{\max}d_\mathcal{M}(\boldsymbol{H})^2$.
\end{proof}
\textbf{Discussion} Assume further that $\hat{\boldsymbol{A}}$ is doubly stochastic (so that $\hat{\boldsymbol{A}}^\top \boldsymbol{e}=\boldsymbol{e}$) with positive entries. Then by Perron–Frobenius theorem \citep{gantmakher2000theory}, $\hat{\boldsymbol{A}}^\top\hat{\boldsymbol{A}}$ has a maximum eigenvalue $1$ with associated eigenvector $\boldsymbol{e}$ as well.
In this case, the matrix $\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}=\hat{\boldsymbol{A}}^\top\hat{\boldsymbol{A}}-\boldsymbol{ee}^\top$ has a maximum eigenvalue $\lambda_{max}<1$.
\begin{customthm}{2}
For a BERT block with $h$ heads, we have
\begin{equation}
d_\mathcal{M}(\boldsymbol{H}_{l+1})\leq vd_\mathcal{M}(\boldsymbol{H}_l) \tag{8},
\end{equation}
where $v=(1+s^2)(1+\sqrt{\lambda} hs)/(\sigma_1\sigma_2)$, $s>0$ is the largest element of all singular values of all $\boldsymbol{W}_l$,
$\lambda$ is the largest eigenvalue of all
$\hat{\boldsymbol{A}}^\top(\boldsymbol{I}-\boldsymbol{ee}^\top)\hat{\boldsymbol{A}}$
for each self-attention matrix $\hat{\boldsymbol{A}}$, and $\sigma_{1}$,
$\sigma_{2}$ are the minimum standard deviation
for two layer normalization operations.
\end{customthm}
\begin{proof}
From the definition of self-attention and feed-forward modules, we have
\begin{align*}
Attn(\boldsymbol{X}) &=\text{LayerNorm}(\boldsymbol{X}+\sum_{k=1}^H \hat{\boldsymbol{A}}^k\boldsymbol{XW}^k+\boldsymbol{1}\boldsymbol{b}^\top) =(\boldsymbol{X}+\sum_{k=1}^H \hat{\boldsymbol{A}}^k\boldsymbol{XW}^k+\boldsymbol{1b}^\top-\boldsymbol{1b}_{LN}^\top)\boldsymbol{D}_{LN}^{-1} \\
FF(\boldsymbol{X}) &=\text{LayerNorm}(\boldsymbol{X}+\text{ReLU}(\boldsymbol{XW}_1+\boldsymbol{1b_1}^\top)\boldsymbol{W}_2+\boldsymbol{1b}_2^\top)\\
&=(\boldsymbol{X}+\text{ReLU}(\boldsymbol{XW}_1+\boldsymbol{1b}_1^\top)\boldsymbol{W}_2+\boldsymbol{1b}_2^\top-\boldsymbol{1b}_{LN}^\top)\boldsymbol{D}_{LN}^{-1}
\end{align*}
Based on Lemma~\ref{lemma:1}, we have
\begin{align*}
d_\mathcal{M}(Attn(\boldsymbol{X}))
&=d_\mathcal{M}((\boldsymbol{X}+\sum_{k=1}^h\hat{\boldsymbol{A}}^k\boldsymbol{XW}^k+\boldsymbol{1b}^\top-\boldsymbol{1b}_{LN}^\top)\boldsymbol{D}_{LN}^{-1}) \\
&\leq d_\mathcal{M}(\boldsymbol{XD}_{LN}^{-1})+d_\mathcal{M}(\sum_{k=1}^h \hat{\boldsymbol{A}}^k\boldsymbol{XW}^k\boldsymbol{D}_{LN}^{-1})+d_\mathcal{M}(\boldsymbol{1}(\boldsymbol{b}-\boldsymbol{b}_{LN})^\top) \\
&\leq \sigma_1^{-1}d_\mathcal{M}(\boldsymbol{X})+\sum_{k=1}^hd_\mathcal{M}(\hat{\boldsymbol{A}}^k\boldsymbol{XW}^k\boldsymbol{D}_{LN}^{-1}) \\
&\leq \sigma_1^{-1}d_\mathcal{M}(\boldsymbol{X})+\sqrt{\lambda} h s\sigma_1^{-1}d_\mathcal{M}(\boldsymbol{X}) \\
&=(1+\sqrt{\lambda} hs)\sigma_1^{-1}d_\mathcal{M}(\boldsymbol{X}). \\
d_\mathcal{M}(FF(\boldsymbol{X}))
&= d_\mathcal{M}((\boldsymbol{X}+\text{ReLU}(\boldsymbol{XW}_1+\boldsymbol{1b}_1^\top)\boldsymbol{W}_2+\boldsymbol{1b}_2^\top-\boldsymbol{1b}_{LN}^\top)\boldsymbol{D}_{LN}^{-1})\\
&\leq d_\mathcal{M}(\boldsymbol{XD}_{LN}^{-1})+d_\mathcal{M}(\text{ReLU}(\boldsymbol{XW}_1+\boldsymbol{1b}_1^\top)\boldsymbol{W}_2\boldsymbol{D}_{LN}^{-1})+d_\mathcal{M}(\boldsymbol{1}(\boldsymbol{b}_2^\top-\boldsymbol{b}_{LN}^\top)\boldsymbol{D}_{LN}^{-1}) \\
&\leq d_\mathcal{M}(\boldsymbol{XD}_{LN}^{-1})+d_\mathcal{M}(\boldsymbol{XW}_1\boldsymbol{W}_2\boldsymbol{D}_{LN}^{-1})+d_\mathcal{M}(\boldsymbol{1b}_1^\top \boldsymbol{W}_2\boldsymbol{D}_{LN}^{-1}) \\
&\leq \sigma_2^{-1}d_\mathcal{M}(\boldsymbol{X})+s^2\sigma_2^{-1}d_\mathcal{M}(\boldsymbol{X})\\
&= (1+s^2)\sigma_2^{-1}d_\mathcal{M}(\boldsymbol{X}).
\end{align*}
It follows that
\begin{equation*}
d_\mathcal{M}(\boldsymbol{H}_{l+1}) \leq (1+s^2)(1+ \sqrt{\lambda} hs)\sigma_1^{-1}\sigma_2^{-1} d_\mathcal{M}(\boldsymbol{H}_l).
\end{equation*}
\end{proof}
\section{Data Set \label{app:dataset}}
\subsection{MNLI}
The Multi-Genre Natural Language Inference \citep{williams2018broad} is a crowdsourced ternary classification task. Given a premise sentence and a hypothesis sentence, the target is to predict whether the last sentence stands in an [entailment], [contradiction], or [neutral] relationship with respect to the first one.
\subsection{QQP}
The Quora Question Pairs \citep{chen2018quora} is a binary classification task. Given two questions on Quora, the target is to determine whether these two asked questions are semantically equivalent or not.
\subsection{QNLI}
The Question Natural Language Inference \citep{wang2018multi} is a binary classification task derived from the Stanford Question Answering Dataset \citep{rajpurkar2016squad}. Given sentence pairs (question, sentence), the target is to predict whether the last sentence contains the correct answer to the question.
\subsection{SST-2}
The Stanford Sentiment Treebank \citep{socher2013recursive} is a binary sentiment classification task for a single sentence. All sentences are extracted from movie reviews with human annotations of their sentiment.
\subsection{CoLA}
The Corpus of Linguistic Acceptability \citep{warstadt2019neural} is a binary classification task consisting of English acceptability judgments extracted from books and journal articles. Given a single sentence, the target is to determine whether the sentence is linguistically acceptable or not.
\subsection{STS-B}
The Semantic Textual Similarity Benchmark \citep{cer2017semeval} is a regression task for predicting the similarity score (from $1$ to $5$) between a given sentence pair, whose sentence pairs are drawn from news headlines and other sources.
\subsection{MRPC}
The Microsoft Research Paraphrase Corpus \citep{dolan2005automatically} is a binary classification task. Given a sentence pair extracted from online news sources, the target is to determine whether the sentences in the pair are semantically equivalent.
\subsection{RTE}
The Recognizing Textual Entailment \citep{bentivogli2009fifth} is a binary entailment classification task similar to MNLI, where [neutral] and [contradiction] relationships are classified into [not entailment].
\subsection{SWAG}
The Situations with Adversarial Generations \citep{zellers2018swag} is a multiple-choice task consisting of $113$K questions
about grounded situations. Given a source sentence, the task
is to select the most possible one among four choices for
sentence continuity.
\subsection{SQuAD v1.1}
The Stanford Question Answering Dataset (SQuAD v1.1) \citep{rajpurkar2016squad} is a large-scale question and answer task consisting of $100$K question and answer pairs from more than $500$ articles. Given a passage and the question from Wikipedia, the goal is to determine the start and the end token of the answer text.
\subsection{SQuAD v2.0}
The SQuAD v2.0 task \citep{rajpurkar2018know} is the extension of the above SQuAD v1.1, which contains the $100$K questions in SQuAD v1.1 and $50$K unanswerable questions. The existence of unanswerable questions makes this task more realistic and challenging.
\section{Implementation Details \label{app:hyper}}
The hyper-parameters of various downstream tasks are shown in Table~\ref{tbl:hyper}.
\begin{table}[h]
\begin{center}
\caption{Hyper-parameters for different downstream tasks. \label{tbl:hyper}}
\resizebox{0.8\textwidth}{!}{
\begin{tabular}{lcccc}
\hline
& GLUE & SWAG &SQuAD v1.1 & SQuAD v2.0 \tabularnewline
\hline
Batch size & 32 & 16 & 32 & 48 \tabularnewline
Weight decay & [0.1, 0.01]& [0.1, 0.01] &[0.1, 0.01] & [0.1, 0.01] \tabularnewline
Warmup proportion & 0.1 & 0.1 & 0.1& 0.1 \tabularnewline
Learning rate decay & linear & linear &linear & linear \tabularnewline
Training Epochs & 3 & 3 & 3 &2 \tabularnewline
Learning rate & \multicolumn{4}{c}{[2e-5, 1e-5, 1.5e-5, 3e-5, 4e-5, 5e-5]} \tabularnewline
\hline
\end{tabular}}
\end{center}
\label{tab:implementation_details}
\end{table}
\end{document} |
\begin{document}
\theoremstyle{plain}
\newtheorem{condition}{Condition}
\newtheorem{theorem}{Theorem}
\newtheorem{definition}{Definition}
\newtheorem{corollary}{Corollary}
\newtheorem{lemma}{Lemma}
\newtheorem{proposition}{Proposition}
\title{\bf Generalized One-to-One Mappings between Homomorphism Sets of Digraphs}
\author{\sc Frank a Campo}
\date{\small Seilerwall 33, D 41747 Viersen, Germany\\
{\sf acampo.frank@gmail.com}}
\maketitle
\begin{abstract}
\noindent Structural properties of finite digraphs $R$ and $S$ are studied which enforce $\# {\cal H}(G,R) \leq \# {\cal H}(G,S)$ for every finite digraph $G \in \mf{D}'$, where ${\cal H}(G,H)$ is the set of homomorphisms from $G$ to $H$, and $\mf{D}'$ is a class of digraphs. In a previous study, we have seen that the key for such a relation between $R$ and $S$ is the existence of a {\em strong S-scheme} from $R$ to $S$. Such an S-scheme $\rho$ defines a one-to-one mapping $\rho_G : {\cal S}(G,R) \rightarrow {\cal S}(G,S)$ for every $G \in \mf{D}'$, where ${\cal S}(G,H)$ is the set of homomorphisms from $G$ to $H$ mapping proper arcs of $G$ to proper arcs of $H$. In the present article, we characterize S-schemes $\rho$ which are induced by strict homomorphisms $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ between auxiliary systems of $R$ and $S$, and we analyze the mutual dependency between the properties of $\rho$ and $\epsilon$. Wide applicability of the theory is ensured by specifying the auxiliary systems ${\cal E}(R)$ and ${\cal E}(S)$ as {\em EV-systems} of $R$ and $S$. The results are applied to a rearrangement method for digraphs and to undirected graphs.
\newline
\noindent{\bf Mathematics Subject Classification:}\\
Primary: 06A07. Secondary: 06A06.\\[2mm]
{\bf Key words:} digraph, homomorphism, Hom-scheme, $\Gamma$-scheme, S-scheme, EV-system.
\end{abstract}
\section{Introduction} \label{sec_introduction}
The number of homomorphisms between directed graphs (digraphs) may carry important information about structure. Freedman et al.\ \cite{Freedman_etal_2007} characterized in 2007 graph parameters which can be expressed as numbers of homomorphisms into weighted graphs. The still open reconstruction conjecture asks in different fields of graph theory \cite{Hell_Nesetril_2004,Schroeder_2016}, if two objects with at least four vertices are isomorphic if all numbers of embeddings of certain subgraphs into them are equal. An early classical result is the Theorem of Lov\'{a}sz \cite{Lovasz_1967} from 1967 which states that numbers of homomorphisms distinguish non-isomorphic ``relational structures''. With ${\cal H}(G,H)$ defined as the set of homomorphisms from a digraph $G$ to a digraph $H$, the following specifications are relevant for our purpose:
\begin{theorem}[Lov\'{a}sz \cite{Lovasz_1967}] \label{theo_Lovasz_original}
Let $\mf{C}$ be the class of finite digraphs or the class of finite posets. Then, for $R, S \in \mf{C}$
\begin{align*}
R & \simeq S \\
\Leftrightarrow \; \; \; \# {\cal H}(G,R) & = \# {\cal H}(G,S) \; \mytext{for every } G \in \mf{C}.
\end{align*}
For the class of finite posets, the equivalence holds also if we replace the homomorphism sets by the sets of strict order homomorphisms.
\end{theorem}
For digraphs, a short and simple proof of the theorem is contained in \cite{Hell_Nesetril_2004} which---with minor modification---works for posets, too.
The infinite vector ${\cal L}(H) \equiv ( \# {\cal H}(G,H) )_{G \in \mf{C}}$ is called the {\em Lov\'{a}sz-vector of $H$}. In the last decades, topics related to it have found interest \cite{Borgs_etal_2006,Lovasz_2006,Freedman_etal_2007,
Borgs_etal_2008,Lovasz_Szegedy_2008,Schrijver_2009,
Borgs_etal_2012,Cai_Govorov_2020} in connection with vertex and edge weights. For undirected graphs, Dvo\v{r}\'{a}k \cite{Dvorak_2010} investigated in 2010 proper sub-classes $\mf{U}'$ of undirected graphs for which the partial Lov\'{a}sz-vector $( \# {\cal H}(G,H) )_{G \in \mf{U}'}$ still distinguishes graphs; the distinguishing power of the vector $( \# {\cal H}(G,H) )_{H \in \mf{U}'}$ has been investigated by Fisk \cite{Fisk_1995} in 1995.
This paper continues the work of the author about the pointwise less-equal-relation between partial Lov\'{a}sz-vectors of digraphs:
\begin{quote}
{\em Given a class $\mf{D}'$ of digraphs, what is it in the structure of digraphs $R$ and $S$ that enforces}
\end{quote}
\begin{equation} \label{fragestellung}
\# {\cal H}(G,R) \leq \# {\cal H}(G,S) \; \mytext{\em for every } G \in \mf{D}' \mytext{\em ?}
\end{equation}
\begin{figure}
\caption{\label{figure_Intro}}
\end{figure}
The starting point of the work was the pair of posets $R$ and $S$ in Figure \ref{figure_Intro}(c). The author \cite[Theorem 5]{aCampo_2018} has proven that for these posets we have $\# {\cal H}(P,R) \leq \# {\cal H}(P,S)$ for every finite poset $P$. Additional non-trivial examples for the relation ``$\# {\cal H}(P,R) \leq \# {\cal H}(P,S)$ for every finite poset $P$'' are shown in the Figures \ref{figure_Intro}(a)-(b); more pairs of digraphs fulfilling \eqref{fragestellung} are contained in the Figures \ref{figure_RS_nichtRearr} and \ref{fig_TableConstr}.
In the recent paper, a first attempt is made to tackle the theoretical aspect of the question. After the preparatory Section \ref{sec_preparation}, a main result from \cite{aCampo_toappear_0} is recalled in Section \ref{subsec_DefHomSchemes}: it has been shown \cite[Theorem 2]{aCampo_toappear_0}, that for several important classes of digraphs $\mf{D}'$ and $R \in \mf{D}'$, \eqref{fragestellung} is implied by
\begin{align} \label{fragestellung_strict}
\# {\cal S}(G,R) \; \leq \; \# {\cal S}(G,S) \; \; \mytext{for all} \; G \in \mf{D}'
\end{align}
where ${\cal S}(G,H)$ is the set of {\em strict} homomorphisms from $G$ to $H$, i.e., of those homomorphisms from $G$ to $H$ mapping proper arcs of $G$ to proper arcs of $H$.
Due to the implication $\eqref{fragestellung_strict} \Rightarrow \eqref{fragestellung}$, our interest focuses on the existence of a generalized mapping $\rho$ providing a mapping $\rho_G : {\cal S}(G,R) \rightarrow {\cal S}(G,S)$ for every $G \in \mf{D}'$. (A formally satisfying definition is given in Definition \ref{def_S_scheme}.) We call such a generalized mapping an {\em S-scheme from $R$ to $S$}, and we call $\rho$ {\em strong} iff $\rho_G$ is one-to-one for every $G \in \mf{D}'$.
For a (strong) S-scheme $\rho$ and $G, G' \in \mf{D}'$ with $G \not= G'$, the mappings $\rho_G$ and $\rho_{G'}$ are in principle independent of each other, no matter how closely $G$ and $G'$ are related. Even in the case of $\xi \in {\cal S}(G,R) \cap {\cal S}(G',R)$, the resulting homomorphisms $\rho_G(\xi) \in {\cal S}(G,S)$ and $\rho_{G'}(\xi) \in {\cal S}(G',S)$ need not be related to each other in any way. It is the {\em induced S-schemes} defined in Section \ref{subsec_simple_induced} which introduce some regularity by ``mapping similar things to similar things'', and they are the subject of this paper. In particular, S-schemes are suitable for constructive approaches, and we will pay special attention to the types of regularity introduced by them.
For an induced S-scheme $\rho$ from $R$ to $S$, we have for all $G \in \mf{D}'$
\begin{equation*}
\rho_G \; = \; \phi \circ \epsilon \circ \alpha_G,
\end{equation*}
where $\alpha$ is a simple S-scheme from $R$ to an auxiliary digraph ${\cal E}(R)$, $\phi$ is a strict homomorphism from another auxiliary digraph ${\cal E}(S)$ to $S$, and $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ is a strict homomorphism. Obviously, the properties of $\rho$ are mainly determined by the digraphs ${\cal E}(R)$ and ${\cal E}(S)$ and the homomorphism $\epsilon$ between them. From a theoretical point of view it is interesting to analyze the mutual dependency between the properties of $\rho$ and $\epsilon$, and from a practical point of view, there is a perspective to construct a strong S-scheme $\rho$ from $R$ to $S$ with desired properties by designing (hopefully simple) digraphs ${\cal E}(R)$ and ${\cal E}(S)$ and an appropriate homomorphism $\epsilon$ between them.
The theoretical aspect is in the focus in what follows. Still in Section \ref{subsec_simple_induced}, we characterize the induced S-schemes in Theorem \ref{theo_induced_exists}. In Theorem \ref{theo_eta_RS}(1), we show that a regularity condition on $\epsilon$ called ``Condition \ref{cond_univ_aexiv}'' is equivalent to a certain regular behavior of the induced S-scheme $\rho$, and in Theorem \ref{theo_eta_RS}(2), we see that an additional property of $\epsilon$ forces $\rho$ to be strong.
In Section \ref{subsec_EVSystems_Props}, we specify ${\cal E}(R)$ and ${\cal E}(S)$ as {\em EV-systems} and describe their dependency on the class of digraphs $\mf{D}'$ they are referring to. In Theorem \ref{theo_eta_RS_inv} in Section \ref{subsec_inv_Theo_Eta_RS_2}, we use EV-systems in showing that for the classes of digraphs we are mainly interested in, Theorem \ref{theo_eta_RS}(2) can be inverted: if an induced strong S-scheme $\rho$ behaves sufficiently regular, then the corresponding $\epsilon$ is one-to-one and fulfills Condition \ref{cond_univ_aexiv}. Because Condition \ref{cond_univ_aexiv} is unwieldy to check, we show in Proposition \ref{prop_repl_Cond1} in Section \ref{subsec_replacement_Cond1} how it can be replaced by a more handy condition.
In Section \ref{subsec_rearr_induced}, we take up the rearrangement method developed in \cite{aCampo_toappear_0}. By means of this method, a digraph $R$ fulfilling certain conditions is rearranged in such a way that there exists a strong S-scheme $\rho$ from $R$ to the digraph $S$ resulting from the rearrangement. We see that $\rho$ is in fact an induced strong S-scheme, and we describe the corresponding homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$. In Section \ref{subsec_example}, we discuss in detail the posets in Figure \ref{figure_Intro}(a) and Figure \ref{figure_Intro}(b) under these view points.
Finally, in Section \ref{sec_undirected}, we transfer our concepts and results to undirected graphs.
\section{Basics and Notation} \label{sec_preparation}
A {\em (finite) directed graph} or {\em digraph} $G$ is an ordered pair $(V(G),A(G))$ in which $V(G)$ is a non-empty, finite set and $A(G) \subseteq V(G) \mytimes V(G)$ is a binary relation on $V(G)$. We write $vw$ for an ordered pair $(v,w) \in V(G) \mytimes V(G)$. The elements of $V(G)$ are called the {\em vertices} of $G$ and the elements of $A(G)$ are called the {\em arcs} of $G$. A digraph $G$ is {\em reflexive}, or {\em symmetric}, or {\em antisymmetric}, etc., iff the relation $A(G)$ has the respective property. A {\em partially ordered set (poset)} is a reflexive, antisymmetric, transitive digraph.
For a digraph $G$ and a non-empty set $X \subseteq V(G)$, the {\em digraph $G \vert_X$ induced on $X$} is $( X, A(G) \cap ( X \mytimes X) )$. The {\em direct sum} $G + H$ of digraphs with disjoint vertex sets is defined as usual.
Vertices $v, w \in V(G)$ are {\em adjacent} iff $vw \in A(G)$ or $wv \in A(G)$. The {\em open neighborhood} $N_G(v)$ of $v \in V(G)$ is the set of all $w \in V(G) \setminus \{ v \}$ adjacent to $v$. Furthermore,
\begin{align*}
N^{in}_G(v) & \; \equiv \; \mysetdescr{ w \in N_G(v) }{ wv \in A(G) }, \\
N^{out}_G(v) & \; \equiv \; \mysetdescr{ w \in N_G(v) }{ vw \in A(G) }.
\end{align*}
An arc $vw \in A(G)$ is called {\em proper} iff $v \not= w$; otherwise, it is called a {\em loop}. All possible loops of $G$ are collected in the {\em diagonal} $\Delta_G \equiv \mysetdescr{(v,v)}{v \in V(G)}$. $G^* \equiv (V(G), A(G) \setminus \Delta_G)$ is the digraph $G$ {\em with loops removed}.
A sequence $v_0, \ldots , v_I$ of vertices of $G$ with $I \in \mathbb{N}$ is called a {\em walk} iff $v_{i-1}v_i \in A(G)$ for all $1 \leq i \leq I$. The walk is {\em closed} iff $v_0 = v_I$. A digraph is {\em acyclic} iff it does not contain a closed walk.
Let $G$ be a digraph. With ${\cal T}$ denoting the set of all transitive relations $T \subseteq V(G) \mytimes V(G)$ with $A(G) \subseteq T$, the {\em transitive hull} $( V(G), \cap {\cal T} )$ of $G$ is the digraph with vertex set $V(G)$ and the (set-theoretically) smallest transitive arc set containing $A(G)$.
Given digraphs $G$ and $H$, we call a mapping $\xi : V(G) \rightarrow V(H)$ a {\em homomorphism} from $G$ to $H$ if $\xi(v) \xi(w) \in A(H)$ for all $vw \in A(G)$. For such a mapping, we write $\xi : G \rightarrow H$; we collect the homomorphisms in the set
\begin{align*}
{\cal H}(G,H) & \; \equiv \; \mysetdescr{ \xi : V(G) \rightarrow V(H) }{ \xi \mytext{ is a homomorphism} }.
\end{align*}
${\cal A}ut(G)$ is the set of automorphisms of a digraph $G$. Isomorphism is indicated by ``$\simeq$''.
Every homomorphism $\xi : G \rightarrow H$ maps loops in $G$ to loops in $H$, but proper arcs of $G$ can be mapped to both, loops and proper arcs of $H$. We call a homomorphism from $G$ to $H$ {\em strict} iff it maps all proper arcs of $G$ to proper arcs of $H$.
\begin{align*}
{\cal S}(G,H) & \; \equiv \; {\cal H}(G,H) \cap {\cal H}(G^*,H^*)
\end{align*}
is the set of strict homomorphisms from $G$ to $H$. The set ${\cal H}(G^*,H^*) \setminus {\cal H}(G,H)$ contains all homomorphisms from $G^*$ to $H^*$ which map a vertex belonging to a loop in $G$ to a vertex of $H$ not belonging to a loop. A mapping $\xi : V(G) \rightarrow V(H)$ is thus a strict homomorphism, iff it maps loops in $G$ to loops in $H$ and proper arcs of $G$ to proper arcs of $H$. If $H$ is reflexive, then ${\cal S}(G,H) = {\cal H}(G^*,H^*)$; for posets $P$ and $Q$, the set ${\cal S}(P,Q) = {\cal H}(P^*,Q^*)$ is the set of strict order homomorphisms from $P$ to $Q$.
In order to avoid unnecessary formalism, we work with representative systems of classes of digraphs with respect to isomorphism and not with the classes itself. $\mf{D}$ is a representative system of the class of all digraphs with finite non-empty vertex set. Furthermore,
\begin{align*}
\mf{T}a & \; \equiv \; \mysetdescr{ G \in \mf{D} }{ G^* \mytext{is acyclic} }, \\
\mf{P} \; & \; \equiv \; \mysetdescr{ P \in \mf{T}a }{ P \mytext{is a poset} }, \\
\mf{P}^* & \; \equiv \; \mysetdescr{ P^* }{ P \in \mf{P} }.
\end{align*}
Equivalently, $\mf{T}a$ can be characterized as a representative system of the class of digraphs with antisymmetric transitive hull (which is the reason for the choice of the symbol $\mf{T}a$) or as the class of subgraphs of posets. $\mf{P}^*$ is the class of posets with loops removed, i.e., the class of finite irreflexive antisymmetric transitive digraphs. $\mf{P}^*$ is of interest for us, because every result about homomorphism sets ${\cal H}(P,Q)$ with $P, Q \in \mf{P}^*$ directly translates into a result about the sets of strict order homomorphisms between posets and vice versa. For example, the addendum in Theorem \ref{theo_Lovasz_original} says that the stated equivalence also holds for $\mf{C} = \mf{P}^*$.
We assume $\mf{P}^* \subset \mf{T}_a$, and we will tacitly assume that every digraph we construct in what follows belongs to the respective representative system without exchange of vertices. Nevertheless, for $G, H \in \mf{D}$, we retain the notation $G \simeq H$ instead of $G = H$ in order to emphasize that it is structural equivalence we are dealing with, not physical identity.
From set theory, we use additionally the following notation:
\begin{align*}
\mathbb{N}k{0} & \equiv \emptyset, \\
\mathbb{N}k{n} & \equiv \{ 1, \ldots, n \} \mytext{for every} n \in \mathbb{N}.
\end{align*}
${\cal A}(X,Y)$ is the set of mappings from $X$ to $Y$. For $f \in {\cal A}(X,Y)$ and $X' \subseteq X$, we write $f \vert_{X'}$ for the pre-restriction of $f$ to $X'$, and for $Y' \subseteq Y$ with $f(X) \subseteq Y'$ we write $f \vert^{Y'}$ for the post-restriction of $f$ to $Y'$. Furthermore, we use the symbol $\myurbild{f}(Y'')$ for the pre-image of $Y'' \subseteq Y$ under $f$; for $y \in Y$, we simply write $\myurbild{f}(y)$ instead of $\myurbild{f}( \{y\} )$. However, in the proof of Lemma \ref{lemma_eps_nicht_1t1_Cond1}, we use the symbol $\beta^{-1}$ also for the inverse of a bijective mapping $\beta$. $\id_X$ is the identity mapping of a set $X$.
Finally, we use the {\em Cartesian product}. Let ${\cal I}$ be a non-empty set, and let $M_i$ be a non-empty set for every $i \in {\cal I}$. Then the Cartesian product of the sets $M_i, i \in {\cal I}$, is defined as
\begin{eqnarray*}
\prod_{i \in {\cal I}} M_i & \; \equiv \; &
\mysetdescr{ f \in {\cal A} \big( {\cal I}, \bigcup_{i \in {\cal I}} M_i \big)}{ f(i) \in M_i \mytext{for all} i \in {\cal I} }.
\end{eqnarray*}
\section{S-schemes} \label{sec_HomSchemes}
In Section \ref{subsec_DefHomSchemes}, we recall the main concepts and results from \cite{aCampo_toappear_0}. It turns out that for our purpose, so-called {\em S-schemes} are in the focus: generalized mappings $\rho$ providing a mapping $\rho_G : {\cal S}(G,R) \rightarrow {\cal S}(G,S)$ for every $G \in \mf{D}' \subseteq \mf{D}$. In Section \ref{subsec_simple_induced}, we introduce {\em induced S-schemes}. These S-schemes can be described effectively by two auxiliary digraphs and a strict homomorphism $\epsilon$ between them. We characterize the induced S-schemes and show how regularity conditions on $\epsilon$ translate into regular behavior of the induced S-scheme $\rho$ and vice versa.
\subsection{Recapitulation} \label{subsec_DefHomSchemes}
Let $R, S \in \mf{D}$ and $\mf{D}' \subseteq \mf{D}$. Assume that there exists a one-to-one homomorphism $\sigma : R \rightarrow S$. Then, for every $G \in \mf{D}'$ with ${\cal H}(G,R) \not= \emptyset$, we get a one-to-one mapping $r_G : {\cal H}(G,R) \rightarrow {\cal H}(G,S)$ by setting for every $\xi \in {\cal H}(G,R)$
\begin{align} \label{eq_rho_sigma}
r_G(\xi) & \equiv \sigma \circ \xi.
\end{align}
A one-to-one homomorphisms from $R$ to $S$ delivers thus a ``natural'' (or: trivial) example for $ \# {\cal H}(G,R) \leq \# {\cal H}(G,S)$ for every $G \in \mf{D}'$. For the general investigation of this relation between $R$ and $S$, the author \cite{aCampo_2018,aCampo_toappear_0} has introduced the following concepts:
\begin{definition}[\cite{aCampo_toappear_0}, Definition 3] \label{def_Hom_scheme}
Let $\mf{D}' \subseteq \mf{D}$ be a representative system of a class of digraphs. For $R, S \in \mf{D}$, we call a mapping
\begin{align*}
\rho & \; \in \prod_{G \in \mf{D}'} {\cal A}( {\cal H}(G,R), {\cal H}(G,S) )
\end{align*}
a {\em Hom-scheme from $R$ to $S$ (with respect to $\mf{D}'$)}, and we call it {\em strong} iff $\rho_G : {\cal H}(G,R) \rightarrow {\cal H}(G,S)$ is one-to-one for every $G \in \mf{D}'$. We say that a Hom-scheme $\rho$ from $R$ to $S$ is a {\em $\Gamma$-scheme}, iff
\begin{align} \label{def_grxiv_gxiv}
\gxiv & = \grgxiv
\end{align}
holds for every $G \in \mf{D}', \xi \in {\cal H}(G,R), v \in V(G)$, where $\gxiv$ is the connectivity component of $v$ in $\myurbild{\xi}(\xi(v))$ and $\grgxiv$ is the connectivity component of $v$ in $\myurbild{\rhogxi}(\rhogxiv)$. We write $R \sqsubseteq_\Gamma S$ iff a strong $\Gamma$-scheme from $R$ to $S$ exists. If $G$ is fixed, we write $\rho(\xi)$ instead of $\rho_G(\xi)$.
\end{definition}
Here as in the following, it does not matter if there is a $G \in \mf{D}'$ with ${\cal H}(G,R) = \emptyset$; in this case, $\rho_G = ( \emptyset, \emptyset, {\cal H}(G,S))$. The (trivial) Hom-scheme \eqref{eq_rho_sigma} is always a strong $\Gamma$-scheme. We will generalize the concept of such simple Hom-schemes in Section \ref{subsec_simple_induced}.
A Hom-scheme $\rho$ from $R$ to $S$ is strong if we can determine $\xi(v)$ by means of $\rho(\xi)$ for all $G \in \mf{D}'$, $\xi \in {\cal H}(G,R)$, $v \in V(G)$. We say that we can {\em reconstruct $\xi$ by means of $\rho(\xi)$}. Obviously, we can reconstruct $\xi$ by means of $\rho(\xi)$ if we can determine $\myurbild{\xi}(v)$ for every $v \in R$ by means of $\rho(\xi)$.
A (strong) $\Gamma$-scheme is a (strong) Hom-scheme obeying the regularity condition \eqref{def_grxiv_gxiv} in mapping ${\cal H}(G,R)$ to ${\cal H}(G,S)$ for every $G \in \mf{D}'$. This condition has been introduced in \cite{aCampo_toappear_0} from an application-oriented point of view. It may look like an additional difficulty posed upon a question difficult enough in itself. However, in fact, \eqref{def_grxiv_gxiv} is a regularity condition making things manageable by introducing structure. For a (strong) Hom-scheme $\rho$ from $R$ to $S$ and $G, G' \in \mf{D}'$ with $G \not= G'$, the mappings $\rho_G$ and $\rho_{G'}$ are independent; even in the case of $\xi \in {\cal H}(G,R) \cap {\cal H}(G',R)$, there need not be any similarity between the image-homomorphisms $\rho_G(\xi)$ and $\rho_{G'}(\xi)$. It is the $\Gamma$-schemes and in particular the induced S-schemes defined in Section \ref{subsec_simple_induced} which ensure that ``similar things are mapped to similar things''. In this way, S-schemes are suitable for constructive approaches, and we will pay particular attention to the question of which type of regularity is introduced by them.
The regularity condition \eqref{def_grxiv_gxiv} is in particular plausible if we regard a Hom-scheme as a technical apparatus which assigns to every $\xi \in {\cal H}(G,R)$ a well-fitting $\rho(\xi) \in {\cal H}(G,S)$. If we allow $\gxiv \subset \grxiv$ for some $v \in V(G)$, then \cite[Lemma 1]{aCampo_toappear_0} tells us that $\rho(\xi)$ preserves the structure of $G$ around $v$ worse than $\xi$, which is not satisfying. And in the case $\gxiv \not\subseteq \grxiv$, $\rho(\xi)$ has to re-distribute the points of $\gxiv \setminus \grxiv \subseteq \gxiv \setminus \{ v \}$ in $S$. Because the sets $\gxiv \setminus \grxiv$ can be arbitrarily complicated, this re-distribution process may require many single case decisions, which is out of the scope of a technical apparatus.
It is easily seen \cite[Corollary 3]{aCampo_toappear_0} that a homomorphism $\xi \in {\cal H}(G,H)$ is strict iff $\gxiv = \{ v \}$ for all $v \in G$. Because a $\Gamma$-scheme preserves the sets $\gxiv$, we have for every Hom-scheme $\rho$ from $R$ to $S$
\begin{equation} \label{GScheme_strict}
\rho \mytext{ $\Gamma$-scheme } \quad \Rightarrow \quad
\rho_G( {\cal S}(G,R) ) \subseteq {\cal S}(G,S) \quad \mytext{for all } G \in \mf{D}',
\end{equation}
hence $\# {\cal S}(G,R) \leq \# {\cal S}(G,S)$ for all $G \in \mf{D}'$ if $\rho$ is additionally strong. One of the main results of \cite{aCampo_toappear_0} adds the direction ``$\Leftarrow$'' to this implication:
\begin{theorem}[\cite{aCampo_toappear_0}, Theorem 2] \label{theo_GschemeOnStrict}
Let \begin{align*}
R & \in \mf{D}' = \mf{D}, \\
R & \in \mf{T}a \subseteq \mf{D}' \subseteq \mf{D}, \\
\mytext{or} \quad R & \in \mf{D}' \mytext{ with } \mf{D}' = \mf{P} \mytext{ or } \mf{D}' = \mf{P}^*.
\end{align*}
Then, for all $S \in \mf{D}$, there exists a strong $\Gamma$-scheme from $R$ to $S$ with respect to $\mf{D}'$, iff
\begin{equation*}
\# {\cal S}(G,R) \; \leq \; \# {\cal S}(G,S) \; \; \mytext{for all} \; G \in \mf{D}'.
\end{equation*}
\end{theorem}
In the investigation of strong $\Gamma$-schemes, it is thus natural to give special attention to the restriction of Hom-schemes to sets of strict homomorphisms:
\begin{definition} \label{def_S_scheme}
Let $R, S \in \mf{D}$, $\mf{D}' \subseteq \mf{D}$. We call a mapping
\begin{align*}
\rho & \; \in \; \prod_{G \in \mf{D}'} {\cal A}( {\cal S}(G,R), {\cal S}(G,S) )
\end{align*}
an {\em S-scheme from $R$ to $S$ with respect to $\mf{D}'$}. We call an S-scheme from $R$ to $S$ {\em strong} iff the mapping $\rho_G : {\cal S}(G,R) \rightarrow {\cal S}(G,S)$ is one-to-one for every $G \in \mf{D}'$.
\end{definition}
In fact, we have seen:
\begin{proposition}[\cite{aCampo_toappear_0}, Proposition 2] \label{prop_extend_Sscheme}
In the constellations of $R$ and $\mf{D}'$ described in Theorem \ref{theo_GschemeOnStrict}, a strong S-scheme $\rho$ from $R$ to $S$ can always be extended to a strong $\Gamma$-scheme $\rho'$ with $\rho'_G \vert_{{\cal S}(G,R)}^{{\cal S}(G,S)} = \rho_G$ for all $G \in \mf{D}'$.
\end{proposition}
Theorem \ref{theo_GschemeOnStrict} and Proposition \ref{prop_extend_Sscheme} remain valid also for other sub-classes $\mf{D}'$ of $\mf{D}$, e.g., for the digraphs (posets) with at most $k$ vertices or at most $k$ arcs, and for the class of digraphs in $\mf{T}a$ for which the maximal length of a walk without loops is at most $k$.
\subsection{Induced S-schemes} \label{subsec_simple_induced}
\begin{figure}
\caption{The commuting triangles formed by $G$, ${\cal E}(R)$, $R$, ${\cal E}(S)$, and $S$.}
\label{figure_KommDiagr}
\end{figure}
In the following definition, the main property of the trivial strong ${\cal G}amma$-scheme in \eqref{eq_rho_sigma} is generalized and transferred to S-schemes:
\begin{definition} \label{def_EVSys}
Let $T, {\cal E}(T) \in \mf{D}$, and let $\alpha$ be an S-scheme from $T$ to ${\cal E}(T)$ with respect to $\mf{D}' \subseteq \mf{D}$. We call $\alpha$ a {\em simple S-scheme} iff there exists a strict homomorphism $\phi_T : {\cal E}(T) \rightarrow T$ with
\begin{equation} \label{defeq_EVsyst}
\forall \; G \in \mf{D}', \xi \in {\cal S}(G,T) \mytext{:} \; \phi_T \circ \alpha_{G,\xi} \; = \; \xi.
\end{equation}
If $G$ is fixed, we write $\alpha_\xi(v)$ instead of $\alpha_{G,\xi}(v)$.
\end{definition}
Because $\alpha$ is an S-scheme, $\alpha_{G,\xi}$ is strict for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,T)$. \eqref{defeq_EVsyst} shows that $\alpha_G : {\cal S}(G,T) \rightarrow {\cal S}(G,{\cal E}(T))$ is one-to-one for every $G \in \mf{D}'$; a simple S-scheme is thus always a strong S-scheme. The difference in notation of the homomorphism argument between general S-schemes (in brackets) and simple S-schemes (as subscript) has been chosen in order to facilitate the reading of formulas.
In what follows, we work in parallel with three S-schemes with respect to $\mf{D}'$: a simple S-scheme $\alpha^R$ from $R$ to an auxiliary digraph ${\cal E}(R)$, an S-scheme $\rho$ from $R$ to $S$, and a simple S-scheme $\alpha^S$ from $S$ to an auxiliary digraph ${\cal E}(S)$. In the case of ${\cal E}(R) \in \mf{D}'$, we are in the situation shown in Figure \ref{figure_KommDiagr}. The left triangle $G - {\cal E}(S) - S$ and the right triangle $G - {\cal E}(R) - R$ commute due to \eqref{defeq_EVsyst}. Because of ${\cal E}(R) \in \mf{D}'$ and $\phi_R \in {\cal S}( {\cal E}(R), R)$, both homomorphisms $\myrhoxi{ \phi_R } : {\cal E}(R) \rightarrow S$ and $\myaTxi{S}{\rho( \phi_R )} : {\cal E}(R) \rightarrow {\cal E}(S)$ are well-defined and strict with $\rho( \phi_R ) = \phi_S \circ \myaTxi{S}{\rho( \phi_R )}$ because of \eqref{defeq_EVsyst}. We are interested in the case where also the outer triangle $G - {\cal E}(R) - S$ commutes:
\begin{align} \label{Cond_0}
\forall \; G \in \mf{D}', \xi \in {\cal S}(G,R) \mytext{:} \myrhoxi{\xi} & \; = \; \rho( \phi_R ) \circ \aRxi,
\end{align}
which is implied by a commuting inner triangle $G - {\cal E}(R) - {\cal E}(S)$
\begin{align} \label{Cond_1}
\forall \; G \in \mf{D}', \xi \in {\cal S}(G,R) \mytext{:} \myaTxi{S}{\rho(\xi)} & \; = \; \myaTxi{S}{\rho( \phi_R )} \circ \aRxi.
\end{align}
The reason for being interested in \eqref{Cond_0} becomes visible, if we rewrite \eqref{Cond_0} by means of \eqref{defeq_EVsyst}:
\begin{align*}
\forall \; G \in \mf{D}', \xi \in {\cal S}(G,R) \mytext{:} \myrhoxi{\xi} & \; = \; \phi_S \circ \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R )} \circ \aRxi.
\end{align*}
Here, the properties of $\rho$ are mainly determined by the digraphs ${\cal E}(R)$ and ${\cal E}(S)$ and the strict homomorphism $\epsilon \equiv \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R )} $ between them. From a theoretical point of view, it is interesting to analyze the mutual dependency between the properties of $\rho$ and $\epsilon$, and from a practical point of view, there is a perspective to construct an S-scheme $\rho$ from $R$ to $S$ with desired properties by designing (hopefully simple) objects ${\cal E}(R)$ and ${\cal E}(S)$ and an appropriate homomorphism $\epsilon$ between them. In order to unburden the notation in what follows, we define for $T \in \{ R, S \}$:
\begin{align*}
{\cal E}_o(T) & \; \equiv \; V( {\cal E}(T)).
\end{align*}
In the discussion of Figure \ref{figure_KommDiagr}, we have used the assumption ${\cal E}(R) \in \mf{D}'$ in order to make the cogwheels interlock. We give names to this assumption and to an additional one:
\begin{itemize}
\item ERD: ${\cal E}(R) \in \mf{D}'$;
\item AID: $\myaTgxi{R}{{\cal E}(R)}{\phi_R} = \id_{{\cal E}_o(R)}$.
\end{itemize}
Assumption AID requires ERD, because otherwise, $\myaTgxi{R}{{\cal E}(R)}{\phi_R}$ is not defined. If ERD holds, then also $\rho(\phi_R) : {\cal E}(R) \rightarrow S$ is well-defined and strict, because $\rho$ is an S-scheme, and $\myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R )} : {\cal E}(R) \rightarrow {\cal E}(S)$ is strict, too.
In the following definition, we generalize our approach by replacing $\myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R )}$ with an arbitrary strict homomorphism from ${\cal E}(R)$ to ${\cal E}(S)$:
\begin{definition} \label{def_eps}
Let $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ be a strict homomorphism. We define for every $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$
\begin{equation*}
\myegxi{G}{\xi} \quad \equiv \quad \phi_S \circ \epsilon \circ \aRgxi
\end{equation*}
and call $\eta$ the {\em S-scheme induced by $\epsilon$.}
Additionally, we define for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$
\begin{equation*}
E_G(\xi) \quad \equiv \quad \mysetdescr{ v \in G }{ \aSgexiv \in \epsilon[ {\cal E}_o(R) ] }.
\end{equation*}
We write $\eta(\xi)$ and $E(\xi)$ in the case of a fixed $G \in \mf{D}'$.
\end{definition}
For $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, the mapping $\eta_G(\xi) : V(G) \rightarrow V(S)$ is a combination of strict homomorphisms, thus strict. Therefore, $\eta$ is indeed an S-scheme, as suggested by Definition \ref{def_eps}, and $\myaTgxi{S}{G}{\eta(\xi)}$ is well defined. For ERD and $\epsilon = \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R )}$, we have $\eta(\xi) = \rho( \phi_R ) \circ \aRxi$, hence $\eta = \rho$ in the case of \eqref{Cond_0}. The reader will observe that the set $E_G(\xi)$ can be determined by means of $\epsilon$ and $\eta_G(\xi)$ for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$; knowledge about $\xi$ is not required.
Induced S-schemes are characterized as follows:
\begin{theorem} \label{theo_induced_exists}
Let $\rho$ be an S-scheme from $R$ to $S$ with respect to $\mf{D}' \subseteq \mf{D}$. If $\rho$ is induced by a strict homomorphism from ${\cal E}(R)$ to ${\cal E}(S)$ then, for all $G, H \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, $\zeta \in {\cal S}(H,R)$, $v \in G$, $w \in H$,
\begin{equation} \label{eq_Escheme_rhowert}
\aRgxiv = \myaTgxiv{R}{H}{\zeta}{w}
\quad \Rightarrow \quad \rhogxiv = \myrhogxiv{H}{\zeta}{w}.
\end{equation}
On the other hand, if ERD and AID are fulfilled and $\rho$ is an S-scheme fulfilling \eqref{eq_Escheme_rhowert}, then $\rho$ is induced by $\myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$.
\end{theorem}
\begin{proof} Let $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ be a strict homomorphism inducing $\rho$. Then trivially, $\aRgxiv = \myaTgxiv{R}{H}{\zeta}{w}$ yields
\begin{equation*}
\rhogxiv \; = \; \phi_S \left( \epsilon( \aRgxiv ) \right) \; = \; \phi_S \left( \epsilon( \myaTgxiv{R}{H}{\zeta}{w} ) \right) \; = \; \myrhogxiv{H}{\zeta}{w},
\end{equation*}
thus \eqref{eq_Escheme_rhowert}.
Now assume ERD and AID and let $\rho$ be an S-scheme fulfilling \eqref{eq_Escheme_rhowert}. Due to AID, $\aRgxiv = \myaTgxiv{R}{{\cal E}(R)}{\phi_R}{\aRgxiv}$ for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$, hence
\begin{align*}
\rhogxiv
& \; \stackrel{\eqref{eq_Escheme_rhowert}}{=} \;
\rho_{{\cal E}(R)}(\phi_R)(\aRgxiv)
\; \stackrel{\eqref{defeq_EVsyst}}{=} \;
\phi_S \left( \myaTgxiv{S}{{\cal E}(R)}{\rho( \phi_R )}{\aRgxiv} \right),
\end{align*}
and $\rho$ is induced by $\myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$.
\end{proof}
For given ${\cal E}(R)$ and ${\cal E}(S)$, an induced S-scheme can in general be induced by several homomorphisms $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$. Nevertheless, the following proposition shows that it is beneficial to select $\epsilon$ carefully, because suitable properties of $\epsilon$ guarantee that $\eta$ is ``close to'' being strong:
\begin{proposition} \label{prop_eta_invers}
Let $\epsilon$ be a strict homomorphism from ${\cal E}(R)$ to ${\cal E}(S)$. Assume
\begin{equation} \label{epsaepsb_ab}
\forall \; \mf{a}, \mf{b} \in {\cal E}_o(R) \mytext{:} \; \; \epsilon( \mf{a} ) = \epsilon( \mf{b} ) \; \Rightarrow \; \phi_R( \mf{a} ) = \phi_R( \mf{b} ),
\end{equation}
and assume additionally that for every $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$
\begin{equation} \label{condsmall_aexiv_eaxiv}
\forall \; v \in E( \xi ) \; \exists \; \mf{a} \in \myurbild{ \phi_R }( \xi(v) ) \; \mytext{: } \aSexiv = \epsilon( \mf{a} ).
\end{equation}
Then, for every $r \in V(R)$ and every $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$
\begin{equation} \label{eq_eta_invers}
\myurbild{\xi}(r) \cap E( \xi ) \quad = \quad \bigcup_{\mf{a} \in \myurbild{ \phi_R }(r)} \myurbild{ \aSexi }( \epsilon( \mf{a} ) );
\end{equation}
we can thus reconstruct $\xi \vert_{E(\xi)}$ by means of $\eta(\xi)$ and $\epsilon$.
\end{proposition}
\begin{proof} Let $r \in V(R)$ and let $W$ be the set on the right side of \eqref{eq_eta_invers}. For $v \in \myurbild{\xi}(r) \cap E(\xi)$, assumption \eqref{condsmall_aexiv_eaxiv} delivers an $\mf{a} \in \myurbild{ \phi_R }(r)$ with $\aSexiv = \epsilon( \mf{a} )$, hence
\begin{equation*}
v \; \in \; \myurbild{ \aSexi }( \aSexiv ) \; = \; \myurbild{ \aSexi }( \epsilon( \mf{a} ) ).
\end{equation*}
We conclude $v \in W$ due to $\phi_R( \mf{a} ) = r$.
Now let $v \in W$, i.e., $v \in \myurbild{ \aSexi }( \epsilon( \mf{a} ) )$ for an $\mf{a} \in \myurbild{ \phi_R }(r)$. Then $v \in E(\xi)$. According to \eqref{condsmall_aexiv_eaxiv}, there exists a $\mf{b} \in \myurbild{ \phi_R }( \xi(v) )$ with $\aSexiv = \epsilon( \mf{b} )$. Now we get
\begin{equation*}
\epsilon( \mf{a} ) \; = \; \myaTxiv{S}{\eta(\xi)}{v} \; = \; \epsilon( \mf{b} ),
\end{equation*}
and assumption \eqref{epsaepsb_ab} yields $r = \phi_R( \mf{a} ) = \phi_R( \mf{b} ) = \xi(v)$, hence $v \in \myurbild{\xi}(r)$.
\end{proof}
Even if we have found a strict homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ fulfilling the conditions in Proposition \ref{prop_eta_invers}, there remains a gap for the induced S-scheme $\eta$ to be strong: how to reconstruct $ \xi $ on $ V(G) \setminus E(\xi)$? The gap disappears if $E(\xi) = V(G)$ for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$. The following condition does even more:
\begin{condition} \label{cond_univ_aexiv}
We say that a strict homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ {\em fulfills Condition \ref{cond_univ_aexiv}}, iff for every $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$
\begin{equation} \label{cond_aexiv_eaxiv}
\myaTgxi{S}{G}{\eta(\xi)} \quad = \quad \epsilon \circ \aRgxi.
\end{equation}
\end{condition}
In the case of $\epsilon = \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$, this condition is \eqref{Cond_1}. Induced S-schemes fulfilling Condition \ref{cond_univ_aexiv} are characterized in the following theorem:
\begin{theorem} \label{theo_eta_RS} Assume that ERD and AID are fulfilled and that $\rho$ is an S-scheme from $R$ to $S$. We define $\epsilon \equiv \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$.
(1) $\rho$ is induced by $ \epsilon$ and $\epsilon$ fulfills Condition \ref{cond_univ_aexiv}, iff $\rho$ fulfills the following regularity condition: for every $G, H \in \mf{D}'$, $\xi \in {\cal S}(G,R), \zeta \in {\cal S}(H,R)$, $v \in V(G)$, $w \in V(H)$
\begin{align} \label{eq_imagebased_gl}
\aRgxiv = \myaTgxiv{R}{H}{\zeta}{w}
& \quad \Rightarrow \quad
\myaTgxiv{S}{G}{\rho(\xi)}{v} = \myaTgxiv{S}{H}{\rho(\zeta)}{w}.
\end{align}
(2) If $\rho$ is induced by $\epsilon$ and $\epsilon$ fulfills Condition \ref{cond_univ_aexiv} and \eqref{epsaepsb_ab}, then $\rho$ is a strong S-scheme fulfilling \eqref{eq_imagebased_gl}.
In particular, in the case of $R \in \mf{D}' = \mf{D}$, $R \in \mf{T}a \subseteq \mf{D}' \subseteq \mf{D}$, or $R \in \mf{P}'$ with $\mf{P}' = \mf{P}$ or $\mf{P}' = \mf{P}^*$, $\rho$ can be extended to a strong $\Gamma$-scheme $\rho'$ with $\rho'_G \vert_{{\cal S}(G,R)}^{{\cal S}(G,S)} = \rho_G$ for all $G \in \mf{D}'$.
\end{theorem}
\begin{proof} (1) If $\rho$ is induced by $ \epsilon$ and $\epsilon$ fulfills Condition \ref{cond_univ_aexiv}, then, for $\aRgxiv = \myaTgxiv{R}{H}{\zeta}{w}$,
\begin{equation*}
\myaTgxiv{S}{G}{\rho(\xi)}{v}
\; = \;
\epsilon( \aRgxiv )
\; = \;
\epsilon( \myaTgxiv{R}{H}{\zeta}{w} )
\; = \;
\myaTgxiv{S}{H}{\rho(\zeta)}{w}.
\end{equation*}
Now assume that the S-scheme $\rho$ fulfills \eqref{eq_imagebased_gl}. According to the second part of Theorem \ref{theo_induced_exists}, $\rho$ is induced by $\epsilon$, because \eqref{eq_imagebased_gl} implies \eqref{eq_Escheme_rhowert} via \eqref{defeq_EVsyst}. For $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$, AID yields $\aRgxiv = \myaTgxiv{R}{{\cal E}(R)}{\phi_R}{\aRgxiv}$, hence,
\begin{equation*}
\myaTgxiv{S}{G}{\rho(\xi)}{v} \; \stackrel{\eqref{eq_imagebased_gl}}{=} \; \myaTgxiv{S}{{\cal E}(R)}{\rho(\phi_R)}{\aRgxiv} \; = \; \epsilon( \aRgxiv ),
\end{equation*}
and $\epsilon$ fulfills Condition \ref{cond_univ_aexiv}.
(2) Due to part (1) of the theorem, we only have to show that $\rho$ is strong. Because Condition \ref{cond_univ_aexiv} implies $E(\xi) = G$ and \eqref{condsmall_aexiv_eaxiv} for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, Proposition \ref{prop_eta_invers} delivers
\begin{equation*}
\myurbild{\xi}(r) \quad = \quad \bigcup_{\mf{a} \in \myurbild{ \phi_R }(r)} \myurbild{ {\myaTgxi{S}{G}{\rho(\xi)}}}( \epsilon( \mf{a} ) ).
\end{equation*}
for every $\xi \in {\cal S}(G,R)$, $G \in \mf{D}'$, $r \in R$. $\rho_G$ is thus one-to-one for every $G \in \mf{D}'$, and $\rho$ is a strong S-scheme. The addendum follows with Proposition \ref{prop_extend_Sscheme}.
\end{proof}
In Theorem \ref{theo_eta_RS_inv} in Section \ref{subsec_inv_Theo_Eta_RS_2}, we will see that with an appropriate choice of ${\cal E}(R)$ and ${\cal E}(S)$, the inverse of Theorem \ref{theo_eta_RS}(2) is true for the constellations of $R$ and $\mf{D}'$ described in the addendum.
\section{The EV-system of a digraph} \label{sec_EVSystems}
Until now, we have specified nothing about the auxiliary digraphs ${\cal E}(R)$ and ${\cal E}(S)$ we have used in the definition of an induced S-scheme. In Section \ref{subsec_EVSystems_Props}, we specify ${\cal E}(R)$ and ${\cal E}(S)$ as {\em EV-systems} of $R$ and $S$, and in Section \ref{subsec_inv_Theo_Eta_RS_2}, we use them in inverting Theorem \ref{theo_eta_RS}(2) for the cases $R \in \mf{D}' = \mf{D}$, $R \in \mf{T}a \subseteq \mf{D}' \subseteq \mf{D}$, and $R \in \mf{P}'$ with $\mf{P}' = \mf{P}$ or $\mf{P}' = \mf{P}^*$. Section \ref{subsec_replacement_Cond1} is devoted to the replacement of Condition \ref{cond_univ_aexiv} by a more handy condition referring to the homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ only.
\subsection{EV-systems and their properties} \label{subsec_EVSystems_Props}
In mechanical engineering, the exploded-view drawing of an engine shows the relationship or order of assembly of its components by distributing them in the drawing area in a well-arranged and meaningful way. That is exactly what the {\em EV-system} of a digraph does with respect to the relations between its vertices:
\begin{definition} \label{def_EVsys_alt}
Let $R$ be a digraph and $\mf{D}' \subseteq \mf{D}$. We define
\begin{align*}
{\cal E}_o(R) & \equiv \mysetdescr{ ( v, D, U ) }{ v \in V(R), D \subseteq N^{in}_R(v), U \subseteq N^{out}_R(v) }.
\end{align*}
For $\mf{a} \in {\cal E}_o(R)$, we refer to the three components of $\mf{a}$ by $\mf{a}_1, \mf{a}_2$, and $\mf{a}_3$, and we define
\begin{align*}
\phi_R : {\cal E}_o(R) & \rightarrow V(R), \\
\mf{a} & \mapsto \mf{a}_1.
\end{align*}
Furthermore, for every $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, we define the mapping
\begin{align*}
\myaTgxi{R}{G}{\xi} : V(G) & \rightarrow {\cal E}_o(R), \\
v & \mapsto \left( \xi(v), \xi[ N^{in}_G(v) ], \xi[ N^{out}_G(v) ] \right).
\end{align*}
The {\em EV-system ${\cal E}(R)$ of $R$ with respect to $\mf{D}'$} is the digraph with vertex set ${\cal E}_o(R)$ and arc set $A( {\cal E}(R) )$ defined by
\begin{align*}& \mf{a} \mf{b} \in A( {\cal E}(R) ) \\
\equiv \quad & \exists \; G \in \mf{D}', \xi \in {\cal S}(G,R), vw \in A(G) \mytext{: } \mf{a} = \aRgxiv, \; \mf{b} = \aRgxiw.
\end{align*}
\end{definition}
It is thus the arc set of an EV-system which depends on $\mf{D}'$, whereas the vertex set is independent of it. $\phi_R \circ \aRgxi = \xi$ is trivial. In Lemma \ref{lemma_phi_def}, we will see that $\phi_R$ is strict, as required. As usual, we write $\aRxi$ in the case of a fixed digraph $G$.
\begin{figure}
\caption{The EV-systems of the posets in Figure \ref{figure_Intro}(a)-(b) with respect to $\mf{D}$ and $\mf{P}$.}
\label{figure_Bspl_GKonstr}
\end{figure}
Figure \ref{figure_Bspl_GKonstr} shows the EV-systems of the posets in Figure \ref{figure_Intro}(a)-(b) with respect to $\mf{D}$ and $\mf{P}$. All loops are omitted in the diagrams. The poset $S$ in (a) is the only one of the four for which the reference class $\mf{D}'$ makes a difference. The sets $\myurbild{\phi_T}(v)$, $v \in V(T)$, $T \in \{ R, S \}$, are encircled and labeled with the respective $v$. For each point $\mf{a}$ in the diagrams, $\mf{a}_1$ is thus given by this label, and we get $\mf{a}_2$ and $\mf{a}_3$ by looking at the labels of the starting points and end points of arrows ending and starting in $\mf{a}$, respectively.
The reader will have noticed that in the definition of the mappings $\aRgxi$, the restriction to digraphs $G$ {\em contained in $\mf{D}'$} is unnecessary; we could define the mappings in the same way also for {\em all} digraphs $G$. We did not do so for two reasons. Firstly, we would burden the discussion in this section with discriminations about where ERD is required and where not, and secondly, we would gain nothing with this effort, because starting in Section \ref{subsec_inv_Theo_Eta_RS_2}, we work with $\rho$ again and are thus restricted to $G \in \mf{D}'$.
In the following three lemmata, we collect basic properties of the EV-system ${\cal E}(R)$ with respect to an arbitrary $\mf{D}' \subseteq \mf{D}$; starting with Definition \ref{def_Xfa}, we deal with the EV-system ${\cal E}(R)$ with respect to the classes $\mf{D}'$ we are mainly interested in.
According to the definition of $A( {\cal E}(R) )$, the mapping $\alpha_{G,\xi}$ is a homomorphism from $G$ to ${\cal E}(R)$ for every $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$. For $vw \in A(G^*)$ we have $\xi(v) \not= \xi(w)$, hence $\alpha_{G,\xi}(v) \not= \alpha_{G,\xi}(w)$, and $\alpha_{G,\xi}$ is strict. $\alpha$ is thus an S-scheme from $R$ to ${\cal E}(R)$.
\begin{lemma} \label{lemma_fafb_simpleProp}
For all $\mf{a}, \mf{b} \in {\cal E}_o(R)$
\begin{align} \label{fafab_allgemein}
\mf{a} \mf{b} \in A( {\cal E}(R ) ) \quad & \Rightarrow \quad \mf{a}_1 \mf{b}_1 \in A(R), \\
\label{fafb_ungleich}
\mf{a} \mf{b} \in A( {\cal E}(R)^* ) \quad & \Rightarrow \quad
\mf{a}_1 \mf{b}_1 \in A( R^* ), \mf{a}_1 \in \mf{b}_2, \mf{b}_1 \in \mf{a}_3, \\
\label{fafb_gleich}
\mf{a} \mf{b} \in A( {\cal E}(R) ) \mytext{ with } \mf{a}_1 = \mf{b}_1 \quad & \Rightarrow \quad \mf{a} = \mf{b}. \\
\label{RTa_ERTa}
\mytext{Furthermore,} \quad R \in \mf{T}a \quad & \Rightarrow \quad {\cal E}(R) \in \mf{T}a.
\end{align}
\end{lemma}
\begin{proof} Let $\mf{a} \mf{b} \in A( {\cal E}(R) )$. There exist $G \in \mf{D}', \xi \in {\cal S}(G,R)$, and $vw \in A(G)$ with $\mf{a} = \aRxiv$, $\mf{b} = \aRxiw$, thus $\mf{a}_1 \mf{b}_1 = \xi(v) \xi(w) \in A(R)$. If $\mf{a} \mf{b} \in A({\cal E}(R)^*)$, then $v \not= w$, hence $\mf{a}_1 \mf{b}_1 = \xi(v) \xi(w) \in A( R^*)$. Furthermore, due to $v \in N^{in}_G(w)$, we have
\begin{equation*}
\mf{a}_1 \; = \; \xi(v) \; \in \; \xi[ N^{in}_G(w) ] \; = \; \aRxiw_2 \; = \; \mf{b}_2.
\end{equation*}
$\mf{b}_1 \in \mf{a}_3$ is similarly shown, and \eqref{fafb_ungleich} is proven. For $\mf{a} \mf{b} \in A( {\cal E}(R) )$ with $\mf{a}_1 = \mf{b}_1$, \eqref{fafab_allgemein} and \eqref{fafb_ungleich} yield $\mf{a} = \mf{b}$.
Let $R \in \mf{T}a$, and let $\mf{a}^0, \ldots, \mf{a}^I$ be a closed walk in ${\cal E}(R)^*$. Due to \eqref{fafb_ungleich}, we have $\mf{a}^{i-1}_1 \mf{a}^i_1 \in A(R^*)$ for all $i \in \{ 1, \ldots, I \}$, and the sequence $\mf{a}^0_1, \ldots, \mf{a}^I_1$ is a closed walk in $R^*$, in contradiction to $R \in \mf{T}a$.
\end{proof}
\begin{lemma} \label{lemma_phi_def}
The mapping $\phi_R$ is a strict homomorphism from ${\cal E}(R)$ to $R$, and $\alpha^R$ is a simple S-scheme from $R$ to ${\cal E}(R)$ with respect to $\mf{D}'$. In the case of ERD,
\begin{align} \label{aphifa_fa}
\myaTxiv{R}{\phi_R}{\mf{a}}_2 \subseteq \mf{a}_2
& \mytext{ and }
\myaTxiv{R}{\phi_R}{\mf{a}}_3 \subseteq \mf{a}_3
\end{align}
for all $\mf{a} \in {\cal E}_o(R)$.
\end{lemma}
\begin{proof} $\phi_R$ is a strict homomorphism because of the two first implications in Lemma \ref{lemma_fafb_simpleProp}. We have already seen that $\alpha^R$ is an S-scheme from $R$ to ${\cal E}(R)$, and due to $\xi = \phi_R \circ \aRxi$ for all $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, $\alpha^R$ is simple.
In the case of ERD, $\alpha_{\phi_R}$ is well defined with
\begin{equation*}
\myaxiv{\phi_R}{\mf{a}}_3 \; = \; \phi_R \left[ N^{out}_{{\cal E}(R)}( \mf{a} ) \right] \; = \;
\mysetdescr{ \mf{b}_1 }{ \mf{b} \in N^{out}_{{\cal E}(R)}( \mf{a} ) }
\; \stackrel{\eqref{fafb_ungleich}}{\subseteq} \; \mf{a}_3.
\end{equation*}
$\myaxiv{\phi_R}{\mf{a}}_2 \subseteq \mf{a}_2$ is shown in the same way.
\end{proof}
\begin{lemma} \label{lemma_phi_fa}
If ERD holds, then AID is equivalent to
\begin{equation} \label{eq_EXI}
\forall\; \mf{a} \in {\cal E}_o(R) \; \exists\; G \in \mf{D}', \xi \in {\cal S}(G,R), v \in V(G) \mytext{: } \mf{a} = \aRgxiv.
\end{equation}
\end{lemma}
\begin{proof} ``$\Rightarrow$'' is trivial. Assume \eqref{eq_EXI}. We write $\phi$ instead of $\phi_R$. Let $\mf{a} \in {\cal E}_o(R)$ and $G \in \mf{D}'$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$ with $\mf{a} = \aRgxiv$. Due to \eqref{aphifa_fa}, we have to show $\mf{a}_2 \subseteq \myaTgxiv{R}{{\cal E}(R)}{\phi}{\mf{a}}_2$ and $\mf{a}_3 \subseteq \myaTgxiv{R}{{\cal E}(R)}{\phi}{\mf{a}}_3$ only.
For $a \in \mf{a}_2$, there exists a $w \in N^{in}_G(v)$ with $a = \xi(w)$. $wv \in A(G^*)$ yields $\aRgxiw \in N^{in}_{{\cal E}(R)}(\mf{a})$ due to the strictness of $\aRgxi$, hence
\begin{equation*}
a \; = \; \phi( \aRgxiw ) \; \in \; \phi \left[ N^{in}_{{\cal E}(R)}(\mf{a}) \right] \; = \; \myaTgxiv{R}{{\cal E}(R)}{\phi}{\mf{a}}_2.
\end{equation*}
$\mf{a}_3 \subseteq \myaTgxiv{R}{{\cal E}(R)}{\phi}{\mf{a}}_3$ is proven in the same way.
\end{proof}
We now show that ERD and AID are fulfilled for the choices of $\mf{D}'$ and $R$ we are particularly interested in: $R \in \mf{D}' = \mf{D}$, $R \in \mf{T}a \subseteq \mf{D}' \subseteq \mf{D}$, $R \in \mf{D}' = \mf{P}$, and $R \in \mf{D}' = \mf{P}^*$. We need the following objects:
\begin{definition} \label{def_Xfa}
For every $m, n \in \mathbb{N}_0$, we define the digraph $\Xmn \in \mf{T}a$ by
\begin{align*}
V( \Xmn ) & \; \equiv \; D \cup \{ p \} \cup U, \\
A( \Xmn ) & \; \equiv \; \left( D \times \{ p \} \right) \; \cup \; \left( \{ p \} \times U \right),
\end{align*}
where $D$ and $U$ are disjoint sets with $\# D = m$, $\# U = n$, and $p \notin D \cup U$.
Furthermore, for $\mf{a} \in {\cal E}_o(R)$, we define the digraph $X(\mf{a})$
\begin{itemize}
\item as $\myXmnC{\mf{a}_2}{\mf{a}_3}$ in the case of $R \in \mf{D}' = \mf{D}$ or $R \in \mf{T}a \subseteq \mf{D}' \subseteq \mf{D}$;
\item as the transitive hull of $\myXmnC{\mf{a}_2}{\mf{a}_3}$ in the case of $R \in \mf{D}' = \mf{P}^*$;
\item as the transitive hull of $\myXmnC{\mf{a}_2}{\mf{a}_3}$ with loops added for every vertex in the case of $R \in \mf{D}' = \mf{P}$.
\end{itemize}
$\iota(\mf{a}) : V(X(\mf{a})) \rightarrow V(R)$ is a mapping sending $p$ to $\mf{a}_1$ and $D$ and $U$ bijectively to $\mf{a}_2$ and $\mf{a}_3$, respectively.
\end{definition}
\begin{figure}
\caption{(a) The digraph $\myXmn{2}{3}$; (b) examples for $X(\mf{a})$.}
\label{figure_Xij}
\end{figure}
$\Xmn$ is thus a bug with $m$ legs, $n$ tentacles and body-vertex $p$. $\myXmn{2}{3}$ is shown in Figure \ref{figure_Xij}(a), and examples for $X(\mf{a})$ are shown in Figure \ref{figure_Xij}(b). Due to $\mf{P}^*, \mf{P} \subset \mf{T}a$ and $\mf{P}^* \cap \mf{P} = \emptyset$, $X(\mf{a})$ is in all four cases uniquely determined by $\mf{D}'$, and in all four cases, $X(\mf{a})$ is an element of $\mf{D}'$. In the following corollary, simple properties of $X(\mf{a})$ and $\iota(\mf{a})$ are summarized. It is the inconspicuous first statement which will cause some trouble in Section \ref{sec_undirected} because it does not have a counterpart for undirected graphs.
\begin{corollary} \label{coro_props_Xfa}
Let $R$ and $\mf{D}'$ be as in the choices in Definition \ref{def_Xfa}. For every $\mf{a} \in {\cal E}_o(R)$, $\pi \in \mathrm{Aut}(X(\mf{a}))$, the vertex $p$ is a fixed point of $\pi$, and $D$ and $U$ are bijectively mapped to $D$ and $U$, respectively. In consequence, for all $\mf{a} \in {\cal E}_o(R)$
\begin{align} \label{iotafa_EXI}
\myaxiv{\iota(\mf{a})}{p}
& \; = \; \mf{a}, \\ \label{alpha_aiapi}
\myaxiv{\iota(\mf{a}) \circ \pi}{v}
& \; = \;
\myaiav{\mf{a}}{\pi(v)} \quad \mytext{for all } v \in V(X(\mf{a})), \pi \in \mathrm{Aut}( X(\mf{a}) ) \\ \label{card_AutXfa}
\# \mathrm{Aut}( X(\mf{a}) ) & \; = \; ( \# \mf{a}_2 ! ) \cdot ( \# \mf{a}_3 ! ) \; = \; \# \mysetdescr{ \iota(\mf{a}) \circ \pi }{ \pi \in \mathrm{Aut}( X(\mf{a}) ) }.
\end{align}
\end{corollary}
\begin{proof} For $\mf{a}_2, \mf{a}_3 \not= \emptyset$, $p$ is the only vertex $v$ of $X(\mf{a})$ with $N^{in}_{X(\mf{a})}(v)$, $N^{out}_{X(\mf{a})}(v) \not= \emptyset$ (also in the case of $\mf{a}_2 \cap \mf{a}_3 \not= \emptyset$, cf.\ Figure \ref{figure_Xij}(b)). Also in the other cases, $p$ provides a unique empty-nonempty-combination of $N^{in}_{X(\mf{a})}(v)$ and $N^{out}_{X(\mf{a})}(v)$ among the vertices $v$ of $X(\mf{a})$. Therefore, $p$ is a fixed point of every $\pi \in \mathrm{Aut}( X(\mf{a}) )$, and the rest follows.
\end{proof}
In an intuitive sense, $X(\mf{a})$ is the most simple digraph $G$ in the respective class $\mf{D}'$ providing a $\xi \in {\cal S}(G,R)$ with $\myaxiv{\xi}{p} = \mf{a} $. If $\mf{a}_2$ and $\mf{a}_3$ are not disjoint (which may happen in the case of $ R \in \mf{D}' = \mf{D}$, cf.\ Figure \ref{figure_Xij}(b)), then $\iota(\mf{a})$ is not one-to-one; however, for our purpose it is enough that it is always strict and fulfills the three equations in Corollary \ref{coro_props_Xfa}.
\begin{figure}
\caption{The digraph $H$ constructed from disjoint copies of $X(\mf{a})$ and $X(\mf{b})$.}
\label{figure_H_Gfa_Gfb}
\end{figure}
\begin{proposition} \label{prop_fDStr_fTa}
Let $\mf{T}a \subseteq \mf{D}' \subseteq \mf{D}$ and $R \in \mf{D}$. For all $\mf{a}, \mf{b} \in {\cal E}_o(R)$,
\begin{align} \label{fafb_ugl_fTa}
\mf{a} \mf{b} \in A( {\cal E}(R)^* ) \quad & \Leftrightarrow \quad
\mf{a}_1 \in \mf{b}_2 \mytext{ and } \mf{b}_1 \in \mf{a}_3, \\ \label{fafb_gl_fTa}
\mf{a} \mf{a} \in A( {\cal E}(R) ) \quad & \Leftrightarrow \quad \mf{a}_1 \mf{a}_1 \in A(R).
\end{align}
ERD and AID hold for $\mf{D}' = \mf{D}$ and for $R \in \mf{T}a \subseteq \mf{D}'$.
\end{proposition}
\begin{proof}
Because of Lemma \ref{lemma_fafb_simpleProp}, we have to show ``$\Leftarrow$'' only in \eqref{fafb_ugl_fTa} and \eqref{fafb_gl_fTa}. Let $\mf{a}, \mf{b} \in {\cal E}_o(R)$ with $\mf{a}_1 \in \mf{b}_2$ and $\mf{b}_1 \in \mf{a}_3$. Then $\mf{a}_1 \in N^{in}_R(\mf{b}_1)$, hence $\mf{a}_1 \mf{b}_1 \in A(R^*)$. We take disjoint isomorphic copies $G_\mf{a}$ and $G_\mf{b}$ of $X(\mf{a})$ and $X(\mf{b})$ and connect them to a digraph $H \in \mf{T}a$ as indicated in Figure \ref{figure_H_Gfa_Gfb}: we identify one of the tentacle-vertices of $G_\mf{a}$ with the body-vertex $w$ of $G_\mf{b}$, and we identify one of the leg-vertices of $G_\mf{b}$ with the body-vertex $v$ of $G_\mf{a}$. The mapping $\xi$ from $V(H)$ to $V(R)$ indicated in the figure is a strict homomorphism from $H$ to $R$ with $vw \in A( H^* )$, $\alpha_\xi(v) = \mf{a}$, and $\alpha_\xi(w) = \mf{b}$, thus $\mf{a} \mf{b} \in A( {\cal E}(R)^* )$.
Let $\mf{a}_1 \mf{a}_1 \in A(R)$. We construct $G \in \mf{T}a$ by adding the loop $(p,p)$ to the arc set of $X(\mf{a})$, and with \eqref{iotafa_EXI}, we get $\mf{a} \mf{a} = \myagxiv{G}{\iota(\mf{a})}{p} \, \myagxiv{G}{\iota(\mf{a})}{p} \in A({\cal E}(R))$.
For $\mf{D}' = \mf{D}$, ERD is trivial, and for $R \in \mf{T}a \subseteq \mf{D}'$, ERD holds due to \eqref{RTa_ERTa}. In both cases, AID follows with \eqref{iotafa_EXI} and Lemma \ref{lemma_phi_fa}.
\end{proof}
\begin{proposition} \label{prop_fDStr_Pos}
Let $\mf{P}'$ be one of the classes $\mf{P}$ or $\mf{P}^*$ and let $R \in \mf{P}'$. Then, for all $\mf{a}, \mf{b} \in {\cal E}_o(R)$,
\begin{align} \label{fafb_poset}
\mf{a} \mf{b} \in A( {\cal E}(R)^* ) \quad & \Leftrightarrow \quad
\mf{a}_1 \in \mf{b}_2,\; \mf{b}_1 \in \mf{a}_3,\; \mf{a}_2 \subseteq \mf{b}_2, \mytext{ and } \mf{b}_3 \subseteq \mf{a}_3,
\end{align}
and ERD and AID hold.
\end{proposition}
\begin{proof} Let $\mf{a} \mf{b} \in A( {\cal E}(R)^* )$. $\mf{a}_1 \in \mf{b}_2$ and $\mf{b}_1 \in \mf{a}_3$ are due to \eqref{fafb_ungleich}. There exist $P \in \mf{P}'$, $\xi \in {\cal S}(P, R)$, and $vw \in A(P^*)$ with $\alpha_\xi(v) = \mf{a}$ and $\alpha_\xi(w) = \mf{b}$. The transitivity and antisymmetry of $P$ yield $N^{in}_P(v) \subseteq N^{in}_P(w)$, hence $\alpha_\xi(v)_2 = \xi[ N^{in}_P(v) ] \subseteq \xi[ N^{in}_P(w) ] = \alpha_\xi(w)_2$. The proof of $\mf{b}_3 \subseteq \mf{a}_3$ runs similarly.
For the proof of ``$\Leftarrow$'' in \eqref{fafb_poset}, take the digraph $H$ in Figure \ref{figure_H_Gfa_Gfb} and remove $\# \mf{b}_3$ of the free tentacle-vertices originating from $v$ and $\# \mf{a}_2$ of the free leg-vertices leading to $w$. The transitive hull $P$ of the resulting digraph is an element of $\mf{P}^*$, and for $R \in \mf{P}^*$ and $\xi$ corresponding to the homomorphism in Figure \ref{figure_H_Gfa_Gfb}, we have $\myagxiv{P}{\xi}{v} = \mf{a}$ and $\myagxiv{P}{\xi}{w} = \mf{b}$ with $vw \in A(P^*)$. In the case of $R \in \mf{P}$, add a loop to $A(P)$ for every vertex. \eqref{fafb_poset} is now shown for both choices of $\mf{P}'$.
The transitivity of ${\cal E}(R)$ results with \eqref{fafb_poset}. Due to $\mf{P}' \subset \mf{T}a$, we have ${\cal E}(R) \in \mf{T}a$ according to \eqref{RTa_ERTa}, and ${\cal E}(R)$ is antisymmetric. For $R \in \mf{P}^*$, the irreflexivity of ${\cal E}(R)$ is due to \eqref{fafab_allgemein}. For $R \in \mf{P}$ and $\mf{a} \in {\cal E}_o(R)$, we have $\mf{a} \mf{a} = \myagxiv{G}{\iota(\mf{a})}{p} \, \myagxiv{G}{\iota(\mf{a})}{p} \in A({\cal E}(R))$ due to \eqref{iotafa_EXI}.
Again, AID follows with \eqref{iotafa_EXI} and Lemma \ref{lemma_phi_fa}.
\end{proof}
\subsection{The inversion of Theorem \ref{theo_eta_RS}(2) } \label{subsec_inv_Theo_Eta_RS_2}
In this section, we prove that Theorem \ref{theo_eta_RS}(2) can be inverted for the choices of $R$ and $\mf{D}'$ we are particularly interested in:
\begin{theorem} \label{theo_eta_RS_inv}
Let $R \in \mf{D}' = \mf{D}$, $R \in \mf{T}a \subseteq \mf{D}' \subseteq \mf{D}$, or $R \in \mf{D}' = \mf{P}'$ with $\mf{P}' = \mf{P}$ or $\mf{P}' = \mf{P}^*$, and let ${\cal E}(R)$ and ${\cal E}(S)$ be the EV-Systems of $R$ and $S$. If $\rho$ is a strong S-scheme from $R$ to $S$ fulfilling \eqref{eq_imagebased_gl}, then $\epsilon \equiv \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$ fulfills Condition \ref{cond_univ_aexiv}, induces $\rho$, and is one-to-one.
\end{theorem}
The theorem extends also to the sub-classes of $\mf{D}$ mentioned at the end of Section \ref{subsec_DefHomSchemes}.
For the constellations of $R$ and $\mf{D}'$ described in the theorem, ERD and AID are fulfilled according to the Propositions \ref{prop_fDStr_fTa} and \ref{prop_fDStr_Pos}. If $\rho$ fulfills \eqref{eq_imagebased_gl}, then $\epsilon$ fulfills Condition \ref{cond_univ_aexiv} and induces $\rho$ according to Theorem \ref{theo_eta_RS}(1). What is left to show is that the additional assumption ``$\rho$ strong'' implies ``$\epsilon$ one-to-one''.
It is beneficial to replace the description $\epsilon = \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$ by a more intuitive one. We have
\begin{equation*}
\myaTxiv{R}{\phi}{\mf{a}}
\; \stackrel{\mytext{AID}}{=} \; \mf{a}
\; \stackrel{\eqref{iotafa_EXI}}{=} \; \myaTgxiv{R}{X(\mf{a})}{\iota(\mf{a})}{p}
\end{equation*}
for every $\mf{a} \in {\cal E}_o(R)$, hence
\begin{equation} \label{eps_via_aXfaifa}
\epsilon( \mf{a}) \; = \;
\myaTgxiv{S}{{\cal E}(R)}{\rho( \phi_R ) }{ \mf{a} }
\; \stackrel{\eqref{eq_imagebased_gl}}{=} \;
\myaTgxiv{S}{X(\mf{a})}{\rho(\iota(\mf{a}))}{p} \quad \mytext{for all } \mf{a} \in {\cal E}(R).
\end{equation}
This is the description of $\epsilon$ we are using in what follows. The point $p$ and the sets $D$ and $U$ have been specified in the definition of $X(\mf{a})$.
\begin{corollary} \label{coro_rhophi_DpU}
Let $\mf{a} \in {\cal E}_o(R)$. Then, for every $\pi \in {\cal A}ut(X(\mf{a}))$,
\begin{align} \label{alpha_ariapi}
\rho(\iota(\mf{a}) \circ \pi)(v)
& \; = \;
\rho(\iota(\mf{a}))(\pi(v))
\quad \mytext{for all } v \in V(X(\mf{a})),
\end{align}
and
\begin{align} \label{eqs_riapi}
\begin{split}
\rho(\iota(\mf{a}) \circ \pi)(p) & \; = \; \epsilon(\mf{a})_1, \\
\rho(\iota(\mf{a}) \circ \pi)[D] & \; = \; \epsilon(\mf{a})_2, \\
\rho(\iota(\mf{a}) \circ \pi)[U] & \; = \; \epsilon(\mf{a})_3.
\end{split}
\end{align}
\end{corollary}
\begin{proof} Let $\pi \in {\cal A}ut(X(\mf{a}))$. According to \eqref{alpha_aiapi}, we have $\myaTxiv{R}{\iota(\mf{a}) \circ \pi}{v} = \myaTxiv{R}{\iota(\mf{a})}{\pi(v)}$ for all $v \in V(X(\mf{a}))$. Equation \eqref{eq_imagebased_gl} delivers $\myaTxiv{S}{\rho( \iota(\mf{a}) \circ \pi )}{v} = \myaTxiv{S}{\rho(\iota(\mf{a}))}{\pi(v)}$ for all $v \in V(X(\mf{a}))$, and \eqref{alpha_ariapi} follows.
$p$ is a fixed point of $\pi$ according to Corollary \ref{coro_props_Xfa}, hence $\myaTxiv{S}{\rho( \iota(\mf{a}) \circ \pi )}{p} = \myaTxiv{S}{\rho(\iota(\mf{a}))}{p}$, and the first equation follows. Furthermore
\begin{align*}
\rho(\iota(\mf{a}) \circ \pi)[D]
& \; = \;
\mysetdescr{ \rho(\iota(\mf{a}) \circ \pi)(d) }{ d \in D }
\; \stackrel{\eqref{alpha_ariapi}}{=} \;
\mysetdescr{ \rho(\iota(\mf{a}))(\pi(d)) }{ d \in D } \\
& \; = \;
\mysetdescr{ \rho(\iota(\mf{a}))(d) }{ d \in D }
\; = \;
\rho(\iota(\mf{a}))[D]
\; = \;
\myaTxiv{S}{\rho(\iota(\mf{a}))}{p}_2
\; \stackrel{\eqref{eps_via_aXfaifa}}{=} \;
\epsilon( \mf{a} )_2.
\end{align*}
The last equation is proven in the same way.
\end{proof}
\begin{lemma} \label{lemma_rhophi_isom_1}
Let $\mf{a} \in {\cal E}_o(R)$. If $\rho$ is strong, then, for every $\pi \in {\cal A}ut(X(\mf{a}))$, the mapping $\rho( \iota(\mf{a}) \circ \pi)$ is one-to-one on $D$ and $U$. In particular, $ X( \mf{a} ) \simeq X( \epsilon( \mf{a} ) )$.
\end{lemma}
\begin{proof} Let $\mf{a} \in {\cal E}_o(R)$ and $\pi \in {\cal A}ut(X(\mf{a}))$. Assume $\rho( \iota(\mf{a}) \circ \pi)(c) = \rho( \iota(\mf{a}) \circ \pi)(d)$ for $c, d \in D$. We define the automorphism $\xi : X(\mf{a}) \rightarrow X(\mf{a})$ by
\begin{align*}
\xi(v) & \equiv
\begin{cases}
\pi(v), & \mytext{if } v \in V(X(\mf{a})) \setminus \{ c, d \}; \\
\pi(d), & \mytext{if } v = c; \\
\pi(c), & \mytext{if } v = d.
\end{cases}
\end{align*}
According to \eqref{alpha_ariapi}, we have $\rho(\iota(\mf{a}) \circ \pi)(v) = \rho(\iota(\mf{a}))(\pi(v)) $
and $\rho(\iota(\mf{a}) \circ \xi)(v) = \rho(\iota(\mf{a}))(\xi(v)) $ for all $v \in V(X(\mf{a}))$. Thus, for every vertex $v \in V( X(\mf{a}) ) \setminus \{ c, d \}$,
\begin{align*}
\myrhoxiv{\iota(\mf{a}) \circ \xi}{v}
& = \;
\myrhoxiv{\iota(\mf{a})}{\xi(v)}
\; = \;
\myrhoxiv{\iota(\mf{a})}{\pi(v)} \\
& = \;
\myrhoxiv{\iota(\mf{a}) \circ \pi}{v}. \\
\mytext{Furthermore,} \quad \myrhoxiv{\iota(\mf{a}) \circ \xi}{c} & = \;
\myrhoxiv{\iota(\mf{a})}{\xi(c)}
\; = \;
\myrhoxiv{\iota(\mf{a})}{\pi(d)} \\
& = \;
\myrhoxiv{\iota(\mf{a}) \circ \pi}{d}
\; = \;
\myrhoxiv{\iota(\mf{a}) \circ \pi}{c} . \\
\mytext{and similarly} \quad \myrhoxiv{\iota(\mf{a}) \circ \xi}{d} & = \;
\myrhoxiv{\iota(\mf{a}) \circ \pi}{d},
\end{align*}
hence $\rho(\iota(\mf{a}) \circ \xi) = \rho(\iota(\mf{a}) \circ \pi)$. Because $\rho$ is strong, we have $\iota(\mf{a}) \circ \xi = \iota(\mf{a}) \circ \pi$, thus $c = d$, because all three mappings are one-to-one on $D$. In the same way we see that $\rho( \iota(\mf{a}) \circ \pi)$ is one-to-one on $U$. Now the equations \eqref{eqs_riapi} yield $X(\mf{a}) \simeq X(\epsilon(\mf{a}))$.
\end{proof}
The following lemma finishes the proof of Theorem \ref{theo_eta_RS_inv}:
\begin{lemma} \label{lemma_eps_oneone}
If $\rho$ is strong, then $\epsilon$ is one-to-one.
\end{lemma}
\begin{proof} Let $\mf{a}, \mf{b} \in {\cal E}(R)$ with $\epsilon( \mf{a} ) = \epsilon( \mf{b} )$. According to Lemma \ref{lemma_rhophi_isom_1}, we have $X(\mf{a}) \simeq X(\epsilon(\mf{a})) = X(\epsilon(\mf{b})) \simeq X(\mf{b})$. $X(\mf{a})$ and $X(\mf{b})$ are thus isomorphic, and due to $X(\mf{a}), X(\mf{b}) \in \mf{D}'$, we have $X(\mf{a}) = X(\mf{b})$. Let $G \equiv X(\mf{a})$ and
\begin{align*}
{\cal I}(\mf{a}) & \; \equiv \; \mysetdescr{ \iota(\mf{a}) \circ \pi }{\pi \in {\cal A}ut(G)}, \\
{\cal I}(\mf{b}) & \; \equiv \; \mysetdescr{ \iota(\mf{b}) \circ \pi }{ \pi \in {\cal A}ut(G) }.
\end{align*}
With $m \equiv \# \mf{a}_2$, $n \equiv \# \mf{a}_3$, $(m !) \cdot (n !)$ is according to \eqref{card_AutXfa} the cardinality of ${\cal A}ut( G )$, ${\cal I}(\mf{a})$, and ${\cal I}(\mf{b})$. ${\cal J} \equiv {\cal I}(\mf{a}) \cup {\cal I}(\mf{b})$ is a subset of ${\cal S}(G, R)$ with $\# {\cal J} \geq (m !) \cdot (n !)$; equality holds iff $\mf{a} = \mf{b}$.
$\rho_G[ {\cal J} ]$ is a subset of ${\cal S}(G,S)$, and due to Corollary \ref{coro_rhophi_DpU} and Lemma \ref{lemma_rhophi_isom_1}, we have $\# \rho_G[ {\cal J} ] = \# {\cal A}ut(G) = (m !) \cdot (n !)$ (also in the case of $\epsilon(\mf{a})_2 \cap \epsilon(\mf{a})_3 \not= \emptyset$). Because $\rho$ is strong, we have $\# {\cal J} = \# \rho_G[ {\cal J} ]$, hence $\mf{a} = \mf{b}$.
\end{proof}
Theorem \ref{theo_eta_RS_inv} provides more than the pure inversion of Theorem \ref{theo_eta_RS}(2): it states ``$\epsilon$ is one-to-one'', whereas in Theorem \ref{theo_eta_RS}(2), the weaker condition ``$\epsilon(\mf{a}) = \epsilon( \mf{b} ) \Rightarrow \mf{a}_1 = \mf{b}_1$'' is used. The reason is that in the situation of Theorem \ref{theo_eta_RS_inv}, the extremely simple (and thus: powerful) objects $X(\mf{a})$ belong to $\mf{D}'$: it is the properties of $X(\mf{a})$ and $\iota(\mf{a})$ summarized in Corollary \ref{coro_props_Xfa} that yield the stronger result.
\subsection{The replacement of Condition \ref{cond_univ_aexiv}} \label{subsec_replacement_Cond1}
We have introduced Condition \ref{cond_univ_aexiv} in order to close the gap left by Proposition \ref{prop_eta_invers} to $\eta$ being strong. However, Condition \ref{cond_univ_aexiv} is unwieldy to check because it refers to how $\myagexiv{G}{\xi}{v}$ looks for all $G \in \fD$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$. It is desirable to have more handy conditions referring to the homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ only. We need
\begin{lemma} \label{lemma_aexiv_axiv}
Let $R, S \in \mf{D}$, $\mf{D}' \subseteq \mf{D}$. If $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ is a strict homomorphism between the EV-systems of $R$ and $S$, then
\begin{align} \label{aexiv_axiv}
\myaTxiv{S}{\eta(\xi)}{v}_2 \subseteq \epsilon( \aRxiv )_2 & \mytext{ and }
\myaTxiv{S}{\eta(\xi)}{v}_3 \subseteq \epsilon( \aRxiv )_3,
\end{align}
for all $G \in \fD$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$, where $\eta(\xi) \in {\cal S}(G,S)$ is defined as in Definition \ref{def_eps}.
\end{lemma}
\begin{proof} Let $a \in \aSexiv_2$. There exists a $w \in N^{in}_G(v)$ with $\eta(\xi)(w) = a$. We have $\aRxiw \aRxiv \in A( {\cal E}(R)^* )$, hence $\epsilon( \aRxiw ) \epsilon( \aRxiv ) \in A( {\cal E}(S)^* )$, and \eqref{fafb_ungleich} yields $ a = \eta(\xi)(w) = \epsilon( \aRxiw )_1 \in \epsilon( \aRxiv )_2$. The second inclusion is shown in the same way.
\end{proof}
Now we can prove
\begin{proposition} \label{prop_repl_Cond1}
Let $R, S \in \mf{D}$, $\mf{D}' \subseteq \mf{D}$, and let $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ be a strict homomorphism between the EV-systems of $R$ and $S$. Assume that for $\mf{a} \in {\cal E}_o(R)$
\begin{align} \label{cond_epsfa_fa}
\begin{split}
\# \epsilon( \mf{a} )_2 & \; \leq \; \# \mf{a}_2, \\
\# \epsilon( \mf{a} )_3 & \; \leq \; \# \mf{a}_3,
\end{split}
\end{align}
and
\begin{align} \label{cond_eps_trennt}
\begin{split}
\forall \; \mf{b}, \mf{c} \in N^{in}_{{\cal E}(R)}( \mf{a} ) \mytext{: } \epsilon( \mf{b} )_1 = \epsilon( \mf{c} )_1 & \; \Rightarrow \; \mf{b}_1 = \mf{c}_1, \\
\forall \; \mf{b}, \mf{c} \in N^{out}_{{\cal E}(R)}( \mf{a} ) \mytext{: } \epsilon( \mf{b} )_1 = \epsilon( \mf{c} )_1 & \; \Rightarrow \; \mf{b}_1 = \mf{c}_1.
\end{split}
\end{align}
Then $\aSexiv = \epsilon( \aRxiv )$ for all $G \in \fD$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$ with $\aRxiv = \mf{a}$. In particular, $\epsilon$ fulfills Condition \ref{cond_univ_aexiv} if \eqref{cond_epsfa_fa} and \eqref{cond_eps_trennt} hold for all $\mf{a} \in {\cal E}_o(R)$.
\end{proposition}
\begin{proof} Let $G \in \fD$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$ with $\aRxiv = \mf{a}$. For every $w \in N^{in}_G(v)$, we have $\aRxiw \in N^{in}_{{\cal E}(R)}( \aRxiv )$ due to the definition of $A( {\cal E}(R) )$, hence
\begin{align*}
\# \aRxiv_2 & \; \; \; = \;
\# \mysetdescr{ \xi( w ) }{ w \in N^{in}_G(v) }
\; \stackrel{\eqref{cond_eps_trennt}}{\leq} \;
\# \mysetdescr{ \epsilon( \aRxiw )_1 }{ w \in N^{in}_G(v) } \\
& \; \; \; = \;
\# \mysetdescr{ \eta(\xi)(w) }{ w \in N^{in}_G(v) }
\; = \;
\# \aSexiv_2 \\
& \stackrel{\eqref{aexiv_axiv}}{\leq} \;
\# \epsilon( \aRxiv )_2
\; \stackrel{\eqref{cond_epsfa_fa}}{\leq} \;
\# \aRxiv_2,
\end{align*}
thus $ \# \aSexiv_2 = \# \epsilon( \aRxiv )_2$. Now \eqref{aexiv_axiv} delivers $ \aSexiv_2 = \epsilon( \aRxiv )_2 $.
The proof of $ \aSexiv_3 = \epsilon( \aRxiv )_3 $ is similar, and $\aSexiv = \epsilon( \aRxiv )$ is shown. The addendum is clear.
\end{proof}
\begin{figure}
\caption{}\label{figure_RS_nichtRearr}
\end{figure}
As an application, Figure \ref{figure_RS_nichtRearr} shows digraphs $R$ and $S$ together with their EV-systems with respect to $\mf{D}$. A homomorphism $\epsilon$ between the EV-systems is indicated by corresponding bold and dotted shafts of the arrows. The homomorphism fulfills \eqref{cond_epsfa_fa} and \eqref{cond_eps_trennt} for all $\mf{a} \in {\cal E}_o(R)$, hence Condition \ref{cond_univ_aexiv}. Because it is additionally one-to-one, it induces a strong S-scheme from $R$ to $S$ with respect to $\mf{D}$ fulfilling \eqref{eq_imagebased_gl} which can be extended to a strong $\Gamma$-scheme from $R$ to $S$ (Theorem \ref{theo_eta_RS}(2)). At the end of Section \ref{subsec_rearr_induced}, we will take the example up again.
Let us examine the assumptions in Proposition \ref{prop_repl_Cond1} more closely! If $\epsilon$ fulfills Condition \ref{cond_univ_aexiv}, then $\eta$ fulfills \eqref{eq_imagebased_gl} according to Theorem \ref{theo_eta_RS}(1). Looking at the second and third equation in \eqref{eqs_riapi} with $\pi = \id_{V(X(\mf{a}))}$, we conclude that \eqref{cond_epsfa_fa} is necessary for Condition \ref{cond_univ_aexiv} for the choices of $R$ and $\mf{D}'$ we are mainly interested in. However, \eqref{cond_eps_trennt} is not necessary, as we will show now.
For every flat poset $Q$, we have $\mf{a}_2 = \emptyset$ or $\mf{a}_3 = \emptyset$ for every $\mf{a} \in {\cal E}(Q)$, and ${\cal E}(Q)$ is a flat poset, too. Let $R$ be a flat connected poset with at least three points, and let $C$ be the two-element chain with $V(C) = \{ 0, 1 \}$ and $A(C) = \{ (0,0), (0,1), (1,1) \}$. Every strict homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(C)$ sends the ``basement'' and the ``upper floor'' of ${\cal E}(R)$ to the basement and the upper floor of ${\cal E}(C)$, respectively. Following this rule, we define $\epsilon : {\cal E}(R) \rightarrow {\cal E}(C)$ by
\begin{align*}
\epsilon( \mf{a} ) & \equiv
\begin{cases}
(0, \emptyset, \{ 1 \} ), & \mytext{if } \mf{a}_3 \not= \emptyset \mytext{ (basement to basement)}; \\
(1, \{ 0 \}, \emptyset ), & \mytext{if } \mf{a}_2 \not= \emptyset \mytext{ (upper floor to upper floor)}; \\
(0, \emptyset, \emptyset), & \mytext{otherwise (isolated points to an isolated point)}.
\end{cases}
\end{align*}
$\epsilon$ is a strict homomorphism fulfilling \eqref{cond_epsfa_fa} for all $\mf{a} \in {\cal E}(R)$. But because $R$ is connected and contains at least three points, $\epsilon$ violates \eqref{cond_eps_trennt}. However, for all $P \in \mf{P}$, $\xi \in {\cal S}(P,R)$, $v \in V(P)$,
\begin{align*}
\aSexiv_2 & = \mysetdescr{ \epsilon( \aRxiw )_1 }{ w \in N^{in}_P(v) } \\
& =
\begin{cases}
\emptyset, & \mytext{if } N^{in}_P(v) = \emptyset; \\
\{ 0 \}, & \mytext{otherwise}
\end{cases} \\
& = \epsilon( \aRxiv )_2.
\end{align*}
$\aSexiv_3 = \epsilon( \aRxiv )_3$ is shown in the same way, and $\epsilon$ fulfills Condition \ref{cond_univ_aexiv}.
\section{The rearrangement method} \label{sec_rearr}
In \cite{aCampo_toappear_0}, the author has developed a method for rearranging a digraph $R$ in such a way that the relation $R \sqsubseteq_\Gamma S $ with respect to $\mf{D}$ holds for the digraph $S$ resulting from the rearrangement. In Section \ref{subsec_rearr_induced}, we see that $\rho$ is in fact induced by a strict homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ between the EV-systems of $R$ and $S$. We describe $\epsilon$ and analyse its properties. In Section \ref{subsec_example}, we discuss as examples the pairs of posets in Figure \ref{figure_Intro}(a)-(b) under these view points.
\subsection{{\bf $\rho$} as an induced S-scheme} \label{subsec_rearr_induced}
\begin{figure}
\caption{}\label{fig_abstrConstrMeth}
\end{figure}
The concept of the rearrangement method is illustrated in Figure \ref{fig_abstrConstrMeth}. We have a digraph $R$ and sets $X, Y, M \subseteq V(R)$ with $X \cap M = \emptyset$, $Y \cap M = \emptyset$. We build a new digraph $S$ by replacing all arcs between $M$ and $X$ in $R$ by arcs between $M$ and $Y$. In detail:
\begin{definition}[\cite{aCampo_toappear_0}, Definition 5] \label{def_leq_s}
We agree on the following:
\begin{itemize}
\item $R = (Z, A(R))$ is a digraph.
\item We have disjoint subsets $X$ and $M$ of $Z$.
\item There is a subset $Y \subseteq Z$ with
\begin{align}
\label{bed_WB_empty}
M \cap Y & = \emptyset , \\
\label{bed_WBB_empty}
M \cap N_R(y) & = \emptyset \mytext{ for all } y \in Y,
\end{align}
and $ \beta : X \rightarrow Y $ is a mapping.
\item We define $S$ as the digraph with $V(S) = Z$ and
\begin{align*}
A(S) & \equiv \; \; A_r \; \cup \; A_d \; \cup \; A_u \\
\mytext{where} \; A_r & \equiv \; A(R) \setminus \left( ( M \times X ) \cup ( X \times M ) \right), \\
A_d & \equiv \;
\mysetdescr{ m \beta(x) }{ m x \in A(R) \cap ( M \times X )}, \\
A_u & \equiv \;
\mysetdescr{ \beta(x) m }{ x m \in A(R) \cap ( X \times M )}.
\end{align*}
\end{itemize}
\end{definition}
\begin{theorem}[\cite{aCampo_toappear_0}, Theorem 3] \label{theo_rhoxi}
For every $G \in \mf{D}$ and every $\xi \in {\cal S}(G,R)$, we define the mapping $\rho_G(\xi) : V(G) \rightarrow Z$ by
\begin{eqnarray} \label{descr_rhoxi}
\forall v \in V(G) \mytext{:} \; \rho_G(\xi)(v) & \equiv &
\begin{cases}
\beta(\xi(v)), & \mytext{if } v \in B_\xi, \\
\xi(v), & \mytext{otherwise}.
\end{cases}
\end{eqnarray}
where
\begin{equation*}
B_\xi \; \equiv \; \mysetdescr{ v \in V(G) }{ \xi(v) \in X \mytext{and} \; \xi[ N_G(v) ] \cap M \not= \emptyset }.
\end{equation*}
Assume, in addition to the assumptions in Definition \ref{def_leq_s}, that $\beta : R \vert_X \rightarrow R \vert_Y $ is a bijective homomorphism and
\begin{align}
\label{bed_nbh}
\forall x \in X \mytext{:} \; N^{in}_R(x) \setminus M \subseteq N^{in}_R( \beta(x) ) \; \mytext{and} \; N^{out}_R(x) \setminus M\subseteq N^{out}_R( \beta(x) ).
\end{align}
Then $\rho$ is a strong S-scheme from $R$ to $S$ with respect to $\mf{D}$ which can be extended to a strong $\Gamma$-scheme $ \rho'$ with $\rho'_G \vert_{{\cal S}(G,R)}^{{\cal S}(G,S)} = \rho_G$ for all $G \in \mf{D}$.
\end{theorem}
It is $\aRgxiv = (\xi(v), \xi[ N^{in}_G(v) ], \xi[ N^{out}_G(v) ])$ which determines $\rhogxiv$ for every $G \in \fD$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$. Therefore, \eqref{eq_Escheme_rhowert} holds, and because ERD and AID are fulfilled for $\mf{D}$ and $R$ according to Proposition \ref{prop_fDStr_fTa}, $\rho$ is according to Theorem \ref{theo_induced_exists} induced by
\begin{align*}
\epsilon : {\cal E}(R) & \rightarrow {\cal E}(S), \\
\mf{a} & \mapsto \myaTgxiv{S}{{\cal E}(R)}{\rho(\phi)}{\mf{a}}.
\end{align*}
We want to describe $\epsilon( \mf{a} )$ explicitly. We have
\begin{equation*}
B_\phi \quad = \quad \mysetdescr{ \mf{a} \in {\cal E}(R)}{ \mf{a}_1 \in X, \phi[ N_{{\cal E}(R)}( \mf{a} )] \cap M \not= \emptyset },
\end{equation*}
hence, due to \eqref{descr_rhoxi},
\begin{equation} \label{rhophi_rearr}
\epsilon( \mf{a} )_1 \; = \; \rho(\phi)(\mf{a}) \; = \;
\begin{cases}
\beta(\mf{a}_1), & \mytext{if } \mf{a} \in B_\phi, \\
\mf{a}_1, & \mytext{otherwise}.
\end{cases}
\end{equation}
For the determination of $\epsilon( \mf{a} )_2 = \rho(\phi)\left[ N^{in}_{{\cal E}(R)}(\mf{a}) \right]$, \eqref{fafb_ugl_fTa} yields
\begin{align*}
N^{in}_{{\cal E}(R)}(\mf{a}) & \quad = \quad
\mysetdescr{ \mf{b} \in {\cal E}(R) }
{\mf{b}_1 \in \mf{a}_2 \; \mytext{and} \; \mf{a}_1 \in \mf{b}_3}.
\end{align*}
Looking at the definition of $B_\phi$ and \eqref{rhophi_rearr}, we see $\mf{a}_2 \setminus X \subseteq \epsilon( \mf{a} )_2$. Furthermore,
\begin{itemize}
\item $ \mf{a}_1 \in M$: Then every $\mf{b} \in N^{in}_{{\cal E}(R)}(\mf{a}) $ with $\mf{b}_1 \in X$ belongs to $B_\phi$ and we conclude $\epsilon( \mf{a} )_2 = ( \mf{a}_2 \setminus X) \; \cup \; \beta[ \mf{a}_2 \cap X ]$.
\item $ \mf{a}_1 \notin M$: Let $x \in \mf{a}_2 \cap X$. We have $\left( x, \emptyset, \{ \mf{a}_1 \} \right) \in \left( N^{in}_{{\cal E}(R)}(\mf{a}) \right) \setminus B_\phi$, thus $x \in \epsilon( \mf{a} )_2$. Additionally, under all elements of $N^{in}_{{\cal E}(R)}(\mf{a})$ with first component $x$, it is $(x, N^{in}_R(x), N^{out}_R(x))$ which has the largest second and third component; therefore,
$\beta( x ) \in \epsilon( \mf{a} )_2$, if the intersection $N_R(x) \cap M$ is not empty.
\end{itemize}
Making the same considerations for $\epsilon( \mf{a} )_3$, we get all together
\begin{align} \label{eps_rearr}
\begin{split}
\epsilon( \mf{a} )_1 & = \rho(\phi)(\mf{a}), \\
\epsilon( \mf{a} )_2 & =
\begin{cases}
( \mf{a}_2 \setminus X ) \; \cup \; \beta[ \mf{a}_2 \cap X ] \quad \quad \quad \quad \quad \quad \quad \quad \quad \;
\mytext{if } \mf{a}_1 \in M; & \\
\mf{a}_2 \; \cup \; \beta \left[ \mysetdescr{ x \in \mf{a}_2 \cap X }{ N_R(x) \cap M \not= \emptyset } \right] \quad \mytext{otherwise}; &
\end{cases} \\
\epsilon( \mf{a} )_3 & =
\begin{cases}
( \mf{a}_3 \setminus X ) \; \cup \; \beta[ \mf{a}_3 \cap X ] \quad \quad \quad \quad \quad \quad \quad \quad \quad \;
\mytext{if } \mf{a}_1 \in M; & \\
\mf{a}_3 \; \cup \; \beta \left[ \mysetdescr{ x \in \mf{a}_3 \cap X }{ N_R(x) \cap M \not= \emptyset } \right] \quad \mytext{otherwise}. &
\end{cases}
\end{split}
\end{align}
Because $\rho$ is induced and strong, Theorem \ref{theo_eta_RS}(1) and Theorem \ref{theo_eta_RS_inv} yield
\begin{align} \label{eps_Cond1_1to1}
\begin{split}
& \epsilon \; \mytext{fulfills Condition \ref{cond_univ_aexiv}} \\
\Leftrightarrow \quad &
\rho \; \mytext{fulfills } \eqref{eq_imagebased_gl} \\
\Rightarrow \quad & \epsilon \; \mytext{is one-to-one.}
\end{split}
\end{align}
But in many cases, the homomorphism $\epsilon$ resulting from the rearrangement method will not be one-to-one, because, in the case of $X \cap Y = \emptyset$, the following lemma states that $\epsilon$ will be one-to-one iff every non-isolated $x \in X$ is either encapsulated by $M$ or by $V(R) \setminus M$:
\begin{lemma} \label{lemma_eps_nicht_1t1_Cond1}
Let $R$, $S$, and $\rho$ as in Theorem \ref{theo_rhoxi} and $\epsilon \equiv \alpha_{\rho(\phi)}$. If
\begin{align} \label{bed_eps_1t1}
\forall \; x \in X \mytext{: } N_R(x) \cap M = \emptyset & \mytext{ or } N_R(x) \setminus M = \emptyset,
\end{align}
then $\epsilon$ is one-to-one. In the case of $X \cap Y = \emptyset$, the converse is true, too.
\end{lemma}
\begin{proof} ``$\Rightarrow$'': If \eqref{bed_eps_1t1} holds, then the equations \eqref{eps_rearr} become
\begin{align*}
\epsilon( \mf{a} )_1 & = \rho(\phi)(\mf{a}), \\
\epsilon( \mf{a} )_2 & =
\begin{cases}
( \mf{a}_2 \setminus X ) \; \cup \; \beta[ \mf{a}_2 \cap X ] & \mytext{if } \mf{a}_1 \in M; \\
\; \mf{a}_2 & \mytext{otherwise};
\end{cases} \\
\epsilon( \mf{a} )_3 & =
\begin{cases}
( \mf{a}_3 \setminus X ) \; \cup \; \beta[ \mf{a}_3 \cap X ] & \mytext{if } \mf{a}_1 \in M; \\
\; \mf{a}_3 & \mytext{otherwise}.
\end{cases}
\end{align*}
Let $\mf{a} \in {\cal E}_o(R)$. If $\epsilon( \mf{a} )_1 \in M$, then $\mf{a}_1 = \epsilon( \mf{a} )_1$ due to \eqref{bed_WB_empty}, and $\mf{a}_2 \cap Y = \emptyset = \mf{a}_3 \cap Y$ due to \eqref{bed_nbh}. Therefore,
\begin{align*}
\mf{a}_2 & \; = \;
\left( \epsilon( \mf{a} )_2 \setminus Y \right) \; \cup \; \myurbild{\beta}\left( \epsilon( \mf{a} )_2 \cap Y \right), \\
\mf{a}_3 & \; = \;
\left( \epsilon( \mf{a} )_3 \setminus Y \right) \; \cup \; \myurbild{\beta}\left( \epsilon( \mf{a} )_3 \cap Y \right).
\end{align*}
If $\epsilon( \mf{a} )_1 \notin M$, then $\mf{a}_1 \notin M$ and $\epsilon(\mf{a})_2 = \mf{a}_2$, $\epsilon(\mf{a})_3 = \mf{a}_3$. If the set $( \mf{a}_2 \cup \mf{a}_3 ) \cap M$ is empty, then the triplet $\mf{a}$ cannot be an element of $B_\phi$, hence $\mf{a}_1 = \epsilon( \mf{a} )_1$. And in the case of $( \mf{a}_2 \cup \mf{a}_3 ) \cap M \not= \emptyset$, we have $\mf{a}_1 = \myurbild{\beta}( \epsilon( \mf{a} )_1 )$ for $\epsilon( \mf{a} )_1 \in Y$ (use \eqref{bed_nbh}) and $\mf{a}_1 = \epsilon( \mf{a} )_1$ for $\epsilon( \mf{a} )_1 \notin Y$.
``$\Leftarrow$'': Assume that \eqref{bed_eps_1t1} does not hold for $x \in X$. Select a vertex $v \in N_R(x) \setminus M$. In the case of $v \in N^{in}_R(x)$, define
\begin{align*}
\mf{a} & \; \equiv \; ( v, \emptyset, \{ x \} ), \\
\mf{b} & \; \equiv \; ( v, \emptyset, \{ x, \beta(x) \} ).
\end{align*}
Then $\mf{a} \in {\cal E}_o(R)$, and due to $v \in N^{in}_R( x ) \setminus M \stackrel{\eqref{bed_nbh}}{\subseteq} N^{in}_R( \beta(x) )$, we also have $\mf{b} \in {\cal E}_o(R)$. Due to $X \cap Y = \emptyset$, we have $\mf{a} \not= \mf{b}$, but the formulas in \eqref{eps_rearr} yield $\epsilon( \mf{a} ) = ( v, \emptyset, \{ x, \beta(x)\} ) = \epsilon( \mf{b} )$ (for the latter equality, we need $X \cap Y = \emptyset$ again). In the case of $v \in N^{out}_R(x)$, work with $\mf{a} \equiv ( v, \{ x \}, \emptyset )$, $ \mf{b} \equiv ( v, \{ x, \beta(x)\}, \emptyset )$.
\end{proof}
The condition $X \cap Y = \emptyset$ cannot easily be skipped in this Lemma. For $X = Y$ and $\beta$ being the identity mapping of $X$, the rearrangement method delivers $S = R$, $\rho$ is the trivial $\Gamma$-scheme with $\rho_G = \id_{{\cal H}(G,R)}$ for all $G \in \mf{D}$, and $\epsilon$ is the identity mapping of ${\cal E}_o(R)$ according to \eqref{eps_rearr} and \eqref{rhophi_rearr}. $\epsilon$ is thus one-to-one, whatever the structure of $R$ is.
We want to show that the digraph $S$ in Figure \ref{figure_RS_nichtRearr} in Section \ref{subsec_replacement_Cond1} cannot be constructed by the rearrangement of the digraph $R$ in the figure. Due to \eqref{bed_WB_empty} and \eqref{bed_WBB_empty}, neither the set $M$ nor the set $Y$ can contain the vertex $1$. These sets must be singletons, one of them containing the vertex $0$, the other one the vertex $2$. If the set $X$ contains $1$, the rearrangement of $R$ results in a digraph with V-shaped or $\Lambda$-shaped diagram (and Theorem \ref{theo_rhoxi} cannot be applied because \eqref{bed_nbh} is violated). And in the case of $1 \notin X$, we have $X = \emptyset$ or $X = Y$, and the rearrangement method produces nothing but $R$.
On the one hand, we have thus a rearrangement method producing induced strong S-schemes $\rho$ for which $\epsilon$ does not fulfill Condition \ref{cond_univ_aexiv} in many cases, and on the other hand, we have Theorem \ref{theo_eta_RS}(2) which states that many homomorphisms $\epsilon$ fulfilling Condition \ref{cond_univ_aexiv} induce a strong S-scheme $\rho$. There is still something to do in characterizing digraphs $R$ and $S$ with a strong induced S-scheme between them.
\subsection{Examples} \label{subsec_example}
\begin{figure}
\caption{}\label{fig_TableConstr}
\end{figure}
Figure \ref{fig_TableConstr} shows the Hasse-diagrams of eight pairs of posets $R$ and $S$ for which $R \sqsubseteq_\Gamma S$ has been shown in \cite{aCampo_toappear_0} by means of the rearrangement method described in the previous section. The respective one-element vertex sets $X$, $Y$, and $M$ are marked. In all cases, the respective strong S-scheme $\rho$ from $R$ to $S$ is induced by the strict homomorphism $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ described by the equations \eqref{eps_rearr}.
\begin{figure}
\caption{}\label{figure_Bspl_C7}
\end{figure}
Figure \ref{figure_Bspl_C7} shows the Hasse-diagrams of the posets $R$ and $S$ from Figure \ref{figure_Intro}(a)-(b), their EV-systems, and the respective strict homomorphism $\epsilon$ between them; the pairs are the second and the sixth one in Figure \ref{fig_TableConstr}.
In Figure \ref{figure_Bspl_C7}(a), the main part of $\epsilon$ is indicated by the bold lines in the EV-systems. $\epsilon$ is one-to-one and by means of Proposition \ref{prop_repl_Cond1}, it is easily seen that it fulfills Condition \ref{cond_univ_aexiv}, too. According to Theorem \ref{theo_eta_RS}, $\rho$ fulfills \eqref{eq_imagebased_gl}. We have
\begin{equation*}
B_\phi \; = \; \{ ( x, \emptyset, \{ m \} )\}
\end{equation*}
and the total homomorphism is given by
\newline
\parbox{5cm}{
\begin{tabular}{| l | l | }
\hline
$ \mf{a} \in {\cal E}_o(R) $ & $\epsilon( \mf{a} ) \in {\cal E}_o(S)$ \\
\hline \hline
$ ( x, \emptyset, \emptyset ) $ &
$ ( x, \emptyset, \emptyset ) $ \\
$ ( x, \emptyset, \{ m \} ) $ &
$ ( y, \emptyset, \{ m \} ) $ \\
\hline
$ ( p, \emptyset, \{ m \} ) $ &
$ ( p, \emptyset, \{ m \} ) $ \\
$ ( p, \emptyset, \{ m, y \} ) $ &
$ ( p, \emptyset, \{ m, y \} ) $ \\
$ ( p, \emptyset, \{ y \} ) $ &
$ ( p, \emptyset, \{ y \} ) $ \\
$ ( p, \emptyset, \emptyset ) $ &
$ ( p, \emptyset, \emptyset ) $ \\
\hline
\end{tabular} }
\parbox{5cm}{
\begin{tabular}{| l | l | }
\hline
$ \mf{a} \in {\cal E}_o(R) $ & $\epsilon( \mf{a} ) \in {\cal E}_o(S)$ \\
\hline \hline
$ ( m, \emptyset, \emptyset ) $ &
$ ( m, \emptyset, \emptyset ) $ \\
$ ( m, \{ x \}, \emptyset ) $ &
$ ( m, \{ y \}, \emptyset ) $ \\
$ ( m, \{ x, p \}, \emptyset ) $ &
$ ( m, \{ y, p \}, \emptyset ) $ \\
$ ( m, \{ p \}, \emptyset ) $ &
$ ( m, \{ p \}, \emptyset ) $ \\
\hline
$ ( y, \{ p \}, \emptyset ) $ &
$ ( y, \{ p \}, \emptyset ) $ \\
$ ( y, \emptyset, \emptyset ) $ &
$ ( y, \emptyset, \emptyset ) $ \\
\hline
\end{tabular} }
\newline
\newline
The pairs of posets (C1) and (C3)-(C5) in Figure \ref{fig_TableConstr} have the same properties: the respective strong S-scheme $\rho$ fulfills \eqref{eq_imagebased_gl} and is induced by a one-to-one homomorphism from ${\cal E}(R)$ to ${\cal E}(S)$ fulfilling Condition \ref{cond_univ_aexiv}.
For the example in Figure \ref{figure_Bspl_C7}(b), three point mappings are indicated by arrows. The rest of $\epsilon$ is easily seen as follows: The sub-poset of $R$ drawn with bold lines is mapped to the isomorphic bold-lined sub-poset of $S$, whereas the sub-poset of $R$ drawn with dotted lines is flipped and mapped to the M-shaped dotted sub-poset of $S$; the two shaded points $( p, \{ x \}, \emptyset )$ and $( p, \{ x, y \}, \emptyset )$ of ${\cal E}(R)$ are both mapped to the shaded point $( p, \{ x, y \}, \emptyset )$ of ${\cal E}(S)$. According to \eqref{eps_Cond1_1to1}, $\epsilon$ cannot fulfill Condition \ref{cond_univ_aexiv} and $\rho$ cannot fulfill \eqref{eq_imagebased_gl}. The total homomorphism is listed in the following table; we have
\begin{equation*}
B_\phi \; = \; \{ ( x, \emptyset, \{ m \} ), ( x, \emptyset, \{ m, p \} ) \}.
\end{equation*}
\newline
\parbox{5cm}{
\begin{tabular}{| l | l | }
\hline
$ \mf{a} \in {\cal E}_o(R) $ & $\epsilon( \mf{a} ) \in {\cal E}_o(S)$ \\
\hline \hline
$ ( x, \emptyset, \emptyset ) $ &
$ ( x, \emptyset, \emptyset ) $ \\
$ ( x, \emptyset, \{ m \} ) $ &
$ ( y, \emptyset, \{ m \} ) $ \\
$ ( x, \emptyset, \{ m, p \} ) $ &
$ ( y, \emptyset, \{ p, m \} ) $ \\
$ ( x, \emptyset, \{ p \} ) $ &
$ ( x, \emptyset, \{ p \} ) $ \\
\hline
$ ( y, \emptyset, \emptyset ) $ &
$ ( y, \emptyset, \emptyset ) $ \\
$ ( y, \emptyset, \{ q \} ) $ &
$ ( y, \emptyset, \{ q \} ) $ \\
$ ( y, \emptyset, \{ p, q \} ) $ &
$ ( y, \emptyset, \{ p, q \} ) $ \\
$ ( y, \emptyset, \{ p \} ) $ &
$ ( y, \emptyset, \{ p \} ) $ \\
\hline
\end{tabular} }
\parbox{5cm}{
\begin{tabular}{| l | l | }
\hline
$ \mf{a} \in {\cal E}_o(R) $ & $\epsilon( \mf{a} ) \in {\cal E}_o(S)$ \\
\hline \hline
$ ( m, \emptyset, \emptyset ) $ &
$ ( m, \emptyset, \emptyset ) $ \\
$ ( m, \{ x \}, \emptyset ) $ &
$ ( m, \{ y \}, \emptyset ) $ \\
\hline
$ ( p, \emptyset, \emptyset ) $ &
$ ( p, \emptyset, \emptyset ) $ \\
$ ( p, \{ x \}, \emptyset ) $ &
$ ( p, \{ x, y \}, \emptyset ) $ \\
$ ( p, \{ x, y \}, \emptyset ) $ &
$ ( p, \{ x, y \}, \emptyset ) $ \\
$ ( p, \{ y \}, \emptyset ) $ &
$ ( p, \{ y \}, \emptyset ) $ \\
\hline
$ ( q, \emptyset, \emptyset ) $ &
$ ( q, \emptyset, \emptyset ) $ \\
$ ( q, \{ y \}, \emptyset ) $ &
$ ( q, \{ y \}, \emptyset ) $ \\
\hline
\end{tabular} }
\newline
\newline
Let $\mf{c} \equiv ( p, \{x\}, \emptyset ) \in {\cal E}_o(R)$ and $\mf{d} \equiv ( p, \{ x, y \}, \emptyset ) \in {\cal E}_o(R)$. For all points $\mf{a} \in {\cal E}_o(R) \setminus \{ \mf{c}, \mf{d} \}$, the conditions in Proposition \ref{prop_repl_Cond1} are fulfilled, but $\mf{c}$ violates \eqref{cond_epsfa_fa} and $\mf{d}$ violates \eqref{cond_eps_trennt}. The points $\mf{c}$ and $\mf{d}$ are thus the only points $\mf{a} \in {\cal E}_o(R)$ for which $G \in \mf{D}$, $\xi \in {\cal S}(G,R)$, $v \in V(G)$ may exist with $\aRxiv = \mf{a} $ and $\aSexiv \not= \epsilon( \mf{a} )$.
\begin{figure}
\caption{\label{figure_BsplHomsC7}}
\end{figure}
Figure \ref{figure_BsplHomsC7} shows the Hasse-diagrams of three posets $C$, $V$, and $N$, and for each of them a strict homomorphism to $R$ and its image resulting under $\eta$. $C$ is the two-element chain defined in Section \ref{subsec_replacement_Cond1}, and $V$ and $N$ are the posets with V- and N-shaped Hasse-diagrams. In the following tables, the objects of interest in ${\cal E}(R)$ and ${\cal E}(S)$ are listed.
\newline
\begin{tabular}{| c | l | l | l | }
\hline
$ v \in V(C) $ & $ \alpha_\xi(v) $ & $\epsilon( \alpha_\xi(v) )$ & $ \myaexiv{\xi}{v} $\\
\hline \hline
0 & $( x, \emptyset, \{ p \} )$ & $( x, \emptyset, \{ p \} )$ & $( x, \emptyset, \{ p \} )$ \\
1 & $( p, \{ x \}, \emptyset )$ & $( p, \{ x, y \}, \emptyset )$ & $( p, \{ x \}, \emptyset )$ \\
\hline
\end{tabular}
\newline
\newline
\begin{tabular}{| c | l | l | l | }
\hline
$ v \in V(V) $ & $ \myaxiv{\zeta}{x} $ & $\epsilon( \myaxiv{\zeta}{x} )$ & $ \myaexiv{\zeta}{x} $ \\
\hline \hline
00 & $( x, \emptyset, \{ m, p \} )$ & $( y, \emptyset, \{ p, m\} )$ & $( y, \emptyset, \{ p, m\} )$ \\
10 & $( m, \{ x \}, \emptyset )$ & $( m, \{ y \}, \emptyset )$ & $( m, \{ y \}, \emptyset )$ \\
01 & $( p, \{ x \}, \emptyset )$ & $( p, \{ x, y \}, \emptyset )$ & $( p, \{ y \}, \emptyset )$ \\
\hline
\end{tabular}
\newline
\newline
\begin{tabular}{| c | l | l | l | }
\hline
$ v \in V(N) $ & $ \myaxiv{\theta}{x} $ & $\epsilon( \myaxiv{\theta}{x} )$ & $ \myaexiv{\theta}{x} $ \\
\hline \hline
100 & $( y, \emptyset, \{ p \} )$ & $( y, \emptyset, \{ p \} )$ & $( y, \emptyset, \{ p \} )$ \\
001 & $( x, \emptyset, \{ m, p \} )$ & $( y, \emptyset, \{ p, m \} )$ & $( y, \emptyset, \{ p, m \} )$ \\
101 & $( p, \{ x, y \}, \emptyset )$ & $( p, \{ x, y \}, \emptyset )$ & $( p, \{ y \}, \emptyset )$ \\
011 & $( m, \{ x \}, \emptyset )$ & $( m, \{ y \}, \emptyset )$ & $( m, \{ y \}, \emptyset )$ \\
\hline
\end{tabular}
\newline
\newline
We have thus $ \myaxiv{\xi}{1} = \myaxiv{\zeta}{01} = \mf{c}$, but
$ \epsilon( \mf{c} ) \not= \myaexiv{\xi}{1} \not= \myaexiv{\zeta}{01} \not= \epsilon( \mf{c} )$, and we have $ \myaxiv{\theta}{101} = \mf{d} $, but $ \myaexiv{\theta}{101} \not= \epsilon( \mf{d} )$.
At the end of this section, we show that no strong S-scheme $\rho$ exists from $R$ to $S$ fulfilling \eqref{eq_imagebased_gl}. Assume that such a strong S-scheme exists. According to Theorem \ref{theo_eta_RS_inv}, $\rho$ is induced by $\epsilon \equiv \alpha_{\rho(\phi)}$. Now let $P \equiv R$ and let $\xi \equiv \id_{V(R)}$ be the identity mapping of $R$. For $\myaxiv{\xi}{x} = ( x, \emptyset, \{ m, p \} )$ and $\myaxiv{\xi}{y} = ( y, \emptyset, \{ p, q \} )$, Lemma \ref{lemma_rhophi_isom_1} delivers $\# \epsilon( \myaxiv{\xi}{x} )_3 = 2 = \# \epsilon( \myaxiv{\xi}{y} )_3$. Because $y$ is the only point $v \in V(S)$ with $\# N^{out}_S(v) > 1$, we conclude
\begin{equation*}
\eta(\xi)(x) \; = \; \epsilon( \myaxiv{\xi}{x} )_1 \; = \; y
\; = \; \epsilon( \myaxiv{\xi}{y} )_1 \; = \; \eta(\xi)(y).
\end{equation*}
Therefore,
\begin{equation*}
\myaexiv{\xi}{p}_2 \; = \; \eta(\xi)[ N^{in}_G(p) ] \; = \; \eta(\xi)[ \{ x, y \} ] \; = \; \{ y \}.
\end{equation*}
But for $\myaxiv{\xi}{p} = (p, \{x,y\},\emptyset)$, Lemma \ref{lemma_rhophi_isom_1} delivers $\# \epsilon( \myaxiv{\xi}{p} )_2 = 2$. We have thus $ \myaexiv{\xi}{p}_2 \not= \epsilon( \myaxiv{\xi}{p} )_2$, and $\epsilon$ does not fulfill Condition \ref{cond_univ_aexiv}. But due to Theorem \ref{theo_eta_RS_inv}, this is a contradiction to $\rho$ being a strong S-scheme fulfilling \eqref{eq_imagebased_gl}.
Also for the pairs (C7) and (C8) in Figure \ref{fig_TableConstr}, the respective $\epsilon$ is not one-to-one and does not fulfill Condition \ref{cond_univ_aexiv}, as is easily seen by means of Lemma \ref{lemma_eps_nicht_1t1_Cond1}. In consequence, the respective S-scheme does not fulfill \eqref{eq_imagebased_gl}. For the pair $R$, $S$ in (C7), we see by inspection of $\id_{V(R)}$ and the consequences for $\eta( \id_{V(R)} )$ that there exists no strong S-scheme from $R$ to $S$ fulfilling \eqref{eq_imagebased_gl}.
\section{Undirected graphs} \label{sec_undirected}
Let $\mf{S}$ be a representative system of the non-empty finite {\em symmetric} digraphs and $\mf{U}$ a representative system of the non-empty finite {\em undirected} graphs. Already in \cite[Section 4.2]{aCampo_toappear_0}, we have exploited the fact that $\mf{S}$ and $\mf{U}$ are perfect twins because every pair $(v,w), (w,v)$ in the arc set of a symmetric digraph corresponds uniquely to the edge $\{v,w\}$ of an undirected graph and vice versa. In this way, we transferred all concepts and results about directed graphs to undirected graphs, as long as they were compatible with symmetry. Because the class $\mf{T}a \subset \mf{D}$ refers to antisymmetry, we had to replace it by the class $\mf{C}_o \subset \mf{U}$ defined as
\begin{equation*}
\mf{C}_o \; \equiv \; \mysetdescr{ G \in \mf{U} }{ G^* \mytext{\em does not contain a cycle of odd length} }.
\end{equation*}
Denoting with ${\cal H}_u$ and ${\cal S}_u$ the sets of homomorphisms and of strict homomorphisms of undirected graphs, Theorem \ref{theo_GschemeOnStrict} became
\begin{theorem}[\cite{aCampo_toappear_0}, Theorem 4] \label{theo_GschemeOnStrict_UG}
Let $R \in \mf{U}$ and $\mf{U}' \subseteq \mf{U}$. Then, for all $S \in \mf{U}$, the equivalence
\begin{align*}
R & \sqsubseteq_\Gamma S \; \; \mytext{with respect to } \mf{U}'\\ \Leftrightarrow \quad \# {\cal S}_u(G,R) & \leq \# {\cal S}_u(G,S) \; \; \mytext{for all} \; G \in \mf{U}',
\end{align*}
and the implication
\begin{align*}
\# {\cal S}_u(G,R) & \leq \# {\cal S}_u(G,S) \; \; \mytext{for all} \; G \in \mf{U}' \\
\Rightarrow \quad \# {\cal H}_u(G,R) & \leq \# {\cal H}_u(G,S) \; \; \mytext{for all} \; G \in \mf{U}'
\end{align*}
hold if
\begin{align*}
R & \in \mf{U}' = \mf{U}, \\
\mytext{or} \quad R & \in \mf{C}_o \subseteq \mf{U}' \subseteq \mf{U}.
\end{align*}
\end{theorem}
Also for the present article, most of the concepts and results can be directly transferred to undirected graphs just by replacing ``digraph'' by ``undirected graph'', ``$\mf{D}'$'' and ``$\mf{D}$'' by ``$\mf{U}'$'' and ``$\mf{U}$'', and ``${\cal H}$'' and ``${\cal S}$'' by ``${\cal H}_u$'' and ``${\cal S}_u$''. In particular, S-schemes, simple S-schemes, induced S-schemes, and Condition \ref{cond_univ_aexiv} can be defined for undirected graphs in this way, and all results of Section \ref{subsec_simple_induced} can stereotypically be translated into results about undirected graphs. This includes in particular the main results of this section: the characterization of induced S-schemes provided in the Theorems \ref{theo_induced_exists} and \ref{theo_eta_RS}.
Also in the Sections \ref{sec_EVSystems} and \ref{sec_rearr}, the first step is to replace symbols, but now also the concepts change slightly. The definition of the EV-system for undirected graphs has to be modified in an obvious manner. Using the symbol $A(G)$ also for the edge set of an undirected graph $G$, we define
\begin{definition} \label{def_EVsys_alt_UG}
Let $R$ be an undirected graph and $\mf{U}' \subseteq \mf{U}$. We define
\begin{align*}
{\cal E}_o(R) & \equiv \mysetdescr{ ( v, D ) }{ v \in V(R), D \subseteq N_R(v) }.
\end{align*}
For $\mf{a} \in {\cal E}_o(R)$, we refer to the two components of $\mf{a}$ by $\mf{a}_1$ and $\mf{a}_2$, and we define
\begin{align*}
\phi_R : {\cal E}(R) & \rightarrow R, \\
\mf{a} & \mapsto \mf{a}_1.
\end{align*}
Furthermore, for every $G \in \mf{U}', \xi \in {\cal S}_u(G,R)$, we define the mapping
\begin{align*}
\myaTgxi{R}{G}{\xi} : V(G) & \rightarrow {\cal E}_o(R), \\
v & \mapsto \left( \xi(v), \xi[ N_G(v) ] \right).
\end{align*}
The {\em EV-system ${\cal E}(R)$ of $R$ with respect to $\mf{U}'$} is the undirected graph with vertex set ${\cal E}_o(R)$ and edge set $A( {\cal E}(R) )$ defined by
\begin{align*}
& \{ \mf{a}, \mf{b} \} \in A( {\cal E}(R) ) \\
\equiv \quad & \exists \; G \in \mf{U}', \xi \in {\cal S}_u(G,R), \{v, w\} \in A(G) \mytext{: } \mf{a} = \aRgxiv, \; \mf{b} = \aRgxiw.
\end{align*}
\end{definition}
Again, $\alpha$ turns out to be a simple S-scheme from $R$ to ${\cal E}(R)$, and Lemma \ref{lemma_fafb_simpleProp} becomes
\begin{lemma} \label{lemma_fafb_simpleProp_UG}
For all $\mf{a}, \mf{b} \in {\cal E}_o(R)$,
\begin{align*}
\mf{a} \mf{b} \in A( {\cal E}(R ) ) \quad & \Rightarrow \quad \mf{a}_1 \mf{b}_1 \in A(R), \\
\mf{a} \mf{b} \in A( {\cal E}(R)^* ) \quad & \Rightarrow \quad
\mf{a}_1 \mf{b}_1 \in A( R^* ), \mf{a}_1 \in \mf{b}_2, \mf{b}_1 \in \mf{a}_2, \\
\mf{a}_1 = \mf{b}_1 \quad & \Rightarrow \quad \mf{a} = \mf{b}. \\
\mytext{Furthermore,} \quad R \in \mf{C}_o \quad & \Rightarrow \quad {\cal E}(R) \in \mf{C}_o.
\end{align*}
\end{lemma}
(For the proof of ${\cal E}(R) \in \mf{C}_o$, start with a walk $\mf{c}^0, \ldots , \mf{c}^I$ of odd length in ${\cal E}(R)^*$, proceed as in the original, and observe that in the case of $\mf{c}^0 = \mf{c}^I$, the sequence $\mf{c}^0_1, \ldots , \mf{c}^I_1$ is a cycle of odd length in $R^*$.)
In the following Lemma \ref{lemma_phi_def} and Lemma \ref{lemma_phi_fa}, we just have to skip everything indexed with $3$. Now we define the objects $X(\mf{a}) \in \mf{C}_o$:
\begin{definition} \label{def_Xfa_UG}
For every $m \in \mathbb{N}_0$, we define the undirected graph $X_m \in \mf{C}_o$ by
\begin{align*}
V( X_m ) & \; \equiv \; D \cup \{ p \}, \\
A( X_m ) & \; \equiv \; \mysetdescr{ \{ p, d \} }{ d \in D },
\end{align*}
where $D$ is a set with $\# D = m$ and $p \notin D$.
Furthermore, for $\mf{a} \in {\cal E}_o(R)$, we define
\begin{equation*}
X(\mf{a}) \; \equiv \; X_{\# \mf{a}_2}.
\end{equation*}
$\iota(\mf{a}) : V(X(\mf{a})) \rightarrow V(R)$ is a mapping sending $p$ to $\mf{a}_1$ and $D$ bijectively to $\mf{a}_2$.
\end{definition}
From now on, we have to proceed more carefully. The reason is the undirected graph $X_1$ defined up to isomorphism by
\begin{align*}
V(X_1) & \; \equiv \; \{ p, d \} \mytext{with } p \not= d, \\
A(X_1) & \; \equiv \; \{ \{ p, d \} \}.
\end{align*}
${\cal A}ut(X_1)$ contains {\em two} automorphisms $\id_{V(X_1)}$ and $\chi$, the latter one interchanging $p$ and $d$. The vertex $p$ is not a fixed point of $\chi$, and we have to introduce a case discrimination in rewriting Corollary \ref{coro_props_Xfa}:
\begin{corollary} \label{coro_props_Xfa_UG}
Let $R$ and $\mf{U}'$ be as in the choices in Theorem \ref{theo_GschemeOnStrict_UG}. For every $\mf{a} \in {\cal E}_o(R)$, $\pi \in {\cal A}ut(X(\mf{a}))$, we have
\begin{align*}
\myaxiv{\iota(\mf{a})}{p}
& \; = \; \mf{a}, \\
\myaxiv{\iota(\mf{a}) \circ \pi}{v}
& \; = \;
\myaiav{\mf{a}}{\pi(v)} \quad \mytext{for all } v \in V(X(\mf{a})), \pi \in {\cal A}ut( X(\mf{a}) ).
\end{align*}
Furthermore,
\begin{equation*}
\# {\cal A}ut( X(\mf{a} ) ) \; = \; \# \mysetdescr{ \iota(\mf{a}) \circ \pi }{ \pi \in {\cal A}ut( X(\mf{a}) ) } \; =
\begin{cases}
\; 2 & \mytext{if } X(\mf{a}) \simeq X_1, \\
\# \mf{a}_2 ! & \mytext{if } X(\mf{a}) \not\simeq X_1.
\end{cases}
\end{equation*}
For $X(\mf{a}) \not\simeq X_1$, the vertex $p$ is a fixed point of every $\pi \in {\cal A}ut(X(\mf{a}))$, and $\pi$ maps $D$ bijectively to $D$.
\end{corollary}
To point out the difference: Definition \ref{def_Xfa_UG} assigns the undirected graph $X(\mf{a}) = X_1$ to $\mf{a} = ( p, \{d\}) \in {\cal E}_o(X_1)$. However, if we regard $X_1$ as symmetric digraph $Y_1$ with $V(Y_1) = \{ p, d \}$, $A(Y_1) = \{ (p,d), (d,p) \}$, then Definition \ref{def_Xfa} assigns to $\mf{b} = (p,\{d\},\{d\}) \in {\cal E}_o(Y_1)$ the {\em antisymmetric} digraph $X(\mf{b}) \simeq X_1^1 \in \mf{T}a$ with {\em three} vertices and $\# {\cal A}ut( X_1^1) = 1$, cf.\ Figure \ref{figure_Xij}(b).
Proposition \ref{prop_fDStr_fTa} remains nearly unchanged:
\begin{proposition} \label{prop_fDStr_fTa_UG}
Let $\mf{C}_o \subseteq \mf{U}' \subseteq \mf{U}$ and $R \in \mf{U}$. For all $\mf{a}, \mf{b} \in {\cal E}_o(R)$,
\begin{align*}
\mf{a} \mf{b} \in A( {\cal E}(R)^* ) \quad & \Leftrightarrow \quad
\mf{a}_1 \in \mf{b}_2 \mytext{ and } \mf{b}_1 \in \mf{a}_2, \\
\mf{a} \mf{a} \in A( {\cal E}(R) ) \quad & \Leftrightarrow \quad \mf{a}_1 \mf{a}_1 \in A(R).
\end{align*}
ERD and AID hold for $\mf{U}' = \mf{U}$, and for $R \in \mf{C}_o \subseteq \mf{U}'$.
\end{proposition}
Now we come to the counterpart of Theorem \ref{theo_eta_RS_inv}, which we get by just replacing symbols:
\begin{theorem} \label{theo_eta_RS_inv_UG}
Let $R \in \mf{U}' = \mf{U}$ or $R \in \mf{C}_o \subseteq \mf{U}' \subseteq \mf{U}$, and let ${\cal E}(R)$ and ${\cal E}(S)$ be the EV-Systems of $R$ and $S \in \mf{U}$. If $\rho$ is a strong S-scheme from $R$ to $S$ fulfilling \eqref{eq_imagebased_gl}, then $\epsilon \equiv \myaTgxi{S}{{\cal E}(R)}{\rho( \phi_R ) }$ fulfills Condition \ref{cond_univ_aexiv}, induces $\rho$, and is one-to-one.
\end{theorem}
However, in the proof, we have to take into account the special status of $X_1$. Corollary \ref{coro_rhophi_DpU} becomes
\begin{corollary} \label{coro_rhophi_DpU_UG}
Let $\mf{a} \in {\cal E}_o(R)$. Then, for every $\pi \in {\cal A}ut(X(\mf{a}))$,
\begin{align*}
\rho(\iota(\mf{a}) \circ \pi)(v)
& \; = \;
\rho(\iota(\mf{a}))(\pi(v))
\quad \mytext{for all } v \in V(X(\mf{a})).
\end{align*}
In the case of $X(\mf{a}) \not\simeq X_1$, we have
\begin{align*}
\begin{split}
\rho(\iota(\mf{a}) \circ \pi)(p) & \; = \; \epsilon(\mf{a})_1, \\
\rho(\iota(\mf{a}) \circ \pi)[D] & \; = \; \epsilon(\mf{a})_2.
\end{split}
\end{align*}
and in the case of $X(\mf{a}) \simeq X_1$, we have $\epsilon( \mf{a} ) = ( a, \{ b \} )$ with $ab \in A(S^*)$ and
\begin{align*}
a & \; = \; \myrhoxiv{\iota(\mf{a})}{p} \; = \; \myrhoxiv{ \iota(\mf{a}) \circ \chi}{ d }, \\
b & \; = \; \myrhoxiv{\iota(\mf{a})}{d} \; = \; \myrhoxiv{ \iota(\mf{a}) \circ \chi}{ p }.
\end{align*}
\end{corollary}
Lemma \ref{lemma_rhophi_isom_1} remains unchanged:
\begin{lemma} \label{lemma_rhophi_isom_1_UG}
Let $\mf{a} \in {\cal E}_o(R)$. If $\rho$ is strong, then, for every $\pi \in {\cal A}ut(X(\mf{a}))$, the mapping $\rho( \iota(\mf{a}) \circ \pi)$ is one-to-one on $D$. In particular, $ X( \mf{a} ) \simeq X( \epsilon( \mf{a} ) )$.
\end{lemma}
(For $X(\mf{a}) \not\simeq X_1$, run the proof as in the original using Corollary \ref{coro_rhophi_DpU_UG}; for $X(\mf{a}) \simeq X_1$, the statement about $D$ is trivial, and $ X( \mf{a} ) \simeq X( \epsilon( \mf{a} ) )$ holds due to the addendum in Corollary \ref{coro_rhophi_DpU_UG}.)
Finally, in the proof of Lemma \ref{lemma_eps_oneone}, we have to introduce a case discrimination again:
\begin{lemma} \label{lemma_eps_oneone_UG}
If $\rho$ is strong, then $\epsilon$ is one-to-one.
\end{lemma}
\begin{proof} Let $\mf{a}, \mf{b} \in {\cal E}(R)$ with $\epsilon( \mf{a} ) = \epsilon( \mf{b} )$. According to Lemma \ref{lemma_rhophi_isom_1_UG}, we have $X(\mf{a}) \simeq X(\epsilon(\mf{a})) = X(\epsilon(\mf{b})) \simeq X(\mf{b})$. $X(\mf{a})$ and $X(\mf{b})$ are thus isomorphic, and due to $X(\mf{a}), X(\mf{b}) \in \mf{U}'$, we have $X(\mf{a}) = X(\mf{b})$. Let $G \equiv X(\mf{a})$ and
\begin{align*}
{\cal I}(\mf{a}) & \; \equiv \; \mysetdescr{ \iota(\mf{a}) \circ \pi }{\pi \in {\cal A}ut(G)}, \\
{\cal I}(\mf{b}) & \; \equiv \; \mysetdescr{ \iota(\mf{b}) \circ \pi }{ \pi \in {\cal A}ut(G) }.
\end{align*}
For $X(\mf{a}) \not\simeq X_1$, proceed as in the original. Now assume $X(\mf{a}) \simeq X_1$. According to Corollary \ref{coro_props_Xfa_UG}, the cardinality of ${\cal A}ut( G )$, ${\cal I}(\mf{a})$, and ${\cal I}(\mf{b})$ is $2$. ${\cal J} \equiv {\cal I}(\mf{a}) \cup {\cal I}(\mf{b})$ is a subset of ${\cal S}(G, R)$ with $\# {\cal J} = 4$ in the case of $\{ \mf{a}_1 \} \cup \mf{a}_2 \not= \{ \mf{b}_1 \} \cup \mf{b}_2$ and $\# {\cal J} = 2$ in the case of $\{ \mf{a}_1 \} \cup \mf{a}_2 = \{ \mf{b}_1 \} \cup \mf{b}_2$.
$\rho_G[ {\cal J} ]$ is a subset of ${\cal S}_u(G,S)$, and due to $\epsilon(\mf{a}) = \epsilon(\mf{b})$ and the last two equations in Corollary \ref{coro_rhophi_DpU_UG}, all elements of $\rho_G[ {\cal J} ]$ map $V(G) = \{ p, d \}$ bijectively to the same two-element-subset $\{ v, w \}$ of $V(S)$. We conclude $\# \rho_G[ {\cal J} ] \leq 2$, and because $\rho$ is strong, we have $\# {\cal J} = \# \rho_G[ {\cal J} ]$, hence $\{ \mf{a}_1 \} \cup \mf{a}_2 = \{ \mf{b}_1 \} \cup \mf{b}_2$.
Let $\mf{a} = ( v, \{ w \} )$. If $\mf{b} = ( w, \{ v \} )$, then $\mf{a} \mf{b} \in A({\cal E}(R)^*)$ according to Proposition \ref{prop_fDStr_fTa_UG} in contradiction to $\epsilon( \mf{a} ) = \epsilon( \mf{b} )$. Therefore, $\mf{a} = \mf{b}$.
\end{proof}
In Lemma \ref{lemma_aexiv_axiv}, everything indexed with $3$ has to be skipped, and the replacement proposition becomes
\begin{proposition} \label{prop_repl_Cond1_UG}
Let $\epsilon : {\cal E}(R) \rightarrow {\cal E}(S)$ be a strict homomorphism between the EV-systems of $R$ and $S$. Assume that for $\mf{a} \in {\cal E}_o(R)$
\begin{align*}
\begin{split}
\# \epsilon( \mf{a} )_2 & \; \leq \; \# \mf{a}_2
\end{split}
\end{align*}
and
\begin{align*}
\forall \; \mf{b}, \mf{c} \in N_{{\cal E}(R)}( \mf{a} ) \mytext{: } \epsilon( \mf{b} )_1 = \epsilon( \mf{c} )_1 & \; \Rightarrow \; \mf{b}_1 = \mf{c}_1.
\end{align*}
Then $\aSexiv = \epsilon( \aRxiv )$ for all $G \in \mf{U}'$, $\xi \in {\cal S}_u(G,R)$, $v \in V(G)$ with $\aRxiv = \mf{a}$. In particular, $\epsilon$ fulfills Condition \ref{cond_univ_aexiv} if these conditions hold for all $\mf{a} \in {\cal E}_o(R)$.
\end{proposition}
We have already seen in \cite{aCampo_toappear_0}, that also the rearrangement method recalled in Section \ref{subsec_rearr_induced} can be rewritten for undirected graphs. For $R \in \mf{U}$ with the properties described in Definition \ref{def_leq_s}, define $S \in \mf{U}$ by
\begin{align*}
V(S) & \equiv \; Z, \\
A(S) & \equiv \; \left( A(R) \setminus A_{M,X} \right) \; \cup \; A_b, \\
\mytext{where} \quad \quad A_{M,X} & \equiv \; \mysetdescr{ e \in A(R) }{ e \cap M \not= \emptyset \mytext{ and } e \cap X \not= \emptyset} \\
\mytext{and} \quad \quad \quad \; A_b & \equiv \;
\mysetdescr{ ( e \cap M ) \cup \beta[ e \cap X ] }{ e \in A_{M,X} }.
\end{align*}
Then the counterpart of Theorem \ref{theo_rhoxi} delivers $R \sqsubseteq_\Gamma S$ if we replace \eqref{bed_nbh} by
\begin{align*}
\forall x \in X \mytext{:} & \; N_R(x) \setminus M \subseteq N_R( \beta(x) ).
\end{align*}
With these modifications, all results and formulas in Section \ref{subsec_rearr_induced} remain valid if we just make the usual rewritings and skip everything indexed with $3$.
\end{document} |
\begin{document}
\monthyear{Month Year}
\volnumber{Volume, Number}
\setcounter{page}{1}
\title{An Infinite 2-Dimensional Array Associated With Electric Circuits}
\author{Emily Evans}
\address{Brigham Young University}
\email{EJEvans@math.byu.edu}
\author{Russell Jay Hendel}
\address{Towson University}
\email{RHendel@Towson.Edu}
\begin{abstract}
Except for Koshy, who devotes seven pages to applications of Fibonacci Numbers to electric circuits, most books and the Fibonacci Quarterly have been relatively silent on applications of graphs and electric circuits to Fibonacci numbers. This paper continues a recent trend of papers studying the interplay of graphs, circuits, and Fibonacci numbers by presenting and studying the Circuit Array, an infinite 2-dimensional array whose entries are electric resistances labelling edge values of circuits associated with a family of graphs. The Circuit Array has several features distinguishing it from other more familiar arrays such as the Binomial Array and Wythoff Array. For example, it can be proven modulo a strongly supported conjecture that the numerators of its left-most diagonal do not satisfy any linear, homogeneous recursion with constant coefficients (LHRCC). However, we conjecture with supporting numerical evidence an asymptotic formula involving $\pi$ satisfied by the left-most diagonal of the Circuit Array.
\end{abstract}
\maketitle
\section{Electrical Circuits, Linear 2-trees, and Fibonacci Numbers}\label{sec:s1_circuits}
Koshy \cite[pp. 43-49]{Koshy} lists applications of electrical circuits yielding interesting Fibonacci identities. However, aside from this, most books as well as the issues of the Fibonacci Quarterly have been mostly silent on this application.
To begin our review of the recent literature, which has renewed interest in this application, first recall that one modern graph metric, effective resistance, requires that the graph be represented as an electric circuit with edges in the graph represented by resistors. Figure \ref{fig:pawcircuit} illustrates this.
\begin{figure}
\caption{Illustration of a graph and its associated circuit. }
\label{fig:pawcircuit}
\end{figure}
Several papers \cite{Barrett9, Barrett0} have explored effective resistances in electrical circuits whose underlying graphs are so-called linear 2-trees. In addition to showing that these effective resistances are rational functions of Fibonacci numbers, these circuits naturally give rise to interesting and new Fibonacci identities. For example the identities
\begin{equation}\label{eq:fibid1}
\sum_{i = 1}^{m} \frac{F_i F_{i+1}}{L_i L_{i+1}} = \frac{(m+1) L_{m+1} - F_{m+1}}{5 L_{m+1}}, \quad\text{for $m \geq 1$,}
\end{equation}
and
for $k=3, 4, \dots, n-2$,
\begin{equation}\label{eq:wayne1a}
\sum_{j=3}^k {[(-1)^j F_{n-2j+1}(F_{n}+F_{j-2}F_{n-j-1})]}=-F_{k-2}F_{k+1}F_{n-k-2}F_{n+1-k}.
\end{equation}
To appreciate these recent contributions we provide additional background.
Effective resistance, also termed resistance distance in the literature, is a graph metric whose definition was motivated by the consideration of a graph as an electrical circuit. More formally, given a graph, we determine the effective resistance between any two vertices in that graph by assuming that the graph represents an electrical circuit with resistances on each edge. Given any two vertices labeled $i$ and $j$ for convenience assume that one unit of current flows into vertex $i$ and one unit of current flows out of vertex $j$. The potential difference $v_i - v_j$ between nodes $i$ and $j$ needed to maintain this current is the {\it effective resistance} between $i$ and $j$. Figure \ref{fig:pawcircuit} illustrates this.
Recent prior works~\cite{Barrett9, Barrett0, Barrett0b}, study effective resistance in a class of graphs termed {\it linear 2-trees}, also known as 2-paths, which we now define and illustrate.
\begin{definition}\label{def:2tree}
In graph--theoretic language, a 2-tree is defined inductively as follows
\begin{enumerate}
\item $K_3$ is a 2-tree.
\item If $G$ is a 2-tree, the graph obtained by inserting a vertex adjacent to the two vertices of an edge of $G$ is a 2-tree.
\end{enumerate}
A linear $2$-tree (or $2$-path) is a $2$-tree in which exactly two vertices have degree $2$. For an illustration of two sample linear 2--trees see Figure~\ref{fig:2tree}.
\end{definition}
\begin{figure}
\caption{On the left, a straight linear 2-tree with $n$ vertices. On the right, a linear 2-tree with $n$ vertices and single bend at vertex $k$. }
\label{fig:2tree}
\end{figure}
In \cite{Barrett9} network transformations (identical to those found in Section~\ref{sec:s2_basics}) were used to determine the effective resistance in a linear 2-tree with $n$ vertices; the following results were obtained.
\begin{theorem}~\cite[Th. 20]{Barrett9}\label{thm:sl2t}
Let $S_n$ be the straight linear 2-tree on $n$ vertices labeled as in the graph on the left in Figure~\ref{fig:2tree}. Then for any two vertices $u$ and $v$ of $S_n$ with $u < v$,
\begin{equation}
r_{S_n}(u,v)=\frac{\sum_{i=1}^{v-u} (F_i F_{i+2u-2}-F_{i-1} F_{i+2u-3})F_{2n-2i-2u+1}}{F_{2n-2}}. \label{eq:resdiststraightsum}
\end{equation}
or equivalently in closed form
\begin{multline*}r_{S_n}(u,v) = \frac{F_{m+1}^2+F_{v-u}^2F_{m-2j-v+u+3}^2}{F_{2m+2}}\\
+\frac{F_{m+1}\left[{F_{m-v+u}}((v-u)L_k-F_{v-u})+{F_{m-v+u+1}}\left((v-u-5)F_{v-u+1}+(2v-2u+2)F_{v-u}\right)\right]}{5F_{2m+2}}\end{multline*}
\noindent where $F_p$ is the $p$th Fibonacci number and $L_q$ is the $q$th Lucas number.
\end{theorem}
\noindent Moreover, identity~\eqref{eq:fibid1} was shown.
In~\cite{Barrett0} the formulas for a straight linear 2-tree were generalized to a linear 2-tree with any number of bends. See the graph on the right in Figure~\ref{fig:2tree} for an example of a linear 2--tree with a bend at vertex $k$.
The following result is the main result from~\cite{Barrett0} and nicely gives the effective resistance between two vertices in a bent linear 2--tree.
\begin{theorem}~\cite[Th. 3.1]{Barrett0}\label{cor:main2}
Given a bent linear 2-tree with $n$ vertices, and $p = p_1 + p_2 + p_3$ single bends located at nodes $k_1, k_2, \ldots, k_p$ and $k_1 < k_2 < \cdots < k_{p-1} < k_p$ the effective resistance between vertices $u$ and $v$ is given by
\begin{multline}\label{eq:genericformres}
r_G(u,v)=r_{S_n}(u,v)-\sum_{j=p_1+1}^{p_1+p_2}\Big[F_{k_j-3}F_{k_j}-2\sum_{i=p_1+1}^{j-1}[(-1)^{k_j-k_i+1+j-i}F_{k_i}F_{k_i-3}]+2(-1)^{j+u+k_j}F_{u-1}^2\Big]\cdot\\
\Big[F_{n-k_j+2}F_{n-k_j-1}+2(-1)^{v-k_j}F_{n-v}^2\Big]/F_{2n-2}.
\end{multline}
\end{theorem}
\noindent In addition, identity~\eqref{eq:wayne1a} was shown.
This paper adds to the growing literature on electrical circuits and recursions by presenting, exploring, and proving results about an infinite array, $C_{i,j}, j \ge 1, 0 \le i \le 2(j-1),$ whose elements are electrical resistances associated with circuits defined on triangular grid graphs.
\section{Some Definitions }\label{sec:s2_basics}
This section gathers and defines some assorted terms used throughout the paper.
\textbf{The (Triangular) $n$-grid.} \cite[Figure 1]{Hendel}, \cite[Figure 2]{EvansHendel}.
Figure \ref{fig:3grid} is illustrative of the general (triangular) $n$-grid for $n=3.$ As can be seen the $n$-grid consists of $n$ rows with $i, 1 \le i \le n,$ upright oriented triangles arranged in a triangular grid. Triangles are labeled by row, top to bottom, and diagonal, left to right, as shown in Figure \ref{fig:3grid}.
\begin{figure}
\caption{A 3-grid with the upright oriented triangles labeled by row and diagonal.}
\label{fig:3grid}
\end{figure}
\textbf{The all-one $n$-grid.} Throughout this paper the edge labels of a graph correspond to actual resistance values. The \textit{all-one $n$-grid} refers to an $n$-grid all of whose resistance values are uniformly 1.
We use the notation $T_{r,d,e}$ to refer to the edge label of edge $e, e \in \{L,R,B\}$ (standing, respectively, for the left, right, and base edges of a triangle in the upright oriented position), of the triangle in row $r$ diagonal $d.$ Similarly, $T_{r,d}$ will refer to the triangle in row $r$ diagonal $d.$
Throughout the paper, both the all--one $n$-grid and the $m$-grids derived from it ($1 \le m \le n-1$) possess vertical and rotational symmetry (when rotated by $\frac{\pi}{3}$). \cite[Definition 9.1]{Hendel},\cite[Definition 2.11]{EvansHendel}.
This symmetry facilitates not presenting results separately for the left, right, and base sides. Typically we will suffice with \textit{the upper left half} of a grid, \cite[Definition 9.6]{Hendel},\cite[Definition 2.12]{EvansHendel}, defined as the set of triangles, $T_{r,d}$ with
$0 \le r \le \lfloor \frac{m+1}{2} \rfloor,$
$1 \le d \le \lfloor\frac{m+2}{2}\rfloor.$
\begin{example}\label{exa:upperlefthalf}
If $n=3,$ (see panel A1 in Figure \ref{fig:5panels}) the upper left half consists of the
triangles $\langle r,d \rangle, d=1, r=1,2.$
\end{example}
The importance of the upper left half is the following result which captures the implications of the symmetry of the $m$-grids \cite[Corollary 9.6]{Hendel},\cite[Lemma 2.14]{EvansHendel}.
\begin{lemma} \label{lem:upperlefthalf}
For an $m$-grid, once the edge values of the upper half are known, all edge values in the $m$-grid are fixed.
\end{lemma}
\textbf{Corners.} \cite[Equation (29)]{Hendel},\cite[Definition 2.15]{EvansHendel}. Graph--theoretically, a triangle is a corner of an $m$-grid if it has a degree-2 vertex. The 3 corner triangles of an $m$-grid are located at
$T_{1,1}, T_{m,1}, T_{m,m}.$ For example, for the 3-grid on Figure \ref{fig:3grid}, the three corners are located at $\langle 1,1 \rangle, \langle 3,1 \rangle, \langle 3,3 \rangle.$
\section{The Three Circuit Transformations}\label{sec:circuitfunctions}
As pointed out in Section \ref{sec:s1_circuits},
every circuit has associated with it an underlying labeled graph whose edge labels are electrical resistances. Therefore, to specify an \textit{equivalent circuit transformation} from an initial parent circuit to a transformed child circuit we must specify the vertex, edge, and label transformations. By equivalent circuit transformation we mean one that maintains the effective resistance between vertices that appear in both parent and child circuit. There are three basic circuit transformations that we use that preserve effective resistance: \textit{series, $\Delta-Y$, and $Y-\Delta$.} Figure \ref{fig:seriesparallel} illustrates the series transformation.
The following are the key points about this transformation.
\begin{itemize}
\item The top parent graph has 3 nodes and 2 edges
\item The transformed child graph below has 2 nodes and one edge
\item There is a formula, $R_1+R_2$ \cite[pg.~43]{Koshy}, giving the edge label of the child graph in terms of the edge labels of the parent graph.
\end{itemize}
\begin{figure}
\caption{Illustration of the series transformations. See narrative for further details. }
\label{fig:seriesparallel}
\end{figure}
The remaining two circuit transformations are the $\Delta-Y$ transformation which transforms a parent simple 3-edge loop to a claw (3-edge outstar), and the $Y-\Delta$ transformation which takes a claw to a 3-edge loop,\cite[Figure 2]{Hendel}, \cite[Definition 2.4]{EvansHendel}. The relevant transformation functions are
\begin{equation}\label{equ:deltay} \Delta(x,y,z) = \frac{xy}{x+y+z}; \qquad
Y(x,y,z) = \frac{xy+yz+zx}{x}.\end{equation}
Following the computations presented in this paper will not require details of these transformations or how the order of arguments relates to the underlying graphs. To follow the computations needed in this paper it suffices to know the four circuit transformation functions presented in Section \ref{sec:proofmethods}.
\section{The Reduction Algorithm}\label{sec:reduction}
This section presents the basic reduction algorithm. This algorithm was first presented in \cite[pg. 18]{Barrett0} where the algorithm was used for purposes of proof but not used computationally, since computations were done using the combinatorial Laplacian. Hendel \cite[Definition 2.3,Figure 3]{Hendel} was the first to use the reduction algorithm computationally. Moreover, \cite[Algorithm 2.8, Figure 3 and Section 4]{EvansHendel} was the first to show that four transformation functions suffice for all computations. These four circuit transformation functions will be presented in Section \ref{sec:proofmethods}; knowledge of them suffices to follow, and be able to reproduce, all computations presented in this paper. The usefulness of this algorithm in uncovering patterns is alluded to in \cite{Hendel, EvansHendel}.
We begin the presentation of the four circuit transformations with some basic illustrations.
\color{black}
The reduction algorithm takes a parent $m$ grid and \textit{reduces} it, by removing one row of triangles, to a child $m-1$ grid.
Figure \ref{fig:5panels} illustrates the five steps in reducing the 3 grid (Panel A) to a two grid (Panel E), \cite[Steps A-E, Figure 3]{Hendel}, \cite[Algorithm 2.8]{EvansHendel}.
\begin{itemize}
\item Step 1 - Panel A: Start with a labeled 3-grid
\item Step 2 - Panel B: Apply a $\Delta-Y$ transformation to each upright triangle (a 3-loop) resulting in a grid of 3 rows of 3-stars, as shown.
\item Step 3 - Panel C: Discard the corner tails, edges with a vertex of degree one. This does not affect the resistance labels of edges in the reduced two grid in panel E. (However, these corner tails are useful for computing effective resistance as shown in
\cite{Barrett0,Evans2022}).
\item Step 4 - Panel D: Perform series transformations on all consecutive pairs of boundary edges (i.e., the dashed edges in panel C).
\item Step 5 - Panel E: Apply $Y-\Delta$ transformations to all remaining claws, transforming them into 3-loops.
\end{itemize}
\begin{figure}
\caption{Illustration of the reduction algorithm, on a 3-grid. The panel labels correspond to the five steps indicated in the narrative.}
\label{fig:5panels}
\end{figure}
\color{black}
The important point here is that each of the five steps involves specific circuit transformations. However, to follow, and be able to reproduce the computations in this paper, only the four circuit transformation functions presented in the next section are needed. The derivation of these four circuit transformation functions is not needed and has been given in detail in the references cited. An example at the end of this section illustrates what is needed.
In the sequel, we will typically start with an all--one $n$-grid and successively apply the reduction algorithm resulting in a collection of $m$ grids, $1 \le m \le n-1.$ The notation
\begin{multline*} T_{r,d,X}^m, X \in \{L,R,B, LR\} \text{ indicates the resistance label of side $X$}\\ \text{in triangle $T_{r,d}$ of the all--one $n$-grid reduced $m$ times.}\\ \text{The symbol $LR$ will be used in a context when the side depends on the parity of a parameter.}
\end{multline*}
Additionally, if we deal with a single reduction we may use the superscripts $p,c$ to distinguish between the parent grid and the child grid when the actual number of reductions used is not important.
\begin{example}\label{exa:suffices} Referring to Figure \ref{fig:5panels}, the function \emph{left} presented in the next section takes the 9 resistance edge-labels of triangles $T_{2,1}^p, T_{2,2}^p,T_{3,2}^p$ in the parent 3-grid in Panel A and computes the resistance edge-value, $T_{2,2,L}^c$ of the child 2-grid in Panel E. Thus the four transformation functions of the next section suffice to verify and reproduce the computations in this paper.
\end{example}
\section{The Four Transformation Functions.}\label{sec:proofmethods}
As mentioned in Example \ref{exa:suffices} and the surrounding narrative, this section presents the four circuit transformation functions that suffice to follow and reproduce the computations presented in this paper \cite[Section 4]{EvansHendel}:
\begin{itemize}
\item Boundary edges
\item Base (non-boundary) edges
\item Right (non-boundary) edges
\item Left (non-boundary) edges
\end{itemize}
We begin our description of the four transformation functions with the base edge case. Illustrations are based on Figure \ref{fig:3grid}. We first illustrate with the base edge of the top corner triangle in Figure \ref{fig:3grid} and then generalize. Note, that the $\Delta$ and $Y$ functions have been defined in \eqref{equ:deltay}.
We have
$$
T_{1,1,B}^c =
Y(\Delta(T_{3,2,L}^{p},
T_{3,2,R}^{p},
T_{3,2,B}^{p}),
\Delta(T_{2,1,R}^{p},
T_{2,1,B}^{p},
T_{2,1,L}^{p}),
\Delta(T_{2,2,B}^{p},
T_{2,2,L}^{p},
T_{2,2,R}^{p})).
$$
This is a function of 9 variables. At times it becomes convenient to emphasize the triangles involved. We will use the following notation to indicate the dependency on triangles.
$$
T_{1,1,B}^c =F(T_{3,2}^{p},
T_{2,1}^{p},
T_{2,2}^{p}),
$$
which is interpreted as saying \textit{the base edge of $T_{1,1}^c$ is some function ($F$) of the edge-labels of the triangles $T_{3,2}^p,T_{2,1}^p,T_{2,2}^p.$}
Clearly, this notation is mnemonical and cannot be used computationally. It is however very useful in proofs as will be seen later.
The previous two equations can be generalized to an arbitrary $m$-grid and arbitrary row and diagonal (with minor constraints, $r+2 \le n, d+1 \le r,$ on the row and diagonal). We have
\begin{multline*}
T_{r,d,B}^c =
Y(\Delta(T_{r+2,d+1,L}^{p},
T_{r+2,d+1,R}^{p},
T_{r+2,d+1,B}^{p}),
\Delta(T_{r+1,d,R}^{p},
T_{r+1,d,B}^{p},
T_{r+1,d,L}^{p}),\\
\Delta(T_{r+1,d+1,B}^{p},
T_{r+1,d+1,L}^{p},
T_{r+1,d+1,R}^{p})).
\end{multline*}
and
$$
T_{r,d,B}^c =F(T_{r+2,d+1}^{p},
T_{r+1,d}^{p},
T_{r+1,d+1}^{p}).
$$
We next list the remaining three transformation functions.
For $r+2 \le n, d+1 \le r,$ for a boundary left edge we have
\begin{equation*}
T_{r,1,L}^c =
\Delta(T_{r,1,B}^{p},
T_{r,1,L}^{p},
T_{r,1,R}^{p})+
\Delta(T_{r+1,1,L}^{p},
T_{r+1,1,R}^{p},
T_{r+1,1,B}^{p}) ,
\end{equation*}
and
$$
T_{r,1,L}^c =F(T_{r,1}^{p},
T_{r+1,1}^{p}).
$$
For $r+2 \le n, d+1 \le r,$ for non boundary left edges we have,
\begin{multline}\label{equ:leftside9proofs}
T_{r,d,L}^c =
Y(\Delta(T_{r,d-1,R}^{p},
T_{r,d-1,B}^{p},
T_{r,d-1,L}^{p}),
\Delta(T_{r,d,B}^{p},
T_{r,d,L}^{p},
T_{r,d,R}^{p}),\\
\Delta(T_{r+1,d,L}^{p},
T_{r+1,d,R}^{p},
T_{r+1,d,B}^{p}))
\end{multline}
and
\begin{equation}\label{equ:leftside3proofs}
T_{r,d,L}^c =F(T_{r,d-1}^{p},
T_{r,d}^{p},
T_{r+1,d}^{p}).
\end{equation}
For $r+1 \le n, 2 \le d \le r-1,$ for the right sides,
\begin{multline*}
T_{r,d,R}^c =
Y(\Delta(T_{r,d,B}^{p},
T_{r,d,L}^{p},
T_{r,d,R}^{p}),
\Delta(T_{r+1,d,L}^{p},
T_{r+1,d,R}^{p},
T_{r+1,d,B}^{p}),\\
\Delta(T_{r,d-1,R}^{p},
T_{r,d-1,B}^{p},
T_{r,d-1,L}^{p}))
\end{multline*}
and
$$
T_{r,d,R}^c =F(T_{r,d}^{p},
T_{r+1,d}^{p},
T_{r,d-1}^{p}).
$$
\begin{comment}
Notice that we only defined the boundary function for the left boundary ($d=1$). Similarly, notice that for example the base edge function requires $r \le n-2.$ This is not a restriction. For by Lemma \ref{lem:upperlefthalf}, once the upper left half is calculated, the remaining edge values follow by symmetry considerations. Thus the above functions with their restrictions do indeed suffice.
\end{comment}
\section{Computational Examples}\label{sec:Appendix_A}
The four transformation functions of Section \ref{sec:proofmethods} with up to 9 arguments may appear computationally challenging. The purpose of this section is to illustrate their computational use. Additionally, the results computed will be used both to motivate and prove the main theorem.
\subsection{One Reduction of an all--one $n$--grid}
An all--one $n$ grid definitionally has uniform labels of 1. Hence, we may calculate the edge resistance values in an $n-1$ grid arising from one reduction of the all--one $n$ grid as follows:
\begin{itemize}
\item $T_{r,1,L}=\Delta(1,1,1)+\Delta(1,1,1)=\frac{2}{3}, 1 \le r \le n-1.$
\item The preceding bullet computes resistance labels for the left boundary. By Lemma \ref{lem:upperlefthalf}, and by symmetry considerations the same computed value holds on the other two grid boundary edges: $T_{r,r,R} = T_{n-1,r,B} = \frac{2}{3}, 1 \le r \le n-1.$
\item All other edge values are 1, since
the computation $Y(\Delta(1,1,1), \Delta(1,1,1), \Delta(1,1,1))=1$ applies to $T_{r,d,X}, X \in \{L,R,B\}$
\item Again, cases not covered by the four transformation functions are covered by symmetry considerations and Lemma \ref{lem:upperlefthalf}. For example the formula for $T_{r,d,B}$ is only valid for $r \le n-2,$ and therefore both the symmetry considerations and the lemma are needed.
\end{itemize}
We may summarize our results in a lemma, see also, \cite[Corollary 5.1]{Hendel},\cite[Lemma 6.1]{EvansHendel}.
\begin{lemma}\label{lem:1reduction}
The resistance labels of the $n-1$ grid arising from one reduction of an all--one $n$ grid are as follows:
\begin{enumerate}
\item Boundary resistance labels are uniformly $\frac{2}{3}$.
\item Interior resistance labels are uniformly 1.
\end{enumerate}
\end{lemma}
The top corner triangle, $T_{1,1}$ of the $n-1$
grid is presented in Panel A of Figure \ref{fig:motivationillustration}.
\subsection{Uniform Central Regions}
Prior to continuing with the computations we introduce the concept of the uniform center which will be used in the proof of the main theorem.
First, we can identify a triangle with the ordered list, Left, Right, Base, of its resistance labels. Two triangles are then equivalent if their edge labels are equal. By Lemma \ref{lem:1reduction} for the once reduced $n-1$ grid we have
$$
T_{r,1}=\left(\frac{2}{3},1,1\right) \text{ and }
T_{r,r} = \left(1,\frac{2}{3},1\right) \text{ for }
2 \le r \le n-1.
$$
Although $T_{r,1}$ and $T_{r,r}$ are not strictly equivalent we will say they are equivalent up to symmetry since each triangle may be derived from the other by a vertical symmetry, \cite[Definition 5.8]{EvansHendel}.
Using these concepts of triangle equivalence and triangle equality up to symmetry, we note that the central region, $2 \le r \le n-1$ of diagonal 1 of the reduced $n-1$ grid is uniform, that is all triangles are equal. We also note that the interior of the reduced $n-1$ grid is uniform.
This presence of uniformity generalizes. The formal statement of the uniform center \cite[Theorem 6.2]{EvansHendel} is as follows:
\begin{theorem}[Uniform Center]\label{the:uniformcenter} For any $s \ge 1,$ let $n \ge 4s,$ and $1 \le d \le s:$
\begin{enumerate}
\item For
\begin{equation}\label{equ:uniformcenter}
s+ d \le r \le m-2s
\end{equation}
the triangles $T_{r,d}^s$ are all equal.
\item For
\begin{equation}\label{equ:uniformcenter2}
2s-1 \le r \le m-2s
\end{equation}
the left sides $T_{r,s,L}^s$ are all equal,
$T_{2s-1,s,R}^s=T_{2s-1,s,L}^s,$
$T_{r,s,R}^s=1, 2s \le r \le m-2s,$
and $T_{r,s,B}^s=1, 2s-1 \le r \le m-2s-1.$
\item For any triangle in the uniform center, that is, satisfying \eqref{equ:uniformcenter},
$ T_{r,d,R}=T_{r,d,B}.$
\end{enumerate}
\end{theorem}
This theorem has an elegant graphical interpretation. It states that the sub triangular grid whose corner triangles are $T_{2s-1,s}^s, T_{m-2s,m}^s, T_{m-2s, m-2s}^s$ has interior labels of 1 and a single uniform label along its edge boundary. However this interpretation is not needed in the sequel.
\subsection{Two Reductions of an all--one $n$--grid}
We continue illustrating computations by considering the $n-2$ grid arising from 2 reductions of the all--one $n$ grid (or one reduction of the $n-1$ grid.)
By \eqref{equ:leftside3proofs}
$$
T_{3,2,L}^2 = F(T_{3,1}^1, T_{3,2}^1, T_{4,2}^1).
$$
By Lemma \ref{lem:1reduction}, we have
$$
T_{3,1}^1=(\frac{2}{3},1,1),
T^1_{3,2}=T^1_{4,2} =(1,1,1).
$$
Hence, by \eqref{equ:leftside9proofs}
\begin{equation}\label{equ:t322627}
T_{3,2,L}^c=Y(\Delta(1,1,\frac{2}{3}),
\Delta(1,1,1), \Delta(1,1,1))=
\frac{26}{27},
\end{equation}
as shown in Panel B of Figure \ref{fig:motivationillustration}.
To continue with the computations
we define the function,
\begin{equation}\label{equ:g0}
G_0(X)=Y(\Delta(1,1,X),
\Delta(1,1,1), \Delta(1,1,1))=
\frac{X+8}{9},
\end{equation}
and confirm $G_0(\frac{2}{3}) = \frac{26}{27}.$
\subsection{Three Reductions of an all--one $n$-grid.}
We next compute $T_{5,3,L}^3.$ Continuing as in the case of $T_{3,2,L}^2,$ we have by
\eqref{equ:leftside3proofs}
\begin{equation}\label{equ:temp1}
T_{5,3,L}^3=F(T_{5,2}^2, T_{5,3}^2, T_{6,3}^2).
\end{equation}
By Theorem \ref{the:uniformcenter}(b)
$$
T_{5,2,L}^2 = T_{3,2,L}^2,
$$
and by \eqref{equ:t322627}
$$
T_{3,2,L}^2 = \frac{26}{27},
$$
implying
$$
T_{5,2,L}^2 = \frac{26}{27}.
$$
Again, by Theorem \ref{the:uniformcenter}
all resistance labels of $T_{5,3}^2, T_{6,3}^2$ are 1. Plugging this into \eqref{equ:temp1} and using \eqref{equ:leftside9proofs} and \eqref{equ:g0},
we have
$$
T_{5,3,L}^3 = Y\left(\Delta\left(\frac{26}{27},1,1\right), \Delta(1,1,1), \Delta(1,1,1)\right)=G_0\left(\frac{26}{27}\right)=\frac{242}{243}.
$$
Panel C of Figure \ref{fig:motivationillustration} illustrates this.
We can continue this process inductively. For example, $T^4_{7,4,L} = G_0(\frac{242}{243}).$ The result is summarized as follows.
\begin{lemma} \label{lem:row0}
With $G_0(X)$ defined by \eqref{equ:g0} we have
$T_{1,1,L}^1=\frac{2}{3}$ and for $s \ge 2,$
$T_{2s-1,s,L}^{s}=G_{0}(T_{2s-3,s-1,L}^{s-1}).$
\end{lemma}
An almost identical argument using the circuit transformations for the right edge shows the following.
\begin{lemma} \label{lem:row1}
Let $G_1(X)=\frac{1}{3} \frac{X+8}{X+2}.$ Then for $k \ge 0,$
$T_{3+2k,2+k,R}^{2+k}=G_1(T^{1+k}_{1+2k,1+k,L}).$
\end{lemma}
\section{Motivation for the Circuit Array}\label{sec:motivation}
This section motivates the underlying construction of the Circuit Array. We initialize with an all--one $n$-grid for $n$ large enough. As computed in Section \ref{sec:Appendix_A}, we have:
\begin{itemize}
\item
$T_{1,1,L}^1=\frac{2}{3}=1-\frac{3}{9^1}.$
See Panel A of Figure \ref{fig:motivationillustration}.
\item $T_{3,2,L}^2=\frac{26}{27}=1-\frac{3}{9^2}.$ See Panel B of Figure \ref{fig:motivationillustration}.
\item $T_{5,3,L}^3=\frac{242}{243}=1-\frac{3}{9^3}.$ See Panel C of Figure \ref{fig:motivationillustration}.
\end{itemize}
The resulting sequence
$$
\frac{2}{3}, \frac{26}{27}, \frac{242}{243}, \dotsc
$$
satisfies
\begin{equation}\label{equ:row0}
1 -\frac{3}{9^s}, s=1,2,3, \dotsc.
\end{equation}
In this particular case the denominators, $9^s$ form a linear homogeneous recursion with constant coefficients (LHRCC) of order 1,
$$
G_s = 9 G_{s-1}, s \ge 1, \qquad G_0=1.
$$
Similarly, the numerators satisfy the LRCC,
$$
G_s =9G_{s-1}+24, s \ge 1, \qquad G_0=-2.
$$
(and therefore, since a sequence satisfying a linear, non--homogeneous recursion with constant coefficients (LRCC) will also satisfy an
LHRCC albeit with a higher degree), the sequence also satisfies an LHRCC.
The sequence just studied forms row 0 of the Circuit Array, Table \ref{tab:circuitarray}. To determine row 1 of the Circuit Array we compute the following:
\begin{itemize}
\item The right side of the triangle left-adjacent to the top corner triangle of the 2-rim of reduction 2 has label
$\frac{13}{12} = 1+\frac{2}{3} \frac{1}{9^{2-1}-1},$ as shown in Panel B of Figure \ref{fig:motivationillustration}.
\item The right side of the triangle left-adjacent to the top corner triangle of the 3-rim of reduction 3 has label
$\frac{121}{120} = 1+\frac{2}{3} \frac{1}{9^{3-1}-1},$ as shown in Panel C of Figure \ref{fig:motivationillustration}.
\item The right side of the triangle left-adjacent to the top corner triangle of the 4-rim of reduction 4 has label
$\frac{1093}{1092} = 1+\frac{2}{3} \frac{1}{9^{4-1}-1}.$
\end{itemize}
The resulting sequence
$$
\frac{13}{12}, \frac{121}{120}, \frac{1093}{1092}, \frac{9841}{9840}, \dotsc
$$
satisfies $1+\frac{2}{3}\,\frac{1}{9^{s-1}-1}, s=2,3,\dotsc.$ We again see the presence of LRCC. The sequence of twice the denominators satisfies the LRCC
$$
G_{s+1} = 9G_s +24, s \ge 2, \qquad G_2 =24,
$$
while the sequence of twice the numerators satisfies the LRCC,
$$
G_{s+1} = 9G_s +8, s \ge 2, \qquad G_2 =26,
$$
and hence both numerators and denominators satisfy LHRCC, albeit of higher order.
These calculations determine the construction of the Circuit Array by rows. Figure~\ref{fig:motivationillustration} can be used to motivate a construction by columns. Each perspective provides different sequences.
\begin{itemize}
\item As shown in Panel A, Column 1, consists of the singleton $\frac{2}{3}.$ We may describe the process of generating this column by starting, at the left resistance edge label of triangle $T_{1,1},$ where 1 corresponds to the number of underlying reductions of the all--one $n$-grid, traversing to the left (in this case there is nothing further to transverse) and ending at the left--most edge of the underlying row. This singleton $\frac{2}{3}$ is column 0 of the Circuit Array, Table \ref{tab:circuitarray}.
\item As shown in Panel B, Column 2 may be obtained as follows: Start, at the left resistance-label of triangle $T_{3,2},$ where the number of reductions of the all--one $n$-grid for this column is $2, \text{ and } 3 = 2 \times 2 -1$. This resistance is $\frac{26}{27}.$ Then traverse to the left, and end at the left--most edge of the underlying row. By recording the labels during this transversal we obtain $\frac{26}{27},\frac{13}{12}, \frac{1}{2},$ which is column 1 of the Circuit Array, Table \ref{tab:circuitarray}, starting at row 0 and ending at row 2.
\item As shown in Panel C, Column 3 may be obtained as follows: Start, at the left resistance-label of triangle $T_{5,3},$ where the number of reductions of the all--one $n$-grid for this column is $3, \text{ and } 5 = 2 \times 3 -1.$ This resistance is
$\frac{242}{243},$ traverse to the left, and end at the left most edge of the underlying row. By recording the labels during this transversal we obtain $\frac{242}{243},\frac{121}{120}, \frac{89}{100},\frac{1157}{960},\frac{13}{32},$ which is column 2 of the Circuit Array, Table \ref{tab:circuitarray}), starting at row 0 and ending at row 4.
\item The above suggests in general, that column $c \ge 1$ of the Circuit Array will consist of the resistance labels of the left and right sides of the triangles $T_{2c-1,i}^c, i=c, c-1, \dotsc 1.$ This will be formalized in the next section.
\end{itemize}
\begin{figure}
\caption{Graphical illustration showing locations of various edge resistance values computated in this section. See the narrative for further details. }
\label{fig:motivationillustration}
\end{figure}
\section{The Circuit Array}
This section formally defines the Circuit array, whose $i$-th row, $0 \le i \le 2(j-1),$ and $j$-th column, $j \ge 1,$ contains $T_{2j-1,j-\lfloor \frac{i+1}{2}\rfloor,LR}^j$
where, as indicated at the end of Section \ref{sec:reduction} the symbol $LR$ means L (respectively R) if $i$ is even (respectively odd).
\begin{example}
This example repeats the derivation of the first three columns derived at the end of Section \ref{sec:motivation}.
Referring to Figure \ref{fig:motivationillustration},
we see Panel A contains row 0, column 1 of the array containing $T_{2j-1,j-i}^j=T_{1,1}^1 = \frac{2}{3}.$
Panel B contains column 2 rows 0,1,2, which respectively contain $T_{3,2,L}^2=\frac{26}{27},
T_{3,2,R}^2=\frac{13}{12},
T_{3,1,L}^2=\frac{1}{2}.
$
Panel C contains column 3 rows 0,1,2,3,4 which respectively contain
$T_{5,3,L}^3=\frac{242}{243},
T_{5,2,R}^3=\frac{121}{120},
T_{5,2,L}^3=\frac{89}{100},
T_{5,1,R}^3=\frac{1157}{960},
T_{5,1,L}^3=\frac{13}{32}.
$
\end{example}
Table \ref{tab:circuitarrayformal} presents the first few rows and columns of formal entries of the Circuit Array while
Table \ref{tab:circuitarray} presents the first few rows and columns of the numerical values of the Circuit Array.
\begin{center}
\begin{table}
\begin{small}
\caption
{First few rows and columns of the formal entries of the Circuit Array. }
\label{tab:circuitarrayformal}
{
\renewcommand{\arraystretch}{1.3}
\begin{center}
\begin{tabular}{||c||r|r|r|r|r|r|r|r||}
\hline \hline
\;&$1$&$2$&$3$&$4$&$5$&$6$&$7$&$\dotsc$\\
\hline
$0$&$T_{1,1,L}^1$&$T_{3,2,L}^2$&$T_{5,3,L}^3$&$T_{7,4,L}^4$&$T_{9,5,L}^5$&$T_{11,6,L}^6$&$T_{13,7,L}^7$&$\dotsc$\\
$1$&\;&$T_{3,2,R}^2$&$T_{5,2,R}^3$&$T_{7,3,R}^4$&$T_{9,4,R}^5$&$T_{11,5,R}^6$&$T_{13,6,R}^7$&$\dotsc$\\
$2$&\;&$T_{3,1,L}^2$&$T_{5,2,L}^3$&$T_{7,3,L}^4$&$T_{9,4,L}^5$&$T_{11,5,L}^6$&$T_{13,6,L}^7$&$\dotsc$\\
$3$&\;&\;&$T_{5,1,R}^3$&$T_{7,2,R}^4$&$T_{9,3,R}^5$&$T_{11,4,R}^6$&$T_{13,5,R}^7$&$\dotsc$\\
$4$&\;&\;&$T_{5,1,L}^3$&$T_{7,2,L}^4$&$T_{9,3,L}^5$&$T_{11,4,L}^6$&$T_{13,5,L}^7$&$\dotsc$\\
$5$&\;&\;&\;&$T_{7,1,R}^4$&$T_{9,2,R}^5$&$T_{11,3,R}^6$&$T_{13,4,R}^7$&$\dotsc$\\
$6$&\;&\;&\;&$T_{7,1,L}^4$&$T_{9,2,L}^5$&$T_{11,3,L}^6$&$T_{13,4,L}^7$&$\dotsc$\\
$7$&\;&\;&\;&\;&$T_{9,1,R}^5$&$T_{11,2,R}^6$&$T_{13,3,R}^7$&$\dotsc$\\
$8$&\;&\;&\;&\;&$T_{9,1,L}^5$&$T_{11,2,L}^6$&$T_{13,3,L}^7$&$\dotsc$\\
$9$&\;&\;&\;&\;&\;&$T_{11,1,R}^6$&$T_{13,2,R}^7$&$\dotsc$\\
$10$&\;&\;&\;&\;&\;&$T_{11,1,L}^6$&$T_{13,2,L}^7$&$\dotsc$\\
$11$&\;&\;&\;&\;&\;&\;&$T_{13,1,R}^7$&$\dotsc$\\
$12$&\;&\;&\;&\;&\;&\;&$T_{13,1,L}^7$&$\dotsc$\\
$13$&\;&\;&\;&\;&\;&\;&\;&$\ddots$\\
$14$&\;&\;&\;&\;&\;&\;&\;&$\ddots$\\
\hline \hline
\end{tabular}
\end{center}
}
\end{small}
\end{table}
\end{center}
\begin{center}
\begin{table}
\begin{large}
\caption
{ First few rows and columns of the numerical values of the Circuit Array.}
\label{tab:circuitarray}
{
\renewcommand{\arraystretch}{1.3}
\begin{center}
\begin{tabular}{||c||r|r|r|r|r|r|r||}
\hline \hline
\;&$1$&$2$&$3$&$4$&$5$&$6$&$\dotsc$\\
\hline \hline
$0$&$\frac{2}{3}$&$\frac{26}{27}$&$\frac{242}{243}$&$\frac{2186}{2187}$&$\frac{19682}{19683}$&$\frac{177146}{177147}$&$\dotsc$\\
$1$&\;&$\frac{13}{12}$&$\frac{121}{120}$&$\frac{1093}{1092}$&$\frac{9841}{9840}$&$\frac{88573}{88572}$&$\dotsc$\\
$2$&\;&$\frac{1}{2}$&$\frac{89}{100}$&$\frac{16243}{16562}$&$\frac{335209}{336200}$&$\frac{108912805}{108958322}$&$\dotsc$\\
$3$&\;&\;&$\frac{1157}{960}$&$\frac{1965403}{1904448}$&$\frac{366383437}{364552320}$&$\frac{ 1071810914005}{1071023961216}$&$\dotsc$\\
$4$&\;&\;&$\frac{13}{32}$&$\frac{305041}{380192}$&$\frac{1303624379}{1372554304}$&$\frac{9044690242835}{9138722473024}$&$\dotsc$\\
$5$&\;&\;&\;&$\frac{224369}{167424}$&$\frac{19373074829}{18067568640}$&$\frac{308084703953915}{303469074613248}$&$\dotsc$\\
$6$&\;&\;&\;&$\frac{89}{256}$&$\frac{296645909}{412902400}$&$\frac{31631261501245}{34990560891392}$&$\dotsc$\\
$7$&\;&\;&\;&\;&$\frac{46041023}{31211520}$&$\frac{112546800611915}{99980909002752}$&$\dotsc$\\
$8$&\;&\;&\;&\;&$\frac{2521}{8192}$&$\frac{320676092095}{495976128512}$&$\dotsc$\\
$9$&\;&\;&\;&\;&\;&$\frac{4910281495}{3059613696}$&$\dotsc$\\
$10$&\;&\;&\;&\;&\;&$\frac{18263}{65536}$&$\dotsc$\\
$\dotsc$&\;&\;&\;&\;&\;&\;&$\dotsc$\\
\hline \hline
\end{tabular}
\end{center}
}
\end{large}
\end{table}
\end{center}
The \textit{leftmost} diagonal of the circuit array is defined by
\begin{equation}\label{equ:leftside}
L_s = T_{2s-1,1,L}^s, s=1,2,3,\dotsc
\end{equation}
\section{The Main Theorem}\label{sec:main}
The main theorem asserts that the Circuit Array is a recursive array. Along any fixed row, table values are a uniform function of previous row and column values. We have already introduced the row 0 function, $G_0,$ \eqref{equ:g0} (see Lemma \ref{lem:row0}) and $G_1(X)$ (see Lemma \ref{lem:row1}).
\begin{theorem} For each $e \ge 0, \text{ $e$ even,}$ there exist rational functions $G_{e}$ such that for $k \ge 0$
\begin{equation}\label{equ:maineven}
T_{e+3+2k,2+k,L}^{\frac{e}{2}+2+k} = G_{e}(T_{1+2k,1+k,L}^{1+k}, T_{3+2k,1+k,L}^{2+k}, \dotsc, T_{e+1+2k,1+k,L}^{\frac{e}{2}+1+k}).
\end{equation}
Similarly for each odd, $o =\frac{e}{2}+1$
\begin{equation}\label{equ:mainodd}
T_{\frac{e}{2}+2+2k,1+k,L}^{\frac{e}{2}+2+k} = G_{o}(T_{1+2k,1+k,L}^{1+k}, T_{3+2k,1+k,L}^{2+k}, \dotsc, T_{\frac{e}{2}+2k,1+k,L}^{\frac{e}{2}+1+k}).
\end{equation}
\end{theorem}
Proof of the main theorem is deferred to Sections \ref{sec:s13_proofbasecase} and \ref{sec:proofmain}. Illustrative examples of these recursions are provided in the next section.
\section{Illustrations of the Main Theorem}\label{sec:examples}
\begin{example} For $i=0$ we have by Lemma \ref{lem:row0},
$$
G_0(X)=\frac{X+8}{9}. \text{ Hence, }
C_{0,2}= \frac{26}{27}=G_0(C_{0,1})= G_0\left(\frac{2}{3}\right), \text{ and }
C_{0,3}= \frac{242}{243}=G_0(C_{0,2})= G_0\left(\frac{26}{27}\right).
$$
Similarly, we have by Lemma \ref{lem:row1},
$$
G_1(X)=\frac{1}{3} \frac{X+8}{X+2}. \text{ Hence, }
C_{1,2}= \frac{13}{12}=G_1(C_{0,1})= G_1\left(\frac{2}{3}\right), \text{ and }
C_{1,3}= \frac{121}{120}=G_1(C_{0,2})= G_1\left(\frac{26}{27}\right).
$$
\end{example}
\begin{example}\label{exa:row2} For $i=1$ we have
$$
G_2(X,Y)=\frac{9Y(X+2)^2+8(X+8)^2}{(X+26)^2}.$$
Hence,
$$C_{2,3}= \frac{89}{100}=G_2(C_{0,1},C_{2,2})= G_2\left(\frac{2}{3},\frac{1}{2}\right), \text{ and }$$
$$C_{2,4}= \frac{16243}{16562}=G_2(C_{0,2},C_{2,3})= G_2\left(\frac{26}{27},\frac{89}{100}\right)
$$
Similarly, we have
$$
G_3(X,Y)=\frac{9Y(X+2)^2 (X+8)+8(X+8)^3}
{9Y(X+2)^2(X+26)+6(X+2)(X+8)(X+26)}.$$
Hence,
$$C_{3,3}= \frac{1157}{960}=G_3(C_{0,1},C_{2,2})= G_3\left(\frac{2}{3},\frac{1}{2}\right), \text{ and }$$
$$C_{3,4}= \frac{1965403}{1904448}=G_3(C_{0,2},C_{2,3})= G_3\left(\frac{26}{27},\frac{89}{100}\right).
$$
\end{example}
\begin{example}
For $i=2,$ we have $G_4(X,Y,Z)=\frac{N(X,Y,Z)}{D(X,Y,Z)},$ with
\begin{equation*}
N(X,Y,Z) =
\begin{cases}
512(X+2)^0(X+8)^5(X+80)Y^0+\\
1152(X+2)^2(X+8)^3(X+80)Y^1+ \\
648(X+2)^4(X+8)^1(X+80)Y^2+ \\
36(X+2)^2(X+8)^2(X+80)^2 Y^0 Z+ \\
108(X+2)^3(X+8)^1(X+80)^2Y^1 Z+ \\
81(X+2)^4(X+8)^0(X+80)^2Y^2Z,
\end{cases}
\end{equation*}
and
\begin{equation*}
D(X,Y,Z)=
\begin{cases}
676(X+2)^0(X+8)^2Q(X)^2Y^0 +\\
1404(X+2)^2(X+8)^2Q(X)^1Y^1+ \\
729(X+2)^4(X+8)^2Q(X)^0Y^2,
\end{cases}
\end{equation*}
with, $Q(X)=13X^2+298X+2848.$
These polynomials are formatted to show certain underlying patterns the statement and proof of which will be the subject of another paper.
One then has $C_{4,4}=\frac{305041}{380192}=
G_4(C_{0,1}, C_{2,2}, C_{4,3})=
G_4\left(\frac{2}{3}, \frac{1}{2}, \frac{13}{32}\right).$
\end{example}
\section{Alternate Approaches to the Main Theorem}
The main theorem formulates the recursiveness of the circuit array in terms of recursions by rows with the number of arguments of these recursions growing by row. There are other approaches to formulating the main theorem, explored in the next few sections.
\begin{itemize}
\item Section \ref{sec:closed} explores a formulation of the main theorem in terms of closed formula similar to those found in Section \ref{sec:motivation}
\item Section \ref{sec:closed} also explores formulation of the main theorem in terms of a single variable rather than multiple variables.
\item Section \ref{sec:determinant} explores determining an LHRCC for the numerators of the leftmost diagonal and strongly conjectures its impossibility. This contrasts with other 2-dimensional arrays whose diagonals do satisfy LHRCC.
\item Section \ref{sec:product} explores asymptotic approximations to the leftmost diagonal.
\end{itemize}
\section{Recursions vs. Closed Formulae}\label{sec:closed}
This section explores a closed-formula approach to the main theorem. We begin with a review.
We have already seen (Lemmas \ref{lem:row0} and \ref{lem:row1}) that row 0 of the circuit array has a simple closed form,
$$
C_{0,s} = 1 - \frac{3}{9^s} \qquad s \ge 1;
$$
and similarly, row 1 also has a simple closed form,
$$
C_{1,s} = 1+\frac{2}{3} \frac{1}{9^{s-1}-1}.
$$
This naturally motivated seeking a formulation of the entire array in terms of closed formulae. However, this approach quickly becomes excessively cumbersome. For example, consider row 2. With the aid of \cite[A163102,A191008]{OEIS}, we found the following closed form for this row:
\begin{multline*}
\textbf{Define } n=2(s-2), \qquad d=\frac{1}{2}\biggl( 3^{s-1}-1 \biggr)\\
N=\frac{1}{4} \biggl(n\cdot3^{n+1}\biggr)+
\frac{1}{16} \biggl(5\cdot3^{n+1}+(-1)^n\biggr),
\qquad
D=\frac{1}{2} d^2 (d+1)^2
\end{multline*}
then $$ C_{2,s} = 1 - \frac{N}{D}.$$
However, this formula is much more complicated than the formula presented in Section \ref{sec:examples},
$$
C_{2,s} = G_2(X,Y)=\frac{9Y(X+2)^2+8(X+8)^2}{(X+26)^2}, \qquad \text{ with } X=C_{0,s-2}, Y= C_{2,s-1}.
$$
We present one more attempt at a closed formula which also failed, that of using a single variable. We begin by first re-labeling $\frac{2}{3}$ as $1-\frac{3}{x}$ in the first reduction of an all--one $n$-grid (see Panel A in Figure \ref{fig:motivationillustration}). If we then continue reductions, all labels are rational functions in this single variable $x$ so that upon substitution of $x=9$ we may then obtain desired resistance edge labels.
As before, the resulting formulas are highly complex. We present below these closed formulas for the left-side diagonal, $L_s$. They are derived by ``plugging in'' to the four basic transformation functions of Section \ref{sec:proofmethods} as we did in Section \ref{sec:motivation}.
\begin{itemize}
\item
\[\ \frac{x-3}{x-1}
\qquad \text{gives $L_1=\frac{2}{3}$ when $x=9$}\]
\item
\[\frac{2}{3}\frac{x-3}{x-1}
\qquad \text{gives $L_2=\frac{1}{2}$ when $x=9$}\]
\item
\[\frac{(x-3)(3x-1)}{6(x-1)^2}, \text{gives $L_3=\frac{13}{32}$ when $x=9$}\]
\item
\[\frac{(x-3)(3(x-1)(x-3) + 4(3x-1)^2)}{96(x-1)^3}, \text{gives $L_4=\frac{89}{256}$ when $x=9$}\]
\item
\[\frac{(x-3)(3(x-1)(x-3)(34x-18) + 16(3x-1)^3)}{1536(x-1)^4}, \qquad
\text{gives $L_5$ when $x=9$}
\]
\item
\[\frac{(x-3)(3(x-1)(x-3)(793x^2-874x+273) + 64(3x-1)^4)}{24576(x-1)^5}, \qquad
\text{gives $L_6$ when $x=9$}
\]
\item
\[\frac{(x-3) (6(x - 1)(x - 3)(7895x^3 - 13549x^2 + 8693x - 2015)+4^4(3x-1)^5)}{393216 (x-1)^6}, \qquad
\text{gives $L_7$ when $x=9$}.
\]
\end{itemize}
There are interesting patterns in the above results and they may yield future results. One example of an interesting pattern is found in the constants appearing in the denominators. For $s \ge 3$ the denominator constants in the formulas yielding $L_s$ upon substitution of $x=9$ equal $3\times2^{4(s-3)+1}$. We however do not further pursue this in this paper.
To sum up, because of the greater complexity as well as lack of completely describable patterns in the closed formula we abandoned this approach in favor of a recursive approach in several variables.
\section{Impossibility of a recursive sequence for the left-most diagonal}\label{sec:determinant}
It is natural, when studying sequences of fractions, to separately study their numerators and denominators. We have seen that for $C_0, C_1$ such an approach uncovers LHRCC. Therefore, it comes as a surprise to have a result stating the impossibility of an LHRCC.
To present this impossibility result, we first, briefly review a technique for discovering LHRCC. Suppose we have an integer sequence such as $G_1, G_2, \dotsc$ Suppose further we believe this sequence is second order, that is,
$$
G_n = x G_{n-1}+y G_{n-2}
$$
As $n$ varies this last equation generates an infinite number of equations in $x$ and $y.$
In other words, to investigate the possible recursiveness of this sequence we can solve the following set of equations for any $m$ and then use the solution to test further,
\begin{center}
$\begin{bmatrix} G_m & G_{m+1} \\ G_{m+1} & G_{m+2} \end{bmatrix}$
$ \begin{bmatrix} x \\ y \end{bmatrix}$
$=$
$ \begin{bmatrix} G_{m+2} \\ G_{m+3} \end{bmatrix}.$
\end{center}
Solving this set of equations by Cramer's rule naturally motivates considering the determinant
$$ \begin{vmatrix} G_m & G_{m+1} \\ G_{m+1} & G_{m+2} \end{vmatrix} $$
for any integer $m.$ While these determinants are non-zero, the order 3 determinants,
$$ \begin{vmatrix} G_m & G_{m+1} & G_{m+2}
\\ G_{m+1} & G_{m+2} & G_{m+3} \\
G_{m+2} & G_{m+3} & G_{m+4}
\end{vmatrix}, $$
must be zero because of the dependency captured by the LHRCC.
These remarks generalize to $r$-th order recursions for integer $r \ge 2,$ and explain why in the search for recursions it is natural to consider such determinants. It follows that if for some $m$ and for all $r \ge 2$ the following determinant is non-zero,
$$ \begin{vmatrix} G_m & G_{m+1} & \dotsc & G_{m+r}
\\
G_{m+1} & G_{m+2} & \dotsc & G_{m+r+1} \\
\dotsc & \dotsc & \dotsc & \dotsc \\
G_{m+r} & G_{m+r+1} & \dotsc & G_{m+2r}
\end{vmatrix}, $$
then it is impossible for the sequence $\{G_m\}$ to satisfy any LHRCC.
The following conjecture, verified for several dozen early values of $j,$ shows a remarkable and unexpected simplicity in the values of these determinants.
\begin{conjecture}
Let $T(j)= \frac{j(j+1)}{2}$ indicate the $j$-th triangular number.
Using \eqref{equ:leftside}, define ${n'}_s$ and ${d'}_s$ by
$L_s = \frac{n_s}{d_s}=\frac{n'_s}{2^{4s-7}}$, where $n_s$ and $d_s$ are relatively prime. For any $j \ge 2$ we have
\begin{center}
$$\begin{vmatrix} n'_2 & n'_3 & \cdots& n'_{2+j}\\
n'_3 & n'_4 & \cdots &n'_{3+j}\\
\vdots & \vdots & \ddots &\vdots\\
n'_{2+j}& n'_{3+j} & \cdots & n'_{2+2j}\end{vmatrix}
=9^{T(j-1)}.$$
\end{center}
\end{conjecture}
\begin{corollary} Under the conditions stated in the conjecture, it is impossible for the $\{{n'}_s\}_{s \ge 1}$ to satisfy an LHRCC of any order.
\end{corollary}
\begin{comment}
It is tempting to suggest that the numerators satisfy no LHRCC because they are growing too fast. But that is not true. We know that $L_s <1,$
\cite[Corollary 7.2]{EvansHendel} and that the denominators form a geometric sequence. It follows that the numerators are bounded by a geometric sequence. In terms of growth rate, there is no reason why the sequence shouldn't be able to satisfy an LHRCC.
\end{comment}
\section{An Asymptotic Approach}\label{sec:product}
Prior to presenting the proof of the main theorem, we explore one more approach in this section. By way of motivation recall that several infinite arrays have asymptotic formulas associated with them. For example, the central binomial coefficients have asymptotic formulas arising from Stirling's formula.
For purposes of expositional smoothness, we focus on the leftmost diagonal,
$L_s,$ \eqref{equ:leftside}.
Hendel, \cite{Hendel} introduced the idea of finding explicit formulas for edge-values in terms of products of factors. After
numerical experimentation, the following approximation was found,
\begin{equation}\label{equ:A}
L_s \asymp A_s = \frac{2}{3} \displaystyle \prod_{i=2}^s (1 - \frac{1}{2i-1}),
\end{equation}
with $A$ standing for approximation. Tables \ref{tab:leftcenter5rows} and \ref{tab:leftcenter80rows} provide numerical evidence for this approximation. The key takeaways from both tables are that both differences $L_s - A_s$ and ratios $\frac{L_s}{A_s}$ are monotone decreasing for $s \ge 3.$
\begin{center}
\begin{table}
\begin{small}
\caption
{Numerical evidence for conjectures about $L_s,$ first five rows. Notice that after $s=2$ all difference and ratio columns are monotone decreasing.}
\label{tab:leftcenter5rows}
{
\renewcommand{\arraystretch}{1.3}
\begin{center}
\begin{tabular}{||c||c|c|c|c||c|c|c||c|c||}
\hline \hline
$s$&$L_s$&$A_s$&$L_s-A_s$&$\frac{L_s}{A_s}$&$P_s$&$A_s-P_s$&$\frac{A_s}{P_s}$&$L_s-P_s$&$\frac{L_s}{P_s}$\\
\hline
$1$&$0.6667$&$0.6667$&$0$&$1$&$0.5908$&$0.0758$&$1.1284$&$0.0758$&$1.1284$\\
$2$&$0.5$&$0.4444$&$0.0556$&$1.125$&$0.4178$&$0.0267$&$1.0638$&$0.0822$&$1.1968$\\
$3$&$0.4063$&$0.3556$&$0.0507$&$1.1426$&$0.3411$&$0.0144$&$1.0424$&$0.0651$&$1.191$\\
$4$&$0.3477$&$0.3048$&$0.0429$&$1.1407$&$0.2954$&$0.0094$&$1.0317$&$0.0522$&$1.1769$\\
$5$&$0.3077$&$0.2709$&$0.0368$&$1.136$&$0.2642$&$0.0067$&$1.0253$&$0.0435$&$1.1647$\\
\hline \hline
\end{tabular}
\end{center}
}
\end{small}
\end{table}
\end{center}
\begin{center}
\begin{table}
\begin{small}
\caption
{Numerical evidence for conjectures about $L_s,$ first 80 rows. Observe that except for a few initial values the difference and ratio columns are monotone decreasing.}
\label{tab:leftcenter80rows}
{
\renewcommand{\arraystretch}{1.3}
\begin{center}
\begin{tabular}{||c||c|c|c|c||c|c|c||c|c||}
\hline \hline
$s$&$L_s$&$A_s$&$L_s-A_s$&$L_s/A_s$&$P_s$&$A_s-P_s$&$A_s/P_s$&$L_s-P_s$&$L_s/P_s$\\
\hline
$8$&$0.2387$&$0.2122$&$0.0265$&$1.125$&$0.2089$&$0.0033$&$1.0157$&$0.0298$&$1.1427$\\
$16$&$0.1658$&$0.1489$&$0.017$&$1.1141$&$0.1477$&$0.0012$&$1.0078$&$0.0181$&$1.1228$\\
$24$&$0.1346$&$0.1212$&$0.0134$&$1.1103$&$0.1206$&$0.0006$&$1.0052$&$0.014$&$1.1161$\\
$32$&$0.1162$&$0.1049$&$0.0114$&$1.1084$&$0.1044$&$0.0004$&$1.0039$&$0.0118$&$1.1127$\\
$40$&$0.1038$&$0.0937$&$0.0101$&$1.1072$&$0.0934$&$0.0003$&$1.0031$&$0.0103$&$1.1107$\\
$48$&$0.0946$&$0.0855$&$0.0091$&$1.1065$&$0.0853$&$0.0002$&$1.0026$&$0.0093$&$1.1094$\\
$56$&$0.0875$&$0.0791$&$0.0084$&$1.1059$&$0.079$&$0.0002$&$1.0022$&$0.0086$&$1.1084$\\
$64$&$0.0818$&$0.074$&$0.0078$&$1.1055$&$0.0739$&$0.0001$&$1.002$&$0.008$&$1.1077$\\
$72$&$0.0771$&$0.0697$&$0.0073$&$1.1052$&$0.0696$&$0.0001$&$1.0017$&$0.0075$&$1.1071$\\
$80$&$0.0731$&$0.0662$&$0.0069$&$1.105$&$0.0661$&$0.0001$&$1.0016$&$0.007$&$1.1067$\\
\hline \hline
\end{tabular}
\end{center}
}
\end{small}
\end{table}
\end{center}
The $P$ columns in these tables (which also provide good approximations as measured by differences and ratios) correspond to the following further approximation
\begin{equation}\label{equ:P}
A_s \asymp P_s = \sqrt{\frac{\pi}{9s}},
\end{equation}
with $P$ standing for the approximation of $A_s$ with $\pi$. \color{black}
Equation \eqref{equ:P} is naturally derived from \eqref{equ:A} using Stirling's formula. The next lemma contains a formal statement of the result.
\begin{lemma}
$$
A_s \asymp P_s.
$$
\end{lemma}
\begin{proof}
By \eqref{equ:A} we have
$$
A_s = \frac{2}{3} \Biggl( \frac{2}{3} \frac{4}{5} \dotsc \frac{2s-2}{2s-1} \Biggr) .
$$
Applying the identity $(2s-1)! = \bigl(2 \cdot 4 \cdots (2s-2) \bigr) \bigl( 3 \cdot 5 \cdots (2s-1)\bigr)$ to the last equation, we have
$$
A_s = \frac{2}{3} \frac{\Biggl( 2^{s-1} (s-1)! \Biggr)^2} {(2s-1)!}.
$$
Of the many forms of Stirling's formula, we can simplify the last equation by applying the standard approximation (see for example~\cite{Wolfram}) $n! \asymp \Biggl( \frac{n}{e} \Biggr)^n \sqrt{2\pi n} $, yielding
$$
A_s = \frac{2}{3} \frac{4^s}{4} \Biggl( \frac{s-1}{e} \Biggr)^{2(s-1)} \biggl(2\pi(s-1) \biggr) \Biggl(\frac{e}{2s-1}\Biggr)^{2s-1} \frac{1}{\sqrt{2 \pi (2s-1)}}.
$$
By gathering constants, cancelling the powers of $e,$ and using the fact that $c_1 s -c_2 \asymp s$ for constants $c_1, c_2,$ we can simplify this last equation to
$$
A_s = \frac{e}{6} 4^s \sqrt{s} \sqrt{\pi} (s-1)^{2s-2} \left(\frac{1}{2s-1}\right)^{2s-1}.
$$
Further simplification is obtained by using traditional calculus identities on limits resulting in powers of $e.$
$$
(s-1)^{2s-2} = \Biggl( \frac{s-1}{s} \Biggr)^{2s} s^{2s} \frac{1}{(s-1)^2} \asymp e^{-2} s^{2s} \frac{1}{s^2},$$ $$
\frac{1}{(2s-1)^{2s-1}} = \Biggl(\frac{2s}{2s-1}\Biggr)^{2s-1} \frac{1}{(2s)^{2s-1}} \asymp e \frac{1}{4^s} \frac{1}{s^{2s}}2s.
$$
Combining these last 3 equations, cancelling powers of $e$ and $4,$ and using the fact that $c_1 s + c_2 \asymp s,$ we obtain
$$
A_s \asymp \sqrt{\pi} \frac{1}{6} 2s \frac{1}{s^2} \sqrt{s} = \frac{\sqrt{\pi}}{3 \sqrt{s}} = \sqrt{\frac{\pi}{9s}} = P_s
$$
as required.
\end{proof}
\color{black}
\section{Base Case of the Inductive Proof}\label{sec:s13_proofbasecase}
The proof of the main theorem is by induction on the row index, parametrized by whether the row is even or odd, as shown in equations \eqref{equ:maineven}-\eqref{equ:mainodd}. The base case requires proofs for rows 0,1,2,3.
We suffice throughout the proof with consideration of the even rows, the proof for the odd rows being highly similar and hence omitted. The proof for row 0 has already been completed and is summarized in Lemma \ref{lem:row0}. Recall that the proof was based on the equations describing a non-boundary left edge (\eqref{equ:leftside3proofs} and \eqref{equ:leftside9proofs}) as well as Theorem \ref{the:uniformcenter}. A proof for rows 0 and 1 can be found in Lemmas \ref{lem:row0} and \ref{lem:row1}. Proofs in this and the next section are accomplished similarly by applying the appropriate transformation functions found in Section~\ref{sec:proofmethods} as well as the Uniform Center Theorem, Theorem \ref{the:uniformcenter}.
In this section we show \eqref{equ:maineven} for the case $e=2.$ We accomplish this by first proving \eqref{equ:maineven} when $k=0$ and then proving for $k>0.$ This separation into two cases is for expositional clarity since the proof can be accomplished with the single arbitrary case.
\noindent\textsc{Case 1: Proof of Equation \eqref{equ:maineven} for $e=2,k=0.$}
We must show
\begin{equation}\label{equ:tempbasecase1}
T_{5,2,L}^3 = G_3(T_{1,1,L}^1, T_{3,1,L}^2),
\end{equation}
for some function $G_3.$
By the formula for non boundary left edges, \eqref{equ:leftside3proofs}, we know
\begin{equation}\label{equ:tempbasecase2}
T_{5,2,L}^3 = F(T_{5,1}^2,T_{5,2}^2, T_{6,2}^2).
\end{equation}
Proceeding as in the proof of Lemma \ref{lem:row0} we have as follows:
\begin{itemize}
\item By Theorem \ref{the:uniformcenter}(b) the six edges of triangles $T_{5,2}^2, T_{6,2}^2$ are identically one.
\item By \eqref{equ:uniformcenter}, the uniform center for the first diagonal in the twice reduced $n$-grid begins on row $s+d=2+1=3.$
Therefore, the argument $T_{5,1}^2$ in \eqref{equ:tempbasecase2} may be replaced by the identically labeled triangle $T_{3,1}^2.$
\item Triangle $T_{3,1}^2$ has three sides, $T_{3,1,L}^2, T_{3,2,R}^2, T_{3,2,B}^2.$
\item But by Lemma \ref{lem:row1},
$T_{3,2,R}^2= G_0(T_{1,1,L}^1),$ and by Theorem \ref{the:uniformcenter}(c),
$T_{3,2,B}^2=T_{3,2,R}^2$
\end{itemize}
Applying the above to \eqref{equ:tempbasecase2} and plugging into \eqref{equ:leftside9proofs} we have
$$
T_{5,2,L}^3 = Y(\Delta(T_{3,1,L}^2,G_0(T_{1,1,L}^1), G_0(T_{1,1,L}^1)), \Delta(1,1,1),\Delta(1,1,1))
=G_3(T_{1,1,L}^1, T_{3,1,L}^2),
$$
which has the required form of \eqref{equ:tempbasecase1} as desired. This completes the proof of \eqref{equ:maineven} for the case $e=2,k=0.$
\noindent\textsc{Case 2: Proof of Equation \eqref{equ:maineven} for $e=2,k>0.$}
Proceeding exactly as we did in the case $k=0$ we have by \eqref{equ:leftside3proofs},
\begin{equation}
\label{equ:tempbasecase3}
T_{5+2K,2+K,L}^{3+K} = F(T_{5+2K,1+K}^{2+K},T_{5+2K,2+K}^{2+K}, T_{6+2K,2+K}^{2+K}).
\end{equation}
Continuing as in the case $k=0$ we have:
\begin{itemize}
\item By Theorem \ref{the:uniformcenter}(b) the six edges of triangles $T_{5+2K,2+K}^{2+K}, T_{6+2K,2+K}^{2+K}$ are identically one.
\item By \eqref{equ:uniformcenter}, the uniform center for the $(1+K)$th diagonal of the all-one $n$-grid reduced $2+K$ times begins on row $s+d=2+K+1+K=3+2K.$
Therefore, the argument $T_{5+2K,1+K}^{2+K}$ in \eqref{equ:tempbasecase3} may be replaced by the identically labeled triangle $T_{3+2K,1+K}^{2+K}.$
\item Triangle $T_{3+2K,1+K}^{2+K}$ has three sides, $T_{3+2K,1+K,L}^{2+K}, T_{3+2K,1+K,R}^{2+K}, T_{3+2K,2+K,B}^{2+K}.$
\item But by Lemma \ref{lem:row1},
$T_{3+2K,2+K,R}^{2+K}= G_0(T_{1+2K,1+K,L}^{1+K}),$ and by Theorem \ref{the:uniformcenter}(c),
$T_{3+2K,2+K,B}^{2+K}=T_{3+2K,2+K,R}^{2+K}$
\end{itemize}
Applying the above to \eqref{equ:tempbasecase3} and plugging into the edge version of the equation, \eqref{equ:leftside9proofs}, we have
\begin{multline*}
T_{5+2K,2+K,L}^{3+K} = Y(\Delta(T_{3+2K,1+K,L}^{2+K},G_0(T_{1+2K,1+K,L}^{1+K}), G_0(T_{1+2K,1+K,L}^{1+K})), \Delta(1,1,1),\Delta(1,1,1))\\
=G_3(T_{1+2K,1+K,L}^{1+K}, T_{3+2K,1+K,L}^{2+K}),
\end{multline*}
which has the required form of \eqref{equ:tempbasecase1} as was to be shown. This completes the proof of \eqref{equ:maineven} for the second case and hence completes the proof of the base case $e=2.$
\section{Proof of the Main Theorem}\label{sec:proofmain}
This section completes the inductive proof of the main theorem, by showing equations \eqref{equ:maineven} and \eqref{equ:mainodd}, the base case of which was completed in the prior section. Accordingly, throughout this section we assume $E$ is an even number, corresponding to row $E$ of the Circuit Array, such that
\begin{equation}\label{equ:ebigger4}
E \ge 4.
\end{equation}
We will utilize the following lemma, whose proof follows from an inspection of Table \ref{tab:circuitarrayformal}.
\begin{lemma}\label{lem:isamember}
Triangle $T^a_{b,c,LR}$ belongs to row $d$ column $e$ of the Circuit array if $b=2a-1,$ $a=e$ and either i) $LR=L, a=c, d=0,$ ii) $LR=L, d=2(a-c)$, or iii) $LR=R, d=2(a-c)-1>0.$
\end{lemma}
For an induction assumption we assume \eqref{equ:maineven} and \eqref{equ:mainodd} hold for all $e < E$ and proceed to prove these equations for the case $E.$ We suffice with the proof for even rows (i.e., Equation \eqref{equ:maineven}), the proof for odd rows being similar and hence omitted. The proof proceeds in a manner similar to the proofs presented in the prior section.
First, by \eqref{equ:leftside3proofs}
we have
\begin{equation}\label{equ:tempfirst}
T^{\frac{E}{2}+2+k}_{E+3+2k, 2+k, L}=
F(T^{\frac{E}{2}+1+k}_{E+3+2k, 1+k},
T^{\frac{E}{2}+1+k}_{E+3+2k, 2+k},
T^{\frac{E}{2}+1+k}_{E+4+2k, 2+k})
\end{equation}
Second, utilizing assumption \eqref{equ:ebigger4} and using part (a) of the Uniform Center Theorem, in the three triangle arguments on the right hand side of \eqref{equ:tempfirst} $E+3+2k$ and $E+4+2k$ can be replaced with $E+1+2k$ since
$$
s+d =\frac{E}{2}+1+k+1+k \ge E+1+k; \quad
\text{ and similarly }
s+d = \frac{E}{2}+1+k+2+k \ge E+1+k,$$
implying
\begin{equation}\label{equ:tempsecond}
T^{\frac{E}{2}+2+k}_{E+3+2k, 2+k, L}=
F(T^{\frac{E}{2}+1+k}_{E+1+2k, 1+k},
T^{\frac{E}{2}+1+k}_{E+1+2k, 2+k},
T^{\frac{E}{2}+1+k}_{E+1+2k, 2+k}).
\end{equation}
Third, therefore expanding \eqref{equ:leftside3proofs} to a full 9 variable function, \eqref{equ:leftside9proofs}, and using Theorem \ref{the:uniformcenter}(c) we have that \eqref{equ:tempsecond} is expanded to
\begin{multline*}
T^{\frac{E}{2}+2+k}_{E+3+2k, 1+k, L}=
Y(\Delta(T^{\frac{E}{2}+1+k}_{E+1+2k,1+k,L},
T^{\frac{E}{2}+1+k}_{E+1+2k,1+k,R},
T^{\frac{E}{2}+1+k}_{E+1+2k,1+k,R}),\\
\Delta(T^{\frac{E}{2}+1+k}_{E+1+2k,2+k,L},
T^{\frac{E}{2}+1+k}_{E+1+2k,2+k,R},
T^{\frac{E}{2}+1+k}_{E+1+2k,2+k,R}),\\
\Delta(T^{\frac{E}{2}+1+k}_{E+1+2k,2+k,L},
T^{\frac{E}{2}+1+k}_{E+1+2k,2+k,R},
T^{\frac{E}{2}+1+k}_{E+1+2k,2+k,R})).
\end{multline*}
Fourth, in examining the rows to which the arguments of this last equation belong using Lemma \ref{lem:isamember} we have
\begin{itemize}
\item $T^{\frac{E}{2}+1+k}_{E+1+2k, 1+k, L}$ in row $E$,
\item $T^{\frac{E}{2}+1+k}_{E+1+2k, 1+k, R}$ in row $E-1$,
\item $T^{\frac{E}{2}+1+k}_{E+1+2k, 2+k, L}$ in row $E-2$, and
\item $T^{\frac{E}{2}+1+k}_{E+1+2k, 2+k, R}$ in row $E-3$.
\end{itemize}
The proof is completed by the induction assumption applied to rows $E-1, E-2, E-3.$ More specifically we must show that when $k=0$ the second row element is a function of all leftmost diagonal elements. But the first argument on the right hand side is the leftmost diagonal element of row $E$ while the induction assumption assures us that the other arguments are functions of the leftmost elements of previous rows, $e < E.$ This completes the proof.
\end{document} |
\begin{document}
\title{Fermionic Adaptive Sampling Theory for Variational Quantum Eigensolvers}
\author{Marco Majland}
\affiliation{Kvantify Aps, DK-2300 Copenhagen S, Denmark}
\affiliation{Department of Physics and Astronomy, Aarhus University, DK-8000 Aarhus C, Denmark}
\affiliation{Department of Chemistry, Aarhus University, DK-8000 Aarhus C, Denmark}
\author{Patrick Ettenhuber}
\affiliation{Kvantify Aps, DK-2300 Copenhagen S, Denmark}
\author{Nikolaj Thomas Zinner}
\affiliation{Kvantify Aps, DK-2300 Copenhagen S, Denmark}
\affiliation{Department of Physics and Astronomy, Aarhus University, DK-8000 Aarhus C, Denmark}
\begin{abstract}
Quantum chemistry has been identified as one of the most promising areas where
quantum computing can have a tremendous impact.
For current \ac{nisq} devices, one of the best available methods to prepare approximate wave functions
on quantum computers is the \ac{adapt}. However, ADAPT-VQE suffers from a significant measurement
overhead when estimating the importance of operators in the wave function. In this work, we propose \ac{algo},
a method for selecting operators based on importance metrics solely derived from the populations of Slater determinants in the
wave function.
Thus, our method mitigates measurement overheads for \ac{adapt} as it is only dependent on the populations of Slater
determinants which can simply be determined by measurements in the computational basis. We introduce two heuristic
importance metrics, one based on Selected Configuration Interaction with perturbation theory and one based on
approximate gradients. In state vector and finite shot simulations, \ac{algo} using the heuristic
metric based on approximate gradients converges at the same rate or faster than \ac{adapt}
and requires dramatically fewer shots.
\end{abstract}
\maketitle
\section{Introduction}\label{sec:introduction}
Quantum chemistry has been identified as one of the most promising areas where
quantum computing can have great impact on industrial applications\cite{elfving_how_2020,daley_practical_2022,gonthier_identifying_2020,aspuru-guzik_simulated_2005}.
However, current quantum hardware is subject to noise and error and thus algorithms such as quantum phase
estimation remain intractable for current and near-term devices \cite{bharti_noisy_2022,abrams_quantum_1999}. Therefore, the research
community has focused on developing algorithms suitable for an era of noise, error, limited qubits and limited quantum gates~\cite{mcclean_theory_2016,kandala_hardware-efficient_2017}.
A promising method to approximate electronic wave functions on quantum computers is the \ac{adapt} algorithm,
along with its variants, which has made tremendous progress towards this goal\cite{grimsley_adaptive_2019,tang_qubit-adapt-vqe_2021,yordanov_qubit-excitation-based_2021,lan_amplitude_2022,anastasiou_tetris-adapt-vqe_2022,bertels_symmetry_2022}.
Other adaptive algorithms include the Qubit Coupled Cluster method and the Iterative Qubit Coupled Cluster method~\cite{ryabinkin_qubit_2018,ryabinkin_iterative_2019}.
The adaptive approaches for estimating electronic wave functions contrast the static approaches such as Unitary Coupled
Cluster Theory and its variants\cite{romero_strategies_2018,anand_quantum_2022,lee_generalized_2019}.\\
The adaptive algorithms have proven to converge to chemical accuracy with fewer parameters and more compact wave functions
compared to that of the static algorithms. Thus, the adaptive algorithms may be more feasible for near-term applications.
However, one of the primary challenges of \ac{adapt} is the large measurement overhead incurred by estimating the importance metric
for selecting relevant operators for the wave function~\cite{grimsley_adaptive_2019}. Even estimating a single energy
evaluation of a wave function through the sampling of expectation values may require significant measurement resources
as was demonstrated in recent large-scale benchmarks~\cite{gonthier_identifying_2020}. For \ac{adapt}, the importance
metric for choosing operators from a predefined pool, $\mathcal{A}$, is the gradient of the energy. Therefore,
the number of measurements necessary to rank the operators scales with the size of the pool, i.e. $\mathcal{O}(|\mathcal{A}|)$.
Since $\mathcal{A}$ typically contains two-body operators, the size of the set of operators $|\mathcal{A}|$ scales as $\mathcal{O}(N^4)$,
where $N$ is a measure for the size of the chemical system. \\
In this work, we propose a method for selecting operators based on the populations of
Slater determinants in the wave function in order to establish an importance metric for excitation
operators. This is in stark contrast to \ac{adapt} where the importance of operators is established using gradient
measurements which requires the sampling of expectation values for each excitation operator.
Sampling Slater determinants requires only the sampling of a single operator rather than $\mathcal{O}(N^{4})$ operators as
in \ac{adapt}. In fact, the required quantities for evaluating the proposed metric can be extracted from a measurement of
the energy in \ac{vqe}, a measurement that would in any case have to be performed.\\
For selecting operators, we are considering two metrics, one that is related to the approximate gradient used in \ac{adapt} and
a second one that is inspired by classical \ac{sci}\cite{huron_iterative_1973}. In classical \ac{sci}, the determinants used to diagonalize the Hamiltonian are chosen using an importance metric
typically based on a perturbation method\cite{bytautas_priori_2009,anderson_breaking_2018,bender_studies_1969,whitten_configuration_1969,evangelisti_convergence_1983}.
Here we consider selecting operators based on second-order \ac{en} perturbation theory\cite{epstein_stark_1926,nesbet_configuration_1997}.
The methods are compared to \ac{adapt} by calculating the ground state energies of two small molecules which
are typically used in benchmarks, namely $\text{H}_{4}$ and $\text{LiH}$. The ground state energies are calculated using
state vector (infinite shot) and finite shot simulations to investigate the performance of the methods in both cases.
The paper is organized as follows. In Sec.~\ref{sec:background}, we provide the theoretical background
of \ac{adapt} and \ac{sci}. In Sec.~\ref{sec:algo} we provide the background for the scaling reduction in \ac{algo}
and derive the gradient-based and \ac{sci}-based metrics.
In Sec.~\ref{sec:compdetails}, we
provide a pseudo-algorithm for \ac{algo} and provide the computational details of our calculations which
we will present and discuss in Sec.~\ref{sec:results}. Finally, we
conclude with a summary and present some future research avenues in Sec.~\ref{sec:conclusion}.
\section{Background} \label{sec:background}
In this section, we will provide the background necessary for understanding the construction
of our method in Sec.~\ref{sec:algo}, starting with \ac{adapt} and followed by \ac{sci}.
\subsection{ADAPT-VQE}
In \ac{adapt}, an Ansatz is built by successively adding parametrized unitary operators
acting on a reference state $\ket{\Phi_0}$, which is often taken as the \ac{hf}
ground state determinant. Thus, the \ac{adapt} wave function in iteration $k$ of the
algorithm can be expressed as
\begin{equation}
\ket{\Psi^{(k)}} = \prod_{\mu\in\mathcal{A}^{(k)}} e^{-i\theta_\mu\hat{A}_\mu} \ket{\Phi_0}, \label{eq:expans}
\end{equation}
where $\mathcal{A}^{(k)}$ is the set of operators in the wave function at iteration $k$,
$\hat{A}_\mu = \hat{\tau}_\mu - \hat{\tau}^\dagger_\mu$, with $\hat{\tau}_\mu$ being an excitation operator and $\mu$
enumerates the excitation. The excitation operators are chosen from a pool of operators, $\mathcal{A}=\{A_{\mu}\}$, based on
an importance metric, $w(\hat{A}_\mu, \ket{\Psi^{(k)}})$.
In standard \ac{adapt}, the importance metric is the gradient of the energy with respect to the parameter of the operator.
The energy of the $k+1$st iteration may be written as
\begin{equation}
E^{(k+1)} = \langle\Psi^{(k)} | e^{i\theta_\mu \hat{A}_\mu} \hat{H} e^{-i\theta_\mu \hat{A}_\mu} |\Psi^{(k)} \rangle
\end{equation}
such that
\begin{equation}
\begin{split}
g_\mu &= \mleft. \frac{\partial E^{(k+1)}}{\partial \theta_\mu}\mright|_{\theta_\mu = 0} \\
&= i \langle\Psi^{(k)} | [\hat{A}_\mu, \hat{H}] | \Psi^{(k)} \rangle. \label{eq:adaptgrad}
\end{split}
\end{equation}
To evaluate this expression, \ac{adapt} relies on measuring operators of the type $[\hat{A}_\mu, \hat{H}]$,
yielding a significant overhead in measurements to be performed.
\subsection{Selected CI}
In \ac{sci}, determinants are selected iteratively by an importance metric in order to adaptively increase the subspace in which the CI eigenvalue problem is solved.
One possibility for selecting determinants is based on perturbation theory\cite{huron_iterative_1973}. In this paper, we consider \ac{en}
perturbation theory\cite{epstein_stark_1926,nesbet_configuration_1997}. \ac{en} theory weights the importance of a Slater determinant $|D\rangle$ for extending a
wave function $|\Psi^{(k)}\rangle$ in iteration $k$ as
\begin{equation}
\begin{split}
E^{(k)}_{D} &= \frac{|\langle D | \hat{H} | \Psi^{(k)} \rangle|^2}{E^{(k)} - \langle D | \hat{H} | D \rangle}\\
&= \sum_{ij}\frac{c_i c_j^* \langle D_j | \hat{H} | D\rangle\langle D | \hat{H} | D_i\rangle}{E^{(k)} - \langle D | \hat{H} | D \rangle}, \label{eq:en}\\
\end{split}
\end{equation}
where the states $|D_i\rangle$ are Slater determinants and $c_i = \langle D_i | \Psi^{(k)} \rangle$ CI coefficients.
\section{FAST-VQE} \label{sec:algo}
In this section, we present a method for selecting operators solely based on the population
of Slater determinants in the wave function by establishing importance metrics for excitation
operators. This is in stark contrast to \ac{adapt} where the importance of operators is established by measuring
the expectation value of the non-diagonal gradient operators of Eq.~\eqref{eq:adaptgrad}. We start this section
with a discussion of sampling populations of Slater determinants and diagonal Hamiltonian measurements in Sec.~\ref{sec:diagh}
and then build the two metrics in Secs.~\ref{sec:enlm} and \ref{sec:dsgn}.
\subsection{Sampling populations of Slater determinants} \label{sec:diagh}
A population of Slater determinants may, for example, be obtained from the energy evaluations in the \ac{vqe} optimization
or as a separate measurement. For separate measurements, given $\ket{\Psi^{(k)}}$, one may repeatedly perform measurements
in the computational basis to obtain a bit string representation of determinants from $\ket{\Psi^{(k)}}$ in the \ac{hf} basis.
These measurements may be collected in a multi-set of determinants. The multi-set may be written as
\begin{equation}
S^{(k)} = \left\{ |D_i\rangle : \langle D_i | \Psi^{(k)} \rangle \neq 0\right\}, \label{eq:multiset}
\end{equation}
where the frequency of each determinant $|D_i\rangle$ is proportional to $|c_i|^2$ and where the restriction
is fulfilled by construction. With this set of determinants, we can build metrics suitable to assign
importance weights to operators from an operator pool $\mathcal{A}$ based on the expected contribution to the
wave function. In the following sections we will introduce two such metrics.\\
For energy measurements, the population of Slater determinants may be obtained through
sampling the diagonal elements of the Hamiltonian. In \ac{vqe}, the Hamiltonian is mapped to
a qubit Hamiltonian,
\begin{equation}
\hat{H} = \sum_{a} h_{a} \hat{P}_{a},
\end{equation}
where
\begin{equation}
\hat{P}_{a} = \bigotimes_{b} \hat{\sigma}_{b}^{\alpha}, \quad \alpha\in\{x,y,z\},
\end{equation}
denotes a product of Pauli operators. Consider a partitioning of the Hamiltonian $\hat{H} = \hat{H}^z + \hat{H}^c$ where $\hat{H}^z$ is diagonal, then
we can express $\hat{H}^z$ as $\hat{H}^z = \sum_{a} h_{a} \hat{P}^z_{a}$, where $\hat{P}^z_{a}$ are products of Pauli-$z$ operators.
We can then write an energy functional depending on the wave function parameters $\boldsymbol{\theta}$ in terms of this partitioned Hamiltonian as
\begin{equation}
\begin{split}
E^{(k)}(\boldsymbol{\theta}) &= \langle \Psi^{(k)}| \hat{H}^z + \hat{H}^c |\Psi^{(k)}\rangle \\
& = \sum_{a} h_{a} \langle \Psi^{(k)}| \hat{P}^z_a |\Psi^{(k)}\rangle + \langle \Psi^{(k)}| \hat{H}^c |\Psi^{(k)}\rangle\\
& = \sum_{ai} h_{a} |c_i|^2 \langle D_i | \hat{P}^z_a |D_i\rangle + \langle \Psi^{(k)}| \hat{H}^c |\Psi^{(k)}\rangle \\
& = \sum_{i} h_{ii} |c_i|^2 + \langle \Psi^{(k)}| \hat{H}^c |\Psi^{(k)}\rangle. \label{eq:equation2}
\end{split}
\end{equation}
Thus, we can perform measurements of diagonal Hamiltonian terms in the computational basis in order to sample
Slater determinants $|D_i\rangle$ in $|\Psi^{(k)}\rangle$ with a probability that is proportional to $|c_i|^2$.
Note that Eq.~\eqref{eq:equation2} is evaluated repeatedly in order to optimize the wave function
parameters $\boldsymbol{\theta}$, e.g. using \ac{vqe}, such that no additional cost is introduced to calculate
Slater determinant populations.
\subsection{Heuristic Gradient} \label{sec:dsgn}
To introduce the first heuristic importance metric, we start from $g_\mu$ in Eq.\eqref{eq:adaptgrad} which may be expressed as
\begin{equation}
\begin{split}
g_\mu &= i \langle\Psi^{(k)} | \hat{A}_\mu \hat{H} - \hat{H}\hat{A}_\mu | \Psi^{(k)} \rangle\\
&= -i \langle\Psi^{(k)} | \hat{A}^\dagger_\mu \hat{H} + \hat{H}\hat{A}_\mu | \Psi^{(k)} \rangle \\
&= -2i \Re(\langle \Psi^{(k)} | \hat{A}^\dagger_\mu \hat{H} | \Psi^{(k)}\rangle)\\
&= -2i \sum_{ij} \Re(c_i^*c_j\langle D_i | \hat{A}^\dagger_\mu \hat{H} | D_j\rangle). \label{eq:altgrad}\\
\end{split}
\end{equation}
Then, dropping the off-diagonal part of the sum in Eq.~\eqref{eq:altgrad} yields
\begin{equation}
\begin{split}
\text{diag}(g_\mu) &= 2i \sum_i \Re( |c_i|^2 \langle D_i | \hat{A}^\dagger_\mu \hat{H} | D_i \rangle) \\
&= 2i \sum_i |c_i|^2 \Re(\langle D_i | \hat{A}^\dagger_\mu \hat{H} | D_i \rangle). \label{eq:exaltmeasure}
\end{split}
\end{equation}
The manifold into which $\hat{A}^\dagger_\mu$ excites, $\{\langle D_j|\hat{A}^\dagger_\mu : D_j \in S^{(k)}\}$,
may be classically constructed. Such a manifold contains information on how the diagonal is connected to off-diagonal
elements. To include that information in the final metric, a second sum over the determinants will therefore be introduced.
In this regard, $S^{(k)}$ of Eq.~\eqref{eq:multiset} will be used directly since the number of occurrences of a determinant $\ket{D_{i}}$
in this multiset is proportional to $|c_i|^2$. Additionally, the second summation is introduced and all prefactors are removed,
as the final ranking will not be dependent on constant factors. Thus, one obtains
\begin{equation}
\alpha_\mu = \sum_{D_i \in S^{(k)}}\sum_{D_j \in S^{(k)}} \Re(\langle D_i | \hat{A}^\dagger_\mu \hat{H} | D_j \rangle), \label{eq:altmeasure}
\end{equation}
which concludes the construction of the first importance metric. This metric roughly corresponds to dropping the
phases and prefactors from Eq.~\ref{eq:altgrad}. Note that this expression can be evaluated classically once
$S^{(k)}$ has been obtained. This importance metric will be denoted \ac{dsgn} in the following.\\
In contrast to \ac{adapt}, it is necessary to remove operators already used in the Ansatz, $\mathcal{A}^{k}$,
from the operator pool, $\mathcal{A}$, in order to avoid using the same operator twice. However, to converge to the \ac{fci}
energy, it may be necessary to repeat operators in the Ansatz. Thus, whenever $\max_{\mu}(\alpha_\mu)<\epsilon$,
the operators $\mathcal{A}^{k}$ are added to the pool again.
\subsection{Heuristic Selected CI} \label{sec:enlm}
In order to introduce a second heuristically motivated metric, \ac{sci} theory will be leveraged.
In contrast to \ac{sci} theory, which works with the determinants directly, it is required to build a metric that
relates determinants and their frequencies in the sampling procedure to operators in order to gauge the effect of adding
them to the Ansatz. In this section, such a metric will be constructed based on the \ac{en} criterion
from Eq.~\eqref{eq:en}.
First, consider the \ac{adapt} Ansatz in Eq.~\eqref{eq:expans}. The addition of a new
operator corresponds to the multiplication of a new exponential
which operates on all previous exponentials and the reference wave function. Thus, the contribution must be
evaluated for all determinants already in $|\Psi^{(k)}\rangle$ and appropriately weighted. The construction of the
heuristic operator metric based on determinants begins by noting that
$\langle D_i | \hat{A}_\mu^\dagger = \langle D_k |$ is just another determinant or zero, establishing
a connection between operators and determinants. From this, it would be possible to evaluate
the contribution of $\hat{A}_\mu^\dagger$ by applying the \ac{en} criterion in Eq.~\eqref{eq:en} directly using $D_k$ as
the contribution to be evaluated. However, naively sampling the operator $H\ket{D_k}\bra{D_k}H$ comes at a significant cost with a scaling
of $\mathcal{O}(N^{8})$. In order to make this manageable and to be able to evaluate this on a classical computer, the
off-diagonal elements of the sum over $i$ and $j$ from Eq.~\eqref{eq:en} may be neglected. Furthermore, one must evaluate
and sum such a metric for all the determinants an operator $\hat{A}_\mu^\dagger$ is able to create from the determinants
in $|\Psi^{(k)}\rangle$, i.e., for practical implementations, all the determinants of the multi-set $S^{(k)}$.
For representing the wave function in Eq.~\eqref{eq:en}, the same approach as used to arrive at Eq.~\eqref{eq:altmeasure}
will be used, i.e., a finite shot representation given the determinants collected in $S^{(k)}$ and using only the diagonal
contributions.
This concludes the construction of the heuristic importance metric $\beta_\mu$, which may be written as
\begin{equation}
\begin{split}
\beta_\mu & \coloneqq \sum_{D_i\in S^{(k)}} \sum_{D_j\in S^{(k)}} \frac{|\langle D_i | \hat{A}^\dagger_\mu \hat{H} | D_j \rangle|^2}{E^{(k)} - \langle D_i | \hat{A}^\dagger_\mu\hat{H} \hat{A}_\mu | D_i\rangle}. \label{eq:weight}
\end{split}
\end{equation}
This importance metric will be denoted as \ac{enlm}. Note that also for this metric, we need to remove used operators
from $\mathcal{A}$, as explained in Sec.~\ref{sec:dsgn}.\\
The importance metrics in Eqns.~\eqref{eq:altmeasure} and~\eqref{eq:weight} both use the operator $\hat{A}^\dagger\hat{H}$ for the evaluation
of the importance of an operator $\hat{A}_\mu$ when improving the wavefunction in the next iteration. From a set of determinants, it is trivial to evaluate
the expectation values for this operator on a classical computational resource with polynomial scaling in the number of electrons and orbitals.
\section{Computational details} \label{sec:compdetails}
\begin{figure*}
\caption{Convergence of \ac{adapt}.}
\label{fig:h4}
\end{figure*}
In this section, the algorithms and computational details of the calculations will be reviewed, starting with a description of the algorithm in
Sec.~\ref{sec:algodesc}, a description of the choice of operator pool in Sec.~\ref{sec:opchoice} and finally with a description of the numerical
experiments in Sec.~\ref{sec:molecules}.
\subsubsection{Review of algorithms}\label{sec:algodesc}
The general
algorithm for \ac{adapt} and \ac{algo} is presented in Alg.~1. Note that the major difference between these methods
is the skipping of lines 7-10 for \ac{adapt}. For \ac{adapt}, the importance metric reads $w(\hat{A}_\mu,\ket{\Psi^{k}})_\textrm{ADAPT-VQE} = g_\mu$,
while for \ac{algo} we are using the importance metrics introduced earlier, i.e.,
$w(\hat{A}_\mu,~|\Psi^{(k)}\rangle)_\textrm{HG}~=~\alpha_\mu$
and~$w(\hat{A}_\mu,~|\Psi^{(k)}\rangle)_\textrm{HSCI}~=~\beta_\mu$.
Note that modifications for \ac{adapt}, for example TETRIS-ADAPT-VQE\cite{anastasiou_tetris-adapt-vqe_2022},
which adds more than one operator per iteration, are also applicable to \ac{algo}. However, we do not
expect the relative performance of the algorithms to differ when using these types of improvements since
the importance metrics are identical for the operators despite adding more than one operator per iteration.
Thus, standard implementations for \ac{adapt} and \ac{algo} are used.
\vspace*{0.5cm}
\includegraphics[scale=0.9]{fast_vqe_algo.pdf}
\vspace*{-3.5cm}
\subsubsection{Choice of operator pools}\label{sec:opchoice}
In general, any type of operator pool may be utilized. However, one-body and two-body
excitation operators are enough to parametrize an FCI wave function~\cite{evangelista_exact_2019}.
Since the quantum gates required for implementing $N$-body excitation operators increase rapidly with $N$, operator pools are
typically restricted to one-body and two-body excitation operators. According to Ref.~\cite{evangelista_exact_2019}
all possible many-body operators may be decomposed as one-body and two-body excitation operators, specifically as
infinite sequences of one- and two-body particle-hole operators. Particle-hole excitation operators
are excitation operators which annihilate electrons in occupied spin-orbitals in the \ac{hf} reference state and create
electrons in virtual spin-orbitals of the \ac{hf} reference.
In the original formulation of \ac{adapt}, the operator pool consisted of general excitations (particle-hole excitations
and excitations within the pure virtual-virtual
or occupied-occupied blocks) in the \ac{jw}
encoding\cite{jordan_uber_1928}. The resulting operator pools determine the scaling and convergence of the procedures.
Additionally, rather than using these physically motivated operator pools, one can build operator pools that are
computationally motivated. For example, several approximations have been suggested such as \ac{qeb_adapt_vqe}~\cite{yordanov_qubit-excitation-based_2021} and
spin-adapted ADAPT-VQE~\cite{tsuchimochi_adaptive_2022}. Recently, operator pools which consider qubit-space operators
were suggested~\cite{tang_qubit-adapt-vqe_2021}.
In this article, we will use particle-hole excitation operators in
the \ac{qeb_adapt_vqe} encoding since the primary task of this paper is to investigate importance metrics and not the
operators themselves.
\subsubsection{Systems and details}\label{sec:molecules}
Benchmarks of the algorithms are performed by calculating the ground state energy for $\text{H}_4$ and $\text{LiH}$
which are typically used to benchmark \ac{adapt} algorithms \cite{grimsley_adaptive_2019,romero_strategies_2018,ryabinkin_iterative_2019}.
In these calculations, the STO-3G basis set was used.
The molecular integrals were obtained using PySCF. The optimization of the wavefunction parameters in \ac{vqe}
is calculated with the L-BFGS-B method as implemented in Qiskit~\cite{Qiskit}. For all molecules, four types of
calculations were performed, one state vector simulation and three simulations
with finite sampling (100, 500 and 1000 shots per expectation value estimation). The optimization of the wave function
in the \ac{vqe} was performed using statevector simulations since we are restricting our study to the evaluation of importance
measures for operators. Thus, the method for re-using \ac{vqe} optimization measurements for \ac{algo} was not used
such that finite shot simulations were performed to estimate population of Slater determinants. Since the identical
number of operators must be sampled in the \ac{vqe} optimization for each algorithm, we do not expect the relative
comparison between the \ac{adapt} and \ac{algo} to differ in terms of \ac{vqe} optimization. The state vector
and finite shot calculations were performed in Qiskit. The state vector simulation serves as a benchmark for
infinite shots. All quantum simulations are compared to an FCI calculation for the same molecule/basis set
combination in PySCF. These results are presented in Sec.~\ref{sec:results}.
\section{Results} \label{sec:results}
\begin{figure*}
\caption{Convergence of \ac{adapt}.}
\label{fig:lih}
\end{figure*}
In this section, the results from the setup described in Sec.~\ref{sec:compdetails} are presented. We will conclude this section
with a discussion of the results.
\subsection{H4}
In Fig.~\ref{fig:h4}, we present the ground state calculations using \ac{adapt} and \ac{algo} with both importance
metrics, \ac{enlm} and \ac{dsgn}, for a linear
chain of $\text{H}_4$ in terms of the error relative to the FCI ground state energy. To an error of above $10^{-3}$ Hartree
with respect to FCI, the convergence in terms of the number of operators (parameters) added to the Ansatz is very similar
for all methods and numbers of shots per operator evaluation.
Beyond that point, the fastest convergence is observed for the state vector simulation for \ac{adapt} closely followed by
the state vector for \ac{enlm} and \ac{dsgn} and finite shot simulations for \ac{dsgn}. \ac{enlm} converges slower
for finite shot simulations.
The slowest convergence with the number of operators added is observed for finite shot simulations for \ac{adapt}. While \ac{dsgn} calculations with a finite amount of shots
are converged with about 25 parameters to an error of $10^{-9}$ Hartree, the precision for finite shot calculations using
\ac{adapt} is orders of magnitudes lower, at about $10^{-3}$ Hartree at the same point.\\
With respect to the resulting Ansatz depth, we observe that Ans{\"a}tze constructed with the order of operators
resulting from state vector simulations using the \ac{adapt} metric result in the most compact circuits, followed
by \ac{dsgn} and \ac{enlm}. Ans{\"a}tze constructed with finite shot simulations for \ac{adapt} are the least
compact.
The total amount of shots for \ac{adapt} and \ac{algo} are very different. Both \ac{enlm} and \ac{dsgn} converge
with a total number of shots about two orders of magnitude lower than the number of shots required for finite shot
simulations using \ac{adapt}. \ac{dsgn} requires fewer shots to obtain a given precision compared to \ac{enlm}.\\
\subsection{LiH}
Similar observations as for $\text{H}_{4}$ also hold true for the $\text{LiH}$ calculations presented in Fig.~\ref{fig:lih}, even though the
overall convergence is slower. There are some other features to be observed in the convergence for this system. For example,
the finite shot simulations for \ac{adapt} with 100 and 500 shots per operator evaluation showed no sign of convergence and remained on the level of
the \ac{hf} reference state. The finite shot simulation with 1000 shots per operator evaluation shows early signs of
convergence but is not able to go much below an energy difference of $10^{-2}$ Hartree. The \ac{adapt} state vector simulation, and all
simulations for \ac{dsgn} and \ac{enlm} converge to an energy difference of $10^{-3}$ Hartree at roughly the same rate, here the \ac{dsgn}
convergence flattens out, while the remaining calculations continue to converge at a similar rate. Beyond the addition of roughly 30 parameters
\ac{dsgn} gets a dramatic increase in precision while the other calculations start flattening out, displaying an unintuitive and seemingly
erratic behaviour of convergence. It is notable that \ac{dsgn} converges below the \ac{adapt} state vector simulation.\\
With respect to Ansatz compactness and the number of shots required, similar conclusions hold true, displaying the same overall tendencies
as observed for $\text{H}_4$ including the specific features described for the energy evaluation above.
\subsection{Discussion}
The dramatic difference in the number of shots between \ac{algo} and \ac{adapt} is due to
the excessive amount of shots necessary to measure the gradients of the operator pool, $\mathcal{A}$, of \ac{adapt}.
We can write the total amount of shots as iterations times shots for \ac{algo} whereas for \ac{adapt} it reads iterations
times shots times $|\mathcal{A}|$. Such a fact also provides another reason for the slow convergence of \ac{adapt}
when using a finite amount of shots as the evaluation of the gradient in Eq.~\eqref{eq:adaptgrad} is prone to sampling error.
In contrast, Eqns.~\eqref{eq:weight} and \eqref{eq:altmeasure} for \ac{enlm} and \ac{dsgn} are evaluated on a classical
computer from states that are generated by the measurement of the energy. However, it should be noted that sampling error also
affects \ac{enlm} and \ac{dsgn} as these methods are dependent on a representation of the
weights of the determinants in the current wave function $|\Psi^{(k)}\rangle$. For \ac{adapt}, more precise measurements
of the gradients are required in order to improve convergence, while more precise sampling of the Slater determinants
(diagonal elements of the Hamiltonian) becomes necessary for \ac{algo}. This is especially important when the electronic
structure becomes more correlated, i.e., when many determinants are required to describe the chemical system accurately, the
necessary sampling depth may become a challenge.\\
It must also be noted that none of the proposed metrics for selecting the next operator is optimal and that there is
room for improvement. For example, despite being the overall most competitive metric, \ac{dsgn} seems to select some sub-optimal
operators for $\text{LiH}$ below $10^{-3}$ Hartree, yet it converges at an order of magnitude below the error which the \ac{adapt} state vector
simulation achieves beyond 60 parameters and \ac{enlm}, which do not exhibit the same behaviour. Additionally, for $\text{LiH}$ with the \ac{enlm} metric
the finite shot simulations with fewer shots achieve higher precisions indicating that this metric does
not capture some important correlations in this particular system.\\
The results shown here suggest that for practical purposes the introduced heuristic metrics are good enough, since they
converge at a similar rate as the \ac{adapt} state vector simulations using a finite amount of shots. However, the
systems shown here are rather small and the basis sets are limited. With the two different systems investigated,
we have observed quite different detailed behaviours of convergence with no clear indication for why the ordering
behaves so differently with different metrics. A better theoretical understanding of the limits of this method and a more rigorous
derivation of metrics could make the convergence more robust across many systems and ensure that a similar convergence rate
is retained for more complicated molecules and larger basis sets.
\section{Conclusion} \label{sec:conclusion}
In this work, we have presented \ac{algo}, a method for selecting operators based on the populations
of Slater determinants in the wave function. We have introduced two different importance metrics \ac{dsgn} and \ac{enlm}
and compared them to \ac{adapt} in terms of the convergence to the FCI ground state energy. As was demonstrated, \ac{algo}
mitigates the significant measurement overhead for \ac{adapt} by utilizing information about the population
of Slater determinants in the wave function whereas \ac{adapt} must evaluate the expectation value
of gradient operators. For infinite shots, \ac{adapt} provides the most compact wavefunction in terms
of CNOT gates but with equal amount of parameters compared to \ac{algo}. For finite shot simulations, \ac{algo} yields more
compact wave functions with dramatically reduced execution times. Of the two introduced importance metrics \ac{dsgn}
converged most rapidly and resulted in more compact circuits compared to \ac{enlm}. However, we expect that a more systematic construction of importance metrics may
improve the performance and eliminate some erratic features seen, e.g., for $\text{LiH}$.
It remains to be seen how this method performs on real quantum hardware and in combination with other operator pools and other improvements
available for \ac{adapt}. This will be the topic of future investigations.
\begin{acknowledgments}
We thank Niels Kristian Kjærgård Madsen, Mads Bøttger Hansen, Mogens Dalgaard and Stig Elkjær Rasmussen from Kvantify ApS
and Mads Greisen Højlund, Rasmus Berg Jensen and Ove Christiansen from Aarhus University for fruitful discussions.
\end{acknowledgments}
\begin{conflicts}
NTZ is a co-founder of Kvantify Aps. The authors have filed a provisional patent application covering the
method described here.
\end{conflicts}
\appendix
\end{document} |
\begin{document}
\begin{abstract}
We analyze the dichotomy between {\em sectional-Axiom A flows} (c.f. \cite{memo})
and flows with points accumulated by periodic orbits of different indices.
Indeed, this is proved for $C^1$ generic flows whose singularities accumulated by
periodic orbits have codimension one.
Our result improves \cite{mp1}.
\end{abstract}
\maketitle
\section{Introduction}
\noindent
Ma\~n\'e discussed
in his breakthrough work \cite{M} whether
the {\em star property}, i.e., the property of being far away from systems with non-hyperbolic
periodic orbits, is sufficient to guarantee that a system be Axiom A.
Although this is true for diffeomorphisms \cite{h0} it is not
for flows as the geometric
Lorenz attractor \cite{abs}, \cite{gu}, \cite{GW} shows. On the other hand, if singularities are not allowed
then the answer turns out to be positive by \cite{gw}. Previously, Ma\~n\'e
connected the star property with
the nowadays called {\em Newhouse phenomenon} at least for surfaces. In fact, he proved that a $C^1$-generic
surface diffeomorphism either is Axiom A or displays infinitely many sinks or sources \cite{m}.
In the extension of this work on surfaces,
\cite{mp1} obtained
the following results about $C^1$-generic flows for closed 3-manifolds: Any $C^1$-generic
star flow is singular-Axiom A and, consequently,
any $C^1$-generic flow is singular-Axiom A or displays infinitely many
sinks or sources. The notion of {\em singular-Axiom A} was introduced in \cite{mpp}
inspired on the dynamical properties of both Axiom A flows and the
geometric Lorenz attractor.
It is then natural to investigate such generic phenomena in higher dimensions and the natural challenges are:
Is a $C^1$-generic star flow in a closed $n$-manifold
singular-Axiom A?
Is a $C^1$-generic vector field in a closed $n$-manifold singular-Axiom A, or does it have
infinitely many sinks or sources?
Unfortunately, what we know is that the second question has negative answer for $n\geq 5$
as counterexamples
can be obtained by suspending the diffeomorphisms in Theorem C of \cite{bv}
(but for $n=4$ the answer may be positive).
A new light
comes from the {\em sectional-Axiom A flows} introduced in \cite{memo}.
Indeed, the first author replaced the term singular-Axiom A by sectional-Axiom A
above in order to formulate the following conjecture
for $n\geq 3$ (improving that in p. 947 of \cite{gwz}):
\begin{conjecture}
\label{conj0}
$C^1$-generic star flows on closed $n$-manifolds are sectional-Axiom A.
\end{conjecture}
Analogously we can ask if a
$C^1$-generic vector field in a closed $n$-manifold is sectional-Axiom A or displays
infinitely many sinks or sources.
But now the answer is
negative not only for $n=5$, by the suspension of \cite{bv} as above,
but also for $n=4$ by \cite{st} and the suspension of certain diffeomorphisms \cite{mane}.
Nevertheless, in all these counterexamples, it is possible to observe the existence of
{\em points accumulated by hyperbolic periodic orbits of different Morse indices}.
Since such a phenomenon can also be observed
in a number of well-known examples of non-hyperbolic systems and
since, in dimension three, that phenomenon implies
existence of infinitely many sinks or sources,
it is possible to formulate the following dichotomy
(which, in virtue of Proposition \ref{p1}, follows from Conjecture \ref{conj0}):
\begin{conjecture}
\label{conj1}
$C^1$-generic vector fields $X$
satisfy (only) one of the following
properties:
\begin{enumerate}
\item
$X$ has a point accumulated by hyperbolic periodic orbits of different Morse indices;
\item
$X$ is sectional-Axiom A.
\end{enumerate}
\end{conjecture}
In this paper we prove Conjecture \ref{conj1} but in a case very close to the
three-dimensional one, namely,
when the {\em singularities accumulated by
periodic orbits have codimension one} (i.e. Morse index $1$ or $n-1$).
Observe that our result
implies the dichotomy in \cite{mp1}
since the assumption about the singularities is automatic for $n=3$.
It also implies Conjecture \ref{conj1} in large classes of vector fields as, for instance,
those whose singularities (if any) have codimension one.
As an application we prove Conjecture \ref{conj0} for star flows with spectral decomposition
as soon as the singularities accumulated by periodic orbits have codimension one.
Let us state our results in a precise way.
In what follows $M$ is a compact connected boundaryless Riemannian manifold of dimension $n\geq 3$
(a {\em closed $n$-manifold} for short).
If $X$ is a $C^1$ vector field in $M$ we will
denote by $X_t$ the flow generated by $X$ in $M$.
A subset $\Lambda\subset M$ is
{\em invariant} if
$X_t(\Lambda)=\Lambda$ for all $t\in I \!\! R$.
By a {\em closed orbit} we mean a periodic orbit or a singularity.
We define the {\em omega-limit set} of $p\in M$ by
$$
\omega(p)=\left\{x\in M:
x=\lim_{n\to\infty}X_{t_n}(p)
\mbox{ for some sequence }t_n\to\infty\right\}
$$
and call $\Lambda$
{\em transitive} if
$\Lambda=\omega(p)$ for some $p\in \Lambda$.
Clearly every transitive set is compact invariant.
As customary we call $\Lambda$ {\em nontrivial} if it does not reduce to a single orbit.
Denote by $\|\cdot\|$ and $m(\cdot)$ the norm and the minimal norm
induced by the Riemannian metric and by
$Det(\cdot)$ the jacobian operation.
A compact invariant set $\Lambda$ is {\em hyperbolic}
if there are a continuous invariant tangent bundle decomposition
$$
T_\Lambda M=\hat{E}^s_\Lambda\oplus E^X_\Lambda\oplus \hat{E}^u_\Lambda
$$
and positive constants $K,\lambda$
such that $E^X_\Lambda$ is the subbundle generated by $X$,
$$
\|DX_t(x)/\hat{E}^s_x\|\leq Ke^{-\lambda t}
\quad \mbox{ and }\quad m(DX_t(x)/\hat{E}^u_x)\geq K^{-1}e^{\lambda t},
$$
for all $x\in \Lambda$ and $t\geq 0$.
Sometimes we write $\hat{E}^{s,X}_x$, $\hat{E}^{u,X}_x$ to indicate dependence on $X$.
A closed orbit $O$ is hyperbolic if it is hyperbolic as a compact invariant set. In such a case
we define its {\em Morse index} $I(O)=dim(\hat{E}^s_O)$, where $dim(\cdot)$
stands for the dimension operation.
If $O$ reduces to a singularity $\sigma$, then we write
$I(\sigma)$ instead of $I(\{\sigma\})$ and say
that $\sigma$ has {\em codimension one} if $I(\sigma)=1$ or $I(\sigma)=n-1$.
It is customary to call hyperbolic closed orbit of maximal (resp. minimal) Morse index
{\em sink} (resp. {\em source}).
On the other hand, an invariant splitting
$T_\Lambda M=E_\Lambda\oplus F_\Lambda$
over $\Lambda$ is {\em dominated}
(we also say that $E_\Lambda$ {\em dominates} $F_\Lambda$) if there are positive constants
$K,\lambda$ such that
$$
\frac{\|DX_t(x)/E_x\|}{m(DX_t(x)/F_x)}\leq Ke^{-\lambda t},
\quad\quad\forall x\in \Lambda \mbox{ and }t\geq 0.
$$
In this work we agree to call a compact invariant set $\Lambda$
{\em partially hyperbolic} if there is a dominated splitting $T_\Lambda M=E^s_\Lambda\oplus E^c_\Lambda$
with {\em contracting} dominating subbundle $E^s_\Lambda$,
namely,
$$
\|DX_t(x)/E^s_x\|\leq Ke^{-\lambda t},
\quad\quad\forall x\in \Lambda \mbox{ and }t\geq 0.
$$
We stress however that this is not a standard usage
(specially due to the lack of symmetry in this definition).
Anyway, in such a case, we say that $\Lambda$ has {\em contracting dimension $d$}
if $dim(E^s_x)=d$ for all $x\in \Lambda$.
Moreover, we say that the central subbundle $E^c_\Lambda$ is {\em sectionally expanding} if
$$
dim(E^c_x)\geq 2 \quad\mbox{ and }\quad
|Det(DX_t(x)/L_x)|\geq K^{-1}e^{\lambda t},
\quad\quad\forall x\in \Lambda \mbox{ and }t\geq 0
$$
and all two-dimensional subspace
$L_x$ of $E^c_x$.
A {\em sectional-hyperbolic set} is a partially hyperbolic set whose singularities (if any) are hyperbolic
and whose central subbundle is sectionally expanding
(\footnote{Some authors use the term {\em singular-hyperbolic} instead.}).
Now we recall the concept of sectional-Axiom A flow \cite{memo}.
Call a point $p\in M$ {\em nonwandering} if for every neighborhood
$U$ of $p$ and every $T>0$ there is
$t>T$ such that $X_t(U)\cap U\neq\emptyset$.
We denote by $\Omega(X)$ the set of nonwandering points of $X$ (which
is clearly a compact invariant set).
We say that $X$ is an {\em Axiom A flow} if $\Omega(X)$ is both hyperbolic
and the closure of the closed orbits.
The so-called
{\em Spectral Decomposition Theorem} \cite{hk}
asserts that
the nonwandering set of an Axiom A flow $X$
splits into finitely many disjoint
transitive sets {\em with dense closed orbits} (i.e. with a dense subset of closed orbits)
which are hyperbolic for $X$.
This motivates the following definition:
\begin{definition}
A $C^1$ vector field $X$ in $M$ is called
{\em sectional-Axiom A flow} if there is a finite disjoint decomposition
$
\Omega(X)=\Omega_1\cup \cdots \cup \Omega_k
$
formed by transitive sets with dense periodic orbits
$\Omega_1,\cdots, \Omega_k$ such that, for all $1\leq i\leq k$,
$\Omega_i$ is either a hyperbolic set for $X$ or a sectional-hyperbolic set for $X$ or a sectional-hyperbolic
set for $-X$.
\end{definition}
Let $\mathcal{X}^1$ denote the space of $C^1$ vector fields $X$ in $M$.
Notice that it is a Baire space if equipped with the standard $C^1$ topology.
The expression {\em $C^1$-generic vector field} will mean a vector field in a
certain residual subset of $\mathcal{X}^1$.
We say that a point is {\em accumulated by periodic orbits},
if it lies in the closure of the union of the periodic orbits, and
{\em accumulated by hyperbolic periodic orbits of different Morse index}
if it lies simultaneously in the closure of the hyperbolic periodic orbits of Morse index
$i$ and $j$ with $i\neq j$.
With these definitions we can state our main result settling a special case of Conjecture \ref{conj1}.
\begin{main1}
A $C^1$-generic vector field $X$ for which
the singularities accumulated by periodic orbits
have codimension one satisfies (only) one of the following
properties:
\begin{enumerate}
\item
$X$ has a point accumulated by hyperbolic periodic orbits of different Morse indices;
\item
$X$ is sectional-Axiom A.
\end{enumerate}
\end{main1}
Standard $C^1$-generic results \cite{cmp} imply
that the sectional-Axiom A flows in the second alternative above
also satisfy the no-cycle condition.
The proof of our result follows that of Theorem A in \cite{mp1}.
However, we need a more direct approach bypassing Conjecture \ref{conj0}.
Indeed, we shall use some methods in \cite{mp1}
together with a combination of results \cite{glw}, \cite{gwz}, \cite{memo} for nontrivial transitive sets
(originally proved for robustly transitive sets).
\begin{definition}[\cite{a}]
We say that $X$ has
{\em spectral decomposition} if there is a finite partition $\Omega(X)=\Lambda_1\cup\cdots\cup\Lambda_l$ formed
by transitive sets $\Lambda_1,\cdots, \Lambda_l$.
\end{definition}
Theorem A will imply the following approach to Conjecture \ref{conj0}.
\begin{main2}
\label{the-coro}
A $C^1$-generic star flow with spectral decomposition and
for which the singularities accumulated by periodic orbits have codimension one
is sectional-Axiom A.
\end{main2}
\section{Proofs}
\label{sec2}
\noindent
Hereafter we fix a closed $n$-manifold $M$, $n\geq 3$,
$X\in \mathcal{X}^1$ and a
compact invariant set $\Lambda$ of $X$.
Denote by $Sing(X,\Lambda)$ the set of singularities of $X$ in $\Lambda$.
We shall use the following concept from \cite{glw}.
\begin{definition}
We say that $\Lambda$
has a definite index $0\leq Ind(\Lambda)\leq n-1$ if there are
a neighborhood $\mathcal{U}$ of $X$ in $\mathcal{X}^1$ and
a neighborhood $U$ of $\Lambda$ in $M$ such that
$I(O)=Ind(\Lambda)$ for
every hyperbolic periodic orbit $O\subset U$ of every vector field $Y\in \mathcal{U}$.
In such a case we say that $\Lambda$ is {\em strongly homogeneous (of index $Ind(\Lambda)$)}.
\end{definition}
It turns out that the strongly homogeneous property imposes certain constraints on the Morse indices of
the singularities \cite{gwz}. To explain this we use the concept of
{\em saddle value} of a hyperbolic singularity $\sigma$ of $X$ defined by
$$
\Delta(\sigma)=Re(\lambda)+Re(\gamma)
$$
where $\lambda$ (resp. $\gamma$) is the stable (resp. unstable) eigenvalue
with maximal (resp. minimal) real part
(c.f. \cite{sstc} p. 725).
Indeed, based on the Hayashi's connecting lemma \cite{h} and well-known results about
unfolding of homoclinic loops \cite{sstc}, Lemma 4.3 in \cite{gwz} proves that,
if $\Lambda$ is a robustly transitive set which is strongly
homogeneous with hyperbolic singularities,
then $\Delta(\sigma)\neq 0$
and, furthermore, $I(\sigma)=Ind(\Lambda)$ or $Ind(\Lambda)+1$ depending on whether
$\Delta(\sigma)<0$ or $\Delta(\sigma)>0$, $\forall \sigma\in Sing(X,\Lambda)$.
However, we can observe that the same is true for
nontrivial transitive sets (instead of robustly transitive sets) for the proof in
\cite{gwz} uses the connecting lemma only once.
In this way we obtain the following lemma.
\begin{lemma}
\label{43}
Let $\Lambda$ be a nontrivial transitive set which is strongly
homogeneous with singularities (all hyperbolic) of $X$.
Then, every $\sigma\in Sing(X,\Lambda)$ satisfies $\Delta(\sigma)\neq 0$
and one of the properties below:
\begin{itemize}
\item
If $\Delta(\sigma)<0$, then $I(\sigma)=Ind(\Lambda)$.
\item
If $\Delta(\sigma)>0$, then $I(\sigma)=Ind(\Lambda)+1$.
\end{itemize}
\end{lemma}
On the other hand, the following inequalities for strongly homogeneous sets $\Lambda$
were introduced in \cite{glw}:
\begin{equation}
\label{eq1}
I(\sigma)>Ind(\Lambda),
\quad\quad\forall \sigma\in Sing(X,\Lambda).
\end{equation}
\begin{equation}
\label{eq11}
I(\sigma)\leq Ind(\Lambda),
\quad\quad\forall \sigma\in Sing(X,\Lambda).
\end{equation}
We shall use the above lemma to present a special case where one of these
inequalities can be proved.
\begin{proposition}
\label{thCcc}
Let $\Lambda$ be a nontrivial transitive set which is strongly
homogeneous with singularities (all hyperbolic of codimension one) of $X$.
If $n\geq 4$ and $1\leq Ind(\Lambda)\leq n-2$, then $\Lambda$
satisfies either (\ref{eq1}) or (\ref{eq11}).
\end{proposition}
\begin{proof}
Otherwise
there are $\sigma_0,\sigma_1\in Sing(X,\Lambda)$
satisfying
$I(\sigma_0)\leq Ind(\Lambda)<I(\sigma_1)$.
Since both $\sigma_0$ and $\sigma_1$ have codimension one and $1\leq Ind(\Lambda)\leq n-2$
we obtain $I(\sigma_0)=1$ and $I(\sigma_1)=n-1$.
If $\Delta(\sigma_0)\geq 0$ then $I(\sigma_0)=Ind(\Lambda)+1$
by Lemma \ref{43} so $Ind(\Lambda)=0$ which contradicts $1\leq Ind(\Lambda)$.
Then $\Delta(\sigma_0)<0$ and so $Ind(\Lambda)=I(\sigma_0)=1$ by Lemma \ref{43}.
On the other hand, if
$\Delta(\sigma_1)<0$ then
$Ind(\Lambda)=I(\sigma_1)=n-1$ by Lemma \ref{43}.
As $Ind(\Lambda)=1$ we get $n=2$ contradicting $n\geq 4$.
Then $\Delta(\sigma_1)\geq0$ so $I(\sigma_1)=Ind(\Lambda)+1$ by Lemma \ref{43}
thus $n=3$ contradicting $n\geq 4$.
The proof follows.
\end{proof}
The importance of (\ref{eq1}) and (\ref{eq11}) relies on
the following result proved in \cite{glw}, \cite{gwz}, \cite{memo}:
A $C^1$ robustly transitive set $\Lambda$ with singularities
(all hyperbolic) which is strongly homogeneous satisfying (\ref{eq1}) (resp. (\ref{eq11}))
is sectional hyperbolic for $X$ (resp. $-X$).
However, we can observe that the same is true for
nontrivial transitive sets (instead of robustly transitive sets) as soon as $1\leq Ind(\Lambda)\leq n-2$.
The proof is similar to that in \cite{glw},\cite{gwz}, \cite{memo}
but using the so-called {\em preperiodic set} \cite{w}
instead of the natural continuation of a robustly transitive set.
Combining this with Proposition \ref{thCcc}
we obtain the following corollary in which the expression
{\em up to flow-reversing} means either for $X$ or $-X$.
\begin{corollary}
\label{thCc}
Let $\Lambda$ be a nontrivial transitive set which is strongly
homogeneous with singularities (all hyperbolic of codimension one) of $X$.
If $n\geq 4$ and $1\leq Ind(\Lambda)\leq n-2$, then $\Lambda$
is sectional-hyperbolic up to flow-reversing.
\end{corollary}
A direct application of this corollary is as follows.
We say that $\Lambda$
is {\em Lyapunov stable} for $X$
if for every neighborhood $U$ of it there is a
neighborhood $W\subset U$ of it such that $X_t(p)\in U$ for every $t\ge0$ and $p\in W$.
It was proved in Theorem C of \cite{mp1} that,
for $C^1$ generic three-dimensional star flows, every nontrivial Lyapunov stable set
with singularities is singular-hyperbolic.
We will need a similar result
for higher dimensional flows, but with the term singular-hyperbolic
replaced by sectional-hyperbolic.
The following will supply such a result.
\begin{corollary}
\label{thC}
Let $\Lambda$ be a nontrivial transitive set which is strongly
homogeneous with singularities (all hyperbolic of codimension one) of $X$.
If $n\geq 4$, $1\leq Ind(\Lambda)\leq n-2$ and $\Lambda$ is Lyapunov stable, then $\Lambda$
is sectional-hyperbolic for $X$.
\end{corollary}
\begin{proof}
By Corollary \ref{thCc} it suffices to prove that $\Lambda$ cannot be sectional-hyperbolic for $-X$.
Assume by contradiction that it is. Then,
by integrating the corresponding contracting subbundle,
we obtain a strong stable manifold $W^{ss}_{-X}(x)$, $\forall x\in \Lambda$.
But $\Lambda$ is Lyapunov stable for $X$ so $W^{ss}_{-X}(x)\subset \Lambda$,
$\forall x\in \Lambda$, contradicting p. 556 in \cite{momo}.
Then, $\Lambda$ cannot be sectional-hyperbolic for $-X$ and we are done.
\end{proof}
We also use Lemma \ref{43} to prove the following proposition.
\begin{proposition}
\label{c1}
Every nontrivial transitive sectional-hyperbolic set $\Lambda$
of a vector field $X$ in a closed $n$-manifold, $n\geq 3$, is strongly homogeneous
and satisfies $I(\sigma)=Ind(\Lambda)+1$, $\forall \sigma\in Sing(X,\Lambda)$.
\end{proposition}
\begin{proof}
Since transitiveness implies connectedness we have that the strong stable subbundle
$E^s_\Lambda$ of $\Lambda$ has constant dimension.
From this and the persistence of
the sectional-hyperbolic splitting
we obtain that $\Lambda$ is strongly homogeneous of index $Ind(\Lambda)=dim(E^s_x)$,
for $x\in \Lambda$.
Now fix a singularity $\sigma$. To prove $I(\sigma)=Ind(\Lambda)+1$ we only need to prove
that $\Delta(\sigma)>0$ (c.f. Lemma \ref{43}).
Suppose by contradiction that $\Delta(\sigma)\leq 0$.
Then, $\Delta(\sigma)<0$ and $I(\sigma)=Ind(\Lambda)$ by Lemma \ref{43}.
Therefore, $dim(E^s_\sigma)=dim(\hat{E}^s_\sigma)$ where $T_\sigma M=\hat{E}^s_\sigma\oplus \hat{E}^u_\sigma$
is the hyperbolic splitting of $\sigma$ (as hyperbolic singularity of $X$).
Now, let $W^s(\sigma)$ be the stable manifold of $\sigma$ and
$W^{ss}(\sigma)$ be the strong stable manifold of $\sigma$ obtained by integrating
the strong stable subbundle $E^s_\Lambda$ (c.f. \cite{hps}).
Notice that $W^{ss}(\sigma)\subset W^s(\sigma)$.
As
$dim(W^{ss}(\sigma))=dim(E^s_\sigma)=dim(\hat{E}_\sigma^s)=dim(W^s(\sigma))$ we get
$W^{ss}(\sigma)=W^s(\sigma)$.
But $\Lambda$ is nontrivial transitive so the dense orbit
will accumulate at some point in $W^s(\sigma)\setminus \{\sigma\}$.
As $W^{ss}(\sigma)=W^{s}(\sigma)$ such a point
must belong to $(\Lambda\cap W^{ss}(\sigma))\setminus \{\sigma\}$.
On the other hand, it is well known that $\Lambda\cap W^{ss}(\sigma)=\{\sigma\}$ (c.f. \cite{mp1})
so we obtain a contradiction which proves the result.
\end{proof}
We say that
$\Lambda$ is an {\em attracting set}
if there is a neighborhood $U$ of it such that
$$
\Lambda=\bigcap_{t>0}X_t(U).
$$
On the other hand, a {\em sectional-hyperbolic attractor}
is a transitive attracting set which is also a sectional-hyperbolic set.
An {\em unstable branch} of a hyperbolic singularity $\sigma$ of a vector field
is an orbit in $W^u(\sigma)\setminus\{\sigma\}$.
We say that $\Lambda$ has
{\em dense singular unstable branches} if every unstable branch of every hyperbolic singularity on it
is dense in $\Lambda$.
The following is a straightforward extension of Theorem D in \cite{mp1} to higher dimensions
(with similar proof).
\begin{proposition}
\label{thD}
Let $\Lambda$ be a Lyapunov stable sectional-hyperbolic set
of a vector field $X$ in a closed $n$-manifold, $n\geq 3$.
If $\Lambda$ has both singularities, all of Morse index $n-1$, and
dense singular unstable branches, then $\Lambda$ is a sectional-hyperbolic attractor of $X$.
\end{proposition}
Now we recall the star flow's terminology from \cite{w}.
\begin{definition}
\label{star-flow}
A {\em star flow} is a $C^1$ vector field which cannot be $C^1$-approximated
by ones exhibiting non-hyperbolic closed orbits.
\end{definition}
Corollary \ref{thC} together with propositions \ref{c1} and \ref{thD} implies
the key result below.
\begin{proposition}
\label{p1}
A $C^1$-generic vector field $X$ on a closed $n$-manifold, $\forall n\geq 3$,
without points accumulated by hyperbolic periodic orbits of different Morse indices
is a star flow. If, in addition, $n\geq 4$, then
the codimension one singularities of $X$ accumulated by periodic orbits
belong to a sectional-hyperbolic attractor up to flow-reversing.
\end{proposition}
\begin{proof}
We will use the following notation.
Given $Z\in \mathcal{X}^1$ and $0\leq i\leq n-1$ we denote by
$Per_i(Z)$ the union of the hyperbolic periodic orbits of Morse index $i$.
The closure operation will be denoted by $Cl(\cdot)$.
Since $X$ has no point accumulated by hyperbolic periodic orbits of different Morse indices
one has
\begin{equation}
\label{separa}
Cl(Per_i(X))\cap Cl(Per_j(X))=\emptyset,
\quad\quad
\forall i,j\in \{0,\cdots, n-1\}, \quad i\neq j.
\end{equation}
Then, since $X$ is $C^1$-generic, standard lower-semicontinuous arguments (c.f. \cite{cmp})
imply that there are
a neighborhood $\mathcal{U}$ of $X$ in $\mathcal{X}^1$ and
a pairwise disjoint collection of neighborhoods $\{U_i: 0\leq i\leq n-1\}$ such that
$Cl(Per_i(Y))\subset U_i$ for all $0\leq i\leq n-1$ and $Y\in \mathcal{U}$.
Let us prove that $X$ is a star flow.
When necessary we use the notation $I_X(O)$ to indicate dependence on $X$.
By contradiction assume that $X$ is not a star flow.
Then, there is a vector field $Y\in \mathcal{U}$
exhibiting a non-hyperbolic closed orbit $O$.
Since $X$ is generic we can assume by the Kupka-Smale Theorem \cite{hk} that
$O$ is a periodic orbit.
Unfolding the eigenvalues of $O$ in a suitable way we would
obtain two vector fields $Z_1,Z_2\in \mathcal{U}$ for which
$O$ is a hyperbolic periodic orbit with $I_{Z_1}(O)\neq I_{Z_2}(O)$,
$1\leq I_{Z_1}(O)\leq n-1$ and $1\leq I_{Z_2}(O)\leq n-1$.
Consequently, $O\subset U_i\cap U_j$ where $i=I_{Z_1}(O)$ and $j=I_{Z_2}(O)$
which contradicts that the collection $\{U_i: 0\leq i\leq n-1\}$ is pairwise disjoint.
Therefore, $X$ is a star flow.
Next we prove that $Cl(Per_i(X))$ is a strongly homogeneous set of index $i$, $\forall 0\leq i\leq n-1$.
Take $Y\in \mathcal{U}$ and a hyperbolic periodic orbit
$O\subset U_i$ of Morse index $I_Y(O)=j$. Then, $O\subset Cl(Per_j(Y))$ and so
$O\subset U_j$ from which we get $O\subset U_i\cap U_j$.
As the collection $\{U_i: 0\leq i\leq n-1\}$ is disjoint we conclude that
$i=j$ and so
every hyperbolic periodic orbit $O\subset U_i$ of every vector field $Y\in \mathcal{U}$
has Morse index $I_Y(O)=i$.
Therefore, $Cl(Per_i(X))$ is a strongly homogeneous set of index $i$.
Now, we prove that every codimension one singularity $\sigma$
accumulated by periodic orbits belongs to a sectional-hyperbolic attractor
up to flow-reversing.
More precisely, we prove that if $I(\sigma)=n-1$ (resp. $I(\sigma)=1$), then $\sigma$ belongs to a
sectional-hyperbolic attractor of $X$ (resp. of $-X$).
We only consider the case $I(\sigma)=n-1$ for the case $I(\sigma)=1$
can be handled analogously by just replacing $X$ by $-X$.
Since $I(\sigma)=n-1$ one has $dim(W^u(\sigma))=1$ and,
since $X$ is generic, we can assume that both
$Cl(W^u(\sigma))$ and $\omega(q)$ (for $q\in W^u(\sigma)\setminus\{\sigma\}$) are Lyapunov stable sets of
$X$ (c.f. \cite{cmp'}). As $\sigma$ is accumulated by periodic orbits we obtain from Lemma 4.2 in \cite{mp1} that
$Cl(W^u(\sigma))$ is a transitive set.
We claim that $Cl(W^u(\sigma))$ is strongly homogeneous.
Indeed, since $X$ is generic the General Density Theorem \cite{p} implies $\Omega(X)=Cl(Per(X)\cup Sing(X))$.
Denote by $Sing^*(X)$ the set of singularities accumulated by periodic orbits.
Then, there is a decomposition
$$
\Omega(X)=\left(\bigcup_{0\leq i\leq n-1}
Cl(Per_i(X))\right)\cup\left(\bigcup_{\sigma'\in Sing(X)\setminus Sing^*(X)}\{\sigma'\}\right)
$$
which is disjoint by (\ref{separa}).
In addition,
$Cl(W^u(\sigma))$ is transitive and so it is connected
and contained in $\Omega(X)$.
As $\sigma\in Sing^*(X)$ by hypothesis
we conclude that
$Cl(W^u(\sigma)) \subset Cl(Per_{i_0}(X))$ for some $0\leq i_0\leq n-1$.
But we have proved above that $Cl(Per_{i_0}(X))$ is a strongly homogeneous set of index $i_0$,
so, $Cl(W^u(\sigma))$ is also a
strongly homogeneous set of index $i_0$. The claim follows.
On the other hand,
$X$ is a star flow and so it has finitely many sinks and sources \cite{li}, \cite{pl}.
From this we obtain $1\leq i_0\leq n-2$ and so $1\leq Ind(Cl(W^u(\sigma)))\leq n-2$.
Summarizing, we have proved that $Cl(W^u(\sigma))$ is a transitive set with singularities,
all of them of codimension one, which is a Lyapunov stable strongly homogeneous set
of index $1\leq Ind(Cl(W^u(\sigma)))\leq n-2$.
As certainly $Cl(W^u(\sigma))$ is nontrivial Corollary \ref{thC}
applied to $\Lambda=Cl(W^u(\sigma))$ implies that $Cl(W^u(\sigma))$ is sectional-hyperbolic.
Once we have proved that $Cl(W^u(\sigma))$ is sectional-hyperbolic
we apply Proposition \ref{c1} to $\Lambda=Cl(W^u(\sigma))$ yielding
$I(\sigma')=i_0+1$, $\forall\sigma'\in Sing(X,Cl(W^u(\sigma)))$.
But $\sigma\in Cl(W^u(\sigma))$ and $I(\sigma)=n-1$ so
$i_0=n-2$ by taking $\sigma'=\sigma$ above. Consequently, $I(\sigma')=n-1$ and so
$dim(W^u(\sigma'))=1$, $\forall\sigma'\in Cl(W^u(\sigma))$.
This implies two things. Firstly that every singularity in $Cl(W^u(\sigma))$ has Morse index $n-1$ and,
secondly, since $X$ is generic, we can assume that
$Cl(W^u(\sigma))$ has dense unstable branches (c.f. Lemma 4.1 in \cite{mp1}).
So, $Cl(W^u(\sigma))$ is a sectional-hyperbolic attractor by Proposition \ref{thD}
applied to $\Lambda=Cl(W^u(\sigma))$. Since $\sigma\in Cl(W^u(\sigma))$ we obtain the result.
\end{proof}
The last ingredient is the proposition below whose
proof follows from Theorem B of \cite{gw} as in the proof of Theorem B p. 1582 of \cite{mp1}.
\begin{proposition}
\label{star=>sec-axa}
If $n\geq 3$, every $C^1$-generic star flow whose
singularities accumulated by periodic orbits
belong to a sectional-hyperbolic attractor up to flow-reversing is sectional-Axiom A.
\end{proposition}
\begin{proof}[Proof of Theorem A]
Let $X$ be a $C^1$-generic vector field on a closed $n$-manifold, $n\geq 3$,
all of whose singularities accumulated by periodic orbits
have codimension one.
Suppose in addition that there is no point accumulated by hyperbolic periodic orbits of different Morse indices.
Since $X$ is $C^1$-generic we have by Proposition \ref{p1} that $X$ is a star flow.
If $n=3$ then, since $X$ is generic, Theorem B in \cite{mp1} implies that $X$ is sectional-Axiom A.
If $n\geq 4$ then, by Proposition \ref{p1}, since the singularities accumulated
by periodic orbits have codimension one,
we have that all such singularities belong to a sectional-hyperbolic attractor
up to flow-reversing.
Then, $X$ is sectional-Axiom A by Proposition \ref{star=>sec-axa}.
\end{proof}
Now we move to the proof of Theorem B.
Hereafter we denote by $W^s_X(\cdot)$ and $W^u_X(\cdot)$ the
stable and unstable manifold operations \cite{hps} with emphasis on $X$. Notation $O(p)$ (or $O_X(p)$
to emphasize $X$) will indicate the orbit of $p$ with respect to $X$.
By a {\em periodic point} we mean a point belonging to a periodic orbit of $X$.
As usual the notation $\pitchfork$ will indicate the transversal intersection operation.
\begin{lemma}
\label{l1}
There exists a residual subset ${\mathcal R}\subset \mathcal{X}^1$ with the following property:
If $X\in {\mathcal R}$ has two periodic points $q_0$ and $p_0$ such that for any
neighborhood ${\mathcal U}$ of $X$ there exists $Y\in {\mathcal U}$ such that the continuations
of $q(Y)$ and $p(Y)$ of $q_0$ and $p_0$ respectively are defined and satisfy
$W^s_Y(O(q(Y)))\pitchfork W^u_Y(O(p(Y)))\neq \emptyset$. Then $X$ satisfies
$$W^s_X(O(q_0))\pitchfork W^u_X(O(p_0))\neq \emptyset.$$
\end{lemma}
\begin{proof}
Indeed, let $\{U_n\}$ be a countable basis of the topology of $M$. Now,
we define the set $A_{n,m}$ as the set of vector fields such that there
exist a periodic point $p$ in $U_n$ and a periodic point $q$ in $U_m$
such that $W^s(O(p))\pitchfork W^u(O(q))\neq \emptyset$. Observe that
$A_{n,m}$ is an open set.
Define $B_{n,m}=\mathcal{X}^1\setminus Cl(A_{n,m})$. Thus the set
$${\mathcal R}=\bigcap_{n,m=0}^{\infty} (A_{n,m}\cup B_{n,m})$$
is residual.
If $X$ belongs to ${\mathcal R}$ and satisfies the hypothesis then there
exist $n$ and $m$ such that $p_0\in U_n$ and $q_0\in U_m$. Moreover,
the hypothesis implies that $X\notin B_{n,m}$.
Thus $X\in A_{n,m}$ and the proof follows.
\end{proof}
We use this lemma to prove the following one.
\begin{lemma}
\label{l3}
A $C^1$ generic star flow with spectral decomposition
has no points accumulated by hyperbolic periodic orbits of different Morse indices.
\end{lemma}
\begin{proof}
Let ${\mathcal R}$ be the residual subset in Lemma \ref{l1}.
Suppose, by contradiction, that $X\in {\mathcal R}$ has spectral decomposition
but has a point accumulated by hyperbolic periodic orbits of different Morse indices.
Then, there exists $i\neq j$ such that
$Cl(Per_i(X))\cap Cl(Per_j(X))\neq \emptyset$. Without loss of generality we can assume $i<j$.
Take $x\in Cl(Per_i(X))\cap Cl(Per_j(X))$
so there are periodic orbits $O(p_0)$ (of index $i$) and $O(q_0)$ (of index $j$) arbitrarily close to $x$.
Clearly $x\in\Omega(X)$ and so there is a basic set $\Lambda$ in the spectral
decomposition of $X$ such that $x\in \Lambda$.
As the basic sets in the spectral decomposition are disjoint and
the orbits $O(p_0)$, $O(q_0)$ are close to $x$ (and belong to $\Omega(X)$)
we conclude that $O(p_0)\cup O(q_0)\subset \Lambda$.
Since $\Lambda$ is transitive, the connecting lemma \cite{h} implies that there exists $Y$
arbitrarily close to $X$ such that
$W^s_Y(O(q(Y))) \cap W^u_Y(O(p(Y)))\neq\emptyset$.
On the other hand, $j-i>0$ since $j>i$.
Moreover, $\operatorname{dim} (W^s_Y(O(q(Y))))=j+1$ and $\operatorname{dim} (W^u_Y(O(p(Y))))=n-i$
since $ind(O(q(Y)))=j$ and $ind(O(p(Y)))=i$ (resp.).
Then, $\operatorname{dim} (W^s_Y(O(q(Y))))+\operatorname{dim} (W^u_Y(O(p(Y))))=j+1+n-i>n$ and so
with another perturbation we can assume that the above intersection is transversal.
Since $X\in {\mathcal R}$ we conclude that
$$W^s_X(O(q_0))\pitchfork W^u_X(O(p_0))\neq \emptyset.$$
Now, using the connecting lemma again, there exists $Y$ close to $X$ with a heterodimensional cycle.
But this contradicts the non-existence of heteroclinic cycles for star flows
(c.f. Theorem 4.1 in \cite{gw}). The proof follows.
\end{proof}
\begin{proof}[Proof of Theorem B]
Apply Theorem A and Lemma \ref{l3}.
\end{proof}
\end{document} |
\begin{document}
\baselineskip=17pt
\title{Estimates for Character Sums with Various Convolutions}
\author[Brandon Hanson]{Brandon Hanson} \address{Pennsylvania State University\\
University Park, PA}
\email{bwh5339@psu.edu}
\date{}
\maketitle
\begin{abstract}
We provide estimates for sums of the form \[\left|\sum_{a\in A}\sum_{b\in B}\sum_{c\in
C}\chi(a+b+c)\right|\]
and
\[\left|\sum_{a\in A}\sum_{b\in B}\sum_{c\in
C}\sum_{d\in D}\chi(a+b+cd)\right|\]
when $A,B,C,D\subset \FF_p$, the field with $p$ elements and $\chi$ is a non-trivial multiplicative character modulo $p$.
\end{abstract}
\section{Introduction}
\sloppy
In analytic number theory, one is often concerned with estimating a bilinear sum
of the form \begin{equation}\label{bilinear}S=\sum_{\substack{1\leq m\leq M\\
1\leq n\leq N}}a_mb_nc_{m,n}\end{equation} where $a_m,\ b_n$ and $c_{m,n}$ are
complex numbers. The standard way to handle this sum is to apply the
Cauchy-Schwarz inequality so that \begin{align*}|S|^2&\leq \lr{\sum_{1\leq m\leq
M}|a_m|\left|\sum_{1\leq n\leq N}b_nc_{m,n}\right|}^2\\&\leq\lr{\sum_{1\leq
m\leq M}|a_m|^2}\lr{\sum_{1\leq n_1,n_2\leq N}b_{n_1}\bar{b_{n_2}}\sum_{1\leq m\leq
M}c_{m,n_1}\bar{c_{m,n_2}}}.\end{align*} \noindent One usually has that
\[\sum_{1\leq m\leq M}c_{m,n_1}\bar{c_{m,n_2}}\] is small when $n_1\neq n_2$, so
that the second factor is essentially dominated by the \emph{diagonal terms}
where $n_1=n_2$.
For instance, suppose $p$ is a prime number and denote by $\FF_p$ the field with $p$ elements. We
write $e_p(u)=e^{2\pi i u/p}$ and we denote by $\chi$ a multiplicative (or Dirichlet) character
modulo $p$.
Two well-known sums of the form (\ref{bilinear}) are
\begin{equation}\label{Paley}S_\chi(A,B)=\sum_{a\in A}\sum_{b\in
B}\chi(a+b)\end{equation} and
\begin{equation}\label{exponential}T_x(A,B)=\sum_{a\in A}\sum_{b\in
B}e_p(xab)\end{equation} where $A$ and $B$ are subsets of $\FF_p$.
By the triangle inequality, each of these sums is at most $|A||B|$, but we expect an upper bound of the form $|A||B|p^{-\eps}$ for some positive $\eps$. Indeed, using the Cauchy-Schwarz inequality as above, and
orthogonality of characters, one can prove that the sums (\ref{Paley}) and
(\ref{exponential}) are at most $(p|A||B|)^{1/2}$. Such an estimate is better
than the trivial estimate when $|A||B|>p$.
For the second sum, (\ref{exponential}), the bound $(p|A||B|)^{1/2}$ is quite
sharp. Indeed, if $A=B=\{n:1\leq n\leq \delta p^{1/2}\}$ for a small number
$\delta>0$, then products $ab$ with $a,b\in A$ are at most $\delta^2p$ (here we are identifying residues mod $p$ with integers between $0$ and $p-1$). It follows that $|e_p(ab)-1|\ll \delta^2$, so the summands in (\ref{exponential}) are
essentially constant and there is little cancellation. On the other
hand, it is conjectured that the first sum, (\ref{Paley}), should exhibit
cancellation even for small sets $A$ and $B$. From now on, we will call
(\ref{Paley}) the \emph{Paley sum}. The problem of obtaining good estimates for
it beyond the range $|A||B|>p$ appears to be quite hard.
In this article we investigate character sums which are related to the Paley
sum. First, we motivate its study with the following
question of S\'ark\"ozy:
\begin{Problem}[S\'ark\"ozy]\label{Sarkozy}
Are the quadratic residues modulo $p$ a sumset? That is, do there exist sets
$A, B\subset \FF_p$ each of size at least two, and with the set $A+B$ equal
to the set of quadratic residues?
\end{Problem}
One expects that the answer to the above question is no.
Heuristically, if $B$ contains two elements $b$ and $b'$ we would require that
$A+b$ and $A+b'$ are both subsets of the quadratic residues. But we expect that
$a+b$ is a quadratic residue half of the time, and we expect that $a+b'$ also be
a residue half of the time \emph{independent} of whether or not $a+b$ is a
quadratic residue. So if $A+B$ consisted entirely of quadratic residues then
many unlikely events must have occurred. For $A+B$ to consist of all the
quadratic residues would be shocking. The difficulty in this problem
is establishing the aforementioned independence.
In \cite{Sh}, Shkredov showed that the quadratic residues are never of the form
$A+A$. In more recent work, \cite{Sh2}, he also ruled out the case that $Q=A+B$ when $A$ is a multiplicative subgroup. By way of character sum estimates, Shparlinski, building on work of S\'ark\"ozy \cite{Sar} has proved that:
\begin{UnnumberedTheorem}[S\'ark\"ozy, Shparlinski]
If $A, B\subset \FF_p$, each of size at least two with the set $A+B$ equal to
the set of quadratic residues then $|A|$ and $|B|$ are within a constant factor of $\sqrt p$.
\end{UnnumberedTheorem}
\noindent As a consequence of this theorem and a combinatorial theorem of Ruzsa,
one can deduce that the quadratic residues are not of the form $A+B+C$ with each
set of size at least two.
S\'ark\"ozy's question is settled by improved bounds for
the Paley sum. Since each sum $a+b$ with $a\in A$ and $b\in B$ is a quadratic residue we
have \[|A||B|=\sum_{a\in A}\sum_{b\in B}\leg{a+b}{p}\leq (p|A||B|)^{1/2}.\] So
$|A||B|\leq p$ and this estimate just fails to resolve S\'ark\"ozy's problem.
So even improving upon the bound
$S_{\leg{\cdot}{p}}(A,B)\leq (p|A||B|)^{1/2}$ by a constant factor would be
worthwhile.
Breaking past this barrier, often called the \emph{square-root
barrier}, is hard. In practice, the usual way we estimate character sums is via
the method of completion. One way of doing so was outlined at the beginning
of this article. With this method, we replace a short sum over a subset
$A\subset \FF_p$ with a complete sum over the whole of $\FF_p$ which lets us use orthogonality. However some terms, the diagonal terms, exhibit no
cancellation at all and must be accounted for. By completing the sum we create
more diagonal terms, and the resulting loss becomes worse than trivial when the set $A$ is too small. One can dampen the loss from completion by using a higher moment (using H\"older's inequality as opposed to Cauchy-Schwarz). This was the
idea used by Burgess in his work on character sums in \cite{Bu1} and \cite{Bu2}, and it is still one of the only manoeuvres we have for
pushing past the square-root barrier. Still, with higher moments the
off-diagonal terms become more complicated and we must settle for worse
orthogonality estimates, which can be limiting.
In the case of the Paley sum, the square-root barrier is more than just a
consequence of our methods. Suppose $q=p^2$ so that $\FF_p$ is a subfield of
$\FF_q$ and each element in $\FF_p$ is the square of an element in $\FF_q$.
Since $\FF_p$ is closed under addition, any sum $a+b$ with $a,b\in\FF_p$ is also
a square in $\FF_q$. So, if we take $A=B=\FF_p$ and $\chi$ the quadratic
character on $\FF_q$, then there is no cancellation in $S_\chi(A,B)$. This
shows that, for the Paley sum over $\FF_q$, the bound $|S_\chi(A,B)|\leq
(q|A||B|)^{1/2}$ is essentially best possible. In order to improve the bound for the Paley
sum past the square-root barrier, we need to use an argument which is sensitive
to the fact that $\FF_p$ has no subfields. Such arguments are hard to come by
and this is perhaps the greatest source of difficulty in the problem.
There have been improvements to estimates for the Paley sum when the sets $A$
and $B$ have a particularly nice structure. In \cite{FI}, Friedlander and
Iwaniec improved the range in which one can obtain non-trivial estimates when
the set $A$ is an interval. This constraint was weakened by Mei-Chu Chang in \cite{C1}
to the case where $|A+A|$ is very small:
\begin{UnnumberedTheorem}[Chang]\label{Chang}
Suppose $A,B\subset \FF_p$ with $|A|,|B|\geq p^\alpha$ for some
$\alpha>\frac{4}{9}$ and such that $|A+A|\leq K|A|$. Then there is a constant
$\tau=\tau(K,\alpha)$ such that for $p$ sufficiently large and any non-trivial
character $\chi$, we have \[|S_\chi(A,B)|\leq
|A||B|p^{-\tau}.\]
\end{UnnumberedTheorem}
\noindent We remark that in light of Freiman's Theorem, which we will recall
shortly, the condition that $|A+A|$ has to be so small is still very
restrictive.
Often problems involving a sum of two variables,
called \emph{binary additive problems}, are hard. Introducing a third
variable gives rise to a \emph{ternary additive problem}, which may be
tractable. In this paper we establish non-trivial bounds beyond the square-root
barrier for character sums with more than two variables. These results are
different from those mentioned above since they hold for all sets which are sufficiently large
- there are no further assumptions made about their structure. Our first theorem
is the following.
\begin{Theorem}\label{TripleSum}
Given subsets $A,B,C\subset \FF_p$ each of size $|A|,|B|,|C|\geq \delta\sqrt p$,
for some $\delta>0$, and a non-trivial character $\chi$, then we have
\[\left|\sum_{a\in A}\sum_{b\in B}\sum_{c\in
C}\chi(a+b+c)\right|=o_\delta(|A||B||C|).\]
\end{Theorem}
There are analogous results for exponential sums. We mentioned above that the
sum $T_x(A,B)$ in (\ref{exponential}) also obeys the bound $|T_x(A,B)|\leq (p|A||B|)^{1/2}$.
While this bound may be sharp, Bourgain \cite{Bou} proved that with more
variables one can extend the range in which the estimate is non-trivial.
\begin{UnnumberedTheorem}[Bourgain]
There is a constant $C$ such that the following holds. Suppose $\delta>0$ and
$k\geq C\delta^{-1}$, then for $A_1,\ldots,A_k\subset \FF_p$ with $|A_i|\geq
p^\delta$ and $x\in\FF_p^\times$, we have \[\left|\sum_{a_1\in
A_1}\cdots\sum_{a_k\in A_k}e_p(xa_1\cdots
a_k)\right|<|A_1|\cdots|A_k|p^{-\tau}\] where $\tau>C^{-k}$.
\end{UnnumberedTheorem}
We cannot prove results of this strength. The reason is that one can play the
additive and multiplicative structures of the frequencies appearing in such
exponential sums off one another and then leverage the Sum-Product Phenomenon to deduce some
cancellation. The structure of multiplicative characters is not so nice and we
rely on Burgess' method instead.
In Theorem \ref{TripleSum}, we would prefer a bound of the form
$|S_\chi(A,B,C)|\leq |A||B||C|p^{-\tau}$ for some positive $\tau$. However,
the proof of Theorem \ref{TripleSum} relies on Chang's Theorem, which only allows one to
estimate $S_\chi(A,B)$ past the square-root barrier under the hypothesis that
$|A+A|\leq K|A|$ for some constant $K$. This hypothesis plays a crucial part in
the proof of her theorem because it allows for the use of Freiman's
Classification Theorem:
\begin{UnnumberedTheorem}[Freiman]
Suppose $A$ is a finite set of integers such that $|A+A|\leq K|A|$. Then there
is a generalized arithmetic progression $P$ containing
$A$ and such that $P$ is of dimension at most $K$ and $\log(|P|/|A|)\ll K^{c}$
for some absolute constant $c$.
\end{UnnumberedTheorem}
Using this classification theorem, one can make a change of variables $a\mapsto
a+bc$, which is the first step in a Burgess type argument. Freiman's Theorem is unable to accommodate the situation $|A+A|\leq |A|^{1+\delta}$, even for
small values of $\delta>0$, which is what is needed in order
to get a power saving in our bound for ternary character sums. To circumvent the
use of Freiman's Theorem, we can replace triple sums with sums of four
variables. By incorporating both additive and multiplicative convolutions we
arrive at sums of the form \[H_\chi(A,B,C,D)=\sum_{a\in A}\sum_{b\in
B}\sum_{c\in C}\sum_{d\in D}\chi(a+b+cd).\] In this way we have essentially
\emph{forced} a scenario where we can make use of the Burgess argument. By
introducing both arithmetic operations, we are able to weigh the additive
structure in one of the variables against the multiplicative structure of that
variable in order to use a Sum-Product estimate. Our second result is:
\begin{Theorem}\label{MixedSum}
Suppose $A,B,C,D\subset \FF_p$ are sets with $|A|,|B|,|C|,|D|>p^\delta$,
$|C|<\sqrt p$ and $|D|^4|A|^{56}|B|^{28}|C|^{33}\geq p^{60+\eps}$ for some
$\delta, \eps>0$. There is a constant $\tau>0$ depending only on $\delta$ and $\epsilon$ such that
\[|H_\chi(A,B,C,D)|\ll |A||B||C||D|p^{-\tau}.\] In the case that $|A|,|B|,|D|>p^\delta$,
$|C|\geq \sqrt p$ and $|D|^8|A|^{112}|B|^{56}\geq p^{87+\eps}$ then there is a
constant $\tau>0$ depending only on $\delta$ and $\epsilon$ such that \[|H_\chi(A,B,C,D)|\ll |A||B||C||D|p^{-\tau}.\]
\end{Theorem}
Theorem \ref{MixedSum} is simplified greatly when all sets in question are assumed to have roughly the same size:
\begin{Corollary}\label{MixedSum2}
Suppose $A,B,C,D\subset \FF_p$ are sets with $|A|,|B|,|C|,|D|>p^\delta$ with $\delta>\frac{1}{2}-\frac{1}{176}$. Then $|H_\chi(A,B,C,D)|\leq |A||B||C||D|p^{-\eps}$ for some $\eps>0$ depending only on $\delta$.
\end{Corollary}
\section{Background}
Here we recall facts concerning multiplicative characters over finite
fields and additive combinatorics. For details concerning character sums, we
refer to Chapters 11 and 12 of \cite{IK}. The reference \cite{TV} is extremely
helpful for all things additive combinatorial.
Multiplicative characters are the characters $\chi$ of the group $\FF_q^\times$
which are extended to $\FF_q$ by setting $\chi(0)=0$. In order to carry out the
proof of a Burgess-type estimate, we shall need Weil's bound for character sums
with polynomial arguments.
\begin{Theorem}[Weil]
Let $f\in\FF_p[x]$ be a polynomial with $r$ distinct roots over $\bar{\FF_p}$.
Then if $\chi$ has order $l$ and provided $f$ is not an $l$'th power over
$\bar{\FF_p}[x]$ we have
\[\left|\sum_{x\in\FF_p}\chi(f(x))\right|\leq r\sqrt p.\]
\end{Theorem}
\begin{Lemma}\label{MomentBound}
Let $k$ be a positive integer and $\chi$ a non-trivial multiplicative character.
Then for any subset $A\subset\FF_p$ we have \[\sum_{x\in\FF_p}\left|\sum_{a\in
A}\chi(a+x)\right|^{2k}\leq |A|^{2k}2k\sqrt p+(2k|A|)^kp.\]
\end{Lemma}
\begin{proof}
Expanding the $2k$'th power and using that
$\bar\chi(y)=\chi(y^{p-2})$, we have
\begin{align*}
&\sum_{a_1,\ldots,a_{2k}\in
A}\sum_x\chi((x-a_1)\cdots(x-a_k)(x-a_{k+1})^{p-2}\cdots(x-a_{2k})^{p-2})\\
&=\sum_{\aa\in
A^{2k}}\sum_x\chi(f_{\aa}(x)).
\end{align*}
Here $f_{\aa}(t)$ is the
polynomial
\[f_{\aa}(X)=(X-a_1)\cdots(X-a_k)(X-a_{k+1})^{p-2}\cdots(X-a_{2k})^{p-2}.\]
By Weil's theorem, $\left|\sum_x\chi(f_{\aa}(x))\right|\leq 2k\sqrt p$ unless $f_{\aa}$ is an
$l$'th power, where $l$ is the order of $\chi$. If any of the roots $a_i$ of
$f_{\aa}$ is distinct from all other $a_j$ then it occurs in the above
expression with multiplicity 1 or $p-2$. Both $1$ and $p-2$ are prime to $l$
since $l$ divides $p-1$. Hence $f_{\aa}$ is an $l$'th power only provided all of its roots can be grouped into pairs. So, for all but at most
$\frac{(2k)!}{2^k k!}\leq (2k|A|)^k$ vectors $\aa\in A^{2k}$, we have the
estimate $2k\sqrt p$ for the inner sum. For the remaining $\aa$ we bound the sum
trivially by $p$. Hence the upper bound \[\sum_{x\in\FF_p}\left|\sum_{a\in
A}\chi(a+x)\right|^{2k}\leq |A|^{2k}2k\sqrt p+(2k|A|)^kp.\]
\end{proof}
We now turn to results from additive combinatorics. Let $A$ and $B$ be finite
subsets of an abelian group $G$. The \emph{additive energy} between $A$ and $B$
is the quantity \[E_+(A,B)=\left|\{(a,a',b,b')\in A\times A\times B\times B:a+b=a'+b'\}\right|.\]
One of the fundamental results on additive energy is the
Balog-Szemer\'edi-Gowers Theorem, which we use in the following form.
\begin{Theorem}[Balog-Szemer\'edi-Gowers]\label{BSG}
Suppose $A$ is a finite subset of an abelian group $G$ and \[E_+(A,A)\geq
\frac{|A|^3}{K}.\] Then there is a subset
$A'\subset A$ of size $|A'|\gg\frac{|A|}{K(\log(e|A|))^2}$ with \[|A'-A'|\ll
K^4\frac{|A'|^3(\log (|A|))^8}{|A|^2}.\] The implied constants are absolute.
\end{Theorem}
This version of the Balog-Szemer\'edi-Gowers Theorem has very good explicit
bounds, and is due to Bourgain and Garaev. The proof is essentially a combination
of the Lemmas 2.2 and 2.4 from \cite{BG}. It was communicated to us by O.
Roche-Newton. Since we prefer to work with sumsets rather than difference sets
we have the following lemma which is a well-known application of Ruzsa's
Triangle Inequality.
\begin{Lemma}\label{sumset}
Suppose $A$ is a finite subset of an abelian group $G$. Then \[|A-A|\leq
\lr{\frac{|A+A|}{|A|}}^2|A|.\]
\end{Lemma}
We will prefer to work with the energy between a set and itself
rather than between distinct sets, so we need the following fact, which is a simple consequence of the Cauchy-Schwarz inequality.
\begin{Lemma}\label{triangle}
For sets $A$ and $B$ we have \[E_+(A,B)^2\leq E_+(A,A)E_+(B,B)\]
\end{Lemma}
We now record a general version of Burgess' argument, which is an
application of H\"older's inequality and Weil's bound. This proof is distilled
from the proof of Burgess's estimate in Chapter 12 of \cite{IK}.
\begin{Lemma}
\label{BasicBurgess}
Let $A,B,C\subset \FF_p$ and suppose $\chi$ is a non-trivial multiplicative
character. Define \[r(x)=|\{(a,b)\in A\times B:ab=x\}|.\] Then for any
positive integer $k$, we have the estimate
\begin{align*}
\sum_{x\in\FF_p}r(x)\left|\sum_{c\in
C}\chi(x+c)\right|&\leq(|A||B|)^{1-1/k}E_\times(A,A)^{1/4k}E_\times(B,B)^{1/4k}\cdot\\
&\cdot\lr{|C|^{2k}2k\sqrt p+(2k|C|)^kp}^{1/2k}.
\end{align*}
\end{Lemma}
\begin{proof}
Call the left hand side above $S$. Applying H\"older's
inequality \begin{align*}
|S|&\leq\lr{\sum_{x\in\FF_p}r(x)}^{1-1/k}\lr{\sum_{x\in\FF_p}r(x)^2}^{1/2k}\lr{\sum_{x\in\FF_p}\left|\sum_{c\in
C}\chi(x+c)\right|^{2k}}^{1/2k}\\
&=T_1^{1-1/k}T_2^{1/2k}T_3^{1/2k}.
\end{align*}
Now $T_1$ is precisely $|A||B|$ and $T_2$ is the multiplicative energy
$E_\times(A,B)$. By Lemma \ref{triangle}, we have
\[E_\times(A,B)\leq\sqrt{E_\times(A,A)E_\times(B,B)}.\]
The estimate for $T_3$ is immediate from Lemma \ref{MomentBound}.
\end{proof}
The last ingredient in our proof is the most crucial. Sum-Product estimates are
sensitive to prime fields and allow us to break the square-root barrier. We
record the following estimate of Rudnev.
\begin{Theorem}[Rudnev]
\label{EnergyEstimate}
Let $A\subset \FF_p$ satisfy $|A|<\sqrt p$. Then \[E_\times(A,A)\ll
|A||A+A|^\frac{7}{4}\log |A|.\]
\end{Theorem}
This is not the state of the art for Sum-Product theory in $\FF_p$, which at the time of this writing is found in
\cite{RNRS}, but the above estimate is more readily applied to our situation. Moreover, the strength of the Sum-Product estimates is not the bottleneck for proving non-trivial character sum estimates in a wider range (avoiding completion is).
\section{Ternary sums}\label{Triple}
We begin this section by giving a simple estimate which is non-trivial past the
square-root barrier provided we can control certain additive energy.
\begin{Lemma}\label{energy}
Given subsets $A,B,C\subset \FF_p$ and a non-trivial character $\chi$ we have
\[|S_\chi(A,B,C)|\leq\sqrt{p|A|E_+(B,C)}.\]
\end{Lemma}
\begin{proof}
Let $r(x)$ be the number of ways in which $x\in\FF_p$ is a sum $x=b+c$ with
$b\in B$ and $c\in C$. Then
\begin{align*}|S(A,B,C)|&\leq\sum_{x\in\FF_p}r(x)\left|\sum_{a\in
A}\chi(a+x)\right|\\ &\leq
\lr{\sum_{x\in\FF_p}r(x)^2}^{1/2}\lr{\sum_{x\in\FF_p}\left|\sum_{a\in
A}\chi(a+x)\right|^2}^{1/2}.\end{align*} It is straightforward to check that the
first factor above is $\lr{E_+(B,C)}^{1/2}$ and as before, the second factor is
$\lr{p|A|}^{1/2}$.
\end{proof}
\begin{Lemma}\label{arg}
Let $z_1,\ldots,z_n$ be complex numbers with $|\arg z_1-\arg z_j|\leq \delta$.
Then \[|z_1+\ldots +z_n|\geq (1-\delta)(|z_1|+\ldots+|z_n|).\]
\end{Lemma}
\begin{proof}
We have
\begin{align*}|z_1|+\ldots+|z_n|&=\theta_1z_1+\ldots+\theta_nz_n\\&=\theta_1(z_1+\ldots+z_n)+(\theta_2-\theta_1)z_2+\ldots+(\theta_n-\theta_1)z_n\end{align*}
for some complex numbers $\theta_k$ of modulus 1 with $|\theta_1-\theta_j|\leq
\delta$.
Thus by the triangle inequality \[|z_1|+\ldots+|z_n|\leq
|z_1+\ldots+z_n|+\delta(|z_2|+\ldots+|z_n|)\] and the result follows.
\end{proof}
We are now able to prove Theorem \ref{TripleSum}. Ignoring technical details for the moment, either we are in a situation where Lemma \ref{energy} improves upon the trivial
estimate, or else we can appeal to the Balog-Szemer\'edi-Gowers Theorem and
deduce that $A$ has a subset with small sumset. In the latter case we can make
use of Chang's Theorem and also arrive at a non-trivial estimate, even saving a
power of $p$.
Unfortunately, this second scenario does not come into play until one of the
sets has a lot of additive energy. This means that the saving from Lemma \ref{energy}
will become quite poor before we are rescued by Chang's estimate. We proceed
with the proof proper.
\begin{proof}[Proof of Theorem \ref{TripleSum}] Suppose, by way of
contradiction, that the theorem does not hold. This means that there is some
positive constant $\eps>0$ such that for $p$ arbitrarily large, we have sets
$A,B,C\subset\FF_p$ with $|A|,|B|,|C|\geq \delta\sqrt{p}$, and a non-trivial
character $\chi$ of $\FF_p^\times$ satisfying
\[|S_\chi(A,B,C)|\geq\eps|A||B||C|.\] It follows that
\[\eps|A||B||C|\leq\sum_{a\in A}|S_\chi(B,a+C)|.\] If we let \[A'=\{a\in
A:|S_\chi(B,a+C)|\geq \frac{\eps}{2}|B||C|\}\] then
\[\frac{\eps}{2}|A||B||C|\leq \sum_{a\in A'}|S_\chi(B,a+C)|\] and $|A'|\geq
|A|\eps/2$. Now by the same argument as in the proof of Lemma \ref{energy}, we
must have \[\frac{\eps^2}{4}|A|^2|B|^2|C|^2\leq p|C|E_+(A',B)\leq
p|C|E_+(A',A')^{1/2}E_+(B,B)^{1/2},\] the last inequality being a consequence of
Lemma \ref{triangle}. So, using that $|A|,|B|,|C|\geq \delta\sqrt p$ and
$E_+(B,B)\leq |B|^3$, we have \[E_+(A',A')\geq \frac{\eps^4\delta^4}{16}|A'|^3\]
and so by Theorem \ref{BSG} and Lemma \ref{sumset} we can find a subset
$A''\subset A'$, with size at least $(\eps\delta)^{t}\sqrt p$ and such that
$|A''+A''|\leq (\eps\delta)^{-t}|A''|$ for some $t=O(1)$. Now since $A''\subset
A'$, we have \[\frac{\eps}{2}|A''||B||C|\leq \sum_{a\in A''}|S_\chi(B,a+C)|.\]
By the pigeon-hole principle, after passing to a subset $A'''$ of $A''$ of size
$|A''|/16$, we can assume that the complex numbers $S_\chi(B,a+C)$ all have
argument within
$\frac{1}{2}$ of each other. Thus, by Lemma \ref{arg}, we have
\[\frac{\eps}{4}|A'''||B||C|\leq \left|S_\chi(A''',B,C)\right|,\] we have
$|A'''|\geq (\eps\delta)^{t}\sqrt
p/16$, and we have \[|A'''+A'''|\leq|A''+A''|\leq (\eps\delta)^{-t}|A''|\leq
16(\eps\delta)^{-t}|A'''|.\] However, by the triangle inequality, this implies
that \[\frac{\eps}{4}|A'''||B|\leq \max_{c\in
C}\left|S_\chi(A''',B+c)\right|.\] This is in clear violation of Theorem
\ref{Chang} provided $p$ is sufficiently large in terms of $\delta$ and $\eps$.
Thus we have arrived at the desired contradiction.
\end{proof}
\section{Mixed quaternary sums}\label{Mixed}
We now turn to the estimation of the sums $H_\chi(A,B,C,D)$.
First we consider an auxiliary ternary character sum with a
multiplicative convolution. \[M_\chi(A,B,C)=\sum_{a\in A}\sum_{b\in B}\sum_{c\in
C}\chi(a+bc).\] We can bound $M_\chi$ in terms of the \emph{multiplicative energy}
\[E_\times(X,Y)=|\{(x_1,x_2,y_1,y_2)\in X\times X\times Y\times
Y:x_1y_1=x_2y_2\}|.\] As before, this satisfies the bound
\[E_\times(X,Y)^2\leq E_\times(X,X)E_\times(Y,Y).\]
Now, using Sum-Product estimates, if the sets had enough additive structure, we could
bound the multiplicative energy non-trivially and make an improvement.
This is essentially Burgess' argument, though he did not use Sum-Product theory;
rather, since he was working with arithmetic progressions, the multiplicative
energy could be bounded directly.
By fixing one element in the sum $H_\chi(A,B,C,D)$, we can view
it as a ternary sum in two different ways. First,
\[H_\chi(A,B,C,D)=\sum_{d\in D}S_\chi(A,B,d\cdot C)\] where $d\cdot C$ is the
dilate of $C$ by $d$. We can use Lemma \ref{energy} to bound this sum
non-trivially whenever we can bound $E_+(C,C)$ non-trivially. If not, we can write
\[H_\chi(A,B,C,D)=\sum_{a\in A}M_\chi(a+B,C,D)\] instead and try to bound this
non-trivially using Lemma \ref{BasicBurgess}, which we can do if $E_\times(C,C)$
is smaller than $|C|^3$. By making some simple manipulations to $H_\chi$ and using a
sum-product estimate, we will be able to guarantee one of these facts holds.
Before presenting our proof, we mention that A. Balog has communicated to us a forthcoming result with T. Wooley which asserts:
\begin{UnnumberedTheorem}
There is a positive $\delta$ such that any $X\subset \FF_p$ can be decomposed as $X=Y\cup Z$ with $E_+(Y,Y)\leq |Y|^{3-\delta}$ and $E_\times(Z,Z)\leq |Z|^{3-\delta}$.
\end{UnnumberedTheorem}
The proof of this result uses ideas similar to those in our proof of Theorem \ref{MixedSum}, and implies a non-trivial estimate for $H_\chi$. Indeed, decomposing $C=Y\cup Z$ as in the theorem,
\[|H_\chi(A,B,C,D)|\leq |H_\chi(A,B,Y,D)|+|H_\chi(A,B,Z,D)|.\] Estimating each of these sums as we mentioned above gives a non-trivial bound for $|H_\chi(A,B,C,D)|$.
Now we proceed to our proof of Theorem \ref{MixedSum}.
\begin{proof}[Proof of Theorem \ref{MixedSum}]
Let $2\leq k \ll \log p$ be a (large) parameter. First we handle the
case $|C|<\sqrt p$. Let us write \[|H_\chi(A,B,C,D)|=\Delta|A||B||C||D|\] so
that our purpose is to estimate $\Delta$. Let \[C_1=\left\{c\in C:|S_\chi(A,B,c\cdot
D)|\geq\frac{\Delta|A||B||D|}{2}\right\}.\] We have that for any $C_2\subset C_1$
\[\frac{|C_2|}{2|C|}|H_\chi(A,B,C,D)|= |C_2|\frac{\Delta|A||B||D|}{2}\leq \sum_{c\in C_2}|S_\chi(A,B,c\cdot D)|,\] and
using that the inner quantities are at most $|A||B||D|$, we also have \[|C_1|\geq
\frac{\Delta}{2}|C|.\] Now, passing to a subset $C_2$ of $C_1$ of size at
least \[|C_2|\geq \frac{|C_1|}{16}\geq \frac{\Delta}{32}|C|,\] we can assume that the
complex numbers $S_\chi(A,B,c\cdot D)$ with $c\in C_2$ all have arguments within
$\frac{1}{2}$ of each other, so that by Lemma \ref{arg} we have
\begin{equation}\label{lowerBound}
\frac{|C_3|}{4|C|}|H_\chi(A,B,C,D)|\leq \left|\sum_{c\in
C_3}S_\chi(A,B,c\cdot D)\right|=|H_\chi(A,B,C_3,D)|
\end{equation}
whenever $C_3$ is a subset of $C_2$.
In particular, if $C_3=C_2$ we have
\[\frac{\Delta^2}{128}|A||B||C||D|\leq\frac{|C_2|}{4|C|}|H_\chi(A,B,C,D)|\leq
\sum_{d\in D}|S_\chi(A,B,d\cdot C_2)|.\] Now in view of Lemma \ref{energy}, we
see that \begin{align*}\frac{\Delta^2}{128}|A||B||C||D|&\leq|D|\max_{d\in
D}\sqrt{p|A|E_+(B,d\cdot C_2)}\\&\leq
\sqrt{p}|D||A|^{1/2}|B|^{3/4}E_+(C_2,C_2)^{1/4},\end{align*} having bounded $E_+(B,B)$ trivially by $|B|^3$. Thus
\[E_+(C_2,C_2)\geq \frac{\Delta^8}{128^4}|A|^2|B||C|^4p^{-2}\geq
\lr{\frac{\Delta^8}{128^4}|A|^2|B||C|p^{-2}}|C_2|^3.
\] For convenience, write $K^{-1}=\frac{\Delta^8}{128^4}|A|^2|B||C|p^{-2}$. By
Theorem \ref{BSG} there is a subset $C_3\subset C_2$ of size at least
$\frac{|C_2|}{K(\log p)^2}$ and such that \[|C_3-C_3|\ll
K^4\frac{|C_3|^2(\log p)^8}{|C_2|^2}|C_3|.\] In particular, by Theorem
\ref{EnergyEstimate} we have \begin{align*}
E_\times(C_3,C_3)&\ll
|C_3|K^7\lr{\frac{|C_3|^2(\log p)^8}{|C_2|^2}}^{7/4}|C_3|^{7/4}\log
p\\&=K^7|C_3|^{25/4}|C_2|^{-7/2}(\log p)^{15}.
\end{align*}
Inserting this into equation (\ref{lowerBound}), we get
\begin{align*}\frac{\Delta}{4}|A||B||C_3||D|&=\frac{|C_3|}{4|C|}|H_\chi(A,B,C,D)|\\&\leq
|H_\chi(A,B,C_3,D)|\\&\leq\sum_{a\in A}|M_\chi(a+B,C_3,D)|.\end{align*} Next we
apply Lemma \ref{BasicBurgess} to obtain that
\begin{multline*}\frac{\Delta}{4}|A||B||C_3||D|\ll
|A|(|D||C_3|)^{1-\frac{1}{k}}(E_\times(D,D)E_\times(C_3,C_3))^{1/4k}\times\\\times\lr{|B|^{2k}2k\sqrt
p+(2k|B|)^kp}^{1/2k}\end{multline*} which implies (after bounding $E_\times(D,D)$
trivially by $|D|^3$) \[\Delta^{4k} \ll|D|^{-1}|C_3|^{-4}
E_\times(C_3,C_3)\lr{2k\sqrt p+(2k|B|^{-1})^kp}^2.\]
Since $2\leq k\ll \log p$ and $|B|\geq p^\delta$, the final factor is at
most $O(p(\log p)^{2k})$ as long as $k>\frac{1}{2\delta}$, and after inserting
the upper bound for $E_\times(C_3,C_3)$ we have \[\Delta
^{4k}\ll|D|^{-1}
K^7|C_3|^{9/4}|C_2|^{-7/2}(\log
p)^{2k+15}p.\]
Now we substitute $K^{-1}=\frac{\Delta^8}{128^4}|A|^2|B||C|p^{-2}$ and see
\[\Delta^{4k+56}\ll|D|^{-1}|A|^{-14}|B|^{-7}|C|^{-7}|C_3|^{9/4}|C_2|^{-7/2}(\log
p)^{2k+15}p^{15}.\]
Bounding $|C_3|\leq |C_2|$ and $|C_2|\gg\Delta|C|$ we get
\[\Delta^{4k+\frac{229}{4}}\ll|D|^{-1}|A|^{-14}|B|^{-7}|C|^{-\frac{33}{4}}(\log
p)^{2k+15}p^{15}.\] Upon taking $4k$'th roots we have
\[\Delta^{1+229/16k}\ll
\lr{|D|^{-1}|A|^{-14}|B|^{-7}|C|^{-\frac{33}{4}}p^{15}}^{1/4k}(\log
p)^{1/2+15/4k}.\] Since
\[|D|^4|A|^{56}|B|^{28}|C|^{33}\geq p^{60+\eps},\] the quantity in brackets on
the right is at most $p^{-\eps/4}$. This shows that we must have
$\Delta<p^{-\tau}$ for some $\tau >0$ depending only on $\eps$ and $\delta$.
This is because we only needed $k$ to be sufficiently large in terms of
$\delta$.
If $|C|>\sqrt p$ then we
can break $C$ into a disjoint union of $m\approx \frac{|C|}{\sqrt p}$ sets
$C_1,\ldots, C_m$ of size at most $\sqrt p$.
Then \[|H_\chi(A,B,C,D)|\leq\sum_{j}|H_\chi(A,B,C_j,D)|.\] We obtain a
savings of $p^{-\tau}$ for each $H_\chi(A,B,C_j,D)$ and hence for
$H_\chi(A,B,C,D)$ provided
\[|D|^4|A|^{56}|B|^{28}|C_j|^{33}\gg|D|^4|A|^{56}|B|^{28}p^{33/2}\geq
p^{60+\eps}\] which is guaranteed by hypothesis (with $2\eps$ in place of
$\eps$).
\end{proof}
\end{document} |
\begin{document}
\title[]{ON STRONG $r$-HELIX SUBMANIFOLDS AND SPECIAL CURVES}
\author{Evren Z\i plar}
\address{Department of Mathematics, Faculty of Science, University of
Ankara, Tando\u{g}an, Turkey}
\email{evrenziplar@yahoo.com}
\urladdr{}
\author{Ali \c{S}enol}
\address{Department of Mathematics, Faculty of Science, \c{C}ank\i r\i\
Karatekin University, \c{C}ank\i r\i , Turkey}
\email{asenol@karatekin.edu.tr}
\author{Yusuf Yayl\i }
\address{Department of Mathematics, Faculty of Science, University of
Ankara, Tando\u{g}an, Turkey}
\email{yayli@science.ankara.edu.tr}
\thanks{}
\urladdr{}
\date{}
\subjclass[2000]{ \ 53A04, 53B25, 53C40, 53C50.}
\keywords{Strong $r$-helix submanifold; Line of curvature; Geodesic curve;
Slant helix.\\
Corresponding author: Evren Z\i plar, e-mail: evrenziplar@yahoo.com}
\thanks{}
\begin{abstract}
In this paper, we investigate special curves on a strong $r$-helix
submanifold in Euclidean $n$-space $E^{n}$. Also, we give the important
relations between strong $r$-helix submanifolds and the special curves such
as line of curvature, geodesic and slant helix.
\end{abstract}
\maketitle
\section{Introduction}
In differential geometry of manifolds, a helix submanifold of $IR^{n}$ with
respect to a fixed direction $d$ in $IR^{n}$ is defined by the property that
tangent planes make a constant angle with the fixed direction $d$ (helix
direction) in [5]. Di Scala and Ruiz-Hern\'{a}ndez have introduced the
concept of these manifolds in [5]. Besides, the concept of strong $r$-helix
submanifold of $IR^{n}$ was introduced in [4]. Let $M\subset IR^{n}$ be a
submanifold and let $H(M)$ be the set of helix directions of $M$. We say
that $M$ is a strong $r$-helix if the set $H(M)$ is a linear subspace of $
IR^{n}$ of dimension greater than or equal to $r$ in [4].
Recently, M. Ghomi worked on the shadow problem posed by H. Wente and
mentioned the shadow boundary in [8]. Ruiz-Hern\'{a}ndez showed that
shadow boundaries are related to helix submanifolds in [12].
Helix hypersurfaces have been studied in nonflat ambient spaces in [6,7].
Cermelli and Di Scala have also studied helix hypersurfaces in liquid
crystals in [3].
The plan of this paper is as follows. In section 2, we mention some basic
facts from the general theory of strong $r$-helix submanifolds and curves. And,
in section 3, we give the important relations between strong $r$-helix
submanifolds and some special curves such as line of curvature, geodesic and
slant helix.
\section{PRELIMINARIES}
\begin{definition}
Let $M\subset IR^{n}$ be a submanifold of a Euclidean space. A unit vector $
d\in IR^{n}$ is called a helix direction of $M$ if the angle between $d$ and
any tangent space $T_{p}M$ is constant. Let $H(M)$ be the set of helix
directions of $M$. We say that $M$ is a strong $r$-helix if $H(M)$ is an $r$
-dimensional linear subspace of $IR^{n}$ [4].
\end{definition}
\begin{definition}
A submanifold $M\subset IR^{n}$ is a strong $r$-helix if the set $H(M)$ is a
linear subspace of $IR^{n}$ of dimension greater than or equal to $r$ [4].
\end{definition}
\begin{definition}
A unit speed curve $\alpha :I\rightarrow E^{n}$ is called a slant helix if
its unit principal normal $V_{2}$ makes a constant angle with a fixed
direction $U$ [1].
\end{definition}
\begin{definition}
Let the $(n-k)$-manifold $M$ be submanifold of the Riemannian manifold $
\overline{M}=E^{n}$ and let $\overline{D}$ be the Riemannian connexion on $
\overline{M}=E^{n}$. For $C^{\infty \text{ }}$fields $X$ and $Y$ with domain
$A$ on $M$ (and tangent to $M$), define $D_{X}Y$ and $V(X,Y)$ on $A$ by
decomposing $\overline{D}_{X}Y$ into unique tangential and normal
components, respectively; thus,
\begin{equation*}
\overline{D}_{X}Y=D_{X}Y+V(X,Y)\text{. }
\end{equation*}
Then, $D$ is the Riemannian connexion on $M$ and $V$ is a symmetric
vector-valued 2-covariant $C^{\infty \text{ }}$tensor called the second
fundamental tensor. The above decomposition equation is called the Gauss
equation [9].
\end{definition}
\begin{definition}
Let the $(n-k)$-manifold $M$ be submanifold of the Riemannian manifold $
\overline{M}=E^{n}$ , let $\overline{D}$ be the Riemannian connexion on $
\overline{M}=E^{n}$ and let $D$ be the Riemannian connexion on $M$. Then,
the formula of Weingarten
\begin{equation*}
\overline{D}_{X}N=-A_{N}(X)+D_{X}^{\bot }N
\end{equation*}
for every $X$ and $Y$ tangent to $M$ and for every $N$ normal to $M$. $A_{N}$
is the shape operator associated to $N$ also known as the Weingarten
operator corresponding to $N$ and $D^{\bot }$ is the induced connexion in
the normal bundle of $M$ ($A_{N}(X)$ is also the tangent component of $-
\overline{D}_{X}N$ and will be denoted by $A_{N}(X)$ $=$tang($-\overline{D}
_{X}N$)). In particular, if $M$ is a hypersurface in $E^{n}$, we have $
\left\langle V(X,Y),N\right\rangle =\left\langle A_{N}(X),Y\right\rangle $
for all $X$, $Y$ tangent to $M$. So,
\begin{equation*}
V(X,Y)=\left\langle V(X,Y),N\right\rangle N=\left\langle
A_{N}(X),Y\right\rangle N
\end{equation*}
and we obtain
\begin{equation*}
\overline{D}_{X}Y=D_{X}Y+\left\langle A_{N}(X),Y\right\rangle N\text{ .}
\end{equation*}
For this definition 2.5, note that the shape operator $A_{N}$ is defined by
the map $A_{N}:\varkappa (M)\rightarrow \varkappa (M)$, where $\varkappa (M)$
is the space of tangent vector fields on $M$ and if $p\in M$, the shape
operator $A_{N}$ is defined by the map $A_{p}:T_{p}(M)\rightarrow T_{p}(M)$
.The eigenvalues of $A_{p}$ are called the principal curvatures (denoted by $
\lambda _{i}$) and the eigenvectors of $A_{p}$ are called the principal
vectors [10,11].
\end{definition}
\begin{definition}
If $\alpha $ is a (unit speed) curve in $M$ with $C^{\infty \text{ }}$unit
tangent $T$, then $V(T,T)$ is called normal curvature vector field of $
\alpha $ and $k_{T}=\left\Vert V(T,T)\right\Vert $ is called the normal
curvature of $\alpha $ [9].
\end{definition}
\section{Main Theorems}
\begin{theorem}
\textbf{\ }Let $M$ be a strong $r$-helix hypersurface and $H(M)\subset E^{n}$
be the set of helix directions of $M$. If $\alpha :I\subset IR\rightarrow M$
is a (unit speed) line of curvature (not a line) on $M$, then $d_{j}\notin
Sp\left\{ N,T\right\} $ along the curve $\alpha $ for all $d_{j}\in H(M)$,
where $T$ is the tangent vector field of $\alpha $ and $N$ is a unit normal
vector field of $M$.
\end{theorem}
\begin{proof}
We assume that $d_{j}\in Sp\left\{ N,T\right\} $ along the curve $\alpha $
for any $d_{j}\in H(M)$. Then, along the curve $\alpha $, since $M$ is a
strong $r$-helix hypersurface, we can decompose $d_{j}$ in tangent and
normal components:
\begin{equation}
d_{j}=\cos (\theta _{j})N+\sin (\theta _{j})T
\end{equation}
where $\theta _{j}$ is constant. From (3.1), by taking derivatives on both
sides along the curve $\alpha $, we get:
\begin{equation}
0=\cos (\theta _{j})N^{
{\acute{}}
}+\sin (\theta _{j})T^{
{\acute{}}
}\text{ }
\end{equation}
Moreover, since $\alpha $ is a line of curvature on $M$,
\begin{equation}
N
{\acute{}}
=\lambda \alpha
{\acute{}}
\text{ }
\end{equation}
along the curve $\alpha $. By using the equations (3.2) and (3.3), we deduce
that the system $\left\{ \alpha
{\acute{}}
,T^{
{\acute{}}
}\right\} $ is linearly dependent. But, the system $\left\{ \alpha
{\acute{}}
,T^{
{\acute{}}
}\right\} $ is never linearly dependent. This is a contradiction. This
completes the proof.
\end{proof}
\begin{theorem}
Let $M$ be a submanifold with $(n-k)$ dimension in $E^{n}$. Let $\overline{D}
$ be the Riemannian connexion (standard covariant derivative) on $E^{n}$ and $D$
be Riemannian connexion on $M$. Let us assume that $M\subset E^{n}$ be a
strong $r$-helix submanifold and $H(M)\subset E^{n}$ be the space of the
helix directions of $M$. If $\alpha :I\subset IR\rightarrow M$ is a (unit
speed) geodesic curve on $M$ and if $\left\langle V_{2},\xi
_{j}\right\rangle $ is a constant function along the curve $\alpha $, then $
\alpha $ is a slant helix in $E^{n}$, where $V_{2}$ is the unit principal
normal of $\alpha $ and $\xi _{j}$ is the normal component of a direction $
d_{j}\in H(M)$.
\end{theorem}
\begin{proof}
Let $T$ be the unit tangent vector field of $\alpha $. Then, from the
Gauss formula in Definition (2.4),
\begin{equation}
\overline{D}_{T}T=D_{T}T+V(T,T)
\end{equation}
According to the Theorem, since $\alpha $ is a geodesic curve on $M$,
\begin{equation}
D_{T}T=0
\end{equation}
So, by using (3.4),(3.5) and Frenet formulas, we have:
\begin{equation*}
\overline{D}_{T}T=k_{1}V_{2}=V(T,T)
\end{equation*}
That is, the vector field $V_{2}\in \vartheta (M)$ along the curve $\alpha $
, where $\vartheta (M)$ is the normal space of $M$. On the other hand, since
$M$ is a strong $r$-helix submanifold, we can decompose any $d_{j}\in H(M)$
in its tangent and normal components:
\begin{equation}
d_{j}=\cos (\theta _{j})\xi _{j}+\sin (\theta _{j})T_{j}
\end{equation}
where $\theta _{j}$ is constant. Moreover, according to the Theorem, $
\left\langle V_{2},\xi _{j}\right\rangle $ is a constant function along the
curve $\alpha $ for the normal component $\xi _{j}$ of \ a direction $
d_{j}\in H(M)$. Hence, doing the scalar product with $V_{2}$ in each part of
the equation (3.6), we obtain:
\begin{equation}
\left\langle d_{j},V_{2}\right\rangle =\cos (\theta _{j})\left\langle
V_{2},\xi _{j}\right\rangle +\sin (\theta _{j})\left\langle
V_{2},T_{j}\right\rangle
\end{equation}
Since $\cos (\theta _{j})\left\langle V_{2},\xi _{j}\right\rangle =$constant
and $\left\langle V_{2},T_{j}\right\rangle =0$ ( $V_{2}\in \vartheta (M)$)
along the curve $\alpha $, from (3.7) we have:
\begin{equation*}
\left\langle d_{j},V_{2}\right\rangle =\text{constant.}
\end{equation*}
along the curve $\alpha $. Consequently, $\alpha $ is a slant helix in $
E^{n} $.
\end{proof}
\begin{theorem}
Let $M$ be a submanifold with $(n-k)$ dimension in $E^{n}$. Let $\overline{D}
$ be the Riemannian connexion (standard covariant derivative) on $E^{n}$ and $D$
be Riemannian connexion on $M$. Let us assume that $M\subset E^{n}$ be a
strong $r$-helix submanifold and $H(M)\subset E^{n}$ be the space of the
helix directions of $M$. If $\alpha :I\subset IR\rightarrow M$ is a (unit
speed) curve on $M$ with the normal curvature function $k_{T}=0$ and if $
\left\langle V_{2},T_{j}\right\rangle $ is a constant function along the
curve $\alpha $, then $\alpha $ is a slant helix in $E^{n}$, where $V_{2}$
is the unit principal normal of $\alpha $ and $T_{j}$ is the tangent
component of a direction $d_{j}\in H(M)$.
\end{theorem}
\begin{proof}
Let $T$ be the unit tangent vector field of $\alpha $. Then, from the
Gauss formula in Definition (2.4),
\begin{equation}
\overline{D}_{T}T=D_{T}T+V(T,T)
\end{equation}
According to the Theorem, since the normal curvature $k_{T}=0$,
\begin{equation}
V(T,T)=0
\end{equation}
So, by using (3.8),(3.9) and Frenet formulas, we have:
\begin{equation*}
\overline{D}_{T}T=k_{1}V_{2}=D_{T}T\text{.}
\end{equation*}
That is, the vector field $V_{2}\in T_{\alpha (t)}M$, where $T_{\alpha (t)}M$
is the tangent space of $M$. On the other hand, since $M$ is a strong $r$
-helix submanifold, we can decompose any $d_{j}\in H(M)$ in its tangent and
normal components:
\begin{equation}
d_{j}=\cos (\theta _{j})\xi _{j}+\sin (\theta _{j})T_{j}
\end{equation}
where $\theta _{j}$ is constant. Moreover, according to the Theorem, $
\left\langle V_{2},T_{j}\right\rangle $ is a constant function along the
curve $\alpha $ for the tangent component $T_{j}$ of \ a direction $d_{j}\in
H(M)$. Hence, doing the scalar product with $V_{2}$ in each part of the
equation (3.10), we obtain:
\begin{equation}
\left\langle d_{j},V_{2}\right\rangle =\cos (\theta _{j})\left\langle
V_{2},\xi _{j}\right\rangle +\sin (\theta _{j})\left\langle
V_{2},T_{j}\right\rangle
\end{equation}
Since $\sin (\theta _{j})\left\langle V_{2},T_{j}\right\rangle =$constant
and $\left\langle V_{2},\xi _{j}\right\rangle =0$ ($V_{2}\in T_{\alpha (t)}M$
) along the curve $\alpha $, from (3.11) we have:
\begin{equation*}
\left\langle d_{j},V_{2}\right\rangle =\text{constant.}
\end{equation*}
along the curve $\alpha $. Consequently, $\alpha $ is a slant helix in $
E^{n} $.
\end{proof}
\begin{definition}
Given an Euclidean submanifold of arbitrary codimension $M\subset IR^{n}$. A
curve $\alpha $ in $M$ is called a line of curvature if its tangent $T$ is a
principal vector at each of its points. In other words, when $T$ (the
tangent of $\alpha $) is a principal vector at each of its points, for an
arbitrary normal vector field $N\in \vartheta (M)$, the shape operator $
A_{N} $ associated to $N$ says $A_{N}(T)=$tang$(-$ $\overline{D}
_{T}N)=\lambda _{j}T$ along the curve $\alpha $, where $\lambda _{j}$ is a
principal curvature and $\overline{D}$ is the Riemannian connexion (standard
covariant derivative) on $IR^{n}$ [2].
\end{definition}
\begin{theorem}
Let $M$ be a submanifold with $(n-k)$ dimension in $E^{n}$ and let $
\overline{D}$ be the Riemannian connexion (standard covariant derivative) on $
E^{n}$. Let us assume that $M\subset E^{n}$ be a strong $r$-helix
submanifold and $H(M)\subset E^{n}$ be the space of the helix directions of $
M$. If $\alpha :I\rightarrow M$ is a line of curvature with respect to the
normal component $N_{j}\in \vartheta (M)$ of a direction $d_{j}\in H(M)$ and
if $N_{j}^{
{\acute{}}
}\in \varkappa (M)$ along the curve $\alpha $, then $d_{j}\in Sp\left\{
T\right\} ^{\bot }$ along the curve $\alpha $, where $T$ \ is the unit
tangent vector field of $\alpha $.
\end{theorem}
\begin{proof}
We assume that $\alpha :I\rightarrow M$ is a line of curvature with respect
to the normal component $N_{j}\in \vartheta (M)$ of a direction $d_{j}\in
H(M)$. Since $M$ is a strong $r$-helix submanifold, we can decompose $
d_{j}\in H(M)$ in its tangent and normal components:
\begin{equation*}
d_{j}=\cos (\theta _{j})N_{j}+\sin (\theta _{j})T_{j}\text{ }
\end{equation*}
where $\theta _{j}$ is constant. So, $\left\langle N_{j},d_{j}\right\rangle
= $constant and by taking derivatives on both sides along the curve $\alpha $
, we get $\left\langle N_{j}^{
{\acute{}}
},d_{j}\right\rangle =0$. On the other hand, since $\alpha :I\rightarrow M$
is a line of curvature with respect to the $N_{j}\in \vartheta (M)$,
\begin{equation*}
A_{N_{j}}(T)\text{=tang}(-\overline{D}_{T}N_{j})=\text{tang}(-N_{j}^{
{\acute{}}
})=\lambda _{j}T\text{ }
\end{equation*}
along the curve $\alpha $. According to this Theorem, $N_{j}^{
{\acute{}}
}\in \varkappa (M)$ along the curve $\alpha $. Hence,
\begin{equation}
\text{tang}(-N_{j}^{
{\acute{}}
})=-N_{j}^{
{\acute{}}
}=\lambda _{j}T\text{ }
\end{equation}
Therefore, by using the equalities $\left\langle N_{j}^{
{\acute{}}
},d_{j}\right\rangle =0$ and (3.12), we obtain:
\begin{equation*}
\left\langle T,d_{j}\right\rangle =0
\end{equation*}
along the curve $\alpha $. This completes the proof.
\end{proof}
\begin{theorem}
Let $M$ be a submanifold with $(n-k)$ dimension in $E^{n}$ and let $
\overline{D}$ be the Riemannian connexion (standard covariant derivative) on $
E^{n}$. Let us assume that $M\subset E^{n}$ be a strong $r$-helix
submanifold and $H(M)\subset E^{n}$ be the space of the helix directions of $
M$. If $\alpha :I\rightarrow M$ is a curve in $M$ and if the system $\left\{
T_{j}^{
{\acute{}}
},T\right\} $ is linearly dependent along the curve $\alpha $, where $T_{j}^{
{\acute{}}
}$ is the derivative of the tangent component $T_{j}$ of a direction $
d_{j}\in H(M)$ and $T$ the tangent to the curve $\alpha $, then $\alpha $ is
a line of curvature in $M$.
\end{theorem}
\begin{proof}
Since $M$ is a strong $r$-helix submanifold, we can decompose $d_{j}\in H(M)$
in its tangent and normal components:
\begin{equation}
d_{j}=\cos (\theta _{j})N_{j}+\sin (\theta _{j})T_{j}\text{ }
\end{equation}
where $\theta _{j}$ is constant. If we take derivative in each part of the
equation (3.13) along the curve $\alpha $, we obtain:
\begin{equation}
0=\cos (\theta _{j})N_{j}^{
{\acute{}}
}+\sin (\theta _{j})T_{j}^{
{\acute{}}
}\text{ }
\end{equation}
From (3.14), we can write
\begin{equation}
N_{j}^{
{\acute{}}
}=-\tan (\theta _{j})T_{j}^{
{\acute{}}
}\text{ }
\end{equation}
So, for the tangent component of $-N_{j}^{
{\acute{}}
}$, from (3.15) we can write:
\begin{equation}
A_{N_{j}}(T)\text{=tang}(-\overline{D}_{T}N_{j})=\text{tang}(-N_{j}^{
{\acute{}}
})=\text{tang}(\tan (\theta _{j})T_{j}^{
{\acute{}}
})\text{ }
\end{equation}
along the curve $\alpha $. According to the hypothesis, the system $\left\{
T_{j}^{
{\acute{}}
},T\right\} $ is linearly dependent along the curve $\alpha $. Hence, we get $
T_{j}^{
{\acute{}}
}=\lambda _{j}T$. And, by using the equation (3.16), we have:
\begin{equation*}
A_{N_{j}}(T)=\text{tang}(\tan (\theta _{j})T_{j}^{
{\acute{}}
})=\text{tang}(\tan (\theta _{j})\lambda _{j}T)
\end{equation*}
and
\begin{equation}
A_{N_{j}}(T)=\text{tang}(\tan (\theta _{j})\lambda _{j}T)\text{ }
\end{equation}
Moreover, since $T\in \varkappa (M)$, tang$(\tan (\theta _{j})\lambda _{j}
T)=(\tan (\theta _{j})\lambda _{j})T=k_{j}T$. Therefore, from (3.17), we
have:
\begin{equation*}
A_{N_{j}}(T)=k_{j}T\text{.}
\end{equation*}
It follows that $\alpha $ is a line of curvature in $M$ for $N_{j}\in
\vartheta (M)$. This completes the proof.
\end{proof}
\noindent \textbf{Acknowledgment.} The authors would like to thank two
anonymous referees for their valuable suggestions and comments that helped
to improve the presentation of this paper.
\end{document} |
\begin{document}
\maketitle
\renewcommand{\fnsymbol{footnote}}{\fnsymbol{footnote}}
\centerline{
\scshape Xianfeng Ma
}
{\footnotesize
\centerline{Department of Mathematics}
\centerline{East China University of Science and Technology, Shanghai 200237, China}
}
\centerline{\scshape Ercai Chen}
{\footnotesize
\centerline{School of Mathematical Science}
\centerline{Nanjing Normal University, Nanjing 210097, China}
\centerline{and}
\centerline{Center of Nonlinear Science}
\centerline{Nanjing University, Nanjing 210093, China}
}
\begin{abstract}
We introduce the relative tail entropy to establish a variational principle for continuous bundle random dynamical systems.
We also show that the relative tail entropy is conserved by the principal extension.
\end{abstract}
\section{Introduction}\label{sec1}
The entropy measures the complexity of a dynamical system both in the topological and measure-theoretic settings.
The topological entropy measures the maximal dynamical complexity versus an average complexity reflected by the measure-theoretic entropy.
The relationship between these two kinds of entropy is the classical variational principle, which states that the topological entropy is the supremum of the measure-theoretic entropy over all invariant measures \cite{Goodman, Goodwyn,Misiurewicz1}.
The entropy concepts can be localized by defining topological tail entropy to quantify the amount of disorder or uncertainty in a system at arbitrary small scales \cite{Misiurewicz}.
The local complexity of a dynamical system can also be measured by the defect of uniformity in the convergence of the measure-theoretic entropy function.
A variational principle related these two aspects is established in the case of homeomorphism from subtle results in the theory of entropy structure by Downarowicz \cite{Down2005,Boyle}.
An elementary proof of this variational principle for continuous transformations is obtained in terms of essential partitions by Burguet \cite{Burguet}.
Ledrappier \cite{Ledrappier} presents a variational principle between the topological tail entropy and the defect of upper semi-continuity of the measure-theoretic entropy on the cartesian square of the dynamical system, and prove that topological tail entropy is an invariant under any principal extension.
Kifer and Weiss \cite{Kifer2002} introduce the relative tail entropies for continuous bundle RDSs by investigating the open covers and spanning subsets and deduce the equivalence between the two notions. It is shown in \cite{KiferLiu2006} that the defects of the upper semi-continuity of the relative measure-theoretic entropies are bounded from above by the relative tail entropy.
In this paper
we propose a relative variational principle for the relative tail entropy, introduced by using open random covers, which enables us to treat different fibers with different open covers.
We also introduce the factor transformation and consider its basic properties related to the invariant measure and the upper semi-continuity of the relative measure-theoretic entropy for continuous bundle RDSs.
For the product RDS generated by a given RDS and any other RDS with the same probability space, we obtain a variational inequality, which shows that the defect of the upper semi-continuity of the relative measure-theoretic entropy of any invariant measure in the product RDS cannot exceed the relative tail entropy of the original RDS.
When the two continuous bundle RDSs coincide, we construct a maximal invariant measure to ensure that the relative tail entropy could be reached, and establish the variational principle.
For the probability space being trivial, it reduces to the variational principle deduced by Ledrappier \cite{Ledrappier} in deterministic dynamical systems.
As an application of the variational principle we show that the relative tail entropy is conserved by any principal extension.
The paper is organized as follows.
In Section \ref{sec2}, we recall some background in the ergodic theory, introduce the relative tail entropy with respect to open random covers and state our main results.
In Section \ref{sec3}, we give some basic properties of the relative entropy and the relative tail entropy.
In Section \ref{sec4}, we give the proof of the variational principle and show that the relative tail entropy is an invariant under principal extensions.
\section{Preliminaries and main results}\label{sec2}
Let $(\Omega, \mathcal{F},\mathbb{P})$ be a complete countably generated probability space together with a $\mathbb{P}$-preserving transformation $\vartheta$ and $(X,\mathcal{B})$ be a compact metric space with the Borel $\sigma$-algebra $\mathcal{B}$.
Let $\mathcal{E}$ be a measurable subset of $\Omega\times X$ with respect to the product $\sigma$-algebra $\mathcal{F}\times \mathcal{B}$ and the fibers $\mathcal{E}_{\omega}=\{x\in X: (\omega,x)\in \mathcal{E}\}$ be compact.
A continuous bundle random dynamical system (RDS) $T$ over $(\Omega, \mathcal{F},\mathbb{P},\vartheta)$ is generated by the mappings $T_{\omega}:\mathcal{E}_{\omega}\rightarrow \mathcal{E}_{\vartheta\omega}$
so that the map $(\omega,x)\rightarrow T_{\omega}x$ is measurable and the map $x\rightarrow T_{\omega}x$ is continuous for $\mathbb{P}$-almost all (a.a.) $\omega$.
The family $\{T_{\omega}:\omega\in \Omega\}$ is called a random transformation and each $T_{\omega}$ maps the fiber $\mathcal{E}_{\omega}$ to $\mathcal{E}_{\vartheta\omega}$.
The map $\Theta:\mathcal{E}\rightarrow \mathcal{E}$ defined by $\Theta(\omega,x)=(\vartheta\omega, T_{\omega}x)$ is called the skew product transformation. Observe that $\Theta^n(\omega, x)=(\vartheta^n\omega, T_{\omega}^nx)$, where $T_{\omega}^n=T_{\vartheta^{n-1}\omega}\circ\cdots\circ T_{\vartheta\omega}\circ T_{\omega}$ for $n\geq 1$ and $T_{\omega}^0=id$.
Let $\mathcal{P}_{\mathbb{P}}(\Omega\times X)$ be the space of probability measures on $\Omega\times X$ having the marginal $\mathbb{P}$ on $\Omega$
and set $\mathcal{P}_{\mathbb{P}}(\mathcal{E})=\{\mu \in \mathcal{P}_{\mathbb{P}}(\Omega\times X): \mu(\mathcal{E})=1\}$.
Denote by $\mathcal{I}_{\mathbb{P}}(\mathcal{E})$ the space of all $\Theta-$invariant measures in $\mathcal{P}_{\mathbb{P}}(\mathcal{E})$.
Let $\mathcal{S}$ be a sub-$\sigma$-algebra of $\mathcal{F}\times \mathcal{B}$ restricted on $\mathcal{E}$, $\mathcal{R}=\{R_i\}$ be a finite or countable partition of $\mathcal{E}$ into measurable sets. For $\mu\in \mathcal{P}_{\mathbb{P}}(\Omega\times X)$ the conditional entropy of $\mathcal{R}$ given $\sigma$-algebra $\mathcal{S}$ is defined as
\begin{equation*}
H_{\mu}(\mathcal{R}\mid \mathcal{S})=-\int \sum_{i}E(1_{R_i}\mid \mathcal{S}) \log E(1_{R_i}\mid \mathcal{S})d\mu,
\end{equation*}
where $E(1_{R_i}\mid \mathcal{S})$ is the conditional expectation of $1_{R_i}$ with respect to $\mathcal{S}$.
Let $\mu\in \mathcal{I}_{\mathbb{P}}(\mathcal{E})$ and let $\mathcal{S}$ be a sub-$\sigma$-algebra of $\mathcal{F}\times \mathcal{B} $ restricted on $\mathcal{E}$ satisfying $\Theta^{-1}\mathcal{S}\subset \mathcal{S}$. For a given measurable partition $\mathcal{R}$ of $\mathcal{E}$, the conditional entropy
$H_{\mu}(\mathcal{R}^{(n)}\mid \mathcal{S})$ is a non-negative sub-additive sequence, where $\mathcal{R}^{(n)}=\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{R}$.
The {\it relative entropy $h_{\mu}(\mathcal{R}\mid \mathcal{S})$ of $\Theta$ with respect to a partition $\mathcal{R}$ } is defined as
\begin{equation*}
h_{\mu}(\mathcal{R}\mid \mathcal{S})=\lim_{n\rightarrow\infty}\frac{1}{n}H_{\mu}(\mathcal{R}^{(n)}\mid \mathcal{S})=\inf_n\frac{1}{n}H_{\mu}(\mathcal{R}^{(n)}\mid \mathcal{S}).
\end{equation*}
The {\it relative entropy of $\Theta$ } is defined by the formula
\begin{equation*}
h_{\mu}(\Theta\mid \mathcal{S})=\sup_{\mathcal{R}}h_{\mu}(\mathcal{R}\mid \mathcal{S}),
\end{equation*}
where the supremum is taken over all finite or countable measurable partitions $\mathcal{R}$ of $\mathcal{E}$ with finite conditional entropy $H_{\mu}(\mathcal{R}\mid \mathcal{S})<\infty$.
The {\it defect of upper semi-continuity of the relative entropy $h_{\mu}(\Theta\mid \mathcal{S})$} is defined on $\mathcal{I}_{\mathbb{P}}(\mathcal{E})$ as
\begin{equation*}
h^*_m(\Theta\mid \mathcal{S})=
\begin{cases}
\limsup\limits_{\mu\rightarrow m} h_{\mu}(\Theta\mid \mathcal{S})- h_m(\Theta\mid \mathcal{S}),
& \text{if}\,\, h_m(\Theta\mid \mathcal{S})<\infty,\\
\infty, & \text{otherwise}.
\end{cases}
\end{equation*}
Any $\mu \in \mathcal{P}_{\mathbb{P}}(\mathcal{E})$ on $\mathcal{E}$ disintegrates $d\mu(\omega,x)=d\mu_{\omega}(x)d\mathbb{P}(\omega)$ (see \cite[Section 10.2]{Dudley}), where $\mu_{\omega}$ are regular conditional probabilities with respect to the $\sigma-$algebra $\mathcal{F}_{\mathcal{E}}$ formed by all sets $(F\times X)\cap \mathcal{E}$ with $F\in \mathcal{F}$.
This means that $\mu_{\omega}$ is a probability measure on $\mathcal{E}_{\omega}$ for $\mathbb{P}$-a.a. $\omega$ and for any measurable set $R\subset \mathcal{E}$,
$\mathbb{P}$-a.s. $\mu_{\omega}(R(\omega))=E(1_R\mid\mathcal{F}_{\mathcal{E}})$, where $R(\omega)=\{x: (\omega,x)\in R\}$ and so $\mu(R)=\int \mu_{\omega}(R(\omega))d\mathbb{P}(\omega)$.
The conditional entropy of $\mathcal{R}$ given $\sigma-$algebra $\mathcal{F}_{\mathcal{E}}$ can be written as
\begin{equation*}
H_{\mu}(\mathcal{R}\mid \mathcal{F}_{\mathcal{E}})=-\int \sum_i E(1_{R_i}\mid\mathcal{F}_{\mathcal{E}})\log E (1_{R_i}\mid\mathcal{F}_{\mathcal{E}})d \mu=\int H_{\mu_{\omega}}(\mathcal{R}(\omega))d\mathbb{P},
\end{equation*}
where $\mathcal{R}(\omega)=\{R_i(\omega)\}$, $R_i(\omega)=\{x\in\mathcal{E}_{\omega}:(\omega,x)\in R_i\}$ is a partition of $\mathcal{E}_{\omega}$.
Let $(Y, \mathcal{C})$ be a compact metric space with the Borel $\sigma$-algebra $\mathcal{C}$ and $\mathcal{G}$ be a measurable, with respect to the product $\sigma$-algebra $\mathcal{F}\times \mathcal{C}$, subset of $\Omega\times Y$ with the fibers $\mathcal{G}_{\omega}$ being compact.
The continuous bundle RDS $S$ over $(\Omega, \mathcal{F},\mathbb{P},\vartheta)$ is generated by the mappings $S_{\omega}:\mathcal{G}_{\omega}\rightarrow \mathcal{G}_{\vartheta\omega}$
so that the map $(\omega,y)\rightarrow S_{\omega}y$ is measurable and the map $y\rightarrow S_{\omega}y$ is continuous for $\mathbb{P}$-almost all (a.a.) $\omega$.
The skew product transformation $\Lambda:\mathcal{G}\rightarrow \mathcal{G}$ is defined as $\Lambda(\omega,y)=(\vartheta\omega, S_{\omega}y)$.
\begin{definition}
Let $T, S$ be two continuous bundle RDSs over $(\Omega,\mathcal{F},\mathbb{P},\vartheta)$ on $\mathcal{E}$ and $\mathcal{G}$, respectively.
$T$ is said to be a {\it factor } of $S$, or that $S$ is an {\it extension} of $T$,
if there exists a family of continuous surjective maps $\pi_{\omega}:\mathcal{G}_{\omega}\rightarrow \mathcal{E}_{\omega}$ such that the map $(\omega,y)\rightarrow \pi_{\omega}y$ is measurable and $\pi_{\vartheta\omega}S_{\omega}=T_{\omega}\pi_{\omega}$.
The map $\pi:\mathcal{G}\rightarrow\mathcal{E}$ defined by $\pi(\omega,y)=(\omega,\pi_{\omega}y)$ is called the {\it factor or extension transformation} from $\mathcal{G}$ to $\mathcal{E}$.
The skew product system $(\mathcal{E},\Theta)$ is called a {\it factor} of $(\mathcal{G},\Lambda)$ or that $(\mathcal{G},\Lambda)$ is an {\it extension } of $(\mathcal{E},\Theta)$.
\end{definition}
Denote by $\mathcal{A}$ the restriction of $\mathcal{F}\times \mathcal{B}$ on $\mathcal{E}$
and set $\mathcal{A}_{\mathcal{G}}=\{ \pi^{-1} A : A\in \mathcal{A}\}$.
\begin{definition}
A continuous bundle RDS $T$ on $\mathcal{E}$ is called a {\it principal factor} of $S$ on $\mathcal{G}$, or that $S$ is a {\it principal extension} of $T$, if for any $\Lambda-$invariant probability measure $m$ in $\mathcal{I}_{\mathbb{P}}(\mathcal{G})$, the relative entropy of $\Lambda$ with respect to $\mathcal{A}_{\mathcal{G}}$ vanishes, {\it i.e.}, $h_{m}(\Lambda\mid \mathcal{A}_{\mathcal{G}})=0.$
\end{definition}
Let $T$ and $S$ be two continuous bundle RDSs over $(\Omega,\mathcal{F},\mathbb{P},\vartheta)$ on $\mathcal{E}$ and $\mathcal{G}$, respectively.
Let $\mathcal{H}=\{(\omega,y,x): y\in \mathcal{G}_{\omega}, x\in \mathcal{E}_{\omega}\}$ and
$\mathcal{H}_{\omega}=\{(y,x): (\omega,y,x)\in \mathcal{H}\}$. It is not hard to see that $\mathcal{H}$ is a measurable subset of $\Omega\times Y\times X$ with respect to the product $\sigma-$algebra $\mathcal{F}\times\mathcal{C}\times \mathcal{B}$ (as a graph of a measurable multifunction; see \cite[Proposition III.13]{Castaing}).
The continuous bundle RDS $S\times T$ over $(\Omega, \mathcal{F},\mathbb{P},\vartheta)$ is generated by the family of mappings $(S\times T )_{\omega}:\mathcal{H}_{\omega}\rightarrow \mathcal{H}_{\vartheta\omega}$ with $(y,x)\rightarrow (S_{\omega}y, T_{\omega}x)$.
The map $(\omega,y,x)\rightarrow (S_{\omega}y, T_{\omega}x)$ is measurable and the map $(y,x)\rightarrow (S_{\omega}y, T_{\omega}x)$ is continuous in $(y,x)$ for $\mathbb{P}$-a.a. $\omega$.
The skew product transformation $\Gamma$ generated by $\Theta$ and $\Lambda$ from $\mathcal{H}$ to itself is defined as $\Gamma (\omega,y,x) =(\vartheta\omega,S_{\omega}y, T_{\omega}x)$.
Let $\pi_{\mathcal{E}}:\mathcal{H} \rightarrow\mathcal{E}$ be the natural projection with $\pi_{\mathcal{E}}(\omega,y,x)=(\omega,x)$, and $\pi_{\mathcal{G}}:\mathcal{H}\rightarrow \mathcal{G}$ with $\pi_{\mathcal{G}}(\omega,y,x)=(\omega,y)$.
Then $\pi_{\mathcal{E}}$ and $\pi_{\mathcal{G}}$ are two factor transformations from $\mathcal{H}$ to $\mathcal{E}$ and $\mathcal{G}$, respectively.
Denote by $\mathcal{D}$ the restriction of $\mathcal{F}\times \mathcal{C}$ on $\mathcal{G}$
and set
$\mathcal{D}_{\mathcal{H}}=\pi^{-1}_{\mathcal{G}}( \mathcal{D})=\{(D\times X)\cap \mathcal{H}:D\in \mathcal{D}\}$,
$\mathcal{A}_{\mathcal{H}}=\pi^{-1}_{\mathcal{E}}( \mathcal{A})=\{(A\times Y)\cap \mathcal{H}:A\in \mathcal{A}\}$,
and $\mathcal{F}_{\mathcal{H}}=\{(F\times Y \times X )\cap \mathcal{H}: F\in \mathcal{F} \}$.
The {\it relative entropy of $\Gamma$ given the $\sigma-$algebra $\mathcal{D}_{\mathcal{H}}$} is defined by
\begin{equation*}
h_{\mu}(\Gamma\mid \mathcal{D}_{\mathcal{H}})=\sup_{\mathcal{R}}h_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}}),
\end{equation*}
where
\begin{equation*}
h_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})=\lim_{n\rightarrow\infty}
\frac{1}{n}H_{\mu}(\bigvee_{i=0}^{n-1}(\Gamma^i)^{-1}\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})
\end{equation*}
is the {\it relative entropy of $\Gamma $ with respect to a measurable partition $\mathcal{R}$},
and
the supremum is taken over all finite or countable measurable partitions $\mathcal{R}$ of $\mathcal{H}$ with finite conditional entropy $H_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})<\infty$.
Let $\mathcal{E}^{(2)}=\{(\omega,x,y):x,y\in \mathcal{E}_{\omega}\}$, which is also a measurable subset of $\Omega\times X^2$ with respect to the product $\sigma-$algebra $\mathcal{F}\times \mathcal{B}^2$.
Let $\Theta^{(2)}:\mathcal{E}^{(2)}\rightarrow \mathcal{E}^{(2)}$ be a skew-product transformation with $\Theta^{(2)}(\omega,x,y)=(\vartheta\omega,T_{\omega}x, T_{\omega}y)$.
The map $(\omega,x,y)\rightarrow (T_{\omega}x, T_{\omega}y)$ is measurable and the map $(x,y)\rightarrow (T_{\omega}x, T_{\omega}y)$ is continuous in $(x,y)$ for $\mathbb{P}$-a.a. $\omega$.
Let $\mathcal{E}_1, \mathcal{E}_2$ be two copies of $\mathcal{E}$, {\it i.e.}, $\mathcal{E}_1=\mathcal{E}_2=\mathcal{E}$, and $\pi_{\mathcal{E}_i}$ be the natural projection from $\mathcal{E}^{(2)}$ to $\mathcal{E}_i$ with $\pi_{\mathcal{E}_i}(\omega,x_1,x_2)=(\omega,x_i)$, i=1, 2.
Denote by $\mathcal{A}_{\mathcal{E}^{(2)}}=\{(A\times X)\cap\mathcal{E}^{(2)}: A\in \mathcal{F}\times \mathcal{B}\}$.
The {\it relative entropy of $\Theta^{(2)}$ given the $\sigma-$algebra $\mathcal{A}_{\mathcal{E}^{(2)}}$} is defined by
\begin{equation*}
h_{\mu}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=\sup_{\mathcal{R}}h_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{E}^{(2)}}),
\end{equation*}
where
\begin{equation*}
h_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=\lim_{n\rightarrow\infty}
\frac{1}{n}H_{\mu}(\bigvee_{i=0}^{n-1}((\Theta^{(2)})^i)^{-1}\mathcal{R}\mid \mathcal{A}_{\mathcal{E}^{(2)}})
\end{equation*}
is the {\it relative entropy of $\Theta^{(2)} $ with respect to a measurable partition $\mathcal{R}$},
and
the supremum is taken over all finite or countable measurable partitions $\mathcal{R}$ of $\mathcal{E}^{(2)}$ with finite conditional entropy $H_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{E}^{(2)}})<\infty$.
A (closed) random set $Q$ is a measurable set-valued map $Q:\Omega\rightarrow 2^X $, or the graph of $Q$ denoted by the same letter, taking values in the (closed) subsets of the compact metric space $X$.
An open random set $U$ is a set-valued map $U:\Omega\rightarrow 2^X $ whose complement $U^c$ is a closed random set.
A measurable set $Q$ is an open (closed) random set if the fiber $Q_{\omega}$ is an open (closed) subset of $\mathcal{E}_{\omega}$ in its induced topology from $X$ for $\mathbb{P}-$almost all $\omega$ (see \cite[Lemma 2.7]{Crauel}).
A random cover $\mathcal{Q}$ of $\mathcal{E}$ is a finite or countable family of random sets $\{Q\} $ such that $\mathcal{E}_{\omega}=\bigcup_{Q\in \mathcal{Q}}Q(\omega)$ for all $\omega \in \Omega$, and it will be called an open random cover if all $Q\in \mathcal{Q}$ are open random sets.
Set $\mathcal{Q}(\omega)=\{Q(\omega)\}$, $\mathcal{Q}^{(n)}=\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{Q}$ and $\mathcal{Q}^{(n)}(\omega)=\bigvee_{i=0}^{n-1}(T_{\omega}^i)^{-1}\mathcal{Q}(\vartheta^i\omega)$.
Denote by $\mathfrak{P}(\mathcal{E})$ the set of random covers and $\mathfrak{U}(\mathcal{E})$ the set of open random covers. For $\mathcal{R}, \mathcal{Q}\in \mathfrak{P}(\mathcal{E})$, $\mathcal{R}$ is said to be finer than $\mathcal{Q}$, which we will write $\mathcal{R}\succ\mathcal{Q}$ if each element of $\mathcal{R}$ is contained in some element of $\mathcal{Q}$.
For any non-empty set $S\subset \mathcal{E}$ and a random cover $\mathcal{R}\in \mathfrak{P}(\mathcal{E})$,
let $N(S,\mathcal{R})=\min\{\text{card}(\mathcal{U}):\mathcal{U}\subset \mathcal{R}, S\subset \cup_{U\in \mathcal{U}} U\}$ and $N(\emptyset,\mathcal{R} )=1$.
Denote by $N(S,\mathcal{R})(\omega)=\min\{\text{card}(\mathcal{U}(\omega)):\mathcal{U}(\omega)\subset \mathcal{R}(\omega), S(\omega)\subset \cup_{U\in \mathcal{U}} U(\omega)\}$.
Clearly, $N(S,\mathcal{R})(\omega)\leq N(S,\mathcal{R})$ for each $\omega$.
For $\mathcal{R},\mathcal{Q}\in \mathfrak{P}(\mathcal{E})$, let
$N(\mathcal{R}\mid\mathcal{Q})=\max_{Q\in\mathcal{Q}}N(Q, \mathcal{R})$
and
$N(\mathcal{R}\mid\mathcal{Q})(\omega)=\max_{Q\in\mathcal{Q}}N(Q, \mathcal{R})(\omega)$.
\begin{lemma}
Let $\mathcal{R}\in \mathfrak{U}(\mathcal{E})$ and $ \mathcal{Q}\in \mathfrak{P}(\mathcal{E})$. The function $\omega\rightarrow N(\mathcal{R}\mid \mathcal{Q})(\omega)$ is measurable.
\end{lemma}
\begin{proof}
Let $Q\in \mathcal{Q}$ and $\mathcal{R}=\{R_1,\dots,R_l\}$.
For each $\omega,$ there exists a subset $\{j_1,\dots,j_k\}$ of $\{1,\dots,l\}$ such that
$Q(\omega)\subset \cup_{i=1}^k R_{j_i}(\omega)$.
Let
\begin{equation*}
\Omega_{j_1,\dots,j_k}=\{ \omega\in \Omega: Q(\omega)\subset \bigcup_{i=1}^k R_{j_i}(\omega)\}.
\end{equation*}
Since $Q$ is a random set and
\begin{equation*}
\Omega_{j_1,\dots,j_k}= \Omega\setminus \{\omega: \big(\mathcal{E}\setminus \bigcup_{i=1}^k R_{j_i}(\omega) \big) \cap Q(\omega)\neq \emptyset \},
\end{equation*}
$\Omega_{j_1,\dots,j_k}$ is a measurable subset of $\Omega$
(see for instance \cite[Theorem II.30]{Castaing}).
One obtains a finite partition of $\Omega$ into measurable sets $\Omega^J$, where $J$ is a finite family of subsets of $\{1,\dots,l\}$ such that
$\Omega^J=\bigcap_{(j_1,\dots,j_k)\in J}\Omega_{j_1,\dots,j_k}$.
Thus
for each $\omega$,
\begin{equation*}
N(Q,\mathcal{R})(\omega)=\min_{(j_1,\dots,j_k)\in J, 1\leq k\leq l} \text{card}\{j_1,\dots,j_k\},
\end{equation*}
and $N(Q,\mathcal{R})(\omega)$ is measurable in $\omega$.
Notice that for each $t\in \mathbb{R}$,
$$
\{\omega:N(\mathcal{R}\mid \mathcal{Q})(\omega)>t\}=\bigcup_{Q\in \mathcal{Q}}\{\omega: N(Q, \mathcal{R})(\omega)>t\}.
$$
The result holds from the measurability of $N(Q,\mathcal{R})(\omega)$ in $\omega$.
\end{proof}
For any $\mathcal{R},\mathcal{Q},\mathcal{U},\mathcal{V}\in \mathfrak{P}(\mathcal{E}) $,
the following inequalities always hold.
\begin{align}
&N(\mathcal{R}\mid \mathcal{Q})(\omega)\leq N(\mathcal{U}\mid \mathcal{V})(\omega), \quad\quad \text{if}\,\,\mathcal{U}\succ\mathcal{R},\, \mathcal{Q}\succ\mathcal{V},\label{n1}\\
&N(\Theta^{-1}\mathcal{R}\mid \Theta^{-1}\mathcal{Q})(\vartheta\omega)
\leq N(\mathcal{R}\mid \mathcal{Q})(\omega),\label{n2}\\
&N( \mathcal{R}\vee\mathcal{Q}\mid \mathcal{U})(\omega)
\leq N(\mathcal{R}\mid \mathcal{U})(\omega)\cdot N(\mathcal{Q}\mid \mathcal{R}\vee \mathcal{U})(\omega),\label{n3}\\
&N( \mathcal{R}\vee\mathcal{Q}\mid \mathcal{U}\vee\mathcal{V})(\omega)\leq
N(\mathcal{R}\mid \mathcal{U})(\omega)\cdot N(\mathcal{Q}\mid \mathcal{V})(\omega).\label{n4}
\end{align}
Let $\mathcal{R}\in \mathfrak{U}(\mathcal{E})$ and $ \mathcal{Q}\in \mathfrak{P}(\mathcal{E})$.
By the inequality \eqref{n2} and \eqref{n3}
it is easy to see that the sequence $\log N(\mathcal{R}^{(n)}\mid \mathcal{Q}^{(n)})(\omega)$ is subadditive for each $\omega$.
By the subadditive ergodic theorem (see \cite{Walters, Kifer}) the following limit
\begin{equation*}
h_{\Theta}(\mathcal{R}\mid \mathcal{Q})(\omega)=\lim_{n\rightarrow \infty}\frac{1}{n}\log N(\mathcal{R}^{(n)}\mid \mathcal{Q}^{(n)})(\omega)
\end{equation*}
$\mathbb{P}-$almost surely (a.s.) exists and
\begin{equation*}
h_{\Theta}(\mathcal{R}\mid \mathcal{Q})
=\lim_{n\rightarrow \infty}\frac{1}{n}\int \log N(\mathcal{R}^{(n)}\mid \mathcal{Q}^{(n)})(\omega)d\mathbb{P}
=\int h_{\Theta}(\mathcal{R}\mid \mathcal{Q})(\omega)d\mathbb{P}.
\end{equation*}
$h_{\Theta}(\mathcal{R}\mid \mathcal{Q})$ will be called {\it relative tail entropy of $\Theta$ on an open random cover $\mathcal{R}$ with respect to a random cover $\mathcal{Q}$ }.
If $\mathcal{Q}$ is a trivial random cover, then $h_{\Theta}(\mathcal{R}\mid \mathcal{Q})$ is called the {\it relative topological entropy $h^{(r)}_{\Theta}(\mathcal{R})$ of $\Theta$ with respect to an open random cover $\mathcal{R}$}. By \eqref{n1},
\begin{equation}\label{n6}
h^{(r)}_{\Theta}(\mathcal{R})\geq h_{\Theta}(\mathcal{R}\mid \mathcal{Q}),
\end{equation}
for all $\mathcal{Q}\in \mathfrak{P}(\mathcal{E})$.
From \eqref{n1}, one can see that
\begin{equation}\label{n5}
h_{\Theta}(\mathcal{R}\mid \mathcal{Q})\leq h_{\Theta}(\mathcal{U}\mid \mathcal{V}) \quad \text{if}\,\,\mathcal{U}\succ\mathcal{R},\, \mathcal{Q}\succ\mathcal{V}.
\end{equation}
Then
there exists a limit (finite or infinite) over the directed set $\mathfrak{U}(\mathcal{E})$,
\begin{equation*}
h(\Theta\mid \mathcal{Q})=\lim_{\mathcal{R}\in \mathfrak{U}(\mathcal{E})}h_{\Theta}(\mathcal{R}\mid \mathcal{Q})=\sup_{\mathcal{R}\in \mathfrak{U}(\mathcal{E})}h_{\Theta}(\mathcal{R}\mid \mathcal{Q}),
\end{equation*}
which will be called the {\it relative tail entropy of $\Theta$ with respect to a random cover $\mathcal{Q}$. }
By the inequality \eqref{n5},
\begin{equation*}
h(\Theta\mid \mathcal{Q}) \leq h(\Theta\mid \mathcal{V}), \quad \text{if} \,\,\mathcal{Q}\succ \mathcal{V},
\end{equation*}
then one can take the limit again
\begin{equation*}
h^*(\Theta) =\lim_{\mathcal{Q}\in \mathfrak{P}(\mathcal{E})} h(\Theta\mid \mathcal{Q})
=\inf_{\mathcal{Q}\in \mathfrak{P}(\mathcal{E})}h(\Theta\mid \mathcal{Q}),
\end{equation*}
which is called the {\it relative tail entropy of $\Theta$}.
It follows from the inequality \eqref{n6} that
$h^{(r)}(\Theta)\geq h^*(\Theta)$.
\begin{remark}\label{remark1}
For each open cover $\xi=\{A_1,\dots,A_k \}$ of the compact space $X$, $\{(\Omega\times A_i)\cap\mathcal{E}\}_{i=1}^k$ naturally form an open random cover of $\mathcal{E}$.
The relative tail entropy related with this kind of random cover is discussed under the name of ``relative conditional entropy" in \cite{Kifer2002}.
\end{remark}
One of our main goals is to establish the following variational inequality, which shows that
the defect of upper semi-continuity of the relative measure-theoretical entropy function cannot exceed the relative tail entropy.
\begin{theorem}\label{prop2}
Let $S\times T$ be the continuous bundle RDS on $\mathcal{H}$ and $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{H})$.
Then $h^*_m(\Gamma\mid \mathcal{D}_{\mathcal{H}})\leq h^*(\Theta)$.
\end{theorem}
\begin{remark}
For the trivial space $(Y,\mathcal{C})$ and the random cover mentioned in Remark \ref{remark1}, the above result reduces to the theorem presented by Kifer and Liu (see \cite[Theorem 1.3.5]{KiferLiu2006}).
\end{remark}
We will obtain the following variational principle when we consider the continuous bundle RDS $T\times T$.
\begin{theorem}\label{theo1}
Let $T$ be a continuous bundle RDS on $\mathcal{E}$. Then
$$
\max\{h^*_{\mu}(\Theta^{(2)} \mid \mathcal{A}_{\mathcal{E}^{(2)}}):\mu \in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})\}=h^*(\Theta).
$$
\end{theorem}
\begin{definition}
A continuous bundle RDS $T$ is called relatively asymptotically $h$-expansive if the relative tail entropy $h^*(\Theta)=0$.
\end{definition}
\begin{remark}
Theorem \ref{theo1} indicates that the upper semi-continuity of the function $ h_{(\cdot)}(\Theta^{(2)}\mid\mathcal{A}_{\mathcal{E}^{(2)}})$ on $\mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$ is equivalent to relatively asymptotically $h$-expansiveness of $T$.
Moreover, by Theorem \ref{prop2}, for a continuous bundle RDS $S\times T$ on $\mathcal{H}$ generated by the continuous bundle RDS $T$ and any other continuous bundle RDS $S$, relatively asymptotically $h$-expansiveness of $T$ is also equivalent to the upper semi-continuity of the function
$ h_{(\cdot)}(\Gamma\mid\mathcal{D}_{\mathcal{H}})$ on $\mathcal{I}_{\mathbb{P}}(\mathcal{H})$.
In general, the upper semi-continuity of the usual measure-theoretical entropy does not imply the relatively asymptotically $h$-expansiveness of a random transformation, even in the deterministic case (see \cite[Example 6.4]{Misiurewicz}).
An equivalence condition with respect to the upper semi-continuity of the measure-theoretic entropy is given by making use of the local entropy theory (see \cite[Lemma 6.4]{HuangYi}).
\end{remark}
As an application of the variational principle, we will derive the following result.
\begin{theorem}\label{th3}
Let $T, S$ be two continuous bundle RDSs on $\mathcal{E}$ and $\mathcal{G}$, respectively. Suppose that $S$ is a principal extension of $T$ via the factor transformation $\pi$, then $h^*(\Lambda)=h^*(\Theta)$.
\end{theorem}
\begin{remark}
Theorem \ref{th3} shows that the relative tail entropy for random transformations could be conserved by the principal extension. If two continuous bundle RDSs have a common principal extension, they are equivalent in the sense of the principal extension.
\end{remark}
\section{Relative tail entropy and relative entropy}\label{sec3}
We will first give two propositions regarding the relative tail entropy, which will be needed in the proof of the variational inequality later.
\begin{proposition}\label{power}
Let $T$ be a continuous bundle RDS on $\mathcal{E}$, and $\mathcal{Q}$ be a random cover of $\mathcal{E}$. Then for each $m\in \mathbb{N}$,
\begin{equation*}
h(\Theta^m\mid \mathcal{Q}^{(m)})=mh(\Theta\mid \mathcal{Q}),
\end{equation*}
where $\mathcal{Q}^{(m)}= \bigvee_{i=0}^{m-1}(\Theta^i)^{-1}\mathcal{Q}$.
\end{proposition}
\begin{proof}
Let $\mathcal{R}$ be an open random cover of $\mathcal{E}$. Since
\begin{equation*}
\bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1}\big(\bigvee_{i=0}^{m-1}(\Theta^i)^{-1}\mathcal{R} \big)
=\bigvee_{i=0}^{nm-1}(\Theta^i)^{-1}\mathcal{R},
\end{equation*}
then for each $\omega\in \Omega$,
\begin{equation*}
N(\bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{R}^{(m)}\mid \bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{Q}^{(m)})(\omega)=N(\mathcal{R}^{(nm)}\mid \mathcal{Q}^{(nm)})(\omega).
\end{equation*}
\end{equation*}
By the definition of the relative tail entropy of $\Theta^m$ on open random cover $\mathcal{R}^{(m)}$ with respect to $\mathcal{Q}^{(m)}$,
\begin{align*}
h_{\Theta^m}(\mathcal{R}^{(m)}\mid \mathcal{Q}^{(m)})
&=\lim_{n\rightarrow\infty}\frac{1}{n}\int \log N(\bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{R}^{(m)}\mid \bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{Q}^{(m)})(\omega)d\mathbb{P}\\
&=\lim_{n\rightarrow\infty}\frac{1}{n}\int \log
N(\mathcal{R}^{(nm)}\mid \mathcal{Q}^{(nm)})(\omega)
d\mathbb{P}\\
&=\lim_{n\rightarrow\infty}m\frac{1}{nm}\int \log
N(\mathcal{R}^{(nm)}\mid \mathcal{Q}^{(nm)})(\omega)
d\mathbb{P}\\
&=mh_{\Theta}(\mathcal{R}\mid \mathcal{Q}).
\end{align*}
Then
\begin{equation*}
mh(\Theta \mid \mathcal{Q})= \sup_{\mathcal{R}}h_{\Theta^m}(\mathcal{R}^{(m)}\mid \mathcal{Q}^{(m)})\leq h(\Theta^m\mid \mathcal{Q}^{(m)}),
\end{equation*}
where the supremum is taken over all open random covers $\mathcal{R}$ of $\mathcal{E}$.
Since $\mathcal{R}\prec \mathcal{R}^{(m)},$
then by the inequality \eqref{n1},
\begin{equation*}
N(\mathcal{R}^{(m)}\mid \bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{Q}^{(m)})(\omega)\leq N(\bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{R}^{(m)}\mid \bigvee_{j=0}^{n-1}(\Theta^{mj})^{-1} \mathcal{Q}^{(m)})(\omega),
\end{equation*}
which implies that
\begin{equation*}
h_{\Theta^m}(\mathcal{R}\mid \mathcal{Q}^{(m)})\leq h_{\Theta^m}(\mathcal{R}^{(m)}\mid \mathcal{Q}^{(m)})= mh_{\Theta}(\mathcal{R}\mid \mathcal{Q}).
\end{equation*}
Thus
$ h(\Theta^m\mid \mathcal{Q}^{(m)})\leq m h(\Theta\mid \mathcal{Q}) $ and the proposition is proved.
\end{proof}
We could deduce from Proposition \ref{power} the following power rule for the relative tail entropy.
\begin{proposition}
Let $T$ be a continuous bundle RDS on $\mathcal{E}$. Then for each $m\in\mathbb{N}$,
\begin{equation*}
h^*(\Theta^m)=mh^*(\Theta).
\end{equation*}
\end{proposition}
\begin{proof}
By Proposition \ref{power},
\begin{equation*}
\inf_{\mathcal{Q}}h(\Theta^m\mid \mathcal{Q}^{(m)})
=\inf_{\mathcal{Q}}m h(\Theta\mid \mathcal{Q})
= mh^*(\Theta),
\end{equation*}
where the infimum is taken over all random covers of $\mathcal{E}$.
Then $h^*(\Theta^m)\leq mh^*(\Theta).$
Since $\mathcal{Q}\prec \mathcal{Q}^{(m)}$, then
$$ h(\Theta^m\mid \mathcal{Q})\geq h(\Theta^m\mid \mathcal{Q}^{(m)})\geq mh^*(\Theta).$$
By taking the infimum in the inequality over all random covers of $\mathcal{E}$, one gets
$h^*(\Theta^m)\geq mh^*(\Theta)$ and the equality holds.
\end{proof}
Let $\mu\in \mathcal{P}_{\mathbb{P}}(\mathcal{E})$. A partition $\mathcal{P}$ is said to $\delta$-contain a partition $\mathcal{Q}$ if there exists a partition $\mathcal{R}\preceq \mathcal{P}$ such that $\inf\sum_i \mu(R^*_i\triangle Q^*_i)<\delta$, where the infimum is taken over all ordered partitions $\mathcal{R}^*, \mathcal{Q}^*$ obtained from $\mathcal{R}$ and $\mathcal{Q}$.
The following lemma essentially comes from the argument of Theorem 4.18 in \cite{Smorodinsky} and Lemma 4.15 in \cite{Walters}.
\begin{lemma}\label{lem415}
Given $\varepsilon>0$ and $k\in\mathbb{N}$. There exists $\delta=\delta(\varepsilon,k)>0$ such that if the measurable partition $\mathcal{P}$ $\delta$-contains $\mathcal{Q}$, where $\mathcal{Q}$ is a finite measurable partition with $k$ elements, then $H_{\mu}(\mathcal{Q}\mid \mathcal{P})<\varepsilon.$
\end{lemma}
\begin{proof}
Let $\varepsilon>0$. Choose $0<\delta<\frac{1}{e}$ such that $-\delta\log \delta-(1-\delta)\log(1-\delta)+\delta \log k<\varepsilon$.
Suppose that $\mathcal{R}\preceq \mathcal{P}$ is the partition with $\sum_{i}\mu(R_i\triangle Q_i)<\delta$. One can construct a partition $\mathcal{S}$ by $S_0=\bigcup_i(R_i\cap Q_i)$ and $S_i=Q_i\setminus S_0$.
Since $\mathcal{R}\vee\mathcal{Q}=\mathcal{R}\vee\mathcal{S}$, and
\begin{equation*}
H_{\mu}(\mathcal{R})+H_{\mu}(\mathcal{Q}\mid \mathcal{R})=H_{\mu}(\mathcal{R}\vee \mathcal{Q})=H_{\mu}(\mathcal{R}\vee \mathcal{S})\leq H_{\mu}(\mathcal{S})+H_{\mu}(\mathcal{R}).
\end{equation*}
Then
\begin{equation*}
H_{\mu}(\mathcal{Q}\mid \mathcal{R})\leq H_{\mu}(\mathcal{S})\leq -\delta\log \delta-(1-\delta)\log(1-\delta)+\delta \log k<\varepsilon,
\end{equation*}
and $H_{\mu}(\mathcal{Q}\mid \mathcal{P})\leq H_{\mu}(\mathcal{Q}\mid \mathcal{R})<\varepsilon$.
\end{proof}
\begin{remark}
We discuss here the conditional entropy instead of the usual measure-theoretic entropy in \cite{Smorodinsky}.
The result does not require that the two partitions have the same cardinality, which is a little different from Lemma 4.15 in \cite{Walters}.
\end{remark}
\begin{lemma}\label{Fact666}
Let $\mu^{(i)} \in \mathcal{P}_{\mathbb{P}}(\mathcal{E}), i\in \mathbb{N}$ and $\delta=\delta(\omega)$ be a positive random variable on $\Omega$. There exists a finite measurable partition $\mathcal{R}=\{R \}$ of $\mathcal{E}$ such that $\text{diam}\, R (\omega) \leq \delta(\omega)$ $\mathbb{P}-$a.s. and $\mu^{(i)}(\partial R )=0$ for each $i\in \mathbb{N}, R\in \mathcal{R} $ in the sense of $\mu^{(i)}(\partial R )=\int\mu_{\omega}^{(i)}(\partial R (\omega))d {\mathbb{P}(\omega)}$, where $\partial$ denotes the boundary.
\end{lemma}
\begin{proof}
Since $(\Omega,\mathcal{F},\mathbb{P})$ is a Lebesgue space, it can be viewed as a Borel subset of the unit interval $[0,1]$, and $\mu^{(i)}, i\in \mathbb{N}$ are also probability measures on the compact space $[0,1]\times X$ with the marginal $\mathbb{P}$ on $[0,1]$.
Fix a point $(t,x)\in [0,1]\times X$ and $\mu^{(i)}\in \mathcal{P}_{\mathbb{P}}(\mathcal{E})$. For each nonrandom $\varepsilon >0$,
$\partial B((t,x),\varepsilon)\subset \overline{B}((t,x),\varepsilon)\setminus \text{int} B((t,x),\varepsilon)$, where $B((t,x),\varepsilon)$ is the open ball of center at $(t,x)$ and radius $\varepsilon$ with the product metric $d=(d_1^2+d_2^2)^{\frac{1}{2}}$ on $[0,1]\times X$ and $\overline{B}$ denotes the closure of $B$. If $\varepsilon_1\neq \varepsilon_2$,
$\big(\overline{B}((t,x),\varepsilon_1)\setminus \text{int} B((t,x),\varepsilon_1)\big)\cap \big(\overline{B}((t,x),\varepsilon_2)\setminus \text{int} B((t,x),\varepsilon_2)\big)=\emptyset$. Then there exist only at most countably many $\varepsilon_j $ such that $\mu^{(i)}(\overline{B}((t,x),\varepsilon_j)\setminus \text{int} B((t,x),\varepsilon_j))>0, j=1, 2, \dots$. It follows that for each $(t,x)\in [0,1]\times X$ one could choose some positive real number $\gamma(t)<\frac{\delta(t)}{2}$ such that for all $i\in \mathbb{N}$,
$\mu^{(i)}(\overline{B}((t,x),\gamma(t))\setminus \text{int} B((t,x),\gamma(t)))=0$ and then $\mu^{(i)}(\partial B((t,x),\gamma(t)))=0$.
By the compactness of $[0,1]\times X$, there exists an open cover of $[0,1]\times X$ by finitely many open balls $B_1, \dots, B_k$ with the diameter of the $t-$section of $B_j$ satisfying $\text{diam}\, B_j(t)<\delta(t)$ and
$\mu^{(i)}(\partial B_j)=\int \mu^{(i)}_{\omega}(\partial B_j(\omega))d\mathbb{P}=0$ for each $1\leq j\leq k$ and $i\in \mathbb{N}$. Then $B_1, B_2\setminus B_1, \dots, B_k\setminus \cup_{j=1}^{k-1}B_j$ forms a measurable partition of $[0,1]\times X$. Let $A_1=B_1$, and $A_n=B_n\setminus \cup_{j=1}^{n-1}B_j$ for each $2\leq n\leq k$ and denote by $\xi=\{A_1,\dots, A_k\}$. Then $\xi$ is a measurable partition of $[0,1]\times X$ and for each $1\leq j\leq k$, $\text{diam}\, A_j(t)<\delta (t)$ for each $t\in [0,1]$. Since $\partial A_n\subset \cup_{j=1}^n\partial B_j$, then $\mu^{(i)}(\partial A_j)=0$, for each $1\leq j\leq k$ and $i\in \mathbb{N}$.
Let $R_j=A_j\cap \mathcal{E}$ and $R_j(\omega)=\{x:(\omega,x)\in R_j\}$. Notice that the marginal $\mathbb{P}$ is supported on $\Omega$, then $\mu^{(i)}(\partial R_j)=0$ and $\mathcal{R}=\{R_1,\dots,R_k\}$ is the measurable partition as desired.
\end{proof}
Let $T$ and $S$ be two continuous bundle RDSs on $\mathcal{E}$ and $\mathcal{G}$, respectively.
Let $T$ be a factor of $S$ via the factor transformation $\pi$.
The transformation $\pi$ induces a map, which is again denoted by $\pi$, from $\mathcal{P}_{\mathbb{P}}(\mathcal{G})$ to $\mathcal{P}_{\mathbb{P}}(\mathcal{E})$ by $\pi\mu=\mu\pi^{-1}$. This induced map $\pi$ transports every measure on $ \mathcal{G} $ to a measure $\pi\mu $ in $\mathcal{P}_{\mathbb{P}}(\mathcal{E})$. The following proposition is classical for deterministic dynamical systems \cite{Denker1976}.
\begin{proposition}\label{propm1}
The induced map $\pi$ is a continuous affine map from $\mathcal{P}_{\mathbb{P}}(\mathcal{G}) $ onto $\mathcal{P}_{\mathbb{P}}(\mathcal{E})$.
\end{proposition}
\begin{proof}
If $\mu_n \rightarrow \mu\in \mathcal{P}_{\mathbb{P}}(\mathcal{G})$,
then $\int f d\mu_n \rightarrow \int f d\mu$ for all $f \in \mathcal{C}(\mathcal{G})$, where $\mathcal{C}(\mathcal{G})$ is the set of random continuous functions on $\mathcal{G}$ (see \cite{Crauel}), and therefore $\int g\circ\pi d\mu_n\rightarrow \int g\circ\pi d\mu$ for all $g\in \mathcal{C}(\mathcal{G})$ by the measurability of the factor transformation $\pi$. This implies $\pi\mu_n\rightarrow \pi\mu$.
It is clear that $\pi(\alpha\mu+(1-\alpha)\nu)=\alpha\pi\mu+(1-\alpha)\pi\nu$ for all $\mu,\nu\in \mathcal{P}_{\mathbb{P}}(\mathcal{G})$ and $0\leq \alpha\leq 1$.
Note that $\mathcal{P}_{\mathbb{P}}(\mathcal{G})$ is a compact and convex subset of $\mathcal{P}_{\mathbb{P}}(\Omega\times Y)$ (see \cite[Section 1.5]{Arnold}), and the ergodic measures on $\mathcal{G}$ are just the point measures if we take the identity transformation on $\mathcal{G}$, {\it i.e.}, $Id(\omega,x)=(\omega,x)$. Then by the Krein-Milman theorem (see \cite[P440]{Dunford}), the convex combinations of point measures are dense in $\mathcal{P}_{\mathbb{P}}(\mathcal{G})$. It then follows from the surjectivity of $\pi:\mathcal{G}\rightarrow \mathcal{E}$ that $\pi:\mathcal{P}_{\mathbb{P}}(\mathcal{G})\rightarrow \mathcal{P}_{\mathbb{P}}(\mathcal{E})$ is onto.
\end{proof}
It is not hard to verify that the induced map $\pi$ sends a $\Lambda$-invariant measure in $\mathcal{I}_{\mathbb{P}}(\mathcal{G})$ to a $\Theta$-invariant measure in $\mathcal{I}_{\mathbb{P}}(\mathcal{E})$. The following result shows that this restricted map is also surjective.
\begin{proposition}\label{prop33}
Let $\mu\in \mathcal{I}_{\mathbb{P}}(\mathcal{E})$. There exists $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G})$ with $\pi m=\mu$.
\end{proposition}
\begin{proof}
By Proposition \ref{propm1}, there exists a $\nu\in \mathcal{P}_{\mathbb{P}}(\mathcal{G})$ such that $\pi\nu=\mu$. Since $\Theta\mu=\mu$ and $\pi\Lambda=\Theta \pi$, one has $\pi(\Lambda\nu)=\mu$, and more generally, $\pi(\Lambda^n\nu)=\mu$. By the affinity of $\pi$, $\pi(\frac{1}{n}\sum_{i=0}^{n-1}\Lambda^i\nu)=\mu$. Denote by $\nu^{(n)}=\frac{1}{n}\sum_{i=0}^{n-1}\Lambda^i\nu$ and let $m$ be one limit point of the sequence $\nu^{(n)}$. It follows from Theorem 1.5.8 in \cite{Arnold} that $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G})$. Since $\pi$ is continuous, then $\pi m=\mu$.
\end{proof}
We need the following lemma (see \cite[Section 14.3]{Glasner}) which follows from the martingale convergence theorem.
\begin{lemma}\label{lem222}
Let $\mu\in \mathcal{P}_{\mathbb{P}}(\mathcal{G})$, $\mathcal{R}=\{R_1,\dots,R_k\}$ be a finite measurable partition of $\mathcal{G}$ with $H_{\mu}(\mathcal{R})<\infty$ and $\mathcal{A}_1\prec\cdots\prec\mathcal{A}_n\prec\cdots$ be an increasing sequence of sub-$\sigma$-algebras of $\mathcal{A}$ with $\bigvee_{n=1}^{\infty}\mathcal{A}_n=\mathcal{A}$. Then
\begin{equation*}
H_{\mu}(\mathcal{R}\mid\mathcal{A}_{\mathcal{G}})
=\lim_{n\rightarrow\infty}H_{\mu}(\mathcal{R}\mid\mathcal{A}_n)
=\inf_n H_{\mu}(\mathcal{R}\mid\mathcal{A}_n).
\end{equation*}
\end{lemma}
The following result is a relative version of Lemma 6.6.7 in \cite{Down2011}. Similar results for random transformations can be found in \cite{LedWal,Kifer2001}.
\begin{lemma}\label{lem22}
Let $(\mathcal{E},\Theta)$ be a factor of $(\mathcal{G},\Lambda)$ via a factor transformation $\pi$, $m\in \mathcal{P}_{\mathbb{P}}(\mathcal{G})$ and $\mathcal{R}=\{R\}$ be a finite measurable partition of $\mathcal{G}$ with $m(\partial R)=0$, where $\partial$ denotes the boundary and $m(\partial R)=\int m_{\omega}(\partial R(\omega))d\mathbb{P}$.
Then
\begin{enumerate}
\item[(i)]
$m$ is an upper semi-continuity point of the function $\mu\rightarrow H_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}})$ defined on $\mathcal{P}_{\mathbb{P}}(\mathcal{G})$, {\it i.e.},
$$
\limsup_{\mu\rightarrow m}H_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}})\leq H_m(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}}).
$$
\item[(ii)]
If $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G})$, the function $\mu\rightarrow h_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}})$ defined on $\mathcal{I}_{\mathbb{P}}(\mathcal{G})$ is upper semi-continuous at $m$, {\it i.e.},
$$
\limsup_{\mu\rightarrow m}h_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}})\leq h_m(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}}).
$$
\end{enumerate}
\end{lemma}
\begin{proof}
(i) For $R\in\mathcal{R}$ with $R(\omega)=\{x: (\omega,x)\in R\}$. Let $\overline{R}=\{(\omega,x):x\in \overline{R(\omega)}\}$ and $\underline{R}=\{(\omega,x):x\in \text{int}( R(\omega))\}$, where $\overline{R(\omega)}$ and $\text{int}( R(\omega))$ denotes the closure and the interior of $R(\omega)$, respectively.
Then $\overline{R}$ is a closed random set of $\mathcal{G}$ and $\underline{R}$ is an open random set. By the Portmanteau theorem (see \cite{Crauel}),
\begin{equation*}
m(\overline{R})\geq \limsup_{\mu\rightarrow m}\mu(\overline{R})\geq \limsup_{\mu\rightarrow m}\mu(R)
\geq \liminf_{\mu\rightarrow m}\mu(R)\geq \liminf_{\mu\rightarrow m}\mu(\underline{R})\geq m(\underline{R}).
\end{equation*}
Since $m(\overline{R})=m(R)=m(\underline{R})$ by $m(\partial R)=0$, then
$\mu\rightarrow \mu(R)$ defined on $\mathcal{P}_{\mathbb{P}}(\mathcal{G})$ is continuous at $m$.
Recall that the function $t\rightarrow -t\log t$ is continuous on $[0,1]$. Then
$\mu\rightarrow H_{\mu}(\mathcal{R})$ is also continuous at $m$ on $\mathcal{P}_{\mathbb{P}}(\mathcal{G})$.
Moreover, if $\mathcal{Q}=\{Q\}$ is a measurable partition of $\mathcal{G}$ with $m(\partial Q)=0$ for each $Q\in \mathcal{Q}$, then the conditional entropy $\mu\rightarrow H_{\mu}(\mathcal{R}\mid \mathcal{Q})$ of the partition $\mathcal{R}$ over $\mathcal{Q}$ is continuous at $m$.
Let $\nu=\pi m$. By Lemma \ref{Fact666}, there exists a refining sequence of finite measurable partitions $\mathcal{Q}_k=\{Q_{k_i}\}$ of $\mathcal{E}$ satisfying $\nu(\partial Q_{k_i})=0$, for each $Q_{k_i}\in \mathcal{Q}_k$, $k=1,2,\dots$. Then $\{\pi^{-1}\mathcal{Q}_k\}$ is a refining sequence of measurable partitions of $\mathcal{G}$, all having the boundary of measure zero at $m$.
It follows that for each $k\in \mathbb{N}$, the function $\mu\rightarrow H_{\mu}(\mathcal{R}\mid \pi^{-1}\mathcal{Q}_k)$ is continuous at $m$. Notice that $\bigvee_{k=1}^{\infty}\mathcal{Q}_k=\mathcal{A}$ and $H_{\mu}(\mathcal{R}\mid \pi^{-1}\mathcal{Q}_k)$ decrease in $k$.
Thus the function $\mu\rightarrow \inf_k H_{\mu}(\mathcal{R}\mid \pi^{-1}\mathcal{Q}_k)$ is upper semi-continuous at $m$ and the property (i) follows from Lemma \ref{lem222}.
(ii) Let $n\in \mathbb{N}$. Since the function $\mu\rightarrow \frac{1}{n}H_{\mu}(\mathcal{R}^{(n)}\mid \pi^{-1}\mathcal{Q}_k)$ is also continuous at $m$ for each $k=1,2,
\dots,$ where $\mathcal{R}^{(n)}=\bigvee_{i=0}^{n-1}(\Lambda^i)^{-1}\mathcal{R}$, then the function
$\mu\rightarrow \inf_k\frac{1}{n}H_{\mu}(\mathcal{R}^{(n)}\mid \pi^{-1}\mathcal{Q}_k)=\frac{1}{n}H_{\mu}(\mathcal{R}^{(n)}\mid \mathcal{A}_{\mathcal{G}})$ is upper semi-continuous at $m$. Therefore the function $\mu\rightarrow \inf_n\frac{1}{n}H_{\mu}(\mathcal{R}^{(n)}\mid \mathcal{A}_{\mathcal{G}})=h_{\mu}(\mathcal{R}\mid \mathcal{A}_{\mathcal{G}})$ is upper semi-continuous at $m$ and the property (ii) holds.
\end{proof}
We need the following lemma which shows the basic connection between the relative entropy and relative tail entropy.
\begin{lemma}\label{lemlog}
Let $S$ be a continuous bundle RDS on $\mathcal{G}$. Suppose that $\mathcal{R}=\{R\} , \mathcal{Q}=\{Q\}$ are two finite measurable partitions of $\mathcal{G}$ and $\mu\in \mathcal{P}_{\mathbb{P}}(\mathcal{G})$, then
\begin{equation*}
H_{\mu}(\mathcal{R}\mid \mathfrak{Q}\vee \mathcal{F}_{\mathcal{G}})\leq
\int \log N(\mathcal{R}\mid \mathcal{Q})(\omega)d\mathbb{P},
\end{equation*}
where $\mathfrak{Q}$ is the sub-$\sigma$-algebra generated by the partition $\mathcal{Q}$ and $\mathcal{F}_{\mathcal{G}}=\{(F\times Y)\cap \mathcal{G}:F\in \mathcal{F} \}$.
\end{lemma}
\begin{proof}
A simple calculation (see \cite[Section 14.2]{Glasner}) shows that
\begin{equation*}
E(1_R\mid \mathfrak{Q}\vee \mathcal{F}_{\mathcal{G}})
=\sum_{Q\in \mathcal{Q}}1_Q\frac{E(1_{R\cap Q}\mid \mathcal{F}_{\mathcal{G}} )}{E(1_Q\mid \mathcal{F}_{\mathcal{G}})}.
\end{equation*}
Then
\begin{align*}
H_{\mu}(\mathcal{R}\mid \mathfrak{Q}\vee \mathcal{F}_{\mathcal{G}})
&=\int \sum_{R\in \mathcal{R}}-1_R\log E(1_R\mid \mathfrak{Q}\vee \mathcal{F}_{\mathcal{G}})d\mu\\
&=\int \sum_{R\in \mathcal{R}}-1_R\log \sum_{Q\in \mathcal{Q}}1_Q\frac{E(1_{R\cap Q}\mid \mathcal{F}_{\mathcal{G}} )}{E(1_Q\mid \mathcal{F}_{\mathcal{G}})}d\mu\\
&=\int \sum_{R\in \mathcal{R}}-1_R \sum_{Q\in \mathcal{Q}}1_Q\log\frac{E(1_{R\cap Q}\mid \mathcal{F}_{\mathcal{G}} )}{E(1_Q\mid \mathcal{F}_{\mathcal{G}})}d\mu\\
&=\int \sum_{R\in \mathcal{R}}\sum_{Q\in \mathcal{Q}}-1_{R\cap Q}\log\frac{E(1_{R\cap Q}\mid \mathcal{F}_{\mathcal{G}} )}{E(1_Q\mid \mathcal{F}_{\mathcal{G}})}d\mu\\
&=\int\sum_{R\in \mathcal{R}}\sum_{Q\in \mathcal{Q}}- E(1_{R\cap Q}
\log\frac{E(1_{R\cap Q}\mid \mathcal{F}_{\mathcal{G}} )}{E(1_Q\mid \mathcal{F}_{\mathcal{G}})} \mid \mathcal{F}_{\mathcal{G}})d\mu\\
&=\int\sum_{R\in \mathcal{R}}\sum_{Q\in \mathcal{Q}}- E(1_{R\cap Q}\mid\mathcal{F}_{\mathcal{G}})
\log\frac{E(1_{R\cap Q}\mid \mathcal{F}_{\mathcal{G}} )}{E(1_Q\mid \mathcal{F}_{\mathcal{G}})}d\mu
\end{align*}
Since $\mu$ disintegrates as $d\mu(\omega,y)=d\mu_{\omega}(y)d\mathbb{P}(\omega)$,
$E(1_{R\cap Q}\mid\mathcal{F}_{\mathcal{G}})=\mu_{\omega}((R\cap Q)(\omega))$ and $E(1_Q\mid \mathcal{F}_{\mathcal{G}})=\mu_{\omega}(Q(\omega))$ $\mathbb{P}-$a.s., then
\begin{align*}
H_{\mu}(\mathcal{R}\mid \mathfrak{Q}\vee \mathcal{F}_{\mathcal{G}})
&=\int\sum_{R\in \mathcal{R}}\sum_{Q\in \mathcal{Q}}- \mu_{\omega}((R\cap Q)(\omega))\log\frac{\mu_{\omega}((R\cap Q)(\omega))}{\mu_{\omega}(Q(\omega))}d\mathbb{P}\\
&=\int\sum_{Q\in \mathcal{Q}}\mu_{\omega}(Q(\omega))
\big(
-\sum_{R\in \mathcal{R}} \frac{\mu_{\omega}((R\cap Q)(\omega))}{\mu_{\omega}(Q(\omega))}\log\frac{\mu_{\omega}((R\cap Q)(\omega))}{\mu_{\omega}(Q(\omega))}\big)d\mathbb{P}.
\end{align*}
Notice that
\begin{equation*}
-\sum_{R\in \mathcal{R}} \frac{\mu_{\omega}((R\cap Q)(\omega))}{\mu_{\omega}(Q(\omega))}\log\frac{\mu_{\omega}((R\cap Q)(\omega))}{\mu_{\omega}(Q(\omega))}\leq \log N(Q, \mathcal{R})(\omega).
\end{equation*}
Thus
\begin{align*}
H_{\mu}(\mathcal{R}\mid \mathfrak{Q}\vee \mathcal{F}_{\mathcal{G}})
\leq \int \sum_{Q\in \mathcal{Q}}\mu_{\omega}(Q(\omega))\log N(Q, \mathcal{R})(\omega)d\mathbb{P}\leq \int \log N(\mathcal{R}\mid\mathcal{Q})(\omega)d\mathbb{P}.
\end{align*}
\end{proof}
\begin{remark}
When we consider the relative entropy $H_{\mu}(\mathcal{R}\mid \mathfrak{Q})$ with respect to two measurable partitions $\mathcal{R}$ and $\mathcal{Q}$, it is not hard to see that $H_{\mu}(\mathcal{R}\mid \mathfrak{Q})\leq N(\mathcal{R}\mid\mathcal{Q})$, which is similar to the case in the deterministic system. Moreover, the iteration of the random transformation is not necessary in this lemma, though we assume that the condition is in the environment of random dynamical systems.
\end{remark}
\section{Variational principle for relative tail entropy }\label{sec4}
We now take up the consideration of the relationship between the relative entropy and relative tail entropy on the measurable subset $\mathcal{H}$ of $\Omega\times Y\times X$ with respect to the product $\sigma-$algebra $\mathcal{F}\times\mathcal{C}\times \mathcal{B}$.
The following result follows from Lemma \ref{lem22} directly.
\begin{lemma}\label{lem2}
Let $\mathcal{R}=\{R_1,\dots, R_k\}$ be a finite measurable partition of $\mathcal{H}$. Given $m\in \mathcal{P}_{\mathbb{P}}(\mathcal{H})$ satisfying $m(\partial R_i)=0$ for each $1\leq i\leq k$, then
$m$ is an upper semi-continuity point of the function $\mu\rightarrow H_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})$ defined on $ \mathcal{P}_{\mathbb{P}}(\mathcal{H})$, {\it i.e.},
\begin{equation*}
\limsup_{\mu\rightarrow m}H_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})\leq H_m(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}}).
\end{equation*}
\end{lemma}
\begin{lemma}\label{lem3}
Let $S\times T$ be the continuous bundle RDSs on $\mathcal{H}$ and $\mu\in\mathcal{P}_{\mathbb{P}}(\mathcal{H})$.
Suppose that $\mathcal{R}, \mathcal{Q}$ are two finite measurable partitions of $\mathcal{H}$. Then
\begin{equation*}
H_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})
\leq H_{\mu}(\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}}) +
\int\log N(\mathcal{R}\mid\mathcal{Q})(\omega)d\mathbb{P}.
\end{equation*}
\end{lemma}
\begin{proof}
Since $\mathcal{F}_{\mathcal{H}}$ is a sub-$\sigma$-algebra of $\mathcal{D}_{\mathcal{H}}$, then
$\mathcal{D}_{\mathcal{H}}\vee\mathcal{F}_{\mathcal{H}}=\mathcal{D}_{\mathcal{H}}$.
Let $\mathfrak{Q} $ be the sub-$\sigma$-algebra generated by the partition $\mathcal{Q}$.
By Lemma \ref{lemlog},
\begin{align*}
H_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})
&=H_{\mu}(\mathcal{R}\mid \mathcal{D}_{\mathcal{H}}\vee\mathcal{F}_{\mathcal{H}})\\
&\leq H_{\mu}(\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}}\vee\mathcal{F}_{\mathcal{H}})
+H_{\mu}(\mathcal{R}\mid \mathfrak{Q} \vee \mathcal{D}_{\mathcal{H}}\vee\mathcal{F}_{\mathcal{H}})
\\
&\leq H_{\mu}(\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}}\vee\mathcal{F}_{\mathcal{H}})
+H_{\mu}(\mathcal{R}\mid \mathfrak{Q} \vee\mathcal{F}_{\mathcal{H}})
\\
&\leq H_{\mu}(\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})+
\int \log N(\mathcal{R}\mid\mathcal{Q})(\omega)d\mathbb{P},
\end{align*}
and the result holds.
\end{proof}
\begin{proposition}\label{prop1}
Let $S\times T$ be the continuous bundle RDS on $\mathcal{H}$ and $\mu\in\mathcal{I}_{\mathbb{P}}(\mathcal{H})$. Then for each finite measurable partition $\mathcal{Q}$ of $\mathcal{E}$,
$$h_{\mu}(\Gamma\mid\mathcal{D}_{\mathcal{H}} )\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})+h(\Theta\mid\mathcal{Q}).$$
\end{proposition}
\begin{proof}
Let $\mathcal{R}=\{R_1,\dots,R_k\}$ be a measurable partition of $\mathcal{E}$ and $\nu=\pi_{\mathcal{E}}\mu$.
Recall that $(\Omega,\mathcal{F},\mathbb{P})$ can be viewed as a Borel subset of the unit interval $[0,1]$. Then $\nu\in \mathcal{P}_{\mathbb{P}}(\mathcal{E})$ is also a probability measure on the compact space $[0,1]\times X$ with the marginal $\mathbb{P}$ on $[0,1]$. Let $\varepsilon >0$ and let $\delta>0$ be as given in Lemma \ref{lem415}. Since $\nu$ is regular, there exists a compact subset $P_i\subset R_i$ with $\nu(R_i\setminus P_i)<\frac{\delta}{2k}$ for each $1\leq i\leq k$.
Denote by
$P_0=\mathcal{E}\setminus \bigcup_{i=1}^kP_i$. Then $\mathcal{P}=\{P_0,P_1,\dots, P_k\}$ is a measurable partition of $\mathcal{E}$ and
$\sum_{i=1}^k\nu(R_i\setminus P_i)+\nu (P_0)< \frac{\delta}{2k}\cdot k+\frac{\delta}{2}=\delta.$
By Lemma \ref{lem415}, $H_{\nu}(\mathcal{R}\mid \mathcal{P})<\varepsilon.$
Let $U_i=P_0\cup P_i$. Since for each $\omega\in \Omega$, $(P_0\cup P_i)(\omega)$ is an open subset of $\mathcal{E}_{\omega}$, then $\mathcal{U}=\{U_1,\dots, U_k\}$ is an open random cover of $\mathcal{E}$,
and $N(\mathcal{P}\mid \mathcal{U})(\omega)\leq 2.$
Then
\begin{align*}
h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})
&\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})
+H_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{R}\mid \pi_{\mathcal{E}}^{-1}\mathcal{P})\\
&= h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})
+ H_{\nu}(\mathcal{R}\mid \mathcal{P})\\
&\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})
+ \varepsilon.
\end{align*}
By Lemma \ref{lem3}, one has
\begin{equation*}
H_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})
\leq H_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})
+\int \log N(\mathcal{P}\mid \mathcal{Q})(\omega)d\mathbb{P}.
\end{equation*}
Notice that for each $\omega\in \Omega$,
$N(\mathcal{P}\mid \mathcal{Q})(\omega)\leq N(\mathcal{U}\mid \mathcal{Q})(\omega)\cdot N(\mathcal{P}\mid \mathcal{U})(\omega) $.
Then
\begin{align*}
H_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})
\leq H_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})
+\int \log &N(\mathcal{U}\mid \mathcal{Q})(\omega)d\mathbb{P}\\
&+\int\log N(\mathcal{P} \mid \mathcal{U})(\omega)d\mathbb{P}.
\end{align*}
Applying the above result to $\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{P}, \bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{Q},$ and $\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{U}, $
dividing by $n$ and letting $n\rightarrow \infty$, one obtains
\begin{align*}
h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})
\leq h_{\mu}(&\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})
+h_{\Theta}(\mathcal{U}\mid \mathcal{Q})\\
&+ \lim_{n\rightarrow\infty}\frac{1}{n}\int \log N\big(\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{P}\mid \bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{U}\big)d\mathbb{P}.
\end{align*}
Observe that
\begin{equation*}
\lim_{n\rightarrow\infty}\frac{1}{n}\int \log N\big(\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{P}\mid \bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{U}\big)d\mathbb{P}\leq \log 2,
\end{equation*}
and $h_{\Theta}(\mathcal{U}\mid \mathcal{Q})\leq h(\Theta\mid \mathcal{Q})$,
then
\begin{align*}
h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{R}\mid \mathcal{D}_{\mathcal{H}})
&\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{P}\mid \mathcal{D}_{\mathcal{H}})+H_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{R}\mid \pi_{\mathcal{E}}^{-1}\mathcal{P})\\
&\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})
+h(\Theta\mid \mathcal{Q})+\log 2+\varepsilon.
\end{align*}
Let $\mathcal{R}_1\prec\cdots\prec\mathcal{R}_n\prec \cdots$ be an increasing sequence of finite measurable partitions with $\bigvee_{i=1}^{\infty}\mathcal{R}_n=\mathcal{A}$, by Lemma 1.6 in \cite{Kifer}, one has
\begin{equation}\label{ineq2}
h_{\mu}(\Gamma\mid\mathcal{D}_{\mathcal{H}})\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}})
+h(\Theta\mid \mathcal{Q})+\log 2+\varepsilon.
\end{equation}
Since
\begin{equation*}
H_{\mu}\big(
\bigvee_{j=0}^{n-1}(\Gamma^{mj})^{-1}(\bigvee_{i=0}^{m-1}(\Gamma^i)^{-1}\pi^{-1}_{\mathcal{E}}\mathcal{Q})
\mid \mathcal{D}_{\mathcal{H}}
\big)
=H_{\mu}\big(
\bigvee_{i=0}^{nm-1}(\Gamma^i)^{-1}\pi^{-1}_{\mathcal{E}}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}}
\big),
\end{equation*}
it is not hard to see that
\begin{equation}\label{eq2}
h_{\mu,\Gamma^{m}}(\bigvee_{i=0}^{m-1}(\Gamma^i)^{-1}\pi^{-1}_{\mathcal{E}}\mathcal{Q}
\mid \mathcal{D}_{\mathcal{H}})
=mh_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}} ),
\end{equation}
where $h_{\mu,\Gamma^{m}}(\xi\mid \mathcal{D}_{\mathcal{H}})$ denotes the relative entropy of $\Gamma^m $ with respect to the partition $\xi$.
By Lemma 1.4 in \cite{Kifer}, for each $m\in \mathbb{N}$,
\begin{equation}\label{eq3}
h_{\mu}(\Gamma^m\mid \mathcal{D}_{\mathcal{H}})=mh_{\mu}(\Gamma\mid \mathcal{D}_{\mathcal{H}}),
\end{equation}
where $h_{\mu}(\Gamma^m\mid \mathcal{D}_{\mathcal{H}})$ is the relative entropy of $\Gamma^m$.
By the equality \eqref{eq2}, \eqref{eq3} and Proposition \ref{power}, and applying $\Gamma^m$, $\Theta^m$ and $\bigvee_{i=0}^{m-1}(\Theta^i)^{-1}\mathcal{Q}$ to the inequality \eqref{ineq2}, dividing by $m$ and letting $m$ go to infinity, one has
\begin{equation*}
h_{\mu}(\Gamma\mid\mathcal{D}_{\mathcal{H}})\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{Q}\mid \mathcal{D}_{\mathcal{H}}) +h(\Theta\mid \mathcal{Q}),
\end{equation*}
and we complete the proof.
\end{proof}
Now we can prove Theorem \ref{prop2}, which gives a variational inequality between defect of upper semi-continuity of the relative entropy function on invariant measures and the relative tail entropy.
\begin{proof}[Proof of Theorem \ref{prop2} ]
Let $\mathcal{Q}$ be a finite random cover of $\mathcal{E}$. By Lemma \ref{Fact666}, there exists a finite measurable partition $\mathcal{R}$ of $\mathcal{E}$ with $\mathcal{Q}\prec \mathcal{R}$ and $m(\partial R)=0$ for each $R\in \mathcal{R}$.
By Proposition \ref{prop1} and $\pi_{\mathcal{E}}\Gamma=\Theta\pi_{\mathcal{E}}$, for each $\mu\in \mathcal{I}_{\mathbb{P}}(\mathcal{H})$ and $n\in \mathbb{N}$,
\begin{align*}
h_{\mu}(\Gamma\mid \mathcal{D}_{\mathcal{H}})
&\leq h_{\mu}(\pi_{\mathcal{E}}^{-1}\mathcal{R}\mid\mathcal{D}_{\mathcal{H}})+h(\Theta\mid \mathcal{R})\\
&\leq \frac{1}{n}H_{\mu}\big(
\bigvee_{i=0}^{n-1}(\Gamma^i)^{-1}\pi_{\mathcal{E}}^{-1}\mathcal{R}\mid\mathcal{D}_{\mathcal{H}} \big)+h(\Theta\mid \mathcal{Q}).
\end{align*}
Then by Lemma \ref{lem2},
\begin{align*}
\limsup_{\mu\rightarrow m}h_{\mu}(\Gamma\mid \mathcal{D}_{\mathcal{H}})
&\leq \limsup_{\mu\rightarrow m}\frac{1}{n}H_{\mu}(
\bigvee_{i=0}^{n-1}(\Gamma^i)^{-1}\pi_{\mathcal{E}}^{-1}\mathcal{R}
\mid\mathcal{D}_{\mathcal{H}}
)+h(\Theta\mid \mathcal{Q})\\
&\leq \frac{1}{n}H_m(
\bigvee_{i=0}^{n-1}(\Gamma^i)^{-1}\pi_{\mathcal{E}}^{-1}\mathcal{R}
\mid\mathcal{D}_{\mathcal{H}}
)+h(\Theta\mid \mathcal{Q}).
\end{align*}
Thus
\begin{equation*}
\limsup_{\mu\rightarrow m}h_{\mu}(\Gamma\mid \mathcal{D}_{\mathcal{H}})\leq h_m(\Gamma\mid \mathcal{D}_{\mathcal{H}}
)+h(\Theta\mid \mathcal{Q}).
\end{equation*}
Since the partition $\mathcal{Q}$ is arbitrary, then
$h^*_m(\Gamma\mid\mathcal{D}_{\mathcal{H}} )\leq h^*(\Theta)$.
\end{proof}
Next we are concerned with the variational principle related with the relative entropy of $\mathcal{E}^{(2)}$ and the relative tail entropy of $\Theta$.
Recall that $\mathcal{E}^{(2)}=\{(\omega,x,y):x,y\in \mathcal{E}_{\omega}\}$ is a measurable subset of $\Omega\times X^2$ with respect to the product $\sigma-$algebra $\mathcal{F}\times \mathcal{B}^2$ and
$\mathcal{A}_{\mathcal{E}^{(2)}}=\{(A\times X)\cap\mathcal{E}^{(2)}: A\in \mathcal{F}\times \mathcal{B}\}$.
The skew product transformation $\Theta^{(2)}:\mathcal{E}^{(2)}\rightarrow \mathcal{E}^{(2)}$ is given by $\Theta^{(2)}(\omega,x,y)=(\vartheta\omega,T_{\omega}x, T_{\omega}y)$.
Let $\mathcal{E}_1, \mathcal{E}_2$ be two copies, {\it i.e.}, $\mathcal{E}_1=\mathcal{E}_2=\mathcal{E}$, and $\pi_{\mathcal{E}_i}$ be the natural projection from $\mathcal{E}^{(2)}$ to $\mathcal{E}_i$ with $\pi_{\mathcal{E}_i}(\omega,x_1,x_2)=(\omega,x_i)$, $i=1, 2$.
\begin{proposition}\label{prop3}
Let $T$ be a continuous bundle RDS on $\mathcal{E}$ and $\mathcal{Q}=\{Q_1,\dots,Q_k\}$ be an open random cover of $\mathcal{E}$. There exists a probability measure $\mu_{\mathcal{Q}}\in\mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$ such that
\begin{enumerate}
\item[(i)]
$
h_{\mu_{\mathcal{Q}}}( \Theta^{(2)} \mid \mathcal{A}_{\mathcal{E}^{(2)}})\geq h(\Theta\mid \mathcal{Q})-\frac{1}{k},
$
\item[(ii)]
$\mu_{\mathcal{Q}}$ is supported on the set $\bigcup\limits_{j=1}^k\{(\omega,x,y)\in \mathcal{E}^{(2)}: x, y\in \overline{Q_j}(\omega)\}$.
\end{enumerate}
\end{proposition}
\begin{proof}
Let us choose an open random cover $\mathcal{P}=\{P_1,\dots,P_l\}$ of $\mathcal{E}$ such that
$h_{\Theta}(\mathcal{P}\mid \mathcal{Q})\geq h(\Theta\mid \mathcal{Q})-\frac{1}{k}$.
Recall that $\mathfrak{U}(\mathcal{E})$ is the collection of all open random covers on $\mathcal{E}$, $\mathcal{Q}^{(n)}=\bigvee_{i=0}^{n-1}(\Theta^i)^{-1}\mathcal{Q}$ and $\mathcal{Q}^{(n)}(\omega)=\bigvee_{i=0}^{n-1}(T_{\omega}^i)^{-1}\mathcal{Q}(\vartheta^i\omega)$.
Pick one element $Q(\omega)\in \mathcal{Q}^{(n)}(\omega)$ with $N(Q,\mathcal{P}^{(n)})(\omega)=N(\mathcal{P}^{(n)}\mid\mathcal{Q}^{(n)})(\omega)$ and a point $x\in Q(\omega)$.
Since $\mathcal{P}$ is an open random cover of $\mathcal{E}$, by the compactness of $\mathcal{E}_{\omega}$, there exists a Lebesgue number $\eta(\omega)$ for the open cover $\{P_1(\omega),\dots,P_l(\omega)\}$ and a maximal $(n,\delta)-$separated subset $E_n(\omega)$ in $Q(\omega)$ such that
$$Q(\omega)\subset \bigcup_{y\in E_n(\omega)}B_y(\omega,n,\delta),$$
where $B_y(\omega,n,\delta)$ denote the open ball in $\mathcal{E}_{\omega}$ centered at $y$ of radius $1$ with respect to the metric
$d_n^{\omega}(x,y)=\max_{0\leq k < n}\{d(T_{\omega}^kx, T_{\omega}^ky)(\delta(\vartheta^k\omega))^{-1}\},$
for each $x,y \in \mathcal{E}_{\omega}$, {\it i.e.},
$B_y(\omega,n,\delta(\omega))=
\bigcap_{i=0}^{n-1}(T_{\omega}^i)^{-1}B(T^i_{\omega}y,\delta(\vartheta^i\omega))$.
Notice that for each $0\leq i\leq n-1$, the open ball $B(T^i_{\omega}y,\delta(\vartheta^i\omega))$ is contained in some element of $\mathcal{P}(\vartheta^i\omega)$, then
$B_y(\omega,n,\delta(\omega))$ must be contained in some element of $\mathcal{P}^{(n)}(\omega)$. This means that the cardinality of $E_n(\omega)$ is no less than $N(Q,\mathcal{P}^{(n)})(\omega)$.
Consider the probability measures $\sigma^{(n)}$ on $\mathcal{E}^{(2)}$ defined via their disintegrations
\begin{equation*}
\sigma^{(n)}_{\omega}=\frac{1}{\text{card}E_n(\omega)}\sum_{y\in E_n(\omega)}\delta_{(\omega,x,y)}
\end{equation*}
so that $d\sigma^{(n)}(\omega,x,y)=d\sigma^{(n)}_{\omega}d\mathbb{P}(\omega)$, and let
\begin{equation*}
\mu^{(n)}=\frac{1}{n}\sum_{i=0}^{n-1}(\Theta^{(2)})^i\sigma^{(n)}.
\end{equation*}
By the Krylov-Bogolyubov procedure for continuous RDS (see \cite[Theorem 1.5.8]{Arnold} or \cite[Lemma 2.1 (i)]{Kifer2001}), one can choose a subsequence $\{n_j\}$ such that $\mu^{(n_j)}$ converges to some probability measure $\mu_{\mathcal{Q}}\in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$.
Next we will check that the measure $\mu_{\mathcal{Q}}$ satisfies (i) and (ii).
Let $\nu=\pi_{\mathcal{E}_2}\mu_{\mathcal{Q}}$.
By Lemma \ref{Fact666}, choose a finite measurable partition $\mathcal{R}=\{R_1,\dots, R_q\}$ of $\mathcal{E}$ with $\nu(\partial R_i)=0$ for $1\leq i\leq q$ and $\text{diam}\, R_i(\omega)<\delta(\omega)$ for each $\omega$.
Set $\xi^{(n)}=\bigvee_{i=0}^{n-1}(\Theta^{(2)})^{-i}\pi_{\mathcal{E}_2}^{-1}\mathcal{R}$. Since $\pi_{\mathcal{E}_2}\Theta^{(2)}=\Theta\pi_{\mathcal{E}_2}$, then
$\xi^{(n)}=\pi_{\mathcal{E}_2}^{-1}\bigvee_{i=0}^{n-1}\Theta^{-i}\mathcal{R}
=\pi_{\mathcal{E}_2}^{-1}\mathcal{R}^{(n)}$. Denote by $\xi^{(n)}=\{D\}$ for convenience, where $D$ is a typical element of $\pi_{\mathcal{E}_2}^{-1}\mathcal{R}^{(n)}$.
For each $\omega$, let $\pi_{X_1}^{-1}\mathcal{B}(\omega)=\{(B\times X_2)\cap
\mathcal{E}^{(2)}_{\omega}: B\in \mathcal{B}\}$, where $X_1, X_2$ are two copies of the space $X$ and $\pi_{X_1}$ is the natural projection from the product space $X_1\times X_2$ to the space $X_1$.
We abbreviate it as $\pi_{X_1}^{-1}\mathcal{B}$ for convenience.
Since each element of $\mathcal{R}^{(n)}(\omega)$ contains at most one element of $E_n(\omega)$, one has
\begin{equation}\label{eq1}
E(1_{D(\omega)}\mid \pi^{-1}_{X_1}\mathcal{B}) (x,y)=\sigma^{(n)}_{\omega}(D(\omega)).
\end{equation}
Indeed, for each $d\in \pi_{X_1}^{-1}\mathcal{B}$,
\begin{align*}
\int_{d}E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})d\sigma^{(n)}_{\omega}
&=\int_{d}1_{D(\omega)}d\sigma^{(n)}_{\omega}\\
&=\int 1_{d\cap D(\omega) }d\sigma^{(n)}_{\omega}= \sigma^{(n)}_{\omega}(d\cap D(\omega )).
\end{align*}
Since $d=(B\times X_2)\cap\mathcal{E}^{(2)}_{\omega}$ for some $B\in \mathcal{B}$ and
$D(\omega)=(X_1\times C)\cap \mathcal{E}^{(2)}_{\omega}$ for some $C\in \mathcal{R}^{(n)}(\omega)$.
Then $\sigma^{(n)}_{\omega}(d\cap D(\omega ))=\sigma^{(n)}_{\omega}((B\times C)\cap\mathcal{E}^{(2)}_{\omega} )$. By the construction of $\sigma^{(n)}_{\omega}$, one has
\begin{equation*}
\sigma^{(n)}_{\omega}((B\times X_2)\cap\mathcal{E}^{(2)}_{\omega} )=
\begin{cases}
1, & x\in B,\\
0, &\text{otherwise}.
\end{cases}
\end{equation*}
Then
\begin{align*}
\sigma^{(n)}_{\omega}((B\times C)\cap\mathcal{E}^{(2)}_{\omega} )=&\sigma^{(n)}_{\omega}((X_1\times C)\cap \mathcal{E}^{(2)}_{\omega})\cdot\sigma^{(n)}_{\omega}((B\times X_2)\cap\mathcal{E}^{(2)}_{\omega} )\\
=&\int_{(B\times X_2)\cap\mathcal{E}^{(2)}_{\omega}}\sigma^{(n)}_{\omega}((X_1\times C)\cap \mathcal{E}^{(2)}_{\omega})d\sigma^{(n)}_{\omega}\\
=&\int_d \sigma^{(n)}_{\omega}(D(\omega)) d\sigma^{(n)}_{\omega},
\end{align*}
and so
\begin{equation*}
\int_{d}E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})d\sigma^{(n)}_{\omega}
=\int_d \sigma^{(n)}_{\omega}(D(\omega)) d\sigma^{(n)}_{\omega}
\end{equation*}
for all $d\in \pi_{X_1}^{-1}\mathcal{B}$, which implies the equality \eqref{eq1} holds.
Thus
\begin{align*}
H_{\sigma^{(n)}_{\omega}}(\xi^{(n)}(\omega))
&=\int\sum_{D(\omega)\in \xi^{(n)}(\omega)}-E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})\log E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})d\sigma^{(n)}_{\omega}\\
&=\sum_{D(\omega)\in\xi^{(n)}(\omega)}-\sigma^{(n)}_{\omega}(D(\omega))\log \sigma^{(n)}_{\omega}(D(\omega))\\
&=\log \text{card}E_n(\omega)\geq \log N(\mathcal{P}^{(n)}\mid \mathcal{Q}^{(n)})(\omega).
\end{align*}
Since for each $G\in \mathcal{A}_{\mathcal{E}^{(2)}}$,
\begin{align*}
\int_GE(1_D\mid \mathcal{A}&_{\mathcal{E}^{(2)}})d\sigma^{(n)}
=\int1_G\cdot E(1_D\mid \mathcal{A}_{\mathcal{E}^{(2)}})d\sigma^{(n)}\\
&=\int E(1_G\cdot1_D\mid \mathcal{A}_{\mathcal{E}^{(2)}})d\sigma^{(n)}\\
&=\int 1_{G\cap D}(\omega,x,y)d\sigma^{(n)}\\
&=\iint 1_{(G\cap D)(\omega)}(x,y)d\sigma^{(n)}_{\omega}d\mathbb{P}\\
&=\iint E(1_{(G\cap D)(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})(x,y)d\sigma^{(n)}_{\omega}d\mathbb{P}\\
&=\iint 1_{G(\omega)}(x,y)\cdot E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})(x,y)d\sigma^{(n)}_{\omega}d\mathbb{P}\\
&=\iint 1_{G}(\omega,x,y) E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})(x,y)d\sigma^{(n)}_{\omega}d\mathbb{P}\\
&=\iint_{G} E(1_{D(\omega)}\mid\pi_{X_1}^{-1}\mathcal{B})(x,y)d\sigma^{(n)}_{\omega}d\mathbb{P}\\
&=\int_G E(1_{D(\omega)}\mid\pi_{X_1}^{-1}\mathcal{B})d\sigma^{(n)}.
\end{align*}
Then
\begin{equation*}
E(1_D\mid \mathcal{A}_{\mathcal{E}^{(2)}})(\omega,x,y)=E(1_{D(\omega)}\mid \pi_{X_1}^{-1}\mathcal{B})(x,y)\,\, \mathbb{P}\text{-a.s.}
\end{equation*}
Therefore,
\begin{align}\label{ineq1}
H_{\sigma^{(n)}}&(\xi^{(n)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=
\int\sum_{D\in \xi^{(n)}}-E(1_D\mid \mathcal{A}_{\mathcal{E}^{(2)}})\log E(1_D\mid \mathcal{A}_{\mathcal{E}^{(2)}})d\sigma^{(n)}\nonumber\\
&=\iint\sum_{D(\omega)\in \xi^{(n)}(\omega)}-E(1_{D(\omega)}\mid \pi_{X_1}\mathcal{B})\log E(1_{D(\omega)}\mid \pi_{X_1}\mathcal{B})d\sigma^{(n)}_{\omega}d\mathbb{P}\nonumber\\
&=\int H_{\sigma^{(n)}_{\omega}}(\xi^{(n)}(\omega))d\mathbb{P}
\geq \int \log N(\mathcal{P}^{(n)}\mid \mathcal{Q}^{(n)})(\omega) d\mathbb{P}.
\end{align}
For $0\leq j<m<n$, one can cut the segment $(0,n-1)$ into disjoint union of $[\frac{n}{m}]-2$ segments $(j,j+m-1),\dots, (j+km,j+(k+1)m-1)$, $\dots$ and less than $3m$ other natural numbers. Then
\begin{align*}
H_{\sigma^{(n)}}(\xi^{(n)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})
&\leq \sum_{k=0}^{[\frac{n}{m}]-2}H_{\sigma^{(n)}}
\big(
\bigvee_{i=j+km}^{j+(k+1)m-1}(\Theta^{(2)})^{-i}\pi_{\mathcal{E}_2}^{-1}\mathcal{R}\mid \mathcal{A}_{\mathcal{E}^{(2)}}
\big)
+3m\log q\\
&\leq \sum_{k=0}^{[\frac{n}{m}]-2}
H_{(\Theta^{(2)})^{j+km}\sigma^{(n)}}(\xi^{(m)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})+3m\log q.
\end{align*}
By summing over all $j$, $0\leq j<m$ and considering the concavity of the entropy function $H_{(\cdot)}$, one has
\begin{align*}
mH_{\sigma^{(n)}}(\xi^{(n)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})
&\leq \sum_{k=0}^{n-1}H_{(\Theta^{(2)})^k\sigma^{(n)}}(\xi^{(m)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})+3m^2\log q\\
&\leq nH_{\mu^{(n)}}(\xi^{(m)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})+3m^2\log q.
\end{align*}
Then by inequality \eqref{ineq1},
\begin{align*}
\frac{1}{m}H_{\mu^{(n)}}(\xi^{(m)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})
&\geq
\frac{1}{n}\int \log N(\mathcal{P}^{(n)}\mid \mathcal{Q}^{(n)})(\omega) d\mathbb{P}-\frac{3m}{n}\log q
\end{align*}
Replacing the sequence $\{n\}$ by the above selected subsequence $\{n_j\}$ and letting $j\rightarrow\infty$, by Lemma \ref{lem2},
\begin{equation*}
\frac{1}{m}H_{\mu_{\mathcal{Q}}}(\xi^{(m)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})
\geq \liminf_{j\rightarrow \infty}\frac{1}{n_j}\int \log N(\mathcal{P}^{(n_j)}\mid \mathcal{Q}^{(n_j)})(\omega) d\mathbb{P}.
\end{equation*}
Then
$$
\frac{1}{m}H_{\mu_{\mathcal{Q}}}(\xi^{(m)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})\geq h(\Theta\mid \mathcal{Q})-\frac{1}{k}.
$$
By letting $m\rightarrow \infty$, one gets
$h_{\mu_{\mathcal{Q}}}(\pi_{\mathcal{E}_2}^{-1}\mathcal{R}\mid \mathcal{A}_{\mathcal{E}^{(2)}}) \geq h(\Theta\mid \mathcal{Q})-\frac{1}{k}.$
Let $\mathcal{R}_1\prec \cdots\prec \mathcal{R}_n\prec \cdots $ be an increasing sequence of finite measurable partitions with $\bigvee_{i=1}^{\infty}\mathcal{R}_i=\mathcal{A}$; by Lemma 1.6 in \cite{Kifer} one has
$h_{\mu_{\mathcal{Q}}}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}}) \geq h(\Theta\mid \mathcal{Q}),$ which shows that the measure $\mathcal{Q}$ satisfies the property (i).
For the other part of this proposition, let $n\in\mathbb{N}$. Recall that $Q\in \mathcal{Q}^{(n)}$ and notice that $\mathcal{Q}^{(n)}\succ (\Theta^j)^{-1}\mathcal{Q}$ for all $0\leq j<n$.
Let $Q^{(2)}=\{(\omega,x,y)\in \mathcal{E}^{(2)}: x, y\in Q(\omega)\}$ and $Q_i^{(2)}=\{(\omega,x,y)\in \mathcal{E}^{(2)}: x, y\in Q_i(\omega)\}$, $1\leq i\leq k$. All of them are the measurable subsets of $\mathcal{E}^{(2)}$ with the product $\sigma-$algebra $\mathcal{F}\times \mathcal{B}^2$, and $Q^{(2)}$ is contained in $(\Theta^{(2)})^{-j}Q_i^{(2)}$ for some $1\leq i \leq k$ and $0\leq j<n$.
It follows from the construction of $\mu^{(n)}$ that
\begin{align*}
\mu^{(n)}(\bigcup_{i=1}^kQ_i^{(2)})=\frac{1}{n}\sum_{j=0}^{n-1}\sigma^{(n)}\big(
(\Theta^{(2)})^{-j}(\bigcup_{i=1}^kQ_i^{(2)})
\big)\\
\geq \frac{1}{n}\sum_{j=0}^{n-1}\sigma^{(n)}(Q^{(2)})=\sigma^{(n)}(Q^{(2)})=1.
\end{align*}
Then $$\mu^{(n)}\big(
\bigcup_{i=1}^k\{(\omega, x, y): x, y \in \overline{Q_i}(\omega)\}
\big)=1.$$
Therefore the probability measure $\mu_{\mathcal{Q}}$ satisfies the property (ii) and we complete the proof.
\end{proof}
\begin{proposition}\label{prop4}
Let $T$ be a continuous bundle RDS on $\mathcal{E}$. There exists one probability measure $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$, which is supported on $\{(\omega,x,x)\in \mathcal{E}^{(2)}:x\in \mathcal{E}_{\omega}\}$, and satisfies $h^*_m(\Theta^{(2)} \mid \mathcal{A}_{\mathcal{E}^{(2)}})=h^*(\Theta)$.
\end{proposition}
\begin{proof}
Let $\mathcal{Q}_1\prec \cdots \prec \mathcal{Q}_n\prec \cdots$ be an increasing sequence of open random covers of $\mathcal{E}$. Denote by $\mathcal{Q}_n=\{Q_j^{(n)}\}_{j=1}^{k_n}$. By Proposition \ref{prop3}, for each $n\in \mathbb{N}$, there exists one probability measure $\mu_n\in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$ such that $h_{\mu_n}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})\geq h(\Theta\mid \mathcal{Q}_n)-\frac{1}{k_n}$ and $\mu_n$ is supported on $\bigcup_{j=1}^{k_n}\{(\omega,x,y):x,y\in \overline{Q_j^{(n)}}(\omega)\}$.
Let $m$ be some limit point of the sequence of $\mu_n$, then $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$ (see \cite[Lemma 2.1 (i)]{Kifer2001}) and
\begin{equation*}
\limsup_{\mu\rightarrow m}h_{\mu}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})\geq \liminf_{n\rightarrow \infty}h_{\mu_n}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})\geq \inf_n h(\Theta\mid \mathcal{Q}_n)=h^*(\Theta).
\end{equation*}
On the other hand, notice that the support of $m$
\begin{equation*}
\text{supp}m=\bigcap_{l=1}^{\infty}\bigcup_{j=1}^{k_{n_l}}\{(\omega,x,y): x, y\in \overline{Q_j^{(n_l)}}(\omega)\},
\end{equation*}
where $\{n_l\}$ is the subsequence of $\{n\}$ such that $\mu_{n_l}$ converges to $m$ in the sense of the narrow topology.
Since $\mathcal{Q}_{n_l}$ is a refining sequence of measurable partition on $\mathcal{E}$, then
\begin{equation*}
\text{supp}m=\{(\omega,x,x)\in \mathcal{E}^{(2)}:x\in \mathcal{E}_{\omega}\}.
\end{equation*}
Thus for every finite measurable partition $\xi=\{\xi_1,\cdots,\xi_k\}$ on $\mathcal{E}$,
\begin{equation*}
m(\pi_{\mathcal{E}_1}^{-1}\xi_i)=m(\pi_{\mathcal{E}_1}^{-1}\xi_i\cap \text{supp}m)=m(\pi_{\mathcal{E}_2}^{-1}\xi_i),\, 1\leq i\leq k.
\end{equation*}
This means $\pi_{\mathcal{E}_1}^{-1}\xi$ and $\pi_{\mathcal{E}_2}^{-1}\xi$ coincide up to sets of $m-$measure zero.
Observe that $E(1_{\pi_{\mathcal{E}_1}^{-1}\xi_i}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=1_{\pi_{\mathcal{E}_1}^{-1}\xi_i}$ $\mathbb{P}-$a.s. for all $1\leq i\leq k$.
Then
\begin{equation*}
H_m(\pi^{-1}_{\mathcal{E}_2}\xi\mid \mathcal{A}_{\mathcal{E}^{(2)}})=H_m(\pi^{-1}_{\mathcal{E}_1}\xi\mid \mathcal{A}_{\mathcal{E}^{(2)}})=0,
\end{equation*}
and $h_m(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=0$ by the definition of the relative entropy.
Hence,
\begin{equation*}
h^*_m(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=\limsup_{\mu\rightarrow m}h_{\mu}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})-h_m(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})\geq h^*(\Theta).
\end{equation*}
By Theorem \ref{prop2}, $h^*_m(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})\leq h^*(\Theta)$ and we complete the proof.
\end{proof}
The variational principle stated in Theorem \ref{theo1} follows directly from Theorem \ref{prop2} and Proposition \ref{prop4}.
We are now in a position to prove that the relative tail entropy of a continuous bundle RDS is equal to that of its factor under the principal extension.
\begin{proof}[Proof of Theorem \ref{th3}]
Denote by
$\mathcal{G}^{(2)}=\{(\omega, y, z): y, z\in \mathcal{G}_{\omega}\}$, which is a measurable subset of $\Omega\times Y\times Y$ with respect to the product $\sigma-$algebra $\mathcal{F}\times \mathcal{C}^2$.
Let $\phi:\mathcal{G}^{(2)}\rightarrow \mathcal{E}^{(2)}$ be the map induced by the factor transformation $\pi$ as $\phi (\omega,y,z)=(\omega,\pi_{\omega}y,\pi_{\omega}z)$. Then $\phi$ is a factor transformation from $(\Lambda^{(2)},\mathcal{G}^{(2)})$ to $(\Theta^{(2)},\mathcal{E}^{(2)})$.
Let $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G}^{(2)})$ and $\alpha:\mathcal{G}^{(2)}\rightarrow\mathcal{G}$ be the natural projection defined as
$\alpha(\omega,y,z)=(\omega,y)$.
By the equality 4.18 in \cite{Down2011}, for each $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G}^{(2)})$,
$h_m(\Lambda^{(2)}\mid \mathcal{D}_{\mathcal{G}^{(2)}})=h_{\alpha m}(\Lambda,\mathcal{G}),$
where $h_{\alpha m}(\Lambda, \mathcal{G})$ is the usual measure-theoretical entropy.
Let $\beta:\mathcal{E}^{(2)}\rightarrow\mathcal{E}$ be the natural projection defined as
$\beta(\omega,x,u)=(\omega,x)$.
Then $\phi m \in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$ and $h_{\phi m}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=h_{\beta(\phi m)}(\Theta, \mathcal{E}) $.
Notice that $\pi\alpha=\beta\phi$;
one obtains $h_{\beta(\phi m)}(\Theta, \mathcal{E})=h_{\pi\alpha m}(\Theta, \mathcal{E}) $.
Since the continuous bundle RDS $S$ is a principal extension of the RDS $T$ via the factor transformation $\pi$,
by the Abramov-Rokhlin formula (see \cite{BogenCrau,Liu2005})
one has $h_{\pi\alpha m}(\Theta, \mathcal{E})=h_{\alpha m}(\Lambda,\mathcal{G})$.
It follows that
$h_m(\Lambda^{(2)}\mid \mathcal{D}_{\mathcal{G}^{(2)}})=h_{\phi m}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})$,
and then
$h^*_m(\Lambda^{(2)}\mid \mathcal{D}_{\mathcal{G}^{(2)}})=h^*_{\phi m}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})$.
Thus by Theorem \ref{theo1},
$$h^*(\Lambda)=\max_{m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G}^{(2)})}h^*_m(\Lambda^{(2)}\mid \mathcal{D}_{\mathcal{G}^{(2)}})\leq \max_{\mu\in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})}h^*_{\mu}(\Theta^{(2)}\mid \mathcal{A}_{\mathcal{E}^{(2)}})=h^*(\Theta).$$
For each $\mu \in \mathcal{I}_{\mathbb{P}}(\mathcal{E}^{(2)})$, by Proposition \ref{prop33}, there exists some $m\in \mathcal{I}_{\mathbb{P}}(\mathcal{G}^{(2)})$ such that $\phi m=\mu.$
Therefore the reverse inequality also holds and we complete the proof.
\end{proof}
\end{document} |
\begin{document}
\title{A non-abelian, non-Sidon, completely bounded $\Lambda(p)$ set}
\author{Kathryn E. Hare}
\address{Dept. of Pure Mathematics\\
University of Waterloo\\
Waterloo, Ont., \\
Canada}
\email{kehare@uwaterloo.ca}
\thanks{This research was supported in part by NSERC\ grant RGPIN 2016-03719}
\author{Parasar Mohanty}
\address{Dept. of Mathematics and Statistics\\
Indian Inst. of Tech.\\
Kanpur, India, 208016}
\email{parasar@iitk.ac.in}
\subjclass{Primary: 43A46, 43A30; Secondary: 42A55}
\begin{abstract}
The purpose of this note is to construct an example of a discrete
non-abelian group $G$ and a subset $E$ of $G$, not contained in any abelian
subgroup, that is a completely bounded $\Lambda (p)$ set for all $p<\infty ,$
but is neither a Leinert set nor a weak Sidon set.
\end{abstract}
\maketitle
\section{Introduction}
The study of lacunary sets, such as Sidon sets and $\Lambda (p)$ sets,
constitutes an interesting theme in the theory of Fourier series on the
circle group ${\mathbb{T}}$. It has many applications in harmonic analysis
and in the theory of Banach spaces, and various combinatorial and arithmetic
properties of these sets have been studied extensively. These concepts have
also been investigated in the context of more general compact abelian groups
(with their discrete dual groups) and compact non-abelian groups; see \cite
{GH}, \cite{LR}, \cite{R} and the references cited therein. The study of
these sets in the setting of discrete non-abelian groups was pioneered by
Bo\.{z}ejko \cite{B}, Fig\`{a}-Talamanca \cite{FP} and Picardello \cite{Pi}.
In abelian groups, there are various equivalent ways to define Sidon sets
and these sets are plentiful. Indeed, every infinite subset of a discrete
abelian group contains an infinite Sidon set. The natural analogues of these
definitions in discrete non-abelian groups are known as strong Sidon, Sidon
and weak Sidon sets. It was shown in \cite{Pi} that every weak Sidon set is $
\Lambda (p)$ for all $p<\infty $. In \cite{Le} Leinert introduced the
concept of a $\Lambda (\infty )$ set, a notion only of interest in the
non-abelian setting because in abelian groups such sets are necessarily
finite. In striking contrast to the abelian situation, Leinert showed that
the free group with two generators contains an infinite subset which is both
weak Sidon and $\Lambda (\infty ),$ but does not contain any infinite Sidon
subsets.
In \cite{Ha}, Harcharras studied the concept of completely bounded $\Lambda
(p)$ sets, a property more restrictive than $\Lambda (p),$ but still
possessed by Sidon sets. The converse is not true as every infinite discrete
abelian group admits a completely bounded $\Lambda (p)$ set which is not
Sidon; see \cite{HM}.
In this paper, we construct a non-amenable group $G$ and a set $E$ not
contained in any abelian subgroup of $G,$ which is completely bounded $
\Lambda (p)$ for every $p<\infty ,$ but is neither $\Lambda (\infty )$ nor
weak Sidon. It remains open if every infinite discrete group contains such a
set $E$.
\section{Definitions}
Throughout this paper, $G$ will be an infinite discrete group. To define
Sidon and $\Lambda (p)$ sets in this setting one requires the concepts of
the Fourier algebra, $A(G)$, the von Neumann algebra, $VN(G),$ and the
Fourier-Stieljies algebra, $B(G)$, as developed by P. Eymard in \cite{E} for
locally compact groups. We also need the concept of a non-commutative $L^{p}$
-spaces introduced by I.E. Segal. We refer the reader to \cite{PX} for
details on these latter spaces.
\begin{defn}
(i) The set $E\subseteq G$ is said to be a \textbf{strong (weak) Sidon set
} if for all $f\in c_{0}(E)$ (resp., $l_{\infty }(E))$ there exists $g\in
A(G)$ (resp., $B(G))$ such that $f(x)=g(x)\;$for all $x\in E$.
(ii) The set $E\subseteq G$ is said to be a \textbf{Sidon set }if there is a
constant $C$ such that for all functions $f,$ compactly supported in $E,$ we
have $\Vert f\Vert _{1}\leq C\Vert f\Vert _{VN(G)}$. The least such constant
$C$ is known as the \textbf{Sidon constant} of $E$.
\end{defn}
These definitions are well known to be equivalent in the commutative
setting. For any discrete group it is the case that strong Sidon sets are
Sidon and Sidon sets are weak Sidon. Finite groups are always strong Sidon
sets. In \cite{Pi} it is shown that $E\subseteq G$ is Sidon if and only if
for every $f\in $ $l_{\infty }(E)$ there is some $g\in B_{\rho }(G)$ that
extends $f$, where $B_{\rho }(G)$ is the dual of the reduced $C^{\ast }$
algebra $C_{\rho }^{\ast }(G)$. Since in an amenable group $B_{\rho
}(G)=B(G),$ weak Sidon sets are Sidon in this setting. Very recently, Wang
\cite{Wa} showed that every Sidon set in any discrete group is a strong
Sidon set. It remains open if every infinite amenable group contains an
infinite Sidon subset.\textit{\ }
Picardello \cite{Pi} defined the notion of $\Lambda (p)$ sets in this
setting and Harcharras \cite{Ha} introduced completely bounded $\Lambda (p)$
sets. For these, we require further notation. Let $\leftthreetimes $ denote
the left regular representation of $G$ into $\mathcal{B}(l_{2}(G))$ and
denote by $L^{p}(\tau _{0})$ the non-commutative $L^{p}$-space associated
with the von Neumann algebra generated by $\leftthreetimes (G)$ with respect
to the usual trace $\tau _{0}$. Let $L^{p}(\tau )$ denote the
non-commutative $L^{p}$-space associated with the von Neumann algebra
generated by $\leftthreetimes (G)\times \mathcal{B}(l_{2})$ with respect to
the trace $\tau =\tau _{0}\times tr$, where $tr$ denotes the usual trace in
$\mathcal{B}(l_{2})$. Observe that $L^{p}(\tau )$ has a canonical operator
space structure obtained from complex interpolation in the operator space
category. We refer the reader to \cite{Pis} for more details.
\begin{defn}
(i) Let $2<p<\infty $. The set $E\subseteq G$ is said to be a $\Lambda (p)$
\textbf{\ set} if there exists a constant $C_{1}>0$ such that for all
finitely supported functions $f$ we have
\begin{equation}
\left\Vert \sum\limits_{t\in E}f(t)\leftthreetimes (t)\right\Vert
_{L^{p}(\tau _{0})}\leq C_{1}\left( \sum\limits_{t\in E}|f(t)|^{2}\right) ^{
\frac{1}{2}}. \label{Lambdap}
\end{equation}
(ii) The set $E\subseteq G$ is said to be a \textbf{completely bounded }$
\Lambda (p)$\textbf{\ set, }denoted $\Lambda ^{cb}(p),$ if there exists a
constant $C_{2}>0$ such that
\begin{equation}
\Vert \sum\limits_{t\in E}\leftthreetimes (t)\times x_{t}\Vert _{L^{p}(\tau
)}\leq C_{2}\max \left( \Vert (\sum_{t\in E}x_{t}^{\ast }x_{t})^{1/2}\Vert
_{S_{p}},\Vert (\sum_{t\in E}x_{t}x_{t}^{\ast })^{1/2}\Vert _{S_{p}}\right)
\label{CBLp}
\end{equation}
where $x_{t}$ are finitely supported families of operators in $S_{p}$, the $
p $-Schatten class on $l_{2}$.
The least such constants $C_{1}$ (or $C_{2})$ are known as the $\Lambda (p)$
\textbf{\ }(resp.,\textbf{\ }$\Lambda ^{cb}(p)$\textbf{) constants} of $E$.
\end{defn}
It is known that every infinite set contains an infinite $\Lambda (p)$ set
\cite{B} and that every weak Sidon set is a $\Lambda (p)$ set for each $
p<\infty $ \cite{Pi}. \ Completely bounded $\Lambda (p)$ sets are clearly $
\Lambda (p),$ but the converse is not true, as seen in \cite{Ha}.
Extending these notions to $p=\infty $ gives the Leinert and $L$-sets.
\begin{defn}
(i) The set $E\subseteq G$ is called a \textbf{Leinert or }$\Lambda
(\infty )$\textbf{\ set} if there exists a constant $C>0$ such that for
every function $f\in l_{2}(E)$ we have $\Vert f\Vert _{VN(G)}\leq C\Vert
f\Vert _{2}$.
(ii) The sets of interpolation for the completely bounded multipliers of $
A(G)$ are called\textbf{\ }$L$\textbf{-sets}.
\end{defn}
It is well known that the Leinert sets are the sets of interpolation for
multipliers of $A(G),$ so any $L$-set is Leinert; see \cite{Po}. The set $E$
is said to satisfy the \textbf{Leinert condition} if every tuple $
(a_{1},...,a_{2s})\in E^{2s},$ with $a_{i}\neq a_{i+1},$ satisfies the
independence-like relation
\begin{equation}
a_{1}a_{2}^{-1}a_{3}\dots a_{2s-1}a_{2s}^{-1}\neq e. \label{leinert}
\end{equation}
Here $e$ is the identity of $G$. It can be shown (\cite{Po}) that any set
that satisfies the Leinert condition is an $L$-set.
It was seen in \cite{HM} that in abelian groups there are sets that are
completely bounded $\Lambda (p)$ for all $p<\infty ,$ but not Sidon. Thus
the inclusion, weak Sidon is $\Lambda ^{cb}(p),$ is strict for groups with
infinite abelian subgroups. The purpose of this paper is to show the
existence of sets not contained in \textit{any} abelian subgroup which also
have this strict inclusion. In fact, we prove, more generally, the following
result.
\begin{theorem}
There is a discrete group $G$ that admits both infinite $L$-sets and weak
Sidon sets, and an infinite subset $E$ of $G$ that is $\Lambda ^{cb}(p)$ for
all $p<\infty ,$ but not a Leinert set, an $L$-set or a weak Sidon set.
Moreover, any subset of $E$ consisting of commuting elements is finite.
\label{mainthm}
\end{theorem}
\section{Results and Proofs}
\subsection{Preliminary results}
To show that the set we will construct is not a Leinert or weak Sidon set,
it is helpful to first establish some arithmetic properties of $\Lambda (p)$
and Leinert sets. We recall that a set $E\subseteq G$ is said to be \textbf{
quasi-independent} if all the sums
\begin{equation*}
\left\{ \sum\limits_{x\in A}x:A\subset E,|A|<\infty \right\}
\end{equation*}
are distinct. Quasi-independent sets in abelian groups are the prototypical
Sidon sets.
The first part of the following Lemma is well known for abelian groups.
\begin{lemma}
Let $G$ be a discrete group. (i) Suppose $q>2$ and $E\subseteq G$ is a $
\Lambda (q)$ set with $\Lambda (q)$ constant $A$. If $a\in G$ has order $
p_{n}\geq $ $2n$, then
\begin{equation*}
\left\vert E\bigcap \{a,a^{2},...,a^{n}\}\right\vert \leq 10A^{2}n^{2/q}
\text{.}
\end{equation*}
(ii) Suppose $E\subseteq G$ is a Leinert set with Leinert constant $B$ and
let $F\subseteq E$ be a finite commuting, quasi-independent subset. Then $
\left\vert F\right\vert \leq 6^{3}B^{2}$. \label{mainlem}
\end{lemma}
\begin{proof}
We will write $1_{X}$ for the characteristic function of a set $X$.
(i) Define the function $K_{n}$ on $G$ by
\begin{equation*}
K_{n}(x)=\sum_{j=-2n}^{2n}\left( 1-\frac{\left\vert j\right\vert }{n}\right)
1_{\{a^{j}\}}(x).
\end{equation*}
Let $J_{n}$ denote the function on $\mathbb{Z}_{p_{n}}$ (or $\mathbb{Z}$ if $
p_{n}=\infty $) defined in the analogous fashion. It is well known that the $
A(G)$ and $VN(G)$ norms for the function $K_{n}$ are dominated by the
corresponding norms of the function $J_{n}$ on $\mathbb{Z}_{p_{n}}$.
As $L^{q^{\prime }}(\tau _{0})$ (for $q^{\prime }$ the dual index to $q$) is
an interpolation space between $A(G)$ and $VN(G)$, it follows that
\begin{eqnarray*}
\left\Vert K_{n}\right\Vert _{L^{q^{\prime }}(\tau )} &\leq &\left\Vert
K_{n}\right\Vert _{A(G)}^{1/q^{\prime }}\left\Vert K_{n}\right\Vert
_{VN(G)}^{1/q} \\
&=&\left\Vert J_{n}\right\Vert _{A(\mathbb{Z}_{p_{n}})}^{1/q^{\prime
}}\left\Vert J_{n}\right\Vert _{VN(\mathbb{Z}_{p_{n}})}^{1/q}\leq
(4n+1)^{1/q}\text{.}
\end{eqnarray*}
Suppose $E\bigcap \{a,a^{2},...,a^{n}\}$ consists of the $M$ elements $
\{a^{s_{j}}\}_{j=1}^{M}$ and put
\begin{equation*}
k_{n}(x)=\sum_{j=1}^{M}1_{\{a^{s_{j}}\}}(x).
\end{equation*}
Since $E$ has $\Lambda (q)$ constant $A,$ the generalized H\"{o}lder's
inequality implies
\begin{eqnarray*}
\frac{M}{2} &\leq &\sum_{j=1}^{M}K_{n}(a^{s_{j}})=\sum_{x\in
G}K_{n}(x)k_{n}(x) \\
&\leq &\left\Vert K_{n}\right\Vert _{L^{q^{\prime }}(\tau _{0})}\left\Vert
k_{n}\right\Vert _{L^{q}(\tau _{0})}\leq (4n+1)^{1/q}A\left\Vert
k_{n}\right\Vert _{2} \\
&=&(4n+1)^{1/q}A\sqrt{M}.
\end{eqnarray*}
Consequently, $M\leq 2(4n+1)^{2/q}A^{2}\leq 10A^{2}n^{2/q}$, as claimed.
(ii) Let $H$ be the abelian group generated by $F$. Being quasi-independent,
$F$ is a Sidon subset of $H$ with Sidon constant at most $6\sqrt{6}$ (\cite[
p.115]{GH}). Consider the function $h=1_{F}$ defined on $H$ and $g=1_{F}$
defined on $G$. The Sidon property, together with the fact that $\left\Vert
h\right\Vert _{VN(H)}=\left\Vert g\right\Vert _{VN(G)},$ ensures that
\begin{equation*}
\left\vert F\right\vert =\left\Vert h\right\Vert _{\ell ^{1}}\leq 6\sqrt{6}
\left\Vert h\right\Vert _{VN(H)}=6\sqrt{6}\left\Vert g\right\Vert _{VN(G)}.
\end{equation*}
Since $E$ has Leinert constant $B$, we have $\left\Vert f\right\Vert
_{VN(G)}\leq B\left\Vert f\right\Vert _{2}$ for any function $f$ defined on $
G$ and supported on $E$. In particular, this is true for the function $g\,$,
hence
\begin{equation*}
\left\vert F\right\vert \leq 6\sqrt{6}\left\Vert g\right\Vert _{VN(G)}\leq 6
\sqrt{6}B\sqrt{\left\vert F\right\vert }.
\end{equation*}
\end{proof}
\subsection{Proof of Theorem \protect\ref{mainthm}}
\begin{proof}
We will let $G$ be the free product of the cyclic groups $Z_{p_{n}}$, $n\in
N $, where $p_{n}>2^{n+1}$ are distinct odd primes. If $a_{n}$ is a
generator of $Z_{p_{n}},$ then $\{a_{n}\}_{n=1}^{\infty }$ is both a weak
Sidon and Leinert set, as shown in \cite{Pi}. The set $E$ will be the union
of finite sets $E_{n}\subseteq Z_{p_{n}}$, where $\left\vert
E_{n}\right\vert =n^{2}$ and $E_{n}\subset \{a_{n},...,a_{n}^{2^{n}}\}$. The
fact that any commuting subset of $E$ is finite is obvious from the
definition of $E$.\newline
We recall the following notation from \cite{Ha}: We say that a subset $
\Lambda \subseteq G$ has the $Z(p)$ property if $Z_{p}(\Lambda )<\infty $
where
\begin{equation*}
Z_{p}(\Lambda )=\sup_{x\in G}\left\vert \left\{ (x_{1},...,x_{p})\in \Lambda
^{p}:x_{i}\neq x_{j},x_{1}^{-1}x_{2}x_{3}^{-1}\cdot \cdot \cdot
x_{p}^{(-1)^{p}}=x\right\} \right\vert .
\end{equation*}
In \cite{Ha}, Harcharras proved that if $2<p<\infty ,$ then every subset $
\Lambda $ of $G$ with the $Z(p)$ property is a $\Lambda ^{cb}(2p).$
We will construct the sets $E_{n}$ so that they have the property that for
every even $s\geq 2$ there is an integer $n_{s}$ such that $Z_{s}\left(
\bigcup_{n\geq n_{s}}E_{n}\right) \leq s!$. Consequently, $\bigcup_{n\geq
n_{s}}E_{n}$ will be $\Lambda ^{cb}(2s)$ for all $s<\infty $. As finite sets
are $\Lambda ^{cb}(p)$ for all $p<\infty ,$ and a finite union of $\Lambda
^{cb}(p)$ sets is again $\Lambda ^{cb}(p)$, it will follow that $E$ is $
\Lambda ^{cb}(p)$ for all $p<\infty $.
We now proceed to construct the sets $E_{n}$ by an iterative argument.
Temporarily fix $n$ and take $g_{1}=a_{n}$. Inductively assume that for $
N<n^{2}$, $\{g_{i}\}_{i=1}^{N}\subseteq \{a_{n},...,a_{n}^{2^{n}}\}$ have
been chosen with the property that if
\begin{equation}
\prod\limits_{j=1}^{N}g_{j}^{\varepsilon _{j}}=1\text{ for }\varepsilon
_{j}=0,\pm 1,\pm 2, \sum_{j}|\varepsilon _{j}|\leq 2s,\text{ then all }
\varepsilon _{j}=0. \tag{$\mathcal{P}_{N}$}
\end{equation}
Now choose
\begin{equation*}
g_{N+1}\neq \prod\limits_{j=1}^{N}g_{j}^{\varepsilon _{j}}\text{ for any }
\varepsilon _{j}=0,\pm 1,\pm 2\text{ and }\sum_{j}|\varepsilon _{j}|\leq 2s
\end{equation*}
and
\begin{equation*}
g_{N+1}^{2}\neq \prod\limits_{j=1}^{N}g_{j}^{\varepsilon _{j}}\text{ for any
}\varepsilon _{j}=0,\pm 1,\pm 2\text{ and }\sum_{j}|\varepsilon _{j}|\leq 2s.
\end{equation*}
There are at most $\binom{N}{2s}5^{2s}\leq C_{s}N^{2s}$ terms that $g_{N+1}$
must avoid and similarly for $g_{N+1}^{2}$ as the squares of elements of $
Z_{p_{n}}$ are all distinct. Provided $2C_{s}N^{2s}\leq 2^{n}$ then we can
make such a choice of $g_{N+1}\in $ $\{a_{n},...,a_{n}^{2^{n}}\}$. Of
course, it is immediate that property ($\mathcal{P}_{N+1}$) then holds. This
can be done for every $N<n^{2}$ as long as $n$ is suitably large, say for $
n\geq n_{s}$. The set $E_{n}$ will be taken to be $\{g_{j}\}_{j=1}^{n^{2}}.$
Now we need to check the claim that $Z_{s}(\bigcup\limits_{n\geq
n_{s}}E_{n})\leq s!$. Towards this, suppose
\begin{equation}
x_{1}x_{2}^{-1}\cdot \cdot \cdot x_{s}^{-1}=y_{1}y_{2}^{-1}\cdot \cdot \cdot
y_{s}^{-1} \label{P1}
\end{equation}
where $x_{i}$ are all distinct, $y_{j}$ are all distinct and all $
x_{i},y_{j}\in \bigcup\limits_{n\geq n_{s}}E_{n}$. The free product property
guarantees that if this is true, then it must necessarily be the case that
if we consider only the elements $x_{i_{k}}$ and $y_{j_{l}}$ which belong to
a given $E_{n}$, we must have $\prod\limits_{k}x_{i_{k}}^{\delta _{k}}=$ $
\prod\limits_{l}y_{j_{l}}^{\varepsilon _{l}}$ for the appropriate choices of
$\delta _{k},\varepsilon _{l}\in \{\pm 1\}$. As there are at most $s$ choices
for each of $x_{i_{k}}$ and $y_{i_{l}}$, our property ($\mathcal{P}_{N}$)
ensures that this can happen only if $\{x_{i_{k}}:\delta _{k}=1\}$ $
=\{y_{j_{l}}:\varepsilon _{l}=1\}$ and similarly for the terms with $-1$
exponents. Hence we can only satisfy (\ref{P1}) if upon reordering, $
\{x_{1},x_{3},...,x_{s-1}\}=\{y_{1},y_{3},...,y_{s-1}\},$ and similarly for
the terms with even labels. (We remark that for non-abelian groups, this is
only a necessary but not, in general, sufficient condition for (\ref{P1}).)
This suffices to establish that
\begin{equation*}
Z_{s}(\bigcup\limits_{n\geq n_{s}}E_{n})\leq ((s/2)!)^{2}\leq s!
\end{equation*}
and hence, as explained above, $E$ is a $\Lambda ^{cb}(p)$ set for all $
p\,<\infty $.
Next, we will verify that $E$ is not a weak Sidon set. We proceed by
contradiction. According to \cite{Pi}, if it was, then $E$ would be a $
\Lambda (p)$ set for each $p>2,$ with $\Lambda (p)$ constant bounded by $C
\mathbf Sqrt{p}$ for a constant $C$ independent of $p$. Appealing to Lemma \ref
{mainlem}(i), we have
\begin{equation*}
n^{2}=\left\vert E_{n}\right\vert =\left\vert E\bigcap
\{a_{n},...,a_{n}^{2^{n}}\}\right\vert \leq 10C^{2}p2^{2n/p}.
\end{equation*}
Taking $p=2n$ for sufficiently large $n$ gives a contradiction.
Finally, to see that $E$ is not a Leinert set, we first observe that an easy
combinatorial argument shows that any set of $N$ distinct elements contains
a quasi-independent subset of cardinality at least $\log N/\log 3$. Thus we
can obtain quasi-independent subsets $F_{n}\subseteq E_{n}$ with $\left\vert
F_{n}\right\vert \rightarrow \infty $. But according to Lemma \ref{mainlem}
(ii), this would be impossible if $E$ was a Leinert set. As $E$ is not
Leinert, it is also not an $L$ set.
This concludes the proof.
\end{proof}
\
\end{document} |
\begin{document}
\title{List Decodable Learning via Sum of Squares}
\author{Prasad Raghavendra\thanks{University of California, Berkeley, research supported by NSF Grant CCF 1718695.} \and Morris Yau\thanks{University of California, Berkeley, research supported by NSF Grant CCF 1718695.}}
\maketitle
\thispagestyle{empty}
\begin{abstract}
In the list-decodable learning setup, an overwhelming majority (say a $1-\beta$-fraction) of the input data consists of outliers and the goal of an algorithm is to output a small list $\mathcal L$ of hypotheses such that one of them agrees with inliers.
We develop a framework for list-decodable learning via the Sum-of-Squares SDP hierarchy
and demonstrate it on two basic statistical estimation problems
\begin{itemize}
\item {\em Linear regression:} Suppose we are given labelled examples $\{(X_i,y_i)\}_{i \in [N]}$ containing a subset $S$ of $\beta N$ {\em inliers} $\{X_i \}_{i \in S}$ that are drawn i.i.d. from standard Gaussian distribution $N(0,I)$ in $\mathbb R^d$, where the corresponding labels $y_i$ are well-approximated by a linear function $\ell$. We devise an algorithm that outputs a list $\mathcal L$ of linear functions such that there exists some $\hat{\ell} \in \mathcal L$ that is close to $\ell$.
This yields the first algorithm for linear regression in a list-decodable setting. Our results hold for any distribution of examples whose concentration and anticoncentration can be certified by Sum-of-Squares proofs.
\item {\em Mean Estimation:}
Given data points $\{X_i\}_{i \in [N]}$ containing a subset $S$ of $\beta N$ {\em inliers} $\{X_i \}_{i \in S}$ that are drawn i.i.d. from a Gaussian distribution $N(\mu,I)$ in $\mathbb R^d$, we devise an algorithm that generates a list $\mathcal L$ of means such that there exists $\hat{\mu} \in \mathcal L$ close to $\mu$.
The recovery guarantees of the algorithm are analogous to the existing algorithms for the problem by
Diakonikolas et al.\xspace \cite{diakonikolas2018list} and Kothari et al.\xspace \cite{kothari2017better}.
\end{itemize}
In an independent and concurrent work, Karmalkar et al.\xspace \cite{KlivansKS19} also obtain an algorithm for list-decodable linear regression using the Sum-of-Squares SDP hierarchy.
\end{abstract}
\tableofcontents
\section{Introduction}
The presence of outliers in data poses a fundamental challenge to algorithms for high-dimensional statistical estimation.
While robust statistics have been explored extensively for several decades now \cite{huber2011robust}, a flurry of recent work starting with \cite{klivans2009learning, awasthi2014power, lai2016agnostic,diakonikolas2016robust} have led to new robust algorithms for high-dimensional statistical tasks such as mean estimation, covariance estimation, linear regression and learning linear separators.
More recently, a promising line of work \cite{hopkins2018mixture,kothari2017outlier,kothari2017better,klivans2018efficient} has brought to bear the sum-of-squares SDP hierarchy on problems from robust statistics, resulting in new algorithms under fairly minimal assumptions.
Continuing this line of inquiry, we further develop the SoS SDP based approach to robust statistics. Specifically, we develop a framework for list-decodable learning via the SoS SDP hierarchy.
We demonstrate the framework by devising the first polynomial-time algorithm for linear regression that can extract an underlying linear function even in the presence of an overwhelming majority of outliers.
Linear regression is a cornerstone problem in statistics, and the underlying optimization problem is perhaps the central example of convex optimization.
In the classical setup for linear regression, the input data consists of labelled examples $\{(X_i,y_i)\}_{i \in [N]}$ where $\{X_i\}_{i \in [N]}$ are drawn i.i.d. from a distribution $\mathcal D$ over $\mathbb R^d$, and the labels $\{y_i\}_{i \in [N]}$ are noisy evaluations of a linear function.
Specifically, the labels $y_i$ are given by $y_i = \iprod{\hat{\epsilonilonll},X_i} + \gamma_i$ where $\gamma_i$ denotes the noise.
The goal is to recover an estimate $\epsilonilonll$ of the linear function $\hat{\epsilonilonll}$.
In its simplest form, the distribution $\mathcal D = N(0,\Id)$ is the standard Gaussian measure, the noise $\gamma_i$ is mean zero and independent of the example $X_i$.
The linear function $\hat{\epsilonilonll}$ can be recovered (up to statistical deviations) by minimizing the squared loss namely,
$$ \epsilonilonll = \argmin \E_{(X,y) \sim \mathcal D}[ (\iprod{\epsilonilonll,X}-y)^2] \,. $$
From an algorithmic standpoint, the realizable setting of linear regression is fairly well understood.
The focus of this work is on algorithms for linear regression that are robust to the presence of outliers.
While there is an extensive literature on robust linear regression (see \cite{rousseeuw2005robust,bhatia2015robust,bhatia2017consistent} and the references therein), there are no algorithms that are robust to an overwhelming majority of outliers.
Concretely, consider the following problem setup: we are given labelled examples $\{(X_i,y_i)\}_{i \in [N]}$ such that a $\boldsymbol{e}ta$-fraction of these examples are drawn from the underlying distribution, while the remaining $(1-\boldsymbol{e}ta)$-fraction of examples are adversarially chosen. Formally, let us suppose $\boldsymbol{e}ta N$ examples are drawn from the distribution with $X \sim N(0,\Id)$ and $y = \hat{\epsilonilonll}(X)+\gamma$, while the rest of the examples are arbitrary.
For $\boldsymbol{e}ta < \frac{1}{2}$, it is information theoretically impossible to estimate the linear function $\hat{\epsilonilonll}$, since the input data can potentially be consistent with $\frac{1}{\boldsymbol{e}ta}$-different linear functions $\epsilonilonll$.
It is natural to ask if an efficient algorithm can recover a small list of candidate linear functions $\mathcal L = \{ \epsilonilonll_1,\ldots,\epsilonilonll_t\}$ such that one of them is close to $\epsilonilonll$.
The learning model such as the one above where the goal of the algorithm is to find a small list of candidate hypotheses is referred to as {\em list-decodable} learning.
This model was introduced by Balcan et al.\xspace \cite{BalcanBV08} in the context of clustering, and has been the subject of a line of work \cite{CharikarSV17,diakonikolas2018list,SteinhardtVC16, SteinhardtKL17,kothari2017outlier} in the recent past.
The problem of linear regression in the setup of list-decodable learning had remained open.
\cclassmacro{P}aragraph{Our Results}
In this work, we use the sum-of-squares SDP hierarchy to devise an efficient algorithm for the list-decodable linear regression problem.
Formally, we show the following result.
\boldsymbol{e}gin{theorem}\leftarrowbel{thm:main}
There is an algorithm $\mathcal A$ such that the following holds for every $\boldsymbol{e}ta > 0$.
Suppose for a sequence of labelled examples $\{ (X_i,y_i) \}_{i \in [N]}$, there exists a linear function $\hat{\epsilonilonll}(X) = \iprod{\hat{\epsilonilonll},X}$ and a subset $S \rhoet [N]$ of $\boldsymbol{e}ta N$ examples such that,
\boldsymbol{e}gin{enumerate}
\emem For any $\epsilonilonpsilon > 0$, the $k^{th}$ empirical moments of $\{X_i\}_{i \in S}$ are close to those of the underlying distribution of examples for each $k = 1 \ldots K$, i.e.,
$$ \norm{ \E_{i \in S} X_i^{\otimesimes k} - \E_{X \sim \mathcal D} X^{\otimesimes k} }_2 \leq \epsilonilonpsilon $$
for some $K = O(1/\boldsymbol{e}ta^4)$.
\emem The injective tensor norm of the covariates is bounded. That is to say for all degree $D \geq 4$ pseudoexpectations $\cclassmacro{P}E$ over indeterminate $v_1,...,v_d$ the fourth injective tensor norm
$$\sststile{4}{} \E_{X \sim \mathcal D} \iprod{X,v}^4 \leq B\norm{v}^4$$
is certifiably upper bounded by a constant $B$. For standard Gaussians, $B = 3$. More generally, any distribution satisfying a \textit{Poincar\'e} inequality has certifiably upper bounded injective tensor norms; see \cite{kothari2017better}.
\emem The empirical loss of $\hat{\epsilonilonll}$ is $(2,4)$-hypercontractive, i.e.,
$$ \E_{i \in S} (y_i - \iprod{\hat{\epsilonilonll}, X_i})^4 \leq g \cdot \left( \E_{i \in S} (y_i - \iprod{\hat{\epsilonilonll},X_i})^2\right)^2 $$
for some constant $g$.
\epsilonilonnd{enumerate}
Then the algorithm $\mathcal A$ running on the set of examples $\{(X_i,y_i)\}_{i \in [N]}$ outputs a list of $O\left(\left(\frac{\norm{\hat{\epsilonilonll}}}{\sigma}\right)^{\log 1/\boldsymbol{e}ta}\right)$ candidate linear functions $\mathcal L$ such that there exists $\epsilonilonll \in \mathcal L$ satisfying
$$ \norm{\epsilonilonll -\hat{\epsilonilonll}}_2 \leq O\left(\frac{\sigma}{\boldsymbol{e}ta^{3/2}}\right) $$
where $\sigma^2 \textsuperscript{st}\xspaceackrel{\mathrm{def}}= \E_{i \in S} (y_i - \iprod{\hat{\epsilonilonll},X_i})^2 $. The runtime of the algorithm $\mathcal A$ is $\left(\frac{\norm{\hat{\epsilonilonll}}}{\sigma}\right)^{\log 1/\boldsymbol{e}ta} \cdot N^{O(1/\boldsymbol{e}ta^4)}$ for $N = d^{O(\frac{1}{\boldsymbol{e}ta^4})}$.
\epsilonilonnd{theorem}
Even in the absence of outliers, the information theoretic limit on the accuracy is $\norm{\epsilonilonll - \hat{\epsilonilonll}} = \Omega(\sigma)$. To interpret the list size and runtime bounds, consider the setting $\boldsymbol{e}ta = 1/4$, $\norm{\hat{\epsilonilonll}} = 1$ and noise rate $\sigma = 10^{-7}$. In this case, the linear function $\hat{\epsilonilonll}$ is specified by an arbitrary point in the unit ball, and the algorithm finds a constant-sized list such that one of the points $\epsilonilonll$ in the ball satisfies $\norm{\epsilonilonll-\hat{\epsilonilonll}} < 0.01$.
More generally, $\left(\frac{\norm{\hat{\epsilonilonll}}}{{\sigma}}\right)^d$ would be the size of a $\sigma$-net for the ball of radius $\norm{\hat{\epsilonilonll}}$, but the list size is a fixed polynomial in $ \frac{\norm{\hat{\epsilonilonll}}}{{\sigma}}$.
Our results on linear regression apply to a broader class of probability distributions on examples we term {\em "SoS certifiably anti-concentrated"} (see \cclassmacro{P}rettyref{def:anticoncentration}).
Informally, these are probability distributions $\mathcal D$ that admit a sum-of-squares proof of their anti-concentration along every direction.
Sum-of-Squares SDPs yield a unified framework for statistical estimation tasks \cite{hopkins2018mixture,kothari2017outlier,kothari2017better,klivans2018efficient} through the notion of identifiability proofs.
Roughly speaking, if there exists a sum-of-squares proof that the statistical parameter of interest is {\em identifiable} from the data, then the sum-of-squares SDP can be utilized to estimate the statistic.
In the setting of list decodable learning, the parameter of interest---say,
the underlying linear function---is not uniquely determined by the data, thus breaking the paradigm of SoS proof of identifiability.
Instead, the SoS SDP solution is potentially a convex combination of the different hypotheses on the list. Therefore, a list-decodable learning algorithm via SoS SDP will have to involve some randomized rounding to isolate one hypothesis from the mixture.
We use the technique of conditioning \cite{DBLP:conf/focs/BarakRS11, DBLP:conf/soda/RaghavendraT12} to randomly isolate one hypothesis from the SoS SDP solution.
More precisely, our algorithm iteratively conditions the SoS SDP solution on specific data points being inliers.
The analysis of the algorithm argues that after conditioning on a small number of appropriately chosen data points being inliers, the SoS SDP solution is more or less supported on a unique hypothesis, that we can output.
The framework of rounding by iterative conditioning can also be applied to list-decodable mean estimation problem. In the mean-estimation problem, a $\boldsymbol{e}ta$-fraction of inliers in a set of $N$ data points $\{ X_1,\ldots, X_N\}$ are sampled from a distribution $\mathcal D$. The goal is to recover a list of points $\{\hat{\mu}_1,\ldots, \hat{\mu}_t \}$ such that one of them is close to mean of the inliers. Diakonikolas et al.\xspace \cite{diakonikolas2018list} devise an algorithm for the problem when $\mathcal D$ is a spherical Gaussian, while Kothari and Steinhardt \cite{kothari2017outlier} solve it on a broader class of distributions referred to as SoS-certifiable distributions.
A probability distribution $\mathcal D$ is $(2k,B)$-SoS certifiable if the polynomial inequality $\E_{X \sim \mathcal D}[\iprod{v,X - \E[X]}^{2k}] \leq B^{2k} \norm{v}^{2k}$ admits a sum-of-squares proof. Similarly, an empirical distribution $\{X_i\}_{i \in S}$ is said to be $(2k,B)$-SoS certifiable if $\E_{i \in S} [\iprod{v,X_i - \E[X_i]}^{2k}] \leq B^{2k} \norm{v}^{2k}$ admits a sum-of-squares proof.
By applying our framework, we recover an algorithm for list-decodable mean estimation for SoS-certifiable distributions analogous to the work of \cite{kothari2017outlier}.
Formally, we show the following.
\boldsymbol{e}gin{theorem} \leftarrowbel{thm:main-mean}
There is an algorithm $\mathcal A$ such that the following holds for every $\boldsymbol{e}ta > 0$.
Suppose for a sequence of labelled examples $\{ X_i \}_{i \in [N]}$, there exists a subset $S \rhoet [N]$ of $\boldsymbol{e}ta N$ examples such that
the empirical distribution $\{X_i \}_{i \in S}$ is $(2k,B)$-SoS certifiable. Then the algorithm $\mathcal A$ returns a list $\mathcal L$ of points of length $\cclassmacro{P}oly(\frac{1}{\boldsymbol{e}ta})$ such that there exists a point $\mu \in \mathcal L$ with $\norm{\mu - \E_{i \in S} X_i} \leq O(\frac{B}{\boldsymbol{e}ta^{1/k}})$.
The runtime of the algorithm is $d^{\cclassmacro{P}oly(\frac{1}{\boldsymbol{e}ta})}$.
\epsilonilonnd{theorem}
\cclassmacro{P}aragraph{Related Work}
\cclassmacro{P}aragraph{List Decodable Learning}
Balcan et al.\xspace \cite{BalcanBV08} introduced the notion of list-decodable learning, specifically, the notion of list-clustering. Charikar et al.\xspace \cite{CharikarSV17} formally defined the notions of list-decodable learning and semi-verified learning, and showed that learning problems in the two models reduce to one another.
Charikar et al.\xspace \cite{CharikarSV17} obtained algorithms for list-decodable learning in the general setting of stochastic convex optimization, and applied the algorithm to a variety of settings including mean estimation, density estimation and planted partition problems (also see \cite{SteinhardtVC16, SteinhardtKL17}).
The same model of {\em list-decodable learning} has been studied for the case of mean estimation \cite{kothari2017outlier} and Gaussian mixture learning \cite{kothari2017better,diakonikolas2018list}.
\cclassmacro{P}aragraph{Linear Regression}
Several heuristics have been developed for modifying the ordinary least squares objective with the intent of minimizing the effect of outliers (see \cite{rousseeuw2005robust}).
Often, the terminology of ``robust regression'' is used to refer to a more restricted noise model where only the labels are allowed
to be corrupted adversarially (see \cite{nguyen2013exact,nguyen2013robust, bhatia2015robust, bhatia2017consistent}).
The work of Bhatia et al.\xspace \cite{bhatia2017consistent} yields an algorithm for regression when the noise introduced is oblivious to the examples, but with a desirable property called consistency, in that the error rate approaches zero with increasing sample size.
There are several works on regression in the more stringent noise models.
Balakrishnan et al.\xspace \cite{balakrishnan2017computationally} devise algorithms for sparse linear regression in Huber's contamination model.
Diakonikolas et al.\xspace \cite{diakonikolas2019efficient} and Klivans et al.\xspace \cite{klivans2018efficient} yield algorithms in the most stringent noise models where both the examples and the labels can be arbitrarily corrupted.
The latter appeals to SoS SDP relaxations and is applicable to a broad class of distributions under very minimal assumptions.
All of the work described above applies at small noise rates, where the total fraction of corruptions is bounded by a small constant.
In a setting where the outliers are an overwhelming majority of the samples, linear regression algorithms have been studied for recovering a sparse vector $x$ \cite{wright2010dense, nguyen2013exact,nguyen2013robust}.
Finally, Hardt and Moitra \cite{hardt2013algorithms} consider a related problem of robust subspace recovery where a fraction of the samples lie within a $d$-dimensional subspace of $\mathbb R^n$. They devise an algorithm when there are at least $d/n$-fraction of inliers (which corresponds to $(1-1/n)$-fraction of inliers for linear regression). Furthermore, they show that if we make no additional distributional assumptions on the points, then it is computationally intractable to solve robust subspace recovery with less than $d/n$-fraction of inliers under the Small-Set Expansion Hypothesis.
In an independent and concurrent work, Karmalkar et al.\xspace \cite{KlivansKS19} also devise algorithms for list-decodable linear regression using the sum-of-squares SDP hierarchy. The runtime and recovery guarantees of the algorithm are very similar to this work.
\iffalse
\cclassmacro{P}aragraph{Subspace Clustering}
In this case, the data arises of a mixture of several structured sources. Heterogeneity poses significant algorithmic challenges as is evidenced by the difference between learning a single Gaussian to that of learning a mixture of Gaussians.
For the example at hand, heterogeneity would correspond to all the data points lying close to one of a few different subspaces. This problem known as subspace clustering has received significant attention \cite{soltanolkotabi2012geometric,lerman2015robust}, and has numerous applications in computer vision including hand-writing recognition \cite{hastie1998metrics}, motion segmentation \cite{vidal2008multiframe}
, image segmentation \cite{yang2008unsupervised}, face clustering \cite{ho2003clustering}, image representation and
compression \cite{hong2006multiscale}.
Elhamifar and Vidal \cite{elhamifar2009sparse} have introduced an approach to subspace clustering, which relies on
ideas from the sparsity and compressed sensing literature, please see also the longer version \cite{elhamifar2013sparse}
Sparse subspace clustering
(SSC) \cite{elhamifar2009sparse,elhamifar2013sparse} is computationally efficient since it amounts to solving a sequence of L1 minimization problems and is, therefore, tractable. Continuing on
this line of work, \cite{soltanolkotabi2012geometric} showed that good theoretical performance could be achieved under broad
circumstances. In particular, if the subspaces are random, and points on them are random, one can recover as long as subspaces are of dimension $n/logn$. Same work shows that the algorithm also works when there are uniformly random outliers.
However, the model supporting the theory in \cite{soltanolkotabi2012geometric} is still noise free.
This paper considers the subspace clustering problem in the presence of noise. We introduce
a tractable clustering algorithm, which is a natural extension of SSC, and develop rigorous theory
about its performance. In a nutshell, we propose a statistical mixture model to represent data
lying near a union of subspaces, and prove that in this model, the algorithm is effective as long as
there are sufficiently many samples from each subspace and that the subspaces are not too close to
each other. In this theory, the performance of the algorithm is explained in terms of interpretable
and intuitive parameters such as (1) the values of the principal angles between subspaces, (2) the
number of points per subspace, (3) the noise level and so on. In terms of these parameters, our
theoretical results indicate that the performance of the algorithm is in some sense near the limit of
what can be achieved by any algorithm, regardless of tractability.
\cite{soltanolkotabi2014robust} analyze the algorithm in the presence of noise. See \cite{soltanolkotabi2014robust} for a survey of subspace clustering problem and various approaches to it.
\boldsymbol{e}gin{itemize}
\emem {\em Presence of few outliers}
In this case, the number of outliers is sufficiently small in that the underlying subspace containing a significant fraction of the points is more or less still uniquely determined. This problem has been extensively studied under {\em robust linear regression} or {\em robust subspace recovery}, and there is a vast body of literature on it. We refer the reader to \cite{hardt2013algorithms,klivans2018efficient} for a brief survey on the topic.
\epsilonilonnd{itemize}
\fi
\section{Preliminaries}
\leftarrowbel{sec:prelims}
\rhoection{Proofs to Algorithms: Identifiability and Why it Fails}
At a high level, the proofs-to-algorithms method sets up a system of polynomial equalities and inequalities $\mathcal P = \{f_1(x) = 0,f_2(x) = 0,...,f_m(x) = 0, g_1(x) \geq 0, g_2(x) \geq 0, ..., g_n(x) \geq 0\}$ and aims to output a solution $\textsuperscript{th}\xspaceeta$ to $\mathcal P$. Here we think of $\textsuperscript{th}\xspaceeta$ as a statistical parameter, which in our case is either a mean estimate or a hyperplane. In general, this is too much to ask for, as the solution set of $\mathcal P$ may be nonconvex and admit no discernible structure. The SoS hierarchy is a powerful tool in convex optimization, designed to approximately solve polynomial systems. The hierarchy is parameterized by its ``degree'' $k$. The degree corresponds to the size of a Semidefinite Program (SDP) used to solve for solutions to $\mathcal P$. The hope is that with higher degree, larger SDPs can obtain sharper approximations to $\textsuperscript{th}\xspaceeta$. Thus, an immediate hurdle in designing efficient algorithms is to control $k$ with respect to the desired approximation guarantee.
In general, outputting a solution $\textsuperscript{th}\xspaceeta$ even approximately is still too much to ask for. Instead, the SoS algorithm aims to output a fake distribution or ``pseudodistribution'' over solutions to $\mathcal P$. Furthermore, the SoS algorithm returns only the moments of degree up to $k$ of the pseudodistribution $\zeta$. That the pseudodistribution is not a true distribution lies at the heart of obtaining computationally efficient algorithms from SoS. Thus, it can be said that pseudodistributions are relaxations of actual probability distributions over the solution set of $\mathcal P$. We will defer discussion of pseudodistributions and their dual objects, pseudoexpectations, to Section~\ref{SoS-toolkit}.
In the context of unsupervised learning, the goal is to estimate a parameter $\textsuperscript{th}\xspaceeta'$ from samples $x_1,...,x_n$. Identifiability refers to the property that any solution $\textsuperscript{th}\xspaceeta$ to $\mathcal P$ is close to the true parameter $\textsuperscript{th}\xspaceeta'$, i.e., $\norm{\textsuperscript{th}\xspaceeta - \textsuperscript{th}\xspaceeta'}$ is small. Furthermore, if this identifiability is captured by a sufficiently simple proof (a low-degree SoS proof), then up to rounding issues $\textsuperscript{th}\xspaceeta$ can be found efficiently. This paradigm has been immensely successful in designing SoS algorithms in machine learning settings.
A key challenge for the list decoding problem is that even if it were possible to output a true distribution $\upsilon$ over solutions to $\mathcal P$, another $\boldsymbol{e}ta$ fraction of the dataset can imitate a solution to $\mathcal P$. Thus, direct identifiability fails. A natural fix would be to sample from the distribution $\upsilon$ in the hopes of finding a region of substantial probability mass around $\textsuperscript{th}\xspaceeta'$. The analogue of sampling on pseudodistributions is ``rounding''. The core technical contribution of this work is developing new techniques for rounding pseudodistributions for high dimensional parameter estimation tasks. Our method, ``concentration rounding'', has its roots in conditioning SoS SDPs; see \cite{DBLP:conf/focs/BarakRS11, DBLP:conf/soda/RaghavendraT12}.
Next we present some standard tools when working with SoS and some properties of conditional pseudoexpectation.
\rhoection{Sum-of-Squares Toolkit} \leftarrowbel{SoS-toolkit}
\cclassmacro{P}aragraph{Sum-of-Squares Proofs}
Fix a set of polynomial inequalities $\mathcal A = \{ p_i(x) \geq 0 \}_{i \in [m]}$ in variables $x_1,\ldots,x_n$.
Starting with these ``axioms''
$\mathcal A$, a sum-of-squares proof of $q(x) \geq 0$ is given by an identity of the form,
\[
\left(1+ \sum_{k \in [m']} b_k^2(x)\right) \cdot q(x) = \sum_{j\in [m'']} s_j^2(x) + \sum_{i \in [m]} a_i^2(x) \cdot p_i(x) \,,
\]
where $\{s_j(x)\}_{j \in [m'']},\{a_i(x)\}_{i \in [m]}, \{b_k(x)\}_{i \in [m']}$ are real polynomials.
It is clear that any identity of the above form manifestly certifies that the polynomial $q(x) \geq 0$, whenever each $p_i(x) \geq 0$ for real $x$.
The degree of the sum-of-squares proof is the maximum degree of all the summands, i.e., $\max \{\mathop{}\!\mathrm{d}eg(s_j^2), \mathop{}\!\mathrm{d}eg(a_i^2 p_i)\}_{i,j}$.
Sum-of-squares proofs extend naturally to polynomial systems that involve a set of equalities $\{r_i(x) = 0\}$ along with a set of inequalities $\{ p_i(x) \geq 0\}$.
We can extend the definition syntactically by replacing each equality $r_i(x) = 0$ by a pair of inequalities $r_i(x) \geq 0$
and $-r_i(x) \geq 0$.
We will use the notation $\mathcal A \sststile{d}{x} \set{q(x) \geq 0}$ to denote the assertion that there exists a degree-$d$ sum-of-squares proof of $q(x) \geq 0$ from the set of axioms $\mathcal A$.
The superscript $x$ in the notation $\mathcal A \sststile{d}{x} \set{q(x) \geq 0}$ indicates that the sum-of-squares proof is an identity of polynomials where $x$ is the formal variable.
A useful quality of SoS proofs is that they can be composed in the following sense.
\boldsymbol{e}gin{fact}
For polynomial systems $\mathcal A$ and $\mathcal B$, if $\mathcal A \sststile{d}{} \{p(x) \geq 0\}$ and $\mathcal B \sststile{d'}{} \{q(x)\geq 0\}$ then $\mathcal A \cup \mathcal B \sststile{\max(d,d')}{}\{p(x) + q(x) \geq 0\}$. Also $\mathcal A \cup \mathcal B \sststile{dd'}{} \{p(x)q(x) \geq 0\}$.
\epsilonilonnd{fact}
We now turn to pseudoexpectations, the dual object to SoS proofs.
\cclassmacro{P}aragraph{Pseudoexpectations}
\boldsymbol{e}gin{definition}
Fix a polynomial system $\mathcal P$ in $n$ variables $x \in \mathbb R^n$ consisting of inequalities $\{p_i(x) \ge 0\}_{i\in[m]}$.
A degree-$d$ pseudoexpectation $\cclassmacro{P}E : \mathbb R[x]_{\leq d} \to \mathbb R$ {\em satisfying $\mathcal P$} is a linear functional over polynomials of degree at most $d$ with the properties that $\cclassmacro{P}E[1] = 1$, $\cclassmacro{P}E[p(x) a^2(x)] \geq 0$ for all $p \in \mathcal P$ and polynomials $a$ such that $\mathop{}\!\mathrm{d}eg(a^2 \cdot p) \le d$, and $\cclassmacro{P}E[q(x)^2] \ge 0$ whenever $\mathop{}\!\mathrm{d}eg(q^2) \le d$.
\epsilonilonnd{definition}
The properties above imply that when $\mathcal A \sststile{d}{x} \set{q(x) \ge 0}$, then if $\cclassmacro{P}E$ is a degree-$d$ pseudoexpectation operator for the polynomial system defined by $\mathcal A$, $\cclassmacro{P}E[q(x)] \ge 0$ as well.
This implies that $\cclassmacro{P}E$ satisfies several useful inequalities; for example, the Cauchy-Schwarz inequality.
(See e.g. \cite{DBLP:conf/stoc/BarakBHKSZ12} for details.)
\cclassmacro{P}aragraph{SoS Algorithm}
The degree $D$ moment tensor of a pseudoexpectation $\cclassmacro{P}E_{\zeta}$ is the tensor $\cclassmacro{P}E_\zeta\boldsymbol{i}g[(1, x_1, x_2, \ldots , x_n)^{\otimesimes D}\boldsymbol{i}g]$.
Each entry corresponds to the pseudo-expectation of all
monomials of degree at most $D$ in $x$. The set of all degree-$D$ moment tensors of degree $D$ pseudoexpectations is convex, and there is a separation oracle that runs in time $n^{O(D)}$.
\boldsymbol{e}gin{fact} (\cite{Nesterov00}, \cite{Parrilo00}, \cite{Lasserre01}, \cite{Shor87}). For any $n$, $D \in \mathbb Z^+$, let $\cclassmacro{P}E_{\zeta}$ be degree $D$ pseudoexpectation satisfying a polynomial system $\mathcal P$. Then the following set has a $n^{O(D)}$-time weak
separation oracle (in the sense of \cite{GLS1981}):
$$\{\cclassmacro{P}E_\zeta\boldsymbol{i}g[(1, x_1, x_2, \ldots , x_n)^{\otimesimes D}\boldsymbol{i}g] \mid \text{ degree } D \text{ pseudoexpectations } \cclassmacro{P}E_{\zeta} \text{ satisfying }\mathcal P \}$$
Armed with a separation oracle, the ellipsoid algorithm finds a degree $D$ pseudoexpectation in time $n^{O(D)}$, which we call the degree $D$ sum-of-squares algorithm.
\epsilonilonnd{fact}
Next we present some useful inequalities for working with SoS proofs and pseudoexpectations.
\cclassmacro{P}aragraph{Useful Inequalities}
\boldsymbol{e}gin{fact} (Cauchy Schwarz)
Let $x_1,\ldots,x_n,y_1,\ldots,y_n$ be indeterminates; then
$$\sststile{4}{} \boldsymbol{i}g(\sum_{i \leq n}x_iy_i\boldsymbol{i}g)^2 \leq \boldsymbol{i}g(\sum_{i \leq n}x_i^2\boldsymbol{i}g)\boldsymbol{i}g(\sum_{i \leq n}y_i^2\boldsymbol{i}g) $$
\epsilonilonnd{fact}
\boldsymbol{e}gin{fact} (Triangle Inequality)
Let $x,y$ be $n$-length vectors of indeterminates, then
$$\sststile{2}{} \norm{x + y}^2 \leq 2\norm{x}^2 + 2\norm{y}^2 $$
\epsilonilonnd{fact}
\boldsymbol{e}gin{fact}\leftarrowbel{fact:moment} (Moment Bounds)
Let $u = (u_1, \ldots , u_k)$ be a vector of indeterminates. Let $\mathcal D$ be Gaussian with
variance proxy 1. Let $t \geq 0$ be an integer. Then we have
$$\sststile{2t}{} \E_{X \sim \mathcal D} \iprod{X,u}^{2t} \leq (2t)! \norm{u}^{2t}$$
\epsilonilonnd{fact}
\Pnote{the above is not true for Sub Gaussian , only true for Gaussian}
\boldsymbol{e}gin{fact}(Pseudoexpectation Cauchy Schwarz).
Let $f(x)$ and $g(x)$ be polynomials of degree at most $\epsilonilonll \leq \frac{D}{2}$ in the indeterminate $x$. Then $$\cclassmacro{P}E[f(x)g(x)]^2 \leq \cclassmacro{P}E[f(x)^2]\cclassmacro{P}E[g(x)^2]$$
\epsilonilonnd{fact}
\boldsymbol{e}gin{fact} (Pseudoexpectation Holder's)
Let $p$ be a degree $\epsilonilonll$ sum-of-squares polynomial, $t \in \mathbb{N}$, and $\cclassmacro{P}E$ a degree $O(t \epsilonilonll)$ pseudoexpectation. Then
$$\cclassmacro{P}E p(x)^{t-2} \leq \boldsymbol{i}g( \cclassmacro{P}E p(x)^t \boldsymbol{i}g)^{\frac{t-2}{t}} $$
\epsilonilonnd{fact}
\boldsymbol{e}gin{fact}(SoS Holder)
Let $X_1,..,X_n$ and $w_1,...,w_n$ be indeterminates. Let $q \in \mathbb N$ be a power of $2$, then
$$\set{w_i^2 = w_i \forall i \in [n]}\sststile{O(q)}{} \boldsymbol{i}g( \sum_{i \leq n}w_iX_i\boldsymbol{i}g)^q \leq \boldsymbol{i}g( \sum_{i \leq n}w_i\boldsymbol{i}g)^{q-1}\boldsymbol{i}g( \sum_{i \leq n} X_i^q\boldsymbol{i}g)$$
and
$$\set{w_i^2 = w_i \forall i \in [n]}\sststile{O(q)}{} \boldsymbol{i}g( \sum_{i \leq n}w_iX_i\boldsymbol{i}g)^q \leq \boldsymbol{i}g( \sum_{i \leq n}w_i\boldsymbol{i}g)^{q-1}\boldsymbol{i}g( \sum_{i \leq n} w_iX_i^q\boldsymbol{i}g)$$
\epsilonilonnd{fact}
\boldsymbol{e}gin{fact} (Spectral Bounds)
Let $A \in \mathbb R^{d \times d}$ be a positive semidefinite matrix with $\leftarrowmbda_{max}$ and $\leftarrowmbda_{min}$ being the largest and smallest eigenvalues of $A$ respectively. Let $\cclassmacro{P}E$ be a pseudoexpectation with degree greater than or equal to $2$ over indeterminates $v = (v_1,...,v_d)$. Then we have
$$\sststile{2}{} \iprod{A,vv^T} \leq \leftarrowmbda_{max} \norm{v}^2 $$
and
$$\sststile{2}{} \iprod{A,vv^T} \geq \leftarrowmbda_{min} \norm{v}^2 $$
\epsilonilonnd{fact}
\boldsymbol{e}gin{remark}
We will use the notation $\norm{\cdot}_{op}$ and $\norm{\cdot}_{nuc}$ for the operator and nuclear norms, respectively.
\epsilonilonnd{remark}
Next we will discuss some useful properties of conditional pseudoexpectation.
\cclassmacro{P}aragraph{Conditional Pseudoexpectation}
\boldsymbol{e}gin{definition} (Conditioning)
Given a degree $D$ pseudoexpectation operator \newline $\cclassmacro{P}E:\mathbb R[x_1,\ldots,x_n]^{\leq D}\to\mathbb R$ and a polynomial $e(x)$ of degree $d < D/2$, the conditioned pseudo-expectation operator $\cclassmacro{P}E\boldsymbol{i}g|_{e}$ is given by,
$$ \cclassmacro{P}E[s(x)|e(x)] = \frac{\cclassmacro{P}E[s(x) e^2(x)]}{\cclassmacro{P}E[e^2(x)]} \,.$$
$\cclassmacro{P}E\boldsymbol{i}g|_{e}$ is a degree $D-2d$ pseudoexpectation functional that satisfies the same polynomial constraints as $\cclassmacro{P}E$.
\epsilonilonnd{definition}
For an indeterminate $w$ satisfying the boolean constraint $w^2 = w$, we will use $\cclassmacro{P}E[\cdot |w]$ to denote the conditioned functional $\cclassmacro{P}E_{|w^2}$.
Given a degree $D$ pseudo-expectation operator $\cclassmacro{P}E$ that satisfies a polynomial system $\mathcal P$ all of whose polynomials are of degree at most $d$, for every polynomial $e$ of degree $\leq \frac{D-d}{2}$, the conditioned pseudoexpectation functional $\cclassmacro{P}E[ \cdot |e]$ also satisfies the system $\mathcal P$.
For any two polynomials $p,q$ we define the pseudovariance as $$\cclassmacro{P}Var[p] \textsuperscript{st}\xspaceackrel{\mathrm{def}}= \cclassmacro{P}E[p^2] - \cclassmacro{P}E[p]^2$$ and the pseudo-covariance as $$\cclassmacro{P}Cov[p,q] \textsuperscript{st}\xspaceackrel{\mathrm{def}}= \cclassmacro{P}E[pq] - \cclassmacro{P}E[p]\cclassmacro{P}E[q]$$ We will also be making extensive use of the conditional pseudoexpectation toolkit.
\boldsymbol{e}gin{fact} (Conditional Pseudoexpectation Toolkit)
Let $w$ satisfy the boolean constraint $w^2 = w$. For a random variable $b$ taking values in $\{0,1\}$ such that $\ProbOp[b = 1] = \cclassmacro{P}E[w]$ and $\ProbOp[b = 0] = \cclassmacro{P}E[1 - w]$ we have the following useful facts
\boldsymbol{e}gin{enumerate}
\emem (Law of Total Pseudoexpectation) $\E_b\cclassmacro{P}E[p(x,w)|w = b] = \cclassmacro{P}E[p(x,w)]$
\emem (Law of Total Pseudovariance) $\cclassmacro{P}Var[p(x,w)] - \E_{b}\cclassmacro{P}Var[p(x,w)|w = b] = \Var_b[\cclassmacro{P}E[p(x,w)|w = b]]$\\
\emem (Correlation Identity) $\cclassmacro{P}E[p(x,w)|w = b] = \frac{\cclassmacro{P}Cov[p(x,w),w]}{\cclassmacro{P}Var[w]}b + \left(\cclassmacro{P}E[p(x,w)] - \frac{\cclassmacro{P}Cov[p(x,w),w]}{\cclassmacro{P}Var[w]}\cclassmacro{P}E[w]\right)$
\epsilonilonnd{enumerate}
\epsilonilonnd{fact}
\boldsymbol{e}gin{remark}
(Numerical accuracy). To make our error guarantees precise, we have to discuss issues of bit complexity.
The SoS algorithm obtains a degree $D$ pseudoexpectation $\cclassmacro{P}E_\zeta$ satisfying polynomial system $\mathcal P$ approximately. That is to say, for every $g$ a sum of squares and $p_1, . . . , p_\epsilonilonll \in \mathcal P$
with $\mathop{}\!\mathrm{d}eg\boldsymbol{i}g(g \cclassmacro{P}rod_{i \in [\epsilonilonll]} p_i\boldsymbol{i}g) \leq D$, one has $\cclassmacro{P}E\boldsymbol{i}g[ g\cclassmacro{P}rod_{i \in [\epsilonilonll]} p_i \boldsymbol{i}g] \geq - 2^{-\Omega(n)} \norm{g}$, where $\norm{g}$ is the $\epsilonilonll_2$ norm of the coefficients
of $g$. So long as all of the polynomials involved in our SoS proofs have coefficients bounded by $n^B$ for some large constant $B$, then for any polynomial system $\mathcal Q$ such that $\mathcal P \sststile{}{} \mathcal Q$, we have $\mathcal Q$ satisfied up to error $2^{-\Omega(n)}$.
\epsilonilonnd{remark}
\rhoection{Organization}
In \cclassmacro{P}rettyref{sec:reg-overview} we go over the main ideas of list decoding robust regression for the covariate distribution $\mathcal N(0,I)$. Then in \cclassmacro{P}rettyref{sec:iteration} we wrap our algorithms in an iterative framework for sharp error guarantees. In \cclassmacro{P}rettyref{sec:conditioning} we prove the lemmas relevant to conditioning SoS SDPs. In \cclassmacro{P}rettyref{sec:reg-rounding} we exhibit the proof of concentration rounding. In \cclassmacro{P}rettyref{sec:anticoncentration} we define anticoncentration and prove that the Gaussian is certifiably anticoncentrated. Subsequently, in \cclassmacro{P}rettyref{sec:anticoncentration-distributions} we prove that certifiable anticoncentration is closed under linear transformation, and that spherically symmetric strongly log concave distributions are certifiably anticoncentrated. We defer the remaining regression lemmas to the appendix. In particular, we defer our proof of list-decodable mean estimation to \cclassmacro{P}rettyref{sec:mean-estimation}.
\section{Introduction}
Problem Setup: Given data $X_1,X_2,\dots,X_N \in \mathbb{R}^d$, a fraction $\beta \in [0,1]$ of the points are drawn i.i.d.\ from a $(k,l)$-certifiably subgaussian distribution. The remaining $(1-\beta)N$ points can be adversarially chosen.
\section{Technique Overview: Robust Regression}\label{sec:reg-overview}
In this section we introduce many of the ideas involved in designing our rounding algorithm. We leave sharper error/runtime guarantees and general distributional assumptions to later sections.
Let $N$ be the size of the data set. Let $\beta < \frac{1}{2}$, and let $M = \beta N$.
We receive a data set $\mathcal D = \{(X_i,y_i)\}_{i=1}^N$ where the $X_i \in \mathbb R^d$ are the covariates and the $y_i \in \mathbb R$ are the labels. Of the covariates, $M$ points are drawn $X_i \sim \mathcal N(0,I)$. We will refer to these points as ``inliers''. Furthermore, we introduce boolean variables $w_1',\dots,w_N' \in \{0,1\}$ indicating if a data point is an inlier, equal to $1$; or an outlier, equal to $0$. Let $\ell' \in \mathbb R^d$ be the $\ell_2$ minimizer of the error over the inliers, i.e.,
$$\ell' \stackrel{\mathrm{def}}= \underset{\ell \in \mathbb R^d}{\argmin} \sumn w_i'(y_i - \iprod{\ell,X_i})^2$$
\Pnote{data set must have the clean data and the rest. Notation is little inconsistent above, $\ell'$ must be minimizer on clean data alone}
Let $\sigma^2$ be a constant upper bounding the $\ell_2$ error of $\ell'$,
$$\sumn w_i' (y_i - \iprod{\ell',X_i})^2 \leq \sigma^2$$
Likewise, let $g$ be a constant bounding the $\ell_4$ error of $\ell'$,
$$\sumn w_i'(y_i - \iprod{\ell',X_i})^4 \leq g\sigma^4$$
In the special case of Gaussian noise $\mathcal N(0,\sigma^2)$ we have $g = 3$. Then for any $d \in \mathbb{Z}^+$ sufficiently large, our algorithm recovers a list of estimates $L = \{\ell_1,\dots,\ell_A\}$ for $|L| = O\big((\frac{\rho}{\sigma})^{\log(\frac{1}{\beta})}\big)$ such that for some $\ell_i \in L$ we have
$$\norm{\ell_i - \ell'}_2 \leq O\left(\frac{\sigma}{\beta^{3/2}}\right)$$
with high probability over the data.
If we regard $\frac{\rho}{\sigma}$ as a fixed constant then the list is of length $\poly(\frac{1}{\beta})$.
Furthermore, our algorithm is \textit{efficient}, running in polynomial time $\poly(d,N)$ where we take $N = d^{O(\frac{1}{\beta^4})}$. Here we take $N$ to be large enough to certify arbitrary closeness of the $K$'th empirical moments to the distributional moments of the covariates in $\ell_2$ norm for $K = O(\frac{1}{\beta^4})$. That is to say, for any constant $\epsilon > 0$, and for $X_1,\dots,X_N \sim \mathcal N(0,I_d)$ we have with high probability
$$
\Norm{\frac{1}{N}\sum_{i=1}^N X_i ^{\otimes \frac{K}{2}}(X_i^{\otimes \frac{K}{2}})^T - M_K}_F^2 \leq \epsilon
$$
where $M_K$ is the $K$'th moment tensor of the spherical Gaussian. For our analyses we take $d > \rho$ and $\epsilon = O(\sigma d^{-K})$. For these settings of $N$ and $d$, and fixing $\rho,\sigma, \beta$ to be constants, we often write $o_d(1)$ without ambiguity.
\Pnote{does exponent depend on $\beta$?}
Our approach is to run an SoS SDP, and then \textit{round} out the list. We begin by describing the Robust Regression SoS SDP.
\begin{algorithm}[H] \label{algo:robustregressionSDP}
\SetAlgoLined
\KwResult{A degree $D$ pseudoexpectation functional $\pE_\zeta$}
\textbf{Inputs}: $(\mathcal D = \{(X_i,y_i)\}_{i=1}^N,\rho)$ data set, and upper bound on $\norm{\ell'}$\\
\Pnote{directly list $\{(X_i,y_i)\}$ as inputs here. It will be good to make a table like this, as self-contained as possible}
\begin{eqnarray}
\begin{aligned}
& \underset{\text{degree D pseudoexpectations } \pE}{\text{minimize}}
& & \sum_{i=1}^N \pE[w_i]^2 \\
& \underset{\text{satisfies the polynomial system}}{\text{such that $\pE$}}
& & (w_i^2 - w_i) = 0, \; i \in [N], \\
& & & \sum_{i=1}^N w_i - M = 0, \\
& & & \Norm{\frac{1}{M}\sum_{i=1}^N w_i X_i ^{\otimes \frac{t}{2}}(X_i^{\otimes \frac{t}{2}})^T - M_t}_F^2 - \epsilon \leq 0 , \; t \in [K]\\
& & & \frac{1}{M}\sum_{i=1}^N w_i (\langle \ell,X_i\rangle - y_i)^4 - g\sigma^4 \leq 0, \\
& & & \frac{1}{M}\sum_{i=1}^N w_i (\langle \ell,X_i\rangle - y_i)^2 - \sigma^2\leq 0, \\
& & & \Norm{\frac{1}{M}\sum_{i=1}^N w_i (y_i - \iprod{\ell,X_i})X_i}_2^2 = 0, \\
& & & \norm{\ell}^2 - \rho^2 \leq 0\\
\end{aligned}
\end{eqnarray}\\
\textbf{return}: $\pE_\zeta$
\caption{RobustRegressionSDP}
\end{algorithm}
\Pnote{do we have to decide on a value of $\epsilon_t$ when we state the theorems? if so, we can just make it explicit in the SDP.}
\Pnote{it seems like we use $D_4$ to denote $g$ later}
\Pnote{$\hat{\mu}$ in the above SDP is zero, correct?}
Let $\mathcal P$ be the set of polynomial constraints of the Robust Regression SDP. We elaborate on the meaning of each constraint below, and we will often refer to them in our analyses according to the numbering below.
\begin{enumerate}
\item The first constraint $\{w_i^2 = w_i\}$ enforces $w_i \in \{0,1\}$ and we refer to it as the \textit{booleaness} constraint.
\item The next constraint $\{\sum_{i=1}^N w_i - M = 0\}$ ensures we select a $\beta$ fraction of the data set.
\item The third constraint ensures that the pseudodistribution is over subsets with moments that match the distribution of the covariates. We refer to them as the \textit{moment} constraints.
\item The next constraint ensures the $\ell_4$ error incurred by $\ell$ is small, and we refer to it as the $\ell_4$ \textit{noise} constraint.
\item Similarly, we have an $\ell_2$ \textit{noise} constraint.
\item We have the $\ell_2$ \textit{minimization} constraint, which sets $\ell$ equal to the $\ell_2$ minimizer of the selected $w_i$. \Pnote{$\ell_2$ minimizer of clean data?}
\item Finally, the \textit{scaling} constraint restricts the length of $\ell$: $\norm{\ell}^2 \leq \rho^2$.
\end{enumerate}
The RobustRegression SDP minimizes a convex objective which we refer to as \textit{Frobenius Minimization}. This technique, first used in the work of Hopkins and Steurer \cite{hopkins2017efficient}, ensures that the SDP solution is a convex combination over every possible solution to the system.
This turns out to be crucial. To see why, consider an actual solution $W_\text{fake}$ consisting of variables $w_1,\dots,w_N \in \{0,1\}$ and $\ell \in \mathbb R^d$ satisfying $\mathcal P$. The distribution that places mass $1$ on $W_\text{fake}$ and no mass on the clean data is a valid distribution over the solutions to $\mathcal P$ and therefore also a valid pseudodistribution. Since we only have assumptions on less than half the data, a malicious $W_\text{fake}$ can be planted anywhere, confounding our efforts to recover $\ell'$. What we need is a way to produce a distribution over solutions to $\mathcal P$ that is a convex combination over all the possible solutions. The objective function $\sum_{i=1}^N \pE[w_i]^2$ is a strictly convex function, minimizing which ensures that the SDP solution is spread over all solutions to $\mathcal P$. More precisely, we have the following guarantee.
\begin{lemma}\torestate{ \label{lem:FrobeniusMinimization}
(Frobenius minimization $\implies$ correlation)
Let $\mathcal P$ be a polynomial system in variables $\{w_i\}_{i \in [N]}$ and a set of indeterminates $\{\ell_i \}_{i \in \mathbb N}$, that contains the set of inequalities:
\begin{align*}
w_i^2 = w_i & \quad \forall i \in [N] & &
\sum_i w_i = \beta N
\end{align*}
Let $\pE_\zeta: \mathbb R[\{w_i\}_{i \in [N]}, \{\ell\}]^{\leq D} \to \mathbb R$ denote a degree $D$ pseudoexpectation that satisfies $\mathcal P$ and minimizes the norm $\norm{\pE_\zeta[ w]}$.
If $w'_i \in \{0,1\}$ and $\ell'$ is a satisfying assignment to $\mathcal P$ then there is correlation with the inliers,
\begin{equation}
\pE_\zeta\left[\frac{1}{M} \sum\limits_{i=1}^N w_iw_i' \right] \geq \beta
\end{equation}}
\end{lemma}
We defer the proof of this statement to \prettyref{sec:Frobenius}.\\
\textbf{Remark}: The lemma does not guarantee that $\pE_\zeta[(\iprod{w,w'} - \beta M)p(w,\ell)] \geq 0$ for all SoS polynomials $p(w,\ell)$ of $\deg(p(w,\ell)) \leq D-1$. That is to say, the guarantees of \prettyref{lem:FrobeniusMinimization} are only in pseudoexpectation.
At this point we have found a pseudodistribution $\zeta$ satisfying $\mathcal P$ that in pseudoexpectation is correlated with the inliers. Pursuing a line of wishful thinking, we would like to sample from this pseudodistribution armed with access to its degree $D$ moments. This is the algorithmic task of rounding the SDP solution, and it is the chief intellectual thrust of this work.
\Pnote{the description below doesn't provide much intuition. It will be good to use the term: variance. talk of pseudovariance (probably even define it here), and then use it to state the lemma as follows:
....
For the sake of exposition, let us say that $\pE$ corresponds to an actual distribution over solutions to the polynomial system.
Recall that the goal of the rounding algorithm is to recover the linear function $\ell$.
Suppose the variance of $\ell$ as a random variable is sufficiently small, then its expectation $\pE[\ell]$ is a good approximation to an actual solution.
.....
}
For the sake of exposition, let us say that $\pE$ corresponds to an actual distribution over solutions to the polynomial system.
Recall that the goal of the rounding algorithm is to recover the linear function $\ell'$.
Suppose the variance of $\ell$ as a random variable is sufficiently small; then its expectation $\pE[\ell]$ is a good approximation to an actual solution. Formally,
\begin{lemma}
\torestate{\label{lem:regressionSnapping}
Let $\eta \in [0,\frac{1}{2}]$ be a small constant.
Let $\pE_\zeta$ be a degree $O(\frac{1}{\eta^4})$ pseudoexpectation satisfying the constraints of RobustRegressionSDP($\mathcal D,\rho$). If the pseudovariance of the estimator $\pE_\zeta[\ell]$ is small in every direction,
\begin{align}\label{vr}
\max_u \pVar_\zeta[\iprod{\ell,u}] \leq \eta\rho^2
\end{align}
and there is correlation with the inliers,
\begin{align}\label{fm}
\pE_\zeta\left[ \frac{1}{M}\sum_i w_i w'_i\right] \geq \beta
\end{align}
then our estimator satisfies
\begin{align}
\norm{\pE[\ell] - \ell'} \leq \sqrt{ \frac{\eta\rho^2 + O(\frac{\sigma^2}{\eta^2})}{\beta}}
\end{align}
In particular, for $\eta = \frac{\beta}{8}$ and $\rho^2 > \Omega(\frac{\sigma^2}{\beta^3})$ the degree $O(\frac{1}{\beta^4})$ pseudoexpectation satisfies
}
$$
\norm{\pE_\zeta[\ell] - \ell'} \leq \frac{\rho}{2}
$$
\end{lemma}
\Pnote{using "slash left bracket" and "slash right bracket", will ensure that the brackets are scaled to the required size, like in second equation above}
\Pnote{degree $1/\eta^4$ seems different from $1/\beta^5$ above?}
Provided we can take the pseudovariance down in every direction, the error guarantee `contracts' from the trivial $\rho$ to $\frac{\rho}{2}$. It is then possible to iterate such a contraction procedure to achieve optimal error guarantees, which is the subject of \prettyref{sec:iteration}.
Without going into details, the proof of \prettyref{lem:regressionSnapping} critically relies on both the concentration and anticoncentration of the covariates. For instance, if the covariates were drawn from a degenerate point distribution at the origin, then nothing can be inferred about $\ell'$. In this sense, concentration is insufficient to recover $\ell'$ meaningfully. To overcome this hurdle, we formalize what it means for a distribution to be SoS certifiably anticoncentrated.
\textbf{Certifiable Anticoncentration}
As will become clear in \prettyref{sec:anticoncentration}, the smaller $\eta$ is, the harder it is for SoS to certify the bounds in the above lemma. For purposes of anticoncentration, $\eta$ is a parameter representing an interval about the origin. For any distribution $\mathcal D$, we think of $\mathcal D$ as being anticoncentrated if the mass of $\mathcal D$ falling within the $\eta$ interval is small. For example, in the case of $\mathcal D = \mathcal N(0,1)$, the mass within the $\eta$ interval is upper bounded by $\frac{\eta}{\sqrt{2\pi}}$. Characterizing this ``anticoncentration'' of $\mathcal D$ about the origin becomes increasingly difficult (higher degree) as $\eta$ falls, intuitively, because it requires a finer grained picture of the distribution $\mathcal D$. It turns out that $\mathcal N(0,I_d)$ is $O(\frac{1}{\eta^4})$ SoS certifiably anticoncentrated, the proof of which is detailed in \prettyref{sec:gaussiananticoncentration}. That this proof is independent of the dimension $d$, along with the rounding algorithm, is what enables the list decoding to run in polynomial time.
\\
\\
\Pnote{ the above description of anticoncentration seems out of place? must be moved to a different place, where it is motivated.}
Now we move on to the actual statement of \prettyref{lem:regressionSnapping}. In general, the variance of the SDP solution will not be small. Thus, we will iteratively reduce the variance by conditioning on the $w_i$ variables. Intuitively, we are conditioning on specific data points $(X_i,y_i)$ being part of the inliers ($w_i = 1$) or being part of the outliers ($w_i = 0$).
\begin{comment}
the lemma states that if the pseudovariance of $\ell$ is small and there is correlation with the plant, then we have made progress towards $\ell'$. By Frobenius minimization, we already have correlation with the plant. What remains is to find a pseudoexpectation for which $ \max_u \pVar_\zeta[\iprod{\ell,u}]$ is small. We achieve this by iteratively \textit{conditioning} the SDP.
\end{comment}
\Pnote{Reader doesn't know what the term "plant" refers to here, and "correlation with the plant".
Only thing the reader knows at this point is what we told him right before lemma 3.2, "if variance is small then rounding is easy" and Lemma 3.2 is some quantitative version of that. We need to take the reader from there, something like:
"The variance of the SDP solution will not be small in general. We will iteratively reduce the variance by conditioning on the $w_i$ variables. Intuitively, we are conditioning on specific data points $X_i,y_i$ being part of the clean data"
-- again here "clean data" is a term we should only use if we stated it formally earlier.
}
Towards these ends, let $\pE_{1}, \pE_{2},\dots, \pE_{R}$ be a sequence of pseudoexpectations where $\pE_1$ is the output of RobustRegressionSDP($\mathcal D,\rho$). We want to define an algorithm to update $\pE_{t}$ to $\pE_{t+1}$ where $\max_u \pVar_{t+1}[\iprod{\ell,u}] < \max_u \pVar_{t}[\iprod{\ell,u}]$ so that eventually $\max_u \pVar_{R}[\iprod{\ell,u}] < \eta\rho^2$.
Let $\mathcal Q_t$ be the pseudocovariance matrix defined by
$$\mathcal Q_t = \pE_{t}[(\ell - \pE_t[\ell])(\ell - \pE_{t}[\ell])^T]$$
We have $\norm{\mathcal Q_t}_{op} = \max_u \pVar_t[\iprod{\ell,u}]$. Let's say we have a strategy $\mathcal S$ for selecting a $w_j \in \{w_1,\dots,w_N\}$, and then apply the following update
$$
\pE_{t+1} = \left\{
\begin{array}{ll}
\pE_{t}|_{w_j = 1} & \text{with probability } \pE_{t}[w_j]\\
\pE_{t}|_{w_j = 0} & \text{with probability } \pE_{t}[1 - w_j]\\
\end{array}
\right.
$$
Let $b_j$ be a $\{0,1\}$ random variable satisfying $\ProbOp[b_j = 1] = \pE_t[w_j]$.
We wish to argue that there is a large expected decrease in the direction of largest variance:
\begin{align*}
\norm{\mathcal Q_t}_\text{op} - \E_{\mathcal S}\E_{b_j}\norm{\mathcal Q_t\big|_{w_j = b_j}}_\text{op} > \text{large}
\end{align*}
\Pnote{good to remind reader what $_{\text{nuc}}$ norm means. Also, the more standard notation is to use $\norm{Q}_{\infty}$ or $\norm{Q}$ for operator norm, $\norm{Q}_1$ for nuclear norm.}
Unfortunately, controlling $\norm{\mathcal Q}_\text{op}$ is difficult. We will instead control
$\norm{\mathcal Q}_\text{nuc}$, i.e.\ the trace norm, and prove
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{\mathcal S}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} \geq
\Omega(\beta^3\rho^2)
\end{align*}
for the strategy $\mathcal S$ defined below.
\begin{lemma} (Variance Decrease Strategy) \torestate{\label{lem:regstrategy}
Let $\pE_\zeta$ satisfy the pseudoexpectation constraints of RobustRegressionSDP($\mathcal D,\rho$). Let $\mathcal Q$ be the associated pseudocovariance matrix of $\ell$. Let $v$ be any direction in the unit ball $S^{d-1}$.
Let $\mathcal S_v$ be a probability distribution over $[N]$
where for any $j \in [N]$ we have
\begin{align*}
\mathcal S_v(j) \stackrel{\mathrm{def}}= \frac{\pVar_\zeta[w_j(y_j - \pE_\zeta[\langle \ell,X_j \rangle])\langle X_j,v\rangle]}{\sum_{i=1}^N \pVar_\zeta[w_i(y_i - \pE_\zeta[\langle \ell,X_i \rangle])\langle X_i,v\rangle]}
\end{align*}
Then for $M_4$ being the fourth moment matrix of the Gaussian defined in RobustRegressionSDP, and for $\norm{\mathcal Q}_{nuc} > \sigma^2\sqrt{g}$ we have
\begin{align*}
\pVar_\zeta[\iprod{\ell,v}] - \E_{j \sim S_v}\E_{b_j} \pVar_\zeta[\iprod{\ell,v}|w_j = b_j]
\geq \Omega\Big(\frac{\beta\pVar_\zeta[\iprod{\ell,v}]^2}{\norm{\mathcal Q}_{nuc}}\Big)
\end{align*}
}
\end{lemma}
\Pnote{Can $\mathcal Q$ be any pseudocovariance matrix above? or is it the pseudocovariance matrix of $\ell$? Can we use the notation $\pVar[\ell]$ or something similar to denote pseudocovariance of $\ell$}
\Pnote{$M_4, \epsilon_4$ undefined above. Also, ``largest eigenpair" does not seem like standard usage}
\Pnote{Does $v$ have to be the largest eigenvector for the above lemma? If $v$ can be any eigenvector, we should just state for any. Then, we can explain in text that the lemma lets one decrease the variance along a single direction, thereby decreasing the nuclear norm as follows, and state the corollary}
The above lemma allows the rounding algorithm to decrease the variance along a single direction, thereby decreasing the nuclear norm as follows.
\begin{corollary}\label{cor:nnr} (Connecting variance decrease strategy to nuclear norm rounding)
Let $(\lambda,v)$ be the largest eigenvalue/vector pair of $\mathcal Q$, and let $\mathcal S_v$ be defined as in \prettyref{lem:regstrategy}. Let $\gamma > 0$ be a constant. If $\norm{\mathcal Q}_\text{op} \geq \gamma$ and $\norm{\mathcal Q}_{nuc} > \sigma^2\sqrt{g}$, then
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim \mathcal S_v}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} \geq \Omega\left(\frac{\beta\gamma^2}{\rho^2}\right)
\end{align*}
In particular, for $\gamma = \eta\rho^2$, we have
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim \mathcal S_v}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} \geq
\Omega(\beta\eta^2\rho^2)
\end{align*}
\end{corollary}
The corollary establishes a win-win. Either $\norm{\mathcal Q}_{op} < \gamma$, in which case the variance of our estimator is small in every direction, or we can round and decrease an upper bound on $\norm{\mathcal Q}_{op}$. We defer the proof of \prettyref{lem:regstrategy} to \prettyref{sec:reg-rounding} and the proof of \prettyref{cor:nnr} to \prettyref{sec:conditioning}.
\Pnote{what is the following intuition for? it appears to be for the entire algorithm and not the above corollary/lemma. We should have it at a place where we describe the overall strategy, or tell the reader that we are describing the intuition behind the overall algorithm}
Taken together, the conditioning strategy iteratively chases the variance down by selecting the direction of largest variance in the pseudocovariance of our estimator, and conditions on the $w_j$ exhibiting the largest scaled variance.
We are now ready to state our main algorithm and prove the main result of this section.
\Pnote{what is $\ell'$ in the theorem statement below? we should say, for any integral solution $\ell'$, we recover something close to $\ell'$ with probability $\beta/160$}
\begin{algorithm}[H] \label{algo:regressionroundtree}
\SetAlgoLined
\KwResult{a $d$ dimensional hyperplane}
\textbf{Inputs}: $(\pE_1,\rho)$ the output of RobustRegressionSDP and the scaling parameter\\
\For{$t = 1:O(\frac{1}{\eta^2\beta^2})$}{
Let $\mathcal Q_t = \pE_t[(\ell - \pE_t[\ell])(\ell - \pE_t[\ell])^T]$ be the pseudocovariance matrix of the estimator $\ell$\\
\text{Let }$(\lambda,v)$\text{ be the top eigenvalue/vector of }$\mathcal Q_t$ \\
\eIf{ $\lambda > \eta\rho^2$}{
Let $\mathcal S_v$ be a probability distribution over $[N]$\;
where for any $j \in [N]$ we have $\mathcal S_v(j) \stackrel{\mathrm{def}}= \frac{\pVar_t[w_j(y_j - \pE_t[\langle \ell,X_j \rangle])\langle X_j,v\rangle]}{\sum_{i=1}^N \pVar_t[w_i(y_i - \pE_t[\langle \ell,X_i \rangle])\langle X_i,v\rangle]}$ \\
Sample $j \sim \mathcal S_v$ \\
Sample $b_j \sim$ Bern($\pE_t[w_j]$)\\
Let $\pE_{t+1} = \pE_t\big|_{w_j = b_j}$\\
}
{
\textbf{return:} $\pE_t[\ell]$
}
}
\caption{Regression Rounding Algorithm}
\end{algorithm}
\begin{theorem} \label{thm:main-regression-estimation}
Let $\ell'$ be a solution to the constraints of RobustRegressionSDP. Let $\eta$ be a constant greater than $0$. Let $\pE_{\zeta}$ be the output of $\text{RobustRegressionSDP}(\mathcal D,\rho)$ for degree $D = O\Big(\max(\frac{1}{\beta^2\eta^2},\frac{1}{\eta^4})\Big) $. Then after $R = O(\frac{1}{\beta^2\eta^2})$ rounds of updates according to the strategy $\mathcal S$ in \prettyref{lem:regstrategy}, the resulting pseudoexpectation, which we denote $\pE_R$, satisfies
$$ \norm{\pE_{R}[\ell] - \ell'} \leq \sqrt{\frac{\eta\rho^2 + O\big(\frac{\sigma^2}{\eta^2}\big)}{\beta}}$$
with probability greater than $\Omega(\beta)$ over the randomness in the algorithm. In particular for $\eta = \Omega(\beta)$ and for $\rho^2 \geq \Omega\big(\frac{\sigma^2}{\beta^3} \big)$, the degree $D = O(\frac{1}{\beta^4})$ pseudoexpectation satisfies
$$ \norm{\pE_{R}[\ell] - \ell'} \leq \frac{\rho}{2}$$
\end{theorem}
\textbf{Remark}:
\Pnote{technically, this is a list decoding algorithm already, since one gets the full list by running the algorithm many many times, and boosting the probability that something close to $\ell'$ appears. Alternately, one can think of going over all possibilities for the random choices made by the algorithm}
As stated, \prettyref{thm:main-regression-estimation} takes down the error guarantee to $\frac{\rho}{2}$ and is not yet an iterative algorithm that obtains the optimal error guarantees, yet it contains most of the elements of the full algorithm. Issues concerning iteration are the subject of the next section on algorithms.
\boldsymbol{e}gin{proof}
We now have all the tools to prove \cclassmacro{P}rettyref{thm:main-regression-estimation}. By frobenius minimization \cclassmacro{P}rettyref{lem:FrobeniusMinimization} we have,
$$\cclassmacro{P}E_{\zeta}[\sumn w_iw_i'] \geq \boldsymbol{e}ta$$
Now we show that after $R$ rounds of conditioning,
$$ \max_{u \in \mathcal S^{d-1}} \cclassmacro{P}Var_{\zeta,R}[\iprod{\epsilonilonll,u}] \leq \epsilonilonta\rho^2$$
To apply \cclassmacro{P}rettyref{lem:regressionSnapping} we iteratively round according to $\mathcal S_v$ in \cclassmacro{P}rettyref{lem:regstrategy} to decrease $\norm{\mathcal Q}_{nuc}$. For $\norm{\mathcal Q}_{op} > \epsilonilonta\rho^2$, \cclassmacro{P}rettyref{cor:nnr} gives us
\boldsymbol{e}gin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim S_v}\E_{b_j}\norm{\mathcal Q\boldsymbol{i}g|_{w_j = b_j}}_\text{nuc} \geq \Omega(\boldsymbol{e}ta\epsilonilonta^2\rho^2)
\epsilonilonnd{align*}
\Pnote{too many sentences starting with words like Or, Either, Where, perhaps we should reduce that a bit}
We aim to show that after $R = O(\frac{1}{\boldsymbol{e}ta^2\epsilonilonta^2})$ iterations, the algorithm outputs $\norm{\mathcal Q_R}_\text{nuc} \leq \epsilonilonta\rho^2$ with probability greater than $1 - \frac{\boldsymbol{e}ta}{4}$ over the randomness in the selection strategy and ${0,1}$ conditionings. We denote the probability and expectation over the randomness in the algorithm $\ProbOp_\mathcal A[\cdot]$ and $\E_\mathcal A[\cdot]$ respectively. Thus, to prove the following
$$\ProbOp_{\mathcal A}[\norm{\mathcal Q_R}_{op} \leq \epsilonilonta\rho^2] \geq 1 - \frac{\boldsymbol{e}ta}{4}$$
we proceed by contradiction. Suppose that at each iteration of $t = 1,2,...,R$, that $\norm{\mathcal Q_t}_\text{op} > \epsilonilonta\rho^2$ with probability greater than $\frac{\boldsymbol{e}ta}{4}$. Then in expectation over $\mathcal S$ we have that each round of conditioning decreases $\norm{\mathcal Q}_\text{nuc}$ by $\frac{\boldsymbol{e}ta}{4}$ (the probability that the assumption in \ref{eq:overview1} holds) times $\Omega(\boldsymbol{e}ta\epsilonilonta^2\rho^2)$ (the expected decrease in \ref{eq:overview1}). Thus,
\boldsymbol{e}gin{align}\leftarrowbel{eq:overview1}
\E_{\mathcal A}[\norm{\mathcal Q_t}_\text{nuc}] - \E_{\mathcal A}[\norm{\mathcal Q_{t+1}}_\text{nuc}] \geq \ProbOp_{\mathcal A}[\norm{\mathcal Q_t}_\text{op} \geq \epsilonilonta\rho^2]\cdot \Omega(\boldsymbol{e}ta\epsilonilonta^2\rho^2) \geq \Omega(\boldsymbol{e}ta^2\epsilonilonta^2\rho^2)
\epsilonilonnd{align}
We also know that the initial pseudocovariance is upper bounded in nuclear norm i.e
\boldsymbol{e}gin{align}\leftarrowbel{eq:overview2}
\norm{\mathcal Q_{\zeta}}_\text{nuc} = \cclassmacro{P}E_\zeta[\norm{\epsilonilonll - \cclassmacro{P}E_\zeta[\epsilonilonll]}^2] = \cclassmacro{P}E_\zeta[\norm{\epsilonilonll}^2] - \cclassmacro{P}E_\zeta[\norm{\epsilonilonll}]^2 \leq \cclassmacro{P}E_\zeta[\norm{\epsilonilonll}^2] \leq \rho^2
\epsilonilonnd{align}
Where the last inequality is an application of the scaling constraint (7). Thus, putting together \ref{eq:overview1} and \ref{eq:overview2} after $R = O(\frac{\rho^2}{\boldsymbol{e}ta^2\epsilonilonta^2\rho^2}) = O(\frac{1}{\boldsymbol{e}ta^2\epsilonilonta^2})$ iterations, $\E_{\mathcal A}[\norm{\mathcal Q_R}_\text{nuc}] \leq 0$ which is impossible because $\norm{\mathcal Q_R}_\text{nuc} = \cclassmacro{P}E_R[\norm{\epsilonilonll - \cclassmacro{P}E_R[\epsilonilonll]}^2] \geq 0$. Thus our assumption is false, and $\ProbOp_{\mathcal A}[\norm{\mathcal Q_R}_{op} \leq \epsilonilonta\rho^2] \geq 1 - \frac{\boldsymbol{e}ta}{4}$ as desired.
\boldsymbol{e}gin{comment}
Then after
$$R = O\Big(\frac{\rho^2 - \frac{\boldsymbol{e}ta\epsilonilonta\rho^2}{4}}{\boldsymbol{e}ta^3\epsilonilonta^2\rho^2}\Big) = O\Big(\frac{1}{\boldsymbol{e}ta^3\epsilonilonta^2}\Big)$$ iterations we have that in expectation, one of the following two possibilities occur. Either, at some iteration $\norm{\mathcal Q}_{op} \leq \frac{\boldsymbol{e}ta\epsilonilonta\rho^2}{4}$ in which case we have successfully decreased the variance. Or, we made progress in expectation at each iteration so that
$$\norm{\mathcal Q_{\zeta,R}}_\text{nuc} \leq \frac{\boldsymbol{e}ta\epsilonilonta\rho^2}{4}$$
Using the fact that operator norm is upper bounded by nuclear norm we obtain
$$\norm{\mathcal Q_{\zeta,R}}_\text{op} \leq \frac{\boldsymbol{e}ta\epsilonilonta\rho^2}{4}$$
Either way, we have successfully brought down the variance in expectation over the randomness in the strategy and the selection of $\{0,1\}$ conditionings. Thus by Markov's we have that \Pnote{markovs with capital M?}
$$\ProbOp_{\mathcal A}[\norm{\mathcal Q_R}_{op} \geq \epsilonilonta\rho^2] \leq \frac{\boldsymbol{e}ta}{4}$$
So $\ProbOp_{\mathcal A}[\norm{\mathcal Q_R}_{op} \leq \epsilonilonta\rho^2] \geq 1 - \frac{\boldsymbol{e}ta}{4}$.
\Pnote{in the above Markov, what is the probability over? is the previous inequality missing an $\E$ over something}
\epsilonilonnd{comment}
We also know by the law of total pseudoexpectation that in expectation over the selection strategy and ${0,1}$ conditionings,
\[ \E_{\mathcal A}\left[\cclassmacro{P}E_{R}\left[\sumn w_iw_i'\right]\right]= \cclassmacro{P}E_{\zeta}\left[\sumn w_iw_i'\right] \geq \boldsymbol{e}ta \]
\Pnote{ $slash left bracket$ and $slash right bracket$} Note that this is a generic fact that is true regardless of which conditioning strategy we choose. Thus by Markov for random variables taking values in $[0,1]$ we have
$$\ProbOp_{\mathcal A}\left[\cclassmacro{P}E_{R}\left[\sumn w_iw_i'\right] \geq \frac{\boldsymbol{e}ta}{2}\right] \geq \frac{\boldsymbol{e}ta}{2}$$
Now that we know $\ProbOp_{\mathcal A}[\norm{\mathcal Q_R}_{op} \leq \eta\rho^2] \geq 1 - \frac{\beta}{4}$ and $\ProbOp_{\mathcal A}\left[\pE_{R}\left[\sumn w_iw_i'\right] \geq \frac{\beta}{2}\right] \geq \frac{\beta}{2}$, we conclude via a union bound that the probability that either event fails is upper bounded by $\frac{\beta}{4} + 1 - \frac{\beta}{2} = 1 - \frac{\beta}{4}$. Thus
the conditions of \prettyref{lem:regressionSnapping} are satisfied with probability greater than $\frac{\beta}{4}$, in which case
$$ \norm{\pE_{R}[\ell] - \ell'} \leq \sqrt{\frac{\eta\rho^2 + O\big(\frac{\sigma^2}{\eta^2}\big)}{\beta}}$$
In particular, for $\eta = \Omega(\beta)$ and for $\rho^2 \geq \Omega\big(\frac{\sigma^2}{\beta^3} \big)$ we have
$$ \norm{\pE_{R}[\ell] - \ell'} \leq \frac{\rho}{2}$$
\Pnote{we should conclude, just to remind the reader why this is desired?}
\end{proof}
\begin{lemma}\torestate{\label{lem:roundtree}
Running Algorithm \prettyref{algo:regressionroundtree} a total of $O(\frac{1}{\beta})$ times produces a list $L = \{\ell_1,...,\ell_{O(\frac{1}{\beta})}\}$ such that with probability $1 - c$, there exists a list element $\ell_i \in L$ satisfying
$\norm{\ell_i - \ell'} \leq \frac{\rho}{2}$
where $c$ is a small constant. Minor modifications enable the algorithm to succeed with high probability. }
\end{lemma}
We defer the modifications required to succeed with high probability to the appendix. We proceed under the assumption that \prettyref{algo:regressionroundtree} outputs a list $L$ satisfying the guarantees in \prettyref{lem:roundtree} with high probability. For variety, we present the mean estimation algorithms with these modifications in place.
\Pnote{ $\eta$ needs to be quantified in above lemma?}
\section{Iterative Contraction for Sharp Rates}\label{sec:iteration}
\Pnote{different title for the section?}
\Pnote{describe at a high level, what is happening in this section?
The RoundTree algorithm in previous section gets approximate. Here we boost it as follows"}
The Regression Rounding \prettyref{algo:regressionroundtree} generates a list $L$ which contracts the error guarantee from $\rho$ to $\frac{\rho}{2}$ with high probability. In this section we wrap the algorithm in an iterative framework to obtain sharp error guarantees.
\Pnote{the following remark on parameter setting is a bit tedious on the reader. The error terms in the above lemma are also messy. We should fix the correct choice of $\epsilonilonta$ in the lemma statement itself, and only state the lemma with the simple error term $\rho/2$}
\begin{comment}
\begin{remark}
Let's demystify the parameters in \prettyref{lem:roundtree}. Firstly, we think of $\rho^2 \geq \Omega(\frac{\sigma^2}{\beta^3})$. Otherwise we could output any vector $\ell \in \mathbb R^d$ of length $\norm{\ell}^2 \leq \rho^2$ and we would obtain a bound $\norm{\ell - \ell'} \leq 2\rho \leq O(\sqrt{\frac{\sigma^2}{\beta^3}})$ just by the triangle inequality.
Now looking at the bound \ref{po1} we are tempted to set $\eta$ small so that the $O(\eta\rho^2)$ term is small. However, this trades off with the term $O(\frac{\sigma^2}{\eta^2})$. The optimal setting of $\eta$ is then when
$$10\eta\rho^2 = \frac{8\sigma^2}{\eta^2}$$
which occurs at $\eta = (\frac{4\sigma^2}{5\rho^2})^{1/3}$. A rather inscrutable setting, which more importantly doesn't give us the $O(\sqrt{\frac{\sigma^2}{\beta^3}})$ error bound we were hoping for. Choosing $\eta = \frac{\beta}{160}$ we have
$$
\norm{\pE_\zeta[\ell] - \ell'} \leq \sqrt{\frac{\rho^2}{8} + O\Big(\frac{\sigma^2}{\beta^3}\Big)}
$$
as stated in the lemma. If we slightly abuse the big $O$ just to refer to the second term in the square root, we have for $\frac{\rho^2}{8} > O(\frac{\sigma^2}{\beta^3})$ the guarantee
$$
\norm{\pE_\zeta[\ell] - \ell'} \leq \frac{\rho}{2}
$$
Thus, we have succeeded in improving the estimate by a constant factor from the trivial triangle inequality bound of $2\rho$ to $\frac{\rho}{2}$. We will then design an iterative procedure to iteratively contract the problem until after $O(\log(\frac{\beta^{3/2}\rho}{\sigma}))$ iterations we obtain the optimal error bound of
$$
\norm{\pE_\zeta[\ell] - \ell'} \leq O\Big(\sqrt{\frac{\sigma^2}{\beta^3}}\Big)
$$
Of course, we could have also chosen the optimal setting of $\eta$ and iterated to obtain the same bounds. However, there is a price in choosing $\eta$ to be too small, as we pay in runtime in the degree $O(\frac{1}{\eta^4})$ SoS SDP.
Therefore, to avoid runtimes that depend on the scaling parameter $\rho$, we pursue the constant factor contraction delineated above. That the degree of the pseudoexpectation scales as $O(\frac{1}{\eta^4})$ is the subject of \prettyref{sec:anticoncentration}.
\end{remark}
\end{comment}
\Pnote{ above discussion is too technical for a reader at this stage. At this stage, the reader doesn't know the overall scheme of things yet. We should completely avoid the discussion on parameter settings, and instead use the text to explain the general schema of things more carefully.}
\Pnote{the following two algorithm descriptions are really difficult for the reader to understand, without any accompanying explanation. Ideally, the accompanying explanation is good enough that the reader skips reading the pseudocode altogether}
Our iterative framework, ListDecodeRegression \prettyref{algo:regressioniterative}, iterates over the list $L$ generated by RoundingRobustRegression \prettyref{algo:regressionroundtree}, and uses the list elements to shift the data so as to obtain sharper estimates. This will involve rerunning both RobustRegressionSDP \prettyref{algo:robustregressionSDP} and RoundingRobustRegression \prettyref{algo:regressionroundtree}. Formally, for each $\ell_i \in L$ create a new dataset $\{(X_j,y_j')\}_{j=1}^N$ with the same covariates with shifted labels $y_1',...,y_N'$. The labels are shifted according to the hyperplane $\ell_i$ as follows, $y_j' := y_j - \iprod{\ell_i,X_j}$ for all $j \in [N]$. Then the scaling constraint $\{\norm{\ell}^2 \leq \frac{\rho^2}{2} \}$ is added to the RobustRegressionSDP, and we resolve the SDP and rerun the rounding. Iterating this procedure, the error guarantee contracts each iteration from $\frac{\rho}{2}, \frac{\rho}{4}, ...$ so on and so forth, whilst the list length increases multiplicatively by factors of $O(\frac{1}{\beta})$ until the ubiquitous assumption $\rho^2 \geq \Omega(\frac{\sigma^2}{\beta^3})$ no longer holds and we are left with the sharp error guarantee $\norm{\ell_i - \ell'} \leq O(\frac{\sigma}{\beta^{3/2}})$ for some list element $\ell_i \in L$. The following theorem formalizes the discussion above.
\begin{algorithm}[H] \label{algo:regressioniterative}
\SetAlgoLined
\KwResult{A list of hyperplanes $L = \{\ell_1,...,\ell_A\}$}
\textbf{inputs}: $(\mathcal D,\rho)$\\
$L = \{0\}$\\
\For{ $t \in \log_2(\frac{\rho\beta^{3/2}}{\sigma})$}{
$\rho_t = \frac{\rho}{2^t}$\\
\% Let $Y$ be a list of pseudoexpectations\\
$Y = \emptyset$ \\
\For{$\ell_i \in L$}{
$(\mathcal X,\mathcal Y) = \mathcal D$\\
\For{$y_j \in \mathcal Y$}{$y_j = y_j - \iprod{\ell_i,X_j}$ }
Let $Y = Y \cup \text{RobustRegressionSDP}(\mathcal D,\rho_t)$\\
}
$L = \emptyset$\\
\For{$\pE_\zeta \in Y$}{
$L' = \text{RegressionRounding}(\pE_\zeta,\rho_t)$\\
$L = L \cup L'$\\
}
}
\textbf{return:} L
\caption{ListDecodeRegression}
\end{algorithm}
\begin{comment}
\begin{algorithm}[H] \label{algo:extract}
\SetAlgoLined
\KwResult{A list of hyperplanes $L = \{\ell_1,...,\ell_A\}$}
\textbf{Inputs}: A list $L' := \{(\ell_i,p_i)\}$\\
$L = \emptyset$\\
\While{$L' \neq \emptyset$ }{
Let $\ell_0$ be any leaf in $L'$\\
\For{$(\ell,p) \in L'$}{
$M = 0$\\
\If{$\norm{\ell - \ell_0 }\leq \frac{\rho}{4}$}{
$M = M + p$\\
}
}
\eIf{$M \geq \frac{\beta}{4}$}{
$L = L\cup \ell_0$\\
}{
$L = L'\setminus \ell_0$
}
}
\textbf{return:} L
\caption{Extract List Regression}
\end{algorithm}
\end{comment}
\Pnote{In the theorem below, it will be good to remind the reader what $\epsilonilonll'$ is, it is any solution to the polynomial system}
\begin{theorem}\label{thm:reg-final}
ListDecodeRegression($\mathcal D,\rho$) outputs a list of hyperplanes $L = \{\ell_1,...,\ell_A\}$ where
$A = O\big((\frac{1}{\beta})^{\log(\frac{\beta^{3/2}\rho}{\sigma})}\big)$ such that for some $\ell_i \in L$
$$\norm{\ell_i - \ell'} \leq O\Big(\frac{\sigma}{\beta^{3/2}}\Big)$$
with high probability in time $\big(\frac{\rho}{\sigma}\big)^{\log(1/\beta)} N^{O(\frac{1}{\beta^4})}$ for $N = d^{O(\frac{1}{\beta^4})}$. Here we are solving RobustRegressionSDP \prettyref{algo:robustregressionSDP} for degree $D = O(\frac{1}{\beta^4})$, and running $R = O(\frac{1}{\beta^4})$ rounds of the RegressionRounding \prettyref{algo:regressionroundtree}.
\end{theorem}
\begin{proof}
For any call to RegressionRounding \prettyref{algo:regressionroundtree}, we have by \prettyref{cor:roundtree} a list $L$ and a list element $\ell_i \in L$ satisfying $\norm{\ell_i - \ell'} \leq \frac{\rho}{2}$. After each iteration we construct a new data set $\{(X_j,y_j')\}_{j=1}^N$ by shifting the labels according to the rule $y_j' := y_j - \iprod{\ell_i,X_j}$ and enforce the scaling constraint $\{\norm{\ell}^2 \leq \frac{\rho^2}{4}\}$. The key point is that this new constraint is feasible for at least one iterate $\ell_i \in L$. This is all we need to iterate RobustRegressionSDP \prettyref{algo:robustregressionSDP} and subsequently the RegressionRounding Algorithm \prettyref{algo:regressionroundtree}.
The list length grows by a factor of $O(\frac{1}{\beta})$ per iteration for $O(\log(\frac{\beta^{3/2}\rho}{\sigma}))$ iterations. Thus, we run RobustRegressionSDP \prettyref{algo:robustregressionSDP} no more than $O\big((\frac{1}{\beta})^{\log(\frac{\beta^{3/2}\rho}{\sigma})}\big)$ times. From \prettyref{lem:roundtree} we solve RobustRegressionSDP for degree $D=O(\frac{1}{\beta^4})$. This concludes our treatment of list decoding robust regression.
\end{proof}
Thus far we have assumed the covariates are distributed $\mathcal N(0,I)$ with a fourth injective tensor norm of $B = 3$. In addition, we regarded the fourth moment of the noise model as upper bounded by $g\sigma^4$ for a constant $g$. We conclude this section by stating a general theorem relevant for large values of $B$ and $g$.
\begin{theorem}
Let a $\beta$ fraction of $X_1,...,X_N \in \mathbb R^d$ be drawn from a distribution $\mathcal D$ with identity covariance and a fourth injective tensor norm upper bounded by a constant $B$. Let $N,d,\ell',g,\sigma,\rho$ be defined as they were previously. Then ListDecodeRegression($\mathcal D,\rho$) outputs a list of hyperplanes $L = \{\ell_1,...,\ell_A\}$ where
$A = O\big((\frac{1}{\beta})^{\log(\frac{\beta^{3/2}\rho}{\sigma})}\big)$ such that for some $\ell_i \in L$
$$\norm{\ell_i - \ell'} \leq O\Big(\max\big(\frac{\sigma}{\beta^{3/2}},\sigma^2\sqrt{g}\big)\Big)$$
with high probability in time $\big(\frac{\rho}{\sigma}\big)^{\log(1/\beta)} N^{O(\frac{B}{\beta^4})}$ for $N = d^{O(\frac{1}{\beta^4})}$. Here we are solving RobustRegressionSDP \prettyref{algo:robustregressionSDP} for degree $D = O(\frac{B}{\beta^4})$, and running $R = O(\frac{B}{\beta^4})$ rounds of the RegressionRounding \prettyref{algo:regressionroundtree}.
\end{theorem}
The proof follows by direct inspection of the proof of \prettyref{thm:reg-final}.
\section{On Conditioning SoS SDP Solutions}\label{sec:conditioning}
In this section we prove facts about concentration rounding.
\subsection{Concentration Rounding: One Dimensional Case}
\begin{fact} (Conditional Pseudoexpectation Toolkit)
For any two polynomials $p,q$ we define $\pVar[p] \stackrel{\mathrm{def}}{=} \pE[p^2] - \pE[p]^2$, $\pCov[p,q] \stackrel{\mathrm{def}}{=} \pE[pq] - \pE[p]\pE[q]$.
Let $w$ satisfy the boolean constraint $w^2 = w$. For a random variable $b$ taking values in $\{0,1\}$ such that $\ProbOp[b = 1] = \pE[w]$ and $\ProbOp[b = 0] = \pE[1 - w]$ we have the following useful facts
\begin{enumerate}
\item (Law of Total Pseudoexpectation) $\E_b\pE[p(x,w)|w = b] = \pE[p(x,w)]$
\item (Law of Total Pseudovariance) $\pVar[p(x,w)] - \E_{b}\pVar[p(x,w)|w = b] = \Var_b[\pE[p(x,w)|w = b]]$
\item (Correlation Identity) $\pE[p(x,w)|w = b] = \frac{\pCov[p(x,w),w]}{\pVar[w]}b + (\pE[p(x,w)] - \frac{\pCov[p(x,w),w]}{\pVar[w]}\pE[w])$
\end{enumerate}
\end{fact}
\begin{proof} (facts) It is easy to check that $\E[b] = \pE[w]$ and $\Var[b] = \pVar[w]$ and $\Cov[b] = \pCov[w]$. The law of total pseudoexpectation is an application of definitions. The law of total pseudovariance is an application of the law of total pseudoexpectation. The proof is as follows.
$$\pVar[p(x,w)] - \E_{b}\pVar[p(x,w)|w = b] = \pE[p(x,w)^2] - \pE[p(x,w)]^2 - (\E_b\pE[p(x,w)^2|w = b] - \E_b\pE[p(x,w)|w = b]^2)$$
$$= \E_b[\pE[p(x,w)|w = b]^2] - \pE[p(x,w)]^2 = \E_b[\pE[p(x,w)|w = b]^2] - \E_b[\pE[p(x,w)|w = b]]^2 = \Var_b[\pE[p(x,w)|w = b]]$$
Lastly, we prove the correlation identity. We know $\pE[p(x,w)|w = b]$ is a function of $b$. Therefore there exist constants $c$ and $d$ such that $\pE[p(x,w)|w = b] = cb + d$. First we determine $c$. We know
\begin{align*}
c\pVar[w] = c\Var[b] &= \Cov(cb + d,b) = \Cov(\pE[p(x,w)|w=b],b)\\
&= \E[b\pE[p(x,w)|w=b]] - \E[\pE[p(x,w)|w=b]]\E[b]\\
&= \ProbOp[b=1]\pE[p(x,w)|w=1] - \pE[p(x,w)]\E[b]\\
&= \pE[w]\frac{\pE[p(x,w)w]}{\pE[w]} - \pE[p(x,w)]\pE[w]\\
&= \pCov[p(x,w),w]
\end{align*}
\Pnote{the above set of equations must be aligned, using say $begin\{align\}$}
Thus
$$c = \frac{\pCov[p(x,w),w]}{\pVar[w]} $$
Then to obtain $d$ we apply expectation on both sides of $\pE[p(x,w)|w = b] = cb + d$.
\end{proof}
Let $w_1,\ldots, w_N$ be variables that satisfy the boolean constraint for all $i \in [N]$.
Let $Z_1,\ldots, Z_N \in \mathbb R$ be numbers and let $\hat{\mu} \stackrel{\mathrm{def}}{=} \frac{1}{N} \sum\limits_{i=1}^N w_i Z_i$.
We show that the pseudo-variance $\pVar[\hat{\mu}]$ decreases in expectation when we condition on the variables $w_i$ according to a carefully chosen strategy.
\begin{theorem}\label{thm:onedimrounding}
Let $w_1,\ldots, w_N$ denote variables satisfying $\{ w_i^2 = w_i | i \in [N]\}$ and let $\hat{\mu} = \frac{1}{N}\sum_{i \in [N]} w_i Z_i$ for some sequence of real numbers $\{Z_i\}_{i \in [N]}$.
Define a probability distribution $\mathcal S: [N] \to \mathbb R^+$ as
$$\mathcal S(j) \stackrel{\mathrm{def}}{=} \frac{\pVar[ w_jZ_j]}{\sum_{i=1}^N \pVar[w_iZ_i]} $$
If we condition on the value of $w_j$ where $j$ is drawn from $\mathcal S$, then the pseudovariance decreases by
\begin{align*}
\pVar[\hat{\mu}] - \E_{j \sim \mathcal S}\E_{b_j} \pVar[\hat{\mu}|w_j = b_j] \geq \frac{ \big(\pVar(\hat{\mu})\big)^2}{\frac{1}{ N}\sum\limits_{i=1}^N \pVar(z_i)}
\end{align*}
where $b_j$ is a $\{0,1\}$-valued random variable with $\ProbOp[b_j = 1] = \pE[w_j]$ and $\ProbOp[b_j = 0] = \pE[1 - w_j]$.
This also immediately yields for $\hat{\mu} = \sumn w_iZ_i$
\begin{align*}
\pVar[\hat{\mu}] - \E_{j \sim \mathcal S}\E_{b_j} \pVar[\hat{\mu}|w_j = b_j] \geq \beta\frac{ \big(\pVar(\hat{\mu})\big)^2}{\sumn \pVar(z_i)}
\end{align*}
\end{theorem}
\begin{proof}
Let $z_i = w_iZ_i$ for all $i \in [N]$.
Since $z_i$ is a constant multiple of $w_i$, conditioning on $z_i$ is equivalent to conditioning on $w_i$.
We begin with the law of total variance
\begin{align*}
\pVar(\hat{\mu}) - \E_{b_j} \pVar(\hat{\mu}|w_j = b_j) = \Var_{b_j}(\pE[\hat{\mu}|w_j = b_j])
\end{align*}
Then we apply the expectation over the strategy $\mathcal S$ to both sides to obtain
\begin{align*}
\pVar[\hat{\mu}] - \E_{\mathcal S} \E_{b_j} \pVar[\hat{\mu}|w_j = b_j] &= \E_{\mathcal S}\Var_{b_j}[\pE[\hat{\mu}|w_j = b_j]]\\
&= \E_{\mathcal S}\Var_{b_j}\Big[\frac{\pCov[\hat{\mu},w_j]}{\pVar[w_j]}b_j\Big] = \E_{\mathcal S}\frac{\pCov[\hat{\mu},w_j]^2}{\pVar[w_j]^2}\Var[b_j] = \E_{\mathcal S}\frac{\pCov[\hat{\mu},z_j]^2}{\pVar[z_j]}
\end{align*}
\Pnote{above set of equations needs to be aligned}
Writing out the distribution of $\mathcal S$ we obtain
$$
= \sum_{j=1}^N \frac{\pVar[z_j]}{\sum_{i=1}^N \pVar[z_i]}\frac{\pCov[\hat{\mu},z_j]^2}{\pVar[z_j]} = \frac{\sum_{j=1}^N\pCov[\hat{\mu},z_j]^2}{\sum_{i=1}^N \pVar[z_i]} = \frac{\frac{1}{N}\sum_{j=1}^N\pCov[\hat{\mu},z_j]^2}{\frac{1}{N}\sum_{i=1}^N \pVar[z_i]}
$$
by Jensen's inequality
$$
\geq \frac{(\frac{1}{N}\sum_{j=1}^N\pCov[\hat{\mu},z_j])^2}{\frac{1}{N
}\sum_{i=1}^N \pVar[z_i]}
= \frac{\pVar[\hat{\mu}]^2}{\frac{1}{N}\sum_{i=1}^N \pVar[z_i]}$$
\begin{comment}
\begin{equation*}
= \frac{1}{N^2}\E_{\mathcal S} \underset{b_j}{\Cov}\Bigg[\sum\limits_{m=1}^N \pE[z_m|w_j = b_j], \sum\limits_{n=1}^N \pE[z_n|w_j = b_j]\Bigg]
\end{equation*}
Using the correlation identity in the above expression gives us
\begin{align*}
= \frac{1}{N^2}\E_\mu \frac{1}{\pVar(z_j)}\sum\limits_{m,n=1}^N\pCov(z_m,z_j)\pCov(z_n,z_j)
\end{align*}
Expanding out the $\E_\mu$ we obtain
\begin{align*}
= \frac{1}{N^2}\frac{\sum\limits_{j=1}^N\big(\sum\limits_{m=1}^N\pCov(z_m,z_j)\big)^2}{\sum\limits_{i=1}^N \pVar(z_i)}
= \frac{1}{N^2}\frac{\frac{1}{N}\sum\limits_{j=1}^N\big(\sum\limits_{m=1}^N\pCov(z_m,z_j)\big)^2}{\frac{1}{N}\sum\limits_{i=1}^N \pVar(z_i)}
\end{align*}
Apply the Cauchy-Schwarz inequality
\begin{align*}
\geq \frac{1}{N^2}\frac{\big(\frac{1}{N}\sum\limits_{j=1}^N\sum\limits_{m=1}^N\pCov(z_m,z_j)\big)^2}{\frac{1}{N}\sum\limits_{i=1}^N \pVar(z_i)} = \frac{ \big(\frac{1}{N^2}\sum\limits_{j=1}^N\sum\limits_{m=1}^N\pCov(z_m,z_j)\big)^2}{\frac{1}{ N}\sum\limits_{i=1}^N \pVar(z_i)}
\end{align*}
Noticing that we've procured $\pVar(\hat{\mu})$ in the numerator we write
\begin{equation} \label{gc}
= \frac{ \big(\pVar(\hat{\mu})\big)^2}{\frac{1}{ N}\sum\limits_{i=1}^N \pVar(z_i)}
\end{equation}
as desired.
\end{comment}
\end{proof}
\Pnote{ we should define a macro for $_\text{nuc}$ and $_\text{op}$ to make switching notation easier}
\begin{corollary} (Connecting variance decrease strategy to nuclear norm rounding)
Let $\mathcal S_v$ and $\mathcal Q$ be defined as in \prettyref{lem:regstrategy}, and let $\gamma > 0$ be a constant. If $\norm{\mathcal Q}_\text{op} \geq \gamma$ and $\norm{\mathcal Q}_\text{nuc} > \sigma^2\sqrt{g}$, then
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim \mathcal S_v}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} \geq \Omega\Big(\frac{\beta\gamma^2}{\rho^2}\Big)
\end{align*}
In particular for $\gamma = \eta\rho^2$, we have
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim \mathcal S_v}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} \geq
\Omega(\beta\eta^2\rho^2)
\end{align*}
\end{corollary}
\begin{proof}
Let $v, e_1,...,e_{d-1} \in \mathbb R^d$ be an orthonormal basis. First we write the nuclear norm as a decomposition along an orthonormal basis, i.e.
\begin{align*}
\norm{\mathcal Q}_\text{nuc} = \pVar[\langle \ell, v\rangle] + \sum\limits_{j=1}^{d-1} \pVar[\langle \ell, e_j\rangle]
\end{align*}
Now we write down the expected decrease in $\norm{\mathcal Q}_\text{nuc}$ for a single conditioning to obtain
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim \mathcal S_v}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} = \big(\pVar[\langle \ell, v\rangle] - \E_{j \sim \mathcal S_v}\E_{b_j}\pVar[\langle \ell, v\rangle|w_j = b_j]\big)\\ + \big(\sum\limits_{j=1}^{d-1} \pVar[\langle \ell, e_j\rangle] - \E_{j \sim \mathcal S_v}\E_{b_j}\pVar[\langle \ell, e_j\rangle|w_j = b_j]\big)
\end{align*}
Then we apply \prettyref{lem:regstrategy} to the first term, and we apply the fact that pseudovariance is monotonically decreasing after conditioning (law of total pseudovariance) to the second term to obtain
\Pnote{perhaps this part of the calculation should appear elsewhere, may be the entire lemma? because the particular error term from Lemma 3.3 looks rather complicated, and out of place in this section}
\begin{align*}
\geq \Omega\Big(\frac{\beta\pVar_\zeta(\iprod{\ell,v})^2}{\norm{\mathcal Q}_\text{nuc}}\Big) - o_d(1)
\end{align*}
Using the fact that $\pVar_\zeta(\iprod{\ell,v}) = \norm{\mathcal Q}_\text{op} \geq \gamma$ and $ \norm{\mathcal Q}_\text{nuc} = \pE_\zeta[\norm{\ell - \pE_\zeta[\ell] }^2] \leq \pE_\zeta[\norm{\ell}^2]\leq \rho^2$ we further lower bound by
\begin{align*}
\geq \Omega\Big(\frac{\beta\gamma^2}{\rho^2}\Big) - o_d(1)
\end{align*}
For $\gamma = \eta\rho^2$, we conclude
\begin{align*}
\norm{\mathcal Q}_\text{nuc} - \E_{j \sim \mathcal S_v}\E_{b_j}\norm{\mathcal Q\big|_{w_j = b_j}}_\text{nuc} \geq \Omega(\beta\eta^2\rho^2)
\end{align*}
as desired.
\end{proof}
\begin{comment}
When working over the correlation tree, there are distinct pseudodistributions at each node, which can be conveniently indexed by the conditioning path from the root. Let $p \in \{0,1\}^t$ be the binary representation of the conditioning path from the root node, where $t$ is the layer of the tree in question. Then let $M^t_p$ be the pseudocovariance matrix at layer $t$ at index $p$. In a sense, the $t$ index is redundant as $t = |p|$, but it aids in notational clarity. Also, let $M^t_p|_{w_i = z_i}$ denote the conditional pseudocovariance matrix where $w_i$ is equal to $z_i$.
We are trying to prove that $\E_{p \in \{0,1\}^t}||M^t_p||_{op}$ falls. Let $v^t_p := \lambda_{max}(M^t_p)$. We will select our strategy $S(v^t_p)$ as a function of $v^t_p$. After conditioning we know that $\pVar[\langle \hat{\mu}, v^t_p\rangle]$ decreases according to the rounding theorem. Unfortunately, this does not imply that $\E_{i \in S(v^t_p)}\E_{z_i \in \{0,1\}}||M^t_p|_{w_i = z_i}||_{op} < ||M^t_p||_{op}$, because the direction of largest variance can change from iteration to iteration.
A possible fix is to design a strategy that selects the $w_i$ to round entirely independent of direction.
In the absence of such a fix, we control $||M^t_p||_{op}$ by instead controlling $Tr(M^t_p) \geq ||M^t_p||_{op}$. Let $v^t_p, e_1,...,e_{d-1}$ be an orthonormal basis of $\mathbb R^d$. First we write the trace as a decomposition along an orthonormal basis, i.e.
\begin{align*}
Tr(M^t_p) = \pVar[\langle \hat{\mu}, v^t_p\rangle] + \sum\limits_{j=1}^{d-1} \pVar[\langle \hat{\mu}, e_j\rangle]
\end{align*}
And we apply the expectation over the rounding strategy to both sides to obtain
\begin{align*}
\E_{i \sim S(v^t_p)}\E_{z_i \in \{0,1\}}Tr(M^t_p|_{w_i = z_i}) = \E_{i \sim S(v^t_p)}\E_{z_i \in \{0,1\}}\pVar[\langle \hat{\mu}, v^t_p\rangle|w_i = z_i] + \sum\limits_{j=1}^{d-1} \E_{i \sim S(v^t_p)}\E_{z_i \in \{0,1\}}\pVar[\langle \hat{\mu}, e_j\rangle|w_i = z_i]
\end{align*}
Then we apply the correlation rounding theorem to the first term and the observation that pseudovariance is monotonically decreasing after conditioning to the second term to obtain
\begin{align*}
\leq (1 - \frac{\beta}{2})\pVar[\langle \hat{\mu}, v^t_p\rangle] + \sum\limits_{j=1}^{d-1} \pVar[\langle \hat{\mu}, e_j\rangle] = Tr(M^t_p) - \frac{\beta}{2} ||M^t_p||_{op}
\end{align*}
This suggests a natural analysis of the rounding algorithm. Either $||M^t_p||_{op}$ is small, which means the algorithm terminates. Or $||M^t_p||_{op}$ is large and we make progress in decreasing $Tr(M^t_p)$. The formal analysis is more involved, as it requires reasoning about $Tr(M^t_p)$ over the various branches of the correlation tree, some of which may terminate before others.
\begin{proof}
Let $M^0$ denote the pseudocovariance of the root node. Let $\gamma$ be the uniform threshold at which each branch of the correlation tree terminates. Thus at each node either $||M^t_p||_{op} < \gamma$, in which case the node is a terminal node, or $||M^t_p||_{op} \geq \gamma$. Let's define $\delta(\gamma)$ to be a lower bound on the decrease in global correlation, provided that $||M^t_p||_{op} \geq \gamma$.
\begin{align*}
\delta(\gamma) := \pVar[\langle \hat{\mu}, v^t_p\rangle] - \E_{i \sim S(v^t_p)}\E_{z_i \in \{0,1\}}\pVar[\langle \hat{\mu}, v^t_p\rangle|w_i = z_i]
\end{align*}
The value of $\delta(\gamma)$ depends on our setting, mean estimation or regression. For mean estimation, if $\gamma > O(m)$ then $\delta(\gamma) \geq \frac{\beta\gamma}{2}$. For regression, if $\gamma > O(\sigma)$ then $\delta(\gamma) \geq \frac{\beta \gamma^2}{4}$. Let's denote the set of terminal nodes at depth $t$ by
$T(t) := \{p | p \in \{0,1\}^t, ||M^t_p||_{op} < \gamma\}$
Let's denote the set of nonterminal nodes at depth $t$ by
$NT(t) := \{p | p \in \{0,1\}^t, ||M^t_p||_{op} \geq \gamma\}$.
For each nonterminal node, there is both an associated pseudodistribution, hence another $M^t_p$, and a new pseudovariable to condition on to form the two children. We denote this pseudovariable $w(p) \in \{w_1,...,w_N\}$.
We denote the probability of landing on a node $N_p$ at position $p$ by $\mathbb P(p):= \prod_{i=1}^t \pE[w(p[:i])]^{p_i} \pE[1 - w(p[:i])]^{(1 - p_i)}$.
Let $\kappa_t$ be the probability that a branch terminates at depth $t$, $\kappa_t := \sum\limits_{p \in T(t)}\mathbb P(p)$.
We need to show that a branch terminates in less than or equal to $m$ rounds with probability $$\sum\limits_{i=1}^m \kappa_i > 1 - \frac{\beta}{2}$$
Now we let $\Phi_t = \E[Tr(M^t_p)|p \in NT(t)]$ and $\Delta_t = \E[Tr(M^t_p)|p \in T(t)]$. For $t=1$, we have by the law of total expectation
\begin{align*}
\kappa_1 \Phi_1 + (1 - \kappa_1) \Delta_1 \leq Tr(M^0) - \delta(\gamma)
\end{align*}
rearranging we obtain
\begin{align*}
\Delta_1 \leq \frac{Tr(M^0) - \delta(\gamma) - \kappa_1\Phi_1}{1 - \kappa_1} \leq \frac{Tr(M^0) - \delta(\gamma)}{1 - \kappa_1}
\end{align*}
Similarly,
\begin{align*}
\frac{\kappa_2}{1 - \kappa_1}\Phi_2 + \frac{1 - \kappa_1 - \kappa_2}{1 - \kappa_1}\Delta_2 \leq \Delta_1 - \delta(\gamma)
\end{align*}
Rearranging we obtain
\begin{align*}
\Delta_2 \leq (\Delta_1 - \delta(\gamma) - \frac{\kappa_2}{1 - \kappa_1}\Phi_2)\frac{1 - \kappa_1}{1 - \kappa_1 - \kappa_2} \leq (\Delta_1 - \delta(\gamma))\frac{1 - \kappa_1}{1 - \kappa_1 - \kappa_2}
\end{align*}
In general
\begin{align*}
\Delta_t \leq (\Delta_{t-1} - \delta(\gamma))\frac{1 - \sum\limits_{i=1}^{t-1} \kappa_i}{1 - \sum\limits_{i=1}^t \kappa_i}
\end{align*}
Plugging in the upper bounds for $\Delta_{t-1}$ and telescoping outward we find
\begin{align*}
\Delta_m \leq \frac{Tr(M^0) - \delta(\gamma) - \delta(\gamma)(1 - \kappa_1) - \delta(\gamma)(1 - \kappa_1 - \kappa_2) - ... - \delta(\gamma)(1 - \sum\limits_{i=1}^{m-1}\kappa_i)}{1 - \sum\limits_{i=1}^m \kappa_i}
\end{align*}
\begin{align*}
= \frac{Tr(M^0) - \delta(\gamma)(1 + (1 - \kappa_1) + (1 - \kappa_1 - \kappa_2) + ... + (1 - \sum\limits_{i=1}^{m-1}\kappa_i))}{1 - \sum\limits_{i=1}^m \kappa_i}
\end{align*}
\begin{align*}
= \frac{Tr(M^0) - \delta(\gamma)(m - \sum\limits_{j=1}^{m-1}(m-j)\kappa_j)}{1 - \sum\limits_{i=1}^m \kappa_i}
\end{align*}
\begin{align*}
= \frac{Tr(M^0) - \delta(\gamma)(m(1 - \sum\limits_{j=1}^{m-1}\kappa_j) + \sum\limits_{j=1}^{m-1}j\kappa_j))}{1 - \sum\limits_{i=1}^m \kappa_i}
\end{align*}
\begin{align*}
\leq \frac{Tr(M^0) - \delta(\gamma)(m(1 - \sum\limits_{j=1}^{m-1}\kappa_j) )}{1 - \sum\limits_{i=1}^m \kappa_i}
\end{align*}
Using the fact that $\sum\limits_{i=1}^m \kappa_i < 1 - \frac{\beta}{2}$ in both numerator and denominator we obtain
\begin{align*}
\leq \frac{2}{\beta}(Tr(M^0) - \delta(\gamma)m\frac{\beta}{2} )
\end{align*}
So for $m \geq \frac{2Tr(M^0)}{\beta \delta(\gamma)}$ we have a contradiction, as the trace is nonnegative.
\end{proof}
\end{comment}
\section{Frobenius Minimization}
\begin{comment}
\begin{theorem}\label{center}
For any pseudodistribution $\zeta$ output by RobustMeanSDP, the following inequality holds:
\begin{equation}
\norm{\pE_\zeta[\hat{\mu}] - \mu}_2 \leq \sqrt{\frac{2\max_{u \in \mathcal S^{d-1}}\pE_\zeta[\langle \pE_\zeta[\hat{\mu}] - \hat{\mu},u\rangle^2] + \pE_\zeta[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^2]}{\pE_\zeta[\sumn w_iw_i']}}
\end{equation}
Similarly, for any pseudodistribution $\zeta$ output by RobustRegressionSDP, the following inequality holds:
\begin{equation}
\norm{\pE_\zeta[\ell] - \ell'}_2 \leq \sqrt{\frac{2\max_{u \in \mathcal S^{d-1}}\pE_\zeta[\langle \pE_\zeta[\ell] - \ell,u\rangle^2] + \pE_\zeta[\sumn w_iw_i'\norm{\ell - \ell'}^2]}{\pE_\zeta[\sumn w_iw_i']}}
\end{equation}
\end{theorem}
\begin{proof}
The proof of \ref{center} is the same for both mean estimation and regression. For mean estimation let $R := \hat{\mu}$ and for regression let $R := \ell$. We have
\begin{align*}
\pE[\sumn w_iw_i']\langle \pE[R] - R',u\rangle^2 = \pE[\sumn w_iw_i'\langle \pE[R] - R',u\rangle^2]
\end{align*}
Applying the triangle inequality,
\begin{align*}
=\pE[\sumn w_iw_i'\langle \pE[R] - R + R - R',u\rangle^2] \leq 2
\pE[\sumn w_iw_i'\langle \pE[R] - R,u\rangle^2] + 2\pE[\sumn w_iw_i'\langle R - R',u\rangle^2]
\end{align*}
Summing over $w_iw_i'$ in the first term to obtain global correlation, we have
\begin{align*}
\leq 2
\pE[\langle \pE[R] - R,u\rangle^2] + 2\pE[\sumn w_iw_i'\langle R - R',u\rangle^2]
\end{align*}
By Cauchy--Schwarz on the second term we obtain
\begin{align*}
\pE[\sumn w_iw_i']\langle \pE[R] - R',u\rangle^2 \leq 2
\pE[\langle \pE[R] - R,u\rangle^2] + 2\pE[\sumn w_iw_i'\norm{R - R'}^2]
\end{align*}
Rearranging the terms, we obtain
\begin{align*}
\langle \pE_\zeta[R] - R', u \rangle \leq \sqrt{\frac{2\pE[\langle \pE[R] - R,u\rangle^2] + 2\pE_\zeta[\sumn w_iw_i'\norm{R - R'}^2]}{\pE_\zeta[\sumn w_iw_i']}}
\end{align*}
Maximizing over all $u \in \mathcal S^{d-1}$,
\begin{align*}
\norm{\pE_\zeta[R] - R'}_2 \leq \sqrt{\frac{2\max_{u \in \mathcal S^{d-1}}\pE[\langle \pE[R] - R,u\rangle^2] + 2\pE_\zeta[\sumn w_iw_i'\norm{R - R'}^2]}{\pE_\zeta[\sumn w_iw_i']}}
\end{align*}
as desired.
\end{proof}
\noindent
The above theorem reduces the problem of finding a pseudodistribution for which $\norm{\pE_\zeta[R] - R'}_2$ is small to that of upper bounding $\max_{u \in \mathcal S^{d-1}}\pE_\zeta[\langle \pE_\zeta[R] - R,u\rangle^2]$ and $\pE_\zeta[\sumn w_iw_i'\norm{R - R'}^2]$, and lower bounding $\pE_\zeta[\sumn w_iw_i']$. Henceforth, we will loosely refer to
$\max_{u \in \mathcal S^{d-1}}\pE_\zeta[\langle \pE_\zeta[R] - R,u\rangle^2]$ as global correlation, and $\pE_\zeta[\sumn w_iw_i'\norm{R - R'}^2]$ as the ``bias'' term. Global correlation captures the variance of the estimator $R$. If $R$ is concentrated, then global correlation is small and the estimator $R$ is close to its expectation $\pE_\zeta[R]$. The ``bias'' term intuitively captures how far $\pE_\zeta[R]$ is from $R'$.
\end{comment}
\section{Frobenius Norm Minimization} \label{sec:Frobenius}
\restatelemma{lem:FrobeniusMinimization}
\begin{proof}
Let $\pE_P$ denote the pseudo-expectation operator corresponding to the actual assignment $\{w'_i\}_{i \in [N]}$ and $\{\ell'\}$.
Note that $\pE_P$ is an actual expectation over an assignment satisfying the polynomial constraints.
For a constant $\kappa \in [0,1]$, let us consider the pseudoexpectation operator $\pE_R$ defined as follows for a polynomial $p(w)$,
$$\pE_R[p(w)] \stackrel{\mathrm{def}}{=} \kappa \pE_P[p(w)] + (1-\kappa)\pE_D[p(w)]$$
Since $\pE_D$ is the pseudoexpectation operator that minimizes $\norm{\pE_D[w]}$, we get that
\begin{equation}\label{eqfm1}
\iprod{\pE_R[w], \pE_R[w]} \geq \iprod{\pE_D[w], \pE_D[w]}
\end{equation}
Expanding the LHS with the definition of $\pE_R$ we have
\begin{align*}
(1-\kappa)^2 \cdot \iprod{\pE_D[w], \pE_D[w]} + 2 \kappa (1-\kappa) \iprod{\pE_D[w], \pE_P[w]} + \kappa^2 \iprod{\pE_P[w],\pE_P[w]} \geq \iprod{\pE_D[w], \pE_D[w]}
\end{align*}
Rearranging the terms we get
\begin{align*}
\iprod{\pE_D[w], \pE_P[w]} \geq \frac{1}{2 \kappa(1-\kappa)} \left((2\kappa - \kappa^2)\iprod{\pE_D[w], \pE_D[w]} -\kappa^2 \iprod{\pE_P[w],\pE_P[w]}\right)
\end{align*}
By definition, we have that $\iprod{\pE_P[w], \pE_P[w]} = \sum_i w_i^{'2} = \beta N$. By the Cauchy--Schwarz inequality, $\iprod{\pE_D[w],\pE_D[w]} \geq \frac{1}{N} \left(\sum_i \pE_D[w_i]\right)^2 = \frac{1}{N} (\beta N)^2 = \beta^2 N$. Substituting these bounds back, we get that
\begin{align*}
\iprod{\pE_D[w], \pE_P[w]} \geq \frac{\left((2\kappa - \kappa^2)\beta^2 -\kappa^2 \beta\right)}{2 \kappa(1-\kappa)} \cdot N
\end{align*}
Taking limits as $\kappa \to 0$, we get the desired result.
\end{proof}
\iffalse
$$\sum\limits_{i=1}^N (\pE_R[w_i])^2
= \sum\limits_{i=1}^N (\kappa \pE_P[w_i] + (1-\kappa)\pE_D[w_i])^2$$
Expanding the square,
$$ = (1 - \kappa)^2 \sum\limits_{i=1}^N (\pE_D[w_i])^2 + 2\kappa(1-\kappa)\sum\limits_{i=1}^N \pE_D[w_i]\pE_P[w_i] + \kappa^2\sum\limits_{i=1}^N \pE_P[w_i]^2$$
Using the definition of $\pE_P$,
$$ = (1 - \kappa)^2 \sum\limits_{i=1}^N (\pE_D[w_i])^2 + 2\kappa(1-\kappa)\sum\limits_{i=1}^N \pE_D[w_i]w'_i + \kappa^2 \sum_{i = 1}^N w_i'^2$$
$$= (1 - \kappa)^2 \sum\limits_{i=1}^N (\pE_D[w_i])^2 + 2\kappa(1-\kappa)\pE_D[\sum\limits_{i=1}^N w_i w_i'] + \kappa^2 \beta N$$
Now we rewrite \ref{eqfm1} as
\begin{equation}
(1 - \kappa)^2 \sum\limits_{i=1}^N (\pE_D[w_i])^2 + 2\kappa(1-\kappa)\pE_D[\sum\limits_{i=1}^N w_i w_i'] + \kappa^2 \beta N \geq \sum\limits_{i=1}^N (\pE_D[w_i])^2
\end{equation}
Rearranging the terms we obtain
$$ 2\kappa(1-\kappa)\pE_D[\sum\limits_{i=1}^N w_i w_i'] + \kappa^2 \beta N \geq (2\kappa - \kappa^2)\sum\limits_{i=1}^N (\pE_D[w_i])^2$$
Dividing both sides by $\kappa$ (note $\kappa > 0$),
\begin{equation}\label{eqfm2}
2(1-\kappa)\pE_D[\sum\limits_{i=1}^N w_i w_i'] + \kappa \beta N \geq (2 - \kappa)\sum\limits_{i=1}^N (\pE_D[w_i])^2
\end{equation}
\Pnote{it might be good to call this Cauchy-Schwartz instead of Jensen's}
Note that by Jensen's inequality and applying the constraint $\sum\limits_{i=1}^N w_i = \beta N$, the RHS is greater than
$$ (2 - \kappa)\sum\limits_{i=1}^N (\pE_D[w_i])^2 = (2 - \kappa)N\frac{1}{N}\sum\limits_{i=1}^N (\pE_D[w_i])^2 \geq
(2 - \kappa)N(\frac{1}{N} \sum\limits_{i=1}^N\pE_D[w_i])^2$$
$$ = (2 - \kappa)N(\frac{1}{N}\pE_D[\sum\limits_{i=1}^N w_i])^2$$
$$ = (2 - \kappa)\beta^2N $$
Thus rewriting \ref{eqfm2} we obtain
$$2(1-\kappa)\pE_D[\sum\limits_{i=1}^N w_i w_i'] + \kappa \beta N \geq (2 - \kappa)\beta^2 N$$
which for sufficiently small, but positive, values of $\kappa$ gives
$$\pE_D[\sum\limits_{i=1}^N w_i w_i'] \geq \beta^2 N $$
as desired.
\fi
\begin{proof}
(Proof of \prettyref{lem:cr2})
We invoke \prettyref{thm:onedimrounding} for $z_i \mathrel{\mathop:}= w_i\iprod{X_i - \pE[\hat{\mu}],v}$ for $i \in [N]$.
Now it suffices to upper bound $\frac{1}{\beta N}\sum\limits_{i=1}^N \pVar(z_i)$.
\begin{align*}
\sumn \pVar(z_i) \leq \sumn \pE[z_i^2] = \pE\left[\sumn w_i\iprod{X_i - \pE[\hat{\mu}],v}^2\right]
\end{align*}
We add and subtract $\hat{\mu}$ and expand the expression with the triangle inequality to obtain
\begin{align*}
= \pE\left[\sumn w_i\iprod{X_i - \hat{\mu} + \hat{\mu} - \pE[\hat{\mu}],v}^2\right]
\end{align*}
\begin{align*}
\leq 2\Big(\pE\left[\sumn w_i\iprod{X_i - \hat{\mu},v}^2\right] + \pE\left[\iprod{\hat{\mu} - \pE[\hat{\mu}],v}^2\right]\Big) = 2\Big(\pE\left[\sumn w_i\iprod{X_i - \hat{\mu},v}^2\right] + \pVar[\iprod{\hat{\mu},v}]\Big)
\end{align*}
as desired.
\end{proof}
\begin{proof}
(Proof of \prettyref{lem:meansnapping})
First, we will make the following claim, which we will prove later.
\begin{claim}\label{mc}
\begin{align*}
\pE\left[\sumn w_iw_i'\right](\langle\pE[\hat{\mu}] - \mu,u\rangle)^2 \leq \pE\left[\langle\hat{\mu} - \pE[\hat{\mu}],u\rangle^2\right] + \pE\left[\sumn w_iw_i' \norm{\hat{\mu} - \mu}^2\right]
\end{align*}
\end{claim}
Which implies
\begin{align*}
\langle\pE[\hat{\mu}] - \mu,u\rangle^2\leq \frac{\pE[\langle\hat{\mu} - \pE[\hat{\mu}],u\rangle^2]}{\pE\left[\sumn w_iw_i'\right]} + \pE\left[\frac{\sumn w_iw_i'}{\pE\left[\sumn w_iw_i'\right]} \norm{\hat{\mu} - \mu}^2\right]
\end{align*}
Now we bound the second term on the right hand side.
Let $\mathcal G(w)$ be the space of polynomials over $w_1,...,w_N$. Consider the functional $\pE': \mathcal G(w) \rightarrow \mathbb R$ which takes a polynomial $f(w)$ and maps it to $\pE' f(w) = \pE\left[\frac{\sumn w_iw_i'}{\pE\left[\sumn w_iw_i'\right]}f(w)\right]$. We observe that $\pE'$ is a valid pseudoexpectation. Therefore, applying Cauchy--Schwarz we obtain
\begin{align*}
\pE\left[\frac{\sumn w_iw_i'}{\pE\left[\sumn w_iw_i'\right]}\norm{\hat{\mu} - \mu}^2\right] = \pE'\left[ \norm{\hat{\mu} - \mu}^2\right] \leq \pE'\left[\norm{\hat{\mu} - \mu}^k\right]^{\frac{2}{k}} = \pE\left[\frac{\sumn w_iw_i'}{\pE[\sumn w_iw_i']}\norm{\hat{\mu} - \mu}^k\right]^{2/k}
\end{align*}
Rearranging we have
\begin{align*}
\pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^2\right] \leq \pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^k\right]^\frac{2}{k}\pE\left[\sumn w_iw_i'\right]^{1 - \frac{2}{k}}
\end{align*}
Then by SOS Cauchy--Schwarz, followed by the SOS inequality (Lemma 5.5 of \cite{hopkins2018mixture}), we have
\begin{align*}
\pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^k\right] \leq \pE\left[(\sumn w_iw_i')^2\norm{\hat{\mu} - \mu}^{2k}\right]^\frac{1}{2} \leq \pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^k\right]^{\frac{1}{2}}
\end{align*}
Any number that is smaller than its own square root is at most one, i.e.
\begin{align*}
\pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^k\right] \leq 1
\end{align*}
So we conclude that
\begin{align*}
\pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^2\right] \leq \pE\left[\sumn w_iw_i'\right]^{1 - \frac{2}{k}}
\end{align*}
Plugging back into the second term above we obtain
\begin{align*}
\leq \frac{\pE\left[\sumn w_iw_i'\right]^{1 - \frac{2}{k} }}{ \pE\left[\sumn w_iw_i'\right]} = \frac{1}{\pE\left[\sumn w_iw_i'\right]^\frac{2}{k}} \leq \frac{1}{\beta^{\frac{2}{k}}}
\end{align*}
Where in the last line we used the result of Frobenius norm minimization, $\pE\left[\sumn w_iw_i'\right] \geq \beta$. Then by taking the square root on both sides we obtain
\begin{align*}
\norm{\pE[\hat{\mu}] - \mu} \leq 1 + \frac{1}{\beta^{\frac{1}{k}}} = O\Big(\frac{1}{\beta^{\frac{1}{k}}}\Big)
\end{align*}
\end{proof}
Now we prove the claim.
\begin{proof}
(Proof of \ref{mc})
\begin{align*}
\pE\left[\sumn w_iw_i'\right]\langle \pE[\hat{\mu}] - \mu,u\rangle^2 = \pE\left[\sumn w_iw_i'\langle \pE[\hat{\mu}] - \mu,u\rangle^2\right]
\end{align*}
Applying the triangle inequality,
\begin{align*}
=\pE\left[\sumn w_iw_i'\langle \pE[\hat{\mu}] - \hat{\mu} + \hat{\mu} - \mu,u\rangle^2\right] \leq 2
\pE\left[\sumn w_iw_i'\langle \pE[\hat{\mu}] - \hat{\mu},u\rangle^2\right] + 2\pE\left[\sumn w_iw_i'\langle \hat{\mu} - \mu,u\rangle^2\right]
\end{align*}
Using $w_iw_i' \leq 1$ in the first term, we have
\begin{align*}
\leq 2
\pE\left[\langle \pE[\hat{\mu}] - \hat{\mu},u\rangle^2\right] + 2\pE\left[\sumn w_iw_i'\langle \hat{\mu} - \mu,u\rangle^2\right]
\end{align*}
By SOS Cauchy--Schwarz on the second term we obtain
\begin{align*}
\pE\left[\sumn w_iw_i'\right]\langle \pE[\hat{\mu}] - \mu,u\rangle^2 \leq 2
\pE\left[\langle \pE[\hat{\mu}] - \hat{\mu},u\rangle^2\right] + 2\pE\left[\sumn w_iw_i'\norm{\hat{\mu} - \mu}^2\right]
\end{align*}
\end{proof}
\section{Preprocessing via Conditioning}
\begin{theorem}
Consider the strategy $\mathcal S$ of conditioning on $w_j = 1$, where $j$ is selected with probability
$\mathcal S(j) = \frac{\pE[w_j]}{M}$. Then, in expectation over the selection of $j$, we have that $\norm{\mathcal Q}_{op}$ is small:
\begin{align*}
\E_{j \sim \mathcal S}\pVar(\iprod{\hat{\mu},u}|w_j = 1) < 4m_2
\end{align*}
and there is correlation with the plant:
\begin{align*}
\E_{j \sim \mathcal S}\pE[\sumn w_iw_i'|w_j = 1] \geq \beta
\end{align*}
\end{theorem}
\begin{proof}
The correlation with the plant follows by the definition of $\mathcal S$. We now upper bound $\norm{\mathcal Q}_{op}$ explicitly.
\begin{align*}
\E_{j \sim \mathcal S} \pVar(\iprod{\hat{\mu},u}|w_j = 1) = \E_{j \sim \mathcal S}\pE[\iprod{\hat{\mu} - \pE[\hat{\mu}|w_j=1],u}^2|w_j=1]
\end{align*}
By the triangle inequality,
\begin{align*}
\leq 2\E_{j \sim \mathcal S} \pE[\iprod{\hat{\mu} - X_j,u}^2|w_j=1] + 2\E_{j \sim \mathcal S}\iprod{X_j - \pE[\hat{\mu}|w_j=1],u}^2
\end{align*}
Applying the definition of $\mathcal S$ to the first term we obtain
\begin{align*}
= 2\pE[\frac{1}{M}\sum\limits_{j=1}^N w_j\iprod{X_j - \hat{\mu},u}^2] + 2\E_{j \sim \mathcal S}\iprod{X_j - \pE[\hat{\mu}|w_j=1],u}^2
\end{align*}
Then using pseudoexpectation Cauchy--Schwarz on the second term we obtain
\begin{align*}
\leq 2\pE[\frac{1}{M}\sum\limits_{j=1}^N w_j\iprod{X_j - \hat{\mu},u}^2] + 2\E_{j \sim \mathcal S}\pE[\iprod{X_j - \hat{\mu},u}^2|w_j=1]
\end{align*}
Applying the definition of $\mathcal S$ to the second term we obtain
\begin{align*}
\leq 2\pE[\frac{1}{M}\sum\limits_{j=1}^N w_j\iprod{X_j - \hat{\mu},u}^2] + 2\pE[\frac{1}{M}\sum\limits_{j=1}^Nw_j\iprod{X_j - \hat{\mu},u}^2] \leq 4m_2
\end{align*}
as desired.
\end{proof}
\section{Regression Rounding}\label{sec:reg-rounding}
In this section we prove that concentration rounding decreases $\norm{\mathcal Q}_{nuc}$. First we closely approximate $\pVar[\iprod{\ell,u}]$ by $\pVar[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i, u \rangle]$ for any unit vector $u \in S^{d-1}$. Then we apply \prettyref{thm:onedimrounding} with the strategy $\mathcal S_v$ to analyze a single iteration of concentration rounding.
We begin with the following useful lemma for working with pseudovariance.
\Pnote{we should state what we are trying to do in this section, the reader doesn't know what to expect in this section}
\begin{lemma}[Pseudovariance Triangle Inequality]
Let $f(x)$ and $g(x)$ be polynomials. Then for any $\psi > 0$ there is a degree $2$ SoS proof of the following.
\begin{equation*}
\pVar[f(x) + g(x)] \leq (1+\psi)\pVar[f(x)] + \Big(\frac{1 + \psi}{\psi}\Big)\pVar[g(x)]
\end{equation*}
\end{lemma}
\Pnote{$\psi$ is not quantified here}
\begin{proof}
\begin{equation}\label{psi1}
\pVar[f(x) + g(x)] = \pE[(f(x)+ g(x) - \pE[f(x) + g(x)])^2] = \pE[((f(x) - \pE[f(x)]) + (g(x) - \pE[g(x)]))^2]
\end{equation}
Then we observe that there is a degree $2$ SoS proof of the fact
\begin{align*}
(f(x) + g(x))^2 = f(x)^2 + g(x)^2 + 2\psi f(x)\frac{g(x)}{\psi} \leq (1 + \psi^2)f(x)^2 + \Big(\frac{1 + \psi^2}{\psi^2}\Big)g(x)^2
\end{align*}
Plugging this into \ref{psi1},
\begin{align*}
\leq (1 + \psi^2)\pE[(f(x) - \pE[f(x)])^2] + \Big(\frac{1 + \psi^2}{\psi^2}\Big)\pE[(g(x) - \pE[g(x)])^2] = (1 + \psi^2)\pVar[f(x)] + \frac{1 + \psi^2}{\psi^2}\pVar[g(x)]
\end{align*}
Substituting the variable $\psi' = \psi^2 > 0$ we obtain the desired result.
\end{proof}
\restatelemma{lem:regstrategy}
\Pnote{the error term looks rather messy. what is $\epsilon_4$, and can we substitute the value for $B$. Also, is the final term $\pE_{\zeta}[ \norm{\ell - \pE[\ell]}^2]$ equal to trace of pseudocovariance matrix of $\ell$. If so, we should just define the pseudocovariance matrix at some point, and use it in this statement}
To prove \prettyref{lem:regstrategy} we will need the following lemma.
\begin{lemma}\torestate{\label{lem:reg-approx-round}
Let $\pE$ be a pseudodistribution satisfying $\mathcal P$. The following holds.
\begin{equation}\label{rr5}
\pVar[\langle \ell,u\rangle] \leq (1 + o_d(1))\pVar[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i, u \rangle] + o_d(1)
\end{equation}
\begin{equation}\label{rr6}
\pVar[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i, u \rangle]\leq (1 + o_d(1))\pVar[\langle \ell,u\rangle] + o_d(1)
\end{equation}
}
\end{lemma}
\Pnote{Is there a with high probability in the above statement? what is the error $1/poly(d)$ coming from?}
Informally, \ref{lem:reg-approx-round} gives us an arbitrarily good approximation (up to a negligible additive error term) to $\pVar[\langle \ell,u\rangle]$ by the variance of an estimator that is amenable to rounding via \prettyref{thm:onedimrounding}. We defer the proof to the end of the section. Now we are ready to prove \prettyref{lem:regstrategy}.
\begin{proof}
(Proof of \prettyref{lem:regstrategy})
First we apply \prettyref{lem:reg-approx-round} to obtain an arbitrarily good constant factor approximation of the variance decrease.
\begin{align*}
\pVar_\zeta[\iprod{\ell,v}] - \E_{j \sim S_v}\E_{b_j} \pVar_\zeta[\iprod{\ell,v}|w_j = b_j]
\geq (1 - o_d(1))\pVar\left[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle\right] \\- (1+o_d(1)) \E_{j \sim S_v}\E_{b_j} \pVar\left[\sumn w_i (y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle|w_j = b_j\right] - o_d(1)
\end{align*}
Using the setting $d > \rho^2$ we have $\rho^2 o_d(1) = o_d(1)$, and we simplify the above expression to obtain
\begin{align}\label{eq:3.3.1}
= \Big(\pVar\left[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle\right] - \E_{j \sim S_v}\E_{b_j} \pVar\left[\sumn w_i (y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle|w_j = b_j\right]\Big)-o_d(1)
\end{align}
\begin{comment}
and consider the second and third terms to be small error terms. Indeed, the second term can be upper bounded via \prettyref{lem:reg-approx-round} as follows
\begin{align*}
\psi\Big(\pVar\left[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle\right] + \E_{j \sim S_u}\E_{b_j} \pVar\left[\sumn w_i (y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle|w_j = b_j\right]\Big)
\end{align*}
\begin{align*}
\leq \psi (1+\psi)(\pVar_\zeta[\iprod{\ell,v}] + \E_{j \sim S(v)}\E_{b_j} \pVar_\zeta[\iprod{\ell,v}|w_j = b_j]) + O\Big(\frac{(1+\psi)^2}{\psi}\epsilon\rho^2\Big)
\end{align*}
Upper bounding the variance of $\ell$ in any unit direction by the sum of the variances over an orthogonal basis we obtain
\begin{align*}
\leq \psi (1+\psi)(\norm{\mathcal Q}_{nuc} + \E_{j \sim S(v)}\E_{b_j} \norm{\mathcal Q|_{w_j = b_j}}_{nuc}) + O\Big(\frac{(1+\psi)^2}{\psi}\epsilon\rho^2\Big)
\end{align*}
Using the scale constraint (7) we have $\norm{\mathcal Q}_{nuc} = \pE[\norm{\ell - \pE[\ell]}^2] \leq \pE[\norm{\ell}^2] \leq \rho^2$. Plugging into the above expression we have
\begin{align*}
\leq \psi (1+\psi)(\rho^2 + \E_{j \sim S(v)}\E_{b_j} \norm{\mathcal Q|_{w_j = b_j}}_{nuc}) + O\Big(\frac{(1+\psi)^2}{\psi}\epsilon\rho^2\Big)
\end{align*}
Then using the fact that conditioning preserves the polynomial constraints $\mathcal P$, we apply the same argument to $\E_{j \sim S(v)}\E_{b_j} \norm{\mathcal Q|_{w_j = b_j}}_{nuc} \leq \rho^2$ and obtain
\begin{align*}
\leq \psi (1+\psi)(2\rho^2) + O\Big(\frac{(1+\psi)^2}{\psi}\epsilon\rho^2\Big)
\end{align*}
Which for $\psi = \sqrt{\epsilon}$ yields an error term of $O(\sqrt{\epsilon}\rho^2)$.
Thus we have upper bounded the error term in \prettyref{eq:3.3.1}, which we restate here
\begin{align*}
\pVar_\zeta[\iprod{\ell,v}] - \E_{j \sim S(v)}\E_{b_j} \pVar_\zeta[\iprod{\ell,v}|w_j = b_j]
\end{align*}
\begin{align}\label{eq:3.3.2}
= \Big(\pVar\left[\sumn w_i(y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle\right] - \E_{j \sim S_u}\E_{b_j} \pVar\left[\sumn w_i (y_i - \langle \pE[\ell],X_i\rangle)\langle X_i,u\rangle|w_j = b_j\right]\Big)
- O(\sqrt{\epsilon}\rho^2)
\end{align}
\Pnote{Do we use Lemma 7.2 for constant $\psi$ or subconstant $\psi$ also? we might have to mention this, when we say "for any $\psi > 0$" to "for every constant $\psi$"}
\end{comment}
To lower bound the first term above, we apply \cclassmacro{P}rettyref{thm:onedimrounding} with $Z_i = w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i, u\rightarrowngle$. This immediately gives us,
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[\sumn w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle\right] - \E_{j \sim S_u}\E_{b_j} \cclassmacro{P}Var\left[\sumn w_i (y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle|w_j = b_j\right]
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\geq \frac{\boldsymbol{e}ta\cclassmacro{P}Var[\sumn w_i (y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle]^2}{\sumn\cclassmacro{P}Var[w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle]}
\epsilonilonnd{align*}
\Pnote{slash left bracket and slash right bracket here}
Applying \cclassmacro{P}rettyref{lem:reg-approx-round} to the numerator we obtain
\boldsymbol{e}gin{align*}
\geq \frac{(1 - o_d(1))\boldsymbol{e}ta\cclassmacro{P}Var[\iprod{\epsilonilonll,u}]^2 - o_d(1)}{\sumn\cclassmacro{P}Var[w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle]}
\epsilonilonnd{align*}
Now we upper bound the denominator by
$$ \sumn\cclassmacro{P}Var[w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle] \leq 2(g\sigma^4B)^\frac{1}{2} + 2B\norm{\mathcal Q}_{nuc} $$
The proof is as follows. First we use $\cclassmacro{P}Var(X) \leq \cclassmacro{P}E[X^2]$ to obtain
\boldsymbol{e}gin{align*}
\sumn\cclassmacro{P}Var(w_i(y_i - \iprod{ \cclassmacro{P}E[\epsilonilonll],X_i})\iprod{ X_i,u}) \leq \sumn\cclassmacro{P}E[w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2)]
\epsilonilonnd{align*}
\Pnote{should we use $\cclassmacro{P}Var[]$ everywhere instead of $\cclassmacro{P}Var()$? It will make it similar to $\cclassmacro{P}E[]$}
\boldsymbol{e}gin{align*}
= \sumn\cclassmacro{P}E[w_i(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle + \leftarrowngle \epsilonilonll,X_i\rightarrowngle - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2)]
\epsilonilonnd{align*}
Then we use degree $2$ SoS triangle inequality to obtain
\boldsymbol{e}gin{equation}\leftarrowbel{rr4}
\leq 2\cclassmacro{P}E[\sumn w_i(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2] + 2\cclassmacro{P}E[\sumn w_i\leftarrowngle\epsilonilonll - \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle^2\leftarrowngle X_i,u\rightarrowngle^2)]
\epsilonilonnd{equation}
The first term is upper bounded by pseudoexpectation Cauchy-Schwarz
\Pnote{can we use the following simpler calculation here
\boldsymbol{e}gin{align*}
\cclassmacro{P}E\left[\sumn w_i(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2\right] & = \cclassmacro{P}E\left[\sumn w_i^2(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2\right] \\
& = \left(\cclassmacro{P}E\left[\sumn w_i^2(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^4\right] \right)^{\frac{1}{2}}\left( \cclassmacro{P}E\left[\sumn w_i^2\leftarrowngle X_i,u\rightarrowngle^4\right] \right)^{\frac{1}{2}}\\
\epsilonilonnd{align*}
}
\boldsymbol{e}gin{align*}
\cclassmacro{P}E\left[\sumn w_i(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2\right] & = \cclassmacro{P}E\left[\sumn w_i^2(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^2\leftarrowngle X_i,u\rightarrowngle^2\right] \\
& = \left(\cclassmacro{P}E\left[\sumn w_i^2(y_i - \leftarrowngle \epsilonilonll,X_i\rightarrowngle)^4\right] \right)^{\frac{1}{2}}\left( \cclassmacro{P}E\left[\sumn w_i^2\leftarrowngle X_i,u\rightarrowngle^4\right] \right)^{\frac{1}{2}}\\
\epsilonilonnd{align*}
Then by degree $2$ SoS Cauchy-Schwarz, followed by applying the fourth moment constraints on noise (4), we obtain
\Pnote{ along with referring to $4th$ moment constraint on noise, it might be clearer if we number the constraints of the SDP, and refer to a particular constraint of the SDP that is used, directly with numbering}
\[
\leq (g\sigma^4)^{\frac{1}{2}}\pE[\iprod{\sumn w_iX_i^{\otimes 2}(X_i^{\otimes 2})^T, u^{\otimes 2}(u^{\otimes 2})^T}]^{\frac{1}{2}}
\]
\[
= (g\sigma^4)^{\frac{1}{2}}\pE[\iprod{\sumn w_iX_i^{\otimes 2}(X_i^{\otimes 2})^T - M_4, u^{\otimes 2}(u^{\otimes 2})^T} + \iprod{M_4,u^{\otimes 2}(u^{\otimes 2})^T}]^{\frac{1}{2}}
\]
Then applying Cauchy-Schwarz, followed by applying the fourth moment constraints on the covariates (3), we obtain
\[
\leq (g\sigma^4)^{\frac{1}{2}}\pE[\norm{\sumn w_iX_i^{\otimes 2}(X_i^{\otimes 2})^T - M_4}_F^2 + B]^{\frac{1}{2}}
\leq (2g\sigma^4B)^{\frac{1}{2}}
\]
Next we upper bound the second term in \ref{rr4} by SoS Cauchy-Schwarz:
\begin{align*}
\pE\left[\sumn w_i\langle\ell - \pE[\ell],X_i\rangle^2\langle X_i,u\rangle^2\right] = \pE\left[\langle (\ell - \pE[\ell])^{\otimes 2}(u^{\otimes 2})^T, \sumn w_iX_i^{\otimes 2}(X_i^{\otimes 2})^T \rangle\right]
\end{align*}
\begin{align*}
= \pE\left[\langle (\ell - \pE[\ell])^{\otimes 2}(u^{\otimes 2})^T, \sumn w_iX_i^{\otimes 2}(X_i^{\otimes 2})^T - M_4\rangle\right] + \pE\left[\langle (\ell - \pE[\ell])^{\otimes 2}(u^{\otimes 2})^T, M_4\rangle\right]
\end{align*}
Applying SoS Cauchy-Schwarz to the first term we obtain
\begin{align*}
\leq \pE\left[\norm{\ell - \pE[\ell]}^2\norm{\sumn w_iX_i^{\otimes 2}(X_i^{\otimes 2})^T - M_4}_F^2\right] + \pE\left[\langle (\ell - \pE[\ell])^{\otimes 2}(u^{\otimes 2})^T, M_4\rangle\right]
\end{align*}
Then applying the fourth moment constraints on the covariates (3) and the definition of $\norm{\mathcal Q}_{nuc}$ we obtain
\begin{align*}
\leq \epsilon\norm{\mathcal Q}_{nuc} + \pE\left[\langle (\ell - \pE[\ell])^{\otimes 2}(u^{\otimes 2})^T, M_4\rangle\right]
\end{align*}
We upper bound the second term above using the assumption upper bounding the fourth injective norm of the covariates:
\begin{align*}
\pE\left[\langle (\ell - \pE[\ell])^{\otimes 2}(u^{\otimes 2})^T, M_4\rangle\right] \leq B\norm{\mathcal Q}_{nuc}
\end{align*}
For $\mathcal N(0,I)$ we have $B = 3$. Plugging both terms back into \ref{rr4}, we obtain
\begin{align*}
\pVar_\zeta[\iprod{\ell,v}] - \E_{j \sim S_v}\E_{b_j} \pVar_\zeta[\iprod{\ell,v}\mid w_j = b_j]
\end{align*}
\begin{align*}
\geq \frac{(1-o_d(1))\beta\pVar_\zeta[\iprod{\ell,v}]^2 - o_d(1)}{2\sigma^2(gB)^\frac{1}{2} + 2B\norm{\mathcal Q}_{nuc}} - o_d(1)
= \frac{\beta\pVar_\zeta[\iprod{\ell,v}]^2 }{2\sigma^2(gB)^\frac{1}{2} + 2B\norm{\mathcal Q}_{nuc}} - o_d(1)
\end{align*}
\Pnote{in this bound, can we just use $B(1+\epsilon_4)$ by $2\norm{M_4}_{inj}$}
Using the assumption $\norm{\mathcal Q}_{nuc} > \sigma^2\sqrt{g}$ and setting $B = 3$ we have
\begin{align*}
\pVar_\zeta[\iprod{\ell,v}] - \E_{j \sim S_v}\E_{b_j} \pVar_\zeta[\iprod{\ell,v}\mid w_j = b_j]
\geq \Omega\Big(\frac{\beta\pVar_\zeta[\iprod{\ell,v}]^2}{\norm{\mathcal Q}_{nuc}}\Big)
\end{align*}
\end{proof}
\Pnote{We should replace all instances of $||$ by $\norm{}$}
\begin{comment}
All that remains to prove for rounding regression is \cclassmacro{P}rettyref{lem:reg-approx-round}. The proofs of \ref{rr5} and \ref{rr6} are nearly identical. We include both below.
\Pnote{ a little more context will be useful here, reader is likely lost in the details of the previous proof}
\boldsymbol{e}gin{proof}(Proof of \ref{rr5})
Let $u$ be a unit direction $u \in \mathcal S^{d-1}$. We know
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[\leftarrowngle \epsilonilonll,u\rightarrowngle\right] = \cclassmacro{P}Var\left[\epsilonilonll^T\left(I - \sumn w_i
X_iX_i^T\right) + \sumn w_i\leftarrowngle \epsilonilonll,X_i\rightarrowngle X_i^T u\right]
\epsilonilonnd{align*}
\Pnote{we should fix slash left bracket, slash right bracket in all of calculations below}
Using pseudovariance triangle inequality
\boldsymbol{e}gin{equation}\leftarrowbel{rr1}
\leq (1 + \cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_i\leftarrowngle \epsilonilonll,X_i \rightarrowngle\leftarrowngle X_i, u \rightarrowngle\right] + \Big(\frac{1 + \cclassmacro{P}si}{\cclassmacro{P}si}\Big)\cclassmacro{P}Var\left[\leftarrowngle \epsilonilonll u^T,I - \sumn w_iX_iX_i ^T)\rightarrowngle\right ]
\epsilonilonnd{equation}
Applying $\epsilonilonll_2$ minimization constraint 6 to the first term we obtain \Pnote{ numbering SDP constraints, and also referring to them by number makes it more readable here}
\boldsymbol{e}gin{equation}\leftarrowbel{rr1}
= (1 + \cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_i y_i\leftarrowngle X_i, u \rightarrowngle\right] + \Big(\frac{1 + \cclassmacro{P}si}{\cclassmacro{P}si}\Big)\cclassmacro{P}Var\left[\leftarrowngle \epsilonilonll u^T,I - \sumn w_iX_iX_i ^T)\rightarrowngle\right]
\epsilonilonnd{equation}
Consider the second term on the right hand side, which we upper bound as follows.
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[\leftarrowngle \epsilonilonll u^T,I - \sumn w_iX_iX_i ^T)\rightarrowngle\right] \leq \cclassmacro{P}E\left[\leftarrowngle \epsilonilonll u^T,I - \sumn w_iX_i X_i^T)\rightarrowngle^2\right]
\epsilonilonnd{align*}
First using deg $2$ SoS Cauchy Schwarz, and then both the scaling constraint and the moment bound constraint we obtain,
\boldsymbol{e}gin{equation}\leftarrowbel{rr7}
\leq 2\cclassmacro{P}E\left[\norm{\epsilonilonll}^2\norm{I - \sumn w_iX_iX_i^T}^2\right] \leq \rho^2\epsilonilonpsilon
\epsilonilonnd{equation}
Plugging back into equation \ref{rr1} we obtain
\boldsymbol{e}gin{equation}\leftarrowbel{rr2}
\cclassmacro{P}Var\left[\leftarrowngle \epsilonilonll,u\rightarrowngle\right] \leq (1+\cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_iy_i\leftarrowngle X_i, u \rightarrowngle\right] + \Big(\frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\Big)\rho^2\epsilonilonpsilon
\epsilonilonnd{equation}
\boldsymbol{e}gin{align*}
= (1 + \cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_i (y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i \rightarrowngle)\leftarrowngle X_i, u \rightarrowngle + \sumn w_i \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle\leftarrowngle X_i, u \rightarrowngle \right] + \frac{1 + \cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
Using the pseudovariance triangle inequality we obtain
\boldsymbol{e}gin{equation}\leftarrowbel{rr3}
\boldsymbol{e}gin{aligned}
\leq (1 + \cclassmacro{P}si)^2\cclassmacro{P}Var\left[\sumn w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i,u\rightarrowngle\right]+ \frac{(1+\cclassmacro{P}si)^2}{\cclassmacro{P}si}\cclassmacro{P}Var\left[ \sumn w_i\leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle \leftarrowngle X_i,u\rightarrowngle\right] \\ + \frac{1 + \cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{aligned}
\epsilonilonnd{equation}
Consider the second term. Subtracting the identity and adding it back we obtain
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[ \sumn w_i\leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle \leftarrowngle X_i ,u\rightarrowngle\right] \leq \cclassmacro{P}Var\left[\leftarrowngle \cclassmacro{P}E[\epsilonilonll] u^T, \sumn w_iX_i X_i^T-I\rightarrowngle + \leftarrowngle \cclassmacro{P}E[\epsilonilonll],u\rightarrowngle \right]
\epsilonilonnd{align*}
Noting that pseudovariance is invariant under constant shifts we obtain.
\boldsymbol{e}gin{align*}
\leq \cclassmacro{P}Var\left[\leftarrowngle \cclassmacro{P}E[\epsilonilonll] u^T, \sumn w_iX_iX_i ^T-I\rightarrowngle\right]
\leq \cclassmacro{P}E\left[\leftarrowngle \cclassmacro{P}E[\epsilonilonll]u^T, \sumn w_iX_iX_i^T - I\rightarrowngle^2\right]
\epsilonilonnd{align*}
Applying Cauchy-Schwarz, then the moment constraints, then pseudoexpectation Cauchy-Schwarz, then the scaling constraints we obtain
\boldsymbol{e}gin{equation}\leftarrowbel{rr8}
\leq \cclassmacro{P}E\left[\norm{\cclassmacro{P}E[\epsilonilonll]}^2 \norm{\sumn w_iX_iX_i^T - I}_F^2\right] \leq \norm{\cclassmacro{P}E[\epsilonilonll]}^2 \epsilonilonpsilon \leq \cclassmacro{P}E[\norm{\epsilonilonll}^2]\epsilonilonpsilon \leq \rho^2\epsilonilonpsilon
\epsilonilonnd{equation}
Plugging this bound back into \ref{rr3} we obtain
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[\iprod{\epsilonilonll,u}\right]
\leq (1+\cclassmacro{P}si)^2\cclassmacro{P}Var\left[\sumn w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i, u \rightarrowngle\right] + \frac{(1+\cclassmacro{P}si)^2}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= (1+\cclassmacro{P}si)^2\cclassmacro{P}Var\left[\sumn w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i, u \rightarrowngle\right] + O(\rho^2\epsilonilonpsilon)
\epsilonilonnd{align*}
\epsilonilonnd{proof}
\boldsymbol{e}gin{proof}(Proof of \ref{rr6})
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[\sumn w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i, u \rightarrowngle\right]
= \cclassmacro{P}Var\left[\sumn w_iy_i\leftarrowngle X_i, u \rightarrowngle - \sumn w_i\iprod{\cclassmacro{P}E[\epsilonilonll],X_i}\leftarrowngle X_i, u \rightarrowngle\right]
\epsilonilonnd{align*}
by pseudovariance triangle inequality
\boldsymbol{e}gin{align*}
\leq (1+\cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_iy_i\leftarrowngle X_i, u \rightarrowngle\right] + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\cclassmacro{P}Var\left[ \sumn w_i\iprod{\cclassmacro{P}E[\epsilonilonll],X_i}\leftarrowngle X_i, u \rightarrowngle\right]
\epsilonilonnd{align*}
applying the bound in \ref{rr8}
\boldsymbol{e}gin{align*}
\leq (1+\cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_iy_i\leftarrowngle X_i, u \rightarrowngle\right] + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
Applying the $\epsilonilonll_2$ minimization constraint we obtain
\boldsymbol{e}gin{align*}
= (1+\cclassmacro{P}si)\cclassmacro{P}Var\left[\sumn w_i
\iprod{\epsilonilonll,X_i}\leftarrowngle X_i, u \rightarrowngle\right] + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= (1+\cclassmacro{P}si)\cclassmacro{P}Var\left[\iprod{\epsilonilonll u^T,\sumn w_iX_iX_i^T - I} + \iprod{\epsilonilonll,u}\right] + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
then by pseudovariance triangle inequality
\boldsymbol{e}gin{align*}
\leq (1+\cclassmacro{P}si)^2\cclassmacro{P}Var\left[\iprod{\epsilonilonll,u}\right] + \frac{(1+\cclassmacro{P}si)^2}{\cclassmacro{P}si}\cclassmacro{P}Var\left[\iprod{\epsilonilonll u^T,\sumn w_iX_iX_i^T - I}\right] + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
Applying the bound in \ref{rr7} to the second term we conclude
\boldsymbol{e}gin{align*}
\cclassmacro{P}Var\left[\sumn w_i(y_i - \leftarrowngle \cclassmacro{P}E[\epsilonilonll],X_i\rightarrowngle)\leftarrowngle X_i, u \rightarrowngle\right]
\leq (1+\cclassmacro{P}si)^2\cclassmacro{P}Var[\iprod{\epsilonilonll,u}] + \frac{(1+\cclassmacro{P}si)^2}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon + \frac{1+\cclassmacro{P}si}{\cclassmacro{P}si}\rho^2\epsilonilonpsilon
\epsilonilonnd{align*}
as desired.
\epsilonilonnd{proof}
\end{comment}
\section{Snapping for Regression} \label{sec:regressionsnapping}
\restatelemma{lem:regressionSnapping}
\begin{proof}
Let $u \in S^{d-1}$; by linearity we have
\begin{align*}
\pE\left[\sumn w_iw_i'\right]\langle \pE[\ell] - \ell',u\rangle^2 = \pE\left[\sumn w_iw_i'\langle \pE[\ell] - \ell',u\rangle^2\right]
\end{align*}
And by degree $2$ SoS triangle inequality
\begin{align*}
=\pE[\sumn w_iw_i'\langle \pE[\ell] - \ell + \ell - \ell',u\rangle^2] \leq 2
\pE[\sumn w_iw_i'\langle \pE[\ell] - \ell,u\rangle^2] + 2\pE[\sumn w_iw_i'\langle\ell - \ell',u\rangle^2]
\end{align*}
The following expression is a sum of squares, $\mathcal R \SoSp{2} \Big\{\sumn (1 - w_i)w_i'\iprod{\pE[\ell] - \ell,u}^2 \geq 0\Big\}$, so we add it to the right-hand side to obtain
\begin{align*}
\leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + 2\pE[\sumn w_iw_i'\langle\ell - \ell',u\rangle^2]
\end{align*}
Applying degree $2$ SoS Cauchy-Schwarz to the second term we obtain
\begin{align*}
\leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + 2\pE[\sumn w_iw_i'\norm{\ell - \ell'}^2]
\end{align*}
Consider the second term above. By the properties of $(c,D(\eta))$-SoS-anticoncentration (see \prettyref{def:anticoncentration}) we upper bound it by \Pnote{expand on this citation, mention definition and lemma}
\begin{align*}
&\leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + 2\left(c\rho^2\eta + \frac{1}{\eta^2}\pE\left[\sumn w_iw_i'\langle
\ell - \ell',X_i\rangle^2\right]\right) \\
&\leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + 2\left(c\rho^2\eta + \frac{1}{\eta^2}\pE\left[\sumn w_iw_i'(\langle
\ell,X_i\rangle - y_i)^2 + (\langle
\ell',X_i\rangle - y_i)^2\right]\right)
\end{align*}
\Pnote{ what is $\delta$ in the above calculation?}
By SoS triangle inequality
\begin{align*}
\leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + 2\Big(c\rho^2\eta + \frac{2}{\eta^2}\Big(\pE\Big[\sumn w_iw_i'(\langle
\ell,X_i\rangle - y_i)^2\Big] + \pE\Big[\sumn w_iw_i'(\langle
\ell',X_i\rangle - y_i)^2\Big]\Big)\Big)
\end{align*}
Using the fact that $\mathcal P \SoSp{2} \Big\{\sumn(1 - w_i)w_i'(\langle
\ell,X_i\rangle - y_i)^2 \geq 0, \sumn(1 - w_i')w_i(\langle
\ell,X_i\rangle - y_i)^2 \geq 0\Big\}$
we add in both polynomials to obtain
\[ \leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + 2\Big(c\rho^2\eta + \frac{2}{\eta^2}\Big(\pE\Big[\sumn w_i(\langle
\ell,X_i\rangle - y_i)^2\Big] + \pE\Big[\sumn w_i'(\langle
\ell',X_i\rangle - y_i)^2\Big]\Big)\Big)
\]
Applying the SDP noise constraint (4) we obtain
\begin{align*}
\leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + \Big(2c\rho^2\eta + \frac{8\sigma^2}{\eta^2}\Big)
\end{align*}
Thus far we've shown in degree $D(\eta)$ the following inequality:
\[
\pE[\sumn w_iw_i']\langle \pE[\ell] - \ell',u\rangle^2 \leq 2
\pE[\langle \pE[\ell] - \ell,u\rangle^2] + \Big(2c\rho^2\eta + \frac{8\sigma^2}{\eta^2}\Big)
\]
This inequality holds for all $u \in S^{d-1}$; in particular, for the unit vector $u$ along $\pE[\ell] - \ell'$ we have
\[
\pE[\sumn w_iw_i']\norm{\pE[\ell] - \ell'}^2 \leq 2
\max_{u \in S^{d-1}}\pE[\langle \pE[\ell] - \ell,u\rangle^2] + \Big(2c\rho^2\eta + \frac{8\sigma^2}{\eta^2}\Big)
\]
Dividing both sides by $\pE[\sumn w_iw_i']$ and taking a square root we obtain
\[
\norm{\pE[\ell] - \ell'} \leq \sqrt{ \frac{2
\max_{u \in S^{d-1}}\pE[\langle \pE[\ell] - \ell,u\rangle^2] + \big(2c\rho^2\eta + \frac{8\sigma^2}{\eta^2}\big)}{\pE[\sumn w_iw_i']}}
\]
Plugging in the assumptions on Frobenius minimization \ref{fm} and variance reduction \ref{vr} we obtain
\Pnote{refer to the equations here}
\[
\norm{\pE[\ell] - \ell'} \leq \sqrt{ \frac{4c\rho^2\eta + \frac{8\sigma^2}{\eta^2}}{\beta}}
\]
Since $\eta$ is any constant in $[0,\frac{1}{2}]$, we conclude by writing
\[
\norm{\pE[\ell] - \ell'} \leq \sqrt{ \frac{\rho^2\eta + O(\frac{\sigma^2}{\eta^2})}{\beta}}
\]
\end{proof}
\section{Certifying Anticoncentration}\label{sec:anticoncentration}
Anticoncentration is a measure of the ``spread'' of a distribution. For any distribution $\mathcal D$, let $\eta$ be a parameter $0 < \eta < \frac{1}{2}$. If the probability mass of $\mathcal D$ contained in the $\eta$ interval around the origin is small, then $\mathcal D$ is anticoncentrated. For example, in the case of $\mathcal D = \mathcal N(0,1)$, the mass of $\mathcal D$ in any $\eta$ interval about the origin is upper bounded by $\frac{2}{\sqrt{2\pi}}\eta$. This property of the probability mass decaying linearly with $\eta$ as $\eta$ goes to zero is what motivates the following definition.
\begin{definition}\label{def:anticoncentration}
A probability distribution $\mathcal D$ over $\mathbb R^d$ is said to be $c$-anticoncentrated if for any $0 < \eta < \frac{1}{2}$ there exists $\tau \leq c\eta$ such that for any measurable subset $\mathcal E \subseteq \mathbb R^d$, and for all $v \in \mathbb R^d$ with $\norm{v} \leq 1$, we have that
\[ \E[ \iprod{X,v}^2 \cdot \Ind[\mathcal E] ] \geq \eta^2 \cdot \ProbOp[\mathcal E] \cdot \norm{v}^2 - \eta^2\tau\]
\end{definition}
\Pnote{$\Omega$ is undefined here. We should probably define $\mathcal E$ to be a measurable subset of $\mathbb R^n$}
We now state the SoS version of anticoncentration.
% NOTE(review): this label duplicates the one on the definition above — rename one
% (e.g. def:sos-anticoncentration) and retarget the \prettyref call sites accordingly.
\begin{definition}\label{def:anticoncentration}
Let $D: [0,1/2] \to \mathbb N$.
A probability distribution $\mathcal D$ over $\mathbb R^d$ is said to be $(c, D(\eta))$-SoS-anticoncentrated
if for any $0 < \eta < \frac{1}{2}$ there exists $\tau \leq c\eta$ and there exists a constant $k \in \mathbb N$ such that for all $N > d^k$,
with probability $1-d^{-k}$ over samples $x_1,\ldots, x_N \sim \mathcal D$, the following polynomial system
\[
\mathcal P = \left\{
\begin{array}{ll}
w_i^2 = w_i & i \in [N]\\
\norm{v}^2 \leq \rho^2 \\
\norm{\frac{1}{N}\sum_{i=1}^N X_i^{\otimes \frac{t}{2}}(X_i^{\otimes \frac{t}{2}})^T - M_t} < \epsilon & t \in [k]\\
\end{array}
\right.
\]
yields a degree $D(\eta)$ SoS proof of the following inequality
\begin{align*}
\mathcal P\SoSp{D(\eta)}\Big\{\frac{1}{N}\sum_{i = 1}^N w_i \iprod{X_i,v}^2 \geq \eta^2 \Big(\frac{1}{N}\sum_i w_i\Big) \norm{v}^2 - \eta^2 \tau \rho^2\Big\}
\end{align*}
\end{definition}
\Pnote{good to remind the reader what $M_t$ denotes in the above definition. Perhaps, we should uniformly use $M_4(\mathcal D)$?}
\begin{theorem}\label{thm:anti-sufficient} (Sufficient conditions for SoS anticoncentration)
If the degree $D(\eta)$ empirical moments of $\mathcal D$ converge to the corresponding true moments $M_t$ of $\mathcal D$, that is, for all $t \leq D(\eta)$
\[
\lim_{N \rightarrow \infty}\norm{\frac{1}{N}\sum_{i=1}^N X_i^{\otimes \frac{t}{2}}(X_i^{\otimes \frac{t}{2}})^T - M_t} = 0
\]
and if there exists a univariate polynomial $I_{\eta}(z) \in \mathbb R[z]$ of degree at most $D(\eta)$ such that
\begin{enumerate}
\item $I_{\eta}(z) \geq 1-\frac{z^2}{\eta^2\rho^2}$ for all $z \in \mathbb R$,
\item $\mathcal P\SoSp{D(\eta)} \Big\{\norm{v}^2 \cdot \E_{x \sim \mathcal D}[I_{\eta}(\iprod{v, x})] \leq c\eta\rho^2\Big\}$,
\end{enumerate}
then $\mathcal D$ is $(c,D(\eta))$ certifiably anticoncentrated.
\end{theorem}
\begin{lemma}\torestate{\label{lem:normal-anti}
For every $d \in \mathbb N$, the standard Gaussian distribution $\mathcal N(0,I_d)$ is $(c,O(\frac{1}{\eta^4}))$-SoS-anticoncentrated. In particular, there exists a construction with $c \leq 2\sqrt{e}$.}
\end{lemma}
\begin{comment}
\begin{theorem} There exists a degree $T$ sum of squares proof of the following polynomial inequality for $0 < \norm{v} < \rho$
\begin{align*}
\frac{\delta\rho}{\sqrt{2\pi}}+ \frac{\rho^2}{\delta^2}\frac{1}{\beta N}\sum\limits_{i=1}^N w_iw_i'\langle X,v\rangle^2 \geq \frac{1}{\beta N} \sum\limits_{i=1}^N w_iw_i'\norm{v}^2
\end{align*}
\end{theorem}
\end{comment}
First we will prove \prettyref{thm:anti-sufficient}.
\begin{proof} (\prettyref{thm:anti-sufficient})
First, it is a standard fact that every univariate polynomial inequality has a sum of squares proof. More precisely, for any $p(x) \in \mathbb R[x]$ satisfying $p(x) \geq 0$, it is true that $p(x) \succeq_{\deg(p(x))} 0$. Furthermore, this is also true over any interval $[a,b]$.
\begin{fact}\label{fact:univar}
Let $a < b$. Then a degree $2d$ polynomial $p(x)$ is non-negative on $[a, b]$ if and only if it can be written as
\[
\left\{
\begin{array}{ll}
& p(x) = s(x) + (x - a)(b - x)t(x), \text{ if } \deg(p) \text{ is even}\\
& p(x) = (x - a)s(x) + (b - x)t(x), \text{ if } \deg(p) \text{ is odd}\\
\end{array}
\right.
\]
where $s(x)$, $t(x)$ are SoS. In the first case, we have $\deg(p) = 2d$, and $\deg(s) \leq 2d$, $\deg(t) \leq 2d - 2$. In
the second, $\deg(p) = 2d + 1$, and $\deg(s) \leq 2d$, $\deg(t) \leq 2d$.
\end{fact}
In light of this fact, we use \prettyref{thm:anti-sufficient} condition 1 to lower bound $\iprod{X_i,v}^2$ by
\begin{align*}
\mathcal P \SoSp{D(\eta)} \langle X,v\rangle^2 \geq \eta^2\rho^2(1 - I_\eta(\langle X_i,v\rangle))
\end{align*}
Therefore,
\begin{align*}
\mathcal P \SoSp{D(\eta)}\sumn w_iw_i'\langle X,v\rangle^2 \geq \sumn w_iw_i'\eta^2\rho^2 (1 - I_{\eta}(\langle X_i,v\rangle))
\end{align*}
Then using the constraint $\{\norm{v}^2 \leq \rho^2\}$ we obtain
\begin{align*}
\mathcal P \SoSp{D(\eta)}\sumn w_iw_i'\eta^2\rho^2(1 - I_{\eta}(\langle X_i,v\rangle)) \geq \sumn w_iw_i'\eta^2\norm{v}^2 (1 - I_{\eta}(\langle X_i,v\rangle))
\end{align*}
\begin{align*}
= \sumn w_iw_i'\eta^2\norm{v}^2 - \sumn w_iw_i'\eta^2\norm{v}^2I_{\eta}(\langle X_i,v\rangle)
\end{align*}
Then using the fact that $I_\eta(\langle X_i,v\rangle)$ is SoS and $\{w_i^2 = w_i\} \SoSp{} (1 - w_i) \succeq_2 0$, we subtract $\sumn w_i'(1-w_i)I_\eta(\langle X_i,v\rangle)$ to obtain
\begin{align*}
\geq \sumn w_iw_i'\eta^2\norm{v}^2 - \eta^2 \norm{v}^2\sumn w_i'I_{\eta}(\langle X_i,v\rangle)
\end{align*}
Expanding out $I_\eta(\langle X_i,v\rangle)$ as a degree $D(\eta)$ polynomial with coefficients $\alpha_1,\ldots,\alpha_{D(\eta)}$ we have
\begin{align*}
I_\eta(\langle X_i,v\rangle) = \sum_{t=1}^{D(\eta)} \alpha_t \langle X_i,v\rangle^t
\end{align*}
We want to replace the empirical average $\sumn w_i' I_\eta(\langle X_i,v\rangle)$ with the expectation $\E_{X \sim N(0,I)} I_\eta(\langle X,v \rangle)$ and bound the error term. Indeed, we know that
\begin{align*}
\sumn w_i'I_\eta(\langle X_i,v\rangle) = \sum\limits_{t=1}^{D(\eta)} \alpha_t\sumn w_i'\langle X_i,v\rangle^t = \sum\limits_{t=1}^{D(\eta)} \alpha_t\big \langle \sumn w_i' X_i^{\otimes \frac{t}{2}}(X_i^{\otimes\frac{t}{2}})^T, v^{\otimes \frac{t}{2}}(v^{\otimes\frac{t}{2}})^T \big\rangle
\end{align*}
\begin{align*}
= \sum\limits_{t=1}^{D(\eta)} \alpha_t\big \langle \sumn w_i' X_i^{\otimes \frac{t}{2}}(X_i^{\otimes\frac{t}{2}})^T - \E_{X \sim N(0,I)} X^{\otimes \frac{t}{2}}(X^{\otimes\frac{t}{2}})^T, v^{\otimes \frac{t}{2}}(v^{\otimes\frac{t}{2}})^T \big\rangle + \sum\limits_{t=1}^{D(\eta)} \alpha_t\big \langle \E_{X \sim N(0,I)} X^{\otimes \frac{t}{2}}(X^{\otimes\frac{t}{2}})^T, v^{\otimes \frac{t}{2}}(v^{\otimes\frac{t}{2}})^T \big\rangle
\end{align*}
Then by degree $D(\eta)$ SoS Cauchy-Schwarz we obtain
\begin{align*}
\preceq \sum\limits_{t=1}^{D(\eta)} \alpha_t\norm{\sumn w_i' X_i^{\otimes \frac{t}{2}}(X_i^{\otimes\frac{t}{2}})^T - M_t}_F^2 \norm{v}^t + \sum\limits_{t=1}^{D(\eta)}\alpha_t\E_{X \sim N(0,I)}\langle X,v\rangle^t
\end{align*}
Thus for our setting of $N$ and $d$ we obtain
\begin{align}\label{eq:anti1}
= \E_{X \sim N(0,I)}I_\eta(\iprod{X,v}) + o_d(1)
\end{align}
Note that it is important that the coefficients of $I_\eta(z)$ are chosen independently of $d$, or at the very least do not grow too fast with respect to $d$. Our final bound is
\begin{align*}
\mathcal P \SoSp{D(\eta)}\sumn w_iw_i'\langle X,v\rangle^2 \geq \sumn w_iw_i'\eta^2\norm{v}^2 - \eta^2 \norm{v}^2\E_{z \sim N(0,\norm{v}^2)} I_{\eta}(z) + o_d(1)
\end{align*}
Applying sufficient condition 2 we obtain
\begin{align*}
\mathcal P\SoSp{D(\eta)}\Big\{\frac{1}{N}\sum_{i = 1}^N w_i \iprod{X_i,v}^2 \geq \eta^2 \Big(\frac{1}{N}\sum_i w_i\Big) \norm{v}^2 - \eta^2 \tau\rho^2 \Big\}
\end{align*}
as desired.
\begin{comment}
Consider the second term which we need to upper bound. For $||v|| < \frac{1}{2\cclassmacro{P}i}^{1/4}\sqrt{\mathop{}\!\mathrm{d}elta\rho}$ we have
\boldsymbol{e}gin{align*}
\mathop{}\!\mathrm{d}elta^2 \frac{||v||^2}{\rho^2}\E_{z \sim N(0,||v||^2)} I_{\mathop{}\!\mathrm{d}elta}(z) \leq \frac{\mathop{}\!\mathrm{d}elta^3}{\sqrt{2\cclassmacro{P}i}\rho}
\epsilonilonnd{align*}
Where we trivially upper bound the expectation by $1$.
And for $||v|| > \frac{1}{2\cclassmacro{P}i}^{1/4}\sqrt{\mathop{}\!\mathrm{d}elta\rho}$ we have
\boldsymbol{e}gin{align*}
\mathop{}\!\mathrm{d}elta^2 \frac{||v||^2}{\rho^2}\E_{z \sim N(0,||v||^2)} I_{\mathop{}\!\mathrm{d}elta}(z) \leq \mathop{}\!\mathrm{d}elta^2 \frac{||v||^2}{\rho^2}\frac{1}{\sqrt{2\cclassmacro{P}i}||v||} \mathop{}\!\mathrm{d}elta \leq \frac{\mathop{}\!\mathrm{d}elta^3}{\sqrt{2\cclassmacro{P}i}\rho}
\epsilonilonnd{align*}
Where we upper bounded $||v|| < \rho$. Thus we have the following lower bound uniformly over $0 < ||v|| < \rho$
\boldsymbol{e}gin{align*}
\frac{1}{\boldsymbol{e}ta N} \sum\limits_{i=1}^N w_iw_i'\leftarrowngle X,v\rightarrowngle^2 \geq \frac{1}{\boldsymbol{e}ta N} \sum\limits_{i=1}^N w_iw_i'\mathop{}\!\mathrm{d}elta^2 \frac{||v||^2}{\rho^2} - \frac{\mathop{}\!\mathrm{d}elta^3}{\sqrt{2\cclassmacro{P}i}\rho}
\epsilonilonnd{align*}
Rearranging terms we have,
\boldsymbol{e}gin{align*}
\frac{\mathop{}\!\mathrm{d}elta^3}{\sqrt{2\cclassmacro{P}i}\rho}+ \frac{1}{\boldsymbol{e}ta N}\sum\limits_{i=1}^N w_iw_i'\leftarrowngle X,v\rightarrowngle^2 \geq \frac{1}{\boldsymbol{e}ta N} \sum\limits_{i=1}^N w_iw_i'\mathop{}\!\mathrm{d}elta^2 \frac{||v||^2}{\rho^2}
\epsilonilonnd{align*}
Multiplying both sides by $\frac{\rho^2}{\mathop{}\!\mathrm{d}elta^2}$ we conclude
\boldsymbol{e}gin{align*}
\frac{\mathop{}\!\mathrm{d}elta\rho}{\sqrt{2\cclassmacro{P}i}}+ \frac{\rho^2}{\mathop{}\!\mathrm{d}elta^2}\frac{1}{\boldsymbol{e}ta N}\sum\limits_{i=1}^N w_iw_i'\leftarrowngle X,v\rightarrowngle^2 \geq \frac{1}{\boldsymbol{e}ta N} \sum\limits_{i=1}^N w_iw_i'||v||^2
\epsilonilonnd{align*}
Eventually we will have
\boldsymbol{e}gin{align*}
\frac{1}{\boldsymbol{e}ta N} \sum\limits_{i=1}^N w_iw_i'||v||^2 \leq \frac{\mathop{}\!\mathrm{d}elta\rho}{\sqrt{2\cclassmacro{P}i}}+ \frac{4\rho^2\sigma^2}{\mathop{}\!\mathrm{d}elta^2}
\epsilonilonnd{align*}
Setting $\mathop{}\!\mathrm{d}elta = \frac{\boldsymbol{e}ta \rho}{4}$ we obtain
\boldsymbol{e}gin{align*}
\frac{1}{\boldsymbol{e}ta N} \sum\limits_{i=1}^N w_iw_i'||v||^2 \leq \frac{\boldsymbol{e}ta\rho^2}{4\sqrt{2\cclassmacro{P}i}}+ \frac{\sigma^2}{\boldsymbol{e}ta^2}
\epsilonilonnd{align*}
Matching this with global correlation of $\frac{\rho^2\boldsymbol{e}ta}{4}$ we contract the problem from $\rho$ to $\frac{\rho}{2}$
\end{comment}
\end{proof}
\section{Certifiably Anticoncentrated Distributions} \leftarrowbel{sec:anticoncentration-distributions}
\restatelemma{lem:normal-anti}
\boldsymbol{e}gin{proof}
By \cclassmacro{P}rettyref{thm:anti-sufficient} it suffices to exhibit a polynomial $I_\epsilonilonta(x)$ satisfying
\boldsymbol{e}gin{enumerate}
\emem $I_\epsilonilonta(x) \geq 1 - \frac{x^2}{\epsilonilonta^2\rho^2}$
\emem $\mathcal P \SoSp{O(\frac{1}{\epsilonilonta^4})}\Big\{\norm{v}^2\E_{x \sim N(0,I)} I_\epsilonilonta(\iprod{X,v}) \leq c\epsilonilonta\rho^2 \Big\}$
\epsilonilonnd{enumerate}
\Pnote{what is the extra $\nu$ on the right hand side?}
Firstly, without loss of generality the scaling $\rho$ can be set to $1$ so that $\rho = 1$ and $\norm{v} \leq 1$. This is because any polynomial $I_\epsilonilonta(x)$ satisfying conditions 1 and 2 for $\rho = 1$ and $\norm{v} \leq 1$ can be reparameterized as $I(x') = I_\epsilonilonta(\frac{x'}{\rho})$ and satisfy conditions 1 and 2 for $\norm{v} \leq \rho$ for general $\rho$.
Next we observe that owing to the spherical symmetry of the standard Gaussian we have $\norm{v}^2\E_{x \sim N(0,I)} I_\epsilonilonta(\iprod{X,v})$ is a spherically symmetric polynomial in $X$ which implies it is a polynomial in $\norm{v}$. Thus define
$$H(\norm{v}) := \norm{v}^2\E_{x \sim N(0,I)} I_\epsilonilonta(\iprod{X,v}) = \norm{v}^2\E_{x \sim N(0,\norm{v}^2)} I_\epsilonilonta(x)$$
Furthermore we have $\{\norm{v}^2 \leq 1\} \in \mathcal P$ and $\norm{v} \geq 0$ is SoS. Therefore, it suffices to prove the inequality $H(\norm{v}) \leq \epsilonilonta$ and \cclassmacro{P}rettyref{fact:univar} implies condition 2.
\Pnote{we might have to add "Fact" to prettyref macro?}
Now we construct $I_\epsilonilonta(x)$, which we refer to as the anticoncentration polynomial. Note that the indicator function of the $[-\epsilonilonta,\epsilonilonta]$ interval satisfies both anticoncentration conditions. The idea is to approximate the indicator function with a polynomial.
\Pnote{"anticoncentration polynomial" terminology unknown to reader}
It is difficult to directly approximate the indicator function as it is not continuous. Thus we dominate the indicator by a scaled Gaussian denoted $f(x)$ which satisfies the anticoncentration conditions.
\Pnote{reader doesn't know why we are approximating indicator functions, and which indicator function we are dealing with}
We then interpolate an explicit sum of squares polynomial through $f(x)$, denoted $I_\epsilonilonta(x)$. The key difficulty here is that any univariate positive polynomial blows up at its tails. Thus, we must prove that the approximation error $|f(x) - I_\epsilonilonta(x)|$ is small on an interval around the origin, and that far away from the origin the decay of the Gaussian tail dominates the growth of the approximation error.
We note that there are many different strategies to construct polynomials satisfying the above criterion, and we will satisfy ourselves with proving the Gaussian is $(2\sqrt{e},O(\frac{1}{\epsilonilonta^4}))$-certifiably anticoncentrated.
First let $f(x) = \sqrt{e}\epsilonilonxp(-\frac{x^2}{2\epsilonilonta^2})$. For simplicity we will design $f(x)$ such that $f(\cclassmacro{P}m\epsilonilonta) = 1$ to satisfy the first anticoncentration condition. Checking the second condition we find that
\boldsymbol{e}gin{align*}
\norm{v}^2\E_{x \sim N(0,\norm{v}^2)} f(x) = \norm{v}^2\int \frac{\sqrt{e}}{\sqrt{2\cclassmacro{P}i}\norm{v}} \epsilonilonxp(-\frac{x^2}{2\epsilonilonta^2} - \frac{x^2}{2\norm{v}^2}) dx
\epsilonilonnd{align*}
\Pnote{brackets need cleaning}
\boldsymbol{e}gin{align*}
= \norm{v}^2\frac{\sqrt{e}}{\sqrt{2\cclassmacro{P}i}\norm{v}}\int \epsilonilonxp\Big(-\frac{x^2}{2(\frac{\epsilonilonta^2\norm{v}^2}{\epsilonilonta^2 + \norm{v}^2})} \Big) dx = \norm{v}^2\frac{\epsilonilonta \sqrt{e}}{\sqrt{\epsilonilonta^2 + \norm{v}^2}}
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\leq \norm{v}^2 \frac{\epsilonilonta \sqrt{e}}{\norm{v}} \leq \epsilonilonta\sqrt{e}
\epsilonilonnd{align*}
Where in the last inequality we used $0 \leq \norm{v}^2 \leq 1$.
Intuitively, if we interpolate a sum of squares polynomial $I_\epsilonilonta(x)$ that closely approximates $f(x)$ in an interval around the origin, then $\E_{x \sim N(0,\norm{v}^2)} I_\epsilonilonta(x) \approx \E_{x\sim N(0,\norm{v}^2)} f(x)$. Let $(x_0,x_1,...,x_n)$ be evenly spaced points at intervals of length $\nu\epsilonilonta^r$ ranging over $[-\frac{\nu n\epsilonilonta^r}{2},\frac{\nu n\epsilonilonta^r}{2}]$, where we eventually set $\nu$ to be a constant and $r = 4$.
\Pnote{what is $r$ here? we need to specify it or remind the reader}
Let $(y_0,...,y_n)$ be the set of evaluations $y_i = f(x_i)$. Let $I_\epsilonilonta(x)$ be the following degree $2n$ polynomial.
\boldsymbol{e}gin{align*}
I_\epsilonilonta(x) = \frac{(x-x_1)^2(x - x_2)^2...(x-x_n)^2}{(x_0 - x_1)^2(x_0 - x_2)^2...(x_0 - x_n)^2}y_0 + \frac{(x-x_0)^2(x - x_1)^2...(x-x_n)^2}{(x_1 - x_0)^2(x_1 - x_2)^2...(x_1 - x_n)^2}y_1 + ... \\+ \frac{(x-x_0)^2(x - x_1)^2...(x-x_{n-1})^2}{(x_n - x_0)^2(x_n - x_1)^2...(x_n - x_{n-1})^2}y_n
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= \sum\limits_{i=0}^n\Big(\cclassmacro{P}rod_{\rhotack{{0 \leq j \leq n}\\ {j \neq i}}} \frac{(x - x_j)^2}{(x_i - x_j)^2}\Big)y_i
\epsilonilonnd{align*}
$I_\epsilonilonta(x)$ is the standard interpolation polynomial where each term is squared so as to be a sum of squares. Let $R_{2n}(y)$ be the maximum error over the interval $[-y,y]$, that is, $R_{2n}(y) = \max_{x \in [-y,y]}|f(x) - I_\epsilonilonta(x)|$. It is easy to show the interpolation error is
\boldsymbol{e}gin{align*}
R_{2n}(y) = \frac{1}{(2n + 1)!}\max\limits_{x \in [-y,y]}|f^{2n + 1}(x)|\cclassmacro{P}rod_{i=0}^n (x - x_i)^2
\epsilonilonnd{align*}
One way to prove the above equality is to think of the construction of $I_\epsilonilonta(x)$ as follows. Let $\Tilde{I}(x)$ be the unique degree $2n$ polynomial interpolating through the points $\{(x_i,f(x_i))\}_{i \in [n]}$ and $\{(x_i', f(x_i'))\}_{i \in [n]}$, which is not necessarily a sum of squares.
\Pnote{interpolation of points is a little vague, interpolation through points $\{(x_i,f(x_i))\}_{i \in [n]}$ and $\{(x_i', f(x_i'))\}_{i \in [n]}$}.
It is a standard fact in polynomial approximation theory \cite{Sauer1997} that the error $\Tilde{R}_{2n}(y) = \max_{x \in [-y,y]}|f(x) - \Tilde{I}(x)|$ has the following form.
\Pnote{some reference for the fact will be useful here}
\boldsymbol{e}gin{align*}
\Tilde{R}_{2n}(y) = \frac{1}{(2n+1)!}\max\limits_{x \in [-y,y]}|f^{2n+1}(x)|\cclassmacro{P}rod_{i=0}^n (x - x_i)\cclassmacro{P}rod_{i=0}^n (x - x_i')
\epsilonilonnd{align*}
It is easy to check that $I_\epsilonilonta(x) = \lim_{(x_0',...,x_n') \rightarrow (x_0,...,x_n)}\Tilde{I}(x)$. Thus
\boldsymbol{e}gin{align*}
R_{2n}(y) = \lim_{(x_0',...,x_n') \rightarrow (x_0,...,x_n)}\Tilde{R}_{2n}(y) = \frac{1}{(2n+1)!}\max\limits_{x \in [-y,y]}|f^{2n+1}(x)|\cclassmacro{P}rod_{i=0}^n (x - x_i)^2
\epsilonilonnd{align*}
as desired.
Now we verify anticoncentration condition 2
\boldsymbol{e}gin{align}
\norm{v}^2\E_{x \sim N(0,\norm{v}^2)} I_\epsilonilonta(x) & \leq \norm{v}^2\E_{x \sim N(0,\norm{v}^2)} |f(x) - I_\epsilonilonta(x)| + \norm{v}^2\E_{x \sim N(0,\norm{v}^2)}f(x) \nonumber \\
& \leq \norm{v}^2\E_{x \sim N(0,\norm{v}^2)} |f(x) - I_{\epsilonilonta}(x)| + \sqrt{e}\epsilonilonta
\epsilonilonnd{align}
\Pnote{I don't follow all the details below. Why can we bound $R_{2n}(y)$ in second term by $R_{2n}( \nu \epsilonilonta^r n/2)$?}
\boldsymbol{e}gin{align*}
= \norm{v}^2 \boldsymbol{i}g(\int_{-\infty}^{-\frac{\nu\epsilonilonta^r n}{2}} R_{2n}(y) \frac{1}{\sqrt{2\cclassmacro{P}i}\norm{v}}\epsilonilonxp(-\frac{y^2}{2\norm{v}^2})dy + \int_{-\frac{\nu\epsilonilonta^r n}{2}}^{\frac{\nu\epsilonilonta^r n}{2}}R_{2n}(y)\frac{1}{\sqrt{2\cclassmacro{P}i}\norm{v}}\epsilonilonxp(-\frac{y^2}{2\norm{v}^2})dy \\ + \int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} R_{2n}(y) \frac{1}{\sqrt{2\cclassmacro{P}i}\norm{v}}\epsilonilonxp(-\frac{y^2}{2\norm{v}^2})dy\boldsymbol{i}g) + \sqrt{e}\epsilonilonta
\epsilonilonnd{align*}
Since we defined $R_{2n}(y)$ to be the maximum error in the $[-y,y]$ interval, it is monotonic, and we upper bound it by its evaluation at its rightmost endpoint $R_{2n}(\frac{\nu\epsilonilonta^rn}{2})$.
\boldsymbol{e}gin{align*}
\leq \norm{v}^2 \boldsymbol{i}g(\int_{-\infty}^{-\frac{\nu\epsilonilonta^r n}{2}} R_{2n}(y) \frac{1}{\sqrt{2\cclassmacro{P}i}\norm{v}}\epsilonilonxp(-\frac{y^2}{2\norm{v}^2})dy + R_{2n}(\frac{\nu\epsilonilonta^r n}{2})\int_{-\frac{\nu\epsilonilonta^r n}{2}}^{\frac{\nu\epsilonilonta^r n}{2}}\frac{1}{\sqrt{2\cclassmacro{P}i}\norm{v}}\epsilonilonxp(-\frac{y^2}{2\norm{v}^2})dy \\ + \int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} R_{2n}(y) \frac{1}{\sqrt{2\cclassmacro{P}i}\norm{v}}\epsilonilonxp(-\frac{y^2}{2\norm{v}^2})dy\boldsymbol{i}g) + \sqrt{e}\epsilonilonta
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= \norm{v}^2 \boldsymbol{i}g( R_{2n}(\frac{\nu\epsilonilonta^r n}{2}) + 2\int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} R_{2n}(y) \epsilonilonxp(-\frac{y^2}{2})dy\boldsymbol{i}g) + \sqrt{e}\epsilonilonta
\epsilonilonnd{align*}
Thus it suffices to show
\boldsymbol{e}gin{align*}
R_{2n}(\frac{\nu\epsilonilonta^r n}{2}) + 2\int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} R_{2n}(y) \epsilonilonxp(-\frac{y^2}{2})dy \leq \sqrt{e}\epsilonilonta
\epsilonilonnd{align*}
Recall that we have already set $\rho = 1$ without loss of generality. Let's start with $R_{2n}(\frac{\nu\epsilonilonta^r n}{2})$
\Pnote{its not obvious why we can set $\rho = 1$ without loss of generality here}
\boldsymbol{e}gin{align}\leftarrowbel{eq:anti1}
R_{2n}(\frac{\nu\epsilonilonta^r n}{2}) \leq \frac{1}{(2n+1)!} \max_{x \in [-y,y]}|f^{2n+1}(x)| \cclassmacro{P}rod_{i=0}^n (x - x_i)^2
\epsilonilonnd{align}
Since $f(x)$ is a scaled Gaussian, we have directly from its Taylor expansion that $$\max_{x \in \mathbb R}|f^{2n+1}(x)| < \max_{x \in \mathbb R}|f^{2n+2}(x)| = |f^{2n+2}(0)| = \frac{(2n+2)!}{(n+1)!}\Big(\frac{1}{2\epsilonilonta^2}\Big)^{2n+2}.$$
Plugging the above bound into \ref{eq:anti1} we obtain
\boldsymbol{e}gin{align*}
R_{2n}(\frac{\nu\epsilonilonta^r n}{2}) < \frac{\nu^2\epsilonilonta^{2r}(2^2\nu^2\epsilonilonta^{2r})...(n^2\nu^2\epsilonilonta^{2r})}{(2n+1)!} |f^{2n+2}(0)| = \frac{\nu^{2n}\epsilonilonta^{2rn}(n!)^2}{(2n+1)!} |f^{2n+2}(0)| = \frac{\nu^{2n}\epsilonilonta^{2rn} (n!)^2}{(2n+1)!} \frac{(2n+2)!}{(n+1)!}(\frac{1}{2\epsilonilonta^2})^{2n+2}
\epsilonilonnd{align*}
$$= 2\nu^{2n}\epsilonilonta^{2rn} (n!)\boldsymbol{i}g(\frac{1}{2\epsilonilonta^2}\boldsymbol{i}g)^{2n+2} = 2(\frac{1}{4\epsilonilonta^4})\boldsymbol{i}g(\frac{\nu\epsilonilonta^{r-2}}{2}\boldsymbol{i}g)^{2n}n! = 2(\frac{1}{4\epsilonilonta^4})\sqrt{2 \cclassmacro{P}i n}\boldsymbol{i}g(\frac{n}{e}\boldsymbol{i}g)^n\boldsymbol{i}g(\frac{\nu\epsilonilonta^{r-2}}{2}\boldsymbol{i}g)^{2n} = 2(\frac{1}{4\epsilonilonta^4})\frac{\sqrt{2\cclassmacro{P}i n}}{2^{2n}e^n} (\nu\epsilonilonta^{r-2}\sqrt{n})^{2n}$$
Where the factorial approximation is Stirling's.
Thus a sufficient condition for error decay is $\nu\epsilonilonta^{r-2}\sqrt{n} \leq 1$. Then, for the benefit of tail error decay, we will set $n$ to saturate the center interval error, $n := \frac{1}{\sqrt{e}\nu^2\epsilonilonta^{2(r-2)}}$, where the factor of $\sqrt{e}$ accommodates some discrepancy in the tail error bound. Intuitively, the larger the value of $n$, the farther out the interpolation points, and the better the Gaussian tail dominates the polynomial growth in error.
Next we show the tail error is small.
\boldsymbol{e}gin{align*}
\int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} R_{2n}(y) \epsilonilonxp(-\frac{y^2}{2})dy \leq
\int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} \frac{(y + \frac{\nu\epsilonilonta^r n}{2})^{2n}}{(2n+1)!}|f^{2n+1}(0)| \epsilonilonxp(-\frac{y^2}{2})dy
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\leq \int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} \frac{(y + \frac{\nu\epsilonilonta^r
n}{2})^{2n}}{(2n+1)!}\frac{(2n+2)!}{(n+1)!}\boldsymbol{i}g(\frac{1}{2\epsilonilonta^2}\boldsymbol{i}g)^{2n+2} \epsilonilonxp(-\frac{y^2}{2})dy
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\leq 2\int_{\frac{\nu\epsilonilonta^r n}{2}}^{\infty} \frac{(y + \frac{\nu\epsilonilonta^r n}{2})^{2n}}{n!}\boldsymbol{i}g(\frac{1}{2\epsilonilonta^2}\boldsymbol{i}g)^{2n+2} \epsilonilonxp(-\frac{y^2}{2})dy
\epsilonilonnd{align*}
The integrand evaluated at $y = \frac{\nu\epsilonilonta^r n}{2}$ is
$$\leq \frac{1}{\sqrt{2\cclassmacro{P}i n}}\boldsymbol{i}g(\frac{1}{4\epsilonilonta^4}\boldsymbol{i}g)\boldsymbol{i}g(\frac{\nu\epsilonilonta^{r-2}\sqrt{en}}{2}\boldsymbol{i}g)^{2n}\epsilonilonxp(-\frac{(\nu\epsilonilonta^rn)^2}{8}) $$
By our choice of $n$ we have both the exponential and the error term falling to zero rapidly. For $r = 4$ and $\nu = 1/100$ we have $n = O(\frac{1}{\epsilonilonta^4})$ for a degree $2n = O(\frac{1}{\epsilonilonta^4})$ polynomial.
\epsilonilonnd{proof}
The following lemma establishes that the sufficient conditions of \cclassmacro{P}rettyref{thm:anti-sufficient} are naturally extended under linear transformations of the data set.
\boldsymbol{e}gin{lemma} (Anticoncentration under Linear Transformation)
Let $\mathcal D$ be a $(c,D(\epsilonilonta))$ certifiably anticoncentrated distribution. Let $I_\epsilonilonta(z) \in \mathbb R[z]$ be a univariate polynomial satisfying the conditions of \cclassmacro{P}rettyref{thm:anti-sufficient}. Let $x \sim \mathcal D$ be a random variable drawn from $\mathcal D$. Then for any invertible linear transformation $A \in \mathbb R^{d \times d}$, we denote the distribution of $Ax$ as $A(\mathcal D)$. Let $\Sigma = AA^T$ be the covariance of $A(\mathcal D)$ with eigenvalues $\leftarrowmbda_1 \geq \leftarrowmbda_2 \geq ... \geq \leftarrowmbda_d$. Then $A(\mathcal D)$ is $(c\frac{\leftarrowmbda_1^{3/2}}{\leftarrowmbda_d^{3/2}},D(\epsilonilonta))$ certifiably anticoncentrated.
\epsilonilonnd{lemma}
\boldsymbol{e}gin{proof}
We use condition 1 of \cclassmacro{P}rettyref{thm:anti-sufficient} to lower bound $\iprod{X_i,v}^2$ by
\boldsymbol{e}gin{align*}
\mathcal P \SoSp{D(\epsilonilonta)} \leftarrowngle X,v\rightarrowngle^2 \geq \epsilonilonta^2(1 - I_\epsilonilonta(\leftarrowngle X_i,v\rightarrowngle))
\epsilonilonnd{align*}
Therefore,
\boldsymbol{e}gin{align*}
\mathcal P \SoSp{D(\epsilonilonta)}\frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\leftarrowngle X,v\rightarrowngle^2 \geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2 (1 - I_{\epsilonilonta}(\leftarrowngle X_i,v\rightarrowngle))
\epsilonilonnd{align*}
Then using the certificate that $\{\norm{v}^2 < 1\}$ we obtain
\boldsymbol{e}gin{align*}
\mathcal P \SoSp{D(\epsilonilonta)}\frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2 (1 - I_{\epsilonilonta}(\leftarrowngle X_i,v\rightarrowngle)) \geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2 \frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2} (1 - I_{\epsilonilonta}(\leftarrowngle X_i,v\rightarrowngle))
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2 \frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2} - \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2\frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2}I_{\epsilonilonta}(\leftarrowngle X_i,v\rightarrowngle)
\epsilonilonnd{align*}
Then using the fact that $I_\epsilonilonta(\leftarrowngle X_i,v\rightarrowngle)$ is SoS and $\{w_i^2 = w_i\} \SoSp{} (1 - w_i) \succeq_2 0$, we subtract $\epsilonilonta^2\frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2}\sumn w_i'(1-w_i)I_\epsilonilonta(\leftarrowngle X_i,v\rightarrowngle)$ to obtain
\boldsymbol{e}gin{align*}
\geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2\frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2} - \epsilonilonta^2 \frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2}\frac{1}{M}\sum\limits_{i=1}^N w_i'I_{\epsilonilonta}(\leftarrowngle X_i,v\rightarrowngle)
\epsilonilonnd{align*}
We know from the moment certificates that
$$
\frac{1}{M}\sum\limits_{i=1}^N w_i'I_{\epsilonilonta}(\leftarrowngle X_i,v\rightarrowngle)
= \E_{X \sim N(0,\Sigma)}I_\epsilonilonta(\iprod{X_i,v}) + O(\epsilonilonpsilon) = \E_{X \sim N(0,I)}I_\epsilonilonta(\iprod{X,\Sigma^{1/2}v}) + O(\epsilonilonpsilon)
$$
so thus far we have shown,
\boldsymbol{e}gin{align*}
\mathcal P \SoSp{D(\epsilonilonta)}\sum\limits_{i=1}^N w_iw_i'\leftarrowngle X,v\rightarrowngle^2 \geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2\frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2} - \epsilonilonta^2 \frac{\norm{\Sigma^{1/2}v}^2}{\norm{\Sigma^{1/2}}_{op}^2}\E_{x \sim N(0,I)} I_{\epsilonilonta}(\iprod{X,\Sigma^{1/2}v}) + O(\epsilonilonpsilon)
\epsilonilonnd{align*}
For the first term on the right hand side, lower bound $\norm{\Sigma^{1/2}v}^2 \geq \leftarrowmbda_{d}\norm{v}^2$. This follows from the positive semidefiniteness of $\Sigma^{1/2}$ via degree 2 SoS. Then change the variable $w = \Sigma^{1/2}v$ to obtain
$$
\geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2\frac{\leftarrowmbda_d\norm{v}^2}{\leftarrowmbda_1} - \epsilonilonta^2 \frac{\norm{w}^2}{\leftarrowmbda_1}\E_{x \sim N(0,I)} I_{\epsilonilonta}(\iprod{X,w}) + O(\epsilonilonpsilon)
$$
Consider the second term. Observing that $0 \leq \norm{w}^2 \leq \leftarrowmbda_1$ and scaling \cclassmacro{P}rettyref{thm:anti-sufficient} condition 2 by $\leftarrowmbda_1$ we obtain
$$
\geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta^2\frac{\leftarrowmbda_d\norm{v}^2}{\leftarrowmbda_1} - \epsilonilonta^2 (c\epsilonilonta) + O(\epsilonilonpsilon)
$$
Let $\epsilonilonta' = \epsilonilonta\sqrt{\frac{\leftarrowmbda_d}{\leftarrowmbda_1}}$, then we conclude
$$
\geq \frac{1}{M} \sum\limits_{i=1}^N w_iw_i'\epsilonilonta'^2\norm{v}^2 - \epsilonilonta'^2 (c\frac{\leftarrowmbda_1^{3/2}}{\leftarrowmbda_d^{3/2}}\epsilonilonta') + O(\epsilonilonpsilon)
$$
as desired.
\epsilonilonnd{proof}
\boldsymbol{e}gin{corollary} (Anticoncentration of Spherically Symmetric Strongly Log Concave Distributions)
Let $p(x_1,...,x_d)$ be a distribution of the form
$$p(x_1,...,x_d) \cclassmacro{P}ropto \epsilonilonxp(-h(\norm{x})) $$
For $h(x)$ $m$-strongly convex. Then $p(x)$ is $(\sqrt{2em},O(\frac{1}{\epsilonilonta^4}))$-certifiably anticoncentrated.
\epsilonilonnd{corollary}
\boldsymbol{e}gin{proof}
The proof follows exactly as that of the Gaussian.
We begin with
$$H(\norm{v}) := \norm{v}^2\E_{x \sim p(x)} I_\epsilonilonta(\iprod{X,v}) = \norm{v}^2\E_{x \sim p(\frac{x}{\norm{v}})\frac{1}{\norm{v}}} I_\epsilonilonta(x)$$
Applying $m$-strong convexity we obtain
\boldsymbol{e}gin{align*}
\norm{v}^2\E_{x \sim p(\frac{x}{\norm{v}})\frac{1}{\norm{v}}} f(x) \leq \norm{v}^2\int \frac{\sqrt{em/2}}{\sqrt{2\cclassmacro{P}i}\norm{v}} \epsilonilonxp(-\frac{x^2}{2\epsilonilonta^2} - \frac{mx^2}{\norm{v}^2}) dx
\leq \norm{v}^2 \frac{\epsilonilonta \sqrt{em/2}}{\norm{v}} \leq \epsilonilonta\sqrt{em/2}
\epsilonilonnd{align*}
With the polynomial approximation calculations following the exact same template.
\epsilonilonnd{proof}
\boldsymbol{e}gin{comment}
\rhoection{Constructing the polynomial: an attempt}
Let $H_\epsilonilonta(x) \textsuperscript{st}\xspaceackrel{\mathrm{def}}= \One[x \in [-\epsilonilonta,\epsilonilonta]] $ be the indicator function of the interval $[-\epsilonilonta,\epsilonilonta]$.
Let $F_\epsilonilonta$ be a low-degree $\epsilonilonll_2$ approximation to $H_\epsilonilonta$, i.e.,
$$ \norm{H_\epsilonilonta-F_\epsilonilonta} = \left(\E_{x \sim N(0,\rho)}[(H_\epsilonilonta(x)-F_\epsilonilonta(x))^2]\right)^{1/2} \leq \kappa$$
Condition $1$ is a little annoying, let us check condition $2$ first.
Let $\Phi(x) = \frac{\rho}{\norm{v}} e^{-\frac{x^2}{2 } (\frac{1}{\norm{v}^2}-1)}$ denote the density for $N(0,\norm{v}^2)$ under the background measure $N(0,\rho^2)$. In other words, for every function $f$,
$$ \E_{x \sim N(0,\norm{v}^2)} [f(x)] = \E_{x \sim N(0,\rho^2} [f(x)\Phi(x)]$$
Notice that $\Phi(x) \leq \frac{\rho}{\norm{v}}$ for all $x$.
\boldsymbol{e}gin{align}
\E_{x \sim N(0,\norm{v}^2)} [F_{\epsilonilonta}^2(x)] & = \E_{x \sim N(0,\rho^2} [F_{\epsilonilonta}^2(x)\Phi(x)] \\
& \leq \frac{\rho}{\norm{v}} \cdot \E_{x \sim N(0,\rho^2} [F_{\epsilonilonta}^2(x)] \\
& = \frac{\rho}{\norm{v}} \cdot \left(\E_{x \sim N(0,\rho^2} [H_{\epsilonilonta}^2(x)] + \E_{x \sim N(0,\rho^2}[(F_{\epsilonilonta}^2(x)-H_{\epsilonilonta}^2(x)) ]\right) \\
& \leq \frac{\rho}{\norm{v}} \cdot \ProbOp_{x \in N(0,\rho^2)} ( x \in [-\epsilonilonta,\epsilonilonta]) + \frac{\rho}{\norm{v}} \cdot \norm{F_\epsilonilonta-H_\epsilonilonta} \cdot \norm{H_\epsilonilonta+F_\epsilonilonta}\\
& \leq \sqrt{2 \cclassmacro{P}i} \frac{\epsilonilonta}{\norm{v}} + \frac{\rho}{\norm{v}} \cdot \kappa \cdot (2 \norm{H_\epsilonilonta}+ \kappa)\\
& \leq \sqrt{2 \cclassmacro{P}i} \frac{\epsilonilonta}{\norm{v}} + \frac{\rho}{\norm{v}} \cdot \kappa \cdot \left(2 \frac{\epsilonilonta}{\rho} + \kappa\right)
\epsilonilonnd{align}
where the last inequality used triangle inequality.
If we have $\kappa < \epsilonilonta/\rho$ then we get that
$$\E_{x \in N(0,\norm{v}^2)} [F_\epsilonilonta^2(x)] \leq O(\epsilonilonta/\norm{v}+ \epsilonilonta^2 \kappa/\norm{v}) \leq O(\epsilonilonta/\norm{v})$$
In order to satisfy condition $1$ and $2$, we need a polynomial $F_\epsilonilonta$ such that,
$$F_{\epsilonilonta}(x) \geq 1 \qquad \forall x \in [-\epsilonilonta,\epsilonilonta]$$
and
$$ \norm{F_\epsilonilonta- H_\epsilonilonta} \leq \epsilonilonta/\rho $$
then $F_\epsilonilonta^2$ is the SoS polynomial satisfying all the constraints.
Theorem 3.5 in \cite{diakonikolas2010bounded} exhibits a polynomial $P$ such that for $a = \epsilonilonta^2/\log (1/\epsilonilonta)$ and $K = O(\log^2(1/\epsilonilonta)/\epsilonilonta)$ we have
\boldsymbol{e}gin{enumerate}
\emem $P(t) \in [sign[t], sign[t]+\epsilonilonta]$ for $t \in [-1/2,-2a] \cup [0,1/2]$.
\emem $P[t] \in [-1, 1+\epsilonilonta]$ for $t \in [-2a,0]$.
\emem $P(t) \leq 2\cdot (4t)^K$ for all $|t| \geq 1/2$.
\epsilonilonnd{enumerate}
We will fix $C$ later.
Consider $F_{\epsilonilonta}(t) = P((\epsilonilonta+t)/C)+ P((\epsilonilonta-t)/C)$
\boldsymbol{e}gin{enumerate}
\emem $F_\epsilonilonta(t) \in [1,1+\epsilonilonta]$ for $t \in [-\epsilonilonta,\epsilonilonta]$.
\emem For $t \in (-\epsilonilonta,-\epsilonilonta-2aC)$, $F_\epsilonilonta(t) \in (0,2+2\epsilonilonta)$
\emem For $t \in (\epsilonilonta,\epsilonilonta+2aC)$, $F_\epsilonilonta(t) \in (0,2+2\epsilonilonta)$.
\emem For $t \in (\epsilonilonta+2aC,(C-\epsilonilonta)/2) \cup ((-C-\epsilonilonta)/2, -\epsilonilonta -2aC) $, $|P(t)| \leq \epsilonilonta$.
\emem For $|t| \geq (C-\epsilonilonta)/2$, $|P(t)| \leq 2(4t)^K$
\epsilonilonnd{enumerate}
Now we can estimate $\norm{H_\epsilonilonta - F_\epsilonilonta}$ as,
\boldsymbol{e}gin{align*}
\E_{x \in N(0,\rho)}[(H_{\epsilonilonta}(x)-F_\epsilonilonta(x))^2] & \leq (2\epsilonilonta/\rho) \cdot \epsilonilonta^2 + (4aC/\rho) \cdot 8 + (C/\rho) \epsilonilonta^2 + \int_{t > (C-\epsilonilonta)/2} 4(4t)^{2K} e^{-t^2/\rho^2}
& 2\epsilonilonta^3/\rho + O(aC/\rho) + O(\epsilonilonta^2 C/\rho)+ \int_{t > (C-\epsilonilonta)/2} 4(4t)^{2K} e^{-t^2/\rho^2}
\epsilonilonnd{align*}
Note that
\boldsymbol{e}gin{align*}
\int_{t > (C-\epsilonilonta)/2} (4t)^{2K}e^{-t^2/2\rho^2} & \leq \int_{t > C/4} (4t)^{2K}e^{-t^2/2\rho^2}
\\
& \leq \int_{t > C/4} (4t)^{4K}/C^{2K} e^{-t^2/2\rho^2} \\
& \leq \frac{1}{C^{2K}} \int_{t} (4t)^{4K} e^{-t^2/2\rho^2} \\
& \leq \frac{1}{C^{2K}} 4^{4K} \rho^{4K} (4K)!!
& \leq \epsilonilonta
\epsilonilonnd{align*}
as soon as $C > 40K \rho^2$.
Substituting back for $C$, $a$ and $K$ we get,
\epsilonilonnd{comment}
\boldsymbol{i}bliographystyle{amsalpha}
\boldsymbol{i}bliography{bib/mr,bib/dblp,bib/scholar,bib/bibliography,bib/listDecoding}
\appendix
\input{content/AppendixRegression.tex}
\input{content/MeanAlgorithms.tex}
\input{content/meanoverview.tex}
\input{content/MeanEstimation.tex}
\boldsymbol{e}gin{comment}
\Pnote{Are we using the following section?}
\rhoection{Single Round Conditioning}
\boldsymbol{e}gin{theorem}
Consider the strategy of conditioning $w_i = 1$ with probability of selecting variable $w_i$ to be
$P(i) = \frac{\cclassmacro{P}E[w_i]}{\sum\limits_{i=1}^n \cclassmacro{P}E[w_i]}$
Let's denote this measure $S$
Then in expectation over the selection of $i$, we have
\boldsymbol{e}gin{align*}
\E_{i \sim S}[\cclassmacro{P}Var(\hat{\mu}|w_i = 1)] < m
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\E_{i \sim S}[\cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2}|w_i = 1]] \geq \boldsymbol{e}ta^2
\epsilonilonnd{align*}
\epsilonilonnd{theorem}
We observe that by markov we have
$$P(\cclassmacro{P}Var(\hat{\mu}|w_i = 1) < \frac{4m}{\boldsymbol{e}ta^2}) > 1 - \frac{\boldsymbol{e}ta^2}{4}$$
Since $\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2} \leq 1$ and $$\E_{i \sim S}[\cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2}|w_i = 1]] \geq \boldsymbol{e}ta^2$$
we have
$$P\boldsymbol{i}g(\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2} \geq \frac{\boldsymbol{e}ta^2}{2}\boldsymbol{i}g) \geq \frac{\boldsymbol{e}ta^2}{2} $$
Thus the probability that both events co-occur is
$$P\boldsymbol{i}g(\{\cclassmacro{P}Var(\hat{\mu}|w_i = 1) < \frac{4m}{\boldsymbol{e}ta^2}\} \boldsymbol{i}gcap \{\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2} \geq \frac{\boldsymbol{e}ta^2}{2}\}\boldsymbol{i}g) \geq \frac{\boldsymbol{e}ta^2}{4} $$
Thus repeatedly drawing $i \sim S$ we satisfy both events with high probability.
Now we proceed to prove the theorem
\boldsymbol{e}gin{proof}
\boldsymbol{e}gin{align*}
\E_{i \sim S} \cclassmacro{P}Var(\hat{\mu}|w_i = 1) = \E_{i \sim S}\cclassmacro{P}E[(\hat{\mu} - \cclassmacro{P}E[\hat{\mu}|w_i=1])^2|w_i=1]
\epsilonilonnd{align*}
by triangle inequality
\boldsymbol{e}gin{align*}
\leq 2\E_{i \sim S} \cclassmacro{P}E[(\hat{\mu} - X_i)^2|w_i=1]] + 2\E_{i \sim S}\cclassmacro{P}E[(X_i - \cclassmacro{P}E[\hat{\mu}|w_i=1])^2|w_i=1]
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= 2\E_{i \sim S} \cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_j(X_j - X_i)^2|w_i=1]] + 2\E_{i \sim S}(X_i - \cclassmacro{P}E[\hat{\mu}|w_i=1])^2
\epsilonilonnd{align*}
We bound the two terms separately. Starting with the first term
\boldsymbol{e}gin{align*}
\E_{i \sim S} \cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_j(X_j - X_i)^2|w_i=1]] = \frac{\sum\limits_{i \in [N]}\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i)^2]}{\sum\limits_{i \in [N]}\cclassmacro{P}E[w_i]}
\epsilonilonnd{align*}
Using $\sum\limits_{i \in [N]}w_i = \boldsymbol{e}ta N$ we obtain
\boldsymbol{e}gin{align*}
= \frac{1}{\boldsymbol{e}ta N}\sum_{i \in [N]}\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i)^2] = \cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta^2 N^2}\sum\limits_{i,j \in [N]}w_iw_j(X_j - X_i)^2]
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\leq \cclassmacro{P}E\frac{1}{\boldsymbol{e}ta^2N^2} \sum_{i,j \in [N]}w_iw_j\boldsymbol{i}g(2(X_i - \hat{\mu})^2 + 2(X_j - \hat{\mu}^2)\boldsymbol{i}g)
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
\leq \cclassmacro{P}E[\frac{2}{\boldsymbol{e}ta N} \sum_{i \in [N]} w_i(X_i - \hat{\mu})^2] + \cclassmacro{P}E[\frac{2}{\boldsymbol{e}ta N} \sum\limits_{j \in [N]} w_j(X_j - \hat{\mu})^2] \leq 4m
\epsilonilonnd{align*}
For $m$ being the moment bound constant.
Now we bound the second term
\boldsymbol{e}gin{align*}
\E_{i \sim S}(X_i - \cclassmacro{P}E[\hat{\mu}|w_i=1])^2 = \E_{i \sim S}(\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_j(X_j - X_i)|w_i=1])^2
\epsilonilonnd{align*}
Using the definition of conditional pseudoexpectation we have
\boldsymbol{e}gin{align*}
= \E_{i \sim S}\boldsymbol{i}g(\frac{\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i)]}{\cclassmacro{P}E[w_i]}\boldsymbol{i}g)^2
\epsilonilonnd{align*}
Using $w_i^2 = w_i$ we obtain
\boldsymbol{e}gin{align*}
= \E_{i \sim S}\boldsymbol{i}g(\frac{\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_i^2w_j(X_j - X_i)]}{\cclassmacro{P}E[w_i]}\boldsymbol{i}g)^2
\epsilonilonnd{align*}
Then using pseudoexpectation cauchy-schwarz we obtain
\boldsymbol{e}gin{align*}
\leq \E_{i \sim S}\boldsymbol{i}g(\frac{\cclassmacro{P}E[w_i^2]^{1/2}\cclassmacro{P}E[(\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i))^2]^{1/2}}{\cclassmacro{P}E[w_i]}\boldsymbol{i}g)^2
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= \E_{i \sim S}\boldsymbol{i}g(\frac{\cclassmacro{P}E[(\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i))^2]}{\cclassmacro{P}E[w_i]}\boldsymbol{i}g)
\epsilonilonnd{align*}
Using SOS-holder we obtain
\boldsymbol{e}gin{align*}
\leq \E_{i \sim S}\boldsymbol{i}g(\frac{\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i)^2]}{\cclassmacro{P}E[w_i]}\boldsymbol{i}g)
\epsilonilonnd{align*}
Writing the expectation out as a sum we obtain
\boldsymbol{e}gin{align*}
= \frac{\sum\limits_{i \in [N]}\cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta N}\sum\limits_{j \in [N]}w_iw_j(X_j - X_i)^2]}{\sum\limits_{i \in [N]}\cclassmacro{P}E[w_i]}
\epsilonilonnd{align*}
\boldsymbol{e}gin{align*}
= \cclassmacro{P}E[\frac{1}{\boldsymbol{e}ta^2N^2}\sum_{i,j \in [N]}w_iw_j(X_i - X_j)^2 ] < m
\epsilonilonnd{align*}
Putting the first and second term back together we have a bound of
\boldsymbol{e}gin{align*}
\E_{i \sim S} \cclassmacro{P}Var(\hat{\mu}|w_i = 1) \leq 10m
\epsilonilonnd{align*}
\epsilonilonnd{proof}
\boldsymbol{e}gin{lemma}
For $i \sim S$ we have
\boldsymbol{e}gin{align*}
\E_{i \sim S}[\cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2}|w_i = 1]] \geq \boldsymbol{e}ta^2
\epsilonilonnd{align*}
\epsilonilonnd{lemma}
\boldsymbol{e}gin{proof}
\boldsymbol{e}gin{align*}
\E_{i \sim S}[\cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle^2}{\boldsymbol{e}ta^2N^2}|w_i = 1]] = \frac{\sum\limits_{i \in [N]} \cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle}{\boldsymbol{e}ta^2N^2}w_i]}{\sum\limits_{i \in [N]} \cclassmacro{P}E[w_i]}
\epsilonilonnd{align*}
Using $\sum\limits_{i \in [N]}w_i = \boldsymbol{e}ta N$ we obtain
\boldsymbol{e}gin{align*}
= \frac{1}{\boldsymbol{e}ta N} \sum\limits_{i \in [N]} \cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle}{\boldsymbol{e}ta^2N^2}w_i] = \cclassmacro{P}E[\frac{\leftarrowngle w,w_p\rightarrowngle}{\boldsymbol{e}ta^2N^2}] \geq \boldsymbol{e}ta^2
\epsilonilonnd{align*}
as desired.
\epsilonilonnd{proof}
\epsilonilonnd{comment}
\epsilonilonnd{document} |