\documentclass[12pt]{amsart}%
\usepackage{amsfonts}
\usepackage{amsmath}
\usepackage{amssymb}
\usepackage{graphicx}%
\setcounter{MaxMatrixCols}{30}
%TCIDATA{OutputFilter=latex2.dll}
%TCIDATA{Version=5.00.0.2606}
%TCIDATA{CSTFile=amsartci.cst}
%TCIDATA{Created=Thursday, January 21, 2010 17:32:30}
%TCIDATA{LastRevised=Thursday, September 30, 2010 11:42:10}
%TCIDATA{}
%TCIDATA{}
%TCIDATA{BibliographyScheme=Manual}
%TCIDATA{}
%TCIDATA{Language=American English}
\newtheorem{theorem}{Theorem}
\theoremstyle{plain}
\newtheorem{acknowledgement}{Acknowledgement}
\newtheorem{algorithm}{Algorithm}
\newtheorem{axiom}{Axiom}
\newtheorem{case}{Case}
\newtheorem{claim}{Claim}
\newtheorem{conclusion}{Conclusion}
\newtheorem{condition}{Condition}
\newtheorem{conjecture}{Conjecture}
\newtheorem{corollary}{Corollary}
\newtheorem{criterion}{Criterion}
\newtheorem{definition}{Definition}
\newtheorem{example}{Example}
\newtheorem{exercise}{Exercise}
\newtheorem{lemma}{Lemma}
\newtheorem{notation}{Notation}
\newtheorem{problem}{Problem}
\newtheorem{proposition}{Proposition}
\newtheorem{remark}{Remark}
\newtheorem{solution}{Solution}
\newtheorem{summary}{Summary}
\numberwithin{equation}{section}
\begin{document}
\title[Symmetric and Antisymmetric Polynomials]{Symmetric and Antisymmetric Vector-valued Jack Polynomials}
\author{Charles F. Dunkl}
\address{Department of Mathematics, University of Virginia\\
Charlottesville, VA 22904-4137, US}
\email{cfd5z@virginia.edu}
\urladdr{http://people.virginia.edu/\symbol{126}cfd5z}
\subjclass[2000]{Primary 05E05, 20C30; Secondary 33C80, 05E35.}
\keywords{Jack polynomials, standard modules, Dunkl operators, hook-lengths}
\begin{abstract}
Polynomials with values in an irreducible module of the symmetric group can be
given the structure of a module for the rational Cherednik algebra, called a
standard module. This algebra has one free parameter and is generated by
differential-difference (\textquotedblleft Dunkl\textquotedblright) operators,
multiplication by coordinate functions and the group algebra. By specializing
Griffeth's ({\tt ar$\chi$iv:0707.0251}) results for the $G(r,p,N)$ setting, one obtains
norm formulae for symmetric and antisymmetric polynomials in the standard
module. Such polynomials of minimum degree have norms which involve
hook-lengths and generalize the norm of the alternating polynomial.
\end{abstract}
\maketitle
%First page headline in LaTeX for S\'eminaire Lotharingien de Combinatoire
%--first part
\thispagestyle{myheadings}
\font\rms=cmr8
\font\its=cmti8
\font\bfs=cmbx8
\markright{\its S\'eminaire Lotharingien de
Combinatoire \bfs 64 \rms (2010), Article~B64a\hfill}
\def\thepage{}
\section{Introduction}
Hook-lengths of nodes in Young tableaux appear in a variety of different
settings. Griffeth \cite{G} introduced Jack polynomials whose values lie in
irreducible modules of the family $G\left( r,p,N\right) $ of complex
reflection groups. This class of polynomials forms an orthogonal basis for the
associated standard module of the rational Cherednik algebra. In this paper we
specialize his results to the symmetric group and show how the norms of two
special symmetric and antisymmetric polynomials in the standard module depend
on the hook-lengths of the partition associated to the representation. These
norm formulae prove a necessity condition for aspherical parameter values.
This condition was first found by Gordon and Stafford \cite{GS}.
For $N\geq2$ let $x=\left( x_{1},\ldots,x_{N}\right) \in%
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{N}$ and let $\mathbb{N}:=\left\{ 0,1,2,3,\ldots\right\} $. For
$a,b\in\mathbb{N}$ and $a\leq b$ let $\left[ a,b\right] =\left\{
a,a+1,\ldots,b\right\} $ (an interval of integers). The cardinality of a set
$E$ is denoted by $\#E$. For $\alpha\in\mathbb{N}^{N}$ (a \textit{composition}%
) let $\left\vert \alpha\right\vert :=\sum_{i=1}^{N}\alpha_{i}$, $x^{\alpha
}:=\prod_{i=1}^{N}x_{i}^{\alpha_{i}}$, a monomial of degree $\left\vert
\alpha\right\vert $. The spaces of polynomials, respectively homogeneous,
polynomials are
\begin{align*}
\mathcal{P} & :=\mathrm{span}_{\mathbb{F}}\left\{ x^{\alpha}:\alpha
\in\mathbb{N}^{N}\right\} ,\\
\mathcal{P}_{n} & :=\mathrm{span}_{\mathbb{F}}\left\{ x^{\alpha}:\alpha
\in\mathbb{N}^{N},\left\vert \alpha\right\vert =n\right\} ,~n\in\mathbb{N},
\end{align*}
where $\mathbb{F}$ is a field $\supset\mathbb{Q}$. Consider the symmetric
group $\mathcal{S}_{N}$ as the group of permutations of $\left[ 1,N\right]
$. The group acts on polynomials by linear extension of $\left( xw\right)
_{i}=x_{w\left( i\right) },w\in\mathcal{S}_{N},1\leq i\leq N$, that is,
$wf\left( x\right) :=f\left( xw\right) ,f\in\mathcal{P}$. For $\alpha
\in\mathbb{N}^{N}$ let $\left( w\alpha\right) _{i}=\alpha_{w^{-1}\left(
i\right) }$, then $w\left( x^{\alpha}\right) =x^{w\alpha}$. Also
$\mathcal{S}_{N}$ is a finite reflection group whose reflections are the
transpositions $\left( i,j\right) $; $x\left( i,j\right) =\left(
\ldots,\overset{i}{x}_{j},\ldots,\overset{j}{x}_{i},\ldots\right) $. The
simple reflections $s_{i}:=\left( i,i+1\right) ,1\leq i\leq N-1$, generate
$\mathcal{S}_{N}$. A \textit{partition} is an element $\tau\in\mathbb{N}%
^{N,+}:=\left\{ \alpha\in\mathbb{N}^{N}:\alpha_{1}\geq\alpha_{2}\geq
\cdots\geq\alpha_{N}\right\} $; its \textit{Ferrers diagram} is the set of
nodes $\left\{ \left( i,j\right) :1\leq i\leq\ell\left( \tau\right)
,1\leq j\leq\tau_{i}\right\} $ (also denoted by $\tau$), where the
\textit{length} is $\ell\left( \tau\right) :=\#\left\{ i:\tau_{i}%
>0\right\} $.
% NOTE(review): the preceding two sentences reconstruct text lost in
% extraction -- confirm against the published version.
The conjugate partition $\tau^{\prime}$ is the
partition whose diagram is the transpose of the diagram of $\tau$ (that is,
$\tau_{m}^{\prime}=\#\left\{ i:\tau_{i}\geq m\right\} $). For a node (or
point) $\left( i,j\right) \in\tau$ the \textit{arm-length} is $\mathrm{arm}%
\left( i,j\right) :=\tau_{i}-j$, the \textit{leg-length} is $\mathrm{leg}%
\left( i,j\right) :=\tau_{j}^{\prime}-i$, and the \textit{hook-length} is
$h\left( i,j\right) :=\mathrm{arm}\left( i,j\right) +\mathrm{leg}\left(
i,j\right) +1$. We will use $\mathrm{arm}\left( i,j;\tau\right) $ etc. if
it is necessary to specify the partition.
To each partition $\tau$ of $N$ there is an associated irreducible
$\mathcal{S}_{N}$-module $V_{\tau}$. We analyze the space $M\left(
\tau\right) $ of $V_{\tau}$-valued polynomials under the action of
differential-difference (\textquotedblleft Dunkl\textquotedblright) operators.
There is a canonical symmetric bilinear (the contravariant) form $\left\langle
\cdot,\cdot\right\rangle $ on this space. We will construct distinguished
polynomials $f_{\tau}^{s},f_{\tau}^{a}\in M\left( \tau\right) $, with
$f_{\tau}^{s}$ being symmetric and $f_{\tau}^{a}$ being antisymmetric, such
that
\begin{align*}
\left\langle f_{\tau}^{s},f_{\tau}^{s}\right\rangle & =c_{0}\prod
\limits_{\left( i,j\right) \in\tau}\left( 1-h\left( i,j\right)
\kappa\right) _{\mathrm{leg}\left( i,j\right) },\\
\left\langle f_{\tau}^{a},f_{\tau}^{a}\right\rangle & =c_{1}\prod
\limits_{\left( i,j\right) \in\tau}\left( 1+h\left( i,j\right)
\kappa\right) _{\mathrm{arm}\left( i,j\right) };
\end{align*}
and $c_{0},c_{1}\in\mathbb{Q}$ are constants depending on $\tau$, and the
\textit{Pochhammer symbol} is%
\[
\left( t\right) _{n}:=\prod_{i=1}^{n}\left( t+i-1\right) ,n\in\mathbb{N}.
\]
This result generalizes the situation of the trivial representation of
$\mathcal{S}_{N}$; in this case $\tau=\left( N\right) ,f_{\tau}%
^{s}=1,f_{\tau}^{a}=\prod\limits_{1\leq i<j\leq N}\left( x_{j}%
-x_{i}\right) $.
% NOTE(review): a span of text was lost in extraction here (including the
% definitions of $Y(\tau)$, $v_{T}$, $b_{i}(T)$ and the opening of the
% proposition below with its cases (1)--(2)); the proposition header and
% the case-(3) condition are reconstructed from context -- confirm against
% the published version.
\begin{proposition}
\label{siv}
\begin{enumerate}
\setcounter{enumi}{2}
\item if $0<b_{i}\left( T\right) \leq\frac{1}{2}$ (when $\mathrm{rw}%
\left( i,T\right) <\mathrm{rw}\left( i+1,T\right) $ and $\mathrm{cm}%
\left( i,T\right) >\mathrm{cm}\left( i+1,T\right) $) then%
\begin{align*}
s_{i}v_{T} & =b_{i}\left( T\right) v_{T}+v_{s_{i}T},\\
s_{i}v_{s_{i}T} & =\left( 1-b_{i}\left( T\right) ^{2}\right) v_{T}%
-b_{i}\left( T\right) v_{s_{i}T};
\end{align*}
\item if $-\frac{1}{2}\leq b_{i}\left( T\right) <0$ (when
$\mathrm{rw}\left( i,T\right) >\mathrm{rw}\left( i+1,T\right) $ and\break
$\mathrm{cm}\left( i,T\right) <\mathrm{cm}\left( i+1,T\right) )$ then
$$s_{i}v_{T}=b_{i}\left( T\right) v_{T}+\left( 1-b_{i}\left( T\right)
^{2}\right) v_{s_{i}T},$$ and $$s_{i}v_{s_{i}T}=v_{T}-b_{i}\left( T\right)
v_{s_{i}T}.$$
\end{enumerate}
In cases {\em(3)} and {\em(4)} the tableau $s_{i}T$ is obtained by
interchanging the entries $i,i+1$. Furthermore in case {\em(3)} $s_{i}f_{0}=f_{0}$
and $s_{i}f_{1}=-f_{1}$ for
\begin{align*}
f_{0} & =\left( b_{i}\left( T\right) +1\right) v_{T}+v_{s_{i}T},\\
f_{1} & =\left( b_{i}\left( T\right) -1\right) v_{T}+v_{s_{i}T}.
\end{align*}
\end{proposition}
There is an ordering on tableaux such that $T>s_{i}T$ in case~(4).
\begin{corollary}
\label{vsym}Let $f=\sum_{T\in Y\left( \tau\right) }k_{T}v_{T}$ with the
coefficients $k_{T}\in\mathbb{Q}$ and $s_{i}f=\pm f$ for some $i\in\left[
1,N-1\right] $. Then
\begin{enumerate}
\item
$T,s_{i}T\in Y\left( \tau\right) $ implies
$k_{s_{i}T}=rk_{T}$ for some $r\neq0;$
\item $s_{i}f=f$ and $\mathrm{cm}%
\left( i,T\right) =\mathrm{cm}\left( i+1,T\right) $ implies $k_{T}%
=0$;
\item $s_{i}f=-f$ and $\mathrm{rw}\left( i,T\right) =\mathrm{rw}%
\left( i+1,T\right) $ implies $k_{T}=0.$
\end{enumerate}
\end{corollary}
Statement (1) means that $k_{s_{i}T}$ and $k_{T}$ are either both nonzero or
both zero.
\begin{definition}
The Jucys-Murphy elements (in the group algebra $\mathbb{Q}\mathcal{S}_{N}$)
are%
\[
\omega_{i}:=\sum_{j=i+1}^{N}\left( i,j\right) ,i\in\left[ 1,N\right] .
\]
\end{definition}
There are commutation relations: $\omega_{i}\omega_{j}=\omega_{j}\omega_{i}$
for all $i,j$; $\omega_{i}s_{j}=s_{j}\omega_{i}$ for $j\neq i-1,i$;
$s_{i}\omega_{i}-\omega_{i+1}s_{i}=1$ (see Vershik and Okounkov
\cite[Sec.~4]{OV} for the representations of the algebra generated by $\left\{
\omega_{i},\omega_{i+1},s_{i}\right\} $). Murphy proved the following:
\begin{theorem}
Suppose $T\in Y\left( \tau\right) $ and $i\in\left[ 1,N\right] $ then
$\omega_{i}v_{T}=c\left( i,T\right) v_{T}.$
\end{theorem}
Let $\left\langle \cdot,\cdot\right\rangle _{0}$ be a $\mathcal{S}_{N}%
$-invariant positive-definite bilinear form on $V_{\tau}$, (the form is unique
up to a multiplicative constant) then each $\omega_{i}$ is self-adjoint and
hence the vectors $v_{T}$ are pairwise orthogonal, being eigenvectors with
different eigenvalues. Denote $\left\Vert v\right\Vert _{0}^{2}=\left\langle
v,v\right\rangle _{0}$. For given $T$ and $i$ as in case~(4) we have
$\left\Vert v_{T}\right\Vert _{0}^{2}=b_{i}\left( T\right) ^{2}\left\Vert
v_{T}\right\Vert _{0}^{2}+\left\Vert v_{s_{i}T}\right\Vert _{0}^{2}$ (since
$s_{i}$ is an isometry) and thus $\left\Vert v_{s_{i}T}\right\Vert _{0}%
^{2}=\left( 1-b_{i}\left( T\right) ^{2}\right) \left\Vert v_{T}\right\Vert
_{0}^{2}$. There is one formula for $\left\Vert v_{T}\right\Vert _{0}^{2}$ in
\cite[Thm.~4.1]{M}. The following is based on the content vector of $T$ (that
is, $\left( c\left( 1,T\right) ,\ldots,c\left( N,T\right) \right) ):$
\begin{definition}
For $T\in Y\left( \tau\right) $ let%
\[
\left\Vert v_{T}\right\Vert _{c}^{2}=\prod\limits_{1\leq im+1$,
whenever $m\in\left[ 1,N-1\right] $ and $\left\{ T,s_{m}T\right\} \subset
Y\left( \tau\right) ,$ then%
\[
\frac{\prod_{1\leq ii_{2}$ then any
entry $j$ in this column of $T$ between $i_{1}$ and $i_{2}$ has to satisfy
$i_{1}>j>i_{2}$) then $T_{0}$ has $\mathrm{cm}\left( i,T_{0}\right)
=\mathrm{cm}\left( i+1,T_{0}\right) $ for some $i\in\left[ a,b-1\right] $.
Similarly if $\mathrm{rw}\left( i_{1},T\right) =\mathrm{rw}\left(
i_{2},T\right) $ for some $i_{1},i_{2}\in\left[ a,b\right] \ $then $T_{1}$
has $\mathrm{rw}\left( i,T_{1}\right) =\mathrm{rw}\left( i+1,T_{1}\right)
$ for some $i\in\left[ a,b-1\right] $.
\subsection{Subgroup symmetric vectors}
First consider the invariant\break (symmetric) situation. Corollary~\ref{vsym} and
the properties of $T_{0}$ imply the following necessary condition for
$V_{T}\left( \mathcal{S}_{\left[ a,b\right] }\right) $ to contain a
nontrivial $\mathcal{S}_{\left[ a,b\right] }$-invariant.
Say $T$ satisfies condition $\left[ a,b\right] _{\mathrm{cm}}$ if the
entries $a,a+1,\ldots,b$ are in distinct columns of $T$, that is, $a\leq
i\mathrm{cm}\left( j,T\right) $ for $a\leq
ii+1$%
\begin{align*}
c\left( j,T_{0}\right) -c\left( i,T_{0}\right) & =\left( \mathrm{cm}%
\left( j,T_{0}\right) -\mathrm{cm}\left( i,T_{0}\right) \right) +\left(
\mathrm{rw}\left( i,T_{0}\right) -\mathrm{rw}\left( j,T_{0}\right) \right)
\\
& \leq\mathrm{cm}\left( j,T_{0}\right) -\mathrm{cm}\left( i,T_{0}\right)
\leq i-j\leq-2,
\end{align*}
or $j=i+1$ and
\begin{align*}
& c\left( i+1,T_{0}\right) -c\left( i,T_{0}\right) \\
& =\left( \mathrm{cm}\left( i+1,T_{0}\right) -\mathrm{cm}\left(
i,T_{0}\right) \right) +\left( \mathrm{rw}\left( i,T_{0}\right)
-\mathrm{rw}\left( i+1,T_{0}\right) \right) \\
& \leq-1-1=-2.
\end{align*}
\begin{definition}
Suppose $T\in Y\left( \tau\right) $ satisfies condition $\left[ a,b\right]
_{\mathrm{cm}}$ then let%
\[
P_{0}\left( T;a,b\right) :=\prod\limits_{a\leq i\mathrm{cm}\left( i+1,T\right) \right\} ;$$
$T\in A$ implies
$s_{i}v_{T}=-v_{T}$. Then%
\[
f=\sum_{T\in A}P_{1}\left( T;a,b\right) v_{T}+\sum_{T\in B}\left(
P_{1}\left( T;a,b\right) v_{T}+P_{1}\left( s_{i}T;a,b\right) v_{s_{i}%
T}\right) .
\]
Fix $T\in B$ and compute $P_{1}\left( T;a,b\right) /P_{1}\left(
s_{i}T;a,b\right) $ using Lemma~\ref{gprod}; set $g_{mn}\left( T\right) =1$
if $\mathrm{cm}\left( m,T\right) \geq\mathrm{cm}\left( n,T\right) $ and
$g_{mn}\left( T\right) =\frac{c\left( n,T\right) -c\left( m,T\right)
}{1-c\left( n,T\right) +c\left( m,T\right) }$ if $\mathrm{cm}\left(
m,T\right) <\mathrm{cm}\left( n,T\right) $. Then $g_{i,i+1}\left(
T\right) =1$ and
\begin{align*}
g_{i,i+1}\left( s_{i}T\right) & =\frac{c\left( i+1,s_{i}T\right)
-c\left( i,s_{i}T\right) }{1-c\left( i+1,s_{i}T\right) +c\left(
i,s_{i}T\right) }\\
& =\frac{1}{-1-b_{i}\left( s_{i}T\right) }=\frac{1}{b_{i}\left( T\right)
-1}.
\end{align*}
Thus $P_{0}\left( T;a,b\right) /P_{0}\left( s_{i}T;a,b\right)
=b_{i}\left( T\right) -1$ and $s_{i}f=-f$ by Proposition~\ref{siv}. The norm
formula follows from the proof of Corollary~\ref{svnorm} with some small
modifications to take care of sign-changes.
\end{proof}
There are corresponding statements for $H=\mathcal{S}_{\left[ a_{1}%
,b_{1}\right] }\times\cdots\times\mathcal{S}_{\left[ a_{n},b_{n}\right] }$,
using disjoint intervals. The branching theorem for the restriction of
irreducible representations of $\mathcal{S}_{N}$ to those of the parabolic
subgroups (like $H$) implicitly appears in the previous discussion, in
connection with the conditions $\left[ a,b\right] _{\mathrm{cm}}$ and
$\left[ a,b\right] _{\mathrm{rw}}$.
\section{Dunkl operators}
Let $\kappa$ be a transcendental (formal parameter) and set $\mathbb{F}%
=\mathbb{Q}\left( \kappa\right) $. Consider the space $\mathcal{P}\otimes
V_{\tau}=\mathrm{span}_{\mathbb{F}}\left\{ x^{\alpha}v_{T}:\alpha
\in\mathbb{N}^{N},T\in Y\left( \tau\right) \right\} $, polynomials
$p\left( x\right) $ on $%
%TCIMACRO{\U{211d} }%
%BeginExpansion
\mathbb{R}
%EndExpansion
^{N}\ $with values in $V_{\tau}$. The space is an $\mathcal{S}_{N}$-module
with the action $w\left( x^{\alpha}v_{T}\right) =x^{w\alpha}\left(
wv_{T}\right) $ for $w\in\mathcal{S}_{N}$, extended to all of $\mathcal{P}%
\otimes V_{\tau}$ by linearity. For $p\in\mathcal{P}$ and $u\in V_{\tau}$ and
$1\leq i\leq N$ let%
\begin{equation}
\mathcal{D}_{i}\left( p\left( x\right) u\right) :=\frac{\partial}{\partial
x_{i}}p\left( x\right) u+\kappa\sum_{j=1,j\neq i}^{N}\frac{p\left(
x\right) -p\left( x\left( i,j\right) \right) }{x_{i}-x_{j}}\left(
i,j\right) u. \label{defdi}%
\end{equation}
The definition is extended to $\mathcal{P}\otimes V_{\tau}$ by linearity. Then
$\mathcal{D}_{i}\mathcal{D}_{j}=\mathcal{D}_{j}\mathcal{D}_{i}$ for $1\leq
i,j\leq N$. The proof is a straightforward adaptation of the original proof
for scalar polynomials $p\left( x\right) $ (see \cite[Ch.~4]{DX}). There are
important commutators (appearing in the definition of the rational Cherednik
algebra, the algebra generated by $\mathbb{F}\mathcal{S}_{N}$ and $\left\{
x_{i},\mathcal{D}_{i}:i\in\left[ 1,N\right] \right\} $):%
\begin{align}
\mathcal{D}_{i}x_{j}-x_{j}\mathcal{D}_{i} & =-\kappa\left( i,j\right)
,i\neq j\label{dxxd}\\
\mathcal{D}_{i}x_{i}-x_{i}\mathcal{D}_{i} & =1+\kappa\sum_{j\neq i}\left(
i,j\right) .\nonumber
\end{align}
\begin{definition}
The space $\mathcal{P}\otimes V_{\tau}$ equipped with the action of
$\mathbb{F}\mathcal{S}_{N}$ and $\left\{ x_{i},\mathcal{D}_{i}:i\in\left[
1,N\right] \right\} $ is a standard module of the rational Cherednik algebra
and is denoted by $M\left( \tau\right) $. For $n\in\mathbb{N}$ the linear
subspace $\mathcal{P}_{n}\otimes V_{\tau}$ is denoted by $M_{n}\left(
\tau\right) $.
\end{definition}
The representation theory of rational Cherednik algebras is described in the
survey \cite{R} by Rouquier. For $p\left( x\right) \in\mathcal{P}\otimes
V_{\tau}$ set
\[
\mathcal{U}_{i}p\left( x\right) =\mathcal{D}_{i}\left( x_{i}p\left(
x\right) \right) -\kappa\sum_{j=1}^{i-1}\left( i,j\right) p\left(
x\right) ,1\leq i\leq N.
\]
The operators $\mathcal{U}_{i}$ also commute pairwise. They have a
triangularity property (a special case of a result of Griffeth \cite{G} for
the complex reflection groups $G\left( r,p,N\right) $). There is an
important function on compositions:
\begin{definition}
For $\alpha\in\mathbb{N}^{N}$ and $1\leq i\leq N$ let%
\[
r\left( \alpha,i\right) :=\#\left\{ j:\alpha_{j}>\alpha_{i}\right\}
+\#\left\{ j:1\leq j\leq i,\alpha_{j}=\alpha_{i}\right\}
\]
be the rank function.
\end{definition}
A consequence of the definition is that $r\left( \alpha,i\right) <r\left(
\alpha,j\right) $ if and only if $\alpha_{i}>\alpha_{j}$, or $\alpha
_{i}=\alpha_{j}$ and $i<j$.
% NOTE(review): text lost in extraction here (the opening of the following
% lemma, which defines the operators $B_{ij}$); the lemma header and the
% first item are reconstructed from context -- confirm against the
% published version.
\begin{lemma}
\begin{enumerate}
\item if $\alpha_{i}>\alpha_{j}$ then $$B_{ij}x^{\alpha}=x^{\alpha
}+\left( i,j\right) x^{\alpha}+\sum\limits_{s=1}^{\alpha_{i}-\alpha_{j}%
-1}x^{\alpha-s\left( \varepsilon\left( i\right) -\varepsilon\left(
j\right) \right) }$$
and
$\alpha^{+}\vartriangleright\left( \alpha-s\left(
\varepsilon\left( i\right) -\varepsilon\left( j\right) \right) \right)
^{+}$ for $1\leq s\leq\alpha_{i}-\alpha_{j}-1$;
\item if $\alpha
_{i}<\alpha_{j}$ then
$$B_{ij}x^{\alpha}=-\sum\limits_{s=1}^{\alpha_{j}%
-\alpha_{i}-1}x^{\alpha-s\left( \varepsilon\left( j\right) -\varepsilon
\left( i\right) \right) }$$
and
$\alpha^{+}\vartriangleright\left(
\alpha-s\left( \varepsilon\left( j\right) -\varepsilon\left( i\right)
\right) \right) ^{+}$ for $1\leq s\leq\alpha_{j}-\alpha_{i}-1$.
\end{enumerate}
\end{lemma}
The following proposition can be elegantly stated in terms of conjugates of
Jucys-Murphy elements. Recall the conjugation relation $w\left( i,j\right)
w^{-1}=\left( w\left( i\right) ,w\left( j\right) \right) $.
\begin{definition}
\label{w_a}For $\alpha\in\mathbb{N}^{N}$ and $1\leq i\leq N$ let $\omega
_{i}^{\alpha}:=w_{\alpha}\omega_{r\left( \alpha,i\right) }w_{\alpha}^{-1}$,
where $w_{\alpha}$ is the inverse of $r\left( \alpha,\cdot\right) $.
Equivalently $\omega_{i}^{\alpha}=\sum\limits_{r\left( \alpha,j\right)
>r\left( \alpha,i\right) }\left( i,j\right) $.
\end{definition}
To justify the second equation observe that%
\[
w_{\alpha}\omega_{r\left( \alpha,i\right) }w_{\alpha}^{-1}=\sum
\limits_{r\left( \alpha,i\right) \alpha_{j},$ (3) $-\left( \left(
i,j\right) x^{\alpha}+q_{\alpha}\right) $ if $\alpha_{j}>\alpha_{i}$, so
that $\left( i,j\right) \alpha\vartriangleleft\alpha$. In the case $i\alpha_{j}$, so
that $\left( i,j\right) \alpha\vartriangleleft\alpha$, (3) $q_{\alpha}$ if
$\alpha_{i}<\alpha_{j}$. Thus $\kappa x^{\alpha}\left( i,j\right) u$ appears
in $\mathcal{U}_{i}x^{\alpha}u$ exactly when $\alpha_{i}>\alpha_{j}$ or
$\alpha_{i}=\alpha_{j}$ and $j>i$, that is, $r\left( \alpha,j\right)
>r\left( \alpha,i\right) $.
\end{proof}
Following Griffeth we define an order on the pairs $\left\{ \left(
\alpha,u\right) :\alpha\in\mathbb{N}^{N}\right\} $: $\left( \alpha
,u_{1}\right) \vartriangleright\left( \beta,u_{2}\right) $ means that
$\alpha\vartriangleright\beta$. For this order the leading term of
$\mathcal{U}_{i}x^{\alpha}u$ is $x^{\alpha}\left( \alpha_{i}+1+\kappa
\omega_{i}^{\alpha}\right) u$.
\section{Nonsymmetric Jack polynomials}
This section presents the structure of the simultaneous eigenvectors of
$\left\{ \mathcal{U}_{i}:1\leq i\leq N\right\} $ in $M\left( \tau\right)
$. These are vector-valued generalizations of the nonsymmetric Jack
polynomials (see \cite[Ch.~8]{DX}). The operators $\mathcal{U}_{i}$ are
self-adjoint with respect to the contravariant form, which is described as follows:
The contravariant form $\left\langle \cdot,\cdot\right\rangle $ on $M\left(
\tau\right) $ is the canonical symmetric $\mathcal{S}_{N}$-invariant bilinear
form, extending the form $\left\langle \cdot,\cdot\right\rangle _{0}$ on
$V_{\tau}$, such that%
\[
\left\langle x_{i}f,g\right\rangle =\left\langle f,\mathcal{D}_{i}%
g\right\rangle ,i\in\left[ 1,N\right] ,f,g\in M\left( \tau\right) .
\]
An existence proof can be based on the operator
$$\sum\limits_{i=1}^{N}%
x_{i}\mathcal{D}_{i}+\kappa\sum\limits_{1\leq iai1}Suppose $\alpha\in\mathbb{N}^{N}$ and $\alpha_{i}>\alpha_{i+1}$
for some $i-\frac{1}{N}$ and $\kappa<\frac{1}{N}$ are necessary and
sufficient, respectively. Otherwise let $h_{\tau}:=\tau_{1}+\ell\left(
\tau\right) -1$, the maximum hook-length of $\tau$, then $-\frac{1}{h_{\tau}%
}<\kappa<\frac{1}{h_{\tau}}$ implies $b_{i}\left( \alpha,T\right) ^{2}<1$
for all $i,\alpha,T$. Note that $1\leq i,j\leq N,T\in Y\left( \tau\right) $
implies $\left\vert c\left( i,T\right) -c\left( j,T\right) \right\vert
\leq$ $h_{\tau}-1$.
\end{remark}
Etingof, Stoica and Griffeth \cite[Thm.~5.5]{ESG} found the complete
description of the set of values of $\kappa$ for which $L_{\kappa}\left(
\tau\right) $ provides a unitary representation of the rational Cherednik
algebra. We can find an expression for $\left\Vert \zeta_{\alpha,T}\right\Vert
^{2}$ in terms of $\left\Vert \zeta_{\alpha^{+},T}\right\Vert ^{2}$, following
the approach used in \cite[Thm.~8.5.8]{DX}.
\begin{definition}
For $\alpha\in\mathbb{N}^{N},T\in Y\left( \tau\right) $ and $\varepsilon
=\pm$ let%
\[
\mathcal{E}_{\varepsilon}\left( \alpha,T\right) =\kern-3pt\prod
\limits_{\substack{1\leq i<j\leq N\\ \alpha_{i}<\alpha_{j}}}\left(
1+\frac{\varepsilon\kappa}{\alpha_{j}-\alpha_{i}+\kappa\left( c\left(
r\left( \alpha,j\right) ,T\right) -c\left( r\left( \alpha,i\right)
,T\right) \right) }\right) .
\]
\end{definition}
% NOTE(review): the product formula above and the proposition header below
% reconstruct text lost in extraction -- confirm against the published
% version.
\begin{proposition}
Suppose $\alpha\in\mathbb{N}^{N},T\in Y\left( \tau\right) $ and
$\alpha_{i+1}>\alpha_{i}$ for some $i\in\left[ 1,N-1\right] $ then
$\mathcal{E}_{\varepsilon}\left( s_{i}\alpha,T\right) /\mathcal{E}%
_{\varepsilon}\left( \alpha,T\right) =1+\varepsilon b_{i}\left(
\alpha,T\right) $.
\end{proposition}
\begin{proof}
Using an argument similar to that of Lemma~\ref{gprod} we have%
\begin{align*}
& \frac{\mathcal{E}_{\varepsilon}\left( s_{i}\alpha,T\right) }%
{\mathcal{E}_{\varepsilon}\left( \alpha,T\right) }\\
& =1+\frac{\varepsilon\kappa}{\left( s_{i}\alpha\right) _{i+1}-\left(
s_{i}\alpha\right) _{i}+\kappa\left( c\left( r\left( s_{i}\alpha
,i+1\right) ,T\right) -c\left( r\left( s_{i}\alpha,i\right) ,T\right)
\right) }\\
& =1+\varepsilon b_{i}\left( \alpha,T\right) ,
\end{align*}
because $r\left( s_{i}\alpha,i+1\right) =r\left( \alpha,i\right) $ and
$r\left( s_{i}\alpha,i\right) =r\left( \alpha,i+1\right) .$
\end{proof}
\begin{corollary}
Suppose $\alpha\in\mathbb{N}^{N},T\in Y\left( \tau\right) $ then%
\[
\left\Vert \zeta_{\alpha,T}\right\Vert ^{2}=\mathcal{E}_{2}\left(
\alpha,T\right) ^{-1}\left\Vert \zeta_{\alpha^{+},T}\right\Vert ^{2}.
\]
\end{corollary}
\begin{proof}
Argue by induction on $\mathrm{inv}\left( \alpha\right) $. If the formula is
valid for some $\alpha$ with $\alpha_{i}>\alpha_{i+1}$ then by
Proposition~\ref{ai>ai1}%
\begin{align*}
\left\Vert \zeta_{s_{i}\alpha,T}\right\Vert ^{2} & =\left( 1-b_{i}\left(
\alpha,T\right) ^{2}\right) ^{-1}\left\Vert \zeta_{\alpha,T}\right\Vert
^{2}\\
& =\left( 1-b_{i}\left( \alpha,T\right) ^{2}\right) ^{-1}\mathcal{E}%
_{2}\left( \alpha,T\right) ^{-1}\left\Vert \zeta_{\alpha^{+},T}\right\Vert
^{2}\\
& =\mathcal{E}_{2}\left( s_{i}\alpha,T\right) ^{-1}\left\Vert \zeta
_{\alpha^{+},T}\right\Vert ^{2}.
\end{align*}
This completes the induction.
\end{proof}
Consider the case $\alpha_{i}=\alpha_{i+1}$ and let $I=r\left( \alpha
,i\right) $ so that $r\left( \alpha,i+1\right) =I+1$ and $b_{i}\left(
\alpha,T\right) =\left( c\left( I,T\right) -c\left( I+1,T\right)
\right) ^{-1}=b_{I}\left( T\right) $ (see Proposition~\ref{siv}).
Furthermore
$$s_{i}w_{\alpha}=w_{\alpha}\left( w_{\alpha}^{-1}\left(
i\right) ,w_{\alpha}^{-1}\left( i+1\right) \right) =w_{\alpha}\left(
I,I+1\right) =w_{\alpha}s_{I}.$$
The transformation properties depend on the
positions of $I$ and $I+1$ in $T$.
\begin{proposition}
\label{ai=ai1}Suppose $\alpha\in\mathbb{N}^{N},T\in Y\left( \tau\right) $
and $\alpha_{i}=\alpha_{i+1}$ for some $i\alpha_{i+1}$;
\item $\left( b_{I}\left( T\right) +1\right) \zeta_{\alpha,T}%
+\zeta_{\alpha,s_{I}T}$, for $\alpha_{i}=\alpha_{i+1},I=r\left(
\alpha,i\right) $ and $0\alpha_{i+1}$;
\item $\left( b_{I}\left( T\right) -1\right) \zeta_{\alpha,T}%
+\zeta_{\alpha,s_{I}T}$, for $\alpha_{i}=\alpha_{i+1},I=r\left(
\alpha,i\right) $ and $0\lambda_{b_{1}+1}$ and so forth). These
intervals depend on $\lambda$ but we will not incorporate this into the
notation. Let $\lambda^{R}=\left( \lambda_{N},\lambda_{N-1},\ldots
,\lambda_{1}\right) \in\mathbb{N}^{N}$, the reverse of $\lambda$. The
permutation $w_{\lambda^{R}}$ is defined by $\left( w_{\lambda^{R}}\right)
^{-1}\left( i\right) =r\left( \lambda^{R},i\right) ,i\in\left[
1,N\right] $ (Definition~\ref{w_a}).
\end{notation}
Generally $w_{\lambda^{R}}\neq w_{0}$ where $w_{0}$ is the longest permutation
given by $w_{0}\left( i\right) =N+1-i$ (example: $\lambda=\left(
3,2,2,1\right) $ then $\left[ w_{\lambda^{R}}\left( i\right) \right]
_{i=1}^{4}=\left[ 4,2,3,1\right] $). The composition $\lambda^{R}$ is the
unique minimum for the order \textquotedblleft$\succ$\textquotedblright\ on
$\left\{ \alpha:\alpha^{+}=\lambda\right\} $. For $\alpha^{+}=\lambda$ and
$T\in Y\left( \tau\right) $ the leading term of $\zeta_{\alpha,T}$ is
$x^{\alpha}w_{\alpha}v_{T}$ (where $w_{\alpha}^{-1}\left( i\right) =r\left(
\alpha,i\right) $) and the minimality of $\lambda^{R}$ implies that the
expansion of $\zeta_{\lambda^{R},T}$ has no term of the form $x^{\alpha}u$
with $u\in V_{\tau}$ when $\alpha\neq\lambda^{R},\alpha^{+}=\lambda$. From the
expressions in (2) above we see that the subgroup $W_{\lambda}$ is an
important part of the analysis. The formulae developed in Section 2 will be
used. The method is an analog of the one for scalar nonsymmetric Jack
polynomials, introduced by Baker and Forrester \cite{BF}.
\begin{definition}
For $\lambda\in\mathbb{N}^{N,+}$ and $T\in Y\left( \tau\right) $ define the
tableau $\left\lfloor \lambda,T\right\rfloor $ to be the assignment of
$\lambda_{1},\lambda_{2},\ldots,\lambda_{N}$ to the nodes of the Ferrers
diagram of $\tau$ so that the entry at $T\left( i\right) $ is $\lambda
_{i},i\in\left[ 1,N\right] $. Thus the entries of $\left\lfloor
\lambda,T\right\rfloor $ are weakly increasing ($\leq$) in each row and in
each column. The set of $T^{\prime}$ satisfying $\left\lfloor \lambda
,T^{\prime}\right\rfloor =\left\lfloor \lambda,T\right\rfloor $ is exactly
$Y\left( T;W_{\lambda}\right) $.
\end{definition}
\subsection{Case: $\#Y\left( T;W_{\lambda}\right) =1$}
We begin with the situation of symmetrizing $\zeta_{\lambda,T}$ when
$\#Y\left( T;W_{\lambda}\right) =1$, that is, $v_{T}$ is $W_{\lambda}%
$-invariant so that each interval $\left[ a_{i},b_{i}\right] $ is contained
in a row of $T$, $1\leq i\leq n$ ($\mathrm{rw}\left( j,T\right)
=\mathrm{rw}\left( b_{i},T\right) $ for $1\leq i\leq n$ and $a_{i}\leq j\leq
b_{i}$). Then $\sum_{w\in\mathcal{S}_{N}}w\zeta_{\lambda,T}=\sum_{\alpha
^{+}=\lambda}A_{\alpha}\zeta_{\alpha,T}$ with coefficients to be determined.
\begin{theorem}
\label{symz}Suppose $\lambda\in\mathbb{N}^{N,+}$ and $T\in Y\left(
\tau\right) $ such that $w\in W_{\lambda}$ implies $wv_{T}=v_{T}$ then the
polynomial $f_{\lambda,T}^{s}$ defined by%
\[
f_{\lambda,T}^{s}=\sum_{\alpha^{+}=\lambda}\mathcal{E}_{-}\left(
\alpha,T\right) \zeta_{\alpha,T},
\]
is $\mathcal{S}_{N}$-invariant and
\[
\left\Vert f_{\lambda,T}^{s}\right\Vert ^{2}=\frac{N!}{\#W_{\lambda}}\frac
{1}{\mathcal{E}_{+}\left( \lambda^{R},T\right) }\left\Vert \zeta_{\lambda
,T}\right\Vert ^{2}.
\]
\end{theorem}
\begin{proof}
Fix $i\in\left[ 1,N-1\right] $ and let%
\begin{align*}
A & =\left\{ \alpha:\alpha^{+}=\lambda,\alpha_{i}=\alpha_{i+1}\right\} ,\\
B & =\left\{ \alpha:\alpha^{+}=\lambda,\alpha_{i}>\alpha_{i+1}\right\} .
\end{align*}
Write%
\[
f_{\lambda,T}^{s}=\sum_{\alpha\in A}\mathcal{E}_{-}\left( \alpha,T\right)
\zeta_{\alpha,T}+\sum_{\alpha\in B}\left( \mathcal{E}_{-}\left(
\alpha,T\right) \zeta_{\alpha,T}+\mathcal{E}_{-}\left( s_{i}\alpha,T\right)
\zeta_{s_{i}\alpha,T}\right) .
\]
Suppose $\alpha\in A$ then $r\left( \alpha,i+1\right) =r\left(
\alpha,i\right) +1$ thus the values $r\left( \alpha,i\right) $ and
$r\left( \alpha,i+1\right) $ belong to some interval $\left[ a_{j}%
,b_{j}\right] $ (where $\mathcal{S}_{\left[ a_{j},b_{j}\right] }$ is a
factor of $W_{\lambda}$) and are adjacent entries in some row of $T$, hence
$s_{i}\zeta_{\alpha,T}=\zeta_{\alpha,T}$. Next let $\alpha\in B$ then the
corresponding term in the sum is $\mathcal{E}_{-}\left( \alpha,T\right)
\left( \zeta_{\alpha,T}+\frac{\mathcal{E}_{-}\left( s_{i}\alpha,T\right)
}{\mathcal{E}_{-}\left( \alpha,T\right) }\zeta_{s_{i}\alpha,T}\right) $.
Using the techniques of Lemma~\ref{gprod} we find that%
\begin{align*}
& \dfrac{\mathcal{E}_{-}\left( s_{i}\alpha,T\right) }{\mathcal{E}%
_{-}\left( \alpha,T\right) }\\
& =1-\frac{\kappa}{\left( s_{i}\alpha\right) _{i+1}-\left( s_{i}%
\alpha\right) _{i}+\kappa\left( c\left( r\left( s_{i}\alpha,i+1\right)
,T\right) -c\left( r\left( s_{i}\alpha,i\right) ,T\right) \right) }\\
& =1-\frac{\kappa}{\alpha_{i}-\alpha_{i+1}+\kappa\left( c\left( r\left(
\alpha,i\right) ,T\right) -c\left( r\left( \alpha,i+1\right) ,T\right)
\right) }\\
&=1-b_{i}\left( \alpha,T\right) ,
\end{align*}
and thus the term for $\alpha$ in the sum over $B$ is $s_{i}$-invariant.
Consider $g=\sum_{w\in\mathcal{S}_{N}}w\zeta_{\lambda^{R},T}$; since $g$ is
$\mathcal{S}_{N}$-invariant it must equal a constant multiple $\gamma$ of
$f_{\lambda,T}^{s}$. To find $\gamma$ consider the coefficients of
$x^{\lambda}v_{T}$ in $f_{\lambda,T}^{s}$ and $g$. The leading term of
$\zeta_{\lambda^{R},T}$ is $x^{\lambda^{R}}w_{\lambda^{R}}\left(
v_{T}\right) .$ The coefficient in $f_{\lambda,T}^{s}$ is $1$ (by definition
of $\zeta_{\lambda,T}$). The term $x^{\lambda}v_{T}$ appears in $w\zeta
_{\lambda^{R},T}$ with coefficient $1$ exactly when $w=w_{1}w_{\lambda^{R}%
}^{-1}$ for $w_{1}\in W_{\lambda}$. Thus $g=\left( \#W_{\lambda}\right)
f_{\lambda,T}^{s}$ and%
\begin{align*}
\left\Vert f_{\lambda,T}^{s}\right\Vert ^{2} & =\frac{1}{\#W_{\lambda}%
}\left\langle g,f_{\lambda,T}^{s}\right\rangle =\frac{1}{\#W_{\lambda}}%
\sum_{w\in\mathcal{S}_{N}}\left\langle w\zeta_{\lambda^{R},T},f_{\lambda
,T}^{s}\right\rangle \\
& =\frac{N!}{\#W_{\lambda}}\left\langle \zeta_{\lambda^{R},T},f_{\lambda
,T}^{s}\right\rangle =\frac{N!}{\#W_{\lambda}}\mathcal{E}_{-}\left(
\lambda^{R},T\right) \left\Vert \zeta_{\lambda^{R},T}\right\Vert ^{2}\\
& =\frac{N!\mathcal{E}_{-}\left( \lambda^{R},T\right) }{\left(
\#W_{\lambda}\right) \mathcal{E}_{2}\left( \lambda^{R},T\right) }\left\Vert
\zeta_{\lambda,T}\right\Vert ^{2}.
\end{align*}
This completes the proof.
\end{proof}
Continuing with the case $\#Y\left( T;W_{\lambda}\right) =1$ we turn to the
corresponding antisymmetric function involving $\zeta_{\lambda,T}$ such that
$v_{T}$ is antisymmetric for $W_{\lambda}$. That is, each interval $\left[
a_{i},b_{i}\right] $ (appearing in $W_{\lambda}$) is contained in a column of
$T$ ($a_{i}\leq j\leq b_{i}$ implies $\mathrm{cm}\left( j,T\right)
=\mathrm{cm}\left( b_{i},T\right) $). The number of inversions
$\mathrm{inv}\left( \alpha\right) $ takes the place of the sign of a
permutation in order to allow $\lambda$ to have some repeated values.
\begin{theorem}
\label{skewz}Suppose $\lambda\in\mathbb{N}^{N,+}$ and $T\in Y\left(
\tau\right) $ such that $s_{i}\in W_{\lambda}$ implies $s_{i}v_{T}=-v_{T}$
then the polynomial $f_{\lambda,T}^{a}$ defined by%
\[
f_{\lambda,T}^{a}=\sum_{\alpha^{+}=\lambda}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\mathcal{E}_{+}\left( \alpha,T\right) \zeta
_{\alpha,T},
\]
is $\mathcal{S}_{N}$-alternating, and%
\[
\left\Vert f_{\lambda,T}^{a}\right\Vert ^{2}=\frac{N!}{\#W_{\lambda}}\frac
{1}{\mathcal{E}_{-}\left( \lambda^{R},T\right) }\left\Vert \zeta_{\lambda
,T}\right\Vert ^{2}.
\]
\end{theorem}
\begin{proof}
Fix $i\in\left[ 1,N-1\right] $ and let%
\begin{align*}
A & =\left\{ \alpha:\alpha^{+}=\lambda,\alpha_{i}=\alpha_{i+1}\right\} ,\\
B & =\left\{ \alpha:\alpha^{+}=\lambda,\alpha_{i}>\alpha_{i+1}\right\} .
\end{align*}
Note $\alpha\in B$ implies $\mathrm{inv}\left( s_{i}\alpha\right)
=\mathrm{inv}\left( \alpha\right) +1$. Write%
\begin{multline*}
f_{\lambda,T}^{a} =\sum_{\alpha\in A}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\mathcal{E}_{+}\left( \alpha,T\right) \zeta
_{\alpha,T}\\
+\sum_{\alpha\in B}\left( -1\right) ^{\mathrm{inv}\left( \alpha\right)
}\left( \mathcal{E}_{+}\left( \alpha,T\right) \zeta_{\alpha,T}%
-\mathcal{E}_{+}\left( s_{i}\alpha,T\right) \zeta_{s_{i}\alpha,T}\right) .
\end{multline*}
Suppose $\alpha\in A$ then $r\left( i+1,\alpha\right) =r\left(
i,\alpha\right) +1$ thus the values $r\left( i,\alpha\right) $ and
$r\left( i+1,\alpha\right) $ belong to some interval $\left[ a_{j}%
,b_{j}\right] $ (where $\mathcal{S}_{\left[ a_{j},b_{j}\right] }$ is a
factor of $W_{\lambda}$) and are adjacent entries in some column of $T$, hence
$s_{i}\zeta_{\alpha,T}=-\zeta_{\alpha,T}$. Next let $\alpha\in B$ then the
corresponding term in the sum is $\left( -1\right) ^{\mathrm{inv}\left(
\alpha\right) }\mathcal{E}_{+}\left( \alpha,T\right) \left( \zeta
_{\alpha,T}-\frac{\mathcal{E}_{+}\left( s_{i}\alpha,T\right) }%
{\mathcal{E}_{+}\left( \alpha,T\right) }\zeta_{s_{i}\alpha,T}\right) $, a
scalar multiple of $\zeta_{\alpha,T}-\left( 1+b_{i}\left( \alpha,T\right)
\right) \zeta_{s_{i}\alpha,T}$, by an argument similar to the previous
theorem. This term satisfies $s_{i}f=-f$. Thus $s_{i}f_{\lambda,T}%
^{a}=-f_{\lambda,T}^{a}$. Consider $g=\sum_{w\in\mathcal{S}_{N}}%
\mathrm{sgn}\left( w\right) w\zeta_{\lambda^{R},T}$; since $g$ is
$\mathcal{S}_{N}$-alternating it must equal a constant multiple $\gamma$ of
$f_{\lambda,T}^{a}.$ To find $\gamma$ consider the coefficients of
$x^{\lambda}v_{T}$ in $f_{\lambda,T}^{a}$ and $g$. The coefficient in
$f_{\lambda,T}^{a}$ is $1$ (by definition of $\zeta_{\lambda,T}$). The term
$x^{\lambda}v_{T}$ appears in $w\zeta_{\lambda^{R},T}$ exactly when
$w=w_{1}w_{\lambda^{R}}^{-1}$ for $w_{1}\in W_{\lambda}$. Let $\varepsilon
=\mathrm{sgn}\left( w_{\lambda^{R}}\right) =\left( -1\right)
^{\mathrm{inv}\left( \lambda^{R}\right) }$ (because the length of
$w_{\lambda^{R}}$ is $\mathrm{inv}\left( \lambda^{R}\right) $). Furthermore%
\begin{align*}
\mathrm{sgn}\left( w_{1}w_{\lambda^{R}}^{-1}\right) w_{1}w_{\lambda^{R}%
}^{-1}\zeta_{\lambda^{R},T} & =\mathrm{sgn}\left( w_{1}w_{\lambda^{R}}%
^{-1}\right) w_{1}w_{\lambda^{R}}^{-1}\left( x^{\lambda^{R}}w_{\lambda^{R}%
}v_{T}\right) +h_{1}\\
& =\varepsilon~\mathrm{sgn}\left( w_{1}\right) w_{1}\left( x^{\lambda
}v_{T}\right) +h_{2}\\
& =\varepsilon x^{\lambda}v_{T}+h_{2},%
\end{align*}
where $h_{1}$ and $h_{2}$ are terms of lower order, that is, of the form
$\sum_{\beta\vartriangleleft\lambda}x^{\beta}u_{\beta}$ with $u_{\beta}\in
V_{\tau}$. Thus $g=\varepsilon\left( \#W_{\lambda}\right) f_{\lambda,T}^{a}$ and%
\begin{align*}
\left\Vert f_{\lambda,T}^{a}\right\Vert ^{2} & =\frac{\varepsilon
}{\#W_{\lambda}}\left\langle g,f_{\lambda,T}^{a}\right\rangle =\frac
{\varepsilon}{\#W_{\lambda}}\sum_{w\in\mathcal{S}_{N}}\mathrm{sgn}\left(
w\right) \left\langle w\zeta_{\lambda^{R},T},f_{\lambda,T}^{a}\right\rangle
\\
& =\frac{\varepsilon}{\#W_{\lambda}}\sum_{w\in\mathcal{S}_{N}}\mathrm{sgn}%
\left( w\right) \left\langle \zeta_{\lambda^{R},T},w^{-1}f_{\lambda,T}%
^{a}\right\rangle =\frac{\varepsilon N!}{\#W_{\lambda}}\left\langle
\zeta_{\lambda^{R},T},f_{\lambda,T}^{a}\right\rangle \\
& =\frac{\varepsilon N!}{\#W_{\lambda}}\left( -1\right) ^{\mathrm{inv}%
\left( \lambda^{R}\right) }\mathcal{E}_{+}\left( \lambda^{R},T\right)
\left\Vert \zeta_{\lambda^{R},T}\right\Vert ^{2}\\
& =\frac{N!\mathcal{E}_{+}\left( \lambda^{R},T\right) }{\left(
\#W_{\lambda}\right) \mathcal{E}_{2}\left( \lambda^{R},T\right) }\left\Vert
\zeta_{\lambda,T}\right\Vert ^{2}.
\end{align*}
This completes the proof.
\end{proof}
\subsection{Case: $\#Y\left( T;W_{\lambda}\right) >1$}
Let $T_{0}\in Y\left( \tau\right) $ such that $T_{0}$ satisfies condition
$\left[ a_{i},b_{i}\right] _{\mathrm{cm}}$ for each factor $\mathcal{S}%
_{\left[ a_{i},b_{i}\right] }$ of $W_{\lambda}$ and $a_{i}\leq j_{1}%
<j_{2}\leq b_{i}$ implies $\mathrm{cm}\left( j_{1},T_{0}\right)
>\mathrm{cm}\left( j_{2},T_{0}\right) $.
% NOTE(review): the inequality above was reconstructed; the source lost the
% text between a literal `<' and the next `>' (an HTML-stripping artifact).
% The reconstruction is consistent with the column-strictness of
% $\left\lfloor \lambda,T_{0}\right\rfloor $ and with the example $T^{s}$
% below, but should be confirmed against the published version.
This condition is equivalent to the
tableau $\left\lfloor \lambda,T_{0}\right\rfloor $ being column-strict (the
entries strictly increase in each column, see \cite[p.~5]{Md}, such tableaux
are also called semistandard Young tableaux) and $T_{0}$ has a certain
extremal property among all $T\in Y\left( T_{0};W_{\lambda}\right) $. Let%
\begin{align*}
f_{\lambda,T_{0}}^{s} & =\sum_{\alpha^{+}=\lambda}\sum_{T\in Y\left(
T_{0};W_{\lambda}\right) }\prod\limits_{j=1}^{n}P_{0}\left( T;a_{j}%
,b_{j}\right) \mathcal{E}_{-}\left( \alpha,T\right) \zeta_{\alpha,T},\\
u_{\lambda,T_{0}} & =\sum_{T\in Y\left( T_{0};W_{\lambda}\right) }%
\prod\limits_{j=1}^{n}P_{0}\left( T;a_{j},b_{j}\right) v_{T}\in V_{\tau}.
\end{align*}
The term involving $x^{\lambda}$ is $h_{0}=\sum\limits_{T\in Y\left(
T_{0};W_{\lambda}\right) }\prod\limits_{j=1}^{n}P_{0}\left( T;a_{j}%
,b_{j}\right) \zeta_{\lambda,T}$, thus the leading term in $f_{\lambda,T_{0}%
}^{s}$ is $x^{\lambda}u_{\lambda,T_{0}}$. From the transformation rules in
Proposition~\ref{ai=ai1} it follows that $\left\Vert h_{0}\right\Vert
^{2}=\left\Vert \zeta_{\lambda,T_{0}}\right\Vert ^{2}\left\Vert u_{\lambda
,T_{0}}\right\Vert _{0}^{2}/\left\Vert v_{T_{0}}\right\Vert _{0}^{2}$ (see
Corollary~\ref{svnorm}). Also $h_{0}$ is $W_{\lambda}$-invariant. In the
symbol $f_{\lambda,T_{0}}^{a}$ one could replace $T_{0}$ by any $T\in Y\left(
T_{0};W_{\lambda}\right) $; then $\prod_{j=1}^{n}P_{0}\left( T;a_{j}%
,b_{j}\right) =1$ and $T\in Y\left( T_{0};W_{\lambda}\right) $ implies
$T=T_{0}$.
\begin{theorem}
\label{fsT0}$wf_{\lambda,T_{0}}^{s}=f_{\lambda,T_{0}}^{s}$ for all
$w\in\mathcal{S}_{N}$ and%
\[
\left\Vert f_{\lambda,T_{0}}^{s}\right\Vert ^{2}=\frac{N!}{\#W_{\lambda}}%
\frac{\left\Vert u_{\lambda,T_{0}}\right\Vert _{0}^{2}}{\mathcal{E}_{+}\left(
\lambda^{R},T_{0}\right) \left\Vert v_{T_{0}}\right\Vert _{0}^{2}}\left\Vert
\zeta_{\lambda,T_{0}}\right\Vert ^{2}.
\]
\end{theorem}
\begin{proof}
Let $\mathcal{F}\left( \alpha,T\right) =\prod_{j=1}^{n}P_{0}\left(
T;a_{j},b_{j}\right) \mathcal{E}_{-}\left( \alpha,T\right) $. Fix
$i\in\left[ 1,N-1\right] $ and collect the terms of $f_{\lambda,T_{0}}^{s}$
into three parts. Let
\begin{align*}
L & =\left\{ \left( \alpha,T\right) :\alpha^{+}=\lambda,T\in Y\left(
T_{0};W_{\lambda}\right) \right\} \\
A & =\left\{ \left( \alpha,T\right) \in L:\alpha_{i}=\alpha
_{i+1},\mathrm{rw}\left( r\left( \alpha,i\right) ,T\right) =\mathrm{rw}%
\left( r\left( \alpha,i\right) +1,T\right) \right\} ,\\
B & =\left\{ \left( \alpha,T\right) \in L:\alpha_{i}>\alpha
_{i+1}\right\} ,\\
C & =\left\{ \left( \alpha,T\right) \in L:\alpha_{i}=\alpha
_{i+1},\mathrm{rw}\left( r\left( \alpha,i\right) ,T\right) <\mathrm{rw}%
\left( r\left( \alpha,i\right) +1,T\right) \right\} .
\end{align*}
The first part is $\sum_{\left( \alpha,T\right) \in A}\mathcal{F}\left(
\alpha,T\right) \zeta_{\alpha,T}$ and in this case $s_{i}\zeta_{\alpha
,T}=\zeta_{\alpha,T}$. The second part is%
\begin{multline*}
\sum_{\left( \alpha,T\right) \in B}\left( \mathcal{F}\left(
\alpha,T\right) \zeta_{\alpha,T}+\mathcal{F}\left( s_{i}\alpha,T\right)
\zeta_{s_{i}\alpha,T}\right) \\
=\sum_{\left( \alpha,T\right) \in B}\mathcal{F}\left( \alpha,T\right)
\left( \zeta_{\alpha,T}+\frac{\mathcal{F}\left( s_{i}\alpha,T\right)
}{\mathcal{F}\left( \alpha,T\right) }\zeta_{s_{i}\alpha,T}\right) .
\end{multline*}
Just as in Proposition~\ref{vsym} $\frac{\mathcal{F}\left( s_{i}%
\alpha,T\right) }{\mathcal{F}\left( \alpha,T\right) }=1-b_{i}\left(
\alpha,T\right) $, and hence this sum is $s_{i}$-invariant. For use in $C$
let $I\left( \alpha\right) =\mathrm{rw}\left( \alpha,i\right) $. Then the
third part is%
\begin{multline*}
\sum_{\left( \alpha,T\right) \in C}\left( \mathcal{F}\left(
\alpha,T\right) \zeta_{\alpha,T}+\mathcal{F}\left( \alpha,s_{I\left(
\alpha\right) }T\right) \zeta_{\alpha,s_{I\left( \alpha\right) }T}\right)
\\
=\sum_{\left( \alpha,T\right) \in C}\mathcal{F}\left( \alpha,s_{I\left(
\alpha\right) }T\right) \left( \frac{\mathcal{F}\left( \alpha,T\right)
}{\mathcal{F}\left( \alpha,s_{I\left( \alpha\right) }T\right) }%
\zeta_{\alpha,T}+\zeta_{\alpha,s_{I\left( \alpha\right) }T}\right) .
\end{multline*}
To show that each term is $s_{i}$-invariant we must show
$$\frac{\mathcal{F}%
\left( \alpha,T\right) }{\mathcal{F}\left( \alpha,s_{I\left(
\alpha\right) }T\right) }=b_{I\left( \alpha\right) }+1.$$ Fix such a term.
The equality $\alpha_{i}=\alpha_{i+1}$ implies $\left[ I\left(
\alpha\right) ,I\left( \alpha\right) +1\right] \subset\left[ a_{i}%
,b_{i}\right] $ for some $i$. Thus%
\[
\frac{\prod_{j=1}^{n}P_{0}\left( T;a_{j},b_{j}\right) }{\prod_{j=1}^{n}%
P_{0}\left( s_{I\left( \alpha\right) }T;a_{j},b_{j}\right) }=\frac
{P_{0}\left( T;a_{i},b_{i}\right) }{P_{0}\left( s_{I\left( \alpha\right)
}T;a_{i},b_{i}\right) }=1+b_{I\left( \alpha\right) }\left( T\right) .
\]
Finally consider $\mathcal{E}_{-}\left( \alpha,T\right) /\mathcal{E}%
_{-}\left( \alpha,s_{I\left( \alpha\right) }T\right) $; let%
\[
g_{lj}\left( T\right) =1-\dfrac{\kappa}{\alpha_{j}-\alpha_{l}+\kappa\left(
c\left( r\left( \alpha,j\right) ,T\right) -c\left( r\left(
\alpha,l\right) ,T\right) \right) }
\]
if $l<j$.
% NOTE(review): a span of the source text was lost here, apparently because
% everything between a literal `<' and the next `>' was stripped.  The
% missing span contains the conclusion of this proof, the statement of
% Theorem~\ref{faT0} (the antisymmetric analogue of Theorem~\ref{fsT0},
% referenced below), and the opening of its proof.  The material below is a
% conservative reconstruction by analogy with Theorem~\ref{fsT0} and with
% the discussion following it; confirm against the published version.
\end{proof}
\begin{theorem}
\label{faT0}[Statement lost in the source: the antisymmetric analogue of
Theorem~\ref{fsT0}, concerning $f_{\lambda,T_{0}}^{a}$ and its norm.]
\end{theorem}
\begin{proof}
Let $\mathcal{F}\left( \alpha,T\right) =\prod_{j=1}^{n}P_{1}\left(
T;a_{j},b_{j}\right) \mathcal{E}_{+}\left( \alpha,T\right) $.
% NOTE(review): this definition of $\mathcal{F}$ is inferred from the use of
% $P_{1}$ in $h_{R}$ below and the ratio formulae quoted after this proof;
% confirm.
Fix $i\in\left[ 1,N-1\right] $ and let%
\begin{align*}
L & =\left\{ \left( \alpha,T\right) :\alpha^{+}=\lambda,T\in Y\left(
T_{0};W_{\lambda}\right) \right\} ,\\
A & =\left\{ \left( \alpha,T\right) \in L:\alpha_{i}=\alpha_{i+1}%
,\mathrm{cm}\left( r\left( \alpha,i\right) ,T\right) =\mathrm{cm}\left(
r\left( \alpha,i\right) +1,T\right) \right\} ,\\
B & =\left\{ \left( \alpha,T\right) \in L:\alpha_{i}>\alpha_{i+1}\right\}
,\\
C & =\left\{ \left( \alpha,T\right) \in L:\alpha_{i}=\alpha_{i+1}%
,\mathrm{cm}\left( r\left( \alpha,i\right) ,T\right) <\mathrm{cm}\left(
r\left( \alpha,i\right) +1,T\right) \right\} .
\end{align*}
\end{proof}
The proof that each of the following satisfies $s_{i}f=-f$ is analogous to the
proof of the previous theorem:%
\begin{align*}
& \sum_{\left( \alpha,T\right) \in A}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\mathcal{F}\left( \alpha,T\right) \zeta_{\alpha
,T},\\
& \sum_{\left( \alpha,T\right) \in B}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\left( \mathcal{F}\left( \alpha,T\right)
\zeta_{\alpha,T}-\mathcal{F}\left( s_{i}\alpha,T\right) \zeta_{s_{i}%
\alpha,T}\right) \\
& =\sum_{\left( \alpha,T\right) \in B}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\mathcal{F}\left( \alpha,T\right) \left(
\zeta_{\alpha,T}-\frac{\mathcal{F}\left( s_{i}\alpha,T\right) }%
{\mathcal{F}\left( \alpha,T\right) }\zeta_{s_{i}\alpha,T}\right) ,\\
& \sum_{\left( \alpha,T\right) \in C}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\left( \mathcal{F}\left( \alpha,T\right)
\zeta_{\alpha,T}+\mathcal{F}\left( \alpha,s_{I\left( \alpha\right)
}T\right) \zeta_{\alpha,s_{I\left( \alpha\right) }T}\right) \\
& =\sum_{\left( \alpha,T\right) \in C}\left( -1\right) ^{\mathrm{inv}%
\left( \alpha\right) }\mathcal{F}\left( \alpha,s_{I\left( \alpha\right)
}T\right) \left( \frac{\mathcal{F}\left( \alpha,T\right) }{\mathcal{F}%
\left( \alpha,s_{I\left( \alpha\right) }T\right) }\zeta_{\alpha,T}%
+\zeta_{\alpha,s_{I\left( \alpha\right) }T}\right) .
\end{align*}
In the second equation $\frac{\mathcal{F}\left( s_{i}\alpha,T\right)
}{\mathcal{F}\left( \alpha,T\right) }=1+b_{i}\left( \alpha,T\right) $. In
the third equation $I=r\left( \alpha,i\right) $ and $\frac{\mathcal{F}%
\left( \alpha,T\right) }{\mathcal{F}\left( \alpha,s_{I\left(
\alpha\right) }T\right) }=b_{I\left( \alpha\right) }-1$. The proof for the
norm formula is also analogous, based on $\sum_{w\in\mathcal{S}_{N}}wh_{R}$
where
$$h_{R}=\sum\limits_{T\in Y\left( T_{0};W_{\lambda}\right)
}\prod\limits_{j=1}^{n}P_{1}\left( T;a_{j},b_{j}\right) \zeta_{\lambda
^{R},T}.$$
Note $\mathrm{sgn}\left( w_{\lambda^{R}}\right) =\left(
-1\right) ^{\mathrm{inv}\left( \lambda^{R}\right) }$.
\begin{remark}
The polynomials in Theorems~\ref{fsT0} and \ref{faT0} form orthogonal bases
for the symmetric and antisymmetric polynomials, respectively, in $M\left(
\tau\right) $.
\end{remark}
\subsection{Minimum degree polynomials}
For a given partition $\tau$ of $N$ there are unique symmetric and
antisymmetric polynomials of minimum degree in the standard module
$\mathcal{M}\left( \tau\right) $. We now establish the key results
concerning the norms of these polynomials. It is obvious that the
column-strict tableau $\left\lfloor \lambda,T\right\rfloor $ with minimum
$\left\vert \lambda\right\vert $ has the entries $0$ in row \#1, $1$ in row
\#2 and so on (consider the minimum entries in each column). Denote this
partition by $\delta^{s}\left( \tau\right) $ and the unique possible $T$ by
$T^{s}$ (the entries $N,N-1,\ldots,2,1$ are entered row-by-row in the Ferrers
diagram of $\tau$). Example: let $\tau=\left( 5,3,2\right) $ then
\[
T^{s}=%
\begin{array}
[c]{ccccc}%
10 & 9 & 8 & 7 & 6\\
5 & 4 & 3 & & \\
2 & 1 & & &
\end{array}
,\left\lfloor \delta^{s}\left( \tau\right) ,T^{s}\right\rfloor =%
\begin{array}
[c]{ccccc}%
0 & 0 & 0 & 0 & 0\\
1 & 1 & 1 & & \\
2 & 2 & & &
\end{array}
,
\]
and $\delta^{s}\left( \tau\right) =\left( 2,2,1,1,1,0,0,0,0,0\right) $.
Similarly the row-strict tableau $\left\lfloor \lambda,T\right\rfloor $ with
minimum $\left\vert \lambda\right\vert $ has the entries $0$ in column \#1,
$1$ in column \#2 and so on (consider the minimum entries in each row). Denote
this partition by $\delta^{a}\left( \tau\right) $ and the unique possible
$T$ by $T^{a}$ (the entries $N,N-1,\ldots,2,1$ are entered column-by-column in
the Ferrers diagram of $\tau$). Example: let $\tau=\left( 5,3,2\right) $
then%
\[
T^{a}=%
\begin{array}
[c]{ccccc}%
10 & 7 & 4 & 2 & 1\\
9 & 6 & 3 & & \\
8 & 5 & & &
\end{array}
,\left\lfloor \delta^{a}\left( \tau\right) ,T^{a}\right\rfloor =%
\begin{array}
[c]{ccccc}%
0 & 1 & 2 & 3 & 4\\
0 & 1 & 2 & & \\
0 & 1 & & &
\end{array}
,
\]
and $\delta^{a}\left( \tau\right) =\left( 4,3,2,2,1,1,1,0,0,0\right) $.
The sum of the hook-lengths of $\tau$ equals $\left\vert \delta^{s}\left(
\tau\right) \right\vert +\left\vert \delta^{a}\left( \tau\right)
\right\vert +N$ (see \cite[Ex.~2, p.~11]{Md}).
Let $f_{\tau}^{s}=f_{\delta^{s}\left( \tau\right) ,T^{s}}^{s}$ and $f_{\tau
}^{a}=f_{\delta^{a}\left( \tau\right) ,T^{a}}^{a}$. These polynomials are
actually independent of $\kappa$; there is no composition $\alpha$ such that
$\alpha\vartriangleleft\delta^{s}\left( \tau\right) $ and $\alpha^{+}%
\neq\delta^{s}\left( \tau\right) $ which can occur in a symmetric
polynomial, due to the minimality of $\delta^{s}\left( \tau\right) $. A
similar argument applies to $\delta^{a}\left( \tau\right) $. To compute the
norms $\left\Vert f_{\tau}^{s}\right\Vert ^{2}$ and $\left\Vert f_{\tau}%
^{a}\right\Vert ^{2}$ we use the special properties of $\delta^{s}\left(
\tau\right) $ to write simplified formulae. To use the formulae in Theorems~\ref{normz} and \ref{symz} note that $\delta^{s}\left( \tau\right) _{j}=i-1$
when $j$ appears in row $\#i$ of $T^{s}$, and the corresponding contents of
$T^{s}$ are $1-i,\ldots,\tau_{i}-i$. Let $L=\ell\left( \tau\right) $ and%
\begin{align*}
P_{1}\left( \tau\right) & =\prod_{i=2}^{L}\prod_{j=1}^{\tau_{i}}\left(
1+\kappa\left( j-i\right) \right) _{i-1},\\
P_{2}\left( \tau\right) & =\prod_{1\leq i<j\leq L}\text{[factor lost in
the source]}.
\end{align*}
% NOTE(review): a span of the source text was lost here, apparently because
% everything between a literal `<' and the next `>' was stripped.  The
% missing span contains the rest of the displayed formula for $P_{2}\left(
% \tau\right) $, presumably the simplified norm formulae for $f_{\tau}^{s}$
% and $f_{\tau}^{a}$, and the full statement of the following proposition
% (whose label, from the surviving fragment, appears to have been
% \label{ai>ai1}).  Restore from the published version.
\begin{proposition}
\label{ai>ai1}Suppose $\alpha\in\mathbb{N}^{N}$ and $\alpha_{i}>\alpha_{i+1}$ for some
$i<N$. [Remainder of the statement lost in the source.]
\end{proposition}
\begin{proof}
Suppose the partition $\lambda$ satisfies $\lambda_{m}>\lambda_{m+1}$. Let%
\begin{align*}
\beta & =\left( \lambda_{1},\ldots,\lambda_{m-1},\lambda_{m+1}%
,\ldots,\lambda_{N},\lambda_{1}\right) ,\\
\alpha & =\left( \lambda_{1}-1,\lambda_{1},\ldots,\lambda_{m-1}%
,\lambda_{m+1},\ldots,\lambda_{N}\right) ,\\
\mu & =\left( \lambda_{1},\ldots,\lambda_{m-1},\lambda_{1}-1,\lambda
_{m+1},\ldots,\lambda_{N}\right) .
\end{align*}
Thus $\beta=\phi\left( \alpha\right) $ and%
\begin{align*}
\left\Vert \zeta_{\beta,T}\right\Vert ^{2} & =\left( \lambda_{1}+\kappa
c\left( m,T\right) \right) \left\Vert \zeta_{\alpha,T}\right\Vert ^{2}\\
& =\left( \lambda_{1}+\kappa c\left( m,T\right) \right) \mathcal{E}%
_{2}\left( \alpha,T\right) ^{-1}\left\Vert \zeta_{\mu,T}\right\Vert ^{2},\\
\left\Vert \zeta_{\lambda,T}\right\Vert ^{2} & =\mathcal{E}_{2}\left(
\beta,T\right) \left\Vert \zeta_{\beta,T}\right\Vert ^{2}.
\end{align*}
We have%
\begin{align*}
\mathcal{E}_{\varepsilon}\left( \alpha,T\right) & =\prod\limits_{j=2}%
^{m}\left( 1+\frac{\varepsilon\kappa}{1+\kappa\left( c\left( j-1,T\right)
-c\left( m,T\right) \right) }\right) ,\\
\mathcal{E}_{\varepsilon}\left( \beta,T\right) & =\prod\limits_{j=m+1}%
^{N}\left( 1+\frac{\varepsilon\kappa}{\lambda_{1}-\lambda_{j}+\kappa\left(
c\left( m,T\right) -c\left( j,T\right) \right) }\right) .
\end{align*}
The validity of the formula for $\left\Vert \zeta_{\mu,T}\right\Vert ^{2}$
thus implies the validity for $\left\Vert \zeta_{\lambda,T}\right\Vert ^{2}$
(that is, the value of $\left\Vert \zeta_{\lambda,T}\right\Vert ^{2}%
/\left\Vert \zeta_{\mu,T}\right\Vert ^{2}$ from the formula agrees with
$\left( \lambda_{1}+\kappa c\left( m,T\right) \right) \dfrac
{\mathcal{E}_{2}\left( \beta,T\right) }{\mathcal{E}_{2}\left(
\alpha,T\right) }$).
\end{proof}
\begin{thebibliography}{99} %
\bibitem {BF}T. Baker and P. Forrester, Symmetric Jack polynomials from
non-symmetric theory, \textit{Ann. Comb.} \textbf{3} (1999), 159--170.
\bibitem {BE}R. Bezrukavnikov and P. Etingof, Parabolic induction and
restriction functors for rational Cherednik algebras, \textit{Selecta Math.
(N.S.)} \textbf{14} (2009), 397--425; {\tt ar$\chi$iv:0803.3639v6},
28 Oct.\ 2009.
\bibitem {DX}C. Dunkl and Y. Xu, \textit{Orthogonal Polynomials of Several
Variables}, Encycl. of Math. and its Applications \textbf{81}, Cambridge
University Press, Cambridge, 2001.
\bibitem {ESG}P. Etingof and E. Stoica, with an appendix by S. Griffeth,
Unitary representations of rational Cherednik algebras, \textit{Represent.
Theory} \textbf{13} (2009), 349--370; {\tt ar$\chi$iv:0901.4595v3},
20 Mar.\ 2009.
\bibitem {GS}I. Gordon and J. Stafford, Rational Cherednik algebras and
Hilbert schemes, \textit{Adv. Math.} \textbf{198} (2005), 222--274.
\bibitem {G}S. Griffeth, Orthogonal functions generalizing Jack polynomials,
\textit{Trans. Amer. Math. Soc.} \textbf{362} (2010), 6131--6157;
{\tt ar$\chi$iv:0707.0251v3}, 9 Nov.\ 2008.
\bibitem {Md}I. Macdonald, \textit{Symmetric Functions and Hall Polynomials,
}2nd ed., Clarendon Press, Oxford, 1995.
\bibitem {M}G. Murphy, A new construction of Young's seminormal representation
of the symmetric groups, \textit{J. Algebra} \textbf{69} (1981), 287--297.
\bibitem {OV}A. Okounkov and A. Vershik, A new approach to representation
theory of the symmetric groups, II, {\tt ar$\chi$iv:math/0503040v3}, 20 April 2005,
(translation from Russian) \textit{J. Math. Sci. (N.Y.)} \textbf{131} (2005), 5471--5494.
\bibitem {O}E. Opdam, Dunkl operators, Bessel functions and the discriminant
of a finite Coxeter group, \textit{Compositio Math.} \textbf{85} (1993), 333--373.
\bibitem {R}R. Rouquier, Representations of rational Cherednik algebras,
Infinite-dimensional aspects of representation theory and applications,
103--131, \textit{Contemp. Math.}, \textbf{392}, Amer. Math. Soc., Providence,
RI, 2005.
\end{thebibliography}
\end{document}