-rw-r--r-- | bezout.bib |    7
-rw-r--r-- | bezout.tex |    6
-rw-r--r-- | cover.tex  | 1358
3 files changed, 1367 insertions, 4 deletions
@@ -294,7 +294,12 @@ url = {https://doi.org/10.2307%2F2371510}, doi = {10.2307/2371510} } - +@book{mezard2009information, + title={Information, physics, and computation}, + author={Mezard, Marc and Montanari, Andrea}, + year={2009}, + publisher={Oxford University Press} +} @inproceedings{Scorzato_2016_The, author = {Scorzato, Luigi}, title = {The {Lefschetz} thimble and the sign problem}, @@ -44,7 +44,7 @@ Spin-glasses have long been considered the paradigm of many variable `complex landscapes,' a subject that includes neural networks and optimization problems, -most notably constraint satisfaction. The most tractable family of these +most notably constraint satisfaction \cite{mezard2009information}. The most tractable family of these are the mean-field spherical $p$-spin models \cite{Crisanti_1992_The} (for a review see \cite{Castellani_2005_Spin-glass}) defined by the energy \begin{equation} \label{eq:bare.hamiltonian} @@ -447,8 +447,8 @@ dynamics, are a problem we hope to address in future work. This paper provides a first step towards the study of a complex landscape with complex variables. The next obvious one is to study the topology of the - critical points, their basins of attraction following gradient ascent (the - Lefschetz thimbles), and descent (the anti-thimbles) \cite{Witten_2010_A, + critical points, the sets reached following gradient descent (the + Lefschetz thimbles), and ascent (the anti-thimbles) \cite{Witten_2010_A, Witten_2011_Analytic, Cristoforetti_2012_New, Behtash_2017_Toward, Scorzato_2016_The}, which act as constant-phase integrating `contours.' Locating and counting the saddles that are joined by gradient lines---the diff --git a/cover.tex b/cover.tex new file mode 100644 index 0000000..586fa38 --- /dev/null +++ b/cover.tex @@ -0,0 +1,1358 @@ + +\documentclass[12pt,reqno,a4paper,twoside]{article} +% \ProvidesPackage{makra} +\usepackage{amsmath,amsthm,amstext,amscd,amssymb,euscript} +%,showkeys} +%,times} +\usepackage{epsf} +\usepackage{color} +\usepackage{verbatim} +\usepackage{graphicx} +\usepackage{esint} +\usepackage{tikz} +\usepackage{setspace} +\usepackage{mathrsfs} + +\usepackage{todonotes} + +%\usepackage{natbib} + + + +\usepackage{bm} +\usepackage[normalem]{ulem} + + +\textwidth 6in +\topmargin -0.50in +\oddsidemargin 0in +\evensidemargin 0in +\textheight 9.00in +%\pagestyle{plain} +%%%%%%%%%%%%%%%%%% Macros %%%%%%%%%%% +\def\mybox #1{\fbox{\parbox{5.8in}{#1}}} +\newcommand{\m}[1]{{\marginpar{\scriptsize #1}}} + +\def\mep{\mathbf{mep}_{n}^{\delta}} +\def\r{{\mathbf r}} +\def\O{{\mathcal{O}}} + +\def\I{{\mathcal{I}}} +\def\fee{\mathcal{F}} + +\def\F{{\EuScript{F}}} + +\renewcommand{\phi}{\varphi} +\newcommand{\compose}{\circ} +\renewcommand{\subset}{\subseteq} +\renewcommand{\emptyset}{\varnothing} +\newcommand{\interval}{[\underline\alpha,\overline \alpha]} +\def\liminfn{\liminf_{n\to\infty}} +\def\limsupn{\limsup_{n\to\infty}} +\def\limn{\lim_{n\to\infty}} +\def\disagree{\not\longleftrightarrow} +\newcommand{\Zd}{\mathbb Z^d} +\newcommand{\kk}{\mathbf k} +\renewcommand{\Pr}{\mathbb P} +\newcommand{\dist}{\text{dist}} +\newcommand{\Cal}{\mathcal} +\def\1{ {\mathit{1} \!\!\>\!\! 
I} } +\newcommand\s{{\mathbf s}} + +\newcommand{\red}[1]{{\color{red} #1}} +\newcommand{\blue}[1]{{\color{blue} #1}} + +\newcommand{\eps}{\varepsilon} +\newcommand{\C}{{\mathcal C}} +\newcommand{\Y}{{\mathcal Y}} +\newcommand{\NN}{{\mathcal N}} +\newcommand{\grad}{\nabla} +\newcommand{\WW}{\mathbb W} +\newcommand{\D}{{\mathcal D}} +\newcommand{\HH}{{\mathcal H}} +\newcommand{\bol}[1]{{\boldsymbol{#1}}} + + +% \smallmatrix{ +% 0& \cr +% N&\ddots \cr +% &\ddots&\ddots \cr +% & & 1 & 0 \cr +% } +\newcommand{\ddp}[2]{\frac{\partial {#1}}{\partial {#2}}} + +\renewcommand{\brace}[1]{{ \bol{#1}}} + +\newcommand{\ket}[1]{|{#1}\rangle} + +\newcommand{\eq}{{\textnormal{eq}}} + +\renewcommand{\em}{\it} + +%%%%%%% FRANK'S MACROS + +\parskip=3pt plus 1pt minus 1pt + +\newcommand{\halmos}{\rule{1ex}{1.4ex}} + +\makeatletter +\@addtoreset{equation}{section} +\makeatother + +\renewcommand{\theequation}{\thesection.\arabic{equation}} + +\newtheorem{ittheorem}{Theorem} +\newtheorem{itlemma}{Lemma} +\newtheorem{itproposition}{Proposition} +\newtheorem{itdefinition}{Definition} +\newtheorem{itremark}{Remark} + +%\renewcommand{\theittheorem}{\thesection.\arabic{equation}} +%\renewcommand{\theitlemma}{\thesection.\arabic{equation}} +%\renewcommand{\theitproposition}{\thesection.\arabic{equation}} +%\renewcommand{\theitdefinition}{\thesection.\arabic{equation}} +%\renewcommand{\theitremark}{\thesection.\arabic{equation}} + +\newenvironment{theorem}{\addtocounter{equation}{1} +\begin{ittheorem}}{\end{ittheorem}} + +\newenvironment{lemma}{\addtocounter{equation}{1} +\begin{itlemma}}{\end{itlemma}} + +\newenvironment{proposition}{\addtocounter{equation}{1} +\begin{itproposition}}{\end{itproposition}} + +\newenvironment{definition}{\addtocounter{equation}{1} +\begin{itdefinition}}{\end{itdefinition}} + +\newenvironment{remark}{\addtocounter{equation}{1} +\begin{itremark}}{\end{itremark}} + +%\newenvironment{proof}{\noindent {\em Proof}.\,\,\,} +%{\hspace*{\fill}$\halmos$\medskip} + +\newcommand{\beq}{\begin{eqnarray}} +\newcommand{\eeq}{\end{eqnarray}} + +\newcommand{\be}{\begin{equation}} +\newcommand{\ee}{\end{equation}} + +\newcommand{\bl}{\begin{lemma}} +\newcommand{\el}{\end{lemma}} + +\newcommand{\br}{\begin{remark}} +\newcommand{\er}{\end{remark}} + +\newcommand{\bt}{\begin{theorem}} +\newcommand{\et}{\end{theorem}} + +\newcommand{\bd}{\begin{definition}} +\newcommand{\ed}{\end{definition}} + +\newcommand{\bp}{\begin{proposition}} +\newcommand{\ep}{\end{proposition}} + + + +\newcommand{\bc}{\begin{corollary}} +\newcommand{\ec}{\end{corollary}} + +\newcommand{\bpr}{\begin{proof}} +\newcommand{\epr}{\end{proof}} + +\newcommand{\bi}{\begin{itemize}} +\newcommand{\ei}{\end{itemize}} + +\newcommand{\ben}{\begin{enumerate}} +\newcommand{\een}{\end{enumerate}} + + +%%%%%%%%%%%%% SYMBOLS %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + +\newcommand{\Z}{\mathbb Z} +\newcommand{\R}{\mathbb R} +\newcommand{\N}{\mathbb N} +%\newcommand{\C}{\mathbb C} + +\newcommand{\Q}{\mathbb Q} +\newcommand{\E}{\mathbb E} +\newcommand{\T}{\mathbf T} +\newcommand{\M}{\mathbf M} +\newcommand{\ret}{\mathbf R} +\newcommand{\veee}{\mathbb V} +\newcommand{\veek}{\ensuremath{\mathcal{V}}} +\newcommand{\gee}{\ensuremath{\mathcal{G}}} +\newcommand{\pee}{\ensuremath{\mathbb{P}}} +\newcommand{\re}{\ensuremath{\mathcal{R}}} +\newcommand{\peee}{\ensuremath{\mathcal{P}}} +\newcommand{\ce}{\ensuremath{\mathcal{C}}} +\newcommand{\cee}{\ensuremath{{\bf \mathcal{C}_1}}} +\newcommand{\uu}{\ensuremath{\mathcal{U}}} +%\newcommand{\s}{\ensuremath{\mathcal{S}}} 
+\newcommand{\loc}{\ensuremath{\mathcal{L}}} +\newcommand{\iii}{\ensuremath{\mathcal{I}}} +\newcommand{\bee}{\ensuremath{\mathcal{B}}} +\newcommand{\weee}{\ensuremath{\mathbf W}} +\newcommand{\fe}{\ensuremath{\mathcal{F}}} +\newcommand{\mee}{\ensuremath{\mathcal{M}}} +\newcommand{\U}{\ensuremath{\mathcal{U}}} +\newcommand{\na}{\ensuremath{N^{t,x}_\varphi}} +\newcommand{\aaa}{\ensuremath{\mathcal{A}}} +\newcommand{\anepsi}{\ensuremath{{[A_n]^{\epsilon} }}} +\newcommand{\vi}{\ensuremath{\varphi}} +\newcommand{\La}{\ensuremath{\Lambda}} +\newcommand{\la}{\ensuremath{\Lambda}} +\newcommand{\si}{\ensuremath{\sigma}} +\newcommand{\om}{\ensuremath{\omega}} +\newcommand{\epsi}{\ensuremath{\epsilon}} +\newcommand{\gap}{\ensuremath{\Delta}} +\newcommand{\QED}{\hspace*{\fill}$\Box$\medskip} +\newcommand{\prodpee}{\ensuremath{\mathbb{P}\times\mathbb{P}}} +\newcommand{\prodE}{\ensuremath{\mathbb{E}\times\mathbb{E}}} +\newcommand{\tr}{\ensuremath{\mbox{Tr}}} +\newcommand{\caZ}{\ensuremath{\mathcal{Z}}} +\newcommand{\Ga}{\ensuremath{\Gamma}} +\newcommand{\ga}{\ensuremath{\gamma}} +\newcommand{\al}{\ensuremath{\alpha}} +\newcommand{\shit}{\ensuremath{[-\|X \|, \|X \|]}} +\newcommand{\haa}{\ensuremath{A+tB}} +\newcommand{\sas}{\ensuremath{\delta g}} +\newcommand{\lap}{\pee_{V_n|X_1,\ldots,X_{n-1}} (dx)} +\newcommand{\qak}{\mathbb{Q}} +\newcommand{\de}{\delta} +\newcommand{\pot}{\pee^{\scriptscriptstyle{[0,t]}}} +\newcommand{\pott}{\peet^{\scriptscriptstyle{[0,t]}}} + +\newcommand{\KK}{\mathbb{S}} + + +\def\vnim #1{ \begin{equation*}\boxed{\mbox{\Large #1}}\end{equation*} } +%%%%%%%%%%%%%%%%%% Current time %%% +\def\now{ +\ifnum\time<60 + 12:\ifnum\time<10 0\fi\number\time am + \else + \ifnum\time>719\chardef\a=`p\else\chardef\a=`a\fi + \hour=\time + \minute=\time + \divide\hour by 60 %\hour is the result of an integer divide + \ifnum\hour>12\advance\hour by -12\advance\minute by-720 \fi + \number\hour:% + \multiply\hour by 60 %Use is made of the integer divide here. + \advance\minute by -\hour + \ifnum\minute<10 0\fi\number\minute\a m\fi} +\newcount\hour +\newcount\minute +%%%%%%%%%%%%%%%%%% Environment %%%%%%% +\numberwithin{equation}{section} %%%% nomer uravnenia vkulchaet + %%%% nomer razdela, ex. 
(1.2) +\newtheorem{thm}{Theorem}[section] +\newtheorem{lem}[thm]{Lemma} +\newtheorem{defn}[thm]{Definition} +\theoremstyle{remark} +\newtheorem{rem}{Remark}[section] +\newtheorem{cor}[thm]{Corollary} +%%%%%%%%% + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% HEADINGS + +\def\t{{\bf t}} %%%%%%%%%% hitting time +\def\r{{\bf r}} %%%%%%%%%% recurrence time +\def\w{{\bf w}} %%%%%%%%%% waiting time + +\newcommand{\caA}{{\mathcal A}} +\newcommand{\caB}{{\mathcal B}} +\newcommand{\caC}{{\mathcal C}} +\newcommand{\caD}{{\mathcal D}} +\newcommand{\caE}{{\mathcal E}} +\newcommand{\caF}{{\mathcal F}} +\newcommand{\caG}{{\mathcal G}} +\newcommand{\caH}{{\mathcal H}} +\newcommand{\caI}{{\mathcal I}} +\newcommand{\caJ}{{\mathcal J}} +\newcommand{\caK}{{\mathcal K}} +\newcommand{\caL}{{\mathcal L}} +\newcommand{\caM}{{\mathcal M}} +\newcommand{\caN}{{\mathcal N}} +\newcommand{\caO}{{\mathcal O}} +\newcommand{\caP}{{\mathcal P}} +\newcommand{\caQ}{{\mathcal Q}} +\newcommand{\caR}{{\mathcal R}} +\newcommand{\caS}{{\mathcal S}} +\newcommand{\caT}{{\mathcal T}} +\newcommand{\caU}{{\mathcal U}} +\newcommand{\caV}{{\mathcal V}} +\newcommand{\caW}{{\mathcal W}} +\newcommand{\caX}{{\mathcal X}} +\newcommand{\caY}{{\mathcal Y}} +\newcommand{\un}{\underline} +\newcommand{\Lt}{\tilde{L}} +\newcommand{\ct}{\tilde{c}} +\newcommand{\peet}{\tilde{\pee}} +\newcommand{\mut}{\tilde{\mu}} +\newcommand{\pt}{\tilde{p}} +\newcommand{\qaz}{\mathbb{L}} +\newcommand{\bix}{\vec{x}} +\newcommand{\muT}{\mu_{T_L,T_R}} +%%%%%%%%% + +% \usepackage{verbatim} +% \usepackage[active,tightpage]{preview} +% \setlength\PreviewBorder{5pt}% +% %%%> +% +% \usepackage{ifthen} +% \usepackage{amsmath} +\usetikzlibrary{arrows,calc,intersections} + + +\newcommand{\note}[1]{\todo[inline, color=white]{#1}} +\newcommand{\col}[1]{\color{magenta} {#1}} +\newcommand{\colo}[1]{\color{red} {#1}} + + + + +%%%%%%%%%%%%%%%%%%%%%%5 + +% \newcommand{\quiver}{ +% \begin{tikzpicture} +% \foreach \a in {1,2,3,4,5,6,7,8,9,10,11,12,13,14,15} { +% \begin{scope}[shift={(0.7*\a,0)}] +% \draw (0.3*\a,0) circle (0.3cm); +% \draw[black,thick] (0.3*\a+0.3,0)--(0.3*\a+0.7,0); +% \node [below] at (0.3*\a,-0.5) {$\a$}; +% \end{scope} +% } +% \draw (16,0) circle (0.3cm); +% \node [below] at (16,-0.5) {$16$}; +% \foreach \a in {10,9,8,7,6,5,4,3,2,1} { +% \begin{scope}[shift={(17-\a,0)}] +% \node at (0,0) {$\a$}; +% \end{scope} +% } +% \node at (1,0) {$4$}; +% \node at (2,0) {$8$}; +% \node at (3,0) {$9$}; +% \node at (4,0) {$10$}; +% \node at (5,0) {$10$}; +% \node at (6,0) {$10$}; +% \draw[black,thick] (2,0.3) -- (2,0.7); +% \draw (1.7,0.7) rectangle (2.3,1.3); +% \node at (2,1) {$3$}; +% \draw[black,thick] (4,0.3) -- (4,0.7); +% \draw (3.7,0.7) rectangle (4.3,1.3); +% \node at (4,1) {$1$}; +% \draw[black,thick] (7,0.3) -- (7,0.7); +% \draw (6.7,0.7) rectangle (7.3,1.3); +% \node at (7,1) {$1$}; +% \end{tikzpicture} +% } + +% \usepackage{pgfplots} + + +\newcommand{\graph}{ +\begin{tikzpicture}[scale=.7] +% \pgfplotsset{% +% width=.2\textwidth, +% height=0.5\textwidth +% } +\draw[gray, thick] (1,-1.5)--(0,0) -- (2,3)--(4,3.4)--(5.0,2)--(4.9,1)--(3.0,-1.2)--(1,-1.5); +\tikzstyle myBG=[line width=3.5pt,opacity=1.0] +% +\draw[white,myBG] (3.2,1.5) -- (5.2,3); +\draw[gray, thick] (3.2,1.5) -- (5.2,3); +% +\draw[gray, thick] (0,0)--(-1.5,0); +\draw[gray, thick] (3,-1.2)--(4,-2); +\draw[gray, thick] (4,3.4)--(4.3,4.6); +\draw[gray, thick] (1,-1.5)--(2,0.2)--(0,0); +\draw[gray, thick] (2,0.2)--(4.9,1); +\draw[gray, thick] (3,-1.2)--(2,0.2)--(3.2,1.5)--(2,3); +\draw[gray, thick] (3.2,1.5)--(4,3.4); 
+\draw[gray, thick] (3.2,1.5)--(5,2); +\draw[gray, thick] (4,3.4)--(5.2,3)--(5.7,2.2)--(5.7,1.3)--(4.9,1); +\draw[gray, thick] (5.7,2.2)--(5,2); +% +% +% +\filldraw[black!70] (0,0) circle (2pt); +\filldraw[black!70] (1,-1.5) circle (2pt); +\filldraw[black!70] (2,3) circle (2pt); +\filldraw[black!70] (4,3.4) circle (2pt); +\filldraw[black!70] (5,2) circle (2pt); +\filldraw[black!70] (4.9,1) circle (2pt); +\filldraw[black!70] (2,0.2) circle (2pt); +\filldraw[black!70] (3.2,1.5) circle (2pt); +% \filldraw[black!70] (5.7,1.3) circle (2pt); +\filldraw[black!70] (5.7,2.2) circle (2pt); +\filldraw[black!70] (5.2,3) circle (2pt); +\filldraw[black!70] (3,-1.2) circle (2pt); +% +% +\filldraw[black!70] (-1.5,0) circle (2pt) node[anchor=east] {$\Gamma_3,T_3$}; +\filldraw[white] (-1.5,0) circle (1pt); +\filldraw[black!70] (4,-2) circle (2pt) node[anchor=west] {$\Gamma_2,T_2$}; +\filldraw[white] (4,-2) circle (1pt); +\filldraw[black!70] (4.3,4.6) circle (2pt) node[anchor=west] {$\Gamma_1,T_1$}; +\filldraw[white] (4.3,4.6) circle (1pt) ; +\end{tikzpicture} +} +% + +\newcommand{\graphh}{ +\begin{tikzpicture}[scale=.7] +\draw[gray, thick] (1,-1.5)--(0,0) -- (2,3)--(4,3.4)--(5.0,2)--(4.9,1)--(3.0,-1.2)--(1,-1.5); +\tikzstyle myBG=[line width=3.5pt,opacity=1.0] +% +\draw[white,myBG] (3.2,1.5) -- (5.2,3); +\draw[gray, thick] (3.2,1.5) -- (5.2,3); +% +\draw[gray, thick] (0,0)--(-1.5,0); +\draw[gray, thick] (3,-1.2)--(4,-2); +\draw[gray, thick] (4,3.4)--(4.3,4.6); +\draw[gray, thick] (1,-1.5)--(2,0.2)--(0,0); +\draw[gray, thick] (2,0.2)--(4.9,1); +\draw[gray, thick] (3,-1.2)--(2,0.2)--(3.2,1.5)--(2,3); +\draw[gray, thick] (3.2,1.5)--(4,3.4); +\draw[gray, thick] (3.2,1.5)--(5,2); +\draw[gray, thick] (4,3.4)--(5.2,3)--(5.7,2.2)--(5.7,1.3)--(4.9,1); +\draw[gray, thick] (5.7,2.2)--(5,2); +% +% +% +\filldraw[black!70] (0,0) circle (2pt); +\filldraw[black!70] (1,-1.5) circle (2pt); +\filldraw[black!70] (2,3) circle (2pt); +\filldraw[black!70] (4,3.4) circle (2pt); +\filldraw[black!70] (5,2) circle (2pt); +\filldraw[black!70] (4.9,1) circle (2pt); +\filldraw[black!70] (2,0.2) circle (2pt); +\filldraw[black!70] (3.2,1.5) circle (2pt); +% \filldraw[black!70] (5.7,1.3) circle (2pt); +\filldraw[black!70] (5.7,2.2) circle (2pt); +\filldraw[black!70] (5.2,3) circle (2pt); +\filldraw[black!70] (3,-1.2) circle (2pt); +% +% +\filldraw[black!70] (-1.5,0) circle (2pt) node[anchor=east] {$\Gamma_3,T$}; +\filldraw[white] (-1.5,0) circle (1pt); +\filldraw[black!70] (4,-2) circle (2pt) node[anchor=west] {$\Gamma_2,T$}; +\filldraw[white] (4,-2) circle (1pt); +\filldraw[black!70] (4.3,4.6) circle (2pt) node[anchor=west] {$\Gamma_1,T$}; +\filldraw[white] (4.3,4.6) circle (1pt) ; +\end{tikzpicture} +} + +\setstretch{1.24} + +\begin{document} + +\title{{\bf COVER LETTER \\`Complex complex landscapes'}} +%\footnote{{\bf Key-words}: } +%}} + +\author{ +Jaron Kent-Dobias + and +Jorge Kurchan +} + +\maketitle + + + +\vspace{1.cm} + + +The subject of `Complex Landscapes', which started in the spin-glass literature, is concerned with functions (landscapes) of many variables, having a multiplicity of minimums, which are the objects of interest. Apart from its obvious interest for glassy systems, it has found a myriad applications in many domains: Computer Science, Ecology, Economics, Biology \cite{mezard2009information}. + +In the last few years, a renewed interest has developed for landscapes for which the variables are complex. 
There are a few reasons for this: {\em i)} in Computational Physics, there is the main obstacle of the `sign problem', and a strategy has emerged to attack it deforming the sampling space into complex variables. This is a most natural and promising path, and any progress made will have game-changing impact in solid state physics and lattice-QCD \cite{Cristoforetti_2012_New,Scorzato_2016_The}. +{\em ii)} At a more basic level, following the seminal work of E. Witten \cite{Witten_2010_A,Witten_2011_Analytic}, there has been a flurry of activity concerning the very definition of quantum mechanics, which requires also that one move into the complex plane. + +In all these cases, just like in the real case, one needs to know the structure of the `landscape', where are the saddle points and how they are connected, typical questions of `complexity'. +However, to the best of our knowledge, there are no studies extending the methods of the theory of +complexity to +complex variables. +We believe our paper will open a field that may find +numerous applications and will widen our theoretical view of complexity in general. + + +\bibliography{bezout} + + +\end{document} + + + + + + + + + + + + + + + +\section{The Kipnis-Marchioro-Presutti model} + +Consider the following process: +\begin{itemize} +\item +choose a pair of neighbouring sites and completely +exchange energy between them +\item +if the site is one of the borders, exchange completely energy with the bath. +\end{itemize} +each choice with probability $1/(N+1)$. From here onwards, we shall denote +$\tau$ a large time, sufficient for any two-site thermalisation. + +The evolution operator in one step is: +\begin{eqnarray} +U &=& \frac{1}{N+1} \left[ e^{-\tau L_1^*} + e^{-\tau L_N^*} + \sum_{i=1}^{N-1} e^{-\tau L^*_{i,i+1}} \right] +\nonumber \\ +&=& \frac{1}{N+1} \left[ e^{-2\tau (T_1 K^-_1 + K^o_1 + k) } + e^{-2\tau(T_L K^-_L + K^o_L +k) } + + \sum_{i=1}^{N-1} e^{ \frac{-\tau}{k} +(K^+_i K^-_{i+1} + K^-_i K^+_{i+1} - 2 K^o_i K^o_{i+1} ++ 2k^2 )} \right] \nonumber \\ +~ +\end{eqnarray} +and the dynamics after $n$ steps is given by $U^n$. +Because we are considering large $\tau$, the terms in the sums are in fact projectors +onto the lowest eigenvalues of the exponents. We shall however keep the notation as it is +in order to stress the symmetry of the bulk terms. + +Let us now show that - at the level of energies - this dynamics yields the KMP process +{\em for $k=\frac{1}{2}$, that is $m=2$}. +Consider first a general $m$, and two neighbouring sites of coordinates $x = \{x_\alpha\}_{\alpha=1,\ldots,m}$, +$y=\{y_\alpha\}_{\alpha=1,\ldots,m}$. 
+If they are completely thermalised, it means that (cfr (\ref{bb}): +the joint probability density satisfies +\begin{equation} +\left(x_{\alpha} +\frac{\partial}{\partial y_{\beta}} - +y_{\beta}\frac{\partial}{\partial x_{\alpha}} + \right) p(x,y)=0 +\end{equation} +It is easy to see that this may happen if and only if +\begin{equation} +p(x,y)= p[ \sum_\alpha (x_\alpha^2+y_\alpha^2)] +\end{equation} +In particular let us consider the microcanonical measure +\begin{equation} +p(x,y)= \delta[ \sum_\alpha (x_\alpha^2+y_\alpha^2)-\epsilon ] +\end{equation} +Defining new random variables $\epsilon_1$ and $\epsilon_2$ +as the energies of the neighboring sites +\be +\epsilon_1 = \sum_\alpha x_\alpha^2 +\ee +\be +\epsilon_2 = \sum_\alpha y_\alpha^2 +\ee +then their joint probability density will be +\begin{equation} +p(\epsilon_1,\epsilon_2) = \frac{S_m^2}{4} \delta(\epsilon_1+\epsilon_2-\epsilon) +\epsilon_1^{\frac{1}{2}-1} \epsilon_2^{\frac{1}{2}-1} +\end{equation} +where $S_m$ denotes the surface of the unit sphere in $m$ dimension +\be +S_m = \frac{m \pi^{m/2}}{\Gamma(\frac{1}{2}+1)} +\ee +{\em This yields a flat distribution for $m=2$, i.e. the KMP model.} + + + + +\section{Dual model} + + +The expectation value of an observable at time $t$, starting from an initial +distribution $|init\rangle$ is: + + +\begin{equation} +<O> = \langle - | O e^{-Ht} | init \rangle +\end{equation} +where $\langle - |$ is a constant. +Taking the adjoint $ x_i \to x_i$, $\partial_i \to -\partial_i$: +\begin{equation} +<O> = \langle - | O e^{-Ht} | init \rangle= \langle init| e^{-H^\dag t} O |- \rangle +\end{equation} +where $H^\dag(K^\pm, K^o)=H( K^\pm, -K^o)$ (because of the change of signs of the derivatives) +\begin{eqnarray} +-H^\dag&=& \frac{4}{1} \sum_i \left( +K^+_i K^-_{i+1} + K^-_i K^+_{i+1} - 2 K^o_i K^o_{i+1} ++ \frac{m^2}{8} \right) +\nonumber\\ +&+&2 \left(T_1 K^-_1 + K^o_1 + \frac{1}{4}\right) ++2 \left(T_L K^-_L + K^o_L +\frac{1}{4}\right) +\end{eqnarray} +In particular, for the generating function we had chosen + \begin{equation} + O |- \rangle = \Pi_i \frac{x_i^{2 \xi_i}}{(2\xi_i -1)!!}|-\rangle=|\xi_1,...,\xi_N\rangle +\end{equation} + +Considered as an operator acting on `particle number', as counted by $K^o$, $H^\dag$ does not +conserve the probability. +The trick we used can be expressed as follows: introduce the particle number $\xi_o$ and $\xi_{N+1}$ +and the operators $A^+_o$ and $A^+_{N+1}$, which create particles in boundary sites with unit rate. +We consider now the {\em enlarged} process generated by +\begin{eqnarray} +-H^{dual}&=& \frac{4}{1} \sum_i \left( +K^+_i K^-_{i+1} + K^-_i K^+_{i+1} - 2 K^o_i K^o_{i+1} ++ \frac{m^2}{8} \right) +\nonumber\\ +&+&2 \left(A^+_o K^-_1 + K^o_1 - \frac{1}{4}\right) ++2 \left( A^+_{N+1} K^-_N + K^o_N -\frac{1}{4}\right) +\end{eqnarray} +which conserves ({\it seems}) particle number and probability. 
+We wish to prove that: + +\begin{eqnarray} +<O> &=& \langle init| e^{-H^\dag t} |\xi_1,...,\xi_N \rangle \nonumber \\ +&=& \sum_{\xi_o,\xi_{N+1}} + T_1^{\xi_o} T_{L}^{\xi_{N+1}} \langle \xi_o \xi_{N+1} | \otimes \langle + init| e^{-H^{dual} t} |\xi_1,...,\xi_N \rangle \otimes + |\xi_o=0,\xi_{N+1}=0 \rangle \nonumber \\ +\label{ggg} +\end{eqnarray} + + +I think the proof is obvious, because developing the exponential of $H^{dual}$ all the $A^+$ can be +collected because they commute with everything else, and the experctation value +\begin{equation} +\sum_{\xi_o} T_1^{\xi_o} \langle \xi_o |[A^+_o]^r |\xi_o=0 \rangle = T_1^r + \end{equation} +just puts back as many $T$'s as necessary. + +I do not know exactly how to use (\ref{ggg}) in general, but in the large time limit the evolution +voids the chain of particles + + +\section{Dual of KMP} + +I think that the argument runs through without changes if we use $U$ defined for the KMP model. +We just have to note that each term corresponds to an evolution of two sites (or a site and the bath) +and so in the dual it corresponds to sharing the particles between those two sites, or emptying +the sites at the borders. + +{\bf: NOTE by Cristian} + +We can check that the duality function chosen in the original paper by KMP +do coincide with the duality function of our process for $m=2$ (and the random +variables are the energies). +Indeed we start from +\be +f(x,\xi) = \prod_i (\sum_{\alpha} x_{i,\alpha}^2)^{\xi} +\ee +When the bath have equal temperature (let's us choose T=1) then the stationary +measure is +\be +\pi(x) = \prod_i \frac{1}{(2\pi)^{m/2}} \exp\left(-\sum_{\alpha}\frac{x_{i,\alpha}^2}{2}\right) +\ee +Let us focus on a fixed $i$ (that is in this short computation we write $x$ for $x_i$). +We have +\begin{eqnarray} +\E(f(x,\xi)) +&=& +\int dx_1 \cdots \int dx_m (x_1^2+\ldots + x_m^2)^{\xi} \exp-\left(\frac{x_{1}^2}{2}+\ldots+\frac{x_{1}^2}{2}\right) +\nonumber \\ +& = & +\int dr S_m r^{2\xi} \exp-\left(\frac{r^2}{2}\right) +\nonumber \\ +& = & +\frac{\frac{1}{2}\Gamma(\frac{1}{2}+\xi)}{\Gamma(\frac{m}{2}+1)} 2^\xi +\nonumber \\ +\end{eqnarray} +Special cases: +\begin{itemize} +\item $m=1$ + +$$ +\E(f(x,\xi)) = (2\xi-1)!! +$$ +where one uses that $\Gamma(\frac{1}{2}+\xi)= \frac{\sqrt{\pi}(2\xi-1)!!}{2^{\xi}}$ and $\Gamma(\frac{3}{2}) = \frac{\sqrt{\pi}}{2}$ +\item $m=2$ + +$$ +\E(f(x,\xi)) = \xi! 2^\xi +$$ +where one uses that $\Gamma(1+\xi)= \xi!$ and $\Gamma(2) = 1$. +Thus, if one defines the energies as +$$ +\epsilon_i = \sum_{\alpha}\frac{x_{i,\alpha}^2}{2} +$$ +one recover the choice of KMP for the dual function +$$ +O(\epsilon_i,\xi) = \prod_i \frac{\epsilon_i^{\xi_i}}{\xi_i!} +$$ +\end{itemize} + + + + + + +\section{ Dual of SEP: here goes an outline of how to proceed for the SSEP} + + +\be +H=-L_{SEP}^* +\ee +\begin{eqnarray} +L^*_{SEP} &=& \frac{1}{j} + \sum_i \left(J^+_i J^-_{i+1} + J^-_i J^+_{i+1} + 2 J^o_i J^o_{i+1} + - 2 j^2 \right)\\ +&+&\alpha (J^-_1 - J^o_1-j) + \gamma (J^+_1 + J^o_1-j) ++ \delta (J^-_L - J^o_L-j) + \beta (J^+_L + J^o_L-j)\nonumber +\end{eqnarray} +The factor $1/j$ is analogous to the factor $1/m$ in (\ref{bb}). +The operators $J^+_i, J^-_i, J^o_i$ act on the Hilbert space + corresponding to $0 \le r \le n$ particles per site $\otimes_i |r\rangle_i$ +as follows: +\begin{eqnarray} +J^+_i |r\rangle_i &=& (2j-r) |r+1\rangle_i \nonumber \\ + J^-_i |r\rangle_i &=& r |r-1\rangle_i \nonumber \\ +J^o_i |r\rangle_i &=& (r-j) |r\rangle_i +\end{eqnarray} + +The conjugation properies are as follows. 
There is an operator $Q$, +{\em diagonal in this basis } (I give the expression below), such that: +\begin{equation} +[J^+_i]^\dag = Q[J^-_i]Q^{-1} \qquad [J^-_i]^\dag = Q[J^+_i]Q^{-1} +\end{equation} +while $[J^z_i]^\dag=J^z_i= Q[J^z_i]Q^{-1}$. + + + +The expectation value of an observable at time $t$, starting from an initial +distribution $|init\rangle$ is: + + +\begin{equation} +<O> = \langle - | O e^{-Ht} | init \rangle +\end{equation} +where $\langle - |$ is a constant. +As before: +\begin{eqnarray} +<O> &=& \langle - | O e^{-Ht} | init \rangle= +\langle init| e^{-H^\dag t} O |- \rangle= \nonumber \\ +& & \langle init|Q e^{-{\bar H} t} Q^{-1}O |- \rangle= +\langle init|Q \; e^{-{\bar H} t} Q^{-1}O Q Q^{-1} |- \rangle +\end{eqnarray} + + +{\em $ {\bar H}$ is the same operator as $H$ but with +$J^+$ substituted by $J^-$, and vice-versa.} +Our job is now to make the rotation that will eliminate the $J^+$'s in +the border terms of $ {\bar H}$. + + + + +The transformation is of the form +\begin{eqnarray} +e^{\mu J^+} J^+ e^{-\mu J^+}&=&J^+ \nonumber \\ +e^{\mu J^+} J^o e^{-\mu J^+} &=&J^o - \mu J^+ \nonumber \\ +e^{\mu J^+} J^- e^{-\mu J^+} &=& J^- + 2 \mu J^o - \mu^2 J^+ +\end{eqnarray} +for suitable $\mu$. +Putting $\mu=-1$, we get that {\bf the bulk term is left invariant, +precisely because of the SU(2) symmetry}. The boundary terms {\bf of $\bar H$} +transform further into: +\begin{eqnarray} +& e^{\mu J^+_1} \left[ \alpha (J^+_1 - J^o_1-j) + \gamma (J^-_1 + J^o_1-j) +\right] e^{-\mu J^+_1}= \nonumber \\ & \gamma(J^-_1 + 2 \mu J^o_1 - \mu^2 +J^+_1 +J^o_1 - \mu J^+_1 -j) + \alpha (J^+_1 - J^o_1 + \mu J^+_1 -j) += \nonumber \\ +& \alpha(- J^o_1 -j) + \gamma (J^-_1 -J^o_1 -j) +\label{trans} +\end{eqnarray} +which is of the same form we have in the $SU(1,1)$ model. +The same can be done in the other boundary term. + +We thus get: +\begin{eqnarray} +<O> &=& \langle - | O e^{-Ht} | init \rangle= +\langle init|Q \; e^{-{\bar H} t} Q^{-1}O Q Q^{-1} |- \rangle \nonumber\\ +&= & \langle init|Q e^{ \sum_i J^+_i} e^{-{\bar H_{dual}} t} + e^{ -\sum_i J^+_i} Q^{-1}O Q Q^{-1} |- \rangle \nonumber \\ +&= & \langle init|Q e^{ \sum_i J^+_i} e^{-{\bar H_{dual}} t} + e^{ -\sum_i J^+_i} Q^{-1}O Q e^{ \sum_i J^+_i} + e^{ -\sum_i J^+_i} |- \rangle \nonumber \\ + &= & \langle init|Q Q^{-1} e^{ \sum_i J^+_i} e^{-{\bar H_{dual}} t} + e^{ -\sum_i J^+_i} Q^{-1}O Q e^{ \sum_i J^+_i} |-_{dual} \rangle +\end{eqnarray} +where we have defined $H_{dual}$ as the transformed Hamiltonian. + +We now have to study $ |-_{dual} \rangle \equiv e^{ -\sum_i J^+_i} + Q^{-1} |- \rangle$ +Because we know that terms like those proportional to $\gamma$ and $\alpha$ +anihilate the measure to the left: +\begin{eqnarray} +& & \langle - | (J^-_i - J^o_i-j) =0\nonumber \\ +& & \langle - | (J^+_i + J^o_i-j) =0 +\end{eqnarray} +this implies that in the new variables and following all the transformations +(cfr (\ref{trans})): +\begin{eqnarray} +& & (J^-_i -J^o_i -j)e^{ -\sum_i J^+_i} Q^{-1} |- \rangle= 0 \nonumber \\ +& & ( -J^o_i -j)e^{ -\sum_i J^+_i} Q^{-1} |- \rangle =0 +\end{eqnarray} +which implies that $( J^o_i +j) |-_{dual} \rangle= J^-_i |-_{dual} \rangle=0$, +and this means that +\begin{equation} +J^o_i |-_{dual} \rangle =-j |-_{dual} \rangle +\end{equation} +is the vacuum of particles in this base! 
+ +All in all we are left with: +\begin{eqnarray} +<O> &=& \langle init|Q \; e^{ \sum_i J^+_i} + e^{-{\bar H_{dual}} t} e^{ -\sum_i J^+_i} Q^{-1}O Q e^{ \sum_i J^+_i} + |-_{dual} \rangle \nonumber \\ + &=& \langle init|Q \; e^{ \sum_i J^+_i} + e^{-{\bar H_{dual}} t} {\hat O} + |-_{dual} \rangle +\end{eqnarray} +where $ {\hat O} \equiv e^{ -\sum_i J^+_i} Q^{-1}O Q e^{ \sum_i +J^+_i}$. We have to start with the +vacuum $ |-_{dual} \rangle$, then apply $ {\hat O} $, (which creates +particles because it contains many $J^+$'s), and then there is the +dual evolution. The final configuration has to be overlapped with +$\langle f| \equiv \langle init|Q \; e^{ \sum_i J^+_i}$. +For large times, there will be no particle left except in the two extra sites +in the borders. + +\section{Constructive approach} + +Here I would like to say the following: if I have a modle of transport +of which I do not know if it has a Dual one, I can proceed as follows. +I take a small version with no baths and a few sites. I write the +evolution operator and I diagonalise it numerically. If there is a +non abelian group, the eigenvalues will be in degenerate +multiplets. Hence, if I find multiplets, then very probably there is a +dual model, if I do not, then there cannot be one. It would be nice +to show it with the KMP model with two or three sites. + +Another thing is to consider higher groups. $SU(3)$ has already been studied +for two kinds of particles. We know how to map to a dual in that +case, if it has not been done yet. + +\newpage +{\bf THIS PART HAS BEEN WRITTEN BY CRISTIAN} + +The aim of this file is to set notation in the two languages. +Let us focus on duality for the case we already know: +SU(1,1) model with $k=1/4$. To fix ideas let us consider only +the bulk part of the system with periodic boundary conditions. + +\section{Probabilistic language} +We have two stochastic Markovian process with continuous time. +\begin{itemize} +\item +\underline{The first process $X(t) \in \R^N$} is given by the Fokker-Planck equation: +\be +\frac{dp(x,t)}{dt} = L^* p(x,t) +\ee +where $p(x,t)$ represents the probability density +for the process $X(t)$, that is +$$ +p(x,t)dx = Prob (X(t)\in (x,x+dx)) +$$ +and +\begin{eqnarray} +L^*p(x,t) +& = & +\sum_i L^*_{i,i+1} p(x,t) \noindent\\ +& = & +\sum_i \left(x_i\frac{\partial}{\partial x_{i+1}} -x_{i+1}\frac{\partial}{\partial x_{i}}\right)^2 p(x,t) +\end{eqnarray} +\item +\underline{The second process $\Xi(t) \in \N^N$} is characterized by the master equation +\be +\frac{dP(\xi,t)}{dt} = {\cal L^*} P(\xi,t) +\ee +where $P(\xi,t)$ represents the +probability mass function for the process $\Xi(t)$, that is +$$ +P(\xi,t) = Prob (\Xi(t) = \xi) +$$ +and +\begin{eqnarray} +{\cal L}^*P(\xi,t) +& = & +\sum_i {\cal L}^*_{i,i+1}P(\xi,t) \nonumber \\ +& = & +\sum_i 2\xi_i \left(1+ 2\xi_{i+1}\right) P(\xi^{i,i+1},t) ++ \left(1+2\xi_i\right)2\xi_{i+1} P(\xi^{i+1,i},t) \nonumber\\ +& & - 2\left(2\xi_i + \frac{1}{2}\right)\left(2\xi_{i+1} + \frac{1}{2}\right) P(\xi,t) ++ \frac{1}{2}P(\xi,t) +\end{eqnarray} +and $\xi^{i,j}$ denotes the configuration that is obtained by removing one particle +at $i$ and adding one particle at $j$. 
+\newpage +\item +\underline{In general, Duality means the following}: +there exists functions $O(x,\xi): \R^N \times \N^N \mapsto \R$ such that +the following equality between expectations for the two processes holds +\begin{center} +\fbox{\parbox{9cm}{ +\be +\E_x( O(X(t),\xi)) =\E_\xi(O(x,\Xi(t))) +\ee +}} +\end{center} +The subscripts in the expectations denote the initial conditions of the processes: +$X(0) =x$ on the left and $\Xi(0) = \xi$ on the right. +More explicitly we have: +\be +\int dy O(y,\xi) p(y,t; x,0) = \sum_{\eta} O(x,\eta) P(\eta,t; \xi,0) +\ee +To prove duality it is sufficient to show that +\be +\label{main} +L O(x,\xi) = {\cal L} O(x,\xi) +\ee +where $L$, that is working on $x$, is the adjoint of $L^*$ and ${\cal L}$, that is working on $\xi$, +is the adjoint of ${\cal L}^*$. +Indeed we have: +\begin{eqnarray} +\E_x( O(X(t),\xi)) +& = & +\int dy O(y,\xi) p(y,t; x,0) \\ +& = & +\sum_{\eta} \int dy O(y,\eta) p(y,t; x,0) \delta_{\eta,\xi} \\ +& = & +\sum_{\eta} \int dy O(y,\eta) e^{tL^*} \delta(y-x) \delta_{\eta,\xi} \\ +& = & +\sum_{\eta} \int dy e^{tL} O(y,\eta) \delta(y-x) \delta_{\eta,\xi} \\ +& = & +\sum_{\eta} \int dy e^{t{\cal L}} O(y,\eta) \delta(y-x) \delta_{\eta,\xi} \\ +& = & +\sum_{\eta} \int dy O(y,\eta) e^{t{\cal L}^*} \delta(y-x) \delta_{\eta,\xi} \\ +& = & +\sum_{\eta} \int dy O(y,\eta) P(\eta,t;\xi,0) \delta(y-x) \\ +& = & +\sum_{\eta} O(x,\eta) P(\eta,t;\xi,0) \\ +& = & +\E_\xi(O(x,\Xi(t))) +\end{eqnarray} +\newpage +\item +\underline{For the present case, the proper function to be considered are} +\be +\label{Oss} +O(x,\xi) = \prod_{i} \frac{x_i^{2\xi_i}}{(2\xi_i-1)!!} +\ee +Let us check Eq.(\ref{main}) on this choice. We have +\begin{eqnarray*} +&& +L_{i,i+1} O(x,\xi) += +\left(\prod_{k\not\in\{i,i+1\}} \frac{x_k^{2\xi_k}}{(2\xi_k -1)!!}\right) +\times +\\ +&&\left(2\xi_{i+1}(2\xi_{i+1}-1) \frac{x_i^{2\xi_i+2}}{(2\xi_i -1)!!}\frac{x_{i+1}^{2\xi_{i+1}-2}}{(2\xi_{i+1} -1)!!} +- 2\xi_{i}(2\xi_{i+1}+1) \frac{x_i^{2\xi_i}}{(2\xi_i -1)!!}\frac{x_{i+1}^{2\xi_{i+1}}}{(2\xi_{i+1} -1)!!} +\right. +\\ +&&\left.- 2\xi_{i+1}(2\xi_{i}+1) \frac{x_i^{2\xi_i}}{(2\xi_i -1)!!}\frac{x_{i+1}^{2\xi_{i+1}}}{(2\xi_{i+1} -1)!!} ++2\xi_{i}(2\xi_{i}-1) \frac{x_i^{2\xi_i-2}}{(2\xi_i -1)!!}\frac{x_{i+1}^{2\xi_{i+1}+2}}{(2\xi_{i+1} -1)!!} +\right) +\\ +\end{eqnarray*} +which implies +\begin{eqnarray*} +L_{i,i+1} O(x,\xi) +& = & +\Big(2\xi_{i+1}(2\xi_{i}+1) [O(x,\xi^{i+1,i})-O(x,\xi)] +\\ +&& +\;+\;2\xi_{i}(2\xi_{i+1}+1) [O(x,\xi^{i,i+1})-O(x,\xi)]\Big) +\\ +& = & +{\cal L}_{i,i+1} O(x,\xi) +\end{eqnarray*} + +\item \underline{How to find the proper normalization?} +Suppose that we are in the general following situation: +\begin{itemize} +\item We have a generator $L$ of a Markov process $X(t)$. +\item We know its stationary measure $\pi(x)$: +\be +L^* \pi(x) = 0 +\ee +\item We have functions $f(x,\xi)$ for which the following holds: +\be +\label{aaa} +L f(x,\xi) = \sum_{\eta} r(\xi,\eta) f(x,\eta) +\ee +with +\be +\label{bbb} +r(\xi,\eta) \ge 0 \qquad \mbox{if}\quad \xi \neq \eta +\ee +\be +\label{ccc} +r(\xi,\xi) \le 0 \qquad \mbox{if}\quad \xi = \eta +\ee +\end{itemize} +The matrix $r$ resembles the generator of a dual Markov process, +but it is not because it does not satisfy the condition +$\sum_{\eta} r(\xi,\eta) = 0$. 
+In order to find the generator of the dual process we proceed as +follows: +\begin{enumerate} +\item Define +\be +m(\xi) = \int f(x,\xi) \pi(x) dx +\ee +\item Define +\be +q(\xi,\eta)= m(\xi)^{-1} r(\xi,\eta) m(\eta) +\ee +\item Define +\be +O(x,\xi) = m(\xi)^{-1} f(x,\xi) +\ee +\end{enumerate} +Then the matrix $q$ can be seen as the generator of the dual Markov process $\Xi(t)$, that is +\be +L O(x,\xi) = \sum_{\eta} q(\xi,\eta) O(x,\eta) +\ee +with +\be +q(\xi,\eta) \ge 0 \qquad \mbox{if}\quad \xi \neq \eta +\ee +\be +q(\xi,\xi) \le 0 \qquad \mbox{if}\quad \xi = \eta +\ee +\be +\sum_{\eta} q(\xi,\eta) = 0 +\ee +Indeed we have: +\begin{eqnarray} +L O(x,\xi) +&=& +L m(\xi)^{-1} f(x,\xi) \nonumber \\ +&=& +m(\xi)^{-1} \sum_{\eta} r(\xi,\eta) f(x,\eta) \nonumber \\ +&=& +m(\xi)^{-1} \sum_{\eta} m(\xi)q(\xi,\eta) m(\eta)^{-1} m(\eta) O(x,\eta)\nonumber \\ +&=& +\sum_{\eta} q(\xi,\eta) O(x,\eta) +\end{eqnarray} +and +\begin{eqnarray} +\sum_{\eta} q(\xi,\eta) +&=& +\sum_{\eta} m(\xi)^{-1} r(\xi,\eta) m(\eta) \nonumber \\ +&=& +m(\xi)^{-1} \sum_{\eta} r(\xi,\eta) \int f(x,\eta) \pi(x) dx \nonumber \\ +&=& +m(\xi)^{-1} \int L f(x,\xi) \pi(x) dx \nonumber \\ +&=& +m(\xi)^{-1} \int f(x,\xi) L^* \pi(x) dx \nonumber \\ +&=& +0 +\end{eqnarray} + + + +\item \underline{Our case}. Among all the invariant measure +of the $X(t)$ process, namely the normalized function with +spherical symmetry $p(x) = p(\sum_i x_i^2)$, a special role is +played by the Gibbs measure +$$ +\pi(x) += \left(\frac{\beta}{2\pi}\right)^{(N/2)} e^{-\beta\sum_i \frac{x_i^2}{2}} += \left(\frac{\beta}{2\pi}\right)^{(N/2)} \prod_i e^{-\beta\frac{x_i^2}{2}} +$$ +which is selected as soon as the system is placed in contact with +thermal bath working at inverse temperature $\beta$. +Moreover: If $Z$ is a centered Gaussian, namely $Z\sim N(0,\sigma^2)$, +then +$$ +\E(Z^{2n}) = \sigma^{2n} (2n-1)!! +$$ +If one start from +$$ +f(x,\xi) = \prod_i x_i^{2\xi} +$$ +which satisfy (\ref{aaa}),(\ref{bbb}),(\ref{ccc}) and apply +the previous procedure, one arrives to (\ref{Oss}). + +{\bf Remark:} Note that, in applying the procedure, the +dependence on $\beta$ disappear!!!! +\end{itemize} + + +\section{Quantum language} + + +Here we start from a quantum spin chain +$$ +H = - 4 \sum_i \left( K^+_iK^-_{i+1} + K^-_iK^+_{i+1} -2 K^0_iK^0_{i+1} + \frac{1}{8}\right) +$$ +where the spin $K_i$'s satisfy the SU(1,1) algebra +\begin{eqnarray} +\label{commutatorsSU11} +[K_i^{0},K_i^{\pm}] &=& \pm K_i^{\pm} \nonumber \\ +{[}K_{i}^{-},K_{i}^{+}{]} &=& 2K_i^{0} +\end{eqnarray} +We are going to see the Schr\"odinger equation with imaginary time +\begin{equation} +\label{schroedinger} +\frac{d}{dt}|\psi(t) \rangle = -H |\psi(t)\rangle\;. +\end{equation} +as the evolution equation for the probability distribution of +a Markovian stochastic process. +\begin{itemize} +\item +\underline{The Hamiltonian possesses the SU(1,1) invariance}. If we define +\be +K^+ = \sum_{i} K_i^+ +\ee +\be +K^- = \sum_{i} K_i^- +\ee +\be +K^0 = \sum_{i} K_i^0 +\ee +we find that +\be +[H,K^+] = 0 +\ee +\be +[H,K^-] = 0 +\ee +\be +[H,K^0] = 0 +\ee +\item +\underline{Since $[H,K^+] = 0$} there exist a basis to study the stochastic process associated to +$H$ where \underline{$K^+$ is diagonal}. 
We might consider the following representation +\begin{eqnarray} +\label{Koper} +K^+_i &=& \frac{1}{2} x_{i}^2 \nonumber \\ +K^-_i &=& \frac{1}{2} \frac{\partial^2}{\partial x_{i}^2} \nonumber \\ +K^o_i &=& \frac{1}{4} \left\{\frac{\partial}{\partial x_{i}} x_{i} + + x_{i} \frac{\partial}{\partial x_{i}} \right \} +\end{eqnarray} +If we use this representation then +$$ +H = -L^* +$$ +and the probability density function for the $X(t)$ process is encoded in +the state $|\psi(t)\rangle$, namely +\begin{equation} +|\psi(t) \rangle = \int dx p(x,t) |x\rangle +\end{equation} +where we have introduced the notation $|x\rangle$ to denote a completely +localized state, that is a vector which together with its transposed +$\langle x|$ form a complete basis of a Hilbert space and its dual: +\begin{equation} +\langle x|x' \rangle = \delta(x-x') +\end{equation} +It immediately follows that +\begin{equation} +\langle x|\psi(t) \rangle = p(x,t) +\end{equation} +To compute expectation with respect to the $X(t)$ process +we introduce the flat state +\begin{equation} +\langle - | = \int dx \;\langle x| +\end{equation} +which is such that +\begin{equation} +\langle - | x\rangle = 1 +\end{equation} +Then for any observable $A = A(X(t))$ we have that its expectation value +at time $t$ can be written as +\begin{equation} +\langle A(t) \rangle_x = \int dy \,A(y)\, p(y,t;x,0) = \langle -|A| \psi(t) \rangle_x = \langle -|A e ^{-tH}| x\rangle +\end{equation} +\item +\underline{Since $[H,K^0] = 0$} there exist a basis to study the stochastic process associated to +$H$ where \underline{$K^0$ is diagonal}. We might consider the following representation +\begin{eqnarray} +\label{Koper2} +K^+_i|\xi\rangle &=& \left(\frac{1}{2} + \xi\right) |\xi+1\rangle\nonumber \\ +K^-_i|\xi\rangle &=& \xi |\xi-1\rangle\nonumber \\ +K^o_i|\xi\rangle &=& \left(\xi + \frac{1}{4}\right) |\xi\rangle +\end{eqnarray} +where $|\xi\rangle$ denotes a vector which together with its transposed +$\langle \xi|$ form a complete basis of a Hilbert space and its dual, that is +\begin{equation} +\langle \xi|\eta \rangle = \delta_{\xi,\eta} +\end{equation} +If we use this representation then +$$ +H = -{\cal L}^* +$$ +and the probability mass function for the $\Xi(t)$ process is encoded in +the state $|\phi(t)\rangle$, namely +\begin{equation} +|\phi(t) \rangle = \sum_{\xi} P(\xi,t) |\xi\rangle +\end{equation} +It immediately follows that +\begin{equation} +\langle \xi|\phi(t) \rangle = P(\xi,t) +\end{equation} +To compute expectation with respect to the $\Xi(t)$ process +we introduce the flat state +\begin{equation} +\langle -_{dual} | = \sum_{\xi} \;\langle \xi| +\end{equation} +which is such that +\begin{equation} +\langle -_{dual} | \xi\rangle = 1 +\end{equation} +Then for any observable $A=A(\Xi(t))$ we have that its expectation value +at time $t$ can be written as +\begin{equation} +\langle A(t) \rangle_\xi = \sum_{\eta}\,A(\eta)\, p(\eta,t;\xi,0) = \langle -_{dual}|A| \phi(t) \rangle_{\xi} = \langle -_{dual}|A e ^{-tH}| \xi\rangle +\end{equation} +\item +\underline{The claim is the following: Duality, in general, is going from the basis +where}\\ +\underline{one generator of the group is diagonal to a basis where another generator of}\\ +\underline{ the group is diagonal.} + +In our case we change from a basis where $K^+$ is diagonal to the base where $K^0$ is diagonal. 
+ +\begin{eqnarray} +\langle - |\prod_i\frac{(2K_i^+)^{\xi_i}}{(2\xi_i-1)!!}|\psi(t)\rangle_x +& = & +\int dy \; \langle y |\prod_i\frac{(2K_i^+)^{\xi_i}}{(2\xi_i-1)!!} e^{tL^*}|x\rangle \nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle y |\prod_i\frac{(2K_i^+)^{\eta_i}}{(2\eta_i-1)!!}e^{tL^*}|x\rangle \langle \eta|\xi\rangle\nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle y| \otimes \langle \eta| \prod_i\frac{(2K_i^+)^{\eta_i}}{(2\eta_i-1)!!} e^{tL^*} | x\rangle \otimes|\xi\rangle\nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle x| \otimes \langle \xi | e^{tL} \prod_i\frac{(2K_i^+)^{\eta_i}}{(2\eta_i-1)!!} | y\rangle \otimes|\eta\rangle \nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle x| \otimes \langle \xi | e^{tL} \prod_i\frac{y^{2\eta_i}}{(2\eta_i-1)!!} | y\rangle \otimes|\eta \rangle \nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle x| \otimes \langle \xi | e^{t{\cal L}} \prod_i\frac{y_i^{2\eta_i}}{(2\eta_i-1)!!} | y\rangle \otimes|\eta \rangle \nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle x| \otimes \langle \xi | e^{t{\cal L}} \prod_i\frac{y_i^{K_i^0 -\frac{1}{2}\1}}{(K_i^0 -\frac{3}{2}\1)!!} | y\rangle \otimes|\eta \rangle \nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle y| \otimes \langle \eta |\prod_i\frac{y_i^{K_i^0 -\frac{1}{2}\1}}{(K_i^0 -\frac{3}{2}\1)!!} e^{t{\cal L}^*} | x\rangle \otimes|\xi \rangle\nonumber \\ +& = & +\sum_{\eta} \int dy \; \langle \eta | \prod_i\frac{y_i^{K_i^0 -\frac{1}{2}\1}}{(K_i^0 -\frac{3}{2}\1)!!} e^{t{\cal L}^*} |\xi \rangle \langle y | x\rangle \nonumber \\ +& = & +\sum_{\eta} \langle \eta | \prod_i\frac{y_i^{K_i^0 -\frac{1}{2}\1}}{(K_i^0 -\frac{3}{2}\1)!!} |\phi(t)\rangle_{\xi} \nonumber \\ +& = & +\langle -_{dual} |\prod_i\frac{y_i^{K_i^0 -\frac{1}{2}\1}}{(K_i^0 -\frac{3}{2}\1)!!}|\phi(t)\rangle_{\xi} +\end{eqnarray} + + +\end{itemize} + +\section{General k} + +A convenient $(2j+1)$-dimensional representation of the SU(2) algebra is given by +\begin{eqnarray} +J^+_i |n_i\rangle &=& (2j-n_i) |n_i+1\rangle \nonumber \\ +J^-_i |n_i\rangle &=& n_i |n_i-1\rangle \nonumber \\ +J^0_i |n_i\rangle &=& (n_i-j) |n_i\rangle +\end{eqnarray} +where the quantum numbers $n_i\in\{0,1,\ldots,2j\}$. +{\bf Note that in this representation the adjoint of $J^+_i$ is not +$J^-_i$, UNLESS $j=1/2$}. + +A matrix representation is: +$$ +J^+ = \left( +\begin{array}{cccc} + 0 & & & \\ + 2j & \ddots & & \\ + & \ddots & \ddots & \\ + & & 1 & 0\\ +\end{array}\right) +\qquad +J^- = \left( +\begin{array}{cccc} + 0 & 1 & & \\ + & \ddots & \ddots & \\ + & & \ddots & 2j \\ + & & & 0 \\ +\end{array}\right) +\qquad +J^0 = \left( +\begin{array}{cccc} + -j & & & \\ + & \ddots & & \\ + & & \ddots & \\ + & & & j\\ +\end{array}\right) +$$ + +In the SU(1,1) case one can use the infinite dimensional representation +\begin{eqnarray} +\label{newrepresentationsu11} +K^+_i |n_i\rangle &=& (2k+n_i) |n_i+1\rangle \nonumber \\ +K^-_i |n_i\rangle &=& n_i |n_i-1\rangle \nonumber \\ +K^0_i |n_i\rangle &=& (n_i+k) |n_i\rangle +\end{eqnarray} +where the quantum numbers $n_i\in\{0,1,2,\ldots\}$. 
+A matrix representation is: +$$ +K^+ = \left( +\begin{array}{cccc} + 0 & & & \\ + 2k & \ddots & & \\ + & 2k+1 & \ddots & \\ + & & \ddots & \ddots\\ +\end{array}\right) +\qquad +K^- = \left( +\begin{array}{cccc} + 0 & 1 & & \\ + & \ddots & 2 & \\ + & & \ddots & \ddots \\ + & & & \ddots \\ +\end{array}\right) +\qquad +K^0 = \left( +\begin{array}{cccc} + k & & & \\ + & k+1 & & \\ + & & k+2 & \\ + & & & \ddots\\ +\end{array}\right) +$$ +Let's check that in this representation the operator is stochastic. +I will do it for the bulk: +\begin{eqnarray} +L_{i,i+1}|n_i,n_{i+1}\rangle +&=& +(2k+n_i) n_{i+1}|n_i +1 ,n_{i+1}-1\rangle \nonumber\\ +&+& +n_i(2k+n_{i+1})|n_i -1 ,n_{i+1}+1\rangle \nonumber\\ +&+& +(-2(n_i+k)(n_{i+1}+k)+2k^2)|n_i,n_{i+1}\rangle +\end{eqnarray} +The sum of the rates is +$$ +(2k+n_i) n_{i+1}+ +n_i(2k+n_{i+1}) +-2(n_i+k)(n_{i+1}+k)+2k^2 =0 +$$ + + + + + + + + + +% \end{document} |
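For completeness, the algebra behind that last equality can be written out (a step left implicit above): expanding the three products gives
$$
\big(2k\,n_{i+1} + n_i n_{i+1}\big) + \big(2k\,n_i + n_i n_{i+1}\big)
- 2\big(n_i n_{i+1} + k\,n_i + k\,n_{i+1} + k^2\big) + 2k^2 = 0\,,
$$
so the off-diagonal rates out of any configuration $|n_i,n_{i+1}\rangle$ exactly cancel the diagonal term, confirming that the bulk generator conserves probability in this representation.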