author     Jaron Kent-Dobias <jaron@kent-dobias.com>   2024-04-28 09:33:43 -0300
committer  Jaron Kent-Dobias <jaron@kent-dobias.com>   2024-04-28 09:33:43 -0300
commit     3ff48c18678f1bc62ee8a3b8724fac77e49010d9
tree       4431bc165c748903327b2e17fccc8007f3ec1b0a
parent     2573972d0cde8d969db6a66294e76336cc29a09f
Lots of work and reformatting.
 marginal.bib |  38
 marginal.tex | 336
 2 files changed, 268 insertions(+), 106 deletions(-)
diff --git a/marginal.bib b/marginal.bib
index 870197e..b51659e 100644
--- a/marginal.bib
+++ b/marginal.bib
@@ -70,3 +70,41 @@ doi = {10.1103/physrevlett.75.2847}
 }
+@article{Fyodorov_2020_Counting,
+  author = {Fyodorov, Y. V. and Tublin, R.},
+  title = {Counting Stationary Points of the Loss Function in the Simplest Constrained Least-square Optimization},
+  journal = {Acta Physica Polonica B},
+  publisher = {Jagiellonian University},
+  year = {2020},
+  number = {7},
+  volume = {51},
+  pages = {1663},
+  url = {http://dx.doi.org/10.5506/APhysPolB.51.1663},
+  doi = {10.5506/aphyspolb.51.1663},
+  issn = {1509-5770}
+}
+
+@article{Fyodorov_2022_Optimization,
+  author = {Fyodorov, Yan V. and Tublin, Rashel},
+  title = {Optimization landscape in the simplest constrained random least-square problem},
+  journal = {Journal of Physics A: Mathematical and Theoretical},
+  publisher = {IOP Publishing},
+  year = {2022},
+  month = {May},
+  number = {24},
+  volume = {55},
+  pages = {244008},
+  url = {http://dx.doi.org/10.1088/1751-8121/ac6d8e},
+  doi = {10.1088/1751-8121/ac6d8e},
+  issn = {1751-8121}
+}
+
+@phdthesis{Tublin_2022_A,
+  author = {Tublin, Rashel},
+  title = {A Few Results in Random Matrix Theory and Random Optimization},
+  year = {2022},
+  month = {September},
+  url = {https://kclpure.kcl.ac.uk/portal/en/studentTheses/a-few-results-in-random-matrix-theory-and-random-optimization},
+  school = {King's College London}
+}
+
diff --git a/marginal.tex b/marginal.tex
index 0a21499..f19c096 100644
--- a/marginal.tex
+++ b/marginal.tex
@@ -1,10 +1,10 @@
-\documentclass[fleqn,a4paper]{article}
+\documentclass[aps,pre,reprint,longbibliography,floatfix]{revtex4-2}
 
-\usepackage[utf8]{inputenc} % why not type "Bézout" with unicode?
-\usepackage[T1]{fontenc} % vector fonts plz
-\usepackage{fullpage,amsmath,amssymb,latexsym,graphicx}
-\usepackage{newtxtext,newtxmath} % Times for PR
-\usepackage{appendix}
+\usepackage[utf8]{inputenc}
+\usepackage[T1]{fontenc}
+\usepackage{amsmath,amssymb,latexsym,graphicx}
+\usepackage{newtxtext,newtxmath}
+\usepackage{bbold}
 \usepackage[dvipsnames]{xcolor}
 \usepackage[
   colorlinks=true,
@@ -12,33 +12,21 @@ citecolor=MidnightBlue,
   filecolor=MidnightBlue,
   linkcolor=MidnightBlue
-]{hyperref} % ref and cite links with pretty colors
-\usepackage[
-  style=phys,
-  eprint=true,
-  maxnames = 100
-]{biblatex}
-\usepackage{anyfontsize,authblk}
-\usepackage{bbold}
-
-\usepackage{tikz}
-
-\addbibresource{marginal.bib}
+]{hyperref}
 
 \begin{document}
 
 \title{
-  Conditioning the complexity of random landscapes on marginal minima
+  Conditioning the complexity of random landscapes on marginal optima
 }
 
 \author{Jaron Kent-Dobias}
-\affil{Istituto Nazionale di Fisica Nucleare, Sezione di Roma I}
+\affiliation{Istituto Nazionale di Fisica Nucleare, Sezione di Roma I, Rome, Italy 00184}
 
-\maketitle
 \begin{abstract}
 \end{abstract}
 
-\tableofcontents
+\maketitle
 
 \section{Introduction}
 
@@ -106,11 +94,11 @@ An arbitrary function $g$ of the minimum eigenvalue of a matrix $A$ can be expre
 Assuming
 \begin{equation}
   \begin{aligned}
-    \lim_{\beta\to\infty}\int\frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)e^{-\beta\mathbf x^TA\mathbf x}}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')e^{-\beta\mathbf x'^TA\mathbf x'}}&g\left(\frac{\mathbf x^TA\mathbf x}N\right)
-    =\int\frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x)}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x')}g\left(\frac{\mathbf x^TA\mathbf x}N\right) \\
+    &\lim_{\beta\to\infty}\int\frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)e^{-\beta\mathbf x^TA\mathbf x}}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')e^{-\beta\mathbf x'^TA\mathbf x'}}g\left(\frac{\mathbf x^TA\mathbf x}N\right) \\
+    &=\int\frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x)}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x')}g\left(\frac{\mathbf x^TA\mathbf x}N\right) \\
+    &=g(\lambda_\mathrm{min}(A))
+    \frac{\int d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x)}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x')} \\
-    &=g(\lambda_\mathrm{min}(A))
-    \frac{\int d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x)}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')\mathbb 1_{\operatorname{ker}(A-\lambda_\mathrm{min}(A)I)}(\mathbf x')}
-    =g(\lambda_\mathrm{min}(A))
+    &=g(\lambda_\mathrm{min}(A))
   \end{aligned}
 \end{equation}
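A quick numerical illustration of the limit being taken in this identity, separate from the commit itself: at large $\beta$ the spherical Gibbs measure with weight $e^{-\beta\mathbf x^TA\mathbf x}$ concentrates on the minimum eigenvector, so the Gibbs average of $\mathbf x^TA\mathbf x/N$ approaches $\lambda_\mathrm{min}(A)$. A minimal sketch in plain numpy; the sizes, proposal step, and annealing schedule are arbitrary choices, not from the paper.

import numpy as np

rng = np.random.default_rng(0)
N, sigma = 40, 1.0

# A GOE matrix with off-diagonal entry variance sigma^2 / N
G = rng.normal(size=(N, N))
A = sigma * (G + G.T) / np.sqrt(2 * N)
lam_min = np.linalg.eigvalsh(A)[0]

# Metropolis walk on the sphere x^T x = N with weight exp(-beta x^T A x),
# annealing beta upward; at large beta the walk settles near the minimum
# eigenvector, so x^T A x / N should approach lambda_min(A).
x = rng.normal(size=N)
x *= np.sqrt(N) / np.linalg.norm(x)
E = x @ A @ x
for beta in np.geomspace(0.01, 20.0, 4000):
    y = x + 0.1 * rng.normal(size=N)
    y *= np.sqrt(N) / np.linalg.norm(y)
    Ey = y @ A @ y
    if np.log(rng.random()) < -beta * (Ey - E):
        x, E = y, Ey

# The two numbers should agree to within a few percent at this beta
print(f"Gibbs estimate of x^T A x / N: {E / N:.3f}")
print(f"lambda_min(A):                 {lam_min:.3f}")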
 The first relation extends a technique first introduced in
@@ -138,55 +126,57 @@ minimum eigenvalue is zero.
 We demonstrate the efficacy of the technique by rederiving a well-known
 result: the large-deviation function for pulling an eigenvalue from the bulk
 of the \textsc{goe} spectrum.
-Consider an ensemble of $N\times N$ matrices $A=B+\omega I$ for $B$ drawn from the \textsc{goe} ensemble with entries
+Consider an ensemble of $N\times N$ matrices $A=B+\mu I$ for $B$ drawn from the \textsc{goe} ensemble with entries
 whose variance is $\sigma^2/N$. We know that the bulk spectrum of $A$ is a
-Wigner semicircle with radius $2\sigma$ shifted by a constant $\omega$.
-Therefore, for $\omega=2\sigma$, the minimum eigenvalue will typically be zero,
-while for $\omega>2\sigma$ the minimum eigenvalue would need to be a large
+Wigner semicircle with radius $2\sigma$ shifted by a constant $\mu$.
+Therefore, for $\mu=2\sigma$, the minimum eigenvalue will typically be zero,
+while for $\mu>2\sigma$ the minimum eigenvalue would need to be a large
 deviation from the typical spectrum and its likelihood will be exponentially
-suppressed with $N$. For $\omega<2\sigma$, the bulk of the typical spectrum contains
+suppressed with $N$. For $\mu<2\sigma$, the bulk of the typical spectrum contains
 zero and therefore a larger $N^2$ deviation, moving an extensive number of
 eigenvalues, would be necessary. This final case cannot be quantified by this
 method, but instead the nonexistence of a large deviation linear in $N$
 appears as the emergence of an imaginary part in the function.
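The typical-case statements here are easy to check directly; a sketch in plain numpy (sizes arbitrary, none of this is from the commit): for $\mu=2\sigma$ the smallest eigenvalue of $B+\mu I$ sits at zero up to $O(N^{-2/3})$ finite-size fluctuations, while a zero minimum eigenvalue at any other $\mu$ is exponentially rare, which is what the large-deviation function below quantifies.

import numpy as np

rng = np.random.default_rng(1)
N, sigma = 2000, 1.0

# GOE with entry variance sigma^2 / N: semicircle supported on [-2 sigma, 2 sigma]
G = rng.normal(size=(N, N))
B = sigma * (G + G.T) / np.sqrt(2 * N)

for mu in (1.5 * sigma, 2.0 * sigma, 2.5 * sigma):
    lam_min = np.linalg.eigvalsh(B + mu * np.eye(N))[0]
    print(f"mu = {mu:.1f}: lambda_min(B + mu I) = {lam_min:+.3f}")
# Typically negative for mu < 2 sigma, approximately zero at mu = 2 sigma,
# and strictly positive for mu > 2 sigma.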
+\begin{widetext}
 As an example, we compute
 \begin{equation} \label{eq:large.dev}
-  e^{NG_\sigma(\omega)}=P_{\lambda_\mathrm{min}(B+\omega I)=0}=\overline{\lim_{\beta\to\infty}\int\frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)e^{-\beta\mathbf x^T(B+\omega I)\mathbf x}}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')e^{-\beta\mathbf x'^T(B+\omega I)\mathbf x'}}\,\delta\big(\mathbf x^T(B+\omega I)\mathbf x\big)}
+  e^{NG_\lambda(\mu)}=P_{\lambda_\mathrm{min}(B+\mu I)=\lambda}=\overline{\lim_{\beta\to\infty}\int\frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)e^{-\beta\mathbf x^T(B+\mu I)\mathbf x}}{\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')e^{-\beta\mathbf x'^T(B+\mu I)\mathbf x'}}\,\delta\big(\mathbf x^T(B+\mu I)\mathbf x-N\lambda\big)}
 \end{equation}
 where the overline is the average over $B$, and we have defined the large
-deviation function $G_\sigma(\omega)$. Using replicas to treat the denominator ($x^{-1}=\lim_{n\to0}x^{n-1}$)
+deviation function $G_\lambda(\mu)$. Using replicas to treat the denominator ($x^{-1}=\lim_{n\to0}x^{n-1}$)
 and transforming the $\delta$-function to its Fourier representation, we have
 \begin{equation}
-  e^{NG_\sigma(\omega)}=\overline{\lim_{\beta\to\infty}\lim_{n\to0}\int d\lambda\prod_{a=1}^n\left[d\mathbf x_a\,\delta(N-\mathbf x_a^T\mathbf x_a)\right]
-  \exp\left\{-\beta\sum_{a=1}^n\mathbf x_a^T(B+\omega I)\mathbf x_a+\lambda\mathbf x_1^T(B+\omega I)\mathbf x_1\right\}}
+  e^{NG_\lambda(\mu)}=\overline{\lim_{\beta\to\infty}\lim_{n\to0}\int d\hat\lambda\prod_{a=1}^n\left[d\mathbf x_a\,\delta(N-\mathbf x_a^T\mathbf x_a)\right]
+  \exp\left\{-\beta\sum_{a=1}^n\mathbf x_a^T(B+\mu I)\mathbf x_a+\hat\lambda\mathbf x_1^T(B+\mu I)\mathbf x_1-N\hat\lambda\lambda\right\}}
 \end{equation}
-having introduced the parameter $\lambda$ in the Fourier representation of the $\delta$-function.
+having introduced the parameter $\hat\lambda$ in the Fourier representation of the $\delta$-function.
 The whole expression, so transformed, is a simple exponential integral linear in the matrix $B$.
 Taking the average over $B$, we have
 \begin{equation}
-  e^{NG_\sigma(\omega)}
-  =\lim_{\beta\to\infty}\lim_{n\to0}\int d\lambda\prod_{a=1}^n\left[d\mathbf x_a\,\delta(N-\mathbf x_a^T\mathbf x_a)\right]
-  \exp\left\{-Nn\beta\omega+N\lambda\omega+\frac{\sigma^2}{N}\left[\beta^2\sum_{ab}^n(\mathbf x_a^T\mathbf x_b)^2
-  -2\beta\lambda\sum_a^n(\mathbf x_a^T\mathbf x_1)^2
-  +\lambda^2N^2
+  e^{NG_\lambda(\mu)}
+  =\lim_{\beta\to\infty}\lim_{n\to0}\int d\hat\lambda\prod_{a=1}^n\left[d\mathbf x_a\,\delta(N-\mathbf x_a^T\mathbf x_a)\right]
+  \exp\left\{-Nn\beta\mu+N\hat\lambda(\mu-\lambda)+\frac{\sigma^2}{N}\left[\beta^2\sum_{ab}^n(\mathbf x_a^T\mathbf x_b)^2
+  -2\beta\hat\lambda\sum_a^n(\mathbf x_a^T\mathbf x_1)^2
+  +\hat\lambda^2N^2
   \right]\right\}
 \end{equation}
 We make the Hubbard--Stratonovich transformation to the matrix field $Q_{ab}=\frac1N\mathbf x_a^T\mathbf x_b$. This gives
 \begin{equation}
-  e^{NG_\sigma(\omega)}
-  =\lim_{\beta\to\infty}\lim_{n\to0}\int d\lambda\,dQ\,
+  e^{NG_\lambda(\mu)}
+  =\lim_{\beta\to\infty}\lim_{n\to0}\int d\hat\lambda\,dQ\,
   \exp N\left\{
-  -n\beta\omega+\lambda\omega+\sigma^2\left[\beta^2\sum_{ab}^nQ_{ab}^2
-  -2\beta\lambda\sum_a^nQ_{1a}^2
-  +\lambda^2
+  -n\beta\mu+\hat\lambda(\mu-\lambda)+\sigma^2\left[\beta^2\sum_{ab}^nQ_{ab}^2
+  -2\beta\hat\lambda\sum_a^nQ_{1a}^2
+  +\hat\lambda^2
   \right]+\frac12\log\det Q\right\}
 \end{equation}
+\end{widetext}
 where $Q_{aa}=1$ because of the spherical constraint.
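For reference, the Fourier representation used for the $\delta$-function here is the standard one, with the $\hat\lambda$ contour running along the imaginary axis; at the saddle point $\hat\lambda$ takes a real value, which is the sense in which it appears as a real order parameter below:
\begin{equation}
  \delta\big(\mathbf x_1^T(B+\mu I)\mathbf x_1-N\lambda\big)
  =\int_{-i\infty}^{i\infty}\frac{d\hat\lambda}{2\pi i}\,
  e^{\hat\lambda\left(\mathbf x_1^T(B+\mu I)\mathbf x_1-N\lambda\right)}
\end{equation}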
 We can evaluate this integral using the saddle point method.
 We make a replica symmetric ansatz for $Q$, because this is a 2-spin model, but with the first row singled out because
-of its unique coupling with $\lambda$. This gives
+of its unique coupling with $\hat\lambda$. This gives
 \begin{equation}
   Q=\begin{bmatrix}
     1&\tilde q_0&\tilde q_0&\cdots&\tilde q_0\\
@@ -203,12 +193,12 @@ and
 \end{equation}
 Inserting these expressions and taking the limit of $n$ to zero, we find
 \begin{equation}
-  e^{NG_\sigma(\omega)}=\lim_{\beta\to\infty}\int d\lambda\,dq_0\,d\tilde q_0\,e^{N\mathcal S_\beta(q_0,\tilde q_0,\lambda)}
+  e^{NG_\lambda(\mu)}=\lim_{\beta\to\infty}\int d\hat\lambda\,dq_0\,d\tilde q_0\,e^{N\mathcal S_\beta(q_0,\tilde q_0,\hat\lambda)}
 \end{equation}
 with the effective action
 \begin{equation}
-  \mathcal S_\beta(q_0,\tilde q_0,\lambda)=\lambda\omega+\sigma^2\left[
-  2\beta^2(q_0^2-\tilde q_0^2)-2\beta\lambda(1-\tilde q_0^2)+\lambda^2
+  \mathcal S_\beta(q_0,\tilde q_0,\hat\lambda)=\hat\lambda(\mu-\lambda)+\sigma^2\left[
+  2\beta^2(q_0^2-\tilde q_0^2)-2\beta\hat\lambda(1-\tilde q_0^2)+\hat\lambda^2
   \right]-\log(1-q_0)+\frac12\log(1-2q_0+\tilde q_0^2)
 \end{equation}
 We need to evaluate the integral above using the saddle point method, but in the limit of $\beta\to\infty$.
@@ -221,48 +211,49 @@ We expect the overlaps to concentrate on one as $\beta$ goes to infinity. We the
 However, taking the limit with $y\neq\tilde y$ results in an expression for the action that diverges with $\beta$.
 To cure this, we must take $\tilde y=y$. The result is
 \begin{equation}
-  \mathcal S_\infty(y,z,\tilde z,\lambda)
-  =\lambda\omega+\sigma^2\big[
-  \lambda^2-4(y+z-\tilde z)
+  \mathcal S_\infty(y,z,\tilde z,\hat\lambda)
+  =\hat\lambda(\mu-\lambda)+\sigma^2\big[
+  \hat\lambda^2-4(y+z-\tilde z)
   \big]+\frac12\log\left(1+2\frac{z-\tilde z}{y^2}\right)
 \end{equation}
-Extremizing this action over the new parameters $y$, $\Delta z=z-\tilde z$, and $\lambda$, we have
+Extremizing this action over the new parameters $y$, $\Delta z=z-\tilde z$, and $\hat\lambda$, we have
 \begin{align}
-  \lambda=-\frac1\sigma\sqrt{\frac{\omega^2}{(2\sigma)^2}-1}
-  &&
-  y=\frac1{2\sigma}\left(\frac{\omega}{2\sigma}-\sqrt{\frac{\omega^2}{(2\sigma)^2}-1}\right)
-  &&
-  \Delta z=\frac1{4\sigma^2}\left(1-\frac{\omega}{2\sigma}\left(\frac\omega{2\sigma}-\sqrt{\frac{\omega^2}{(2\sigma)^2}-1}\right)\right)
+  \hat\lambda=-\frac1\sigma\sqrt{\frac{(\mu-\lambda)^2}{(2\sigma)^2}-1}
+  \\
+  y=\frac1{2\sigma}\left(\frac{\mu-\lambda}{2\sigma}-\sqrt{\frac{(\mu-\lambda)^2}{(2\sigma)^2}-1}\right)
+  \\
+  \Delta z=\frac1{4\sigma^2}\left(1-\frac{\mu-\lambda}{2\sigma}\left(\frac{\mu-\lambda}{2\sigma}-\sqrt{\frac{(\mu-\lambda)^2}{(2\sigma)^2}-1}\right)\right)
 \end{align}
 Inserting this solution into $\mathcal S_\infty$ we find
 \begin{equation}
-  G_\sigma(\omega)
-  =\mathop{\textrm{extremum}}_{y,\Delta z,\lambda}\mathcal S_\infty(y,\Delta z,\lambda)
-  =-\frac{\omega}{2\sigma}\sqrt{\frac{\omega^2}{(2\sigma)^2}-1}
-  +\log\left[
-  \frac{\omega}{2\sigma}+\sqrt{\frac{\omega^2}{(2\sigma)^2}-1}
-  \right]
+  \begin{aligned}
+    &G_\lambda(\mu)
+    =\mathop{\textrm{extremum}}_{y,\Delta z,\hat\lambda}\mathcal S_\infty(y,\Delta z,\hat\lambda) \\
+    &=-\frac{\mu-\lambda}{2\sigma}\sqrt{\frac{(\mu-\lambda)^2}{(2\sigma)^2}-1}
+    +\log\left(
+      \frac{\mu-\lambda}{2\sigma}+\sqrt{\frac{(\mu-\lambda)^2}{(2\sigma)^2}-1}
+    \right)
+  \end{aligned}
 \end{equation}
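This closed form can be read off numerically with a complex square root. A sketch assuming $\lambda=0$ and $\sigma=1$ (choices made for illustration only):

import numpy as np

def G(mu, lam=0.0, sigma=1.0):
    """Large deviation function for lambda_min(B + mu I) = lam."""
    u = (mu - lam) / (2 * sigma) + 0j   # complex so sqrt(u^2 - 1) is defined everywhere
    s = np.sqrt(u**2 - 1)
    return -u * s + np.log(u + s)

for mu in (1.5, 2.0, 2.5, 3.0):
    g = G(mu)
    print(f"mu = {mu:.1f}: G = {g.real:+.4f} {g.imag:+.4f}i")
# G(2.0) = 0 exactly; G < 0 for mu > 2 sigma (exponential suppression);
# a nonzero imaginary part appears for mu < 2 sigma, as described below.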
-This function is plotted in Fig.~\ref{fig:large.dev}. For $\omega<2\sigma$ $G_\sigma(\omega)$ has an
-imaginary part, which makes any additional integral over $\omega$ highly
+This function is plotted in Fig.~\ref{fig:large.dev}. For $\mu<2\sigma$ $G_\lambda(\mu)$ has an
+imaginary part, which makes any additional integral over $\mu$ highly
 oscillatory. This indicates that the existence of a marginal minimum for this
 parameter value corresponds with a large deviation that grows faster than $N$,
 rather like $N^2$, since in this regime the bulk of the typical spectrum is
 over zero and therefore extensively many eigenvalues have to have large
 deviations in order for the smallest eigenvalue to be zero. For
-$\omega\geq2\sigma$ this function gives the large deviation function for the
-probability of seeing a zero eigenvalue given the shift $\omega$.
-$\omega=2\sigma$ is the maximum of the function with a real value, and
+$\mu\geq2\sigma$ this function gives the large deviation function for the
+probability of seeing a zero eigenvalue given the shift $\mu$.
+$\mu=2\sigma$ is the maximum of the function with a real value, and
 corresponds to the intersection of the average spectrum with zero, i.e., a
 pseudogap.
 \begin{figure}
-  \centering
-  \includegraphics{figs/large_deviation.pdf}
+  \includegraphics[width=\columnwidth]{figs/large_deviation.pdf}
   \caption{
-    The large deviation function $G_\sigma(\omega)$ defined in
-    \eqref{eq:large.dev} as a function of the shift $\omega$ to the
+    The large deviation function $G_\lambda(\mu)$ defined in
+    \eqref{eq:large.dev} as a function of the shift $\mu$ to the
     \textsc{goe} diagonal. As expected, $G_\sigma(2\sigma)=0$, while for
-    $\omega>2\sigma$ it is negative and for $\omega<2\sigma$ it gains an
+    $\mu>2\sigma$ it is negative and for $\mu<2\sigma$ it gains an
     imaginary part.
   }
   \label{fig:large.dev}
 \end{figure}
@@ -274,7 +265,30 @@ parameters for which the spectrum is pseudogapped: the equivalent of these
 large-deviation functions will lie on the singular boundary between a purely
 real and complex value.
 
-\subsection{Application to complexity in random landscapes}
+\subsection{Conditioning on a pseudogap}
+
+We have seen that this method effectively conditions a random matrix ensemble
+on its lowest eigenvalue being zero. However, this does not correspond on its
+own to marginal minima. In the previous example, most values of $\mu$ where
+the calculation was valid correspond to matrices with a single isolated
+eigenvalue. However, the marginal minima we are concerned with have
+pseudogapped spectra, where the continuous part of the spectral density has a
+lower bound at zero.
+
+Fortunately, our calculation can be modified to ensure that we consider only
+pseudogapped spectra. First, we insert a shift $\mu$ by hand into the `natural'
+spectrum of the problem at hand, conditioning the trace to have a specific
+value. Then, we choose this artificial shift so that the resulting conditioned
+spectra are pseudogapped. This we can do by looking for the point where the
+order parameter $\hat\lambda$ associated with the marginal condition is zero.
+
+What is the interpretation of this? In general the condition $\hat\lambda=0$
+corresponds to a point where the conditioning does not change the volume
+measured by the integral. Therefore, the typical matrix with the value of $\mu$
+for which $\hat\lambda=0$ has a zero eigenvalue. In isotropic problems where
+isolated eigenvalues in the spectrum are atypical, this implies a pseudogap.
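The prescription "choose $\mu$ so that $\hat\lambda=0$" can be previewed in the GOE example: it should select the shift that puts the typical spectral edge exactly at zero, i.e. $\mu^*=2\sigma$. A sketch locating that shift empirically (sizes and trial count arbitrary):

import numpy as np

rng = np.random.default_rng(2)
N, sigma, trials = 800, 1.0, 20

# Typical smallest eigenvalue of B (GOE, entry variance sigma^2 / N)
lam_mins = []
for _ in range(trials):
    G = rng.normal(size=(N, N))
    B = sigma * (G + G.T) / np.sqrt(2 * N)
    lam_mins.append(np.linalg.eigvalsh(B)[0])

# The shift that places the typical spectral edge exactly at zero
mu_star = -np.mean(lam_mins)
print(f"empirical mu*: {mu_star:.3f}   theory: 2 sigma = {2 * sigma:.3f}")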
+
+\section{Marginal complexity in random landscapes}
 
 The situation in the study of random landscapes is often as follows: an
 ensemble of smooth functions $H:\mathbb R^N\to\mathbb R$ define random
@@ -291,7 +305,7 @@ Such problems can be studied using the method of Lagrange multipliers, with one
 with respect to $\mathbf s$ and $\pmb\omega=\{\omega_1,\ldots,\omega_r\}$. The corresponding gradient and Hessian for the problem are
 \begin{align}
   \nabla H(\mathbf s,\pmb\omega)=\partial H(\mathbf s)+\sum_{i=1}^r\omega_i\partial g_i(\mathbf s)
-  &&
+  \\
   \operatorname{Hess}H(\mathbf s,\pmb\omega)=\partial\partial H(\mathbf s)+\sum_{i=1}^r\omega_i\partial\partial g_i(\mathbf s)
 \end{align}
 The number of stationary points in a landscape for a particular realization $H$ is found by integrating over the Kac--Rice measure
@@ -311,9 +325,9 @@ complexity on the marginality of stationary points.
 We therefore define the number of marginal points in a particular realization $H$ as
 \begin{equation}
   \begin{aligned}
-    &\mathcal N_\text{marginal}(E)
-    =\int d\mu_H(\mathbf s,\pmb\omega\mid E)\,\delta\big(N\lambda_\mathrm{min}(\operatorname{Hess}H(\mathbf s,\pmb\omega))\big) \\
-    &=\lim_{\beta\to\infty}\int d\mu_H(\mathbf s,\pmb\omega\mid E)
+    &\mathcal N_{0}(E,\mu)
+    =\int d\mu_H(\mathbf s,\pmb\omega\mid E,\mu)\,\delta\big(N\lambda_\mathrm{min}(\operatorname{Hess}H(\mathbf s,\pmb\omega))\big) \\
+    &=\lim_{\beta\to\infty}\int d\mu_H(\mathbf s,\pmb\omega\mid E,\mu)
     \frac{d\mathbf x\,\delta(N-\mathbf x^T\mathbf x)\delta(\mathbf x^T\partial\mathbf g(\mathbf s))e^{\beta\mathbf x^T\operatorname{Hess}H(\mathbf s,\pmb\omega)\mathbf x}}
     {\int d\mathbf x'\,\delta(N-\mathbf x'^T\mathbf x')\delta(\mathbf x'^T\partial\mathbf g(\mathbf s))e^{\beta\mathbf x'^T\operatorname{Hess}H(\mathbf s,\pmb\omega)\mathbf x'}}
     \delta\big(\mathbf x^T\operatorname{Hess}H(\mathbf s,\pmb\omega)\mathbf x\big)
@@ -326,22 +340,29 @@ where the $\delta$-functions
 \end{equation}
 ensure that the integrals are constrained to the tangent space of the configuration manifold at the point $\mathbf s$.
 This likewise allows us to define the complexity of marginal points at energy $E$ as
 \begin{equation}
-  \Sigma_\text{marginal}(E)
-  =\frac1N\overline{\log\mathcal N_\text{marginal}(E)}
+  \Sigma_0(E,\mu)
+  =\frac1N\overline{\log\mathcal N_0(E,\mu)}
 \end{equation}
 In practice, this can be computed by introducing replicas to treat the logarithm ($\log x=\lim_{n\to0}\frac\partial{\partial n}x^n$)
 and replicating again to treat each of the normalizations in the numerator.
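To make these objects concrete, the following sketch (not from the paper; a spherical 3-spin Hamiltonian and small sizes are assumed purely for illustration) finds stationary points of the Lagrange condition by random restarts and records the minimum eigenvalue of the Hessian restricted to the tangent space, the quantity whose vanishing defines a marginal point:

import numpy as np
from itertools import permutations
from scipy.linalg import null_space
from scipy.optimize import least_squares

rng = np.random.default_rng(3)
N = 8

# A sample spherical 3-spin Hamiltonian H(s) = sum_ijk J_ijk s_i s_j s_k
# on the sphere s^T s = N (the coupling scale is illustrative only)
Jr = rng.normal(size=(N, N, N)) / N
J = sum(Jr.transpose(p) for p in permutations(range(3))) / 6

def grad(s):   # partial H
    return 3 * np.einsum('ijk,j,k->i', J, s, s)

def hess(s):   # partial partial H
    return 6 * np.einsum('ijk,k->ij', J, s)

def residual(s):
    # gradient of H + omega s, with omega fixed by stationarity along s,
    # plus the spherical constraint itself
    g = grad(s)
    omega = -(s @ g) / N
    return np.append(g + omega * s, s @ s - N)

found = []
for _ in range(300):
    s0 = rng.normal(size=N)
    s0 *= np.sqrt(N) / np.linalg.norm(s0)
    s = least_squares(residual, s0).x
    if np.linalg.norm(residual(s)) > 1e-8:
        continue
    if any(min(np.linalg.norm(s - t), np.linalg.norm(s + t)) < 1e-5 for t in found):
        continue                    # stationary points come in +/- pairs here
    found.append(s)
    omega = -(s @ grad(s)) / N
    H_lagrange = hess(s) + omega * np.eye(N)
    T = null_space(s[None, :])      # orthonormal basis of the tangent space at s
    lam_min = np.linalg.eigvalsh(T.T @ H_lagrange @ T)[0]
    E = np.einsum('ijk,i,j,k->', J, s, s, s) / N
    print(f"E/N = {E:+.4f}   lambda_min(Hess) = {lam_min:+.4f}")
# Marginal points have lambda_min(Hess) = 0; at finite N these occur only as
# near-zero values, which is why the delta function above is implemented
# through the soft beta -> infinity construction.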
 This leads to the expression
 \begin{equation}
   \begin{aligned}
-    \Sigma_\text{marginal}(E)
-    &=\lim_{\beta\to\infty}\lim_{n\to0}\frac1N\frac\partial{\partial n}\int\prod_{a=1}^n\Bigg[d\mu_H(\mathbf s_a,\pmb\omega_a\mid E)\,\delta\big((\mathbf x_a^1)^T\operatorname{Hess}H(\mathbf s_a,\pmb\omega_a)\mathbf x_a^1\big)\\
+    \Sigma_0(E,\mu)
+    &=\lim_{\beta\to\infty}\lim_{n\to0}\frac1N\frac\partial{\partial n}\int\prod_{a=1}^n\Bigg[d\mu_H(\mathbf s_a,\pmb\omega_a\mid E,\mu)\,\delta\big((\mathbf x_a^1)^T\operatorname{Hess}H(\mathbf s_a,\pmb\omega_a)\mathbf x_a^1\big)\\
     &\qquad\times\lim_{m_a\to0}
     \left(\prod_{b=1}^{m_a} d\mathbf x_a^b\,\delta(N-(\mathbf x_a^b)^T\mathbf x_a^b)\delta((\mathbf x_a^b)^T\partial\mathbf g(\mathbf s_a))e^{\beta(\mathbf x_a^b)^T\operatorname{Hess}H(\mathbf s_a,\pmb\omega_a)\mathbf x_a^b}\right)\Bigg]
   \end{aligned}
 \end{equation}
-
-\section{Examples in random landscapes}
+The \emph{marginal} complexity is given by fixing $\mu=\mu_\text{m}$ so that the complexity is stationary with respect to changes in the value of the minimum eigenvalue, or
+\begin{equation}
+  0=\frac\partial{\partial\lambda}\Sigma_\lambda(E,\mu_\text{m}(E))\bigg|_{\lambda=0}
+\end{equation}
+Finally, the marginal complexity is defined by evaluating the complexity conditioned on $\lambda_\text{min}=0$ at $\mu_\text{m}$,
+\begin{equation}
+  \Sigma_\text{m}(E)
+  =\Sigma_0(E,\mu_\text m(E))
+\end{equation}
 
 \subsection{Application to the spherical models}
 
@@ -440,28 +461,131 @@ $\Omega=S^{N-1}\times S^{N-1}$
   +\log\det(Q^{11}Q^{22}-Q^{12}Q^{12})
 \end{equation}
 
-\subsection{Multi-species spherical model}
-
-We consider models whose configuration space consists of the product of $r$
-spheres, each with its own dimension $N_s$, or
-$\Omega=S^{N_1-1}\times\cdots\times S^{N_r-1}$. Coordinates on this space we
-will typically denote
-$\pmb\sigma=(\pmb\sigma^{(1)},\ldots,\pmb\sigma^{(r)})\in\Omega$, with
-$\pmb\sigma^{(s)}\in S^{N_s-1}$ denoting the subcomponent restricted to a
-specific subsphere. The model can be thought of as consisting of centered
-random functions $H:\Omega\to\mathbb R$ with covariance
-\begin{equation}
-  \overline{
-    H(\pmb\sigma_1)H(\pmb\sigma_2)
-  }
-  =f\left(
-    \frac{\pmb\sigma^{(1)}_1\cdot\pmb\sigma^{(1)}_2}{N_1},
-    \ldots,
-    \frac{\pmb\sigma^{(r)}_1\cdot\pmb\sigma^{(r)}_2}{N_r}
+\subsection{Nonlinear least squares}
+
+In this subsection we consider perhaps the simplest example of a non-Gaussian
+landscape: the problem of random nonlinear least squares optimization. Though,
+for reasons we will see, it is easier to make predictions for random nonlinear
+\emph{most} squares, i.e., the problem of maximizing the sum of squared terms.
+We also take a spherical problem with $\mathbf x\in S^{N-1}$, and consider a set
+of $M$ random functions $V_k:S^{N-1}\to\mathbb R$ that are centered Gaussians with covariance
+\begin{equation}
+  \overline{V_i(\mathbf x)V_j(\mathbf x')}=\delta_{ij}f\left(\frac{\mathbf x^T\mathbf x'}N\right)
+\end{equation}
+The energy or cost function is the sum of squares of the $V_k$, or
+\begin{equation}
+  H(\mathbf x)=\frac12\sum_{k=1}^MV_k(\mathbf x)^2
+\end{equation}
+The landscape complexity and large deviations of the ground state for this problem were recently studied in a linear context, with $f(q)=\sigma^2+aq$ \cite{Fyodorov_2020_Counting, Fyodorov_2022_Optimization}. Some results on the ground state of the general nonlinear problem can also be found in \cite{Tublin_2022_A}. In particular, that work indicates that the low-lying minima of the problem tend to be either replica symmetric or full replica symmetry breaking. This is not good news for our analysis of marginal states, because in the former case the problem is typically easy to solve, and in the latter the analysis becomes much more technically challenging.
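The linear case $f(q)=\sigma^2+aq$ studied in the cited works has an explicit realization, $V_k(\mathbf x)=\sigma u_k+\sqrt{a/N}\,\mathbf b_k^T\mathbf x$ with independent standard Gaussian $u_k$ and $\mathbf b_k$ (my construction for illustration, not from the text), which makes the covariance easy to verify by simulation:

import numpy as np

rng = np.random.default_rng(4)
N, M, draws = 40, 10, 5000
sigma2, a = 1.0, 0.5

# Two fixed points on the sphere |x|^2 = N
x1 = rng.normal(size=N); x1 *= np.sqrt(N) / np.linalg.norm(x1)
x2 = rng.normal(size=N); x2 *= np.sqrt(N) / np.linalg.norm(x2)
q = x1 @ x2 / N

# V_k(x) = sigma u_k + sqrt(a/N) b_k . x realizes f(q) = sigma^2 + a q
u = rng.normal(size=(draws, M))
b = rng.normal(size=(draws, M, N))
V1 = np.sqrt(sigma2) * u + np.sqrt(a / N) * (b @ x1)
V2 = np.sqrt(sigma2) * u + np.sqrt(a / N) * (b @ x2)

same = np.mean(V1[:, 0] * V2[:, 0])    # same index k: should give f(q)
cross = np.mean(V1[:, 0] * V2[:, 1])   # different indices: should give 0
print(f"f(q) = {sigma2 + a * q:.4f}, empirical: {same:.4f}, cross: {cross:+.4f}")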
+
+Fortunately, the \emph{maxima} of this problem have a more amenable structure
+for study, as they are typically described by a 1-RSB-like structure. There is a
+heuristic intuition for this: in the limit of $M\to1$, this problem is just the
+square of a spherical spin glass landscape. The distribution and properties of
+stationary points low and high in the spherical spin glass are not changed,
+except that their energies are stretched and minima are transformed into
+maxima. This is why the top of the landscape doesn't qualitatively change. The
+bottom, however, consists of the zero-energy level set in the spherical spin
+glass. This level set is well-connected, and so the ground states should also
+be well connected and flat.
+
+Focusing on the top of the landscape and therefore dealing with a 1-RSB-like
+problem is good for our analysis. First, algorithms will tend to be stuck in
+the ways they are for hard optimization problems, and second we will be able
+to explicitly predict where. Therefore, we will study the most squares problem
+rather than the least squares one. We calculate the complexity of maxima under a replica symmetric ansatz (which covers 1-RSB-like problems) for arbitrary covariance $f$, and then the marginal complexity.
+
+Applying the Lagrange multiplier method detailed above to enforce the spherical constraint, the gradient and Hessian are
+\begin{align}
+  \nabla H(\mathbf x,\omega)=\sum_k^MV_k(\mathbf x)\partial V_k(\mathbf x)+\omega\mathbf x
+  \\
+  \operatorname{Hess}H(\mathbf x,\omega)=\sum_k^M\left[\partial V_k(\mathbf x)\partial V_k(\mathbf x)^T+V_k(\mathbf x)\partial\partial V_k(\mathbf x)\right]+\omega I
+\end{align}
+\begin{widetext}
+The number of stationary points in a circumstance where the determinants add constructively is
+\begin{equation}
+  \begin{aligned}
+    &\mathcal N(E,\mu)^n
+    =\int\prod_{a=1}^nd\mathbf x_a\frac{d\hat{\mathbf x}_a}{(2\pi)^N}d\omega_a\,d\hat\beta_a\,d\hat\mu_a\,d\bar\eta_a\,d\eta_a\,\exp\bigg\{
+    i\hat{\mathbf x}_a^T(V^k(\mathbf x_a)\partial V^k(\mathbf x_a)+\omega_a\mathbf x_a)
+    +\hat\beta_a(NE-\frac12V^k(\mathbf x_a)V^k(\mathbf x_a)) \\
+    &\quad+\bar\eta_a^T(\partial V^k(\mathbf x_a)\partial V^k(\mathbf x_a)^T+V^k(\mathbf x_a)\partial\partial V^k(\mathbf x_a)+\omega_a I)\eta_a
+    +\hat\mu_a(N\mu-\partial V^k(\mathbf x_a)^T\partial V^k(\mathbf x_a)-V^k(\mathbf x_a)\operatorname{Tr}\partial\partial V^k(\mathbf x_a)-N\omega_a)
+    \bigg\}
+  \end{aligned}
+\end{equation}
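For reference, the $\bar\eta_a$, $\eta_a$ here are Grassmann fields implementing the Hessian determinant of the Kac--Rice measure through the standard Berezin integral,
\begin{equation}
  \int d\bar\eta\,d\eta\,e^{\bar\eta^TM\eta}=\det M,
\end{equation}
which produces $\det M$ rather than $|\det M|$; this is the sense in which the determinants must "add constructively" for the expression above to count stationary points correctly.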
+To linearize the argument of the exponential with respect to $V$, we define the following new fields: $w^k_a=V^k(\mathbf x_a)$ and $\mathbf v^k_a=\partial V^k(\mathbf x_a)$.
+Inserting these in $\delta$-functions, we have
+\begin{equation}
+  \begin{aligned}
+    &\mathcal N(E,\mu)^n
+    =\int\prod_{a=1}^nd\mathbf x_a\frac{d\hat{\mathbf x}_a}{(2\pi)^N}d\omega_a\,d\hat\beta_a\,d\hat\mu_a\,d\bar\eta_a\,d\eta_a\,\exp\bigg\{
+    i\hat{\mathbf x}_a^T(w^k_a\mathbf v^k_a+\omega_a\mathbf x_a)
+    +\hat\beta_a(NE-\frac12w^k_aw^k_a) \\
+    &\quad+\bar\eta_a^T(\mathbf v^k_a(\mathbf v^k_a)^T+w^k_a\partial\partial V^k(\mathbf x_a)+\omega_a I)\eta_a
+    +\hat\mu_a(N\mu-(\mathbf v^k_a)^T\mathbf v^k_a-w^k_a\operatorname{Tr}\partial\partial V^k(\mathbf x_a)-N\omega_a) \\
+    &\quad+i\hat w^k_a(w^k_a-V^k(\mathbf x_a))
+    +i(\hat{\mathbf v}^k_a)^T(\mathbf v^k_a-\partial V^k(\mathbf x_a))
+    \bigg\}
+  \end{aligned}
+\end{equation}
+which is now linear in $V$. Averaging over $V$ yields, from only the terms that depend on it and to highest order in $N$,
+\begin{equation}
+  -\frac12\left(
+    f(C_{ab})\hat w^k_a\hat w^k_b
+    +2f'(C_{ab})\hat w^k_a\frac{\mathbf x^T_a\hat{\mathbf v}^k_b}N
+    +f'(C_{ab})\frac{(\hat{\mathbf v}^k_a)^T\hat{\mathbf v}^k_b}N
+    +f''(C_{ab})\left(\frac{\mathbf x_a^T\hat{\mathbf v}^k_b}N\right)^2
+    +f''(C_{ab})w^k_aw^k_bG_{ab}^2
+  \right)
+\end{equation}
+The resulting integrand is Gaussian in the $w$, $\hat w$, $\mathbf v$, and $\hat{\mathbf v}$, with
+\begin{equation}
+  \exp\left\{
+    -\frac12\sum_{k=1}^M\sum_{ab}^n\begin{bmatrix}w_a^k\\\mathbf v_a^k\\\hat w_a^k\\\hat{\mathbf v}_a^k\end{bmatrix}^T
+    \begin{bmatrix}
+      \hat\beta_a\delta_{ab}+G_{ab}^2f''(C_{ab}) & -i\hat{\mathbf x}_a^T\delta_{ab} & -i\delta_{ab} & 0 \\
+      -i\hat{\mathbf x}_a\delta_{ab} & 2(\hat\mu_a I-\bar\eta_a\eta_a^T)\delta_{ab} & 0 & -i\delta_{ab}I\\
+      -i\delta_{ab} & 0 & f(C_{ab}) & \frac1Nf'(C_{ab})\mathbf x_a^T \\
+      0 & -i\delta_{ab}I & \frac1Nf'(C_{ab})\mathbf x_b & \frac1Nf'(C_{ab})I+\frac1{N^2}f''(C_{ab})\mathbf x_a\mathbf x_b^T
+    \end{bmatrix}
+    \begin{bmatrix}w_b^k\\\mathbf v_b^k\\\hat w_b^k\\\hat{\mathbf v}_b^k\end{bmatrix}
+  \right\}
+\end{equation}
+which produces
+\begin{equation}
+  \exp\left\{
+    \frac M2\log\det\left(
+      I+\begin{bmatrix}
+        \hat\beta_a\delta_{ab}+G_{ab}^2f''(C_{ab}) & -i\hat{\mathbf x}_a^T\delta_{ab} \\
+        -i\hat{\mathbf x}_a\delta_{ab} & 2(\hat\mu_a I-\bar\eta_a\eta_a^T)\delta_{ab}
+      \end{bmatrix}
+      \begin{bmatrix}
+        f(C_{ab})&\frac1Nf'(C_{ab})\mathbf x_a^T \\
+        \frac1Nf'(C_{ab})\mathbf x_b & \frac1Nf'(C_{ab})I+\frac1{N^2}f''(C_{ab})\mathbf x_a\mathbf x_b^T
+      \end{bmatrix}
+    \right)
+  \right\}
+\end{equation}
+\begin{equation}
+  \log\det\left(
+    \begin{bmatrix}
+      (\hat\beta_a\delta_{ac}+G_{ac}^2f''(C_{ac}))f(C_{cb})
+      +R_{ab}f'(C_{ab})
+      &
+      \frac1N\left[(\hat\beta_a\delta_{ac}+G_{ac}^2f''(C_{ac}))f'(C_{cb})+R_{ab}f''(C_{ab})\right]\mathbf x_b^T-\frac1Nf'(C_{ab})\hat{\mathbf x}_a^T
+      \\
+      -i\hat{\mathbf x}_af(C_{ab})+\frac1N\hat\mu f'(C_{ab})\mathbf x_b
+    \end{bmatrix}
  \right)
 \end{equation}
-where $f:[-1,1]^r\to\mathbb R$ is an $r$-component function of the overlaps that defines the model.
-
-\printbibliography
+The condition fixing the maximum eigenvalue adds to the integrand
+\begin{equation}
+  \frac12\beta\sum_b^{m_a}\mathbf s^T_b(\mathbf v^k_a(\mathbf v^k_a)^T+w^k_a\partial\partial V^k(\mathbf x_a)+\omega_a I)\mathbf s_b
+  +\frac12\hat\lambda\mathbf s_1^T(\mathbf v^k_a(\mathbf v^k_a)^T+w^k_a\partial\partial V^k(\mathbf x_a)+\omega_a I)\mathbf s_1
+\end{equation}
+\end{widetext}
+
+
+\bibliography{marginal}
 
 \end{document}