From 6d48942d7bdb2a015f2a52e7a290a7a9efb17eec Mon Sep 17 00:00:00 2001
From: Jaron Kent-Dobias
Date: Wed, 6 Jan 2021 18:17:58 +0100
Subject: Cleaned up code and removed unused portions.

---
 langevin.cpp | 38 ++++++++++++++------------------------
 1 file changed, 14 insertions(+), 24 deletions(-)

(limited to 'langevin.cpp')

diff --git a/langevin.cpp b/langevin.cpp
index 49efab4..e5ed0b8 100644
--- a/langevin.cpp
+++ b/langevin.cpp
@@ -69,41 +69,36 @@ Vector findSaddle(const Tensor& J, const Vector& z0, double ε, double δW, doub
   double W;
   std::tie(W, std::ignore) = WdW(J, z0);
 
-  Vector ζ = euclideanToStereographic(z0);
+  Vector z = z0;
+  Vector ζ = euclideanToStereographic(z);
+
   Vector dH;
   Matrix ddH;
-  std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ);
-
-  unsigned steps = 0;
-  unsigned gradSteps = 0;
+  std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ, z);
 
   while (W > ε) {
-    // ddH is complex symmetric, which is (almost always) invertible
+    // ddH is complex symmetric, which is (almost always) invertible, so a
+    // partial pivot LU decomposition can be used.
     Vector dζ = ddH.partialPivLu().solve(dH);
     Vector ζNew = ζ - dζ;
+    Vector zNew = stereographicToEuclidean(ζNew);
 
     double WNew;
-    std::tie(WNew, std::ignore) = WdW(J, stereographicToEuclidean(ζNew));
+    std::tie(WNew, std::ignore) = WdW(J, zNew);
 
     if (WNew < W) { // If Newton's step lowered the objective, accept it!
       ζ = ζNew;
+      z = zNew;
       W = WNew;
     } else { // Otherwise, do gradient descent until W is a factor δW smaller.
-      Vector z;
-      std::tie(W, z) = gradientDescent(J, stereographicToEuclidean(ζ), γ0, W / δW);
+      std::tie(W, z) = gradientDescent(J, z, γ0, W / δW);
       ζ = euclideanToStereographic(z);
-      gradSteps++;
     }
 
-    std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ);
-    steps++;
-
-    if (steps % 100 == 0) {
-      std::cerr << steps << " minimization steps, W is " << W << " " << gradSteps << "." << std::endl;
-    }
+    std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ, z);
   }
 
-  return stereographicToEuclidean(ζ);
+  return z;
 }
 
 std::tuple<double, Vector> langevin(const Tensor& J, const Vector& z0, double T, double γ, unsigned N, Rng& r) {
@@ -189,13 +184,7 @@ int main(int argc, char* argv[]) {
   Rng r;
 
   Tensor J = generateCouplings(N, complex_normal_distribution<>(0, σ, κ), r.engine());
-  Vector z0 = randomVector(N, complex_normal_distribution<>(0, 1, 0), r.engine());
-  z0 *= sqrt(N) / sqrt((Scalar)(z0.transpose() * z0)); // Normalize.
-
-  std::function<bool(double, unsigned)> f = [δ](double W, unsigned) {
-    std::cout << W << std::endl;
-    return W < δ;
-  };
+  Vector z0 = normalize(randomVector(N, complex_normal_distribution<>(0, 1, 0), r.engine()));
 
   Vector zSaddle = findSaddle(J, z0, ε, δ, γ);
 
@@ -209,6 +198,7 @@ int main(int argc, char* argv[]) {
     std::tie(H, std::ignore, ddH) = hamGradHess(J, zNewSaddle);
     Eigen::SelfAdjointEigenSolver<Matrix> es(ddH.adjoint() * ddH);
     std::cout << (zNewSaddle - zSaddle).norm() << " " << real(H) << " " << imag(H) << " " << es.eigenvalues().transpose() << std::endl;
+    std::cerr << M * (i+1) << " steps taken to move " << (zNewSaddle - zSaddle).norm() << ", saddle information saved." << std::endl;
   }
 
   return 0;
--
cgit v1.2.3-70-g09d2
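
Note on the Newton step in findSaddle: Eigen's partialPivLu() is the cheaper
of its two LU factorizations and requires the matrix to be invertible, which
a complex symmetric Hessian almost always is, as the new comment says. A
minimal self-contained sketch of that one step, assuming Vector =
Eigen::VectorXcd and Matrix = Eigen::MatrixXcd as elsewhere in this codebase:

    #include <Eigen/Dense>

    using Matrix = Eigen::MatrixXcd;
    using Vector = Eigen::VectorXcd;

    // One Newton iterate: solve ddH dζ = dH by partial-pivot LU, then step
    // ζ → ζ - dζ. Partial pivoting suffices because ddH is (generically)
    // invertible; a full-pivot LU would also work but costs more.
    Vector newtonIterate(const Matrix& ddH, const Vector& dH, const Vector& ζ) {
      Vector dζ = ddH.partialPivLu().solve(dH);
      return ζ - dζ;
    }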
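
The normalize() helper now called in main() is not defined in this hunk.
Judging from the inline code it replaces, it presumably rescales z so that
zᵀz = N under the complex bilinear (not Hermitian) product. A hypothetical
sketch, assuming Scalar = std::complex<double>:

    #include <Eigen/Dense>
    #include <complex>

    using Scalar = std::complex<double>;
    using Vector = Eigen::VectorXcd;

    // Hypothetical reconstruction of normalize(), mirroring the removed line
    //   z0 *= sqrt(N) / sqrt((Scalar)(z0.transpose() * z0)); // Normalize.
    Vector normalize(const Vector& z) {
      Scalar z2 = (Scalar)(z.transpose() * z); // bilinear square zᵀz, a complex number
      return z * (std::sqrt((double)z.size()) / std::sqrt(z2));
    }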