author | Jaron Kent-Dobias <jaron@kent-dobias.com> | 2021-01-07 11:23:20 +0100 |
---|---|---|
committer | Jaron Kent-Dobias <jaron@kent-dobias.com> | 2021-01-07 11:23:20 +0100 |
commit | 2316044fd02bf22b5b6c0f414874dada2c7603e4 (patch) | |
tree | 4a44c9324a58bfa4d297e254aaf2cc9e63473663 | |
parent | 71e7c3a86a8ea99045f564a52535ed08c4172451 (diff) | |
download | code-2316044fd02bf22b5b6c0f414874dada2c7603e4.tar.gz code-2316044fd02bf22b5b6c0f414874dada2c7603e4.tar.bz2 code-2316044fd02bf22b5b6c0f414874dada2c7603e4.zip |
Implemented some lazy optimizations and C++17isms.
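One of the "C++17isms" here is replacing `std::tie` assignments into pre-declared locals with structured bindings, as in `gradientDescent`, `findSaddle`, and `stereographicHamGradHess` below. A minimal standalone sketch of that pattern, not part of the commit itself, using a hypothetical `compute()` in place of the repo's `WdW`:

```cpp
#include <tuple>

// Hypothetical stand-in for a tuple-returning function like WdW(J, z).
std::tuple<double, int> compute() { return {0.5, 3}; }

int main() {
  // Pre-C++17 style: declare the locals, then unpack with std::tie.
  double w;
  int n;
  std::tie(w, n) = compute();

  // C++17 structured bindings: declare and unpack in one statement.
  auto [w2, n2] = compute();

  return (w == w2 && n == n2) ? 0 : 1;
}
```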
-rw-r--r-- | langevin.cpp | 19
-rw-r--r-- | p-spin.hpp | 21
-rw-r--r-- | stereographic.hpp | 6
-rw-r--r-- | tensor.hpp | 8
4 files changed, 26 insertions, 28 deletions
diff --git a/langevin.cpp b/langevin.cpp
index 1b328b6..b67d358 100644
--- a/langevin.cpp
+++ b/langevin.cpp
@@ -17,7 +17,7 @@ using Rng = randutils::random_generator<pcg32>;
 
 Vector normalize(const Vector& z) {
-  return z * sqrt(z.size()) / sqrt((Scalar)(z.transpose() * z));
+  return z * sqrt((double)z.size() / (Scalar)(z.transpose() * z));
 }
 
 template <class Distribution, class Generator>
@@ -33,19 +33,14 @@ Vector randomVector(unsigned N, Distribution d, Generator& r) {
 std::tuple<double, Vector> gradientDescent(const Tensor& J, const Vector& z0, double ε, double γ0 = 1, double δγ = 2) {
   Vector z = z0;
-
-  double W;
-  Vector dW;
-  std::tie(W, dW) = WdW(J, z);
-
   double γ = γ0;
+  auto [W, dW] = WdW(J, z);
+
   while (W > ε) {
     Vector zNew = normalize(z - γ * dW.conjugate());
-    double WNew;
-    Vector dWNew;
-    std::tie(WNew, dWNew) = WdW(J, zNew);
+    auto [WNew, dWNew] = WdW(J, zNew);
 
     if (WNew < W) { // If the step lowered the objective, accept it!
       z = zNew;
@@ -66,12 +61,12 @@ std::tuple<double, Vector> gradientDescent(const Tensor& J, const Vector& z0, do
 }
 
 Vector findSaddle(const Tensor& J, const Vector& z0, double ε, double δW = 2, double γ0 = 1, double δγ = 2) {
-  double W;
-  std::tie(W, std::ignore) = WdW(J, z0);
-
   Vector z = z0;
   Vector ζ = euclideanToStereographic(z);
 
+  double W;
+  std::tie(W, std::ignore) = WdW(J, z);
+
   Vector dH;
   Matrix ddH;
   std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ, z);
diff --git a/p-spin.hpp b/p-spin.hpp
--- a/p-spin.hpp
+++ b/p-spin.hpp
@@ -19,12 +19,13 @@ using Tensor = Eigen::Tensor<Scalar, PSPIN_P>;
 std::tuple<Scalar, Vector, Matrix> hamGradHess(const Tensor& J, const Vector& z) {
   Matrix Jz = contractDown(J, z); // Contracts J into p - 2 copies of z.
   Vector Jzz = Jz * z;
+  Scalar Jzzz = Jzz.transpose() * z;
 
-  double f = factorial(p);
+  double pBang = factorial(p);
 
-  Matrix hessian = ((p - 1) * p / f) * Jz;
-  Vector gradient = (p / f) * Jzz;
-  Scalar hamiltonian = (1 / f) * Jzz.transpose() * z;
+  Matrix hessian = ((p - 1) * p / pBang) * Jz;
+  Vector gradient = (p / pBang) * Jzz;
+  Scalar hamiltonian = Jzzz / pBang;
 
   return {hamiltonian, gradient, hessian};
 }
@@ -34,10 +35,16 @@ std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
   Matrix hessian;
   std::tie(std::ignore, gradient, hessian) = hamGradHess(J, z);
 
-  Vector projectedGradient = (gradient - ((gradient.transpose() * z) * z / (double)z.size())).conjugate();
+  Scalar zGrad = gradient.transpose() * z;
+  double N = z.size();
 
-  double W = projectedGradient.cwiseAbs2().sum();
-  Vector dW = hessian * projectedGradient - ((z.transpose() * gradient) * projectedGradient + (z.transpose() * projectedGradient) * (gradient + hessian * z)) / (double)z.size();
+  Vector projGrad = gradient - (zGrad / N) * z;
+  Vector projGradConj = projGrad.conjugate();
+
+  Scalar zProjGrad = z.transpose() * projGradConj;
+
+  double W = projGrad.norm();
+  Vector dW = hessian * (projGradConj - (zProjGrad / N) * z) - (zGrad * projGradConj + zProjGrad * gradient) / N;
 
   return {W, dW};
 }
diff --git a/stereographic.hpp b/stereographic.hpp
index 8313f25..61d81c5 100644
--- a/stereographic.hpp
+++ b/stereographic.hpp
@@ -51,11 +51,7 @@ Matrix stereographicJacobian(const Vector& ζ) {
 }
 
 std::tuple<Scalar, Vector, Matrix> stereographicHamGradHess(const Tensor& J, const Vector& ζ, const Vector& z) {
-  Scalar hamiltonian;
-  Vector gradZ;
-  Matrix hessZ;
-  std::tie(hamiltonian, gradZ, hessZ) = hamGradHess(J, z);
-
+  auto [hamiltonian, gradZ, hessZ] = hamGradHess(J, z);
   Matrix jacobian = stereographicJacobian(ζ);
 
   Matrix metric = jacobian * jacobian.adjoint();
diff --git a/tensor.hpp b/tensor.hpp
--- a/tensor.hpp
+++ b/tensor.hpp
@@ -55,12 +55,12 @@ contractDown(const Eigen::Tensor<Scalar, 2>& J, const Eigen::Matrix<Scalar, Eige
   return Eigen::Map<const Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>(J.data(), z.size(), z.size());
 }
 
+const std::array<Eigen::IndexPair<int>, 1> ip00 = {Eigen::IndexPair<int>(0, 0)};
+
 template <class Scalar, int r>
 Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>
 contractDown(const Eigen::Tensor<Scalar, r>& J, const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& z) {
-  Eigen::Tensor<Scalar, 1> zT =
-      Eigen::TensorMap<Eigen::Tensor<const Scalar, 1>>(z.data(), {z.size()});
-  std::array<Eigen::IndexPair<int>, 1> ip = {Eigen::IndexPair<int>(0, 0)};
-  Eigen::Tensor<Scalar, r - 1> Jz = J.contract(zT, ip);
+  Eigen::Tensor<Scalar, 1> zT = Eigen::TensorMap<Eigen::Tensor<const Scalar, 1>>(z.data(), {z.size()});
+  Eigen::Tensor<Scalar, r - 1> Jz = J.contract(zT, ip00);
   return contractDown(Jz, z);
 }
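The `tensor.hpp` change hoists the contraction index pair into a shared `ip00` constant instead of rebuilding it on every recursive `contractDown` call. A minimal standalone sketch of that Eigen tensor-vector contraction, assuming Eigen's unsupported Tensor module is available; the dimension `N` and the random data are made up for illustration:

```cpp
#include <array>
#include <iostream>
#include <Eigen/Dense>
#include <unsupported/Eigen/CXX11/Tensor>

// Index pair shared by every contraction: contract dimension 0 of the tensor
// with dimension 0 of the vector, as in the ip00 constant added by the commit.
const std::array<Eigen::IndexPair<int>, 1> ip00 = {Eigen::IndexPair<int>(0, 0)};

int main() {
  const int N = 4;
  Eigen::Tensor<double, 2> J(N, N);
  J.setRandom();

  // View an Eigen vector as a rank-1 tensor without copying.
  Eigen::VectorXd z = Eigen::VectorXd::Random(N);
  Eigen::TensorMap<Eigen::Tensor<const double, 1>> zT(z.data(), z.size());

  // One contraction step: a rank-2 tensor times a vector gives a rank-1 tensor.
  Eigen::Tensor<double, 1> Jz = J.contract(zT, ip00);
  std::cout << Jz << std::endl;
  return 0;
}
```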