From 2316044fd02bf22b5b6c0f414874dada2c7603e4 Mon Sep 17 00:00:00 2001
From: Jaron Kent-Dobias
Date: Thu, 7 Jan 2021 11:23:20 +0100
Subject: Implemented some lazy optimizations and C++17isms.

---
 p-spin.hpp | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/p-spin.hpp b/p-spin.hpp
index b90d80b..16e0a56 100644
--- a/p-spin.hpp
+++ b/p-spin.hpp
@@ -19,12 +19,13 @@ using Tensor = Eigen::Tensor;
 std::tuple<Scalar, Vector, Matrix> hamGradHess(const Tensor& J, const Vector& z) {
   Matrix Jz = contractDown(J, z); // Contracts J into p - 2 copies of z.
   Vector Jzz = Jz * z;
+  Scalar Jzzz = Jzz.transpose() * z;
 
-  double f = factorial(p);
+  double pBang = factorial(p);
 
-  Matrix hessian = ((p - 1) * p / f) * Jz;
-  Vector gradient = (p / f) * Jzz;
-  Scalar hamiltonian = (1 / f) * Jzz.transpose() * z;
+  Matrix hessian = ((p - 1) * p / pBang) * Jz;
+  Vector gradient = (p / pBang) * Jzz;
+  Scalar hamiltonian = Jzzz / pBang;
 
   return {hamiltonian, gradient, hessian};
 }
@@ -34,10 +35,16 @@ std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
   Matrix hessian;
   std::tie(std::ignore, gradient, hessian) = hamGradHess(J, z);
 
-  Vector projectedGradient = (gradient - ((gradient.transpose() * z) * z / (double)z.size())).conjugate();
+  Scalar zGrad = gradient.transpose() * z;
+  double N = z.size();
 
-  double W = projectedGradient.cwiseAbs2().sum();
-  Vector dW = hessian * projectedGradient - ((z.transpose() * gradient) * projectedGradient + (z.transpose() * projectedGradient) * (gradient + hessian * z)) / (double)z.size();
+  Vector projGrad = gradient - (zGrad / N) * z;
+  Vector projGradConj = projGrad.conjugate();
+
+  Scalar zProjGrad = z.transpose() * projGradConj;
+
+  double W = projGrad.norm();
+  Vector dW = hessian * (projGradConj - (zProjGrad / N) * z) - (zGrad * projGradConj + zProjGrad * gradient) / N;
 
   return {W, dW};
 }
-- 
cgit v1.2.3-54-g00ecf
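
A note on the projection appearing in WdW above (not part of the patch): the new body names the intermediate quantity zGrad = z^T (grad) and subtracts (zGrad / N) z from the gradient, which removes its component along z whenever z lies on the constraint surface z^T z = N. The standalone sketch below illustrates only that projection step with made-up vectors; the Scalar/Vector aliases mirror what p-spin.hpp appears to use, the Eigen include path may need adjusting for your system, and nothing here is taken from the repository beyond the formula itself.

#include <complex>
#include <iostream>

#include <eigen3/Eigen/Dense> // adjust to <Eigen/Dense> depending on your install

using Scalar = std::complex<double>;
using Vector = Eigen::VectorXcd;

int main() {
  const unsigned N = 4;

  // A random point rescaled onto the complex sphere z^T z = N (plain
  // transpose, no conjugation, matching the convention in the diff).
  Vector z = Vector::Random(N);
  Scalar zz = z.transpose() * z; // Eigen converts the 1x1 inner product to a scalar
  z *= std::sqrt((double)N / zz);

  Vector gradient = Vector::Random(N); // stand-in for the p-spin gradient

  // The projection from the patch: strip the component of the gradient
  // along z, leaving a vector tangent to the constraint surface.
  Scalar zGrad = gradient.transpose() * z;
  Vector projGrad = gradient - (zGrad / (double)N) * z;

  // z^T projGrad should vanish up to rounding error.
  Scalar residual = projGrad.transpose() * z;
  std::cout << "z^T projGrad = " << residual << std::endl;
}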