From dedcfe0c9aa1ae79dc4d26c559f239571bb26394 Mon Sep 17 00:00:00 2001
From: Jaron Kent-Dobias
Date: Thu, 14 Jan 2021 15:44:26 +0100
Subject: Updated W and dW to use correct constrained gradient.

---
 p-spin.hpp | 38 +++++++++++++-------------------------
 1 file changed, 13 insertions(+), 25 deletions(-)

diff --git a/p-spin.hpp b/p-spin.hpp
index 3532556..91e0152 100644
--- a/p-spin.hpp
+++ b/p-spin.hpp
@@ -26,38 +26,26 @@ std::tuple<Scalar, Vector, Matrix> hamGradHess(const Tensor& J, const Vector& z)
   return {hamiltonian, gradient, hessian};
 }
 
-std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
-  /*
-  Vector gradient;
-  Matrix hessian;
-  std::tie(std::ignore, gradient, hessian) = hamGradHess(J, z);
-
-  Scalar zGrad = gradient.transpose() * z;
-  double N = z.size();
-
-  Vector projGrad = gradient - (zGrad / N) * z;
-  Vector projGradConj = projGrad.conjugate();
-
-  Scalar zProjGrad = z.transpose() * projGradConj;
-
-  double W = projGrad.norm();
-  Vector dW = hessian * projGradConj - (zGrad * projGradConj + (z.transpose() * projGradConj) * (gradient + hessian * z)) / N;
-  */
+Vector project(const Vector& z, const Vector& x) {
+  Scalar xz = x.transpose() * z;
+  return x - (xz / z.squaredNorm()) * z.conjugate();
+}
 
+std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
   Vector dH;
   Matrix ddH;
   std::tie(std::ignore, dH, ddH) = hamGradHess(J, z);
 
-  double N = z.size();
-  Scalar dHz = (Scalar)(dH.transpose() * z) / N;
-
-  Vector pdH = dH - dHz * z;
-  Vector pdHc = pdH.conjugate();
+  double a = z.squaredNorm();
+  Vector dzdt = project(z, dH.conjugate());
 
-  Scalar pdHcz = pdH.dot(z) / N;
+  Scalar A = (Scalar)(z.transpose() * dzdt) / a;
+  Scalar B = dH.dot(z) / a;
 
-  double W = pdH.squaredNorm();
-  Vector dW = ddH * (pdHc - pdHcz * z) - (dHz * pdHc + pdHcz * dH);
+  double W = dzdt.squaredNorm();
+  Vector dW = ddH * (dzdt - A * z.conjugate())
+            + 2 * (conj(A) * B * z).real()
+            - conj(B) * dzdt - conj(A) * dH.conjugate();
 
   return {W, dW};
 }
-- 
cgit v1.2.3-70-g09d2
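
Not part of the patch: a minimal, standalone sketch of what the new project() computes, for review context. The Scalar and Vector aliases and the small test driver below are assumptions (Eigen complex types, not shown in this hunk); only the body of project() is taken from the diff above.

// Standalone check of the projection introduced by this patch.
#include <complex>
#include <iostream>
#include <Eigen/Dense>

using Scalar = std::complex<double>;  // assumed alias; the real one is defined elsewhere in p-spin.hpp
using Vector = Eigen::VectorXcd;      // assumed alias; the real one is defined elsewhere in p-spin.hpp

// As in the patch: subtract from x its component along z with respect to the
// bilinear pairing x^T z, using conj(z) so that the result pairs to zero with z.
Vector project(const Vector& z, const Vector& x) {
  Scalar xz = x.transpose() * z;
  return x - (xz / z.squaredNorm()) * z.conjugate();
}

int main() {
  Vector z = Vector::Random(12);
  Vector x = Vector::Random(12);

  Vector px = project(z, x);

  // z^T project(z, x) vanishes identically, so a step dz/dt = project(z, x)
  // leaves z^T z unchanged to first order; this is consistent with a
  // holomorphic spherical constraint of the form z^T z = const, which the
  // commit message presumably means by "constrained gradient".
  Scalar residual = (Scalar)(z.transpose() * px);
  std::cout << "z^T project(z, x) = " << residual << " (expected ~0)" << std::endl;
}

With random vectors the printed residual sits at machine precision, which gives a quick way to confirm that the descent direction dzdt = project(z, dH.conjugate()) used in the new WdW stays on the constraint surface to first order.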