diff options
author | Jaron Kent-Dobias <jaron@kent-dobias.com> | 2021-01-08 18:04:16 +0100 |
---|---|---|
committer | Jaron Kent-Dobias <jaron@kent-dobias.com> | 2021-01-08 18:04:16 +0100 |
commit | 3279288783f64e8a8e8fb6394d66a23f49899869 (patch) | |
tree | 6bb2d0c163aa3eaf168268ea41a97967af8394ad /p-spin.hpp | |
parent | 90b863e24596772d492ec2a344b4f9f7fc1680db (diff) | |
download | code-3279288783f64e8a8e8fb6394d66a23f49899869.tar.gz code-3279288783f64e8a8e8fb6394d66a23f49899869.tar.bz2 code-3279288783f64e8a8e8fb6394d66a23f49899869.zip |
Fixed some bugs, and made the simulation catch errors correctly.
Diffstat (limited to 'p-spin.hpp')
-rw-r--r-- | p-spin.hpp | 19 |
1 file changed, 18 insertions, 1 deletion
@@ -27,6 +27,7 @@ std::tuple<Scalar, Vector, Matrix> hamGradHess(const Tensor& J, const Vector& z)
 }
 
 std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
+  /*
   Vector gradient;
   Matrix hessian;
   std::tie(std::ignore, gradient, hessian) = hamGradHess(J, z);
@@ -40,7 +41,23 @@ std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
   Scalar zProjGrad = z.transpose() * projGradConj;
 
   double W = projGrad.norm();
-  Vector dW = hessian * (projGradConj - (zProjGrad / N) * z) - (zGrad * projGradConj + zProjGrad * gradient) / N;
+  Vector dW = hessian * projGradConj - (zGrad * projGradConj + (z.transpose() * projGradConj) * (gradient + hessian * z)) / N;
+  */
+
+  Vector dH;
+  Matrix ddH;
+  std::tie(std::ignore, dH, ddH) = hamGradHess(J, z);
+
+  double N = z.size();
+  Scalar dHz = (Scalar)(dH.transpose() * z) / N;
+
+  Vector pdH = dH - dHz * z;
+  Vector pdHc = pdH.conjugate();
+
+  Scalar pdHcz = pdH.dot(z) / N;
+
+  double W = pdH.squaredNorm();
+  Vector dW = ddH * (pdHc - pdHcz * z) - (dHz * pdHc + pdHcz * dH);
 
   return {W, dW};
 }