path: root/p-spin.hpp
author     Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-01-07 11:23:20 +0100
committer  Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-01-07 11:23:20 +0100
commit     2316044fd02bf22b5b6c0f414874dada2c7603e4 (patch)
tree       4a44c9324a58bfa4d297e254aaf2cc9e63473663 /p-spin.hpp
parent     71e7c3a86a8ea99045f564a52535ed08c4172451 (diff)
Implemented some lazy optimizations and C++17isms.
Diffstat (limited to 'p-spin.hpp')
-rw-r--r--  p-spin.hpp  21
1 files changed, 14 insertions, 7 deletions
diff --git a/p-spin.hpp b/p-spin.hpp
index b90d80b..16e0a56 100644
--- a/p-spin.hpp
+++ b/p-spin.hpp
@@ -19,12 +19,13 @@ using Tensor = Eigen::Tensor<Scalar, PSPIN_P>;
 std::tuple<Scalar, Vector, Matrix> hamGradHess(const Tensor& J, const Vector& z) {
   Matrix Jz = contractDown(J, z); // Contracts J into p - 2 copies of z.
   Vector Jzz = Jz * z;
+  Scalar Jzzz = Jzz.transpose() * z;
 
-  double f = factorial(p);
+  double pBang = factorial(p);
 
-  Matrix hessian = ((p - 1) * p / f) * Jz;
-  Vector gradient = (p / f) * Jzz;
-  Scalar hamiltonian = (1 / f) * Jzz.transpose() * z;
+  Matrix hessian = ((p - 1) * p / pBang) * Jz;
+  Vector gradient = (p / pBang) * Jzz;
+  Scalar hamiltonian = Jzzz / pBang;
 
   return {hamiltonian, gradient, hessian};
 }
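
The refactor above caches the shared contractions (Jz, Jzz, and now Jzzz) so the Hamiltonian, gradient, and Hessian each reuse work done once. A minimal call-site sketch in the C++17 spirit of the commit message; the structured binding and the assumption that J and z are an already-initialized coupling Tensor and configuration Vector are illustrative, not taken from the commit:

    // Hypothetical call site; J (coupling Tensor) and z (configuration
    // Vector) are assumed to be initialized elsewhere.
    auto [hamiltonian, gradient, hessian] = hamGradHess(J, z);
    // From one set of contractions: hamiltonian = Jzzz / p!,
    // gradient = (p / p!) * Jzz, and hessian = (p * (p - 1) / p!) * Jz.
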
@@ -34,10 +35,16 @@ std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
   Matrix hessian;
   std::tie(std::ignore, gradient, hessian) = hamGradHess(J, z);
 
-  Vector projectedGradient = (gradient - ((gradient.transpose() * z) * z / (double)z.size())).conjugate();
+  Scalar zGrad = gradient.transpose() * z;
+  double N = z.size();
 
-  double W = projectedGradient.cwiseAbs2().sum();
-  Vector dW = hessian * projectedGradient - ((z.transpose() * gradient) * projectedGradient + (z.transpose() * projectedGradient) * (gradient + hessian * z)) / (double)z.size();
+  Vector projGrad = gradient - (zGrad / N) * z;
+  Vector projGradConj = projGrad.conjugate();
+
+  Scalar zProjGrad = z.transpose() * projGradConj;
+
+  double W = projGrad.norm();
+  Vector dW = hessian * (projGradConj - (zProjGrad / N) * z) - (zGrad * projGradConj + zProjGrad * gradient) / N;
 
   return {W, dW};
 }
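
Here W is the norm of the gradient with its component along z projected out (the unconjugated transpose in the projection reads as tangency to the constraint z^T z = N), so W vanishes exactly at stationary points of the Hamiltonian on that surface, and dW is the derivative a minimizer would follow. A minimal call-site sketch; the tolerance is an assumed value, not part of the commit:

    // Hypothetical stationarity check; the tolerance 1e-13 is illustrative.
    auto [W, dW] = WdW(J, z); // C++17 structured binding
    if (W < 1e-13) {
      // The projected gradient has vanished: z is numerically a stationary
      // point of the Hamiltonian on the constraint surface. Otherwise dW
      // supplies the derivative information a descent routine would use.
    }
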