author    Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-01-08 18:04:16 +0100
committer Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-01-08 18:04:16 +0100
commit    3279288783f64e8a8e8fb6394d66a23f49899869
tree      6bb2d0c163aa3eaf168268ea41a97967af8394ad
parent    90b863e24596772d492ec2a344b4f9f7fc1680db
Fixed some bugs, and made the simulation catch errors correctly.
-rw-r--r--  langevin.cpp | 29
-rw-r--r--  p-spin.hpp   | 19
2 files changed, 38 insertions(+), 10 deletions(-)
diff --git a/langevin.cpp b/langevin.cpp
index f494809..cf61b85 100644
--- a/langevin.cpp
+++ b/langevin.cpp
@@ -26,6 +26,12 @@ Vector randomVector(unsigned N, Distribution d, Generator& r) {
   return z;
 }
 
+class gradientDescentStallException: public std::exception {
+  virtual const char* what() const throw() {
+    return "Gradient descent stalled.";
+  }
+} gradientDescentStall;
+
 std::tuple<double, Vector> gradientDescent(const Tensor& J, const Vector& z0, double ε, double γ0 = 1, double δγ = 2) {
   Vector z = z0;
   double γ = γ0;
@@ -46,9 +52,8 @@ std::tuple<double, Vector> gradientDescent(const Tensor& J, const Vector& z0, do
       γ /= δγ;
     }
 
-    if (γ < 1e-15) {
-      std::cerr << "Gradient descent stalled." << std::endl;
-      exit(1);
+    if (γ < 1e-50) {
+      throw gradientDescentStall;
     }
   }
 
@@ -182,13 +187,19 @@ int main(int argc, char* argv[]) {
   Vector z = zSaddle;
   for (unsigned i = 0; i < n; i++) {
     std::tie(W, z) = langevin(J, z, T, γ, M, r);
-    Vector zNewSaddle = findSaddle(J, z, ε);
-    Scalar H;
-    Matrix ddH;
-    std::tie(H, std::ignore, ddH) = hamGradHess(J, zNewSaddle);
-    Eigen::SelfAdjointEigenSolver<Matrix> es(ddH.adjoint() * ddH);
-    std::cout << (zNewSaddle - zSaddle).norm() << " " << real(H) << " " << imag(H) << " " << es.eigenvalues().transpose() << std::endl;
+    try {
+      Vector zNewSaddle = findSaddle(J, z, ε);
+      Scalar H;
+      Matrix ddH;
+      std::tie(H, std::ignore, ddH) = hamGradHess(J, zNewSaddle);
+      Eigen::SelfAdjointEigenSolver<Matrix> es(ddH.adjoint() * ddH);
+      std::cout << N << "\t" << M * (i+1) << "\t" << H << "\t"
+                << zNewSaddle.transpose() << "\t" << es.eigenvalues().transpose()
+                << std::endl;
       std::cerr << M * (i+1) << " steps taken to move " << (zNewSaddle - zSaddle).norm() << ", saddle information saved." << std::endl;
+    } catch (std::exception& e) {
+      std::cerr << "Could not find a saddle: " << e.what() << std::endl;
+    }
   }
 
   return 0;
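
The change above replaces a hard exit(1) on a stalled gradient descent with a thrown exception, so that main can catch the failure, log it, and continue with the next Langevin sample instead of aborting the whole run. Below is a minimal self-contained sketch of the same pattern; the names toyDescent, stall, and the toy objective are illustrative, not from this codebase.

#include <exception>
#include <initializer_list>
#include <iostream>

class stallException : public std::exception {
  virtual const char* what() const throw() {
    return "Descent stalled.";
  }
} stall;

// Toy steepest descent on f(x) = x^2 / 2 whose step size shrinks every
// iteration; once the step underflows it throws instead of exiting.
double toyDescent(double x, double gamma) {
  for (unsigned i = 0; i < 100; i++) {
    x -= gamma * x;
    gamma /= 2;
    if (gamma < 1e-50) {
      throw stall;  // report the stall to the caller, as in the commit
    }
  }
  return x;
}

int main() {
  for (double gamma0 : {1.0, 1e-49}) {  // second start is doomed to stall
    try {
      std::cout << "Converged to " << toyDescent(1.0, gamma0) << std::endl;
    } catch (std::exception& e) {
      std::cerr << "Sample skipped: " << e.what() << std::endl;
    }
  }
  return 0;
}

As in the commit, a single pre-constructed exception object is thrown, and the caller catches by std::exception& so that any failure surfacing from the saddle search can be handled uniformly.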
diff --git a/p-spin.hpp b/p-spin.hpp
index 83e4e39..3532556 100644
--- a/p-spin.hpp
+++ b/p-spin.hpp
@@ -27,6 +27,7 @@ std::tuple<Scalar, Vector, Matrix> hamGradHess(const Tensor& J, const Vector& z)
 }
 
 std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
+  /*
   Vector gradient;
   Matrix hessian;
   std::tie(std::ignore, gradient, hessian) = hamGradHess(J, z);
@@ -40,7 +41,23 @@ std::tuple<double, Vector> WdW(const Tensor& J, const Vector& z) {
   Scalar zProjGrad = z.transpose() * projGradConj;
 
   double W = projGrad.norm();
-  Vector dW = hessian * (projGradConj - (zProjGrad / N) * z) - (zGrad * projGradConj + zProjGrad * gradient) / N;
+  Vector dW = hessian * projGradConj - (zGrad * projGradConj + (z.transpose() * projGradConj) * (gradient + hessian * z)) / N;
+  */
+
+  Vector dH;
+  Matrix ddH;
+  std::tie(std::ignore, dH, ddH) = hamGradHess(J, z);
+
+  double N = z.size();
+  Scalar dHz = (Scalar)(dH.transpose() * z) / N;
+
+  Vector pdH = dH - dHz * z;
+  Vector pdHc = pdH.conjugate();
+
+  Scalar pdHcz = pdH.dot(z) / N;
+
+  double W = pdH.squaredNorm();
+  Vector dW = ddH * (pdHc - pdHcz * z) - (dHz * pdHc + pdHcz * dH);
 
   return {W, dW};
 }
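
The rewritten WdW computes W as the squared norm of the Hamiltonian gradient with its radial component projected out, pdH = dH - ((dH·z)/N) z, together with a gradient dW of that quantity for use in descent. Below is a self-contained sketch of the same projection algebra, substituting a toy diagonal Hamiltonian H(z) = Σᵢ zᵢ³ / 3 (so dHᵢ = zᵢ² and ddH = diag(2 zᵢ)) for the p-spin tensor contraction; the toy Hamiltonian and the type aliases are assumptions for illustration only.

#include <complex>
#include <iostream>
#include <Eigen/Dense>

using Scalar = std::complex<double>;
using Vector = Eigen::VectorXcd;
using Matrix = Eigen::MatrixXcd;

int main() {
  unsigned n = 4;
  Vector z = Vector::Random(n);

  // Toy Hamiltonian H(z) = Σᵢ zᵢ³ / 3, so dHᵢ = zᵢ² and ddH = diag(2 zᵢ).
  Vector dH = z.cwiseProduct(z);
  Matrix ddH = Matrix::Zero(n, n);
  ddH.diagonal() = 2.0 * z;

  double N = z.size();
  Scalar dHz = (Scalar)(dH.transpose() * z) / N;  // radial component of dH

  Vector pdH = dH - dHz * z;      // gradient with the z-direction projected out
  Vector pdHc = pdH.conjugate();
  Scalar pdHcz = pdH.dot(z) / N;  // Eigen's dot() conjugates its first argument

  double W = pdH.squaredNorm();   // distance from stationarity on the sphere
  Vector dW = ddH * (pdHc - pdHcz * z) - (dHz * pdHc + pdHcz * dH);

  std::cout << "W  = " << W << "\ndW = " << dW.transpose() << std::endl;
  return 0;
}

The last two lines mirror the commit's expressions verbatim, so this scaffold can also serve as a scratch pad for checking dW against a finite-difference estimate of W.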