Diffstat (limited to 'langevin.cpp')
-rw-r--r--  langevin.cpp  38
1 file changed, 14 insertions(+), 24 deletions(-)
diff --git a/langevin.cpp b/langevin.cpp
index 49efab4..e5ed0b8 100644
--- a/langevin.cpp
+++ b/langevin.cpp
@@ -69,41 +69,36 @@ Vector findSaddle(const Tensor& J, const Vector& z0, double ε, double δW, double γ0) {
double W;
std::tie(W, std::ignore) = WdW(J, z0);
- Vector ζ = euclideanToStereographic(z0);
+ Vector z = z0;
+ Vector ζ = euclideanToStereographic(z);
+
Vector dH;
Matrix ddH;
- std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ);
-
- unsigned steps = 0;
- unsigned gradSteps = 0;
+ std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ, z);
while (W > ε) {
- // ddH is complex symmetric, which is (almost always) invertible
+ // ddH is complex symmetric, which is (almost always) invertible, so a
+ // partial pivot LU decomposition can be used.
Vector dζ = ddH.partialPivLu().solve(dH);
Vector ζNew = ζ - dζ;
+ Vector zNew = stereographicToEuclidean(ζNew);
double WNew;
- std::tie(WNew, std::ignore) = WdW(J, stereographicToEuclidean(ζNew));
+ std::tie(WNew, std::ignore) = WdW(J, zNew);
if (WNew < W) { // If Newton's step lowered the objective, accept it!
ζ = ζNew;
+ z = zNew;
W = WNew;
} else { // Otherwise, do gradient descent until W is a factor δW smaller.
- Vector z;
- std::tie(W, z) = gradientDescent(J, stereographicToEuclidean(ζ), γ0, W / δW);
+ std::tie(W, z) = gradientDescent(J, z, γ0, W / δW);
ζ = euclideanToStereographic(z);
- gradSteps++;
}
- std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ);
- steps++;
-
- if (steps % 100 == 0) {
- std::cerr << steps << " minimization steps, W is " << W << " " << gradSteps << "." << std::endl;
- }
+ std::tie(std::ignore, dH, ddH) = stereographicHamGradHess(J, ζ, z);
}
- return stereographicToEuclidean(ζ);
+ return z;
}
std::tuple<double, Vector> langevin(const Tensor& J, const Vector& z0, double T, double γ, unsigned N, Rng& r) {
@@ -189,13 +184,7 @@ int main(int argc, char* argv[]) {
Rng r;
Tensor J = generateCouplings<Scalar, PSPIN_P>(N, complex_normal_distribution<>(0, σ, κ), r.engine());
- Vector z0 = randomVector(N, complex_normal_distribution<>(0, 1, 0), r.engine());
- z0 *= sqrt(N) / sqrt((Scalar)(z0.transpose() * z0)); // Normalize.
-
- std::function<bool(double, unsigned)> f = [δ](double W, unsigned) {
- std::cout << W << std::endl;
- return W < δ;
- };
+ Vector z0 = normalize(randomVector(N, complex_normal_distribution<>(0, 1, 0), r.engine()));
Vector zSaddle = findSaddle(J, z0, ε, δ, γ);
@@ -209,6 +198,7 @@ int main(int argc, char* argv[]) {
std::tie(H, std::ignore, ddH) = hamGradHess(J, zNewSaddle);
Eigen::SelfAdjointEigenSolver<Matrix> es(ddH.adjoint() * ddH);
std::cout << (zNewSaddle - zSaddle).norm() << " " << real(H) << " " << imag(H) << " " << es.eigenvalues().transpose() << std::endl;
+ std::cerr << M * (i+1) << " steps taken to move " << (zNewSaddle - zSaddle).norm() << ", saddle information saved." << std::endl;
}
return 0;
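
Note on the control flow introduced in findSaddle above: the loop takes a Newton step (Hessian solved with Eigen's partialPivLu), accepts it only if it lowers the objective W, and otherwise falls back to gradient descent until W has dropped by a factor δW. The sketch below is a toy illustration of that same accept-or-fall-back pattern, not code from langevin.cpp: it uses a simple real test function with an analytic gradient and Hessian, and all names, step sizes, and the fixed-step gradient descent are assumptions made for the example (the repository's gradientDescent, WdW, and stereographic helpers are not reproduced here).

#include <Eigen/Dense>
#include <iostream>

using Vector = Eigen::VectorXd;
using Matrix = Eigen::MatrixXd;

// Toy objective: W(x) = ¼ Σᵢ (xᵢ² − 1)², with analytic gradient and Hessian.
double W(const Vector& x) { return (x.array().square() - 1).square().sum() / 4; }
Vector dW(const Vector& x) { return ((x.array().square() - 1) * x.array()).matrix(); }
Matrix ddW(const Vector& x) { return (3 * x.array().square() - 1).matrix().asDiagonal(); }

Vector findMinimum(Vector x, double ε, double δW, double γ0) {
  double Wx = W(x);
  while (Wx > ε) {
    // Newton step: solve ddW δx = dW with a partial-pivot LU decomposition.
    Vector δx = ddW(x).partialPivLu().solve(dW(x));
    Vector xNew = x - δx;
    double WNew = W(xNew);
    if (WNew < Wx) { // If Newton's step lowered the objective, accept it.
      x = xNew;
      Wx = WNew;
    } else { // Otherwise, do gradient descent until W is a factor δW smaller.
      double target = Wx / δW;
      while (Wx > target) {
        x -= γ0 * dW(x);
        Wx = W(x);
      }
    }
  }
  return x;
}

int main() {
  Vector x0 = Vector::Constant(4, 0.3);
  Vector x = findMinimum(x0, 1e-12, 2, 0.05);
  std::cout << x.transpose() << std::endl; // Here each component ends near 1.
}

As in the diff's own comment, a partial-pivot LU solve is enough because the Hessian is generically invertible; a rank-revealing factorization would only be needed if singular Hessians were expected along the way.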