diff options
author | Jaron Kent-Dobias <jaron@kent-dobias.com> | 2021-02-17 15:46:00 +0100 |
---|---|---|
committer | Jaron Kent-Dobias <jaron@kent-dobias.com> | 2021-02-17 15:46:00 +0100 |
commit | 95df02c90a455c2e539e795acb1921b688e8bc66 (patch) | |
tree | 7af564d926b6d233432029c030f25f605c144f68 /stokes.hpp | |
parent | 81cc0094a2c7478144702e1532bcb067faaebf26 (diff) | |
download | code-95df02c90a455c2e539e795acb1921b688e8bc66.tar.gz code-95df02c90a455c2e539e795acb1921b688e8bc66.tar.bz2 code-95df02c90a455c2e539e795acb1921b688e8bc66.zip |
Algorithmic tweaks to rope relaxation; it now succeeds in finding Stokes lines relatively quickly.
Diffstat (limited to 'stokes.hpp')
-rw-r--r-- | stokes.hpp | 73 |
1 file changed, 57 insertions, 16 deletions
@@ -1,6 +1,4 @@ #include "p-spin.hpp" -#include <iostream> -#include "dynamics.hpp" template <class Scalar> Vector<Scalar> zDot(const Vector<Scalar>& z, const Vector<Scalar>& dH) { @@ -13,6 +11,12 @@ double segmentCost(const Vector<Scalar>& z, const Vector<Scalar>& dz, const Vect return 1.0 - pow(real(zD.dot(dz)), 2) / zD.squaredNorm() / dz.squaredNorm(); } +class ropeRelaxationStallException: public std::exception { + virtual const char* what() const throw() { + return "Gradient descent stalled."; + } +}; + template <class Scalar> Vector<Scalar> variation(const Vector<Scalar>& z, const Vector<Scalar>& z´, const Vector<Scalar>& z´´, const Vector<Scalar>& dH, const Matrix<Scalar>& ddH) { double z² = z.squaredNorm(); @@ -78,29 +82,58 @@ class Rope { } template <int p> - void perturb(const Tensor<Scalar, p>& J, double δ) { + std::vector<Vector<Scalar>> δz(const Tensor<Scalar, p>& J) const { + std::vector<Vector<Scalar>> δz(z.size()); + +#pragma omp parallel for for (unsigned i = 1; i < z.size() - 1; i++) { Vector<Scalar> dH; Matrix<Scalar> ddH; std::tie(std::ignore, dH, ddH) = hamGradHess(J, z[i]); - Vector<Scalar> δz = variation(z[i], this->dz(i), this->ddz(i), dH, ddH); + δz[i] = variation(z[i], this->dz(i), this->ddz(i), dH, ddH); + } - z[i] -= δ * δz.conjugate(); - z[i] = normalize(z[i]); + return δz; + } + + template <int p> + double perturb(const Tensor<Scalar, p>& J, double δ0) { + std::vector<Vector<Scalar>> Δz = this->δz(J); + + Rope rNew = *this; + double δ = δ0; + + while (rNew.cost(J) >= this->cost(J)) { + for (unsigned i = 1; i < z.size() - 1; i++) { + rNew.z[i] = z[i] - δ * Δz[i].conjugate(); + rNew.z[i] = normalize(rNew.z[i]); + } + + δ /= 2; + + if (δ < 1e-30) { + throw ropeRelaxationStallException(); + } } + + z = rNew.z; + + return δ; } void spread() { double l = 0; - for (unsigned i= 0; i < z.size() - 1; i++) { + for (unsigned i = 0; i < z.size() - 1; i++) { l += (z[i + 1] - z[i]).norm(); } double a = 0; unsigned pos = 0; + 
std::vector<Vector<Scalar>> zNew = z; + for (unsigned i = 1; i < z.size() - 1; i++) { double b = i * l / (z.size() - 1); @@ -111,15 +144,19 @@ class Rope { Vector<Scalar> δz = z[pos] - z[pos - 1]; - z[i] = z[pos] - (a - b) / δz.norm() * δz; - z[i] = normalize(z[i]); + zNew[i] = z[pos] - (a - b) / δz.norm() * δz; + zNew[i] = normalize(zNew[i]); } + + z = zNew; } template <int p> - void step(const Tensor<Scalar, p>& J, double δ) { - this->perturb(J, δ); + double step(const Tensor<Scalar, p>& J, double δ) { + double δNew = this->perturb(J, δ); this->spread(); + + return δNew; } template <int p> @@ -137,21 +174,25 @@ class Rope { } template <int p> - void relax(const Tensor<Scalar, p>& J, unsigned N, double δ) { - for (unsigned i = 0; i < N; i++) { - this->step(J, δ); + void relax(const Tensor<Scalar, p>& J, unsigned N, double δ0) { + double δ = δ0; + try { + for (unsigned i = 0; i < N; i++) { + δ = 2 * this->step(J, δ); + } + } catch (std::exception& e) { } } Rope<Scalar> interpolate() const { - Rope<Scalar> r(2 * z.size() - 1, z[0], z[z.size() - 1]); + Rope<Scalar> r(2 * (z.size() - 2) + 1, z[0], z[z.size() - 1]); for (unsigned i = 0; i < z.size(); i++) { r.z[2 * i] = z[i]; } for (unsigned i = 0; i < z.size() - 1; i++) { - r.z[2 * i + 1] = (z[i] + z[i + 1]) / 2.0; + r.z[2 * i + 1] = normalize(((z[i] + z[i + 1]) / 2.0).eval()); } return r; |