author     Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-11-08 23:31:58 +0100
committer  Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-11-08 23:31:58 +0100
commit     1369382dcab07078e48e929e76802cdc4a1ceeca (patch)
tree       4d547853acc18e0f7ce8682817632cf1d5e0ef3d
parent     54416465850a9280a121127e0b5301d97ad13a61 (diff)
Removed a lot more old code, including the old Rope class.
-rw-r--r--  langevin.cpp |   6
-rw-r--r--  stokes.hpp   | 280
2 files changed, 3 insertions(+), 283 deletions(-)
diff --git a/langevin.cpp b/langevin.cpp
index 9aec7c8..86aeb9e 100644
--- a/langevin.cpp
+++ b/langevin.cpp
@@ -1,9 +1,9 @@
#include <getopt.h>
#include <limits>
-#include <stereographic.hpp>
#include <unordered_map>
#include <list>
+#include "Eigen/Dense"
#include "Eigen/src/Eigenvalues/ComplexEigenSolver.h"
#include "Eigen/src/Eigenvalues/SelfAdjointEigenSolver.h"
#include "complex_normal.hpp"
@@ -196,8 +196,8 @@ int main(int argc, char* argv[]) {
J = exp(Complex(0, φ)) * J;
- Cord test(J, zSaddle, zSaddleNext, 5);
- test.relaxNewton(J, 25, 1, 1e4);
+ Cord test(J, zSaddle, zSaddleNext, 3);
+ test.relaxNewton(J, 10, 1, 1e4);
std::cout << test.z0.transpose() << std::endl;
std::cout << test.z1.transpose() << std::endl;
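
For reference, the retained call constructs a Cord from the coupling tensor J, the two saddle endpoints, and what appears to be a number of variational terms (an assumption; this diff does not show the Cord constructor). The relaxNewton arguments follow its signature in stokes.hpp below: nt, δ₀, and maxSteps. A minimal sketch of the updated call site under those assumptions:

    Cord test(J, zSaddle, zSaddleNext, 3);  // 3 variational terms (was 5); meaning of 4th arg assumed
    test.relaxNewton(J, /*nt=*/10, /*δ₀=*/1, /*maxSteps=*/1e4);  // nt reduced from 25
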
diff --git a/stokes.hpp b/stokes.hpp
index cf68d9b..b3ae90b 100644
--- a/stokes.hpp
+++ b/stokes.hpp
@@ -4,232 +4,6 @@
#include "complex_normal.hpp"
#include "dynamics.hpp"
-#include <iostream>
-
-class ropeRelaxationStallException: public std::exception {
- virtual const char* what() const throw() {
- return "Gradient descent stalled.";
- }
-};
-
-template <class Scalar>
-class Rope {
- public:
- std::vector<Vector<Scalar>> z;
-
- Rope(unsigned N) : z(N + 2) {}
-
- template <int p>
- Rope(unsigned N, const Vector<Scalar>& z1, const Vector<Scalar>& z2, const Tensor<Scalar, p>& J) : z(N + 2) {
- Scalar H1, H2;
- std::tie(H1, std::ignore, std::ignore) = hamGradHess(J, z1);
- std::tie(H2, std::ignore, std::ignore) = hamGradHess(J, z2);
-
- if (real(H1) > real(H2)) {
- z.front() = z1;
- z.back() = z2;
- } else {
- z.front() = z2;
- z.back() = z1;
- }
-
- for (unsigned i = 0; i < N + 2; i++) {
- z[i] = normalize(z.front() + (z.back() - z.front()) * ((Real)i / (N + 1.0)));
- }
- }
-
- unsigned n() const {
- return z.size() - 2;
- }
-
- Real length() const {
- Real l = 0;
-
- for (unsigned i = 0; i < z.size() - 1; i++) {
- l += (z[i + 1] - z[i]).norm();
- }
-
- return l;
- }
-
- Vector<Scalar> dz(unsigned i) const {
- return z[i + 1] - z[i - 1];
- }
-
- template <int p>
- std::vector<Vector<Scalar>> generateDiscreteGradientδz(const Tensor<Scalar, p>& J, Real γ) const {
- std::vector<Vector<Scalar>> δz(z.size());
-
- std::vector<Vector<Scalar>> ż(z.size());
- std::vector<Vector<Scalar>> dH(z.size());
- std::vector<Matrix<Scalar>> ddH(z.size());
-
-#pragma omp parallel for
- for (unsigned i = 1; i < z.size() - 1; i++) {
- std::tie(std::ignore, dH[i], ddH[i]) = hamGradHess(J, z[i]);
- ż[i] = zDot(z[i], dH[i]);
- }
-
-#pragma omp parallel for
- for (unsigned i = 1; i < z.size() - 1; i++) {
- Real z² = z[i].squaredNorm();
- Matrix<Scalar> dż = (dH[i].conjugate() - (dH[i].dot(z[i]) / z²) * z[i].conjugate()) * z[i].adjoint() / z²;
- Matrix<Scalar> dżc = -ddH[i] + (ddH[i] * z[i].conjugate()) * z[i].transpose() / z²
- + (z[i].dot(dH[i]) / z²) * (
- Matrix<Scalar>::Identity(ddH[i].rows(), ddH[i].cols()) - z[i].conjugate() * z[i].transpose() / z²
- );
-
- Vector<Scalar> dC = -0.5 * (dżc * dz(i) + dż * dz(i).conjugate() - (dżc * ż[i] + dż * ż[i].conjugate()) * real(ż[i].dot(dz(i))) / ż[i].squaredNorm()) / (ż[i].norm() * dz(i).norm());
-
- if (i > 1) {
- dC += -0.5 * (ż[i - 1].conjugate() - dz(i - 1).conjugate() * real(ż[i - 1].dot(dz(i - 1))) / dz(i - 1).squaredNorm()) / (ż[i - 1].norm() * dz(i - 1).norm());
- }
-
- if (i < z.size() - 2) {
- dC += 0.5 * (ż[i + 1].conjugate() - dz(i + 1).conjugate() * real(ż[i + 1].dot(dz(i + 1))) / dz(i + 1).squaredNorm()) / (ż[i + 1].norm() * dz(i + 1).norm());
- }
-
- dC += γ * (2 * z[i] - z[i - 1] - z[i + 1]).conjugate();
-
- δz[i] = dC.conjugate();
-
- δz[i] -= z[i].conjugate() * z[i].conjugate().dot(δz[i]) / z²;
- }
-
- return δz;
- }
-
- void spread() {
- Real l = length();
-
- Real a = 0;
- unsigned pos = 0;
-
- std::vector<Vector<Scalar>> zNew = z;
-
- for (unsigned i = 1; i < z.size() - 1; i++) {
- Real b = i * l / (z.size() - 1);
-
- while (b > a) {
- pos++;
- a += (z[pos] - z[pos - 1]).norm();
- }
-
- Vector<Scalar> δz = z[pos] - z[pos - 1];
-
- zNew[i] = normalize(z[pos] - (a - b) / δz.norm() * δz);
- }
-
- z = zNew;
- }
-
- template<int p>
- Real perturb(const Tensor<Scalar, p>& J, Real δ₀, const std::vector<Vector<Scalar>>& δz, Real γ = 0) {
- Rope rNew = *this;
- Real δ = δ₀;
- // We rescale the step size by the distance between points.
- Real Δl = length() / (n() + 1);
-
- while (true) {
- for (unsigned i = 1; i < z.size() - 1; i++) {
- rNew.z[i] = normalize(z[i] - (δ * Δl) * δz[i]);
- }
-
- if (rNew.cost(J, γ) < cost(J, γ)) {
- break;
- } else {
- δ /= 2;
- }
-
- if (δ < 1e-15) {
- throw ropeRelaxationStallException();
- }
- }
-
-// rNew.spread();
-
- z = rNew.z;
-
- return δ;
- }
-
- template <int p>
- void relaxDiscreteGradient(const Tensor<Scalar, p>& J, unsigned N, Real δ0, Real γ) {
- Real δ = δ0;
- try {
- for (unsigned i = 0; i < N; i++) {
- std::vector<Vector<Scalar>> δz = generateDiscreteGradientδz(J, γ);
- double stepSize = 0;
- for (const Vector<Scalar>& v : δz) {
- stepSize += v.norm();
- }
- if (stepSize / δz.size() < 1e-6) {
- break;
- }
- std::cout << cost(J) << " " << stepSize / δz.size() << std::endl;
- δ = 2 * perturb(J, δ, δz, γ);
- }
- } catch (std::exception& e) {
- }
- }
-
- template <int p>
- Real cost(const Tensor<Scalar, p>& J, Real γ = 0) const {
- Real c = 0;
-
- for (unsigned i = 1; i < z.size() - 1; i++) {
- Vector<Scalar> dH;
- std::tie(std::ignore, dH, std::ignore) = hamGradHess(J, z[i]);
-
- Vector<Scalar> zD = zDot(z[i], dH);
-
- c += 1.0 - real(zD.dot(dz(i))) / zD.norm() / dz(i).norm();
- }
-
- for (unsigned i = 0; i < z.size() - 1; i++) {
- c += γ * (z[i + 1] - z[i]).squaredNorm();
- }
-
- return c;
- }
-
- Rope<Scalar> interpolate() const {
- Rope<Scalar> r(2 * n() + 1);
-
- for (unsigned i = 0; i < z.size(); i++) {
- r.z[2 * i] = z[i];
- }
-
- for (unsigned i = 0; i < z.size() - 1; i++) {
- r.z[2 * i + 1] = normalize(((z[i] + z[i + 1]) / 2.0));
- }
-
- return r;
- }
-};
-
-template <class Real, class Scalar, int p>
-bool stokesLineTest(const Tensor<Scalar, p>& J, const Vector<Scalar>& z1, const Vector<Scalar>& z2, unsigned n0, unsigned steps) {
- Rope stokes(n0, z1, z2, J);
-
- Real oldCost = stokes.cost(J);
-
- for (unsigned i = 0; i < steps; i++) {
- stokes.relaxDiscreteGradient(J, 1e6, 1, pow(2, steps));
-
- Real newCost = stokes.cost(J);
-
- if (newCost > oldCost) {
- return false;
- }
-
- oldCost = newCost;
-
- stokes = stokes.interpolate();
- }
- return true;
-}
-
template <class Scalar>
class Cord {
public:
@@ -510,66 +284,12 @@ public:
}
template <int p>
- std::pair<Real, Real> relaxStep(const Tensor<Scalar, p>& J, unsigned nt, Real δ₀) {
- std::vector<Vector<Scalar>> dgsTot(gs.size(), Vector<Scalar>::Zero(z0.size()));
-
- for (unsigned i = 0; i < nt; i++) {
- Real t = (i + 1.0) / (nt + 1.0);
- std::vector<Vector<Scalar>> dgsI = dgs(J, t);
-
- for (unsigned j = 0; j < gs.size(); j++) {
- dgsTot[j] += dgsI[j] / nt;
- }
- }
-
- Real stepSize = 0;
- for (const Vector<Scalar>& dgi : dgsTot) {
- stepSize += dgi.squaredNorm();
- }
- stepSize = sqrt(stepSize);
-
- Cord cNew(*this);
-
- Real δ = δ₀;
- Real oldCost = totalCost(J, nt);
- Real newCost = std::numeric_limits<Real>::infinity();
-
- while (newCost > oldCost) {
- for (unsigned i = 0; i < gs.size(); i++) {
- cNew.gs[i] = gs[i] - δ * dgsTot[i];
- }
-
- newCost = cNew.totalCost(J, nt);
-
- δ /= 2;
- }
-
- gs = cNew.gs;
-
- return {2 * δ, stepSize};
- }
-
- template <int p>
- void relax(const Tensor<Scalar, p>& J, unsigned nt, Real δ₀, unsigned maxSteps) {
- Real δ = δ₀;
- Real stepSize = std::numeric_limits<Real>::infinity();
- unsigned steps = 0;
- while (stepSize > 1e-7 && steps < maxSteps) {
- std::tie(δ, stepSize) = relaxStep(J, nt, δ);
- std::cout << totalCost(J, nt) << " " << δ << " " << stepSize << std::endl;
- δ *= 2;
- steps++;
- }
- }
-
- template <int p>
void relaxNewton(const Tensor<Scalar, p>& J, unsigned nt, Real δ₀, unsigned maxSteps) {
Real δ = δ₀;
Real stepSize = std::numeric_limits<Real>::infinity();
unsigned steps = 0;
while (stepSize > 1e-7 && steps < maxSteps) {
std::tie(δ, stepSize) = relaxStepNewton(J, nt, δ);
- std::cout << totalCost(J, nt) << " " << δ << " " << stepSize << std::endl;
δ /= 3;
steps++;
}
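
The deleted relax/relaxStep pair was a backtracking gradient descent on the coefficient vectors gs: average the gradient over nt interior time points, halve the trial step δ until totalCost decreases, then double the accepted step for the next iteration. A self-contained sketch of that backtracking pattern, with a hypothetical two-variable quadratic standing in for the repository's totalCost:

    #include <iostream>
    #include <vector>

    int main() {
      // Hypothetical quadratic stand-in for the repository's totalCost.
      auto cost = [](const std::vector<double>& x) {
        return x[0] * x[0] + 10 * x[1] * x[1];
      };
      std::vector<double> g = {3.0, -2.0}; // stand-in for the coefficients gs
      double δ = 1.0;

      for (unsigned steps = 0; steps < 100; steps++) {
        std::vector<double> dg = {2 * g[0], 20 * g[1]}; // gradient of the stand-in cost
        double oldCost = cost(g);
        std::vector<double> gNew = g;

        // Halve the trial step until the cost decreases, as in the removed
        // relaxStep; give up if the step underflows (cf. the removed
        // ropeRelaxationStallException in the old Rope class).
        while (true) {
          for (unsigned i = 0; i < g.size(); i++) gNew[i] = g[i] - δ * dg[i];
          if (cost(gNew) < oldCost) break;
          δ /= 2;
          if (δ < 1e-15) return 1;
        }

        g = gNew;
        δ *= 2; // the removed relax() likewise doubled the accepted step
      }

      std::cout << g[0] << " " << g[1] << std::endl;
    }
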