author     Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-02-25 15:28:11 +0100
committer  Jaron Kent-Dobias <jaron@kent-dobias.com>  2021-02-25 15:28:11 +0100
commit     3276bdd1e9796fec71e169e6c41d77da72b3a4fb (patch)
tree       32be646f64c83751572eb867f9354e74d146ef6b /stokes.hpp
parent     c16f7fc3fd8206e5f05e07353328538b2f5c8b6b (diff)
Many changes: double becomes Real throughout; δz generation is split into gradient, discrete-gradient, and random variants; perturb takes an externally generated variation and a length-penalty weight γ; step is folded into relaxGradient, relaxDiscreteGradient, and relaxRandom; and cost gains a γ-weighted squared-length penalty.
Diffstat (limited to 'stokes.hpp')
-rw-r--r--  stokes.hpp | 154
1 file changed, 122 insertions(+), 32 deletions(-)
diff --git a/stokes.hpp b/stokes.hpp
index 6cc5c4a..cbf5ebb 100644
--- a/stokes.hpp
+++ b/stokes.hpp
@@ -1,4 +1,6 @@
#include "p-spin.hpp"
+#include "complex_normal.hpp"
+#include "dynamics.hpp"
#include <iostream>
class ropeRelaxationStallException: public std::exception {
@@ -9,13 +11,13 @@ class ropeRelaxationStallException: public std::exception {
template <class Scalar>
Vector<Scalar> variation(const Vector<Scalar>& z, const Vector<Scalar>& z´, const Vector<Scalar>& z´´, const Vector<Scalar>& dH, const Matrix<Scalar>& ddH) {
- double z² = z.squaredNorm();
- double z´² = z´.squaredNorm();
+ Real z² = z.squaredNorm();
+ Real z´² = z´.squaredNorm();
Vector<Scalar> ż = zDot(z, dH);
- double ż² = ż.squaredNorm();
+ Real ż² = ż.squaredNorm();
- double Reż·z´ = real(ż.dot(z´));
+ Real Reż·z´ = real(ż.dot(z´));
Matrix<Scalar> dż = (dH.conjugate() - (dH.dot(z) / z²) * z.conjugate()) * z.adjoint() / z²;
Matrix<Scalar> dżc = -ddH + (ddH * z.conjugate()) * z.transpose() / z²
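zDot is defined in the headers included above, not in this file. A minimal sketch of the form consistent with the dż and dżc derivatives computed in this hunk (inferred from those expressions, not the committed source):

    // Inferred sketch: steepest descent on Re H with the component along z
    // projected out, so the motion stays tangent to the spherical constraint.
    template <class Scalar>
    Vector<Scalar> zDot(const Vector<Scalar>& z, const Vector<Scalar>& dH) {
      return -dH.conjugate() + (dH.dot(z) / z.squaredNorm()) * z.conjugate();
    }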
@@ -33,7 +35,7 @@ Vector<Scalar> variation(const Vector<Scalar>& z, const Vector<Scalar>& z´, con
) * z.conjugate()
) / z²;
- double dReż·z´ = real(ż.dot(z´´) + ż´.dot(z´));
+ Real dReż·z´ = real(ż.dot(z´´) + ż´.dot(z´));
Vector<Scalar> ddtdLdz´ = - (
(
@@ -72,7 +74,7 @@ class Rope {
}
for (unsigned i = 0; i < N + 2; i++) {
- z[i] = normalize(z.front() + (z.back() - z.front()) * ((double)i / (N + 1.0)));
+ z[i] = normalize(z.front() + (z.back() - z.front()) * ((Real)i / (N + 1.0)));
}
}
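The interior beads are seeded on the straight chord between the two fixed endpoints and pushed back onto the constraint surface by normalize, also defined elsewhere. Assuming the usual spherical constraint |z|² = N, a plausible stand-in (an assumption, not this repository's definition):

    // Assumed form of normalize: rescale z onto the sphere |z|² = N.
    template <class Scalar>
    Vector<Scalar> normalize(const Vector<Scalar>& z) {
      return z * std::sqrt((Real)z.size() / z.squaredNorm());
    }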
@@ -80,8 +82,8 @@ class Rope {
return z.size() - 2;
}
- double length() const {
- double l = 0;
+ Real length() const {
+ Real l = 0;
for (unsigned i = 0; i < z.size() - 1; i++) {
l += (z[i + 1] - z[i]).norm();
@@ -91,14 +93,14 @@ class Rope {
}
template <int p>
- double error(const Tensor<Scalar, p>& J) const {
+ Real error(const Tensor<Scalar, p>& J) const {
Scalar H0, HN;
std::tie(H0, std::ignore, std::ignore) = hamGradHess(J, z.front());
std::tie(HN, std::ignore, std::ignore) = hamGradHess(J, z.back());
- double ImH = imag((H0 + HN) / 2.0);
+ Real ImH = imag((H0 + HN) / 2.0);
- double err = 0;
+ Real err = 0;
for (unsigned i = 1; i < z.size() - 1; i++) {
Scalar Hi;
@@ -119,7 +121,7 @@ class Rope {
}
template <int p>
- std::vector<Vector<Scalar>> δz(const Tensor<Scalar, p>& J) const {
+ std::vector<Vector<Scalar>> generateGradientδz(const Tensor<Scalar, p>& J) const {
std::vector<Vector<Scalar>> δz(z.size());
#pragma omp parallel for
@@ -137,7 +139,7 @@ class Rope {
}
// We return a δz with average norm of one.
- double mag = 0;
+ Real mag = 0;
for (unsigned i = 1; i < z.size() - 1; i++) {
mag += δz[i].norm();
}
@@ -150,20 +152,87 @@ class Rope {
}
template <int p>
- double perturb(const Tensor<Scalar, p>& J, double δ0) {
- std::vector<Vector<Scalar>> Δz = δz(J);
+ std::vector<Vector<Scalar>> generateDiscreteGradientδz(const Tensor<Scalar, p>& J, Real γ) const {
+ std::vector<Vector<Scalar>> δz(z.size());
+
+ std::vector<Vector<Scalar>> ż(z.size());
+ std::vector<Vector<Scalar>> dH(z.size());
+ std::vector<Matrix<Scalar>> ddH(z.size());
+
+#pragma omp parallel for
+ for (unsigned i = 1; i < z.size() - 1; i++) {
+ std::tie(std::ignore, dH[i], ddH[i]) = hamGradHess(J, z[i]);
+ ż[i] = zDot(z[i], dH[i]);
+ }
+
+#pragma omp parallel for
+ for (unsigned i = 1; i < z.size() - 1; i++) {
+ Real z² = z[i].squaredNorm();
+ Matrix<Scalar> dż = (dH[i].conjugate() - (dH[i].dot(z[i]) / z²) * z[i].conjugate()) * z[i].adjoint() / z²;
+ Matrix<Scalar> dżc = -ddH[i] + (ddH[i] * z[i].conjugate()) * z[i].transpose() / z²
+ + (z[i].dot(dH[i]) / z²) * (
+ Matrix<Scalar>::Identity(ddH[i].rows(), ddH[i].cols()) - z[i].conjugate() * z[i].transpose() / z²
+ );
+
+ Vector<Scalar> dC = -0.5 * (dżc * dz(i) + dż * dz(i).conjugate() - (dżc * ż[i] + dż * ż[i].conjugate()) * real(ż[i].dot(dz(i))) / ż[i].squaredNorm()) / (ż[i].norm() * dz(i).norm());
+
+ if (i > 1) {
+ dC += -0.5 * (ż[i - 1].conjugate() - dz(i - 1).conjugate() * real(ż[i - 1].dot(dz(i - 1))) / dz(i - 1).squaredNorm()) / (ż[i - 1].norm() * dz(i - 1).norm());
+ }
+
+ if (i < z.size() - 2) {
+ dC += 0.5 * (ż[i + 1].conjugate() - dz(i + 1).conjugate() * real(ż[i + 1].dot(dz(i + 1))) / dz(i + 1).squaredNorm()) / (ż[i + 1].norm() * dz(i + 1).norm());
+ }
+
+ dC += - γ * (z[i - 1] + z[i + 1]).conjugate();
+ δz[i] = dC;
+ }
+
+ for (unsigned i = 1; i < z.size() - 1; i++) {
+ δz[i] = δz[i].conjugate() - (δz[i].dot(z[i]) / z[i].squaredNorm()) * z[i].conjugate();
+ }
+
+ // We return a δz with average norm of one.
+ Real mag = 0;
+ for (unsigned i = 1; i < z.size() - 1; i++) {
+ mag += δz[i].norm();
+ }
+
+ for (unsigned i = 1; i < z.size() - 1; i++) {
+ δz[i] /= mag / n();
+ }
+
+ return δz;
+ }
+
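generateDiscreteGradientδz differentiates the discretized objective that cost(J, γ) in the final hunk actually evaluates, rather than discretizing the continuum variation above. Reading it off from cost, with N interior beads, ż_i = zDot(z_i, ∂H(z_i)), and dz(i) the local chord (its definition is not shown in this diff), the objective is

    C(z) = \sum_{i=1}^{N} \left( 1 - \frac{\operatorname{Re}\, \dot{z}_i \cdot dz_i}{\lVert \dot{z}_i \rVert \, \lVert dz_i \rVert} \right) + \gamma \sum_{i=0}^{N} \lVert z_{i+1} - z_i \rVert^2

and each dC above collects the cosine terms in which z_i appears (its own and its two neighbours') plus the γ length penalty, before being projected tangent to z_i.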
+ template<class Gen>
+ std::vector<Vector<Scalar>> generateRandomδz(Gen& r) const {
+ std::vector<Vector<Scalar>> δz(z.size());
+
+ complex_normal_distribution<> d(0, 1, 0);
+ for (unsigned i = 1; i < z.size() - 1; i++) {
+ δz[i] = randomVector<Scalar>(z[0].size(), d, r);
+ }
+
+ return δz;
+ }
+
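complex_normal_distribution and randomVector come from the newly included complex_normal.hpp and are not shown in this diff; from the call site, the three constructor arguments plausibly set the mean, the variance, and the pseudo-variance coupling the real and imaginary parts. A self-contained stand-in for the (0, 1, 0) case used here (name and signature are hypothetical):

    #include <complex>
    #include <random>

    // Hypothetical stand-in: standard complex Gaussian entries with
    // independent real and imaginary parts of variance 1/2, so E|ξᵢ|² = 1.
    template <class Gen>
    Vector<std::complex<Real>> randomGaussianVector(unsigned n, Gen& r) {
      std::normal_distribution<Real> g(0, std::sqrt(0.5));
      Vector<std::complex<Real>> ξ(n);
      for (unsigned i = 0; i < n; i++) ξ(i) = {g(r), g(r)};
      return ξ;
    }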
+ template<int p>
+ Real perturb(const Tensor<Scalar, p>& J, Real δ₀, const std::vector<Vector<Scalar>>& δz, Real γ = 0) {
Rope rNew = *this;
- double δ = δ0;
+ Real δ = δ₀;
// We rescale the step size by the distance between points.
- double Δl = length() / (n() + 1);
+ Real Δl = length() / (n() + 1);
while (true) {
for (unsigned i = 1; i < z.size() - 1; i++) {
- rNew.z[i] = normalize(z[i] - (δ * Δl) * Δz[i]);
+ rNew.z[i] = normalize(z[i] - (δ * Δl) * δz[i]);
}
- if (rNew.cost(J) < cost(J)) {
+ rNew.spread();
+
+ if (rNew.cost(J, γ) < cost(J, γ)) {
break;
} else {
δ /= 2;
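The hunk cuts perturb off mid-loop, but the shape is an ordinary backtracking line search: accept the trial rope once it lowers the γ-augmented cost, otherwise halve δ and retry, presumably throwing the ropeRelaxationStallException declared at the top once δ underflows. Schematically (the stall threshold and final bookkeeping are assumptions):

    //   } else {
    //     δ /= 2;
    //     if (δ < 1e-15) throw ropeRelaxationStallException();  // stalled
    //   }
    // }
    // z = rNew.z;  // keep the improved rope
    // return δ;    // the relax* drivers grow this by 1.1× after each success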
@@ -180,15 +249,15 @@ class Rope {
}
void spread() {
- double l = length();
+ Real l = length();
- double a = 0;
+ Real a = 0;
unsigned pos = 0;
std::vector<Vector<Scalar>> zNew = z;
for (unsigned i = 1; i < z.size() - 1; i++) {
- double b = i * l / (z.size() - 1);
+ Real b = i * l / (z.size() - 1);
while (b > a) {
pos++;
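spread re-spaces the beads uniformly in arc length, which is what keeps the δ·Δl step rescaling in perturb meaningful. The hunk truncates the loop body; a sketch of the standard completion (an assumption, not the committed code) accumulates segment lengths until the target arc length b is bracketed, then interpolates within that segment:

    while (b > a) {
      pos++;
      a += (z[pos] - z[pos - 1]).norm();  // cumulative arc length at bead pos
    }
    // b falls inside segment (pos-1, pos): step back from its far end.
    zNew[i] = normalize(z[pos] - (a - b) * (z[pos] - z[pos - 1]).normalized());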
@@ -204,27 +273,44 @@ class Rope {
}
template <int p>
- double step(const Tensor<Scalar, p>& J, double δ0) {
- double δ = perturb(J, δ0);
- spread();
-
- return δ;
+ void relaxGradient(const Tensor<Scalar, p>& J, unsigned N, Real δ0) {
+ Real δ = δ0;
+ try {
+ for (unsigned i = 0; i < N; i++) {
+ std::vector<Vector<Scalar>> δz = generateGradientδz(J);
+ δ = 1.1 * perturb(J, δ, δz);
+ }
+ } catch (std::exception& e) {
+ }
}
template <int p>
- void relax(const Tensor<Scalar, p>& J, unsigned N, double δ0) {
- double δ = δ0;
+ void relaxDiscreteGradient(const Tensor<Scalar, p>& J, unsigned N, Real δ0, Real γ) {
+ Real δ = δ0;
try {
for (unsigned i = 0; i < N; i++) {
- δ = 1.1 * step(J, δ);
+ std::vector<Vector<Scalar>> δz = generateDiscreteGradientδz(J, γ);
+ δ = 1.1 * perturb(J, δ, δz, γ);
}
} catch (std::exception& e) {
}
}
+ template <int p, class Gen>
+ void relaxRandom(const Tensor<Scalar, p>& J, unsigned N, Real δ0, Gen& r) {
+ Real δ = δ0;
+ for (unsigned i = 0; i < N; i++) {
+ try {
+ std::vector<Vector<Scalar>> δz = generateRandomδz(r);
+ δ = 1.1 * perturb(J, δ, δz);
+ } catch (std::exception& e) {
+ }
+ }
+ }
+
template <int p>
- double cost(const Tensor<Scalar, p>& J) const {
- double c = 0;
+ Real cost(const Tensor<Scalar, p>& J, Real γ = 0) const {
+ Real c = 0;
for (unsigned i = 1; i < z.size() - 1; i++) {
Vector<Scalar> dH;
@@ -235,6 +321,10 @@ class Rope {
c += 1.0 - real(zD.dot(dz(i))) / zD.norm() / dz(i).norm();
}
+ for (unsigned i = 0; i < z.size() - 1; i++) {
+ c += γ * (z[i + 1] - z[i]).squaredNorm();
+ }
+
return c;
}
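With step gone, callers now pick a variation generator and a relaxation driver directly. A hypothetical driver, since neither the Rope constructor nor the tensor setup appears in this diff:

    // Hypothetical usage (constructor arguments are assumptions):
    Rope<Scalar> rope(20, zSaddle1, zSaddle2);      // 20 interior beads
    rope.relaxGradient(J, 1000, 0.1);               // continuum variation
    rope.relaxDiscreteGradient(J, 1000, 0.1, 0.5);  // discretized cost, γ = 0.5
    std::cout << rope.cost(J) << " " << rope.error(J) << std::endl;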