summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJaron Kent-Dobias <jaron@kent-dobias.com>2021-11-05 17:31:10 +0100
committerJaron Kent-Dobias <jaron@kent-dobias.com>2021-11-05 17:31:10 +0100
commitc60430fc1c5d90ae06d1fd019257474c8f395bef (patch)
tree3e54f263dd676b7e563d4a1388c5da122df59c1e
parent8209ca60b99594f26f3e9b21ccdbc8695526eb93 (diff)
downloadcode-c60430fc1c5d90ae06d1fd019257474c8f395bef.tar.gz
code-c60430fc1c5d90ae06d1fd019257474c8f395bef.tar.bz2
code-c60430fc1c5d90ae06d1fd019257474c8f395bef.zip
Lots of progress towards Hessian implementation.
-rw-r--r--langevin.cpp4
-rw-r--r--p-spin.hpp39
-rw-r--r--stokes.hpp176
-rw-r--r--tensor.hpp12
4 files changed, 229 insertions, 2 deletions
diff --git a/langevin.cpp b/langevin.cpp
index fe26332..d23cb33 100644
--- a/langevin.cpp
+++ b/langevin.cpp
@@ -194,8 +194,8 @@ int main(int argc, char* argv[]) {
*/
- Cord test(J, zSaddle, zSaddleNext, 5);
- test.relax(J, 20, 1, 1e5);
+ Cord test(J, zSaddle, zSaddleNext, 2);
+ test.relaxNewton(J, 20, 1, 1e4);
std::cout << test.z0.transpose() << std::endl;
std::cout << test.z1.transpose() << std::endl;
diff --git a/p-spin.hpp b/p-spin.hpp
index e5cd195..50da209 100644
--- a/p-spin.hpp
+++ b/p-spin.hpp
@@ -1,6 +1,7 @@
#pragma once
#include <eigen3/Eigen/Dense>
+#include <iterator>
#include "types.hpp"
#include "tensor.hpp"
@@ -104,3 +105,41 @@ Matrix<Scalar> dzDotConjugate(const Vector<Scalar>& z, const Vector<Scalar>& dH,
Matrix<Scalar>::Identity(ddH.rows(), ddH.cols()) - z.conjugate() * z.transpose() / z²
);
}
+
+template <class Scalar>
+Tensor<Scalar, 3> ddzDot(const Vector<Scalar>& z, const Vector<Scalar>& dH) {
+ Tensor<Scalar, 1> zT = Eigen::TensorMap<Tensor<const Scalar, 1>>(z.data(), {z.size()});
+ Tensor<Scalar, 1> dHT = Eigen::TensorMap<Tensor<const Scalar, 1>>(dH.data(), {dH.size()});
+
+ Eigen::array<Eigen::IndexPair<int>, 0> ei = {};
+
+ Scalar z² = z.squaredNorm();
+
+ return - zT.conjugate().contract(dHT.conjugate(), ei).contract(zT.conjugate(), ei) / pow(z², 2)
+ - dHT.conjugate().contract(zT.conjugate(), ei).contract(zT.conjugate(), ei) / pow(z², 2)
+ + zT.conjugate().contract(zT.conjugate(), ei).contract(zT.conjugate(), ei) * (2.0 * dH.dot(z) / pow(z², 3));
+}
+
+template <class Scalar>
+Tensor<Scalar, 3> ddzDotConjugate(const Vector<Scalar>& z, const Vector<Scalar>& dH, const Matrix<Scalar>& ddH, const Tensor<Scalar, 3>& dddH) {
+ Tensor<Scalar, 1> zT = Eigen::TensorMap<Tensor<const Scalar, 1>>(z.data(), {z.size()});
+ Tensor<Scalar, 1> dHT = Eigen::TensorMap<Tensor<const Scalar, 1>>(dH.data(), {dH.size()});
+ Tensor<Scalar, 2> ddHT = Eigen::TensorMap<Tensor<const Scalar, 2>>(ddH.data(), {z.size(), z.size()});
+
+  Matrix<Scalar> I = Matrix<Scalar>::Identity(z.size(), z.size());
+ Tensor<Scalar, 2> IT = Eigen::TensorMap<Tensor<const Scalar, 2>>(I.data(), {z.size(), z.size()});
+
+ Eigen::array<Eigen::IndexPair<int>, 0> ei = {};
+ Eigen::array<Eigen::IndexPair<int>, 1> ip00 = {Eigen::IndexPair<int>(0, 0)};
+
+ Scalar z² = z.squaredNorm();
+
+ return - dddH + dddH.contract(zT.conjugate(), ip00).contract(zT, ei) / z²
+ + IT.contract(ddHT.contract(zT.conjugate(), ip00), ei).shuffle((std::array<int, 3>){0, 2, 1}) / z²
+ + ddHT.contract(zT.conjugate(), ip00).contract(IT, ei) / z²
+ - zT.conjugate().contract(ddHT.contract(zT.conjugate(), ip00), ei).contract(zT, ei) / pow(z², 2)
+ - zT.conjugate().contract(IT, ei) * (z.dot(dH) / pow(z², 2))
+ - IT.contract(zT.conjugate(), ei).shuffle((std::array<int, 3>){0, 2, 1}) * (z.dot(dH) / pow(z², 2))
+ - ddHT.contract(zT.conjugate(), ip00).contract(zT.conjugate(), ei).contract(zT, ei) / pow(z², 2)
+ + zT.conjugate().contract(zT.conjugate(), ei).contract(zT, ei) * (2.0 * z.dot(dH) / pow(z², 3));
+}
diff --git a/stokes.hpp b/stokes.hpp
index b2694a9..948a484 100644
--- a/stokes.hpp
+++ b/stokes.hpp
@@ -333,6 +333,168 @@ public:
}
template <int p>
+ std::pair<Vector<Scalar>, Matrix<Scalar>> gsGradHess(const Tensor<Scalar, p>& J, Real t) const {
+ Vector<Scalar> z = f(t);
+ auto [H, dH, ddH] = hamGradHess(J, z);
+ Tensor<Scalar, 3> dddH = ((p - 2) * (p - 1) * p / (Real)factorial(p)) * J;
+ Vector<Scalar> ż = zDot(z, dH);
+ Tensor<Scalar, 1> żT = Eigen::TensorMap<Tensor<const Scalar, 1>>(ż.data(), {z.size()});
+ Vector<Scalar> dz = df(t);
+ Tensor<Scalar, 1> dzT = Eigen::TensorMap<Tensor<const Scalar, 1>>(dz.data(), {z.size()});
+ Matrix<Scalar> dż = dzDot(z, dH);
+ Matrix<Scalar> dżc = dzDotConjugate(z, dH, ddH);
+ Tensor<Scalar, 3> ddż = ddzDot(z, dH);
+ Tensor<Scalar, 3> ddżc = ddzDotConjugate(z, dH, ddH, dddH);
+
+ Eigen::array<Eigen::IndexPair<int>, 1> ip20 = {Eigen::IndexPair<int>(2, 0)};
+
+ Tensor<Scalar, 2> ddżcdzT = ddżc.contract(dzT, ip20);
+ Matrix<Scalar> ddżcdz = Eigen::Map<Matrix<Scalar>>(ddżcdzT.data(), z.size(), z.size());
+
+ Tensor<Scalar, 2> ddżdzT = ddż.contract(dzT.conjugate(), ip20);
+    Matrix<Scalar> ddżdz = Eigen::Map<Matrix<Scalar>>(ddżdzT.data(), z.size(), z.size());
+
+ Tensor<Scalar, 2> ddżcżT = ddżc.contract(żT, ip20);
+ Matrix<Scalar> ddżcż = Eigen::Map<Matrix<Scalar>>(ddżcżT.data(), z.size(), z.size());
+
+ Tensor<Scalar, 2> ddżżcT = ddż.contract(żT.conjugate(), ip20);
+ Matrix<Scalar> ddżżc = Eigen::Map<Matrix<Scalar>>(ddżżcT.data(), z.size(), z.size());
+
+ Vector<Scalar> x(z.size() * gs.size());
+ Matrix<Scalar> M(x.size(), x.size());
+
+ for (unsigned i = 0; i < gs.size(); i++) {
+ Real fdg = gCoeff(i, t);
+ Real dfdg = dgCoeff(i, t);
+ Vector<Scalar> dC = - 0.5 / ż.norm() / dz.norm() * (
+ dfdg * ż.conjugate() + fdg * dżc * dz + fdg * dż * dz.conjugate()
+ - real(dz.dot(ż)) * (
+ dfdg * dz.conjugate() / dz.squaredNorm() +
+ fdg * (dżc * ż + dż * ż.conjugate()) / ż.squaredNorm()
+ )
+ );
+
+ for (unsigned j = 0; j < dC.size(); j++) {
+ x(z.size() * i + j) = dC(j);
+ }
+
+ for (unsigned j = 0; j < gs.size(); j++) {
+ Real fdgm = gCoeff(j, t);
+ Real dfdgm = dgCoeff(j, t);
+ Vector<Scalar> dCm = - 0.5 / ż.norm() / dz.norm() * (
+ dfdgm * ż.conjugate() + fdgm * dżc * dz + fdgm * dż * dz.conjugate()
+ - real(dz.dot(ż)) * (
+ dfdgm * dz.conjugate() / dz.squaredNorm() +
+ fdgm * (dżc * ż + dż * ż.conjugate()) / ż.squaredNorm()
+ )
+ );
+ Matrix<Scalar> ddC =
+ - 0.5 / ż.norm() / dz.norm() * (
+ dfdg * fdgm * dżc.transpose() + dfdgm * fdg * dżc +
+ fdg * fdgm * ddżcdz + fdg * fdgm * ddżdz
+ )
+ + 0.5 / dz.squaredNorm() * (
+ dfdg * dz.conjugate() * dCm.transpose() +
+ dfdgm * dC * dz.adjoint()
+ )
+ + 0.5 / ż.squaredNorm() * (
+ fdg * (dżc * ż + dż * ż.conjugate()) * dCm.transpose() +
+ fdgm * dC * (dżc * ż + dż * ż.conjugate()).transpose()
+ )
+ + 0.5 / ż.norm() / dz.norm() * real(dz.dot(ż)) * fdg * fdgm * (
+ ddżżc + ddżcż + dżc * dż.transpose() + dż * dżc.transpose()
+ ) / ż.squaredNorm()
+ - 0.5 / ż.norm() / dz.norm() * real(dz.dot(ż)) * dfdg * dfdgm * dz.conjugate() * dz.adjoint() / pow(dz.squaredNorm(), 2)
+ - 0.5 / ż.norm() / dz.norm() * real(dz.dot(ż)) / pow(ż.squaredNorm(), 2)
+ * fdg * (dżc * ż + dż * ż.conjugate())
+ * fdgm * (dżc * ż + dż * ż.conjugate()).transpose()
+ ;
+
+ for (unsigned k = 0; k < z.size(); k++) {
+ for (unsigned l = 0; l < z.size(); l++) {
+ M(z.size() * i + k, z.size() * j + l) = ddC(k, l);
+ }
+ }
+ }
+ }
+
+ Cord cNew(*this);
+ Matrix<Scalar> Mf = Matrix<Scalar>::Zero(x.size(), x.size());
+ for (unsigned i = 0; i < x.size(); i++) {
+ for (unsigned j = 0; j < x.size(); j++) {
+ Scalar hi = std::complex<Real>(0.03145, 0.08452);
+ Scalar hj = std::complex<Real>(0.09483, 0.02453);
+ Scalar c0 = cost(J, t);
+ cNew.gs[i / z.size()](i % z.size()) += hi;
+ Scalar ci = cNew.cost(J, t);
+ cNew.gs[j / z.size()](j % z.size()) += hj;
+ Scalar cij = cNew.cost(J, t);
+ cNew.gs[i / z.size()](i % z.size()) -= hi;
+ Scalar cj = cNew.cost(J, t);
+ cNew.gs[j / z.size()](j % z.size()) -= hj;
+
+ Mf(i, j) = (cij - ci - cj + c0) / (hi * hj);
+ }
+ }
+
+ std::cout << M << std::endl;
+ std::cout << Mf << std::endl;
+ getchar();
+
+ return {x, M};
+ }
+
+ template <int p>
+ std::pair<Real, Real> relaxStepNewton(const Tensor<Scalar, p>& J, unsigned nt, Real δ₀) {
+ Vector<Scalar> dgsTot = Vector<Scalar>::Zero(z0.size() * gs.size());
+ Matrix<Scalar> HessTot = Matrix<Scalar>::Zero(z0.size() * gs.size(), z0.size() * gs.size());
+
+ for (unsigned i = 0; i < nt; i++) {
+ Real t = (i + 1.0) / (nt + 1.0);
+ auto [dgsi, Hessi] = gsGradHess(J, t);
+
+ dgsTot += dgsi / nt;
+ HessTot += Hessi / nt;
+ }
+
+ Real stepSize = dgsTot.norm();
+
+ Cord cNew(*this);
+
+ Real δ = δ₀;
+ Real oldCost = totalCost(J, nt);
+
+ Vector<Scalar> δg = HessTot.partialPivLu().solve(dgsTot);
+
+ for (unsigned i = 0; i < gs.size(); i++) {
+ for (unsigned j = 0; j < z0.size(); j++) {
+ cNew.gs[i](j) = gs[i](j) - δg[z0.size() * i + j];
+ }
+ }
+
+ Real newCost = cNew.totalCost(J, nt);
+ if (newCost < oldCost) {
+ std::cout << "Newton step accepted!" << std::endl;
+ }
+
+ while (newCost > oldCost) {
+ for (unsigned i = 0; i < gs.size(); i++) {
+ for (unsigned j = 0; j < z0.size(); j++) {
+ cNew.gs[i](j) = gs[i](j) - δ * conj(dgsTot[z0.size() * i + j]);
+ }
+ }
+
+ newCost = cNew.totalCost(J, nt);
+
+ δ /= 2;
+ }
+
+ gs = cNew.gs;
+
+ return {2*δ, stepSize};
+ }
+
+ template <int p>
std::pair<Real, Real> relaxStep(const Tensor<Scalar, p>& J, unsigned nt, Real δ₀) {
std::vector<Vector<Scalar>> dgsTot(gs.size(), Vector<Scalar>::Zero(z0.size()));
@@ -380,6 +542,20 @@ public:
while (stepSize > 1e-7 && steps < maxSteps) {
std::tie(δ, stepSize) = relaxStep(J, nt, δ);
std::cout << totalCost(J, nt) << " " << δ << " " << stepSize << std::endl;
+ δ *= 2;
+ steps++;
+ }
+ }
+
+ template <int p>
+ void relaxNewton(const Tensor<Scalar, p>& J, unsigned nt, Real δ₀, unsigned maxSteps) {
+ Real δ = δ₀;
+ Real stepSize = std::numeric_limits<Real>::infinity();
+ unsigned steps = 0;
+ while (stepSize > 1e-7 && steps < maxSteps) {
+ std::tie(δ, stepSize) = relaxStepNewton(J, nt, δ);
+ std::cout << totalCost(J, nt) << " " << δ << " " << stepSize << std::endl;
+ δ *= 2;
steps++;
}
}
diff --git a/tensor.hpp b/tensor.hpp
index 1d90c78..aa33069 100644
--- a/tensor.hpp
+++ b/tensor.hpp
@@ -123,3 +123,15 @@ Matrix<Scalar> contractDown(const Tensor<Scalar, r>& J, const Vector<Scalar>& z)
Tensor<Scalar, r - 1> Jz = J.contract(zT, ip00);
return contractDown(Jz, z);
}
+
+template <int f, class Scalar, int r>
+Tensor<Scalar, f> contractDownTo(const Tensor<Scalar, r>& J, const Vector<Scalar>& z) {
+  if constexpr (r == f) {
+    return J;
+  } else {
+    Tensor<Scalar, 1> zT = Eigen::TensorMap<Tensor<const Scalar, 1>>(z.data(), {z.size()});
+    Eigen::array<Eigen::IndexPair<int>, 1> ip00 = {Eigen::IndexPair<int>(0, 0)};
+    Tensor<Scalar, r - 1> Jz = J.contract(zT, ip00);
+    return contractDownTo<f>(Jz, z);
+  }
+}