From 2769a7233886c8f77bf51e29feb9b14f7e207489 Mon Sep 17 00:00:00 2001
From: Jaron Kent-Dobias <jaron@kent-dobias.com>
Date: Thu, 26 Dec 2024 11:27:08 +0100
Subject: Factor out random vector generation; start gradient descent at step size 1/N

Extract the inline Gaussian sampling loops into a randomVector helper and
rename normalize to normalizeVector; drop the Matrix alias and rename the
model variable ls to model. The initial step size in gradientDescent now
starts at 1.0 / N instead of 1, and the projections in randomStep divide by
the squared norm before scaling the vector.

---
 walk.cpp | 40 ++++++++++++++++++++--------------------
 1 file changed, 20 insertions(+), 20 deletions(-)


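A minimal standalone sketch (not from walk.cpp; the helper name, seed, and
parameters are illustrative) of the projection used in randomStep:
normalizeVector places x on the sphere |x|^2 = N, and subtracting
(eta.dot(x) / x.squaredNorm()) * x removes the component of the Gaussian
step along x, so the proposed move is tangent to that sphere; the same
construction is then applied with ∇H in place of x.

    #include <cmath>
    #include <iostream>
    #include <random>
    #include <Eigen/Dense>

    using Vector = Eigen::VectorXd;

    // Draw N i.i.d. Gaussian components with standard deviation sigma,
    // mirroring the randomVector helper added by this patch.
    Vector gaussian(unsigned N, std::mt19937& rng, double sigma) {
      std::normal_distribution<double> d(0, sigma);
      Vector v(N);
      for (unsigned i = 0; i < N; i++) v[i] = d(rng);
      return v;
    }

    int main() {
      unsigned N = 1000;
      std::mt19937 rng(0);

      Vector x = gaussian(N, rng, 1);
      x *= std::sqrt(N / x.squaredNorm());        // place x on the sphere |x|^2 = N

      Vector eta = gaussian(N, rng, std::sqrt(2 * 1e-4));
      eta -= (eta.dot(x) / x.squaredNorm()) * x;  // drop the component along x

      std::cout << eta.dot(x) / N << std::endl;   // ~0: the step is tangent to the sphere
      return 0;
    }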
diff --git a/walk.cpp b/walk.cpp
index ab4dbd3..f064bb5 100644
--- a/walk.cpp
+++ b/walk.cpp
@@ -11,12 +11,19 @@ using Rng = randutils::random_generator<pcg32>;
 
 using Real = double;
 using Vector = Eigen::Matrix<Real, Eigen::Dynamic, 1>;
-using Matrix = Eigen::Matrix<Real, Eigen::Dynamic, Eigen::Dynamic>;
 
-Vector normalize(const Vector& x) {
+Vector normalizeVector(const Vector& x) {
   return x * sqrt(x.size() / x.squaredNorm());
 }
 
+Vector randomVector(unsigned N, Rng& r, Real σ = 1) {
+  Vector v(N);
+  for (Real& vᵢ : v) {
+    vᵢ = r.variate<Real, std::normal_distribution>(0, σ);
+  }
+  return v;
+}
+
 Real wignerCDF(Real λ) {
   return 0.5 + (λ * sqrt(4 - pow(λ, 2)) / 4 + atan(λ / sqrt(4 - pow(λ, 2)))) / M_PI;
 }
@@ -63,7 +70,7 @@ public:
 Vector gradientDescent(const QuadraticModel& M, const Vector& x₀, Real E, Real ε = 1e-14) {
   Vector xₜ = x₀;
   Real Hₜ = M.H(x₀);
-  Real α = 1;
+  Real α = 1.0 / M.N;
   Real m;
   Vector ∇H;
 
@@ -75,7 +82,7 @@ Vector gradientDescent(const QuadraticModel& M, const Vector& x₀, Real E, Real
     Real Hₜ₊₁;
 
     while (
-      xₜ₊₁ = normalize(xₜ - α * ∇H), Hₜ₊₁ = M.H(xₜ₊₁),
+      xₜ₊₁ = normalizeVector(xₜ - α * ∇H), Hₜ₊₁ = M.H(xₜ₊₁),
       pow(Hₜ₊₁ / M.N - E, 2) > pow(Hₜ / M.N - E, 2) - α * m
     ) {
       α /= 2;
@@ -90,14 +97,11 @@ Vector gradientDescent(const QuadraticModel& M, const Vector& x₀, Real E, Real
 }
 
 Vector randomStep(const QuadraticModel& M, const Vector& x₀, Real E, Rng& r, Real Δt = 1e-4) {
-  Vector η(M.N);
-  for (Real& ηᵢ : η) {
-    ηᵢ = r.variate<Real, std::normal_distribution>(0, sqrt(2 * Δt));
-  }
-  η -= η.dot(x₀) * x₀ / x₀.squaredNorm();
+  Vector η = randomVector(M.N, r, sqrt(2 * Δt));
+  η -= (η.dot(x₀) / x₀.squaredNorm()) * x₀;
   Vector ∇H₀ = M.∇H(x₀);
-  η -= η.dot(∇H₀) * ∇H₀ / ∇H₀.squaredNorm();
-  return gradientDescent(M, normalize(x₀ + η), E);
+  η -= (η.dot(∇H₀) / ∇H₀.squaredNorm()) * ∇H₀;
+  return gradientDescent(M, normalizeVector(x₀ + η), E);
 }
 
 int main(int argc, char* argv[]) {
@@ -132,26 +136,22 @@ int main(int argc, char* argv[]) {
   }
 
   Rng r;
-  QuadraticModel ls(N, r);
+  QuadraticModel model(N, r);
 
-  Vector x₀ = Vector::Zero(N);
-  for (Real& xi : x₀) {
-    xi = r.variate<Real, std::normal_distribution>(0, 1);
-  }
-  x₀ = normalize(x₀);
-  x₀ = gradientDescent(ls, x₀, E);
+  Vector x₀ = normalizeVector(randomVector(N, r));
+  x₀ = gradientDescent(model, x₀, E);
 
   std::cout << std::setprecision(15);
 
   for (Real t = 0; t < T₀; t += Δt) {
-    x₀ = randomStep(ls, x₀, E, r, Δt);
+    x₀ = randomStep(model, x₀, E, r, Δt);
   }
 
   Vector x = x₀;
 
   for (Real t = 0; t < T; t += Δt) {
     std::cout << t << " " << x.dot(x₀) / N << std::endl;
-    x = randomStep(ls, x, E, r, Δt);
+    x = randomStep(model, x, E, r, Δt);
   }
 
   return 0;
-- 
cgit v1.2.3-70-g09d2