Diffstat (limited to 'fits.cpp')
-rw-r--r--  fits.cpp  188
1 file changed, 188 insertions, 0 deletions
diff --git a/fits.cpp b/fits.cpp
index a342791..923712b 100644
--- a/fits.cpp
+++ b/fits.cpp
@@ -1,5 +1,6 @@
#include <getopt.h>
#include <iomanip>
+#include <list>
#include "pcg-cpp/include/pcg_random.hpp"
#include "randutils/randutils.hpp"
@@ -10,3 +11,190 @@ using Rng = randutils::random_generator<pcg32>;
using Real = double;
using Vector = Eigen::Matrix<Real, Eigen::Dynamic, 1>;
+
+class fitModel {
+public:
+ unsigned N;
+
+ fitModel(unsigned N) : N(N) {}
+
+ /*
+ virtual Real basisFunctions(unsigned i, Real x) const {
+ if (i == 0) {
+ return 1;
+ } else {
+ return pow(x, i);
+ }
+ }
+ */
+
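+ // Basis functions: absolute-value functions |x - i/(N - 1)|, kinked at
+ // N evenly spaced points on [0, 1].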
+ Real basisFunctions(unsigned i, Real x) const {
+ return std::abs(x - i / (N - 1.0));
+ }
+
+ Real value(const Vector& coeffs, Real x) const {
+ Real v = 0;
+
+ for (unsigned j = 0; j < N; j++) {
+ v += coeffs[j] * basisFunctions(j, x);
+ }
+
+ return v;
+ }
+
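+ // Least-squares cost: C(a) = ½ Σᵢ (f_a(xᵢ) - yᵢ)² summed over the data.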
+ Real cost(const std::list<std::tuple<Real, Real>>& data, const Vector& coeffs) const {
+ Real c = 0;
+
+ for (const std::tuple<Real, Real>& xy : data) {
+ Real x = std::get<0>(xy);
+ Real y = std::get<1>(xy);
+ c += 0.5 * pow(value(coeffs, x) - y, 2);
+ }
+
+ return c;
+ }
+
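+ // Gradient of the cost with respect to the coefficients:
+ // ∂C/∂aⱼ = Σᵢ (f_a(xᵢ) - yᵢ) φⱼ(xᵢ), with φⱼ the j-th basis function.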
+ Vector dCost(const std::list<std::tuple<Real, Real>>& data, const Vector& coeffs) const {
+ Vector dC = Vector::Zero(N);
+
+ for (unsigned j = 0; j < N; j++) {
+ for (const std::tuple<Real, Real>& xy : data) {
+ Real x = std::get<0>(xy);
+ Real y = std::get<1>(xy);
+ dC[j] += (value(coeffs, x) - y) * basisFunctions(j, x);
+ }
+ }
+
+ return dC;
+ }
+};
+
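+ // Gradient descent with a backtracking line search: the step size α is
+ // halved until the Armijo condition Hₜ₊₁ ≤ Hₜ - ½α‖∇H‖² holds, then grown
+ // by 25% after each accepted step.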
+Vector gradientDescent(const fitModel& M, const std::list<std::tuple<Real, Real>>& data, const Vector& a₀, unsigned maxSteps, Real ε = 1e-12) {
+ Vector xₜ = a₀;
+ Real Hₜ = M.cost(data, a₀);
+ Real α = 1.0;
+ Real m;
+ Vector ∇H;
+ unsigned steps = 0;
+
+ while (
+ ∇H = M.dCost(data, xₜ), m = ∇H.squaredNorm(),
+ m > ε && steps < maxSteps
+ ) {
+ Vector xₜ₊₁;
+ Real Hₜ₊₁;
+
+ while (
+ xₜ₊₁ = xₜ - α * ∇H, Hₜ₊₁ = M.cost(data, xₜ₊₁),
+ Hₜ₊₁ > Hₜ - 0.5 * α * m
+ ) {
+ α /= 2;
+ }
+
+ xₜ = xₜ₊₁;
+ Hₜ = Hₜ₊₁;
+ α *= 1.25;
+ steps++;
+ }
+
+ return xₜ;
+}
+
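+ // Noisy gradient descent: each step uses the full gradient plus small
+ // Gaussian noise γ, with a fixed step size α. (Range-for over an Eigen
+ // vector requires Eigen 3.4 or later.)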
+Vector stochasticGradientDescent(const fitModel& M, const std::list<std::tuple<Real, Real>>& data, const Vector& a₀, Rng& r, unsigned maxSteps, Real ε = 1e-12) {
+ Vector xₜ = a₀;
+ Real α = 1e-3;
+ Real m;
+ Vector ∇H;
+ unsigned steps = 0;
+
+ while (
+ ∇H = M.dCost(data, xₜ), m = ∇H.squaredNorm(),
+ m > ε && steps < maxSteps
+ ) {
+ Vector γ(M.N);
+ for (Real& γi : γ) {
+ γi = r.variate<Real, std::normal_distribution>(0, 1e-4);
+ }
+ xₜ = xₜ - α * ∇H + γ;
+ steps++;
+ }
+
+ return xₜ;
+}
+
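+ // Sample f at M evenly spaced points on [0, 1], adding Gaussian noise of
+ // standard deviation σ to each value.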
+std::list<std::tuple<Real, Real>> generateData(Real (*f)(Real), unsigned M, Real σ, Rng& r) {
+ std::list<std::tuple<Real, Real>> data;
+
+ for (unsigned i = 0; i < M; i++) {
+ Real x = ((Real)i) / (M - 1.0);
+ data.push_back({x, f(x) + r.variate<Real, std::normal_distribution>(0, σ)});
+ }
+
+ return data;
+}
+
+int main(int argc, char* argv[]) {
+ unsigned N = 10;
+ unsigned M = 10;
+ Real σ = 0.2;
+ unsigned maxSteps = 1e5;
+
+ int opt;
+
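+ // Options: -N number of basis functions, -M number of data points,
+ // -s noise amplitude σ, -S maximum steps (atof allows forms like 1e5).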
+ while ((opt = getopt(argc, argv, "N:M:s:S:")) != -1) {
+ switch (opt) {
+ case 'N':
+ N = (unsigned)atof(optarg);
+ break;
+ case 'M':
+ M = (unsigned)atof(optarg);
+ break;
+ case 's':
+ σ = atof(optarg);
+ break;
+ case 'S':
+ maxSteps = (unsigned)atof(optarg);
+ break;
+ default:
+ exit(1);
+ }
+ }
+
+ Rng r;
+
+ std::list<std::tuple<Real, Real>> data = generateData([](Real x) {return sin(2 * M_PI * x);}, M, σ, r);
+
+ std::cout << std::setprecision(15);
+
+ for (const std::tuple<Real, Real>& datum : data) {
+ std::cout << std::get<0>(datum) << " " << std::get<1>(datum) << " ";
+ }
+ std::cout << std::endl;
+
+ fitModel model(N);
+
+ Vector a₀ = Vector::Zero(N);
+ Vector a = gradientDescent(model, data, a₀, maxSteps);
+
+ for (unsigned i = 0; i < N; i++) {
+ std::cout << a[i] << " ";
+ }
+ std::cout << std::endl;
+
+ return 0;
+}
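
The commit defines stochasticGradientDescent but main only exercises gradientDescent. A minimal sketch of how the noisy variant could be called alongside it, reusing the model, data, starting point, and Rng already in main (this snippet is illustrative, not part of the commit):

    // Hypothetical follow-up inside main: run the noisy variant from the
    // same starting point and print its coefficients on a third line.
    Vector aNoisy = stochasticGradientDescent(model, data, a₀, r, maxSteps);

    for (unsigned i = 0; i < N; i++) {
      std::cout << aNoisy[i] << " ";
    }
    std::cout << std::endl;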