From 25781a4041fa75a3394949d111be3abbefc97c26 Mon Sep 17 00:00:00 2001
From: Jaron Kent-Dobias
Date: Tue, 26 Jun 2018 15:13:46 -0400
Subject: began generalizing the potts code to work with all finite spin spaces

---
 lib/cluster_finite.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++
 lib/cluster_finite.h |  42 ++++++++++++++++++++++
 lib/dihedral.c       |  12 +++++++
 lib/dihedral.h       |   2 ++
 lib/measurement.h    |   5 +++
 lib/symmetric.c      |  27 ++++++++++++++
 lib/symmetric.h      |  13 +++++++
 lib/types.h          |   3 ++
 lib/wolff_finite.c   |  70 ++++++++++++++++++++++++++++++++++++
 9 files changed, 274 insertions(+)
 create mode 100644 lib/cluster_finite.c
 create mode 100644 lib/cluster_finite.h
 create mode 100644 lib/symmetric.c
 create mode 100644 lib/symmetric.h
 create mode 100644 lib/wolff_finite.c
(limited to 'lib')

diff --git a/lib/cluster_finite.c b/lib/cluster_finite.c
new file mode 100644
index 0000000..f11a3ea
--- /dev/null
+++ b/lib/cluster_finite.c
@@ -0,0 +1,100 @@
+
+#include "cluster_finite.h"
+
+v_t flip_cluster_finite(state_finite_t *s, v_t v0, q_t rot_ind, gsl_rng *r) {
+  q_t *rot = s->transformations + s->q * rot_ind;
+  q_t *R_inv = symmetric_invert(s->q, s->R);
+  v_t nv = 0;
+
+  ll_t *stack = NULL;     // create a new stack
+  stack_push(&stack, v0); // push the initial vertex to the stack
+
+  bool *marks = (bool *)calloc(s->g->nv, sizeof(bool));
+
+  while (stack != NULL) {
+    v_t v = stack_pop(&stack);
+
+    if (!marks[v]) {
+      q_t s_old, s_new;
+      q_t *R_new, *R_inv_new;
+      bool external_flipped;
+
+      marks[v] = true;
+
+      if (v == s->g->nv - 1) {
+        R_new = symmetric_compose(s->q, rot, s->R);
+        R_inv_new = symmetric_invert(s->q, R_new);
+        external_flipped = true;
+      } else {
+        s_old = s->spins[v];
+        s_new = symmetric_act(rot, s_old);
+        external_flipped = false;
+      }
+
+      v_t nn = s->g->v_i[v + 1] - s->g->v_i[v];
+
+      for (v_t i = 0; i < nn; i++) {
+        q_t sn;
+        double prob;
+        bool external_neighbor = false;
+
+        v_t vn = s->g->v_adj[s->g->v_i[v] + i];
+
+        if (vn == s->g->nv - 1) {
+          external_neighbor = true;
+        } else {
+          sn = s->spins[vn];
+        }
+
+        if (external_flipped || external_neighbor) {
+          q_t rot_s_old, rot_s_new;
+
+          if (external_neighbor) {
+            rot_s_old = symmetric_act(R_inv, s_old);
+            rot_s_new = symmetric_act(R_inv, s_new);
+          } else {
+            rot_s_old = symmetric_act(R_inv, sn);
+            rot_s_new = symmetric_act(R_inv_new, sn);
+          }
+
+          prob = s->H_probs[rot_s_new * s->q + rot_s_old];
+
+          s->M[rot_s_old]--;
+          s->M[rot_s_new]++;
+
+          s->E += - s->H[rot_s_new] + s->H[rot_s_old];
+        } else {
+          q_t diff_old = (s_old + s->q - sn) % s->q;
+          q_t diff_new = (s_new + s->q - sn) % s->q;
+
+          prob = s->J_probs[diff_new * s->q + diff_old];
+
+          s->E += - s->J[diff_new] + s->J[diff_old];
+        }
+
+        if (gsl_rng_uniform(r) < prob) { // and with probability prob...
+          stack_push(&stack, vn); // push the neighboring vertex to the stack
+        }
+      }
+
+      if (external_flipped) {
+        free(s->R);
+        free(R_inv);
+        s->R = R_new;
+        R_inv = R_inv_new;
+      } else {
+        s->spins[v] = s_new;
+      }
+
+      if (v != s->g->nv - 1) { // count the number of non-external sites that flip
+        nv++;
+      }
+    }
+  }
+
+  free(marks);
+  free(R_inv);
+
+  return nv;
+}
+
diff --git a/lib/cluster_finite.h b/lib/cluster_finite.h
new file mode 100644
index 0000000..abdc8fc
--- /dev/null
+++ b/lib/cluster_finite.h
@@ -0,0 +1,42 @@
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "types.h"
+#include "rand.h"
+#include "stack.h"
+#include "convex.h"
+#include "graph.h"
+#include "tree.h"
+#include "measurement.h"
+#include "symmetric.h"
+#include "yule_walker.h"
+
+typedef struct {
+  graph_t *g;
+  q_t q;
+  R_t n_transformations;
+  q_t *transformations;
+  double T;
+  double *J;
+  double *H;
+  double *J_probs;
+  double *H_probs;
+  q_t *spins;
+  q_t *R;
+  double E;
+  v_t *M;
+} state_finite_t;
+
+v_t flip_cluster_finite(state_finite_t *s, v_t v0, q_t rot, gsl_rng *r);
+
diff --git a/lib/dihedral.c b/lib/dihedral.c
index 623041a..ac74a23 100644
--- a/lib/dihedral.c
+++ b/lib/dihedral.c
@@ -25,4 +25,16 @@ q_t dihedral_inverse_act(q_t q, const dihedral_t *g, q_t s) {
   }
 }
 
+q_t *dihedral_gen_transformations(q_t q) {
+  q_t *transformations = (q_t *)malloc(q * q * sizeof(q_t));
+
+  for (q_t i = 0; i < q; i++) {
+    for (q_t j = 0; j < q; j++) {
+      transformations[q * i + j] = dihedral_act(q, i, j);
+    }
+  }
+
+  return transformations;
+}
+
diff --git a/lib/dihedral.h b/lib/dihedral.h
index 813e796..e5e4cbd 100644
--- a/lib/dihedral.h
+++ b/lib/dihedral.h
@@ -15,3 +15,5 @@ q_t dihedral_act(q_t q, q_t gi, q_t s);
 
 q_t dihedral_inverse_act(q_t q, const dihedral_t *g, q_t s);
 
+q_t *dihedral_gen_transformations(q_t q);
+
diff --git a/lib/measurement.h b/lib/measurement.h
index eaa260b..46c034f 100644
--- a/lib/measurement.h
+++ b/lib/measurement.h
@@ -24,6 +24,11 @@ typedef struct {
   double O2;
 } autocorr_t;
 
+typedef struct {
+  void (*f)(state_finite_t *, void *);
+  void *data;
+} measurement_t;
+
 void meas_update(meas_t *m, double x);
 
 double meas_dx(const meas_t *m);
diff --git a/lib/symmetric.c b/lib/symmetric.c
new file mode 100644
index 0000000..729b38c
--- /dev/null
+++ b/lib/symmetric.c
@@ -0,0 +1,27 @@
+
+#include "symmetric.h"
+
+q_t *symmetric_compose(q_t q, const q_t *g1, const q_t *g2) {
+  q_t *g3 = (q_t *)malloc(q * sizeof(q_t));
+
+  for (q_t i = 0; i < q; i++) {
+    g3[i] = g1[g2[i]];
+  }
+
+  return g3;
+}
+
+q_t symmetric_act(const q_t *g, q_t s) {
+  return g[s];
+}
+
+q_t *symmetric_invert(q_t q, const q_t *g) {
+  q_t *g_inv = (q_t *)malloc(q * sizeof(q_t));
+
+  for (q_t i = 0; i < q; i++) {
+    g_inv[g[i]] = i;
+  }
+
+  return g_inv;
+}
+
diff --git a/lib/symmetric.h b/lib/symmetric.h
new file mode 100644
index 0000000..6e00f52
--- /dev/null
+++ b/lib/symmetric.h
@@ -0,0 +1,13 @@
+
+#pragma once
+
+#include
+
+#include "types.h"
+
+q_t *symmetric_compose(q_t q, const q_t *g1, const q_t *g2);
+
+q_t symmetric_act(const q_t *g, q_t s);
+
+q_t *symmetric_invert(q_t q, const q_t *g);
+
diff --git a/lib/types.h b/lib/types.h
index fcc2ce7..572ec64 100644
--- a/lib/types.h
+++ b/lib/types.h
@@ -3,6 +3,7 @@ typedef uint_fast32_t v_t;
 typedef uint_fast8_t q_t;
+typedef uint_fast16_t R_t;
 typedef uint_fast8_t D_t;
 typedef uint_fast16_t L_t;
 typedef uint_fast64_t count_t;
 typedef int_fast64_t h_t;
 
@@ -10,6 +11,7 @@ typedef int_fast64_t h_t;
 #define MAX_v UINT_FAST32_MAX
 #define MAX_Q UINT_FAST8_MAX
+#define MAX_R UINT_FAST16_MAX
 #define MAX_D UINT_FAST8_MAX
 #define MAX_L UINT_FAST16_MAX
 #define MAX_COUNT UINT_FAST64_MAX
 
@@ -18,6 +20,7 @@ typedef int_fast64_t h_t;
 #define PRIv PRIuFAST32
 #define PRIq PRIuFAST8
+#define PRIR PRIuFAST16
 #define PRID PRIuFAST8
 #define PRIL PRIuFAST16
 #define PRIcount PRIuFAST64
diff --git a/lib/wolff_finite.c b/lib/wolff_finite.c
new file mode 100644
index 0000000..64de9ba
--- /dev/null
+++ b/lib/wolff_finite.c
@@ -0,0 +1,70 @@
+
+#include "cluster_finite.h"
+
+void wolff_finite(state_finite_t *s, count_t sweeps, count_t sweeps_per_measurement, count_t n_measurements, measurement_t *measurements) {
+  for (count_t i = 0; i < sweeps; i++) {
+
+    count_t n_flips = 0;
+
+    while (n_flips / h->nv < sweeps_per_measurement) {
+      v_t v0 = gsl_rng_uniform_int(r, h->nv);
+      R_t step;
+
+      bool changed = false;
+      while (!changed) {
+        step = gsl_rng_uniform_int(r, s->n_transformations);
+        if (symmetric_act(s->transformations + q * step, s->spins[v0]) != s->spins[v0]) {
+          changed = true;
+        }
+      }
+
+      v_t tmp_flips = flip_cluster_finite(s, v0, step, r);
+      n_flips += tmp_flips;
+
+      if (n_runs > 0) {
+        n_steps++;
+        meas_update(clust, tmp_flips);
+
+        if (record_autocorrelation && n_steps % ac_skip == 0) {
+          update_autocorr(autocorr, s->E);
+        }
+
+      }
+
+    }
+
+    for (q_t i = 0; i < q; i++) {
+      meas_update(M[i], s->M[i]);
+    }
+    meas_update(E, s->E);
+
+    q_t n_at_max = 0;
+    q_t max_M_i = 0;
+    v_t max_M = 0;
+
+    for (q_t i = 0; i < q; i++) {
+      if (s->M[i] > max_M) {
+        n_at_max = 1;
+        max_M_i = i;
+        max_M = s->M[i];
+      } else if (s->M[i] == max_M) {
+        n_at_max++;
+      }
+    }
+
+    if (record_distribution) {
+      mag_dist[s->M[0]]++;
+    }
+
+    if (n_at_max == 1) {
+      for (q_t i = 0; i < q; i++) {
+        meas_update(sM[max_M_i][i], s->M[i]);
+      }
+      meas_update(sE[max_M_i], s->E);
+      freqs[max_M_i]++;
+    }
+
+    diff = fabs(meas_dx(clust) / clust->x);
+  }
+}
+
--
cgit v1.2.3-70-g09d2
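
A minimal usage sketch (not part of the patch) of the symmetric-group helpers this commit introduces: symmetric_invert, symmetric_compose, and symmetric_act come from the new lib/symmetric.c above; the main() driver, the example permutation g, and the assumption that the patched lib/ headers and symmetric.c are compiled alongside it are mine.

  #include <stdio.h>
  #include <stdlib.h>

  #include "types.h"     // q_t, from the patched lib/types.h
  #include "symmetric.h" // symmetric_* declarations from this commit

  int main(void) {
    q_t q = 4;

    // a permutation of {0,1,2,3}, stored as the image of each element
    q_t g[4] = {2, 0, 3, 1};

    q_t *g_inv = symmetric_invert(q, g);       // g_inv[g[i]] == i
    q_t *id = symmetric_compose(q, g_inv, g);  // g_inv composed with g is the identity

    for (q_t s = 0; s < q; s++) {
      // acting with g and then g_inv returns the original spin value
      printf("%u -> %u -> %u (compose: %u)\n",
             (unsigned)s,
             (unsigned)symmetric_act(g, s),
             (unsigned)symmetric_act(g_inv, symmetric_act(g, s)),
             (unsigned)id[s]);
    }

    free(g_inv);
    free(id);
    return 0;
  }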