Compare commits

...

7 Commits

Author SHA1 Message Date
Florian Stecker f89545e995 simplify image generation 2022-06-18 14:54:50 +02:00
Florian Stecker 920a530385 remove unnecessary function qext_newtype() 2022-06-16 15:43:19 +02:00
Florian Stecker cfc13d2ba7 add useful help text 2022-06-15 18:47:55 +02:00
Florian Stecker 979cbe7922 parallelization scripts 2022-06-15 18:04:42 +02:00
Florian Stecker ec34567ace Makefile fix 2022-06-15 15:27:08 +02:00
Florian Stecker 575c310cd3 remove irrelevant files 2022-06-15 15:19:38 +02:00
Florian Stecker ac80bc9f3f new simpler approach to parallelization 2022-06-15 15:17:54 +02:00
35 changed files with 143 additions and 52923 deletions


@@ -12,7 +12,7 @@ CC=gcc
all: complex_anosov
complex_anosov: complex_anosov.o mat.o coxeter.o enumerate.o generators.o qext.o
$(CC) $(OPTIONS) -o complex_anosov -lm complex_anosov.o mat.o coxeter.o enumerate.o generators.o qext.o -lgmp -lmps
$(CC) $(OPTIONS) -o complex_anosov complex_anosov.o mat.o coxeter.o enumerate.o generators.o qext.o -lgmp -lmps -lm
complex_anosov.o: complex_anosov.c $(HEADERS)
gcc $(OPTIONS) -c complex_anosov.c
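(The fix moves -lm behind the object files: linkers resolve libraries left to right, so a library must come after the objects that reference its symbols.)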


@@ -165,6 +165,13 @@ int main(int argc, char *argv[])
fprintf(stderr, "%s evs <n> <q1> <q2> <q3> <treal> <timag> enumerate group and output unique (log) eigenvalue triples\n", argv[0]);
fprintf(stderr, "%s summary <n> <q1> <q2> <q3> <treal> <timag> only output max slope etc.\n", argv[0]);
fprintf(stderr, "%s trace_ids <n> <q1> <q2> <q3> <treal> <timag> list of ids of unique traces\n", argv[0]);
fprintf(stderr, "\n");
fprintf(stderr, "This program computes the traces and inverse traces of the first n elements of a SL(3,C) representation of the\n");
fprintf(stderr, "triangle reflection group with corner orders (5,5,5). The representation is given by the parameters q1,q2,q3,\n");
fprintf(stderr, "treal, and timag. q1,q2,q3 can each be 1 or 2 and determine the connected component. (1,1,1) is Hitchin and\n");
fprintf(stderr, "(2,2,2) is Barbot. treal and timag are the components of the complex parameter and have to be given as rational\n");
fprintf(stderr, "numbers. If the environment variable IDLIST is set to a filename, a list of elements can be read from this file.\n");
fprintf(stderr, "Such a list can be precomputed with the \"trace_ids\" subcommand and speeds up the computation considerably.\n");
return 0;
}
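/* Illustrative invocation (example values only; parallelization/generate_commands.py
further below emits commands of exactly this shape):
IDLIST=idlist_16 ../complex_anosov summary 101038 1 1 1 1/200 2/200 */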


@@ -3,65 +3,43 @@
import sys
import math
def sharpen(x):
alpha = 1
y = abs(2*x-1)**alpha # between 0 and 1
if x > 0.499 and x < 0.501:
return 0.5
elif x > 0.5:
return (y+1)/2
else:
return (1-y)/2
f = open(sys.argv[1])
lines = [x.split() for x in f]
f.close()
res1 = int(sys.argv[2]) # 400 pixel per unit
res2 = int(sys.argv[3]) # 50 pixel in picture (actually one quadrant)
res = int(sys.argv[2]) # pixel per unit
data = {(round(float(l[0])*res1), round(float(l[1])*res1)) : float(l[5]) for l in lines}
data = {(round(float(l[0])*res), round(float(l[1])*res)) : float(l[5]) for l in lines}
data[(0,0)] = 2.0
xmin = min([p[0] for p in data.keys()])
xmax = max([p[0] for p in data.keys()])
ymin = min([p[1] for p in data.keys()])
ymax = max([p[1] for p in data.keys()])
yrange=max([-ymin,ymax])
print("P3")
print("1000 1000")
print("{dx:d} {dy:d}".format(dx=xmax-xmin+1, dy=2*yrange+1))
print("255")
for i in range (-500,500):
for j in range(-500,500):
x = j/500*res2
y = i/500*res2
for i in range(-yrange,yrange+1):
for j in range(xmin,xmax+1):
x = j
y = i if i >= ymin and i <= ymax else -i
if y < 0:
y = -y
x0 = math.floor(x)
y0 = math.floor(y)
x1 = math.ceil(x)
y1 = math.ceil(y)
tx = sharpen(x-x0)
ty = sharpen(y-y0)
if not (x0,y0) in data or not (x0,y1) in data or not (x1,y0) in data or not (x1,y1) in data:
if not (x,y) in data:
value = 0
else:
value = 0.0
value += data[(x0,y0)]*(1-tx)*(1-ty)
value += data[(x0,y1)]*(1-tx)*ty
value += data[(x1,y0)]*tx*(1-ty)
value += data[(x1,y1)]*tx*ty
value -= 1
value = data[(x,y)]-1
value = 0 if value < 0 else 1 if value > 1 else value
r = round(255*math.sqrt(value))
r = round(255*value**0.5)
g = round(255*(value)**3)
b = round(255*math.sin(2*math.pi*(value)))
b = round(255*(math.sin(2*math.pi*value)))
r = 0 if r < 0 else 255 if r > 255 else r
g = 0 if g < 0 else 255 if g > 255 else g
b = 0 if b < 0 else 255 if b > 255 else b
print("%d %d %d" % (r,g,b))
#print(data)


@@ -1,335 +0,0 @@
#include "enumerate_triangle_group.h"
#include "linalg.h"
int solve_characteristic_polynomial(mps_context *solv, mps_monomial_poly *poly, mpq_t tr, mpq_t trinv, double *eigenvalues)
{
mpq_t coeff1, coeff2, zero;
cplx_t *roots;
double radii[3];
double *radii_p[3];
mps_boolean errors;
int result = 0;
mpq_inits(coeff1, coeff2, zero, NULL);
mpq_set(coeff1, trinv);
mpq_sub(coeff2, zero, tr);
mps_monomial_poly_set_coefficient_int(solv, poly, 0, -1, 0);
mps_monomial_poly_set_coefficient_q(solv, poly, 1, coeff1, zero);
mps_monomial_poly_set_coefficient_q(solv, poly, 2, coeff2, zero);
mps_monomial_poly_set_coefficient_int(solv, poly, 3, 1, 0);
mps_context_set_input_poly(solv, (mps_polynomial*)poly);
mps_mpsolve(solv);
roots = cplx_valloc(3);
for(int i = 0; i < 3; i++)
radii_p[i] = &(radii[i]);
mps_context_get_roots_d(solv, &roots, radii_p);
errors = mps_context_has_errors(solv);
if(errors) {
result = 1;
} else {
for(int i = 0; i < 3; i++) {
eigenvalues[i] = cplx_Re(roots[i]);
if(fabs(cplx_Im(roots[i])) > radii[i]) // non-real root
result = 2;
}
}
cplx_vfree(roots);
mpq_clears(coeff1, coeff2, zero, NULL);
return result;
}
void continued_fraction_approximation(mpq_t out, double in, int level)
{
mpq_t tmp;
if(in < 0) {
mpq_init(tmp);
mpq_set_ui(tmp, 0, 1);
continued_fraction_approximation(out, -in, level);
mpq_sub(out, tmp, out);
mpq_clear(tmp);
return;
}
if(level == 0) {
mpq_set_si(out, (signed long int)round(in), 1); // nearest integer to in
} else {
continued_fraction_approximation(out, 1/(in - floor(in)), level - 1);
mpq_init(tmp);
mpq_set_ui(tmp, 1, 1);
mpq_div(out, tmp, out); // out -> 1/out
mpq_set_si(tmp, (signed long int)in, 1); // floor(in)
mpq_add(out, out, tmp);
mpq_clear(tmp);
}
}
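/* Hypothetical usage sketch (illustrative, not part of the original file): a
depth-3 expansion of 3.14159 unwinds to the classical convergent 355/113. */
static void example_continued_fraction(void)
{
mpq_t approx;
mpq_init(approx);
continued_fraction_approximation(approx, 3.14159, 3);
gmp_printf("%Qd\n", approx); // prints 355/113
mpq_clear(approx);
}
// Evaluate the quartic a*x^4 + b*x^3 + c*x^2 + d*x + e at x = in using
// Horner's scheme, in whatever number type in carries.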
void quartic(NUMBER out, NUMBER in, int a, int b, int c, int d, int e)
{
NUMBER tmp;
INIT(tmp, GETTYPE(in));
SET_INT(out, a);
MUL(out, out, in);
SET_INT(tmp, b);
ADD(out, out, tmp);
MUL(out, out, in);
SET_INT(tmp, c);
ADD(out, out, tmp);
MUL(out, out, in);
SET_INT(tmp, d);
ADD(out, out, tmp);
MUL(out, out, in);
SET_INT(tmp, e);
ADD(out, out, tmp);
CLEAR(tmp);
}
// discriminant of the polynomial z^3 - x z^2 + y z - 1
// that is the function x^2 y^2 - 4 x^3 - 4 y^3 - 27 + 18 xy
void discriminant(NUMBER out, NUMBER x, NUMBER y)
{
TYPE type = GETTYPE(out);
NUMBER x2, x3, y2, y3, tmp;
INIT(x2, type);INIT(x3, type);INIT(y2, type);INIT(y3, type);INIT(tmp, type);
MUL(x2, x, x);
MUL(x3, x2, x);
MUL(y2, y, y);
MUL(y3, y2, y);
MUL(out, x2, y2);
SET_INT(tmp, -4);
MUL(tmp, tmp, x3);
ADD(out, out, tmp);
SET_INT(tmp, -4);
MUL(tmp, tmp, y3);
ADD(out, out, tmp);
SET_INT(tmp, -27);
ADD(out, out, tmp);
SET_INT(tmp, 18);
MUL(tmp, tmp, x);
MUL(tmp, tmp, y);
ADD(out, out, tmp);
CLEAR(x2);CLEAR(x3);CLEAR(y2);CLEAR(y3);CLEAR(tmp);
}
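// For real x and y, this discriminant is positive exactly when the cubic has
// three distinct real roots, and negative when it has a complex-conjugate
// pair; the loxodromic test further below relies on its sign.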
void generators_triangle_rotation_generic(mat *gen, NUMBER rho1, NUMBER rho2, NUMBER rho3, mpq_t s, mpq_t q)
{
mat_workspace *ws;
mat r1,r2,r3;
NUMBER b1,c1,a2,c2,a3,b3;
TYPE type = GETTYPE(rho1);
mpq_t sinv, qinv;
ws = mat_workspace_init(3, type);
INIT(b1, type);INIT(c1, type);INIT(a2, type);INIT(c2, type);INIT(a3, type);INIT(b3, type);
mpq_init(sinv);
mpq_init(qinv);
mat_init(r1, 3, type);
mat_init(r2, 3, type);
mat_init(r3, 3, type);
// sinv = s^{-1}
mpq_inv(sinv, s);
mpq_inv(qinv, q);
// c1 = rho2 q, a2 = rho3 q, b3 = rho1 q, b1 = c2 = a3 = 1/q
SET_Q(c1, q);
SET_Q(a2, q);
SET_Q(b3, q);
MUL(c1, c1, rho2);
MUL(a2, a2, rho3);
MUL(b3, b3, rho1);
SET_INT(b1, 1);
SET_INT(c2, 1);
SET_INT(a3, 1);
SET_Q(b1, qinv);
SET_Q(c2, qinv);
SET_Q(a3, qinv);
mat_zero(r1);
mat_zero(r2);
mat_zero(r3);
SET_INT(*mat_ref(r1, 0, 0), -1);
SET_INT(*mat_ref(r1, 1, 1), 1);
SET_INT(*mat_ref(r1, 2, 2), 1);
SET_INT(*mat_ref(r2, 0, 0), 1);
SET_INT(*mat_ref(r2, 1, 1), -1);
SET_INT(*mat_ref(r2, 2, 2), 1);
SET_INT(*mat_ref(r3, 0, 0), 1);
SET_INT(*mat_ref(r3, 1, 1), 1);
SET_INT(*mat_ref(r3, 2, 2), -1);
SET(*mat_ref(r1, 1, 0), b1);
SET(*mat_ref(r1, 2, 0), c1);
SET(*mat_ref(r2, 0, 1), a2);
SET(*mat_ref(r2, 2, 1), c2);
SET(*mat_ref(r3, 0, 2), a3);
SET(*mat_ref(r3, 1, 2), b3);
mat_zero(gen[0]);
mat_zero(gen[1]);
mat_zero(gen[2]);
// gen[0] = diag(1,s^{-1},s)
SET_INT(*mat_ref(gen[0], 0, 0), 1);
SET_Q (*mat_ref(gen[0], 1, 1), sinv);
SET_Q (*mat_ref(gen[0], 2, 2), s);
// gen[1] = diag(s,1,s^{-1})
SET_Q (*mat_ref(gen[1], 0, 0), s);
SET_INT(*mat_ref(gen[1], 1, 1), 1);
SET_Q (*mat_ref(gen[1], 2, 2), sinv);
// gen[2] = diag(s^{-1},s,1)
SET_Q (*mat_ref(gen[2], 0, 0), sinv);
SET_Q (*mat_ref(gen[2], 1, 1), s);
SET_INT(*mat_ref(gen[2], 2, 2), 1);
// gen[0] = r2 * gen[0] * r3
// gen[1] = r3 * gen[1] * r1
// gen[2] = r1 * gen[2] * r2
mat_multiply(ws, gen[0], r2, gen[0]);
mat_multiply(ws, gen[0], gen[0], r3);
mat_multiply(ws, gen[1], r3, gen[1]);
mat_multiply(ws, gen[1], gen[1], r1);
mat_multiply(ws, gen[2], r1, gen[2]);
mat_multiply(ws, gen[2], gen[2], r2);
// gen[3] = gen[0]^{-1}
// gen[4] = gen[1]^{-1}
// gen[5] = gen[2]^{-1}
mat_pseudoinverse(ws, gen[3], gen[0]);
mat_pseudoinverse(ws, gen[4], gen[1]);
mat_pseudoinverse(ws, gen[5], gen[2]);
mat_workspace_clear(ws);
CLEAR(b1);CLEAR(c1);CLEAR(a2);CLEAR(c2);CLEAR(a3);CLEAR(b3);
mpq_clear(sinv);
mpq_clear(qinv);
mat_clear(r1);
mat_clear(r2);
mat_clear(r3);
}
#ifdef QEXT_TRIVIAL
// p1,p2,p3 are only allowed to be 2,3,4,6
void generators_triangle_rotation_2346_rational(mat *gen, int p1, int p2, int p3, mpq_t s, mpq_t q)
{
mpq_t rho1, rho2, rho3;
mpq_inits(rho1,rho2,rho3,NULL);
// rho_i = s^2 + 2s cos(2 pi / p_i) + 1
// coefficient 2 is the value for p=infinity, not sure if that would even work
quartic(rho1, s, 0, 0, 1, p1 == 2 ? -2 : p1 == 3 ? -1 : p1 == 4 ? 0 : p1 == 6 ? 1 : 2, 1);
quartic(rho2, s, 0, 0, 1, p2 == 2 ? -2 : p2 == 3 ? -1 : p2 == 4 ? 0 : p2 == 6 ? 1 : 2, 1);
quartic(rho3, s, 0, 0, 1, p3 == 2 ? -2 : p3 == 3 ? -1 : p3 == 4 ? 0 : p3 == 6 ? 1 : 2, 1);
generators_triangle_rotation_generic(gen, rho1, rho2, rho3, s, q);
mpq_clears(rho1,rho2,rho3,NULL);
}
#endif
#ifdef QEXT
void generators_triangle_rotation_555_barbot(mat *gen, mpq_t s_, mpq_t q_)
{
NUMBER rho, c, one, s;
INIT(rho, QT_SQRT5);INIT(c, QT_SQRT5);INIT(one, QT_SQRT5);INIT(s, QT_SQRT5);
// c = - (1+sqrt(5))/2
mpq_set_si(c->a[0], -1, 2);
mpq_set_si(c->a[1], -1, 2);
SET_ONE(one);
SET_Q(s, s_);
// rho = s^2 + cs + 1
SET(rho, one);
MUL(rho, rho, s);
ADD(rho, rho, c);
MUL(rho, rho, s);
ADD(rho, rho, one);
generators_triangle_rotation_generic(gen, rho, rho, rho, s_, q_);
CLEAR(rho);CLEAR(c);CLEAR(one);CLEAR(s);
}
#endif
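// Write the generator word of g (letters 'a', 'b', 'c') into str and return
// it; str must hold at least g->length characters plus a terminating NUL.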
char *print_word(groupelement_t *g, char *str)
{
int i = g->length - 1;
str[g->length] = 0;
while(g->parent) {
str[i--] = 'a' + g->letter;
g = g->parent;
}
return str;
}
void enumerate_triangle_rotation_subgroup(group_t *group, mat *gen, mat *matrices)
{
mat_workspace *ws;
mat tmp;
char buf[100], buf2[100], buf3[100];
// allocate stuff
TYPE type = GETTYPE(gen[0]->x[0]);
ws = mat_workspace_init(3, type);
mat_init(tmp, 3, type);
// initialize_triangle_generators(ws, gen, p1, p2, p3, s, q);
mat_identity(matrices[0]);
for(int i = 1; i < group->size; i++) {
if(group->elements[i].length % 2 != 0)
continue;
if(!group->elements[i].inverse)
continue;
if(!group->elements[i].need_to_compute)
continue;
int parent = group->elements[i].parent->id;
int grandparent = group->elements[i].parent->parent->id;
int letter;
if(group->elements[parent].letter == 1 && group->elements[i].letter == 2)
letter = 0; // p = bc
else if(group->elements[parent].letter == 2 && group->elements[i].letter == 0)
letter = 1; // q = ca
else if(group->elements[parent].letter == 0 && group->elements[i].letter == 1)
letter = 2; // r = ab
else if(group->elements[parent].letter == 2 && group->elements[i].letter == 1)
letter = 3; // p^{-1} = cb
else if(group->elements[parent].letter == 0 && group->elements[i].letter == 2)
letter = 4; // q^{-1} = ac
else if(group->elements[parent].letter == 1 && group->elements[i].letter == 0)
letter = 5; // r^{-1} = ba
mat_multiply(ws, matrices[i], matrices[grandparent], gen[letter]);
}
// free stuff
mat_clear(tmp);
mat_workspace_clear(ws);
}


@@ -1,30 +0,0 @@
#ifndef ENUMERATE_TRIANGLE_GROUP_H
#define ENUMERATE_TRIANGLE_GROUP_H
#include "mat.h"
#include "coxeter.h"
#include <mps/mps.h>
// rational only functions
int solve_characteristic_polynomial(mps_context *solv, mps_monomial_poly *poly, mpq_t tr, mpq_t trinv, double *eigenvalues);
void continued_fraction_approximation(mpq_t out, double in, int level);
// generators
void generators_triangle_rotation_generic(mat *gen, NUMBER rho1, NUMBER rho2, NUMBER rho3, mpq_t s, mpq_t q);
#ifdef QEXT_TRIVIAL
void generators_triangle_rotation_2346_rational(mat *gen, int p1, int p2, int p3, mpq_t s, mpq_t q);
#endif
#ifdef QEXT
void generators_triangle_rotation_555_barbot(mat *gen, mpq_t s_, mpq_t q_);
#endif
// general functions
void quartic(NUMBER out, NUMBER in, int a, int b, int c, int d, int e);
void discriminant(NUMBER out, NUMBER x, NUMBER y);
char *print_word(groupelement_t *g, char *str);
void enumerate_triangle_rotation_subgroup(group_t *group, mat *gen, mat *matrices);
#endif


@@ -1,471 +0,0 @@
#include "coxeter.h"
#include "linalg.h"
#include "mat.h"
#include "enumerate_triangle_group.h"
#include "parallel.h"
#include <time.h>
#define SWAP(t,x,y) do { t _tmp = (x); (x) = (y); (y) = _tmp; } while (0);
//#define DEBUG(msg, ...) fprintf(stderr, "[%003d%10.3f] " msg, mpi_rank(0), runtime(), ##__VA_ARGS__)
#define DEBUG(msg, ...)
#define INFO(msg, ...) fprintf(stderr, "[%003d%10.3f] " msg, mpi_rank(0), runtime(), ##__VA_ARGS__)
struct result {
int id;
mpq_t tr;
mpq_t trinv;
double x;
double y;
double slope;
};
// we want as much as possible to be node data, except if it is only known to the main node
// (command line arguments) or should only be computed once (id list)
struct global_data {
// command line arguments
unsigned int nmax;
unsigned int p1, p2, p3;
unsigned int sstart, send, sdenom;
unsigned int qstart, qend, qdenom;
unsigned int *id_list;
unsigned int id_list_length;
};
struct node_data {
group_t *group;
mat* matrices;
struct result *invariants;
struct result **distinct_invariants;
int distinct_invariants_length;
mps_context *solver;
};
struct input_data {
unsigned int snum, sden;
unsigned int qnum, qden;
};
struct output_data {
int max_slope_id;
double max_slope;
};
static int compare_result(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
d = mpq_cmp((*a)->tr,(*b)->tr);
if(d == 0) {
d = mpq_cmp((*a)->trinv, (*b)->trinv);
}
return d;
}
static int compare_result_by_id(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
return (*a)->id - (*b)->id;
}
static int compare_result_by_tr_trinv_id(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
d = mpq_cmp((*a)->tr,(*b)->tr);
if(d == 0) {
d = mpq_cmp((*a)->trinv, (*b)->trinv);
if(d == 0) {
d = (*b)->id - (*a)->id;
}
}
return d;
}
static int compare_result_by_slope(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
double slopea = (*a)->x / (*a)->y;
double slopeb = (*b)->x / (*b)->y;
return slopea > slopeb ? -1 : slopea < slopeb ? 1 : 0;
}
int compute_invariants(group_t *group, mat *matrices, struct result **invariants, int *n, int unique)
{
mpq_t tmp;
mps_context *solver;
mps_monomial_poly *poly;
int index;
int ntraces = *n, nuniq;
int retval;
double evs[3];
char buf[100];
// DEBUG("Compute traces\n");
for(int i = 0; i < ntraces; i++) {
int id = invariants[i]->id;
int invid = group->elements[id].inverse->id;
mat_trace(invariants[i]->tr, matrices[id]);
mat_trace(invariants[i]->trinv, matrices[invid]);
}
if(!unique)
nuniq = ntraces;
else {
// DEBUG("Get unique traces\n");
qsort(invariants, ntraces, sizeof(struct result*), compare_result);
nuniq = 0;
for(int i = 0; i < ntraces; i++) {
if(i == 0 || compare_result(&invariants[i], &invariants[nuniq-1]) != 0) {
invariants[nuniq] = invariants[i];
nuniq++;
} else {
int oldlength = group->elements[invariants[nuniq-1]->id].length;
int newlength = group->elements[invariants[i]->id].length;
if(newlength < oldlength)
invariants[nuniq-1]->id = invariants[i]->id;
}
}
}
DEBUG("Solve characteristic polynomials\n");
solver = mps_context_new();
poly = mps_monomial_poly_new(solver, 3);
mps_context_set_output_prec(solver, 20); // relative precision
mps_context_set_output_goal(solver, MPS_OUTPUT_GOAL_APPROXIMATE);
for(int i = 0; i < nuniq; i++) {
retval = solve_characteristic_polynomial(solver, poly, invariants[i]->tr, invariants[i]->trinv, evs);
if(retval == 1) {
fprintf(stderr, "Error! Could not solve polynomial.\n");
continue;
} else if(retval == 2) {
continue;
}
if(fabs(evs[0]) < fabs(evs[1]))
SWAP(double, evs[0], evs[1]);
if(fabs(evs[1]) < fabs(evs[2]))
SWAP(double, evs[1], evs[2]);
if(fabs(evs[0]) < fabs(evs[1]))
SWAP(double, evs[0], evs[1]);
double x = log(fabs(evs[0]));
double y = -log(fabs(evs[2]));
invariants[i]->x = x;
invariants[i]->y = y;
invariants[i]->slope = y/x;
}
mps_context_free(solver);
qsort(invariants, nuniq, sizeof(struct result*), compare_result_by_id);
*n = nuniq;
return 0;
}
long check_memory_usage(mat *matrices, int n)
{
long total = 0;
for(int i = 0; i < n; i++)
{
LOOP(j,3) LOOP(k,3) {
total += mpq_numref(M(matrices[i], j, k))->_mp_size;
total += mpq_denref(M(matrices[i], j, k))->_mp_size;
}
}
return total;
}
void destroy_node(const void *_g, void *_n)
{
struct global_data *g = (struct global_data *)_g;
struct node_data *n = (struct node_data *)_n;
for(int i = 0; i < g->nmax; i++) {
mpq_clear(n->invariants[i].tr);
mpq_clear(n->invariants[i].trinv);
}
free(n->invariants);
free(n->distinct_invariants);
for(int i = 0; i < g->nmax; i++)
mat_clear(n->matrices[i]);
free(n->matrices);
coxeter_clear(n->group);
}
int init_node(const void *_g, void *_n)
{
struct global_data *g = (struct global_data *)_g;
struct node_data *n = (struct node_data *)_n;
DEBUG("Allocate\n");
g->id_list = (int*)(g+1); // pointers get scrambled by transmission, reconstruct
n->matrices = malloc(g->nmax*sizeof(mat));
for(int i = 0; i < g->nmax; i++)
mat_init(n->matrices[i], 3);
n->invariants = malloc(g->nmax*sizeof(struct result));
n->distinct_invariants = malloc(g->nmax*sizeof(struct result)); // we won't need that many, but just in case
for(int i = 0; i < g->nmax; i++) {
mpq_init(n->invariants[i].tr);
mpq_init(n->invariants[i].trinv);
n->invariants[i].id = i;
}
// order of the triangle reflection generators: a, b, c
// order of the rotation orders: bc, ac, ab
DEBUG("Generate group\n");
n->group = coxeter_init_triangle(g->p1, g->p2, g->p3, g->nmax);
return 0;
}
int process_output(group_t *group, mat *matrices, struct result **invariants, int invariants_length, struct output_data *out)
{
out->max_slope = 0;
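// keep the element with the largest slope y/x; the x > 0.1 || y > 0.1 guard
// skips elements so close to the identity that the slope is numerically
// meaningless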
for(int i = 0; i < invariants_length; i++) {
double x = invariants[i]->x;
double y = invariants[i]->y;
if(y/x > out->max_slope + 1e-12 && (x > 0.1 || y > 0.1)) {
out->max_slope_id = invariants[i]->id;
out->max_slope = y/x;
}
}
return 0;
}
int do_computation(const void *_g, void *_n, const void *_in, void *_out)
{
struct global_data *g = (struct global_data *)_g;
struct node_data *n = (struct node_data *)_n;
struct input_data *in = (struct input_data *)_in;
struct output_data *out = (struct output_data *)_out;
mpq_t s, q;
mpq_inits(s, q, NULL);
mpq_set_ui(s, in->snum, in->sden);
mpq_set_ui(q, in->qnum, in->qden);
INFO("Computing max slope element for s = %d/%d and q = %d/%d.\n",
in->snum, in->sden,
in->qnum, in->qden);
for(int i = 0; i < n->group->size; i++)
n->group->elements[i].need_to_compute = 0;
n->group->elements[0].need_to_compute = 1;
int needed_elements = 1;
for(int i = 0; i < g->id_list_length; i++)
{
int id = g->id_list[i];
n->distinct_invariants[i] = &n->invariants[id];
groupelement_t *cur = &n->group->elements[id];
while(cur->need_to_compute == 0) {
cur->need_to_compute = 1;
needed_elements++;
cur = cur->parent->parent; // also need to compute its even-length ancestors
}
cur = n->group->elements[id].inverse;
while(cur->need_to_compute == 0) {
cur->need_to_compute = 1;
needed_elements++;
cur = cur->parent->parent;
}
}
n->distinct_invariants_length = g->id_list_length;
DEBUG("Need to compute %d elements to get %d traces up to reflection length %d\n",
needed_elements, g->id_list_length, n->group->elements[n->group->size-1].length);
DEBUG("Compute matrices\n");
enumerate(n->group, n->matrices, g->p1, g->p2, g->p3, s, q);
DEBUG("Compute invariants\n");
compute_invariants(
n->group, n->matrices,
n->distinct_invariants, &n->distinct_invariants_length, 1);
DEBUG("Find max slopes\n");
process_output(n->group, n->matrices, n->distinct_invariants, n->distinct_invariants_length, out);
mpq_clears(s, q, NULL);
return 0;
}
int main(int argc, char *argv[])
{
char buf[100];
char buf2[100];
struct global_data *g;
struct node_data n;
start_timer();
// parse command line arguments
if(argc < 11) {
fprintf(stderr, "Usage: %s <N> <p1> <p2> <p3> <s start> <s end> <s denom> <q start> <q end> <q denom> [restart]\n", argv[0]);
exit(1);
}
int nmax = atoi(argv[1]);
g = (struct global_data*)malloc(sizeof(struct global_data) + nmax*sizeof(int));
g->id_list = (int*)(g+1);
g->nmax = nmax;
g->p1 = atoi(argv[2]);
g->p2 = atoi(argv[3]);
g->p3 = atoi(argv[4]);
g->sstart = atoi(argv[5]);
g->send = atoi(argv[6]);
g->sdenom = atoi(argv[7]);
g->qstart = atoi(argv[8]);
g->qend = atoi(argv[9]);
g->qdenom = atoi(argv[10]);
// initialize
parallel_context *ctx = parallel_init();
parallel_set_datasize_and_callbacks(ctx, init_node, do_computation, destroy_node,
sizeof(struct global_data) + g->nmax*sizeof(int),
sizeof(struct node_data),
sizeof(struct input_data),
sizeof(struct output_data));
if(ctx->mpi_mode == 1 && ctx->rank != 0) {
// worker mode
parallel_work(ctx);
parallel_destroy(ctx);
exit(0);
}
init_node(g, &n);
// use very generic values for the pilot run unless sstart=send and qstart=qend
struct input_data pilot_input;
struct output_data pilot_output;
if(g->sstart == g->send && g->qstart == g->qend) {
pilot_input.snum = g->sstart;
pilot_input.sden = g->sdenom;
pilot_input.qnum = g->qstart;
pilot_input.qden = g->qdenom;
DEBUG("Single run for s = %d/%d, q = %d/%d\n", g->sstart, g->sdenom, g->qstart, g->qdenom);
} else {
pilot_input.snum = 4;
pilot_input.sden = 100;
pilot_input.qnum = 7;
pilot_input.qden = 100;
DEBUG("Initial run for s = %d/%d, q = %d/%d\n", 4, 100, 7, 100);
}
g->id_list_length = 0;
for(int i = 0; i < n.group->size; i++)
if(n.group->elements[i].length % 2 == 0 && n.group->elements[i].inverse)
g->id_list[g->id_list_length++] = i;
do_computation(g, &n, &pilot_input, &pilot_output);
for(int i = 0; i < n.distinct_invariants_length; i++)
g->id_list[i] = n.distinct_invariants[i]->id;
g->id_list_length = n.distinct_invariants_length;
if(g->sstart != g->send || g->qstart != g->qend) {
struct input_data *inputs = malloc((g->send - g->sstart + 1)*(g->qend - g->qstart + 1)*sizeof(struct input_data));
struct output_data *outputs = malloc((g->send - g->sstart + 1)*(g->qend - g->qstart + 1)*sizeof(struct output_data));
int njobs = 0;
for(int sloop = g->sstart; sloop <= g->send; sloop++) {
for(int qloop = g->qstart; qloop <= g->qend; qloop++) {
inputs[njobs].sden = g->sdenom;
inputs[njobs].qden = g->qdenom;
inputs[njobs].snum = sloop;
inputs[njobs].qnum = qloop;
njobs++;
}
}
if(argc >= 12)
parallel_run(ctx, g, inputs, outputs, njobs, argv[11]);
else
parallel_run(ctx, g, inputs, outputs, njobs, NULL);
// DEBUG("Loop for s = %d/%d, q = %d/%d\n", sloop, g->sdenom, qloop, g->qdenom);
for(int i = 0; i < njobs; i++)
{
gmp_printf("%d/%d %d/%d %d %s %f\n",
inputs[i].snum, inputs[i].sden, inputs[i].qnum, inputs[i].qden,
outputs[i].max_slope_id,
print_word(&n.group->elements[outputs[i].max_slope_id], buf),
outputs[i].max_slope);
}
free(inputs);
free(outputs);
} else {
// output
mpq_t tmp;
mpq_init(tmp);
for(int i = 0; i < n.distinct_invariants_length; i++) {
// exclude elements with tr = trinv equal to one of 2, 1, 0, -1, 3
const int excluded[] = {2, 1, 0, -1, 3};
int skip = 0;
for(int j = 0; j < 5; j++) {
mpq_set_si(tmp, excluded[j], 1);
if(mpq_cmp(n.distinct_invariants[i]->tr, tmp) == 0 &&
mpq_cmp(n.distinct_invariants[i]->trinv, tmp) == 0) {
skip = 1;
break;
}
}
if(skip)
continue;
double slope = n.distinct_invariants[i]->y/n.distinct_invariants[i]->x;
gmp_printf("%d %s %f\n",
n.distinct_invariants[i]->id,
print_word(&n.group->elements[n.distinct_invariants[i]->id], buf),
slope);
}
mpq_clear(tmp);
}
destroy_node(g, &n);
free(g);
parallel_destroy(ctx);
}


@@ -1,559 +0,0 @@
#include "coxeter.h"
#include "linalg.h"
#include "mat.h"
#include "enumerate_triangle_group.h"
#include "parallel.h"
#include <time.h>
#define SWAP(t,x,y) do { t _tmp = (x); (x) = (y); (y) = _tmp; } while (0);
//#define DEBUG(msg, ...) fprintf(stderr, "[%003d%10.3f] " msg, mpi_rank(0), runtime(), ##__VA_ARGS__)
#define DEBUG(msg, ...)
#define INFO(msg, ...) fprintf(stderr, "[%003d%10.3f] " msg, mpi_rank(0), runtime(), ##__VA_ARGS__)
struct result {
int id;
NUMBER tr;
NUMBER trinv;
int disc_sign;
};
// we want as much as possible to be node data, except if it is only known to the main node
// (command line arguments) or should only be computed once (id list)
struct global_data {
// command line arguments
unsigned int nmax;
unsigned int p1, p2, p3;
unsigned int sstart, send, sdenom;
unsigned int qstart, qend, qdenom;
unsigned int *id_list;
unsigned int id_list_length;
};
struct node_data {
group_t *group;
mat* matrices;
struct result *invariants;
struct result **distinct_invariants;
int distinct_invariants_length;
mps_context *solver;
};
struct input_data {
unsigned int snum, sden;
unsigned int qnum, qden;
};
struct output_data {
int n_non_loxodromic;
int min_wordlength;
int elements[10];
};
static int compare_result(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
d = CMP((*a)->tr,(*b)->tr);
if(d == 0) {
d = CMP((*a)->trinv, (*b)->trinv);
}
return d;
}
static int compare_result_by_id(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
return (*a)->id - (*b)->id;
}
static int compare_result_by_tr_trinv_id(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
d = CMP((*a)->tr,(*b)->tr);
if(d == 0) {
d = CMP((*a)->trinv, (*b)->trinv);
if(d == 0) {
d = (*b)->id - (*a)->id;
}
}
return d;
}
/*
static int compare_result_by_slope(const void *a_, const void *b_)
{
int d = 0;
struct result **a = (struct result **)a_;
struct result **b = (struct result **)b_;
double slopea = (*a)->x / (*a)->y;
double slopeb = (*b)->x / (*b)->y;
return slopea > slopeb ? -1 : slopea < slopeb ? 1 : 0;
}
*/
int invariants_trace_loxodromic(group_t *group, mat *matrices, struct result **invariants, int *n, int unique)
{
int ntraces = *n, nuniq; // ntraces is the number of traces we are asked to compute, nuniq is the number of unique ones after we eliminate duplicates
// compute the traces
for(int i = 0; i < ntraces; i++) {
int id = invariants[i]->id;
int invid = group->elements[id].inverse->id;
mat_trace(invariants[i]->tr, matrices[id]);
mat_trace(invariants[i]->trinv, matrices[invid]);
}
// throw out duplicates if unique == 1
if(!unique)
nuniq = ntraces;
else {
qsort(invariants, ntraces, sizeof(struct result*), compare_result);
nuniq = 0;
for(int i = 0; i < ntraces; i++) {
if(i == 0 || compare_result(&invariants[i], &invariants[nuniq-1]) != 0) {
invariants[nuniq] = invariants[i];
nuniq++;
} else {
int oldlength = group->elements[invariants[nuniq-1]->id].length;
int newlength = group->elements[invariants[i]->id].length;
if(newlength < oldlength)
invariants[nuniq-1]->id = invariants[i]->id;
}
}
}
// check if loxodromic
NUMBER disc, zero;
double disc_real;
INIT(disc, QT_SQRT5);
INIT(zero, QT_SQRT5);
SET_ZERO(zero);
for(int i = 0; i < nuniq; i++) {
discriminant(disc, invariants[i]->tr, invariants[i]->trinv);
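// evaluate the Q(sqrt(5)) element a[0] + a[1]*sqrt(5) as a double to read
// off the sign of the discriminant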
disc_real = mpq_get_d(disc->a[0]) + sqrt(5)*mpq_get_d(disc->a[1]);
gmp_printf("%Qd %Qd %f\n", disc->a[0], disc->a[1], disc_real);
invariants[i]->disc_sign = disc_real > 0 ? 1 : disc_real < 0 ? -1 : 0;
}
CLEAR(disc);
CLEAR(zero);
// sort by ID again
qsort(invariants, nuniq, sizeof(struct result*), compare_result_by_id);
*n = nuniq;
return 0;
}
/*
int invariants_trace_slope(group_t *group, mat *matrices, struct result **invariants, int *n, int unique)
{
mpq_t tmp;
mps_context *solver;
mps_monomial_poly *poly;
int index;
int ntraces = *n, nuniq;
int retval;
double evs[3];
char buf[100];
// DEBUG("Compute traces\n");
for(int i = 0; i < ntraces; i++) {
int id = invariants[i]->id;
int invid = group->elements[id].inverse->id;
mat_trace(invariants[i]->tr, matrices[id]);
mat_trace(invariants[i]->trinv, matrices[invid]);
}
if(!unique)
nuniq = ntraces;
else {
// DEBUG("Get unique traces\n");
qsort(invariants, ntraces, sizeof(struct result*), compare_result);
nuniq = 0;
for(int i = 0; i < ntraces; i++) {
if(i == 0 || compare_result(&invariants[i], &invariants[nuniq-1]) != 0) {
invariants[nuniq] = invariants[i];
nuniq++;
} else {
int oldlength = group->elements[invariants[nuniq-1]->id].length;
int newlength = group->elements[invariants[i]->id].length;
if(newlength < oldlength)
invariants[nuniq-1]->id = invariants[i]->id;
}
}
}
DEBUG("Solve characteristic polynomials\n");
solver = mps_context_new();
poly = mps_monomial_poly_new(solver, 3);
mps_context_set_output_prec(solver, 20); // relative precision
mps_context_set_output_goal(solver, MPS_OUTPUT_GOAL_APPROXIMATE);
for(int i = 0; i < nuniq; i++) {
retval = solve_characteristic_polynomial(solver, poly, invariants[i]->tr, invariants[i]->trinv, evs);
if(retval == 1) {
fprintf(stderr, "Error! Could not solve polynomial.\n");
continue;
} else if(retval == 2) {
continue;
}
if(fabs(evs[0]) < fabs(evs[1]))
SWAP(double, evs[0], evs[1]);
if(fabs(evs[1]) < fabs(evs[2]))
SWAP(double, evs[1], evs[2]);
if(fabs(evs[0]) < fabs(evs[1]))
SWAP(double, evs[0], evs[1]);
double x = log(fabs(evs[0]));
double y = -log(fabs(evs[2]));
invariants[i]->x = x;
invariants[i]->y = y;
invariants[i]->slope = y/x;
}
mps_context_free(solver);
qsort(invariants, nuniq, sizeof(struct result*), compare_result_by_id);
*n = nuniq;
return 0;
}
*/
/*
long check_memory_usage(mat *matrices, int n)
{
mpq_t x;
long total;
for(int i = 0; i < n; i++)
{
LOOP(j,3) LOOP(k,3) {
total += mpq_numref(M(matrices[i], j, k))->_mp_size;
total += mpq_denref(M(matrices[i], j, k))->_mp_size;
}
}
return total;
}
*/
void destroy_node(const void *_g, void *_n)
{
struct global_data *g = (struct global_data *)_g;
struct node_data *n = (struct node_data *)_n;
for(int i = 0; i < g->nmax; i++) {
CLEAR(n->invariants[i].tr);
CLEAR(n->invariants[i].trinv);
}
free(n->invariants);
free(n->distinct_invariants);
for(int i = 0; i < g->nmax; i++)
mat_clear(n->matrices[i]);
free(n->matrices);
coxeter_clear(n->group);
}
int init_node(const void *_g, void *_n)
{
struct global_data *g = (struct global_data *)_g;
struct node_data *n = (struct node_data *)_n;
DEBUG("Allocate\n");
g->id_list = (int*)(g+1); // pointers get scrambled by transmission, reconstruct
n->matrices = malloc(g->nmax*sizeof(mat));
for(int i = 0; i < g->nmax; i++)
mat_init(n->matrices[i], 3, QT_SQRT5);
n->invariants = malloc(g->nmax*sizeof(struct result));
n->distinct_invariants = malloc(g->nmax*sizeof(struct result)); // we won't need that many, but just in case
for(int i = 0; i < g->nmax; i++) {
INIT(n->invariants[i].tr, QT_SQRT5);
INIT(n->invariants[i].trinv, QT_SQRT5);
n->invariants[i].id = i;
}
// order of the triangle reflection generators: a, b, c
// order of the rotation orders: bc, ac, ab
DEBUG("Generate group\n");
n->group = coxeter_init_triangle(g->p1, g->p2, g->p3, g->nmax);
return 0;
}
int process_output(group_t *group, mat *matrices, struct result **invariants, int invariants_length, struct output_data *out)
{
out->n_non_loxodromic = 0;
out->min_wordlength = INT_MAX;
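// record up to 10 elements that fail the loxodromic test (disc_sign <= 0),
// skipping the identity and the hard-coded ids 4 and 22, and track the
// minimal word length among them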
for(int i = 0; i < invariants_length; i++) {
if(invariants[i]->disc_sign <= 0 && invariants[i]->id != 0 && invariants[i]->id != 4 && invariants[i]->id != 22) {
if(out->n_non_loxodromic < 10)
out->elements[out->n_non_loxodromic] = invariants[i]->id;
out->n_non_loxodromic++;
if(group->elements[invariants[i]->id].length < out->min_wordlength)
out->min_wordlength = group->elements[invariants[i]->id].length;
}
}
return 0;
}
int do_computation(const void *_g, void *_n, const void *_in, void *_out)
{
struct global_data *g = (struct global_data *)_g;
struct node_data *n = (struct node_data *)_n;
struct input_data *in = (struct input_data *)_in;
struct output_data *out = (struct output_data *)_out;
mpq_t s, q;
mpq_inits(s, q, NULL);
mpq_set_ui(s, in->snum, in->sden);
mpq_set_ui(q, in->qnum, in->qden);
INFO("Computing represention with s = %d/%d and q = %d/%d.\n",
in->snum, in->sden,
in->qnum, in->qden);
// we need to compute all the elements pointed to in id_list, and all their suffixes or prefixes
// I can imagine a smarter way of doing this which checks if there is a shorter route to the element
for(int i = 0; i < n->group->size; i++)
n->group->elements[i].need_to_compute = 0;
n->group->elements[0].need_to_compute = 1;
int needed_elements = 1;
for(int i = 0; i < g->id_list_length; i++)
{
int id = g->id_list[i];
n->distinct_invariants[i] = &n->invariants[id];
groupelement_t *cur = &n->group->elements[id];
while(cur->need_to_compute == 0) {
cur->need_to_compute = 1;
needed_elements++;
cur = cur->parent->parent; // also need to compute its even-length ancestors
}
cur = n->group->elements[id].inverse;
while(cur->need_to_compute == 0) {
cur->need_to_compute = 1;
needed_elements++;
cur = cur->parent->parent;
}
}
n->distinct_invariants_length = g->id_list_length;
DEBUG("Need to compute %d elements to get %d traces up to reflection length %d\n",
needed_elements, g->id_list_length, n->group->elements[n->group->size-1].length);
DEBUG("Compute matrices\n");
mat gen[6];
for(int i = 0; i < 6; i++)
mat_init(gen[i], 3, QT_SQRT5);
generators_triangle_rotation_555_barbot(gen, s, q);
enumerate_triangle_rotation_subgroup(n->group, gen, n->matrices);
for(int i = 0; i < 6; i++)
mat_clear(gen[i]);
DEBUG("Compute invariants\n");
invariants_trace_loxodromic(
n->group, n->matrices,
n->distinct_invariants, &n->distinct_invariants_length, 1);
// DEBUG("Find max slopes\n");
process_output(n->group, n->matrices, n->distinct_invariants, n->distinct_invariants_length, out);
mpq_clears(s, q, NULL);
return 0;
}
int main(int argc, char *argv[])
{
char buf[1000];
char buf2[1000];
char buf3[1000];
struct global_data *g;
struct node_data n;
start_timer();
// parse command line arguments
if(argc < 11) {
fprintf(stderr, "Usage: %s <N> <p1> <p2> <p3> <s start> <s end> <s denom> <q start> <q end> <q denom> [restart]\n", argv[0]);
exit(1);
}
int nmax = atoi(argv[1]);
g = (struct global_data*)malloc(sizeof(struct global_data) + nmax*sizeof(int));
g->id_list = (int*)(g+1);
g->nmax = nmax;
g->p1 = atoi(argv[2]);
g->p2 = atoi(argv[3]);
g->p3 = atoi(argv[4]);
g->sstart = atoi(argv[5]);
g->send = atoi(argv[6]);
g->sdenom = atoi(argv[7]);
g->qstart = atoi(argv[8]);
g->qend = atoi(argv[9]);
g->qdenom = atoi(argv[10]);
// initialize
parallel_context *ctx = parallel_init();
parallel_set_datasize_and_callbacks(ctx, init_node, do_computation, destroy_node,
sizeof(struct global_data) + g->nmax*sizeof(int),
sizeof(struct node_data),
sizeof(struct input_data),
sizeof(struct output_data));
if(ctx->mpi_mode == 1 && ctx->rank != 0) {
// worker mode
parallel_work(ctx);
parallel_destroy(ctx);
exit(0);
}
init_node(g, &n);
// use very generic values for the pilot run unless sstart=send and qstart=qend
struct input_data pilot_input;
struct output_data pilot_output;
if(g->sstart == g->send && g->qstart == g->qend) {
pilot_input.snum = g->sstart;
pilot_input.sden = g->sdenom;
pilot_input.qnum = g->qstart;
pilot_input.qden = g->qdenom;
DEBUG("Single run for s = %d/%d, q = %d/%d\n", g->sstart, g->sdenom, g->qstart, g->qdenom);
} else {
pilot_input.snum = 4;
pilot_input.sden = 100;
pilot_input.qnum = 7;
pilot_input.qden = 100;
DEBUG("Initial run for s = %d/%d, q = %d/%d\n", 4, 100, 7, 100);
}
g->id_list_length = 0;
for(int i = 0; i < n.group->size; i++)
if(n.group->elements[i].length % 2 == 0 && n.group->elements[i].inverse)
g->id_list[g->id_list_length++] = i;
do_computation(g, &n, &pilot_input, &pilot_output);
for(int i = 0; i < n.distinct_invariants_length; i++)
g->id_list[i] = n.distinct_invariants[i]->id;
g->id_list_length = n.distinct_invariants_length;
if(g->sstart != g->send || g->qstart != g->qend) {
struct input_data *inputs = malloc((g->send - g->sstart + 1)*(g->qend - g->qstart + 1)*sizeof(struct input_data));
struct output_data *outputs = malloc((g->send - g->sstart + 1)*(g->qend - g->qstart + 1)*sizeof(struct output_data));
int njobs = 0;
for(int sloop = g->sstart; sloop <= g->send; sloop++) {
for(int qloop = g->qstart; qloop <= g->qend; qloop++) {
inputs[njobs].sden = g->sdenom;
inputs[njobs].qden = g->qdenom;
inputs[njobs].snum = sloop;
inputs[njobs].qnum = qloop;
njobs++;
}
}
if(argc >= 12)
parallel_run(ctx, g, inputs, outputs, njobs, argv[11]);
else
parallel_run(ctx, g, inputs, outputs, njobs, NULL);
// DEBUG("Loop for s = %d/%d, q = %d/%d\n", sloop, g->sdenom, qloop, g->qdenom);
for(int i = 0; i < njobs; i++)
{
/*
gmp_printf("%d/%d %d/%d %d %s %f\n",
inputs[i].snum, inputs[i].sden, inputs[i].qnum, inputs[i].qden,
outputs[i].max_slope_id,
print_word(&n.group->elements[outputs[i].max_slope_id], buf),
outputs[i].max_slope);
*/
printf("%d/%d %d/%d %d %d",
inputs[i].snum, inputs[i].sden, inputs[i].qnum, inputs[i].qden,
outputs[i].n_non_loxodromic, outputs[i].min_wordlength);
for(int j = 0; j < 10 && j < outputs[i].n_non_loxodromic; j++)
printf(" %s", print_word(&n.group->elements[outputs[i].elements[j]], buf));
printf("\n");
}
free(inputs);
free(outputs);
} else {
// output
for(int i = 0; i < n.distinct_invariants_length; i++) {
// exclude tr = trinv = 2/1/0/-1/3
/*
mpq_t tmp;
mpq_init(tmp);
mpq_set_si(tmp, 2, 1);
if(mpq_cmp(n.distinct_invariants[i]->tr, tmp) == 0 &&
mpq_cmp(n.distinct_invariants[i]->trinv, tmp) == 0)
continue;
mpq_set_si(tmp, 1, 1);
if(mpq_cmp(n.distinct_invariants[i]->tr, tmp) == 0 &&
mpq_cmp(n.distinct_invariants[i]->trinv, tmp) == 0)
continue;
mpq_set_si(tmp, 0, 1);
if(mpq_cmp(n.distinct_invariants[i]->tr, tmp) == 0 &&
mpq_cmp(n.distinct_invariants[i]->trinv, tmp) == 0)
continue;
mpq_set_si(tmp, -1, 1);
if(mpq_cmp(n.distinct_invariants[i]->tr, tmp) == 0 &&
mpq_cmp(n.distinct_invariants[i]->trinv, tmp) == 0)
continue;
mpq_set_si(tmp, 3, 1);
if(mpq_cmp(n.distinct_invariants[i]->tr, tmp) == 0 &&
mpq_cmp(n.distinct_invariants[i]->trinv, tmp) == 0)
continue;
mpq_clear(tmp);
*/
SNPRINT(buf, sizeof(buf), n.distinct_invariants[i]->tr);
SNPRINT(buf2, sizeof(buf2), n.distinct_invariants[i]->trinv);
printf("%d %s %d\n",
n.distinct_invariants[i]->id,
print_word(&n.group->elements[n.distinct_invariants[i]->id], buf3),
// mpq_get_d(n.distinct_invariants[i]->tr[0])+sqrt(5)*mpq_get_d(n.distinct_invariants[i]->tr[1]),
// mpq_get_d(n.distinct_invariants[i]->trinv[0])+sqrt(5)*mpq_get_d(n.distinct_invariants[i]->trinv[1]),
n.distinct_invariants[i]->disc_sign);
}
}
destroy_node(g, &n);
free(g);
parallel_destroy(ctx);
}


@@ -1,14 +0,0 @@
#a6cee3
#1f78b4
#b2df8a
#33a02c
#fb9a99
#e31a1c
#fdbf6f
#ff7f00
#cab2d6
#6a3d9a
#ffff99
#b15928
#ffff00
#00ffff


@@ -1,6 +0,0 @@
#!/bin/bash
convert -size 100x99 canvas:black combined.png
for a in partition_*.pnm; do
composite -compose plus combined.png $a combined.png
done


@@ -1,69 +0,0 @@
import Text.Printf
import System.Environment
import System.Process
import Data.List
wordlist = ["abacabacbabcbacbcacbac", "abacbabcbacbac", "abacbabcbacbcacbac", "abacbacbacbacbacbacbac", "abacbacbacbacbac", "abacbacbac", "bacabacbabcbacbcacbcac", "bacabacbabcbacbcac", "baca", "bacbabcbacbcac", "bacbacbacbacbacbacbcac", "bacbacbacbacbcac", "bacbacbcac", "bacbac"]
colors = [(0.651,0.808,0.890), (0.122,0.471,0.706), (0.698,0.875,0.541), (0.200,0.627,0.173), (0.984,0.604,0.600), (0.890,0.102,0.110), (0.992,0.749,0.435), (1.000,0.498,0.000), (0.792,0.698,0.839), (0.416,0.239,0.604), (1.000,1.000,0.600), (0.694,0.349,0.157), (1.000,1.000,0.000), (0.000,1.000,1.000)]
main = sequence $ zipWith (generatePicture "result_20210419_221603.out") wordlist colors
generatePicture :: String -> String -> (Double,Double,Double) -> IO ()
generatePicture datafile word color = do
file1 <- readFile datafile
file2 <- readProcess "../special_element" [word] ""
let values1 = map (normalize.read.(!!4).words) (lines file1) :: [Double]
let values2 = map (normalize.read.(!!3).words) (lines file2) :: [Double]
let diff = zipWith (-) values1 values2
writeFile (printf "partition_%s.pnm" word) $ drawGrayscalePicture color diff
printf "Wrote partition_%s.pnm\n" word
-- kind of a failed experiment
-- supersample (width, height) to (factor*width - factor + 1, factor*height - factor + 1)
-- supersample :: Int -> Int -> Int -> [Double] -> [Double]
-- supersample width height factor orig = [pix x y | x <- [0..(width-1)*factor], y <- [0..(height-1)*factor]]
-- where
-- pix x y = xl*yl*a + xl*yr*b + xr*yl*c + xr*yr*d
-- where
-- x_ = x`div`factor
-- y_ = y`div`factor
-- xr = fromIntegral (x`mod`factor)/fromIntegral factor :: Double
-- xl = 1 - xr :: Double
-- yr = fromIntegral (y`mod`factor)/fromIntegral factor :: Double
-- yl = 1 - yr :: Double
-- aoffset = x_*height + y_
-- boffset = x_*height + y_+1
-- coffset = (x_+1)*height + y_
-- doffset = (x_+1)*height + y_+1
-- a = orig!!aoffset
-- b = if boffset < width*height then orig!!boffset else 0
-- c = if coffset < width*height then orig!!coffset else 0
-- d = if doffset < width*height then orig!!doffset else 0
-- xl*yl*a + xl*yr*b + xr*yl*c + xr*yr*d
drawGrayscalePicture :: (Double,Double,Double) -> [Double] -> String
drawGrayscalePicture (r,g,b) values = drawPicture 100 99 $ map color values
where
color x = (round (255*r*colorscale x), round (255*g*colorscale x), round (255*b*colorscale x))
drawPicture :: Int -> Int -> [(Int,Int,Int)] -> String
drawPicture w h values = printf "P3\n%d %d\n255\n%s" w h pixels
where
pixels = concat [printf "%d %d %d\n" r g b | (r,g,b) <- values] :: String
normalize x = if x < 1 then 1/x else x
readDataFile :: String -> IO [(String,Double)]
readDataFile filename = do
f <- readFile filename
return $ map process $ map words $ lines f
where
process (x:y:_:_:z:rest) = (x++" "++y,read z)
normalize x = if x < 1 then 1/x else x
colorscale :: Double -> Double
colorscale x = if cs x < 0 then 1 else if cs x > 1 then 0 else 1 - cs x
where
cs x = 1e5*x

File diff suppressed because it is too large


@@ -1,14 +0,0 @@
-- resize and convert image with
-- convert max_slope_billiards.pnm -scale 3000x max_slope_billiards.png
import Text.Printf
colors = [(0.651,0.808,0.890), (0.122,0.471,0.706), (0.698,0.875,0.541), (0.200,0.627,0.173), (0.984,0.604,0.600), (0.890,0.102,0.110), (0.992,0.749,0.435), (1.000,0.498,0.000), (0.792,0.698,0.839), (0.416,0.239,0.604), (1.000,1.000,0.600), (0.694,0.349,0.157), (1.000,1.000,0.000), (0.000,1.000,1.000)]
col = map (\(r,g,b) -> (round $ r*255, round $ g*255, round $ b*255)) colors :: [(Int,Int,Int)]
main = do
foo <- readFile "max_slopes_billiard"
let dat = map (read.(!!2).words) $ lines foo :: [Int]
writeFile "max_slope_billiards.pnm" $ header ++ unlines [printf "%d %d %d" r g b | p <- dat, let (r,g,b) = col !! (p `mod` 14)]
where
header = "P3\n300\n123\n255\n"


@@ -1,15 +0,0 @@
if(!exists("index")) index = 50
set xrange [0:0.45]
set yrange [0:1]
set xyplane at 0
#plot "max_slopes_billiard" using 1:2:($3 - 10*floor($3/10)) w p pt 7 ps 1.1 lc palette t ""
plot "max_slopes_billiard" using 1:2:3 w p pt 7 ps 1.1 lc palette t ""
#splot "max_slopes_billiard" using 1:2:3 w p pt 7 ps 0.3 t ""
#plot "max_slopes_billiard" using 1:2:4 w p pt 7 ps 1.1 lc palette t ""
pause 10
#if(MOUSE_KEY == 60) index=index-1
#if(MOUSE_KEY == 62) index=index+1
#print MOUSE_KEY
reread

File diff suppressed because it is too large


@@ -1,132 +0,0 @@
bcabca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcabcabcacbcabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabcacbcabcabcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabcacbcabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcacbcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabaca
bcabcabcabcabcabcabcacbcabcabcabcabcabcabcabcbabcabcabcabcabcabcabcabacabcabcabcabcabcabcabaca
bcabcabcabcabcabcabaca
bcabcabcabcabcabcabacabcabcabcabcabcabcbabcabcabcabcabcabcacbcabcabcabcabcabcabaca
bcabcabcabcabcabcbabcabcabcabcabcabcacbcabcabcabcabcabcabaca
bcabcabcabcabcabcbabcabcabcabcabcabcacbcabcabcabcabcabcabacabcabcabcabcabcabcbabcabcabcabcabcabaca
bcabcabcabcabcabcbabcabcabcabcabcabaca
bcabcabcabcabcabcbabcabcabcabcabcabacabcabcabcabcabcacbcabcabcabcabcabcbabcabcabcabcabcabaca
bcabcabcabcabcacbcabcabcabcabcabcbabcabcabcabcabcabaca
bcabcabcabcabcacbcabcabcabcabcabcbabcabcabcabcabcabacabcabcabcabcabaca
bcabcabcabcabcacbcabcabcabcabcabcbabcabcabcabcabcbabcabcabcabcabcabacabcabcabcabcabaca
bcabcabcabcabaca
bcabcabcabcabacabcabcabcabcbabcabcabcabcabcbabcabcabcabcacbcabcabcabcabcacbcabcabcabcabaca
bcabcabcabcabacabcabcabcabcbabcabcabcabcabcbabcabcabcabcacbcabcabcabcabaca
bcabcabcabcabacabcabcabcabcbabcabcabcabcacbcabcabcabcabaca
bcabcabcabcabacabcabcabcabcbabcabcabcabcacbcabcabcabcabacabcabcabcabcbabcabcabcabcacbcabcabcabcabaca
bcabcabcabcbabcabcabcabcacbcabcabcabcabaca
bcabcabcabcbabcabcabcabcacbcabcabcabcabacabcabcabcabcbabcabcabcabaca
bcabcabcabcbabcabcabcabcacbcabcabcabcabacabcabcabcacbcabcabcabcabacabcabcabcabcbabcabcabcabaca
bcabcabcabcbabcabcabcabaca
bcabcabcabcbabcabcabcabacabcabcabcacbcabcabcabcabacabcabcabcacbcabcabcabcbabcabcabcabaca
bcabcabcabcbabcabcabcabacabcabcabcacbcabcabcabcbabcabcabcabaca
bcabcabcabcbabcabcabcabacabcabcabcacbcabcabcabcbabcabcabcabacabcabcabcacbcabcabcabcbabcabcabcabaca
bcabcabcacbcabcabcabcbabcabcabcabaca
bcabcabcacbcabcabcabcbabcabcabcabacabcabcabcacbcabcabcabcbabcabcabcabacabcabcabaca
bcabcabcacbcabcabcabcbabcabcabcabacabcabcabaca
bcabcabcacbcabcabcabcbabcabcabcbabcabcabcabacabcabcabaca
bcabcabcacbcabcabcacbcabcabcabcbabcabcabcbabcabcabcabacabcabcabaca
bcabcabcacbcabcabcacbcabcabcabcbabcabcabcbabcabcabcabacabcabcabacabcabcabaca
bcabcabcacbcabcabcacbcabcabcabcbabcabcabcbabcabcabcbabcabcabcabacabcabcabacabcabcabaca
bcabcabcacbcabcabcacbcabcabcacbcabcabcabcbabcabcabcbabcabcabcbabcabcabcabacabcabcabacabcabcabaca
bcabcabaca
bcabcabacabcabcabacabcabcabacabcabcbabcabcabcbabcabcabcbabcabcacbcabcabcacbcabcabcacbcabcabaca
bcabcabacabcabcabacabcabcbabcabcabcbabcabcabcbabcabcacbcabcabcacbcabcabcacbcabcabaca
bcabcabacabcabcabacabcabcbabcabcabcbabcabcabcbabcabcacbcabcabcacbcabcabaca
bcabcabacabcabcabacabcabcbabcabcabcbabcabcacbcabcabcacbcabcabaca
bcabcabacabcabcbabcabcabcbabcabcacbcabcabcacbcabcabaca
bcabcabacabcabcbabcabcabcbabcabcacbcabcabcacbcabcabacabcabcabacabcabcbabcabcabcbabcabcacbcabcabaca
bcabcabacabcabcbabcabcabcbabcabcacbcabcabaca
bcabcabacabcabcbabcabcacbcabcabcacbcabcabacabcabcbabcabcabcbabcabcacbcabcabaca
bcabcabacabcabcbabcabcacbcabcabaca
bcabcabacabcabcbabcabcacbcabcabacabcabcbabcabcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabaca
bcabcabacabcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabaca
bcabcabacabcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabaca
bcabcbabcabcacbcabcabaca
bcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcabacabcabcbabcabcacbcabcabacabcacbcabcabacabcabcbabcabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcabacabcacbcabcabacabcabcbabcabcacbcabcbabcabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcabacabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcbabcabcacbcabcabacabcacbcabcabacabcabcbabcabaca
bcabcbabcabcacbcabcbabcabcacbcabcabacabcacbcabcabacabcabcbabcabacabcabcbabcabaca
bcabcbabcabcacbcabcbabcabcacbcabcabacabcacbcabcabacabcacbcabcabacabcabcbabcabacabcabcbabcabaca
bcabcbabcabaca
bcabcbabcabacabcabcbabcabacabcacbcabcabacabcacbcabcabacabcacbcabcbabcabcacbcabcbabcabaca
bcabcbabcabacabcabcbabcabacabcacbcabcabacabcacbcabcbabcabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcabacabcacbcabcbabcabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcabacabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcbabcabcacbcabcbabcabacabcacbcabcabacabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcbabcabacabcacbcabcabacabcacbcabcbabcabacabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabaca
bcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabaca
bcacbcabcbabcabaca
bcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabcacbcabcbabcabacabcacbcabcbabcbabcabacabcacbcabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabcacbcabcbabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabcacbcacbcabcbabcabacabcacbcabcbabcbabcabacabcacbcabcbabcabacabaca
bcacbcabcbabcabacabaca
bcacbcabcbabcabacabacabcacbcabcbabcbabcabacabcacbcabcbabcbabcabacabcacbcacbcabcbabcabacabaca
bcacbcabcbabcabacabacabcacbcabcbabcbabcabacabcacbcacbcabcbabcabacabaca
bcacbcabcbabcbabcabacabcacbcacbcabcbabcabacabaca
bcacbcabcbabcbabcabacabcacbcacbcabcbabcabacabacabcacbcabcbabcbabcabacabaca
bcacbcabcbabcbabcabacabcacbcacbcabcbabcabacabacabcacbcacbcabcbabcabacabacabcacbcabcbabcbabcabacabaca
bcacbcabcbabcbabcabacabaca
bcacbcabcbabcbabcabacabacabcacbcacbcabcbabcabacabacabcacbcacbcabcbabcbabcabacabaca
bcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabaca
bcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabaca
bcacbcacbcabcbabcbabcabacabaca
bcacbcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabacabaca
bcacbcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabacabaca
bcacbcacbcabcbabcbabcabacabacabcacbcacbcabcbabcbabcbabcabacabacabcacbcacbcabcbabcbabcabacabacabaca
bcacbcacbcabcbabcbabcabacabacabaca
bcacbcacbcabcbabcbabcbabcabacabacabcacbcacbcacbcabcbabcbabcabacabacabaca
bcacbcacbcabcbabcbabcbabcabacabacabaca
bcacbcacbcabcbabcbabcbabcabacabacabacabcacbcacbcacbcabcbabcbabcbabcabacabacabaca
bcacbcacbcacbcabcbabcbabcbabcabacabacabaca
bcacbcacbcacbcabcbabcbabcbabcabacabacabacabcacbcacbcacbcabcbabcbabcbabcabacabacabacabaca
bcacbcacbcacbcabcbabcbabcbabcabacabacabacabaca
bcacbcacbcacbcabcbabcbabcbabcbabcabacabacabacabcacbcacbcacbcacbcabcbabcbabcbabcabacabacabacabaca
bcacbcacbcacbcabcbabcbabcbabcbabcabacabacabacabaca
bcacbcacbcacbcacbcabcbabcbabcbabcbabcabacabacabacabaca
bcacbcacbcacbcacbcabcbabcbabcbabcbabcabacabacabacabacabaca
bcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcabacabacabacabacabaca
bcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcabacabacabacabacabaca
bcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabacabacabaca
bcacbcacbcacbcacbcacbcacbcacbcabcbabcbabcbabcbabcbabcbabcbabcbabcabacabacabacabacabacabacabacabaca
baca

Binary file not shown (image, 131 KiB).


@@ -1,135 +0,0 @@
euler.ma.utexas.edu
fac1.ma.utexas.edu
fac4.ma.utexas.edu
fac8.ma.utexas.edu
fac9.ma.utexas.edu
frog.ma.utexas.edu
gummo.ma.utexas.edu
iguana.ma.utexas.edu
lab10.ma.utexas.edu
lab11.ma.utexas.edu
lab12.ma.utexas.edu
lab13.ma.utexas.edu
lab14.ma.utexas.edu
lab15.ma.utexas.edu
lab16.ma.utexas.edu
lab17.ma.utexas.edu
lab18.ma.utexas.edu
lab19.ma.utexas.edu
lab1.ma.utexas.edu
lab20.ma.utexas.edu
lab21.ma.utexas.edu
lab22.ma.utexas.edu
lab23.ma.utexas.edu
lab24.ma.utexas.edu
lab25.ma.utexas.edu
lab26.ma.utexas.edu
lab27.ma.utexas.edu
lab28.ma.utexas.edu
lab29.ma.utexas.edu
lab2.ma.utexas.edu
lab30.ma.utexas.edu
lab31.ma.utexas.edu
lab32.ma.utexas.edu
lab33.ma.utexas.edu
lab34.ma.utexas.edu
lab35.ma.utexas.edu
lab36.ma.utexas.edu
lab37.ma.utexas.edu
lab38.ma.utexas.edu
lab39.ma.utexas.edu
lab3.ma.utexas.edu
lab40.ma.utexas.edu
lab41.ma.utexas.edu
lab42.ma.utexas.edu
lab43.ma.utexas.edu
lab44.ma.utexas.edu
lab45.ma.utexas.edu
lab46.ma.utexas.edu
lab47.ma.utexas.edu
lab48.ma.utexas.edu
lab49.ma.utexas.edu
lab4.ma.utexas.edu
lab50.ma.utexas.edu
lab51.ma.utexas.edu
lab52.ma.utexas.edu
lab53.ma.utexas.edu
lab54.ma.utexas.edu
lab55.ma.utexas.edu
lab56.ma.utexas.edu
lab57.ma.utexas.edu
lab58.ma.utexas.edu
lab59.ma.utexas.edu
lab5.ma.utexas.edu
lab60.ma.utexas.edu
lab61.ma.utexas.edu
lab62.ma.utexas.edu
lab63.ma.utexas.edu
lab64.ma.utexas.edu
lab65.ma.utexas.edu
lab66.ma.utexas.edu
lab67.ma.utexas.edu
lab68.ma.utexas.edu
lab69.ma.utexas.edu
lab6.ma.utexas.edu
lab70.ma.utexas.edu
lab7.ma.utexas.edu
lab8.ma.utexas.edu
lab9.ma.utexas.edu
linux100.ma.utexas.edu
linux104.ma.utexas.edu
linux110.ma.utexas.edu
linux115.ma.utexas.edu
linux119.ma.utexas.edu
linux122.ma.utexas.edu
linux149.ma.utexas.edu
linux14.ma.utexas.edu
linux15.ma.utexas.edu
linux164.ma.utexas.edu
linux169.ma.utexas.edu
linux16.ma.utexas.edu
linux17.ma.utexas.edu
linux180.ma.utexas.edu
linux181.ma.utexas.edu
linux184.ma.utexas.edu
linux18.ma.utexas.edu
linux20.ma.utexas.edu
linux21.ma.utexas.edu
linux24.ma.utexas.edu
linux27.ma.utexas.edu
linux28.ma.utexas.edu
linux29.ma.utexas.edu
linux2.ma.utexas.edu
linux30.ma.utexas.edu
linux31.ma.utexas.edu
linux32.ma.utexas.edu
linux38.ma.utexas.edu
linux40.ma.utexas.edu
linux41.ma.utexas.edu
linux46.ma.utexas.edu
linux4.ma.utexas.edu
linux50.ma.utexas.edu
linux52.ma.utexas.edu
linux54.ma.utexas.edu
linux57.ma.utexas.edu
linux62.ma.utexas.edu
linux64.ma.utexas.edu
linux66.ma.utexas.edu
linux68.ma.utexas.edu
linux69.ma.utexas.edu
linux70.ma.utexas.edu
linux71.ma.utexas.edu
linux72.ma.utexas.edu
linux74.ma.utexas.edu
linux76.ma.utexas.edu
linux79.ma.utexas.edu
linux80.ma.utexas.edu
linux82.ma.utexas.edu
linux83.ma.utexas.edu
linux86.ma.utexas.edu
linux91.ma.utexas.edu
linux92.ma.utexas.edu
linux96.ma.utexas.edu
linux9.ma.utexas.edu

parallelization/check_hostfile (new executable file)

@@ -0,0 +1,6 @@
#!/bin/bash
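# Print every host from the hostfile (first column) together with its CPU
# count; hosts that are down simply time out after 5 seconds.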
for a in $(cat $1 | egrep -o '^[^ ]*'); do
echo -n "$a "
ssh -o ConnectTimeout=5 $a "cat /proc/cpuinfo | egrep '^processor' | wc -l"
done


@@ -0,0 +1,19 @@
#!/usr/bin/python

wordlength = 16
n = 101038
res = 200
radius = 1
q = [1,1,1]

denom = round(res/radius)

cmd = "IDLIST=idlist_{len} ../complex_anosov summary {n} {q1} {q2} {q3} {rnum}/{rden} {inum}/{iden}"

for i in range(-res,res+1):
    for j in range(0,res+1):
        if i == 0 and j == 0:
            continue
        print(cmd.format(
            len=wordlength, n=n, q1=q[0], q2=q[1], q3=q[2],
            rnum=i, inum=j, rden=denom, iden=denom))
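Each printed line is one independent invocation of complex_anosov, one grid point of the parameter region. With the values above, the first line written to commands (i = -200, j = 0) is:

IDLIST=idlist_16 ../complex_anosov summary 101038 1 1 1 -200/200 0/200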


@@ -1,5 +0,0 @@
linux50 slots=4
linux52 slots=4
linux57 slots=4
linux110 slots=4
linux115 slots=4


@@ -1,48 +0,0 @@
linux100 slots=4
linux104 slots=4
linux110 slots=4
linux122 slots=4
linux149 slots=4
linux14 slots=4
linux15 slots=2
linux16 slots=4
linux17 slots=2
linux180 slots=4
linux181 slots=2
linux184 slots=4
linux18 slots=4
linux20 slots=4
linux21 slots=4
linux24 slots=4
linux27 slots=4
linux29 slots=4
linux2 slots=4
linux30 slots=4
linux31 slots=4
linux32 slots=4
linux38 slots=2
linux40 slots=4
linux41 slots=4
linux46 slots=4
linux4 slots=4
linux50 slots=4
linux52 slots=4
linux54 slots=4
linux57 slots=4
linux62 slots=2
linux64 slots=4
linux68 slots=4
linux69 slots=4
linux70 slots=4
linux71 slots=4
linux72 slots=4
linux74 slots=4
linux76 slots=4
linux79 slots=4
linux80 slots=4
linux83 slots=4
linux86 slots=4
linux91 slots=4
linux92 slots=4
linux96 slots=4
linux9 slots=2


@@ -0,0 +1,19 @@
Instructions to run in parallel on a cluster:

The hostfile should have one line per host with its slot count, like this:
host1 slots=2
host2 slots=4
host3 slots=4
host4 slots=8
...

- git clone the repository
- fix dependencies (GMP and MPSolve)
- compile
- ./generate_commands.py > commands
- get a hostfile
- check the hostfile with ./check_hostfile, delete entries which don't work
- get an idlist (precomputed with the trace_ids subcommand)
- ./sync
- delete old result and done files (see the note below)
- execute ./run on the server
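Note on the last two steps: runjobs.py appends program output to result_000, result_001, ... and the ids of finished commands to done_000, done_001, ..., one pair of files per MPI rank. Leftover done_* files mark those commands as already completed; that is what you want when resuming an interrupted run, but not when starting a fresh one.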


@@ -1,51 +0,0 @@
linux100
linux104
linux110
linux115
linux122
linux149
linux14
linux15
linux164
linux169
linux16
linux17
linux180
linux181
linux184
linux18
linux20
linux21
linux24
linux27
linux29
linux2
linux30
linux31
linux32
linux38
linux40
linux41
linux46
linux4
linux50
linux52
linux54
linux57
linux62
linux64
linux68
linux69
linux70
linux71
linux72
linux74
linux76
linux79
linux80
linux83
linux86
linux91
linux92
linux96
linux9


@@ -1,409 +0,0 @@
#include "parallel.h"
#include <mpi.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <malloc.h>
#include <stdlib.h>
//#define DEBUG INFO
#define DEBUG(msg, ...)
#define INFO(msg, ...) fprintf(stderr, "[%003d%10.3f] " msg, mpi_rank(0), runtime(), ##__VA_ARGS__)
//#define DEBUG(msg, ...) fprintf(stderr, "[ %10.3f] " msg, runtime(), ##__VA_ARGS__)
//#define DEBUG_MPI(msg, node, ...) fprintf(stderr, "[%003d%10.3f] " msg, node, runtime(), ##__VA_ARGS__)
#define DONE(x) *((int*)(x))
enum message_tag {
PARALLEL_ORDER = 0,
PARALLEL_RESULT,
PARALLEL_SHUTDOWN,
PARALLEL_GLOBAL_DATA
};
struct timespec starttime;
int mpi_rank(int activate_mpi)
{
static int active = 0;
if(activate_mpi)
active = 1;
if(!active)
return 0;
else {
int rank;
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
return rank;
}
}
void start_timer()
{
clock_gettime(CLOCK_MONOTONIC, &starttime);
}
double runtime()
{
struct timespec curtime;
double diff;
clock_gettime(CLOCK_MONOTONIC, &curtime);
return (curtime.tv_sec - starttime.tv_sec) + (curtime.tv_nsec - starttime.tv_nsec) / 1e9;
}
parallel_context *parallel_init()
{
parallel_context *ctx = malloc(sizeof(parallel_context));
if(!getenv("OMPI_COMM_WORLD_SIZE")) {
ctx->mpi_mode = 0;
DEBUG("Running standalone.\n");
return ctx;
}
ctx->mpi_mode = 1;
int result = MPI_Init(NULL, NULL);
MPI_Comm_size(MPI_COMM_WORLD, &ctx->size);
MPI_Comm_rank(MPI_COMM_WORLD, &ctx->rank);
MPI_Get_processor_name(ctx->processor_name, &ctx->processor_name_len);
mpi_rank(1); // display the rank in debug output from now on
if(ctx->rank == 0)
DEBUG("Running in mpi mode, %d nodes.\n", ctx->size);
return ctx;
}
void parallel_destroy(parallel_context* ctx)
{
if(ctx->mpi_mode) {
MPI_Type_free(&ctx->order_datatype);
MPI_Type_free(&ctx->result_datatype);
MPI_Finalize();
}
free(ctx);
}
void parallel_set_datasize_and_callbacks(parallel_context *ctx, parallel_callback_init init, parallel_callback_job job, parallel_callback_destroy destroy, int global_data_size, int node_data_size, int input_size, int output_size)
{
ctx->init = init;
ctx->destroy = destroy;
ctx->job = job;
ctx->global_data_size = global_data_size;
ctx->node_data_size = node_data_size;
ctx->input_size = input_size;
ctx->output_size = output_size;
if(ctx->mpi_mode) {
// create a datatype for job orders, consisting of an integer (the job id) and a user-defined section
int order_blocklengths[2] = {1, input_size};
MPI_Aint order_displacements[2] = {0, sizeof(int)};
MPI_Datatype order_types[2] = {MPI_INT, MPI_BYTE};
MPI_Type_create_struct(2, order_blocklengths, order_displacements, order_types, &ctx->order_datatype);
MPI_Type_commit(&ctx->order_datatype);
int result_blocklengths[2] = {1, output_size};
MPI_Aint result_displacements[2] = {0, sizeof(int)};
MPI_Datatype result_types[2] = {MPI_INT, MPI_BYTE};
MPI_Type_create_struct(2, result_blocklengths, result_displacements, result_types, &ctx->result_datatype);
MPI_Type_commit(&ctx->result_datatype);
}
}
int parallel_job(parallel_context *ctx, const void *global_data, void *node_data, int block)
{
MPI_Status status;
double jobtime;
void *input_and_job_nr = malloc(ctx->input_size + sizeof(int));
void *output_and_job_nr = malloc(ctx->output_size + sizeof(int));
void *input = input_and_job_nr + sizeof(int);
int *job_nr = (int *)input_and_job_nr;
void *output = output_and_job_nr + sizeof(int);
int *output_job_nr = (int *)output_and_job_nr;
int result = 0;
int message_present;
if(block) {
jobtime = -MPI_Wtime();
MPI_Probe(0, MPI_ANY_TAG, MPI_COMM_WORLD,
&status);
jobtime += MPI_Wtime();
INFO("TIMING: Probe() took %f seconds\n", jobtime);
message_present = 1;
} else {
MPI_Iprobe(0, MPI_ANY_TAG, MPI_COMM_WORLD,
&message_present, &status);
}
// DEBUG("Message received: source = %d, tag = %d\n", status.MPI_SOURCE, status.MPI_TAG);
if(message_present) {
if(status.MPI_TAG == PARALLEL_SHUTDOWN) {
DEBUG("Shutting down\n");
result = 1;
} else if(status.MPI_TAG == PARALLEL_ORDER) {
MPI_Recv(input_and_job_nr,
1, ctx->order_datatype,
0, PARALLEL_ORDER, MPI_COMM_WORLD,
&status);
DEBUG("Working on job %d\n", *job_nr);
jobtime = -MPI_Wtime();
// do the actual work
ctx->job(global_data, node_data, input, output);
jobtime += MPI_Wtime();
INFO("TIMING: job %d took %f seconds\n", *job_nr, jobtime);
*output_job_nr = *job_nr;
jobtime = -MPI_Wtime();
MPI_Send(output_and_job_nr,
1, ctx->result_datatype,
0, PARALLEL_RESULT, MPI_COMM_WORLD);
jobtime += MPI_Wtime();
INFO("TIMING: Send() took %f seconds\n", jobtime);
}
} else {
result = 2;
}
free(input_and_job_nr);
free(output_and_job_nr);
return result;
}
int parallel_work(parallel_context *ctx)
{
// do nothing in non-mpi mode
if(ctx->mpi_mode == 0)
return 0;
void *global_data = malloc(ctx->global_data_size);
void *node_data = malloc(ctx->node_data_size);
// wait for global data
MPI_Bcast(global_data, ctx->global_data_size, MPI_BYTE, 0, MPI_COMM_WORLD);
DEBUG("Global data received\n");
// initialize node_data (and do once-per-node computation)
ctx->init(global_data, node_data);
DEBUG("Initialization completed\n");
int shutdown = 0;
while(shutdown != 1) {
shutdown = parallel_job(ctx, global_data, node_data, 1);
}
ctx->destroy(global_data, node_data);
free(global_data);
free(node_data);
return 0;
}
int parallel_run(parallel_context *ctx, const void *global_data, const void *input_array, void *output_array, unsigned int njobs, const char *_restart_filename)
{
// in non-mpi-mode, just run init, forall(jobs) job
if(ctx->mpi_mode == 0) {
void *node_data = malloc(ctx->node_data_size);
int result = ctx->init(global_data, node_data);
if(result != 0)
goto cleanup_standalone;
for(int i = 0; i < njobs; i++) {
result = ctx->job(
global_data,
node_data,
input_array + ctx->input_size*i,
output_array + ctx->output_size*i);
if(result != 0)
goto cleanup_standalone;
}
cleanup_standalone:
ctx->destroy(global_data, node_data);
return result;
} else {
// if no restart file was specified, pick a filename
char *restart_filename;
char buffer[128];
int restartf;
if(_restart_filename == NULL) {
time_t t = time(NULL);
struct tm *loctm = localtime(&t);
strftime(buffer, sizeof(buffer), "restart/restart_%y%m%d_%H%M%S", loctm);
restart_filename = buffer;
} else {
restart_filename = (char *)_restart_filename;
}
// open restart file if it exists, otherwise create it
int continuing = 1;
restartf = open(restart_filename, O_RDWR);
if(restartf == -1 && errno == ENOENT) {
restartf = open(restart_filename, O_RDWR | O_CREAT, 0666);
continuing = 0;
}
if(restartf == -1) {
DEBUG("Error opening restart file: %s\n", strerror(errno));
exit(1);
}
// map restart file
int itemsize = (ctx->output_size + sizeof(int)); // for every job, store output, and completed flag
ftruncate(restartf, njobs*itemsize);
void *alljobs = mmap(0, njobs*itemsize, PROT_READ | PROT_WRITE, MAP_SHARED, restartf, 0);
if(alljobs == MAP_FAILED) {
DEBUG("Error mapping restart file: %s\n", strerror(errno));
exit(1);
}
// count completed jobs, or initialize jobs
int completed = 0;
if(continuing) {
for(int i = 0; i < njobs; i++)
if(DONE(alljobs + i*itemsize))
completed++;
} else {
for(int i = 0; i < njobs; i++) {
DONE(alljobs + i*itemsize) = 0;
memcpy(alljobs + i*itemsize + sizeof(int), input_array + i*ctx->input_size, ctx->input_size); // copy input data
}
}
fsync(restartf);
if(continuing) {
INFO("Continuing from restart file, %d/%d jobs completed, %d nodes\n", completed, njobs, ctx->size);
} else {
INFO("Starting from scratch, %d jobs, %d nodes\n", njobs, ctx->size);
}
if(completed >= njobs)
goto cleanup_mpi;
/* Send global data */
MPI_Bcast((void*)global_data, ctx->global_data_size, MPI_BYTE, 0, MPI_COMM_WORLD);
DEBUG("Global data sent\n");
// we want to be able to run jobs ourselves, so initialize node_data
void *node_data = malloc(ctx->node_data_size);
ctx->init(global_data, node_data);
void *input_message_buffer = malloc(ctx->input_size + sizeof(int));
void *output_message_buffer = malloc(ctx->output_size + sizeof(int));
int *active_jobs = malloc(sizeof(int)*ctx->size);
memset(active_jobs, 0, ctx->size*sizeof(int));
int active_worker_nodes = ctx->size - 1; // we don't count ourselves, since we can't shut ourselves down
// find next unfinished job
int current = 0;
while(current < njobs && DONE(alljobs + current*itemsize))
current++;
// assign initial jobs, 2 for each node (including ourselves)
for(int i = 0; i < 2*ctx->size; i++) {
if(current >= njobs) // all jobs are assigned
break;
// send job id and input data
// node 0 (ourselves) also receives orders; we process them between bookkeeping
*((int*)input_message_buffer) = current;
memcpy(input_message_buffer + sizeof(int), input_array + current*ctx->input_size, ctx->input_size);
MPI_Send(input_message_buffer, 1, ctx->order_datatype,
i%ctx->size, PARALLEL_ORDER, MPI_COMM_WORLD);
DEBUG("Job %d sent to node %d\n", current, i%ctx->size);
active_jobs[i%ctx->size]++;
current++;
}
MPI_Status status;
int message_present;
while(active_jobs[0] != 0 || active_worker_nodes != 0) {
MPI_Iprobe(MPI_ANY_SOURCE, PARALLEL_RESULT, MPI_COMM_WORLD, &message_present, &status);
DEBUG("Message present, tag = %d, source = %d\n", status.MPI_TAG, status.MPI_SOURCE);
if(!message_present) {
// if there are no more messages to process, we can run a job ourselves before returning to managing
DEBUG("Start running job myself\n");
int result = parallel_job(ctx, global_data, node_data, 0);
DEBUG("Finished running job myself, result = %d\n");
} else if(status.MPI_TAG == PARALLEL_RESULT) {
MPI_Recv(output_message_buffer, 1, ctx->result_datatype,
MPI_ANY_SOURCE, PARALLEL_RESULT, MPI_COMM_WORLD, &status);
DEBUG("Got message tag %d from node %d\n", status.MPI_TAG, status.MPI_SOURCE);
int node = status.MPI_SOURCE;
int id = *((int*)output_message_buffer);
memcpy(alljobs + id*itemsize + sizeof(int), output_message_buffer + sizeof(int), ctx->output_size);
DONE(alljobs + id*itemsize) = 1;
active_jobs[node]--;
// todo: deal with unresponsive nodes
// strategy: when no jobs left, go through unfinished list again, incrementing oversubscribe counter
// if oversubscribe counter is at limit, shut node down instead
//
if(current >= njobs) { // all jobs are assigned, try to shut down node
// don't try to shut down ourselves, and only if it has no other jobs to do
if(node != 0 && active_jobs[node] == 0) {
MPI_Send(NULL, 0, MPI_BYTE, node, PARALLEL_SHUTDOWN, MPI_COMM_WORLD);
active_worker_nodes--;
INFO("job %d completed by node %d, shut down, %d workers remaining\n", id, node, active_worker_nodes);
}
} else {
*((int*)input_message_buffer) = current;
memcpy(input_message_buffer + sizeof(int), input_array + current*ctx->input_size, ctx->input_size);
MPI_Send(input_message_buffer, 1, ctx->order_datatype,
node, PARALLEL_ORDER, MPI_COMM_WORLD);
active_jobs[node]++;
current++;
if(active_jobs[node] < 3) {
*((int*)input_message_buffer) = current;
memcpy(input_message_buffer + sizeof(int), input_array + current*ctx->input_size, ctx->input_size);
MPI_Send(input_message_buffer, 1, ctx->order_datatype,
node, PARALLEL_ORDER, MPI_COMM_WORLD);
active_jobs[node]++;
current++;
INFO("job %d completed by node %d, continues with %d and %d\n", id, node, current-1, current-2);
} else {
INFO("job %d completed by node %d, continues with %d\n", id, node, current-1);
}
}
}
}
for(int i = 0; i < njobs; i++) {
memcpy(output_array + i*ctx->output_size, alljobs + i*itemsize + sizeof(int), ctx->output_size);
}
free(input_message_buffer);
free(output_message_buffer);
free(node_data);
free(active_jobs);
cleanup_mpi:
munmap(alljobs, njobs*itemsize);
close(restartf);
}
return 0;
}
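(For reference, the restart file mapped above consists of njobs fixed-size records of sizeof(int) + output_size bytes each: a completed flag, followed by the job's input when the record is first created, overwritten by its output once the job finishes. This implicitly assumes input_size <= output_size.)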


@@ -1,118 +0,0 @@
#ifndef PARALLEL_H
#define PARALLEL_H
/*
this is a library to parallelize workloads which can be split up naturally
into a sequence of independent jobs, using MPI. A program will usually
- do precomputation
- fill array with input data
- do the parallel work
- print the output data
we want to enable restarts, so that only unfinished jobs need to be repeated.
Further, we want to be resilient to slow/unreliable network and to losing
nodes. There is a main node and a number of workers. The main node does the
precomputation and then retires to do administrative work, and the workers
do the actual jobs. We also want to switch to serial mode if the program is
called without MPI.
The following data has to be transmitted between nodes:
- results of the precomputation (read-only, shared between nodes)
- job-specific input data, generated by main node before parallel part
- output data for each job
the parallel work shall be given as a callback function which takes
input data and precomputation data as parameter
the above program will look like this for us:
- parallel_init
- if we are a worker, do parallel_work(init_callback, job_callback), exit
- do precomputation
- fill array with input data
- output_array = parallel_run(input_array)
- print the output data
parallel_init:
- check if we're running as an mpi program
- init mpi, check what kind of node we are
parallel_work(init_callback1, init_callback2, job_callback):
- receive global_precomp (???)
- worker_precomp = init_callback2(global_precomp, worker_precomp)
- infinite loop:
- wait for job on network, receive input
- output = job_callback(global_precomp, worker_precomp, input)
- send output on network
- exit loop on shutdown signal
parallel_run(global_precomp, input_array, restart file, callbacks):
- check if we're running as an MPI program
- send global_precomp to all nodes (if MPI)
- if(restart file given and exists) read restart file
- else create new restart file
- until(all jobs finished):
- if MPI:
- send next job & input to appropriate node
- if all jobs are in work, reassign unfinished ones (up to limit)
- collect outputs
- if no MPI:
- worker_precomp = init_callback1
- worker_precomp = init_callback2(global_precomp, worker_precomp)
- for(j in jobs)
- output(j) = job_callback(global_precomp, worker_precomp, input(j))
- delete restart file
- return array of outputs
parallel_destroy():
- free everything
have a context? probably yes: parallel_context
plan:
- make interface
- implement no-MPI part
- restructure singular_values.c to use interface
- implement MPI part
*/
#include <mpi.h>
#include <time.h>
typedef void (*parallel_callback_destroy)(const void*, void*);
typedef int (*parallel_callback_init)(const void*,void*);
typedef int (*parallel_callback_job)(const void*,void*,const void*,void*);
typedef struct {
int mpi_mode;
struct timespec starttime;
char processor_name[MPI_MAX_PROCESSOR_NAME];
int processor_name_len;
int rank;
int size;
MPI_Datatype order_datatype;
MPI_Datatype result_datatype;
parallel_callback_init init;
parallel_callback_job job;
parallel_callback_destroy destroy;
void *global_data;
void *node_data;
int global_data_size;
int node_data_size;
int input_size;
int output_size;
} parallel_context;
parallel_context *parallel_init();
void parallel_set_datasize_and_callbacks(parallel_context *ctx, parallel_callback_init init, parallel_callback_job job, parallel_callback_destroy destroy, int global_data_size, int node_data_size, int input_size, int output_size);
int parallel_work(parallel_context *ctx);
int parallel_run(parallel_context *ctx, const void *global_data, const void *input_array, void *output_array, unsigned int njobs, const char *restart_filename);
void parallel_destroy(parallel_context* ctx);
int mpi_rank(int activate_mpi);
void start_timer();
double runtime();
#endif
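To make the design above concrete: a caller of this (now deleted) interface would have looked roughly like the sketch below. This is a hypothetical toy program, not from the repository; the toy_* names and the squaring job are invented, only the parallel_* functions and the parallel_context fields come from parallel.h.

#include <stdio.h>
#include "parallel.h"

#define NJOBS 100

/* toy job: square one integer; no real global or per-node state is needed */
static int toy_init(const void *global, void *node) { return 0; }
static void toy_destroy(const void *global, void *node) { }

static int toy_job(const void *global, void *node, const void *in, void *out)
{
    int x = *(const int *)in;
    *(int *)out = x * x;
    return 0;
}

int main(void)
{
    parallel_context *ctx = parallel_init();
    parallel_set_datasize_and_callbacks(ctx, toy_init, toy_job, toy_destroy,
        sizeof(int), sizeof(int), sizeof(int), sizeof(int));

    /* workers loop inside parallel_work() until the main node sends PARALLEL_SHUTDOWN */
    if(ctx->mpi_mode && ctx->rank != 0) {
        parallel_work(ctx);
        parallel_destroy(ctx);
        return 0;
    }

    /* main node: fill the input array, run all jobs, print the outputs */
    int global = 0; /* unused here, but broadcast to every node */
    int input[NJOBS], output[NJOBS];
    for(int i = 0; i < NJOBS; i++)
        input[i] = i;

    parallel_run(ctx, &global, input, output, NJOBS, "toy_restart");

    for(int i = 0; i < NJOBS; i++)
        printf("%d %d\n", input[i], output[i]);

    parallel_destroy(ctx);
    return 0;
}

Run standalone, this executes the jobs serially; under mpirun it distributes them and records completed jobs in the restart file toy_restart, so an interrupted run can be resumed.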

parallelization/run Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
cd /home/stecker/compute/triangle_reflection_complex/parallelization
unset DISPLAY
time mpirun -n 50 -x LD_LIBRARY_PATH=/home/stecker/compute/mps/lib --hostfile hostfile_big python3 runjobs.py


@@ -1,9 +0,0 @@
#!/bin/bash
# nmax=895882 # up to reflection group word length 22 ( 555 group)
nmax=700000 # up to reflection group word length 22 ( 444 group)
# nmax=11575 # up to reflection group word length 14
#time mpirun --mca opal_warn_on_missing_libcuda 0 -x LD_LIBRARY_PATH=/home/stecker/svmpi/libs ./singular_values $nmax ejp_trg_restart test.out
time mpirun --mca opal_warn_on_missing_libcuda 0 --mca mpi_yield_when_idle 1 -np 4 ./singular_values 700000 4 4 4 1 1 100 1 100 100 $1


@@ -1,14 +0,0 @@
#!/bin/bash
cd /home/stecker/svmpi/
nmax=895882 # up to reflection group word length 22
# nmax=11575 # up to reflection group word length 14
outfile=result_$(date +%Y%m%d_%H%M%S).out
unset DISPLAY
make singular_values &&
time mpirun -n 100 -x LD_LIBRARY_PATH=/home/stecker/svmpi/libs --hostfile hostfile_big ./singular_values $nmax utexas_cluster_restart $outfile

parallelization/runjobs.py Executable file

@@ -0,0 +1,57 @@
#!/usr/bin/python

from mpi4py import MPI
import os
import re
import math
import subprocess
import time

comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nodes = comm.Get_size()

# print(os.path.abspath(os.curdir))

# collect the ids of all commands which were completed in earlier runs
done = set()
for f in os.listdir('.'):
    if re.search('^done_[0-9]+', f):
        fp = open(f, "r")
        for x in fp:
            done.add(int(x))
        fp.close()

# read the command list, keeping only the commands which are not done yet
f = open("commands", "r")
idx = 0
todo = []
for c in f:
    if idx not in done:
        todo.append((idx,c))
    idx += 1
f.close()

# static split: rank r gets todo[floor(len/nodes*r) : floor(len/nodes*(r+1))]
start = math.floor(len(todo)/nodes*rank)
end = math.floor(len(todo)/nodes*(rank+1))
if rank == nodes-1:
    end = len(todo)

print("{n:d} commands awaiting execution, {nnode:d} of them in node {rank:d}".format(n=len(todo),nnode=end-start,rank=rank))
time.sleep(1) # to make sure all nodes read the status first before more gets done

outfilename = "result_{node:003d}".format(node=rank)
donefilename = "done_{node:003d}".format(node=rank)
outfile = open(outfilename, "a")
donefile = open(donefilename, "a")

for i in range(start, end):
    result = subprocess.call(todo[i][1], stdout=outfile, shell=True)
    if result == 0:
        donefile.write(str(todo[i][0]) + '\n')
    else:
        print("Command failed: {cmd}".format(cmd=todo[i][1]))
    outfile.flush()
    donefile.flush()

outfile.close()
donefile.close()


@@ -1,20 +0,0 @@
#!/bin/bash
#SBATCH -J ejp_trg
#SBATCH -o logs/ejp_trg.o%j
#SBATCH -e logs/ejp_trg.e%j
#SBATCH -p skx-dev
#SBATCH -N 1
#SBATCH -n 48
#SBATCH -t 00:05:00
#SBATCH --mail-user=mail@florianstecker.net
#SBATCH --mail-type=all
export LD_LIBRARY_PATH=$WORK/mps/lib:$LD_LIBRARY_PATH
d=$(date +%Y%m%d_%H%M%S)
nmax=895882 # up to reflection group word length 22
# nmax=11575 # up to reflection group word length 14
ibrun ./singular_values $nmax $SCRATCH/ejp_trg_restart $WORK/ejp_trg/output/result_$d
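Presumably this batch script was submitted with SLURM's standard submit command; the filename stampede.slurm is taken from the sync script below:

sbatch stampede.slurm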

parallelization/sync Executable file

@@ -0,0 +1,7 @@
#!/bin/bash
rsync -rvt * utexas:compute/triangle_reflection_complex/parallelization/
#rsync -lvt /usr/lib/libmps.so* utexas:compute/mps/lib/
#rsync -rvt /usr/include/mps utexas:compute/mps/include/
# now run it with ssh utexas compute/triangle_reflection_complex/parallelization/run


@@ -1,8 +0,0 @@
#!/bin/bash
rsync -vt *.c *.h Makefile stampede.slurm stampede:work/ejp_trg/
#rsync -lvt /usr/lib/libmps.so* /usr/include/mps utexas:work/ejp_trg/libs/
# now run it with a job script
# get MPSolve from https://numpi.dm.unipi.it/_media/software/mpsolve/mpsolve-3.2.1.tar.bz2


@@ -1,7 +0,0 @@
#!/bin/bash
rsync -vt *.c *.h Makefile hostfile hostfile_big allhosts localnames run_utexas run_local utexas:svmpi/
rsync -lvt /usr/lib/libmps.so* utexas:svmpi/libs/
rsync -rvt /usr/include/mps utexas:svmpi/libs/
# now run it with ssh utexas -t ssh linux50 svmpi/run_utexas

qext.c

@@ -37,16 +37,6 @@ static void qext_init_type(struct qext_type *type)
}
}
struct qext_type *qext_newtype(int rank, const int *coeffs)
{
struct qext_type *type = malloc(sizeof(struct qext_type));
type->rank = rank;
type->integer_coeffs = coeffs;
qext_init_type(type);
return type;
}
void qext_init(qext_number x, struct qext_type *type)
{
if(type->coeffs == NULL) // uninitialized default type

qext.h

@@ -20,7 +20,6 @@ extern struct qext_type *QT_TRIVIAL;
extern struct qext_type *QT_SQRT5;
extern struct qext_type *QT_GAUSS_SQRT5;
struct qext_type *qext_newtype(int rank, const int *coeffs);
void qext_init(qext_number x, struct qext_type *type);
void qext_clear(qext_number x);
void qext_set(qext_number x, qext_number y);