// enumerate-balanced-ideals/thickenings.c

#define _GNU_SOURCE
#include <stdio.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include "thickenings.h"
#include "coxeter.h"
#include "queue.h"
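// render a word, given as an array of generator indices, into buffer using the letters of alphabet;
// the empty word is rendered as "1"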
char *alphabetize(int *word, int len, const char *alphabet, char *buffer)
{
if(len == 0) {
buffer[0] = '1';
buffer[1] = 0;
return buffer;
}
int i = 0;
for(i = 0; i < len; i++)
buffer[i] = alphabet[word[i]];
buffer[i] = 0;
return buffer;
}
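// print a thickening as one row of colored cells using ANSI escape codes: the current head is highlighted,
// levels up to upto_level are printed as digits (dark background for elements of the thickening, light
// background for their opposites, "+" for levels of 10 and more), everything else stays blank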
void print_thickening(int rank, int order, const signed char *thickening, int upto_level, const char *alphabet, FILE *f)
{
for(int i = 0; i < order; i++) {
if(thickening[i] == HEAD_MARKER)
fprintf(f, "\e[41;37mx\e[0m");
else if(thickening[i] < - upto_level || thickening[i] > upto_level)
fprintf(f, " ");
else if(thickening[i] < 0 && thickening[i] > -10)
fprintf(f, "\e[47;30m%d\e[0m", -thickening[i]);
else if(thickening[i] <= -10)
fprintf(f, "\e[47;30m+\e[0m");
else if(thickening[i] > 0 && thickening[i] < 10)
fprintf(f, "\e[40;37m%d\e[0m", thickening[i]);
else if(thickening[i] >= 10)
fprintf(f, "\e[40;37m+\e[0m");
else
fprintf(f, " ");
}
fprintf(f, "\e[K");
}
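// comparison callback for qsort_r: order group elements by increasing word length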
static int compare_wordlength(const void *a, const void *b, void *gr)
{
int i = *((int*)a);
int j = *((int*)b);
node_t *graph = (node_t*)gr;
return graph[i].wordlength - graph[j].wordlength;
}
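// Build the full graph of the Coxeter group of the given type in "graph" (allocated with graph_alloc):
// for every element we compute left/right multiplication by each generator, a reduced word, the word
// length, the opposite element w0*w, and the covering relations of the Bruhat order (bruhat_lower /
// bruhat_higher). Elements are indexed by increasing word length, so index 0 is the identity and
// index order-1 is the longest element.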
void prepare_graph(semisimple_type_t type, node_t *graph)
{
queue_t queue;
edgelist_t *edgelists_lower, *edgelists_higher;
int rank, order, hyperplanes;
edgelist_t *edge, *previous;
int edgelist_count, hyperplane_count;
int current;
int *graph_data;
node_t *graph_unsorted;
int *wordlength_order, *reverse_wordlength_order, *seen;
// initialize
rank = coxeter_rank(type);
order = coxeter_order(type);
hyperplanes = coxeter_hyperplanes(type);
edgelists_higher = graph[0].bruhat_higher;
edgelists_lower = &graph[0].bruhat_higher[order*hyperplanes/2];
graph_data = (int*)malloc(order*rank*sizeof(int));
graph_unsorted = (node_t*)malloc(order*sizeof(node_t));
wordlength_order = (int*)malloc(order*sizeof(int));
reverse_wordlength_order = (int*)malloc(order*sizeof(int));
seen = (int*)malloc(order*sizeof(int));
for(int i = 0; i < order; i++) {
graph_unsorted[i].wordlength = INT_MAX;
graph[i].bruhat_lower = 0;
graph[i].bruhat_higher = 0;
graph[i].is_hyperplane_reflection = 0;
}
// get coxeter graph
generate_coxeter_graph(type, graph_data);
for(int i = 0; i < order; i++)
graph_unsorted[i].left = &graph_data[i*rank]; // row i of graph_data: left multiplication of element i by each generator
// find wordlengths
graph_unsorted[0].wordlength = 0;
queue_init(&queue);
queue_put(&queue, 0);
while((current = queue_get(&queue)) != -1) {
for(int i = 0; i < rank; i++) {
int neighbor = graph_unsorted[current].left[i];
if(graph_unsorted[neighbor].wordlength > graph_unsorted[current].wordlength + 1) {
graph_unsorted[neighbor].wordlength = graph_unsorted[current].wordlength + 1;
queue_put(&queue, neighbor);
}
}
}
// sort by wordlength
for(int i = 0; i < order; i++)
wordlength_order[i] = i;
qsort_r(wordlength_order, order, sizeof(int), compare_wordlength, graph_unsorted); // so wordlength_order is a map new index -> old index
for(int i = 0; i < order; i++)
reverse_wordlength_order[wordlength_order[i]] = i; // reverse_wordlength_order is a map old index -> new index
for(int i = 0; i < order; i++) {
// we have only set left and wordlength so far, so just copy these
graph[i].wordlength = graph_unsorted[wordlength_order[i]].wordlength;
for(int j = 0; j < rank; j++)
graph[i].left[j] = reverse_wordlength_order[graph_unsorted[wordlength_order[i]].left[j]]; // rewrite references
}
// find words
for(int i = 0; i < order; i++)
memset(graph[i].word, 0, hyperplanes*sizeof(int));
queue_init(&queue);
queue_put(&queue, 0);
while((current = queue_get(&queue)) != -1) {
for(int i = 0; i < rank; i++) {
int neighbor = graph[current].left[i];
if(graph[neighbor].wordlength == graph[current].wordlength + 1 && graph[neighbor].word[0] == 0) {
memcpy(&graph[neighbor].word[1], &graph[current].word[0], graph[current].wordlength*sizeof(int));
graph[neighbor].word[0] = i;
queue_put(&queue, neighbor);
}
}
}
// generate right edges
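// right multiplication w*s_j: start from s_j (= graph[0].left[j]) and apply the letters of w by left multiplication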
for(int i = 0; i < order; i++) {
for(int j = 0; j < rank; j++) {
current = graph[0].left[j];
for(int k = graph[i].wordlength - 1; k >= 0; k--) { // apply group element from right to left
current = graph[current].left[graph[i].word[k]];
}
graph[i].right[j] = current;
}
}
// find opposites
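// the opposite of w is w0*w, obtained by left-multiplying with the longest element (the last element in wordlength order)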
node_t *longest = &graph[order-1];
for(int i = 0; i < order; i++) {
current = i;
for(int k = longest->wordlength - 1; k >= 0; k--)
current = graph[current].left[longest->word[k]];
graph[i].opposite = current;
}
// enumerate hyperplanes
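// every hyperplane corresponds to a reflection of the form w*s_j*w^{-1}; mark each such element once and count them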
hyperplane_count = 0;
for(int i = 0; i < order; i++) {
for(int j = 0; j < rank; j++) {
current = 0;
int *word1 = graph[i].word;
int word1len = graph[i].wordlength;
int *word2 = graph[graph[i].right[j]].word; // want to calculate word2 * word1^{-1}
int word2len = graph[graph[i].right[j]].wordlength;
for(int k = 0; k < word1len; k++) // apply inverse, i.e. go from left to right
current = graph[current].left[word1[k]];
for(int k = word2len - 1; k >= 0; k--) // now from right to left
current = graph[current].left[word2[k]];
if(graph[current].is_hyperplane_reflection == 0) {
graph[current].is_hyperplane_reflection = 1;
hyperplane_count++;
}
}
}
// generate folding order
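// for every hyperplane reflection r and every element w, folding w onto r*w relates the two in the
// Bruhat order: the element with the shorter word lies below the one with the longer word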
edgelist_count = 0;
for(int i = 0; i < order; i++) {
if(graph[i].is_hyperplane_reflection) {
for(int j = 0; j < order; j++) {
current = j;
for(int k = graph[i].wordlength - 1; k >= 0; k--) // apply hyperplane reflection
current = graph[current].left[graph[i].word[k]];
if(graph[j].wordlength < graph[current].wordlength) { // current has higher bruhat order than j
edgelists_lower[edgelist_count].to = j;
edgelists_lower[edgelist_count].next = graph[current].bruhat_lower;
graph[current].bruhat_lower = &edgelists_lower[edgelist_count];
edgelist_count++;
} else if(graph[j].wordlength > graph[current].wordlength) { // j has higher bruhat order than current; these are already included from the other side
} else {
ERROR(1, "Chambers of equal word lengths should not be folded on each other!\n");
}
}
}
}
// remove redundant edges
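// keep only covering relations: going through the edges of node i by increasing length, an edge is
// redundant if its endpoint is already reachable from i through the shorter edges kept so far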
for(int i = 0; i < order; i++) {
memset(seen, 0, order*sizeof(int));
queue_init(&queue);
for(int len = 1; len <= graph[i].wordlength; len++) {
// remove all edges originating from i of length len which connect to something already seen using shorter edges
edge = graph[i].bruhat_lower;
previous = (edgelist_t*)0;
while(edge) {
if(graph[i].wordlength - graph[edge->to].wordlength != len) {
previous = edge;
} else if(seen[edge->to]) {
if(previous)
previous->next = edge->next;
else
graph[i].bruhat_lower = edge->next;
} else {
previous = edge;
seen[edge->to] = 1;
queue_put(&queue, edge->to);
}
edge = edge->next;
}
// see which nodes we can reach using only edges up to length len, mark them as seen
while((current = queue_get(&queue)) != -1) {
edge = graph[current].bruhat_lower;
while(edge) {
if(!seen[edge->to]) {
seen[edge->to] = 1;
queue_put(&queue, edge->to);
}
edge = edge->next;
}
}
}
}
// reverse folding order
edgelist_count = 0;
for(int i = 0; i < order; i++) {
edge = graph[i].bruhat_lower;
while(edge) {
edgelists_higher[edgelist_count].to = i;
edgelists_higher[edgelist_count].next = graph[edge->to].bruhat_higher;
graph[edge->to].bruhat_higher = &edgelists_higher[edgelist_count];
edgelist_count++;
edge = edge->next;
}
}
free(graph_data);
free(graph_unsorted);
free(wordlength_order);
free(reverse_wordlength_order);
free(seen);
}
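// linear search: does the list contain an edge to x?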
static int edgelist_contains(edgelist_t *list, int x) {
while(list) {
if(list->to == x)
return 1;
list = list->next;
}
return 0;
}
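// prepend an edge to "new" to the list; the link is taken from the preallocated storage array at *storage_index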
static edgelist_t *edgelist_add(edgelist_t *list, int new, edgelist_t *storage, int *storage_index)
{
edgelist_t *new_link = &storage[*storage_index];
new_link->next = list;
new_link->to = new;
(*storage_index)++;
return new_link;
}
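// Build the quotient of the full graph by the left and right action of the standard parabolic subgroups
// selected by the bitmasks "left" and "right" (bit j set means generator j is included). The nodes of
// simplified_graph are the double cosets, each represented by its element of minimal word length, with
// the induced Bruhat order. Returns the number of cosets, or -1 if "left" is not invariant under the
// opposition involution, which the construction requires.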
int prepare_simplified_graph(semisimple_type_t type, unsigned long left, unsigned long right, node_t *simplified_graph)
{
node_t *full_graph;
int edgelists_used;
int rank, order, hyperplanes;
int *reduced, *group, *simplified;
int *seen;
int current;
edgelist_t *edge, *previous;
queue_t queue;
int ncosets;
if(opposition_involution(type, left) != left)
return -1;
// initialize stuff
rank = coxeter_rank(type);
order = coxeter_order(type);
hyperplanes = coxeter_hyperplanes(type);
// the edge list storage was stashed in simplified_graph[0].bruhat_higher by graph_alloc(); use the
// first half for upward edges and the second half for downward edges (order and hyperplanes have to
// be known before these offsets can be computed)
edgelist_t *edgelists_higher = &simplified_graph[0].bruhat_higher[0];
edgelist_t *edgelists_lower = &simplified_graph[0].bruhat_higher[order*hyperplanes/2];
// get full graph
full_graph = graph_alloc(type);
prepare_graph(type, full_graph);
reduced = (int*)malloc(order*sizeof(int));
group = (int*)malloc(order*sizeof(int));
simplified = (int*)malloc(order*sizeof(int));
for(int i = 0; i < order; i++) {
group[i] = -1;
reduced[i] = i;
}
// step 1: group
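// flood fill: two elements lie in the same double coset iff they are connected by left multiplications
// with generators in "left" and right multiplications with generators in "right"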
for(int i = 0; i < order; i++) {
if(group[i] != -1)
continue;
queue_init(&queue);
queue_put(&queue, i);
while((current = queue_get(&queue)) != -1) {
if(group[current] != -1)
continue;
group[current] = i;
for(int j = 0; j < rank; j++) {
if(left & (1 << j))
queue_put(&queue, full_graph[current].left[j]);
if(right & (1 << j))
queue_put(&queue, full_graph[current].right[j]);
}
}
}
// step 2: find minimum
for(int i = 0; i < order; i++)
if(full_graph[i].wordlength < full_graph[reduced[group[i]]].wordlength)
reduced[group[i]] = i;
// step 3: assign minimum to all
for(int i = 0; i < order; i++)
reduced[i] = reduced[group[i]];
// step 4: assign indices to cosets
ncosets = 0;
for(int i = 0; i < order; i++)
if(reduced[i] == i)
simplified[i] = ncosets++;
for(int i = 0; i < order; i++)
simplified[i] = simplified[reduced[i]];
// fprintf(stderr, "Number of double cosets: %d\n\n", ncosets);
// simplified_graph = (node_t*) malloc(ncosets*sizeof(node_t));
seen = (int*) malloc(ncosets*sizeof(int));
edgelists_used = 0;
// step 5: set up nodes from minima
current = 0;
for(int i = 0; i < order; i++)
if(reduced[i] == i) { // is minimum
memcpy(simplified_graph[simplified[i]].word, full_graph[i].word, full_graph[i].wordlength*sizeof(int));
simplified_graph[simplified[i]].wordlength = full_graph[i].wordlength;
simplified_graph[simplified[i]].opposite = simplified[full_graph[i].opposite];
simplified_graph[simplified[i]].bruhat_lower = (edgelist_t*)0;
simplified_graph[simplified[i]].bruhat_higher = (edgelist_t*)0;
for(int j = 0; j < rank; j++) {
simplified_graph[simplified[i]].left[j] = simplified[full_graph[i].left[j]];
simplified_graph[simplified[i]].right[j] = simplified[full_graph[i].right[j]];
}
}
// step 6: find order relations
for(int i = 0; i < order; i++) {
edge = full_graph[i].bruhat_lower;
while(edge) {
int this = simplified[i];
int that = simplified[edge->to];
if(this != that) {
// found something
if(!edgelist_contains(simplified_graph[this].bruhat_lower, that))
simplified_graph[this].bruhat_lower = edgelist_add(simplified_graph[this].bruhat_lower, that, edgelists_lower, &edgelists_used);
ERROR(simplified_graph[this].wordlength <= simplified_graph[that].wordlength, "The order assumption is being violated!\n");
}
edge = edge->next;
}
}
// step 7: remove redundant edges
for(int i = 0; i < ncosets; i++) {
memset(seen, 0, ncosets*sizeof(int));
queue_init(&queue);
for(int len = 1; len <= simplified_graph[i].wordlength; len++) {
edge = simplified_graph[i].bruhat_lower;
previous = (edgelist_t*)0;
while(edge) {
// only look at edges of this length now
if(simplified_graph[i].wordlength - simplified_graph[edge->to].wordlength != len) {
// we only consider edges of length len in this pass
previous = edge;
} else if(seen[edge->to]) {
// this edge is redundant, remove it
// fprintf(stderr, "removing edge from %d to %d\n", i, edge->to);
if(previous)
previous->next = edge->next;
else
simplified_graph[i].bruhat_lower = edge->next;
} else {
// this edge was not redundant, add to seen
previous = edge;
seen[edge->to] = 1;
queue_put(&queue, edge->to);
}
edge = edge->next;
}
// calculate transitive closure of seen nodes
while((current = queue_get(&queue)) != -1) {
edge = simplified_graph[current].bruhat_lower;
while(edge) {
if(!seen[edge->to]) {
seen[edge->to] = 1;
queue_put(&queue, edge->to);
}
edge = edge->next;
}
}
}
}
// step 8: revert order
for(int i = 0; i < ncosets; i++) {
edge = simplified_graph[i].bruhat_lower;
while(edge) {
simplified_graph[edge->to].bruhat_higher =
edgelist_add(simplified_graph[edge->to].bruhat_higher,
i, edgelists_higher, &edgelists_used);
edge = edge->next;
}
}
// output as graphviz dot file
/*
fprintf(stdout, "digraph test123 {\n");
for(int i = 0; i < ncosets; i++) {
edge = simplified_graph[i].bruhat_lower;
while(edge) {
fprintf(stdout, "%s -> %s;\n",
alphabetize(simplified_graph[i].word, simplified_graph[i].wordlength, alphabet, buffer),
alphabetize(simplified_graph[edge->to].word, simplified_graph[edge->to].wordlength, alphabet, buffer2));
edge = edge->next;
}
}
fprintf(stdout, "}\n"); */
// some output
/* for(int i = 0; i < ncosets; i++)
fprintf(stderr, "%s <=> %s\n", simplified_graph[i].wordlength == 0 ? "1" : alphabetize(simplified_graph[i].word, simplified_graph[i].wordlength, alphabet, buffer), simplified_graph[simplified_graph[i].opposite].wordlength == 0 ? "1" : alphabetize(simplified_graph[simplified_graph[i].opposite].word, simplified_graph[simplified_graph[i].opposite].wordlength, alphabet, buffer2)); */
// fprintf(stderr, "\nAdded %d edges.\n\n", edgelists_used);
free(seen);
free(reduced);
free(group);
free(simplified);
graph_free(type, full_graph);
return ncosets;
}
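// allocate a graph with all per-node arrays (left/right multiplication tables and words) plus one shared
// block of edge list entries; the edge block is handed to the prepare_* functions via graph[0].bruhat_higher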
node_t *graph_alloc(semisimple_type_t type)
{
int rank = coxeter_rank(type);
int order = coxeter_order(type);
int hyperplanes = coxeter_hyperplanes(type);
node_t *graph = (node_t*)malloc(order*sizeof(node_t));
int *left = (int*)malloc(order*rank*sizeof(int));
int *right = (int*)malloc(order*rank*sizeof(int));
edgelist_t *edgelists = (edgelist_t*)malloc(order*hyperplanes*sizeof(edgelist_t));
int *words = (int*)malloc(order*hyperplanes*sizeof(int));
for(int i = 0; i < order; i++) {
graph[i].left = &left[rank*i];
graph[i].right = &right[rank*i];
graph[i].word = &words[hyperplanes*i];
}
graph[0].bruhat_higher = edgelists;
return graph;
}
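// free a graph allocated with graph_alloc, including its shared edge list block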
void graph_free(semisimple_type_t type, node_t *graph)
{
free(graph[0].left);
free(graph[0].right);
free(graph[0].word);
int order = coxeter_order(type);
// find the head of all edgelists by just taking the one having the lowest address
edgelist_t *edgelists = graph[0].bruhat_lower;
for(int i = 0; i < order; i++) {
if(graph[i].bruhat_lower < edgelists && graph[i].bruhat_lower != 0)
edgelists = graph[i].bruhat_lower;
if(graph[i].bruhat_higher < edgelists && graph[i].bruhat_higher != 0)
edgelists = graph[i].bruhat_higher;
}
free(edgelists);
}
/*********************************** THE ACTUAL ENUMERATION ****************************************/
typedef struct {
int rank;
int order;
int size; // the size of the graph; this can differ from the order if we take quotients beforehand
const node_t *graph;
int printstep;
const char *alphabet;
FILE *outfile;
} enumeration_info_t;
// calculate transitive closure; that is, fill current_level in every spot which must be marked with the current head (but was not already marked before), and -current_level in every opposite spot (including opposite to the head)
static int transitive_closure(const enumeration_info_t *info, signed char *level, int head, int current_level)
{
int is_slim = 1;
queue_t queue;
int current;
edgelist_t *edge;
queue_init(&queue);
level[info->graph[head].opposite] = -current_level;
queue_put(&queue, head);
for(int i = head + 1; i < info->size && level[i] != HEAD_MARKER; i++) { // everything to the right of the head which is still empty will not get marked in this or any higher level, so we can already mark its opposite (check i < size first so we never read past the end of level)
if(level[i] == current_level) {
is_slim = 0;
break;
} else if(level[i] == 0) {
level[i] = -current_level;
level[info->graph[i].opposite] = current_level;
queue_put(&queue, info->graph[i].opposite);
}
}
if(is_slim) {
while((current = queue_get(&queue)) != -1) {
edge = info->graph[current].bruhat_lower;
while(edge) {
if(level[edge->to] < 0) {
is_slim = 0;
break;
} else if(level[edge->to] == 0) {
level[edge->to] = current_level;
level[info->graph[edge->to].opposite] = -current_level;
queue_put(&queue, edge->to);
}
edge = edge->next;
}
}
}
return is_slim;
}
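// debug output to stderr, controlled by the PRINTSTEP environment variable (see enumerate_balanced_thickenings)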
static inline void output_thickening(const enumeration_info_t *info, signed char *level, int current_level, int is_slim, int is_fat, long count)
{
// if printstep is set accordingly, write state to stderr
if(is_slim && is_fat && info->printstep > 0 && (count + 1) % info->printstep == 0) {
print_thickening(info->rank, info->size, level, current_level, info->alphabet, stderr);
fprintf(stderr, "\n");
}
else if(info->printstep < 0) {
print_thickening(info->rank, info->size, level, current_level - !is_slim, info->alphabet, stderr);
fprintf(stderr, " ");
if(is_slim) {
fprintf(stderr, "S");
if(is_fat)
fprintf(stderr, "F");
}
fprintf(stderr, "\n");
}
}
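// recursively enumerate balanced thickenings: mark "head" as the new maximal element of the current level,
// take the downward transitive closure, and if the result is still slim (no element together with its
// opposite) either record a balanced thickening (when nothing left of the head is undecided) or recurse
// with every still-undecided element left of the head as the next head; returns the number of balanced
// thickenings found in this subtree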
static long enumerate_tree(const enumeration_info_t *info, signed char *level, int current_level, int head)
{
ERROR(current_level >= HEAD_MARKER, "HEAD_MARKER too small!\n");
level[head] = HEAD_MARKER;
int is_slim = transitive_closure(info, level, head, current_level);
int is_balanced = 0;
long count = 0;
// we have a candidate, check if it is a balanced thickening; if so, write it to the output file
if(is_slim) {
is_balanced = 1;
for(int i = head - 1; i >= 0; i--)
if(level[i] == 0)
is_balanced = 0;
}
// comment this out (or just put it inside the if block) to save 1/3 of the runtime
output_thickening(info, level, current_level, is_slim, is_balanced, count);
if(is_slim) {
if(is_balanced) {
count++;
fwrite(level, sizeof(signed char), info->size, info->outfile);
} else {
for(int i = head - 1; i >= 0; i--)
if(level[i] == 0)
count += enumerate_tree(info, level, current_level + 1, i);
}
}
// clean up
level[head] = 0;
for(int i = 0; i < info->size; i++)
if((level[i] >= current_level && level[i] != HEAD_MARKER) || level[i] <= -current_level) // undo the markings made at this level; markings and head markers of enclosing levels stay
level[i] = 0;
return count;
}
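// Top-level enumeration: try every element as the first head, working from the longest elements down,
// and sum up the balanced thickenings found; each one is written to outfile as "size" signed chars (the
// level array). A typical call sequence might look like this (just a sketch; driver, masks and alphabet
// are up to the caller):
//
//   node_t *graph = graph_alloc(type);
//   int size = prepare_simplified_graph(type, left, right, graph);
//   long n = enumerate_balanced_thickenings(type, graph, size, "abcdefgh", stdout);
//   graph_free(type, graph);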
long enumerate_balanced_thickenings(semisimple_type_t type, node_t *graph, int size, const char *alphabet, FILE *outfile)
{
signed char *level;
long count = 0;
enumeration_info_t info;
queue_t queue;
int current;
info.rank = coxeter_rank(type);
info.order = coxeter_order(type);
info.size = size;
info.graph = graph;
info.alphabet = (char*)alphabet;
info.outfile = outfile;
info.printstep = 0;
if(getenv("PRINTSTEP"))
info.printstep = atoi(getenv("PRINTSTEP"));
// the algorithm only works if the opposition pairing does not stabilize any element
// if this happens, there can be no balanced thickenings
for(int i = 0; i < info.size; i++)
if(graph[i].opposite == i)
return 0;
level = (signed char*)malloc(info.size*sizeof(signed char));
memset(level, 0, info.size*sizeof(signed char));
for(int i = info.size - 1; i >= 0; i--)
count += enumerate_tree(&info, level, 1, i);
free(level);
return count;
}