#define _GNU_SOURCE #include #include #include #include #include #include "thickenings.h" #include "coxeter.h" #include "queue.h" char *alphabetize(int *word, int len, const char *alphabet, char *buffer) { if(len == 0) { buffer[0] = '1'; buffer[1] = 0; return buffer; } int i = 0; for(i = 0; i < len; i++) buffer[i] = alphabet[word[i]]; buffer[i] = 0; return buffer; } void print_thickening(int rank, int order, const signed char *thickening, int upto_level, const char *alphabet, FILE *f) { for(int i = 0; i < order; i++) { if(thickening[i] == HEAD_MARKER) fprintf(f, "\e[41;37mx\e[0m"); else if(thickening[i] < - upto_level || thickening[i] > upto_level) fprintf(f, " "); else if(thickening[i] < 0 && thickening[i] > -10) fprintf(f, "\e[47;30m%d\e[0m", -thickening[i]); else if(thickening[i] <= -10) fprintf(f, "\e[47;30m+\e[0m"); else if(thickening[i] > 0 && thickening[i] < 10) fprintf(f, "\e[40;37m%d\e[0m", thickening[i]); else if(thickening[i] >= 10) fprintf(f, "\e[40;37m+\e[0m"); else fprintf(f, " "); } fprintf(f, "\e[K"); } static int compare_wordlength(const void *a, const void *b, void *gr) { int i = *((int*)a); int j = *((int*)b); node_t *graph = (node_t*)gr; return graph[i].wordlength - graph[j].wordlength; } void prepare_graph(semisimple_type_t type, node_t *graph) { queue_t queue; edgelist_t *edgelists_lower, *edgelists_higher; int rank, order, hyperplanes; edgelist_t *edge, *previous; int edgelist_count, hyperplane_count; int current; int *graph_data; node_t *graph_unsorted; int *wordlength_order, *reverse_wordlength_order, *seen; // initialize rank = coxeter_rank(type); order = coxeter_order(type); hyperplanes = coxeter_hyperplanes(type); edgelists_higher = graph[0].bruhat_higher; edgelists_lower = &graph[0].bruhat_higher[order*hyperplanes/2]; graph_data = (int*)malloc(order*rank*sizeof(int)); graph_unsorted = (node_t*)malloc(order*sizeof(node_t)); wordlength_order = (int*)malloc(order*sizeof(int)); reverse_wordlength_order = (int*)malloc(order*sizeof(int)); seen = (int*)malloc(order*sizeof(int)); for(int i = 0; i < order; i++) { graph_unsorted[i].wordlength = INT_MAX; graph[i].bruhat_lower = 0; graph[i].bruhat_higher = 0; graph[i].is_hyperplane_reflection = 0; } // get coxeter graph generate_coxeter_graph(type, graph_data); for(int i = 0; i < order; i++) for(int j = 0; j < rank; j++) graph_unsorted[i].left = &graph_data[i*rank]; // find wordlengths graph_unsorted[0].wordlength = 0; queue_init(&queue); queue_put(&queue, 0); while((current = queue_get(&queue)) != -1) { for(int i = 0; i < rank; i++) { int neighbor = graph_unsorted[current].left[i]; if(graph_unsorted[neighbor].wordlength > graph_unsorted[current].wordlength + 1) { graph_unsorted[neighbor].wordlength = graph_unsorted[current].wordlength + 1; queue_put(&queue, neighbor); } } } // sort by wordlength for(int i = 0; i < order; i++) wordlength_order[i] = i; qsort_r(wordlength_order, order, sizeof(int), compare_wordlength, graph_unsorted); // so wordlength_order is a map new index -> old index for(int i = 0; i < order; i++) reverse_wordlength_order[wordlength_order[i]] = i; // reverse_wordlength_order is a map old index -> new index for(int i = 0; i < order; i++) { // we have only set left and wordlength so far, so just copy these graph[i].wordlength = graph_unsorted[wordlength_order[i]].wordlength; for(int j = 0; j < rank; j++) graph[i].left[j] = reverse_wordlength_order[graph_unsorted[wordlength_order[i]].left[j]]; // rewrite references } // find words for(int i = 0; i < order; i++) memset(graph[i].word, 0, hyperplanes*sizeof(int)); queue_init(&queue); queue_put(&queue, 0); while((current = queue_get(&queue)) != -1) { for(int i = 0; i < rank; i++) { int neighbor = graph[current].left[i]; if(graph[neighbor].wordlength == graph[current].wordlength + 1 && graph[neighbor].word[0] == 0) { memcpy(&graph[neighbor].word[1], &graph[current].word[0], graph[current].wordlength*sizeof(int)); graph[neighbor].word[0] = i; queue_put(&queue, neighbor); } } } // generate right edges for(int i = 0; i < order; i++) { for(int j = 0; j < rank; j++) { current = graph[0].left[j]; for(int k = graph[i].wordlength - 1; k >= 0; k--) { // apply group element from right to left current = graph[current].left[graph[i].word[k]]; } graph[i].right[j] = current; } } // find opposites node_t *longest = &graph[order-1]; for(int i = 0; i < order; i++) { current = i; for(int k = longest->wordlength - 1; k >= 0; k--) current = graph[current].left[longest->word[k]]; graph[i].opposite = current; } // enumerate hyperplanes hyperplane_count = 0; for(int i = 0; i < order; i++) { for(int j = 0; j < rank; j++) { current = 0; int *word1 = graph[i].word; int word1len = graph[i].wordlength; int *word2 = graph[graph[i].right[j]].word; // want to calculate word2 * word1^{-1} int word2len = graph[graph[i].right[j]].wordlength; for(int k = 0; k < word1len; k++) // apply inverse, i.e. go from left to right current = graph[current].left[word1[k]]; for(int k = word2len - 1; k >= 0; k--) // now from right to left current = graph[current].left[word2[k]]; if(graph[current].is_hyperplane_reflection == 0) { graph[current].is_hyperplane_reflection = 1; hyperplane_count++; } } } // generate folding order edgelist_count = 0; for(int i = 0; i < order; i++) { if(graph[i].is_hyperplane_reflection) { for(int j = 0; j < order; j++) { current = j; for(int k = graph[i].wordlength - 1; k >= 0; k--) // apply hyperplane reflection current = graph[current].left[graph[i].word[k]]; if(graph[j].wordlength < graph[current].wordlength) { // current has higher bruhat order than j edgelists_lower[edgelist_count].to = j; edgelists_lower[edgelist_count].next = graph[current].bruhat_lower; graph[current].bruhat_lower = &edgelists_lower[edgelist_count]; edgelist_count++; } else if(graph[j].wordlength > graph[current].wordlength) { // j has higher bruhat order than current; these are already included from the other side } else { ERROR(1, "Chambers of equal word lengths should not be folded on each other!\n"); } } } } // remove redundant edges for(int i = 0; i < order; i++) { memset(seen, 0, order*sizeof(int)); queue_init(&queue); for(int len = 1; len <= graph[i].wordlength; len++) { // remove all edges originating from i of length len which connect to something already seen using shorter edges edge = graph[i].bruhat_lower; previous = (edgelist_t*)0; while(edge) { if(graph[i].wordlength - graph[edge->to].wordlength != len) { previous = edge; } else if(seen[edge->to]) { if(previous) previous->next = edge->next; else graph[i].bruhat_lower = edge->next; } else { previous = edge; seen[edge->to] = 1; queue_put(&queue, edge->to); } edge = edge->next; } // see which nodes we can reach using only edges up to length len, mark them as seen while((current = queue_get(&queue)) != -1) { edge = graph[current].bruhat_lower; while(edge) { if(!seen[edge->to]) { seen[edge->to] = 1; queue_put(&queue, edge->to); } edge = edge->next; } } } } // reverse folding order edgelist_count = 0; for(int i = 0; i < order; i++) { edge = graph[i].bruhat_lower; while(edge) { edgelists_higher[edgelist_count].to = i; edgelists_higher[edgelist_count].next = graph[edge->to].bruhat_higher; graph[edge->to].bruhat_higher = &edgelists_higher[edgelist_count]; edgelist_count++; edge = edge->next; } } free(graph_data); free(graph_unsorted); free(wordlength_order); free(reverse_wordlength_order); free(seen); } static int edgelist_contains(edgelist_t *list, int x) { while(list) { if(list->to == x) return 1; list = list->next; } return 0; } static edgelist_t *edgelist_add(edgelist_t *list, int new, edgelist_t *storage, int *storage_index) { edgelist_t *new_link = &storage[*storage_index]; new_link->next = list; new_link->to = new; (*storage_index)++; return new_link; } int prepare_simplified_graph(semisimple_type_t type, unsigned long left, unsigned long right, node_t *simplified_graph) { node_t *full_graph; int edgelists_used; int rank, order, hyperplanes; int *reduced, *group, *simplified; int *seen; int current; edgelist_t *edge, *previous; queue_t queue; int ncosets; if(opposition_involution(type, left) != left) return -1; edgelist_t *edgelists_higher = &simplified_graph[0].bruhat_higher[0]; edgelist_t *edgelists_lower = &simplified_graph[0].bruhat_higher[order*hyperplanes/2]; // get full graph full_graph = graph_alloc(type); prepare_graph(type, full_graph); // initialize stuff rank = coxeter_rank(type); order = coxeter_order(type); hyperplanes = coxeter_hyperplanes(type); reduced = (int*)malloc(order*sizeof(int)); group = (int*)malloc(order*sizeof(int)); simplified = (int*)malloc(order*sizeof(int)); for(int i = 0; i < order; i++) { group[i] = -1; reduced[i] = i; } // step 1: group for(int i = 0; i < order; i++) { if(group[i] != -1) continue; queue_init(&queue); queue_put(&queue, i); while((current = queue_get(&queue)) != -1) { if(group[current] != -1) continue; group[current] = i; for(int j = 0; j < rank; j++) { if(left & (1 << j)) queue_put(&queue, full_graph[current].left[j]); if(right & (1 << j)) queue_put(&queue, full_graph[current].right[j]); } } } // step 2: find minimum for(int i = 0; i < order; i++) if(full_graph[i].wordlength < full_graph[reduced[group[i]]].wordlength) reduced[group[i]] = i; // step 3: assign minimum to all for(int i = 0; i < order; i++) reduced[i] = reduced[group[i]]; // step 4: assign indices to cosets ncosets = 0; for(int i = 0; i < order; i++) if(reduced[i] == i) simplified[i] = ncosets++; for(int i = 0; i < order; i++) simplified[i] = simplified[reduced[i]]; // fprintf(stderr, "Number of double cosets: %d\n\n", ncosets); // simplified_graph = (node_t*) malloc(ncosets*sizeof(node_t)); seen = (int*) malloc(ncosets*sizeof(int)); edgelists_used = 0; // step 5: set up nodes from minima current = 0; for(int i = 0; i < order; i++) if(reduced[i] == i) { // is minimum memcpy(simplified_graph[simplified[i]].word, full_graph[i].word, full_graph[i].wordlength*sizeof(int)); simplified_graph[simplified[i]].wordlength = full_graph[i].wordlength; simplified_graph[simplified[i]].opposite = simplified[full_graph[i].opposite]; simplified_graph[simplified[i]].bruhat_lower = (edgelist_t*)0; simplified_graph[simplified[i]].bruhat_higher = (edgelist_t*)0; for(int j = 0; j < rank; j++) { simplified_graph[simplified[i]].left[j] = simplified[full_graph[i].left[j]]; simplified_graph[simplified[i]].right[j] = simplified[full_graph[i].right[j]]; } } // step 6: find order relations for(int i = 0; i < order; i++) { edge = full_graph[i].bruhat_lower; while(edge) { int this = simplified[i]; int that = simplified[edge->to]; if(this != that) { // found something if(!edgelist_contains(simplified_graph[this].bruhat_lower, that)) simplified_graph[this].bruhat_lower = edgelist_add(simplified_graph[this].bruhat_lower, that, edgelists_lower, &edgelists_used); ERROR(simplified_graph[this].wordlength <= simplified_graph[that].wordlength, "The order assumption is being violated!\n"); } edge = edge->next; } } // step 7: remove redundant edges for(int i = 0; i < ncosets; i++) { memset(seen, 0, ncosets*sizeof(int)); queue_init(&queue); for(int len = 1; len <= simplified_graph[i].wordlength; len++) { edge = simplified_graph[i].bruhat_lower; previous = (edgelist_t*)0; while(edge) { // only look at edges of this length now if(simplified_graph[i].wordlength - simplified_graph[edge->to].wordlength != len) { // we only consider edges of length len in this pass previous = edge; } else if(seen[edge->to]) { // this edge is redundant, remove it // fprintf(stderr, "removing edge from %d to %d\n", i, edge->to); if(previous) previous->next = edge->next; else simplified_graph[i].bruhat_lower = edge->next; } else { // this edge was not redundant, add to seen previous = edge; seen[edge->to] = 1; queue_put(&queue, edge->to); } edge = edge->next; } // calculate transitive closure of seen nodes while((current = queue_get(&queue)) != -1) { edge = simplified_graph[current].bruhat_lower; while(edge) { if(!seen[edge->to]) { seen[edge->to] = 1; queue_put(&queue, edge->to); } edge = edge->next; } } } } // step 8: revert order for(int i = 0; i < ncosets; i++) { edge = simplified_graph[i].bruhat_lower; while(edge) { simplified_graph[edge->to].bruhat_higher = edgelist_add(simplified_graph[edge->to].bruhat_higher, i, edgelists_higher, &edgelists_used); edge = edge->next; } } // output as graphviz dot file /* fprintf(stdout, "difull_graph test123 {\n"); for(int i = 0; i < ncosets; i++) { edge = simplified_graph[i].bruhat_lower; while(edge) { fprintf(stdout, "%s -> %s;\n", alphabetize(simplified_graph[i].word, simplified_graph[i].wordlength, alphabet, buffer), alphabetize(simplified_graph[edge->to].word, simplified_graph[edge->to].wordlength, alphabet, buffer2)); edge = edge->next; } } fprintf(stdout, "}\n"); */ // some output /* for(int i = 0; i < ncosets; i++) fprintf(stderr, "%s <=> %s\n", simplified_graph[i].wordlength == 0 ? "1" : alphabetize(simplified_graph[i].word, simplified_graph[i].wordlength, alphabet, buffer), simplified_graph[simplified_graph[i].opposite].wordlength == 0 ? "1" : alphabetize(simplified_graph[simplified_graph[i].opposite].word, simplified_graph[simplified_graph[i].opposite].wordlength, alphabet, buffer2)); */ // fprintf(stderr, "\nAdded %d edges.\n\n", edgelists_used); free(seen); free(reduced); free(group); free(simplified); graph_free(type, full_graph); return ncosets; } node_t *graph_alloc(semisimple_type_t type) { int rank = coxeter_rank(type); int order = coxeter_order(type); int hyperplanes = coxeter_hyperplanes(type); node_t *graph = (node_t*)malloc(order*sizeof(node_t)); int *left = (int*)malloc(order*rank*sizeof(int)); int *right = (int*)malloc(order*rank*sizeof(int)); edgelist_t *edgelists = (edgelist_t*)malloc(order*hyperplanes*sizeof(edgelist_t)); int *words = (int*)malloc(order*hyperplanes*sizeof(int)); for(int i = 0; i < order; i++) { graph[i].left = &left[rank*i]; graph[i].right = &right[rank*i]; graph[i].word = &words[hyperplanes*i]; } graph[0].bruhat_higher = edgelists; return graph; } void graph_free(semisimple_type_t type, node_t *graph) { free(graph[0].left); free(graph[0].right); free(graph[0].word); int order = coxeter_order(type); // find the head of all edgelists by just taking the one having the lowest address edgelist_t *edgelists = graph[0].bruhat_lower; for(int i = 0; i < order; i++) { if(graph[i].bruhat_lower < edgelists && graph[i].bruhat_lower != 0) edgelists = graph[i].bruhat_lower; if(graph[i].bruhat_higher < edgelists && graph[i].bruhat_higher != 0) edgelists = graph[i].bruhat_higher; } free(edgelists); } /*********************************** THE ACTUAL ENUMERATION ****************************************/ typedef struct { int rank; int order; int size; // the size of the graph; this can vary from the order if we take quotients beforehand const node_t *graph; int printstep; const char *alphabet; FILE *outfile; } enumeration_info_t; // calculate transitive closure; that is, fill current_level in every spot which must be marked with the current head (but was not already marked before), and -current_level in every opposite spot (including opposite to the head) static int transitive_closure(const enumeration_info_t *info, signed char *level, int head, int current_level) { int is_slim = 1; queue_t queue; int current; edgelist_t *edge; queue_init(&queue); level[info->graph[head].opposite] = -current_level; queue_put(&queue, head); for(int i = head + 1; level[i] != HEAD_MARKER && i < info->size; i++) { // everything which is right to the head and empty will not get marked in this or higher levels, so we can mark its opposite if(level[i] == current_level) { is_slim = 0; break; } if(level[i] == 0) { level[i] = -current_level; level[info->graph[i].opposite] = current_level; queue_put(&queue, info->graph[i].opposite); } } if(is_slim) { while((current = queue_get(&queue)) != -1) { edge = info->graph[current].bruhat_lower; while(edge) { if(level[edge->to] < 0) { is_slim = 0; break; } else if(level[edge->to] == 0) { level[edge->to] = current_level; level[info->graph[edge->to].opposite] = -current_level; queue_put(&queue, edge->to); } edge = edge->next; } } } return is_slim; } static inline void output_thickening(const enumeration_info_t *info, signed char *level, int current_level, int is_slim, int is_fat, long count) { // if printstep is set accordingly, write state to stderr if(is_slim && is_fat && info->printstep > 0 && (count + 1) % info->printstep == 0) { print_thickening(info->rank, info->size, level, current_level, info->alphabet, stderr); fprintf(stderr, "\n"); } else if(info->printstep < 0) { print_thickening(info->rank, info->size, level, current_level - !is_slim, info->alphabet, stderr); fprintf(stderr, " "); if(is_slim) { fprintf(stderr, "S"); if(is_fat) fprintf(stderr, "F"); } fprintf(stderr, "\n"); } } static long enumerate_tree(const enumeration_info_t *info, signed char *level, int current_level, int head) { ERROR(current_level >= HEAD_MARKER, "HEAD_MARKER too small!\n"); level[head] = HEAD_MARKER; int is_slim = transitive_closure(info, level, head, current_level); int is_balanced = 0; int count = 0; // we have a candidate, check if it is a balanced thickening; if so, write to stdout if(is_slim) { is_balanced = 1; for(int i = head - 1; i >= 0; i--) if(level[i] == 0) is_balanced = 0; } // comment this out (or just put it inside the if block) to save 1/3 of the runtime output_thickening(info, level, current_level, is_slim, is_balanced, count); if(is_slim) { if(is_balanced) { count++; fwrite(level, sizeof(signed char), info->size, info->outfile); } else { for(int i = head - 1; i >= 0; i--) if(level[i] == 0) count += enumerate_tree(info, level, current_level + 1, i); } } // clean up level[head] = 0; for(int i = 0; i < info->size; i++) if(level[i] >= current_level && level[i] != HEAD_MARKER || level[i] <= -current_level) level[i] = 0; return count; } long enumerate_balanced_thickenings(semisimple_type_t type, node_t *graph, int size, const char *alphabet, FILE *outfile) { signed char *level; long count = 0; enumeration_info_t info; queue_t queue; int current; info.rank = coxeter_rank(type); info.order = coxeter_order(type); info.size = size; info.graph = graph; info.alphabet = (char*)alphabet; info.outfile = outfile; info.printstep = 0; if(getenv("PRINTSTEP")) info.printstep = atoi(getenv("PRINTSTEP")); // the algorithm only works if the opposition pairing does not stabilize any element // if this happens, there can be no balanced thickenings for(int i = 0; i < info.size; i++) if(graph[i].opposite == i) return 0; level = (signed char*)malloc(info.size*sizeof(int)); memset(level, 0, info.size*sizeof(int)); for(int i = info.size - 1; i >= 0; i--) count += enumerate_tree(&info, level, 1, i); free(level); return count; }