#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#include "thickenings.h"
#include "weyl.h"
#include "queue.h"

char *alphabetize(int *word, int len, const char *alphabet, char *buffer)
{
	if(len == 0) {
		buffer[0] = '1';
		buffer[1] = 0;
		return buffer;
	}

	int i = 0;
	for(i = 0; i < len; i++)
		buffer[i] = alphabet[word[i]];
	buffer[i] = 0;

	return buffer;
}

void print_thickening(int rank, int order, const signed char *thickening, int upto_level, const char *alphabet, FILE *f)
{
	for(int i = 0; i < order; i++) {
		if(thickening[i] == HEAD_MARKER)
			fprintf(f, "\e[41;37mx\e[0m");
		else if(thickening[i] < -upto_level || thickening[i] > upto_level)
			fprintf(f, " ");
		else if(thickening[i] < 0 && thickening[i] > -10)
			fprintf(f, "\e[47;30m%d\e[0m", -thickening[i]);
		else if(thickening[i] <= -10)
			fprintf(f, "\e[47;30m+\e[0m");
		else if(thickening[i] > 0 && thickening[i] < 10)
			fprintf(f, "\e[40;37m%d\e[0m", thickening[i]);
		else if(thickening[i] >= 10)
			fprintf(f, "\e[40;37m+\e[0m");
		else
			fprintf(f, " ");
	}
	fprintf(f, "\e[K");
}

static int compare_wordlength(const void *a, const void *b, void *gr)
{
	int i = *((int*)a);
	int j = *((int*)b);
	node_t *graph = (node_t*)gr;

	return graph[i].wordlength - graph[j].wordlength;
}
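
/*
 Overview of prepare_graph (a summary of the steps below): starting from the
 multiplication data produced by weyl_generate, it computes for every group
 element its word length and one reduced word, the left and right Cayley
 graphs, the opposition map x -> w0*x (left multiplication by the longest
 element), and the Bruhat order. The order is first generated from all
 hyperplane reflections and then transitively reduced, so bruhat_lower and
 bruhat_higher end up holding only the covering relations. Finally the nodes
 are re-indexed so that the element opposite to index j sits at index
 order - j - 1, which the enumeration code at the end of this file relies on.
*/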

void prepare_graph(semisimple_type_t type, node_t *graph)
{
	queue_t queue;
	edgelist_t *edgelists_lower, *edgelists_higher;
	int rank, order, hyperplanes;
	edgelist_t *edge, *previous;
	int edgelist_count, hyperplane_count;
	int current;
	weylgroup_element_t *graph_data;
	node_t *graph_unsorted;
	int *ordering, *reverse_ordering, *seen;

	// initialize
	rank = weyl_rank(type);
	order = weyl_order(type);
	hyperplanes = weyl_hyperplanes(type);

	edgelists_higher = graph[0].bruhat_higher;
	edgelists_lower = &graph[0].bruhat_higher[order*hyperplanes/2];

	graph_data = weyl_alloc(type);
	graph_unsorted = (node_t*)malloc(order*sizeof(node_t));
	ordering = (int*)malloc(order*sizeof(int));
	reverse_ordering = (int*)malloc(order*sizeof(int));
	seen = (int*)malloc(order*sizeof(int));

	for(int i = 0; i < order; i++) {
		graph_unsorted[i].wordlength = INT_MAX;
		graph[i].bruhat_lower = 0;
		graph[i].bruhat_higher = 0;
		graph[i].is_hyperplane_reflection = 0;
	}

	LOG("Generate Weyl group.\n");

	weyl_generate(type, graph_data);

	for(int i = 0; i < order; i++)
		for(int j = 0; j < rank; j++) {
			graph_unsorted[i].left = graph_data[i].left;
			graph_unsorted[i].id = graph_data[i].id;
		}

	// find wordlengths
	LOG("Determine word lengths.\n");

	graph_unsorted[0].wordlength = 0;
	queue_init(&queue);
	queue_put(&queue, 0);
	while((current = queue_get(&queue)) != -1) {
		for(int i = 0; i < rank; i++) {
			int neighbor = graph_unsorted[current].left[i];
			if(graph_unsorted[neighbor].wordlength > graph_unsorted[current].wordlength + 1) {
				graph_unsorted[neighbor].wordlength = graph_unsorted[current].wordlength + 1;
				queue_put(&queue, neighbor);
			}
		}
	}

	LOG("Sort by wordlength.\n");

	for(int i = 0; i < order; i++)
		ordering[i] = i;
	qsort_r(ordering, order, sizeof(int), compare_wordlength, graph_unsorted); // so ordering is a map new index -> old index
	for(int i = 0; i < order; i++)
		reverse_ordering[ordering[i]] = i; // reverse_ordering is a map old index -> new index

	for(int i = 0; i < order; i++) {
		// we have only set left, wordlength and id so far, so just copy these
		graph[i].wordlength = graph_unsorted[ordering[i]].wordlength;
		graph[i].id = graph_unsorted[ordering[i]].id;
		for(int j = 0; j < rank; j++)
			graph[i].left[j] = reverse_ordering[graph_unsorted[ordering[i]].left[j]]; // rewrite references
	}

	LOG("Find shortest words.\n");

	for(int i = 0; i < order; i++)
		memset(graph[i].word, 0, hyperplanes*sizeof(int));
	queue_init(&queue);
	queue_put(&queue, 0);
	while((current = queue_get(&queue)) != -1) {
		for(int i = 0; i < rank; i++) {
			int neighbor = graph[current].left[i];
			if(graph[neighbor].wordlength == graph[current].wordlength + 1 && graph[neighbor].word[0] == 0) {
				memcpy(&graph[neighbor].word[1], &graph[current].word[0], graph[current].wordlength*sizeof(int));
				graph[neighbor].word[0] = i;
				queue_put(&queue, neighbor);
			}
		}
	}

	LOG("Generate right edges.\n");

	for(int i = 0; i < order; i++) {
		for(int j = 0; j < rank; j++) {
			current = graph[0].left[j];
			for(int k = graph[i].wordlength - 1; k >= 0; k--) { // apply group element from right to left
				current = graph[current].left[graph[i].word[k]];
			}
			graph[i].right[j] = current;
		}
	}

	LOG("Find opposites.\n");

	node_t *longest = &graph[order-1];
	for(int i = 0; i < order; i++) {
		current = i;
		for(int k = longest->wordlength - 1; k >= 0; k--)
			current = graph[current].left[longest->word[k]];
		graph[i].opposite = current;
	}

	LOG("Enumerate hyperplanes.\n");

	hyperplane_count = 0;
	for(int i = 0; i < order; i++) {
		for(int j = 0; j < rank; j++) {
			current = 0;
			int *word1 = graph[i].word;
			int word1len = graph[i].wordlength;
			int *word2 = graph[graph[i].right[j]].word; // want to calculate word2 * word1^{-1}
			int word2len = graph[graph[i].right[j]].wordlength;
			for(int k = 0; k < word1len; k++) // apply inverse, i.e. go from left to right
				current = graph[current].left[word1[k]];
			for(int k = word2len - 1; k >= 0; k--) // now from right to left
				current = graph[current].left[word2[k]];
			if(graph[current].is_hyperplane_reflection == 0) {
				graph[current].is_hyperplane_reflection = 1;
				hyperplane_count++;
			}
		}
	}

	LOG("Determine Bruhat order.\n");

	edgelist_count = 0;
	for(int i = 0; i < order; i++) {
		if(graph[i].is_hyperplane_reflection) {
			for(int j = 0; j < order; j++) {
				current = j;
				for(int k = graph[i].wordlength - 1; k >= 0; k--) // apply hyperplane reflection
					current = graph[current].left[graph[i].word[k]];
				if(graph[j].wordlength < graph[current].wordlength) { // current has higher bruhat order than j
					edgelists_lower[edgelist_count].to = j;
					edgelists_lower[edgelist_count].next = graph[current].bruhat_lower;
					graph[current].bruhat_lower = &edgelists_lower[edgelist_count];
					edgelist_count++;
				} else if(graph[j].wordlength > graph[current].wordlength) {
					// j has higher bruhat order than current; these are already included from the other side
				} else {
					ERROR(1, "Chambers of equal word lengths should not be folded on each other!\n");
				}
			}
		}
	}
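
	/*
	 The loops above insert an edge for every pair related by a hyperplane
	 reflection, so the relation still contains many implied edges. The pass
	 below is a transitive reduction: for each node i the edges are processed
	 by increasing length difference, and an edge i -> v is dropped as soon as
	 v is already reachable through shorter edges. A tiny example: if i covers
	 u, u covers v, and the reflection step also produced a direct edge i -> v,
	 that direct edge is removed, leaving only the covering relations
	 i -> u -> v.
	*/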

	LOG("Perform transitive reduction.\n");

	for(int i = 0; i < order; i++) {
		memset(seen, 0, order*sizeof(int));
		queue_init(&queue);

		for(int len = 1; len <= graph[i].wordlength; len++) {
			// remove all edges originating from i of length len which connect to something already seen using shorter edges
			edge = graph[i].bruhat_lower;
			previous = (edgelist_t*)0;
			while(edge) {
				if(graph[i].wordlength - graph[edge->to].wordlength != len) {
					previous = edge;
				} else if(seen[edge->to]) {
					if(previous)
						previous->next = edge->next;
					else
						graph[i].bruhat_lower = edge->next;
				} else {
					previous = edge;
					seen[edge->to] = 1;
					queue_put(&queue, edge->to);
				}
				edge = edge->next;
			}

			// see which nodes we can reach using only edges up to length len, mark them as seen
			while((current = queue_get(&queue)) != -1) {
				edge = graph[current].bruhat_lower;
				while(edge) {
					if(!seen[edge->to]) {
						seen[edge->to] = 1;
						queue_put(&queue, edge->to);
					}
					edge = edge->next;
				}
			}
		}
	}

	LOG("Revert Bruhat order.\n");

	edgelist_count = 0;
	for(int i = 0; i < order; i++) {
		edge = graph[i].bruhat_lower;
		while(edge) {
			edgelists_higher[edgelist_count].to = i;
			edgelists_higher[edgelist_count].next = graph[edge->to].bruhat_higher;
			graph[edge->to].bruhat_higher = &edgelists_higher[edgelist_count];
			edgelist_count++;
			edge = edge->next;
		}
	}

	LOG("Sort opposites.\n");

	// additional sorting step to force opposite property (opposite of j is at n - j - 1)
	for(int i = 0; i < order; i++)
		reverse_ordering[i] = -1;
	for(int i = 0, j = 0; i < order; i++) { // i = old index, j = new index
		if(reverse_ordering[i] == -1) {
			reverse_ordering[i] = j;
			ordering[j] = i;
			reverse_ordering[graph[i].opposite] = order - j - 1;
			ordering[order - j - 1] = graph[i].opposite;
			j++;
		}
	}

	memcpy(graph_unsorted, graph, order*sizeof(node_t));
	for(int i = 0; i < order; i++) {
		graph[i] = graph_unsorted[ordering[i]];
		graph[i].opposite = reverse_ordering[graph[i].opposite];
		for(int j = 0; j < rank; j++) {
			graph[i].left[j] = reverse_ordering[graph[i].left[j]];
			graph[i].right[j] = reverse_ordering[graph[i].right[j]];
		}
		for(edge = graph[i].bruhat_lower; edge; edge = edge->next)
			edge->to = reverse_ordering[edge->to];
		for(edge = graph[i].bruhat_higher; edge; edge = edge->next)
			edge->to = reverse_ordering[edge->to];
	}

	weyl_free(graph_data);
	free(graph_unsorted);
	free(ordering);
	free(reverse_ordering);
	free(seen);
}

static int edgelist_contains(edgelist_t *list, int x)
{
	while(list) {
		if(list->to == x)
			return 1;
		list = list->next;
	}
	return 0;
}

static edgelist_t *edgelist_add(edgelist_t *list, int new, edgelist_t *storage, int *storage_index)
{
	edgelist_t *new_link = &storage[*storage_index];
	new_link->next = list;
	new_link->to = new;
	(*storage_index)++;
	return new_link;
}
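
/*
 prepare_simplified_graph builds the analogous structure on double cosets: left
 and right are bit masks of simple reflections, and two elements are identified
 whenever they differ by left/right multiplication with one of the selected
 generators (step 1 below). Each coset is represented by an element of minimal
 word length (steps 2-5), the Bruhat order is the one induced from the full
 graph (step 6), and the same transitive reduction and opposite-pairing
 re-indexing as in prepare_graph are applied afterwards (steps 7 and 8). The
 return value is the number of cosets, or -1 if the left mask is not invariant
 under the opposition of simple roots.
*/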

int prepare_simplified_graph(semisimple_type_t type, unsigned long left, unsigned long right, node_t *simplified_graph)
{
	node_t *full_graph;
	int edgelists_used;
	int rank, order, hyperplanes;
	int *reduced, *group, *simplified;
	int *seen;
	int current;
	edgelist_t *edge, *previous;
	queue_t queue;
	int ncosets;

	rank = weyl_rank(type);
	order = weyl_order(type);
	hyperplanes = weyl_hyperplanes(type);

	for(int i = 0; i < rank; i++) {
		int oppi = weyl_opposition(type, i);
		if(left & BIT(i) && !(left & BIT(oppi)) || left & BIT(oppi) && !(left & BIT(i)))
			return -1;
	}

	edgelist_t *edgelists_higher = &simplified_graph[0].bruhat_higher[0];
	edgelist_t *edgelists_lower = &simplified_graph[0].bruhat_higher[order*hyperplanes/2];

	// get full graph
	full_graph = graph_alloc(type);
	prepare_graph(type, full_graph);

	LOG("Full graph generated.\n");

	// initialize stuff
	reduced = (int*)malloc(order*sizeof(int));
	group = (int*)malloc(order*sizeof(int));
	simplified = (int*)malloc(order*sizeof(int));
	for(int i = 0; i < order; i++) {
		group[i] = -1;
		reduced[i] = i;
	}

	LOG("Group by double coset.\n");

	// step 1: group
	for(int i = 0; i < order; i++) {
		if(group[i] != -1)
			continue;
		queue_init(&queue);
		queue_put(&queue, i);
		while((current = queue_get(&queue)) != -1) {
			if(group[current] != -1)
				continue;
			group[current] = i;
			for(int j = 0; j < rank; j++) {
				if(left & (1 << j))
					queue_put(&queue, full_graph[current].left[j]);
				if(right & (1 << j))
					queue_put(&queue, full_graph[current].right[j]);
			}
		}
	}

	LOG("Find minimal length elements.\n");

	// step 2: find minimum
	for(int i = 0; i < order; i++)
		if(full_graph[i].wordlength < full_graph[reduced[group[i]]].wordlength)
			reduced[group[i]] = i;

	// step 3: assign minimum to all
	for(int i = 0; i < order; i++)
		reduced[i] = reduced[group[i]];

	// step 4: assign indices to cosets
	ncosets = 0;
	for(int i = 0; i < order; i++)
		if(reduced[i] == i)
			simplified[i] = ncosets++;
	for(int i = 0; i < order; i++)
		simplified[i] = simplified[reduced[i]];

	seen = (int*)malloc(ncosets*sizeof(int));
	edgelists_used = 0;

	LOG("Copy minimal elements.\n");

	// step 5: set up nodes from minima
	current = 0;
	for(int i = 0; i < order; i++)
		if(reduced[i] == i) { // is minimum
			memcpy(simplified_graph[simplified[i]].word, full_graph[i].word, full_graph[i].wordlength*sizeof(int));
			simplified_graph[simplified[i]].wordlength = full_graph[i].wordlength;
			simplified_graph[simplified[i]].opposite = simplified[full_graph[i].opposite];
			simplified_graph[simplified[i]].id = full_graph[i].id;
			simplified_graph[simplified[i]].bruhat_lower = (edgelist_t*)0;
			simplified_graph[simplified[i]].bruhat_higher = (edgelist_t*)0;
			for(int j = 0; j < rank; j++) {
				simplified_graph[simplified[i]].left[j] = simplified[full_graph[i].left[j]];
				simplified_graph[simplified[i]].right[j] = simplified[full_graph[i].right[j]];
			}
		}

	LOG("Find induced order.\n");

	// step 6: find order relations
	for(int i = 0; i < order; i++) {
		edge = full_graph[i].bruhat_lower;
		while(edge) {
			int this = simplified[i];
			int that = simplified[edge->to];
			if(this != that) { // found something
				if(!edgelist_contains(simplified_graph[this].bruhat_lower, that))
					simplified_graph[this].bruhat_lower = edgelist_add(simplified_graph[this].bruhat_lower, that, edgelists_lower, &edgelists_used);
				ERROR(simplified_graph[this].wordlength <= simplified_graph[that].wordlength, "The order assumption is being violated!\n");
			}
			edge = edge->next;
		}
	}

	LOG("Perform transitive reduction.\n");

	// step 7: remove redundant edges
	for(int i = 0; i < ncosets; i++) {
		memset(seen, 0, ncosets*sizeof(int));
		queue_init(&queue);

		for(int len = 1; len <= simplified_graph[i].wordlength; len++) {
			edge = simplified_graph[i].bruhat_lower;
			previous = (edgelist_t*)0;
			while(edge) { // only look at edges of this length now
				if(simplified_graph[i].wordlength - simplified_graph[edge->to].wordlength != len) {
					// we only consider edges of length len in this pass
					previous = edge;
				} else if(seen[edge->to]) {
					// this edge is redundant, remove it
					if(previous)
						previous->next = edge->next;
					else
						simplified_graph[i].bruhat_lower = edge->next;
				} else {
					// this edge was not redundant, add to seen
					previous = edge;
					seen[edge->to] = 1;
					queue_put(&queue, edge->to);
				}
				edge = edge->next;
			}

			// calculate transitive closure of seen nodes
			while((current = queue_get(&queue)) != -1) {
				edge = simplified_graph[current].bruhat_lower;
				while(edge) {
					if(!seen[edge->to]) {
						seen[edge->to] = 1;
						queue_put(&queue, edge->to);
					}
					edge = edge->next;
				}
			}
		}
	}

	LOG("Revert order.\n");

	// step 8: revert order
	edgelists_used = 0;
	for(int i = 0; i < ncosets; i++) {
		edge = simplified_graph[i].bruhat_lower;
		while(edge) {
			simplified_graph[edge->to].bruhat_higher = edgelist_add(simplified_graph[edge->to].bruhat_higher, i, edgelists_higher, &edgelists_used);
			edge = edge->next;
		}
	}
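
	/*
	 As in prepare_graph, the cosets are re-indexed below so that the coset
	 opposite to index i ends up at index ncosets - 1 - i: proper pairs are
	 placed symmetrically from both ends first, and self-opposite cosets fill
	 the middle. The enumeration further down stores only the first half of an
	 ideal, so it depends on this pairing; self-opposite cosets rule out
	 balanced ideals altogether, which enumerate_balanced_thickenings checks
	 explicitly.
	*/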

	LOG("Sort opposites.\n");

	int *ordering = (int*)malloc(ncosets*sizeof(int));
	int *reverse_ordering = (int*)malloc(ncosets*sizeof(int));
	node_t *unsorted = (node_t*)malloc(ncosets*sizeof(node_t));
	int opp, pos;

	pos = 0;
	for(int i = 0; i < ncosets; i++) { // first all the pairs
		opp = simplified_graph[i].opposite;
		if(opp > i) { // first occurrence of this pair
			ordering[pos] = i;
			ordering[ncosets-1-pos] = opp;
			reverse_ordering[i] = pos;
			reverse_ordering[opp] = ncosets-1-pos;
			pos++;
		}
	}
	for(int i = 0; i < ncosets; i++) // and finally the self-opposites
		if(simplified_graph[i].opposite == i) {
			ordering[pos] = i;
			reverse_ordering[i] = pos;
			pos++;
		}

	// now really do it
	memcpy(unsorted, simplified_graph, ncosets*sizeof(node_t));
	for(int i = 0; i < ncosets; i++) {
		simplified_graph[i] = unsorted[ordering[i]];
		simplified_graph[i].opposite = reverse_ordering[simplified_graph[i].opposite];
		for(edgelist_t *edge = simplified_graph[i].bruhat_lower; edge != (edgelist_t*)0; edge = edge->next)
			edge->to = reverse_ordering[edge->to];
		for(edgelist_t *edge = simplified_graph[i].bruhat_higher; edge != (edgelist_t*)0; edge = edge->next)
			edge->to = reverse_ordering[edge->to];
		for(int j = 0; j < rank; j++) {
			simplified_graph[i].left[j] = reverse_ordering[simplified_graph[i].left[j]];
			simplified_graph[i].right[j] = reverse_ordering[simplified_graph[i].right[j]];
		}
	}

	free(ordering);
	free(reverse_ordering);
	free(unsorted);
	free(seen);
	free(reduced);
	free(group);
	free(simplified);
	graph_free(type, full_graph);

	LOG("Simplified graph generated.\n");

	return ncosets;
}
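
/*
 Memory layout used by graph_alloc: all nodes share flat arrays for left, right
 and the reduced words (hyperplanes entries per node, the length of the longest
 element), plus one block of edgelist links. That block is parked in
 graph[0].bruhat_higher; prepare_graph and prepare_simplified_graph split it
 into a lower and a higher half before overwriting the per-node
 bruhat_lower/bruhat_higher pointers.
*/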

node_t *graph_alloc(semisimple_type_t type)
{
	int rank = weyl_rank(type);
	int order = weyl_order(type);
	int hyperplanes = weyl_hyperplanes(type);

	node_t *graph = (node_t*)malloc(order*sizeof(node_t));
	int *left = (int*)malloc(order*rank*sizeof(int));
	int *right = (int*)malloc(order*rank*sizeof(int));
	edgelist_t *edgelists = (edgelist_t*)malloc(order*hyperplanes*sizeof(edgelist_t));
	int *words = (int*)malloc(order*hyperplanes*sizeof(int));

	for(int i = 0; i < order; i++) {
		graph[i].left = &left[rank*i];
		graph[i].right = &right[rank*i];
		graph[i].word = &words[hyperplanes*i];
	}
	graph[0].bruhat_higher = edgelists;

	return graph;
}

void graph_free(semisimple_type_t type, node_t *graph)
{
	free(graph[0].left);
	free(graph[0].right);
	free(graph[0].word);

	int order = weyl_order(type);

	// find the head of all edgelists by just taking the one having the lowest address
	edgelist_t *edgelists = graph[0].bruhat_lower;
	for(int i = 0; i < order; i++) {
		if(graph[i].bruhat_lower < edgelists && graph[i].bruhat_lower != 0)
			edgelists = graph[i].bruhat_lower;
		if(graph[i].bruhat_higher < edgelists && graph[i].bruhat_higher != 0)
			edgelists = graph[i].bruhat_higher;
	}
	free(edgelists);
}

/*********************************** THE ACTUAL ENUMERATION ****************************************/

typedef struct {
	int size; // the size of the weyl group; however, we only store the first size/2 elements
	bitvec_t *principal_pos;
	bitvec_t *principal_neg;
	int *principal_is_slim;
	void (*callback)(const bitvec_t *, int, void*);
	void *callback_data;
} enumeration_info_t;
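
/*
 Encoding used by the enumeration (see generate_principal_ideals below): only
 elements 0 .. size/2 - 1 are represented explicitly. Bit j of a "pos" vector
 means element j is in the ideal; bit j of a "neg" vector means element j is
 excluded because its opposite, element size - 1 - j, is in the ideal. An ideal
 is slim precisely when pos and neg are disjoint, and balanced when every bit
 of the first half is set in exactly one of the two, which is the state the
 recursion below works towards.
*/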

/*
 This function enumerates all balanced ideals satisfying certain constraints, given by its arguments pos, neg and next_neg

 - info: constant information which just gets passed on to recursive calls, mainly contains the principal ideals
 - pos: a set of elements which have to be positive (that is, in the ideal)
 - neg: a set of elements which have to be negative (not in the ideal)
 - next_neg: this element has to be the first negative one not already contained in neg; if next_neg is info.size/2, then everything not in neg has to be positive
 - already_known: not a constraint, but just a hint to speed things up; tells the function that the first already_known elements are set either in neg or in pos; must be less than or equal to next_neg
 - returns the number of balanced ideals found

 uses the bitvector functions bv_union, bv_copy, bv_set_range_except, bv_disjoint and bv_next_zero
*/
static long enumerate_tree(const enumeration_info_t *info, const bitvec_t *pos, const bitvec_t *neg, int next_neg, int already_known, int level)
{
	static long totcount = 0;
	bitvec_t newpos, newneg, known;
	int next_next_neg;
	long count = 0;

	// the omission of next_neg means inclusion of info->size - 1 - next_neg
	// add its principal ideal to pos and the opposite to neg
	if(next_neg != info->size/2) {
		// if the principal ideal we want to add is not slim by itself, we don't even have to try;
		// but there is not really a performance benefit, it rather makes it slower
		// if(!info->principal_is_slim[info->size - 1 - next_neg])
		//	return 0;
		bv_union(&info->principal_pos[info->size - 1 - next_neg], pos, &newpos);
		bv_union(&info->principal_neg[info->size - 1 - next_neg], neg, &newneg);
	} else {
		// or, if there is no next_neg, just copy
		bv_copy(pos, &newpos);
		bv_copy(neg, &newneg);
	}

	// everything before next_neg which was unknown should be set to positive; to speed this up, we can start with already_known
	bv_set_range_except(&newpos, neg, already_known, next_neg);

#ifdef _DEBUG
	bv_print_nice(stderr, &newpos, &newneg, -1, info->size/2);
	fprintf(stderr, "\n");
#endif

	// check if this leads to any conflicts (equivalently, violates slimness)
	if(!bv_disjoint(&newpos, &newneg))
		return 0;

	// what do we know so far?
	bv_union(&newpos, &newneg, &known);
	next_next_neg = bv_next_zero(&known, next_neg + 1);

	if(next_next_neg >= info->size/2) {
		// there is no unknown left, so we found a balanced ideal
		if(info->callback)
			info->callback(&newpos, info->size, info->callback_data);
		return 1;
	}

	do {
		count += enumerate_tree(info, &newpos, &newneg, next_next_neg, next_neg + 1, level + 1);
		next_next_neg = bv_next_zero(&known, next_next_neg + 1);
	} while(next_next_neg <= info->size/2);

	// multiprocessing stuff
	// if(level == 3)
	//	fprintf(stderr, "%d\n", count);

	return count;
}

void generate_principal_ideals(node_t *graph, int size, bitvec_t *pos, bitvec_t *neg, int *is_slim)
{
	queue_t queue;
	int current;
	edgelist_t *edge;

	// generate principal ideals
	int *principal = (int*)malloc(size*sizeof(int));
	for(int i = 0; i < size; i++) {
		memset(principal, 0, size*sizeof(int));
		principal[i] = 1;
		queue_init(&queue);
		queue_put(&queue, i);
		while((current = queue_get(&queue)) != -1)
			for(edge = graph[current].bruhat_lower; edge; edge = edge->next)
				if(!principal[edge->to]) {
					principal[edge->to] = 1;
					queue_put(&queue, edge->to);
				}

		// copy the first half into bitvectors
		bv_clear(&pos[i]);
		bv_clear(&neg[i]);
		is_slim[i] = 1;
		for(int j = 0; j < size/2; j++)
			if(principal[j])
				bv_set_bit(&pos[i], j);
		for(int j = 0; j < size/2; j++)
			if(principal[size - 1 - j]) {
				bv_set_bit(&neg[i], j);
				if(bv_get_bit(&pos[i], j))
					is_slim[i] = 0;
			}

#ifdef _DEBUG
		if(is_slim[i]) {
			fprintf(stderr, " ids: [0");
			for(int j = 1; j < size; j++)
				if(principal[j])
					fprintf(stderr, ", %d", graph[j].id);
			fprintf(stderr, "]\n");
		}
#endif
	}
	free(principal);

	// output principal ideals
#ifdef _DEBUG
	for(int i = 0; i < size; i++) {
		fprintf(stderr, "%2d: ", i);
		bv_print_nice(stderr, &pos[i], &neg[i], -1, size/2);
		fprintf(stderr, "\n");
	}
	fprintf(stderr, "\n");
#endif
}

/*
 enumerates all balanced ideals

 - graph: hasse diagram of the bruhat order (of double cosets) with opposition pairing
 - size: number of nodes in graph
 - callback: called whenever a balanced ideal is found
 - callback_data: arbitrary data passed on to the callback

 returns the number of balanced ideals
*/
long enumerate_balanced_thickenings(node_t *graph, int size, void (*callback)(const bitvec_t *, int, void*), void *callback_data)
{
	long count = 0;
	enumeration_info_t info;

	info.size = size;
	info.callback = callback;
	info.callback_data = callback_data;
	info.principal_pos = (bitvec_t*)malloc(info.size*sizeof(bitvec_t));
	info.principal_neg = (bitvec_t*)malloc(info.size*sizeof(bitvec_t));
	info.principal_is_slim = (int*)malloc(info.size*sizeof(int));

	// the algorithm only works if the opposition pairing does not stabilize any element
	// if this happens, there can be no balanced thickenings
	for(int i = 0; i < info.size; i++)
		if(graph[i].opposite == i)
			return 0;

	// we can only handle bitvectors up to 64*BV_QWORD_RANK bits, but we only store half of the weyl group
	if(info.size > 128*BV_QWORD_RANK)
		return -1;

	generate_principal_ideals(graph, size, info.principal_pos, info.principal_neg, info.principal_is_slim);

	// enumerate balanced ideals
	bitvec_t pos, neg;
	bv_clear(&pos);
	bv_clear(&neg);
	for(int i = 0; i <= info.size/2; i++)
		count += enumerate_tree(&info, &pos, &neg, i, 0, 0);

	free(info.principal_is_slim);
	free(info.principal_pos);
	free(info.principal_neg);

	return count;
}
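
/*
 Illustrative sketch (not part of the original interface): a callback with the
 signature expected by enumerate_balanced_thickenings which prints each balanced
 ideal as a 0/1 string over the first size/2 elements. The local copy is only
 there so nothing has to be assumed about the const qualification of bv_get_bit;
 the function name is hypothetical.
*/
static void example_print_ideal(const bitvec_t *ideal, int size, void *data)
{
	(void)data;                        // no per-callback state needed here
	bitvec_t copy;
	bv_copy(ideal, &copy);             // bv_copy is used above with a const source
	for(int j = 0; j < size/2; j++)    // only the first half of the group is stored
		fputc(bv_get_bit(&copy, j) ? '1' : '0', stderr);
	fputc('\n', stderr);
}

/*
 A typical call sequence, assuming a caller that already has a semisimple_type_t
 and the two invariance masks (error handling and teardown omitted):

	node_t *graph = graph_alloc(type);
	int ncosets = prepare_simplified_graph(type, left, right, graph);
	if(ncosets > 0)
		count = enumerate_balanced_thickenings(graph, ncosets, example_print_ideal, 0);
*/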