Cleanup in bitvec.h and bugfix in principal ideal generation

This commit is contained in:
Florian Stecker
2016-11-18 20:39:19 +01:00
parent ed63dc2b82
commit 882695c15e
3 changed files with 106 additions and 145 deletions

View File

@@ -567,21 +567,15 @@ void graph_free(semisimple_type_t type, node_t *graph)
/*********************************** THE ACTUAL ENUMERATION ****************************************/
typedef struct {
int rank;
int order;
int size; // the size of the graph; this can vary from the order if we take quotients beforehand
const node_t *graph;
int printstep;
const char *alphabet;
int size; // the size of the weyl group. We store however only the first size/2 elements
FILE *outfile;
bitvec_t *principal_pos;
bitvec_t *principal_neg;
int *principal_is_slim;
} enumeration_info_t;
/*
ok this is screwed up, let's start over:
pos and neg are bitvectors of size info.size/2
they stand for the first (shortest) info.size/2 elements of the weyl group
the least significant bit is the identity
@@ -595,8 +589,11 @@ typedef struct {
// returns number of found balanced ideals
// next_neg can be info.size/2; in that case, everything between known_until and info.size/2 is required to be in the ideal, but it does not mean that next_neg is really not contained in the ideal
// next_neg must be strictly greater than known_until, and less or equal to info.size/2
// we use bv_union, bv_copy, bv_set_range_except, bv_disjoint, bv_next_zero
static long enumerate_tree(const enumeration_info_t *info, const bitvec_t *pos, const bitvec_t *neg, int first_unknown, int next_neg)
{
static long totcount = 0;
bitvec_t newpos, newneg, known;
int next_next_neg;
long count = 0;
@@ -604,6 +601,8 @@ static long enumerate_tree(const enumeration_info_t *info, const bitvec_t *pos,
// the omission of next_neg means inclusion of info->size - 1 - next_neg
// add its principal ideal to pos and the opposite to neg
if(next_neg != info->size/2) {
// if(!info->principal_is_slim[info->size - 1 - next_neg]) // if the principal ideal we want to add is not slim by itself, we don't even have to try; but there is not really a performance benefit
// return 0;
bv_union(&info->principal_pos[info->size - 1 - next_neg], pos, &newpos);
bv_union(&info->principal_neg[info->size - 1 - next_neg], neg, &newneg);
} else { // or, if there is no next_neg, just copy
@@ -611,8 +610,8 @@ static long enumerate_tree(const enumeration_info_t *info, const bitvec_t *pos,
bv_copy(neg, &newneg);
}
// add the range from first_unknown to next_neg to newpos
bv_set_range(&newpos, first_unknown, next_neg); // including the start, excluding end
// everything before next_neg which was unknown should be set to positive; to speed this up, we can start with first_unknown
bv_set_range_except(&newpos, neg, first_unknown, next_neg);
// check if this leads to any conflicts (equivalently, violates slimness)
if(!bv_disjoint(&newpos, &newneg))
@@ -621,30 +620,23 @@ static long enumerate_tree(const enumeration_info_t *info, const bitvec_t *pos,
// what do we know so far?
bv_union(&newpos, &newneg, &known);
// do we know everything already? we have a balanced ideal then
if(bv_full(&known, info->size/2)) {
next_next_neg = bv_next_zero(&known, next_neg + 1);
fprintf(stderr, "Found balanced ideal: ");
bv_print(stderr, &newpos, info->size/2);
fprintf(stderr, " ");
bv_print(stderr, &newneg, info->size/2);
fprintf(stderr, "\n");
if(next_next_neg >= info->size/2) {
if((++totcount) % 100000000 == 0) {
fprintf(stderr, "Found balanced ideal: ");
bv_print(stderr, &newpos, info->size/2);
fprintf(stderr, "\n");
}
return 1;
}
next_next_neg = next_neg;
while(next_next_neg < info->size/2) {
int tmp = bv_next_zero(&known, next_next_neg + 1); // this could return info->size/2, but that's fine for enumerate_tree
if(tmp <= next_next_neg) {
fprintf(stderr, "%d <= %d\n", tmp, next_next_neg);
bv_print(stderr, &known, info->size/2);
fprintf(stderr, "\n");
exit(-1);
}
next_next_neg = tmp;
do {
count += enumerate_tree(info, &newpos, &newneg, next_neg + 1, next_next_neg);
}
next_next_neg = bv_next_zero(&known, next_next_neg + 1);
} while(next_next_neg <= info->size/2);
return count;
}
@@ -658,16 +650,15 @@ long enumerate_balanced_thickenings(semisimple_type_t type, node_t *graph, int s
int current;
edgelist_t *edge;
info.rank = coxeter_rank(type);
info.order = coxeter_order(type);
info.size = size;
info.graph = graph;
info.alphabet = (char*)alphabet;
info.outfile = outfile;
info.principal_pos = (bitvec_t*)malloc(info.size*sizeof(bitvec_t));
info.principal_neg = (bitvec_t*)malloc(info.size*sizeof(bitvec_t));
info.principal_is_slim = (int*)malloc(info.size*sizeof(int));
info.printstep = 0;
if(getenv("PRINTSTEP"))
info.printstep = atoi(getenv("PRINTSTEP"));
// info.printstep = 0;
// if(getenv("PRINTSTEP"))
// info.printstep = atoi(getenv("PRINTSTEP"));
// the algorithm only works if the opposition pairing does not stabilize any element
// if this happens, there can be no balanced thickenings
@@ -675,39 +666,39 @@ long enumerate_balanced_thickenings(semisimple_type_t type, node_t *graph, int s
if(graph[i].opposite == i)
return 0;
// we can only handle bitvectors up to 64*BV_QWORD_RANK bits
// we can only handle bitvectors up to 64*BV_QWORD_RANK bits, but we only store half of the weyl group
if(info.size > 128*BV_QWORD_RANK)
return -1;
// generate principal ideals, needed bitvec operations: bv_clear, bv_set_bit, bv_get_bit
bitvec_t *principal_pos = (bitvec_t*)malloc(info.size*sizeof(bitvec_t));
bitvec_t *principal_neg = (bitvec_t*)malloc(info.size*sizeof(bitvec_t));
// generate principal ideals
int *principal = (int*)malloc(info.size*sizeof(int));
for(int i = 0; i < info.size; i++) {
bv_clear(&principal_pos[i]);
bv_clear(&principal_neg[i]);
bv_set_bit(&principal_pos[i], i);
bv_set_bit(&principal_neg[i], info.size - 1 - i);
memset(principal, 0, info.size*sizeof(int));
principal[i] = 1;
queue_init(&queue);
queue_put(&queue, i);
while((current = queue_get(&queue)) != -1) {
while((current = queue_get(&queue)) != -1)
for(edge = graph[current].bruhat_lower; edge; edge = edge->next)
if(!bv_get_bit(&principal_pos[i], edge->to)) {
bv_set_bit(&principal_pos[i], edge->to);
bv_set_bit(&principal_neg[i], info.size - 1 - edge->to);
if(!principal[edge->to]) {
principal[edge->to] = 1;
queue_put(&queue, edge->to);
}
}
// copy the first half into bitvectors
bv_clear(&info.principal_pos[i]);
bv_clear(&info.principal_neg[i]);
info.principal_is_slim[i] = 1;
for(int j = 0; j < info.size/2; j++)
if(principal[j])
bv_set_bit(&info.principal_pos[i], j);
for(int j = 0; j < info.size/2; j++)
if(principal[info.size - 1 - j]) {
bv_set_bit(&info.principal_neg[i], j);
if(bv_get_bit(&info.principal_pos[i], j))
info.principal_is_slim[i] = 0;
}
}
// truncate them, as we only need the first info.size/2 elements
for(int i = 0; i < info.size; i++)
for(int j = info.size/2; j < info.size; j++) {
bv_clear_bit(&principal_pos[i], j);
bv_clear_bit(&principal_neg[i], j);
}
info.principal_pos = principal_pos;
info.principal_neg = principal_neg;
free(principal);
// enumerate balanced ideals
bitvec_t pos, neg;
@@ -716,8 +707,9 @@ long enumerate_balanced_thickenings(semisimple_type_t type, node_t *graph, int s
for(int i = 0; i <= info.size/2; i++)
count += enumerate_tree(&info, &pos, &neg, 0, i);
free(principal_pos);
free(principal_neg);
free(info.principal_is_slim);
free(info.principal_pos);
free(info.principal_neg);
return count;
}