diff --git a/allot_lookuptbl.go b/allot_lookuptbl.go index e1d218c..b795f41 100644 --- a/allot_lookuptbl.go +++ b/allot_lookuptbl.go @@ -3,11 +3,19 @@ package bart +// idxToAllot returns the precalculated bitset for idx. +// Only used for fast bitset intersections instead of +// range loops in table overlaps methods. +// +// Please read the ART paper ./doc/artlookup.pdf +// to understand the allotment algorithm. +func idxToAllot(idx uint) [8]uint64 { + return allotLookupTbl[idx] +} + // allotLookupTbl, as precalculated bitsets, // map the baseIndex to bitset with precomputed complete binary tree. // -// Used for bitset intersections instead of range loops in overlaps tests. -// // // 1 <= idx <= 511 // func allotRec(aTbl *bitset.BitSet, idx uint) { // aTbl = aTbl.Set(idx) diff --git a/overlaps.go b/overlaps.go index c0aa949..7d52a65 100644 --- a/overlaps.go +++ b/overlaps.go @@ -177,7 +177,7 @@ func (n *node[V]) overlapsChildrenIn(o *node[V]) bool { for _, idx := range allIndices { // get pre alloted bitset for idx - a8 := allotLookupTbl[idx] + a8 := idxToAllot(idx) prefixRoutes.InPlaceUnion(bitset.BitSet(a8[:])) } @@ -241,7 +241,7 @@ func (n *node[V]) overlapsOneRouteIn(o *node[V]) bool { // use bitset intersection with alloted stride table instead of range loops // copy pre alloted bitset for idx - a8 := allotLookupTbl[idx] + a8 := idxToAllot(idx) allotedPrefixRoutes := bitset.BitSet(a8[:]) // use bitset intersection instead of range loops @@ -260,7 +260,7 @@ func (n *node[V]) overlapsPrefix(octet byte, pfxLen int) bool { // use bitset intersection with alloted stride table instead of range loops // copy pre alloted bitset for idx - a8 := allotLookupTbl[idx] + a8 := idxToAllot(idx) allotedPrefixRoutes := bitset.BitSet(a8[:]) // use bitset intersection instead of range loops