1 // Generic radix tree implementation. It's only kernel-specific because
2 // it uses the kernel page allocator; each node takes up one page. The
3 // depth of the tree is dynamically growable (but not currently
9 #include <kern/types.h>
10 #include <kern/pagealloc.h>
11 #include <kern/compiler.h>
14 // Key must be an integer.
15 template <typename T, typename Key>
// NOTE(review): the class header itself (original lines 16-17) is not
// visible in this chunk; the members below belong to that class.
// Depth in bits: how many key bits are addressed by interior directory
// levels above the leaf.  0 means the whole tree is one leaf page --
// inferred from lookup() below; confirm against the full source.
18 uint depth; // in bits
21 // Yuck... C++ doesn't allow function results to be used
22 // in constants, even if the function can be evaluated
// ...at compile time -- this predates constexpr, hence the macro
// workarounds below, #undef'd at the end of the class.
// Log2 of one T slot: sizeof(T) rounded up to a power of two.
25 #define RADIX_data_bits (ll_get_order_round_up(sizeof(T)))
// Bytes per T slot in a leaf page (power of two).
26 #define RADIX_node_size (1 << RADIX_data_bits)
// Log2(slots per leaf page): the number of key bits consumed by the
// final (leaf) level of the tree.
28 #define RADIX_final_shift (Arch::page_shift - RADIX_data_bits)
// Number of T slots in one leaf page.
29 #define RADIX_entries_per_table (Arch::page_size / RADIX_node_size)
// Log2(pointers per directory page): key bits consumed per interior level.
31 dir_shift = Arch::page_shift - _LL_LONG_LOGBYTES,
// Child pointers held by one interior (directory) page.
32 entries_per_dir = Arch::page_size / sizeof(void *)
// Directory index for this key at a given interior level; "shift" is
// the number of key bits consumed by all levels below this one.
35 static uint key_to_dir_offset(Key key, int shift)
// entries_per_dir is a power of two, so the mask extracts exactly one
// level's worth of key bits.
37 return (key >> shift) & (entries_per_dir - 1);
// Index of this key's T slot within its leaf page (the low key bits).
40 static uint key_to_offset(Key key)
42 return key & (RADIX_entries_per_table - 1);
// Return a pointer to the slot for "key".  When "add" is true, the
// tree is grown and missing pages are allocated along the way.
// NOTE(review): the miss path for add==false (original lines 70-74)
// is not visible in this chunk -- presumably it returns NULL; confirm.
46 T *lookup(Key key, bool add = false)
49 void *node = toplevel;
// Key bits covered below the current top level: a depth-0 tree is a
// bare leaf page, otherwise the top level is a directory.
50 int new_shift = depth == 0 ? RADIX_final_shift : dir_shift;
// Grow the tree upward while the key has set bits above what the
// current depth can address.
52 while (unlikely(key_to_dir_offset(key, shift + new_shift) != 0)) {
// NOTE(review): the guard before this allocation (original lines
// 53-55) is not visible -- presumably it bails out unless "add".
56 toplevel = Mem::alloc_pages(1);
// Zero-fill so absent children read back as NULL pointers.
57 bzero(toplevel, Arch::page_size);
// The old tree covers the keys whose high bits are zero, so it
// becomes child 0 of the new, taller root.
59 static_cast<void **>(toplevel)[0] = node;
// Once the tree has at least one directory level, every further
// level added on top is also a directory.
64 new_shift = dir_shift;
// Descend one directory level per iteration until shift drops below
// the leaf threshold.
67 while (shift >= RADIX_final_shift) {
68 int off = key_to_dir_offset(key, shift);
69 void *new_node = static_cast<void **>(node)[off];
// Missing child: allocate a zeroed page for it.  NOTE(review): the
// enclosing condition (original lines 70-74) is not visible --
// presumably this runs only when the slot is NULL and "add" is set.
75 new_node = Mem::alloc_pages(1);
76 bzero(new_node, Arch::page_size);
77 static_cast<void **>(node)[off] = new_node;
// The descent loop subtracts dir_shift per level, so on exit shift
// has gone exactly one directory level below the leaf threshold and
// "node" is the leaf page.
84 assert(shift == RADIX_final_shift - dir_shift);
85 return static_cast<T *>(node) + key_to_offset(key);
// Initial tree: a single zeroed leaf page (i.e. depth 0).
// NOTE(review): the enclosing function header is not visible in this
// chunk -- presumably this is the constructor; confirm.
90 toplevel = Mem::alloc_pages(1);
91 bzero(toplevel, Arch::page_size);
// Scrub the helper macros so they don't leak out of this header into
// other translation units.
97 #undef RADIX_data_bits
98 #undef RADIX_node_size
99 #undef RADIX_final_shift
100 #undef RADIX_entries_per_table