--- /dev/null
+// Generic radix tree implementation. It's only kernel-specific because
+// it uses the kernel page allocator; each node takes up one page. The
+// depth of the tree is dynamically growable (but not currently
+// shrinkable).
+
+#ifndef _KERN_RADIX_H
+#define _KERN_RADIX_H
+
+#include <kern/types.h>
+#include <kern/pagealloc.h>
+#include <kern/compiler.h>
+
+namespace Util {
+	// Generic radix tree keyed by an integer.  Each node occupies
+	// exactly one page from the kernel page allocator.  The tree
+	// deepens on demand in lookup(..., true); it never shrinks and
+	// has no destructor, so node pages are never freed here.
+	//
+	// Key must be an unsigned integer type (it is shifted and
+	// masked); T should be small relative to the page size.
+	template <typename T, typename Key>
+	class RadixTree {
+		void *toplevel; // leaf page if depth == 0, else a directory page
+		uint depth;     // shift (in bits) applied to a key at the top level
+
+		enum {
+			// Yuck... this pre-constexpr C++ doesn't allow function
+			// results to be used in constants, even if the function
+			// can be evaluated at compile time, so these live in
+			// macros (#undef'd at the end of the file).
+
+			#define RADIX_data_bits (ll_get_order_round_up(sizeof(T)))
+			#define RADIX_node_size (1 << RADIX_data_bits)
+
+			#define RADIX_final_shift (Arch::page_shift - RADIX_data_bits)
+			#define RADIX_entries_per_table (Arch::page_size / RADIX_node_size)
+
+			dir_shift = Arch::page_shift - _LL_LONG_LOGBYTES,
+			entries_per_dir = Arch::page_size / sizeof(void *)
+		};
+
+		// Index into a directory page for the key bits at "shift".
+		static uint key_to_dir_offset(Key key, int shift)
+		{
+			return (key >> shift) & (entries_per_dir - 1);
+		}
+
+		// Index into a leaf page (the low-order key bits).
+		static uint key_to_offset(Key key)
+		{
+			return key & (RADIX_entries_per_table - 1);
+		}
+
+	public:
+		// Return a pointer to the slot for key, or NULL if the slot
+		// is not present and add is false.  With add == true, extra
+		// tree levels and missing directory/leaf pages are allocated
+		// (and zero-filled) on the way down, so the returned slot
+		// always exists.  NOTE(review): Mem::alloc_pages() failure is
+		// not checked -- presumably it panics; confirm.
+		T *lookup(Key key, bool add = false)
+		{
+			int shift = depth;
+			void *node = toplevel;
+			int new_shift = depth == 0 ? RADIX_final_shift : dir_shift;
+
+			// Grow the tree until it covers every set bit of key.
+			// Test *all* bits above the current coverage, not just
+			// the next directory's worth -- a masked test would let
+			// a key whose excess bits lie beyond the next window
+			// silently alias a small key's slot.  The two-step
+			// shift keeps each shift count below the width of Key.
+			while (unlikely((key >> shift >> new_shift) != 0)) {
+				if (!add)
+					return NULL;
+
+				toplevel = Mem::alloc_pages(1);
+				bzero(toplevel, Arch::page_size);
+
+				// The old tree covers the low keys, so it becomes
+				// entry 0 of the new root directory.
+				static_cast<void **>(toplevel)[0] = node;
+				node = toplevel;
+
+				shift += new_shift;
+				depth += new_shift;
+				new_shift = dir_shift;
+			}
+
+			// Walk down the directory levels to the leaf page.
+			while (shift >= RADIX_final_shift) {
+				int off = key_to_dir_offset(key, shift);
+				void *new_node = static_cast<void **>(node)[off];
+
+				if (!new_node) {
+					if (!add)
+						return NULL;
+
+					new_node = Mem::alloc_pages(1);
+					bzero(new_node, Arch::page_size);
+					static_cast<void **>(node)[off] = new_node;
+				}
+
+				shift -= dir_shift;
+				node = new_node;
+			}
+
+			// A depth-0 tree is a bare leaf page: no level was
+			// walked and shift is still 0.  Otherwise the last
+			// directory level left shift exactly one dir_shift
+			// below RADIX_final_shift.
+			assert(depth == 0 ? shift == 0 : shift == RADIX_final_shift - dir_shift);
+			return static_cast<T *>(node) + key_to_offset(key);
+		}
+
+		// Start as a single zeroed leaf page covering keys
+		// [0, RADIX_entries_per_table); lookup() grows it as needed.
+		RadixTree()
+		{
+			toplevel = Mem::alloc_pages(1);
+			bzero(toplevel, Arch::page_size);
+			depth = 0;
+		}
+	};
+}
+
+#undef RADIX_data_bits
+#undef RADIX_node_size
+#undef RADIX_final_shift
+#undef RADIX_entries_per_table
+
+#endif