/deps/klib/kbtree.h — macro-based, generic B-tree container from klib
(Attractive Chaos), as vendored in https://gitlab.com/Blueprint-Marketing/h2o

C Header | 384 lines | 335 code | 23 blank | 26 comment | MD5 ca0f36c0eb7454e47844e97df1dac1d8
/*-
 * Copyright 1997-1999, 2001, John-Mark Gurney.
 *           2008-2009, Attractive Chaos <attractor@live.co.uk>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#ifndef __AC_KBTREE_H
#define __AC_KBTREE_H

#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Node header, packed into one 32-bit word.
 * is_internal -- 1 if the node has children, 0 for a leaf;
 * n           -- number of keys currently stored in the node. */
typedef struct {
	int32_t is_internal:1, n:31;
} kbnode_t;

/* Keys start 4 bytes into a node, right after the kbnode_t header.
 * NOTE(review): the hard-coded 4 assumes sizeof(kbnode_t) == 4 -- confirm
 * no padding is added on the target ABI. */
#define __KB_KEY(type, x) ((type*)((char*)x + 4))
/* Child-pointer array; its offset (just past the key array) is computed
 * per tree in kb_init and cached in btr->off_ptr. */
#define __KB_PTR(btr, x) ((kbnode_t**)((char*)x + btr->off_ptr))
/* Declares the per-instantiation tree descriptor type kbtree_<name>_t. */
#define __KB_TREE_T(name) \
	typedef struct { \
		kbnode_t *root; \
		int off_key, off_ptr, ilen, elen; /* byte offsets and node sizes (ilen: internal, elen: leaf) */ \
		int n, t; /* n = max keys per node (2t-1); t = minimum degree */ \
		int n_keys, n_nodes; /* bookkeeping: total stored keys / allocated nodes */ \
	} kbtree_##name##_t;
/* Defines kb_init_<name>(size): allocates a tree whose nodes occupy roughly
 * "size" bytes each, deriving the minimum degree t and caching the byte
 * layout of internal (ilen) and leaf (elen) nodes.  Returns NULL when the
 * requested node size is too small to hold a valid B-tree node (t < 2). */
#define __KB_INIT(name, key_t) \
	kbtree_##name##_t *kb_init_##name(int size) \
	{ \
		kbtree_##name##_t *b; \
		b = (kbtree_##name##_t*)calloc(1, sizeof(kbtree_##name##_t)); \
		/* choose t so 2t-1 keys plus 2t child pointers and the 4-byte header fit in "size" */ \
		b->t = ((size - 4 - sizeof(void*)) / (sizeof(void*) + sizeof(key_t)) + 1) >> 1; \
		if (b->t < 2) { \
			free(b); return 0; \
		} \
		b->n = 2 * b->t - 1; /* maximum keys per node */ \
		b->off_ptr = 4 + b->n * sizeof(key_t); /* child-pointer array follows the keys */ \
		/* node byte sizes, rounded up to a multiple of 4 */ \
		b->ilen = (4 + sizeof(void*) + b->n * (sizeof(void*) + sizeof(key_t)) + 3) >> 2 << 2; \
		b->elen = (b->off_ptr + 3) >> 2 << 2; \
		b->root = (kbnode_t*)calloc(1, b->ilen); /* root gets an internal-sized allocation */ \
		++b->n_nodes; \
		return b; \
	}
/* Frees every node and the tree descriptor itself.  Uses an explicit,
 * growable pointer stack instead of recursion to walk all nodes. */
#define __kb_destroy(b) do { \
		int i, max = 8; \
		kbnode_t *x, **top, **stack = 0; \
		if (b) { \
			top = stack = (kbnode_t**)calloc(max, sizeof(kbnode_t*)); \
			*top++ = (b)->root; \
			while (top != stack) { \
				x = *--top; \
				if (x->is_internal == 0) { free(x); continue; } /* leaf: no children to push */ \
				for (i = 0; i <= x->n; ++i) /* a node with n keys has n+1 children */ \
					if (__KB_PTR(b, x)[i]) { \
						if (top - stack == max) { /* grow the stack */ \
							max <<= 1; \
							stack = (kbnode_t**)realloc(stack, max * sizeof(kbnode_t*)); \
							top = stack + (max>>1); \
						} \
						*top++ = __KB_PTR(b, x)[i]; \
					} \
				free(x); \
			} \
		} \
		free(b); free(stack); \
	} while (0)
/* Stores the smallest key in the tree into "ret" by following the leftmost
 * child pointer to the leftmost leaf.
 * NOTE(review): assumes a non-empty tree, and reads __KB_PTR even on leaf
 * nodes, whose elen-sized allocations do not necessarily cover a pointer
 * slot -- verify against upstream klib before relying on this macro. */
#define __kb_get_first(key_t, b, ret) do { \
		kbnode_t *__x = (b)->root; \
		while (__KB_PTR(b, __x)[0] != 0) \
			__x = __KB_PTR(b, __x)[0]; \
		(ret) = __KB_KEY(key_t, __x)[0]; \
	} while (0)
/* Legacy intra-node search: one comparison against the middle key picks a
 * half, then that half is scanned linearly from its end.  Returns the index
 * of the largest key <= *k (or -1 when the node is empty / all keys exceed
 * *k), leaving the last comparison result in *r.  Not referenced by
 * KBTREE_INIT -- superseded by the binary-search __KB_GET_AUX1. */
#define __KB_GET_AUX0(name, key_t, __cmp) \
	static inline int __kb_get_aux_##name(const kbnode_t * __restrict x, const key_t * __restrict k, int *r) \
	{ \
		int tr, *rr, begin, end, n = x->n >> 1; \
		if (x->n == 0) return -1; \
		if (__cmp(*k, __KB_KEY(key_t, x)[n]) < 0) { \
			begin = 0; end = n; \
		} else { begin = n; end = x->n - 1; } \
		rr = r? r : &tr; /* tolerate a NULL result pointer */ \
		n = end; \
		while (n >= begin && (*rr = __cmp(*k, __KB_KEY(key_t, x)[n])) < 0) --n; \
		return n; \
	}
/* Intra-node binary search.  Returns the index of the largest key <= *k
 * (-1 when no key qualifies); *r receives the comparison between *k and the
 * returned key, so *r == 0 signals an exact match. */
#define __KB_GET_AUX1(name, key_t, __cmp) \
	static inline int __kb_getp_aux_##name(const kbnode_t * __restrict x, const key_t * __restrict k, int *r) \
	{ \
		int tr, *rr, begin = 0, end = x->n; \
		if (x->n == 0) return -1; \
		rr = r? r : &tr; /* tolerate a NULL result pointer */ \
		while (begin < end) { /* lower bound: first key >= *k */ \
			int mid = (begin + end) >> 1; \
			if (__cmp(__KB_KEY(key_t, x)[mid], *k) < 0) begin = mid + 1; \
			else end = mid; \
		} \
		if (begin == x->n) { *rr = 1; return x->n - 1; } /* every key < *k */ \
		if ((*rr = __cmp(*k, __KB_KEY(key_t, x)[begin])) < 0) --begin; /* step back to last key < *k */ \
		return begin; \
	}
/* Defines kb_getp_<name>/kb_get_<name>: descend from the root and return a
 * pointer to the stored key equal to *k, or NULL when absent. */
#define __KB_GET(name, key_t) \
	static key_t *kb_getp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
	{ \
		int i, r = 0; \
		kbnode_t *x = b->root; \
		while (x) { \
			i = __kb_getp_aux_##name(x, k, &r); \
			if (i >= 0 && r == 0) return &__KB_KEY(key_t, x)[i]; /* exact hit */ \
			if (x->is_internal == 0) return 0; /* reached a leaf: not present */ \
			x = __KB_PTR(b, x)[i + 1]; /* descend into the child right of key i */ \
		} \
		return 0; \
	} \
	static inline key_t *kb_get_##name(kbtree_##name##_t *b, const key_t k) \
	{ \
		return kb_getp_##name(b, &k); /* by-value convenience wrapper */ \
	}
/* Defines kb_intervalp_<name>/kb_interval_<name>: locate the neighbourhood
 * of *k.  On return *lower points at the largest stored key <= *k and
 * *upper at the smallest stored key > *k; either is NULL when no such key
 * exists.  On an exact match both point at the matching key. */
#define __KB_INTERVAL(name, key_t) \
	static void kb_intervalp_##name(kbtree_##name##_t *b, const key_t * __restrict k, key_t **lower, key_t **upper) \
	{ \
		int i, r = 0; \
		kbnode_t *x = b->root; \
		*lower = *upper = 0; \
		while (x) { \
			i = __kb_getp_aux_##name(x, k, &r); \
			if (i >= 0 && r == 0) { /* exact match */ \
				*lower = *upper = &__KB_KEY(key_t, x)[i]; \
				return; \
			} \
			if (i >= 0) *lower = &__KB_KEY(key_t, x)[i]; /* best "<= k" seen so far */ \
			if (i < x->n - 1) *upper = &__KB_KEY(key_t, x)[i + 1]; /* best "> k" seen so far */ \
			if (x->is_internal == 0) return; \
			x = __KB_PTR(b, x)[i + 1]; \
		} \
	} \
	static inline void kb_interval_##name(kbtree_##name##_t *b, const key_t k, key_t **lower, key_t **upper) \
	{ \
		kb_intervalp_##name(b, &k, lower, upper); /* by-value convenience wrapper */ \
	}
/* Defines insertion (classic CLRS top-down B-tree insert):
 *   __kb_split_<name>    -- split full child y (x's i-th child) around its
 *                           median key, which moves up into x;
 *   __kb_putp_aux_<name> -- insert *k into the subtree at non-full x,
 *                           splitting any full child before descending;
 *   kb_putp/kb_put       -- public entry points; grow a new root first when
 *                           the current root is full.  Equal keys are not
 *                           rejected (no duplicate check is performed). */
#define __KB_PUT(name, key_t, __cmp) \
	/* x must be an internal node */ \
	static void __kb_split_##name(kbtree_##name##_t *b, kbnode_t *x, int i, kbnode_t *y) \
	{ \
		kbnode_t *z; \
		z = (kbnode_t*)calloc(1, y->is_internal? b->ilen : b->elen); /* new right sibling of y */ \
		++b->n_nodes; \
		z->is_internal = y->is_internal; \
		z->n = b->t - 1; \
		memcpy(__KB_KEY(key_t, z), __KB_KEY(key_t, y) + b->t, sizeof(key_t) * (b->t - 1)); /* upper t-1 keys move to z */ \
		if (y->is_internal) memcpy(__KB_PTR(b, z), __KB_PTR(b, y) + b->t, sizeof(void*) * b->t); /* and their t children */ \
		y->n = b->t - 1; \
		memmove(__KB_PTR(b, x) + i + 2, __KB_PTR(b, x) + i + 1, sizeof(void*) * (x->n - i)); /* open a child slot in x */ \
		__KB_PTR(b, x)[i + 1] = z; \
		memmove(__KB_KEY(key_t, x) + i + 1, __KB_KEY(key_t, x) + i, sizeof(key_t) * (x->n - i)); /* open a key slot in x */ \
		__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[b->t - 1]; /* median of y moves up into x */ \
		++x->n; \
	} \
	static void __kb_putp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k) \
	{ \
		int i = x->n - 1; \
		if (x->is_internal == 0) { /* leaf: shift larger keys right and store */ \
			i = __kb_getp_aux_##name(x, k, 0); \
			if (i != x->n - 1) \
				memmove(__KB_KEY(key_t, x) + i + 2, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
			__KB_KEY(key_t, x)[i + 1] = *k; \
			++x->n; \
		} else { \
			i = __kb_getp_aux_##name(x, k, 0) + 1; /* child that should receive *k */ \
			if (__KB_PTR(b, x)[i]->n == 2 * b->t - 1) { /* full child: split before descending */ \
				__kb_split_##name(b, x, i, __KB_PTR(b, x)[i]); \
				if (__cmp(*k, __KB_KEY(key_t, x)[i]) > 0) ++i; /* promoted median decides the side */ \
			} \
			__kb_putp_aux_##name(b, __KB_PTR(b, x)[i], k); \
		} \
	} \
	static void kb_putp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
	{ \
		kbnode_t *r, *s; \
		++b->n_keys; \
		r = b->root; \
		if (r->n == 2 * b->t - 1) { /* full root: grow the tree by one level */ \
			++b->n_nodes; \
			s = (kbnode_t*)calloc(1, b->ilen); \
			b->root = s; s->is_internal = 1; s->n = 0; \
			__KB_PTR(b, s)[0] = r; \
			__kb_split_##name(b, s, 0, r); \
			r = s; \
		} \
		__kb_putp_aux_##name(b, r, k); \
	} \
	static inline void kb_put_##name(kbtree_##name##_t *b, const key_t k) \
	{ \
		kb_putp_##name(b, &k); /* by-value convenience wrapper */ \
	}
/* Defines deletion (CLRS-style B-tree delete):
 *   __kb_delp_aux_<name>(b, x, k, s) -- remove a key from the subtree at x
 *   and return the removed key.  s selects the mode: 0 deletes *k; 1 deletes
 *   the subtree maximum; 2 deletes the subtree minimum (modes 1/2 fetch the
 *   predecessor/successor when replacing an internal key).  Children are
 *   rebalanced on the way down so every node entered holds >= t keys.
 *   kb_delp/kb_del -- public entry points; collapse the root when it is
 *   left empty, shrinking the tree height by one. */
#define __KB_DEL(name, key_t) \
	static key_t __kb_delp_aux_##name(kbtree_##name##_t *b, kbnode_t *x, const key_t * __restrict k, int s) \
	{ \
		int yn, zn, i, r = 0; \
		kbnode_t *xp, *y, *z; \
		key_t kp; \
		if (x == 0) return *k; \
		if (s) { /* s can only be 0, 1 or 2 */ \
			r = x->is_internal == 0? 0 : s == 1? 1 : -1; \
			i = s == 1? x->n - 1 : -1; \
		} else i = __kb_getp_aux_##name(x, k, &r); \
		if (x->is_internal == 0) { /* case 1: remove directly from a leaf */ \
			if (s == 2) ++i; /* minimum mode: take index 0 */ \
			kp = __KB_KEY(key_t, x)[i]; \
			memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
			--x->n; \
			return kp; \
		} \
		if (r == 0) { /* case 2: *k sits in internal node x at index i */ \
			if ((yn = __KB_PTR(b, x)[i]->n) >= b->t) { /* 2a: replace with predecessor from left child */ \
				xp = __KB_PTR(b, x)[i]; \
				kp = __KB_KEY(key_t, x)[i]; \
				__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 1); \
				return kp; \
			} else if ((zn = __KB_PTR(b, x)[i + 1]->n) >= b->t) { /* 2b: replace with successor from right child */ \
				xp = __KB_PTR(b, x)[i + 1]; \
				kp = __KB_KEY(key_t, x)[i]; \
				__KB_KEY(key_t, x)[i] = __kb_delp_aux_##name(b, xp, 0, 2); \
				return kp; \
			} else if (yn == b->t - 1 && zn == b->t - 1) { /* 2c: merge both children around *k, then recurse */ \
				y = __KB_PTR(b, x)[i]; z = __KB_PTR(b, x)[i + 1]; \
				__KB_KEY(key_t, y)[y->n++] = *k; \
				memmove(__KB_KEY(key_t, y) + y->n, __KB_KEY(key_t, z), z->n * sizeof(key_t)); \
				if (y->is_internal) memmove(__KB_PTR(b, y) + y->n, __KB_PTR(b, z), (z->n + 1) * sizeof(void*)); \
				y->n += z->n; \
				memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
				memmove(__KB_PTR(b, x) + i + 1, __KB_PTR(b, x) + i + 2, (x->n - i - 1) * sizeof(void*)); \
				--x->n; \
				free(z); \
				return __kb_delp_aux_##name(b, y, k, s); \
			} \
		} \
		++i; /* descend into the child right of index i */ \
		if ((xp = __KB_PTR(b, x)[i])->n == b->t - 1) { /* case 3: child too small, rebalance before descending */ \
			if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n >= b->t) { /* 3a: rotate a key in from the left sibling */ \
				memmove(__KB_KEY(key_t, xp) + 1, __KB_KEY(key_t, xp), xp->n * sizeof(key_t)); \
				if (xp->is_internal) memmove(__KB_PTR(b, xp) + 1, __KB_PTR(b, xp), (xp->n + 1) * sizeof(void*)); \
				__KB_KEY(key_t, xp)[0] = __KB_KEY(key_t, x)[i - 1]; \
				__KB_KEY(key_t, x)[i - 1] = __KB_KEY(key_t, y)[y->n - 1]; \
				if (xp->is_internal) __KB_PTR(b, xp)[0] = __KB_PTR(b, y)[y->n]; \
				--y->n; ++xp->n; \
			} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n >= b->t) { /* 3a: rotate a key in from the right sibling */ \
				__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
				__KB_KEY(key_t, x)[i] = __KB_KEY(key_t, y)[0]; \
				if (xp->is_internal) __KB_PTR(b, xp)[xp->n] = __KB_PTR(b, y)[0]; \
				--y->n; \
				memmove(__KB_KEY(key_t, y), __KB_KEY(key_t, y) + 1, y->n * sizeof(key_t)); \
				if (y->is_internal) memmove(__KB_PTR(b, y), __KB_PTR(b, y) + 1, (y->n + 1) * sizeof(void*)); \
			} else if (i > 0 && (y = __KB_PTR(b, x)[i - 1])->n == b->t - 1) { /* 3b: merge xp into its left sibling */ \
				__KB_KEY(key_t, y)[y->n++] = __KB_KEY(key_t, x)[i - 1]; \
				memmove(__KB_KEY(key_t, y) + y->n, __KB_KEY(key_t, xp), xp->n * sizeof(key_t)); \
				if (y->is_internal) memmove(__KB_PTR(b, y) + y->n, __KB_PTR(b, xp), (xp->n + 1) * sizeof(void*)); \
				y->n += xp->n; \
				memmove(__KB_KEY(key_t, x) + i - 1, __KB_KEY(key_t, x) + i, (x->n - i) * sizeof(key_t)); \
				memmove(__KB_PTR(b, x) + i, __KB_PTR(b, x) + i + 1, (x->n - i) * sizeof(void*)); \
				--x->n; \
				free(xp); \
				xp = y; /* continue the descent in the merged node */ \
			} else if (i < x->n && (y = __KB_PTR(b, x)[i + 1])->n == b->t - 1) { /* 3b: merge the right sibling into xp */ \
				__KB_KEY(key_t, xp)[xp->n++] = __KB_KEY(key_t, x)[i]; \
				memmove(__KB_KEY(key_t, xp) + xp->n, __KB_KEY(key_t, y), y->n * sizeof(key_t)); \
				if (xp->is_internal) memmove(__KB_PTR(b, xp) + xp->n, __KB_PTR(b, y), (y->n + 1) * sizeof(void*)); \
				xp->n += y->n; \
				memmove(__KB_KEY(key_t, x) + i, __KB_KEY(key_t, x) + i + 1, (x->n - i - 1) * sizeof(key_t)); \
				memmove(__KB_PTR(b, x) + i + 1, __KB_PTR(b, x) + i + 2, (x->n - i - 1) * sizeof(void*)); \
				--x->n; \
				free(y); \
			} \
		} \
		return __kb_delp_aux_##name(b, xp, k, s); \
	} \
	static key_t kb_delp_##name(kbtree_##name##_t *b, const key_t * __restrict k) \
	{ \
		kbnode_t *x; \
		key_t ret; \
		ret = __kb_delp_aux_##name(b, b->root, k, 0); \
		--b->n_keys; \
		if (b->root->n == 0 && b->root->is_internal) { /* empty internal root: shrink tree height */ \
			--b->n_nodes; \
			x = b->root; \
			b->root = __KB_PTR(b, x)[0]; \
			free(x); \
		} \
		return ret; \
	} \
	static inline key_t kb_del_##name(kbtree_##name##_t *b, const key_t k) \
	{ \
		return kb_delp_##name(b, &k); /* by-value convenience wrapper */ \
	}
/* One frame of the explicit traversal stack: a node plus the index of the
 * next child/key to visit within it. */
typedef struct {
	kbnode_t *x;
	int i;
} __kbstack_t;
/* In-order traversal: invokes __func(key_t *) on every stored key in
 * ascending key order.  Uses an explicit, growable stack of (node, index)
 * frames instead of recursion. */
#define __kb_traverse(key_t, b, __func) do { \
		int __kmax = 8; \
		__kbstack_t *__kstack, *__kp; \
		__kp = __kstack = (__kbstack_t*)calloc(__kmax, sizeof(__kbstack_t)); \
		__kp->x = (b)->root; __kp->i = 0; \
		for (;;) { \
			while (__kp->x && __kp->i <= __kp->x->n) { /* dive into the leftmost unvisited child */ \
				if (__kp - __kstack == __kmax - 1) { /* grow the frame stack */ \
					__kmax <<= 1; \
					__kstack = (__kbstack_t*)realloc(__kstack, __kmax * sizeof(__kbstack_t)); \
					__kp = __kstack + (__kmax>>1) - 1; \
				} \
				(__kp+1)->i = 0; (__kp+1)->x = __kp->x->is_internal? __KB_PTR(b, __kp->x)[__kp->i] : 0; \
				++__kp; \
			} \
			--__kp; \
			if (__kp >= __kstack) { \
				if (__kp->x && __kp->i < __kp->x->n) __func(&__KB_KEY(key_t, __kp->x)[__kp->i]); \
				++__kp->i; /* after the key, visit its right child on the next pass */ \
			} else break; \
		} \
		free(__kstack); \
	} while (0)
/* Instantiates the whole B-tree API for (name, key_t, __cmp).  Invoke once
 * at file scope, then use the kb_* convenience macros below. */
#define KBTREE_INIT(name, key_t, __cmp) \
	__KB_TREE_T(name) \
	__KB_INIT(name, key_t) \
	__KB_GET_AUX1(name, key_t, __cmp) \
	__KB_GET(name, key_t) \
	__KB_INTERVAL(name, key_t) \
	__KB_PUT(name, key_t, __cmp) \
	__KB_DEL(name, key_t)
#define KB_DEFAULT_SIZE 512 /* default node size in bytes for kb_init */

/* Convenience wrappers dispatching to the kb_*_<name> functions generated
 * by KBTREE_INIT.  The *p variants take a pointer to the key. */
#define kbtree_t(name) kbtree_##name##_t
#define kb_init(name, s) kb_init_##name(s)
#define kb_destroy(name, b) __kb_destroy(b)
#define kb_get(name, b, k) kb_get_##name(b, k)
#define kb_put(name, b, k) kb_put_##name(b, k)
#define kb_del(name, b, k) kb_del_##name(b, k)
#define kb_interval(name, b, k, l, u) kb_interval_##name(b, k, l, u)
#define kb_getp(name, b, k) kb_getp_##name(b, k)
#define kb_putp(name, b, k) kb_putp_##name(b, k)
#define kb_delp(name, b, k) kb_delp_##name(b, k)
#define kb_intervalp(name, b, k, l, u) kb_intervalp_##name(b, k, l, u)
#define kb_size(b) ((b)->n_keys) /* number of keys currently stored */

/* Three-way compare for ordered scalar types; evaluates a and b twice, so
 * avoid side effects in the arguments. */
#define kb_generic_cmp(a, b) (((b) < (a)) - ((a) < (b)))
#define kb_str_cmp(a, b) strcmp(a, b)

#endif /* __AC_KBTREE_H */