
/epan/emem.c

https://github.com/labx-technologies-llc/wireshark
C | 2398 lines | 1735 code | 391 blank | 272 comment | 305 complexity | f0d8772707656b52a1ff7fe19e3b2e6a MD5
Possible License(s): GPL-2.0, BSD-3-Clause


  1. /* emem.c
  2. * Wireshark memory management and garbage collection functions
  3. * Ronnie Sahlberg 2005
  4. *
  5. * $Id$
  6. *
  7. * Wireshark - Network traffic analyzer
  8. * By Gerald Combs <gerald@wireshark.org>
  9. * Copyright 1998 Gerald Combs
  10. *
  11. * This program is free software; you can redistribute it and/or
  12. * modify it under the terms of the GNU General Public License
  13. * as published by the Free Software Foundation; either version 2
  14. * of the License, or (at your option) any later version.
  15. *
  16. * This program is distributed in the hope that it will be useful,
  17. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  18. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  19. * GNU General Public License for more details.
  20. *
  21. * You should have received a copy of the GNU General Public License
  22. * along with this program; if not, write to the Free Software
  23. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
  24. */
  25. #include "config.h"
  26. #include <stdio.h>
  27. #include <stdlib.h>
  28. #include <string.h>
  29. #include <stdarg.h>
  30. #include <ctype.h>
  31. #include <time.h>
  32. #ifdef HAVE_SYS_TIME_H
  33. #include <sys/time.h>
  34. #endif
  35. #ifdef HAVE_UNISTD_H
  36. #include <unistd.h>
  37. #endif
  38. #include <glib.h>
  39. #include "app_mem_usage.h"
  40. #include "proto.h"
  41. #include "emem.h"
  42. #include "wmem/wmem.h"
  43. #ifdef _WIN32
  44. #include <windows.h> /* VirtualAlloc, VirtualProtect */
  45. #include <process.h> /* getpid */
  46. #endif
  47. /* Print out statistics about our memory allocations? */
  48. /*#define SHOW_EMEM_STATS*/
  49. /* Do we want to use guard pages, if available? */
  50. #define WANT_GUARD_PAGES 1
  51. #ifdef WANT_GUARD_PAGES
  52. /* Add guard pages at each end of our allocated memory */
  53. #if defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H)
  54. #include <stdint.h>
  55. #ifdef HAVE_SYS_TYPES_H
  56. #include <sys/types.h>
  57. #endif /* HAVE_SYS_TYPES_H */
  58. #include <sys/mman.h>
  59. #if defined(MAP_ANONYMOUS)
  60. #define ANON_PAGE_MODE (MAP_ANONYMOUS|MAP_PRIVATE)
  61. #elif defined(MAP_ANON)
  62. #define ANON_PAGE_MODE (MAP_ANON|MAP_PRIVATE)
  63. #else
  64. #define ANON_PAGE_MODE (MAP_PRIVATE) /* have to map /dev/zero */
  65. #define NEED_DEV_ZERO
  66. #endif /* defined(MAP_ANONYMOUS) */
  67. #ifdef NEED_DEV_ZERO
  68. #include <fcntl.h>
  69. static int dev_zero_fd;
  70. #define ANON_FD dev_zero_fd
  71. #else
  72. #define ANON_FD -1
  73. #endif /* NEED_DEV_ZERO */
  74. #define USE_GUARD_PAGES 1
  75. #endif /* defined(HAVE_SYSCONF) && defined(HAVE_MMAP) && defined(HAVE_MPROTECT) && defined(HAVE_STDINT_H) */
  76. #endif /* WANT_GUARD_PAGES */
  77. /* When required, allocate more memory from the OS in this size chunks */
  78. #define EMEM_PACKET_CHUNK_SIZE (10 * 1024 * 1024)
  79. /* The canary between allocations is at least 8 bytes and up to 16 bytes to
  80. * allow future allocations to be 4- or 8-byte aligned.
  81. * All but the last byte of the canary are randomly generated; the last byte is
  82. * NUL, to separate the canary from the pointer to the next canary.
  83. *
  84. * For example, if the allocation is a multiple of 8 bytes, the canary and
  85. * pointer would look like:
  86. * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
  87. * |c|c|c|c|c|c|c|0||p|p|p|p|p|p|p|p| (64-bit), or:
  88. * |c|c|c|c|c|c|c|0||p|p|p|p| (32-bit)
  89. *
  90. * If the allocation was, for example, 12 bytes, the canary would look like:
  91. * |0|1|2|3|4|5|6|7||0|1|2|3|4|5|6|7|
  92. * [...]|a|a|a|a|c|c|c|c||c|c|c|c|c|c|c|0| (followed by the pointer)
  93. */
  94. #define EMEM_CANARY_SIZE 8
  95. #define EMEM_CANARY_DATA_SIZE (EMEM_CANARY_SIZE * 2 - 1)
  96. typedef struct _emem_chunk_t {
  97. struct _emem_chunk_t *next;
  98. char *buf;
  99. size_t size;
  100. unsigned int amount_free_init;
  101. unsigned int amount_free;
  102. unsigned int free_offset_init;
  103. unsigned int free_offset;
  104. void *canary_last;
  105. } emem_chunk_t;
  106. typedef struct _emem_pool_t {
  107. emem_chunk_t *free_list;
  108. emem_chunk_t *used_list;
  109. emem_tree_t *trees; /* only used by se_mem allocator */
  110. guint8 canary[EMEM_CANARY_DATA_SIZE];
  111. void *(*memory_alloc)(size_t size, struct _emem_pool_t *);
  112. /*
  113. * Tools like Valgrind and ElectricFence don't work well with memchunks.
  114. * Export the following environment variables to make {ep|se}_alloc() allocate each
  115. * object individually.
  116. *
  117. * WIRESHARK_DEBUG_EP_NO_CHUNKS
  118. * WIRESHARK_DEBUG_SE_NO_CHUNKS
  119. */
  120. gboolean debug_use_chunks;
  121. /* Do we want to use canaries?
  122. * Export the following environment variables to disable/enable canaries
  123. *
  124. * WIRESHARK_DEBUG_EP_NO_CANARY
  125. * For SE memory use of canary is default off as the memory overhead
  126. * is considerable.
  127. * WIRESHARK_DEBUG_SE_USE_CANARY
  128. */
  129. gboolean debug_use_canary;
  130. /* Do we want to verify no one is using a pointer to an ep_ or se_
  131. * allocated thing where they shouldn't be?
  132. *
  133. * Export WIRESHARK_EP_VERIFY_POINTERS or WIRESHARK_SE_VERIFY_POINTERS
  134. * to turn this on.
  135. */
  136. gboolean debug_verify_pointers;
  137. } emem_pool_t;
  138. static emem_pool_t ep_packet_mem;
  139. static emem_pool_t se_packet_mem;
  140. /*
  141. * Memory scrubbing is expensive but can be useful to ensure we don't:
  142. * - use memory before initializing it
  143. * - use memory after freeing it
  144. * Export WIRESHARK_DEBUG_SCRUB_MEMORY to turn it on.
  145. */
  146. static gboolean debug_use_memory_scrubber = FALSE;
  147. #if defined (_WIN32)
  148. static SYSTEM_INFO sysinfo;
  149. static OSVERSIONINFO versinfo;
  150. static int pagesize;
  151. #elif defined(USE_GUARD_PAGES)
  152. static intptr_t pagesize;
  153. #endif /* _WIN32 / USE_GUARD_PAGES */
  154. static void *emem_alloc_chunk(size_t size, emem_pool_t *mem);
  155. static void *emem_alloc_glib(size_t size, emem_pool_t *mem);
  156. /*
  157. * Set a canary value to be placed between memchunks.
  158. */
  159. static void
  160. emem_canary_init(guint8 *canary)
  161. {
  162. int i;
  163. static GRand *rand_state = NULL;
  164. if (rand_state == NULL) {
  165. rand_state = g_rand_new();
  166. }
  167. for (i = 0; i < EMEM_CANARY_DATA_SIZE; i ++) {
  168. canary[i] = (guint8) g_rand_int_range(rand_state, 1, 0x100);
  169. }
  170. return;
  171. }
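/*
* Verify the canary starting at 'canary' against the pool's reference
* pattern in 'mem_canary', then recover the pointer to the previous
* canary, which is stored immediately after the NUL byte that terminates
* the canary. Returns that pointer (possibly NULL at the end of the
* chain) on success, or (void *) -1 if the canary bytes do not match,
* i.e. the memory was overrun. When 'len' is non-NULL, the total
* canary-plus-pointer length is stored in *len.
*/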
  172. static void *
  173. emem_canary_next(guint8 *mem_canary, guint8 *canary, int *len)
  174. {
  175. void *ptr;
  176. int i;
  177. for (i = 0; i < EMEM_CANARY_SIZE-1; i++)
  178. if (mem_canary[i] != canary[i])
  179. return (void *) -1;
  180. for (; i < EMEM_CANARY_DATA_SIZE; i++) {
  181. if (canary[i] == '\0') {
  182. memcpy(&ptr, &canary[i+1], sizeof(void *));
  183. if (len)
  184. *len = i + 1 + (int)sizeof(void *);
  185. return ptr;
  186. }
  187. if (mem_canary[i] != canary[i])
  188. return (void *) -1;
  189. }
  190. return (void *) -1;
  191. }
  192. /*
  193. * Given an allocation size, return the amount of room needed for the canary
  194. * (with a minimum of 8 bytes) while using the canary to pad to an 8-byte
  195. * boundary.
  196. */
  197. static guint8
  198. emem_canary_pad (size_t allocation)
  199. {
  200. guint8 pad;
  201. pad = EMEM_CANARY_SIZE - (allocation % EMEM_CANARY_SIZE);
  202. if (pad < EMEM_CANARY_SIZE)
  203. pad += EMEM_CANARY_SIZE;
  204. return pad;
  205. }
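/*
* Worked example: for a 12-byte allocation, 12 % 8 == 4, so the initial
* pad is 8 - 4 = 4 bytes; since that is below the 8-byte minimum, another
* 8 bytes are added, giving a 12-byte canary. The allocation plus canary
* (12 + 12 = 24 bytes) therefore ends on an 8-byte boundary, matching the
* layout diagram at the top of this file.
*/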
  206. /* used for debugging canaries, will block */
  207. #ifdef DEBUG_INTENSE_CANARY_CHECKS
  208. gboolean intense_canary_checking = FALSE;
  209. /* used to intensively check ep canaries
  210. */
  211. void
  212. ep_check_canary_integrity(const char* fmt, ...)
  213. {
  214. va_list ap;
  215. static gchar there[128] = {
  216. 'L','a','u','n','c','h',0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  217. 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  218. 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
  219. 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 };
  220. gchar here[128];
  221. emem_chunk_t* npc = NULL;
  222. if (! intense_canary_checking ) return;
  223. va_start(ap,fmt);
  224. g_vsnprintf(here, sizeof(here), fmt, ap);
  225. va_end(ap);
  226. for (npc = ep_packet_mem.free_list; npc != NULL; npc = npc->next) {
  227. void *canary_next = npc->canary_last;
  228. while (canary_next != NULL) {
  229. canary_next = emem_canary_next(ep_packet_mem.canary, canary_next, NULL);
  230. /* XXX, check if canary_next is inside allocated memory? */
  231. if (canary_next == (void *) -1)
  232. g_error("Per-packet memory corrupted\nbetween: %s\nand: %s", there, here);
  233. }
  234. }
  235. g_strlcpy(there, here, sizeof(there));
  236. }
  237. #endif
  238. static void
  239. emem_init_chunk(emem_pool_t *mem)
  240. {
  241. if (mem->debug_use_canary)
  242. emem_canary_init(mem->canary);
  243. if (mem->debug_use_chunks)
  244. mem->memory_alloc = emem_alloc_chunk;
  245. else
  246. mem->memory_alloc = emem_alloc_glib;
  247. }
  248. static gsize
  249. emem_memory_usage(const emem_pool_t *pool)
  250. {
  251. gsize total_used = 0;
  252. emem_chunk_t *chunk;
  253. for (chunk = pool->used_list; chunk; chunk = chunk->next)
  254. total_used += (chunk->amount_free_init - chunk->amount_free);
  255. for (chunk = pool->free_list; chunk; chunk = chunk->next)
  256. total_used += (chunk->amount_free_init - chunk->amount_free);
  257. return total_used;
  258. }
  259. static gsize
  260. ep_memory_usage(void)
  261. {
  262. return emem_memory_usage(&ep_packet_mem);
  263. }
  264. /* Initialize the packet-lifetime memory allocation pool.
  265. * This function should be called only once when Wireshark or TShark starts
  266. * up.
  267. */
  268. static void
  269. ep_init_chunk(void)
  270. {
  271. static const ws_mem_usage_t ep_stats = { "EP", ep_memory_usage, NULL };
  272. ep_packet_mem.free_list=NULL;
  273. ep_packet_mem.used_list=NULL;
  274. ep_packet_mem.trees=NULL; /* not used by this allocator */
  275. ep_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_EP_NO_CHUNKS") == NULL);
  276. ep_packet_mem.debug_use_canary = ep_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_EP_NO_CANARY") == NULL);
  277. ep_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_EP_VERIFY_POINTERS") != NULL);
  278. #ifdef DEBUG_INTENSE_CANARY_CHECKS
  279. intense_canary_checking = (getenv("WIRESHARK_DEBUG_EP_INTENSE_CANARY") != NULL);
  280. #endif
  281. emem_init_chunk(&ep_packet_mem);
  282. memory_usage_component_register(&ep_stats);
  283. }
  284. static gsize
  285. se_memory_usage(void)
  286. {
  287. return emem_memory_usage(&se_packet_mem);
  288. }
  289. /* Initialize the capture-lifetime memory allocation pool.
  290. * This function should be called only once when Wireshark or TShark starts
  291. * up.
  292. */
  293. static void
  294. se_init_chunk(void)
  295. {
  296. static const ws_mem_usage_t se_stats = { "SE", se_memory_usage, NULL };
  297. se_packet_mem.free_list = NULL;
  298. se_packet_mem.used_list = NULL;
  299. se_packet_mem.trees = NULL;
  300. se_packet_mem.debug_use_chunks = (getenv("WIRESHARK_DEBUG_SE_NO_CHUNKS") == NULL);
  301. se_packet_mem.debug_use_canary = se_packet_mem.debug_use_chunks && (getenv("WIRESHARK_DEBUG_SE_USE_CANARY") != NULL);
  302. se_packet_mem.debug_verify_pointers = (getenv("WIRESHARK_SE_VERIFY_POINTERS") != NULL);
  303. emem_init_chunk(&se_packet_mem);
  304. memory_usage_component_register(&se_stats);
  305. }
  306. /* Initialize all the allocators here.
  307. * This function should be called only once when Wireshark or TShark starts
  308. * up.
  309. */
  310. void
  311. emem_init(void)
  312. {
  313. ep_init_chunk();
  314. se_init_chunk();
  315. if (getenv("WIRESHARK_DEBUG_SCRUB_MEMORY"))
  316. debug_use_memory_scrubber = TRUE;
  317. #if defined (_WIN32)
  318. /* Set up our guard page info for Win32 */
  319. GetSystemInfo(&sysinfo);
  320. pagesize = sysinfo.dwPageSize;
  321. /* Call GetVersionEx using the OSVERSIONINFO structure.
  322. * OSVERSIONINFOEX requires Win NT4 with SP6 or newer NT Versions.
  323. * OSVERSIONINFOEX will fail on Win9x and older NT Versions.
  324. * See also:
  325. * http://msdn.microsoft.com/library/en-us/sysinfo/base/getversionex.asp
  326. * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
  327. * http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfoex_str.asp
  328. */
  329. versinfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
  330. GetVersionEx(&versinfo);
  331. #elif defined(USE_GUARD_PAGES)
  332. pagesize = sysconf(_SC_PAGESIZE);
  333. if (pagesize == -1)
  334. fprintf(stderr, "Warning: call to sysconf() for _SC_PAGESIZE has failed...\n");
  335. #ifdef NEED_DEV_ZERO
  336. dev_zero_fd = ws_open("/dev/zero", O_RDWR);
  337. g_assert(dev_zero_fd != -1);
  338. #endif
  339. #endif /* _WIN32 / USE_GUARD_PAGES */
  340. }
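/*
* Illustrative lifecycle (a sketch only; the real call sites live
* elsewhere in Wireshark):
*
* emem_init(); // once, at program startup
* while (capture_is_open) {
* void *d = ep_alloc(64); // valid until the next packet
* void *s = se_alloc(64); // valid until the capture closes
* ep_free_all(); // after each packet is dissected
* }
* se_free_all(); // when the capture file is closed
*/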
  341. #ifdef SHOW_EMEM_STATS
  342. #define NUM_ALLOC_DIST 10
  343. static guint allocations[NUM_ALLOC_DIST] = { 0 };
  344. static guint total_no_chunks = 0;
  345. static void
  346. print_alloc_stats(void)
  347. {
  348. guint num_chunks = 0;
  349. guint num_allocs = 0;
  350. guint total_used = 0;
  351. guint total_allocation = 0;
  352. guint used_for_canaries = 0;
  353. guint total_headers;
  354. guint i;
  355. emem_chunk_t *chunk;
  356. guint total_space_allocated_from_os, total_space_wasted;
  357. gboolean ep_stat=TRUE;
  358. fprintf(stderr, "\n-------- EP allocator statistics --------\n");
  359. fprintf(stderr, "%s chunks, %s canaries, %s memory scrubber\n",
  360. ep_packet_mem.debug_use_chunks ? "Using" : "Not using",
  361. ep_packet_mem.debug_use_canary ? "using" : "not using",
  362. debug_use_memory_scrubber ? "using" : "not using");
  363. if (!ep_packet_mem.free_list && !ep_packet_mem.used_list) {
  364. fprintf(stderr, "No memory allocated\n");
  365. ep_stat = FALSE;
  366. }
  367. if (ep_packet_mem.debug_use_chunks && ep_stat) {
  368. /* Nothing interesting without chunks */
  369. /* Only look at the used_list since those chunks are fully
  370. * used. Looking at the free list would skew our view of what
  371. * we have wasted.
  372. */
  373. for (chunk = ep_packet_mem.used_list; chunk; chunk = chunk->next) {
  374. num_chunks++;
  375. total_used += (chunk->amount_free_init - chunk->amount_free);
  376. total_allocation += chunk->amount_free_init;
  377. }
  378. if (num_chunks > 0) {
  379. fprintf (stderr, "\n");
  380. fprintf (stderr, "\n---- Buffer space ----\n");
  381. fprintf (stderr, "\tChunk allocation size: %10u\n", EMEM_PACKET_CHUNK_SIZE);
  382. fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
  383. fprintf (stderr, "\t-------------------------------------------\n");
  384. fprintf (stderr, "\t= %u (%u including guard pages) total space used for buffers\n",
  385. total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
  386. fprintf (stderr, "\t-------------------------------------------\n");
  387. total_space_allocated_from_os = total_allocation
  388. + sizeof(emem_chunk_t) * num_chunks;
  389. fprintf (stderr, "Total allocated from OS: %u\n\n",
  390. total_space_allocated_from_os);
  391. }else{
  392. fprintf (stderr, "No fully used chunks, nothing to do\n");
  393. }
  394. /* Reset stats */
  395. num_chunks = 0;
  396. num_allocs = 0;
  397. total_used = 0;
  398. total_allocation = 0;
  399. used_for_canaries = 0;
  400. }
  401. fprintf(stderr, "\n-------- SE allocator statistics --------\n");
  402. fprintf(stderr, "Total number of chunk allocations %u\n",
  403. total_no_chunks);
  404. fprintf(stderr, "%s chunks, %s canaries\n",
  405. se_packet_mem.debug_use_chunks ? "Using" : "Not using",
  406. se_packet_mem.debug_use_canary ? "using" : "not using");
  407. if (!se_packet_mem.free_list && !se_packet_mem.used_list) {
  408. fprintf(stderr, "No memory allocated\n");
  409. return;
  410. }
  411. if (!se_packet_mem.debug_use_chunks )
  412. return; /* Nothing interesting without chunks?? */
  413. /* Only look at the used_list since those chunks are fully used.
  414. * Looking at the free list would skew our view of what we have wasted.
  415. */
  416. for (chunk = se_packet_mem.used_list; chunk; chunk = chunk->next) {
  417. num_chunks++;
  418. total_used += (chunk->amount_free_init - chunk->amount_free);
  419. total_allocation += chunk->amount_free_init;
  420. if (se_packet_mem.debug_use_canary){
  421. void *ptr = chunk->canary_last;
  422. int len;
  423. while (ptr != NULL) {
  424. ptr = emem_canary_next(se_packet_mem.canary, (guint8*)ptr, &len);
  425. if (ptr == (void *) -1)
  426. g_error("Memory corrupted");
  427. used_for_canaries += len;
  428. }
  429. }
  430. }
  431. if (num_chunks == 0) {
  432. fprintf (stderr, "No fully used chunks, nothing to do\n");
  433. return;
  434. }
  435. fprintf (stderr, "\n");
  436. fprintf (stderr, "---------- Allocations from the OS ----------\n");
  437. fprintf (stderr, "---- Headers ----\n");
  438. fprintf (stderr, "\t( Chunk header size: %10lu\n",
  439. sizeof(emem_chunk_t));
  440. fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
  441. fprintf (stderr, "\t-------------------------------------------\n");
  442. total_headers = sizeof(emem_chunk_t) * num_chunks;
  443. fprintf (stderr, "\t= %u bytes used for headers\n", total_headers);
  444. fprintf (stderr, "\n---- Buffer space ----\n");
  445. fprintf (stderr, "\tChunk allocation size: %10u\n",
  446. EMEM_PACKET_CHUNK_SIZE);
  447. fprintf (stderr, "\t* Number of chunks: %10u\n", num_chunks);
  448. fprintf (stderr, "\t-------------------------------------------\n");
  449. fprintf (stderr, "\t= %u (%u including guard pages) bytes used for buffers\n",
  450. total_allocation, EMEM_PACKET_CHUNK_SIZE * num_chunks);
  451. fprintf (stderr, "\t-------------------------------------------\n");
  452. total_space_allocated_from_os = (EMEM_PACKET_CHUNK_SIZE * num_chunks)
  453. + total_headers;
  454. fprintf (stderr, "Total bytes allocated from the OS: %u\n\n",
  455. total_space_allocated_from_os);
  456. for (i = 0; i < NUM_ALLOC_DIST; i++)
  457. num_allocs += allocations[i];
  458. fprintf (stderr, "---------- Allocations from the SE pool ----------\n");
  459. fprintf (stderr, " Number of SE allocations: %10u\n",
  460. num_allocs);
  461. fprintf (stderr, " Bytes used (incl. canaries): %10u\n",
  462. total_used);
  463. fprintf (stderr, " Bytes used for canaries: %10u\n",
  464. used_for_canaries);
  465. fprintf (stderr, "Bytes unused (wasted, excl. guard pages): %10u\n",
  466. total_allocation - total_used);
  467. fprintf (stderr, "Bytes unused (wasted, incl. guard pages): %10u\n\n",
  468. total_space_allocated_from_os - total_used);
  469. fprintf (stderr, "---------- Statistics ----------\n");
  470. fprintf (stderr, "Average SE allocation size (incl. canaries): %6.2f\n",
  471. (float)total_used/(float)num_allocs);
  472. fprintf (stderr, "Average SE allocation size (excl. canaries): %6.2f\n",
  473. (float)(total_used - used_for_canaries)/(float)num_allocs);
  474. fprintf (stderr, " Average wasted bytes per allocation: %6.2f\n",
  475. (total_allocation - total_used)/(float)num_allocs);
  476. total_space_wasted = (total_allocation - total_used)
  477. + (guint)(sizeof(emem_chunk_t) * num_chunks);
  478. fprintf (stderr, " Space used for headers + unused allocation: %8u\n",
  479. total_space_wasted);
  480. fprintf (stderr, "--> %% overhead/waste: %4.2f\n",
  481. 100 * (float)total_space_wasted/(float)total_space_allocated_from_os);
  482. fprintf (stderr, "\nAllocation distribution (sizes include canaries):\n");
  483. for (i = 0; i < (NUM_ALLOC_DIST-1); i++)
  484. fprintf (stderr, "size < %5d: %8u\n", 32<<i, allocations[i]);
  485. fprintf (stderr, "size > %5d: %8u\n", 32<<i, allocations[i]);
  486. }
  487. #endif
  488. static gboolean
  489. emem_verify_pointer_list(const emem_chunk_t *chunk_list, const void *ptr)
  490. {
  491. const gchar *cptr = (gchar *)ptr;
  492. const emem_chunk_t *chunk;
  493. for (chunk = chunk_list; chunk; chunk = chunk->next) {
  494. if (cptr >= (chunk->buf + chunk->free_offset_init) && cptr < (chunk->buf + chunk->free_offset))
  495. return TRUE;
  496. }
  497. return FALSE;
  498. }
  499. static gboolean
  500. emem_verify_pointer(const emem_pool_t *hdr, const void *ptr)
  501. {
  502. return emem_verify_pointer_list(hdr->free_list, ptr) || emem_verify_pointer_list(hdr->used_list, ptr);
  503. }
  504. gboolean
  505. ep_verify_pointer(const void *ptr)
  506. {
  507. if (ep_packet_mem.debug_verify_pointers)
  508. return emem_verify_pointer(&ep_packet_mem, ptr);
  509. else
  510. return FALSE;
  511. }
  512. gboolean
  513. se_verify_pointer(const void *ptr)
  514. {
  515. if (se_packet_mem.debug_verify_pointers)
  516. return emem_verify_pointer(&se_packet_mem, ptr);
  517. else
  518. return FALSE;
  519. }
  520. static void
  521. emem_scrub_memory(char *buf, size_t size, gboolean alloc)
  522. {
  523. guint scrubbed_value;
  524. size_t offset;
  525. if (!debug_use_memory_scrubber)
  526. return;
  527. if (alloc) /* this memory is being allocated */
  528. scrubbed_value = 0xBADDCAFE;
  529. else /* this memory is being freed */
  530. scrubbed_value = 0xDEADBEEF;
  531. /* We shouldn't need to check the alignment of the starting address
  532. * since this is malloc'd memory (or 'pagesize' bytes into malloc'd
  533. * memory).
  534. */
  535. /* XXX - if the above is *NOT* true, we should use memcpy here,
  536. * in order to avoid problems on alignment-sensitive platforms, e.g.
  537. * http://stackoverflow.com/questions/108866/is-there-memset-that-accepts-integers-larger-than-char
  538. */
  539. for (offset = 0; offset + sizeof(guint) <= size; offset += sizeof(guint))
  540. *(guint*)(void*)(buf+offset) = scrubbed_value;
  541. /* Initialize the last bytes, if any */
  542. if (offset < size) {
  543. *(guint8*)(buf+offset) = scrubbed_value >> 24;
  544. offset++;
  545. if (offset < size) {
  546. *(guint8*)(buf+offset) = (scrubbed_value >> 16) & 0xFF;
  547. offset++;
  548. if (offset < size) {
  549. *(guint8*)(buf+offset) = (scrubbed_value >> 8) & 0xFF;
  550. }
  551. }
  552. }
  553. }
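/*
* The scrub patterns make stale pointers easy to spot in a debugger:
* memory read before being initialized shows 0xBADDCAFE, while memory
* used after being released back to the pool shows 0xDEADBEEF.
*/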
  554. static emem_chunk_t *
  555. emem_create_chunk(size_t size)
  556. {
  557. emem_chunk_t *npc;
  558. npc = g_new(emem_chunk_t, 1);
  559. npc->next = NULL;
  560. npc->canary_last = NULL;
  561. #if defined (_WIN32)
  562. /*
  563. * MSDN documents VirtualAlloc/VirtualProtect at
  564. * http://msdn.microsoft.com/library/en-us/memory/base/creating_guard_pages.asp
  565. */
  566. /* XXX - is MEM_COMMIT|MEM_RESERVE correct? */
  567. npc->buf = (char *)VirtualAlloc(NULL, size,
  568. MEM_COMMIT|MEM_RESERVE, PAGE_READWRITE);
  569. if (npc->buf == NULL) {
  570. g_free(npc);
  571. if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
  572. abort();
  573. else
  574. THROW(OutOfMemoryError);
  575. }
  576. #elif defined(USE_GUARD_PAGES)
  577. npc->buf = (char *)mmap(NULL, size,
  578. PROT_READ|PROT_WRITE, ANON_PAGE_MODE, ANON_FD, 0);
  579. if (npc->buf == MAP_FAILED) {
  580. g_free(npc);
  581. if (getenv("WIRESHARK_ABORT_ON_OUT_OF_MEMORY"))
  582. abort();
  583. else
  584. THROW(OutOfMemoryError);
  585. }
  586. #else /* Is there a draft in here? */
  587. npc->buf = g_malloc(size);
  588. /* g_malloc() can't fail */
  589. #endif
  590. #ifdef SHOW_EMEM_STATS
  591. total_no_chunks++;
  592. #endif
  593. npc->amount_free = npc->amount_free_init = (unsigned int) size;
  594. npc->free_offset = npc->free_offset_init = 0;
  595. return npc;
  596. }
  597. static emem_chunk_t *
  598. emem_create_chunk_gp(size_t size)
  599. {
  600. #if defined (_WIN32)
  601. BOOL ret;
  602. char *buf_end, *prot1, *prot2;
  603. DWORD oldprot;
  604. #elif defined(USE_GUARD_PAGES)
  605. int ret;
  606. char *buf_end, *prot1, *prot2;
  607. #endif /* _WIN32 / USE_GUARD_PAGES */
  608. emem_chunk_t *npc;
  609. npc = emem_create_chunk(size);
  610. #if defined (_WIN32)
  611. buf_end = npc->buf + size;
  612. /* Align our guard pages on page-sized boundaries */
  613. prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
  614. prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
  615. ret = VirtualProtect(prot1, pagesize, PAGE_NOACCESS, &oldprot);
  616. g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
  617. ret = VirtualProtect(prot2, pagesize, PAGE_NOACCESS, &oldprot);
  618. g_assert(ret != 0 || versinfo.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS);
  619. npc->amount_free_init = (unsigned int) (prot2 - prot1 - pagesize);
  620. npc->free_offset_init = (unsigned int) (prot1 - npc->buf) + pagesize;
  621. #elif defined(USE_GUARD_PAGES)
  622. buf_end = npc->buf + size;
  623. /* Align our guard pages on page-sized boundaries */
  624. prot1 = (char *) ((((intptr_t) npc->buf + pagesize - 1) / pagesize) * pagesize);
  625. prot2 = (char *) ((((intptr_t) buf_end - (1 * pagesize)) / pagesize) * pagesize);
  626. ret = mprotect(prot1, pagesize, PROT_NONE);
  627. g_assert(ret != -1);
  628. ret = mprotect(prot2, pagesize, PROT_NONE);
  629. g_assert(ret != -1);
  630. npc->amount_free_init = (unsigned int)(prot2 - prot1 - pagesize);
  631. npc->free_offset_init = (unsigned int)((prot1 - npc->buf) + pagesize);
  632. #else
  633. npc->amount_free_init = size;
  634. npc->free_offset_init = 0;
  635. #endif /* USE_GUARD_PAGES */
  636. npc->amount_free = npc->amount_free_init;
  637. npc->free_offset = npc->free_offset_init;
  638. return npc;
  639. }
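/*
* Resulting chunk layout when guard pages are in use (not to scale):
*
* buf prot1 prot2 buf+size
* |..|XX guard page XX| usable allocation space |XX guard page XX|..|
*
* free_offset_init points just past the first guard page, and
* amount_free_init is the gap between the two guard pages, so every
* allocation lands strictly between them; an overrun past either end
* of the chunk faults immediately.
*/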
  640. static void *
  641. emem_alloc_chunk(size_t size, emem_pool_t *mem)
  642. {
  643. void *buf;
  644. size_t asize = size;
  645. gboolean use_canary = mem->debug_use_canary;
  646. guint8 pad;
  647. emem_chunk_t *free_list;
  648. /* Allocate room for at least 8 bytes of canary plus some padding
  649. * so the canary ends on an 8-byte boundary.
  650. * But first add the room needed for the pointer to the next canary
  651. * (so the entire allocation will end on an 8-byte boundary).
  652. */
  653. if (use_canary) {
  654. asize += sizeof(void *);
  655. pad = emem_canary_pad(asize);
  656. } else
  657. pad = (WS_MEM_ALIGN - (asize & (WS_MEM_ALIGN-1))) & (WS_MEM_ALIGN-1);
  658. asize += pad;
  659. #ifdef SHOW_EMEM_STATS
  660. /* Do this check here so we can include the canary size */
  661. if (mem == &se_packet_mem) {
  662. if (asize < 32)
  663. allocations[0]++;
  664. else if (asize < 64)
  665. allocations[1]++;
  666. else if (asize < 128)
  667. allocations[2]++;
  668. else if (asize < 256)
  669. allocations[3]++;
  670. else if (asize < 512)
  671. allocations[4]++;
  672. else if (asize < 1024)
  673. allocations[5]++;
  674. else if (asize < 2048)
  675. allocations[6]++;
  676. else if (asize < 4096)
  677. allocations[7]++;
  678. else if (asize < 8192)
  679. allocations[8]++;
  680. else if (asize < 16384)
  681. allocations[9]++;
  682. else
  683. allocations[(NUM_ALLOC_DIST-1)]++;
  684. }
  685. #endif
  686. /* make sure we don't try to allocate too much (arbitrary limit) */
  687. DISSECTOR_ASSERT(size<(EMEM_PACKET_CHUNK_SIZE>>2));
  688. if (!mem->free_list)
  689. mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
  690. /* oops, we need to allocate more memory to serve this request
  691. * than we have free. move this node to the used list and try again
  692. */
  693. if(asize > mem->free_list->amount_free) {
  694. emem_chunk_t *npc;
  695. npc=mem->free_list;
  696. mem->free_list=mem->free_list->next;
  697. npc->next=mem->used_list;
  698. mem->used_list=npc;
  699. if (!mem->free_list)
  700. mem->free_list = emem_create_chunk_gp(EMEM_PACKET_CHUNK_SIZE);
  701. }
  702. free_list = mem->free_list;
  703. buf = free_list->buf + free_list->free_offset;
  704. free_list->amount_free -= (unsigned int) asize;
  705. free_list->free_offset += (unsigned int) asize;
  706. if (use_canary) {
  707. char *cptr = (char *)buf + size;
  708. memcpy(cptr, mem->canary, pad-1);
  709. cptr[pad-1] = '\0';
  710. memcpy(cptr + pad, &free_list->canary_last, sizeof(void *));
  711. free_list->canary_last = cptr;
  712. }
  713. return buf;
  714. }
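/*
* With canaries enabled, each allocation carved out of a chunk looks like:
*
* | size bytes of user data | pad-1 canary bytes | NUL | previous canary_last |
*
* so each chunk keeps a singly linked list of canaries, rooted at
* free_list->canary_last, which emem_free_all() later walks to detect
* buffer overruns.
*/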
  715. static void *
  716. emem_alloc_glib(size_t size, emem_pool_t *mem)
  717. {
  718. emem_chunk_t *npc;
  719. npc=g_new(emem_chunk_t, 1);
  720. npc->next=mem->used_list;
  721. npc->buf=(char *)g_malloc(size);
  722. npc->canary_last = NULL;
  723. mem->used_list=npc;
  724. /* There's no padding/alignment involved (from our point of view) when
  725. * we fetch the memory directly from the system pool, so WYSIWYG */
  726. npc->amount_free = npc->free_offset_init = 0;
  727. npc->free_offset = npc->amount_free_init = (unsigned int) size;
  728. return npc->buf;
  729. }
  730. /* allocate 'size' amount of memory. */
  731. static void *
  732. emem_alloc(size_t size, emem_pool_t *mem)
  733. {
  734. void *buf;
  735. #if 0
  736. /* For testing wmem, effectively redirects most emem memory to wmem.
  737. * You will also have to comment out several assertions in wmem_core.c,
  738. * specifically anything g_assert(allocator->in_scope), since it is much
  739. * stricter about when it is permitted to be called. */
  740. if (mem == &ep_packet_mem) {
  741. return wmem_alloc(wmem_packet_scope(), size);
  742. }
  743. else if (mem == &se_packet_mem) {
  744. return wmem_alloc(wmem_file_scope(), size);
  745. }
  746. #endif
  747. buf = mem->memory_alloc(size, mem);
  748. /* XXX - this is a waste of time if the allocator function is going to
  749. * memset this straight back to 0.
  750. */
  751. emem_scrub_memory((char *)buf, size, TRUE);
  752. return buf;
  753. }
  754. /* allocate 'size' amount of memory with an allocation lifetime until the
  755. * next packet.
  756. */
  757. void *
  758. ep_alloc(size_t size)
  759. {
  760. return emem_alloc(size, &ep_packet_mem);
  761. }
  762. /* allocate 'size' amount of memory with an allocation lifetime until the
  763. * next capture.
  764. */
  765. void *
  766. se_alloc(size_t size)
  767. {
  768. return emem_alloc(size, &se_packet_mem);
  769. }
  770. void *
  771. ep_alloc0(size_t size)
  772. {
  773. return memset(ep_alloc(size),'\0',size);
  774. }
  775. void *
  776. se_alloc0(size_t size)
  777. {
  778. return memset(se_alloc(size),'\0',size);
  779. }
  780. static gchar *
  781. emem_strdup(const gchar *src, void *allocator(size_t))
  782. {
  783. guint len;
  784. gchar *dst;
  785. /* If str is NULL, just return the string "<NULL>" so that the callers don't
  786. * have to bother checking it.
  787. */
  788. if(!src)
  789. src = "<NULL>";
  790. len = (guint) strlen(src);
  791. dst = (gchar *)memcpy(allocator(len+1), src, len+1);
  792. return dst;
  793. }
  794. gchar *
  795. ep_strdup(const gchar *src)
  796. {
  797. return emem_strdup(src, ep_alloc);
  798. }
  799. gchar *
  800. se_strdup(const gchar *src)
  801. {
  802. return emem_strdup(src, se_alloc);
  803. }
  804. static gchar *
  805. emem_strndup(const gchar *src, size_t len, void *allocator(size_t))
  806. {
  807. gchar *dst = (gchar *)allocator(len+1);
  808. guint i;
  809. for (i = 0; (i < len) && src[i]; i++)
  810. dst[i] = src[i];
  811. dst[i] = '\0';
  812. return dst;
  813. }
  814. gchar *
  815. ep_strndup(const gchar *src, size_t len)
  816. {
  817. return emem_strndup(src, len, ep_alloc);
  818. }
  819. gchar *
  820. se_strndup(const gchar *src, size_t len)
  821. {
  822. return emem_strndup(src, len, se_alloc);
  823. }
  824. void *
  825. ep_memdup(const void* src, size_t len)
  826. {
  827. return memcpy(ep_alloc(len), src, len);
  828. }
  829. void *
  830. se_memdup(const void* src, size_t len)
  831. {
  832. return memcpy(se_alloc(len), src, len);
  833. }
  834. static gchar *
  835. emem_strdup_vprintf(const gchar *fmt, va_list ap, void *allocator(size_t))
  836. {
  837. va_list ap2;
  838. gsize len;
  839. gchar* dst;
  840. G_VA_COPY(ap2, ap);
  841. len = g_printf_string_upper_bound(fmt, ap);
  842. dst = (gchar *)allocator(len+1);
  843. g_vsnprintf (dst, (gulong) len, fmt, ap2);
  844. va_end(ap2);
  845. return dst;
  846. }
  847. gchar *
  848. ep_strdup_vprintf(const gchar *fmt, va_list ap)
  849. {
  850. return emem_strdup_vprintf(fmt, ap, ep_alloc);
  851. }
  852. gchar *
  853. se_strdup_vprintf(const gchar* fmt, va_list ap)
  854. {
  855. return emem_strdup_vprintf(fmt, ap, se_alloc);
  856. }
  857. gchar *
  858. ep_strdup_printf(const gchar *fmt, ...)
  859. {
  860. va_list ap;
  861. gchar *dst;
  862. va_start(ap, fmt);
  863. dst = ep_strdup_vprintf(fmt, ap);
  864. va_end(ap);
  865. return dst;
  866. }
  867. gchar *
  868. se_strdup_printf(const gchar *fmt, ...)
  869. {
  870. va_list ap;
  871. gchar *dst;
  872. va_start(ap, fmt);
  873. dst = se_strdup_vprintf(fmt, ap);
  874. va_end(ap);
  875. return dst;
  876. }
  877. gchar **
  878. ep_strsplit(const gchar* string, const gchar* sep, int max_tokens)
  879. {
  880. gchar* splitted;
  881. gchar* s;
  882. guint tokens;
  883. guint str_len;
  884. guint sep_len;
  885. guint i;
  886. gchar** vec;
  887. enum { AT_START, IN_PAD, IN_TOKEN } state;
  888. guint curr_tok = 0;
  889. if ( ! string
  890. || ! sep
  891. || ! sep[0])
  892. return NULL;
  893. s = splitted = ep_strdup(string);
  894. str_len = (guint) strlen(splitted);
  895. sep_len = (guint) strlen(sep);
  896. if (max_tokens < 1) max_tokens = INT_MAX;
  897. tokens = 1;
  898. while (tokens <= (guint)max_tokens && ( s = strstr(s,sep) )) {
  899. tokens++;
  900. for(i=0; i < sep_len; i++ )
  901. s[i] = '\0';
  902. s += sep_len;
  903. }
  904. vec = ep_alloc_array(gchar*,tokens+1);
  905. state = AT_START;
  906. for (i=0; i< str_len; i++) {
  907. switch(state) {
  908. case AT_START:
  909. switch(splitted[i]) {
  910. case '\0':
  911. state = IN_PAD;
  912. continue;
  913. default:
  914. vec[curr_tok] = &(splitted[i]);
  915. curr_tok++;
  916. state = IN_TOKEN;
  917. continue;
  918. }
  919. case IN_TOKEN:
  920. switch(splitted[i]) {
  921. case '\0':
  922. state = IN_PAD;
  923. default:
  924. continue;
  925. }
  926. case IN_PAD:
  927. switch(splitted[i]) {
  928. default:
  929. vec[curr_tok] = &(splitted[i]);
  930. curr_tok++;
  931. state = IN_TOKEN;
  932. case '\0':
  933. continue;
  934. }
  935. }
  936. }
  937. vec[curr_tok] = NULL;
  938. return vec;
  939. }
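/*
* Note that, unlike g_strsplit(), this routine does not create empty
* string elements for consecutive separators. Illustrative example:
*
* gchar **v = ep_strsplit("a,,b", ",", -1);
* // v[0] = "a", v[1] = "b", v[2] = NULL
*/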
  940. gchar *
  941. ep_strconcat(const gchar *string1, ...)
  942. {
  943. gsize l;
  944. va_list args;
  945. gchar *s;
  946. gchar *concat;
  947. gchar *ptr;
  948. if (!string1)
  949. return NULL;
  950. l = 1 + strlen(string1);
  951. va_start(args, string1);
  952. s = va_arg(args, gchar*);
  953. while (s) {
  954. l += strlen(s);
  955. s = va_arg(args, gchar*);
  956. }
  957. va_end(args);
  958. concat = (gchar *)ep_alloc(l);
  959. ptr = concat;
  960. ptr = g_stpcpy(ptr, string1);
  961. va_start(args, string1);
  962. s = va_arg(args, gchar*);
  963. while (s) {
  964. ptr = g_stpcpy(ptr, s);
  965. s = va_arg(args, gchar*);
  966. }
  967. va_end(args);
  968. return concat;
  969. }
  970. /* release all allocated memory back to the pool. */
  971. static void
  972. emem_free_all(emem_pool_t *mem)
  973. {
  974. gboolean use_chunks = mem->debug_use_chunks;
  975. emem_chunk_t *npc;
  976. emem_tree_t *tree_list;
  977. /* move all used chunks over to the free list */
  978. while(mem->used_list){
  979. npc=mem->used_list;
  980. mem->used_list=mem->used_list->next;
  981. npc->next=mem->free_list;
  982. mem->free_list=npc;
  983. }
  984. /* clear them all out */
  985. npc = mem->free_list;
  986. while (npc != NULL) {
  987. if (use_chunks) {
  988. while (npc->canary_last != NULL) {
  989. npc->canary_last = emem_canary_next(mem->canary, (guint8 *)npc->canary_last, NULL);
  990. /* XXX, check if canary_last is inside allocated memory? */
  991. if (npc->canary_last == (void *) -1)
  992. g_error("Memory corrupted");
  993. }
  994. emem_scrub_memory((npc->buf + npc->free_offset_init),
  995. (npc->free_offset - npc->free_offset_init),
  996. FALSE);
  997. npc->amount_free = npc->amount_free_init;
  998. npc->free_offset = npc->free_offset_init;
  999. npc = npc->next;
  1000. } else {
  1001. emem_chunk_t *next = npc->next;
  1002. emem_scrub_memory(npc->buf, npc->amount_free_init, FALSE);
  1003. g_free(npc->buf);
  1004. g_free(npc);
  1005. npc = next;
  1006. }
  1007. }
  1008. if (!use_chunks) {
  1009. /* We've freed all this memory already */
  1010. mem->free_list = NULL;
  1011. }
  1012. /* release/reset all allocated trees */
  1013. for(tree_list=mem->trees;tree_list;tree_list=tree_list->next){
  1014. tree_list->tree=NULL;
  1015. }
  1016. }
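/*
* In chunked mode the chunks themselves are recycled rather than returned
* to the OS: used chunks move back onto the free list with their offsets
* reset, which keeps per-packet allocation cheap. In the g_malloc()
* fallback mode, every allocation really is released back to the system
* here.
*/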
  1017. /* release all allocated memory back to the pool. */
  1018. void
  1019. ep_free_all(void)
  1020. {
  1021. emem_free_all(&ep_packet_mem);
  1022. }
  1023. /* release all allocated memory back to the pool. */
  1024. void
  1025. se_free_all(void)
  1026. {
  1027. #ifdef SHOW_EMEM_STATS
  1028. print_alloc_stats();
  1029. #endif
  1030. emem_free_all(&se_packet_mem);
  1031. }
  1032. ep_stack_t
  1033. ep_stack_new(void) {
  1034. ep_stack_t s = ep_new(struct _ep_stack_frame_t*);
  1035. *s = ep_new0(struct _ep_stack_frame_t);
  1036. return s;
  1037. }
  1038. /* for ep_stack_t we'll keep the popped frames so we reuse them instead
  1039. of allocating new ones.
  1040. */
  1041. void *
  1042. ep_stack_push(ep_stack_t stack, void* data)
  1043. {
  1044. struct _ep_stack_frame_t* frame;
  1045. struct _ep_stack_frame_t* head = (*stack);
  1046. if (head->above) {
  1047. frame = head->above;
  1048. } else {
  1049. frame = ep_new(struct _ep_stack_frame_t);
  1050. head->above = frame;
  1051. frame->below = head;
  1052. frame->above = NULL;
  1053. }
  1054. frame->payload = data;
  1055. (*stack) = frame;
  1056. return data;
  1057. }
  1058. void *
  1059. ep_stack_pop(ep_stack_t stack)
  1060. {
  1061. if ((*stack)->below) {
  1062. (*stack) = (*stack)->below;
  1063. return (*stack)->above->payload;
  1064. } else {
  1065. return NULL;
  1066. }
  1067. }
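/*
* Illustrative usage (a sketch, assuming the declarations in emem.h):
*
* ep_stack_t st = ep_stack_new();
* ep_stack_push(st, some_ptr);
* void *p = ep_stack_pop(st); // returns some_ptr
*
* Popped frames are kept on the stack and reused by later pushes.
*/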
  1068. emem_tree_t *
  1069. se_tree_create(int type, const char *name)
  1070. {
  1071. emem_tree_t *tree_list;
  1072. tree_list=(emem_tree_t *)g_malloc(sizeof(emem_tree_t));
  1073. tree_list->next=se_packet_mem.trees;
  1074. tree_list->type=type;
  1075. tree_list->tree=NULL;
  1076. tree_list->name=name;
  1077. tree_list->malloc=se_alloc;
  1078. se_packet_mem.trees=tree_list;
  1079. return tree_list;
  1080. }
  1081. void *
  1082. emem_tree_lookup32(emem_tree_t *se_tree, guint32 key)
  1083. {
  1084. emem_tree_node_t *node;
  1085. node=se_tree->tree;
  1086. while(node){
  1087. if(key==node->key32){
  1088. return node->data;
  1089. }
  1090. if(key<node->key32){
  1091. node=node->left;
  1092. continue;
  1093. }
  1094. if(key>node->key32){
  1095. node=node->right;
  1096. continue;
  1097. }
  1098. }
  1099. return NULL;
  1100. }
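/*
* Illustrative usage (a sketch, assuming the declarations in emem.h):
*
* emem_tree_t *t = se_tree_create(EMEM_TREE_TYPE_RED_BLACK, "example");
* emem_tree_insert32(t, 42, value_ptr);
* void *v = emem_tree_lookup32(t, 42); // v == value_ptr
*/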
  1101. void *
  1102. emem_tree_lookup32_le(emem_tree_t *se_tree, guint32 key)
  1103. {
  1104. emem_tree_node_t *node;
  1105. node=se_tree->tree;
  1106. if(!node){
  1107. return NULL;
  1108. }
  1109. while(node){
  1110. if(key==node->key32){
  1111. return node->data;
  1112. }
  1113. if(key<node->key32){
  1114. if(node->left){
  1115. node=node->left;
  1116. continue;
  1117. } else {
  1118. break;
  1119. }
  1120. }
  1121. if(key>node->key32){
  1122. if(node->right){
  1123. node=node->right;
  1124. continue;
  1125. } else {
  1126. break;
  1127. }
  1128. }
  1129. }
  1130. if(!node){
  1131. return NULL;
  1132. }
  1133. /* If we are still at the root of the tree, then either this
  1134. * node's key is smaller than the search key, in which case we
  1135. * return this node's data, or there is no smaller key in the
  1136. * tree and we return NULL.
  1137. */
  1138. if(!node->parent){
  1139. if(key>node->key32){
  1140. return node->data;
  1141. } else {
  1142. return NULL;
  1143. }
  1144. }
  1145. if(node->parent->left==node){
  1146. /* left child */
  1147. if(key>node->key32){
  1148. /* if this is a left child and its key is smaller than
  1149. * the search key, then this is the node we want.
  1150. */
  1151. return node->data;
  1152. } else {
  1153. /* if this is a left child and its key is bigger than
  1154. * the search key, we have to check if any
  1155. * of our ancestors are smaller than the search key.
  1156. */
  1157. while(node){
  1158. if(key>node->key32){
  1159. return node->data;
  1160. }
  1161. node=node->parent;
  1162. }
  1163. return NULL;
  1164. }
  1165. } else {
  1166. /* right child */
  1167. if(node->key32<key){
  1168. /* if this is the right child and its key is smaller
  1169. * than the search key then this is the one we want.
  1170. */
  1171. return node->data;
  1172. } else {
  1173. /* if this is the right child and its key is larger
  1174. * than the search key then our parent is the one we
  1175. * want.
  1176. */
  1177. return node->parent->data;
  1178. }
  1179. }
  1180. }
  1181. static inline emem_tree_node_t *
  1182. emem_tree_parent(emem_tree_node_t *node)
  1183. {
  1184. return node->parent;
  1185. }
  1186. static inline emem_tree_node_t *
  1187. emem_tree_grandparent(emem_tree_node_t *node)
  1188. {
  1189. emem_tree_node_t *parent;
  1190. parent=emem_tree_parent(node);
  1191. if(parent){
  1192. return parent->parent;
  1193. }
  1194. return NULL;
  1195. }
  1196. static inline emem_tree_node_t *
  1197. emem_tree_uncle(emem_tree_node_t *node)
  1198. {
  1199. emem_tree_node_t *parent, *grandparent;
  1200. parent=emem_tree_parent(node);
  1201. if(!parent){
  1202. return NULL;
  1203. }
  1204. grandparent=emem_tree_parent(parent);
  1205. if(!grandparent){
  1206. return NULL;
  1207. }
  1208. if(parent==grandparent->left){
  1209. return grandparent->right;
  1210. }
  1211. return grandparent->left;
  1212. }
  1213. static inline void rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node);
  1214. static inline void rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node);
  1215. static inline void
  1216. rotate_left(emem_tree_t *se_tree, emem_tree_node_t *node)
  1217. {
  1218. if(node->parent){
  1219. if(node->parent->left==node){
  1220. node->parent->left=node->right;
  1221. } else {
  1222. node->parent->right=node->right;
  1223. }
  1224. } else {
  1225. se_tree->tree=node->right;
  1226. }
  1227. node->right->parent=node->parent;
  1228. node->parent=node->right;
  1229. node->right=node->right->left;
  1230. if(node->right){
  1231. node->right->parent=node;
  1232. }
  1233. node->parent->left=node;
  1234. }
  1235. static inline void
  1236. rotate_right(emem_tree_t *se_tree, emem_tree_node_t *node)
  1237. {
  1238. if(node->parent){
  1239. if(node->parent->left==node){
  1240. node->parent->left=node->left;
  1241. } else {
  1242. node->parent->right=node->left;
  1243. }
  1244. } else {
  1245. se_tree->tree=node->left;
  1246. }
  1247. node->left->parent=node->parent;
  1248. node->parent=node->left;
  1249. node->left=node->left->right;
  1250. if(node->left){
  1251. node->left->parent=node;
  1252. }
  1253. node->parent->right=node;
  1254. }
  1255. static inline void
  1256. rb_insert_case5(emem_tree_t *se_tree, emem_tree_node_t *node)
  1257. {
  1258. emem_tree_node_t *grandparent;
  1259. emem_tree_node_t *parent;
  1260. parent=emem_tree_parent(node);
  1261. grandparent=emem_tree_parent(parent);
  1262. parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
  1263. grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
  1264. if( (node==parent->left) && (parent==grandparent->left) ){
  1265. rotate_right(se_tree, grandparent);
  1266. } else {
  1267. rotate_left(se_tree, grandparent);
  1268. }
  1269. }
  1270. static inline void
  1271. rb_insert_case4(emem_tree_t *se_tree, emem_tree_node_t *node)
  1272. {
  1273. emem_tree_node_t *grandparent;
  1274. emem_tree_node_t *parent;
  1275. parent=emem_tree_parent(node);
  1276. grandparent=emem_tree_parent(parent);
  1277. if(!grandparent){
  1278. return;
  1279. }
  1280. if( (node==parent->right) && (parent==grandparent->left) ){
  1281. rotate_left(se_tree, parent);
  1282. node=node->left;
  1283. } else if( (node==parent->left) && (parent==grandparent->right) ){
  1284. rotate_right(se_tree, parent);
  1285. node=node->right;
  1286. }
  1287. rb_insert_case5(se_tree, node);
  1288. }
  1289. static inline void
  1290. rb_insert_case3(emem_tree_t *se_tree, emem_tree_node_t *node)
  1291. {
  1292. emem_tree_node_t *grandparent;
  1293. emem_tree_node_t *parent;
  1294. emem_tree_node_t *uncle;
  1295. uncle=emem_tree_uncle(node);
  1296. if(uncle && (uncle->u.rb_color==EMEM_TREE_RB_COLOR_RED)){
  1297. parent=emem_tree_parent(node);
  1298. parent->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
  1299. uncle->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
  1300. grandparent=emem_tree_grandparent(node);
  1301. grandparent->u.rb_color=EMEM_TREE_RB_COLOR_RED;
  1302. rb_insert_case1(se_tree, grandparent);
  1303. } else {
  1304. rb_insert_case4(se_tree, node);
  1305. }
  1306. }
  1307. static inline void
  1308. rb_insert_case2(emem_tree_t *se_tree, emem_tree_node_t *node)
  1309. {
  1310. emem_tree_node_t *parent;
  1311. parent=emem_tree_parent(node);
  1312. /* parent is always non-NULL here */
  1313. if(parent->u.rb_color==EMEM_TREE_RB_COLOR_BLACK){
  1314. return;
  1315. }
  1316. rb_insert_case3(se_tree, node);
  1317. }
  1318. static inline void
  1319. rb_insert_case1(emem_tree_t *se_tree, emem_tree_node_t *node)
  1320. {
  1321. emem_tree_node_t *parent;
  1322. parent=emem_tree_parent(node);
  1323. if(!parent){
  1324. node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
  1325. return;
  1326. }
  1327. rb_insert_case2(se_tree, node);
  1328. }
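/*
* The five rb_insert_case*() helpers above implement the standard
* red-black insertion fixup: case 1 handles a new root (color it black),
* case 2 a black parent (nothing to do), case 3 recolors and recurses
* when the uncle is red, and cases 4 and 5 rotate when the uncle is
* black, restoring the red-black invariants after each insertion.
*/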
  1329. /* insert a new node in the tree. if this node matches an already existing node
  1330. * then just replace the data for that node */
  1331. void
  1332. emem_tree_insert32(emem_tree_t *se_tree, guint32 key, void *data)
  1333. {
  1334. emem_tree_node_t *node;
  1335. node=se_tree->tree;
  1336. /* is this the first node ?*/
  1337. if(!node){
  1338. node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
  1339. switch(se_tree->type){
  1340. case EMEM_TREE_TYPE_RED_BLACK:
  1341. node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
  1342. break;
  1343. }
  1344. node->parent=NULL;
  1345. node->left=NULL;
  1346. node->right=NULL;
  1347. node->key32=key;
  1348. node->data=data;
  1349. node->u.is_subtree = EMEM_TREE_NODE_IS_DATA;
  1350. se_tree->tree=node;
  1351. return;
  1352. }
  1353. /* it was not the new root so walk the tree until we find where to
  1354. * insert this new leaf.
  1355. */
  1356. while(1){
  1357. /* this node already exists, so just replace the data pointer*/
  1358. if(key==node->key32){
  1359. node->data=data;
  1360. return;
  1361. }
  1362. if(key<node->key32) {
  1363. if(!node->left){
  1364. /* new node to the left */
  1365. emem_tree_node_t *new_node;
  1366. new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
  1367. node->left=new_node;
  1368. new_node->parent=node;
  1369. new_node->left=NULL;
  1370. new_node->right=NULL;
  1371. new_node->key32=key;
  1372. new_node->data=data;
  1373. new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
  1374. node=new_node;
  1375. break;
  1376. }
  1377. node=node->left;
  1378. continue;
  1379. }
  1380. if(key>node->key32) {
  1381. if(!node->right){
  1382. /* new node to the right */
  1383. emem_tree_node_t *new_node;
  1384. new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
  1385. node->right=new_node;
  1386. new_node->parent=node;
  1387. new_node->left=NULL;
  1388. new_node->right=NULL;
  1389. new_node->key32=key;
  1390. new_node->data=data;
  1391. new_node->u.is_subtree=EMEM_TREE_NODE_IS_DATA;
  1392. node=new_node;
  1393. break;
  1394. }
  1395. node=node->right;
  1396. continue;
  1397. }
  1398. }
  1399. /* node will now point to the newly created node */
  1400. switch(se_tree->type){
  1401. case EMEM_TREE_TYPE_RED_BLACK:
  1402. node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
  1403. rb_insert_case1(se_tree, node);
  1404. break;
  1405. }
  1406. }
  1407. static void *
  1408. lookup_or_insert32(emem_tree_t *se_tree, guint32 key, void*(*func)(void*),void* ud, int is_subtree)
  1409. {
  1410. emem_tree_node_t *node;
  1411. node=se_tree->tree;
  1412. /* is this the first node ?*/
  1413. if(!node){
  1414. node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
  1415. switch(se_tree->type){
  1416. case EMEM_TREE_TYPE_RED_BLACK:
  1417. node->u.rb_color=EMEM_TREE_RB_COLOR_BLACK;
  1418. break;
  1419. }
  1420. node->parent=NULL;
  1421. node->left=NULL;
  1422. node->right=NULL;
  1423. node->key32=key;
  1424. node->data= func(ud);
  1425. node->u.is_subtree = is_subtree;
  1426. se_tree->tree=node;
  1427. return node->data;
  1428. }
  1429. /* it was not the new root so walk the tree until we find where to
  1430. * insert this new leaf.
  1431. */
  1432. while(1){
  1433. /* this node already exists, so just return the data pointer*/
  1434. if(key==node->key32){
  1435. return node->data;
  1436. }
  1437. if(key<node->key32) {
  1438. if(!node->left){
  1439. /* new node to the left */
  1440. emem_tree_node_t *new_node;
  1441. new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
  1442. node->left=new_node;
  1443. new_node->parent=node;
  1444. new_node->left=NULL;
  1445. new_node->right=NULL;
  1446. new_node->key32=key;
  1447. new_node->data= func(ud);
  1448. new_node->u.is_subtree = is_subtree;
  1449. node=new_node;
  1450. break;
  1451. }
  1452. node=node->left;
  1453. continue;
  1454. }
  1455. if(key>node->key32) {
  1456. if(!node->right){
  1457. /* new node to the right */
  1458. emem_tree_node_t *new_node;
  1459. new_node=(emem_tree_node_t *)se_tree->malloc(sizeof(emem_tree_node_t));
  1460. node->right=new_node;
  1461. new_node->parent=node;
  1462. new_node->left=NULL;
  1463. new_node->right=NULL;
  1464. new_node->key32=key;
  1465. new_node->data= func(ud);
  1466. new_node->u.is_subtree = is_subtree;
  1467. node=new_node;
  1468. break;
  1469. }
  1470. node=node->right;
  1471. continue;
  1472. }
  1473. }
  1474. /* node will now point to the newly created node */
  1475. switch(se_tree->type){
  1476. case EMEM_TREE_TYPE_RED_BLACK:
  1477. node->u.rb_color=EMEM_TREE_RB_COLOR_RED;
  1478. rb_insert_case1(se_tree, node);
  1479. break;
  1480. }
  1481. return node->data;
  1482. }
  1483. /* When the se data is released, this entire tree will disappear, as if it
  1484. * had never existed, including all metadata associated with the tree.
  1485. */
  1486. emem_tree_t *
  1487. se_tree_create_non_persistent(int type, const char *name)
  1488. {
  1489. emem_tree_t *tree_list;
  1490. tree_list=(emem_tree_t *)se_alloc(sizeof(emem_tree_t));
  1491. tree_list->next=NULL;
  1492. tree_list->type=type;
  1493. tree_list->tree=NULL;
  1494. tree_list->name=name;
  1495. tree_list->malloc=se_alloc;
  1496. return tree_list;
  1497. }
  1498. /* This tree is PErmanent and will never be released
  1499. */
  1500. emem_tree_t *
  1501. pe_tree_create(int type, const char *name)
  1502. {
  1503. emem_tree_t *tree_list;
  1504. tree_list=g_new(emem_tree_t, 1);
  1505. tree_list->next=NULL;
  1506. tree_list->type=type;
  1507. tree_list->tree=NULL;
  1508. tree_list->name=name;
  1509. tree_list->malloc=(void *(*)(size_t)) g_malloc;
  1510. return tree_list;
  1511. }
  1512. /* create another (sub)tree using the same memory allocation scope
  1513. * as the parent tree.
  1514. */
  1515. static emem_tree_t *
  1516. emem_tree_create_subtree(emem_tree_t *parent_tree, const char *name)
  1517. {
  1518. emem_tree_t *tree_list;
  1519. tree_list=(emem_tree_t *)parent_tree->malloc(sizeof(emem_tree_t));
  1520. tree_list->next=NULL;
  1521. tree_list->type=parent_tree->type;
  1522. tree_list->tree=NULL;
  1523. tree_list->name=name;
  1524. tree_list->malloc=parent_tree->malloc;
  1525. return tree_list;
  1526. }
  1527. static void *
  1528. create_sub_tree(void* d)
  1529. {
  1530. emem_tree_t *se_tree = (emem_tree_t *)d;
  1531. return emem_tree_create_subtree(se_tree, "subtree");
  1532. }
  1533. /* insert a new node in the tree. if this node matches an already existing node
  1534. * then just replace the data for that node */
  1535. void
  1536. emem_tree_insert32_array(emem_tree_t *se_tree, emem_tree_key_t *key, void *data)
  1537. {
  1538. emem_tree_t *insert_tree = NULL;
  1539. emem_tree_key_t *cur_key;
  1540. guint32 i, insert_key32 = 0;
  1541. if(!se_tree || !key) return;
  1542. for (cur_key = key; cur_key->length > 0; cur_key++) {
  1543. if(cur_key->length > 100) {
  1544. DISSECTOR_ASSERT_NOT_REACHED();
  1545. }
  1546. for (i = 0; i < cur_key->length; i++) {
  1547. /* Insert using the previous key32 */
  1548. if (!insert_tree) {
  1549. insert_tree = se_tree;
  1550. } else {
  1551. insert_tree = (emem_tree_t *)lookup_or_insert32(insert_tree, insert_key32, create_sub_tree, se_tree, EMEM_TREE_NODE_IS_SUBTREE);
  1552. }
  1553. insert_key32 = cur_key->key[i];
  1554. }
  1555. }
  1556. if(!insert_tree) {
  1557. /* We didn't get a valid key. Should we return NULL instead? */
  1558. DISSECTOR_ASSERT_NOT_REACHED();
  1559. }
  1560. emem_tree_insert32(insert_tree, insert_key32, data);
  1561. }
  1562. void *
  1563. emem_tree_lookup32_array(emem_tree_t *se_tree, emem_tree_key_t *key)
  1564. {
  1565. emem_tree_t *lookup_tree = NULL;
  1566. emem_tree_key_t *cur_key;
  1567. guint32 i, lookup_key32 = 0;
  1568. if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
  1569. for (cur_key = key; cur_key->length > 0; cur_key++) {
  1570. if(cur_key->length > 100) {
  1571. DISSECTOR_ASSERT_NOT_REACHED();
  1572. }
  1573. for (i = 0; i < cur_key->length; i++) {
  1574. /* Lookup using the previous key32 */
  1575. if (!lookup_tree) {
  1576. lookup_tree = se_tree;
  1577. } else {
  1578. lookup_tree = (emem_tree_t *)emem_tree_lookup32(lookup_tree, lookup_key32);
  1579. if (!lookup_tree) {
  1580. return NULL;
  1581. }
  1582. }
  1583. lookup_key32 = cur_key->key[i];
  1584. }
  1585. }
  1586. if(!lookup_tree) {
  1587. /* We didn't get a valid key. Should we return NULL instead? */
  1588. DISSECTOR_ASSERT_NOT_REACHED();
  1589. }
  1590. return emem_tree_lookup32(lookup_tree, lookup_key32);
  1591. }
  1592. void *
  1593. emem_tree_lookup32_array_le(emem_tree_t *se_tree, emem_tree_key_t *key)
  1594. {
  1595. emem_tree_t *lookup_tree = NULL;
  1596. emem_tree_key_t *cur_key;
  1597. guint32 i, lookup_key32 = 0;
  1598. if(!se_tree || !key) return NULL; /* prevent searching on NULL pointer */
  1599. for (cur_key = key; cur_key->length > 0; cur_key++) {
  1600. if(cur_key->length > 100) {
  1601. DISSECTOR_ASSERT_NOT_REACHED();
  1602. }
  1603. for (i = 0; i < cur_key->length; i++) {
  1604. /* Lookup using the previous key32 */
  1605. if (!lookup_tree) {
  1606. lookup_tree = se_tree;
  1607. } else {
  1608. lookup_tree = (emem_tree_t *)emem_tree_lookup32_le(lookup_tree, lookup_key32);
  1609. if (!lookup_tree) {
  1610. return NULL;
  1611. }
  1612. }
  1613. lookup_key32 = cur_key->key[i];
  1614. }
  1615. }
  1616. if(!lookup_tree) {
  1617. /* We didn't get a valid key. Should we return NULL instead? */
  1618. DISSECTOR_ASSERT_NOT_REACHED();
  1619. }
  1620. return emem_tree_lookup32_le(lookup_tree, lookup_key32);
  1621. }
  1622. /* Strings are stored as an array of uint32 containing the string characters
  1623. with 4 characters in each uint32.
  1624. The first byte of the string is stored as the most significant byte.
  1625. If the string length is not a multiple of 4 characters, the last
  1626. uint32 containing string bytes is padded with 0 bytes.
  1627. After the uint32's containing the string, there is one final terminator
  1628. uint32 with the value 0x00000001
  1629. */
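/*
* Worked example: the key "cat" packs into divx = 2 guint32s:
* aligned[0] = 0x63617400 ('c' 'a' 't' plus one byte of 0 padding)
* aligned[1] = 0x00000001 (terminator)
*/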
  1630. void
  1631. emem_tree_insert_string(emem_tree_t* se_tree, const gchar* k, void* v, guint32 flags)
  1632. {
  1633. emem_tree_key_t key[2];
  1634. guint32 *aligned=NULL;
  1635. guint32 len = (guint32) strlen(k);
  1636. guint32 divx = (len+3)/4+1;
  1637. guint32 i;
  1638. guint32 tmp;
  1639. aligned = (guint32 *)g_malloc(divx * sizeof (guint32));
  1640. /* pack the bytes one by one into guint32s */
  1641. tmp = 0;
  1642. for (i = 0;i < len;i++) {
  1643. unsigned char ch;
  1644. ch = (unsigned char)k[i];
  1645. if (flags & EMEM_TREE_STRING_NOCASE) {
  1646. if(isupper(ch)) {
  1647. ch = tolower(ch);
  1648. }
  1649. }
  1650. tmp <<= 8;
  1651. tmp |= ch;
  1652. if (i%4 == 3) {
  1653. aligned[i/4] = tmp;
  1654. tmp = 0;
  1655. }
  1656. }
  1657. /* add required padding to the last uint32 */
  1658. if (i%4 != 0) {
  1659. while (i%4 != 0) {
  1660. i++;
  1661. tmp <<= 8;
  1662. }
  1663. aligned[i/4-1] = tmp;
  1664. }
  1665. /* add the terminator */
  1666. aligned[divx-1] = 0x00000001;
  1667. key[0].length = divx;
  1668. key[0].key = aligned;
  1669. key[1].length = 0;
  1670. key[1].key = NULL;
  1671. emem_tree_insert32_array(se_tree, key, v);
  1672. g_free(aligned);
  1673. }
  1674. void *
  1675. emem_tree_lookup_string(emem_tree_t* se_tree, const gchar* k, guint32 flags)
  1676. {
  1677. emem_tree_key_t key[2];
  1678. guint32 *aligned=NULL;
  1679. guint32 len = (guint) strlen(k);
  1680. guint32 divx = (len+3)/4+1;
  1681. guint32 i;
  1682. guint32 tmp;
  1683. void *ret;
  1684. aligned = (guint32 *)g_malloc(divx * sizeof (guint32));
  1685. /* pack the bytes one by one into guint32s */
  1686. tmp = 0;
  1687. for (i = 0;i < len;i++) {
  1688. unsigned char ch;
  1689. ch = (unsigned char)k[i];
  1690. if (flags & EMEM_TREE_STRING_NOCASE) {
  1691. if(isupper(ch)) {
  1692. ch = tolower(ch);
  1693. }
  1694. }
  1695. tmp <<= 8;
  1696. tmp |= ch;
  1697. if (i%4 == 3) {
  1698. aligned[i/4] = tmp;
  1699. tmp = 0;
  1700. }
  1701. }
  1702. /* add required padding to the last uint32 */
  1703. if (i%4 != 0) {
  1704. while (i%4 != 0) {
  1705. i++;
  1706. tmp <<= 8;
  1707. }
  1708. aligned[i/4-1] = tmp;
  1709. }
  1710. /* add the terminator */
  1711. aligned[divx-1] = 0x00000001;
  1712. key[0].length = divx;
  1713. key[0].key = aligned;
  1714. key[1].length = 0;
  1715. key[1].key = NULL;
  1716. ret = emem_tree_lookup32_array(se_tree, key);
  1717. g_free(aligned);
  1718. return ret;
  1719. }
  1720. static gboolean
  1721. emem_tree_foreach_nodes(emem_tree_node_t* node, tree_foreach_func callback, void *user_data)
  1722. {
  1723. gboolean stop_traverse = FALSE;
  1724. if (!node)
  1725. return FALSE;
  1726. if(node->left) {
  1727. stop_traverse = emem_tree_foreach_nodes(node->left, callback, user_data);
  1728. if (stop_traverse) {
  1729. return TRUE;
  1730. }
  1731. }
  1732. if (node->u.is_subtree == EMEM_TREE_NODE_IS_SUBTREE) {
  1733. stop_traverse = emem_tree_foreach((emem_tree_t *)node->data, callback, user_data);
  1734. } else {
  1735. stop_traverse = callback(node->data, user_data);
  1736. }
  1737. if (stop_traverse) {
  1738. return TRUE;
  1739. }
  1740. if(node->right) {
  1741. stop_traverse = emem_tree_foreach_nodes(node->right, callback, user_data);
  1742. if (stop_traverse)

(File truncated; the remaining lines of emem.c are not shown here.)