
/slab.c

https://github.com/Bludge0n/ajvm
/*
 * slab.c - Slab memory allocator
 *
 * Copyright (c) 2011, 2012, 2013 wzt http://www.cloud-sec.org
 *
 *
 * -------     ------     ------     ------
 * |cache| --> |slab| --> |slab| --> |slab|
 * -------     ------     ------     ------
 * |cache|
 * -------
 * |cache| ...
 * -------     ------     ------     ------
 * |cache| --> |slab| --> |slab| --> |slab|
 * -------     ------     ------     ------
 * |cache| ...
 * -------
 * |cache|
 * -------     ------     ------     ------
 * |cache| --> |slab| --> |slab| --> |slab|
 * -------     ------     ------     ------
 *
 *
 * current support:
 *
 * - basic implementation of the slab allocator.
 * - hardware cache (coloring) support.
 * - slab expand support.
 * - general slabs and named slab caches.
 *
 * todo:
 *
 * - slab obj cache support.
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <pthread.h>

#include "list.h"
#include "slab.h"
#include "log.h"

static int slab_size[SLAB_SIZE_NUM] = {8, 16, 32, 64, 128, 256, 512, 1024, 2048};

void show_slab_info(struct slab *slab)
{
	debug2("obj num: %d\tfree_num: %d\tfree_idx: %d\tbase: %p\n",
		slab->obj_num, slab->free_num,
		slab->free_idx, slab->base);
}

void __show_slab_list(struct list_head *list_head)
{
	struct slab *slab;
	struct list_head *p;

	if (list_empty(list_head))
		return;

	list_for_each(p, list_head) {
		slab = list_entry(p, struct slab, list);
		if (slab)
			show_slab_info(slab);
	}
}

void show_slab_cache(struct slab_cache *slab_cache)
{
	debug2("slab size: %d slab num: %d obj_num: %d "
		"free num: %d color num: %d color next: %d\n",
		slab_cache->slab_size,
		slab_cache->slab_num,
		slab_cache->obj_num,
		slab_cache->free_num,
		slab_cache->color_num,
		slab_cache->color_next);
}

void show_slab_list(struct thread_mem *thread_mem)
{
	int idx;

	for (idx = 0; idx < thread_mem->slab_cache_array_size; idx++) {
		debug2("slab size: %d slab num: %d free num: %d color num: %d\n",
			thread_mem->slab_cache_array[idx].slab_size,
			thread_mem->slab_cache_array[idx].slab_num,
			thread_mem->slab_cache_array[idx].free_num,
			thread_mem->slab_cache_array[idx].color_num);
		__show_slab_list(&(thread_mem->slab_cache_array[idx].list));
	}
}

/* the bufctl array sits just behind the slab struct. */
unsigned int *slab_bufctl(struct slab *slab)
{
	return (unsigned int *)(slab + 1);
}
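
/*
 * How the bufctl free list works: bufctl[i] holds the index of the
 * next free obj after obj i, and slab->free_idx points at the first
 * free obj. On a fresh slab __init_slab() sets bufctl to
 * {1, 2, ..., -1} with free_idx = 0, so the first get_slab_obj()
 * hands out obj 0 and advances free_idx to bufctl[0] == 1;
 * put_slab_obj() pushes a freed obj back onto the head of the list
 * with the reverse assignment.
 */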

/* get an obj from a slab. */
void *get_slab_obj(struct slab *slab, struct slab_cache *slab_cache)
{
	void *obj;

	obj = slab->base + slab_cache->slab_size * slab->free_idx;
	slab->free_idx = slab_bufctl(slab)[slab->free_idx];
	slab->free_num--;
	slab_cache->free_num--;
	debug2("slab base: %p\tfree_idx: %d\t"
		"slab_free_num: %d\tslab_cache->free_num: %d\n",
		slab->base, slab->free_idx,
		slab->free_num, slab_cache->free_num);
	debug2("alloc at %p ok.\n", obj);

	return obj;
}

void *get_obj_from_cache(struct slab_obj_cache *obj_cache)
{
	--obj_cache->curr_obj;
	return ((void **)obj_cache->entry)[obj_cache->curr_obj];
}

void set_slab_obj_cache(struct slab *slab, struct slab_cache *slab_cache)
{
	int idx;

	slab_cache->obj_cache->entry =
		malloc(sizeof(void *) * slab->obj_num);
	if (!slab_cache->obj_cache->entry) {
		error("malloc failed.\n");
		exit(-1);
	}

	/* fill the cache; objs are later handed back out from end to head. */
	slab_cache->obj_cache->limit = slab->obj_num;
	for (idx = 0; idx < slab->obj_num; idx++)
		((void **)slab_cache->obj_cache->entry)[idx] =
			get_slab_obj(slab, slab_cache);
	slab_cache->obj_cache->curr_obj = slab->obj_num;
}

int check_slab_size(int size)
{
	int i;

	for (i = 0; i < SLAB_SIZE_NUM; i++) {
		if (size <= slab_size[i])
			return i;
	}

	return -1;
}

/*
 * expand a cache with one new slab of SLAB_MAX_SIZE.
 */
void *expand_slab(struct slab_cache *slab_cache)
{
	void *new_slab = NULL;

	//new_slab = get_new_page(SLAB_MAX_ORDER, MEM_ALLOC_GLIBC);
	new_slab = get_new_page(SLAB_MAX_ORDER, MEM_ALLOC_MMAP);
	if (!new_slab) {
		error("alloc_page failed.\n");
		return NULL;
	}

	__init_slab(slab_cache, new_slab, slab_cache->slab_size);
	slab_cache->slab_num++;

	return new_slab;
}
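
/*
 * Allocation path overview: slab_alloc() below tries three sources in
 * order. Sizes outside [8, 2048] fall through to plain malloc();
 * otherwise it checks the per-size obj cache, expands the cache with a
 * fresh slab when no free obj exists, and finally walks the slab list
 * for a slab that still has a free obj.
 */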
void *slab_alloc(struct thread_mem *thread_mem, int size)
{
	struct slab_cache *slab_cache;
	struct slab *new_slab = NULL;
	struct list_head *p = NULL;
	int idx = -1;

	if (size < 8 || size > 2048)
		return malloc(size);

	idx = check_slab_size(size);
	if (idx == -1)
		return malloc(size);

	debug2("idx: %d\tsize: %d\n", idx, slab_size[idx]);
	slab_cache = thread_mem->slab_cache_array + idx;
	if (slab_cache->obj_cache->curr_obj != 0) {
		debug2("get obj from cache.\n");
		return get_obj_from_cache(slab_cache->obj_cache);
	}

	debug2("get obj from slab.\n");
	if (!slab_cache->free_num) {
		debug2("expand slab obj in size %d.\n", size);
		if (!(new_slab = expand_slab(slab_cache))) {
			debug2("expand slab failed.\n");
			return NULL;
		}
		debug2("expand slab obj in size %d ok.\n", size);
		return get_slab_obj(new_slab, slab_cache);
	}

	debug2("get obj from slab list.\n");
	list_for_each(p, (&(slab_cache->list))) {
		new_slab = list_entry(p, struct slab, list);
		if (new_slab && new_slab->free_num) {
			show_slab_cache(slab_cache);
			return get_slab_obj(new_slab, slab_cache);
		}
	}

	return NULL;
}

/*
 * support for slab_free & kmem_cache_free.
 */
struct slab *search_slab(void *addr, struct list_head *list_head)
{
	struct slab *slab;
	struct list_head *p;

	assert(list_head != NULL);

	list_for_each(p, list_head) {
		slab = list_entry(p, struct slab, list);
		if (slab) {
			if (slab->base <= addr &&
			    addr < ((void *)slab + SLAB_MAX_SIZE))
				return slab;
		}
	}

	return NULL;
}

void put_slab_obj(struct slab *slab, void *obj, struct slab_cache *slab_cache)
{
	int obj_idx;

	assert(slab != NULL && slab_cache != NULL);

	debug2("free obj: %p, slab->base: %p slab size: %d\n",
		obj, slab->base, slab_cache->slab_size);
	obj_idx = (obj - slab->base) / slab_cache->slab_size;
	debug2("obj_idx: %d\n", obj_idx);

	/* push the obj back onto the head of the slab's free list. */
	slab_bufctl(slab)[obj_idx] = slab->free_idx;
	slab->free_idx = obj_idx;
	slab->free_num++;
	slab_cache->free_num++;
	debug2("free obj %p ok, slab->base: %p slab size: %d\n",
		obj, slab->base, slab_cache->slab_size);
}

int slab_free(struct thread_mem *thread_mem, void *addr, int size)
{
	struct slab *slab;
	int cache_idx;

	assert(thread_mem != NULL);

	if (!addr)
		return 0;

	cache_idx = check_slab_size(size);
	if (cache_idx < 0 || cache_idx >= SLAB_SIZE_NUM) {
		error("bad idx: %d\n", cache_idx);
		return -1;
	}

	slab = search_slab(addr, &(thread_mem->slab_cache_array[cache_idx].list));
	if (!slab) {
		error("search slab failed with addr: %p\n", addr);
		return -1;
	}
	debug2("search slab %d with addr: %p ok.\n",
		slab_size[cache_idx], addr);

	put_slab_obj(slab, addr, &(thread_mem->slab_cache_array[cache_idx]));
	debug2("free addr %p ok.\n", addr);

	return 0;
}

/*
 * compute per slab obj num.
 */
int compute_slab_obj_num(int obj_size, int slab_size)
{
	return (slab_size - sizeof(struct slab)) / (obj_size + sizeof(int));
}

/*
 * compute slab color num for hardware cache.
 */
int compute_slab_color_num(int obj_size, int slab_size)
{
	return (slab_size - sizeof(struct slab)) % (obj_size + sizeof(int));
}
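
/*
 * Worked example (illustrative numbers; the real values come from
 * slab.h): if SLAB_MAX_SIZE were 4096 and sizeof(struct slab) were 64,
 * the 128-byte cache would get
 *
 *   obj_num   = (4096 - 64) / (128 + 4) = 30 objs per slab,
 *   color_num = (4096 - 64) % (128 + 4) = 72 leftover bytes,
 *
 * where the "+ 4" (sizeof(int)) charges each obj for its bufctl slot.
 * Each new slab then shifts its base by one of the 72 color offsets to
 * spread objs across hardware cache lines.
 */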

int get_slab_color(struct slab_cache *slab_cache)
{
	if (slab_cache->color_next >= slab_cache->color_num) {
		slab_cache->color_next = 0;
		return 0;
	}
	else {
		return ++slab_cache->color_next;
	}
}

void *set_slab_base_addr(void *addr, struct slab *new_slab)
{
	/*
	return (void *)(ALIGN((unsigned int)(addr + sizeof(struct slab) +
		(new_slab->obj_num * sizeof(int))), DEFAULT_ALIGN));
	*/
	return (void *)(addr + sizeof(struct slab) + new_slab->obj_num * sizeof(int));
}

/*
 * support for CPU hardware cache.
 */
void *fix_slab_base_addr(void *addr, int color)
{
	return (void *)(addr + color);
}

/*
 * all the slab management data sits at the front of the slab; next is
 * the bufctl array, which is a simple linked list of free objs. the
 * tail of the slab may be left unused: it provides the color area for
 * the hardware cache.
 *
 * the slab layout looks like this:
 *
 * +-----------------------------------------------+
 * | struct slab | bufctl | obj | obj | ...| color |
 * +-----------------------------------------------+
 *
 */
int __init_slab(struct slab_cache *slab_cache, void *addr, int size)
{
	struct slab *new_slab = (struct slab *)addr;
	int idx;

	new_slab->obj_num = compute_slab_obj_num(size, SLAB_MAX_SIZE);
	slab_cache->obj_num += new_slab->obj_num;
	new_slab->free_num = new_slab->obj_num;
	debug2("slab obj_num: %d\n", new_slab->obj_num);

	/* chain every obj into the bufctl free list; -1 ends the list. */
	for (idx = 0; idx < new_slab->obj_num - 1; idx++)
		slab_bufctl(new_slab)[idx] = idx + 1;
	slab_bufctl(new_slab)[idx] = -1;

	if (slab_cache->ctor)
		slab_cache->ctor();

	slab_cache->free_num += new_slab->free_num;
	slab_cache->color_next = get_slab_color(slab_cache);
	debug2("color num: %d\n", slab_cache->color_num);
	debug2("color next: %d\n", slab_cache->color_next);
	//set_slab_obj_cache(new_slab, slab_cache);
	new_slab->free_idx = 0;
	list_add_tail(&(new_slab->list), &(slab_cache->list));

	new_slab->base = set_slab_base_addr(addr, new_slab);
	debug2("slab base: %p\n", new_slab->base);
	new_slab->base = fix_slab_base_addr(new_slab->base,
		slab_cache->color_next);
	debug2("new slab base: %p\n", new_slab->base);

	return 0;
}
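
/*
 * Address arithmetic recap: for a slab page mapped at addr, the obj
 * area starts past the header and the bufctl array,
 *
 *   base = addr + sizeof(struct slab) + obj_num * sizeof(int),
 *
 * then fix_slab_base_addr() shifts base forward by color_next bytes
 * taken from the unused tail, so obj i lives at
 * base + i * slab_cache->slab_size.
 */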

void *get_new_page(int order, int flag)
{
	void *mem = NULL;

	switch (flag) {
	case MEM_ALLOC_MMAP:
		/* MAP_ANONYMOUS expects fd == -1; mmap fails with MAP_FAILED, not NULL. */
		mem = mmap(NULL, SLAB_MAX_SIZE, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (mem == MAP_FAILED)
			return NULL;
		break;
	case MEM_ALLOC_GLIBC:
		mem = malloc(PAGE_SIZE * (1 << order));
		break;
	default:
		return NULL;
	}

	return mem;
}

void free_page(int flag, void *addr)
{
	switch (flag) {
	case MEM_ALLOC_MMAP:
		munmap(addr, SLAB_MAX_SIZE);
		break;
	case MEM_ALLOC_GLIBC:
		free(addr);
		break;
	default:
		return;
	}
}

int init_slab(struct slab_cache *slab_cache, int size)
{
	void *addr;

	//addr = get_new_page(SLAB_MAX_ORDER, MEM_ALLOC_GLIBC);
	addr = get_new_page(SLAB_MAX_ORDER, MEM_ALLOC_MMAP);
	if (!addr) {
		error("alloc page failed.\n");
		return -1;
	}

	if (__init_slab(slab_cache, addr, size) == -1) {
		error("init slab failed.\n");
		return -1;
	}

	debug2("init slab ok.\n");
	return 0;
}

int init_general_slab_cache(struct thread_mem *thread_mem)
{
	int idx;

	for (idx = 0; idx < thread_mem->slab_cache_array_size; idx++) {
		(thread_mem->slab_cache_array + idx)->obj_cache =
			(struct slab_obj_cache *)malloc(sizeof(struct slab_obj_cache));
		if (!thread_mem->slab_cache_array[idx].obj_cache) {
			error("alloc obj cache failed.\n");
			goto out;
		}
		memset((thread_mem->slab_cache_array + idx)->obj_cache, '\0',
			sizeof(struct slab_obj_cache));

		thread_mem->slab_cache_array[idx].slab_size = slab_size[idx];
		thread_mem->slab_cache_array[idx].slab_num = 0;
		thread_mem->slab_cache_array[idx].obj_num = 0;
		thread_mem->slab_cache_array[idx].free_num = 0;
		thread_mem->slab_cache_array[idx].ctor = NULL;
		thread_mem->slab_cache_array[idx].dtor = NULL;
		thread_mem->slab_cache_array[idx].color_num =
			compute_slab_color_num(slab_size[idx], SLAB_MAX_SIZE);
		thread_mem->slab_cache_array[idx].color_next = -1;
		thread_mem->slab_cache_array[idx].thread = thread_mem;
		INIT_LIST_HEAD(&(thread_mem->slab_cache_array[idx].list));
		INIT_LIST_HEAD(&(thread_mem->slab_cache_array[idx].cache_list));
	}

	debug2("Init general slab cache ok.\n");
	return 0;

out:
	/* free only the obj caches that were actually allocated. */
	while (--idx >= 0)
		free(thread_mem->slab_cache_array[idx].obj_cache);
	return -1;
}

void free_slab(struct slab_cache *slab_cache)
{
	struct slab *slab;
	struct list_head *p, *q;

	list_for_each_safe(p, q, (&(slab_cache->list))) {
		slab = list_entry(p, struct slab, list);
		if (slab) {
			list_del(p);
			//free_page(MEM_ALLOC_GLIBC, (void *)slab);
			free_page(MEM_ALLOC_MMAP, (void *)slab);
		}
	}
}

void free_slab_cache(struct thread_mem *thread_mem)
{
	struct slab_cache *slab_cache;
	struct list_head *p, *q;

	list_for_each_safe(p, q, (&(thread_mem->kmem_list_head))) {
		slab_cache = list_entry(p, struct slab_cache, cache_list);
		if (slab_cache) {
			list_del(p);
			//free_page(MEM_ALLOC_GLIBC, (void *)slab_cache);
			free_page(MEM_ALLOC_MMAP, (void *)slab_cache);
		}
	}
}

void destroy_general_slab_cache(struct thread_mem *thread_mem)
{
	int idx;

	for (idx = 0; idx < thread_mem->slab_cache_array_size; idx++) {
		free((thread_mem->slab_cache_array + idx)->obj_cache);
		free_slab(thread_mem->slab_cache_array + idx);
	}
}

void *kmem_cache_alloc(struct slab_cache *slab_cache)
{
	struct slab *s = NULL;
	struct list_head *p = NULL;
	void *obj = NULL;

	assert(slab_cache != NULL);

	if (!slab_cache->free_num) {
		if (!(s = expand_slab(slab_cache))) {
			error("expand slab failed.\n");
			return NULL;
		}
		debug2("expand slab ok.\n");
		obj = get_slab_obj(s, slab_cache);
		return obj;
	}

	if (list_empty(&(slab_cache->list)))
		return NULL;

	list_for_each(p, (&(slab_cache->list))) {
		s = list_entry(p, struct slab, list);
		if (s && s->free_num) {
			obj = get_slab_obj(s, slab_cache);
			return obj;
		}
	}

	return NULL;
}

struct slab_cache *search_slab_cache(struct thread_mem *thread_mem, char *name)
{
	struct slab_cache *s = NULL;
	struct list_head *p = NULL;

	list_for_each(p, (&(thread_mem->kmem_list_head))) {
		s = list_entry(p, struct slab_cache, cache_list);
		if (s && !strcmp(name, s->name))
			return s;
	}

	return NULL;
}

struct slab_cache *kmem_cache_create(struct thread_mem *thread_mem,
	char *name, int size)
{
	struct slab_cache *cachep;

	assert(thread_mem != NULL);

	if (search_slab_cache(thread_mem, name)) {
		error("kmem_cache: %s already exist.\n", name);
		return NULL;
	}

	cachep = (struct slab_cache *)kmem_cache_alloc(thread_mem->kmem_cache_st);
	if (!cachep) {
		error("create kmem cache failed.\n");
		return NULL;
	}
	debug2("kmem cache alloc at %p\n", cachep);

	cachep->slab_size = ALIGN(size, DEFAULT_ALIGN);
	cachep->slab_num = SLAB_NUM;
	cachep->obj_num = 0;
	cachep->free_num = 0;
	cachep->ctor = NULL;
	cachep->dtor = NULL;
	cachep->obj_cache = NULL;
	cachep->thread = thread_mem;
	strcpy(cachep->name, name);
	INIT_LIST_HEAD(&(cachep->list));

	init_slab(cachep, cachep->slab_size);
	list_add_tail(&(cachep->cache_list), &(thread_mem->kmem_list_head));

	return cachep;
}

int kmem_cache_free(struct slab_cache *slab_cache, void *addr)
{
	struct slab *slab = NULL;

	if (!slab_cache || !addr)
		return -1;

	slab = search_slab(addr, (&(slab_cache->list)));
	if (!slab) {
		error("not found slab: %s\n", slab_cache->name);
		return -1;
	}
	debug2("found slab: %s\n", slab_cache->name);

	put_slab_obj(slab, addr, slab_cache);
	return 0;
}

void kmem_cache_destroy(struct thread_mem *thread_mem, struct slab_cache *slab_cache)
{
	/* unlink from the cache list before recycling the descriptor. */
	list_del(&(slab_cache->cache_list));
	free_slab(slab_cache);
	/* obj_cache is malloc'd (or NULL for kmem caches), so use free(). */
	free(slab_cache->obj_cache);
	kmem_cache_free(thread_mem->kmem_cache_st, (void *)slab_cache);
}

void kmem_cache_list_destroy(struct thread_mem *thread_mem)
{
	struct slab_cache *slab_cache;
	struct list_head *p, *q;

	list_for_each_safe(p, q, (&(thread_mem->kmem_list_head))) {
		slab_cache = list_entry(p, struct slab_cache, cache_list);
		if (slab_cache) {
			if (!strcmp(slab_cache->name, "kmem_cache_st"))
				continue;
			debug2("destroy kmem cache: %s\n", slab_cache->name);
			list_del(p);
			free_slab(slab_cache);
			/* obj_cache is malloc'd (or NULL), so use free(). */
			free(slab_cache->obj_cache);
		}
	}

	free_slab(thread_mem->kmem_cache_st);
}

void print_kmem_cache_list(struct thread_mem *thread_mem)
{
	struct slab_cache *s = NULL;
	struct list_head *p = NULL;

	list_for_each(p, (&(thread_mem->kmem_list_head))) {
		s = list_entry(p, struct slab_cache, cache_list);
		if (s) {
			debug2("cache name: %s slab size: %d slab num: %d "
				"free num: %d color num: %d\n",
				s->name, s->slab_size, s->slab_num,
				s->free_num, s->color_num);
			__show_slab_list(&(s->list));
		}
	}
}

int init_kmem_cache(struct thread_mem *thread_mem)
{
	thread_mem->kmem_cache_st->slab_size = SLAB_CACHE_SIZE;
	thread_mem->kmem_cache_st->slab_num = SLAB_NUM;
	thread_mem->kmem_cache_st->free_num = 0;
	thread_mem->kmem_cache_st->obj_num = 0;
	thread_mem->kmem_cache_st->color_num =
		compute_slab_color_num(SLAB_CACHE_SIZE, SLAB_MAX_SIZE);
	thread_mem->kmem_cache_st->ctor = NULL;
	thread_mem->kmem_cache_st->dtor = NULL;
	thread_mem->kmem_cache_st->obj_cache = NULL;
	thread_mem->kmem_cache_st->thread = thread_mem;
	strcpy(thread_mem->kmem_cache_st->name, "kmem_cache_st");
	INIT_LIST_HEAD(&(thread_mem->kmem_cache_st->list));
	list_add_tail(&(thread_mem->kmem_cache_st->cache_list),
		&(thread_mem->kmem_list_head));

	if (init_slab(thread_mem->kmem_cache_st, SLAB_CACHE_SIZE) == -1) {
		error("init slab failed.\n");
		return -1;
	}

	debug2("Init kmem cache ok.\n");
	return 0;
}

struct thread_mem *mem_cache_init(int array_size)
{
	struct thread_mem *thread_mem = NULL;

	thread_mem = (struct thread_mem *)malloc(sizeof(struct thread_mem));
	if (!thread_mem) {
		error("Malloc failed.\n");
		return NULL;
	}

	thread_mem->slab_cache_array_size = array_size;
	thread_mem->slab_cache_array = (struct slab_cache *)
		malloc(sizeof(struct slab_cache) * array_size);
	if (!thread_mem->slab_cache_array) {
		error("Malloc failed.\n");
		goto out_thread_mem;
	}

	thread_mem->kmem_cache_st = (struct slab_cache *)malloc(sizeof(struct slab_cache));
	if (!thread_mem->kmem_cache_st) {
		error("Malloc failed.\n");
		goto out_slab_cache_array;
	}

	INIT_LIST_HEAD(&(thread_mem->kmem_list_head));
	INIT_LIST_HEAD(&thread_mem_list_head);
	pthread_mutex_init(&(thread_mem->slab_lock), NULL);
	list_add_tail(&(thread_mem->list), &thread_mem_list_head);

	if (init_general_slab_cache(thread_mem) == -1)
		goto out_kmem_cache_st;
	if (init_kmem_cache(thread_mem) == -1)
		goto out_kmem_cache_st;

	return thread_mem;

	/* unwind in reverse order; never free what was not allocated. */
out_kmem_cache_st:
	free(thread_mem->kmem_cache_st);
out_slab_cache_array:
	free(thread_mem->slab_cache_array);
out_thread_mem:
	free(thread_mem);
	return NULL;
}

void mem_cache_destroy(struct thread_mem *thread_mem)
{
	destroy_general_slab_cache(thread_mem);
	free(thread_mem->kmem_cache_st);
	free(thread_mem->slab_cache_array);
	free(thread_mem);
}
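
/*
 * Usage sketch (illustrative, not part of the original file): exercises
 * the general slab path and the named kmem-cache path end to end. The
 * SLAB_DEMO guard and the "demo_cache" name are made up for the example;
 * everything else uses the functions defined above.
 */
#ifdef SLAB_DEMO
int main(void)
{
	struct thread_mem *tm;
	struct slab_cache *cachep;
	void *obj;

	tm = mem_cache_init(SLAB_SIZE_NUM);
	if (!tm)
		return -1;

	/* general path: sizes in [8, 2048] map onto slab_size[]. */
	obj = slab_alloc(tm, 100);	/* served by the 128-byte cache */
	if (obj)
		slab_free(tm, obj, 100);

	/* named cache path: fixed-size objs with their own slab list. */
	cachep = kmem_cache_create(tm, "demo_cache", 64);
	if (cachep) {
		obj = kmem_cache_alloc(cachep);
		if (obj)
			kmem_cache_free(cachep, obj);
		kmem_cache_destroy(tm, cachep);
	}

	print_kmem_cache_list(tm);
	kmem_cache_list_destroy(tm);
	mem_cache_destroy(tm);

	return 0;
}
#endif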