
/slabs.c

https://bitbucket.org/luobailiang/memcached
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Slabs memory allocation, based on powers-of-N. Slabs are up to 1MB in size
 * and are divided into chunks. The chunk sizes start off at the size of the
 * "item" structure plus space for a small key and value. They increase by
 * a multiplier factor from there, up to half the maximum slab size. The last
 * slab size is always 1MB, since that's the maximum item size allowed by the
 * memcached protocol.
 */
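
/*
 * Illustrative progression (exact values depend on sizeof(item) and
 * settings.chunk_size): with the default growth factor of 1.25 and a
 * first chunk size of 96 bytes, the class sizes run roughly
 * 96, 120, 152, 192, 240, ... up to the 1MB maximum item size.
 */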

#include "memcached.h"
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/signal.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>

/* powers-of-N allocation structures */
typedef struct {
    unsigned int size;      /* sizes of items */
    unsigned int perslab;   /* how many items per slab */

    void **slots;           /* list of item ptrs */
    unsigned int sl_total;  /* capacity of the slots array */
    unsigned int sl_curr;   /* first free slot */

    void *end_page_ptr;         /* pointer to next free item at end of page, or 0 */
    unsigned int end_page_free; /* number of items remaining at end of last alloced page */

    unsigned int slabs;     /* how many slabs were allocated for this class */

    void **slab_list;       /* array of slab pointers */
    unsigned int list_size; /* capacity of the slab_list array */

    unsigned int killing;   /* index+1 of dying slab, or zero if none */
    size_t requested;       /* the number of requested bytes */
} slabclass_t;
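
/*
 * Free chunks for a class come from two places: the slots freelist
 * (chunks handed back by do_slabs_free) and the untouched tail of the
 * most recently allocated page, tracked by end_page_ptr/end_page_free.
 */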

static slabclass_t slabclass[MAX_NUMBER_OF_SLAB_CLASSES];
static size_t mem_limit = 0;
static size_t mem_malloced = 0;
static int power_largest;

static void *mem_base = NULL;
static void *mem_current = NULL;
static size_t mem_avail = 0;
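
/*
 * When the cache is started with preallocation, mem_base/mem_current/
 * mem_avail describe the single large arena that memory_allocate()
 * carves pages out of; otherwise each page is a separate malloc().
 */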

/**
 * Access to the slab allocator is protected by this lock
 */
static pthread_mutex_t slabs_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Forward Declarations
 */
static int do_slabs_newslab(const unsigned int id);
static void *memory_allocate(size_t size);

#ifndef DONT_PREALLOC_SLABS
/* Preallocate as many slab pages as possible (called from slabs_init)
   on start-up, so users don't get confused out-of-memory errors when
   they do have free (in-slab) space, but no space to make new slabs.
   if maxslabs is 18 (POWER_LARGEST - POWER_SMALLEST + 1), then all
   slab types can be made.  if max memory is less than 18 MB, only the
   smaller ones will be made.  */
static void slabs_preallocate (const unsigned int maxslabs);
#endif

/*
 * Figures out which slab class (chunk size) is required to store an item of
 * a given size.
 *
 * Given object size, return id to use when allocating/freeing memory for object
 * 0 means error: can't store such a large object
 */
unsigned int slabs_clsid(const size_t size) {
    int res = POWER_SMALLEST;

    if (size == 0)
        return 0;
    while (size > slabclass[res].size)
        if (res++ == power_largest)     /* won't fit in the biggest slab */
            return 0;
    return res;
}
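
/*
 * Example: with the illustrative sizes above, slabs_clsid(100) skips the
 * 96-byte class and returns the id of the 120-byte class; any size larger
 * than the biggest chunk size yields 0.
 */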

/**
 * Determines the chunk sizes and initializes the slab class descriptors
 * accordingly.
 */
void slabs_init(const size_t limit, const double factor, const bool prealloc) {
    int i = POWER_SMALLEST - 1;
    unsigned int size = sizeof(item) + settings.chunk_size;

    mem_limit = limit;

    if (prealloc) {
        /* Allocate everything in a big chunk with malloc */
        mem_base = malloc(mem_limit);
        if (mem_base != NULL) {
            mem_current = mem_base;
            mem_avail = mem_limit;
        } else {
            fprintf(stderr, "Warning: Failed to allocate requested memory in"
                    " one large chunk.\nWill allocate in smaller chunks\n");
        }
    }

    memset(slabclass, 0, sizeof(slabclass));

    while (++i < POWER_LARGEST && size <= settings.item_size_max / factor) {
        /* Make sure items are always n-byte aligned */
        if (size % CHUNK_ALIGN_BYTES)
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);

        slabclass[i].size = size;
        slabclass[i].perslab = settings.item_size_max / slabclass[i].size;
        size *= factor;
        if (settings.verbose > 1) {
            fprintf(stderr, "slab class %3d: chunk size %9u perslab %7u\n",
                    i, slabclass[i].size, slabclass[i].perslab);
        }
    }

    power_largest = i;
    slabclass[power_largest].size = settings.item_size_max;
    slabclass[power_largest].perslab = 1;
    if (settings.verbose > 1) {
        fprintf(stderr, "slab class %3d: chunk size %9u perslab %7u\n",
                i, slabclass[i].size, slabclass[i].perslab);
    }

    /* for the test suite: faking of how much we've already malloc'd */
    {
        char *t_initial_malloc = getenv("T_MEMD_INITIAL_MALLOC");
        if (t_initial_malloc) {
            mem_malloced = (size_t)atol(t_initial_malloc);
        }
    }

#ifndef DONT_PREALLOC_SLABS
    {
        char *pre_alloc = getenv("T_MEMD_SLABS_ALLOC");
        if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
            slabs_preallocate(power_largest);
        }
    }
#endif
}
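
/*
 * Typical start-up call (a sketch; real values come from the command
 * line): slabs_init(64 * 1024 * 1024, 1.25, false) builds the size
 * classes for a 64MB cache without grabbing the whole arena up front.
 */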

#ifndef DONT_PREALLOC_SLABS
static void slabs_preallocate (const unsigned int maxslabs) {
    int i;
    unsigned int prealloc = 0;

    /* pre-allocate a 1MB slab in every size class so people don't get
       confused by non-intuitive "SERVER_ERROR out of memory"
       messages.  this is the most common question on the mailing
       list.  if you really don't want this, you can rebuild without
       these three lines.  */

    for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
        if (++prealloc > maxslabs)
            return;
        do_slabs_newslab(i);
    }
}
#endif

static int grow_slab_list (const unsigned int id) {
    slabclass_t *p = &slabclass[id];
    if (p->slabs == p->list_size) {
        size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
        void *new_list = realloc(p->slab_list, new_size * sizeof(void *));
        if (new_list == 0) return 0;
        p->list_size = new_size;
        p->slab_list = new_list;
    }
    return 1;
}
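
/*
 * grow_slab_list() doubles the slab_list capacity (starting at 16
 * entries), so appending a new page pointer is amortized O(1) and a
 * class holding N pages reallocates its list only about log2(N) times.
 */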

static int do_slabs_newslab(const unsigned int id) {
    slabclass_t *p = &slabclass[id];
    int len = p->size * p->perslab;
    char *ptr;

    if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0) ||
        (grow_slab_list(id) == 0) ||
        ((ptr = memory_allocate((size_t)len)) == 0)) {

        MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
        return 0;
    }

    memset(ptr, 0, (size_t)len);
    p->end_page_ptr = ptr;
    p->end_page_free = p->perslab;

    p->slab_list[p->slabs++] = ptr;
    mem_malloced += len;
    MEMCACHED_SLABS_SLABCLASS_ALLOCATE(id);

    return 1;
}
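
/*
 * Allocation order in do_slabs_alloc() below: reuse a chunk from the
 * class freelist if one exists, otherwise carve the next chunk off the
 * tail of the most recently allocated page, otherwise ask
 * do_slabs_newslab() for a fresh page.
 */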

/*@null@*/
static void *do_slabs_alloc(const size_t size, unsigned int id) {
    slabclass_t *p;
    void *ret = NULL;

    if (id < POWER_SMALLEST || id > power_largest) {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
        return NULL;
    }

    p = &slabclass[id];
    assert(p->sl_curr == 0 || ((item *)p->slots[p->sl_curr - 1])->slabs_clsid == 0);

#ifdef USE_SYSTEM_MALLOC
    if (mem_limit && mem_malloced + size > mem_limit) {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
        return 0;
    }
    mem_malloced += size;
    ret = malloc(size);
    MEMCACHED_SLABS_ALLOCATE(size, id, 0, ret);
    return ret;
#endif

    /* fail unless we have space at the end of a recently allocated page,
       we have something on our freelist, or we could allocate a new page */
    if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
           do_slabs_newslab(id) != 0)) {
        /* We don't have more memory available */
        ret = NULL;
    } else if (p->sl_curr != 0) {
        /* return off our freelist */
        ret = p->slots[--p->sl_curr];
    } else {
        /* if we recently allocated a whole page, return from that */
        assert(p->end_page_ptr != NULL);
        ret = p->end_page_ptr;
        if (--p->end_page_free != 0) {
            p->end_page_ptr = ((caddr_t)p->end_page_ptr) + p->size;
        } else {
            p->end_page_ptr = 0;
        }
    }

    if (ret) {
        p->requested += size;
        MEMCACHED_SLABS_ALLOCATE(size, id, p->size, ret);
    } else {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
    }

    return ret;
}

static void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
    slabclass_t *p;

    assert(((item *)ptr)->slabs_clsid == 0);
    assert(id >= POWER_SMALLEST && id <= power_largest);
    if (id < POWER_SMALLEST || id > power_largest)
        return;

    MEMCACHED_SLABS_FREE(size, id, ptr);
    p = &slabclass[id];

#ifdef USE_SYSTEM_MALLOC
    mem_malloced -= size;
    free(ptr);
    return;
#endif

    if (p->sl_curr == p->sl_total) { /* need more space on the free list */
        int new_size = (p->sl_total != 0) ? p->sl_total * 2 : 16;  /* 16 is arbitrary */
        void **new_slots = realloc(p->slots, new_size * sizeof(void *));
        if (new_slots == 0)
            return;
        p->slots = new_slots;
        p->sl_total = new_size;
    }
    p->slots[p->sl_curr++] = ptr;
    p->requested -= size;
    return;
}
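
/*
 * nz_strcmp() compares a counted (not null-terminated) token of nzlength
 * bytes against a null-terminated string; it returns 0 on an exact match
 * and -1 otherwise, mirroring strcmp()'s "0 means equal" convention.
 */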

static int nz_strcmp(int nzlength, const char *nz, const char *z) {
    int zlength = strlen(z);
    return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
}

bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
    bool ret = true;

    if (add_stats != NULL) {
        if (!stat_type) {
            /* prepare general statistics for the engine */
            STATS_LOCK();
            APPEND_STAT("bytes", "%llu", (unsigned long long)stats.curr_bytes);
            APPEND_STAT("curr_items", "%u", stats.curr_items);
            APPEND_STAT("total_items", "%u", stats.total_items);
            APPEND_STAT("evictions", "%llu",
                        (unsigned long long)stats.evictions);
            APPEND_STAT("reclaimed", "%llu",
                        (unsigned long long)stats.reclaimed);
            STATS_UNLOCK();
        } else if (nz_strcmp(nkey, stat_type, "items") == 0) {
            item_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
            slabs_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
            item_stats_sizes(add_stats, c);
        } else {
            ret = false;
        }
    } else {
        ret = false;
    }

    return ret;
}
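
/*
 * For example, the "stats slabs" command reaches this function as
 * get_stats("slabs", 5, add_stats, c) and dispatches to slabs_stats();
 * a NULL stat_type produces the general engine statistics instead.
 */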

/*@null@*/
static void do_slabs_stats(ADD_STAT add_stats, void *c) {
    int i, total;
    /* Get the per-thread stats which contain some interesting aggregates */
    struct thread_stats thread_stats;
    threadlocal_stats_aggregate(&thread_stats);

    total = 0;
    for (i = POWER_SMALLEST; i <= power_largest; i++) {
        slabclass_t *p = &slabclass[i];
        if (p->slabs != 0) {
            uint32_t perslab, slabs;
            slabs = p->slabs;
            perslab = p->perslab;

            char key_str[STAT_KEY_LEN];
            char val_str[STAT_VAL_LEN];
            int klen = 0, vlen = 0;

            APPEND_NUM_STAT(i, "chunk_size", "%u", p->size);
            APPEND_NUM_STAT(i, "chunks_per_page", "%u", perslab);
            APPEND_NUM_STAT(i, "total_pages", "%u", slabs);
            APPEND_NUM_STAT(i, "total_chunks", "%u", slabs * perslab);
            APPEND_NUM_STAT(i, "used_chunks", "%u",
                            slabs*perslab - p->sl_curr - p->end_page_free);
            APPEND_NUM_STAT(i, "free_chunks", "%u", p->sl_curr);
            APPEND_NUM_STAT(i, "free_chunks_end", "%u", p->end_page_free);
            APPEND_NUM_STAT(i, "mem_requested", "%llu",
                            (unsigned long long)p->requested);
            APPEND_NUM_STAT(i, "get_hits", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].get_hits);
            APPEND_NUM_STAT(i, "cmd_set", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].set_cmds);
            APPEND_NUM_STAT(i, "delete_hits", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].delete_hits);
            APPEND_NUM_STAT(i, "incr_hits", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].incr_hits);
            APPEND_NUM_STAT(i, "decr_hits", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].decr_hits);
            APPEND_NUM_STAT(i, "cas_hits", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].cas_hits);
            APPEND_NUM_STAT(i, "cas_badval", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].cas_badval);
            APPEND_NUM_STAT(i, "touch_hits", "%llu",
                            (unsigned long long)thread_stats.slab_stats[i].touch_hits);
            total++;
        }
    }

    /* add overall slab stats and append terminator */
    APPEND_STAT("active_slabs", "%d", total);
    APPEND_STAT("total_malloced", "%llu", (unsigned long long)mem_malloced);
    add_stats(NULL, 0, NULL, 0, c);
}
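
/*
 * Illustrative text-protocol output for a single populated 96-byte class
 * (values are made up for the example; 1048576 / 96 = 10922 chunks and
 * 10922 * 96 = 1048512 bytes per page):
 *
 *   STAT 1:chunk_size 96
 *   STAT 1:chunks_per_page 10922
 *   STAT 1:total_pages 1
 *   ...
 *   STAT active_slabs 1
 *   STAT total_malloced 1048512
 *   END
 */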

static void *memory_allocate(size_t size) {
    void *ret;

    if (mem_base == NULL) {
        /* We are not using a preallocated large memory chunk */
        ret = malloc(size);
    } else {
        ret = mem_current;

        if (size > mem_avail) {
            return NULL;
        }

        /* mem_current pointer _must_ be aligned!!! */
        if (size % CHUNK_ALIGN_BYTES) {
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
        }

        mem_current = ((char*)mem_current) + size;
        if (size < mem_avail) {
            mem_avail -= size;
        } else {
            mem_avail = 0;
        }
    }

    return ret;
}
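
/*
 * Example: with CHUNK_ALIGN_BYTES == 8 (the usual setting), a 41-byte
 * request advances mem_current by 48 bytes, keeping it 8-byte aligned
 * for the next caller. The rounding applies only to the preallocated
 * arena path, not to plain malloc().
 */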

void *slabs_alloc(size_t size, unsigned int id) {
    void *ret;

    pthread_mutex_lock(&slabs_lock);
    ret = do_slabs_alloc(size, id);
    pthread_mutex_unlock(&slabs_lock);
    return ret;
}

void slabs_free(void *ptr, size_t size, unsigned int id) {
    pthread_mutex_lock(&slabs_lock);
    do_slabs_free(ptr, size, id);
    pthread_mutex_unlock(&slabs_lock);
}

void slabs_stats(ADD_STAT add_stats, void *c) {
    pthread_mutex_lock(&slabs_lock);
    do_slabs_stats(add_stats, c);
    pthread_mutex_unlock(&slabs_lock);
}

void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal)
{
    pthread_mutex_lock(&slabs_lock);
    slabclass_t *p;
    if (id < POWER_SMALLEST || id > power_largest) {
        fprintf(stderr, "Internal error! Invalid slab class\n");
        abort();
    }

    p = &slabclass[id];
    p->requested = p->requested - old + ntotal;
    pthread_mutex_unlock(&slabs_lock);
}
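
/*
 * slabs_adjust_mem_requested() keeps the per-class "mem_requested"
 * statistic honest when an existing item changes size in place (old
 * bytes out, ntotal bytes in) without going through a free/alloc pair.
 */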