/drivers/staging/zram/zram_drv.c

Source: https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t

/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
        *v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
        *v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
        spin_lock(&zram->stat64_lock);
        *v = *v + inc;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
        spin_lock(&zram->stat64_lock);
        *v = *v - dec;
        spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
        zram_stat64_add(zram, v, 1);
}

static int zram_test_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
                        enum zram_pageflags flag)
{
        zram->table[index].flags &= ~BIT(flag);
}
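
/* Returns 1 if the page pointed to by ptr contains only zero bytes. */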
static int page_zero_filled(void *ptr)
{
        unsigned int pos;
        unsigned long *page;

        page = (unsigned long *)ptr;

        for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
                if (page[pos])
                        return 0;
        }

        return 1;
}
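
/*
 * Pick a disk size for the device: fall back to a percentage of total
 * RAM when none was provided, warn when the requested size exceeds
 * twice the amount of RAM, and round the result down to a page boundary.
 */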
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
        if (!zram->disksize) {
                pr_info(
                "disk size not provided. You can use disksize_kb module "
                "param to specify size.\nUsing default: (%u%% of RAM).\n",
                default_disksize_perc_ram
                );
                zram->disksize = default_disksize_perc_ram *
                                        (totalram_bytes / 100);
        }

        if (zram->disksize > 2 * (totalram_bytes)) {
                pr_info(
                "There is little point creating a zram of greater than "
                "twice the size of memory since we expect a 2:1 compression "
                "ratio. Note that zram uses about 0.1%% of the size of "
                "the disk when not in use so a huge zram is "
                "wasteful.\n"
                "\tMemory Size: %zu kB\n"
                "\tSize you selected: %llu kB\n"
                "Continuing anyway ...\n",
                totalram_bytes >> 10, zram->disksize
                );
        }

        zram->disksize &= PAGE_MASK;
}
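
/*
 * Release whatever backs table slot 'index': clear the flag of a
 * zero-filled page, free the whole page of an uncompressed page, or
 * return the compressed object to the xv pool, updating stats as we go.
 */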
static void zram_free_page(struct zram *zram, size_t index)
{
        u32 clen;
        void *obj;

        struct page *page = zram->table[index].page;
        u32 offset = zram->table[index].offset;

        if (unlikely(!page)) {
                /*
                 * No memory is allocated for zero filled pages.
                 * Simply clear zero page flag.
                 */
                if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                        zram_clear_flag(zram, index, ZRAM_ZERO);
                        zram_stat_dec(&zram->stats.pages_zero);
                }
                return;
        }

        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                clen = PAGE_SIZE;
                __free_page(page);
                zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_dec(&zram->stats.pages_expand);
                goto out;
        }

        obj = kmap_atomic(page, KM_USER0) + offset;
        clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
        kunmap_atomic(obj, KM_USER0);

        xv_free(zram->mem_pool, page, offset);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_dec(&zram->stats.good_compress);

out:
        zram_stat64_sub(zram, &zram->stats.compr_size, clen);
        zram_stat_dec(&zram->stats.pages_stored);

        zram->table[index].page = NULL;
        zram->table[index].offset = 0;
}
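
/* Satisfy a read of a zero-filled page by zeroing the caller's buffer. */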
static void handle_zero_page(struct bio_vec *bvec)
{
        struct page *page = bvec->bv_page;
        void *user_mem;

        user_mem = kmap_atomic(page, KM_USER0);
        memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
        kunmap_atomic(user_mem, KM_USER0);

        flush_dcache_page(page);
}
static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
                                     u32 index, int offset)
{
        struct page *page = bvec->bv_page;
        unsigned char *user_mem, *cmem;

        user_mem = kmap_atomic(page, KM_USER0);
        cmem = kmap_atomic(zram->table[index].page, KM_USER1);

        memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
        kunmap_atomic(cmem, KM_USER1);
        kunmap_atomic(user_mem, KM_USER0);

        flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
        return bvec->bv_len != PAGE_SIZE;
}
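
/*
 * Read one bio_vec worth of data from table slot 'index'. Zero and
 * not-yet-written slots are served as zero pages, uncompressed slots
 * are copied directly, and everything else is LZO-decompressed, via a
 * bounce buffer when the request covers only part of a page.
 */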
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
                          u32 index, int offset, struct bio *bio)
{
        int ret;
        size_t clen;
        struct page *page;
        struct zobj_header *zheader;
        unsigned char *user_mem, *cmem, *uncmem = NULL;

        page = bvec->bv_page;

        if (zram_test_flag(zram, index, ZRAM_ZERO)) {
                handle_zero_page(bvec);
                return 0;
        }

        /* Requested page is not present in compressed area */
        if (unlikely(!zram->table[index].page)) {
                pr_debug("Read before write: sector=%lu, size=%u",
                         (ulong)(bio->bi_sector), bio->bi_size);
                handle_zero_page(bvec);
                return 0;
        }

        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                handle_uncompressed_page(zram, bvec, index, offset);
                return 0;
        }

        if (is_partial_io(bvec)) {
                /* Use a temporary buffer to decompress the page */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        return -ENOMEM;
                }
        }

        user_mem = kmap_atomic(page, KM_USER0);
        if (!is_partial_io(bvec))
                uncmem = user_mem;
        clen = PAGE_SIZE;

        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                zram->table[index].offset;

        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
                                    xv_get_object_size(cmem) - sizeof(*zheader),
                                    uncmem, &clen);

        if (is_partial_io(bvec)) {
                memcpy(user_mem + bvec->bv_offset, uncmem + offset,
                       bvec->bv_len);
                kfree(uncmem);
        }

        kunmap_atomic(cmem, KM_USER1);
        kunmap_atomic(user_mem, KM_USER0);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        flush_dcache_page(page);

        return 0;
}
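
/*
 * Decompress table slot 'index' into 'mem' so that a partial write can
 * be applied on top of the existing page contents.
 */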
static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
        int ret;
        size_t clen = PAGE_SIZE;
        struct zobj_header *zheader;
        unsigned char *cmem;

        if (zram_test_flag(zram, index, ZRAM_ZERO) ||
            !zram->table[index].page) {
                memset(mem, 0, PAGE_SIZE);
                return 0;
        }

        cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
                zram->table[index].offset;

        /* Page is stored uncompressed since it's incompressible */
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
                memcpy(mem, cmem, PAGE_SIZE);
                kunmap_atomic(cmem, KM_USER0);
                return 0;
        }

        ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
                                    xv_get_object_size(cmem) - sizeof(*zheader),
                                    mem, &clen);
        kunmap_atomic(cmem, KM_USER0);

        /* Should NEVER happen. Return bio error if it does. */
        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
                zram_stat64_inc(zram, &zram->stats.failed_reads);
                return ret;
        }

        return 0;
}
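
/*
 * Write one bio_vec into table slot 'index'. Partial writes are merged
 * with the old contents first; the page is then either flagged as zero,
 * LZO-compressed into the xv pool, or stored uncompressed when it does
 * not compress below max_zpage_size.
 */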
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
                           int offset)
{
        int ret;
        u32 store_offset;
        size_t clen;
        struct zobj_header *zheader;
        struct page *page, *page_store;
        unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

        page = bvec->bv_page;
        src = zram->compress_buffer;

        if (is_partial_io(bvec)) {
                /*
                 * This is a partial IO. We need to read the full page
                 * before writing the changes.
                 */
                uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
                if (!uncmem) {
                        pr_info("Error allocating temp memory!\n");
                        ret = -ENOMEM;
                        goto out;
                }
                ret = zram_read_before_write(zram, uncmem, index);
                if (ret) {
                        kfree(uncmem);
                        goto out;
                }
        }

        /*
         * System overwrites unused sectors. Free memory associated
         * with this sector now.
         */
        if (zram->table[index].page ||
            zram_test_flag(zram, index, ZRAM_ZERO))
                zram_free_page(zram, index);

        user_mem = kmap_atomic(page, KM_USER0);

        if (is_partial_io(bvec))
                memcpy(uncmem + offset, user_mem + bvec->bv_offset,
                       bvec->bv_len);
        else
                uncmem = user_mem;

        if (page_zero_filled(uncmem)) {
                kunmap_atomic(user_mem, KM_USER0);
                if (is_partial_io(bvec))
                        kfree(uncmem);
                zram_stat_inc(&zram->stats.pages_zero);
                zram_set_flag(zram, index, ZRAM_ZERO);
                ret = 0;
                goto out;
        }

        ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
                               zram->compress_workmem);

        kunmap_atomic(user_mem, KM_USER0);
        if (is_partial_io(bvec))
                kfree(uncmem);

        if (unlikely(ret != LZO_E_OK)) {
                pr_err("Compression failed! err=%d\n", ret);
                goto out;
        }

        /*
         * Page is incompressible. Store it as-is (uncompressed)
         * since we do not want to return too many disk write
         * errors, which have the side effect of hanging the system.
         */
        if (unlikely(clen > max_zpage_size)) {
                clen = PAGE_SIZE;
                page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
                if (unlikely(!page_store)) {
                        pr_info("Error allocating memory for "
                                "incompressible page: %u\n", index);
                        ret = -ENOMEM;
                        goto out;
                }

                store_offset = 0;
                zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
                zram_stat_inc(&zram->stats.pages_expand);
                zram->table[index].page = page_store;
                src = kmap_atomic(page, KM_USER0);
                goto memstore;
        }

        if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
                      &zram->table[index].page, &store_offset,
                      GFP_NOIO | __GFP_HIGHMEM)) {
                pr_info("Error allocating memory for compressed "
                        "page: %u, size=%zu\n", index, clen);
                ret = -ENOMEM;
                goto out;
        }

memstore:
        zram->table[index].offset = store_offset;

        cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
                zram->table[index].offset;

#if 0
        /* Back-reference needed for memory defragmentation */
        if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
                zheader = (struct zobj_header *)cmem;
                zheader->table_idx = index;
                cmem += sizeof(*zheader);
        }
#endif

        memcpy(cmem, src, clen);

        kunmap_atomic(cmem, KM_USER1);
        if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                kunmap_atomic(src, KM_USER0);

        /* Update stats */
        zram_stat64_add(zram, &zram->stats.compr_size, clen);
        zram_stat_inc(&zram->stats.pages_stored);
        if (clen <= PAGE_SIZE / 2)
                zram_stat_inc(&zram->stats.good_compress);

        return 0;

out:
        if (ret)
                zram_stat64_inc(zram, &zram->stats.failed_writes);
        return ret;
}
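
/*
 * Dispatch a single bio_vec: reads take zram->lock shared, writes take
 * it exclusive.
 */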
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
                        int offset, struct bio *bio, int rw)
{
        int ret;

        if (rw == READ) {
                down_read(&zram->lock);
                ret = zram_bvec_read(zram, bvec, index, offset, bio);
                up_read(&zram->lock);
        } else {
                down_write(&zram->lock);
                ret = zram_bvec_write(zram, bvec, index, offset);
                up_write(&zram->lock);
        }

        return ret;
}
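
/* Advance the (page index, intra-page offset) cursor past this bio_vec. */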
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
        if (*offset + bvec->bv_len >= PAGE_SIZE)
                (*index)++;
        *offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}
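
/*
 * Walk the bio segment by segment, splitting any bio_vec that crosses a
 * page boundary so that each call into zram_bvec_rw() touches a single
 * zram page.
 */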
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
        int i, offset;
        u32 index;
        struct bio_vec *bvec;

        switch (rw) {
        case READ:
                zram_stat64_inc(zram, &zram->stats.num_reads);
                break;
        case WRITE:
                zram_stat64_inc(zram, &zram->stats.num_writes);
                break;
        }

        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
        offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
                int max_transfer_size = PAGE_SIZE - offset;

                if (bvec->bv_len > max_transfer_size) {
                        /*
                         * zram_bvec_rw() can only operate on a single
                         * zram page. Split the bio vector.
                         */
                        struct bio_vec bv;

                        bv.bv_page = bvec->bv_page;
                        bv.bv_len = max_transfer_size;
                        bv.bv_offset = bvec->bv_offset;

                        if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
                                goto out;

                        bv.bv_len = bvec->bv_len - max_transfer_size;
                        bv.bv_offset += max_transfer_size;
                        if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
                                goto out;
                } else
                        if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
                            < 0)
                                goto out;

                update_position(&index, &offset, bvec);
        }

        set_bit(BIO_UPTODATE, &bio->bi_flags);
        bio_endio(bio, 0);
        return;

out:
        bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
        if (unlikely(
                (bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
                (bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
                (bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

                return 0;
        }

        /* I/O request is valid */
        return 1;
}

/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
        struct zram *zram = queue->queuedata;

        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                bio_io_error(bio);
                return 0;
        }

        if (unlikely(!zram->init_done) && zram_init_device(zram)) {
                bio_io_error(bio);
                return 0;
        }

        __zram_make_request(zram, bio, bio_data_dir(bio));
        return 0;
}
void zram_reset_device(struct zram *zram)
{
        size_t index;

        mutex_lock(&zram->init_lock);
        zram->init_done = 0;

        /* Free various per-device buffers */
        kfree(zram->compress_workmem);
        free_pages((unsigned long)zram->compress_buffer, 1);

        zram->compress_workmem = NULL;
        zram->compress_buffer = NULL;

        /* Free all pages that are still in this zram device */
        for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
                struct page *page;
                u16 offset;

                page = zram->table[index].page;
                offset = zram->table[index].offset;

                if (!page)
                        continue;

                if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
                        __free_page(page);
                else
                        xv_free(zram->mem_pool, page, offset);
        }

        vfree(zram->table);
        zram->table = NULL;

        xv_destroy_pool(zram->mem_pool);
        zram->mem_pool = NULL;

        /* Reset stats */
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
        mutex_unlock(&zram->init_lock);
}
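
/*
 * Lazily set up the device on first I/O: choose the disk size, allocate
 * the LZO working memory, an order-1 compression buffer, the per-page
 * address table and the xv memory pool, then set the disk capacity.
 */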
int zram_init_device(struct zram *zram)
{
        int ret;
        size_t num_pages;

        mutex_lock(&zram->init_lock);

        if (zram->init_done) {
                mutex_unlock(&zram->init_lock);
                return 0;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

        zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!zram->compress_workmem) {
                pr_err("Error allocating compressor working memory!\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
        if (!zram->compress_buffer) {
                pr_err("Error allocating compressor buffer space\n");
                ret = -ENOMEM;
                goto fail;
        }

        num_pages = zram->disksize >> PAGE_SHIFT;
        zram->table = vzalloc(num_pages * sizeof(*zram->table));
        if (!zram->table) {
                pr_err("Error allocating zram address table\n");
                /* To prevent accessing table entries during cleanup */
                zram->disksize = 0;
                ret = -ENOMEM;
                goto fail;
        }

        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

        /* zram devices sort of resemble non-rotational disks */
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

        zram->mem_pool = xv_create_pool();
        if (!zram->mem_pool) {
                pr_err("Error creating memory pool\n");
                ret = -ENOMEM;
                goto fail;
        }

        zram->init_done = 1;
        mutex_unlock(&zram->init_lock);

        pr_debug("Initialization done!\n");
        return 0;

fail:
        mutex_unlock(&zram->init_lock);
        zram_reset_device(zram);

        pr_err("Initialization failed: err=%d\n", ret);
        return ret;
}
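
/*
 * Called by the swap layer when a swap slot backed by this device is
 * freed, so the corresponding page can be released immediately.
 */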
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
        struct zram *zram;

        zram = bdev->bd_disk->private_data;
        zram_free_page(zram, index);
        zram_stat64_inc(zram, &zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
        .swap_slot_free_notify = zram_slot_free_notify,
        .owner = THIS_MODULE
};
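
/*
 * Allocate and register the block-layer objects for one device: the
 * request queue, the gendisk and its sysfs attribute group. Capacity
 * stays at 0 until the device is initialized.
 */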
static int create_device(struct zram *zram, int device_id)
{
        int ret = 0;

        init_rwsem(&zram->lock);
        mutex_init(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);
        if (!zram->queue) {
                pr_err("Error allocating disk queue for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        blk_queue_make_request(zram->queue, zram_make_request);
        zram->queue->queuedata = zram;

        /* gendisk structure */
        zram->disk = alloc_disk(1);
        if (!zram->disk) {
                blk_cleanup_queue(zram->queue);
                pr_warning("Error allocating disk structure for device %d\n",
                        device_id);
                ret = -ENOMEM;
                goto out;
        }

        zram->disk->major = zram_major;
        zram->disk->first_minor = device_id;
        zram->disk->fops = &zram_devops;
        zram->disk->queue = zram->queue;
        zram->disk->private_data = zram;
        snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

        /* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
        set_capacity(zram->disk, 0);

        /*
         * To ensure that we always get PAGE_SIZE aligned
         * and n*PAGE_SIZED sized I/O requests.
         */
        blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
        blk_queue_logical_block_size(zram->disk->queue,
                                        ZRAM_LOGICAL_BLOCK_SIZE);
        blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
        blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

        add_disk(zram->disk);

        ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
                                &zram_disk_attr_group);
        if (ret < 0) {
                pr_warning("Error creating sysfs group");
                goto out;
        }

        zram->init_done = 0;

out:
        return ret;
}

static void destroy_device(struct zram *zram)
{
        sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
                        &zram_disk_attr_group);

        if (zram->disk) {
                del_gendisk(zram->disk);
                put_disk(zram->disk);
        }

        if (zram->queue)
                blk_cleanup_queue(zram->queue);
}
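
/*
 * Module entry point: register the zram block major, then create
 * num_devices devices, unwinding everything on failure.
 */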
static int __init zram_init(void)
{
        int ret, dev_id;

        if (num_devices > max_num_devices) {
                pr_warning("Invalid value for num_devices: %u\n",
                                num_devices);
                ret = -EINVAL;
                goto out;
        }

        zram_major = register_blkdev(0, "zram");
        if (zram_major <= 0) {
                pr_warning("Unable to get major number\n");
                ret = -EBUSY;
                goto out;
        }

        if (!num_devices) {
                pr_info("num_devices not specified. Using default: 1\n");
                num_devices = 1;
        }

        /* Allocate the device array and initialize each one */
        pr_info("Creating %u devices ...\n", num_devices);
        devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
        if (!devices) {
                ret = -ENOMEM;
                goto unregister;
        }

        for (dev_id = 0; dev_id < num_devices; dev_id++) {
                ret = create_device(&devices[dev_id], dev_id);
                if (ret)
                        goto free_devices;
        }

        return 0;

free_devices:
        while (dev_id)
                destroy_device(&devices[--dev_id]);
        kfree(devices);
unregister:
        unregister_blkdev(zram_major, "zram");
out:
        return ret;
}

static void __exit zram_exit(void)
{
        int i;
        struct zram *zram;

        for (i = 0; i < num_devices; i++) {
                zram = &devices[i];

                destroy_device(zram);
                if (zram->init_done)
                        zram_reset_device(zram);
        }

        unregister_blkdev(zram_major, "zram");

        kfree(devices);
        pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");