
/drivers/staging/zram/zram_drv.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 * Project home: http://compcache.googlecode.com
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
struct zram *devices;

/* Module params (documentation at end) */
unsigned int num_devices;

static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void zram_stat64_add(struct zram *zram, u64 *v, u64 inc)
{
	spin_lock(&zram->stat64_lock);
	*v = *v + inc;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_sub(struct zram *zram, u64 *v, u64 dec)
{
	spin_lock(&zram->stat64_lock);
	*v = *v - dec;
	spin_unlock(&zram->stat64_lock);
}

static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	zram_stat64_add(zram, v, 1);
}

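/*
 * Note on the stat64 helpers above: they take stat64_lock because
 * 64-bit loads and stores are not atomic on 32-bit architectures.
 * The plain u32 counters are updated without locking here.
 */
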
static int zram_test_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	return zram->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram *zram, u32 index,
			enum zram_pageflags flag)
{
	zram->table[index].flags &= ~BIT(flag);
}

static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

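/*
 * page_zero_filled() above scans a page one word at a time; an
 * all-zero page is never allocated storage and is represented by
 * just the ZRAM_ZERO flag (see zram_bvec_write() below).
 */
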
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}

static void zram_free_page(struct zram *zram, size_t index)
{
	u32 clen;
	void *obj;

	struct page *page = zram->table[index].page;
	u32 offset = zram->table[index].offset;

	if (unlikely(!page)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(zram, index, ZRAM_ZERO)) {
			zram_clear_flag(zram, index, ZRAM_ZERO);
			zram_stat_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		clen = PAGE_SIZE;
		__free_page(page);
		zram_clear_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_dec(&zram->stats.pages_expand);
		goto out;
	}

	obj = kmap_atomic(page, KM_USER0) + offset;
	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
	kunmap_atomic(obj, KM_USER0);

	xv_free(zram->mem_pool, page, offset);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_dec(&zram->stats.good_compress);

out:
	zram_stat64_sub(zram, &zram->stats.compr_size, clen);
	zram_stat_dec(&zram->stats.pages_stored);

	zram->table[index].page = NULL;
	zram->table[index].offset = 0;
}

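/*
 * zram_free_page() above handles the three storage cases in turn:
 * zero pages (flag only), incompressible pages (a whole page from
 * the page allocator), and compressed objects (freed back to the
 * xvmalloc pool).
 */
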
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page, KM_USER0);
	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
				     u32 index, int offset)
{
	struct page *page = bvec->bv_page;
	unsigned char *user_mem, *cmem;

	user_mem = kmap_atomic(page, KM_USER0);
	cmem = kmap_atomic(zram->table[index].page, KM_USER1);

	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	flush_dcache_page(page);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

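/*
 * A bio vector is "partial" when it covers less than a full page.
 * Since LZO decompression always produces a whole page, partial
 * reads below go through a temporary buffer and copy out only the
 * requested range.
 */
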
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	size_t clen;
	struct page *page;
	struct zobj_header *zheader;
	unsigned char *user_mem, *cmem, *uncmem = NULL;

	page = bvec->bv_page;

	if (zram_test_flag(zram, index, ZRAM_ZERO)) {
		handle_zero_page(bvec);
		return 0;
	}

	/* Requested page is not present in compressed area */
	if (unlikely(!zram->table[index].page)) {
		pr_debug("Read before write: sector=%lu, size=%u",
			 (ulong)(bio->bi_sector), bio->bi_size);
		handle_zero_page(bvec);
		return 0;
	}

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		handle_uncompressed_page(zram, bvec, index, offset);
		return 0;
	}

	if (is_partial_io(bvec)) {
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			return -ENOMEM;
		}
	}

	user_mem = kmap_atomic(page, KM_USER0);
	if (!is_partial_io(bvec))
		uncmem = user_mem;
	clen = PAGE_SIZE;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				    xv_get_object_size(cmem) - sizeof(*zheader),
				    uncmem, &clen);

	if (is_partial_io(bvec)) {
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
		       bvec->bv_len);
		kfree(uncmem);
	}

	kunmap_atomic(cmem, KM_USER1);
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	flush_dcache_page(page);

	return 0;
}

static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
{
	int ret;
	size_t clen = PAGE_SIZE;
	struct zobj_header *zheader;
	unsigned char *cmem;

	if (zram_test_flag(zram, index, ZRAM_ZERO) ||
	    !zram->table[index].page) {
		memset(mem, 0, PAGE_SIZE);
		return 0;
	}

	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
		zram->table[index].offset;

	/* Page is stored uncompressed since it's incompressible */
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
		memcpy(mem, cmem, PAGE_SIZE);
		kunmap_atomic(cmem, KM_USER0);
		return 0;
	}

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
				    xv_get_object_size(cmem) - sizeof(*zheader),
				    mem, &clen);
	kunmap_atomic(cmem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

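/*
 * Partial writes are read-modify-write: zram_read_before_write()
 * above reconstructs the full page, the new data is merged into it,
 * and the whole page is compressed and stored again.
 */
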
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret;
	u32 store_offset;
	size_t clen;
	struct zobj_header *zheader;
	struct page *page, *page_store;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;

	page = bvec->bv_page;
	src = zram->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!uncmem) {
			pr_info("Error allocating temp memory!\n");
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_read_before_write(zram, uncmem, index);
		if (ret) {
			kfree(uncmem);
			goto out;
		}
	}

	/*
	 * System overwrites unused sectors. Free memory associated
	 * with this sector now.
	 */
	if (zram->table[index].page ||
	    zram_test_flag(zram, index, ZRAM_ZERO))
		zram_free_page(zram, index);

	user_mem = kmap_atomic(page, KM_USER0);

	if (is_partial_io(bvec))
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
	else
		uncmem = user_mem;

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem, KM_USER0);
		if (is_partial_io(bvec))
			kfree(uncmem);
		zram_stat_inc(&zram->stats.pages_zero);
		zram_set_flag(zram, index, ZRAM_ZERO);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	kunmap_atomic(user_mem, KM_USER0);
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
	 * errors, which have the side effect of hanging the system.
	 */
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		page_store = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
		if (unlikely(!page_store)) {
			pr_info("Error allocating memory for "
				"incompressible page: %u\n", index);
			ret = -ENOMEM;
			goto out;
		}

		store_offset = 0;
		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
		zram_stat_inc(&zram->stats.pages_expand);
		zram->table[index].page = page_store;
		src = kmap_atomic(page, KM_USER0);
		goto memstore;
	}

	if (xv_malloc(zram->mem_pool, clen + sizeof(*zheader),
		      &zram->table[index].page, &store_offset,
		      GFP_NOIO | __GFP_HIGHMEM)) {
		pr_info("Error allocating memory for compressed "
			"page: %u, size=%zu\n", index, clen);
		ret = -ENOMEM;
		goto out;
	}

memstore:
	zram->table[index].offset = store_offset;

	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

#if 0
	/* Back-reference needed for memory defragmentation */
	if (!zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)) {
		zheader = (struct zobj_header *)cmem;
		zheader->table_idx = index;
		cmem += sizeof(*zheader);
	}
#endif

	memcpy(cmem, src, clen);

	kunmap_atomic(cmem, KM_USER1);
	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
		kunmap_atomic(src, KM_USER0);

	/* Update stats */
	zram_stat64_add(zram, &zram->stats.compr_size, clen);
	zram_stat_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		zram_stat_inc(&zram->stats.good_compress);

	return 0;

out:
	if (ret)
		zram_stat64_inc(zram, &zram->stats.failed_writes);
	return ret;
}

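/*
 * Summary of the write path above: an all-zero page costs no storage
 * (only the ZRAM_ZERO flag); a page whose compressed size exceeds
 * max_zpage_size is stored raw in a freshly allocated page; all
 * other pages land in the xvmalloc pool. The zobj_header
 * back-reference is compiled out (see the #if 0 block).
 */
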
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

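/*
 * update_position() example (4 KB pages): starting at offset 3072, a
 * 2048-byte segment crosses into the next page, so index advances by
 * one and the new offset becomes (3072 + 2048) % 4096 = 1024.
 */
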
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int i, offset;
	u32 index;
	struct bio_vec *bvec;

	switch (rw) {
	case READ:
		zram_stat64_inc(zram, &zram->stats.num_reads);
		break;
	case WRITE:
		zram_stat64_inc(zram, &zram->stats.num_writes);
		break;
	}

	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
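	/*
	 * Example: with 4 KB pages and 512-byte sectors,
	 * SECTORS_PER_PAGE_SHIFT is 3, so bi_sector = 13 maps to
	 * index = 13 >> 3 = 1 and offset = (13 & 7) << 9 = 2560.
	 */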

	bio_for_each_segment(bvec, bio, i) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec->bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec->bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec->bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec->bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	if (unlikely(
		(bio->bi_sector >= (zram->disksize >> SECTOR_SHIFT)) ||
		(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)) ||
		(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))) {

		return 0;
	}

	/* I/O request is valid */
	return 1;
}

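/*
 * Out-of-range or misaligned bios are rejected before any table
 * access; they are counted in the invalid_io stat and completed
 * with an error.
 */
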
/*
 * Handler function for all zram I/O requests.
 */
static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	if (unlikely(!zram->init_done) && zram_init_device(zram)) {
		bio_io_error(bio);
		return 0;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));

	return 0;
}

void zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
	kfree(zram->compress_workmem);
	free_pages((unsigned long)zram->compress_buffer, 1);

	zram->compress_workmem = NULL;
	zram->compress_buffer = NULL;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		struct page *page;
		u16 offset;

		page = zram->table[index].page;
		offset = zram->table[index].offset;

		if (!page)
			continue;

		if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
			__free_page(page);
		else
			xv_free(zram->mem_pool, page, offset);
	}

	vfree(zram->table);
	zram->table = NULL;

	xv_destroy_pool(zram->mem_pool);
	zram->mem_pool = NULL;

	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	mutex_unlock(&zram->init_lock);
}

int zram_init_device(struct zram *zram)
{
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		return 0;
	}

	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vzalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
	}

	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->mem_pool = xv_create_pool();
	if (!zram->mem_pool) {
		pr_err("Error creating memory pool\n");
		ret = -ENOMEM;
		goto fail;
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}

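/*
 * Device memory (the page table, LZO work memory, and the xvmalloc
 * pool) is allocated lazily by zram_init_device() on the first I/O,
 * not at module load; zram_reset_device() returns it all.
 */
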
void zram_slot_free_notify(struct block_device *bdev, unsigned long index)
{
	struct zram *zram;

	zram = bdev->bd_disk->private_data;
	zram_free_page(zram, index);
	zram_stat64_inc(zram, &zram->stats.notify_free);
}

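/*
 * When zram backs a swap device, the swap layer calls
 * zram_slot_free_notify() above as soon as a swap slot is freed, so
 * the compressed copy is released immediately instead of lingering
 * until the slot is overwritten.
 */
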
static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = 0;

	init_rwsem(&zram->lock);
	mutex_init(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		blk_cleanup_queue(zram->queue);
		pr_warning("Error allocating disk structure for device %d\n",
			device_id);
		ret = -ENOMEM;
		goto out;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warning("Error creating sysfs group");
		goto out;
	}

	zram->init_done = 0;

out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	if (zram->disk) {
		del_gendisk(zram->disk);
		put_disk(zram->disk);
	}

	if (zram->queue)
		blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warning("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	if (!num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", num_devices);
	devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&devices[--dev_id]);
	kfree(devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &devices[i];

		destroy_device(zram);
		if (zram->init_done)
			zram_reset_device(zram);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(devices);
	pr_debug("Cleanup done!\n");
}

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
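
/*
 * Typical usage as a swap device (a sketch; the disksize attribute
 * belongs to the zram_disk_attr_group created in create_device()
 * above):
 *
 *   modprobe zram num_devices=1
 *   echo 268435456 > /sys/block/zram0/disksize   # 256 MB
 *   mkswap /dev/zram0
 *   swapon /dev/zram0
 */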