
/drivers/block/pktcdvd.c

https://bitbucket.org/evzijst/gittest
C | 2681 lines | 1973 code | 390 blank | 318 comment | 369 complexity | 5cf97ce5b6e68125d0b8ceb8b0b8443a MD5

Large files are truncated; the listing below is incomplete.

   1/*
   2 * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
   3 * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
   4 *
   5 * May be copied or modified under the terms of the GNU General Public
   6 * License.  See linux/COPYING for more information.
   7 *
   8 * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
   9 * DVD-RW devices (aka an exercise in block layer masturbation)
  10 *
  11 *
  12 * TODO: (circa order of when I will fix it)
  13 * - Only able to write on CD-RW media right now.
  14 * - check host application code on media and set it in write page
  15 * - interface for UDF <-> packet to negotiate a new location when a write
  16 *   fails.
  17 * - handle OPC, especially for -RW media
  18 *
  19 * Theory of operation:
  20 *
  21 * We use a custom make_request_fn function that forwards reads directly to
  22 * the underlying CD device. Write requests are either attached directly to
  23 * a live packet_data object, or simply stored sequentially in a list for
  24 * later processing by the kcdrwd kernel thread. This driver doesn't use
   25 * any elevator functionality as defined by the elevator_s struct, but the
  26 * underlying CD device uses a standard elevator.
  27 *
  28 * This strategy makes it possible to do very late merging of IO requests.
  29 * A new bio sent to pkt_make_request can be merged with a live packet_data
  30 * object even if the object is in the data gathering state.
  31 *
  32 *************************************************************************/
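/*
 * Illustrative example of the flow described above (assuming the typical
 * 32-frame, 64kB fixed packet size): a 4kB write bio for sector 300 is
 * queued on pd->bio_queue by pkt_make_request; kcdrwd later allocates a
 * packet_data for zone 256, links the bio into pkt->orig_bios, schedules
 * reads for the 30 frames of the zone that no queued bio covers, and
 * finally writes the whole 64kB packet with a single w_bio.
 */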
  33
  34#define VERSION_CODE	"v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
  35
  36#include <linux/pktcdvd.h>
  37#include <linux/config.h>
  38#include <linux/module.h>
  39#include <linux/types.h>
  40#include <linux/kernel.h>
  41#include <linux/kthread.h>
  42#include <linux/errno.h>
  43#include <linux/spinlock.h>
  44#include <linux/file.h>
  45#include <linux/proc_fs.h>
  46#include <linux/seq_file.h>
  47#include <linux/miscdevice.h>
  48#include <linux/suspend.h>
  49#include <scsi/scsi_cmnd.h>
  50#include <scsi/scsi_ioctl.h>
  51
  52#include <asm/uaccess.h>
  53
  54#if PACKET_DEBUG
  55#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
  56#else
  57#define DPRINTK(fmt, args...)
  58#endif
  59
  60#if PACKET_DEBUG > 1
  61#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
  62#else
  63#define VPRINTK(fmt, args...)
  64#endif
  65
  66#define MAX_SPEED 0xffff
  67
  68#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
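/*
 * Example: with 64kB fixed packets (settings.size == 128 sectors of 512
 * bytes) and a zero offset, ZONE(300, pd) == 256, i.e. sector 300 belongs
 * to the packet-aligned zone starting at sector 256.
 */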
  69
  70static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
  71static struct proc_dir_entry *pkt_proc;
  72static int pkt_major;
  73static struct semaphore ctl_mutex;	/* Serialize open/close/setup/teardown */
  74static mempool_t *psd_pool;
  75
  76
  77static void pkt_bio_finished(struct pktcdvd_device *pd)
  78{
  79	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
  80	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
  81		VPRINTK("pktcdvd: queue empty\n");
  82		atomic_set(&pd->iosched.attention, 1);
  83		wake_up(&pd->wqueue);
  84	}
  85}
  86
  87static void pkt_bio_destructor(struct bio *bio)
  88{
  89	kfree(bio->bi_io_vec);
  90	kfree(bio);
  91}
  92
  93static struct bio *pkt_bio_alloc(int nr_iovecs)
  94{
  95	struct bio_vec *bvl = NULL;
  96	struct bio *bio;
  97
  98	bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
  99	if (!bio)
 100		goto no_bio;
 101	bio_init(bio);
 102
 103	bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
 104	if (!bvl)
 105		goto no_bvl;
 106	memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
 107
 108	bio->bi_max_vecs = nr_iovecs;
 109	bio->bi_io_vec = bvl;
 110	bio->bi_destructor = pkt_bio_destructor;
 111
 112	return bio;
 113
 114 no_bvl:
 115	kfree(bio);
 116 no_bio:
 117	return NULL;
 118}
 119
 120/*
 121 * Allocate a packet_data struct
 122 */
 123static struct packet_data *pkt_alloc_packet_data(void)
 124{
 125	int i;
 126	struct packet_data *pkt;
 127
 128	pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
 129	if (!pkt)
 130		goto no_pkt;
 131	memset(pkt, 0, sizeof(struct packet_data));
 132
 133	pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
 134	if (!pkt->w_bio)
 135		goto no_bio;
 136
 137	for (i = 0; i < PAGES_PER_PACKET; i++) {
 138		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
 139		if (!pkt->pages[i])
 140			goto no_page;
 141	}
 142
 143	spin_lock_init(&pkt->lock);
 144
 145	for (i = 0; i < PACKET_MAX_SIZE; i++) {
 146		struct bio *bio = pkt_bio_alloc(1);
 147		if (!bio)
 148			goto no_rd_bio;
 149		pkt->r_bios[i] = bio;
 150	}
 151
 152	return pkt;
 153
 154no_rd_bio:
 155	for (i = 0; i < PACKET_MAX_SIZE; i++) {
 156		struct bio *bio = pkt->r_bios[i];
 157		if (bio)
 158			bio_put(bio);
 159	}
 160
 161no_page:
 162	for (i = 0; i < PAGES_PER_PACKET; i++)
 163		if (pkt->pages[i])
 164			__free_page(pkt->pages[i]);
 165	bio_put(pkt->w_bio);
 166no_bio:
 167	kfree(pkt);
 168no_pkt:
 169	return NULL;
 170}
 171
 172/*
 173 * Free a packet_data struct
 174 */
 175static void pkt_free_packet_data(struct packet_data *pkt)
 176{
 177	int i;
 178
 179	for (i = 0; i < PACKET_MAX_SIZE; i++) {
 180		struct bio *bio = pkt->r_bios[i];
 181		if (bio)
 182			bio_put(bio);
 183	}
 184	for (i = 0; i < PAGES_PER_PACKET; i++)
 185		__free_page(pkt->pages[i]);
 186	bio_put(pkt->w_bio);
 187	kfree(pkt);
 188}
 189
 190static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
 191{
 192	struct packet_data *pkt, *next;
 193
 194	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
 195
 196	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
 197		pkt_free_packet_data(pkt);
 198	}
 199}
 200
 201static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
 202{
 203	struct packet_data *pkt;
 204
 205	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
 206	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
 207	spin_lock_init(&pd->cdrw.active_list_lock);
 208	while (nr_packets > 0) {
 209		pkt = pkt_alloc_packet_data();
 210		if (!pkt) {
 211			pkt_shrink_pktlist(pd);
 212			return 0;
 213		}
 214		pkt->id = nr_packets;
 215		pkt->pd = pd;
 216		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
 217		nr_packets--;
 218	}
 219	return 1;
 220}
 221
 222static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
 223{
 224	return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
 225}
 226
 227static void pkt_rb_free(void *ptr, void *data)
 228{
 229	kfree(ptr);
 230}
 231
 232static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
 233{
 234	struct rb_node *n = rb_next(&node->rb_node);
 235	if (!n)
 236		return NULL;
 237	return rb_entry(n, struct pkt_rb_node, rb_node);
 238}
 239
 240static inline void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
 241{
 242	rb_erase(&node->rb_node, &pd->bio_queue);
 243	mempool_free(node, pd->rb_pool);
 244	pd->bio_queue_size--;
 245	BUG_ON(pd->bio_queue_size < 0);
 246}
 247
 248/*
 249 * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
 250 */
 251static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
 252{
 253	struct rb_node *n = pd->bio_queue.rb_node;
 254	struct rb_node *next;
 255	struct pkt_rb_node *tmp;
 256
 257	if (!n) {
 258		BUG_ON(pd->bio_queue_size > 0);
 259		return NULL;
 260	}
 261
 262	for (;;) {
 263		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
 264		if (s <= tmp->bio->bi_sector)
 265			next = n->rb_left;
 266		else
 267			next = n->rb_right;
 268		if (!next)
 269			break;
 270		n = next;
 271	}
 272
 273	if (s > tmp->bio->bi_sector) {
 274		tmp = pkt_rbtree_next(tmp);
 275		if (!tmp)
 276			return NULL;
 277	}
 278	BUG_ON(s > tmp->bio->bi_sector);
 279	return tmp;
 280}
 281
 282/*
 283 * Insert a node into the pd->bio_queue rb tree.
 284 */
 285static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
 286{
 287	struct rb_node **p = &pd->bio_queue.rb_node;
 288	struct rb_node *parent = NULL;
 289	sector_t s = node->bio->bi_sector;
 290	struct pkt_rb_node *tmp;
 291
 292	while (*p) {
 293		parent = *p;
 294		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
 295		if (s < tmp->bio->bi_sector)
 296			p = &(*p)->rb_left;
 297		else
 298			p = &(*p)->rb_right;
 299	}
 300	rb_link_node(&node->rb_node, parent, p);
 301	rb_insert_color(&node->rb_node, &pd->bio_queue);
 302	pd->bio_queue_size++;
 303}
 304
 305/*
 306 * Add a bio to a single linked list defined by its head and tail pointers.
 307 */
 308static inline void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
 309{
 310	bio->bi_next = NULL;
 311	if (*list_tail) {
 312		BUG_ON((*list_head) == NULL);
 313		(*list_tail)->bi_next = bio;
 314		(*list_tail) = bio;
 315	} else {
 316		BUG_ON((*list_head) != NULL);
 317		(*list_head) = bio;
 318		(*list_tail) = bio;
 319	}
 320}
 321
 322/*
 323 * Remove and return the first bio from a single linked list defined by its
 324 * head and tail pointers.
 325 */
 326static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
 327{
 328	struct bio *bio;
 329
 330	if (*list_head == NULL)
 331		return NULL;
 332
 333	bio = *list_head;
 334	*list_head = bio->bi_next;
 335	if (*list_head == NULL)
 336		*list_tail = NULL;
 337
 338	bio->bi_next = NULL;
 339	return bio;
 340}
 341
 342/*
 343 * Send a packet_command to the underlying block device and
 344 * wait for completion.
 345 */
 346static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
 347{
 348	char sense[SCSI_SENSE_BUFFERSIZE];
 349	request_queue_t *q;
 350	struct request *rq;
 351	DECLARE_COMPLETION(wait);
 352	int err = 0;
 353
 354	q = bdev_get_queue(pd->bdev);
 355
 356	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
 357			     __GFP_WAIT);
 358	rq->errors = 0;
 359	rq->rq_disk = pd->bdev->bd_disk;
 360	rq->bio = NULL;
 361	rq->buffer = NULL;
 362	rq->timeout = 60*HZ;
 363	rq->data = cgc->buffer;
 364	rq->data_len = cgc->buflen;
 365	rq->sense = sense;
 366	memset(sense, 0, sizeof(sense));
 367	rq->sense_len = 0;
 368	rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
 369	if (cgc->quiet)
 370		rq->flags |= REQ_QUIET;
 371	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
 372	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
 373		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
 374
 375	rq->ref_count++;
 376	rq->flags |= REQ_NOMERGE;
 377	rq->waiting = &wait;
 378	rq->end_io = blk_end_sync_rq;
 379	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
 380	generic_unplug_device(q);
 381	wait_for_completion(&wait);
 382
 383	if (rq->errors)
 384		err = -EIO;
 385
 386	blk_put_request(rq);
 387	return err;
 388}
 389
 390/*
 391 * A generic sense dump / resolve mechanism should be implemented across
 392 * all ATAPI + SCSI devices.
 393 */
 394static void pkt_dump_sense(struct packet_command *cgc)
 395{
 396	static char *info[9] = { "No sense", "Recovered error", "Not ready",
 397				 "Medium error", "Hardware error", "Illegal request",
 398				 "Unit attention", "Data protect", "Blank check" };
 399	int i;
 400	struct request_sense *sense = cgc->sense;
 401
 402	printk("pktcdvd:");
 403	for (i = 0; i < CDROM_PACKET_SIZE; i++)
 404		printk(" %02x", cgc->cmd[i]);
 405	printk(" - ");
 406
 407	if (sense == NULL) {
 408		printk("no sense\n");
 409		return;
 410	}
 411
 412	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
 413
 414	if (sense->sense_key > 8) {
 415		printk(" (INVALID)\n");
 416		return;
 417	}
 418
 419	printk(" (%s)\n", info[sense->sense_key]);
 420}
 421
 422/*
 423 * flush the drive cache to media
 424 */
 425static int pkt_flush_cache(struct pktcdvd_device *pd)
 426{
 427	struct packet_command cgc;
 428
 429	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
 430	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
 431	cgc.quiet = 1;
 432
 433	/*
  434	 * the IMMED bit -- we default to not setting it; although setting it
  435	 * would allow a much faster close, this is safer
 436	 */
 437#if 0
 438	cgc.cmd[1] = 1 << 1;
 439#endif
 440	return pkt_generic_packet(pd, &cgc);
 441}
 442
 443/*
  444 * speeds are given in kB/s, as passed to GPCMD_SET_SPEED; 0xffff selects the drive maximum
 445 */
 446static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
 447{
 448	struct packet_command cgc;
 449	struct request_sense sense;
 450	int ret;
 451
 452	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
 453	cgc.sense = &sense;
 454	cgc.cmd[0] = GPCMD_SET_SPEED;
 455	cgc.cmd[2] = (read_speed >> 8) & 0xff;
 456	cgc.cmd[3] = read_speed & 0xff;
 457	cgc.cmd[4] = (write_speed >> 8) & 0xff;
 458	cgc.cmd[5] = write_speed & 0xff;
 459
 460	if ((ret = pkt_generic_packet(pd, &cgc)))
 461		pkt_dump_sense(&cgc);
 462
 463	return ret;
 464}
 465
 466/*
 467 * Queue a bio for processing by the low-level CD device. Must be called
 468 * from process context.
 469 */
 470static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
 471{
 472	spin_lock(&pd->iosched.lock);
 473	if (bio_data_dir(bio) == READ) {
 474		pkt_add_list_last(bio, &pd->iosched.read_queue,
 475				  &pd->iosched.read_queue_tail);
 476		if (high_prio_read)
 477			pd->iosched.high_prio_read = 1;
 478	} else {
 479		pkt_add_list_last(bio, &pd->iosched.write_queue,
 480				  &pd->iosched.write_queue_tail);
 481	}
 482	spin_unlock(&pd->iosched.lock);
 483
 484	atomic_set(&pd->iosched.attention, 1);
 485	wake_up(&pd->wqueue);
 486}
 487
 488/*
 489 * Process the queued read/write requests. This function handles special
 490 * requirements for CDRW drives:
 491 * - A cache flush command must be inserted before a read request if the
 492 *   previous request was a write.
  493 * - Switching between reading and writing is slow, so don't do it more often
 494 *   than necessary.
 495 * - Set the read speed according to current usage pattern. When only reading
 496 *   from the device, it's best to use the highest possible read speed, but
 497 *   when switching often between reading and writing, it's better to have the
 498 *   same read and write speeds.
 499 * - Reads originating from user space should have higher priority than reads
 500 *   originating from pkt_gather_data, because some process is usually waiting
 501 *   on reads of the first kind.
 502 */
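/*
 * Example of the rules above: if the drive is currently in writing mode and
 * a high priority read is queued, pkt_iosched_process_queue() first waits
 * for all pending write bios to finish, then issues a cache flush and
 * switches to reading mode before submitting the read.
 */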
 503static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 504{
 505	request_queue_t *q;
 506
 507	if (atomic_read(&pd->iosched.attention) == 0)
 508		return;
 509	atomic_set(&pd->iosched.attention, 0);
 510
 511	q = bdev_get_queue(pd->bdev);
 512
 513	for (;;) {
 514		struct bio *bio;
 515		int reads_queued, writes_queued, high_prio_read;
 516
 517		spin_lock(&pd->iosched.lock);
 518		reads_queued = (pd->iosched.read_queue != NULL);
 519		writes_queued = (pd->iosched.write_queue != NULL);
 520		if (!reads_queued)
 521			pd->iosched.high_prio_read = 0;
 522		high_prio_read = pd->iosched.high_prio_read;
 523		spin_unlock(&pd->iosched.lock);
 524
 525		if (!reads_queued && !writes_queued)
 526			break;
 527
 528		if (pd->iosched.writing) {
 529			if (high_prio_read || (!writes_queued && reads_queued)) {
 530				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
 531					VPRINTK("pktcdvd: write, waiting\n");
 532					break;
 533				}
 534				pkt_flush_cache(pd);
 535				pd->iosched.writing = 0;
 536			}
 537		} else {
 538			if (!reads_queued && writes_queued) {
 539				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
 540					VPRINTK("pktcdvd: read, waiting\n");
 541					break;
 542				}
 543				pd->iosched.writing = 1;
 544			}
 545		}
 546
 547		spin_lock(&pd->iosched.lock);
 548		if (pd->iosched.writing) {
 549			bio = pkt_get_list_first(&pd->iosched.write_queue,
 550						 &pd->iosched.write_queue_tail);
 551		} else {
 552			bio = pkt_get_list_first(&pd->iosched.read_queue,
 553						 &pd->iosched.read_queue_tail);
 554		}
 555		spin_unlock(&pd->iosched.lock);
 556
 557		if (!bio)
 558			continue;
 559
 560		if (bio_data_dir(bio) == READ)
 561			pd->iosched.successive_reads += bio->bi_size >> 10;
 562		else
 563			pd->iosched.successive_reads = 0;
 564		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
 565			if (pd->read_speed == pd->write_speed) {
 566				pd->read_speed = MAX_SPEED;
 567				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
 568			}
 569		} else {
 570			if (pd->read_speed != pd->write_speed) {
 571				pd->read_speed = pd->write_speed;
 572				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
 573			}
 574		}
 575
 576		atomic_inc(&pd->cdrw.pending_bios);
 577		generic_make_request(bio);
 578	}
 579}
 580
 581/*
 582 * Special care is needed if the underlying block device has a small
 583 * max_phys_segments value.
 584 */
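/*
 * Example (assuming 4kB pages and 64kB packets): the write bio needs 32
 * frame-sized segments but only 16 page-sized segments, so a host adapter
 * limited to 16 physical segments gets PACKET_MERGE_SEGS set and the data
 * is first copied into pkt->pages[] by pkt_make_local_copy().
 */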
 585static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
 586{
 587	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
 588		/*
 589		 * The cdrom device can handle one segment/frame
 590		 */
 591		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
 592		return 0;
 593	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
 594		/*
 595		 * We can handle this case at the expense of some extra memory
 596		 * copies during write operations
 597		 */
 598		set_bit(PACKET_MERGE_SEGS, &pd->flags);
 599		return 0;
 600	} else {
 601		printk("pktcdvd: cdrom max_phys_segments too small\n");
 602		return -EIO;
 603	}
 604}
 605
 606/*
 607 * Copy CD_FRAMESIZE bytes from src_bio into a destination page
 608 */
 609static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
 610{
 611	unsigned int copy_size = CD_FRAMESIZE;
 612
 613	while (copy_size > 0) {
 614		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
 615		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
 616			src_bvl->bv_offset + offs;
 617		void *vto = page_address(dst_page) + dst_offs;
 618		int len = min_t(int, copy_size, src_bvl->bv_len - offs);
 619
 620		BUG_ON(len < 0);
 621		memcpy(vto, vfrom, len);
 622		kunmap_atomic(vfrom, KM_USER0);
 623
 624		seg++;
 625		offs = 0;
 626		dst_offs += len;
 627		copy_size -= len;
 628	}
 629}
 630
 631/*
 632 * Copy all data for this packet to pkt->pages[], so that
 633 * a) The number of required segments for the write bio is minimized, which
 634 *    is necessary for some scsi controllers.
 635 * b) The data can be used as cache to avoid read requests if we receive a
 636 *    new write request for the same zone.
 637 */
 638static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets)
 639{
 640	int f, p, offs;
 641
 642	/* Copy all data to pkt->pages[] */
 643	p = 0;
 644	offs = 0;
 645	for (f = 0; f < pkt->frames; f++) {
 646		if (pages[f] != pkt->pages[p]) {
 647			void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
 648			void *vto = page_address(pkt->pages[p]) + offs;
 649			memcpy(vto, vfrom, CD_FRAMESIZE);
 650			kunmap_atomic(vfrom, KM_USER0);
 651			pages[f] = pkt->pages[p];
 652			offsets[f] = offs;
 653		} else {
 654			BUG_ON(offsets[f] != offs);
 655		}
 656		offs += CD_FRAMESIZE;
 657		if (offs >= PAGE_SIZE) {
 658			BUG_ON(offs > PAGE_SIZE);
 659			offs = 0;
 660			p++;
 661		}
 662	}
 663}
 664
 665static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
 666{
 667	struct packet_data *pkt = bio->bi_private;
 668	struct pktcdvd_device *pd = pkt->pd;
 669	BUG_ON(!pd);
 670
 671	if (bio->bi_size)
 672		return 1;
 673
 674	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
 675		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
 676
 677	if (err)
 678		atomic_inc(&pkt->io_errors);
 679	if (atomic_dec_and_test(&pkt->io_wait)) {
 680		atomic_inc(&pkt->run_sm);
 681		wake_up(&pd->wqueue);
 682	}
 683	pkt_bio_finished(pd);
 684
 685	return 0;
 686}
 687
 688static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
 689{
 690	struct packet_data *pkt = bio->bi_private;
 691	struct pktcdvd_device *pd = pkt->pd;
 692	BUG_ON(!pd);
 693
 694	if (bio->bi_size)
 695		return 1;
 696
 697	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
 698
 699	pd->stats.pkt_ended++;
 700
 701	pkt_bio_finished(pd);
 702	atomic_dec(&pkt->io_wait);
 703	atomic_inc(&pkt->run_sm);
 704	wake_up(&pd->wqueue);
 705	return 0;
 706}
 707
 708/*
 709 * Schedule reads for the holes in a packet
 710 */
 711static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 712{
 713	int frames_read = 0;
 714	struct bio *bio;
 715	int f;
 716	char written[PACKET_MAX_SIZE];
 717
 718	BUG_ON(!pkt->orig_bios);
 719
 720	atomic_set(&pkt->io_wait, 0);
 721	atomic_set(&pkt->io_errors, 0);
 722
 723	if (pkt->cache_valid) {
 724		VPRINTK("pkt_gather_data: zone %llx cached\n",
 725			(unsigned long long)pkt->sector);
 726		goto out_account;
 727	}
 728
 729	/*
 730	 * Figure out which frames we need to read before we can write.
 731	 */
 732	memset(written, 0, sizeof(written));
 733	spin_lock(&pkt->lock);
 734	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
 735		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
 736		int num_frames = bio->bi_size / CD_FRAMESIZE;
 737		BUG_ON(first_frame < 0);
 738		BUG_ON(first_frame + num_frames > pkt->frames);
 739		for (f = first_frame; f < first_frame + num_frames; f++)
 740			written[f] = 1;
 741	}
 742	spin_unlock(&pkt->lock);
 743
 744	/*
 745	 * Schedule reads for missing parts of the packet.
 746	 */
 747	for (f = 0; f < pkt->frames; f++) {
 748		int p, offset;
 749		if (written[f])
 750			continue;
 751		bio = pkt->r_bios[f];
 752		bio_init(bio);
 753		bio->bi_max_vecs = 1;
 754		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 755		bio->bi_bdev = pd->bdev;
 756		bio->bi_end_io = pkt_end_io_read;
 757		bio->bi_private = pkt;
 758
 759		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
 760		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
 761		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
 762			f, pkt->pages[p], offset);
 763		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
 764			BUG();
 765
 766		atomic_inc(&pkt->io_wait);
 767		bio->bi_rw = READ;
 768		pkt_queue_bio(pd, bio, 0);
 769		frames_read++;
 770	}
 771
 772out_account:
 773	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
 774		frames_read, (unsigned long long)pkt->sector);
 775	pd->stats.pkt_started++;
 776	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
 777	pd->stats.secs_w += pd->settings.size;
 778}
 779
 780/*
 781 * Find a packet matching zone, or the least recently used packet if
 782 * there is no match.
 783 */
 784static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
 785{
 786	struct packet_data *pkt;
 787
 788	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
 789		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
 790			list_del_init(&pkt->list);
 791			if (pkt->sector != zone)
 792				pkt->cache_valid = 0;
 793			break;
 794		}
 795	}
 796	return pkt;
 797}
 798
 799static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 800{
 801	if (pkt->cache_valid) {
 802		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
 803	} else {
 804		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
 805	}
 806}
 807
 808/*
 809 * recover a failed write, query for relocation if possible
 810 *
 811 * returns 1 if recovery is possible, or 0 if not
 812 *
 813 */
 814static int pkt_start_recovery(struct packet_data *pkt)
 815{
 816	/*
 817	 * FIXME. We need help from the file system to implement
 818	 * recovery handling.
 819	 */
 820	return 0;
 821#if 0
 822	struct request *rq = pkt->rq;
 823	struct pktcdvd_device *pd = rq->rq_disk->private_data;
 824	struct block_device *pkt_bdev;
 825	struct super_block *sb = NULL;
 826	unsigned long old_block, new_block;
 827	sector_t new_sector;
 828
 829	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
 830	if (pkt_bdev) {
 831		sb = get_super(pkt_bdev);
 832		bdput(pkt_bdev);
 833	}
 834
 835	if (!sb)
 836		return 0;
 837
 838	if (!sb->s_op || !sb->s_op->relocate_blocks)
 839		goto out;
 840
 841	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
 842	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
 843		goto out;
 844
 845	new_sector = new_block * (CD_FRAMESIZE >> 9);
 846	pkt->sector = new_sector;
 847
 848	pkt->bio->bi_sector = new_sector;
 849	pkt->bio->bi_next = NULL;
 850	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
 851	pkt->bio->bi_idx = 0;
 852
 853	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
 854	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
 855	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
 856	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
 857	BUG_ON(pkt->bio->bi_private != pkt);
 858
 859	drop_super(sb);
 860	return 1;
 861
 862out:
 863	drop_super(sb);
 864	return 0;
 865#endif
 866}
 867
 868static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
 869{
 870#if PACKET_DEBUG > 1
 871	static const char *state_name[] = {
 872		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
 873	};
 874	enum packet_data_state old_state = pkt->state;
 875	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
 876		state_name[old_state], state_name[state]);
 877#endif
 878	pkt->state = state;
 879}
 880
 881/*
 882 * Scan the work queue to see if we can start a new packet.
 883 * returns non-zero if any work was done.
 884 */
 885static int pkt_handle_queue(struct pktcdvd_device *pd)
 886{
 887	struct packet_data *pkt, *p;
 888	struct bio *bio = NULL;
 889	sector_t zone = 0; /* Suppress gcc warning */
 890	struct pkt_rb_node *node, *first_node;
 891	struct rb_node *n;
 892
 893	VPRINTK("handle_queue\n");
 894
 895	atomic_set(&pd->scan_queue, 0);
 896
 897	if (list_empty(&pd->cdrw.pkt_free_list)) {
 898		VPRINTK("handle_queue: no pkt\n");
 899		return 0;
 900	}
 901
 902	/*
 903	 * Try to find a zone we are not already working on.
 904	 */
 905	spin_lock(&pd->lock);
 906	first_node = pkt_rbtree_find(pd, pd->current_sector);
 907	if (!first_node) {
 908		n = rb_first(&pd->bio_queue);
 909		if (n)
 910			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
 911	}
 912	node = first_node;
 913	while (node) {
 914		bio = node->bio;
 915		zone = ZONE(bio->bi_sector, pd);
 916		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 917			if (p->sector == zone)
 918				goto try_next_bio;
 919		}
 920		break;
 921try_next_bio:
 922		node = pkt_rbtree_next(node);
 923		if (!node) {
 924			n = rb_first(&pd->bio_queue);
 925			if (n)
 926				node = rb_entry(n, struct pkt_rb_node, rb_node);
 927		}
 928		if (node == first_node)
 929			node = NULL;
 930	}
 931	spin_unlock(&pd->lock);
 932	if (!bio) {
 933		VPRINTK("handle_queue: no bio\n");
 934		return 0;
 935	}
 936
 937	pkt = pkt_get_packet_data(pd, zone);
 938	BUG_ON(!pkt);
 939
 940	pd->current_sector = zone + pd->settings.size;
 941	pkt->sector = zone;
 942	pkt->frames = pd->settings.size >> 2;
 943	BUG_ON(pkt->frames > PACKET_MAX_SIZE);
 944	pkt->write_size = 0;
 945
 946	/*
 947	 * Scan work queue for bios in the same zone and link them
 948	 * to this packet.
 949	 */
 950	spin_lock(&pd->lock);
 951	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
 952	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 953		bio = node->bio;
 954		VPRINTK("pkt_handle_queue: found zone=%llx\n",
 955			(unsigned long long)ZONE(bio->bi_sector, pd));
 956		if (ZONE(bio->bi_sector, pd) != zone)
 957			break;
 958		pkt_rbtree_erase(pd, node);
 959		spin_lock(&pkt->lock);
 960		pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
 961		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
 962		spin_unlock(&pkt->lock);
 963	}
 964	spin_unlock(&pd->lock);
 965
 966	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
 967	pkt_set_state(pkt, PACKET_WAITING_STATE);
 968	atomic_set(&pkt->run_sm, 1);
 969
 970	spin_lock(&pd->cdrw.active_list_lock);
 971	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
 972	spin_unlock(&pd->cdrw.active_list_lock);
 973
 974	return 1;
 975}
 976
 977/*
 978 * Assemble a bio to write one packet and queue the bio for processing
 979 * by the underlying block device.
 980 */
 981static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 982{
 983	struct bio *bio;
 984	struct page *pages[PACKET_MAX_SIZE];
 985	int offsets[PACKET_MAX_SIZE];
 986	int f;
 987	int frames_write;
 988
 989	for (f = 0; f < pkt->frames; f++) {
 990		pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
 991		offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
 992	}
 993
 994	/*
 995	 * Fill-in pages[] and offsets[] with data from orig_bios.
 996	 */
 997	frames_write = 0;
 998	spin_lock(&pkt->lock);
 999	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
1000		int segment = bio->bi_idx;
1001		int src_offs = 0;
1002		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
1003		int num_frames = bio->bi_size / CD_FRAMESIZE;
1004		BUG_ON(first_frame < 0);
1005		BUG_ON(first_frame + num_frames > pkt->frames);
1006		for (f = first_frame; f < first_frame + num_frames; f++) {
1007			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
1008
1009			while (src_offs >= src_bvl->bv_len) {
1010				src_offs -= src_bvl->bv_len;
1011				segment++;
1012				BUG_ON(segment >= bio->bi_vcnt);
1013				src_bvl = bio_iovec_idx(bio, segment);
1014			}
1015
1016			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
1017				pages[f] = src_bvl->bv_page;
1018				offsets[f] = src_bvl->bv_offset + src_offs;
1019			} else {
1020				pkt_copy_bio_data(bio, segment, src_offs,
1021						  pages[f], offsets[f]);
1022			}
1023			src_offs += CD_FRAMESIZE;
1024			frames_write++;
1025		}
1026	}
1027	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
1028	spin_unlock(&pkt->lock);
1029
1030	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
1031		frames_write, (unsigned long long)pkt->sector);
1032	BUG_ON(frames_write != pkt->write_size);
1033
1034	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
1035		pkt_make_local_copy(pkt, pages, offsets);
1036		pkt->cache_valid = 1;
1037	} else {
1038		pkt->cache_valid = 0;
1039	}
1040
1041	/* Start the write request */
1042	bio_init(pkt->w_bio);
1043	pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
1044	pkt->w_bio->bi_sector = pkt->sector;
1045	pkt->w_bio->bi_bdev = pd->bdev;
1046	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1047	pkt->w_bio->bi_private = pkt;
1048	for (f = 0; f < pkt->frames; f++) {
1049		if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) &&
 1050		    (offsets[f + 1] == offsets[f] + CD_FRAMESIZE)) {
1051			if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f]))
1052				BUG();
1053			f++;
1054		} else {
1055			if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f]))
1056				BUG();
1057		}
1058	}
1059	VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt);
1060
1061	atomic_set(&pkt->io_wait, 1);
1062	pkt->w_bio->bi_rw = WRITE;
1063	pkt_queue_bio(pd, pkt->w_bio, 0);
1064}
1065
1066static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
1067{
1068	struct bio *bio, *next;
1069
1070	if (!uptodate)
1071		pkt->cache_valid = 0;
1072
1073	/* Finish all bios corresponding to this packet */
1074	bio = pkt->orig_bios;
1075	while (bio) {
1076		next = bio->bi_next;
1077		bio->bi_next = NULL;
1078		bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
1079		bio = next;
1080	}
1081	pkt->orig_bios = pkt->orig_bios_tail = NULL;
1082}
1083
1084static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
1085{
1086	int uptodate;
1087
1088	VPRINTK("run_state_machine: pkt %d\n", pkt->id);
1089
1090	for (;;) {
1091		switch (pkt->state) {
1092		case PACKET_WAITING_STATE:
1093			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
1094				return;
1095
1096			pkt->sleep_time = 0;
1097			pkt_gather_data(pd, pkt);
1098			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
1099			break;
1100
1101		case PACKET_READ_WAIT_STATE:
1102			if (atomic_read(&pkt->io_wait) > 0)
1103				return;
1104
1105			if (atomic_read(&pkt->io_errors) > 0) {
1106				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1107			} else {
1108				pkt_start_write(pd, pkt);
1109			}
1110			break;
1111
1112		case PACKET_WRITE_WAIT_STATE:
1113			if (atomic_read(&pkt->io_wait) > 0)
1114				return;
1115
1116			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
1117				pkt_set_state(pkt, PACKET_FINISHED_STATE);
1118			} else {
1119				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
1120			}
1121			break;
1122
1123		case PACKET_RECOVERY_STATE:
1124			if (pkt_start_recovery(pkt)) {
1125				pkt_start_write(pd, pkt);
1126			} else {
1127				VPRINTK("No recovery possible\n");
1128				pkt_set_state(pkt, PACKET_FINISHED_STATE);
1129			}
1130			break;
1131
1132		case PACKET_FINISHED_STATE:
1133			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
1134			pkt_finish_packet(pkt, uptodate);
1135			return;
1136
1137		default:
1138			BUG();
1139			break;
1140		}
1141	}
1142}
1143
1144static void pkt_handle_packets(struct pktcdvd_device *pd)
1145{
1146	struct packet_data *pkt, *next;
1147
1148	VPRINTK("pkt_handle_packets\n");
1149
1150	/*
1151	 * Run state machine for active packets
1152	 */
1153	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1154		if (atomic_read(&pkt->run_sm) > 0) {
1155			atomic_set(&pkt->run_sm, 0);
1156			pkt_run_state_machine(pd, pkt);
1157		}
1158	}
1159
1160	/*
1161	 * Move no longer active packets to the free list
1162	 */
1163	spin_lock(&pd->cdrw.active_list_lock);
1164	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
1165		if (pkt->state == PACKET_FINISHED_STATE) {
1166			list_del(&pkt->list);
1167			pkt_put_packet_data(pd, pkt);
1168			pkt_set_state(pkt, PACKET_IDLE_STATE);
1169			atomic_set(&pd->scan_queue, 1);
1170		}
1171	}
1172	spin_unlock(&pd->cdrw.active_list_lock);
1173}
1174
1175static void pkt_count_states(struct pktcdvd_device *pd, int *states)
1176{
1177	struct packet_data *pkt;
1178	int i;
1179
 1180	for (i = 0; i < PACKET_NUM_STATES; i++)
1181		states[i] = 0;
1182
1183	spin_lock(&pd->cdrw.active_list_lock);
1184	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1185		states[pkt->state]++;
1186	}
1187	spin_unlock(&pd->cdrw.active_list_lock);
1188}
1189
1190/*
1191 * kcdrwd is woken up when writes have been queued for one of our
1192 * registered devices
1193 */
1194static int kcdrwd(void *foobar)
1195{
1196	struct pktcdvd_device *pd = foobar;
1197	struct packet_data *pkt;
1198	long min_sleep_time, residue;
1199
1200	set_user_nice(current, -20);
1201
1202	for (;;) {
1203		DECLARE_WAITQUEUE(wait, current);
1204
1205		/*
1206		 * Wait until there is something to do
1207		 */
1208		add_wait_queue(&pd->wqueue, &wait);
1209		for (;;) {
1210			set_current_state(TASK_INTERRUPTIBLE);
1211
1212			/* Check if we need to run pkt_handle_queue */
1213			if (atomic_read(&pd->scan_queue) > 0)
1214				goto work_to_do;
1215
1216			/* Check if we need to run the state machine for some packet */
1217			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1218				if (atomic_read(&pkt->run_sm) > 0)
1219					goto work_to_do;
1220			}
1221
1222			/* Check if we need to process the iosched queues */
1223			if (atomic_read(&pd->iosched.attention) != 0)
1224				goto work_to_do;
1225
1226			/* Otherwise, go to sleep */
1227			if (PACKET_DEBUG > 1) {
1228				int states[PACKET_NUM_STATES];
1229				pkt_count_states(pd, states);
1230				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
1231					states[0], states[1], states[2], states[3],
1232					states[4], states[5]);
1233			}
1234
1235			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
1236			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1237				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
1238					min_sleep_time = pkt->sleep_time;
1239			}
1240
1241			generic_unplug_device(bdev_get_queue(pd->bdev));
1242
1243			VPRINTK("kcdrwd: sleeping\n");
1244			residue = schedule_timeout(min_sleep_time);
1245			VPRINTK("kcdrwd: wake up\n");
1246
1247			/* make swsusp happy with our thread */
1248			if (current->flags & PF_FREEZE)
1249				refrigerator(PF_FREEZE);
1250
1251			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
1252				if (!pkt->sleep_time)
1253					continue;
1254				pkt->sleep_time -= min_sleep_time - residue;
1255				if (pkt->sleep_time <= 0) {
1256					pkt->sleep_time = 0;
1257					atomic_inc(&pkt->run_sm);
1258				}
1259			}
1260
1261			if (signal_pending(current)) {
1262				flush_signals(current);
1263			}
1264			if (kthread_should_stop())
1265				break;
1266		}
1267work_to_do:
1268		set_current_state(TASK_RUNNING);
1269		remove_wait_queue(&pd->wqueue, &wait);
1270
1271		if (kthread_should_stop())
1272			break;
1273
1274		/*
1275		 * if pkt_handle_queue returns true, we can queue
1276		 * another request.
1277		 */
1278		while (pkt_handle_queue(pd))
1279			;
1280
1281		/*
1282		 * Handle packet state machine
1283		 */
1284		pkt_handle_packets(pd);
1285
1286		/*
1287		 * Handle iosched queues
1288		 */
1289		pkt_iosched_process_queue(pd);
1290	}
1291
1292	return 0;
1293}
1294
1295static void pkt_print_settings(struct pktcdvd_device *pd)
1296{
1297	printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
1298	printk("%u blocks, ", pd->settings.size >> 2);
1299	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
1300}
1301
1302static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
1303{
1304	memset(cgc->cmd, 0, sizeof(cgc->cmd));
1305
1306	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
1307	cgc->cmd[2] = page_code | (page_control << 6);
1308	cgc->cmd[7] = cgc->buflen >> 8;
1309	cgc->cmd[8] = cgc->buflen & 0xff;
1310	cgc->data_direction = CGC_DATA_READ;
1311	return pkt_generic_packet(pd, cgc);
1312}
1313
1314static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
1315{
1316	memset(cgc->cmd, 0, sizeof(cgc->cmd));
1317	memset(cgc->buffer, 0, 2);
1318	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
1319	cgc->cmd[1] = 0x10;		/* PF */
1320	cgc->cmd[7] = cgc->buflen >> 8;
1321	cgc->cmd[8] = cgc->buflen & 0xff;
1322	cgc->data_direction = CGC_DATA_WRITE;
1323	return pkt_generic_packet(pd, cgc);
1324}
1325
1326static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
1327{
1328	struct packet_command cgc;
1329	int ret;
1330
1331	/* set up command and get the disc info */
1332	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
1333	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
1334	cgc.cmd[8] = cgc.buflen = 2;
1335	cgc.quiet = 1;
1336
1337	if ((ret = pkt_generic_packet(pd, &cgc)))
1338		return ret;
1339
1340	/* not all drives have the same disc_info length, so requeue
1341	 * packet with the length the drive tells us it can supply
1342	 */
1343	cgc.buflen = be16_to_cpu(di->disc_information_length) +
1344		     sizeof(di->disc_information_length);
1345
1346	if (cgc.buflen > sizeof(disc_information))
1347		cgc.buflen = sizeof(disc_information);
1348
1349	cgc.cmd[8] = cgc.buflen;
1350	return pkt_generic_packet(pd, &cgc);
1351}
1352
1353static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
1354{
1355	struct packet_command cgc;
1356	int ret;
1357
1358	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
1359	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
1360	cgc.cmd[1] = type & 3;
1361	cgc.cmd[4] = (track & 0xff00) >> 8;
1362	cgc.cmd[5] = track & 0xff;
1363	cgc.cmd[8] = 8;
1364	cgc.quiet = 1;
1365
1366	if ((ret = pkt_generic_packet(pd, &cgc)))
1367		return ret;
1368
1369	cgc.buflen = be16_to_cpu(ti->track_information_length) +
1370		     sizeof(ti->track_information_length);
1371
1372	if (cgc.buflen > sizeof(track_information))
1373		cgc.buflen = sizeof(track_information);
1374
1375	cgc.cmd[8] = cgc.buflen;
1376	return pkt_generic_packet(pd, &cgc);
1377}
1378
1379static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
1380{
1381	disc_information di;
1382	track_information ti;
1383	__u32 last_track;
1384	int ret = -1;
1385
1386	if ((ret = pkt_get_disc_info(pd, &di)))
1387		return ret;
1388
1389	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
1390	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1391		return ret;
1392
1393	/* if this track is blank, try the previous. */
1394	if (ti.blank) {
1395		last_track--;
1396		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
1397			return ret;
1398	}
1399
1400	/* if last recorded field is valid, return it. */
1401	if (ti.lra_v) {
1402		*last_written = be32_to_cpu(ti.last_rec_address);
1403	} else {
1404		/* make it up instead */
1405		*last_written = be32_to_cpu(ti.track_start) +
1406				be32_to_cpu(ti.track_size);
1407		if (ti.free_blocks)
1408			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
1409	}
1410	return 0;
1411}
1412
1413/*
1414 * write mode select package based on pd->settings
1415 */
1416static int pkt_set_write_settings(struct pktcdvd_device *pd)
1417{
1418	struct packet_command cgc;
1419	struct request_sense sense;
1420	write_param_page *wp;
1421	char buffer[128];
1422	int ret, size;
1423
1424	/* doesn't apply to DVD+RW or DVD-RAM */
1425	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
1426		return 0;
1427
1428	memset(buffer, 0, sizeof(buffer));
1429	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
1430	cgc.sense = &sense;
1431	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1432		pkt_dump_sense(&cgc);
1433		return ret;
1434	}
1435
1436	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
1437	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
1438	if (size > sizeof(buffer))
1439		size = sizeof(buffer);
1440
1441	/*
1442	 * now get it all
1443	 */
1444	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
1445	cgc.sense = &sense;
1446	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
1447		pkt_dump_sense(&cgc);
1448		return ret;
1449	}
1450
1451	/*
1452	 * write page is offset header + block descriptor length
1453	 */
1454	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
1455
1456	wp->fp = pd->settings.fp;
1457	wp->track_mode = pd->settings.track_mode;
1458	wp->write_type = pd->settings.write_type;
1459	wp->data_block_type = pd->settings.block_mode;
1460
1461	wp->multi_session = 0;
1462
1463#ifdef PACKET_USE_LS
1464	wp->link_size = 7;
1465	wp->ls_v = 1;
1466#endif
1467
1468	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
1469		wp->session_format = 0;
1470		wp->subhdr2 = 0x20;
1471	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
1472		wp->session_format = 0x20;
1473		wp->subhdr2 = 8;
1474#if 0
1475		wp->mcn[0] = 0x80;
1476		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
1477#endif
1478	} else {
1479		/*
1480		 * paranoia
1481		 */
1482		printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
1483		return 1;
1484	}
1485	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
1486
1487	cgc.buflen = cgc.cmd[8] = size;
1488	if ((ret = pkt_mode_select(pd, &cgc))) {
1489		pkt_dump_sense(&cgc);
1490		return ret;
1491	}
1492
1493	pkt_print_settings(pd);
1494	return 0;
1495}
1496
1497/*
1498 * 0 -- we can write to this track, 1 -- we can't
1499 */
1500static int pkt_good_track(track_information *ti)
1501{
1502	/*
1503	 * only good for CD-RW at the moment, not DVD-RW
1504	 */
1505
1506	/*
1507	 * FIXME: only for FP
1508	 */
1509	if (ti->fp == 0)
1510		return 0;
1511
1512	/*
1513	 * "good" settings as per Mt Fuji.
1514	 */
1515	if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
1516		return 0;
1517
1518	if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
1519		return 0;
1520
1521	if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
1522		return 0;
1523
1524	printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
1525	return 1;
1526}
1527
1528/*
1529 * 0 -- we can write to this disc, 1 -- we can't
1530 */
1531static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
1532{
1533	switch (pd->mmc3_profile) {
1534		case 0x0a: /* CD-RW */
1535		case 0xffff: /* MMC3 not supported */
1536			break;
1537		case 0x1a: /* DVD+RW */
1538		case 0x13: /* DVD-RW */
1539		case 0x12: /* DVD-RAM */
1540			return 0;
1541		default:
1542			printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
1543			return 1;
1544	}
1545
1546	/*
1547	 * for disc type 0xff we should probably reserve a new track.
1548	 * but i'm not sure, should we leave this to user apps? probably.
1549	 */
1550	if (di->disc_type == 0xff) {
1551		printk("pktcdvd: Unknown disc. No track?\n");
1552		return 1;
1553	}
1554
1555	if (di->disc_type != 0x20 && di->disc_type != 0) {
1556		printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
1557		return 1;
1558	}
1559
1560	if (di->erasable == 0) {
1561		printk("pktcdvd: Disc not erasable\n");
1562		return 1;
1563	}
1564
1565	if (di->border_status == PACKET_SESSION_RESERVED) {
1566		printk("pktcdvd: Can't write to last track (reserved)\n");
1567		return 1;
1568	}
1569
1570	return 0;
1571}
1572
1573static int pkt_probe_settings(struct pktcdvd_device *pd)
1574{
1575	struct packet_command cgc;
1576	unsigned char buf[12];
1577	disc_information di;
1578	track_information ti;
1579	int ret, track;
1580
1581	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1582	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
1583	cgc.cmd[8] = 8;
1584	ret = pkt_generic_packet(pd, &cgc);
1585	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
1586
1587	memset(&di, 0, sizeof(disc_information));
1588	memset(&ti, 0, sizeof(track_information));
1589
1590	if ((ret = pkt_get_disc_info(pd, &di))) {
1591		printk("failed get_disc\n");
1592		return ret;
1593	}
1594
1595	if (pkt_good_disc(pd, &di))
1596		return -ENXIO;
1597
1598	switch (pd->mmc3_profile) {
1599		case 0x1a: /* DVD+RW */
1600			printk("pktcdvd: inserted media is DVD+RW\n");
1601			break;
1602		case 0x13: /* DVD-RW */
1603			printk("pktcdvd: inserted media is DVD-RW\n");
1604			break;
1605		case 0x12: /* DVD-RAM */
1606			printk("pktcdvd: inserted media is DVD-RAM\n");
1607			break;
1608		default:
1609			printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
1610			break;
1611	}
1612	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
1613
1614	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
1615	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
1616		printk("pktcdvd: failed get_track\n");
1617		return ret;
1618	}
1619
1620	if (pkt_good_track(&ti)) {
1621		printk("pktcdvd: can't write to this track\n");
1622		return -ENXIO;
1623	}
1624
1625	/*
 1626	 * we keep the packet size in 512-byte units, which makes it easier to
1627	 * deal with request calculations.
1628	 */
1629	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
1630	if (pd->settings.size == 0) {
1631		printk("pktcdvd: detected zero packet size!\n");
1632		pd->settings.size = 128;
1633	}
1634	pd->settings.fp = ti.fp;
1635	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
1636
1637	if (ti.nwa_v) {
1638		pd->nwa = be32_to_cpu(ti.next_writable);
1639		set_bit(PACKET_NWA_VALID, &pd->flags);
1640	}
1641
1642	/*
1643	 * in theory we could use lra on -RW media as well and just zero
1644	 * blocks that haven't been written yet, but in practice that
1645	 * is just a no-go. we'll use that for -R, naturally.
1646	 */
1647	if (ti.lra_v) {
1648		pd->lra = be32_to_cpu(ti.last_rec_address);
1649		set_bit(PACKET_LRA_VALID, &pd->flags);
1650	} else {
1651		pd->lra = 0xffffffff;
1652		set_bit(PACKET_LRA_VALID, &pd->flags);
1653	}
1654
1655	/*
1656	 * fine for now
1657	 */
1658	pd->settings.link_loss = 7;
1659	pd->settings.write_type = 0;	/* packet */
1660	pd->settings.track_mode = ti.track_mode;
1661
1662	/*
1663	 * mode1 or mode2 disc
1664	 */
1665	switch (ti.data_mode) {
1666		case PACKET_MODE1:
1667			pd->settings.block_mode = PACKET_BLOCK_MODE1;
1668			break;
1669		case PACKET_MODE2:
1670			pd->settings.block_mode = PACKET_BLOCK_MODE2;
1671			break;
1672		default:
1673			printk("pktcdvd: unknown data mode\n");
1674			return 1;
1675	}
1676	return 0;
1677}
1678
1679/*
1680 * enable/disable write caching on drive
1681 */
1682static int pkt_write_caching(struct pktcdvd_device *pd, int set)
1683{
1684	struct packet_command cgc;
1685	struct request_sense sense;
1686	unsigned char buf[64];
1687	int ret;
1688
1689	memset(buf, 0, sizeof(buf));
1690	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
1691	cgc.sense = &sense;
1692	cgc.buflen = pd->mode_offset + 12;
1693
1694	/*
1695	 * caching mode page might not be there, so quiet this command
1696	 */
1697	cgc.quiet = 1;
1698
1699	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
1700		return ret;
1701
1702	buf[pd->mode_offset + 10] |= (!!set << 2);
1703
1704	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
1705	ret = pkt_mode_select(pd, &cgc);
1706	if (ret) {
1707		printk("pktcdvd: write caching control failed\n");
1708		pkt_dump_sense(&cgc);
1709	} else if (!ret && set)
1710		printk("pktcdvd: enabled write caching on %s\n", pd->name);
1711	return ret;
1712}
1713
1714static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
1715{
1716	struct packet_command cgc;
1717
1718	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1719	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
1720	cgc.cmd[4] = lockflag ? 1 : 0;
1721	return pkt_generic_packet(pd, &cgc);
1722}
1723
1724/*
1725 * Returns drive maximum write speed
1726 */
1727static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
1728{
1729	struct packet_command cgc;
1730	struct request_sense sense;
1731	unsigned char buf[256+18];
1732	unsigned char *cap_buf;
1733	int ret, offset;
1734
1735	memset(buf, 0, sizeof(buf));
1736	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
1737	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
1738	cgc.sense = &sense;
1739
1740	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1741	if (ret) {
1742		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
1743			     sizeof(struct mode_page_header);
1744		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
1745		if (ret) {
1746			pkt_dump_sense(&cgc);
1747			return ret;
1748		}
1749	}
1750
1751	offset = 20;			    /* Obsoleted field, used by older drives */
1752	if (cap_buf[1] >= 28)
1753		offset = 28;		    /* Current write speed selected */
1754	if (cap_buf[1] >= 30) {
1755		/* If the drive reports at least one "Logical Unit Write
1756		 * Speed Performance Descriptor Block", use the information
1757		 * in the first block. (contains the highest speed)
1758		 */
1759		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
1760		if (num_spdb > 0)
1761			offset = 34;
1762	}
1763
1764	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
1765	return 0;
1766}
1767
1768/* These tables from cdrecord - I don't have orange book */
1769/* standard speed CD-RW (1-4x) */
1770static char clv_to_speed[16] = {
1771	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1772	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1773};
1774/* high speed CD-RW (-10x) */
1775static char hs_clv_to_speed[16] = {
1776	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1777	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
1778};
1779/* ultra high speed CD-RW */
1780static char us_clv_to_speed[16] = {
1781	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
1782	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
1783};
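/*
 * Example: an ultra high speed disc (sub-type 2) reporting 8 in the ATIP
 * A1 speed field maps to us_clv_to_speed[8] == 24, i.e. 24x.
 */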
1784
1785/*
1786 * reads the maximum media speed from ATIP
1787 */
1788static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
1789{
1790	struct packet_command cgc;
1791	struct request_sense sense;
1792	unsigned char buf[64];
1793	unsigned int size, st, sp;
1794	int ret;
1795
1796	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
1797	cgc.sense = &sense;
1798	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1799	cgc.cmd[1] = 2;
1800	cgc.cmd[2] = 4; /* READ ATIP */
1801	cgc.cmd[8] = 2;
1802	ret = pkt_generic_packet(pd, &cgc);
1803	if (ret) {
1804		pkt_dump_sense(&cgc);
1805		return ret;
1806	}
1807	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
1808	if (size > sizeof(buf))
1809		size = sizeof(buf);
1810
1811	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
1812	cgc.sense = &sense;
1813	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
1814	cgc.cmd[1] = 2;
1815	cgc.cmd[2] = 4;
1816	cgc.cmd[8] = size;
1817	ret = pkt_generic_packet(pd, &cgc);
1818	if (ret) {
1819		pkt_dump_sense(&cgc);
1820		return ret;
1821	}
1822
 1823	if (!(buf[6] & 0x40)) {
1824		printk("pktcdvd: Disc type is not CD-RW\n");
1825		return 1;
1826	}
 1827	if (!(buf[6] & 0x4)) {
1828		printk("pktcdvd: A1 values on media are not valid, maybe not CDRW?\n");
1829		return 1;
1830	}
1831
1832	st = (buf[6] >> 3) & 0x7; /* disc sub-type */
1833
1834	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
1835
1836	/* Info from cdrecord */
1837	switch (st) {
1838		case 0: /* standard speed */
1839			*speed = clv_to_speed[sp];
1840			break;
1841		case 1: /* high speed */
1842			*speed = hs_clv_to_speed[sp];
1843			break;
1844		case 2: /* ultra high speed */
1845			*speed = us_clv_to_speed[sp];
1846			break;
1847		default:
1848			printk("pktcdvd: Unknown disc sub-type %d\n",st);
1849			return 1;
1850	}
1851	if (*speed) {
1852		printk("pktcdvd: Max. media speed: %d\n",*speed);
1853		return 0;
1854	} else {
1855		printk("pktcdvd: Unknown speed %d for sub-type %d\n",sp,st);
1856		return 1;
1857	}
1858}
1859
1860static int pkt_perform_opc(struct pktcdvd_device *pd)
1861{
1862	struct packet_command cgc;
1863	struct request_sense sense;
1864	int ret;
1865
1866	VPRINTK("pktcdvd: Performing OPC\n");
1867
1868	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
1869	cgc.sense = &sense;
1870	cgc.timeout = 60*HZ;
1871	cgc.cmd[0] = GPCMD_SEND_OPC;
1872	cgc.cmd[1] = 1;
1873	if ((ret = pkt_generic_packet(pd, &cgc)))
1874		pkt_dump_sense(&cgc);
1875	return ret;
1876}
1877
1878static int pkt_open_write(struct pktcdvd_device *pd)
1879{
1880	int ret;
1881	unsigned int write_speed, media_write_speed, read_speed;
1882
1883	if ((ret = pkt_probe_settings(pd))) {
1884		DPRINTK("pktcdvd: %s failed probe\n", pd->name);
1885		return -EIO;
1886	}
1887
1888	if ((ret = pkt_set_write_settings(pd))) {
1889		DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
1890		return -EIO;
1891	}
1892
1893	pkt_write_caching(pd, USE_WCACHING);
1894
1895	if ((ret = pkt_get_max_speed(pd, &write_speed)))
1896		write_speed = 16 * 177;
1897	switch (pd->mmc3_profile) {
1898		case 0x13: /* DVD-RW */
1899		case 0x1a: /* DVD+RW */
1900		case 0x12: /* DVD-RAM */
1901			DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed);
1902			break;
1903		default:
1904			if ((ret = pkt_media_speed(pd, &media_write_speed)))
1905				media_write_speed = 16;
1906			write_speed = min(write_speed, media_write_speed * 177);
1907			DPRINTK("pktcdvd: write speed %ux\n", write_speed / 176);
1908			break;
1909	}
1910	read_speed = write_speed;
1911
1912	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
1913		DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
1914		return -EIO;
1915	}
1916	pd->write_speed = write_speed;
1917	pd->read_speed = read_speed;
1918
1919	if ((ret = pkt_perform_opc(pd))) {
1920		DPRINTK("pktcdvd: %s Optimum Power Calibration failed\n", pd->name);
1921	}
1922
1923	return 0;
1924}
1925
1926/*
1927 * called at open time.
1928 */
1929static int pkt_open_dev(struct pktcdvd_device *pd, int write)
1930{
1931	int ret;
1932	long lba;
1933	request_queue_t *q;
1934
1935	/*
1936	 * We need to re-open the cdrom device without O_NONBLOCK to be able
1937	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
1938	 * so bdget() can't fail.
1939	 */
1940	bdget(pd->bdev->bd_dev);
1941	if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
1942		goto out;
1943
1944	if ((ret = pkt_get_last_written(pd, &lba))) {
1945		printk("pktcdvd: pkt_get_last_written failed\n");
1946		goto out_putdev;
1947	}
1948
1949	set_capacity(pd->disk, lba << 2);
1950	set_capacity(pd->bdev->bd_disk, lba << 2);
1951	bd_set_size(pd->bdev, (loff_t)lba << 11);
1952
1953	q = bdev_get_queue(pd->bdev);
1954	if (write) {
1955		if ((ret = pkt_open_write(pd)))
1956			goto out_putdev;
1957		/*
1958		 * Some CDRW drives can not handle writes larger than one packet,
1959		 * even if the size is a multiple of the packet size.
1960		 */
1961		spin_lock_irq(q->queue_lock);
1962		blk_queue_max_sectors(q, pd->settings.size);
1963		spin_unlock_irq(q->queue_lock);
1964		set_bit(PACKET_WRITABLE, &pd->flags);
1965	} else {
1966		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
1967		clear_bit(PACKET_WRITABLE, &pd->flags);
1968	}
1969
1970	if ((ret = pkt_set_segment_merging(pd, q)))
1971		goto out_putdev;
1972
1973	if (write)
1974		printk("pktcdvd: %lukB available on disc\n", lba << 1);
1975
1976	return 0;
1977
1978out_putdev:
1979	blkdev_put(pd->bdev);
1980out:
1981	return ret;
1982}
1983
1984/*
1985 * called when the device is closed. makes sure that the device flushes
1986 * the internal cache before we close.
1987 */
1988static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
1989{
1990	if (flush && pkt_flush_cache(pd))
1991		DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
1992
1993	pkt_lock_door(pd, 0);
1994
1995	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
1996	blkdev_put(pd->bdev);
1997}
1998
1999static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
2000{
2001	if (dev_minor >= MAX_WRITERS)
2002		return NULL;
2003	return pkt_devs[dev_minor];
2004}
2005
2006static int pkt_open(struct inode *inode, struct file *file)
2007{
2008	struct pktcdvd_device *pd = NULL;
2009	int ret;
2010
2011	VPRINTK("pktcdvd: entering open\n");
2012
2013	down(&ctl_mutex);
2014	pd = pkt_find_dev_from_minor(iminor(inode));
2015	if (!pd) {
2016		ret = -ENODEV;
2017		goto out;
2018	}
2019	BUG_ON(pd

(listing truncated here; the remainder of the file is not shown)