/drivers/scsi/scsi_lib.c
/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 * Initial versions: Eric Youngdale (eric@andante.org).
 * Based upon conversations with large numbers
 * of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
	SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
	SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
	SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
	SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP

struct kmem_cache *scsi_sdb_cache;
/*
 * When to reinvoke queueing after a resource shortage.  It's 3 msecs so
 * as not to change behaviour from the previous unplug mechanism;
 * experimentation may prove this needs changing.
 */
#define SCSI_QUEUE_DELAY	3

/*
 * Function: scsi_unprep_request()
 *
 * Purpose: Remove all preparation done for a request, including its
 *	    associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments: req - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns: Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	blk_unprep_request(req);
	req->special = NULL;

	scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct scsi_target *starget = scsi_target(device);
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	switch (reason) {
	case SCSI_MLQUEUE_HOST_BUSY:
		host->host_blocked = host->max_host_blocked;
		break;
	case SCSI_MLQUEUE_DEVICE_BUSY:
		device->device_blocked = device->max_device_blocked;
		break;
	case SCSI_MLQUEUE_TARGET_BUSY:
		starget->target_blocked = starget->max_target_blocked;
		break;
	}

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	if (unbusy)
		scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	kblockd_schedule_work(q, &device->requeue_work);

	return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	return __scsi_queue_insert(cmd, reason, 1);
}
/**
 * scsi_execute - insert request and wait for the result
 * @sdev: scsi device
 * @cmd: scsi command
 * @data_direction: data direction
 * @buffer: data buffer
 * @bufflen: len of buffer
 * @sense: optional sense buffer
 * @timeout: request timeout in jiffies
 * @retries: number of times to retry request
 * @flags: flags to OR into the request's cmd_flags
 * @resid: optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags,
		 int *resid)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);
	if (!req)
		return ret;

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	/*
	 * Some devices (USB mass-storage in particular) may transfer
	 * garbage data together with a residue indicating that the data
	 * is invalid.  Prevent the garbage from being misinterpreted
	 * and prevent security leaks by zeroing out the excess data.
	 */
	if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
		memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

	if (resid)
		*resid = req->resid_len;
	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries,
		     int *resid)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0, resid);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);
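
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a typical caller issuing TEST UNIT READY through scsi_execute_req().
 * The function name, timeout and retry count below are hypothetical
 * example choices.
 */
#if 0	/* example only */
static int example_test_unit_ready(struct scsi_device *sdev)
{
	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct scsi_sense_hdr sshdr;
	int result;

	/* DMA_NONE: TEST UNIT READY transfers no data */
	result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr,
				  30 * HZ, 3, NULL);
	if (result && scsi_sense_valid(&sshdr))
		scsi_print_sense_hdr("example", &sshdr);
	return result;
}
#endif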

/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd - command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	scsi_set_resid(cmd, 0);
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	struct scsi_target *starget = scsi_target(sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	starget->target_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
		return 1;

	return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
	return ((starget->can_queue > 0 &&
		 starget->target_busy >= starget->can_queue) ||
		 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked)
		return 1;

	return 0;
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	LIST_HEAD(starved_list);
	unsigned long flags;

	/* if the device is dead, sdev will be NULL, so no queue to run */
	if (!sdev)
		return;

	shost = sdev->host;
	if (scsi_target(sdev)->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->starved_list, &starved_list);

	while (!list_empty(&starved_list)) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		if (scsi_host_is_busy(shost))
			break;

		sdev = list_entry(starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		if (scsi_target_is_busy(scsi_target(sdev))) {
			list_move_tail(&sdev->starved_entry,
				       &shost->starved_list);
			continue;
		}

		spin_unlock(shost->host_lock);
		spin_lock(sdev->request_queue->queue_lock);
		__blk_run_queue(sdev->request_queue);
		spin_unlock(sdev->request_queue->queue_lock);
		spin_lock(shost->host_lock);
	}
	/* put any unprocessed entries back */
	list_splice(&starved_list, &shost->starved_list);
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

void scsi_requeue_run_queue(struct work_struct *work)
{
	struct scsi_device *sdev;
	struct request_queue *q;

	sdev = container_of(work, struct scsi_device, requeue_work);
	q = sdev->request_queue;
	scsi_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	scsi_unprep_request(req);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (blk_end_request(req, error, bytes)) {
		/* kill remainder if no retries */
		if (error && scsi_noretry_cmd(cmd))
			blk_end_request_all(req, error);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_release_buffers(cmd);
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	__scsi_release_buffers(cmd, 0);
	scsi_next_command(cmd);
	return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

	if (nents <= 8)
		index = 0;
	else
		index = get_count_order(nents) - 3;

	return index;
}
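
/*
 * Annotation (editor's addition, not in the original file): for a pool
 * layout of 8/16/32/64/128 entries, scsi_sgtable_index() picks the
 * smallest pool that covers nents, e.g. nents 1..8 -> index 0
 * ("sgpool-8"), 9..16 -> 1, 17..32 -> 2, 33..64 -> 3, 65..128 -> 4,
 * since get_count_order(nents) - 3 is log2 of nents rounded up to a
 * power of two, minus log2(8).
 */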

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;

	sgp = scsi_sg_pools + scsi_sgtable_index(nents);
	return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
			      gfp_t gfp_mask)
{
	int ret;

	BUG_ON(!nents);

	ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
			       gfp_mask, scsi_sg_alloc);
	if (unlikely(ret))
		__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
				scsi_sg_free);

	return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
	__sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

	if (cmd->sdb.table.nents)
		scsi_free_sgtable(&cmd->sdb);

	memset(&cmd->sdb, 0, sizeof(cmd->sdb));

	if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
		struct scsi_data_buffer *bidi_sdb =
			cmd->request->next_rq->special;
		scsi_free_sgtable(bidi_sdb);
		kmem_cache_free(scsi_sdb_cache, bidi_sdb);
		cmd->request->next_rq->special = NULL;
	}

	if (scsi_prot_sg_count(cmd))
		scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	__scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

static int __scsi_error_from_host_byte(struct scsi_cmnd *cmd, int result)
{
	int error = 0;

	switch (host_byte(result)) {
	case DID_TRANSPORT_FAILFAST:
		error = -ENOLINK;
		break;
	case DID_TARGET_FAILURE:
		cmd->result |= (DID_OK << 16);
		error = -EREMOTEIO;
		break;
	case DID_NEXUS_FAILURE:
		cmd->result |= (DID_OK << 16);
		error = -EBADE;
		break;
	default:
		error = -EIO;
		break;
	}

	return error;
}
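
/*
 * Annotation (editor's addition, not in the original file): the 32-bit
 * SCSI result word packs four fields: the low byte holds the SCSI
 * status, bits 8-15 the message byte, bits 16-23 the host byte and
 * bits 24-31 the driver byte.  That is why the DID_* codes above are
 * extracted with host_byte() and patched back in with "(DID_OK << 16)".
 */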

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must call scsi_end_request().  This will finish off
 *		the specified number of sectors.  If we are done, the
 *		command block will be released and the queue function
 *		will be goosed.  If we are not done then we have to
 *		figure out what to do next:
 *
 *		a) We can call scsi_requeue_command().  The request
 *		   will be unprepared and put back on the queue.  Then
 *		   a new command will be created for it.  This should
 *		   be used if we made forward progress, or if we want
 *		   to switch from READ(10) to READ(6) for example.
 *
 *		b) We can call scsi_queue_insert().  The request will
 *		   be put back on the queue and retried using the same
 *		   command as before, possibly after a delay.
 *
 *		c) We can call blk_end_request() with -EIO to fail
 *		   the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int error = 0;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;
	enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
	      ACTION_DELAYED_RETRY} action;
	char *description = NULL;

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
			if (!sense_deferred)
				error = __scsi_error_from_host_byte(cmd, result);
		}

		req->resid_len = scsi_get_resid(cmd);

		if (scsi_bidi_cmnd(cmd)) {
			/*
			 * Bidi commands must be completed as a whole,
			 * both sides at once.
			 */
			req->next_rq->resid_len = scsi_in(cmd)->resid;

			scsi_release_buffers(cmd);
			blk_end_request_all(req, 0);

			scsi_next_command(cmd);
			return;
		}
	}

	/* no bidi support for !REQ_TYPE_BLOCK_PC yet */
	BUG_ON(blk_bidi_rq(req));

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
				      "%d bytes done.\n",
				      blk_rq_sectors(req), good_bytes));

	/*
	 * Recovered errors need reporting, but they're always treated
	 * as success, so fiddle the result code here.  For BLOCK_PC
	 * we already took a copy of the original into rq->errors which
	 * is what gets returned to the user
	 */
	if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) {
		/* if ATA PASS-THROUGH INFORMATION AVAILABLE skip
		 * print since caller wants ATA registers. Only occurs on
		 * SCSI ATA PASS_THROUGH commands when CK_COND=1
		 */
		if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d))
			;
		else if (!(req->cmd_flags & REQ_QUIET))
			scsi_print_sense("", cmd);
		result = 0;
		/* BLOCK_PC may have set error */
		error = 0;
	}

	/*
	 * A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
		return;

	error = __scsi_error_from_host_byte(cmd, result);

	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the command and see what
		 * happens.
		 */
		action = ACTION_RETRY;
	} else if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				description = "Media Changed";
				action = ACTION_FAIL;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * command and see what happens.
				 */
				action = ACTION_RETRY;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				/* This will issue a new 6-byte command. */
				cmd->device->use_10_for_rw = 0;
				action = ACTION_REPREP;
			} else if (sshdr.asc == 0x10) /* DIX */ {
				description = "Host Data Integrity Failure";
				action = ACTION_FAIL;
				error = -EILSEQ;
			/* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
			} else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
				   (cmd->cmnd[0] == UNMAP ||
				    cmd->cmnd[0] == WRITE_SAME_16 ||
				    cmd->cmnd[0] == WRITE_SAME)) {
				description = "Discard failure";
				action = ACTION_FAIL;
			} else
				action = ACTION_FAIL;
			break;
		case ABORTED_COMMAND:
			action = ACTION_FAIL;
			if (sshdr.asc == 0x10) { /* DIF */
				description = "Target Data Integrity Failure";
				error = -EILSEQ;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
				case 0x14: /* space allocation in progress */
					action = ACTION_DELAYED_RETRY;
					break;
				default:
					description = "Device not ready";
					action = ACTION_FAIL;
					break;
				}
			} else {
				description = "Device not ready";
				action = ACTION_FAIL;
			}
			break;
		case VOLUME_OVERFLOW:
			/* See SSC3rXX or current. */
			action = ACTION_FAIL;
			break;
		default:
			description = "Unhandled sense code";
			action = ACTION_FAIL;
			break;
		}
	} else {
		description = "Unhandled error code";
		action = ACTION_FAIL;
	}

	switch (action) {
	case ACTION_FAIL:
		/* Give up and fail the remainder of the request */
		scsi_release_buffers(cmd);
		if (!(req->cmd_flags & REQ_QUIET)) {
			if (description)
				scmd_printk(KERN_INFO, cmd, "%s\n",
					    description);
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
			scsi_print_command(cmd);
		}
		if (blk_end_request_err(req, error))
			scsi_requeue_command(q, cmd);
		else
			scsi_next_command(cmd);
		break;
	case ACTION_REPREP:
		/* Unprep the request and put it back at the head of the queue.
		 * A new command will be prepared and issued.
		 */
		scsi_release_buffers(cmd);
		scsi_requeue_command(q, cmd);
		break;
	case ACTION_RETRY:
		/* Retry the same command immediately */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
		break;
	case ACTION_DELAYED_RETRY:
		/* Retry the same command after a delay */
		__scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
		break;
	}
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
			     gfp_t gfp_mask)
{
	int count;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
					gfp_mask))) {
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
	BUG_ON(count > sdb->table.nents);
	sdb->table.nents = count;
	sdb->length = blk_rq_bytes(req);
	return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct request *rq = cmd->request;

	int error = scsi_init_sgtable(rq, &cmd->sdb, gfp_mask);
	if (error)
		goto err_exit;

	if (blk_bidi_rq(rq)) {
		struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
			scsi_sdb_cache, GFP_ATOMIC);
		if (!bidi_sdb) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		rq->next_rq->special = bidi_sdb;
		error = scsi_init_sgtable(rq->next_rq, bidi_sdb, GFP_ATOMIC);
		if (error)
			goto err_exit;
	}

	if (blk_integrity_rq(rq)) {
		struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
		int ivecs, count;

		BUG_ON(prot_sdb == NULL);
		ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);

		if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
			error = BLKPREP_DEFER;
			goto err_exit;
		}

		count = blk_rq_map_integrity_sg(rq->q, rq->bio,
						prot_sdb->table.sgl);
		BUG_ON(unlikely(count > ivecs));
		BUG_ON(unlikely(count > queue_max_integrity_segments(rq->q)));

		cmd->prot_sdb = prot_sdb;
		cmd->prot_sdb->table.nents = count;
	}

	return BLKPREP_OK;

err_exit:
	scsi_release_buffers(cmd);
	cmd->request->special = NULL;
	scsi_put_command(cmd);
	return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
					       struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	cmd->cmnd = req->cmd;
	cmd->prot_op = SCSI_PROT_NORMAL;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI
	 * command that does not transfer data, in which case they may
	 * optionally submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd, GFP_ATOMIC);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(blk_rq_bytes(req));

		memset(&cmd->sdb, 0, sizeof(cmd->sdb));
		req->buffer = NULL;
	}

	cmd->cmd_len = req->cmd_len;
	if (!blk_rq_bytes(req))
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = blk_rq_bytes(req);
	cmd->allowed = req->retries;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Set up a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs by
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
			 && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
		ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
		if (ret != BLKPREP_OK)
			return ret;
	}

	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	memset(cmd->cmnd, 0, BLK_MAX_CDB);
	return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
		case SDEV_CREATED_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the blk_peek_request() returns NULL, but the
		 * queue must be restarted, so we schedule a callback to happen
		 * shortly.
		 */
		if (sdev->device_busy == 0)
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

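/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): an upper-level driver's prep_fn is expected to route BLOCK_PC
 * requests to scsi_setup_blk_pc_cmnd(), build CDBs for REQ_TYPE_FS
 * requests itself, and funnel the result through scsi_prep_return().
 * The function and the CDB-building helper named here are hypothetical.
 */
#if 0	/* example only */
static int example_uld_prep_fn(struct request_queue *q, struct request *rq)
{
	struct scsi_device *sdp = q->queuedata;
	int ret;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		ret = scsi_setup_blk_pc_cmnd(sdp, rq);
	} else if (rq->cmd_type == REQ_TYPE_FS) {
		ret = scsi_setup_fs_cmnd(sdp, rq);
		if (ret == BLKPREP_OK)
			/* hypothetical helper that fills in the CDB */
			ret = example_fill_read_write_cdb(rq->special);
	} else {
		ret = BLKPREP_KILL;
	}

	return scsi_prep_return(q, rq, ret);
}
#endif
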
int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_delay_queue(q, SCSI_QUEUE_DELAY);
			return 0;
		}
	}
	if (scsi_device_is_busy(sdev))
		return 0;

	return 1;
}


/*
 * scsi_target_queue_ready: checks whether we can send commands to the
 * target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
					  struct scsi_device *sdev)
{
	struct scsi_target *starget = scsi_target(sdev);

	if (starget->single_lun) {
		if (starget->starget_sdev_user &&
		    starget->starget_sdev_user != sdev)
			return 0;
		starget->starget_sdev_user = sdev;
	}

	if (starget->target_busy == 0 && starget->target_blocked) {
		/*
		 * unblock after target_blocked iterates to zero
		 */
		if (--starget->target_blocked == 0) {
			SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
					 "unblocking target at zero depth\n"));
		} else
			return 0;
	}

	if (scsi_target_is_busy(starget)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);
	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			return 0;
		}
	}
	if (scsi_host_is_busy(shost)) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry, &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_target *starget;

	if (!sdev)
		return 0;

	shost = sdev->host;
	starget = scsi_target(sdev);

	if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
	    scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
		return 1;

	return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev;
	struct scsi_target *starget;
	struct Scsi_Host *shost;

	blk_start_request(req);

	scmd_printk(KERN_INFO, cmd, "killing request\n");

	sdev = cmd->device;
	starget = scsi_target(sdev);
	shost = sdev->host;
	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	starget->target_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->special;
	unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	atomic_inc(&cmd->device->iodone_cnt);
	if (cmd->result)
		atomic_inc(&cmd->device->ioerr_cnt);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost;
	struct scsi_cmnd *cmd;
	struct request *req;

	if (!sdev) {
		while ((req = blk_peek_request(q)) != NULL)
			scsi_kill_request(req, q);
		return;
	}

	if (!get_device(&sdev->sdev_gendev))
		/* We must be tearing the block queue down already */
		return;

	/*
	 * To start with, we keep looping until the queue is empty, or until
	 * the host is no longer able to accept any more requests.
	 */
	shost = sdev->host;
	for (;;) {
		int rtn;
		/*
		 * get next queueable request.  We do this early to make sure
		 * that the request is fully prepared even if we cannot
		 * accept it.
		 */
		req = blk_peek_request(q);
		if (!req || !scsi_dev_queue_ready(q, sdev))
			break;

		if (unlikely(!scsi_device_online(sdev))) {
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			scsi_kill_request(req, q);
			continue;
		}


		/*
		 * Remove the request from the request list.
		 */
		if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
			blk_start_request(req);
		sdev->device_busy++;

		spin_unlock(q->queue_lock);
		cmd = req->special;
		if (unlikely(cmd == NULL)) {
			printk(KERN_CRIT "impossible request in %s.\n"
					 "please mail a stack trace to "
					 "linux-scsi@vger.kernel.org\n",
					 __func__);
			blk_dump_rq_flags(req, "foo");
			BUG();
		}
		spin_lock(shost->host_lock);

		/*
		 * We hit this when the driver is using a host wide
		 * tag map. For device level tag maps the queue_depth check
		 * in the device ready fn would prevent us from trying
		 * to allocate a tag. Since the map is a shared host resource
		 * we add the dev to the starved list so it eventually gets
		 * a run when a tag is freed.
		 */
		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
			if (list_empty(&sdev->starved_entry))
				list_add_tail(&sdev->starved_entry,
					      &shost->starved_list);
			goto not_ready;
		}

		if (!scsi_target_queue_ready(shost, sdev))
			goto not_ready;

		if (!scsi_host_queue_ready(q, shost, sdev))
			goto not_ready;

		scsi_target(sdev)->target_busy++;
		shost->host_busy++;

		/*
		 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
		 *		take the lock again.
		 */
		spin_unlock_irq(shost->host_lock);

		/*
		 * Finally, initialize any error handling parameters, and set up
		 * the timers for timeouts.
		 */
		scsi_init_cmd_errh(cmd);

		/*
		 * Dispatch the command to the low-level driver.
		 */
		rtn = scsi_dispatch_cmd(cmd);
		spin_lock_irq(q->queue_lock);
		if (rtn)
			goto out_delay;
	}

	goto out;

 not_ready:
	spin_unlock_irq(shost->host_lock);

	/*
	 * lock q, handle tag, requeue req, and decrement device_busy. We
	 * must return with queue_lock held.
	 *
	 * Decrementing device_busy without checking it is OK, as all such
	 * cases (host limits or settings) should run the queue at some
	 * later time.
	 */
	spin_lock_irq(q->queue_lock);
	blk_requeue_request(q, req);
	sdev->device_busy--;
out_delay:
	if (sdev->device_busy == 0)
		blk_delay_queue(q, SCSI_QUEUE_DELAY);
out:
	/* must be careful here...if we trigger the ->remove() function
	 * we cannot be holding the q lock */
	spin_unlock_irq(q->queue_lock);
	put_device(&sdev->sdev_gendev);
	spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
	struct device *host_dev;
	u64 bounce_limit = 0xffffffff;

	if (shost->unchecked_isa_dma)
		return BLK_BOUNCE_ISA;
	/*
	 * Platforms with virtual-DMA translation
	 * hardware have no practical limit.
	 */
	if (!PCI_DMA_BUS_IS_PHYS)
		return BLK_BOUNCE_ANY;

	host_dev = scsi_get_device(shost);
	if (host_dev && host_dev->dma_mask)
		bounce_limit = *host_dev->dma_mask;

	return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
					 request_fn_proc *request_fn)
{
	struct request_queue *q;
	struct device *dev = shost->shost_gendev.parent;

	q = blk_init_queue(request_fn, NULL);
	if (!q)
		return NULL;

	/*
	 * this limit is imposed by hardware restrictions
	 */
	blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize,
					SCSI_MAX_SG_CHAIN_SEGMENTS));

	if (scsi_host_prot_dma(shost)) {
		shost->sg_prot_tablesize =
			min_not_zero(shost->sg_prot_tablesize,
				     (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS);
		BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize);
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
	}

	blk_queue_max_hw_sectors(q, shost->max_sectors);
	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
	blk_queue_segment_boundary(q, shost->dma_boundary);
	dma_set_seg_boundary(dev, shost->dma_boundary);

	blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

	if (!shost->use_clustering)
		q->limits.cluster = 0;

	/*
	 * set a reasonable default alignment on word boundaries: the
	 * host and device may alter it using
	 * blk_queue_update_dma_alignment() later.
	 */
	blk_queue_dma_alignment(q, 0x03);

	return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
	struct request_queue *q;

	q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
	if (!q)
		return NULL;

	blk_queue_prep_rq(q, scsi_prep_fn);
	blk_queue_softirq_done(q, scsi_softirq_done);
	blk_queue_rq_timed_out(q, scsi_times_out);
	blk_queue_lld_busy(q, scsi_lld_busy);
	return q;
}

void scsi_free_queue(struct request_queue *q)
{
	unsigned long flags;

	WARN_ON(q->queuedata);

	/* cause scsi_request_fn() to kill all non-finished requests */
	spin_lock_irqsave(q->queue_lock, flags);
	q->request_fn(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *		commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *		commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *		get unblocked other than the low-level driver calling
 *		scsi_unblock_requests().
 *
 *		This is done as an API function so that changes to the
 *		internals of the scsi mid-layer won't require wholesale
 *		changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
	shost->host_self_blocked = 0;
	scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
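
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a low-level driver typically brackets a window in which its
 * hardware cannot accept commands, such as a firmware reload, with
 * these two calls.  The function name is hypothetical.
 */
#if 0	/* example only */
static void example_reset_adapter(struct Scsi_Host *shost)
{
	scsi_block_requests(shost);
	/* ... quiesce and reinitialize the hardware here ... */
	scsi_unblock_requests(shost);	/* also reruns all queues */
}
#endif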

int __init scsi_init_queue(void)
{
	int i;

	scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
					   sizeof(struct scsi_data_buffer),
					   0, 0, NULL);
	if (!scsi_sdb_cache) {
		printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		int size = sgp->size * sizeof(struct scatterlist);

		sgp->slab = kmem_cache_create(sgp->name, size, 0,
				SLAB_HWCACHE_ALIGN, NULL);
		if (!sgp->slab) {
			printk(KERN_ERR "SCSI: can't init sg slab %s\n",
					sgp->name);
			goto cleanup_sdb;
		}

		sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
						     sgp->slab);
		if (!sgp->pool) {
			printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
					sgp->name);
			goto cleanup_sdb;
		}
	}

	return 0;

cleanup_sdb:
	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		if (sgp->pool)
			mempool_destroy(sgp->pool);
		if (sgp->slab)
			kmem_cache_destroy(sgp->slab);
	}
	kmem_cache_destroy(scsi_sdb_cache);

	return -ENOMEM;
}

void scsi_exit_queue(void)
{
	int i;

	kmem_cache_destroy(scsi_sdb_cache);

	for (i = 0; i < SG_MEMPOOL_NR; i++) {
		struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
		mempool_destroy(sgp->pool);
		kmem_cache_destroy(sgp->slab);
	}
}

/**
 * scsi_mode_select - issue a mode select
 * @sdev: SCSI device to be queried
 * @pf: Page format bit (1 == standard, 0 == vendor specific)
 * @sp: Save page bit (0 == don't save, 1 == save)
 * @modepage: mode page being requested
 * @buffer: request buffer (may not be smaller than eight bytes)
 * @len: length of request buffer.
 * @timeout: command timeout
 * @retries: number of retries before failing
 * @data: returns a structure abstracting the mode header data
 * @sshdr: place to put sense data (or NULL if no sense to be collected).
 *	must be SCSI_SENSE_BUFFERSIZE big.
 *
 * Returns zero if successful; negative error number or scsi
 * status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;


		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
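
/*
 * Annotation (editor's addition, not in the original file): the eight
 * bytes prepended in the 10-byte branch above form the MODE SELECT(10)
 * parameter list header: mode data length (reserved on MODE SELECT,
 * hence zero), medium type, device-specific parameter, the LONGLBA
 * flag, and the big-endian block descriptor length.  The 6-byte branch
 * prepends the corresponding four-byte MODE SELECT(6) header, and the
 * CDB carries the total parameter list length.
 */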

/**
 * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary.
 * @sdev: SCSI device to be queried
 * @dbd: set if mode sense will allow block descriptors to be returned
 * @modepage: mode page being requested
 * @bu…