/drivers/staging/hv/storvsc_drv.c
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/dmi.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>

#include "hyperv.h"
#include "hyperv_storage.h"

static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;

module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

static const char *driver_name = "storvsc";

/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const struct hv_guid stor_vsci_device_type = {
	.data = {
		0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
		0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
	}
};

struct hv_host_device {
	struct hv_device *dev;
	struct kmem_cache *request_pool;
	unsigned int port;
	unsigned char path;
	unsigned char target;
};

struct storvsc_cmd_request {
	struct list_head entry;
	struct scsi_cmnd *cmd;

	unsigned int bounce_sgl_count;
	struct scatterlist *bounce_sgl;

	struct hv_storvsc_request request;
};


static int storvsc_device_alloc(struct scsi_device *sdevice)
{
	/*
	 * This enables luns to be located sparsely. Otherwise, we may not
	 * discovered them.
	 */
	sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
	return 0;
}

static int storvsc_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
	/* checking done by caller. */
	return bvec->bv_len;
}

static int storvsc_device_configure(struct scsi_device *sdevice)
{
	scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
				STORVSC_MAX_IO_REQUESTS);

	blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

	blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

	blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);

	return 0;
}

static void destroy_bounce_buffer(struct scatterlist *sgl,
				  unsigned int sg_count)
{
	int i;
	struct page *page_buf;

	for (i = 0; i < sg_count; i++) {
		page_buf = sg_page((&sgl[i]));
		if (page_buf != NULL)
			__free_page(page_buf);
	}

	kfree(sgl);
}

static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
	int i;

	/* No need to check */
	if (sg_count < 2)
		return -1;

	/* We have at least 2 sg entries */
	for (i = 0; i < sg_count; i++) {
		if (i == 0) {
			/* make sure 1st one does not have hole */
			if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
				return i;
		} else if (i == sg_count - 1) {
			/* make sure last one does not have hole */
			if (sgl[i].offset != 0)
				return i;
		} else {
			/* make sure no hole in the middle */
			if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
				return i;
		}
	}
	return -1;
}

static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
						unsigned int sg_count,
						unsigned int len)
{
	int i;
	int num_pages;
	struct scatterlist *bounce_sgl;
	struct page *page_buf;

	num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

	bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
	if (!bounce_sgl)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		page_buf = alloc_page(GFP_ATOMIC);
		if (!page_buf)
			goto cleanup;
		sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
	}

	return bounce_sgl;

cleanup:
	destroy_bounce_buffer(bounce_sgl, num_pages);
	return NULL;
}


/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
					    struct scatterlist *bounce_sgl,
					    unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long dest_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
					KM_IRQ0) + orig_sgl[i].offset;
		dest = dest_addr;
		destlen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
						   KM_IRQ0);

		while (destlen) {
			src = bounce_addr + bounce_sgl[j].offset;
			srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].offset += copylen;
			destlen -= copylen;
			dest += copylen;

			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
				/* full */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (destlen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);
			} else if (destlen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
			      KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}


/* Assume the bounce_sgl has enough room ie using the create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
					  struct scatterlist *bounce_sgl,
					  unsigned int orig_sgl_count)
{
	int i;
	int j = 0;
	unsigned long src, dest;
	unsigned int srclen, destlen, copylen;
	unsigned int total_copied = 0;
	unsigned long bounce_addr = 0;
	unsigned long src_addr = 0;
	unsigned long flags;

	local_irq_save(flags);

	for (i = 0; i < orig_sgl_count; i++) {
		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
					KM_IRQ0) + orig_sgl[i].offset;
		src = src_addr;
		srclen = orig_sgl[i].length;

		if (bounce_addr == 0)
			bounce_addr =
			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
						   KM_IRQ0);

		while (srclen) {
			/* assume bounce offset always == 0 */
			dest = bounce_addr + bounce_sgl[j].length;
			destlen = PAGE_SIZE - bounce_sgl[j].length;

			copylen = min(srclen, destlen);
			memcpy((void *)dest, (void *)src, copylen);

			total_copied += copylen;
			bounce_sgl[j].length += copylen;
			srclen -= copylen;
			src += copylen;

			if (bounce_sgl[j].length == PAGE_SIZE) {
				/* full..move to next entry */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
				j++;

				/* if we need to use another bounce buffer */
				if (srclen || i != orig_sgl_count - 1)
					bounce_addr =
					(unsigned long)kmap_atomic(
					sg_page((&bounce_sgl[j])), KM_IRQ0);

			} else if (srclen == 0 && i == orig_sgl_count - 1) {
				/* unmap the last bounce that is < PAGE_SIZE */
				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
			}
		}

		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
	}

	local_irq_restore(flags);

	return total_copied;
}


static int storvsc_remove(struct hv_device *dev)
{
	struct Scsi_Host *host = dev_get_drvdata(&dev->device);
	struct hv_host_device *host_dev =
			(struct hv_host_device *)host->hostdata;

	scsi_remove_host(host);

	scsi_host_put(host);

	storvsc_dev_remove(dev);
	if (host_dev->request_pool) {
		kmem_cache_destroy(host_dev->request_pool);
		host_dev->request_pool = NULL;
	}
	return 0;
}


static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
			   sector_t capacity, int *info)
{
	sector_t nsect = capacity;
	sector_t cylinders = nsect;
	int heads, sectors_pt;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	heads = 0xff;
	sectors_pt = 0x3f;	/* Sectors per track */
	sector_div(cylinders, heads * sectors_pt);
	if ((sector_t)(cylinders + 1) * heads * sectors_pt < nsect)
		cylinders = 0xffff;

	info[0] = heads;
	info[1] = sectors_pt;
	info[2] = (int)cylinders;

	return 0;
}

static int storvsc_host_reset(struct hv_device *device)
{
	struct storvsc_device *stor_device;
	struct hv_storvsc_request *request;
	struct vstor_packet *vstor_packet;
	int ret, t;


	stor_device = get_stor_device(device);
	if (!stor_device)
		return -1;

	request = &stor_device->reset_request;
	vstor_packet = &request->vstor_packet;

	init_completion(&request->wait_event);

	vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
	vstor_packet->vm_srb.path_id = stor_device->path_id;

	ret = vmbus_sendpacket(device->channel, vstor_packet,
			       sizeof(struct vstor_packet),
			       (unsigned long)&stor_device->reset_request,
			       VM_PKT_DATA_INBAND,
			       VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
	if (ret != 0)
		goto cleanup;

	t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
	if (t == 0) {
		ret = -ETIMEDOUT;
		goto cleanup;
	}


	/*
	 * At this point, all outstanding requests in the adapter
	 * should have been flushed out and return to us
	 */

cleanup:
	put_stor_device(device);
	return ret;
}


/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
	int ret;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	struct hv_device *dev = host_dev->dev;

	ret = storvsc_host_reset(dev);
	if (ret != 0)
		return ret;

	return ret;
}


/*
 * storvsc_commmand_completion - Command completion processing
 */
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
{
	struct storvsc_cmd_request *cmd_request =
		(struct storvsc_cmd_request *)request->context;
	struct scsi_cmnd *scmnd = cmd_request->cmd;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	void (*scsi_done_fn)(struct scsi_cmnd *);
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	if (cmd_request->bounce_sgl_count) {

		/* FIXME: We can optimize on writes by just skipping this */
		copy_from_bounce_buffer(scsi_sglist(scmnd),
				cmd_request->bounce_sgl,
				scsi_sg_count(scmnd));
		destroy_bounce_buffer(cmd_request->bounce_sgl,
				cmd_request->bounce_sgl_count);
	}

	vm_srb = &request->vstor_packet.vm_srb;
	scmnd->result = vm_srb->scsi_status;

	if (scmnd->result) {
		if (scsi_normalize_sense(scmnd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("storvsc", &sense_hdr);
	}

	scsi_set_resid(scmnd,
		request->data_buffer.len -
		vm_srb->data_transfer_length);

	scsi_done_fn = scmnd->scsi_done;

	scmnd->host_scribble = NULL;
	scmnd->scsi_done = NULL;

	scsi_done_fn(scmnd);

	kmem_cache_free(host_dev->request_pool, cmd_request);
}


/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
				void (*done)(struct scsi_cmnd *))
{
	int ret;
	struct hv_host_device *host_dev =
		(struct hv_host_device *)scmnd->device->host->hostdata;
	struct hv_device *dev = host_dev->dev;
	struct hv_storvsc_request *request;
	struct storvsc_cmd_request *cmd_request;
	unsigned int request_size = 0;
	int i;
	struct scatterlist *sgl;
	unsigned int sg_count = 0;
	struct vmscsi_request *vm_srb;


	/* If retrying, no need to prep the cmd */
	if (scmnd->host_scribble) {

		cmd_request =
			(struct storvsc_cmd_request *)scmnd->host_scribble;

		goto retry_request;
	}

	scmnd->scsi_done = done;

	request_size = sizeof(struct storvsc_cmd_request);

	cmd_request = kmem_cache_zalloc(host_dev->request_pool,
					GFP_ATOMIC);
	if (!cmd_request) {
		scmnd->scsi_done = NULL;
		return SCSI_MLQUEUE_DEVICE_BUSY;
	}

	/* Setup the cmd request */
	cmd_request->bounce_sgl_count = 0;
	cmd_request->bounce_sgl = NULL;
	cmd_request->cmd = scmnd;

	scmnd->host_scribble = (unsigned char *)cmd_request;

	request = &cmd_request->request;
	vm_srb = &request->vstor_packet.vm_srb;


	/* Build the SRB */
	switch (scmnd->sc_data_direction) {
	case DMA_TO_DEVICE:
		vm_srb->data_in = WRITE_TYPE;
		break;
	case DMA_FROM_DEVICE:
		vm_srb->data_in = READ_TYPE;
		break;
	default:
		vm_srb->data_in = UNKNOWN_TYPE;
		break;
	}

	request->on_io_completion = storvsc_commmand_completion;
	request->context = cmd_request;/* scmnd; */

	vm_srb->port_number = host_dev->port;
	vm_srb->path_id = scmnd->device->channel;
	vm_srb->target_id = scmnd->device->id;
	vm_srb->lun = scmnd->device->lun;

	vm_srb->cdb_length = scmnd->cmd_len;

	memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

	request->sense_buffer = scmnd->sense_buffer;


	request->data_buffer.len = scsi_bufflen(scmnd);
	if (scsi_sg_count(scmnd)) {
		sgl = (struct scatterlist *)scsi_sglist(scmnd);
		sg_count = scsi_sg_count(scmnd);

		/* check if we need to bounce the sgl */
		if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
			cmd_request->bounce_sgl =
				create_bounce_buffer(sgl, scsi_sg_count(scmnd),
						     scsi_bufflen(scmnd));
			if (!cmd_request->bounce_sgl) {
				scmnd->scsi_done = NULL;
				scmnd->host_scribble = NULL;
				kmem_cache_free(host_dev->request_pool,
						cmd_request);

				return SCSI_MLQUEUE_HOST_BUSY;
			}

			cmd_request->bounce_sgl_count =
				ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
					PAGE_SHIFT;

			/*
			 * FIXME: We can optimize on reads by just skipping
			 * this
			 */
			copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
					      scsi_sg_count(scmnd));

			sgl = cmd_request->bounce_sgl;
			sg_count = cmd_request->bounce_sgl_count;
		}

		request->data_buffer.offset = sgl[0].offset;

		for (i = 0; i < sg_count; i++)
			request->data_buffer.pfn_array[i] =
				page_to_pfn(sg_page((&sgl[i])));

	} else if (scsi_sglist(scmnd)) {
		request->data_buffer.offset =
			virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
		request->data_buffer.pfn_array[0] =
			virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
	}

retry_request:
	/* Invokes the vsc to start an IO */
	ret = storvsc_do_io(dev, &cmd_request->request);

	if (ret == -1) {
		/* no more space */

		if (cmd_request->bounce_sgl_count) {
			/*
			 * FIXME: We can optimize on writes by just skipping
			 * this
			 */
			copy_from_bounce_buffer(scsi_sglist(scmnd),
					cmd_request->bounce_sgl,
					scsi_sg_count(scmnd));
			destroy_bounce_buffer(cmd_request->bounce_sgl,
					cmd_request->bounce_sgl_count);
		}

		kmem_cache_free(host_dev->request_pool, cmd_request);

		scmnd->scsi_done = NULL;
		scmnd->host_scribble = NULL;

		ret = SCSI_MLQUEUE_DEVICE_BUSY;
	}

	return ret;
}

static DEF_SCSI_QCMD(storvsc_queuecommand)


/* Scsi driver */
static struct scsi_host_template scsi_driver = {
	.module =		THIS_MODULE,
	.name =			"storvsc_host_t",
	.bios_param =		storvsc_get_chs,
	.queuecommand =		storvsc_queuecommand,
	.eh_host_reset_handler =	storvsc_host_reset_handler,
	.slave_alloc =		storvsc_device_alloc,
	.slave_configure =	storvsc_device_configure,
	.cmd_per_lun =		1,
	/* 64 max_queue * 1 target */
	.can_queue =		STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
	.this_id =		-1,
	/* no use setting to 0 since ll_blk_rw reset it to 1 */
	/* currently 32 */
	.sg_tablesize =		MAX_MULTIPAGE_BUFFER_COUNT,
	/*
	 * ENABLE_CLUSTERING allows mutiple physically contig bio_vecs to merge
	 * into 1 sg element. If set, we must limit the max_segment_size to
	 * PAGE_SIZE, otherwise we may get 1 sg element that represents
	 * multiple
	 */
	/* physically contig pfns (ie sg[x].length > PAGE_SIZE). */
	.use_clustering =	ENABLE_CLUSTERING,
	/* Make sure we dont get a sg segment crosses a page boundary */
	.dma_boundary =		PAGE_SIZE-1,
};


/*
 * storvsc_probe - Add a new device for this driver
 */

static int storvsc_probe(struct hv_device *device)
{
	int ret;
	struct Scsi_Host *host;
	struct hv_host_device *host_dev;
	struct storvsc_device_info device_info;

	host = scsi_host_alloc(&scsi_driver,
			       sizeof(struct hv_host_device));
	if (!host)
		return -ENOMEM;

	dev_set_drvdata(&device->device, host);

	host_dev = (struct hv_host_device *)host->hostdata;
	memset(host_dev, 0, sizeof(struct hv_host_device));

	host_dev->port = host->host_no;
	host_dev->dev = device;

	host_dev->request_pool =
				kmem_cache_create(dev_name(&device->device),
					sizeof(struct storvsc_cmd_request), 0,
					SLAB_HWCACHE_ALIGN, NULL);

	if (!host_dev->request_pool) {
		scsi_host_put(host);
		return -ENOMEM;
	}

	device_info.port_number = host->host_no;
	device_info.ring_buffer_size = storvsc_ringbuffer_size;
	/* Call to the vsc driver to add the device */
	ret = storvsc_dev_add(device, (void *)&device_info);

	if (ret != 0) {
		kmem_cache_destroy(host_dev->request_pool);
		scsi_host_put(host);
		return -1;
	}

	host_dev->path = device_info.path_id;
	host_dev->target = device_info.target_id;

	/* max # of devices per target */
	host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
	/* max # of targets per channel */
	host->max_id = STORVSC_MAX_TARGETS;
	/* max # of channels */
	host->max_channel = STORVSC_MAX_CHANNELS - 1;
	/* max cmd length */
	host->max_cmd_len = STORVSC_MAX_CMD_LEN;

	/* Register the HBA and start the scsi bus scan */
	ret = scsi_add_host(host, &device->device);
	if (ret != 0) {

		storvsc_dev_remove(device);

		kmem_cache_destroy(host_dev->request_pool);
		scsi_host_put(host);
		return -1;
	}

	scsi_scan_host(host);
	return ret;
}

/* The one and only one */

static struct hv_driver storvsc_drv = {
	.probe = storvsc_probe,
	.remove = storvsc_remove,
};

/*
 * We use a DMI table to determine if we should autoload this driver. This is
 * needed by distro tools to determine if the hyperv drivers should be
 * installed and/or configured. We don't do anything else with the table, but
 * it needs to be present.
 */

static const struct dmi_system_id __initconst
hv_stor_dmi_table[] __maybe_unused = {
	{
		.ident = "Hyper-V",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Virtual Machine"),
			DMI_MATCH(DMI_BOARD_NAME, "Virtual Machine"),
		},
	},
	{ },
};
MODULE_DEVICE_TABLE(dmi, hv_stor_dmi_table);

static int __init storvsc_drv_init(void)
{
	int ret;
	struct hv_driver *drv = &storvsc_drv;
	u32 max_outstanding_req_per_channel;

	/*
	 * Divide the ring buffer data size (which is 1 page less
	 * than the ring buffer size since that page is reserved for
	 * the ring buffer indices) by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */

	max_outstanding_req_per_channel =
		((storvsc_ringbuffer_size - PAGE_SIZE) /
		ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
			sizeof(struct vstor_packet) + sizeof(u64),
			sizeof(u64)));

	memcpy(&drv->dev_type, &stor_vsci_device_type,
	       sizeof(struct hv_guid));

	if (max_outstanding_req_per_channel <
	    STORVSC_MAX_IO_REQUESTS)
		return -1;

	drv->driver.name = driver_name;


	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}

static void __exit storvsc_drv_exit(void)
{
	vmbus_child_driver_unregister(&storvsc_drv.driver);
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_drv_init);
module_exit(storvsc_drv_exit);