/drivers/md/dm-mpath.c
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>

#include "dm-path-selector.h"
#include "dm-uevent.h"

#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/workqueue.h>
#include <scsi/scsi_dh.h>
#include <asm/atomic.h>

#define DM_MSG_PREFIX "multipath"
#define MESG_STR(x) x, sizeof(x)
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)

/* Path properties */
struct pgpath {
	struct list_head list;

	struct priority_group *pg;	/* Owning PG */
	unsigned is_active;		/* Path status */
	unsigned fail_count;		/* Cumulative failure count */

	struct dm_path path;
	struct delayed_work activate_path;
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

/*
 * Paths are grouped into Priority Groups and numbered from 1 upwards.
 * Each has a path selector which controls which path gets used.
 */
struct priority_group {
	struct list_head list;

	struct multipath *m;		/* Owning multipath instance */
	struct path_selector ps;

	unsigned pg_num;		/* Reference number */
	unsigned bypassed;		/* Temporarily bypass this PG? */

	unsigned nr_pgpaths;		/* Number of paths in PG */
	struct list_head pgpaths;
};

/* Multipath context */
struct multipath {
	struct list_head list;
	struct dm_target *ti;

	spinlock_t lock;

	const char *hw_handler_name;
	char *hw_handler_params;

	unsigned nr_priority_groups;
	struct list_head priority_groups;

	wait_queue_head_t pg_init_wait;	/* Wait for pg_init completion */

	unsigned pg_init_required;	/* pg_init needs calling? */
	unsigned pg_init_in_progress;	/* Only one pg_init allowed at once */
	unsigned pg_init_delay_retry;	/* Delay pg_init retry? */

	unsigned nr_valid_paths;	/* Total number of usable paths */
	struct pgpath *current_pgpath;
	struct priority_group *current_pg;
	struct priority_group *next_pg;	/* Switch to this PG if set */
	unsigned repeat_count;		/* I/Os left before calling PS again */

	unsigned queue_io;		/* Must we queue all I/O? */
	unsigned queue_if_no_path;	/* Queue I/O if last path fails? */
	unsigned saved_queue_if_no_path;/* Saved state during suspension */
	unsigned pg_init_retries;	/* Number of times to retry pg_init */
	unsigned pg_init_count;		/* Number of times pg_init called */
	unsigned pg_init_delay_msecs;	/* Number of msecs before pg_init retry */

	struct work_struct process_queued_ios;
	struct list_head queued_ios;
	unsigned queue_size;

	struct work_struct trigger_event;

	/*
	 * We must use a mempool of dm_mpath_io structs so that we
	 * can resubmit bios on error.
	 */
	mempool_t *mpio_pool;

	struct mutex work_mutex;
};

/*
 * Context information attached to each bio we process.
 */
struct dm_mpath_io {
	struct pgpath *pgpath;
	size_t nr_bytes;
};

typedef int (*action_fn) (struct pgpath *pgpath);

#define MIN_IOS 256	/* Mempool size */

static struct kmem_cache *_mpio_cache;

static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void process_queued_ios(struct work_struct *work);
static void trigger_event(struct work_struct *work);
static void activate_path(struct work_struct *work);


/*-----------------------------------------------
 * Allocation routines
 *-----------------------------------------------*/

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static struct priority_group *alloc_priority_group(void)
{
	struct priority_group *pg;

	pg = kzalloc(sizeof(*pg), GFP_KERNEL);

	if (pg)
		INIT_LIST_HEAD(&pg->pgpaths);

	return pg;
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;
	struct multipath *m = ti->private;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		if (m->hw_handler_name)
			scsi_dh_detach(bdev_get_queue(pgpath->path.dev->bdev));
		dm_put_device(ti, pgpath->path.dev);
		free_pgpath(pgpath);
	}
}

static void free_priority_group(struct priority_group *pg,
				struct dm_target *ti)
{
	struct path_selector *ps = &pg->ps;

	if (ps->type) {
		ps->type->destroy(ps);
		dm_put_path_selector(ps->type);
	}

	free_pgpaths(&pg->pgpaths, ti);
	kfree(pg);
}

static struct multipath *alloc_multipath(struct dm_target *ti)
{
	struct multipath *m;

	m = kzalloc(sizeof(*m), GFP_KERNEL);
	if (m) {
		INIT_LIST_HEAD(&m->priority_groups);
		INIT_LIST_HEAD(&m->queued_ios);
		spin_lock_init(&m->lock);
		m->queue_io = 1;
		m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
		INIT_WORK(&m->process_queued_ios, process_queued_ios);
		INIT_WORK(&m->trigger_event, trigger_event);
		init_waitqueue_head(&m->pg_init_wait);
		mutex_init(&m->work_mutex);
		m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
		if (!m->mpio_pool) {
			kfree(m);
			return NULL;
		}
		m->ti = ti;
		ti->private = m;
	}

	return m;
}

static void free_multipath(struct multipath *m)
{
	struct priority_group *pg, *tmp;

	list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
		list_del(&pg->list);
		free_priority_group(pg, m->ti);
	}

	kfree(m->hw_handler_name);
	kfree(m->hw_handler_params);
	mempool_destroy(m->mpio_pool);
	kfree(m);
}


/*-----------------------------------------------
 * Path selection
 *-----------------------------------------------*/

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;
	unsigned long pg_init_delay = 0;

	m->pg_init_count++;
	m->pg_init_required = 0;
	if (m->pg_init_delay_retry)
		pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
						 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
				       pg_init_delay))
			m->pg_init_in_progress++;
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;

	/* Must we initialise the PG first, and queue I/O till it's ready? */
	if (m->hw_handler_name) {
		m->pg_init_required = 1;
		m->queue_io = 1;
	} else {
		m->pg_init_required = 0;
		m->queue_io = 0;
	}

	m->pg_init_count = 0;
}

static int __choose_path_in_pg(struct multipath *m, struct priority_group *pg,
			       size_t nr_bytes)
{
	struct dm_path *path;

	path = pg->ps.type->select_path(&pg->ps, &m->repeat_count, nr_bytes);
	if (!path)
		return -ENXIO;

	m->current_pgpath = path_to_pgpath(path);

	if (m->current_pg != pg)
		__switch_pg(m, m->current_pgpath);

	return 0;
}

static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
{
	struct priority_group *pg;
	unsigned bypassed = 1;

	if (!m->nr_valid_paths)
		goto failed;

	/* Were we instructed to switch PG? */
	if (m->next_pg) {
		pg = m->next_pg;
		m->next_pg = NULL;
		if (!__choose_path_in_pg(m, pg, nr_bytes))
			return;
	}

	/* Don't change PG until it has no remaining paths */
	if (m->current_pg && !__choose_path_in_pg(m, m->current_pg, nr_bytes))
		return;

	/*
	 * Loop through priority groups until we find a valid path.
	 * First time we skip PGs marked 'bypassed'.
	 * Second time we only try the ones we skipped.
	 */
	do {
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed == bypassed)
				continue;
			if (!__choose_path_in_pg(m, pg, nr_bytes))
				return;
		}
	} while (bypassed--);

failed:
	m->current_pgpath = NULL;
	m->current_pg = NULL;
}

/*
 * Check whether bios must be queued in the device-mapper core rather
 * than here in the target.
 *
 * m->lock must be held on entry.
 *
 * If m->queue_if_no_path and m->saved_queue_if_no_path hold the
 * same value then we are not between multipath_presuspend()
 * and multipath_resume() calls and we have no need to check
 * for the DMF_NOFLUSH_SUSPENDING flag.
 */
static int __must_push_back(struct multipath *m)
{
	return (m->queue_if_no_path != m->saved_queue_if_no_path &&
		dm_noflush_suspending(m->ti));
}

static int map_io(struct multipath *m, struct request *clone,
		  struct dm_mpath_io *mpio, unsigned was_queued)
{
	int r = DM_MAPIO_REMAPPED;
	size_t nr_bytes = blk_rq_bytes(clone);
	unsigned long flags;
	struct pgpath *pgpath;
	struct block_device *bdev;

	spin_lock_irqsave(&m->lock, flags);

	/* Do we need to select a new pgpath? */
	if (!m->current_pgpath ||
	    (!m->queue_io && (m->repeat_count && --m->repeat_count == 0)))
		__choose_pgpath(m, nr_bytes);

	pgpath = m->current_pgpath;

	if (was_queued)
		m->queue_size--;

	if ((pgpath && m->queue_io) ||
	    (!pgpath && m->queue_if_no_path)) {
		/* Queue for the daemon to resubmit */
		list_add_tail(&clone->queuelist, &m->queued_ios);
		m->queue_size++;
		if ((m->pg_init_required && !m->pg_init_in_progress) ||
		    !m->queue_io)
			queue_work(kmultipathd, &m->process_queued_ios);
		pgpath = NULL;
		r = DM_MAPIO_SUBMITTED;
	} else if (pgpath) {
		bdev = pgpath->path.dev->bdev;
		clone->q = bdev_get_queue(bdev);
		clone->rq_disk = bdev->bd_disk;
	} else if (__must_push_back(m))
		r = DM_MAPIO_REQUEUE;
	else
		r = -EIO;	/* Failed */

	mpio->pgpath = pgpath;
	mpio->nr_bytes = nr_bytes;

	if (r == DM_MAPIO_REMAPPED && pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps, &pgpath->path,
					      nr_bytes);

	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * If we run out of usable paths, should we queue I/O or error it?
 */
static int queue_if_no_path(struct multipath *m, unsigned queue_if_no_path,
			    unsigned save_old_value)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (save_old_value)
		m->saved_queue_if_no_path = m->queue_if_no_path;
	else
		m->saved_queue_if_no_path = queue_if_no_path;
	m->queue_if_no_path = queue_if_no_path;
	if (!m->queue_if_no_path && m->queue_size)
		queue_work(kmultipathd, &m->process_queued_ios);

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*-----------------------------------------------------------------
 * The multipath daemon is responsible for resubmitting queued ios.
 *---------------------------------------------------------------*/

static void dispatch_queued_ios(struct multipath *m)
{
	int r;
	unsigned long flags;
	struct dm_mpath_io *mpio;
	union map_info *info;
	struct request *clone, *n;
	LIST_HEAD(cl);

	spin_lock_irqsave(&m->lock, flags);
	list_splice_init(&m->queued_ios, &cl);
	spin_unlock_irqrestore(&m->lock, flags);

	list_for_each_entry_safe(clone, n, &cl, queuelist) {
		list_del_init(&clone->queuelist);

		info = dm_get_rq_mapinfo(clone);
		mpio = info->ptr;

		r = map_io(m, clone, mpio, 1);
		if (r < 0) {
			mempool_free(mpio, m->mpio_pool);
			dm_kill_unmapped_request(clone, r);
		} else if (r == DM_MAPIO_REMAPPED)
			dm_dispatch_request(clone);
		else if (r == DM_MAPIO_REQUEUE) {
			mempool_free(mpio, m->mpio_pool);
			dm_requeue_unmapped_request(clone);
		}
	}
}

static void process_queued_ios(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, process_queued_ios);
	struct pgpath *pgpath = NULL;
	unsigned must_queue = 1;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->queue_size)
		goto out;

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	pgpath = m->current_pgpath;

	if ((pgpath && !m->queue_io) ||
	    (!pgpath && !m->queue_if_no_path))
		must_queue = 0;

	if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
		__pg_init_all_paths(m);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	if (!must_queue)
		dispatch_queued_ios(m);
}

/*
 * An event is triggered whenever a path is taken out of use.
 * Includes path failure and PG bypass.
 */
static void trigger_event(struct work_struct *work)
{
	struct multipath *m =
		container_of(work, struct multipath, trigger_event);

	dm_table_event(m->ti->table);
}

/*-----------------------------------------------------------------
 * Constructor/argument parsing:
 * <#multipath feature args> [<arg>]*
 * <#hw_handler args> [hw_handler [<arg>]*]
 * <#priority groups>
 * <initial priority group>
 *     [<selector> <#selector args> [<arg>]*
 *      <#paths> <#per-path selector args>
 *         [<path> [<arg>]* ]+ ]+
 *---------------------------------------------------------------*/
struct param {
	unsigned min;
	unsigned max;
	char *error;
};

static int read_param(struct param *param, char *str, unsigned *v, char **error)
{
	if (!str ||
	    (sscanf(str, "%u", v) != 1) ||
	    (*v < param->min) ||
	    (*v > param->max)) {
		*error = param->error;
		return -EINVAL;
	}

	return 0;
}

struct arg_set {
	unsigned argc;
	char **argv;
};

static char *shift(struct arg_set *as)
{
	char *r;

	if (as->argc) {
		as->argc--;
		r = *as->argv;
		as->argv++;
		return r;
	}

	return NULL;
}

static void consume(struct arg_set *as, unsigned n)
{
	BUG_ON(as->argc < n);
	as->argc -= n;
	as->argv += n;
}

static int parse_path_selector(struct arg_set *as, struct priority_group *pg,
			       struct dm_target *ti)
{
	int r;
	struct path_selector_type *pst;
	unsigned ps_argc;

	static struct param _params[] = {
		{0, 1024, "invalid number of path selector args"},
	};

	pst = dm_get_path_selector(shift(as));
	if (!pst) {
		ti->error = "unknown path selector type";
		return -EINVAL;
	}

	r = read_param(_params, shift(as), &ps_argc, &ti->error);
	if (r) {
		dm_put_path_selector(pst);
		return -EINVAL;
	}

	if (ps_argc > as->argc) {
		dm_put_path_selector(pst);
		ti->error = "not enough arguments for path selector";
		return -EINVAL;
	}

	r = pst->create(&pg->ps, ps_argc, as->argv);
	if (r) {
		dm_put_path_selector(pst);
		ti->error = "path selector constructor failed";
		return r;
	}

	pg->ps.type = pst;
	consume(as, ps_argc);

	return 0;
}

static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
				 struct dm_target *ti)
{
	int r;
	struct pgpath *p;
	struct multipath *m = ti->private;

	/* we need at least a path arg */
	if (as->argc < 1) {
		ti->error = "no device given";
		return ERR_PTR(-EINVAL);
	}

	p = alloc_pgpath();
	if (!p)
		return ERR_PTR(-ENOMEM);

	r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
			  &p->path.dev);
	if (r) {
		ti->error = "error getting device";
		goto bad;
	}

	if (m->hw_handler_name) {
		struct request_queue *q = bdev_get_queue(p->path.dev->bdev);

		r = scsi_dh_attach(q, m->hw_handler_name);
		if (r == -EBUSY) {
			/*
			 * Already attached to different hw_handler,
			 * try to reattach with correct one.
			 */
			scsi_dh_detach(q);
			r = scsi_dh_attach(q, m->hw_handler_name);
		}

		if (r < 0) {
			ti->error = "error attaching hardware handler";
			dm_put_device(ti, p->path.dev);
			goto bad;
		}

		if (m->hw_handler_params) {
			r = scsi_dh_set_params(q, m->hw_handler_params);
			if (r < 0) {
				ti->error = "unable to set hardware "
						"handler parameters";
				scsi_dh_detach(q);
				dm_put_device(ti, p->path.dev);
				goto bad;
			}
		}
	}

	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
	if (r) {
		dm_put_device(ti, p->path.dev);
		goto bad;
	}

	return p;

 bad:
	free_pgpath(p);
	return ERR_PTR(r);
}

static struct priority_group *parse_priority_group(struct arg_set *as,
						   struct multipath *m)
{
	static struct param _params[] = {
		{1, 1024, "invalid number of paths"},
		{0, 1024, "invalid number of selector args"}
	};

	int r;
	unsigned i, nr_selector_args, nr_params;
	struct priority_group *pg;
	struct dm_target *ti = m->ti;

	if (as->argc < 2) {
		as->argc = 0;
		ti->error = "not enough priority group arguments";
		return ERR_PTR(-EINVAL);
	}

	pg = alloc_priority_group();
	if (!pg) {
		ti->error = "couldn't allocate priority group";
		return ERR_PTR(-ENOMEM);
	}
	pg->m = m;

	r = parse_path_selector(as, pg, ti);
	if (r)
		goto bad;

	/*
	 * read the paths
	 */
	r = read_param(_params, shift(as), &pg->nr_pgpaths, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(as), &nr_selector_args, &ti->error);
	if (r)
		goto bad;

	nr_params = 1 + nr_selector_args;
	for (i = 0; i < pg->nr_pgpaths; i++) {
		struct pgpath *pgpath;
		struct arg_set path_args;

		if (as->argc < nr_params) {
			ti->error = "not enough path parameters";
			r = -EINVAL;
			goto bad;
		}

		path_args.argc = nr_params;
		path_args.argv = as->argv;

		pgpath = parse_path(&path_args, &pg->ps, ti);
		if (IS_ERR(pgpath)) {
			r = PTR_ERR(pgpath);
			goto bad;
		}

		pgpath->pg = pg;
		list_add_tail(&pgpath->list, &pg->pgpaths);
		consume(as, nr_params);
	}

	return pg;

 bad:
	free_priority_group(pg, ti);
	return ERR_PTR(r);
}

static int parse_hw_handler(struct arg_set *as, struct multipath *m)
{
	unsigned hw_argc;
	int ret;
	struct dm_target *ti = m->ti;

	static struct param _params[] = {
		{0, 1024, "invalid number of hardware handler args"},
	};

	if (read_param(_params, shift(as), &hw_argc, &ti->error))
		return -EINVAL;

	if (!hw_argc)
		return 0;

	if (hw_argc > as->argc) {
		ti->error = "not enough arguments for hardware handler";
		return -EINVAL;
	}

	m->hw_handler_name = kstrdup(shift(as), GFP_KERNEL);
	request_module("scsi_dh_%s", m->hw_handler_name);
	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
		ti->error = "unknown hardware handler type";
		ret = -EINVAL;
		goto fail;
	}

	if (hw_argc > 1) {
		char *p;
		int i, j, len = 4;

		for (i = 0; i <= hw_argc - 2; i++)
			len += strlen(as->argv[i]) + 1;
		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
		if (!p) {
			ti->error = "memory allocation failed";
			ret = -ENOMEM;
			goto fail;
		}
		j = sprintf(p, "%d", hw_argc - 1);
		for (i = 0, p += j + 1; i <= hw_argc - 2; i++, p += j + 1)
			j = sprintf(p, "%s", as->argv[i]);
	}
	consume(as, hw_argc - 1);

	return 0;
fail:
	kfree(m->hw_handler_name);
	m->hw_handler_name = NULL;
	return ret;
}

static int parse_features(struct arg_set *as, struct multipath *m)
{
	int r;
	unsigned argc;
	struct dm_target *ti = m->ti;
	const char *param_name;

	static struct param _params[] = {
		{0, 5, "invalid number of feature args"},
		{1, 50, "pg_init_retries must be between 1 and 50"},
		{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
	};

	r = read_param(_params, shift(as), &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	if (argc > as->argc) {
		ti->error = "not enough arguments for features";
		return -EINVAL;
	}

	do {
		param_name = shift(as);
		argc--;

		if (!strnicmp(param_name, MESG_STR("queue_if_no_path"))) {
			r = queue_if_no_path(m, 1, 0);
			continue;
		}

		if (!strnicmp(param_name, MESG_STR("pg_init_retries")) &&
		    (argc >= 1)) {
			r = read_param(_params + 1, shift(as),
				       &m->pg_init_retries, &ti->error);
			argc--;
			continue;
		}

		if (!strnicmp(param_name, MESG_STR("pg_init_delay_msecs")) &&
		    (argc >= 1)) {
			r = read_param(_params + 2, shift(as),
				       &m->pg_init_delay_msecs, &ti->error);
			argc--;
			continue;
		}

		ti->error = "Unrecognised multipath feature request";
		r = -EINVAL;
	} while (argc && !r);

	return r;
}

static int multipath_ctr(struct dm_target *ti, unsigned int argc,
			 char **argv)
{
	/* target parameters */
	static struct param _params[] = {
		{0, 1024, "invalid number of priority groups"},
		{0, 1024, "invalid initial priority group number"},
	};

	int r;
	struct multipath *m;
	struct arg_set as;
	unsigned pg_count = 0;
	unsigned next_pg_num;

	as.argc = argc;
	as.argv = argv;

	m = alloc_multipath(ti);
	if (!m) {
		ti->error = "can't allocate multipath";
		return -EINVAL;
	}

	r = parse_features(&as, m);
	if (r)
		goto bad;

	r = parse_hw_handler(&as, m);
	if (r)
		goto bad;

	r = read_param(_params, shift(&as), &m->nr_priority_groups, &ti->error);
	if (r)
		goto bad;

	r = read_param(_params + 1, shift(&as), &next_pg_num, &ti->error);
	if (r)
		goto bad;

	if ((!m->nr_priority_groups && next_pg_num) ||
	    (m->nr_priority_groups && !next_pg_num)) {
		ti->error = "invalid initial priority group";
		r = -EINVAL;
		goto bad;
	}

	/* parse the priority groups */
	while (as.argc) {
		struct priority_group *pg;

		pg = parse_priority_group(&as, m);
		if (IS_ERR(pg)) {
			r = PTR_ERR(pg);
			goto bad;
		}

		m->nr_valid_paths += pg->nr_pgpaths;
		list_add_tail(&pg->list, &m->priority_groups);
		pg_count++;
		pg->pg_num = pg_count;
		if (!--next_pg_num)
			m->next_pg = pg;
	}

	if (pg_count != m->nr_priority_groups) {
		ti->error = "priority group count mismatch";
		r = -EINVAL;
		goto bad;
	}

	ti->num_flush_requests = 1;
	ti->num_discard_requests = 1;

	return 0;

 bad:
	free_multipath(m);
	return r;
}

static void multipath_wait_for_pg_init_completion(struct multipath *m)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(&m->pg_init_wait, &wait);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(&m->lock, flags);
		if (!m->pg_init_in_progress) {
			spin_unlock_irqrestore(&m->lock, flags);
			break;
		}
		spin_unlock_irqrestore(&m->lock, flags);

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&m->pg_init_wait, &wait);
}

static void flush_multipath_work(struct multipath *m)
{
	flush_workqueue(kmpath_handlerd);
	multipath_wait_for_pg_init_completion(m);
	flush_workqueue(kmultipathd);
	flush_work_sync(&m->trigger_event);
}

static void multipath_dtr(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	flush_multipath_work(m);
	free_multipath(m);
}

/*
 * Map cloned requests
 */
static int multipath_map(struct dm_target *ti, struct request *clone,
			 union map_info *map_context)
{
	int r;
	struct dm_mpath_io *mpio;
	struct multipath *m = (struct multipath *) ti->private;

	mpio = mempool_alloc(m->mpio_pool, GFP_ATOMIC);
	if (!mpio)
		/* ENOMEM, requeue */
		return DM_MAPIO_REQUEUE;
	memset(mpio, 0, sizeof(*mpio));

	map_context->ptr = mpio;
	clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
	r = map_io(m, clone, mpio, 0);
	if (r < 0 || r == DM_MAPIO_REQUEUE)
		mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Take a path out of use.
 */
static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (!pgpath->is_active)
		goto out;

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

/*
 * Reinstate a previously-failed path
 */
static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);

	if (pgpath->is_active)
		goto out;

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	if (!m->nr_valid_paths++ && m->queue_size) {
		m->current_pgpath = NULL;
		queue_work(kmultipathd, &m->process_queued_ios);
	} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

	schedule_work(&m->trigger_event);

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

/*
 * Fail or reinstate all paths that match the provided struct dm_dev.
 */
static int action_dev(struct multipath *m, struct dm_dev *dev,
		      action_fn action)
{
	int r = -EINVAL;
	struct pgpath *pgpath;
	struct priority_group *pg;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}

/*
 * Temporarily try to avoid having to use the specified PG
 */
static void bypass_pg(struct multipath *m, struct priority_group *pg,
		      int bypassed)
{
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	pg->bypassed = bypassed;
	m->current_pgpath = NULL;
	m->current_pg = NULL;

	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
}

/*
 * Switch to using the specified PG from the next I/O that gets mapped
 */
static int switch_pg_num(struct multipath *m, const char *pgstr)
{
	struct priority_group *pg;
	unsigned pgnum;
	unsigned long flags;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to switch_pg_num");
		return -EINVAL;
	}

	spin_lock_irqsave(&m->lock, flags);
	list_for_each_entry(pg, &m->priority_groups, list) {
		pg->bypassed = 0;
		if (--pgnum)
			continue;

		m->current_pgpath = NULL;
		m->current_pg = NULL;
		m->next_pg = pg;
	}
	spin_unlock_irqrestore(&m->lock, flags);

	schedule_work(&m->trigger_event);
	return 0;
}

/*
 * Set/clear bypassed status of a PG.
 * PGs are numbered upwards from 1 in the order they were declared.
 */
static int bypass_pg_num(struct multipath *m, const char *pgstr, int bypassed)
{
	struct priority_group *pg;
	unsigned pgnum;

	if (!pgstr || (sscanf(pgstr, "%u", &pgnum) != 1) || !pgnum ||
	    (pgnum > m->nr_priority_groups)) {
		DMWARN("invalid PG number supplied to bypass_pg");
		return -EINVAL;
	}

	list_for_each_entry(pg, &m->priority_groups, list) {
		if (!--pgnum)
			break;
	}

	bypass_pg(m, pg, bypassed);
	return 0;
}

/*
 * Should we retry pg_init immediately?
 */
static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
{
	unsigned long flags;
	int limit_reached = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (m->pg_init_count <= m->pg_init_retries)
		m->pg_init_required = 1;
	else
		limit_reached = 1;

	spin_unlock_irqrestore(&m->lock, flags);

	return limit_reached;
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;
	unsigned long flags;
	unsigned delay_retry = 0;

	/* device or driver problems */
	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_NOSYS:
		if (!m->hw_handler_name) {
			errors = 0;
			break;
		}
		DMERR("Could not failover the device: Handler scsi_dh_%s "
		      "Error %d.", m->hw_handler_name, errors);
		/*
		 * Fail path for now, so we do not ping pong
		 */
		fail_path(pgpath);
		break;
	case SCSI_DH_DEV_TEMP_BUSY:
		/*
		 * Probably doing something like FW upgrade on the
		 * controller so try the other pg.
		 */
		bypass_pg(m, pg, 1);
		break;
	case SCSI_DH_RETRY:
		/* Wait before retrying. */
		delay_retry = 1;
	case SCSI_DH_IMM_RETRY:
	case SCSI_DH_RES_TEMP_UNAVAIL:
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);
		errors = 0;
		break;
	default:
		/*
		 * We probably do not want to fail the path for a device
		 * error, but this is what the old dm did. In future
		 * patches we can do more advanced handling.
		 */
		fail_path(pgpath);
	}

	spin_lock_irqsave(&m->lock, flags);
	if (errors) {
		if (pgpath == m->current_pgpath) {
			DMERR("Could not failover device. Error %d.", errors);
			m->current_pgpath = NULL;
			m->current_pg = NULL;
		}
	} else if (!m->pg_init_required)
		pg->bypassed = 0;

	if (--m->pg_init_in_progress)
		/* Activations of other paths are still on going */
		goto out;

	if (!m->pg_init_required)
		m->queue_io = 0;

	m->pg_init_delay_retry = delay_retry;
	queue_work(kmultipathd, &m->process_queued_ios);

	/*
	 * Wake up any thread waiting to suspend.
	 */
	wake_up(&m->pg_init_wait);

out:
	spin_unlock_irqrestore(&m->lock, flags);
}

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
			 pg_init_done, pgpath);
}

/*
 * end_io handling
 */
static int do_end_io(struct multipath *m, struct request *clone,
		     int error, struct dm_mpath_io *mpio)
{
	/*
	 * We don't queue any clone request inside the multipath target
	 * during end I/O handling, since those clone requests don't have
	 * bio clones. If we queue them inside the multipath target,
	 * we need to make bio clones, that requires memory allocation.
	 * (See drivers/md/dm.c:end_clone_bio() about why the clone requests
	 *  don't have bio clones.)
	 * Instead of queueing the clone request here, we queue the original
	 * request into dm core, which will remake a clone request and
	 * clone bios for it and resubmit it later.
	 */
	int r = DM_ENDIO_REQUEUE;
	unsigned long flags;

	if (!error && !clone->errors)
		return 0;	/* I/O complete */

	if (error == -EOPNOTSUPP || error == -EREMOTEIO || error == -EILSEQ)
		return error;

	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	spin_lock_irqsave(&m->lock, flags);
	if (!m->nr_valid_paths) {
		if (!m->queue_if_no_path) {
			if (!__must_push_back(m))
				r = -EIO;
		} else {
			if (error == -EBADE)
				r = error;
		}
	}
	spin_unlock_irqrestore(&m->lock, flags);

	return r;
}

static int multipath_end_io(struct dm_target *ti, struct request *clone,
			    int error, union map_info *map_context)
{
	struct multipath *m = ti->private;
	struct dm_mpath_io *mpio = map_context->ptr;
	struct pgpath *pgpath = mpio->pgpath;
	struct path_selector *ps;
	int r;

	r = do_end_io(m, clone, error, mpio);
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}
	mempool_free(mpio, m->mpio_pool);

	return r;
}

/*
 * Suspend can't complete until all the I/O is processed so if
 * the last path fails we must error any remaining I/O.
 * Note that if the freeze_bdev fails while suspending, the
 * queue_if_no_path state is lost - userspace should reset it.
 */
static void multipath_presuspend(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;

	queue_if_no_path(m, 0, 1);
}

static void multipath_postsuspend(struct dm_target *ti)
{
	struct multipath *m = ti->private;

	mutex_lock(&m->work_mutex);
	flush_multipath_work(m);
	mutex_unlock(&m->work_mutex);
}

/*
 * Restore the queue_if_no_path setting.
 */
static void multipath_resume(struct dm_target *ti)
{
	struct multipath *m = (struct multipath *) ti->private;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	m->queue_if_no_path = m->saved_queue_if_no_path;
	spin_unlock_irqrestore(&m->lock, flags);
}

/*
 * Info output has the following format:
 * num_multipath_feature_args [multipath_feature_args]*
 * num_handler_status_args [handler_status_args]*
 * num_groups init_group_number
 *            [A|D|E num_ps_status_args [ps_status_args]*
 *             num_paths num_selector_args
 *             [path_dev A|F fail_count [selector_args]* ]+ ]+
 *
 * Table output has the following format (identical to the constructor string):
 * num_feature_args [features_args]*
 * num_handler_args hw_handler [hw_handler_args]*
 * num_groups init_group_number
 *     [priority selector-name num_ps_args [ps_args]*
 *      num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
 */
static int multipath_status(struct dm_target *ti, status_type_t type,
			    char *result, unsigned int maxlen)
{
	int sz = 0;
	unsigned long flags;
	struct multipath *m = (struct multipath *) ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	unsigned pg_num;
	char state;

	spin_lock_irqsave(&m->lock, flags);

	/* Features */
	if (type == STATUSTYPE_INFO)
		DMEMIT("2 %u %u ", m->queue_size, m->pg_init_count);
	else {
		DMEMIT("%u ", m->queue_if_no_path +
			      (m->pg_init_retries > 0) * 2 +
			      (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2);
		if (m->queue_if_no_path)
			DMEMIT("queue_if_no_path ");
		if (m->pg_init_retries)
			DMEMIT("pg_init_retries %u ", m->pg_init_retries);
		if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
			DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
	}

	if (!m->hw_handler_name || type == STATUSTYPE_INFO)
		DMEMIT("0 ");
	else
		DMEMIT("1 %s ", m->hw_handler_name);

	DMEMIT("%u ", m->nr_priority_groups);

	if (m->next_pg)
		pg_num = m->next_pg->pg_num;
	else if (m->current_pg)
		pg_num = m->current_pg->pg_num;
	else
		pg_num = (m->nr_priority_groups ? 1 : 0);

	DMEMIT("%u ", pg_num);

	switch (type) {
	case STATUSTYPE_INFO:
		list_for_each_entry(pg, &m->priority_groups, list) {
			if (pg->bypassed)
				state = 'D';	/* Disabled */
			else if (pg == m->current_pg)
				state = 'A';	/* Currently Active */
			else
				state = 'E';	/* Enabled */

			DMEMIT("%c ", state);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->info_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s %s %u ", p->path.dev->name,
				       p->is_active ? "A" : "F",
				       p->fail_count);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;

	case STATUSTYPE_TABLE:
		list_for_each_entry(pg, &m->priority_groups, list) {
			DMEMIT("%s ", pg->ps.type->name);

			if (pg->ps.type->status)
				sz += pg->ps.type->status(&pg->ps, NULL, type,
							  result + sz,
							  maxlen - sz);
			else
				DMEMIT("0 ");

			DMEMIT("%u %u ", pg->nr_pgpaths,
			       pg->ps.type->table_args);

			list_for_each_entry(p, &pg->pgpaths, list) {
				DMEMIT("%s ", p->path.dev->name);
				if (pg->ps.type->status)
					sz += pg->ps.type->status(&pg->ps,
					      &p->path, type, result + sz,
					      maxlen - sz);
			}
		}
		break;
	}

	spin_unlock_irqrestore(&m->lock, flags);

	return 0;
}

static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
{
	int r = -EINVAL;
	struct dm_dev *dev;
	struct multipath *m = (struct multipath *) ti->private;
	action_fn action;

	mutex_lock(&m->work_mutex);

	if (dm_suspended(ti)) {
		r = -EBUSY;
		goto out;
	}

	if (argc == 1) {
		if (!strnicmp(argv[0], MESG_STR("queue_if_no_path"))) {
			r = queue_if_no_path(m, 1, 0);
			goto out;
		} else if (!strnicmp(argv[0], MESG_STR("fail_if_no_path"))) {
			r = queue_if_no_path(m, 0, 0);
			goto out;
		}
	}

	if (argc != 2) {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	if (!strnicmp(argv[0], MESG_STR("disable_group"))) {
		r = bypass_pg_num(m, argv[1], 1);
		goto out;
	} else if (!strnicmp(argv[0], MESG_STR("enable_group"))) {
		r = bypass_pg_num(m, argv[1], 0);
		goto out;
	} else if (!strnicmp(argv[0], MESG_STR("switch_group"))) {
		r = switch_pg_num(m, argv[1]);
		goto out;
	} else if (!strnicmp(argv[0], MESG_STR("reinstate_path")))
		action = reinstate_path;
	else if (!strnicmp(argv[0], MESG_STR("fail_path")))
		action = fail_path;
	else {
		DMWARN("Unrecognised multipath message received.");
		goto out;
	}

	r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
	if (r) {
		DMWARN("message: error getting device %s",
		       argv[1]);
		goto out;
	}

	r = action_dev(m, dev, action);

	dm_put_device(ti, dev);

out:
	mutex_unlock(&m->work_mutex);
	return r;
}

static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
			   unsigned long arg)
{
	struct multipath *m = (struct multipath *) ti->private;
	struct block_device *bdev = NULL;
	fmode_t mode = 0;
	unsigned long flags;
	int r = 0;

	spin_lock_irqsave(&m->lock, flags);

	if (!m->current_pgpath)
		__choose_pgpath(m, 0);

	if (m->current_pgpath) {
		bdev = m->current_pgpath->path.dev->bdev;
		mode = m->current_pgpath->path.dev->mode;
	}

	if (m->queue_io)
		r = -EAGAIN;
	else if (!bdev)
		r = -EIO;

	spin_unlock_irqrestore(&m->lock, flags);

	/*
	 * Only pass ioctls through if the device sizes match exactly.
	 */
	if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
		r = scsi_verify_blk_ioctl(NULL, cmd);

	return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}

static int multipath_iterate_devices(struct dm_target *ti,
				     iterate_devices_callout_fn fn, void *data)
{
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *p;
	int ret = 0;

	list_for_each_entry(pg, &m->priority_groups, list) {
		list_for_each_entry(p, &pg->pgpaths, list) {
			ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return dm_underlying_device_busy(q);
}

/*
 * We return "busy", only when we can map I/Os but underlying devices
 * are busy (so even if we map I/Os now, the I/Os will wait on
 * the underlying queue).
 * In other words, if we want to kill I/Os or queue them inside us
 * due to map unavailability, we don't return "busy". Otherwise,
 * dm core won't give us the I/Os and we can't do what we want.
 */
static int multipath_busy(struct dm_target *ti)
{
	int busy = 0, has_active = 0;
	struct multipath *m = ti->private;
	struct priority_group *pg;
	struct pgpath *pgpath;
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);

	/* Guess which priority_group will be used at next mapping time */
	if (unlikely(!m->current_pgpath && m->next_pg))
		pg = m->next_pg;
	else if (likely(m->current_pg))
		pg = m->current_pg;
	else
		/*
		 * We don't know which pg will be used at next mapping time.
		 * We don't call __choose_pgpath() here to avoid to trigger
		 * pg_init just by busy checking.
		 * So we don't know whether underlying devices we will be using
		 * at next mapping time are busy or not. Just try mapping.
		 */
		goto out;

	/*
	 * If there is one non-busy active path at least, the path selector
	 * will be able to select it. So we consider such a pg as not busy.
	 */
	busy = 1;
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;

			if (!__pgpath_busy(pgpath)) {
				busy = 0;
				break;
			}
		}

	if (!has_active)
		/*
		 * No active path in this pg, so this pg won't be used and
		 * the current_pg will be changed at next mapping time.
		 * We need to try mapping to determine it.
		 */
		busy = 0;

out:
	spin_unlock_irqrestore(&m->lock, flags);

	return busy;
}

/*-----------------------------------------------------------------
 * Module setup
 *---------------------------------------------------------------*/
static struct target_type multipath_target = {
	.name = "multipath",
	.version = {1, 3, 0},
	.module = THIS_MODULE,
	.ctr = multipath_ctr,
	.dtr = multipath_dtr,
	.map_rq = multipath_map,
	.rq_end_io = multipath_end_io,
	.presuspend = multipath_presuspend,
	.postsuspend = multipath_postsuspend,
	.resume = multipath_resume,
	.status = multipath_status,
	.message = multipath_message,
	.ioctl = multipath_ioctl,
	.iterate_devices = multipath_iterate_devices,
	.busy = multipath_busy,
};

static int __init dm_multipath_init(void)
{
	int r;

	/* allocate a slab for the dm_ios */
	_mpio_cache = KMEM_CACHE(dm_mpath_io, 0);
	if (!_mpio_cache)
		return -ENOMEM;

	r = dm_register_target(&multipath_target);
	if (r < 0) {
		DMERR("register failed %d", r);
		kmem_cache_destroy(_mpio_cache);
		return -EINVAL;
	}

	kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
	if (!kmultipathd) {
		DMERR("failed to create workqueue kmpathd");
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	/*
	 * A separate workqueue is used to handle the device handlers
	 * to avoid overloading existing workqueue. Overloading the
	 * old workqueue would also create a bottleneck in the
	 * path of the storage hardware device activation.
	 */
	kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
						  WQ_MEM_RECLAIM);
	if (!kmpath_handlerd) {
		DMERR("failed to create workqueue kmpath_handlerd");
		destroy_workqueue(kmultipathd);
		dm_unregister_target(&multipath_target);
		kmem_cache_destroy(_mpio_cache);
		return -ENOMEM;
	}

	DMINFO("version %u.%u.%u loaded",
	       multipath_target.version[0], multipath_target.version[1],
	       multipath_target.version[2]);

	return r;
}

static void __exit dm_multipath_exit(void)
{
	destroy_workqueue(kmpath_handlerd);
	destroy_workqueue(kmultipathd);

	dm_unregister_target(&multipath_target);
	kmem_cache_destroy(_mpio_cache);
}

module_init(dm_multipath_init);
module_exit(dm_multipath_exit);

MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");