/tests/zfs-tests/tests/functional/cli_root/zpool_add/add_nested_replacing_spare.ksh


#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# 'zpool add' works with nested replacing/spare vdevs
#
# STRATEGY:
# 1. Create a redundant pool with a spare device
# 2. Manually fault a device, wait for the hot spare to kick in, then replace
#    the faulted device: this creates nested replacing and spare vdevs
#    (see the sketch below).
# 3. Verify 'zpool add' is able to add new devices to the pool.
#
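#
# For the mirror case, the nesting this test drives the pool into looks
# like the following ('zpool status' ordering, asserted by the awk check
# in the loop body):
#
#   $TESTPOOL
#     mirror-0
#       spare-0
#         replacing-0
#           fault-dev     (faulted via zinject)
#           replace-dev   (resilver target)
#         spare-dev1      (hot spare, INUSE)
#       safe-dev1 ... safe-dev3
#
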
verify_runnable "global"

function cleanup
{
	zed_stop
	zed_cleanup
	log_must zinject -c all
	destroy_pool $TESTPOOL
	log_must rm -f $DATA_DEVS $SPARE_DEVS
}

log_assert "'zpool add' works with nested replacing/spare vdevs"
log_onexit cleanup

FAULT_DEV="$TEST_BASE_DIR/fault-dev"
SAFE_DEV1="$TEST_BASE_DIR/safe-dev1"
SAFE_DEV2="$TEST_BASE_DIR/safe-dev2"
SAFE_DEV3="$TEST_BASE_DIR/safe-dev3"
SAFE_DEVS="$SAFE_DEV1 $SAFE_DEV2 $SAFE_DEV3"
REPLACE_DEV="$TEST_BASE_DIR/replace-dev"
ADD_DEV="$TEST_BASE_DIR/add-dev"
DATA_DEVS="$FAULT_DEV $SAFE_DEVS $REPLACE_DEV $ADD_DEV"
SPARE_DEV1="$TEST_BASE_DIR/spare-dev1"
SPARE_DEV2="$TEST_BASE_DIR/spare-dev2"
SPARE_DEVS="$SPARE_DEV1 $SPARE_DEV2"
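
# Device roles: FAULT_DEV is error-injected, SPARE_DEV1 takes over for it,
# REPLACE_DEV replaces it while the spare is still active, and SPARE_DEV2 /
# ADD_DEV are the devices added once the nested replacing/spare vdevs exist.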

# ZED must be running: it is the component that attaches hot spares in
# response to vdev fault events.
zed_setup
zed_start

# Clear events from previous runs
zed_events_drain

for type in "mirror" "raidz1" "raidz2" "raidz3"
do
	# 1. Create a redundant pool with a spare device
	truncate -s $SPA_MINDEVSIZE $DATA_DEVS $SPARE_DEVS
	log_must zpool create $TESTPOOL $type $FAULT_DEV $SAFE_DEVS
	log_must zpool add $TESTPOOL spare $SPARE_DEV1

	# 2.1 Fault a device, verify the spare is kicked in
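	# 'zinject -e nxio -T all -f 100' makes every I/O to FAULT_DEV fail
	# with ENXIO; 'zpool reopen' forces the vdev to be reopened so the
	# failure is noticed, and ZED then attaches SPARE_DEV1 automatically.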
	log_must zinject -d $FAULT_DEV -e nxio -T all -f 100 $TESTPOOL
	log_must zpool reopen $TESTPOOL
	log_must wait_vdev_state $TESTPOOL $FAULT_DEV "UNAVAIL" 60
	log_must wait_vdev_state $TESTPOOL $SPARE_DEV1 "ONLINE" 60
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV1 "INUSE"
	log_must check_state $TESTPOOL "" "DEGRADED"

	# 2.2 Replace the faulted device: this creates a replacing vdev
	# inside a spare vdev
	log_must zpool replace $TESTPOOL $FAULT_DEV $REPLACE_DEV
	log_must wait_vdev_state $TESTPOOL $REPLACE_DEV "ONLINE" 60

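	# Check the vdev nesting: starting at the 'zpool status' line naming
	# the pool, the awk script joins the first field of that line and the
	# three below it and expects "<pool>:<type>-0:spare-0:replacing-0:".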
	zpool status | awk -v poolname="$TESTPOOL" -v type="$type" 'BEGIN {s=""}
	    $1 ~ poolname {c=4}; (c && c--) { s=s$1":" }
	    END { if (s != poolname":"type"-0:spare-0:replacing-0:") exit 1; }'
	if [[ $? -ne 0 ]]; then
		log_fail "Pool does not contain nested replacing/spare vdevs"
	fi

	# 3. Verify 'zpool add' is able to add new devices
	log_must zpool add $TESTPOOL spare $SPARE_DEV2
	log_must wait_hotspare_state $TESTPOOL $SPARE_DEV2 "AVAIL"
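	# '-f' is required here: adding a single non-redundant disk to a
	# mirror/raidz pool is a replication-level mismatch that 'zpool add'
	# otherwise refuses.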
	log_must zpool add -f $TESTPOOL $ADD_DEV
	log_must wait_vdev_state $TESTPOOL $ADD_DEV "ONLINE" 60

	# Cleanup
	log_must zinject -c all
	destroy_pool $TESTPOOL
	log_must rm -f $DATA_DEVS $SPARE_DEVS
done

log_pass "'zpool add' works with nested replacing/spare vdevs"