PageRenderTime 29ms CodeModel.GetById 13ms RepoModel.GetById 0ms app.codeStats 1ms

/tests/zfs-tests/include/libtest.shlib

http://github.com/zfsonlinux/zfs
Korn Shell | 2646 lines | 1670 code | 353 blank | 623 comment | 310 complexity | 02b9131f0868341d38030a95ff9fc876 MD5 | raw file
Possible License(s): Apache-2.0, MPL-2.0-no-copyleft-exception

Large files are truncated, but you can click here to view the full file

  1. #!/bin/ksh -p
  2. #
  3. # CDDL HEADER START
  4. #
  5. # The contents of this file are subject to the terms of the
  6. # Common Development and Distribution License (the "License").
  7. # You may not use this file except in compliance with the License.
  8. #
  9. # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  10. # or http://www.opensolaris.org/os/licensing.
  11. # See the License for the specific language governing permissions
  12. # and limitations under the License.
  13. #
  14. # When distributing Covered Code, include this CDDL HEADER in each
  15. # file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  16. # If applicable, add the following below this CDDL HEADER, with the
  17. # fields enclosed by brackets "[]" replaced with your own identifying
  18. # information: Portions Copyright [yyyy] [name of copyright owner]
  19. #
  20. # CDDL HEADER END
  21. #
  22. #
  23. # Copyright 2009 Sun Microsystems, Inc. All rights reserved.
  24. # Use is subject to license terms.
  25. #
  26. #
  27. # Copyright (c) 2012, 2015 by Delphix. All rights reserved.
  28. #
  29. . ${STF_TOOLS}/include/logapi.shlib
  30. # Determine if this is a Linux test system
  31. #
  32. # Return 0 if platform Linux, 1 if otherwise
  33. function is_linux
  34. {
  35. if [[ $($UNAME -o) == "GNU/Linux" ]]; then
  36. return 0
  37. else
  38. return 1
  39. fi
  40. }
  41. # Determine whether a dataset is mounted
  42. #
  43. # $1 dataset name
  44. # $2 filesystem type; optional - defaulted to zfs
  45. #
  46. # Return 0 if dataset is mounted; 1 if unmounted; 2 on error
  47. function ismounted
  48. {
  49. typeset fstype=$2
  50. [[ -z $fstype ]] && fstype=zfs
  51. typeset out dir name ret
  52. case $fstype in
  53. zfs)
  54. if [[ "$1" == "/"* ]] ; then
  55. for out in $($ZFS mount | $AWK '{print $2}'); do
  56. [[ $1 == $out ]] && return 0
  57. done
  58. else
  59. for out in $($ZFS mount | $AWK '{print $1}'); do
  60. [[ $1 == $out ]] && return 0
  61. done
  62. fi
  63. ;;
  64. ufs|nfs)
  65. out=$($DF -F $fstype $1 2>/dev/null)
  66. ret=$?
  67. (($ret != 0)) && return $ret
  68. dir=${out%%\(*}
  69. dir=${dir%% *}
  70. name=${out##*\(}
  71. name=${name%%\)*}
  72. name=${name%% *}
  73. [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
  74. ;;
  75. ext2)
  76. out=$($DF -t $fstype $1 2>/dev/null)
  77. return $?
  78. ;;
  79. zvol)
  80. if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
  81. link=$(readlink -f $ZVOL_DEVDIR/$1)
  82. [[ -n "$link" ]] && \
  83. $MOUNT | $GREP -q "^$link" && \
  84. return 0
  85. fi
  86. ;;
  87. esac
  88. return 1
  89. }
  90. # Return 0 if a dataset is mounted; 1 otherwise
  91. #
  92. # $1 dataset name
  93. # $2 filesystem type; optional - defaulted to zfs
  94. function mounted
  95. {
  96. ismounted $1 $2
  97. (($? == 0)) && return 0
  98. return 1
  99. }
  100. # Return 0 if a dataset is unmounted; 1 otherwise
  101. #
  102. # $1 dataset name
  103. # $2 filesystem type; optional - defaulted to zfs
  104. function unmounted
  105. {
  106. ismounted $1 $2
  107. (($? == 1)) && return 0
  108. return 1
  109. }
  110. # split line on ","
  111. #
  112. # $1 - line to split
  113. function splitline
  114. {
  115. $ECHO $1 | $SED "s/,/ /g"
  116. }
  117. function default_setup
  118. {
  119. default_setup_noexit "$@"
  120. log_pass
  121. }
  122. #
  123. # Given a list of disks, setup storage pools and datasets.
  124. #
  125. function default_setup_noexit
  126. {
  127. typeset disklist=$1
  128. typeset container=$2
  129. typeset volume=$3
  130. if is_global_zone; then
  131. if poolexists $TESTPOOL ; then
  132. destroy_pool $TESTPOOL
  133. fi
  134. [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
  135. log_must $ZPOOL create -f $TESTPOOL $disklist
  136. else
  137. reexport_pool
  138. fi
  139. $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
  140. $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR
  141. log_must $ZFS create $TESTPOOL/$TESTFS
  142. log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
  143. if [[ -n $container ]]; then
  144. $RM -rf $TESTDIR1 || \
  145. log_unresolved Could not remove $TESTDIR1
  146. $MKDIR -p $TESTDIR1 || \
  147. log_unresolved Could not create $TESTDIR1
  148. log_must $ZFS create $TESTPOOL/$TESTCTR
  149. log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR
  150. log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1
  151. log_must $ZFS set mountpoint=$TESTDIR1 \
  152. $TESTPOOL/$TESTCTR/$TESTFS1
  153. fi
  154. if [[ -n $volume ]]; then
  155. if is_global_zone ; then
  156. log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL
  157. block_device_wait
  158. else
  159. log_must $ZFS create $TESTPOOL/$TESTVOL
  160. fi
  161. fi
  162. }
  163. #
  164. # Given a list of disks, setup a storage pool, file system and
  165. # a container.
  166. #
  167. function default_container_setup
  168. {
  169. typeset disklist=$1
  170. default_setup "$disklist" "true"
  171. }
  172. #
  173. # Given a list of disks, setup a storage pool,file system
  174. # and a volume.
  175. #
  176. function default_volume_setup
  177. {
  178. typeset disklist=$1
  179. default_setup "$disklist" "" "true"
  180. }
  181. #
  182. # Given a list of disks, setup a storage pool,file system,
  183. # a container and a volume.
  184. #
  185. function default_container_volume_setup
  186. {
  187. typeset disklist=$1
  188. default_setup "$disklist" "true" "true"
  189. }
  190. #
  191. # Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
  192. # filesystem
  193. #
  194. # $1 Existing filesystem or volume name. Default, $TESTFS
  195. # $2 snapshot name. Default, $TESTSNAP
  196. #
  197. function create_snapshot
  198. {
  199. typeset fs_vol=${1:-$TESTFS}
  200. typeset snap=${2:-$TESTSNAP}
  201. [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
  202. [[ -z $snap ]] && log_fail "Snapshot's name is undefined."
  203. if snapexists $fs_vol@$snap; then
  204. log_fail "$fs_vol@$snap already exists."
  205. fi
  206. datasetexists $fs_vol || \
  207. log_fail "$fs_vol must exist."
  208. log_must $ZFS snapshot $fs_vol@$snap
  209. }
  210. #
  211. # Create a clone from a snapshot, default clone name is $TESTCLONE.
  212. #
  213. # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
  214. # $2 Clone name, $TESTPOOL/$TESTCLONE is default.
  215. #
  216. function create_clone # snapshot clone
  217. {
  218. typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
  219. typeset clone=${2:-$TESTPOOL/$TESTCLONE}
  220. [[ -z $snap ]] && \
  221. log_fail "Snapshot name is undefined."
  222. [[ -z $clone ]] && \
  223. log_fail "Clone name is undefined."
  224. log_must $ZFS clone $snap $clone
  225. }
  226. function default_mirror_setup
  227. {
  228. default_mirror_setup_noexit $1 $2 $3
  229. log_pass
  230. }
  231. #
  232. # Given a pair of disks, set up a storage pool and dataset for the mirror
  233. # @parameters: $1 the primary side of the mirror
  234. # $2 the secondary side of the mirror
  235. # @uses: ZPOOL ZFS TESTPOOL TESTFS
  236. function default_mirror_setup_noexit
  237. {
  238. readonly func="default_mirror_setup_noexit"
  239. typeset primary=$1
  240. typeset secondary=$2
  241. [[ -z $primary ]] && \
  242. log_fail "$func: No parameters passed"
  243. [[ -z $secondary ]] && \
  244. log_fail "$func: No secondary partition passed"
  245. [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
  246. log_must $ZPOOL create -f $TESTPOOL mirror $@
  247. log_must $ZFS create $TESTPOOL/$TESTFS
  248. log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
  249. }
  250. #
  251. # create a number of mirrors.
  252. # We create a number($1) of 2 way mirrors using the pairs of disks named
  253. # on the command line. These mirrors are *not* mounted
  254. # @parameters: $1 the number of mirrors to create
  255. # $... the devices to use to create the mirrors on
  256. # @uses: ZPOOL ZFS TESTPOOL
  257. function setup_mirrors
  258. {
  259. typeset -i nmirrors=$1
  260. shift
  261. while ((nmirrors > 0)); do
  262. log_must test -n "$1" -a -n "$2"
  263. [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors
  264. log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2
  265. shift 2
  266. ((nmirrors = nmirrors - 1))
  267. done
  268. }
  269. #
  270. # create a number of raidz pools.
  271. # We create a number($1) of 2 raidz pools using the pairs of disks named
  272. # on the command line. These pools are *not* mounted
  273. # @parameters: $1 the number of pools to create
  274. # $... the devices to use to create the pools on
  275. # @uses: ZPOOL ZFS TESTPOOL
  276. function setup_raidzs
  277. {
  278. typeset -i nraidzs=$1
  279. shift
  280. while ((nraidzs > 0)); do
  281. log_must test -n "$1" -a -n "$2"
  282. [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs
  283. log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2
  284. shift 2
  285. ((nraidzs = nraidzs - 1))
  286. done
  287. }
  288. #
  289. # Destroy the configured testpool mirrors.
  290. # the mirrors are of the form ${TESTPOOL}{number}
  291. # @uses: ZPOOL ZFS TESTPOOL
  292. function destroy_mirrors
  293. {
  294. default_cleanup_noexit
  295. log_pass
  296. }
  297. #
  298. # Given a minimum of two disks, set up a storage pool and dataset for the raid-z
  299. # $1 the list of disks
  300. #
  301. function default_raidz_setup
  302. {
  303. typeset disklist="$*"
  304. disks=(${disklist[*]})
  305. if [[ ${#disks[*]} -lt 2 ]]; then
  306. log_fail "A raid-z requires a minimum of two disks."
  307. fi
  308. [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL
  309. log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3
  310. log_must $ZFS create $TESTPOOL/$TESTFS
  311. log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
  312. log_pass
  313. }
  314. #
  315. # Common function used to cleanup storage pools and datasets.
  316. #
  317. # Invoked at the start of the test suite to ensure the system
  318. # is in a known state, and also at the end of each set of
  319. # sub-tests to ensure errors from one set of tests doesn't
  320. # impact the execution of the next set.
  321. function default_cleanup
  322. {
  323. default_cleanup_noexit
  324. log_pass
  325. }
  326. function default_cleanup_noexit
  327. {
  328. typeset exclude=""
  329. typeset pool=""
  330. #
  331. # Destroying the pool will also destroy any
  332. # filesystems it contains.
  333. #
  334. if is_global_zone; then
  335. $ZFS unmount -a > /dev/null 2>&1
  336. [[ -z "$KEEP" ]] && KEEP="rpool"
  337. exclude=`eval $ECHO \"'(${KEEP})'\"`
  338. ALL_POOLS=$($ZPOOL list -H -o name \
  339. | $GREP -v "$NO_POOLS" | $EGREP -v "$exclude")
  340. # Here, we loop through the pools we're allowed to
  341. # destroy, only destroying them if it's safe to do
  342. # so.
  343. while [ ! -z ${ALL_POOLS} ]
  344. do
  345. for pool in ${ALL_POOLS}
  346. do
  347. if safe_to_destroy_pool $pool ;
  348. then
  349. destroy_pool $pool
  350. fi
  351. ALL_POOLS=$($ZPOOL list -H -o name \
  352. | $GREP -v "$NO_POOLS" \
  353. | $EGREP -v "$exclude")
  354. done
  355. done
  356. $ZFS mount -a
  357. else
  358. typeset fs=""
  359. for fs in $($ZFS list -H -o name \
  360. | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
  361. datasetexists $fs && \
  362. log_must $ZFS destroy -Rf $fs
  363. done
  364. # Need cleanup here to avoid garbage dir left.
  365. for fs in $($ZFS list -H -o name); do
  366. [[ $fs == /$ZONE_POOL ]] && continue
  367. [[ -d $fs ]] && log_must $RM -rf $fs/*
  368. done
  369. #
  370. # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
  371. # the default value
  372. #
  373. for fs in $($ZFS list -H -o name); do
  374. if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
  375. log_must $ZFS set reservation=none $fs
  376. log_must $ZFS set recordsize=128K $fs
  377. log_must $ZFS set mountpoint=/$fs $fs
  378. typeset enc=""
  379. enc=$(get_prop encryption $fs)
  380. if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
  381. [[ "$enc" == "off" ]]; then
  382. log_must $ZFS set checksum=on $fs
  383. fi
  384. log_must $ZFS set compression=off $fs
  385. log_must $ZFS set atime=on $fs
  386. log_must $ZFS set devices=off $fs
  387. log_must $ZFS set exec=on $fs
  388. log_must $ZFS set setuid=on $fs
  389. log_must $ZFS set readonly=off $fs
  390. log_must $ZFS set snapdir=hidden $fs
  391. log_must $ZFS set aclmode=groupmask $fs
  392. log_must $ZFS set aclinherit=secure $fs
  393. fi
  394. done
  395. fi
  396. [[ -d $TESTDIR ]] && \
  397. log_must $RM -rf $TESTDIR
  398. }
  399. #
  400. # Common function used to cleanup storage pools, file systems
  401. # and containers.
  402. #
  403. function default_container_cleanup
  404. {
  405. if ! is_global_zone; then
  406. reexport_pool
  407. fi
  408. ismounted $TESTPOOL/$TESTCTR/$TESTFS1
  409. [[ $? -eq 0 ]] && \
  410. log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1
  411. datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \
  412. log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1
  413. datasetexists $TESTPOOL/$TESTCTR && \
  414. log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR
  415. [[ -e $TESTDIR1 ]] && \
  416. log_must $RM -rf $TESTDIR1 > /dev/null 2>&1
  417. default_cleanup
  418. }
  419. #
  420. # Common function used to cleanup snapshot of file system or volume. Default to
  421. # delete the file system's snapshot
  422. #
  423. # $1 snapshot name
  424. #
  425. function destroy_snapshot
  426. {
  427. typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
  428. if ! snapexists $snap; then
  429. log_fail "'$snap' does not existed."
  430. fi
  431. #
  432. # For the sake of the value which come from 'get_prop' is not equal
  433. # to the really mountpoint when the snapshot is unmounted. So, firstly
  434. # check and make sure this snapshot's been mounted in current system.
  435. #
  436. typeset mtpt=""
  437. if ismounted $snap; then
  438. mtpt=$(get_prop mountpoint $snap)
  439. (($? != 0)) && \
  440. log_fail "get_prop mountpoint $snap failed."
  441. fi
  442. log_must $ZFS destroy $snap
  443. [[ $mtpt != "" && -d $mtpt ]] && \
  444. log_must $RM -rf $mtpt
  445. }
  446. #
  447. # Common function used to cleanup clone.
  448. #
  449. # $1 clone name
  450. #
  451. function destroy_clone
  452. {
  453. typeset clone=${1:-$TESTPOOL/$TESTCLONE}
  454. if ! datasetexists $clone; then
  455. log_fail "'$clone' does not existed."
  456. fi
  457. # With the same reason in destroy_snapshot
  458. typeset mtpt=""
  459. if ismounted $clone; then
  460. mtpt=$(get_prop mountpoint $clone)
  461. (($? != 0)) && \
  462. log_fail "get_prop mountpoint $clone failed."
  463. fi
  464. log_must $ZFS destroy $clone
  465. [[ $mtpt != "" && -d $mtpt ]] && \
  466. log_must $RM -rf $mtpt
  467. }
  468. # Return 0 if a snapshot exists; $? otherwise
  469. #
  470. # $1 - snapshot name
  471. function snapexists
  472. {
  473. $ZFS list -H -t snapshot "$1" > /dev/null 2>&1
  474. return $?
  475. }
  476. #
  477. # Set a property to a certain value on a dataset.
  478. # Sets a property of the dataset to the value as passed in.
  479. # @param:
  480. # $1 dataset who's property is being set
  481. # $2 property to set
  482. # $3 value to set property to
  483. # @return:
  484. # 0 if the property could be set.
  485. # non-zero otherwise.
  486. # @use: ZFS
  487. #
  488. function dataset_setprop
  489. {
  490. typeset fn=dataset_setprop
  491. if (($# < 3)); then
  492. log_note "$fn: Insufficient parameters (need 3, had $#)"
  493. return 1
  494. fi
  495. typeset output=
  496. output=$($ZFS set $2=$3 $1 2>&1)
  497. typeset rv=$?
  498. if ((rv != 0)); then
  499. log_note "Setting property on $1 failed."
  500. log_note "property $2=$3"
  501. log_note "Return Code: $rv"
  502. log_note "Output: $output"
  503. return $rv
  504. fi
  505. return 0
  506. }
  507. #
  508. # Assign suite defined dataset properties.
  509. # This function is used to apply the suite's defined default set of
  510. # properties to a dataset.
  511. # @parameters: $1 dataset to use
  512. # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
  513. # @returns:
  514. # 0 if the dataset has been altered.
  515. # 1 if no pool name was passed in.
  516. # 2 if the dataset could not be found.
  517. # 3 if the dataset could not have it's properties set.
  518. #
  519. function dataset_set_defaultproperties
  520. {
  521. typeset dataset="$1"
  522. [[ -z $dataset ]] && return 1
  523. typeset confset=
  524. typeset -i found=0
  525. for confset in $($ZFS list); do
  526. if [[ $dataset = $confset ]]; then
  527. found=1
  528. break
  529. fi
  530. done
  531. [[ $found -eq 0 ]] && return 2
  532. if [[ -n $COMPRESSION_PROP ]]; then
  533. dataset_setprop $dataset compression $COMPRESSION_PROP || \
  534. return 3
  535. log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
  536. fi
  537. if [[ -n $CHECKSUM_PROP ]]; then
  538. dataset_setprop $dataset checksum $CHECKSUM_PROP || \
  539. return 3
  540. log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
  541. fi
  542. return 0
  543. }
  544. #
  545. # Check a numeric assertion
  546. # @parameter: $@ the assertion to check
  547. # @output: big loud notice if assertion failed
  548. # @use: log_fail
  549. #
  550. function assert
  551. {
  552. (($@)) || log_fail "$@"
  553. }
  554. #
  555. # Function to format partition size of a disk
  556. # Given a disk cxtxdx reduces all partitions
  557. # to 0 size
  558. #
  559. function zero_partitions #<whole_disk_name>
  560. {
  561. typeset diskname=$1
  562. typeset i
  563. if is_linux; then
  564. log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt
  565. else
  566. for i in 0 1 3 4 5 6 7
  567. do
  568. set_partition $i "" 0mb $diskname
  569. done
  570. fi
  571. }
  572. #
  573. # Given a slice, size and disk, this function
  574. # formats the slice to the specified size.
  575. # Size should be specified with units as per
  576. # the `format` command requirements eg. 100mb 3gb
  577. #
  578. # NOTE: This entire interface is problematic for the Linux parted utilty
  579. # which requires the end of the partition to be specified. It would be
  580. # best to retire this interface and replace it with something more flexible.
  581. # At the moment a best effort is made.
  582. #
  583. function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
  584. {
  585. typeset -i slicenum=$1
  586. typeset start=$2
  587. typeset size=$3
  588. typeset disk=$4
  589. [[ -z $slicenum || -z $size || -z $disk ]] && \
  590. log_fail "The slice, size or disk name is unspecified."
  591. if is_linux; then
  592. typeset size_mb=${size%%[mMgG]}
  593. size_mb=${size_mb%%[mMgG][bB]}
  594. if [[ ${size:1:1} == 'g' ]]; then
  595. ((size_mb = size_mb * 1024))
  596. fi
  597. # Create GPT partition table when setting slice 0 or
  598. # when the device doesn't already contain a GPT label.
  599. $FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
  600. typeset ret_val=$?
  601. if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
  602. log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt
  603. fi
  604. # When no start is given align on the first cylinder.
  605. if [[ -z "$start" ]]; then
  606. start=1
  607. fi
  608. # Determine the cylinder size for the device and using
  609. # that calculate the end offset in cylinders.
  610. typeset -i cly_size_kb=0
  611. cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \
  612. unit cyl print | $HEAD -3 | $TAIL -1 | \
  613. $AWK -F '[:k.]' '{print $4}')
  614. ((end = (size_mb * 1024 / cly_size_kb) + start))
  615. log_must $FORMAT $DEV_DSKDIR/$disk -s -- \
  616. mkpart part$slicenum ${start}cyl ${end}cyl
  617. $BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
  618. block_device_wait
  619. else
  620. typeset format_file=/var/tmp/format_in.$$
  621. $ECHO "partition" >$format_file
  622. $ECHO "$slicenum" >> $format_file
  623. $ECHO "" >> $format_file
  624. $ECHO "" >> $format_file
  625. $ECHO "$start" >> $format_file
  626. $ECHO "$size" >> $format_file
  627. $ECHO "label" >> $format_file
  628. $ECHO "" >> $format_file
  629. $ECHO "q" >> $format_file
  630. $ECHO "q" >> $format_file
  631. $FORMAT -e -s -d $disk -f $format_file
  632. fi
  633. typeset ret_val=$?
  634. $RM -f $format_file
  635. [[ $ret_val -ne 0 ]] && \
  636. log_fail "Unable to format $disk slice $slicenum to $size"
  637. return 0
  638. }
  639. #
  640. # Get the end cyl of the given slice
  641. #
  642. function get_endslice #<disk> <slice>
  643. {
  644. typeset disk=$1
  645. typeset slice=$2
  646. if [[ -z $disk || -z $slice ]] ; then
  647. log_fail "The disk name or slice number is unspecified."
  648. fi
  649. if is_linux; then
  650. endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \
  651. $GREP "part${slice}" | \
  652. $AWK '{print $3}' | \
  653. $SED 's,cyl,,')
  654. ((endcyl = (endcyl + 1)))
  655. else
  656. disk=${disk#/dev/dsk/}
  657. disk=${disk#/dev/rdsk/}
  658. disk=${disk%s*}
  659. typeset -i ratio=0
  660. ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \
  661. $GREP "sectors\/cylinder" | \
  662. $AWK '{print $2}')
  663. if ((ratio == 0)); then
  664. return
  665. fi
  666. typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 |
  667. $NAWK -v token="$slice" '{if ($1==token) print $6}')
  668. ((endcyl = (endcyl + 1) / ratio))
  669. fi
  670. echo $endcyl
  671. }
  672. #
  673. # Given a size,disk and total slice number, this function formats the
  674. # disk slices from 0 to the total slice number with the same specified
  675. # size.
  676. #
  677. function partition_disk #<slice_size> <whole_disk_name> <total_slices>
  678. {
  679. typeset -i i=0
  680. typeset slice_size=$1
  681. typeset disk_name=$2
  682. typeset total_slices=$3
  683. typeset cyl
  684. zero_partitions $disk_name
  685. while ((i < $total_slices)); do
  686. if ! is_linux; then
  687. if ((i == 2)); then
  688. ((i = i + 1))
  689. continue
  690. fi
  691. fi
  692. set_partition $i "$cyl" $slice_size $disk_name
  693. cyl=$(get_endslice $disk_name $i)
  694. ((i = i+1))
  695. done
  696. }
  697. #
  698. # This function continues to write to a filenum number of files into dirnum
  699. # number of directories until either $FILE_WRITE returns an error or the
  700. # maximum number of files per directory have been written.
  701. #
  702. # Usage:
  703. # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
  704. #
  705. # Return value: 0 on success
  706. # non 0 on error
  707. #
  708. # Where :
  709. # destdir: is the directory where everything is to be created under
  710. # dirnum: the maximum number of subdirectories to use, -1 no limit
  711. # filenum: the maximum number of files per subdirectory
  712. # bytes: number of bytes to write
  713. # num_writes: numer of types to write out bytes
  714. # data: the data that will be writen
  715. #
  716. # E.g.
  717. # file_fs /testdir 20 25 1024 256 0
  718. #
  719. # Note: bytes * num_writes equals the size of the testfile
  720. #
  721. function fill_fs # destdir dirnum filenum bytes num_writes data
  722. {
  723. typeset destdir=${1:-$TESTDIR}
  724. typeset -i dirnum=${2:-50}
  725. typeset -i filenum=${3:-50}
  726. typeset -i bytes=${4:-8192}
  727. typeset -i num_writes=${5:-10240}
  728. typeset -i data=${6:-0}
  729. typeset -i odirnum=1
  730. typeset -i idirnum=0
  731. typeset -i fn=0
  732. typeset -i retval=0
  733. log_must $MKDIR -p $destdir/$idirnum
  734. while (($odirnum > 0)); do
  735. if ((dirnum >= 0 && idirnum >= dirnum)); then
  736. odirnum=0
  737. break
  738. fi
  739. $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \
  740. -b $bytes -c $num_writes -d $data
  741. retval=$?
  742. if (($retval != 0)); then
  743. odirnum=0
  744. break
  745. fi
  746. if (($fn >= $filenum)); then
  747. fn=0
  748. ((idirnum = idirnum + 1))
  749. log_must $MKDIR -p $destdir/$idirnum
  750. else
  751. ((fn = fn + 1))
  752. fi
  753. done
  754. return $retval
  755. }
  756. #
  757. # Simple function to get the specified property. If unable to
  758. # get the property then exits.
  759. #
  760. # Note property is in 'parsable' format (-p)
  761. #
  762. function get_prop # property dataset
  763. {
  764. typeset prop_val
  765. typeset prop=$1
  766. typeset dataset=$2
  767. prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null)
  768. if [[ $? -ne 0 ]]; then
  769. log_note "Unable to get $prop property for dataset " \
  770. "$dataset"
  771. return 1
  772. fi
  773. $ECHO $prop_val
  774. return 0
  775. }
  776. #
  777. # Simple function to get the specified property of pool. If unable to
  778. # get the property then exits.
  779. #
  780. function get_pool_prop # property pool
  781. {
  782. typeset prop_val
  783. typeset prop=$1
  784. typeset pool=$2
  785. if poolexists $pool ; then
  786. prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \
  787. $AWK '{print $3}')
  788. if [[ $? -ne 0 ]]; then
  789. log_note "Unable to get $prop property for pool " \
  790. "$pool"
  791. return 1
  792. fi
  793. else
  794. log_note "Pool $pool not exists."
  795. return 1
  796. fi
  797. $ECHO $prop_val
  798. return 0
  799. }
  800. # Return 0 if a pool exists; $? otherwise
  801. #
  802. # $1 - pool name
  803. function poolexists
  804. {
  805. typeset pool=$1
  806. if [[ -z $pool ]]; then
  807. log_note "No pool name given."
  808. return 1
  809. fi
  810. $ZPOOL get name "$pool" > /dev/null 2>&1
  811. return $?
  812. }
  813. # Return 0 if all the specified datasets exist; $? otherwise
  814. #
  815. # $1-n dataset name
  816. function datasetexists
  817. {
  818. if (($# == 0)); then
  819. log_note "No dataset name given."
  820. return 1
  821. fi
  822. while (($# > 0)); do
  823. $ZFS get name $1 > /dev/null 2>&1 || \
  824. return $?
  825. shift
  826. done
  827. return 0
  828. }
  829. # return 0 if none of the specified datasets exists, otherwise return 1.
  830. #
  831. # $1-n dataset name
  832. function datasetnonexists
  833. {
  834. if (($# == 0)); then
  835. log_note "No dataset name given."
  836. return 1
  837. fi
  838. while (($# > 0)); do
  839. $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
  840. && return 1
  841. shift
  842. done
  843. return 0
  844. }
  845. #
  846. # Given a mountpoint, or a dataset name, determine if it is shared.
  847. #
  848. # Returns 0 if shared, 1 otherwise.
  849. #
  850. function is_shared
  851. {
  852. typeset fs=$1
  853. typeset mtpt
  854. if is_linux; then
  855. log_unsupported "Currently unsupported by the test framework"
  856. return 1
  857. fi
  858. if [[ $fs != "/"* ]] ; then
  859. if datasetnonexists "$fs" ; then
  860. return 1
  861. else
  862. mtpt=$(get_prop mountpoint "$fs")
  863. case $mtpt in
  864. none|legacy|-) return 1
  865. ;;
  866. *) fs=$mtpt
  867. ;;
  868. esac
  869. fi
  870. fi
  871. for mtpt in `$SHARE | $AWK '{print $2}'` ; do
  872. if [[ $mtpt == $fs ]] ; then
  873. return 0
  874. fi
  875. done
  876. typeset stat=$($SVCS -H -o STA nfs/server:default)
  877. if [[ $stat != "ON" ]]; then
  878. log_note "Current nfs/server status: $stat"
  879. fi
  880. return 1
  881. }
  882. #
  883. # Given a mountpoint, determine if it is not shared.
  884. #
  885. # Returns 0 if not shared, 1 otherwise.
  886. #
  887. function not_shared
  888. {
  889. typeset fs=$1
  890. if is_linux; then
  891. log_unsupported "Currently unsupported by the test framework"
  892. return 1
  893. fi
  894. is_shared $fs
  895. if (($? == 0)); then
  896. return 1
  897. fi
  898. return 0
  899. }
  900. #
  901. # Helper function to unshare a mountpoint.
  902. #
  903. function unshare_fs #fs
  904. {
  905. typeset fs=$1
  906. if is_linux; then
  907. log_unsupported "Currently unsupported by the test framework"
  908. return 1
  909. fi
  910. is_shared $fs
  911. if (($? == 0)); then
  912. log_must $ZFS unshare $fs
  913. fi
  914. return 0
  915. }
  916. #
  917. # Check NFS server status and trigger it online.
  918. #
  919. function setup_nfs_server
  920. {
  921. # Cannot share directory in non-global zone.
  922. #
  923. if ! is_global_zone; then
  924. log_note "Cannot trigger NFS server by sharing in LZ."
  925. return
  926. fi
  927. if is_linux; then
  928. log_unsupported "Currently unsupported by the test framework"
  929. return
  930. fi
  931. typeset nfs_fmri="svc:/network/nfs/server:default"
  932. if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then
  933. #
  934. # Only really sharing operation can enable NFS server
  935. # to online permanently.
  936. #
  937. typeset dummy=/tmp/dummy
  938. if [[ -d $dummy ]]; then
  939. log_must $RM -rf $dummy
  940. fi
  941. log_must $MKDIR $dummy
  942. log_must $SHARE $dummy
  943. #
  944. # Waiting for fmri's status to be the final status.
  945. # Otherwise, in transition, an asterisk (*) is appended for
  946. # instances, unshare will reverse status to 'DIS' again.
  947. #
  948. # Waiting for 1's at least.
  949. #
  950. log_must $SLEEP 1
  951. timeout=10
  952. while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]]
  953. do
  954. log_must $SLEEP 1
  955. ((timeout -= 1))
  956. done
  957. log_must $UNSHARE $dummy
  958. log_must $RM -rf $dummy
  959. fi
  960. log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'"
  961. }
  962. #
  963. # To verify whether calling process is in global zone
  964. #
  965. # Return 0 if in global zone, 1 in non-global zone
  966. #
  967. function is_global_zone
  968. {
  969. typeset cur_zone=$($ZONENAME 2>/dev/null)
  970. if [[ $cur_zone != "global" ]]; then
  971. return 1
  972. fi
  973. return 0
  974. }
  975. #
  976. # Verify whether test is permitted to run from
  977. # global zone, local zone, or both
  978. #
  979. # $1 zone limit, could be "global", "local", or "both"(no limit)
  980. #
  981. # Return 0 if permitted, otherwise exit with log_unsupported
  982. #
  983. function verify_runnable # zone limit
  984. {
  985. typeset limit=$1
  986. [[ -z $limit ]] && return 0
  987. if is_global_zone ; then
  988. case $limit in
  989. global|both)
  990. ;;
  991. local) log_unsupported "Test is unable to run from "\
  992. "global zone."
  993. ;;
  994. *) log_note "Warning: unknown limit $limit - " \
  995. "use both."
  996. ;;
  997. esac
  998. else
  999. case $limit in
  1000. local|both)
  1001. ;;
  1002. global) log_unsupported "Test is unable to run from "\
  1003. "local zone."
  1004. ;;
  1005. *) log_note "Warning: unknown limit $limit - " \
  1006. "use both."
  1007. ;;
  1008. esac
  1009. reexport_pool
  1010. fi
  1011. return 0
  1012. }
  1013. # Return 0 if create successfully or the pool exists; $? otherwise
  1014. # Note: In local zones, this function should return 0 silently.
  1015. #
  1016. # $1 - pool name
  1017. # $2-n - [keyword] devs_list
  1018. function create_pool #pool devs_list
  1019. {
  1020. typeset pool=${1%%/*}
  1021. shift
  1022. if [[ -z $pool ]]; then
  1023. log_note "Missing pool name."
  1024. return 1
  1025. fi
  1026. if poolexists $pool ; then
  1027. destroy_pool $pool
  1028. fi
  1029. if is_global_zone ; then
  1030. [[ -d /$pool ]] && $RM -rf /$pool
  1031. log_must $ZPOOL create -f $pool $@
  1032. fi
  1033. return 0
  1034. }
# Destroy the named pool and remove its mountpoint directory.
#
# Return 0 if destroyed successfully (or when called from a local zone,
# where this is a no-op); 1 if no name was given or the pool is absent.
#
# $1 - pool name (any "/dataset" suffix is stripped)
# Destroy pool with the given parameters.
function destroy_pool #pool
{
    typeset pool=${1%%/*}
    typeset mtpt

    if [[ -z $pool ]]; then
        log_note "No pool name given."
        return 1
    fi

    if is_global_zone ; then
        if poolexists "$pool" ; then
            # Record the mountpoint before the pool goes away.
            mtpt=$(get_prop mountpoint "$pool")

            # At times, syseventd activity can cause attempts to
            # destroy a pool to fail with EBUSY. We retry a few
            # times allowing failures before requiring the destroy
            # to succeed.
            typeset -i wait_time=10 ret=1 count=0
            # NOTE: 'must' is intentionally not typeset - after 8
            # failed attempts it switches to log_must so the next
            # failure aborts the test.
            must=""
            while [[ $ret -ne 0 ]]; do
                $must $ZPOOL destroy -f $pool
                ret=$?
                [[ $ret -eq 0 ]] && break
                log_note "zpool destroy failed with $ret"
                # ksh evaluates bare words in [[ ]] numeric tests
                # arithmetically, so this also increments count.
                [[ count++ -ge 7 ]] && must=log_must
                $SLEEP $wait_time
            done

            [[ -d $mtpt ]] && \
                log_must $RM -rf $mtpt
        else
            log_note "Pool does not exist. ($pool)"
            return 1
        fi
    fi

    return 0
}
#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name, default "<hostname>-z"
# $2 zone root directory prefix, default "/zone_root"
# $3 zone ip, default "10.1.1.10"
#
# Globals read: ZONE_CTR (container name prefix), ZONE_POOL (pool name),
# DISKS (pool vdevs), ZVOL_DEVDIR/ZVOL_RDEVDIR (zvol device directories).
# Solaris-only: relies on zonecfg(1M)/zoneadm(1M) and sysidcfg.
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
    typeset zone_name=${1:-$(hostname)-z}
    typeset zone_root=${2:-"/zone_root"}
    typeset zone_ip=${3:-"10.1.1.10"}
    typeset prefix_ctr=$ZONE_CTR
    typeset pool_name=$ZONE_POOL
    typeset -i cntctr=5
    typeset -i i=0

    # Create pool and 5 container within it
    #
    [[ -d /$pool_name ]] && $RM -rf /$pool_name
    log_must $ZPOOL create -f $pool_name $DISKS
    while ((i < cntctr)); do
        log_must $ZFS create $pool_name/$prefix_ctr$i
        ((i += 1))
    done

    # create a zvol
    log_must $ZFS create -V 1g $pool_name/zone_zvol
    block_device_wait

    #
    # If current system support slog, add slog device for pool
    #
    if verify_slog_support ; then
        typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2"
        log_must $MKFILE 100M $sdevs
        log_must $ZPOOL add $pool_name log mirror $sdevs
    fi

    # this isn't supported just yet.
    # Create a filesystem. In order to add this to
    # the zone, it must have it's mountpoint set to 'legacy'
    # log_must $ZFS create $pool_name/zfs_filesystem
    # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem

    # Reset the zone root; create it 0700 as zoneadm requires.
    [[ -d $zone_root ]] && \
        log_must $RM -rf $zone_root/$zone_name
    [[ ! -d $zone_root ]] && \
        log_must $MKDIR -p -m 0700 $zone_root/$zone_name

    # Create zone configure file and configure the zone
    #
    typeset zone_conf=/tmp/zone_conf.$$
    $ECHO "create" > $zone_conf
    $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf
    $ECHO "set autoboot=true" >> $zone_conf
    i=0
    # Delegate each of the 5 containers to the zone as a dataset.
    while ((i < cntctr)); do
        $ECHO "add dataset" >> $zone_conf
        $ECHO "set name=$pool_name/$prefix_ctr$i" >> \
            $zone_conf
        $ECHO "end" >> $zone_conf
        ((i += 1))
    done

    # add our zvol to the zone
    $ECHO "add device" >> $zone_conf
    $ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf
    $ECHO "end" >> $zone_conf

    # add a corresponding zvol rdsk to the zone
    $ECHO "add device" >> $zone_conf
    $ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
    $ECHO "end" >> $zone_conf

    # once it's supported, we'll add our filesystem to the zone
    # $ECHO "add fs" >> $zone_conf
    # $ECHO "set type=zfs" >> $zone_conf
    # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf
    # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf
    # $ECHO "end" >> $zone_conf

    $ECHO "verify" >> $zone_conf
    $ECHO "commit" >> $zone_conf
    log_must $ZONECFG -z $zone_name -f $zone_conf
    log_must $RM -f $zone_conf

    # Install the zone
    $ZONEADM -z $zone_name install
    if (($? == 0)); then
        log_note "SUCCESS: $ZONEADM -z $zone_name install"
    else
        log_fail "FAIL: $ZONEADM -z $zone_name install"
    fi

    # Install sysidcfg file so first boot skips interactive sysid.
    #
    typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
    $ECHO "system_locale=C" > $sysidcfg
    $ECHO "terminal=dtterm" >> $sysidcfg
    $ECHO "network_interface=primary {" >> $sysidcfg
    $ECHO "hostname=$zone_name" >> $sysidcfg
    $ECHO "}" >> $sysidcfg
    $ECHO "name_service=NONE" >> $sysidcfg
    $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg
    $ECHO "security_policy=NONE" >> $sysidcfg
    $ECHO "timezone=US/Eastern" >> $sysidcfg

    # Boot this zone
    log_must $ZONEADM -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
# Rebinds the global TESTPOOL/TESTPOOL1..4 variables to the datasets
# delegated into the local zone (ZONE_POOL/ZONE_CTR<i>) and mounts any
# that are not yet mounted. Called from verify_runnable() in local zones.
#
function reexport_pool
{
    typeset -i cntctr=5
    typeset -i i=0

    while ((i < cntctr)); do
        if ((i == 0)); then
            # First container becomes the primary TESTPOOL.
            TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
            if ! ismounted $TESTPOOL; then
                log_must $ZFS mount $TESTPOOL
            fi
        else
            # eval is needed to build TESTPOOL1..TESTPOOL4
            # dynamically and dereference them afterwards.
            eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
            if eval ! ismounted \$TESTPOOL$i; then
                log_must eval $ZFS mount \$TESTPOOL$i
            fi
        fi
        ((i += 1))
    done
}
  1196. #
  1197. # Verify a given disk is online or offline
  1198. #
  1199. # Return 0 is pool/disk matches expected state, 1 otherwise
  1200. #
  1201. function check_state # pool disk state{online,offline}
  1202. {
  1203. typeset pool=$1
  1204. typeset disk=${2#$DEV_DSKDIR/}
  1205. typeset state=$3
  1206. $ZPOOL status -v $pool | grep "$disk" \
  1207. | grep -i "$state" > /dev/null 2>&1
  1208. return $?
  1209. }
  1210. #
  1211. # Get the mountpoint of snapshot
  1212. # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap>
  1213. # as its mountpoint
  1214. #
  1215. function snapshot_mountpoint
  1216. {
  1217. typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
  1218. if [[ $dataset != *@* ]]; then
  1219. log_fail "Error name of snapshot '$dataset'."
  1220. fi
  1221. typeset fs=${dataset%@*}
  1222. typeset snap=${dataset#*@}
  1223. if [[ -z $fs || -z $snap ]]; then
  1224. log_fail "Error name of snapshot '$dataset'."
  1225. fi
  1226. $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
  1227. }
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
# $1 pool name
# $2 filesystem (dataset) to verify
# $3-n optional directories passed to 'zpool import -d' as search paths
#
# Aborts the test via log_fail if zdb reports errors.
#
function verify_filesys # pool filesystem dir
{
    typeset pool="$1"
    typeset filesys="$2"
    typeset zdbout="/tmp/zdbout.$$"

    shift
    shift
    typeset dirs=$@
    typeset search_path=""

    log_note "Calling $ZDB to verify filesystem '$filesys'"
    # Unmount everything first; failures here are non-fatal.
    $ZFS unmount -a > /dev/null 2>&1
    log_must $ZPOOL export $pool

    # Build the -d search path list for file-backed pools.
    if [[ -n $dirs ]] ; then
        for dir in $dirs ; do
            search_path="$search_path -d $dir"
        done
    fi

    log_must $ZPOOL import $search_path $pool

    # -c checksum, -u uberblock, -d datasets, -i intent log.
    $ZDB -cudi $filesys > $zdbout 2>&1
    if [[ $? != 0 ]]; then
        log_note "Output: $ZDB -cudi $filesys"
        $CAT $zdbout
        log_fail "$ZDB detected errors with: '$filesys'"
    fi

    log_must $ZFS mount -a
    log_must $RM -rf $zdbout
}
  1260. #
  1261. # Given a pool, and this function list all disks in the pool
  1262. #
  1263. function get_disklist # pool
  1264. {
  1265. typeset disklist=""
  1266. disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \
  1267. $GREP -v "\-\-\-\-\-" | \
  1268. $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
  1269. $ECHO $disklist
  1270. }
  1271. # /**
  1272. # This function kills a given list of processes after a time period. We use
  1273. # this in the stress tests instead of STF_TIMEOUT so that we can have processes
  1274. # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
  1275. # would be listed as FAIL, which we don't want : we're happy with stress tests
  1276. # running for a certain amount of time, then finishing.
  1277. #
  1278. # @param $1 the time in seconds after which we should terminate these processes
  1279. # @param $2..$n the processes we wish to terminate.
  1280. # */
  1281. function stress_timeout
  1282. {
  1283. typeset -i TIMEOUT=$1
  1284. shift
  1285. typeset cpids="$@"
  1286. log_note "Waiting for child processes($cpids). " \
  1287. "It could last dozens of minutes, please be patient ..."
  1288. log_must $SLEEP $TIMEOUT
  1289. log_note "Killing child processes after ${TIMEOUT} stress timeout."
  1290. typeset pid
  1291. for pid in $cpids; do
  1292. $PS -p $pid > /dev/null 2>&1
  1293. if (($? == 0)); then
  1294. log_must $KILL -USR1 $pid
  1295. fi
  1296. done
  1297. }
  1298. #
  1299. # Verify a given hotspare disk is inuse or avail
  1300. #
  1301. # Return 0 is pool/disk matches expected state, 1 otherwise
  1302. #
  1303. function check_hotspare_state # pool disk state{inuse,avail}
  1304. {
  1305. typeset pool=$1
  1306. typeset disk=${2#$DEV_DSKDIR/}
  1307. typeset state=$3
  1308. cur_state=$(get_device_state $pool $disk "spares")
  1309. if [[ $state != ${cur_state} ]]; then
  1310. return 1
  1311. fi
  1312. return 0
  1313. }
  1314. #
  1315. # Verify a given slog disk is inuse or avail
  1316. #
  1317. # Return 0 is pool/disk matches expected state, 1 otherwise
  1318. #
  1319. function check_slog_state # pool disk state{online,offline,unavail}
  1320. {
  1321. typeset pool=$1
  1322. typeset disk=${2#$DEV_DSKDIR/}
  1323. typeset state=$3
  1324. cur_state=$(get_device_state $pool $disk "logs")
  1325. if [[ $state != ${cur_state} ]]; then
  1326. return 1
  1327. fi
  1328. return 0
  1329. }
  1330. #
  1331. # Verify a given vdev disk is inuse or avail
  1332. #
  1333. # Return 0 is pool/disk matches expected state, 1 otherwise
  1334. #
  1335. function check_vdev_state # pool disk state{online,offline,unavail}
  1336. {
  1337. typeset pool=$1
  1338. typeset disk=${2#$/DEV_DSKDIR/}
  1339. typeset state=$3
  1340. cur_state=$(get_device_state $pool $disk)
  1341. if [[ $state != ${cur_state} ]]; then
  1342. return 1
  1343. fi
  1344. return 0
  1345. }
  1346. #
  1347. # Check the output of 'zpool status -v <pool>',
  1348. # and to see if the content of <token> contain the <keyword> specified.
  1349. #
  1350. # Return 0 is contain, 1 otherwise
  1351. #
  1352. function check_pool_status # pool token keyword
  1353. {
  1354. typeset pool=$1
  1355. typeset token=$2
  1356. typeset keyword=$3
  1357. $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" '
  1358. ($1==token) {print $0}' \
  1359. | $GREP -i "$keyword" > /dev/null 2>&1
  1360. return $?
  1361. }
  1362. #
  1363. # These 5 following functions are instance of check_pool_status()
  1364. # is_pool_resilvering - to check if the pool is resilver in progress
  1365. # is_pool_resilvered - to check if the pool is resilver completed
  1366. # is_pool_scrubbing - to check if the pool is scrub in progress
  1367. # is_pool_scrubbed - to check if the pool is scrub completed
  1368. # is_pool_scrub_stopped - to check if the pool is scrub stopped
  1369. #
  1370. function is_pool_resilvering #pool
  1371. {
  1372. check_pool_status "$1" "scan" "resilver in progress since "
  1373. return $?
  1374. }
  1375. function is_pool_resilvered #pool
  1376. {
  1377. check_pool_status "$1" "scan" "resilvered "
  1378. return $?
  1379. }
  1380. function is_pool_scrubbing #pool
  1381. {
  1382. check_pool_status "$1" "scan" "scrub in progress since "
  1383. return $?
  1384. }
  1385. function is_pool_scrubbed #pool
  1386. {
  1387. check_pool_status "$1" "scan" "scrub repaired"
  1388. return $?
  1389. }
  1390. function is_pool_scrub_stopped #pool
  1391. {
  1392. check_pool_status "$1" "scan" "scrub canceled"
  1393. return $?
  1394. }
  1395. #
  1396. # Use create_pool()/destroy_pool() to clean up the infomation in
  1397. # in the given disk to avoid slice overlapping.
  1398. #
  1399. function cleanup_devices #vdevs
  1400. {
  1401. typeset pool="foopool$$"
  1402. if poolexists $pool ; then
  1403. destroy_pool $pool
  1404. fi
  1405. create_pool $pool $@
  1406. destroy_pool $pool
  1407. return 0
  1408. }
  1409. #
  1410. # Verify the rsh connectivity to each remote host in RHOSTS.
  1411. #
  1412. # Return 0 if remote host is accessible; otherwise 1.
  1413. # $1 remote host name
  1414. # $2 username
  1415. #
  1416. function verify_rsh_connect #rhost, username
  1417. {
  1418. typeset rhost=$1
  1419. typeset username=$2
  1420. typeset rsh_cmd="$RSH -n"
  1421. typeset cur_user=
  1422. $GETENT hosts $rhost >/dev/null 2>&1
  1423. if (($? != 0)); then
  1424. log_note "$rhost cannot be found from" \
  1425. "administrative database."
  1426. return 1
  1427. fi
  1428. $PING $rhost 3 >/dev/null 2>&1
  1429. if (($? != 0)); then
  1430. log_note "$rhost is not reachable."
  1431. return 1
  1432. fi
  1433. if ((${#username} != 0)); then
  1434. rsh_cmd="$rsh_cmd -l $username"
  1435. cur_user="given user \"$username\""
  1436. else
  1437. cur_user="current user \"`$LOGNAME`\""
  1438. fi
  1439. if ! $rsh_cmd $rhost $TRUE; then
  1440. log_note "$RSH to $rhost is not accessible" \
  1441. "with $cur_user."
  1442. return 1
  1443. fi
  1444. return 0
  1445. }
  1446. #
  1447. # Verify the remote host connection via rsh after rebooting
  1448. # $1 remote host
  1449. #
  1450. function verify_remote
  1451. {
  1452. rhost=$1
  1453. #
  1454. # The following loop waits for the remote system rebooting.
  1455. # Each iteration will wait for 150 seconds. there are
  1456. # total 5 iterations, so the total timeout value will
  1457. # be 12.5 minutes for the system rebooting. This number
  1458. # is an approxiate number.
  1459. #
  1460. typeset -i count=0
  1461. while ! verify_rsh_connect $rhost; do
  1462. sleep 150
  1463. ((count = count + 1))
  1464. if ((count > 5)); then
  1465. return 1
  1466. fi
  1467. done
  1468. return 0
  1469. }
  1470. #
  1471. # Replacement function for /usr/bin/rsh. This function will include
  1472. # the /usr/bin/rsh and meanwhile return the execution status of the
  1473. # last command.
  1474. #
  1475. # $1 usrname passing down to -l option of /usr/bin/rsh
  1476. # $2 remote machine hostname
  1477. # $3... command string
  1478. #
  1479. function rsh_status
  1480. {
  1481. typeset ruser=$1
  1482. typeset rhost=$2
  1483. typeset -i ret=0
  1484. typeset cmd_str=""
  1485. typeset rsh_str=""
  1486. shift; shift
  1487. cmd_str="$@"
  1488. err_file=/tmp/${rhost}.$$.err
  1489. if ((${#ruser} == 0)); then
  1490. rsh_str="$RSH -n"
  1491. else
  1492. rsh_str="$RSH -n -l $ruser"
  1493. fi
  1494. $rsh_str $rhost /bin/ksh -c "'$cmd_str; \
  1495. print -u 2 \"status=\$?\"'" \
  1496. >/dev/null 2>$err_file
  1497. ret=$?
  1498. if (($ret != 0)); then
  1499. $CAT $err_file
  1500. $RM -f $std_file $err_file
  1501. log_fail "$RSH itself failed with exit code $ret..."
  1502. fi
  1503. ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \
  1504. $CUT -d= -f2)
  1505. (($ret != 0)) && $CAT $err_file >&2
  1506. $RM -f $err_file >/dev/null 2>&1
  1507. return $ret
  1508. }
  1509. #
  1510. # Get the SUNWstc-fs-zfs package installation path in a remote host
  1511. # $1 remote host name
  1512. #
  1513. function get_remote_pkgpath
  1514. {
  1515. typeset rhost=$1
  1516. typeset pkgpath=""
  1517. pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\
  1518. $CUT -d: -f2")
  1519. $ECHO $pkgpath
  1520. }
#/**
# A function to find and locate free disks on a system or from given
# disks as the parameter. It works by locating disks that are in use
# as swap devices and dump devices, and also disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
    # Trust provided list, no attempt is made to locate unused devices.
    if is_linux; then
        $ECHO "$@"
        return
    fi

    sfi=/tmp/swaplist.$$
    dmpi=/tmp/dumpdev.$$
    max_finddisksnum=${MAX_FINDDISKSNUM:-6}

    # Snapshot current swap and dump device usage.
    $SWAP -l > $sfi
    $DUMPADM > $dmpi 2>/dev/null

    # write an awk script that can process the output of format
    # to produce a list of disks we know about. Note that we have
    # to escape "$2" so that the shell doesn't interpret it while
    # we're creating the awk script.
    # -------------------
    $CAT > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
	BEGIN { FS="."; }

	/^Specify disk/{
		searchdisks=0;
	}

	{
		if (searchdisks && \$2 !~ "^$"){
			split(\$2,arr," ");
			print arr[1];
		}
	}

	/^AVAILABLE DISK SELECTIONS:/{
		searchdisks=1;
	}
EOF
    #---------------------
    $CHMOD 755 /tmp/find_disks.awk
    # Use the provided disks if any, else parse format(1M) output.
    disks=${@:-$($ECHO "" | $FORMAT -e 2>/dev/null | /tmp/find_disks.awk)}
    $RM /tmp/find_disks.awk

    unused=""
    # NOTE(review): unused_candidates is appended to without being
    # initialized here - relies on it being empty in the environment.
    for disk in $disks; do
        # Check for mounted
        $GREP "${disk}[sp]" /etc/mnttab >/dev/null
        (($? == 0)) && continue
        # Check for swap
        $GREP "${disk}[sp]" $sfi >/dev/null
        (($? == 0)) && continue
        # check for dump device
        $GREP "${disk}[sp]" $dmpi >/dev/null
        (($? == 0)) && continue
        # check to see if this disk hasn't been explicitly excluded
        # by a user-set environment variable
        $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null
        (($? == 0)) && continue
        unused_candidates="$unused_candidates $disk"
    done
    $RM $sfi
    $RM $dmpi

    # now just check to see if those disks do actually exist
    # by looking for a device pointing to the first slice in
    # each case. limit the number to max_finddisksnum
    count=0
    for disk in $unused_candidates; do
        if [ -b $DEV_DSKDIR/${disk}s0 ]; then
            if [ $count -lt $max_finddisksnum ]; then
                unused="$unused $disk"
                # do not impose limit if $@ is provided
                [[ -z $@ ]] && ((count = count + 1))
            fi
        fi
    done

    # finally, return our disk list
    $ECHO $unused
}
  1603. #
  1604. # Add specified user to specified group
  1605. #
  1606. # $1 group name
  1607. # $2 user name
  1608. # $3 base of the homedir (optional)
  1609. #
  1610. function add_user #<group_name> <user_name> <basedir>
  1611. {
  1612. typeset gname=$1
  1613. typeset uname=$2
  1614. typeset basedir=${3:-"/var/tmp"}
  1615. if ((${#gname} == 0 || ${#uname} == 0)); then
  1616. log_fail "group name or user name are not defined."
  1617. fi
  1618. log_must $USERADD -g $gname -d $basedir/$uname -m $uname
  1619. # Add new users to the same group and the command line utils.
  1620. # This allows them to be run out of the original users home
  1621. # directory as long as it permissioned to be group readable.
  1622. if is_linux; then
  1623. cmd_group=$(stat --format="%G" $ZFS)
  1624. log_must $USERMOD -a -G $cmd_group $uname
  1625. fi
  1626. return 0
  1627. }
  1628. #
  1629. # Delete the specified user.
  1630. #
  1631. # $1 login name
  1632. # $2 base of the homedir (optional)
  1633. #
  1634. function del_user #<logname> <basedir>
  1635. {
  1636. typeset user=$1
  1637. typeset basedir=${2:-"/var/tmp"}
  1638. if ((${#user} == 0)); then
  1639. log_fail "login name is necessary."
  1640. fi
  1641. if $ID $user > /dev/null 2>&1; then
  1642. log_must $USERDEL $user
  1643. fi
  1644. [[ -d $basedir/$user ]] && $RM -fr $basedir/$user
  1645. return 0
  1646. }
  1647. #
  1648. # Select valid gid and create specified group.
  1649. #
  1650. # $1 group name
  1651. #
  1652. function add_group #<group_name>
  1653. {
  1654. typeset group=$1
  1655. if ((${#group} == 0)); then
  1656. log_fail "group name is necessary."
  1657. fi
  1658. # Assign 100 as the base gid, a larger value is selected for
  1659. # Linux because for many distributions 1000 and under are reserved.
  1660. if is_linux; then
  1661. while true; do
  1662. $GROUPADD $group > /dev/null 2>&1
  1663. typeset -i ret=$?
  1664. case $ret in
  1665. 0) return 0 ;;
  1666. *) return 1 ;;
  1667. esac
  1668. done
  1669. else
  1670. typeset -i gid=100
  1671. while true; do
  1672. $GROUPADD -g $gid $group > /dev/null 2>&1
  1673. typeset -i ret=$?
  1674. case $ret in
  1675. 0) return 0 ;;
  1676. # The gid is not unique
  1677. 4) ((gid += 1)) ;;
  1678. *) return 1 ;;
  1679. esac
  1680. done
  1681. fi
  1682. }
  1683. #
  1684. # Delete the specified group.
  1685. #
  1686. # $1 group name
  1687. #
  1688. function del_group #<group_name>
  1689. {
  1690. typeset grp=$1
  1691. if ((${#grp} == 0)); then
  1692. log_fail "group name is necessary."
  1693. fi
  1694. if is_linux; then
  1695. $GETENT group $grp > /dev/null 2>&1
  1696. typeset -i ret=$?
  1697. case $ret in
  1698. # Group does not exist.
  1699. 2) return 0 ;;
  1700. # Name already exists as a group name
  1701. 0) log_must $GROUPDEL $grp ;;
  1702. *) return 1 ;;
  1703. esac
  1704. else
  1705. $GROUPMOD -n $grp $grp > /dev/null 2>&1
  1706. typeset -i ret=$?
  1707. case $ret in
  1708. # Group does not exist.
  1709. 6) return 0 ;;
  1710. # Name already exists as a group name
  1711. 9) log_must $GROUPDEL $grp ;;
  1712. *) return 1 ;;
  1713. esac
  1714. fi
  1715. return 0
  1716. }
  1717. #
  1718. # This function will return true if it's safe to destroy the pool passed
  1719. # as argument 1. It checks for pools based on zvols and files, and also
  1720. # files contained in a pool that may have a different mountpoint.
  1721. #
  1722. function safe_to_destroy_pool { # $1 the pool name
  1723. typeset pool=""
  1724. typeset DONT_DESTROY=""
  1725. # We check that by deleting the $1 pool, we're not
  1726. # going to pull the rug out from other pools. Do this
  1727. # by looking at all other pools, ensuring that they
  1728. # aren't built from files or zvols contained in this pool.
  1729. for pool in $($ZPOOL list -H -o name)
  1730. do
  1731. ALTMOUNTPOOL=""
  1732. # this is a list of the top-level directories in each of the
  1733. # files that make up the path to the files the pool is based on
  1734. FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
  1735. $AWK '{print $1}')
  1736. # this is a list of the zvols that make up the pool
  1737. ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
  1738. | $AWK '{print $1}')
  1739. # also want to determine if it's a file-based pool using an
  1740. # alternate mountpoint...
  1741. POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
  1742. $GREP / | $AWK '{print $1}' | \
  1743. $AWK -F/ '{print $2}' | $GREP -v "dev")
  1744. for pooldir in $POOL_FILE_DIRS
  1745. do
  1746. OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
  1747. $GREP "${pooldir}$" | $AWK '{print $1}')
  1748. ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
  1749. done
  1750. if [ ! -z "$ZVOLPOOL" ]
  1751. then
  1752. DONT_DESTROY="true"
  1753. log_note "Pool $pool is built from $ZVOLPOOL on $1"
  1754. fi
  1755. if [ ! -z "$FILEPOOL" ]
  1756. then
  1757. DONT_DESTROY="true"
  1758. log_note "Pool $pool is built from $FILEPOOL on $1"
  1759. fi
  1760. if [ ! -z "$ALTMOUNTPOOL" ]
  1761. then
  1762. DONT_DESTROY="true"
  1763. log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
  1764. fi
  1765. done
  1766. if [ -z "${DONT_DESTROY}" ]
  1767. then
  1768. return 0
  1769. else
  1770. log_note "Warning: it is not safe to destroy $1!"
  1771. return 1
  1772. fi
  1773. }
#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
# Prints the space-separated list of property values on stdout:
# "zfs_compress" yields values that actually compress, "zfs_set" also
# includes "off". gzip variants are appended only when the running zfs
# binary advertises gzip in its 'zfs get' usage output.
#
function get_compress_opts
{
    typeset COMPRESS_OPTS
    typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
        gzip-6 gzip-7 gzip-8 gzip-9"

    if [[ $1 == "zfs_compress" ]] ; then
        COMPRESS_OPTS="on lzjb"
    elif [[ $1 == "zfs_set" ]] ; then
        COMPRESS_OPTS="on off lzjb"
    fi
    typeset valid_opts="$COMPRESS_OPTS"
    # Probe the usage text of 'zfs get' for gzip support.
    $ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
    if [[ $? -eq 0 ]]; then
        valid_opts="$valid_opts $GZIP_OPTS"
    fi
    $ECHO "$valid_opts"
}
#
# Verify zfs operation with -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
# Exercises the operation without -p (must fail because the parent does
# not exist) and then with -p (must succeed and create the parents).
# Aborts via log_fail on any unexpected outcome.
#
function verify_opt_p_ops
{
    typeset ops=$1
    typeset datatype=$2
    typeset dataset=$3
    typeset newdataset=$4

    if [[ $datatype != "fs" && $datatype != "vol" ]]; then
        log_fail "$datatype is not supported."
    fi

    # check parameters accordingly
    case $ops in
        create)
            # 'create' takes only a target; reuse $dataset as it.
            newdataset=$dataset
            dataset=""
            if [[ $datatype == "vol" ]]; then
                ops="create -V $VOLSIZE"
            fi
            ;;
        clone)
            if [[ -z $newdataset ]]; then
                log_fail "newdataset should not be empty" \
                    "when ops is $ops."
            fi
            # Cloning requires an existing snapshot source.
            log_must datasetexists $dataset
            log_must snapexists $dataset
            ;;
        rename)
            if [[ -z $newdataset ]]; then
                log_fail "newdataset should not be empty" \
                    "when ops is $ops."
            fi
            # Renaming a snapshot with -p is not what we test here.
            log_must datasetexists $dataset
            log_mustnot snapexists $dataset
            ;;
        *)
            log_fail "$ops is not supported."
            ;;
    esac

    # make sure the upper level filesystem does not exist
    if datasetexists ${newdataset%/*} ; then
        log_must $ZFS destroy -rRf ${newdataset%/*}
    fi

    # without -p option, operation will fail
    log_mustnot $ZFS $ops $dataset $newdataset
    log_mustnot datasetexists $newdataset ${newdataset%/*}

    # with -p option, operation should succeed
    log_must $ZFS $ops -p $dataset $newdataset
    block_device_wait

    if ! datasetexists $newdataset ; then
        log_fail "-p option does not work for $ops"
    fi

    # when $ops is create or clone, redo the operation still return zero
    if [[ $ops != "rename" ]]; then
        log_must $ZFS $ops -p $dataset $newdataset
    fi

    return 0
}
  1859. #
  1860. # Get configuration of pool
  1861. # $1 pool name
  1862. # $2 config name
  1863. #
  1864. function get_config
  1865. {
  1866. typeset pool=$1
  1867. typeset config=$2
  1868. typeset alt_root
  1869. if ! poolexists "$pool" ; then
  1870. return 1
  1871. fi
  1872. alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
  1873. if [[ $alt_root == "-" ]]; then
  1874. value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
  1875. '{print $2}')
  1876. else
  1877. value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
  1878. '{print $2}')
  1879. fi
  1880. if [[ -n $value ]] ; then
  1881. value=${value#'}
  1882. value=${value%'}
  1883. fi
  1884. echo $value
  1885. return 0
  1886. }
  1887. #
  1888. # Privated function. Random select one of items from arguments.
  1889. #
  1890. # $1 count
  1891. # $2-n string
  1892. #
  1893. function _random_get
  1894. {
  1895. typeset cnt=$1
  1896. shift
  1897. typeset str="$@"
  1898. typeset -i ind
  1899. ((ind = RANDOM % cnt + 1))
  1900. typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
  1901. $ECHO $ret
  1902. }
  1903. #
  1904. # Random select one of item from arguments which include NONE string
  1905. #
  1906. function random_get_with_non
  1907. {
  1908. typeset -i cnt=$#
  1909. ((cnt =+ 1))
  1910. _random_get "$cnt" "$@"
  1911. }
  1912. #
  1913. # Random select one of item from arguments which doesn't include NONE string
  1914. #
  1915. function random_get
  1916. {
  1917. _random_get "$#" "$@"
  1918. }
  1919. #
  1920. # Detect if the current system support slog
  1921. #
  1922. function verify_slog_support
  1923. {
  1924. typeset dir=/tmp/disk.$$
  1925. typeset pool=foo.$$
  1926. typeset vdev=$dir/a
  1927. typeset sdev=$dir/b
  1928. $MKDIR -p $dir
  1929. $MKFILE 64M $vdev $sdev
  1930. typeset -i ret=0
  1931. if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
  1932. ret=1
  1933. fi
  1934. $RM -r $dir
  1935. return $ret
  1936. }
  1937. #
  1938. # The function will generate a dataset name with specific length
  1939. # $1, the length of the name
  1940. # $2, the base string to construct the name
  1941. #
  1942. function gen_dataset_name
  1943. {
  1944. typeset -i len=$1
  1945. typeset basestr="$2"
  1946. typeset -i baselen=${#basestr}
  1947. typeset -i iter=0
  1948. typeset l_name=""
  1949. if ((len % baselen == 0)); then
  1950. ((iter = len / baselen))
  1951. else
  1952. ((iter = len / baselen + 1))
  1953. fi
  1954. while ((iter > 0)); do
  1955. l_name="${l_name}$basestr"
  1956. ((iter -= 1))
  1957. done
  1958. $ECHO $l_name
  1959. }
  1960. #
  1961. # Get cksum tuple of dataset
  1962. # $1 dataset name
  1963. #
  1964. # sample zdb output:
  1965. # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
  1966. # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
  1967. # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
  1968. # fill=7 cksum=11ce125712:643a9c18ee2:125e25…

Large files files are truncated, but you can click here to view the full file