
/fs/xfs/xfs_bmap_util.c

https://bitbucket.org/alfredchen/linux-gc
C | 2091 lines | 1373 code | 239 blank | 479 comment | 317 complexity | 9f6d3d50ab1290e343d31f525e449863 MD5


   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * Copyright (c) 2012 Red Hat, Inc.
   4 * All Rights Reserved.
   5 *
   6 * This program is free software; you can redistribute it and/or
   7 * modify it under the terms of the GNU General Public License as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it would be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write the Free Software Foundation,
  17 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  18 */
  19#include "xfs.h"
  20#include "xfs_fs.h"
  21#include "xfs_shared.h"
  22#include "xfs_format.h"
  23#include "xfs_log_format.h"
  24#include "xfs_trans_resv.h"
  25#include "xfs_bit.h"
  26#include "xfs_mount.h"
  27#include "xfs_da_format.h"
  28#include "xfs_defer.h"
  29#include "xfs_inode.h"
  30#include "xfs_btree.h"
  31#include "xfs_trans.h"
  32#include "xfs_extfree_item.h"
  33#include "xfs_alloc.h"
  34#include "xfs_bmap.h"
  35#include "xfs_bmap_util.h"
  36#include "xfs_bmap_btree.h"
  37#include "xfs_rtalloc.h"
  38#include "xfs_error.h"
  39#include "xfs_quota.h"
  40#include "xfs_trans_space.h"
  41#include "xfs_trace.h"
  42#include "xfs_icache.h"
  43#include "xfs_log.h"
  44#include "xfs_rmap_btree.h"
  45#include "xfs_iomap.h"
  46#include "xfs_reflink.h"
  47#include "xfs_refcount.h"
  48
  49/* Kernel only BMAP related definitions and functions */
  50
  51/*
  52 * Convert the given file system block to a disk block.  We have to treat it
  53 * differently based on whether the file is a real time file or not, because the
  54 * bmap code does.
  55 */
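/*
 * Realtime file blocks are numbered linearly across the realtime device,
 * so the conversion is a plain fsb-to-basic-block shift; data device
 * block numbers encode an AG number and an AG-relative block, which
 * XFS_FSB_TO_DADDR decodes before computing the disk address.
 */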
  56xfs_daddr_t
  57xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
  58{
   59	return (XFS_IS_REALTIME_INODE(ip) ?
   60		 (xfs_daddr_t)XFS_FSB_TO_BB((ip)->i_mount, (fsb)) :
  61		 XFS_FSB_TO_DADDR((ip)->i_mount, (fsb)));
  62}
  63
  64/*
  65 * Routine to zero an extent on disk allocated to the specific inode.
  66 *
  67 * The VFS functions take a linearised filesystem block offset, so we have to
  68 * convert the sparse xfs fsb to the right format first.
  69 * VFS types are real funky, too.
  70 */
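/*
 * Note that blkdev_issue_zeroout() works in units of 512-byte sectors,
 * hence the shifts by (s_blocksize_bits - 9) below: e.g. with 4096-byte
 * filesystem blocks that is a shift of 3, i.e. eight sectors per block.
 */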
  71int
  72xfs_zero_extent(
  73	struct xfs_inode *ip,
  74	xfs_fsblock_t	start_fsb,
  75	xfs_off_t	count_fsb)
  76{
  77	struct xfs_mount *mp = ip->i_mount;
  78	xfs_daddr_t	sector = xfs_fsb_to_db(ip, start_fsb);
  79	sector_t	block = XFS_BB_TO_FSBT(mp, sector);
  80
  81	return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
  82		block << (mp->m_super->s_blocksize_bits - 9),
  83		count_fsb << (mp->m_super->s_blocksize_bits - 9),
  84		GFP_NOFS, true);
  85}
  86
  87int
  88xfs_bmap_rtalloc(
  89	struct xfs_bmalloca	*ap)	/* bmap alloc argument struct */
  90{
  91	int		error;		/* error return value */
  92	xfs_mount_t	*mp;		/* mount point structure */
  93	xfs_extlen_t	prod = 0;	/* product factor for allocators */
  94	xfs_extlen_t	ralen = 0;	/* realtime allocation length */
  95	xfs_extlen_t	align;		/* minimum allocation alignment */
  96	xfs_rtblock_t	rtb;
  97
  98	mp = ap->ip->i_mount;
  99	align = xfs_get_extsz_hint(ap->ip);
 100	prod = align / mp->m_sb.sb_rextsize;
 101	error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
 102					align, 1, ap->eof, 0,
 103					ap->conv, &ap->offset, &ap->length);
 104	if (error)
 105		return error;
 106	ASSERT(ap->length);
 107	ASSERT(ap->length % mp->m_sb.sb_rextsize == 0);
 108
 109	/*
 110	 * If the offset & length are not perfectly aligned
 111	 * then kill prod, it will just get us in trouble.
 112	 */
 113	if (do_mod(ap->offset, align) || ap->length % align)
 114		prod = 1;
 115	/*
 116	 * Set ralen to be the actual requested length in rtextents.
 117	 */
 118	ralen = ap->length / mp->m_sb.sb_rextsize;
 119	/*
 120	 * If the old value was close enough to MAXEXTLEN that
 121	 * we rounded up to it, cut it back so it's valid again.
 122	 * Note that if it's a really large request (bigger than
 123	 * MAXEXTLEN), we don't hear about that number, and can't
 124	 * adjust the starting point to match it.
 125	 */
 126	if (ralen * mp->m_sb.sb_rextsize >= MAXEXTLEN)
 127		ralen = MAXEXTLEN / mp->m_sb.sb_rextsize;
 128
 129	/*
 130	 * Lock out modifications to both the RT bitmap and summary inodes
 131	 */
 132	xfs_ilock(mp->m_rbmip, XFS_ILOCK_EXCL|XFS_ILOCK_RTBITMAP);
 133	xfs_trans_ijoin(ap->tp, mp->m_rbmip, XFS_ILOCK_EXCL);
 134	xfs_ilock(mp->m_rsumip, XFS_ILOCK_EXCL|XFS_ILOCK_RTSUM);
 135	xfs_trans_ijoin(ap->tp, mp->m_rsumip, XFS_ILOCK_EXCL);
 136
 137	/*
 138	 * If it's an allocation to an empty file at offset 0,
 139	 * pick an extent that will space things out in the rt area.
 140	 */
 141	if (ap->eof && ap->offset == 0) {
 142		xfs_rtblock_t uninitialized_var(rtx); /* realtime extent no */
 143
 144		error = xfs_rtpick_extent(mp, ap->tp, ralen, &rtx);
 145		if (error)
 146			return error;
 147		ap->blkno = rtx * mp->m_sb.sb_rextsize;
 148	} else {
 149		ap->blkno = 0;
 150	}
 151
 152	xfs_bmap_adjacent(ap);
 153
 154	/*
 155	 * Realtime allocation, done through xfs_rtallocate_extent.
 156	 */
 157	do_div(ap->blkno, mp->m_sb.sb_rextsize);
 158	rtb = ap->blkno;
 159	ap->length = ralen;
 160	error = xfs_rtallocate_extent(ap->tp, ap->blkno, 1, ap->length,
 161				&ralen, ap->wasdel, prod, &rtb);
 162	if (error)
 163		return error;
 164
 165	ap->blkno = rtb;
 166	if (ap->blkno != NULLFSBLOCK) {
 167		ap->blkno *= mp->m_sb.sb_rextsize;
 168		ralen *= mp->m_sb.sb_rextsize;
 169		ap->length = ralen;
 170		ap->ip->i_d.di_nblocks += ralen;
 171		xfs_trans_log_inode(ap->tp, ap->ip, XFS_ILOG_CORE);
 172		if (ap->wasdel)
 173			ap->ip->i_delayed_blks -= ralen;
 174		/*
 175		 * Adjust the disk quota also. This was reserved
 176		 * earlier.
 177		 */
 178		xfs_trans_mod_dquot_byino(ap->tp, ap->ip,
 179			ap->wasdel ? XFS_TRANS_DQ_DELRTBCOUNT :
 180					XFS_TRANS_DQ_RTBCOUNT, (long) ralen);
 181
 182		/* Zero the extent if we were asked to do so */
 183		if (ap->datatype & XFS_ALLOC_USERDATA_ZERO) {
 184			error = xfs_zero_extent(ap->ip, ap->blkno, ap->length);
 185			if (error)
 186				return error;
 187		}
 188	} else {
 189		ap->length = 0;
 190	}
 191	return 0;
 192}
 193
 194/*
 195 * Check if the endoff is outside the last extent. If so the caller will grow
 196 * the allocation to a stripe unit boundary.  All offsets are considered outside
 197 * the end of file for an empty fork, so 1 is returned in *eof in that case.
 198 */
 199int
 200xfs_bmap_eof(
 201	struct xfs_inode	*ip,
 202	xfs_fileoff_t		endoff,
 203	int			whichfork,
 204	int			*eof)
 205{
 206	struct xfs_bmbt_irec	rec;
 207	int			error;
 208
 209	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
 210	if (error || *eof)
 211		return error;
 212
 213	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
 214	return 0;
 215}
 216
 217/*
 218 * Extent tree block counting routines.
 219 */
 220
 221/*
 222 * Count leaf blocks given a range of extent records.
 223 */
 224STATIC void
 225xfs_bmap_count_leaves(
 226	xfs_ifork_t		*ifp,
 227	xfs_extnum_t		idx,
 228	int			numrecs,
 229	int			*count)
 230{
 231	int		b;
 232
 233	for (b = 0; b < numrecs; b++) {
 234		xfs_bmbt_rec_host_t *frp = xfs_iext_get_ext(ifp, idx + b);
 235		*count += xfs_bmbt_get_blockcount(frp);
 236	}
 237}
 238
 239/*
 240 * Count leaf blocks given a range of extent records originally
 241 * in btree format.
 242 */
 243STATIC void
 244xfs_bmap_disk_count_leaves(
 245	struct xfs_mount	*mp,
 246	struct xfs_btree_block	*block,
 247	int			numrecs,
 248	int			*count)
 249{
 250	int		b;
 251	xfs_bmbt_rec_t	*frp;
 252
 253	for (b = 1; b <= numrecs; b++) {
 254		frp = XFS_BMBT_REC_ADDR(mp, block, b);
 255		*count += xfs_bmbt_disk_get_blockcount(frp);
 256	}
 257}
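/*
 * Note the indexing difference between the two leaf counters above: the
 * in-core extent list is walked from record 0 via xfs_iext_get_ext(),
 * while on-disk btree leaf records are addressed starting from 1 via
 * XFS_BMBT_REC_ADDR().
 */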
 258
 259/*
 260 * Recursively walks each level of a btree
 261 * to count total fsblocks in use.
 262 */
 263STATIC int                                     /* error */
 264xfs_bmap_count_tree(
 265	xfs_mount_t     *mp,            /* file system mount point */
 266	xfs_trans_t     *tp,            /* transaction pointer */
 267	xfs_ifork_t	*ifp,		/* inode fork pointer */
 268	xfs_fsblock_t   blockno,	/* file system block number */
 269	int             levelin,	/* level in btree */
 270	int		*count)		/* Count of blocks */
 271{
 272	int			error;
 273	xfs_buf_t		*bp, *nbp;
 274	int			level = levelin;
 275	__be64			*pp;
 276	xfs_fsblock_t           bno = blockno;
 277	xfs_fsblock_t		nextbno;
 278	struct xfs_btree_block	*block, *nextblock;
 279	int			numrecs;
 280
 281	error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp, XFS_BMAP_BTREE_REF,
 282						&xfs_bmbt_buf_ops);
 283	if (error)
 284		return error;
 285	*count += 1;
 286	block = XFS_BUF_TO_BLOCK(bp);
 287
 288	if (--level) {
 289		/* Not at node above leaves, count this level of nodes */
 290		nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
 291		while (nextbno != NULLFSBLOCK) {
 292			error = xfs_btree_read_bufl(mp, tp, nextbno, 0, &nbp,
 293						XFS_BMAP_BTREE_REF,
 294						&xfs_bmbt_buf_ops);
 295			if (error)
 296				return error;
 297			*count += 1;
 298			nextblock = XFS_BUF_TO_BLOCK(nbp);
 299			nextbno = be64_to_cpu(nextblock->bb_u.l.bb_rightsib);
 300			xfs_trans_brelse(tp, nbp);
 301		}
 302
 303		/* Dive to the next level */
 304		pp = XFS_BMBT_PTR_ADDR(mp, block, 1, mp->m_bmap_dmxr[1]);
 305		bno = be64_to_cpu(*pp);
 306		if (unlikely((error =
 307		     xfs_bmap_count_tree(mp, tp, ifp, bno, level, count)) < 0)) {
 308			xfs_trans_brelse(tp, bp);
 309			XFS_ERROR_REPORT("xfs_bmap_count_tree(1)",
 310					 XFS_ERRLEVEL_LOW, mp);
 311			return -EFSCORRUPTED;
 312		}
 313		xfs_trans_brelse(tp, bp);
 314	} else {
 315		/* count all level 1 nodes and their leaves */
 316		for (;;) {
 317			nextbno = be64_to_cpu(block->bb_u.l.bb_rightsib);
 318			numrecs = be16_to_cpu(block->bb_numrecs);
 319			xfs_bmap_disk_count_leaves(mp, block, numrecs, count);
 320			xfs_trans_brelse(tp, bp);
 321			if (nextbno == NULLFSBLOCK)
 322				break;
 323			bno = nextbno;
 324			error = xfs_btree_read_bufl(mp, tp, bno, 0, &bp,
 325						XFS_BMAP_BTREE_REF,
 326						&xfs_bmbt_buf_ops);
 327			if (error)
 328				return error;
 329			*count += 1;
 330			block = XFS_BUF_TO_BLOCK(bp);
 331		}
 332	}
 333	return 0;
 334}
 335
 336/*
 337 * Count fsblocks of the given fork.
 338 */
 339static int					/* error */
 340xfs_bmap_count_blocks(
 341	xfs_trans_t		*tp,		/* transaction pointer */
 342	xfs_inode_t		*ip,		/* incore inode */
 343	int			whichfork,	/* data or attr fork */
 344	int			*count)		/* out: count of blocks */
 345{
 346	struct xfs_btree_block	*block;	/* current btree block */
 347	xfs_fsblock_t		bno;	/* block # of "block" */
 348	xfs_ifork_t		*ifp;	/* fork structure */
 349	int			level;	/* btree level, for checking */
 350	xfs_mount_t		*mp;	/* file system mount structure */
 351	__be64			*pp;	/* pointer to block address */
 352
 353	bno = NULLFSBLOCK;
 354	mp = ip->i_mount;
 355	ifp = XFS_IFORK_PTR(ip, whichfork);
  356	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS) {
 357		xfs_bmap_count_leaves(ifp, 0, xfs_iext_count(ifp), count);
 358		return 0;
 359	}
 360
 361	/*
 362	 * Root level must use BMAP_BROOT_PTR_ADDR macro to get ptr out.
 363	 */
 364	block = ifp->if_broot;
 365	level = be16_to_cpu(block->bb_level);
 366	ASSERT(level > 0);
 367	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
 368	bno = be64_to_cpu(*pp);
 369	ASSERT(bno != NULLFSBLOCK);
 370	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
 371	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
 372
 373	if (unlikely(xfs_bmap_count_tree(mp, tp, ifp, bno, level, count) < 0)) {
 374		XFS_ERROR_REPORT("xfs_bmap_count_blocks(2)", XFS_ERRLEVEL_LOW,
 375				 mp);
 376		return -EFSCORRUPTED;
 377	}
 378
 379	return 0;
 380}
 381
 382/*
 383 * returns 1 for success, 0 if we failed to map the extent.
 384 */
 385STATIC int
 386xfs_getbmapx_fix_eof_hole(
 387	xfs_inode_t		*ip,		/* xfs incore inode pointer */
 388	int			whichfork,
 389	struct getbmapx		*out,		/* output structure */
 390	int			prealloced,	/* this is a file with
 391						 * preallocated data space */
 392	__int64_t		end,		/* last block requested */
 393	xfs_fsblock_t		startblock,
 394	bool			moretocome)
 395{
 396	__int64_t		fixlen;
 397	xfs_mount_t		*mp;		/* file system mount point */
 398	xfs_ifork_t		*ifp;		/* inode fork pointer */
 399	xfs_extnum_t		lastx;		/* last extent pointer */
 400	xfs_fileoff_t		fileblock;
 401
 402	if (startblock == HOLESTARTBLOCK) {
 403		mp = ip->i_mount;
 404		out->bmv_block = -1;
 405		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
 406		fixlen -= out->bmv_offset;
 407		if (prealloced && out->bmv_offset + out->bmv_length == end) {
 408			/* Came to hole at EOF. Trim it. */
 409			if (fixlen <= 0)
 410				return 0;
 411			out->bmv_length = fixlen;
 412		}
 413	} else {
 414		if (startblock == DELAYSTARTBLOCK)
 415			out->bmv_block = -2;
 416		else
 417			out->bmv_block = xfs_fsb_to_db(ip, startblock);
 418		fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
 419		ifp = XFS_IFORK_PTR(ip, whichfork);
 420		if (!moretocome &&
 421		    xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
 422		   (lastx == xfs_iext_count(ifp) - 1))
 423			out->bmv_oflags |= BMV_OF_LAST;
 424	}
 425
 426	return 1;
 427}
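/*
 * In other words: a hole that runs to the end of a preallocated file is
 * clipped to the in-core file size (rounded up to a filesystem block),
 * and omitted entirely (return 0) when it starts at or beyond that size.
 */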
 428
 429/* Adjust the reported bmap around shared/unshared extent transitions. */
 430STATIC int
 431xfs_getbmap_adjust_shared(
 432	struct xfs_inode		*ip,
 433	int				whichfork,
 434	struct xfs_bmbt_irec		*map,
 435	struct getbmapx			*out,
 436	struct xfs_bmbt_irec		*next_map)
 437{
 438	struct xfs_mount		*mp = ip->i_mount;
 439	xfs_agnumber_t			agno;
 440	xfs_agblock_t			agbno;
 441	xfs_agblock_t			ebno;
 442	xfs_extlen_t			elen;
 443	xfs_extlen_t			nlen;
 444	int				error;
 445
 446	next_map->br_startblock = NULLFSBLOCK;
 447	next_map->br_startoff = NULLFILEOFF;
 448	next_map->br_blockcount = 0;
 449
 450	/* Only written data blocks can be shared. */
 451	if (!xfs_is_reflink_inode(ip) || whichfork != XFS_DATA_FORK ||
 452	    map->br_startblock == DELAYSTARTBLOCK ||
 453	    map->br_startblock == HOLESTARTBLOCK ||
 454	    ISUNWRITTEN(map))
 455		return 0;
 456
 457	agno = XFS_FSB_TO_AGNO(mp, map->br_startblock);
 458	agbno = XFS_FSB_TO_AGBNO(mp, map->br_startblock);
 459	error = xfs_reflink_find_shared(mp, agno, agbno, map->br_blockcount,
 460			&ebno, &elen, true);
 461	if (error)
 462		return error;
 463
 464	if (ebno == NULLAGBLOCK) {
 465		/* No shared blocks at all. */
 466		return 0;
 467	} else if (agbno == ebno) {
 468		/*
 469		 * Shared extent at (agbno, elen).  Shrink the reported
 470		 * extent length and prepare to move the start of map[i]
 471		 * to agbno+elen, with the aim of (re)formatting the new
 472		 * map[i] the next time through the inner loop.
 473		 */
 474		out->bmv_length = XFS_FSB_TO_BB(mp, elen);
 475		out->bmv_oflags |= BMV_OF_SHARED;
 476		if (elen != map->br_blockcount) {
 477			*next_map = *map;
 478			next_map->br_startblock += elen;
 479			next_map->br_startoff += elen;
 480			next_map->br_blockcount -= elen;
 481		}
 482		map->br_blockcount -= elen;
 483	} else {
 484		/*
 485		 * There's an unshared extent (agbno, ebno - agbno)
 486		 * followed by shared extent at (ebno, elen).  Shrink
 487		 * the reported extent length to cover only the unshared
 488		 * extent and prepare to move up the start of map[i] to
 489		 * ebno, with the aim of (re)formatting the new map[i]
 490		 * the next time through the inner loop.
 491		 */
 492		*next_map = *map;
 493		nlen = ebno - agbno;
 494		out->bmv_length = XFS_FSB_TO_BB(mp, nlen);
 495		next_map->br_startblock += nlen;
 496		next_map->br_startoff += nlen;
 497		next_map->br_blockcount -= nlen;
 498		map->br_blockcount -= nlen;
 499	}
 500
 501	return 0;
 502}
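/*
 * Worked example: if map[i] covers agbno 100 for 50 blocks and
 * xfs_reflink_find_shared() reports sharing starting at agbno 120 for
 * 10 blocks, the unshared run (100, 20) is reported now and the
 * remainder starting at agbno 120 is stashed in *next_map; the next
 * pass through the caller's loop then hits the agbno == ebno case and
 * reports (120, 10) with BMV_OF_SHARED set.
 */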
 503
 504/*
 505 * Get inode's extents as described in bmv, and format for output.
 506 * Calls formatter to fill the user's buffer until all extents
 507 * are mapped, until the passed-in bmv->bmv_count slots have
 508 * been filled, or until the formatter short-circuits the loop,
 509 * if it is tracking filled-in extents on its own.
 510 */
 511int						/* error code */
 512xfs_getbmap(
 513	xfs_inode_t		*ip,
 514	struct getbmapx		*bmv,		/* user bmap structure */
 515	xfs_bmap_format_t	formatter,	/* format to user */
 516	void			*arg)		/* formatter arg */
 517{
 518	__int64_t		bmvend;		/* last block requested */
 519	int			error = 0;	/* return value */
 520	__int64_t		fixlen;		/* length for -1 case */
 521	int			i;		/* extent number */
 522	int			lock;		/* lock state */
 523	xfs_bmbt_irec_t		*map;		/* buffer for user's data */
 524	xfs_mount_t		*mp;		/* file system mount point */
 525	int			nex;		/* # of user extents can do */
 526	int			subnex;		/* # of bmapi's can do */
 527	int			nmap;		/* number of map entries */
 528	struct getbmapx		*out;		/* output structure */
 529	int			whichfork;	/* data or attr fork */
 530	int			prealloced;	/* this is a file with
 531						 * preallocated data space */
 532	int			iflags;		/* interface flags */
 533	int			bmapi_flags;	/* flags for xfs_bmapi */
 534	int			cur_ext = 0;
 535	struct xfs_bmbt_irec	inject_map;
 536
 537	mp = ip->i_mount;
 538	iflags = bmv->bmv_iflags;
 539
 540#ifndef DEBUG
 541	/* Only allow CoW fork queries if we're debugging. */
 542	if (iflags & BMV_IF_COWFORK)
 543		return -EINVAL;
 544#endif
 545	if ((iflags & BMV_IF_ATTRFORK) && (iflags & BMV_IF_COWFORK))
 546		return -EINVAL;
 547
 548	if (iflags & BMV_IF_ATTRFORK)
 549		whichfork = XFS_ATTR_FORK;
 550	else if (iflags & BMV_IF_COWFORK)
 551		whichfork = XFS_COW_FORK;
 552	else
 553		whichfork = XFS_DATA_FORK;
 554
 555	switch (whichfork) {
 556	case XFS_ATTR_FORK:
 557		if (XFS_IFORK_Q(ip)) {
 558			if (ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS &&
 559			    ip->i_d.di_aformat != XFS_DINODE_FMT_BTREE &&
 560			    ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)
 561				return -EINVAL;
 562		} else if (unlikely(
 563			   ip->i_d.di_aformat != 0 &&
 564			   ip->i_d.di_aformat != XFS_DINODE_FMT_EXTENTS)) {
 565			XFS_ERROR_REPORT("xfs_getbmap", XFS_ERRLEVEL_LOW,
 566					 ip->i_mount);
 567			return -EFSCORRUPTED;
 568		}
 569
 570		prealloced = 0;
 571		fixlen = 1LL << 32;
 572		break;
 573	case XFS_COW_FORK:
 574		if (ip->i_cformat != XFS_DINODE_FMT_EXTENTS)
 575			return -EINVAL;
 576
 577		if (xfs_get_cowextsz_hint(ip)) {
 578			prealloced = 1;
 579			fixlen = mp->m_super->s_maxbytes;
 580		} else {
 581			prealloced = 0;
 582			fixlen = XFS_ISIZE(ip);
 583		}
 584		break;
 585	default:
 586		if (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS &&
 587		    ip->i_d.di_format != XFS_DINODE_FMT_BTREE &&
 588		    ip->i_d.di_format != XFS_DINODE_FMT_LOCAL)
 589			return -EINVAL;
 590
 591		if (xfs_get_extsz_hint(ip) ||
 592		    ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC|XFS_DIFLAG_APPEND)){
 593			prealloced = 1;
 594			fixlen = mp->m_super->s_maxbytes;
 595		} else {
 596			prealloced = 0;
 597			fixlen = XFS_ISIZE(ip);
 598		}
 599		break;
 600	}
 601
 602	if (bmv->bmv_length == -1) {
 603		fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, fixlen));
 604		bmv->bmv_length =
 605			max_t(__int64_t, fixlen - bmv->bmv_offset, 0);
 606	} else if (bmv->bmv_length == 0) {
 607		bmv->bmv_entries = 0;
 608		return 0;
 609	} else if (bmv->bmv_length < 0) {
 610		return -EINVAL;
 611	}
 612
 613	nex = bmv->bmv_count - 1;
 614	if (nex <= 0)
 615		return -EINVAL;
 616	bmvend = bmv->bmv_offset + bmv->bmv_length;
 617
 618
 619	if (bmv->bmv_count > ULONG_MAX / sizeof(struct getbmapx))
 620		return -ENOMEM;
 621	out = kmem_zalloc_large(bmv->bmv_count * sizeof(struct getbmapx), 0);
 622	if (!out)
 623		return -ENOMEM;
 624
 625	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 626	switch (whichfork) {
 627	case XFS_DATA_FORK:
 628		if (!(iflags & BMV_IF_DELALLOC) &&
 629		    (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size)) {
 630			error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
 631			if (error)
 632				goto out_unlock_iolock;
 633
 634			/*
 635			 * Even after flushing the inode, there can still be
 636			 * delalloc blocks on the inode beyond EOF due to
 637			 * speculative preallocation.  These are not removed
 638			 * until the release function is called or the inode
 639			 * is inactivated.  Hence we cannot assert here that
 640			 * ip->i_delayed_blks == 0.
 641			 */
 642		}
 643
 644		lock = xfs_ilock_data_map_shared(ip);
 645		break;
 646	case XFS_COW_FORK:
 647		lock = XFS_ILOCK_SHARED;
 648		xfs_ilock(ip, lock);
 649		break;
 650	case XFS_ATTR_FORK:
 651		lock = xfs_ilock_attr_map_shared(ip);
 652		break;
 653	}
 654
 655	/*
 656	 * Don't let nex be bigger than the number of extents
 657	 * we can have assuming alternating holes and real extents.
 658	 */
 659	if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
 660		nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
 661
 662	bmapi_flags = xfs_bmapi_aflag(whichfork);
 663	if (!(iflags & BMV_IF_PREALLOC))
 664		bmapi_flags |= XFS_BMAPI_IGSTATE;
 665
 666	/*
 667	 * Allocate enough space to handle "subnex" maps at a time.
 668	 */
 669	error = -ENOMEM;
 670	subnex = 16;
 671	map = kmem_alloc(subnex * sizeof(*map), KM_MAYFAIL | KM_NOFS);
 672	if (!map)
 673		goto out_unlock_ilock;
 674
 675	bmv->bmv_entries = 0;
 676
 677	if (XFS_IFORK_NEXTENTS(ip, whichfork) == 0 &&
 678	    (whichfork == XFS_ATTR_FORK || !(iflags & BMV_IF_DELALLOC))) {
 679		error = 0;
 680		goto out_free_map;
 681	}
 682
 683	do {
  684		nmap = (nex > subnex) ? subnex : nex;
 685		error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
 686				       XFS_BB_TO_FSB(mp, bmv->bmv_length),
 687				       map, &nmap, bmapi_flags);
 688		if (error)
 689			goto out_free_map;
 690		ASSERT(nmap <= subnex);
 691
 692		for (i = 0; i < nmap && bmv->bmv_length &&
 693				cur_ext < bmv->bmv_count - 1; i++) {
 694			out[cur_ext].bmv_oflags = 0;
 695			if (map[i].br_state == XFS_EXT_UNWRITTEN)
 696				out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
 697			else if (map[i].br_startblock == DELAYSTARTBLOCK)
 698				out[cur_ext].bmv_oflags |= BMV_OF_DELALLOC;
 699			out[cur_ext].bmv_offset =
 700				XFS_FSB_TO_BB(mp, map[i].br_startoff);
 701			out[cur_ext].bmv_length =
 702				XFS_FSB_TO_BB(mp, map[i].br_blockcount);
 703			out[cur_ext].bmv_unused1 = 0;
 704			out[cur_ext].bmv_unused2 = 0;
 705
 706			/*
 707			 * delayed allocation extents that start beyond EOF can
 708			 * occur due to speculative EOF allocation when the
 709			 * delalloc extent is larger than the largest freespace
 710			 * extent at conversion time. These extents cannot be
 711			 * converted by data writeback, so can exist here even
 712			 * if we are not supposed to be finding delalloc
 713			 * extents.
 714			 */
 715			if (map[i].br_startblock == DELAYSTARTBLOCK &&
 716			    map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
 717				ASSERT((iflags & BMV_IF_DELALLOC) != 0);
 718
  719			if (map[i].br_startblock == HOLESTARTBLOCK &&
 720			    whichfork == XFS_ATTR_FORK) {
 721				/* came to the end of attribute fork */
 722				out[cur_ext].bmv_oflags |= BMV_OF_LAST;
 723				goto out_free_map;
 724			}
 725
 726			/* Is this a shared block? */
 727			error = xfs_getbmap_adjust_shared(ip, whichfork,
 728					&map[i], &out[cur_ext], &inject_map);
 729			if (error)
 730				goto out_free_map;
 731
 732			if (!xfs_getbmapx_fix_eof_hole(ip, whichfork,
 733					&out[cur_ext], prealloced, bmvend,
 734					map[i].br_startblock,
 735					inject_map.br_startblock != NULLFSBLOCK))
 736				goto out_free_map;
 737
 738			bmv->bmv_offset =
 739				out[cur_ext].bmv_offset +
 740				out[cur_ext].bmv_length;
 741			bmv->bmv_length =
 742				max_t(__int64_t, 0, bmvend - bmv->bmv_offset);
 743
 744			/*
 745			 * In case we don't want to return the hole,
 746			 * don't increase cur_ext so that we can reuse
 747			 * it in the next loop.
 748			 */
 749			if ((iflags & BMV_IF_NO_HOLES) &&
 750			    map[i].br_startblock == HOLESTARTBLOCK) {
 751				memset(&out[cur_ext], 0, sizeof(out[cur_ext]));
 752				continue;
 753			}
 754
 755			/*
 756			 * In order to report shared extents accurately,
 757			 * we report each distinct shared/unshared part
 758			 * of a single bmbt record using multiple bmap
 759			 * extents.  To make that happen, we iterate the
 760			 * same map array item multiple times, each
 761			 * time trimming out the subextent that we just
 762			 * reported.
 763			 *
 764			 * Because of this, we must check the out array
 765			 * index (cur_ext) directly against bmv_count-1
 766			 * to avoid overflows.
 767			 */
 768			if (inject_map.br_startblock != NULLFSBLOCK) {
 769				map[i] = inject_map;
 770				i--;
 771			}
 772			bmv->bmv_entries++;
 773			cur_ext++;
 774		}
 775	} while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
 776
 777 out_free_map:
 778	kmem_free(map);
 779 out_unlock_ilock:
 780	xfs_iunlock(ip, lock);
 781 out_unlock_iolock:
 782	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 783
 784	for (i = 0; i < cur_ext; i++) {
 785		/* format results & advance arg */
 786		error = formatter(&arg, &out[i]);
 787		if (error)
 788			break;
 789	}
 790
 791	kmem_free(out);
 792	return error;
 793}
 794
 795/*
  796 * dead simple method of punching delayed allocation blocks from a range in
 797 * the inode. Walks a block at a time so will be slow, but is only executed in
 798 * rare error cases so the overhead is not critical. This will always punch out
 799 * both the start and end blocks, even if the ranges only partially overlap
 800 * them, so it is up to the caller to ensure that partial blocks are not
 801 * passed in.
 802 */
 803int
 804xfs_bmap_punch_delalloc_range(
 805	struct xfs_inode	*ip,
 806	xfs_fileoff_t		start_fsb,
 807	xfs_fileoff_t		length)
 808{
 809	xfs_fileoff_t		remaining = length;
 810	int			error = 0;
 811
 812	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 813
 814	do {
 815		int		done;
 816		xfs_bmbt_irec_t	imap;
 817		int		nimaps = 1;
 818		xfs_fsblock_t	firstblock;
 819		struct xfs_defer_ops dfops;
 820
 821		/*
 822		 * Map the range first and check that it is a delalloc extent
 823		 * before trying to unmap the range. Otherwise we will be
 824		 * trying to remove a real extent (which requires a
 825		 * transaction) or a hole, which is probably a bad idea...
 826		 */
 827		error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
 828				       XFS_BMAPI_ENTIRE);
 829
 830		if (error) {
 831			/* something screwed, just bail */
 832			if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 833				xfs_alert(ip->i_mount,
 834			"Failed delalloc mapping lookup ino %lld fsb %lld.",
 835						ip->i_ino, start_fsb);
 836			}
 837			break;
 838		}
 839		if (!nimaps) {
 840			/* nothing there */
 841			goto next_block;
 842		}
 843		if (imap.br_startblock != DELAYSTARTBLOCK) {
 844			/* been converted, ignore */
 845			goto next_block;
 846		}
 847		WARN_ON(imap.br_blockcount == 0);
 848
 849		/*
 850		 * Note: while we initialise the firstblock/dfops pair, they
 851		 * should never be used because blocks should never be
  852		 * allocated or freed for a delalloc extent, and hence we don't
  853		 * need to cancel or finish them after the xfs_bunmapi() call.
 854		 */
 855		xfs_defer_init(&dfops, &firstblock);
 856		error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
 857					&dfops, &done);
 858		if (error)
 859			break;
 860
 861		ASSERT(!xfs_defer_has_unfinished_work(&dfops));
 862next_block:
 863		start_fsb++;
 864		remaining--;
 865	} while (remaining > 0);
 866
 867	return error;
 868}
 869
 870/*
 871 * Test whether it is appropriate to check an inode for and free post EOF
 872 * blocks. The 'force' parameter determines whether we should also consider
 873 * regular files that are marked preallocated or append-only.
 874 */
 875bool
 876xfs_can_free_eofblocks(struct xfs_inode *ip, bool force)
 877{
 878	/* prealloc/delalloc exists only on regular files */
 879	if (!S_ISREG(VFS_I(ip)->i_mode))
 880		return false;
 881
 882	/*
 883	 * Zero sized files with no cached pages and delalloc blocks will not
 884	 * have speculative prealloc/delalloc blocks to remove.
 885	 */
 886	if (VFS_I(ip)->i_size == 0 &&
 887	    VFS_I(ip)->i_mapping->nrpages == 0 &&
 888	    ip->i_delayed_blks == 0)
 889		return false;
 890
 891	/* If we haven't read in the extent list, then don't do it now. */
 892	if (!(ip->i_df.if_flags & XFS_IFEXTENTS))
 893		return false;
 894
 895	/*
 896	 * Do not free real preallocated or append-only files unless the file
 897	 * has delalloc blocks and we are forced to remove them.
 898	 */
 899	if (ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND))
 900		if (!force || ip->i_delayed_blks == 0)
 901			return false;
 902
 903	return true;
 904}
 905
 906/*
 907 * This is called by xfs_inactive to free any blocks beyond eof
 908 * when the link count isn't zero and by xfs_dm_punch_hole() when
 909 * punching a hole to EOF.
 910 */
 911int
 912xfs_free_eofblocks(
 913	struct xfs_inode	*ip)
 914{
 915	struct xfs_trans	*tp;
 916	int			error;
 917	xfs_fileoff_t		end_fsb;
 918	xfs_fileoff_t		last_fsb;
 919	xfs_filblks_t		map_len;
 920	int			nimaps;
 921	struct xfs_bmbt_irec	imap;
 922	struct xfs_mount	*mp = ip->i_mount;
 923
 924	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 925
 926	/*
 927	 * Figure out if there are any blocks beyond the end
 928	 * of the file.  If not, then there is nothing to do.
 929	 */
 930	end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
 931	last_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
 932	if (last_fsb <= end_fsb)
 933		return 0;
 934	map_len = last_fsb - end_fsb;
 935
 936	nimaps = 1;
 937	xfs_ilock(ip, XFS_ILOCK_SHARED);
 938	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
 939	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 940
 941	/*
 942	 * If there are blocks after the end of file, truncate the file to its
 943	 * current size to free them up.
 944	 */
 945	if (!error && (nimaps != 0) &&
 946	    (imap.br_startblock != HOLESTARTBLOCK ||
 947	     ip->i_delayed_blks)) {
 948		/*
 949		 * Attach the dquots to the inode up front.
 950		 */
 951		error = xfs_qm_dqattach(ip, 0);
 952		if (error)
 953			return error;
 954
 955		/* wait on dio to ensure i_size has settled */
 956		inode_dio_wait(VFS_I(ip));
 957
 958		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
 959				&tp);
 960		if (error) {
 961			ASSERT(XFS_FORCED_SHUTDOWN(mp));
 962			return error;
 963		}
 964
 965		xfs_ilock(ip, XFS_ILOCK_EXCL);
 966		xfs_trans_ijoin(tp, ip, 0);
 967
 968		/*
 969		 * Do not update the on-disk file size.  If we update the
 970		 * on-disk file size and then the system crashes before the
 971		 * contents of the file are flushed to disk then the files
 972		 * may be full of holes (ie NULL files bug).
 973		 */
 974		error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
 975					      XFS_ISIZE(ip));
 976		if (error) {
 977			/*
 978			 * If we get an error at this point we simply don't
 979			 * bother truncating the file.
 980			 */
 981			xfs_trans_cancel(tp);
 982		} else {
 983			error = xfs_trans_commit(tp);
 984			if (!error)
 985				xfs_inode_clear_eofblocks_tag(ip);
 986		}
 987
 988		xfs_iunlock(ip, XFS_ILOCK_EXCL);
 989	}
 990	return error;
 991}
 992
 993int
 994xfs_alloc_file_space(
 995	struct xfs_inode	*ip,
 996	xfs_off_t		offset,
 997	xfs_off_t		len,
 998	int			alloc_type)
 999{
1000	xfs_mount_t		*mp = ip->i_mount;
1001	xfs_off_t		count;
1002	xfs_filblks_t		allocated_fsb;
1003	xfs_filblks_t		allocatesize_fsb;
1004	xfs_extlen_t		extsz, temp;
1005	xfs_fileoff_t		startoffset_fsb;
1006	xfs_fsblock_t		firstfsb;
1007	int			nimaps;
1008	int			quota_flag;
1009	int			rt;
1010	xfs_trans_t		*tp;
1011	xfs_bmbt_irec_t		imaps[1], *imapp;
1012	struct xfs_defer_ops	dfops;
1013	uint			qblocks, resblks, resrtextents;
1014	int			error;
1015
1016	trace_xfs_alloc_file_space(ip);
1017
1018	if (XFS_FORCED_SHUTDOWN(mp))
1019		return -EIO;
1020
1021	error = xfs_qm_dqattach(ip, 0);
1022	if (error)
1023		return error;
1024
1025	if (len <= 0)
1026		return -EINVAL;
1027
1028	rt = XFS_IS_REALTIME_INODE(ip);
1029	extsz = xfs_get_extsz_hint(ip);
1030
1031	count = len;
1032	imapp = &imaps[0];
1033	nimaps = 1;
1034	startoffset_fsb	= XFS_B_TO_FSBT(mp, offset);
1035	allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1036
1037	/*
1038	 * Allocate file space until done or until there is an error
1039	 */
1040	while (allocatesize_fsb && !error) {
1041		xfs_fileoff_t	s, e;
1042
1043		/*
1044		 * Determine space reservations for data/realtime.
1045		 */
1046		if (unlikely(extsz)) {
1047			s = startoffset_fsb;
1048			do_div(s, extsz);
1049			s *= extsz;
1050			e = startoffset_fsb + allocatesize_fsb;
1051			if ((temp = do_mod(startoffset_fsb, extsz)))
1052				e += temp;
1053			if ((temp = do_mod(e, extsz)))
1054				e += extsz - temp;
1055		} else {
1056			s = 0;
1057			e = allocatesize_fsb;
1058		}
1059
1060		/*
1061		 * The transaction reservation is limited to a 32-bit block
1062		 * count, hence we need to limit the number of blocks we are
1063		 * trying to reserve to avoid an overflow. We can't allocate
1064		 * more than @nimaps extents, and an extent is limited on disk
1065		 * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1066		 */
1067		resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
1068		if (unlikely(rt)) {
1069			resrtextents = qblocks = resblks;
1070			resrtextents /= mp->m_sb.sb_rextsize;
1071			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1072			quota_flag = XFS_QMOPT_RES_RTBLKS;
1073		} else {
1074			resrtextents = 0;
1075			resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1076			quota_flag = XFS_QMOPT_RES_REGBLKS;
1077		}
1078
1079		/*
1080		 * Allocate and setup the transaction.
1081		 */
1082		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks,
1083				resrtextents, 0, &tp);
1084
1085		/*
1086		 * Check for running out of space
1087		 */
1088		if (error) {
1089			/*
1090			 * Free the transaction structure.
1091			 */
1092			ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1093			break;
1094		}
1095		xfs_ilock(ip, XFS_ILOCK_EXCL);
1096		error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1097						      0, quota_flag);
1098		if (error)
1099			goto error1;
1100
1101		xfs_trans_ijoin(tp, ip, 0);
1102
1103		xfs_defer_init(&dfops, &firstfsb);
1104		error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1105					allocatesize_fsb, alloc_type, &firstfsb,
1106					resblks, imapp, &nimaps, &dfops);
1107		if (error)
1108			goto error0;
1109
1110		/*
1111		 * Complete the transaction
1112		 */
1113		error = xfs_defer_finish(&tp, &dfops, NULL);
1114		if (error)
1115			goto error0;
1116
1117		error = xfs_trans_commit(tp);
1118		xfs_iunlock(ip, XFS_ILOCK_EXCL);
1119		if (error)
1120			break;
1121
1122		allocated_fsb = imapp->br_blockcount;
1123
1124		if (nimaps == 0) {
1125			error = -ENOSPC;
1126			break;
1127		}
1128
1129		startoffset_fsb += allocated_fsb;
1130		allocatesize_fsb -= allocated_fsb;
1131	}
1132
1133	return error;
1134
1135error0:	/* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1136	xfs_defer_cancel(&dfops);
1137	xfs_trans_unreserve_quota_nblks(tp, ip, (long)qblocks, 0, quota_flag);
1138
1139error1:	/* Just cancel transaction */
1140	xfs_trans_cancel(tp);
1141	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1142	return error;
1143}
1144
1145static int
1146xfs_unmap_extent(
1147	struct xfs_inode	*ip,
1148	xfs_fileoff_t		startoffset_fsb,
1149	xfs_filblks_t		len_fsb,
1150	int			*done)
1151{
1152	struct xfs_mount	*mp = ip->i_mount;
1153	struct xfs_trans	*tp;
1154	struct xfs_defer_ops	dfops;
1155	xfs_fsblock_t		firstfsb;
1156	uint			resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1157	int			error;
1158
1159	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0, &tp);
1160	if (error) {
1161		ASSERT(error == -ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1162		return error;
1163	}
1164
1165	xfs_ilock(ip, XFS_ILOCK_EXCL);
1166	error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot, ip->i_gdquot,
1167			ip->i_pdquot, resblks, 0, XFS_QMOPT_RES_REGBLKS);
1168	if (error)
1169		goto out_trans_cancel;
1170
1171	xfs_trans_ijoin(tp, ip, 0);
1172
1173	xfs_defer_init(&dfops, &firstfsb);
1174	error = xfs_bunmapi(tp, ip, startoffset_fsb, len_fsb, 0, 2, &firstfsb,
1175			&dfops, done);
1176	if (error)
1177		goto out_bmap_cancel;
1178
1179	error = xfs_defer_finish(&tp, &dfops, ip);
1180	if (error)
1181		goto out_bmap_cancel;
1182
1183	error = xfs_trans_commit(tp);
1184out_unlock:
1185	xfs_iunlock(ip, XFS_ILOCK_EXCL);
1186	return error;
1187
1188out_bmap_cancel:
1189	xfs_defer_cancel(&dfops);
1190out_trans_cancel:
1191	xfs_trans_cancel(tp);
1192	goto out_unlock;
1193}
1194
1195static int
1196xfs_adjust_extent_unmap_boundaries(
1197	struct xfs_inode	*ip,
1198	xfs_fileoff_t		*startoffset_fsb,
1199	xfs_fileoff_t		*endoffset_fsb)
1200{
1201	struct xfs_mount	*mp = ip->i_mount;
1202	struct xfs_bmbt_irec	imap;
1203	int			nimap, error;
1204	xfs_extlen_t		mod = 0;
1205
1206	nimap = 1;
1207	error = xfs_bmapi_read(ip, *startoffset_fsb, 1, &imap, &nimap, 0);
1208	if (error)
1209		return error;
1210
1211	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1212		xfs_daddr_t	block;
1213
1214		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1215		block = imap.br_startblock;
1216		mod = do_div(block, mp->m_sb.sb_rextsize);
1217		if (mod)
1218			*startoffset_fsb += mp->m_sb.sb_rextsize - mod;
1219	}
1220
1221	nimap = 1;
1222	error = xfs_bmapi_read(ip, *endoffset_fsb - 1, 1, &imap, &nimap, 0);
1223	if (error)
1224		return error;
1225
1226	if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
1227		ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1228		mod++;
1229		if (mod && mod != mp->m_sb.sb_rextsize)
1230			*endoffset_fsb -= mod;
1231	}
1232
1233	return 0;
1234}
1235
1236static int
1237xfs_flush_unmap_range(
1238	struct xfs_inode	*ip,
1239	xfs_off_t		offset,
1240	xfs_off_t		len)
1241{
1242	struct xfs_mount	*mp = ip->i_mount;
1243	struct inode		*inode = VFS_I(ip);
1244	xfs_off_t		rounding, start, end;
1245	int			error;
1246
1247	/* wait for the completion of any pending DIOs */
1248	inode_dio_wait(inode);
1249
1250	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_SIZE);
1251	start = round_down(offset, rounding);
1252	end = round_up(offset + len, rounding) - 1;
1253
1254	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
1255	if (error)
1256		return error;
1257	truncate_pagecache_range(inode, start, end);
1258	return 0;
1259}
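/*
 * Rounding to max(filesystem block size, PAGE_SIZE) above ensures that
 * both whole pages and whole filesystem blocks are written back and
 * invalidated; e.g. with 1024-byte blocks on 4096-byte pages, the range
 * is expanded to page granularity.
 */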
1260
1261int
1262xfs_free_file_space(
1263	struct xfs_inode	*ip,
1264	xfs_off_t		offset,
1265	xfs_off_t		len)
1266{
1267	struct xfs_mount	*mp = ip->i_mount;
1268	xfs_fileoff_t		startoffset_fsb;
1269	xfs_fileoff_t		endoffset_fsb;
1270	int			done = 0, error;
1271
1272	trace_xfs_free_file_space(ip);
1273
1274	error = xfs_qm_dqattach(ip, 0);
1275	if (error)
1276		return error;
1277
1278	if (len <= 0)	/* if nothing being freed */
1279		return 0;
1280
1281	error = xfs_flush_unmap_range(ip, offset, len);
1282	if (error)
1283		return error;
1284
1285	startoffset_fsb = XFS_B_TO_FSB(mp, offset);
1286	endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
1287
1288	/*
1289	 * Need to zero the stuff we're not freeing, on disk.  If it's a RT file
1290	 * and we can't use unwritten extents then we actually need to zero
1291	 * the whole extent; otherwise we just need to take care of the block
1292	 * boundaries, and xfs_bunmapi will handle the rest.
1293	 */
1294	if (XFS_IS_REALTIME_INODE(ip) &&
1295	    !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
1296		error = xfs_adjust_extent_unmap_boundaries(ip, &startoffset_fsb,
1297				&endoffset_fsb);
1298		if (error)
1299			return error;
1300	}
1301
1302	if (endoffset_fsb > startoffset_fsb) {
1303		while (!done) {
1304			error = xfs_unmap_extent(ip, startoffset_fsb,
1305					endoffset_fsb - startoffset_fsb, &done);
1306			if (error)
1307				return error;
1308		}
1309	}
1310
1311	/*
1312	 * Now that we've unmapped all full blocks we'll have to zero out any
1313	 * partial block at the beginning and/or end.  xfs_zero_range is
1314	 * smart enough to skip any holes, including those we just created,
1315	 * but we must take care not to zero beyond EOF and enlarge i_size.
1316	 */
1317
1318	if (offset >= XFS_ISIZE(ip))
1319		return 0;
1320
1321	if (offset + len > XFS_ISIZE(ip))
1322		len = XFS_ISIZE(ip) - offset;
1323
1324	return xfs_zero_range(ip, offset, len, NULL);
1325}
1326
1327/*
1328 * Preallocate and zero a range of a file. This mechanism has the allocation
1329 * semantics of fallocate and in addition converts data in the range to zeroes.
1330 */
1331int
1332xfs_zero_file_space(
1333	struct xfs_inode	*ip,
1334	xfs_off_t		offset,
1335	xfs_off_t		len)
1336{
1337	struct xfs_mount	*mp = ip->i_mount;
1338	uint			blksize;
1339	int			error;
1340
1341	trace_xfs_zero_file_space(ip);
1342
1343	blksize = 1 << mp->m_sb.sb_blocklog;
1344
1345	/*
1346	 * Punch a hole and prealloc the range. We use hole punch rather than
1347	 * unwritten extent conversion for two reasons:
1348	 *
1349	 * 1.) Hole punch handles partial block zeroing for us.
1350	 *
1351	 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1352	 * by virtue of the hole punch.
1353	 */
1354	error = xfs_free_file_space(ip, offset, len);
1355	if (error)
1356		goto out;
1357
1358	error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1359				     round_up(offset + len, blksize) -
1360				     round_down(offset, blksize),
1361				     XFS_BMAPI_PREALLOC);
1362out:
1363	return error;
1364
1365}
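/*
 * Worked example, assuming 4096-byte blocks and a range below EOF:
 * zeroing offset 1536 for 1024 bytes covers no whole block, so
 * xfs_free_file_space() only zeroes the sub-block range [1536, 2560)
 * in place; the subsequent preallocation then covers the rounded
 * range [0, 4096).
 */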
1366
1367/*
1368 * @next_fsb will keep track of the extent currently undergoing shift.
1369 * @stop_fsb will keep track of the extent at which we have to stop.
1370 * If we are shifting left, we will start with block (offset + len) and
1371 * shift each extent till last extent.
1372 * If we are shifting right, we will start with last extent inside file space
1373 * and continue until we reach the block corresponding to offset.
1374 */
1375static int
1376xfs_shift_file_space(
1377	struct xfs_inode        *ip,
1378	xfs_off_t               offset,
1379	xfs_off_t               len,
1380	enum shift_direction	direction)
1381{
1382	int			done = 0;
1383	struct xfs_mount	*mp = ip->i_mount;
1384	struct xfs_trans	*tp;
1385	int			error;
1386	struct xfs_defer_ops	dfops;
1387	xfs_fsblock_t		first_block;
1388	xfs_fileoff_t		stop_fsb;
1389	xfs_fileoff_t		next_fsb;
1390	xfs_fileoff_t		shift_fsb;
1391	uint			resblks;
1392
1393	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
1394
1395	if (direction == SHIFT_LEFT) {
1396		/*
1397		 * Reserve blocks to cover potential extent merges after left
1398		 * shift operations.
1399		 */
1400		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1401		next_fsb = XFS_B_TO_FSB(mp, offset + len);
1402		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
1403	} else {
1404		/*
1405		 * If right shift, delegate the work of initialization of
1406		 * next_fsb to xfs_bmap_shift_extents as it has the ilock held.
1407		 */
1408		resblks = 0;
1409		next_fsb = NULLFSBLOCK;
1410		stop_fsb = XFS_B_TO_FSB(mp, offset);
1411	}
1412
1413	shift_fsb = XFS_B_TO_FSB(mp, len);
1414
1415	/*
1416	 * Trim eofblocks to avoid shifting uninitialized post-eof preallocation
1417	 * into the accessible region of the file.
1418	 */
1419	if (xfs_can_free_eofblocks(ip, true)) {
1420		error = xfs_free_eofblocks(ip);
1421		if (error)
1422			return error;
1423	}
1424
1425	/*
1426	 * Writeback and invalidate cache for the remainder of the file as we're
1427	 * about to shift down every extent from offset to EOF.
1428	 */
1429	error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1430					     offset, -1);
1431	if (error)
1432		return error;
1433	error = invalidate_inode_pages2_range(VFS_I(ip)->i_mapping,
1434					offset >> PAGE_SHIFT, -1);
1435	if (error)
1436		return error;
1437
1438	/*
1439	 * The extent shifting code works at extent granularity. So, if
1440	 * stop_fsb is not the starting block of an extent, we need to split
1441	 * the extent at stop_fsb.
1442	 */
1443	if (direction == SHIFT_RIGHT) {
1444		error = xfs_bmap_split_extent(ip, stop_fsb);
1445		if (error)
1446			return error;
1447	}
1448
1449	while (!error && !done) {
1450		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
1451					&tp);
1452		if (error)
1453			break;
1454
1455		xfs_ilock(ip, XFS_ILOCK_EXCL);
1456		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
1457				ip->i_gdquot, ip->i_pdquot, resblks, 0,
1458				XFS_QMOPT_RES_REGBLKS);
1459		if (error)
1460			goto out_trans_cancel;
1461
1462		xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1463
1464		xfs_defer_init(&dfops, &first_block);
1465
1466		/*
1467		 * We are using the write transaction, in which at most two
1468		 * bmbt updates are allowed.
1469		 */
1470		error = xfs_bmap_shift_extents(tp, ip, &next_fsb, shift_fsb,
1471				&done, stop_fsb, &first_block, &dfops,
1472				direction, XFS_BMAP_MAX_SHIFT_EXTENTS);
1473		if (error)
1474			goto out_bmap_cancel;
1475
1476		error = xfs_defer_finish(&tp, &dfops, NULL);
1477		if (error)
1478			goto out_bmap_cancel;
1479
1480		error = xfs_trans_commit(tp);
1481	}
1482
1483	return error;
1484
1485out_bmap_cancel:
1486	xfs_defer_cancel(&dfops);
1487out_trans_cancel:
1488	xfs_trans_cancel(tp);
1489	return error;
1490}
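/*
 * Note that the shift loop above bounds the work done per transaction:
 * each pass allocates a fresh transaction, shifts at most
 * XFS_BMAP_MAX_SHIFT_EXTENTS extents, finishes the deferred ops and
 * commits, so a large file is shifted as a series of small transactions
 * rather than one unbounded one.
 */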
1491
1492/*
1493 * xfs_collapse_file_space()
 1494 *	This routine frees disk space and shifts extents for the given file.
 1495 *	The first thing we do is free the data blocks in the specified
 1496 *	range by calling xfs_free_file_space(), which also syncs dirty data
 1497 *	and invalidates the page cache over the region being collapsed.
 1498 *	Then extent records are shifted left to cover the hole.
1499 * RETURNS:
1500 *	0 on success
1501 *	errno on error
1502 *
1503 */
1504int
1505xfs_collapse_file_space(
1506	struct xfs_inode	*ip,
1507	xfs_off_t		offset,
1508	xfs_off_t		len)
1509{
1510	int error;
1511
1512	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1513	trace_xfs_collapse_file_space(ip);
1514
1515	error = xfs_free_file_space(ip, offset, len);
1516	if (error)
1517		return error;
1518
1519	return xfs_shift_file_space(ip, offset, len, SHIFT_LEFT);
1520}
1521
1522/*
1523 * xfs_insert_file_space()
 1524 *	This routine creates hole space by shifting extents for the given
 1525 *	file. The first thing we do is sync dirty data and invalidate the
 1526 *	page cache over the region on which the insert range is working.
 1527 *	Then we split an extent into two at the given offset by calling
 1528 *	xfs_bmap_split_extent, and shift all extent records lying between
 1529 *	[offset, last allocated extent] to the right to make room for the hole.
1530 * RETURNS:
1531 *	0 on success
1532 *	errno on error
1533 */
1534int
1535xfs_insert_file_space(
1536	struct xfs_inode	*ip,
1537	loff_t			offset,
1538	loff_t			len)
1539{
1540	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1541	trace_xfs_insert_file_space(ip);
1542
1543	return xfs_shift_file_space(ip, offset, len, SHIFT_RIGHT);
1544}
1545
1546/*
1547 * We need to check that the format of the data fork in the temporary inode is
1548 * valid for the target inode before doing the swap. This is not a problem with
1549 * attr1 because of the fixed fork offset, but attr2 has a dynamically sized
1550 * data fork depending on the space the attribute fork is taking so we can get
1551 * invalid formats on the target inode.
1552 *
1553 * E.g. target has space for 7 extents in extent format, temp inode only has
1554 * space for 6.  If we defragment down to 7 extents, then the tmp format is a
1555 * btree, but when swapped it needs to be in extent format. Hence we can't just
1556 * blindly swap data forks on attr2 filesystems.
1557 *
1558 * Note that we check the swap in both directions so that we don't end up with
1559 * a corrupt temporary inode, either.
1560 *
1561 * Note that fixing the way xfs_fsr sets up the attribute fork in the source
1562 * inode will prevent this situation from occurring, so all we do here is
1563 * reject and log the attempt. basically we are putting the responsibility on
1564 * userspace to get this right.
1565 */
1566static int
1567xfs_swap_extents_check_format(
1568	struct xfs_inode	*ip,	/* target inode */
1569	struct xfs_inode	*tip)	/* tmp inode */
1570{
1571
1572	/* Should never get a local format */
1573	if (ip->i_d.di_format == XFS_DINODE_FMT_LOCAL ||
1574	    tip->i_d.di_format == XFS_DINODE_FMT_LOCAL)
1575		return -EINVAL;
1576
1577	/*
1578	 * if the target inode has fewer extents than the temporary inode then
1579	 * why did userspace call us?
1580	 */
1581	if (ip->i_d.di_nextents < tip->i_d.di_nextents)
1582		return -EINVAL;
1583
1584	/*
1585	 * If we have to use the (expensive) rmap swap method, we can
1586	 * handle any number of extents and any format.
1587	 */
1588	if (xfs_sb_version_hasrmapbt(&ip->i_mount->m_sb))
1589		return 0;
1590
1591	/*
1592	 * if the target inode is in extent form and the temp inode is in btree
1593	 * form then we will end up with the target inode in the wrong format
1594	 * as we already know there are fewer extents in the temp inode.
1595	 */
1596	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1597	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE)
1598		return -EINVAL;
1599
1600	/* Check temp in extent form to max in target */
1601	if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1602	    XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
1603			XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1604		return -EINVAL;
1605
1606	/* Check target in extent form to max in temp */
1607	if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
1608	    XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
1609			XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1610		return -EINVAL;
1611
1612	/*
1613	 * If we are in a btree format, check that the temp root block will fit
1614	 * in the target and that it has enough extents to be in btree format
1615	 * in the target.
1616	 *
1617	 * Note that we have to be careful to allow btree->extent conversions
1618	 * (a common defrag case) which will occur when the temp inode is in
1619	 * extent format...
1620	 */
1621	if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1622		if (XFS_IFORK_BOFF(ip) &&
1623		    XFS_BMAP_BMDR_SPACE(tip->i_df.if_broot) > XFS_IFORK_BOFF(ip))
1624			return -EINVAL;
1625		if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
1626		    XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
1627			return -EINVAL;
1628	}
1629
1630	/* Reciprocal target->temp btree format checks */
1631	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1632		if (XFS_IFORK_BOFF(tip) &&
1633		    XFS_BMAP_BMDR_SPACE(ip->i_df.if_broot) > XFS_IFORK_BOFF(tip))
1634			return -EINVAL;
1635		if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
1636		    XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
1637			return -EINVAL;
1638	}
1639
1640	return 0;
1641}
1642
1643static int
1644xfs_swap_extent_flush(
1645	struct xfs_inode	*ip)
1646{
1647	int	error;
1648
1649	error = filemap_write_and_wait(VFS_I(ip)->i_mapping);
1650	if (error)
1651		return error;
1652	truncate_pagecache_range(VFS_I(ip), 0, -1);
1653
1654	/* Verify O_DIRECT for ftmp */
1655	if (VFS_I(ip)->i_mapping->nrpages)
1656		return -EINVAL;
1657	return 0;
1658}
1659
1660/*
1661 * Move extents from one file to another, when rmap is enabled.
1662 */
1663STATIC int
1664xfs_swap_extent_rmap(
1665	struct xfs_trans		**tpp,
1666	struct xfs_inode		*ip,
1667	struct xfs_inode		*tip)
1668{
1669	struct xfs_bmbt_irec		irec;
1670	struct xfs_bmbt_irec		uirec;
1671	struct xfs_bmbt_irec		tirec;
1672	xfs_fileoff_t			offset_fsb;
1673	xfs_fileoff_t			end_fsb;
1674	xfs_filblks_t			count_fsb;
1675	xfs_fsblock_t			firstfsb;
1676	struct xfs_defer_ops		dfops;
1677	int				error;
1678	xfs_filblks_t			ilen;
1679	xfs_filblks_t			rlen;
1680	int				nimaps;
1681	__uint64_t			tip_flags2;
1682
1683	/*
1684	 * If the source file has shared blocks, we must flag the donor
1685	 * file as having shared blocks so that we get the shared-block
1686	 * rmap functions when we go to fix up the rmaps.  The flags
1687	 * will be switched for real later.
1688	 */
1689	tip_flags2 = tip->i_d.di_flags2;
1690	if (ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK)
1691		tip->i_d.di_flags2 |= XFS_DIFLAG2_REFLINK;
1692
1693	offset_fsb = 0;
1694	end_fsb = XFS_B_TO_FSB(ip->i_mount, i_size_read(VFS_I(ip)));
1695	count_fsb = (xfs_filblks_t)(end_fsb - offset_fsb);
1696
1697	while (count_fsb) {
1698		/* Read extent from the donor file */
1699		nimaps = 1;
1700		error = xfs_bmapi_read(tip, offset_fsb, count_fsb, &tirec,
1701				&nimaps, 0);
1702		if (error)
1703			goto out;
1704		ASSERT(nimaps == 1);
1705		ASSERT(tirec.br_startblock != DELAYSTARTBLOCK);
1706
1707		trace_xfs_swap_extent_rmap_remap(tip, &tirec);
1708		ilen = tirec.br_blockcount;
1709
1710		/* Unmap the old blocks in the source file. */
1711		while (tirec.br_blockcount) {
1712			xfs_defer_init(&dfops, &firstfsb);
1713			trace_xfs_swap_extent_rmap_remap_piece(tip, &tirec);
1714
1715			/* Read extent from the source file */
1716			nimaps = 1;
1717			error = xfs_bmapi_read(ip, tirec.br_startoff,
1718					tirec.br_blockcount, &irec,
1719					&nimaps, 0);
1720			if (error)
1721				goto out_defer;
1722			ASSERT(nimaps == 1);
1723			ASSERT(tirec.br_startoff == irec.br_startoff);
1724			trace_xfs_swap_extent_rmap_remap_piece(ip, &irec);
1725
1726			/* Trim the extent. */
1727			uirec = tirec;
1728			uirec.br_blockcount = rlen = min_t(xfs_filblks_t,
1729					tirec.br_blockcount,
1730					irec.br_blockcount);
1731			trace_xfs_swap_extent_rmap_remap_piece(tip, &uirec);
1732
1733			/* Remove the mapping from the donor file. */
1734			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1735					tip, &uirec);
1736			if (error)
1737				goto out_defer;
1738
1739			/* Remove the mapping from the source file. */
1740			error = xfs_bmap_unmap_extent((*tpp)->t_mountp, &dfops,
1741					ip, &irec);
1742			if (error)
1743				goto out_defer;
1744
1745			/* Map the donor file's blocks into the source file. */
1746			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1747					ip, &uirec);
1748			if (error)
1749				goto out_defer;
1750
1751			/* Map the source file's blocks into the donor file. */
1752			error = xfs_bmap_map_extent((*tpp)->t_mountp, &dfops,
1753					tip, &irec);
1754			if (error)
1755				goto out_defer;
1756
1757			error = xfs_defer_finish(tpp, &dfops, ip);
1758			if (error)
1759				goto out_defer;
1760
1761			tirec.br_startoff += rlen;
1762			if (tirec.br_startblock != HOLESTARTBLOCK &&
1763			    tirec.br_startblock != DELAYSTARTBLOCK)
1764				tirec.br_startblock += rlen;
1765			tirec.br_blockcount -= rlen;
1766		}
1767
1768		/* Roll on... */
1769		count_fsb -= ilen;
1770		offset_fsb += ilen;
1771	}
1772
1773	tip->i_d.di_flags2 = tip_flags2;
1774	return 0;
1775
1776out_defer:
1777	xfs_defer_cancel(&dfops);
1778out:
1779	trace_xfs_swap_extent_rmap_error(ip, error, _RET_IP_);
1780	tip->i_d.di_flags2 = tip_flags2;
1781	return error;
1782}
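/*
 * To summarise the remap loop above: for each extent in the donor file
 * we look up the overlapping mapping in the source file, trim to the
 * shorter of the two, queue deferred rmap-aware unmap/map operations
 * that exchange the two mappings, and finish the deferred work (rolling
 * the transaction) before advancing by the trimmed length.
 */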
1783
1784/* Swap the extents of two files by swapping data forks. */
1785STATIC int
1786xfs_swap_extent_forks(
1787	struct xfs_trans	*tp,
1788	struct xfs_inode	*ip,
1789	struct xfs_inode	*tip,
1790	int			*src_log_flags,
1791	int			*target_log_flags)
1792{
1793	struct xfs_ifork	tempifp, *ifp, *tifp;
1794	int			aforkblks = 0;
1795	int			taforkblks = 0;
1796	xfs_extnum_t		nextents;
1797	__uint64_t		tmp;
1798	int			error;
1799
1800	/*
1801	 * Count the number of extended attribute blocks
1802	 */
1803	if ( ((XFS_IFORK_Q(ip) != 0) && (ip->i_d.di_anextents > 0)) &&
1804	     (ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1805		error = xfs_bmap_count_blocks(tp, ip, XFS_ATTR_FORK,
1806				&aforkblks);
1807		if (error)
1808			return error;
1809	}
1810	if ( ((XFS_IFORK_Q(tip) != 0) && (tip->i_d.di_anextents > 0)) &&
1811	     (tip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL)) {
1812		error = xfs_bmap_count_blocks(tp, tip, XFS_ATTR_FORK,
1813				&taforkblks);
1814		if (error)
1815			return error;
1816	}
1817
1818	/*
1819	 * Before we swap the forks, let's set the owners of the forks
1820	 * appropriately. We have to do this as we are demand paging the btree
1821	 * buffers, and so the validation done on read will expect the owner
1822	 * field to be correctly set. Once we change the owners, we can swap the
1823	 * inode forks.
1824	 */
1825	if (ip->i_d.di_version == 3 &&
1826	    ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1827		(*target_log_flags) |= XFS_ILOG_DOWNER;
1828		error = xfs_bmbt_change_owner(tp, ip, XFS_DATA_FORK,
1829					      tip->i_ino, NULL);
1830		if (error)
1831			return error;
1832	}
1833
1834	if (tip->i_d.di_version == 3 &&
1835	    tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
1836		(*src_log_flags) |= XFS_ILOG_DOWNER;
1837		error = xfs_bmbt_change_owner(tp, tip, XFS_DATA_FORK,
1838					      ip->i_ino, NULL);
1839		if (error)
1840			return error;
1841	}
1842
1843	/*
1844	 * Swap the data forks of the inodes
1845	 */
1846	ifp = &ip->i_df;
1847	tifp = &tip->i_df;
1848	tempifp = *ifp;		/* struct copy */
1849	*ifp = *tifp;		/* struct copy */
1850	*tifp = tempifp;	/* struct copy */
1851
1852	/*
1853	 * Fix the on-disk inode values
1854	 */
1855	tmp = (__uint64_t)ip->i_d.di_nblocks;
1856	ip->i_d.di_nblocks = tip->i_d.di_nblocks - taforkblks + aforkblks;
1857	tip->i_d.di_nblocks = tmp + taforkblks

Large files are truncated; the remainder of this file can be viewed at the repository.