PageRenderTime 100ms CodeModel.GetById 4ms app.highlight 76ms RepoModel.GetById 1ms app.codeStats 1ms

/fs/xfs/xfs_bmap.c

https://bitbucket.org/digetx/picasso-kernel
C | 6314 lines | 4572 code | 491 blank | 1251 comment | 1101 complexity | da7ec63f1de34212f526fcbac107a93d MD5 | raw file

Large files are truncated, but you can click here to view the full file

   1/*
   2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
   3 * All Rights Reserved.
   4 *
   5 * This program is free software; you can redistribute it and/or
   6 * modify it under the terms of the GNU General Public License as
   7 * published by the Free Software Foundation.
   8 *
   9 * This program is distributed in the hope that it would be useful,
  10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  12 * GNU General Public License for more details.
  13 *
  14 * You should have received a copy of the GNU General Public License
  15 * along with this program; if not, write the Free Software Foundation,
  16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  17 */
  18#include "xfs.h"
  19#include "xfs_fs.h"
  20#include "xfs_types.h"
  21#include "xfs_bit.h"
  22#include "xfs_log.h"
  23#include "xfs_inum.h"
  24#include "xfs_trans.h"
  25#include "xfs_sb.h"
  26#include "xfs_ag.h"
  27#include "xfs_dir2.h"
  28#include "xfs_da_btree.h"
  29#include "xfs_bmap_btree.h"
  30#include "xfs_alloc_btree.h"
  31#include "xfs_ialloc_btree.h"
  32#include "xfs_dinode.h"
  33#include "xfs_inode.h"
  34#include "xfs_btree.h"
  35#include "xfs_mount.h"
  36#include "xfs_itable.h"
  37#include "xfs_inode_item.h"
  38#include "xfs_extfree_item.h"
  39#include "xfs_alloc.h"
  40#include "xfs_bmap.h"
  41#include "xfs_rtalloc.h"
  42#include "xfs_error.h"
  43#include "xfs_attr_leaf.h"
  44#include "xfs_quota.h"
  45#include "xfs_trans_space.h"
  46#include "xfs_buf_item.h"
  47#include "xfs_filestream.h"
  48#include "xfs_vnodeops.h"
  49#include "xfs_trace.h"
  50
  51
/* Allocation zone for xfs_bmap_free_item_t entries on bmap free lists. */
kmem_zone_t		*xfs_bmap_free_item_zone;
  53
  54/*
  55 * Prototypes for internal bmap routines.
  56 */
  57
  58#ifdef DEBUG
  59STATIC void
  60xfs_bmap_check_leaf_extents(
  61	struct xfs_btree_cur	*cur,
  62	struct xfs_inode	*ip,
  63	int			whichfork);
  64#else
  65#define xfs_bmap_check_leaf_extents(cur, ip, whichfork)		do { } while (0)
  66#endif
  67
  68
  69/*
  70 * Called from xfs_bmap_add_attrfork to handle extents format files.
  71 */
  72STATIC int					/* error */
  73xfs_bmap_add_attrfork_extents(
  74	xfs_trans_t		*tp,		/* transaction pointer */
  75	xfs_inode_t		*ip,		/* incore inode pointer */
  76	xfs_fsblock_t		*firstblock,	/* first block allocated */
  77	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
  78	int			*flags);	/* inode logging flags */
  79
  80/*
  81 * Called from xfs_bmap_add_attrfork to handle local format files.
  82 */
  83STATIC int					/* error */
  84xfs_bmap_add_attrfork_local(
  85	xfs_trans_t		*tp,		/* transaction pointer */
  86	xfs_inode_t		*ip,		/* incore inode pointer */
  87	xfs_fsblock_t		*firstblock,	/* first block allocated */
  88	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
  89	int			*flags);	/* inode logging flags */
  90
  91/*
  92 * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
  93 * It figures out where to ask the underlying allocator to put the new extent.
  94 */
  95STATIC int				/* error */
  96xfs_bmap_alloc(
  97	xfs_bmalloca_t		*ap);	/* bmap alloc argument struct */
  98
  99/*
 100 * Transform a btree format file with only one leaf node, where the
 101 * extents list will fit in the inode, into an extents format file.
 102 * Since the file extents are already in-core, all we have to do is
 103 * give up the space for the btree root and pitch the leaf block.
 104 */
 105STATIC int				/* error */
 106xfs_bmap_btree_to_extents(
 107	xfs_trans_t		*tp,	/* transaction pointer */
 108	xfs_inode_t		*ip,	/* incore inode pointer */
 109	xfs_btree_cur_t		*cur,	/* btree cursor */
 110	int			*logflagsp, /* inode logging flags */
 111	int			whichfork); /* data or attr fork */
 112
 113/*
 114 * Remove the entry "free" from the free item list.  Prev points to the
 115 * previous entry, unless "free" is the head of the list.
 116 */
 117STATIC void
 118xfs_bmap_del_free(
 119	xfs_bmap_free_t		*flist,	/* free item list header */
 120	xfs_bmap_free_item_t	*prev,	/* previous item on list, if any */
 121	xfs_bmap_free_item_t	*free);	/* list item to be freed */
 122
 123/*
 124 * Convert an extents-format file into a btree-format file.
 125 * The new file will have a root block (in the inode) and a single child block.
 126 */
 127STATIC int					/* error */
 128xfs_bmap_extents_to_btree(
 129	xfs_trans_t		*tp,		/* transaction pointer */
 130	xfs_inode_t		*ip,		/* incore inode pointer */
 131	xfs_fsblock_t		*firstblock,	/* first-block-allocated */
 132	xfs_bmap_free_t		*flist,		/* blocks freed in xaction */
 133	xfs_btree_cur_t		**curp,		/* cursor returned to caller */
 134	int			wasdel,		/* converting a delayed alloc */
 135	int			*logflagsp,	/* inode logging flags */
 136	int			whichfork);	/* data or attr fork */
 137
 138/*
 139 * Convert a local file to an extents file.
 140 * This code is sort of bogus, since the file data needs to get
 141 * logged so it won't be lost.  The bmap-level manipulations are ok, though.
 142 */
 143STATIC int				/* error */
 144xfs_bmap_local_to_extents(
 145	xfs_trans_t	*tp,		/* transaction pointer */
 146	xfs_inode_t	*ip,		/* incore inode pointer */
 147	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
 148	xfs_extlen_t	total,		/* total blocks needed by transaction */
 149	int		*logflagsp,	/* inode logging flags */
 150	int		whichfork,	/* data or attr fork */
 151	void		(*init_fn)(struct xfs_buf *bp,
 152				   struct xfs_inode *ip,
 153				   struct xfs_ifork *ifp));
 154
 155/*
 156 * Search the extents list for the inode, for the extent containing bno.
 157 * If bno lies in a hole, point to the next entry.  If bno lies past eof,
 158 * *eofp will be set, and *prevp will contain the last entry (null if none).
 159 * Else, *lastxp will be set to the index of the found
 160 * entry; *gotp will contain the entry.
 161 */
 162STATIC xfs_bmbt_rec_host_t *		/* pointer to found extent entry */
 163xfs_bmap_search_extents(
 164	xfs_inode_t	*ip,		/* incore inode pointer */
 165	xfs_fileoff_t	bno,		/* block number searched for */
 166	int		whichfork,	/* data or attr fork */
 167	int		*eofp,		/* out: end of file found */
 168	xfs_extnum_t	*lastxp,	/* out: last extent index */
 169	xfs_bmbt_irec_t	*gotp,		/* out: extent entry found */
 170	xfs_bmbt_irec_t	*prevp);	/* out: previous extent entry found */
 171
 172/*
 173 * Compute the worst-case number of indirect blocks that will be used
 174 * for ip's delayed extent of length "len".
 175 */
 176STATIC xfs_filblks_t
 177xfs_bmap_worst_indlen(
 178	xfs_inode_t		*ip,	/* incore inode pointer */
 179	xfs_filblks_t		len);	/* delayed extent length */
 180
 181#ifdef DEBUG
 182/*
 183 * Perform various validation checks on the values being returned
 184 * from xfs_bmapi().
 185 */
 186STATIC void
 187xfs_bmap_validate_ret(
 188	xfs_fileoff_t		bno,
 189	xfs_filblks_t		len,
 190	int			flags,
 191	xfs_bmbt_irec_t		*mval,
 192	int			nmap,
 193	int			ret_nmap);
 194#else
 195#define	xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
 196#endif /* DEBUG */
 197
 198STATIC int
 199xfs_bmap_count_tree(
 200	xfs_mount_t     *mp,
 201	xfs_trans_t     *tp,
 202	xfs_ifork_t	*ifp,
 203	xfs_fsblock_t   blockno,
 204	int             levelin,
 205	int		*count);
 206
 207STATIC void
 208xfs_bmap_count_leaves(
 209	xfs_ifork_t		*ifp,
 210	xfs_extnum_t		idx,
 211	int			numrecs,
 212	int			*count);
 213
 214STATIC void
 215xfs_bmap_disk_count_leaves(
 216	struct xfs_mount	*mp,
 217	struct xfs_btree_block	*block,
 218	int			numrecs,
 219	int			*count);
 220
 221/*
 222 * Bmap internal routines.
 223 */
 224
 225STATIC int				/* error */
 226xfs_bmbt_lookup_eq(
 227	struct xfs_btree_cur	*cur,
 228	xfs_fileoff_t		off,
 229	xfs_fsblock_t		bno,
 230	xfs_filblks_t		len,
 231	int			*stat)	/* success/failure */
 232{
 233	cur->bc_rec.b.br_startoff = off;
 234	cur->bc_rec.b.br_startblock = bno;
 235	cur->bc_rec.b.br_blockcount = len;
 236	return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
 237}
 238
 239STATIC int				/* error */
 240xfs_bmbt_lookup_ge(
 241	struct xfs_btree_cur	*cur,
 242	xfs_fileoff_t		off,
 243	xfs_fsblock_t		bno,
 244	xfs_filblks_t		len,
 245	int			*stat)	/* success/failure */
 246{
 247	cur->bc_rec.b.br_startoff = off;
 248	cur->bc_rec.b.br_startblock = bno;
 249	cur->bc_rec.b.br_blockcount = len;
 250	return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
 251}
 252
 253/*
 254 * Check if the inode needs to be converted to btree format.
 255 */
 256static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
 257{
 258	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
 259		XFS_IFORK_NEXTENTS(ip, whichfork) >
 260			XFS_IFORK_MAXEXT(ip, whichfork);
 261}
 262
 263/*
 264 * Check if the inode should be converted to extent format.
 265 */
 266static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
 267{
 268	return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
 269		XFS_IFORK_NEXTENTS(ip, whichfork) <=
 270			XFS_IFORK_MAXEXT(ip, whichfork);
 271}
 272
 273/*
 274 * Update the record referred to by cur to the value given
 275 * by [off, bno, len, state].
 276 * This either works (return 0) or gets an EFSCORRUPTED error.
 277 */
 278STATIC int
 279xfs_bmbt_update(
 280	struct xfs_btree_cur	*cur,
 281	xfs_fileoff_t		off,
 282	xfs_fsblock_t		bno,
 283	xfs_filblks_t		len,
 284	xfs_exntst_t		state)
 285{
 286	union xfs_btree_rec	rec;
 287
 288	xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
 289	return xfs_btree_update(cur, &rec);
 290}
 291
 292/*
 293 * Called from xfs_bmap_add_attrfork to handle btree format files.
 294 */
 295STATIC int					/* error */
 296xfs_bmap_add_attrfork_btree(
 297	xfs_trans_t		*tp,		/* transaction pointer */
 298	xfs_inode_t		*ip,		/* incore inode pointer */
 299	xfs_fsblock_t		*firstblock,	/* first block allocated */
 300	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
 301	int			*flags)		/* inode logging flags */
 302{
 303	xfs_btree_cur_t		*cur;		/* btree cursor */
 304	int			error;		/* error return value */
 305	xfs_mount_t		*mp;		/* file system mount struct */
 306	int			stat;		/* newroot status */
 307
 308	mp = ip->i_mount;
 309	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
 310		*flags |= XFS_ILOG_DBROOT;
 311	else {
 312		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
 313		cur->bc_private.b.flist = flist;
 314		cur->bc_private.b.firstblock = *firstblock;
 315		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
 316			goto error0;
 317		/* must be at least one entry */
 318		XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
 319		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
 320			goto error0;
 321		if (stat == 0) {
 322			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 323			return XFS_ERROR(ENOSPC);
 324		}
 325		*firstblock = cur->bc_private.b.firstblock;
 326		cur->bc_private.b.allocated = 0;
 327		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
 328	}
 329	return 0;
 330error0:
 331	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
 332	return error;
 333}
 334
 335/*
 336 * Called from xfs_bmap_add_attrfork to handle extents format files.
 337 */
 338STATIC int					/* error */
 339xfs_bmap_add_attrfork_extents(
 340	xfs_trans_t		*tp,		/* transaction pointer */
 341	xfs_inode_t		*ip,		/* incore inode pointer */
 342	xfs_fsblock_t		*firstblock,	/* first block allocated */
 343	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
 344	int			*flags)		/* inode logging flags */
 345{
 346	xfs_btree_cur_t		*cur;		/* bmap btree cursor */
 347	int			error;		/* error return value */
 348
 349	if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
 350		return 0;
 351	cur = NULL;
 352	error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
 353		flags, XFS_DATA_FORK);
 354	if (cur) {
 355		cur->bc_private.b.allocated = 0;
 356		xfs_btree_del_cursor(cur,
 357			error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
 358	}
 359	return error;
 360}
 361
 362/*
 363 * Block initialisation functions for local to extent format conversion.
 364 * As these get more complex, they will be moved to the relevant files,
 365 * but for now they are too simple to worry about.
 366 */
 367STATIC void
 368xfs_bmap_local_to_extents_init_fn(
 369	struct xfs_buf		*bp,
 370	struct xfs_inode	*ip,
 371	struct xfs_ifork	*ifp)
 372{
 373	bp->b_ops = &xfs_bmbt_buf_ops;
 374	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
 375}
 376
 377STATIC void
 378xfs_symlink_local_to_remote(
 379	struct xfs_buf		*bp,
 380	struct xfs_inode	*ip,
 381	struct xfs_ifork	*ifp)
 382{
 383	/* remote symlink blocks are not verifiable until CRCs come along */
 384	bp->b_ops = NULL;
 385	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
 386}
 387
 388/*
 389 * Called from xfs_bmap_add_attrfork to handle local format files. Each
 390 * different data fork content type needs a different callout to do the
 391 * conversion. Some are basic and only require special block initialisation
 392 * callouts for the data formating, others (directories) are so specialised they
 393 * handle everything themselves.
 394 *
 395 * XXX (dgc): investigate whether directory conversion can use the generic
 396 * formatting callout. It should be possible - it's just a very complex
 397 * formatter. it would also require passing the transaction through to the init
 398 * function.
 399 */
 400STATIC int					/* error */
 401xfs_bmap_add_attrfork_local(
 402	xfs_trans_t		*tp,		/* transaction pointer */
 403	xfs_inode_t		*ip,		/* incore inode pointer */
 404	xfs_fsblock_t		*firstblock,	/* first block allocated */
 405	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
 406	int			*flags)		/* inode logging flags */
 407{
 408	xfs_da_args_t		dargs;		/* args for dir/attr code */
 409
 410	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
 411		return 0;
 412
 413	if (S_ISDIR(ip->i_d.di_mode)) {
 414		memset(&dargs, 0, sizeof(dargs));
 415		dargs.dp = ip;
 416		dargs.firstblock = firstblock;
 417		dargs.flist = flist;
 418		dargs.total = ip->i_mount->m_dirblkfsbs;
 419		dargs.whichfork = XFS_DATA_FORK;
 420		dargs.trans = tp;
 421		return xfs_dir2_sf_to_block(&dargs);
 422	}
 423
 424	if (S_ISLNK(ip->i_d.di_mode))
 425		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
 426						 flags, XFS_DATA_FORK,
 427						 xfs_symlink_local_to_remote);
 428
 429	return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
 430					 XFS_DATA_FORK,
 431					 xfs_bmap_local_to_extents_init_fn);
 432}
 433
 434/*
 435 * Convert a delayed allocation to a real allocation.
 436 */
 437STATIC int				/* error */
 438xfs_bmap_add_extent_delay_real(
 439	struct xfs_bmalloca	*bma)
 440{
 441	struct xfs_bmbt_irec	*new = &bma->got;
 442	int			diff;	/* temp value */
 443	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
 444	int			error;	/* error return value */
 445	int			i;	/* temp state */
 446	xfs_ifork_t		*ifp;	/* inode fork pointer */
 447	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
 448	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
 449					/* left is 0, right is 1, prev is 2 */
 450	int			rval=0;	/* return value (logging flags) */
 451	int			state = 0;/* state bits, accessed thru macros */
 452	xfs_filblks_t		da_new; /* new count del alloc blocks used */
 453	xfs_filblks_t		da_old; /* old count del alloc blocks used */
 454	xfs_filblks_t		temp=0;	/* value for da_new calculations */
 455	xfs_filblks_t		temp2=0;/* value for da_new calculations */
 456	int			tmp_rval;	/* partial logging flags */
 457
 458	ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
 459
 460	ASSERT(bma->idx >= 0);
 461	ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
 462	ASSERT(!isnullstartblock(new->br_startblock));
 463	ASSERT(!bma->cur ||
 464	       (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
 465
 466	XFS_STATS_INC(xs_add_exlist);
 467
 468#define	LEFT		r[0]
 469#define	RIGHT		r[1]
 470#define	PREV		r[2]
 471
 472	/*
 473	 * Set up a bunch of variables to make the tests simpler.
 474	 */
 475	ep = xfs_iext_get_ext(ifp, bma->idx);
 476	xfs_bmbt_get_all(ep, &PREV);
 477	new_endoff = new->br_startoff + new->br_blockcount;
 478	ASSERT(PREV.br_startoff <= new->br_startoff);
 479	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
 480
 481	da_old = startblockval(PREV.br_startblock);
 482	da_new = 0;
 483
 484	/*
 485	 * Set flags determining what part of the previous delayed allocation
 486	 * extent is being replaced by a real allocation.
 487	 */
 488	if (PREV.br_startoff == new->br_startoff)
 489		state |= BMAP_LEFT_FILLING;
 490	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
 491		state |= BMAP_RIGHT_FILLING;
 492
 493	/*
 494	 * Check and set flags if this segment has a left neighbor.
 495	 * Don't set contiguous if the combined extent would be too large.
 496	 */
 497	if (bma->idx > 0) {
 498		state |= BMAP_LEFT_VALID;
 499		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
 500
 501		if (isnullstartblock(LEFT.br_startblock))
 502			state |= BMAP_LEFT_DELAY;
 503	}
 504
 505	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
 506	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
 507	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
 508	    LEFT.br_state == new->br_state &&
 509	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
 510		state |= BMAP_LEFT_CONTIG;
 511
 512	/*
 513	 * Check and set flags if this segment has a right neighbor.
 514	 * Don't set contiguous if the combined extent would be too large.
 515	 * Also check for all-three-contiguous being too large.
 516	 */
 517	if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
 518		state |= BMAP_RIGHT_VALID;
 519		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
 520
 521		if (isnullstartblock(RIGHT.br_startblock))
 522			state |= BMAP_RIGHT_DELAY;
 523	}
 524
 525	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
 526	    new_endoff == RIGHT.br_startoff &&
 527	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
 528	    new->br_state == RIGHT.br_state &&
 529	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
 530	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
 531		       BMAP_RIGHT_FILLING)) !=
 532		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
 533		       BMAP_RIGHT_FILLING) ||
 534	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
 535			<= MAXEXTLEN))
 536		state |= BMAP_RIGHT_CONTIG;
 537
 538	error = 0;
 539	/*
 540	 * Switch out based on the FILLING and CONTIG state bits.
 541	 */
 542	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
 543			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
 544	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
 545	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
 546		/*
 547		 * Filling in all of a previously delayed allocation extent.
 548		 * The left and right neighbors are both contiguous with new.
 549		 */
 550		bma->idx--;
 551		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 552		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
 553			LEFT.br_blockcount + PREV.br_blockcount +
 554			RIGHT.br_blockcount);
 555		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 556
 557		xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
 558		bma->ip->i_d.di_nextents--;
 559		if (bma->cur == NULL)
 560			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 561		else {
 562			rval = XFS_ILOG_CORE;
 563			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
 564					RIGHT.br_startblock,
 565					RIGHT.br_blockcount, &i);
 566			if (error)
 567				goto done;
 568			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 569			error = xfs_btree_delete(bma->cur, &i);
 570			if (error)
 571				goto done;
 572			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 573			error = xfs_btree_decrement(bma->cur, 0, &i);
 574			if (error)
 575				goto done;
 576			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 577			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 578					LEFT.br_startblock,
 579					LEFT.br_blockcount +
 580					PREV.br_blockcount +
 581					RIGHT.br_blockcount, LEFT.br_state);
 582			if (error)
 583				goto done;
 584		}
 585		break;
 586
 587	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
 588		/*
 589		 * Filling in all of a previously delayed allocation extent.
 590		 * The left neighbor is contiguous, the right is not.
 591		 */
 592		bma->idx--;
 593
 594		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 595		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
 596			LEFT.br_blockcount + PREV.br_blockcount);
 597		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 598
 599		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
 600		if (bma->cur == NULL)
 601			rval = XFS_ILOG_DEXT;
 602		else {
 603			rval = 0;
 604			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
 605					LEFT.br_startblock, LEFT.br_blockcount,
 606					&i);
 607			if (error)
 608				goto done;
 609			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 610			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 611					LEFT.br_startblock,
 612					LEFT.br_blockcount +
 613					PREV.br_blockcount, LEFT.br_state);
 614			if (error)
 615				goto done;
 616		}
 617		break;
 618
 619	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
 620		/*
 621		 * Filling in all of a previously delayed allocation extent.
 622		 * The right neighbor is contiguous, the left is not.
 623		 */
 624		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 625		xfs_bmbt_set_startblock(ep, new->br_startblock);
 626		xfs_bmbt_set_blockcount(ep,
 627			PREV.br_blockcount + RIGHT.br_blockcount);
 628		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 629
 630		xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
 631		if (bma->cur == NULL)
 632			rval = XFS_ILOG_DEXT;
 633		else {
 634			rval = 0;
 635			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
 636					RIGHT.br_startblock,
 637					RIGHT.br_blockcount, &i);
 638			if (error)
 639				goto done;
 640			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 641			error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
 642					new->br_startblock,
 643					PREV.br_blockcount +
 644					RIGHT.br_blockcount, PREV.br_state);
 645			if (error)
 646				goto done;
 647		}
 648		break;
 649
 650	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
 651		/*
 652		 * Filling in all of a previously delayed allocation extent.
 653		 * Neither the left nor right neighbors are contiguous with
 654		 * the new one.
 655		 */
 656		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 657		xfs_bmbt_set_startblock(ep, new->br_startblock);
 658		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 659
 660		bma->ip->i_d.di_nextents++;
 661		if (bma->cur == NULL)
 662			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 663		else {
 664			rval = XFS_ILOG_CORE;
 665			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 666					new->br_startblock, new->br_blockcount,
 667					&i);
 668			if (error)
 669				goto done;
 670			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 671			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 672			error = xfs_btree_insert(bma->cur, &i);
 673			if (error)
 674				goto done;
 675			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 676		}
 677		break;
 678
 679	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
 680		/*
 681		 * Filling in the first part of a previous delayed allocation.
 682		 * The left neighbor is contiguous.
 683		 */
 684		trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
 685		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
 686			LEFT.br_blockcount + new->br_blockcount);
 687		xfs_bmbt_set_startoff(ep,
 688			PREV.br_startoff + new->br_blockcount);
 689		trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
 690
 691		temp = PREV.br_blockcount - new->br_blockcount;
 692		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 693		xfs_bmbt_set_blockcount(ep, temp);
 694		if (bma->cur == NULL)
 695			rval = XFS_ILOG_DEXT;
 696		else {
 697			rval = 0;
 698			error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
 699					LEFT.br_startblock, LEFT.br_blockcount,
 700					&i);
 701			if (error)
 702				goto done;
 703			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 704			error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
 705					LEFT.br_startblock,
 706					LEFT.br_blockcount +
 707					new->br_blockcount,
 708					LEFT.br_state);
 709			if (error)
 710				goto done;
 711		}
 712		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 713			startblockval(PREV.br_startblock));
 714		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
 715		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 716
 717		bma->idx--;
 718		break;
 719
 720	case BMAP_LEFT_FILLING:
 721		/*
 722		 * Filling in the first part of a previous delayed allocation.
 723		 * The left neighbor is not contiguous.
 724		 */
 725		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 726		xfs_bmbt_set_startoff(ep, new_endoff);
 727		temp = PREV.br_blockcount - new->br_blockcount;
 728		xfs_bmbt_set_blockcount(ep, temp);
 729		xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
 730		bma->ip->i_d.di_nextents++;
 731		if (bma->cur == NULL)
 732			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 733		else {
 734			rval = XFS_ILOG_CORE;
 735			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 736					new->br_startblock, new->br_blockcount,
 737					&i);
 738			if (error)
 739				goto done;
 740			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 741			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 742			error = xfs_btree_insert(bma->cur, &i);
 743			if (error)
 744				goto done;
 745			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 746		}
 747
 748		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
 749			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 750					bma->firstblock, bma->flist,
 751					&bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
 752			rval |= tmp_rval;
 753			if (error)
 754				goto done;
 755		}
 756		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 757			startblockval(PREV.br_startblock) -
 758			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
 759		ep = xfs_iext_get_ext(ifp, bma->idx + 1);
 760		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
 761		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
 762		break;
 763
 764	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
 765		/*
 766		 * Filling in the last part of a previous delayed allocation.
 767		 * The right neighbor is contiguous with the new allocation.
 768		 */
 769		temp = PREV.br_blockcount - new->br_blockcount;
 770		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
 771		xfs_bmbt_set_blockcount(ep, temp);
 772		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
 773			new->br_startoff, new->br_startblock,
 774			new->br_blockcount + RIGHT.br_blockcount,
 775			RIGHT.br_state);
 776		trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
 777		if (bma->cur == NULL)
 778			rval = XFS_ILOG_DEXT;
 779		else {
 780			rval = 0;
 781			error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
 782					RIGHT.br_startblock,
 783					RIGHT.br_blockcount, &i);
 784			if (error)
 785				goto done;
 786			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 787			error = xfs_bmbt_update(bma->cur, new->br_startoff,
 788					new->br_startblock,
 789					new->br_blockcount +
 790					RIGHT.br_blockcount,
 791					RIGHT.br_state);
 792			if (error)
 793				goto done;
 794		}
 795
 796		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 797			startblockval(PREV.br_startblock));
 798		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 799		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
 800		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 801
 802		bma->idx++;
 803		break;
 804
 805	case BMAP_RIGHT_FILLING:
 806		/*
 807		 * Filling in the last part of a previous delayed allocation.
 808		 * The right neighbor is not contiguous.
 809		 */
 810		temp = PREV.br_blockcount - new->br_blockcount;
 811		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 812		xfs_bmbt_set_blockcount(ep, temp);
 813		xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
 814		bma->ip->i_d.di_nextents++;
 815		if (bma->cur == NULL)
 816			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 817		else {
 818			rval = XFS_ILOG_CORE;
 819			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 820					new->br_startblock, new->br_blockcount,
 821					&i);
 822			if (error)
 823				goto done;
 824			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 825			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 826			error = xfs_btree_insert(bma->cur, &i);
 827			if (error)
 828				goto done;
 829			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 830		}
 831
 832		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
 833			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 834				bma->firstblock, bma->flist, &bma->cur, 1,
 835				&tmp_rval, XFS_DATA_FORK);
 836			rval |= tmp_rval;
 837			if (error)
 838				goto done;
 839		}
 840		da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
 841			startblockval(PREV.br_startblock) -
 842			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
 843		ep = xfs_iext_get_ext(ifp, bma->idx);
 844		xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
 845		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 846
 847		bma->idx++;
 848		break;
 849
 850	case 0:
 851		/*
 852		 * Filling in the middle part of a previous delayed allocation.
 853		 * Contiguity is impossible here.
 854		 * This case is avoided almost all the time.
 855		 *
 856		 * We start with a delayed allocation:
 857		 *
 858		 * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
 859		 *  PREV @ idx
 860		 *
 861	         * and we are allocating:
 862		 *                     +rrrrrrrrrrrrrrrrr+
 863		 *			      new
 864		 *
 865		 * and we set it up for insertion as:
 866		 * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
 867		 *                            new
 868		 *  PREV @ idx          LEFT              RIGHT
 869		 *                      inserted at idx + 1
 870		 */
 871		temp = new->br_startoff - PREV.br_startoff;
 872		temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
 873		trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
 874		xfs_bmbt_set_blockcount(ep, temp);	/* truncate PREV */
 875		LEFT = *new;
 876		RIGHT.br_state = PREV.br_state;
 877		RIGHT.br_startblock = nullstartblock(
 878				(int)xfs_bmap_worst_indlen(bma->ip, temp2));
 879		RIGHT.br_startoff = new_endoff;
 880		RIGHT.br_blockcount = temp2;
 881		/* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
 882		xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
 883		bma->ip->i_d.di_nextents++;
 884		if (bma->cur == NULL)
 885			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 886		else {
 887			rval = XFS_ILOG_CORE;
 888			error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
 889					new->br_startblock, new->br_blockcount,
 890					&i);
 891			if (error)
 892				goto done;
 893			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
 894			bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
 895			error = xfs_btree_insert(bma->cur, &i);
 896			if (error)
 897				goto done;
 898			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
 899		}
 900
 901		if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
 902			error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 903					bma->firstblock, bma->flist, &bma->cur,
 904					1, &tmp_rval, XFS_DATA_FORK);
 905			rval |= tmp_rval;
 906			if (error)
 907				goto done;
 908		}
 909		temp = xfs_bmap_worst_indlen(bma->ip, temp);
 910		temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
 911		diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
 912			(bma->cur ? bma->cur->bc_private.b.allocated : 0));
 913		if (diff > 0) {
 914			error = xfs_icsb_modify_counters(bma->ip->i_mount,
 915					XFS_SBS_FDBLOCKS,
 916					-((int64_t)diff), 0);
 917			ASSERT(!error);
 918			if (error)
 919				goto done;
 920		}
 921
 922		ep = xfs_iext_get_ext(ifp, bma->idx);
 923		xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
 924		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 925		trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
 926		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
 927			nullstartblock((int)temp2));
 928		trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
 929
 930		bma->idx++;
 931		da_new = temp + temp2;
 932		break;
 933
 934	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
 935	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
 936	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
 937	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
 938	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
 939	case BMAP_LEFT_CONTIG:
 940	case BMAP_RIGHT_CONTIG:
 941		/*
 942		 * These cases are all impossible.
 943		 */
 944		ASSERT(0);
 945	}
 946
 947	/* convert to a btree if necessary */
 948	if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
 949		int	tmp_logflags;	/* partial log flag return val */
 950
 951		ASSERT(bma->cur == NULL);
 952		error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
 953				bma->firstblock, bma->flist, &bma->cur,
 954				da_old > 0, &tmp_logflags, XFS_DATA_FORK);
 955		bma->logflags |= tmp_logflags;
 956		if (error)
 957			goto done;
 958	}
 959
 960	/* adjust for changes in reserved delayed indirect blocks */
 961	if (da_old || da_new) {
 962		temp = da_new;
 963		if (bma->cur)
 964			temp += bma->cur->bc_private.b.allocated;
 965		ASSERT(temp <= da_old);
 966		if (temp < da_old)
 967			xfs_icsb_modify_counters(bma->ip->i_mount,
 968					XFS_SBS_FDBLOCKS,
 969					(int64_t)(da_old - temp), 0);
 970	}
 971
 972	/* clear out the allocated field, done with it now in any case. */
 973	if (bma->cur)
 974		bma->cur->bc_private.b.allocated = 0;
 975
 976	xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
 977done:
 978	bma->logflags |= rval;
 979	return error;
 980#undef	LEFT
 981#undef	RIGHT
 982#undef	PREV
 983}
 984
 985/*
 986 * Convert an unwritten allocation to a real allocation or vice versa.
 987 */
 988STATIC int				/* error */
 989xfs_bmap_add_extent_unwritten_real(
 990	struct xfs_trans	*tp,
 991	xfs_inode_t		*ip,	/* incore inode pointer */
 992	xfs_extnum_t		*idx,	/* extent number to update/insert */
 993	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
 994	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
 995	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
 996	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
 997	int			*logflagsp) /* inode logging flags */
 998{
 999	xfs_btree_cur_t		*cur;	/* btree cursor */
1000	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
1001	int			error;	/* error return value */
1002	int			i;	/* temp state */
1003	xfs_ifork_t		*ifp;	/* inode fork pointer */
1004	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
1005	xfs_exntst_t		newext;	/* new extent state */
1006	xfs_exntst_t		oldext;	/* old extent state */
1007	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
1008					/* left is 0, right is 1, prev is 2 */
1009	int			rval=0;	/* return value (logging flags) */
1010	int			state = 0;/* state bits, accessed thru macros */
1011
1012	*logflagsp = 0;
1013
1014	cur = *curp;
1015	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1016
1017	ASSERT(*idx >= 0);
1018	ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
1019	ASSERT(!isnullstartblock(new->br_startblock));
1020
1021	XFS_STATS_INC(xs_add_exlist);
1022
1023#define	LEFT		r[0]
1024#define	RIGHT		r[1]
1025#define	PREV		r[2]
1026
1027	/*
1028	 * Set up a bunch of variables to make the tests simpler.
1029	 */
1030	error = 0;
1031	ep = xfs_iext_get_ext(ifp, *idx);
1032	xfs_bmbt_get_all(ep, &PREV);
1033	newext = new->br_state;
1034	oldext = (newext == XFS_EXT_UNWRITTEN) ?
1035		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
1036	ASSERT(PREV.br_state == oldext);
1037	new_endoff = new->br_startoff + new->br_blockcount;
1038	ASSERT(PREV.br_startoff <= new->br_startoff);
1039	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
1040
1041	/*
1042	 * Set flags determining what part of the previous oldext allocation
1043	 * extent is being replaced by a newext allocation.
1044	 */
1045	if (PREV.br_startoff == new->br_startoff)
1046		state |= BMAP_LEFT_FILLING;
1047	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
1048		state |= BMAP_RIGHT_FILLING;
1049
1050	/*
1051	 * Check and set flags if this segment has a left neighbor.
1052	 * Don't set contiguous if the combined extent would be too large.
1053	 */
1054	if (*idx > 0) {
1055		state |= BMAP_LEFT_VALID;
1056		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);
1057
1058		if (isnullstartblock(LEFT.br_startblock))
1059			state |= BMAP_LEFT_DELAY;
1060	}
1061
1062	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
1063	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
1064	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
1065	    LEFT.br_state == newext &&
1066	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1067		state |= BMAP_LEFT_CONTIG;
1068
1069	/*
1070	 * Check and set flags if this segment has a right neighbor.
1071	 * Don't set contiguous if the combined extent would be too large.
1072	 * Also check for all-three-contiguous being too large.
1073	 */
1074	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
1075		state |= BMAP_RIGHT_VALID;
1076		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
1077		if (isnullstartblock(RIGHT.br_startblock))
1078			state |= BMAP_RIGHT_DELAY;
1079	}
1080
1081	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
1082	    new_endoff == RIGHT.br_startoff &&
1083	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
1084	    newext == RIGHT.br_state &&
1085	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
1086	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1087		       BMAP_RIGHT_FILLING)) !=
1088		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
1089		       BMAP_RIGHT_FILLING) ||
1090	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
1091			<= MAXEXTLEN))
1092		state |= BMAP_RIGHT_CONTIG;
1093
1094	/*
1095	 * Switch out based on the FILLING and CONTIG state bits.
1096	 */
1097	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1098			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
1099	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
1100	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1101		/*
1102		 * Setting all of a previous oldext extent to newext.
1103		 * The left and right neighbors are both contiguous with new.
1104		 */
1105		--*idx;
1106
1107		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1108		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1109			LEFT.br_blockcount + PREV.br_blockcount +
1110			RIGHT.br_blockcount);
1111		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1112
1113		xfs_iext_remove(ip, *idx + 1, 2, state);
1114		ip->i_d.di_nextents -= 2;
1115		if (cur == NULL)
1116			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1117		else {
1118			rval = XFS_ILOG_CORE;
1119			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1120					RIGHT.br_startblock,
1121					RIGHT.br_blockcount, &i)))
1122				goto done;
1123			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1124			if ((error = xfs_btree_delete(cur, &i)))
1125				goto done;
1126			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1127			if ((error = xfs_btree_decrement(cur, 0, &i)))
1128				goto done;
1129			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1130			if ((error = xfs_btree_delete(cur, &i)))
1131				goto done;
1132			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1133			if ((error = xfs_btree_decrement(cur, 0, &i)))
1134				goto done;
1135			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1136			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1137				LEFT.br_startblock,
1138				LEFT.br_blockcount + PREV.br_blockcount +
1139				RIGHT.br_blockcount, LEFT.br_state)))
1140				goto done;
1141		}
1142		break;
1143
1144	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1145		/*
1146		 * Setting all of a previous oldext extent to newext.
1147		 * The left neighbor is contiguous, the right is not.
1148		 */
1149		--*idx;
1150
1151		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1152		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
1153			LEFT.br_blockcount + PREV.br_blockcount);
1154		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1155
1156		xfs_iext_remove(ip, *idx + 1, 1, state);
1157		ip->i_d.di_nextents--;
1158		if (cur == NULL)
1159			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1160		else {
1161			rval = XFS_ILOG_CORE;
1162			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1163					PREV.br_startblock, PREV.br_blockcount,
1164					&i)))
1165				goto done;
1166			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1167			if ((error = xfs_btree_delete(cur, &i)))
1168				goto done;
1169			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1170			if ((error = xfs_btree_decrement(cur, 0, &i)))
1171				goto done;
1172			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1173			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
1174				LEFT.br_startblock,
1175				LEFT.br_blockcount + PREV.br_blockcount,
1176				LEFT.br_state)))
1177				goto done;
1178		}
1179		break;
1180
1181	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1182		/*
1183		 * Setting all of a previous oldext extent to newext.
1184		 * The right neighbor is contiguous, the left is not.
1185		 */
1186		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1187		xfs_bmbt_set_blockcount(ep,
1188			PREV.br_blockcount + RIGHT.br_blockcount);
1189		xfs_bmbt_set_state(ep, newext);
1190		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1191		xfs_iext_remove(ip, *idx + 1, 1, state);
1192		ip->i_d.di_nextents--;
1193		if (cur == NULL)
1194			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1195		else {
1196			rval = XFS_ILOG_CORE;
1197			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
1198					RIGHT.br_startblock,
1199					RIGHT.br_blockcount, &i)))
1200				goto done;
1201			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1202			if ((error = xfs_btree_delete(cur, &i)))
1203				goto done;
1204			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1205			if ((error = xfs_btree_decrement(cur, 0, &i)))
1206				goto done;
1207			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1208			if ((error = xfs_bmbt_update(cur, new->br_startoff,
1209				new->br_startblock,
1210				new->br_blockcount + RIGHT.br_blockcount,
1211				newext)))
1212				goto done;
1213		}
1214		break;
1215
1216	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
1217		/*
1218		 * Setting all of a previous oldext extent to newext.
1219		 * Neither the left nor right neighbors are contiguous with
1220		 * the new one.
1221		 */
1222		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1223		xfs_bmbt_set_state(ep, newext);
1224		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1225
1226		if (cur == NULL)
1227			rval = XFS_ILOG_DEXT;
1228		else {
1229			rval = 0;
1230			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1231					new->br_startblock, new->br_blockcount,
1232					&i)))
1233				goto done;
1234			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1235			if ((error = xfs_bmbt_update(cur, new->br_startoff,
1236				new->br_startblock, new->br_blockcount,
1237				newext)))
1238				goto done;
1239		}
1240		break;
1241
1242	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
1243		/*
1244		 * Setting the first part of a previous oldext extent to newext.
1245		 * The left neighbor is contiguous.
1246		 */
1247		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
1248		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
1249			LEFT.br_blockcount + new->br_blockcount);
1250		xfs_bmbt_set_startoff(ep,
1251			PREV.br_startoff + new->br_blockcount);
1252		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);
1253
1254		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1255		xfs_bmbt_set_startblock(ep,
1256			new->br_startblock + new->br_blockcount);
1257		xfs_bmbt_set_blockcount(ep,
1258			PREV.br_blockcount - new->br_blockcount);
1259		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1260
1261		--*idx;
1262
1263		if (cur == NULL)
1264			rval = XFS_ILOG_DEXT;
1265		else {
1266			rval = 0;
1267			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1268					PREV.br_startblock, PREV.br_blockcount,
1269					&i)))
1270				goto done;
1271			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1272			if ((error = xfs_bmbt_update(cur,
1273				PREV.br_startoff + new->br_blockcount,
1274				PREV.br_startblock + new->br_blockcount,
1275				PREV.br_blockcount - new->br_blockcount,
1276				oldext)))
1277				goto done;
1278			if ((error = xfs_btree_decrement(cur, 0, &i)))
1279				goto done;
1280			error = xfs_bmbt_update(cur, LEFT.br_startoff,
1281				LEFT.br_startblock,
1282				LEFT.br_blockcount + new->br_blockcount,
1283				LEFT.br_state);
1284			if (error)
1285				goto done;
1286		}
1287		break;
1288
1289	case BMAP_LEFT_FILLING:
1290		/*
1291		 * Setting the first part of a previous oldext extent to newext.
1292		 * The left neighbor is not contiguous.
1293		 */
1294		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1295		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
1296		xfs_bmbt_set_startoff(ep, new_endoff);
1297		xfs_bmbt_set_blockcount(ep,
1298			PREV.br_blockcount - new->br_blockcount);
1299		xfs_bmbt_set_startblock(ep,
1300			new->br_startblock + new->br_blockcount);
1301		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1302
1303		xfs_iext_insert(ip, *idx, 1, new, state);
1304		ip->i_d.di_nextents++;
1305		if (cur == NULL)
1306			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1307		else {
1308			rval = XFS_ILOG_CORE;
1309			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1310					PREV.br_startblock, PREV.br_blockcount,
1311					&i)))
1312				goto done;
1313			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1314			if ((error = xfs_bmbt_update(cur,
1315				PREV.br_startoff + new->br_blockcount,
1316				PREV.br_startblock + new->br_blockcount,
1317				PREV.br_blockcount - new->br_blockcount,
1318				oldext)))
1319				goto done;
1320			cur->bc_rec.b = *new;
1321			if ((error = xfs_btree_insert(cur, &i)))
1322				goto done;
1323			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1324		}
1325		break;
1326
1327	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
1328		/*
1329		 * Setting the last part of a previous oldext extent to newext.
1330		 * The right neighbor is contiguous with the new allocation.
1331		 */
1332		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1333		xfs_bmbt_set_blockcount(ep,
1334			PREV.br_blockcount - new->br_blockcount);
1335		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1336
1337		++*idx;
1338
1339		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1340		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
1341			new->br_startoff, new->br_startblock,
1342			new->br_blockcount + RIGHT.br_blockcount, newext);
1343		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1344
1345		if (cur == NULL)
1346			rval = XFS_ILOG_DEXT;
1347		else {
1348			rval = 0;
1349			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1350					PREV.br_startblock,
1351					PREV.br_blockcount, &i)))
1352				goto done;
1353			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1354			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1355				PREV.br_startblock,
1356				PREV.br_blockcount - new->br_blockcount,
1357				oldext)))
1358				goto done;
1359			if ((error = xfs_btree_increment(cur, 0, &i)))
1360				goto done;
1361			if ((error = xfs_bmbt_update(cur, new->br_startoff,
1362				new->br_startblock,
1363				new->br_blockcount + RIGHT.br_blockcount,
1364				newext)))
1365				goto done;
1366		}
1367		break;
1368
1369	case BMAP_RIGHT_FILLING:
1370		/*
1371		 * Setting the last part of a previous oldext extent to newext.
1372		 * The right neighbor is not contiguous.
1373		 */
1374		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1375		xfs_bmbt_set_blockcount(ep,
1376			PREV.br_blockcount - new->br_blockcount);
1377		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1378
1379		++*idx;
1380		xfs_iext_insert(ip, *idx, 1, new, state);
1381
1382		ip->i_d.di_nextents++;
1383		if (cur == NULL)
1384			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1385		else {
1386			rval = XFS_ILOG_CORE;
1387			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1388					PREV.br_startblock, PREV.br_blockcount,
1389					&i)))
1390				goto done;
1391			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1392			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
1393				PREV.br_startblock,
1394				PREV.br_blockcount - new->br_blockcount,
1395				oldext)))
1396				goto done;
1397			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1398					new->br_startblock, new->br_blockcount,
1399					&i)))
1400				goto done;
1401			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1402			cur->bc_rec.b.br_state = XFS_EXT_NORM;
1403			if ((error = xfs_btree_insert(cur, &i)))
1404				goto done;
1405			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1406		}
1407		break;
1408
1409	case 0:
1410		/*
1411		 * Setting the middle part of a previous oldext extent to
1412		 * newext.  Contiguity is impossible here.
1413		 * One extent becomes three extents.
1414		 */
1415		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1416		xfs_bmbt_set_blockcount(ep,
1417			new->br_startoff - PREV.br_startoff);
1418		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1419
1420		r[0] = *new;
1421		r[1].br_startoff = new_endoff;
1422		r[1].br_blockcount =
1423			PREV.br_startoff + PREV.br_blockcount - new_endoff;
1424		r[1].br_startblock = new->br_startblock + new->br_blockcount;
1425		r[1].br_state = oldext;
1426
1427		++*idx;
1428		xfs_iext_insert(ip, *idx, 2, &r[0], state);
1429
1430		ip->i_d.di_nextents += 2;
1431		if (cur == NULL)
1432			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
1433		else {
1434			rval = XFS_ILOG_CORE;
1435			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
1436					PREV.br_startblock, PREV.br_blockcount,
1437					&i)))
1438				goto done;
1439			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1440			/* new right extent - oldext */
1441			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
1442				r[1].br_startblock, r[1].br_blockcount,
1443				r[1].br_state)))
1444				goto done;
1445			/* new left extent - oldext */
1446			cur->bc_rec.b = PREV;
1447			cur->bc_rec.b.br_blockcount =
1448				new->br_startoff - PREV.br_startoff;
1449			if ((error = xfs_btree_insert(cur, &i)))
1450				goto done;
1451			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1452			/*
1453			 * Reset the cursor to the position of the new extent
1454			 * we are about to insert as we can't trust it after
1455			 * the previous insert.
1456			 */
1457			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
1458					new->br_startblock, new->br_blockcount,
1459					&i)))
1460				goto done;
1461			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
1462			/* new middle extent - newext */
1463			cur->bc_rec.b.br_state = new->br_state;
1464			if ((error = xfs_btree_insert(cur, &i)))
1465				goto done;
1466			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
1467		}
1468		break;
1469
1470	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1471	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1472	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
1473	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
1474	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1475	case BMAP_LEFT_CONTIG:
1476	case BMAP_RIGHT_CONTIG:
1477		/*
1478		 * These cases are all impossible.
1479		 */
1480		ASSERT(0);
1481	}
1482
1483	/* convert to a btree if necessary */
1484	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
1485		int	tmp_logflags;	/* partial log flag return val */
1486
1487		ASSERT(cur == NULL);
1488		error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
1489				0, &tmp_logflags, XFS_DATA_FORK);
1490		*logflagsp |= tmp_logflags;
1491		if (error)
1492			goto done;
1493	}
1494
1495	/* clear out the allocated field, done with it now in any case. */
1496	if (cur) {
1497		cur->bc_private.b.allocated = 0;
1498		*curp = cur;
1499	}
1500
1501	xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
1502done:
1503	*logflagsp |= rval;
1504	return error;
1505#undef	LEFT
1506#undef	RIGHT
1507#undef	PREV
1508}
1509
1510/*
1511 * Convert a hole to a delayed allocation.
1512 */
1513STATIC void
1514xfs_bmap_add_extent_hole_delay(
1515	xfs_inode_t		*ip,	/* incore inode pointer */
1516	xfs_extnum_t		*idx,	/* extent number to update/insert */
1517	xfs_bmbt_irec_t		*new)	/* new data to add to file extents */
1518{
1519	xfs_ifork_t		*ifp;	/* inode fork pointer */
1520	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
1521	xfs_filblks_t		newlen=0;	/* new indirect size */
1522	xfs_filblks_t		oldlen=0;	/* old indirect size */
1523	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
1524	int			state;  /* state bits, accessed thru macros */
1525	xfs_filblks_t		temp=0;	/* temp for indirect calculations */
1526
1527	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1528	state = 0;
1529	ASSERT(isnullstartblock(new->br_startblock));
1530
1531	/*
1532	 * Check and set flags if this segment has a left neighbor
1533	 */
1534	if (*idx > 0) {
1535		state |= BMAP_LEFT_VALID;
1536		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
1537
1538		if (isnullstartblock(left.br_startblock))
1539			state |= BMAP_LEFT_DELAY;
1540	}
1541
1542	/*
1543	 * Check and set flags if the current (right) segment exists.
1544	 * If it doesn't exist, we're converting the hole at end-of-file.
1545	 */
1546	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
1547		state |= BMAP_RIGHT_VALID;
1548		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
1549
1550		if (isnullstartblock(right.br_startblock))
1551			state |= BMAP_RIGHT_DELAY;
1552	}
1553
1554	/*
1555	 * Set contiguity flags on the left and right neighbors.
1556	 * Don't let extents get too large, even if the pieces are contiguous.
1557	 */
1558	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
1559	    left.br_startoff + left.br_blockcount == new->br_startoff &&
1560	    left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
1561		state |= BMAP_LEFT_CONTIG;
1562
1563	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
1564	    new->br_startoff + new->br_blockcount == right.br_startoff &&
1565	    new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
1566	    (!(state & BMAP_LEFT_CONTIG) ||
1567	     (left.br_blockcount + new->br_blockcount +
1568	      right.br_blockcount <= MAXEXTLEN)))
1569		state |= BMAP_RIGHT_CONTIG;
1570
1571	/*
1572	 * Switch out based on the contiguity flags.
1573	 */
1574	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
1575	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
1576		/*
1577		 * New allocation is contiguous with delayed allocations
1578		 * on the left and on the right.
1579		 * Merge all three into a single extent record.
1580		 */
1581		--*idx;
1582		temp = left.br_blockcount + new->br_blockcount +
1583			right.br_blockcount;
1584
1585		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1586		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
1587		oldlen = startblockval(left.br_startblock) +
1588			startblockval(new->br_startblock) +
1589			startblockval(right.br_startblock);
1590		newlen = xfs_bmap_worst_indlen(ip, temp);
1591		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
1592			nullstartblock((int)newlen));
1593		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
1594
1595		xfs_iext_remove(ip, *idx + 1, 1, state);
1596		break;
1597
1598	case BMAP_LEFT_CONTIG:
1599		/*
1600		 * New allocation is contiguous with a delayed allocation
1601		 * on the left.
1602		 * Merge the new allocation with the left neighbor.
1603		 */
1604		--*idx;
1605		temp = left.br_blockcount + new->br_blockcount;
1606
1607		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
1608		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
1609		oldlen = startblockval(left.br_startblock) +
1610			startblock

Large files files are truncated, but you can click here to view the full file