PageRenderTime 61ms CodeModel.GetById 19ms RepoModel.GetById 0ms app.codeStats 1ms

/fs/xfs/xfs_bmap.c

https://bitbucket.org/digetx/picasso-kernel
C | 6314 lines | 4572 code | 491 blank | 1251 comment | 1101 complexity | da7ec63f1de34212f526fcbac107a93d MD5 | raw file
Possible License(s): AGPL-1.0, GPL-2.0, LGPL-2.0

Large files are truncated, but you can click here to view the full file

  1. /*
  2. * Copyright (c) 2000-2006 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_types.h"
  21. #include "xfs_bit.h"
  22. #include "xfs_log.h"
  23. #include "xfs_inum.h"
  24. #include "xfs_trans.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_ag.h"
  27. #include "xfs_dir2.h"
  28. #include "xfs_da_btree.h"
  29. #include "xfs_bmap_btree.h"
  30. #include "xfs_alloc_btree.h"
  31. #include "xfs_ialloc_btree.h"
  32. #include "xfs_dinode.h"
  33. #include "xfs_inode.h"
  34. #include "xfs_btree.h"
  35. #include "xfs_mount.h"
  36. #include "xfs_itable.h"
  37. #include "xfs_inode_item.h"
  38. #include "xfs_extfree_item.h"
  39. #include "xfs_alloc.h"
  40. #include "xfs_bmap.h"
  41. #include "xfs_rtalloc.h"
  42. #include "xfs_error.h"
  43. #include "xfs_attr_leaf.h"
  44. #include "xfs_quota.h"
  45. #include "xfs_trans_space.h"
  46. #include "xfs_buf_item.h"
  47. #include "xfs_filestream.h"
  48. #include "xfs_vnodeops.h"
  49. #include "xfs_trace.h"
  50. kmem_zone_t *xfs_bmap_free_item_zone;
  51. /*
  52. * Prototypes for internal bmap routines.
  53. */
  54. #ifdef DEBUG
  55. STATIC void
  56. xfs_bmap_check_leaf_extents(
  57. struct xfs_btree_cur *cur,
  58. struct xfs_inode *ip,
  59. int whichfork);
  60. #else
  61. #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
  62. #endif
  63. /*
  64. * Called from xfs_bmap_add_attrfork to handle extents format files.
  65. */
  66. STATIC int /* error */
  67. xfs_bmap_add_attrfork_extents(
  68. xfs_trans_t *tp, /* transaction pointer */
  69. xfs_inode_t *ip, /* incore inode pointer */
  70. xfs_fsblock_t *firstblock, /* first block allocated */
  71. xfs_bmap_free_t *flist, /* blocks to free at commit */
  72. int *flags); /* inode logging flags */
  73. /*
  74. * Called from xfs_bmap_add_attrfork to handle local format files.
  75. */
  76. STATIC int /* error */
  77. xfs_bmap_add_attrfork_local(
  78. xfs_trans_t *tp, /* transaction pointer */
  79. xfs_inode_t *ip, /* incore inode pointer */
  80. xfs_fsblock_t *firstblock, /* first block allocated */
  81. xfs_bmap_free_t *flist, /* blocks to free at commit */
  82. int *flags); /* inode logging flags */
  83. /*
  84. * xfs_bmap_alloc is called by xfs_bmapi to allocate an extent for a file.
  85. * It figures out where to ask the underlying allocator to put the new extent.
  86. */
  87. STATIC int /* error */
  88. xfs_bmap_alloc(
  89. xfs_bmalloca_t *ap); /* bmap alloc argument struct */
  90. /*
  91. * Transform a btree format file with only one leaf node, where the
  92. * extents list will fit in the inode, into an extents format file.
  93. * Since the file extents are already in-core, all we have to do is
  94. * give up the space for the btree root and pitch the leaf block.
  95. */
  96. STATIC int /* error */
  97. xfs_bmap_btree_to_extents(
  98. xfs_trans_t *tp, /* transaction pointer */
  99. xfs_inode_t *ip, /* incore inode pointer */
  100. xfs_btree_cur_t *cur, /* btree cursor */
  101. int *logflagsp, /* inode logging flags */
  102. int whichfork); /* data or attr fork */
  103. /*
  104. * Remove the entry "free" from the free item list. Prev points to the
  105. * previous entry, unless "free" is the head of the list.
  106. */
  107. STATIC void
  108. xfs_bmap_del_free(
  109. xfs_bmap_free_t *flist, /* free item list header */
  110. xfs_bmap_free_item_t *prev, /* previous item on list, if any */
  111. xfs_bmap_free_item_t *free); /* list item to be freed */
  112. /*
  113. * Convert an extents-format file into a btree-format file.
  114. * The new file will have a root block (in the inode) and a single child block.
  115. */
  116. STATIC int /* error */
  117. xfs_bmap_extents_to_btree(
  118. xfs_trans_t *tp, /* transaction pointer */
  119. xfs_inode_t *ip, /* incore inode pointer */
  120. xfs_fsblock_t *firstblock, /* first-block-allocated */
  121. xfs_bmap_free_t *flist, /* blocks freed in xaction */
  122. xfs_btree_cur_t **curp, /* cursor returned to caller */
  123. int wasdel, /* converting a delayed alloc */
  124. int *logflagsp, /* inode logging flags */
  125. int whichfork); /* data or attr fork */
  126. /*
  127. * Convert a local file to an extents file.
  128. * This code is sort of bogus, since the file data needs to get
  129. * logged so it won't be lost. The bmap-level manipulations are ok, though.
  130. */
  131. STATIC int /* error */
  132. xfs_bmap_local_to_extents(
  133. xfs_trans_t *tp, /* transaction pointer */
  134. xfs_inode_t *ip, /* incore inode pointer */
  135. xfs_fsblock_t *firstblock, /* first block allocated in xaction */
  136. xfs_extlen_t total, /* total blocks needed by transaction */
  137. int *logflagsp, /* inode logging flags */
  138. int whichfork, /* data or attr fork */
  139. void (*init_fn)(struct xfs_buf *bp,
  140. struct xfs_inode *ip,
  141. struct xfs_ifork *ifp));
  142. /*
  143. * Search the extents list for the inode, for the extent containing bno.
  144. * If bno lies in a hole, point to the next entry. If bno lies past eof,
  145. * *eofp will be set, and *prevp will contain the last entry (null if none).
  146. * Else, *lastxp will be set to the index of the found
  147. * entry; *gotp will contain the entry.
  148. */
  149. STATIC xfs_bmbt_rec_host_t * /* pointer to found extent entry */
  150. xfs_bmap_search_extents(
  151. xfs_inode_t *ip, /* incore inode pointer */
  152. xfs_fileoff_t bno, /* block number searched for */
  153. int whichfork, /* data or attr fork */
  154. int *eofp, /* out: end of file found */
  155. xfs_extnum_t *lastxp, /* out: last extent index */
  156. xfs_bmbt_irec_t *gotp, /* out: extent entry found */
  157. xfs_bmbt_irec_t *prevp); /* out: previous extent entry found */
  158. /*
  159. * Compute the worst-case number of indirect blocks that will be used
  160. * for ip's delayed extent of length "len".
  161. */
  162. STATIC xfs_filblks_t
  163. xfs_bmap_worst_indlen(
  164. xfs_inode_t *ip, /* incore inode pointer */
  165. xfs_filblks_t len); /* delayed extent length */
  166. #ifdef DEBUG
  167. /*
  168. * Perform various validation checks on the values being returned
  169. * from xfs_bmapi().
  170. */
  171. STATIC void
  172. xfs_bmap_validate_ret(
  173. xfs_fileoff_t bno,
  174. xfs_filblks_t len,
  175. int flags,
  176. xfs_bmbt_irec_t *mval,
  177. int nmap,
  178. int ret_nmap);
  179. #else
  180. #define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
  181. #endif /* DEBUG */
  182. STATIC int
  183. xfs_bmap_count_tree(
  184. xfs_mount_t *mp,
  185. xfs_trans_t *tp,
  186. xfs_ifork_t *ifp,
  187. xfs_fsblock_t blockno,
  188. int levelin,
  189. int *count);
  190. STATIC void
  191. xfs_bmap_count_leaves(
  192. xfs_ifork_t *ifp,
  193. xfs_extnum_t idx,
  194. int numrecs,
  195. int *count);
  196. STATIC void
  197. xfs_bmap_disk_count_leaves(
  198. struct xfs_mount *mp,
  199. struct xfs_btree_block *block,
  200. int numrecs,
  201. int *count);
  202. /*
  203. * Bmap internal routines.
  204. */
  205. STATIC int /* error */
  206. xfs_bmbt_lookup_eq(
  207. struct xfs_btree_cur *cur,
  208. xfs_fileoff_t off,
  209. xfs_fsblock_t bno,
  210. xfs_filblks_t len,
  211. int *stat) /* success/failure */
  212. {
  213. cur->bc_rec.b.br_startoff = off;
  214. cur->bc_rec.b.br_startblock = bno;
  215. cur->bc_rec.b.br_blockcount = len;
  216. return xfs_btree_lookup(cur, XFS_LOOKUP_EQ, stat);
  217. }
  218. STATIC int /* error */
  219. xfs_bmbt_lookup_ge(
  220. struct xfs_btree_cur *cur,
  221. xfs_fileoff_t off,
  222. xfs_fsblock_t bno,
  223. xfs_filblks_t len,
  224. int *stat) /* success/failure */
  225. {
  226. cur->bc_rec.b.br_startoff = off;
  227. cur->bc_rec.b.br_startblock = bno;
  228. cur->bc_rec.b.br_blockcount = len;
  229. return xfs_btree_lookup(cur, XFS_LOOKUP_GE, stat);
  230. }
  231. /*
  232. * Check if the inode needs to be converted to btree format.
  233. */
  234. static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
  235. {
  236. return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
  237. XFS_IFORK_NEXTENTS(ip, whichfork) >
  238. XFS_IFORK_MAXEXT(ip, whichfork);
  239. }
  240. /*
  241. * Check if the inode should be converted to extent format.
  242. */
  243. static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
  244. {
  245. return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
  246. XFS_IFORK_NEXTENTS(ip, whichfork) <=
  247. XFS_IFORK_MAXEXT(ip, whichfork);
  248. }
  249. /*
  250. * Update the record referred to by cur to the value given
  251. * by [off, bno, len, state].
  252. * This either works (return 0) or gets an EFSCORRUPTED error.
  253. */
  254. STATIC int
  255. xfs_bmbt_update(
  256. struct xfs_btree_cur *cur,
  257. xfs_fileoff_t off,
  258. xfs_fsblock_t bno,
  259. xfs_filblks_t len,
  260. xfs_exntst_t state)
  261. {
  262. union xfs_btree_rec rec;
  263. xfs_bmbt_disk_set_allf(&rec.bmbt, off, bno, len, state);
  264. return xfs_btree_update(cur, &rec);
  265. }
/*
 * Called from xfs_bmap_add_attrfork to handle btree format files.
 *
 * Adding the attr fork shrinks the space available to the data fork's
 * incore btree root.  If the root no longer fits, push it down a level
 * (xfs_btree_new_iroot); otherwise just relog it.  Returns 0, ENOSPC if
 * no block could be allocated for the new root, or another error.
 */
STATIC int					/* error */
xfs_bmap_add_attrfork_btree(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_inode_t		*ip,		/* incore inode pointer */
	xfs_fsblock_t		*firstblock,	/* first block allocated */
	xfs_bmap_free_t		*flist,		/* blocks to free at commit */
	int			*flags)		/* inode logging flags */
{
	xfs_btree_cur_t		*cur;		/* btree cursor */
	int			error;		/* error return value */
	xfs_mount_t		*mp;		/* file system mount struct */
	int			stat;		/* newroot status */

	mp = ip->i_mount;
	/* Root still fits in the (smaller) data fork area: just log it. */
	if (ip->i_df.if_broot_bytes <= XFS_IFORK_DSIZE(ip))
		*flags |= XFS_ILOG_DBROOT;
	else {
		cur = xfs_bmbt_init_cursor(mp, tp, ip, XFS_DATA_FORK);
		cur->bc_private.b.flist = flist;
		cur->bc_private.b.firstblock = *firstblock;
		if ((error = xfs_bmbt_lookup_ge(cur, 0, 0, 0, &stat)))
			goto error0;
		/* must be at least one entry */
		XFS_WANT_CORRUPTED_GOTO(stat == 1, error0);
		/* Grow the tree by one level so the root shrinks. */
		if ((error = xfs_btree_new_iroot(cur, flags, &stat)))
			goto error0;
		if (stat == 0) {
			/* No block available for the new child. */
			xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
			return XFS_ERROR(ENOSPC);
		}
		/* Propagate any allocation done by the split back out. */
		*firstblock = cur->bc_private.b.firstblock;
		cur->bc_private.b.allocated = 0;
		xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
	}
	return 0;
error0:
	xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
	return error;
}
  307. /*
  308. * Called from xfs_bmap_add_attrfork to handle extents format files.
  309. */
  310. STATIC int /* error */
  311. xfs_bmap_add_attrfork_extents(
  312. xfs_trans_t *tp, /* transaction pointer */
  313. xfs_inode_t *ip, /* incore inode pointer */
  314. xfs_fsblock_t *firstblock, /* first block allocated */
  315. xfs_bmap_free_t *flist, /* blocks to free at commit */
  316. int *flags) /* inode logging flags */
  317. {
  318. xfs_btree_cur_t *cur; /* bmap btree cursor */
  319. int error; /* error return value */
  320. if (ip->i_d.di_nextents * sizeof(xfs_bmbt_rec_t) <= XFS_IFORK_DSIZE(ip))
  321. return 0;
  322. cur = NULL;
  323. error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist, &cur, 0,
  324. flags, XFS_DATA_FORK);
  325. if (cur) {
  326. cur->bc_private.b.allocated = 0;
  327. xfs_btree_del_cursor(cur,
  328. error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
  329. }
  330. return error;
  331. }
  332. /*
  333. * Block initialisation functions for local to extent format conversion.
  334. * As these get more complex, they will be moved to the relevant files,
  335. * but for now they are too simple to worry about.
  336. */
  337. STATIC void
  338. xfs_bmap_local_to_extents_init_fn(
  339. struct xfs_buf *bp,
  340. struct xfs_inode *ip,
  341. struct xfs_ifork *ifp)
  342. {
  343. bp->b_ops = &xfs_bmbt_buf_ops;
  344. memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
  345. }
/*
 * Block initialisation callback for converting an inline symlink to a
 * remote (block-based) one: copy the link target out of the inode fork.
 */
STATIC void
xfs_symlink_local_to_remote(
	struct xfs_buf		*bp,
	struct xfs_inode	*ip,
	struct xfs_ifork	*ifp)
{
	/* remote symlink blocks are not verifiable until CRCs come along */
	bp->b_ops = NULL;
	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
}
  356. /*
  357. * Called from xfs_bmap_add_attrfork to handle local format files. Each
  358. * different data fork content type needs a different callout to do the
  359. * conversion. Some are basic and only require special block initialisation
  360. * callouts for the data formating, others (directories) are so specialised they
  361. * handle everything themselves.
  362. *
  363. * XXX (dgc): investigate whether directory conversion can use the generic
  364. * formatting callout. It should be possible - it's just a very complex
  365. * formatter. it would also require passing the transaction through to the init
  366. * function.
  367. */
  368. STATIC int /* error */
  369. xfs_bmap_add_attrfork_local(
  370. xfs_trans_t *tp, /* transaction pointer */
  371. xfs_inode_t *ip, /* incore inode pointer */
  372. xfs_fsblock_t *firstblock, /* first block allocated */
  373. xfs_bmap_free_t *flist, /* blocks to free at commit */
  374. int *flags) /* inode logging flags */
  375. {
  376. xfs_da_args_t dargs; /* args for dir/attr code */
  377. if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
  378. return 0;
  379. if (S_ISDIR(ip->i_d.di_mode)) {
  380. memset(&dargs, 0, sizeof(dargs));
  381. dargs.dp = ip;
  382. dargs.firstblock = firstblock;
  383. dargs.flist = flist;
  384. dargs.total = ip->i_mount->m_dirblkfsbs;
  385. dargs.whichfork = XFS_DATA_FORK;
  386. dargs.trans = tp;
  387. return xfs_dir2_sf_to_block(&dargs);
  388. }
  389. if (S_ISLNK(ip->i_d.di_mode))
  390. return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
  391. flags, XFS_DATA_FORK,
  392. xfs_symlink_local_to_remote);
  393. return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
  394. XFS_DATA_FORK,
  395. xfs_bmap_local_to_extents_init_fn);
  396. }
  397. /*
  398. * Convert a delayed allocation to a real allocation.
  399. */
  400. STATIC int /* error */
  401. xfs_bmap_add_extent_delay_real(
  402. struct xfs_bmalloca *bma)
  403. {
  404. struct xfs_bmbt_irec *new = &bma->got;
  405. int diff; /* temp value */
  406. xfs_bmbt_rec_host_t *ep; /* extent entry for idx */
  407. int error; /* error return value */
  408. int i; /* temp state */
  409. xfs_ifork_t *ifp; /* inode fork pointer */
  410. xfs_fileoff_t new_endoff; /* end offset of new entry */
  411. xfs_bmbt_irec_t r[3]; /* neighbor extent entries */
  412. /* left is 0, right is 1, prev is 2 */
  413. int rval=0; /* return value (logging flags) */
  414. int state = 0;/* state bits, accessed thru macros */
  415. xfs_filblks_t da_new; /* new count del alloc blocks used */
  416. xfs_filblks_t da_old; /* old count del alloc blocks used */
  417. xfs_filblks_t temp=0; /* value for da_new calculations */
  418. xfs_filblks_t temp2=0;/* value for da_new calculations */
  419. int tmp_rval; /* partial logging flags */
  420. ifp = XFS_IFORK_PTR(bma->ip, XFS_DATA_FORK);
  421. ASSERT(bma->idx >= 0);
  422. ASSERT(bma->idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
  423. ASSERT(!isnullstartblock(new->br_startblock));
  424. ASSERT(!bma->cur ||
  425. (bma->cur->bc_private.b.flags & XFS_BTCUR_BPRV_WASDEL));
  426. XFS_STATS_INC(xs_add_exlist);
  427. #define LEFT r[0]
  428. #define RIGHT r[1]
  429. #define PREV r[2]
  430. /*
  431. * Set up a bunch of variables to make the tests simpler.
  432. */
  433. ep = xfs_iext_get_ext(ifp, bma->idx);
  434. xfs_bmbt_get_all(ep, &PREV);
  435. new_endoff = new->br_startoff + new->br_blockcount;
  436. ASSERT(PREV.br_startoff <= new->br_startoff);
  437. ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);
  438. da_old = startblockval(PREV.br_startblock);
  439. da_new = 0;
  440. /*
  441. * Set flags determining what part of the previous delayed allocation
  442. * extent is being replaced by a real allocation.
  443. */
  444. if (PREV.br_startoff == new->br_startoff)
  445. state |= BMAP_LEFT_FILLING;
  446. if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
  447. state |= BMAP_RIGHT_FILLING;
  448. /*
  449. * Check and set flags if this segment has a left neighbor.
  450. * Don't set contiguous if the combined extent would be too large.
  451. */
  452. if (bma->idx > 0) {
  453. state |= BMAP_LEFT_VALID;
  454. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx - 1), &LEFT);
  455. if (isnullstartblock(LEFT.br_startblock))
  456. state |= BMAP_LEFT_DELAY;
  457. }
  458. if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
  459. LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
  460. LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
  461. LEFT.br_state == new->br_state &&
  462. LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
  463. state |= BMAP_LEFT_CONTIG;
  464. /*
  465. * Check and set flags if this segment has a right neighbor.
  466. * Don't set contiguous if the combined extent would be too large.
  467. * Also check for all-three-contiguous being too large.
  468. */
  469. if (bma->idx < bma->ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
  470. state |= BMAP_RIGHT_VALID;
  471. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, bma->idx + 1), &RIGHT);
  472. if (isnullstartblock(RIGHT.br_startblock))
  473. state |= BMAP_RIGHT_DELAY;
  474. }
  475. if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
  476. new_endoff == RIGHT.br_startoff &&
  477. new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
  478. new->br_state == RIGHT.br_state &&
  479. new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
  480. ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
  481. BMAP_RIGHT_FILLING)) !=
  482. (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
  483. BMAP_RIGHT_FILLING) ||
  484. LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
  485. <= MAXEXTLEN))
  486. state |= BMAP_RIGHT_CONTIG;
  487. error = 0;
  488. /*
  489. * Switch out based on the FILLING and CONTIG state bits.
  490. */
  491. switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
  492. BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
  493. case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
  494. BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
  495. /*
  496. * Filling in all of a previously delayed allocation extent.
  497. * The left and right neighbors are both contiguous with new.
  498. */
  499. bma->idx--;
  500. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  501. xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
  502. LEFT.br_blockcount + PREV.br_blockcount +
  503. RIGHT.br_blockcount);
  504. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  505. xfs_iext_remove(bma->ip, bma->idx + 1, 2, state);
  506. bma->ip->i_d.di_nextents--;
  507. if (bma->cur == NULL)
  508. rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
  509. else {
  510. rval = XFS_ILOG_CORE;
  511. error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
  512. RIGHT.br_startblock,
  513. RIGHT.br_blockcount, &i);
  514. if (error)
  515. goto done;
  516. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  517. error = xfs_btree_delete(bma->cur, &i);
  518. if (error)
  519. goto done;
  520. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  521. error = xfs_btree_decrement(bma->cur, 0, &i);
  522. if (error)
  523. goto done;
  524. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  525. error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
  526. LEFT.br_startblock,
  527. LEFT.br_blockcount +
  528. PREV.br_blockcount +
  529. RIGHT.br_blockcount, LEFT.br_state);
  530. if (error)
  531. goto done;
  532. }
  533. break;
  534. case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
  535. /*
  536. * Filling in all of a previously delayed allocation extent.
  537. * The left neighbor is contiguous, the right is not.
  538. */
  539. bma->idx--;
  540. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  541. xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx),
  542. LEFT.br_blockcount + PREV.br_blockcount);
  543. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  544. xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
  545. if (bma->cur == NULL)
  546. rval = XFS_ILOG_DEXT;
  547. else {
  548. rval = 0;
  549. error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
  550. LEFT.br_startblock, LEFT.br_blockcount,
  551. &i);
  552. if (error)
  553. goto done;
  554. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  555. error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
  556. LEFT.br_startblock,
  557. LEFT.br_blockcount +
  558. PREV.br_blockcount, LEFT.br_state);
  559. if (error)
  560. goto done;
  561. }
  562. break;
  563. case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
  564. /*
  565. * Filling in all of a previously delayed allocation extent.
  566. * The right neighbor is contiguous, the left is not.
  567. */
  568. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  569. xfs_bmbt_set_startblock(ep, new->br_startblock);
  570. xfs_bmbt_set_blockcount(ep,
  571. PREV.br_blockcount + RIGHT.br_blockcount);
  572. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  573. xfs_iext_remove(bma->ip, bma->idx + 1, 1, state);
  574. if (bma->cur == NULL)
  575. rval = XFS_ILOG_DEXT;
  576. else {
  577. rval = 0;
  578. error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
  579. RIGHT.br_startblock,
  580. RIGHT.br_blockcount, &i);
  581. if (error)
  582. goto done;
  583. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  584. error = xfs_bmbt_update(bma->cur, PREV.br_startoff,
  585. new->br_startblock,
  586. PREV.br_blockcount +
  587. RIGHT.br_blockcount, PREV.br_state);
  588. if (error)
  589. goto done;
  590. }
  591. break;
  592. case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
  593. /*
  594. * Filling in all of a previously delayed allocation extent.
  595. * Neither the left nor right neighbors are contiguous with
  596. * the new one.
  597. */
  598. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  599. xfs_bmbt_set_startblock(ep, new->br_startblock);
  600. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  601. bma->ip->i_d.di_nextents++;
  602. if (bma->cur == NULL)
  603. rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
  604. else {
  605. rval = XFS_ILOG_CORE;
  606. error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
  607. new->br_startblock, new->br_blockcount,
  608. &i);
  609. if (error)
  610. goto done;
  611. XFS_WANT_CORRUPTED_GOTO(i == 0, done);
  612. bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
  613. error = xfs_btree_insert(bma->cur, &i);
  614. if (error)
  615. goto done;
  616. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  617. }
  618. break;
  619. case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
  620. /*
  621. * Filling in the first part of a previous delayed allocation.
  622. * The left neighbor is contiguous.
  623. */
  624. trace_xfs_bmap_pre_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
  625. xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, bma->idx - 1),
  626. LEFT.br_blockcount + new->br_blockcount);
  627. xfs_bmbt_set_startoff(ep,
  628. PREV.br_startoff + new->br_blockcount);
  629. trace_xfs_bmap_post_update(bma->ip, bma->idx - 1, state, _THIS_IP_);
  630. temp = PREV.br_blockcount - new->br_blockcount;
  631. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  632. xfs_bmbt_set_blockcount(ep, temp);
  633. if (bma->cur == NULL)
  634. rval = XFS_ILOG_DEXT;
  635. else {
  636. rval = 0;
  637. error = xfs_bmbt_lookup_eq(bma->cur, LEFT.br_startoff,
  638. LEFT.br_startblock, LEFT.br_blockcount,
  639. &i);
  640. if (error)
  641. goto done;
  642. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  643. error = xfs_bmbt_update(bma->cur, LEFT.br_startoff,
  644. LEFT.br_startblock,
  645. LEFT.br_blockcount +
  646. new->br_blockcount,
  647. LEFT.br_state);
  648. if (error)
  649. goto done;
  650. }
  651. da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
  652. startblockval(PREV.br_startblock));
  653. xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
  654. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  655. bma->idx--;
  656. break;
  657. case BMAP_LEFT_FILLING:
  658. /*
  659. * Filling in the first part of a previous delayed allocation.
  660. * The left neighbor is not contiguous.
  661. */
  662. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  663. xfs_bmbt_set_startoff(ep, new_endoff);
  664. temp = PREV.br_blockcount - new->br_blockcount;
  665. xfs_bmbt_set_blockcount(ep, temp);
  666. xfs_iext_insert(bma->ip, bma->idx, 1, new, state);
  667. bma->ip->i_d.di_nextents++;
  668. if (bma->cur == NULL)
  669. rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
  670. else {
  671. rval = XFS_ILOG_CORE;
  672. error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
  673. new->br_startblock, new->br_blockcount,
  674. &i);
  675. if (error)
  676. goto done;
  677. XFS_WANT_CORRUPTED_GOTO(i == 0, done);
  678. bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
  679. error = xfs_btree_insert(bma->cur, &i);
  680. if (error)
  681. goto done;
  682. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  683. }
  684. if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
  685. error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
  686. bma->firstblock, bma->flist,
  687. &bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
  688. rval |= tmp_rval;
  689. if (error)
  690. goto done;
  691. }
  692. da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
  693. startblockval(PREV.br_startblock) -
  694. (bma->cur ? bma->cur->bc_private.b.allocated : 0));
  695. ep = xfs_iext_get_ext(ifp, bma->idx + 1);
  696. xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
  697. trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
  698. break;
  699. case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
  700. /*
  701. * Filling in the last part of a previous delayed allocation.
  702. * The right neighbor is contiguous with the new allocation.
  703. */
  704. temp = PREV.br_blockcount - new->br_blockcount;
  705. trace_xfs_bmap_pre_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
  706. xfs_bmbt_set_blockcount(ep, temp);
  707. xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, bma->idx + 1),
  708. new->br_startoff, new->br_startblock,
  709. new->br_blockcount + RIGHT.br_blockcount,
  710. RIGHT.br_state);
  711. trace_xfs_bmap_post_update(bma->ip, bma->idx + 1, state, _THIS_IP_);
  712. if (bma->cur == NULL)
  713. rval = XFS_ILOG_DEXT;
  714. else {
  715. rval = 0;
  716. error = xfs_bmbt_lookup_eq(bma->cur, RIGHT.br_startoff,
  717. RIGHT.br_startblock,
  718. RIGHT.br_blockcount, &i);
  719. if (error)
  720. goto done;
  721. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  722. error = xfs_bmbt_update(bma->cur, new->br_startoff,
  723. new->br_startblock,
  724. new->br_blockcount +
  725. RIGHT.br_blockcount,
  726. RIGHT.br_state);
  727. if (error)
  728. goto done;
  729. }
  730. da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
  731. startblockval(PREV.br_startblock));
  732. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  733. xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
  734. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  735. bma->idx++;
  736. break;
  737. case BMAP_RIGHT_FILLING:
  738. /*
  739. * Filling in the last part of a previous delayed allocation.
  740. * The right neighbor is not contiguous.
  741. */
  742. temp = PREV.br_blockcount - new->br_blockcount;
  743. trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
  744. xfs_bmbt_set_blockcount(ep, temp);
  745. xfs_iext_insert(bma->ip, bma->idx + 1, 1, new, state);
  746. bma->ip->i_d.di_nextents++;
  747. if (bma->cur == NULL)
  748. rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
  749. else {
  750. rval = XFS_ILOG_CORE;
  751. error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
  752. new->br_startblock, new->br_blockcount,
  753. &i);
  754. if (error)
  755. goto done;
  756. XFS_WANT_CORRUPTED_GOTO(i == 0, done);
  757. bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
  758. error = xfs_btree_insert(bma->cur, &i);
  759. if (error)
  760. goto done;
  761. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  762. }
  763. if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
  764. error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
  765. bma->firstblock, bma->flist, &bma->cur, 1,
  766. &tmp_rval, XFS_DATA_FORK);
  767. rval |= tmp_rval;
  768. if (error)
  769. goto done;
  770. }
  771. da_new = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(bma->ip, temp),
  772. startblockval(PREV.br_startblock) -
  773. (bma->cur ? bma->cur->bc_private.b.allocated : 0));
  774. ep = xfs_iext_get_ext(ifp, bma->idx);
  775. xfs_bmbt_set_startblock(ep, nullstartblock(da_new));
  776. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  777. bma->idx++;
  778. break;
  779. case 0:
  780. /*
  781. * Filling in the middle part of a previous delayed allocation.
  782. * Contiguity is impossible here.
  783. * This case is avoided almost all the time.
  784. *
  785. * We start with a delayed allocation:
  786. *
  787. * +ddddddddddddddddddddddddddddddddddddddddddddddddddddddd+
  788. * PREV @ idx
  789. *
  790. * and we are allocating:
  791. * +rrrrrrrrrrrrrrrrr+
  792. * new
  793. *
  794. * and we set it up for insertion as:
  795. * +ddddddddddddddddddd+rrrrrrrrrrrrrrrrr+ddddddddddddddddd+
  796. * new
  797. * PREV @ idx LEFT RIGHT
  798. * inserted at idx + 1
  799. */
  800. temp = new->br_startoff - PREV.br_startoff;
  801. temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff;
  802. trace_xfs_bmap_pre_update(bma->ip, bma->idx, 0, _THIS_IP_);
  803. xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */
  804. LEFT = *new;
  805. RIGHT.br_state = PREV.br_state;
  806. RIGHT.br_startblock = nullstartblock(
  807. (int)xfs_bmap_worst_indlen(bma->ip, temp2));
  808. RIGHT.br_startoff = new_endoff;
  809. RIGHT.br_blockcount = temp2;
  810. /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */
  811. xfs_iext_insert(bma->ip, bma->idx + 1, 2, &LEFT, state);
  812. bma->ip->i_d.di_nextents++;
  813. if (bma->cur == NULL)
  814. rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
  815. else {
  816. rval = XFS_ILOG_CORE;
  817. error = xfs_bmbt_lookup_eq(bma->cur, new->br_startoff,
  818. new->br_startblock, new->br_blockcount,
  819. &i);
  820. if (error)
  821. goto done;
  822. XFS_WANT_CORRUPTED_GOTO(i == 0, done);
  823. bma->cur->bc_rec.b.br_state = XFS_EXT_NORM;
  824. error = xfs_btree_insert(bma->cur, &i);
  825. if (error)
  826. goto done;
  827. XFS_WANT_CORRUPTED_GOTO(i == 1, done);
  828. }
  829. if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
  830. error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
  831. bma->firstblock, bma->flist, &bma->cur,
  832. 1, &tmp_rval, XFS_DATA_FORK);
  833. rval |= tmp_rval;
  834. if (error)
  835. goto done;
  836. }
  837. temp = xfs_bmap_worst_indlen(bma->ip, temp);
  838. temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
  839. diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
  840. (bma->cur ? bma->cur->bc_private.b.allocated : 0));
  841. if (diff > 0) {
  842. error = xfs_icsb_modify_counters(bma->ip->i_mount,
  843. XFS_SBS_FDBLOCKS,
  844. -((int64_t)diff), 0);
  845. ASSERT(!error);
  846. if (error)
  847. goto done;
  848. }
  849. ep = xfs_iext_get_ext(ifp, bma->idx);
  850. xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
  851. trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
  852. trace_xfs_bmap_pre_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
  853. xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, bma->idx + 2),
  854. nullstartblock((int)temp2));
  855. trace_xfs_bmap_post_update(bma->ip, bma->idx + 2, state, _THIS_IP_);
  856. bma->idx++;
  857. da_new = temp + temp2;
  858. break;
  859. case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
  860. case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
  861. case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
  862. case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
  863. case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
  864. case BMAP_LEFT_CONTIG:
  865. case BMAP_RIGHT_CONTIG:
  866. /*
  867. * These cases are all impossible.
  868. */
  869. ASSERT(0);
  870. }
  871. /* convert to a btree if necessary */
  872. if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
  873. int tmp_logflags; /* partial log flag return val */
  874. ASSERT(bma->cur == NULL);
  875. error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
  876. bma->firstblock, bma->flist, &bma->cur,
  877. da_old > 0, &tmp_logflags, XFS_DATA_FORK);
  878. bma->logflags |= tmp_logflags;
  879. if (error)
  880. goto done;
  881. }
  882. /* adjust for changes in reserved delayed indirect blocks */
  883. if (da_old || da_new) {
  884. temp = da_new;
  885. if (bma->cur)
  886. temp += bma->cur->bc_private.b.allocated;
  887. ASSERT(temp <= da_old);
  888. if (temp < da_old)
  889. xfs_icsb_modify_counters(bma->ip->i_mount,
  890. XFS_SBS_FDBLOCKS,
  891. (int64_t)(da_old - temp), 0);
  892. }
  893. /* clear out the allocated field, done with it now in any case. */
  894. if (bma->cur)
  895. bma->cur->bc_private.b.allocated = 0;
  896. xfs_bmap_check_leaf_extents(bma->cur, bma->ip, XFS_DATA_FORK);
  897. done:
  898. bma->logflags |= rval;
  899. return error;
  900. #undef LEFT
  901. #undef RIGHT
  902. #undef PREV
  903. }
/*
 * Convert an unwritten allocation to a real allocation or vice versa.
 *
 * The range described by @new must lie entirely within the existing
 * extent at index @idx (PREV below), and PREV must be in the opposite
 * written/unwritten state to @new — both enforced by ASSERTs.  The
 * incore extent list is rewritten to reflect the conversion, merging
 * the converted range with contiguous same-state neighbors where
 * possible, and the bmap btree (when one exists, i.e. *curp != NULL)
 * is kept in sync.
 *
 * Returns 0 on success or an XFS error code; *logflagsp is set to the
 * inode logging flags the caller must apply to the transaction.
 */
STATIC int				/* error */
xfs_bmap_add_extent_unwritten_real(
	struct xfs_trans	*tp,
	xfs_inode_t		*ip,	/* incore inode pointer */
	xfs_extnum_t		*idx,	/* extent number to update/insert */
	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
	xfs_fsblock_t		*first,	/* pointer to firstblock variable */
	xfs_bmap_free_t		*flist,	/* list of extents to be freed */
	int			*logflagsp) /* inode logging flags */
{
	xfs_btree_cur_t		*cur;	/* btree cursor */
	xfs_bmbt_rec_host_t	*ep;	/* extent entry for idx */
	int			error;	/* error return value */
	int			i;	/* temp state */
	xfs_ifork_t		*ifp;	/* inode fork pointer */
	xfs_fileoff_t		new_endoff;	/* end offset of new entry */
	xfs_exntst_t		newext;	/* new extent state */
	xfs_exntst_t		oldext;	/* old extent state */
	xfs_bmbt_irec_t		r[3];	/* neighbor extent entries */
					/* left is 0, right is 1, prev is 2 */
	int			rval=0;	/* return value (logging flags) */
	int			state = 0;/* state bits, accessed thru macros */

	*logflagsp = 0;

	cur = *curp;
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);

	ASSERT(*idx >= 0);
	ASSERT(*idx <= ifp->if_bytes / sizeof(struct xfs_bmbt_rec));
	ASSERT(!isnullstartblock(new->br_startblock));

	XFS_STATS_INC(xs_add_exlist);

	/* Readable aliases into r[]: the left/right neighbors and the
	 * pre-existing extent being converted. */
#define	LEFT		r[0]
#define	RIGHT		r[1]
#define	PREV		r[2]

	/*
	 * Set up a bunch of variables to make the tests simpler.
	 */
	error = 0;
	ep = xfs_iext_get_ext(ifp, *idx);
	xfs_bmbt_get_all(ep, &PREV);
	newext = new->br_state;
	oldext = (newext == XFS_EXT_UNWRITTEN) ?
		XFS_EXT_NORM : XFS_EXT_UNWRITTEN;
	/* The new range must flip PREV's state and sit wholly inside it. */
	ASSERT(PREV.br_state == oldext);
	new_endoff = new->br_startoff + new->br_blockcount;
	ASSERT(PREV.br_startoff <= new->br_startoff);
	ASSERT(PREV.br_startoff + PREV.br_blockcount >= new_endoff);

	/*
	 * Set flags determining what part of the previous oldext allocation
	 * extent is being replaced by a newext allocation.
	 */
	if (PREV.br_startoff == new->br_startoff)
		state |= BMAP_LEFT_FILLING;
	if (PREV.br_startoff + PREV.br_blockcount == new_endoff)
		state |= BMAP_RIGHT_FILLING;

	/*
	 * Check and set flags if this segment has a left neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 */
	if (*idx > 0) {
		state |= BMAP_LEFT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT);

		if (isnullstartblock(LEFT.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/* Left-contiguous requires adjacency in both file offset and disk
	 * block space, matching extent state, and a merged length that
	 * still fits in one extent record. */
	if ((state & BMAP_LEFT_VALID) && !(state & BMAP_LEFT_DELAY) &&
	    LEFT.br_startoff + LEFT.br_blockcount == new->br_startoff &&
	    LEFT.br_startblock + LEFT.br_blockcount == new->br_startblock &&
	    LEFT.br_state == newext &&
	    LEFT.br_blockcount + new->br_blockcount <= MAXEXTLEN)
		state |= BMAP_LEFT_CONTIG;

	/*
	 * Check and set flags if this segment has a right neighbor.
	 * Don't set contiguous if the combined extent would be too large.
	 * Also check for all-three-contiguous being too large.
	 */
	if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) {
		state |= BMAP_RIGHT_VALID;
		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
		if (isnullstartblock(RIGHT.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	if ((state & BMAP_RIGHT_VALID) && !(state & BMAP_RIGHT_DELAY) &&
	    new_endoff == RIGHT.br_startoff &&
	    new->br_startblock + new->br_blockcount == RIGHT.br_startblock &&
	    newext == RIGHT.br_state &&
	    new->br_blockcount + RIGHT.br_blockcount <= MAXEXTLEN &&
	    ((state & (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING)) !=
		      (BMAP_LEFT_CONTIG | BMAP_LEFT_FILLING |
		       BMAP_RIGHT_FILLING) ||
	     LEFT.br_blockcount + new->br_blockcount + RIGHT.br_blockcount
			<= MAXEXTLEN))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the FILLING and CONTIG state bits.
	 */
	switch (state & (BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
			 BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG |
	     BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left and right neighbors are both contiguous with new.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount +
			RIGHT.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		/* PREV and RIGHT collapse into LEFT: drop two incore
		 * records and, below, two btree records. */
		xfs_iext_remove(ip, *idx + 1, 2, state);
		ip->i_d.di_nextents -= 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			/* Delete RIGHT, step back and delete PREV, then
			 * step back once more and grow LEFT in place. */
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount +
				RIGHT.br_blockcount, LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The left neighbor is contiguous, the right is not.
		 */
		--*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx),
			LEFT.br_blockcount + PREV.br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		/* PREV is absorbed by LEFT: remove its record. */
		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + PREV.br_blockcount,
				LEFT.br_state)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * The right neighbor is contiguous, the left is not.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		/* Convert PREV in place and extend it over RIGHT. */
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount + RIGHT.br_blockcount);
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
		xfs_iext_remove(ip, *idx + 1, 1, state);
		ip->i_d.di_nextents--;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, RIGHT.br_startoff,
					RIGHT.br_startblock,
					RIGHT.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_delete(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_RIGHT_FILLING:
		/*
		 * Setting all of a previous oldext extent to newext.
		 * Neither the left nor right neighbors are contiguous with
		 * the new one.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		/* Pure state flip: geometry is unchanged, so only the
		 * extent state (and the matching btree record) changes. */
		xfs_bmbt_set_state(ep, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock, new->br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_);
		/* Shift the converted head blocks out of PREV into LEFT. */
		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1),
			LEFT.br_blockcount + new->br_blockcount);
		xfs_bmbt_set_startoff(ep,
			PREV.br_startoff + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_);

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		--*idx;

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			/* Two record updates: shrink PREV, then step left
			 * and grow LEFT. */
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_decrement(cur, 0, &i)))
				goto done;
			error = xfs_bmbt_update(cur, LEFT.br_startoff,
				LEFT.br_startblock,
				LEFT.br_blockcount + new->br_blockcount,
				LEFT.br_state);
			if (error)
				goto done;
		}
		break;

	case BMAP_LEFT_FILLING:
		/*
		 * Setting the first part of a previous oldext extent to newext.
		 * The left neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		ASSERT(ep && xfs_bmbt_get_state(ep) == oldext);
		/* Trim the converted head off PREV and insert @new as a
		 * separate extent before it. */
		xfs_bmbt_set_startoff(ep, new_endoff);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		xfs_bmbt_set_startblock(ep,
			new->br_startblock + new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		xfs_iext_insert(ip, *idx, 1, new, state);
		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur,
				PREV.br_startoff + new->br_blockcount,
				PREV.br_startblock + new->br_blockcount,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			cur->bc_rec.b = *new;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_RIGHT_FILLING | BMAP_RIGHT_CONTIG:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is contiguous with the new allocation.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		/* Trim the converted tail off PREV and fold it into RIGHT. */
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;

		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
			new->br_startoff, new->br_startblock,
			new->br_blockcount + RIGHT.br_blockcount, newext);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		if (cur == NULL)
			rval = XFS_ILOG_DEXT;
		else {
			rval = 0;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock,
					PREV.br_blockcount, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			if ((error = xfs_btree_increment(cur, 0, &i)))
				goto done;
			if ((error = xfs_bmbt_update(cur, new->br_startoff,
				new->br_startblock,
				new->br_blockcount + RIGHT.br_blockcount,
				newext)))
				goto done;
		}
		break;

	case BMAP_RIGHT_FILLING:
		/*
		 * Setting the last part of a previous oldext extent to newext.
		 * The right neighbor is not contiguous.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		xfs_bmbt_set_blockcount(ep,
			PREV.br_blockcount - new->br_blockcount);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		++*idx;
		xfs_iext_insert(ip, *idx, 1, new, state);

		ip->i_d.di_nextents++;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			if ((error = xfs_bmbt_update(cur, PREV.br_startoff,
				PREV.br_startblock,
				PREV.br_blockcount - new->br_blockcount,
				oldext)))
				goto done;
			/* Position the cursor for the insert; i == 0 means
			 * no existing record at the new key, as expected. */
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			/*
			 * NOTE(review): the inserted record's state is forced
			 * to XFS_EXT_NORM rather than newext — verify this is
			 * still correct when converting real to unwritten.
			 */
			cur->bc_rec.b.br_state = XFS_EXT_NORM;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case 0:
		/*
		 * Setting the middle part of a previous oldext extent to
		 * newext.  Contiguity is impossible here.
		 * One extent becomes three extents.
		 */
		trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
		/* PREV keeps the unconverted head ... */
		xfs_bmbt_set_blockcount(ep,
			new->br_startoff - PREV.br_startoff);
		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);

		/* ... r[0] is the converted middle (@new), r[1] the
		 * unconverted tail still in the old state. */
		r[0] = *new;
		r[1].br_startoff = new_endoff;
		r[1].br_blockcount =
			PREV.br_startoff + PREV.br_blockcount - new_endoff;
		r[1].br_startblock = new->br_startblock + new->br_blockcount;
		r[1].br_state = oldext;

		++*idx;
		xfs_iext_insert(ip, *idx, 2, &r[0], state);

		ip->i_d.di_nextents += 2;
		if (cur == NULL)
			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
		else {
			rval = XFS_ILOG_CORE;
			if ((error = xfs_bmbt_lookup_eq(cur, PREV.br_startoff,
					PREV.br_startblock, PREV.br_blockcount,
					&i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			/* new right extent - oldext */
			if ((error = xfs_bmbt_update(cur, r[1].br_startoff,
				r[1].br_startblock, r[1].br_blockcount,
				r[1].br_state)))
				goto done;
			/* new left extent - oldext */
			cur->bc_rec.b = PREV;
			cur->bc_rec.b.br_blockcount =
				new->br_startoff - PREV.br_startoff;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
			/*
			 * Reset the cursor to the position of the new extent
			 * we are about to insert as we can't trust it after
			 * the previous insert.
			 */
			if ((error = xfs_bmbt_lookup_eq(cur, new->br_startoff,
					new->br_startblock, new->br_blockcount,
					&i)))
				goto done;
			/* i == 0: the middle extent must not exist yet. */
			XFS_WANT_CORRUPTED_GOTO(i == 0, done);
			/* new middle extent - newext */
			cur->bc_rec.b.br_state = new->br_state;
			if ((error = xfs_btree_insert(cur, &i)))
				goto done;
			XFS_WANT_CORRUPTED_GOTO(i == 1, done);
		}
		break;

	case BMAP_LEFT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_FILLING | BMAP_RIGHT_CONTIG:
	case BMAP_RIGHT_FILLING | BMAP_LEFT_CONTIG:
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
	case BMAP_LEFT_CONTIG:
	case BMAP_RIGHT_CONTIG:
		/*
		 * These cases are all impossible.
		 */
		ASSERT(0);
	}

	/* convert to a btree if necessary */
	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
		int	tmp_logflags;	/* partial log flag return val */

		ASSERT(cur == NULL);
		error = xfs_bmap_extents_to_btree(tp, ip, first, flist, &cur,
				0, &tmp_logflags, XFS_DATA_FORK);
		*logflagsp |= tmp_logflags;
		if (error)
			goto done;
	}

	/* clear out the allocated field, done with it now in any case. */
	if (cur) {
		cur->bc_private.b.allocated = 0;
		*curp = cur;
	}

	xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
done:
	*logflagsp |= rval;
	return error;
#undef	LEFT
#undef	RIGHT
#undef	PREV
}
  1385. /*
  1386. * Convert a hole to a delayed allocation.
  1387. */
  1388. STATIC void
  1389. xfs_bmap_add_extent_hole_delay(
  1390. xfs_inode_t *ip, /* incore inode pointer */
  1391. xfs_extnum_t *idx, /* extent number to update/insert */
  1392. xfs_bmbt_irec_t *new) /* new data to add to file extents */
  1393. {
  1394. xfs_ifork_t *ifp; /* inode fork pointer */
  1395. xfs_bmbt_irec_t left; /* left neighbor extent entry */
  1396. xfs_filblks_t newlen=0; /* new indirect size */
  1397. xfs_filblks_t oldlen=0; /* old indirect size */
  1398. xfs_bmbt_irec_t right; /* right neighbor extent entry */
  1399. int state; /* state bits, accessed thru macros */
  1400. xfs_filblks_t temp=0; /* temp for indirect calculations */
  1401. ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
  1402. state = 0;
  1403. ASSERT(isnullstartblock(new->br_startblock));
  1404. /*
  1405. * Check and set flags if this segment has a left neighbor
  1406. */
  1407. if (*idx > 0) {
  1408. state |= BMAP_LEFT_VALID;
  1409. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left);
  1410. if (isnullstartblock(left.br_startblock))
  1411. state |= BMAP_LEFT_DELAY;
  1412. }
  1413. /*
  1414. * Check and set flags if the current (right) segment exists.
  1415. * If it doesn't exist, we're converting the hole at end-of-file.
  1416. */
  1417. if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) {
  1418. state |= BMAP_RIGHT_VALID;
  1419. xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right);
  1420. if (isnullstartblock(right.br_startblock))
  1421. state |= BMAP_RIGHT_DELAY;
  1422. }
  1423. /*
  1424. * Set contiguity flags on the left and right neighbors.
  1425. * Don't let extents get too large, even if the pieces are contiguous.
  1426. */
  1427. if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
  1428. left.br_startoff + left.br_blockcount == new->br_startoff &&
  1429. left.br_blockcount + new->br_blockcount <= MAXEXTLEN)
  1430. state |= BMAP_LEFT_CONTIG;
  1431. if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
  1432. new->br_startoff + new->br_blockcount == right.br_startoff &&
  1433. new->br_blockcount + right.br_blockcount <= MAXEXTLEN &&
  1434. (!(state & BMAP_LEFT_CONTIG) ||
  1435. (left.br_blockcount + new->br_blockcount +
  1436. right.br_blockcount <= MAXEXTLEN)))
  1437. state |= BMAP_RIGHT_CONTIG;
  1438. /*
  1439. * Switch out based on the contiguity flags.
  1440. */
  1441. switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
  1442. case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
  1443. /*
  1444. * New allocation is contiguous with delayed allocations
  1445. * on the left and on the right.
  1446. * Merge all three into a single extent record.
  1447. */
  1448. --*idx;
  1449. temp = left.br_blockcount + new->br_blockcount +
  1450. right.br_blockcount;
  1451. trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
  1452. xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
  1453. oldlen = startblockval(left.br_startblock) +
  1454. startblockval(new->br_startblock) +
  1455. startblockval(right.br_startblock);
  1456. newlen = xfs_bmap_worst_indlen(ip, temp);
  1457. xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
  1458. nullstartblock((int)newlen));
  1459. trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
  1460. xfs_iext_remove(ip, *idx + 1, 1, state);
  1461. break;
  1462. case BMAP_LEFT_CONTIG:
  1463. /*
  1464. * New allocation is contiguous with a delayed allocation
  1465. * on the left.
  1466. * Merge the new allocation with the left neighbor.
  1467. */
  1468. --*idx;
  1469. temp = left.br_blockcount + new->br_blockcount;
  1470. trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_);
  1471. xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
  1472. oldlen = startblockval(left.br_startblock) +
  1473. startblock

Large files are truncated, but you can click here to view the full file