
/fs/ext3/inode.c

https://github.com/mstsirkin/linux
/*
 * linux/fs/ext3/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * from
 *
 * linux/fs/minix/inode.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Goal-directed block allocation by Stephen Tweedie
 * (sct@redhat.com), 1993, 1998
 * Big-endian to little-endian byte-swapping/bitmaps by
 * David S. Miller (davem@caip.rutgers.edu), 1995
 * 64-bit file support on 64-bit platforms by Jakub Jelinek
 * (jj@sunsite.ms.mff.cuni.cz)
 *
 * Assorted race fixes, rewrite of ext3_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/ext3_jbd.h>
#include <linux/jbd.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/fiemap.h>
#include <linux/namei.h>
#include <trace/events/ext3.h>
#include "xattr.h"
#include "acl.h"

static int ext3_writepage_trans_blocks(struct inode *inode);
static int ext3_block_truncate_page(struct inode *inode, loff_t from);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext3_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT3_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * The ext3 forget function must perform a revoke if we are freeing data
 * which has been journaled. Metadata (eg. indirect blocks) must be
 * revoked in all cases.
 *
 * "bh" may be NULL: a metadata block may have been freed from memory
 * but there may still be a record of it in the journal, and that record
 * still needs to be revoked.
 */
int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode,
                struct buffer_head *bh, ext3_fsblk_t blocknr)
{
        int err;

        might_sleep();

        trace_ext3_forget(inode, is_metadata, blocknr);
        BUFFER_TRACE(bh, "enter");

        jbd_debug(4, "forgetting bh %p: is_metadata = %d, mode %o, "
                  "data mode %lx\n",
                  bh, is_metadata, inode->i_mode,
                  test_opt(inode->i_sb, DATA_FLAGS));

        /* Never use the revoke function if we are doing full data
         * journaling: there is no need to, and a V1 superblock won't
         * support it. Otherwise, only skip the revoke on un-journaled
         * data blocks. */
        if (test_opt(inode->i_sb, DATA_FLAGS) == EXT3_MOUNT_JOURNAL_DATA ||
            (!is_metadata && !ext3_should_journal_data(inode))) {
                if (bh) {
                        BUFFER_TRACE(bh, "call journal_forget");
                        return ext3_journal_forget(handle, bh);
                }
                return 0;
        }

        /*
         * data!=journal && (is_metadata || should_journal_data(inode))
         */
        BUFFER_TRACE(bh, "call ext3_journal_revoke");
        err = ext3_journal_revoke(handle, blocknr, bh);
        if (err)
                ext3_abort(inode->i_sb, __func__,
                           "error %d when attempting revoke", err);
        BUFFER_TRACE(bh, "exit");
        return err;
}

/*
 * Work out how many blocks we need to proceed with the next chunk of a
 * truncate transaction.
 */
static unsigned long blocks_for_truncate(struct inode *inode)
{
        unsigned long needed;

        needed = inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9);

        /* Give ourselves just enough room to cope with inodes in which
         * i_blocks is corrupt: we've seen disk corruptions in the past
         * which resulted in random data in an inode which looked enough
         * like a regular file for ext3 to try to delete it. Things
         * will go a bit crazy if that happens, but at least we should
         * try not to panic the whole kernel. */
        if (needed < 2)
                needed = 2;

        /* But we need to bound the transaction so we don't overflow the
         * journal. */
        if (needed > EXT3_MAX_TRANS_DATA)
                needed = EXT3_MAX_TRANS_DATA;

        return EXT3_DATA_TRANS_BLOCKS(inode->i_sb) + needed;
}
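
/*
 * Worked example of the shift above (illustrative numbers, not part of
 * the original source): with 4KB blocks, s_blocksize_bits = 12, so the
 * shift is 12 - 9 = 3. Since i_blocks counts 512-byte sectors, a file
 * occupying roughly 1MB (2048 sectors) yields needed = 2048 >> 3 = 256
 * filesystem blocks before the clamping below is applied.
 */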
/*
 * Truncate transactions can be complex and absolutely huge. So we need to
 * be able to restart the transaction at a convenient checkpoint to make
 * sure we don't overflow the journal.
 *
 * start_transaction gets us a new handle for a truncate transaction,
 * and extend_transaction tries to extend the existing one a bit. If
 * extend fails, we need to propagate the failure up and restart the
 * transaction in the top-level truncate loop. --sct
 */
static handle_t *start_transaction(struct inode *inode)
{
        handle_t *result;

        result = ext3_journal_start(inode, blocks_for_truncate(inode));
        if (!IS_ERR(result))
                return result;

        ext3_std_error(inode->i_sb, PTR_ERR(result));
        return result;
}

/*
 * Try to extend this transaction for the purposes of truncation.
 *
 * Returns 0 if we managed to create more room. If we can't create more
 * room, and the transaction must be restarted we return 1.
 */
static int try_to_extend_transaction(handle_t *handle, struct inode *inode)
{
        if (handle->h_buffer_credits > EXT3_RESERVE_TRANS_BLOCKS)
                return 0;
        if (!ext3_journal_extend(handle, blocks_for_truncate(inode)))
                return 0;
        return 1;
}

/*
 * Restart the transaction associated with *handle. This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
static int truncate_restart_transaction(handle_t *handle, struct inode *inode)
{
        int ret;

        jbd_debug(2, "restarting handle %p\n", handle);
        /*
         * Drop truncate_mutex to avoid deadlock with ext3_get_blocks_handle.
         * At this moment, get_block can be called only for blocks inside
         * i_size since page cache has been already dropped and writes are
         * blocked by i_mutex. So we can safely drop the truncate_mutex.
         */
        mutex_unlock(&EXT3_I(inode)->truncate_mutex);
        ret = ext3_journal_restart(handle, blocks_for_truncate(inode));
        mutex_lock(&EXT3_I(inode)->truncate_mutex);
        return ret;
}

/*
 * Called at inode eviction from icache
 */
void ext3_evict_inode(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *rsv;
        handle_t *handle;
        int want_delete = 0;

        trace_ext3_evict_inode(inode);
        if (!inode->i_nlink && !is_bad_inode(inode)) {
                dquot_initialize(inode);
                want_delete = 1;
        }

        /*
         * When journalling data, dirty buffers are tracked only in the
         * journal. So although mm thinks everything is clean and ready for
         * reaping, the inode might still have some pages to write in the
         * running transaction or waiting to be checkpointed. Thus calling
         * journal_invalidatepage() (via truncate_inode_pages()) to discard
         * these buffers can cause data loss. Also even if we did not discard
         * these buffers, we would have no way to find them after the inode
         * is reaped and thus users could see stale data if they try to read
         * them before the transaction is checkpointed. So be careful and
         * force everything to disk here... We use ei->i_datasync_tid to
         * store the newest transaction containing inode's data.
         *
         * Note that directories do not have this problem because they don't
         * use page cache.
         */
        if (inode->i_nlink && ext3_should_journal_data(inode) &&
            (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
                tid_t commit_tid = atomic_read(&ei->i_datasync_tid);
                journal_t *journal = EXT3_SB(inode->i_sb)->s_journal;

                log_start_commit(journal, commit_tid);
                log_wait_commit(journal, commit_tid);
                filemap_write_and_wait(&inode->i_data);
        }
        truncate_inode_pages(&inode->i_data, 0);

        ext3_discard_reservation(inode);
        rsv = ei->i_block_alloc_info;
        ei->i_block_alloc_info = NULL;
        if (unlikely(rsv))
                kfree(rsv);

        if (!want_delete)
                goto no_delete;

        handle = start_transaction(inode);
        if (IS_ERR(handle)) {
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext3_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                handle->h_sync = 1;
        inode->i_size = 0;
        if (inode->i_blocks)
                ext3_truncate(inode);
        /*
         * Kill off the orphan record created when the inode lost the last
         * link. Note that ext3_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - ext3_truncate() could
         * have removed the record.
         */
        ext3_orphan_del(handle, inode);
        ei->i_dtime = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext3_mark_inode_dirty(handle, inode)) {
                /* If that failed, just dquot_drop() and be done with that */
                dquot_drop(inode);
                end_writeback(inode);
        } else {
                ext3_xattr_delete_inode(handle, inode);
                dquot_free_inode(inode);
                dquot_drop(inode);
                end_writeback(inode);
                ext3_free_inode(handle, inode);
        }
        ext3_journal_stop(handle);
        return;
no_delete:
        end_writeback(inode);
        dquot_drop(inode);
}

typedef struct {
        __le32 *p;
        __le32 key;
        struct buffer_head *bh;
} Indirect;

static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v)
{
        p->key = *(p->p = v);
        p->bh = bh;
}

static int verify_chain(Indirect *from, Indirect *to)
{
        while (from <= to && from->key == *from->p)
                from++;
        return (from > to);
}

/**
 * ext3_block_to_path - parse the block number into array of offsets
 * @inode: inode in question (we are only interested in its superblock)
 * @i_block: block number to be parsed
 * @offsets: array to store the offsets in
 * @boundary: set this non-zero if the referred-to block is likely to be
 *      followed (on disk) by an indirect block.
 *
 * To store the locations of a file's data ext3 uses a data structure common
 * for UNIX filesystems - a tree of pointers anchored in the inode, with
 * data blocks at leaves and indirect blocks in intermediate nodes.
 * This function translates the block number into a path in that tree -
 * the return value is the path length and @offsets[n] is the offset of
 * the pointer to the (n+1)th node in the nth one. If @i_block is out of
 * range (negative or too large) a warning is printed and zero returned.
 *
 * Note: function doesn't find node addresses, so no IO is needed. All
 * we need to know is the capacity of indirect blocks (taken from the
 * inode->i_sb).
 */

/*
 * Portability note: the last comparison (check that we fit into triple
 * indirect block) is spelled differently, because otherwise on an
 * architecture with 32-bit longs and 8Kb pages we might get into trouble
 * if our filesystem had 8Kb blocks. We might use long long, but that would
 * kill us on x86. Oh, well, at least the sign propagation does not matter -
 * i_block would have to be negative in the very beginning, so we would not
 * get there at all.
 */
static int ext3_block_to_path(struct inode *inode,
                        long i_block, int offsets[4], int *boundary)
{
        int ptrs = EXT3_ADDR_PER_BLOCK(inode->i_sb);
        int ptrs_bits = EXT3_ADDR_PER_BLOCK_BITS(inode->i_sb);
        const long direct_blocks = EXT3_NDIR_BLOCKS,
                indirect_blocks = ptrs,
                double_blocks = (1 << (ptrs_bits * 2));
        int n = 0;
        int final = 0;

        if (i_block < 0) {
                ext3_warning(inode->i_sb, "ext3_block_to_path", "block < 0");
        } else if (i_block < direct_blocks) {
                offsets[n++] = i_block;
                final = direct_blocks;
        } else if ((i_block -= direct_blocks) < indirect_blocks) {
                offsets[n++] = EXT3_IND_BLOCK;
                offsets[n++] = i_block;
                final = ptrs;
        } else if ((i_block -= indirect_blocks) < double_blocks) {
                offsets[n++] = EXT3_DIND_BLOCK;
                offsets[n++] = i_block >> ptrs_bits;
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else if (((i_block -= double_blocks) >> (ptrs_bits * 2)) < ptrs) {
                offsets[n++] = EXT3_TIND_BLOCK;
                offsets[n++] = i_block >> (ptrs_bits * 2);
                offsets[n++] = (i_block >> ptrs_bits) & (ptrs - 1);
                offsets[n++] = i_block & (ptrs - 1);
                final = ptrs;
        } else {
                ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big");
        }
        if (boundary)
                *boundary = final - 1 - (i_block & (ptrs - 1));
        return n;
}
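
/*
 * Worked example (illustrative, not part of the original source): with
 * 4KB blocks, ptrs = 1024 and EXT3_NDIR_BLOCKS = 12, so logical block 11
 * maps to the one-element path {11} (direct), block 12 maps to
 * {EXT3_IND_BLOCK, 0} - the first slot of the single indirect block -
 * and block 12 + 1024 maps to {EXT3_DIND_BLOCK, 0, 0} at depth 3.
 */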
/**
 * ext3_get_branch - read the chain of indirect blocks leading to data
 * @inode: inode in question
 * @depth: depth of the chain (1 - direct pointer, etc.)
 * @offsets: offsets of pointers in inode/indirect blocks
 * @chain: place to store the result
 * @err: here we store the error value
 *
 * Function fills the array of triples <key, p, bh> and returns %NULL
 * if everything went OK or the pointer to the last filled triple
 * (incomplete one) otherwise. Upon the return chain[i].key contains
 * the number of (i+1)-th block in the chain (as it is stored in memory,
 * i.e. little-endian 32-bit), chain[i].p contains the address of that
 * number (it points into struct inode for i==0 and into the bh->b_data
 * for i>0) and chain[i].bh points to the buffer_head of i-th indirect
 * block for i>0 and NULL for i==0. In other words, it holds the block
 * numbers of the chain, addresses they were taken from (and where we can
 * verify that chain did not change) and buffer_heads hosting these
 * numbers.
 *
 * Function stops when it stumbles upon zero pointer (absent block)
 *      (pointer to last triple returned, *@err == 0)
 * or when it gets an IO error reading an indirect block
 *      (ditto, *@err == -EIO)
 * or when it notices that chain had been changed while it was reading
 *      (ditto, *@err == -EAGAIN)
 * or when it reads all @depth-1 indirect blocks successfully and finds
 * the whole chain, all way to the data (returns %NULL, *err == 0).
 */
static Indirect *ext3_get_branch(struct inode *inode, int depth, int *offsets,
                                 Indirect chain[4], int *err)
{
        struct super_block *sb = inode->i_sb;
        Indirect *p = chain;
        struct buffer_head *bh;

        *err = 0;
        /* i_data is not going away, no lock needed */
        add_chain(chain, NULL, EXT3_I(inode)->i_data + *offsets);
        if (!p->key)
                goto no_block;
        while (--depth) {
                bh = sb_bread(sb, le32_to_cpu(p->key));
                if (!bh)
                        goto failure;
                /* Reader: pointers */
                if (!verify_chain(chain, p))
                        goto changed;
                add_chain(++p, bh, (__le32 *)bh->b_data + *++offsets);
                /* Reader: end */
                if (!p->key)
                        goto no_block;
        }
        return NULL;

changed:
        brelse(bh);
        *err = -EAGAIN;
        goto no_block;
failure:
        *err = -EIO;
no_block:
        return p;
}

/**
 * ext3_find_near - find a place for allocation with sufficient locality
 * @inode: owner
 * @ind: descriptor of indirect block.
 *
 * This function returns the preferred place for block allocation.
 * It is used when the heuristic for sequential allocation fails.
 * Rules are:
 *      + if there is a block to the left of our position - allocate near it.
 *      + if pointer will live in indirect block - allocate near that block.
 *      + if pointer will live in inode - allocate in the same
 *        cylinder group.
 *
 * In the latter case we colour the starting block by the caller's PID to
 * prevent it from clashing with concurrent allocations for a different inode
 * in the same block group. The PID is used here so that functionally related
 * files will be close-by on-disk.
 *
 * Caller must make sure that @ind is valid and will stay that way.
 */
static ext3_fsblk_t ext3_find_near(struct inode *inode, Indirect *ind)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        __le32 *start = ind->bh ? (__le32 *)ind->bh->b_data : ei->i_data;
        __le32 *p;
        ext3_fsblk_t bg_start;
        ext3_grpblk_t colour;

        /* Try to find previous block */
        for (p = ind->p - 1; p >= start; p--) {
                if (*p)
                        return le32_to_cpu(*p);
        }

        /* No such thing, so let's try location of indirect block */
        if (ind->bh)
                return ind->bh->b_blocknr;

        /*
         * It is going to be referred to from the inode itself? OK, just put it
         * into the same cylinder group then.
         */
        bg_start = ext3_group_first_block_no(inode->i_sb, ei->i_block_group);
        colour = (current->pid % 16) *
                        (EXT3_BLOCKS_PER_GROUP(inode->i_sb) / 16);
        return bg_start + colour;
}
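
/*
 * For instance (illustrative numbers only): with 32768 blocks per group,
 * the group is carved into 16 slices of 2048 blocks, and a process with
 * pid 1000 (1000 % 16 == 8) gets a starting goal 8 * 2048 = 16384 blocks
 * into the group, keeping concurrent writers in separate slices.
 */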
/**
 * ext3_find_goal - find a preferred place for allocation.
 * @inode: owner
 * @block: block we want
 * @partial: pointer to the last triple within a chain
 *
 * Normally this function finds the preferred place for block allocation
 * and returns it.
 */
static ext3_fsblk_t ext3_find_goal(struct inode *inode, long block,
                                   Indirect *partial)
{
        struct ext3_block_alloc_info *block_i;

        block_i = EXT3_I(inode)->i_block_alloc_info;

        /*
         * try the heuristic for sequential allocation,
         * failing that at least try to get decent locality.
         */
        if (block_i && (block == block_i->last_alloc_logical_block + 1)
                && (block_i->last_alloc_physical_block != 0)) {
                return block_i->last_alloc_physical_block + 1;
        }

        return ext3_find_near(inode, partial);
}

/**
 * ext3_blks_to_allocate - look up the block map and count the number
 *      of direct blocks that need to be allocated for the given branch.
 *
 * @branch: chain of indirect blocks
 * @k: number of blocks needed for indirect blocks
 * @blks: number of data blocks to be mapped.
 * @blocks_to_boundary: the offset in the indirect block
 *
 * return the total number of blocks to be allocated, including the
 * direct and indirect blocks.
 */
static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
                                 int blocks_to_boundary)
{
        unsigned long count = 0;

        /*
         * Simple case: the [t,d]indirect block(s) along this path have not
         * been allocated yet, so clearly no blocks on that path have been
         * allocated either.
         */
        if (k > 0) {
                /* right now we don't handle cross boundary allocation */
                if (blks < blocks_to_boundary + 1)
                        count += blks;
                else
                        count += blocks_to_boundary + 1;
                return count;
        }

        count++;
        while (count < blks && count <= blocks_to_boundary &&
                le32_to_cpu(*(branch[0].p + count)) == 0) {
                count++;
        }
        return count;
}
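
/*
 * Example (hypothetical values): asking for blks = 3 data blocks with an
 * unallocated indirect block (k > 0) and blocks_to_boundary = 1 yields
 * count = 2, since the allocation is clipped at the indirect-block
 * boundary rather than crossing it.
 */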
/**
 * ext3_alloc_blocks - allocate multiple blocks needed for a branch
 * @handle: handle for this transaction
 * @inode: owner
 * @goal: preferred place for allocation
 * @indirect_blks: the number of blocks that need to be allocated for
 *      indirect blocks
 * @blks: the number of blocks that need to be allocated for direct blocks
 * @new_blocks: on return it will store the new block numbers for
 *      the indirect blocks (if needed) and the first direct block,
 * @err: here we store the error value
 *
 * return the number of direct blocks allocated
 */
static int ext3_alloc_blocks(handle_t *handle, struct inode *inode,
                        ext3_fsblk_t goal, int indirect_blks, int blks,
                        ext3_fsblk_t new_blocks[4], int *err)
{
        int target, i;
        unsigned long count = 0;
        int index = 0;
        ext3_fsblk_t current_block = 0;
        int ret = 0;

        /*
         * Here we try to allocate the requested multiple blocks at once,
         * on a best-effort basis.
         * To build a branch, we should allocate blocks for
         * the indirect blocks (if not allocated yet), and at least
         * the first direct block of this branch. That's the
         * minimum number of blocks we need to allocate (required).
         */
        target = blks + indirect_blks;

        while (1) {
                count = target;
                /* allocating blocks for indirect blocks and direct blocks */
                current_block = ext3_new_blocks(handle, inode, goal, &count, err);
                if (*err)
                        goto failed_out;

                target -= count;
                /* allocate blocks for indirect blocks */
                while (index < indirect_blks && count) {
                        new_blocks[index++] = current_block++;
                        count--;
                }

                if (count > 0)
                        break;
        }

        /* save the new block number for the first direct block */
        new_blocks[index] = current_block;

        /* total number of blocks allocated for direct blocks */
        ret = count;
        *err = 0;
        return ret;
failed_out:
        for (i = 0; i < index; i++)
                ext3_free_blocks(handle, inode, new_blocks[i], 1);
        return ret;
}
/**
 * ext3_alloc_branch - allocate and set up a chain of blocks.
 * @handle: handle for this transaction
 * @inode: owner
 * @indirect_blks: number of allocated indirect blocks
 * @blks: number of allocated direct blocks
 * @goal: preferred place for allocation
 * @offsets: offsets (in the blocks) to store the pointers to next.
 * @branch: place to store the chain in.
 *
 * This function allocates blocks, zeroes out all but the last one,
 * links them into chain and (if we are synchronous) writes them to disk.
 * In other words, it prepares a branch that can be spliced onto the
 * inode. It stores the information about that chain in the branch[], in
 * the same format as ext3_get_branch() would do. We are calling it after
 * we had read the existing part of chain and partial points to the last
 * triple of that (one with zero ->key). Upon the exit we have the same
 * picture as after the successful ext3_get_block(), except that in one
 * place chain is disconnected - *branch->p is still zero (we did not
 * set the last link), but branch->key contains the number that should
 * be placed into *branch->p to fill that gap.
 *
 * If allocation fails we free all blocks we've allocated (and forget
 * their buffer_heads) and return the error value from the failed
 * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain
 * as described above and return 0.
 */
static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
                        int indirect_blks, int *blks, ext3_fsblk_t goal,
                        int *offsets, Indirect *branch)
{
        int blocksize = inode->i_sb->s_blocksize;
        int i, n = 0;
        int err = 0;
        struct buffer_head *bh;
        int num;
        ext3_fsblk_t new_blocks[4];
        ext3_fsblk_t current_block;

        num = ext3_alloc_blocks(handle, inode, goal, indirect_blks,
                                *blks, new_blocks, &err);
        if (err)
                return err;

        branch[0].key = cpu_to_le32(new_blocks[0]);
        /*
         * metadata blocks and data blocks are allocated.
         */
        for (n = 1; n <= indirect_blks; n++) {
                /*
                 * Get buffer_head for parent block, zero it out
                 * and set the pointer to new one, then send
                 * parent to disk.
                 */
                bh = sb_getblk(inode->i_sb, new_blocks[n-1]);
                branch[n].bh = bh;
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                err = ext3_journal_get_create_access(handle, bh);
                if (err) {
                        unlock_buffer(bh);
                        brelse(bh);
                        goto failed;
                }

                memset(bh->b_data, 0, blocksize);
                branch[n].p = (__le32 *) bh->b_data + offsets[n];
                branch[n].key = cpu_to_le32(new_blocks[n]);
                *branch[n].p = branch[n].key;
                if (n == indirect_blks) {
                        current_block = new_blocks[n];
                        /*
                         * End of chain, update the last new metablock of
                         * the chain to point to the newly allocated
                         * data block numbers
                         */
                        for (i = 1; i < num; i++)
                                *(branch[n].p + i) = cpu_to_le32(++current_block);
                }
                BUFFER_TRACE(bh, "marking uptodate");
                set_buffer_uptodate(bh);
                unlock_buffer(bh);

                BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, bh);
                if (err)
                        goto failed;
        }
        *blks = num;
        return err;
failed:
        /* Allocation failed, free what we already allocated */
        for (i = 1; i <= n; i++) {
                BUFFER_TRACE(branch[i].bh, "call journal_forget");
                ext3_journal_forget(handle, branch[i].bh);
        }
        for (i = 0; i < indirect_blks; i++)
                ext3_free_blocks(handle, inode, new_blocks[i], 1);

        ext3_free_blocks(handle, inode, new_blocks[i], num);

        return err;
}
/**
 * ext3_splice_branch - splice the allocated branch onto inode.
 * @handle: handle for this transaction
 * @inode: owner
 * @block: (logical) number of block we are adding
 * @where: location of missing link
 * @num: number of indirect blocks we are adding
 * @blks: number of direct blocks we are adding
 *
 * This function fills the missing link and does all housekeeping needed in
 * inode (->i_blocks, etc.). In case of success we end up with the full
 * chain to new block and return 0.
 */
static int ext3_splice_branch(handle_t *handle, struct inode *inode,
                        long block, Indirect *where, int num, int blks)
{
        int i;
        int err = 0;
        struct ext3_block_alloc_info *block_i;
        ext3_fsblk_t current_block;
        struct ext3_inode_info *ei = EXT3_I(inode);

        block_i = ei->i_block_alloc_info;
        /*
         * If we're splicing into a [td]indirect block (as opposed to the
         * inode) then we need to get write access to the [td]indirect block
         * before the splice.
         */
        if (where->bh) {
                BUFFER_TRACE(where->bh, "get_write_access");
                err = ext3_journal_get_write_access(handle, where->bh);
                if (err)
                        goto err_out;
        }
        /* That's it */

        *where->p = where->key;

        /*
         * Update the host buffer_head or inode to point to the just
         * allocated direct blocks
         */
        if (num == 0 && blks > 1) {
                current_block = le32_to_cpu(where->key) + 1;
                for (i = 1; i < blks; i++)
                        *(where->p + i) = cpu_to_le32(current_block++);
        }

        /*
         * update the most recently allocated logical & physical block
         * in i_block_alloc_info, to assist in finding the proper goal block
         * for the next allocation
         */
        if (block_i) {
                block_i->last_alloc_logical_block = block + blks - 1;
                block_i->last_alloc_physical_block =
                                le32_to_cpu(where[num].key) + blks - 1;
        }

        /* We are done with atomic stuff, now do the rest of housekeeping */

        inode->i_ctime = CURRENT_TIME_SEC;
        ext3_mark_inode_dirty(handle, inode);
        /* ext3_mark_inode_dirty already updated i_sync_tid */
        atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);

        /* had we spliced it onto indirect block? */
        if (where->bh) {
                /*
                 * If we spliced it onto an indirect block, we haven't
                 * altered the inode. Note however that if it is being spliced
                 * onto an indirect block at the very end of the file (the
                 * file is growing) then we *will* alter the inode to reflect
                 * the new i_size. But that is not done here - it is done in
                 * generic_commit_write->__mark_inode_dirty->ext3_dirty_inode.
                 */
                jbd_debug(5, "splicing indirect only\n");
                BUFFER_TRACE(where->bh, "call ext3_journal_dirty_metadata");
                err = ext3_journal_dirty_metadata(handle, where->bh);
                if (err)
                        goto err_out;
        } else {
                /*
                 * OK, we spliced it into the inode itself on a direct block.
                 * Inode was dirtied above.
                 */
                jbd_debug(5, "splicing direct\n");
        }
        return err;

err_out:
        for (i = 1; i <= num; i++) {
                BUFFER_TRACE(where[i].bh, "call journal_forget");
                ext3_journal_forget(handle, where[i].bh);
                ext3_free_blocks(handle, inode, le32_to_cpu(where[i-1].key), 1);
        }
        ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks);

        return err;
}
/*
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * The BKL may not be held on entry here. Be sure to take it early.
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 */
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
                sector_t iblock, unsigned long maxblocks,
                struct buffer_head *bh_result,
                int create)
{
        int err = -EIO;
        int offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext3_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        struct ext3_inode_info *ei = EXT3_I(inode);
        int count = 0;
        ext3_fsblk_t first_block = 0;

        trace_ext3_get_blocks_enter(inode, iblock, maxblocks, create);
        J_ASSERT(handle != NULL || create == 0);
        depth = ext3_block_to_path(inode, iblock, offsets, &blocks_to_boundary);

        if (depth == 0)
                goto out;

        partial = ext3_get_branch(inode, depth, offsets, chain, &err);

        /* Simplest case - block found, no allocation needed */
        if (!partial) {
                first_block = le32_to_cpu(chain[depth - 1].key);
                clear_buffer_new(bh_result);
                count++;
                /* map more blocks */
                while (count < maxblocks && count <= blocks_to_boundary) {
                        ext3_fsblk_t blk;

                        if (!verify_chain(chain, chain + depth - 1)) {
                                /*
                                 * Indirect block might be removed by
                                 * truncate while we were reading it.
                                 * Handling of that case: forget what we've
                                 * got now. Flag the err as EAGAIN, so it
                                 * will reread.
                                 */
                                err = -EAGAIN;
                                count = 0;
                                break;
                        }
                        blk = le32_to_cpu(*(chain[depth-1].p + count));

                        if (blk == first_block + count)
                                count++;
                        else
                                break;
                }
                if (err != -EAGAIN)
                        goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
        if (!create || err == -EIO)
                goto cleanup;

        /*
         * Block out ext3_truncate while we alter the tree
         */
        mutex_lock(&ei->truncate_mutex);

        /*
         * If the indirect block is missing while we are reading
         * the chain (ext3_get_branch() returns -EAGAIN err), or
         * if the chain has been changed after we grab the semaphore,
         * (either because another process truncated this branch, or
         * another get_block allocated this branch) re-grab the chain to see if
         * the request block has been allocated or not.
         *
         * Since we already block the truncate/other get_block
         * at this point, we will have the current copy of the chain when we
         * splice the branch into the tree.
         */
        if (err == -EAGAIN || !verify_chain(chain, partial)) {
                while (partial > chain) {
                        brelse(partial->bh);
                        partial--;
                }
                partial = ext3_get_branch(inode, depth, offsets, chain, &err);
                if (!partial) {
                        count++;
                        mutex_unlock(&ei->truncate_mutex);
                        if (err)
                                goto cleanup;
                        clear_buffer_new(bh_result);
                        goto got_it;
                }
        }

        /*
         * Okay, we need to do block allocation. Lazily initialize the block
         * allocation info here if necessary
         */
        if (S_ISREG(inode->i_mode) && (!ei->i_block_alloc_info))
                ext3_init_block_alloc_info(inode);

        goal = ext3_find_goal(inode, iblock, partial);

        /* the number of blocks needed to allocate for [d,t]indirect blocks */
        indirect_blks = (chain + depth) - partial - 1;

        /*
         * Next look up the indirect map to count the total number of
         * direct blocks to allocate for this branch.
         */
        count = ext3_blks_to_allocate(partial, indirect_blks,
                                        maxblocks, blocks_to_boundary);
        err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal,
                                offsets + (partial - chain), partial);

        /*
         * The ext3_splice_branch call will free and forget any buffers
         * on the new chain if there is a failure, but that risks using
         * up transaction credits, especially for bitmaps where the
         * credits cannot be returned. Can we handle this somehow? We
         * may need to return -EAGAIN upwards in the worst case. --sct
         */
        if (!err)
                err = ext3_splice_branch(handle, inode, iblock,
                                        partial, indirect_blks, count);
        mutex_unlock(&ei->truncate_mutex);
        if (err)
                goto cleanup;

        set_buffer_new(bh_result);
got_it:
        map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
        if (count > blocks_to_boundary)
                set_buffer_boundary(bh_result);
        err = count;
        /* Clean up and exit */
        partial = chain + depth - 1;    /* the whole chain */
cleanup:
        while (partial > chain) {
                BUFFER_TRACE(partial->bh, "call brelse");
                brelse(partial->bh);
                partial--;
        }
        BUFFER_TRACE(bh_result, "returned");
out:
        trace_ext3_get_blocks_exit(inode, iblock,
                                   depth ? le32_to_cpu(chain[depth-1].key) : 0,
                                   count, err);
        return err;
}
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

/*
 * Number of credits we need for writing DIO_MAX_BLOCKS:
 * We need sb + group descriptor + bitmap + inode -> 4
 * For B blocks with A block pointers per block we need:
 * 1 (triple ind.) + (B/A/A + 2) (doubly ind.) + (B/A + 2) (indirect).
 * If we plug in 4096 for B and 256 for A (for 1KB block size), we get 25.
 */
#define DIO_CREDITS 25
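
/*
 * Spelling out the arithmetic above with the same numbers as the comment:
 * B = 4096 and A = 256 give B/A = 16 and B/A/A rounding down to 0, so the
 * indirect tree costs 1 + (0 + 2) + (16 + 2) = 21 credits; adding the 4
 * fixed blocks (sb, group descriptor, bitmap, inode) yields the 25 above.
 */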
static int ext3_get_block(struct inode *inode, sector_t iblock,
                        struct buffer_head *bh_result, int create)
{
        handle_t *handle = ext3_journal_current_handle();
        int ret = 0, started = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

        if (create && !handle) {        /* Direct IO write... */
                if (max_blocks > DIO_MAX_BLOCKS)
                        max_blocks = DIO_MAX_BLOCKS;
                handle = ext3_journal_start(inode, DIO_CREDITS +
                                EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }
                started = 1;
        }

        ret = ext3_get_blocks_handle(handle, inode, iblock,
                                        max_blocks, bh_result, create);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        if (started)
                ext3_journal_stop(handle);
out:
        return ret;
}

int ext3_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
{
        return generic_block_fiemap(inode, fieinfo, start, len,
                                    ext3_get_block);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode,
                                long block, int create, int *errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
        err = ext3_get_blocks_handle(handle, inode, block, 1,
                                        &dummy, create);
        /*
         * ext3_get_blocks_handle() returns number of blocks
         * mapped. 0 in case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
                        WARN_ON(1);
                err = 0;
        }
        *errp = err;
        if (!err && buffer_mapped(&dummy)) {
                struct buffer_head *bh;
                bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
                if (!bh) {
                        *errp = -EIO;
                        goto err;
                }
                if (buffer_new(&dummy)) {
                        J_ASSERT(create != 0);
                        J_ASSERT(handle != NULL);

                        /*
                         * Now that we do not always journal data, we should
                         * keep in mind whether this should always journal the
                         * new buffer as metadata. For now, regular file
                         * writes use ext3_get_block instead, so it's not a
                         * problem.
                         */
                        lock_buffer(bh);
                        BUFFER_TRACE(bh, "call get_create_access");
                        fatal = ext3_journal_get_create_access(handle, bh);
                        if (!fatal && !buffer_uptodate(bh)) {
                                memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                                set_buffer_uptodate(bh);
                        }
                        unlock_buffer(bh);
                        BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
                        err = ext3_journal_dirty_metadata(handle, bh);
                        if (!fatal)
                                fatal = err;
                } else {
                        BUFFER_TRACE(bh, "not a new buffer");
                }
                if (fatal) {
                        *errp = fatal;
                        brelse(bh);
                        bh = NULL;
                }
                return bh;
        }
err:
        return NULL;
}

struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode,
                               int block, int create, int *err)
{
        struct buffer_head *bh;

        bh = ext3_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        *err = -EIO;
        return NULL;
}
static int walk_page_buffers(handle_t *handle,
                             struct buffer_head *head,
                             unsigned from,
                             unsigned to,
                             int *partial,
                             int (*fn)(handle_t *handle,
                                       struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next)
        {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}
/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction. We cannot
 * close off a transaction and start a new one between the ext3_get_block()
 * and the commit_write(). So doing the journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext3_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext3_writepage()
 * has generated enough buffer credits to do the whole page. So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext3 can be reentered when a transaction is open via
 * quota file writes. If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated. We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        int dirty = buffer_dirty(bh);
        int ret;

        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        /*
         * __block_prepare_write() could have dirtied some buffers. Clean
         * the dirty bit as journal_get_write_access() could complain
         * otherwise about fs integrity issues. Setting of the dirty bit
         * by __block_prepare_write() isn't a real problem here as we clear
         * the bit before releasing a page lock and thus writeback cannot
         * ever write the buffer.
         */
        if (dirty)
                clear_buffer_dirty(bh);
        ret = ext3_journal_get_write_access(handle, bh);
        if (!ret && dirty)
                ret = ext3_journal_dirty_metadata(handle, bh);
        return ret;
}
/*
 * Truncate blocks that were not used by write. We have to truncate the
 * pagecache as well so that corresponding buffers get properly unmapped.
 */
static void ext3_truncate_failed_write(struct inode *inode)
{
        truncate_inode_pages(inode->i_mapping, inode->i_size);
        ext3_truncate(inode);
}

/*
 * Truncate blocks that were not used by direct IO write. We have to zero out
 * the last file block as well because direct IO might have written to it.
 */
static void ext3_truncate_failed_direct_write(struct inode *inode)
{
        ext3_block_truncate_page(inode, inode->i_size);
        ext3_truncate(inode);
}

static int ext3_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int ret;
        handle_t *handle;
        int retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;
        /* Reserve one block more for addition to orphan list in case
         * we allocate blocks but write fails for some reason */
        int needed_blocks = ext3_writepage_trans_blocks(inode) + 1;

        trace_ext3_write_begin(inode, pos, len, flags);

        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

retry:
        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page)
                return -ENOMEM;
        *pagep = page;

        handle = ext3_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                unlock_page(page);
                page_cache_release(page);
                ret = PTR_ERR(handle);
                goto out;
        }
        ret = __block_write_begin(page, pos, len, ext3_get_block);
        if (ret)
                goto write_begin_failed;

        if (ext3_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }
write_begin_failed:
        if (ret) {
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size. Trim these off again. Don't need
                 * i_size_read because we hold i_mutex.
                 *
                 * Add inode to orphan list in case we crash before truncate
                 * finishes. Do this only if ext3_can_truncate() agrees so
                 * that orphan processing code is happy.
                 */
                if (pos + len > inode->i_size && ext3_can_truncate(inode))
                        ext3_orphan_add(handle, inode);
                ext3_journal_stop(handle);
                unlock_page(page);
                page_cache_release(page);
                if (pos + len > inode->i_size)
                        ext3_truncate_failed_write(inode);
        }
        if (ret == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
out:
        return ret;
}
int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh)
{
        int err = journal_dirty_data(handle, bh);

        if (err)
                ext3_journal_abort_handle(__func__, __func__,
                                          bh, handle, err);
        return err;
}

/* For ordered writepage and write_end functions */
static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
{
        /*
         * Write could have mapped the buffer but it didn't copy the data in
         * yet. So avoid filing such buffer into a transaction.
         */
        if (buffer_mapped(bh) && buffer_uptodate(bh))
                return ext3_journal_dirty_data(handle, bh);
        return 0;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext3_journal_dirty_metadata(handle, bh);
}

/*
 * This is nasty and subtle: ext3_write_begin() could have allocated blocks
 * for the whole page but later we failed to copy the data in. Update inode
 * size according to what we managed to copy. The rest is going to be
 * truncated in write_end function.
 */
static void update_file_sizes(struct inode *inode, loff_t pos, unsigned copied)
{
        /* What matters to us is i_disksize. We don't write i_size anywhere */
        if (pos + copied > inode->i_size)
                i_size_write(inode, pos + copied);
        if (pos + copied > EXT3_I(inode)->i_disksize) {
                EXT3_I(inode)->i_disksize = pos + copied;
                mark_inode_dirty(inode);
        }
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext3 never places buffers on inode->i_mapping->private_list. metadata
 * buffers are managed internally.
 */
static int ext3_ordered_write_end(struct file *file,
                                  struct address_space *mapping,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page, void *fsdata)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = file->f_mapping->host;
        unsigned from, to;
        int ret = 0, ret2;

        trace_ext3_ordered_write_end(inode, pos, len, copied);
        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + copied;
        ret = walk_page_buffers(handle, page_buffers(page),
                from, to, NULL, journal_dirty_data_fn);

        if (ret == 0)
                update_file_sizes(inode, pos, copied);
        /*
         * There may be allocated blocks outside of i_size because
         * we failed to copy some data. Prepare for truncate.
         */
        if (pos + len > inode->i_size && ext3_can_truncate(inode))
                ext3_orphan_add(handle, inode);
        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        unlock_page(page);
        page_cache_release(page);

        if (pos + len > inode->i_size)
                ext3_truncate_failed_write(inode);
        return ret ? ret : copied;
}
static int ext3_writeback_write_end(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned len, unsigned copied,
                                    struct page *page, void *fsdata)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = file->f_mapping->host;
        int ret;

        trace_ext3_writeback_write_end(inode, pos, len, copied);
        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
        update_file_sizes(inode, pos, copied);
        /*
         * There may be allocated blocks outside of i_size because
         * we failed to copy some data. Prepare for truncate.
         */
        if (pos + len > inode->i_size && ext3_can_truncate(inode))
                ext3_orphan_add(handle, inode);
        ret = ext3_journal_stop(handle);
        unlock_page(page);
        page_cache_release(page);

        if (pos + len > inode->i_size)
                ext3_truncate_failed_write(inode);
        return ret ? ret : copied;
}

static int ext3_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
        handle_t *handle = ext3_journal_current_handle();
        struct inode *inode = mapping->host;
        struct ext3_inode_info *ei = EXT3_I(inode);
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;

        trace_ext3_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

        if (copied < len) {
                if (!PageUptodate(page))
                        copied = 0;
                page_zero_new_buffers(page, from + copied, to);
                to = from + copied;
        }

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, write_end_fn);
        if (!partial)
                SetPageUptodate(page);

        if (pos + copied > inode->i_size)
                i_size_write(inode, pos + copied);
        /*
         * There may be allocated blocks outside of i_size because
         * we failed to copy some data. Prepare for truncate.
         */
        if (pos + len > inode->i_size && ext3_can_truncate(inode))
                ext3_orphan_add(handle, inode);
        ext3_set_inode_state(inode, EXT3_STATE_JDATA);
        atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
        if (inode->i_size > ei->i_disksize) {
                ei->i_disksize = inode->i_size;
                ret2 = ext3_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }

        ret2 = ext3_journal_stop(handle);
        if (!ret)
                ret = ret2;
        unlock_page(page);
        page_cache_release(page);

        if (pos + len > inode->i_size)
                ext3_truncate_failed_write(inode);
        return ret ? ret : copied;
}
/*
 * bmap() is special. It gets used by applications such as lilo and by
 * the swapper to find the on-disk block of a specific piece of data.
 *
 * Naturally, this is dangerous if the block concerned is still in the
 * journal. If somebody makes a swapfile on an ext3 data-journaling
 * filesystem and enables swap, then they may get a nasty shock when the
 * data getting swapped to that swapfile suddenly gets overwritten by
 * the original zeros written out previously to the journal and
 * awaiting writeback in the kernel's buffer cache.
 *
 * So, if we see any bmap calls here on a modified, data-journaled file,
 * take extra steps to flush any blocks which might be in the cache.
 */
static sector_t ext3_bmap(struct address_space *mapping, sector_t block)
{
        struct inode *inode = mapping->host;
        journal_t *journal;
        int err;

        if (ext3_test_inode_state(inode, EXT3_STATE_JDATA)) {
                /*
                 * This is a REALLY heavyweight approach, but the use of
                 * bmap on dirty files is expected to be extremely rare:
                 * only if we run lilo or swapon on a freshly made file
                 * do we expect this to happen.
                 *
                 * (bmap requires CAP_SYS_RAWIO so this does not
                 * represent an unprivileged user DOS attack --- we'd be
                 * in trouble if mortal users could trigger this path at
                 * will.)
                 *
                 * NB. EXT3_STATE_JDATA is not set on files other than
                 * regular files. If somebody wants to bmap a directory
                 * or symlink and gets confused because the buffer
                 * hasn't yet been flushed to disk, they deserve
                 * everything they get.
                 */
                ext3_clear_inode_state(inode, EXT3_STATE_JDATA);
                journal = EXT3_JOURNAL(inode);
                journal_lock_updates(journal);
                err = journal_flush(journal);
                journal_unlock_updates(journal);

                if (err)
                        return 0;
        }

        return generic_block_bmap(mapping, block, ext3_get_block);
}
static int bget_one(handle_t *handle, struct buffer_head *bh)
{
        get_bh(bh);
        return 0;
}

static int bput_one(handle_t *handle, struct buffer_head *bh)
{
        put_bh(bh);
        return 0;
}

static int buffer_unmapped(handle_t *handle, struct buffer_head *bh)
{
        return !buffer_mapped(bh);
}
/*
 * Note that we always start a transaction even if we're not journalling
 * data. This is to preserve ordering: any hole instantiation within
 * __block_write_full_page -> ext3_get_block() should be journalled
 * along with the data so we don't crash and then get metadata which
 * refers to old data.
 *
 * In all journalling modes block_write_full_page() will start the I/O.
 *
 * Problem:
 *
 *      ext3_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
 *              ext3_writepage()
 *
 * Similar for:
 *
 *      ext3_file_write() -> generic_file_write() -> __alloc_pages() -> ...
 *
 * Same applies to ext3_get_block(). We will deadlock on various things like
 * lock_journal and i_truncate_mutex.
 *
 * Setting PF_MEMALLOC here doesn't work - too many internal memory
 * allocations fail.
 *
 * 16May01: If we're reentered then journal_current_handle() will be
 *          non-zero. We simply *return*.
 *
 * 1 July 2001: @@@ FIXME:
 *   In journalled data mode, a data buffer may be metadata against the
 *   current transaction. But the same file is part of a shared mapping
 *   and someone does a writepage() on it.
 *
 *   We will move the buffer onto the async_data list, but *after* it has
 *   been dirtied. So there's a small window where we have dirty data on
 *   BJ_Metadata.
 *
 *   Note that this only applies to the last partial page in the file. The
 *   bit which block_write_full_page() uses prepare/commit for. (That's
 *   broken code anyway: it's wrong for msync()).
 *
 *   It's a rare case: affects the final partial page, for journalled data
 *   where the file is subject to both write() and writepage() in the same
 *   transaction. To fix it we'll need a custom block_write_full_page().
 *   We'll probably need that anyway for journalling writepage() output.
 *
 * We don't honour synchronous mounts for writepage(). That would be
 * disastrous. Any write() or metadata operation will sync the fs for
 * us.
 *
 * AKPM2: if all the page's buffers are mapped to disk and !data=journal,
 * we don't need to open a transaction here.
 */
static int ext3_ordered_writepage(struct page *page,
                                  struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        struct buffer_head *page_bufs;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        J_ASSERT(PageLocked(page));
        WARN_ON_ONCE(IS_RDONLY(inode));

        /*
         * We give up here if we're reentered, because it might be for a
         * different filesystem.
         */
        if (ext3_journal_current_handle())
                goto out_fail;

        trace_ext3_ordered_writepage(page);
        if (!page_has_buffers(page)) {
                create_empty_buffers(page, inode->i_sb->s_blocksize,
                                (1 << BH_Dirty)|(1 << BH_Uptodate));
                page_bufs = page_buffers(page);
        } else {
                page_bufs = page_buffers(page);
                if (!walk_page_buffers(NULL, page_bufs, 0, PAGE_CACHE_SIZE,
                                       NULL, buffer_unmapped)) {
                        /* Provide NULL get_block() to