
/fs/ext4/migrate.c

https://github.com/mstsirkin/linux
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent
 */
struct list_blocks_struct {
	ext4_lblk_t first_block, last_block;
	ext4_fsblk_t first_pblock, last_pblock;
};
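
/*
 * Flush the contiguous run tracked in @lb into the temp inode's extent
 * tree as a single extent, extending or restarting the journal handle
 * as needed to get the required credits.
 */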
static int finish_range(handle_t *handle, struct inode *inode,
				struct list_blocks_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credits we have accumulated are not really high
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		retval = ext4_journal_restart(handle, needed);
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If we cannot extend the journal, restart it
			 */
			retval = ext4_journal_restart(handle, needed);
			if (retval)
				goto err_out;
		}
	}

	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	lb->first_pblock = 0;
	return retval;
}
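
/*
 * Extend the run in @lb if @pblock/@blk_num continue it contiguously;
 * otherwise flush the current run via finish_range() and start a new one.
 */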
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, ext4_lblk_t blk_num,
			       struct list_blocks_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == blk_num)) {
		lb->last_pblock = pblock;
		lb->last_block = blk_num;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = blk_num;

	return retval;
}
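
/*
 * Walk one indirect block: feed every mapped entry to
 * update_extent_range() and advance the logical block counter.
 */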
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
				   struct list_blocks_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	ext4_lblk_t blk_count = *blk_nump;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	if (!pblock) {
		/* Only update the file block number */
		*blk_nump += max_entries;
		return 0;
	}

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++, blk_count++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]),
						blk_count, lb);
			if (retval)
				break;
		}
	}

	/* Update the file block number */
	*blk_nump = blk_count;
	put_bh(bh);
	return retval;
}
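
/*
 * Walk one double-indirect block, recursing into each mapped indirect
 * block; unmapped entries only advance the logical block counter.
 */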
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
				    struct list_blocks_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	ext4_lblk_t blk_count = *blk_nump;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	if (!pblock) {
		/* Only update the file block number */
		*blk_nump += max_entries * max_entries;
		return 0;
	}

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]),
						&blk_count, lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			blk_count += max_entries;
		}
	}

	/* Update the file block number */
	*blk_nump = blk_count;
	put_bh(bh);
	return retval;
}
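
/*
 * Walk one triple-indirect block, recursing into each mapped
 * double-indirect block.
 */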
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
				    struct list_blocks_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	ext4_lblk_t blk_count = *blk_nump;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	if (!pblock) {
		/* Only update the file block number */
		*blk_nump += max_entries * max_entries * max_entries;
		return 0;
	}

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]),
						&blk_count, lb);
			if (retval)
				break;
		} else
			/* Only update the file block number */
			blk_count += max_entries * max_entries;
	}

	/* Update the file block number */
	*blk_nump = blk_count;
	put_bh(bh);
	return retval;
}
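
/*
 * Make sure the handle has enough credits to free one block: extend
 * the current transaction if possible, otherwise restart it.
 */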
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing blocks. During this we touch the
	 * superblock, group descriptor and block bitmap,
	 * so allocate a credit of 3. We may also update
	 * quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}
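
/*
 * Free every indirect block referenced by a double-indirect block,
 * then free the double-indirect block itself.
 */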
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}
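
/*
 * Free the double-indirect trees referenced by a triple-indirect
 * block, then free the triple-indirect block itself.
 */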
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}
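
/*
 * Free the old indirect, double-indirect and triple-indirect meta-data
 * blocks saved from the original inode's i_data.
 */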
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}
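
/*
 * Switch the original inode over to the extent tree that was built in
 * tmp_inode (under i_data_sem to exclude racing block allocations),
 * then free the now-unused indirect meta-data blocks.
 */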
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE is cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty after, because we decrement the
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}
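
/*
 * Recursively free the extent index blocks below @ix, then free the
 * index block itself.
 */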
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}
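
/*
 * Convert an indirect-mapped inode to use extents: build the extent
 * tree in a temporary orphan inode, swap it into the original inode,
 * and free the old indirect meta-data blocks.
 */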
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	ext4_lblk_t blk_count = 0;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct list_blocks_struct lb;
	unsigned long max_entries;
	__u32 goal;

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	handle = ext4_journal_start(inode,
					EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)
					+ 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, NULL, goal);
	if (IS_ERR(tmp_inode)) {
		retval = -ENOMEM;
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop the inode reference.
	 */
	tmp_inode->i_nlink = 0;

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * Start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail the migrate. New block allocation will
	 * clear the EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just roll back the in-core changes and
		 * leave the other work to orphan_list_cleanup().
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address, 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]),
						blk_count, &lb);
			if (retval)
				goto err_out;
		}
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_IND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	} else
		blk_count += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_DIND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	} else
		blk_count += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_TIND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap inode data, free the extent
			 * details of the tmp inode.
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_delete_inode does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}