PageRenderTime 66ms CodeModel.GetById 27ms RepoModel.GetById 0ms app.codeStats 1ms

/fs/btrfs/delayed-inode.c

https://gitlab.com/LiquidSmooth-Devices/android_kernel_htc_msm8974
C | 1735 lines | 1423 code | 294 blank | 18 comment | 194 complexity | 90127eabb0004817a38d4c2ea557fbf3 MD5 | raw file
Possible License(s): GPL-2.0
  1. /*
  2. * Copyright (C) 2011 Fujitsu. All rights reserved.
  3. * Written by Miao Xie <miaox@cn.fujitsu.com>
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public
  7. * License v2 as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public
  15. * License along with this program; if not, write to the
  16. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  17. * Boston, MA 021110-1307, USA.
  18. */
  19. #include <linux/slab.h>
  20. #include "delayed-inode.h"
  21. #include "disk-io.h"
  22. #include "transaction.h"
/*
 * Thresholds on the fs-wide count of pending delayed items.  Waiters on
 * delayed_root->wait are woken once the count drops below
 * BTRFS_DELAYED_BACKGROUND (see __btrfs_remove_delayed_item()).
 * NOTE(review): BTRFS_DELAYED_WRITEBACK is not referenced in this part
 * of the file; presumably consumed by throttling code further down —
 * verify against the rest of the file.
 */
#define BTRFS_DELAYED_WRITEBACK 400
#define BTRFS_DELAYED_BACKGROUND 100

/* slab cache backing struct btrfs_delayed_node allocations */
static struct kmem_cache *delayed_node_cache;
  26. int __init btrfs_delayed_inode_init(void)
  27. {
  28. delayed_node_cache = kmem_cache_create("delayed_node",
  29. sizeof(struct btrfs_delayed_node),
  30. 0,
  31. SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
  32. NULL);
  33. if (!delayed_node_cache)
  34. return -ENOMEM;
  35. return 0;
  36. }
  37. void btrfs_delayed_inode_exit(void)
  38. {
  39. if (delayed_node_cache)
  40. kmem_cache_destroy(delayed_node_cache);
  41. }
  42. static inline void btrfs_init_delayed_node(
  43. struct btrfs_delayed_node *delayed_node,
  44. struct btrfs_root *root, u64 inode_id)
  45. {
  46. delayed_node->root = root;
  47. delayed_node->inode_id = inode_id;
  48. atomic_set(&delayed_node->refs, 0);
  49. delayed_node->count = 0;
  50. delayed_node->in_list = 0;
  51. delayed_node->inode_dirty = 0;
  52. delayed_node->ins_root = RB_ROOT;
  53. delayed_node->del_root = RB_ROOT;
  54. mutex_init(&delayed_node->mutex);
  55. delayed_node->index_cnt = 0;
  56. INIT_LIST_HEAD(&delayed_node->n_list);
  57. INIT_LIST_HEAD(&delayed_node->p_list);
  58. delayed_node->bytes_reserved = 0;
  59. }
  60. static inline int btrfs_is_continuous_delayed_item(
  61. struct btrfs_delayed_item *item1,
  62. struct btrfs_delayed_item *item2)
  63. {
  64. if (item1->key.type == BTRFS_DIR_INDEX_KEY &&
  65. item1->key.objectid == item2->key.objectid &&
  66. item1->key.type == item2->key.type &&
  67. item1->key.offset + 1 == item2->key.offset)
  68. return 1;
  69. return 0;
  70. }
  71. static inline struct btrfs_delayed_root *btrfs_get_delayed_root(
  72. struct btrfs_root *root)
  73. {
  74. return root->fs_info->delayed_root;
  75. }
/*
 * Look up the delayed node attached to @inode, taking a reference on it.
 *
 * Fast path: if the inode already caches a pointer to its delayed node,
 * grab a reference locklessly.  Otherwise fall back to the per-root
 * radix tree under root->inode_lock and cache the result in the inode,
 * taking an extra reference for the inode's cached pointer.
 *
 * Returns the referenced node, or NULL when the inode has none.
 */
static struct btrfs_delayed_node *btrfs_get_delayed_node(struct inode *inode)
{
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	struct btrfs_delayed_node *node;

	node = ACCESS_ONCE(btrfs_inode->delayed_node);
	if (node) {
		atomic_inc(&node->refs);
		return node;
	}

	spin_lock(&root->inode_lock);
	node = radix_tree_lookup(&root->delayed_nodes_tree, ino);
	if (node) {
		if (btrfs_inode->delayed_node) {
			/* someone else cached it while we took the lock */
			atomic_inc(&node->refs);	/* caller's ref */
			BUG_ON(btrfs_inode->delayed_node != node);
			spin_unlock(&root->inode_lock);
			return node;
		}
		btrfs_inode->delayed_node = node;
		atomic_inc(&node->refs);	/* caller's ref */
		atomic_inc(&node->refs);	/* ref for the inode's cache */
		spin_unlock(&root->inode_lock);
		return node;
	}
	spin_unlock(&root->inode_lock);

	return NULL;
}
/*
 * Return the delayed node for @inode, allocating and inserting a new
 * one when none exists yet.  On success two references are held: one
 * for the caller and one for the pointer cached in the inode.
 *
 * Returns the node, or an ERR_PTR on allocation/preload failure.
 */
static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
							struct inode *inode)
{
	struct btrfs_delayed_node *node;
	struct btrfs_inode *btrfs_inode = BTRFS_I(inode);
	struct btrfs_root *root = btrfs_inode->root;
	u64 ino = btrfs_ino(inode);
	int ret;

again:
	node = btrfs_get_delayed_node(inode);
	if (node)
		return node;

	node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
	if (!node)
		return ERR_PTR(-ENOMEM);
	btrfs_init_delayed_node(node, root, ino);

	atomic_inc(&node->refs);	/* cached in the btrfs inode */
	atomic_inc(&node->refs);	/* caller's ref */

	ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM);
	if (ret) {
		kmem_cache_free(delayed_node_cache, node);
		return ERR_PTR(ret);
	}

	spin_lock(&root->inode_lock);
	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
	if (ret == -EEXIST) {
		/* lost the race with another inserter; retry the lookup */
		kmem_cache_free(delayed_node_cache, node);
		spin_unlock(&root->inode_lock);
		radix_tree_preload_end();
		goto again;
	}
	btrfs_inode->delayed_node = node;
	spin_unlock(&root->inode_lock);
	radix_tree_preload_end();

	return node;
}
/*
 * Put a delayed node onto the work lists.
 *
 * A node already on the node list is (re)queued on the prepare list:
 * moved to the tail when already there, added only when @mod is
 * nonzero.  A node not yet queued goes on both lists and picks up a
 * reference for its list membership.
 */
static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root,
				     struct btrfs_delayed_node *node,
				     int mod)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		if (!list_empty(&node->p_list))
			list_move_tail(&node->p_list, &root->prepare_list);
		else if (mod)
			list_add_tail(&node->p_list, &root->prepare_list);
	} else {
		list_add_tail(&node->n_list, &root->node_list);
		list_add_tail(&node->p_list, &root->prepare_list);
		atomic_inc(&node->refs);	/* ref for list membership */
		root->nodes++;
		node->in_list = 1;
	}
	spin_unlock(&root->lock);
}
/*
 * Take a delayed node off the work lists and drop the reference its
 * list membership held.  No-op when the node is not queued.
 */
static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root,
				       struct btrfs_delayed_node *node)
{
	spin_lock(&root->lock);
	if (node->in_list) {
		root->nodes--;
		atomic_dec(&node->refs);	/* drop the list's ref */
		list_del_init(&node->n_list);
		if (!list_empty(&node->p_list))
			list_del_init(&node->p_list);
		node->in_list = 0;
	}
	spin_unlock(&root->lock);
}
/*
 * Return the first node on the node list with a reference held, or
 * NULL when the list is empty.
 */
struct btrfs_delayed_node *btrfs_first_delayed_node(
			struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->node_list))
		goto out;

	p = delayed_root->node_list.next;
	node = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
/*
 * Return (with a reference) the node that follows @node on the node
 * list; if @node has already been dequeued, restart from the head.
 * Returns NULL at the end of the list.  @node's own reference is not
 * dropped here — the caller still owns it.
 */
struct btrfs_delayed_node *btrfs_next_delayed_node(
						struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_root *delayed_root;
	struct list_head *p;
	struct btrfs_delayed_node *next = NULL;

	delayed_root = node->root->fs_info->delayed_root;
	spin_lock(&delayed_root->lock);
	if (!node->in_list) {	/* dequeued meanwhile: start over */
		if (list_empty(&delayed_root->node_list))
			goto out;
		p = delayed_root->node_list.next;
	} else if (list_is_last(&node->n_list, &delayed_root->node_list))
		goto out;
	else
		p = node->n_list.next;

	next = list_entry(p, struct btrfs_delayed_node, n_list);
	atomic_inc(&next->refs);
out:
	spin_unlock(&delayed_root->lock);

	return next;
}
/*
 * Drop a reference on @delayed_node.  While any delayed items remain,
 * the node is (re)queued for processing; otherwise it is dequeued.
 * When the last reference goes away the node is removed from the radix
 * tree and freed; the refcount is re-checked under root->inode_lock to
 * defend against a concurrent lookup taking a new reference.
 *
 * NOTE(review): the re-check only covers lookups that take inode_lock;
 * the lockless cached-pointer fast path in btrfs_get_delayed_node()
 * could still race with the free — verify against the lifetime rules
 * for btrfs_inode->delayed_node.
 */
static void __btrfs_release_delayed_node(
				struct btrfs_delayed_node *delayed_node,
				int mod)
{
	struct btrfs_delayed_root *delayed_root;

	if (!delayed_node)
		return;

	delayed_root = delayed_node->root->fs_info->delayed_root;

	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		btrfs_queue_delayed_node(delayed_root, delayed_node, mod);
	else
		btrfs_dequeue_delayed_node(delayed_root, delayed_node);
	mutex_unlock(&delayed_node->mutex);

	if (atomic_dec_and_test(&delayed_node->refs)) {
		struct btrfs_root *root = delayed_node->root;
		spin_lock(&root->inode_lock);
		/* re-check: a lookup may have revived the node */
		if (atomic_read(&delayed_node->refs) == 0) {
			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
			kmem_cache_free(delayed_node_cache, delayed_node);
		}
		spin_unlock(&root->inode_lock);
	}
}
/* Drop a reference without forcing the node onto the prepare list. */
static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 0);
}
/*
 * Pop the first node off the prepare list and return it with a
 * reference held, or NULL when the list is empty.
 */
struct btrfs_delayed_node *btrfs_first_prepared_delayed_node(
					struct btrfs_delayed_root *delayed_root)
{
	struct list_head *p;
	struct btrfs_delayed_node *node = NULL;

	spin_lock(&delayed_root->lock);
	if (list_empty(&delayed_root->prepare_list))
		goto out;

	p = delayed_root->prepare_list.next;
	list_del_init(p);
	node = list_entry(p, struct btrfs_delayed_node, p_list);
	atomic_inc(&node->refs);
out:
	spin_unlock(&delayed_root->lock);

	return node;
}
/* Drop a reference, requeueing the node on the prepare list if needed. */
static inline void btrfs_release_prepared_delayed_node(
					struct btrfs_delayed_node *node)
{
	__btrfs_release_delayed_node(node, 1);
}
  261. struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
  262. {
  263. struct btrfs_delayed_item *item;
  264. item = kmalloc(sizeof(*item) + data_len, GFP_NOFS);
  265. if (item) {
  266. item->data_len = data_len;
  267. item->ins_or_del = 0;
  268. item->bytes_reserved = 0;
  269. item->delayed_node = NULL;
  270. atomic_set(&item->refs, 1);
  271. }
  272. return item;
  273. }
/*
 * Binary-search the rbtree @root for @key.
 *
 * On an exact hit the matching item is returned and *prev/*next are
 * left untouched.  On a miss NULL is returned and, when requested,
 * *prev and *next are set to the items that would precede/follow @key
 * in key order (NULL when no such neighbour exists).
 */
static struct btrfs_delayed_item *__btrfs_lookup_delayed_item(
				struct rb_root *root,
				struct btrfs_key *key,
				struct btrfs_delayed_item **prev,
				struct btrfs_delayed_item **next)
{
	struct rb_node *node, *prev_node = NULL;
	struct btrfs_delayed_item *delayed_item = NULL;
	int ret = 0;

	node = root->rb_node;

	while (node) {
		delayed_item = rb_entry(node, struct btrfs_delayed_item,
					rb_node);
		prev_node = node;
		ret = btrfs_comp_cpu_keys(&delayed_item->key, key);
		if (ret < 0)
			node = node->rb_right;
		else if (ret > 0)
			node = node->rb_left;
		else
			return delayed_item;
	}

	/* after the loop, delayed_item/ret describe the last node visited */
	if (prev) {
		if (!prev_node)
			*prev = NULL;
		else if (ret < 0)
			/* last visited item sorts before @key */
			*prev = delayed_item;
		else if ((node = rb_prev(prev_node)) != NULL) {
			*prev = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*prev = NULL;
	}

	if (next) {
		if (!prev_node)
			*next = NULL;
		else if (ret > 0)
			/* last visited item sorts after @key */
			*next = delayed_item;
		else if ((node = rb_next(prev_node)) != NULL) {
			*next = rb_entry(node, struct btrfs_delayed_item,
					 rb_node);
		} else
			*next = NULL;
	}
	return NULL;
}
  320. struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item(
  321. struct btrfs_delayed_node *delayed_node,
  322. struct btrfs_key *key)
  323. {
  324. struct btrfs_delayed_item *item;
  325. item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
  326. NULL, NULL);
  327. return item;
  328. }
  329. struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item(
  330. struct btrfs_delayed_node *delayed_node,
  331. struct btrfs_key *key)
  332. {
  333. struct btrfs_delayed_item *item;
  334. item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
  335. NULL, NULL);
  336. return item;
  337. }
  338. struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item(
  339. struct btrfs_delayed_node *delayed_node,
  340. struct btrfs_key *key)
  341. {
  342. struct btrfs_delayed_item *item, *next;
  343. item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key,
  344. NULL, &next);
  345. if (!item)
  346. item = next;
  347. return item;
  348. }
  349. struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item(
  350. struct btrfs_delayed_node *delayed_node,
  351. struct btrfs_key *key)
  352. {
  353. struct btrfs_delayed_item *item, *next;
  354. item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key,
  355. NULL, &next);
  356. if (!item)
  357. item = next;
  358. return item;
  359. }
/*
 * Insert @ins into the insertion or deletion tree of @delayed_node,
 * keyed by ins->key.
 *
 * @action selects the tree (BTRFS_DELAYED_INSERTION_ITEM or
 * BTRFS_DELAYED_DELETION_ITEM).  Returns -EEXIST when an item with the
 * same key is already present, 0 otherwise.  Bumps the node's item
 * count and the global delayed-item counter, and advances index_cnt
 * past any inserted DIR_INDEX key.
 */
static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node,
				    struct btrfs_delayed_item *ins,
				    int action)
{
	struct rb_node **p, *node;
	struct rb_node *parent_node = NULL;
	struct rb_root *root;
	struct btrfs_delayed_item *item;
	int cmp;

	if (action == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_node->ins_root;
	else if (action == BTRFS_DELAYED_DELETION_ITEM)
		root = &delayed_node->del_root;
	else
		BUG();
	p = &root->rb_node;
	node = &ins->rb_node;

	/* standard rbtree descent to the insertion point */
	while (*p) {
		parent_node = *p;
		item = rb_entry(parent_node, struct btrfs_delayed_item,
				rb_node);

		cmp = btrfs_comp_cpu_keys(&item->key, &ins->key);
		if (cmp < 0)
			p = &(*p)->rb_right;
		else if (cmp > 0)
			p = &(*p)->rb_left;
		else
			return -EEXIST;
	}

	rb_link_node(node, parent_node, p);
	rb_insert_color(node, root);
	ins->delayed_node = delayed_node;
	ins->ins_or_del = action;

	/* keep index_cnt ahead of every queued dir index */
	if (ins->key.type == BTRFS_DIR_INDEX_KEY &&
	    action == BTRFS_DELAYED_INSERTION_ITEM &&
	    ins->key.offset >= delayed_node->index_cnt)
		delayed_node->index_cnt = ins->key.offset + 1;

	delayed_node->count++;
	atomic_inc(&delayed_node->root->fs_info->delayed_root->items);
	return 0;
}
/* Queue @item for insertion when @node is processed. */
static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node,
					      struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_INSERTION_ITEM);
}
/* Queue @item for deletion when @node is processed. */
static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
					     struct btrfs_delayed_item *item)
{
	return __btrfs_add_delayed_item(node, item,
					BTRFS_DELAYED_DELETION_ITEM);
}
/*
 * Unlink @delayed_item from its node's insertion or deletion tree and
 * update the counters.  Wakes sleepers on the delayed root once the
 * global item count drops below the background threshold.
 */
static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item)
{
	struct rb_root *root;
	struct btrfs_delayed_root *delayed_root;

	delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root;

	BUG_ON(!delayed_root);
	BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM &&
	       delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM);

	if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM)
		root = &delayed_item->delayed_node->ins_root;
	else
		root = &delayed_item->delayed_node->del_root;

	rb_erase(&delayed_item->rb_node, root);
	delayed_item->delayed_node->count--;

	atomic_dec(&delayed_root->items);
	if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND &&
	    waitqueue_active(&delayed_root->wait))
		wake_up(&delayed_root->wait);
}
  432. static void btrfs_release_delayed_item(struct btrfs_delayed_item *item)
  433. {
  434. if (item) {
  435. __btrfs_remove_delayed_item(item);
  436. if (atomic_dec_and_test(&item->refs))
  437. kfree(item);
  438. }
  439. }
  440. struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item(
  441. struct btrfs_delayed_node *delayed_node)
  442. {
  443. struct rb_node *p;
  444. struct btrfs_delayed_item *item = NULL;
  445. p = rb_first(&delayed_node->ins_root);
  446. if (p)
  447. item = rb_entry(p, struct btrfs_delayed_item, rb_node);
  448. return item;
  449. }
  450. struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item(
  451. struct btrfs_delayed_node *delayed_node)
  452. {
  453. struct rb_node *p;
  454. struct btrfs_delayed_item *item = NULL;
  455. p = rb_first(&delayed_node->del_root);
  456. if (p)
  457. item = rb_entry(p, struct btrfs_delayed_item, rb_node);
  458. return item;
  459. }
  460. struct btrfs_delayed_item *__btrfs_next_delayed_item(
  461. struct btrfs_delayed_item *item)
  462. {
  463. struct rb_node *p;
  464. struct btrfs_delayed_item *next = NULL;
  465. p = rb_next(&item->rb_node);
  466. if (p)
  467. next = rb_entry(p, struct btrfs_delayed_item, rb_node);
  468. return next;
  469. }
  470. static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root,
  471. u64 root_id)
  472. {
  473. struct btrfs_key root_key;
  474. if (root->objectid == root_id)
  475. return root;
  476. root_key.objectid = root_id;
  477. root_key.type = BTRFS_ROOT_ITEM_KEY;
  478. root_key.offset = (u64)-1;
  479. return btrfs_read_fs_root_no_name(root->fs_info, &root_key);
  480. }
/*
 * Migrate enough metadata space for one tree item from the
 * transaction's block reservation into the fs-wide delayed-item
 * reservation, recording the amount on @item so it can be released
 * later.  A transaction with no bytes reserved (e.g. a joined one)
 * reserves nothing and returns 0.
 */
static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
					       struct btrfs_root *root,
					       struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;

	if (!trans->bytes_reserved)
		return 0;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_item",
					      item->key.objectid,
					      num_bytes, 1);
		item->bytes_reserved = num_bytes;
	}

	return ret;
}
/* Give back the metadata space reserved for @item, if any. */
static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
						struct btrfs_delayed_item *item)
{
	struct btrfs_block_rsv *rsv;

	if (!item->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_item",
				      item->key.objectid, item->bytes_reserved,
				      0);
	btrfs_block_rsv_release(root, rsv,
				item->bytes_reserved);
}
/*
 * Reserve metadata space for the delayed inode update on @node.
 *
 * Three cases:
 *  - no usable transaction reservation: take a fresh no-flush
 *    reservation (-EAGAIN is mapped to -ENOSPC so callers see a hard
 *    failure);
 *  - the source is the delalloc reservation and this inode still holds
 *    its delalloc meta reservation: steal it by migrating, then release
 *    the inode's claim on the source;
 *  - otherwise (delalloc source, no per-inode reservation): try a
 *    fresh no-flush reservation, fall back to migrating from the
 *    source, and as a WARNed last resort dip into the global reserve.
 *
 * On success node->bytes_reserved records the reserved amount.
 */
static int btrfs_delayed_inode_reserve_metadata(
					struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct inode *inode,
					struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *src_rsv;
	struct btrfs_block_rsv *dst_rsv;
	u64 num_bytes;
	int ret;
	bool release = false;

	src_rsv = trans->block_rsv;
	dst_rsv = &root->fs_info->delayed_block_rsv;

	num_bytes = btrfs_calc_trans_metadata_size(root, 1);
	if (!src_rsv || (!trans->bytes_reserved &&
	    src_rsv != &root->fs_info->delalloc_block_rsv)) {
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		/* callers expect a hard -ENOSPC, not a retry hint */
		if (ret == -EAGAIN)
			ret = -ENOSPC;
		if (!ret) {
			node->bytes_reserved = num_bytes;
			trace_btrfs_space_reservation(root->fs_info,
						      "delayed_inode",
						      btrfs_ino(inode),
						      num_bytes, 1);
		}
		return ret;
	} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
		spin_lock(&BTRFS_I(inode)->lock);
		if (BTRFS_I(inode)->delalloc_meta_reserved) {
			/* steal the inode's delalloc meta reservation */
			BTRFS_I(inode)->delalloc_meta_reserved = 0;
			spin_unlock(&BTRFS_I(inode)->lock);
			release = true;
			goto migrate;
		}
		spin_unlock(&BTRFS_I(inode)->lock);

		/* prefer a fresh reservation over draining delalloc space */
		ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
		if (!ret)
			goto out;

		/* last resort: the global reserve — should not happen */
		WARN_ON(1);
		ret = btrfs_block_rsv_migrate(&root->fs_info->global_block_rsv,
					      dst_rsv, num_bytes);
		goto out;
	}

migrate:
	ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);

out:
	if (!ret) {
		trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
					      btrfs_ino(inode), num_bytes, 1);
		node->bytes_reserved = num_bytes;
	}

	/* drop the stolen delalloc claim; the space now lives in dst_rsv */
	if (release) {
		trace_btrfs_space_reservation(root->fs_info, "delalloc",
					      btrfs_ino(inode), num_bytes, 0);
		btrfs_block_rsv_release(root, src_rsv, num_bytes);
	}

	return ret;
}
/* Release the space reserved for @node's pending inode update. */
static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
						 struct btrfs_delayed_node *node)
{
	struct btrfs_block_rsv *rsv;

	if (!node->bytes_reserved)
		return;

	rsv = &root->fs_info->delayed_block_rsv;
	trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
				      node->inode_id, node->bytes_reserved, 0);
	btrfs_block_rsv_release(root, rsv,
				node->bytes_reserved);
	node->bytes_reserved = 0;
}
/*
 * Batch-insert a run of continuous delayed items into the current leaf.
 *
 * The path already points at the slot following the previously inserted
 * key.  Starting from @item, key-continuous items are gathered while
 * their total size fits in the leaf's free space, inserted in one
 * setup_items_for_insert() call, and their payloads copied in.
 * Consumed items have their reservations released and are freed.
 */
static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	int free_space;
	int total_data_size = 0, total_size = 0;
	struct extent_buffer *leaf;
	char *data_ptr;
	struct btrfs_key *keys;
	u32 *data_size;
	struct list_head head;
	int slot;
	int nitems;
	int i;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];
	free_space = btrfs_leaf_free_space(root, leaf);
	INIT_LIST_HEAD(&head);

	next = item;
	nitems = 0;

	/*
	 * gather the continuous items that fit into the leaf's remaining
	 * free space
	 */
	while (total_size + next->data_len + sizeof(struct btrfs_item) <=
	       free_space) {
		total_data_size += next->data_len;
		total_size += next->data_len + sizeof(struct btrfs_item);
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;
	}

	if (!nitems) {
		ret = 0;
		goto out;
	}

	btrfs_set_path_blocking(path);

	keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS);
	if (!keys) {
		ret = -ENOMEM;
		goto out;
	}

	data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS);
	if (!data_size) {
		ret = -ENOMEM;
		goto error;
	}

	/* collect the keys and sizes of all batched items */
	i = 0;
	list_for_each_entry(next, &head, tree_list) {
		keys[i] = next->key;
		data_size[i] = next->data_len;
		i++;
	}

	/* back to spinning locks for the actual insertion */
	btrfs_clear_path_blocking(path, NULL, 0);

	setup_items_for_insert(trans, root, path, keys, data_size,
			       total_data_size, total_size, nitems);

	/* copy each item's payload into its freshly created slot */
	slot = path->slots[0];
	list_for_each_entry_safe(curr, next, &head, tree_list) {
		data_ptr = btrfs_item_ptr(leaf, slot, char);
		write_extent_buffer(leaf, &curr->data,
				    (unsigned long)data_ptr,
				    curr->data_len);
		slot++;

		btrfs_delayed_item_release_metadata(root, curr);

		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

error:
	kfree(data_size);
	kfree(keys);
out:
	return ret;
}
  668. static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans,
  669. struct btrfs_root *root,
  670. struct btrfs_path *path,
  671. struct btrfs_delayed_item *delayed_item)
  672. {
  673. struct extent_buffer *leaf;
  674. struct btrfs_item *item;
  675. char *ptr;
  676. int ret;
  677. ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key,
  678. delayed_item->data_len);
  679. if (ret < 0 && ret != -EEXIST)
  680. return ret;
  681. leaf = path->nodes[0];
  682. item = btrfs_item_nr(leaf, path->slots[0]);
  683. ptr = btrfs_item_ptr(leaf, path->slots[0], char);
  684. write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr,
  685. delayed_item->data_len);
  686. btrfs_mark_buffer_dirty(leaf);
  687. btrfs_delayed_item_release_metadata(root, delayed_item);
  688. return 0;
  689. }
/*
 * Drain the insertion tree of @node: insert items one at a time,
 * batching any key-continuous run following the inserted item into the
 * same leaf.  The node mutex is held around each round; on error the
 * path is released and the error returned.
 */
static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_insertion_item(node);
	if (!curr)
		goto insert_end;

	ret = btrfs_insert_delayed_item(trans, root, path, curr);
	if (ret < 0) {
		btrfs_release_path(path);
		goto insert_end;
	}

	prev = curr;
	curr = __btrfs_next_delayed_item(prev);
	if (curr && btrfs_is_continuous_delayed_item(prev, curr)) {
		/* insert the continuous items into the same leaf */
		path->slots[0]++;
		btrfs_batch_insert_items(trans, root, path, curr);
	}
	btrfs_release_delayed_item(prev);
	btrfs_mark_buffer_dirty(path->nodes[0]);

	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

insert_end:
	mutex_unlock(&node->mutex);
	return ret;
}
/*
 * Delete from the tree the run of leaf items matching a continuous run
 * of delayed deletion items starting at @item.  The path must point at
 * the leaf slot of @item's key.  Matching delayed items are released
 * along with their reservations once btrfs_del_items() succeeds.
 */
static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct btrfs_delayed_item *item)
{
	struct btrfs_delayed_item *curr, *next;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct list_head head;
	int nitems, i, last_item;
	int ret = 0;

	BUG_ON(!path->nodes[0]);

	leaf = path->nodes[0];

	i = path->slots[0];
	last_item = btrfs_header_nritems(leaf) - 1;
	if (i > last_item)
		return -ENOENT;	/* slot is past the end of this leaf */

	next = item;
	INIT_LIST_HEAD(&head);
	btrfs_item_key_to_cpu(leaf, &key, i);
	nitems = 0;
	/*
	 * collect the continuous delayed items whose keys match the leaf
	 * items from the current slot onwards
	 */
	while (btrfs_comp_cpu_keys(&next->key, &key) == 0) {
		list_add_tail(&next->tree_list, &head);
		nitems++;

		curr = next;
		next = __btrfs_next_delayed_item(curr);
		if (!next)
			break;

		if (!btrfs_is_continuous_delayed_item(curr, next))
			break;

		i++;
		if (i > last_item)
			break;
		btrfs_item_key_to_cpu(leaf, &key, i);
	}

	if (!nitems)
		return 0;

	ret = btrfs_del_items(trans, root, path, path->slots[0], nitems);
	if (ret)
		goto out;

	list_for_each_entry_safe(curr, next, &head, tree_list) {
		btrfs_delayed_item_release_metadata(root, curr);
		list_del(&curr->tree_list);
		btrfs_release_delayed_item(curr);
	}

out:
	return ret;
}
/*
 * Drain the deletion tree of @node: for each pending deletion, search
 * for its key and batch-delete the matching run.  A search returning
 * > 0 means the key is already gone from the tree; that delayed item
 * is dropped and the walk continues.
 */
static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans,
				      struct btrfs_path *path,
				      struct btrfs_root *root,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_delayed_item *curr, *prev;
	int ret = 0;

do_again:
	mutex_lock(&node->mutex);
	curr = __btrfs_first_delayed_deletion_item(node);
	if (!curr)
		goto delete_fail;

	ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1);
	if (ret < 0)
		goto delete_fail;
	else if (ret > 0) {
		/*
		 * the item this deletion points to no longer exists;
		 * just drop the delayed item and move on
		 */
		prev = curr;
		curr = __btrfs_next_delayed_item(prev);
		btrfs_release_delayed_item(prev);
		ret = 0;
		btrfs_release_path(path);
		if (curr)
			goto do_again;
		else
			goto delete_fail;
	}

	btrfs_batch_delete_items(trans, root, path, curr);
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	goto do_again;

delete_fail:
	btrfs_release_path(path);
	mutex_unlock(&node->mutex);
	return ret;
}
/*
 * Clear the inode-dirty flag on @delayed_node and give back the item
 * count it held, waking throttled waiters once the global count falls
 * below the background threshold.
 */
static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node)
{
	struct btrfs_delayed_root *delayed_root;

	if (delayed_node && delayed_node->inode_dirty) {
		BUG_ON(!delayed_node->root);
		delayed_node->inode_dirty = 0;
		delayed_node->count--;

		delayed_root = delayed_node->root->fs_info->delayed_root;
		atomic_dec(&delayed_root->items);
		if (atomic_read(&delayed_root->items) <
		    BTRFS_DELAYED_BACKGROUND &&
		    waitqueue_active(&delayed_root->wait))
			wake_up(&delayed_root->wait);
	}
}
/*
 * Flush @node's cached inode_item into its INODE_ITEM leaf entry.
 * Does nothing when the node is not marked inode-dirty.  On success the
 * node's inode reservation is released and the dirty state cleared.
 * Returns -ENOENT when the inode item cannot be found, or a negative
 * error from the lookup.
 */
static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_delayed_node *node)
{
	struct btrfs_key key;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	int ret;

	mutex_lock(&node->mutex);
	if (!node->inode_dirty) {
		mutex_unlock(&node->mutex);
		return 0;
	}

	key.objectid = node->inode_id;
	btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
	key.offset = 0;
	ret = btrfs_lookup_inode(trans, root, path, &key, 1);
	if (ret > 0) {
		/* not found */
		btrfs_release_path(path);
		mutex_unlock(&node->mutex);
		return -ENOENT;
	} else if (ret < 0) {
		mutex_unlock(&node->mutex);
		return ret;
	}

	btrfs_unlock_up_safe(path, 1);
	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item,
			    sizeof(struct btrfs_inode_item));
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	btrfs_delayed_inode_release_metadata(root, node);
	btrfs_release_delayed_inode(node);
	mutex_unlock(&node->mutex);

	return 0;
}
/*
 * Run every queued delayed node: insert its pending items, apply its
 * pending deletions and flush its inode item, using the dedicated
 * delayed-metadata reservation for the duration.  Aborts the
 * transaction on error.
 */
int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root)
{
	struct btrfs_root *curr_root = root;
	struct btrfs_delayed_root *delayed_root;
	struct btrfs_delayed_node *curr_node, *prev_node;
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret = 0;

	if (trans->aborted)
		return -EIO;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	/* switch to the delayed-metadata reservation; restored below */
	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	delayed_root = btrfs_get_delayed_root(root);

	curr_node = btrfs_first_delayed_node(delayed_root);
	while (curr_node) {
		/* nodes may belong to different subvolume roots */
		curr_root = curr_node->root;
		ret = btrfs_insert_delayed_items(trans, path, curr_root,
						 curr_node);
		if (!ret)
			ret = btrfs_delete_delayed_items(trans, path,
							 curr_root, curr_node);
		if (!ret)
			ret = btrfs_update_delayed_inode(trans, curr_root,
							 path, curr_node);
		if (ret) {
			btrfs_release_delayed_node(curr_node);
			btrfs_abort_transaction(trans, root, ret);
			break;
		}

		prev_node = curr_node;
		curr_node = btrfs_next_delayed_node(curr_node);
		btrfs_release_delayed_node(prev_node);
	}

	btrfs_free_path(path);
	trans->block_rsv = block_rsv;

	return ret;
}
/*
 * Flush one delayed node (insertions, deletions, inode update) using
 * the delayed-metadata reservation; the transaction's previous block
 * reservation is restored before returning.
 */
static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
					      struct btrfs_delayed_node *node)
{
	struct btrfs_path *path;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->leave_spinning = 1;

	block_rsv = trans->block_rsv;
	trans->block_rsv = &node->root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, node->root, node);
	if (!ret)
		ret = btrfs_update_delayed_inode(trans, node->root, path, node);
	btrfs_free_path(path);

	trans->block_rsv = block_rsv;
	return ret;
}
/*
 * Flush the delayed work attached to @inode, if any.  Skips the flush
 * (but still drops the lookup reference) when the node has no pending
 * items.
 */
int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
				     struct inode *inode)
{
	struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
	int ret;

	if (!delayed_node)
		return 0;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->count) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return 0;
	}
	mutex_unlock(&delayed_node->mutex);

	ret = __btrfs_commit_inode_delayed_items(trans, delayed_node);
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
  940. void btrfs_remove_delayed_node(struct inode *inode)
  941. {
  942. struct btrfs_delayed_node *delayed_node;
  943. delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node);
  944. if (!delayed_node)
  945. return;
  946. BTRFS_I(inode)->delayed_node = NULL;
  947. btrfs_release_delayed_node(delayed_node);
  948. }
/*
 * Work item handed to the delayed-metadata worker threads: carries the
 * delayed node to flush and the root it was queued against.
 */
struct btrfs_async_delayed_node {
	struct btrfs_root *root;
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_work work;		/* embedded worker-queue hook */
};
/*
 * Worker-thread callback: flush one prepared delayed node inside its
 * own joined transaction.  If the node gained new items while we were
 * flushing, the work item is requeued; otherwise the node is dequeued
 * from the delayed root and the work item freed.
 */
static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
{
	struct btrfs_async_delayed_node *async_node;
	struct btrfs_trans_handle *trans;
	struct btrfs_path *path;
	struct btrfs_delayed_node *delayed_node = NULL;
	struct btrfs_root *root;
	struct btrfs_block_rsv *block_rsv;
	unsigned long nr = 0;
	int need_requeue = 0;
	int ret;

	async_node = container_of(work, struct btrfs_async_delayed_node, work);

	path = btrfs_alloc_path();
	if (!path)
		goto out;
	path->leave_spinning = 1;

	delayed_node = async_node->delayed_node;
	root = delayed_node->root;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		goto free_path;

	/* Charge the flush to the dedicated delayed metadata reservation. */
	block_rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->delayed_block_rsv;

	ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
	if (!ret)
		ret = btrfs_delete_delayed_items(trans, path, root,
						 delayed_node);
	/*
	 * NOTE(review): the return value of btrfs_update_delayed_inode()
	 * is dropped here, unlike in __btrfs_commit_inode_delayed_items();
	 * a failure would go unreported on this path — confirm intent.
	 */
	if (!ret)
		btrfs_update_delayed_inode(trans, root, path, delayed_node);

	/*
	 * New items may have been queued on this node while we were busy;
	 * keep the work item alive in that case, otherwise take the node
	 * off the delayed root's list.
	 */
	mutex_lock(&delayed_node->mutex);
	if (delayed_node->count)
		need_requeue = 1;
	else
		btrfs_dequeue_delayed_node(root->fs_info->delayed_root,
					   delayed_node);
	mutex_unlock(&delayed_node->mutex);

	nr = trans->blocks_used;

	trans->block_rsv = block_rsv;
	btrfs_end_transaction_dmeta(trans, root);
	__btrfs_btree_balance_dirty(root, nr);

free_path:
	btrfs_free_path(path);
out:
	if (need_requeue)
		btrfs_requeue_work(&async_node->work);
	else {
		btrfs_release_prepared_delayed_node(delayed_node);
		kfree(async_node);
	}
}
  1004. static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
  1005. struct btrfs_root *root, int all)
  1006. {
  1007. struct btrfs_async_delayed_node *async_node;
  1008. struct btrfs_delayed_node *curr;
  1009. int count = 0;
  1010. again:
  1011. curr = btrfs_first_prepared_delayed_node(delayed_root);
  1012. if (!curr)
  1013. return 0;
  1014. async_node = kmalloc(sizeof(*async_node), GFP_NOFS);
  1015. if (!async_node) {
  1016. btrfs_release_prepared_delayed_node(curr);
  1017. return -ENOMEM;
  1018. }
  1019. async_node->root = root;
  1020. async_node->delayed_node = curr;
  1021. async_node->work.func = btrfs_async_run_delayed_node_done;
  1022. async_node->work.flags = 0;
  1023. btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work);
  1024. count++;
  1025. if (all || count < 4)
  1026. goto again;
  1027. return 0;
  1028. }
  1029. void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
  1030. {
  1031. struct btrfs_delayed_root *delayed_root;
  1032. delayed_root = btrfs_get_delayed_root(root);
  1033. WARN_ON(btrfs_first_delayed_node(delayed_root));
  1034. }
  1035. void btrfs_balance_delayed_items(struct btrfs_root *root)
  1036. {
  1037. struct btrfs_delayed_root *delayed_root;
  1038. delayed_root = btrfs_get_delayed_root(root);
  1039. if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
  1040. return;
  1041. if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
  1042. int ret;
  1043. ret = btrfs_wq_run_delayed_node(delayed_root, root, 1);
  1044. if (ret)
  1045. return;
  1046. wait_event_interruptible_timeout(
  1047. delayed_root->wait,
  1048. (atomic_read(&delayed_root->items) <
  1049. BTRFS_DELAYED_BACKGROUND),
  1050. HZ);
  1051. return;
  1052. }
  1053. btrfs_wq_run_delayed_node(delayed_root, root, 0);
  1054. }
/*
 * Queue the insertion of a directory index item (BTRFS_DIR_INDEX_KEY)
 * for @dir on its delayed node instead of touching the b-tree now.
 * The on-disk dir item image (location key, transid, name, type) is
 * built in the delayed item's trailing data area.
 *
 * Returns 0 on success, -ENOMEM if the delayed item cannot be
 * allocated, or the PTR_ERR() from delayed-node creation.
 */
int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, const char *name,
				   int name_len, struct inode *dir,
				   struct btrfs_disk_key *disk_key, u8 type,
				   u64 index)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *delayed_item;
	struct btrfs_dir_item *dir_item;
	int ret;

	delayed_node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(delayed_node))
		return PTR_ERR(delayed_node);

	/* Room for the dir item header plus the name right behind it. */
	delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len);
	if (!delayed_item) {
		ret = -ENOMEM;
		goto release_node;
	}

	delayed_item->key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
	delayed_item->key.offset = index;

	/* Fill the dir item image stored inside the delayed item. */
	dir_item = (struct btrfs_dir_item *)delayed_item->data;
	dir_item->location = *disk_key;
	dir_item->transid = cpu_to_le64(trans->transid);
	dir_item->data_len = 0;
	dir_item->name_len = cpu_to_le16(name_len);
	dir_item->type = type;
	memcpy((char *)(dir_item + 1), name, name_len);

	ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
	/*
	 * NOTE(review): a failed reservation crashes via BUG_ON() here;
	 * the code assumes it cannot fail on this path — confirm.
	 */
	BUG_ON(ret);

	mutex_lock(&delayed_node->mutex);
	ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(name: %s) into "
				"the insertion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				name,
				(unsigned long long)delayed_node->root->objectid,
				(unsigned long long)delayed_node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&delayed_node->mutex);

release_node:
	btrfs_release_delayed_node(delayed_node);
	return ret;
}
  1102. static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root,
  1103. struct btrfs_delayed_node *node,
  1104. struct btrfs_key *key)
  1105. {
  1106. struct btrfs_delayed_item *item;
  1107. mutex_lock(&node->mutex);
  1108. item = __btrfs_lookup_delayed_insertion_item(node, key);
  1109. if (!item) {
  1110. mutex_unlock(&node->mutex);
  1111. return 1;
  1112. }
  1113. btrfs_delayed_item_release_metadata(root, item);
  1114. btrfs_release_delayed_item(item);
  1115. mutex_unlock(&node->mutex);
  1116. return 0;
  1117. }
/*
 * Delay the deletion of dir index @index of @dir.  If a matching
 * insertion is still queued (never reached the b-tree) the two cancel
 * out and nothing more is needed; otherwise a zero-size deletion item
 * is queued on the delayed node.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * PTR_ERR() from delayed-node creation.
 */
int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, struct inode *dir,
				   u64 index)
{
	struct btrfs_delayed_node *node;
	struct btrfs_delayed_item *item;
	struct btrfs_key item_key;
	int ret;

	node = btrfs_get_or_create_delayed_node(dir);
	if (IS_ERR(node))
		return PTR_ERR(node);

	item_key.objectid = btrfs_ino(dir);
	btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY);
	item_key.offset = index;

	/* A still-pending insertion cancels the deletion outright. */
	ret = btrfs_delete_delayed_insertion_item(root, node, &item_key);
	if (!ret)
		goto end;

	item = btrfs_alloc_delayed_item(0);
	if (!item) {
		ret = -ENOMEM;
		goto end;
	}

	item->key = item_key;

	ret = btrfs_delayed_item_reserve_metadata(trans, root, item);
	/*
	 * NOTE(review): a failed reservation crashes via BUG_ON() here;
	 * the code assumes it cannot fail on this path — confirm.
	 */
	BUG_ON(ret);

	mutex_lock(&node->mutex);
	ret = __btrfs_add_delayed_deletion_item(node, item);
	if (unlikely(ret)) {
		printk(KERN_ERR "err add delayed dir index item(index: %llu) "
				"into the deletion tree of the delayed node"
				"(root id: %llu, inode id: %llu, errno: %d)\n",
				(unsigned long long)index,
				(unsigned long long)node->root->objectid,
				(unsigned long long)node->inode_id,
				ret);
		BUG();
	}
	mutex_unlock(&node->mutex);
end:
	btrfs_release_delayed_node(node);
	return ret;
}
  1160. int btrfs_inode_delayed_dir_index_count(struct inode *inode)
  1161. {
  1162. struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode);
  1163. if (!delayed_node)
  1164. return -ENOENT;
  1165. if (!delayed_node->index_cnt) {
  1166. btrfs_release_delayed_node(delayed_node);
  1167. return -EINVAL;
  1168. }
  1169. BTRFS_I(inode)->index_cnt = delayed_node->index_cnt;
  1170. btrfs_release_delayed_node(delayed_node);
  1171. return 0;
  1172. }
/*
 * Snapshot the pending insertion and deletion dir-index items of
 * @inode for readdir.  Each item gets an extra reference and is linked
 * onto the caller-supplied lists; btrfs_put_delayed_items() drops them.
 */
void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
			     struct list_head *del_list)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_delayed_item *item;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return;

	mutex_lock(&delayed_node->mutex);
	item = __btrfs_first_delayed_insertion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, ins_list);
		item = __btrfs_next_delayed_item(item);
	}

	item = __btrfs_first_delayed_deletion_item(delayed_node);
	while (item) {
		atomic_inc(&item->refs);
		list_add_tail(&item->readdir_list, del_list);
		item = __btrfs_next_delayed_item(item);
	}
	mutex_unlock(&delayed_node->mutex);

	/*
	 * Bare atomic_dec instead of btrfs_release_delayed_node():
	 * presumably safe because the node is still cached in the btrfs
	 * inode, so at least one other reference exists and the free
	 * path cannot trigger here — TODO confirm against the callers.
	 */
	atomic_dec(&delayed_node->refs);
}
  1197. void btrfs_put_delayed_items(struct list_head *ins_list,
  1198. struct list_head *del_list)
  1199. {
  1200. struct btrfs_delayed_item *curr, *next;
  1201. list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
  1202. list_del(&curr->readdir_list);
  1203. if (atomic_dec_and_test(&curr->refs))
  1204. kfree(curr);
  1205. }
  1206. list_for_each_entry_safe(curr, next, del_list, readdir_list) {
  1207. list_del(&curr->readdir_list);
  1208. if (atomic_dec_and_test(&curr->refs))
  1209. kfree(curr);
  1210. }
  1211. }
  1212. int btrfs_should_delete_dir_index(struct list_head *del_list,
  1213. u64 index)
  1214. {
  1215. struct btrfs_delayed_item *curr, *next;
  1216. int ret;
  1217. if (list_empty(del_list))
  1218. return 0;
  1219. list_for_each_entry_safe(curr, next, del_list, readdir_list) {
  1220. if (curr->key.offset > index)
  1221. break;
  1222. list_del(&curr->readdir_list);
  1223. ret = (curr->key.offset == index);
  1224. if (atomic_dec_and_test(&curr->refs))
  1225. kfree(curr);
  1226. if (ret)
  1227. return 1;
  1228. else
  1229. continue;
  1230. }
  1231. return 0;
  1232. }
/*
 * Emit the delayed (not yet on-disk) dir index items during readdir.
 * Items below the current file position are skipped; each consumed
 * item's reference (taken in btrfs_get_delayed_items()) is dropped.
 *
 * Returns 1 when filldir reported a full buffer, otherwise 0.
 */
int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
				    filldir_t filldir,
				    struct list_head *ins_list)
{
	struct btrfs_dir_item *di;
	struct btrfs_delayed_item *curr, *next;
	struct btrfs_key location;
	char *name;
	int name_len;
	int over = 0;
	unsigned char d_type;

	if (list_empty(ins_list))
		return 0;

	list_for_each_entry_safe(curr, next, ins_list, readdir_list) {
		list_del(&curr->readdir_list);

		/* Already emitted by an earlier readdir call. */
		if (curr->key.offset < filp->f_pos) {
			if (atomic_dec_and_test(&curr->refs))
				kfree(curr);
			continue;
		}

		filp->f_pos = curr->key.offset;

		/* The dir item image lives in the delayed item's data area,
		 * with the name immediately behind the header. */
		di = (struct btrfs_dir_item *)curr->data;
		name = (char *)(di + 1);
		name_len = le16_to_cpu(di->name_len);

		d_type = btrfs_filetype_table[di->type];
		btrfs_disk_key_to_cpu(&location, &di->location);

		over = filldir(dirent, name, name_len, curr->key.offset,
			       location.objectid, d_type);

		if (atomic_dec_and_test(&curr->refs))
			kfree(curr);

		if (over)
			return 1;
	}
	return 0;
}
/*
 * Generate btrfs_set_stack_* / btrfs_stack_* accessor pairs for fields
 * of in-memory ("stack") btrfs_inode_item and btrfs_timespec copies
 * (macro defined elsewhere, presumably in ctree.h — confirm there for
 * the exact endianness handling).
 */
BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item,
			 generation, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item,
			 sequence, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item,
			 transid, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item,
			 nbytes, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item,
			 block_group, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);

BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32);
/*
 * Copy the VFS inode's current state into the in-memory ("stack")
 * btrfs_inode_item image kept on the delayed node.  The block group
 * field is always written as 0 here.
 */
static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
				  struct btrfs_inode_item *inode_item,
				  struct inode *inode)
{
	btrfs_set_stack_inode_uid(inode_item, inode->i_uid);
	btrfs_set_stack_inode_gid(inode_item, inode->i_gid);
	/* Size is taken from btrfs's disk_i_size, not inode->i_size. */
	btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_stack_inode_mode(inode_item, inode->i_mode);
	btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink);
	btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
	btrfs_set_stack_inode_generation(inode_item,
					 BTRFS_I(inode)->generation);
	btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence);
	btrfs_set_stack_inode_transid(inode_item, trans->transid);
	btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
	btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
	btrfs_set_stack_inode_block_group(inode_item, 0);

	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
				      inode->i_atime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
				     inode->i_mtime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
				      inode->i_mtime.tv_nsec);

	btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
				     inode->i_ctime.tv_sec);
	btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
				      inode->i_ctime.tv_nsec);
}
/*
 * Initialize a VFS inode from the delayed node's cached inode item,
 * avoiding a b-tree lookup when the delayed copy is dirty (newer than
 * what is on disk).
 *
 * Returns 0 on success; -ENOENT when there is no delayed node or its
 * inode item is not dirty.  *rdev receives the stored device number
 * (inode->i_rdev itself is zeroed; the caller applies *rdev).
 */
int btrfs_fill_inode(struct inode *inode, u32 *rdev)
{
	struct btrfs_delayed_node *delayed_node;
	struct btrfs_inode_item *inode_item;
	struct btrfs_timespec *tspec;

	delayed_node = btrfs_get_delayed_node(inode);
	if (!delayed_node)
		return -ENOENT;

	mutex_lock(&delayed_node->mutex);
	if (!delayed_node->inode_dirty) {
		mutex_unlock(&delayed_node->mutex);
		btrfs_release_delayed_node(delayed_node);
		return -ENOENT;
	}

	inode_item = &delayed_node->inode_item;

	inode->i_uid = btrfs_stack_inode_uid(inode_item);
	inode->i_gid = btrfs_stack_inode_gid(inode_item);
	btrfs_i_size_write(inode, btrfs_stack_inode_size(inode_item));
	inode->i_mode = btrfs_stack_inode_mode(inode_item);
	set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
	inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
	BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
	BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item);
	inode->i_rdev = 0;
	*rdev = btrfs_stack_inode_rdev(inode_item);
	BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);

	tspec = btrfs_inode_atime(inode_item);
	inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_mtime(inode_item);
	inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	tspec = btrfs_inode_ctime(inode_item);
	inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
	inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);

	inode->i_generation = BTRFS_I(inode)->generation;

	/* Force a fresh dir index lookup on the next use. */
	BTRFS_I(inode)->index_cnt = (u64)-1;

	mutex_unlock(&delayed_node->mutex);
	btrfs_release_delayed_node(delayed_node);
	return 0;
}
  1358. int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
  1359. struct btrfs_root *root, struct inode *inode)
  1360. {
  1361. struct btrfs_delayed_node *delayed_node;
  1362. int ret = 0;
  1363. delayed_node = btrfs_get_or_create_delayed_node(inode);
  1364. if (IS_ERR(delayed_node))
  1365. return PTR_ERR(delayed_node);
  1366. mutex_lock(&delayed_node->mutex);
  1367. if (delayed_node->inode_dirty) {
  1368. fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
  1369. goto release_node;
  1370. }
  1371. ret = btrfs_delayed_inode_reserve_metadata(trans, root, inode,
  1372. delayed_node);
  1373. if (ret)
  1374. goto release_node;
  1375. fill_stack_inode_item(trans, &delayed_node->inode_item, inode);
  1376. delayed_node->inode_dirty = 1;
  1377. delayed_node->count++;
  1378. atomic_inc(&root->fs_info->delayed_root->items);
  1379. release_node:
  1380. mutex_unlock(&delayed_node->mutex);
  1381. btrfs_release_delayed_node(delayed_node);
  1382. return ret;
  1383. }
  1384. static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node)
  1385. {
  1386. struct btrfs_root *root = delayed_node->root;
  1387. struct btrfs_delayed_item *curr_item, *prev_item;
  1388. mutex_lock(&delayed_node->mutex);
  1389. curr_item = __btrfs_first_delayed_insertion_item(delayed_node);
  1390. while (curr_item) {
  1391. btrfs_delayed_item_release_metadata(root, curr_item);
  1392. prev_item = curr_item;
  1393. curr_item = __btrfs_next_delayed_item(prev_item);
  1394. btrfs_release_delayed_item(prev_item);
  1395. }
  1396. curr_item = __btrfs_first_delayed_deletion_item(delayed_node);
  1397. while (curr_item) {
  1398. btrfs_delayed_item_release_metadata(root, curr_item);
  1399. prev_item = curr_item;
  1400. curr_item = __btrfs_next_delayed_item(prev_item);
  1401. btrfs_release_delayed_item(prev_item);
  1402. }
  1403. if (delayed_node->inode_dirty) {
  1404. btrfs_delayed_inode_release_metadata(root, delayed_node);
  1405. btrfs_release_delayed_inode(delayed_node);
  1406. }
  1407. mutex_unlock(&delayed_node->mutex);
  1408. }
  1409. void btrfs_kill_delayed_inode_items(struct inode *inode)
  1410. {
  1411. struct btrfs_delayed_node *delayed_node;
  1412. delayed_node = btrfs_get_delayed_node(inode);
  1413. if (!delayed_node)
  1414. return;
  1415. __btrfs_kill_delayed_node(delayed_node);
  1416. btrfs_release_delayed_node(delayed_node);
  1417. }
/*
 * Throw away every delayed node of @root: walk the radix tree in
 * batches of 8, pin each node with an extra reference while the lock
 * is dropped, then release all of its pending items.
 */
void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
{
	u64 inode_id = 0;
	struct btrfs_delayed_node *delayed_nodes[8];
	int i, n;

	while (1) {
		spin_lock(&root->inode_lock);
		n = radix_tree_gang_lookup(&root->delayed_nodes_tree,
					   (void **)delayed_nodes, inode_id,
					   ARRAY_SIZE(delayed_nodes));
		if (!n) {
			spin_unlock(&root->inode_lock);
			break;
		}

		/* Resume the next batch just past the highest id seen. */
		inode_id = delayed_nodes[n - 1]->inode_id + 1;

		/* Pin the whole batch before dropping the lock. */
		for (i = 0; i < n; i++)
			atomic_inc(&delayed_nodes[i]->refs);
		spin_unlock(&root->inode_lock);

		for (i = 0; i < n; i++) {
			__btrfs_kill_delayed_node(delayed_nodes[i]);
			btrfs_release_delayed_node(delayed_nodes[i]);
		}
	}
}