PageRenderTime 61ms CodeModel.GetById 8ms RepoModel.GetById 0ms app.codeStats 2ms

/fs/ocfs2/alloc.c

https://gitlab.com/LiquidSmooth-Devices/android_kernel_htc_msm8974
C | 6079 lines | 4928 code | 1109 blank | 42 comment | 781 complexity | 34cfb83c6db173800841d0da37a34953 MD5 | raw file
Possible License(s): GPL-2.0

Large files are truncated, but you can click here to view the full file

  1. /* -*- mode: c; c-basic-offset: 8; -*-
  2. * vim: noexpandtab sw=8 ts=8 sts=0:
  3. *
  4. * alloc.c
  5. *
  6. * Extent allocs and frees
  7. *
  8. * Copyright (C) 2002, 2004 Oracle. All rights reserved.
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public
  12. * License as published by the Free Software Foundation; either
  13. * version 2 of the License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  18. * General Public License for more details.
  19. *
  20. * You should have received a copy of the GNU General Public
  21. * License along with this program; if not, write to the
  22. * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
  23. * Boston, MA 021110-1307, USA.
  24. */
  25. #include <linux/fs.h>
  26. #include <linux/types.h>
  27. #include <linux/slab.h>
  28. #include <linux/highmem.h>
  29. #include <linux/swap.h>
  30. #include <linux/quotaops.h>
  31. #include <linux/blkdev.h>
  32. #include <cluster/masklog.h>
  33. #include "ocfs2.h"
  34. #include "alloc.h"
  35. #include "aops.h"
  36. #include "blockcheck.h"
  37. #include "dlmglue.h"
  38. #include "extent_map.h"
  39. #include "inode.h"
  40. #include "journal.h"
  41. #include "localalloc.h"
  42. #include "suballoc.h"
  43. #include "sysfile.h"
  44. #include "file.h"
  45. #include "super.h"
  46. #include "uptodate.h"
  47. #include "xattr.h"
  48. #include "refcounttree.h"
  49. #include "ocfs2_trace.h"
  50. #include "buffer_head_io.h"
/*
 * How a record being inserted relates to an existing extent record:
 * not mergeable at all, mergeable with the record on its left, on its
 * right, or on both sides.
 */
enum ocfs2_contig_type {
	CONTIG_NONE = 0,
	CONTIG_LEFT,
	CONTIG_RIGHT,
	CONTIG_LEFTRIGHT,
};
  57. static enum ocfs2_contig_type
  58. ocfs2_extent_rec_contig(struct super_block *sb,
  59. struct ocfs2_extent_rec *ext,
  60. struct ocfs2_extent_rec *insert_rec);
/*
 * Operations for a specific extent tree type.
 *
 * To implement an on-disk btree (extent tree) type in ocfs2, add
 * an ocfs2_extent_tree_operations structure and the matching
 * ocfs2_init_<thingy>_extent_tree() function.
 */
struct ocfs2_extent_tree_operations {
	/*
	 * last_eb_blk is the block number of the right most leaf extent
	 * block.  Most on-disk structures containing an extent tree store
	 * it for fast access.  These two are required.
	 */
	void (*eo_set_last_eb_blk)(struct ocfs2_extent_tree *et,
				   u64 blkno);
	u64 (*eo_get_last_eb_blk)(struct ocfs2_extent_tree *et);

	/*
	 * The on-disk structure usually keeps track of how many total
	 * clusters are stored in this extent tree.  This function updates
	 * that value.  new_clusters is the delta, and must be
	 * added to the total.  Required.
	 */
	void (*eo_update_clusters)(struct ocfs2_extent_tree *et,
				   u32 new_clusters);

	/*
	 * If this extent tree is supported by an extent map, insert
	 * a record into the map.  Optional.
	 */
	void (*eo_extent_map_insert)(struct ocfs2_extent_tree *et,
				     struct ocfs2_extent_rec *rec);

	/*
	 * If this extent tree is supported by an extent map, truncate the
	 * map to clusters.  Optional.
	 */
	void (*eo_extent_map_truncate)(struct ocfs2_extent_tree *et,
				       u32 clusters);

	/*
	 * If ->eo_insert_check() exists, it is called before rec is
	 * inserted into the extent tree.  Optional.
	 */
	int (*eo_insert_check)(struct ocfs2_extent_tree *et,
			       struct ocfs2_extent_rec *rec);
	/* Optional consistency check of the tree's root object. */
	int (*eo_sanity_check)(struct ocfs2_extent_tree *et);

	/* fill_root_el() sets et->et_root_el.  Required. */
	void (*eo_fill_root_el)(struct ocfs2_extent_tree *et);

	/*
	 * fill_max_leaf_clusters sets et->et_max_leaf_clusters if the
	 * tree type requires a cap on leaf extents; otherwise the field
	 * stays zero.  Optional.
	 */
	void (*eo_fill_max_leaf_clusters)(struct ocfs2_extent_tree *et);

	/*
	 * ->eo_extent_contig() tests whether two extent records are
	 * contiguous.  Optional; when absent the generic
	 * ocfs2_extent_rec_contig() is used.
	 */
	enum ocfs2_contig_type
		(*eo_extent_contig)(struct ocfs2_extent_tree *et,
				    struct ocfs2_extent_rec *ext,
				    struct ocfs2_extent_rec *insert_rec);
};
  81. static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et);
  82. static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
  83. u64 blkno);
  84. static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
  85. u32 clusters);
  86. static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
  87. struct ocfs2_extent_rec *rec);
  88. static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
  89. u32 clusters);
  90. static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
  91. struct ocfs2_extent_rec *rec);
  92. static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et);
  93. static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et);
/* Extent tree operations for trees rooted in an inode's dinode. */
static struct ocfs2_extent_tree_operations ocfs2_dinode_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_dinode_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_dinode_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_dinode_update_clusters,
	.eo_extent_map_insert	= ocfs2_dinode_extent_map_insert,
	.eo_extent_map_truncate	= ocfs2_dinode_extent_map_truncate,
	.eo_insert_check	= ocfs2_dinode_insert_check,
	.eo_sanity_check	= ocfs2_dinode_sanity_check,
	.eo_fill_root_el	= ocfs2_dinode_fill_root_el,
};
  104. static void ocfs2_dinode_set_last_eb_blk(struct ocfs2_extent_tree *et,
  105. u64 blkno)
  106. {
  107. struct ocfs2_dinode *di = et->et_object;
  108. BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
  109. di->i_last_eb_blk = cpu_to_le64(blkno);
  110. }
  111. static u64 ocfs2_dinode_get_last_eb_blk(struct ocfs2_extent_tree *et)
  112. {
  113. struct ocfs2_dinode *di = et->et_object;
  114. BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
  115. return le64_to_cpu(di->i_last_eb_blk);
  116. }
static void ocfs2_dinode_update_clusters(struct ocfs2_extent_tree *et,
					 u32 clusters)
{
	struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
	struct ocfs2_dinode *di = et->et_object;

	/* Add the delta to the on-disk count, then mirror the new value
	 * into the in-memory inode under ip_lock so concurrent readers
	 * of ip_clusters see a consistent total. */
	le32_add_cpu(&di->i_clusters, clusters);
	spin_lock(&oi->ip_lock);
	oi->ip_clusters = le32_to_cpu(di->i_clusters);
	spin_unlock(&oi->ip_lock);
}
  127. static void ocfs2_dinode_extent_map_insert(struct ocfs2_extent_tree *et,
  128. struct ocfs2_extent_rec *rec)
  129. {
  130. struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;
  131. ocfs2_extent_map_insert_rec(inode, rec);
  132. }
  133. static void ocfs2_dinode_extent_map_truncate(struct ocfs2_extent_tree *et,
  134. u32 clusters)
  135. {
  136. struct inode *inode = &cache_info_to_inode(et->et_ci)->vfs_inode;
  137. ocfs2_extent_map_trunc(inode, clusters);
  138. }
static int ocfs2_dinode_insert_check(struct ocfs2_extent_tree *et,
				     struct ocfs2_extent_rec *rec)
{
	struct ocfs2_inode_info *oi = cache_info_to_inode(et->et_ci);
	struct ocfs2_super *osb = OCFS2_SB(oi->vfs_inode.i_sb);

	/* Inline-data inodes have no extent tree to insert into. */
	BUG_ON(oi->ip_dyn_features & OCFS2_INLINE_DATA_FL);
	/* Without the sparse-alloc feature, inserts must land exactly at
	 * the current end of allocation (no holes allowed). */
	mlog_bug_on_msg(!ocfs2_sparse_alloc(osb) &&
			(oi->ip_clusters != le32_to_cpu(rec->e_cpos)),
			"Device %s, asking for sparse allocation: inode %llu, "
			"cpos %u, clusters %u\n",
			osb->dev_str,
			(unsigned long long)oi->ip_blkno,
			rec->e_cpos, oi->ip_clusters);

	return 0;
}
  154. static int ocfs2_dinode_sanity_check(struct ocfs2_extent_tree *et)
  155. {
  156. struct ocfs2_dinode *di = et->et_object;
  157. BUG_ON(et->et_ops != &ocfs2_dinode_et_ops);
  158. BUG_ON(!OCFS2_IS_VALID_DINODE(di));
  159. return 0;
  160. }
  161. static void ocfs2_dinode_fill_root_el(struct ocfs2_extent_tree *et)
  162. {
  163. struct ocfs2_dinode *di = et->et_object;
  164. et->et_root_el = &di->id2.i_list;
  165. }
  166. static void ocfs2_xattr_value_fill_root_el(struct ocfs2_extent_tree *et)
  167. {
  168. struct ocfs2_xattr_value_buf *vb = et->et_object;
  169. et->et_root_el = &vb->vb_xv->xr_list;
  170. }
  171. static void ocfs2_xattr_value_set_last_eb_blk(struct ocfs2_extent_tree *et,
  172. u64 blkno)
  173. {
  174. struct ocfs2_xattr_value_buf *vb = et->et_object;
  175. vb->vb_xv->xr_last_eb_blk = cpu_to_le64(blkno);
  176. }
  177. static u64 ocfs2_xattr_value_get_last_eb_blk(struct ocfs2_extent_tree *et)
  178. {
  179. struct ocfs2_xattr_value_buf *vb = et->et_object;
  180. return le64_to_cpu(vb->vb_xv->xr_last_eb_blk);
  181. }
  182. static void ocfs2_xattr_value_update_clusters(struct ocfs2_extent_tree *et,
  183. u32 clusters)
  184. {
  185. struct ocfs2_xattr_value_buf *vb = et->et_object;
  186. le32_add_cpu(&vb->vb_xv->xr_clusters, clusters);
  187. }
/* Extent tree operations for trees rooted in an xattr value root. */
static struct ocfs2_extent_tree_operations ocfs2_xattr_value_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_xattr_value_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_xattr_value_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_xattr_value_update_clusters,
	.eo_fill_root_el	= ocfs2_xattr_value_fill_root_el,
};
  194. static void ocfs2_xattr_tree_fill_root_el(struct ocfs2_extent_tree *et)
  195. {
  196. struct ocfs2_xattr_block *xb = et->et_object;
  197. et->et_root_el = &xb->xb_attrs.xb_root.xt_list;
  198. }
static void ocfs2_xattr_tree_fill_max_leaf_clusters(struct ocfs2_extent_tree *et)
{
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	/* Xattr tree leaves are capped at OCFS2_MAX_XATTR_TREE_LEAF_SIZE
	 * bytes, expressed here in clusters for this filesystem. */
	et->et_max_leaf_clusters =
		ocfs2_clusters_for_bytes(sb, OCFS2_MAX_XATTR_TREE_LEAF_SIZE);
}
  205. static void ocfs2_xattr_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
  206. u64 blkno)
  207. {
  208. struct ocfs2_xattr_block *xb = et->et_object;
  209. struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
  210. xt->xt_last_eb_blk = cpu_to_le64(blkno);
  211. }
  212. static u64 ocfs2_xattr_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
  213. {
  214. struct ocfs2_xattr_block *xb = et->et_object;
  215. struct ocfs2_xattr_tree_root *xt = &xb->xb_attrs.xb_root;
  216. return le64_to_cpu(xt->xt_last_eb_blk);
  217. }
  218. static void ocfs2_xattr_tree_update_clusters(struct ocfs2_extent_tree *et,
  219. u32 clusters)
  220. {
  221. struct ocfs2_xattr_block *xb = et->et_object;
  222. le32_add_cpu(&xb->xb_attrs.xb_root.xt_clusters, clusters);
  223. }
/* Extent tree operations for trees rooted in an xattr block. */
static struct ocfs2_extent_tree_operations ocfs2_xattr_tree_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_xattr_tree_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_xattr_tree_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_xattr_tree_update_clusters,
	.eo_fill_root_el	= ocfs2_xattr_tree_fill_root_el,
	.eo_fill_max_leaf_clusters = ocfs2_xattr_tree_fill_max_leaf_clusters,
};
  231. static void ocfs2_dx_root_set_last_eb_blk(struct ocfs2_extent_tree *et,
  232. u64 blkno)
  233. {
  234. struct ocfs2_dx_root_block *dx_root = et->et_object;
  235. dx_root->dr_last_eb_blk = cpu_to_le64(blkno);
  236. }
  237. static u64 ocfs2_dx_root_get_last_eb_blk(struct ocfs2_extent_tree *et)
  238. {
  239. struct ocfs2_dx_root_block *dx_root = et->et_object;
  240. return le64_to_cpu(dx_root->dr_last_eb_blk);
  241. }
  242. static void ocfs2_dx_root_update_clusters(struct ocfs2_extent_tree *et,
  243. u32 clusters)
  244. {
  245. struct ocfs2_dx_root_block *dx_root = et->et_object;
  246. le32_add_cpu(&dx_root->dr_clusters, clusters);
  247. }
  248. static int ocfs2_dx_root_sanity_check(struct ocfs2_extent_tree *et)
  249. {
  250. struct ocfs2_dx_root_block *dx_root = et->et_object;
  251. BUG_ON(!OCFS2_IS_VALID_DX_ROOT(dx_root));
  252. return 0;
  253. }
  254. static void ocfs2_dx_root_fill_root_el(struct ocfs2_extent_tree *et)
  255. {
  256. struct ocfs2_dx_root_block *dx_root = et->et_object;
  257. et->et_root_el = &dx_root->dr_list;
  258. }
/* Extent tree operations for trees rooted in an indexed-dir root block. */
static struct ocfs2_extent_tree_operations ocfs2_dx_root_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_dx_root_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_dx_root_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_dx_root_update_clusters,
	.eo_sanity_check	= ocfs2_dx_root_sanity_check,
	.eo_fill_root_el	= ocfs2_dx_root_fill_root_el,
};
  266. static void ocfs2_refcount_tree_fill_root_el(struct ocfs2_extent_tree *et)
  267. {
  268. struct ocfs2_refcount_block *rb = et->et_object;
  269. et->et_root_el = &rb->rf_list;
  270. }
  271. static void ocfs2_refcount_tree_set_last_eb_blk(struct ocfs2_extent_tree *et,
  272. u64 blkno)
  273. {
  274. struct ocfs2_refcount_block *rb = et->et_object;
  275. rb->rf_last_eb_blk = cpu_to_le64(blkno);
  276. }
  277. static u64 ocfs2_refcount_tree_get_last_eb_blk(struct ocfs2_extent_tree *et)
  278. {
  279. struct ocfs2_refcount_block *rb = et->et_object;
  280. return le64_to_cpu(rb->rf_last_eb_blk);
  281. }
  282. static void ocfs2_refcount_tree_update_clusters(struct ocfs2_extent_tree *et,
  283. u32 clusters)
  284. {
  285. struct ocfs2_refcount_block *rb = et->et_object;
  286. le32_add_cpu(&rb->rf_clusters, clusters);
  287. }
/* Refcount tree records are never merged, so always report CONTIG_NONE. */
static enum ocfs2_contig_type
ocfs2_refcount_tree_extent_contig(struct ocfs2_extent_tree *et,
				  struct ocfs2_extent_rec *ext,
				  struct ocfs2_extent_rec *insert_rec)
{
	return CONTIG_NONE;
}
/* Extent tree operations for trees rooted in a refcount block. */
static struct ocfs2_extent_tree_operations ocfs2_refcount_tree_et_ops = {
	.eo_set_last_eb_blk	= ocfs2_refcount_tree_set_last_eb_blk,
	.eo_get_last_eb_blk	= ocfs2_refcount_tree_get_last_eb_blk,
	.eo_update_clusters	= ocfs2_refcount_tree_update_clusters,
	.eo_fill_root_el	= ocfs2_refcount_tree_fill_root_el,
	.eo_extent_contig	= ocfs2_refcount_tree_extent_contig,
};
/*
 * Common initializer for every extent tree type: wires in the ops
 * table, root buffer, caching info and journal access function, then
 * lets the ops fill in the root extent list and optional leaf cap.
 */
static void __ocfs2_init_extent_tree(struct ocfs2_extent_tree *et,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *bh,
				     ocfs2_journal_access_func access,
				     void *obj,
				     struct ocfs2_extent_tree_operations *ops)
{
	et->et_ops = ops;
	et->et_root_bh = bh;
	et->et_ci = ci;
	et->et_root_journal_access = access;
	/* Most tree types root directly in the buffer's data. */
	if (!obj)
		obj = (void *)bh->b_data;
	et->et_object = obj;

	et->et_ops->eo_fill_root_el(et);
	if (!et->et_ops->eo_fill_max_leaf_clusters)
		et->et_max_leaf_clusters = 0;
	else
		et->et_ops->eo_fill_max_leaf_clusters(et);
}
/* Initialize an extent tree rooted in a dinode buffer. */
void ocfs2_init_dinode_extent_tree(struct ocfs2_extent_tree *et,
				   struct ocfs2_caching_info *ci,
				   struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_di,
				 NULL, &ocfs2_dinode_et_ops);
}
/* Initialize an extent tree rooted in an xattr block buffer. */
void ocfs2_init_xattr_tree_extent_tree(struct ocfs2_extent_tree *et,
				       struct ocfs2_caching_info *ci,
				       struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_xb,
				 NULL, &ocfs2_xattr_tree_et_ops);
}
/* Initialize an extent tree rooted in an xattr value buffer; the
 * value buf supplies both the root bh and the journal access func. */
void ocfs2_init_xattr_value_extent_tree(struct ocfs2_extent_tree *et,
					struct ocfs2_caching_info *ci,
					struct ocfs2_xattr_value_buf *vb)
{
	__ocfs2_init_extent_tree(et, ci, vb->vb_bh, vb->vb_access, vb,
				 &ocfs2_xattr_value_et_ops);
}
/* Initialize an extent tree rooted in an indexed-dir root buffer. */
void ocfs2_init_dx_root_extent_tree(struct ocfs2_extent_tree *et,
				    struct ocfs2_caching_info *ci,
				    struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_dr,
				 NULL, &ocfs2_dx_root_et_ops);
}
/* Initialize an extent tree rooted in a refcount block buffer. */
void ocfs2_init_refcount_extent_tree(struct ocfs2_extent_tree *et,
				     struct ocfs2_caching_info *ci,
				     struct buffer_head *bh)
{
	__ocfs2_init_extent_tree(et, ci, bh, ocfs2_journal_access_rb,
				 NULL, &ocfs2_refcount_tree_et_ops);
}
/* Dispatch to the tree type's set_last_eb_blk op (required). */
static inline void ocfs2_et_set_last_eb_blk(struct ocfs2_extent_tree *et,
					    u64 new_last_eb_blk)
{
	et->et_ops->eo_set_last_eb_blk(et, new_last_eb_blk);
}
/* Dispatch to the tree type's get_last_eb_blk op (required). */
static inline u64 ocfs2_et_get_last_eb_blk(struct ocfs2_extent_tree *et)
{
	return et->et_ops->eo_get_last_eb_blk(et);
}
/* Dispatch to the tree type's update_clusters op (required);
 * clusters is a delta added to the tree's total. */
static inline void ocfs2_et_update_clusters(struct ocfs2_extent_tree *et,
					    u32 clusters)
{
	et->et_ops->eo_update_clusters(et, clusters);
}
/* Insert rec into the backing extent map, if this tree type has one. */
static inline void ocfs2_et_extent_map_insert(struct ocfs2_extent_tree *et,
					      struct ocfs2_extent_rec *rec)
{
	if (et->et_ops->eo_extent_map_insert)
		et->et_ops->eo_extent_map_insert(et, rec);
}
/* Truncate the backing extent map, if this tree type has one. */
static inline void ocfs2_et_extent_map_truncate(struct ocfs2_extent_tree *et,
						u32 clusters)
{
	if (et->et_ops->eo_extent_map_truncate)
		et->et_ops->eo_extent_map_truncate(et, clusters);
}
/* Journal the tree's root buffer with the type-specific access func. */
static inline int ocfs2_et_root_journal_access(handle_t *handle,
					       struct ocfs2_extent_tree *et,
					       int type)
{
	return et->et_root_journal_access(handle, et->et_ci, et->et_root_bh,
					  type);
}
/* Contiguity test between rec and insert_rec: use the tree type's
 * override when present, otherwise the generic record comparison. */
static inline enum ocfs2_contig_type
	ocfs2_et_extent_contig(struct ocfs2_extent_tree *et,
			       struct ocfs2_extent_rec *rec,
			       struct ocfs2_extent_rec *insert_rec)
{
	if (et->et_ops->eo_extent_contig)
		return et->et_ops->eo_extent_contig(et, rec, insert_rec);

	return ocfs2_extent_rec_contig(
				ocfs2_metadata_cache_get_super(et->et_ci),
				rec, insert_rec);
}
  401. static inline int ocfs2_et_insert_check(struct ocfs2_extent_tree *et,
  402. struct ocfs2_extent_rec *rec)
  403. {
  404. int ret = 0;
  405. if (et->et_ops->eo_insert_check)
  406. ret = et->et_ops->eo_insert_check(et, rec);
  407. return ret;
  408. }
  409. static inline int ocfs2_et_sanity_check(struct ocfs2_extent_tree *et)
  410. {
  411. int ret = 0;
  412. if (et->et_ops->eo_sanity_check)
  413. ret = et->et_ops->eo_sanity_check(et);
  414. return ret;
  415. }
  416. static int ocfs2_cache_extent_block_free(struct ocfs2_cached_dealloc_ctxt *ctxt,
  417. struct ocfs2_extent_block *eb);
  418. static void ocfs2_adjust_rightmost_records(handle_t *handle,
  419. struct ocfs2_extent_tree *et,
  420. struct ocfs2_path *path,
  421. struct ocfs2_extent_rec *insert_rec);
/*
 * Reset the actual path elements so that we can re-use the structure
 * to build another path. Generally, this involves freeing the buffer
 * heads.
 */
void ocfs2_reinit_path(struct ocfs2_path *path, int keep_root)
{
	int i, start = 0, depth = 0;
	struct ocfs2_path_item *node;

	if (keep_root)
		start = 1;

	for(i = start; i < path_num_items(path); i++) {
		node = &path->p_node[i];

		brelse(node->bh);
		node->bh = NULL;
		node->el = NULL;
	}

	/*
	 * Tree depth may change during truncate, or insert. If we're
	 * keeping the root extent list, then make sure that our path
	 * structure reflects the proper depth.
	 */
	if (keep_root)
		depth = le16_to_cpu(path_root_el(path)->l_tree_depth);
	else
		path_root_access(path) = NULL;

	path->p_tree_depth = depth;
}
  440. void ocfs2_free_path(struct ocfs2_path *path)
  441. {
  442. if (path) {
  443. ocfs2_reinit_path(path, 0);
  444. kfree(path);
  445. }
  446. }
/*
 * Copy all the elements of src into dest. After this call, src could
 * be freed without affecting dest.
 *
 * Both paths should have the same root. Any non-root elements of dest
 * will be freed.
 */
static void ocfs2_cp_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_el(dest) != path_root_el(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	ocfs2_reinit_path(dest, 1);

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		/* dest takes its own reference on each shared buffer. */
		if (dest->p_node[i].bh)
			get_bh(dest->p_node[i].bh);
	}
}
/*
 * Make the *dest path the same as src and re-initialize src path to
 * have a root only.  Buffer references are transferred, not duplicated.
 */
static void ocfs2_mv_path(struct ocfs2_path *dest, struct ocfs2_path *src)
{
	int i;

	BUG_ON(path_root_bh(dest) != path_root_bh(src));
	BUG_ON(path_root_access(dest) != path_root_access(src));

	for(i = 1; i < OCFS2_MAX_PATH_DEPTH; i++) {
		brelse(dest->p_node[i].bh);

		dest->p_node[i].bh = src->p_node[i].bh;
		dest->p_node[i].el = src->p_node[i].el;

		src->p_node[i].bh = NULL;
		src->p_node[i].el = NULL;
	}
}
/*
 * Insert an extent block at given index.
 *
 * This will not take an additional reference on eb_bh.
 */
static inline void ocfs2_path_insert_eb(struct ocfs2_path *path, int index,
					struct buffer_head *eb_bh)
{
	struct ocfs2_extent_block *eb = (struct ocfs2_extent_block *)eb_bh->b_data;

	/* Index zero is the root and is never an extent block. */
	BUG_ON(index == 0);

	path->p_node[index].bh = eb_bh;
	path->p_node[index].el = &eb->h_list;
}
/*
 * Allocate a path whose root is the given extent list / buffer pair,
 * journaled via access.  Takes a reference on root_bh.  Returns NULL
 * on allocation failure.
 */
static struct ocfs2_path *ocfs2_new_path(struct buffer_head *root_bh,
					 struct ocfs2_extent_list *root_el,
					 ocfs2_journal_access_func access)
{
	struct ocfs2_path *path;

	BUG_ON(le16_to_cpu(root_el->l_tree_depth) >= OCFS2_MAX_PATH_DEPTH);

	path = kzalloc(sizeof(*path), GFP_NOFS);
	if (path) {
		path->p_tree_depth = le16_to_cpu(root_el->l_tree_depth);
		get_bh(root_bh);
		path_root_bh(path) = root_bh;
		path_root_el(path) = root_el;
		path_root_access(path) = access;
	}

	return path;
}
/* Allocate a fresh path sharing the root of an existing path. */
struct ocfs2_path *ocfs2_new_path_from_path(struct ocfs2_path *path)
{
	return ocfs2_new_path(path_root_bh(path), path_root_el(path),
			      path_root_access(path));
}
/* Allocate a fresh path rooted at an extent tree's root buffer. */
struct ocfs2_path *ocfs2_new_path_from_et(struct ocfs2_extent_tree *et)
{
	return ocfs2_new_path(et->et_root_bh, et->et_root_el,
			      et->et_root_journal_access);
}
/*
 * Journal the buffer at depth idx.  All idx>0 are extent_blocks,
 * otherwise use the root journal access function.
 */
int ocfs2_path_bh_journal_access(handle_t *handle,
				 struct ocfs2_caching_info *ci,
				 struct ocfs2_path *path,
				 int idx)
{
	ocfs2_journal_access_func access = path_root_access(path);

	if (!access)
		access = ocfs2_journal_access;

	if (idx)
		access = ocfs2_journal_access_eb;

	return access(handle, ci, path->p_node[idx].bh,
		      OCFS2_JOURNAL_ACCESS_WRITE);
}
/*
 * Convenience function to journal all components in a path.
 */
int ocfs2_journal_access_path(struct ocfs2_caching_info *ci,
			      handle_t *handle,
			      struct ocfs2_path *path)
{
	int i, ret = 0;

	if (!path)
		goto out;

	for(i = 0; i < path_num_items(path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, ci, path, i);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	return ret;
}
  538. int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster)
  539. {
  540. int ret = -1;
  541. int i;
  542. struct ocfs2_extent_rec *rec;
  543. u32 rec_end, rec_start, clusters;
  544. for(i = 0; i < le16_to_cpu(el->l_next_free_rec); i++) {
  545. rec = &el->l_recs[i];
  546. rec_start = le32_to_cpu(rec->e_cpos);
  547. clusters = ocfs2_rec_clusters(el, rec);
  548. rec_end = rec_start + clusters;
  549. if (v_cluster >= rec_start && v_cluster < rec_end) {
  550. ret = i;
  551. break;
  552. }
  553. }
  554. return ret;
  555. }
  556. static int ocfs2_block_extent_contig(struct super_block *sb,
  557. struct ocfs2_extent_rec *ext,
  558. u64 blkno)
  559. {
  560. u64 blk_end = le64_to_cpu(ext->e_blkno);
  561. blk_end += ocfs2_clusters_to_blocks(sb,
  562. le16_to_cpu(ext->e_leaf_clusters));
  563. return blkno == blk_end;
  564. }
  565. static int ocfs2_extents_adjacent(struct ocfs2_extent_rec *left,
  566. struct ocfs2_extent_rec *right)
  567. {
  568. u32 left_range;
  569. left_range = le32_to_cpu(left->e_cpos) +
  570. le16_to_cpu(left->e_leaf_clusters);
  571. return (left_range == le32_to_cpu(right->e_cpos));
  572. }
/*
 * Default contiguity check: insert_rec merges with ext only when both
 * the logical (cluster) and physical (block) ranges line up, on either
 * side.  Used for all tree types without an eo_extent_contig override.
 */
static enum ocfs2_contig_type
	ocfs2_extent_rec_contig(struct super_block *sb,
				struct ocfs2_extent_rec *ext,
				struct ocfs2_extent_rec *insert_rec)
{
	u64 blkno = le64_to_cpu(insert_rec->e_blkno);

	/*
	 * Refuse to coalesce extent records with different flag
	 * fields - we don't want to mix unwritten extents with user
	 * data.
	 */
	if (ext->e_flags != insert_rec->e_flags)
		return CONTIG_NONE;

	if (ocfs2_extents_adjacent(ext, insert_rec) &&
	    ocfs2_block_extent_contig(sb, ext, blkno))
		return CONTIG_RIGHT;

	blkno = le64_to_cpu(ext->e_blkno);
	if (ocfs2_extents_adjacent(insert_rec, ext) &&
	    ocfs2_block_extent_contig(sb, insert_rec, blkno))
		return CONTIG_LEFT;

	return CONTIG_NONE;
}
/* Whether an insert lands past the current end of allocation. */
enum ocfs2_append_type {
	APPEND_NONE = 0,
	APPEND_TAIL,
};
/* Which side of an existing record a split insert replaces, if any. */
enum ocfs2_split_type {
	SPLIT_NONE = 0,
	SPLIT_LEFT,
	SPLIT_RIGHT,
};
/*
 * Describes how a record will be inserted: split kind, whether it
 * appends at the tail, contiguity with an existing record (and that
 * record's index), and the tree depth the insert code must walk.
 */
struct ocfs2_insert_type {
	enum ocfs2_split_type	ins_split;
	enum ocfs2_append_type	ins_appending;
	enum ocfs2_contig_type	ins_contig;
	int ins_contig_index;
	int ins_tree_depth;
};
/*
 * State for merging a split record with its neighbors: contiguity,
 * whether the leaf holds an empty record, and whether the split range
 * covers the whole existing record.
 */
struct ocfs2_merge_ctxt {
	enum ocfs2_contig_type	c_contig_type;
	int			c_has_empty_extent;
	int			c_split_covers_rec;
};
/*
 * Validate a just-read extent block: metadata ecc first, then
 * signature, self-referential block number and fs generation.
 * Returns 0 when valid, -EINVAL or the ecc error otherwise.
 */
static int ocfs2_validate_extent_block(struct super_block *sb,
				       struct buffer_head *bh)
{
	int rc;
	struct ocfs2_extent_block *eb =
		(struct ocfs2_extent_block *)bh->b_data;

	trace_ocfs2_validate_extent_block((unsigned long long)bh->b_blocknr);

	BUG_ON(!buffer_uptodate(bh));

	/*
	 * If the ecc fails, we return the error but otherwise
	 * leave the buffer alone.
	 */
	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &eb->h_check);
	if (rc) {
		mlog(ML_ERROR, "Checksum failed for extent block %llu\n",
		     (unsigned long long)bh->b_blocknr);
		return rc;
	}

	/*
	 * Errors after here are fatal.
	 */
	if (!OCFS2_IS_VALID_EXTENT_BLOCK(eb)) {
		ocfs2_error(sb,
			    "Extent block #%llu has bad signature %.*s",
			    (unsigned long long)bh->b_blocknr, 7,
			    eb->h_signature);
		return -EINVAL;
	}

	if (le64_to_cpu(eb->h_blkno) != bh->b_blocknr) {
		ocfs2_error(sb,
			    "Extent block #%llu has an invalid h_blkno "
			    "of %llu",
			    (unsigned long long)bh->b_blocknr,
			    (unsigned long long)le64_to_cpu(eb->h_blkno));
		return -EINVAL;
	}

	if (le32_to_cpu(eb->h_fs_generation) != OCFS2_SB(sb)->fs_generation) {
		ocfs2_error(sb,
			    "Extent block #%llu has an invalid "
			    "h_fs_generation of #%u",
			    (unsigned long long)bh->b_blocknr,
			    le32_to_cpu(eb->h_fs_generation));
		return -EINVAL;
	}

	return 0;
}
/*
 * Read and validate the extent block at eb_blkno.  If *bh is NULL a
 * buffer head is allocated and returned through it; an existing *bh
 * is reused.  Returns 0 on success, negative errno otherwise.
 */
int ocfs2_read_extent_block(struct ocfs2_caching_info *ci, u64 eb_blkno,
			    struct buffer_head **bh)
{
	int rc;
	struct buffer_head *tmp = *bh;

	rc = ocfs2_read_block(ci, eb_blkno, &tmp,
			      ocfs2_validate_extent_block);

	/* If ocfs2_read_block() got us a new bh, pass it up. */
	if (!rc && !*bh)
		*bh = tmp;

	return rc;
}
/*
 * How many free extent records do we have left in the rightmost leaf
 * before more metadata is needed?  Reads the last extent block when
 * the tree has one; otherwise counts in the root list.  Returns the
 * free count, or a negative errno on read failure.
 */
int ocfs2_num_free_extents(struct ocfs2_super *osb,
			   struct ocfs2_extent_tree *et)
{
	int retval;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_block *eb;
	struct buffer_head *eb_bh = NULL;
	u64 last_eb_blk = 0;

	el = et->et_root_el;
	last_eb_blk = ocfs2_et_get_last_eb_blk(et);

	if (last_eb_blk) {
		retval = ocfs2_read_extent_block(et->et_ci, last_eb_blk,
						 &eb_bh);
		if (retval < 0) {
			mlog_errno(retval);
			goto bail;
		}
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	}

	/* We only count free records in a leaf. */
	BUG_ON(el->l_tree_depth != 0);

	retval = le16_to_cpu(el->l_count) - le16_to_cpu(el->l_next_free_rec);
bail:
	brelse(eb_bh);

	trace_ocfs2_num_free_extents(retval);
	return retval;
}
  693. static int ocfs2_create_new_meta_bhs(handle_t *handle,
  694. struct ocfs2_extent_tree *et,
  695. int wanted,
  696. struct ocfs2_alloc_context *meta_ac,
  697. struct buffer_head *bhs[])
  698. {
  699. int count, status, i;
  700. u16 suballoc_bit_start;
  701. u32 num_got;
  702. u64 suballoc_loc, first_blkno;
  703. struct ocfs2_super *osb =
  704. OCFS2_SB(ocfs2_metadata_cache_get_super(et->et_ci));
  705. struct ocfs2_extent_block *eb;
  706. count = 0;
  707. while (count < wanted) {
  708. status = ocfs2_claim_metadata(handle,
  709. meta_ac,
  710. wanted - count,
  711. &suballoc_loc,
  712. &suballoc_bit_start,
  713. &num_got,
  714. &first_blkno);
  715. if (status < 0) {
  716. mlog_errno(status);
  717. goto bail;
  718. }
  719. for(i = count; i < (num_got + count); i++) {
  720. bhs[i] = sb_getblk(osb->sb, first_blkno);
  721. if (bhs[i] == NULL) {
  722. status = -EIO;
  723. mlog_errno(status);
  724. goto bail;
  725. }
  726. ocfs2_set_new_buffer_uptodate(et->et_ci, bhs[i]);
  727. status = ocfs2_journal_access_eb(handle, et->et_ci,
  728. bhs[i],
  729. OCFS2_JOURNAL_ACCESS_CREATE);
  730. if (status < 0) {
  731. mlog_errno(status);
  732. goto bail;
  733. }
  734. memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
  735. eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
  736. strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
  737. eb->h_blkno = cpu_to_le64(first_blkno);
  738. eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
  739. eb->h_suballoc_slot =
  740. cpu_to_le16(meta_ac->ac_alloc_slot);
  741. eb->h_suballoc_loc = cpu_to_le64(suballoc_loc);
  742. eb->h_suballoc_bit = cpu_to_le16(suballoc_bit_start);
  743. eb->h_list.l_count =
  744. cpu_to_le16(ocfs2_extent_recs_per_eb(osb->sb));
  745. suballoc_bit_start++;
  746. first_blkno++;
  747. ocfs2_journal_dirty(handle, bhs[i]);
  748. }
  749. count += num_got;
  750. }
  751. status = 0;
  752. bail:
  753. if (status < 0) {
  754. for(i = 0; i < wanted; i++) {
  755. brelse(bhs[i]);
  756. bhs[i] = NULL;
  757. }
  758. mlog_errno(status);
  759. }
  760. return status;
  761. }
  762. static inline u32 ocfs2_sum_rightmost_rec(struct ocfs2_extent_list *el)
  763. {
  764. int i;
  765. i = le16_to_cpu(el->l_next_free_rec) - 1;
  766. return le32_to_cpu(el->l_recs[i].e_cpos) +
  767. ocfs2_rec_clusters(el, &el->l_recs[i]);
  768. }
/*
 * Change range of the branches in the right most path according to the
 * leaf extent block's rightmost record.
 */
static int ocfs2_adjust_rightmost_branch(handle_t *handle,
					 struct ocfs2_extent_tree *et)
{
	int status;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	path = ocfs2_new_path_from_et(et);
	if (!path) {
		status = -ENOMEM;
		return status;
	}

	/* UINT_MAX finds the rightmost path in the tree. */
	status = ocfs2_find_path(et->et_ci, path, UINT_MAX);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_extend_trans(handle, path_num_items(path));
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_path(et->et_ci, handle, path);
	if (status < 0) {
		mlog_errno(status);
		goto out;
	}

	el = path_leaf_el(path);
	rec = &el->l_recs[le16_to_cpu(el->l_next_free_rec) - 1];

	ocfs2_adjust_rightmost_records(handle, et, path, rec);

out:
	ocfs2_free_path(path);
	return status;
}
/*
 * Add an entire tree branch to our inode. eb_bh is the extent block
 * to start at, if we don't want to start the branch at the root
 * structure.
 *
 * last_eb_bh is required as we have to update its next_leaf pointer
 * for the new last extent block.
 *
 * the new branch will be 'empty' in the sense that every block will
 * contain a single record with cluster count == 0.
 */
static int ocfs2_add_branch(handle_t *handle,
			    struct ocfs2_extent_tree *et,
			    struct buffer_head *eb_bh,
			    struct buffer_head **last_eb_bh,
			    struct ocfs2_alloc_context *meta_ac)
{
	int status, new_blocks, i;
	u64 next_blkno, new_last_eb_blk;
	struct buffer_head *bh;
	struct buffer_head **new_eb_bhs = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list  *eb_el;
	struct ocfs2_extent_list  *el;
	u32 new_cpos, root_end;

	BUG_ON(!last_eb_bh || !*last_eb_bh);

	if (eb_bh) {
		eb = (struct ocfs2_extent_block *) eb_bh->b_data;
		el = &eb->h_list;
	} else
		el = et->et_root_el;

	/* we never add a branch to a leaf. */
	BUG_ON(!el->l_tree_depth);

	new_blocks = le16_to_cpu(el->l_tree_depth);

	eb = (struct ocfs2_extent_block *)(*last_eb_bh)->b_data;
	new_cpos = ocfs2_sum_rightmost_rec(&eb->h_list);
	root_end = ocfs2_sum_rightmost_rec(et->et_root_el);

	/*
	 * If there is a gap before the root end and the real end
	 * of the rightmost leaf block, we need to remove the gap
	 * between new_cpos and root_end first so that the tree
	 * is consistent after we add a new branch(it will start
	 * from new_cpos).
	 */
	if (root_end > new_cpos) {
		trace_ocfs2_adjust_rightmost_branch(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			root_end, new_cpos);

		status = ocfs2_adjust_rightmost_branch(handle, et);
		if (status) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* allocate the number of new eb blocks we need. new_blocks
	 * should be the same as the tree depth. */
	new_eb_bhs = kcalloc(new_blocks, sizeof(struct buffer_head *),
			     GFP_KERNEL);
	if (!new_eb_bhs) {
		status = -ENOMEM;
		mlog_errno(status);
		goto bail;
	}

	status = ocfs2_create_new_meta_bhs(handle, et, new_blocks,
					   meta_ac, new_eb_bhs);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
	 * linked with the rest of the tree.
	 * conversly, new_eb_bhs[0] is the new bottommost leaf.
	 *
	 * when we leave the loop, new_last_eb_blk will point to the
	 * newest leaf, and next_blkno will point to the topmost extent
	 * block. */
	next_blkno = new_last_eb_blk = 0;
	for(i = 0; i < new_blocks; i++) {
		bh = new_eb_bhs[i];
		eb = (struct ocfs2_extent_block *) bh->b_data;
		/* ocfs2_create_new_meta_bhs() should create it right! */
		BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));
		eb_el = &eb->h_list;

		status = ocfs2_journal_access_eb(handle, et->et_ci, bh,
						 OCFS2_JOURNAL_ACCESS_CREATE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb->h_next_leaf_blk = 0;
		eb_el->l_tree_depth = cpu_to_le16(i);
		eb_el->l_next_free_rec = cpu_to_le16(1);
		/*
		 * This actually counts as an empty extent as
		 * c_clusters == 0
		 */
		eb_el->l_recs[0].e_cpos = cpu_to_le32(new_cpos);
		eb_el->l_recs[0].e_blkno = cpu_to_le64(next_blkno);
		/*
		 * eb_el isn't always an interior node, but even leaf
		 * nodes want a zero'd flags and reserved field so
		 * this gets the whole 32 bits regardless of use.
		 */
		eb_el->l_recs[0].e_int_clusters = cpu_to_le32(0);
		if (!eb_el->l_tree_depth)
			new_last_eb_blk = le64_to_cpu(eb->h_blkno);

		ocfs2_journal_dirty(handle, bh);
		next_blkno = le64_to_cpu(eb->h_blkno);
	}

	/* This is a bit hairy. We want to update up to three blocks
	 * here without leaving any of them in an inconsistent state
	 * in case of error. We don't have to worry about
	 * journal_dirty erroring as it won't unless we've aborted the
	 * handle (in which case we would never be here) so reserving
	 * the write with journal_access is all we need to do. */
	status = ocfs2_journal_access_eb(handle, et->et_ci, *last_eb_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	status = ocfs2_et_root_journal_access(handle, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}
	if (eb_bh) {
		status = ocfs2_journal_access_eb(handle, et->et_ci, eb_bh,
						 OCFS2_JOURNAL_ACCESS_WRITE);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	/* Link the new branch into the rest of the tree (el will
	 * either be on the root_bh, or the extent block passed in. */
	i = le16_to_cpu(el->l_next_free_rec);
	el->l_recs[i].e_blkno = cpu_to_le64(next_blkno);
	el->l_recs[i].e_cpos = cpu_to_le32(new_cpos);
	el->l_recs[i].e_int_clusters = 0;
	le16_add_cpu(&el->l_next_free_rec, 1);

	/* fe needs a new last extent block pointer, as does the
	 * next_leaf on the previously last-extent-block. */
	ocfs2_et_set_last_eb_blk(et, new_last_eb_blk);

	eb = (struct ocfs2_extent_block *) (*last_eb_bh)->b_data;
	eb->h_next_leaf_blk = cpu_to_le64(new_last_eb_blk);

	ocfs2_journal_dirty(handle, *last_eb_bh);
	ocfs2_journal_dirty(handle, et->et_root_bh);
	if (eb_bh)
		ocfs2_journal_dirty(handle, eb_bh);

	/*
	 * Some callers want to track the rightmost leaf so pass it
	 * back here.
	 */
	brelse(*last_eb_bh);
	get_bh(new_eb_bhs[0]);
	*last_eb_bh = new_eb_bhs[0];

	status = 0;
bail:
	if (new_eb_bhs) {
		for (i = 0; i < new_blocks; i++)
			brelse(new_eb_bhs[i]);
		kfree(new_eb_bhs);
	}

	return status;
}
/*
 * Grow the extent tree by one level: copy the current contents of the
 * root extent list into a freshly allocated extent block, then rewrite
 * the root so its single record points at that new block.
 *
 * On success, returns 0 and passes ownership of the new extent block's
 * buffer_head to the caller via *ret_new_eb_bh.  On failure a negative
 * error code is returned and no buffer is handed out.
 */
static int ocfs2_shift_tree_depth(handle_t *handle,
				  struct ocfs2_extent_tree *et,
				  struct ocfs2_alloc_context *meta_ac,
				  struct buffer_head **ret_new_eb_bh)
{
	int status, i;
	u32 new_clusters;
	struct buffer_head *new_eb_bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *root_el;
	struct ocfs2_extent_list *eb_el;

	/* Allocate exactly one new extent block from the metadata
	 * reservation. */
	status = ocfs2_create_new_meta_bhs(handle, et, 1, meta_ac,
					   &new_eb_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	eb = (struct ocfs2_extent_block *) new_eb_bh->b_data;
	/* ocfs2_create_new_meta_bhs() should fill us with valid blocks. */
	BUG_ON(!OCFS2_IS_VALID_EXTENT_BLOCK(eb));

	eb_el = &eb->h_list;
	root_el = et->et_root_el;

	status = ocfs2_journal_access_eb(handle, et->et_ci, new_eb_bh,
					 OCFS2_JOURNAL_ACCESS_CREATE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	/* Copy the root's depth and every populated record into the new
	 * block, so it takes over the root's old role. */
	eb_el->l_tree_depth = root_el->l_tree_depth;
	eb_el->l_next_free_rec = root_el->l_next_free_rec;
	for (i = 0; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		eb_el->l_recs[i] = root_el->l_recs[i];

	ocfs2_journal_dirty(handle, new_eb_bh);

	status = ocfs2_et_root_journal_access(handle, et,
					      OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

	new_clusters = ocfs2_sum_rightmost_rec(eb_el);

	/* The root now has one more level below it and a single record
	 * covering the entire copied range. */
	le16_add_cpu(&root_el->l_tree_depth, 1);
	root_el->l_recs[0].e_cpos = 0;
	root_el->l_recs[0].e_blkno = eb->h_blkno;
	root_el->l_recs[0].e_int_clusters = cpu_to_le32(new_clusters);
	for (i = 1; i < le16_to_cpu(root_el->l_next_free_rec); i++)
		memset(&root_el->l_recs[i], 0, sizeof(struct ocfs2_extent_rec));
	root_el->l_next_free_rec = cpu_to_le16(1);

	/* If this is our 1st tree depth shift, then the new block is
	 * also our rightmost leaf - record it as such. */
	if (root_el->l_tree_depth == cpu_to_le16(1))
		ocfs2_et_set_last_eb_blk(et, le64_to_cpu(eb->h_blkno));

	ocfs2_journal_dirty(handle, et->et_root_bh);

	/* Hand the reference to the caller; clear our local pointer so
	 * the brelse() below is a no-op on success. */
	*ret_new_eb_bh = new_eb_bh;
	new_eb_bh = NULL;
	status = 0;
bail:
	brelse(new_eb_bh);

	return status;
}
/*
 * Walk the rightmost branch of the tree looking for the lowest extent
 * block that still has a free record slot - the place a new branch
 * could be attached.
 *
 * Returns < 0 on error.  Returns 1 when every block on the branch,
 * including the root list, is full, i.e. the caller must shift the
 * tree depth before adding a branch.  Otherwise returns 0 and sets
 * *target_bh to the lowest non-full block found (NULL when the root
 * list itself still has room).  The caller owns the reference in
 * *target_bh.
 */
static int ocfs2_find_branch_target(struct ocfs2_extent_tree *et,
				    struct buffer_head **target_bh)
{
	int status = 0, i;
	u64 blkno;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct buffer_head *bh = NULL;
	struct buffer_head *lowest_bh = NULL;

	*target_bh = NULL;

	el = et->et_root_el;

	/* Stop above the leaves: a depth-1 list already points at leaf
	 * blocks, which are not branch attachment points. */
	while(le16_to_cpu(el->l_tree_depth) > 1) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu has empty "
				    "extent list (next_free_rec == 0)",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci));
			status = -EIO;
			goto bail;
		}
		/* Always descend the rightmost record. */
		i = le16_to_cpu(el->l_next_free_rec) - 1;
		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (!blkno) {
			ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
				    "Owner %llu has extent "
				    "list where extent # %d has no physical "
				    "block start",
				    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci), i);
			status = -EIO;
			goto bail;
		}

		brelse(bh);
		bh = NULL;

		status = ocfs2_read_extent_block(et->et_ci, blkno, &bh);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;

		/* Remember the deepest block seen so far that still has
		 * a free record slot. */
		if (le16_to_cpu(el->l_next_free_rec) <
		    le16_to_cpu(el->l_count)) {
			brelse(lowest_bh);
			lowest_bh = bh;
			get_bh(lowest_bh);
		}
	}

	/* If there's no space in the whole branch and the root list is
	 * also full, the tree needs to grow a level. */
	el = et->et_root_el;
	if (!lowest_bh && (el->l_next_free_rec == el->l_count))
		status = 1;

	*target_bh = lowest_bh;
bail:
	brelse(bh);

	return status;
}
/*
 * Grow the b-tree so that it has enough room for at least one more
 * record at the leaf level.
 *
 * If the rightmost branch still has a block with free record space, a
 * new branch is attached there.  Otherwise the tree depth is shifted
 * first; when the resulting depth is 1 the shift alone created the
 * needed room and no branch is added.
 *
 * *last_eb_bh must reference the rightmost leaf on entry and is kept
 * up to date.  If non-NULL, *final_depth receives the tree depth after
 * the grow.
 */
static int ocfs2_grow_tree(handle_t *handle, struct ocfs2_extent_tree *et,
			   int *final_depth, struct buffer_head **last_eb_bh,
			   struct ocfs2_alloc_context *meta_ac)
{
	int ret, shift;
	struct ocfs2_extent_list *el = et->et_root_el;
	int depth = le16_to_cpu(el->l_tree_depth);
	struct buffer_head *bh = NULL;

	BUG_ON(meta_ac == NULL);

	shift = ocfs2_find_branch_target(et, &bh);
	if (shift < 0) {
		ret = shift;
		mlog_errno(ret);
		goto out;
	}

	/* We traveled all the way to the bottom of the allocation tree
	 * and didn't find room for any more extents - we need to add
	 * another tree level. */
	if (shift) {
		BUG_ON(bh);
		trace_ocfs2_grow_tree(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			depth);

		/* ocfs2_shift_tree_depth will return us a buffer with
		 * the new extent block (for us to pass to
		 * ocfs2_add_branch). */
		ret = ocfs2_shift_tree_depth(handle, et, meta_ac, &bh);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		depth++;
		if (depth == 1) {
			/* A depth-1 shift created the room by itself;
			 * the new block is also the rightmost leaf, so
			 * publish it through *last_eb_bh. */
			BUG_ON(*last_eb_bh);
			get_bh(bh);
			*last_eb_bh = bh;
			goto out;
		}
	}

	/* Attach a new branch at the lowest non-full block (or at the
	 * root when bh is NULL). */
	ret = ocfs2_add_branch(handle, et, bh, last_eb_bh,
			       meta_ac);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out:
	if (final_depth)
		*final_depth = depth;
	brelse(bh);
	return ret;
}
  1076. static void ocfs2_shift_records_right(struct ocfs2_extent_list *el)
  1077. {
  1078. int next_free = le16_to_cpu(el->l_next_free_rec);
  1079. int count = le16_to_cpu(el->l_count);
  1080. unsigned int num_bytes;
  1081. BUG_ON(!next_free);
  1082. BUG_ON(next_free >= count);
  1083. num_bytes = sizeof(struct ocfs2_extent_rec) * next_free;
  1084. memmove(&el->l_recs[1], &el->l_recs[0], num_bytes);
  1085. }
/*
 * Insert *insert_rec into the leaf list @el at the position dictated
 * by its cluster offset, keeping the records sorted by e_cpos.  If the
 * list starts with an empty placeholder record, that slot is absorbed
 * first (every real record slides left one), which is what guarantees
 * room for the insert when the list is otherwise full.
 */
static void ocfs2_rotate_leaf(struct ocfs2_extent_list *el,
			      struct ocfs2_extent_rec *insert_rec)
{
	int i, insert_index, next_free, has_empty, num_bytes;
	u32 insert_cpos = le32_to_cpu(insert_rec->e_cpos);
	struct ocfs2_extent_rec *rec;

	next_free = le16_to_cpu(el->l_next_free_rec);
	has_empty = ocfs2_is_empty_extent(&el->l_recs[0]);

	BUG_ON(!next_free);

	/* A full list must have an empty slot to absorb, otherwise the
	 * insert cannot fit. */
	BUG_ON(el->l_next_free_rec == el->l_count && !has_empty);

	/* Swallow the leading empty extent: shift all real records one
	 * to the left. */
	if (has_empty) {
		for(i = 0; i < (next_free - 1); i++)
			el->l_recs[i] = el->l_recs[i+1];
		next_free--;
	}

	/* Find the first record starting past insert_cpos - that is
	 * where the new record belongs. */
	for(i = 0; i < next_free; i++) {
		rec = &el->l_recs[i];

		if (insert_cpos < le32_to_cpu(rec->e_cpos))
			break;
	}
	insert_index = i;

	trace_ocfs2_rotate_leaf(insert_cpos, insert_index,
				has_empty, next_free,
				le16_to_cpu(el->l_count));

	BUG_ON(insert_index < 0);
	BUG_ON(insert_index >= le16_to_cpu(el->l_count));
	BUG_ON(insert_index > next_free);

	/* No need to memmove if we're just adding to the tail. */
	if (insert_index != next_free) {
		BUG_ON(next_free >= le16_to_cpu(el->l_count));

		num_bytes = next_free - insert_index;
		num_bytes *= sizeof(struct ocfs2_extent_rec);
		memmove(&el->l_recs[insert_index + 1],
			&el->l_recs[insert_index],
			num_bytes);
	}

	/* Either we had an empty extent, and need to re-increment, or
	 * there was no empty extent on a non full rightmost leaf node,
	 * in which case we still need to increment. */
	next_free++;
	el->l_next_free_rec = cpu_to_le16(next_free);
	BUG_ON(le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count));

	el->l_recs[insert_index] = *insert_rec;
}
  1126. static void ocfs2_remove_empty_extent(struct ocfs2_extent_list *el)
  1127. {
  1128. int size, num_recs = le16_to_cpu(el->l_next_free_rec);
  1129. BUG_ON(num_recs == 0);
  1130. if (ocfs2_is_empty_extent(&el->l_recs[0])) {
  1131. num_recs--;
  1132. size = num_recs * sizeof(struct ocfs2_extent_rec);
  1133. memmove(&el->l_recs[0], &el->l_recs[1], size);
  1134. memset(&el->l_recs[num_recs], 0,
  1135. sizeof(struct ocfs2_extent_rec));
  1136. el->l_next_free_rec = cpu_to_le16(num_recs);
  1137. }
  1138. }
  1139. static void ocfs2_create_empty_extent(struct ocfs2_extent_list *el)
  1140. {
  1141. int next_free = le16_to_cpu(el->l_next_free_rec);
  1142. BUG_ON(le16_to_cpu(el->l_tree_depth) != 0);
  1143. if (next_free == 0)
  1144. goto set_and_inc;
  1145. if (ocfs2_is_empty_extent(&el->l_recs[0]))
  1146. return;
  1147. mlog_bug_on_msg(el->l_count == el->l_next_free_rec,
  1148. "Asked to create an empty extent in a full list:\n"
  1149. "count = %u, tree depth = %u",
  1150. le16_to_cpu(el->l_count),
  1151. le16_to_cpu(el->l_tree_depth));
  1152. ocfs2_shift_records_right(el);
  1153. set_and_inc:
  1154. le16_add_cpu(&el->l_next_free_rec, 1);
  1155. memset(&el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
  1156. }
/*
 * Given two paths that share the same root, find the index of the
 * deepest node that is still common to both - the "subtree root" of
 * the rotation.  Walks down from index 1 (index 0 is the shared root
 * buffer) until the two paths diverge and returns the index just
 * above the divergence point.
 */
int ocfs2_find_subtree_root(struct ocfs2_extent_tree *et,
			    struct ocfs2_path *left,
			    struct ocfs2_path *right)
{
	int i = 0;

	/* Both paths must hang off the very same root block. */
	BUG_ON(path_root_bh(left) != path_root_bh(right));

	do {
		i++;

		/* The caller didn't pass two adjacent paths. */
		mlog_bug_on_msg(i > left->p_tree_depth,
				"Owner %llu, left depth %u, right depth %u\n"
				"left leaf blk %llu, right leaf blk %llu\n",
				(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
				left->p_tree_depth, right->p_tree_depth,
				(unsigned long long)path_leaf_bh(left)->b_blocknr,
				(unsigned long long)path_leaf_bh(right)->b_blocknr);
	} while (left->p_node[i].bh->b_blocknr ==
		 right->p_node[i].bh->b_blocknr);

	return i - 1;
}
  1176. typedef void (path_insert_t)(void *, struct buffer_head *);
/*
 * Walk from @root_el down to the leaf covering cluster offset @cpos,
 * reading and validating each extent block on the way.  For every
 * block read, @func (if non-NULL) is invoked with @data and the
 * block's buffer_head - callers use this to either collect the whole
 * path or just the leaf.
 *
 * Returns 0 on success, -EROFS when on-disk list metadata is
 * inconsistent, or the error from reading an extent block.
 */
static int __ocfs2_find_path(struct ocfs2_caching_info *ci,
			     struct ocfs2_extent_list *root_el, u32 cpos,
			     path_insert_t *func, void *data)
{
	int i, ret = 0;
	u32 range;
	u64 blkno;
	struct buffer_head *bh = NULL;
	struct ocfs2_extent_block *eb;
	struct ocfs2_extent_list *el;
	struct ocfs2_extent_rec *rec;

	el = root_el;
	while (el->l_tree_depth) {
		if (le16_to_cpu(el->l_next_free_rec) == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
				    "Owner %llu has empty extent list at "
				    "depth %u\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    le16_to_cpu(el->l_tree_depth));
			ret = -EROFS;
			goto out;

		}

		/* Find the record whose [e_cpos, e_cpos + clusters)
		 * range covers cpos.  If none does, we fall through to
		 * the last record - by design the rightmost record
		 * covers everything beyond it. */
		for(i = 0; i < le16_to_cpu(el->l_next_free_rec) - 1; i++) {
			rec = &el->l_recs[i];

			/*
			 * In the case that cpos is off the allocation
			 * tree, this should just wind up returning the
			 * rightmost record.
			 */
			range = le32_to_cpu(rec->e_cpos) +
				ocfs2_rec_clusters(el, rec);
			if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
			    break;
		}

		blkno = le64_to_cpu(el->l_recs[i].e_blkno);
		if (blkno == 0) {
			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
				    "Owner %llu has bad blkno in extent list "
				    "at depth %u (index %d)\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    le16_to_cpu(el->l_tree_depth), i);
			ret = -EROFS;
			goto out;
		}

		brelse(bh);
		bh = NULL;
		ret = ocfs2_read_extent_block(ci, blkno, &bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		eb = (struct ocfs2_extent_block *) bh->b_data;
		el = &eb->h_list;

		if (le16_to_cpu(el->l_next_free_rec) >
		    le16_to_cpu(el->l_count)) {
			ocfs2_error(ocfs2_metadata_cache_get_super(ci),
				    "Owner %llu has bad count in extent list "
				    "at block %llu (next free=%u, count=%u)\n",
				    (unsigned long long)ocfs2_metadata_cache_owner(ci),
				    (unsigned long long)bh->b_blocknr,
				    le16_to_cpu(el->l_next_free_rec),
				    le16_to_cpu(el->l_count));
			ret = -EROFS;
			goto out;
		}

		if (func)
			func(data, bh);
	}

out:
	/*
	 * Catch any trailing bh that the loop didn't handle.
	 */
	brelse(bh);

	return ret;
}
/*
 * Cursor state shared with find_path_ins() during a root-to-leaf walk:
 * "index" is the next slot in "path" to be filled.
 */
struct find_path_data {
	int index;
	struct ocfs2_path *path;
};
/*
 * __ocfs2_find_path() callback: store each visited extent block into
 * the next free slot of the path being built.  Takes an extra
 * reference on @bh because ocfs2_path_insert_eb() keeps the pointer.
 */
static void find_path_ins(void *data, struct buffer_head *bh)
{
	struct find_path_data *fp = data;

	get_bh(bh);
	ocfs2_path_insert_eb(fp->path, fp->index, bh);
	fp->index++;
}
  1255. int ocfs2_find_path(struct ocfs2_caching_info *ci,
  1256. struct ocfs2_path *path, u32 cpos)
  1257. {
  1258. struct find_path_data data;
  1259. data.index = 1;
  1260. data.path = path;
  1261. return __ocfs2_find_path(ci, path_root_el(path), cpos,
  1262. find_path_ins, &data);
  1263. }
  1264. static void find_leaf_ins(void *data, struct buffer_head *bh)
  1265. {
  1266. struct ocfs2_extent_block *eb =(struct ocfs2_extent_block *)bh->b_data;
  1267. struct ocfs2_extent_list *el = &eb->h_list;
  1268. struct buffer_head **ret = data;
  1269. if (le16_to_cpu(el->l_tree_depth) == 0) {
  1270. get_bh(bh);
  1271. *ret = bh;
  1272. }
  1273. }
  1274. int ocfs2_find_leaf(struct ocfs2_caching_info *ci,
  1275. struct ocfs2_extent_list *root_el, u32 cpos,
  1276. struct buffer_head **leaf_bh)
  1277. {
  1278. int ret;
  1279. struct buffer_head *bh = NULL;
  1280. ret = __ocfs2_find_path(ci, root_el, cpos, find_leaf_ins, &bh);
  1281. if (ret) {
  1282. mlog_errno(ret);
  1283. goto out;
  1284. }
  1285. *leaf_bh = bh;
  1286. out:
  1287. return ret;
  1288. }
/*
 * Re-balance the cluster counts of two adjacent interior records after
 * records have moved between their child lists.  left_rec's length is
 * recomputed from the first real record of the right child, and
 * right_rec's start/length are shifted accordingly - the combined span
 * of the two records stays the same.
 */
static void ocfs2_adjust_adjacent_records(struct ocfs2_extent_rec *left_rec,
					  struct ocfs2_extent_list *left_child_el,
					  struct ocfs2_extent_rec *right_rec,
					  struct ocfs2_extent_list *right_child_el)
{
	u32 left_clusters, right_end;

	/* The right child's first record marks where the left record
	 * must now end. */
	left_clusters = le32_to_cpu(right_child_el->l_recs[0].e_cpos);
	if (!ocfs2_rec_clusters(right_child_el, &right_child_el->l_recs[0])) {
		/* Record 0 is an empty placeholder - only valid on a
		 * leaf with more than one record; use record 1 then. */
		BUG_ON(right_child_el->l_tree_depth);
		BUG_ON(le16_to_cpu(right_child_el->l_next_free_rec) <= 1);
		left_clusters = le32_to_cpu(right_child_el->l_recs[1].e_cpos);
	}
	left_clusters -= le32_to_cpu(left_rec->e_cpos);
	left_rec->e_int_clusters = cpu_to_le32(left_clusters);

	/* Preserve the original end of the right record while moving
	 * its start up to abut the left record. */
	right_end = le32_to_cpu(right_rec->e_cpos);
	right_end += le32_to_cpu(right_rec->e_int_clusters);

	right_rec->e_cpos = left_rec->e_cpos;
	le32_add_cpu(&right_rec->e_cpos, left_clusters);

	right_end -= le32_to_cpu(right_rec->e_cpos);
	right_rec->e_int_clusters = cpu_to_le32(right_end);
}
/*
 * Find the pair of adjacent records in @root_el whose left member
 * points at the child block @left_el_blkno, then re-balance that pair
 * via ocfs2_adjust_adjacent_records().  Used at the subtree root after
 * a rotation between its two children.
 */
static void ocfs2_adjust_root_records(struct ocfs2_extent_list *root_el,
				      struct ocfs2_extent_list *left_el,
				      struct ocfs2_extent_list *right_el,
				      u64 left_el_blkno)
{
	int i;

	/* The root must sit strictly above the children being
	 * adjusted. */
	BUG_ON(le16_to_cpu(root_el->l_tree_depth) <=
	       le16_to_cpu(left_el->l_tree_depth));

	for(i = 0; i < le16_to_cpu(root_el->l_next_free_rec) - 1; i++) {
		if (le64_to_cpu(root_el->l_recs[i].e_blkno) == left_el_blkno)
			break;
	}

	/* The path walking code should have never returned a root and
	 * two paths which are not adjacent. */
	BUG_ON(i >= (le16_to_cpu(root_el->l_next_free_rec) - 1));

	ocfs2_adjust_adjacent_records(&root_el->l_recs[i], left_el,
				      &root_el->l_recs[i + 1], right_el);
}
  1326. static void ocfs2_complete_edge_insert(handle_t *handle,
  1327. struct ocfs2_path *left_path,
  1328. struct ocfs2_path *right_path,
  1329. int subtree_index)
  1330. {
  1331. int i, idx;
  1332. struct ocfs2_extent_list *el, *left_el, *right_el;
  1333. struct ocfs2_extent_rec *left_rec, *right_rec;
  1334. struct buffer_head *root_bh = left_path->p_node[subtree_index].bh;
  1335. left_el = path_leaf_el(left_path);
  1336. right_el = path_leaf_el(right_path);
  1337. for(i = left_path->p_tree_depth - 1; i > subtree_index; i--) {
  1338. trace_ocfs2_complete_edge_insert(i);
  1339. el = left_path->p_node[i].el;
  1340. idx = le16_to_cpu(left_el->l_next_free_rec) - 1;
  1341. left_rec = &el->l_recs[idx];
  1342. el = right_path->p_node[i].el;
  1343. right_rec = &el->l_recs[0];
  1344. ocfs2_adjust_adjacent_records(left_rec, left_el, right_rec,
  1345. right_el);
  1346. ocfs2_journal_dirty(handle, left_path->p_node[i].bh);
  1347. ocfs2_journal_dirty(handle, right_path->p_node[i].bh);
  1348. left_el = left_path->p_node[i].el;
  1349. right_el = right_path->p_node[i].el;
  1350. }
  1351. el = left_path->p_node[subtree_index].el;
  1352. left_el = left_path->p_node[subtree_index + 1].el;
  1353. right_el = right_path->p_node[subtree_index + 1].el;
  1354. ocfs2_adjust_root_records(el, left_el, right_el,
  1355. left_path->p_node[subtree_index + 1].bh->b_blocknr);
  1356. root_bh = left_path->p_node[subtree_index].bh;
  1357. ocfs2_journal_dirty(handle, root_bh);
  1358. }
/*
 * Rotate one record from the rightmost slot of the left leaf into the
 * leftmost slot of the right leaf, then fix up the interior nodes of
 * the subtree rooted at @subtree_index.  The left leaf must be full
 * (it is an interior-edge leaf); if its slot 0 is already the empty
 * placeholder there is nothing to rotate and 0 is returned.
 *
 * NOTE(review): the ocfs2_error() format below concatenates
 * "...%llu" "(next free..." with no separating space - message
 * cosmetics only, left byte-identical here.
 */
static int ocfs2_rotate_subtree_right(handle_t *handle,
				      struct ocfs2_extent_tree *et,
				      struct ocfs2_path *left_path,
				      struct ocfs2_path *right_path,
				      int subtree_index)
{
	int ret, i;
	struct buffer_head *right_leaf_bh;
	struct buffer_head *left_leaf_bh = NULL;
	struct buffer_head *root_bh;
	struct ocfs2_extent_list *right_el, *left_el;
	struct ocfs2_extent_rec move_rec;

	left_leaf_bh = path_leaf_bh(left_path);
	left_el = path_leaf_el(left_path);

	if (left_el->l_next_free_rec != left_el->l_count) {
		ocfs2_error(ocfs2_metadata_cache_get_super(et->et_ci),
			    "Inode %llu has non-full interior leaf node %llu"
			    "(next free = %u)",
			    (unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			    (unsigned long long)left_leaf_bh->b_blocknr,
			    le16_to_cpu(left_el->l_next_free_rec));
		return -EROFS;
	}

	/*
	 * This extent block may already have an empty record, so we
	 * return early if so.
	 */
	if (ocfs2_is_empty_extent(&left_el->l_recs[0]))
		return 0;

	root_bh = left_path->p_node[subtree_index].bh;
	BUG_ON(root_bh != right_path->p_node[subtree_index].bh);

	/* Journal the subtree root plus every node below it on both
	 * paths before modifying anything. */
	ret = ocfs2_path_bh_journal_access(handle, et->et_ci, right_path,
					   subtree_index);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	for(i = subtree_index + 1; i < path_num_items(right_path); i++) {
		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   right_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_path_bh_journal_access(handle, et->et_ci,
						   left_path, i);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	right_leaf_bh = path_leaf_bh(right_path);
	right_el = path_leaf_el(right_path);

	/* This is a code error, not a disk corruption. */
	mlog_bug_on_msg(!right_el->l_next_free_rec, "Inode %llu: Rotate fails "
			"because rightmost leaf block %llu is empty\n",
			(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
			(unsigned long long)right_leaf_bh->b_blocknr);

	/* Open slot 0 of the right leaf, then move the left leaf's last
	 * record into it. */
	ocfs2_create_empty_extent(right_el);

	ocfs2_journal_dirty(handle, right_leaf_bh);

	/* Do the copy now. */
	i = le16_to_cpu(left_el->l_next_free_rec) - 1;
	move_rec = left_el->l_recs[i];
	right_el->l_recs[0] = move_rec;

	/*
	 * Clear out the record we just copied and shift everything
	 * over, leaving an empty extent in the left leaf.
	 */
	le16_add_cpu(&left_el->l_next_free_rec, -1);
	ocfs2_shift_records_right(left_el);
	memset(&left_el->l_recs[0], 0, sizeof(struct ocfs2_extent_rec));
	le16_add_cpu(&left_el->l_next_free_rec, 1);

	ocfs2_journal_dirty(handle, left_leaf_bh);

	ocfs2_complete_edge_insert(handle, left_path, right_path,
				   subtree_index);

out:
	return ret;
}
/*
 * Given a full path to a leaf, compute the cluster offset of the last
 * cluster in the leaf immediately to its left.  Walks upward from the
 * leaf until it finds a level where this branch is not the leftmost
 * record, then takes the end of the preceding record minus one.
 *
 * *cpos is set to 0 when the path is already the leftmost branch of
 * the tree.  Returns -EROFS if the leaf's block is not referenced by
 * its parent (corrupt tree).
 */
int ocfs2_find_cpos_for_left_leaf(struct super_block *sb,
				  struct ocfs2_path *path, u32 *cpos)
{
	int i, j, ret = 0;
	u64 blkno;
	struct ocfs2_extent_list *el;

	BUG_ON(path->p_tree_depth == 0);

	*cpos = 0;

	blkno = path_leaf_bh(path)->b_blocknr;

	/* Start at the tree node just above the leaf and work up. */
	i = path->p_tree_depth - 1;
	while (i >= 0) {
		el = path->p_node[i].el;

		/* Find the index of the record pointing at our child
		 * block. */
		for(j = 0; j < le16_to_cpu(el->l_next_free_rec); j++) {
			if (le64_to_cpu(el->l_recs[j].e_blkno) == blkno) {
				if (j == 0) {
					if (i == 0) {
						/* We have met the root -
						 * this branch is the
						 * leftmost one. */
						goto out;
					}

					/* Leftmost at this level too;
					 * keep climbing. */
					goto next_node;
				}

				/* End of the record to our left, minus
				 * one, is the rightmost cpos in the
				 * left leaf. */
				*cpos = le32_to_cpu(el->l_recs[j - 1].e_cpos);
				*cpos = *cpos + ocfs2_rec_clusters(el,
							   &el->l_recs[j - 1]);
				*cpos = *cpos - 1;
				goto out;
			}
		}

		/*
		 * If we got here, we never found a valid node where
		 * the tree indicated one should be.
		 */
		ocfs2_error(sb,
			    "Invalid extent tree at extent block %llu\n",
			    (unsigned long long)blkno);
		ret = -EROFS;
		goto out;

next_node:
		blkno = path->p_node[i].bh->b_blocknr;
		i--;
	}

out:
	return ret;
}
  1466. static int ocfs2_extend_rotate_transaction(handle_t *handle, int subtree_depth,
  1467. int op_credits,
  1468. struct ocfs2_path *path)
  1469. {
  1470. int ret = 0;
  1471. int credits = (path->p_tree_depth - subtree_depth) * 2 + 1 + op_credits;
  1472. if (handle->h_buffer_credits < credits)
  1473. ret = ocfs2_extend_trans(handle,
  1474. credits - handle->h_buffer_credits);
  1475. return ret;
  1476. }
  1477. static int ocfs2_rotate_requires_path_adjustment(struct ocfs2_path *left_path,
  1478. u32 insert_cpos)
  1479. {
  1480. struct ocfs2_extent_list *left_el;
  1481. struct ocfs2_extent_rec *rec;
  1482. int next_free;
  1483. left_el = path_leaf_el(left_path);
  1484. next_free = le16_to_cpu(left_el->l_next_free_rec);
  1485. rec = &left_el->l_recs[next_free - 1];
  1486. if (insert_cpos > le32_to_cpu(rec->e_cpos))
  1487. return 1;
  1488. return 0;
  1489. }
  1490. static int ocfs2_leftmost_rec_contains(struct ocfs2_extent_list *el, u32 cpos)
  1491. {
  1492. int next_free = le16_to_cpu(el->l_next_free_rec);
  1493. unsigned int range;
  1494. struct ocfs2_extent_rec *rec;
  1495. if (next_free == 0)
  1496. return 0;
  1497. rec = &el->l_recs[0];
  1498. if (ocfs2_is_empty_extent(rec)) {
  1499. if (next_free == 1)
  1500. return 0;
  1501. rec = &el->l_recs[1];
  1502. }
  1503. range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);
  1504. if (cpos >= le32_to_cpu(rec->e_cpos) && cpos < range)
  1505. return 1;
  1506. return 0;
  1507. }
/*
 * Rotate records rightward along the tree edge until there is room in
 * the correct leaf for an insert at @insert_cpos.  Each iteration
 * finds the leaf to the left of @right_path, rotates one record across
 * the shared subtree, then slides the window left and repeats while
 * insert_cpos still falls at or left of that leaf.
 *
 * On a SPLIT_NONE insert, if the insert belongs in the current left
 * leaf the rotation stops early and the left path is handed back via
 * *ret_left_path (caller frees it); similarly for splits once the
 * right leaf's leftmost record covers insert_cpos.  Otherwise
 * *ret_left_path stays NULL and only @right_path is updated in place.
 */
static int ocfs2_rotate_tree_right(handle_t *handle,
				   struct ocfs2_extent_tree *et,
				   enum ocfs2_split_type split,
				   u32 insert_cpos,
				   struct ocfs2_path *right_path,
				   struct ocfs2_path **ret_left_path)
{
	int ret, start, orig_credits = handle->h_buffer_credits;
	u32 cpos;
	struct ocfs2_path *left_path = NULL;
	struct super_block *sb = ocfs2_metadata_cache_get_super(et->et_ci);

	*ret_left_path = NULL;

	left_path = ocfs2_new_path_from_path(right_path);
	if (!left_path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	trace_ocfs2_rotate_tree_right(
		(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
		insert_cpos, cpos);

	/* cpos == 0 means we hit the leftmost leaf - nothing further
	 * left to rotate into. */
	while (cpos && insert_cpos <= cpos) {
		trace_ocfs2_rotate_tree_right(
			(unsigned long long)
			ocfs2_metadata_cache_owner(et->et_ci),
			insert_cpos, cpos);

		ret = ocfs2_find_path(et->et_ci, left_path, cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		mlog_bug_on_msg(path_leaf_bh(left_path) ==
				path_leaf_bh(right_path),
				"Owner %llu: error during insert of %u "
				"(left path cpos %u) results in two identical "
				"paths ending at %llu\n",
				(unsigned long long)ocfs2_metadata_cache_owner(et->et_ci),
				insert_cpos, cpos,
				(unsigned long long)
				path_leaf_bh(left_path)->b_blocknr);

		/* Plain insert that belongs in this left leaf: stop and
		 * hand the path back instead of rotating. */
		if (split == SPLIT_NONE &&
		    ocfs2_rotate_requires_path_adjustment(left_path,
							  insert_cpos)) {

			*ret_left_path = left_path;
			goto out_ret_path;
		}

		start = ocfs2_find_subtree_root(et, left_path, right_path);

		trace_ocfs2_rotate_subtree(start,
			(unsigned long long)
			right_path->p_node[start].bh->b_blocknr,
			right_path->p_tree_depth);

		/* Each pass may touch a different number of blocks, so
		 * re-check the transaction credits every time. */
		ret = ocfs2_extend_rotate_transaction(handle, start,
						      orig_credits, right_path);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_rotate_subtree_right(handle, et, left_path,
						 right_path, start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/* For a split, stop once the target leaf's leftmost
		 * record covers insert_cpos; the caller needs both
		 * paths. */
		if (split != SPLIT_NONE &&
		    ocfs2_leftmost_rec_contains(path_leaf_el(right_path),
						insert_cpos)) {
			*ret_left_path = left_path;
			goto out_ret_path;
		}

		/* Slide the window: the old left path becomes the new
		 * right path, then find the next leaf to its left. */
		ocfs2_mv_path(right_path, left_path);

		ret = ocfs2_find_cpos_for_left_leaf(sb, right_path, &cpos);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

out:
	ocfs2_free_path(left_path);

out_ret_path:
	return ret;
}
  1594. static int ocfs2_update_edge_lengths(handle

Large files files are truncated, but you can click here to view the full file