
/fs/ext3/namei.c

https://github.com/mstsirkin/linux
  1. /*
  2. * linux/fs/ext3/namei.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * from
  10. *
  11. * linux/fs/minix/namei.c
  12. *
  13. * Copyright (C) 1991, 1992 Linus Torvalds
  14. *
  15. * Big-endian to little-endian byte-swapping/bitmaps by
  16. * David S. Miller (davem@caip.rutgers.edu), 1995
  17. * Directory entry file type support and forward compatibility hooks
  18. * for B-tree directories by Theodore Ts'o (tytso@mit.edu), 1998
  19. * Hash Tree Directory indexing (c)
  20. * Daniel Phillips, 2001
  21. * Hash Tree Directory indexing porting
  22. * Christopher Li, 2002
  23. * Hash Tree Directory indexing cleanup
  24. * Theodore Ts'o, 2002
  25. */
  26. #include <linux/fs.h>
  27. #include <linux/pagemap.h>
  28. #include <linux/jbd.h>
  29. #include <linux/time.h>
  30. #include <linux/ext3_fs.h>
  31. #include <linux/ext3_jbd.h>
  32. #include <linux/fcntl.h>
  33. #include <linux/stat.h>
  34. #include <linux/string.h>
  35. #include <linux/quotaops.h>
  36. #include <linux/buffer_head.h>
  37. #include <linux/bio.h>
  38. #include <trace/events/ext3.h>
  39. #include "namei.h"
  40. #include "xattr.h"
  41. #include "acl.h"
  42. /*
  43. * define how far ahead to read directories while searching them.
  44. */
  45. #define NAMEI_RA_CHUNKS 2
  46. #define NAMEI_RA_BLOCKS 4
  47. #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
  48. #define NAMEI_RA_INDEX(c,b) (((c) * NAMEI_RA_BLOCKS) + (b))
  49. static struct buffer_head *ext3_append(handle_t *handle,
  50. struct inode *inode,
  51. u32 *block, int *err)
  52. {
  53. struct buffer_head *bh;
  54. *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
  55. bh = ext3_bread(handle, inode, *block, 1, err);
  56. if (bh) {
  57. inode->i_size += inode->i_sb->s_blocksize;
  58. EXT3_I(inode)->i_disksize = inode->i_size;
  59. *err = ext3_journal_get_write_access(handle, bh);
  60. if (*err) {
  61. brelse(bh);
  62. bh = NULL;
  63. }
  64. }
  65. return bh;
  66. }
  67. #ifndef assert
  68. #define assert(test) J_ASSERT(test)
  69. #endif
  70. #ifdef DX_DEBUG
  71. #define dxtrace(command) command
  72. #else
  73. #define dxtrace(command)
  74. #endif
  75. struct fake_dirent
  76. {
  77. __le32 inode;
  78. __le16 rec_len;
  79. u8 name_len;
  80. u8 file_type;
  81. };
  82. struct dx_countlimit
  83. {
  84. __le16 limit;
  85. __le16 count;
  86. };
  87. struct dx_entry
  88. {
  89. __le32 hash;
  90. __le32 block;
  91. };
  92. /*
  93. * dx_root_info is laid out so that if it should somehow get overlaid by a
  94. * dirent the two low bits of the hash version will be zero. Therefore, the
  95. * hash version mod 4 should never be 0. Sincerely, the paranoia department.
  96. */
  97. struct dx_root
  98. {
  99. struct fake_dirent dot;
  100. char dot_name[4];
  101. struct fake_dirent dotdot;
  102. char dotdot_name[4];
  103. struct dx_root_info
  104. {
  105. __le32 reserved_zero;
  106. u8 hash_version;
  107. u8 info_length; /* 8 */
  108. u8 indirect_levels;
  109. u8 unused_flags;
  110. }
  111. info;
  112. struct dx_entry entries[0];
  113. };
  114. struct dx_node
  115. {
  116. struct fake_dirent fake;
  117. struct dx_entry entries[0];
  118. };
  119. struct dx_frame
  120. {
  121. struct buffer_head *bh;
  122. struct dx_entry *entries;
  123. struct dx_entry *at;
  124. };
  125. struct dx_map_entry
  126. {
  127. u32 hash;
  128. u16 offs;
  129. u16 size;
  130. };
  131. static inline unsigned dx_get_block (struct dx_entry *entry);
  132. static void dx_set_block (struct dx_entry *entry, unsigned value);
  133. static inline unsigned dx_get_hash (struct dx_entry *entry);
  134. static void dx_set_hash (struct dx_entry *entry, unsigned value);
  135. static unsigned dx_get_count (struct dx_entry *entries);
  136. static unsigned dx_get_limit (struct dx_entry *entries);
  137. static void dx_set_count (struct dx_entry *entries, unsigned value);
  138. static void dx_set_limit (struct dx_entry *entries, unsigned value);
  139. static unsigned dx_root_limit (struct inode *dir, unsigned infosize);
  140. static unsigned dx_node_limit (struct inode *dir);
  141. static struct dx_frame *dx_probe(struct qstr *entry,
  142. struct inode *dir,
  143. struct dx_hash_info *hinfo,
  144. struct dx_frame *frame,
  145. int *err);
  146. static void dx_release (struct dx_frame *frames);
  147. static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
  148. struct dx_hash_info *hinfo, struct dx_map_entry map[]);
  149. static void dx_sort_map(struct dx_map_entry *map, unsigned count);
  150. static struct ext3_dir_entry_2 *dx_move_dirents (char *from, char *to,
  151. struct dx_map_entry *offsets, int count);
  152. static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize);
  153. static void dx_insert_block (struct dx_frame *frame, u32 hash, u32 block);
  154. static int ext3_htree_next_block(struct inode *dir, __u32 hash,
  155. struct dx_frame *frame,
  156. struct dx_frame *frames,
  157. __u32 *start_hash);
  158. static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
  159. struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
  160. int *err);
  161. static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
  162. struct inode *inode);
  163. /*
  164. * p is at least 6 bytes before the end of page
  165. */
  166. static inline struct ext3_dir_entry_2 *
  167. ext3_next_entry(struct ext3_dir_entry_2 *p)
  168. {
  169. return (struct ext3_dir_entry_2 *)((char *)p +
  170. ext3_rec_len_from_disk(p->rec_len));
  171. }
  172. /*
  173. * Future: use high four bits of block for coalesce-on-delete flags
  174. * Mask them off for now.
  175. */
  176. static inline unsigned dx_get_block (struct dx_entry *entry)
  177. {
  178. return le32_to_cpu(entry->block) & 0x00ffffff;
  179. }
  180. static inline void dx_set_block (struct dx_entry *entry, unsigned value)
  181. {
  182. entry->block = cpu_to_le32(value);
  183. }
  184. static inline unsigned dx_get_hash (struct dx_entry *entry)
  185. {
  186. return le32_to_cpu(entry->hash);
  187. }
  188. static inline void dx_set_hash (struct dx_entry *entry, unsigned value)
  189. {
  190. entry->hash = cpu_to_le32(value);
  191. }
  192. static inline unsigned dx_get_count (struct dx_entry *entries)
  193. {
  194. return le16_to_cpu(((struct dx_countlimit *) entries)->count);
  195. }
  196. static inline unsigned dx_get_limit (struct dx_entry *entries)
  197. {
  198. return le16_to_cpu(((struct dx_countlimit *) entries)->limit);
  199. }
  200. static inline void dx_set_count (struct dx_entry *entries, unsigned value)
  201. {
  202. ((struct dx_countlimit *) entries)->count = cpu_to_le16(value);
  203. }
  204. static inline void dx_set_limit (struct dx_entry *entries, unsigned value)
  205. {
  206. ((struct dx_countlimit *) entries)->limit = cpu_to_le16(value);
  207. }
  208. static inline unsigned dx_root_limit (struct inode *dir, unsigned infosize)
  209. {
  210. unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(1) -
  211. EXT3_DIR_REC_LEN(2) - infosize;
  212. return entry_space / sizeof(struct dx_entry);
  213. }
  214. static inline unsigned dx_node_limit (struct inode *dir)
  215. {
  216. unsigned entry_space = dir->i_sb->s_blocksize - EXT3_DIR_REC_LEN(0);
  217. return entry_space / sizeof(struct dx_entry);
  218. }
  219. /*
  220. * Debug
  221. */
  222. #ifdef DX_DEBUG
  223. static void dx_show_index (char * label, struct dx_entry *entries)
  224. {
  225. int i, n = dx_get_count (entries);
  226. printk("%s index ", label);
  227. for (i = 0; i < n; i++)
  228. {
  229. printk("%x->%u ", i? dx_get_hash(entries + i): 0, dx_get_block(entries + i));
  230. }
  231. printk("\n");
  232. }
  233. struct stats
  234. {
  235. unsigned names;
  236. unsigned space;
  237. unsigned bcount;
  238. };
  239. static struct stats dx_show_leaf(struct dx_hash_info *hinfo, struct ext3_dir_entry_2 *de,
  240. int size, int show_names)
  241. {
  242. unsigned names = 0, space = 0;
  243. char *base = (char *) de;
  244. struct dx_hash_info h = *hinfo;
  245. printk("names: ");
  246. while ((char *) de < base + size)
  247. {
  248. if (de->inode)
  249. {
  250. if (show_names)
  251. {
  252. int len = de->name_len;
  253. char *name = de->name;
  254. while (len--) printk("%c", *name++);
  255. ext3fs_dirhash(de->name, de->name_len, &h);
  256. printk(":%x.%u ", h.hash,
  257. (unsigned) ((char *) de - base));
  258. }
  259. space += EXT3_DIR_REC_LEN(de->name_len);
  260. names++;
  261. }
  262. de = ext3_next_entry(de);
  263. }
  264. printk("(%i)\n", names);
  265. return (struct stats) { names, space, 1 };
  266. }
  267. struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
  268. struct dx_entry *entries, int levels)
  269. {
  270. unsigned blocksize = dir->i_sb->s_blocksize;
  271. unsigned count = dx_get_count (entries), names = 0, space = 0, i;
  272. unsigned bcount = 0;
  273. struct buffer_head *bh;
  274. int err;
  275. printk("%i indexed blocks...\n", count);
  276. for (i = 0; i < count; i++, entries++)
  277. {
  278. u32 block = dx_get_block(entries), hash = i? dx_get_hash(entries): 0;
  279. u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
  280. struct stats stats;
  281. printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
  282. if (!(bh = ext3_bread (NULL,dir, block, 0,&err))) continue;
  283. stats = levels?
  284. dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
  285. dx_show_leaf(hinfo, (struct ext3_dir_entry_2 *) bh->b_data, blocksize, 0);
  286. names += stats.names;
  287. space += stats.space;
  288. bcount += stats.bcount;
  289. brelse (bh);
  290. }
  291. if (bcount)
  292. printk("%snames %u, fullness %u (%u%%)\n", levels?"":" ",
  293. names, space/bcount,(space/bcount)*100/blocksize);
  294. return (struct stats) { names, space, bcount};
  295. }
  296. #endif /* DX_DEBUG */
  297. /*
  298. * Probe for a directory leaf block to search.
  299. *
  300. * dx_probe can return ERR_BAD_DX_DIR, which means there was a format
  301. * error in the directory index, and the caller should fall back to
  302. * searching the directory normally. The callers of dx_probe **MUST**
  303. * check for this error code, and make sure it never gets reflected
  304. * back to userspace.
  305. */
  306. static struct dx_frame *
  307. dx_probe(struct qstr *entry, struct inode *dir,
  308. struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err)
  309. {
  310. unsigned count, indirect;
  311. struct dx_entry *at, *entries, *p, *q, *m;
  312. struct dx_root *root;
  313. struct buffer_head *bh;
  314. struct dx_frame *frame = frame_in;
  315. u32 hash;
  316. frame->bh = NULL;
  317. if (!(bh = ext3_bread (NULL,dir, 0, 0, err)))
  318. goto fail;
  319. root = (struct dx_root *) bh->b_data;
  320. if (root->info.hash_version != DX_HASH_TEA &&
  321. root->info.hash_version != DX_HASH_HALF_MD4 &&
  322. root->info.hash_version != DX_HASH_LEGACY) {
  323. ext3_warning(dir->i_sb, __func__,
  324. "Unrecognised inode hash code %d",
  325. root->info.hash_version);
  326. brelse(bh);
  327. *err = ERR_BAD_DX_DIR;
  328. goto fail;
  329. }
  330. hinfo->hash_version = root->info.hash_version;
  331. if (hinfo->hash_version <= DX_HASH_TEA)
  332. hinfo->hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
  333. hinfo->seed = EXT3_SB(dir->i_sb)->s_hash_seed;
  334. if (entry)
  335. ext3fs_dirhash(entry->name, entry->len, hinfo);
  336. hash = hinfo->hash;
  337. if (root->info.unused_flags & 1) {
  338. ext3_warning(dir->i_sb, __func__,
  339. "Unimplemented inode hash flags: %#06x",
  340. root->info.unused_flags);
  341. brelse(bh);
  342. *err = ERR_BAD_DX_DIR;
  343. goto fail;
  344. }
  345. if ((indirect = root->info.indirect_levels) > 1) {
  346. ext3_warning(dir->i_sb, __func__,
  347. "Unimplemented inode hash depth: %#06x",
  348. root->info.indirect_levels);
  349. brelse(bh);
  350. *err = ERR_BAD_DX_DIR;
  351. goto fail;
  352. }
  353. entries = (struct dx_entry *) (((char *)&root->info) +
  354. root->info.info_length);
  355. if (dx_get_limit(entries) != dx_root_limit(dir,
  356. root->info.info_length)) {
  357. ext3_warning(dir->i_sb, __func__,
  358. "dx entry: limit != root limit");
  359. brelse(bh);
  360. *err = ERR_BAD_DX_DIR;
  361. goto fail;
  362. }
  363. dxtrace (printk("Look up %x", hash));
  364. while (1)
  365. {
  366. count = dx_get_count(entries);
  367. if (!count || count > dx_get_limit(entries)) {
  368. ext3_warning(dir->i_sb, __func__,
  369. "dx entry: no count or count > limit");
  370. brelse(bh);
  371. *err = ERR_BAD_DX_DIR;
  372. goto fail2;
  373. }
  374. p = entries + 1;
  375. q = entries + count - 1;
  376. while (p <= q)
  377. {
  378. m = p + (q - p)/2;
  379. dxtrace(printk("."));
  380. if (dx_get_hash(m) > hash)
  381. q = m - 1;
  382. else
  383. p = m + 1;
  384. }
  385. if (0) // linear search cross check
  386. {
  387. unsigned n = count - 1;
  388. at = entries;
  389. while (n--)
  390. {
  391. dxtrace(printk(","));
  392. if (dx_get_hash(++at) > hash)
  393. {
  394. at--;
  395. break;
  396. }
  397. }
  398. assert (at == p - 1);
  399. }
  400. at = p - 1;
  401. dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
  402. frame->bh = bh;
  403. frame->entries = entries;
  404. frame->at = at;
  405. if (!indirect--) return frame;
  406. if (!(bh = ext3_bread (NULL,dir, dx_get_block(at), 0, err)))
  407. goto fail2;
  408. at = entries = ((struct dx_node *) bh->b_data)->entries;
  409. if (dx_get_limit(entries) != dx_node_limit (dir)) {
  410. ext3_warning(dir->i_sb, __func__,
  411. "dx entry: limit != node limit");
  412. brelse(bh);
  413. *err = ERR_BAD_DX_DIR;
  414. goto fail2;
  415. }
  416. frame++;
  417. frame->bh = NULL;
  418. }
  419. fail2:
  420. while (frame >= frame_in) {
  421. brelse(frame->bh);
  422. frame--;
  423. }
  424. fail:
  425. if (*err == ERR_BAD_DX_DIR)
  426. ext3_warning(dir->i_sb, __func__,
  427. "Corrupt dir inode %ld, running e2fsck is "
  428. "recommended.", dir->i_ino);
  429. return NULL;
  430. }
  431. static void dx_release (struct dx_frame *frames)
  432. {
  433. if (frames[0].bh == NULL)
  434. return;
  435. if (((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels)
  436. brelse(frames[1].bh);
  437. brelse(frames[0].bh);
  438. }
  439. /*
  440. * This function increments the frame pointer to search the next leaf
  441. * block, and reads in the necessary intervening nodes if the search
  442. * should be necessary. Whether or not the search is necessary is
  443. * controlled by the hash parameter. If the hash value is even, then
  444. * the search is only continued if the next block starts with that
  445. * hash value. This is used if we are searching for a specific file.
  446. *
  447. * If the hash value is HASH_NB_ALWAYS, then always go to the next block.
  448. *
  449. * This function returns 1 if the caller should continue to search,
  450. * or 0 if it should not. If there is an error reading one of the
  451. * index blocks, it will return a negative error code.
  452. *
  453. * If start_hash is non-null, it will be filled in with the starting
  454. * hash of the next page.
  455. */
  456. static int ext3_htree_next_block(struct inode *dir, __u32 hash,
  457. struct dx_frame *frame,
  458. struct dx_frame *frames,
  459. __u32 *start_hash)
  460. {
  461. struct dx_frame *p;
  462. struct buffer_head *bh;
  463. int err, num_frames = 0;
  464. __u32 bhash;
  465. p = frame;
  466. /*
  467. * Find the next leaf page by incrementing the frame pointer.
  468. * If we run out of entries in the interior node, loop around and
  469. * increment pointer in the parent node. When we break out of
  470. * this loop, num_frames indicates the number of interior
  471. * nodes that need to be read.
  472. */
  473. while (1) {
  474. if (++(p->at) < p->entries + dx_get_count(p->entries))
  475. break;
  476. if (p == frames)
  477. return 0;
  478. num_frames++;
  479. p--;
  480. }
  481. /*
  482. * If the hash is 1, then continue only if the next page has a
  483. * continuation hash of any value. This is used for readdir
  484. * handling. Otherwise, check to see if the hash matches the
  485. * desired continuation hash. If it doesn't, return, since
  486. * there's no point in reading the successive index pages.
  487. */
  488. bhash = dx_get_hash(p->at);
  489. if (start_hash)
  490. *start_hash = bhash;
  491. if ((hash & 1) == 0) {
  492. if ((bhash & ~1) != hash)
  493. return 0;
  494. }
  495. /*
  496. * If the hash is HASH_NB_ALWAYS, we always go to the next
  497. * block so no check is necessary
  498. */
  499. while (num_frames--) {
  500. if (!(bh = ext3_bread(NULL, dir, dx_get_block(p->at),
  501. 0, &err)))
  502. return err; /* Failure */
  503. p++;
  504. brelse (p->bh);
  505. p->bh = bh;
  506. p->at = p->entries = ((struct dx_node *) bh->b_data)->entries;
  507. }
  508. return 1;
  509. }
  510. /*
  511. * This function fills a red-black tree with information from a
  512. * directory block. It returns the number of directory entries loaded
  513. * into the tree. If there is an error it is returned in err.
  514. */
  515. static int htree_dirblock_to_tree(struct file *dir_file,
  516. struct inode *dir, int block,
  517. struct dx_hash_info *hinfo,
  518. __u32 start_hash, __u32 start_minor_hash)
  519. {
  520. struct buffer_head *bh;
  521. struct ext3_dir_entry_2 *de, *top;
  522. int err, count = 0;
  523. dxtrace(printk("In htree dirblock_to_tree: block %d\n", block));
  524. if (!(bh = ext3_bread (NULL, dir, block, 0, &err)))
  525. return err;
  526. de = (struct ext3_dir_entry_2 *) bh->b_data;
  527. top = (struct ext3_dir_entry_2 *) ((char *) de +
  528. dir->i_sb->s_blocksize -
  529. EXT3_DIR_REC_LEN(0));
  530. for (; de < top; de = ext3_next_entry(de)) {
  531. if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh,
  532. (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb))
  533. +((char *)de - bh->b_data))) {
  534. /* On error, skip the f_pos to the next block. */
  535. dir_file->f_pos = (dir_file->f_pos |
  536. (dir->i_sb->s_blocksize - 1)) + 1;
  537. brelse (bh);
  538. return count;
  539. }
  540. ext3fs_dirhash(de->name, de->name_len, hinfo);
  541. if ((hinfo->hash < start_hash) ||
  542. ((hinfo->hash == start_hash) &&
  543. (hinfo->minor_hash < start_minor_hash)))
  544. continue;
  545. if (de->inode == 0)
  546. continue;
  547. if ((err = ext3_htree_store_dirent(dir_file,
  548. hinfo->hash, hinfo->minor_hash, de)) != 0) {
  549. brelse(bh);
  550. return err;
  551. }
  552. count++;
  553. }
  554. brelse(bh);
  555. return count;
  556. }
  557. /*
  558. * This function fills a red-black tree with information from a
  559. * directory. We start scanning the directory in hash order, starting
  560. * at start_hash and start_minor_hash.
  561. *
  562. * This function returns the number of entries inserted into the tree,
  563. * or a negative error code.
  564. */
  565. int ext3_htree_fill_tree(struct file *dir_file, __u32 start_hash,
  566. __u32 start_minor_hash, __u32 *next_hash)
  567. {
  568. struct dx_hash_info hinfo;
  569. struct ext3_dir_entry_2 *de;
  570. struct dx_frame frames[2], *frame;
  571. struct inode *dir;
  572. int block, err;
  573. int count = 0;
  574. int ret;
  575. __u32 hashval;
  576. dxtrace(printk("In htree_fill_tree, start hash: %x:%x\n", start_hash,
  577. start_minor_hash));
  578. dir = dir_file->f_path.dentry->d_inode;
  579. if (!(EXT3_I(dir)->i_flags & EXT3_INDEX_FL)) {
  580. hinfo.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
  581. if (hinfo.hash_version <= DX_HASH_TEA)
  582. hinfo.hash_version +=
  583. EXT3_SB(dir->i_sb)->s_hash_unsigned;
  584. hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
  585. count = htree_dirblock_to_tree(dir_file, dir, 0, &hinfo,
  586. start_hash, start_minor_hash);
  587. *next_hash = ~0;
  588. return count;
  589. }
  590. hinfo.hash = start_hash;
  591. hinfo.minor_hash = 0;
  592. frame = dx_probe(NULL, dir_file->f_path.dentry->d_inode, &hinfo, frames, &err);
  593. if (!frame)
  594. return err;
  595. /* Add '.' and '..' from the htree header */
  596. if (!start_hash && !start_minor_hash) {
  597. de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
  598. if ((err = ext3_htree_store_dirent(dir_file, 0, 0, de)) != 0)
  599. goto errout;
  600. count++;
  601. }
  602. if (start_hash < 2 || (start_hash ==2 && start_minor_hash==0)) {
  603. de = (struct ext3_dir_entry_2 *) frames[0].bh->b_data;
  604. de = ext3_next_entry(de);
  605. if ((err = ext3_htree_store_dirent(dir_file, 2, 0, de)) != 0)
  606. goto errout;
  607. count++;
  608. }
  609. while (1) {
  610. block = dx_get_block(frame->at);
  611. ret = htree_dirblock_to_tree(dir_file, dir, block, &hinfo,
  612. start_hash, start_minor_hash);
  613. if (ret < 0) {
  614. err = ret;
  615. goto errout;
  616. }
  617. count += ret;
  618. hashval = ~0;
  619. ret = ext3_htree_next_block(dir, HASH_NB_ALWAYS,
  620. frame, frames, &hashval);
  621. *next_hash = hashval;
  622. if (ret < 0) {
  623. err = ret;
  624. goto errout;
  625. }
  626. /*
  627. * Stop if: (a) there are no more entries, or
  628. * (b) we have inserted at least one entry and the
  629. * next hash value is not a continuation
  630. */
  631. if ((ret == 0) ||
  632. (count && ((hashval & 1) == 0)))
  633. break;
  634. }
  635. dx_release(frames);
  636. dxtrace(printk("Fill tree: returned %d entries, next hash: %x\n",
  637. count, *next_hash));
  638. return count;
  639. errout:
  640. dx_release(frames);
  641. return (err);
  642. }
  643. /*
  644. * Directory block splitting, compacting
  645. */
  646. /*
  647. * Create map of hash values, offsets, and sizes, stored at end of block.
  648. * Returns number of entries mapped.
  649. */
  650. static int dx_make_map(struct ext3_dir_entry_2 *de, unsigned blocksize,
  651. struct dx_hash_info *hinfo, struct dx_map_entry *map_tail)
  652. {
  653. int count = 0;
  654. char *base = (char *) de;
  655. struct dx_hash_info h = *hinfo;
  656. while ((char *) de < base + blocksize)
  657. {
  658. if (de->name_len && de->inode) {
  659. ext3fs_dirhash(de->name, de->name_len, &h);
  660. map_tail--;
  661. map_tail->hash = h.hash;
  662. map_tail->offs = (u16) ((char *) de - base);
  663. map_tail->size = le16_to_cpu(de->rec_len);
  664. count++;
  665. cond_resched();
  666. }
  667. /* XXX: do we need to check rec_len == 0 case? -Chris */
  668. de = ext3_next_entry(de);
  669. }
  670. return count;
  671. }
  672. /* Sort map by hash value */
  673. static void dx_sort_map (struct dx_map_entry *map, unsigned count)
  674. {
  675. struct dx_map_entry *p, *q, *top = map + count - 1;
  676. int more;
  677. /* Combsort until bubble sort doesn't suck */
  678. while (count > 2)
  679. {
  680. count = count*10/13;
  681. if (count - 9 < 2) /* 9, 10 -> 11 */
  682. count = 11;
  683. for (p = top, q = p - count; q >= map; p--, q--)
  684. if (p->hash < q->hash)
  685. swap(*p, *q);
  686. }
  687. /* Garden variety bubble sort */
  688. do {
  689. more = 0;
  690. q = top;
  691. while (q-- > map)
  692. {
  693. if (q[1].hash >= q[0].hash)
  694. continue;
  695. swap(*(q+1), *q);
  696. more = 1;
  697. }
  698. } while(more);
  699. }
  700. static void dx_insert_block(struct dx_frame *frame, u32 hash, u32 block)
  701. {
  702. struct dx_entry *entries = frame->entries;
  703. struct dx_entry *old = frame->at, *new = old + 1;
  704. int count = dx_get_count(entries);
  705. assert(count < dx_get_limit(entries));
  706. assert(old < entries + count);
  707. memmove(new + 1, new, (char *)(entries + count) - (char *)(new));
  708. dx_set_hash(new, hash);
  709. dx_set_block(new, block);
  710. dx_set_count(entries, count + 1);
  711. }
  712. static void ext3_update_dx_flag(struct inode *inode)
  713. {
  714. if (!EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
  715. EXT3_FEATURE_COMPAT_DIR_INDEX))
  716. EXT3_I(inode)->i_flags &= ~EXT3_INDEX_FL;
  717. }
  718. /*
  719. * NOTE! unlike strncmp, ext3_match returns 1 for success, 0 for failure.
  720. *
  721. * `len <= EXT3_NAME_LEN' is guaranteed by caller.
  722. * `de != NULL' is guaranteed by caller.
  723. */
  724. static inline int ext3_match (int len, const char * const name,
  725. struct ext3_dir_entry_2 * de)
  726. {
  727. if (len != de->name_len)
  728. return 0;
  729. if (!de->inode)
  730. return 0;
  731. return !memcmp(name, de->name, len);
  732. }
  733. /*
  734. * Returns 0 if not found, -1 on failure, and 1 on success
  735. */
  736. static inline int search_dirblock(struct buffer_head * bh,
  737. struct inode *dir,
  738. struct qstr *child,
  739. unsigned long offset,
  740. struct ext3_dir_entry_2 ** res_dir)
  741. {
  742. struct ext3_dir_entry_2 * de;
  743. char * dlimit;
  744. int de_len;
  745. const char *name = child->name;
  746. int namelen = child->len;
  747. de = (struct ext3_dir_entry_2 *) bh->b_data;
  748. dlimit = bh->b_data + dir->i_sb->s_blocksize;
  749. while ((char *) de < dlimit) {
  750. /* this code is executed quadratically often */
  751. /* do minimal checking `by hand' */
  752. if ((char *) de + namelen <= dlimit &&
  753. ext3_match (namelen, name, de)) {
  754. /* found a match - just to be sure, do a full check */
  755. if (!ext3_check_dir_entry("ext3_find_entry",
  756. dir, de, bh, offset))
  757. return -1;
  758. *res_dir = de;
  759. return 1;
  760. }
  761. /* prevent looping on a bad block */
  762. de_len = ext3_rec_len_from_disk(de->rec_len);
  763. if (de_len <= 0)
  764. return -1;
  765. offset += de_len;
  766. de = (struct ext3_dir_entry_2 *) ((char *) de + de_len);
  767. }
  768. return 0;
  769. }
  770. /*
  771. * ext3_find_entry()
  772. *
  773. * finds an entry in the specified directory with the wanted name. It
  774. * returns the cache buffer in which the entry was found, and the entry
  775. * itself (as a parameter - res_dir). It does NOT read the inode of the
  776. * entry - you'll have to do that yourself if you want to.
  777. *
  778. * The returned buffer_head has ->b_count elevated. The caller is expected
  779. * to brelse() it when appropriate.
  780. */
  781. static struct buffer_head *ext3_find_entry(struct inode *dir,
  782. struct qstr *entry,
  783. struct ext3_dir_entry_2 **res_dir)
  784. {
  785. struct super_block * sb;
  786. struct buffer_head * bh_use[NAMEI_RA_SIZE];
  787. struct buffer_head * bh, *ret = NULL;
  788. unsigned long start, block, b;
  789. const u8 *name = entry->name;
  790. int ra_max = 0; /* Number of bh's in the readahead
  791. buffer, bh_use[] */
  792. int ra_ptr = 0; /* Current index into readahead
  793. buffer */
  794. int num = 0;
  795. int nblocks, i, err;
  796. int namelen;
  797. *res_dir = NULL;
  798. sb = dir->i_sb;
  799. namelen = entry->len;
  800. if (namelen > EXT3_NAME_LEN)
  801. return NULL;
  802. if ((namelen <= 2) && (name[0] == '.') &&
  803. (name[1] == '.' || name[1] == 0)) {
  804. /*
  805. * "." or ".." will only be in the first block
  806. * NFS may look up ".."; "." should be handled by the VFS
  807. */
  808. block = start = 0;
  809. nblocks = 1;
  810. goto restart;
  811. }
  812. if (is_dx(dir)) {
  813. bh = ext3_dx_find_entry(dir, entry, res_dir, &err);
  814. /*
  815. * On success, or if the error was file not found,
  816. * return. Otherwise, fall back to doing a search the
  817. * old fashioned way.
  818. */
  819. if (bh || (err != ERR_BAD_DX_DIR))
  820. return bh;
  821. dxtrace(printk("ext3_find_entry: dx failed, falling back\n"));
  822. }
  823. nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
  824. start = EXT3_I(dir)->i_dir_start_lookup;
  825. if (start >= nblocks)
  826. start = 0;
  827. block = start;
  828. restart:
  829. do {
  830. /*
  831. * We deal with the read-ahead logic here.
  832. */
  833. if (ra_ptr >= ra_max) {
  834. /* Refill the readahead buffer */
  835. ra_ptr = 0;
  836. b = block;
  837. for (ra_max = 0; ra_max < NAMEI_RA_SIZE; ra_max++) {
  838. /*
  839. * Terminate if we reach the end of the
  840. * directory and must wrap, or if our
  841. * search has finished at this block.
  842. */
  843. if (b >= nblocks || (num && block == start)) {
  844. bh_use[ra_max] = NULL;
  845. break;
  846. }
  847. num++;
  848. bh = ext3_getblk(NULL, dir, b++, 0, &err);
  849. bh_use[ra_max] = bh;
  850. if (bh)
  851. ll_rw_block(READ_META, 1, &bh);
  852. }
  853. }
  854. if ((bh = bh_use[ra_ptr++]) == NULL)
  855. goto next;
  856. wait_on_buffer(bh);
  857. if (!buffer_uptodate(bh)) {
  858. /* read error, skip block & hope for the best */
  859. ext3_error(sb, __func__, "reading directory #%lu "
  860. "offset %lu", dir->i_ino, block);
  861. brelse(bh);
  862. goto next;
  863. }
  864. i = search_dirblock(bh, dir, entry,
  865. block << EXT3_BLOCK_SIZE_BITS(sb), res_dir);
  866. if (i == 1) {
  867. EXT3_I(dir)->i_dir_start_lookup = block;
  868. ret = bh;
  869. goto cleanup_and_exit;
  870. } else {
  871. brelse(bh);
  872. if (i < 0)
  873. goto cleanup_and_exit;
  874. }
  875. next:
  876. if (++block >= nblocks)
  877. block = 0;
  878. } while (block != start);
  879. /*
  880. * If the directory has grown while we were searching, then
  881. * search the last part of the directory before giving up.
  882. */
  883. block = nblocks;
  884. nblocks = dir->i_size >> EXT3_BLOCK_SIZE_BITS(sb);
  885. if (block < nblocks) {
  886. start = 0;
  887. goto restart;
  888. }
  889. cleanup_and_exit:
  890. /* Clean up the read-ahead blocks */
  891. for (; ra_ptr < ra_max; ra_ptr++)
  892. brelse (bh_use[ra_ptr]);
  893. return ret;
  894. }
  895. static struct buffer_head * ext3_dx_find_entry(struct inode *dir,
  896. struct qstr *entry, struct ext3_dir_entry_2 **res_dir,
  897. int *err)
  898. {
  899. struct super_block *sb = dir->i_sb;
  900. struct dx_hash_info hinfo;
  901. struct dx_frame frames[2], *frame;
  902. struct buffer_head *bh;
  903. unsigned long block;
  904. int retval;
  905. if (!(frame = dx_probe(entry, dir, &hinfo, frames, err)))
  906. return NULL;
  907. do {
  908. block = dx_get_block(frame->at);
  909. if (!(bh = ext3_bread (NULL,dir, block, 0, err)))
  910. goto errout;
  911. retval = search_dirblock(bh, dir, entry,
  912. block << EXT3_BLOCK_SIZE_BITS(sb),
  913. res_dir);
  914. if (retval == 1) {
  915. dx_release(frames);
  916. return bh;
  917. }
  918. brelse(bh);
  919. if (retval == -1) {
  920. *err = ERR_BAD_DX_DIR;
  921. goto errout;
  922. }
  923. /* Check to see if we should continue to search */
  924. retval = ext3_htree_next_block(dir, hinfo.hash, frame,
  925. frames, NULL);
  926. if (retval < 0) {
  927. ext3_warning(sb, __func__,
  928. "error reading index page in directory #%lu",
  929. dir->i_ino);
  930. *err = retval;
  931. goto errout;
  932. }
  933. } while (retval == 1);
  934. *err = -ENOENT;
  935. errout:
  936. dxtrace(printk("%s not found\n", entry->name));
  937. dx_release (frames);
  938. return NULL;
  939. }
  940. static struct dentry *ext3_lookup(struct inode * dir, struct dentry *dentry, struct nameidata *nd)
  941. {
  942. struct inode * inode;
  943. struct ext3_dir_entry_2 * de;
  944. struct buffer_head * bh;
  945. if (dentry->d_name.len > EXT3_NAME_LEN)
  946. return ERR_PTR(-ENAMETOOLONG);
  947. bh = ext3_find_entry(dir, &dentry->d_name, &de);
  948. inode = NULL;
  949. if (bh) {
  950. unsigned long ino = le32_to_cpu(de->inode);
  951. brelse (bh);
  952. if (!ext3_valid_inum(dir->i_sb, ino)) {
  953. ext3_error(dir->i_sb, "ext3_lookup",
  954. "bad inode number: %lu", ino);
  955. return ERR_PTR(-EIO);
  956. }
  957. inode = ext3_iget(dir->i_sb, ino);
  958. if (inode == ERR_PTR(-ESTALE)) {
  959. ext3_error(dir->i_sb, __func__,
  960. "deleted inode referenced: %lu",
  961. ino);
  962. return ERR_PTR(-EIO);
  963. }
  964. }
  965. return d_splice_alias(inode, dentry);
  966. }
  967. struct dentry *ext3_get_parent(struct dentry *child)
  968. {
  969. unsigned long ino;
  970. struct qstr dotdot = {.name = "..", .len = 2};
  971. struct ext3_dir_entry_2 * de;
  972. struct buffer_head *bh;
  973. bh = ext3_find_entry(child->d_inode, &dotdot, &de);
  974. if (!bh)
  975. return ERR_PTR(-ENOENT);
  976. ino = le32_to_cpu(de->inode);
  977. brelse(bh);
  978. if (!ext3_valid_inum(child->d_inode->i_sb, ino)) {
  979. ext3_error(child->d_inode->i_sb, "ext3_get_parent",
  980. "bad inode number: %lu", ino);
  981. return ERR_PTR(-EIO);
  982. }
  983. return d_obtain_alias(ext3_iget(child->d_inode->i_sb, ino));
  984. }
  985. #define S_SHIFT 12
  986. static unsigned char ext3_type_by_mode[S_IFMT >> S_SHIFT] = {
  987. [S_IFREG >> S_SHIFT] = EXT3_FT_REG_FILE,
  988. [S_IFDIR >> S_SHIFT] = EXT3_FT_DIR,
  989. [S_IFCHR >> S_SHIFT] = EXT3_FT_CHRDEV,
  990. [S_IFBLK >> S_SHIFT] = EXT3_FT_BLKDEV,
  991. [S_IFIFO >> S_SHIFT] = EXT3_FT_FIFO,
  992. [S_IFSOCK >> S_SHIFT] = EXT3_FT_SOCK,
  993. [S_IFLNK >> S_SHIFT] = EXT3_FT_SYMLINK,
  994. };
  995. static inline void ext3_set_de_type(struct super_block *sb,
  996. struct ext3_dir_entry_2 *de,
  997. umode_t mode) {
  998. if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_FILETYPE))
  999. de->file_type = ext3_type_by_mode[(mode & S_IFMT)>>S_SHIFT];
  1000. }
  1001. /*
  1002. * Move count entries from end of map between two memory locations.
  1003. * Returns pointer to last entry moved.
  1004. */
  1005. static struct ext3_dir_entry_2 *
  1006. dx_move_dirents(char *from, char *to, struct dx_map_entry *map, int count)
  1007. {
  1008. unsigned rec_len = 0;
  1009. while (count--) {
  1010. struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *) (from + map->offs);
  1011. rec_len = EXT3_DIR_REC_LEN(de->name_len);
  1012. memcpy (to, de, rec_len);
  1013. ((struct ext3_dir_entry_2 *) to)->rec_len =
  1014. ext3_rec_len_to_disk(rec_len);
  1015. de->inode = 0;
  1016. map++;
  1017. to += rec_len;
  1018. }
  1019. return (struct ext3_dir_entry_2 *) (to - rec_len);
  1020. }
  1021. /*
  1022. * Compact each dir entry in the range to the minimal rec_len.
  1023. * Returns pointer to last entry in range.
  1024. */
  1025. static struct ext3_dir_entry_2 *dx_pack_dirents(char *base, unsigned blocksize)
  1026. {
  1027. struct ext3_dir_entry_2 *next, *to, *prev;
  1028. struct ext3_dir_entry_2 *de = (struct ext3_dir_entry_2 *)base;
  1029. unsigned rec_len = 0;
  1030. prev = to = de;
  1031. while ((char *)de < base + blocksize) {
  1032. next = ext3_next_entry(de);
  1033. if (de->inode && de->name_len) {
  1034. rec_len = EXT3_DIR_REC_LEN(de->name_len);
  1035. if (de > to)
  1036. memmove(to, de, rec_len);
  1037. to->rec_len = ext3_rec_len_to_disk(rec_len);
  1038. prev = to;
  1039. to = (struct ext3_dir_entry_2 *) (((char *) to) + rec_len);
  1040. }
  1041. de = next;
  1042. }
  1043. return prev;
  1044. }
  1045. /*
  1046. * Split a full leaf block to make room for a new dir entry.
  1047. * Allocate a new block, and move entries so that they are approx. equally full.
  1048. * Returns pointer to de in block into which the new entry will be inserted.
  1049. */
  1050. static struct ext3_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
  1051. struct buffer_head **bh,struct dx_frame *frame,
  1052. struct dx_hash_info *hinfo, int *error)
  1053. {
  1054. unsigned blocksize = dir->i_sb->s_blocksize;
  1055. unsigned count, continued;
  1056. struct buffer_head *bh2;
  1057. u32 newblock;
  1058. u32 hash2;
  1059. struct dx_map_entry *map;
  1060. char *data1 = (*bh)->b_data, *data2;
  1061. unsigned split, move, size;
  1062. struct ext3_dir_entry_2 *de = NULL, *de2;
  1063. int err = 0, i;
  1064. bh2 = ext3_append (handle, dir, &newblock, &err);
  1065. if (!(bh2)) {
  1066. brelse(*bh);
  1067. *bh = NULL;
  1068. goto errout;
  1069. }
  1070. BUFFER_TRACE(*bh, "get_write_access");
  1071. err = ext3_journal_get_write_access(handle, *bh);
  1072. if (err)
  1073. goto journal_error;
  1074. BUFFER_TRACE(frame->bh, "get_write_access");
  1075. err = ext3_journal_get_write_access(handle, frame->bh);
  1076. if (err)
  1077. goto journal_error;
  1078. data2 = bh2->b_data;
  1079. /* create map at the end of the data2 block */
  1080. map = (struct dx_map_entry *) (data2 + blocksize);
  1081. count = dx_make_map ((struct ext3_dir_entry_2 *) data1,
  1082. blocksize, hinfo, map);
  1083. map -= count;
  1084. dx_sort_map (map, count);
  1085. /* Split the existing block in the middle, size-wise */
  1086. size = 0;
  1087. move = 0;
  1088. for (i = count-1; i >= 0; i--) {
  1089. /* is more than half of this entry in 2nd half of the block? */
  1090. if (size + map[i].size/2 > blocksize/2)
  1091. break;
  1092. size += map[i].size;
  1093. move++;
  1094. }
  1095. /* map index at which we will split */
  1096. split = count - move;
  1097. hash2 = map[split].hash;
  1098. continued = hash2 == map[split - 1].hash;
  1099. dxtrace(printk("Split block %i at %x, %i/%i\n",
  1100. dx_get_block(frame->at), hash2, split, count-split));
  1101. /* Fancy dance to stay within two buffers */
  1102. de2 = dx_move_dirents(data1, data2, map + split, count - split);
  1103. de = dx_pack_dirents(data1,blocksize);
  1104. de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
  1105. de2->rec_len = ext3_rec_len_to_disk(data2 + blocksize - (char *) de2);
  1106. dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data1, blocksize, 1));
  1107. dxtrace(dx_show_leaf (hinfo, (struct ext3_dir_entry_2 *) data2, blocksize, 1));
  1108. /* Which block gets the new entry? */
  1109. if (hinfo->hash >= hash2)
  1110. {
  1111. swap(*bh, bh2);
  1112. de = de2;
  1113. }
  1114. dx_insert_block (frame, hash2 + continued, newblock);
  1115. err = ext3_journal_dirty_metadata (handle, bh2);
  1116. if (err)
  1117. goto journal_error;
  1118. err = ext3_journal_dirty_metadata (handle, frame->bh);
  1119. if (err)
  1120. goto journal_error;
  1121. brelse (bh2);
  1122. dxtrace(dx_show_index ("frame", frame->entries));
  1123. return de;
  1124. journal_error:
  1125. brelse(*bh);
  1126. brelse(bh2);
  1127. *bh = NULL;
  1128. ext3_std_error(dir->i_sb, err);
  1129. errout:
  1130. *error = err;
  1131. return NULL;
  1132. }
  1133. /*
  1134. * Add a new entry into a directory (leaf) block. If de is non-NULL,
  1135. * it points to a directory entry which is guaranteed to be large
  1136. * enough for the new directory entry. If de is NULL, then
  1137. * add_dirent_to_buf will attempt to search the directory block for
  1138. * space. It will return -ENOSPC if no space is available, -EIO if the
  1139. * block is corrupted, and -EEXIST if the directory entry already exists.
  1140. *
  1141. * NOTE! bh is NOT released in the case where ENOSPC is returned. In
  1142. * all other cases bh is released.
  1143. */
  1144. static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
  1145. struct inode *inode, struct ext3_dir_entry_2 *de,
  1146. struct buffer_head * bh)
  1147. {
  1148. struct inode *dir = dentry->d_parent->d_inode;
  1149. const char *name = dentry->d_name.name;
  1150. int namelen = dentry->d_name.len;
  1151. unsigned long offset = 0;
  1152. unsigned short reclen;
  1153. int nlen, rlen, err;
  1154. char *top;
  1155. reclen = EXT3_DIR_REC_LEN(namelen);
  1156. if (!de) {
  1157. de = (struct ext3_dir_entry_2 *)bh->b_data;
  1158. top = bh->b_data + dir->i_sb->s_blocksize - reclen;
  1159. while ((char *) de <= top) {
  1160. if (!ext3_check_dir_entry("ext3_add_entry", dir, de,
  1161. bh, offset)) {
  1162. brelse (bh);
  1163. return -EIO;
  1164. }
  1165. if (ext3_match (namelen, name, de)) {
  1166. brelse (bh);
  1167. return -EEXIST;
  1168. }
  1169. nlen = EXT3_DIR_REC_LEN(de->name_len);
  1170. rlen = ext3_rec_len_from_disk(de->rec_len);
  1171. if ((de->inode? rlen - nlen: rlen) >= reclen)
  1172. break;
  1173. de = (struct ext3_dir_entry_2 *)((char *)de + rlen);
  1174. offset += rlen;
  1175. }
  1176. if ((char *) de > top)
  1177. return -ENOSPC;
  1178. }
  1179. BUFFER_TRACE(bh, "get_write_access");
  1180. err = ext3_journal_get_write_access(handle, bh);
  1181. if (err) {
  1182. ext3_std_error(dir->i_sb, err);
  1183. brelse(bh);
  1184. return err;
  1185. }
  1186. /* By now the buffer is marked for journaling */
  1187. nlen = EXT3_DIR_REC_LEN(de->name_len);
  1188. rlen = ext3_rec_len_from_disk(de->rec_len);
  1189. if (de->inode) {
  1190. struct ext3_dir_entry_2 *de1 = (struct ext3_dir_entry_2 *)((char *)de + nlen);
  1191. de1->rec_len = ext3_rec_len_to_disk(rlen - nlen);
  1192. de->rec_len = ext3_rec_len_to_disk(nlen);
  1193. de = de1;
  1194. }
  1195. de->file_type = EXT3_FT_UNKNOWN;
  1196. if (inode) {
  1197. de->inode = cpu_to_le32(inode->i_ino);
  1198. ext3_set_de_type(dir->i_sb, de, inode->i_mode);
  1199. } else
  1200. de->inode = 0;
  1201. de->name_len = namelen;
  1202. memcpy (de->name, name, namelen);
  1203. /*
  1204. * XXX shouldn't update any times until successful
  1205. * completion of syscall, but too many callers depend
  1206. * on this.
  1207. *
  1208. * XXX similarly, too many callers depend on
  1209. * ext3_new_inode() setting the times, but error
  1210. * recovery deletes the inode, so the worst that can
  1211. * happen is that the times are slightly out of date
  1212. * and/or different from the directory change time.
  1213. */
  1214. dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
  1215. ext3_update_dx_flag(dir);
  1216. dir->i_version++;
  1217. ext3_mark_inode_dirty(handle, dir);
  1218. BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
  1219. err = ext3_journal_dirty_metadata(handle, bh);
  1220. if (err)
  1221. ext3_std_error(dir->i_sb, err);
  1222. brelse(bh);
  1223. return 0;
  1224. }
  1225. /*
  1226. * This converts a one block unindexed directory to a 3 block indexed
  1227. * directory, and adds the dentry to the indexed directory.
  1228. */
  1229. static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
  1230. struct inode *inode, struct buffer_head *bh)
  1231. {
  1232. struct inode *dir = dentry->d_parent->d_inode;
  1233. const char *name = dentry->d_name.name;
  1234. int namelen = dentry->d_name.len;
  1235. struct buffer_head *bh2;
  1236. struct dx_root *root;
  1237. struct dx_frame frames[2], *frame;
  1238. struct dx_entry *entries;
  1239. struct ext3_dir_entry_2 *de, *de2;
  1240. char *data1, *top;
  1241. unsigned len;
  1242. int retval;
  1243. unsigned blocksize;
  1244. struct dx_hash_info hinfo;
  1245. u32 block;
  1246. struct fake_dirent *fde;
  1247. blocksize = dir->i_sb->s_blocksize;
  1248. dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
  1249. retval = ext3_journal_get_write_access(handle, bh);
  1250. if (retval) {
  1251. ext3_std_error(dir->i_sb, retval);
  1252. brelse(bh);
  1253. return retval;
  1254. }
  1255. root = (struct dx_root *) bh->b_data;
  1256. /* The 0th block becomes the root, move the dirents out */
  1257. fde = &root->dotdot;
  1258. de = (struct ext3_dir_entry_2 *)((char *)fde +
  1259. ext3_rec_len_from_disk(fde->rec_len));
  1260. if ((char *) de >= (((char *) root) + blocksize)) {
  1261. ext3_error(dir->i_sb, __func__,
  1262. "invalid rec_len for '..' in inode %lu",
  1263. dir->i_ino);
  1264. brelse(bh);
  1265. return -EIO;
  1266. }
  1267. len = ((char *) root) + blocksize - (char *) de;
  1268. bh2 = ext3_append (handle, dir, &block, &retval);
  1269. if (!(bh2)) {
  1270. brelse(bh);
  1271. return retval;
  1272. }
  1273. EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
  1274. data1 = bh2->b_data;
  1275. memcpy (data1, de, len);
  1276. de = (struct ext3_dir_entry_2 *) data1;
  1277. top = data1 + len;
  1278. while ((char *)(de2 = ext3_next_entry(de)) < top)
  1279. de = de2;
  1280. de->rec_len = ext3_rec_len_to_disk(data1 + blocksize - (char *) de);
  1281. /* Initialize the root; the dot dirents already exist */
  1282. de = (struct ext3_dir_entry_2 *) (&root->dotdot);
  1283. de->rec_len = ext3_rec_len_to_disk(blocksize - EXT3_DIR_REC_LEN(2));
  1284. memset (&root->info, 0, sizeof(root->info));
  1285. root->info.info_length = sizeof(root->info);
  1286. root->info.hash_version = EXT3_SB(dir->i_sb)->s_def_hash_version;
  1287. entries = root->entries;
  1288. dx_set_block (entries, 1);
  1289. dx_set_count (entries, 1);
  1290. dx_set_limit (entries, dx_root_limit(dir, sizeof(root->info)));
  1291. /* Initialize as for dx_probe */
  1292. hinfo.hash_version = root->info.hash_version;
  1293. if (hinfo.hash_version <= DX_HASH_TEA)
  1294. hinfo.hash_version += EXT3_SB(dir->i_sb)->s_hash_unsigned;
  1295. hinfo.seed = EXT3_SB(dir->i_sb)->s_hash_seed;
  1296. ext3fs_dirhash(name, namelen, &hinfo);
  1297. frame = frames;
  1298. frame->entries = entries;
  1299. frame->at = entries;
  1300. frame->bh = bh;
  1301. bh = bh2;
  1302. /*
  1303. * Mark buffers dirty here so that if do_split() fails we write a
  1304. * consistent set of buffers to disk.
  1305. */
  1306. ext3_journal_dirty_metadata(handle, frame->bh);
  1307. ext3_journal_dirty_metadata(handle, bh);
  1308. de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
  1309. if (!de) {
  1310. ext3_mark_inode_dirty(handle, dir);
  1311. dx_release(frames);
  1312. return retval;
  1313. }
  1314. dx_release(frames);
  1315. return add_dirent_to_buf(handle, dentry, inode, de, bh);
  1316. }
  1317. /*
  1318. * ext3_add_entry()
  1319. *
  1320. * adds a file entry to the specified directory, using the same
  1321. * semantics as ext3_find_entry(). It returns 0 on success or a negative error code on failure.
  1322. *
  1323. * NOTE!! The inode part of 'de' is left at 0 - which means you
  1324. * may not sleep between calling this and putting something into
  1325. * the entry, as someone else might have used it while you slept.
  1326. */
  1327. static int ext3_add_entry (handle_t *handle, struct dentry *dentry,
  1328. struct inode *inode)
  1329. {
  1330. struct inode *dir = dentry->d_parent->d_inode;
  1331. struct buffer_head * bh;
  1332. struct ext3_dir_entry_2 *de;
  1333. struct super_block * sb;
  1334. int retval;
  1335. int dx_fallback=0;
  1336. unsigned blocksize;
  1337. u32 block, blocks;
  1338. sb = dir->i_sb;
  1339. blocksize = sb->s_blocksize;
  1340. if (!dentry->d_name.len)
  1341. return -EINVAL;
  1342. if (is_dx(dir)) {
  1343. retval = ext3_dx_add_entry(handle, dentry, inode);
  1344. if (!retval || (retval != ERR_BAD_DX_DIR))
  1345. return retval;
  1346. EXT3_I(dir)->i_flags &= ~EXT3_INDEX_FL;
  1347. dx_fallback++;
  1348. ext3_mark_inode_dirty(handle, dir);
  1349. }
  1350. blocks = dir->i_size >> sb->s_blocksize_bits;
  1351. for (block = 0; block < blocks; block++) {
  1352. bh = ext3_bread(handle, dir, block, 0, &retval);
  1353. if(!bh)
  1354. return retval;
  1355. retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
  1356. if (retval != -ENOSPC)
  1357. return retval;
  1358. if (blocks == 1 && !dx_fallback &&
  1359. EXT3_HAS_COMPAT_FEATURE(sb, EXT3_FEATURE_COMPAT_DIR_INDEX))
  1360. return make_indexed_dir(handle, dentry, inode, bh);
  1361. brelse(bh);
  1362. }
  1363. bh = ext3_append(handle, dir, &block, &retval);
  1364. if (!bh)
  1365. return retval;
  1366. de = (struct ext3_dir_entry_2 *) bh->b_data;
  1367. de->inode = 0;
  1368. de->rec_len = ext3_rec_len_to_disk(blocksize);
  1369. return add_dirent_to_buf(handle, dentry, inode, de, bh);
  1370. }
  1371. /*
  1372. * Returns 0 for success, or a negative error value
  1373. */
  1374. static int ext3_dx_add_entry(handle_t *handle, struct dentry *dentry,
  1375. struct inode *inode)
  1376. {
  1377. struct dx_frame frames[2], *frame;
  1378. struct dx_entry *entries, *at;
  1379. struct dx_hash_info hinfo;
  1380. struct buffer_head * bh;
  1381. struct inode *dir = dentry->d_parent->d_inode;
  1382. struct super_block * sb = dir->i_sb;
  1383. struct ext3_dir_entry_2 *de;
  1384. int err;
  1385. frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err);
  1386. if (!frame)
  1387. return err;
  1388. entries = frame->entries;
  1389. at = frame->at;
  1390. if (!(bh = ext3_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
  1391. goto cleanup;
  1392. BUFFER_TRACE(bh, "get_write_access");
  1393. err = ext3_journal_get_write_access(handle, bh);
  1394. if (err)
  1395. goto journal_error;
  1396. err = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
  1397. if (err != -ENOSPC) {
  1398. bh = NULL;
  1399. goto cleanup;
  1400. }
  1401. /* Block full, should compress but for now just split */
  1402. dxtrace(printk("using %u of %u node entries\n",
  1403. dx_get_count(entries), dx_get_limit(entries)));
  1404. /* Need to split index? */
  1405. if (dx_get_count(entries) == dx_get_limit(entries)) {
  1406. u32 newblock;
  1407. unsigned icount = dx_get_count(entries);
  1408. int levels = frame - frames;
  1409. struct dx_entry *entries2;
  1410. struct dx_node *node2;
  1411. struct buffer_head *bh2;
  1412. if (levels && (dx_get_count(frames->entries) ==
  1413. dx_get_limit(frames->entries))) {
  1414. ext3_warning(sb, __func__,
  1415. "Directory index full!");
  1416. err = -ENOSPC;
  1417. goto cleanup;
  1418. }
  1419. bh2 = ext3_append (handle, dir, &newblock, &err);
  1420. if (!(bh2))
  1421. goto cleanup;
  1422. node2 = (struct dx_node *)(bh2->b_data);
  1423. entries2 = node2->entries;
  1424. memset(&node2->fake, 0, sizeof(struct fake_dirent));
  1425. node2->fake.rec_len = ext3_rec_len_to_disk(sb->s_blocksize);
  1426. BUFFER_TRACE(frame->bh, "get_write_access");
  1427. err = ext3_journal_get_write_access(handle, frame->bh);
  1428. if (err)
  1429. goto journal_error;
  1430. if (levels) {
  1431. unsigned icount1 = icount/2, icount2 = icount - icount1;
  1432. unsigned hash2 = dx_get_hash(entries + icount1);
  1433. dxtrace(printk("Split index %i/%i\n", icount1, icount2));
  1434. BUFFER_TRACE(frame->bh, "get_write_access"); /* index root */
  1435. err = ext3_journal_get_write_access(handle,
  1436. frames[0].bh);
  1437. if (err)
  1438. goto journal_error;
  1439. memcpy ((char *) entries2, (char *) (entries + icount1),
  1440. icount2 * sizeof(struct dx_entry));
  1441. dx_set_count (entries, icount1);
  1442. dx_set_count (entries2, icount2);
  1443. dx_set_limit (entries2, dx_node_limit(dir));
  1444. /* Which index block gets the new entry? */
  1445. if (at - entries >= icount1) {
  1446. frame->at = at = at - entries - icount1 + entries2;
  1447. frame->entries = entries = entries2;
  1448. swap(frame->bh, bh2);
  1449. }
  1450. dx_insert_block (frames + 0, hash2, newblock);
  1451. dxtrace(dx_show_index ("node", frames[1].entries));
  1452. dxtrace(dx_show_index ("node",
  1453. ((struct dx_node *) bh2->b_data)->entries));
  1454. err = ext3_journal_dirty_metadata(handle, bh2);
  1455. if (err)
  1456. goto journal_error;
  1457. brelse (bh2);
  1458. } else {
  1459. dxtrace(printk("Creating second level index...\n"));
  1460. memcpy((char *) entries2, (char *) entries,
  1461. icount * sizeof(struct dx_entry));
  1462. dx_set_limit(entries2, dx_node_limit(dir));
  1463. /* Set up root */
  1464. dx_set_count(entries, 1);
  1465. dx_set_block(entries + 0, newblock);
  1466. ((struct dx_root *) frames[0].bh->b_data)->info.indirect_levels = 1;
  1467. /* Add new access path frame */
  1468. frame = frames + 1;
  1469. frame->at = at = at - entries + entries2;
  1470. frame->entries = entries = entries2;
  1471. frame->bh = bh2;
  1472. err = ext3_journal_get_write_access(handle,
  1473. frame->bh);
  1474. if (err)
  1475. goto journal_error;
  1476. }
  1477. err = ext3_journal_dirty_metadata(handle, frames[0].bh);
  1478. if (err)
  1479. goto journal_error;
  1480. }
  1481. de = do_split(handle, dir, &bh, frame, &hinfo, &err);
  1482. if (!de)
  1483. goto cleanup;
  1484. err = add_dirent_to_buf(handle, dentry, inode, de, bh);
  1485. bh = NULL;
  1486. goto cleanup;
  1487. journal_error:
  1488. ext3_std_error(dir->i_sb, err);
  1489. cleanup:
  1490. if (bh)
  1491. brelse(bh);
  1492. dx_release(frames);
  1493. return err;
  1494. }
  1495. /*
  1496. * ext3_delete_entry deletes a directory entry by merging it with the
  1497. * previous entry
  1498. */
  1499. static int ext3_delete_entry (handle_t *handle,
  1500. struct inode * dir,
  1501. struct ext3_dir_entry_2 * de_del,
  1502. struct buffer_head * bh)
  1503. {
  1504. struct ext3_dir_entry_2 * de, * pde;
  1505. int i;
  1506. i = 0;
  1507. pde = NULL;
  1508. de = (struct ext3_dir_entry_2 *) bh->b_data;
  1509. while (i < bh->b_size) {
  1510. if (!ext3_check_dir_entry("ext3_delete_entry", dir, de, bh, i))
  1511. return -EIO;
  1512. if (de == de_del) {
  1513. int err;
  1514. BUFFER_TRACE(bh, "get_write_access");
  1515. err = ext3_journal_get_write_access(handle, bh);
  1516. if (err)
  1517. goto journal_error;
  1518. if (pde)
  1519. pde->rec_len = ext3_rec_len_to_disk(
  1520. ext3_rec_len_from_disk(pde->rec_len) +
  1521. ext3_rec_len_from_disk(de->rec_len));
  1522. else
  1523. de->inode = 0;
  1524. dir->i_version++;
  1525. BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
  1526. err = ext3_journal_dirty_metadata(handle, bh);
  1527. if (err) {
  1528. journal_error:
  1529. ext3_std_error(dir->i_sb, err);
  1530. return err;
  1531. }
  1532. return 0;
  1533. }
  1534. i += ext3_rec_len_from_disk(de->rec_len);
  1535. pde = de;
  1536. de = ext3_next_entry(de);
  1537. }
  1538. return -ENOENT;
  1539. }
  1540. static int ext3_add_nondir(handle_t *handle,
  1541. struct dentry *dentry, struct inode *inode)
  1542. {
  1543. int err = ext3_add_entry(handle, dentry, inode);
  1544. if (!err) {
  1545. ext3_mark_inode_dirty(handle, inode);
  1546. d_instantiate(dentry, inode);
  1547. unlock_new_inode(inode);
  1548. return 0;
  1549. }
  1550. drop_nlink(inode);
  1551. unlock_new_inode(inode);
  1552. iput(inode);
  1553. return err;
  1554. }
  1555. /*
  1556. * By the time this is called, we already have created
  1557. * the directory cache entry for the new file, but it
  1558. * is so far negative - it has no inode.
  1559. *
  1560. * If the create succeeds, we fill in the inode information
  1561. * with d_instantiate().
  1562. */
  1563. static int ext3_create (struct inode * dir, struct dentry * dentry, int mode,
  1564. struct nameidata *nd)
  1565. {
  1566. handle_t *handle;
  1567. struct inode * inode;
  1568. int err, retries = 0;
  1569. dquot_initialize(dir);
  1570. retry:
  1571. handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
  1572. EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
  1573. EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
  1574. if (IS_ERR(handle))
  1575. return PTR_ERR(handle);
  1576. if (IS_DIRSYNC(dir))
  1577. handle->h_sync = 1;
  1578. inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
  1579. err = PTR_ERR(inode);
  1580. if (!IS_ERR(inode)) {
  1581. inode->i_op = &ext3_file_inode_operations;
  1582. inode->i_fop = &ext3_file_operations;
  1583. ext3_set_aops(inode);
  1584. err = ext3_add_nondir(handle, dentry, inode);
  1585. }
  1586. ext3_journal_stop(handle);
  1587. if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
  1588. goto retry;
  1589. return err;
  1590. }
  1591. static int ext3_mknod (struct inode * dir, struct dentry *dentry,
  1592. int mode, dev_t rdev)
  1593. {
  1594. handle_t *handle;
  1595. struct inode *inode;
  1596. int err, retries = 0;
  1597. if (!new_valid_dev(rdev))
  1598. return -EINVAL;
  1599. dquot_initialize(dir);
  1600. retry:
  1601. handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
  1602. EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
  1603. EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
  1604. if (IS_ERR(handle))
  1605. return PTR_ERR(handle);
  1606. if (IS_DIRSYNC(dir))
  1607. handle->h_sync = 1;
  1608. inode = ext3_new_inode (handle, dir, &dentry->d_name, mode);
  1609. err = PTR_ERR(inode);
  1610. if (!IS_ERR(inode)) {
  1611. init_special_inode(inode, inode->i_mode, rdev);
  1612. #ifdef CONFIG_EXT3_FS_XATTR
  1613. inode->i_op = &ext3_special_inode_operations;
  1614. #endif
  1615. err = ext3_add_nondir(handle, dentry, inode);
  1616. }
  1617. ext3_journal_stop(handle);
  1618. if (err == -ENOSPC && ext3_should_retry_alloc(dir->i_sb, &retries))
  1619. goto retry;
  1620. return err;
  1621. }
  1622. static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode)
  1623. {
  1624. handle_t *handle;
  1625. struct inode * inode;
  1626. struct buffer_head * dir_block = NULL;
  1627. struct ext3_dir_entry_2 * de;
  1628. int err, retries = 0;
  1629. if (dir->i_nlink >= EXT3_LINK_MAX)
  1630. return -EMLINK;
  1631. dquot_initialize(dir);
  1632. retry:
  1633. handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) +
  1634. EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 +
  1635. EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb));
  1636. if (IS_ERR(handle))
  1637. return PTR_ERR(handle);
  1638. if (IS_DIRSYNC(dir))
  1639. handle->h_sync = 1;
  1640. inode = ext3_new_inode (handle, dir, &dentry->d_name, S_IFDIR | mode);
  1641. err = PTR_ERR(inode);
  1642. if (IS_ERR(inode))
  1643. goto out_stop;
  1644. inode->i_op = &ext3_dir_inode_operations;
  1645. inode->i_fop = &ext3_dir_operations;
  1646. inode->i_size = EXT3_I(inode)->i_disksize = inode->i_sb->s_blocksize;
  1647. dir_block = ext3_bread (handle, inode, 0, 1, &err);
  1648. if (!dir_block)
  1649. goto out_clear_inode;
  1650. BUFFER_TRACE(dir_block, "get_write_access");
  1651. err = ext3_journal_get_write_access(handle, dir_block);
  1652. if (err)
  1653. goto out_clear_inode;
  1654. de = (struct ext3_dir_entry_2 *) dir_block->b_data;
  1655. de->inode = cpu_to_le32(inode->i_ino);
  1656. de->name_len = 1;
  1657. de->rec_len = ext3_rec_len_to_disk(EXT3_DIR_REC_LEN(de->name_len));
  1658. strcpy (de->name, ".");
  1659. ext3_set_de_type(dir->i_sb, de, S_IFDIR);
  1660. de = ext3_next_entry(de);
  1661. de->inode = cpu_to_le32(dir->i_ino);
  1662. de->rec_len = ext3_rec_len_to_disk(inode->i_sb->s_blocksize -
  1663. EXT3_DIR_REC_LEN(1));
  1664. de->name_len = 2;
  1665. strcpy (de->name, "..");
  1666. ext3_set_de_type(dir->i_sb, de, S_IFDIR);
  1667. inode->i_nlink = 2;
  1668. BUFFER_TRACE(dir_block, "call ext3_journal_dirty_metadata