
/fs/ext4/xattr.c

https://bitbucket.org/digetx/picasso-kernel
/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */
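/*
 * Worked example (editor's illustration, not part of the original file):
 * in a 4096-byte EA block, the 32-byte ext4_xattr_header occupies bytes
 * 0..31.  A single attribute named "foo" (e_name_len = 3) gets a
 * descriptor of EXT4_XATTR_LEN(3) = 20 bytes at offset 32, followed by
 * the four null bytes that terminate the entry list.  A 10-byte value is
 * padded to EXT4_XATTR_SIZE(10) = 12 bytes and placed at the end of the
 * block, so e_value_offs = 4096 - 12 = 4084.  The gap between the
 * terminator and offset 4084 is the free space that
 * ext4_xattr_set_entry() draws from.
 */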
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%lu: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif
static void ext4_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext4_xattr_cache_find(struct inode *,
						 struct ext4_xattr_header *,
						 struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
			      struct ext4_xattr_entry *);
static int ext4_xattr_list(struct dentry *dentry, char *buffer,
			   size_t buffer_size);

static struct mb_cache *ext4_xattr_cache;

static const struct xattr_handler *ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext4_xattr_acl_access_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext4_xattr_acl_default_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&ext4_xattr_acl_access_handler,
	&ext4_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};
static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum, old;

	old = hdr->h_checksum;
	hdr->h_checksum = 0;
	block_nr = cpu_to_le64(block_nr);
	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
			   sizeof(block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
			   EXT4_BLOCK_SIZE(inode->i_sb));

	hdr->h_checksum = old;
	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					sector_t block_nr,
					struct ext4_xattr_header *hdr)
{
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
		return 0;
	return 1;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      sector_t block_nr,
				      struct ext4_xattr_header *hdr)
{
	if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
		EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		return;

	hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
}

static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
						struct inode *inode,
						struct buffer_head *bh)
{
	ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
	return ext4_handle_dirty_metadata(handle, inode, bh);
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}
/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext4_xattr_list(dentry, buffer, size);
}

static int
ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
{
	while (!IS_LAST_ENTRY(entry)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
		if ((void *)next >= end)
			return -EIO;
		entry = next;
	}
	return 0;
}

static inline int
ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
	int error;

	if (buffer_verified(bh))
		return 0;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EIO;
	if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
		return -EIO;
	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
	if (!error)
		set_buffer_verified(bh);
	return error;
}

static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
	size_t value_size = le32_to_cpu(entry->e_value_size);

	if (entry->e_value_block != 0 || value_size > size ||
	    le16_to_cpu(entry->e_value_offs) + value_size > size)
		return -EIO;
	return 0;
}

static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, size_t size, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	if (!cmp && ext4_xattr_check_entry(entry, size))
		return -EIO;
	return cmp ? -ENODATA : 0;
}
static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
bad_block:
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(entry, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_find_entry(&entry, name_index, name,
				      end - (void *)entry, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, (void *)IFIRST(header) +
		       le16_to_cpu(entry->e_value_offs), size);
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);

	return error;
}
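/*
 * Usage sketch (editor's illustration; the attribute name "test" and the
 * buffer handling are made up for this example).  Callers typically probe
 * for the required size first by passing a NULL buffer, then retrieve the
 * value:
 *
 *	int len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "test",
 *				 NULL, 0);
 *	if (len > 0) {
 *		char *buf = kmalloc(len, GFP_NOFS);
 *		if (buf)
 *			len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER,
 *					     "test", buf, len);
 *	}
 *
 * The in-inode area is searched before the external EA block, matching the
 * -ENODATA fallback above.
 */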
static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(dentry, buffer, rest,
						    entry->e_name,
						    entry->e_name_len,
						    handler->flags);
			if (buffer) {
				if (size > rest)
					return -ERANGE;
				buffer += size;
			}
			rest -= size;
		}
	}
	return buffer_size - rest;
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = dentry->d_inode;
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);
	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = dentry->d_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(IFIRST(header), end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext4_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(dentry->d_inode)->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(dentry->d_inode)->xattr_sem);
	return ret;
}
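/*
 * Sizing sketch (editor's illustration): like listxattr(2), a NULL buffer
 * asks for the number of bytes required, so a caller can do the usual
 * two-pass dance:
 *
 *	ssize_t size = ext4_listxattr(dentry, NULL, 0);
 *	// ...allocate 'size' bytes, then...
 *	size = ext4_listxattr(dentry, buf, size);
 *
 * Both passes take xattr_sem shared, so the list can change between the
 * two calls; callers must tolerate -ERANGE on the second pass.
 */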
/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
		return;

	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
		ext4_handle_dirty_super(handle, sb);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh)
{
	struct mb_cache_entry *ce = NULL;
	int error = 0;

	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
		ea_bdebug(bh, "refcount now=0; freeing");
		if (ce)
			mb_cache_entry_free(ce);
		get_bh(bh);
		unlock_buffer(bh);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		le32_add_cpu(&BHDR(bh)->h_refcount, -1);
		if (ce)
			mb_cache_entry_release(ce);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_xattr_block() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_xattr_block(handle, inode,
							      bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_xattr_block(handle, inode,
							      bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}
/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		*total += EXT4_XATTR_LEN(last->e_name_len);
		if (!last->e_value_block && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}
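/*
 * Worked example (editor's illustration): with a 4096-byte block holding
 * the single attribute from the layout example above, min_offs becomes
 * 4084, 'last' ends up at the terminator just past the 20-byte descriptor
 * (offset 52), and the free space is
 * 4084 - 52 - sizeof(__u32) = 4028 bytes.
 */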
static int
ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
{
	struct ext4_xattr_entry *last;
	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}
	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
	if (!s->not_found) {
		if (!s->here->e_value_block && s->here->e_value_size) {
			size_t size = le32_to_cpu(s->here->e_value_size);
			free += EXT4_XATTR_SIZE(size);
		}
		free += EXT4_XATTR_LEN(name_len);
	}
	if (i->value) {
		if (free < EXT4_XATTR_SIZE(i->value_len) ||
		    free < EXT4_XATTR_LEN(name_len) +
			   EXT4_XATTR_SIZE(i->value_len))
			return -ENOSPC;
	}

	if (i->value && s->not_found) {
		/* Insert the new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
		memmove((void *)s->here + size, s->here, rest);
		memset(s->here, 0, size);
		s->here->e_name_index = i->name_index;
		s->here->e_name_len = name_len;
		memcpy(s->here->e_name, i->name, name_len);
	} else {
		if (!s->here->e_value_block && s->here->e_value_size) {
			void *first_val = s->base + min_offs;
			size_t offs = le16_to_cpu(s->here->e_value_offs);
			void *val = s->base + offs;
			size_t size = EXT4_XATTR_SIZE(
				le32_to_cpu(s->here->e_value_size));

			if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				s->here->e_value_size =
					cpu_to_le32(i->value_len);
				if (i->value == EXT4_ZERO_XATTR_VALUE) {
					memset(val, 0, size);
				} else {
					/* Clear pad bytes first. */
					memset(val + size - EXT4_XATTR_PAD, 0,
					       EXT4_XATTR_PAD);
					memcpy(val, i->value, i->value_len);
				}
				return 0;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			s->here->e_value_size = 0;
			s->here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = s->first;
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block &&
				    last->e_value_size && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT4_XATTR_NEXT(last);
			}
		}
		if (!i->value) {
			/* Remove the old name. */
			size_t size = EXT4_XATTR_LEN(name_len);
			last = ENTRY((void *)last - size);
			memmove(s->here, (void *)s->here + size,
				(void *)last - (void *)s->here + sizeof(__u32));
			memset(last, 0, size);
		}
	}

	if (i->value) {
		/* Insert the new value. */
		s->here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value_len) {
			size_t size = EXT4_XATTR_SIZE(i->value_len);
			void *val = s->base + min_offs - size;
			s->here->e_value_offs = cpu_to_le16(min_offs - size);
			if (i->value == EXT4_ZERO_XATTR_VALUE) {
				memset(val, 0, size);
			} else {
				/* Clear the pad bytes first. */
				memset(val + size - EXT4_XATTR_PAD, 0,
				       EXT4_XATTR_PAD);
				memcpy(val, i->value, i->value_len);
			}
		}
	}
	return 0;
}
struct ext4_xattr_block_find {
	struct ext4_xattr_search s;
	struct buffer_head *bh;
};

static int
ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	int error;

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  i->name_index, i->name, i->value, (long)i->value_len);

	if (EXT4_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bs->bh)
			goto cleanup;
		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
			atomic_read(&(bs->bh->b_count)),
			le32_to_cpu(BHDR(bs->bh)->h_refcount));
		if (ext4_xattr_check_block(inode, bs->bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		bs->s.base = BHDR(bs->bh);
		bs->s.first = BFIRST(bs->bh);
		bs->s.end = bs->bh->b_data + bs->bh->b_size;
		bs->s.here = bs->s.first;
		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
					      i->name, bs->bh->b_size, 1);
		if (error && error != -ENODATA)
			goto cleanup;
		bs->s.not_found = error;
	}
	error = 0;

cleanup:
	return error;
}
static int
ext4_xattr_block_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct ext4_xattr_search *s = &bs->s;
	struct mb_cache_entry *ce = NULL;
	int error = 0;

#define header(x) ((struct ext4_xattr_header *)(x))

	if (i->value && i->value_len > sb->s_blocksize)
		return -ENOSPC;
	if (s->base) {
		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
					bs->bh->b_blocknr);
		error = ext4_journal_get_write_access(handle, bs->bh);
		if (error)
			goto cleanup;
		lock_buffer(bs->bh);

		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
			if (ce) {
				mb_cache_entry_free(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_xattr_set_entry(i, s);
			if (!error) {
				if (!IS_LAST_ENTRY(s->first))
					ext4_xattr_rehash(header(s->base),
							  s->here);
				ext4_xattr_cache_insert(bs->bh);
			}
			unlock_buffer(bs->bh);
			if (error == -EIO)
				goto bad_block;
			if (!error)
				error = ext4_handle_dirty_xattr_block(handle,
								      inode,
								      bs->bh);
			if (error)
				goto cleanup;
			goto inserted;
		} else {
			int offset = (char *)s->here - bs->bh->b_data;

			unlock_buffer(bs->bh);
			if (ce) {
				mb_cache_entry_release(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "cloning");
			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
			error = -ENOMEM;
			if (s->base == NULL)
				goto cleanup;
			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
			s->first = ENTRY(header(s->base)+1);
			header(s->base)->h_refcount = cpu_to_le32(1);
			s->here = ENTRY(s->base + offset);
			s->end = s->base + bs->bh->b_size;
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
		/* assert(header == s->base) */
		error = -ENOMEM;
		if (s->base == NULL)
			goto cleanup;
		header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		header(s->base)->h_blocks = cpu_to_le32(1);
		header(s->base)->h_refcount = cpu_to_le32(1);
		s->first = ENTRY(header(s->base)+1);
		s->here = ENTRY(header(s->base)+1);
		s->end = s->base + sb->s_blocksize;
	}

	error = ext4_xattr_set_entry(i, s);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	if (!IS_LAST_ENTRY(s->first))
		ext4_xattr_rehash(header(s->base), s->here);

inserted:
	if (!IS_LAST_ENTRY(s->first)) {
		new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == bs->bh)
				ea_bdebug(new_bh, "keeping");
			else {
				/* The old block is released after updating
				   the inode. */
				error = dquot_alloc_block(inode,
						EXT4_C2B(EXT4_SB(sb), 1));
				if (error)
					goto cleanup;
				error = ext4_journal_get_write_access(handle,
								      new_bh);
				if (error)
					goto cleanup_dquot;
				lock_buffer(new_bh);
				le32_add_cpu(&BHDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				unlock_buffer(new_bh);
				error = ext4_handle_dirty_xattr_block(handle,
								      inode,
								      new_bh);
				if (error)
					goto cleanup_dquot;
			}
			mb_cache_entry_release(ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
			ea_bdebug(bs->bh, "keeping this block");
			new_bh = bs->bh;
			get_bh(new_bh);
		} else {
			/* We need to allocate a new block */
			ext4_fsblk_t goal, block;

			goal = ext4_group_first_block_no(sb,
						EXT4_I(inode)->i_block_group);

			/* non-extent files can't have physical blocks past 2^32 */
			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;

			/*
			 * take i_data_sem because we will test
			 * i_delalloc_reserved_flag in ext4_mb_new_blocks
			 */
			down_read((&EXT4_I(inode)->i_data_sem));
			block = ext4_new_meta_blocks(handle, inode, goal, 0,
						     NULL, &error);
			up_read((&EXT4_I(inode)->i_data_sem));
			if (error)
				goto cleanup;

			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);

			ea_idebug(inode, "creating block %llu",
				  (unsigned long long)block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
				error = -ENOMEM;
getblk_failed:
				ext4_free_blocks(handle, inode, NULL, block, 1,
						 EXT4_FREE_BLOCKS_METADATA);
				goto cleanup;
			}
			lock_buffer(new_bh);
			error = ext4_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				error = -EIO;
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, s->base, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext4_xattr_cache_insert(new_bh);
			error = ext4_handle_dirty_xattr_block(handle,
							      inode, new_bh);
			if (error)
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;

	/* Drop the previous xattr block. */
	if (bs->bh && bs->bh != new_bh)
		ext4_xattr_release_block(handle, inode, bs->bh);
	error = 0;

cleanup:
	if (ce)
		mb_cache_entry_release(ce);
	brelse(new_bh);
	if (!(bs->bh && s->base == bs->bh->b_data))
		kfree(s->base);

	return error;

cleanup_dquot:
	dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
	goto cleanup;

bad_block:
	EXT4_ERROR_INODE(inode, "bad block %llu",
			 EXT4_I(inode)->i_file_acl);
	goto cleanup;

#undef header
}
int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
			  struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return 0;
	raw_inode = ext4_raw_inode(&is->iloc);
	header = IHDR(inode, raw_inode);
	is->s.base = is->s.first = IFIRST(header);
	is->s.here = is->s.first;
	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		error = ext4_xattr_check_names(IFIRST(header), is->s.end);
		if (error)
			return error;
		/* Find the named attribute. */
		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
					      i->name, is->s.end -
					      (void *)is->s.base, 0);
		if (error && error != -ENODATA)
			return error;
		is->s.not_found = error;
	}
	return 0;
}
int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
				struct ext4_xattr_info *i,
				struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s);
	if (error) {
		if (error == -ENOSPC &&
		    ext4_has_inline_data(inode)) {
			/* Misplaced parenthesis fixed: the size needed is
			 * EXT4_XATTR_LEN(name) plus EXT4_XATTR_SIZE(value),
			 * not EXT4_XATTR_LEN() of their sum. */
			error = ext4_try_to_evict_inline_data(handle, inode,
					EXT4_XATTR_LEN(strlen(i->name)) +
					EXT4_XATTR_SIZE(i->value_len));
			if (error)
				return error;
			error = ext4_xattr_ibody_find(inode, i, is);
			if (error)
				return error;
			error = ext4_xattr_set_entry(i, s);
		}
		if (error)
			return error;
	}
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
	} else {
		header->h_magic = cpu_to_le32(0);
		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
	}
	return 0;
}
static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
				struct ext4_xattr_info *i,
				struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s);
	if (error)
		return error;
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		ext4_set_inode_state(inode, EXT4_STATE_XATTR);
	} else {
		header->h_magic = cpu_to_le32(0);
		ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
	}
	return 0;
}
/*
 * ext4_xattr_set_handle()
 *
 * Create, replace or remove an extended attribute for this inode.  Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
		      const char *name, const void *value, size_t value_len,
		      int flags)
{
	struct ext4_xattr_info i = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,
	};
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = -ENODATA, },
	};
	struct ext4_xattr_block_find bs = {
		.s = { .not_found = -ENODATA, },
	};
	unsigned long no_expand;
	int error;

	if (!name)
		return -EINVAL;
	if (strlen(name) > 255)
		return -ERANGE;
	down_write(&EXT4_I(inode)->xattr_sem);
	no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
	ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);

	error = ext4_reserve_inode_write(handle, inode, &is.iloc);
	if (error)
		goto cleanup;

	if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
		struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
		ext4_clear_inode_state(inode, EXT4_STATE_NEW);
	}

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto cleanup;
	if (is.s.not_found)
		error = ext4_xattr_block_find(inode, &i, &bs);
	if (error)
		goto cleanup;
	if (is.s.not_found && bs.s.not_found) {
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (!value)
			goto cleanup;
	} else {
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}
	if (!value) {
		if (!is.s.not_found)
			error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		else if (!bs.s.not_found)
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
	} else {
		error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		if (!error && !bs.s.not_found) {
			i.value = NULL;
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
		} else if (error == -ENOSPC) {
			if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
				error = ext4_xattr_block_find(inode, &i, &bs);
				if (error)
					goto cleanup;
			}
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
			if (error)
				goto cleanup;
			if (!is.s.not_found) {
				i.value = NULL;
				error = ext4_xattr_ibody_set(handle, inode, &i,
							     &is);
			}
		}
	}
	if (!error) {
		ext4_xattr_update_super_block(handle, inode->i_sb);
		inode->i_ctime = ext4_current_time(inode);
		if (!value)
			ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
		/*
		 * The bh is consumed by ext4_mark_iloc_dirty, even with
		 * error != 0.
		 */
		is.iloc.bh = NULL;
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
	}

cleanup:
	brelse(is.iloc.bh);
	brelse(bs.bh);
	if (no_expand == 0)
		ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}
/*
 * ext4_xattr_set()
 *
 * Like ext4_xattr_set_handle, but start from an inode. This extended
 * attribute modification is a filesystem transaction by itself.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	handle_t *handle;
	int error, retries = 0;
	int credits = EXT4_DATA_TRANS_BLOCKS(inode->i_sb);

retry:
	/*
	 * In case of inline data, we may push out the data to a block,
	 * so reserve the journal space first.
	 */
	if (ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;

	handle = ext4_journal_start(inode, credits);
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
	} else {
		int error2;

		error = ext4_xattr_set_handle(handle, inode, name_index, name,
					      value, value_len, flags);
		error2 = ext4_journal_stop(handle);
		if (error == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry;
		if (error == 0)
			error = error2;
	}

	return error;
}
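/*
 * Usage sketch (editor's illustration; the attribute name is
 * hypothetical):
 *
 *	err = ext4_xattr_set(inode, EXT4_XATTR_INDEX_USER, "test",
 *			     value, value_len, XATTR_CREATE);
 *
 * XATTR_CREATE fails with -EEXIST if the attribute is already present,
 * XATTR_REPLACE fails with -ENODATA if it is not, and a NULL value
 * removes the attribute, as described above ext4_xattr_set_handle().
 */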
/*
 * Shift the EA entries in the inode to create space for the increased
 * i_extra_isize.
 */
static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
				     int value_offs_shift, void *to,
				     void *from, size_t n, int blocksize)
{
	struct ext4_xattr_entry *last = entry;
	int new_offs;

	/* Adjust the value offsets of the entries */
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			new_offs = le16_to_cpu(last->e_value_offs) +
							value_offs_shift;
			BUG_ON(new_offs + le32_to_cpu(last->e_value_size)
				 > blocksize);
			last->e_value_offs = cpu_to_le16(new_offs);
		}
	}
	/* Shift the entries by n bytes */
	memmove(to, from, n);
}
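/*
 * Example (editor's illustration): when i_extra_isize grows by 32 bytes,
 * the entry descriptors are memmove'd 32 bytes towards the end of the
 * inode while the values stay at their absolute positions; since
 * e_value_offs is relative to the (now shifted) header, the caller passes
 * value_offs_shift = -32 and every offset shrinks accordingly.
 */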
/*
 * Expand an inode by new_extra_isize bytes when EAs are present.
 * Returns 0 on success or negative error number on failure.
 */
int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
			       struct ext4_inode *raw_inode, handle_t *handle)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry, *last, *first;
	struct buffer_head *bh = NULL;
	struct ext4_xattr_ibody_find *is = NULL;
	struct ext4_xattr_block_find *bs = NULL;
	char *buffer = NULL, *b_entry_name = NULL;
	size_t min_offs, free;
	int total_ino, total_blk = 0;	/* total_blk is write-only below */
	void *base, *start, *end;
	int extra_isize = 0, error = 0, tried_min_extra_isize = 0;
	int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);

	down_write(&EXT4_I(inode)->xattr_sem);
retry:
	if (EXT4_I(inode)->i_extra_isize >= new_extra_isize) {
		up_write(&EXT4_I(inode)->xattr_sem);
		return 0;
	}

	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);

	/*
	 * Check if enough free space is available in the inode to shift the
	 * entries ahead by new_extra_isize.
	 */

	base = start = entry;
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	min_offs = end - base;
	last = entry;
	total_ino = sizeof(struct ext4_xattr_ibody_header);

	free = ext4_xattr_free_space(last, &min_offs, base, &total_ino);
	if (free >= new_extra_isize) {
		entry = IFIRST(header);
		ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize
				- new_extra_isize, (void *)raw_inode +
				EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
				(void *)header, total_ino,
				inode->i_sb->s_blocksize);
		EXT4_I(inode)->i_extra_isize = new_extra_isize;
		error = 0;
		goto cleanup;
	}

	/*
	 * Enough free space isn't available in the inode, check if
	 * EA block can hold new_extra_isize bytes.
	 */
	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		if (ext4_xattr_check_block(inode, bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		base = BHDR(bh);
		first = BFIRST(bh);
		end = bh->b_data + bh->b_size;
		min_offs = end - base;
		free = ext4_xattr_free_space(first, &min_offs, base,
					     &total_blk);
		if (free < new_extra_isize) {
			if (!tried_min_extra_isize && s_min_extra_isize) {
				tried_min_extra_isize++;
				new_extra_isize = s_min_extra_isize;
				brelse(bh);
				goto retry;
			}
			error = -1;
			goto cleanup;
		}
	} else {
		free = inode->i_sb->s_blocksize;
	}

	while (new_extra_isize > 0) {
		size_t offs, size, entry_size;
		struct ext4_xattr_entry *small_entry = NULL;
		struct ext4_xattr_info i = {
			.value = NULL,
			.value_len = 0,
		};
		unsigned int total_size;  /* EA entry size + value size */
		unsigned int shift_bytes; /* No. of bytes to shift EAs by? */
		unsigned int min_total_size = ~0U;

		is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
		bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
		if (!is || !bs) {
			error = -ENOMEM;
			goto cleanup;
		}

		is->s.not_found = -ENODATA;
		bs->s.not_found = -ENODATA;
		is->iloc.bh = NULL;
		bs->bh = NULL;

		last = IFIRST(header);
		/* Find the entry best suited to be pushed into EA block */
		entry = NULL;
		for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
			total_size =
			EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
					EXT4_XATTR_LEN(last->e_name_len);
			if (total_size <= free && total_size < min_total_size) {
				if (total_size < new_extra_isize) {
					small_entry = last;
				} else {
					entry = last;
					min_total_size = total_size;
				}
			}
		}

		if (entry == NULL) {
			if (small_entry) {
				entry = small_entry;
			} else {
				if (!tried_min_extra_isize &&
				    s_min_extra_isize) {
					tried_min_extra_isize++;
					new_extra_isize = s_min_extra_isize;
					kfree(is); is = NULL;
					kfree(bs); bs = NULL;
					brelse(bh);
					goto retry;
				}
				error = -1;
				goto cleanup;
			}
		}
		offs = le16_to_cpu(entry->e_value_offs);
		size = le32_to_cpu(entry->e_value_size);
		entry_size = EXT4_XATTR_LEN(entry->e_name_len);
		i.name_index = entry->e_name_index;
		buffer = kmalloc(EXT4_XATTR_SIZE(size), GFP_NOFS);
		b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
		if (!buffer || !b_entry_name) {
			error = -ENOMEM;
			goto cleanup;
		}
		/* Save the entry name and the entry value */
		memcpy(buffer, (void *)IFIRST(header) + offs,
		       EXT4_XATTR_SIZE(size));
		memcpy(b_entry_name, entry->e_name, entry->e_name_len);
		b_entry_name[entry->e_name_len] = '\0';
		i.name = b_entry_name;

		error = ext4_get_inode_loc(inode, &is->iloc);
		if (error)
			goto cleanup;

		error = ext4_xattr_ibody_find(inode, &i, is);
		if (error)
			goto cleanup;

		/* Remove the chosen entry from the inode */
		error = ext4_xattr_ibody_set(handle, inode, &i, is);
		if (error)
			goto cleanup;

		entry = IFIRST(header);
		if (entry_size + EXT4_XATTR_SIZE(size) >= new_extra_isize)
			shift_bytes = new_extra_isize;
		else
			shift_bytes = entry_size + size;
		/* Adjust the offsets and shift the remaining entries ahead */
		ext4_xattr_shift_entries(entry, EXT4_I(inode)->i_extra_isize -
			shift_bytes, (void *)raw_inode +
			EXT4_GOOD_OLD_INODE_SIZE + extra_isize + shift_bytes,
			(void *)header, total_ino - entry_size,
			inode->i_sb->s_blocksize);

		extra_isize += shift_bytes;
		new_extra_isize -= shift_bytes;
		EXT4_I(inode)->i_extra_isize = extra_isize;

		i.name = b_entry_name;
		i.value = buffer;
		i.value_len = size;
		error = ext4_xattr_block_find(inode, &i, bs);
		if (error)
			goto cleanup;

		/* Add entry which was removed from the inode into the block */
		error = ext4_xattr_block_set(handle, inode, &i, bs);
		if (error)
			goto cleanup;
		kfree(b_entry_name);
		kfree(buffer);
		b_entry_name = NULL;
		buffer = NULL;
		brelse(is->iloc.bh);
		kfree(is);
		kfree(bs);
	}
	brelse(bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	return 0;

cleanup:
	kfree(b_entry_name);
	kfree(buffer);
	if (is)
		brelse(is->iloc.bh);
	kfree(is);
	kfree(bs);
	brelse(bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}
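/*
 * Flow sketch (editor's illustration): if i_extra_isize must grow but the
 * inode lacks free space, the loop above picks the smallest in-inode
 * attribute whose descriptor-plus-value covers the deficit (falling back
 * to a smaller one if none is big enough), removes it with
 * ext4_xattr_ibody_set(), shifts the remaining descriptors ahead, and
 * re-adds the saved name/value to the external EA block via
 * ext4_xattr_block_set(), repeating until enough space has been freed.
 */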
/*
 * ext4_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed. We have exclusive
 * access to the inode.
 */
void
ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh) {
		EXT4_ERROR_INODE(inode, "block %llu read error",
				 EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext4_xattr_release_block(handle, inode, bh);
	EXT4_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
}

/*
 * ext4_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext4_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}
/*
 * ext4_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Insertion is best-effort: allocation failures and duplicate entries
 * are silently ignored.
 */
static void
ext4_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext4_xattr_cache, GFP_NOFS);
	if (!ce) {
		ea_bdebug(bh, "out of memory");
		return;
	}
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x]", (int)hash);
		mb_cache_entry_release(ce);
	}
}
/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}
/*
 * ext4_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
		      struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext4_xattr_cache, inode->i_sb->s_bdev,
				       hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			EXT4_ERROR_INODE(inode, "block %lu read error",
					 (unsigned long) ce->e_block);
		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
				EXT4_XATTR_REFCOUNT_MAX) {
			ea_idebug(inode, "block %lu refcount %d>=%d",
				  (unsigned long) ce->e_block,
				  le32_to_cpu(BHDR(bh)->h_refcount),
				  EXT4_XATTR_REFCOUNT_MAX);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ce, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}
#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
					 struct ext4_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
			 EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT
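/*
 * Worked example (editor's illustration): for a one-byte name "a" (0x61)
 * the name loop leaves hash = 0x61; a four-byte value then mixes in one
 * __le32 word v with the same rotate-and-xor pattern, giving
 * hash = (0x61 << 16) ^ (0x61 >> 16) ^ v = 0x610000 ^ v.  The value loop
 * runs once per 4 value bytes, rounded up by EXT4_XATTR_ROUND.
 */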
#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header,
			      struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	ext4_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT
int __init
ext4_init_xattr(void)
{
	ext4_xattr_cache = mb_cache_create("ext4_xattr", 6);
	if (!ext4_xattr_cache)
		return -ENOMEM;
	return 0;
}

void
ext4_exit_xattr(void)
{
	if (ext4_xattr_cache)
		mb_cache_destroy(ext4_xattr_cache);
	ext4_xattr_cache = NULL;
}