/fs/ext4/super.c

https://bitbucket.org/cyanogenmod/android_kernel_asus_tf300t · C · 5054 lines · 3929 code · 561 blank · 564 comment · 674 complexity · 1908ad9899d5e89a4757b72a72c2b11e MD5 · raw file

Large files are truncated; click here to view the full file.

  1. /*
  2. * linux/fs/ext4/super.c
  3. *
  4. * Copyright (C) 1992, 1993, 1994, 1995
  5. * Remy Card (card@masi.ibp.fr)
  6. * Laboratoire MASI - Institut Blaise Pascal
  7. * Universite Pierre et Marie Curie (Paris VI)
  8. *
  9. * from
  10. *
  11. * linux/fs/minix/inode.c
  12. *
  13. * Copyright (C) 1991, 1992 Linus Torvalds
  14. *
  15. * Big-endian to little-endian byte-swapping/bitmaps by
  16. * David S. Miller (davem@caip.rutgers.edu), 1995
  17. */
  18. #include <linux/module.h>
  19. #include <linux/string.h>
  20. #include <linux/fs.h>
  21. #include <linux/time.h>
  22. #include <linux/vmalloc.h>
  23. #include <linux/jbd2.h>
  24. #include <linux/slab.h>
  25. #include <linux/init.h>
  26. #include <linux/blkdev.h>
  27. #include <linux/parser.h>
  28. #include <linux/buffer_head.h>
  29. #include <linux/exportfs.h>
  30. #include <linux/vfs.h>
  31. #include <linux/random.h>
  32. #include <linux/mount.h>
  33. #include <linux/namei.h>
  34. #include <linux/quotaops.h>
  35. #include <linux/seq_file.h>
  36. #include <linux/proc_fs.h>
  37. #include <linux/ctype.h>
  38. #include <linux/log2.h>
  39. #include <linux/crc16.h>
  40. #include <linux/cleancache.h>
  41. #include <asm/uaccess.h>
  42. #include <linux/kthread.h>
  43. #include <linux/freezer.h>
  44. #include "ext4.h"
  45. #include "ext4_jbd2.h"
  46. #include "xattr.h"
  47. #include "acl.h"
  48. #include "mballoc.h"
  49. #define CREATE_TRACE_POINTS
  50. #include <trace/events/ext4.h>
  51. static struct proc_dir_entry *ext4_proc_root;
  52. static struct kset *ext4_kset;
  53. static struct ext4_lazy_init *ext4_li_info;
  54. static struct mutex ext4_li_mtx;
  55. static struct ext4_features *ext4_feat;
  56. static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
  57. unsigned long journal_devnum);
  58. static int ext4_commit_super(struct super_block *sb, int sync);
  59. static void ext4_mark_recovery_complete(struct super_block *sb,
  60. struct ext4_super_block *es);
  61. static void ext4_clear_journal_err(struct super_block *sb,
  62. struct ext4_super_block *es);
  63. static int ext4_sync_fs(struct super_block *sb, int wait);
  64. static const char *ext4_decode_error(struct super_block *sb, int errno,
  65. char nbuf[16]);
  66. static int ext4_remount(struct super_block *sb, int *flags, char *data);
  67. static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
  68. static int ext4_unfreeze(struct super_block *sb);
  69. static void ext4_write_super(struct super_block *sb);
  70. static int ext4_freeze(struct super_block *sb);
  71. static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
  72. const char *dev_name, void *data);
  73. static inline int ext2_feature_set_ok(struct super_block *sb);
  74. static inline int ext3_feature_set_ok(struct super_block *sb);
  75. static int ext4_feature_set_ok(struct super_block *sb, int readonly);
  76. static void ext4_destroy_lazyinit_thread(void);
  77. static void ext4_unregister_li_request(struct super_block *sb);
  78. static void ext4_clear_request_list(void);
/*
 * When no dedicated ext2 driver is built and CONFIG_EXT4_USE_FOR_EXT23 is
 * set, ext4 registers itself under the name "ext2" so ext2 filesystems can
 * still be mounted.  IS_EXT2_SB() reports whether a superblock was mounted
 * through this compatibility alias.
 */
#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext2_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext2",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
#define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type)
#else
#define IS_EXT2_SB(sb) (0)
#endif
/*
 * Same compatibility scheme as above, for ext3: register ext4 as "ext3"
 * when no dedicated ext3 driver is built.
 */
#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext3_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ext3",
	.mount		= ext4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};
#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
#else
#define IS_EXT3_SB(sb) (0)
#endif
  103. void *ext4_kvmalloc(size_t size, gfp_t flags)
  104. {
  105. void *ret;
  106. ret = kmalloc(size, flags);
  107. if (!ret)
  108. ret = __vmalloc(size, flags, PAGE_KERNEL);
  109. return ret;
  110. }
  111. void *ext4_kvzalloc(size_t size, gfp_t flags)
  112. {
  113. void *ret;
  114. ret = kzalloc(size, flags);
  115. if (!ret)
  116. ret = __vmalloc(size, flags | __GFP_ZERO, PAGE_KERNEL);
  117. return ret;
  118. }
  119. void ext4_kvfree(void *ptr)
  120. {
  121. if (is_vmalloc_addr(ptr))
  122. vfree(ptr);
  123. else
  124. kfree(ptr);
  125. }
  126. ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
  127. struct ext4_group_desc *bg)
  128. {
  129. return le32_to_cpu(bg->bg_block_bitmap_lo) |
  130. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  131. (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
  132. }
  133. ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
  134. struct ext4_group_desc *bg)
  135. {
  136. return le32_to_cpu(bg->bg_inode_bitmap_lo) |
  137. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  138. (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
  139. }
  140. ext4_fsblk_t ext4_inode_table(struct super_block *sb,
  141. struct ext4_group_desc *bg)
  142. {
  143. return le32_to_cpu(bg->bg_inode_table_lo) |
  144. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  145. (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
  146. }
  147. __u32 ext4_free_blks_count(struct super_block *sb,
  148. struct ext4_group_desc *bg)
  149. {
  150. return le16_to_cpu(bg->bg_free_blocks_count_lo) |
  151. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  152. (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
  153. }
  154. __u32 ext4_free_inodes_count(struct super_block *sb,
  155. struct ext4_group_desc *bg)
  156. {
  157. return le16_to_cpu(bg->bg_free_inodes_count_lo) |
  158. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  159. (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
  160. }
  161. __u32 ext4_used_dirs_count(struct super_block *sb,
  162. struct ext4_group_desc *bg)
  163. {
  164. return le16_to_cpu(bg->bg_used_dirs_count_lo) |
  165. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  166. (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
  167. }
  168. __u32 ext4_itable_unused_count(struct super_block *sb,
  169. struct ext4_group_desc *bg)
  170. {
  171. return le16_to_cpu(bg->bg_itable_unused_lo) |
  172. (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
  173. (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
  174. }
  175. void ext4_block_bitmap_set(struct super_block *sb,
  176. struct ext4_group_desc *bg, ext4_fsblk_t blk)
  177. {
  178. bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
  179. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  180. bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
  181. }
  182. void ext4_inode_bitmap_set(struct super_block *sb,
  183. struct ext4_group_desc *bg, ext4_fsblk_t blk)
  184. {
  185. bg->bg_inode_bitmap_lo = cpu_to_le32((u32)blk);
  186. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  187. bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
  188. }
  189. void ext4_inode_table_set(struct super_block *sb,
  190. struct ext4_group_desc *bg, ext4_fsblk_t blk)
  191. {
  192. bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
  193. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  194. bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
  195. }
  196. void ext4_free_blks_set(struct super_block *sb,
  197. struct ext4_group_desc *bg, __u32 count)
  198. {
  199. bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
  200. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  201. bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
  202. }
  203. void ext4_free_inodes_set(struct super_block *sb,
  204. struct ext4_group_desc *bg, __u32 count)
  205. {
  206. bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
  207. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  208. bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
  209. }
  210. void ext4_used_dirs_set(struct super_block *sb,
  211. struct ext4_group_desc *bg, __u32 count)
  212. {
  213. bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
  214. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  215. bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
  216. }
  217. void ext4_itable_unused_set(struct super_block *sb,
  218. struct ext4_group_desc *bg, __u32 count)
  219. {
  220. bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
  221. if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
  222. bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
  223. }
  224. /* Just increment the non-pointer handle value */
  225. static handle_t *ext4_get_nojournal(void)
  226. {
  227. handle_t *handle = current->journal_info;
  228. unsigned long ref_cnt = (unsigned long)handle;
  229. BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
  230. ref_cnt++;
  231. handle = (handle_t *)ref_cnt;
  232. current->journal_info = handle;
  233. return handle;
  234. }
  235. /* Decrement the non-pointer handle value */
  236. static void ext4_put_nojournal(handle_t *handle)
  237. {
  238. unsigned long ref_cnt = (unsigned long)handle;
  239. BUG_ON(ref_cnt == 0);
  240. ref_cnt--;
  241. handle = (handle_t *)ref_cnt;
  242. current->journal_info = handle;
  243. }
/*
 * Wrappers for jbd2_journal_start/end.
 *
 * The only special thing we need to do here is to make sure that all
 * journal_end calls result in the superblock being marked dirty, so
 * that sync() will call the filesystem's write_super callback if
 * appropriate.
 *
 * To avoid j_barrier hold in userspace when a user calls freeze(),
 * ext4 prevents a new handle from being started by s_frozen, which
 * is in an upper layer.
 */
handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
{
	journal_t *journal;
	handle_t *handle;

	trace_ext4_journal_start(sb, nblocks, _RET_IP_);
	/* No transactions are possible on a read-only filesystem. */
	if (sb->s_flags & MS_RDONLY)
		return ERR_PTR(-EROFS);

	journal = EXT4_SB(sb)->s_journal;
	handle = ext4_journal_current_handle();

	/*
	 * If a handle has been started, it should be allowed to
	 * finish, otherwise deadlock could happen between freeze
	 * and others(e.g. truncate) due to the restart of the
	 * journal handle if the filesystem is frozen and active
	 * handles are not stopped.
	 */
	if (!handle)
		vfs_check_frozen(sb, SB_FREEZE_TRANS);

	/* No-journal mode: hand out a fake ref-counted handle instead. */
	if (!journal)
		return ext4_get_nojournal();
	/*
	 * Special case here: if the journal has aborted behind our
	 * backs (eg. EIO in the commit thread), then we still need to
	 * take the FS itself readonly cleanly.
	 */
	if (is_journal_aborted(journal)) {
		ext4_abort(sb, "Detected aborted journal");
		return ERR_PTR(-EROFS);
	}
	return jbd2_journal_start(journal, nblocks);
}
/*
 * The only special thing we need to do here is to make sure that all
 * jbd2_journal_stop calls result in the superblock being marked dirty, so
 * that sync() will call the filesystem's write_super callback if
 * appropriate.
 *
 * Returns 0 on success or the first error seen (either one recorded on
 * the handle while it was active, or the jbd2_journal_stop() result).
 */
int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
{
	struct super_block *sb;
	int err;
	int rc;

	/* Fake no-journal handles just need their ref count dropped. */
	if (!ext4_handle_valid(handle)) {
		ext4_put_nojournal(handle);
		return 0;
	}
	sb = handle->h_transaction->t_journal->j_private;
	/* Capture any error recorded on the handle before stopping it. */
	err = handle->h_err;
	rc = jbd2_journal_stop(handle);

	if (!err)
		err = rc;
	if (err)
		__ext4_std_error(sb, where, line, err);
	return err;
}
/*
 * Record error @err on @handle and abort it, logging the caller, source
 * line and the journal function that failed.  If the handle is already
 * aborted, only h_err is (possibly) updated.
 */
void ext4_journal_abort_handle(const char *caller, unsigned int line,
			       const char *err_fn, struct buffer_head *bh,
			       handle_t *handle, int err)
{
	char nbuf[16];
	const char *errstr = ext4_decode_error(NULL, err, nbuf);

	/* Fake no-journal handles must never reach this path. */
	BUG_ON(!ext4_handle_valid(handle));

	if (bh)
		BUFFER_TRACE(bh, "abort");

	/* Keep the first error recorded on the handle. */
	if (!handle->h_err)
		handle->h_err = err;

	if (is_handle_aborted(handle))
		return;

	printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n",
	       caller, line, errstr, err_fn);

	jbd2_journal_abort_handle(handle);
}
/*
 * Record an error in the on-disk superblock's error fields (in memory
 * only; the caller must write the superblock out).  Tracks both the
 * first and the most recent error, increments the error count, and
 * arms the daily error-report timer on the first error seen.
 */
static void __save_error_info(struct super_block *sb, const char *func,
			      unsigned int line)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
	es->s_last_error_time = cpu_to_le32(get_seconds());
	/* NOTE(review): strncpy() does not guarantee NUL termination when
	 * @func fills the field completely — readers must treat these as
	 * fixed-size, possibly unterminated buffers. */
	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
	es->s_last_error_line = cpu_to_le32(line);
	if (!es->s_first_error_time) {
		/* First error on this filesystem: snapshot it separately. */
		es->s_first_error_time = es->s_last_error_time;
		strncpy(es->s_first_error_func, func,
			sizeof(es->s_first_error_func));
		es->s_first_error_line = cpu_to_le32(line);
		es->s_first_error_ino = es->s_last_error_ino;
		es->s_first_error_block = es->s_last_error_block;
	}
	/*
	 * Start the daily error reporting function if it hasn't been
	 * started already
	 */
	if (!es->s_error_count)
		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
	es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
}
/* Record an error in the superblock, then write it out synchronously. */
static void save_error_info(struct super_block *sb, const char *func,
			    unsigned int line)
{
	__save_error_info(sb, func, line);
	ext4_commit_super(sb, 1);
}
/* Deal with the reporting of failure conditions on a filesystem such as
 * inconsistencies detected or read IO failures.
 *
 * On ext2, we can store the error state of the filesystem in the
 * superblock. That is not possible on ext4, because we may have other
 * write ordering constraints on the superblock which prevent us from
 * writing it out straight away; and given that the journal is about to
 * be aborted, we can't rely on the current, or future, transactions to
 * write out the superblock safely.
 *
 * We'll just use the jbd2_journal_abort() error code to record an error in
 * the journal instead. On recovery, the journal will complain about
 * that error until we've noted it down and cleared it.
 */

/* React to a recorded error according to the errors= mount option. */
static void ext4_handle_error(struct super_block *sb)
{
	/* Already read-only: nothing more can be damaged. */
	if (sb->s_flags & MS_RDONLY)
		return;

	/* Anything but errors=continue aborts the journal. */
	if (!test_opt(sb, ERRORS_CONT)) {
		journal_t *journal = EXT4_SB(sb)->s_journal;

		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (journal)
			jbd2_journal_abort(journal, -EIO);
	}
	if (test_opt(sb, ERRORS_RO)) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		sb->s_flags |= MS_RDONLY;
	}
	if (test_opt(sb, ERRORS_PANIC))
		panic("EXT4-fs (device %s): panic forced after error\n",
			sb->s_id);
}
/*
 * Report a filesystem error: log it with the reporting function/line,
 * persist it in the superblock error fields, then react per the
 * errors= mount option.  Called via the ext4_error() macro.
 */
void __ext4_error(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
	       sb->s_id, function, line, current->comm, &vaf);
	va_end(args);

	save_error_info(sb, function, line);
	ext4_handle_error(sb);
}
/*
 * Report an error tied to a specific inode (and, when @block is
 * non-zero, a specific block).  The inode/block are recorded in the
 * superblock error fields before logging and error handling.
 */
void ext4_error_inode(struct inode *inode, const char *function,
		      unsigned int line, ext4_fsblk_t block,
		      const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	es->s_last_error_block = cpu_to_le64(block);
	save_error_info(inode->i_sb, function, line);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
	       inode->i_sb->s_id, function, line, inode->i_ino);
	if (block)
		printk(KERN_CONT "block %llu: ", block);
	printk(KERN_CONT "comm %s: %pV\n", current->comm, &vaf);
	va_end(args);

	ext4_handle_error(inode->i_sb);
}
/*
 * Report an error tied to an open file, including its path in the log
 * message (truncated to the 80-byte local buffer; "(unknown)" when
 * d_path() fails).  The inode is recorded in the superblock error
 * fields before logging and error handling.
 */
void ext4_error_file(struct file *file, const char *function,
		     unsigned int line, ext4_fsblk_t block,
		     const char *fmt, ...)
{
	va_list args;
	struct va_format vaf;
	struct ext4_super_block *es;
	struct inode *inode = file->f_dentry->d_inode;
	char pathname[80], *path;

	es = EXT4_SB(inode->i_sb)->s_es;
	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
	save_error_info(inode->i_sb, function, line);
	path = d_path(&(file->f_path), pathname, sizeof(pathname));
	if (IS_ERR(path))
		path = "(unknown)";
	printk(KERN_CRIT
	       "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
	       inode->i_sb->s_id, function, line, inode->i_ino);
	if (block)
		printk(KERN_CONT "block %llu: ", block);
	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CONT "comm %s: path %s: %pV\n", current->comm, path, &vaf);
	va_end(args);

	ext4_handle_error(inode->i_sb);
}
  453. static const char *ext4_decode_error(struct super_block *sb, int errno,
  454. char nbuf[16])
  455. {
  456. char *errstr = NULL;
  457. switch (errno) {
  458. case -EIO:
  459. errstr = "IO failure";
  460. break;
  461. case -ENOMEM:
  462. errstr = "Out of memory";
  463. break;
  464. case -EROFS:
  465. if (!sb || (EXT4_SB(sb)->s_journal &&
  466. EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
  467. errstr = "Journal has aborted";
  468. else
  469. errstr = "Readonly filesystem";
  470. break;
  471. default:
  472. /* If the caller passed in an extra buffer for unknown
  473. * errors, textualise them now. Else we just return
  474. * NULL. */
  475. if (nbuf) {
  476. /* Check for truncated error codes... */
  477. if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
  478. errstr = nbuf;
  479. }
  480. break;
  481. }
  482. return errstr;
  483. }
/* __ext4_std_error decodes expected errors from journaling functions
 * automatically and invokes the appropriate error response. */
void __ext4_std_error(struct super_block *sb, const char *function,
		      unsigned int line, int errno)
{
	char nbuf[16];
	const char *errstr;

	/* Special case: if the error is EROFS, and we're not already
	 * inside a transaction, then there's really no point in logging
	 * an error. */
	if (errno == -EROFS && journal_current_handle() == NULL &&
	    (sb->s_flags & MS_RDONLY))
		return;

	errstr = ext4_decode_error(sb, errno, nbuf);
	printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
	       sb->s_id, function, line, errstr);
	save_error_info(sb, function, line);

	ext4_handle_error(sb);
}
/*
 * ext4_abort is a much stronger failure handler than ext4_error. The
 * abort function may be used to deal with unrecoverable failures such
 * as journal IO errors or ENOMEM at a critical moment in log management.
 *
 * We unconditionally force the filesystem into an ABORT|READONLY state,
 * unless the error response on the fs has been set to panic in which
 * case we take the easy way out and panic immediately.
 */
void __ext4_abort(struct super_block *sb, const char *function,
		  unsigned int line, const char *fmt, ...)
{
	va_list args;

	save_error_info(sb, function, line);
	va_start(args, fmt);
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
	       function, line);
	vprintk(fmt, args);
	printk("\n");
	va_end(args);

	if ((sb->s_flags & MS_RDONLY) == 0) {
		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
		sb->s_flags |= MS_RDONLY;
		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
		if (EXT4_SB(sb)->s_journal)
			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
		/* Persist the newly-set abort state as well. */
		save_error_info(sb, function, line);
	}
	if (test_opt(sb, ERRORS_PANIC))
		panic("EXT4-fs panic from previous error\n");
}
/* Log a formatted message for @sb at the given printk severity @prefix. */
void ext4_msg(struct super_block *sb, const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk("%sEXT4-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
	va_end(args);
}
/* Log a warning for @sb with the reporting function and line.  Unlike
 * __ext4_error(), nothing is persisted and no error action is taken. */
void __ext4_warning(struct super_block *sb, const char *function,
		    unsigned int line, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: %pV\n",
	       sb->s_id, function, line, &vaf);
	va_end(args);
}
/*
 * Report an error found while holding the block-group lock for @grp.
 * In the ERRORS_CONT case the superblock is committed asynchronously
 * with the lock still held; otherwise the group lock is dropped around
 * ext4_handle_error() and then reacquired before returning.
 */
void __ext4_grp_locked_error(const char *function, unsigned int line,
			     struct super_block *sb, ext4_group_t grp,
			     unsigned long ino, ext4_fsblk_t block,
			     const char *fmt, ...)
__releases(bitlock)
__acquires(bitlock)
{
	struct va_format vaf;
	va_list args;
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	es->s_last_error_ino = cpu_to_le32(ino);
	es->s_last_error_block = cpu_to_le64(block);
	__save_error_info(sb, function, line);

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
	       sb->s_id, function, line, grp);
	if (ino)
		printk(KERN_CONT "inode %lu: ", ino);
	if (block)
		printk(KERN_CONT "block %llu:", (unsigned long long) block);
	printk(KERN_CONT "%pV\n", &vaf);
	va_end(args);

	if (test_opt(sb, ERRORS_CONT)) {
		/* Async commit: cannot sleep here with the group locked. */
		ext4_commit_super(sb, 0);
		return;
	}

	ext4_unlock_group(sb, grp);
	ext4_handle_error(sb);
	/*
	 * We only get here in the ERRORS_RO case; relocking the group
	 * may be dangerous, but nothing bad will happen since the
	 * filesystem will have already been marked read/only and the
	 * journal has been aborted.  We return 1 as a hint to callers
	 * who might want to use the return value from
	 * ext4_grp_locked_error() to distinguish between the
	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
	 * aggressively from the ext4 function in question, with a
	 * more appropriate error code.
	 */
	ext4_lock_group(sb, grp);
	return;
}
/*
 * Upgrade an old-revision superblock to EXT4_DYNAMIC_REV when a new
 * feature flag requires it.  No-op when the revision is already past
 * EXT4_GOOD_OLD_REV.
 */
void ext4_update_dynamic_rev(struct super_block *sb)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
		return;

	ext4_warning(sb,
		     "updating to rev %d because of new feature flag, "
		     "running e2fsck is recommended",
		     EXT4_DYNAMIC_REV);

	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
	/* leave es->s_feature_*compat flags alone */
	/* es->s_uuid will be set by e2fsck if empty */

	/*
	 * The rest of the superblock fields should be zero, and if not it
	 * means they are likely already in use, so leave them alone.  We
	 * can leave it up to e2fsck to clean up any inconsistencies there.
	 */
}
/*
 * Open the external journal device with exclusive access, holding @sb
 * as the exclusive holder.  Returns the block device, or NULL on
 * failure (after logging the error).
 */
static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
{
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb);
	if (IS_ERR(bdev))
		goto fail;
	return bdev;

fail:
	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
			__bdevname(dev, b), PTR_ERR(bdev));
	return NULL;
}
  636. /*
  637. * Release the journal device
  638. */
  639. static int ext4_blkdev_put(struct block_device *bdev)
  640. {
  641. return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
  642. }
  643. static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
  644. {
  645. struct block_device *bdev;
  646. int ret = -ENODEV;
  647. bdev = sbi->journal_bdev;
  648. if (bdev) {
  649. ret = ext4_blkdev_put(bdev);
  650. sbi->journal_bdev = NULL;
  651. }
  652. return ret;
  653. }
/* Map an entry of the sb_info orphan list back to its VFS inode. */
static inline struct inode *orphan_list_entry(struct list_head *l)
{
	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
}
/*
 * Debug helper: print the on-disk orphan list head and every inode
 * still on the in-memory orphan list (used when the list is unexpectedly
 * non-empty at unmount).
 */
static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
{
	struct list_head *l;

	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
		 le32_to_cpu(sbi->s_es->s_last_orphan));

	printk(KERN_ERR "sb_info orphan list:\n");
	list_for_each(l, &sbi->s_orphan) {
		struct inode *inode = orphan_list_entry(l);
		printk(KERN_ERR "  "
		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
		       inode->i_sb->s_id, inode->i_ino, inode,
		       inode->i_mode, inode->i_nlink,
		       NEXT_ORPHAN(inode));
	}
}
/*
 * Tear down an ext4 superblock at unmount: flush pending work, destroy
 * the journal, write out a clean superblock state (when read-write),
 * release all per-sb allocations/counters, and drop the sysfs kobject.
 * The ordering below matters — e.g. the workqueue is drained before the
 * journal is destroyed, and the kobject is put only after everything
 * else is torn down.
 */
static void ext4_put_super(struct super_block *sb)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	struct ext4_super_block *es = sbi->s_es;
	int i, err;

	ext4_unregister_li_request(sb);
	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);

	/* Drain and destroy the unwritten-extent conversion workqueue. */
	flush_workqueue(sbi->dio_unwritten_wq);
	destroy_workqueue(sbi->dio_unwritten_wq);

	lock_super(sb);
	if (sb->s_dirt)
		ext4_commit_super(sb, 1);

	if (sbi->s_journal) {
		err = jbd2_journal_destroy(sbi->s_journal);
		sbi->s_journal = NULL;
		if (err < 0)
			ext4_abort(sb, "Couldn't clean up the journal");
	}

	del_timer(&sbi->s_err_report);
	ext4_release_system_zone(sb);
	ext4_mb_release(sb);
	ext4_ext_release(sb);
	ext4_xattr_put_super(sb);

	if (!(sb->s_flags & MS_RDONLY)) {
		/* Mark the filesystem cleanly unmounted on disk. */
		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
		es->s_state = cpu_to_le16(sbi->s_mount_state);
		ext4_commit_super(sb, 1);
	}
	if (sbi->s_proc) {
		remove_proc_entry(sb->s_id, ext4_proc_root);
	}
	kobject_del(&sbi->s_kobj);

	for (i = 0; i < sbi->s_gdb_count; i++)
		brelse(sbi->s_group_desc[i]);
	ext4_kvfree(sbi->s_group_desc);
	ext4_kvfree(sbi->s_flex_groups);
	percpu_counter_destroy(&sbi->s_freeblocks_counter);
	percpu_counter_destroy(&sbi->s_freeinodes_counter);
	percpu_counter_destroy(&sbi->s_dirs_counter);
	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
	brelse(sbi->s_sbh);
#ifdef CONFIG_QUOTA
	for (i = 0; i < MAXQUOTAS; i++)
		kfree(sbi->s_qf_names[i]);
#endif

	/* Debugging code just in case the in-memory inode orphan list
	 * isn't empty.  The on-disk one can be non-empty if we've
	 * detected an error and taken the fs readonly, but the
	 * in-memory list had better be clean by this point. */
	if (!list_empty(&sbi->s_orphan))
		dump_orphan_list(sb, sbi);
	J_ASSERT(list_empty(&sbi->s_orphan));

	invalidate_bdev(sb->s_bdev);
	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
		/*
		 * Invalidate the journal device's buffers.  We don't want them
		 * floating about in memory - the physical journal device may
		 * have been hotswapped, and it breaks the `ro-after' testing code.
		 */
		sync_blockdev(sbi->journal_bdev);
		invalidate_bdev(sbi->journal_bdev);
		ext4_blkdev_remove(sbi);
	}
	if (sbi->s_mmp_tsk)
		kthread_stop(sbi->s_mmp_tsk);
	sb->s_fs_info = NULL;
	/*
	 * Now that we are completely done shutting down the
	 * superblock, we need to actually destroy the kobject.
	 */
	unlock_super(sb);
	kobject_put(&sbi->s_kobj);
	wait_for_completion(&sbi->s_kobj_unregister);
	kfree(sbi->s_blockgroup_lock);
	kfree(sbi);
}
  749. static struct kmem_cache *ext4_inode_cachep;
/*
 * Called inside transaction, so use GFP_NOFS
 */
/* Allocate and initialize a fresh in-memory ext4 inode for the VFS. */
static struct inode *ext4_alloc_inode(struct super_block *sb)
{
	struct ext4_inode_info *ei;

	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->vfs_inode.i_version = 1;
	ei->vfs_inode.i_data.writeback_index = 0;
	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
	INIT_LIST_HEAD(&ei->i_prealloc_list);
	spin_lock_init(&ei->i_prealloc_lock);
	/* Delayed-allocation reservation accounting starts empty. */
	ei->i_reserved_data_blocks = 0;
	ei->i_reserved_meta_blocks = 0;
	ei->i_allocated_meta_blocks = 0;
	ei->i_da_metadata_calc_len = 0;
	spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
	ei->i_reserved_quota = 0;
#endif
	ei->jinode = NULL;
	INIT_LIST_HEAD(&ei->i_completed_io_list);
	spin_lock_init(&ei->i_completed_io_lock);
	ei->cur_aio_dio = NULL;
	ei->i_sync_tid = 0;
	ei->i_datasync_tid = 0;
	atomic_set(&ei->i_ioend_count, 0);
	atomic_set(&ei->i_aiodio_unwritten, 0);

	return &ei->vfs_inode;
}
  782. static int ext4_drop_inode(struct inode *inode)
  783. {
  784. int drop = generic_drop_inode(inode);
  785. trace_ext4_drop_inode(inode, drop);
  786. return drop;
  787. }
/* RCU callback: actually free the inode once all readers are done. */
static void ext4_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
}
/*
 * Destroy an in-memory inode.  The inode must no longer be on the
 * orphan list; if it is, dump diagnostics before freeing it via RCU.
 */
static void ext4_destroy_inode(struct inode *inode)
{
	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
		ext4_msg(inode->i_sb, KERN_ERR,
			 "Inode %lu (%p): orphan list check failed!",
			 inode->i_ino, EXT4_I(inode));
		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
				EXT4_I(inode), sizeof(struct ext4_inode_info),
				true);
		dump_stack();
	}
	call_rcu(&inode->i_rcu, ext4_i_callback);
}
/* Slab constructor: one-time initialization of an ext4_inode_info
 * object when its slab page is first allocated. */
static void init_once(void *foo)
{
	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;

	INIT_LIST_HEAD(&ei->i_orphan);
#ifdef CONFIG_EXT4_FS_XATTR
	init_rwsem(&ei->xattr_sem);
#endif
	init_rwsem(&ei->i_data_sem);
	inode_init_once(&ei->vfs_inode);
}
  817. static int init_inodecache(void)
  818. {
  819. ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
  820. sizeof(struct ext4_inode_info),
  821. 0, (SLAB_RECLAIM_ACCOUNT|
  822. SLAB_MEM_SPREAD),
  823. init_once);
  824. if (ext4_inode_cachep == NULL)
  825. return -ENOMEM;
  826. return 0;
  827. }
/* Destroy the ext4 inode slab cache (module unload / init failure). */
static void destroy_inodecache(void)
{
	kmem_cache_destroy(ext4_inode_cachep);
}
/*
 * Final per-inode cleanup during eviction: drop cached buffers, quota
 * references and preallocations, and release the jbd2 inode tracking
 * structure if one was attached.
 */
void ext4_clear_inode(struct inode *inode)
{
	invalidate_inode_buffers(inode);
	end_writeback(inode);
	dquot_drop(inode);
	ext4_discard_preallocations(inode);
	if (EXT4_I(inode)->jinode) {
		jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
					       EXT4_I(inode)->jinode);
		jbd2_free_inode(EXT4_I(inode)->jinode);
		EXT4_I(inode)->jinode = NULL;
	}
}
  845. static inline void ext4_show_quota_options(struct seq_file *seq,
  846. struct super_block *sb)
  847. {
  848. #if defined(CONFIG_QUOTA)
  849. struct ext4_sb_info *sbi = EXT4_SB(sb);
  850. if (sbi->s_jquota_fmt) {
  851. char *fmtname = "";
  852. switch (sbi->s_jquota_fmt) {
  853. case QFMT_VFS_OLD:
  854. fmtname = "vfsold";
  855. break;
  856. case QFMT_VFS_V0:
  857. fmtname = "vfsv0";
  858. break;
  859. case QFMT_VFS_V1:
  860. fmtname = "vfsv1";
  861. break;
  862. }
  863. seq_printf(seq, ",jqfmt=%s", fmtname);
  864. }
  865. if (sbi->s_qf_names[USRQUOTA])
  866. seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
  867. if (sbi->s_qf_names[GRPQUOTA])
  868. seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
  869. if (test_opt(sb, USRQUOTA))
  870. seq_puts(seq, ",usrquota");
  871. if (test_opt(sb, GRPQUOTA))
  872. seq_puts(seq, ",grpquota");
  873. #endif
  874. }
  875. /*
  876. * Show an option if
  877. * - it's set to a non-default value OR
  878. * - if the per-sb default is different from the global default
  879. */
  880. static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
  881. {
  882. int def_errors;
  883. unsigned long def_mount_opts;
  884. struct super_block *sb = vfs->mnt_sb;
  885. struct ext4_sb_info *sbi = EXT4_SB(sb);
  886. struct ext4_super_block *es = sbi->s_es;
  887. def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
  888. def_errors = le16_to_cpu(es->s_errors);
  889. if (sbi->s_sb_block != 1)
  890. seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
  891. if (test_opt(sb, MINIX_DF))
  892. seq_puts(seq, ",minixdf");
  893. if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
  894. seq_puts(seq, ",grpid");
  895. if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
  896. seq_puts(seq, ",nogrpid");
  897. if (sbi->s_resuid != EXT4_DEF_RESUID ||
  898. le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
  899. seq_printf(seq, ",resuid=%u", sbi->s_resuid);
  900. }
  901. if (sbi->s_resgid != EXT4_DEF_RESGID ||
  902. le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
  903. seq_printf(seq, ",resgid=%u", sbi->s_resgid);
  904. }
  905. if (test_opt(sb, ERRORS_RO)) {
  906. if (def_errors == EXT4_ERRORS_PANIC ||
  907. def_errors == EXT4_ERRORS_CONTINUE) {
  908. seq_puts(seq, ",errors=remount-ro");
  909. }
  910. }
  911. if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
  912. seq_puts(seq, ",errors=continue");
  913. if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
  914. seq_puts(seq, ",errors=panic");
  915. if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
  916. seq_puts(seq, ",nouid32");
  917. if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
  918. seq_puts(seq, ",debug");
  919. if (test_opt(sb, OLDALLOC))
  920. seq_puts(seq, ",oldalloc");
  921. #ifdef CONFIG_EXT4_FS_XATTR
  922. if (test_opt(sb, XATTR_USER))
  923. seq_puts(seq, ",user_xattr");
  924. if (!test_opt(sb, XATTR_USER))
  925. seq_puts(seq, ",nouser_xattr");
  926. #endif
  927. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  928. if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
  929. seq_puts(seq, ",acl");
  930. if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
  931. seq_puts(seq, ",noacl");
  932. #endif
  933. if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
  934. seq_printf(seq, ",commit=%u",
  935. (unsigned) (sbi->s_commit_interval / HZ));
  936. }
  937. if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
  938. seq_printf(seq, ",min_batch_time=%u",
  939. (unsigned) sbi->s_min_batch_time);
  940. }
  941. if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
  942. seq_printf(seq, ",max_batch_time=%u",
  943. (unsigned) sbi->s_min_batch_time);
  944. }
  945. /*
  946. * We're changing the default of barrier mount option, so
  947. * let's always display its mount state so it's clear what its
  948. * status is.
  949. */
  950. seq_puts(seq, ",barrier=");
  951. seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
  952. if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
  953. seq_puts(seq, ",journal_async_commit");
  954. else if (test_opt(sb, JOURNAL_CHECKSUM))
  955. seq_puts(seq, ",journal_checksum");
  956. if (test_opt(sb, I_VERSION))
  957. seq_puts(seq, ",i_version");
  958. if (!test_opt(sb, DELALLOC) &&
  959. !(def_mount_opts & EXT4_DEFM_NODELALLOC))
  960. seq_puts(seq, ",nodelalloc");
  961. if (!test_opt(sb, MBLK_IO_SUBMIT))
  962. seq_puts(seq, ",nomblk_io_submit");
  963. if (sbi->s_stripe)
  964. seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
  965. /*
  966. * journal mode get enabled in different ways
  967. * So just print the value even if we didn't specify it
  968. */
  969. if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
  970. seq_puts(seq, ",data=journal");
  971. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
  972. seq_puts(seq, ",data=ordered");
  973. else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
  974. seq_puts(seq, ",data=writeback");
  975. if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
  976. seq_printf(seq, ",inode_readahead_blks=%u",
  977. sbi->s_inode_readahead_blks);
  978. if (test_opt(sb, DATA_ERR_ABORT))
  979. seq_puts(seq, ",data_err=abort");
  980. if (test_opt(sb, NO_AUTO_DA_ALLOC))
  981. seq_puts(seq, ",noauto_da_alloc");
  982. if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD))
  983. seq_puts(seq, ",discard");
  984. if (test_opt(sb, NOLOAD))
  985. seq_puts(seq, ",norecovery");
  986. if (test_opt(sb, DIOREAD_NOLOCK))
  987. seq_puts(seq, ",dioread_nolock");
  988. if (test_opt(sb, BLOCK_VALIDITY) &&
  989. !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
  990. seq_puts(seq, ",block_validity");
  991. if (!test_opt(sb, INIT_INODE_TABLE))
  992. seq_puts(seq, ",noinit_itable");
  993. else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
  994. seq_printf(seq, ",init_itable=%u",
  995. (unsigned) sbi->s_li_wait_mult);
  996. ext4_show_quota_options(seq, sb);
  997. return 0;
  998. }
  999. static struct inode *ext4_nfs_get_inode(struct super_block *sb,
  1000. u64 ino, u32 generation)
  1001. {
  1002. struct inode *inode;
  1003. if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
  1004. return ERR_PTR(-ESTALE);
  1005. if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
  1006. return ERR_PTR(-ESTALE);
  1007. /* iget isn't really right if the inode is currently unallocated!!
  1008. *
  1009. * ext4_read_inode will return a bad_inode if the inode had been
  1010. * deleted, so we should be safe.
  1011. *
  1012. * Currently we don't know the generation for parent directory, so
  1013. * a generation of 0 means "accept any"
  1014. */
  1015. inode = ext4_iget(sb, ino);
  1016. if (IS_ERR(inode))
  1017. return ERR_CAST(inode);
  1018. if (generation && inode->i_generation != generation) {
  1019. iput(inode);
  1020. return ERR_PTR(-ESTALE);
  1021. }
  1022. return inode;
  1023. }
/*
 * NFS export: decode a file handle into the dentry it refers to,
 * delegating inode lookup to ext4_nfs_get_inode().
 */
static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}
/*
 * NFS export: decode a file handle into the *parent* dentry,
 * delegating inode lookup to ext4_nfs_get_inode().
 */
static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
					int fh_len, int fh_type)
{
	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
				    ext4_nfs_get_inode);
}
  1036. /*
  1037. * Try to release metadata pages (indirect blocks, directories) which are
  1038. * mapped via the block device. Since these pages could have journal heads
  1039. * which would prevent try_to_free_buffers() from freeing them, we must use
  1040. * jbd2 layer's try_to_free_buffers() function to release them.
  1041. */
  1042. static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
  1043. gfp_t wait)
  1044. {
  1045. journal_t *journal = EXT4_SB(sb)->s_journal;
  1046. WARN_ON(PageChecked(page));
  1047. if (!page_has_buffers(page))
  1048. return 0;
  1049. if (journal)
  1050. return jbd2_journal_try_to_free_buffers(journal, page,
  1051. wait & ~__GFP_WAIT);
  1052. return try_to_free_buffers(page);
  1053. }
#ifdef CONFIG_QUOTA
/* Printable name for a quota type (USRQUOTA / GRPQUOTA). */
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
/* Paste a quota type onto a mount-option prefix token. */
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))

/* Forward declarations for the quota callbacks wired up below. */
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
static int ext4_mark_dquot_dirty(struct dquot *dquot);
static int ext4_write_info(struct super_block *sb, int type);
static int ext4_quota_on(struct super_block *sb, int type, int format_id,
			 struct path *path);
static int ext4_quota_off(struct super_block *sb, int type);
static int ext4_quota_on_mount(struct super_block *sb, int type);
static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
			       size_t len, loff_t off);
static ssize_t ext4_quota_write(struct super_block *sb, int type,
				const char *data, size_t len, loff_t off);

/*
 * Low-level dquot operations: ext4-specific handlers where journaling
 * is involved, generic dquot helpers for allocation/destruction.
 */
static const struct dquot_operations ext4_quota_operations = {
	.get_reserved_space = ext4_get_reserved_space,
	.write_dquot	= ext4_write_dquot,
	.acquire_dquot	= ext4_acquire_dquot,
	.release_dquot	= ext4_release_dquot,
	.mark_dirty	= ext4_mark_dquot_dirty,
	.write_info	= ext4_write_info,
	.alloc_dquot	= dquot_alloc,
	.destroy_dquot	= dquot_destroy,
};

/*
 * quotactl(2) entry points: on/off are ext4-specific, the rest are the
 * generic dquot implementations.
 */
static const struct quotactl_ops ext4_qctl_operations = {
	.quota_on	= ext4_quota_on,
	.quota_off	= ext4_quota_off,
	.quota_sync	= dquot_quota_sync,
	.get_info	= dquot_get_dqinfo,
	.set_info	= dquot_set_dqinfo,
	.get_dqblk	= dquot_get_dqblk,
	.set_dqblk	= dquot_set_dqblk
};
#endif
/*
 * Superblock operations table.  Compared with ext4_nojournal_sops
 * below, this variant additionally provides freeze/unfreeze and omits
 * .write_super — presumably selected when a jbd2 journal is in use;
 * confirm at the sb->s_op assignment site.
 */
static const struct super_operations ext4_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.put_super	= ext4_put_super,
	.sync_fs	= ext4_sync_fs,
	.freeze_fs	= ext4_freeze,
	.unfreeze_fs	= ext4_unfreeze,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};
/*
 * Superblock operations for no-journal mounts: uses .write_super
 * instead of sync_fs/freeze/unfreeze; otherwise identical to
 * ext4_sops above.
 */
static const struct super_operations ext4_nojournal_sops = {
	.alloc_inode	= ext4_alloc_inode,
	.destroy_inode	= ext4_destroy_inode,
	.write_inode	= ext4_write_inode,
	.dirty_inode	= ext4_dirty_inode,
	.drop_inode	= ext4_drop_inode,
	.evict_inode	= ext4_evict_inode,
	.write_super	= ext4_write_super,
	.put_super	= ext4_put_super,
	.statfs		= ext4_statfs,
	.remount_fs	= ext4_remount,
	.show_options	= ext4_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= ext4_quota_read,
	.quota_write	= ext4_quota_write,
#endif
	.bdev_try_to_free_page = bdev_try_to_free_page,
};
/* NFS export operations: file-handle decoding and parent lookup. */
static const struct export_operations ext4_export_ops = {
	.fh_to_dentry = ext4_fh_to_dentry,
	.fh_to_parent = ext4_fh_to_parent,
	.get_parent = ext4_get_parent,
};
/*
 * Token ids produced by match_token() when parsing mount options;
 * each id corresponds to one or more strings in the tokens table.
 * Opt_err is the catch-all for unrecognized options.
 */
enum {
	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
	Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
	Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
	Opt_journal_update, Opt_journal_dev,
	Opt_journal_checksum, Opt_journal_async_commit,
	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
	Opt_data_err_abort, Opt_data_err_ignore,
	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
	Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
	Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
	Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
	Opt_inode_readahead_blks, Opt_journal_ioprio,
	Opt_dioread_nolock, Opt_dioread_lock,
	Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
};
/*
 * Mount option string patterns for match_token().  %u / %s mark
 * integer and string arguments; multiple patterns may map to the same
 * token (aliases such as "bsdgroups" for grpid, or optional-argument
 * forms like "barrier" vs "barrier=%u").  The {Opt_err, NULL} entry
 * terminates the table.
 */
static const match_table_t tokens = {
	{Opt_bsd_df, "bsddf"},
	{Opt_minix_df, "minixdf"},
	{Opt_grpid, "grpid"},
	{Opt_grpid, "bsdgroups"},
	{Opt_nogrpid, "nogrpid"},
	{Opt_nogrpid, "sysvgroups"},
	{Opt_resgid, "resgid=%u"},
	{Opt_resuid, "resuid=%u"},
	{Opt_sb, "sb=%u"},
	{Opt_err_cont, "errors=continue"},
	{Opt_err_panic, "errors=panic"},
	{Opt_err_ro, "errors=remount-ro"},
	{Opt_nouid32, "nouid32"},
	{Opt_debug, "debug"},
	{Opt_oldalloc, "oldalloc"},
	{Opt_orlov, "orlov"},
	{Opt_user_xattr, "user_xattr"},
	{Opt_nouser_xattr, "nouser_xattr"},
	{Opt_acl, "acl"},
	{Opt_noacl, "noacl"},
	{Opt_noload, "noload"},
	{Opt_noload, "norecovery"},
	{Opt_nobh, "nobh"},
	{Opt_bh, "bh"},
	{Opt_commit, "commit=%u"},
	{Opt_min_batch_time, "min_batch_time=%u"},
	{Opt_max_batch_time, "max_batch_time=%u"},
	{Opt_journal_update, "journal=update"},
	{Opt_journal_dev, "journal_dev=%u"},
	{Opt_journal_checksum, "journal_checksum"},
	{Opt_journal_async_commit, "journal_async_commit"},
	{Opt_abort, "abort"},
	{Opt_data_journal, "data=journal"},
	{Opt_data_ordered, "data=ordered"},
	{Opt_data_writeback, "data=writeback"},
	{Opt_data_err_abort, "data_err=abort"},
	{Opt_data_err_ignore, "data_err=ignore"},
	{Opt_offusrjquota, "usrjquota="},
	{Opt_usrjquota, "usrjquota=%s"},
	{Opt_offgrpjquota, "grpjquota="},
	{Opt_grpjquota, "grpjquota=%s"},
	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
	{Opt_grpquota, "grpquota"},
	{Opt_noquota, "noquota"},
	{Opt_quota, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_barrier, "barrier=%u"},
	{Opt_barrier, "barrier"},
	{Opt_nobarrier, "nobarrier"},
	{Opt_i_version, "i_version"},
	{Opt_stripe, "stripe=%u"},
	{Opt_resize, "resize"},
	{Opt_delalloc, "delalloc"},
	{Opt_nodelalloc, "nodelalloc"},
	{Opt_mblk_io_submit, "mblk_io_submit"},
	{Opt_nomblk_io_submit, "nomblk_io_submit"},
	{Opt_block_validity, "block_validity"},
	{Opt_noblock_validity, "noblock_validity"},
	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
	{Opt_journal_ioprio, "journal_ioprio=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
	{Opt_auto_da_alloc, "auto_da_alloc"},
	{Opt_noauto_da_alloc, "noauto_da_alloc"},
	{Opt_dioread_nolock, "dioread_nolock"},
	{Opt_dioread_lock, "dioread_lock"},
	{Opt_discard, "discard"},
	{Opt_nodiscard, "nodiscard"},
	{Opt_init_itable, "init_itable=%u"},
	{Opt_init_itable, "init_itable"},
	{Opt_noinit_itable, "noinit_itable"},
	{Opt_err, NULL},
};
  1229. static ext4_fsblk_t get_sb_block(void **data)
  1230. {
  1231. ext4_fsblk_t sb_block;
  1232. char *options = (char *) *data;
  1233. if (!options || strncmp(options, "sb=", 3) != 0)
  1234. return 1; /* Default location */
  1235. options += 3;
  1236. /* TODO: use simple_strtoll with >32bit ext4 */
  1237. sb_block = simple_strtoul(options, &options, 0);
  1238. if (*options && *options != ',') {
  1239. printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
  1240. (char *) *data);
  1241. return 1;
  1242. }
  1243. if (*options == ',')
  1244. options++;
  1245. *data = (void *) options;
  1246. return sb_block;
  1247. }
/* Default I/O priority for journal commits: best-effort class, level 3. */
#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))

/* printk template warning that a mount option is scheduled for removal. */
static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
  1251. #ifdef CONFIG_QUOTA
  1252. static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
  1253. {
  1254. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1255. char *qname;
  1256. if (sb_any_quota_loaded(sb) &&
  1257. !sbi->s_qf_names[qtype]) {
  1258. ext4_msg(sb, KERN_ERR,
  1259. "Cannot change journaled "
  1260. "quota options when quota turned on");
  1261. return 0;
  1262. }
  1263. qname = match_strdup(args);
  1264. if (!qname) {
  1265. ext4_msg(sb, KERN_ERR,
  1266. "Not enough memory for storing quotafile name");
  1267. return 0;
  1268. }
  1269. if (sbi->s_qf_names[qtype] &&
  1270. strcmp(sbi->s_qf_names[qtype], qname)) {
  1271. ext4_msg(sb, KERN_ERR,
  1272. "%s quota file already specified", QTYPE2NAME(qtype));
  1273. kfree(qname);
  1274. return 0;
  1275. }
  1276. sbi->s_qf_names[qtype] = qname;
  1277. if (strchr(sbi->s_qf_names[qtype], '/')) {
  1278. ext4_msg(sb, KERN_ERR,
  1279. "quotafile must be on filesystem root");
  1280. kfree(sbi->s_qf_names[qtype]);
  1281. sbi->s_qf_names[qtype] = NULL;
  1282. return 0;
  1283. }
  1284. set_opt(sb, QUOTA);
  1285. return 1;
  1286. }
  1287. static int clear_qf_name(struct super_block *sb, int qtype)
  1288. {
  1289. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1290. if (sb_any_quota_loaded(sb) &&
  1291. sbi->s_qf_names[qtype]) {
  1292. ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
  1293. " when quota turned on");
  1294. return 0;
  1295. }
  1296. /*
  1297. * The space will be released later when all options are confirmed
  1298. * to be correct
  1299. */
  1300. sbi->s_qf_names[qtype] = NULL;
  1301. return 1;
  1302. }
  1303. #endif
  1304. static int parse_options(char *options, struct super_block *sb,
  1305. unsigned long *journal_devnum,
  1306. unsigned int *journal_ioprio,
  1307. ext4_fsblk_t *n_blocks_count, int is_remount)
  1308. {
  1309. struct ext4_sb_info *sbi = EXT4_SB(sb);
  1310. char *p;
  1311. substring_t args[MAX_OPT_ARGS];
  1312. int data_opt = 0;
  1313. int option;
  1314. #ifdef CONFIG_QUOTA
  1315. int qfmt;
  1316. #endif
  1317. if (!options)
  1318. return 1;
  1319. while ((p = strsep(&options, ",")) != NULL) {
  1320. int token;
  1321. if (!*p)
  1322. continue;
  1323. /*
  1324. * Initialize args struct so we know whether arg was
  1325. * found; some options take optional arguments.
  1326. */
  1327. args[0].to = args[0].from = NULL;
  1328. token = match_token(p, tokens, args);
  1329. switch (token) {
  1330. case Opt_bsd_df:
  1331. ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
  1332. clear_opt(sb, MINIX_DF);
  1333. break;
  1334. case Opt_minix_df:
  1335. ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
  1336. set_opt(sb, MINIX_DF);
  1337. break;
  1338. case Opt_grpid:
  1339. ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
  1340. set_opt(sb, GRPID);
  1341. break;
  1342. case Opt_nogrpid:
  1343. ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
  1344. clear_opt(sb, GRPID);
  1345. break;
  1346. case Opt_resuid:
  1347. if (match_int(&args[0], &option))
  1348. return 0;
  1349. sbi->s_resuid = option;
  1350. break;
  1351. case Opt_resgid:
  1352. if (match_int(&args[0], &option))
  1353. return 0;
  1354. sbi->s_resgid = option;
  1355. break;
  1356. case Opt_sb:
  1357. /* handled by get_sb_block() instead of here */
  1358. /* *sb_block = match_int(&args[0]); */
  1359. break;
  1360. case Opt_err_panic:
  1361. clear_opt(sb, ERRORS_CONT);
  1362. clear_opt(sb, ERRORS_RO);
  1363. set_opt(sb, ERRORS_PANIC);
  1364. break;
  1365. case Opt_err_ro:
  1366. clear_opt(sb, ERRORS_CONT);
  1367. clear_opt(sb, ERRORS_PANIC);
  1368. set_opt(sb, ERRORS_RO);
  1369. break;
  1370. case Opt_err_cont:
  1371. clear_opt(sb, ERRORS_RO);
  1372. clear_opt(sb, ERRORS_PANIC);
  1373. set_opt(sb, ERRORS_CONT);
  1374. break;
  1375. case Opt_nouid32:
  1376. set_opt(sb, NO_UID32);
  1377. break;
  1378. case Opt_debug:
  1379. set_opt(sb, DEBUG);
  1380. break;
  1381. case Opt_oldalloc:
  1382. set_opt(sb, OLDALLOC);
  1383. break;
  1384. case Opt_orlov:
  1385. clear_opt(sb, OLDALLOC);
  1386. break;
  1387. #ifdef CONFIG_EXT4_FS_XATTR
  1388. case Opt_user_xattr:
  1389. set_opt(sb, XATTR_USER);
  1390. break;
  1391. case Opt_nouser_xattr:
  1392. clear_opt(sb, XATTR_USER);
  1393. break;
  1394. #else
  1395. case Opt_user_xattr:
  1396. case Opt_nouser_xattr:
  1397. ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
  1398. break;
  1399. #endif
  1400. #ifdef CONFIG_EXT4_FS_POSIX_ACL
  1401. case Opt_acl:
  1402. set_opt(sb, POSIX_ACL);
  1403. break;
  1404. case Opt_noacl:
  1405. clear_opt(sb, POSIX_ACL);
  1406. break;
  1407. #else
  1408. case Opt_acl:
  1409. case Opt_noacl:
  1410. ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
  1411. break;
  1412. #endif
  1413. case Opt_journal_update:
  1414. /* @@@ FIXME */
  1415. /* Eventually we will want to be able to create
  1416. a journal file here. For now, only allow the
  1417. user to specify an existing inode to be the
  1418. journal file. */
  1419. if (is_remount) {
  1420. ext4_msg(sb, KERN_ERR,
  1421. "Cannot specify journal on remount");
  1422. return 0;
  1423. }
  1424. set_opt(sb, UPDATE_JOURNAL);
  1425. break;
  1426. case Opt_journal_dev:
  1427. if (is_remount) {
  1428. ext4_msg(sb, KERN_ERR,
  1429. "Cannot specify journal on remount");
  1430. return 0;
  1431. }
  1432. if (match_int(&args[0], &option))
  1433. return 0;
  1434. *journal_devnum = option;
  1435. break;
  1436. case Opt_journal_checksum:
  1437. set_opt(sb, JOURNAL_CHECKSUM);
  1438. break;
  1439. case Opt_journal_async_commit:
  1440. set_opt(sb, JOURNAL_ASYNC_COMMIT);
  1441. set_opt(sb, JOURNAL_CHECKSUM);
  1442. break;
  1443. case Opt_noload:
  1444. set_opt(sb, NOLOAD);
  1445. break;
  1446. case Opt_commit:
  1447. if (match_int(&args[0], &option))
  1448. return 0;
  1449. if (option < 0)
  1450. return 0;
  1451. if (option == 0)
  1452. option = JBD2_DEFAULT_MAX_COMMIT_AGE;
  1453. sbi->s_commit_interval = HZ * option;
  1454. break;
  1455. case Opt_max_batch_time:
  1456. if (match_int(&args[0], &option))
  1457. return 0;
  1458. if (option < 0)
  1459. return 0;
  1460. if (option == 0)
  1461. option = EXT4_DEF_MAX_BATCH_TIME;
  1462. sbi->s_max_batch_time = option;
  1463. break;
  1464. case Opt_min_batch_time:
  1465. if (match_int(&args[0], &option))
  1466. return 0;
  1467. if (option < 0)
  1468. return 0;
  1469. sbi->s_min_batch_time = option;
  1470. break;
  1471. case Opt_data_journal:
  1472. data_opt = EXT4_MOUNT_JOURNAL_DATA;
  1473. goto datacheck;
  1474. case Opt_data_ordered:
  1475. data_opt = EXT4_MOUNT_ORDERED_DATA;
  1476. goto datacheck;
  1477. case Opt_data_writeback:
  1478. data_opt = EXT4_MOUNT_WRITEBACK_DATA;
  1479. datacheck:
  1480. if (is_remount) {
  1481. if (test_opt(sb, DATA_FLAGS) != data_opt) {
  1482. ext4_msg(sb, KERN_ERR,
  1483. "Cannot change data mode on remount");
  1484. return 0;
  1485. }
  1486. } else {
  1487. clear_opt(sb, DATA_FLAGS);
  1488. sbi->s_mount_opt |= data_opt;
  1489. }
  1490. break;
  1491. case Opt_data_err_abort:
  1492. set_opt(sb, DATA_ERR_ABORT);
  1493. break;
  1494. case Opt_data_err_ignore:
  1495. clear_opt(sb, DATA_ERR_ABORT);
  1496. break;
  1497. #ifdef CONFIG_QUOTA
  1498. case Opt_usrjquota:
  1499. if (!set_qf_name(sb, USRQUOTA, &args[0]))
  1500. return 0;
  1501. break;
  1502. case Opt_grpjquota:
  1503. if (!set_qf_name(sb, GRPQUOTA, &args[0]))
  1504. return 0;
  1505. break;
  1506. case Opt_offusrjquota:
  1507. if (!clear_qf_name(sb, USRQUOTA))
  1508. return 0;
  1509. break;
  1510. case Opt_offgrpjquota:
  1511. if (!clear_qf_name(sb, GRPQUOTA))
  1512. return 0;
  1513. break;
  1514. case Opt_jqfmt_vfsold:
  1515. qfmt = QFMT_VFS_OLD;
  1516. goto set_qf_format;
  1517. case Opt_jqfmt_vfsv0:
  1518. qfmt = QFMT_VFS_V0;
  1519. goto set_qf_format;
  1520. case Opt_jqfmt_vfsv1:
  1521. qfmt = QFMT_VFS_V1;
  1522. set_qf_format:
  1523. if (sb_any_quota_loaded(sb) &&
  1524. sbi->s_jquota_fmt != qfmt) {
  1525. ext4_msg(sb, KERN_ERR, "Cannot change "
  1526. "journaled quota options when "
  1527. "quota turned on");
  1528. return 0;
  1529. }
  1530. sbi->s_jquota_fmt = qfmt;
  1531. break;
  1532. case Opt_quota:
  1533. case Opt_usrquota:
  1534. set_opt(sb, QUOTA);
  1535. set_opt(sb, USRQUOTA);
  1536. break;
  1537. case Opt_grpquota:
  1538. set_opt(sb, QUOTA);
  1539. set_opt(sb, GRPQUOTA);
  1540. break;
  1541. case Opt_noquota:
  1542. if (sb_any_quota_loaded(sb)) {
  1543. ext4_msg(sb, KERN_ERR, "Cannot change quota "
  1544. "options when quota turned on");
  1545. return 0;
  1546. }
  1547. clear_opt(sb, QUOTA);
  1548. clear_opt(sb, USRQUOTA);
  1549. clear_opt(sb, GRPQUOTA);
  1550. break;
  1551. #else
  1552. case Opt_quota:
  1553. case Opt_usrquota:
  1554. case Opt_grpquota:
  1555. ext4_msg(sb, KERN_ERR,
  1556. "quota options not supported");
  1557. break;
  1558. case Opt_usrjquota:
  1559. case Opt_grpjquota:
  1560. case Opt_offusrjquota:
  1561. case Opt_offgrpjquota:
  1562. case Opt_jqfmt_vfsold:
  1563. case Opt_jqfmt_vfsv0:
  1564. case Opt_jqfmt_vfsv1:
  1565. ext4_msg(sb, KERN_ERR,
  1566. "journaled quota options not supported");
  1567. break;
  1568. case Opt_noquota:
  1569. break;
  1570. #endif
  1571. case Opt_abort:
  1572. sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
  1573. break;
  1574. case Opt_nobarrier:
  1575. clear_opt(sb, BARRIER);
  1576. break;
  1577. case Opt_barrier:
  1578. if (args[0].from) {
  1579. if (match_int(&args[0], &option))
  1580. return 0;
  1581. } else
  1582. option = 1; /* No argument, default to 1 */
  1583. if (option)
  1584. set_opt(sb, BARRIER);
  1585. else
  1586. clear_opt(sb, BARRIER);
  1587. break;
  1588. case Opt_ignore:
  1589. break;
  1590. case Opt_resize:
  1591. if (!is_remount) {
  1592. ext4_msg(sb, KERN_ERR,
  1593. "resize option only available "
  1594. "for remount");
  1595. return 0;
  1596. }
  1597. if (match_int(&args[0], &option) != 0)