
/fs/gfs2/super.c

https://bitbucket.org/emiliolopez/linux

/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"

#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
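
/*
 * Editor's note (illustrative, not part of the original source): args_neq()
 * compares one ar_ field of two gfs2_args structures via token pasting, so
 *
 *         args_neq(&a, &b, spectator)
 *
 * expands to ((&a)->ar_spectator != (&b)->ar_spectator). It is used below
 * in gfs2_remount_fs() to reject remounts that try to change fixed options.
 */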

enum {
        Opt_lockproto,
        Opt_locktable,
        Opt_hostdata,
        Opt_spectator,
        Opt_ignore_local_fs,
        Opt_localflocks,
        Opt_localcaching,
        Opt_debug,
        Opt_nodebug,
        Opt_upgrade,
        Opt_acl,
        Opt_noacl,
        Opt_quota_off,
        Opt_quota_account,
        Opt_quota_on,
        Opt_quota,
        Opt_noquota,
        Opt_suiddir,
        Opt_nosuiddir,
        Opt_data_writeback,
        Opt_data_ordered,
        Opt_meta,
        Opt_discard,
        Opt_nodiscard,
        Opt_commit,
        Opt_err_withdraw,
        Opt_err_panic,
        Opt_statfs_quantum,
        Opt_statfs_percent,
        Opt_quota_quantum,
        Opt_barrier,
        Opt_nobarrier,
        Opt_rgrplvb,
        Opt_norgrplvb,
        Opt_loccookie,
        Opt_noloccookie,
        Opt_error,
};

static const match_table_t tokens = {
        {Opt_lockproto, "lockproto=%s"},
        {Opt_locktable, "locktable=%s"},
        {Opt_hostdata, "hostdata=%s"},
        {Opt_spectator, "spectator"},
        {Opt_spectator, "norecovery"},
        {Opt_ignore_local_fs, "ignore_local_fs"},
        {Opt_localflocks, "localflocks"},
        {Opt_localcaching, "localcaching"},
        {Opt_debug, "debug"},
        {Opt_nodebug, "nodebug"},
        {Opt_upgrade, "upgrade"},
        {Opt_acl, "acl"},
        {Opt_noacl, "noacl"},
        {Opt_quota_off, "quota=off"},
        {Opt_quota_account, "quota=account"},
        {Opt_quota_on, "quota=on"},
        {Opt_quota, "quota"},
        {Opt_noquota, "noquota"},
        {Opt_suiddir, "suiddir"},
        {Opt_nosuiddir, "nosuiddir"},
        {Opt_data_writeback, "data=writeback"},
        {Opt_data_ordered, "data=ordered"},
        {Opt_meta, "meta"},
        {Opt_discard, "discard"},
        {Opt_nodiscard, "nodiscard"},
        {Opt_commit, "commit=%d"},
        {Opt_err_withdraw, "errors=withdraw"},
        {Opt_err_panic, "errors=panic"},
        {Opt_statfs_quantum, "statfs_quantum=%d"},
        {Opt_statfs_percent, "statfs_percent=%d"},
        {Opt_quota_quantum, "quota_quantum=%d"},
        {Opt_barrier, "barrier"},
        {Opt_nobarrier, "nobarrier"},
        {Opt_rgrplvb, "rgrplvb"},
        {Opt_norgrplvb, "norgrplvb"},
        {Opt_loccookie, "loccookie"},
        {Opt_noloccookie, "noloccookie"},
        {Opt_error, NULL}
};

/**
 * gfs2_mount_args - Parse mount options
 * @args: The structure into which the parsed options will be written
 * @options: The options to parse
 *
 * Returns: errno
 */

int gfs2_mount_args(struct gfs2_args *args, char *options)
{
        char *o;
        int token;
        substring_t tmp[MAX_OPT_ARGS];
        int rv;

        /* Split the options into tokens with the "," character and
           process them */

        while (1) {
                o = strsep(&options, ",");
                if (o == NULL)
                        break;
                if (*o == '\0')
                        continue;

                token = match_token(o, tokens, tmp);
                switch (token) {
                case Opt_lockproto:
                        match_strlcpy(args->ar_lockproto, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_locktable:
                        match_strlcpy(args->ar_locktable, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_hostdata:
                        match_strlcpy(args->ar_hostdata, &tmp[0],
                                      GFS2_LOCKNAME_LEN);
                        break;
                case Opt_spectator:
                        args->ar_spectator = 1;
                        break;
                case Opt_ignore_local_fs:
                        /* Retained for backwards compat only */
                        break;
                case Opt_localflocks:
                        args->ar_localflocks = 1;
                        break;
                case Opt_localcaching:
                        /* Retained for backwards compat only */
                        break;
                case Opt_debug:
                        if (args->ar_errors == GFS2_ERRORS_PANIC) {
                                pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
                                return -EINVAL;
                        }
                        args->ar_debug = 1;
                        break;
                case Opt_nodebug:
                        args->ar_debug = 0;
                        break;
                case Opt_upgrade:
                        /* Retained for backwards compat only */
                        break;
                case Opt_acl:
                        args->ar_posix_acl = 1;
                        break;
                case Opt_noacl:
                        args->ar_posix_acl = 0;
                        break;
                case Opt_quota_off:
                case Opt_noquota:
                        args->ar_quota = GFS2_QUOTA_OFF;
                        break;
                case Opt_quota_account:
                        args->ar_quota = GFS2_QUOTA_ACCOUNT;
                        break;
                case Opt_quota_on:
                case Opt_quota:
                        args->ar_quota = GFS2_QUOTA_ON;
                        break;
                case Opt_suiddir:
                        args->ar_suiddir = 1;
                        break;
                case Opt_nosuiddir:
                        args->ar_suiddir = 0;
                        break;
                case Opt_data_writeback:
                        args->ar_data = GFS2_DATA_WRITEBACK;
                        break;
                case Opt_data_ordered:
                        args->ar_data = GFS2_DATA_ORDERED;
                        break;
                case Opt_meta:
                        args->ar_meta = 1;
                        break;
                case Opt_discard:
                        args->ar_discard = 1;
                        break;
                case Opt_nodiscard:
                        args->ar_discard = 0;
                        break;
                case Opt_commit:
                        rv = match_int(&tmp[0], &args->ar_commit);
                        if (rv || args->ar_commit <= 0) {
                                pr_warn("commit mount option requires a positive numeric argument\n");
                                return rv ? rv : -EINVAL;
                        }
                        break;
                case Opt_statfs_quantum:
                        rv = match_int(&tmp[0], &args->ar_statfs_quantum);
                        if (rv || args->ar_statfs_quantum < 0) {
                                pr_warn("statfs_quantum mount option requires a non-negative numeric argument\n");
                                return rv ? rv : -EINVAL;
                        }
                        break;
                case Opt_quota_quantum:
                        rv = match_int(&tmp[0], &args->ar_quota_quantum);
                        if (rv || args->ar_quota_quantum <= 0) {
                                pr_warn("quota_quantum mount option requires a positive numeric argument\n");
                                return rv ? rv : -EINVAL;
                        }
                        break;
                case Opt_statfs_percent:
                        rv = match_int(&tmp[0], &args->ar_statfs_percent);
                        if (rv || args->ar_statfs_percent < 0 ||
                            args->ar_statfs_percent > 100) {
                                pr_warn("statfs_percent mount option requires a numeric argument between 0 and 100\n");
                                return rv ? rv : -EINVAL;
                        }
                        break;
                case Opt_err_withdraw:
                        args->ar_errors = GFS2_ERRORS_WITHDRAW;
                        break;
                case Opt_err_panic:
                        if (args->ar_debug) {
                                pr_warn("-o debug and -o errors=panic are mutually exclusive\n");
                                return -EINVAL;
                        }
                        args->ar_errors = GFS2_ERRORS_PANIC;
                        break;
                case Opt_barrier:
                        args->ar_nobarrier = 0;
                        break;
                case Opt_nobarrier:
                        args->ar_nobarrier = 1;
                        break;
                case Opt_rgrplvb:
                        args->ar_rgrplvb = 1;
                        break;
                case Opt_norgrplvb:
                        args->ar_rgrplvb = 0;
                        break;
                case Opt_loccookie:
                        args->ar_loccookie = 1;
                        break;
                case Opt_noloccookie:
                        args->ar_loccookie = 0;
                        break;
                case Opt_error:
                default:
                        pr_warn("invalid mount option: %s\n", o);
                        return -EINVAL;
                }
        }

        return 0;
}
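
/*
 * Example (editor's illustration, not part of the original source): for a
 * mount such as
 *
 *         mount -t gfs2 -o lockproto=lock_dlm,quota=on,commit=30 /dev/vg/lv /mnt
 *
 * the VFS passes "lockproto=lock_dlm,quota=on,commit=30" as @options, and
 * gfs2_mount_args() leaves ar_lockproto = "lock_dlm", ar_quota =
 * GFS2_QUOTA_ON and ar_commit = 30 in @args.
 */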

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
        struct list_head list;
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        list_add(&list, &sdp->sd_jindex_list);
        list_del_init(&sdp->sd_jindex_list);
        sdp->sd_journals = 0;
        spin_unlock(&sdp->sd_jindex_spin);

        while (!list_empty(&list)) {
                jd = list_entry(list.next, struct gfs2_jdesc, jd_list);
                gfs2_free_journal_extents(jd);
                list_del(&jd->jd_list);
                iput(jd->jd_inode);
                kfree(jd);
        }
}

static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
        struct gfs2_jdesc *jd;
        int found = 0;

        list_for_each_entry(jd, head, jd_list) {
                if (jd->jd_jid == jid) {
                        found = 1;
                        break;
                }
        }

        if (!found)
                jd = NULL;

        return jd;
}

struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
        struct gfs2_jdesc *jd;

        spin_lock(&sdp->sd_jindex_spin);
        jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
        spin_unlock(&sdp->sd_jindex_spin);

        return jd;
}

int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
        struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
        struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
        u64 size = i_size_read(jd->jd_inode);

        if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
                return -EIO;

        jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

        if (gfs2_write_alloc_required(ip, 0, size)) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        return 0;
}

static int init_threads(struct gfs2_sbd *sdp)
{
        struct task_struct *p;
        int error = 0;

        p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
        if (IS_ERR(p)) {
                error = PTR_ERR(p);
                fs_err(sdp, "can't start logd thread: %d\n", error);
                return error;
        }
        sdp->sd_logd_process = p;

        p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
        if (IS_ERR(p)) {
                error = PTR_ERR(p);
                fs_err(sdp, "can't start quotad thread: %d\n", error);
                goto fail;
        }
        sdp->sd_quotad_process = p;
        return 0;

fail:
        kthread_stop(sdp->sd_logd_process);
        return error;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
        struct gfs2_glock *j_gl = ip->i_gl;
        struct gfs2_holder freeze_gh;
        struct gfs2_log_header_host head;
        int error;

        error = init_threads(sdp);
        if (error)
                return error;

        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
                                   &freeze_gh);
        if (error)
                goto fail_threads;

        j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

        error = gfs2_find_jhead(sdp->sd_jdesc, &head);
        if (error)
                goto fail;

        if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                gfs2_consist(sdp);
                error = -EIO;
                goto fail;
        }

        /* Initialize the head of the log */
        sdp->sd_log_sequence = head.lh_sequence + 1;
        gfs2_log_pointers_init(sdp, head.lh_blkno);

        error = gfs2_quota_init(sdp);
        if (error)
                goto fail;

        set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

        gfs2_glock_dq_uninit(&freeze_gh);

        return 0;

fail:
        freeze_gh.gh_flags |= GL_NOCACHE;
        gfs2_glock_dq_uninit(&freeze_gh);
fail_threads:
        kthread_stop(sdp->sd_quotad_process);
        kthread_stop(sdp->sd_logd_process);
        return error;
}

void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
        const struct gfs2_statfs_change *str = buf;

        sc->sc_total = be64_to_cpu(str->sc_total);
        sc->sc_free = be64_to_cpu(str->sc_free);
        sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

static void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
        struct gfs2_statfs_change *str = buf;

        str->sc_total = cpu_to_be64(sc->sc_total);
        str->sc_free = cpu_to_be64(sc->sc_free);
        str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}
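
/*
 * Editor's note (illustrative, not part of the original source): like all
 * GFS2 on-disk structures, struct gfs2_statfs_change is stored big-endian.
 * The two helpers above convert between that on-disk layout and the
 * CPU-endian gfs2_statfs_change_host used in memory: be64_to_cpu() on the
 * way in from disk, cpu_to_be64() on the way back out.
 */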

int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct buffer_head *m_bh, *l_bh;
        struct gfs2_holder gh;
        int error;

        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                return error;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out;

        if (sdp->sd_args.ar_spectator) {
                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);
        } else {
                error = gfs2_meta_inode_buffer(l_ip, &l_bh);
                if (error)
                        goto out_m_bh;

                spin_lock(&sdp->sd_statfs_spin);
                gfs2_statfs_change_in(m_sc, m_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                gfs2_statfs_change_in(l_sc, l_bh->b_data +
                                      sizeof(struct gfs2_dinode));
                spin_unlock(&sdp->sd_statfs_spin);

                brelse(l_bh);
        }

out_m_bh:
        brelse(m_bh);
out:
        gfs2_glock_dq_uninit(&gh);
        return 0;
}

void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
                        s64 dinodes)
{
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct buffer_head *l_bh;
        s64 x, y;
        int need_sync = 0;
        int error;

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                return;

        gfs2_trans_add_meta(l_ip->i_gl, l_bh);

        spin_lock(&sdp->sd_statfs_spin);
        l_sc->sc_total += total;
        l_sc->sc_free += free;
        l_sc->sc_dinodes += dinodes;
        gfs2_statfs_change_out(l_sc, l_bh->b_data + sizeof(struct gfs2_dinode));
        if (sdp->sd_args.ar_statfs_percent) {
                x = 100 * l_sc->sc_free;
                y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
                if (x >= y || x <= -y)
                        need_sync = 1;
        }
        spin_unlock(&sdp->sd_statfs_spin);

        brelse(l_bh);
        if (need_sync)
                gfs2_wake_up_statfs(sdp);
}
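
/*
 * Worked example (editor's illustration, not part of the original source):
 * with statfs_percent=5, a master free count of 1000000 blocks and a local,
 * not-yet-synced change of +60000 free blocks, the test above computes
 * x = 100 * 60000 = 6000000 and y = 1000000 * 5 = 5000000. Since x >= y,
 * the local delta has drifted past 5% of the master value, so
 * gfs2_wake_up_statfs() is called to fold it back into the master file.
 */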

void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh,
                   struct buffer_head *l_bh)
{
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

        gfs2_trans_add_meta(l_ip->i_gl, l_bh);
        gfs2_trans_add_meta(m_ip->i_gl, m_bh);

        spin_lock(&sdp->sd_statfs_spin);
        m_sc->sc_total += l_sc->sc_total;
        m_sc->sc_free += l_sc->sc_free;
        m_sc->sc_dinodes += l_sc->sc_dinodes;
        memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
        memset(l_bh->b_data + sizeof(struct gfs2_dinode),
               0, sizeof(struct gfs2_statfs_change));
        gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
        spin_unlock(&sdp->sd_statfs_spin);
}

int gfs2_statfs_sync(struct super_block *sb, int type)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
        struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
        struct gfs2_holder gh;
        struct buffer_head *m_bh, *l_bh;
        int error;

        sb_start_write(sb);
        error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
                                   &gh);
        if (error)
                goto out;

        error = gfs2_meta_inode_buffer(m_ip, &m_bh);
        if (error)
                goto out_unlock;

        spin_lock(&sdp->sd_statfs_spin);
        gfs2_statfs_change_in(m_sc, m_bh->b_data +
                              sizeof(struct gfs2_dinode));
        if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
                spin_unlock(&sdp->sd_statfs_spin);
                goto out_bh;
        }
        spin_unlock(&sdp->sd_statfs_spin);

        error = gfs2_meta_inode_buffer(l_ip, &l_bh);
        if (error)
                goto out_bh;

        error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
        if (error)
                goto out_bh2;

        update_statfs(sdp, m_bh, l_bh);
        sdp->sd_statfs_force_sync = 0;

        gfs2_trans_end(sdp);

out_bh2:
        brelse(l_bh);
out_bh:
        brelse(m_bh);
out_unlock:
        gfs2_glock_dq_uninit(&gh);
out:
        sb_end_write(sb);
        return error;
}

struct lfcc {
        struct list_head list;
        struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 * @freeze_gh: the holder for the freeze glock
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
                                    struct gfs2_holder *freeze_gh)
{
        struct gfs2_inode *ip;
        struct gfs2_jdesc *jd;
        struct lfcc *lfcc;
        LIST_HEAD(list);
        struct gfs2_log_header_host lh;
        int error;

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
                if (!lfcc) {
                        error = -ENOMEM;
                        goto out;
                }
                ip = GFS2_I(jd->jd_inode);
                error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
                if (error) {
                        kfree(lfcc);
                        goto out;
                }
                list_add(&lfcc->list, &list);
        }

        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
                                   GL_NOCACHE, freeze_gh);

        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                error = gfs2_jdesc_check(jd);
                if (error)
                        break;
                error = gfs2_find_jhead(jd, &lh);
                if (error)
                        break;
                if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
                        error = -EBUSY;
                        break;
                }
        }

        if (error)
                gfs2_glock_dq_uninit(freeze_gh);

out:
        while (!list_empty(&list)) {
                lfcc = list_entry(list.next, struct lfcc, list);
                list_del(&lfcc->list);
                gfs2_glock_dq_uninit(&lfcc->gh);
                kfree(lfcc);
        }
        return error;
}

void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
        struct gfs2_dinode *str = buf;

        str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
        str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
        str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
        str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
        str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
        str->di_mode = cpu_to_be32(ip->i_inode.i_mode);
        str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode));
        str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode));
        str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink);
        str->di_size = cpu_to_be64(i_size_read(&ip->i_inode));
        str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
        str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec);
        str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec);
        str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec);

        str->di_goal_meta = cpu_to_be64(ip->i_goal);
        str->di_goal_data = cpu_to_be64(ip->i_goal);
        str->di_generation = cpu_to_be64(ip->i_generation);

        str->di_flags = cpu_to_be32(ip->i_diskflags);
        str->di_height = cpu_to_be16(ip->i_height);
        str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) &&
                                             !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
                                             GFS2_FORMAT_DE : 0);
        str->di_depth = cpu_to_be16(ip->i_depth);
        str->di_entries = cpu_to_be32(ip->i_entries);

        str->di_eattr = cpu_to_be64(ip->i_eattr);
        str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec);
        str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec);
        str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
        struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
        int ret = 0;
        bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

        if (flush_all)
                gfs2_log_flush(GFS2_SB(inode), ip->i_gl, NORMAL_FLUSH);
        if (bdi->wb.dirty_exceeded)
                gfs2_ail1_flush(sdp, wbc);
        else
                filemap_fdatawrite(metamapping);
        if (flush_all)
                ret = filemap_fdatawait(metamapping);
        if (ret)
                mark_inode_dirty_sync(inode);
        return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct buffer_head *bh;
        struct gfs2_holder gh;
        int need_unlock = 0;
        int need_endtrans = 0;
        int ret;

        if (!(flags & (I_DIRTY_DATASYNC|I_DIRTY_SYNC)))
                return;
        if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
                return;
        if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                if (ret) {
                        fs_err(sdp, "dirty_inode: glock %d\n", ret);
                        return;
                }
                need_unlock = 1;
        } else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
                return;

        if (current->journal_info == NULL) {
                ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
                if (ret) {
                        fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
                        goto out;
                }
                need_endtrans = 1;
        }

        ret = gfs2_meta_inode_buffer(ip, &bh);
        if (ret == 0) {
                gfs2_trans_add_meta(ip->i_gl, bh);
                gfs2_dinode_out(ip, bh->b_data);
                brelse(bh);
        }

        if (need_endtrans)
                gfs2_trans_end(sdp);
out:
        if (need_unlock)
                gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

static int gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
        struct gfs2_holder freeze_gh;
        int error;

        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, GL_NOCACHE,
                                   &freeze_gh);
        if (error && !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
                return error;

        kthread_stop(sdp->sd_quotad_process);
        kthread_stop(sdp->sd_logd_process);

        flush_workqueue(gfs2_delete_workqueue);
        gfs2_quota_sync(sdp->sd_vfs, 0);
        gfs2_statfs_sync(sdp->sd_vfs, 0);

        gfs2_log_flush(sdp, NULL, SHUTDOWN_FLUSH);
        wait_event(sdp->sd_reserving_log_wait, atomic_read(&sdp->sd_reserving_log) == 0);
        gfs2_assert_warn(sdp, atomic_read(&sdp->sd_log_blks_free) == sdp->sd_jdesc->jd_blocks);

        if (gfs2_holder_initialized(&freeze_gh))
                gfs2_glock_dq_uninit(&freeze_gh);

        gfs2_quota_cleanup(sdp);

        return error;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int error;
        struct gfs2_jdesc *jd;

        /* No more recovery requests */
        set_bit(SDF_NORECOVERY, &sdp->sd_flags);
        smp_mb();

        /* Wait on outstanding recovery */
restart:
        spin_lock(&sdp->sd_jindex_spin);
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
                        continue;
                spin_unlock(&sdp->sd_jindex_spin);
                wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
                            TASK_UNINTERRUPTIBLE);
                goto restart;
        }
        spin_unlock(&sdp->sd_jindex_spin);

        if (!sb_rdonly(sb)) {
                error = gfs2_make_fs_ro(sdp);
                if (error)
                        gfs2_io_error(sdp);
        }
        /* At this point, we're through modifying the disk */

        /* Release stuff */

        iput(sdp->sd_jindex);
        iput(sdp->sd_statfs_inode);
        iput(sdp->sd_rindex);
        iput(sdp->sd_quota_inode);

        gfs2_glock_put(sdp->sd_rename_gl);
        gfs2_glock_put(sdp->sd_freeze_gl);

        if (!sdp->sd_args.ar_spectator) {
                gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
                gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
                gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
                gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
                iput(sdp->sd_sc_inode);
                iput(sdp->sd_qc_inode);
        }

        gfs2_glock_dq_uninit(&sdp->sd_live_gh);
        gfs2_clear_rgrpd(sdp);
        gfs2_jindex_free(sdp);
        /* Take apart glock structures and buffer lists */
        gfs2_gl_hash_clear(sdp);
        gfs2_delete_debugfs_file(sdp);
        /* Unmount the locking protocol */
        gfs2_lm_unmount(sdp);

        /* At this point, we're through participating in the lockspace */
        gfs2_sys_fs_del(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        gfs2_quota_sync(sb, -1);
        if (wait)
                gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
        return sdp->sd_log_error;
}

void gfs2_freeze_func(struct work_struct *work)
{
        int error;
        struct gfs2_holder freeze_gh;
        struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
        struct super_block *sb = sdp->sd_vfs;

        atomic_inc(&sb->s_active);
        error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_SHARED, 0,
                                   &freeze_gh);
        if (error) {
                printk(KERN_INFO "GFS2: couldn't get freeze lock: %d\n", error);
                gfs2_assert_withdraw(sdp, 0);
        } else {
                atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
                error = thaw_super(sb);
                if (error) {
                        printk(KERN_INFO "GFS2: couldn't thaw filesystem: %d\n",
                               error);
                        gfs2_assert_withdraw(sdp, 0);
                }
                if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
                        freeze_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&freeze_gh);
        }
        deactivate_super(sb);
        return;
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        int error = 0;

        mutex_lock(&sdp->sd_freeze_mutex);
        if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN)
                goto out;

        if (test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) {
                error = -EINVAL;
                goto out;
        }

        for (;;) {
                error = gfs2_lock_fs_check_clean(sdp, &sdp->sd_freeze_gh);
                if (!error)
                        break;

                switch (error) {
                case -EBUSY:
                        fs_err(sdp, "waiting for recovery before freeze\n");
                        break;

                default:
                        fs_err(sdp, "error freezing FS: %d\n", error);
                        break;
                }

                fs_err(sdp, "retrying...\n");
                msleep(1000);
        }
        error = 0;
out:
        mutex_unlock(&sdp->sd_freeze_mutex);
        return error;
}

/**
 * gfs2_unfreeze - reallow writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_unfreeze(struct super_block *sb)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;

        mutex_lock(&sdp->sd_freeze_mutex);
        if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
            !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
                mutex_unlock(&sdp->sd_freeze_mutex);
                return 0;
        }

        gfs2_glock_dq_uninit(&sdp->sd_freeze_gh);
        mutex_unlock(&sdp->sd_freeze_mutex);
        return 0;
}

/**
 * statfs_slow_fill - fill in the sc structure for a given rgrp
 * @rgd: the rgrp
 * @sc: the sc structure
 *
 * Returns: 0 on success
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
                            struct gfs2_statfs_change_host *sc)
{
        gfs2_rgrp_verify(rgd);
        sc->sc_total += rgd->rd_data;
        sc->sc_free += rgd->rd_free;
        sc->sc_dinodes += rgd->rd_dinodes;
        return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
        struct gfs2_rgrpd *rgd_next;
        struct gfs2_holder *gha, *gh;
        unsigned int slots = 64;
        unsigned int x;
        int done;
        int error = 0, err;

        memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
        gha = kmalloc(slots * sizeof(struct gfs2_holder), GFP_KERNEL);
        if (!gha)
                return -ENOMEM;
        for (x = 0; x < slots; x++)
                gfs2_holder_mark_uninitialized(gha + x);

        rgd_next = gfs2_rgrpd_get_first(sdp);

        for (;;) {
                done = 1;

                for (x = 0; x < slots; x++) {
                        gh = gha + x;

                        if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
                                err = gfs2_glock_wait(gh);
                                if (err) {
                                        gfs2_holder_uninit(gh);
                                        error = err;
                                } else {
                                        if (!error) {
                                                struct gfs2_rgrpd *rgd =
                                                        gfs2_glock2rgrp(gh->gh_gl);

                                                error = statfs_slow_fill(rgd, sc);
                                        }
                                        gfs2_glock_dq_uninit(gh);
                                }
                        }

                        if (gfs2_holder_initialized(gh))
                                done = 0;
                        else if (rgd_next && !error) {
                                error = gfs2_glock_nq_init(rgd_next->rd_gl,
                                                           LM_ST_SHARED,
                                                           GL_ASYNC,
                                                           gh);
                                rgd_next = gfs2_rgrpd_get_next(rgd_next);
                                done = 0;
                        }

                        if (signal_pending(current))
                                error = -ERESTARTSYS;
                }

                if (done)
                        break;

                yield();
        }

        kfree(gha);
        return error;
}

/**
 * gfs2_statfs_i - Do a statfs
 * @sdp: the filesystem
 * @sc: the sc structure
 *
 * Returns: errno
 */

static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
        struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
        struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

        spin_lock(&sdp->sd_statfs_spin);

        *sc = *m_sc;
        sc->sc_total += l_sc->sc_total;
        sc->sc_free += l_sc->sc_free;
        sc->sc_dinodes += l_sc->sc_dinodes;

        spin_unlock(&sdp->sd_statfs_spin);

        if (sc->sc_free < 0)
                sc->sc_free = 0;
        if (sc->sc_free > sc->sc_total)
                sc->sc_free = sc->sc_total;
        if (sc->sc_dinodes < 0)
                sc->sc_dinodes = 0;

        return 0;
}

/**
 * gfs2_statfs - Gather and return stats about the filesystem
 * @dentry: The dentry being statted
 * @buf: The kstatfs buffer to fill in
 *
 * Returns: 0 on success or error code
 */

static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct super_block *sb = dentry->d_sb;
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_statfs_change_host sc;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        if (gfs2_tune_get(sdp, gt_statfs_slow))
                error = gfs2_statfs_slow(sdp, &sc);
        else
                error = gfs2_statfs_i(sdp, &sc);

        if (error)
                return error;

        buf->f_type = GFS2_MAGIC;
        buf->f_bsize = sdp->sd_sb.sb_bsize;
        buf->f_blocks = sc.sc_total;
        buf->f_bfree = sc.sc_free;
        buf->f_bavail = sc.sc_free;
        buf->f_files = sc.sc_dinodes + sc.sc_free;
        buf->f_ffree = sc.sc_free;
        buf->f_namelen = GFS2_FNAMESIZE;

        return 0;
}

/**
 * gfs2_remount_fs - called when the FS is remounted
 * @sb: the filesystem
 * @flags: the remount flags
 * @data: extra data passed in (not used right now)
 *
 * Returns: errno
 */

static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_args args = sdp->sd_args; /* Default to current settings */
        struct gfs2_tune *gt = &sdp->sd_tune;
        int error;

        sync_filesystem(sb);

        spin_lock(&gt->gt_spin);
        args.ar_commit = gt->gt_logd_secs;
        args.ar_quota_quantum = gt->gt_quota_quantum;
        if (gt->gt_statfs_slow)
                args.ar_statfs_quantum = 0;
        else
                args.ar_statfs_quantum = gt->gt_statfs_quantum;
        spin_unlock(&gt->gt_spin);
        error = gfs2_mount_args(&args, data);
        if (error)
                return error;

        /* Not allowed to change locking details */
        if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
            strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
            strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
                return -EINVAL;

        /* Some flags must not be changed */
        if (args_neq(&args, &sdp->sd_args, spectator) ||
            args_neq(&args, &sdp->sd_args, localflocks) ||
            args_neq(&args, &sdp->sd_args, meta))
                return -EINVAL;

        if (sdp->sd_args.ar_spectator)
                *flags |= MS_RDONLY;

        if ((sb->s_flags ^ *flags) & MS_RDONLY) {
                if (*flags & MS_RDONLY)
                        error = gfs2_make_fs_ro(sdp);
                else
                        error = gfs2_make_fs_rw(sdp);
                if (error)
                        return error;
        }

        sdp->sd_args = args;
        if (sdp->sd_args.ar_posix_acl)
                sb->s_flags |= MS_POSIXACL;
        else
                sb->s_flags &= ~MS_POSIXACL;
        if (sdp->sd_args.ar_nobarrier)
                set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
        else
                clear_bit(SDF_NOBARRIERS, &sdp->sd_flags);
        spin_lock(&gt->gt_spin);
        gt->gt_logd_secs = args.ar_commit;
        gt->gt_quota_quantum = args.ar_quota_quantum;
        if (args.ar_statfs_quantum) {
                gt->gt_statfs_slow = 0;
                gt->gt_statfs_quantum = args.ar_statfs_quantum;
        } else {
                gt->gt_statfs_slow = 1;
                gt->gt_statfs_quantum = 30;
        }
        spin_unlock(&gt->gt_spin);

        gfs2_online_uevent(sdp);
        return 0;
}

/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
        struct gfs2_inode *ip = GFS2_I(inode);

        if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
            inode->i_nlink &&
            gfs2_holder_initialized(&ip->i_iopen_gh)) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
                if (test_bit(GLF_DEMOTE, &gl->gl_flags))
                        clear_nlink(inode);
        }

        /*
         * When an inode's link count has dropped to zero under memory
         * pressure, defer deleting the inode to the delete workqueue. This
         * avoids calling into DLM under memory pressure, which can deadlock.
         */
        if (!inode->i_nlink &&
            unlikely(current->flags & PF_MEMALLOC) &&
            gfs2_holder_initialized(&ip->i_iopen_gh)) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

                gfs2_glock_hold(gl);
                if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
                        gfs2_glock_queue_put(gl);
                return false;
        }

        return generic_drop_inode(inode);
}

static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
        do {
                if (d1 == d2)
                        return 1;
                d1 = d1->d_parent;
        } while (!IS_ROOT(d1));
        return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
        struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
        struct gfs2_args *args = &sdp->sd_args;
        int val;

        if (is_ancestor(root, sdp->sd_master_dir))
                seq_puts(s, ",meta");
        if (args->ar_lockproto[0])
                seq_show_option(s, "lockproto", args->ar_lockproto);
        if (args->ar_locktable[0])
                seq_show_option(s, "locktable", args->ar_locktable);
        if (args->ar_hostdata[0])
                seq_show_option(s, "hostdata", args->ar_hostdata);
        if (args->ar_spectator)
                seq_puts(s, ",spectator");
        if (args->ar_localflocks)
                seq_puts(s, ",localflocks");
        if (args->ar_debug)
                seq_puts(s, ",debug");
        if (args->ar_posix_acl)
                seq_puts(s, ",acl");
        if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
                char *state;

                switch (args->ar_quota) {
                case GFS2_QUOTA_OFF:
                        state = "off";
                        break;
                case GFS2_QUOTA_ACCOUNT:
                        state = "account";
                        break;
                case GFS2_QUOTA_ON:
                        state = "on";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",quota=%s", state);
        }
        if (args->ar_suiddir)
                seq_puts(s, ",suiddir");
        if (args->ar_data != GFS2_DATA_DEFAULT) {
                char *state;

                switch (args->ar_data) {
                case GFS2_DATA_WRITEBACK:
                        state = "writeback";
                        break;
                case GFS2_DATA_ORDERED:
                        state = "ordered";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",data=%s", state);
        }
        if (args->ar_discard)
                seq_puts(s, ",discard");
        val = sdp->sd_tune.gt_logd_secs;
        if (val != 30)
                seq_printf(s, ",commit=%d", val);
        val = sdp->sd_tune.gt_statfs_quantum;
        if (val != 30)
                seq_printf(s, ",statfs_quantum=%d", val);
        else if (sdp->sd_tune.gt_statfs_slow)
                seq_puts(s, ",statfs_quantum=0");
        val = sdp->sd_tune.gt_quota_quantum;
        if (val != 60)
                seq_printf(s, ",quota_quantum=%d", val);
        if (args->ar_statfs_percent)
                seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
        if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
                const char *state;

                switch (args->ar_errors) {
                case GFS2_ERRORS_WITHDRAW:
                        state = "withdraw";
                        break;
                case GFS2_ERRORS_PANIC:
                        state = "panic";
                        break;
                default:
                        state = "unknown";
                        break;
                }
                seq_printf(s, ",errors=%s", state);
        }
        if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
                seq_puts(s, ",nobarrier");
        if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
                seq_puts(s, ",demote_interface_used");
        if (args->ar_rgrplvb)
                seq_puts(s, ",rgrplvb");
        if (args->ar_loccookie)
                seq_puts(s, ",loccookie");
        return 0;
}
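
/*
 * Example (editor's illustration, not part of the original source): with acl
 * and discard enabled and quota accounting on, the /proc/mounts entry
 * produced via gfs2_show_options() would look roughly like
 *
 *         /dev/vg/lv /mnt gfs2 rw,relatime,acl,quota=account,discard 0 0
 *
 * since only options that differ from the defaults are emitted.
 */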

static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
        struct inode *inode = &ip->i_inode;
        struct gfs2_glock *gl = ip->i_gl;

        truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
        truncate_inode_pages(&inode->i_data, 0);

        if (atomic_read(&gl->gl_revokes) == 0) {
                clear_bit(GLF_LFLUSH, &gl->gl_flags);
                clear_bit(GLF_DIRTY, &gl->gl_flags);
        }
}

static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder gh;
        int error;

        if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (error)
                return error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
        if (!rgd) {
                gfs2_consist_inode(ip);
                error = -EIO;
                goto out_qs;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                goto out_qs;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
                                 sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_rg_gunlock;

        gfs2_free_di(rgd, ip);

        gfs2_final_release_pages(ip);

        gfs2_trans_end(sdp);

out_rg_gunlock:
        gfs2_glock_dq_uninit(&gh);
out_qs:
        gfs2_quota_unhold(ip);
        return error;
}

/**
 * gfs2_glock_put_eventually
 * @gl: The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock. Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
        if (current->flags & PF_MEMALLOC)
                gfs2_glock_queue_put(gl);
        else
                gfs2_glock_put(gl);
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group.
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        struct gfs2_sbd *sdp = sb->s_fs_info;
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        struct address_space *metamapping;
        int error;

        if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
                clear_inode(inode);
                return;
        }

        if (inode->i_nlink || sb_rdonly(sb))
                goto out;

        if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
                BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
                gfs2_holder_mark_uninitialized(&gh);
                goto alloc_failed;
        }

        /* Deletes should never happen under memory pressure anymore. */
        if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
                goto out;

        /* Must not read inode block until block type has been verified */
        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, &gh);
        if (unlikely(error)) {
                glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                goto out;
        }

        error = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
        if (error)
                goto out_truncate;

        if (test_bit(GIF_INVALID, &ip->i_flags)) {
                error = gfs2_inode_refresh(ip);
                if (error)
                        goto out_truncate;
        }

        /*
         * The inode may have been recreated in the meantime.
         */
        if (inode->i_nlink)
                goto out_truncate;

alloc_failed:
        if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
            test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_dq_wait(&ip->i_iopen_gh);
                gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE,
                                   &ip->i_iopen_gh);
                error = gfs2_glock_nq(&ip->i_iopen_gh);
                if (error)
                        goto out_truncate;
        }

        /* Case 1 starts here */

        if (S_ISDIR(inode->i_mode) &&
            (ip->i_diskflags & GFS2_DIF_EXHASH)) {
                error = gfs2_dir_exhash_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        if (ip->i_eattr) {
                error = gfs2_ea_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        if (!gfs2_is_stuffed(ip)) {
                error = gfs2_file_dealloc(ip);
                if (error)
                        goto out_unlock;
        }

        /* We're about to clear the bitmap for the dinode, but as soon as we
           do, gfs2_create_inode can create another inode at the same block
           location and try to set gl_object again. We clear gl_object here so
           that subsequent inode creates don't see an old gl_object. */
        glock_clear_object(ip->i_gl, ip);
        error = gfs2_dinode_dealloc(ip);
        goto out_unlock;

out_truncate:
        gfs2_log_flush(sdp, ip->i_gl, NORMAL_FLUSH);
        metamapping = gfs2_glock2aspace(ip->i_gl);
        if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
                filemap_fdatawrite(metamapping);
                filemap_fdatawait(metamapping);
        }
        write_inode_now(inode, 1);
        gfs2_ail_flush(ip->i_gl, 0);

        /* Case 2 starts here */
        error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
        if (error)
                goto out_unlock;
        /* Needs to be done before glock release & also in a transaction */
        truncate_inode_pages(&inode->i_data, 0);
        truncate_inode_pages(metamapping, 0);
        gfs2_trans_end(sdp);

out_unlock:
        /* Error path for case 1 */
        if (gfs2_rs_active(&ip->i_res))
                gfs2_rs_deltree(&ip->i_res);

        if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
                if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
                        ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                        gfs2_glock_dq(&ip->i_iopen_gh);
                }
                gfs2_holder_uninit(&ip->i_iopen_gh);
        }
        if (gfs2_holder_initialized(&gh)) {
                glock_clear_object(ip->i_gl, ip);
                gfs2_glock_dq_uninit(&gh);
        }
        if (error && error != GLR_TRYFAILED && error != -EROFS)
                fs_warn(sdp, "gfs2_evict_inode: %d\n", error);

out:
        /* Case 3 starts here */
        truncate_inode_pages_final(&inode->i_data);
        gfs2_rsqa_delete(ip, NULL);
        gfs2_ordered_del_inode(ip);
        clear_inode(inode);
        gfs2_dir_hash_inval(ip);
        glock_clear_object(ip->i_gl, ip);
        wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
        gfs2_glock_add_to_lru(ip->i_gl);
        gfs2_glock_put_eventually(ip->i_gl);
        ip->i_gl = NULL;
        if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
                struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

                glock_clear_object(gl, ip);
                ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
                gfs2_glock_hold(gl);
                gfs2_glock_dq_uninit(&ip->i_iopen_gh);
                gfs2_glock_put_eventually(gl);
        }
}

static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
        struct gfs2_inode *ip;

        ip = kmem_cache_alloc(gfs2_inode_cachep, GFP_KERNEL);
        if (ip) {
                ip->i_flags = 0;
                ip->i_gl = NULL;
                ip->i_rgd = NULL;
                memset(&ip->i_res, 0, sizeof(ip->i_res));
                RB_CLEAR_NODE(&ip->i_res.rs_node);
                ip->i_rahead = 0;
        }
        return &ip->i_inode;
}

static void gfs2_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(gfs2_inode_cachep, inode);
}

static void gfs2_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, gfs2_i_callback);
}

const struct super_operations gfs2_super_ops = {
        .alloc_inode            = gfs2_alloc_inode,
        .destroy_inode          = gfs2_destroy_inode,
        .write_inode            = gfs2_write_inode,
        .dirty_inode            = gfs2_dirty_inode,
        .evict_inode            = gfs2_evict_inode,
        .put_super              = gfs2_put_super,
        .sync_fs                = gfs2_sync_fs,
        .freeze_super           = gfs2_freeze,
        .thaw_super             = gfs2_unfreeze,
        .statfs                 = gfs2_statfs,
        .remount_fs             = gfs2_remount_fs,
        .drop_inode             = gfs2_drop_inode,
        .show_options           = gfs2_show_options,
};