
/linux-2.6.21.x/fs/gfs2/ops_fstype.c

https://bitbucket.org/altlc/wive-rtnl-ralink-rt305x-routers-firmware-amod
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "daemon.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "mount.h"
#include "ops_export.h"
#include "ops_fstype.h"
#include "ops_super.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "sys.h"
#include "util.h"

#define DO 0
#define UNDO 1

extern struct dentry_operations gfs2_dops;
static struct gfs2_sbd *init_sbd(struct super_block *sb)
{
	struct gfs2_sbd *sdp;

	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
	if (!sdp)
		return NULL;

	sb->s_fs_info = sdp;
	sdp->sd_vfs = sb;

	gfs2_tune_init(&sdp->sd_tune);

	INIT_LIST_HEAD(&sdp->sd_reclaim_list);
	spin_lock_init(&sdp->sd_reclaim_lock);
	init_waitqueue_head(&sdp->sd_reclaim_wq);

	mutex_init(&sdp->sd_inum_mutex);
	spin_lock_init(&sdp->sd_statfs_spin);
	mutex_init(&sdp->sd_statfs_mutex);

	spin_lock_init(&sdp->sd_rindex_spin);
	mutex_init(&sdp->sd_rindex_mutex);
	INIT_LIST_HEAD(&sdp->sd_rindex_list);
	INIT_LIST_HEAD(&sdp->sd_rindex_mru_list);
	INIT_LIST_HEAD(&sdp->sd_rindex_recent_list);

	INIT_LIST_HEAD(&sdp->sd_jindex_list);
	spin_lock_init(&sdp->sd_jindex_spin);
	mutex_init(&sdp->sd_jindex_mutex);

	INIT_LIST_HEAD(&sdp->sd_quota_list);
	spin_lock_init(&sdp->sd_quota_spin);
	mutex_init(&sdp->sd_quota_mutex);

	spin_lock_init(&sdp->sd_log_lock);

	INIT_LIST_HEAD(&sdp->sd_log_le_gl);
	INIT_LIST_HEAD(&sdp->sd_log_le_buf);
	INIT_LIST_HEAD(&sdp->sd_log_le_revoke);
	INIT_LIST_HEAD(&sdp->sd_log_le_rg);
	INIT_LIST_HEAD(&sdp->sd_log_le_databuf);

	mutex_init(&sdp->sd_log_reserve_mutex);
	INIT_LIST_HEAD(&sdp->sd_ail1_list);
	INIT_LIST_HEAD(&sdp->sd_ail2_list);

	init_rwsem(&sdp->sd_log_flush_lock);
	INIT_LIST_HEAD(&sdp->sd_log_flush_list);

	INIT_LIST_HEAD(&sdp->sd_revoke_list);

	mutex_init(&sdp->sd_freeze_lock);

	return sdp;
}
static void init_vfs(struct super_block *sb, unsigned noatime)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	sb->s_magic = GFS2_MAGIC;
	sb->s_op = &gfs2_super_ops;
	sb->s_export_op = &gfs2_export_ops;
	sb->s_maxbytes = MAX_LFS_FILESIZE;

	if (sb->s_flags & (MS_NOATIME | MS_NODIRATIME))
		set_bit(noatime, &sdp->sd_flags);

	/* Don't let the VFS update atimes.  GFS2 handles this itself. */
	sb->s_flags |= MS_NOATIME | MS_NODIRATIME;
}
static int init_names(struct gfs2_sbd *sdp, int silent)
{
	struct page *page;
	char *proto, *table;
	int error = 0;

	proto = sdp->sd_args.ar_lockproto;
	table = sdp->sd_args.ar_locktable;

	/* Try to autodetect */

	if (!proto[0] || !table[0]) {
		struct gfs2_sb *sb;
		page = gfs2_read_super(sdp->sd_vfs, GFS2_SB_ADDR >> sdp->sd_fsb2bb_shift);
		if (!page)
			return -ENOBUFS;
		sb = kmap(page);
		gfs2_sb_in(&sdp->sd_sb, sb);
		kunmap(page);
		__free_page(page);

		error = gfs2_check_sb(sdp, &sdp->sd_sb, silent);
		if (error)
			goto out;

		if (!proto[0])
			proto = sdp->sd_sb.sb_lockproto;
		if (!table[0])
			table = sdp->sd_sb.sb_locktable;
	}

	if (!table[0])
		table = sdp->sd_vfs->s_id;

	snprintf(sdp->sd_proto_name, GFS2_FSNAME_LEN, "%s", proto);
	snprintf(sdp->sd_table_name, GFS2_FSNAME_LEN, "%s", table);

out:
	return error;
}
static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh,
			int undo)
{
	struct task_struct *p;
	int error = 0;

	if (undo)
		goto fail_trans;

	p = kthread_run(gfs2_scand, sdp, "gfs2_scand");
	error = IS_ERR(p);
	if (error) {
		fs_err(sdp, "can't start scand thread: %d\n", error);
		return error;
	}
	sdp->sd_scand_process = p;

	for (sdp->sd_glockd_num = 0;
	     sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd;
	     sdp->sd_glockd_num++) {
		p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd");
		error = IS_ERR(p);
		if (error) {
			fs_err(sdp, "can't start glockd thread: %d\n", error);
			goto fail;
		}
		sdp->sd_glockd_process[sdp->sd_glockd_num] = p;
	}

	error = gfs2_glock_nq_num(sdp,
				  GFS2_MOUNT_LOCK, &gfs2_nondisk_glops,
				  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP | GL_NOCACHE,
				  mount_gh);
	if (error) {
		fs_err(sdp, "can't acquire mount glock: %d\n", error);
		goto fail;
	}

	error = gfs2_glock_nq_num(sdp,
				  GFS2_LIVE_LOCK, &gfs2_nondisk_glops,
				  LM_ST_SHARED,
				  LM_FLAG_NOEXP | GL_EXACT,
				  &sdp->sd_live_gh);
	if (error) {
		fs_err(sdp, "can't acquire live glock: %d\n", error);
		goto fail_mount;
	}

	error = gfs2_glock_get(sdp, GFS2_RENAME_LOCK, &gfs2_nondisk_glops,
			       CREATE, &sdp->sd_rename_gl);
	if (error) {
		fs_err(sdp, "can't create rename glock: %d\n", error);
		goto fail_live;
	}

	error = gfs2_glock_get(sdp, GFS2_TRANS_LOCK, &gfs2_trans_glops,
			       CREATE, &sdp->sd_trans_gl);
	if (error) {
		fs_err(sdp, "can't create transaction glock: %d\n", error);
		goto fail_rename;
	}
	set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags);

	return 0;

fail_trans:
	gfs2_glock_put(sdp->sd_trans_gl);
fail_rename:
	gfs2_glock_put(sdp->sd_rename_gl);
fail_live:
	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
fail_mount:
	gfs2_glock_dq_uninit(mount_gh);
fail:
	while (sdp->sd_glockd_num--)
		kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]);

	kthread_stop(sdp->sd_scand_process);
	return error;
}
static struct inode *gfs2_lookup_root(struct super_block *sb,
				      struct gfs2_inum_host *inum)
{
	return gfs2_inode_lookup(sb, inum, DT_DIR);
}
static int init_sb(struct gfs2_sbd *sdp, int silent, int undo)
{
	struct super_block *sb = sdp->sd_vfs;
	struct gfs2_holder sb_gh;
	struct gfs2_inum_host *inum;
	struct inode *inode;
	int error = 0;

	if (undo) {
		if (sb->s_root) {
			dput(sb->s_root);
			sb->s_root = NULL;
		}
		return 0;
	}

	error = gfs2_glock_nq_num(sdp, GFS2_SB_LOCK, &gfs2_meta_glops,
				  LM_ST_SHARED, 0, &sb_gh);
	if (error) {
		fs_err(sdp, "can't acquire superblock glock: %d\n", error);
		return error;
	}

	error = gfs2_read_sb(sdp, sb_gh.gh_gl, silent);
	if (error) {
		fs_err(sdp, "can't read superblock: %d\n", error);
		goto out;
	}

	/* Set up the buffer cache and SB for real */
	if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) {
		error = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too small for device "
		       "block size (%u)\n",
		       sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev));
		goto out;
	}
	if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
		error = -EINVAL;
		fs_err(sdp, "FS block size (%u) is too big for machine "
		       "page size (%u)\n",
		       sdp->sd_sb.sb_bsize, (unsigned int)PAGE_SIZE);
		goto out;
	}
	sb_set_blocksize(sb, sdp->sd_sb.sb_bsize);

	/* Get the root inode */
	inum = &sdp->sd_sb.sb_root_dir;
	if (sb->s_type == &gfs2meta_fs_type)
		inum = &sdp->sd_sb.sb_master_dir;

	inode = gfs2_lookup_root(sb, inum);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		fs_err(sdp, "can't read in root inode: %d\n", error);
		goto out;
	}

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		fs_err(sdp, "can't get root dentry\n");
		error = -ENOMEM;
		iput(inode);
	}
	sb->s_root->d_op = &gfs2_dops;
out:
	gfs2_glock_dq_uninit(&sb_gh);
	return error;
}
static int init_journal(struct gfs2_sbd *sdp, int undo)
{
	struct gfs2_holder ji_gh;
	struct task_struct *p;
	struct gfs2_inode *ip;
	int jindex = 1;
	int error = 0;

	if (undo) {
		jindex = 0;
		goto fail_recoverd;
	}

	sdp->sd_jindex = gfs2_lookup_simple(sdp->sd_master_dir, "jindex");
	if (IS_ERR(sdp->sd_jindex)) {
		fs_err(sdp, "can't lookup journal index: %d\n", error);
		return PTR_ERR(sdp->sd_jindex);
	}
	ip = GFS2_I(sdp->sd_jindex);
	set_bit(GLF_STICKY, &ip->i_gl->gl_flags);

	/* Load in the journal index special file */

	error = gfs2_jindex_hold(sdp, &ji_gh);
	if (error) {
		fs_err(sdp, "can't read journal index: %d\n", error);
		goto fail;
	}

	error = -EINVAL;
	if (!gfs2_jindex_size(sdp)) {
		fs_err(sdp, "no journals!\n");
		goto fail_jindex;
	}

	if (sdp->sd_args.ar_spectator) {
		sdp->sd_jdesc = gfs2_jdesc_find(sdp, 0);
		sdp->sd_log_blks_free = sdp->sd_jdesc->jd_blocks;
	} else {
		if (sdp->sd_lockstruct.ls_jid >= gfs2_jindex_size(sdp)) {
			fs_err(sdp, "can't mount journal #%u\n",
			       sdp->sd_lockstruct.ls_jid);
			fs_err(sdp, "there are only %u journals (0 - %u)\n",
			       gfs2_jindex_size(sdp),
			       gfs2_jindex_size(sdp) - 1);
			goto fail_jindex;
		}
		sdp->sd_jdesc = gfs2_jdesc_find(sdp, sdp->sd_lockstruct.ls_jid);

		error = gfs2_glock_nq_num(sdp, sdp->sd_lockstruct.ls_jid,
					  &gfs2_journal_glops,
					  LM_ST_EXCLUSIVE, LM_FLAG_NOEXP,
					  &sdp->sd_journal_gh);
		if (error) {
			fs_err(sdp, "can't acquire journal glock: %d\n", error);
			goto fail_jindex;
		}

		ip = GFS2_I(sdp->sd_jdesc->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED,
					   LM_FLAG_NOEXP | GL_EXACT,
					   &sdp->sd_jinode_gh);
		if (error) {
			fs_err(sdp, "can't acquire journal inode glock: %d\n",
			       error);
			goto fail_journal_gh;
		}

		error = gfs2_jdesc_check(sdp->sd_jdesc);
		if (error) {
			fs_err(sdp, "my journal (%u) is bad: %d\n",
			       sdp->sd_jdesc->jd_jid, error);
			goto fail_jinode_gh;
		}
		sdp->sd_log_blks_free = sdp->sd_jdesc->jd_blocks;
	}

	if (sdp->sd_lockstruct.ls_first) {
		unsigned int x;
		for (x = 0; x < sdp->sd_journals; x++) {
			error = gfs2_recover_journal(gfs2_jdesc_find(sdp, x));
			if (error) {
				fs_err(sdp, "error recovering journal %u: %d\n",
				       x, error);
				goto fail_jinode_gh;
			}
		}

		gfs2_lm_others_may_mount(sdp);
	} else if (!sdp->sd_args.ar_spectator) {
		error = gfs2_recover_journal(sdp->sd_jdesc);
		if (error) {
			fs_err(sdp, "error recovering my journal: %d\n", error);
			goto fail_jinode_gh;
		}
	}

	set_bit(SDF_JOURNAL_CHECKED, &sdp->sd_flags);
	gfs2_glock_dq_uninit(&ji_gh);
	jindex = 0;

	p = kthread_run(gfs2_recoverd, sdp, "gfs2_recoverd");
	error = IS_ERR(p);
	if (error) {
		fs_err(sdp, "can't start recoverd thread: %d\n", error);
		goto fail_jinode_gh;
	}
	sdp->sd_recoverd_process = p;

	return 0;

fail_recoverd:
	kthread_stop(sdp->sd_recoverd_process);
fail_jinode_gh:
	if (!sdp->sd_args.ar_spectator)
		gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
fail_journal_gh:
	if (!sdp->sd_args.ar_spectator)
		gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
fail_jindex:
	gfs2_jindex_free(sdp);
	if (jindex)
		gfs2_glock_dq_uninit(&ji_gh);
fail:
	iput(sdp->sd_jindex);
	return error;
}
static int init_inodes(struct gfs2_sbd *sdp, int undo)
{
	int error = 0;
	struct gfs2_inode *ip;
	struct inode *inode;

	if (undo)
		goto fail_qinode;

	inode = gfs2_lookup_root(sdp->sd_vfs, &sdp->sd_sb.sb_master_dir);
	if (IS_ERR(inode)) {
		error = PTR_ERR(inode);
		fs_err(sdp, "can't read in master directory: %d\n", error);
		goto fail;
	}
	sdp->sd_master_dir = inode;

	error = init_journal(sdp, undo);
	if (error)
		goto fail_master;

	/* Read in the master inode number inode */
	sdp->sd_inum_inode = gfs2_lookup_simple(sdp->sd_master_dir, "inum");
	if (IS_ERR(sdp->sd_inum_inode)) {
		error = PTR_ERR(sdp->sd_inum_inode);
		fs_err(sdp, "can't read in inum inode: %d\n", error);
		goto fail_journal;
	}

	/* Read in the master statfs inode */
	sdp->sd_statfs_inode = gfs2_lookup_simple(sdp->sd_master_dir, "statfs");
	if (IS_ERR(sdp->sd_statfs_inode)) {
		error = PTR_ERR(sdp->sd_statfs_inode);
		fs_err(sdp, "can't read in statfs inode: %d\n", error);
		goto fail_inum;
	}

	/* Read in the resource index inode */
	sdp->sd_rindex = gfs2_lookup_simple(sdp->sd_master_dir, "rindex");
	if (IS_ERR(sdp->sd_rindex)) {
		error = PTR_ERR(sdp->sd_rindex);
		fs_err(sdp, "can't get resource index inode: %d\n", error);
		goto fail_statfs;
	}
	ip = GFS2_I(sdp->sd_rindex);
	set_bit(GLF_STICKY, &ip->i_gl->gl_flags);
	sdp->sd_rindex_vn = ip->i_gl->gl_vn - 1;

	/* Read in the quota inode */
	sdp->sd_quota_inode = gfs2_lookup_simple(sdp->sd_master_dir, "quota");
	if (IS_ERR(sdp->sd_quota_inode)) {
		error = PTR_ERR(sdp->sd_quota_inode);
		fs_err(sdp, "can't get quota file inode: %d\n", error);
		goto fail_rindex;
	}
	return 0;

fail_qinode:
	iput(sdp->sd_quota_inode);
fail_rindex:
	gfs2_clear_rgrpd(sdp);
	iput(sdp->sd_rindex);
fail_statfs:
	iput(sdp->sd_statfs_inode);
fail_inum:
	iput(sdp->sd_inum_inode);
fail_journal:
	init_journal(sdp, UNDO);
fail_master:
	iput(sdp->sd_master_dir);
fail:
	return error;
}
static int init_per_node(struct gfs2_sbd *sdp, int undo)
{
	struct inode *pn = NULL;
	char buf[30];
	int error = 0;
	struct gfs2_inode *ip;

	if (sdp->sd_args.ar_spectator)
		return 0;

	if (undo)
		goto fail_qc_gh;

	pn = gfs2_lookup_simple(sdp->sd_master_dir, "per_node");
	if (IS_ERR(pn)) {
		error = PTR_ERR(pn);
		fs_err(sdp, "can't find per_node directory: %d\n", error);
		return error;
	}

	sprintf(buf, "inum_range%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_ir_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_ir_inode)) {
		error = PTR_ERR(sdp->sd_ir_inode);
		fs_err(sdp, "can't find local \"ir\" file: %d\n", error);
		goto fail;
	}

	sprintf(buf, "statfs_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_sc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_sc_inode)) {
		error = PTR_ERR(sdp->sd_sc_inode);
		fs_err(sdp, "can't find local \"sc\" file: %d\n", error);
		goto fail_ir_i;
	}

	sprintf(buf, "quota_change%u", sdp->sd_jdesc->jd_jid);
	sdp->sd_qc_inode = gfs2_lookup_simple(pn, buf);
	if (IS_ERR(sdp->sd_qc_inode)) {
		error = PTR_ERR(sdp->sd_qc_inode);
		fs_err(sdp, "can't find local \"qc\" file: %d\n", error);
		goto fail_ut_i;
	}

	iput(pn);
	pn = NULL;

	ip = GFS2_I(sdp->sd_ir_inode);
	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_ir_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"ir\" file: %d\n", error);
		goto fail_qc_i;
	}

	ip = GFS2_I(sdp->sd_sc_inode);
	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_sc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"sc\" file: %d\n", error);
		goto fail_ir_gh;
	}

	ip = GFS2_I(sdp->sd_qc_inode);
	error = gfs2_glock_nq_init(ip->i_gl,
				   LM_ST_EXCLUSIVE, 0,
				   &sdp->sd_qc_gh);
	if (error) {
		fs_err(sdp, "can't lock local \"qc\" file: %d\n", error);
		goto fail_ut_gh;
	}

	return 0;

fail_qc_gh:
	gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
fail_ut_gh:
	gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
fail_ir_gh:
	gfs2_glock_dq_uninit(&sdp->sd_ir_gh);
fail_qc_i:
	iput(sdp->sd_qc_inode);
fail_ut_i:
	iput(sdp->sd_sc_inode);
fail_ir_i:
	iput(sdp->sd_ir_inode);
fail:
	if (pn)
		iput(pn);
	return error;
}
static int init_threads(struct gfs2_sbd *sdp, int undo)
{
	struct task_struct *p;
	int error = 0;

	if (undo)
		goto fail_quotad;

	sdp->sd_log_flush_time = jiffies;
	sdp->sd_jindex_refresh_time = jiffies;

	p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
	error = IS_ERR(p);
	if (error) {
		fs_err(sdp, "can't start logd thread: %d\n", error);
		return error;
	}
	sdp->sd_logd_process = p;

	sdp->sd_statfs_sync_time = jiffies;
	sdp->sd_quota_sync_time = jiffies;

	p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad");
	error = IS_ERR(p);
	if (error) {
		fs_err(sdp, "can't start quotad thread: %d\n", error);
		goto fail;
	}
	sdp->sd_quotad_process = p;

	return 0;

fail_quotad:
	kthread_stop(sdp->sd_quotad_process);
fail:
	kthread_stop(sdp->sd_logd_process);
	return error;
}
/**
 * fill_super - Read in superblock
 * @sb: The VFS superblock
 * @data: Mount options
 * @silent: Don't complain if it's not a GFS2 filesystem
 *
 * Returns: errno
 */

static int fill_super(struct super_block *sb, void *data, int silent)
{
	struct gfs2_sbd *sdp;
	struct gfs2_holder mount_gh;
	int error;

	sdp = init_sbd(sb);
	if (!sdp) {
		printk(KERN_WARNING "GFS2: can't alloc struct gfs2_sbd\n");
		return -ENOMEM;
	}

	error = gfs2_mount_args(sdp, (char *)data, 0);
	if (error) {
		printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
		goto fail;
	}

	init_vfs(sb, SDF_NOATIME);

	/* Set up the buffer cache and fill in some fake block size values
	   to allow us to read-in the on-disk superblock. */
	sdp->sd_sb.sb_bsize = sb_min_blocksize(sb, GFS2_BASIC_BLOCK);
	sdp->sd_sb.sb_bsize_shift = sb->s_blocksize_bits;
	sdp->sd_fsb2bb_shift = sdp->sd_sb.sb_bsize_shift -
			       GFS2_BASIC_BLOCK_SHIFT;
	sdp->sd_fsb2bb = 1 << sdp->sd_fsb2bb_shift;

	error = init_names(sdp, silent);
	if (error)
		goto fail;

	error = gfs2_sys_fs_add(sdp);
	if (error)
		goto fail;

	error = gfs2_lm_mount(sdp, silent);
	if (error)
		goto fail_sys;

	error = init_locking(sdp, &mount_gh, DO);
	if (error)
		goto fail_lm;

	error = init_sb(sdp, silent, DO);
	if (error)
		goto fail_locking;

	error = init_inodes(sdp, DO);
	if (error)
		goto fail_sb;

	error = init_per_node(sdp, DO);
	if (error)
		goto fail_inodes;

	error = gfs2_statfs_init(sdp);
	if (error) {
		fs_err(sdp, "can't initialize statfs subsystem: %d\n", error);
		goto fail_per_node;
	}

	error = init_threads(sdp, DO);
	if (error)
		goto fail_per_node;

	if (!(sb->s_flags & MS_RDONLY)) {
		error = gfs2_make_fs_rw(sdp);
		if (error) {
			fs_err(sdp, "can't make FS RW: %d\n", error);
			goto fail_threads;
		}
	}

	gfs2_glock_dq_uninit(&mount_gh);

	return 0;

fail_threads:
	init_threads(sdp, UNDO);
fail_per_node:
	init_per_node(sdp, UNDO);
fail_inodes:
	init_inodes(sdp, UNDO);
fail_sb:
	init_sb(sdp, 0, UNDO);
fail_locking:
	init_locking(sdp, &mount_gh, UNDO);
fail_lm:
	gfs2_gl_hash_clear(sdp, WAIT);
	gfs2_lm_unmount(sdp);
	while (invalidate_inodes(sb))
		yield();
fail_sys:
	gfs2_sys_fs_del(sdp);
fail:
	kfree(sdp);
	sb->s_fs_info = NULL;
	return error;
}
static int gfs2_get_sb(struct file_system_type *fs_type, int flags,
		       const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct super_block *sb;
	struct gfs2_sbd *sdp;
	int error = get_sb_bdev(fs_type, flags, dev_name, data, fill_super, mnt);
	if (error)
		goto out;
	sb = mnt->mnt_sb;
	sdp = sb->s_fs_info;
	sdp->sd_gfs2mnt = mnt;
out:
	return error;
}
static int fill_super_meta(struct super_block *sb, struct super_block *new,
			   void *data, int silent)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct inode *inode;
	int error = 0;

	new->s_fs_info = sdp;
	sdp->sd_vfs_meta = sb;

	init_vfs(new, SDF_NOATIME);

	/* Get the master inode */
	inode = igrab(sdp->sd_master_dir);

	new->s_root = d_alloc_root(inode);
	if (!new->s_root) {
		fs_err(sdp, "can't get root dentry\n");
		error = -ENOMEM;
		iput(inode);
	} else
		new->s_root->d_op = &gfs2_dops;

	return error;
}
static int set_bdev_super(struct super_block *s, void *data)
{
	s->s_bdev = data;
	s->s_dev = s->s_bdev->bd_dev;
	return 0;
}

static int test_bdev_super(struct super_block *s, void *data)
{
	return s->s_bdev == data;
}
static struct super_block* get_gfs2_sb(const char *dev_name)
{
	struct kstat stat;
	struct nameidata nd;
	struct file_system_type *fstype;
	struct super_block *sb = NULL, *s;
	struct list_head *l;
	int error;

	error = path_lookup(dev_name, LOOKUP_FOLLOW, &nd);
	if (error) {
		printk(KERN_WARNING "GFS2: path_lookup on %s returned error\n",
		       dev_name);
		goto out;
	}
	error = vfs_getattr(nd.mnt, nd.dentry, &stat);

	fstype = get_fs_type("gfs2");
	list_for_each(l, &fstype->fs_supers) {
		s = list_entry(l, struct super_block, s_instances);
		if ((S_ISBLK(stat.mode) && s->s_dev == stat.rdev) ||
		    (S_ISDIR(stat.mode) && s == nd.dentry->d_inode->i_sb)) {
			sb = s;
			goto free_nd;
		}
	}

	printk(KERN_WARNING "GFS2: Unrecognized block device or "
	       "mount point %s\n", dev_name);

free_nd:
	path_release(&nd);
out:
	return sb;
}
static int gfs2_get_sb_meta(struct file_system_type *fs_type, int flags,
			    const char *dev_name, void *data, struct vfsmount *mnt)
{
	int error = 0;
	struct super_block *sb = NULL, *new;
	struct gfs2_sbd *sdp;

	sb = get_gfs2_sb(dev_name);
	if (!sb) {
		printk(KERN_WARNING "GFS2: gfs2 mount does not exist\n");
		error = -ENOENT;
		goto error;
	}
	sdp = (struct gfs2_sbd*) sb->s_fs_info;
	if (sdp->sd_vfs_meta) {
		printk(KERN_WARNING "GFS2: gfs2meta mount already exists\n");
		error = -EBUSY;
		goto error;
	}
	down(&sb->s_bdev->bd_mount_sem);
	new = sget(fs_type, test_bdev_super, set_bdev_super, sb->s_bdev);
	up(&sb->s_bdev->bd_mount_sem);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		goto error;
	}
	module_put(fs_type->owner);
	new->s_flags = flags;
	strlcpy(new->s_id, sb->s_id, sizeof(new->s_id));
	sb_set_blocksize(new, sb->s_blocksize);
	error = fill_super_meta(sb, new, data, flags & MS_SILENT ? 1 : 0);
	if (error) {
		up_write(&new->s_umount);
		deactivate_super(new);
		goto error;
	}

	new->s_flags |= MS_ACTIVE;

	/* Grab a reference to the gfs2 mount point */
	atomic_inc(&sdp->sd_gfs2mnt->mnt_count);
	return simple_set_mnt(mnt, new);
error:
	return error;
}
static void gfs2_kill_sb(struct super_block *sb)
{
	kill_block_super(sb);
}

static void gfs2_kill_sb_meta(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	generic_shutdown_super(sb);
	sdp->sd_vfs_meta = NULL;
	atomic_dec(&sdp->sd_gfs2mnt->mnt_count);
}
struct file_system_type gfs2_fs_type = {
	.name = "gfs2",
	.fs_flags = FS_REQUIRES_DEV,
	.get_sb = gfs2_get_sb,
	.kill_sb = gfs2_kill_sb,
	.owner = THIS_MODULE,
};

struct file_system_type gfs2meta_fs_type = {
	.name = "gfs2meta",
	.fs_flags = FS_REQUIRES_DEV,
	.get_sb = gfs2_get_sb_meta,
	.kill_sb = gfs2_kill_sb_meta,
	.owner = THIS_MODULE,
};