
/fs/xfs/xfs_qm.c

https://bitbucket.org/emiliolopez/linux
  1. /*
  2. * Copyright (c) 2000-2005 Silicon Graphics, Inc.
  3. * All Rights Reserved.
  4. *
  5. * This program is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU General Public License as
  7. * published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope that it would be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write the Free Software Foundation,
  16. * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  17. */
  18. #include "xfs.h"
  19. #include "xfs_fs.h"
  20. #include "xfs_shared.h"
  21. #include "xfs_format.h"
  22. #include "xfs_log_format.h"
  23. #include "xfs_trans_resv.h"
  24. #include "xfs_bit.h"
  25. #include "xfs_sb.h"
  26. #include "xfs_mount.h"
  27. #include "xfs_inode.h"
  28. #include "xfs_ialloc.h"
  29. #include "xfs_itable.h"
  30. #include "xfs_quota.h"
  31. #include "xfs_error.h"
  32. #include "xfs_bmap.h"
  33. #include "xfs_bmap_btree.h"
  34. #include "xfs_bmap_util.h"
  35. #include "xfs_trans.h"
  36. #include "xfs_trans_space.h"
  37. #include "xfs_qm.h"
  38. #include "xfs_trace.h"
  39. #include "xfs_icache.h"
  40. #include "xfs_cksum.h"
  41. /*
  42. * The global quota manager. There is only one of these for the entire
  43. * system, _not_ one per file system. XQM keeps track of the overall
  44. * quota functionality, including maintaining the freelist and hash
  45. * tables of dquots.
  46. */
  47. STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
  48. STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
  49. STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
  50. /*
  51. * We use the batch lookup interface to iterate over the dquots as it
  52. * currently is the only interface into the radix tree code that allows
  53. * fuzzy lookups instead of exact matches. Holding the lock over multiple
  54. * operations is fine as all callers are used either during mount/umount
  55. * or quotaoff.
  56. */
  57. #define XFS_DQ_LOOKUP_BATCH 32
  58. STATIC int
  59. xfs_qm_dquot_walk(
  60. struct xfs_mount *mp,
  61. int type,
  62. int (*execute)(struct xfs_dquot *dqp, void *data),
  63. void *data)
  64. {
  65. struct xfs_quotainfo *qi = mp->m_quotainfo;
  66. struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
  67. uint32_t next_index;
  68. int last_error = 0;
  69. int skipped;
  70. int nr_found;
  71. restart:
  72. skipped = 0;
  73. next_index = 0;
  74. nr_found = 0;
  75. while (1) {
  76. struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
  77. int error = 0;
  78. int i;
  79. mutex_lock(&qi->qi_tree_lock);
  80. nr_found = radix_tree_gang_lookup(tree, (void **)batch,
  81. next_index, XFS_DQ_LOOKUP_BATCH);
  82. if (!nr_found) {
  83. mutex_unlock(&qi->qi_tree_lock);
  84. break;
  85. }
  86. for (i = 0; i < nr_found; i++) {
  87. struct xfs_dquot *dqp = batch[i];
  88. next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
  89. error = execute(batch[i], data);
  90. if (error == -EAGAIN) {
  91. skipped++;
  92. continue;
  93. }
  94. if (error && last_error != -EFSCORRUPTED)
  95. last_error = error;
  96. }
  97. mutex_unlock(&qi->qi_tree_lock);
  98. /* bail out if the filesystem is corrupted. */
  99. if (last_error == -EFSCORRUPTED) {
  100. skipped = 0;
  101. break;
  102. }
  103. /* we're done if id overflows back to zero */
  104. if (!next_index)
  105. break;
  106. }
  107. if (skipped) {
  108. delay(1);
  109. goto restart;
  110. }
  111. return last_error;
  112. }
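/*
 * Note: the callers in this file pass xfs_qm_dqpurge() (via
 * xfs_qm_dqpurge_all) and xfs_qm_flush_one() (via quotacheck) as the
 * execute callback; a -EAGAIN return from the callback makes the walk
 * retry after a short delay.
 */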
  113. /*
  114. * Purge a dquot from all tracking data structures and free it.
  115. */
  116. STATIC int
  117. xfs_qm_dqpurge(
  118. struct xfs_dquot *dqp,
  119. void *data)
  120. {
  121. struct xfs_mount *mp = dqp->q_mount;
  122. struct xfs_quotainfo *qi = mp->m_quotainfo;
  123. xfs_dqlock(dqp);
  124. if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
  125. xfs_dqunlock(dqp);
  126. return -EAGAIN;
  127. }
  128. dqp->dq_flags |= XFS_DQ_FREEING;
  129. xfs_dqflock(dqp);
  130. /*
  131. * If we are turning this quota type off, we don't care
  132. * about the dirty metadata sitting in this dquot. OTOH, if
  133. * we're unmounting, we do care, so we flush it and wait.
  134. */
  135. if (XFS_DQ_IS_DIRTY(dqp)) {
  136. struct xfs_buf *bp = NULL;
  137. int error;
  138. /*
  139. * We don't care about getting disk errors here. We need
  140. * to purge this dquot anyway, so we go ahead regardless.
  141. */
  142. error = xfs_qm_dqflush(dqp, &bp);
  143. if (error) {
  144. xfs_warn(mp, "%s: dquot %p flush failed",
  145. __func__, dqp);
  146. } else {
  147. error = xfs_bwrite(bp);
  148. xfs_buf_relse(bp);
  149. }
  150. xfs_dqflock(dqp);
  151. }
  152. ASSERT(atomic_read(&dqp->q_pincount) == 0);
  153. ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
  154. !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
  155. xfs_dqfunlock(dqp);
  156. xfs_dqunlock(dqp);
  157. radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
  158. be32_to_cpu(dqp->q_core.d_id));
  159. qi->qi_dquots--;
  160. /*
  161. * We move dquots to the freelist as soon as their reference count
  162. * hits zero, so it really should be on the freelist here.
  163. */
  164. ASSERT(!list_empty(&dqp->q_lru));
  165. list_lru_del(&qi->qi_lru, &dqp->q_lru);
  166. XFS_STATS_DEC(mp, xs_qm_dquot_unused);
  167. xfs_qm_dqdestroy(dqp);
  168. return 0;
  169. }
  170. /*
  171. * Purge the dquot cache.
  172. */
  173. void
  174. xfs_qm_dqpurge_all(
  175. struct xfs_mount *mp,
  176. uint flags)
  177. {
  178. if (flags & XFS_QMOPT_UQUOTA)
  179. xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
  180. if (flags & XFS_QMOPT_GQUOTA)
  181. xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
  182. if (flags & XFS_QMOPT_PQUOTA)
  183. xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
  184. }
  185. /*
  186. * Just destroy the quotainfo structure.
  187. */
  188. void
  189. xfs_qm_unmount(
  190. struct xfs_mount *mp)
  191. {
  192. if (mp->m_quotainfo) {
  193. xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
  194. xfs_qm_destroy_quotainfo(mp);
  195. }
  196. }
  197. /*
  198. * Called from the vfsops layer.
  199. */
  200. void
  201. xfs_qm_unmount_quotas(
  202. xfs_mount_t *mp)
  203. {
  204. /*
  205. * Release the dquots that root inode, et al might be holding,
  206. * before we flush quotas and blow away the quotainfo structure.
  207. */
  208. ASSERT(mp->m_rootip);
  209. xfs_qm_dqdetach(mp->m_rootip);
  210. if (mp->m_rbmip)
  211. xfs_qm_dqdetach(mp->m_rbmip);
  212. if (mp->m_rsumip)
  213. xfs_qm_dqdetach(mp->m_rsumip);
  214. /*
  215. * Release the quota inodes.
  216. */
  217. if (mp->m_quotainfo) {
  218. if (mp->m_quotainfo->qi_uquotaip) {
  219. IRELE(mp->m_quotainfo->qi_uquotaip);
  220. mp->m_quotainfo->qi_uquotaip = NULL;
  221. }
  222. if (mp->m_quotainfo->qi_gquotaip) {
  223. IRELE(mp->m_quotainfo->qi_gquotaip);
  224. mp->m_quotainfo->qi_gquotaip = NULL;
  225. }
  226. if (mp->m_quotainfo->qi_pquotaip) {
  227. IRELE(mp->m_quotainfo->qi_pquotaip);
  228. mp->m_quotainfo->qi_pquotaip = NULL;
  229. }
  230. }
  231. }
  232. STATIC int
  233. xfs_qm_dqattach_one(
  234. xfs_inode_t *ip,
  235. xfs_dqid_t id,
  236. uint type,
  237. uint doalloc,
  238. xfs_dquot_t **IO_idqpp)
  239. {
  240. xfs_dquot_t *dqp;
  241. int error;
  242. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  243. error = 0;
  244. /*
  245. * See if we already have it in the inode itself. IO_idqpp is &i_udquot
  246. * or &i_gdquot. This made the code look weird, but made the logic a lot
  247. * simpler.
  248. */
  249. dqp = *IO_idqpp;
  250. if (dqp) {
  251. trace_xfs_dqattach_found(dqp);
  252. return 0;
  253. }
  254. /*
  255. * Find the dquot from somewhere. This bumps the reference count of
  256. * dquot and returns it locked. This can return ENOENT if dquot didn't
  257. * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
  258. * turned off suddenly.
  259. */
  260. error = xfs_qm_dqget(ip->i_mount, ip, id, type,
  261. doalloc | XFS_QMOPT_DOWARN, &dqp);
  262. if (error)
  263. return error;
  264. trace_xfs_dqattach_get(dqp);
  265. /*
  266. * dqget may have dropped and re-acquired the ilock, but it guarantees
  267. * that the dquot returned is the one that should go in the inode.
  268. */
  269. *IO_idqpp = dqp;
  270. xfs_dqunlock(dqp);
  271. return 0;
  272. }
  273. static bool
  274. xfs_qm_need_dqattach(
  275. struct xfs_inode *ip)
  276. {
  277. struct xfs_mount *mp = ip->i_mount;
  278. if (!XFS_IS_QUOTA_RUNNING(mp))
  279. return false;
  280. if (!XFS_IS_QUOTA_ON(mp))
  281. return false;
  282. if (!XFS_NOT_DQATTACHED(mp, ip))
  283. return false;
  284. if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
  285. return false;
  286. return true;
  287. }
  288. /*
  289. * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
  290. * into account.
  291. * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
  292. * Inode may get unlocked and relocked in here, and the caller must deal with
  293. * the consequences.
  294. */
  295. int
  296. xfs_qm_dqattach_locked(
  297. xfs_inode_t *ip,
  298. uint flags)
  299. {
  300. xfs_mount_t *mp = ip->i_mount;
  301. int error = 0;
  302. if (!xfs_qm_need_dqattach(ip))
  303. return 0;
  304. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  305. if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
  306. error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
  307. flags & XFS_QMOPT_DQALLOC,
  308. &ip->i_udquot);
  309. if (error)
  310. goto done;
  311. ASSERT(ip->i_udquot);
  312. }
  313. if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
  314. error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
  315. flags & XFS_QMOPT_DQALLOC,
  316. &ip->i_gdquot);
  317. if (error)
  318. goto done;
  319. ASSERT(ip->i_gdquot);
  320. }
  321. if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
  322. error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
  323. flags & XFS_QMOPT_DQALLOC,
  324. &ip->i_pdquot);
  325. if (error)
  326. goto done;
  327. ASSERT(ip->i_pdquot);
  328. }
  329. done:
  330. /*
  331. * Don't worry about the dquots that we may have attached before any
  332. * error - they'll get detached later if it has not already been done.
  333. */
  334. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  335. return error;
  336. }
  337. int
  338. xfs_qm_dqattach(
  339. struct xfs_inode *ip,
  340. uint flags)
  341. {
  342. int error;
  343. if (!xfs_qm_need_dqattach(ip))
  344. return 0;
  345. xfs_ilock(ip, XFS_ILOCK_EXCL);
  346. error = xfs_qm_dqattach_locked(ip, flags);
  347. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  348. return error;
  349. }
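/*
 * Convenience wrapper: callers that do not already hold the inode lock
 * use xfs_qm_dqattach(), which takes and drops XFS_ILOCK_EXCL around
 * xfs_qm_dqattach_locked().
 */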
  350. /*
  351. * Release dquots (and their references) if any.
  352. * The inode should be locked EXCL except when this is called by
  353. * xfs_ireclaim.
  354. */
  355. void
  356. xfs_qm_dqdetach(
  357. xfs_inode_t *ip)
  358. {
  359. if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
  360. return;
  361. trace_xfs_dquot_dqdetach(ip);
  362. ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
  363. if (ip->i_udquot) {
  364. xfs_qm_dqrele(ip->i_udquot);
  365. ip->i_udquot = NULL;
  366. }
  367. if (ip->i_gdquot) {
  368. xfs_qm_dqrele(ip->i_gdquot);
  369. ip->i_gdquot = NULL;
  370. }
  371. if (ip->i_pdquot) {
  372. xfs_qm_dqrele(ip->i_pdquot);
  373. ip->i_pdquot = NULL;
  374. }
  375. }
  376. struct xfs_qm_isolate {
  377. struct list_head buffers;
  378. struct list_head dispose;
  379. };
  380. static enum lru_status
  381. xfs_qm_dquot_isolate(
  382. struct list_head *item,
  383. struct list_lru_one *lru,
  384. spinlock_t *lru_lock,
  385. void *arg)
  386. __releases(lru_lock) __acquires(lru_lock)
  387. {
  388. struct xfs_dquot *dqp = container_of(item,
  389. struct xfs_dquot, q_lru);
  390. struct xfs_qm_isolate *isol = arg;
  391. if (!xfs_dqlock_nowait(dqp))
  392. goto out_miss_busy;
  393. /*
  394. * This dquot has acquired a reference in the meantime; remove it from
  395. * the freelist and try again.
  396. */
  397. if (dqp->q_nrefs) {
  398. xfs_dqunlock(dqp);
  399. XFS_STATS_INC(dqp->q_mount, xs_qm_dqwants);
  400. trace_xfs_dqreclaim_want(dqp);
  401. list_lru_isolate(lru, &dqp->q_lru);
  402. XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
  403. return LRU_REMOVED;
  404. }
  405. /*
  406. * If the dquot is dirty, flush it. If it's already being flushed, just
  407. * skip it so there is time for the IO to complete before we try to
  408. * reclaim it again on the next LRU pass.
  409. */
  410. if (!xfs_dqflock_nowait(dqp)) {
  411. xfs_dqunlock(dqp);
  412. goto out_miss_busy;
  413. }
  414. if (XFS_DQ_IS_DIRTY(dqp)) {
  415. struct xfs_buf *bp = NULL;
  416. int error;
  417. trace_xfs_dqreclaim_dirty(dqp);
  418. /* we have to drop the LRU lock to flush the dquot */
  419. spin_unlock(lru_lock);
  420. error = xfs_qm_dqflush(dqp, &bp);
  421. if (error) {
  422. xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
  423. __func__, dqp);
  424. goto out_unlock_dirty;
  425. }
  426. xfs_buf_delwri_queue(bp, &isol->buffers);
  427. xfs_buf_relse(bp);
  428. goto out_unlock_dirty;
  429. }
  430. xfs_dqfunlock(dqp);
  431. /*
  432. * Prevent lookups now that we are past the point of no return.
  433. */
  434. dqp->dq_flags |= XFS_DQ_FREEING;
  435. xfs_dqunlock(dqp);
  436. ASSERT(dqp->q_nrefs == 0);
  437. list_lru_isolate_move(lru, &dqp->q_lru, &isol->dispose);
  438. XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot_unused);
  439. trace_xfs_dqreclaim_done(dqp);
  440. XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaims);
  441. return LRU_REMOVED;
  442. out_miss_busy:
  443. trace_xfs_dqreclaim_busy(dqp);
  444. XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
  445. return LRU_SKIP;
  446. out_unlock_dirty:
  447. trace_xfs_dqreclaim_busy(dqp);
  448. XFS_STATS_INC(dqp->q_mount, xs_qm_dqreclaim_misses);
  449. xfs_dqunlock(dqp);
  450. spin_lock(lru_lock);
  451. return LRU_RETRY;
  452. }
  453. static unsigned long
  454. xfs_qm_shrink_scan(
  455. struct shrinker *shrink,
  456. struct shrink_control *sc)
  457. {
  458. struct xfs_quotainfo *qi = container_of(shrink,
  459. struct xfs_quotainfo, qi_shrinker);
  460. struct xfs_qm_isolate isol;
  461. unsigned long freed;
  462. int error;
  463. if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM))
  464. return 0;
  465. INIT_LIST_HEAD(&isol.buffers);
  466. INIT_LIST_HEAD(&isol.dispose);
  467. freed = list_lru_shrink_walk(&qi->qi_lru, sc,
  468. xfs_qm_dquot_isolate, &isol);
  469. error = xfs_buf_delwri_submit(&isol.buffers);
  470. if (error)
  471. xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
  472. while (!list_empty(&isol.dispose)) {
  473. struct xfs_dquot *dqp;
  474. dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
  475. list_del_init(&dqp->q_lru);
  476. xfs_qm_dqfree_one(dqp);
  477. }
  478. return freed;
  479. }
  480. static unsigned long
  481. xfs_qm_shrink_count(
  482. struct shrinker *shrink,
  483. struct shrink_control *sc)
  484. {
  485. struct xfs_quotainfo *qi = container_of(shrink,
  486. struct xfs_quotainfo, qi_shrinker);
  487. return list_lru_shrink_count(&qi->qi_lru, sc);
  488. }
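/*
 * The two functions above are the shrinker count/scan callbacks; they
 * are wired up to qi_shrinker in xfs_qm_init_quotainfo() below.
 */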
  489. STATIC void
  490. xfs_qm_set_defquota(
  491. xfs_mount_t *mp,
  492. uint type,
  493. xfs_quotainfo_t *qinf)
  494. {
  495. xfs_dquot_t *dqp;
  496. struct xfs_def_quota *defq;
  497. int error;
  498. error = xfs_qm_dqread(mp, 0, type, XFS_QMOPT_DOWARN, &dqp);
  499. if (!error) {
  500. xfs_disk_dquot_t *ddqp = &dqp->q_core;
  501. defq = xfs_get_defquota(dqp, qinf);
  502. /*
  503. * Timers and warnings have been already set, let's just set the
  504. * default limits for this quota type
  505. */
  506. defq->bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
  507. defq->bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
  508. defq->ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
  509. defq->isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
  510. defq->rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
  511. defq->rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
  512. xfs_qm_dqdestroy(dqp);
  513. }
  514. }
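/*
 * xfs_qm_set_defquota() is called from xfs_qm_init_quotainfo() once for
 * each quota type that is running, after the timers and warning limits
 * have been read.
 */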
  515. /*
  516. * This initializes all the quota information that's kept in the
  517. * mount structure
  518. */
  519. STATIC int
  520. xfs_qm_init_quotainfo(
  521. xfs_mount_t *mp)
  522. {
  523. xfs_quotainfo_t *qinf;
  524. int error;
  525. xfs_dquot_t *dqp;
  526. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  527. qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
  528. error = list_lru_init(&qinf->qi_lru);
  529. if (error)
  530. goto out_free_qinf;
  531. /*
  532. * See if quotainodes are setup, and if not, allocate them,
  533. * and change the superblock accordingly.
  534. */
  535. error = xfs_qm_init_quotainos(mp);
  536. if (error)
  537. goto out_free_lru;
  538. INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
  539. INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
  540. INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
  541. mutex_init(&qinf->qi_tree_lock);
  542. /* mutex used to serialize quotaoffs */
  543. mutex_init(&qinf->qi_quotaofflock);
  544. /* Precalc some constants */
  545. qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
  546. qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);
  547. mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
  548. /*
  549. * We try to get the limits from the superuser's limits fields.
  550. * This is quite hacky, but it is standard quota practice.
  551. *
  552. * Since we may not have done a quotacheck by this point, just read
  553. * the dquot without attaching it to any hashtables or lists.
  554. *
  555. * Timers and warnings are globally set by the first timer found in
  556. * user/group/proj quota types, otherwise a default value is used.
  557. * This should be split into different fields per quota type.
  558. */
  559. error = xfs_qm_dqread(mp, 0,
  560. XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
  561. (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
  562. XFS_DQ_PROJ),
  563. XFS_QMOPT_DOWARN, &dqp);
  564. if (!error) {
  565. xfs_disk_dquot_t *ddqp = &dqp->q_core;
  566. /*
  567. * The warnings and timers set the grace period given to
  568. * a user or group before he or she can no longer perform any
  569. * more writes. If it is zero, a default is used.
  570. */
  571. qinf->qi_btimelimit = ddqp->d_btimer ?
  572. be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
  573. qinf->qi_itimelimit = ddqp->d_itimer ?
  574. be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
  575. qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
  576. be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
  577. qinf->qi_bwarnlimit = ddqp->d_bwarns ?
  578. be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
  579. qinf->qi_iwarnlimit = ddqp->d_iwarns ?
  580. be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
  581. qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
  582. be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
  583. xfs_qm_dqdestroy(dqp);
  584. } else {
  585. qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
  586. qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
  587. qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
  588. qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
  589. qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
  590. qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
  591. }
  592. if (XFS_IS_UQUOTA_RUNNING(mp))
  593. xfs_qm_set_defquota(mp, XFS_DQ_USER, qinf);
  594. if (XFS_IS_GQUOTA_RUNNING(mp))
  595. xfs_qm_set_defquota(mp, XFS_DQ_GROUP, qinf);
  596. if (XFS_IS_PQUOTA_RUNNING(mp))
  597. xfs_qm_set_defquota(mp, XFS_DQ_PROJ, qinf);
  598. qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
  599. qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
  600. qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
  601. qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
  602. register_shrinker(&qinf->qi_shrinker);
  603. return 0;
  604. out_free_lru:
  605. list_lru_destroy(&qinf->qi_lru);
  606. out_free_qinf:
  607. kmem_free(qinf);
  608. mp->m_quotainfo = NULL;
  609. return error;
  610. }
  611. /*
  612. * Gets called when unmounting a filesystem or when all quotas get
  613. * turned off.
  614. * This purges the quota inodes, destroys locks and frees itself.
  615. */
  616. void
  617. xfs_qm_destroy_quotainfo(
  618. xfs_mount_t *mp)
  619. {
  620. xfs_quotainfo_t *qi;
  621. qi = mp->m_quotainfo;
  622. ASSERT(qi != NULL);
  623. unregister_shrinker(&qi->qi_shrinker);
  624. list_lru_destroy(&qi->qi_lru);
  625. if (qi->qi_uquotaip) {
  626. IRELE(qi->qi_uquotaip);
  627. qi->qi_uquotaip = NULL; /* paranoia */
  628. }
  629. if (qi->qi_gquotaip) {
  630. IRELE(qi->qi_gquotaip);
  631. qi->qi_gquotaip = NULL;
  632. }
  633. if (qi->qi_pquotaip) {
  634. IRELE(qi->qi_pquotaip);
  635. qi->qi_pquotaip = NULL;
  636. }
  637. mutex_destroy(&qi->qi_quotaofflock);
  638. kmem_free(qi);
  639. mp->m_quotainfo = NULL;
  640. }
  641. /*
  642. * Create an inode and return with a reference already taken, but unlocked
  643. * This is how we create quota inodes
  644. */
  645. STATIC int
  646. xfs_qm_qino_alloc(
  647. xfs_mount_t *mp,
  648. xfs_inode_t **ip,
  649. uint flags)
  650. {
  651. xfs_trans_t *tp;
  652. int error;
  653. int committed;
  654. bool need_alloc = true;
  655. *ip = NULL;
  656. /*
  657. * With a superblock that doesn't have a separate pquotino, we
  658. * share an inode between gquota and pquota. If the on-disk
  659. * superblock has GQUOTA and the filesystem is now mounted
  660. * with PQUOTA, just use sb_gquotino for sb_pquotino and
  661. * vice-versa.
  662. */
  663. if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
  664. (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
  665. xfs_ino_t ino = NULLFSINO;
  666. if ((flags & XFS_QMOPT_PQUOTA) &&
  667. (mp->m_sb.sb_gquotino != NULLFSINO)) {
  668. ino = mp->m_sb.sb_gquotino;
  669. ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
  670. } else if ((flags & XFS_QMOPT_GQUOTA) &&
  671. (mp->m_sb.sb_pquotino != NULLFSINO)) {
  672. ino = mp->m_sb.sb_pquotino;
  673. ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
  674. }
  675. if (ino != NULLFSINO) {
  676. error = xfs_iget(mp, NULL, ino, 0, 0, ip);
  677. if (error)
  678. return error;
  679. mp->m_sb.sb_gquotino = NULLFSINO;
  680. mp->m_sb.sb_pquotino = NULLFSINO;
  681. need_alloc = false;
  682. }
  683. }
  684. error = xfs_trans_alloc(mp, &M_RES(mp)->tr_create,
  685. XFS_QM_QINOCREATE_SPACE_RES(mp), 0, 0, &tp);
  686. if (error)
  687. return error;
  688. if (need_alloc) {
  689. error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
  690. &committed);
  691. if (error) {
  692. xfs_trans_cancel(tp);
  693. return error;
  694. }
  695. }
  696. /*
  697. * Make the changes in the superblock, and log those too.
  698. * sbfields arg may contain fields other than *QUOTINO;
  699. * VERSIONNUM for example.
  700. */
  701. spin_lock(&mp->m_sb_lock);
  702. if (flags & XFS_QMOPT_SBVERSION) {
  703. ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
  704. xfs_sb_version_addquota(&mp->m_sb);
  705. mp->m_sb.sb_uquotino = NULLFSINO;
  706. mp->m_sb.sb_gquotino = NULLFSINO;
  707. mp->m_sb.sb_pquotino = NULLFSINO;
  708. /* qflags will get updated fully _after_ quotacheck */
  709. mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
  710. }
  711. if (flags & XFS_QMOPT_UQUOTA)
  712. mp->m_sb.sb_uquotino = (*ip)->i_ino;
  713. else if (flags & XFS_QMOPT_GQUOTA)
  714. mp->m_sb.sb_gquotino = (*ip)->i_ino;
  715. else
  716. mp->m_sb.sb_pquotino = (*ip)->i_ino;
  717. spin_unlock(&mp->m_sb_lock);
  718. xfs_log_sb(tp);
  719. error = xfs_trans_commit(tp);
  720. if (error) {
  721. ASSERT(XFS_FORCED_SHUTDOWN(mp));
  722. xfs_alert(mp, "%s failed (error %d)!", __func__, error);
  723. }
  724. if (need_alloc)
  725. xfs_finish_inode_setup(*ip);
  726. return error;
  727. }
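/*
 * xfs_qm_qino_alloc() is called from xfs_qm_init_quotainos() below for
 * each enabled quota type whose inode does not exist yet.
 */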
  728. STATIC void
  729. xfs_qm_reset_dqcounts(
  730. xfs_mount_t *mp,
  731. xfs_buf_t *bp,
  732. xfs_dqid_t id,
  733. uint type)
  734. {
  735. struct xfs_dqblk *dqb;
  736. int j;
  737. trace_xfs_reset_dqcounts(bp, _RET_IP_);
  738. /*
  739. * Reset all counters and timers. They'll be
  740. * started afresh by xfs_qm_quotacheck.
  741. */
  742. #ifdef DEBUG
  743. j = (int)XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB) /
  744. sizeof(xfs_dqblk_t);
  745. ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
  746. #endif
  747. dqb = bp->b_addr;
  748. for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
  749. struct xfs_disk_dquot *ddq;
  750. ddq = (struct xfs_disk_dquot *)&dqb[j];
  751. /*
  752. * Do a sanity check, and if needed, repair the dqblk. Don't
  753. * output any warnings because it's perfectly possible to
  754. * find uninitialised dquot blks. See comment in xfs_dqcheck.
  755. */
  756. xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
  757. "xfs_quotacheck");
  758. /*
  759. * Reset type in case we are reusing group quota file for
  760. * project quotas or vice versa
  761. */
  762. ddq->d_flags = type;
  763. ddq->d_bcount = 0;
  764. ddq->d_icount = 0;
  765. ddq->d_rtbcount = 0;
  766. ddq->d_btimer = 0;
  767. ddq->d_itimer = 0;
  768. ddq->d_rtbtimer = 0;
  769. ddq->d_bwarns = 0;
  770. ddq->d_iwarns = 0;
  771. ddq->d_rtbwarns = 0;
  772. if (xfs_sb_version_hascrc(&mp->m_sb)) {
  773. xfs_update_cksum((char *)&dqb[j],
  774. sizeof(struct xfs_dqblk),
  775. XFS_DQUOT_CRC_OFF);
  776. }
  777. }
  778. }
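/*
 * xfs_qm_reset_dqcounts() is applied to every dquot buffer by
 * xfs_qm_dqiter_bufs() at the start of quotacheck, so the counters can
 * be rebuilt from scratch by the bulkstat walk.
 */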
  779. STATIC int
  780. xfs_qm_dqiter_bufs(
  781. struct xfs_mount *mp,
  782. xfs_dqid_t firstid,
  783. xfs_fsblock_t bno,
  784. xfs_filblks_t blkcnt,
  785. uint flags,
  786. struct list_head *buffer_list)
  787. {
  788. struct xfs_buf *bp;
  789. int error;
  790. int type;
  791. ASSERT(blkcnt > 0);
  792. type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
  793. (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
  794. error = 0;
  795. /*
  796. * Blkcnt arg can be a very big number, and might even be
  797. * larger than the log itself. So, we have to break it up into
  798. * manageable-sized transactions.
  799. * Note that we don't start a permanent transaction here; we might
  800. * not be able to get a log reservation for the whole thing up front,
  801. * and we don't really care to either, because we just discard
  802. * everything if we were to crash in the middle of this loop.
  803. */
  804. while (blkcnt--) {
  805. error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
  806. XFS_FSB_TO_DADDR(mp, bno),
  807. mp->m_quotainfo->qi_dqchunklen, 0, &bp,
  808. &xfs_dquot_buf_ops);
  809. /*
  810. * CRC and validation errors will return an EFSCORRUPTED here. If
  811. * this occurs, re-read without CRC validation so that we can
  812. * repair the damage via xfs_qm_reset_dqcounts(). This process
  813. * will leave a trace in the log indicating corruption has
  814. * been detected.
  815. */
  816. if (error == -EFSCORRUPTED) {
  817. error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
  818. XFS_FSB_TO_DADDR(mp, bno),
  819. mp->m_quotainfo->qi_dqchunklen, 0, &bp,
  820. NULL);
  821. }
  822. if (error)
  823. break;
  824. /*
  825. * A corrupt buffer might not have a verifier attached, so
  826. * make sure we have the correct one attached before writeback
  827. * occurs.
  828. */
  829. bp->b_ops = &xfs_dquot_buf_ops;
  830. xfs_qm_reset_dqcounts(mp, bp, firstid, type);
  831. xfs_buf_delwri_queue(bp, buffer_list);
  832. xfs_buf_relse(bp);
  833. /* goto the next block. */
  834. bno++;
  835. firstid += mp->m_quotainfo->qi_dqperchunk;
  836. }
  837. return error;
  838. }
  839. /*
  840. * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
  841. * caller supplied function for every chunk of dquots that we find.
  842. */
  843. STATIC int
  844. xfs_qm_dqiterate(
  845. struct xfs_mount *mp,
  846. struct xfs_inode *qip,
  847. uint flags,
  848. struct list_head *buffer_list)
  849. {
  850. struct xfs_bmbt_irec *map;
  851. int i, nmaps; /* number of map entries */
  852. int error; /* return value */
  853. xfs_fileoff_t lblkno;
  854. xfs_filblks_t maxlblkcnt;
  855. xfs_dqid_t firstid;
  856. xfs_fsblock_t rablkno;
  857. xfs_filblks_t rablkcnt;
  858. error = 0;
  859. /*
  860. * This looks racy, but we can't keep an inode lock across a
  861. * trans_reserve. But, this gets called during quotacheck, and that
  862. * happens only at mount time which is single threaded.
  863. */
  864. if (qip->i_d.di_nblocks == 0)
  865. return 0;
  866. map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
  867. lblkno = 0;
  868. maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
  869. do {
  870. uint lock_mode;
  871. nmaps = XFS_DQITER_MAP_SIZE;
  872. /*
  873. * We aren't changing the inode itself. Just changing
  874. * some of its data. No new blocks are added here, and
  875. * the inode is never added to the transaction.
  876. */
  877. lock_mode = xfs_ilock_data_map_shared(qip);
  878. error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
  879. map, &nmaps, 0);
  880. xfs_iunlock(qip, lock_mode);
  881. if (error)
  882. break;
  883. ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
  884. for (i = 0; i < nmaps; i++) {
  885. ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
  886. ASSERT(map[i].br_blockcount);
  887. lblkno += map[i].br_blockcount;
  888. if (map[i].br_startblock == HOLESTARTBLOCK)
  889. continue;
  890. firstid = (xfs_dqid_t) map[i].br_startoff *
  891. mp->m_quotainfo->qi_dqperchunk;
  892. /*
  893. * Do a read-ahead on the next extent.
  894. */
  895. if ((i+1 < nmaps) &&
  896. (map[i+1].br_startblock != HOLESTARTBLOCK)) {
  897. rablkcnt = map[i+1].br_blockcount;
  898. rablkno = map[i+1].br_startblock;
  899. while (rablkcnt--) {
  900. xfs_buf_readahead(mp->m_ddev_targp,
  901. XFS_FSB_TO_DADDR(mp, rablkno),
  902. mp->m_quotainfo->qi_dqchunklen,
  903. &xfs_dquot_buf_ops);
  904. rablkno++;
  905. }
  906. }
  907. /*
  908. * Iterate thru all the blks in the extent and
  909. * reset the counters of all the dquots inside them.
  910. */
  911. error = xfs_qm_dqiter_bufs(mp, firstid,
  912. map[i].br_startblock,
  913. map[i].br_blockcount,
  914. flags, buffer_list);
  915. if (error)
  916. goto out;
  917. }
  918. } while (nmaps > 0);
  919. out:
  920. kmem_free(map);
  921. return error;
  922. }
  923. /*
  924. * Called by dqusage_adjust in doing a quotacheck.
  925. *
  926. * Given the inode and a dquot id, this updates both the incore dquot as well
  927. * as the buffer copy. This is so that once the quotacheck is done, we can
  928. * just log all the buffers, as opposed to logging numerous updates to
  929. * individual dquots.
  930. */
  931. STATIC int
  932. xfs_qm_quotacheck_dqadjust(
  933. struct xfs_inode *ip,
  934. xfs_dqid_t id,
  935. uint type,
  936. xfs_qcnt_t nblks,
  937. xfs_qcnt_t rtblks)
  938. {
  939. struct xfs_mount *mp = ip->i_mount;
  940. struct xfs_dquot *dqp;
  941. int error;
  942. error = xfs_qm_dqget(mp, ip, id, type,
  943. XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
  944. if (error) {
  945. /*
  946. * Shouldn't be able to turn off quotas here.
  947. */
  948. ASSERT(error != -ESRCH);
  949. ASSERT(error != -ENOENT);
  950. return error;
  951. }
  952. trace_xfs_dqadjust(dqp);
  953. /*
  954. * Adjust the inode count and the block count to reflect this inode's
  955. * resource usage.
  956. */
  957. be64_add_cpu(&dqp->q_core.d_icount, 1);
  958. dqp->q_res_icount++;
  959. if (nblks) {
  960. be64_add_cpu(&dqp->q_core.d_bcount, nblks);
  961. dqp->q_res_bcount += nblks;
  962. }
  963. if (rtblks) {
  964. be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
  965. dqp->q_res_rtbcount += rtblks;
  966. }
  967. /*
  968. * Set default limits, adjust timers (since we changed usages)
  969. *
  970. * There are no timers for the default values set in the root dquot.
  971. */
  972. if (dqp->q_core.d_id) {
  973. xfs_qm_adjust_dqlimits(mp, dqp);
  974. xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
  975. }
  976. dqp->dq_flags |= XFS_DQ_DIRTY;
  977. xfs_qm_dqput(dqp);
  978. return 0;
  979. }
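/*
 * xfs_qm_quotacheck_dqadjust() is invoked up to three times per inode
 * (user, group and project) from xfs_qm_dqusage_adjust() below.
 */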
  980. /*
  981. * callback routine supplied to bulkstat(). Given an inumber, find its
  982. * dquots and update them to account for resources taken by that inode.
  983. */
  984. /* ARGSUSED */
  985. STATIC int
  986. xfs_qm_dqusage_adjust(
  987. xfs_mount_t *mp, /* mount point for filesystem */
  988. xfs_ino_t ino, /* inode number to get data for */
  989. void __user *buffer, /* not used */
  990. int ubsize, /* not used */
  991. int *ubused, /* not used */
  992. int *res) /* result code value */
  993. {
  994. xfs_inode_t *ip;
  995. xfs_qcnt_t nblks;
  996. xfs_filblks_t rtblks = 0; /* total rt blks */
  997. int error;
  998. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  999. /*
  1000. * rootino must have its resources accounted for, not so with the quota
  1001. * inodes.
  1002. */
  1003. if (xfs_is_quota_inode(&mp->m_sb, ino)) {
  1004. *res = BULKSTAT_RV_NOTHING;
  1005. return -EINVAL;
  1006. }
  1007. /*
  1008. * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
  1009. * interface expects the inode to be exclusively locked because that's
  1010. * the case in all other instances. It's OK that we do this because
  1011. * quotacheck is done only at mount time.
  1012. */
  1013. error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
  1014. &ip);
  1015. if (error) {
  1016. *res = BULKSTAT_RV_NOTHING;
  1017. return error;
  1018. }
  1019. ASSERT(ip->i_delayed_blks == 0);
  1020. if (XFS_IS_REALTIME_INODE(ip)) {
  1021. struct xfs_ifork *ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
  1022. if (!(ifp->if_flags & XFS_IFEXTENTS)) {
  1023. error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
  1024. if (error)
  1025. goto error0;
  1026. }
  1027. xfs_bmap_count_leaves(ifp, &rtblks);
  1028. }
  1029. nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
  1030. /*
  1031. * Add the (disk blocks and inode) resources occupied by this
  1032. * inode to its dquots. We do this adjustment in the incore dquot,
  1033. * and also copy the changes to its buffer.
  1034. * We don't care about putting these changes in a transaction
  1035. * envelope because if we crash in the middle of a 'quotacheck'
  1036. * we have to start from the beginning anyway.
  1037. * Once we're done, we'll log all the dquot bufs.
  1038. *
  1039. * The *QUOTA_ON checks below may look pretty racy, but quotachecks
  1040. * and quotaoffs don't race. (Quotachecks happen at mount time only).
  1041. */
  1042. if (XFS_IS_UQUOTA_ON(mp)) {
  1043. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
  1044. XFS_DQ_USER, nblks, rtblks);
  1045. if (error)
  1046. goto error0;
  1047. }
  1048. if (XFS_IS_GQUOTA_ON(mp)) {
  1049. error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
  1050. XFS_DQ_GROUP, nblks, rtblks);
  1051. if (error)
  1052. goto error0;
  1053. }
  1054. if (XFS_IS_PQUOTA_ON(mp)) {
  1055. error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
  1056. XFS_DQ_PROJ, nblks, rtblks);
  1057. if (error)
  1058. goto error0;
  1059. }
  1060. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1061. IRELE(ip);
  1062. *res = BULKSTAT_RV_DIDONE;
  1063. return 0;
  1064. error0:
  1065. xfs_iunlock(ip, XFS_ILOCK_EXCL);
  1066. IRELE(ip);
  1067. *res = BULKSTAT_RV_GIVEUP;
  1068. return error;
  1069. }
  1070. STATIC int
  1071. xfs_qm_flush_one(
  1072. struct xfs_dquot *dqp,
  1073. void *data)
  1074. {
  1075. struct xfs_mount *mp = dqp->q_mount;
  1076. struct list_head *buffer_list = data;
  1077. struct xfs_buf *bp = NULL;
  1078. int error = 0;
  1079. xfs_dqlock(dqp);
  1080. if (dqp->dq_flags & XFS_DQ_FREEING)
  1081. goto out_unlock;
  1082. if (!XFS_DQ_IS_DIRTY(dqp))
  1083. goto out_unlock;
  1084. /*
  1085. * The only way the dquot is already flush locked by the time quotacheck
  1086. * gets here is if reclaim flushed it before the dqadjust walk dirtied
  1087. * it for the final time. Quotacheck collects all dquot bufs in the
  1088. * local delwri queue before dquots are dirtied, so reclaim can't have
  1089. * possibly queued it for I/O. The only way out is to push the buffer to
  1090. * cycle the flush lock.
  1091. */
  1092. if (!xfs_dqflock_nowait(dqp)) {
  1093. /* buf is pinned in-core by delwri list */
  1094. DEFINE_SINGLE_BUF_MAP(map, dqp->q_blkno,
  1095. mp->m_quotainfo->qi_dqchunklen);
  1096. bp = _xfs_buf_find(mp->m_ddev_targp, &map, 1, 0, NULL);
  1097. if (!bp) {
  1098. error = -EINVAL;
  1099. goto out_unlock;
  1100. }
  1101. xfs_buf_unlock(bp);
  1102. xfs_buf_delwri_pushbuf(bp, buffer_list);
  1103. xfs_buf_rele(bp);
  1104. error = -EAGAIN;
  1105. goto out_unlock;
  1106. }
  1107. error = xfs_qm_dqflush(dqp, &bp);
  1108. if (error)
  1109. goto out_unlock;
  1110. xfs_buf_delwri_queue(bp, buffer_list);
  1111. xfs_buf_relse(bp);
  1112. out_unlock:
  1113. xfs_dqunlock(dqp);
  1114. return error;
  1115. }
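/*
 * xfs_qm_flush_one() is the per-dquot callback handed to
 * xfs_qm_dquot_walk() at the end of quotacheck; the dirty buffers it
 * queues are submitted in one go via xfs_buf_delwri_submit().
 */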
  1116. /*
  1117. * Walk thru all the filesystem inodes and construct a consistent view
  1118. * of the disk quota world. If the quotacheck fails, disable quotas.
  1119. */
  1120. STATIC int
  1121. xfs_qm_quotacheck(
  1122. xfs_mount_t *mp)
  1123. {
  1124. int done, count, error, error2;
  1125. xfs_ino_t lastino;
  1126. size_t structsz;
  1127. uint flags;
  1128. LIST_HEAD (buffer_list);
  1129. struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
  1130. struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
  1131. struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
  1132. count = INT_MAX;
  1133. structsz = 1;
  1134. lastino = 0;
  1135. flags = 0;
  1136. ASSERT(uip || gip || pip);
  1137. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1138. xfs_notice(mp, "Quotacheck needed: Please wait.");
  1139. /*
  1140. * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
  1141. * their counters to zero. We need a clean slate.
  1142. * We don't log our changes till later.
  1143. */
  1144. if (uip) {
  1145. error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
  1146. &buffer_list);
  1147. if (error)
  1148. goto error_return;
  1149. flags |= XFS_UQUOTA_CHKD;
  1150. }
  1151. if (gip) {
  1152. error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
  1153. &buffer_list);
  1154. if (error)
  1155. goto error_return;
  1156. flags |= XFS_GQUOTA_CHKD;
  1157. }
  1158. if (pip) {
  1159. error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
  1160. &buffer_list);
  1161. if (error)
  1162. goto error_return;
  1163. flags |= XFS_PQUOTA_CHKD;
  1164. }
  1165. do {
  1166. /*
  1167. * Iterate thru all the inodes in the file system,
  1168. * adjusting the corresponding dquot counters in core.
  1169. */
  1170. error = xfs_bulkstat(mp, &lastino, &count,
  1171. xfs_qm_dqusage_adjust,
  1172. structsz, NULL, &done);
  1173. if (error)
  1174. break;
  1175. } while (!done);
  1176. /*
  1177. * We've made all the changes that we need to make incore. Flush them
  1178. * down to disk buffers if everything was updated successfully.
  1179. */
  1180. if (XFS_IS_UQUOTA_ON(mp)) {
  1181. error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
  1182. &buffer_list);
  1183. }
  1184. if (XFS_IS_GQUOTA_ON(mp)) {
  1185. error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
  1186. &buffer_list);
  1187. if (!error)
  1188. error = error2;
  1189. }
  1190. if (XFS_IS_PQUOTA_ON(mp)) {
  1191. error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
  1192. &buffer_list);
  1193. if (!error)
  1194. error = error2;
  1195. }
  1196. error2 = xfs_buf_delwri_submit(&buffer_list);
  1197. if (!error)
  1198. error = error2;
  1199. /*
  1200. * We can get this error if we couldn't do a dquot allocation inside
  1201. * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
  1202. * dirty dquots that might be cached, we just want to get rid of them
  1203. * and turn quotaoff. The dquots won't be attached to any of the inodes
  1204. * at this point (because we intentionally didn't in dqget_noattach).
  1205. */
  1206. if (error) {
  1207. xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
  1208. goto error_return;
  1209. }
  1210. /*
  1211. * If one type of quotas is off, then it will lose its
  1212. * quotachecked status, since we won't be doing accounting for
  1213. * that type anymore.
  1214. */
  1215. mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
  1216. mp->m_qflags |= flags;
  1217. error_return:
  1218. xfs_buf_delwri_cancel(&buffer_list);
  1219. if (error) {
  1220. xfs_warn(mp,
  1221. "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
  1222. error);
  1223. /*
  1224. * We must turn off quotas.
  1225. */
  1226. ASSERT(mp->m_quotainfo != NULL);
  1227. xfs_qm_destroy_quotainfo(mp);
  1228. if (xfs_mount_reset_sbqflags(mp)) {
  1229. xfs_warn(mp,
  1230. "Quotacheck: Failed to reset quota flags.");
  1231. }
  1232. } else
  1233. xfs_notice(mp, "Quotacheck: Done.");
  1234. return error;
  1235. }
  1236. /*
  1237. * This is called from xfs_mountfs to start quotas and initialize all
  1238. * necessary data structures like quotainfo. This is also responsible for
  1239. * running a quotacheck as necessary. We are guaranteed that the superblock
  1240. * is consistently read in at this point.
  1241. *
  1242. * If we fail here, the mount will continue with quota turned off. We don't
  1243. * need to indicate success or failure at all.
  1244. */
  1245. void
  1246. xfs_qm_mount_quotas(
  1247. struct xfs_mount *mp)
  1248. {
  1249. int error = 0;
  1250. uint sbf;
  1251. /*
  1252. * If quotas on realtime volumes is not supported, we disable
  1253. * quotas immediately.
  1254. */
  1255. if (mp->m_sb.sb_rextents) {
  1256. xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
  1257. mp->m_qflags = 0;
  1258. goto write_changes;
  1259. }
  1260. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1261. /*
  1262. * Allocate the quotainfo structure inside the mount struct, and
  1263. * create quotainode(s), and change/rev superblock if necessary.
  1264. */
  1265. error = xfs_qm_init_quotainfo(mp);
  1266. if (error) {
  1267. /*
  1268. * We must turn off quotas.
  1269. */
  1270. ASSERT(mp->m_quotainfo == NULL);
  1271. mp->m_qflags = 0;
  1272. goto write_changes;
  1273. }
  1274. /*
  1275. * If any of the quotas are not consistent, do a quotacheck.
  1276. */
  1277. if (XFS_QM_NEED_QUOTACHECK(mp)) {
  1278. error = xfs_qm_quotacheck(mp);
  1279. if (error) {
  1280. /* Quotacheck failed and disabled quotas. */
  1281. return;
  1282. }
  1283. }
  1284. /*
  1285. * If one type of quotas is off, then it will lose its
  1286. * quotachecked status, since we won't be doing accounting for
  1287. * that type anymore.
  1288. */
  1289. if (!XFS_IS_UQUOTA_ON(mp))
  1290. mp->m_qflags &= ~XFS_UQUOTA_CHKD;
  1291. if (!XFS_IS_GQUOTA_ON(mp))
  1292. mp->m_qflags &= ~XFS_GQUOTA_CHKD;
  1293. if (!XFS_IS_PQUOTA_ON(mp))
  1294. mp->m_qflags &= ~XFS_PQUOTA_CHKD;
  1295. write_changes:
  1296. /*
  1297. * We actually don't have to acquire the m_sb_lock at all.
  1298. * This can only be called from mount, and that's single threaded. XXX
  1299. */
  1300. spin_lock(&mp->m_sb_lock);
  1301. sbf = mp->m_sb.sb_qflags;
  1302. mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
  1303. spin_unlock(&mp->m_sb_lock);
  1304. if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
  1305. if (xfs_sync_sb(mp, false)) {
  1306. /*
  1307. * We could only have been turning quotas off.
  1308. * We aren't in very good shape actually because
  1309. * the incore structures are convinced that quotas are
  1310. * off, but the on disk superblock doesn't know that !
  1311. */
  1312. ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
  1313. xfs_alert(mp, "%s: Superblock update failed!",
  1314. __func__);
  1315. }
  1316. }
  1317. if (error) {
  1318. xfs_warn(mp, "Failed to initialize disk quotas.");
  1319. return;
  1320. }
  1321. }
  1322. /*
  1323. * This is called after the superblock has been read in and we're ready to
  1324. * iget the quota inodes.
  1325. */
  1326. STATIC int
  1327. xfs_qm_init_quotainos(
  1328. xfs_mount_t *mp)
  1329. {
  1330. struct xfs_inode *uip = NULL;
  1331. struct xfs_inode *gip = NULL;
  1332. struct xfs_inode *pip = NULL;
  1333. int error;
  1334. uint flags = 0;
  1335. ASSERT(mp->m_quotainfo);
  1336. /*
  1337. * Get the uquota and gquota inodes
  1338. */
  1339. if (xfs_sb_version_hasquota(&mp->m_sb)) {
  1340. if (XFS_IS_UQUOTA_ON(mp) &&
  1341. mp->m_sb.sb_uquotino != NULLFSINO) {
  1342. ASSERT(mp->m_sb.sb_uquotino > 0);
  1343. error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
  1344. 0, 0, &uip);
  1345. if (error)
  1346. return error;
  1347. }
  1348. if (XFS_IS_GQUOTA_ON(mp) &&
  1349. mp->m_sb.sb_gquotino != NULLFSINO) {
  1350. ASSERT(mp->m_sb.sb_gquotino > 0);
  1351. error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
  1352. 0, 0, &gip);
  1353. if (error)
  1354. goto error_rele;
  1355. }
  1356. if (XFS_IS_PQUOTA_ON(mp) &&
  1357. mp->m_sb.sb_pquotino != NULLFSINO) {
  1358. ASSERT(mp->m_sb.sb_pquotino > 0);
  1359. error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
  1360. 0, 0, &pip);
  1361. if (error)
  1362. goto error_rele;
  1363. }
  1364. } else {
  1365. flags |= XFS_QMOPT_SBVERSION;
  1366. }
  1367. /*
  1368. * Create the three inodes, if they don't exist already. The changes
  1369. * made above will get added to a transaction and logged in one of
  1370. * the qino_alloc calls below. If the device is readonly,
  1371. * temporarily switch to read-write to do this.
  1372. */
  1373. if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
  1374. error = xfs_qm_qino_alloc(mp, &uip,
  1375. flags | XFS_QMOPT_UQUOTA);
  1376. if (error)
  1377. goto error_rele;
  1378. flags &= ~XFS_QMOPT_SBVERSION;
  1379. }
  1380. if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
  1381. error = xfs_qm_qino_alloc(mp, &gip,
  1382. flags | XFS_QMOPT_GQUOTA);
  1383. if (error)
  1384. goto error_rele;
  1385. flags &= ~XFS_QMOPT_SBVERSION;
  1386. }
  1387. if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
  1388. error = xfs_qm_qino_alloc(mp, &pip,
  1389. flags | XFS_QMOPT_PQUOTA);
  1390. if (error)
  1391. goto error_rele;
  1392. }
  1393. mp->m_quotainfo->qi_uquotaip = uip;
  1394. mp->m_quotainfo->qi_gquotaip = gip;
  1395. mp->m_quotainfo->qi_pquotaip = pip;
  1396. return 0;
  1397. error_rele:
  1398. if (uip)
  1399. IRELE(uip);
  1400. if (gip)
  1401. IRELE(gip);
  1402. if (pip)
  1403. IRELE(pip);
  1404. return error;
  1405. }
  1406. STATIC void
  1407. xfs_qm_dqfree_one(
  1408. struct xfs_dquot *dqp)
  1409. {
  1410. struct xfs_mount *mp = dqp->q_mount;
  1411. struct xfs_quotainfo *qi = mp->m_quotainfo;
  1412. mutex_lock(&qi->qi_tree_lock);
  1413. radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
  1414. be32_to_cpu(dqp->q_core.d_id));
  1415. qi->qi_dquots--;
  1416. mutex_unlock(&qi->qi_tree_lock);
  1417. xfs_qm_dqdestroy(dqp);
  1418. }
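/*
 * xfs_qm_dqfree_one() is called from xfs_qm_shrink_scan() for each
 * dquot that xfs_qm_dquot_isolate() moved onto the dispose list.
 */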
  1419. /* --------------- utility functions for vnodeops ---------------- */
  1420. /*
  1421. * Given an inode, a uid, gid and prid make sure that we have
  1422. * allocated relevant dquot(s) on disk, and that we won't exceed inode
  1423. * quotas by creating this file.
  1424. * This also attaches dquot(s) to the given inode after locking it,
  1425. * and returns the dquots corresponding to the uid and/or gid.
  1426. *
  1427. * in : inode (unlocked)
  1428. * out : udquot, gdquot with references taken and unlocked
  1429. */
  1430. int
  1431. xfs_qm_vop_dqalloc(
  1432. struct xfs_inode *ip,
  1433. xfs_dqid_t uid,
  1434. xfs_dqid_t gid,
  1435. prid_t prid,
  1436. uint flags,
  1437. struct xfs_dquot **O_udqpp,
  1438. struct xfs_dquot **O_gdqpp,
  1439. struct xfs_dquot **O_pdqpp)
  1440. {
  1441. struct xfs_mount *mp = ip->i_mount;
  1442. struct xfs_dquot *uq = NULL;
  1443. struct xfs_dquot *gq = NULL;
  1444. struct xfs_dquot *pq = NULL;
  1445. int error;
  1446. uint lockflags;
  1447. if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
  1448. return 0;
  1449. lockflags = XFS_ILOCK_EXCL;
  1450. xfs_ilock(ip, lockflags);
  1451. if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
  1452. gid = ip->i_d.di_gid;
  1453. /*
  1454. * Attach the dquot(s) to this inode, doing a dquot allocation
  1455. * if necessary. The dquot(s) will not be locked.
  1456. */
  1457. if (XFS_NOT_DQATTACHED(mp, ip)) {
  1458. error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
  1459. if (error) {
  1460. xfs_iunlock(ip, lockflags);
  1461. return error;
  1462. }
  1463. }
  1464. if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
  1465. if (ip->i_d.di_uid != uid) {
  1466. /*
  1467. * What we need is the dquot that has this uid, and
  1468. * if we send the inode to dqget, the uid of the inode
  1469. * takes priority over what's sent in the uid argument.
  1470. * We must unlock inode here before calling dqget if
  1471. * we're not sending the inode, because otherwise
  1472. * we'll deadlock by doing trans_reserve while
  1473. * holding ilock.
  1474. */
  1475. xfs_iunlock(ip, lockflags);
  1476. error = xfs_qm_dqget(mp, NULL, uid,
  1477. XFS_DQ_USER,
  1478. XFS_QMOPT_DQALLOC |
  1479. XFS_QMOPT_DOWARN,
  1480. &uq);
  1481. if (error) {
  1482. ASSERT(error != -ENOENT);
  1483. return error;
  1484. }
  1485. /*
  1486. * Get the ilock in the right order.
  1487. */
  1488. xfs_dqunlock(uq);
  1489. lockflags = XFS_ILOCK_SHARED;
  1490. xfs_ilock(ip, lockflags);
  1491. } else {
  1492. /*
  1493. * Take an extra reference, because we'll return
  1494. * this to caller
  1495. */
  1496. ASSERT(ip->i_udquot);
  1497. uq = xfs_qm_dqhold(ip->i_udquot);
  1498. }
  1499. }
  1500. if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
  1501. if (ip->i_d.di_gid != gid) {
  1502. xfs_iunlock(ip, lockflags);
  1503. error = xfs_qm_dqget(mp, NULL, gid,
  1504. XFS_DQ_GROUP,
  1505. XFS_QMOPT_DQALLOC |
  1506. XFS_QMOPT_DOWARN,
  1507. &gq);
  1508. if (error) {
  1509. ASSERT(error != -ENOENT);
  1510. goto error_rele;
  1511. }
  1512. xfs_dqunlock(gq);
  1513. lockflags = XFS_ILOCK_SHARED;
  1514. xfs_ilock(ip, lockflags);
  1515. } else {
  1516. ASSERT(ip->i_gdquot);
  1517. gq = xfs_qm_dqhold(ip->i_gdquot);
  1518. }
  1519. }
  1520. if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
  1521. if (xfs_get_projid(ip) != prid) {
  1522. xfs_iunlock(ip, lockflags);
  1523. error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
  1524. XFS_DQ_PROJ,
  1525. XFS_QMOPT_DQALLOC |
  1526. XFS_QMOPT_DOWARN,
  1527. &pq);
  1528. if (error) {
  1529. ASSERT(error != -ENOENT);
  1530. goto error_rele;
  1531. }
  1532. xfs_dqunlock(pq);
  1533. lockflags = XFS_ILOCK_SHARED;
  1534. xfs_ilock(ip, lockflags);
  1535. } else {
  1536. ASSERT(ip->i_pdquot);
  1537. pq = xfs_qm_dqhold(ip->i_pdquot);
  1538. }
  1539. }
  1540. if (uq)
  1541. trace_xfs_dquot_dqalloc(ip);
  1542. xfs_iunlock(ip, lockflags);
  1543. if (O_udqpp)
  1544. *O_udqpp = uq;
  1545. else
  1546. xfs_qm_dqrele(uq);
  1547. if (O_gdqpp)
  1548. *O_gdqpp = gq;
  1549. else
  1550. xfs_qm_dqrele(gq);
  1551. if (O_pdqpp)
  1552. *O_pdqpp = pq;
  1553. else
  1554. xfs_qm_dqrele(pq);
  1555. return 0;
  1556. error_rele:
  1557. xfs_qm_dqrele(gq);
  1558. xfs_qm_dqrele(uq);
  1559. return error;
  1560. }
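/*
 * Note (assumption): callers outside this file typically pass the dquot
 * references returned here on to xfs_qm_vop_create_dqattach() or
 * xfs_qm_vop_chown_reserve() below once their transaction is set up.
 */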
  1561. /*
  1562. * Actually transfer ownership, and do dquot modifications.
  1563. * These were already reserved.
  1564. */
  1565. xfs_dquot_t *
  1566. xfs_qm_vop_chown(
  1567. xfs_trans_t *tp,
  1568. xfs_inode_t *ip,
  1569. xfs_dquot_t **IO_olddq,
  1570. xfs_dquot_t *newdq)
  1571. {
  1572. xfs_dquot_t *prevdq;
  1573. uint bfield = XFS_IS_REALTIME_INODE(ip) ?
  1574. XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
  1575. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  1576. ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
  1577. /* old dquot */
  1578. prevdq = *IO_olddq;
  1579. ASSERT(prevdq);
  1580. ASSERT(prevdq != newdq);
  1581. xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
  1582. xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
  1583. /* the sparkling new dquot */
  1584. xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
  1585. xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
  1586. /*
  1587. * Take an extra reference, because the inode is going to keep
  1588. * this dquot pointer even after the trans_commit.
  1589. */
  1590. *IO_olddq = xfs_qm_dqhold(newdq);
  1591. return prevdq;
  1592. }
  1593. /*
  1594. * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
  1595. */
  1596. int
  1597. xfs_qm_vop_chown_reserve(
  1598. struct xfs_trans *tp,
  1599. struct xfs_inode *ip,
  1600. struct xfs_dquot *udqp,
  1601. struct xfs_dquot *gdqp,
  1602. struct xfs_dquot *pdqp,
  1603. uint flags)
  1604. {
  1605. struct xfs_mount *mp = ip->i_mount;
  1606. uint delblks, blkflags, prjflags = 0;
  1607. struct xfs_dquot *udq_unres = NULL;
  1608. struct xfs_dquot *gdq_unres = NULL;
  1609. struct xfs_dquot *pdq_unres = NULL;
  1610. struct xfs_dquot *udq_delblks = NULL;
  1611. struct xfs_dquot *gdq_delblks = NULL;
  1612. struct xfs_dquot *pdq_delblks = NULL;
  1613. int error;
  1614. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
  1615. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1616. delblks = ip->i_delayed_blks;
  1617. blkflags = XFS_IS_REALTIME_INODE(ip) ?
  1618. XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
  1619. if (XFS_IS_UQUOTA_ON(mp) && udqp &&
  1620. ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
  1621. udq_delblks = udqp;
  1622. /*
  1623. * If there are delayed allocation blocks, then we have to
  1624. * unreserve those from the old dquot, and add them to the
  1625. * new dquot.
  1626. */
  1627. if (delblks) {
  1628. ASSERT(ip->i_udquot);
  1629. udq_unres = ip->i_udquot;
  1630. }
  1631. }
  1632. if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
  1633. ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
  1634. gdq_delblks = gdqp;
  1635. if (delblks) {
  1636. ASSERT(ip->i_gdquot);
  1637. gdq_unres = ip->i_gdquot;
  1638. }
  1639. }
  1640. if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
  1641. xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
  1642. prjflags = XFS_QMOPT_ENOSPC;
  1643. pdq_delblks = pdqp;
  1644. if (delblks) {
  1645. ASSERT(ip->i_pdquot);
  1646. pdq_unres = ip->i_pdquot;
  1647. }
  1648. }
  1649. error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
  1650. udq_delblks, gdq_delblks, pdq_delblks,
  1651. ip->i_d.di_nblocks, 1,
  1652. flags | blkflags | prjflags);
  1653. if (error)
  1654. return error;
  1655. /*
  1656. * Do the delayed blks reservations/unreservations now. Since these
  1657. * are done without the help of a transaction, if a reservation fails
  1658. * its previous reservations won't be automatically undone by trans
  1659. * code. So, we have to do it manually here.
  1660. */
  1661. if (delblks) {
  1662. /*
  1663. * Do the reservations first. Unreservation can't fail.
  1664. */
  1665. ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
  1666. ASSERT(udq_unres || gdq_unres || pdq_unres);
  1667. error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
  1668. udq_delblks, gdq_delblks, pdq_delblks,
  1669. (xfs_qcnt_t)delblks, 0,
  1670. flags | blkflags | prjflags);
  1671. if (error)
  1672. return error;
  1673. xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
  1674. udq_unres, gdq_unres, pdq_unres,
  1675. -((xfs_qcnt_t)delblks), 0, blkflags);
  1676. }
  1677. return 0;
  1678. }
  1679. int
  1680. xfs_qm_vop_rename_dqattach(
  1681. struct xfs_inode **i_tab)
  1682. {
  1683. struct xfs_mount *mp = i_tab[0]->i_mount;
  1684. int i;
  1685. if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
  1686. return 0;
  1687. for (i = 0; (i < 4 && i_tab[i]); i++) {
  1688. struct xfs_inode *ip = i_tab[i];
  1689. int error;
  1690. /*
  1691. * Watch out for duplicate entries in the table.
  1692. */
  1693. if (i == 0 || ip != i_tab[i-1]) {
  1694. if (XFS_NOT_DQATTACHED(mp, ip)) {
  1695. error = xfs_qm_dqattach(ip, 0);
  1696. if (error)
  1697. return error;
  1698. }
  1699. }
  1700. }
  1701. return 0;
  1702. }
  1703. void
  1704. xfs_qm_vop_create_dqattach(
  1705. struct xfs_trans *tp,
  1706. struct xfs_inode *ip,
  1707. struct xfs_dquot *udqp,
  1708. struct xfs_dquot *gdqp,
  1709. struct xfs_dquot *pdqp)
  1710. {
  1711. struct xfs_mount *mp = tp->t_mountp;
  1712. if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
  1713. return;
  1714. ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
  1715. ASSERT(XFS_IS_QUOTA_RUNNING(mp));
  1716. if (udqp && XFS_IS_UQUOTA_ON(mp)) {
  1717. ASSERT(ip->i_udquot == NULL);
  1718. ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
  1719. ip->i_udquot = xfs_qm_dqhold(udqp);
  1720. xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
  1721. }
  1722. if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
  1723. ASSERT(ip->i_gdquot == NULL);
  1724. ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
  1725. ip->i_gdquot = xfs_qm_dqhold(gdqp);
  1726. xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
  1727. }
  1728. if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
  1729. ASSERT(ip->i_pdquot == NULL);
  1730. ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
  1731. ip->i_pdquot = xfs_qm_dqhold(pdqp);
  1732. xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
  1733. }
  1734. }