
/fs/xfs/xfs_qm.c

https://bitbucket.org/digetx/picasso-kernel
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int      xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int      xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int      xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH     32

STATIC int
xfs_qm_dquot_walk(
        struct xfs_mount        *mp,
        int                     type,
        int                     (*execute)(struct xfs_dquot *dqp, void *data),
        void                    *data)
{
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        struct radix_tree_root  *tree = XFS_DQUOT_TREE(qi, type);
        uint32_t                next_index;
        int                     last_error = 0;
        int                     skipped;
        int                     nr_found;

restart:
        skipped = 0;
        next_index = 0;
        nr_found = 0;

        while (1) {
                struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
                int             error = 0;
                int             i;

                mutex_lock(&qi->qi_tree_lock);
                nr_found = radix_tree_gang_lookup(tree, (void **)batch,
                                        next_index, XFS_DQ_LOOKUP_BATCH);
                if (!nr_found) {
                        mutex_unlock(&qi->qi_tree_lock);
                        break;
                }

                for (i = 0; i < nr_found; i++) {
                        struct xfs_dquot *dqp = batch[i];

                        next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

                        error = execute(batch[i], data);
                        if (error == EAGAIN) {
                                skipped++;
                                continue;
                        }
                        if (error && last_error != EFSCORRUPTED)
                                last_error = error;
                }

                mutex_unlock(&qi->qi_tree_lock);

                /* bail out if the filesystem is corrupted. */
                if (last_error == EFSCORRUPTED) {
                        skipped = 0;
                        break;
                }
        }

        if (skipped) {
                delay(1);
                goto restart;
        }

        return last_error;
}
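
/*
 * Illustrative note (not upstream code): a typical walker invocation is
 * what xfs_qm_dqpurge_all() below does, e.g.
 *
 *      xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
 *
 * The execute callback returns 0 on success, EAGAIN to have the dquot
 * skipped and the whole walk retried after a short delay, or another
 * errno to be reported to the caller (EFSCORRUPTED aborts the walk).
 */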

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        xfs_dqlock(dqp);
        if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
                xfs_dqunlock(dqp);
                return EAGAIN;
        }

        dqp->dq_flags |= XFS_DQ_FREEING;

        xfs_dqflock(dqp);

        /*
         * If we are turning this type of quotas off, we don't care
         * about the dirty metadata sitting in this dquot. OTOH, if
         * we're unmounting, we do care, so we flush it and wait.
         */
        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;
                int             error;

                /*
                 * We don't care about getting disk errors here. We need
                 * to purge this dquot anyway, so we go ahead regardless.
                 */
                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                __func__, dqp);
                } else {
                        error = xfs_bwrite(bp);
                        xfs_buf_relse(bp);
                }
                xfs_dqflock(dqp);
        }

        ASSERT(atomic_read(&dqp->q_pincount) == 0);
        ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
               !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

        xfs_dqfunlock(dqp);
        xfs_dqunlock(dqp);

        radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));
        qi->qi_dquots--;

        /*
         * We move dquots to the freelist as soon as their reference count
         * hits zero, so it really should be on the freelist here.
         */
        mutex_lock(&qi->qi_lru_lock);
        ASSERT(!list_empty(&dqp->q_lru));
        list_del_init(&dqp->q_lru);
        qi->qi_lru_count--;
        XFS_STATS_DEC(xs_qm_dquot_unused);
        mutex_unlock(&qi->qi_lru_lock);

        xfs_qm_dqdestroy(dqp);
        return 0;
}

/*
 * Release the group or project dquot pointers the user dquots may be
 * carrying around as a hint, and proceed to purge the user dquot cache if
 * requested.
 */
STATIC int
xfs_qm_dqpurge_hints(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct xfs_dquot        *gdqp = NULL;
        uint                    flags = *((uint *)data);

        xfs_dqlock(dqp);
        if (dqp->dq_flags & XFS_DQ_FREEING) {
                xfs_dqunlock(dqp);
                return EAGAIN;
        }

        /* If this quota has a hint attached, prepare for releasing it now */
        gdqp = dqp->q_gdquot;
        if (gdqp)
                dqp->q_gdquot = NULL;

        xfs_dqunlock(dqp);

        if (gdqp)
                xfs_qm_dqrele(gdqp);

        if (flags & XFS_QMOPT_UQUOTA)
                return xfs_qm_dqpurge(dqp, NULL);

        return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
        struct xfs_mount        *mp,
        uint                    flags)
{
        /*
         * We have to release the group/project dquot hint(s) from the user
         * dquots first, if they are there; otherwise we would run into an
         * infinite loop while walking through the radix tree to purge the
         * other types of dquots, since their refcount is not zero as long
         * as a user dquot refers to them as a hint.
         *
         * Calling the special xfs_qm_dqpurge_hints() will also end up going
         * through the general xfs_qm_dqpurge() for the user dquot cache if
         * requested.
         */
        xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);

        if (flags & XFS_QMOPT_GQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
        if (flags & XFS_QMOPT_PQUOTA)
                xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
        struct xfs_mount        *mp)
{
        if (mp->m_quotainfo) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                xfs_qm_destroy_quotainfo(mp);
        }
}

/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
        xfs_mount_t     *mp)
{
        int             error = 0;
        uint            sbf;

        /*
         * If quotas on realtime volumes are not supported, we disable
         * quotas immediately.
         */
        if (mp->m_sb.sb_rextents) {
                xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
                mp->m_qflags = 0;
                goto write_changes;
        }

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * Allocate the quotainfo structure inside the mount struct, and
         * create quotainode(s), and change/rev superblock if necessary.
         */
        error = xfs_qm_init_quotainfo(mp);
        if (error) {
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo == NULL);
                mp->m_qflags = 0;
                goto write_changes;
        }
        /*
         * If any of the quotas are not consistent, do a quotacheck.
         */
        if (XFS_QM_NEED_QUOTACHECK(mp)) {
                error = xfs_qm_quotacheck(mp);
                if (error) {
                        /* Quotacheck failed and disabled quotas. */
                        return;
                }
        }
        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        if (!XFS_IS_UQUOTA_ON(mp))
                mp->m_qflags &= ~XFS_UQUOTA_CHKD;
        if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
                mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
        /*
         * We actually don't have to acquire the m_sb_lock at all.
         * This can only be called from mount, and that's single threaded. XXX
         */
        spin_lock(&mp->m_sb_lock);
        sbf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
                if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
                        /*
                         * We could only have been turning quotas off.
                         * We aren't in very good shape actually because
                         * the incore structures are convinced that quotas are
                         * off, but the on disk superblock doesn't know that!
                         */
                        ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
                        xfs_alert(mp, "%s: Superblock update failed!",
                                __func__);
                }
        }

        if (error) {
                xfs_warn(mp, "Failed to initialize disk quotas.");
                return;
        }
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
        xfs_mount_t     *mp)
{
        /*
         * Release the dquots that root inode, et al might be holding,
         * before we flush quotas and blow away the quotainfo structure.
         */
        ASSERT(mp->m_rootip);
        xfs_qm_dqdetach(mp->m_rootip);
        if (mp->m_rbmip)
                xfs_qm_dqdetach(mp->m_rbmip);
        if (mp->m_rsumip)
                xfs_qm_dqdetach(mp->m_rsumip);

        /*
         * Release the quota inodes.
         */
        if (mp->m_quotainfo) {
                if (mp->m_quotainfo->qi_uquotaip) {
                        IRELE(mp->m_quotainfo->qi_uquotaip);
                        mp->m_quotainfo->qi_uquotaip = NULL;
                }
                if (mp->m_quotainfo->qi_gquotaip) {
                        IRELE(mp->m_quotainfo->qi_gquotaip);
                        mp->m_quotainfo->qi_gquotaip = NULL;
                }
        }
}

STATIC int
xfs_qm_dqattach_one(
        xfs_inode_t     *ip,
        xfs_dqid_t      id,
        uint            type,
        uint            doalloc,
        xfs_dquot_t     *udqhint, /* hint */
        xfs_dquot_t     **IO_idqpp)
{
        xfs_dquot_t     *dqp;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        error = 0;

        /*
         * See if we already have it in the inode itself. IO_idqpp is
         * &i_udquot or &i_gdquot. This made the code look weird, but
         * made the logic a lot simpler.
         */
        dqp = *IO_idqpp;
        if (dqp) {
                trace_xfs_dqattach_found(dqp);
                return 0;
        }

        /*
         * udqhint is the i_udquot field in inode, and is non-NULL only
         * when the type arg is group/project. Its purpose is to save a
         * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
         * the user dquot.
         */
        if (udqhint) {
                ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
                xfs_dqlock(udqhint);

                /*
                 * No need to take dqlock to look at the id.
                 *
                 * The ID can't change until it gets reclaimed, and it won't
                 * be reclaimed as long as we have a ref from inode and we
                 * hold the ilock.
                 */
                dqp = udqhint->q_gdquot;
                if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
                        ASSERT(*IO_idqpp == NULL);

                        *IO_idqpp = xfs_qm_dqhold(dqp);
                        xfs_dqunlock(udqhint);
                        return 0;
                }

                /*
                 * We can't hold a dquot lock when we call the dqget code.
                 * We'll deadlock in no time, because of (not conforming to)
                 * lock ordering - the inodelock comes before any dquot lock,
                 * and we may drop and reacquire the ilock in xfs_qm_dqget().
                 */
                xfs_dqunlock(udqhint);
        }

        /*
         * Find the dquot from somewhere. This bumps the reference count of
         * dquot and returns it locked.  This can return ENOENT if dquot didn't
         * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
         * turned off suddenly.
         */
        error = xfs_qm_dqget(ip->i_mount, ip, id, type,
                             doalloc | XFS_QMOPT_DOWARN, &dqp);
        if (error)
                return error;

        trace_xfs_dqattach_get(dqp);

        /*
         * dqget may have dropped and re-acquired the ilock, but it guarantees
         * that the dquot returned is the one that should go in the inode.
         */
        *IO_idqpp = dqp;
        xfs_dqunlock(dqp);
        return 0;
}

/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
        xfs_dquot_t     *udq,
        xfs_dquot_t     *gdq)
{
        xfs_dquot_t     *tmp;

        xfs_dqlock(udq);

        tmp = udq->q_gdquot;
        if (tmp) {
                if (tmp == gdq)
                        goto done;

                udq->q_gdquot = NULL;
                xfs_qm_dqrele(tmp);
        }

        udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
        xfs_dqunlock(udq);
}
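
/*
 * Illustrative note (not upstream code): the hint set up above means a
 * later group/project attach for the same id can be satisfied straight
 * from udq->q_gdquot without another radix-tree lookup in xfs_qm_dqget();
 * see the udqhint fast path in xfs_qm_dqattach_one() above.
 */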

static bool
xfs_qm_need_dqattach(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;

        if (!XFS_IS_QUOTA_RUNNING(mp))
                return false;
        if (!XFS_IS_QUOTA_ON(mp))
                return false;
        if (!XFS_NOT_DQATTACHED(mp, ip))
                return false;
        if (ip->i_ino == mp->m_sb.sb_uquotino ||
            ip->i_ino == mp->m_sb.sb_gquotino)
                return false;
        return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
        xfs_inode_t     *ip,
        uint            flags)
{
        xfs_mount_t     *mp = ip->i_mount;
        uint            nquotas = 0;
        int             error = 0;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
                                                flags & XFS_QMOPT_DQALLOC,
                                                NULL, &ip->i_udquot);
                if (error)
                        goto done;
                nquotas++;
        }

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        if (XFS_IS_OQUOTA_ON(mp)) {
                error = XFS_IS_GQUOTA_ON(mp) ?
                        xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot) :
                        xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
                                                flags & XFS_QMOPT_DQALLOC,
                                                ip->i_udquot, &ip->i_gdquot);
                /*
                 * Don't worry about the udquot that we may have
                 * attached above. It'll get detached, if not already.
                 */
                if (error)
                        goto done;
                nquotas++;
        }

        /*
         * Attach this group quota to the user quota as a hint.
         * This WON'T, in general, result in a thrash.
         */
        if (nquotas == 2) {
                ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
                ASSERT(ip->i_udquot);
                ASSERT(ip->i_gdquot);

                /*
                 * We do not have i_udquot locked at this point, but this check
                 * is OK since we don't depend on the i_gdquot to be accurate
                 * 100% all the time. It is just a hint, and this will
                 * succeed in general.
                 */
                if (ip->i_udquot->q_gdquot != ip->i_gdquot)
                        xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
        }

 done:
#ifdef DEBUG
        if (!error) {
                if (XFS_IS_UQUOTA_ON(mp))
                        ASSERT(ip->i_udquot);
                if (XFS_IS_OQUOTA_ON(mp))
                        ASSERT(ip->i_gdquot);
        }
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
        return error;
}

int
xfs_qm_dqattach(
        struct xfs_inode        *ip,
        uint                    flags)
{
        int                     error;

        if (!xfs_qm_need_dqattach(ip))
                return 0;

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        error = xfs_qm_dqattach_locked(ip, flags);
        xfs_iunlock(ip, XFS_ILOCK_EXCL);

        return error;
}

/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
        xfs_inode_t     *ip)
{
        if (!(ip->i_udquot || ip->i_gdquot))
                return;

        trace_xfs_dquot_dqdetach(ip);

        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
        ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
        if (ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qinf;
        int             error;
        xfs_dquot_t     *dqp;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

        /*
         * See if quotainodes are setup, and if not, allocate them,
         * and change the superblock accordingly.
         */
        if ((error = xfs_qm_init_quotainos(mp))) {
                kmem_free(qinf);
                mp->m_quotainfo = NULL;
                return error;
        }

        INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
        INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
        mutex_init(&qinf->qi_tree_lock);

        INIT_LIST_HEAD(&qinf->qi_lru_list);
        qinf->qi_lru_count = 0;
        mutex_init(&qinf->qi_lru_lock);

        /* mutex used to serialize quotaoffs */
        mutex_init(&qinf->qi_quotaofflock);

        /* Precalc some constants */
        qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        ASSERT(qinf->qi_dqchunklen);
        qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
        do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));
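
        /*
         * Worked example (illustrative, not upstream code): with 4 KiB
         * filesystem blocks and a one-block dquot cluster, qi_dqchunklen
         * is 4096 bytes expressed in 512-byte basic blocks (8), and
         * qi_dqperchunk is 4096 / sizeof(xfs_dqblk_t) on-disk dquot
         * records per chunk.  The exact counts depend on the geometry of
         * the filesystem being mounted.
         */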

        mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

        /*
         * We try to get the limits from the superuser's limits fields.
         * This is quite hacky, but it is standard quota practice.
         *
         * We look at the USR dquot with id == 0 first, but if user quotas
         * are not enabled we goto the GRP dquot with id == 0.
         * We don't really care to keep separate default limits for user
         * and group quotas, at least not at this point.
         *
         * Since we may not have done a quotacheck by this point, just read
         * the dquot without attaching it to any hashtables or lists.
         */
        error = xfs_qm_dqread(mp, 0,
                        XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
                         (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
                          XFS_DQ_PROJ),
                        XFS_QMOPT_DOWARN, &dqp);
        if (!error) {
                xfs_disk_dquot_t        *ddqp = &dqp->q_core;

                /*
                 * The warnings and timers set the grace period given to
                 * a user or group before he or she can no longer write.
                 * If it is zero, a default is used.
                 */
                qinf->qi_btimelimit = ddqp->d_btimer ?
                        be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = ddqp->d_itimer ?
                        be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
                        be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = ddqp->d_bwarns ?
                        be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = ddqp->d_iwarns ?
                        be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
                        be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
                qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
                qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
                qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
                qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
                qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
                qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

                xfs_qm_dqdestroy(dqp);
        } else {
                qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
                qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
                qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
                qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
                qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
                qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
        }

        qinf->qi_shrinker.shrink = xfs_qm_shake;
        qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
        register_shrinker(&qinf->qi_shrinker);
        return 0;
}

/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
        xfs_mount_t     *mp)
{
        xfs_quotainfo_t *qi;

        qi = mp->m_quotainfo;
        ASSERT(qi != NULL);

        unregister_shrinker(&qi->qi_shrinker);

        if (qi->qi_uquotaip) {
                IRELE(qi->qi_uquotaip);
                qi->qi_uquotaip = NULL; /* paranoia */
        }
        if (qi->qi_gquotaip) {
                IRELE(qi->qi_gquotaip);
                qi->qi_gquotaip = NULL;
        }
        mutex_destroy(&qi->qi_quotaofflock);
        kmem_free(qi);
        mp->m_quotainfo = NULL;
}

/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
        xfs_mount_t     *mp,
        xfs_inode_t     **ip,
        __int64_t       sbfields,
        uint            flags)
{
        xfs_trans_t     *tp;
        int             error;
        int             committed;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
        if ((error = xfs_trans_reserve(tp,
                                      XFS_QM_QINOCREATE_SPACE_RES(mp),
                                      XFS_CREATE_LOG_RES(mp), 0,
                                      XFS_TRANS_PERM_LOG_RES,
                                      XFS_CREATE_LOG_COUNT))) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                 XFS_TRANS_ABORT);
                return error;
        }

        /*
         * Make the changes in the superblock, and log those too.
         * sbfields arg may contain fields other than *QUOTINO;
         * VERSIONNUM for example.
         */
        spin_lock(&mp->m_sb_lock);
        if (flags & XFS_QMOPT_SBVERSION) {
                ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
                ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                                   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
                       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                        XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

                xfs_sb_version_addquota(&mp->m_sb);
                mp->m_sb.sb_uquotino = NULLFSINO;
                mp->m_sb.sb_gquotino = NULLFSINO;

                /* qflags will get updated _after_ quotacheck */
                mp->m_sb.sb_qflags = 0;
        }
        if (flags & XFS_QMOPT_UQUOTA)
                mp->m_sb.sb_uquotino = (*ip)->i_ino;
        else
                mp->m_sb.sb_gquotino = (*ip)->i_ino;
        spin_unlock(&mp->m_sb_lock);
        xfs_mod_sb(tp, sbfields);

        if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
                xfs_alert(mp, "%s failed (error %d)!", __func__, error);
                return error;
        }
        return 0;
}

STATIC void
xfs_qm_reset_dqcounts(
        xfs_mount_t     *mp,
        xfs_buf_t       *bp,
        xfs_dqid_t      id,
        uint            type)
{
        xfs_disk_dquot_t        *ddq;
        int                     j;

        trace_xfs_reset_dqcounts(bp, _RET_IP_);

        /*
         * Reset all counters and timers. They'll be
         * started afresh by xfs_qm_quotacheck.
         */
#ifdef DEBUG
        j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
        do_div(j, sizeof(xfs_dqblk_t));
        ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
        ddq = bp->b_addr;
        for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
                /*
                 * Do a sanity check, and if needed, repair the dqblk. Don't
                 * output any warnings because it's perfectly possible to
                 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
                 */
                (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
                                      "xfs_quotacheck");
                ddq->d_bcount = 0;
                ddq->d_icount = 0;
                ddq->d_rtbcount = 0;
                ddq->d_btimer = 0;
                ddq->d_itimer = 0;
                ddq->d_rtbtimer = 0;
                ddq->d_bwarns = 0;
                ddq->d_iwarns = 0;
                ddq->d_rtbwarns = 0;
                ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
        }
}
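
/*
 * Illustrative note (not upstream code): the final cast in the loop above
 * advances by sizeof(xfs_dqblk_t) rather than sizeof(xfs_disk_dquot_t),
 * because on-disk dquots are laid out as full xfs_dqblk_t records; only
 * the leading xfs_disk_dquot_t portion of each record is reset here.
 */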

STATIC int
xfs_qm_dqiter_bufs(
        struct xfs_mount        *mp,
        xfs_dqid_t              firstid,
        xfs_fsblock_t           bno,
        xfs_filblks_t           blkcnt,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_buf          *bp;
        int                     error;
        int                     type;

        ASSERT(blkcnt > 0);
        type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
                (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
        error = 0;

        /*
         * Blkcnt arg can be a very big number, and might even be
         * larger than the log itself. So, we have to break it up into
         * manageable-sized transactions.
         * Note that we don't start a permanent transaction here; we might
         * not be able to get a log reservation for the whole thing up front,
         * and we don't really care to either, because we just discard
         * everything if we were to crash in the middle of this loop.
         */
        while (blkcnt--) {
                error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
                              XFS_FSB_TO_DADDR(mp, bno),
                              mp->m_quotainfo->qi_dqchunklen, 0, &bp,
                              &xfs_dquot_buf_ops);
                if (error)
                        break;

                xfs_qm_reset_dqcounts(mp, bp, firstid, type);
                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);

                /* goto the next block. */
                bno++;
                firstid += mp->m_quotainfo->qi_dqperchunk;
        }

        return error;
}

/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
        struct xfs_mount        *mp,
        struct xfs_inode        *qip,
        uint                    flags,
        struct list_head        *buffer_list)
{
        struct xfs_bmbt_irec    *map;
        int                     i, nmaps;       /* number of map entries */
        int                     error;          /* return value */
        xfs_fileoff_t           lblkno;
        xfs_filblks_t           maxlblkcnt;
        xfs_dqid_t              firstid;
        xfs_fsblock_t           rablkno;
        xfs_filblks_t           rablkcnt;

        error = 0;
        /*
         * This looks racy, but we can't keep an inode lock across a
         * trans_reserve. But, this gets called during quotacheck, and that
         * happens only at mount time which is single threaded.
         */
        if (qip->i_d.di_nblocks == 0)
                return 0;

        map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

        lblkno = 0;
        maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
        do {
                nmaps = XFS_DQITER_MAP_SIZE;
                /*
                 * We aren't changing the inode itself. Just changing
                 * some of its data. No new blocks are added here, and
                 * the inode is never added to the transaction.
                 */
                xfs_ilock(qip, XFS_ILOCK_SHARED);
                error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
                                       map, &nmaps, 0);
                xfs_iunlock(qip, XFS_ILOCK_SHARED);
                if (error)
                        break;

                ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
                for (i = 0; i < nmaps; i++) {
                        ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
                        ASSERT(map[i].br_blockcount);

                        lblkno += map[i].br_blockcount;

                        if (map[i].br_startblock == HOLESTARTBLOCK)
                                continue;

                        firstid = (xfs_dqid_t) map[i].br_startoff *
                                mp->m_quotainfo->qi_dqperchunk;
                        /*
                         * Do a read-ahead on the next extent.
                         */
                        if ((i+1 < nmaps) &&
                            (map[i+1].br_startblock != HOLESTARTBLOCK)) {
                                rablkcnt = map[i+1].br_blockcount;
                                rablkno = map[i+1].br_startblock;
                                while (rablkcnt--) {
                                        xfs_buf_readahead(mp->m_ddev_targp,
                                               XFS_FSB_TO_DADDR(mp, rablkno),
                                               mp->m_quotainfo->qi_dqchunklen,
                                               NULL);
                                        rablkno++;
                                }
                        }
                        /*
                         * Iterate thru all the blks in the extent and
                         * reset the counters of all the dquots inside them.
                         */
                        error = xfs_qm_dqiter_bufs(mp, firstid,
                                                   map[i].br_startblock,
                                                   map[i].br_blockcount,
                                                   flags, buffer_list);
                        if (error)
                                goto out;
                }
        } while (nmaps > 0);

out:
        kmem_free(map);
        return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as well
 * as the buffer copy. This is so that once the quotacheck is done, we can
 * just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
        struct xfs_inode        *ip,
        xfs_dqid_t              id,
        uint                    type,
        xfs_qcnt_t              nblks,
        xfs_qcnt_t              rtblks)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *dqp;
        int                     error;

        error = xfs_qm_dqget(mp, ip, id, type,
                             XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
        if (error) {
                /*
                 * Shouldn't be able to turn off quotas here.
                 */
                ASSERT(error != ESRCH);
                ASSERT(error != ENOENT);
                return error;
        }

        trace_xfs_dqadjust(dqp);

        /*
         * Adjust the inode count and the block count to reflect this inode's
         * resource usage.
         */
        be64_add_cpu(&dqp->q_core.d_icount, 1);
        dqp->q_res_icount++;
        if (nblks) {
                be64_add_cpu(&dqp->q_core.d_bcount, nblks);
                dqp->q_res_bcount += nblks;
        }
        if (rtblks) {
                be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
                dqp->q_res_rtbcount += rtblks;
        }

        /*
         * Set default limits, adjust timers (since we changed usages)
         *
         * There are no timers for the default values set in the root dquot.
         */
        if (dqp->q_core.d_id) {
                xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
                xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
        }

        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_qm_dqput(dqp);
        return 0;
}
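
/*
 * Illustrative note (not upstream code): each adjustment above is made
 * twice on purpose - once to the on-disk image in q_core (the big-endian
 * d_icount/d_bcount/d_rtbcount fields) and once to the incore reservation
 * counters (q_res_*), so both views agree when the dquot buffers are
 * logged at the end of quotacheck.
 */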

STATIC int
xfs_qm_get_rtblks(
        xfs_inode_t     *ip,
        xfs_qcnt_t      *O_rtblks)
{
        xfs_filblks_t   rtblks;         /* total rt blks */
        xfs_extnum_t    idx;            /* extent record index */
        xfs_ifork_t     *ifp;           /* inode fork pointer */
        xfs_extnum_t    nextents;       /* number of extent entries */
        int             error;

        ASSERT(XFS_IS_REALTIME_INODE(ip));
        ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
                        return error;
        }
        rtblks = 0;
        nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
        for (idx = 0; idx < nextents; idx++)
                rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
        *O_rtblks = (xfs_qcnt_t)rtblks;
        return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
        xfs_mount_t     *mp,            /* mount point for filesystem */
        xfs_ino_t       ino,            /* inode number to get data for */
        void            __user *buffer, /* not used */
        int             ubsize,         /* not used */
        int             *ubused,        /* not used */
        int             *res)           /* result code value */
{
        xfs_inode_t     *ip;
        xfs_qcnt_t      nblks, rtblks = 0;
        int             error;

        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        /*
         * rootino must have its resources accounted for, not so with the quota
         * inodes.
         */
        if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
                *res = BULKSTAT_RV_NOTHING;
                return XFS_ERROR(EINVAL);
        }

        /*
         * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
         * interface expects the inode to be exclusively locked because that's
         * the case in all other instances. It's OK that we do this because
         * quotacheck is done only at mount time.
         */
        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
        if (error) {
                *res = BULKSTAT_RV_NOTHING;
                return error;
        }

        ASSERT(ip->i_delayed_blks == 0);

        if (XFS_IS_REALTIME_INODE(ip)) {
                /*
                 * Walk thru the extent list and count the realtime blocks.
                 */
                error = xfs_qm_get_rtblks(ip, &rtblks);
                if (error)
                        goto error0;
        }

        nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

        /*
         * Add the (disk blocks and inode) resources occupied by this
         * inode to its dquots. We do this adjustment in the incore dquot,
         * and also copy the changes to its buffer.
         * We don't care about putting these changes in a transaction
         * envelope because if we crash in the middle of a 'quotacheck'
         * we have to start from the beginning anyway.
         * Once we're done, we'll log all the dquot bufs.
         *
         * The *QUOTA_ON checks below may look pretty racy, but quotachecks
         * and quotaoffs don't race. (Quotachecks happen at mount time only).
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
                                                   XFS_DQ_USER, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_GQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
                                                   XFS_DQ_GROUP, nblks, rtblks);
                if (error)
                        goto error0;
        }

        if (XFS_IS_PQUOTA_ON(mp)) {
                error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
                                                   XFS_DQ_PROJ, nblks, rtblks);
                if (error)
                        goto error0;
        }

        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_DIDONE;
        return 0;

error0:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        IRELE(ip);
        *res = BULKSTAT_RV_GIVEUP;
        return error;
}

STATIC int
xfs_qm_flush_one(
        struct xfs_dquot        *dqp,
        void                    *data)
{
        struct list_head        *buffer_list = data;
        struct xfs_buf          *bp = NULL;
        int                     error = 0;

        xfs_dqlock(dqp);
        if (dqp->dq_flags & XFS_DQ_FREEING)
                goto out_unlock;
        if (!XFS_DQ_IS_DIRTY(dqp))
                goto out_unlock;

        xfs_dqflock(dqp);
        error = xfs_qm_dqflush(dqp, &bp);
        if (error)
                goto out_unlock;

        xfs_buf_delwri_queue(bp, buffer_list);
        xfs_buf_relse(bp);
out_unlock:
        xfs_dqunlock(dqp);
        return error;
}

/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
        xfs_mount_t     *mp)
{
        int             done, count, error, error2;
        xfs_ino_t       lastino;
        size_t          structsz;
        xfs_inode_t     *uip, *gip;
        uint            flags;
        LIST_HEAD       (buffer_list);

        count = INT_MAX;
        structsz = 1;
        lastino = 0;
        flags = 0;

        ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        xfs_notice(mp, "Quotacheck needed: Please wait.");

        /*
         * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
         * their counters to zero. We need a clean slate.
         * We don't log our changes till later.
         */
        uip = mp->m_quotainfo->qi_uquotaip;
        if (uip) {
                error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_UQUOTA_CHKD;
        }

        gip = mp->m_quotainfo->qi_gquotaip;
        if (gip) {
                error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
                                         XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
                                         &buffer_list);
                if (error)
                        goto error_return;
                flags |= XFS_OQUOTA_CHKD;
        }

        do {
                /*
                 * Iterate thru all the inodes in the file system,
                 * adjusting the corresponding dquot counters in core.
                 */
                error = xfs_bulkstat(mp, &lastino, &count,
                                     xfs_qm_dqusage_adjust,
                                     structsz, NULL, &done);
                if (error)
                        break;

        } while (!done);

        /*
         * We've made all the changes that we need to make incore. Flush them
         * down to disk buffers if everything was updated successfully.
         */
        if (XFS_IS_UQUOTA_ON(mp)) {
                error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
                                          &buffer_list);
        }
        if (XFS_IS_GQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }
        if (XFS_IS_PQUOTA_ON(mp)) {
                error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
                                           &buffer_list);
                if (!error)
                        error = error2;
        }

        error2 = xfs_buf_delwri_submit(&buffer_list);
        if (!error)
                error = error2;

        /*
         * We can get this error if we couldn't do a dquot allocation inside
         * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
         * dirty dquots that might be cached, we just want to get rid of them
         * and turn quotaoff. The dquots won't be attached to any of the inodes
         * at this point (because we intentionally didn't in dqget_noattach).
         */
        if (error) {
                xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
                goto error_return;
        }

        /*
         * If one type of quotas is off, then it will lose its
         * quotachecked status, since we won't be doing accounting for
         * that type anymore.
         */
        mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
        mp->m_qflags |= flags;

error_return:
        while (!list_empty(&buffer_list)) {
                struct xfs_buf *bp =
                        list_first_entry(&buffer_list, struct xfs_buf, b_list);
                list_del_init(&bp->b_list);
                xfs_buf_relse(bp);
        }

        if (error) {
                xfs_warn(mp,
                        "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
                        error);
                /*
                 * We must turn off quotas.
                 */
                ASSERT(mp->m_quotainfo != NULL);
                xfs_qm_destroy_quotainfo(mp);
                if (xfs_mount_reset_sbqflags(mp)) {
                        xfs_warn(mp,
                                "Quotacheck: Failed to reset quota flags.");
                }
        } else
                xfs_notice(mp, "Quotacheck: Done.");
        return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
        xfs_mount_t     *mp)
{
        xfs_inode_t     *uip, *gip;
        int             error;
        __int64_t       sbflags;
        uint            flags;

        ASSERT(mp->m_quotainfo);
        uip = gip = NULL;
        sbflags = 0;
        flags = 0;

        /*
         * Get the uquota and gquota inodes
         */
        if (xfs_sb_version_hasquota(&mp->m_sb)) {
                if (XFS_IS_UQUOTA_ON(mp) &&
                    mp->m_sb.sb_uquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_uquotino > 0);
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                             0, 0, &uip)))
                                return XFS_ERROR(error);
                }
                if (XFS_IS_OQUOTA_ON(mp) &&
                    mp->m_sb.sb_gquotino != NULLFSINO) {
                        ASSERT(mp->m_sb.sb_gquotino > 0);
                        if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                             0, 0, &gip))) {
                                if (uip)
                                        IRELE(uip);
                                return XFS_ERROR(error);
                        }
                }
        } else {
                flags |= XFS_QMOPT_SBVERSION;
                sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
                            XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
        }

        /*
         * Create the two inodes, if they don't exist already. The changes
         * made above will get added to a transaction and logged in one of
         * the qino_alloc calls below.  If the device is readonly,
         * temporarily switch to read-write to do this.
         */
        if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
                if ((error = xfs_qm_qino_alloc(mp, &uip,
                                              sbflags | XFS_SB_UQUOTINO,
                                              flags | XFS_QMOPT_UQUOTA)))
                        return XFS_ERROR(error);

                flags &= ~XFS_QMOPT_SBVERSION;
        }
        if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
                flags |= (XFS_IS_GQUOTA_ON(mp) ?
                                XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
                error = xfs_qm_qino_alloc(mp, &gip,
                                          sbflags | XFS_SB_GQUOTINO, flags);
                if (error) {
                        if (uip)
                                IRELE(uip);

                        return XFS_ERROR(error);
                }
        }

        mp->m_quotainfo->qi_uquotaip = uip;
        mp->m_quotainfo->qi_gquotaip = gip;

        return 0;
}

STATIC void
xfs_qm_dqfree_one(
        struct xfs_dquot        *dqp)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;

        mutex_lock(&qi->qi_tree_lock);
        radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
                          be32_to_cpu(dqp->q_core.d_id));

        qi->qi_dquots--;
        mutex_unlock(&qi->qi_tree_lock);

        xfs_qm_dqdestroy(dqp);
}

STATIC void
xfs_qm_dqreclaim_one(
        struct xfs_dquot        *dqp,
        struct list_head        *buffer_list,
        struct list_head        *dispose_list)
{
        struct xfs_mount        *mp = dqp->q_mount;
        struct xfs_quotainfo    *qi = mp->m_quotainfo;
        int                     error;

        if (!xfs_dqlock_nowait(dqp))
                goto out_move_tail;

        /*
         * This dquot has acquired a reference in the meantime; remove it
         * from the freelist and try again.
         */
        if (dqp->q_nrefs) {
                xfs_dqunlock(dqp);

                trace_xfs_dqreclaim_want(dqp);
                XFS_STATS_INC(xs_qm_dqwants);

                list_del_init(&dqp->q_lru);
                qi->qi_lru_count--;
                XFS_STATS_DEC(xs_qm_dquot_unused);
                return;
        }

        /*
         * Try to grab the flush lock. If this dquot is in the process of
         * getting flushed to disk, we don't want to reclaim it.
         */
        if (!xfs_dqflock_nowait(dqp))
                goto out_unlock_move_tail;

        if (XFS_DQ_IS_DIRTY(dqp)) {
                struct xfs_buf  *bp = NULL;

                trace_xfs_dqreclaim_dirty(dqp);

                error = xfs_qm_dqflush(dqp, &bp);
                if (error) {
                        xfs_warn(mp, "%s: dquot %p flush failed",
                                 __func__, dqp);
                        goto out_unlock_move_tail;
                }

                xfs_buf_delwri_queue(bp, buffer_list);
                xfs_buf_relse(bp);
                /*
                 * Give the dquot another try on the freelist, as the
                 * flushing will take some time.
                 */
                goto out_unlock_move_tail;
        }
        xfs_dqfunlock(dqp);

        /*
         * Prevent lookups now that we are past the point of no return.
         */
        dqp->dq_flags |= XFS_DQ_FREEING;
        xfs_dqunlock(dqp);

        ASSERT(dqp->q_nrefs == 0);
        list_move_tail(&dqp->q_lru, dispose_list);
        qi->qi_lru_count--;
        XFS_STATS_DEC(xs_qm_dquot_unused);

        trace_xfs_dqreclaim_done(dqp);
        XFS_STATS_INC(xs_qm_dqreclaims);
        return;

        /*
         * Move the dquot to the tail of the list so that we don't spin on it.
         */
out_unlock_move_tail:
        xfs_dqunlock(dqp);
out_move_tail:
        list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
        trace_xfs_dqreclaim_busy(dqp);
        XFS_STATS_INC(xs_qm_dqreclaim_misses);
}

STATIC int
xfs_qm_shake(
        struct shrinker         *shrink,
        struct shrink_control   *sc)
{
        struct xfs_quotainfo    *qi =
                container_of(shrink, struct xfs_quotainfo, qi_shrinker);
        int                     nr_to_scan = sc->nr_to_scan;
        LIST_HEAD               (buffer_list);
        LIST_HEAD               (dispose_list);
        struct xfs_dquot        *dqp;
        int                     error;

        if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
                return 0;
        if (!nr_to_scan)
                goto out;

        mutex_lock(&qi->qi_lru_lock);
        while (!list_empty(&qi->qi_lru_list)) {
                if (nr_to_scan-- <= 0)
                        break;
                dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
                                       q_lru);
                xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
        }
        mutex_unlock(&qi->qi_lru_lock);

        error = xfs_buf_delwri_submit(&buffer_list);
        if (error)
                xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

        while (!list_empty(&dispose_list)) {
                dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
                list_del_init(&dqp->q_lru);
                xfs_qm_dqfree_one(dqp);
        }

out:
        return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
}
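
/*
 * Illustrative note (not upstream code): the return value above is the
 * shrinker's estimate of reclaimable objects.  With the default
 * vfs_cache_pressure of 100 it reports roughly qi_lru_count itself, e.g.
 * 5000 unused dquots yield (5000 / 100) * 100 = 5000, while a pressure
 * of 50 halves that to 2500.
 */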

/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
        xfs_mount_t     *mp,
        __int64_t       flags)
{
        xfs_trans_t     *tp;
        int             error;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
        if ((error = xfs_trans_reserve(tp, 0,
                                      mp->m_sb.sb_sectsize + 128, 0,
                                      0,
                                      XFS_DEFAULT_LOG_COUNT))) {
                xfs_trans_cancel(tp, 0);
                return error;
        }

        xfs_mod_sb(tp, flags);
        error = xfs_trans_commit(tp, 0);

        return error;
}

/* --------------- utility functions for vnodeops ---------------- */

/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in   : inode (unlocked)
 * out  : udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
        struct xfs_inode        *ip,
        uid_t                   uid,
        gid_t                   gid,
        prid_t                  prid,
        uint                    flags,
        struct xfs_dquot        **O_udqpp,
        struct xfs_dquot        **O_gdqpp)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_dquot        *uq, *gq;
        int                     error;
        uint                    lockflags;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        lockflags = XFS_ILOCK_EXCL;
        xfs_ilock(ip, lockflags);

        if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
                gid = ip->i_d.di_gid;

        /*
         * Attach the dquot(s) to this inode, doing a dquot allocation
         * if necessary. The dquot(s) will not be locked.
         */
        if (XFS_NOT_DQATTACHED(mp, ip)) {
                error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
                if (error) {
                        xfs_iunlock(ip, lockflags);
                        return error;
                }
        }

        uq = gq = NULL;
        if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
                if (ip->i_d.di_uid != uid) {
                        /*
                         * What we need is the dquot that has this uid, and
                         * if we send the inode to dqget, the uid of the inode
                         * takes priority over what's sent in the uid argument.
                         * We must unlock inode here before calling dqget if
                         * we're not sending the inode, because otherwise
                         * we'll deadlock by doing trans_reserve while
                         * holding ilock.
                         */
                        xfs_iunlock(ip, lockflags);
                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
                                                 XFS_DQ_USER,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
                                                 &uq))) {
                                ASSERT(error != ENOENT);
                                return error;
                        }
                        /*
                         * Get the ilock in the right order.
                         */
                        xfs_dqunlock(uq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        /*
                         * Take an extra reference, because we'll return
                         * this to caller
                         */
                        ASSERT(ip->i_udquot);
                        uq = xfs_qm_dqhold(ip->i_udquot);
                }
        }
        if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
                if (ip->i_d.di_gid != gid) {
                        xfs_iunlock(ip, lockflags);
                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
                                                 XFS_DQ_GROUP,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
                                                 &gq))) {
                                if (uq)
                                        xfs_qm_dqrele(uq);
                                ASSERT(error != ENOENT);
                                return error;
                        }
                        xfs_dqunlock(gq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        ASSERT(ip->i_gdquot);
                        gq = xfs_qm_dqhold(ip->i_gdquot);
                }
        } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
                if (xfs_get_projid(ip) != prid) {
                        xfs_iunlock(ip, lockflags);
                        if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
                                                 XFS_DQ_PROJ,
                                                 XFS_QMOPT_DQALLOC |
                                                 XFS_QMOPT_DOWARN,
                                                 &gq))) {
                                if (uq)
                                        xfs_qm_dqrele(uq);
                                ASSERT(error != ENOENT);
                                return (error);
                        }
                        xfs_dqunlock(gq);
                        lockflags = XFS_ILOCK_SHARED;
                        xfs_ilock(ip, lockflags);
                } else {
                        ASSERT(ip->i_gdquot);
                        gq = xfs_qm_dqhold(ip->i_gdquot);
                }
        }
        if (uq)
                trace_xfs_dquot_dqalloc(ip);

        xfs_iunlock(ip, lockflags);
        if (O_udqpp)
                *O_udqpp = uq;
        else if (uq)
                xfs_qm_dqrele(uq);
        if (O_gdqpp)
                *O_gdqpp = gq;
        else if (gq)
                xfs_qm_dqrele(gq);
        return 0;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        xfs_dquot_t     **IO_olddq,
        xfs_dquot_t     *newdq)
{
        xfs_dquot_t     *prevdq;
        uint            bfield = XFS_IS_REALTIME_INODE(ip) ?
                                 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

        /* old dquot */
        prevdq = *IO_olddq;
        ASSERT(prevdq);
        ASSERT(prevdq != newdq);

        xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
        xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

        /* the sparkling new dquot */
        xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
        xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

        /*
         * Take an extra reference, because the inode is going to keep
         * this dquot pointer even after the trans_commit.
         */
        *IO_olddq = xfs_qm_dqhold(newdq);

        return prevdq;
}
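
/*
 * Illustrative note (not upstream code): for an inode owning, say, 100
 * blocks, the four xfs_trans_mod_dquot() calls above net out to moving
 * 100 blocks and one inode of usage from prevdq to newdq within the same
 * transaction, so the combined totals across both dquots are unchanged.
 */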

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
        xfs_trans_t     *tp,
        xfs_inode_t     *ip,
        xfs_dquot_t     *udqp,
        xfs_dquot_t     *gdqp,
        uint            flags)
{
        xfs_mount_t     *mp = ip->i_mount;
        uint            delblks, blkflags, prjflags = 0;
        xfs_dquot_t     *unresudq, *unresgdq, *delblksudq, *delblksgdq;
        int             error;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        delblks = ip->i_delayed_blks;
        delblksudq = delblksgdq = unresudq = unresgdq = NULL;
        blkflags = XFS_IS_REALTIME_INODE(ip) ?
                        XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

        if (XFS_IS_UQUOTA_ON(mp) && udqp &&
            ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
                delblksudq = udqp;
                /*
                 * If there are delayed allocation blocks, then we have to
                 * unreserve those from the old dquot, and add them to the
                 * new dquot.
                 */
                if (delblks) {
                        ASSERT(ip->i_udquot);
                        unresudq = ip->i_udquot;
                }
        }
        if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
                if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
                    xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
                        prjflags = XFS_QMOPT_ENOSPC;

                if (prjflags ||
                    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
                     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
                        delblksgdq = gdqp;
                        if (delblks) {
                                ASSERT(ip->i_gdquot);
                                unresgdq = ip->i_gdquot;
                        }
                }
        }

        if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
                                delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
                                flags | blkflags | prjflags)))
                return (error);

        /*
         * Do the delayed blks reservations/unreservations now. Since these
         * are done without the help of a transaction, if a reservation fails
         * its previous reservations won't be automatically undone by trans
         * code. So, we have to do it manually here.
         */
        if (delblks) {
                /*
                 * Do the reservations first. Unreservation can't fail.
                 */
                ASSERT(delblksudq || delblksgdq);
                ASSERT(unresudq || unresgdq);
                if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
                                delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
                                flags | blkflags | prjflags)))
                        return (error);
                xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
                                unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
                                blkflags);
        }

        return (0);
}

int
xfs_qm_vop_rename_dqattach(
        struct xfs_inode        **i_tab)
{
        struct xfs_mount        *mp = i_tab[0]->i_mount;
        int                     i;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return 0;

        for (i = 0; (i < 4 && i_tab[i]); i++) {
                struct xfs_inode        *ip = i_tab[i];
                int                     error;

                /*
                 * Watch out for duplicate entries in the table.
                 */
                if (i == 0 || ip != i_tab[i-1]) {
                        if (XFS_NOT_DQATTACHED(mp, ip)) {
                                error = xfs_qm_dqattach(ip, 0);
                                if (error)
                                        return error;
                        }
                }
        }
        return 0;
}

void
xfs_qm_vop_create_dqattach(
        struct xfs_trans        *tp,
        struct xfs_inode        *ip,
        struct xfs_dquot        *udqp,
        struct xfs_dquot        *gdqp)
{
        struct xfs_mount        *mp = tp->t_mountp;

        if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
                return;

        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
        ASSERT(XFS_IS_QUOTA_RUNNING(mp));

        if (udqp) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(XFS_IS_UQUOTA_ON(mp));
                ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

                ip->i_udquot = xfs_qm_dqhold(udqp);
                xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
        if (gdqp) {
                ASSERT(ip->i_gdquot == NULL);
                ASSERT(XFS_IS_OQUOTA_ON(mp));
                ASSERT((XFS_IS_GQUOTA_ON(mp) ?
                        ip->i_d.di_gid : xfs_get_projid(ip)) ==
                                be32_to_cpu(gdqp->q_core.d_id));

                ip->i_gdquot = xfs_qm_dqhold(gdqp);
                xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
        }
}