/fs/xfs/xfs_qm.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_ialloc_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC int	xfs_qm_shake(struct shrinker *, struct shrink_control *);

/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32

STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = XFS_DQUOT_TREE(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;
	nr_found = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}

	if (skipped) {
		delay(1);
		goto restart;
	}

	return last_error;
}

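/*
 * Note on the walker protocol above: an execute callback returns EAGAIN
 * to report that a dquot was busy (for instance, already being freed).
 * The walk counts it as skipped, completes the current pass, then sleeps
 * briefly and restarts from index 0 until a pass completes with nothing
 * skipped or the filesystem turns out to be corrupted.
 */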

/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;

	xfs_dqflock(dqp);

	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
		xfs_dqflock(dqp);
	}

	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));

	xfs_dqfunlock(dqp);
	xfs_dqunlock(dqp);

	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	qi->qi_dquots--;

	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	mutex_lock(&qi->qi_lru_lock);
	ASSERT(!list_empty(&dqp->q_lru));
	list_del_init(&dqp->q_lru);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);
	mutex_unlock(&qi->qi_lru_lock);

	xfs_qm_dqdestroy(dqp);
	return 0;
}

/*
 * Release the group or project dquot pointers the user dquots may be
 * carrying around as a hint, and proceed to purge the user dquot cache
 * if requested.
 */
STATIC int
xfs_qm_dqpurge_hints(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_dquot	*gdqp = NULL;
	uint			flags = *((uint *)data);

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING) {
		xfs_dqunlock(dqp);
		return EAGAIN;
	}

	/* If this quota has a hint attached, prepare for releasing it now */
	gdqp = dqp->q_gdquot;
	if (gdqp)
		dqp->q_gdquot = NULL;

	xfs_dqunlock(dqp);

	if (gdqp)
		xfs_qm_dqrele(gdqp);

	if (flags & XFS_QMOPT_UQUOTA)
		return xfs_qm_dqpurge(dqp, NULL);

	return 0;
}

/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	/*
	 * We have to release the group/project dquot hint(s) from the user
	 * dquots first, if they are there; otherwise we would run into an
	 * infinite loop while walking through the radix tree to purge the
	 * other types of dquots, since their refcounts are not zero while
	 * a user dquot refers to them as a hint.
	 *
	 * Calling xfs_qm_dqpurge_hints() also ends up going through the
	 * general xfs_qm_dqpurge() for the user dquot cache if requested.
	 */
	xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);

	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}

/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}


/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo. This is also responsible for
 * running a quotacheck as necessary. We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	xfs_mount_t	*mp)
{
	int		error = 0;
	uint		sbf;

	/*
	 * If quotas on realtime volumes are not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!(XFS_IS_GQUOTA_ON(mp) || XFS_IS_PQUOTA_ON(mp)))
		mp->m_qflags &= ~XFS_OQUOTA_CHKD;

 write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);

	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that!
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}

/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
	}
}

STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	*udqhint,	/* hint */
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is
	 * &i_udquot or &i_gdquot. This made the code look weird, but
	 * made the logic a lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * udqhint is the i_udquot field in inode, and is non-NULL only
	 * when the type arg is group/project. Its purpose is to save a
	 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
	 * the user dquot.
	 */
	if (udqhint) {
		ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
		xfs_dqlock(udqhint);

		/*
		 * No need to take dqlock to look at the id.
		 *
		 * The ID can't change until it gets reclaimed, and it won't
		 * be reclaimed as long as we have a ref from inode and we
		 * hold the ilock.
		 */
		dqp = udqhint->q_gdquot;
		if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
			ASSERT(*IO_idqpp == NULL);

			*IO_idqpp = xfs_qm_dqhold(dqp);
			xfs_dqunlock(udqhint);
			return 0;
		}

		/*
		 * We can't hold a dquot lock when we call the dqget code.
		 * We'll deadlock in no time, because of (not conforming to)
		 * lock ordering - the inodelock comes before any dquot lock,
		 * and we may drop and reacquire the ilock in xfs_qm_dqget().
		 */
		xfs_dqunlock(udqhint);
	}

	/*
	 * Find the dquot from somewhere. This bumps the
	 * reference count of dquot and returns it locked.
	 * This can return ENOENT if dquot didn't exist on
	 * disk and we didn't ask it to allocate;
	 * ESRCH if quotas got turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}

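/*
 * Note: the q_gdquot hint cached by the routine below holds a proper
 * reference (taken via xfs_qm_dqhold()), so the hinted group/project
 * dquot cannot be reclaimed while a user dquot carries the hint;
 * xfs_qm_dqpurge_hints() drops that reference at quotaoff/unmount time.
 */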
/*
 * Given a udquot and gdquot, attach a ptr to the group dquot in the
 * udquot as a hint for future lookups.
 */
STATIC void
xfs_qm_dqattach_grouphint(
	xfs_dquot_t	*udq,
	xfs_dquot_t	*gdq)
{
	xfs_dquot_t	*tmp;

	xfs_dqlock(udq);

	tmp = udq->q_gdquot;
	if (tmp) {
		if (tmp == gdq)
			goto done;

		udq->q_gdquot = NULL;
		xfs_qm_dqrele(tmp);
	}

	udq->q_gdquot = xfs_qm_dqhold(gdq);
done:
	xfs_dqunlock(udq);
}

static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (ip->i_ino == mp->m_sb.sb_uquotino ||
	    ip->i_ino == mp->m_sb.sb_gquotino)
		return false;
	return true;
}

/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		nquotas = 0;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						NULL, &ip->i_udquot);
		if (error)
			goto done;
		nquotas++;
	}

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_OQUOTA_ON(mp)) {
		error = XFS_IS_GQUOTA_ON(mp) ?
			xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot) :
			xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}

	/*
	 * Attach this group quota to the user quota as a hint.
	 * This WON'T, in general, result in a thrash.
	 */
	if (nquotas == 2) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(ip->i_udquot);
		ASSERT(ip->i_gdquot);

		/*
		 * We do not have i_udquot locked at this point, but this check
		 * is OK since we don't depend on the i_gdquot to be accurate
		 * 100% all the time. It is just a hint, and this will
		 * succeed in general.
		 */
		if (ip->i_udquot->q_gdquot != ip->i_gdquot)
			xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
	}

 done:
#ifdef DEBUG
	if (!error) {
		if (XFS_IS_UQUOTA_ON(mp))
			ASSERT(ip->i_udquot);
		if (XFS_IS_OQUOTA_ON(mp))
			ASSERT(ip->i_gdquot);
	}
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
	return error;
}

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}

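/*
 * Note: xfs_qm_need_dqattach() above deliberately skips the quota inodes
 * themselves; their blocks are never charged to any dquot, and the
 * quotacheck bulkstat callback rejects them for the same reason.
 */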
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this is called by
 * xfs_ireclaim.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_uquotino);
	ASSERT(ip->i_ino != ip->i_mount->m_sb.sb_gquotino);
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
}

/*
 * This initializes all the quota information that's kept in the
 * mount structure.
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	if ((error = xfs_qm_init_quotainos(mp))) {
		kmem_free(qinf);
		mp->m_quotainfo = NULL;
		return error;
	}

	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	INIT_LIST_HEAD(&qinf->qi_lru_list);
	qinf->qi_lru_count = 0;
	mutex_init(&qinf->qi_lru_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);

	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(qinf->qi_dqchunklen);
	qinf->qi_dqperchunk = BBTOB(qinf->qi_dqchunklen);
	do_div(qinf->qi_dqperchunk, sizeof(xfs_dqblk_t));

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);

	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we go to the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (!error) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can no longer write.
		 * If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);

		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}

	qinf->qi_shrinker.shrink = xfs_qm_shake;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&qinf->qi_shrinker);
	return 0;
}


/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}

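/*
 * Note: callers are expected to have purged the dquot cache before
 * calling xfs_qm_destroy_quotainfo() (see xfs_qm_unmount() above), and
 * the shrinker is unregistered first so that xfs_qm_shake() cannot race
 * with the teardown of the LRU list.
 */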
/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	__int64_t	sbfields,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	if ((error = xfs_trans_reserve(tp,
				      XFS_QM_QINOCREATE_SPACE_RES(mp),
				      XFS_CREATE_LOG_RES(mp), 0,
				      XFS_TRANS_PERM_LOG_RES,
				      XFS_CREATE_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}

	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
		ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
				   XFS_SB_GQUOTINO | XFS_SB_QFLAGS)) ==
		       (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			XFS_SB_GQUOTINO | XFS_SB_QFLAGS));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;

		/* qflags will get updated _after_ quotacheck */
		mp->m_sb.sb_qflags = 0;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	xfs_mod_sb(tp, sbfields);

	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}


STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	xfs_disk_dquot_t	*ddq;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);

	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	ddq = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
		 */
		(void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
				      "xfs_quotacheck");
		ddq->d_bcount = 0;
		ddq->d_icount = 0;
		ddq->d_rtbcount = 0;
		ddq->d_btimer = 0;
		ddq->d_itimer = 0;
		ddq->d_rtbtimer = 0;
		ddq->d_bwarns = 0;
		ddq->d_iwarns = 0;
		ddq->d_rtbwarns = 0;
		ddq = (xfs_disk_dquot_t *) ((xfs_dqblk_t *)ddq + 1);
	}
}

STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;

	ASSERT(blkcnt > 0);
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	error = 0;

	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);
		if (error)
			break;

		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Go to the next block.
		 */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}

	return error;
}

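/*
 * Note: xfs_qm_reset_dqcounts() and xfs_qm_dqiter_bufs() above implement
 * the buffer-reset phase of quotacheck, driven by xfs_qm_dqiterate()
 * below: every on-disk dquot buffer is read, sanity-checked and its
 * counters zeroed. The second phase (xfs_qm_dqusage_adjust) rebuilds the
 * incore counts from a bulkstat pass over all inodes, and the final phase
 * flushes the dirty dquots back out through a delayed write buffer list.
 */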
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;

	error = 0;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;

	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		xfs_ilock(qip, XFS_ILOCK_SHARED);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, XFS_ILOCK_SHARED);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);


			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;

			firstid = (xfs_dqid_t) map[i].br_startoff *
				mp->m_quotainfo->qi_dqperchunk;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);

out:
	kmem_free(map);
	return error;
}

/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done,
 * we can just log all the buffers, as opposed to logging numerous updates
 * to individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != ESRCH);
		ASSERT(error != ENOENT);
		return error;
	}

	trace_xfs_dqadjust(dqp);

	/*
	 * Adjust the inode count and the block count to reflect this inode's
	 * resource usage.
	 */
	be64_add_cpu(&dqp->q_core.d_icount, 1);
	dqp->q_res_icount++;
	if (nblks) {
		be64_add_cpu(&dqp->q_core.d_bcount, nblks);
		dqp->q_res_bcount += nblks;
	}
	if (rtblks) {
		be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
		dqp->q_res_rtbcount += rtblks;
	}

	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, &dqp->q_core);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}

STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}

/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}

STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;

	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}

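/*
 * Note: xfs_qm_flush_one() above deliberately skips dquots that are
 * being freed or are already clean; everything else is flushed to its
 * backing buffer, which is queued on the caller's delayed write list
 * and submitted in one batch at the end of quotacheck.
 */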
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int		done, count, error, error2;
	xfs_ino_t	lastino;
	size_t		structsz;
	xfs_inode_t	*uip, *gip;
	uint		flags;
	LIST_HEAD	(buffer_list);

	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(mp->m_quotainfo->qi_uquotaip || mp->m_quotainfo->qi_gquotaip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");

	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	uip = mp->m_quotainfo->qi_uquotaip;
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}

	gip = mp->m_quotainfo->qi_gquotaip;
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
					 XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_OQUOTA_CHKD;
	}

	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;

	} while (!done);

	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;

	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}

	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;

 error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}

	if (error) {
		xfs_warn(mp,
			"Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
			error);
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo != NULL);
		xfs_qm_destroy_quotainfo(mp);
		if (xfs_mount_reset_sbqflags(mp)) {
			xfs_warn(mp,
				"Quotacheck: Failed to reset quota flags.");
		}
	} else
		xfs_notice(mp, "Quotacheck: Done.");
	return (error);
}

/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip))) {
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below. If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}

STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;

	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(XFS_DQUOT_TREE(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));

	qi->qi_dquots--;
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}

STATIC void
xfs_qm_dqreclaim_one(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list,
	struct list_head	*dispose_list)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	int			error;

	if (!xfs_dqlock_nowait(dqp))
		goto out_move_tail;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);

		trace_xfs_dqreclaim_want(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		list_del_init(&dqp->q_lru);
		qi->qi_lru_count--;
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return;
	}

	/*
	 * Try to grab the flush lock. If this dquot is in the process of
	 * getting flushed to disk, we don't want to reclaim it.
	 */
	if (!xfs_dqflock_nowait(dqp))
		goto out_unlock_move_tail;

	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;

		trace_xfs_dqreclaim_dirty(dqp);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_move_tail;
		}

		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/*
		 * Give the dquot another try on the freelist, as the
		 * flushing will take some time.
		 */
		goto out_unlock_move_tail;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);

	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, dispose_list);
	qi->qi_lru_count--;
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return;

	/*
	 * Move the dquot to the tail of the list so that we don't spin on it.
	 */
out_unlock_move_tail:
	xfs_dqunlock(dqp);
out_move_tail:
	list_move_tail(&dqp->q_lru, &qi->qi_lru_list);
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
}

STATIC int
xfs_qm_shake(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi =
		container_of(shrink, struct xfs_quotainfo, qi_shrinker);
	int			nr_to_scan = sc->nr_to_scan;
	LIST_HEAD		(buffer_list);
	LIST_HEAD		(dispose_list);
	struct xfs_dquot	*dqp;
	int			error;

	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;
	if (!nr_to_scan)
		goto out;

	mutex_lock(&qi->qi_lru_lock);
	while (!list_empty(&qi->qi_lru_list)) {
		if (nr_to_scan-- <= 0)
			break;
		dqp = list_first_entry(&qi->qi_lru_list, struct xfs_dquot,
				       q_lru);
		xfs_qm_dqreclaim_one(dqp, &buffer_list, &dispose_list);
	}
	mutex_unlock(&qi->qi_lru_lock);

	error = xfs_buf_delwri_submit(&buffer_list);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);

	while (!list_empty(&dispose_list)) {
		dqp = list_first_entry(&dispose_list, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}

out:
	return (qi->qi_lru_count / 100) * sysctl_vfs_cache_pressure;
}

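/*
 * Note: the value returned by xfs_qm_shake() above is the shrinker's
 * estimate of how many objects remain reclaimable, scaled by
 * sysctl_vfs_cache_pressure; the VM uses it to decide how much more
 * to scan on subsequent calls.
 */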
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	xfs_mount_t	*mp,
	__int64_t	flags)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	if ((error = xfs_trans_reserve(tp, 0,
				      mp->m_sb.sb_sectsize + 128, 0,
				      0,
				      XFS_DEFAULT_LOG_COUNT))) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, flags);
	error = xfs_trans_commit(tp, 0);

	return error;
}


/* --------------- utility functions for vnodeops ---------------- */


/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	uid_t			uid,
	gid_t			gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq, *gq;
	int			error;
	uint			lockflags;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);

	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;

	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}

	uq = gq = NULL;
	if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
		if (ip->i_d.di_uid != uid) {
			/*
			 * What we need is the dquot that has this uid, and
			 * if we send the inode to dqget, the uid of the inode
			 * takes priority over what's sent in the uid argument.
			 * We must unlock inode here before calling dqget if
			 * we're not sending the inode, because otherwise
			 * we'll deadlock by doing trans_reserve while
			 * holding ilock.
			 */
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t) uid,
						 XFS_DQ_USER,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &uq))) {
				ASSERT(error != ENOENT);
				return error;
			}
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return error;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	} else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			if ((error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq))) {
				if (uq)
					xfs_qm_dqrele(uq);
				ASSERT(error != ENOENT);
				return (error);
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
	if (O_udqpp)
		*O_udqpp = uq;
	else if (uq)
		xfs_qm_dqrele(uq);
	if (O_gdqpp)
		*O_gdqpp = gq;
	else if (gq)
		xfs_qm_dqrele(gq);
	return 0;
}

/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}

/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	*udqp,
	xfs_dquot_t	*gdqp,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	uint		delblks, blkflags, prjflags = 0;
	xfs_dquot_t	*unresudq, *unresgdq, *delblksudq, *delblksgdq;
	int		error;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	delblksudq = delblksgdq = unresudq = unresgdq = NULL;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;

	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != (uid_t)be32_to_cpu(udqp->q_core.d_id)) {
		delblksudq = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			unresudq = ip->i_udquot;
		}
	}
	if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
		if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
		    xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
			prjflags = XFS_QMOPT_ENOSPC;

		if (prjflags ||
		    (XFS_IS_GQUOTA_ON(ip->i_mount) &&
		     ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
			delblksgdq = gdqp;
			if (delblks) {
				ASSERT(ip->i_gdquot);
				unresgdq = ip->i_gdquot;
			}
		}
	}

	if ((error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				delblksudq, delblksgdq, ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags)))
		return (error);

	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
		/*
		 * Do the reservations first. Unreservation can't fail.
		 */
		ASSERT(delblksudq || delblksgdq);
		ASSERT(unresudq || unresgdq);
		if ((error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				delblksudq, delblksgdq, (xfs_qcnt_t)delblks, 0,
				flags | blkflags | prjflags)))
			return (error);
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				unresudq, unresgdq, -((xfs_qcnt_t)delblks), 0,
				blkflags);
	}

	return (0);
}

int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;

		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}

void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
				be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}