/fs/xfs/xfs_file.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_alloc.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_error.h"
#include "xfs_vnodeops.h"
#include "xfs_da_btree.h"
#include "xfs_ioctl.h"
#include "xfs_trace.h"

#include <linux/dcache.h>
#include <linux/falloc.h>

static const struct vm_operations_struct xfs_file_vm_ops;

/*
 * Locking primitives for read and write IO paths to ensure we consistently use
 * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
 */
static inline void
xfs_rw_ilock(
	struct xfs_inode	*ip,
	int			type)
{
	if (type & XFS_IOLOCK_EXCL)
		mutex_lock(&VFS_I(ip)->i_mutex);
	xfs_ilock(ip, type);
}

static inline void
xfs_rw_iunlock(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_iunlock(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

static inline void
xfs_rw_ilock_demote(
	struct xfs_inode	*ip,
	int			type)
{
	xfs_ilock_demote(ip, type);
	if (type & XFS_IOLOCK_EXCL)
		mutex_unlock(&VFS_I(ip)->i_mutex);
}

/*
 * xfs_iozero
 *
 * xfs_iozero clears the specified range of buffer supplied,
 * and marks all the affected blocks as valid and modified.  If
 * an affected block is not allocated, it will be allocated.  If
 * an affected block is not completely overwritten, and is not
 * valid before the operation, it will be read from disk before
 * being partially zeroed.
 */
STATIC int
xfs_iozero(
	struct xfs_inode	*ip,	/* inode */
	loff_t			pos,	/* offset in file */
	size_t			count)	/* size of data to zero */
{
	struct page		*page;
	struct address_space	*mapping;
	int			status;

	mapping = VFS_I(ip)->i_mapping;
	do {
		unsigned offset, bytes;
		void *fsdata;

		offset = (pos & (PAGE_CACHE_SIZE - 1));	/* Within page */
		bytes = PAGE_CACHE_SIZE - offset;
		if (bytes > count)
			bytes = count;

		status = pagecache_write_begin(NULL, mapping, pos, bytes,
					AOP_FLAG_UNINTERRUPTIBLE,
					&page, &fsdata);
		if (status)
			break;

		zero_user(page, offset, bytes);

		status = pagecache_write_end(NULL, mapping, pos, bytes, bytes,
					page, fsdata);
		WARN_ON(status <= 0); /* can't return less than zero! */
		pos += bytes;
		count -= bytes;
		status = 0;
	} while (count);

	return (-status);
}
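
/*
 * Illustrative example (assuming a 4096-byte PAGE_CACHE_SIZE; not called
 * anywhere): xfs_iozero(ip, 3000, 5000) walks the loop above twice:
 *
 *	pass 1: pos = 3000, offset = 3000, bytes = 1096
 *	pass 2: pos = 4096, offset = 0,    bytes = 3904
 *
 * i.e. the partial tail of the first page is zeroed, then the remainder of
 * the range in the following page, each pass going through the normal
 * pagecache_write_begin()/pagecache_write_end() cycle.
 */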

STATIC int
xfs_file_fsync(
	struct file		*file,
	loff_t			start,
	loff_t			end,
	int			datasync)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error = 0;
	int			log_flushed = 0;

	trace_xfs_file_fsync(ip);

	error = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (error)
		return error;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -XFS_ERROR(EIO);

	xfs_iflags_clear(ip, XFS_ITRUNCATED);

	xfs_ilock(ip, XFS_IOLOCK_SHARED);
	xfs_ioend_wait(ip);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

	if (mp->m_flags & XFS_MOUNT_BARRIER) {
		/*
		 * If we have an RT and/or log subvolume we need to make sure
		 * to flush the write cache of the device used for file data
		 * first.  This is to ensure newly written file data makes
		 * it to disk before logging the new inode size in case of
		 * an extending write.
		 */
		if (XFS_IS_REALTIME_INODE(ip))
			xfs_blkdev_issue_flush(mp->m_rtdev_targp);
		else if (mp->m_logdev_targp != mp->m_ddev_targp)
			xfs_blkdev_issue_flush(mp->m_ddev_targp);
	}

	/*
	 * We always need to make sure that the required inode state is safe on
	 * disk.  The inode might be clean but we still might need to force the
	 * log because of committed transactions that haven't hit the disk yet.
	 * Likewise, there could be unflushed non-transactional changes to the
	 * inode core that have to go to disk and this requires us to issue
	 * a synchronous transaction to capture these changes correctly.
	 *
	 * This code relies on the assumption that if the i_update_core field
	 * of the inode is clear and the inode is unpinned then it is clean
	 * and no action is required.
	 */
	xfs_ilock(ip, XFS_ILOCK_SHARED);

	/*
	 * First check if the VFS inode is marked dirty.  All the dirtying
	 * of non-transactional updates now goes through mark_inode_dirty*,
	 * which allows us to distinguish between pure timestamp updates
	 * and i_size updates which need to be caught for fdatasync.
	 * After that also check for the dirty state in the XFS inode, which
	 * might get cleared when the inode gets written out via the AIL
	 * or xfs_iflush_cluster.
	 */
	if (((inode->i_state & I_DIRTY_DATASYNC) ||
	    ((inode->i_state & I_DIRTY_SYNC) && !datasync)) &&
	    ip->i_update_core) {
		/*
		 * Kick off a transaction to log the inode core to get the
		 * updates.  The sync transaction will also force the log.
		 */
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
		error = xfs_trans_reserve(tp, 0,
				XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
		if (error) {
			xfs_trans_cancel(tp, 0);
			return -error;
		}
		xfs_ilock(ip, XFS_ILOCK_EXCL);

		/*
		 * Note - it's possible that we might have pushed ourselves out
		 * of the way during trans_reserve which would flush the inode.
		 * But there's no guarantee that the inode buffer has actually
		 * gone out yet (it's delwri).  Plus the buffer could be pinned
		 * anyway if it's part of an inode in another recent
		 * transaction.  So we play it safe and fire off the
		 * transaction anyway.
		 */
		xfs_trans_ijoin(tp, ip);
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		xfs_trans_set_sync(tp);
		error = _xfs_trans_commit(tp, 0, &log_flushed);

		xfs_iunlock(ip, XFS_ILOCK_EXCL);
	} else {
		/*
		 * Timestamps/size haven't changed since last inode flush or
		 * inode transaction commit.  That means either nothing got
		 * written or a transaction committed which caught the updates.
		 * If the latter happened and the transaction hasn't hit the
		 * disk yet, the inode will still be pinned.  If it is,
		 * force the log.
		 */
		if (xfs_ipincount(ip)) {
			error = _xfs_log_force_lsn(mp,
					ip->i_itemp->ili_last_lsn,
					XFS_LOG_SYNC, &log_flushed);
		}
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
	}

	/*
	 * If we only have a single device, and the log force above was
	 * a no-op, we might have to flush the data device cache here.
	 * This can only happen for fdatasync/O_DSYNC if we were overwriting
	 * an already allocated file and thus do not have any metadata to
	 * commit.
	 */
	if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
	    mp->m_logdev_targp == mp->m_ddev_targp &&
	    !XFS_IS_REALTIME_INODE(ip) &&
	    !log_flushed)
		xfs_blkdev_issue_flush(mp->m_ddev_targp);

	return -error;
}

STATIC ssize_t
xfs_file_aio_read(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	size_t			size = 0;
	ssize_t			ret = 0;
	int			ioflags = 0;
	xfs_fsize_t		n;
	unsigned long		seg;

	XFS_STATS_INC(xs_read_calls);

	BUG_ON(iocb->ki_pos != pos);

	if (unlikely(file->f_flags & O_DIRECT))
		ioflags |= IO_ISDIRECT;
	if (file->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	/* START copy & waste from filemap.c */
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iovp[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		size += iv->iov_len;
		if (unlikely((ssize_t)(size|iv->iov_len) < 0))
			return XFS_ERROR(-EINVAL);
	}
	/* END copy & waste from filemap.c */

	if (unlikely(ioflags & IO_ISDIRECT)) {
		xfs_buftarg_t	*target =
			XFS_IS_REALTIME_INODE(ip) ?
				mp->m_rtdev_targp : mp->m_ddev_targp;
		if ((iocb->ki_pos & target->bt_smask) ||
		    (size & target->bt_smask)) {
			if (iocb->ki_pos == ip->i_size)
				return 0;
			return -XFS_ERROR(EINVAL);
		}
	}

	n = XFS_MAXIOFFSET(mp) - iocb->ki_pos;
	if (n <= 0 || size == 0)
		return 0;

	if (n < size)
		size = n;

	if (XFS_FORCED_SHUTDOWN(mp))
		return -EIO;

	/*
	 * Locking is a bit tricky here.  If we take an exclusive lock
	 * for direct IO, we effectively serialise all new concurrent
	 * read IO to this file and block it behind IO that is currently in
	 * progress because IO in progress holds the IO lock shared.  We only
	 * need to hold the lock exclusive to blow away the page cache, so
	 * only take the lock exclusively if the page cache needs invalidation.
	 * This allows the normal direct IO case of no page cache pages to
	 * proceed concurrently without serialisation.
	 */
	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
	if ((ioflags & IO_ISDIRECT) && inode->i_mapping->nrpages) {
		xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
		xfs_rw_ilock(ip, XFS_IOLOCK_EXCL);

		if (inode->i_mapping->nrpages) {
			ret = -xfs_flushinval_pages(ip,
					(iocb->ki_pos & PAGE_CACHE_MASK),
					-1, FI_REMAPF_LOCKED);
			if (ret) {
				xfs_rw_iunlock(ip, XFS_IOLOCK_EXCL);
				return ret;
			}
		}
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
	}

	trace_xfs_file_read(ip, size, iocb->ki_pos, ioflags);

	ret = generic_file_aio_read(iocb, iovp, nr_segs, iocb->ki_pos);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC ssize_t
xfs_file_splice_read(
	struct file		*infilp,
	loff_t			*ppos,
	struct pipe_inode_info	*pipe,
	size_t			count,
	unsigned int		flags)
{
	struct xfs_inode	*ip = XFS_I(infilp->f_mapping->host);
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_read_calls);

	if (infilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);

	trace_xfs_file_splice_read(ip, count, *ppos, ioflags);

	ret = generic_file_splice_read(infilp, ppos, pipe, count, flags);
	if (ret > 0)
		XFS_STATS_ADD(xs_read_bytes, ret);

	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
	return ret;
}

STATIC void
xfs_aio_write_isize_update(
	struct inode	*inode,
	loff_t		*ppos,
	ssize_t		bytes_written)
{
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		isize = i_size_read(inode);

	if (bytes_written > 0)
		XFS_STATS_ADD(xs_write_bytes, bytes_written);

	if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
					*ppos > isize))
		*ppos = isize;

	if (*ppos > ip->i_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		if (*ppos > ip->i_size)
			ip->i_size = *ppos;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
 * part of the I/O may have been written to disk before the error occurred.  In
 * this case the on-disk file size may have been adjusted beyond the in-memory
 * file size and now needs to be truncated back.
 */
STATIC void
xfs_aio_write_newsize_update(
	struct xfs_inode	*ip)
{
	if (ip->i_new_size) {
		xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
		ip->i_new_size = 0;
		if (ip->i_d.di_size > ip->i_size)
			ip->i_d.di_size = ip->i_size;
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	}
}

/*
 * xfs_file_splice_write() does not use xfs_rw_ilock() because
 * generic_file_splice_write() takes the i_mutex itself.  This, in theory,
 * could cause lock inversions between the aio_write path and the splice path
 * if someone is doing concurrent splice(2) based writes and write(2) based
 * writes to the same inode.  The only real way to fix this is to re-implement
 * the generic code here with correct locking orders.
 */
STATIC ssize_t
xfs_file_splice_write(
	struct pipe_inode_info	*pipe,
	struct file		*outfilp,
	loff_t			*ppos,
	size_t			count,
	unsigned int		flags)
{
	struct inode		*inode = outfilp->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			ioflags = 0;
	ssize_t			ret;

	XFS_STATS_INC(xs_write_calls);

	if (outfilp->f_mode & FMODE_NOCMTIME)
		ioflags |= IO_INVIS;

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	new_size = *ppos + count;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	trace_xfs_file_splice_write(ip, count, *ppos, ioflags);

	ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);

	xfs_aio_write_isize_update(inode, ppos, ret);
	xfs_aio_write_newsize_update(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return ret;
}

/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}

/*
 * Zero any on disk space between the current EOF and the new,
 * larger EOF.  This handles the normal case of zeroing the remainder
 * of the last block in the file and the unusual case of zeroing blocks
 * out beyond the size of the file.  This second case only happens
 * with fixed size extents and when the system crashes before the inode
 * size was updated but after blocks were allocated.  If fill is set,
 * then any holes in the range are filled and zeroed.  If not, the holes
 * are left alone as holes.
 */
int					/* error (positive) */
xfs_zero_eof(
	xfs_inode_t	*ip,
	xfs_off_t	offset,		/* starting I/O offset */
	xfs_fsize_t	isize)		/* current inode size */
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	start_zero_fsb;
	xfs_fileoff_t	end_zero_fsb;
	xfs_fileoff_t	zero_count_fsb;
	xfs_fileoff_t	last_fsb;
	xfs_fileoff_t	zero_off;
	xfs_fsize_t	zero_len;
	int		nimaps;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	/*
	 * First handle zeroing the block on which isize resides.
	 * We only zero a part of that block so it is handled specially.
	 */
	error = xfs_zero_last_block(ip, offset, isize);
	if (error) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
		return error;
	}

	/*
	 * Calculate the range between the new size and the old
	 * where blocks needing to be zeroed may exist.  To get the
	 * block where the last byte in the file currently resides,
	 * we need to subtract one from the size and truncate back
	 * to a block boundary.  We subtract 1 in case the size is
	 * exactly on a block boundary.
	 */
	last_fsb = isize ? XFS_B_TO_FSBT(mp, isize - 1) : (xfs_fileoff_t)-1;
	start_zero_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
	end_zero_fsb = XFS_B_TO_FSBT(mp, offset - 1);
	ASSERT((xfs_sfiloff_t)last_fsb < (xfs_sfiloff_t)start_zero_fsb);
	if (last_fsb == end_zero_fsb) {
		/*
		 * The size was only incremented on its last block.
		 * We took care of that above, so just return.
		 */
		return 0;
	}

	ASSERT(start_zero_fsb <= end_zero_fsb);
	while (start_zero_fsb <= end_zero_fsb) {
		nimaps = 1;
		zero_count_fsb = end_zero_fsb - start_zero_fsb + 1;
		error = xfs_bmapi(NULL, ip, start_zero_fsb, zero_count_fsb,
				  0, NULL, 0, &imap, &nimaps, NULL);
		if (error) {
			ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
			return error;
		}
		ASSERT(nimaps > 0);

		if (imap.br_state == XFS_EXT_UNWRITTEN ||
		    imap.br_startblock == HOLESTARTBLOCK) {
			/*
			 * This loop handles initializing pages that were
			 * partially initialized by the code below this
			 * loop.  It basically zeroes the part of the page
			 * that sits on a hole and sets the page as P_HOLE
			 * and calls remapf if it is a mapped file.
			 */
			start_zero_fsb = imap.br_startoff + imap.br_blockcount;
			ASSERT(start_zero_fsb <= (end_zero_fsb + 1));
			continue;
		}

		/*
		 * There are blocks we need to zero.
		 * Drop the inode lock while we're doing the I/O.
		 * We'll still have the iolock to protect us.
		 */
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

		zero_off = XFS_FSB_TO_B(mp, start_zero_fsb);
		zero_len = XFS_FSB_TO_B(mp, imap.br_blockcount);

		if ((zero_off + zero_len) > offset)
			zero_len = offset - zero_off;

		error = xfs_iozero(ip, zero_off, zero_len);
		if (error) {
			goto out_lock;
		}

		start_zero_fsb = imap.br_startoff + imap.br_blockcount;
		ASSERT(start_zero_fsb <= (end_zero_fsb + 1));

		xfs_ilock(ip, XFS_ILOCK_EXCL);
	}

	return 0;

out_lock:
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
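
/*
 * Illustrative example (assuming a 4096-byte filesystem block size): for
 * xfs_zero_eof(ip, 20000, 5000), i.e. a write starting at offset 20000 into
 * a 5000-byte file:
 *
 *	last_fsb       = XFS_B_TO_FSBT(mp, 4999)  = 1
 *	start_zero_fsb = XFS_B_TO_FSB(mp, 5000)   = 2
 *	end_zero_fsb   = XFS_B_TO_FSBT(mp, 19999) = 4
 *
 * xfs_zero_last_block() zeroes bytes 5000-8191 of block 1, and the loop above
 * then zeroes any allocated, written blocks in the range 2-4 (capped at the
 * write offset); holes and unwritten extents in that range are skipped.
 */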

/*
 * Common pre-write limit and setup checks.
 *
 * Returns with iolock held according to @iolock.
 */
STATIC ssize_t
xfs_file_aio_write_checks(
	struct file		*file,
	loff_t			*pos,
	size_t			*count,
	int			*iolock)
{
	struct inode		*inode = file->f_mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	xfs_fsize_t		new_size;
	int			error = 0;

	xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
	error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
	if (error) {
		xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
		*iolock = 0;
		return error;
	}

	new_size = *pos + *count;
	if (new_size > ip->i_size)
		ip->i_new_size = new_size;

	if (likely(!(file->f_mode & FMODE_NOCMTIME)))
		file_update_time(file);

	/*
	 * If the offset is beyond the size of the file, we need to zero any
	 * blocks that fall between the existing EOF and the start of this
	 * write.
	 */
	if (*pos > ip->i_size)
		error = -xfs_zero_eof(ip, *pos, ip->i_size);

	xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
	if (error)
		return error;

	/*
	 * If we're writing the file then make sure to clear the setuid and
	 * setgid bits if the process is not being run by root.  This keeps
	 * people from modifying setuid and setgid binaries.
	 */
	return file_remove_suid(file);
}

/*
 * xfs_file_dio_aio_write - handle direct IO writes
 *
 * Lock the inode appropriately to prepare for and issue a direct IO write.
 * By separating it from the buffered write path we remove all the tricky-to-
 * follow locking changes and looping.
 *
 * If there are cached pages or we're extending the file, we need IOLOCK_EXCL
 * until we're sure the bytes at the new EOF have been zeroed and/or the cached
 * pages are flushed out.
 *
 * In most cases the direct IO writes will be done holding IOLOCK_SHARED
 * allowing them to be done in parallel with reads and other direct IO writes.
 * However, if the IO is not aligned to filesystem blocks, the direct IO layer
 * needs to do sub-block zeroing and that requires serialisation against other
 * direct IOs to the same block.  In this case we need to serialise the
 * submission of the unaligned IOs so that we don't get racing block zeroing in
 * the dio layer.  To avoid the problem with aio, we also need to wait for
 * outstanding IOs to complete so that unwritten extent conversion is completed
 * before we try to map the overlapping block.  This is currently implemented
 * by hitting it with a big hammer (i.e. xfs_ioend_wait()).
 *
 * Returns with locks held indicated by @iolock and errors indicated by
 * negative return values.
 */
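/*
 * Illustrative example of the alignment rules described above (assuming
 * 512-byte sectors and 4096-byte filesystem blocks): a 512-byte direct write
 * at offset 512 passes the sector alignment check below, but pos is not a
 * multiple of the block size, so unaligned_io is set and the submission is
 * serialised with IOLOCK_EXCL plus xfs_ioend_wait().  A 4096-byte write at
 * offset 8192 that stays within i_size, on a file with no cached pages,
 * stays on the IOLOCK_SHARED fast path.
 */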
STATIC ssize_t
xfs_file_dio_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	ssize_t			ret = 0;
	size_t			count = ocount;
	int			unaligned_io = 0;
	struct xfs_buftarg	*target = XFS_IS_REALTIME_INODE(ip) ?
					mp->m_rtdev_targp : mp->m_ddev_targp;

	*iolock = 0;
	if ((pos & target->bt_smask) || (count & target->bt_smask))
		return -XFS_ERROR(EINVAL);

	if ((pos & mp->m_blockmask) || ((pos + count) & mp->m_blockmask))
		unaligned_io = 1;

	if (unaligned_io || mapping->nrpages || pos > ip->i_size)
		*iolock = XFS_IOLOCK_EXCL;
	else
		*iolock = XFS_IOLOCK_SHARED;
	xfs_rw_ilock(ip, *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	/*
	 * Recheck if there are cached pages that need invalidating after we
	 * got the iolock to protect against other threads adding new pages
	 * while we were waiting for the iolock.
	 */
	if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
		xfs_rw_iunlock(ip, *iolock);
		*iolock = XFS_IOLOCK_EXCL;
		xfs_rw_ilock(ip, *iolock);
	}

	if (mapping->nrpages) {
		ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
						FI_REMAPF_LOCKED);
		if (ret)
			return ret;
	}

	/*
	 * If we are doing unaligned IO, wait for all other IO to drain,
	 * otherwise demote the lock if we had to flush cached pages.
	 */
	if (unaligned_io)
		xfs_ioend_wait(ip);
	else if (*iolock == XFS_IOLOCK_EXCL) {
		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
		*iolock = XFS_IOLOCK_SHARED;
	}

	trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_direct_write(iocb, iovp,
			&nr_segs, pos, &iocb->ki_pos, count, ocount);

	/* No fallback to buffered IO on errors for XFS. */
	ASSERT(ret < 0 || ret == count);
	return ret;
}

STATIC ssize_t
xfs_file_buffered_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos,
	size_t			ocount,
	int			*iolock)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			enospc = 0;
	size_t			count = ocount;

	*iolock = XFS_IOLOCK_EXCL;
	xfs_rw_ilock(ip, *iolock);

	ret = xfs_file_aio_write_checks(file, &pos, &count, iolock);
	if (ret)
		return ret;

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

write_retry:
	trace_xfs_file_buffered_write(ip, count, iocb->ki_pos, 0);
	ret = generic_file_buffered_write(iocb, iovp, nr_segs,
			pos, &iocb->ki_pos, count, ret);
	/*
	 * If we just got an ENOSPC, flush the inode now that we aren't
	 * holding any page locks and retry *once*.
	 */
	if (ret == -ENOSPC && !enospc) {
		ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
		if (ret)
			return ret;
		enospc = 1;
		goto write_retry;
	}
	current->backing_dev_info = NULL;
	return ret;
}

STATIC ssize_t
xfs_file_aio_write(
	struct kiocb		*iocb,
	const struct iovec	*iovp,
	unsigned long		nr_segs,
	loff_t			pos)
{
	struct file		*file = iocb->ki_filp;
	struct address_space	*mapping = file->f_mapping;
	struct inode		*inode = mapping->host;
	struct xfs_inode	*ip = XFS_I(inode);
	ssize_t			ret;
	int			iolock;
	size_t			ocount = 0;

	XFS_STATS_INC(xs_write_calls);

	BUG_ON(iocb->ki_pos != pos);

	ret = generic_segment_checks(iovp, &nr_segs, &ocount, VERIFY_READ);
	if (ret)
		return ret;

	if (ocount == 0)
		return 0;

	xfs_wait_for_freeze(ip->i_mount, SB_FREEZE_WRITE);

	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EIO;

	if (unlikely(file->f_flags & O_DIRECT))
		ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);
	else
		ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
						ocount, &iolock);

	xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);

	if (ret <= 0)
		goto out_unlock;

	/* Handle various SYNC-type writes */
	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
		loff_t end = pos + ret - 1;
		int error;

		xfs_rw_iunlock(ip, iolock);
		error = xfs_file_fsync(file, pos, end,
				       (file->f_flags & __O_SYNC) ? 0 : 1);
		xfs_rw_ilock(ip, iolock);
		if (error)
			ret = error;
	}

out_unlock:
	xfs_aio_write_newsize_update(ip);
	xfs_rw_iunlock(ip, iolock);
	return ret;
}

STATIC long
xfs_file_fallocate(
	struct file	*file,
	int		mode,
	loff_t		offset,
	loff_t		len)
{
	struct inode	*inode = file->f_path.dentry->d_inode;
	long		error;
	loff_t		new_size = 0;
	xfs_flock64_t	bf;
	xfs_inode_t	*ip = XFS_I(inode);
	int		cmd = XFS_IOC_RESVSP;
	int		attr_flags = XFS_ATTR_NOLOCK;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	bf.l_whence = 0;
	bf.l_start = offset;
	bf.l_len = len;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = XFS_IOC_UNRESVSP;

	/* check the new inode size is valid before allocating */
	if (!(mode & FALLOC_FL_KEEP_SIZE) &&
	    offset + len > i_size_read(inode)) {
		new_size = offset + len;
		error = inode_newsize_ok(inode, new_size);
		if (error)
			goto out_unlock;
	}

	if (file->f_flags & O_DSYNC)
		attr_flags |= XFS_ATTR_SYNC;

	error = -xfs_change_file_space(ip, cmd, &bf, 0, attr_flags);
	if (error)
		goto out_unlock;

	/* Change file size if needed */
	if (new_size) {
		struct iattr iattr;

		iattr.ia_valid = ATTR_SIZE;
		iattr.ia_size = new_size;
		error = -xfs_setattr_size(ip, &iattr, XFS_ATTR_NOLOCK);
	}

out_unlock:
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
	return error;
}


STATIC int
xfs_file_open(
	struct inode	*inode,
	struct file	*file)
{
	if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
		return -EFBIG;
	if (XFS_FORCED_SHUTDOWN(XFS_M(inode->i_sb)))
		return -EIO;
	return 0;
}

STATIC int
xfs_dir_open(
	struct inode	*inode,
	struct file	*file)
{
	struct xfs_inode *ip = XFS_I(inode);
	int		mode;
	int		error;

	error = xfs_file_open(inode, file);
	if (error)
		return error;

	/*
	 * If there are any blocks, read-ahead block 0 as we're almost
	 * certain to have the next operation be a read there.
	 */
	mode = xfs_ilock_map_shared(ip);
	if (ip->i_d.di_nextents > 0)
		xfs_da_reada_buf(NULL, ip, 0, XFS_DATA_FORK);
	xfs_iunlock(ip, mode);
	return 0;
}

STATIC int
xfs_file_release(
	struct inode	*inode,
	struct file	*filp)
{
	return -xfs_release(XFS_I(inode));
}

STATIC int
xfs_file_readdir(
	struct file	*filp,
	void		*dirent,
	filldir_t	filldir)
{
	struct inode	*inode = filp->f_path.dentry->d_inode;
	xfs_inode_t	*ip = XFS_I(inode);
	int		error;
	size_t		bufsize;

	/*
	 * The Linux API doesn't pass the total size of the buffer
	 * we read into down to the filesystem.  With the filldir concept
	 * it's not needed for correct information, but the XFS dir2 leaf
	 * code wants an estimate of the buffer size to calculate its
	 * readahead window and size the buffers used for mapping to
	 * physical blocks.
	 *
	 * Try to give it an estimate that's good enough, maybe at some
	 * point we can change the ->readdir prototype to include the
	 * buffer size.  For now we use the current glibc buffer size.
	 */
	bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);

	error = xfs_readdir(ip, dirent, bufsize,
				(xfs_off_t *)&filp->f_pos, filldir);
	if (error)
		return -error;
	return 0;
}

STATIC int
xfs_file_mmap(
	struct file	*filp,
	struct vm_area_struct *vma)
{
	vma->vm_ops = &xfs_file_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;

	file_accessed(filp);
	return 0;
}

/*
 * An mmap()'d file has taken a write protection fault and is being made
 * writable.  We can set the page state up correctly for a writable page,
 * which means we can do correct delalloc accounting (ENOSPC checking!)
 * and unwritten extent mapping.
 */
STATIC int
xfs_vm_page_mkwrite(
	struct vm_area_struct	*vma,
	struct vm_fault		*vmf)
{
	return block_page_mkwrite(vma, vmf, xfs_get_blocks);
}

const struct file_operations xfs_file_operations = {
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= xfs_file_aio_read,
	.aio_write	= xfs_file_aio_write,
	.splice_read	= xfs_file_splice_read,
	.splice_write	= xfs_file_splice_write,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.mmap		= xfs_file_mmap,
	.open		= xfs_file_open,
	.release	= xfs_file_release,
	.fsync		= xfs_file_fsync,
	.fallocate	= xfs_file_fallocate,
};

const struct file_operations xfs_dir_file_operations = {
	.open		= xfs_dir_open,
	.read		= generic_read_dir,
	.readdir	= xfs_file_readdir,
	.llseek		= generic_file_llseek,
	.unlocked_ioctl	= xfs_file_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= xfs_file_compat_ioctl,
#endif
	.fsync		= xfs_file_fsync,
};

static const struct vm_operations_struct xfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= xfs_vm_page_mkwrite,
};