/fs/buffer.c
- /*
- * linux/fs/buffer.c
- *
- * Copyright (C) 1991, 1992, 2002 Linus Torvalds
- */
- /*
- * Start bdflush() with kernel_thread not syscall - Paul Gortmaker, 12/95
- *
- * Removed a lot of unnecessary code and simplified things now that
- * the buffer cache isn't our primary cache - Andrew Tridgell 12/96
- *
- * Speed up hash, lru, and free list operations. Use gfp() for allocating
- * hash table, use SLAB cache for buffer heads. SMP threading. -DaveM
- *
- * Added 32k buffer block sizes - these are required for older ARM systems. - RMK
- *
- * async buffer flushing, 1999 Andrea Arcangeli <andrea@suse.de>
- */
- #include <linux/kernel.h>
- #include <linux/syscalls.h>
- #include <linux/fs.h>
- #include <linux/mm.h>
- #include <linux/percpu.h>
- #include <linux/slab.h>
- #include <linux/capability.h>
- #include <linux/blkdev.h>
- #include <linux/file.h>
- #include <linux/quotaops.h>
- #include <linux/highmem.h>
- #include <linux/module.h>
- #include <linux/writeback.h>
- #include <linux/hash.h>
- #include <linux/suspend.h>
- #include <linux/buffer_head.h>
- #include <linux/task_io_accounting_ops.h>
- #include <linux/bio.h>
- #include <linux/notifier.h>
- #include <linux/cpu.h>
- #include <linux/bitops.h>
- #include <linux/mpage.h>
- #include <linux/bit_spinlock.h>
- #include <linux/cleancache.h>
- static int fsync_buffers_list(spinlock_t *lock, struct list_head *list);
- #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_assoc_buffers)
- inline void
- init_buffer(struct buffer_head *bh, bh_end_io_t *handler, void *private)
- {
- bh->b_end_io = handler;
- bh->b_private = private;
- }
- EXPORT_SYMBOL(init_buffer);
- static int sleep_on_buffer(void *word)
- {
- io_schedule();
- return 0;
- }
- void __lock_buffer(struct buffer_head *bh)
- {
- wait_on_bit_lock(&bh->b_state, BH_Lock, sleep_on_buffer,
- TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(__lock_buffer);
- void unlock_buffer(struct buffer_head *bh)
- {
- clear_bit_unlock(BH_Lock, &bh->b_state);
- smp_mb__after_clear_bit();
- wake_up_bit(&bh->b_state, BH_Lock);
- }
- EXPORT_SYMBOL(unlock_buffer);
- /*
- * Block until a buffer comes unlocked. This doesn't stop it
- * from becoming locked again - you have to lock it yourself
- * if you want to preserve its state.
- */
- void __wait_on_buffer(struct buffer_head * bh)
- {
- wait_on_bit(&bh->b_state, BH_Lock, sleep_on_buffer, TASK_UNINTERRUPTIBLE);
- }
- EXPORT_SYMBOL(__wait_on_buffer);
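- /*
- * For illustration: wait_on_buffer() alone does not give the caller a
- * stable buffer - the buffer may be re-locked by someone else as soon as
- * the waiter is woken. A caller that needs a stable view takes the lock
- * itself and re-checks state under it:
- *
- *	lock_buffer(bh);
- *	if (!buffer_uptodate(bh)) {
- *		... issue or wait for I/O while holding the lock ...
- *	}
- *	unlock_buffer(bh);
- */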
- static void
- __clear_page_buffers(struct page *page)
- {
- ClearPagePrivate(page);
- set_page_private(page, 0);
- page_cache_release(page);
- }
- static int quiet_error(struct buffer_head *bh)
- {
- if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
- return 0;
- return 1;
- }
- static void buffer_io_error(struct buffer_head *bh)
- {
- char b[BDEVNAME_SIZE];
- printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
- bdevname(bh->b_bdev, b),
- (unsigned long long)bh->b_blocknr);
- }
- /*
- * End-of-IO handler helper function which does not touch the bh after
- * unlocking it.
- * Note: unlock_buffer() sort-of does touch the bh after unlocking it, but
- * a race there is benign: unlock_buffer() only uses the bh's address for
- * hashing after unlocking the buffer, so it doesn't actually touch the bh
- * itself.
- */
- static void __end_buffer_read_notouch(struct buffer_head *bh, int uptodate)
- {
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- /* This happens due to failed READA attempts. */
- clear_buffer_uptodate(bh);
- }
- unlock_buffer(bh);
- }
- /*
- * Default synchronous end-of-IO handler. Just mark it up-to-date and
- * unlock the buffer. This is what ll_rw_block uses too.
- */
- void end_buffer_read_sync(struct buffer_head *bh, int uptodate)
- {
- __end_buffer_read_notouch(bh, uptodate);
- put_bh(bh);
- }
- EXPORT_SYMBOL(end_buffer_read_sync);
- void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
- {
- char b[BDEVNAME_SIZE];
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- if (!quiet_error(bh)) {
- buffer_io_error(bh);
- printk(KERN_WARNING "lost page write due to "
- "I/O error on %s\n",
- bdevname(bh->b_bdev, b));
- }
- set_buffer_write_io_error(bh);
- clear_buffer_uptodate(bh);
- }
- unlock_buffer(bh);
- put_bh(bh);
- }
- EXPORT_SYMBOL(end_buffer_write_sync);
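- /*
- * For illustration, the usual way to write a single dirty buffer
- * synchronously with this completion handler (this mirrors the kernel's
- * sync_dirty_buffer() helper):
- *
- *	lock_buffer(bh);
- *	if (test_clear_buffer_dirty(bh)) {
- *		get_bh(bh);
- *		bh->b_end_io = end_buffer_write_sync;
- *		submit_bh(WRITE_SYNC, bh);
- *		wait_on_buffer(bh);
- *		if (!buffer_uptodate(bh))
- *			err = -EIO;
- *	} else {
- *		unlock_buffer(bh);
- *	}
- */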
- /*
- * Various filesystems appear to want __find_get_block to be non-blocking.
- * But it's the page lock which protects the buffers. To get around this,
- * we get exclusion from try_to_free_buffers with the blockdev mapping's
- * private_lock.
- *
- * Hack idea: for the blockdev mapping, i_bufferlist_lock contention
- * may be quite high. This code could TryLock the page, and if that
- * succeeds, there is no need to take private_lock. (But if
- * private_lock is contended then so is mapping->tree_lock).
- */
- static struct buffer_head *
- __find_get_block_slow(struct block_device *bdev, sector_t block)
- {
- struct inode *bd_inode = bdev->bd_inode;
- struct address_space *bd_mapping = bd_inode->i_mapping;
- struct buffer_head *ret = NULL;
- pgoff_t index;
- struct buffer_head *bh;
- struct buffer_head *head;
- struct page *page;
- int all_mapped = 1;
- index = block >> (PAGE_CACHE_SHIFT - bd_inode->i_blkbits);
- page = find_get_page(bd_mapping, index);
- if (!page)
- goto out;
- spin_lock(&bd_mapping->private_lock);
- if (!page_has_buffers(page))
- goto out_unlock;
- head = page_buffers(page);
- bh = head;
- do {
- if (!buffer_mapped(bh))
- all_mapped = 0;
- else if (bh->b_blocknr == block) {
- ret = bh;
- get_bh(bh);
- goto out_unlock;
- }
- bh = bh->b_this_page;
- } while (bh != head);
- /* We might be here because some of the buffers on this page are
- * not mapped. This is due to various races between
- * file I/O on the block device and getblk. It gets dealt with
- * elsewhere; don't report an error if we had some unmapped buffers.
- */
- if (all_mapped) {
- printk("__find_get_block_slow() failed. "
- "block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block,
- (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%zu\n",
- bh->b_state, bh->b_size);
- printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits);
- }
- out_unlock:
- spin_unlock(&bd_mapping->private_lock);
- page_cache_release(page);
- out:
- return ret;
- }
- /* If invalidate_buffers() will trash dirty buffers, it means some kind
- of fs corruption is going on. Trashing dirty data always implies losing
- information that was supposed to be just stored on the physical layer
- by the user.
- Thus invalidate_buffers in general usage is not allowed to trash
- dirty buffers. For example ioctl(FLSBLKBUF) expects dirty data to
- be preserved. These buffers are simply skipped.
-
- We also skip buffers which are still in use. For example this can
- happen if a userspace program is reading the block device.
- NOTE: If the user removed a removable-media disk while there was still
- dirty data not synced to disk (due to a bug in the device driver or an
- error by the user), then by not destroying the dirty buffers we could
- also corrupt the next media inserted. A parameter is therefore needed to
- handle this case as safely as possible (trying not to corrupt the newly
- inserted disk with data belonging to the old, now corrupted, disk). Also,
- for the ramdisk the natural way to release the ramdisk memory is to
- destroy its dirty buffers.
- These are two special cases. Normal usage means the device driver
- issues a sync on the device (without waiting for I/O completion) and
- then an invalidate_buffers call that doesn't trash dirty buffers.
- For handling cache coherency with the blkdev pagecache the 'update' case
- has been introduced. It is needed to re-read from disk any pinned
- buffer. NOTE: re-reading from disk is destructive, so we can do it only
- when we assume nobody is changing the buffercache under our I/O and when
- we think the disk contains more recent information than the buffercache.
- The update == 1 pass marks the buffers we need to update; the update == 2
- pass does the actual I/O. */
- void invalidate_bdev(struct block_device *bdev)
- {
- struct address_space *mapping = bdev->bd_inode->i_mapping;
- if (mapping->nrpages == 0)
- return;
- invalidate_bh_lrus();
- lru_add_drain_all(); /* make sure all lru add caches are flushed */
- invalidate_mapping_pages(mapping, 0, -1);
- /* 99% of the time, we don't need to flush the cleancache on the bdev.
- * But, for the strange corners, let's be cautious.
- */
- cleancache_flush_inode(mapping);
- }
- EXPORT_SYMBOL(invalidate_bdev);
- /*
- * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
- */
- static void free_more_memory(void)
- {
- struct zone *zone;
- int nid;
- wakeup_flusher_threads(1024);
- yield();
- for_each_online_node(nid) {
- (void)first_zones_zonelist(node_zonelist(nid, GFP_NOFS),
- gfp_zone(GFP_NOFS), NULL,
- &zone);
- if (zone)
- try_to_free_pages(node_zonelist(nid, GFP_NOFS), 0,
- GFP_NOFS, NULL);
- }
- }
- /*
- * I/O completion handler for block_read_full_page() - pages
- * which come unlocked at the end of I/O.
- */
- static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
- {
- unsigned long flags;
- struct buffer_head *first;
- struct buffer_head *tmp;
- struct page *page;
- int page_uptodate = 1;
- BUG_ON(!buffer_async_read(bh));
- page = bh->b_page;
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- clear_buffer_uptodate(bh);
- if (!quiet_error(bh))
- buffer_io_error(bh);
- SetPageError(page);
- }
- /*
- * Be _very_ careful from here on. Bad things can happen if
- * two buffer heads end IO at almost the same time and both
- * decide that the page is now completely done.
- */
- first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
- clear_buffer_async_read(bh);
- unlock_buffer(bh);
- tmp = bh;
- do {
- if (!buffer_uptodate(tmp))
- page_uptodate = 0;
- if (buffer_async_read(tmp)) {
- BUG_ON(!buffer_locked(tmp));
- goto still_busy;
- }
- tmp = tmp->b_this_page;
- } while (tmp != bh);
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- /*
- * If none of the buffers had errors and they are all
- * uptodate then we can set the page uptodate.
- */
- if (page_uptodate && !PageError(page))
- SetPageUptodate(page);
- unlock_page(page);
- return;
- still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
- }
- /*
- * Completion handler for block_write_full_page() - pages which are unlocked
- * during I/O, and which have PageWriteback cleared upon I/O completion.
- */
- void end_buffer_async_write(struct buffer_head *bh, int uptodate)
- {
- char b[BDEVNAME_SIZE];
- unsigned long flags;
- struct buffer_head *first;
- struct buffer_head *tmp;
- struct page *page;
- BUG_ON(!buffer_async_write(bh));
- page = bh->b_page;
- if (uptodate) {
- set_buffer_uptodate(bh);
- } else {
- if (!quiet_error(bh)) {
- buffer_io_error(bh);
- printk(KERN_WARNING "lost page write due to "
- "I/O error on %s\n",
- bdevname(bh->b_bdev, b));
- }
- set_bit(AS_EIO, &page->mapping->flags);
- set_buffer_write_io_error(bh);
- clear_buffer_uptodate(bh);
- SetPageError(page);
- }
- first = page_buffers(page);
- local_irq_save(flags);
- bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
- clear_buffer_async_write(bh);
- unlock_buffer(bh);
- tmp = bh->b_this_page;
- while (tmp != bh) {
- if (buffer_async_write(tmp)) {
- BUG_ON(!buffer_locked(tmp));
- goto still_busy;
- }
- tmp = tmp->b_this_page;
- }
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- end_page_writeback(page);
- return;
- still_busy:
- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
- local_irq_restore(flags);
- return;
- }
- EXPORT_SYMBOL(end_buffer_async_write);
- /*
- * If a page's buffers are under async read-in (end_buffer_async_read
- * completion) then there is a possibility that another thread of
- * control could lock one of the buffers after it has completed
- * but while some of the other buffers have not completed. This
- * locked buffer would confuse end_buffer_async_read() into not unlocking
- * the page. So the absence of BH_Async_Read tells end_buffer_async_read()
- * that this buffer is not under async I/O.
- *
- * The page comes unlocked when it has no locked buffer_async buffers
- * left.
- *
- * PageLocked prevents anyone from starting new async I/O against any of
- * the buffers.
- *
- * PageWriteback is used to prevent simultaneous writeout of the same
- * page.
- *
- * PageLocked prevents anyone from starting writeback of a page which is
- * under read I/O (PageWriteback is only ever set against a locked page).
- */
- static void mark_buffer_async_read(struct buffer_head *bh)
- {
- bh->b_end_io = end_buffer_async_read;
- set_buffer_async_read(bh);
- }
- static void mark_buffer_async_write_endio(struct buffer_head *bh,
- bh_end_io_t *handler)
- {
- bh->b_end_io = handler;
- set_buffer_async_write(bh);
- }
- void mark_buffer_async_write(struct buffer_head *bh)
- {
- mark_buffer_async_write_endio(bh, end_buffer_async_write);
- }
- EXPORT_SYMBOL(mark_buffer_async_write);
- /*
- * fs/buffer.c contains helper functions for buffer-backed address space's
- * fsync functions. A common requirement for buffer-based filesystems is
- * that certain data from the backing blockdev needs to be written out for
- * a successful fsync(). For example, ext2 indirect blocks need to be
- * written back and waited upon before fsync() returns.
- *
- * The functions mark_buffer_inode_dirty(), fsync_inode_buffers(),
- * inode_has_buffers() and invalidate_inode_buffers() are provided for the
- * management of a list of dependent buffers at ->i_mapping->private_list.
- *
- * Locking is a little subtle: try_to_free_buffers() will remove buffers
- * from their controlling inode's queue when they are being freed. But
- * try_to_free_buffers() will be operating against the *blockdev* mapping
- * at the time, not against the S_ISREG file which depends on those buffers.
- * So the locking for private_list is via the private_lock in the address_space
- * which backs the buffers. Which is different from the address_space
- * against which the buffers are listed. So for a particular address_space,
- * mapping->private_lock does *not* protect mapping->private_list! In fact,
- * mapping->private_list will always be protected by the backing blockdev's
- * ->private_lock.
- *
- * Which introduces a requirement: all buffers on an address_space's
- * ->private_list must be from the same address_space: the blockdev's.
- *
- * address_spaces which do not place buffers at ->private_list via these
- * utility functions are free to use private_lock and private_list for
- * whatever they want. The only requirement is that list_empty(private_list)
- * be true at clear_inode() time.
- *
- * FIXME: clear_inode should not call invalidate_inode_buffers(). The
- * filesystems should do that. invalidate_inode_buffers() should just go
- * BUG_ON(!list_empty).
- *
- * FIXME: mark_buffer_dirty_inode() is a data-plane operation. It should
- * take an address_space, not an inode. And it should be called
- * mark_buffer_dirty_fsync() to clearly define why those buffers are being
- * queued up.
- *
- * FIXME: mark_buffer_dirty_inode() doesn't need to add the buffer to the
- * list if it is already on a list. Because if the buffer is on a list,
- * it *must* already be on the right one. If not, the filesystem is being
- * silly. This will save a ton of locking. But first we have to ensure
- * that buffers are taken *off* the old inode's list when they are freed
- * (presumably in truncate). That requires careful auditing of all
- * filesystems (do it inside bforget()). It could also be done by bringing
- * b_inode back.
- */
- /*
- * The buffer's backing address_space's private_lock must be held
- */
- static void __remove_assoc_queue(struct buffer_head *bh)
- {
- list_del_init(&bh->b_assoc_buffers);
- WARN_ON(!bh->b_assoc_map);
- if (buffer_write_io_error(bh))
- set_bit(AS_EIO, &bh->b_assoc_map->flags);
- bh->b_assoc_map = NULL;
- }
- int inode_has_buffers(struct inode *inode)
- {
- return !list_empty(&inode->i_data.private_list);
- }
- /*
- * osync is designed to support O_SYNC io. It waits synchronously for
- * all already-submitted IO to complete, but does not queue any new
- * writes to the disk.
- *
- * To do O_SYNC writes, just queue the buffer writes with ll_rw_block as
- * you dirty the buffers, and then use osync_inode_buffers to wait for
- * completion. Any other dirty buffers which are not yet queued for
- * write will not be flushed to disk by the osync.
- */
- static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
- {
- struct buffer_head *bh;
- struct list_head *p;
- int err = 0;
- spin_lock(lock);
- repeat:
- list_for_each_prev(p, list) {
- bh = BH_ENTRY(p);
- if (buffer_locked(bh)) {
- get_bh(bh);
- spin_unlock(lock);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- err = -EIO;
- brelse(bh);
- spin_lock(lock);
- goto repeat;
- }
- }
- spin_unlock(lock);
- return err;
- }
- static void do_thaw_one(struct super_block *sb, void *unused)
- {
- char b[BDEVNAME_SIZE];
- while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
- printk(KERN_WARNING "Emergency Thaw on %s\n",
- bdevname(sb->s_bdev, b));
- }
- static void do_thaw_all(struct work_struct *work)
- {
- iterate_supers(do_thaw_one, NULL);
- kfree(work);
- printk(KERN_WARNING "Emergency Thaw complete\n");
- }
- /**
- * emergency_thaw_all -- forcibly thaw every frozen filesystem
- *
- * Used for emergency unfreeze of all filesystems via SysRq
- */
- void emergency_thaw_all(void)
- {
- struct work_struct *work;
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (work) {
- INIT_WORK(work, do_thaw_all);
- schedule_work(work);
- }
- }
- /**
- * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
- * @mapping: the mapping which wants those buffers written
- *
- * Starts I/O against the buffers at mapping->private_list, and waits upon
- * that I/O.
- *
- * Basically, this is a convenience function for fsync().
- * @mapping is a file or directory which needs those buffers to be written for
- * a successful fsync().
- */
- int sync_mapping_buffers(struct address_space *mapping)
- {
- struct address_space *buffer_mapping = mapping->assoc_mapping;
- if (buffer_mapping == NULL || list_empty(&mapping->private_list))
- return 0;
- return fsync_buffers_list(&buffer_mapping->private_lock,
- &mapping->private_list);
- }
- EXPORT_SYMBOL(sync_mapping_buffers);
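- /*
- * For illustration, a filesystem's fsync path could combine data writeback
- * with this helper (the function name below is hypothetical):
- *
- *	static int myfs_sync_file_buffers(struct inode *inode)
- *	{
- *		int err = filemap_write_and_wait(inode->i_mapping);
- *		int err2 = sync_mapping_buffers(inode->i_mapping);
- *		return err ? err : err2;
- *	}
- */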
- /*
- * Called when we've recently written block `bblock', and it is known that
- * `bblock' was for a buffer_boundary() buffer. This means that the block at
- * `bblock + 1' is probably a dirty indirect block. Hunt it down and, if it's
- * dirty, schedule it for IO, so that indirects merge nicely with their data.
- */
- void write_boundary_block(struct block_device *bdev,
- sector_t bblock, unsigned blocksize)
- {
- struct buffer_head *bh = __find_get_block(bdev, bblock + 1, blocksize);
- if (bh) {
- if (buffer_dirty(bh))
- ll_rw_block(WRITE, 1, &bh);
- put_bh(bh);
- }
- }
- void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
- {
- struct address_space *mapping = inode->i_mapping;
- struct address_space *buffer_mapping = bh->b_page->mapping;
- mark_buffer_dirty(bh);
- if (!mapping->assoc_mapping) {
- mapping->assoc_mapping = buffer_mapping;
- } else {
- BUG_ON(mapping->assoc_mapping != buffer_mapping);
- }
- if (!bh->b_assoc_map) {
- spin_lock(&buffer_mapping->private_lock);
- list_move_tail(&bh->b_assoc_buffers,
- &mapping->private_list);
- bh->b_assoc_map = mapping;
- spin_unlock(&buffer_mapping->private_lock);
- }
- }
- EXPORT_SYMBOL(mark_buffer_dirty_inode);
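- /*
- * For illustration (the block number and modification are hypothetical): a
- * filesystem that has just updated an indirect block for @inode would
- * dirty it against the inode, so that a later sync_mapping_buffers()
- * during fsync() writes it out:
- *
- *	struct buffer_head *bh = sb_bread(inode->i_sb, indirect_blocknr);
- *	if (bh) {
- *		... modify bh->b_data ...
- *		mark_buffer_dirty_inode(bh, inode);
- *		brelse(bh);
- *	}
- */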
- /*
- * Mark the page dirty, and set it dirty in the radix tree, and mark the inode
- * dirty.
- *
- * If warn is true, then emit a warning if the page is not uptodate and has
- * not been truncated.
- */
- static void __set_page_dirty(struct page *page,
- struct address_space *mapping, int warn)
- {
- spin_lock_irq(&mapping->tree_lock);
- if (page->mapping) { /* Race with truncate? */
- WARN_ON_ONCE(warn && !PageUptodate(page));
- account_page_dirtied(page, mapping);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page), PAGECACHE_TAG_DIRTY);
- }
- spin_unlock_irq(&mapping->tree_lock);
- __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
- }
- /*
- * Add a page to the dirty page list.
- *
- * It is a sad fact of life that this function is called from several places
- * deeply under spinlocking. It may not sleep.
- *
- * If the page has buffers, the uptodate buffers are set dirty, to preserve
- * dirty-state coherency between the page and the buffers. If the page does
- * not have buffers then when they are later attached they will all be set
- * dirty.
- *
- * The buffers are dirtied before the page is dirtied. There's a small race
- * window in which a writepage caller may see the page cleanness but not the
- * buffer dirtiness. That's fine. If this code were to set the page dirty
- * before the buffers, a concurrent writepage caller could clear the page dirty
- * bit, see a bunch of clean buffers and we'd end up with dirty buffers/clean
- * page on the dirty page list.
- *
- * We use private_lock to lock against try_to_free_buffers while using the
- * page's buffer list. Also use this to protect against clean buffers being
- * added to the page after it was set dirty.
- *
- * FIXME: may need to call ->reservepage here as well. That's rather up to the
- * address_space though.
- */
- int __set_page_dirty_buffers(struct page *page)
- {
- int newly_dirty;
- struct address_space *mapping = page_mapping(page);
- if (unlikely(!mapping))
- return !TestSetPageDirty(page);
- spin_lock(&mapping->private_lock);
- if (page_has_buffers(page)) {
- struct buffer_head *head = page_buffers(page);
- struct buffer_head *bh = head;
- do {
- set_buffer_dirty(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
- newly_dirty = !TestSetPageDirty(page);
- spin_unlock(&mapping->private_lock);
- if (newly_dirty)
- __set_page_dirty(page, mapping, 1);
- return newly_dirty;
- }
- EXPORT_SYMBOL(__set_page_dirty_buffers);
- /*
- * Write out and wait upon a list of buffers.
- *
- * We have conflicting pressures: we want to make sure that all
- * initially dirty buffers get waited on, but that any subsequently
- * dirtied buffers don't. After all, we don't want fsync to last
- * forever if somebody is actively writing to the file.
- *
- * Do this in two main stages: first we copy dirty buffers to a
- * temporary inode list, queueing the writes as we go. Then we clean
- * up, waiting for those writes to complete.
- *
- * During this second stage, any subsequent updates to the file may end
- * up refiling the buffer on the original inode's dirty list again, so
- * there is a chance we will end up with a buffer queued for write but
- * not yet completed on that list. So, as a final cleanup we go through
- * the osync code to catch these locked, dirty buffers without requeuing
- * any newly dirty buffers for write.
- */
- static int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
- {
- struct buffer_head *bh;
- struct list_head tmp;
- struct address_space *mapping;
- int err = 0, err2;
- struct blk_plug plug;
- INIT_LIST_HEAD(&tmp);
- blk_start_plug(&plug);
- spin_lock(lock);
- while (!list_empty(list)) {
- bh = BH_ENTRY(list->next);
- mapping = bh->b_assoc_map;
- __remove_assoc_queue(bh);
- /* Avoid race with mark_buffer_dirty_inode() which does
- * a lockless check and we rely on seeing the dirty bit */
- smp_mb();
- if (buffer_dirty(bh) || buffer_locked(bh)) {
- list_add(&bh->b_assoc_buffers, &tmp);
- bh->b_assoc_map = mapping;
- if (buffer_dirty(bh)) {
- get_bh(bh);
- spin_unlock(lock);
- /*
- * Ensure any pending I/O completes so that
- * write_dirty_buffer() actually writes the
- * current contents - it is a noop if I/O is
- * still in flight on potentially older
- * contents.
- */
- write_dirty_buffer(bh, WRITE_SYNC);
- /*
- * Kick off IO for the previous mapping. Note
- * that we will not run the very last mapping,
- * wait_on_buffer() will do that for us
- * through sync_buffer().
- */
- brelse(bh);
- spin_lock(lock);
- }
- }
- }
- spin_unlock(lock);
- blk_finish_plug(&plug);
- spin_lock(lock);
- while (!list_empty(&tmp)) {
- bh = BH_ENTRY(tmp.prev);
- get_bh(bh);
- mapping = bh->b_assoc_map;
- __remove_assoc_queue(bh);
- /* Avoid race with mark_buffer_dirty_inode() which does
- * a lockless check and we rely on seeing the dirty bit */
- smp_mb();
- if (buffer_dirty(bh)) {
- list_add(&bh->b_assoc_buffers,
- &mapping->private_list);
- bh->b_assoc_map = mapping;
- }
- spin_unlock(lock);
- wait_on_buffer(bh);
- if (!buffer_uptodate(bh))
- err = -EIO;
- brelse(bh);
- spin_lock(lock);
- }
-
- spin_unlock(lock);
- err2 = osync_buffers_list(lock, list);
- if (err)
- return err;
- else
- return err2;
- }
- /*
- * Invalidate any and all dirty buffers on a given inode. We are
- * probably unmounting the fs, but that doesn't mean we have already
- * done a sync(). Just drop the buffers from the inode list.
- *
- * NOTE: we take the inode's blockdev's mapping's private_lock. Which
- * assumes that all the buffers are against the blockdev. Not true
- * for reiserfs.
- */
- void invalidate_inode_buffers(struct inode *inode)
- {
- if (inode_has_buffers(inode)) {
- struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->assoc_mapping;
- spin_lock(&buffer_mapping->private_lock);
- while (!list_empty(list))
- __remove_assoc_queue(BH_ENTRY(list->next));
- spin_unlock(&buffer_mapping->private_lock);
- }
- }
- EXPORT_SYMBOL(invalidate_inode_buffers);
- /*
- * Remove any clean buffers from the inode's buffer list. This is called
- * when we're trying to free the inode itself. Those buffers can pin it.
- *
- * Returns true if all buffers were removed.
- */
- int remove_inode_buffers(struct inode *inode)
- {
- int ret = 1;
- if (inode_has_buffers(inode)) {
- struct address_space *mapping = &inode->i_data;
- struct list_head *list = &mapping->private_list;
- struct address_space *buffer_mapping = mapping->assoc_mapping;
- spin_lock(&buffer_mapping->private_lock);
- while (!list_empty(list)) {
- struct buffer_head *bh = BH_ENTRY(list->next);
- if (buffer_dirty(bh)) {
- ret = 0;
- break;
- }
- __remove_assoc_queue(bh);
- }
- spin_unlock(&buffer_mapping->private_lock);
- }
- return ret;
- }
- /*
- * Create the appropriate buffers when given a page for data area and
- * the size of each buffer. Use the bh->b_this_page linked list to
- * follow the buffers created. Return NULL if unable to create more
- * buffers.
- *
- * The retry flag is used to differentiate async IO (paging, swapping)
- * which may not fail from ordinary buffer allocations.
- */
- struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
- int retry)
- {
- struct buffer_head *bh, *head;
- long offset;
- try_again:
- head = NULL;
- offset = PAGE_SIZE;
- while ((offset -= size) >= 0) {
- bh = alloc_buffer_head(GFP_NOFS);
- if (!bh)
- goto no_grow;
- bh->b_bdev = NULL;
- bh->b_this_page = head;
- bh->b_blocknr = -1;
- head = bh;
- bh->b_state = 0;
- atomic_set(&bh->b_count, 0);
- bh->b_size = size;
- /* Link the buffer to its page */
- set_bh_page(bh, page, offset);
- init_buffer(bh, NULL, NULL);
- }
- return head;
- /*
- * In case anything failed, we just free everything we got.
- */
- no_grow:
- if (head) {
- do {
- bh = head;
- head = head->b_this_page;
- free_buffer_head(bh);
- } while (head);
- }
- /*
- * Return failure for non-async IO requests. Async IO requests
- * are not allowed to fail, so we have to wait until buffer heads
- * become available. But we don't want tasks sleeping with
- * partially complete buffers, so all were released above.
- */
- if (!retry)
- return NULL;
- /* We're _really_ low on memory. Now we just
- * wait for old buffer heads to become free due to
- * finishing IO. Since this is an async request and
- * the reserve list is empty, we're sure there are
- * async buffer heads in use.
- */
- free_more_memory();
- goto try_again;
- }
- EXPORT_SYMBOL_GPL(alloc_page_buffers);
- static inline void
- link_dev_buffers(struct page *page, struct buffer_head *head)
- {
- struct buffer_head *bh, *tail;
- bh = head;
- do {
- tail = bh;
- bh = bh->b_this_page;
- } while (bh);
- tail->b_this_page = head;
- attach_page_buffers(page, head);
- }
- /*
- * Initialise the state of a blockdev page's buffers.
- */
- static void
- init_page_buffers(struct page *page, struct block_device *bdev,
- sector_t block, int size)
- {
- struct buffer_head *head = page_buffers(page);
- struct buffer_head *bh = head;
- int uptodate = PageUptodate(page);
- do {
- if (!buffer_mapped(bh)) {
- init_buffer(bh, NULL, NULL);
- bh->b_bdev = bdev;
- bh->b_blocknr = block;
- if (uptodate)
- set_buffer_uptodate(bh);
- set_buffer_mapped(bh);
- }
- block++;
- bh = bh->b_this_page;
- } while (bh != head);
- }
- /*
- * Create the page-cache page that contains the requested block.
- *
- * This is used purely for blockdev mappings.
- */
- static struct page *
- grow_dev_page(struct block_device *bdev, sector_t block,
- pgoff_t index, int size)
- {
- struct inode *inode = bdev->bd_inode;
- struct page *page;
- struct buffer_head *bh;
- page = find_or_create_page(inode->i_mapping, index,
- (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE);
- if (!page)
- return NULL;
- BUG_ON(!PageLocked(page));
- if (page_has_buffers(page)) {
- bh = page_buffers(page);
- if (bh->b_size == size) {
- init_page_buffers(page, bdev, block, size);
- return page;
- }
- if (!try_to_free_buffers(page))
- goto failed;
- }
- /*
- * Allocate some buffers for this page
- */
- bh = alloc_page_buffers(page, size, 0);
- if (!bh)
- goto failed;
- /*
- * Link the page to the buffers and initialise them. Take the
- * lock to be atomic wrt __find_get_block(), which does not
- * run under the page lock.
- */
- spin_lock(&inode->i_mapping->private_lock);
- link_dev_buffers(page, bh);
- init_page_buffers(page, bdev, block, size);
- spin_unlock(&inode->i_mapping->private_lock);
- return page;
- failed:
- BUG();
- unlock_page(page);
- page_cache_release(page);
- return NULL;
- }
- /*
- * Create buffers for the specified block device block's page. If
- * that page was dirty, the buffers are set dirty also.
- */
- static int
- grow_buffers(struct block_device *bdev, sector_t block, int size)
- {
- struct page *page;
- pgoff_t index;
- int sizebits;
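- /*
- * sizebits ends up as log2(PAGE_SIZE / size): the shift that converts a
- * block number for this blocksize into a page-cache index.
- */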
- sizebits = -1;
- do {
- sizebits++;
- } while ((size << sizebits) < PAGE_SIZE);
- index = block >> sizebits;
- /*
- * Check for a block which wants to lie outside our maximum possible
- * pagecache index. (this comparison is done using sector_t types).
- */
- if (unlikely(index != block >> sizebits)) {
- char b[BDEVNAME_SIZE];
- printk(KERN_ERR "%s: requested out-of-range block %llu for "
- "device %s\n",
- __func__, (unsigned long long)block,
- bdevname(bdev, b));
- return -EIO;
- }
- block = index << sizebits;
- /* Create a page with the proper size buffers. */
- page = grow_dev_page(bdev, block, index, size);
- if (!page)
- return 0;
- unlock_page(page);
- page_cache_release(page);
- return 1;
- }
- static struct buffer_head *
- __getblk_slow(struct block_device *bdev, sector_t block, int size)
- {
- /* Size must be multiple of hard sectorsize */
- if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
- (size < 512 || size > PAGE_SIZE))) {
- printk(KERN_ERR "getblk(): invalid block size %d requested\n",
- size);
- printk(KERN_ERR "logical block size: %d\n",
- bdev_logical_block_size(bdev));
- dump_stack();
- return NULL;
- }
- for (;;) {
- struct buffer_head * bh;
- int ret;
- bh = __find_get_block(bdev, block, size);
- if (bh)
- return bh;
- ret = grow_buffers(bdev, block, size);
- if (ret < 0)
- return NULL;
- if (ret == 0)
- free_more_memory();
- }
- }
- /*
- * The relationship between dirty buffers and dirty pages:
- *
- * Whenever a page has any dirty buffers, the page's dirty bit is set, and
- * the page is tagged dirty in its radix tree.
- *
- * At all times, the dirtiness of the buffers represents the dirtiness of
- * subsections of the page. If the page has buffers, the page dirty bit is
- * merely a hint about the true dirty state.
- *
- * When a page is set dirty in its entirety, all its buffers are marked dirty
- * (if the page has buffers).
- *
- * When a buffer is marked dirty, its page is dirtied, but the page's other
- * buffers are not.
- *
- * Also. When blockdev buffers are explicitly read with bread(), they
- * individually become uptodate. But their backing page remains not
- * uptodate - even if all of its buffers are uptodate. A subsequent
- * block_read_full_page() against that page will discover all the uptodate
- * buffers, will set the page uptodate and will perform no I/O.
- */
- /**
- * mark_buffer_dirty - mark a buffer_head as needing writeout
- * @bh: the buffer_head to mark dirty
- *
- * mark_buffer_dirty() will set the dirty bit against the buffer, then set its
- * backing page dirty, then tag the page as dirty in its address_space's radix
- * tree and then attach the address_space's inode to its superblock's dirty
- * inode list.
- *
- * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and mapping->host->i_lock.
- */
- void mark_buffer_dirty(struct buffer_head *bh)
- {
- WARN_ON_ONCE(!buffer_uptodate(bh));
- /*
- * Very *carefully* optimize the it-is-already-dirty case.
- *
- * Don't let the final "is it dirty" escape to before we
- * perhaps modified the buffer.
- */
- if (buffer_dirty(bh)) {
- smp_mb();
- if (buffer_dirty(bh))
- return;
- }
- if (!test_set_buffer_dirty(bh)) {
- struct page *page = bh->b_page;
- if (!TestSetPageDirty(page)) {
- struct address_space *mapping = page_mapping(page);
- if (mapping)
- __set_page_dirty(page, mapping, 0);
- }
- }
- }
- EXPORT_SYMBOL(mark_buffer_dirty);
- /*
- * Decrement a buffer_head's reference count. If all buffers against a page
- * have zero reference count, are clean and unlocked, and if the page is clean
- * and unlocked then try_to_free_buffers() may strip the buffers from the page
- * in preparation for freeing it (sometimes, rarely, buffers are removed from
- * a page but it ends up not being freed, and buffers may later be reattached).
- */
- void __brelse(struct buffer_head * buf)
- {
- if (atomic_read(&buf->b_count)) {
- put_bh(buf);
- return;
- }
- WARN(1, KERN_ERR "VFS: brelse: Trying to free free buffer\n");
- }
- EXPORT_SYMBOL(__brelse);
- /*
- * bforget() is like brelse(), except it discards any
- * potentially dirty data.
- */
- void __bforget(struct buffer_head *bh)
- {
- clear_buffer_dirty(bh);
- if (bh->b_assoc_map) {
- struct address_space *buffer_mapping = bh->b_page->mapping;
- spin_lock(&buffer_mapping->private_lock);
- list_del_init(&bh->b_assoc_buffers);
- bh->b_assoc_map = NULL;
- spin_unlock(&buffer_mapping->private_lock);
- }
- __brelse(bh);
- }
- EXPORT_SYMBOL(__bforget);
- static struct buffer_head *__bread_slow(struct buffer_head *bh)
- {
- lock_buffer(bh);
- if (buffer_uptodate(bh)) {
- unlock_buffer(bh);
- return bh;
- } else {
- get_bh(bh);
- bh->b_end_io = end_buffer_read_sync;
- submit_bh(READ, bh);
- wait_on_buffer(bh);
- if (buffer_uptodate(bh))
- return bh;
- }
- brelse(bh);
- return NULL;
- }
- /*
- * Per-cpu buffer LRU implementation, to reduce the cost of __find_get_block().
- * The bhs[] array is sorted - newest buffer is at bhs[0]. Buffers have their
- * refcount elevated by one when they're in an LRU. A buffer can only appear
- * once in a particular CPU's LRU. A single buffer can be present in multiple
- * CPU's LRUs at the same time.
- *
- * This is a transparent caching front-end to sb_bread(), sb_getblk() and
- * sb_find_get_block().
- *
- * The LRUs themselves only need locking against invalidate_bh_lrus. We use
- * a local interrupt disable for that.
- */
- #define BH_LRU_SIZE 8
- struct bh_lru {
- struct buffer_head *bhs[BH_LRU_SIZE];
- };
- static DEFINE_PER_CPU(struct bh_lru, bh_lrus) = {{ NULL }};
- #ifdef CONFIG_SMP
- #define bh_lru_lock() local_irq_disable()
- #define bh_lru_unlock() local_irq_enable()
- #else
- #define bh_lru_lock() preempt_disable()
- #define bh_lru_unlock() preempt_enable()
- #endif
- static inline void check_irqs_on(void)
- {
- #ifdef irqs_disabled
- BUG_ON(irqs_disabled());
- #endif
- }
- /*
- * The LRU management algorithm is dopey-but-simple. Sorry.
- */
- static void bh_lru_install(struct buffer_head *bh)
- {
- struct buffer_head *evictee = NULL;
- check_irqs_on();
- bh_lru_lock();
- if (__this_cpu_read(bh_lrus.bhs[0]) != bh) {
- struct buffer_head *bhs[BH_LRU_SIZE];
- int in;
- int out = 0;
- get_bh(bh);
- bhs[out++] = bh;
- for (in = 0; in < BH_LRU_SIZE; in++) {
- struct buffer_head *bh2 =
- __this_cpu_read(bh_lrus.bhs[in]);
- if (bh2 == bh) {
- __brelse(bh2);
- } else {
- if (out >= BH_LRU_SIZE) {
- BUG_ON(evictee != NULL);
- evictee = bh2;
- } else {
- bhs[out++] = bh2;
- }
- }
- }
- while (out < BH_LRU_SIZE)
- bhs[out++] = NULL;
- memcpy(__this_cpu_ptr(&bh_lrus.bhs), bhs, sizeof(bhs));
- }
- bh_lru_unlock();
- if (evictee)
- __brelse(evictee);
- }
- /*
- * Look up the bh in this cpu's LRU. If it's there, move it to the head.
- */
- static struct buffer_head *
- lookup_bh_lru(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *ret = NULL;
- unsigned int i;
- check_irqs_on();
- bh_lru_lock();
- for (i = 0; i < BH_LRU_SIZE; i++) {
- struct buffer_head *bh = __this_cpu_read(bh_lrus.bhs[i]);
- if (bh && bh->b_bdev == bdev &&
- bh->b_blocknr == block && bh->b_size == size) {
- if (i) {
- while (i) {
- __this_cpu_write(bh_lrus.bhs[i],
- __this_cpu_read(bh_lrus.bhs[i - 1]));
- i--;
- }
- __this_cpu_write(bh_lrus.bhs[0], bh);
- }
- get_bh(bh);
- ret = bh;
- break;
- }
- }
- bh_lru_unlock();
- return ret;
- }
- /*
- * Perform a pagecache lookup for the matching buffer. If it's there, refresh
- * it in the LRU and mark it as accessed. If it is not present then return
- * NULL
- */
- struct buffer_head *
- __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *bh = lookup_bh_lru(bdev, block, size);
- if (bh == NULL) {
- bh = __find_get_block_slow(bdev, block);
- if (bh)
- bh_lru_install(bh);
- }
- if (bh)
- touch_buffer(bh);
- return bh;
- }
- EXPORT_SYMBOL(__find_get_block);
- /*
- * __getblk will locate (and, if necessary, create) the buffer_head
- * which corresponds to the passed block_device, block and size. The
- * returned buffer has its reference count incremented.
- *
- * __getblk() cannot fail - it just keeps trying. If you pass it an
- * illegal block number, __getblk() will happily return a buffer_head
- * which represents the non-existent block. Very weird.
- *
- * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers()
- * attempt is failing. FIXME, perhaps?
- */
- struct buffer_head *
- __getblk(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *bh = __find_get_block(bdev, block, size);
- might_sleep();
- if (bh == NULL)
- bh = __getblk_slow(bdev, block, size);
- return bh;
- }
- EXPORT_SYMBOL(__getblk);
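- /*
- * For illustration (new_data, blocknr and blocksize are hypothetical): a
- * caller that is about to overwrite an entire block does not need to read
- * it first; __getblk() plus a full rewrite is enough:
- *
- *	struct buffer_head *bh = __getblk(bdev, blocknr, blocksize);
- *	lock_buffer(bh);
- *	memcpy(bh->b_data, new_data, blocksize);
- *	set_buffer_uptodate(bh);
- *	unlock_buffer(bh);
- *	mark_buffer_dirty(bh);
- *	brelse(bh);
- */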
- /*
- * Do async read-ahead on a buffer.
- */
- void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *bh = __getblk(bdev, block, size);
- if (likely(bh)) {
- ll_rw_block(READA, 1, &bh);
- brelse(bh);
- }
- }
- EXPORT_SYMBOL(__breadahead);
- /**
- * __bread() - reads a specified block and returns the bh
- * @bdev: the block_device to read from
- * @block: number of block
- * @size: size (in bytes) to read
- *
- * Reads a specified block, and returns buffer head that contains it.
- * It returns NULL if the block was unreadable.
- */
- struct buffer_head *
- __bread(struct block_device *bdev, sector_t block, unsigned size)
- {
- struct buffer_head *bh = __getblk(bdev, block, size);
- if (likely(bh) && !buffer_uptodate(bh))
- bh = __bread_slow(bh);
- return bh;
- }
- EXPORT_SYMBOL(__bread);
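- /*
- * For illustration (blocknr is hypothetical): reading one metadata block
- * and releasing it when done:
- *
- *	struct buffer_head *bh = __bread(bdev, blocknr, 512);
- *	if (!bh)
- *		return -EIO;
- *	... examine bh->b_data ...
- *	brelse(bh);
- *
- * Filesystems normally go through the sb_bread() wrapper, which supplies
- * the superblock's block size.
- */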
- /*
- * invalidate_bh_lrus() is called rarely - but not only at unmount.
- * This doesn't race because it runs on each CPU either in irq context
- * or with preemption disabled.
- */
- static void invalidate_bh_lru(void *arg)
- {
- struct bh_lru *b = &get_cpu_var(bh_lrus);
- int i;
- for (i = 0; i < BH_LRU_SIZE; i++) {
- brelse(b->bhs[i]);
- b->bhs[i] = NULL;
- }
- put_cpu_var(bh_lrus);
- }
-
- void invalidate_bh_lrus(void)
- {
- on_each_cpu(invalidate_bh_lru, NULL, 1);
- }
- EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
- void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset)
- {
- bh->b_page = page;
- BUG_ON(offset >= PAGE_SIZE);
- if (PageHighMem(page))
- /*
- * This catches illegal uses and preserves the offset:
- */
- bh->b_data = (char *)(0 + offset);
- else
- bh->b_data = page_address(page) + offset;
- }
- EXPORT_SYMBOL(set_bh_page);
- /*
- * Called when truncating a buffer on a page completely.
- */
- static void discard_buffer(struct buffer_head * bh)
- {
- lock_buffer(bh);
- clear_buffer_dirty(bh);
- bh->b_bdev = NULL;
- clear_buffer_mapped(bh);
- clear_buffer_req(bh);
- clear_buffer_new(bh);
- clear_buffer_delay(bh);
- clear_buffer_unwritten(bh);
- unlock_buffer(bh);
- }
- /**
- * block_invalidatepage - invalidate part or all of a buffer-backed page
- *
- * @page: the page which is affected
- * @offset: the index of the truncation point
- *
- * block_invalidatepage() is called when all or part of the page has become
- * invalidated by a truncate operation.
- *
- * block_invalidatepage() does not have to release all buffers, but it must
- * ensure that no dirty buffer is left outside @offset and that no I/O
- * is underway against any of the blocks which are outside the truncation
- * point, because the caller is about to free (and possibly reuse) those
- * blocks on-disk.
- */
- void block_invalidatepage(struct page *page, unsigned long offset)
- {
- struct buffer_head *head, *bh, *next;
- unsigned int curr_off = 0;
- BUG_ON(!PageLocked(page));
- if (!page_has_buffers(page))
- goto out;
- head = page_buffers(page);
- bh = head;
- do {
- unsigned int next_off = curr_off + bh->b_size;
- next = bh->b_this_page;
- /*
- * is this block fully invalidated?
- */
- if (offset <= curr_off)
- discard_buffer(bh);
- curr_off = next_off;
- bh = next;
- } while (bh != head);
- /*
- * We release buffers only if the entire page is being invalidated.
- * The get_block cached value has been unconditionally invalidated,
- * so real IO is not possible anymore.
- */
- if (offset == 0)
- try_to_release_page(page, 0);
- out:
- return;
- }
- EXPORT_SYMBOL(block_invalidatepage);
- /*
- * We attach and possibly dirty the buffers atomically wrt
- * __set_page_dirty_buffers() via private_lock. try_to_free_buffers
- * is already excluded via the page lock.
- */
- void create_empty_buffers(struct page *page,
- unsigned long blocksize, unsigned long b_state)
- {
- struct buffer_head *bh, *head, *tail;
- head = alloc_page_buffers(page, blocksize, 1);
- bh = head;
- do {
- bh->b_state |= b_state;
- tail = bh;
- bh = bh->b_this_page;
- } while (bh);
- tail->b_this_page = head;
- spin_lock(&page->mapping->private_lock);
- if (PageUptodate(page) || PageDirty(page)) {
- bh = head;
- do {
- if (PageDirty(page))
- set_buffer_dirty(bh);
- if (PageUptodate(page))
- set_buffer_uptodate(bh);
- bh = bh->b_this_page;
- } while (bh != head);
- }
- attach_page_buffers(page, head);
- spin_unlock(&page->mapping->private_lock);
- }
- EXPORT_SYMBOL(create_empty_buffers);
- /*
- * We are taking a block for data and we don't want any output from any
- * buffer-cache aliases starting from the return of this function and
- * until the moment when something explicitly marks the buffer
- * dirty (hopefully that will not happen until we free that block ;-)
- * We don't even need to mark it not-uptodate - nobody can expect
- * anything from a newly allocated buffer anyway. We used to use
- * unmap_buffer() for such invalidation, but that was wrong. We definitely
- * don't want to mark the alias unmapped, for example - it would confuse
- * anyone who might pick it with bread() afterwards...
- *
- * Also, note that bforget() doesn't lock the buffer, so there can
- * be writeout I/O going on against recently-freed buffers. We don't
- * wait on that I/O in bforget() - it's more efficient to wait on the I/O
- * only if we really need to. That happens here.
- */
- void unmap_underlying_metadata(struct block_device *bdev, sector_t block)
- {
- struct buffer_head *old_bh;
- might_sleep();
- old_bh = __find_get_block_slow(bdev, block);
- if (old_bh) {
- clear_buffer_dirty(old_bh);
- wait_on_buffer(old_bh);
- clear_buffer_req(old_bh);
- __brelse(old_bh);
- }
- }
- EXPORT_SYMBOL(unmap_underlying_metadata);
- /*
- * NOTE! All mapped/uptodate combinations are valid:
- *
- * Mapped Uptodate Meaning
- *
- * No No "unknown" - must do get_block()
- * No Yes "hole" - zero-filled
- * Yes No "allocated" - allocated on disk, not read in
- * Yes Yes "valid" - allocated and up-to-date in memory.
- *
- * "Dirty" is valid only with the last case (mapped+uptodate).
- */
- /*
- * While block_write_full_page is writing back the dirty buffers under
- * the page lock, whoever dirtied the buffers may decide to clean them
- * again at any time. We handle that by only looking at the buffer
- * state inside lock_buffer().
- *
- * If block_write_full_page() is called for regular writeback
- * (wbc->sync_mode == WB_SYNC_NONE) then it will redirty a page which has a
- * locked buffer. This can only happen if someone has written the buffer
- * directly, with submit_bh(). At the address_space level PageWriteback
- * prevents this contention from occurring.
- *
- * If block_write_full_page() is called with wbc->sync_mode ==
- * WB_SYNC_ALL, the writes are posted using WRITE_SYNC; this
- * causes the writes to be flagged as synchronous writes.
- */
- static int __block_write_full_page(struct inode *inode, struct page *page,
- get_block_t *get_block, struct writeback_control *wbc,
- bh_end_io_t *handler)
- {
- int err;
- sector_t block;
- sector_t last_block;
- struct buffer_head *bh, *head;
- const unsigned blocksize = 1 << inode->i_blkbits;
- int nr_underway = 0;
- int write_op = (wbc->sync_mode == WB_SYNC_ALL ?
- WRITE_SYNC : WRITE);
- BUG_ON(!PageLocked(page));
- last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
- if (!page_has_buffers(page)) {
- create_empty_buffers(page, blocksize,
- (1 << BH_Dirty)|(1 << BH_Uptodate));
- }
- /*
- * Be very careful. We have no exclusion from __set_page_dirty_buffers
- * here, and the (potentially unmapped) buffers may become dirty at
- * any time. If a buffer becomes dirty here after we've inspected it
- * then we just miss that fact, and the page stays dirty.
- *
- * Buffers outside i_size may be dirtied by __set_page_dirty_buffers;
- * handle that here by just cleaning them.
- */
- block = (sector_t)page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
- head = page_buffers(page);
- bh = head;
- /*
- * Get all the dirty buffers mapped to disk addresses and
- * handle any aliases from the underlying blockdev's mapping.
- */
- do {
- if (block > last_block) {
- /*
- * mapped buffers outside i_size will occur, because
- * this page can be outside i_size when there is a
- * truncate in progress.
- */
- /*
- * The buffer was zeroed by block_write_full_page()
- */
- clear_buffer_dirty(bh);
- set_buffer_uptodate(bh);
- } else if ((!buffer_mapped(bh) || buffer_delay(bh)) &&
- buffer_dirty(bh)) {
- WARN_ON(bh->b_size != blocksize);
- err = get_block(inode, block, bh, 1);
- if (err)
- goto recover;
- clear_buffer_delay(bh);
- if (buffer_new(bh)) {
- /* blockdev mappings never come here */
- clear_buffer_new(bh);
- unmap_underlying_metadata(bh->b_bdev,
- bh->b_blocknr);
- }
- }
- bh = bh->b_this_page;
- block++;
- } while (bh != head);
- do {
- if (!buffer_mapped(bh))
- continue;
- /*
- * If it's a fully non-blocking write attempt and we cannot
- * lock the buffer then redirty the page. Note that this can
- * potentially cause a busy-wait loop from writeback threads
- * and kswapd activity, but those code paths have their own
- * higher-level throttling.
- */
- if (wbc->sync_mode != WB_SYNC_NONE) {
- lock_buffer(bh);
- } else if (!trylock_buffer(bh)) {
- redirty_page_for_writepage(wbc, page);
- continue;
- }
- if (test_clear_buffer_dirty(bh)) {
- mark_buffer_async_write_endio(bh, handler);
- } else {
- unlock_buffer(bh);
- }
- } while ((bh = bh->b_this_page) != head);
- /*
- * The page and its buffers are protected by PageWriteback(), so we can
- * drop the bh refcounts early.
- */
- BUG_ON(PageWriteback(page));
- set_page_writeback(page);
- do {
- struct buffer_head *next = bh->b_this_page;
- if (buffer_async_write(bh)) {
- submit_bh(write_op, bh);
- nr_underway++;
- }
- bh = next;
- } while (bh != head);
- unlock_page(page);
- err = 0;
- done:
- if (nr_underway == 0) {
- /*
- * The page was marked dirty, but the buffers were
- * clean. Someone wrote them back by hand with
- * ll_rw_block/submit_bh. A rare case.
- */
- end_page_writeback(page);
- /*
- * The page and buffer_heads can be released at any time from
- * here on.
- */
- }
- return err;
- recover:
- /*
- * ENOSPC, or some other error. We may already have added some
- * blocks to the file, so we need to write these out to avoid
- * exposing stale data.
- * The page is currently locked and not marked for writeback
- */
- bh = head;
- /* Recovery: lock and submit the mapped buffers */
- do {
- if (buffer_mapped(bh) && buffer_dirty(bh) &&
- !buffer_delay(bh)) {
- lock_buffer(bh);
- mark_buffer_async_write_endio(bh, handler);
- } else {
- /*
- * The buffer may have been set dirty during
- * attachment to a dirty page.
- */
- clear_buffer_dirty(bh);
- }
- } while ((bh = bh->b_this_page) != head);
- SetPageError(page);
- BUG_ON(PageWriteback(page));
- mapping_set_error(page->mapping, err);
- set_page_writeback(page);
- do {
- struct buffer_head *next = bh->b_this_page;
- if (buffer_async_write(bh)) {
- clear_buffer_dirty(bh);
- submit_bh(write_op, bh);
- nr_underway++;
- }
- bh = next;
- } while (bh != head);
- unlock_page(page);
- goto done;
- }
- /*
- * If a page has any new buffers, zero them out here, and mark them uptodate
- * and dirty so they'll be written out (in order to prevent uninitialised
- * block data from leaking). And clear the new bit.
- */
- void page_zero_new_buffers(struct page *page, unsigned from, unsigned to)
- {
- unsigned int block_start, block_end;
- struct buffer_head *head, *bh