/fs/btrfs/extent_io.c
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/prefetch.h>
#include <linux/cleancache.h>
#include "extent_io.h"
#include "extent_map.h"
#include "compat.h"
#include "ctree.h"
#include "btrfs_inode.h"

static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

#define LEAK_DEBUG 0
#if LEAK_DEBUG
static DEFINE_SPINLOCK(leak_lock);
#endif

#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_io_tree *tree;
	get_extent_t *get_extent;

	/* tells writepage not to lock the state bits for this range
	 * it still does the unlocking
	 */
	unsigned int extent_locked:1;

	/* tells the submit_bio code to use a WRITE_SYNC */
	unsigned int sync_io:1;
};
int __init extent_io_init(void)
{
	extent_state_cache = kmem_cache_create("extent_state",
			sizeof(struct extent_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_state_cache)
		return -ENOMEM;

	extent_buffer_cache = kmem_cache_create("extent_buffers",
			sizeof(struct extent_buffer), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!extent_buffer_cache)
		goto free_state_cache;
	return 0;

free_state_cache:
	kmem_cache_destroy(extent_state_cache);
	return -ENOMEM;
}

void extent_io_exit(void)
{
	struct extent_state *state;
	struct extent_buffer *eb;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, leak_list);
		printk(KERN_ERR "btrfs state leak: start %llu end %llu "
		       "state %lu in tree %p refs %d\n",
		       (unsigned long long)state->start,
		       (unsigned long long)state->end,
		       state->state, state->tree, atomic_read(&state->refs));
		list_del(&state->leak_list);
		kmem_cache_free(extent_state_cache, state);
	}

	while (!list_empty(&buffers)) {
		eb = list_entry(buffers.next, struct extent_buffer, leak_list);
		printk(KERN_ERR "btrfs buffer leak start %llu len %lu "
		       "refs %d\n", (unsigned long long)eb->start,
		       eb->len, atomic_read(&eb->refs));
		list_del(&eb->leak_list);
		kmem_cache_free(extent_buffer_cache, eb);
	}
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_io_tree_init(struct extent_io_tree *tree,
			 struct address_space *mapping)
{
	tree->state = RB_ROOT;
	INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC);
	tree->ops = NULL;
	tree->dirty_bytes = 0;
	spin_lock_init(&tree->lock);
	spin_lock_init(&tree->buffer_lock);
	tree->mapping = mapping;
}
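/*
 * Usage sketch (illustrative only; in btrfs proper the tree is embedded
 * in struct btrfs_inode and set up when the inode is initialized):
 *
 *	struct extent_io_tree io_tree;
 *
 *	extent_io_tree_init(&io_tree, inode->i_mapping);
 */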
static struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
#if LEAK_DEBUG
	unsigned long flags;
#endif

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state)
		return state;
	state->state = 0;
	state->private = 0;
	state->tree = NULL;
#if LEAK_DEBUG
	spin_lock_irqsave(&leak_lock, flags);
	list_add(&state->leak_list, &states);
	spin_unlock_irqrestore(&leak_lock, flags);
#endif
	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}

void free_extent_state(struct extent_state *state)
{
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
#if LEAK_DEBUG
		unsigned long flags;
#endif
		WARN_ON(state->tree);
#if LEAK_DEBUG
		spin_lock_irqsave(&leak_lock, flags);
		list_del(&state->leak_list);
		spin_unlock_irqrestore(&leak_lock, flags);
#endif
		kmem_cache_free(extent_state_cache, state);
	}
}

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}
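/*
 * Note: entries are keyed on their [start, end] range, and tree_insert()
 * walks left when 'offset' is below an entry's start and right when it
 * is past its end; a non-NULL return therefore means 'offset' landed
 * inside an existing entry and the new node was not linked in.
 */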
static struct rb_node *__etree_search(struct extent_io_tree *tree, u64 offset,
				      struct rb_node **prev_ret,
				      struct rb_node **next_ret)
{
	struct rb_root *root = &tree->state;
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct rb_node *orig_prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while (n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}

	if (prev_ret) {
		orig_prev = prev;
		while (prev && offset > prev_entry->end) {
			prev = rb_next(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*prev_ret = prev;
		prev = orig_prev;
	}

	if (next_ret) {
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		while (prev && offset < prev_entry->start) {
			prev = rb_prev(prev);
			prev_entry = rb_entry(prev, struct tree_entry, rb_node);
		}
		*next_ret = prev;
	}
	return NULL;
}

static inline struct rb_node *tree_search(struct extent_io_tree *tree,
					  u64 offset)
{
	struct rb_node *prev = NULL;
	struct rb_node *ret;

	ret = __etree_search(tree, offset, &prev, NULL);
	if (!ret)
		return prev;
	return ret;
}
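/*
 * tree_search() therefore returns the entry containing 'offset' when one
 * exists, and otherwise the first entry that ends after 'offset', which
 * is exactly the starting point the range loops below want.
 */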
static void merge_cb(struct extent_io_tree *tree, struct extent_state *new,
		     struct extent_state *other)
{
	if (tree->ops && tree->ops->merge_extent_hook)
		tree->ops->merge_extent_hook(tree->mapping->host, new,
					     other);
}
/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree.  Extents with EXTENT_IOBITS or EXTENT_BOUNDARY in
 * their state field are not merged because the end_io handlers need to
 * be able to do operations on them without sleeping (or doing
 * allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static void merge_state(struct extent_io_tree *tree,
			struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		return;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->start = other->start;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			merge_cb(tree, state, other);
			state->end = other->end;
			other->tree = NULL;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
}

static void set_state_cb(struct extent_io_tree *tree,
			 struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->set_bit_hook)
		tree->ops->set_bit_hook(tree->mapping->host, state, bits);
}

static void clear_state_cb(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits)
{
	if (tree->ops && tree->ops->clear_bit_hook)
		tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
}

static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state, int *bits);

/*
 * insert an extent_state struct into the tree.  'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally.  This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_io_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int *bits)
{
	struct rb_node *node;

	if (end < start) {
		printk(KERN_ERR "btrfs end < start %llu %llu\n",
		       (unsigned long long)end,
		       (unsigned long long)start);
		WARN_ON(1);
	}
	state->start = start;
	state->end = end;

	set_state_bits(tree, state, bits);

	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk(KERN_ERR "btrfs found node %llu %llu on insert of "
		       "%llu %llu\n", (unsigned long long)found->start,
		       (unsigned long long)found->end,
		       (unsigned long long)start, (unsigned long long)end);
		return -EEXIST;
	}
	state->tree = tree;
	merge_state(tree, state);
	return 0;
}
static void split_cb(struct extent_io_tree *tree, struct extent_state *orig,
		     u64 split)
{
	if (tree->ops && tree->ops->split_extent_hook)
		tree->ops->split_extent_hook(tree->mapping->host, orig, split);
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half.  'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling,
 * the tree has 'orig' at [orig->start, orig->end].  After calling, there
 * are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [ split, orig->end ]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_io_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;

	split_cb(tree, orig, split);

	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		free_extent_state(prealloc);
		return -EEXIST;
	}
	prealloc->tree = tree;
	return 0;
}
/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up anyone waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits, int wake)
{
	int bits_to_clear = *bits & ~EXTENT_CTLBITS;
	int ret = state->state & bits_to_clear;

	if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		WARN_ON(range > tree->dirty_bytes);
		tree->dirty_bytes -= range;
	}
	clear_state_cb(tree, state, bits);
	state->state &= ~bits_to_clear;
	if (wake)
		wake_up(&state->wq);
	if (state->state == 0) {
		if (state->tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->tree = NULL;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

static struct extent_state *
alloc_extent_state_atomic(struct extent_state *prealloc)
{
	if (!prealloc)
		prealloc = alloc_extent_state(GFP_ATOMIC);

	return prealloc;
}
/*
 * clear some bits on a range in the tree.  This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete,
		     struct extent_state **cached_state,
		     gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *cached;
	struct extent_state *prealloc = NULL;
	struct rb_node *next_node;
	struct rb_node *node;
	u64 last_end;
	int err;
	int set = 0;
	int clear = 0;

	if (delete)
		bits |= ~EXTENT_CTLBITS;
	bits |= EXTENT_FIRST_DELALLOC;

	if (bits & (EXTENT_IOBITS | EXTENT_BOUNDARY))
		clear = 1;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	spin_lock(&tree->lock);
	if (cached_state) {
		cached = *cached_state;

		if (clear) {
			*cached_state = NULL;
			cached_state = NULL;
		}

		if (cached && cached->tree && cached->start <= start &&
		    cached->end > start) {
			if (clear)
				atomic_dec(&cached->refs);
			state = cached;
			goto hit_next;
		}
		if (clear)
			free_extent_state(cached);
	}
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);
	last_end = state->end;

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again.  It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set |= clear_state_bit(tree, state, &bits, wake);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);
		if (wake)
			wake_up(&state->wq);

		set |= clear_state_bit(tree, prealloc, &bits, wake);

		prealloc = NULL;
		goto out;
	}

	if (state->end < end && prealloc && !need_resched())
		next_node = rb_next(&state->rb_node);
	else
		next_node = NULL;

	set |= clear_state_bit(tree, state, &bits, wake);
	if (last_end == (u64)-1)
		goto out;
	start = last_end + 1;
	if (start <= end && next_node) {
		state = rb_entry(next_node, struct extent_state,
				 rb_node);
		if (state->start == start)
			goto hit_next;
	}
	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
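/*
 * Usage sketch (illustrative only): dropping the dirty and delalloc bits
 * on a range and waking any waiters, with no cached state:
 *
 *	clear_extent_bit(tree, start, end,
 *			 EXTENT_DIRTY | EXTENT_DELALLOC,
 *			 1, 0, NULL, GFP_NOFS);
 */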
static int wait_on_state(struct extent_io_tree *tree,
			 struct extent_state *state)
		__releases(tree->lock)
		__acquires(tree->lock)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&tree->lock);
	schedule();
	spin_lock(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	spin_lock(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(tree, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		cond_resched_lock(&tree->lock);
	}
out:
	spin_unlock(&tree->lock);
	return 0;
}
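/*
 * The atomic_inc() taken before wait_on_state() above is what keeps the
 * state struct alive across the unlock/schedule/lock cycle; by the time
 * we wake up the struct may have been merged or split, so the loop
 * restarts the search from 'start' instead of trusting the old node.
 */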
static void set_state_bits(struct extent_io_tree *tree,
			   struct extent_state *state,
			   int *bits)
{
	int bits_to_set = *bits & ~EXTENT_CTLBITS;

	set_state_cb(tree, state, bits);
	if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
		u64 range = state->end - state->start + 1;
		tree->dirty_bytes += range;
	}
	state->state |= bits_to_set;
}

static void cache_state(struct extent_state *state,
			struct extent_state **cached_ptr)
{
	if (cached_ptr && !(*cached_ptr)) {
		if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) {
			*cached_ptr = state;
			atomic_inc(&state->refs);
		}
	}
}

static void uncache_state(struct extent_state **cached_ptr)
{
	if (cached_ptr && (*cached_ptr)) {
		struct extent_state *state = *cached_ptr;
		*cached_ptr = NULL;
		free_extent_state(state);
	}
}
/*
 * set some bits on a range in the tree.  This may require allocations or
 * sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If any of the exclusive bits are set, this will fail with -EEXIST if some
 * part of the range already has the desired bits set.  The start of the
 * existing range is returned in failed_start in this case.
 *
 * [start, end] is inclusive.  This takes the tree lock.
 */
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int exclusive_bits, u64 *failed_start,
		   struct extent_state **cached_state, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	int err = 0;
	u64 last_start;
	u64 last_end;

	bits |= EXTENT_FIRST_DELALLOC;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		BUG_ON(!prealloc);
	}

	spin_lock(&tree->lock);
	if (cached_state && *cached_state) {
		state = *cached_state;
		if (state->start <= start && state->end > start &&
		    state->tree) {
			node = &state->rb_node;
			goto hit_next;
		}
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = insert_state(tree, prealloc, start, end, &bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
hit_next:
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		struct rb_node *next_node;
		if (state->state & exclusive_bits) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}

		set_state_bits(tree, state, &bits);

		cache_state(state, cached_state);
		merge_state(tree, state);
		if (last_end == (u64)-1)
			goto out;

		start = last_end + 1;
		next_node = rb_next(&state->rb_node);
		if (next_node && start < end && prealloc && !need_resched()) {
			state = rb_entry(next_node, struct extent_state,
					 rb_node);
			if (state->start == start)
				goto hit_next;
		}
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again.  It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			set_state_bits(tree, state, &bits);
			cache_state(state, cached_state);
			merge_state(tree, state);
			if (last_end == (u64)-1)
				goto out;
			start = last_end + 1;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);

		/*
		 * Avoid freeing 'prealloc' if it can be merged with
		 * the later extent.
		 */
		err = insert_state(tree, prealloc, start, this_end,
				   &bits);
		BUG_ON(err == -EEXIST);
		if (err) {
			free_extent_state(prealloc);
			prealloc = NULL;
			goto out;
		}
		cache_state(prealloc, cached_state);
		prealloc = NULL;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		if (state->state & exclusive_bits) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}

		prealloc = alloc_extent_state_atomic(prealloc);
		BUG_ON(!prealloc);
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		set_state_bits(tree, prealloc, &bits);
		cache_state(prealloc, cached_state);
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	spin_unlock(&tree->lock);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	spin_unlock(&tree->lock);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
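/*
 * Usage sketch (illustrative only): the wrappers below are the normal
 * entry points; marking a range dirty, for example, expands to:
 *
 *	set_extent_bit(tree, start, end, EXTENT_DIRTY,
 *		       0, NULL, NULL, GFP_NOFS);
 *
 * With exclusive_bits == 0 an overlap can never fail with -EEXIST.
 */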
/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      NULL, mask);
}

int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      NULL, mask);
}

int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
}

int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
			      0, NULL, cached_state, mask);
}

int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC |
				EXTENT_DO_ACCOUNTING, 0, 0, NULL, mask);
}

int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      NULL, mask);
}

int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
			struct extent_state **cached_state, gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0,
			      NULL, cached_state, mask);
}

static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
				 u64 end, struct extent_state **cached_state,
				 gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
				cached_state, mask);
}
/*
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
 */
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
		     int bits, struct extent_state **cached_state, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
				     EXTENT_LOCKED, &failed_start,
				     cached_state, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}

int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return lock_extent_bits(tree, start, end, 0, NULL, mask);
}

int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
		    gfp_t mask)
{
	int err;
	u64 failed_start;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, EXTENT_LOCKED,
			     &failed_start, NULL, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, NULL, mask);
		return 0;
	}
	return 1;
}

int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
			 struct extent_state **cached, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
				mask);
}

int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL,
				mask);
}
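/*
 * Typical pairing (illustrative only): lock_extent() keeps retrying
 * until the whole range is exclusively locked, so a successful lock is
 * always matched with an unlock over the same [start, end]:
 *
 *	lock_extent(tree, start, end, GFP_NOFS);
 *	... operate on the range ...
 *	unlock_extent(tree, start, end, GFP_NOFS);
 */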
/*
 * helper function to set both pages and extents in the tree writeback
 */
static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	return 0;
}
/* find the first state struct with 'bits' set after 'start', and
 * return it.  tree->lock must be held.  NULL will be returned if
 * nothing was found after 'start'
 */
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
						 u64 start, int bits)
{
	struct rb_node *node;
	struct extent_state *state;

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits))
			return state;

		node = rb_next(node);
		if (!node)
			break;
	}
out:
	return NULL;
}

/*
 * find the first offset in the io tree with 'bits' set. zero is
 * returned if we find something, and *start_ret and *end_ret are
 * set to reflect the state struct that was found.
 *
 * If nothing was found, 1 is returned, < 0 on error
 */
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct extent_state *state;
	int ret = 1;

	spin_lock(&tree->lock);
	state = find_first_extent_bit_state(tree, start, bits);
	if (state) {
		*start_ret = state->start;
		*end_ret = state->end;
		ret = 0;
	}
	spin_unlock(&tree->lock);
	return ret;
}
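/*
 * Usage sketch (illustrative only): walking every dirty range from the
 * start of the tree:
 *
 *	u64 start = 0, end;
 *
 *	while (!find_first_extent_bit(tree, start, &start, &end,
 *				      EXTENT_DIRTY)) {
 *		... process [start, end] ...
 *		start = end + 1;
 *	}
 */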
/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_delalloc_range(struct extent_io_tree *tree,
					u64 *start, u64 *end, u64 max_bytes,
					struct extent_state **cached_state)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 found = 0;
	u64 total_bytes = 0;

	spin_lock(&tree->lock);

	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node) {
		if (!found)
			*end = (u64)-1;
		goto out;
	}

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (found && (state->start != cur_start ||
			      (state->state & EXTENT_BOUNDARY))) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			if (!found)
				*end = state->end;
			goto out;
		}
		if (!found) {
			*start = state->start;
			*cached_state = state;
			atomic_inc(&state->refs);
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return found;
}
static noinline int __unlock_for_delalloc(struct inode *inode,
					  struct page *locked_page,
					  u64 start, u64 end)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;

	if (index == locked_page->index && end_index == index)
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long, nr_pages,
				     ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {
			if (pages[i] != locked_page)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}

static noinline int lock_delalloc_pages(struct inode *inode,
					struct page *locked_page,
					u64 delalloc_start,
					u64 delalloc_end)
{
	unsigned long index = delalloc_start >> PAGE_CACHE_SHIFT;
	unsigned long start_index = index;
	unsigned long end_index = delalloc_end >> PAGE_CACHE_SHIFT;
	unsigned long pages_locked = 0;
	struct page *pages[16];
	unsigned long nrpages;
	int ret;
	int i;

	/* the caller is responsible for locking the start index */
	if (index == locked_page->index && index == end_index)
		return 0;

	/* skip the page at the start index */
	nrpages = end_index - index + 1;
	while (nrpages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nrpages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			ret = -EAGAIN;
			goto done;
		}
		/* now we have an array of pages, lock them all */
		for (i = 0; i < ret; i++) {
			/*
			 * the caller is taking responsibility for
			 * locked_page
			 */
			if (pages[i] != locked_page) {
				lock_page(pages[i]);
				if (!PageDirty(pages[i]) ||
				    pages[i]->mapping != inode->i_mapping) {
					ret = -EAGAIN;
					unlock_page(pages[i]);
					page_cache_release(pages[i]);
					goto done;
				}
			}
			page_cache_release(pages[i]);
			pages_locked++;
		}
		nrpages -= ret;
		index += ret;
		cond_resched();
	}
	ret = 0;
done:
	if (ret && pages_locked) {
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start,
			      ((u64)(start_index + pages_locked - 1)) <<
			      PAGE_CACHE_SHIFT);
	}
	return ret;
}
/*
 * find a contiguous range of bytes in the file marked as delalloc, not
 * more than 'max_bytes'.  start and end are used to return the range.
 *
 * 1 is returned if we find something, 0 if nothing was in the tree
 */
static noinline u64 find_lock_delalloc_range(struct inode *inode,
					     struct extent_io_tree *tree,
					     struct page *locked_page,
					     u64 *start, u64 *end,
					     u64 max_bytes)
{
	u64 delalloc_start;
	u64 delalloc_end;
	u64 found;
	struct extent_state *cached_state = NULL;
	int ret;
	int loops = 0;

again:
	/* step one, find a bunch of delalloc bytes starting at start */
	delalloc_start = *start;
	delalloc_end = 0;
	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
				    max_bytes, &cached_state);
	if (!found || delalloc_end <= *start) {
		*start = delalloc_start;
		*end = delalloc_end;
		free_extent_state(cached_state);
		return found;
	}

	/*
	 * start comes from the offset of locked_page.  We have to lock
	 * pages in order, so we can't process delalloc bytes before
	 * locked_page
	 */
	if (delalloc_start < *start)
		delalloc_start = *start;

	/*
	 * make sure to limit the number of pages we try to lock down
	 * if we're looping.
	 */
	if (delalloc_end + 1 - delalloc_start > max_bytes && loops)
		delalloc_end = delalloc_start + PAGE_CACHE_SIZE - 1;

	/* step two, lock all the pages after the page that has start */
	ret = lock_delalloc_pages(inode, locked_page,
				  delalloc_start, delalloc_end);
	if (ret == -EAGAIN) {
		/* some of the pages are gone, let's avoid looping by
		 * shortening the size of the delalloc range we're searching
		 */
		free_extent_state(cached_state);
		if (!loops) {
			unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
			max_bytes = PAGE_CACHE_SIZE - offset;
			loops = 1;
			goto again;
		} else {
			found = 0;
			goto out_failed;
		}
	}
	BUG_ON(ret);

	/* step three, lock the state bits for the whole range */
	lock_extent_bits(tree, delalloc_start, delalloc_end,
			 0, &cached_state, GFP_NOFS);

	/* then test to make sure it is all still delalloc */
	ret = test_range_bit(tree, delalloc_start, delalloc_end,
			     EXTENT_DELALLOC, 1, cached_state);
	if (!ret) {
		unlock_extent_cached(tree, delalloc_start, delalloc_end,
				     &cached_state, GFP_NOFS);
		__unlock_for_delalloc(inode, locked_page,
			      delalloc_start, delalloc_end);
		cond_resched();
		goto again;
	}
	free_extent_state(cached_state);
	*start = delalloc_start;
	*end = delalloc_end;
out_failed:
	return found;
}
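/*
 * Contract note: on success (found != 0) the pages in [*start, *end]
 * and the matching extent state bits are both locked and owned by the
 * caller; on failure nothing is left locked.  The delalloc writeback
 * path relies on exactly this.
 */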
int extent_clear_unlock_delalloc(struct inode *inode,
				struct extent_io_tree *tree,
				u64 start, u64 end, struct page *locked_page,
				unsigned long op)
{
	int ret;
	struct page *pages[16];
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int clear_bits = 0;

	if (op & EXTENT_CLEAR_UNLOCK)
		clear_bits |= EXTENT_LOCKED;
	if (op & EXTENT_CLEAR_DIRTY)
		clear_bits |= EXTENT_DIRTY;

	if (op & EXTENT_CLEAR_DELALLOC)
		clear_bits |= EXTENT_DELALLOC;

	clear_extent_bit(tree, start, end, clear_bits, 1, 0, NULL, GFP_NOFS);
	if (!(op & (EXTENT_CLEAR_UNLOCK_PAGE | EXTENT_CLEAR_DIRTY |
		    EXTENT_SET_WRITEBACK | EXTENT_END_WRITEBACK |
		    EXTENT_SET_PRIVATE2)))
		return 0;

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		for (i = 0; i < ret; i++) {

			if (op & EXTENT_SET_PRIVATE2)
				SetPagePrivate2(pages[i]);

			if (pages[i] == locked_page) {
				page_cache_release(pages[i]);
				continue;
			}
			if (op & EXTENT_CLEAR_DIRTY)
				clear_page_dirty_for_io(pages[i]);
			if (op & EXTENT_SET_WRITEBACK)
				set_page_writeback(pages[i]);
			if (op & EXTENT_END_WRITEBACK)
				end_page_writeback(pages[i]);
			if (op & EXTENT_CLEAR_UNLOCK_PAGE)
				unlock_page(pages[i]);
			page_cache_release(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
		cond_resched();
	}
	return 0;
}
/*
 * count the number of bytes in the tree that have a given bit(s)
 * set.  This can be fairly slow, except for EXTENT_DIRTY which is
 * cached.  The total number found is returned.
 */
u64 count_range_bits(struct extent_io_tree *tree,
		     u64 *start, u64 search_end, u64 max_bytes,
		     unsigned long bits, int contig)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = *start;
	u64 total_bytes = 0;
	u64 last = 0;
	int found = 0;

	if (search_end <= cur_start) {
		WARN_ON(1);
		return 0;
	}

	spin_lock(&tree->lock);
	if (cur_start == 0 && bits == EXTENT_DIRTY) {
		total_bytes = tree->dirty_bytes;
		goto out;
	}
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, cur_start);
	if (!node)
		goto out;

	while (1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > search_end)
			break;
		if (contig && found && state->start > last + 1)
			break;
		if (state->end >= cur_start && (state->state & bits) == bits) {
			total_bytes += min(search_end, state->end) + 1 -
				       max(cur_start, state->start);
			if (total_bytes >= max_bytes)
				break;
			if (!found) {
				*start = max(cur_start, state->start);
				found = 1;
			}
			last = state->end;
		} else if (contig && found) {
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	spin_unlock(&tree->lock);
	return total_bytes;
}
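/*
 * Usage sketch (illustrative only; 'offset' and 'len' are placeholders):
 * counting up to 'len' bytes of contiguous delalloc starting at 'offset':
 *
 *	u64 found_start = offset;
 *	u64 bytes = count_range_bits(tree, &found_start, (u64)-1,
 *				     len, EXTENT_DELALLOC, 1);
 */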
/*
 * set the private field for a given byte offset in the tree.  If there
 * isn't an extent_state that starts at 'start', this does nothing and
 * -ENOENT is returned.
 */
int set_state_private(struct extent_io_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	spin_unlock(&tree->lock);
	return ret;
}

int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	spin_lock(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(tree, start);
	if (!node) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	spin_unlock(&tree->lock);
	return ret;
}
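/*
 * The two helpers above pair up on the same 'start' offset; btrfs uses
 * the private field to stash per-extent data such as read-time
 * checksums.  Sketch (illustrative only):
 *
 *	set_state_private(tree, start, csum);
 *	...
 *	u64 csum;
 *	get_state_private(tree, start, &csum);
 */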
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set.  Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
		   int bits, int filled, struct extent_state *cached)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	spin_lock(&tree->lock);
	if (cached && cached->tree && cached->start <= start &&
	    cached->end > start)
		node = &cached->rb_node;
	else
		node = tree_search(tree, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}

		if (state->start > end)
			break;

		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}

		if (state->end == (u64)-1)
			break;

		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
		if (!node) {
			if (filled)
				bitset = 0;
			break;
		}
	}
	spin_unlock(&tree->lock);
	return bitset;
}

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_io_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1, NULL))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_io_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
		unlock_page(page);
	return 0;
}
/*
 * helper function to end page writeback; the extent tree no longer
 * tracks per-range writeback bits, so this simply ends writeback on
 * the page
 */
static int check_page_writeback(struct extent_io_tree *tree,
				struct page *page)
{
	end_page_writeback(page);
	return 0;
}
/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_writepage(struct bio *bio, int err)
{
	int uptodate = err == 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	do {
		struct page *page = bvec->bv_page;
		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			 bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
		if (tree->ops && tree->ops->writepage_end_io_hook) {
			ret = tree->ops->writepage_end_io_hook(page, start,
						       end, NULL, uptodate);
			if (ret)
				uptodate = 0;
		}

		if (!uptodate && tree->ops &&
		    tree->ops->writepage_io_failed_hook) {
			ret = tree->ops->writepage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate = (err == 0);
				continue;
			}
		}

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
			ClearPageUptodate(page);
			SetPageError(page);
		}

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
}
/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
static void end_bio_extent_readpage(struct bio *bio, int err)
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct bio_vec *bvec = bio->bi_io_vec;
	struct extent_io_tree *tree;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

	if (err)
		uptodate = 0;

	do {
		struct page *page = bvec->bv_page;
		struct extent_state *cached = NULL;
		struct extent_state *state;

		tree = &BTRFS_I(page->mapping->host)->io_tree;

		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (++bvec <= bvec_end)
			prefetchw(&bvec->bv_page->flags);

		spin_lock(&tree->lock);
		state = find_first_extent_bit_state(tree, start, EXTENT_LOCKED);
		if (state && state->start == start) {
			/*
			 * take a reference on the state, unlock will drop
			 * the ref
			 */
			cache_state(state, &cached);
		}
		spin_unlock(&tree->lock);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end,
							      state);
			if (ret)
				uptodate = 0;
		}
		if (!uptodate && tree->ops &&
		    tree->ops->readpage_io_failed_hook) {
			ret = tree->ops->readpage_io_failed_hook(bio, page,
							 start, end, NULL);
			if (ret == 0) {
				uptodate =
					test_bit(BIO_UPTODATE, &bio->bi_flags);
				if (err)
					uptodate = 0;
				uncache_state(&cached);
				continue;
			}
		}

		if (uptodate) {
			set_extent_uptodate(tree, start, end, &cached,
					    GFP_ATOMIC);
		}
		unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);

		if (whole_page) {
			if (uptodate) {
				SetPageUptodate(page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			unlock_page(page);
		} else {
			if (uptodate) {
				check_page_uptodate(tree, page);
			} else {
				ClearPageUptodate(page);
				SetPageError(page);
			}
			check_page_locked(tree, page);
		}
	} while (bvec <= bvec_end);

	bio_put(bio);
}
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_size = 0;
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}
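/*
 * Note the fallback above: when the task is itself a memory allocator
 * (PF_MEMALLOC), the vec count is halved until bio_alloc() succeeds or
 * nr_vecs reaches zero, trading a bigger bio for making any forward
 * progress at all under memory pressure.
 */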
static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
			  unsigned long bio_flags)
{
	int ret = 0;
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct page *page = bvec->bv_page;
	struct extent_io_tree *tree = bio->bi_private;
	u64 start;

	start = ((u64)page->index << PAGE_CACHE_SHIFT) + bvec->bv_offset;

	bio->bi_private = NULL;

	bio_get(bio);

	if (tree->ops && tree->ops->submit_bio_hook)
		ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
					   mirror_num, bio_flags, start);
	else
		submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}
static int submit_extent_page(int rw, struct extent_io_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func,
			      int mirror_num,
			      unsigned long prev_bio_flags,
			      unsigned long bio_flags)
{
	int ret = 0;
	struct bio *bio;
	int nr;
	int contig = 0;
	int this_compressed = bio_flags & EXTENT_BIO_COMPRESSED;
	int old_compressed = prev_bio_flags & EXTENT_BIO_COMPRESSED;
	size_t page_size = min_t(size_t, size, PAGE_CACHE_SIZE);

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (old_compressed)
			contig = bio->bi_sector == sector;
		else
			contig = bio->bi_sector + (bio->bi_size >> 9) ==
				sector;

		if (prev_bio_flags != bio_flags || !contig ||
		    (tree->ops && tree->ops->merge_bio_hook &&
		     tree->ops->merge_bio_hook(page, offset, page_size, bio,
					       bio_flags)) ||
		    bio_add_page(bio, page, page_size, offset) < page_size) {
			ret = submit_one_bio(rw, bio, mirror_num,
					     prev_bio_flags);
			bio = NULL;
		} else {
			return 0;
		}
	}
	if (this_compressed)
		nr = BIO_MAX_PAGES;
	else
		nr = bio_get_nr_vecs(bdev);

	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio)
		return -ENOMEM;

	bio_add_page(bio, page, page_size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;

	if (bio_ret)
		*bio_ret = bio;
	else
		ret = submit_one_bio(rw, bio, mirror_num, bio_flags);

	return ret;
}
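/*
 * In short: a page is merged into the bio being built in *bio_ret only
 * when the bio flags match, the sector is physically contiguous, the
 * merge_bio_hook (if any) allows it, and bio_add_page() accepts the full
 * page_size; any failed test submits the old bio and starts a new one.
 */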
void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
	}
}

static void set_page_extent_head(struct page *page, unsigned long len)
{
	WARN_ON(!PagePrivate(page));
	set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
}
/*
 * basic readpage implementation.  Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_io_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio, int mirror_num,
				   unsigned long *bio_flags)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	struct btrfs_ordered_extent *ordered;
	int ret;
	int nr = 0;
	size_t pg_offset = 0;
	size_t iosize;
	size_t disk_io_size;
	size_t blocksize = inode->i_sb->s_blocksize;
	unsigned long this_bio_flag = 0;

	set_page_extent_mapped(page);

	if (!PageUptodate(page)) {
		if (cleancache_get_page(page) == 0) {
			BUG_ON(blocksize != PAGE_SIZE);
			goto out;
		}
	}

	end = page_end;
	while (1) {
		lock_extent(tree, start, end, GFP_NOFS);
		ordered = btrfs_lookup_ordered_extent(inode, start);
		if (!ordered)
			break;
		unlock_extent(tree, start, end, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	if (page->index == last_byte >> PAGE_CACHE_SHIFT) {
		char *userpage;
		size_t zero_offset = last_byte & (PAGE_CACHE_SIZE - 1);

		if (zero_offset) {
			iosize = PAGE_CACHE_SIZE - zero_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + zero_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
		}
	}
	while (cur <= end) {
		if (cur >= last_byte) {
			char *userpage;
			struct extent_state *cached = NULL;

			iosize = PAGE_CACHE_SIZE - pg_offset;
			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + pg_offset, 0, iosize);
			flush_dcache_page(page);
			kunmap_atomic(userpage, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    &cached, GFP_NOFS);
			unlock_extent_cached(tree, cur, cur + iosize - 1,
					     &cached, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, pg_offset, cur,
				end - cur + 1, 0);
		if (IS_ERR_OR_NULL(em)) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}
		extent_offset = cur - em->start;
		BUG_ON(extent_map_end(em) <= cur);
		BUG_ON(end < cur);

		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
			this_bio_flag = EXTENT_BIO_COMPRESSED;
			extent_set_compress_type(&this_bio_flag,
						 em->compress_type);
		}

		iosize = min(extent_map_end(em) - cur, end - cur + 1);
		cur_end = min(extent_map_end(em) - 1, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		if (this_bio_flag & EXTENT_BIO_COMPRESSED) {
			disk_io_size = em->block_len;
			sector = em->block_start >> 9;
		} else {
			sector = (em->block_start + extent_offset) >> 9;
			disk_io_size = iosize;
		}
		bdev = em->bdev;
		block_start = em->block_start;
		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			block_start = EXTENT_MAP_HOLE;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			char *userpage;
			struct extent_state *cached = NULL;

			userpage = kmap_atomic(page, KM_USER0);
			memset(userpage + pg_offset, 0, iosize)