/fs/ceph/caps.c
https://bitbucket.org/slukk/jb-tsm-kernel-4.2
- #include <linux/ceph/ceph_debug.h>
- #include <linux/fs.h>
- #include <linux/kernel.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #include <linux/vmalloc.h>
- #include <linux/wait.h>
- #include <linux/writeback.h>
- #include "super.h"
- #include "mds_client.h"
- #include <linux/ceph/decode.h>
- #include <linux/ceph/messenger.h>
- /*
- * Capability management
- *
- * The Ceph metadata servers control client access to inode metadata
- * and file data by issuing capabilities, granting clients permission
- * to read and/or write both inode fields and file data to OSDs
- * (storage nodes). Each capability consists of a set of bits
- * indicating which operations are allowed.
- *
- * If the client holds a *_SHARED cap, the client has a coherent value
- * that can be safely read from the cached inode.
- *
- * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
- * client is allowed to change inode attributes (e.g., file size,
- * mtime), note its dirty state in the ceph_cap, and asynchronously
- * flush that metadata change to the MDS.
- *
- * In the event of a conflicting operation (perhaps by another
- * client), the MDS will revoke the conflicting client capabilities.
- *
- * In order for a client to cache an inode, it must hold a capability
- * with at least one MDS server. When inodes are released, release
- * notifications are batched and periodically sent en masse to the MDS
- * cluster to release server state.
- */
- /*
- * Generate readable cap strings for debugging output.
- */
- #define MAX_CAP_STR 20
- static char cap_str[MAX_CAP_STR][40];
- static DEFINE_SPINLOCK(cap_str_lock);
- static int last_cap_str;
- static char *gcap_string(char *s, int c)
- {
- if (c & CEPH_CAP_GSHARED)
- *s++ = 's';
- if (c & CEPH_CAP_GEXCL)
- *s++ = 'x';
- if (c & CEPH_CAP_GCACHE)
- *s++ = 'c';
- if (c & CEPH_CAP_GRD)
- *s++ = 'r';
- if (c & CEPH_CAP_GWR)
- *s++ = 'w';
- if (c & CEPH_CAP_GBUFFER)
- *s++ = 'b';
- if (c & CEPH_CAP_GLAZYIO)
- *s++ = 'l';
- return s;
- }
- const char *ceph_cap_string(int caps)
- {
- int i;
- char *s;
- int c;
- spin_lock(&cap_str_lock);
- i = last_cap_str++;
- if (last_cap_str == MAX_CAP_STR)
- last_cap_str = 0;
- spin_unlock(&cap_str_lock);
- s = cap_str[i];
- if (caps & CEPH_CAP_PIN)
- *s++ = 'p';
- c = (caps >> CEPH_CAP_SAUTH) & 3;
- if (c) {
- *s++ = 'A';
- s = gcap_string(s, c);
- }
- c = (caps >> CEPH_CAP_SLINK) & 3;
- if (c) {
- *s++ = 'L';
- s = gcap_string(s, c);
- }
- c = (caps >> CEPH_CAP_SXATTR) & 3;
- if (c) {
- *s++ = 'X';
- s = gcap_string(s, c);
- }
- c = caps >> CEPH_CAP_SFILE;
- if (c) {
- *s++ = 'F';
- s = gcap_string(s, c);
- }
- if (s == cap_str[i])
- *s++ = '-';
- *s = 0;
- return cap_str[i];
- }
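
For reference, here is a minimal, self-contained userspace sketch of the same shift-and-mask decomposition that ceph_cap_string() performs above. It is not part of caps.c; the component shifts and generic bits below are assumptions chosen only for the example (the authoritative definitions live in the shared ceph headers), and only the 's'/'x' generic bits are modeled.

#include <stdio.h>

/* example values only; the real constants are defined in the ceph headers */
#define GSHARED 1          /* rendered as 's' */
#define GEXCL   2          /* rendered as 'x' */
#define SAUTH   2          /* shift of the AUTH component */
#define SLINK   4          /* shift of the LINK component */
#define SXATTR  6          /* shift of the XATTR component */
#define SFILE   8          /* shift of the FILE component */

static char *demo_gcap(char *s, int c)
{
    if (c & GSHARED) *s++ = 's';
    if (c & GEXCL)   *s++ = 'x';
    return s;
}

int main(void)
{
    int caps = (GSHARED << SAUTH) | (GEXCL << SFILE);   /* AUTH shared, FILE excl */
    char buf[32], *s = buf;
    int c;

    if ((c = (caps >> SAUTH) & 3))  { *s++ = 'A'; s = demo_gcap(s, c); }
    if ((c = (caps >> SLINK) & 3))  { *s++ = 'L'; s = demo_gcap(s, c); }
    if ((c = (caps >> SXATTR) & 3)) { *s++ = 'X'; s = demo_gcap(s, c); }
    if ((c = (caps >> SFILE)))      { *s++ = 'F'; s = demo_gcap(s, c); }
    *s = '\0';
    printf("%s\n", buf);            /* prints "AsFx" */
    return 0;
}
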
- void ceph_caps_init(struct ceph_mds_client *mdsc)
- {
- INIT_LIST_HEAD(&mdsc->caps_list);
- spin_lock_init(&mdsc->caps_list_lock);
- }
- void ceph_caps_finalize(struct ceph_mds_client *mdsc)
- {
- struct ceph_cap *cap;
- spin_lock(&mdsc->caps_list_lock);
- while (!list_empty(&mdsc->caps_list)) {
- cap = list_first_entry(&mdsc->caps_list,
- struct ceph_cap, caps_item);
- list_del(&cap->caps_item);
- kmem_cache_free(ceph_cap_cachep, cap);
- }
- mdsc->caps_total_count = 0;
- mdsc->caps_avail_count = 0;
- mdsc->caps_use_count = 0;
- mdsc->caps_reserve_count = 0;
- mdsc->caps_min_count = 0;
- spin_unlock(&mdsc->caps_list_lock);
- }
- void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
- {
- spin_lock(&mdsc->caps_list_lock);
- mdsc->caps_min_count += delta;
- BUG_ON(mdsc->caps_min_count < 0);
- spin_unlock(&mdsc->caps_list_lock);
- }
- int ceph_reserve_caps(struct ceph_mds_client *mdsc,
- struct ceph_cap_reservation *ctx, int need)
- {
- int i;
- struct ceph_cap *cap;
- int have;
- int alloc = 0;
- LIST_HEAD(newcaps);
- int ret = 0;
- dout("reserve caps ctx=%p need=%d\n", ctx, need);
- /* first reserve any caps that are already allocated */
- spin_lock(&mdsc->caps_list_lock);
- if (mdsc->caps_avail_count >= need)
- have = need;
- else
- have = mdsc->caps_avail_count;
- mdsc->caps_avail_count -= have;
- mdsc->caps_reserve_count += have;
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count +
- mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- for (i = have; i < need; i++) {
- cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
- if (!cap) {
- ret = -ENOMEM;
- goto out_alloc_count;
- }
- list_add(&cap->caps_item, &newcaps);
- alloc++;
- }
- BUG_ON(have + alloc != need);
- spin_lock(&mdsc->caps_list_lock);
- mdsc->caps_total_count += alloc;
- mdsc->caps_reserve_count += alloc;
- list_splice(&newcaps, &mdsc->caps_list);
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count +
- mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- ctx->count = need;
- dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
- ctx, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
- return 0;
- out_alloc_count:
- /* we didn't manage to reserve as much as we needed */
- pr_warning("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
- ctx, need, have);
- return ret;
- }
- int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
- struct ceph_cap_reservation *ctx)
- {
- dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
- if (ctx->count) {
- spin_lock(&mdsc->caps_list_lock);
- BUG_ON(mdsc->caps_reserve_count < ctx->count);
- mdsc->caps_reserve_count -= ctx->count;
- mdsc->caps_avail_count += ctx->count;
- ctx->count = 0;
- dout("unreserve caps %d = %d used + %d resv + %d avail\n",
- mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count +
- mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- }
- return 0;
- }
- static struct ceph_cap *get_cap(struct ceph_mds_client *mdsc,
- struct ceph_cap_reservation *ctx)
- {
- struct ceph_cap *cap = NULL;
- /* temporary, until we do something about cap import/export */
- if (!ctx) {
- cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
- if (cap) {
- mdsc->caps_use_count++;
- mdsc->caps_total_count++;
- }
- return cap;
- }
- spin_lock(&mdsc->caps_list_lock);
- dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
- ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
- BUG_ON(!ctx->count);
- BUG_ON(ctx->count > mdsc->caps_reserve_count);
- BUG_ON(list_empty(&mdsc->caps_list));
- ctx->count--;
- mdsc->caps_reserve_count--;
- mdsc->caps_use_count++;
- cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
- list_del(&cap->caps_item);
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count + mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- return cap;
- }
- void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
- {
- spin_lock(&mdsc->caps_list_lock);
- dout("put_cap %p %d = %d used + %d resv + %d avail\n",
- cap, mdsc->caps_total_count, mdsc->caps_use_count,
- mdsc->caps_reserve_count, mdsc->caps_avail_count);
- mdsc->caps_use_count--;
- /*
- * Keep some preallocated caps around (ceph_min_count), to
- * avoid lots of free/alloc churn.
- */
- if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
- mdsc->caps_min_count) {
- mdsc->caps_total_count--;
- kmem_cache_free(ceph_cap_cachep, cap);
- } else {
- mdsc->caps_avail_count++;
- list_add(&cap->caps_item, &mdsc->caps_list);
- }
- BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
- mdsc->caps_reserve_count + mdsc->caps_avail_count);
- spin_unlock(&mdsc->caps_list_lock);
- }
- void ceph_reservation_status(struct ceph_fs_client *fsc,
- int *total, int *avail, int *used, int *reserved,
- int *min)
- {
- struct ceph_mds_client *mdsc = fsc->mdsc;
- if (total)
- *total = mdsc->caps_total_count;
- if (avail)
- *avail = mdsc->caps_avail_count;
- if (used)
- *used = mdsc->caps_use_count;
- if (reserved)
- *reserved = mdsc->caps_reserve_count;
- if (min)
- *min = mdsc->caps_min_count;
- }
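
The reservation helpers above all pivot on one invariant, asserted with BUG_ON() after every transition: caps_total_count == caps_use_count + caps_reserve_count + caps_avail_count. The following is a minimal userspace model of that bookkeeping, not caps.c code; all names are hypothetical and the free-back-to-slab behaviour of ceph_put_cap() is deliberately omitted.

#include <assert.h>
#include <stdio.h>

/* hypothetical model of the cap reservation counters */
struct cap_pool {
    int total, used, reserved, avail;
};

static void check(const struct cap_pool *p)
{
    /* the invariant caps.c asserts after every transition */
    assert(p->total == p->used + p->reserved + p->avail);
}

static void reserve(struct cap_pool *p, int need)       /* ceph_reserve_caps-like */
{
    int from_avail = need < p->avail ? need : p->avail;

    p->avail -= from_avail;
    p->total += need - from_avail;   /* caps newly allocated from the slab */
    p->reserved += need;
    check(p);
}

static void take_cap(struct cap_pool *p)                 /* get_cap-like */
{
    p->reserved--;
    p->used++;
    check(p);
}

static void release_cap(struct cap_pool *p)              /* ceph_put_cap-like */
{
    p->used--;
    p->avail++;
    check(p);
}

static void unreserve(struct cap_pool *p, int count)     /* ceph_unreserve_caps-like */
{
    p->reserved -= count;
    p->avail += count;
    check(p);
}

int main(void)
{
    struct cap_pool p = { 0, 0, 0, 0 };

    reserve(&p, 3);
    take_cap(&p);
    release_cap(&p);
    unreserve(&p, 2);
    printf("total=%d used=%d reserved=%d avail=%d\n",
           p.total, p.used, p.reserved, p.avail);        /* 3 0 0 3 */
    return 0;
}
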
- /*
- * Find ceph_cap for given mds, if any.
- *
- * Called with i_lock held.
- */
- static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
- {
- struct ceph_cap *cap;
- struct rb_node *n = ci->i_caps.rb_node;
- while (n) {
- cap = rb_entry(n, struct ceph_cap, ci_node);
- if (mds < cap->mds)
- n = n->rb_left;
- else if (mds > cap->mds)
- n = n->rb_right;
- else
- return cap;
- }
- return NULL;
- }
- struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
- {
- struct ceph_cap *cap;
- spin_lock(&ci->vfs_inode.i_lock);
- cap = __get_cap_for_mds(ci, mds);
- spin_unlock(&ci->vfs_inode.i_lock);
- return cap;
- }
- /*
- * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
- */
- static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
- {
- struct ceph_cap *cap;
- int mds = -1;
- struct rb_node *p;
- /* prefer mds with WR|BUFFER|EXCL caps */
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- mds = cap->mds;
- if (cap->issued & (CEPH_CAP_FILE_WR |
- CEPH_CAP_FILE_BUFFER |
- CEPH_CAP_FILE_EXCL))
- break;
- }
- return mds;
- }
- int ceph_get_cap_mds(struct inode *inode)
- {
- int mds;
- spin_lock(&inode->i_lock);
- mds = __ceph_get_cap_mds(ceph_inode(inode));
- spin_unlock(&inode->i_lock);
- return mds;
- }
- /*
- * Called under i_lock.
- */
- static void __insert_cap_node(struct ceph_inode_info *ci,
- struct ceph_cap *new)
- {
- struct rb_node **p = &ci->i_caps.rb_node;
- struct rb_node *parent = NULL;
- struct ceph_cap *cap = NULL;
- while (*p) {
- parent = *p;
- cap = rb_entry(parent, struct ceph_cap, ci_node);
- if (new->mds < cap->mds)
- p = &(*p)->rb_left;
- else if (new->mds > cap->mds)
- p = &(*p)->rb_right;
- else
- BUG();
- }
- rb_link_node(&new->ci_node, parent, p);
- rb_insert_color(&new->ci_node, &ci->i_caps);
- }
- /*
- * (re)set cap hold timeouts, which control the delayed release
- * of unused caps back to the MDS. Should be called on cap use.
- */
- static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci)
- {
- struct ceph_mount_options *ma = mdsc->fsc->mount_options;
- ci->i_hold_caps_min = round_jiffies(jiffies +
- ma->caps_wanted_delay_min * HZ);
- ci->i_hold_caps_max = round_jiffies(jiffies +
- ma->caps_wanted_delay_max * HZ);
- dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
- ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
- }
- /*
- * (Re)queue cap at the end of the delayed cap release list.
- *
- * If I_FLUSH is set, leave the inode at the front of the list.
- *
- * Caller holds i_lock
- * -> we take mdsc->cap_delay_lock
- */
- static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci)
- {
- __cap_set_timeouts(mdsc, ci);
- dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
- ci->i_ceph_flags, ci->i_hold_caps_max);
- if (!mdsc->stopping) {
- spin_lock(&mdsc->cap_delay_lock);
- if (!list_empty(&ci->i_cap_delay_list)) {
- if (ci->i_ceph_flags & CEPH_I_FLUSH)
- goto no_change;
- list_del_init(&ci->i_cap_delay_list);
- }
- list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
- no_change:
- spin_unlock(&mdsc->cap_delay_lock);
- }
- }
- /*
- * Queue an inode for immediate writeback. Mark inode with I_FLUSH,
- * indicating we should send a cap message to flush dirty metadata
- * asap, and move to the front of the delayed cap list.
- */
- static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci)
- {
- dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
- spin_lock(&mdsc->cap_delay_lock);
- ci->i_ceph_flags |= CEPH_I_FLUSH;
- if (!list_empty(&ci->i_cap_delay_list))
- list_del_init(&ci->i_cap_delay_list);
- list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
- spin_unlock(&mdsc->cap_delay_lock);
- }
- /*
- * Cancel delayed work on cap.
- *
- * Caller must hold i_lock.
- */
- static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
- struct ceph_inode_info *ci)
- {
- dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
- if (list_empty(&ci->i_cap_delay_list))
- return;
- spin_lock(&mdsc->cap_delay_lock);
- list_del_init(&ci->i_cap_delay_list);
- spin_unlock(&mdsc->cap_delay_lock);
- }
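
The three helpers above implement a simple queueing rule: normal activity requeues an inode at the tail of the delay list, while an inode marked CEPH_I_FLUSH stays where it is (typically at the front, where __cap_delay_requeue_front put it). A small illustrative model of that rule follows; it is not caps.c code, the names are hypothetical, and an array stands in for the linked list.

#include <stdio.h>
#include <string.h>

#define I_FLUSH 0x1

struct node { char name[8]; int flags; };

static struct node list[8];   /* index 0 = front of the delay list */
static int nlist;

static int find(const char *name)
{
    int i;

    for (i = 0; i < nlist; i++)
        if (!strcmp(list[i].name, name))
            return i;
    return -1;
}

static void remove_at(int i)
{
    memmove(&list[i], &list[i + 1], (nlist - i - 1) * sizeof(list[0]));
    nlist--;
}

/* __cap_delay_requeue-like: move to the tail unless I_FLUSH pins it in place */
static void requeue(const char *name, int flags)
{
    int i = find(name);

    if (i >= 0) {
        if (list[i].flags & I_FLUSH)
            return;                     /* leave a flush-marked inode alone */
        remove_at(i);
    }
    strcpy(list[nlist].name, name);
    list[nlist].flags = flags;
    nlist++;
}

/* __cap_delay_requeue_front-like: mark I_FLUSH and move to the head */
static void requeue_front(const char *name)
{
    int i = find(name);

    if (i >= 0)
        remove_at(i);
    memmove(&list[1], &list[0], nlist * sizeof(list[0]));
    strcpy(list[0].name, name);
    list[0].flags = I_FLUSH;
    nlist++;
}

int main(void)
{
    int i;

    requeue("a", 0);
    requeue("b", 0);
    requeue_front("a");                 /* "a" needs an immediate flush */
    requeue("a", I_FLUSH);              /* stays at the front */
    for (i = 0; i < nlist; i++)
        printf("%s ", list[i].name);    /* prints "a b" */
    printf("\n");
    return 0;
}
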
- /*
- * Common issue checks for add_cap, handle_cap_grant.
- */
- static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
- unsigned issued)
- {
- unsigned had = __ceph_caps_issued(ci, NULL);
- /*
- * Each time we receive FILE_CACHE anew, we increment
- * i_rdcache_gen.
- */
- if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
- (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0)
- ci->i_rdcache_gen++;
- /*
- * if we are newly issued FILE_SHARED, clear I_COMPLETE; we
- * don't know what happened to this directory while we didn't
- * have the cap.
- */
- if ((issued & CEPH_CAP_FILE_SHARED) &&
- (had & CEPH_CAP_FILE_SHARED) == 0) {
- ci->i_shared_gen++;
- if (S_ISDIR(ci->vfs_inode.i_mode)) {
- dout(" marking %p NOT complete\n", &ci->vfs_inode);
- ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
- }
- }
- }
- /*
- * Add a capability under the given MDS session.
- *
- * Caller should hold session snap_rwsem (read) and s_mutex.
- *
- * @fmode is the open file mode, if we are opening a file, otherwise
- * it is < 0. (This is so we can atomically add the cap and add an
- * open file reference to it.)
- */
- int ceph_add_cap(struct inode *inode,
- struct ceph_mds_session *session, u64 cap_id,
- int fmode, unsigned issued, unsigned wanted,
- unsigned seq, unsigned mseq, u64 realmino, int flags,
- struct ceph_cap_reservation *caps_reservation)
- {
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_cap *new_cap = NULL;
- struct ceph_cap *cap;
- int mds = session->s_mds;
- int actual_wanted;
- dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
- session->s_mds, cap_id, ceph_cap_string(issued), seq);
- /*
- * If we are opening the file, include file mode wanted bits
- * in wanted.
- */
- if (fmode >= 0)
- wanted |= ceph_caps_for_mode(fmode);
- retry:
- spin_lock(&inode->i_lock);
- cap = __get_cap_for_mds(ci, mds);
- if (!cap) {
- if (new_cap) {
- cap = new_cap;
- new_cap = NULL;
- } else {
- spin_unlock(&inode->i_lock);
- new_cap = get_cap(mdsc, caps_reservation);
- if (new_cap == NULL)
- return -ENOMEM;
- goto retry;
- }
- cap->issued = 0;
- cap->implemented = 0;
- cap->mds = mds;
- cap->mds_wanted = 0;
- cap->ci = ci;
- __insert_cap_node(ci, cap);
- /* clear out old exporting info? (i.e. on cap import) */
- if (ci->i_cap_exporting_mds == mds) {
- ci->i_cap_exporting_issued = 0;
- ci->i_cap_exporting_mseq = 0;
- ci->i_cap_exporting_mds = -1;
- }
- /* add to session cap list */
- cap->session = session;
- spin_lock(&session->s_cap_lock);
- list_add_tail(&cap->session_caps, &session->s_caps);
- session->s_nr_caps++;
- spin_unlock(&session->s_cap_lock);
- } else if (new_cap)
- ceph_put_cap(mdsc, new_cap);
- if (!ci->i_snap_realm) {
- /*
- * add this inode to the appropriate snap realm
- */
- struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
- realmino);
- if (realm) {
- ceph_get_snap_realm(mdsc, realm);
- spin_lock(&realm->inodes_with_caps_lock);
- ci->i_snap_realm = realm;
- list_add(&ci->i_snap_realm_item,
- &realm->inodes_with_caps);
- spin_unlock(&realm->inodes_with_caps_lock);
- } else {
- pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
- realmino);
- WARN_ON(!realm);
- }
- }
- __check_cap_issue(ci, cap, issued);
- /*
- * If we are issued caps we don't want, or the mds' wanted
- * value appears to be off, queue a check so we'll release
- * later and/or update the mds wanted value.
- */
- actual_wanted = __ceph_caps_wanted(ci);
- if ((wanted & ~actual_wanted) ||
- (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
- dout(" issued %s, mds wanted %s, actual %s, queueing\n",
- ceph_cap_string(issued), ceph_cap_string(wanted),
- ceph_cap_string(actual_wanted));
- __cap_delay_requeue(mdsc, ci);
- }
- if (flags & CEPH_CAP_FLAG_AUTH)
- ci->i_auth_cap = cap;
- else if (ci->i_auth_cap == cap)
- ci->i_auth_cap = NULL;
- dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
- inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
- ceph_cap_string(issued|cap->issued), seq, mds);
- cap->cap_id = cap_id;
- cap->issued = issued;
- cap->implemented |= issued;
- cap->mds_wanted |= wanted;
- cap->seq = seq;
- cap->issue_seq = seq;
- cap->mseq = mseq;
- cap->cap_gen = session->s_cap_gen;
- if (fmode >= 0)
- __ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
- wake_up_all(&ci->i_cap_wq);
- return 0;
- }
- /*
- * Return true if cap has not timed out and belongs to the current
- * generation of the MDS session (i.e. has not gone 'stale' due to
- * us losing touch with the mds).
- */
- static int __cap_is_valid(struct ceph_cap *cap)
- {
- unsigned long ttl;
- u32 gen;
- spin_lock(&cap->session->s_cap_lock);
- gen = cap->session->s_cap_gen;
- ttl = cap->session->s_cap_ttl;
- spin_unlock(&cap->session->s_cap_lock);
- if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
- dout("__cap_is_valid %p cap %p issued %s "
- "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
- cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
- return 0;
- }
- return 1;
- }
- /*
- * Return set of valid cap bits issued to us. Note that caps time
- * out, and may be invalidated in bulk if the client session times out
- * and session->s_cap_gen is bumped.
- */
- int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
- {
- int have = ci->i_snap_caps | ci->i_cap_exporting_issued;
- struct ceph_cap *cap;
- struct rb_node *p;
- if (implemented)
- *implemented = 0;
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- if (!__cap_is_valid(cap))
- continue;
- dout("__ceph_caps_issued %p cap %p issued %s\n",
- &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
- have |= cap->issued;
- if (implemented)
- *implemented |= cap->implemented;
- }
- return have;
- }
- /*
- * Get cap bits issued by caps other than @ocap
- */
- int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
- {
- int have = ci->i_snap_caps;
- struct ceph_cap *cap;
- struct rb_node *p;
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- if (cap == ocap)
- continue;
- if (!__cap_is_valid(cap))
- continue;
- have |= cap->issued;
- }
- return have;
- }
- /*
- * Move a cap to the end of the LRU (oldest caps at list head, newest
- * at list tail).
- */
- static void __touch_cap(struct ceph_cap *cap)
- {
- struct ceph_mds_session *s = cap->session;
- spin_lock(&s->s_cap_lock);
- if (s->s_cap_iterator == NULL) {
- dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
- s->s_mds);
- list_move_tail(&cap->session_caps, &s->s_caps);
- } else {
- dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
- &cap->ci->vfs_inode, cap, s->s_mds);
- }
- spin_unlock(&s->s_cap_lock);
- }
- /*
- * Check if we hold the given mask. If so, move the cap(s) to the
- * front of their respective LRUs. (This is the preferred way for
- * callers to check for caps they want.)
- */
- int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
- {
- struct ceph_cap *cap;
- struct rb_node *p;
- int have = ci->i_snap_caps;
- if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask %p snap issued %s"
- " (mask %s)\n", &ci->vfs_inode,
- ceph_cap_string(have),
- ceph_cap_string(mask));
- return 1;
- }
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- if (!__cap_is_valid(cap))
- continue;
- if ((cap->issued & mask) == mask) {
- dout("__ceph_caps_issued_mask %p cap %p issued %s"
- " (mask %s)\n", &ci->vfs_inode, cap,
- ceph_cap_string(cap->issued),
- ceph_cap_string(mask));
- if (touch)
- __touch_cap(cap);
- return 1;
- }
- /* does a combination of caps satisfy mask? */
- have |= cap->issued;
- if ((have & mask) == mask) {
- dout("__ceph_caps_issued_mask %p combo issued %s"
- " (mask %s)\n", &ci->vfs_inode,
- ceph_cap_string(cap->issued),
- ceph_cap_string(mask));
- if (touch) {
- struct rb_node *q;
- /* touch this + preceding caps */
- __touch_cap(cap);
- for (q = rb_first(&ci->i_caps); q != p;
- q = rb_next(q)) {
- cap = rb_entry(q, struct ceph_cap,
- ci_node);
- if (!__cap_is_valid(cap))
- continue;
- __touch_cap(cap);
- }
- }
- return 1;
- }
- }
- return 0;
- }
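
__ceph_caps_issued_mask() can succeed in two ways: a single valid cap covers the whole mask, or several caps OR together to cover it (in which case all contributing caps get touched). A minimal sketch of that check follows; the cap bits are hypothetical and the LRU touching is omitted.

#include <stdio.h>

/* hypothetical cap bits, for the example only */
#define FILE_SHARED 0x1
#define FILE_CACHE  0x2
#define FILE_RD     0x4

static int mask_satisfied(const int *issued, int ncaps, int mask)
{
    int have = 0;
    int i;

    for (i = 0; i < ncaps; i++) {
        if ((issued[i] & mask) == mask)
            return 1;              /* one cap covers the whole mask */
        have |= issued[i];
        if ((have & mask) == mask)
            return 1;              /* several caps combine to cover it */
    }
    return 0;
}

int main(void)
{
    int caps_from_two_mds[] = { FILE_SHARED, FILE_CACHE | FILE_RD };

    printf("%d\n", mask_satisfied(caps_from_two_mds, 2,
                                  FILE_SHARED | FILE_CACHE));  /* 1: combined */
    printf("%d\n", mask_satisfied(caps_from_two_mds, 1,
                                  FILE_SHARED | FILE_CACHE));  /* 0 */
    return 0;
}
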
- /*
- * Return true if mask caps are currently being revoked by an MDS.
- */
- int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
- {
- struct inode *inode = &ci->vfs_inode;
- struct ceph_cap *cap;
- struct rb_node *p;
- int ret = 0;
- spin_lock(&inode->i_lock);
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- if (__cap_is_valid(cap) &&
- (cap->implemented & ~cap->issued & mask)) {
- ret = 1;
- break;
- }
- }
- spin_unlock(&inode->i_lock);
- dout("ceph_caps_revoking %p %s = %d\n", inode,
- ceph_cap_string(mask), ret);
- return ret;
- }
- int __ceph_caps_used(struct ceph_inode_info *ci)
- {
- int used = 0;
- if (ci->i_pin_ref)
- used |= CEPH_CAP_PIN;
- if (ci->i_rd_ref)
- used |= CEPH_CAP_FILE_RD;
- if (ci->i_rdcache_ref || ci->vfs_inode.i_data.nrpages)
- used |= CEPH_CAP_FILE_CACHE;
- if (ci->i_wr_ref)
- used |= CEPH_CAP_FILE_WR;
- if (ci->i_wb_ref || ci->i_wrbuffer_ref)
- used |= CEPH_CAP_FILE_BUFFER;
- return used;
- }
- /*
- * wanted, by virtue of open file modes
- */
- int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
- {
- int want = 0;
- int mode;
- for (mode = 0; mode < CEPH_FILE_MODE_NUM; mode++)
- if (ci->i_nr_by_mode[mode])
- want |= ceph_caps_for_mode(mode);
- return want;
- }
- /*
- * Return caps we have registered with the MDS(s) as 'wanted'.
- */
- int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
- {
- struct ceph_cap *cap;
- struct rb_node *p;
- int mds_wanted = 0;
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- if (!__cap_is_valid(cap))
- continue;
- mds_wanted |= cap->mds_wanted;
- }
- return mds_wanted;
- }
- /*
- * called under i_lock
- */
- static int __ceph_is_any_caps(struct ceph_inode_info *ci)
- {
- return !RB_EMPTY_ROOT(&ci->i_caps) || ci->i_cap_exporting_mds >= 0;
- }
- /*
- * Remove a cap. Take steps to deal with a racing iterate_session_caps.
- *
- * caller should hold i_lock.
- * caller will not hold session s_mutex if called from destroy_inode.
- */
- void __ceph_remove_cap(struct ceph_cap *cap)
- {
- struct ceph_mds_session *session = cap->session;
- struct ceph_inode_info *ci = cap->ci;
- struct ceph_mds_client *mdsc =
- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
- int removed = 0;
- dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);
- /* remove from session list */
- spin_lock(&session->s_cap_lock);
- if (session->s_cap_iterator == cap) {
- /* not yet, we are iterating over this very cap */
- dout("__ceph_remove_cap delaying %p removal from session %p\n",
- cap, cap->session);
- } else {
- list_del_init(&cap->session_caps);
- session->s_nr_caps--;
- cap->session = NULL;
- removed = 1;
- }
- /* protect backpointer with s_cap_lock: see iterate_session_caps */
- cap->ci = NULL;
- spin_unlock(&session->s_cap_lock);
- /* remove from inode list */
- rb_erase(&cap->ci_node, &ci->i_caps);
- if (ci->i_auth_cap == cap)
- ci->i_auth_cap = NULL;
- if (removed)
- ceph_put_cap(mdsc, cap);
- if (!__ceph_is_any_caps(ci) && ci->i_snap_realm) {
- struct ceph_snap_realm *realm = ci->i_snap_realm;
- spin_lock(&realm->inodes_with_caps_lock);
- list_del_init(&ci->i_snap_realm_item);
- ci->i_snap_realm_counter++;
- ci->i_snap_realm = NULL;
- spin_unlock(&realm->inodes_with_caps_lock);
- ceph_put_snap_realm(mdsc, realm);
- }
- if (!__ceph_is_any_real_caps(ci))
- __cap_delay_cancel(mdsc, ci);
- }
- /*
- * Build and send a cap message to the given MDS.
- *
- * Caller should be holding s_mutex.
- */
- static int send_cap_msg(struct ceph_mds_session *session,
- u64 ino, u64 cid, int op,
- int caps, int wanted, int dirty,
- u32 seq, u64 flush_tid, u32 issue_seq, u32 mseq,
- u64 size, u64 max_size,
- struct timespec *mtime, struct timespec *atime,
- u64 time_warp_seq,
- uid_t uid, gid_t gid, mode_t mode,
- u64 xattr_version,
- struct ceph_buffer *xattrs_buf,
- u64 follows)
- {
- struct ceph_mds_caps *fc;
- struct ceph_msg *msg;
- dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
- " seq %u/%u mseq %u follows %lld size %llu/%llu"
- " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
- cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
- ceph_cap_string(dirty),
- seq, issue_seq, mseq, follows, size, max_size,
- xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);
- msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc), GFP_NOFS);
- if (!msg)
- return -ENOMEM;
- msg->hdr.tid = cpu_to_le64(flush_tid);
- fc = msg->front.iov_base;
- memset(fc, 0, sizeof(*fc));
- fc->cap_id = cpu_to_le64(cid);
- fc->op = cpu_to_le32(op);
- fc->seq = cpu_to_le32(seq);
- fc->issue_seq = cpu_to_le32(issue_seq);
- fc->migrate_seq = cpu_to_le32(mseq);
- fc->caps = cpu_to_le32(caps);
- fc->wanted = cpu_to_le32(wanted);
- fc->dirty = cpu_to_le32(dirty);
- fc->ino = cpu_to_le64(ino);
- fc->snap_follows = cpu_to_le64(follows);
- fc->size = cpu_to_le64(size);
- fc->max_size = cpu_to_le64(max_size);
- if (mtime)
- ceph_encode_timespec(&fc->mtime, mtime);
- if (atime)
- ceph_encode_timespec(&fc->atime, atime);
- fc->time_warp_seq = cpu_to_le32(time_warp_seq);
- fc->uid = cpu_to_le32(uid);
- fc->gid = cpu_to_le32(gid);
- fc->mode = cpu_to_le32(mode);
- fc->xattr_version = cpu_to_le64(xattr_version);
- if (xattrs_buf) {
- msg->middle = ceph_buffer_get(xattrs_buf);
- fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
- msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
- }
- ceph_con_send(&session->s_con, msg);
- return 0;
- }
- static void __queue_cap_release(struct ceph_mds_session *session,
- u64 ino, u64 cap_id, u32 migrate_seq,
- u32 issue_seq)
- {
- struct ceph_msg *msg;
- struct ceph_mds_cap_release *head;
- struct ceph_mds_cap_item *item;
- spin_lock(&session->s_cap_lock);
- BUG_ON(!session->s_num_cap_releases);
- msg = list_first_entry(&session->s_cap_releases,
- struct ceph_msg, list_head);
- dout(" adding %llx release to mds%d msg %p (%d left)\n",
- ino, session->s_mds, msg, session->s_num_cap_releases);
- BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
- head = msg->front.iov_base;
- head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
- item = msg->front.iov_base + msg->front.iov_len;
- item->ino = cpu_to_le64(ino);
- item->cap_id = cpu_to_le64(cap_id);
- item->migrate_seq = cpu_to_le32(migrate_seq);
- item->seq = cpu_to_le32(issue_seq);
- session->s_num_cap_releases--;
- msg->front.iov_len += sizeof(*item);
- if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
- dout(" release msg %p full\n", msg);
- list_move_tail(&msg->list_head, &session->s_cap_releases_done);
- } else {
- dout(" release msg %p at %d/%d (%d)\n", msg,
- (int)le32_to_cpu(head->num),
- (int)CEPH_CAPS_PER_RELEASE,
- (int)msg->front.iov_len);
- }
- spin_unlock(&session->s_cap_lock);
- }
- /*
- * Queue cap releases when an inode is dropped from our cache. Since
- * inode is about to be destroyed, there is no need for i_lock.
- */
- void ceph_queue_caps_release(struct inode *inode)
- {
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct rb_node *p;
- p = rb_first(&ci->i_caps);
- while (p) {
- struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
- struct ceph_mds_session *session = cap->session;
- __queue_cap_release(session, ceph_ino(inode), cap->cap_id,
- cap->mseq, cap->issue_seq);
- p = rb_next(p);
- __ceph_remove_cap(cap);
- }
- }
- /*
- * Send a cap msg on the given inode. Update our caps state, then
- * drop i_lock and send the message.
- *
- * Make note of max_size reported/requested from mds, revoked caps
- * that have now been implemented.
- *
- * Make a half-hearted attempt to invalidate the page cache if we are
- * dropping RDCACHE. Note that this will leave behind locked pages
- * that we'll then need to deal with elsewhere.
- *
- * Return non-zero if delayed release, or we experienced an error
- * such that the caller should requeue + retry later.
- *
- * called with i_lock, then drops it.
- * caller should hold snap_rwsem (read), s_mutex.
- */
- static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
- int op, int used, int want, int retain, int flushing,
- unsigned *pflush_tid)
- __releases(cap->ci->vfs_inode->i_lock)
- {
- struct ceph_inode_info *ci = cap->ci;
- struct inode *inode = &ci->vfs_inode;
- u64 cap_id = cap->cap_id;
- int held, revoking, dropping, keep;
- u64 seq, issue_seq, mseq, time_warp_seq, follows;
- u64 size, max_size;
- struct timespec mtime, atime;
- int wake = 0;
- mode_t mode;
- uid_t uid;
- gid_t gid;
- struct ceph_mds_session *session;
- u64 xattr_version = 0;
- struct ceph_buffer *xattr_blob = NULL;
- int delayed = 0;
- u64 flush_tid = 0;
- int i;
- int ret;
- held = cap->issued | cap->implemented;
- revoking = cap->implemented & ~cap->issued;
- retain &= ~revoking;
- dropping = cap->issued & ~retain;
- dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
- inode, cap, cap->session,
- ceph_cap_string(held), ceph_cap_string(held & retain),
- ceph_cap_string(revoking));
- BUG_ON((retain & CEPH_CAP_PIN) == 0);
- session = cap->session;
- /* don't release wanted unless we've waited a bit. */
- if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
- time_before(jiffies, ci->i_hold_caps_min)) {
- dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->issued & retain),
- ceph_cap_string(cap->mds_wanted),
- ceph_cap_string(want));
- want |= cap->mds_wanted;
- retain |= cap->issued;
- delayed = 1;
- }
- ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
- cap->issued &= retain; /* drop bits we don't want */
- if (cap->implemented & ~cap->issued) {
- /*
- * Wake up any waiters on wanted -> needed transition.
- * This is due to the weird transition from buffered
- * to sync IO... we need to flush dirty pages _before_
- * allowing sync writes to avoid reordering.
- */
- wake = 1;
- }
- cap->implemented &= cap->issued | used;
- cap->mds_wanted = want;
- if (flushing) {
- /*
- * assign a tid for flush operations so we can avoid
- * flush1 -> dirty1 -> flush2 -> flushack1 -> mark
- * clean type races. track latest tid for every bit
- * so we can handle flush AxFw, flush Fw, and have the
- * first ack clean Ax.
- */
- flush_tid = ++ci->i_cap_flush_last_tid;
- if (pflush_tid)
- *pflush_tid = flush_tid;
- dout(" cap_flush_tid %d\n", (int)flush_tid);
- for (i = 0; i < CEPH_CAP_BITS; i++)
- if (flushing & (1 << i))
- ci->i_cap_flush_tid[i] = flush_tid;
- follows = ci->i_head_snapc->seq;
- } else {
- follows = 0;
- }
- keep = cap->implemented;
- seq = cap->seq;
- issue_seq = cap->issue_seq;
- mseq = cap->mseq;
- size = inode->i_size;
- ci->i_reported_size = size;
- max_size = ci->i_wanted_max_size;
- ci->i_requested_max_size = max_size;
- mtime = inode->i_mtime;
- atime = inode->i_atime;
- time_warp_seq = ci->i_time_warp_seq;
- uid = inode->i_uid;
- gid = inode->i_gid;
- mode = inode->i_mode;
- if (flushing & CEPH_CAP_XATTR_EXCL) {
- __ceph_build_xattrs_blob(ci);
- xattr_blob = ci->i_xattrs.blob;
- xattr_version = ci->i_xattrs.version;
- }
- spin_unlock(&inode->i_lock);
- ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
- op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
- size, max_size, &mtime, &atime, time_warp_seq,
- uid, gid, mode, xattr_version, xattr_blob,
- follows);
- if (ret < 0) {
- dout("error sending cap msg, must requeue %p\n", inode);
- delayed = 1;
- }
- if (wake)
- wake_up_all(&ci->i_cap_wq);
- return delayed;
- }
- /*
- * When a snapshot is taken, clients accumulate dirty metadata on
- * inodes with capabilities in ceph_cap_snaps to describe the file
- * state at the time the snapshot was taken. This must be flushed
- * asynchronously back to the MDS once sync writes complete and dirty
- * data is written out.
- *
- * Unless @again is true, skip cap_snaps that were already sent to
- * the MDS (i.e., during this session).
- *
- * Called under i_lock. Takes s_mutex as needed.
- */
- void __ceph_flush_snaps(struct ceph_inode_info *ci,
- struct ceph_mds_session **psession,
- int again)
- __releases(ci->vfs_inode->i_lock)
- __acquires(ci->vfs_inode->i_lock)
- {
- struct inode *inode = &ci->vfs_inode;
- int mds;
- struct ceph_cap_snap *capsnap;
- u32 mseq;
- struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
- struct ceph_mds_session *session = NULL; /* if session != NULL, we hold
- session->s_mutex */
- u64 next_follows = 0; /* keep track of how far we've gotten through the
- i_cap_snaps list, and skip these entries next time
- around to avoid an infinite loop */
- if (psession)
- session = *psession;
- dout("__flush_snaps %p\n", inode);
- retry:
- list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
- /* avoid an infinite loop after retry */
- if (capsnap->follows < next_follows)
- continue;
- /*
- * we need to wait for sync writes to complete and for dirty
- * pages to be written out.
- */
- if (capsnap->dirty_pages || capsnap->writing)
- break;
- /*
- * if cap writeback already occurred, we should have dropped
- * the capsnap in ceph_put_wrbuffer_cap_refs.
- */
- BUG_ON(capsnap->dirty == 0);
- /* pick mds, take s_mutex */
- if (ci->i_auth_cap == NULL) {
- dout("no auth cap (migrating?), doing nothing\n");
- goto out;
- }
- /* only flush each capsnap once */
- if (!again && !list_empty(&capsnap->flushing_item)) {
- dout("already flushed %p, skipping\n", capsnap);
- continue;
- }
- mds = ci->i_auth_cap->session->s_mds;
- mseq = ci->i_auth_cap->mseq;
- if (session && session->s_mds != mds) {
- dout("oops, wrong session %p mutex\n", session);
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- session = NULL;
- }
- if (!session) {
- spin_unlock(&inode->i_lock);
- mutex_lock(&mdsc->mutex);
- session = __ceph_lookup_mds_session(mdsc, mds);
- mutex_unlock(&mdsc->mutex);
- if (session) {
- dout("inverting session/ino locks on %p\n",
- session);
- mutex_lock(&session->s_mutex);
- }
- /*
- * if session == NULL, we raced against a cap
- * deletion or migration. retry, and we'll
- * get a better @mds value next time.
- */
- spin_lock(&inode->i_lock);
- goto retry;
- }
- capsnap->flush_tid = ++ci->i_cap_flush_last_tid;
- atomic_inc(&capsnap->nref);
- if (!list_empty(&capsnap->flushing_item))
- list_del_init(&capsnap->flushing_item);
- list_add_tail(&capsnap->flushing_item,
- &session->s_cap_snaps_flushing);
- spin_unlock(&inode->i_lock);
- dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
- inode, capsnap, capsnap->follows, capsnap->flush_tid);
- send_cap_msg(session, ceph_vino(inode).ino, 0,
- CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
- capsnap->dirty, 0, capsnap->flush_tid, 0, mseq,
- capsnap->size, 0,
- &capsnap->mtime, &capsnap->atime,
- capsnap->time_warp_seq,
- capsnap->uid, capsnap->gid, capsnap->mode,
- capsnap->xattr_version, capsnap->xattr_blob,
- capsnap->follows);
- next_follows = capsnap->follows + 1;
- ceph_put_cap_snap(capsnap);
- spin_lock(&inode->i_lock);
- goto retry;
- }
- /* we flushed them all; remove this inode from the queue */
- spin_lock(&mdsc->snap_flush_lock);
- list_del_init(&ci->i_snap_flush_item);
- spin_unlock(&mdsc->snap_flush_lock);
- out:
- if (psession)
- *psession = session;
- else if (session) {
- mutex_unlock(&session->s_mutex);
- ceph_put_mds_session(session);
- }
- }
- static void ceph_flush_snaps(struct ceph_inode_info *ci)
- {
- struct inode *inode = &ci->vfs_inode;
- spin_lock(&inode->i_lock);
- __ceph_flush_snaps(ci, NULL, 0);
- spin_unlock(&inode->i_lock);
- }
- /*
- * Mark caps dirty. If inode is newly dirty, return the dirty flags.
- * Caller is then responsible for calling __mark_inode_dirty with the
- * returned flags value.
- */
- int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
- {
- struct ceph_mds_client *mdsc =
- ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
- struct inode *inode = &ci->vfs_inode;
- int was = ci->i_dirty_caps;
- int dirty = 0;
- dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
- ceph_cap_string(mask), ceph_cap_string(was),
- ceph_cap_string(was | mask));
- ci->i_dirty_caps |= mask;
- if (was == 0) {
- if (!ci->i_head_snapc)
- ci->i_head_snapc = ceph_get_snap_context(
- ci->i_snap_realm->cached_context);
- dout(" inode %p now dirty snapc %p\n", &ci->vfs_inode,
- ci->i_head_snapc);
- BUG_ON(!list_empty(&ci->i_dirty_item));
- spin_lock(&mdsc->cap_dirty_lock);
- list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
- spin_unlock(&mdsc->cap_dirty_lock);
- if (ci->i_flushing_caps == 0) {
- ihold(inode);
- dirty |= I_DIRTY_SYNC;
- }
- }
- BUG_ON(list_empty(&ci->i_dirty_item));
- if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
- (mask & CEPH_CAP_FILE_BUFFER))
- dirty |= I_DIRTY_DATASYNC;
- __cap_delay_requeue(mdsc, ci);
- return dirty;
- }
- /*
- * Add dirty inode to the flushing list. Assigned a seq number so we
- * can wait for caps to flush without starving.
- *
- * Called under i_lock.
- */
- static int __mark_caps_flushing(struct inode *inode,
- struct ceph_mds_session *session)
- {
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
- struct ceph_inode_info *ci = ceph_inode(inode);
- int flushing;
- BUG_ON(ci->i_dirty_caps == 0);
- BUG_ON(list_empty(&ci->i_dirty_item));
- flushing = ci->i_dirty_caps;
- dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
- ceph_cap_string(flushing),
- ceph_cap_string(ci->i_flushing_caps),
- ceph_cap_string(ci->i_flushing_caps | flushing));
- ci->i_flushing_caps |= flushing;
- ci->i_dirty_caps = 0;
- dout(" inode %p now !dirty\n", inode);
- spin_lock(&mdsc->cap_dirty_lock);
- list_del_init(&ci->i_dirty_item);
- ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
- if (list_empty(&ci->i_flushing_item)) {
- list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
- mdsc->num_cap_flushing++;
- dout(" inode %p now flushing seq %lld\n", inode,
- ci->i_cap_flush_seq);
- } else {
- list_move_tail(&ci->i_flushing_item, &session->s_cap_flushing);
- dout(" inode %p now flushing (more) seq %lld\n", inode,
- ci->i_cap_flush_seq);
- }
- spin_unlock(&mdsc->cap_dirty_lock);
- return flushing;
- }
- /*
- * try to invalidate mapping pages without blocking.
- */
- static int try_nonblocking_invalidate(struct inode *inode)
- {
- struct ceph_inode_info *ci = ceph_inode(inode);
- u32 invalidating_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
- invalidate_mapping_pages(&inode->i_data, 0, -1);
- spin_lock(&inode->i_lock);
- if (inode->i_data.nrpages == 0 &&
- invalidating_gen == ci->i_rdcache_gen) {
- /* success. */
- dout("try_nonblocking_invalidate %p success\n", inode);
- /* save any racing async invalidate some trouble */
- ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
- return 0;
- }
- dout("try_nonblocking_invalidate %p failed\n", inode);
- return -1;
- }
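
The helper above relies on a generation check rather than holding i_lock across the potentially slow invalidation: snapshot i_rdcache_gen, drop the lock, invalidate, retake the lock, and only claim success if the page count is zero and the generation has not moved. A simplified userspace sketch of that pattern is shown below; the names are hypothetical and the real locking is elided.

#include <stdio.h>

/* hypothetical shared state, normally protected by i_lock */
static unsigned cache_pages = 4;
static unsigned rdcache_gen = 7;

static void slow_invalidate(void)
{
    /* stand-in for invalidate_mapping_pages(), done without the lock held */
    cache_pages = 0;
}

static int try_nonblocking_invalidate_model(void)
{
    unsigned invalidating_gen = rdcache_gen;   /* snapshot under the lock */

    /* drop the lock ... */
    slow_invalidate();
    /* ... retake the lock */

    if (cache_pages == 0 && invalidating_gen == rdcache_gen)
        return 0;        /* success: nothing repopulated the cache meanwhile */
    return -1;           /* a racing read or cap grant bumped the generation */
}

int main(void)
{
    printf("%d\n", try_nonblocking_invalidate_model());   /* prints 0 */
    return 0;
}
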
- /*
- * Swiss army knife function to examine currently used and wanted
- * versus held caps. Release, flush, ack revoked caps to mds as
- * appropriate.
- *
- * CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
- * cap release further.
- * CHECK_CAPS_AUTHONLY - we should only check the auth cap
- * CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
- * further delay.
- */
- void ceph_check_caps(struct ceph_inode_info *ci, int flags,
- struct ceph_mds_session *session)
- {
- struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
- struct ceph_mds_client *mdsc = fsc->mdsc;
- struct inode *inode = &ci->vfs_inode;
- struct ceph_cap *cap;
- int file_wanted, used;
- int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
- int issued, implemented, want, retain, revoking, flushing = 0;
- int mds = -1; /* keep track of how far we've gone through i_caps list
- to avoid an infinite loop on retry */
- struct rb_node *p;
- int tried_invalidate = 0;
- int delayed = 0, sent = 0, force_requeue = 0, num;
- int queue_invalidate = 0;
- int is_delayed = flags & CHECK_CAPS_NODELAY;
- /* if we are unmounting, flush any unused caps immediately. */
- if (mdsc->stopping)
- is_delayed = 1;
- spin_lock(&inode->i_lock);
- if (ci->i_ceph_flags & CEPH_I_FLUSH)
- flags |= CHECK_CAPS_FLUSH;
- /* flush snaps first time around only */
- if (!list_empty(&ci->i_cap_snaps))
- __ceph_flush_snaps(ci, &session, 0);
- goto retry_locked;
- retry:
- spin_lock(&inode->i_lock);
- retry_locked:
- file_wanted = __ceph_caps_file_wanted(ci);
- used = __ceph_caps_used(ci);
- want = file_wanted | used;
- issued = __ceph_caps_issued(ci, &implemented);
- revoking = implemented & ~issued;
- retain = want | CEPH_CAP_PIN;
- if (!mdsc->stopping && inode->i_nlink > 0) {
- if (want) {
- retain |= CEPH_CAP_ANY; /* be greedy */
- } else {
- retain |= CEPH_CAP_ANY_SHARED;
- /*
- * keep RD only if we didn't have the file open RW,
- * because then the mds would revoke it anyway to
- * journal max_size=0.
- */
- if (ci->i_max_size == 0)
- retain |= CEPH_CAP_ANY_RD;
- }
- }
- dout("check_caps %p file_want %s used %s dirty %s flushing %s"
- " issued %s revoking %s retain %s %s%s%s\n", inode,
- ceph_cap_string(file_wanted),
- ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
- ceph_cap_string(ci->i_flushing_caps),
- ceph_cap_string(issued), ceph_cap_string(revoking),
- ceph_cap_string(retain),
- (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
- (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
- (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");
- /*
- * If we no longer need to hold onto our old caps, and we may
- * have cached pages, but don't want them, then try to invalidate.
- * If we fail, it's because pages are locked.... try again later.
- */
- if ((!is_delayed || mdsc->stopping) &&
- ci->i_wrbuffer_ref == 0 && /* no dirty pages... */
- inode->i_data.nrpages && /* have cached pages */
- (file_wanted == 0 || /* no open files */
- (revoking & (CEPH_CAP_FILE_CACHE|
- CEPH_CAP_FILE_LAZYIO))) && /* or revoking cache */
- !tried_invalidate) {
- dout("check_caps trying to invalidate on %p\n", inode);
- if (try_nonblocking_invalidate(inode) < 0) {
- if (revoking & (CEPH_CAP_FILE_CACHE|
- CEPH_CAP_FILE_LAZYIO)) {
- dout("check_caps queuing invalidate\n");
- queue_invalidate = 1;
- ci->i_rdcache_revoking = ci->i_rdcache_gen;
- } else {
- dout("check_caps failed to invalidate pages\n");
- /* we failed to invalidate pages. check these
- caps again later. */
- force_requeue = 1;
- __cap_set_timeouts(mdsc, ci);
- }
- }
- tried_invalidate = 1;
- goto retry_locked;
- }
- num = 0;
- for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
- cap = rb_entry(p, struct ceph_cap, ci_node);
- num++;
- /* avoid looping forever */
- if (mds >= cap->mds ||
- ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
- continue;
- /* NOTE: no side-effects allowed, until we take s_mutex */
- revoking = cap->implemented & ~cap->issued;
- dout(" mds%d cap %p issued %s implemented %s revoking %s\n",
- cap->mds, cap, ceph_cap_string(cap->issued),
- ceph_cap_string(cap->implemented),
- ceph_cap_string(revoking));
- if (cap == ci->i_auth_cap &&
- (cap->issued & CEPH_CAP_FILE_WR)) {
- /* request larger max_size from MDS? */
- if (ci->i_wanted_max_size > ci->i_max_size &&
- ci->i_wanted_max_size > ci->i_requested_max_size) {
- dout("requesting new max_size\n");
- goto ack;
- }
- /* approaching file_max? */
- if ((inode->i_size << 1) >= ci->i_max_size &&
- (ci->i_reported_size << 1) < ci->i_max_size) {
- dout("i_size approaching max_size\n");
- goto ack;
- }
- }
- /* flush anything dirty? */
- if (cap == ci->i_auth_cap && (flags & CHECK_CAPS_FLUSH) &&
- ci->i_dirty_caps) {
- dout("flushing dirty caps\n");
- goto ack;
- }
- /* completed revocation? going down and there are no caps? */
- if (revoking && (revoking & used) == 0) {
- dout("completed revocation of %s\n",
- ceph_cap_string(cap->implemented & ~cap->issued));
- goto ack;
- }
- /* want more caps from mds? */
- if (want & ~(cap->mds_wanted | cap->issued))
- goto ack;
- /* things we might delay */
- if ((cap->issued & ~retain) == 0 &&
- cap->mds_wanted == want)
- continue; /* nope, all good */
- if (is_delayed)
- goto ack;
- /* delay? */
- if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
- time_before(jiffies, ci->i_hold_caps_max)) {
- dout(" delaying issued %s -> %s, wanted %s -> %s\n",
- ceph_cap_string(cap->issued),
- ceph_cap_string(cap->issued & retain),
- ceph_cap_string(cap->mds_wanted),
- ceph_cap_string(want));
- delayed++;
- continue;
- }
- ack:
- if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
- dout(" skipping %p I_NOFLUSH set\n", inode);
- continue;
- }
- if (session && session != cap->session) {
- dout("oops, wrong session %p mutex\n", session);
- mutex_unlock(&session->s_mutex);
- session = NULL;
- }
- if (!session) {
- session = cap->session;
- if (mutex_trylock(&session->s_mutex) == 0) {
- dout("inverting session/ino locks on %p\n",
- session);
- spin_unlock(&inode->i_lock);
- if (took_snap_rwsem) {
- up_read(&mdsc->snap_rwsem);
- took_snap_rwsem = 0;
- }
- mutex_lock(&session->s_mutex);
- goto retry;
- }
- }
- /* take snap_rwsem after session mutex */
- if (!took_snap_rwsem) {
- if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
- dout("inverting snap/in locks on %p\n",
- inode);
- spin_unlock(&inode->i_lock);
- down_read(&mdsc->snap_rwsem);
- took_snap_rwsem = 1;
- goto retry;
- }
- took_snap_rwsem = 1;
- }
- if (cap == ci->i_auth_cap && ci->i_dirty_caps)
- flushing = __mark_caps_flushing(inode, session);
- else
- flushing = 0;
- mds = cap->mds; /* remember mds, so we don't repeat */
- sent++;
- /* __send_cap drops i_lock */
- delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
- retain, flushing, NULL);
- goto retry; /* retake i_lock and restart our cap scan. */
- }
- /*
- * Reschedule delayed caps release if we delayed anything,
- * otherwise cancel.
- */
- if (delayed && is_delayed)
- force_requeue = 1; /* __send_cap delayed release; requeue */
- if (!delayed && !is_delayed)
- __cap_delay_cancel(mdsc, ci);
- else if (!is_delayed || force_requeue)
- __cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
- if (queue_invalidate)
- ceph_queue_invalidate(inode);
- if (session)
- mutex_unlock(&session->s_mutex);
- if (took_snap_rwsem)
- up_read(&mdsc->snap_rwsem);
- }
- /*
- * Try to flush dirty caps back to the auth mds.
- */
- static int try_flush_caps(struct inode *inode, struct ceph_mds_session *session,
- unsigned *flush_tid)
- {
- struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
- struct ceph_inode_info *ci = ceph_inode(inode);
- int unlock_session = session ? 0 : 1;
- int flushing = 0;
- retry:
- spin_lock(&inode->i_lock);
- if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
- dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
- goto out;
- }
- if (ci->i_dirty_caps && ci->i_auth_cap) {
- struct ceph_cap *cap = ci->i_auth_cap;
- int used = __ceph_caps_used(ci);
- int want = __ceph_caps_wanted(ci);
- int delayed;
- if (!session) {
- spin_unlock(&inode->i_lock);
- session = cap->session;
- mutex_lock(&session->s_mutex);
- goto retry;
- }
- BUG_ON(session != cap->session);
- if (cap->session->s_state < CEPH_MDS_SESSION_OPEN)
- goto out;
- flushing = __mark_caps_flushing(inode, session);
- /* __send_cap drops i_lock */
- delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
- cap->issued | cap->implemented, flushing,
- flush_tid);
- if (!delayed)
- goto out_unlocked;
- spin_lock(&inode->i_lock);
- __cap_delay_requeue(mdsc, ci);
- }
- out:
- spin_unlock(&inode->i_lock);
- out_unlocked:
- if (session && unlock_session)
- mutex_unlock(&session->s_mutex);
- return flushing;
- }
- /*
- * Return true if we've flushed caps through the given flush_tid.
- */
- static int caps_are_flushed(struct inode *inode, unsigned tid)
- {
- struct ceph_inode_info *ci = ceph_inode(inode);
- int i, ret = 1;
- spin_lock(&inode->i_lock);
- for (i = 0; i < CEPH_CAP_BITS; i++)
- if ((ci->i_flushing_caps & (1 << i)) &&
- ci->i_cap_flush_tid[i] <= tid) {
- /* still flushing this bit */
- ret = 0;
- break;
- }
- spin_unlock(&inode->i_lock);
- return ret;
- }
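
caps_are_flushed() works against the per-bit tid array that __send_cap() fills in: each dirty cap bit remembers the tid of the most recent flush that carried it, so an ack for an older flush cannot prematurely mark a re-dirtied bit clean. The sketch below is an illustrative userspace model of that bookkeeping (hypothetical names, not caps.c code).

#include <stdio.h>

#define CAP_BITS 16

struct flush_state {
    unsigned flushing;                 /* bitmask of caps still being flushed */
    unsigned long long tid[CAP_BITS];  /* latest flush tid per bit */
};

/* record that 'bits' were carried to the MDS by flush 'tid' */
static void mark_flushing(struct flush_state *fs, unsigned bits,
                          unsigned long long tid)
{
    int i;

    fs->flushing |= bits;
    for (i = 0; i < CAP_BITS; i++)
        if (bits & (1u << i))
            fs->tid[i] = tid;
}

/* the MDS acked flush 'tid' for 'bits': clear only bits not re-flushed since */
static void ack_flush(struct flush_state *fs, unsigned bits,
                      unsigned long long tid)
{
    int i;

    for (i = 0; i < CAP_BITS; i++)
        if ((bits & (1u << i)) && fs->tid[i] == tid)
            fs->flushing &= ~(1u << i);
}

/* same shape as caps_are_flushed(): is everything up to 'tid' done? */
static int flushed_through(const struct flush_state *fs, unsigned long long tid)
{
    int i;

    for (i = 0; i < CAP_BITS; i++)
        if ((fs->flushing & (1u << i)) && fs->tid[i] <= tid)
            return 0;
    return 1;
}

int main(void)
{
    struct flush_state fs = { 0, { 0 } };

    mark_flushing(&fs, 0x3, 1);   /* flush bits 0 and 1 as tid 1 */
    mark_flushing(&fs, 0x2, 2);   /* bit 1 dirtied again, flushed as tid 2 */
    ack_flush(&fs, 0x3, 1);       /* ack for tid 1 clears bit 0 only */
    printf("%d %d\n", flushed_through(&fs, 1), flushed_through(&fs, 2)); /* 1 0 */
    return 0;
}
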
- /*
- * Wait on any unsafe replies for the given inode. First wait on the
- * newest request, and make that the upper bound. Then, if there are
- * more requests, keep waiting on the oldest as long as it is still older
- * than the original request.
- */
- static void sync_write_wait(struct inode *inode)
- {
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct list_head *head = &ci->i_unsafe_writes;
- struct ceph_osd_request *req;
- u64 last_tid;
- spin_lock(&ci->i_unsafe_lock);
- if (list_empty(head))
- goto out;
- /* set upper bound as _last_ entry in chain */
- req = list_entry(head->prev, struct ceph_osd_request,
- r_unsafe_item);
- last_tid = req->r_tid;
- do {
- ceph_osdc_get_request(req);
- spin_unlock(&ci->i_unsafe_lock);
- dout("sync_write_wait on tid %llu (until %llu)\n",
- req->r_tid, last_tid);
- wait_for_completion(&req->r_safe_completion);
- spin_lock(&ci->i_unsafe_lock);
- ceph_osdc_put_request(req);
- /*
- * from here on look at first entry in chain, since we
- * only want to wait for anything older than last_tid
- */
- if (list_empty(head))
- break;
- req = list_entry(head->next, struct ceph_osd_request,
- r_unsafe_item);
- } while (req->r_tid < last_tid);
- out:
- spin_unlock(&ci->i_unsafe_lock);
- }
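
sync_write_wait() bounds its work by first waiting on the newest unsafe request and then draining from the oldest end, skipping anything newer than that initial bound so writes issued after the sync do not extend the wait. A simplified model of that strategy follows; the array stands in for the i_unsafe_writes list, the names are hypothetical, and the lock/recheck dance of the real code is omitted.

#include <stdio.h>

/* hypothetical unsafe-write tids, ordered oldest..newest */
static unsigned long long pending[] = { 10, 11, 15 };
static int npending = 3;

static void wait_for(unsigned long long tid)
{
    /* stand-in for wait_for_completion() on the request */
    printf("waited on tid %llu\n", tid);
}

static void sync_write_wait_model(void)
{
    unsigned long long last_tid;
    int i;

    if (npending == 0)
        return;

    /* first wait on the newest request; its tid becomes the upper bound */
    last_tid = pending[npending - 1];
    wait_for(last_tid);

    /* then wait on the older entries, never going past that bound */
    for (i = 0; i < npending - 1 && pending[i] < last_tid; i++)
        wait_for(pending[i]);
}

int main(void)
{
    sync_write_wait_model();
    return 0;
}
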
- int cep…