/*
 * fs/xfs/xfs_log.c
 * Source: android_kernel_asus_tf300t (CyanogenMod). This view of the file
 * is truncated; the full file is ~3753 lines.
 */
- /*
- * Copyright (c) 2000-2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
- #include "xfs.h"
- #include "xfs_fs.h"
- #include "xfs_types.h"
- #include "xfs_bit.h"
- #include "xfs_log.h"
- #include "xfs_inum.h"
- #include "xfs_trans.h"
- #include "xfs_sb.h"
- #include "xfs_ag.h"
- #include "xfs_mount.h"
- #include "xfs_error.h"
- #include "xfs_log_priv.h"
- #include "xfs_buf_item.h"
- #include "xfs_bmap_btree.h"
- #include "xfs_alloc_btree.h"
- #include "xfs_ialloc_btree.h"
- #include "xfs_log_recover.h"
- #include "xfs_trans_priv.h"
- #include "xfs_dinode.h"
- #include "xfs_inode.h"
- #include "xfs_rw.h"
- #include "xfs_trace.h"
- kmem_zone_t *xfs_log_ticket_zone;
- /* Local miscellaneous function prototypes */
- STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket,
- xlog_in_core_t **, xfs_lsn_t *);
- STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
- xfs_buftarg_t *log_target,
- xfs_daddr_t blk_offset,
- int num_bblks);
- STATIC int xlog_space_left(struct log *log, atomic64_t *head);
- STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
- STATIC void xlog_dealloc_log(xlog_t *log);
- /* local state machine functions */
- STATIC void xlog_state_done_syncing(xlog_in_core_t *iclog, int);
- STATIC void xlog_state_do_callback(xlog_t *log,int aborted, xlog_in_core_t *iclog);
- STATIC int xlog_state_get_iclog_space(xlog_t *log,
- int len,
- xlog_in_core_t **iclog,
- xlog_ticket_t *ticket,
- int *continued_write,
- int *logoffsetp);
- STATIC int xlog_state_release_iclog(xlog_t *log,
- xlog_in_core_t *iclog);
- STATIC void xlog_state_switch_iclogs(xlog_t *log,
- xlog_in_core_t *iclog,
- int eventual_size);
- STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
- /* local functions to manipulate grant head */
- STATIC int xlog_grant_log_space(xlog_t *log,
- xlog_ticket_t *xtic);
- STATIC void xlog_grant_push_ail(struct log *log,
- int need_bytes);
- STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
- xlog_ticket_t *ticket);
- STATIC int xlog_regrant_write_log_space(xlog_t *log,
- xlog_ticket_t *ticket);
- STATIC void xlog_ungrant_log_space(xlog_t *log,
- xlog_ticket_t *ticket);
- #if defined(DEBUG)
- STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
- STATIC void xlog_verify_grant_tail(struct log *log);
- STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
- int count, boolean_t syncing);
- STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
- xfs_lsn_t tail_lsn);
- #else
- #define xlog_verify_dest_ptr(a,b)
- #define xlog_verify_grant_tail(a)
- #define xlog_verify_iclog(a,b,c,d)
- #define xlog_verify_tail_lsn(a,b,c)
- #endif
- STATIC int xlog_iclogs_empty(xlog_t *log);
- static void
- xlog_grant_sub_space(
- struct log *log,
- atomic64_t *head,
- int bytes)
- {
- int64_t head_val = atomic64_read(head);
- int64_t new, old;
- do {
- int cycle, space;
- xlog_crack_grant_head_val(head_val, &cycle, &space);
- space -= bytes;
- if (space < 0) {
- space += log->l_logsize;
- cycle--;
- }
- old = head_val;
- new = xlog_assign_grant_head_val(cycle, space);
- head_val = atomic64_cmpxchg(head, old, new);
- } while (head_val != old);
- }
- static void
- xlog_grant_add_space(
- struct log *log,
- atomic64_t *head,
- int bytes)
- {
- int64_t head_val = atomic64_read(head);
- int64_t new, old;
- do {
- int tmp;
- int cycle, space;
- xlog_crack_grant_head_val(head_val, &cycle, &space);
- tmp = log->l_logsize - space;
- if (tmp > bytes)
- space += bytes;
- else {
- space = bytes - tmp;
- cycle++;
- }
- old = head_val;
- new = xlog_assign_grant_head_val(cycle, space);
- head_val = atomic64_cmpxchg(head, old, new);
- } while (head_val != old);
- }
- static void
- xlog_tic_reset_res(xlog_ticket_t *tic)
- {
- tic->t_res_num = 0;
- tic->t_res_arr_sum = 0;
- tic->t_res_num_ophdrs = 0;
- }
- static void
- xlog_tic_add_region(xlog_ticket_t *tic, uint len, uint type)
- {
- if (tic->t_res_num == XLOG_TIC_LEN_MAX) {
- /* add to overflow and start again */
- tic->t_res_o_flow += tic->t_res_arr_sum;
- tic->t_res_num = 0;
- tic->t_res_arr_sum = 0;
- }
- tic->t_res_arr[tic->t_res_num].r_len = len;
- tic->t_res_arr[tic->t_res_num].r_type = type;
- tic->t_res_arr_sum += len;
- tic->t_res_num++;
- }
- /*
- * NOTES:
- *
- * 1. currblock field gets updated at startup and after in-core logs
- * marked as with WANT_SYNC.
- */
- /*
- * This routine is called when a user of a log manager ticket is done with
- * the reservation. If the ticket was ever used, then a commit record for
- * the associated transaction is written out as a log operation header with
- * no data. The flag XLOG_TIC_INITED is set when the first write occurs with
- * a given ticket. If the ticket was one with a permanent reservation, then
- * a few operations are done differently. Permanent reservation tickets by
- * default don't release the reservation. They just commit the current
- * transaction with the belief that the reservation is still needed. A flag
- * must be passed in before permanent reservations are actually released.
- * When these type of tickets are not released, they need to be set into
- * the inited state again. By doing this, a start record will be written
- * out when the next write occurs.
- */
/*
 * Finish a log reservation (see the block comment above for the full
 * contract).  Returns the lsn of the commit record, or (xfs_lsn_t)-1 on
 * shutdown / commit-record write failure.
 */
xfs_lsn_t
xfs_log_done(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket,
	struct xlog_in_core	**iclog,
	uint			flags)
{
	struct log		*log = mp->m_log;
	xfs_lsn_t		lsn = 0;

	if (XLOG_FORCED_SHUTDOWN(log) ||
	    /*
	     * If nothing was ever written, don't write out commit record.
	     * If we get an error, just continue and give back the log ticket.
	     */
	    (((ticket->t_flags & XLOG_TIC_INITED) == 0) &&
	     (xlog_commit_record(log, ticket, iclog, &lsn)))) {
		lsn = (xfs_lsn_t) -1;
		if (ticket->t_flags & XLOG_TIC_PERM_RESERV) {
			/* force release of the permanent reservation below */
			flags |= XFS_LOG_REL_PERM_RESERV;
		}
	}

	if ((ticket->t_flags & XLOG_TIC_PERM_RESERV) == 0 ||
	    (flags & XFS_LOG_REL_PERM_RESERV)) {
		trace_xfs_log_done_nonperm(log, ticket);

		/*
		 * Release ticket if not permanent reservation or a specific
		 * request has been made to release a permanent reservation.
		 */
		xlog_ungrant_log_space(log, ticket);
		xfs_log_ticket_put(ticket);
	} else {
		trace_xfs_log_done_perm(log, ticket);

		xlog_regrant_reserve_log_space(log, ticket);
		/* If this ticket was a permanent reservation and we aren't
		 * trying to release it, reset the inited flags; so next time
		 * we write, a start record will be written out.
		 */
		ticket->t_flags |= XLOG_TIC_INITED;
	}

	return lsn;
}
- /*
- * Attaches a new iclog I/O completion callback routine during
- * transaction commit. If the log is in error state, a non-zero
- * return code is handed back and the caller is responsible for
- * executing the callback at an appropriate time.
- */
- int
- xfs_log_notify(
- struct xfs_mount *mp,
- struct xlog_in_core *iclog,
- xfs_log_callback_t *cb)
- {
- int abortflg;
- spin_lock(&iclog->ic_callback_lock);
- abortflg = (iclog->ic_state & XLOG_STATE_IOERROR);
- if (!abortflg) {
- ASSERT_ALWAYS((iclog->ic_state == XLOG_STATE_ACTIVE) ||
- (iclog->ic_state == XLOG_STATE_WANT_SYNC));
- cb->cb_next = NULL;
- *(iclog->ic_callback_tail) = cb;
- iclog->ic_callback_tail = &(cb->cb_next);
- }
- spin_unlock(&iclog->ic_callback_lock);
- return abortflg;
- }
- int
- xfs_log_release_iclog(
- struct xfs_mount *mp,
- struct xlog_in_core *iclog)
- {
- if (xlog_state_release_iclog(mp->m_log, iclog)) {
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- return EIO;
- }
- return 0;
- }
- /*
- * 1. Reserve an amount of on-disk log space and return a ticket corresponding
- * to the reservation.
- * 2. Potentially, push buffers at tail of log to disk.
- *
- * Each reservation is going to reserve extra space for a log record header.
- * When writes happen to the on-disk log, we don't subtract the length of the
- * log record header from any reservation. By wasting space in each
- * reservation, we prevent over allocation problems.
- */
/*
 * Reserve log space for a transaction.  If *ticket is non-NULL this is a
 * re-reservation on an existing permanent ticket (transaction roll);
 * otherwise a fresh ticket is allocated.  Returns 0 or an XFS error code.
 */
int
xfs_log_reserve(
	struct xfs_mount	*mp,
	int			unit_bytes,
	int			cnt,
	struct xlog_ticket	**ticket,
	__uint8_t		client,
	uint			flags,
	uint			t_type)
{
	struct log		*log = mp->m_log;
	struct xlog_ticket	*internal_ticket;
	int			retval = 0;

	ASSERT(client == XFS_TRANSACTION || client == XFS_LOG);

	if (XLOG_FORCED_SHUTDOWN(log))
		return XFS_ERROR(EIO);

	XFS_STATS_INC(xs_try_logspace);

	if (*ticket != NULL) {
		/* re-reservation is only valid for permanent tickets */
		ASSERT(flags & XFS_LOG_PERM_RESERV);
		internal_ticket = *ticket;

		/*
		 * this is a new transaction on the ticket, so we need to
		 * change the transaction ID so that the next transaction has a
		 * different TID in the log. Just add one to the existing tid
		 * so that we can see chains of rolling transactions in the log
		 * easily.
		 */
		internal_ticket->t_tid++;

		trace_xfs_log_reserve(log, internal_ticket);

		xlog_grant_push_ail(log, internal_ticket->t_unit_res);
		retval = xlog_regrant_write_log_space(log, internal_ticket);
	} else {
		/* may sleep if need to allocate more tickets */
		internal_ticket = xlog_ticket_alloc(log, unit_bytes, cnt,
						  client, flags,
						  KM_SLEEP|KM_MAYFAIL);
		if (!internal_ticket)
			return XFS_ERROR(ENOMEM);
		internal_ticket->t_trans_type = t_type;
		*ticket = internal_ticket;
		trace_xfs_log_reserve(log, internal_ticket);
		/* push enough for all cnt future uses of the reservation */
		xlog_grant_push_ail(log,
				    (internal_ticket->t_unit_res *
				     internal_ticket->t_cnt));
		retval = xlog_grant_log_space(log, internal_ticket);
	}

	return retval;
}	/* xfs_log_reserve */
- /*
- * Mount a log filesystem
- *
- * mp - ubiquitous xfs mount point structure
- * log_target - buftarg of on-disk log device
- * blk_offset - Start block # where block size is 512 bytes (BBSIZE)
- * num_bblocks - Number of BBSIZE blocks in on-disk log
- *
- * Return error or zero.
- */
/*
 * Mount-time log initialisation: allocate the in-core log, initialise the
 * AIL, run log recovery (unless mounting with norecovery), and set up the
 * CIL ticket.  Errors unwind via the goto chain.  Returns 0 or a positive
 * errno-style error.
 */
int
xfs_log_mount(
	xfs_mount_t	*mp,
	xfs_buftarg_t	*log_target,
	xfs_daddr_t	blk_offset,
	int		num_bblks)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
		xfs_notice(mp, "Mounting Filesystem");
	else {
		xfs_notice(mp,
"Mounting filesystem in no-recovery mode. Filesystem will be inconsistent.");
		/* norecovery is only permitted on read-only mounts */
		ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
	}

	mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
	if (IS_ERR(mp->m_log)) {
		/* xlog_alloc_log returns a negated errno in an ERR_PTR */
		error = -PTR_ERR(mp->m_log);
		goto out;
	}

	/*
	 * Initialize the AIL now we have a log.
	 */
	error = xfs_trans_ail_init(mp);
	if (error) {
		xfs_warn(mp, "AIL initialisation failed: error %d", error);
		goto out_free_log;
	}
	mp->m_log->l_ailp = mp->m_ail;

	/*
	 * skip log recovery on a norecovery mount.  pretend it all
	 * just worked.
	 */
	if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
		int	readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

		/* recovery must be able to write; drop RDONLY temporarily */
		if (readonly)
			mp->m_flags &= ~XFS_MOUNT_RDONLY;

		error = xlog_recover(mp->m_log);

		if (readonly)
			mp->m_flags |= XFS_MOUNT_RDONLY;
		if (error) {
			xfs_warn(mp, "log mount/recovery failed: error %d",
				error);
			goto out_destroy_ail;
		}
	}

	/* Normal transactions can now occur */
	mp->m_log->l_flags &= ~XLOG_ACTIVE_RECOVERY;

	/*
	 * Now the log has been fully initialised and we know were our
	 * space grant counters are, we can initialise the permanent ticket
	 * needed for delayed logging to work.
	 */
	xlog_cil_init_post_recovery(mp->m_log);

	return 0;

out_destroy_ail:
	xfs_trans_ail_destroy(mp);
out_free_log:
	xlog_dealloc_log(mp->m_log);
out:
	return error;
}
- /*
- * Finish the recovery of the file system. This is separate from
- * the xfs_log_mount() call, because it depends on the code in
- * xfs_mountfs() to read in the root and real-time bitmap inodes
- * between calling xfs_log_mount() and here.
- *
- * mp - ubiquitous xfs mount point structure
- */
- int
- xfs_log_mount_finish(xfs_mount_t *mp)
- {
- int error;
- if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
- error = xlog_recover_finish(mp->m_log);
- else {
- error = 0;
- ASSERT(mp->m_flags & XFS_MOUNT_RDONLY);
- }
- return error;
- }
- /*
- * Final log writes as part of unmount.
- *
- * Mark the filesystem clean as unmount happens. Note that during relocation
- * this routine needs to be executed as part of source-bag while the
- * deallocation must not be done until source-end.
- */
- /*
- * Unmount record used to have a string "Unmount filesystem--" in the
- * data section where the "Un" was really a magic number (XLOG_UNMOUNT_TYPE).
- * We just write the magic number now since that particular field isn't
- * currently architecture converted and "nUmount" is a bit foo.
- * As far as I know, there weren't any dependencies on the old behaviour.
- */
- int
- xfs_log_unmount_write(xfs_mount_t *mp)
- {
- xlog_t *log = mp->m_log;
- xlog_in_core_t *iclog;
- #ifdef DEBUG
- xlog_in_core_t *first_iclog;
- #endif
- xlog_ticket_t *tic = NULL;
- xfs_lsn_t lsn;
- int error;
- /*
- * Don't write out unmount record on read-only mounts.
- * Or, if we are doing a forced umount (typically because of IO errors).
- */
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- return 0;
- error = _xfs_log_force(mp, XFS_LOG_SYNC, NULL);
- ASSERT(error || !(XLOG_FORCED_SHUTDOWN(log)));
- #ifdef DEBUG
- first_iclog = iclog = log->l_iclog;
- do {
- if (!(iclog->ic_state & XLOG_STATE_IOERROR)) {
- ASSERT(iclog->ic_state & XLOG_STATE_ACTIVE);
- ASSERT(iclog->ic_offset == 0);
- }
- iclog = iclog->ic_next;
- } while (iclog != first_iclog);
- #endif
- if (! (XLOG_FORCED_SHUTDOWN(log))) {
- error = xfs_log_reserve(mp, 600, 1, &tic,
- XFS_LOG, 0, XLOG_UNMOUNT_REC_TYPE);
- if (!error) {
- /* the data section must be 32 bit size aligned */
- struct {
- __uint16_t magic;
- __uint16_t pad1;
- __uint32_t pad2; /* may as well make it 64 bits */
- } magic = {
- .magic = XLOG_UNMOUNT_TYPE,
- };
- struct xfs_log_iovec reg = {
- .i_addr = &magic,
- .i_len = sizeof(magic),
- .i_type = XLOG_REG_TYPE_UNMOUNT,
- };
- struct xfs_log_vec vec = {
- .lv_niovecs = 1,
- .lv_iovecp = ®,
- };
- /* remove inited flag */
- tic->t_flags = 0;
- error = xlog_write(log, &vec, tic, &lsn,
- NULL, XLOG_UNMOUNT_TRANS);
- /*
- * At this point, we're umounting anyway,
- * so there's no point in transitioning log state
- * to IOERROR. Just continue...
- */
- }
- if (error)
- xfs_alert(mp, "%s: unmount record failed", __func__);
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
- error = xlog_state_release_iclog(log, iclog);
- spin_lock(&log->l_icloglock);
- if (!(iclog->ic_state == XLOG_STATE_ACTIVE ||
- iclog->ic_state == XLOG_STATE_DIRTY)) {
- if (!XLOG_FORCED_SHUTDOWN(log)) {
- xlog_wait(&iclog->ic_force_wait,
- &log->l_icloglock);
- } else {
- spin_unlock(&log->l_icloglock);
- }
- } else {
- spin_unlock(&log->l_icloglock);
- }
- if (tic) {
- trace_xfs_log_umount_write(log, tic);
- xlog_ungrant_log_space(log, tic);
- xfs_log_ticket_put(tic);
- }
- } else {
- /*
- * We're already in forced_shutdown mode, couldn't
- * even attempt to write out the unmount transaction.
- *
- * Go through the motions of sync'ing and releasing
- * the iclog, even though no I/O will actually happen,
- * we need to wait for other log I/Os that may already
- * be in progress. Do this as a separate section of
- * code so we'll know if we ever get stuck here that
- * we're in this odd situation of trying to unmount
- * a file system that went into forced_shutdown as
- * the result of an unmount..
- */
- spin_lock(&log->l_icloglock);
- iclog = log->l_iclog;
- atomic_inc(&iclog->ic_refcnt);
- xlog_state_want_sync(log, iclog);
- spin_unlock(&log->l_icloglock);
- error = xlog_state_release_iclog(log, iclog);
- spin_lock(&log->l_icloglock);
- if ( ! ( iclog->ic_state == XLOG_STATE_ACTIVE
- || iclog->ic_state == XLOG_STATE_DIRTY
- || iclog->ic_state == XLOG_STATE_IOERROR) ) {
- xlog_wait(&iclog->ic_force_wait,
- &log->l_icloglock);
- } else {
- spin_unlock(&log->l_icloglock);
- }
- }
- return error;
- } /* xfs_log_unmount_write */
- /*
- * Deallocate log structures for unmount/relocation.
- *
- * We need to stop the aild from running before we destroy
- * and deallocate the log as the aild references the log.
- */
/*
 * Tear down the log at unmount.  The AIL must be destroyed first because
 * the aild thread references the log (see block comment above).
 */
void
xfs_log_unmount(xfs_mount_t *mp)
{
	xfs_trans_ail_destroy(mp);
	xlog_dealloc_log(mp->m_log);
}
- void
- xfs_log_item_init(
- struct xfs_mount *mp,
- struct xfs_log_item *item,
- int type,
- struct xfs_item_ops *ops)
- {
- item->li_mountp = mp;
- item->li_ailp = mp->m_ail;
- item->li_type = type;
- item->li_ops = ops;
- item->li_lv = NULL;
- INIT_LIST_HEAD(&item->li_ail);
- INIT_LIST_HEAD(&item->li_cil);
- }
- /*
- * Write region vectors to log. The write happens using the space reservation
- * of the ticket (tic). It is not a requirement that all writes for a given
- * transaction occur with one call to xfs_log_write(). However, it is important
- * to note that the transaction reservation code makes an assumption about the
- * number of log headers a transaction requires that may be violated if you
- * don't pass all the transaction vectors in one call....
- */
- int
- xfs_log_write(
- struct xfs_mount *mp,
- struct xfs_log_iovec reg[],
- int nentries,
- struct xlog_ticket *tic,
- xfs_lsn_t *start_lsn)
- {
- struct log *log = mp->m_log;
- int error;
- struct xfs_log_vec vec = {
- .lv_niovecs = nentries,
- .lv_iovecp = reg,
- };
- if (XLOG_FORCED_SHUTDOWN(log))
- return XFS_ERROR(EIO);
- error = xlog_write(log, &vec, tic, start_lsn, NULL, 0);
- if (error)
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- return error;
- }
/*
 * Move the in-core log tail forward and wake any waiters on the write and
 * reserve grant queues whose space requirements can now be satisfied.
 * tail_lsn == 0 means "use the last sync lsn"; tail_lsn == 1 means no
 * valid value was passed (only wake waiters, don't move the tail).
 */
void
xfs_log_move_tail(xfs_mount_t	*mp,
		  xfs_lsn_t	tail_lsn)
{
	xlog_ticket_t	*tic;
	xlog_t		*log = mp->m_log;
	int		need_bytes, free_bytes;

	if (XLOG_FORCED_SHUTDOWN(log))
		return;

	if (tail_lsn == 0)
		tail_lsn = atomic64_read(&log->l_last_sync_lsn);

	/* tail_lsn == 1 implies that we weren't passed a valid value.	*/
	if (tail_lsn != 1)
		atomic64_set(&log->l_tail_lsn, tail_lsn);

	if (!list_empty_careful(&log->l_writeq)) {
#ifdef DEBUG
		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
			panic("Recovery problem");
#endif
		spin_lock(&log->l_grant_write_lock);
		free_bytes = xlog_space_left(log, &log->l_grant_write_head);
		list_for_each_entry(tic, &log->l_writeq, t_queue) {
			/* only permanent tickets wait on the write queue */
			ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);

			if (free_bytes < tic->t_unit_res && tail_lsn != 1)
				break;
			tail_lsn = 0;
			free_bytes -= tic->t_unit_res;
			trace_xfs_log_regrant_write_wake_up(log, tic);
			wake_up(&tic->t_wait);
		}
		spin_unlock(&log->l_grant_write_lock);
	}

	if (!list_empty_careful(&log->l_reserveq)) {
#ifdef DEBUG
		if (log->l_flags & XLOG_ACTIVE_RECOVERY)
			panic("Recovery problem");
#endif
		spin_lock(&log->l_grant_reserve_lock);
		free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
		list_for_each_entry(tic, &log->l_reserveq, t_queue) {
			/* permanent tickets need room for all t_cnt uses */
			if (tic->t_flags & XLOG_TIC_PERM_RESERV)
				need_bytes = tic->t_unit_res*tic->t_cnt;
			else
				need_bytes = tic->t_unit_res;
			if (free_bytes < need_bytes && tail_lsn != 1)
				break;
			tail_lsn = 0;
			free_bytes -= need_bytes;
			trace_xfs_log_grant_wake_up(log, tic);
			wake_up(&tic->t_wait);
		}
		spin_unlock(&log->l_grant_reserve_lock);
	}
}
- /*
- * Determine if we have a transaction that has gone to disk
- * that needs to be covered. To begin the transition to the idle state
- * firstly the log needs to be idle (no AIL and nothing in the iclogs).
- * If we are then in a state where covering is needed, the caller is informed
- * that dummy transactions are required to move the log into the idle state.
- *
- * Because this is called as part of the sync process, we should also indicate
- * that dummy transactions should be issued in anything but the covered or
- * idle states. This ensures that the log tail is accurately reflected in
- * the log at the end of the sync, hence if a crash occurrs avoids replay
- * of transactions where the metadata is already on disk.
- */
/*
 * Report whether a dummy transaction is needed to "cover" the log (see
 * block comment above).  Also advances the two-step covered-state machine
 * (NEED -> DONE -> NEED2 -> DONE2) when the log is idle.
 */
int
xfs_log_need_covered(xfs_mount_t *mp)
{
	int		needed = 0;
	xlog_t		*log = mp->m_log;

	if (!xfs_fs_writable(mp))
		return 0;

	spin_lock(&log->l_icloglock);
	switch (log->l_covered_state) {
	case XLOG_STATE_COVER_DONE:
	case XLOG_STATE_COVER_DONE2:
	case XLOG_STATE_COVER_IDLE:
		break;
	case XLOG_STATE_COVER_NEED:
	case XLOG_STATE_COVER_NEED2:
		/* advance only when the AIL is empty and iclogs are clean */
		if (!xfs_ail_min_lsn(log->l_ailp) &&
		    xlog_iclogs_empty(log)) {
			if (log->l_covered_state == XLOG_STATE_COVER_NEED)
				log->l_covered_state = XLOG_STATE_COVER_DONE;
			else
				log->l_covered_state = XLOG_STATE_COVER_DONE2;
		}
		/* FALLTHRU */
	default:
		needed = 1;
		break;
	}
	spin_unlock(&log->l_icloglock);
	return needed;
}
- /******************************************************************************
- *
- * local routines
- *
- ******************************************************************************
- */
- /* xfs_trans_tail_ail returns 0 when there is nothing in the list.
- * The log manager must keep track of the last LR which was committed
- * to disk. The lsn of this LR will become the new tail_lsn whenever
- * xfs_trans_tail_ail returns 0. If we don't do this, we run into
- * the situation where stuff could be written into the log but nothing
- * was ever in the AIL when asked. Eventually, we panic since the
- * tail hits the head.
- *
- * We may be holding the log iclog lock upon entering this routine.
- */
- xfs_lsn_t
- xlog_assign_tail_lsn(
- struct xfs_mount *mp)
- {
- xfs_lsn_t tail_lsn;
- struct log *log = mp->m_log;
- tail_lsn = xfs_ail_min_lsn(mp->m_ail);
- if (!tail_lsn)
- tail_lsn = atomic64_read(&log->l_last_sync_lsn);
- atomic64_set(&log->l_tail_lsn, tail_lsn);
- return tail_lsn;
- }
- /*
- * Return the space in the log between the tail and the head. The head
- * is passed in the cycle/bytes formal parms. In the special case where
- * the reserve head has wrapped passed the tail, this calculation is no
- * longer valid. In this case, just return 0 which means there is no space
- * in the log. This works for all places where this function is called
- * with the reserve head. Of course, if the write head were to ever
- * wrap the tail, we should blow up. Rather than catch this case here,
- * we depend on other ASSERTions in other parts of the code. XXXmiken
- *
- * This code also handles the case where the reservation head is behind
- * the tail. The details of this case are described below, but the end
- * result is that we return the size of the log as the amount of space left.
- */
/*
 * Compute the free space (in bytes) between the tail lsn and the given
 * grant head, accounting for cycle wrap.  See the block comment above for
 * the head-behind-tail special case.
 */
STATIC int
xlog_space_left(
	struct log	*log,
	atomic64_t	*head)
{
	int		free_bytes;
	int		tail_bytes;
	int		tail_cycle;
	int		head_cycle;
	int		head_bytes;

	xlog_crack_grant_head(head, &head_cycle, &head_bytes);
	xlog_crack_atomic_lsn(&log->l_tail_lsn, &tail_cycle, &tail_bytes);
	tail_bytes = BBTOB(tail_bytes);
	if (tail_cycle == head_cycle && head_bytes >= tail_bytes)
		/* same cycle, head ahead of tail: normal case */
		free_bytes = log->l_logsize - (head_bytes - tail_bytes);
	else if (tail_cycle + 1 < head_cycle)
		/* head wrapped more than a full cycle past the tail */
		return 0;
	else if (tail_cycle < head_cycle) {
		/* head in the cycle after the tail's */
		ASSERT(tail_cycle == (head_cycle - 1));
		free_bytes = tail_bytes - head_bytes;
	} else {
		/*
		 * The reservation head is behind the tail.
		 * In this case we just want to return the size of the
		 * log as the amount of space left.
		 */
		xfs_alert(log->l_mp,
			"xlog_space_left: head behind tail\n"
			"  tail_cycle = %d, tail_bytes = %d\n"
			"  GH   cycle = %d, GH   bytes = %d",
			tail_cycle, tail_bytes, head_cycle, head_bytes);
		ASSERT(0);
		free_bytes = log->l_logsize;
	}
	return free_bytes;
}
- /*
- * Log function which is called when an io completes.
- *
- * The log manager needs its own routine, in order to control what
- * happens with the buffer after the write completes.
- */
/*
 * I/O completion handler for log buffer writes.  On error (real or
 * error-injected) the buffer is staled and the filesystem shut down, and
 * the abort flag is propagated to the iclog state machine.
 */
void
xlog_iodone(xfs_buf_t *bp)
{
	xlog_in_core_t	*iclog = bp->b_fspriv;
	xlog_t		*l = iclog->ic_log;
	int		aborted = 0;

	/*
	 * Race to shutdown the filesystem if we see an error.
	 */
	if (XFS_TEST_ERROR((xfs_buf_geterror(bp)), l->l_mp,
			XFS_ERRTAG_IODONE_IOERR, XFS_RANDOM_IODONE_IOERR)) {
		xfs_ioerror_alert("xlog_iodone", l->l_mp, bp, XFS_BUF_ADDR(bp));
		XFS_BUF_STALE(bp);
		xfs_force_shutdown(l->l_mp, SHUTDOWN_LOG_IO_ERROR);
		/*
		 * This flag will be propagated to the trans-committed
		 * callback routines to let them know that the log-commit
		 * didn't succeed.
		 */
		aborted = XFS_LI_ABORTED;
	} else if (iclog->ic_state & XLOG_STATE_IOERROR) {
		aborted = XFS_LI_ABORTED;
	}

	/* log I/O is always issued ASYNC */
	ASSERT(XFS_BUF_ISASYNC(bp));
	xlog_state_done_syncing(iclog, aborted);
	/*
	 * do not reference the buffer (bp) here as we could race
	 * with it being freed after writing the unmount record to the
	 * log.
	 */
}	/* xlog_iodone */
- /*
- * Return size of each in-core log record buffer.
- *
- * All machines get 8 x 32kB buffers by default, unless tuned otherwise.
- *
- * If the filesystem blocksize is too large, we may need to choose a
- * larger size since the directory code currently logs entire blocks.
- */
/*
 * Choose the number and size of the in-core log buffers (see block comment
 * above), honouring the logbufs/logbsize mount options and writing the
 * chosen values back into the mount so they are visible to userspace.
 */
STATIC void
xlog_get_iclog_buffer_size(xfs_mount_t	*mp,
			   xlog_t	*log)
{
	int size;
	int xhdrs;

	if (mp->m_logbufs <= 0)
		log->l_iclog_bufs = XLOG_MAX_ICLOGS;
	else
		log->l_iclog_bufs = mp->m_logbufs;

	/*
	 * Buffer size passed in from mount system call.
	 */
	if (mp->m_logbsize > 0) {
		size = log->l_iclog_size = mp->m_logbsize;
		/* compute log2 of the buffer size */
		log->l_iclog_size_log = 0;
		while (size != 1) {
			log->l_iclog_size_log++;
			size >>= 1;
		}

		if (xfs_sb_version_haslogv2(&mp->m_sb)) {
			/* # headers = size / 32k
			 * one header holds cycles from 32k of data
			 */

			xhdrs = mp->m_logbsize / XLOG_HEADER_CYCLE_SIZE;
			if (mp->m_logbsize % XLOG_HEADER_CYCLE_SIZE)
				xhdrs++;
			log->l_iclog_hsize = xhdrs << BBSHIFT;
			log->l_iclog_heads = xhdrs;
		} else {
			/* v1 logs only support a single-sector header */
			ASSERT(mp->m_logbsize <= XLOG_BIG_RECORD_BSIZE);
			log->l_iclog_hsize = BBSIZE;
			log->l_iclog_heads = 1;
		}
		goto done;
	}

	/* All machines use 32kB buffers by default. */
	log->l_iclog_size = XLOG_BIG_RECORD_BSIZE;
	log->l_iclog_size_log = XLOG_BIG_RECORD_BSHIFT;

	/* the default log size is 16k or 32k which is one header sector */
	log->l_iclog_hsize = BBSIZE;
	log->l_iclog_heads = 1;

done:
	/* are we being asked to make the sizes selected above visible? */
	if (mp->m_logbufs == 0)
		mp->m_logbufs = log->l_iclog_bufs;
	if (mp->m_logbsize == 0)
		mp->m_logbsize = log->l_iclog_size;
}	/* xlog_get_iclog_buffer_size */
- /*
- * This routine initializes some of the log structure for a given mount point.
- * Its primary purpose is to fill in enough, so recovery can occur. However,
- * some other stuff may be filled in too.
- */
/*
 * Allocate and initialise the in-core log structure and its ring of iclog
 * buffers.  Returns the log, or ERR_PTR(-errno) on failure; all partial
 * allocations are unwound through the goto chain.
 */
STATIC xlog_t *
xlog_alloc_log(xfs_mount_t	*mp,
	       xfs_buftarg_t	*log_target,
	       xfs_daddr_t	blk_offset,
	       int		num_bblks)
{
	xlog_t			*log;
	xlog_rec_header_t	*head;
	xlog_in_core_t		**iclogp;
	xlog_in_core_t		*iclog, *prev_iclog=NULL;
	xfs_buf_t		*bp;
	int			i;
	int			error = ENOMEM;
	uint			log2_size = 0;

	log = kmem_zalloc(sizeof(xlog_t), KM_MAYFAIL);
	if (!log) {
		xfs_warn(mp, "Log allocation failed: No memory!");
		goto out;
	}

	log->l_mp	   = mp;
	log->l_targ	   = log_target;
	log->l_logsize     = BBTOB(num_bblks);
	log->l_logBBstart  = blk_offset;
	log->l_logBBsize   = num_bblks;
	log->l_covered_state = XLOG_STATE_COVER_IDLE;
	log->l_flags	   |= XLOG_ACTIVE_RECOVERY;

	log->l_prev_block  = -1;
	/* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
	xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
	xlog_assign_atomic_lsn(&log->l_last_sync_lsn, 1, 0);
	log->l_curr_cycle  = 1;	    /* 0 is bad since this is initial value */
	xlog_assign_grant_head(&log->l_grant_reserve_head, 1, 0);
	xlog_assign_grant_head(&log->l_grant_write_head, 1, 0);
	INIT_LIST_HEAD(&log->l_reserveq);
	INIT_LIST_HEAD(&log->l_writeq);
	spin_lock_init(&log->l_grant_reserve_lock);
	spin_lock_init(&log->l_grant_write_lock);

	/* validate the on-disk log sector size against this configuration */
	error = EFSCORRUPTED;
	if (xfs_sb_version_hassector(&mp->m_sb)) {
	        log2_size = mp->m_sb.sb_logsectlog;
		if (log2_size < BBSHIFT) {
			xfs_warn(mp, "Log sector size too small (0x%x < 0x%x)",
				log2_size, BBSHIFT);
			goto out_free_log;
		}

	        log2_size -= BBSHIFT;
		if (log2_size > mp->m_sectbb_log) {
			xfs_warn(mp, "Log sector size too large (0x%x > 0x%x)",
				log2_size, mp->m_sectbb_log);
			goto out_free_log;
		}

		/* for larger sector sizes, must have v2 or external log */
		if (log2_size && log->l_logBBstart > 0 &&
			    !xfs_sb_version_haslogv2(&mp->m_sb)) {
			xfs_warn(mp,
		"log sector size (0x%x) invalid for configuration.",
				log2_size);
			goto out_free_log;
		}
	}
	log->l_sectBBsize = 1 << log2_size;

	xlog_get_iclog_buffer_size(mp, log);

	/* dedicated buffer used for writing log headers/records */
	error = ENOMEM;
	bp = xfs_buf_get_empty(log->l_iclog_size, mp->m_logdev_targp);
	if (!bp)
		goto out_free_log;
	bp->b_iodone = xlog_iodone;
	ASSERT(xfs_buf_islocked(bp));
	log->l_xbuf = bp;

	spin_lock_init(&log->l_icloglock);
	init_waitqueue_head(&log->l_flush_wait);

	/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
	ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);

	iclogp = &log->l_iclog;
	/*
	 * The amount of memory to allocate for the iclog structure is
	 * rather funky due to the way the structure is defined.  It is
	 * done this way so that we can use different sizes for machines
	 * with different amounts of memory.  See the definition of
	 * xlog_in_core_t in xfs_log_priv.h for details.
	 */
	ASSERT(log->l_iclog_size >= 4096);
	for (i=0; i < log->l_iclog_bufs; i++) {
		*iclogp = kmem_zalloc(sizeof(xlog_in_core_t), KM_MAYFAIL);
		if (!*iclogp)
			goto out_free_iclog;

		iclog = *iclogp;
		iclog->ic_prev = prev_iclog;
		prev_iclog = iclog;

		bp = xfs_buf_get_uncached(mp->m_logdev_targp,
						log->l_iclog_size, 0);
		if (!bp)
			goto out_free_iclog;

		bp->b_iodone = xlog_iodone;
		iclog->ic_bp = bp;
		iclog->ic_data = bp->b_addr;
#ifdef DEBUG
		log->l_iclog_bak[i] = (xfs_caddr_t)&(iclog->ic_header);
#endif
		/* pre-fill the constant parts of the record header */
		head = &iclog->ic_header;
		memset(head, 0, sizeof(xlog_rec_header_t));
		head->h_magicno = cpu_to_be32(XLOG_HEADER_MAGIC_NUM);
		head->h_version = cpu_to_be32(
			xfs_sb_version_haslogv2(&log->l_mp->m_sb) ? 2 : 1);
		head->h_size = cpu_to_be32(log->l_iclog_size);
		/* new fields */
		head->h_fmt = cpu_to_be32(XLOG_FMT);
		memcpy(&head->h_fs_uuid, &mp->m_sb.sb_uuid, sizeof(uuid_t));

		iclog->ic_size = XFS_BUF_SIZE(bp) - log->l_iclog_hsize;
		iclog->ic_state = XLOG_STATE_ACTIVE;
		iclog->ic_log = log;
		atomic_set(&iclog->ic_refcnt, 0);
		spin_lock_init(&iclog->ic_callback_lock);
		iclog->ic_callback_tail = &(iclog->ic_callback);
		iclog->ic_datap = (char *)iclog->ic_data + log->l_iclog_hsize;

		ASSERT(xfs_buf_islocked(iclog->ic_bp));
		init_waitqueue_head(&iclog->ic_force_wait);
		init_waitqueue_head(&iclog->ic_write_wait);

		iclogp = &iclog->ic_next;
	}
	*iclogp = log->l_iclog;			/* complete ring */
	log->l_iclog->ic_prev = prev_iclog;	/* re-write 1st prev ptr */

	error = xlog_cil_init(log);
	if (error)
		goto out_free_iclog;
	return log;

out_free_iclog:
	/* walk whatever part of the ring was built and free it */
	for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
		prev_iclog = iclog->ic_next;
		if (iclog->ic_bp)
			xfs_buf_free(iclog->ic_bp);
		kmem_free(iclog);
	}
	spinlock_destroy(&log->l_icloglock);
	xfs_buf_free(log->l_xbuf);
out_free_log:
	kmem_free(log);
out:
	return ERR_PTR(-error);
}	/* xlog_alloc_log */
- /*
- * Write out the commit record of a transaction associated with the given
- * ticket. Return the lsn of the commit record.
- */
- STATIC int
- xlog_commit_record(
- struct log *log,
- struct xlog_ticket *ticket,
- struct xlog_in_core **iclog,
- xfs_lsn_t *commitlsnp)
- {
- struct xfs_mount *mp = log->l_mp;
- int error;
- struct xfs_log_iovec reg = {
- .i_addr = NULL,
- .i_len = 0,
- .i_type = XLOG_REG_TYPE_COMMIT,
- };
- struct xfs_log_vec vec = {
- .lv_niovecs = 1,
- .lv_iovecp = ®,
- };
- ASSERT_ALWAYS(iclog);
- error = xlog_write(log, &vec, ticket, commitlsnp, iclog,
- XLOG_COMMIT_TRANS);
- if (error)
- xfs_force_shutdown(mp, SHUTDOWN_LOG_IO_ERROR);
- return error;
- }
- /*
- * Push on the buffer cache code if we ever use more than 75% of the on-disk
- * log space. This code pushes on the lsn which would supposedly free up
- * the 25% which we want to leave free. We may need to adopt a policy which
- * pushes on an lsn which is further along in the log once we reach the high
- * water mark. In this manner, we would be creating a low water mark.
- */
- STATIC void
- xlog_grant_push_ail(
- struct log *log,
- int need_bytes)
- {
- xfs_lsn_t threshold_lsn = 0;
- xfs_lsn_t last_sync_lsn;
- int free_blocks;
- int free_bytes;
- int threshold_block;
- int threshold_cycle;
- int free_threshold;
- ASSERT(BTOBB(need_bytes) < log->l_logBBsize);
- free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- free_blocks = BTOBBT(free_bytes);
- /*
- * Set the threshold for the minimum number of free blocks in the
- * log to the maximum of what the caller needs, one quarter of the
- * log, and 256 blocks.
- */
- free_threshold = BTOBB(need_bytes);
- free_threshold = MAX(free_threshold, (log->l_logBBsize >> 2));
- free_threshold = MAX(free_threshold, 256);
- if (free_blocks >= free_threshold)
- return;
- xlog_crack_atomic_lsn(&log->l_tail_lsn, &threshold_cycle,
- &threshold_block);
- threshold_block += free_threshold;
- if (threshold_block >= log->l_logBBsize) {
- threshold_block -= log->l_logBBsize;
- threshold_cycle += 1;
- }
- threshold_lsn = xlog_assign_lsn(threshold_cycle,
- threshold_block);
- /*
- * Don't pass in an lsn greater than the lsn of the last
- * log record known to be on disk. Use a snapshot of the last sync lsn
- * so that it doesn't change between the compare and the set.
- */
- last_sync_lsn = atomic64_read(&log->l_last_sync_lsn);
- if (XFS_LSN_CMP(threshold_lsn, last_sync_lsn) > 0)
- threshold_lsn = last_sync_lsn;
- /*
- * Get the transaction layer to kick the dirty buffers out to
- * disk asynchronously. No point in trying to do this if
- * the filesystem is shutting down.
- */
- if (!XLOG_FORCED_SHUTDOWN(log))
- xfs_ail_push(log->l_ailp, threshold_lsn);
- }
- /*
- * The bdstrat callback function for log bufs. This gives us a central
- * place to trap bufs in case we get hit by a log I/O error and need to
- * shutdown. Actually, in practice, even when we didn't get a log error,
- * we transition the iclogs to IOERROR state *after* flushing all existing
- * iclogs to disk. This is because we don't want anymore new transactions to be
- * started or completed afterwards.
- */
- STATIC int
- xlog_bdstrat(
- struct xfs_buf *bp)
- {
- struct xlog_in_core *iclog = bp->b_fspriv;
- if (iclog->ic_state & XLOG_STATE_IOERROR) {
- xfs_buf_ioerror(bp, EIO);
- XFS_BUF_STALE(bp);
- xfs_buf_ioend(bp, 0);
- /*
- * It would seem logical to return EIO here, but we rely on
- * the log state machine to propagate I/O errors instead of
- * doing it here.
- */
- return 0;
- }
- xfs_buf_iorequest(bp);
- return 0;
- }
/*
 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
 * fashion.  Previously, we should have moved the current iclog
 * ptr in the log to point to the next available iclog.  This allows further
 * write to continue while this code syncs out an iclog ready to go.
 * Before an in-core log can be written out, the data section must be scanned
 * to save away the 1st word of each BBSIZE block into the header.  We replace
 * it with the current cycle count.  Each BBSIZE block is tagged with the
 * cycle count because there in an implicit assumption that drives will
 * guarantee that entire 512 byte blocks get written at once.  In other words,
 * we can't have part of a 512 byte block written and part not written.  By
 * tagging each block, we will know which blocks are valid when recovering
 * after an unclean shutdown.
 *
 * This routine is single threaded on the iclog.  No other thread can be in
 * this routine with the same iclog.  Changing contents of iclog can there-
 * fore be done without grabbing the state machine lock.  Updating the global
 * log will require grabbing the lock though.
 *
 * The entire log manager uses a logical block numbering scheme.  Only
 * log_sync (and then only bwrite()) know about the fact that the log may
 * not start with block zero on a given device.  The log block start offset
 * is added immediately before calling bwrite().
 */
STATIC int
xlog_sync(xlog_t		*log,
	  xlog_in_core_t	*iclog)
{
	xfs_caddr_t	dptr;		/* pointer to byte sized element */
	xfs_buf_t	*bp;
	int		i;
	uint		count;		/* byte count of bwrite */
	uint		count_init;	/* initial count before roundup */
	int		roundoff;	/* roundoff to BB or stripe */
	int		split = 0;	/* split write into two regions */
	int		error;
	int		v2 = xfs_sb_version_haslogv2(&log->l_mp->m_sb);

	XFS_STATS_INC(xs_log_writes);
	/* iclog must be fully released before it may be written out */
	ASSERT(atomic_read(&iclog->ic_refcnt) == 0);

	/* Add for LR header */
	count_init = log->l_iclog_hsize + iclog->ic_offset;

	/*
	 * Round out the log write size: to the v2 stripe unit if one is
	 * configured, otherwise to basic block (512 byte) granularity.
	 */
	if (v2 && log->l_mp->m_sb.sb_logsunit > 1) {
		/* we have a v2 stripe unit to use */
		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
	} else {
		count = BBTOB(BTOBB(count_init));
	}
	roundoff = count - count_init;
	ASSERT(roundoff >= 0);
	ASSERT((v2 && log->l_mp->m_sb.sb_logsunit > 1 &&
                roundoff < log->l_mp->m_sb.sb_logsunit)
		||
		(log->l_mp->m_sb.sb_logsunit <= 1 &&
		 roundoff < BBTOB(1)));

	/* move grant heads by roundoff in sync */
	xlog_grant_add_space(log, &log->l_grant_reserve_head, roundoff);
	xlog_grant_add_space(log, &log->l_grant_write_head, roundoff);

	/* put cycle number in every block */
	xlog_pack_data(log, iclog, roundoff);

	/* real byte length (v2 headers record the padded length too) */
	if (v2) {
		iclog->ic_header.h_len =
			cpu_to_be32(iclog->ic_offset + roundoff);
	} else {
		iclog->ic_header.h_len =
			cpu_to_be32(iclog->ic_offset);
	}

	bp = iclog->ic_bp;
	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(be64_to_cpu(iclog->ic_header.h_lsn)));

	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));

	/* Do we need to split this write into 2 parts? */
	if (XFS_BUF_ADDR(bp) + BTOBB(count) > log->l_logBBsize) {
		/* wraps past the physical end of the log: write the tail
		 * portion now and the remainder from block 0 below */
		split = count - (BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp)));
		count = BBTOB(log->l_logBBsize - XFS_BUF_ADDR(bp));
		iclog->ic_bwritecnt = 2;	/* split into 2 writes */
	} else {
		iclog->ic_bwritecnt = 1;
	}
	XFS_BUF_SET_COUNT(bp, count);
	bp->b_fspriv = iclog;
	XFS_BUF_ZEROFLAGS(bp);
	XFS_BUF_ASYNC(bp);
	bp->b_flags |= XBF_SYNCIO;

	if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
		bp->b_flags |= XBF_FUA;

		/*
		 * Flush the data device before flushing the log to make
		 * sure all meta data written back from the AIL actually made
		 * it to disk before stamping the new log tail LSN into the
		 * log buffer.  For an external log we need to issue the
		 * flush explicitly, and unfortunately synchronously here;
		 * for an internal log we can simply use the block layer
		 * state machine for preflushes.
		 */
		if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
			xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
		else
			bp->b_flags |= XBF_FLUSH;
	}

	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

	xlog_verify_iclog(log, iclog, count, B_TRUE);

	/* account for log which doesn't start at block #0 */
	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
	/*
	 * Don't call xfs_bwrite here. We do log-syncs even when the filesystem
	 * is shutting down.
	 */
	XFS_BUF_WRITE(bp);

	if ((error = xlog_bdstrat(bp))) {
		xfs_ioerror_alert("xlog_sync", log->l_mp, bp,
				  XFS_BUF_ADDR(bp));
		return error;
	}
	if (split) {
		/* second half of a wrapped write, issued via the spare
		 * buffer l_xbuf pointed at the tail of the iclog data */
		bp = iclog->ic_log->l_xbuf;
		XFS_BUF_SET_ADDR(bp, 0);	     /* logical 0 */
		xfs_buf_associate_memory(bp,
				(char *)&iclog->ic_header + count, split);
		bp->b_fspriv = iclog;
		XFS_BUF_ZEROFLAGS(bp);
		XFS_BUF_ASYNC(bp);
		bp->b_flags |= XBF_SYNCIO;
		if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
			bp->b_flags |= XBF_FUA;
		dptr = bp->b_addr;
		/*
		 * Bump the cycle numbers at the start of each block
		 * since this part of the buffer is at the start of
		 * a new cycle.  Watch out for the header magic number
		 * case, though.
		 */
		for (i = 0; i < split; i += BBSIZE) {
			be32_add_cpu((__be32 *)dptr, 1);
			if (be32_to_cpu(*(__be32 *)dptr) == XLOG_HEADER_MAGIC_NUM)
				be32_add_cpu((__be32 *)dptr, 1);
			dptr += BBSIZE;
		}

		ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
		ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

		/* account for internal log which doesn't start at block #0 */
		XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);
		XFS_BUF_WRITE(bp);
		if ((error = xlog_bdstrat(bp))) {
			xfs_ioerror_alert("xlog_sync (split)", log->l_mp,
					  bp, XFS_BUF_ADDR(bp));
			return error;
		}
	}
	return 0;
}	/* xlog_sync */
- /*
- * Deallocate a log structure
- */
- STATIC void
- xlog_dealloc_log(xlog_t *log)
- {
- xlog_in_core_t *iclog, *next_iclog;
- int i;
- xlog_cil_destroy(log);
- /*
- * always need to ensure that the extra buffer does not point to memory
- * owned by another log buffer before we free it.
- */
- xfs_buf_set_empty(log->l_xbuf, log->l_iclog_size);
- xfs_buf_free(log->l_xbuf);
- iclog = log->l_iclog;
- for (i=0; i<log->l_iclog_bufs; i++) {
- xfs_buf_free(iclog->ic_bp);
- next_iclog = iclog->ic_next;
- kmem_free(iclog);
- iclog = next_iclog;
- }
- spinlock_destroy(&log->l_icloglock);
- log->l_mp->m_log = NULL;
- kmem_free(log);
- } /* xlog_dealloc_log */
- /*
- * Update counters atomically now that memcpy is done.
- */
- /* ARGSUSED */
- static inline void
- xlog_state_finish_copy(xlog_t *log,
- xlog_in_core_t *iclog,
- int record_cnt,
- int copy_bytes)
- {
- spin_lock(&log->l_icloglock);
- be32_add_cpu(&iclog->ic_header.h_num_logops, record_cnt);
- iclog->ic_offset += copy_bytes;
- spin_unlock(&log->l_icloglock);
- } /* xlog_state_finish_copy */
/*
 * print out info relating to regions written which consume
 * the reservation
 *
 * Called when a ticket's reservation has been overrun: dumps a summary
 * of the ticket and every region recorded against it, then shuts the
 * filesystem down with SHUTDOWN_CORRUPT_INCORE since an overrun means
 * the in-core reservation accounting is wrong.
 */
void
xlog_print_tic_res(
	struct xfs_mount	*mp,
	struct xlog_ticket	*ticket)
{
	uint i;
	uint ophdr_spc = ticket->t_res_num_ophdrs * (uint)sizeof(xlog_op_header_t);

	/* match with XLOG_REG_TYPE_* in xfs_log.h */
	static char *res_type_str[XLOG_REG_TYPE_MAX] = {
	    "bformat",
	    "bchunk",
	    "efi_format",
	    "efd_format",
	    "iformat",
	    "icore",
	    "iext",
	    "ibroot",
	    "ilocal",
	    "iattr_ext",
	    "iattr_broot",
	    "iattr_local",
	    "qformat",
	    "dquot",
	    "quotaoff",
	    "LR header",
	    "unmount",
	    "commit",
	    "trans header"
	};
	/* match with XFS_TRANS_* transaction type numbers (1-based) */
	static char *trans_type_str[XFS_TRANS_TYPE_MAX] = {
	    "SETATTR_NOT_SIZE",
	    "SETATTR_SIZE",
	    "INACTIVE",
	    "CREATE",
	    "CREATE_TRUNC",
	    "TRUNCATE_FILE",
	    "REMOVE",
	    "LINK",
	    "RENAME",
	    "MKDIR",
	    "RMDIR",
	    "SYMLINK",
	    "SET_DMATTRS",
	    "GROWFS",
	    "STRAT_WRITE",
	    "DIOSTRAT",
	    "WRITE_SYNC",
	    "WRITEID",
	    "ADDAFORK",
	    "ATTRINVAL",
	    "ATRUNCATE",
	    "ATTR_SET",
	    "ATTR_RM",
	    "ATTR_FLAG",
	    "CLEAR_AGI_BUCKET",
	    "QM_SBCHANGE",
	    "DUMMY1",
	    "DUMMY2",
	    "QM_QUOTAOFF",
	    "QM_DQALLOC",
	    "QM_SETQLIM",
	    "QM_DQCLUSTER",
	    "QM_QINOCREATE",
	    "QM_QUOTAOFF_END",
	    "SB_UNIT",
	    "FSYNC_TS",
	    "GROWFSRT_ALLOC",
	    "GROWFSRT_ZERO",
	    "GROWFSRT_FREE",
	    "SWAPEXT"
	};

	/* transaction types are 1-based, hence the -1 index below */
	xfs_warn(mp,
		"xfs_log_write: reservation summary:\n"
		"  trans type  = %s (%u)\n"
		"  unit res    = %d bytes\n"
		"  current res = %d bytes\n"
		"  total reg   = %u bytes (o/flow = %u bytes)\n"
		"  ophdrs      = %u (ophdr space = %u bytes)\n"
		"  ophdr + reg = %u bytes\n"
		"  num regions = %u\n",
		((ticket->t_trans_type <= 0 ||
		  ticket->t_trans_type > XFS_TRANS_TYPE_MAX) ?
		  "bad-trans-type" : trans_type_str[ticket->t_trans_type-1]),
		ticket->t_trans_type,
		ticket->t_unit_res,
		ticket->t_curr_res,
		ticket->t_res_arr_sum, ticket->t_res_o_flow,
		ticket->t_res_num_ophdrs, ophdr_spc,
		ticket->t_res_arr_sum +
		ticket->t_res_o_flow + ophdr_spc,
		ticket->t_res_num);

	/* one line per region recorded against the ticket */
	for (i = 0; i < ticket->t_res_num; i++) {
		uint r_type = ticket->t_res_arr[i].r_type;
		xfs_warn(mp, "region[%u]: %s - %u bytes\n", i,
			    ((r_type <= 0 || r_type > XLOG_REG_TYPE_MAX) ?
			    "bad-rtype" : res_type_str[r_type-1]),
			    ticket->t_res_arr[i].r_len);
	}

	xfs_alert_tag(mp, XFS_PTAG_LOGRES,
		"xfs_log_write: reservation ran out. Need to up reservation");
	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
}
- /*
- * Calculate the potential space needed by the log vector. Each region gets
- * its own xlog_op_header_t and may need to be double word aligned.
- */
- static int
- xlog_write_calc_vec_length(
- struct xlog_ticket *ticket,
- struct xfs_log_vec *log_vector)
- {
- struct xfs_log_vec *lv;
- int headers = 0;
- int len = 0;
- int i;
- /* acct for start rec of xact */
- if (ticket->t_flags & XLOG_TIC_INITED)
- headers++;
- for (lv = log_vector; lv; lv = lv->lv_next) {
- headers += lv->lv_niovecs;
- for (i = 0; i < lv->lv_niovecs; i++) {
- struct xfs_log_iovec *vecp = &lv->lv_iovecp[i];
- len += vecp->i_len;
- xlog_tic_add_region(ticket, vecp->i_len, vecp->i_type);
- }
- }
- ticket->t_res_num_ophdrs += headers;
- len += headers * sizeof(struct xlog_op_header);
- return len;
- }
- /*
- * If first write for transaction, insert start record We can't be trying to
- * commit if we are inited. We can't have any "partial_copy" if we are inited.
- */
- static int
- xlog_write_start_rec(
- struct xlog_op_header *ophdr,
- struct xlog_ticket *ticket)
- {
- if (!(ticket->t_flags & XLOG_TIC_INITED))
- return 0;
- ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
- ophdr->oh_clientid = ticket->t_clientid;
- ophdr->oh_len = 0;
- ophdr->oh_flags = XLOG_START_TRANS;
- ophdr->oh_res2 = 0;
- ticket->t_flags &= ~XLOG_TIC_INITED;
- return sizeof(struct xlog_op_header);
- }
- static xlog_op_header_t *
- xlog_write_setup_ophdr(
- struct log *log,
- struct xlog_op_header *ophdr,
- struct xlog_ticket *ticket,
- uint flags)
- {
- ophdr->oh_tid = cpu_to_be32(ticket->t_tid);
- ophdr->oh_clientid = ticket->t_clientid;
- ophdr->oh_res2 = 0;
- /* are we copying a commit or unmount record? */
- ophdr->oh_flags = flags;
- /*
- * We've seen logs corrupted with bad transaction client ids. This
- * makes sure that XFS doesn't generate them on. Turn this into an EIO
- * and shut down the filesystem.
- */
- switch (ophdr->oh_clientid) {
- case XFS_TRANSACTION:
- case XFS_VOLUME:
- case XFS_LOG:
- break;
- default:
- xfs_warn(log->l_mp,
- "Bad XFS transaction clientid 0x%x in ticket 0x%p",
- ophdr->oh_clientid, ticket);
- return NULL;
- }
- return ophdr;
- }
- /*
- * Set up the parameters of the region copy into the log. This has
- * to handle region write split across multiple log buffers - this
- * state is kept external to this function so that this code can
- * can be written in an obvious, self documenting manner.
- */
- static int
- xlog_write_setup_copy(
- struct xlog_ticket *ticket,
- struct xlog_op_header *ophdr,
- int space_available,
- int space_required,
- int *copy_off,
- int *copy_len,
- int *last_was_partial_copy,
- int *bytes_consumed)
- {
- int still_to_copy;
- still_to_copy = space_required - *bytes_consumed;
- *copy_off = *bytes_consumed;
- if (still_to_copy <= space_available) {
- /* write of region completes here */
- *copy_len = still_to_copy;
- ophdr->oh_len = cpu_to_be32(*copy_len);
- if (*last_was_partial_copy)
- ophdr->oh_flags |= (XLOG_END_TRANS|XLOG_WAS_CONT_TRANS);
- *last_was_partial_copy = 0;
- *bytes_consumed = 0;
- return 0;
- }
- /* partial write of region, needs extra log op header reservation */
- *copy_len = space_available;
- ophdr->oh_len = cpu_to_be32(*copy_len);
- ophdr->oh_flags |= XLOG_CONTINUE_TRANS;
- if (*last_was_partial_copy)
- ophdr->oh_flags |= XLOG_WAS_CONT_TRANS;
- *bytes_consumed += *copy_len;
- (*last_was_partial_copy)++;
- /* account for new log op header */
- ticket->t_curr_res -= sizeof(struct xlog_op_header);
- ticket->t_res_num_ophdrs++;
- return sizeof(struct xlog_op_header);
- }
/*
 * Finish off the copy of a region into the current iclog: flush the
 * accumulated record/byte counts into the iclog header and decide whether
 * the iclog must be released (partial copy, or out of usable space).
 *
 * Returns 0, or the error from xlog_state_release_iclog().
 */
static int
xlog_write_copy_finish(
	struct log		*log,
	struct xlog_in_core	*iclog,
	uint			flags,
	int			*record_cnt,
	int			*data_cnt,
	int			*partial_copy,
	int			*partial_copy_len,
	int			log_offset,
	struct xlog_in_core	**commit_iclog)
{
	if (*partial_copy) {
		/*
		 * This iclog has already been marked WANT_SYNC by
		 * xlog_state_get_iclog_space.
		 */
		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
		*record_cnt = 0;
		*data_cnt = 0;
		return xlog_state_release_iclog(log, iclog);
	}

	*partial_copy = 0;
	*partial_copy_len = 0;

	if (iclog->ic_size - log_offset <= sizeof(xlog_op_header_t)) {
		/* no more space in this iclog - push it. */
		xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
		*record_cnt = 0;
		*data_cnt = 0;

		/* want_sync must be set under l_icloglock */
		spin_lock(&log->l_icloglock);
		xlog_state_want_sync(log, iclog);
		spin_unlock(&log->l_icloglock);

		/*
		 * If no commit_iclog was requested, release now; otherwise
		 * this must be the commit record's iclog - hand it back to
		 * the caller, who owns releasing it.
		 */
		if (!commit_iclog)
			return xlog_state_release_iclog(log, iclog);
		ASSERT(flags & XLOG_COMMIT_TRANS);
		*commit_iclog = iclog;
	}

	return 0;
}
- /*
- * Write some region out to in-core log
- *
- * This will be called when writing externally provided regions or when
- * writing out a commit record for a given transaction.
- *
- * General algorithm:
- * 1. Find total length of this write. This may include adding to the
- * lengths passed in.
- * 2. Check whether we violate the tickets reservation.
- * 3. While writing to this iclog
- * A. Reserve as much space in this iclog as can get
- * B. If this is first write, save away start lsn
- * C. While writing this region:
- * 1. If first write of transaction, write start record
- * 2. Write log operation header (header per region)
- * 3. Find out if we can fit entire region into this iclog
- * 4. Potentially, verify destination memcpy ptr
- * 5. Memcpy (partial) region
- * 6. If partial copy, release iclog; otherwise, continue
- * copying more regions into cu…