
/drivers/scsi/lpfc/lpfc_debugfs.c

http://github.com/mirrors/linux
C | 6513 lines | 4590 code | 705 blank | 1218 comment | 900 complexity | bb68be07d654c3c150c885b902e6526c MD5

Note: large files are truncated; this listing shows only the beginning of the file.

   1/*******************************************************************
   2 * This file is part of the Emulex Linux Device Driver for         *
   3 * Fibre Channel Host Bus Adapters.                                *
   4 * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
   5 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.  *
   6 * Copyright (C) 2007-2015 Emulex.  All rights reserved.           *
   7 * EMULEX and SLI are trademarks of Emulex.                        *
   8 * www.broadcom.com                                                *
   9 *                                                                 *
  10 * This program is free software; you can redistribute it and/or   *
  11 * modify it under the terms of version 2 of the GNU General       *
  12 * Public License as published by the Free Software Foundation.    *
  13 * This program is distributed in the hope that it will be useful. *
  14 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
  15 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
  16 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
  17 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  18 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
  19 * more details, a copy of which can be found in the file COPYING  *
  20 * included with this package.                                     *
  21 *******************************************************************/
  22
  23#include <linux/blkdev.h>
  24#include <linux/delay.h>
  25#include <linux/module.h>
  26#include <linux/dma-mapping.h>
  27#include <linux/idr.h>
  28#include <linux/interrupt.h>
  29#include <linux/kthread.h>
  30#include <linux/slab.h>
  31#include <linux/pci.h>
  32#include <linux/spinlock.h>
  33#include <linux/ctype.h>
  34#include <linux/vmalloc.h>
  35
  36#include <scsi/scsi.h>
  37#include <scsi/scsi_device.h>
  38#include <scsi/scsi_host.h>
  39#include <scsi/scsi_transport_fc.h>
  40#include <scsi/fc/fc_fs.h>
  41
  42#include <linux/nvme-fc-driver.h>
  43
  44#include "lpfc_hw4.h"
  45#include "lpfc_hw.h"
  46#include "lpfc_sli.h"
  47#include "lpfc_sli4.h"
  48#include "lpfc_nl.h"
  49#include "lpfc_disc.h"
  50#include "lpfc.h"
  51#include "lpfc_scsi.h"
  52#include "lpfc_nvme.h"
  53#include "lpfc_nvmet.h"
  54#include "lpfc_logmsg.h"
  55#include "lpfc_crtn.h"
  56#include "lpfc_vport.h"
  57#include "lpfc_version.h"
  58#include "lpfc_compat.h"
  59#include "lpfc_debugfs.h"
  60#include "lpfc_bsg.h"
  61
  62#ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  63/*
  64 * debugfs interface
  65 *
  66 * To access this interface the user should:
  67 * # mount -t debugfs none /sys/kernel/debug
  68 *
  69 * The lpfc debugfs directory hierarchy is:
  70 * /sys/kernel/debug/lpfc/fnX/vportY
  71 * where X is the lpfc hba function unique_id
  72 * where Y is the vport VPI on that hba
  73 *
  74 * Debugging services available per vport:
  75 * discovery_trace
   76 * This is an ASCII readable file that contains a trace of the last
   77 * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
   78 * See lpfc_debugfs.h for different categories of discovery events.
  79 * To enable the discovery trace, the following module parameters must be set:
  80 * lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
  81 * lpfc_debugfs_max_disc_trc=X   Where X is the event trace depth for
  82 *                               EACH vport. X MUST also be a power of 2.
  83 * lpfc_debugfs_mask_disc_trc=Y  Where Y is an event mask as defined in
  84 *                               lpfc_debugfs.h .
  85 *
  86 * slow_ring_trace
   87 * This is an ASCII readable file that contains a trace of the last
  88 * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
  89 * To enable the slow ring trace, the following module parameters must be set:
  90 * lpfc_debugfs_enable=1         Turns on lpfc debugfs filesystem support
  91 * lpfc_debugfs_max_slow_ring_trc=X   Where X is the event trace depth for
  92 *                               the HBA. X MUST also be a power of 2.
  93 */
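/*
 * Example (illustrative only; adjust values and paths to the local
 * system -- the mask value below is arbitrary and the fn/vport numbers
 * depend on the adapter):
 *
 * # modprobe lpfc lpfc_debugfs_enable=1 lpfc_debugfs_max_disc_trc=64 \
 *       lpfc_debugfs_mask_disc_trc=0xffff
 * # mount -t debugfs none /sys/kernel/debug
 * # cat /sys/kernel/debug/lpfc/fn0/vport0/discovery_trace
 *
 * 64 satisfies the power-of-2 requirement noted above; exact debugfs
 * file names may differ between driver versions.
 */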
  94static int lpfc_debugfs_enable = 1;
  95module_param(lpfc_debugfs_enable, int, S_IRUGO);
  96MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
  97
  98/* This MUST be a power of 2 */
  99static int lpfc_debugfs_max_disc_trc;
 100module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
 101MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
 102	"Set debugfs discovery trace depth");
 103
 104/* This MUST be a power of 2 */
 105static int lpfc_debugfs_max_slow_ring_trc;
 106module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
 107MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
 108	"Set debugfs slow ring trace depth");
 109
 110/* This MUST be a power of 2 */
 111static int lpfc_debugfs_max_nvmeio_trc;
 112module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444);
 113MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc,
 114		 "Set debugfs NVME IO trace depth");
 115
 116static int lpfc_debugfs_mask_disc_trc;
 117module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
 118MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
 119	"Set debugfs discovery trace mask");
 120
 121#include <linux/debugfs.h>
 122
 123static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
 124static unsigned long lpfc_debugfs_start_time = 0L;
 125
 126/* iDiag */
 127static struct lpfc_idiag idiag;
 128
 129/**
 130 * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
 131 * @vport: The vport to gather the log info from.
 132 * @buf: The buffer to dump log into.
 133 * @size: The maximum amount of data to process.
 134 *
 135 * Description:
 136 * This routine gathers the lpfc discovery debugfs data from the @vport and
 137 * dumps it to @buf up to @size number of bytes. It will start at the next entry
 138 * in the log and process the log until the end of the buffer. Then it will
 139 * gather from the beginning of the log and process until the current entry.
 140 *
 141 * Notes:
  142 * Discovery logging will be disabled while this routine dumps the log.
 143 *
 144 * Return Value:
 145 * This routine returns the amount of bytes that were dumped into @buf and will
 146 * not exceed @size.
 147 **/
 148static int
 149lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
 150{
 151	int i, index, len, enable;
 152	uint32_t ms;
 153	struct lpfc_debugfs_trc *dtp;
 154	char *buffer;
 155
 156	buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
 157	if (!buffer)
 158		return 0;
 159
 160	enable = lpfc_debugfs_enable;
 161	lpfc_debugfs_enable = 0;
 162
 163	len = 0;
 164	index = (atomic_read(&vport->disc_trc_cnt) + 1) &
 165		(lpfc_debugfs_max_disc_trc - 1);
 166	for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
 167		dtp = vport->disc_trc + i;
 168		if (!dtp->fmt)
 169			continue;
 170		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
 171		snprintf(buffer,
 172			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 173			dtp->seq_cnt, ms, dtp->fmt);
 174		len +=  scnprintf(buf+len, size-len, buffer,
 175			dtp->data1, dtp->data2, dtp->data3);
 176	}
 177	for (i = 0; i < index; i++) {
 178		dtp = vport->disc_trc + i;
 179		if (!dtp->fmt)
 180			continue;
 181		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
 182		snprintf(buffer,
 183			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 184			dtp->seq_cnt, ms, dtp->fmt);
 185		len +=  scnprintf(buf+len, size-len, buffer,
 186			dtp->data1, dtp->data2, dtp->data3);
 187	}
 188
 189	lpfc_debugfs_enable = enable;
 190	kfree(buffer);
 191
 192	return len;
 193}
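/*
 * Illustrative sketch (not part of the driver): the two loops above are
 * the standard oldest-first walk of a power-of-2 ring buffer.  Because
 * the depth is a power of 2, "count & (depth - 1)" is a cheap modulo,
 * and wraparound needs no special case: dump [next..depth) first, then
 * [0..next).  The names below (trc_ring_dump, struct trc_ent) are
 * hypothetical, and the formatting is simplified.
 */
#if 0	/* example only, never compiled */
struct trc_ent {
	const char *fmt;	/* NULL until the slot has been written */
};

static int trc_ring_dump(struct trc_ent *ring, int depth, int count,
			 char *buf, int size)
{
	int i, len = 0;
	int next = (count + 1) & (depth - 1);	/* oldest live slot */

	for (i = next; i < depth; i++)		/* older half */
		if (ring[i].fmt)
			len += scnprintf(buf + len, size - len,
					 "%s\n", ring[i].fmt);
	for (i = 0; i < next; i++)		/* newer half */
		if (ring[i].fmt)
			len += scnprintf(buf + len, size - len,
					 "%s\n", ring[i].fmt);
	return len;
}
#endif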
 194
 195/**
 196 * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
 197 * @phba: The HBA to gather the log info from.
 198 * @buf: The buffer to dump log into.
 199 * @size: The maximum amount of data to process.
 200 *
 201 * Description:
 202 * This routine gathers the lpfc slow ring debugfs data from the @phba and
 203 * dumps it to @buf up to @size number of bytes. It will start at the next entry
 204 * in the log and process the log until the end of the buffer. Then it will
 205 * gather from the beginning of the log and process until the current entry.
 206 *
 207 * Notes:
  208 * Slow ring logging will be disabled while this routine dumps the log.
 209 *
 210 * Return Value:
 211 * This routine returns the amount of bytes that were dumped into @buf and will
 212 * not exceed @size.
 213 **/
 214static int
 215lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
 216{
 217	int i, index, len, enable;
 218	uint32_t ms;
 219	struct lpfc_debugfs_trc *dtp;
 220	char *buffer;
 221
 222	buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
 223	if (!buffer)
 224		return 0;
 225
 226	enable = lpfc_debugfs_enable;
 227	lpfc_debugfs_enable = 0;
 228
 229	len = 0;
 230	index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
 231		(lpfc_debugfs_max_slow_ring_trc - 1);
 232	for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
 233		dtp = phba->slow_ring_trc + i;
 234		if (!dtp->fmt)
 235			continue;
 236		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
 237		snprintf(buffer,
 238			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 239			dtp->seq_cnt, ms, dtp->fmt);
 240		len +=  scnprintf(buf+len, size-len, buffer,
 241			dtp->data1, dtp->data2, dtp->data3);
 242	}
 243	for (i = 0; i < index; i++) {
 244		dtp = phba->slow_ring_trc + i;
 245		if (!dtp->fmt)
 246			continue;
 247		ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
 248		snprintf(buffer,
 249			LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
 250			dtp->seq_cnt, ms, dtp->fmt);
 251		len +=  scnprintf(buf+len, size-len, buffer,
 252			dtp->data1, dtp->data2, dtp->data3);
 253	}
 254
 255	lpfc_debugfs_enable = enable;
 256	kfree(buffer);
 257
 258	return len;
 259}
 260
 261static int lpfc_debugfs_last_hbq = -1;
 262
 263/**
 264 * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
 265 * @phba: The HBA to gather host buffer info from.
 266 * @buf: The buffer to dump log into.
 267 * @size: The maximum amount of data to process.
 268 *
 269 * Description:
 270 * This routine dumps the host buffer queue info from the @phba to @buf up to
 271 * @size number of bytes. A header that describes the current hbq state will be
 272 * dumped to @buf first and then info on each hbq entry will be dumped to @buf
 273 * until @size bytes have been dumped or all the hbq info has been dumped.
 274 *
 275 * Notes:
 276 * This routine will rotate through each configured HBQ each time called.
 277 *
 278 * Return Value:
 279 * This routine returns the amount of bytes that were dumped into @buf and will
 280 * not exceed @size.
 281 **/
 282static int
 283lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 284{
 285	int len = 0;
 286	int i, j, found, posted, low;
 287	uint32_t phys, raw_index, getidx;
 288	struct lpfc_hbq_init *hip;
 289	struct hbq_s *hbqs;
 290	struct lpfc_hbq_entry *hbqe;
 291	struct lpfc_dmabuf *d_buf;
 292	struct hbq_dmabuf *hbq_buf;
 293
 294	if (phba->sli_rev != 3)
 295		return 0;
 296
 297	spin_lock_irq(&phba->hbalock);
 298
 299	/* toggle between multiple hbqs, if any */
 300	i = lpfc_sli_hbq_count();
  301	if (i > 1) {
  302		lpfc_debugfs_last_hbq++;
  303		if (lpfc_debugfs_last_hbq >= i)
  304			lpfc_debugfs_last_hbq = 0;
  305	} else {
  306		lpfc_debugfs_last_hbq = 0;
  307	}
 308
 309	i = lpfc_debugfs_last_hbq;
 310
 311	len +=  scnprintf(buf+len, size-len, "HBQ %d Info\n", i);
 312
 313	hbqs =  &phba->hbqs[i];
 314	posted = 0;
 315	list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
 316		posted++;
 317
 318	hip =  lpfc_hbq_defs[i];
 319	len +=  scnprintf(buf+len, size-len,
 320		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
 321		hip->hbq_index, hip->profile, hip->rn,
 322		hip->buffer_count, hip->init_count, hip->add_count, posted);
 323
 324	raw_index = phba->hbq_get[i];
 325	getidx = le32_to_cpu(raw_index);
 326	len +=  scnprintf(buf+len, size-len,
 327		"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
 328		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
 329		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);
 330
 331	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
  332	for (j = 0; j < hbqs->entry_count; j++) {
 333		len +=  scnprintf(buf+len, size-len,
 334			"%03d: %08x %04x %05x ", j,
 335			le32_to_cpu(hbqe->bde.addrLow),
 336			le32_to_cpu(hbqe->bde.tus.w),
 337			le32_to_cpu(hbqe->buffer_tag));
 338		i = 0;
 339		found = 0;
 340
 341		/* First calculate if slot has an associated posted buffer */
 342		low = hbqs->hbqPutIdx - posted;
 343		if (low >= 0) {
 344			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
 345				len +=  scnprintf(buf + len, size - len,
 346						"Unused\n");
 347				goto skipit;
 348			}
  349		} else {
  350			/* put index wrapped: posted buffers span the ring end */
 351			if ((j >= hbqs->hbqPutIdx) &&
 352				(j < (hbqs->entry_count+low))) {
 353				len +=  scnprintf(buf + len, size - len,
 354						"Unused\n");
 355				goto skipit;
 356			}
 357		}
 358
 359		/* Get the Buffer info for the posted buffer */
 360		list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
 361			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
 362			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
 363			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
 364				len +=  scnprintf(buf+len, size-len,
 365					"Buf%d: x%px %06x\n", i,
 366					hbq_buf->dbuf.virt, hbq_buf->tag);
 367				found = 1;
 368				break;
 369			}
 370			i++;
 371		}
 372		if (!found) {
 373			len +=  scnprintf(buf+len, size-len, "No DMAinfo?\n");
 374		}
 375skipit:
 376		hbqe++;
 377		if (len > LPFC_HBQINFO_SIZE - 54)
 378			break;
 379	}
 380	spin_unlock_irq(&phba->hbalock);
 381	return len;
 382}
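/*
 * Worked example for the unused-slot test above (values hypothetical):
 * with entry_count = 16, hbqPutIdx = 5 and posted = 8, low = 5 - 8 = -3.
 * low < 0 means the posted range wraps: slots [5..16-3) = [5..13) are
 * unused, and the 8 posted buffers occupy [13..16) plus [0..5).  With
 * posted = 3 instead, low = 2 >= 0 and the posted buffers sit in the
 * contiguous range [2..5), so everything outside it prints "Unused".
 */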
 383
 384static int lpfc_debugfs_last_xripool;
 385
 386/**
  387 * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
 388 * @phba: The HBA to gather host buffer info from.
 389 * @buf: The buffer to dump log into.
 390 * @size: The maximum amount of data to process.
 391 *
 392 * Description:
 393 * This routine dumps the Hardware Queue info from the @phba to @buf up to
 394 * @size number of bytes. A header that describes the current hdwq state will be
 395 * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
 396 * until @size bytes have been dumped or all the hdwq info has been dumped.
 397 *
 398 * Notes:
 399 * This routine will rotate through each configured Hardware Queue each
 400 * time called.
 401 *
 402 * Return Value:
 403 * This routine returns the amount of bytes that were dumped into @buf and will
 404 * not exceed @size.
 405 **/
 406static int
 407lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
 408{
 409	struct lpfc_sli4_hdw_queue *qp;
 410	int len = 0;
 411	int i, out;
 412	unsigned long iflag;
 413
 414	for (i = 0; i < phba->cfg_hdw_queue; i++) {
 415		if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80))
 416			break;
 417		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];
 418
 419		len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
 420		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
 421		spin_lock(&qp->io_buf_list_get_lock);
 422		spin_lock(&qp->io_buf_list_put_lock);
 423		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
 424			qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
 425		len += scnprintf(buf + len, size - len,
 426				 "tot:%d get:%d put:%d mt:%d "
 427				 "ABTS scsi:%d nvme:%d Out:%d\n",
 428			qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
 429			qp->empty_io_bufs, qp->abts_scsi_io_bufs,
 430			qp->abts_nvme_io_bufs, out);
 431		spin_unlock(&qp->io_buf_list_put_lock);
 432		spin_unlock(&qp->io_buf_list_get_lock);
 433		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);
 434
 435		lpfc_debugfs_last_xripool++;
 436		if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
 437			lpfc_debugfs_last_xripool = 0;
 438	}
 439
 440	return len;
 441}
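/*
 * Illustrative sketch (not part of the driver): several dumpers in this
 * file keep a static cursor (here lpfc_debugfs_last_xripool) so that
 * consecutive reads resume at the next hardware queue instead of always
 * starting at queue 0 and truncating the tail.  The shape, with
 * hypothetical names, is:
 */
#if 0	/* example only, never compiled */
static int last_q;	/* persists across read() calls */

static int dump_queues_round_robin(int nqueues, char *buf, int size)
{
	int i, len = 0;

	for (i = 0; i < nqueues; i++) {
		if (len > size - 80)	/* keep room for one more line */
			break;
		len += scnprintf(buf + len, size - len,
				 "Q%d ...\n", last_q);
		if (++last_q >= nqueues)
			last_q = 0;
	}
	return len;
}
#endif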
 442
 443/**
 444 * lpfc_debugfs_multixripools_data - Display multi-XRI pools information
 445 * @phba: The HBA to gather host buffer info from.
 446 * @buf: The buffer to dump log into.
 447 * @size: The maximum amount of data to process.
 448 *
 449 * Description:
 450 * This routine displays current multi-XRI pools information including XRI
 451 * count in public, private and txcmplq. It also displays current high and
 452 * low watermark.
 453 *
 454 * Return Value:
 455 * This routine returns the amount of bytes that were dumped into @buf and will
 456 * not exceed @size.
 457 **/
 458static int
 459lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
 460{
 461	u32 i;
 462	u32 hwq_count;
 463	struct lpfc_sli4_hdw_queue *qp;
 464	struct lpfc_multixri_pool *multixri_pool;
 465	struct lpfc_pvt_pool *pvt_pool;
 466	struct lpfc_pbl_pool *pbl_pool;
 467	u32 txcmplq_cnt;
 468	char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0};
 469
 470	if (phba->sli_rev != LPFC_SLI_REV4)
 471		return 0;
 472
 473	if (!phba->sli4_hba.hdwq)
 474		return 0;
 475
 476	if (!phba->cfg_xri_rebalancing) {
 477		i = lpfc_debugfs_commonxripools_data(phba, buf, size);
 478		return i;
 479	}
 480
 481	/*
 482	 * Pbl: Current number of free XRIs in public pool
 483	 * Pvt: Current number of free XRIs in private pool
 484	 * Busy: Current number of outstanding XRIs
 485	 * HWM: Current high watermark
 486	 * pvt_empty: Incremented by 1 when IO submission fails (no xri)
 487	 * pbl_empty: Incremented by 1 when all pbl_pool are empty during
 488	 *            IO submission
 489	 */
 490	scnprintf(tmp, sizeof(tmp),
 491		  "HWQ:  Pbl  Pvt Busy  HWM |  pvt_empty  pbl_empty ");
 492	if (strlcat(buf, tmp, size) >= size)
 493		return strnlen(buf, size);
 494
 495#ifdef LPFC_MXP_STAT
 496	/*
 497	 * MAXH: Max high watermark seen so far
 498	 * above_lmt: Incremented by 1 if xri_owned > xri_limit during
 499	 *            IO submission
 500	 * below_lmt: Incremented by 1 if xri_owned <= xri_limit  during
 501	 *            IO submission
 502	 * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from
 503	 *             local pbl_pool
 504	 * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from
 505	 *             other pbl_pool
 506	 */
 507	scnprintf(tmp, sizeof(tmp),
 508		  "MAXH  above_lmt  below_lmt locPbl_hit othPbl_hit");
 509	if (strlcat(buf, tmp, size) >= size)
 510		return strnlen(buf, size);
 511
 512	/*
 513	 * sPbl: snapshot of Pbl 15 sec after stat gets cleared
 514	 * sPvt: snapshot of Pvt 15 sec after stat gets cleared
 515	 * sBusy: snapshot of Busy 15 sec after stat gets cleared
 516	 */
 517	scnprintf(tmp, sizeof(tmp),
 518		  " | sPbl sPvt sBusy");
 519	if (strlcat(buf, tmp, size) >= size)
 520		return strnlen(buf, size);
 521#endif
 522
 523	scnprintf(tmp, sizeof(tmp), "\n");
 524	if (strlcat(buf, tmp, size) >= size)
 525		return strnlen(buf, size);
 526
 527	hwq_count = phba->cfg_hdw_queue;
 528	for (i = 0; i < hwq_count; i++) {
 529		qp = &phba->sli4_hba.hdwq[i];
 530		multixri_pool = qp->p_multixri_pool;
 531		if (!multixri_pool)
 532			continue;
 533		pbl_pool = &multixri_pool->pbl_pool;
 534		pvt_pool = &multixri_pool->pvt_pool;
 535		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
 536
 537		scnprintf(tmp, sizeof(tmp),
 538			  "%03d: %4d %4d %4d %4d | %10d %10d ",
 539			  i, pbl_pool->count, pvt_pool->count,
 540			  txcmplq_cnt, pvt_pool->high_watermark,
 541			  qp->empty_io_bufs, multixri_pool->pbl_empty_count);
 542		if (strlcat(buf, tmp, size) >= size)
 543			break;
 544
 545#ifdef LPFC_MXP_STAT
 546		scnprintf(tmp, sizeof(tmp),
 547			  "%4d %10d %10d %10d %10d",
 548			  multixri_pool->stat_max_hwm,
 549			  multixri_pool->above_limit_count,
 550			  multixri_pool->below_limit_count,
 551			  multixri_pool->local_pbl_hit_count,
 552			  multixri_pool->other_pbl_hit_count);
 553		if (strlcat(buf, tmp, size) >= size)
 554			break;
 555
 556		scnprintf(tmp, sizeof(tmp),
 557			  " | %4d %4d %5d",
 558			  multixri_pool->stat_pbl_count,
 559			  multixri_pool->stat_pvt_count,
 560			  multixri_pool->stat_busy_count);
 561		if (strlcat(buf, tmp, size) >= size)
 562			break;
 563#endif
 564
 565		scnprintf(tmp, sizeof(tmp), "\n");
 566		if (strlcat(buf, tmp, size) >= size)
 567			break;
 568	}
 569	return strnlen(buf, size);
 570}
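/*
 * Illustrative sketch (not part of the driver): multixripools_data above
 * builds each fragment in a small stack buffer with scnprintf() and
 * appends it with strlcat().  strlcat() returns the length it tried to
 * create, so a result >= the destination size is a clean truncation
 * signal.  A hypothetical helper (buf must arrive NUL-terminated):
 */
#if 0	/* example only, never compiled */
static bool append_frag(char *buf, size_t size, int q, int count)
{
	char tmp[64];

	scnprintf(tmp, sizeof(tmp), "%03d: %4d\n", q, count);
	return strlcat(buf, tmp, size) < size;	/* false on truncation */
}
#endif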
 571
 572
 573#ifdef LPFC_HDWQ_LOCK_STAT
 574static int lpfc_debugfs_last_lock;
 575
 576/**
  577 * lpfc_debugfs_lockstat_data - Dump Hardware Queue lock statistics to a buffer
 578 * @phba: The HBA to gather host buffer info from.
 579 * @buf: The buffer to dump log into.
 580 * @size: The maximum amount of data to process.
 581 *
 582 * Description:
 583 * This routine dumps the Hardware Queue info from the @phba to @buf up to
 584 * @size number of bytes. A header that describes the current hdwq state will be
 585 * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
 586 * until @size bytes have been dumped or all the hdwq info has been dumped.
 587 *
 588 * Notes:
 589 * This routine will rotate through each configured Hardware Queue each
 590 * time called.
 591 *
 592 * Return Value:
 593 * This routine returns the amount of bytes that were dumped into @buf and will
 594 * not exceed @size.
 595 **/
 596static int
 597lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
 598{
 599	struct lpfc_sli4_hdw_queue *qp;
 600	int len = 0;
 601	int i;
 602
 603	if (phba->sli_rev != LPFC_SLI_REV4)
 604		return 0;
 605
 606	if (!phba->sli4_hba.hdwq)
 607		return 0;
 608
 609	for (i = 0; i < phba->cfg_hdw_queue; i++) {
 610		if (len > (LPFC_HDWQINFO_SIZE - 100))
 611			break;
 612		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];
 613
 614		len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
 615		if (phba->cfg_xri_rebalancing) {
 616			len += scnprintf(buf + len, size - len,
 617					 "get_pvt:%d mv_pvt:%d "
 618					 "mv2pub:%d mv2pvt:%d "
 619					 "put_pvt:%d put_pub:%d wq:%d\n",
 620					 qp->lock_conflict.alloc_pvt_pool,
 621					 qp->lock_conflict.mv_from_pvt_pool,
 622					 qp->lock_conflict.mv_to_pub_pool,
 623					 qp->lock_conflict.mv_to_pvt_pool,
 624					 qp->lock_conflict.free_pvt_pool,
 625					 qp->lock_conflict.free_pub_pool,
 626					 qp->lock_conflict.wq_access);
 627		} else {
 628			len += scnprintf(buf + len, size - len,
 629					 "get:%d put:%d free:%d wq:%d\n",
 630					 qp->lock_conflict.alloc_xri_get,
 631					 qp->lock_conflict.alloc_xri_put,
 632					 qp->lock_conflict.free_xri,
 633					 qp->lock_conflict.wq_access);
 634		}
 635
 636		lpfc_debugfs_last_lock++;
 637		if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue)
 638			lpfc_debugfs_last_lock = 0;
 639	}
 640
 641	return len;
 642}
 643#endif
 644
 645static int lpfc_debugfs_last_hba_slim_off;
 646
 647/**
 648 * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
 649 * @phba: The HBA to gather SLIM info from.
 650 * @buf: The buffer to dump log into.
 651 * @size: The maximum amount of data to process.
 652 *
 653 * Description:
 654 * This routine dumps the current contents of HBA SLIM for the HBA associated
 655 * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
 656 *
 657 * Notes:
 658 * This routine will only dump up to 1024 bytes of data each time called and
 659 * should be called multiple times to dump the entire HBA SLIM.
 660 *
 661 * Return Value:
 662 * This routine returns the amount of bytes that were dumped into @buf and will
 663 * not exceed @size.
 664 **/
 665static int
 666lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
 667{
 668	int len = 0;
 669	int i, off;
 670	uint32_t *ptr;
 671	char *buffer;
 672
 673	buffer = kmalloc(1024, GFP_KERNEL);
 674	if (!buffer)
 675		return 0;
 676
 677	off = 0;
 678	spin_lock_irq(&phba->hbalock);
 679
 680	len +=  scnprintf(buf+len, size-len, "HBA SLIM\n");
 681	lpfc_memcpy_from_slim(buffer,
 682		phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
 683
 684	ptr = (uint32_t *)&buffer[0];
 685	off = lpfc_debugfs_last_hba_slim_off;
 686
 687	/* Set it up for the next time */
 688	lpfc_debugfs_last_hba_slim_off += 1024;
 689	if (lpfc_debugfs_last_hba_slim_off >= 4096)
 690		lpfc_debugfs_last_hba_slim_off = 0;
 691
 692	i = 1024;
 693	while (i > 0) {
 694		len +=  scnprintf(buf+len, size-len,
 695		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 696		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 697		*(ptr+5), *(ptr+6), *(ptr+7));
 698		ptr += 8;
 699		i -= (8 * sizeof(uint32_t));
 700		off += (8 * sizeof(uint32_t));
 701	}
 702
 703	spin_unlock_irq(&phba->hbalock);
 704	kfree(buffer);
 705
 706	return len;
 707}
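/*
 * Note on the windowing above: each call copies and prints one 1 KiB
 * chunk of SLIM, then advances the static offset so four consecutive
 * reads cover the full 4 KiB region (offsets 0, 1024, 2048, 3072, then
 * back to 0).  The inner loop prints 8 32-bit words (32 bytes) per line,
 * so each chunk produces 32 lines.
 */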
 708
 709/**
 710 * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
 711 * @phba: The HBA to gather Host SLIM info from.
 712 * @buf: The buffer to dump log into.
 713 * @size: The maximum amount of data to process.
 714 *
 715 * Description:
 716 * This routine dumps the current contents of host SLIM for the host associated
 717 * with @phba to @buf up to @size bytes of data. The dump will contain the
 718 * Mailbox, PCB, Rings, and Registers that are located in host memory.
 719 *
 720 * Return Value:
 721 * This routine returns the amount of bytes that were dumped into @buf and will
 722 * not exceed @size.
 723 **/
 724static int
 725lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
 726{
 727	int len = 0;
 728	int i, off;
 729	uint32_t word0, word1, word2, word3;
 730	uint32_t *ptr;
 731	struct lpfc_pgp *pgpp;
 732	struct lpfc_sli *psli = &phba->sli;
 733	struct lpfc_sli_ring *pring;
 734
 735	off = 0;
 736	spin_lock_irq(&phba->hbalock);
 737
 738	len +=  scnprintf(buf+len, size-len, "SLIM Mailbox\n");
 739	ptr = (uint32_t *)phba->slim2p.virt;
 740	i = sizeof(MAILBOX_t);
 741	while (i > 0) {
 742		len +=  scnprintf(buf+len, size-len,
 743		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 744		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 745		*(ptr+5), *(ptr+6), *(ptr+7));
 746		ptr += 8;
 747		i -= (8 * sizeof(uint32_t));
 748		off += (8 * sizeof(uint32_t));
 749	}
 750
 751	len +=  scnprintf(buf+len, size-len, "SLIM PCB\n");
 752	ptr = (uint32_t *)phba->pcb;
 753	i = sizeof(PCB_t);
 754	while (i > 0) {
 755		len +=  scnprintf(buf+len, size-len,
 756		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
 757		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
 758		*(ptr+5), *(ptr+6), *(ptr+7));
 759		ptr += 8;
 760		i -= (8 * sizeof(uint32_t));
 761		off += (8 * sizeof(uint32_t));
 762	}
 763
 764	if (phba->sli_rev <= LPFC_SLI_REV3) {
 765		for (i = 0; i < 4; i++) {
 766			pgpp = &phba->port_gp[i];
 767			pring = &psli->sli3_ring[i];
 768			len +=  scnprintf(buf+len, size-len,
 769					 "Ring %d: CMD GetInx:%d "
 770					 "(Max:%d Next:%d "
 771					 "Local:%d flg:x%x)  "
 772					 "RSP PutInx:%d Max:%d\n",
 773					 i, pgpp->cmdGetInx,
 774					 pring->sli.sli3.numCiocb,
 775					 pring->sli.sli3.next_cmdidx,
 776					 pring->sli.sli3.local_getidx,
 777					 pring->flag, pgpp->rspPutInx,
 778					 pring->sli.sli3.numRiocb);
 779		}
 780
 781		word0 = readl(phba->HAregaddr);
 782		word1 = readl(phba->CAregaddr);
 783		word2 = readl(phba->HSregaddr);
 784		word3 = readl(phba->HCregaddr);
 785		len +=  scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
 786				 "HC:%08x\n", word0, word1, word2, word3);
 787	}
 788	spin_unlock_irq(&phba->hbalock);
 789	return len;
 790}
 791
 792/**
 793 * lpfc_debugfs_nodelist_data - Dump target node list to a buffer
 794 * @vport: The vport to gather target node info from.
 795 * @buf: The buffer to dump log into.
 796 * @size: The maximum amount of data to process.
 797 *
 798 * Description:
 799 * This routine dumps the current target node list associated with @vport to
 800 * @buf up to @size bytes of data. Each node entry in the dump will contain a
 801 * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
 802 *
 803 * Return Value:
 804 * This routine returns the amount of bytes that were dumped into @buf and will
 805 * not exceed @size.
 806 **/
 807static int
 808lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
 809{
 810	int len = 0;
 811	int i, iocnt, outio, cnt;
 812	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 813	struct lpfc_hba  *phba = vport->phba;
 814	struct lpfc_nodelist *ndlp;
 815	unsigned char *statep;
 816	struct nvme_fc_local_port *localport;
 817	struct nvme_fc_remote_port *nrport = NULL;
 818	struct lpfc_nvme_rport *rport;
 819
 820	cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
 821	outio = 0;
 822
 823	len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
 824	spin_lock_irq(shost->host_lock);
 825	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 826		iocnt = 0;
 827		if (!cnt) {
 828			len +=  scnprintf(buf+len, size-len,
 829				"Missing Nodelist Entries\n");
 830			break;
 831		}
 832		cnt--;
 833		switch (ndlp->nlp_state) {
 834		case NLP_STE_UNUSED_NODE:
 835			statep = "UNUSED";
 836			break;
 837		case NLP_STE_PLOGI_ISSUE:
 838			statep = "PLOGI ";
 839			break;
 840		case NLP_STE_ADISC_ISSUE:
 841			statep = "ADISC ";
 842			break;
 843		case NLP_STE_REG_LOGIN_ISSUE:
 844			statep = "REGLOG";
 845			break;
 846		case NLP_STE_PRLI_ISSUE:
 847			statep = "PRLI  ";
 848			break;
 849		case NLP_STE_LOGO_ISSUE:
 850			statep = "LOGO  ";
 851			break;
 852		case NLP_STE_UNMAPPED_NODE:
 853			statep = "UNMAP ";
 854			iocnt = 1;
 855			break;
 856		case NLP_STE_MAPPED_NODE:
 857			statep = "MAPPED";
 858			iocnt = 1;
 859			break;
 860		case NLP_STE_NPR_NODE:
 861			statep = "NPR   ";
 862			break;
 863		default:
 864			statep = "UNKNOWN";
 865		}
 866		len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
 867				statep, ndlp->nlp_DID);
 868		len += scnprintf(buf+len, size-len,
 869				"WWPN x%llx ",
 870				wwn_to_u64(ndlp->nlp_portname.u.wwn));
 871		len += scnprintf(buf+len, size-len,
 872				"WWNN x%llx ",
 873				wwn_to_u64(ndlp->nlp_nodename.u.wwn));
 874		if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
 875			len += scnprintf(buf+len, size-len, "RPI:%03d ",
 876					ndlp->nlp_rpi);
 877		else
 878			len += scnprintf(buf+len, size-len, "RPI:none ");
 879		len +=  scnprintf(buf+len, size-len, "flag:x%08x ",
 880			ndlp->nlp_flag);
 881		if (!ndlp->nlp_type)
 882			len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
 883		if (ndlp->nlp_type & NLP_FC_NODE)
 884			len += scnprintf(buf+len, size-len, "FC_NODE ");
 885		if (ndlp->nlp_type & NLP_FABRIC) {
 886			len += scnprintf(buf+len, size-len, "FABRIC ");
 887			iocnt = 0;
 888		}
 889		if (ndlp->nlp_type & NLP_FCP_TARGET)
 890			len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ",
 891				ndlp->nlp_sid);
 892		if (ndlp->nlp_type & NLP_FCP_INITIATOR)
 893			len += scnprintf(buf+len, size-len, "FCP_INITIATOR ");
 894		if (ndlp->nlp_type & NLP_NVME_TARGET)
 895			len += scnprintf(buf + len,
 896					size - len, "NVME_TGT sid:%d ",
 897					NLP_NO_SID);
 898		if (ndlp->nlp_type & NLP_NVME_INITIATOR)
 899			len += scnprintf(buf + len,
 900					size - len, "NVME_INITIATOR ");
 901		len += scnprintf(buf+len, size-len, "usgmap:%x ",
 902			ndlp->nlp_usg_map);
 903		len += scnprintf(buf+len, size-len, "refcnt:%x",
 904			kref_read(&ndlp->kref));
 905		if (iocnt) {
 906			i = atomic_read(&ndlp->cmd_pending);
 907			len += scnprintf(buf + len, size - len,
 908					" OutIO:x%x Qdepth x%x",
 909					i, ndlp->cmd_qdepth);
 910			outio += i;
 911		}
 912		len += scnprintf(buf + len, size - len, "defer:%x ",
 913			ndlp->nlp_defer_did);
 914		len +=  scnprintf(buf+len, size-len, "\n");
 915	}
 916	spin_unlock_irq(shost->host_lock);
 917
 918	len += scnprintf(buf + len, size - len,
 919			"\nOutstanding IO x%x\n",  outio);
 920
 921	if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
 922		len += scnprintf(buf + len, size - len,
 923				"\nNVME Targetport Entry ...\n");
 924
 925		/* Port state is only one of two values for now. */
 926		if (phba->targetport->port_id)
 927			statep = "REGISTERED";
 928		else
 929			statep = "INIT";
 930		len += scnprintf(buf + len, size - len,
 931				"TGT WWNN x%llx WWPN x%llx State %s\n",
 932				wwn_to_u64(vport->fc_nodename.u.wwn),
 933				wwn_to_u64(vport->fc_portname.u.wwn),
 934				statep);
 935		len += scnprintf(buf + len, size - len,
 936				"    Targetport DID x%06x\n",
 937				phba->targetport->port_id);
 938		goto out_exit;
 939	}
 940
 941	len += scnprintf(buf + len, size - len,
 942				"\nNVME Lport/Rport Entries ...\n");
 943
 944	localport = vport->localport;
 945	if (!localport)
 946		goto out_exit;
 947
 948	spin_lock_irq(shost->host_lock);
 949
 950	/* Port state is only one of two values for now. */
 951	if (localport->port_id)
 952		statep = "ONLINE";
 953	else
 954		statep = "UNKNOWN ";
 955
 956	len += scnprintf(buf + len, size - len,
 957			"Lport DID x%06x PortState %s\n",
 958			localport->port_id, statep);
 959
 960	len += scnprintf(buf + len, size - len, "\tRport List:\n");
 961	list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
 962		/* local short-hand pointer. */
 963		spin_lock(&phba->hbalock);
 964		rport = lpfc_ndlp_get_nrport(ndlp);
 965		if (rport)
 966			nrport = rport->remoteport;
 967		else
 968			nrport = NULL;
 969		spin_unlock(&phba->hbalock);
 970		if (!nrport)
 971			continue;
 972
 973		/* Port state is only one of two values for now. */
 974		switch (nrport->port_state) {
 975		case FC_OBJSTATE_ONLINE:
 976			statep = "ONLINE";
 977			break;
 978		case FC_OBJSTATE_UNKNOWN:
 979			statep = "UNKNOWN ";
 980			break;
 981		default:
 982			statep = "UNSUPPORTED";
 983			break;
 984		}
 985
 986		/* Tab in to show lport ownership. */
 987		len += scnprintf(buf + len, size - len,
 988				"\t%s Port ID:x%06x ",
 989				statep, nrport->port_id);
 990		len += scnprintf(buf + len, size - len, "WWPN x%llx ",
 991				nrport->port_name);
 992		len += scnprintf(buf + len, size - len, "WWNN x%llx ",
 993				nrport->node_name);
 994
 995		/* An NVME rport can have multiple roles. */
 996		if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
 997			len +=  scnprintf(buf + len, size - len,
 998					 "INITIATOR ");
 999		if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
1000			len +=  scnprintf(buf + len, size - len,
1001					 "TARGET ");
1002		if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
1003			len +=  scnprintf(buf + len, size - len,
1004					 "DISCSRVC ");
1005		if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
1006					  FC_PORT_ROLE_NVME_TARGET |
1007					  FC_PORT_ROLE_NVME_DISCOVERY))
1008			len +=  scnprintf(buf + len, size - len,
1009					 "UNKNOWN ROLE x%x",
1010					 nrport->port_role);
1011		/* Terminate the string. */
1012		len +=  scnprintf(buf + len, size - len, "\n");
1013	}
1014
1015	spin_unlock_irq(shost->host_lock);
1016 out_exit:
1017	return len;
1018}
1019
1020/**
 1021 * lpfc_debugfs_nvmestat_data - Dump NVME statistics to a buffer
 1022 * @vport: The vport to gather statistics from.
1023 * @buf: The buffer to dump log into.
1024 * @size: The maximum amount of data to process.
1025 *
1026 * Description:
1027 * This routine dumps the NVME statistics associated with @vport
1028 *
1029 * Return Value:
1030 * This routine returns the amount of bytes that were dumped into @buf and will
1031 * not exceed @size.
1032 **/
1033static int
1034lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
1035{
1036	struct lpfc_hba   *phba = vport->phba;
1037	struct lpfc_nvmet_tgtport *tgtp;
1038	struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
1039	struct nvme_fc_local_port *localport;
1040	struct lpfc_fc4_ctrl_stat *cstat;
1041	struct lpfc_nvme_lport *lport;
1042	uint64_t data1, data2, data3;
1043	uint64_t tot, totin, totout;
1044	int cnt, i;
1045	int len = 0;
1046
1047	if (phba->nvmet_support) {
1048		if (!phba->targetport)
1049			return len;
1050		tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
1051		len += scnprintf(buf + len, size - len,
1052				"\nNVME Targetport Statistics\n");
1053
1054		len += scnprintf(buf + len, size - len,
1055				"LS: Rcv %08x Drop %08x Abort %08x\n",
1056				atomic_read(&tgtp->rcv_ls_req_in),
1057				atomic_read(&tgtp->rcv_ls_req_drop),
1058				atomic_read(&tgtp->xmt_ls_abort));
1059		if (atomic_read(&tgtp->rcv_ls_req_in) !=
1060		    atomic_read(&tgtp->rcv_ls_req_out)) {
1061			len += scnprintf(buf + len, size - len,
1062					"Rcv LS: in %08x != out %08x\n",
1063					atomic_read(&tgtp->rcv_ls_req_in),
1064					atomic_read(&tgtp->rcv_ls_req_out));
1065		}
1066
1067		len += scnprintf(buf + len, size - len,
1068				"LS: Xmt %08x Drop %08x Cmpl %08x\n",
1069				atomic_read(&tgtp->xmt_ls_rsp),
1070				atomic_read(&tgtp->xmt_ls_drop),
1071				atomic_read(&tgtp->xmt_ls_rsp_cmpl));
1072
1073		len += scnprintf(buf + len, size - len,
1074				"LS: RSP Abort %08x xb %08x Err %08x\n",
1075				atomic_read(&tgtp->xmt_ls_rsp_aborted),
1076				atomic_read(&tgtp->xmt_ls_rsp_xb_set),
1077				atomic_read(&tgtp->xmt_ls_rsp_error));
1078
1079		len += scnprintf(buf + len, size - len,
1080				"FCP: Rcv %08x Defer %08x Release %08x "
1081				"Drop %08x\n",
1082				atomic_read(&tgtp->rcv_fcp_cmd_in),
1083				atomic_read(&tgtp->rcv_fcp_cmd_defer),
1084				atomic_read(&tgtp->xmt_fcp_release),
1085				atomic_read(&tgtp->rcv_fcp_cmd_drop));
1086
1087		if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
1088		    atomic_read(&tgtp->rcv_fcp_cmd_out)) {
1089			len += scnprintf(buf + len, size - len,
1090					"Rcv FCP: in %08x != out %08x\n",
1091					atomic_read(&tgtp->rcv_fcp_cmd_in),
1092					atomic_read(&tgtp->rcv_fcp_cmd_out));
1093		}
1094
1095		len += scnprintf(buf + len, size - len,
1096				"FCP Rsp: read %08x readrsp %08x "
1097				"write %08x rsp %08x\n",
1098				atomic_read(&tgtp->xmt_fcp_read),
1099				atomic_read(&tgtp->xmt_fcp_read_rsp),
1100				atomic_read(&tgtp->xmt_fcp_write),
1101				atomic_read(&tgtp->xmt_fcp_rsp));
1102
1103		len += scnprintf(buf + len, size - len,
1104				"FCP Rsp Cmpl: %08x err %08x drop %08x\n",
1105				atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
1106				atomic_read(&tgtp->xmt_fcp_rsp_error),
1107				atomic_read(&tgtp->xmt_fcp_rsp_drop));
1108
1109		len += scnprintf(buf + len, size - len,
1110				"FCP Rsp Abort: %08x xb %08x xricqe  %08x\n",
1111				atomic_read(&tgtp->xmt_fcp_rsp_aborted),
1112				atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
1113				atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
1114
1115		len += scnprintf(buf + len, size - len,
1116				"ABORT: Xmt %08x Cmpl %08x\n",
1117				atomic_read(&tgtp->xmt_fcp_abort),
1118				atomic_read(&tgtp->xmt_fcp_abort_cmpl));
1119
1120		len += scnprintf(buf + len, size - len,
1121				"ABORT: Sol %08x  Usol %08x Err %08x Cmpl %08x",
1122				atomic_read(&tgtp->xmt_abort_sol),
1123				atomic_read(&tgtp->xmt_abort_unsol),
1124				atomic_read(&tgtp->xmt_abort_rsp),
1125				atomic_read(&tgtp->xmt_abort_rsp_error));
1126
1127		len +=  scnprintf(buf + len, size - len, "\n");
1128
1129		cnt = 0;
1130		spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1131		list_for_each_entry_safe(ctxp, next_ctxp,
1132				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1133				list) {
1134			cnt++;
1135		}
1136		spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1137		if (cnt) {
1138			len += scnprintf(buf + len, size - len,
1139					"ABORT: %d ctx entries\n", cnt);
1140			spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1141			list_for_each_entry_safe(ctxp, next_ctxp,
1142				    &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
1143				    list) {
1144				if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
1145					break;
1146				len += scnprintf(buf + len, size - len,
1147						"Entry: oxid %x state %x "
1148						"flag %x\n",
1149						ctxp->oxid, ctxp->state,
1150						ctxp->flag);
1151			}
1152			spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
1153		}
1154
1155		/* Calculate outstanding IOs */
1156		tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
1157		tot += atomic_read(&tgtp->xmt_fcp_release);
1158		tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
1159
1160		len += scnprintf(buf + len, size - len,
1161				"IO_CTX: %08x  WAIT: cur %08x tot %08x\n"
1162				"CTX Outstanding %08llx\n",
1163				phba->sli4_hba.nvmet_xri_cnt,
1164				phba->sli4_hba.nvmet_io_wait_cnt,
1165				phba->sli4_hba.nvmet_io_wait_total,
1166				tot);
1167	} else {
1168		if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
1169			return len;
1170
1171		localport = vport->localport;
1172		if (!localport)
1173			return len;
1174		lport = (struct lpfc_nvme_lport *)localport->private;
1175		if (!lport)
1176			return len;
1177
1178		len += scnprintf(buf + len, size - len,
1179				"\nNVME HDWQ Statistics\n");
1180
1181		len += scnprintf(buf + len, size - len,
1182				"LS: Xmt %016x Cmpl %016x\n",
1183				atomic_read(&lport->fc4NvmeLsRequests),
1184				atomic_read(&lport->fc4NvmeLsCmpls));
1185
1186		totin = 0;
1187		totout = 0;
1188		for (i = 0; i < phba->cfg_hdw_queue; i++) {
1189			cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
1190			tot = cstat->io_cmpls;
1191			totin += tot;
1192			data1 = cstat->input_requests;
1193			data2 = cstat->output_requests;
1194			data3 = cstat->control_requests;
1195			totout += (data1 + data2 + data3);
1196
1197			/* Limit to 32, debugfs display buffer limitation */
1198			if (i >= 32)
1199				continue;
1200
1201			len += scnprintf(buf + len, PAGE_SIZE - len,
1202					"HDWQ (%d): Rd %016llx Wr %016llx "
1203					"IO %016llx ",
1204					i, data1, data2, data3);
1205			len += scnprintf(buf + len, PAGE_SIZE - len,
1206					"Cmpl %016llx OutIO %016llx\n",
1207					tot, ((data1 + data2 + data3) - tot));
1208		}
1209		len += scnprintf(buf + len, PAGE_SIZE - len,
1210				"Total FCP Cmpl %016llx Issue %016llx "
1211				"OutIO %016llx\n",
1212				totin, totout, totout - totin);
1213
1214		len += scnprintf(buf + len, size - len,
1215				"LS Xmt Err: Abrt %08x Err %08x  "
1216				"Cmpl Err: xb %08x Err %08x\n",
1217				atomic_read(&lport->xmt_ls_abort),
1218				atomic_read(&lport->xmt_ls_err),
1219				atomic_read(&lport->cmpl_ls_xb),
1220				atomic_read(&lport->cmpl_ls_err));
1221
1222		len += scnprintf(buf + len, size - len,
1223				"FCP Xmt Err: noxri %06x nondlp %06x "
1224				"qdepth %06x wqerr %06x err %06x Abrt %06x\n",
1225				atomic_read(&lport->xmt_fcp_noxri),
1226				atomic_read(&lport->xmt_fcp_bad_ndlp),
1227				atomic_read(&lport->xmt_fcp_qdepth),
1228				atomic_read(&lport->xmt_fcp_wqerr),
1229				atomic_read(&lport->xmt_fcp_err),
1230				atomic_read(&lport->xmt_fcp_abort));
1231
1232		len += scnprintf(buf + len, size - len,
1233				"FCP Cmpl Err: xb %08x Err %08x\n",
1234				atomic_read(&lport->cmpl_fcp_xb),
1235				atomic_read(&lport->cmpl_fcp_err));
1236
1237	}
1238
1239	return len;
1240}
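/*
 * Worked example of the outstanding-IO computation above (hypothetical
 * counts): with rcv_fcp_cmd_in = 1000, rcv_fcp_cmd_drop = 10 and
 * xmt_fcp_release = 950, tot = 1000 - (10 + 950) = 40 commands that were
 * received but neither dropped nor released yet, i.e. still in flight.
 */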
1241
1242/**
 1243 * lpfc_debugfs_scsistat_data - Dump SCSI statistics to a buffer
 1244 * @vport: The vport to gather statistics from.
1245 * @buf: The buffer to dump log into.
1246 * @size: The maximum amount of data to process.
1247 *
1248 * Description:
1249 * This routine dumps the SCSI statistics associated with @vport
1250 *
1251 * Return Value:
1252 * This routine returns the amount of bytes that were dumped into @buf and will
1253 * not exceed @size.
1254 **/
1255static int
1256lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size)
1257{
1258	int len;
1259	struct lpfc_hba *phba = vport->phba;
1260	struct lpfc_fc4_ctrl_stat *cstat;
1261	u64 data1, data2, data3;
1262	u64 tot, totin, totout;
1263	int i;
1264	char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
1265
1266	if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
1267	    (phba->sli_rev != LPFC_SLI_REV4))
1268		return 0;
1269
1270	scnprintf(buf, size, "SCSI HDWQ Statistics\n");
1271
1272	totin = 0;
1273	totout = 0;
1274	for (i = 0; i < phba->cfg_hdw_queue; i++) {
1275		cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
1276		tot = cstat->io_cmpls;
1277		totin += tot;
1278		data1 = cstat->input_requests;
1279		data2 = cstat->output_requests;
1280		data3 = cstat->control_requests;
1281		totout += (data1 + data2 + data3);
1282
1283		scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
1284			  "IO %016llx ", i, data1, data2, data3);
1285		if (strlcat(buf, tmp, size) >= size)
1286			goto buffer_done;
1287
1288		scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
1289			  tot, ((data1 + data2 + data3) - tot));
1290		if (strlcat(buf, tmp, size) >= size)
1291			goto buffer_done;
1292	}
1293	scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
1294		  "OutIO %016llx\n", totin, totout, totout - totin);
1295	strlcat(buf, tmp, size);
1296
1297buffer_done:
1298	len = strnlen(buf, size);
1299
1300	return len;
1301}
1302
1303void
1304lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
1305{
1306	uint64_t seg1, seg2, seg3, seg4;
1307	uint64_t segsum;
1308
1309	if (!lpfc_cmd->ts_last_cmd ||
1310	    !lpfc_cmd->ts_cmd_start ||
1311	    !lpfc_cmd->ts_cmd_wqput ||
1312	    !lpfc_cmd->ts_isr_cmpl ||
1313	    !lpfc_cmd->ts_data_io)
1314		return;
1315
1316	if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start)
1317		return;
1318	if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd)
1319		return;
1320	if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start)
1321		return;
1322	if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput)
1323		return;
1324	if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl)
1325		return;
1326	/*
1327	 * Segment 1 - Time from Last FCP command cmpl is handed
1328	 * off to NVME Layer to start of next command.
1329	 * Segment 2 - Time from Driver receives a IO cmd start
1330	 * from NVME Layer to WQ put is done on IO cmd.
1331	 * Segment 3 - Time from Driver WQ put is done on IO cmd
1332	 * to MSI-X ISR for IO cmpl.
1333	 * Segment 4 - Time from MSI-X ISR for IO cmpl to when
1334	 * cmpl is handled off to the NVME Layer.
1335	 */
1336	seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd;
1337	if (seg1 > 5000000)  /* 5 ms - for sequential IOs only */
1338		seg1 = 0;
1339
1340	/* Calculate times relative to start of IO */
1341	seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start);
1342	segsum = seg2;
1343	seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start;
1344	if (segsum > seg3)
1345		return;
1346	seg3 -= segsum;
1347	segsum += seg3;
1348
1349	seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start;
1350	if (segsum > seg4)
1351		return;
1352	seg4 -= segsum;
1353
1354	phba->ktime_data_samples++;
1355	phba->ktime_seg1_total += seg1;
1356	if (seg1 < phba->ktime_seg1_min)
1357		phba->ktime_seg1_min = seg1;
1358	else if (seg1 > phba->ktime_seg1_max)
1359		phba->ktime_seg1_max = seg1;
1360	phba->ktime_seg2_total += seg2;
1361	if (seg2 < phba->ktime_seg2_min)
1362		phba->ktime_seg2_min = seg2;
1363	else if (seg2 > phba->ktime_seg2_max)
1364		phba->ktime_seg2_max = seg2;
1365	phba->ktime_seg3_total += seg3;
1366	if (seg3 < phba->ktime_seg3_min)
1367		phba->ktime_seg3_min = seg3;
1368	else if (seg3 > phba->ktime_seg3_max)
1369		phba->ktime_seg3_max = seg3;
1370	phba->ktime_seg4_total += seg4;
1371	if (seg4 < phba->ktime_seg4_min)
1372		phba->ktime_seg4_min = seg4;
1373	else if (seg4 > phba->ktime_seg4_max)
1374		phba->ktime_seg4_max = seg4;
1375
1376	lpfc_cmd->ts_last_cmd = 0;
1377	lpfc_cmd->ts_cmd_start = 0;
1378	lpfc_cmd->ts_cmd_wqput  = 0;
1379	lpfc_cmd->ts_isr_cmpl = 0;
1380	lpfc_cmd->ts_data_io = 0;
1381}
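/*
 * Worked example of the segment math above (hypothetical ns timestamps):
 * ts_last_cmd = 100, ts_cmd_start = 400, ts_cmd_wqput = 900,
 * ts_isr_cmpl = 5400, ts_data_io = 6000.
 *   seg1 = 400 - 100 = 300            (idle gap before this command)
 *   seg2 = 900 - 400 = 500            (driver start to WQ doorbell)
 *   seg3 = (5400 - 400) - 500 = 4500  (doorbell to completion ISR)
 *   seg4 = (6000 - 400) - 5000 = 600  (ISR to completion handoff)
 * seg2..seg4 are taken relative to ts_cmd_start with the running sum
 * subtracted, so the segments add up to the total service time (5600).
 */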
1382
1383/**
 1384 * lpfc_debugfs_ioktime_data - Dump IO latency (ktime) statistics to a buffer
 1385 * @vport: The vport to gather statistics from.
1386 * @buf: The buffer to dump log into.
1387 * @size: The maximum amount of data to process.
1388 *
1389 * Description:
 1390 * This routine dumps the IO completion-time (ktime) statistics associated with @vport
1391 *
1392 * Return Value:
1393 * This routine returns the amount of bytes that were dumped into @buf and will
1394 * not exceed @size.
1395 **/
1396static int
1397lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size)
1398{
1399	struct lpfc_hba   *phba = vport->phba;
1400	int len = 0;
1401
1402	if (phba->nvmet_support == 0) {
1403		/* Initiator */
1404		len += scnprintf(buf + len, PAGE_SIZE - len,
1405				"ktime %s: Total Samples: %lld\n",
1406				(phba->ktime_on ?  "Enabled" : "Disabled"),
1407				phba->ktime_data_samples);
1408		if (phba->ktime_data_samples == 0)
1409			return len;
1410
1411		len += scnprintf(
1412			buf + len, PAGE_SIZE - len,
1413			"Segment 1: Last Cmd cmpl "
1414			"done -to- Start of next Cmd (in driver)\n");
1415		len += scnprintf(
1416			buf + len, PAGE_SIZE - len,
1417			"avg:%08lld min:%08lld max %08lld\n",
1418			div_u64(phba->ktime_seg1_total,
1419				phba->ktime_data_samples),
1420			phba->ktime_seg1_min,
1421			phba->ktime_seg1_max);
1422		len += scnprintf(
1423			buf + len, PAGE_SIZE - len,
1424			"Segment 2: Driver start of Cmd "
1425			"-to- Firmware WQ doorbell\n");
1426		len += scnprintf(
1427			buf + len, PAGE_SIZE - len,
1428			"avg:%08lld min:%08lld max %08lld\n",
1429			div_u64(phba->ktime_seg2_total,
1430				phba->ktime_data_samples),
1431			phba->ktime_seg2_min,
1432			phba->ktime_seg2_max);
1433		len += scnprintf(
1434			buf + len, PAGE_SIZE - len,
1435			"Segment 3: Firmware WQ doorbell -to- "
1436			"MSI-X ISR cmpl\n");
1437		len += scnprintf(
1438			buf + len, PAGE_SIZE - len,
1439			"avg:%08lld min:%08lld max %08lld\n",
1440			div_u64(phba->ktime_seg3_total,
1441				phba->ktime_data_samples),
1442			phba->ktime_seg3_min,
1443			phba->ktime_seg3_max);
1444		len += scnprintf(
1445			buf + len, PAGE_SIZE - len,
1446			"Segment 4: MSI-X ISR cmpl -to- "
1447			"Cmd cmpl done\n");
1448		len += scnprintf(
1449			buf + len, PAGE_SIZE - len,
1450			"avg:%08lld min:%08lld max %08lld\n",
1451			div_u64(phba->ktime_seg4_total,
1452				phba->ktime_data_samples),
1453			phba->ktime_seg4_min,
1454			phba->ktime_seg4_max);
1455		len += scnprintf(
1456			buf + len, PAGE_SIZE - len,
1457			"Total IO avg time: %08lld\n",
1458			div_u64(phba->ktime_seg1_total +
1459			phba->ktime_seg2_total  +
1460			phba->ktime_seg3_total +
1461			phba->ktime_seg4_total,
1462			phba->ktime_data_samples));
1463		return len;
1464	}
1465
1466	/* NVME Target */
1467	len += scnprintf(buf + len, PAGE_SIZE-len,
1468			"ktime %s: Total Samples: %lld %lld\n",
1469			(phba->ktime_on ? "Enabled" : "Disabled"),
1470			phba->ktime_data_samples,
1471			phba->ktime_status_samples);
1472	if (phba->ktime_data_samples == 0)
1473		return len;
1474
1475	len += scnprintf(buf + len, PAGE_SIZE-len,
1476			"Segment 1: MSI-X ISR Rcv cmd -to- "
1477			"cmd pass to NVME Layer\n");
1478	len += scnprintf(buf + len, PAGE_SIZE-len,
1479			"avg:%08lld min:%08lld max %08lld\n",
1480			div_u64(phba->ktime_seg1_total,
1481				phba->ktime_data_samples),
1482			phba->ktime_seg1_min,
1483			phba->ktime_seg1_max);
1484	len += scnprintf(buf + len, PAGE_SIZE-len,
1485			"Segment 2: cmd pass to NVME Layer- "
1486			"-to- Driver rcv cmd OP (action)\n");
1487	len += scnprintf(buf + len, PAGE_SIZE-len,
1488			"avg:%08lld min:%08lld max %08lld\n",
1489			div_u64(phba->ktime_seg2_total,
1490				phba->ktime_data_samples),
1491			phba->ktime_seg2_min,
1492			phba->ktime_seg2_max);
1493	len += scnprintf(buf + len, PAGE_SIZE-len,
1494			"Segment 3: Driver rcv cmd OP -to- "
1495			"Firmware WQ doorbell: cmd\n");
1496	len += scnprintf(buf + len, PAGE_SIZE-len,
1497			"avg:%08lld min:%08lld max %08lld\n",
1498			div_u64(phba->ktime_seg3_total,
1499				phba->ktime_data_samples),
1500			phba->ktime_seg3_min,
1501			phba->ktime_seg3_max);
1502	len += scnprintf(buf + len, PAGE_SIZE-len,
1503			"Segment 4: Firmware WQ doorbell: cmd "
1504			"-to- MSI-X ISR for cmd cmpl\n");
1505	len += scnprintf(buf + len, PAGE_SIZE-len,
1506			"avg:%08lld min:%08lld max %08lld\n",
1507			div_u64(phba->ktime_seg4_total,
1508				phba->ktime_data_samples),
1509			phba->ktime_seg4_min,
1510			phba->ktime_seg4_max);
1511	len += scnprintf(buf + len, PAGE_SIZE-len,
1512			"Segment 5: MSI-X ISR for cmd cmpl "
1513			"-to- NVME layer passed cmd done\n");
1514	len += scnprintf(buf + len, PAGE_SIZE-len,
1515			"avg:%08lld min:%08lld max %08lld\n",
1516			div_u64(phba->ktime_seg5_total,
1517				phba->ktime_data_samples),
1518			phba->ktime_seg5_min,
1519			phba->ktime_seg5_max);
1520
1521	if (phba->ktime_status_samples == 0) {
1522		len += scnprintf(buf + len, PAGE_SIZE-len,
1523				"Total: cmd received by MSI-X ISR "
1524				"-to- cmd completed on wire\n");
1525		len += scnprintf(buf + len, PAGE_SIZE-len,
1526				"avg:%08lld min:%08lld "
1527				"max %08lld\n",
1528				div_u64(phba->ktime_seg10_total,
1529					phba->ktime_data_samples),
1530				phba->ktime_seg10_min,
1531				phba->ktime_seg10_max);
1532		return len;
1533	}
1534
1535	len += scnprintf(buf + len, PAGE_SIZE-len,
1536			"Segment 6: NVME layer passed cmd done "
1537			"-to- Driver rcv rsp status OP\n");
1538	len += scnprintf(buf + len, PAGE_SIZE-len,
1539			"avg:%08lld min:%08lld max %08lld\n",
1540			div_u64(phba->ktime_seg6_total,
1541				phba->ktime_status_samples),
1542			phba->ktime_seg6_min,
1543			phba->ktime_seg6_max);
1544	len += scnprintf(buf + len, PAGE_SIZE-len,
1545			"Segment 7: Driver rcv rsp status OP "
1546			"-to- Firmware WQ doorbell: status\n");
1547	len += scnprintf(buf + len, PAGE_SIZE-len,
1548			"avg:%08lld min:%08lld max %08lld\n",
1549			div_u64(phba->ktime_seg7_total,
1550				phba->ktime_status_samples),
1551			phba->ktime_seg7_min,
1552			phba->ktime_seg7_max);
1553	len += scnprintf(buf + len, PAGE_SIZE-len,
1554			"Segment 8: Firmware WQ doorbell: status"
1555			" -to- MSI-X ISR for status cmpl\n");
1556	len += scnprintf(buf + len, PAGE_SIZE-len,
1557			"avg:%08lld min:%08lld max %08lld\n",
1558			div_u64(phba->ktime_seg8_total,
1559				phba->ktime_status_samples),
1560			phba->ktime_seg8_min,
1561			phba->ktime_seg8_max);
1562	len += scnprintf(buf + len, PAGE_SIZE-len,
1563			"Segment 9: MSI-X ISR for status cmpl  "
1564			"-to- NVME layer passed status done\n");
1565	len += scnprintf(buf + len, PAGE_SIZE-len,
1566			"avg:%08lld min:%08lld max %08lld\n",
1567			div_u64(phba->ktime_seg9_total,
1568				phba->ktime_status_samples),
1569			phba->ktime_seg9_min,
1570			phba->ktime_seg9_max);
1571	len += scnprintf(buf + len, PAGE_SIZE-len,
1572			"Total: cmd received by MSI-X ISR -to- "
1573			"cmd completed on wire\n");
1574	len += scnprintf(buf + len, PAGE_SIZE-len,
1575			"avg:%08lld min:%08lld max %08lld\n",
1576			div_u64(phba->ktime_seg10_total,
1577				phba->ktime_status_samples),
1578			phba->ktime_seg10_min,
1579			phba->ktime_seg10_max);
1580	return len;
1581}
1582
1583/**
1584 * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer
 1585 * @phba: The phba to gather the NVME IO trace from.
1586 * @buf: The buffer to dump log into.
1587 * @size: The maximum amount of data to process.
1588 *
1589 * Description:
1590 * This routine dumps the NVME IO trace associated with @phba
1591 *
1592 * Return Value:
1593 * This routine returns the amount of bytes that were dumped into @buf and will
1594 * not exceed @size.
1595 **/
1596static int
1597lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
1598{
1599	struct lpfc_debugfs_nvmeio_trc *dtp;
1600	int i, state, index, skip;
1601	int len = 0;
1602
1603	state = phba->nvmeio_trc_on;
1604
1605	index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) &
1606		(phba->nvmeio_trc_size - 1);
1607	skip = phba->nvmeio_trc_output_idx;
1608
1609	len += scnprintf(buf + len, size - len,
1610			"%s IO Trace %s: next_idx %d skip %d size %d\n",
 1611			(phba->nvmet_support ? "NVMET" : "NVME"),
1612			(state ? "Enabled" : "Disabled"),
1613			index, skip, phba->nvmeio_trc_size);
1614
1615	if (!phba->nvmeio_trc || state)
1616		return len;
1617
 1618	/* trace MUST be off to continue */
1619
1620	for (i = index; i < phba->nvmeio_trc_size; i++) {
1621		if (skip) {
1622			skip--;
1623			continue;
1624		}
1625		dtp = phba->nvmeio_trc + i;
1626		phba->nvmeio_trc_output_idx++;
1627
1628		if (!dtp->fmt)
1629			continue;
1630
1631		len +=  scnprintf(buf + len, size - len, dtp->fmt,
1632			dtp->data1, dtp->data2, dtp->data3);
1633
1634		if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
1635			phba->nvmeio_trc_output_idx = 0;
1636			len += scnprintf(buf + len, size - len,
1637					"Trace Complete\n");
1638			goto out;
1639		}
1640
1641		if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
1642			len += scnprintf(buf + len, size - len,
1643					"Trace Continue (%d of %d)\n",
1644					phba->nvmeio_trc_output_idx,
1645					phba->nvmeio_trc_size);
1646			goto out;
1647		}
1648	}
1649	for (i = 0; i < in
