/drivers/scsi/lpfc/lpfc_debugfs.c

http://github.com/mirrors/linux · C · 6513 lines · 4590 code · 705 blank · 1218 comment · 900 complexity · bb68be07d654c3c150c885b902e6526c MD5 · raw file

Large files are truncated click here to view the full file

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2017-2019 Broadcom. All Rights Reserved. The term *
  5. * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. *
  6. * Copyright (C) 2007-2015 Emulex. All rights reserved. *
  7. * EMULEX and SLI are trademarks of Emulex. *
  8. * www.broadcom.com *
  9. * *
  10. * This program is free software; you can redistribute it and/or *
  11. * modify it under the terms of version 2 of the GNU General *
  12. * Public License as published by the Free Software Foundation. *
  13. * This program is distributed in the hope that it will be useful. *
  14. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  15. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  16. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  17. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  18. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  19. * more details, a copy of which can be found in the file COPYING *
  20. * included with this package. *
  21. *******************************************************************/
  22. #include <linux/blkdev.h>
  23. #include <linux/delay.h>
  24. #include <linux/module.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/idr.h>
  27. #include <linux/interrupt.h>
  28. #include <linux/kthread.h>
  29. #include <linux/slab.h>
  30. #include <linux/pci.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/ctype.h>
  33. #include <linux/vmalloc.h>
  34. #include <scsi/scsi.h>
  35. #include <scsi/scsi_device.h>
  36. #include <scsi/scsi_host.h>
  37. #include <scsi/scsi_transport_fc.h>
  38. #include <scsi/fc/fc_fs.h>
  39. #include <linux/nvme-fc-driver.h>
  40. #include "lpfc_hw4.h"
  41. #include "lpfc_hw.h"
  42. #include "lpfc_sli.h"
  43. #include "lpfc_sli4.h"
  44. #include "lpfc_nl.h"
  45. #include "lpfc_disc.h"
  46. #include "lpfc.h"
  47. #include "lpfc_scsi.h"
  48. #include "lpfc_nvme.h"
  49. #include "lpfc_nvmet.h"
  50. #include "lpfc_logmsg.h"
  51. #include "lpfc_crtn.h"
  52. #include "lpfc_vport.h"
  53. #include "lpfc_version.h"
  54. #include "lpfc_compat.h"
  55. #include "lpfc_debugfs.h"
  56. #include "lpfc_bsg.h"
  57. #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
  58. /*
  59. * debugfs interface
  60. *
  61. * To access this interface the user should:
  62. * # mount -t debugfs none /sys/kernel/debug
  63. *
  64. * The lpfc debugfs directory hierarchy is:
  65. * /sys/kernel/debug/lpfc/fnX/vportY
  66. * where X is the lpfc hba function unique_id
  67. * where Y is the vport VPI on that hba
  68. *
  69. * Debugging services available per vport:
  70. * discovery_trace
  71. * This is an ASCII readable file that contains a trace of the last
  72. * lpfc_debugfs_max_disc_trc events that happened on a specific vport.
  73. * See lpfc_debugfs.h for different categories of discovery events.
  74. * To enable the discovery trace, the following module parameters must be set:
  75. * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
  76. * lpfc_debugfs_max_disc_trc=X Where X is the event trace depth for
  77. * EACH vport. X MUST also be a power of 2.
  78. * lpfc_debugfs_mask_disc_trc=Y Where Y is an event mask as defined in
  79. * lpfc_debugfs.h .
  80. *
  81. * slow_ring_trace
  82. * This is an ASCII readable file that contains a trace of the last
  83. * lpfc_debugfs_max_slow_ring_trc events that happened on a specific HBA.
  84. * To enable the slow ring trace, the following module parameters must be set:
  85. * lpfc_debugfs_enable=1 Turns on lpfc debugfs filesystem support
  86. * lpfc_debugfs_max_slow_ring_trc=X Where X is the event trace depth for
  87. * the HBA. X MUST also be a power of 2.
  88. */
  89. static int lpfc_debugfs_enable = 1;
  90. module_param(lpfc_debugfs_enable, int, S_IRUGO);
  91. MODULE_PARM_DESC(lpfc_debugfs_enable, "Enable debugfs services");
  92. /* This MUST be a power of 2 */
  93. static int lpfc_debugfs_max_disc_trc;
  94. module_param(lpfc_debugfs_max_disc_trc, int, S_IRUGO);
  95. MODULE_PARM_DESC(lpfc_debugfs_max_disc_trc,
  96. "Set debugfs discovery trace depth");
  97. /* This MUST be a power of 2 */
  98. static int lpfc_debugfs_max_slow_ring_trc;
  99. module_param(lpfc_debugfs_max_slow_ring_trc, int, S_IRUGO);
  100. MODULE_PARM_DESC(lpfc_debugfs_max_slow_ring_trc,
  101. "Set debugfs slow ring trace depth");
  102. /* This MUST be a power of 2 */
  103. static int lpfc_debugfs_max_nvmeio_trc;
  104. module_param(lpfc_debugfs_max_nvmeio_trc, int, 0444);
  105. MODULE_PARM_DESC(lpfc_debugfs_max_nvmeio_trc,
  106. "Set debugfs NVME IO trace depth");
  107. static int lpfc_debugfs_mask_disc_trc;
  108. module_param(lpfc_debugfs_mask_disc_trc, int, S_IRUGO);
  109. MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
  110. "Set debugfs discovery trace mask");
  111. #include <linux/debugfs.h>
  112. static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
  113. static unsigned long lpfc_debugfs_start_time = 0L;
  114. /* iDiag */
  115. static struct lpfc_idiag idiag;
  116. /**
  117. * lpfc_debugfs_disc_trc_data - Dump discovery logging to a buffer
  118. * @vport: The vport to gather the log info from.
  119. * @buf: The buffer to dump log into.
  120. * @size: The maximum amount of data to process.
  121. *
  122. * Description:
  123. * This routine gathers the lpfc discovery debugfs data from the @vport and
  124. * dumps it to @buf up to @size number of bytes. It will start at the next entry
  125. * in the log and process the log until the end of the buffer. Then it will
  126. * gather from the beginning of the log and process until the current entry.
  127. *
  128. * Notes:
  129. * Discovery logging will be disabled while this routine dumps the log.
  130. *
  131. * Return Value:
  132. * This routine returns the amount of bytes that were dumped into @buf and will
  133. * not exceed @size.
  134. **/
  135. static int
  136. lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
  137. {
  138. int i, index, len, enable;
  139. uint32_t ms;
  140. struct lpfc_debugfs_trc *dtp;
  141. char *buffer;
  142. buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
  143. if (!buffer)
  144. return 0;
  145. enable = lpfc_debugfs_enable;
  146. lpfc_debugfs_enable = 0;
  147. len = 0;
  148. index = (atomic_read(&vport->disc_trc_cnt) + 1) &
  149. (lpfc_debugfs_max_disc_trc - 1);
  150. for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
  151. dtp = vport->disc_trc + i;
  152. if (!dtp->fmt)
  153. continue;
  154. ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
  155. snprintf(buffer,
  156. LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
  157. dtp->seq_cnt, ms, dtp->fmt);
  158. len += scnprintf(buf+len, size-len, buffer,
  159. dtp->data1, dtp->data2, dtp->data3);
  160. }
  161. for (i = 0; i < index; i++) {
  162. dtp = vport->disc_trc + i;
  163. if (!dtp->fmt)
  164. continue;
  165. ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
  166. snprintf(buffer,
  167. LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
  168. dtp->seq_cnt, ms, dtp->fmt);
  169. len += scnprintf(buf+len, size-len, buffer,
  170. dtp->data1, dtp->data2, dtp->data3);
  171. }
  172. lpfc_debugfs_enable = enable;
  173. kfree(buffer);
  174. return len;
  175. }
  176. /**
  177. * lpfc_debugfs_slow_ring_trc_data - Dump slow ring logging to a buffer
  178. * @phba: The HBA to gather the log info from.
  179. * @buf: The buffer to dump log into.
  180. * @size: The maximum amount of data to process.
  181. *
  182. * Description:
  183. * This routine gathers the lpfc slow ring debugfs data from the @phba and
  184. * dumps it to @buf up to @size number of bytes. It will start at the next entry
  185. * in the log and process the log until the end of the buffer. Then it will
  186. * gather from the beginning of the log and process until the current entry.
  187. *
  188. * Notes:
  189. * Slow ring logging will be disabled while this routine dumps the log.
  190. *
  191. * Return Value:
  192. * This routine returns the amount of bytes that were dumped into @buf and will
  193. * not exceed @size.
  194. **/
  195. static int
  196. lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
  197. {
  198. int i, index, len, enable;
  199. uint32_t ms;
  200. struct lpfc_debugfs_trc *dtp;
  201. char *buffer;
  202. buffer = kmalloc(LPFC_DEBUG_TRC_ENTRY_SIZE, GFP_KERNEL);
  203. if (!buffer)
  204. return 0;
  205. enable = lpfc_debugfs_enable;
  206. lpfc_debugfs_enable = 0;
  207. len = 0;
  208. index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
  209. (lpfc_debugfs_max_slow_ring_trc - 1);
  210. for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
  211. dtp = phba->slow_ring_trc + i;
  212. if (!dtp->fmt)
  213. continue;
  214. ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
  215. snprintf(buffer,
  216. LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
  217. dtp->seq_cnt, ms, dtp->fmt);
  218. len += scnprintf(buf+len, size-len, buffer,
  219. dtp->data1, dtp->data2, dtp->data3);
  220. }
  221. for (i = 0; i < index; i++) {
  222. dtp = phba->slow_ring_trc + i;
  223. if (!dtp->fmt)
  224. continue;
  225. ms = jiffies_to_msecs(dtp->jif - lpfc_debugfs_start_time);
  226. snprintf(buffer,
  227. LPFC_DEBUG_TRC_ENTRY_SIZE, "%010d:%010d ms:%s\n",
  228. dtp->seq_cnt, ms, dtp->fmt);
  229. len += scnprintf(buf+len, size-len, buffer,
  230. dtp->data1, dtp->data2, dtp->data3);
  231. }
  232. lpfc_debugfs_enable = enable;
  233. kfree(buffer);
  234. return len;
  235. }
  236. static int lpfc_debugfs_last_hbq = -1;
  237. /**
  238. * lpfc_debugfs_hbqinfo_data - Dump host buffer queue info to a buffer
  239. * @phba: The HBA to gather host buffer info from.
  240. * @buf: The buffer to dump log into.
  241. * @size: The maximum amount of data to process.
  242. *
  243. * Description:
  244. * This routine dumps the host buffer queue info from the @phba to @buf up to
  245. * @size number of bytes. A header that describes the current hbq state will be
  246. * dumped to @buf first and then info on each hbq entry will be dumped to @buf
  247. * until @size bytes have been dumped or all the hbq info has been dumped.
  248. *
  249. * Notes:
  250. * This routine will rotate through each configured HBQ each time called.
  251. *
  252. * Return Value:
  253. * This routine returns the amount of bytes that were dumped into @buf and will
  254. * not exceed @size.
  255. **/
static int
lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
{
	int len = 0;
	int i, j, found, posted, low;
	uint32_t phys, raw_index, getidx;
	struct lpfc_hbq_init *hip;
	struct hbq_s *hbqs;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;

	/* Host buffer queues only exist on SLI rev 3 adapters */
	if (phba->sli_rev != 3)
		return 0;

	spin_lock_irq(&phba->hbalock);

	/* toggle between multiple hbqs, if any */
	i = lpfc_sli_hbq_count();
	if (i > 1) {
		lpfc_debugfs_last_hbq++;
		if (lpfc_debugfs_last_hbq >= i)
			lpfc_debugfs_last_hbq = 0;
	}
	else
		lpfc_debugfs_last_hbq = 0;

	/* i now selects the HBQ dumped by this call */
	i = lpfc_debugfs_last_hbq;

	len += scnprintf(buf+len, size-len, "HBQ %d Info\n", i);

	hbqs = &phba->hbqs[i];
	/* Count the buffers currently posted on this HBQ's buffer list */
	posted = 0;
	list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list)
		posted++;

	hip = lpfc_hbq_defs[i];
	len += scnprintf(buf+len, size-len,
		"idx:%d prof:%d rn:%d bufcnt:%d icnt:%d acnt:%d posted %d\n",
		hip->hbq_index, hip->profile, hip->rn,
		hip->buffer_count, hip->init_count, hip->add_count, posted);

	/* hbq_get[] is little-endian shared memory; convert before printing */
	raw_index = phba->hbq_get[i];
	getidx = le32_to_cpu(raw_index);
	len += scnprintf(buf+len, size-len,
		"entries:%d bufcnt:%d Put:%d nPut:%d localGet:%d hbaGet:%d\n",
		hbqs->entry_count, hbqs->buffer_count, hbqs->hbqPutIdx,
		hbqs->next_hbqPutIdx, hbqs->local_hbqGetIdx, getidx);

	/* Walk every entry of the selected HBQ */
	hbqe = (struct lpfc_hbq_entry *) phba->hbqs[i].hbq_virt;
	for (j=0; j<hbqs->entry_count; j++) {
		len +=  scnprintf(buf+len, size-len,
			"%03d: %08x %04x %05x ", j,
			le32_to_cpu(hbqe->bde.addrLow),
			le32_to_cpu(hbqe->bde.tus.w),
			le32_to_cpu(hbqe->buffer_tag));
		/* NOTE: i is reused below as the posted-buffer ordinal */
		i = 0;
		found = 0;

		/* First calculate if slot has an associated posted buffer */
		low = hbqs->hbqPutIdx - posted;
		if (low >= 0) {
			if ((j >= hbqs->hbqPutIdx) || (j < low)) {
				len +=  scnprintf(buf + len, size - len,
						"Unused\n");
				goto skipit;
			}
		}
		else {
			/* Posted window wraps around the end of the ring */
			if ((j >= hbqs->hbqPutIdx) &&
				(j < (hbqs->entry_count+low))) {
				len +=  scnprintf(buf + len, size - len,
						"Unused\n");
				goto skipit;
			}
		}

		/* Get the Buffer info for the posted buffer */
		list_for_each_entry(d_buf, &hbqs->hbq_buffer_list, list) {
			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
			/* Match on the low 32 bits of the DMA address */
			phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff);
			if (phys == le32_to_cpu(hbqe->bde.addrLow)) {
				len +=  scnprintf(buf+len, size-len,
					"Buf%d: x%px %06x\n", i,
					hbq_buf->dbuf.virt, hbq_buf->tag);
				found = 1;
				break;
			}
			i++;
		}
		if (!found) {
			len +=  scnprintf(buf+len, size-len, "No DMAinfo?\n");
		}
skipit:
		hbqe++;
		/* Stop before the next line could overrun the dump buffer */
		if (len > LPFC_HBQINFO_SIZE - 54)
			break;
	}
	spin_unlock_irq(&phba->hbalock);
	return len;
}
  346. static int lpfc_debugfs_last_xripool;
  347. /**
  348. * lpfc_debugfs_commonxripools_data - Dump Hardware Queue info to a buffer
  349. * @phba: The HBA to gather host buffer info from.
  350. * @buf: The buffer to dump log into.
  351. * @size: The maximum amount of data to process.
  352. *
  353. * Description:
  354. * This routine dumps the Hardware Queue info from the @phba to @buf up to
  355. * @size number of bytes. A header that describes the current hdwq state will be
  356. * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
  357. * until @size bytes have been dumped or all the hdwq info has been dumped.
  358. *
  359. * Notes:
  360. * This routine will rotate through each configured Hardware Queue each
  361. * time called.
  362. *
  363. * Return Value:
  364. * This routine returns the amount of bytes that were dumped into @buf and will
  365. * not exceed @size.
  366. **/
static int
lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size)
{
	struct lpfc_sli4_hdw_queue *qp;
	int len = 0;
	int i, out;
	unsigned long iflag;

	/*
	 * Dump one line per hardware queue, resuming from the rotating
	 * static index lpfc_debugfs_last_xripool across invocations.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		/* Stop before the next line could overrun the dump buffer */
		if (len > (LPFC_DUMP_MULTIXRIPOOL_SIZE - 80))
			break;
		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool];

		len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i);
		/*
		 * Take all three buffer-list locks (abts outermost, then
		 * get, then put) so the counters form a consistent snapshot.
		 */
		spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag);
		spin_lock(&qp->io_buf_list_get_lock);
		spin_lock(&qp->io_buf_list_put_lock);
		/* Outstanding = total minus everything accounted for on lists */
		out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs +
			qp->abts_scsi_io_bufs + qp->abts_nvme_io_bufs);
		len += scnprintf(buf + len, size - len,
				"tot:%d get:%d put:%d mt:%d "
				"ABTS scsi:%d nvme:%d Out:%d\n",
			qp->total_io_bufs, qp->get_io_bufs, qp->put_io_bufs,
			qp->empty_io_bufs, qp->abts_scsi_io_bufs,
			qp->abts_nvme_io_bufs, out);
		spin_unlock(&qp->io_buf_list_put_lock);
		spin_unlock(&qp->io_buf_list_get_lock);
		spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag);

		/* Advance the rotation for the next call, wrapping to 0 */
		lpfc_debugfs_last_xripool++;
		if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue)
			lpfc_debugfs_last_xripool = 0;
	}

	return len;
}
  399. /**
  400. * lpfc_debugfs_multixripools_data - Display multi-XRI pools information
  401. * @phba: The HBA to gather host buffer info from.
  402. * @buf: The buffer to dump log into.
  403. * @size: The maximum amount of data to process.
  404. *
  405. * Description:
  406. * This routine displays current multi-XRI pools information including XRI
  407. * count in public, private and txcmplq. It also displays current high and
  408. * low watermark.
  409. *
  410. * Return Value:
  411. * This routine returns the amount of bytes that were dumped into @buf and will
  412. * not exceed @size.
  413. **/
static int
lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size)
{
	u32 i;
	u32 hwq_count;
	struct lpfc_sli4_hdw_queue *qp;
	struct lpfc_multixri_pool *multixri_pool;
	struct lpfc_pvt_pool *pvt_pool;
	struct lpfc_pbl_pool *pbl_pool;
	u32 txcmplq_cnt;
	char tmp[LPFC_DEBUG_OUT_LINE_SZ] = {0};

	/* Multi-XRI pools are an SLI4 feature with allocated hdw queues */
	if (phba->sli_rev != LPFC_SLI_REV4)
		return 0;

	if (!phba->sli4_hba.hdwq)
		return 0;

	/* Without XRI rebalancing, fall back to the common pool dump */
	if (!phba->cfg_xri_rebalancing) {
		i = lpfc_debugfs_commonxripools_data(phba, buf, size);
		return i;
	}

	/*
	 * Pbl: Current number of free XRIs in public pool
	 * Pvt: Current number of free XRIs in private pool
	 * Busy: Current number of outstanding XRIs
	 * HWM: Current high watermark
	 * pvt_empty: Incremented by 1 when IO submission fails (no xri)
	 * pbl_empty: Incremented by 1 when all pbl_pool are empty during
	 * IO submission
	 */
	/*
	 * Output is accumulated by formatting each fragment into tmp and
	 * strlcat'ing it onto buf; on overflow the truncated length is
	 * returned.  NOTE(review): strlcat assumes buf arrives
	 * NUL-terminated (typically zeroed by the caller) — confirm at
	 * the call sites.
	 */
	scnprintf(tmp, sizeof(tmp),
		  "HWQ: Pbl Pvt Busy HWM | pvt_empty pbl_empty ");
	if (strlcat(buf, tmp, size) >= size)
		return strnlen(buf, size);

#ifdef LPFC_MXP_STAT
	/*
	 * MAXH: Max high watermark seen so far
	 * above_lmt: Incremented by 1 if xri_owned > xri_limit during
	 * IO submission
	 * below_lmt: Incremented by 1 if xri_owned <= xri_limit during
	 * IO submission
	 * locPbl_hit: Incremented by 1 if successfully get a batch of XRI from
	 * local pbl_pool
	 * othPbl_hit: Incremented by 1 if successfully get a batch of XRI from
	 * other pbl_pool
	 */
	scnprintf(tmp, sizeof(tmp),
		  "MAXH above_lmt below_lmt locPbl_hit othPbl_hit");
	if (strlcat(buf, tmp, size) >= size)
		return strnlen(buf, size);

	/*
	 * sPbl: snapshot of Pbl 15 sec after stat gets cleared
	 * sPvt: snapshot of Pvt 15 sec after stat gets cleared
	 * sBusy: snapshot of Busy 15 sec after stat gets cleared
	 */
	scnprintf(tmp, sizeof(tmp),
		  " | sPbl sPvt sBusy");
	if (strlcat(buf, tmp, size) >= size)
		return strnlen(buf, size);
#endif

	scnprintf(tmp, sizeof(tmp), "\n");
	if (strlcat(buf, tmp, size) >= size)
		return strnlen(buf, size);

	/* One stats line per hardware queue that has a multi-XRI pool */
	hwq_count = phba->cfg_hdw_queue;
	for (i = 0; i < hwq_count; i++) {
		qp = &phba->sli4_hba.hdwq[i];
		multixri_pool = qp->p_multixri_pool;
		if (!multixri_pool)
			continue;
		pbl_pool = &multixri_pool->pbl_pool;
		pvt_pool = &multixri_pool->pvt_pool;
		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;

		scnprintf(tmp, sizeof(tmp),
			  "%03d: %4d %4d %4d %4d | %10d %10d ",
			  i, pbl_pool->count, pvt_pool->count,
			  txcmplq_cnt, pvt_pool->high_watermark,
			  qp->empty_io_bufs, multixri_pool->pbl_empty_count);
		if (strlcat(buf, tmp, size) >= size)
			break;

#ifdef LPFC_MXP_STAT
		scnprintf(tmp, sizeof(tmp),
			  "%4d %10d %10d %10d %10d",
			  multixri_pool->stat_max_hwm,
			  multixri_pool->above_limit_count,
			  multixri_pool->below_limit_count,
			  multixri_pool->local_pbl_hit_count,
			  multixri_pool->other_pbl_hit_count);
		if (strlcat(buf, tmp, size) >= size)
			break;

		scnprintf(tmp, sizeof(tmp),
			  " | %4d %4d %5d",
			  multixri_pool->stat_pbl_count,
			  multixri_pool->stat_pvt_count,
			  multixri_pool->stat_busy_count);
		if (strlcat(buf, tmp, size) >= size)
			break;
#endif

		scnprintf(tmp, sizeof(tmp), "\n");
		if (strlcat(buf, tmp, size) >= size)
			break;
	}
	return strnlen(buf, size);
}
  515. #ifdef LPFC_HDWQ_LOCK_STAT
  516. static int lpfc_debugfs_last_lock;
  517. /**
  518. * lpfc_debugfs_lockstat_data - Dump Hardware Queue info to a buffer
  519. * @phba: The HBA to gather host buffer info from.
  520. * @buf: The buffer to dump log into.
  521. * @size: The maximum amount of data to process.
  522. *
  523. * Description:
  524. * This routine dumps the Hardware Queue info from the @phba to @buf up to
  525. * @size number of bytes. A header that describes the current hdwq state will be
  526. * dumped to @buf first and then info on each hdwq entry will be dumped to @buf
  527. * until @size bytes have been dumped or all the hdwq info has been dumped.
  528. *
  529. * Notes:
  530. * This routine will rotate through each configured Hardware Queue each
  531. * time called.
  532. *
  533. * Return Value:
  534. * This routine returns the amount of bytes that were dumped into @buf and will
  535. * not exceed @size.
  536. **/
static int
lpfc_debugfs_lockstat_data(struct lpfc_hba *phba, char *buf, int size)
{
	struct lpfc_sli4_hdw_queue *qp;
	int len = 0;
	int i;

	/* Lock statistics only exist for SLI4 with allocated hdw queues */
	if (phba->sli_rev != LPFC_SLI_REV4)
		return 0;

	if (!phba->sli4_hba.hdwq)
		return 0;

	/*
	 * Dump one line per hardware queue, resuming from the rotating
	 * static index lpfc_debugfs_last_lock across invocations.
	 */
	for (i = 0; i < phba->cfg_hdw_queue; i++) {
		/* Stop before the next line could overrun the dump buffer */
		if (len > (LPFC_HDWQINFO_SIZE - 100))
			break;
		qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_lock];

		len += scnprintf(buf + len, size - len, "HdwQ %03d Lock ", i);
		if (phba->cfg_xri_rebalancing) {
			/* Multi-XRI pool mode: per-pool contention counters */
			len += scnprintf(buf + len, size - len,
					 "get_pvt:%d mv_pvt:%d "
					 "mv2pub:%d mv2pvt:%d "
					 "put_pvt:%d put_pub:%d wq:%d\n",
					 qp->lock_conflict.alloc_pvt_pool,
					 qp->lock_conflict.mv_from_pvt_pool,
					 qp->lock_conflict.mv_to_pub_pool,
					 qp->lock_conflict.mv_to_pvt_pool,
					 qp->lock_conflict.free_pvt_pool,
					 qp->lock_conflict.free_pub_pool,
					 qp->lock_conflict.wq_access);
		} else {
			/* Common pool mode: simple get/put/free counters */
			len += scnprintf(buf + len, size - len,
					 "get:%d put:%d free:%d wq:%d\n",
					 qp->lock_conflict.alloc_xri_get,
					 qp->lock_conflict.alloc_xri_put,
					 qp->lock_conflict.free_xri,
					 qp->lock_conflict.wq_access);
		}

		/* Advance the rotation for the next call, wrapping to 0 */
		lpfc_debugfs_last_lock++;
		if (lpfc_debugfs_last_lock >= phba->cfg_hdw_queue)
			lpfc_debugfs_last_lock = 0;
	}

	return len;
}
  578. #endif
  579. static int lpfc_debugfs_last_hba_slim_off;
  580. /**
  581. * lpfc_debugfs_dumpHBASlim_data - Dump HBA SLIM info to a buffer
  582. * @phba: The HBA to gather SLIM info from.
  583. * @buf: The buffer to dump log into.
  584. * @size: The maximum amount of data to process.
  585. *
  586. * Description:
  587. * This routine dumps the current contents of HBA SLIM for the HBA associated
  588. * with @phba to @buf up to @size bytes of data. This is the raw HBA SLIM data.
  589. *
  590. * Notes:
  591. * This routine will only dump up to 1024 bytes of data each time called and
  592. * should be called multiple times to dump the entire HBA SLIM.
  593. *
  594. * Return Value:
  595. * This routine returns the amount of bytes that were dumped into @buf and will
  596. * not exceed @size.
  597. **/
  598. static int
  599. lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
  600. {
  601. int len = 0;
  602. int i, off;
  603. uint32_t *ptr;
  604. char *buffer;
  605. buffer = kmalloc(1024, GFP_KERNEL);
  606. if (!buffer)
  607. return 0;
  608. off = 0;
  609. spin_lock_irq(&phba->hbalock);
  610. len += scnprintf(buf+len, size-len, "HBA SLIM\n");
  611. lpfc_memcpy_from_slim(buffer,
  612. phba->MBslimaddr + lpfc_debugfs_last_hba_slim_off, 1024);
  613. ptr = (uint32_t *)&buffer[0];
  614. off = lpfc_debugfs_last_hba_slim_off;
  615. /* Set it up for the next time */
  616. lpfc_debugfs_last_hba_slim_off += 1024;
  617. if (lpfc_debugfs_last_hba_slim_off >= 4096)
  618. lpfc_debugfs_last_hba_slim_off = 0;
  619. i = 1024;
  620. while (i > 0) {
  621. len += scnprintf(buf+len, size-len,
  622. "%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
  623. off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
  624. *(ptr+5), *(ptr+6), *(ptr+7));
  625. ptr += 8;
  626. i -= (8 * sizeof(uint32_t));
  627. off += (8 * sizeof(uint32_t));
  628. }
  629. spin_unlock_irq(&phba->hbalock);
  630. kfree(buffer);
  631. return len;
  632. }
  633. /**
  634. * lpfc_debugfs_dumpHostSlim_data - Dump host SLIM info to a buffer
  635. * @phba: The HBA to gather Host SLIM info from.
  636. * @buf: The buffer to dump log into.
  637. * @size: The maximum amount of data to process.
  638. *
  639. * Description:
  640. * This routine dumps the current contents of host SLIM for the host associated
  641. * with @phba to @buf up to @size bytes of data. The dump will contain the
  642. * Mailbox, PCB, Rings, and Registers that are located in host memory.
  643. *
  644. * Return Value:
  645. * This routine returns the amount of bytes that were dumped into @buf and will
  646. * not exceed @size.
  647. **/
static int
lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
{
	int len = 0;
	int i, off;
	uint32_t word0, word1, word2, word3;
	uint32_t *ptr;
	struct lpfc_pgp *pgpp;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	off = 0;
	spin_lock_irq(&phba->hbalock);

	/* Hexdump the mailbox area of host SLIM, 8 words per line */
	len += scnprintf(buf+len, size-len, "SLIM Mailbox\n");
	ptr = (uint32_t *)phba->slim2p.virt;
	i = sizeof(MAILBOX_t);
	while (i > 0) {
		len +=  scnprintf(buf+len, size-len,
		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
		*(ptr+5), *(ptr+6), *(ptr+7));
		ptr += 8;
		i -= (8 * sizeof(uint32_t));
		off += (8 * sizeof(uint32_t));
	}

	/* Hexdump the PCB, continuing the running offset */
	len +=  scnprintf(buf+len, size-len, "SLIM PCB\n");
	ptr = (uint32_t *)phba->pcb;
	i = sizeof(PCB_t);
	while (i > 0) {
		len +=  scnprintf(buf+len, size-len,
		"%08x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		off, *ptr, *(ptr+1), *(ptr+2), *(ptr+3), *(ptr+4),
		*(ptr+5), *(ptr+6), *(ptr+7));
		ptr += 8;
		i -= (8 * sizeof(uint32_t));
		off += (8 * sizeof(uint32_t));
	}

	/* SLI3 and earlier also carry per-ring get/put indexes in SLIM */
	if (phba->sli_rev <= LPFC_SLI_REV3) {
		for (i = 0; i < 4; i++) {
			pgpp = &phba->port_gp[i];
			pring = &psli->sli3_ring[i];
			len +=  scnprintf(buf+len, size-len,
					 "Ring %d: CMD GetInx:%d "
					 "(Max:%d Next:%d "
					 "Local:%d flg:x%x) "
					 "RSP PutInx:%d Max:%d\n",
					 i, pgpp->cmdGetInx,
					 pring->sli.sli3.numCiocb,
					 pring->sli.sli3.next_cmdidx,
					 pring->sli.sli3.local_getidx,
					 pring->flag, pgpp->rspPutInx,
					 pring->sli.sli3.numRiocb);
		}

		/* Read the host attention/control registers directly */
		word0 = readl(phba->HAregaddr);
		word1 = readl(phba->CAregaddr);
		word2 = readl(phba->HSregaddr);
		word3 = readl(phba->HCregaddr);
		len +=  scnprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
				 "HC:%08x\n", word0, word1, word2, word3);
	}
	spin_unlock_irq(&phba->hbalock);
	return len;
}
  710. /**
  711. * lpfc_debugfs_nodelist_data - Dump target node list to a buffer
  712. * @vport: The vport to gather target node info from.
  713. * @buf: The buffer to dump log into.
  714. * @size: The maximum amount of data to process.
  715. *
  716. * Description:
  717. * This routine dumps the current target node list associated with @vport to
  718. * @buf up to @size bytes of data. Each node entry in the dump will contain a
  719. * node state, DID, WWPN, WWNN, RPI, flags, type, and other useful fields.
  720. *
  721. * Return Value:
  722. * This routine returns the amount of bytes that were dumped into @buf and will
  723. * not exceed @size.
  724. **/
  725. static int
  726. lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size)
  727. {
  728. int len = 0;
  729. int i, iocnt, outio, cnt;
  730. struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
  731. struct lpfc_hba *phba = vport->phba;
  732. struct lpfc_nodelist *ndlp;
  733. unsigned char *statep;
  734. struct nvme_fc_local_port *localport;
  735. struct nvme_fc_remote_port *nrport = NULL;
  736. struct lpfc_nvme_rport *rport;
  737. cnt = (LPFC_NODELIST_SIZE / LPFC_NODELIST_ENTRY_SIZE);
  738. outio = 0;
  739. len += scnprintf(buf+len, size-len, "\nFCP Nodelist Entries ...\n");
  740. spin_lock_irq(shost->host_lock);
  741. list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
  742. iocnt = 0;
  743. if (!cnt) {
  744. len += scnprintf(buf+len, size-len,
  745. "Missing Nodelist Entries\n");
  746. break;
  747. }
  748. cnt--;
  749. switch (ndlp->nlp_state) {
  750. case NLP_STE_UNUSED_NODE:
  751. statep = "UNUSED";
  752. break;
  753. case NLP_STE_PLOGI_ISSUE:
  754. statep = "PLOGI ";
  755. break;
  756. case NLP_STE_ADISC_ISSUE:
  757. statep = "ADISC ";
  758. break;
  759. case NLP_STE_REG_LOGIN_ISSUE:
  760. statep = "REGLOG";
  761. break;
  762. case NLP_STE_PRLI_ISSUE:
  763. statep = "PRLI ";
  764. break;
  765. case NLP_STE_LOGO_ISSUE:
  766. statep = "LOGO ";
  767. break;
  768. case NLP_STE_UNMAPPED_NODE:
  769. statep = "UNMAP ";
  770. iocnt = 1;
  771. break;
  772. case NLP_STE_MAPPED_NODE:
  773. statep = "MAPPED";
  774. iocnt = 1;
  775. break;
  776. case NLP_STE_NPR_NODE:
  777. statep = "NPR ";
  778. break;
  779. default:
  780. statep = "UNKNOWN";
  781. }
  782. len += scnprintf(buf+len, size-len, "%s DID:x%06x ",
  783. statep, ndlp->nlp_DID);
  784. len += scnprintf(buf+len, size-len,
  785. "WWPN x%llx ",
  786. wwn_to_u64(ndlp->nlp_portname.u.wwn));
  787. len += scnprintf(buf+len, size-len,
  788. "WWNN x%llx ",
  789. wwn_to_u64(ndlp->nlp_nodename.u.wwn));
  790. if (ndlp->nlp_flag & NLP_RPI_REGISTERED)
  791. len += scnprintf(buf+len, size-len, "RPI:%03d ",
  792. ndlp->nlp_rpi);
  793. else
  794. len += scnprintf(buf+len, size-len, "RPI:none ");
  795. len += scnprintf(buf+len, size-len, "flag:x%08x ",
  796. ndlp->nlp_flag);
  797. if (!ndlp->nlp_type)
  798. len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE ");
  799. if (ndlp->nlp_type & NLP_FC_NODE)
  800. len += scnprintf(buf+len, size-len, "FC_NODE ");
  801. if (ndlp->nlp_type & NLP_FABRIC) {
  802. len += scnprintf(buf+len, size-len, "FABRIC ");
  803. iocnt = 0;
  804. }
  805. if (ndlp->nlp_type & NLP_FCP_TARGET)
  806. len += scnprintf(buf+len, size-len, "FCP_TGT sid:%d ",
  807. ndlp->nlp_sid);
  808. if (ndlp->nlp_type & NLP_FCP_INITIATOR)
  809. len += scnprintf(buf+len, size-len, "FCP_INITIATOR ");
  810. if (ndlp->nlp_type & NLP_NVME_TARGET)
  811. len += scnprintf(buf + len,
  812. size - len, "NVME_TGT sid:%d ",
  813. NLP_NO_SID);
  814. if (ndlp->nlp_type & NLP_NVME_INITIATOR)
  815. len += scnprintf(buf + len,
  816. size - len, "NVME_INITIATOR ");
  817. len += scnprintf(buf+len, size-len, "usgmap:%x ",
  818. ndlp->nlp_usg_map);
  819. len += scnprintf(buf+len, size-len, "refcnt:%x",
  820. kref_read(&ndlp->kref));
  821. if (iocnt) {
  822. i = atomic_read(&ndlp->cmd_pending);
  823. len += scnprintf(buf + len, size - len,
  824. " OutIO:x%x Qdepth x%x",
  825. i, ndlp->cmd_qdepth);
  826. outio += i;
  827. }
  828. len += scnprintf(buf + len, size - len, "defer:%x ",
  829. ndlp->nlp_defer_did);
  830. len += scnprintf(buf+len, size-len, "\n");
  831. }
  832. spin_unlock_irq(shost->host_lock);
  833. len += scnprintf(buf + len, size - len,
  834. "\nOutstanding IO x%x\n", outio);
  835. if (phba->nvmet_support && phba->targetport && (vport == phba->pport)) {
  836. len += scnprintf(buf + len, size - len,
  837. "\nNVME Targetport Entry ...\n");
  838. /* Port state is only one of two values for now. */
  839. if (phba->targetport->port_id)
  840. statep = "REGISTERED";
  841. else
  842. statep = "INIT";
  843. len += scnprintf(buf + len, size - len,
  844. "TGT WWNN x%llx WWPN x%llx State %s\n",
  845. wwn_to_u64(vport->fc_nodename.u.wwn),
  846. wwn_to_u64(vport->fc_portname.u.wwn),
  847. statep);
  848. len += scnprintf(buf + len, size - len,
  849. " Targetport DID x%06x\n",
  850. phba->targetport->port_id);
  851. goto out_exit;
  852. }
  853. len += scnprintf(buf + len, size - len,
  854. "\nNVME Lport/Rport Entries ...\n");
  855. localport = vport->localport;
  856. if (!localport)
  857. goto out_exit;
  858. spin_lock_irq(shost->host_lock);
  859. /* Port state is only one of two values for now. */
  860. if (localport->port_id)
  861. statep = "ONLINE";
  862. else
  863. statep = "UNKNOWN ";
  864. len += scnprintf(buf + len, size - len,
  865. "Lport DID x%06x PortState %s\n",
  866. localport->port_id, statep);
  867. len += scnprintf(buf + len, size - len, "\tRport List:\n");
  868. list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
  869. /* local short-hand pointer. */
  870. spin_lock(&phba->hbalock);
  871. rport = lpfc_ndlp_get_nrport(ndlp);
  872. if (rport)
  873. nrport = rport->remoteport;
  874. else
  875. nrport = NULL;
  876. spin_unlock(&phba->hbalock);
  877. if (!nrport)
  878. continue;
  879. /* Port state is only one of two values for now. */
  880. switch (nrport->port_state) {
  881. case FC_OBJSTATE_ONLINE:
  882. statep = "ONLINE";
  883. break;
  884. case FC_OBJSTATE_UNKNOWN:
  885. statep = "UNKNOWN ";
  886. break;
  887. default:
  888. statep = "UNSUPPORTED";
  889. break;
  890. }
  891. /* Tab in to show lport ownership. */
  892. len += scnprintf(buf + len, size - len,
  893. "\t%s Port ID:x%06x ",
  894. statep, nrport->port_id);
  895. len += scnprintf(buf + len, size - len, "WWPN x%llx ",
  896. nrport->port_name);
  897. len += scnprintf(buf + len, size - len, "WWNN x%llx ",
  898. nrport->node_name);
  899. /* An NVME rport can have multiple roles. */
  900. if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR)
  901. len += scnprintf(buf + len, size - len,
  902. "INITIATOR ");
  903. if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET)
  904. len += scnprintf(buf + len, size - len,
  905. "TARGET ");
  906. if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY)
  907. len += scnprintf(buf + len, size - len,
  908. "DISCSRVC ");
  909. if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
  910. FC_PORT_ROLE_NVME_TARGET |
  911. FC_PORT_ROLE_NVME_DISCOVERY))
  912. len += scnprintf(buf + len, size - len,
  913. "UNKNOWN ROLE x%x",
  914. nrport->port_role);
  915. /* Terminate the string. */
  916. len += scnprintf(buf + len, size - len, "\n");
  917. }
  918. spin_unlock_irq(shost->host_lock);
  919. out_exit:
  920. return len;
  921. }
/**
 * lpfc_debugfs_nvmestat_data - Dump NVME statistics to a buffer
 * @vport: The vport to gather NVME statistics from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the NVME statistics associated with @vport
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
 **/
  935. static int
  936. lpfc_debugfs_nvmestat_data(struct lpfc_vport *vport, char *buf, int size)
  937. {
  938. struct lpfc_hba *phba = vport->phba;
  939. struct lpfc_nvmet_tgtport *tgtp;
  940. struct lpfc_nvmet_rcv_ctx *ctxp, *next_ctxp;
  941. struct nvme_fc_local_port *localport;
  942. struct lpfc_fc4_ctrl_stat *cstat;
  943. struct lpfc_nvme_lport *lport;
  944. uint64_t data1, data2, data3;
  945. uint64_t tot, totin, totout;
  946. int cnt, i;
  947. int len = 0;
  948. if (phba->nvmet_support) {
  949. if (!phba->targetport)
  950. return len;
  951. tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
  952. len += scnprintf(buf + len, size - len,
  953. "\nNVME Targetport Statistics\n");
  954. len += scnprintf(buf + len, size - len,
  955. "LS: Rcv %08x Drop %08x Abort %08x\n",
  956. atomic_read(&tgtp->rcv_ls_req_in),
  957. atomic_read(&tgtp->rcv_ls_req_drop),
  958. atomic_read(&tgtp->xmt_ls_abort));
  959. if (atomic_read(&tgtp->rcv_ls_req_in) !=
  960. atomic_read(&tgtp->rcv_ls_req_out)) {
  961. len += scnprintf(buf + len, size - len,
  962. "Rcv LS: in %08x != out %08x\n",
  963. atomic_read(&tgtp->rcv_ls_req_in),
  964. atomic_read(&tgtp->rcv_ls_req_out));
  965. }
  966. len += scnprintf(buf + len, size - len,
  967. "LS: Xmt %08x Drop %08x Cmpl %08x\n",
  968. atomic_read(&tgtp->xmt_ls_rsp),
  969. atomic_read(&tgtp->xmt_ls_drop),
  970. atomic_read(&tgtp->xmt_ls_rsp_cmpl));
  971. len += scnprintf(buf + len, size - len,
  972. "LS: RSP Abort %08x xb %08x Err %08x\n",
  973. atomic_read(&tgtp->xmt_ls_rsp_aborted),
  974. atomic_read(&tgtp->xmt_ls_rsp_xb_set),
  975. atomic_read(&tgtp->xmt_ls_rsp_error));
  976. len += scnprintf(buf + len, size - len,
  977. "FCP: Rcv %08x Defer %08x Release %08x "
  978. "Drop %08x\n",
  979. atomic_read(&tgtp->rcv_fcp_cmd_in),
  980. atomic_read(&tgtp->rcv_fcp_cmd_defer),
  981. atomic_read(&tgtp->xmt_fcp_release),
  982. atomic_read(&tgtp->rcv_fcp_cmd_drop));
  983. if (atomic_read(&tgtp->rcv_fcp_cmd_in) !=
  984. atomic_read(&tgtp->rcv_fcp_cmd_out)) {
  985. len += scnprintf(buf + len, size - len,
  986. "Rcv FCP: in %08x != out %08x\n",
  987. atomic_read(&tgtp->rcv_fcp_cmd_in),
  988. atomic_read(&tgtp->rcv_fcp_cmd_out));
  989. }
  990. len += scnprintf(buf + len, size - len,
  991. "FCP Rsp: read %08x readrsp %08x "
  992. "write %08x rsp %08x\n",
  993. atomic_read(&tgtp->xmt_fcp_read),
  994. atomic_read(&tgtp->xmt_fcp_read_rsp),
  995. atomic_read(&tgtp->xmt_fcp_write),
  996. atomic_read(&tgtp->xmt_fcp_rsp));
  997. len += scnprintf(buf + len, size - len,
  998. "FCP Rsp Cmpl: %08x err %08x drop %08x\n",
  999. atomic_read(&tgtp->xmt_fcp_rsp_cmpl),
  1000. atomic_read(&tgtp->xmt_fcp_rsp_error),
  1001. atomic_read(&tgtp->xmt_fcp_rsp_drop));
  1002. len += scnprintf(buf + len, size - len,
  1003. "FCP Rsp Abort: %08x xb %08x xricqe %08x\n",
  1004. atomic_read(&tgtp->xmt_fcp_rsp_aborted),
  1005. atomic_read(&tgtp->xmt_fcp_rsp_xb_set),
  1006. atomic_read(&tgtp->xmt_fcp_xri_abort_cqe));
  1007. len += scnprintf(buf + len, size - len,
  1008. "ABORT: Xmt %08x Cmpl %08x\n",
  1009. atomic_read(&tgtp->xmt_fcp_abort),
  1010. atomic_read(&tgtp->xmt_fcp_abort_cmpl));
  1011. len += scnprintf(buf + len, size - len,
  1012. "ABORT: Sol %08x Usol %08x Err %08x Cmpl %08x",
  1013. atomic_read(&tgtp->xmt_abort_sol),
  1014. atomic_read(&tgtp->xmt_abort_unsol),
  1015. atomic_read(&tgtp->xmt_abort_rsp),
  1016. atomic_read(&tgtp->xmt_abort_rsp_error));
  1017. len += scnprintf(buf + len, size - len, "\n");
  1018. cnt = 0;
  1019. spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
  1020. list_for_each_entry_safe(ctxp, next_ctxp,
  1021. &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
  1022. list) {
  1023. cnt++;
  1024. }
  1025. spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
  1026. if (cnt) {
  1027. len += scnprintf(buf + len, size - len,
  1028. "ABORT: %d ctx entries\n", cnt);
  1029. spin_lock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
  1030. list_for_each_entry_safe(ctxp, next_ctxp,
  1031. &phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
  1032. list) {
  1033. if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ))
  1034. break;
  1035. len += scnprintf(buf + len, size - len,
  1036. "Entry: oxid %x state %x "
  1037. "flag %x\n",
  1038. ctxp->oxid, ctxp->state,
  1039. ctxp->flag);
  1040. }
  1041. spin_unlock(&phba->sli4_hba.abts_nvmet_buf_list_lock);
  1042. }
  1043. /* Calculate outstanding IOs */
  1044. tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
  1045. tot += atomic_read(&tgtp->xmt_fcp_release);
  1046. tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
  1047. len += scnprintf(buf + len, size - len,
  1048. "IO_CTX: %08x WAIT: cur %08x tot %08x\n"
  1049. "CTX Outstanding %08llx\n",
  1050. phba->sli4_hba.nvmet_xri_cnt,
  1051. phba->sli4_hba.nvmet_io_wait_cnt,
  1052. phba->sli4_hba.nvmet_io_wait_total,
  1053. tot);
  1054. } else {
  1055. if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
  1056. return len;
  1057. localport = vport->localport;
  1058. if (!localport)
  1059. return len;
  1060. lport = (struct lpfc_nvme_lport *)localport->private;
  1061. if (!lport)
  1062. return len;
  1063. len += scnprintf(buf + len, size - len,
  1064. "\nNVME HDWQ Statistics\n");
  1065. len += scnprintf(buf + len, size - len,
  1066. "LS: Xmt %016x Cmpl %016x\n",
  1067. atomic_read(&lport->fc4NvmeLsRequests),
  1068. atomic_read(&lport->fc4NvmeLsCmpls));
  1069. totin = 0;
  1070. totout = 0;
  1071. for (i = 0; i < phba->cfg_hdw_queue; i++) {
  1072. cstat = &phba->sli4_hba.hdwq[i].nvme_cstat;
  1073. tot = cstat->io_cmpls;
  1074. totin += tot;
  1075. data1 = cstat->input_requests;
  1076. data2 = cstat->output_requests;
  1077. data3 = cstat->control_requests;
  1078. totout += (data1 + data2 + data3);
  1079. /* Limit to 32, debugfs display buffer limitation */
  1080. if (i >= 32)
  1081. continue;
  1082. len += scnprintf(buf + len, PAGE_SIZE - len,
  1083. "HDWQ (%d): Rd %016llx Wr %016llx "
  1084. "IO %016llx ",
  1085. i, data1, data2, data3);
  1086. len += scnprintf(buf + len, PAGE_SIZE - len,
  1087. "Cmpl %016llx OutIO %016llx\n",
  1088. tot, ((data1 + data2 + data3) - tot));
  1089. }
  1090. len += scnprintf(buf + len, PAGE_SIZE - len,
  1091. "Total FCP Cmpl %016llx Issue %016llx "
  1092. "OutIO %016llx\n",
  1093. totin, totout, totout - totin);
  1094. len += scnprintf(buf + len, size - len,
  1095. "LS Xmt Err: Abrt %08x Err %08x "
  1096. "Cmpl Err: xb %08x Err %08x\n",
  1097. atomic_read(&lport->xmt_ls_abort),
  1098. atomic_read(&lport->xmt_ls_err),
  1099. atomic_read(&lport->cmpl_ls_xb),
  1100. atomic_read(&lport->cmpl_ls_err));
  1101. len += scnprintf(buf + len, size - len,
  1102. "FCP Xmt Err: noxri %06x nondlp %06x "
  1103. "qdepth %06x wqerr %06x err %06x Abrt %06x\n",
  1104. atomic_read(&lport->xmt_fcp_noxri),
  1105. atomic_read(&lport->xmt_fcp_bad_ndlp),
  1106. atomic_read(&lport->xmt_fcp_qdepth),
  1107. atomic_read(&lport->xmt_fcp_wqerr),
  1108. atomic_read(&lport->xmt_fcp_err),
  1109. atomic_read(&lport->xmt_fcp_abort));
  1110. len += scnprintf(buf + len, size - len,
  1111. "FCP Cmpl Err: xb %08x Err %08x\n",
  1112. atomic_read(&lport->cmpl_fcp_xb),
  1113. atomic_read(&lport->cmpl_fcp_err));
  1114. }
  1115. return len;
  1116. }
/**
 * lpfc_debugfs_scsistat_data - Dump SCSI statistics to a buffer
 * @vport: The vport to gather SCSI statistics from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the SCSI statistics associated with @vport
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
 **/
  1130. static int
  1131. lpfc_debugfs_scsistat_data(struct lpfc_vport *vport, char *buf, int size)
  1132. {
  1133. int len;
  1134. struct lpfc_hba *phba = vport->phba;
  1135. struct lpfc_fc4_ctrl_stat *cstat;
  1136. u64 data1, data2, data3;
  1137. u64 tot, totin, totout;
  1138. int i;
  1139. char tmp[LPFC_MAX_SCSI_INFO_TMP_LEN] = {0};
  1140. if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ||
  1141. (phba->sli_rev != LPFC_SLI_REV4))
  1142. return 0;
  1143. scnprintf(buf, size, "SCSI HDWQ Statistics\n");
  1144. totin = 0;
  1145. totout = 0;
  1146. for (i = 0; i < phba->cfg_hdw_queue; i++) {
  1147. cstat = &phba->sli4_hba.hdwq[i].scsi_cstat;
  1148. tot = cstat->io_cmpls;
  1149. totin += tot;
  1150. data1 = cstat->input_requests;
  1151. data2 = cstat->output_requests;
  1152. data3 = cstat->control_requests;
  1153. totout += (data1 + data2 + data3);
  1154. scnprintf(tmp, sizeof(tmp), "HDWQ (%d): Rd %016llx Wr %016llx "
  1155. "IO %016llx ", i, data1, data2, data3);
  1156. if (strlcat(buf, tmp, size) >= size)
  1157. goto buffer_done;
  1158. scnprintf(tmp, sizeof(tmp), "Cmpl %016llx OutIO %016llx\n",
  1159. tot, ((data1 + data2 + data3) - tot));
  1160. if (strlcat(buf, tmp, size) >= size)
  1161. goto buffer_done;
  1162. }
  1163. scnprintf(tmp, sizeof(tmp), "Total FCP Cmpl %016llx Issue %016llx "
  1164. "OutIO %016llx\n", totin, totout, totout - totin);
  1165. strlcat(buf, tmp, size);
  1166. buffer_done:
  1167. len = strnlen(buf, size);
  1168. return len;
  1169. }
  1170. void
  1171. lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
  1172. {
  1173. uint64_t seg1, seg2, seg3, seg4;
  1174. uint64_t segsum;
  1175. if (!lpfc_cmd->ts_last_cmd ||
  1176. !lpfc_cmd->ts_cmd_start ||
  1177. !lpfc_cmd->ts_cmd_wqput ||
  1178. !lpfc_cmd->ts_isr_cmpl ||
  1179. !lpfc_cmd->ts_data_io)
  1180. return;
  1181. if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_cmd_start)
  1182. return;
  1183. if (lpfc_cmd->ts_cmd_start < lpfc_cmd->ts_last_cmd)
  1184. return;
  1185. if (lpfc_cmd->ts_cmd_wqput < lpfc_cmd->ts_cmd_start)
  1186. return;
  1187. if (lpfc_cmd->ts_isr_cmpl < lpfc_cmd->ts_cmd_wqput)
  1188. return;
  1189. if (lpfc_cmd->ts_data_io < lpfc_cmd->ts_isr_cmpl)
  1190. return;
  1191. /*
  1192. * Segment 1 - Time from Last FCP command cmpl is handed
  1193. * off to NVME Layer to start of next command.
  1194. * Segment 2 - Time from Driver receives a IO cmd start
  1195. * from NVME Layer to WQ put is done on IO cmd.
  1196. * Segment 3 - Time from Driver WQ put is done on IO cmd
  1197. * to MSI-X ISR for IO cmpl.
  1198. * Segment 4 - Time from MSI-X ISR for IO cmpl to when
  1199. * cmpl is handled off to the NVME Layer.
  1200. */
  1201. seg1 = lpfc_cmd->ts_cmd_start - lpfc_cmd->ts_last_cmd;
  1202. if (seg1 > 5000000) /* 5 ms - for sequential IOs only */
  1203. seg1 = 0;
  1204. /* Calculate times relative to start of IO */
  1205. seg2 = (lpfc_cmd->ts_cmd_wqput - lpfc_cmd->ts_cmd_start);
  1206. segsum = seg2;
  1207. seg3 = lpfc_cmd->ts_isr_cmpl - lpfc_cmd->ts_cmd_start;
  1208. if (segsum > seg3)
  1209. return;
  1210. seg3 -= segsum;
  1211. segsum += seg3;
  1212. seg4 = lpfc_cmd->ts_data_io - lpfc_cmd->ts_cmd_start;
  1213. if (segsum > seg4)
  1214. return;
  1215. seg4 -= segsum;
  1216. phba->ktime_data_samples++;
  1217. phba->ktime_seg1_total += seg1;
  1218. if (seg1 < phba->ktime_seg1_min)
  1219. phba->ktime_seg1_min = seg1;
  1220. else if (seg1 > phba->ktime_seg1_max)
  1221. phba->ktime_seg1_max = seg1;
  1222. phba->ktime_seg2_total += seg2;
  1223. if (seg2 < phba->ktime_seg2_min)
  1224. phba->ktime_seg2_min = seg2;
  1225. else if (seg2 > phba->ktime_seg2_max)
  1226. phba->ktime_seg2_max = seg2;
  1227. phba->ktime_seg3_total += seg3;
  1228. if (seg3 < phba->ktime_seg3_min)
  1229. phba->ktime_seg3_min = seg3;
  1230. else if (seg3 > phba->ktime_seg3_max)
  1231. phba->ktime_seg3_max = seg3;
  1232. phba->ktime_seg4_total += seg4;
  1233. if (seg4 < phba->ktime_seg4_min)
  1234. phba->ktime_seg4_min = seg4;
  1235. else if (seg4 > phba->ktime_seg4_max)
  1236. phba->ktime_seg4_max = seg4;
  1237. lpfc_cmd->ts_last_cmd = 0;
  1238. lpfc_cmd->ts_cmd_start = 0;
  1239. lpfc_cmd->ts_cmd_wqput = 0;
  1240. lpfc_cmd->ts_isr_cmpl = 0;
  1241. lpfc_cmd->ts_data_io = 0;
  1242. }
/**
 * lpfc_debugfs_ioktime_data - Dump IO latency (ktime) statistics to a buffer
 * @vport: The vport to gather IO latency statistics from.
 * @buf: The buffer to dump log into.
 * @size: The maximum amount of data to process.
 *
 * Description:
 * This routine dumps the per-segment IO completion latency (ktime)
 * statistics associated with @vport
 *
 * Return Value:
 * This routine returns the amount of bytes that were dumped into @buf and will
 * not exceed @size.
 **/
  1256. static int
  1257. lpfc_debugfs_ioktime_data(struct lpfc_vport *vport, char *buf, int size)
  1258. {
  1259. struct lpfc_hba *phba = vport->phba;
  1260. int len = 0;
  1261. if (phba->nvmet_support == 0) {
  1262. /* Initiator */
  1263. len += scnprintf(buf + len, PAGE_SIZE - len,
  1264. "ktime %s: Total Samples: %lld\n",
  1265. (phba->ktime_on ? "Enabled" : "Disabled"),
  1266. phba->ktime_data_samples);
  1267. if (phba->ktime_data_samples == 0)
  1268. return len;
  1269. len += scnprintf(
  1270. buf + len, PAGE_SIZE - len,
  1271. "Segment 1: Last Cmd cmpl "
  1272. "done -to- Start of next Cmd (in driver)\n");
  1273. len += scnprintf(
  1274. buf + len, PAGE_SIZE - len,
  1275. "avg:%08lld min:%08lld max %08lld\n",
  1276. div_u64(phba->ktime_seg1_total,
  1277. phba->ktime_data_samples),
  1278. phba->ktime_seg1_min,
  1279. phba->ktime_seg1_max);
  1280. len += scnprintf(
  1281. buf + len, PAGE_SIZE - len,
  1282. "Segment 2: Driver start of Cmd "
  1283. "-to- Firmware WQ doorbell\n");
  1284. len += scnprintf(
  1285. buf + len, PAGE_SIZE - len,
  1286. "avg:%08lld min:%08lld max %08lld\n",
  1287. div_u64(phba->ktime_seg2_total,
  1288. phba->ktime_data_samples),
  1289. phba->ktime_seg2_min,
  1290. phba->ktime_seg2_max);
  1291. len += scnprintf(
  1292. buf + len, PAGE_SIZE - len,
  1293. "Segment 3: Firmware WQ doorbell -to- "
  1294. "MSI-X ISR cmpl\n");
  1295. len += scnprintf(
  1296. buf + len, PAGE_SIZE - len,
  1297. "avg:%08lld min:%08lld max %08lld\n",
  1298. div_u64(phba->ktime_seg3_total,
  1299. phba->ktime_data_samples),
  1300. phba->ktime_seg3_min,
  1301. phba->ktime_seg3_max);
  1302. len += scnprintf(
  1303. buf + len, PAGE_SIZE - len,
  1304. "Segment 4: MSI-X ISR cmpl -to- "
  1305. "Cmd cmpl done\n");
  1306. len += scnprintf(
  1307. buf + len, PAGE_SIZE - len,
  1308. "avg:%08lld min:%08lld max %08lld\n",
  1309. div_u64(phba->ktime_seg4_total,
  1310. phba->ktime_data_samples),
  1311. phba->ktime_seg4_min,
  1312. phba->ktime_seg4_max);
  1313. len += scnprintf(
  1314. buf + len, PAGE_SIZE - len,
  1315. "Total IO avg time: %08lld\n",
  1316. div_u64(phba->ktime_seg1_total +
  1317. phba->ktime_seg2_total +
  1318. phba->ktime_seg3_total +
  1319. phba->ktime_seg4_total,
  1320. phba->ktime_data_samples));
  1321. return len;
  1322. }
  1323. /* NVME Target */
  1324. len += scnprintf(buf + len, PAGE_SIZE-len,
  1325. "ktime %s: Total Samples: %lld %lld\n",
  1326. (phba->ktime_on ? "Enabled" : "Disabled"),
  1327. phba->ktime_data_samples,
  1328. phba->ktime_status_samples);
  1329. if (phba->ktime_data_samples == 0)
  1330. return len;
  1331. len += scnprintf(buf + len, PAGE_SIZE-len,
  1332. "Segment 1: MSI-X ISR Rcv cmd -to- "
  1333. "cmd pass to NVME Layer\n");
  1334. len += scnprintf(buf + len, PAGE_SIZE-len,
  1335. "avg:%08lld min:%08lld max %08lld\n",
  1336. div_u64(phba->ktime_seg1_total,
  1337. phba->ktime_data_samples),
  1338. phba->ktime_seg1_min,
  1339. phba->ktime_seg1_max);
  1340. len += scnprintf(buf + len, PAGE_SIZE-len,
  1341. "Segment 2: cmd pass to NVME Layer- "
  1342. "-to- Driver rcv cmd OP (action)\n");
  1343. len += scnprintf(buf + len, PAGE_SIZE-len,
  1344. "avg:%08lld min:%08lld max %08lld\n",
  1345. div_u64(phba->ktime_seg2_total,
  1346. phba->ktime_data_samples),
  1347. phba->ktime_seg2_min,
  1348. phba->ktime_seg2_max);
  1349. len += scnprintf(buf + len, PAGE_SIZE-len,
  1350. "Segment 3: Driver rcv cmd OP -to- "
  1351. "Firmware WQ doorbell: cmd\n");
  1352. len += scnprintf(buf + len, PAGE_SIZE-len,
  1353. "avg:%08lld min:%08lld max %08lld\n",
  1354. div_u64(phba->ktime_seg3_total,
  1355. phba->ktime_data_samples),
  1356. phba->ktime_seg3_min,
  1357. phba->ktime_seg3_max);
  1358. len += scnprintf(buf + len, PAGE_SIZE-len,
  1359. "Segment 4: Firmware WQ doorbell: cmd "
  1360. "-to- MSI-X ISR for cmd cmpl\n");
  1361. len += scnprintf(buf + len, PAGE_SIZE-len,
  1362. "avg:%08lld min:%08lld max %08lld\n",
  1363. div_u64(phba->ktime_seg4_total,
  1364. phba->ktime_data_samples),
  1365. phba->ktime_seg4_min,
  1366. phba->ktime_seg4_max);
  1367. len += scnprintf(buf + len, PAGE_SIZE-len,
  1368. "Segment 5: MSI-X ISR for cmd cmpl "
  1369. "-to- NVME layer passed cmd done\n");
  1370. len += scnprintf(buf + len, PAGE_SIZE-len,
  1371. "avg:%08lld min:%08lld max %08lld\n",
  1372. div_u64(phba->ktime_seg5_total,
  1373. phba->ktime_data_samples),
  1374. phba->ktime_seg5_min,
  1375. phba->ktime_seg5_max);
  1376. if (phba->ktime_status_samples == 0) {
  1377. len += scnprintf(buf + len, PAGE_SIZE-len,
  1378. "Total: cmd received by MSI-X ISR "
  1379. "-to- cmd completed on wire\n");
  1380. len += scnprintf(buf + len, PAGE_SIZE-len,
  1381. "avg:%08lld min:%08lld "
  1382. "max %08lld\n",
  1383. div_u64(phba->ktime_seg10_total,
  1384. phba->ktime_data_samples),
  1385. phba->ktime_seg10_min,
  1386. phba->ktime_seg10_max);
  1387. return len;
  1388. }
  1389. len += scnprintf(buf + len, PAGE_SIZE-len,
  1390. "Segment 6: NVME layer passed cmd done "
  1391. "-to- Driver rcv rsp status OP\n");
  1392. len += scnprintf(buf + len, PAGE_SIZE-len,
  1393. "avg:%08lld min:%08lld max %08lld\n",
  1394. div_u64(phba->ktime_seg6_total,
  1395. phba->ktime_status_samples),
  1396. phba->ktime_seg6_min,
  1397. phba->ktime_seg6_max);
  1398. len += scnprintf(buf + len, PAGE_SIZE-len,
  1399. "Segment 7: Driver rcv rsp status OP "
  1400. "-to- Firmware WQ doorbell: status\n");
  1401. len += scnprintf(buf + len, PAGE_SIZE-len,
  1402. "avg:%08lld min:%08lld max %08lld\n",
  1403. div_u64(phba->ktime_seg7_total,
  1404. phba->ktime_status_samples),
  1405. phba->ktime_seg7_min,
  1406. phba->ktime_seg7_max);
  1407. len += scnprintf(buf + len, PAGE_SIZE-len,
  1408. "Segment 8: Firmware WQ doorbell: status"
  1409. " -to- MSI-X ISR for status cmpl\n");
  1410. len += scnprintf(buf + len, PAGE_SIZE-len,
  1411. "avg:%08lld min:%08lld max %08lld\n",
  1412. div_u64(phba->ktime_seg8_total,
  1413. phba->ktime_status_samples),
  1414. phba->ktime_seg8_min,
  1415. phba->ktime_seg8_max);
  1416. len += scnprintf(buf + len, PAGE_SIZE-len,
  1417. "Segment 9: MSI-X ISR for status cmpl "
  1418. "-to- NVME layer passed status done\n");
  1419. len += scnprintf(buf + len, PAGE_SIZE-len,
  1420. "avg:%08lld min:%08lld max %08lld\n",
  1421. div_u64(phba->ktime_seg9_total,
  1422. phba->ktime_status_samples),
  1423. phba->ktime_seg9_min,
  1424. phba->ktime_seg9_max);
  1425. len += scnprintf(buf + len, PAGE_SIZE-len,
  1426. "Total: cmd received by MSI-X ISR -to- "
  1427. "cmd completed on wire\n");
  1428. len += scnprintf(buf + len, PAGE_SIZE-len,
  1429. "avg:%08lld min:%08lld max %08lld\n",
  1430. div_u64(phba->ktime_seg10_total,
  1431. phba->ktime_status_samples),
  1432. phba->ktime_seg10_min,
  1433. phba->ktime_seg10_max);
  1434. return len;
  1435. }
  1436. /**
  1437. * lpfc_debugfs_nvmeio_trc_data - Dump NVME IO trace list to a buffer
  1438. * @phba: The phba to gather target node info from.
  1439. * @buf: The buffer to dump log into.
  1440. * @size: The maximum amount of data to process.
  1441. *
  1442. * Description:
  1443. * This routine dumps the NVME IO trace associated with @phba
  1444. *
  1445. * Return Value:
  1446. * This routine returns the amount of bytes that were dumped into @buf and will
  1447. * not exceed @size.
  1448. **/
  1449. static int
  1450. lpfc_debugfs_nvmeio_trc_data(struct lpfc_hba *phba, char *buf, int size)
  1451. {
  1452. struct lpfc_debugfs_nvmeio_trc *dtp;
  1453. int i, state, index, skip;
  1454. int len = 0;
  1455. state = phba->nvmeio_trc_on;
  1456. index = (atomic_read(&phba->nvmeio_trc_cnt) + 1) &
  1457. (phba->nvmeio_trc_size - 1);
  1458. skip = phba->nvmeio_trc_output_idx;
  1459. len += scnprintf(buf + len, size - len,
  1460. "%s IO Trace %s: next_idx %d skip %d size %d\n",
  1461. (phba->nvmet_support ? "NVME" : "NVMET"),
  1462. (state ? "Enabled" : "Disabled"),
  1463. index, skip, phba->nvmeio_trc_size);
  1464. if (!phba->nvmeio_trc || state)
  1465. return len;
  1466. /* trace MUST bhe off to continue */
  1467. for (i = index; i < phba->nvmeio_trc_size; i++) {
  1468. if (skip) {
  1469. skip--;
  1470. continue;
  1471. }
  1472. dtp = phba->nvmeio_trc + i;
  1473. phba->nvmeio_trc_output_idx++;
  1474. if (!dtp->fmt)
  1475. continue;
  1476. len += scnprintf(buf + len, size - len, dtp->fmt,
  1477. dtp->data1, dtp->data2, dtp->data3);
  1478. if (phba->nvmeio_trc_output_idx >= phba->nvmeio_trc_size) {
  1479. phba->nvmeio_trc_output_idx = 0;
  1480. len += scnprintf(buf + len, size - len,
  1481. "Trace Complete\n");
  1482. goto out;
  1483. }
  1484. if (len >= (size - LPFC_DEBUG_OUT_LINE_SZ)) {
  1485. len += scnprintf(buf + len, size - len,
  1486. "Trace Continue (%d of %d)\n",
  1487. phba->nvmeio_trc_output_idx,
  1488. phba->nvmeio_trc_size);
  1489. goto out;
  1490. }
  1491. }
  1492. for (i = 0; i < in