/drivers/scsi/qla2xxx/qla_dfs.c

http://github.com/mirrors/linux · C · 531 lines · 433 code · 89 blank · 9 comment · 45 complexity · 70c2103c7fa8588a8e31c4f9c059327b MD5 · raw file

  1. /*
  2. * QLogic Fibre Channel HBA Driver
  3. * Copyright (c) 2003-2014 QLogic Corporation
  4. *
  5. * See LICENSE.qla2xxx for copyright and licensing details.
  6. */
  7. #include "qla_def.h"
  8. #include <linux/debugfs.h>
  9. #include <linux/seq_file.h>
/* Driver-wide debugfs root directory shared by all hosts, plus a count of
 * the per-host subdirectories created beneath it; the root is removed in
 * qla2x00_dfs_remove() once the count drops back to zero. */
static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
/*
 * Seq-file show for the "tgt_sess" node: print Port ID, port name and
 * loop handle for every fc_port on this vhost. Output is produced only
 * when target mode is active (vha->vha_tgt.qla_tgt is set).
 */
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct fc_port *sess = NULL;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	seq_printf(s, "%s\n", vha->host_str);
	if (tgt) {
		seq_puts(s, "Port ID Port Name Handle\n");

		/* sess_lock (irqsave: sessions may be added/removed from
		 * interrupt context) keeps vp_fcports stable while walking. */
		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		list_for_each_entry(sess, &vha->vp_fcports, list)
			seq_printf(s, "%02x:%02x:%02x %8phC %d\n",
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa, sess->port_name,
			    sess->loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	return 0;
}
  33. static int
  34. qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
  35. {
  36. scsi_qla_host_t *vha = inode->i_private;
  37. return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
  38. }
/* File operations for the read-only "tgt_sess" debugfs node. */
static const struct file_operations dfs_tgt_sess_ops = {
	.open = qla2x00_dfs_tgt_sess_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
  45. static int
  46. qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
  47. {
  48. scsi_qla_host_t *vha = s->private;
  49. struct qla_hw_data *ha = vha->hw;
  50. struct gid_list_info *gid_list, *gid;
  51. dma_addr_t gid_list_dma;
  52. fc_port_t fc_port;
  53. int rc, i;
  54. uint16_t entries, loop_id;
  55. struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
  56. seq_printf(s, "%s\n", vha->host_str);
  57. if (tgt) {
  58. gid_list = dma_alloc_coherent(&ha->pdev->dev,
  59. qla2x00_gid_list_size(ha),
  60. &gid_list_dma, GFP_KERNEL);
  61. if (!gid_list) {
  62. ql_dbg(ql_dbg_user, vha, 0x7018,
  63. "DMA allocation failed for %u\n",
  64. qla2x00_gid_list_size(ha));
  65. return 0;
  66. }
  67. rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
  68. &entries);
  69. if (rc != QLA_SUCCESS)
  70. goto out_free_id_list;
  71. gid = gid_list;
  72. seq_puts(s, "Port Name Port ID Loop ID\n");
  73. for (i = 0; i < entries; i++) {
  74. loop_id = le16_to_cpu(gid->loop_id);
  75. memset(&fc_port, 0, sizeof(fc_port_t));
  76. fc_port.loop_id = loop_id;
  77. rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
  78. seq_printf(s, "%8phC %02x%02x%02x %d\n",
  79. fc_port.port_name, fc_port.d_id.b.domain,
  80. fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
  81. fc_port.loop_id);
  82. gid = (void *)gid + ha->gid_list_info_size;
  83. }
  84. out_free_id_list:
  85. dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
  86. gid_list, gid_list_dma);
  87. }
  88. return 0;
  89. }
  90. static int
  91. qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
  92. {
  93. scsi_qla_host_t *vha = inode->i_private;
  94. return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
  95. }
/* File operations for the read-only "tgt_port_database" debugfs node. */
static const struct file_operations dfs_tgt_port_database_ops = {
	.open = qla2x00_dfs_tgt_port_database_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Seq-file show for the "fw_resource_count" node: issue the firmware
 * resource-count mailbox command and print the exchange, IOCB and XCB
 * buffer counts it returns. Each mailbox register's meaning is the
 * label printed alongside it; on mailbox failure only the error and
 * mb[0] status are reported.
 */
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	return 0;
}
  132. static int
  133. qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
  134. {
  135. struct scsi_qla_host *vha = inode->i_private;
  136. return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
  137. }
/* File operations for the read-only "fw_resource_count" debugfs node. */
static const struct file_operations dfs_fw_resource_cnt_ops = {
	.open = qla_dfs_fw_resource_cnt_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Seq-file show for the "tgt_counters" node: sum the target-mode
 * counters over the base queue pair plus every entry in queue_pair_map,
 * then print the totals followed by the per-host DIF statistics.
 *
 * NOTE(review): counters are read without locking, so a snapshot taken
 * while I/O is active may be slightly inconsistent across fields —
 * presumably acceptable for a debugfs statistics dump; confirm.
 */
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;

	/* Seed totals from the base queue pair. */
	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	/* Accumulate the additional queue pairs; map slots may be sparse. */
	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
	    qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
	    qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
	    qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
	    core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
	    core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
	    core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
	    num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
	    num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
	    num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
	    vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
	    vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
	    vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
	    vha->qla_stats.qla_dif_stats.dif_app_tag_err);
	return 0;
}
  214. static int
  215. qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
  216. {
  217. struct scsi_qla_host *vha = inode->i_private;
  218. return single_open(file, qla_dfs_tgt_counters_show, vha);
  219. }
/* File operations for the read-only "tgt_counters" debugfs node. */
static const struct file_operations dfs_tgt_counters_ops = {
	.open = qla_dfs_tgt_counters_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/*
 * Seq-file show for the "fce" node: dump the Fibre Channel Event trace
 * buffer — write pointer, DMA base, the FCE enable mailbox registers,
 * then the whole buffer as 32-bit words, eight per row, each row
 * prefixed with its DMA address. fce_mutex serializes the dump against
 * the trace disable/re-enable done in the open/release hooks.
 */
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	seq_puts(s, "FCE Trace Buffer\n");
	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
	seq_puts(s, "FCE Enable Registers\n");
	/* NOTE(review): fce_mb[1] is deliberately skipped — matches the
	 * longstanding output format; confirm against FW docs. */
	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
	    ha->fce_mb[5], ha->fce_mb[6]);

	fce = (uint32_t *) ha->fce;
	fce_start = (unsigned long long) ha->fce_dma;
	/* Buffer length in 32-bit words; new row (with address) every 8. */
	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
		if (cnt % 8 == 0)
			seq_printf(s, "\n%llx: ",
			    (unsigned long long)((cnt * 4) + fce_start));
		else
			seq_putc(s, ' ');
		seq_printf(s, "%08x", *fce++);
	}

	seq_puts(s, "\nEnd\n");

	mutex_unlock(&ha->fce_mutex);
	return 0;
}
/*
 * debugfs open for the "fce" node. If FCE tracing is currently enabled
 * it is paused first (under fce_mutex) so the trace buffer is stable
 * while the show routine dumps it; tracing is re-armed in the release
 * hook. A failure to disable is logged but not fatal — the dump
 * proceeds regardless.
 */
static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		    "DebugFS: Unable to disable FCE (%d).\n", rval);
	/* Mark disabled even on failure so release knows to re-enable. */
	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}
/*
 * debugfs release for the "fce" node. If tracing was paused by the open
 * hook (fce_enabled cleared), zero the buffer and re-enable the trace
 * under fce_mutex; if it is somehow already enabled, skip straight to
 * releasing the seq_file. On re-enable failure the flag is cleared
 * again so the driver state matches the hardware.
 */
static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}
/* File operations for the "fce" node; custom release re-arms tracing. */
static const struct file_operations dfs_fce_ops = {
	.open = qla2x00_dfs_fce_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = qla2x00_dfs_fce_release,
};
  304. static int
  305. qla_dfs_naqp_show(struct seq_file *s, void *unused)
  306. {
  307. struct scsi_qla_host *vha = s->private;
  308. struct qla_hw_data *ha = vha->hw;
  309. seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
  310. return 0;
  311. }
  312. static int
  313. qla_dfs_naqp_open(struct inode *inode, struct file *file)
  314. {
  315. struct scsi_qla_host *vha = inode->i_private;
  316. return single_open(file, qla_dfs_naqp_show, vha);
  317. }
/*
 * debugfs write for "naqp": set the number of active target-mode queue
 * pairs. Valid only on Multi-Queue-capable ISPs (83xx/27xx/28xx) with
 * queue pairs actually set up. When the value changes it is stored and
 * the qpair table is cleared so sessions are redistributed.
 *
 * Returns the number of bytes consumed on success, negative errno on
 * unsupported hardware, bad input, or copy failure.
 *
 * NOTE(review): simple_strtoul() stops at the first non-digit, which is
 * what lets "4\n" from `echo` parse cleanly; kstrtoul() would reject the
 * trailing newline, so the older parser appears intentional — confirm.
 */
static ssize_t
qla_dfs_naqp_write(struct file *file, const char __user *buffer,
	size_t count, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	char *buf;
	int rc = 0;
	unsigned long num_act_qp;

	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
		pr_err("host%ld: this adapter does not support Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}

	if (!vha->flags.qpairs_available) {
		pr_err("host%ld: Driver is not setup with Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}

	/* NUL-terminated kernel copy of the user buffer; freed below. */
	buf = memdup_user_nul(buffer, count);
	if (IS_ERR(buf)) {
		pr_err("host%ld: fail to copy user buffer.",
		    vha->host_no);
		return PTR_ERR(buf);
	}

	num_act_qp = simple_strtoul(buf, NULL, 0);

	if (num_act_qp >= vha->hw->max_qpairs) {
		pr_err("User set invalid number of qpairs %lu. Max = %d",
		    num_act_qp, vha->hw->max_qpairs);
		rc = -EINVAL;
		goto out_free;
	}

	if (num_act_qp != ha->tgt.num_act_qpairs) {
		ha->tgt.num_act_qpairs = num_act_qp;
		qlt_clr_qp_table(vha);
	}
	rc = count;
out_free:
	kfree(buf);
	return rc;
}
/* File operations for the read/write "naqp" debugfs node. */
static const struct file_operations dfs_naqp_ops = {
	.open = qla_dfs_naqp_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = qla_dfs_naqp_write,
};
/*
 * Create this host's debugfs hierarchy:
 *   <debugfs>/<driver>/<host_str>/{fw_resource_count, tgt_counters,
 *       tgt_port_database, fce, tgt_sess[, naqp]}
 *
 * Nodes are created only on ISPs listed below and only when an FCE
 * buffer has been allocated. The driver-level root directory is shared
 * across hosts and reference-counted via qla2x00_dfs_root_count. The
 * goto ladder makes each stage idempotent: an already-present root or
 * per-host directory is reused rather than re-created.
 *
 * Always returns 0 — debugfs creation failures are treated as
 * non-fatal, and the debugfs_create_* results are not error-checked.
 */
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;
	if (!ha->fce)
		goto out;

	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);

	/* naqp is writable and only meaningful on Multi-Queue ISPs. */
	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
out:
	return 0;
}
  403. int
  404. qla2x00_dfs_remove(scsi_qla_host_t *vha)
  405. {
  406. struct qla_hw_data *ha = vha->hw;
  407. if (ha->tgt.dfs_naqp) {
  408. debugfs_remove(ha->tgt.dfs_naqp);
  409. ha->tgt.dfs_naqp = NULL;
  410. }
  411. if (ha->tgt.dfs_tgt_sess) {
  412. debugfs_remove(ha->tgt.dfs_tgt_sess);
  413. ha->tgt.dfs_tgt_sess = NULL;
  414. }
  415. if (ha->tgt.dfs_tgt_port_database) {
  416. debugfs_remove(ha->tgt.dfs_tgt_port_database);
  417. ha->tgt.dfs_tgt_port_database = NULL;
  418. }
  419. if (ha->dfs_fw_resource_cnt) {
  420. debugfs_remove(ha->dfs_fw_resource_cnt);
  421. ha->dfs_fw_resource_cnt = NULL;
  422. }
  423. if (ha->dfs_tgt_counters) {
  424. debugfs_remove(ha->dfs_tgt_counters);
  425. ha->dfs_tgt_counters = NULL;
  426. }
  427. if (ha->dfs_fce) {
  428. debugfs_remove(ha->dfs_fce);
  429. ha->dfs_fce = NULL;
  430. }
  431. if (ha->dfs_dir) {
  432. debugfs_remove(ha->dfs_dir);
  433. ha->dfs_dir = NULL;
  434. atomic_dec(&qla2x00_dfs_root_count);
  435. }
  436. if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
  437. qla2x00_dfs_root) {
  438. debugfs_remove(qla2x00_dfs_root);
  439. qla2x00_dfs_root = NULL;
  440. }
  441. return 0;
  442. }