
/drivers/scsi/qla2xxx/qla_dfs.c

http://github.com/mirrors/linux
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;

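/*
 * "tgt_sess" node: dump the target-mode session list for this host.
 * For each fc_port on the vport the port ID, port name (WWPN) and
 * loop ID (handle) are printed, under the target session lock.
 */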
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	unsigned long flags;
	struct fc_port *sess = NULL;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	seq_printf(s, "%s\n", vha->host_str);
	if (tgt) {
		seq_puts(s, "Port ID   Port Name                Handle\n");

		spin_lock_irqsave(&ha->tgt.sess_lock, flags);
		list_for_each_entry(sess, &vha->vp_fcports, list)
			seq_printf(s, "%02x:%02x:%02x  %8phC  %d\n",
			    sess->d_id.b.domain, sess->d_id.b.area,
			    sess->d_id.b.al_pa, sess->port_name,
			    sess->loop_id);
		spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
	}

	return 0;
}

static int
qla2x00_dfs_tgt_sess_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;

	return single_open(file, qla2x00_dfs_tgt_sess_show, vha);
}

static const struct file_operations dfs_tgt_sess_ops = {
	.open		= qla2x00_dfs_tgt_sess_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

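/*
 * "tgt_port_database" node: fetch the firmware GID list via a mailbox
 * command and, for each entry, query the port database to print the
 * port name, port ID and loop ID. The GID list lives in a temporary
 * DMA-coherent buffer that is freed before returning.
 */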
static int
qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	struct gid_list_info *gid_list, *gid;
	dma_addr_t gid_list_dma;
	fc_port_t fc_port;
	int rc, i;
	uint16_t entries, loop_id;
	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

	seq_printf(s, "%s\n", vha->host_str);
	if (tgt) {
		gid_list = dma_alloc_coherent(&ha->pdev->dev,
		    qla2x00_gid_list_size(ha),
		    &gid_list_dma, GFP_KERNEL);
		if (!gid_list) {
			ql_dbg(ql_dbg_user, vha, 0x7018,
			    "DMA allocation failed for %u\n",
			     qla2x00_gid_list_size(ha));
			return 0;
		}

		rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
		    &entries);
		if (rc != QLA_SUCCESS)
			goto out_free_id_list;

		gid = gid_list;

		seq_puts(s, "Port Name	Port ID 	Loop ID\n");

		for (i = 0; i < entries; i++) {
			loop_id = le16_to_cpu(gid->loop_id);
			memset(&fc_port, 0, sizeof(fc_port_t));

			fc_port.loop_id = loop_id;

			rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
			seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
				fc_port.port_name, fc_port.d_id.b.domain,
				fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
				fc_port.loop_id);
			gid = (void *)gid + ha->gid_list_info_size;
		}
out_free_id_list:
		dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
		    gid_list, gid_list_dma);
	}

	return 0;
}

static int
qla2x00_dfs_tgt_port_database_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;

	return single_open(file, qla2x00_dfs_tgt_port_database_show, vha);
}

static const struct file_operations dfs_tgt_port_database_ops = {
	.open		= qla2x00_dfs_tgt_port_database_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

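/*
 * "fw_resource_count" node: issue the firmware resource-count mailbox
 * command and report the exchange, IOCB and XCB buffer counts, plus
 * the VP and FCF limits, from the returned mailbox registers.
 */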
static int
qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	uint16_t mb[MAX_IOCB_MB_REG];
	int rc;

	rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
	if (rc != QLA_SUCCESS) {
		seq_printf(s, "Mailbox Command failed %d, mb %#x", rc, mb[0]);
	} else {
		seq_puts(s, "FW Resource count\n\n");
		seq_printf(s, "Original TGT exchg count[%d]\n", mb[1]);
		seq_printf(s, "Current TGT exchg count[%d]\n", mb[2]);
		seq_printf(s, "Current Initiator Exchange count[%d]\n", mb[3]);
		seq_printf(s, "Original Initiator Exchange count[%d]\n", mb[6]);
		seq_printf(s, "Current IOCB count[%d]\n", mb[7]);
		seq_printf(s, "Original IOCB count[%d]\n", mb[10]);
		seq_printf(s, "MAX VP count[%d]\n", mb[11]);
		seq_printf(s, "MAX FCF count[%d]\n", mb[12]);
		seq_printf(s, "Current free pageable XCB buffer cnt[%d]\n",
		    mb[20]);
		seq_printf(s, "Original Initiator fast XCB buffer cnt[%d]\n",
		    mb[21]);
		seq_printf(s, "Current free Initiator fast XCB buffer cnt[%d]\n",
		    mb[22]);
		seq_printf(s, "Original Target fast XCB buffer cnt[%d]\n",
		    mb[23]);
	}

	return 0;
}

static int
qla_dfs_fw_resource_cnt_open(struct inode *inode, struct file *file)
{
	struct scsi_qla_host *vha = inode->i_private;

	return single_open(file, qla_dfs_fw_resource_cnt_show, vha);
}

static const struct file_operations dfs_fw_resource_cnt_ops = {
	.open           = qla_dfs_fw_resource_cnt_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

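/*
 * "tgt_counters" node: accumulate the per-queue-pair target-mode
 * counters from the base qpair and every active qpair, then print the
 * totals followed by the per-host DIF (protection) statistics.
 */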
static int
qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_qpair *qpair = vha->hw->base_qpair;
	uint64_t qla_core_sbt_cmd, core_qla_que_buf, qla_core_ret_ctio,
		core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
		num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
	u16 i;

	qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
	core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
	qla_core_ret_ctio = qpair->tgt_counters.qla_core_ret_ctio;
	core_qla_snd_status = qpair->tgt_counters.core_qla_snd_status;
	qla_core_ret_sta_ctio = qpair->tgt_counters.qla_core_ret_sta_ctio;
	core_qla_free_cmd = qpair->tgt_counters.core_qla_free_cmd;
	num_q_full_sent = qpair->tgt_counters.num_q_full_sent;
	num_alloc_iocb_failed = qpair->tgt_counters.num_alloc_iocb_failed;
	num_term_xchg_sent = qpair->tgt_counters.num_term_xchg_sent;

	for (i = 0; i < vha->hw->max_qpairs; i++) {
		qpair = vha->hw->queue_pair_map[i];
		if (!qpair)
			continue;
		qla_core_sbt_cmd += qpair->tgt_counters.qla_core_sbt_cmd;
		core_qla_que_buf += qpair->tgt_counters.core_qla_que_buf;
		qla_core_ret_ctio += qpair->tgt_counters.qla_core_ret_ctio;
		core_qla_snd_status += qpair->tgt_counters.core_qla_snd_status;
		qla_core_ret_sta_ctio +=
		    qpair->tgt_counters.qla_core_ret_sta_ctio;
		core_qla_free_cmd += qpair->tgt_counters.core_qla_free_cmd;
		num_q_full_sent += qpair->tgt_counters.num_q_full_sent;
		num_alloc_iocb_failed +=
		    qpair->tgt_counters.num_alloc_iocb_failed;
		num_term_xchg_sent += qpair->tgt_counters.num_term_xchg_sent;
	}

	seq_puts(s, "Target Counters\n");
	seq_printf(s, "qla_core_sbt_cmd = %lld\n",
		qla_core_sbt_cmd);
	seq_printf(s, "qla_core_ret_sta_ctio = %lld\n",
		qla_core_ret_sta_ctio);
	seq_printf(s, "qla_core_ret_ctio = %lld\n",
		qla_core_ret_ctio);
	seq_printf(s, "core_qla_que_buf = %lld\n",
		core_qla_que_buf);
	seq_printf(s, "core_qla_snd_status = %lld\n",
		core_qla_snd_status);
	seq_printf(s, "core_qla_free_cmd = %lld\n",
		core_qla_free_cmd);
	seq_printf(s, "num alloc iocb failed = %lld\n",
		num_alloc_iocb_failed);
	seq_printf(s, "num term exchange sent = %lld\n",
		num_term_xchg_sent);
	seq_printf(s, "num Q full sent = %lld\n",
		num_q_full_sent);

	/* DIF stats */
	seq_printf(s, "DIF Inp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_bytes);
	seq_printf(s, "DIF Outp Bytes = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_bytes);
	seq_printf(s, "DIF Inp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_input_requests);
	seq_printf(s, "DIF Outp Req = %lld\n",
		vha->qla_stats.qla_dif_stats.dif_output_requests);
	seq_printf(s, "DIF Guard err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_guard_err);
	seq_printf(s, "DIF Ref tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
	seq_printf(s, "DIF App tag err = %d\n",
		vha->qla_stats.qla_dif_stats.dif_app_tag_err);
	return 0;
}

static int
qla_dfs_tgt_counters_open(struct inode *inode, struct file *file)
{
	struct scsi_qla_host *vha = inode->i_private;

	return single_open(file, qla_dfs_tgt_counters_show, vha);
}

static const struct file_operations dfs_tgt_counters_ops = {
	.open           = qla_dfs_tgt_counters_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

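/*
 * "fce" node: dump the Fibre Channel Event (FCE) trace buffer. The
 * write pointer, DMA base, enable registers and the raw trace words
 * are printed while holding fce_mutex.
 */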
static int
qla2x00_dfs_fce_show(struct seq_file *s, void *unused)
{
	scsi_qla_host_t *vha = s->private;
	uint32_t cnt;
	uint32_t *fce;
	uint64_t fce_start;
	struct qla_hw_data *ha = vha->hw;

	mutex_lock(&ha->fce_mutex);

	seq_puts(s, "FCE Trace Buffer\n");
	seq_printf(s, "In Pointer = %llx\n\n", (unsigned long long)ha->fce_wr);
	seq_printf(s, "Base = %llx\n\n", (unsigned long long) ha->fce_dma);
	seq_puts(s, "FCE Enable Registers\n");
	seq_printf(s, "%08x %08x %08x %08x %08x %08x\n",
	    ha->fce_mb[0], ha->fce_mb[2], ha->fce_mb[3], ha->fce_mb[4],
	    ha->fce_mb[5], ha->fce_mb[6]);

	fce = (uint32_t *) ha->fce;
	fce_start = (unsigned long long) ha->fce_dma;
	for (cnt = 0; cnt < fce_calc_size(ha->fce_bufs) / 4; cnt++) {
		if (cnt % 8 == 0)
			seq_printf(s, "\n%llx: ",
			    (unsigned long long)((cnt * 4) + fce_start));
		else
			seq_putc(s, ' ');
		seq_printf(s, "%08x", *fce++);
	}

	seq_puts(s, "\nEnd\n");

	mutex_unlock(&ha->fce_mutex);

	return 0;
}

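/*
 * FCE tracing is paused in open so the buffer is stable while it is
 * read, and re-armed (with the buffer cleared) in release.
 */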
static int
qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (!ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Pause tracing to flush FCE buffers. */
	rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
	if (rval)
		ql_dbg(ql_dbg_user, vha, 0x705c,
		    "DebugFS: Unable to disable FCE (%d).\n", rval);

	ha->flags.fce_enabled = 0;

	mutex_unlock(&ha->fce_mutex);
out:
	return single_open(file, qla2x00_dfs_fce_show, vha);
}

static int
qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
{
	scsi_qla_host_t *vha = inode->i_private;
	struct qla_hw_data *ha = vha->hw;
	int rval;

	if (ha->flags.fce_enabled)
		goto out;

	mutex_lock(&ha->fce_mutex);

	/* Re-enable FCE tracing. */
	ha->flags.fce_enabled = 1;
	memset(ha->fce, 0, fce_calc_size(ha->fce_bufs));
	rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
	    ha->fce_mb, &ha->fce_bufs);
	if (rval) {
		ql_dbg(ql_dbg_user, vha, 0x700d,
		    "DebugFS: Unable to reinitialize FCE (%d).\n", rval);
		ha->flags.fce_enabled = 0;
	}

	mutex_unlock(&ha->fce_mutex);
out:
	return single_release(inode, file);
}

static const struct file_operations dfs_fce_ops = {
	.open		= qla2x00_dfs_fce_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= qla2x00_dfs_fce_release,
};

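/*
 * "naqp" node: read or set the number of active queue pairs used for
 * target mode. Writes are only accepted on adapters with MQ support,
 * must stay below max_qpairs, and trigger a qpair table reset.
 */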
static int
qla_dfs_naqp_show(struct seq_file *s, void *unused)
{
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;

	seq_printf(s, "%d\n", ha->tgt.num_act_qpairs);
	return 0;
}

static int
qla_dfs_naqp_open(struct inode *inode, struct file *file)
{
	struct scsi_qla_host *vha = inode->i_private;

	return single_open(file, qla_dfs_naqp_show, vha);
}

static ssize_t
qla_dfs_naqp_write(struct file *file, const char __user *buffer,
    size_t count, loff_t *pos)
{
	struct seq_file *s = file->private_data;
	struct scsi_qla_host *vha = s->private;
	struct qla_hw_data *ha = vha->hw;
	char *buf;
	int rc = 0;
	unsigned long num_act_qp;

	if (!(IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))) {
		pr_err("host%ld: this adapter does not support Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}

	if (!vha->flags.qpairs_available) {
		pr_err("host%ld: Driver is not setup with Multi Q.",
		    vha->host_no);
		return -EINVAL;
	}
	buf = memdup_user_nul(buffer, count);
	if (IS_ERR(buf)) {
		pr_err("host%ld: fail to copy user buffer.",
		    vha->host_no);
		return PTR_ERR(buf);
	}

	num_act_qp = simple_strtoul(buf, NULL, 0);

	if (num_act_qp >= vha->hw->max_qpairs) {
		pr_err("User set invalid number of qpairs %lu. Max = %d",
		    num_act_qp, vha->hw->max_qpairs);
		rc = -EINVAL;
		goto out_free;
	}

	if (num_act_qp != ha->tgt.num_act_qpairs) {
		ha->tgt.num_act_qpairs = num_act_qp;
		qlt_clr_qp_table(vha);
	}
	rc = count;
out_free:
	kfree(buf);
	return rc;
}

static const struct file_operations dfs_naqp_ops = {
	.open		= qla_dfs_naqp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= qla_dfs_naqp_write,
};

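/*
 * Create the shared qla2xxx debugfs root (reference counted across
 * adapters), a per-host directory, and the nodes defined above. Nothing
 * is created unless the ISP supports FCE; the "naqp" node is added only
 * on MQ-capable adapters.
 */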
int
qla2x00_dfs_setup(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) &&
	    !IS_QLA27XX(ha) && !IS_QLA28XX(ha))
		goto out;
	if (!ha->fce)
		goto out;

	if (qla2x00_dfs_root)
		goto create_dir;

	atomic_set(&qla2x00_dfs_root_count, 0);
	qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);

create_dir:
	if (ha->dfs_dir)
		goto create_nodes;

	mutex_init(&ha->fce_mutex);
	ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);

	atomic_inc(&qla2x00_dfs_root_count);

create_nodes:
	ha->dfs_fw_resource_cnt = debugfs_create_file("fw_resource_count",
	    S_IRUSR, ha->dfs_dir, vha, &dfs_fw_resource_cnt_ops);

	ha->dfs_tgt_counters = debugfs_create_file("tgt_counters", S_IRUSR,
	    ha->dfs_dir, vha, &dfs_tgt_counters_ops);

	ha->tgt.dfs_tgt_port_database = debugfs_create_file("tgt_port_database",
	    S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_port_database_ops);

	ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
	    &dfs_fce_ops);

	ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
		S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);

	if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
		ha->tgt.dfs_naqp = debugfs_create_file("naqp",
		    0400, ha->dfs_dir, vha, &dfs_naqp_ops);
out:
	return 0;
}

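/*
 * Remove the per-host debugfs nodes and directory, and drop the shared
 * root once the last host directory is gone.
 */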
int
qla2x00_dfs_remove(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;

	if (ha->tgt.dfs_naqp) {
		debugfs_remove(ha->tgt.dfs_naqp);
		ha->tgt.dfs_naqp = NULL;
	}

	if (ha->tgt.dfs_tgt_sess) {
		debugfs_remove(ha->tgt.dfs_tgt_sess);
		ha->tgt.dfs_tgt_sess = NULL;
	}

	if (ha->tgt.dfs_tgt_port_database) {
		debugfs_remove(ha->tgt.dfs_tgt_port_database);
		ha->tgt.dfs_tgt_port_database = NULL;
	}

	if (ha->dfs_fw_resource_cnt) {
		debugfs_remove(ha->dfs_fw_resource_cnt);
		ha->dfs_fw_resource_cnt = NULL;
	}

	if (ha->dfs_tgt_counters) {
		debugfs_remove(ha->dfs_tgt_counters);
		ha->dfs_tgt_counters = NULL;
	}

	if (ha->dfs_fce) {
		debugfs_remove(ha->dfs_fce);
		ha->dfs_fce = NULL;
	}

	if (ha->dfs_dir) {
		debugfs_remove(ha->dfs_dir);
		ha->dfs_dir = NULL;
		atomic_dec(&qla2x00_dfs_root_count);
	}

	if (atomic_read(&qla2x00_dfs_root_count) == 0 &&
	    qla2x00_dfs_root) {
		debugfs_remove(qla2x00_dfs_root);
		qla2x00_dfs_root = NULL;
	}

	return 0;
}