/drivers/infiniband/hw/cxgb4/cm.c

https://bitbucket.org/cresqo/cm7-p500-kernel
C | 2376 lines | 1917 code | 279 blank | 180 comment | 181 complexity

   1/*
   2 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
   3 *
   4 * This software is available to you under a choice of one of two
   5 * licenses.  You may choose to be licensed under the terms of the GNU
   6 * General Public License (GPL) Version 2, available from the file
   7 * COPYING in the main directory of this source tree, or the
   8 * OpenIB.org BSD license below:
   9 *
  10 *     Redistribution and use in source and binary forms, with or
  11 *     without modification, are permitted provided that the following
  12 *     conditions are met:
  13 *
  14 *      - Redistributions of source code must retain the above
  15 *	  copyright notice, this list of conditions and the following
  16 *	  disclaimer.
  17 *
  18 *      - Redistributions in binary form must reproduce the above
  19 *	  copyright notice, this list of conditions and the following
  20 *	  disclaimer in the documentation and/or other materials
  21 *	  provided with the distribution.
  22 *
  23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30 * SOFTWARE.
  31 */
  32#include <linux/module.h>
  33#include <linux/list.h>
  34#include <linux/workqueue.h>
  35#include <linux/skbuff.h>
  36#include <linux/timer.h>
  37#include <linux/notifier.h>
  38#include <linux/inetdevice.h>
  39#include <linux/ip.h>
  40#include <linux/tcp.h>
  41
  42#include <net/neighbour.h>
  43#include <net/netevent.h>
  44#include <net/route.h>
  45
  46#include "iw_cxgb4.h"
  47
  48static char *states[] = {
  49	"idle",
  50	"listen",
  51	"connecting",
  52	"mpa_wait_req",
  53	"mpa_req_sent",
  54	"mpa_req_rcvd",
  55	"mpa_rep_sent",
  56	"fpdu_mode",
  57	"aborting",
  58	"closing",
  59	"moribund",
  60	"dead",
  61	NULL,
  62};
  63
  64int c4iw_max_read_depth = 8;
  65module_param(c4iw_max_read_depth, int, 0644);
  66MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
  67
  68static int enable_tcp_timestamps;
  69module_param(enable_tcp_timestamps, int, 0644);
  70MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
  71
  72static int enable_tcp_sack;
  73module_param(enable_tcp_sack, int, 0644);
  74MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
  75
  76static int enable_tcp_window_scaling = 1;
  77module_param(enable_tcp_window_scaling, int, 0644);
  78MODULE_PARM_DESC(enable_tcp_window_scaling,
  79		 "Enable tcp window scaling (default=1)");
  80
  81int c4iw_debug;
  82module_param(c4iw_debug, int, 0644);
  83MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
  84
  85static int peer2peer;
  86module_param(peer2peer, int, 0644);
  87MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
  88
  89static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
  90module_param(p2p_type, int, 0644);
  91MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
  92			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");
  93
  94static int ep_timeout_secs = 60;
  95module_param(ep_timeout_secs, int, 0644);
  96MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
  97				   "in seconds (default=60)");
  98
  99static int mpa_rev = 1;
 100module_param(mpa_rev, int, 0644);
 101MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
 102		 "1 is spec compliant. (default=1)");
 103
 104static int markers_enabled;
 105module_param(markers_enabled, int, 0644);
 106MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
 107
 108static int crc_enabled = 1;
 109module_param(crc_enabled, int, 0644);
 110MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
 111
 112static int rcv_win = 256 * 1024;
 113module_param(rcv_win, int, 0644);
 114MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
 115
 116static int snd_win = 32 * 1024;
 117module_param(snd_win, int, 0644);
 118MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
 119
 120static struct workqueue_struct *workq;
 121
 122static struct sk_buff_head rxq;
 123
 124static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
 125static void ep_timeout(unsigned long arg);
 126static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 127
 128static LIST_HEAD(timeout_list);
 129static spinlock_t timeout_lock;
 130
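/*
 * Endpoint timer helpers: start_ep_timer() takes a reference on the
 * endpoint when it first arms the timer (re-arming a pending timer does
 * not take another), and stop_ep_timer() drops that reference when it
 * cancels the timer.
 */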
 131static void start_ep_timer(struct c4iw_ep *ep)
 132{
 133	PDBG("%s ep %p\n", __func__, ep);
 134	if (timer_pending(&ep->timer)) {
 135		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
 136		del_timer_sync(&ep->timer);
 137	} else
 138		c4iw_get_ep(&ep->com);
 139	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
 140	ep->timer.data = (unsigned long)ep;
 141	ep->timer.function = ep_timeout;
 142	add_timer(&ep->timer);
 143}
 144
 145static void stop_ep_timer(struct c4iw_ep *ep)
 146{
 147	PDBG("%s ep %p\n", __func__, ep);
 148	if (!timer_pending(&ep->timer)) {
  149		printk(KERN_ERR "%s timer stopped when it's not running! "
 150		       "ep %p state %u\n", __func__, ep, ep->com.state);
 151		WARN_ON(1);
 152		return;
 153	}
 154	del_timer_sync(&ep->timer);
 155	c4iw_put_ep(&ep->com);
 156}
 157
 158static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
 159		  struct l2t_entry *l2e)
 160{
 161	int	error = 0;
 162
 163	if (c4iw_fatal_error(rdev)) {
 164		kfree_skb(skb);
 165		PDBG("%s - device in error state - dropping\n", __func__);
 166		return -EIO;
 167	}
 168	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
 169	if (error < 0)
 170		kfree_skb(skb);
 171	return error;
 172}
 173
 174int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
 175{
 176	int	error = 0;
 177
 178	if (c4iw_fatal_error(rdev)) {
 179		kfree_skb(skb);
 180		PDBG("%s - device in error state - dropping\n", __func__);
 181		return -EIO;
 182	}
 183	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
 184	if (error < 0)
 185		kfree_skb(skb);
 186	return error;
 187}
 188
 189static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
 190{
 191	struct cpl_tid_release *req;
 192
 193	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
 194	if (!skb)
 195		return;
 196	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
 197	INIT_TP_WR(req, hwtid);
 198	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
 199	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
 200	c4iw_ofld_send(rdev, skb);
 201	return;
 202}
 203
 204static void set_emss(struct c4iw_ep *ep, u16 opt)
 205{
 206	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
 207	ep->mss = ep->emss;
 208	if (GET_TCPOPT_TSTAMP(opt))
 209		ep->emss -= 12;
 210	if (ep->emss < 128)
 211		ep->emss = 128;
 212	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
 213	     ep->mss, ep->emss);
 214}
 215
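/*
 * Endpoint connection state is protected by epc->lock: state_read() and
 * state_set() take the lock themselves, while __state_set() is for
 * callers that already hold it.
 */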
 216static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
 217{
 218	unsigned long flags;
 219	enum c4iw_ep_state state;
 220
 221	spin_lock_irqsave(&epc->lock, flags);
 222	state = epc->state;
 223	spin_unlock_irqrestore(&epc->lock, flags);
 224	return state;
 225}
 226
 227static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 228{
 229	epc->state = new;
 230}
 231
 232static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
 233{
 234	unsigned long flags;
 235
 236	spin_lock_irqsave(&epc->lock, flags);
 237	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
 238	__state_set(epc, new);
 239	spin_unlock_irqrestore(&epc->lock, flags);
 240	return;
 241}
 242
 243static void *alloc_ep(int size, gfp_t gfp)
 244{
 245	struct c4iw_ep_common *epc;
 246
 247	epc = kzalloc(size, gfp);
 248	if (epc) {
 249		kref_init(&epc->kref);
 250		spin_lock_init(&epc->lock);
 251		init_waitqueue_head(&epc->waitq);
 252	}
 253	PDBG("%s alloc ep %p\n", __func__, epc);
 254	return epc;
 255}
 256
 257void _c4iw_free_ep(struct kref *kref)
 258{
 259	struct c4iw_ep *ep;
 260
 261	ep = container_of(kref, struct c4iw_ep, com.kref);
 262	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
 263	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
 264		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 265		dst_release(ep->dst);
 266		cxgb4_l2t_release(ep->l2t);
 267	}
 268	kfree(ep);
 269}
 270
 271static void release_ep_resources(struct c4iw_ep *ep)
 272{
 273	set_bit(RELEASE_RESOURCES, &ep->com.flags);
 274	c4iw_put_ep(&ep->com);
 275}
 276
 277static int status2errno(int status)
 278{
 279	switch (status) {
 280	case CPL_ERR_NONE:
 281		return 0;
 282	case CPL_ERR_CONN_RESET:
 283		return -ECONNRESET;
 284	case CPL_ERR_ARP_MISS:
 285		return -EHOSTUNREACH;
 286	case CPL_ERR_CONN_TIMEDOUT:
 287		return -ETIMEDOUT;
 288	case CPL_ERR_TCAM_FULL:
 289		return -ENOMEM;
 290	case CPL_ERR_CONN_EXIST:
 291		return -EADDRINUSE;
 292	default:
 293		return -EIO;
 294	}
 295}
 296
 297/*
 298 * Try and reuse skbs already allocated...
 299 */
 300static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
 301{
 302	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
 303		skb_trim(skb, 0);
 304		skb_get(skb);
 305		skb_reset_transport_header(skb);
 306	} else {
 307		skb = alloc_skb(len, gfp);
 308	}
 309	return skb;
 310}
 311
 312static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
 313				 __be32 peer_ip, __be16 local_port,
 314				 __be16 peer_port, u8 tos)
 315{
 316	struct rtable *rt;
 317	struct flowi fl = {
 318		.oif = 0,
 319		.nl_u = {
 320			 .ip4_u = {
 321				   .daddr = peer_ip,
 322				   .saddr = local_ip,
 323				   .tos = tos}
 324			 },
 325		.proto = IPPROTO_TCP,
 326		.uli_u = {
 327			  .ports = {
 328				    .sport = local_port,
 329				    .dport = peer_port}
 330			  }
 331	};
 332
 333	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
 334		return NULL;
 335	return rt;
 336}
 337
 338static void arp_failure_discard(void *handle, struct sk_buff *skb)
 339{
 340	PDBG("%s c4iw_dev %p\n", __func__, handle);
 341	kfree_skb(skb);
 342}
 343
 344/*
 345 * Handle an ARP failure for an active open.
 346 */
 347static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 348{
  349	printk(KERN_ERR MOD "ARP failure during connect\n");
 350	kfree_skb(skb);
 351}
 352
 353/*
 354 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 355 * and send it along.
 356 */
 357static void abort_arp_failure(void *handle, struct sk_buff *skb)
 358{
 359	struct c4iw_rdev *rdev = handle;
 360	struct cpl_abort_req *req = cplhdr(skb);
 361
 362	PDBG("%s rdev %p\n", __func__, rdev);
 363	req->cmd = CPL_ABORT_NO_RST;
 364	c4iw_ofld_send(rdev, skb);
 365}
 366
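/*
 * send_flowc() posts a FW_FLOWC_WR describing this offloaded connection
 * to the firmware: TX channel and port, ingress (RSS) queue, initial
 * send/receive sequence numbers, send buffer size and MSS.  It is sent
 * before MPA negotiation begins (see act_establish() and
 * pass_establish()).
 */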
 367static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 368{
 369	unsigned int flowclen = 80;
 370	struct fw_flowc_wr *flowc;
 371	int i;
 372
 373	skb = get_skb(skb, flowclen, GFP_KERNEL);
 374	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
 375
 376	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
 377					   FW_FLOWC_WR_NPARAMS(8));
 378	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
 379					  16)) | FW_WR_FLOWID(ep->hwtid));
 380
 381	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
 382	flowc->mnemval[0].val = cpu_to_be32(0);
 383	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
 384	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
 385	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
 386	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
 387	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
 388	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
 389	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
 390	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
 391	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
 392	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
 393	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
 394	flowc->mnemval[6].val = cpu_to_be32(snd_win);
 395	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
 396	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
 397	/* Pad WR to 16 byte boundary */
 398	flowc->mnemval[8].mnemonic = 0;
 399	flowc->mnemval[8].val = 0;
 400	for (i = 0; i < 9; i++) {
 401		flowc->mnemval[i].r4[0] = 0;
 402		flowc->mnemval[i].r4[1] = 0;
 403		flowc->mnemval[i].r4[2] = 0;
 404	}
 405
 406	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 407	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 408}
 409
 410static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
 411{
 412	struct cpl_close_con_req *req;
 413	struct sk_buff *skb;
 414	int wrlen = roundup(sizeof *req, 16);
 415
 416	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 417	skb = get_skb(NULL, wrlen, gfp);
 418	if (!skb) {
 419		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
 420		return -ENOMEM;
 421	}
 422	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 423	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 424	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
 425	memset(req, 0, wrlen);
 426	INIT_TP_WR(req, ep->hwtid);
 427	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
 428						    ep->hwtid));
 429	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 430}
 431
 432static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 433{
 434	struct cpl_abort_req *req;
 435	int wrlen = roundup(sizeof *req, 16);
 436
 437	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 438	skb = get_skb(skb, wrlen, gfp);
 439	if (!skb) {
 440		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
 441		       __func__);
 442		return -ENOMEM;
 443	}
 444	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 445	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
 446	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
 447	memset(req, 0, wrlen);
 448	INIT_TP_WR(req, ep->hwtid);
 449	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
 450	req->cmd = CPL_ABORT_SEND_RST;
 451	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 452}
 453
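/*
 * send_connect() issues the CPL_ACT_OPEN_REQ for an active open.  opt0
 * encodes the window scale, MSS index, L2T index, TX channel, source
 * MAC index and receive buffer size; opt2 selects the RSS queue and
 * optionally enables TCP timestamps, SACK and window scaling.
 */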
 454static int send_connect(struct c4iw_ep *ep)
 455{
 456	struct cpl_act_open_req *req;
 457	struct sk_buff *skb;
 458	u64 opt0;
 459	u32 opt2;
 460	unsigned int mtu_idx;
 461	int wscale;
 462	int wrlen = roundup(sizeof *req, 16);
 463
 464	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
 465
 466	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 467	if (!skb) {
 468		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
 469		       __func__);
 470		return -ENOMEM;
 471	}
 472	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
 473
 474	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 475	wscale = compute_wscale(rcv_win);
 476	opt0 = KEEP_ALIVE(1) |
 477	       WND_SCALE(wscale) |
 478	       MSS_IDX(mtu_idx) |
 479	       L2T_IDX(ep->l2t->idx) |
 480	       TX_CHAN(ep->tx_chan) |
 481	       SMAC_SEL(ep->smac_idx) |
 482	       DSCP(ep->tos) |
 483	       RCV_BUFSIZ(rcv_win>>10);
 484	opt2 = RX_CHANNEL(0) |
 485	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
 486	if (enable_tcp_timestamps)
 487		opt2 |= TSTAMPS_EN(1);
 488	if (enable_tcp_sack)
 489		opt2 |= SACK_EN(1);
 490	if (wscale && enable_tcp_window_scaling)
 491		opt2 |= WND_SCALE_EN(1);
 492	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 493
 494	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
 495	INIT_TP_WR(req, 0);
 496	OPCODE_TID(req) = cpu_to_be32(
 497		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
 498	req->local_port = ep->com.local_addr.sin_port;
 499	req->peer_port = ep->com.remote_addr.sin_port;
 500	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
 501	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
 502	req->opt0 = cpu_to_be64(opt0);
 503	req->params = 0;
 504	req->opt2 = cpu_to_be32(opt2);
 505	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 506}
 507
 508static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
 509{
 510	int mpalen, wrlen;
 511	struct fw_ofld_tx_data_wr *req;
 512	struct mpa_message *mpa;
 513
 514	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
 515
 516	BUG_ON(skb_cloned(skb));
 517
 518	mpalen = sizeof(*mpa) + ep->plen;
 519	wrlen = roundup(mpalen + sizeof *req, 16);
 520	skb = get_skb(skb, wrlen, GFP_KERNEL);
 521	if (!skb) {
 522		connect_reply_upcall(ep, -ENOMEM);
 523		return;
 524	}
 525	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 526
 527	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
 528	memset(req, 0, wrlen);
 529	req->op_to_immdlen = cpu_to_be32(
 530		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
 531		FW_WR_COMPL(1) |
 532		FW_WR_IMMDLEN(mpalen));
 533	req->flowid_len16 = cpu_to_be32(
 534		FW_WR_FLOWID(ep->hwtid) |
 535		FW_WR_LEN16(wrlen >> 4));
 536	req->plen = cpu_to_be32(mpalen);
 537	req->tunnel_to_proxy = cpu_to_be32(
 538		FW_OFLD_TX_DATA_WR_FLUSH(1) |
 539		FW_OFLD_TX_DATA_WR_SHOVE(1));
 540
 541	mpa = (struct mpa_message *)(req + 1);
 542	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
 543	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
 544		     (markers_enabled ? MPA_MARKERS : 0);
 545	mpa->private_data_size = htons(ep->plen);
 546	mpa->revision = mpa_rev;
 547
 548	if (ep->plen)
 549		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
 550
 551	/*
 552	 * Reference the mpa skb.  This ensures the data area
 553	 * will remain in memory until the hw acks the tx.
 554	 * Function fw4_ack() will deref it.
 555	 */
 556	skb_get(skb);
 557	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 558	BUG_ON(ep->mpa_skb);
 559	ep->mpa_skb = skb;
 560	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 561	start_ep_timer(ep);
 562	state_set(&ep->com, MPA_REQ_SENT);
 563	ep->mpa_attr.initiator = 1;
 564	return;
 565}
 566
 567static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 568{
 569	int mpalen, wrlen;
 570	struct fw_ofld_tx_data_wr *req;
 571	struct mpa_message *mpa;
 572	struct sk_buff *skb;
 573
 574	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
 575
 576	mpalen = sizeof(*mpa) + plen;
 577	wrlen = roundup(mpalen + sizeof *req, 16);
 578
 579	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 580	if (!skb) {
 581		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 582		return -ENOMEM;
 583	}
 584	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 585
 586	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
 587	memset(req, 0, wrlen);
 588	req->op_to_immdlen = cpu_to_be32(
 589		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
 590		FW_WR_COMPL(1) |
 591		FW_WR_IMMDLEN(mpalen));
 592	req->flowid_len16 = cpu_to_be32(
 593		FW_WR_FLOWID(ep->hwtid) |
 594		FW_WR_LEN16(wrlen >> 4));
 595	req->plen = cpu_to_be32(mpalen);
 596	req->tunnel_to_proxy = cpu_to_be32(
 597		FW_OFLD_TX_DATA_WR_FLUSH(1) |
 598		FW_OFLD_TX_DATA_WR_SHOVE(1));
 599
 600	mpa = (struct mpa_message *)(req + 1);
 601	memset(mpa, 0, sizeof(*mpa));
 602	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 603	mpa->flags = MPA_REJECT;
 604	mpa->revision = mpa_rev;
 605	mpa->private_data_size = htons(plen);
 606	if (plen)
 607		memcpy(mpa->private_data, pdata, plen);
 608
 609	/*
 610	 * Reference the mpa skb again.  This ensures the data area
 611	 * will remain in memory until the hw acks the tx.
 612	 * Function fw4_ack() will deref it.
 613	 */
 614	skb_get(skb);
 615	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 616	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 617	BUG_ON(ep->mpa_skb);
 618	ep->mpa_skb = skb;
 619	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 620}
 621
 622static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
 623{
 624	int mpalen, wrlen;
 625	struct fw_ofld_tx_data_wr *req;
 626	struct mpa_message *mpa;
 627	struct sk_buff *skb;
 628
 629	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
 630
 631	mpalen = sizeof(*mpa) + plen;
 632	wrlen = roundup(mpalen + sizeof *req, 16);
 633
 634	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 635	if (!skb) {
 636		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
 637		return -ENOMEM;
 638	}
 639	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
 640
 641	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
 642	memset(req, 0, wrlen);
 643	req->op_to_immdlen = cpu_to_be32(
 644		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
 645		FW_WR_COMPL(1) |
 646		FW_WR_IMMDLEN(mpalen));
 647	req->flowid_len16 = cpu_to_be32(
 648		FW_WR_FLOWID(ep->hwtid) |
 649		FW_WR_LEN16(wrlen >> 4));
 650	req->plen = cpu_to_be32(mpalen);
 651	req->tunnel_to_proxy = cpu_to_be32(
 652		FW_OFLD_TX_DATA_WR_FLUSH(1) |
 653		FW_OFLD_TX_DATA_WR_SHOVE(1));
 654
 655	mpa = (struct mpa_message *)(req + 1);
 656	memset(mpa, 0, sizeof(*mpa));
 657	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 658	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
 659		     (markers_enabled ? MPA_MARKERS : 0);
 660	mpa->revision = mpa_rev;
 661	mpa->private_data_size = htons(plen);
 662	if (plen)
 663		memcpy(mpa->private_data, pdata, plen);
 664
 665	/*
 666	 * Reference the mpa skb.  This ensures the data area
 667	 * will remain in memory until the hw acks the tx.
 668	 * Function fw4_ack() will deref it.
 669	 */
 670	skb_get(skb);
 671	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
 672	ep->mpa_skb = skb;
 673	state_set(&ep->com, MPA_REP_SENT);
 674	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 675}
 676
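/*
 * act_establish() completes an active open: the hardware TID replaces
 * the temporary atid, the initial sequence numbers and MSS are
 * recorded, and MPA negotiation starts with a FLOWC WR followed by the
 * MPA request.
 */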
 677static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 678{
 679	struct c4iw_ep *ep;
 680	struct cpl_act_establish *req = cplhdr(skb);
 681	unsigned int tid = GET_TID(req);
 682	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
 683	struct tid_info *t = dev->rdev.lldi.tids;
 684
 685	ep = lookup_atid(t, atid);
 686
 687	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
 688	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
 689
 690	dst_confirm(ep->dst);
 691
 692	/* setup the hwtid for this connection */
 693	ep->hwtid = tid;
 694	cxgb4_insert_tid(t, ep, tid);
 695
 696	ep->snd_seq = be32_to_cpu(req->snd_isn);
 697	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
 698
 699	set_emss(ep, ntohs(req->tcp_opt));
 700
 701	/* dealloc the atid */
 702	cxgb4_free_atid(t, atid);
 703
 704	/* start MPA negotiation */
 705	send_flowc(ep, NULL);
 706	send_mpa_req(ep, skb);
 707
 708	return 0;
 709}
 710
 711static void close_complete_upcall(struct c4iw_ep *ep)
 712{
 713	struct iw_cm_event event;
 714
 715	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 716	memset(&event, 0, sizeof(event));
 717	event.event = IW_CM_EVENT_CLOSE;
 718	if (ep->com.cm_id) {
 719		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
 720		     ep, ep->com.cm_id, ep->hwtid);
 721		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 722		ep->com.cm_id->rem_ref(ep->com.cm_id);
 723		ep->com.cm_id = NULL;
 724		ep->com.qp = NULL;
 725	}
 726}
 727
 728static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 729{
 730	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 731	close_complete_upcall(ep);
 732	state_set(&ep->com, ABORTING);
 733	return send_abort(ep, skb, gfp);
 734}
 735
 736static void peer_close_upcall(struct c4iw_ep *ep)
 737{
 738	struct iw_cm_event event;
 739
 740	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 741	memset(&event, 0, sizeof(event));
 742	event.event = IW_CM_EVENT_DISCONNECT;
 743	if (ep->com.cm_id) {
 744		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
 745		     ep, ep->com.cm_id, ep->hwtid);
 746		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 747	}
 748}
 749
 750static void peer_abort_upcall(struct c4iw_ep *ep)
 751{
 752	struct iw_cm_event event;
 753
 754	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 755	memset(&event, 0, sizeof(event));
 756	event.event = IW_CM_EVENT_CLOSE;
 757	event.status = -ECONNRESET;
 758	if (ep->com.cm_id) {
 759		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
 760		     ep->com.cm_id, ep->hwtid);
 761		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 762		ep->com.cm_id->rem_ref(ep->com.cm_id);
 763		ep->com.cm_id = NULL;
 764		ep->com.qp = NULL;
 765	}
 766}
 767
 768static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 769{
 770	struct iw_cm_event event;
 771
 772	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
 773	memset(&event, 0, sizeof(event));
 774	event.event = IW_CM_EVENT_CONNECT_REPLY;
 775	event.status = status;
 776	event.local_addr = ep->com.local_addr;
 777	event.remote_addr = ep->com.remote_addr;
 778
 779	if ((status == 0) || (status == -ECONNREFUSED)) {
 780		event.private_data_len = ep->plen;
 781		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 782	}
 783	if (ep->com.cm_id) {
 784		PDBG("%s ep %p tid %u status %d\n", __func__, ep,
 785		     ep->hwtid, status);
 786		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 787	}
 788	if (status < 0) {
 789		ep->com.cm_id->rem_ref(ep->com.cm_id);
 790		ep->com.cm_id = NULL;
 791		ep->com.qp = NULL;
 792	}
 793}
 794
 795static void connect_request_upcall(struct c4iw_ep *ep)
 796{
 797	struct iw_cm_event event;
 798
 799	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 800	memset(&event, 0, sizeof(event));
 801	event.event = IW_CM_EVENT_CONNECT_REQUEST;
 802	event.local_addr = ep->com.local_addr;
 803	event.remote_addr = ep->com.remote_addr;
 804	event.private_data_len = ep->plen;
 805	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
 806	event.provider_data = ep;
 807	if (state_read(&ep->parent_ep->com) != DEAD) {
 808		c4iw_get_ep(&ep->com);
 809		ep->parent_ep->com.cm_id->event_handler(
 810						ep->parent_ep->com.cm_id,
 811						&event);
 812	}
 813	c4iw_put_ep(&ep->parent_ep->com);
 814	ep->parent_ep = NULL;
 815}
 816
 817static void established_upcall(struct c4iw_ep *ep)
 818{
 819	struct iw_cm_event event;
 820
 821	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 822	memset(&event, 0, sizeof(event));
 823	event.event = IW_CM_EVENT_ESTABLISHED;
 824	if (ep->com.cm_id) {
 825		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 826		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 827	}
 828}
 829
 830static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 831{
 832	struct cpl_rx_data_ack *req;
 833	struct sk_buff *skb;
 834	int wrlen = roundup(sizeof *req, 16);
 835
 836	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
 837	skb = get_skb(NULL, wrlen, GFP_KERNEL);
 838	if (!skb) {
 839		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
 840		return 0;
 841	}
 842
 843	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
 844	memset(req, 0, wrlen);
 845	INIT_TP_WR(req, ep->hwtid);
 846	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
 847						    ep->hwtid));
 848	req->credit_dack = cpu_to_be32(credits);
 849	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->txq_idx);
 850	c4iw_ofld_send(&ep->com.dev->rdev, skb);
 851	return credits;
 852}
 853
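/*
 * MPA start messages may arrive split across several CPL_RX_DATA
 * segments, so process_mpa_reply() and process_mpa_request() accumulate
 * the stream into ep->mpa_pkt and only act once the full MPA header and
 * any private data have been received.
 */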
 854static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 855{
 856	struct mpa_message *mpa;
 857	u16 plen;
 858	struct c4iw_qp_attributes attrs;
 859	enum c4iw_qp_attr_mask mask;
 860	int err;
 861
 862	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 863
 864	/*
 865	 * Stop mpa timer.  If it expired, then the state has
 866	 * changed and we bail since ep_timeout already aborted
 867	 * the connection.
 868	 */
 869	stop_ep_timer(ep);
 870	if (state_read(&ep->com) != MPA_REQ_SENT)
 871		return;
 872
 873	/*
 874	 * If we get more than the supported amount of private data
 875	 * then we must fail this connection.
 876	 */
 877	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
 878		err = -EINVAL;
 879		goto err;
 880	}
 881
 882	/*
 883	 * copy the new data into our accumulation buffer.
 884	 */
 885	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
 886				  skb->len);
 887	ep->mpa_pkt_len += skb->len;
 888
 889	/*
 890	 * if we don't even have the mpa message, then bail.
 891	 */
 892	if (ep->mpa_pkt_len < sizeof(*mpa))
 893		return;
 894	mpa = (struct mpa_message *) ep->mpa_pkt;
 895
 896	/* Validate MPA header. */
 897	if (mpa->revision != mpa_rev) {
 898		err = -EPROTO;
 899		goto err;
 900	}
 901	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
 902		err = -EPROTO;
 903		goto err;
 904	}
 905
 906	plen = ntohs(mpa->private_data_size);
 907
 908	/*
 909	 * Fail if there's too much private data.
 910	 */
 911	if (plen > MPA_MAX_PRIVATE_DATA) {
 912		err = -EPROTO;
 913		goto err;
 914	}
 915
 916	/*
 917	 * If plen does not account for pkt size
 918	 */
 919	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
 920		err = -EPROTO;
 921		goto err;
 922	}
 923
 924	ep->plen = (u8) plen;
 925
 926	/*
 927	 * If we don't have all the pdata yet, then bail.
  928	 * We'll continue processing when more data arrives.
 929	 */
 930	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
 931		return;
 932
 933	if (mpa->flags & MPA_REJECT) {
 934		err = -ECONNREFUSED;
 935		goto err;
 936	}
 937
 938	/*
 939	 * If we get here we have accumulated the entire mpa
 940	 * start reply message including private data. And
 941	 * the MPA header is valid.
 942	 */
 943	state_set(&ep->com, FPDU_MODE);
 944	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
 945	ep->mpa_attr.recv_marker_enabled = markers_enabled;
 946	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
 947	ep->mpa_attr.version = mpa_rev;
 948	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
 949					    FW_RI_INIT_P2PTYPE_DISABLED;
 950	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
 951	     "xmit_marker_enabled=%d, version=%d\n", __func__,
 952	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
 953	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
 954
 955	attrs.mpa_attr = ep->mpa_attr;
 956	attrs.max_ird = ep->ird;
 957	attrs.max_ord = ep->ord;
 958	attrs.llp_stream_handle = ep;
 959	attrs.next_state = C4IW_QP_STATE_RTS;
 960
 961	mask = C4IW_QP_ATTR_NEXT_STATE |
 962	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
 963	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
 964
 965	/* bind QP and TID with INIT_WR */
 966	err = c4iw_modify_qp(ep->com.qp->rhp,
 967			     ep->com.qp, mask, &attrs, 1);
 968	if (err)
 969		goto err;
 970	goto out;
 971err:
 972	state_set(&ep->com, ABORTING);
 973	send_abort(ep, skb, GFP_KERNEL);
 974out:
 975	connect_reply_upcall(ep, err);
 976	return;
 977}
 978
 979static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 980{
 981	struct mpa_message *mpa;
 982	u16 plen;
 983
 984	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 985
 986	if (state_read(&ep->com) != MPA_REQ_WAIT)
 987		return;
 988
 989	/*
 990	 * If we get more than the supported amount of private data
 991	 * then we must fail this connection.
 992	 */
 993	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
 994		stop_ep_timer(ep);
 995		abort_connection(ep, skb, GFP_KERNEL);
 996		return;
 997	}
 998
 999	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1000
1001	/*
1002	 * Copy the new data into our accumulation buffer.
1003	 */
1004	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1005				  skb->len);
1006	ep->mpa_pkt_len += skb->len;
1007
1008	/*
1009	 * If we don't even have the mpa message, then bail.
 1010	 * We'll continue processing when more data arrives.
1011	 */
1012	if (ep->mpa_pkt_len < sizeof(*mpa))
1013		return;
1014
1015	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1016	stop_ep_timer(ep);
1017	mpa = (struct mpa_message *) ep->mpa_pkt;
1018
1019	/*
1020	 * Validate MPA Header.
1021	 */
1022	if (mpa->revision != mpa_rev) {
1023		abort_connection(ep, skb, GFP_KERNEL);
1024		return;
1025	}
1026
1027	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1028		abort_connection(ep, skb, GFP_KERNEL);
1029		return;
1030	}
1031
1032	plen = ntohs(mpa->private_data_size);
1033
1034	/*
1035	 * Fail if there's too much private data.
1036	 */
1037	if (plen > MPA_MAX_PRIVATE_DATA) {
1038		abort_connection(ep, skb, GFP_KERNEL);
1039		return;
1040	}
1041
1042	/*
1043	 * If plen does not account for pkt size
1044	 */
1045	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1046		abort_connection(ep, skb, GFP_KERNEL);
1047		return;
1048	}
1049	ep->plen = (u8) plen;
1050
1051	/*
1052	 * If we don't have all the pdata yet, then bail.
1053	 */
1054	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1055		return;
1056
1057	/*
1058	 * If we get here we have accumulated the entire mpa
1059	 * start reply message including private data.
1060	 */
1061	ep->mpa_attr.initiator = 0;
1062	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1063	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1064	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1065	ep->mpa_attr.version = mpa_rev;
1066	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1067					    FW_RI_INIT_P2PTYPE_DISABLED;
1068	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1069	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1070	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1071	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1072	     ep->mpa_attr.p2p_type);
1073
1074	state_set(&ep->com, MPA_REQ_RCVD);
1075
1076	/* drive upcall */
1077	connect_request_upcall(ep);
1078	return;
1079}
1080
1081static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1082{
1083	struct c4iw_ep *ep;
1084	struct cpl_rx_data *hdr = cplhdr(skb);
1085	unsigned int dlen = ntohs(hdr->len);
1086	unsigned int tid = GET_TID(hdr);
1087	struct tid_info *t = dev->rdev.lldi.tids;
1088
1089	ep = lookup_tid(t, tid);
1090	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1091	skb_pull(skb, sizeof(*hdr));
1092	skb_trim(skb, dlen);
1093
1094	ep->rcv_seq += dlen;
1095	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1096
1097	/* update RX credits */
1098	update_rx_credits(ep, dlen);
1099
1100	switch (state_read(&ep->com)) {
1101	case MPA_REQ_SENT:
1102		process_mpa_reply(ep, skb);
1103		break;
1104	case MPA_REQ_WAIT:
1105		process_mpa_request(ep, skb);
1106		break;
1107	case MPA_REP_SENT:
1108		break;
1109	default:
1110		printk(KERN_ERR MOD "%s Unexpected streaming data."
1111		       " ep %p state %d tid %u\n",
1112		       __func__, ep, state_read(&ep->com), ep->hwtid);
1113
1114		/*
1115		 * The ep will timeout and inform the ULP of the failure.
1116		 * See ep_timeout().
1117		 */
1118		break;
1119	}
1120	return 0;
1121}
1122
1123static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1124{
1125	struct c4iw_ep *ep;
1126	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1127	unsigned long flags;
1128	int release = 0;
1129	unsigned int tid = GET_TID(rpl);
1130	struct tid_info *t = dev->rdev.lldi.tids;
1131
1132	ep = lookup_tid(t, tid);
1133	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1134	BUG_ON(!ep);
1135	spin_lock_irqsave(&ep->com.lock, flags);
1136	switch (ep->com.state) {
1137	case ABORTING:
1138		__state_set(&ep->com, DEAD);
1139		release = 1;
1140		break;
1141	default:
1142		printk(KERN_ERR "%s ep %p state %d\n",
1143		     __func__, ep, ep->com.state);
1144		break;
1145	}
1146	spin_unlock_irqrestore(&ep->com.lock, flags);
1147
1148	if (release)
1149		release_ep_resources(ep);
1150	return 0;
1151}
1152
1153/*
1154 * Return whether a failed active open has allocated a TID
1155 */
1156static inline int act_open_has_tid(int status)
1157{
1158	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1159	       status != CPL_ERR_ARP_MISS;
1160}
1161
1162static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1163{
1164	struct c4iw_ep *ep;
1165	struct cpl_act_open_rpl *rpl = cplhdr(skb);
1166	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1167					ntohl(rpl->atid_status)));
1168	struct tid_info *t = dev->rdev.lldi.tids;
1169	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1170
1171	ep = lookup_atid(t, atid);
1172
1173	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1174	     status, status2errno(status));
1175
1176	if (status == CPL_ERR_RTX_NEG_ADVICE) {
1177		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1178			atid);
1179		return 0;
1180	}
1181
1182	connect_reply_upcall(ep, status2errno(status));
1183	state_set(&ep->com, DEAD);
1184
1185	if (status && act_open_has_tid(status))
1186		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1187
1188	cxgb4_free_atid(t, atid);
1189	dst_release(ep->dst);
1190	cxgb4_l2t_release(ep->l2t);
1191	c4iw_put_ep(&ep->com);
1192
1193	return 0;
1194}
1195
1196static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1197{
1198	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1199	struct tid_info *t = dev->rdev.lldi.tids;
1200	unsigned int stid = GET_TID(rpl);
1201	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1202
1203	if (!ep) {
1204		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1205		return 0;
1206	}
1207	PDBG("%s ep %p status %d error %d\n", __func__, ep,
1208	     rpl->status, status2errno(rpl->status));
1209	ep->com.rpl_err = status2errno(rpl->status);
1210	ep->com.rpl_done = 1;
1211	wake_up(&ep->com.waitq);
1212
1213	return 0;
1214}
1215
1216static int listen_stop(struct c4iw_listen_ep *ep)
1217{
1218	struct sk_buff *skb;
1219	struct cpl_close_listsvr_req *req;
1220
1221	PDBG("%s ep %p\n", __func__, ep);
1222	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1223	if (!skb) {
1224		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1225		return -ENOMEM;
1226	}
1227	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1228	INIT_TP_WR(req, 0);
1229	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1230						    ep->stid));
1231	req->reply_ctrl = cpu_to_be16(
1232			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1233	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1234	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1235}
1236
1237static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1238{
1239	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1240	struct tid_info *t = dev->rdev.lldi.tids;
1241	unsigned int stid = GET_TID(rpl);
1242	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1243
1244	PDBG("%s ep %p\n", __func__, ep);
1245	ep->com.rpl_err = status2errno(rpl->status);
1246	ep->com.rpl_done = 1;
1247	wake_up(&ep->com.waitq);
1248	return 0;
1249}
1250
1251static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1252		      struct cpl_pass_accept_req *req)
1253{
1254	struct cpl_pass_accept_rpl *rpl;
1255	unsigned int mtu_idx;
1256	u64 opt0;
1257	u32 opt2;
1258	int wscale;
1259
1260	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1261	BUG_ON(skb_cloned(skb));
1262	skb_trim(skb, sizeof(*rpl));
1263	skb_get(skb);
1264	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1265	wscale = compute_wscale(rcv_win);
1266	opt0 = KEEP_ALIVE(1) |
1267	       WND_SCALE(wscale) |
1268	       MSS_IDX(mtu_idx) |
1269	       L2T_IDX(ep->l2t->idx) |
1270	       TX_CHAN(ep->tx_chan) |
1271	       SMAC_SEL(ep->smac_idx) |
1272	       DSCP(ep->tos) |
1273	       RCV_BUFSIZ(rcv_win>>10);
1274	opt2 = RX_CHANNEL(0) |
1275	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1276
1277	if (enable_tcp_timestamps && req->tcpopt.tstamp)
1278		opt2 |= TSTAMPS_EN(1);
1279	if (enable_tcp_sack && req->tcpopt.sack)
1280		opt2 |= SACK_EN(1);
1281	if (wscale && enable_tcp_window_scaling)
1282		opt2 |= WND_SCALE_EN(1);
1283
1284	rpl = cplhdr(skb);
1285	INIT_TP_WR(rpl, ep->hwtid);
1286	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1287				      ep->hwtid));
1288	rpl->opt0 = cpu_to_be64(opt0);
1289	rpl->opt2 = cpu_to_be32(opt2);
1290	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->txq_idx);
1291	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1292
1293	return;
1294}
1295
1296static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1297		      struct sk_buff *skb)
1298{
1299	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1300	     peer_ip);
1301	BUG_ON(skb_cloned(skb));
1302	skb_trim(skb, sizeof(struct cpl_tid_release));
1303	skb_get(skb);
1304	release_tid(&dev->rdev, hwtid, skb);
1305	return;
1306}
1307
1308static void get_4tuple(struct cpl_pass_accept_req *req,
1309		       __be32 *local_ip, __be32 *peer_ip,
1310		       __be16 *local_port, __be16 *peer_port)
1311{
1312	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1313	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1314	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1315	struct tcphdr *tcp = (struct tcphdr *)
1316			     ((u8 *)(req + 1) + eth_len + ip_len);
1317
1318	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1319	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1320	     ntohs(tcp->dest));
1321
1322	*peer_ip = ip->saddr;
1323	*local_ip = ip->daddr;
1324	*peer_port = tcp->source;
1325	*local_port = tcp->dest;
1326
1327	return;
1328}
1329
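/*
 * pass_accept_req() handles an incoming connection on a listening
 * endpoint: it resolves a route and L2T entry to the peer, allocates
 * and initializes a child endpoint, inserts its hardware TID, and
 * replies with CPL_PASS_ACCEPT_RPL via accept_cr().  On any failure the
 * connection is rejected by releasing the TID (reject_cr()).
 */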
1330static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1331{
1332	struct c4iw_ep *child_ep, *parent_ep;
1333	struct cpl_pass_accept_req *req = cplhdr(skb);
1334	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1335	struct tid_info *t = dev->rdev.lldi.tids;
1336	unsigned int hwtid = GET_TID(req);
1337	struct dst_entry *dst;
1338	struct l2t_entry *l2t;
1339	struct rtable *rt;
1340	__be32 local_ip, peer_ip;
1341	__be16 local_port, peer_port;
1342	struct net_device *pdev;
1343	u32 tx_chan, smac_idx;
1344	u16 rss_qid;
1345	u32 mtu;
1346	int step;
1347	int txq_idx;
1348
1349	parent_ep = lookup_stid(t, stid);
1350	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1351
1352	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1353
1354	if (state_read(&parent_ep->com) != LISTEN) {
1355		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1356		       __func__);
1357		goto reject;
1358	}
1359
1360	/* Find output route */
1361	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1362			GET_POPEN_TOS(ntohl(req->tos_stid)));
1363	if (!rt) {
1364		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1365		       __func__);
1366		goto reject;
1367	}
1368	dst = &rt->u.dst;
1369	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1370		pdev = ip_dev_find(&init_net, peer_ip);
1371		BUG_ON(!pdev);
1372		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1373				    pdev, 0);
1374		mtu = pdev->mtu;
1375		tx_chan = cxgb4_port_chan(pdev);
1376		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1377		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1378		txq_idx = cxgb4_port_idx(pdev) * step;
1379		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1380		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1381		dev_put(pdev);
1382	} else {
1383		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1384					dst->neighbour->dev, 0);
1385		mtu = dst_mtu(dst);
1386		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
1387		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
1388		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1389		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
1390		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1391		rss_qid = dev->rdev.lldi.rxq_ids[
1392			  cxgb4_port_idx(dst->neighbour->dev) * step];
1393	}
1394	if (!l2t) {
1395		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1396		       __func__);
1397		dst_release(dst);
1398		goto reject;
1399	}
1400
1401	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1402	if (!child_ep) {
1403		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1404		       __func__);
1405		cxgb4_l2t_release(l2t);
1406		dst_release(dst);
1407		goto reject;
1408	}
1409	state_set(&child_ep->com, CONNECTING);
1410	child_ep->com.dev = dev;
1411	child_ep->com.cm_id = NULL;
1412	child_ep->com.local_addr.sin_family = PF_INET;
1413	child_ep->com.local_addr.sin_port = local_port;
1414	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1415	child_ep->com.remote_addr.sin_family = PF_INET;
1416	child_ep->com.remote_addr.sin_port = peer_port;
1417	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1418	c4iw_get_ep(&parent_ep->com);
1419	child_ep->parent_ep = parent_ep;
1420	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1421	child_ep->l2t = l2t;
1422	child_ep->dst = dst;
1423	child_ep->hwtid = hwtid;
1424	child_ep->tx_chan = tx_chan;
1425	child_ep->smac_idx = smac_idx;
1426	child_ep->rss_qid = rss_qid;
1427	child_ep->mtu = mtu;
1428	child_ep->txq_idx = txq_idx;
1429
1430	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1431	     tx_chan, smac_idx, rss_qid);
1432
1433	init_timer(&child_ep->timer);
1434	cxgb4_insert_tid(t, child_ep, hwtid);
1435	accept_cr(child_ep, peer_ip, skb, req);
1436	goto out;
1437reject:
1438	reject_cr(dev, hwtid, peer_ip, skb);
1439out:
1440	return 0;
1441}
1442
1443static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1444{
1445	struct c4iw_ep *ep;
1446	struct cpl_pass_establish *req = cplhdr(skb);
1447	struct tid_info *t = dev->rdev.lldi.tids;
1448	unsigned int tid = GET_TID(req);
1449
1450	ep = lookup_tid(t, tid);
1451	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1452	ep->snd_seq = be32_to_cpu(req->snd_isn);
1453	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1454
1455	set_emss(ep, ntohs(req->tcp_opt));
1456
1457	dst_confirm(ep->dst);
1458	state_set(&ep->com, MPA_REQ_WAIT);
1459	start_ep_timer(ep);
1460	send_flowc(ep, skb);
1461
1462	return 0;
1463}
1464
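/*
 * peer_close() drives the half-close state machine for CPL_PEER_CLOSE:
 * an established connection moves to CLOSING (and the ep timer is
 * started), a connection already CLOSING moves to MORIBUND, and a
 * MORIBUND connection is completed, marked DEAD and released.
 */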
1465static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1466{
1467	struct cpl_peer_close *hdr = cplhdr(skb);
1468	struct c4iw_ep *ep;
1469	struct c4iw_qp_attributes attrs;
1470	unsigned long flags;
1471	int disconnect = 1;
1472	int release = 0;
1473	int closing = 0;
1474	struct tid_info *t = dev->rdev.lldi.tids;
1475	unsigned int tid = GET_TID(hdr);
1476	int start_timer = 0;
1477	int stop_timer = 0;
1478
1479	ep = lookup_tid(t, tid);
1480	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1481	dst_confirm(ep->dst);
1482
1483	spin_lock_irqsave(&ep->com.lock, flags);
1484	switch (ep->com.state) {
1485	case MPA_REQ_WAIT:
1486		__state_set(&ep->com, CLOSING);
1487		break;
1488	case MPA_REQ_SENT:
1489		__state_set(&ep->com, CLOSING);
1490		connect_reply_upcall(ep, -ECONNRESET);
1491		break;
1492	case MPA_REQ_RCVD:
1493
1494		/*
1495		 * We're gonna mark this puppy DEAD, but keep
1496		 * the reference on it until the ULP accepts or
1497		 * rejects the CR. Also wake up anyone waiting
1498		 * in rdma connection migration (see c4iw_accept_cr()).
1499		 */
1500		__state_set(&ep->com, CLOSING);
1501		ep->com.rpl_done = 1;
1502		ep->com.rpl_err = -ECONNRESET;
1503		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1504		wake_up(&ep->com.waitq);
1505		break;
1506	case MPA_REP_SENT:
1507		__state_set(&ep->com, CLOSING);
1508		ep->com.rpl_done = 1;
1509		ep->com.rpl_err = -ECONNRESET;
1510		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1511		wake_up(&ep->com.waitq);
1512		break;
1513	case FPDU_MODE:
1514		start_timer = 1;
1515		__state_set(&ep->com, CLOSING);
1516		closing = 1;
1517		peer_close_upcall(ep);
1518		break;
1519	case ABORTING:
1520		disconnect = 0;
1521		break;
1522	case CLOSING:
1523		__state_set(&ep->com, MORIBUND);
1524		disconnect = 0;
1525		break;
1526	case MORIBUND:
1527		stop_timer = 1;
1528		if (ep->com.cm_id && ep->com.qp) {
1529			attrs.next_state = C4IW_QP_STATE_IDLE;
1530			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1531				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1532		}
1533		close_complete_upcall(ep);
1534		__state_set(&ep->com, DEAD);
1535		release = 1;
1536		disconnect = 0;
1537		break;
1538	case DEAD:
1539		disconnect = 0;
1540		break;
1541	default:
1542		BUG_ON(1);
1543	}
1544	spin_unlock_irqrestore(&ep->com.lock, flags);
1545	if (closing) {
1546		attrs.next_state = C4IW_QP_STATE_CLOSING;
1547		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1548			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1549	}
1550	if (start_timer)
1551		start_ep_timer(ep);
1552	if (stop_timer)
1553		stop_ep_timer(ep);
1554	if (disconnect)
1555		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1556	if (release)
1557		release_ep_resources(ep);
1558	return 0;
1559}
1560
1561/*
1562 * Returns whether an ABORT_REQ_RSS message is a negative advice.
1563 */
1564static int is_neg_adv_abort(unsigned int status)
1565{
1566	return status == CPL_ERR_RTX_NEG_ADVICE ||
1567	       status == CPL_ERR_PERSIST_NEG_ADVICE;
1568}
1569
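/*
 * peer_abort() handles CPL_ABORT_REQ_RSS from the peer.  Negative
 * advice is ignored; otherwise the bound QP (if any) is moved to ERROR,
 * the upcall appropriate to the current state is delivered, and a
 * CPL_ABORT_RPL with CPL_ABORT_NO_RST is returned to the hardware.
 */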
1570static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1571{
1572	struct cpl_abort_req_rss *req = cplhdr(skb);
1573	struct c4iw_ep *ep;
1574	struct cpl_abort_rpl *rpl;
1575	struct sk_buff *rpl_skb;
1576	struct c4iw_qp_attributes attrs;
1577	int ret;
1578	int release = 0;
1579	unsigned long flags;
1580	struct tid_info *t = dev->rdev.lldi.tids;
1581	unsigned int tid = GET_TID(req);
1582	int stop_timer = 0;
1583
1584	ep = lookup_tid(t, tid);
1585	if (is_neg_adv_abort(req->status)) {
1586		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1587		     ep->hwtid);
1588		return 0;
1589	}
1590	spin_lock_irqsave(&ep->com.lock, flags);
1591	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1592	     ep->com.state);
1593	switch (ep->com.state) {
1594	case CONNECTING:
1595		break;
1596	case MPA_REQ_WAIT:
1597		stop_timer = 1;
1598		break;
1599	case MPA_REQ_SENT:
1600		stop_timer = 1;
1601		connect_reply_upcall(ep, -ECONNRESET);
1602		break;
1603	case MPA_REP_SENT:
1604		ep->com.rpl_done = 1;
1605		ep->com.rpl_err = -ECONNRESET;
1606		PDBG("waking up ep %p\n", ep);
1607		wake_up(&ep->com.waitq);
1608		break;
1609	case MPA_REQ_RCVD:
1610
1611		/*
1612		 * We're gonna mark this puppy DEAD, but keep
1613		 * the reference on it until the ULP accepts or
1614		 * rejects the CR. Also wake up anyone waiting
1615		 * in rdma connection migration (see c4iw_accept_cr()).
1616		 */
1617		ep->com.rpl_done = 1;
1618		ep->com.rpl_err = -ECONNRESET;
1619		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1620		wake_up(&ep->com.waitq);
1621		break;
1622	case MORIBUND:
1623	case CLOSING:
1624		stop_timer = 1;
1625		/*FALLTHROUGH*/
1626	case FPDU_MODE:
1627		if (ep->com.cm_id && ep->com.qp) {
1628			attrs.next_state = C4IW_QP_STATE_ERROR;
1629			ret = c4iw_modify_qp(ep->com.qp->rhp,
1630				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1631				     &attrs, 1);
1632			if (ret)
1633				printk(KERN_ERR MOD
1634				       "%s - qp <- error failed!\n",
1635				       __func__);
1636		}
1637		peer_abort_upcall(ep);
1638		break;
1639	case ABORTING:
1640		break;
1641	case DEAD:
1642		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1643		spin_unlock_irqrestore(&ep->com.lock, flags);
1644		return 0;
1645	default:
1646		BUG_ON(1);
1647		break;
1648	}
1649	dst_confirm(ep->dst);
1650	if (ep->com.state != ABORTING) {
1651		__state_set(&ep->com, DEAD);
1652		release = 1;
1653	}
1654	spin_unlock_irqrestore(&ep->com.lock, flags);
1655
1656	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1657	if (!rpl_skb) {
1658		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1659		       __func__);
1660		release = 1;
1661		goto out;
1662	}
1663	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1664	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1665	INIT_TP_WR(rpl, ep->hwtid);
1666	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1667	rpl->cmd = CPL_ABORT_NO_RST;
1668	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1669out:
1670	if (stop_timer)
1671		stop_ep_timer(ep);
1672	if (release)
1673		release_ep_resources(ep);
1674	return 0;
1675}
1676
1677static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1678{
1679	struct c4iw_ep *ep;
1680	struct c4iw_qp_attributes attrs;
1681	struct cpl_close_con_rpl *rpl = cplhdr(skb);
1682	unsigned long flags;
1683	int release = 0;
1684	struct tid_info *t = dev->rdev.lldi.tids;
1685	unsigned int tid = GET_TID(rpl);
1686	int stop_timer = 0;
1687
1688	ep = lookup_tid(t, tid);
1689
1690	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1691	BUG_ON(!ep);
1692
1693	/* The cm_id may be null if we failed to connect */
1694	spin_lock_irqsave(&ep->com.lock, flags);
1695	switch (ep->com.state) {
1696	case CLOSING:
1697		__state_set(&ep->com, MORIBUND);
1698		break;
1699	case MORIBUND:
1700		stop_timer = 1;
1701		if ((ep->com.cm_id) && (ep->com.qp)) {
1702			attrs.next_state = C4IW_QP_STATE_IDLE;
1703			c4iw_modify_qp(ep->com.qp->rhp,
1704					     ep->com.qp,
1705					     C4IW_QP_ATTR_NEXT_STATE,
1706					     &attrs, 1);
1707		}
1708		close_complete_upcall(ep);
1709		__state_set(&ep->com, DEAD);
1710		release = 1;
1711		break;
1712	case ABORTING:
1713	case DEAD:
1714		break;
1715	default:
1716		BUG_ON(1);
1717		break;
1718	}
1719	spin_unlock_irqrestore(&ep->com.lock, flags);
1720	if (stop_timer)
1721		stop_ep_timer(ep);
1722	if (release)
1723		release_ep_resources(ep);
1724	return 0;
1725}
1726
1727static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1728{
1729	struct c4iw_ep *ep;
1730	struct cpl_rdma_terminate *term = cplhdr(skb);
1731	struct tid_info *t = dev->rdev.lldi.tids;
1732	unsigned int tid = GET_TID(term);
1733
1734	ep = lookup_tid(t, tid);
1735
1736	if (state_read(&ep->com) != FPDU_MODE)
1737		return 0;
1738
1739	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1740	skb_pull(skb, sizeof *term);
1741	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1742	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1743				  skb->len);
1744	ep->com.qp->attr.terminate_msg_len = skb->len;
1745	ep->com.qp->attr.is_terminate_local = 0;
1746	return 0;
1747}
1748
1749/*
1750 * Upcall from the adapter indicating data has been transmitted.
 1751 * For us it's just the single MPA request or reply.  We can now free
1752 * the skb holding the mpa message.
1753 */
1754static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1755{
1756	struct c4iw_ep *ep;
1757	struct cpl_fw4_ack *hdr = cplhdr(skb);
1758	u8 credits = hdr->credits;
1759	unsigned int tid = GET_TID(hdr);
1760	struct tid_info *t = dev->rdev.lldi.tids;
1761
1762
1763	ep = lookup_tid(t, tid);
1764	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1765	if (credits == 0) {
1766		PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
1767			__func__, ep, ep->hwtid, state_read(&ep->com));
1768		return 0;
1769	}
1770
1771	dst_confirm(ep->dst);
1772	if (ep->mpa_skb) {
1773		PDBG("%s last streaming msg ack ep %p tid %u state %u "
1774		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1775		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1776		kfree_skb(ep->mpa_skb);
1777		ep->mpa_skb = NULL;
1778	}
1779	return 0;
1780}
1781
1782int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1783{
1784	int err;
1785	struct c4iw_ep *ep = to_ep(cm_id);
1786	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1787
1788	if (state_read(&ep->com) == DEAD) {
1789		c4iw_put_ep(&ep->com);
1790		return -ECONNRESET;
1791	}
1792	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1793	if (mpa_rev == 0)
1794		abort_connection(ep, NULL, GFP_KERNEL);
1795	else {
1796		err = send_mpa_reject(ep, pdata, pdata_len);
1797		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1798	}
1799	c4iw_put_ep(&ep->com);
1800	return 0;
1801}
1802
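/*
 * c4iw_accept_cr() is the iw_cm accept path: it validates the requested
 * IRD/ORD against c4iw_max_read_depth, binds the QP to the endpoint,
 * moves the QP to RTS via c4iw_modify_qp(), and sends the MPA reply
 * carrying any private data.
 */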
1803int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1804{
1805	int err;
1806	struct c4iw_qp_attributes attrs;
1807	enum c4iw_qp_attr_mask mask;
1808	struct c4iw_ep *ep = to_ep(cm_id);
1809	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1810	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1811
1812	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1813	if (state_read(&ep->com) == DEAD) {
1814		err = -ECONNRESET;
1815		goto err;
1816	}
1817
1818	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1819	BUG_ON(!qp);
1820
1821	if ((conn_param->ord > c4iw_max_read_depth) ||
1822	    (conn_param->ird > c4iw_max_read_depth)) {
1823		abort_connection(ep, NULL, GFP_KERNEL);
1824		err = -EINVAL;
1825		goto err;
1826	}
1827
1828	cm_id->add_ref(cm_id);
1829	ep->com.cm_id = cm_id;
1830	ep->com.qp = qp;
1831
1832	ep->ird = conn_param->ird;
1833	ep->ord = conn_param->ord;
1834
1835	if (peer2peer && ep->ird == 0)
1836		ep->ird = 1;
1837
1838	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1839
1840	/* bind QP to EP and move to RTS */
1841	attrs.mpa_attr = ep->mpa_attr;
1842	attrs.max_ird = ep->ird;
1843	attrs.max_ord = ep->ord;
1844	attrs.llp_stream_handle = ep;
1845	attrs.next_state = C4IW_QP_STATE_RTS;
1846
1847	/* bind QP and TID with INIT_WR */
1848	mask = C4IW_QP_ATTR_NEXT_STATE |
1849			     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1850			     C4IW_QP_ATTR_MPA_ATTR |
1851			     C4IW_QP_ATTR_MAX_IRD |
1852			     C4IW_QP_ATTR_MAX_ORD;
1853
1854	err = c4iw_modify_qp(ep->com.qp->rhp,
1855			     ep->com.qp, mask, &attrs, 1);
1856	if (err)
1857		goto err1;
1858	err = send_mpa_reply(ep, conn_param->private_data,
1859			 

Large files are truncated; the remainder of this file is available in the repository linked above.