
/drivers/scsi/cxgb3i/cxgb3i_pdu.c

https://github.com/kipill-nn/Kernel-for-Mega
/*
 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "cxgb3i.h"
#include "cxgb3i_pdu.h"

#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug		cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif

#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug		cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif

/* always allocate room for AHS */
#define SKB_TX_PDU_HEADER_LEN	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)

static unsigned int skb_extra_headroom;
static struct page *pad_page;

/*
 * pdu receive, interact with libiscsi_tcp
 */
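/*
 * read_pdu_skb() feeds one skb from the offload connection's receive
 * queue to libiscsi_tcp and folds the library's status codes into the
 * driver's convention: a positive return is the number of bytes
 * consumed, a negative value is a hard error. Note that
 * ISCSI_TCP_SUSPENDED is not treated as an error here; it only means
 * libiscsi paused the receive path and the caller should flush its
 * queue.
 */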
static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
			       unsigned int offset, int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		return bytes_read;
	default:
		iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
				  "status %d\n", status);
		return -EINVAL;
	}
}
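/*
 * Each received skb carries a ULP mode word (skb_ulp_mode()) set by the
 * hardware: HCRC/DCRC error flags from offloaded digest checking, and
 * ULP2_FLAG_DATA_DDPED when the payload was already placed directly
 * into the task's buffers (DDP). In the DDP case the data segment is
 * handed to libiscsi_tcp with offloaded = true, so the library only
 * advances its bookkeeping instead of copying data; otherwise the
 * payload sits behind a cpl_iscsi_hdr_norss header in the same skb,
 * hence the extra offset below.
 */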
static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
				    struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = false;
	unsigned int offset;
	int rc;

	cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
			conn, skb, skb->len, skb_ulp_mode(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	/* iscsi hdr */
	rc = read_pdu_skb(conn, skb, 0, 0);
	if (rc <= 0)
		return rc;

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	offset = rc;
	if (conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	/* iscsi data */
	if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
				"itt 0x%x.\n",
				skb,
				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
				tcp_conn->in.datalen,
				ntohl(tcp_conn->in.hdr->itt));
		offloaded = true;
	} else {
		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
				"itt 0x%x.\n",
				skb,
				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
				tcp_conn->in.datalen,
				ntohl(tcp_conn->in.hdr->itt));
		offset += sizeof(struct cpl_iscsi_hdr_norss);
	}

	rc = read_pdu_skb(conn, skb, offset, offloaded);
	if (rc < 0)
		return rc;
	else
		return 0;
}

/*
 * pdu transmit, interact with libiscsi_tcp
 */
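/*
 * On transmit the same ULP mode word tells the hardware what to insert:
 * bit 0 of the submode requests header-digest (HCRC) insertion, bit 1
 * data-digest (DCRC), and the upper nibble selects ULP_MODE_ISCSI. For
 * example, a connection with both digests enabled is tagged with
 * (ULP_MODE_ISCSI << 4) | 3.
 */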
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	u8 submode = 0;

	if (hcrc)
		submode |= 1;
	if (dcrc)
		submode |= 2;
	skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
}

void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
	struct cxgb3i_task_data *tdata = task->dd_data +
					 sizeof(struct iscsi_tcp_task);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(struct cxgb3i_task_data));

	/* MNC - Do we need a check in case this is called but
	 * cxgb3i_conn_alloc_pdu has never been called on the task */
	cxgb3i_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}
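/*
 * The two helpers below walk a command's scatterlist when building a
 * data-out PDU: sgl_seek_offset() locates the entry containing a given
 * byte offset into the buffer, and sgl_read_to_frags() translates the
 * following dlen bytes into page fragments, merging adjacent ranges
 * that land contiguously on the same page so the result fits within
 * frag_max entries whenever possible.
 */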
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
			   unsigned int offset, unsigned int *off,
			   struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}

static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
						 __func__, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				cxgb3i_log_error("%s, too many pages %u, "
						 "dlen %u.\n", __func__,
						 frag_max, dlen);
				return -EINVAL;
			}
			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);
	return i;
}
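/*
 * PDU allocation: task->dd_data holds the iscsi_tcp_task followed by
 * the driver's own cxgb3i_task_data, so tdata lives at a fixed offset.
 * The skb is allocated with TX_HEADER_LEN reserved for the hardware
 * work request, plus room for the BHS and any AHS; for write commands
 * extra headroom (capped at max_xmit_dlength) is added so that small
 * payloads can later be copied inline instead of being attached as
 * page fragments.
 */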
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	/* write command, need to send data pdus */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	     (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}
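/*
 * iSCSI data segments are padded to a 4-byte boundary (RFC 3720), so
 * iscsi_padding(count) yields e.g. 3 for count = 13 and 0 for
 * count = 16. When the data is copied into the skb's linear headroom,
 * the pad is zeroed in place and padlen is cleared so that the
 * pad_page append at the bottom of the function is skipped; in the
 * fragment paths the shared zeroed pad_page is attached as one extra
 * fragment instead.
 */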
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			 unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
			task, task->sc, offset, count, skb);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
				      tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
					sdb->table.nents, tdata->offset,
					sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
					sdb->table.nents, tdata->offset,
					tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src + frag->page_offset,
				       frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fits in the frag list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
			       sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}
	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		get_page(pad_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
				   padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}
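/*
 * Transmit: the skb built by alloc_pdu/init_pdu is handed to the
 * offload connection. On success the accounted PDU length includes the
 * digests the hardware will insert. -EAGAIN is not fatal: the skb is
 * parked back in tdata->skb and the same task is retried once the
 * transmit window reopens (see cxgb3i_conn_tx_open() below).
 */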
int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen;
	int err;

	if (!skb)
		return 0;

	datalen = skb->data_len;
	tdata->skb = NULL;
	err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
	if (err > 0) {
		int pdulen = err;

		cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
				task, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;
		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err < 0 && err != -EAGAIN) {
		/* log before freeing: the skb must not be touched afterwards */
		cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
				task->itt, skb, skb->len, skb->data_len, err);
		kfree_skb(skb);
		iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
		iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
		return err;
	}

	/* reset skb to send when we are called again */
	tdata->skb = skb;
	return -EAGAIN;
}
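/*
 * Module-level setup: pad_page is a single zeroed page shared by all
 * connections for data-segment padding. skb_extra_headroom is enabled
 * only when SKB_TX_HEADROOM exceeds 512 * MAX_SKB_FRAGS, presumably so
 * the copy-into-headroom path in cxgb3i_conn_init_pdu() is used only
 * when the reserved headroom is genuinely large.
 */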
int cxgb3i_pdu_init(void)
{
	if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
		skb_extra_headroom = SKB_TX_HEADROOM;
	pad_page = alloc_page(GFP_KERNEL);
	if (!pad_page)
		return -ENOMEM;
	memset(page_address(pad_page), 0, PAGE_SIZE);
	return 0;
}

void cxgb3i_pdu_cleanup(void)
{
	if (pad_page) {
		__free_page(pad_page);
		pad_page = NULL;
	}
}
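/*
 * Receive-side event callbacks from the offload connection (s3_conn).
 * c3cn->callback_lock guards the iscsi_conn back-pointer (user_data)
 * against teardown while these run. cxgb3i_conn_pdu_ready() drains the
 * connection's receive queue one PDU-skb at a time, then returns the
 * consumed byte count to the hardware as RX credits to replenish the
 * receive window.
 */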
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	c3cn->copied_seq += read;
	cxgb3i_c3cn_rx_credits(c3cn, read);
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}

void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn = c3cn->user_data;

	cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
	if (conn) {
		cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
		scsi_queue_work(conn->session->host, &conn->xmitwork);
	}
}

void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn;

	read_lock(&c3cn->callback_lock);
	conn = c3cn->user_data;
	if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	read_unlock(&c3cn->callback_lock);
}