/net/sunrpc/xprtrdma/rpc_rdma.c

http://github.com/mirrors/linux · C · 1499 lines

  1. // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
  2. /*
  3. * Copyright (c) 2014-2017 Oracle. All rights reserved.
  4. * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the BSD-type
  10. * license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or without
  13. * modification, are permitted provided that the following conditions
  14. * are met:
  15. *
  16. * Redistributions of source code must retain the above copyright
  17. * notice, this list of conditions and the following disclaimer.
  18. *
  19. * Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials provided
  22. * with the distribution.
  23. *
  24. * Neither the name of the Network Appliance, Inc. nor the names of
  25. * its contributors may be used to endorse or promote products
  26. * derived from this software without specific prior written
  27. * permission.
  28. *
  29. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  30. * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  31. * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  32. * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  33. * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  34. * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  35. * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  36. * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  37. * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  38. * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  39. * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  40. */
  41. /*
  42. * rpc_rdma.c
  43. *
  44. * This file contains the guts of the RPC RDMA protocol, and
  45. * does marshaling/unmarshaling, etc. It is also where interfacing
  46. * to the Linux RPC framework lives.
  47. */
  48. #include <linux/highmem.h>
  49. #include <linux/sunrpc/svc_rdma.h>
  50. #include "xprt_rdma.h"
  51. #include <trace/events/rpcrdma.h>
  52. #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
  53. # define RPCDBG_FACILITY RPCDBG_TRANS
  54. #endif
  55. /* Returns size of largest RPC-over-RDMA header in a Call message
  56. *
  57. * The largest Call header contains a full-size Read list and a
  58. * minimal Reply chunk.
  59. */
  60. static unsigned int rpcrdma_max_call_header_size(unsigned int maxsegs)
  61. {
  62. unsigned int size;
  63. /* Fixed header fields and list discriminators */
  64. size = RPCRDMA_HDRLEN_MIN;
  65. /* Maximum Read list size */
  66. size += maxsegs * rpcrdma_readchunk_maxsz * sizeof(__be32);
  67. /* Minimal Read chunk size */
  68. size += sizeof(__be32); /* segment count */
  69. size += rpcrdma_segment_maxsz * sizeof(__be32);
  70. size += sizeof(__be32); /* list discriminator */
  71. return size;
  72. }
  73. /* Returns size of largest RPC-over-RDMA header in a Reply message
  74. *
  75. * There is only one Write list or one Reply chunk per Reply
  76. * message. The larger list is the Write list.
  77. */
  78. static unsigned int rpcrdma_max_reply_header_size(unsigned int maxsegs)
  79. {
  80. unsigned int size;
  81. /* Fixed header fields and list discriminators */
  82. size = RPCRDMA_HDRLEN_MIN;
  83. /* Maximum Write list size */
  84. size += sizeof(__be32); /* segment count */
  85. size += maxsegs * rpcrdma_segment_maxsz * sizeof(__be32);
  86. size += sizeof(__be32); /* list discriminator */
  87. return size;
  88. }
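/* For illustration only, assuming the usual xprt_rdma.h sizes
 * (RPCRDMA_HDRLEN_MIN of 7 XDR words, rpcrdma_segment_maxsz of 4,
 * rpcrdma_readchunk_maxsz of 6): a transport negotiated with
 * maxsegs = 8 budgets roughly
 *
 *   Call header:  28 + (8 * 6 * 4) + 4 + 16 + 4 = 244 bytes
 *   Reply header: 28 + 4 + (8 * 4 * 4) + 4      = 164 bytes
 *
 * These worst-case header sizes are what re_max_inline_send and
 * re_max_inline_recv are reduced by in rpcrdma_set_max_header_sizes()
 * below.
 */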
  89. /**
  90. * rpcrdma_set_max_header_sizes - Initialize inline payload sizes
  91. * @ep: endpoint to initialize
  92. *
  93. * The max_inline fields contain the maximum size of an RPC message
  94. * so the marshaling code doesn't have to repeat this calculation
  95. * for every RPC.
  96. */
  97. void rpcrdma_set_max_header_sizes(struct rpcrdma_ep *ep)
  98. {
  99. unsigned int maxsegs = ep->re_max_rdma_segs;
  100. ep->re_max_inline_send =
  101. ep->re_inline_send - rpcrdma_max_call_header_size(maxsegs);
  102. ep->re_max_inline_recv =
  103. ep->re_inline_recv - rpcrdma_max_reply_header_size(maxsegs);
  104. }
  105. /* The client can send a request inline as long as the RPCRDMA header
  106. * plus the RPC call fit under the transport's inline limit. If the
  107. * combined call message size exceeds that limit, the client must use
  108. * a Read chunk for this operation.
  109. *
  110. * A Read chunk is also required if sending the RPC call inline would
  111. * exceed this device's max_sge limit.
  112. */
  113. static bool rpcrdma_args_inline(struct rpcrdma_xprt *r_xprt,
  114. struct rpc_rqst *rqst)
  115. {
  116. struct xdr_buf *xdr = &rqst->rq_snd_buf;
  117. struct rpcrdma_ep *ep = r_xprt->rx_ep;
  118. unsigned int count, remaining, offset;
  119. if (xdr->len > ep->re_max_inline_send)
  120. return false;
  121. if (xdr->page_len) {
  122. remaining = xdr->page_len;
  123. offset = offset_in_page(xdr->page_base);
  124. count = RPCRDMA_MIN_SEND_SGES;
  125. while (remaining) {
  126. remaining -= min_t(unsigned int,
  127. PAGE_SIZE - offset, remaining);
  128. offset = 0;
  129. if (++count > ep->re_attr.cap.max_send_sge)
  130. return false;
  131. }
  132. }
  133. return true;
  134. }
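/* A sketch of the SGE accounting above, for illustration: a Call whose
 * xdr->len fits under re_max_inline_send but whose page list spans
 * three pages needs RPCRDMA_MIN_SEND_SGES plus three more Send SGEs.
 * If that total exceeds the device's max_send_sge cap, the request is
 * marshaled with a Read chunk even though its length alone would have
 * allowed it to go inline.
 */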
  135. /* The client can't know how large the actual reply will be. Thus it
  136. * plans for the largest possible reply for that particular ULP
  137. * operation. If the maximum combined reply message size exceeds the
  138. * inline threshold, the client must provide a write list or a reply chunk for
  139. * this request.
  140. */
  141. static bool rpcrdma_results_inline(struct rpcrdma_xprt *r_xprt,
  142. struct rpc_rqst *rqst)
  143. {
  144. return rqst->rq_rcv_buf.buflen <= r_xprt->rx_ep->re_max_inline_recv;
  145. }
  146. /* The client is required to provide a Reply chunk if the maximum
  147. * size of the non-payload part of the RPC Reply is larger than
  148. * the inline threshold.
  149. */
  150. static bool
  151. rpcrdma_nonpayload_inline(const struct rpcrdma_xprt *r_xprt,
  152. const struct rpc_rqst *rqst)
  153. {
  154. const struct xdr_buf *buf = &rqst->rq_rcv_buf;
  155. return (buf->head[0].iov_len + buf->tail[0].iov_len) <
  156. r_xprt->rx_ep->re_max_inline_recv;
  157. }
  158. /* Split @vec on page boundaries into SGEs. FMR registers pages, not
  159. * a byte range. Other modes coalesce these SGEs into a single MR
  160. * when they can.
  161. *
  162. * Returns pointer to next available SGE, and bumps the total number
  163. * of SGEs consumed.
  164. */
  165. static struct rpcrdma_mr_seg *
  166. rpcrdma_convert_kvec(struct kvec *vec, struct rpcrdma_mr_seg *seg,
  167. unsigned int *n)
  168. {
  169. u32 remaining, page_offset;
  170. char *base;
  171. base = vec->iov_base;
  172. page_offset = offset_in_page(base);
  173. remaining = vec->iov_len;
  174. while (remaining) {
  175. seg->mr_page = NULL;
  176. seg->mr_offset = base;
  177. seg->mr_len = min_t(u32, PAGE_SIZE - page_offset, remaining);
  178. remaining -= seg->mr_len;
  179. base += seg->mr_len;
  180. ++seg;
  181. ++(*n);
  182. page_offset = 0;
  183. }
  184. return seg;
  185. }
  186. /* Convert @xdrbuf into SGEs no larger than a page each. As they
  187. * are registered, these SGEs are then coalesced into RDMA segments
  188. * when the selected memreg mode supports it.
  189. *
  190. * Returns positive number of SGEs consumed, or a negative errno.
  191. */
  192. static int
  193. rpcrdma_convert_iovs(struct rpcrdma_xprt *r_xprt, struct xdr_buf *xdrbuf,
  194. unsigned int pos, enum rpcrdma_chunktype type,
  195. struct rpcrdma_mr_seg *seg)
  196. {
  197. unsigned long page_base;
  198. unsigned int len, n;
  199. struct page **ppages;
  200. n = 0;
  201. if (pos == 0)
  202. seg = rpcrdma_convert_kvec(&xdrbuf->head[0], seg, &n);
  203. len = xdrbuf->page_len;
  204. ppages = xdrbuf->pages + (xdrbuf->page_base >> PAGE_SHIFT);
  205. page_base = offset_in_page(xdrbuf->page_base);
  206. while (len) {
  207. /* ACL likes to be lazy in allocating pages - ACLs
  208. * are small by default but can get huge.
  209. */
  210. if (unlikely(xdrbuf->flags & XDRBUF_SPARSE_PAGES)) {
  211. if (!*ppages)
  212. *ppages = alloc_page(GFP_NOWAIT | __GFP_NOWARN);
  213. if (!*ppages)
  214. return -ENOBUFS;
  215. }
  216. seg->mr_page = *ppages;
  217. seg->mr_offset = (char *)page_base;
  218. seg->mr_len = min_t(u32, PAGE_SIZE - page_base, len);
  219. len -= seg->mr_len;
  220. ++ppages;
  221. ++seg;
  222. ++n;
  223. page_base = 0;
  224. }
  225. /* When encoding a Read chunk, the tail iovec contains an
  226. * XDR pad and may be omitted.
  227. */
  228. if (type == rpcrdma_readch && r_xprt->rx_ep->re_implicit_roundup)
  229. goto out;
  230. /* When encoding a Write chunk, some servers need to see an
  231. * extra segment for non-XDR-aligned Write chunks. The upper
  232. * layer provides space in the tail iovec that may be used
  233. * for this purpose.
  234. */
  235. if (type == rpcrdma_writech && r_xprt->rx_ep->re_implicit_roundup)
  236. goto out;
  237. if (xdrbuf->tail[0].iov_len)
  238. seg = rpcrdma_convert_kvec(&xdrbuf->tail[0], seg, &n);
  239. out:
  240. if (unlikely(n > RPCRDMA_MAX_SEGS))
  241. return -EIO;
  242. return n;
  243. }
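/* Worked example (illustrative only): converting an xdr_buf with a
 * 148-byte head (pos == 0), an 8192-byte page list that starts on a
 * page boundary, and a 4-byte tail yields one segment for the head,
 * two page-sized segments for the page list, and possibly one more
 * for the tail, depending on the chunk type and the peer's implicit
 * roundup support: at most four rpcrdma_mr_seg entries in this case.
 */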
  244. static void
  245. xdr_encode_rdma_segment(__be32 *iptr, struct rpcrdma_mr *mr)
  246. {
  247. *iptr++ = cpu_to_be32(mr->mr_handle);
  248. *iptr++ = cpu_to_be32(mr->mr_length);
  249. xdr_encode_hyper(iptr, mr->mr_offset);
  250. }
  251. static int
  252. encode_rdma_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr)
  253. {
  254. __be32 *p;
  255. p = xdr_reserve_space(xdr, 4 * sizeof(*p));
  256. if (unlikely(!p))
  257. return -EMSGSIZE;
  258. xdr_encode_rdma_segment(p, mr);
  259. return 0;
  260. }
  261. static int
  262. encode_read_segment(struct xdr_stream *xdr, struct rpcrdma_mr *mr,
  263. u32 position)
  264. {
  265. __be32 *p;
  266. p = xdr_reserve_space(xdr, 6 * sizeof(*p));
  267. if (unlikely(!p))
  268. return -EMSGSIZE;
  269. *p++ = xdr_one; /* Item present */
  270. *p++ = cpu_to_be32(position);
  271. xdr_encode_rdma_segment(p, mr);
  272. return 0;
  273. }
  274. static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
  275. struct rpcrdma_req *req,
  276. struct rpcrdma_mr_seg *seg,
  277. int nsegs, bool writing,
  278. struct rpcrdma_mr **mr)
  279. {
  280. *mr = rpcrdma_mr_pop(&req->rl_free_mrs);
  281. if (!*mr) {
  282. *mr = rpcrdma_mr_get(r_xprt);
  283. if (!*mr)
  284. goto out_getmr_err;
  285. trace_xprtrdma_mr_get(req);
  286. (*mr)->mr_req = req;
  287. }
  288. rpcrdma_mr_push(*mr, &req->rl_registered);
  289. return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
  290. out_getmr_err:
  291. trace_xprtrdma_nomrs(req);
  292. xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
  293. rpcrdma_mrs_refresh(r_xprt);
  294. return ERR_PTR(-EAGAIN);
  295. }
  296. /* Register and XDR encode the Read list. Supports encoding a list of read
  297. * segments that belong to a single read chunk.
  298. *
  299. * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
  300. *
  301. * Read chunklist (a linked list):
  302. * N elements, position P (same P for all chunks of same arg!):
  303. * 1 - PHLOO - 1 - PHLOO - ... - 1 - PHLOO - 0
  304. *
  305. * Returns zero on success, or a negative errno if a failure occurred.
  306. * @xdr is advanced to the next position in the stream.
  307. *
  308. * Only a single @pos value is currently supported.
  309. */
  310. static int rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
  311. struct rpcrdma_req *req,
  312. struct rpc_rqst *rqst,
  313. enum rpcrdma_chunktype rtype)
  314. {
  315. struct xdr_stream *xdr = &req->rl_stream;
  316. struct rpcrdma_mr_seg *seg;
  317. struct rpcrdma_mr *mr;
  318. unsigned int pos;
  319. int nsegs;
  320. if (rtype == rpcrdma_noch_pullup || rtype == rpcrdma_noch_mapped)
  321. goto done;
  322. pos = rqst->rq_snd_buf.head[0].iov_len;
  323. if (rtype == rpcrdma_areadch)
  324. pos = 0;
  325. seg = req->rl_segments;
  326. nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_snd_buf, pos,
  327. rtype, seg);
  328. if (nsegs < 0)
  329. return nsegs;
  330. do {
  331. seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, false, &mr);
  332. if (IS_ERR(seg))
  333. return PTR_ERR(seg);
  334. if (encode_read_segment(xdr, mr, pos) < 0)
  335. return -EMSGSIZE;
  336. trace_xprtrdma_chunk_read(rqst->rq_task, pos, mr, nsegs);
  337. r_xprt->rx_stats.read_chunk_count++;
  338. nsegs -= mr->mr_nents;
  339. } while (nsegs);
  340. done:
  341. if (xdr_stream_encode_item_absent(xdr) < 0)
  342. return -EMSGSIZE;
  343. return 0;
  344. }
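/* Illustrative wire layout (not taken from a capture): a Read chunk at
 * position 36 that needed two MRs is emitted by the loop above as
 *
 *   1, 36, H1, L1, O1,  1, 36, H2, L2, O2,  0
 *
 * that is, two "item present" read segments sharing the same position,
 * closed by the empty-item discriminator written at the done label.
 */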
  345. /* Register and XDR encode the Write list. Supports encoding a list
  346. * containing one array of plain segments that belong to a single
  347. * write chunk.
  348. *
  349. * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
  350. *
  351. * Write chunklist (a list of (one) counted array):
  352. * N elements:
  353. * 1 - N - HLOO - HLOO - ... - HLOO - 0
  354. *
  355. * Returns zero on success, or a negative errno if a failure occurred.
  356. * @xdr is advanced to the next position in the stream.
  357. *
  358. * Only a single Write chunk is currently supported.
  359. */
  360. static int rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt,
  361. struct rpcrdma_req *req,
  362. struct rpc_rqst *rqst,
  363. enum rpcrdma_chunktype wtype)
  364. {
  365. struct xdr_stream *xdr = &req->rl_stream;
  366. struct rpcrdma_mr_seg *seg;
  367. struct rpcrdma_mr *mr;
  368. int nsegs, nchunks;
  369. __be32 *segcount;
  370. if (wtype != rpcrdma_writech)
  371. goto done;
  372. seg = req->rl_segments;
  373. nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf,
  374. rqst->rq_rcv_buf.head[0].iov_len,
  375. wtype, seg);
  376. if (nsegs < 0)
  377. return nsegs;
  378. if (xdr_stream_encode_item_present(xdr) < 0)
  379. return -EMSGSIZE;
  380. segcount = xdr_reserve_space(xdr, sizeof(*segcount));
  381. if (unlikely(!segcount))
  382. return -EMSGSIZE;
  383. /* Actual value encoded below */
  384. nchunks = 0;
  385. do {
  386. seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
  387. if (IS_ERR(seg))
  388. return PTR_ERR(seg);
  389. if (encode_rdma_segment(xdr, mr) < 0)
  390. return -EMSGSIZE;
  391. trace_xprtrdma_chunk_write(rqst->rq_task, mr, nsegs);
  392. r_xprt->rx_stats.write_chunk_count++;
  393. r_xprt->rx_stats.total_rdma_request += mr->mr_length;
  394. nchunks++;
  395. nsegs -= mr->mr_nents;
  396. } while (nsegs);
  397. /* Update count of segments in this Write chunk */
  398. *segcount = cpu_to_be32(nchunks);
  399. done:
  400. if (xdr_stream_encode_item_absent(xdr) < 0)
  401. return -EMSGSIZE;
  402. return 0;
  403. }
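/* Illustrative wire layout (not taken from a capture): a single Write
 * chunk registered as two MRs is emitted above as
 *
 *   1, 2, H1, L1, O1, H2, L2, O2,  0
 *
 * an "item present" discriminator, a segment count of 2 back-filled
 * through *segcount, the two HLOO segments, then the discriminator
 * that terminates the Write list at the done label.
 */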
  404. /* Register and XDR encode the Reply chunk. Supports encoding an array
  405. * of plain segments that belong to a single write (reply) chunk.
  406. *
  407. * Encoding key for single-list chunks (HLOO = Handle32 Length32 Offset64):
  408. *
  409. * Reply chunk (a counted array):
  410. * N elements:
  411. * 1 - N - HLOO - HLOO - ... - HLOO
  412. *
  413. * Returns zero on success, or a negative errno if a failure occurred.
  414. * @xdr is advanced to the next position in the stream.
  415. */
  416. static int rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
  417. struct rpcrdma_req *req,
  418. struct rpc_rqst *rqst,
  419. enum rpcrdma_chunktype wtype)
  420. {
  421. struct xdr_stream *xdr = &req->rl_stream;
  422. struct rpcrdma_mr_seg *seg;
  423. struct rpcrdma_mr *mr;
  424. int nsegs, nchunks;
  425. __be32 *segcount;
  426. if (wtype != rpcrdma_replych) {
  427. if (xdr_stream_encode_item_absent(xdr) < 0)
  428. return -EMSGSIZE;
  429. return 0;
  430. }
  431. seg = req->rl_segments;
  432. nsegs = rpcrdma_convert_iovs(r_xprt, &rqst->rq_rcv_buf, 0, wtype, seg);
  433. if (nsegs < 0)
  434. return nsegs;
  435. if (xdr_stream_encode_item_present(xdr) < 0)
  436. return -EMSGSIZE;
  437. segcount = xdr_reserve_space(xdr, sizeof(*segcount));
  438. if (unlikely(!segcount))
  439. return -EMSGSIZE;
  440. /* Actual value encoded below */
  441. nchunks = 0;
  442. do {
  443. seg = rpcrdma_mr_prepare(r_xprt, req, seg, nsegs, true, &mr);
  444. if (IS_ERR(seg))
  445. return PTR_ERR(seg);
  446. if (encode_rdma_segment(xdr, mr) < 0)
  447. return -EMSGSIZE;
  448. trace_xprtrdma_chunk_reply(rqst->rq_task, mr, nsegs);
  449. r_xprt->rx_stats.reply_chunk_count++;
  450. r_xprt->rx_stats.total_rdma_request += mr->mr_length;
  451. nchunks++;
  452. nsegs -= mr->mr_nents;
  453. } while (nsegs);
  454. /* Update count of segments in the Reply chunk */
  455. *segcount = cpu_to_be32(nchunks);
  456. return 0;
  457. }
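/* The Reply chunk uses the same HLOO segments but different framing:
 * it is a single optional counted array, so the illustrative layout is
 *
 *   1, N, HLOO ... HLOO
 *
 * with no trailing list discriminator.
 */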
  458. static void rpcrdma_sendctx_done(struct kref *kref)
  459. {
  460. struct rpcrdma_req *req =
  461. container_of(kref, struct rpcrdma_req, rl_kref);
  462. struct rpcrdma_rep *rep = req->rl_reply;
  463. rpcrdma_complete_rqst(rep);
  464. rep->rr_rxprt->rx_stats.reply_waits_for_send++;
  465. }
  466. /**
  467. * rpcrdma_sendctx_unmap - DMA-unmap Send buffer
  468. * @sc: sendctx containing SGEs to unmap
  469. *
  470. */
  471. void rpcrdma_sendctx_unmap(struct rpcrdma_sendctx *sc)
  472. {
  473. struct rpcrdma_regbuf *rb = sc->sc_req->rl_sendbuf;
  474. struct ib_sge *sge;
  475. if (!sc->sc_unmap_count)
  476. return;
  477. /* The first two SGEs contain the transport header and
  478. * the inline buffer. These are always left mapped so
  479. * they can be cheaply re-used.
  480. */
  481. for (sge = &sc->sc_sges[2]; sc->sc_unmap_count;
  482. ++sge, --sc->sc_unmap_count)
  483. ib_dma_unmap_page(rdmab_device(rb), sge->addr, sge->length,
  484. DMA_TO_DEVICE);
  485. kref_put(&sc->sc_req->rl_kref, rpcrdma_sendctx_done);
  486. }
  487. /* Prepare an SGE for the RPC-over-RDMA transport header.
  488. */
  489. static void rpcrdma_prepare_hdr_sge(struct rpcrdma_xprt *r_xprt,
  490. struct rpcrdma_req *req, u32 len)
  491. {
  492. struct rpcrdma_sendctx *sc = req->rl_sendctx;
  493. struct rpcrdma_regbuf *rb = req->rl_rdmabuf;
  494. struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
  495. sge->addr = rdmab_addr(rb);
  496. sge->length = len;
  497. sge->lkey = rdmab_lkey(rb);
  498. ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
  499. DMA_TO_DEVICE);
  500. }
  501. /* The head iovec is straightforward, as it is usually already
  502. * DMA-mapped. Sync the content that has changed.
  503. */
  504. static bool rpcrdma_prepare_head_iov(struct rpcrdma_xprt *r_xprt,
  505. struct rpcrdma_req *req, unsigned int len)
  506. {
  507. struct rpcrdma_sendctx *sc = req->rl_sendctx;
  508. struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
  509. struct rpcrdma_regbuf *rb = req->rl_sendbuf;
  510. if (!rpcrdma_regbuf_dma_map(r_xprt, rb))
  511. return false;
  512. sge->addr = rdmab_addr(rb);
  513. sge->length = len;
  514. sge->lkey = rdmab_lkey(rb);
  515. ib_dma_sync_single_for_device(rdmab_device(rb), sge->addr, sge->length,
  516. DMA_TO_DEVICE);
  517. return true;
  518. }
  519. /* If there is a page list present, DMA map and prepare an
  520. * SGE for each page to be sent.
  521. */
  522. static bool rpcrdma_prepare_pagelist(struct rpcrdma_req *req,
  523. struct xdr_buf *xdr)
  524. {
  525. struct rpcrdma_sendctx *sc = req->rl_sendctx;
  526. struct rpcrdma_regbuf *rb = req->rl_sendbuf;
  527. unsigned int page_base, len, remaining;
  528. struct page **ppages;
  529. struct ib_sge *sge;
  530. ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
  531. page_base = offset_in_page(xdr->page_base);
  532. remaining = xdr->page_len;
  533. while (remaining) {
  534. sge = &sc->sc_sges[req->rl_wr.num_sge++];
  535. len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
  536. sge->addr = ib_dma_map_page(rdmab_device(rb), *ppages,
  537. page_base, len, DMA_TO_DEVICE);
  538. if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
  539. goto out_mapping_err;
  540. sge->length = len;
  541. sge->lkey = rdmab_lkey(rb);
  542. sc->sc_unmap_count++;
  543. ppages++;
  544. remaining -= len;
  545. page_base = 0;
  546. }
  547. return true;
  548. out_mapping_err:
  549. trace_xprtrdma_dma_maperr(sge->addr);
  550. return false;
  551. }
  552. /* The tail iovec may include an XDR pad for the page list,
  553. * as well as additional content, and may not reside in the
  554. * same page as the head iovec.
  555. */
  556. static bool rpcrdma_prepare_tail_iov(struct rpcrdma_req *req,
  557. struct xdr_buf *xdr,
  558. unsigned int page_base, unsigned int len)
  559. {
  560. struct rpcrdma_sendctx *sc = req->rl_sendctx;
  561. struct ib_sge *sge = &sc->sc_sges[req->rl_wr.num_sge++];
  562. struct rpcrdma_regbuf *rb = req->rl_sendbuf;
  563. struct page *page = virt_to_page(xdr->tail[0].iov_base);
  564. sge->addr = ib_dma_map_page(rdmab_device(rb), page, page_base, len,
  565. DMA_TO_DEVICE);
  566. if (ib_dma_mapping_error(rdmab_device(rb), sge->addr))
  567. goto out_mapping_err;
  568. sge->length = len;
  569. sge->lkey = rdmab_lkey(rb);
  570. ++sc->sc_unmap_count;
  571. return true;
  572. out_mapping_err:
  573. trace_xprtrdma_dma_maperr(sge->addr);
  574. return false;
  575. }
  576. /* Copy the tail to the end of the head buffer.
  577. */
  578. static void rpcrdma_pullup_tail_iov(struct rpcrdma_xprt *r_xprt,
  579. struct rpcrdma_req *req,
  580. struct xdr_buf *xdr)
  581. {
  582. unsigned char *dst;
  583. dst = (unsigned char *)xdr->head[0].iov_base;
  584. dst += xdr->head[0].iov_len + xdr->page_len;
  585. memmove(dst, xdr->tail[0].iov_base, xdr->tail[0].iov_len);
  586. r_xprt->rx_stats.pullup_copy_count += xdr->tail[0].iov_len;
  587. }
  588. /* Copy pagelist content into the head buffer.
  589. */
  590. static void rpcrdma_pullup_pagelist(struct rpcrdma_xprt *r_xprt,
  591. struct rpcrdma_req *req,
  592. struct xdr_buf *xdr)
  593. {
  594. unsigned int len, page_base, remaining;
  595. struct page **ppages;
  596. unsigned char *src, *dst;
  597. dst = (unsigned char *)xdr->head[0].iov_base;
  598. dst += xdr->head[0].iov_len;
  599. ppages = xdr->pages + (xdr->page_base >> PAGE_SHIFT);
  600. page_base = offset_in_page(xdr->page_base);
  601. remaining = xdr->page_len;
  602. while (remaining) {
  603. src = page_address(*ppages);
  604. src += page_base;
  605. len = min_t(unsigned int, PAGE_SIZE - page_base, remaining);
  606. memcpy(dst, src, len);
  607. r_xprt->rx_stats.pullup_copy_count += len;
  608. ppages++;
  609. dst += len;
  610. remaining -= len;
  611. page_base = 0;
  612. }
  613. }
  614. /* Copy the contents of @xdr into @rl_sendbuf and DMA sync it.
  615. * When the head, pagelist, and tail are small, a pull-up copy
  616. * is considerably less costly than DMA mapping the components
  617. * of @xdr.
  618. *
  619. * Assumptions:
  620. * - the caller has already verified that the total length
  621. * of the RPC Call body will fit into @rl_sendbuf.
  622. */
  623. static bool rpcrdma_prepare_noch_pullup(struct rpcrdma_xprt *r_xprt,
  624. struct rpcrdma_req *req,
  625. struct xdr_buf *xdr)
  626. {
  627. if (unlikely(xdr->tail[0].iov_len))
  628. rpcrdma_pullup_tail_iov(r_xprt, req, xdr);
  629. if (unlikely(xdr->page_len))
  630. rpcrdma_pullup_pagelist(r_xprt, req, xdr);
  631. /* The whole RPC message resides in the head iovec now */
  632. return rpcrdma_prepare_head_iov(r_xprt, req, xdr->len);
  633. }
  634. static bool rpcrdma_prepare_noch_mapped(struct rpcrdma_xprt *r_xprt,
  635. struct rpcrdma_req *req,
  636. struct xdr_buf *xdr)
  637. {
  638. struct kvec *tail = &xdr->tail[0];
  639. if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
  640. return false;
  641. if (xdr->page_len)
  642. if (!rpcrdma_prepare_pagelist(req, xdr))
  643. return false;
  644. if (tail->iov_len)
  645. if (!rpcrdma_prepare_tail_iov(req, xdr,
  646. offset_in_page(tail->iov_base),
  647. tail->iov_len))
  648. return false;
  649. if (req->rl_sendctx->sc_unmap_count)
  650. kref_get(&req->rl_kref);
  651. return true;
  652. }
  653. static bool rpcrdma_prepare_readch(struct rpcrdma_xprt *r_xprt,
  654. struct rpcrdma_req *req,
  655. struct xdr_buf *xdr)
  656. {
  657. if (!rpcrdma_prepare_head_iov(r_xprt, req, xdr->head[0].iov_len))
  658. return false;
  659. /* If there is a Read chunk, the page list is being handled
  660. * via explicit RDMA, and thus is skipped here.
  661. */
  662. /* Do not include the tail if it is only an XDR pad */
  663. if (xdr->tail[0].iov_len > 3) {
  664. unsigned int page_base, len;
  665. /* If the content in the page list is an odd length,
  666. * xdr_write_pages() adds a pad at the beginning of
  667. * the tail iovec. Force the tail's non-pad content to
  668. * land at the next XDR position in the Send message.
  669. */
  670. page_base = offset_in_page(xdr->tail[0].iov_base);
  671. len = xdr->tail[0].iov_len;
  672. page_base += len & 3;
  673. len -= len & 3;
  674. if (!rpcrdma_prepare_tail_iov(req, xdr, page_base, len))
  675. return false;
  676. kref_get(&req->rl_kref);
  677. }
  678. return true;
  679. }
  680. /**
  681. * rpcrdma_prepare_send_sges - Construct SGEs for a Send WR
  682. * @r_xprt: controlling transport
  683. * @req: context of RPC Call being marshalled
  684. * @hdrlen: size of transport header, in bytes
  685. * @xdr: xdr_buf containing RPC Call
  686. * @rtype: chunk type being encoded
  687. *
  688. * Returns 0 on success; otherwise a negative errno is returned.
  689. */
  690. inline int rpcrdma_prepare_send_sges(struct rpcrdma_xprt *r_xprt,
  691. struct rpcrdma_req *req, u32 hdrlen,
  692. struct xdr_buf *xdr,
  693. enum rpcrdma_chunktype rtype)
  694. {
  695. int ret;
  696. ret = -EAGAIN;
  697. req->rl_sendctx = rpcrdma_sendctx_get_locked(r_xprt);
  698. if (!req->rl_sendctx)
  699. goto out_nosc;
  700. req->rl_sendctx->sc_unmap_count = 0;
  701. req->rl_sendctx->sc_req = req;
  702. kref_init(&req->rl_kref);
  703. req->rl_wr.wr_cqe = &req->rl_sendctx->sc_cqe;
  704. req->rl_wr.sg_list = req->rl_sendctx->sc_sges;
  705. req->rl_wr.num_sge = 0;
  706. req->rl_wr.opcode = IB_WR_SEND;
  707. rpcrdma_prepare_hdr_sge(r_xprt, req, hdrlen);
  708. ret = -EIO;
  709. switch (rtype) {
  710. case rpcrdma_noch_pullup:
  711. if (!rpcrdma_prepare_noch_pullup(r_xprt, req, xdr))
  712. goto out_unmap;
  713. break;
  714. case rpcrdma_noch_mapped:
  715. if (!rpcrdma_prepare_noch_mapped(r_xprt, req, xdr))
  716. goto out_unmap;
  717. break;
  718. case rpcrdma_readch:
  719. if (!rpcrdma_prepare_readch(r_xprt, req, xdr))
  720. goto out_unmap;
  721. break;
  722. case rpcrdma_areadch:
  723. break;
  724. default:
  725. goto out_unmap;
  726. }
  727. return 0;
  728. out_unmap:
  729. rpcrdma_sendctx_unmap(req->rl_sendctx);
  730. out_nosc:
  731. trace_xprtrdma_prepsend_failed(&req->rl_slot, ret);
  732. return ret;
  733. }
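/* For a typical rpcrdma_noch_mapped Call, the Send WR built above ends
 * up with an SGE list shaped roughly like this (illustrative sketch):
 *
 *   sc_sges[0]   transport header  (rl_rdmabuf, always mapped)
 *   sc_sges[1]   RPC head iovec    (rl_sendbuf, always mapped)
 *   sc_sges[2..] page list + tail  (mapped here, later undone by
 *                                   rpcrdma_sendctx_unmap)
 *
 * which is why rpcrdma_sendctx_unmap starts unmapping at sc_sges[2].
 */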
  734. /**
  735. * rpcrdma_marshal_req - Marshal and send one RPC request
  736. * @r_xprt: controlling transport
  737. * @rqst: RPC request to be marshaled
  738. *
  739. * For the RPC in "rqst", this function:
  740. * - Chooses the transfer mode (e.g., RDMA_MSG or RDMA_NOMSG)
  741. * - Registers Read, Write, and Reply chunks
  742. * - Constructs the transport header
  743. * - Posts a Send WR to send the transport header and request
  744. *
  745. * Returns:
  746. * %0 if the RPC was sent successfully,
  747. * %-ENOTCONN if the connection was lost,
  748. * %-EAGAIN if the caller should call again with the same arguments,
  749. * %-ENOBUFS if the caller should call again after a delay,
  750. * %-EMSGSIZE if the transport header is too small,
  751. * %-EIO if a permanent problem occurred while marshaling.
  752. */
  753. int
  754. rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst)
  755. {
  756. struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
  757. struct xdr_stream *xdr = &req->rl_stream;
  758. enum rpcrdma_chunktype rtype, wtype;
  759. struct xdr_buf *buf = &rqst->rq_snd_buf;
  760. bool ddp_allowed;
  761. __be32 *p;
  762. int ret;
  763. rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
  764. xdr_init_encode(xdr, &req->rl_hdrbuf, rdmab_data(req->rl_rdmabuf),
  765. rqst);
  766. /* Fixed header fields */
  767. ret = -EMSGSIZE;
  768. p = xdr_reserve_space(xdr, 4 * sizeof(*p));
  769. if (!p)
  770. goto out_err;
  771. *p++ = rqst->rq_xid;
  772. *p++ = rpcrdma_version;
  773. *p++ = r_xprt->rx_buf.rb_max_requests;
  774. /* When the ULP employs a GSS flavor that guarantees integrity
  775. * or privacy, direct data placement of individual data items
  776. * is not allowed.
  777. */
  778. ddp_allowed = !(rqst->rq_cred->cr_auth->au_flags &
  779. RPCAUTH_AUTH_DATATOUCH);
  780. /*
  781. * Chunks needed for results?
  782. *
  783. * o If the expected result is under the inline threshold, all ops
  784. * return as inline.
  785. * o Large read ops return data as write chunk(s), header as
  786. * inline.
  787. * o Large non-read ops return as a single reply chunk.
  788. */
  789. if (rpcrdma_results_inline(r_xprt, rqst))
  790. wtype = rpcrdma_noch;
  791. else if ((ddp_allowed && rqst->rq_rcv_buf.flags & XDRBUF_READ) &&
  792. rpcrdma_nonpayload_inline(r_xprt, rqst))
  793. wtype = rpcrdma_writech;
  794. else
  795. wtype = rpcrdma_replych;
  796. /*
  797. * Chunks needed for arguments?
  798. *
  799. * o If the total request is under the inline threshold, all ops
  800. * are sent as inline.
  801. * o Large write ops transmit data as read chunk(s), header as
  802. * inline.
  803. * o Large non-write ops are sent with the entire message as a
  804. * single read chunk (protocol 0-position special case).
  805. *
  806. * This assumes that the upper layer does not present a request
  807. * that both has a data payload, and whose non-data arguments
  808. * by themselves are larger than the inline threshold.
  809. */
  810. if (rpcrdma_args_inline(r_xprt, rqst)) {
  811. *p++ = rdma_msg;
  812. rtype = buf->len < rdmab_length(req->rl_sendbuf) ?
  813. rpcrdma_noch_pullup : rpcrdma_noch_mapped;
  814. } else if (ddp_allowed && buf->flags & XDRBUF_WRITE) {
  815. *p++ = rdma_msg;
  816. rtype = rpcrdma_readch;
  817. } else {
  818. r_xprt->rx_stats.nomsg_call_count++;
  819. *p++ = rdma_nomsg;
  820. rtype = rpcrdma_areadch;
  821. }
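/* To summarize the choice just made (illustration, not new policy):
 * an inline-sized Call goes out as rdma_msg with rtype
 * rpcrdma_noch_pullup or rpcrdma_noch_mapped; a large WRITE-style
 * payload goes out as rdma_msg plus a Read chunk (rpcrdma_readch);
 * everything else is sent as rdma_nomsg with the whole message in a
 * Position Zero Read chunk (rpcrdma_areadch).
 */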
  822. /* This implementation supports the following combinations
  823. * of chunk lists in one RPC-over-RDMA Call message:
  824. *
  825. * - Read list
  826. * - Write list
  827. * - Reply chunk
  828. * - Read list + Reply chunk
  829. *
  830. * It might not yet support the following combinations:
  831. *
  832. * - Read list + Write list
  833. *
  834. * It does not support the following combinations:
  835. *
  836. * - Write list + Reply chunk
  837. * - Read list + Write list + Reply chunk
  838. *
  839. * This implementation supports only a single chunk in each
  840. * Read or Write list. Thus for example the client cannot
  841. * send a Call message with a Position Zero Read chunk and a
  842. * regular Read chunk at the same time.
  843. */
  844. ret = rpcrdma_encode_read_list(r_xprt, req, rqst, rtype);
  845. if (ret)
  846. goto out_err;
  847. ret = rpcrdma_encode_write_list(r_xprt, req, rqst, wtype);
  848. if (ret)
  849. goto out_err;
  850. ret = rpcrdma_encode_reply_chunk(r_xprt, req, rqst, wtype);
  851. if (ret)
  852. goto out_err;
  853. ret = rpcrdma_prepare_send_sges(r_xprt, req, req->rl_hdrbuf.len,
  854. buf, rtype);
  855. if (ret)
  856. goto out_err;
  857. trace_xprtrdma_marshal(req, rtype, wtype);
  858. return 0;
  859. out_err:
  860. trace_xprtrdma_marshal_failed(rqst, ret);
  861. r_xprt->rx_stats.failed_marshal_count++;
  862. frwr_reset(req);
  863. return ret;
  864. }
  865. static void __rpcrdma_update_cwnd_locked(struct rpc_xprt *xprt,
  866. struct rpcrdma_buffer *buf,
  867. u32 grant)
  868. {
  869. buf->rb_credits = grant;
  870. xprt->cwnd = grant << RPC_CWNDSHIFT;
  871. }
  872. static void rpcrdma_update_cwnd(struct rpcrdma_xprt *r_xprt, u32 grant)
  873. {
  874. struct rpc_xprt *xprt = &r_xprt->rx_xprt;
  875. spin_lock(&xprt->transport_lock);
  876. __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, grant);
  877. spin_unlock(&xprt->transport_lock);
  878. }
  879. /**
  880. * rpcrdma_reset_cwnd - Reset the xprt's congestion window
  881. * @r_xprt: controlling transport instance
  882. *
  883. * Prepare @r_xprt for the next connection by reinitializing
  884. * its credit grant to one (see RFC 8166, Section 3.3.3).
  885. */
  886. void rpcrdma_reset_cwnd(struct rpcrdma_xprt *r_xprt)
  887. {
  888. struct rpc_xprt *xprt = &r_xprt->rx_xprt;
  889. spin_lock(&xprt->transport_lock);
  890. xprt->cong = 0;
  891. __rpcrdma_update_cwnd_locked(xprt, &r_xprt->rx_buf, 1);
  892. spin_unlock(&xprt->transport_lock);
  893. }
  894. /**
  895. * rpcrdma_inline_fixup - Scatter inline received data into rqst's iovecs
  896. * @rqst: controlling RPC request
  897. * @srcp: points to RPC message payload in receive buffer
  898. * @copy_len: remaining length of receive buffer content
  899. * @pad: Write chunk pad bytes needed (zero for pure inline)
  900. *
  901. * The upper layer has set the maximum number of bytes it can
  902. * receive in each component of rq_rcv_buf. These values are set in
  903. * the head.iov_len, page_len, tail.iov_len, and buflen fields.
  904. *
  905. * Unlike the TCP equivalent (xdr_partial_copy_from_skb), in
  906. * many cases this function simply updates iov_base pointers in
  907. * rq_rcv_buf to point directly to the received reply data, to
  908. * avoid copying reply data.
  909. *
  910. * Returns the count of bytes which had to be memcopied.
  911. */
  912. static unsigned long
  913. rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
  914. {
  915. unsigned long fixup_copy_count;
  916. int i, npages, curlen;
  917. char *destp;
  918. struct page **ppages;
  919. int page_base;
  920. /* The head iovec is redirected to the RPC reply message
  921. * in the receive buffer, to avoid a memcopy.
  922. */
  923. rqst->rq_rcv_buf.head[0].iov_base = srcp;
  924. rqst->rq_private_buf.head[0].iov_base = srcp;
  925. /* The contents of the receive buffer that follow
  926. * head.iov_len bytes are copied into the page list.
  927. */
  928. curlen = rqst->rq_rcv_buf.head[0].iov_len;
  929. if (curlen > copy_len)
  930. curlen = copy_len;
  931. srcp += curlen;
  932. copy_len -= curlen;
  933. ppages = rqst->rq_rcv_buf.pages +
  934. (rqst->rq_rcv_buf.page_base >> PAGE_SHIFT);
  935. page_base = offset_in_page(rqst->rq_rcv_buf.page_base);
  936. fixup_copy_count = 0;
  937. if (copy_len && rqst->rq_rcv_buf.page_len) {
  938. int pagelist_len;
  939. pagelist_len = rqst->rq_rcv_buf.page_len;
  940. if (pagelist_len > copy_len)
  941. pagelist_len = copy_len;
  942. npages = PAGE_ALIGN(page_base + pagelist_len) >> PAGE_SHIFT;
  943. for (i = 0; i < npages; i++) {
  944. curlen = PAGE_SIZE - page_base;
  945. if (curlen > pagelist_len)
  946. curlen = pagelist_len;
  947. destp = kmap_atomic(ppages[i]);
  948. memcpy(destp + page_base, srcp, curlen);
  949. flush_dcache_page(ppages[i]);
  950. kunmap_atomic(destp);
  951. srcp += curlen;
  952. copy_len -= curlen;
  953. fixup_copy_count += curlen;
  954. pagelist_len -= curlen;
  955. if (!pagelist_len)
  956. break;
  957. page_base = 0;
  958. }
  959. /* Implicit padding for the last segment in a Write
  960. * chunk is inserted inline at the front of the tail
  961. * iovec. The upper layer ignores the content of
  962. * the pad. Simply ensure inline content in the tail
  963. * that follows the Write chunk is properly aligned.
  964. */
  965. if (pad)
  966. srcp -= pad;
  967. }
  968. /* The tail iovec is redirected to the remaining data
  969. * in the receive buffer, to avoid a memcopy.
  970. */
  971. if (copy_len || pad) {
  972. rqst->rq_rcv_buf.tail[0].iov_base = srcp;
  973. rqst->rq_private_buf.tail[0].iov_base = srcp;
  974. }
  975. if (fixup_copy_count)
  976. trace_xprtrdma_fixup(rqst, fixup_copy_count);
  977. return fixup_copy_count;
  978. }
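/* Worked example (illustrative only): a 140-byte inline reply whose
 * rq_rcv_buf was set up with a 104-byte head and a page list. The head
 * iovec is redirected at srcp with no copy, the remaining 36 bytes are
 * copied into the first receive page, and the fixup_copy_count
 * returned to the caller is 36.
 */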
  979. /* By convention, backchannel calls arrive via rdma_msg type
  980. * messages, and never populate the chunk lists. This makes
  981. * the RPC/RDMA header small and fixed in size, so it is
  982. * straightforward to check the RPC header's direction field.
  983. */
  984. static bool
  985. rpcrdma_is_bcall(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
  986. #if defined(CONFIG_SUNRPC_BACKCHANNEL)
  987. {
  988. struct xdr_stream *xdr = &rep->rr_stream;
  989. __be32 *p;
  990. if (rep->rr_proc != rdma_msg)
  991. return false;
  992. /* Peek at stream contents without advancing. */
  993. p = xdr_inline_decode(xdr, 0);
  994. /* Chunk lists */
  995. if (*p++ != xdr_zero)
  996. return false;
  997. if (*p++ != xdr_zero)
  998. return false;
  999. if (*p++ != xdr_zero)
  1000. return false;
  1001. /* RPC header */
  1002. if (*p++ != rep->rr_xid)
  1003. return false;
  1004. if (*p != cpu_to_be32(RPC_CALL))
  1005. return false;
  1006. /* Now that we are sure this is a backchannel call,
  1007. * advance to the RPC header.
  1008. */
  1009. p = xdr_inline_decode(xdr, 3 * sizeof(*p));
  1010. if (unlikely(!p))
  1011. goto out_short;
  1012. rpcrdma_bc_receive_call(r_xprt, rep);
  1013. return true;
  1014. out_short:
  1015. pr_warn("RPC/RDMA short backward direction call\n");
  1016. return true;
  1017. }
  1018. #else /* CONFIG_SUNRPC_BACKCHANNEL */
  1019. {
  1020. return false;
  1021. }
  1022. #endif /* CONFIG_SUNRPC_BACKCHANNEL */
  1023. static int decode_rdma_segment(struct xdr_stream *xdr, u32 *length)
  1024. {
  1025. u32 handle;
  1026. u64 offset;
  1027. __be32 *p;
  1028. p = xdr_inline_decode(xdr, 4 * sizeof(*p));
  1029. if (unlikely(!p))
  1030. return -EIO;
  1031. handle = be32_to_cpup(p++);
  1032. *length = be32_to_cpup(p++);
  1033. xdr_decode_hyper(p, &offset);
  1034. trace_xprtrdma_decode_seg(handle, *length, offset);
  1035. return 0;
  1036. }
  1037. static int decode_write_chunk(struct xdr_stream *xdr, u32 *length)
  1038. {
  1039. u32 segcount, seglength;
  1040. __be32 *p;
  1041. p = xdr_inline_decode(xdr, sizeof(*p));
  1042. if (unlikely(!p))
  1043. return -EIO;
  1044. *length = 0;
  1045. segcount = be32_to_cpup(p);
  1046. while (segcount--) {
  1047. if (decode_rdma_segment(xdr, &seglength))
  1048. return -EIO;
  1049. *length += seglength;
  1050. }
  1051. return 0;
  1052. }
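/* For example (illustrative values): decoding a Write chunk whose
 * segment count is 2, with segment lengths of 4096 and 512 bytes,
 * leaves *length at 4608, the total number of bytes the responder
 * wrote via RDMA for this chunk.
 */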
  1053. /* In RPC-over-RDMA Version One replies, a Read list is never
  1054. * expected. This decoder is a stub that returns an error if
  1055. * a Read list is present.
  1056. */
  1057. static int decode_read_list(struct xdr_stream *xdr)
  1058. {
  1059. __be32 *p;
  1060. p = xdr_inline_decode(xdr, sizeof(*p));
  1061. if (unlikely(!p))
  1062. return -EIO;
  1063. if (unlikely(*p != xdr_zero))
  1064. return -EIO;
  1065. return 0;
  1066. }
  1067. /* Supports only one Write chunk in the Write list
  1068. */
  1069. static int decode_write_list(struct xdr_stream *xdr, u32 *length)
  1070. {
  1071. u32 chunklen;
  1072. bool first;
  1073. __be32 *p;
  1074. *length = 0;
  1075. first = true;
  1076. do {
  1077. p = xdr_inline_decode(xdr, sizeof(*p));
  1078. if (unlikely(!p))
  1079. return -EIO;
  1080. if (*p == xdr_zero)
  1081. break;
  1082. if (!first)
  1083. return -EIO;
  1084. if (decode_write_chunk(xdr, &chunklen))
  1085. return -EIO;
  1086. *length += chunklen;
  1087. first = false;
  1088. } while (true);
  1089. return 0;
  1090. }
  1091. static int decode_reply_chunk(struct xdr_stream *xdr, u32 *length)
  1092. {
  1093. __be32 *p;
  1094. p = xdr_inline_decode(xdr, sizeof(*p));
  1095. if (unlikely(!p))
  1096. return -EIO;
  1097. *length = 0;
  1098. if (*p != xdr_zero)
  1099. if (decode_write_chunk(xdr, length))
  1100. return -EIO;
  1101. return 0;
  1102. }
  1103. static int
  1104. rpcrdma_decode_msg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
  1105. struct rpc_rqst *rqst)
  1106. {
  1107. struct xdr_stream *xdr = &rep->rr_stream;
  1108. u32 writelist, replychunk, rpclen;
  1109. char *base;
  1110. /* Decode the chunk lists */
  1111. if (decode_read_list(xdr))
  1112. return -EIO;
  1113. if (decode_write_list(xdr, &writelist))
  1114. return -EIO;
  1115. if (decode_reply_chunk(xdr, &replychunk))
  1116. return -EIO;
  1117. /* RDMA_MSG sanity checks */
  1118. if (unlikely(replychunk))
  1119. return -EIO;
  1120. /* Build the RPC reply's Payload stream in rqst->rq_rcv_buf */
  1121. base = (char *)xdr_inline_decode(xdr, 0);
  1122. rpclen = xdr_stream_remaining(xdr);
  1123. r_xprt->rx_stats.fixup_copy_count +=
  1124. rpcrdma_inline_fixup(rqst, base, rpclen, writelist & 3);
  1125. r_xprt->rx_stats.total_rdma_reply += writelist;
  1126. return rpclen + xdr_align_size(writelist);
  1127. }
  1128. static noinline int
  1129. rpcrdma_decode_nomsg(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep)
  1130. {
  1131. struct xdr_stream *xdr = &rep->rr_stream;
  1132. u32 writelist, replychunk;
  1133. /* Decode the chunk lists */
  1134. if (decode_read_list(xdr))
  1135. return -EIO;
  1136. if (decode_write_list(xdr, &writelist))
  1137. return -EIO;
  1138. if (decode_reply_chunk(xdr, &replychunk))
  1139. return -EIO;
  1140. /* RDMA_NOMSG sanity checks */
  1141. if (unlikely(writelist))
  1142. return -EIO;
  1143. if (unlikely(!replychunk))
  1144. return -EIO;
  1145. /* Reply chunk buffer already is the reply vector */
  1146. r_xprt->rx_stats.total_rdma_reply += replychunk;
  1147. return replychunk;
  1148. }
  1149. static noinline int
  1150. rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
  1151. struct rpc_rqst *rqst)
  1152. {
  1153. struct xdr_stream *xdr = &rep->rr_stream;
  1154. __be32 *p;
  1155. p = xdr_inline_decode(xdr, sizeof(*p));
  1156. if (unlikely(!p))
  1157. return -EIO;
  1158. switch (*p) {
  1159. case err_vers:
  1160. p = xdr_inline_decode(xdr, 2 * sizeof(*p));
  1161. if (!p)
  1162. break;
  1163. dprintk("RPC: %s: server reports "
  1164. "version error (%u-%u), xid %08x\n", __func__,
  1165. be32_to_cpup(p), be32_to_cpu(*(p + 1)),
  1166. be32_to_cpu(rep->rr_xid));
  1167. break;
  1168. case err_chunk:
  1169. dprintk("RPC: %s: server reports "
  1170. "header decoding error, xid %08x\n", __func__,
  1171. be32_to_cpu(rep->rr_xid));
  1172. break;
  1173. default:
  1174. dprintk("RPC: %s: server reports "
  1175. "unrecognized error %d, xid %08x\n", __func__,
  1176. be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
  1177. }
  1178. r_xprt->rx_stats.bad_reply_count++;
  1179. return -EREMOTEIO;
  1180. }
  1181. /* Perform XID lookup, reconstruction of the RPC reply, and
  1182. * RPC completion while holding the transport lock to ensure
  1183. * the rep, rqst, and rq_task pointers remain stable.
  1184. */
  1185. void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
  1186. {
  1187. struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
  1188. struct rpc_xprt *xprt = &r_xprt->rx_xprt;
  1189. struct rpc_rqst *rqst = rep->rr_rqst;
  1190. int status;
  1191. switch (rep->rr_proc) {
  1192. case rdma_msg:
  1193. status = rpcrdma_decode_msg(r_xprt, rep, rqst);
  1194. break;
  1195. case rdma_nomsg:
  1196. status = rpcrdma_decode_nomsg(r_xprt, rep);
  1197. break;
  1198. case rdma_error:
  1199. status = rpcrdma_decode_error(r_xprt, rep, rqst);
  1200. break;
  1201. default:
  1202. status = -EIO;
  1203. }
  1204. if (status < 0)
  1205. goto out_badheader;
  1206. out:
  1207. spin_lock(&xprt->queue_lock);
  1208. xprt_complete_rqst(rqst->rq_task, status);
  1209. xprt_unpin_rqst(rqst);
  1210. spin_unlock(&xprt->queue_lock);
  1211. return;
  1212. /* If the incoming reply terminated a pending RPC, the next
  1213. * RPC call will post a replacement receive buffer as it is
  1214. * being marshaled.
  1215. */
  1216. out_badheader:
  1217. trace_xprtrdma_reply_hdr(rep);
  1218. r_xprt->rx_stats.bad_reply_count++;
  1219. goto out;
  1220. }
  1221. static void rpcrdma_reply_done(struct kref *kref)
  1222. {
  1223. struct rpcrdma_req *req =
  1224. container_of(kref, struct rpcrdma_req, rl_kref);
  1225. rpcrdma_complete_rqst(req->rl_reply);
  1226. }
  1227. /**
  1228. * rpcrdma_reply_handler - Process received RPC/RDMA messages
  1229. * @rep: Incoming rpcrdma_rep object to process
  1230. *
  1231. * Errors must result in the RPC task either being awakened, or
  1232. * allowed to timeout, to discover the errors at that time.
  1233. */
  1234. void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
  1235. {
  1236. struct rpcrdma_xprt *r_xprt = rep->rr_rxprt;
  1237. struct rpc_xprt *xprt = &r_xprt->rx_xprt;
  1238. struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
  1239. struct rpcrdma_req *req;
  1240. struct rpc_rqst *rqst;
  1241. u32 credits;
  1242. __be32 *p;
  1243. /* Any data means we had a useful conversation, so
  1244. * we don't need to delay the next reconnect.
  1245. */
  1246. if (xprt->reestablish_timeout)
  1247. xprt->reestablish_timeout = 0;
  1248. /* Fixed transport header fields */
  1249. xdr_init_decode(&rep->rr_stream, &rep->rr_hdrbuf,
  1250. rep->rr_hdrbuf.head[0].iov_base, NULL);
  1251. p = xdr_inline_decode(&rep->rr_stream, 4 * sizeof(*p));
  1252. if (unlikely(!p))
  1253. goto out_shortreply;
  1254. rep->rr_xid = *p++;
  1255. rep->rr_vers = *p++;
  1256. credits = be32_to_cpu(*p++);
  1257. rep->rr_proc = *p++;
  1258. if (rep->rr_vers != rpcrdma_version)
  1259. goto out_badversion;
  1260. if (rpcrdma_is_bcall(r_xprt, rep))
  1261. return;
  1262. /* Match incoming rpcrdma_rep to an rpcrdma_req to
  1263. * get context for handling any incoming chunks.
  1264. */
  1265. spin_lock(&xprt->queue_lock);
  1266. rqst = xprt_lookup_rqst(xprt, rep->rr_xid);
  1267. if (!rqst)
  1268. goto out_norqst;
  1269. xprt_pin_rqst(rqst);
  1270. spin_unlock(&xprt->queue_lock);
  1271. if (credits == 0)
  1272. credits = 1; /* don't deadlock */
  1273. else if (credits > r_xprt->rx_ep->re_max_requests)
  1274. credits = r_xprt->rx_ep->re_max_requests;
  1275. if (buf->rb_credits != credits)
  1276. rpcrdma_update_cwnd(r_xprt, credits);
  1277. rpcrdma_post_recvs(r_xprt, false);
  1278. req = rpcr_to_rdmar(rqst);
  1279. if (req->rl_reply) {
  1280. trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
  1281. rpcrdma_recv_buffer_put(req->rl_reply);
  1282. }
  1283. req->rl_reply = rep;
  1284. rep->rr_rqst = rqst;
  1285. trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
  1286. if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
  1287. frwr_reminv(rep, &req->rl_registered);
  1288. if (!list_empty(&req->rl_registered))
  1289. frwr_unmap_async(r_xprt, req);
  1290. /* LocalInv completion will complete the RPC */
  1291. else
  1292. kref_put(&req->rl_kref, rpcrdma_reply_done);
  1293. return;
  1294. out_badversion:
  1295. trace_xprtrdma_reply_vers(rep);
  1296. goto out;
  1297. out_norqst:
  1298. spin_unlock(&xprt->queue_lock);
  1299. trace_xprtrdma_reply_rqst(rep);
  1300. goto out;
  1301. out_shortreply:
  1302. trace_xprtrdma_reply_short(rep);
  1303. out:
  1304. rpcrdma_recv_buffer_put(rep);
  1305. }
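/* Receive-path sketch, for orientation only: each completed Receive
 * hands its rpcrdma_rep to rpcrdma_reply_handler, which validates the
 * fixed header, matches the XID to a pinned rpc_rqst, and refreshes
 * the credit grant. Once any remotely or locally invalidated MRs are
 * dealt with, rpcrdma_complete_rqst decodes the chunk lists via
 * rpcrdma_decode_msg/_nomsg/_error and completes the RPC under the
 * transport's queue lock.
 */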