
/openvswitch/lib/netlink-socket.c

https://github.com/kevinfhell/dpdk-ovs
C | 1219 lines | 841 code | 139 blank | 239 comment
/*
 * Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013 Nicira, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <config.h>
#include "netlink-socket.h"
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
#include "coverage.h"
#include "dynamic-string.h"
#include "hash.h"
#include "hmap.h"
#include "netlink.h"
#include "netlink-protocol.h"
#include "ofpbuf.h"
#include "ovs-thread.h"
#include "poll-loop.h"
#include "socket-util.h"
#include "util.h"
#include "vlog.h"

VLOG_DEFINE_THIS_MODULE(netlink_socket);

COVERAGE_DEFINE(netlink_overflow);
COVERAGE_DEFINE(netlink_received);
COVERAGE_DEFINE(netlink_recv_jumbo);
COVERAGE_DEFINE(netlink_send);
COVERAGE_DEFINE(netlink_sent);

/* Linux header file confusion causes this to be undefined. */
#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* A single (bad) Netlink message can in theory dump out many, many log
 * messages, so the burst size is set quite high here to avoid missing useful
 * information. Also, at high logging levels we log *all* Netlink messages. */
static struct vlog_rate_limit rl = VLOG_RATE_LIMIT_INIT(60, 600);

static uint32_t nl_sock_allocate_seq(struct nl_sock *, unsigned int n);
static void log_nlmsg(const char *function, int error,
                      const void *message, size_t size, int protocol);

/* Netlink sockets. */

struct nl_sock {
    int fd;
    uint32_t next_seq;
    uint32_t pid;
    int protocol;
    unsigned int rcvbuf;        /* Receive buffer size (SO_RCVBUF). */
};

/* Compile-time limit on iovecs, so that we can allocate a maximum-size array
 * of iovecs on the stack. */
#define MAX_IOVS 128

/* Maximum number of iovecs that may be passed to sendmsg, capped at a
 * minimum of _XOPEN_IOV_MAX (16) and a maximum of MAX_IOVS.
 *
 * Initialized by nl_sock_create(). */
static int max_iovs;

static int nl_pool_alloc(int protocol, struct nl_sock **sockp);
static void nl_pool_release(struct nl_sock *);

/* Creates a new netlink socket for the given netlink 'protocol'
 * (NETLINK_ROUTE, NETLINK_GENERIC, ...). Returns 0 and sets '*sockp' to the
 * new socket if successful, otherwise returns a positive errno value. */
int
nl_sock_create(int protocol, struct nl_sock **sockp)
{
    static struct ovsthread_once once = OVSTHREAD_ONCE_INITIALIZER;
    struct nl_sock *sock;
    struct sockaddr_nl local, remote;
    socklen_t local_size;
    int rcvbuf;
    int retval = 0;

    if (ovsthread_once_start(&once)) {
        int save_errno = errno;
        errno = 0;

        max_iovs = sysconf(_SC_UIO_MAXIOV);
        if (max_iovs < _XOPEN_IOV_MAX) {
            if (max_iovs == -1 && errno) {
                VLOG_WARN("sysconf(_SC_UIO_MAXIOV): %s", ovs_strerror(errno));
            }
            max_iovs = _XOPEN_IOV_MAX;
        } else if (max_iovs > MAX_IOVS) {
            max_iovs = MAX_IOVS;
        }

        errno = save_errno;
        ovsthread_once_done(&once);
    }

    *sockp = NULL;
    sock = xmalloc(sizeof *sock);

    sock->fd = socket(AF_NETLINK, SOCK_RAW, protocol);
    if (sock->fd < 0) {
  102. VLOG_ERR("fcntl: %s", ovs_strerror(errno));
        goto error;
    }
    sock->protocol = protocol;
    sock->next_seq = 1;

    rcvbuf = 1024 * 1024;
    if (setsockopt(sock->fd, SOL_SOCKET, SO_RCVBUFFORCE,
                   &rcvbuf, sizeof rcvbuf)) {
        /* Only root can use SO_RCVBUFFORCE. Everyone else gets EPERM.
         * Warn only if the failure is therefore unexpected. */
        if (errno != EPERM) {
            VLOG_WARN_RL(&rl, "setting %d-byte socket receive buffer failed "
                         "(%s)", rcvbuf, ovs_strerror(errno));
        }
    }

    retval = get_socket_rcvbuf(sock->fd);
    if (retval < 0) {
        retval = -retval;
        goto error;
    }
    sock->rcvbuf = retval;

    /* Connect to kernel (pid 0) as remote address. */
    memset(&remote, 0, sizeof remote);
    remote.nl_family = AF_NETLINK;
    remote.nl_pid = 0;
    if (connect(sock->fd, (struct sockaddr *) &remote, sizeof remote) < 0) {
        VLOG_ERR("connect(0): %s", ovs_strerror(errno));
        goto error;
    }

    /* Obtain pid assigned by kernel. */
    local_size = sizeof local;
    if (getsockname(sock->fd, (struct sockaddr *) &local, &local_size) < 0) {
        VLOG_ERR("getsockname: %s", ovs_strerror(errno));
        goto error;
    }
    if (local_size < sizeof local || local.nl_family != AF_NETLINK) {
        VLOG_ERR("getsockname returned bad Netlink name");
        retval = EINVAL;
        goto error;
    }
    sock->pid = local.nl_pid;

    *sockp = sock;
    return 0;

error:
    if (retval == 0) {
        retval = errno;
        if (retval == 0) {
            retval = EINVAL;
        }
    }
    if (sock->fd >= 0) {
        close(sock->fd);
    }
    free(sock);
    return retval;
}
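
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a minimal create/use/destroy cycle using only the functions
 * declared in netlink-socket.h.
 *
 *     struct nl_sock *sock;
 *     int error = nl_sock_create(NETLINK_GENERIC, &sock);
 *     if (!error) {
 *         ...use 'sock' for sends, receives, transactions, or dumps...
 *         nl_sock_destroy(sock);
 *     }
 *
 * On failure, 'error' is a positive errno value and 'sock' is left NULL.
 */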

/* Creates a new netlink socket for the same protocol as 'src'. Returns 0 and
 * sets '*sockp' to the new socket if successful, otherwise returns a positive
 * errno value. */
int
nl_sock_clone(const struct nl_sock *src, struct nl_sock **sockp)
{
    return nl_sock_create(src->protocol, sockp);
}

/* Destroys netlink socket 'sock'. */
void
nl_sock_destroy(struct nl_sock *sock)
{
    if (sock) {
        close(sock->fd);
        free(sock);
    }
}

/* Tries to add 'sock' as a listener for 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * A socket that is subscribed to a multicast group that receives asynchronous
 * notifications must not be used for Netlink transactions or dumps, because
 * transactions and dumps can cause notifications to be lost.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to join a multicast group to which a socket
 * already belongs. */
int
nl_sock_join_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
{
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not join multicast group %u (%s)",
                  multicast_group, ovs_strerror(errno));
        return errno;
    }
    return 0;
}

/* Tries to make 'sock' stop listening to 'multicast_group'. Returns 0 if
 * successful, otherwise a positive errno value.
 *
 * Multicast group numbers are always positive.
 *
 * It is not an error to attempt to leave a multicast group to which a socket
 * does not belong.
 *
 * On success, reading from 'sock' will still return any messages that were
 * received on 'multicast_group' before the group was left. */
int
nl_sock_leave_mcgroup(struct nl_sock *sock, unsigned int multicast_group)
{
    if (setsockopt(sock->fd, SOL_NETLINK, NETLINK_DROP_MEMBERSHIP,
                   &multicast_group, sizeof multicast_group) < 0) {
        VLOG_WARN("could not leave multicast group %u (%s)",
                  multicast_group, ovs_strerror(errno));
        return errno;
    }
    return 0;
}
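
/*
 * Illustrative notification-listener sketch (editorial addition, not part of
 * the upstream file). As noted above, a socket subscribed to a multicast
 * group should not also be used for transactions or dumps. 'group' stands
 * for a multicast group number, e.g. one found with nl_lookup_genl_mcgroup()
 * below.
 *
 *     struct nl_sock *notify_sock;
 *     if (!nl_sock_create(NETLINK_GENERIC, &notify_sock)
 *         && !nl_sock_join_mcgroup(notify_sock, group)) {
 *         ...read notifications with nl_sock_recv(), or block with
 *            nl_sock_wait() and poll_block()...
 *         nl_sock_leave_mcgroup(notify_sock, group);
 *     }
 *     nl_sock_destroy(notify_sock);
 */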

static int
nl_sock_send__(struct nl_sock *sock, const struct ofpbuf *msg,
               uint32_t nlmsg_seq, bool wait)
{
    struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(msg);
    int error;

    nlmsg->nlmsg_len = msg->size;
    nlmsg->nlmsg_seq = nlmsg_seq;
    nlmsg->nlmsg_pid = sock->pid;
    do {
        int retval;
        retval = send(sock->fd, msg->data, msg->size, wait ? 0 : MSG_DONTWAIT);
        error = retval < 0 ? errno : 0;
    } while (error == EINTR);
    log_nlmsg(__func__, error, msg->data, msg->size, sock->protocol);
    if (!error) {
        COVERAGE_INC(netlink_sent);
    }
    return error;
}

/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to a fresh
 * sequence number, before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value. If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full. */
int
nl_sock_send(struct nl_sock *sock, const struct ofpbuf *msg, bool wait)
{
    return nl_sock_send_seq(sock, msg, nl_sock_allocate_seq(sock, 1), wait);
}

/* Tries to send 'msg', which must contain a Netlink message, to the kernel on
 * 'sock'. nlmsg_len in 'msg' will be finalized to match msg->size, nlmsg_pid
 * will be set to 'sock''s pid, and nlmsg_seq will be initialized to
 * 'nlmsg_seq', before the message is sent.
 *
 * Returns 0 if successful, otherwise a positive errno value. If
 * 'wait' is true, then the send will wait until buffer space is ready;
 * otherwise, returns EAGAIN if the 'sock' send buffer is full.
 *
 * This function is suitable for sending a reply to a request that was received
 * with sequence number 'nlmsg_seq'. Otherwise, use nl_sock_send() instead. */
int
nl_sock_send_seq(struct nl_sock *sock, const struct ofpbuf *msg,
                 uint32_t nlmsg_seq, bool wait)
{
    return nl_sock_send__(sock, msg, nlmsg_seq, wait);
}

static int
nl_sock_recv__(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
{
    /* We can't accurately predict the size of the data to be received. The
     * caller is supposed to have allocated enough space in 'buf' to handle the
     * "typical" case. To handle exceptions, we make available enough space in
     * 'tail' to allow Netlink messages to be up to 64 kB long (a reasonable
     * figure since that's the maximum length of a Netlink attribute). */
    struct nlmsghdr *nlmsghdr;
    uint8_t tail[65536];
    struct iovec iov[2];
    struct msghdr msg;
    ssize_t retval;

    ovs_assert(buf->allocated >= sizeof *nlmsghdr);
    ofpbuf_clear(buf);

    iov[0].iov_base = buf->base;
    iov[0].iov_len = buf->allocated;
    iov[1].iov_base = tail;
    iov[1].iov_len = sizeof tail;

    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iov;
    msg.msg_iovlen = 2;

    do {
        retval = recvmsg(sock->fd, &msg, wait ? 0 : MSG_DONTWAIT);
    } while (retval < 0 && errno == EINTR);

    if (retval < 0) {
        int error = errno;
        if (error == ENOBUFS) {
            /* Socket receive buffer overflow dropped one or more messages that
             * the kernel tried to send to us. */
            COVERAGE_INC(netlink_overflow);
        }
        return error;
    }

    if (msg.msg_flags & MSG_TRUNC) {
        VLOG_ERR_RL(&rl, "truncated message (longer than %zu bytes)",
                    sizeof tail);
        return E2BIG;
    }

    nlmsghdr = buf->data;
    if (retval < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len < sizeof *nlmsghdr
        || nlmsghdr->nlmsg_len > retval) {
        VLOG_ERR_RL(&rl, "received invalid nlmsg (%zd bytes < %zu)",
                    retval, sizeof *nlmsghdr);
        return EPROTO;
    }

    buf->size = MIN(retval, buf->allocated);
    if (retval > buf->allocated) {
        COVERAGE_INC(netlink_recv_jumbo);
        ofpbuf_put(buf, tail, retval - buf->allocated);
    }

    log_nlmsg(__func__, 0, buf->data, buf->size, sock->protocol);
    COVERAGE_INC(netlink_received);

    return 0;
}

/* Tries to receive a Netlink message from the kernel on 'sock' into 'buf'. If
 * 'wait' is true, waits for a message to be ready. Otherwise, fails with
 * EAGAIN if the 'sock' receive buffer is empty.
 *
 * The caller must have initialized 'buf' with an allocation of at least
 * NLMSG_HDRLEN bytes. For best performance, the caller should allocate enough
 * space for a "typical" message.
 *
 * On success, returns 0 and replaces 'buf''s previous content by the received
 * message. This function expands 'buf''s allocated memory, as necessary, to
 * hold the actual size of the received message.
 *
 * On failure, returns a positive errno value and clears 'buf' to zero length.
 * 'buf' retains its previous memory allocation.
 *
 * Regardless of success or failure, this function resets 'buf''s headroom to
 * 0. */
int
nl_sock_recv(struct nl_sock *sock, struct ofpbuf *buf, bool wait)
{
    return nl_sock_recv__(sock, buf, wait);
}
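
/*
 * Illustrative send/receive sketch (editorial addition, not part of the
 * upstream file). It shows nl_sock_recv()'s buffer contract: 'buf' must be
 * initialized with at least NLMSG_HDRLEN bytes of allocation, and a larger
 * "typical" size avoids the jumbo-message copy path. 'sock' and 'request'
 * stand for an existing socket and a prepared Netlink message.
 *
 *     struct ofpbuf buf;
 *     ofpbuf_init(&buf, 2048);
 *     error = nl_sock_send(sock, request, true);
 *     if (!error) {
 *         error = nl_sock_recv(sock, &buf, true);
 *     }
 *     ofpbuf_uninit(&buf);
 *
 * For request/reply exchanges, nl_sock_transact() below also matches
 * sequence numbers and retries after ENOBUFS, so it is usually preferable.
 */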

static void
nl_sock_record_errors__(struct nl_transaction **transactions, size_t n,
                        int error)
{
    size_t i;

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        txn->error = error;
        if (txn->reply) {
            ofpbuf_clear(txn->reply);
        }
    }
}

static int
nl_sock_transact_multiple__(struct nl_sock *sock,
                            struct nl_transaction **transactions, size_t n,
                            size_t *done)
{
    uint64_t tmp_reply_stub[1024 / 8];
    struct nl_transaction tmp_txn;
    struct ofpbuf tmp_reply;

    uint32_t base_seq;
    struct iovec iovs[MAX_IOVS];
    struct msghdr msg;
    int error;
    int i;

    base_seq = nl_sock_allocate_seq(sock, n);
    *done = 0;
    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];
        struct nlmsghdr *nlmsg = nl_msg_nlmsghdr(txn->request);

        nlmsg->nlmsg_len = txn->request->size;
        nlmsg->nlmsg_seq = base_seq + i;
        nlmsg->nlmsg_pid = sock->pid;

        iovs[i].iov_base = txn->request->data;
        iovs[i].iov_len = txn->request->size;
    }

    memset(&msg, 0, sizeof msg);
    msg.msg_iov = iovs;
    msg.msg_iovlen = n;
    do {
        error = sendmsg(sock->fd, &msg, 0) < 0 ? errno : 0;
    } while (error == EINTR);

    for (i = 0; i < n; i++) {
        struct nl_transaction *txn = transactions[i];

        log_nlmsg(__func__, error, txn->request->data, txn->request->size,
                  sock->protocol);
    }
    if (!error) {
        COVERAGE_ADD(netlink_sent, n);
    }

    if (error) {
        return error;
    }

    ofpbuf_use_stub(&tmp_reply, tmp_reply_stub, sizeof tmp_reply_stub);
    tmp_txn.request = NULL;
    tmp_txn.reply = &tmp_reply;
    tmp_txn.error = 0;
    while (n > 0) {
        struct nl_transaction *buf_txn, *txn;
        uint32_t seq;

        /* Find a transaction whose buffer we can use for receiving a reply.
         * If no such transaction is left, use tmp_txn. */
        buf_txn = &tmp_txn;
        for (i = 0; i < n; i++) {
            if (transactions[i]->reply) {
                buf_txn = transactions[i];
                break;
            }
        }

        /* Receive a reply. */
        error = nl_sock_recv__(sock, buf_txn->reply, false);
        if (error) {
            if (error == EAGAIN) {
                nl_sock_record_errors__(transactions, n, 0);
                *done += n;
                error = 0;
            }
            break;
        }

        /* Match the reply up with a transaction. */
        seq = nl_msg_nlmsghdr(buf_txn->reply)->nlmsg_seq;
        if (seq < base_seq || seq >= base_seq + n) {
            VLOG_DBG_RL(&rl, "ignoring unexpected seq %#"PRIx32, seq);
            continue;
        }
        i = seq - base_seq;
        txn = transactions[i];

        /* Fill in the results for 'txn'. */
        if (nl_msg_nlmsgerr(buf_txn->reply, &txn->error)) {
            if (txn->reply) {
                ofpbuf_clear(txn->reply);
            }
            if (txn->error) {
                VLOG_DBG_RL(&rl, "received NAK error=%d (%s)",
                            txn->error, ovs_strerror(txn->error));
            }
        } else {
            txn->error = 0;
            if (txn->reply && txn != buf_txn) {
                /* Swap buffers. */
                struct ofpbuf *reply = buf_txn->reply;
                buf_txn->reply = txn->reply;
                txn->reply = reply;
            }
        }

        /* Fill in the results for transactions before 'txn'. (We have to do
         * this after the results for 'txn' itself because of the buffer swap
         * above.) */
        nl_sock_record_errors__(transactions, i, 0);

        /* Advance. */
        *done += i + 1;
        transactions += i + 1;
        n -= i + 1;
        base_seq += i + 1;
    }
    ofpbuf_uninit(&tmp_reply);

    return error;
}

/* Sends the 'request' member of the 'n' transactions in 'transactions' on
 * 'sock', in order, and receives responses to all of them. Fills in the
 * 'error' member of each transaction with 0 if it was successful, otherwise
 * with a positive errno value. If 'reply' is nonnull, then it will be filled
 * with the reply if the message receives a detailed reply. In other cases,
 * i.e. where the request failed or had no reply beyond an indication of
 * success, 'reply' will be cleared if it is nonnull.
 *
 * The caller is responsible for destroying each request and reply, and the
 * transactions array itself.
 *
 * Before sending each message, this function will finalize nlmsg_len in each
 * 'request' to match the ofpbuf's size, set nlmsg_pid to 'sock''s pid, and
 * initialize nlmsg_seq.
 *
 * Bare Netlink is an unreliable transport protocol. This function layers
 * reliable delivery and reply semantics on top of bare Netlink. See
 * nl_sock_transact() for some caveats.
 */
void
nl_sock_transact_multiple(struct nl_sock *sock,
                          struct nl_transaction **transactions, size_t n)
{
    int max_batch_count;
    int error;

    if (!n) {
        return;
    }

    /* In theory, every request could have a 64 kB reply. But the default and
     * maximum socket rcvbuf size with typical Dom0 memory sizes both tend to
     * be a bit below 128 kB, so that would only allow a single message in a
     * "batch". So we assume that replies average (at most) 4 kB, which allows
     * a good deal of batching.
     *
     * In practice, most of the requests that we batch either have no reply at
     * all or a brief reply. */
    max_batch_count = MAX(sock->rcvbuf / 4096, 1);
    max_batch_count = MIN(max_batch_count, max_iovs);

    while (n > 0) {
        size_t count, bytes;
        size_t done;

        /* Batch up to 'max_batch_count' transactions. But cap it at about a
         * page of requests total because big skbuffs are expensive to
         * allocate in the kernel. */
#if defined(PAGESIZE)
        enum { MAX_BATCH_BYTES = MAX(1, PAGESIZE - 512) };
#else
        enum { MAX_BATCH_BYTES = 4096 - 512 };
#endif
        bytes = transactions[0]->request->size;
        for (count = 1; count < n && count < max_batch_count; count++) {
            if (bytes + transactions[count]->request->size > MAX_BATCH_BYTES) {
                break;
            }
            bytes += transactions[count]->request->size;
        }

        error = nl_sock_transact_multiple__(sock, transactions, count, &done);
        transactions += done;
        n -= done;

        if (error == ENOBUFS) {
            VLOG_DBG_RL(&rl, "receive buffer overflow, resending request");
        } else if (error) {
            VLOG_ERR_RL(&rl, "transaction error (%s)", ovs_strerror(error));
            nl_sock_record_errors__(transactions, n, error);
        }
    }
}

/* Sends 'request' to the kernel via 'sock' and waits for a response. If
 * successful, returns 0. On failure, returns a positive errno value.
 *
 * If 'replyp' is nonnull, then on success '*replyp' is set to the kernel's
 * reply, which the caller is responsible for freeing with ofpbuf_delete(), and
 * on failure '*replyp' is set to NULL. If 'replyp' is null, then the kernel's
 * reply, if any, is discarded.
 *
 * Before the message is sent, nlmsg_len in 'request' will be finalized to
 * match msg->size, nlmsg_pid will be set to 'sock''s pid, and nlmsg_seq will
 * be initialized, NLM_F_ACK will be set in nlmsg_flags.
 *
 * The caller is responsible for destroying 'request'.
 *
 * Bare Netlink is an unreliable transport protocol. This function layers
 * reliable delivery and reply semantics on top of bare Netlink.
 *
 * In Netlink, sending a request to the kernel is reliable enough, because the
 * kernel will tell us if the message cannot be queued (and we will in that
 * case put it on the transmit queue and wait until it can be delivered).
 *
 * Receiving the reply is the real problem: if the socket buffer is full when
 * the kernel tries to send the reply, the reply will be dropped. However, the
 * kernel sets a flag that a reply has been dropped. The next call to recv
 * then returns ENOBUFS. We can then re-send the request.
 *
 * Caveats:
 *
 *      1. Netlink depends on sequence numbers to match up requests and
 *         replies. The sender of a request supplies a sequence number, and
 *         the reply echos back that sequence number.
 *
 *         This is fine, but (1) some kernel netlink implementations are
 *         broken, in that they fail to echo sequence numbers and (2) this
 *         function will drop packets with non-matching sequence numbers, so
 *         that only a single request can be usefully transacted at a time.
 *
 *      2. Resending the request causes it to be re-executed, so the request
 *         needs to be idempotent.
 */
int
nl_sock_transact(struct nl_sock *sock, const struct ofpbuf *request,
                 struct ofpbuf **replyp)
{
    struct nl_transaction *transactionp;
    struct nl_transaction transaction;

    transaction.request = CONST_CAST(struct ofpbuf *, request);
    transaction.reply = replyp ? ofpbuf_new(1024) : NULL;
    transactionp = &transaction;

    nl_sock_transact_multiple(sock, &transactionp, 1);

    if (replyp) {
        if (transaction.error) {
            ofpbuf_delete(transaction.reply);
            *replyp = NULL;
        } else {
            *replyp = transaction.reply;
        }
    }

    return transaction.error;
}
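
/*
 * Illustrative transaction sketch (editorial addition, not part of the
 * upstream file). 'sock' and 'request' stand for an existing socket and an
 * ofpbuf already filled in, e.g. with nl_msg_put_genlmsghdr() and the
 * desired attributes.
 *
 *     struct ofpbuf *reply;
 *     error = nl_sock_transact(sock, request, &reply);
 *     if (!error) {
 *         ...parse 'reply', e.g. with nl_policy_parse()...
 *         ofpbuf_delete(reply);
 *     }
 *
 * Because a dropped reply causes the request to be re-sent, the request must
 * be idempotent (caveat 2 above).
 */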

/* Drain all the messages currently in 'sock''s receive queue. */
int
nl_sock_drain(struct nl_sock *sock)
{
    return drain_rcvbuf(sock->fd);
}

/* Starts a Netlink "dump" operation, by sending 'request' to the kernel on a
 * Netlink socket created with the given 'protocol', and initializes 'dump' to
 * reflect the state of the operation.
 *
 * nlmsg_len in 'msg' will be finalized to match msg->size, and nlmsg_pid will
 * be set to the Netlink socket's pid, before the message is sent. NLM_F_DUMP
 * and NLM_F_ACK will be set in nlmsg_flags.
 *
 * The design of this Netlink socket library ensures that the dump is reliable.
 *
 * This function provides no status indication. An error status for the entire
 * dump operation is provided when it is completed by calling nl_dump_done().
 *
 * The caller is responsible for destroying 'request'.
 */
void
nl_dump_start(struct nl_dump *dump, int protocol, const struct ofpbuf *request)
{
    ofpbuf_init(&dump->buffer, 4096);
    dump->status = nl_pool_alloc(protocol, &dump->sock);
    if (dump->status) {
        return;
    }

    nl_msg_nlmsghdr(request)->nlmsg_flags |= NLM_F_DUMP | NLM_F_ACK;
    dump->status = nl_sock_send__(dump->sock, request,
                                  nl_sock_allocate_seq(dump->sock, 1), true);
    dump->seq = nl_msg_nlmsghdr(request)->nlmsg_seq;
}

/* Helper function for nl_dump_next(). */
static int
nl_dump_recv(struct nl_dump *dump)
{
    struct nlmsghdr *nlmsghdr;
    int retval;

    retval = nl_sock_recv__(dump->sock, &dump->buffer, true);
    if (retval) {
        return retval == EINTR ? EAGAIN : retval;
    }

    nlmsghdr = nl_msg_nlmsghdr(&dump->buffer);
    if (dump->seq != nlmsghdr->nlmsg_seq) {
        VLOG_DBG_RL(&rl, "ignoring seq %#"PRIx32" != expected %#"PRIx32,
                    nlmsghdr->nlmsg_seq, dump->seq);
        return EAGAIN;
    }

    if (nl_msg_nlmsgerr(&dump->buffer, &retval)) {
        VLOG_INFO_RL(&rl, "netlink dump request error (%s)",
                     ovs_strerror(retval));
        return retval && retval != EAGAIN ? retval : EPROTO;
    }

    return 0;
}

/* Attempts to retrieve another reply from 'dump', which must have been
 * initialized with nl_dump_start().
 *
 * If successful, returns true and points 'reply->data' and 'reply->size' to
 * the message that was retrieved. The caller must not modify 'reply' (because
 * it points into the middle of a larger buffer).
 *
 * On failure, returns false and sets 'reply->data' to NULL and 'reply->size'
 * to 0. Failure might indicate an actual error or merely the end of replies.
 * An error status for the entire dump operation is provided when it is
 * completed by calling nl_dump_done().
 */
bool
nl_dump_next(struct nl_dump *dump, struct ofpbuf *reply)
{
    struct nlmsghdr *nlmsghdr;

    reply->data = NULL;
    reply->size = 0;
    if (dump->status) {
        return false;
    }

    while (!dump->buffer.size) {
        int retval = nl_dump_recv(dump);
        if (retval) {
            ofpbuf_clear(&dump->buffer);
            if (retval != EAGAIN) {
                dump->status = retval;
                return false;
            }
        }
    }

    nlmsghdr = nl_msg_next(&dump->buffer, reply);
    if (!nlmsghdr) {
        VLOG_WARN_RL(&rl, "netlink dump reply contains message fragment");
        dump->status = EPROTO;
        return false;
    } else if (nlmsghdr->nlmsg_type == NLMSG_DONE) {
        dump->status = EOF;
        return false;
    }

    return true;
}

/* Completes Netlink dump operation 'dump', which must have been initialized
 * with nl_dump_start(). Returns 0 if the dump operation was error-free,
 * otherwise a positive errno value describing the problem. */
int
nl_dump_done(struct nl_dump *dump)
{
    /* Drain any remaining messages that the client didn't read. Otherwise the
     * kernel will continue to queue them up and waste buffer space.
     *
     * XXX We could just destroy and discard the socket in this case. */
    while (!dump->status) {
        struct ofpbuf reply;
        if (!nl_dump_next(dump, &reply)) {
            ovs_assert(dump->status);
        }
    }
    nl_pool_release(dump->sock);
    ofpbuf_uninit(&dump->buffer);
    return dump->status == EOF ? 0 : dump->status;
}
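
/*
 * Illustrative dump-loop sketch (editorial addition, not part of the
 * upstream file). 'request' stands for a prepared ofpbuf; NLM_F_DUMP and
 * NLM_F_ACK are added by nl_dump_start() itself.
 *
 *     struct nl_dump dump;
 *     struct ofpbuf reply;
 *
 *     nl_dump_start(&dump, NETLINK_GENERIC, request);
 *     while (nl_dump_next(&dump, &reply)) {
 *         ...inspect 'reply', which points into dump.buffer, read-only...
 *     }
 *     error = nl_dump_done(&dump);
 *
 * The error status for the whole dump is only reported by nl_dump_done().
 */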

/* Causes poll_block() to wake up when any of the specified 'events' (which is
 * an OR'd combination of POLLIN, POLLOUT, etc.) occur on 'sock'. */
void
nl_sock_wait(const struct nl_sock *sock, short int events)
{
    poll_fd_wait(sock->fd, events);
}

/* Returns the underlying fd for 'sock', for use in "poll()"-like operations
 * that can't use nl_sock_wait().
 *
 * It's a little tricky to use the returned fd correctly, because nl_sock does
 * "copy on write" to allow a single nl_sock to be used for notifications,
 * transactions, and dumps. If 'sock' is used only for notifications and
 * transactions (and never for dump) then the usage is safe. */
int
nl_sock_fd(const struct nl_sock *sock)
{
    return sock->fd;
}

/* Returns the PID associated with this socket. */
uint32_t
nl_sock_pid(const struct nl_sock *sock)
{
    return sock->pid;
}

/* Miscellaneous. */

struct genl_family {
    struct hmap_node hmap_node;
    uint16_t id;
    char *name;
};

static struct hmap genl_families = HMAP_INITIALIZER(&genl_families);

static const struct nl_policy family_policy[CTRL_ATTR_MAX + 1] = {
    [CTRL_ATTR_FAMILY_ID] = {.type = NL_A_U16},
    [CTRL_ATTR_MCAST_GROUPS] = {.type = NL_A_NESTED, .optional = true},
};

static struct genl_family *
find_genl_family_by_id(uint16_t id)
{
    struct genl_family *family;

    HMAP_FOR_EACH_IN_BUCKET (family, hmap_node, hash_int(id, 0),
                             &genl_families) {
        if (family->id == id) {
            return family;
        }
    }
    return NULL;
}

static void
define_genl_family(uint16_t id, const char *name)
{
    struct genl_family *family = find_genl_family_by_id(id);

    if (family) {
        if (!strcmp(family->name, name)) {
            return;
        }
        free(family->name);
    } else {
        family = xmalloc(sizeof *family);
        family->id = id;
        hmap_insert(&genl_families, &family->hmap_node, hash_int(id, 0));
    }
    family->name = xstrdup(name);
}

static const char *
genl_family_to_name(uint16_t id)
{
    if (id == GENL_ID_CTRL) {
        return "control";
    } else {
        struct genl_family *family = find_genl_family_by_id(id);
        return family ? family->name : "unknown";
    }
}

static int
do_lookup_genl_family(const char *name, struct nlattr **attrs,
                      struct ofpbuf **replyp)
{
    struct nl_sock *sock;
    struct ofpbuf request, *reply;
    int error;

    *replyp = NULL;
    error = nl_sock_create(NETLINK_GENERIC, &sock);
    if (error) {
        return error;
    }

    ofpbuf_init(&request, 0);
    nl_msg_put_genlmsghdr(&request, 0, GENL_ID_CTRL, NLM_F_REQUEST,
                          CTRL_CMD_GETFAMILY, 1);
    nl_msg_put_string(&request, CTRL_ATTR_FAMILY_NAME, name);
    error = nl_sock_transact(sock, &request, &reply);
    ofpbuf_uninit(&request);
    if (error) {
        nl_sock_destroy(sock);
        return error;
    }

    if (!nl_policy_parse(reply, NLMSG_HDRLEN + GENL_HDRLEN,
                         family_policy, attrs, ARRAY_SIZE(family_policy))
        || nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]) == 0) {
        nl_sock_destroy(sock);
        ofpbuf_delete(reply);
        return EPROTO;
    }

    nl_sock_destroy(sock);
    *replyp = reply;
    return 0;
}

/* Finds the multicast group called 'group_name' in genl family 'family_name'.
 * When successful, writes its result to 'multicast_group' and returns 0.
 * Otherwise, clears 'multicast_group' and returns a positive error code.
 */
int
nl_lookup_genl_mcgroup(const char *family_name, const char *group_name,
                       unsigned int *multicast_group)
{
    struct nlattr *family_attrs[ARRAY_SIZE(family_policy)];
    const struct nlattr *mc;
    struct ofpbuf *reply;
    unsigned int left;
    int error;

    *multicast_group = 0;
    error = do_lookup_genl_family(family_name, family_attrs, &reply);
    if (error) {
        return error;
    }

    if (!family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        error = EPROTO;
        goto exit;
    }

    NL_NESTED_FOR_EACH (mc, left, family_attrs[CTRL_ATTR_MCAST_GROUPS]) {
        static const struct nl_policy mc_policy[] = {
            [CTRL_ATTR_MCAST_GRP_ID] = {.type = NL_A_U32},
            [CTRL_ATTR_MCAST_GRP_NAME] = {.type = NL_A_STRING},
        };

        struct nlattr *mc_attrs[ARRAY_SIZE(mc_policy)];
        const char *mc_name;

        if (!nl_parse_nested(mc, mc_policy, mc_attrs, ARRAY_SIZE(mc_policy))) {
            error = EPROTO;
            goto exit;
        }

        mc_name = nl_attr_get_string(mc_attrs[CTRL_ATTR_MCAST_GRP_NAME]);
        if (!strcmp(group_name, mc_name)) {
            *multicast_group =
                nl_attr_get_u32(mc_attrs[CTRL_ATTR_MCAST_GRP_ID]);
            error = 0;
            goto exit;
        }
    }
    error = EPROTO;

exit:
    ofpbuf_delete(reply);
    return error;
}

/* If '*number' is 0, translates the given Generic Netlink family 'name' to a
 * number and stores it in '*number'. If successful, returns 0 and the caller
 * may use '*number' as the family number. On failure, returns a positive
 * errno value and '*number' caches the errno value. */
int
nl_lookup_genl_family(const char *name, int *number)
{
    if (*number == 0) {
        struct nlattr *attrs[ARRAY_SIZE(family_policy)];
        struct ofpbuf *reply;
        int error;

        error = do_lookup_genl_family(name, attrs, &reply);
        if (!error) {
            *number = nl_attr_get_u16(attrs[CTRL_ATTR_FAMILY_ID]);
            define_genl_family(*number, name);
        } else {
            *number = -error;
        }
        ofpbuf_delete(reply);

        ovs_assert(*number != 0);
    }
    return *number > 0 ? 0 : -*number;
}
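
/*
 * Illustrative Generic Netlink lookup sketch (editorial addition, not part
 * of the upstream file). The cached '*number' pattern makes repeated lookups
 * cheap; "my_family" and "my_group" are hypothetical names.
 *
 *     static int family_number = 0;
 *     unsigned int mcgroup;
 *
 *     error = nl_lookup_genl_family("my_family", &family_number);
 *     if (!error) {
 *         error = nl_lookup_genl_mcgroup("my_family", "my_group", &mcgroup);
 *     }
 *
 * On success, 'family_number' holds the family id to use as the nlmsg_type
 * of requests; on failure, it caches the negated errno value.
 */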

struct nl_pool {
    struct nl_sock *socks[16];
    int n;
};

static struct ovs_mutex pool_mutex = OVS_MUTEX_INITIALIZER;
static struct nl_pool pools[MAX_LINKS] OVS_GUARDED_BY(pool_mutex);

static int
nl_pool_alloc(int protocol, struct nl_sock **sockp)
{
    struct nl_sock *sock = NULL;
    struct nl_pool *pool;

    ovs_assert(protocol >= 0 && protocol < ARRAY_SIZE(pools));

    ovs_mutex_lock(&pool_mutex);
    pool = &pools[protocol];
    if (pool->n > 0) {
        sock = pool->socks[--pool->n];
    }
    ovs_mutex_unlock(&pool_mutex);

    if (sock) {
        *sockp = sock;
        return 0;
    } else {
        return nl_sock_create(protocol, sockp);
    }
}

static void
nl_pool_release(struct nl_sock *sock)
{
    if (sock) {
        struct nl_pool *pool = &pools[sock->protocol];

        ovs_mutex_lock(&pool_mutex);
        if (pool->n < ARRAY_SIZE(pool->socks)) {
            pool->socks[pool->n++] = sock;
            sock = NULL;
        }
        ovs_mutex_unlock(&pool_mutex);

        nl_sock_destroy(sock);
    }
}

int
nl_transact(int protocol, const struct ofpbuf *request,
            struct ofpbuf **replyp)
{
    struct nl_sock *sock;
    int error;

    error = nl_pool_alloc(protocol, &sock);
    if (error) {
        *replyp = NULL;
        return error;
    }

    error = nl_sock_transact(sock, request, replyp);

    nl_pool_release(sock);

    return error;
}

void
nl_transact_multiple(int protocol,
                     struct nl_transaction **transactions, size_t n)
{
    struct nl_sock *sock;
    int error;

    error = nl_pool_alloc(protocol, &sock);
    if (!error) {
        nl_sock_transact_multiple(sock, transactions, n);
        nl_pool_release(sock);
    } else {
        nl_sock_record_errors__(transactions, n, error);
    }
}

static uint32_t
nl_sock_allocate_seq(struct nl_sock *sock, unsigned int n)
{
    uint32_t seq = sock->next_seq;

    sock->next_seq += n;

    /* Make it impossible for the next request for sequence numbers to wrap
     * around to 0. Start over with 1 to avoid ever using a sequence number of
     * 0, because the kernel uses sequence number 0 for notifications. */
    if (sock->next_seq >= UINT32_MAX / 2) {
        sock->next_seq = 1;
    }

    return seq;
}

static void
nlmsghdr_to_string(const struct nlmsghdr *h, int protocol, struct ds *ds)
{
    struct nlmsg_flag {
        unsigned int bits;
        const char *name;
    };
    static const struct nlmsg_flag flags[] = {
        { NLM_F_REQUEST, "REQUEST" },
        { NLM_F_MULTI, "MULTI" },
        { NLM_F_ACK, "ACK" },
        { NLM_F_ECHO, "ECHO" },
        { NLM_F_DUMP, "DUMP" },
        { NLM_F_ROOT, "ROOT" },
        { NLM_F_MATCH, "MATCH" },
        { NLM_F_ATOMIC, "ATOMIC" },
    };
    const struct nlmsg_flag *flag;
    uint16_t flags_left;

    ds_put_format(ds, "nl(len:%"PRIu32", type=%"PRIu16,
                  h->nlmsg_len, h->nlmsg_type);
    if (h->nlmsg_type == NLMSG_NOOP) {
        ds_put_cstr(ds, "(no-op)");
    } else if (h->nlmsg_type == NLMSG_ERROR) {
        ds_put_cstr(ds, "(error)");
    } else if (h->nlmsg_type == NLMSG_DONE) {
        ds_put_cstr(ds, "(done)");
    } else if (h->nlmsg_type == NLMSG_OVERRUN) {
        ds_put_cstr(ds, "(overrun)");
    } else if (h->nlmsg_type < NLMSG_MIN_TYPE) {
        ds_put_cstr(ds, "(reserved)");
    } else if (protocol == NETLINK_GENERIC) {
        ds_put_format(ds, "(%s)", genl_family_to_name(h->nlmsg_type));
    } else {
        ds_put_cstr(ds, "(family-defined)");
    }
    ds_put_format(ds, ", flags=%"PRIx16, h->nlmsg_flags);
    flags_left = h->nlmsg_flags;
    for (flag = flags; flag < &flags[ARRAY_SIZE(flags)]; flag++) {
        if ((flags_left & flag->bits) == flag->bits) {
            ds_put_format(ds, "[%s]", flag->name);
            flags_left &= ~flag->bits;
        }
    }
    if (flags_left) {
        ds_put_format(ds, "[OTHER:%"PRIx16"]", flags_left);
    }
    ds_put_format(ds, ", seq=%"PRIx32", pid=%"PRIu32,
                  h->nlmsg_seq, h->nlmsg_pid);
}

static char *
nlmsg_to_string(const struct ofpbuf *buffer, int protocol)
{
    struct ds ds = DS_EMPTY_INITIALIZER;
    const struct nlmsghdr *h = ofpbuf_at(buffer, 0, NLMSG_HDRLEN);
    if (h) {
        nlmsghdr_to_string(h, protocol, &ds);
        if (h->nlmsg_type == NLMSG_ERROR) {
            const struct nlmsgerr *e;
            e = ofpbuf_at(buffer, NLMSG_HDRLEN,
                          NLMSG_ALIGN(sizeof(struct nlmsgerr)));
            if (e) {
                ds_put_format(&ds, " error(%d", e->error);
                if (e->error < 0) {
                    ds_put_format(&ds, "(%s)", ovs_strerror(-e->error));
                }
                ds_put_cstr(&ds, ", in-reply-to(");
                nlmsghdr_to_string(&e->msg, protocol, &ds);
                ds_put_cstr(&ds, "))");
            } else {
                ds_put_cstr(&ds, " error(truncated)");
            }
        } else if (h->nlmsg_type == NLMSG_DONE) {
            int *error = ofpbuf_at(buffer, NLMSG_HDRLEN, sizeof *error);
            if (error) {
                ds_put_format(&ds, " done(%d", *error);
                if (*error < 0) {
                    ds_put_format(&ds, "(%s)", ovs_strerror(-*error));
                }
                ds_put_cstr(&ds, ")");
            } else {
                ds_put_cstr(&ds, " done(truncated)");
            }
        } else if (protocol == NETLINK_GENERIC) {
            struct genlmsghdr *genl = nl_msg_genlmsghdr(buffer);
            if (genl) {
                ds_put_format(&ds, ",genl(cmd=%"PRIu8",version=%"PRIu8")",
                              genl->cmd, genl->version);
            }
        }
    } else {
        ds_put_cstr(&ds, "nl(truncated)");
    }
    return ds.string;
}

static void
log_nlmsg(const char *function, int error,
          const void *message, size_t size, int protocol)
{
    struct ofpbuf buffer;
    char *nlmsg;

    if (!VLOG_IS_DBG_ENABLED()) {
        return;
    }

    ofpbuf_use_const(&buffer, message, size);
    nlmsg = nlmsg_to_string(&buffer, protocol);
    VLOG_DBG_RL(&rl, "%s (%s): %s", function, ovs_strerror(error), nlmsg);
    free(nlmsg);
}