net/mctp/af_mctp.c — AF_MCTP socket layer (Linux kernel networking source).
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Management Component Transport Protocol (MCTP)
  4. *
  5. * Copyright (c) 2021 Code Construct
  6. * Copyright (c) 2021 Google
  7. */
  8. #include <linux/compat.h>
  9. #include <linux/if_arp.h>
  10. #include <linux/net.h>
  11. #include <linux/mctp.h>
  12. #include <linux/module.h>
  13. #include <linux/socket.h>
  14. #include <net/mctp.h>
  15. #include <net/mctpdevice.h>
  16. #include <net/sock.h>
  17. #define CREATE_TRACE_POINTS
  18. #include <trace/events/mctp.h>
  19. /* socket implementation */
  20. static void mctp_sk_expire_keys(struct timer_list *timer);
  21. static int mctp_release(struct socket *sock)
  22. {
  23. struct sock *sk = sock->sk;
  24. if (sk) {
  25. sock->sk = NULL;
  26. sk->sk_prot->close(sk, 0);
  27. }
  28. return 0;
  29. }
  30. /* Generic sockaddr checks, padding checks only so far */
  31. static bool mctp_sockaddr_is_ok(const struct sockaddr_mctp *addr)
  32. {
  33. return !addr->__smctp_pad0 && !addr->__smctp_pad1;
  34. }
  35. static bool mctp_sockaddr_ext_is_ok(const struct sockaddr_mctp_ext *addr)
  36. {
  37. return !addr->__smctp_pad0[0] &&
  38. !addr->__smctp_pad0[1] &&
  39. !addr->__smctp_pad0[2];
  40. }
  41. static int mctp_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
  42. {
  43. struct sock *sk = sock->sk;
  44. struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
  45. struct sockaddr_mctp *smctp;
  46. int rc;
  47. if (addrlen < sizeof(*smctp))
  48. return -EINVAL;
  49. if (addr->sa_family != AF_MCTP)
  50. return -EAFNOSUPPORT;
  51. if (!capable(CAP_NET_BIND_SERVICE))
  52. return -EACCES;
  53. /* it's a valid sockaddr for MCTP, cast and do protocol checks */
  54. smctp = (struct sockaddr_mctp *)addr;
  55. if (!mctp_sockaddr_is_ok(smctp))
  56. return -EINVAL;
  57. lock_sock(sk);
  58. /* TODO: allow rebind */
  59. if (sk_hashed(sk)) {
  60. rc = -EADDRINUSE;
  61. goto out_release;
  62. }
  63. msk->bind_net = smctp->smctp_network;
  64. msk->bind_addr = smctp->smctp_addr.s_addr;
  65. msk->bind_type = smctp->smctp_type & 0x7f; /* ignore the IC bit */
  66. rc = sk->sk_prot->hash(sk);
  67. out_release:
  68. release_sock(sk);
  69. return rc;
  70. }
/* Datagram send path. Validates the destination sockaddr, resolves either
 * a route (normal addressing) or a net_device (extended/direct addressing
 * when MCTP_OPT_ADDR_EXT is set and an extended sockaddr is supplied),
 * builds an skb whose first payload byte is the MCTP message type, and
 * hands it to mctp_local_output().
 *
 * Returns @len on success, or a negative errno.
 */
static int mctp_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	int rc, addrlen = msg->msg_namelen;
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct mctp_skb_cb *cb;
	struct mctp_route *rt;
	struct sk_buff *skb = NULL;
	int hlen;

	if (addr) {
		/* the only tag bits userspace may set */
		const u8 tagbits = MCTP_TAG_MASK | MCTP_TAG_OWNER |
			MCTP_TAG_PREALLOC;

		if (addrlen < sizeof(struct sockaddr_mctp))
			return -EINVAL;
		if (addr->smctp_family != AF_MCTP)
			return -EINVAL;
		if (!mctp_sockaddr_is_ok(addr))
			return -EINVAL;
		if (addr->smctp_tag & ~tagbits)
			return -EINVAL;
		/* can't preallocate a non-owned tag */
		if (addr->smctp_tag & MCTP_TAG_PREALLOC &&
		    !(addr->smctp_tag & MCTP_TAG_OWNER))
			return -EINVAL;

	} else {
		/* TODO: connect()ed sockets */
		return -EDESTADDRREQ;
	}

	if (!capable(CAP_NET_RAW))
		return -EACCES;

	/* resolve the "any network" wildcard to the per-netns default */
	if (addr->smctp_network == MCTP_NET_ANY)
		addr->smctp_network = mctp_default_net(sock_net(sk));

	/* direct addressing: caller supplies ifindex + hardware address,
	 * bypassing route lookup (rt stays NULL) */
	if (msk->addr_ext && addrlen >= sizeof(struct sockaddr_mctp_ext)) {
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);
		struct net_device *dev;

		rc = -EINVAL;
		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), extaddr->smctp_ifindex);
		/* check for correct halen */
		if (dev && extaddr->smctp_halen == dev->addr_len) {
			hlen = LL_RESERVED_SPACE(dev) + sizeof(struct mctp_hdr);
			rc = 0;
		}
		rcu_read_unlock();
		if (rc)
			goto err_free;
		rt = NULL;
	} else {
		rt = mctp_route_lookup(sock_net(sk), addr->smctp_network,
				       addr->smctp_addr.s_addr);
		if (!rt) {
			rc = -EHOSTUNREACH;
			goto err_free;
		}
		hlen = LL_RESERVED_SPACE(rt->dev->dev) + sizeof(struct mctp_hdr);
	}

	/* NOTE(review): the error paths below (alloc/copy failure) do not
	 * explicitly release @rt; this is only correct if
	 * mctp_route_lookup() does not hand out a counted reference, or if
	 * release happens elsewhere — confirm against route.c.
	 */
	skb = sock_alloc_send_skb(sk, hlen + 1 + len,
				  msg->msg_flags & MSG_DONTWAIT, &rc);
	if (!skb)
		return rc;

	skb_reserve(skb, hlen);

	/* set type as first byte in payload */
	*(u8 *)skb_put(skb, 1) = addr->smctp_type;

	rc = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
	if (rc < 0)
		goto err_free;

	/* set up cb */
	cb = __mctp_cb(skb);
	cb->net = addr->smctp_network;

	if (!rt) {
		/* fill extended address in cb, for the direct-addressed case */
		DECLARE_SOCKADDR(struct sockaddr_mctp_ext *,
				 extaddr, msg->msg_name);

		if (!mctp_sockaddr_ext_is_ok(extaddr) ||
		    extaddr->smctp_halen > sizeof(cb->haddr)) {
			rc = -EINVAL;
			goto err_free;
		}

		cb->ifindex = extaddr->smctp_ifindex;
		/* smctp_halen is checked above */
		cb->halen = extaddr->smctp_halen;
		memcpy(cb->haddr, extaddr->smctp_haddr, cb->halen);
	}

	rc = mctp_local_output(sk, rt, skb, addr->smctp_addr.s_addr,
			       addr->smctp_tag);

	return rc ? : len;

err_free:
	kfree_skb(skb);
	return rc;
}
/* Datagram receive path. Dequeues one reassembled message, strips the
 * leading message-type byte, copies up to @len payload bytes to the
 * caller, and optionally fills msg_name with the source address (extended
 * form if MCTP_OPT_ADDR_EXT is enabled on the socket).
 *
 * Returns the number of bytes delivered (the full message length when
 * MSG_TRUNC is requested), or a negative errno.
 */
static int mctp_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			int flags)
{
	DECLARE_SOCKADDR(struct sockaddr_mctp *, addr, msg->msg_name);
	struct sock *sk = sock->sk;
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct sk_buff *skb;
	size_t msglen;
	u8 type;
	int rc;

	if (flags & ~(MSG_DONTWAIT | MSG_TRUNC | MSG_PEEK))
		return -EOPNOTSUPP;

	skb = skb_recv_datagram(sk, flags, &rc);
	if (!skb)
		return rc;

	/* a zero-length skb carries no type byte; nothing to deliver */
	if (!skb->len) {
		rc = 0;
		goto out_free;
	}

	/* extract message type, remove from data */
	type = *((u8 *)skb->data);
	msglen = skb->len - 1;

	if (len < msglen)
		msg->msg_flags |= MSG_TRUNC;
	else
		len = msglen;

	/* copy from offset 1, skipping the type byte */
	rc = skb_copy_datagram_msg(skb, 1, msg, len);
	if (rc < 0)
		goto out_free;

	sock_recv_cmsgs(msg, sk, skb);

	if (addr) {
		struct mctp_skb_cb *cb = mctp_cb(skb);
		/* TODO: expand mctp_skb_cb for header fields? */
		struct mctp_hdr *hdr = mctp_hdr(skb);

		addr = msg->msg_name;
		addr->smctp_family = AF_MCTP;
		addr->__smctp_pad0 = 0;
		addr->smctp_network = cb->net;
		addr->smctp_addr.s_addr = hdr->src;
		addr->smctp_type = type;
		/* report only the tag value and the TO bit to userspace */
		addr->smctp_tag = hdr->flags_seq_tag &
			(MCTP_HDR_TAG_MASK | MCTP_HDR_FLAG_TO);
		addr->__smctp_pad1 = 0;
		msg->msg_namelen = sizeof(*addr);

		if (msk->addr_ext) {
			DECLARE_SOCKADDR(struct sockaddr_mctp_ext *, ae,
					 msg->msg_name);
			msg->msg_namelen = sizeof(*ae);
			ae->smctp_ifindex = cb->ifindex;
			ae->smctp_halen = cb->halen;
			/* zero padding and unused haddr bytes, so no
			 * uninitialized data reaches userspace */
			memset(ae->__smctp_pad0, 0x0, sizeof(ae->__smctp_pad0));
			memset(ae->smctp_haddr, 0x0, sizeof(ae->smctp_haddr));
			memcpy(ae->smctp_haddr, cb->haddr, cb->halen);
		}
	}

	rc = len;

	if (flags & MSG_TRUNC)
		rc = msglen;

out_free:
	skb_free_datagram(sk, skb);
	return rc;
}
/* We're done with the key; invalidate, stop reassembly, and remove from
 * lists.
 *
 * Called with key->lock held (irqs saved in @flags) and with
 * net->mctp.keys_lock held. Drops key->lock on behalf of the caller,
 * unlinks the key from both the per-net and per-socket lists, drops the
 * lists' reference on the key, and frees any in-progress reassembly skb.
 */
static void __mctp_key_remove(struct mctp_sk_key *key, struct net *net,
			      unsigned long flags, unsigned long reason)
	__releases(&key->lock)
	__must_hold(&net->mctp.keys_lock)
{
	struct sk_buff *skb;

	trace_mctp_key_release(key, reason);
	/* detach the reassembly head under the lock; free it after unlock */
	skb = key->reasm_head;
	key->reasm_head = NULL;
	key->reasm_dead = true;
	key->valid = false;
	mctp_dev_release_key(key->dev, key);
	spin_unlock_irqrestore(&key->lock, flags);

	hlist_del(&key->hlist);
	hlist_del(&key->sklist);

	/* unref for the lists */
	mctp_key_unref(key);

	kfree_skb(skb);
}
  247. static int mctp_setsockopt(struct socket *sock, int level, int optname,
  248. sockptr_t optval, unsigned int optlen)
  249. {
  250. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  251. int val;
  252. if (level != SOL_MCTP)
  253. return -EINVAL;
  254. if (optname == MCTP_OPT_ADDR_EXT) {
  255. if (optlen != sizeof(int))
  256. return -EINVAL;
  257. if (copy_from_sockptr(&val, optval, sizeof(int)))
  258. return -EFAULT;
  259. msk->addr_ext = val;
  260. return 0;
  261. }
  262. return -ENOPROTOOPT;
  263. }
  264. static int mctp_getsockopt(struct socket *sock, int level, int optname,
  265. char __user *optval, int __user *optlen)
  266. {
  267. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  268. int len, val;
  269. if (level != SOL_MCTP)
  270. return -EINVAL;
  271. if (get_user(len, optlen))
  272. return -EFAULT;
  273. if (optname == MCTP_OPT_ADDR_EXT) {
  274. if (len != sizeof(int))
  275. return -EINVAL;
  276. val = !!msk->addr_ext;
  277. if (copy_to_user(optval, &val, len))
  278. return -EFAULT;
  279. return 0;
  280. }
  281. return -EINVAL;
  282. }
/* SIOCMCTPALLOCTAG: preallocate an owned tag for communication with
 * ctl.peer_addr, and report it back to userspace (with OWNER and PREALLOC
 * set) through the same ioctl struct. The allocated key remains on the
 * socket's key list until dropped or the socket is closed.
 */
static int mctp_ioctl_alloctag(struct mctp_sock *msk, unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_sk_key *key = NULL;
	struct mctp_ioc_tag_ctl ctl;
	unsigned long flags;
	u8 tag;

	if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
		return -EFAULT;

	/* tag is an output field; must be zero on input */
	if (ctl.tag)
		return -EINVAL;

	/* no flags defined yet */
	if (ctl.flags)
		return -EINVAL;

	key = mctp_alloc_local_tag(msk, ctl.peer_addr, MCTP_ADDR_ANY,
				   true, &tag);
	if (IS_ERR(key))
		return PTR_ERR(key);

	ctl.tag = tag | MCTP_TAG_OWNER | MCTP_TAG_PREALLOC;
	if (copy_to_user((void __user *)arg, &ctl, sizeof(ctl))) {
		/* userspace never saw the tag; unwind the allocation */
		spin_lock_irqsave(&key->lock, flags);
		__mctp_key_remove(key, net, flags, MCTP_TRACE_KEY_DROPPED);
		mctp_key_unref(key);
		return -EFAULT;
	}

	/* drop our local reference; the key lists hold their own */
	mctp_key_unref(key);
	return 0;
}
/* SIOCMCTPDROPTAG: release a tag previously obtained via
 * SIOCMCTPALLOCTAG. Only manually-allocated keys matching the given
 * (peer, tag) are removed; returns -EINVAL if none matched.
 */
static int mctp_ioctl_droptag(struct mctp_sock *msk, unsigned long arg)
{
	struct net *net = sock_net(&msk->sk);
	struct mctp_ioc_tag_ctl ctl;
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	int rc;
	u8 tag;

	if (copy_from_user(&ctl, (void __user *)arg, sizeof(ctl)))
		return -EFAULT;

	/* no flags defined yet */
	if (ctl.flags)
		return -EINVAL;

	/* Must be a local tag, TO set, preallocated */
	if ((ctl.tag & ~MCTP_TAG_MASK) != (MCTP_TAG_OWNER | MCTP_TAG_PREALLOC))
		return -EINVAL;

	tag = ctl.tag & MCTP_TAG_MASK;
	rc = -EINVAL;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* we do an irqsave here, even though we know the irq state,
		 * so we have the flags to pass to __mctp_key_remove
		 */
		spin_lock_irqsave(&key->lock, fl2);
		if (key->manual_alloc &&
		    ctl.peer_addr == key->peer_addr &&
		    tag == key->tag) {
			/* __mctp_key_remove drops key->lock for us */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_DROPPED);
			rc = 0;
		} else {
			spin_unlock_irqrestore(&key->lock, fl2);
		}
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	return rc;
}
  347. static int mctp_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
  348. {
  349. struct mctp_sock *msk = container_of(sock->sk, struct mctp_sock, sk);
  350. switch (cmd) {
  351. case SIOCMCTPALLOCTAG:
  352. return mctp_ioctl_alloctag(msk, arg);
  353. case SIOCMCTPDROPTAG:
  354. return mctp_ioctl_droptag(msk, arg);
  355. }
  356. return -EINVAL;
  357. }
  358. #ifdef CONFIG_COMPAT
  359. static int mctp_compat_ioctl(struct socket *sock, unsigned int cmd,
  360. unsigned long arg)
  361. {
  362. void __user *argp = compat_ptr(arg);
  363. switch (cmd) {
  364. /* These have compatible ptr layouts */
  365. case SIOCMCTPALLOCTAG:
  366. case SIOCMCTPDROPTAG:
  367. return mctp_ioctl(sock, cmd, (unsigned long)argp);
  368. }
  369. return -ENOIOCTLCMD;
  370. }
  371. #endif
/* Socket operations for AF_MCTP datagram sockets; unsupported operations
 * are wired to the generic sock_no_* stubs.
 */
static const struct proto_ops mctp_dgram_ops = {
	.family		= PF_MCTP,
	.release	= mctp_release,
	.bind		= mctp_bind,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.getname	= sock_no_getname,
	.poll		= datagram_poll,
	.ioctl		= mctp_ioctl,
	.gettstamp	= sock_gettstamp,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= mctp_setsockopt,
	.getsockopt	= mctp_getsockopt,
	.sendmsg	= mctp_sendmsg,
	.recvmsg	= mctp_recvmsg,
	.mmap		= sock_no_mmap,
	.sendpage	= sock_no_sendpage,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= mctp_compat_ioctl,
#endif
};
/* Timer callback: walk this socket's keys, remove any whose expiry time
 * has passed, and re-arm the timer for the earliest remaining expiry.
 * Manually-allocated keys never expire; they are dropped explicitly via
 * SIOCMCTPDROPTAG or on socket close.
 */
static void mctp_sk_expire_keys(struct timer_list *timer)
{
	struct mctp_sock *msk = container_of(timer, struct mctp_sock,
					     key_expiry);
	struct net *net = sock_net(&msk->sk);
	unsigned long next_expiry, flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;
	bool next_expiry_valid = false;

	spin_lock_irqsave(&net->mctp.keys_lock, flags);

	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		/* don't expire. manual_alloc is immutable, no locking
		 * required.
		 */
		if (key->manual_alloc)
			continue;

		spin_lock_irqsave(&key->lock, fl2);
		if (!time_after_eq(key->expiry, jiffies)) {
			/* expired; __mctp_key_remove drops key->lock */
			__mctp_key_remove(key, net, fl2,
					  MCTP_TRACE_KEY_TIMEOUT);
			continue;
		}

		/* still live: track the soonest expiry for re-arming */
		if (next_expiry_valid) {
			if (time_before(key->expiry, next_expiry))
				next_expiry = key->expiry;
		} else {
			next_expiry = key->expiry;
			next_expiry_valid = true;
		}
		spin_unlock_irqrestore(&key->lock, fl2);
	}

	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);

	if (next_expiry_valid)
		mod_timer(timer, next_expiry);
}
  430. static int mctp_sk_init(struct sock *sk)
  431. {
  432. struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
  433. INIT_HLIST_HEAD(&msk->keys);
  434. timer_setup(&msk->key_expiry, mctp_sk_expire_keys, 0);
  435. return 0;
  436. }
  437. static void mctp_sk_close(struct sock *sk, long timeout)
  438. {
  439. struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
  440. del_timer_sync(&msk->key_expiry);
  441. sk_common_release(sk);
  442. }
  443. static int mctp_sk_hash(struct sock *sk)
  444. {
  445. struct net *net = sock_net(sk);
  446. mutex_lock(&net->mctp.bind_lock);
  447. sk_add_node_rcu(sk, &net->mctp.binds);
  448. mutex_unlock(&net->mctp.bind_lock);
  449. return 0;
  450. }
/* Remove the socket from the per-net bind list and drop all of its tag
 * key allocations.
 */
static void mctp_sk_unhash(struct sock *sk)
{
	struct mctp_sock *msk = container_of(sk, struct mctp_sock, sk);
	struct net *net = sock_net(sk);
	unsigned long flags, fl2;
	struct mctp_sk_key *key;
	struct hlist_node *tmp;

	/* remove from any type-based binds */
	mutex_lock(&net->mctp.bind_lock);
	sk_del_node_init_rcu(sk);
	mutex_unlock(&net->mctp.bind_lock);

	/* remove tag allocations */
	spin_lock_irqsave(&net->mctp.keys_lock, flags);
	hlist_for_each_entry_safe(key, tmp, &msk->keys, sklist) {
		spin_lock_irqsave(&key->lock, fl2);
		/* __mctp_key_remove drops key->lock for us */
		__mctp_key_remove(key, net, fl2, MCTP_TRACE_KEY_CLOSED);
	}
	spin_unlock_irqrestore(&net->mctp.keys_lock, flags);
}
/* Protocol definition; obj_size sizes allocations to hold the embedded
 * struct mctp_sock (bind parameters, key list, expiry timer).
 */
static struct proto mctp_proto = {
	.name		= "MCTP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct mctp_sock),
	.init		= mctp_sk_init,
	.close		= mctp_sk_close,
	.hash		= mctp_sk_hash,
	.unhash		= mctp_sk_unhash,
};
  479. static int mctp_pf_create(struct net *net, struct socket *sock,
  480. int protocol, int kern)
  481. {
  482. const struct proto_ops *ops;
  483. struct proto *proto;
  484. struct sock *sk;
  485. int rc;
  486. if (protocol)
  487. return -EPROTONOSUPPORT;
  488. /* only datagram sockets are supported */
  489. if (sock->type != SOCK_DGRAM)
  490. return -ESOCKTNOSUPPORT;
  491. proto = &mctp_proto;
  492. ops = &mctp_dgram_ops;
  493. sock->state = SS_UNCONNECTED;
  494. sock->ops = ops;
  495. sk = sk_alloc(net, PF_MCTP, GFP_KERNEL, proto, kern);
  496. if (!sk)
  497. return -ENOMEM;
  498. sock_init_data(sock, sk);
  499. rc = 0;
  500. if (sk->sk_prot->init)
  501. rc = sk->sk_prot->init(sk);
  502. if (rc)
  503. goto err_sk_put;
  504. return 0;
  505. err_sk_put:
  506. sock_orphan(sk);
  507. sock_put(sk);
  508. return rc;
  509. }
/* Address-family registration: socket(AF_MCTP, ...) -> mctp_pf_create */
static struct net_proto_family mctp_pf = {
	.family = PF_MCTP,
	.create = mctp_pf_create,
	.owner = THIS_MODULE,
};
  515. static __init int mctp_init(void)
  516. {
  517. int rc;
  518. /* ensure our uapi tag definitions match the header format */
  519. BUILD_BUG_ON(MCTP_TAG_OWNER != MCTP_HDR_FLAG_TO);
  520. BUILD_BUG_ON(MCTP_TAG_MASK != MCTP_HDR_TAG_MASK);
  521. pr_info("mctp: management component transport protocol core\n");
  522. rc = sock_register(&mctp_pf);
  523. if (rc)
  524. return rc;
  525. rc = proto_register(&mctp_proto, 0);
  526. if (rc)
  527. goto err_unreg_sock;
  528. rc = mctp_routes_init();
  529. if (rc)
  530. goto err_unreg_proto;
  531. rc = mctp_neigh_init();
  532. if (rc)
  533. goto err_unreg_proto;
  534. mctp_device_init();
  535. return 0;
  536. err_unreg_proto:
  537. proto_unregister(&mctp_proto);
  538. err_unreg_sock:
  539. sock_unregister(PF_MCTP);
  540. return rc;
  541. }
/* Module exit: tear down in reverse order of mctp_init() registration. */
static __exit void mctp_exit(void)
{
	mctp_device_exit();
	mctp_neigh_exit();
	mctp_routes_exit();
	proto_unregister(&mctp_proto);
	sock_unregister(PF_MCTP);
}
/* Registered at subsys initcall level, ahead of device-level initcalls. */
subsys_initcall(mctp_init);
module_exit(mctp_exit);

MODULE_DESCRIPTION("MCTP core");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jeremy Kerr <jk@codeconstruct.com.au>");
MODULE_ALIAS_NETPROTO(PF_MCTP);