PageRenderTime 111ms CodeModel.GetById 35ms RepoModel.GetById 2ms app.codeStats 0ms

/SPECS/linux/0014-hv_sock-introduce-Hyper-V-Sockets.patch

https://gitlab.com/unofficial-mirrors/vmware-photon
Patch | 1791 lines | 1775 code | 16 blank | 0 comment | 0 complexity | 728c4c47770423ecc00ed34e8e40264a MD5 | raw file
  1. From dd53a1fc57f6a549aeb50dae4b4567690a16c120 Mon Sep 17 00:00:00 2001
  2. From: Dexuan Cui <decui@microsoft.com>
  3. Date: Sat, 23 Jul 2016 01:35:51 +0000
  4. Subject: [PATCH 03/13] hv_sock: introduce Hyper-V Sockets
  5. Hyper-V Sockets (hv_sock) supplies a byte-stream based communication
  6. mechanism between the host and the guest. It's somewhat like TCP over
  7. VMBus, but the transportation layer (VMBus) is much simpler than IP.
  8. With Hyper-V Sockets, applications between the host and the guest can talk
  9. to each other directly by the traditional BSD-style socket APIs.
  10. Hyper-V Sockets is only available on new Windows hosts, like Windows Server
  11. 2016. More info is in this article "Make your own integration services":
  12. https://msdn.microsoft.com/en-us/virtualization/hyperv_on_windows/develop/make_mgmt_service
  13. The patch implements the necessary support in the guest side by introducing
  14. a new socket address family AF_HYPERV.
  15. Signed-off-by: Dexuan Cui <decui@microsoft.com>
  16. Cc: "K. Y. Srinivasan" <kys@microsoft.com>
  17. Cc: Haiyang Zhang <haiyangz@microsoft.com>
  18. Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
  19. Cc: Cathy Avery <cavery@redhat.com>
  20. Cc: Olaf Hering <olaf@aepfle.de>
  21. Origin: https://patchwork.kernel.org/patch/9244467/
  22. ---
  23. MAINTAINERS | 2 +
  24. include/linux/hyperv.h | 13 +
  25. include/linux/socket.h | 4 +-
  26. include/net/af_hvsock.h | 78 +++
  27. include/uapi/linux/hyperv.h | 23 +
  28. net/Kconfig | 1 +
  29. net/Makefile | 1 +
  30. net/hv_sock/Kconfig | 10 +
  31. net/hv_sock/Makefile | 3 +
  32. net/hv_sock/af_hvsock.c | 1507 +++++++++++++++++++++++++++++++++++++++++++
  33. 10 files changed, 1641 insertions(+), 1 deletion(-)
  34. create mode 100644 include/net/af_hvsock.h
  35. create mode 100644 net/hv_sock/Kconfig
  36. create mode 100644 net/hv_sock/Makefile
  37. create mode 100644 net/hv_sock/af_hvsock.c
  38. diff --git a/MAINTAINERS b/MAINTAINERS
  39. index 63cefa62324c..e64920219d88 100644
  40. --- a/MAINTAINERS
  41. +++ b/MAINTAINERS
  42. @@ -5853,7 +5853,9 @@ F: drivers/pci/host/pci-hyperv.c
  43. F: drivers/net/hyperv/
  44. F: drivers/scsi/storvsc_drv.c
  45. F: drivers/video/fbdev/hyperv_fb.c
  46. +F: net/hv_sock/
  47. F: include/linux/hyperv.h
  48. +F: include/net/af_hvsock.h
  49. F: tools/hv/
  50. F: Documentation/ABI/stable/sysfs-bus-vmbus
  51. diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
  52. index d596a076da11..489ad74c1e6e 100644
  53. --- a/include/linux/hyperv.h
  54. +++ b/include/linux/hyperv.h
  55. @@ -1613,5 +1613,18 @@ static inline void commit_rd_index(struct vmbus_channel *channel)
  56. hv_signal_on_read(channel);
  57. }
  58. +struct vmpipe_proto_header {
  59. + u32 pkt_type;
  60. + u32 data_size;
  61. +};
  62. +
  63. +#define HVSOCK_HEADER_LEN (sizeof(struct vmpacket_descriptor) + \
  64. + sizeof(struct vmpipe_proto_header))
  65. +
  66. +/* See 'prev_indices' in hv_ringbuffer_read(), hv_ringbuffer_write() */
  67. +#define PREV_INDICES_LEN (sizeof(u64))
  68. +#define HVSOCK_PKT_LEN(payload_len) (HVSOCK_HEADER_LEN + \
  69. + ALIGN((payload_len), 8) + \
  70. + PREV_INDICES_LEN)
  71. #endif /* _HYPERV_H */
  72. diff --git a/include/linux/socket.h b/include/linux/socket.h
  73. index b5cc5a6d7011..0b68b587d6ee 100644
  74. --- a/include/linux/socket.h
  75. +++ b/include/linux/socket.h
  76. @@ -202,8 +202,9 @@ struct ucred {
  77. #define AF_VSOCK 40 /* vSockets */
  78. #define AF_KCM 41 /* Kernel Connection Multiplexor*/
  79. #define AF_QIPCRTR 42 /* Qualcomm IPC Router */
  80. +#define AF_HYPERV 43 /* Hyper-V Sockets */
  81. -#define AF_MAX 43 /* For now.. */
  82. +#define AF_MAX 44 /* For now.. */
  83. /* Protocol families, same as address families. */
  84. #define PF_UNSPEC AF_UNSPEC
  85. @@ -251,6 +252,7 @@ struct ucred {
  86. #define PF_VSOCK AF_VSOCK
  87. #define PF_KCM AF_KCM
  88. #define PF_QIPCRTR AF_QIPCRTR
  89. +#define PF_HYPERV AF_HYPERV
  90. #define PF_MAX AF_MAX
  91. /* Maximum queue length specifiable by listen. */
  92. diff --git a/include/net/af_hvsock.h b/include/net/af_hvsock.h
  93. new file mode 100644
  94. index 000000000000..e7a8a3ae08e8
  95. --- /dev/null
  96. +++ b/include/net/af_hvsock.h
  97. @@ -0,0 +1,78 @@
  98. +#ifndef __AF_HVSOCK_H__
  99. +#define __AF_HVSOCK_H__
  100. +
  101. +#include <linux/kernel.h>
  102. +#include <linux/hyperv.h>
  103. +#include <net/sock.h>
  104. +
  105. +/* The host side's design of the feature requires 5 exact 4KB pages for
  106. + * recv/send rings respectively -- this is suboptimal considering memory
  107. + * consumption, however unluckily we have to live with it, before the
  108. + * host comes up with a better design in the future.
  109. + */
  110. +#define PAGE_SIZE_4K 4096
  111. +#define RINGBUFFER_HVSOCK_RCV_SIZE (PAGE_SIZE_4K * 5)
  112. +#define RINGBUFFER_HVSOCK_SND_SIZE (PAGE_SIZE_4K * 5)
  113. +
  114. +/* The MTU is 16KB per the host side's design.
  115. + * In future, the buffer can be eliminated when we switch to use the coming
  116. + * new VMBus ringbuffer "in-place consumption" APIs, by which we can
  117. + * directly copy data from VMBus ringbuffer into the userspace buffer.
  118. + */
  119. +#define HVSOCK_MTU_SIZE (1024 * 16)
  120. +struct hvsock_recv_buf {
  121. + unsigned int data_len;
  122. + unsigned int data_offset;
  123. +
  124. + struct vmpipe_proto_header hdr;
  125. + u8 buf[HVSOCK_MTU_SIZE];
  126. +};
  127. +
  128. +/* In the VM, actually we can send up to HVSOCK_MTU_SIZE bytes of payload,
  129. + * but for now let's use a smaller size to minimize the dynamically-allocated
  130. + * buffer. Note: the buffer can be eliminated in future when we add new VMBus
  131. + * ringbuffer APIs that allow us to directly copy data from userspace buf to
  132. + * VMBus ringbuffer.
  133. + */
  134. +#define HVSOCK_MAX_SND_SIZE_BY_VM (1024 * 4)
  135. +struct hvsock_send_buf {
  136. + struct vmpipe_proto_header hdr;
  137. + u8 buf[HVSOCK_MAX_SND_SIZE_BY_VM];
  138. +};
  139. +
  140. +struct hvsock_sock {
  141. + /* sk must be the first member. */
  142. + struct sock sk;
  143. +
  144. + struct sockaddr_hv local_addr;
  145. + struct sockaddr_hv remote_addr;
  146. +
  147. + /* protected by the global hvsock_mutex */
  148. + struct list_head bound_list;
  149. + struct list_head connected_list;
  150. +
  151. + struct list_head accept_queue;
  152. + /* used by enqueue and dequeue */
  153. + struct mutex accept_queue_mutex;
  154. +
  155. + struct delayed_work dwork;
  156. +
  157. + u32 peer_shutdown;
  158. +
  159. + struct vmbus_channel *channel;
  160. +
  161. + struct hvsock_send_buf *send;
  162. + struct hvsock_recv_buf *recv;
  163. +};
  164. +
  165. +static inline struct hvsock_sock *sk_to_hvsock(struct sock *sk)
  166. +{
  167. + return (struct hvsock_sock *)sk;
  168. +}
  169. +
  170. +static inline struct sock *hvsock_to_sk(struct hvsock_sock *hvsk)
  171. +{
  172. + return (struct sock *)hvsk;
  173. +}
  174. +
  175. +#endif /* __AF_HVSOCK_H__ */
  176. diff --git a/include/uapi/linux/hyperv.h b/include/uapi/linux/hyperv.h
  177. index e347b24ef9fb..eb3e44b69a5d 100644
  178. --- a/include/uapi/linux/hyperv.h
  179. +++ b/include/uapi/linux/hyperv.h
  180. @@ -26,6 +26,7 @@
  181. #define _UAPI_HYPERV_H
  182. #include <linux/uuid.h>
  183. +#include <linux/socket.h>
  184. /*
  185. * Framework version for util services.
  186. @@ -396,4 +397,26 @@ struct hv_kvp_ip_msg {
  187. struct hv_kvp_ipaddr_value kvp_ip_val;
  188. } __attribute__((packed));
  189. +/* This is the address format of Hyper-V Sockets.
  190. + * Note: here we just borrow the kernel's built-in type uuid_le. When
  191. + * an application calls bind() or connect(), the 2 members of struct
  192. + * sockaddr_hv must be of GUID format.
  193. + * The GUID format differs from the UUID format only in the byte order of
  194. + * the first 3 fields. Refer to:
  195. + * https://en.wikipedia.org/wiki/Globally_unique_identifier
  196. + */
  197. +struct sockaddr_hv {
  198. + __kernel_sa_family_t shv_family; /* Address family */
  199. + u16 reserved; /* Must be Zero */
  200. + uuid_le shv_vm_guid; /* VM ID */
  201. + uuid_le shv_service_guid; /* Service ID */
  202. +};
  203. +
  204. +#define SHV_VMID_GUEST NULL_UUID_LE
  205. +#define SHV_VMID_HOST NULL_UUID_LE
  206. +
  207. +#define SHV_SERVICE_ID_ANY NULL_UUID_LE
  208. +
  209. +#define SHV_PROTO_RAW 1
  210. +
  211. #endif /* _UAPI_HYPERV_H */
  212. diff --git a/net/Kconfig b/net/Kconfig
  213. index 7b6cd340b72b..a9be6907a620 100644
  214. --- a/net/Kconfig
  215. +++ b/net/Kconfig
  216. @@ -231,6 +231,7 @@ source "net/dns_resolver/Kconfig"
  217. source "net/batman-adv/Kconfig"
  218. source "net/openvswitch/Kconfig"
  219. source "net/vmw_vsock/Kconfig"
  220. +source "net/hv_sock/Kconfig"
  221. source "net/netlink/Kconfig"
  222. source "net/mpls/Kconfig"
  223. source "net/hsr/Kconfig"
  224. diff --git a/net/Makefile b/net/Makefile
  225. index 4cafaa2b4667..2b357eb81865 100644
  226. --- a/net/Makefile
  227. +++ b/net/Makefile
  228. @@ -71,6 +71,7 @@ obj-$(CONFIG_BATMAN_ADV) += batman-adv/
  229. obj-$(CONFIG_NFC) += nfc/
  230. obj-$(CONFIG_OPENVSWITCH) += openvswitch/
  231. obj-$(CONFIG_VSOCKETS) += vmw_vsock/
  232. +obj-$(CONFIG_HYPERV_SOCK) += hv_sock/
  233. obj-$(CONFIG_MPLS) += mpls/
  234. obj-$(CONFIG_HSR) += hsr/
  235. ifneq ($(CONFIG_NET_SWITCHDEV),)
  236. diff --git a/net/hv_sock/Kconfig b/net/hv_sock/Kconfig
  237. new file mode 100644
  238. index 000000000000..ff84875564d1
  239. --- /dev/null
  240. +++ b/net/hv_sock/Kconfig
  241. @@ -0,0 +1,10 @@
  242. +config HYPERV_SOCK
  243. + tristate "Hyper-V Sockets"
  244. + depends on HYPERV
  245. + default m if HYPERV
  246. + help
  247. + Hyper-V Sockets is a socket interface for high speed
  248. + communication between Linux guest and Hyper-V host over VMBus.
  249. +
  250. + To compile this driver as a module, choose M here: the module
  251. + will be called hv_sock.
  252. diff --git a/net/hv_sock/Makefile b/net/hv_sock/Makefile
  253. new file mode 100644
  254. index 000000000000..716c01230129
  255. --- /dev/null
  256. +++ b/net/hv_sock/Makefile
  257. @@ -0,0 +1,3 @@
  258. +obj-$(CONFIG_HYPERV_SOCK) += hv_sock.o
  259. +
  260. +hv_sock-y += af_hvsock.o
  261. diff --git a/net/hv_sock/af_hvsock.c b/net/hv_sock/af_hvsock.c
  262. new file mode 100644
  263. index 000000000000..331d3759f5cb
  264. --- /dev/null
  265. +++ b/net/hv_sock/af_hvsock.c
  266. @@ -0,0 +1,1507 @@
  267. +/*
  268. + * Hyper-V Sockets -- a socket-based communication channel between the
  269. + * Hyper-V host and the virtual machines running on it.
  270. + *
  271. + * Copyright (c) 2016 Microsoft Corporation.
  272. + *
  273. + * All rights reserved.
  274. + *
  275. + * Redistribution and use in source and binary forms, with or without
  276. + * modification, are permitted provided that the following conditions
  277. + * are met:
  278. + *
  279. + * 1. Redistributions of source code must retain the above copyright
  280. + * notice, this list of conditions and the following disclaimer.
  281. + * 2. Redistributions in binary form must reproduce the above copyright
  282. + * notice, this list of conditions and the following disclaimer in the
  283. + * documentation and/or other materials provided with the distribution.
  284. + * 3. The name of the author may not be used to endorse or promote
  285. + * products derived from this software without specific prior written
  286. + * permission.
  287. + *
  288. + * Alternatively, this software may be distributed under the terms of the
  289. + * GNU General Public License ("GPL") version 2 as published by the Free
  290. + * Software Foundation.
  291. + *
  292. + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  293. + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
  294. + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  295. + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
  296. + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
  297. + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
  298. + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  299. + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
  300. + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
  301. + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  302. + * POSSIBILITY OF SUCH DAMAGE.
  303. + */
  304. +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  305. +
  306. +#include <linux/init.h>
  307. +#include <linux/module.h>
  308. +#include <linux/vmalloc.h>
  309. +#include <net/af_hvsock.h>
  310. +
  311. +static struct proto hvsock_proto = {
  312. + .name = "HV_SOCK",
  313. + .owner = THIS_MODULE,
  314. + .obj_size = sizeof(struct hvsock_sock),
  315. +};
  316. +
  317. +#define SS_LISTEN 255
  318. +
  319. +#define HVSOCK_CONNECT_TIMEOUT (30 * HZ)
  320. +
  321. +/* This is an artificial limit */
  322. +#define HVSOCK_MAX_BACKLOG 128
  323. +
  324. +static LIST_HEAD(hvsock_bound_list);
  325. +static LIST_HEAD(hvsock_connected_list);
  326. +static DEFINE_MUTEX(hvsock_mutex);
  327. +
  328. +static struct sock *hvsock_find_bound_socket(const struct sockaddr_hv *addr)
  329. +{
  330. + struct hvsock_sock *hvsk;
  331. +
  332. + list_for_each_entry(hvsk, &hvsock_bound_list, bound_list) {
  333. + if (!uuid_le_cmp(addr->shv_service_guid,
  334. + hvsk->local_addr.shv_service_guid))
  335. + return hvsock_to_sk(hvsk);
  336. + }
  337. + return NULL;
  338. +}
  339. +
  340. +static struct sock *hvsock_find_connected_socket_by_channel(
  341. + const struct vmbus_channel *channel)
  342. +{
  343. + struct hvsock_sock *hvsk;
  344. +
  345. + list_for_each_entry(hvsk, &hvsock_connected_list, connected_list) {
  346. + if (hvsk->channel == channel)
  347. + return hvsock_to_sk(hvsk);
  348. + }
  349. + return NULL;
  350. +}
  351. +
  352. +static void hvsock_enqueue_accept(struct sock *listener,
  353. + struct sock *connected)
  354. +{
  355. + struct hvsock_sock *hvconnected;
  356. + struct hvsock_sock *hvlistener;
  357. +
  358. + hvlistener = sk_to_hvsock(listener);
  359. + hvconnected = sk_to_hvsock(connected);
  360. +
  361. + sock_hold(connected);
  362. + sock_hold(listener);
  363. +
  364. + mutex_lock(&hvlistener->accept_queue_mutex);
  365. + list_add_tail(&hvconnected->accept_queue, &hvlistener->accept_queue);
  366. + listener->sk_ack_backlog++;
  367. + mutex_unlock(&hvlistener->accept_queue_mutex);
  368. +}
  369. +
  370. +static struct sock *hvsock_dequeue_accept(struct sock *listener)
  371. +{
  372. + struct hvsock_sock *hvconnected;
  373. + struct hvsock_sock *hvlistener;
  374. +
  375. + hvlistener = sk_to_hvsock(listener);
  376. +
  377. + mutex_lock(&hvlistener->accept_queue_mutex);
  378. +
  379. + if (list_empty(&hvlistener->accept_queue)) {
  380. + mutex_unlock(&hvlistener->accept_queue_mutex);
  381. + return NULL;
  382. + }
  383. +
  384. + hvconnected = list_entry(hvlistener->accept_queue.next,
  385. + struct hvsock_sock, accept_queue);
  386. +
  387. + list_del_init(&hvconnected->accept_queue);
  388. + listener->sk_ack_backlog--;
  389. +
  390. + mutex_unlock(&hvlistener->accept_queue_mutex);
  391. +
  392. + sock_put(listener);
  393. + /* The caller will need a reference on the connected socket so we let
  394. + * it call sock_put().
  395. + */
  396. +
  397. + return hvsock_to_sk(hvconnected);
  398. +}
  399. +
  400. +static bool hvsock_is_accept_queue_empty(struct sock *sk)
  401. +{
  402. + struct hvsock_sock *hvsk = sk_to_hvsock(sk);
  403. + int ret;
  404. +
  405. + mutex_lock(&hvsk->accept_queue_mutex);
  406. + ret = list_empty(&hvsk->accept_queue);
  407. + mutex_unlock(&hvsk->accept_queue_mutex);
  408. +
  409. + return ret;
  410. +}
  411. +
  412. +static void hvsock_addr_init(struct sockaddr_hv *addr, uuid_le service_id)
  413. +{
  414. + memset(addr, 0, sizeof(*addr));
  415. + addr->shv_family = AF_HYPERV;
  416. + addr->shv_service_guid = service_id;
  417. +}
  418. +
  419. +static int hvsock_addr_validate(const struct sockaddr_hv *addr)
  420. +{
  421. + if (!addr)
  422. + return -EFAULT;
  423. +
  424. + if (addr->shv_family != AF_HYPERV)
  425. + return -EAFNOSUPPORT;
  426. +
  427. + if (addr->reserved != 0)
  428. + return -EINVAL;
  429. +
  430. + return 0;
  431. +}
  432. +
  433. +static bool hvsock_addr_bound(const struct sockaddr_hv *addr)
  434. +{
  435. + return !!uuid_le_cmp(addr->shv_service_guid, SHV_SERVICE_ID_ANY);
  436. +}
  437. +
  438. +static int hvsock_addr_cast(const struct sockaddr *addr, size_t len,
  439. + struct sockaddr_hv **out_addr)
  440. +{
  441. + if (len < sizeof(**out_addr))
  442. + return -EFAULT;
  443. +
  444. + *out_addr = (struct sockaddr_hv *)addr;
  445. + return hvsock_addr_validate(*out_addr);
  446. +}
  447. +
  448. +static int __hvsock_do_bind(struct hvsock_sock *hvsk,
  449. + struct sockaddr_hv *addr)
  450. +{
  451. + struct sockaddr_hv hv_addr;
  452. + int ret = 0;
  453. +
  454. + hvsock_addr_init(&hv_addr, addr->shv_service_guid);
  455. +
  456. + mutex_lock(&hvsock_mutex);
  457. +
  458. + if (!uuid_le_cmp(addr->shv_service_guid, SHV_SERVICE_ID_ANY)) {
  459. + do {
  460. + uuid_le_gen(&hv_addr.shv_service_guid);
  461. + } while (hvsock_find_bound_socket(&hv_addr));
  462. + } else {
  463. + if (hvsock_find_bound_socket(&hv_addr)) {
  464. + ret = -EADDRINUSE;
  465. + goto out;
  466. + }
  467. + }
  468. +
  469. + hvsock_addr_init(&hvsk->local_addr, hv_addr.shv_service_guid);
  470. +
  471. + sock_hold(&hvsk->sk);
  472. + list_add(&hvsk->bound_list, &hvsock_bound_list);
  473. +out:
  474. + mutex_unlock(&hvsock_mutex);
  475. +
  476. + return ret;
  477. +}
  478. +
  479. +static int __hvsock_bind(struct sock *sk, struct sockaddr_hv *addr)
  480. +{
  481. + struct hvsock_sock *hvsk = sk_to_hvsock(sk);
  482. + int ret;
  483. +
  484. + if (hvsock_addr_bound(&hvsk->local_addr))
  485. + return -EINVAL;
  486. +
  487. + switch (sk->sk_socket->type) {
  488. + case SOCK_STREAM:
  489. + ret = __hvsock_do_bind(hvsk, addr);
  490. + break;
  491. +
  492. + default:
  493. + ret = -EINVAL;
  494. + break;
  495. + }
  496. +
  497. + return ret;
  498. +}
  499. +
  500. +/* Autobind this socket to the local address if necessary. */
  501. +static int hvsock_auto_bind(struct hvsock_sock *hvsk)
  502. +{
  503. + struct sock *sk = hvsock_to_sk(hvsk);
  504. + struct sockaddr_hv local_addr;
  505. +
  506. + if (hvsock_addr_bound(&hvsk->local_addr))
  507. + return 0;
  508. + hvsock_addr_init(&local_addr, SHV_SERVICE_ID_ANY);
  509. + return __hvsock_bind(sk, &local_addr);
  510. +}
  511. +
  512. +static void hvsock_sk_destruct(struct sock *sk)
  513. +{
  514. + struct vmbus_channel *channel;
  515. + struct hvsock_sock *hvsk;
  516. +
  517. + hvsk = sk_to_hvsock(sk);
  518. + vfree(hvsk->send);
  519. + vfree(hvsk->recv);
  520. +
  521. + channel = hvsk->channel;
  522. + if (!channel)
  523. + return;
  524. +
  525. + vmbus_hvsock_device_unregister(channel);
  526. +}
  527. +
  528. +static void __hvsock_release(struct sock *sk)
  529. +{
  530. + struct hvsock_sock *hvsk;
  531. + struct sock *pending;
  532. +
  533. + hvsk = sk_to_hvsock(sk);
  534. +
  535. + mutex_lock(&hvsock_mutex);
  536. +
  537. + if (!list_empty(&hvsk->bound_list)) {
  538. + list_del_init(&hvsk->bound_list);
  539. + sock_put(&hvsk->sk);
  540. + }
  541. +
  542. + if (!list_empty(&hvsk->connected_list)) {
  543. + list_del_init(&hvsk->connected_list);
  544. + sock_put(&hvsk->sk);
  545. + }
  546. +
  547. + mutex_unlock(&hvsock_mutex);
  548. +
  549. + lock_sock(sk);
  550. + sock_orphan(sk);
  551. + sk->sk_shutdown = SHUTDOWN_MASK;
  552. +
  553. + /* Clean up any sockets that never were accepted. */
  554. + while ((pending = hvsock_dequeue_accept(sk)) != NULL) {
  555. + __hvsock_release(pending);
  556. + sock_put(pending);
  557. + }
  558. +
  559. + release_sock(sk);
  560. + sock_put(sk);
  561. +}
  562. +
  563. +static int hvsock_release(struct socket *sock)
  564. +{
  565. + /* If accept() is interrupted by a signal, the temporary socket
  566. + * struct's sock->sk is NULL.
  567. + */
  568. + if (sock->sk) {
  569. + __hvsock_release(sock->sk);
  570. + sock->sk = NULL;
  571. + }
  572. +
  573. + sock->state = SS_FREE;
  574. + return 0;
  575. +}
  576. +
  577. +static struct sock *hvsock_create(struct net *net, struct socket *sock,
  578. + gfp_t priority, unsigned short type)
  579. +{
  580. + struct hvsock_sock *hvsk;
  581. + struct sock *sk;
  582. +
  583. + sk = sk_alloc(net, AF_HYPERV, priority, &hvsock_proto, 0);
  584. + if (!sk)
  585. + return NULL;
  586. +
  587. + sock_init_data(sock, sk);
  588. +
  589. + /* sk->sk_type is normally set in sock_init_data, but only if sock
  590. + * is non-NULL. We make sure that our sockets always have a type by
  591. + * setting it here if needed.
  592. + */
  593. + if (!sock)
  594. + sk->sk_type = type;
  595. +
  596. + sk->sk_destruct = hvsock_sk_destruct;
  597. +
  598. + /* It looks like a stream-based socket doesn't need this. */
  599. + sk->sk_backlog_rcv = NULL;
  600. +
  601. + sk->sk_state = 0;
  602. + sock_reset_flag(sk, SOCK_DONE);
  603. +
  604. + hvsk = sk_to_hvsock(sk);
  605. +
  606. + hvsk->send = NULL;
  607. + hvsk->recv = NULL;
  608. +
  609. + hvsock_addr_init(&hvsk->local_addr, SHV_SERVICE_ID_ANY);
  610. + hvsock_addr_init(&hvsk->remote_addr, SHV_SERVICE_ID_ANY);
  611. +
  612. + INIT_LIST_HEAD(&hvsk->bound_list);
  613. + INIT_LIST_HEAD(&hvsk->connected_list);
  614. +
  615. + INIT_LIST_HEAD(&hvsk->accept_queue);
  616. + mutex_init(&hvsk->accept_queue_mutex);
  617. +
  618. + hvsk->peer_shutdown = 0;
  619. +
  620. + return sk;
  621. +}
  622. +
  623. +static int hvsock_bind(struct socket *sock, struct sockaddr *addr,
  624. + int addr_len)
  625. +{
  626. + struct sockaddr_hv *hv_addr;
  627. + struct sock *sk;
  628. + int ret;
  629. +
  630. + sk = sock->sk;
  631. +
  632. + if (hvsock_addr_cast(addr, addr_len, &hv_addr) != 0)
  633. + return -EINVAL;
  634. +
  635. + if (uuid_le_cmp(hv_addr->shv_vm_guid, NULL_UUID_LE))
  636. + return -EINVAL;
  637. +
  638. + lock_sock(sk);
  639. + ret = __hvsock_bind(sk, hv_addr);
  640. + release_sock(sk);
  641. +
  642. + return ret;
  643. +}
  644. +
  645. +static int hvsock_getname(struct socket *sock,
  646. + struct sockaddr *addr, int *addr_len, int peer)
  647. +{
  648. + struct sockaddr_hv *hv_addr;
  649. + struct hvsock_sock *hvsk;
  650. + struct sock *sk;
  651. + int ret;
  652. +
  653. + sk = sock->sk;
  654. + hvsk = sk_to_hvsock(sk);
  655. + ret = 0;
  656. +
  657. + lock_sock(sk);
  658. +
  659. + if (peer) {
  660. + if (sock->state != SS_CONNECTED) {
  661. + ret = -ENOTCONN;
  662. + goto out;
  663. + }
  664. + hv_addr = &hvsk->remote_addr;
  665. + } else {
  666. + hv_addr = &hvsk->local_addr;
  667. + }
  668. +
  669. + __sockaddr_check_size(sizeof(*hv_addr));
  670. +
  671. + memcpy(addr, hv_addr, sizeof(*hv_addr));
  672. + *addr_len = sizeof(*hv_addr);
  673. +
  674. +out:
  675. + release_sock(sk);
  676. + return ret;
  677. +}
  678. +
  679. +static void get_ringbuffer_rw_status(struct vmbus_channel *channel,
  680. + bool *can_read, bool *can_write)
  681. +{
  682. + u32 avl_read_bytes, avl_write_bytes, dummy;
  683. +
  684. + if (can_read) {
  685. + hv_get_ringbuffer_availbytes(&channel->inbound,
  686. + &avl_read_bytes,
  687. + &dummy);
  688. + /* 0-size payload means FIN */
  689. + *can_read = avl_read_bytes >= HVSOCK_PKT_LEN(0);
  690. + }
  691. +
  692. + if (can_write) {
  693. + hv_get_ringbuffer_availbytes(&channel->outbound,
  694. + &dummy,
  695. + &avl_write_bytes);
  696. +
  697. + /* We only write if there is enough space */
  698. + *can_write = avl_write_bytes > HVSOCK_PKT_LEN(PAGE_SIZE_4K);
  699. + }
  700. +}
  701. +
  702. +static size_t get_ringbuffer_writable_bytes(struct vmbus_channel *channel)
  703. +{
  704. + u32 avl_write_bytes, dummy;
  705. + size_t ret;
  706. +
  707. + hv_get_ringbuffer_availbytes(&channel->outbound,
  708. + &dummy,
  709. + &avl_write_bytes);
  710. +
  711. + /* The ringbuffer mustn't be 100% full, and we should reserve a
  712. + * zero-length-payload packet for the FIN: see hv_ringbuffer_write()
  713. + * and hvsock_shutdown().
  714. + */
  715. + if (avl_write_bytes < HVSOCK_PKT_LEN(1) + HVSOCK_PKT_LEN(0))
  716. + return 0;
  717. + ret = avl_write_bytes - HVSOCK_PKT_LEN(1) - HVSOCK_PKT_LEN(0);
  718. +
  719. + return round_down(ret, 8);
  720. +}
  721. +
  722. +static int hvsock_get_send_buf(struct hvsock_sock *hvsk)
  723. +{
  724. + hvsk->send = vmalloc(sizeof(*hvsk->send));
  725. + return hvsk->send ? 0 : -ENOMEM;
  726. +}
  727. +
  728. +static void hvsock_put_send_buf(struct hvsock_sock *hvsk)
  729. +{
  730. + vfree(hvsk->send);
  731. + hvsk->send = NULL;
  732. +}
  733. +
  734. +static int hvsock_send_data(struct vmbus_channel *channel,
  735. + struct hvsock_sock *hvsk,
  736. + size_t to_write)
  737. +{
  738. + hvsk->send->hdr.pkt_type = 1;
  739. + hvsk->send->hdr.data_size = to_write;
  740. + return vmbus_sendpacket(channel, &hvsk->send->hdr,
  741. + sizeof(hvsk->send->hdr) + to_write,
  742. + 0, VM_PKT_DATA_INBAND, 0);
  743. +}
  744. +
  745. +static int hvsock_get_recv_buf(struct hvsock_sock *hvsk)
  746. +{
  747. + hvsk->recv = vmalloc(sizeof(*hvsk->recv));
  748. + return hvsk->recv ? 0 : -ENOMEM;
  749. +}
  750. +
  751. +static void hvsock_put_recv_buf(struct hvsock_sock *hvsk)
  752. +{
  753. + vfree(hvsk->recv);
  754. + hvsk->recv = NULL;
  755. +}
  756. +
  757. +static int hvsock_recv_data(struct vmbus_channel *channel,
  758. + struct hvsock_sock *hvsk,
  759. + size_t *payload_len)
  760. +{
  761. + u32 buffer_actual_len;
  762. + u64 dummy_req_id;
  763. + int ret;
  764. +
  765. + ret = vmbus_recvpacket(channel, &hvsk->recv->hdr,
  766. + sizeof(hvsk->recv->hdr) +
  767. + sizeof(hvsk->recv->buf),
  768. + &buffer_actual_len, &dummy_req_id);
  769. + if (ret != 0 || buffer_actual_len <= sizeof(hvsk->recv->hdr))
  770. + *payload_len = 0;
  771. + else
  772. + *payload_len = hvsk->recv->hdr.data_size;
  773. +
  774. + return ret;
  775. +}
  776. +
  777. +static int hvsock_shutdown(struct socket *sock, int mode)
  778. +{
  779. + struct hvsock_sock *hvsk;
  780. + struct sock *sk;
  781. + int ret = 0;
  782. +
  783. + if (mode < SHUT_RD || mode > SHUT_RDWR)
  784. + return -EINVAL;
  785. + /* This maps:
  786. + * SHUT_RD (0) -> RCV_SHUTDOWN (1)
  787. + * SHUT_WR (1) -> SEND_SHUTDOWN (2)
  788. + * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
  789. + */
  790. + ++mode;
  791. +
  792. + if (sock->state != SS_CONNECTED)
  793. + return -ENOTCONN;
  794. +
  795. + sock->state = SS_DISCONNECTING;
  796. +
  797. + sk = sock->sk;
  798. +
  799. + lock_sock(sk);
  800. +
  801. + sk->sk_shutdown |= mode;
  802. + sk->sk_state_change(sk);
  803. +
  804. + if (mode & SEND_SHUTDOWN) {
  805. + hvsk = sk_to_hvsock(sk);
  806. +
  807. + ret = hvsock_get_send_buf(hvsk);
  808. + if (ret < 0)
  809. + goto out;
  810. +
  811. + /* It can't fail: see get_ringbuffer_writable_bytes(). */
  812. + (void)hvsock_send_data(hvsk->channel, hvsk, 0);
  813. +
  814. + hvsock_put_send_buf(hvsk);
  815. + }
  816. +
  817. +out:
  818. + release_sock(sk);
  819. +
  820. + return ret;
  821. +}
  822. +
  823. +static unsigned int hvsock_poll(struct file *file, struct socket *sock,
  824. + poll_table *wait)
  825. +{
  826. + struct vmbus_channel *channel;
  827. + bool can_read, can_write;
  828. + struct hvsock_sock *hvsk;
  829. + unsigned int mask;
  830. + struct sock *sk;
  831. +
  832. + sk = sock->sk;
  833. + hvsk = sk_to_hvsock(sk);
  834. +
  835. + poll_wait(file, sk_sleep(sk), wait);
  836. + mask = 0;
  837. +
  838. + if (sk->sk_err)
  839. + /* Signify that there has been an error on this socket. */
  840. + mask |= POLLERR;
  841. +
  842. + /* INET sockets treat local write shutdown and peer write shutdown as a
  843. + * case of POLLHUP set.
  844. + */
  845. + if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
  846. + ((sk->sk_shutdown & SEND_SHUTDOWN) &&
  847. + (hvsk->peer_shutdown & SEND_SHUTDOWN))) {
  848. + mask |= POLLHUP;
  849. + }
  850. +
  851. + if (sk->sk_shutdown & RCV_SHUTDOWN ||
  852. + hvsk->peer_shutdown & SEND_SHUTDOWN) {
  853. + mask |= POLLRDHUP;
  854. + }
  855. +
  856. + lock_sock(sk);
  857. +
  858. + /* Listening sockets that have connections in their accept
  859. + * queue can be read.
  860. + */
  861. + if (sk->sk_state == SS_LISTEN && !hvsock_is_accept_queue_empty(sk))
  862. + mask |= POLLIN | POLLRDNORM;
  863. +
  864. + /* The mutex guards against a race with hvsock_open_connection() */
  865. + mutex_lock(&hvsock_mutex);
  866. +
  867. + channel = hvsk->channel;
  868. + if (channel) {
  869. + /* If there is something in the queue then we can read */
  870. + get_ringbuffer_rw_status(channel, &can_read, &can_write);
  871. +
  872. + if (!can_read && hvsk->recv)
  873. + can_read = true;
  874. +
  875. + if (!(sk->sk_shutdown & RCV_SHUTDOWN) && can_read)
  876. + mask |= POLLIN | POLLRDNORM;
  877. + } else {
  878. + can_write = false;
  879. + }
  880. +
  881. + mutex_unlock(&hvsock_mutex);
  882. +
  883. + /* Sockets whose connections have been closed or terminated should
  884. + * also be considered read, and we check the shutdown flag for that.
  885. + */
  886. + if (sk->sk_shutdown & RCV_SHUTDOWN ||
  887. + hvsk->peer_shutdown & SEND_SHUTDOWN) {
  888. + mask |= POLLIN | POLLRDNORM;
  889. + }
  890. +
  891. + /* Connected sockets that can produce data can be written. */
  892. + if (sk->sk_state == SS_CONNECTED && can_write &&
  893. + !(sk->sk_shutdown & SEND_SHUTDOWN)) {
  894. + /* Remove POLLWRBAND since INET sockets are not setting it.
  895. + */
  896. + mask |= POLLOUT | POLLWRNORM;
  897. + }
  898. +
  899. + /* Simulate INET socket poll behaviors, which sets
  900. + * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
  901. + * but local send is not shutdown.
  902. + */
  903. + if (sk->sk_state == SS_UNCONNECTED &&
  904. + !(sk->sk_shutdown & SEND_SHUTDOWN))
  905. + mask |= POLLOUT | POLLWRNORM;
  906. +
  907. + release_sock(sk);
  908. +
  909. + return mask;
  910. +}
  911. +
  912. +/* This function runs in the tasklet context of process_chn_event() */
  913. +static void hvsock_on_channel_cb(void *ctx)
  914. +{
  915. + struct sock *sk = (struct sock *)ctx;
  916. + struct vmbus_channel *channel;
  917. + struct hvsock_sock *hvsk;
  918. + bool can_read, can_write;
  919. +
  920. + hvsk = sk_to_hvsock(sk);
  921. + channel = hvsk->channel;
  922. + BUG_ON(!channel);
  923. +
  924. + get_ringbuffer_rw_status(channel, &can_read, &can_write);
  925. +
  926. + if (can_read)
  927. + sk->sk_data_ready(sk);
  928. +
  929. + if (can_write)
  930. + sk->sk_write_space(sk);
  931. +}
  932. +
  933. +static void hvsock_close_connection(struct vmbus_channel *channel)
  934. +{
  935. + struct hvsock_sock *hvsk;
  936. + struct sock *sk;
  937. +
  938. + mutex_lock(&hvsock_mutex);
  939. +
  940. + sk = hvsock_find_connected_socket_by_channel(channel);
  941. +
  942. + /* The guest has already closed the connection? */
  943. + if (!sk)
  944. + goto out;
  945. +
  946. + sk->sk_state = SS_UNCONNECTED;
  947. + sock_set_flag(sk, SOCK_DONE);
  948. +
  949. + hvsk = sk_to_hvsock(sk);
  950. + hvsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN;
  951. +
  952. + sk->sk_state_change(sk);
  953. +out:
  954. + mutex_unlock(&hvsock_mutex);
  955. +}
  956. +
  957. +static int hvsock_open_connection(struct vmbus_channel *channel)
  958. +{
  959. + struct hvsock_sock *hvsk = NULL, *new_hvsk = NULL;
  960. + uuid_le *instance, *service_id;
  961. + unsigned char conn_from_host;
  962. + struct sockaddr_hv hv_addr;
  963. + struct sock *sk, *new_sk = NULL;
  964. + int ret;
  965. +
  966. + instance = &channel->offermsg.offer.if_instance;
  967. + service_id = &channel->offermsg.offer.if_type;
  968. +
  969. + /* The first byte != 0 means the host initiated the connection. */
  970. + conn_from_host = channel->offermsg.offer.u.pipe.user_def[0];
  971. +
  972. + mutex_lock(&hvsock_mutex);
  973. +
  974. + hvsock_addr_init(&hv_addr, conn_from_host ? *service_id : *instance);
  975. + sk = hvsock_find_bound_socket(&hv_addr);
  976. +
  977. + if (!sk || (conn_from_host && sk->sk_state != SS_LISTEN) ||
  978. + (!conn_from_host && sk->sk_state != SS_CONNECTING)) {
  979. + ret = -ENXIO;
  980. + goto out;
  981. + }
  982. +
  983. + if (conn_from_host) {
  984. + if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
  985. + ret = -ECONNREFUSED;
  986. + goto out;
  987. + }
  988. +
  989. + new_sk = hvsock_create(sock_net(sk), NULL, GFP_KERNEL,
  990. + sk->sk_type);
  991. + if (!new_sk) {
  992. + ret = -ENOMEM;
  993. + goto out;
  994. + }
  995. +
  996. + new_sk->sk_state = SS_CONNECTING;
  997. + new_hvsk = sk_to_hvsock(new_sk);
  998. + new_hvsk->channel = channel;
  999. + hvsock_addr_init(&new_hvsk->local_addr, *service_id);
  1000. + hvsock_addr_init(&new_hvsk->remote_addr, *instance);
  1001. + } else {
  1002. + hvsk = sk_to_hvsock(sk);
  1003. + hvsk->channel = channel;
  1004. + }
  1005. +
  1006. + set_channel_read_state(channel, false);
  1007. + ret = vmbus_open(channel, RINGBUFFER_HVSOCK_SND_SIZE,
  1008. + RINGBUFFER_HVSOCK_RCV_SIZE, NULL, 0,
  1009. + hvsock_on_channel_cb, conn_from_host ? new_sk : sk);
  1010. + if (ret != 0) {
  1011. + if (conn_from_host) {
  1012. + new_hvsk->channel = NULL;
  1013. + sock_put(new_sk);
  1014. + } else {
  1015. + hvsk->channel = NULL;
  1016. + }
  1017. + goto out;
  1018. + }
  1019. +
  1020. + vmbus_set_chn_rescind_callback(channel, hvsock_close_connection);
  1021. +
  1022. + /* see get_ringbuffer_rw_status() */
  1023. + set_channel_pending_send_size(channel,
  1024. + HVSOCK_PKT_LEN(PAGE_SIZE_4K) + 1);
  1025. +
  1026. + if (conn_from_host) {
  1027. + new_sk->sk_state = SS_CONNECTED;
  1028. +
  1029. + sock_hold(&new_hvsk->sk);
  1030. + list_add(&new_hvsk->connected_list, &hvsock_connected_list);
  1031. +
  1032. + hvsock_enqueue_accept(sk, new_sk);
  1033. + } else {
  1034. + sk->sk_state = SS_CONNECTED;
  1035. + sk->sk_socket->state = SS_CONNECTED;
  1036. +
  1037. + sock_hold(&hvsk->sk);
  1038. + list_add(&hvsk->connected_list, &hvsock_connected_list);
  1039. + }
  1040. +
  1041. + sk->sk_state_change(sk);
  1042. +out:
  1043. + mutex_unlock(&hvsock_mutex);
  1044. + return ret;
  1045. +}
  1046. +
  1047. +static void hvsock_connect_timeout(struct work_struct *work)
  1048. +{
  1049. + struct hvsock_sock *hvsk;
  1050. + struct sock *sk;
  1051. +
  1052. + hvsk = container_of(work, struct hvsock_sock, dwork.work);
  1053. + sk = hvsock_to_sk(hvsk);
  1054. +
  1055. + lock_sock(sk);
  1056. + if ((sk->sk_state == SS_CONNECTING) &&
  1057. + (sk->sk_shutdown != SHUTDOWN_MASK)) {
  1058. + sk->sk_state = SS_UNCONNECTED;
  1059. + sk->sk_err = ETIMEDOUT;
  1060. + sk->sk_error_report(sk);
  1061. + }
  1062. + release_sock(sk);
  1063. +
  1064. + sock_put(sk);
  1065. +}
  1066. +
  1067. +static int hvsock_connect_wait(struct socket *sock,
  1068. + int flags, int current_ret)
  1069. +{
  1070. + struct sock *sk = sock->sk;
  1071. + struct hvsock_sock *hvsk;
  1072. + int ret = current_ret;
  1073. + DEFINE_WAIT(wait);
  1074. + long timeout;
  1075. +
  1076. + hvsk = sk_to_hvsock(sk);
  1077. + timeout = HVSOCK_CONNECT_TIMEOUT;
  1078. + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  1079. +
  1080. + while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
  1081. + if (flags & O_NONBLOCK) {
  1082. + /* If we're not going to block, we schedule a timeout
  1083. + * function to generate a timeout on the connection
  1084. + * attempt, in case the peer doesn't respond in a
  1085. + * timely manner. We hold on to the socket until the
  1086. + * timeout fires.
  1087. + */
  1088. + sock_hold(sk);
  1089. + INIT_DELAYED_WORK(&hvsk->dwork,
  1090. + hvsock_connect_timeout);
  1091. + schedule_delayed_work(&hvsk->dwork, timeout);
  1092. +
  1093. + /* Skip ahead to preserve error code set above. */
  1094. + goto out_wait;
  1095. + }
  1096. +
  1097. + release_sock(sk);
  1098. + timeout = schedule_timeout(timeout);
  1099. + lock_sock(sk);
  1100. +
  1101. + if (signal_pending(current)) {
  1102. + ret = sock_intr_errno(timeout);
  1103. + goto out_wait_error;
  1104. + } else if (timeout == 0) {
  1105. + ret = -ETIMEDOUT;
  1106. + goto out_wait_error;
  1107. + }
  1108. +
  1109. + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  1110. + }
  1111. +
  1112. + ret = sk->sk_err ? -sk->sk_err : 0;
  1113. +
  1114. +out_wait_error:
  1115. + if (ret < 0) {
  1116. + sk->sk_state = SS_UNCONNECTED;
  1117. + sock->state = SS_UNCONNECTED;
  1118. + }
  1119. +out_wait:
  1120. + finish_wait(sk_sleep(sk), &wait);
  1121. + return ret;
  1122. +}
  1123. +
  1124. +static int hvsock_connect(struct socket *sock, struct sockaddr *addr,
  1125. + int addr_len, int flags)
  1126. +{
  1127. + struct sockaddr_hv *remote_addr;
  1128. + struct hvsock_sock *hvsk;
  1129. + struct sock *sk;
  1130. + int ret = 0;
  1131. +
  1132. + sk = sock->sk;
  1133. + hvsk = sk_to_hvsock(sk);
  1134. +
  1135. + lock_sock(sk);
  1136. +
  1137. + switch (sock->state) {
  1138. + case SS_CONNECTED:
  1139. + ret = -EISCONN;
  1140. + goto out;
  1141. + case SS_DISCONNECTING:
  1142. + ret = -EINVAL;
  1143. + goto out;
  1144. + case SS_CONNECTING:
  1145. + /* This continues on so we can move sock into the SS_CONNECTED
  1146. + * state once the connection has completed (at which point err
  1147. + * will be set to zero also). Otherwise, we will either wait
  1148. + * for the connection or return -EALREADY should this be a
  1149. + * non-blocking call.
  1150. + */
  1151. + ret = -EALREADY;
  1152. + break;
  1153. + default:
  1154. + if ((sk->sk_state == SS_LISTEN) ||
  1155. + hvsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
  1156. + ret = -EINVAL;
  1157. + goto out;
  1158. + }
  1159. +
  1160. + /* Set the remote address that we are connecting to. */
  1161. + memcpy(&hvsk->remote_addr, remote_addr,
  1162. + sizeof(hvsk->remote_addr));
  1163. +
  1164. + ret = hvsock_auto_bind(hvsk);
  1165. + if (ret)
  1166. + goto out;
  1167. +
  1168. + sk->sk_state = SS_CONNECTING;
  1169. +
  1170. + ret = vmbus_send_tl_connect_request(
  1171. + &hvsk->local_addr.shv_service_guid,
  1172. + &hvsk->remote_addr.shv_service_guid);
  1173. + if (ret < 0)
  1174. + goto out;
  1175. +
  1176. + /* Mark sock as connecting and set the error code to in
  1177. + * progress in case this is a non-blocking connect.
  1178. + */
  1179. + sock->state = SS_CONNECTING;
  1180. + ret = -EINPROGRESS;
  1181. + }
  1182. +
  1183. + ret = hvsock_connect_wait(sock, flags, ret);
  1184. +out:
  1185. + release_sock(sk);
  1186. + return ret;
  1187. +}
  1188. +
  1189. +static int hvsock_accept_wait(struct sock *listener,
  1190. + struct socket *newsock, int flags)
  1191. +{
  1192. + struct hvsock_sock *hvconnected;
  1193. + struct sock *connected;
  1194. +
  1195. + DEFINE_WAIT(wait);
  1196. + long timeout;
  1197. +
  1198. + int ret = 0;
  1199. +
  1200. + /* Wait for children sockets to appear; these are the new sockets
  1201. + * created upon connection establishment.
  1202. + */
  1203. + timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
  1204. + prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
  1205. +
  1206. + while ((connected = hvsock_dequeue_accept(listener)) == NULL &&
  1207. + listener->sk_err == 0) {
  1208. + release_sock(listener);
  1209. + timeout = schedule_timeout(timeout);
  1210. + lock_sock(listener);
  1211. +
  1212. + if (signal_pending(current)) {
  1213. + ret = sock_intr_errno(timeout);
  1214. + goto out_wait;
  1215. + } else if (timeout == 0) {
  1216. + ret = -EAGAIN;
  1217. + goto out_wait;
  1218. + }
  1219. +
  1220. + prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
  1221. + }
  1222. +
  1223. + if (listener->sk_err)
  1224. + ret = -listener->sk_err;
  1225. +
  1226. + if (connected) {
  1227. + lock_sock(connected);
  1228. + hvconnected = sk_to_hvsock(connected);
  1229. +
  1230. + if (!ret) {
  1231. + newsock->state = SS_CONNECTED;
  1232. + sock_graft(connected, newsock);
  1233. + }
  1234. + release_sock(connected);
  1235. + sock_put(connected);
  1236. + }
  1237. +
  1238. +out_wait:
  1239. + finish_wait(sk_sleep(listener), &wait);
  1240. + return ret;
  1241. +}
  1242. +
  1243. +static int hvsock_accept(struct socket *sock, struct socket *newsock,
  1244. + int flags)
  1245. +{
  1246. + struct sock *listener;
  1247. + int ret;
  1248. +
  1249. + listener = sock->sk;
  1250. +
  1251. + lock_sock(listener);
  1252. +
  1253. + if (sock->type != SOCK_STREAM) {
  1254. + ret = -EOPNOTSUPP;
  1255. + goto out;
  1256. + }
  1257. +
  1258. + if (listener->sk_state != SS_LISTEN) {
  1259. + ret = -EINVAL;
  1260. + goto out;
  1261. + }
  1262. +
  1263. + ret = hvsock_accept_wait(listener, newsock, flags);
  1264. +out:
  1265. + release_sock(listener);
  1266. + return ret;
  1267. +}
  1268. +
  1269. +static int hvsock_listen(struct socket *sock, int backlog)
  1270. +{
  1271. + struct hvsock_sock *hvsk;
  1272. + struct sock *sk;
  1273. + int ret = 0;
  1274. +
  1275. + sk = sock->sk;
  1276. + lock_sock(sk);
  1277. +
  1278. + if (sock->type != SOCK_STREAM) {
  1279. + ret = -EOPNOTSUPP;
  1280. + goto out;
  1281. + }
  1282. +
  1283. + if (sock->state != SS_UNCONNECTED) {
  1284. + ret = -EINVAL;
  1285. + goto out;
  1286. + }
  1287. +
  1288. + if (backlog <= 0) {
  1289. + ret = -EINVAL;
  1290. + goto out;
  1291. + }
  1292. + if (backlog > HVSOCK_MAX_BACKLOG)
  1293. + backlog = HVSOCK_MAX_BACKLOG;
  1294. +
  1295. + hvsk = sk_to_hvsock(sk);
  1296. + if (!hvsock_addr_bound(&hvsk->local_addr)) {
  1297. + ret = -EINVAL;
  1298. + goto out;
  1299. + }
  1300. +
  1301. + sk->sk_ack_backlog = 0;
  1302. + sk->sk_max_ack_backlog = backlog;
  1303. + sk->sk_state = SS_LISTEN;
  1304. +out:
  1305. + release_sock(sk);
  1306. + return ret;
  1307. +}
  1308. +
  1309. +static int hvsock_sendmsg_wait(struct sock *sk, struct msghdr *msg,
  1310. + size_t len)
  1311. +{
  1312. + struct hvsock_sock *hvsk = sk_to_hvsock(sk);
  1313. + struct vmbus_channel *channel;
  1314. + size_t total_to_write = len;
  1315. + size_t total_written = 0;
  1316. + DEFINE_WAIT(wait);
  1317. + bool can_write;
  1318. + long timeout;
  1319. + int ret = -EIO;
  1320. +
  1321. + timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
  1322. + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  1323. + channel = hvsk->channel;
  1324. +
  1325. + while (total_to_write > 0) {
  1326. + size_t to_write, max_writable;
  1327. +
  1328. + while (1) {
  1329. + get_ringbuffer_rw_status(channel, NULL, &can_write);
  1330. +
  1331. + if (can_write || sk->sk_err != 0 ||
  1332. + (sk->sk_shutdown & SEND_SHUTDOWN) ||
  1333. + (hvsk->peer_shutdown & RCV_SHUTDOWN))
  1334. + break;
  1335. +
  1336. + /* Don't wait for non-blocking sockets. */
  1337. + if (timeout == 0) {
  1338. + ret = -EAGAIN;
  1339. + goto out_wait;
  1340. + }
  1341. +
  1342. + release_sock(sk);
  1343. +
  1344. + timeout = schedule_timeout(timeout);
  1345. +
  1346. + lock_sock(sk);
  1347. + if (signal_pending(current)) {
  1348. + ret = sock_intr_errno(timeout);
  1349. + goto out_wait;
  1350. + } else if (timeout == 0) {
  1351. + ret = -EAGAIN;
  1352. + goto out_wait;
  1353. + }
  1354. +
  1355. + prepare_to_wait(sk_sleep(sk), &wait,
  1356. + TASK_INTERRUPTIBLE);
  1357. + }
  1358. +
  1359. + /* These checks occur both as part of and after the loop
  1360. + * conditional since we need to check before and after
  1361. + * sleeping.
  1362. + */
  1363. + if (sk->sk_err) {
  1364. + ret = -sk->sk_err;
  1365. + goto out_wait;
  1366. + } else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
  1367. + (hvsk->peer_shutdown & RCV_SHUTDOWN)) {
  1368. + ret = -EPIPE;
  1369. + goto out_wait;
  1370. + }
  1371. +
  1372. + /* Note: the write will only write as many bytes as possible
  1373. + * in the ringbuffer. It is the caller's responsibility to
  1374. + * check how many bytes we actually wrote.
  1375. + */
  1376. + do {
  1377. + max_writable = get_ringbuffer_writable_bytes(channel);
  1378. + if (max_writable == 0)
  1379. + goto out_wait;
  1380. +
  1381. + to_write = min_t(size_t, sizeof(hvsk->send->buf),
  1382. + total_to_write);
  1383. + if (to_write > max_writable)
  1384. + to_write = max_writable;
  1385. +
  1386. + ret = hvsock_get_send_buf(hvsk);
  1387. + if (ret < 0)
  1388. + goto out_wait;
  1389. +
  1390. + ret = memcpy_from_msg(hvsk->send->buf, msg, to_write);
  1391. + if (ret != 0) {
  1392. + hvsock_put_send_buf(hvsk);
  1393. + goto out_wait;
  1394. + }
  1395. +
  1396. + ret = hvsock_send_data(channel, hvsk, to_write);
  1397. + hvsock_put_send_buf(hvsk);
  1398. + if (ret != 0)
  1399. + goto out_wait;
  1400. +
  1401. + total_written += to_write;
  1402. + total_to_write -= to_write;
  1403. + } while (total_to_write > 0);
  1404. + }
  1405. +
  1406. +out_wait:
  1407. + if (total_written > 0)
  1408. + ret = total_written;
  1409. +
  1410. + finish_wait(sk_sleep(sk), &wait);
  1411. + return ret;
  1412. +}
  1413. +
  1414. +static int hvsock_sendmsg(struct socket *sock, struct msghdr *msg,
  1415. + size_t len)
  1416. +{
  1417. + struct hvsock_sock *hvsk;
  1418. + struct sock *sk;
  1419. + int ret;
  1420. +
  1421. + if (len == 0)
  1422. + return -EINVAL;
  1423. +
  1424. + if (msg->msg_flags & ~MSG_DONTWAIT)
  1425. + return -EOPNOTSUPP;
  1426. +
  1427. + sk = sock->sk;
  1428. + hvsk = sk_to_hvsock(sk);
  1429. +
  1430. + lock_sock(sk);
  1431. +
  1432. + /* Callers should not provide a destination with stream sockets. */
  1433. + if (msg->msg_namelen) {
  1434. + ret = -EOPNOTSUPP;
  1435. + goto out;
  1436. + }
  1437. +
  1438. + /* Send data only if both sides are not shutdown in the direction. */
  1439. + if (sk->sk_shutdown & SEND_SHUTDOWN ||
  1440. + hvsk->peer_shutdown & RCV_SHUTDOWN) {
  1441. + ret = -EPIPE;
  1442. + goto out;
  1443. + }
  1444. +
  1445. + if (sk->sk_state != SS_CONNECTED ||
  1446. + !hvsock_addr_bound(&hvsk->local_addr)) {
  1447. + ret = -ENOTCONN;
  1448. + goto out;
  1449. + }
  1450. +
  1451. + if (!hvsock_addr_bound(&hvsk->remote_addr)) {
  1452. + ret = -EDESTADDRREQ;
  1453. + goto out;
  1454. + }
  1455. +
  1456. + ret = hvsock_sendmsg_wait(sk, msg, len);
  1457. +out:
  1458. + release_sock(sk);
  1459. +
  1460. + /* ret should be a bigger-than-0 total_written or a negative err
  1461. + * code.
  1462. + */
  1463. + BUG_ON(ret == 0);
  1464. +
  1465. + return ret;
  1466. +}
  1467. +
  1468. +static int hvsock_recvmsg_wait(struct sock *sk, struct msghdr *msg,
  1469. + size_t len, int flags)
  1470. +{
  1471. + struct hvsock_sock *hvsk = sk_to_hvsock(sk);
  1472. + size_t to_read, total_to_read = len;
  1473. + struct vmbus_channel *channel;
  1474. + DEFINE_WAIT(wait);
  1475. + size_t copied = 0;
  1476. + bool can_read;
  1477. + long timeout;
  1478. + int ret = 0;
  1479. +
  1480. + timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
  1481. + prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
  1482. + channel = hvsk->channel;
  1483. +
  1484. + while (1) {
  1485. + bool need_refill = !hvsk->recv;
  1486. +
  1487. + if (need_refill) {
  1488. + if (hvsk->peer_shutdown & SEND_SHUTDOWN)
  1489. + can_read = false;
  1490. + else
  1491. + get_ringbuffer_rw_status(channel, &can_read,
  1492. + NULL);
  1493. + } else {
  1494. + can_read = true;
  1495. + }
  1496. +
  1497. + if (can_read) {
  1498. + size_t payload_len;
  1499. +
  1500. + if (need_refill) {
  1501. + ret = hvsock_get_recv_buf(hvsk);
  1502. + if (ret < 0) {
  1503. + if (copied > 0)
  1504. + ret = copied;
  1505. + goto out_wait;
  1506. + }
  1507. +
  1508. + ret = hvsock_recv_data(channel, hvsk,
  1509. + &payload_len);
  1510. + if (ret != 0 ||
  1511. + payload_len > sizeof(hvsk->recv->buf)) {
  1512. + ret = -EIO;
  1513. + hvsock_put_recv_buf(hvsk);
  1514. + goto out_wait;
  1515. + }
  1516. +
  1517. + if (payload_len == 0) {
  1518. + ret = copied;
  1519. + hvsock_put_recv_buf(hvsk);
  1520. + hvsk->peer_shutdown |= SEND_SHUTDOWN;
  1521. + break;
  1522. + }
  1523. +
  1524. + hvsk->recv->data_len = payload_len;
  1525. + hvsk->recv->data_offset = 0;
  1526. + }
  1527. +
  1528. + to_read = min_t(size_t, total_to_read,
  1529. + hvsk->recv->data_len);
  1530. +
  1531. + ret = memcpy_to_msg(msg, hvsk->recv->buf +
  1532. + hvsk->recv->data_offset,
  1533. + to_read);
  1534. + if (ret != 0)
  1535. + break;
  1536. +
  1537. + copied += to_read;
  1538. + total_to_read -= to_read;
  1539. +
  1540. + hvsk->recv->data_len -= to_read;
  1541. +
  1542. + if (hvsk->recv->data_len == 0)
  1543. + hvsock_put_recv_buf(hvsk);
  1544. + else
  1545. + hvsk->recv->data_offset += to_read;
  1546. +
  1547. + if (total_to_read == 0)
  1548. + break;
  1549. + } else {
  1550. + if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN) ||
  1551. + (hvsk->peer_shutdown & SEND_SHUTDOWN))
  1552. + break;
  1553. +
  1554. + /* Don't wait for non-blocking sockets. */
  1555. + if (timeout == 0) {
  1556. + ret = -EAGAIN;
  1557. + break;
  1558. + }
  1559. +
  1560. + if (copied > 0)
  1561. + break;
  1562. +
  1563. + release_sock(sk);
  1564. + timeout = schedule_timeout(timeout);
  1565. + lock_sock(sk);
  1566. +
  1567. + if (signal_pending(current)) {
  1568. + ret = sock_intr_errno(timeout);
  1569. + break;
  1570. + } else if (timeout == 0) {
  1571. + ret = -EAGAIN;
  1572. + break;
  1573. + }
  1574. +
  1575. + prepare_to_wait(sk_sleep(sk), &wait,
  1576. + TASK_INTERRUPTIBLE);
  1577. + }
  1578. + }
  1579. +
  1580. + if (sk->sk_err)
  1581. + ret = -sk->sk_err;
  1582. + else if (sk->sk_shutdown & RCV_SHUTDOWN)
  1583. + ret = 0;
  1584. +
  1585. + if (copied > 0)
  1586. + ret = copied;
  1587. +out_wait:
  1588. + finish_wait(sk_sleep(sk), &wait);
  1589. + return ret;
  1590. +}
  1591. +
  1592. +static int hvsock_recvmsg(struct socket *sock, struct msghdr *msg,
  1593. + size_t len, int flags)
  1594. +{
  1595. + struct sock *sk = sock->sk;
  1596. + int ret;
  1597. +
  1598. + lock_sock(sk);
  1599. +
  1600. + if (sk->sk_state != SS_CONNECTED) {
  1601. + /* Recvmsg is supposed to return 0 if a peer performs an
  1602. + * orderly shutdown. Differentiate between that case and when a
  1603. + * peer has not connected or a local shutdown occurred with the
  1604. + * SOCK_DONE flag.
  1605. + */
  1606. + if (sock_flag(sk, SOCK_DONE))
  1607. + ret = 0;
  1608. + else
  1609. + ret = -ENOTCONN;
  1610. +
  1611. + goto out;
  1612. + }
  1613. +
  1614. + /* We ignore msg->msg_name/msg_namelen. */
  1615. + if (flags & ~MSG_DONTWAIT) {
  1616. + ret = -EOPNOTSUPP;
  1617. + goto out;
  1618. + }
  1619. +
  1620. + /* We don't check peer_shutdown flag here since peer may actually shut
  1621. + * down, but there can be data in the queue that a local socket can
  1622. + * receive.
  1623. + */
  1624. + if (sk->sk_shutdown & RCV_SHUTDOWN) {
  1625. + ret = 0;
  1626. + goto out;
  1627. + }
  1628. +
  1629. + /* It is valid on Linux to pass in a zero-length receive buffer. This
  1630. + * is not an error. We may as well bail out now.
  1631. + */
  1632. + if (!len) {
  1633. + ret = 0;
  1634. + goto out;
  1635. + }
  1636. +
  1637. + ret = hvsock_recvmsg_wait(sk, msg, len, flags);
  1638. +out:
  1639. + release_sock(sk);
  1640. + return ret;
  1641. +}
  1642. +
  1643. +static const struct proto_ops hvsock_ops = {
  1644. + .family = PF_HYPERV,
  1645. + .owner = THIS_MODULE,
  1646. + .release = hvsock_release,
  1647. + .bind = hvsock_bind,
  1648. + .connect = hvsock_connect,
  1649. + .socketpair = sock_no_socketpair,
  1650. + .accept = hvsock_accept,
  1651. + .getname = hvsock_getname,
  1652. + .poll = hvsock_poll,
  1653. + .ioctl = sock_no_ioctl,
  1654. + .listen = hvsock_listen,
  1655. + .shutdown = hvsock_shutdown,
  1656. + .setsockopt = sock_no_setsockopt,
  1657. + .getsockopt = sock_no_getsockopt,
  1658. + .sendmsg = hvsock_sendmsg,
  1659. + .recvmsg = hvsock_recvmsg,
  1660. + .mmap = sock_no_mmap,
  1661. + .sendpage = sock_no_sendpage,
  1662. +};
  1663. +
  1664. +static int hvsock_create_sock(struct net *net, struct socket *sock,
  1665. + int protocol, int kern)
  1666. +{
  1667. + struct sock *sk;
  1668. +
  1669. + if (protocol != 0 && protocol != SHV_PROTO_RAW)
  1670. + return -EPROTONOSUPPORT;
  1671. +
  1672. + switch (sock->type) {
  1673. + case SOCK_STREAM:
  1674. + sock->ops = &hvsock_ops;
  1675. + break;
  1676. + default:
  1677. + return -ESOCKTNOSUPPORT;
  1678. + }
  1679. +
  1680. + sock->state = SS_UNCONNECTED;
  1681. +
  1682. + sk = hvsock_create(net, sock, GFP_KERNEL, 0);
  1683. + return sk ? 0 : -ENOMEM;
  1684. +}
  1685. +
  1686. +static const struct net_proto_family hvsock_family_ops = {
  1687. + .family = AF_HYPERV,
  1688. + .create = hvsock_create_sock,
  1689. + .owner = THIS_MODULE,
  1690. +};
  1691. +
  1692. +static int hvsock_probe(struct hv_device *hdev,
  1693. + const struct hv_vmbus_device_id *dev_id)
  1694. +{
  1695. + struct vmbus_channel *channel = hdev->channel;
  1696. +
  1697. + /* We ignore the error return code to suppress the unnecessary
  1698. + * error message in vmbus_probe(): on error the host will rescind
  1699. + * the offer in 30 seconds and we can do cleanup at that time.
  1700. + */
  1701. + (void)hvsock_open_connection(channel);
  1702. +
  1703. + return 0;
  1704. +}
  1705. +
  1706. +static int hvsock_remove(struct hv_device *hdev)
  1707. +{
  1708. + struct vmbus_channel *channel = hdev->channel;
  1709. +
  1710. + vmbus_close(channel);
  1711. +
  1712. + return 0;
  1713. +}
  1714. +
  1715. +/* It's not really used. See vmbus_match() and vmbus_probe(). */
  1716. +static const struct hv_vmbus_device_id id_table[] = {
  1717. + {},
  1718. +};
  1719. +
  1720. +static struct hv_driver hvsock_drv = {
  1721. + .name = "hv_sock",
  1722. + .hvsock = true,
  1723. + .id_table = id_table,
  1724. + .probe = hvsock_probe,
  1725. + .remove = hvsock_remove,
  1726. +};
  1727. +
  1728. +static int __init hvsock_init(void)
  1729. +{
  1730. + int ret;
  1731. +
  1732. + if (vmbus_proto_version < VERSION_WIN10)
  1733. + return -ENODEV;
  1734. +
  1735. + ret = vmbus_driver_register(&hvsock_drv);
  1736. + if (ret) {
  1737. + pr_err("failed to register hv_sock driver\n");
  1738. + return ret;
  1739. + }
  1740. +
  1741. + ret = proto_register(&hvsock_proto, 0);
  1742. + if (ret) {
  1743. + pr_err("failed to register protocol\n");
  1744. + goto unreg_hvsock_drv;
  1745. + }
  1746. +
  1747. + ret = sock_register(&hvsock_family_ops);
  1748. + if (ret) {
  1749. + pr_err("failed to register address family\n");
  1750. + goto unreg_proto;
  1751. + }
  1752. +
  1753. + return 0;
  1754. +
  1755. +unreg_proto:
  1756. + proto_unregister(&hvsock_proto);
  1757. +unreg_hvsock_drv:
  1758. + vmbus_driver_unregister(&hvsock_drv);
  1759. + return ret;
  1760. +}
  1761. +
  1762. +static void __exit hvsock_exit(void)
  1763. +{
  1764. + sock_unregister(AF_HYPERV);
  1765. + proto_unregister(&hvsock_proto);
  1766. + vmbus_driver_unregister(&hvsock_drv);
  1767. +}
  1768. +
  1769. +module_init(hvsock_init);
  1770. +module_exit(hvsock_exit);
  1771. +
  1772. +MODULE_DESCRIPTION("Hyper-V Sockets");
  1773. +MODULE_LICENSE("Dual BSD/GPL");
  1774. --
  1775. 2.13.0