
/servers/lwip/socket.c

http://www.minix3.org/
/*
 * This file implements handling of socket-related requests from VFS
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>

#include <minix/ipc.h>
#include <minix/com.h>
#include <minix/callnr.h>
#include <minix/sysutil.h>

#include <lwip/tcp.h>

#include <sys/ioc_net.h>

#include "inet_config.h"
#include "proto.h"
#include "socket.h"

#if 0
#define debug_sock_print(str, ...) printf("LWIP %s:%d : " str "\n", \
		__func__, __LINE__, ##__VA_ARGS__)
#else
#define debug_sock_print(...) debug_print(__VA_ARGS__)
#endif
struct socket socket[MAX_SOCKETS];

static int notified;

#define recv_q_alloc()	debug_malloc(sizeof(struct recv_q))
#define recv_q_free	debug_free

struct mq {
	message		m;
	struct mq	*prev;
	struct mq	*next;
};

#define mq_alloc()	debug_malloc(sizeof(struct mq))
#define mq_free		debug_free

static struct mq *mq_head, *mq_tail;
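
/*
 * Requests that arrive while a socket is still busy with another operation
 * are parked on this doubly-linked message queue and replayed later by
 * mq_process() once the socket becomes available again.
 */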
static int mq_enqueue(message * m)
{
	struct mq * mq;

	debug_sock_print("sock %d op %d", m->DEVICE, m->m_type);
	mq = mq_alloc();

	if (mq == NULL)
		return -1;

	mq->next = NULL;
	mq->m = *m;

	if (mq_head) {
		mq->prev = mq_tail;
		mq_tail->next = mq;
		mq_tail = mq;
	} else {
		mq->prev = NULL;
		mq_head = mq_tail = mq;
	}

	return 0;
}
__unused static struct mq * mq_dequeue_head(void)
{
	struct mq * ret;

	if (!mq_head)
		return NULL;

	ret = mq_head;

	if (mq_head != mq_tail) {
		mq_head = mq_head->next;
		mq_head->prev = NULL;
	} else
		mq_head = mq_tail = NULL;

	debug_sock_print("socket %d\n", ret->m.DEVICE);

	return ret;
}
static void mq_dequeue(struct mq * mq)
{
	if (mq_head == mq_tail)
		mq_head = mq_tail = NULL;
	else {
		if (mq->prev == NULL) {
			mq_head = mq->next;
			mq_head->prev = NULL;
		} else
			mq->prev->next = mq->next;
		if (mq->next == NULL) {
			mq_tail = mq->prev;
			mq_tail->next = NULL;
		} else
			mq->next->prev = mq->prev;
	}
}
static int mq_cancel(message * m)
{
	struct mq * mq;

	for (mq = mq_tail; mq; mq = mq->prev) {
		if (m->DEVICE == mq->m.DEVICE &&
				m->USER_ENDPT == mq->m.USER_ENDPT &&
				m->IO_GRANT == mq->m.IO_GRANT) {
			debug_sock_print("socket %d\n", mq->m.DEVICE);
			break;
		}
	}

	/* nothing to cancel if no matching queued request was found */
	if (!mq)
		return 0;

	mq_dequeue(mq);
	mq_free(mq);

	return 1;
}
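
/*
 * Per-socket receive queue: incoming data is appended by sock_enqueue_data()
 * and consumed in FIFO order by sock_dequeue_data(); recv_data_size keeps
 * track of how much data is currently buffered.
 */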
int sock_enqueue_data(struct socket * sock, void * data, unsigned size)
{
	struct recv_q * r;

	if (!(r = recv_q_alloc()))
		return ENOMEM;

	r->data = data;
	r->next = NULL;

	if (sock->recv_head) {
		sock->recv_tail->next = r;
		sock->recv_tail = r;
	} else {
		sock->recv_head = sock->recv_tail = r;
	}

	assert(size > 0);
	sock->recv_data_size += size;

	return OK;
}
void * sock_dequeue_data(struct socket * sock)
{
	void * data;
	struct recv_q * r;

	if ((r = sock->recv_head)) {
		data = r->data;
		if (!(sock->recv_head = r->next))
			sock->recv_tail = NULL;
		recv_q_free(r);

		return data;
	}

	return NULL;
}
void sock_dequeue_data_all(struct socket * sock, recv_data_free_fn data_free)
{
	void * data;

	while ((data = sock_dequeue_data(sock)))
		data_free(data);
	sock->recv_data_size = 0;
}
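
/*
 * Fill in the reply fields (requesting endpoint, grant and status) that VFS
 * expects to find in an answer to a device request.
 */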
static void set_reply_msg(message * m, int status)
{
	int proc, ref;

	proc = m->USER_ENDPT;
	ref = (int) m->IO_GRANT;

	m->REP_ENDPT = proc;
	m->REP_STATUS = status;
	m->REP_IO_GRANT = ref;
}
void send_reply(message * m, int status)
{
	int result;

	debug_sock_print("status %d", status);
	set_reply_msg(m, status);
	m->m_type = TASK_REPLY;

	result = send(m->m_source, m);
	if (result != OK)
		panic("LWIP : unable to send (err %d)", result);
}
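
/*
 * Wake up a process blocked on this socket: turn the stored request into a
 * DEV_REVIVE message and notify the requester, which picks up the result
 * through a DEV_STATUS query handled by do_status().
 */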
void sock_revive(struct socket * sock, int status)
{
	int result;

	assert(!(sock->flags & SOCK_FLG_OP_REVIVING));
	assert(sock->flags & (SOCK_FLG_OP_PENDING | SOCK_FLG_OP_SUSPENDED));

	if (notified) {
		debug_sock_print("already notified");
		return;
	} else {
		assert(sock->mess.m_type != DEV_REVIVE);
		notified = 1;
	}

	debug_sock_print("socket num %ld, status %d",
			get_sock_num(sock), status);

	sock->mess.m_type = DEV_REVIVE;
	set_reply_msg(&sock->mess, status);

	result = notify(sock->mess.m_source);
	if (result != OK)
		panic("LWIP : unable to notify (err %d)", result);
	sock->flags |= SOCK_FLG_OP_REVIVING;
}
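
/*
 * Report that a select condition this socket is being monitored for may now
 * be satisfied; the actual set of ready operations is collected later in
 * do_status() via the socket's select_reply callback.
 */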
void sock_select_notify(struct socket * sock)
{
	int result;

	debug_sock_print("socket num %ld", get_sock_num(sock));
	assert(sock->select_ep != NONE);

	sock->flags |= SOCK_FLG_SEL_CHECK;

	if (notified) {
		debug_sock_print("already notified");
		return;
	} else
		notified = 1;

	result = notify(sock->select_ep);
	if (result != OK)
		panic("LWIP : unable to notify (err %d)", result);
}
void sock_reply(struct socket * sock, int status)
{
	debug_sock_print("socket num %ld status %d type %d",
			get_sock_num(sock), status, sock->mess.m_type);

	/*
	 * If the status is SUSPEND, send the message only if this operation
	 * wasn't suspended already, e.g. by enqueuing the message when the
	 * socket was busy because of another pending message.
	 *
	 * If there is a pending operation or we are reprocessing a suspended
	 * operation, revive.
	 *
	 * Otherwise send a message straight away.
	 */
	if (status == SUSPEND) {
		if (sock->flags & SOCK_FLG_OP_SUSPENDED) {
			debug_sock_print("suspended before");
			sock->flags &= ~SOCK_FLG_OP_SUSPENDED;
			return;
		}
		message m = sock->mess;
		debug_sock_print("SUSPEND");
		send_reply(&m, status);
	} else if (sock->flags & (SOCK_FLG_OP_PENDING | SOCK_FLG_OP_SUSPENDED)) {
		sock_revive(sock, status);
		/*
		 * From now on, we process suspended calls as any other. The
		 * status is set and will be collected.
		 */
		sock->flags &= ~SOCK_FLG_OP_SUSPENDED;
	} else
		send_reply(&sock->mess, status);
}
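
/*
 * Slots below SOCK_TYPES + MAX_DEVS in socket[] are reserved; the range
 * [SOCK_TYPES, SOCK_TYPES + MAX_DEVS) maps to NIC devices (see
 * get_nic_sock()), so dynamically opened sockets use the remaining slots.
 */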
struct socket * get_unused_sock(void)
{
	int i;

	for (i = SOCK_TYPES + MAX_DEVS; i < MAX_SOCKETS; i++) {
		if (socket[i].ops == NULL) {
			/* clear it all */
			memset(&socket[i], 0, sizeof(struct socket));
			return &socket[i];
		}
	}

	return NULL;
}
struct socket * get_nic_sock(unsigned dev)
{
	if (dev < MAX_DEVS)
		return &socket[dev + SOCK_TYPES];
	else
		return NULL;
}
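
/*
 * Open a new socket of the requested type (TCP, UDP or raw IP), or hand the
 * request over to nic_open() when the minor number denotes a network device.
 */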
static void socket_open(message * m)
{
	struct sock_ops * ops;
	struct socket * sock;
	int ret = OK;

	switch (m->DEVICE) {
	case SOCK_TYPE_TCP:
		ops = &sock_tcp_ops;
		break;
	case SOCK_TYPE_UDP:
		ops = &sock_udp_ops;
		break;
	case SOCK_TYPE_IP:
		ops = &sock_raw_ip_ops;
		break;
	default:
		if (m->DEVICE - SOCK_TYPES < MAX_DEVS) {
			m->DEVICE -= SOCK_TYPES;
			nic_open(m);
			return;
		}
		printf("LWIP unknown socket type %d\n", m->DEVICE);
		send_reply(m, EINVAL);
		return;
	}

	sock = get_unused_sock();
	if (!sock) {
		printf("LWIP : no free socket\n");
		send_reply(m, EAGAIN);
		return;
	}

	sock->ops = ops;
	sock->select_ep = NONE;
	sock->recv_data_size = 0;

	if (sock->ops && sock->ops->open)
		ret = sock->ops->open(sock, m);

	if (ret == OK) {
		debug_sock_print("new socket %ld", get_sock_num(sock));
		send_reply(m, get_sock_num(sock));
	} else {
		debug_sock_print("failed %d", ret);
		send_reply(m, ret);
	}
}
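
/*
 * Answer a DEV_STATUS query from VFS: deliver the result of a revived
 * (completed) operation if there is one, otherwise report any pending select
 * results, and reply with DEV_NO_STATUS when there is nothing to report.
 */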
static void do_status(message * m)
{
	int i;

	debug_sock_print("called");

	notified = 0;

	for (i = 0; i < MAX_SOCKETS; i++) {
		struct socket * sock = &socket[i];

		if (!sock->ops)
			continue;

		if (sock->flags & SOCK_FLG_OP_REVIVING) {
			/*
			 * We send the reply and we are done with this request
			 */
			debug_sock_print("status %d ep %d sent sock %ld type %d",
					sock->mess.REP_STATUS,
					sock->mess.REP_ENDPT,
					get_sock_num(sock),
					sock->mess.m_type);
			send(m->m_source, &sock->mess);
			/*
			 * Remove only the reviving flag, i.e. the status has
			 * been consumed. SOCK_FLG_OP_PENDING may stay set. For
			 * instance in case of a TCP write, the application is
			 * already notified while the process of sending is
			 * still going on.
			 */
			sock->flags &= ~SOCK_FLG_OP_REVIVING;
			return;
		}

		/*
		 * We check select AFTER possibly reviving an operation,
		 * otherwise the select would fail as the socket is still
		 * blocking.
		 */
		if (sock_select_check_set(sock)) {
			if (sock->ops && sock->ops->select_reply) {
				message msg;

				msg.m_type = DEV_IO_READY;
				msg.DEV_MINOR = get_sock_num(sock);
				msg.DEV_SEL_OPS = 0;

				sock->ops->select_reply(sock, &msg);
				if (msg.DEV_SEL_OPS) {
					int result;

					debug_sock_print("socket num %d select "
							"result 0x%x sent",
							msg.DEV_MINOR,
							msg.DEV_SEL_OPS);
					result = send(sock->select_ep, &msg);
					if (result != OK)
						panic("LWIP : unable to send "
							"(err %d)", result);
					sock_clear_select(sock);
					sock->select_ep = NONE;
					return;
				}
			}
		}
	}

	debug_sock_print("no status");
	m->m_type = DEV_NO_STATUS;
	send(m->m_source, m);
}
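
/* Dispatch a read, write or ioctl request to the socket's operations table */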
static void socket_request_socket(struct socket * sock, message * m)
{
	switch (m->m_type) {
	case DEV_READ_S:
		if (sock && sock->ops && sock->ops->read)
			sock->ops->read(sock, m);
		else
			send_reply(m, EINVAL);
		return;
	case DEV_WRITE_S:
		if (sock && sock->ops && sock->ops->write)
			sock->ops->write(sock, m);
		else
			send_reply(m, EINVAL);
		return;
	case DEV_IOCTL_S:
		if (sock && sock->ops && sock->ops->ioctl)
			sock->ops->ioctl(sock, m);
		else
			send_reply(m, EINVAL);
		return;
	default:
		panic("LWIP : cannot happen!");
	}
}
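
/*
 * Entry point for socket requests coming from VFS. Requests that cannot be
 * served while the socket is busy are queued via mq_enqueue() and answered
 * with SUSPEND; they are resumed later from mq_process().
 */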
void socket_request(message * m)
{
	struct socket * sock;

	switch (m->m_type) {
	case DEV_OPEN:
		socket_open(m);
		return;
	case DEV_CLOSE:
		sock = get_sock(m->DEVICE);
		if (sock && sock->ops && sock->ops->close) {
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			sock->mess = *m;
			sock->ops->close(sock, m);
		} else
			send_reply(m, EINVAL);
		return;
	case DEV_READ_S:
	case DEV_WRITE_S:
	case DEV_IOCTL_S:
		sock = get_sock(m->DEVICE);
		if (!sock) {
			send_reply(m, EINVAL);
			return;
		}
		/*
		 * If an operation is pending (blocking operation), or a write
		 * is still in progress and we want to read, suspend the new
		 * operation.
		 */
		if ((sock->flags & (SOCK_FLG_OP_PENDING | SOCK_FLG_OP_REVIVING)) ||
				(m->m_type == DEV_READ_S &&
				 sock->flags & SOCK_FLG_OP_WRITING)) {
			char * o = "\0";
			if (sock->flags & SOCK_FLG_OP_READING)
				o = "READ";
			else if (sock->flags & SOCK_FLG_OP_WRITING)
				o = "WRITE";
			else
				o = "non R/W op";
			debug_sock_print("socket %ld is busy by %s\n",
					get_sock_num(sock), o);
			if (mq_enqueue(m) == 0) {
				send_reply(m, SUSPEND);
			} else {
				debug_sock_print("Enqueuing suspended "
						"call failed");
				send_reply(m, ENOMEM);
			}
			return;
		}
		sock->mess = *m;
		socket_request_socket(sock, m);
		return;
	case CANCEL:
		sock = get_sock(m->DEVICE);
		debug_sock_print("socket num %ld", get_sock_num(sock));
		/* Cancel the last operation in the queue */
		if (mq_cancel(m)) {
			send_reply(m, EINTR);
			return;
		/* ... or a blocked read */
		} else if (sock->flags & SOCK_FLG_OP_PENDING &&
				sock->flags & SOCK_FLG_OP_READING) {
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			send_reply(m, EINTR);
			return;
		/*
		 * ... or return the status of the operation which finished
		 * before it was canceled
		 */
		} else if (sock->flags & SOCK_FLG_OP_REVIVING) {
			sock->flags &= ~SOCK_FLG_OP_REVIVING;
			send_reply(m, sock->mess.REP_STATUS);
		} else
			panic("LWIP : no operation to cancel");
		return;
	case DEV_SELECT:
		/*
		 * Select is always executed immediately and is never
		 * suspended. However, it sets the events that must be
		 * monitored.
		 */
		sock = get_sock(m->DEVICE);
		assert(sock->select_ep == NONE || sock->select_ep == m->m_source);

		if (sock && sock->ops && sock->ops->select) {
			sock->ops->select(sock, m);
			if (sock_select_set(sock))
				sock->select_ep = m->m_source;
		} else
			send_reply(m, EINVAL);
		return;
	case DEV_STATUS:
		do_status(m);
		return;
	default:
		printf("LWIP : unknown message from VFS, type %d\n",
				m->m_type);
	}
	send_reply(m, EGENERIC);
}
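
/*
 * Walk the queue of postponed requests and resume the first one whose socket
 * is no longer busy; at most one request is resumed per call.
 */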
void mq_process(void)
{
	struct mq * mq;
	struct socket * sock;

	mq = mq_head;

	while (mq) {
		struct mq * next = mq->next;

		sock = get_sock(mq->m.DEVICE);
		if (!(sock->flags &
				(SOCK_FLG_OP_PENDING | SOCK_FLG_OP_REVIVING)) &&
				!(mq->m.m_type == DEV_READ_S &&
					sock->flags & SOCK_FLG_OP_WRITING)) {
			sock->flags = SOCK_FLG_OP_SUSPENDED;
			debug_sock_print("resuming op on sock %ld\n",
					get_sock_num(sock));
			sock->mess = mq->m;
			socket_request_socket(sock, &sock->mess);
			mq_dequeue(mq);
			mq_free(mq);
			return;
		}
		mq = next;
	}
}
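
/*
 * Generic select implementation for packet-style sockets: queued receive data
 * means readable, writing never blocks, and no exceptional conditions are
 * generated.
 */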
void generic_op_select(struct socket * sock, message * m)
{
	int retsel = 0, sel;

	debug_print("socket num %ld 0x%x", get_sock_num(sock), m->USER_ENDPT);
	sel = m->USER_ENDPT;

	/* in this case any operation would block, no error */
	if (sock->flags & SOCK_FLG_OP_PENDING) {
		if (sel & SEL_NOTIFY) {
			if (sel & SEL_RD)
				sock->flags |= SOCK_FLG_SEL_READ;
			if (sel & SEL_WR)
				sock->flags |= SOCK_FLG_SEL_WRITE;
			/* FIXME we do not monitor error */
		}
		send_reply(m, 0);
		return;
	}

	if (sel & SEL_RD) {
		if (sock->recv_head)
			retsel |= SEL_RD;
		else if (sel & SEL_NOTIFY)
			sock->flags |= SOCK_FLG_SEL_READ;
	}
	/* FIXME generic packet socket never blocks on write */
	if (sel & SEL_WR)
		retsel |= SEL_WR;
	/* FIXME SEL_ERR is ignored, we do not generate exceptions */

	send_reply(m, retsel);
}
void generic_op_select_reply(struct socket * sock, message * m)
{
	assert(sock->select_ep != NONE);
	debug_print("socket num %ld", get_sock_num(sock));

	/* unused for generic packet socket, see generic_op_select() */
	assert((sock->flags & (SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_ERROR)) == 0);

	if (sock->flags & (SOCK_FLG_OP_PENDING | SOCK_FLG_OP_REVIVING)) {
		debug_print("WARNING socket still blocking!");
		return;
	}

	if (sock->flags & SOCK_FLG_SEL_READ && sock->recv_head)
		m->DEV_SEL_OPS |= SEL_RD;

	if (m->DEV_SEL_OPS)
		sock->flags &= ~(SOCK_FLG_SEL_WRITE | SOCK_FLG_SEL_READ |
				SOCK_FLG_SEL_ERROR);
}