
servers/lwip/raw_ip.c

http://www.minix3.org/
#include <stdlib.h>
#include <assert.h>	/* assert() is used in raw_ip_get_opt() */
#include <sys/ioc_net.h>
#include <net/gen/in.h>
#include <net/gen/ip_io.h>

#include <lwip/raw.h>
#include <lwip/ip_addr.h>

#include "socket.h"
#include "proto.h"

#define RAW_IP_BUF_SIZE		(32 << 10)

#define sock_alloc_buf(s)	debug_malloc(s)
#define sock_free_buf(x)	debug_free(x)

struct raw_ip_recv_data {
	ip_addr_t	ip;
	struct pbuf	* pbuf;
};

#define raw_ip_recv_alloc()	debug_malloc(sizeof(struct raw_ip_recv_data))

static void raw_ip_recv_free(void * data)
{
	if (((struct raw_ip_recv_data *)data)->pbuf)
		pbuf_free(((struct raw_ip_recv_data *)data)->pbuf);
	debug_free(data);
}

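/*
 * Open handler for a raw IP socket. Allocates the socket buffer whose
 * size (RAW_IP_BUF_SIZE) caps single writes and the amount of queued
 * receive data.
 */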
static int raw_ip_op_open(struct socket * sock, __unused message * m)
{
	debug_print("socket num %ld", get_sock_num(sock));

	if (!(sock->buf = sock_alloc_buf(RAW_IP_BUF_SIZE))) {
		return ENOMEM;
	}
	sock->buf_size = RAW_IP_BUF_SIZE;

	return OK;
}

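/*
 * Tear down a raw IP socket: drop any queued packets, remove the lwIP
 * raw PCB, release the socket buffer and mark the socket slot unused.
 */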
static void raw_ip_close(struct socket * sock)
{
	/* dequeue and free all enqueued data before closing */
	sock_dequeue_data_all(sock, raw_ip_recv_free);

	if (sock->pcb)
		raw_remove(sock->pcb);
	if (sock->buf)
		sock_free_buf(sock->buf);

	/* mark it as unused */
	sock->ops = NULL;
}

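/* Close handler for a user close request; tears down the socket and replies OK. */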
static void raw_ip_op_close(struct socket * sock, __unused message * m)
{
	debug_print("socket num %ld", get_sock_num(sock));

	raw_ip_close(sock);
	sock_reply(sock, OK);
}

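/*
 * Copy a (possibly chained) pbuf into the user buffer described by the
 * read request message. Returns the number of bytes copied, or the
 * copy_to_user() error code if the copy fails.
 */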
static int raw_ip_do_receive(message * m,
			struct pbuf *pbuf)
{
	struct pbuf * p;
	unsigned rem_len = m->COUNT;
	unsigned written = 0, hdr_sz = 0;
	int err;

	debug_print("user buffer size : %d\n", rem_len);

	for (p = pbuf; p && rem_len; p = p->next) {
		size_t cp_len;

		cp_len = (rem_len < p->len) ? rem_len : p->len;
		err = copy_to_user(m->m_source, p->payload, cp_len,
				(cp_grant_id_t) m->IO_GRANT,
				hdr_sz + written);

		if (err != OK)
			return err;

		written += cp_len;
		rem_len -= cp_len;
	}

	debug_print("copied %d bytes\n", written + hdr_sz);
	return written + hdr_sz;
}

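/*
 * lwIP receive callback for the raw PCB. If a read is pending, the
 * packet is copied to the caller and the suspended operation resumed.
 * Otherwise the packet is enqueued for a later read: with NWIO_EXCL
 * the pbuf itself is taken over (return 1), otherwise a copy is stored
 * and 0 is returned so lwIP keeps ownership of the original.
 */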
static u8_t raw_ip_op_receive(void *arg,
			__unused struct raw_pcb *pcb,
			struct pbuf *pbuf,
			ip_addr_t *addr)
{
	struct socket * sock = (struct socket *) arg;
	struct raw_ip_recv_data * data;
	int ret;

	debug_print("socket num : %ld addr : %x\n",
		get_sock_num(sock), (unsigned int) addr->addr);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		/* we are resuming a suspended operation */
		ret = raw_ip_do_receive(&sock->mess, pbuf);

		if (ret > 0) {
			sock_revive(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			if (sock->usr_flags & NWIO_EXCL) {
				pbuf_free(pbuf);
				return 1;
			} else
				return 0;
		} else {
			sock_revive(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
		}
	}

	/* Do not enqueue more data than allowed */
	if (sock->recv_data_size > RAW_IP_BUF_SIZE)
		return 0;

	/*
	 * nobody is waiting for the data or an error occurred above,
	 * we enqueue the packet
	 */
	if (!(data = raw_ip_recv_alloc())) {
		return 0;
	}

	data->ip = *addr;

	if (sock->usr_flags & NWIO_EXCL) {
		data->pbuf = pbuf;
		ret = 1;
	} else {
		/* we store a copy of this packet */
		data->pbuf = pbuf_alloc(PBUF_RAW, pbuf->tot_len, PBUF_RAM);
		if (data->pbuf == NULL) {
			debug_print("LWIP : cannot allocate new pbuf\n");
			raw_ip_recv_free(data);
			return 0;
		}

		if (pbuf_copy(data->pbuf, pbuf) != ERR_OK) {
			debug_print("LWIP : cannot copy pbuf\n");
			raw_ip_recv_free(data);
			return 0;
		}

		ret = 0;
	}

	/*
	 * If we didn't manage to enqueue the packet we report it as not
	 * consumed
	 */
	if (sock_enqueue_data(sock, data, data->pbuf->tot_len) != OK) {
		raw_ip_recv_free(data);
		ret = 0;
	}

	return ret;
}

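/*
 * Read handler. If a packet is already queued it is copied out and the
 * request answered immediately; otherwise the request message is saved
 * and the caller is suspended until the receive callback delivers data.
 */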
static void raw_ip_op_read(struct socket * sock, message * m)
{
	debug_print("socket num %ld", get_sock_num(sock));

	if (sock->pcb == NULL) {
		sock_reply(sock, EIO);
		return;
	}

	if (sock->recv_head) {
		/* data available, receive immediately */
		struct raw_ip_recv_data * data;
		int ret;

		data = (struct raw_ip_recv_data *) sock->recv_head->data;

		ret = raw_ip_do_receive(m, data->pbuf);

		if (ret > 0) {
			sock_dequeue_data(sock);
			sock->recv_data_size -= data->pbuf->tot_len;
			raw_ip_recv_free(data);
		}
		sock_reply(sock, ret);
	} else {
		/* store the message so we know how to reply */
		sock->mess = *m;
		/* operation is being processed */
		sock->flags |= SOCK_FLG_OP_PENDING;
		debug_print("no data to read, suspending");
		sock_reply(sock, SUSPEND);
	}
}

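/*
 * Write handler. The user supplies a complete IP packet including the
 * header; the destination address is taken from that header, the header
 * itself is then hidden (lwIP prepends its own) and the payload is
 * handed to raw_sendto().
 */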
static void raw_ip_op_write(struct socket * sock, message * m)
{
	int ret;
	struct pbuf * pbuf;
	struct ip_hdr * ip_hdr;

	debug_print("socket num %ld data size %d",
			get_sock_num(sock), m->COUNT);

	if (sock->pcb == NULL) {
		ret = EIO;
		goto write_err;
	}

	if ((size_t) m->COUNT > sock->buf_size) {
		ret = ENOMEM;
		goto write_err;
	}

	pbuf = pbuf_alloc(PBUF_LINK, m->COUNT, PBUF_RAM);
	if (!pbuf) {
		ret = ENOMEM;
		goto write_err;
	}

	if ((ret = copy_from_user(m->m_source, pbuf->payload, m->COUNT,
				(cp_grant_id_t) m->IO_GRANT, 0)) != OK) {
		pbuf_free(pbuf);
		goto write_err;
	}

	ip_hdr = (struct ip_hdr *) pbuf->payload;

	if (pbuf_header(pbuf, -IP_HLEN)) {
		pbuf_free(pbuf);
		ret = EIO;
		goto write_err;
	}

	if ((ret = raw_sendto((struct raw_pcb *)sock->pcb, pbuf,
				(ip_addr_t *) &ip_hdr->dest)) != OK) {
		debug_print("raw_sendto failed %d", ret);
		ret = EIO;
	} else
		ret = m->COUNT;

	pbuf_free(pbuf);

write_err:
	sock_reply(sock, ret);
}

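/*
 * NWIOSIPOPT handler: create the raw PCB for the requested protocol on
 * first use, remember the user flags and install the receive callback.
 */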
static void raw_ip_set_opt(struct socket * sock, message * m)
{
	int err;
	nwio_ipopt_t ipopt;
	struct raw_pcb * pcb;

	err = copy_from_user(m->m_source, &ipopt, sizeof(ipopt),
				(cp_grant_id_t) m->IO_GRANT, 0);

	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	debug_print("ipopt.nwio_flags = 0x%lx", ipopt.nwio_flags);
	debug_print("ipopt.nwio_proto = 0x%x", ipopt.nwio_proto);
	debug_print("ipopt.nwio_rem = 0x%x",
				(unsigned int) ipopt.nwio_rem);

	if (sock->pcb == NULL) {
		if (!(pcb = raw_new(ipopt.nwio_proto))) {
			raw_ip_close(sock);
			sock_reply(sock, ENOMEM);
			return;
		}

		sock->pcb = pcb;
	} else
		pcb = (struct raw_pcb *) sock->pcb;

	if (pcb->protocol != ipopt.nwio_proto) {
		debug_print("conflicting ip socket protocols\n");
		sock_reply(sock, EBADIOCTL);
		return;
	}

	sock->usr_flags = ipopt.nwio_flags;

#if 0
	if (raw_bind(pcb, (ip_addr_t *)&ipopt.nwio_rem) == ERR_USE) {
		raw_ip_close(sock);
		sock_reply(sock, EADDRINUSE);
		return;
	}
#endif

	/* register a receive hook */
	raw_recv((struct raw_pcb *) sock->pcb, raw_ip_op_receive, sock);

	sock_reply(sock, OK);
}

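/*
 * NWIOGIPOPT handler: copy the current remote address and user flags
 * back to the caller.
 */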
static void raw_ip_get_opt(struct socket * sock, message * m)
{
	int err;
	nwio_ipopt_t ipopt;
	struct raw_pcb * pcb = (struct raw_pcb *) sock->pcb;

	assert(pcb);

	ipopt.nwio_rem = pcb->remote_ip.addr;
	ipopt.nwio_flags = sock->usr_flags;

	if ((unsigned) m->COUNT < sizeof(ipopt)) {
		sock_reply(sock, EINVAL);
		return;
	}

	err = copy_to_user(m->m_source, &ipopt, sizeof(ipopt),
				(cp_grant_id_t) m->IO_GRANT, 0);

	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	sock_reply(sock, OK);
}

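/*
 * ioctl dispatcher for raw IP sockets. Unknown requests are forwarded
 * to nic_default_ioctl() since /dev/ip also serves as the default
 * configuration device.
 */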
static void raw_ip_op_ioctl(struct socket * sock, message * m)
{
	debug_print("socket num %ld req %c %d %d",
			get_sock_num(sock),
			(m->REQUEST >> 8) & 0xff,
			m->REQUEST & 0xff,
			(m->REQUEST >> 16) & _IOCPARM_MASK);

	switch (m->REQUEST) {
	case NWIOSIPOPT:
		raw_ip_set_opt(sock, m);
		break;
	case NWIOGIPOPT:
		raw_ip_get_opt(sock, m);
		break;
	default:
		/*
		 * /dev/ip can also be accessed as a default device to be
		 * configured
		 */
		nic_default_ioctl(m);
		return;
	}
}

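/* Operation table exported for raw IP sockets. */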
struct sock_ops sock_raw_ip_ops = {
	.open		= raw_ip_op_open,
	.close		= raw_ip_op_close,
	.read		= raw_ip_op_read,
	.write		= raw_ip_op_write,
	.ioctl		= raw_ip_op_ioctl,
	.select		= generic_op_select,
	.select_reply	= generic_op_select_reply
};