
/net/socket.c

https://bitbucket.org/kgp700/siyahkernel
C | 3131 lines | 2240 code | 465 blank | 426 comment | 346 complexity | MD5: 954e96a7354c1cb43320299ef0120304
Possible License(s): GPL-2.0


  1. /*
  2. * NET An implementation of the SOCKET network access protocol.
  3. *
  4. * Version: @(#)socket.c 1.1.93 18/02/95
  5. *
  6. * Authors: Orest Zborowski, <obz@Kodak.COM>
  7. * Ross Biro
  8. * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
  9. *
  10. * Fixes:
  11. * Anonymous : NOTSOCK/BADF cleanup. Error fix in
  12. * shutdown()
  13. * Alan Cox : verify_area() fixes
  14. * Alan Cox : Removed DDI
  15. * Jonathan Kamens : SOCK_DGRAM reconnect bug
  16. * Alan Cox : Moved a load of checks to the very
  17. * top level.
  18. * Alan Cox : Move address structures to/from user
  19. * mode above the protocol layers.
  20. * Rob Janssen : Allow 0 length sends.
  21. * Alan Cox : Asynchronous I/O support (cribbed from the
  22. * tty drivers).
  23. * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style)
  24. * Jeff Uphoff : Made max number of sockets command-line
  25. * configurable.
  26. * Matti Aarnio : Made the number of sockets dynamic,
  27. * to be allocated when needed, and mr.
  28. * Uphoff's max is used as max to be
  29. * allowed to allocate.
  30. * Linus : Argh. removed all the socket allocation
  31. * altogether: it's in the inode now.
  32. * Alan Cox : Made sock_alloc()/sock_release() public
  33. * for NetROM and future kernel nfsd type
  34. * stuff.
  35. * Alan Cox : sendmsg/recvmsg basics.
  36. * Tom Dyas : Export net symbols.
  37. * Marcin Dalecki : Fixed problems with CONFIG_NET="n".
  38. * Alan Cox : Added thread locking to sys_* calls
  39. * for sockets. May have errors at the
  40. * moment.
  41. * Kevin Buhr : Fixed the dumb errors in the above.
  42. * Andi Kleen : Some small cleanups, optimizations,
  43. * and fixed a copy_from_user() bug.
  44. * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0)
  45. * Tigran Aivazian : Made listen(2) backlog sanity checks
  46. * protocol-independent
  47. *
  48. *
  49. * This program is free software; you can redistribute it and/or
  50. * modify it under the terms of the GNU General Public License
  51. * as published by the Free Software Foundation; either version
  52. * 2 of the License, or (at your option) any later version.
  53. *
  54. *
  55. * This module is effectively the top level interface to the BSD socket
  56. * paradigm.
  57. *
  58. * Based upon Swansea University Computer Society NET3.039
  59. */
  60. #include <linux/mm.h>
  61. #include <linux/socket.h>
  62. #include <linux/file.h>
  63. #include <linux/net.h>
  64. #include <linux/interrupt.h>
  65. #include <linux/thread_info.h>
  66. #include <linux/rcupdate.h>
  67. #include <linux/netdevice.h>
  68. #include <linux/proc_fs.h>
  69. #include <linux/seq_file.h>
  70. #include <linux/mutex.h>
  71. #include <linux/wanrouter.h>
  72. #include <linux/if_bridge.h>
  73. #include <linux/if_frad.h>
  74. #include <linux/if_vlan.h>
  75. #include <linux/init.h>
  76. #include <linux/poll.h>
  77. #include <linux/cache.h>
  78. #include <linux/module.h>
  79. #include <linux/highmem.h>
  80. #include <linux/mount.h>
  81. #include <linux/security.h>
  82. #include <linux/syscalls.h>
  83. #include <linux/compat.h>
  84. #include <linux/kmod.h>
  85. #include <linux/audit.h>
  86. #include <linux/wireless.h>
  87. #include <linux/nsproxy.h>
  88. #include <linux/magic.h>
  89. #include <linux/slab.h>
  90. #include <asm/uaccess.h>
  91. #include <asm/unistd.h>
  92. #include <net/compat.h>
  93. #include <net/wext.h>
  94. #include <net/cls_cgroup.h>
  95. #include <net/sock.h>
  96. #include <linux/netfilter.h>
  97. #include <linux/if_tun.h>
  98. #include <linux/ipv6_route.h>
  99. #include <linux/route.h>
  100. #include <linux/sockios.h>
  101. #include <linux/atalk.h>
  102. static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
  103. static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
  104. unsigned long nr_segs, loff_t pos);
  105. static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
  106. unsigned long nr_segs, loff_t pos);
  107. static int sock_mmap(struct file *file, struct vm_area_struct *vma);
  108. static int sock_close(struct inode *inode, struct file *file);
  109. static unsigned int sock_poll(struct file *file,
  110. struct poll_table_struct *wait);
  111. static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
  112. #ifdef CONFIG_COMPAT
  113. static long compat_sock_ioctl(struct file *file,
  114. unsigned int cmd, unsigned long arg);
  115. #endif
  116. static int sock_fasync(int fd, struct file *filp, int on);
  117. static ssize_t sock_sendpage(struct file *file, struct page *page,
  118. int offset, size_t size, loff_t *ppos, int more);
  119. static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
  120. struct pipe_inode_info *pipe, size_t len,
  121. unsigned int flags);
  122. /*
  123. * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
  124. * in the operation structures but are done directly via the socketcall() multiplexor.
  125. */
  126. static const struct file_operations socket_file_ops = {
  127. .owner = THIS_MODULE,
  128. .llseek = no_llseek,
  129. .aio_read = sock_aio_read,
  130. .aio_write = sock_aio_write,
  131. .poll = sock_poll,
  132. .unlocked_ioctl = sock_ioctl,
  133. #ifdef CONFIG_COMPAT
  134. .compat_ioctl = compat_sock_ioctl,
  135. #endif
  136. .mmap = sock_mmap,
  137. .open = sock_no_open, /* special open code to disallow open via /proc */
  138. .release = sock_close,
  139. .fasync = sock_fasync,
  140. .sendpage = sock_sendpage,
  141. .splice_write = generic_splice_sendpage,
  142. .splice_read = sock_splice_read,
  143. };
  144. /*
  145. * The protocol list. Each protocol is registered in here.
  146. */
  147. static DEFINE_SPINLOCK(net_family_lock);
  148. static const struct net_proto_family *net_families[NPROTO] __read_mostly;
  149. /*
  150. * Statistics counters of the socket lists
  151. */
  152. static DEFINE_PER_CPU(int, sockets_in_use) = 0;
  153. /*
  154. * Support routines.
  155. * Move socket addresses back and forth across the kernel/user
  156. * divide and look after the messy bits.
  157. */
  158. #define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
  159. 16 for IP, 16 for IPX,
  160. 24 for IPv6,
  161. about 80 for AX.25
  162. must be at least one bigger than
  163. the AF_UNIX size (see net/unix/af_unix.c
  164. :unix_mkname()).
  165. */
  166. /**
  167. * move_addr_to_kernel - copy a socket address into kernel space
  168. * @uaddr: Address in user space
  169. * @kaddr: Address in kernel space
  170. * @ulen: Length in user space
  171. *
  172. * The address is copied into kernel space. If the provided address is
  173. * too long an error code of -EINVAL is returned. If the copy gives
  174. * invalid addresses -EFAULT is returned. On a success 0 is returned.
  175. */
  176. int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
  177. {
  178. if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
  179. return -EINVAL;
  180. if (ulen == 0)
  181. return 0;
  182. if (copy_from_user(kaddr, uaddr, ulen))
  183. return -EFAULT;
  184. return audit_sockaddr(ulen, kaddr);
  185. }
  186. /**
  187. * move_addr_to_user - copy an address to user space
  188. * @kaddr: kernel space address
  189. * @klen: length of address in kernel
  190. * @uaddr: user space address
  191. * @ulen: pointer to user length field
  192. *
  193. * The value pointed to by ulen on entry is the buffer length available.
  194. * This is overwritten with the buffer space used. -EINVAL is returned
  195. * if an overlong buffer is specified or a negative buffer size. -EFAULT
  196. * is returned if either the buffer or the length field are not
  197. * accessible.
  198. * After copying the data up to the limit the user specifies, the true
  199. * length of the data is written over the length limit the user
  200. * specified. Zero is returned for a success.
  201. */
  202. int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr,
  203. int __user *ulen)
  204. {
  205. int err;
  206. int len;
  207. err = get_user(len, ulen);
  208. if (err)
  209. return err;
  210. if (len > klen)
  211. len = klen;
  212. if (len < 0 || len > sizeof(struct sockaddr_storage))
  213. return -EINVAL;
  214. if (len) {
  215. if (audit_sockaddr(klen, kaddr))
  216. return -ENOMEM;
  217. if (copy_to_user(uaddr, kaddr, len))
  218. return -EFAULT;
  219. }
  220. /*
  221. * "fromlen shall refer to the value before truncation.."
  222. * 1003.1g
  223. */
  224. return __put_user(klen, ulen);
  225. }
  226. static struct kmem_cache *sock_inode_cachep __read_mostly;
  227. static struct inode *sock_alloc_inode(struct super_block *sb)
  228. {
  229. struct socket_alloc *ei;
  230. ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
  231. if (!ei)
  232. return NULL;
  233. ei->socket.wq = kmalloc(sizeof(struct socket_wq), GFP_KERNEL);
  234. if (!ei->socket.wq) {
  235. kmem_cache_free(sock_inode_cachep, ei);
  236. return NULL;
  237. }
  238. init_waitqueue_head(&ei->socket.wq->wait);
  239. ei->socket.wq->fasync_list = NULL;
  240. ei->socket.state = SS_UNCONNECTED;
  241. ei->socket.flags = 0;
  242. ei->socket.ops = NULL;
  243. ei->socket.sk = NULL;
  244. ei->socket.file = NULL;
  245. return &ei->vfs_inode;
  246. }
  247. static void wq_free_rcu(struct rcu_head *head)
  248. {
  249. struct socket_wq *wq = container_of(head, struct socket_wq, rcu);
  250. kfree(wq);
  251. }
  252. static void sock_destroy_inode(struct inode *inode)
  253. {
  254. struct socket_alloc *ei;
  255. ei = container_of(inode, struct socket_alloc, vfs_inode);
  256. call_rcu(&ei->socket.wq->rcu, wq_free_rcu);
  257. kmem_cache_free(sock_inode_cachep, ei);
  258. }
  259. static void init_once(void *foo)
  260. {
  261. struct socket_alloc *ei = (struct socket_alloc *)foo;
  262. inode_init_once(&ei->vfs_inode);
  263. }
  264. static int init_inodecache(void)
  265. {
  266. sock_inode_cachep = kmem_cache_create("sock_inode_cache",
  267. sizeof(struct socket_alloc),
  268. 0,
  269. (SLAB_HWCACHE_ALIGN |
  270. SLAB_RECLAIM_ACCOUNT |
  271. SLAB_MEM_SPREAD),
  272. init_once);
  273. if (sock_inode_cachep == NULL)
  274. return -ENOMEM;
  275. return 0;
  276. }
  277. static const struct super_operations sockfs_ops = {
  278. .alloc_inode = sock_alloc_inode,
  279. .destroy_inode =sock_destroy_inode,
  280. .statfs = simple_statfs,
  281. };
  282. static int sockfs_get_sb(struct file_system_type *fs_type,
  283. int flags, const char *dev_name, void *data,
  284. struct vfsmount *mnt)
  285. {
  286. return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
  287. mnt);
  288. }
  289. static struct vfsmount *sock_mnt __read_mostly;
  290. static struct file_system_type sock_fs_type = {
  291. .name = "sockfs",
  292. .get_sb = sockfs_get_sb,
  293. .kill_sb = kill_anon_super,
  294. };
  295. /*
  296. * sockfs_dname() is called from d_path().
  297. */
  298. static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
  299. {
  300. return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
  301. dentry->d_inode->i_ino);
  302. }
  303. static const struct dentry_operations sockfs_dentry_operations = {
  304. .d_dname = sockfs_dname,
  305. };
  306. /*
  307. * Obtains the first available file descriptor and sets it up for use.
  308. *
309. * These functions create file structures and map them to the fd space
310. * of the current process. On success they return the file descriptor,
311. * with the file struct implicitly stored in sock->file.
  312. * Note that another thread may close file descriptor before we return
  313. * from this function. We use the fact that now we do not refer
  314. * to socket after mapping. If one day we will need it, this
  315. * function will increment ref. count on file by 1.
  316. *
  317. * In any case returned fd MAY BE not valid!
  318. * This race condition is unavoidable
  319. * with shared fd spaces, we cannot solve it inside kernel,
  320. * but we take care of internal coherence yet.
  321. */
  322. static int sock_alloc_file(struct socket *sock, struct file **f, int flags)
  323. {
  324. struct qstr name = { .name = "" };
  325. struct path path;
  326. struct file *file;
  327. int fd;
  328. fd = get_unused_fd_flags(flags);
  329. if (unlikely(fd < 0))
  330. return fd;
  331. path.dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
  332. if (unlikely(!path.dentry)) {
  333. put_unused_fd(fd);
  334. return -ENOMEM;
  335. }
  336. path.mnt = mntget(sock_mnt);
  337. path.dentry->d_op = &sockfs_dentry_operations;
  338. d_instantiate(path.dentry, SOCK_INODE(sock));
  339. SOCK_INODE(sock)->i_fop = &socket_file_ops;
  340. file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
  341. &socket_file_ops);
  342. if (unlikely(!file)) {
  343. /* drop dentry, keep inode */
  344. atomic_inc(&path.dentry->d_inode->i_count);
  345. path_put(&path);
  346. put_unused_fd(fd);
  347. return -ENFILE;
  348. }
  349. sock->file = file;
  350. file->f_flags = O_RDWR | (flags & O_NONBLOCK);
  351. file->f_pos = 0;
  352. file->private_data = sock;
  353. *f = file;
  354. return fd;
  355. }
  356. int sock_map_fd(struct socket *sock, int flags)
  357. {
  358. struct file *newfile;
  359. int fd = sock_alloc_file(sock, &newfile, flags);
  360. if (likely(fd >= 0))
  361. fd_install(fd, newfile);
  362. return fd;
  363. }
  364. static struct socket *sock_from_file(struct file *file, int *err)
  365. {
  366. if (file->f_op == &socket_file_ops)
  367. return file->private_data; /* set in sock_map_fd */
  368. *err = -ENOTSOCK;
  369. return NULL;
  370. }
  371. /**
  372. * sockfd_lookup - Go from a file number to its socket slot
  373. * @fd: file handle
  374. * @err: pointer to an error code return
  375. *
376. * The file handle passed in is locked and the socket it is bound
377. * to is returned. If an error occurs the err pointer is overwritten
  378. * with a negative errno code and NULL is returned. The function checks
  379. * for both invalid handles and passing a handle which is not a socket.
  380. *
  381. * On a success the socket object pointer is returned.
  382. */
  383. struct socket *sockfd_lookup(int fd, int *err)
  384. {
  385. struct file *file;
  386. struct socket *sock;
  387. file = fget(fd);
  388. if (!file) {
  389. *err = -EBADF;
  390. return NULL;
  391. }
  392. sock = sock_from_file(file, err);
  393. if (!sock)
  394. fput(file);
  395. return sock;
  396. }
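For callers elsewhere in the kernel, sockfd_lookup() is the usual way to turn a user-supplied descriptor into a struct socket. A minimal sketch of the lookup/release pattern (the helper name example_socket_type is hypothetical; sockfd_put() is the companion helper from linux/net.h that drops the file reference):

/* Illustrative sketch, not part of this file. */
static int example_socket_type(int fd)
{
        struct socket *sock;
        int err;

        sock = sockfd_lookup(fd, &err);         /* takes a reference on sock->file */
        if (!sock)
                return err;                     /* -EBADF or -ENOTSOCK */

        err = sock->type;                       /* e.g. SOCK_STREAM, SOCK_DGRAM */

        sockfd_put(sock);                       /* fput() on sock->file */
        return err;
}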
  397. static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
  398. {
  399. struct file *file;
  400. struct socket *sock;
  401. *err = -EBADF;
  402. file = fget_light(fd, fput_needed);
  403. if (file) {
  404. sock = sock_from_file(file, err);
  405. if (sock)
  406. return sock;
  407. fput_light(file, *fput_needed);
  408. }
  409. return NULL;
  410. }
  411. /**
  412. * sock_alloc - allocate a socket
  413. *
  414. * Allocate a new inode and socket object. The two are bound together
  415. * and initialised. The socket is then returned. If we are out of inodes
  416. * NULL is returned.
  417. */
  418. static struct socket *sock_alloc(void)
  419. {
  420. struct inode *inode;
  421. struct socket *sock;
  422. inode = new_inode(sock_mnt->mnt_sb);
  423. if (!inode)
  424. return NULL;
  425. sock = SOCKET_I(inode);
  426. kmemcheck_annotate_bitfield(sock, type);
  427. inode->i_mode = S_IFSOCK | S_IRWXUGO;
  428. inode->i_uid = current_fsuid();
  429. inode->i_gid = current_fsgid();
  430. percpu_add(sockets_in_use, 1);
  431. return sock;
  432. }
  433. /*
  434. * In theory you can't get an open on this inode, but /proc provides
  435. * a back door. Remember to keep it shut otherwise you'll let the
  436. * creepy crawlies in.
  437. */
  438. static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
  439. {
  440. return -ENXIO;
  441. }
  442. const struct file_operations bad_sock_fops = {
  443. .owner = THIS_MODULE,
  444. .open = sock_no_open,
  445. };
  446. /**
  447. * sock_release - close a socket
  448. * @sock: socket to close
  449. *
  450. * The socket is released from the protocol stack if it has a release
  451. * callback, and the inode is then released if the socket is bound to
  452. * an inode not a file.
  453. */
  454. void sock_release(struct socket *sock)
  455. {
  456. if (sock->ops) {
  457. struct module *owner = sock->ops->owner;
  458. sock->ops->release(sock);
  459. sock->ops = NULL;
  460. module_put(owner);
  461. }
  462. if (sock->wq->fasync_list)
  463. printk(KERN_ERR "sock_release: fasync list not empty!\n");
  464. percpu_sub(sockets_in_use, 1);
  465. if (!sock->file) {
  466. iput(SOCK_INODE(sock));
  467. return;
  468. }
  469. sock->file = NULL;
  470. }
  471. int sock_tx_timestamp(struct msghdr *msg, struct sock *sk,
  472. union skb_shared_tx *shtx)
  473. {
  474. shtx->flags = 0;
  475. if (sock_flag(sk, SOCK_TIMESTAMPING_TX_HARDWARE))
  476. shtx->hardware = 1;
  477. if (sock_flag(sk, SOCK_TIMESTAMPING_TX_SOFTWARE))
  478. shtx->software = 1;
  479. return 0;
  480. }
  481. EXPORT_SYMBOL(sock_tx_timestamp);
  482. static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
  483. struct msghdr *msg, size_t size)
  484. {
  485. struct sock_iocb *si = kiocb_to_siocb(iocb);
  486. int err;
  487. sock_update_classid(sock->sk);
  488. si->sock = sock;
  489. si->scm = NULL;
  490. si->msg = msg;
  491. si->size = size;
  492. err = security_socket_sendmsg(sock, msg, size);
  493. if (err)
  494. return err;
  495. return sock->ops->sendmsg(iocb, sock, msg, size);
  496. }
  497. int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
  498. {
  499. struct kiocb iocb;
  500. struct sock_iocb siocb;
  501. int ret;
  502. init_sync_kiocb(&iocb, NULL);
  503. iocb.private = &siocb;
  504. ret = __sock_sendmsg(&iocb, sock, msg, size);
  505. if (-EIOCBQUEUED == ret)
  506. ret = wait_on_sync_kiocb(&iocb);
  507. return ret;
  508. }
  509. int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
  510. struct kvec *vec, size_t num, size_t size)
  511. {
  512. mm_segment_t oldfs = get_fs();
  513. int result;
  514. set_fs(KERNEL_DS);
  515. /*
  516. * the following is safe, since for compiler definitions of kvec and
  517. * iovec are identical, yielding the same in-core layout and alignment
  518. */
  519. msg->msg_iov = (struct iovec *)vec;
  520. msg->msg_iovlen = num;
  521. result = sock_sendmsg(sock, msg, size);
  522. set_fs(oldfs);
  523. return result;
  524. }
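kernel_sendmsg() is the entry point for in-kernel users that need to transmit from kernel buffers; it does the set_fs(KERNEL_DS) switch shown above so the caller does not have to. A hedged sketch of how such a caller might push a buffer down an already created and connected kernel socket (example_send is a made-up name; the socket is assumed to come from sock_create_kern() plus kernel_connect()):

/* Illustrative sketch: send @len bytes from the kernel buffer @buf. */
static int example_send(struct socket *sock, void *buf, size_t len)
{
        struct msghdr msg = {
                .msg_flags = MSG_DONTWAIT,      /* do not block in this sketch */
        };
        struct kvec vec = {
                .iov_base = buf,
                .iov_len  = len,
        };

        /* kernel_sendmsg() plugs the kvec into msg_iov and calls sock_sendmsg(). */
        return kernel_sendmsg(sock, &msg, &vec, 1, len);
}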
  525. static int ktime2ts(ktime_t kt, struct timespec *ts)
  526. {
  527. if (kt.tv64) {
  528. *ts = ktime_to_timespec(kt);
  529. return 1;
  530. } else {
  531. return 0;
  532. }
  533. }
  534. /*
  535. * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
  536. */
  537. void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
  538. struct sk_buff *skb)
  539. {
  540. int need_software_tstamp = sock_flag(sk, SOCK_RCVTSTAMP);
  541. struct timespec ts[3];
  542. int empty = 1;
  543. struct skb_shared_hwtstamps *shhwtstamps =
  544. skb_hwtstamps(skb);
  545. /* Race occurred between timestamp enabling and packet
  546. receiving. Fill in the current time for now. */
  547. if (need_software_tstamp && skb->tstamp.tv64 == 0)
  548. __net_timestamp(skb);
  549. if (need_software_tstamp) {
  550. if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
  551. struct timeval tv;
  552. skb_get_timestamp(skb, &tv);
  553. put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
  554. sizeof(tv), &tv);
  555. } else {
  556. skb_get_timestampns(skb, &ts[0]);
  557. put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
  558. sizeof(ts[0]), &ts[0]);
  559. }
  560. }
  561. memset(ts, 0, sizeof(ts));
  562. if (skb->tstamp.tv64 &&
  563. sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) {
  564. skb_get_timestampns(skb, ts + 0);
  565. empty = 0;
  566. }
  567. if (shhwtstamps) {
  568. if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) &&
  569. ktime2ts(shhwtstamps->syststamp, ts + 1))
  570. empty = 0;
  571. if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
  572. ktime2ts(shhwtstamps->hwtstamp, ts + 2))
  573. empty = 0;
  574. }
  575. if (!empty)
  576. put_cmsg(msg, SOL_SOCKET,
  577. SCM_TIMESTAMPING, sizeof(ts), &ts);
  578. }
  579. EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
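From user space, the timestamps attached here arrive as ancillary data on recvmsg(). A user-space sketch (ordinary libc headers assumed) that enables SO_TIMESTAMP and reads the resulting SCM_TIMESTAMP control message:

/* User-space sketch: print the software receive timestamp of one datagram. */
#include <sys/socket.h>
#include <sys/uio.h>
#include <sys/time.h>
#include <string.h>
#include <stdio.h>

static void read_with_timestamp(int fd)
{
        int on = 1;
        char data[2048];
        union {
                char buf[CMSG_SPACE(sizeof(struct timeval))];
                struct cmsghdr align;           /* force cmsghdr alignment */
        } u;
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg;

        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMP, &on, sizeof(on));

        if (recvmsg(fd, &msg, 0) < 0)
                return;

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_TIMESTAMP) {
                        struct timeval tv;

                        memcpy(&tv, CMSG_DATA(cmsg), sizeof(tv));
                        printf("received at %ld.%06ld\n",
                               (long)tv.tv_sec, (long)tv.tv_usec);
                }
        }
}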
  580. inline void sock_recv_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
  581. {
  582. if (sock_flag(sk, SOCK_RXQ_OVFL) && skb && skb->dropcount)
  583. put_cmsg(msg, SOL_SOCKET, SO_RXQ_OVFL,
  584. sizeof(__u32), &skb->dropcount);
  585. }
  586. void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
  587. struct sk_buff *skb)
  588. {
  589. sock_recv_timestamp(msg, sk, skb);
  590. sock_recv_drops(msg, sk, skb);
  591. }
  592. EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops);
  593. static inline int __sock_recvmsg_nosec(struct kiocb *iocb, struct socket *sock,
  594. struct msghdr *msg, size_t size, int flags)
  595. {
  596. struct sock_iocb *si = kiocb_to_siocb(iocb);
  597. sock_update_classid(sock->sk);
  598. si->sock = sock;
  599. si->scm = NULL;
  600. si->msg = msg;
  601. si->size = size;
  602. si->flags = flags;
  603. return sock->ops->recvmsg(iocb, sock, msg, size, flags);
  604. }
  605. static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
  606. struct msghdr *msg, size_t size, int flags)
  607. {
  608. int err = security_socket_recvmsg(sock, msg, size, flags);
  609. return err ?: __sock_recvmsg_nosec(iocb, sock, msg, size, flags);
  610. }
  611. int sock_recvmsg(struct socket *sock, struct msghdr *msg,
  612. size_t size, int flags)
  613. {
  614. struct kiocb iocb;
  615. struct sock_iocb siocb;
  616. int ret;
  617. init_sync_kiocb(&iocb, NULL);
  618. iocb.private = &siocb;
  619. ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
  620. if (-EIOCBQUEUED == ret)
  621. ret = wait_on_sync_kiocb(&iocb);
  622. return ret;
  623. }
  624. static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg,
  625. size_t size, int flags)
  626. {
  627. struct kiocb iocb;
  628. struct sock_iocb siocb;
  629. int ret;
  630. init_sync_kiocb(&iocb, NULL);
  631. iocb.private = &siocb;
  632. ret = __sock_recvmsg_nosec(&iocb, sock, msg, size, flags);
  633. if (-EIOCBQUEUED == ret)
  634. ret = wait_on_sync_kiocb(&iocb);
  635. return ret;
  636. }
  637. int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
  638. struct kvec *vec, size_t num, size_t size, int flags)
  639. {
  640. mm_segment_t oldfs = get_fs();
  641. int result;
  642. set_fs(KERNEL_DS);
  643. /*
  644. * the following is safe, since for compiler definitions of kvec and
  645. * iovec are identical, yielding the same in-core layout and alignment
  646. */
  647. msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num;
  648. result = sock_recvmsg(sock, msg, size, flags);
  649. set_fs(oldfs);
  650. return result;
  651. }
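kernel_recvmsg() is the receive-side counterpart. A sketch of pulling data into a kernel buffer over the same kind of kernel socket as in the send example (example_recv is again a hypothetical name):

/* Illustrative sketch: receive up to @len bytes into the kernel buffer @buf,
 * returning the byte count or a negative errno. */
static int example_recv(struct socket *sock, void *buf, size_t len)
{
        struct msghdr msg = { .msg_flags = 0 };
        struct kvec vec = {
                .iov_base = buf,
                .iov_len  = len,
        };

        /* MSG_WAITALL keeps waiting until @len bytes arrive, EOF, or an error. */
        return kernel_recvmsg(sock, &msg, &vec, 1, len, MSG_WAITALL);
}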
  652. static void sock_aio_dtor(struct kiocb *iocb)
  653. {
  654. kfree(iocb->private);
  655. }
  656. static ssize_t sock_sendpage(struct file *file, struct page *page,
  657. int offset, size_t size, loff_t *ppos, int more)
  658. {
  659. struct socket *sock;
  660. int flags;
  661. sock = file->private_data;
  662. flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
  663. if (more)
  664. flags |= MSG_MORE;
  665. return kernel_sendpage(sock, page, offset, size, flags);
  666. }
  667. static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
  668. struct pipe_inode_info *pipe, size_t len,
  669. unsigned int flags)
  670. {
  671. struct socket *sock = file->private_data;
  672. if (unlikely(!sock->ops->splice_read))
  673. return -EINVAL;
  674. sock_update_classid(sock->sk);
  675. return sock->ops->splice_read(sock, ppos, pipe, len, flags);
  676. }
  677. static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
  678. struct sock_iocb *siocb)
  679. {
  680. if (!is_sync_kiocb(iocb)) {
  681. siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
  682. if (!siocb)
  683. return NULL;
  684. iocb->ki_dtor = sock_aio_dtor;
  685. }
  686. siocb->kiocb = iocb;
  687. iocb->private = siocb;
  688. return siocb;
  689. }
  690. static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
  691. struct file *file, const struct iovec *iov,
  692. unsigned long nr_segs)
  693. {
  694. struct socket *sock = file->private_data;
  695. size_t size = 0;
  696. int i;
  697. for (i = 0; i < nr_segs; i++)
  698. size += iov[i].iov_len;
  699. msg->msg_name = NULL;
  700. msg->msg_namelen = 0;
  701. msg->msg_control = NULL;
  702. msg->msg_controllen = 0;
  703. msg->msg_iov = (struct iovec *)iov;
  704. msg->msg_iovlen = nr_segs;
  705. msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
  706. return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
  707. }
  708. static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
  709. unsigned long nr_segs, loff_t pos)
  710. {
  711. struct sock_iocb siocb, *x;
  712. if (pos != 0)
  713. return -ESPIPE;
  714. if (iocb->ki_left == 0) /* Match SYS5 behaviour */
  715. return 0;
  716. x = alloc_sock_iocb(iocb, &siocb);
  717. if (!x)
  718. return -ENOMEM;
  719. return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
  720. }
  721. static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
  722. struct file *file, const struct iovec *iov,
  723. unsigned long nr_segs)
  724. {
  725. struct socket *sock = file->private_data;
  726. size_t size = 0;
  727. int i;
  728. for (i = 0; i < nr_segs; i++)
  729. size += iov[i].iov_len;
  730. msg->msg_name = NULL;
  731. msg->msg_namelen = 0;
  732. msg->msg_control = NULL;
  733. msg->msg_controllen = 0;
  734. msg->msg_iov = (struct iovec *)iov;
  735. msg->msg_iovlen = nr_segs;
  736. msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
  737. if (sock->type == SOCK_SEQPACKET)
  738. msg->msg_flags |= MSG_EOR;
  739. return __sock_sendmsg(iocb, sock, msg, size);
  740. }
  741. static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
  742. unsigned long nr_segs, loff_t pos)
  743. {
  744. struct sock_iocb siocb, *x;
  745. if (pos != 0)
  746. return -ESPIPE;
  747. x = alloc_sock_iocb(iocb, &siocb);
  748. if (!x)
  749. return -ENOMEM;
  750. return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
  751. }
  752. /*
  753. * Atomic setting of ioctl hooks to avoid race
  754. * with module unload.
  755. */
  756. static DEFINE_MUTEX(br_ioctl_mutex);
  757. static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
  758. void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
  759. {
  760. mutex_lock(&br_ioctl_mutex);
  761. br_ioctl_hook = hook;
  762. mutex_unlock(&br_ioctl_mutex);
  763. }
  764. EXPORT_SYMBOL(brioctl_set);
  765. static DEFINE_MUTEX(vlan_ioctl_mutex);
  766. static int (*vlan_ioctl_hook) (struct net *, void __user *arg);
  767. void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
  768. {
  769. mutex_lock(&vlan_ioctl_mutex);
  770. vlan_ioctl_hook = hook;
  771. mutex_unlock(&vlan_ioctl_mutex);
  772. }
  773. EXPORT_SYMBOL(vlan_ioctl_set);
  774. static DEFINE_MUTEX(dlci_ioctl_mutex);
  775. static int (*dlci_ioctl_hook) (unsigned int, void __user *);
  776. void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
  777. {
  778. mutex_lock(&dlci_ioctl_mutex);
  779. dlci_ioctl_hook = hook;
  780. mutex_unlock(&dlci_ioctl_mutex);
  781. }
  782. EXPORT_SYMBOL(dlci_ioctl_set);
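These three setters exist so that the optional bridge, 802.1q VLAN and DLCI modules can plug their ioctl handlers into the socket layer when loaded and unplug them on unload, with the mutexes guarding against a concurrent sock_ioctl(). A hedged sketch of the registration pattern from the module side (my_vlan_ioctl and the init/exit names are made up; the real 8021q module installs its handler the same way):

/* Illustrative module-side sketch for the VLAN hook. */
static int my_vlan_ioctl(struct net *net, void __user *arg)
{
        /* decode and handle the SIOC[GS]IFVLAN request here */
        return -EINVAL;
}

static int __init my_vlan_init(void)
{
        vlan_ioctl_set(my_vlan_ioctl);          /* install the hook */
        return 0;
}

static void __exit my_vlan_exit(void)
{
        vlan_ioctl_set(NULL);                   /* remove it before unload */
}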
  783. static long sock_do_ioctl(struct net *net, struct socket *sock,
  784. unsigned int cmd, unsigned long arg)
  785. {
  786. int err;
  787. void __user *argp = (void __user *)arg;
  788. err = sock->ops->ioctl(sock, cmd, arg);
  789. /*
  790. * If this ioctl is unknown try to hand it down
  791. * to the NIC driver.
  792. */
  793. if (err == -ENOIOCTLCMD)
  794. err = dev_ioctl(net, cmd, argp);
  795. return err;
  796. }
  797. /*
  798. * With an ioctl, arg may well be a user mode pointer, but we don't know
  799. * what to do with it - that's up to the protocol still.
  800. */
  801. static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
  802. {
  803. struct socket *sock;
  804. struct sock *sk;
  805. void __user *argp = (void __user *)arg;
  806. int pid, err;
  807. struct net *net;
  808. sock = file->private_data;
  809. sk = sock->sk;
  810. net = sock_net(sk);
  811. if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
  812. err = dev_ioctl(net, cmd, argp);
  813. } else
  814. #ifdef CONFIG_WEXT_CORE
  815. if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
  816. err = dev_ioctl(net, cmd, argp);
  817. } else
  818. #endif
  819. switch (cmd) {
  820. case FIOSETOWN:
  821. case SIOCSPGRP:
  822. err = -EFAULT;
  823. if (get_user(pid, (int __user *)argp))
  824. break;
  825. err = f_setown(sock->file, pid, 1);
  826. break;
  827. case FIOGETOWN:
  828. case SIOCGPGRP:
  829. err = put_user(f_getown(sock->file),
  830. (int __user *)argp);
  831. break;
  832. case SIOCGIFBR:
  833. case SIOCSIFBR:
  834. case SIOCBRADDBR:
  835. case SIOCBRDELBR:
  836. err = -ENOPKG;
  837. if (!br_ioctl_hook)
  838. request_module("bridge");
  839. mutex_lock(&br_ioctl_mutex);
  840. if (br_ioctl_hook)
  841. err = br_ioctl_hook(net, cmd, argp);
  842. mutex_unlock(&br_ioctl_mutex);
  843. break;
  844. case SIOCGIFVLAN:
  845. case SIOCSIFVLAN:
  846. err = -ENOPKG;
  847. if (!vlan_ioctl_hook)
  848. request_module("8021q");
  849. mutex_lock(&vlan_ioctl_mutex);
  850. if (vlan_ioctl_hook)
  851. err = vlan_ioctl_hook(net, argp);
  852. mutex_unlock(&vlan_ioctl_mutex);
  853. break;
  854. case SIOCADDDLCI:
  855. case SIOCDELDLCI:
  856. err = -ENOPKG;
  857. if (!dlci_ioctl_hook)
  858. request_module("dlci");
  859. mutex_lock(&dlci_ioctl_mutex);
  860. if (dlci_ioctl_hook)
  861. err = dlci_ioctl_hook(cmd, argp);
  862. mutex_unlock(&dlci_ioctl_mutex);
  863. break;
  864. default:
  865. err = sock_do_ioctl(net, sock, cmd, arg);
  866. break;
  867. }
  868. return err;
  869. }
  870. int sock_create_lite(int family, int type, int protocol, struct socket **res)
  871. {
  872. int err;
  873. struct socket *sock = NULL;
  874. err = security_socket_create(family, type, protocol, 1);
  875. if (err)
  876. goto out;
  877. sock = sock_alloc();
  878. if (!sock) {
  879. err = -ENOMEM;
  880. goto out;
  881. }
  882. sock->type = type;
  883. err = security_socket_post_create(sock, family, type, protocol, 1);
  884. if (err)
  885. goto out_release;
  886. out:
  887. *res = sock;
  888. return err;
  889. out_release:
  890. sock_release(sock);
  891. sock = NULL;
  892. goto out;
  893. }
  894. /* No kernel lock held - perfect */
  895. static unsigned int sock_poll(struct file *file, poll_table *wait)
  896. {
  897. struct socket *sock;
  898. /*
  899. * We can't return errors to poll, so it's either yes or no.
  900. */
  901. sock = file->private_data;
  902. return sock->ops->poll(file, sock, wait);
  903. }
  904. static int sock_mmap(struct file *file, struct vm_area_struct *vma)
  905. {
  906. struct socket *sock = file->private_data;
  907. return sock->ops->mmap(file, sock, vma);
  908. }
  909. static int sock_close(struct inode *inode, struct file *filp)
  910. {
  911. /*
912. * It is possible that the inode is NULL if we are
913. * closing an unfinished socket.
  914. */
  915. if (!inode) {
  916. printk(KERN_DEBUG "sock_close: NULL inode\n");
  917. return 0;
  918. }
  919. sock_release(SOCKET_I(inode));
  920. return 0;
  921. }
  922. /*
  923. * Update the socket async list
  924. *
  925. * Fasync_list locking strategy.
  926. *
  927. * 1. fasync_list is modified only under process context socket lock
  928. * i.e. under semaphore.
  929. * 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
  930. * or under socket lock
  931. */
  932. static int sock_fasync(int fd, struct file *filp, int on)
  933. {
  934. struct socket *sock = filp->private_data;
  935. struct sock *sk = sock->sk;
  936. if (sk == NULL)
  937. return -EINVAL;
  938. lock_sock(sk);
  939. fasync_helper(fd, filp, on, &sock->wq->fasync_list);
  940. if (!sock->wq->fasync_list)
  941. sock_reset_flag(sk, SOCK_FASYNC);
  942. else
  943. sock_set_flag(sk, SOCK_FASYNC);
  944. release_sock(sk);
  945. return 0;
  946. }
  947. /* This function may be called only under socket lock or callback_lock or rcu_lock */
  948. int sock_wake_async(struct socket *sock, int how, int band)
  949. {
  950. struct socket_wq *wq;
  951. if (!sock)
  952. return -1;
  953. rcu_read_lock();
  954. wq = rcu_dereference(sock->wq);
  955. if (!wq || !wq->fasync_list) {
  956. rcu_read_unlock();
  957. return -1;
  958. }
  959. switch (how) {
  960. case SOCK_WAKE_WAITD:
  961. if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
  962. break;
  963. goto call_kill;
  964. case SOCK_WAKE_SPACE:
  965. if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
  966. break;
  967. /* fall through */
  968. case SOCK_WAKE_IO:
  969. call_kill:
  970. kill_fasync(&wq->fasync_list, SIGIO, band);
  971. break;
  972. case SOCK_WAKE_URG:
  973. kill_fasync(&wq->fasync_list, SIGURG, band);
  974. }
  975. rcu_read_unlock();
  976. return 0;
  977. }
  978. static int __sock_create(struct net *net, int family, int type, int protocol,
  979. struct socket **res, int kern)
  980. {
  981. int err;
  982. struct socket *sock;
  983. const struct net_proto_family *pf;
  984. /*
  985. * Check protocol is in range
  986. */
  987. if (family < 0 || family >= NPROTO)
  988. return -EAFNOSUPPORT;
  989. if (type < 0 || type >= SOCK_MAX)
  990. return -EINVAL;
  991. /* Compatibility.
  992. This uglymoron is moved from INET layer to here to avoid
  993. deadlock in module load.
  994. */
  995. if (family == PF_INET && type == SOCK_PACKET) {
  996. static int warned;
  997. if (!warned) {
  998. warned = 1;
  999. printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
  1000. current->comm);
  1001. }
  1002. family = PF_PACKET;
  1003. }
  1004. err = security_socket_create(family, type, protocol, kern);
  1005. if (err)
  1006. return err;
  1007. /*
  1008. * Allocate the socket and allow the family to set things up. if
  1009. * the protocol is 0, the family is instructed to select an appropriate
  1010. * default.
  1011. */
  1012. sock = sock_alloc();
  1013. if (!sock) {
  1014. if (net_ratelimit())
  1015. printk(KERN_WARNING "socket: no more sockets\n");
1016. return -ENFILE; /* Not exactly a match, but it's the
1017. closest POSIX thing */
  1018. }
  1019. sock->type = type;
  1020. #ifdef CONFIG_MODULES
  1021. /* Attempt to load a protocol module if the find failed.
  1022. *
  1023. * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
  1024. * requested real, full-featured networking support upon configuration.
  1025. * Otherwise module support will break!
  1026. */
  1027. if (net_families[family] == NULL)
  1028. request_module("net-pf-%d", family);
  1029. #endif
  1030. rcu_read_lock();
  1031. pf = rcu_dereference(net_families[family]);
  1032. err = -EAFNOSUPPORT;
  1033. if (!pf)
  1034. goto out_release;
  1035. /*
  1036. * We will call the ->create function, that possibly is in a loadable
  1037. * module, so we have to bump that loadable module refcnt first.
  1038. */
  1039. if (!try_module_get(pf->owner))
  1040. goto out_release;
  1041. /* Now protected by module ref count */
  1042. rcu_read_unlock();
  1043. err = pf->create(net, sock, protocol, kern);
  1044. if (err < 0)
  1045. goto out_module_put;
  1046. /*
  1047. * Now to bump the refcnt of the [loadable] module that owns this
  1048. * socket at sock_release time we decrement its refcnt.
  1049. */
  1050. if (!try_module_get(sock->ops->owner))
  1051. goto out_module_busy;
  1052. /*
  1053. * Now that we're done with the ->create function, the [loadable]
  1054. * module can have its refcnt decremented
  1055. */
  1056. module_put(pf->owner);
  1057. err = security_socket_post_create(sock, family, type, protocol, kern);
  1058. if (err)
  1059. goto out_sock_release;
  1060. *res = sock;
  1061. return 0;
  1062. out_module_busy:
  1063. err = -EAFNOSUPPORT;
  1064. out_module_put:
  1065. sock->ops = NULL;
  1066. module_put(pf->owner);
  1067. out_sock_release:
  1068. sock_release(sock);
  1069. return err;
  1070. out_release:
  1071. rcu_read_unlock();
  1072. goto out_sock_release;
  1073. }
  1074. int sock_create(int family, int type, int protocol, struct socket **res)
  1075. {
  1076. return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
  1077. }
  1078. int sock_create_kern(int family, int type, int protocol, struct socket **res)
  1079. {
  1080. return __sock_create(&init_net, family, type, protocol, res, 1);
  1081. }
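__sock_create() resolves the address family through the net_families[] table and demand-loads missing families via the "net-pf-%d" module alias. Families place themselves in that table with sock_register(), which is defined further down in this file (past the truncated portion of this listing). A hedged sketch of the module side (all my_* names and the PF_DECnet value are placeholders; a real module uses the PF_* number it owns):

static int my_pf_create(struct net *net, struct socket *sock,
                        int protocol, int kern)
{
        /* allocate the struct sock, set sock->ops and sock->state, etc. */
        return -EPROTONOSUPPORT;
}

static const struct net_proto_family my_family_ops = {
        .family = PF_DECnet,                    /* placeholder PF_* value */
        .create = my_pf_create,
        .owner  = THIS_MODULE,
};

static int __init my_pf_init(void)
{
        /* after this, socket(PF_DECnet, ...) reaches my_pf_create() */
        return sock_register(&my_family_ops);
}

static void __exit my_pf_exit(void)
{
        sock_unregister(PF_DECnet);
}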
  1082. SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
  1083. {
  1084. int retval;
  1085. struct socket *sock;
  1086. int flags;
  1087. /* Check the SOCK_* constants for consistency. */
  1088. BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
  1089. BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
  1090. BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
  1091. BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
  1092. flags = type & ~SOCK_TYPE_MASK;
  1093. if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
  1094. return -EINVAL;
  1095. type &= SOCK_TYPE_MASK;
  1096. if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
  1097. flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
  1098. retval = sock_create(family, type, protocol, &sock);
  1099. if (retval < 0)
  1100. goto out;
  1101. retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
  1102. if (retval < 0)
  1103. goto out_release;
  1104. out:
  1105. /* It may be already another descriptor 8) Not kernel problem. */
  1106. return retval;
  1107. out_release:
  1108. sock_release(sock);
  1109. return retval;
  1110. }
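At the system-call boundary the type argument carries both the socket type and the SOCK_NONBLOCK/SOCK_CLOEXEC creation flags, which the code above separates with SOCK_TYPE_MASK. From user space (glibc, Linux-specific flags) that looks like:

/* User-space sketch: type and creation flags are or'ed into one argument. */
#define _GNU_SOURCE
#include <sys/socket.h>

static int make_nonblocking_socket(void)
{
        return socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
}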
  1111. /*
  1112. * Create a pair of connected sockets.
  1113. */
  1114. SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
  1115. int __user *, usockvec)
  1116. {
  1117. struct socket *sock1, *sock2;
  1118. int fd1, fd2, err;
  1119. struct file *newfile1, *newfile2;
  1120. int flags;
  1121. flags = type & ~SOCK_TYPE_MASK;
  1122. if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
  1123. return -EINVAL;
  1124. type &= SOCK_TYPE_MASK;
  1125. if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
  1126. flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
  1127. /*
  1128. * Obtain the first socket and check if the underlying protocol
  1129. * supports the socketpair call.
  1130. */
  1131. err = sock_create(family, type, protocol, &sock1);
  1132. if (err < 0)
  1133. goto out;
  1134. err = sock_create(family, type, protocol, &sock2);
  1135. if (err < 0)
  1136. goto out_release_1;
  1137. err = sock1->ops->socketpair(sock1, sock2);
  1138. if (err < 0)
  1139. goto out_release_both;
  1140. fd1 = sock_alloc_file(sock1, &newfile1, flags);
  1141. if (unlikely(fd1 < 0)) {
  1142. err = fd1;
  1143. goto out_release_both;
  1144. }
  1145. fd2 = sock_alloc_file(sock2, &newfile2, flags);
  1146. if (unlikely(fd2 < 0)) {
  1147. err = fd2;
  1148. fput(newfile1);
  1149. put_unused_fd(fd1);
  1150. sock_release(sock2);
  1151. goto out;
  1152. }
  1153. audit_fd_pair(fd1, fd2);
  1154. fd_install(fd1, newfile1);
  1155. fd_install(fd2, newfile2);
  1156. /* fd1 and fd2 may be already another descriptors.
  1157. * Not kernel problem.
  1158. */
  1159. err = put_user(fd1, &usockvec[0]);
  1160. if (!err)
  1161. err = put_user(fd2, &usockvec[1]);
  1162. if (!err)
  1163. return 0;
  1164. sys_close(fd2);
  1165. sys_close(fd1);
  1166. return err;
  1167. out_release_both:
  1168. sock_release(sock2);
  1169. out_release_1:
  1170. sock_release(sock1);
  1171. out:
  1172. return err;
  1173. }
  1174. /*
  1175. * Bind a name to a socket. Nothing much to do here since it's
  1176. * the protocol's responsibility to handle the local address.
  1177. *
  1178. * We move the socket address to kernel space before we call
  1179. * the protocol layer (having also checked the address is ok).
  1180. */
  1181. SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
  1182. {
  1183. struct socket *sock;
  1184. struct sockaddr_storage address;
  1185. int err, fput_needed;
  1186. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1187. if (sock) {
  1188. err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
  1189. if (err >= 0) {
  1190. err = security_socket_bind(sock,
  1191. (struct sockaddr *)&address,
  1192. addrlen);
  1193. if (!err)
  1194. err = sock->ops->bind(sock,
  1195. (struct sockaddr *)
  1196. &address, addrlen);
  1197. }
  1198. fput_light(sock->file, fput_needed);
  1199. }
  1200. return err;
  1201. }
  1202. /*
  1203. * Perform a listen. Basically, we allow the protocol to do anything
  1204. * necessary for a listen, and if that works, we mark the socket as
  1205. * ready for listening.
  1206. */
  1207. SYSCALL_DEFINE2(listen, int, fd, int, backlog)
  1208. {
  1209. struct socket *sock;
  1210. int err, fput_needed;
  1211. int somaxconn;
  1212. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1213. if (sock) {
  1214. somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
  1215. if ((unsigned)backlog > somaxconn)
  1216. backlog = somaxconn;
  1217. err = security_socket_listen(sock, backlog);
  1218. if (!err)
  1219. err = sock->ops->listen(sock, backlog);
  1220. fput_light(sock->file, fput_needed);
  1221. }
  1222. return err;
  1223. }
  1224. /*
  1225. * For accept, we attempt to create a new socket, set up the link
  1226. * with the client, wake up the client, then return the new
  1227. * connected fd. We collect the address of the connector in kernel
  1228. * space and move it to user at the very end. This is unclean because
  1229. * we open the socket then return an error.
  1230. *
  1231. * 1003.1g adds the ability to recvmsg() to query connection pending
1232. * status to recvmsg. We need to add that support in a way that's
1233. * clean when we restructure accept also.
  1234. */
  1235. SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
  1236. int __user *, upeer_addrlen, int, flags)
  1237. {
  1238. struct socket *sock, *newsock;
  1239. struct file *newfile;
  1240. int err, len, newfd, fput_needed;
  1241. struct sockaddr_storage address;
  1242. if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
  1243. return -EINVAL;
  1244. if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
  1245. flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
  1246. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1247. if (!sock)
  1248. goto out;
  1249. err = -ENFILE;
  1250. if (!(newsock = sock_alloc()))
  1251. goto out_put;
  1252. newsock->type = sock->type;
  1253. newsock->ops = sock->ops;
  1254. /*
  1255. * We don't need try_module_get here, as the listening socket (sock)
  1256. * has the protocol module (sock->ops->owner) held.
  1257. */
  1258. __module_get(newsock->ops->owner);
  1259. newfd = sock_alloc_file(newsock, &newfile, flags);
  1260. if (unlikely(newfd < 0)) {
  1261. err = newfd;
  1262. sock_release(newsock);
  1263. goto out_put;
  1264. }
  1265. err = security_socket_accept(sock, newsock);
  1266. if (err)
  1267. goto out_fd;
  1268. err = sock->ops->accept(sock, newsock, sock->file->f_flags);
  1269. if (err < 0)
  1270. goto out_fd;
  1271. if (upeer_sockaddr) {
  1272. if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
  1273. &len, 2) < 0) {
  1274. err = -ECONNABORTED;
  1275. goto out_fd;
  1276. }
  1277. err = move_addr_to_user((struct sockaddr *)&address,
  1278. len, upeer_sockaddr, upeer_addrlen);
  1279. if (err < 0)
  1280. goto out_fd;
  1281. }
1282. /* File flags are not inherited via accept(), unlike other OSes. */
  1283. fd_install(newfd, newfile);
  1284. err = newfd;
  1285. out_put:
  1286. fput_light(sock->file, fput_needed);
  1287. out:
  1288. return err;
  1289. out_fd:
  1290. fput(newfile);
  1291. put_unused_fd(newfd);
  1292. goto out_put;
  1293. }
  1294. SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
  1295. int __user *, upeer_addrlen)
  1296. {
  1297. return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
  1298. }
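bind(), listen() and accept4() above together implement the passive-open sequence. A user-space sketch of a minimal IPv4 acceptor (error handling trimmed; port 8080 is an arbitrary choice):

#define _GNU_SOURCE
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>

static int accept_one_client(void)
{
        struct sockaddr_in addr, peer;
        socklen_t plen = sizeof(peer);
        int lfd, cfd;

        lfd = socket(AF_INET, SOCK_STREAM, 0);

        memset(&addr, 0, sizeof(addr));
        addr.sin_family = AF_INET;
        addr.sin_addr.s_addr = htonl(INADDR_ANY);
        addr.sin_port = htons(8080);

        bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
        listen(lfd, 128);                       /* the kernel clamps this to somaxconn */

        /* accept4() lets the new descriptor start out with O_CLOEXEC set */
        cfd = accept4(lfd, (struct sockaddr *)&peer, &plen, SOCK_CLOEXEC);
        return cfd;
}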
  1299. /*
  1300. * Attempt to connect to a socket with the server address. The address
  1301. * is in user space so we verify it is OK and move it to kernel space.
  1302. *
  1303. * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
  1304. * break bindings
  1305. *
  1306. * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
  1307. * other SEQPACKET protocols that take time to connect() as it doesn't
  1308. * include the -EINPROGRESS status for such sockets.
  1309. */
  1310. SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
  1311. int, addrlen)
  1312. {
  1313. struct socket *sock;
  1314. struct sockaddr_storage address;
  1315. int err, fput_needed;
  1316. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1317. if (!sock)
  1318. goto out;
  1319. err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address);
  1320. if (err < 0)
  1321. goto out_put;
  1322. err =
  1323. security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
  1324. if (err)
  1325. goto out_put;
  1326. err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
  1327. sock->file->f_flags);
  1328. out_put:
  1329. fput_light(sock->file, fput_needed);
  1330. out:
  1331. return err;
  1332. }
  1333. /*
  1334. * Get the local address ('name') of a socket object. Move the obtained
  1335. * name to user space.
  1336. */
  1337. SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
  1338. int __user *, usockaddr_len)
  1339. {
  1340. struct socket *sock;
  1341. struct sockaddr_storage address;
  1342. int len, err, fput_needed;
  1343. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1344. if (!sock)
  1345. goto out;
  1346. err = security_socket_getsockname(sock);
  1347. if (err)
  1348. goto out_put;
  1349. err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
  1350. if (err)
  1351. goto out_put;
  1352. err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);
  1353. out_put:
  1354. fput_light(sock->file, fput_needed);
  1355. out:
  1356. return err;
  1357. }
  1358. /*
  1359. * Get the remote address ('name') of a socket object. Move the obtained
  1360. * name to user space.
  1361. */
  1362. SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
  1363. int __user *, usockaddr_len)
  1364. {
  1365. struct socket *sock;
  1366. struct sockaddr_storage address;
  1367. int len, err, fput_needed;
  1368. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1369. if (sock != NULL) {
  1370. err = security_socket_getpeername(sock);
  1371. if (err) {
  1372. fput_light(sock->file, fput_needed);
  1373. return err;
  1374. }
  1375. err =
  1376. sock->ops->getname(sock, (struct sockaddr *)&address, &len,
  1377. 1);
  1378. if (!err)
  1379. err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
  1380. usockaddr_len);
  1381. fput_light(sock->file, fput_needed);
  1382. }
  1383. return err;
  1384. }
  1385. /*
  1386. * Send a datagram to a given address. We move the address into kernel
  1387. * space and check the user space data area is readable before invoking
  1388. * the protocol.
  1389. */
  1390. SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
  1391. unsigned, flags, struct sockaddr __user *, addr,
  1392. int, addr_len)
  1393. {
  1394. struct socket *sock;
  1395. struct sockaddr_storage address;
  1396. int err;
  1397. struct msghdr msg;
  1398. struct iovec iov;
  1399. int fput_needed;
  1400. if (len > INT_MAX)
  1401. len = INT_MAX;
  1402. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1403. if (!sock)
  1404. goto out;
  1405. iov.iov_base = buff;
  1406. iov.iov_len = len;
  1407. msg.msg_name = NULL;
  1408. msg.msg_iov = &iov;
  1409. msg.msg_iovlen = 1;
  1410. msg.msg_control = NULL;
  1411. msg.msg_controllen = 0;
  1412. msg.msg_namelen = 0;
  1413. if (addr) {
  1414. err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
  1415. if (err < 0)
  1416. goto out_put;
  1417. msg.msg_name = (struct sockaddr *)&address;
  1418. msg.msg_namelen = addr_len;
  1419. }
  1420. if (sock->file->f_flags & O_NONBLOCK)
  1421. flags |= MSG_DONTWAIT;
  1422. msg.msg_flags = flags;
  1423. err = sock_sendmsg(sock, &msg, len);
  1424. out_put:
  1425. fput_light(sock->file, fput_needed);
  1426. out:
  1427. return err;
  1428. }
  1429. /*
  1430. * Send a datagram down a socket.
  1431. */
  1432. SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
  1433. unsigned, flags)
  1434. {
  1435. return sys_sendto(fd, buff, len, flags, NULL, 0);
  1436. }
  1437. /*
  1438. * Receive a frame from the socket and optionally record the address of the
  1439. * sender. We verify the buffers are writable and if needed move the
  1440. * sender address from kernel to user space.
  1441. */
  1442. SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
  1443. unsigned, flags, struct sockaddr __user *, addr,
  1444. int __user *, addr_len)
  1445. {
  1446. struct socket *sock;
  1447. struct iovec iov;
  1448. struct msghdr msg;
  1449. struct sockaddr_storage address;
  1450. int err, err2;
  1451. int fput_needed;
  1452. if (size > INT_MAX)
  1453. size = INT_MAX;
  1454. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1455. if (!sock)
  1456. goto out;
  1457. msg.msg_control = NULL;
  1458. msg.msg_controllen = 0;
  1459. msg.msg_iovlen = 1;
  1460. msg.msg_iov = &iov;
  1461. iov.iov_len = size;
  1462. iov.iov_base = ubuf;
  1463. msg.msg_name = (struct sockaddr *)&address;
  1464. msg.msg_namelen = sizeof(address);
  1465. if (sock->file->f_flags & O_NONBLOCK)
  1466. flags |= MSG_DONTWAIT;
  1467. err = sock_recvmsg(sock, &msg, size, flags);
  1468. if (err >= 0 && addr != NULL) {
  1469. err2 = move_addr_to_user((struct sockaddr *)&address,
  1470. msg.msg_namelen, addr, addr_len);
  1471. if (err2 < 0)
  1472. err = err2;
  1473. }
  1474. fput_light(sock->file, fput_needed);
  1475. out:
  1476. return err;
  1477. }
  1478. /*
  1479. * Receive a datagram from a socket.
  1480. */
  1481. asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
  1482. unsigned flags)
  1483. {
  1484. return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
  1485. }
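sendto() and recvfrom() are the datagram-oriented wrappers around the same sendmsg/recvmsg core, with the address moved across the user/kernel boundary by the helpers at the top of the file. A user-space sketch of a one-shot UDP exchange (127.0.0.1:9999 is an arbitrary example address):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static ssize_t udp_exchange_once(const char *payload, size_t len,
                                 char *reply, size_t replylen)
{
        struct sockaddr_in dst, src;
        socklen_t slen = sizeof(src);
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        memset(&dst, 0, sizeof(dst));
        dst.sin_family = AF_INET;
        dst.sin_port = htons(9999);
        dst.sin_addr.s_addr = inet_addr("127.0.0.1");

        sendto(fd, payload, len, 0, (struct sockaddr *)&dst, sizeof(dst));

        /* recvfrom() fills in the sender's address, truncating it to slen and
         * reporting the true length back, just as move_addr_to_user() does. */
        return recvfrom(fd, reply, replylen, 0, (struct sockaddr *)&src, &slen);
}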
  1486. /*
  1487. * Set a socket option. Because we don't know the option lengths we have
  1488. * to pass the user mode parameter for the protocols to sort out.
  1489. */
  1490. SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
  1491. char __user *, optval, int, optlen)
  1492. {
  1493. int err, fput_needed;
  1494. struct socket *sock;
  1495. if (optlen < 0)
  1496. return -EINVAL;
  1497. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1498. if (sock != NULL) {
  1499. err = security_socket_setsockopt(sock, level, optname);
  1500. if (err)
  1501. goto out_put;
  1502. if (level == SOL_SOCKET)
  1503. err =
  1504. sock_setsockopt(sock, level, optname, optval,
  1505. optlen);
  1506. else
  1507. err =
  1508. sock->ops->setsockopt(sock, level, optname, optval,
  1509. optlen);
  1510. out_put:
  1511. fput_light(sock->file, fput_needed);
  1512. }
  1513. return err;
  1514. }
  1515. /*
  1516. * Get a socket option. Because we don't know the option lengths we have
  1517. * to pass a user mode parameter for the protocols to sort out.
  1518. */
  1519. SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
  1520. char __user *, optval, int __user *, optlen)
  1521. {
  1522. int err, fput_needed;
  1523. struct socket *sock;
  1524. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1525. if (sock != NULL) {
  1526. err = security_socket_getsockopt(sock, level, optname);
  1527. if (err)
  1528. goto out_put;
  1529. if (level == SOL_SOCKET)
  1530. err =
  1531. sock_getsockopt(sock, level, optname, optval,
  1532. optlen);
  1533. else
  1534. err =
  1535. sock->ops->getsockopt(sock, level, optname, optval,
  1536. optlen);
  1537. out_put:
  1538. fput_light(sock->file, fput_needed);
  1539. }
  1540. return err;
  1541. }
  1542. /*
  1543. * Shutdown a socket.
  1544. */
  1545. SYSCALL_DEFINE2(shutdown, int, fd, int, how)
  1546. {
  1547. int err, fput_needed;
  1548. struct socket *sock;
  1549. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1550. if (sock != NULL) {
  1551. err = security_socket_shutdown(sock, how);
  1552. if (!err)
  1553. err = sock->ops->shutdown(sock, how);
  1554. fput_light(sock->file, fput_needed);
  1555. }
  1556. return err;
  1557. }
  1558. /* A couple of helpful macros for getting the address of the 32/64 bit
  1559. * fields which are the same type (int / unsigned) on our platforms.
  1560. */
  1561. #define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
  1562. #define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
  1563. #define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
  1564. /*
  1565. * BSD sendmsg interface
  1566. */
  1567. SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
  1568. {
  1569. struct compat_msghdr __user *msg_compat =
  1570. (struct compat_msghdr __user *)msg;
  1571. struct socket *sock;
  1572. struct sockaddr_storage address;
  1573. struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
  1574. unsigned char ctl[sizeof(struct cmsghdr) + 20]
  1575. __attribute__ ((aligned(sizeof(__kernel_size_t))));
  1576. /* 20 is size of ipv6_pktinfo */
  1577. unsigned char *ctl_buf = ctl;
  1578. struct msghdr msg_sys;
  1579. int err, ctl_len, iov_size, total_len;
  1580. int fput_needed;
  1581. err = -EFAULT;
  1582. if (MSG_CMSG_COMPAT & flags) {
  1583. if (get_compat_msghdr(&msg_sys, msg_compat))
  1584. return -EFAULT;
  1585. }
  1586. else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
  1587. return -EFAULT;
  1588. sock = sockfd_lookup_light(fd, &err, &fput_needed);
  1589. if (!sock)
  1590. goto out;
  1591. /* do not move before msg_sys is valid */
  1592. err = -EMSGSIZE;
  1593. if (msg_sys.msg_iovlen > UIO_MAXIOV)
  1594. goto out_put;
  1595. /* Check whether to allocate the iovec area */
  1596. err = -ENOMEM;
  1597. iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
  1598. if (msg_sys.msg_iovlen > UIO_FASTIOV) {
  1599. iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
  1600. if (!iov)
  1601. goto out_put;
  1602. }
  1603. /* This will also move the address data into kernel space */
  1604. if (MSG_CMSG_COMPAT & flags) {
  1605. err = verify_compat_iovec(&msg_sys, iov,
  1606. (struct sockaddr *)&address,
  1607. VERIFY_READ);
  1608. } else
  1609. err = verify_iovec(&msg_sys, iov,
  1610. (struct sockaddr *)&address,
  1611. VERIFY_READ);
  1612. if (err < 0)
  1613. goto out_freeiov;
  1614. total_len = err;
  1615. err = -ENOBUFS;
  1616. if (msg_sys.msg_controllen > INT_MAX)
  1617. goto out_freeiov;
  1618. ctl_len = msg_sys.msg_controllen;
  1619. if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
  1620. err =
  1621. cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl,
  1622. sizeof(ctl));
  1623. if (err)
  1624. goto out_freeiov;
  1625. ctl_buf = msg_sys.msg_control;
  1626. ctl_len = msg_sys.msg_controllen;
  1627. } else if (ctl_len) {
  1628. if (ctl_len > sizeof(ctl)) {
  1629. ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
  1630. if (ctl_buf == NULL)
  1631. goto out_freeiov;
  1632. }
  1633. err = -EFAULT;
  1634. /*
  1635. * Careful! Before this, msg_sys.msg_control contains a user pointer.
  1636. * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
  1637. * checking falls down on this.
  1638. */
  1639. if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
  1640. ctl_len))
  1641. goto out_freectl;
  1642. msg_sys.msg_control = ctl_buf;
  1643. }
  1644. msg_sys.msg_flags = flags;
  1645. if (sock->file->f_flags & O_NONBLOCK)
  1646. msg_sys.msg_flags |= MSG_DONTWAIT;
  1647. err = sock_sendmsg(sock, &msg_sys, total_len);
  1648. out_freectl:
  1649. if (ctl_buf != ctl)
  1650. sock_kfree_s(sock->sk, ctl_buf, ctl_len);
  1651. out_freeiov:
  1652. if (iov != iovstack)
  1653. sock_kfree_s(sock->sk, iov, iov_size);
  1654. out_put:
  1655. fput_light(sock->file, fput_needed);
  1656. out:
  1657. return err;
  1658. }
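The control-message handling above (the on-stack ctl buffer, the sock_kmalloc() fallback and the user-to-kernel copy) is what carries ancillary data such as passed file descriptors. A user-space sketch of sending one descriptor over a connected AF_UNIX socket with sendmsg() and SCM_RIGHTS:

#include <sys/socket.h>
#include <sys/uio.h>
#include <string.h>

/* Send @fd_to_pass over the connected AF_UNIX socket @sock. */
static int send_fd(int sock, int fd_to_pass)
{
        char dummy = '*';
        struct iovec iov = { .iov_base = &dummy, .iov_len = 1 };
        union {
                char buf[CMSG_SPACE(sizeof(int))];
                struct cmsghdr align;           /* force cmsghdr alignment */
        } u;
        struct msghdr msg = {
                .msg_iov = &iov, .msg_iovlen = 1,
                .msg_control = u.buf, .msg_controllen = sizeof(u.buf),
        };
        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
        memcpy(CMSG_DATA(cmsg), &fd_to_pass, sizeof(int));

        return sendmsg(sock, &msg, 0) == 1 ? 0 : -1;
}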
  1659. static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
  1660. struct msghdr *msg_sys, unsigned flags, int nosec)
  1661. {
  1662. struct compat_msghdr __user *msg_compat =
  1663. (struct compat_msghdr __user *)msg;
  1664. struct iovec iovstack[UIO_FASTIOV];
  1665. struct iovec *iov = iovstack;
  1666. unsigned long cmsg_ptr;
  1667. int err, iov_size, total_len, len;
  1668. /* kernel mode address */
  1669. struct sockaddr_storage addr;
  1670. /* user mode address pointers */
  1671. struct sockaddr __user *uaddr;
  1672. int __user *uaddr_len;
  1673. if (MSG_CMSG_COMPAT & flags) {
  1674. if (get_compat_msghdr(msg_sys, msg_compat))
  1675. return -EFAULT;
  1676. }
  1677. else if (copy_from_user(msg_sys, msg, sizeof(struct msghdr)))
  1678. return -EFAULT;
  1679. err = -EMSGSIZE;
  1680. if (msg_sys->msg_iovlen > UIO_MAXIOV)
  1681. goto out;
  1682. /* Check whether to allocate the iovec area */
  1683. err = -ENOMEM;
  1684. iov_size = msg_sys->msg_iovlen * sizeof(struct iovec);
  1685. if (msg_sys->msg_iovlen > UIO_FASTIOV) {
  1686. iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
  1687. if (!iov)
  1688. goto out;
  1689. }
  1690. /*
  1691. * Save the user-mode address (verify_iovec will change the
  1692. * kernel msghdr to use the kernel address space)
  1693. */
  1694. uaddr = (__force void __user *)msg_sys->msg_name;
  1695. uaddr_len = COMPAT_NAMELEN(msg);
  1696. if (MSG_CMSG_COMPAT & flags) {
  1697. err = verify_compat_iovec(msg_sys, iov,
  1698. (struct sockaddr *)&addr,
  1699. VERIFY_WRITE);
  1700. } else
  1701. err = verify_iovec(msg_sys, iov,
  1702. (struct sockaddr *)&addr,
  1703. VERIFY_WRITE);
  1704. if (err < 0)
  1705. goto out_freeiov;
  1706. total_len = err;
  1707. cmsg_ptr = (unsigned long)msg_sys->msg_control;
  1708. msg_sys->msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
  1709. if (sock->file->f_flags & O_NONBLOCK)
  1710. flags |= MSG_DONTWAIT;
  1711. err = (nosec ? sock_recvmsg_nosec : sock_recvmsg)(sock, msg_sys,
  1712. total_len, flags);
  1713. if (err < 0)
  1714. goto out_freeiov;
  1715. len = err;
  1716. if (uaddr != NULL) {
  1717. err = move_addr_to_user((struct sockaddr *)&addr,
  1718. msg_sys->msg_namelen, uaddr,
  1719. uaddr_len);
  1720. if (err < 0)
  1721. goto out_freeiov;
  1722. }
  1723. err = __put_user((msg_sys->msg_flags & ~MSG_CMSG_COMPAT),
  1724. COMPAT_FLAGS(msg));
  1725. if (err)
  1726. goto out_freeiov;
  1727. if (MSG_CMSG_COMPAT & flags)
  1728. err = __put_user((unsigned long)msg_sys->msg_control - cmsg_ptr,
  1729. &msg_compat->msg_controllen);
  1730. else
  1731. err = __put_user((un

[The listing is truncated at this point; the remainder of net/socket.c is not shown.]