
/net/can/af_can.c

https://github.com/Mengqi/linux-2.6
/*
 * af_can.c - Protocol family CAN core module
 *            (used by different CAN protocol modules)
 *
 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 * Send feedback to <socketcan-users@lists.berlios.de>
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/uaccess.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/if_ether.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "af_can.h"

static __initdata const char banner[] = KERN_INFO
        "can: controller area network core (" CAN_VERSION_STRING ")\n";

MODULE_DESCRIPTION("Controller Area Network PF_CAN core");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>, "
              "Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");

MODULE_ALIAS_NETPROTO(PF_CAN);

static int stats_timer __read_mostly = 1;
module_param(stats_timer, int, S_IRUGO);
MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)");

/* receive filters subscribed for 'all' CAN devices */
struct dev_rcv_lists can_rx_alldev_list;
static DEFINE_SPINLOCK(can_rcvlists_lock);

static struct kmem_cache *rcv_cache __read_mostly;

/* table of registered CAN protocols */
static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly;
static DEFINE_MUTEX(proto_tab_lock);

struct timer_list can_stattimer;   /* timer for statistics update */
struct s_stats    can_stats;       /* packet statistics */
struct s_pstats   can_pstats;      /* receive list statistics */

/*
 * af_can socket functions
 */

int can_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
        struct sock *sk = sock->sk;

        switch (cmd) {

        case SIOCGSTAMP:
                return sock_get_timestamp(sk, (struct timeval __user *)arg);

        default:
                return -ENOIOCTLCMD;
        }
}
EXPORT_SYMBOL(can_ioctl);

static void can_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);
}

static const struct can_proto *can_get_proto(int protocol)
{
        const struct can_proto *cp;

        rcu_read_lock();
        cp = rcu_dereference(proto_tab[protocol]);
        if (cp && !try_module_get(cp->prot->owner))
                cp = NULL;
        rcu_read_unlock();

        return cp;
}

static inline void can_put_proto(const struct can_proto *cp)
{
        module_put(cp->prot->owner);
}

static int can_create(struct net *net, struct socket *sock, int protocol,
                      int kern)
{
        struct sock *sk;
        const struct can_proto *cp;
        int err = 0;

        sock->state = SS_UNCONNECTED;

        if (protocol < 0 || protocol >= CAN_NPROTO)
                return -EINVAL;

        if (!net_eq(net, &init_net))
                return -EAFNOSUPPORT;

        cp = can_get_proto(protocol);

#ifdef CONFIG_MODULES
        if (!cp) {
                /* try to load protocol module if kernel is modular */
                err = request_module("can-proto-%d", protocol);

                /*
                 * In case of error we only print a message but don't
                 * return the error code immediately.  Below we will
                 * return -EPROTONOSUPPORT
                 */
                if (err)
                        printk_ratelimited(KERN_ERR "can: request_module "
                                           "(can-proto-%d) failed.\n", protocol);

                cp = can_get_proto(protocol);
        }
#endif

        /* check for available protocol and correct usage */

        if (!cp)
                return -EPROTONOSUPPORT;

        if (cp->type != sock->type) {
                err = -EPROTOTYPE;
                goto errout;
        }

        sock->ops = cp->ops;

        sk = sk_alloc(net, PF_CAN, GFP_KERNEL, cp->prot);
        if (!sk) {
                err = -ENOMEM;
                goto errout;
        }

        sock_init_data(sock, sk);
        sk->sk_destruct = can_sock_destruct;

        if (sk->sk_prot->init)
                err = sk->sk_prot->init(sk);

        if (err) {
                /* release sk on errors */
                sock_orphan(sk);
                sock_put(sk);
        }

 errout:
        can_put_proto(cp);
        return err;
}

/*
 * af_can tx path
 */

/**
 * can_send - transmit a CAN frame (optionally with local loopback)
 * @skb: pointer to socket buffer with CAN frame in data section
 * @loop: loopback for listeners on local CAN sockets (recommended default!)
 *
 * Due to the loopback this routine must not be called from hardirq context.
 *
 * Return:
 *  0 on success
 *  -ENETDOWN when the selected interface is down
 *  -ENOBUFS on full driver queue (see net_xmit_errno())
 *  -ENOMEM when local loopback failed at calling skb_clone()
 *  -EPERM when trying to send on a non-CAN interface
 *  -EINVAL when the skb->data does not contain a valid CAN frame
 */
int can_send(struct sk_buff *skb, int loop)
{
        struct sk_buff *newskb = NULL;
        struct can_frame *cf = (struct can_frame *)skb->data;
        int err;

        if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
                kfree_skb(skb);
                return -EINVAL;
        }

        if (skb->dev->type != ARPHRD_CAN) {
                kfree_skb(skb);
                return -EPERM;
        }

        if (!(skb->dev->flags & IFF_UP)) {
                kfree_skb(skb);
                return -ENETDOWN;
        }

        skb->protocol = htons(ETH_P_CAN);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);

        if (loop) {
                /* local loopback of sent CAN frames */

                /* indication for the CAN driver: do loopback */
                skb->pkt_type = PACKET_LOOPBACK;

                /*
                 * The reference to the originating sock may be required
                 * by the receiving socket to check whether the frame is
                 * its own. Example: can_raw sockopt CAN_RAW_RECV_OWN_MSGS
                 * Therefore we have to ensure that skb->sk remains the
                 * reference to the originating sock by restoring skb->sk
                 * after each skb_clone() or skb_orphan() usage.
                 */

                if (!(skb->dev->flags & IFF_ECHO)) {
                        /*
                         * If the interface is not capable to do loopback
                         * itself, we do it here.
                         */
                        newskb = skb_clone(skb, GFP_ATOMIC);
                        if (!newskb) {
                                kfree_skb(skb);
                                return -ENOMEM;
                        }

                        newskb->sk = skb->sk;
                        newskb->ip_summed = CHECKSUM_UNNECESSARY;
                        newskb->pkt_type = PACKET_BROADCAST;
                }
        } else {
                /* indication for the CAN driver: no loopback required */
                skb->pkt_type = PACKET_HOST;
        }

        /* send to netdevice */
        err = dev_queue_xmit(skb);
        if (err > 0)
                err = net_xmit_errno(err);

        if (err) {
                kfree_skb(newskb);
                return err;
        }

        if (newskb)
                netif_rx_ni(newskb);

        /* update statistics */
        can_stats.tx_frames++;
        can_stats.tx_frames_delta++;

        return 0;
}
EXPORT_SYMBOL(can_send);
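
/*
 * Illustrative sketch (not part of af_can.c): how a kernel-space user of
 * can_send() might build and transmit a single frame, roughly following
 * what the can_raw protocol does in its sendmsg path. The interface name
 * "can0" and the helper name example_send_frame() are hypothetical.
 */
static int example_send_frame(void)
{
        struct net_device *dev;
        struct sk_buff *skb;
        struct can_frame *cf;
        int err;

        dev = dev_get_by_name(&init_net, "can0");
        if (!dev)
                return -ENODEV;

        skb = alloc_skb(sizeof(struct can_frame), GFP_KERNEL);
        if (!skb) {
                dev_put(dev);
                return -ENOMEM;
        }
        skb->dev = dev;

        cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
        memset(cf, 0, sizeof(*cf));
        cf->can_id  = 0x123;
        cf->can_dlc = 2;
        cf->data[0] = 0xde;
        cf->data[1] = 0xad;

        /* can_send() consumes the skb in all cases; loop = 1 requests loopback */
        err = can_send(skb, 1);
        dev_put(dev);
        return err;
}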

/*
 * af_can rx path
 */

static struct dev_rcv_lists *find_dev_rcv_lists(struct net_device *dev)
{
        if (!dev)
                return &can_rx_alldev_list;
        else
                return (struct dev_rcv_lists *)dev->ml_priv;
}

/**
 * find_rcv_list - determine optimal filterlist inside device filter struct
 * @can_id: pointer to CAN identifier of a given can_filter
 * @mask: pointer to CAN mask of a given can_filter
 * @d: pointer to the device filter struct
 *
 * Description:
 *  Returns the optimal filterlist to reduce the filter handling in the
 *  receive path. This function is called by service functions that need
 *  to register or unregister a can_filter in the filter lists.
 *
 *  A filter matches in general, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  so every bit set in the mask (even CAN_EFF_FLAG, CAN_RTR_FLAG) describes
 *  relevant bits for the filter.
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask). For error frames
 *  there is a special filterlist and a special rx path filter handling.
 *
 * Return:
 *  Pointer to optimal filterlist for the given can_id/mask pair.
 *  Consistency checked mask.
 *  Reduced can_id to have a preprocessed filter compare value.
 */
static struct hlist_head *find_rcv_list(canid_t *can_id, canid_t *mask,
                                        struct dev_rcv_lists *d)
{
        canid_t inv = *can_id & CAN_INV_FILTER; /* save flag before masking */

        /* filter for error frames in extra filterlist */
        if (*mask & CAN_ERR_FLAG) {
                /* clear CAN_ERR_FLAG in filter entry */
                *mask &= CAN_ERR_MASK;
                return &d->rx[RX_ERR];
        }

        /* with cleared CAN_ERR_FLAG we have a simple mask/value filterpair */

#define CAN_EFF_RTR_FLAGS (CAN_EFF_FLAG | CAN_RTR_FLAG)

        /* ensure valid values in can_mask for 'SFF only' frame filtering */
        if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG))
                *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS);

        /* reduce condition testing at receive time */
        *can_id &= *mask;

        /* inverse can_id/can_mask filter */
        if (inv)
                return &d->rx[RX_INV];

        /* mask == 0 => no condition testing at receive time */
        if (!(*mask))
                return &d->rx[RX_ALL];

        /* extra filterlists for the subscription of a single non-RTR can_id */
        if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) &&
            !(*can_id & CAN_RTR_FLAG)) {

                if (*can_id & CAN_EFF_FLAG) {
                        if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) {
                                /* RFC: a future use-case for hash-tables? */
                                return &d->rx[RX_EFF];
                        }
                } else {
                        if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS))
                                return &d->rx_sff[*can_id];
                }
        }

        /* default: filter via can_id/can_mask */
        return &d->rx[RX_FIL];
}
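
/*
 * Illustrative sketch (not part of af_can.c): the matching rule described
 * above, written out as a helper. For example, a filter of can_id = 0x123
 * and mask = (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) matches exactly
 * the single non-RTR SFF id 0x123 and would therefore be placed in
 * d->rx_sff[0x123] by find_rcv_list(). The helper name is hypothetical.
 */
static inline bool example_filter_match(canid_t rx_can_id,
                                        canid_t can_id, canid_t mask)
{
        /* a received frame matches a (can_id, mask) filter pair when ... */
        return (rx_can_id & mask) == (can_id & mask);
}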

/**
 * can_rx_register - subscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => subscribe from 'all' CAN devices list)
 * @can_id: CAN identifier (see description)
 * @mask: CAN mask (see description)
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 * @ident: string for calling module identification
 *
 * Description:
 *  Invokes the callback function with the received sk_buff and the given
 *  parameter 'data' on a matching receive filter. A filter matches, when
 *
 *          <received_can_id> & mask == can_id & mask
 *
 *  The filter can be inverted (CAN_INV_FILTER bit set in can_id) or it can
 *  filter for error frames (CAN_ERR_FLAG bit set in mask).
 *
 *  The provided pointer to the sk_buff is guaranteed to be valid as long as
 *  the callback function is running. The callback function must *not* free
 *  the given sk_buff while processing its task. When the given sk_buff is
 *  needed after the end of the callback function it must be cloned inside
 *  the callback function with skb_clone().
 *
 * Return:
 *  0 on success
 *  -ENOMEM on missing cache mem to create subscription entry
 *  -ENODEV unknown device
 */
int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
                    void (*func)(struct sk_buff *, void *), void *data,
                    char *ident)
{
        struct receiver *r;
        struct hlist_head *rl;
        struct dev_rcv_lists *d;
        int err = 0;

        /* insert new receiver  (dev,canid,mask) -> (func,data) */

        if (dev && dev->type != ARPHRD_CAN)
                return -ENODEV;

        r = kmem_cache_alloc(rcv_cache, GFP_KERNEL);
        if (!r)
                return -ENOMEM;

        spin_lock(&can_rcvlists_lock);

        d = find_dev_rcv_lists(dev);
        if (d) {
                rl = find_rcv_list(&can_id, &mask, d);

                r->can_id  = can_id;
                r->mask    = mask;
                r->matches = 0;
                r->func    = func;
                r->data    = data;
                r->ident   = ident;

                hlist_add_head_rcu(&r->list, rl);
                d->entries++;

                can_pstats.rcv_entries++;
                if (can_pstats.rcv_entries_max < can_pstats.rcv_entries)
                        can_pstats.rcv_entries_max = can_pstats.rcv_entries;
        } else {
                kmem_cache_free(rcv_cache, r);
                err = -ENODEV;
        }

        spin_unlock(&can_rcvlists_lock);

        return err;
}
EXPORT_SYMBOL(can_rx_register);
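
/*
 * Illustrative sketch (not part of af_can.c): a kernel-space subscriber
 * registering a receive callback for the single SFF id 0x123 on all CAN
 * interfaces (dev == NULL). The "example_*" names are hypothetical.
 */
static void example_rx_callback(struct sk_buff *skb, void *data)
{
        struct can_frame *cf = (struct can_frame *)skb->data;

        /* the skb must not be freed here; clone it if needed after return */
        printk(KERN_DEBUG "example: can_id %X, can_dlc %d\n",
               cf->can_id, cf->can_dlc);
}

static int example_subscribe(void)
{
        /* this id/mask pair lands in the rx_sff[] list, see find_rcv_list() */
        return can_rx_register(NULL, 0x123,
                               CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
                               example_rx_callback, NULL, "example");
}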

/*
 * can_rx_delete_receiver - rcu callback for single receiver entry removal
 */
static void can_rx_delete_receiver(struct rcu_head *rp)
{
        struct receiver *r = container_of(rp, struct receiver, rcu);

        kmem_cache_free(rcv_cache, r);
}

/**
 * can_rx_unregister - unsubscribe CAN frames from a specific interface
 * @dev: pointer to netdevice (NULL => unsubscribe from 'all' CAN devices list)
 * @can_id: CAN identifier
 * @mask: CAN mask
 * @func: callback function on filter match
 * @data: returned parameter for callback function
 *
 * Description:
 *  Removes subscription entry depending on given (subscription) values.
 */
void can_rx_unregister(struct net_device *dev, canid_t can_id, canid_t mask,
                       void (*func)(struct sk_buff *, void *), void *data)
{
        struct receiver *r = NULL;
        struct hlist_head *rl;
        struct hlist_node *next;
        struct dev_rcv_lists *d;

        if (dev && dev->type != ARPHRD_CAN)
                return;

        spin_lock(&can_rcvlists_lock);

        d = find_dev_rcv_lists(dev);
        if (!d) {
                printk(KERN_ERR "BUG: receive list not found for "
                       "dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
                goto out;
        }

        rl = find_rcv_list(&can_id, &mask, d);

        /*
         * Search the receiver list for the item to delete.  This should
         * exist, since no receiver may be unregistered that hasn't
         * been registered before.
         */
        hlist_for_each_entry_rcu(r, next, rl, list) {
                if (r->can_id == can_id && r->mask == mask &&
                    r->func == func && r->data == data)
                        break;
        }

        /*
         * Check for bugs in CAN protocol implementations:
         * If no matching list item was found, the list cursor variable next
         * will be NULL, while r will point to the last item of the list.
         */
        if (!next) {
                printk(KERN_ERR "BUG: receive list entry not found for "
                       "dev %s, id %03X, mask %03X\n",
                       DNAME(dev), can_id, mask);
                r = NULL;
                goto out;
        }

        hlist_del_rcu(&r->list);
        d->entries--;

        if (can_pstats.rcv_entries > 0)
                can_pstats.rcv_entries--;

        /* remove device structure requested by NETDEV_UNREGISTER */
        if (d->remove_on_zero_entries && !d->entries) {
                kfree(d);
                dev->ml_priv = NULL;
        }

 out:
        spin_unlock(&can_rcvlists_lock);

        /* schedule the receiver item for deletion */
        if (r)
                call_rcu(&r->rcu, can_rx_delete_receiver);
}
EXPORT_SYMBOL(can_rx_unregister);
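
/*
 * Illustrative sketch (not part of af_can.c): undoing the subscription from
 * the example above. The (dev, can_id, mask, func, data) tuple must match
 * the earlier can_rx_register() call exactly.
 */
static void example_unsubscribe(void)
{
        can_rx_unregister(NULL, 0x123,
                          CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG,
                          example_rx_callback, NULL);
}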

static inline void deliver(struct sk_buff *skb, struct receiver *r)
{
        r->func(skb, r->data);
        r->matches++;
}

static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
{
        struct receiver *r;
        struct hlist_node *n;
        int matches = 0;
        struct can_frame *cf = (struct can_frame *)skb->data;
        canid_t can_id = cf->can_id;

        if (d->entries == 0)
                return 0;

        if (can_id & CAN_ERR_FLAG) {
                /* check for error frame entries only */
                hlist_for_each_entry_rcu(r, n, &d->rx[RX_ERR], list) {
                        if (can_id & r->mask) {
                                deliver(skb, r);
                                matches++;
                        }
                }
                return matches;
        }

        /* check for unfiltered entries */
        hlist_for_each_entry_rcu(r, n, &d->rx[RX_ALL], list) {
                deliver(skb, r);
                matches++;
        }

        /* check for can_id/mask entries */
        hlist_for_each_entry_rcu(r, n, &d->rx[RX_FIL], list) {
                if ((can_id & r->mask) == r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check for inverted can_id/mask entries */
        hlist_for_each_entry_rcu(r, n, &d->rx[RX_INV], list) {
                if ((can_id & r->mask) != r->can_id) {
                        deliver(skb, r);
                        matches++;
                }
        }

        /* check filterlists for single non-RTR can_ids */
        if (can_id & CAN_RTR_FLAG)
                return matches;

        if (can_id & CAN_EFF_FLAG) {
                hlist_for_each_entry_rcu(r, n, &d->rx[RX_EFF], list) {
                        if (r->can_id == can_id) {
                                deliver(skb, r);
                                matches++;
                        }
                }
        } else {
                can_id &= CAN_SFF_MASK;
                hlist_for_each_entry_rcu(r, n, &d->rx_sff[can_id], list) {
                        deliver(skb, r);
                        matches++;
                }
        }

        return matches;
}

static int can_rcv(struct sk_buff *skb, struct net_device *dev,
                   struct packet_type *pt, struct net_device *orig_dev)
{
        struct dev_rcv_lists *d;
        struct can_frame *cf = (struct can_frame *)skb->data;
        int matches;

        if (!net_eq(dev_net(dev), &init_net))
                goto drop;

        if (WARN_ONCE(dev->type != ARPHRD_CAN ||
                      skb->len != sizeof(struct can_frame) ||
                      cf->can_dlc > 8,
                      "PF_CAN: dropped non conform skbuf: "
                      "dev type %d, len %d, can_dlc %d\n",
                      dev->type, skb->len, cf->can_dlc))
                goto drop;

        /* update statistics */
        can_stats.rx_frames++;
        can_stats.rx_frames_delta++;

        rcu_read_lock();

        /* deliver the packet to sockets listening on all devices */
        matches = can_rcv_filter(&can_rx_alldev_list, skb);

        /* find receive list for this device */
        d = find_dev_rcv_lists(dev);
        if (d)
                matches += can_rcv_filter(d, skb);

        rcu_read_unlock();

        /* consume the skbuff allocated by the netdevice driver */
        consume_skb(skb);

        if (matches > 0) {
                can_stats.matches++;
                can_stats.matches_delta++;
        }

        return NET_RX_SUCCESS;

drop:
        kfree_skb(skb);
        return NET_RX_DROP;
}

/*
 * af_can protocol functions
 */

/**
 * can_proto_register - register CAN transport protocol
 * @cp: pointer to CAN protocol structure
 *
 * Return:
 *  0 on success
 *  -EINVAL invalid (out of range) protocol number
 *  -EBUSY  protocol already in use
 *  -ENOBUFS if proto_register() fails
 */
int can_proto_register(const struct can_proto *cp)
{
        int proto = cp->protocol;
        int err = 0;

        if (proto < 0 || proto >= CAN_NPROTO) {
                printk(KERN_ERR "can: protocol number %d out of range\n",
                       proto);
                return -EINVAL;
        }

        err = proto_register(cp->prot, 0);
        if (err < 0)
                return err;

        mutex_lock(&proto_tab_lock);

        if (proto_tab[proto]) {
                printk(KERN_ERR "can: protocol %d already registered\n",
                       proto);
                err = -EBUSY;
        } else
                rcu_assign_pointer(proto_tab[proto], cp);

        mutex_unlock(&proto_tab_lock);

        if (err < 0)
                proto_unregister(cp->prot);

        return err;
}
EXPORT_SYMBOL(can_proto_register);

/**
 * can_proto_unregister - unregister CAN transport protocol
 * @cp: pointer to CAN protocol structure
 */
void can_proto_unregister(const struct can_proto *cp)
{
        int proto = cp->protocol;

        mutex_lock(&proto_tab_lock);
        BUG_ON(proto_tab[proto] != cp);
        rcu_assign_pointer(proto_tab[proto], NULL);
        mutex_unlock(&proto_tab_lock);

        synchronize_rcu();

        proto_unregister(cp->prot);
}
EXPORT_SYMBOL(can_proto_unregister);
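
/*
 * Illustrative sketch (not part of af_can.c): the skeleton a CAN transport
 * protocol module would use to hook into proto_tab[]. The protocol number,
 * the "example_*" names and the minimal proto_ops are hypothetical; a real
 * protocol fills in release/bind/sendmsg/recvmsg etc. The MODULE_ALIAS
 * matches the "can-proto-%d" request_module() call in can_create().
 */
static const struct proto_ops example_can_ops = {
        .family = PF_CAN,
        .owner  = THIS_MODULE,
        /* ... socket operations of the protocol ... */
};

static struct proto example_can_prot __read_mostly = {
        .name     = "EXAMPLE_CAN",
        .owner    = THIS_MODULE,
        .obj_size = sizeof(struct sock),
};

static const struct can_proto example_can_proto = {
        .type     = SOCK_DGRAM,
        .protocol = 6,          /* hypothetical, must be < CAN_NPROTO */
        .ops      = &example_can_ops,
        .prot     = &example_can_prot,
};

static __init int example_proto_init(void)
{
        return can_proto_register(&example_can_proto);
}

static __exit void example_proto_exit(void)
{
        can_proto_unregister(&example_can_proto);
}

module_init(example_proto_init);
module_exit(example_proto_exit);
MODULE_ALIAS("can-proto-6");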

/*
 * af_can notifier to create/remove CAN netdevice specific structs
 */
static int can_notifier(struct notifier_block *nb, unsigned long msg,
                        void *data)
{
        struct net_device *dev = (struct net_device *)data;
        struct dev_rcv_lists *d;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        if (dev->type != ARPHRD_CAN)
                return NOTIFY_DONE;

        switch (msg) {

        case NETDEV_REGISTER:

                /* create new dev_rcv_lists for this device */
                d = kzalloc(sizeof(*d), GFP_KERNEL);
                if (!d) {
                        printk(KERN_ERR
                               "can: allocation of receive list failed\n");
                        return NOTIFY_DONE;
                }
                BUG_ON(dev->ml_priv);
                dev->ml_priv = d;

                break;

        case NETDEV_UNREGISTER:
                spin_lock(&can_rcvlists_lock);

                d = dev->ml_priv;
                if (d) {
                        if (d->entries)
                                d->remove_on_zero_entries = 1;
                        else {
                                kfree(d);
                                dev->ml_priv = NULL;
                        }
                } else
                        printk(KERN_ERR "can: notifier: receive list not "
                               "found for dev %s\n", dev->name);

                spin_unlock(&can_rcvlists_lock);

                break;
        }

        return NOTIFY_DONE;
}

/*
 * af_can module init/exit functions
 */
static struct packet_type can_packet __read_mostly = {
        .type = cpu_to_be16(ETH_P_CAN),
        .dev  = NULL,
        .func = can_rcv,
};

static const struct net_proto_family can_family_ops = {
        .family = PF_CAN,
        .create = can_create,
        .owner  = THIS_MODULE,
};

/* notifier block for netdevice event */
static struct notifier_block can_netdev_notifier __read_mostly = {
        .notifier_call = can_notifier,
};

static __init int can_init(void)
{
        printk(banner);

        memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));

        rcv_cache = kmem_cache_create("can_receiver", sizeof(struct receiver),
                                      0, 0, NULL);
        if (!rcv_cache)
                return -ENOMEM;

        if (stats_timer) {
                /* the statistics are updated every second (timer triggered) */
                setup_timer(&can_stattimer, can_stat_update, 0);
                mod_timer(&can_stattimer, round_jiffies(jiffies + HZ));
        } else
                can_stattimer.function = NULL;

        can_init_proc();

        /* protocol register */
        sock_register(&can_family_ops);
        register_netdevice_notifier(&can_netdev_notifier);
        dev_add_pack(&can_packet);

        return 0;
}

static __exit void can_exit(void)
{
        struct net_device *dev;

        if (stats_timer)
                del_timer(&can_stattimer);

        can_remove_proc();

        /* protocol unregister */
        dev_remove_pack(&can_packet);
        unregister_netdevice_notifier(&can_netdev_notifier);
        sock_unregister(PF_CAN);

        /* remove created dev_rcv_lists from still registered CAN devices */
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
                if (dev->type == ARPHRD_CAN && dev->ml_priv) {

                        struct dev_rcv_lists *d = dev->ml_priv;

                        BUG_ON(d->entries);
                        kfree(d);
                        dev->ml_priv = NULL;
                }
        }
        rcu_read_unlock();

        rcu_barrier(); /* Wait for completion of call_rcu()'s */

        kmem_cache_destroy(rcv_cache);
}

module_init(can_init);
module_exit(can_exit);