PageRenderTime 1247ms CodeModel.GetById 11ms RepoModel.GetById 1ms app.codeStats 0ms

/drivers/net/can/bfin_can.c

https://github.com/Mengqi/linux-2.6
C | 693 lines | 506 code | 117 blank | 70 comment | 75 complexity | 78e4c9903eab492ce022c1dcea6bdeea MD5 | raw file
  1. /*
  2. * Blackfin On-Chip CAN Driver
  3. *
  4. * Copyright 2004-2009 Analog Devices Inc.
  5. *
  6. * Enter bugs at http://blackfin.uclinux.org/
  7. *
  8. * Licensed under the GPL-2 or later.
  9. */
  10. #include <linux/module.h>
  11. #include <linux/init.h>
  12. #include <linux/kernel.h>
  13. #include <linux/bitops.h>
  14. #include <linux/interrupt.h>
  15. #include <linux/errno.h>
  16. #include <linux/netdevice.h>
  17. #include <linux/skbuff.h>
  18. #include <linux/platform_device.h>
  19. #include <linux/can/dev.h>
  20. #include <linux/can/error.h>
  21. #include <asm/bfin_can.h>
  22. #include <asm/portmux.h>
  23. #define DRV_NAME "bfin_can"
  24. #define BFIN_CAN_TIMEOUT 100
  25. #define TX_ECHO_SKB_MAX 1
/*
 * bfin can private data
 *
 * Per-device state, embedded in the netdev private area by
 * alloc_candev().
 */
struct bfin_can_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;		/* back-pointer to the net device */
	void __iomem *membase;		/* base of the CAN controller MMRs */
	int rx_irq;			/* mailbox receive interrupt line */
	int tx_irq;			/* mailbox transmit interrupt line */
	int err_irq;			/* error/status interrupt line */
	unsigned short *pin_list;	/* portmux pin list from platform data */
};
  38. /*
  39. * bfin can timing parameters
  40. */
  41. static struct can_bittiming_const bfin_can_bittiming_const = {
  42. .name = DRV_NAME,
  43. .tseg1_min = 1,
  44. .tseg1_max = 16,
  45. .tseg2_min = 1,
  46. .tseg2_max = 8,
  47. .sjw_max = 4,
  48. /*
  49. * Although the BRP field can be set to any value, it is recommended
  50. * that the value be greater than or equal to 4, as restrictions
  51. * apply to the bit timing configuration when BRP is less than 4.
  52. */
  53. .brp_min = 4,
  54. .brp_max = 1024,
  55. .brp_inc = 1,
  56. };
  57. static int bfin_can_set_bittiming(struct net_device *dev)
  58. {
  59. struct bfin_can_priv *priv = netdev_priv(dev);
  60. struct bfin_can_regs __iomem *reg = priv->membase;
  61. struct can_bittiming *bt = &priv->can.bittiming;
  62. u16 clk, timing;
  63. clk = bt->brp - 1;
  64. timing = ((bt->sjw - 1) << 8) | (bt->prop_seg + bt->phase_seg1 - 1) |
  65. ((bt->phase_seg2 - 1) << 4);
  66. /*
  67. * If the SAM bit is set, the input signal is oversampled three times
  68. * at the SCLK rate.
  69. */
  70. if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
  71. timing |= SAM;
  72. bfin_write(&reg->clock, clk);
  73. bfin_write(&reg->timing, timing);
  74. dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
  75. clk, timing);
  76. return 0;
  77. }
/*
 * Put the controller into configuration (reset) mode and set up the
 * fixed mailbox layout: mailboxes 1-16 RX, 17-32 TX, with one pair of
 * mailboxes each for standard-ID and extended-ID reception plus one
 * transmit mailbox.  Leaves priv->can.state at CAN_STATE_STOPPED.
 */
static void bfin_can_set_reset_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;
	int i;

	/* disable interrupts */
	bfin_write(&reg->mbim1, 0);
	bfin_write(&reg->mbim2, 0);
	bfin_write(&reg->gim, 0);

	/* reset can and enter configuration mode */
	bfin_write(&reg->control, SRS | CCR);
	SSYNC();
	bfin_write(&reg->control, CCR);
	SSYNC();
	/*
	 * Busy-wait (up to BFIN_CAN_TIMEOUT * 10us) for the controller to
	 * acknowledge configuration mode via CCA.  A timeout here means the
	 * hardware is wedged, hence BUG().
	 */
	while (!(bfin_read(&reg->control) & CCA)) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
					"fail to enter configuration mode\n");
			BUG();
		}
	}

	/*
	 * All mailbox configurations are marked as inactive
	 * by writing to CAN Mailbox Configuration Registers 1 and 2
	 * For all bits: 0 - Mailbox disabled, 1 - Mailbox enabled
	 */
	bfin_write(&reg->mc1, 0);
	bfin_write(&reg->mc2, 0);

	/* Set Mailbox Direction */
	bfin_write(&reg->md1, 0xFFFF);	/* mailbox 1-16 are RX */
	bfin_write(&reg->md2, 0);	/* mailbox 17-32 are TX */

	/* RECEIVE_STD_CHL: accept-any mask for standard-ID frames */
	for (i = 0; i < 2; i++) {
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id0, 0);
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].id1, AME);
		bfin_write(&reg->chl[RECEIVE_STD_CHL + i].dlc, 0);
		bfin_write(&reg->msk[RECEIVE_STD_CHL + i].amh, 0x1FFF);
		bfin_write(&reg->msk[RECEIVE_STD_CHL + i].aml, 0xFFFF);
	}

	/* RECEIVE_EXT_CHL: same, with IDE set for extended-ID frames */
	for (i = 0; i < 2; i++) {
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id0, 0);
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].id1, AME | IDE);
		bfin_write(&reg->chl[RECEIVE_EXT_CHL + i].dlc, 0);
		bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].amh, 0x1FFF);
		bfin_write(&reg->msk[RECEIVE_EXT_CHL + i].aml, 0xFFFF);
	}

	/* enable only the mailboxes configured above */
	bfin_write(&reg->mc2, BIT(TRANSMIT_CHL - 16));
	bfin_write(&reg->mc1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	SSYNC();

	priv->can.state = CAN_STATE_STOPPED;
}
/*
 * Leave configuration mode and enable the interrupts needed for normal
 * operation.  Counterpart of bfin_can_set_reset_mode().
 */
static void bfin_can_set_normal_mode(struct net_device *dev)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	/*
	 * leave configuration mode
	 */
	bfin_write(&reg->control, bfin_read(&reg->control) & ~CCR);

	/* busy-wait (up to BFIN_CAN_TIMEOUT * 10us) for CCA to deassert */
	while (bfin_read(&reg->status) & CCA) {
		udelay(10);
		if (--timeout == 0) {
			dev_err(dev->dev.parent,
					"fail to leave configuration mode\n");
			BUG();
		}
	}

	/*
	 * clear _All_ tx and rx interrupts
	 */
	bfin_write(&reg->mbtif1, 0xFFFF);
	bfin_write(&reg->mbtif2, 0xFFFF);
	bfin_write(&reg->mbrif1, 0xFFFF);
	bfin_write(&reg->mbrif2, 0xFFFF);

	/*
	 * clear global interrupt status register
	 */
	bfin_write(&reg->gis, 0x7FF); /* overwrites with '1' */

	/*
	 * Initialize Interrupts
	 * - set bits in the mailbox interrupt mask register
	 * - global interrupt mask
	 */
	bfin_write(&reg->mbim1, BIT(RECEIVE_STD_CHL) + BIT(RECEIVE_EXT_CHL));
	bfin_write(&reg->mbim2, BIT(TRANSMIT_CHL - 16));
	bfin_write(&reg->gim, EPIM | BOIM | RMLIM);
	SSYNC();
}
  170. static void bfin_can_start(struct net_device *dev)
  171. {
  172. struct bfin_can_priv *priv = netdev_priv(dev);
  173. /* enter reset mode */
  174. if (priv->can.state != CAN_STATE_STOPPED)
  175. bfin_can_set_reset_mode(dev);
  176. /* leave reset mode */
  177. bfin_can_set_normal_mode(dev);
  178. }
  179. static int bfin_can_set_mode(struct net_device *dev, enum can_mode mode)
  180. {
  181. switch (mode) {
  182. case CAN_MODE_START:
  183. bfin_can_start(dev);
  184. if (netif_queue_stopped(dev))
  185. netif_wake_queue(dev);
  186. break;
  187. default:
  188. return -EOPNOTSUPP;
  189. }
  190. return 0;
  191. }
  192. static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
  193. {
  194. struct bfin_can_priv *priv = netdev_priv(dev);
  195. struct bfin_can_regs __iomem *reg = priv->membase;
  196. struct can_frame *cf = (struct can_frame *)skb->data;
  197. u8 dlc = cf->can_dlc;
  198. canid_t id = cf->can_id;
  199. u8 *data = cf->data;
  200. u16 val;
  201. int i;
  202. if (can_dropped_invalid_skb(dev, skb))
  203. return NETDEV_TX_OK;
  204. netif_stop_queue(dev);
  205. /* fill id */
  206. if (id & CAN_EFF_FLAG) {
  207. bfin_write(&reg->chl[TRANSMIT_CHL].id0, id);
  208. val = ((id & 0x1FFF0000) >> 16) | IDE;
  209. } else
  210. val = (id << 2);
  211. if (id & CAN_RTR_FLAG)
  212. val |= RTR;
  213. bfin_write(&reg->chl[TRANSMIT_CHL].id1, val | AME);
  214. /* fill payload */
  215. for (i = 0; i < 8; i += 2) {
  216. val = ((7 - i) < dlc ? (data[7 - i]) : 0) +
  217. ((6 - i) < dlc ? (data[6 - i] << 8) : 0);
  218. bfin_write(&reg->chl[TRANSMIT_CHL].data[i], val);
  219. }
  220. /* fill data length code */
  221. bfin_write(&reg->chl[TRANSMIT_CHL].dlc, dlc);
  222. can_put_echo_skb(skb, dev, 0);
  223. /* set transmit request */
  224. bfin_write(&reg->trs2, BIT(TRANSMIT_CHL - 16));
  225. return 0;
  226. }
/*
 * Pull one received frame out of the matching RX mailbox and hand it
 * to the network stack.  @isrc is the mailbox receive interrupt source
 * mask: BIT(RECEIVE_EXT_CHL) selects the extended-ID mailbox, any
 * other source is treated as the standard-ID mailbox.
 */
static void bfin_can_rx(struct net_device *dev, u16 isrc)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct can_frame *cf;
	struct sk_buff *skb;
	int obj;
	int i;
	u16 val;

	skb = alloc_can_skb(dev, &cf);
	if (skb == NULL)
		return;		/* out of memory: frame is dropped */

	/* get id */
	if (isrc & BIT(RECEIVE_EXT_CHL)) {
		/* extended frame format (EFF): 29-bit id split over id1/id0 */
		cf->can_id = ((bfin_read(&reg->chl[RECEIVE_EXT_CHL].id1)
				& 0x1FFF) << 16)
				+ bfin_read(&reg->chl[RECEIVE_EXT_CHL].id0);
		cf->can_id |= CAN_EFF_FLAG;
		obj = RECEIVE_EXT_CHL;
	} else {
		/* standard frame format (SFF): 11-bit id in id1 bits 2-12 */
		cf->can_id = (bfin_read(&reg->chl[RECEIVE_STD_CHL].id1)
				& 0x1ffc) >> 2;
		obj = RECEIVE_STD_CHL;
	}
	if (bfin_read(&reg->chl[obj].id1) & RTR)
		cf->can_id |= CAN_RTR_FLAG;

	/* get data length code */
	cf->can_dlc = get_can_dlc(bfin_read(&reg->chl[obj].dlc) & 0xF);

	/* get payload: unpack each 16-bit mailbox word into two bytes,
	 * mirroring the packing done in bfin_can_start_xmit() */
	for (i = 0; i < 8; i += 2) {
		val = bfin_read(&reg->chl[obj].data[i]);
		cf->data[7 - i] = (7 - i) < cf->can_dlc ? val : 0;
		cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
	}

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
}
/*
 * Build and deliver an error frame for the conditions signalled in
 * @isrc (global interrupt status) and @status (error status register),
 * and update priv->can.state accordingly.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated (the
 * error condition is then not reported to userspace).
 */
static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
{
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	enum can_state state = priv->can.state;

	skb = alloc_can_err_skb(dev, &cf);
	if (skb == NULL)
		return -ENOMEM;

	if (isrc & RMLIS) {
		/* data overrun interrupt */
		dev_dbg(dev->dev.parent, "data overrun interrupt\n");
		cf->can_id |= CAN_ERR_CRTL;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	}

	if (isrc & BOIS) {
		/* bus-off: notify the CAN core so it can restart the device */
		dev_dbg(dev->dev.parent, "bus-off mode interrupt\n");
		state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		can_bus_off(dev);
	}

	if (isrc & EPIS) {
		/* error passive interrupt */
		dev_dbg(dev->dev.parent, "error passive interrupt\n");
		state = CAN_STATE_ERROR_PASSIVE;
	}

	if ((isrc & EWTIS) || (isrc & EWRIS)) {
		dev_dbg(dev->dev.parent,
				"Error Warning Transmit/Receive Interrupt\n");
		state = CAN_STATE_ERROR_WARNING;
	}

	/* on entering warning/passive, report which direction is worse */
	if (state != priv->can.state && (state == CAN_STATE_ERROR_WARNING ||
				state == CAN_STATE_ERROR_PASSIVE)) {
		/* CEC register: RX error count low byte, TX high byte */
		u16 cec = bfin_read(&reg->cec);
		u8 rxerr = cec;
		u8 txerr = cec >> 8;

		cf->can_id |= CAN_ERR_CRTL;
		if (state == CAN_STATE_ERROR_WARNING) {
			priv->can.can_stats.error_warning++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		} else {
			priv->can.can_stats.error_passive++;
			cf->data[1] = (txerr > rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
	}

	if (status) {
		/* bus error: map ESR bits onto a protocol error class */
		priv->can.can_stats.bus_error++;

		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;

		if (status & BEF)
			cf->data[2] |= CAN_ERR_PROT_BIT;
		else if (status & FER)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		else if (status & SER)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		else
			cf->data[2] |= CAN_ERR_PROT_UNSPEC;
	}

	priv->can.state = state;

	netif_rx(skb);

	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;

	return 0;
}
  339. irqreturn_t bfin_can_interrupt(int irq, void *dev_id)
  340. {
  341. struct net_device *dev = dev_id;
  342. struct bfin_can_priv *priv = netdev_priv(dev);
  343. struct bfin_can_regs __iomem *reg = priv->membase;
  344. struct net_device_stats *stats = &dev->stats;
  345. u16 status, isrc;
  346. if ((irq == priv->tx_irq) && bfin_read(&reg->mbtif2)) {
  347. /* transmission complete interrupt */
  348. bfin_write(&reg->mbtif2, 0xFFFF);
  349. stats->tx_packets++;
  350. stats->tx_bytes += bfin_read(&reg->chl[TRANSMIT_CHL].dlc);
  351. can_get_echo_skb(dev, 0);
  352. netif_wake_queue(dev);
  353. } else if ((irq == priv->rx_irq) && bfin_read(&reg->mbrif1)) {
  354. /* receive interrupt */
  355. isrc = bfin_read(&reg->mbrif1);
  356. bfin_write(&reg->mbrif1, 0xFFFF);
  357. bfin_can_rx(dev, isrc);
  358. } else if ((irq == priv->err_irq) && bfin_read(&reg->gis)) {
  359. /* error interrupt */
  360. isrc = bfin_read(&reg->gis);
  361. status = bfin_read(&reg->esr);
  362. bfin_write(&reg->gis, 0x7FF);
  363. bfin_can_err(dev, isrc, status);
  364. } else {
  365. return IRQ_NONE;
  366. }
  367. return IRQ_HANDLED;
  368. }
  369. static int bfin_can_open(struct net_device *dev)
  370. {
  371. struct bfin_can_priv *priv = netdev_priv(dev);
  372. int err;
  373. /* set chip into reset mode */
  374. bfin_can_set_reset_mode(dev);
  375. /* common open */
  376. err = open_candev(dev);
  377. if (err)
  378. goto exit_open;
  379. /* register interrupt handler */
  380. err = request_irq(priv->rx_irq, &bfin_can_interrupt, 0,
  381. "bfin-can-rx", dev);
  382. if (err)
  383. goto exit_rx_irq;
  384. err = request_irq(priv->tx_irq, &bfin_can_interrupt, 0,
  385. "bfin-can-tx", dev);
  386. if (err)
  387. goto exit_tx_irq;
  388. err = request_irq(priv->err_irq, &bfin_can_interrupt, 0,
  389. "bfin-can-err", dev);
  390. if (err)
  391. goto exit_err_irq;
  392. bfin_can_start(dev);
  393. netif_start_queue(dev);
  394. return 0;
  395. exit_err_irq:
  396. free_irq(priv->tx_irq, dev);
  397. exit_tx_irq:
  398. free_irq(priv->rx_irq, dev);
  399. exit_rx_irq:
  400. close_candev(dev);
  401. exit_open:
  402. return err;
  403. }
  404. static int bfin_can_close(struct net_device *dev)
  405. {
  406. struct bfin_can_priv *priv = netdev_priv(dev);
  407. netif_stop_queue(dev);
  408. bfin_can_set_reset_mode(dev);
  409. close_candev(dev);
  410. free_irq(priv->rx_irq, dev);
  411. free_irq(priv->tx_irq, dev);
  412. free_irq(priv->err_irq, dev);
  413. return 0;
  414. }
  415. struct net_device *alloc_bfin_candev(void)
  416. {
  417. struct net_device *dev;
  418. struct bfin_can_priv *priv;
  419. dev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX);
  420. if (!dev)
  421. return NULL;
  422. priv = netdev_priv(dev);
  423. priv->dev = dev;
  424. priv->can.bittiming_const = &bfin_can_bittiming_const;
  425. priv->can.do_set_bittiming = bfin_can_set_bittiming;
  426. priv->can.do_set_mode = bfin_can_set_mode;
  427. priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
  428. return dev;
  429. }
/* net_device callbacks implemented by this driver */
static const struct net_device_ops bfin_can_netdev_ops = {
	.ndo_open = bfin_can_open,
	.ndo_stop = bfin_can_close,
	.ndo_start_xmit = bfin_can_start_xmit,
};
  435. static int __devinit bfin_can_probe(struct platform_device *pdev)
  436. {
  437. int err;
  438. struct net_device *dev;
  439. struct bfin_can_priv *priv;
  440. struct resource *res_mem, *rx_irq, *tx_irq, *err_irq;
  441. unsigned short *pdata;
  442. pdata = pdev->dev.platform_data;
  443. if (!pdata) {
  444. dev_err(&pdev->dev, "No platform data provided!\n");
  445. err = -EINVAL;
  446. goto exit;
  447. }
  448. res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  449. rx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
  450. tx_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
  451. err_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
  452. if (!res_mem || !rx_irq || !tx_irq || !err_irq) {
  453. err = -EINVAL;
  454. goto exit;
  455. }
  456. if (!request_mem_region(res_mem->start, resource_size(res_mem),
  457. dev_name(&pdev->dev))) {
  458. err = -EBUSY;
  459. goto exit;
  460. }
  461. /* request peripheral pins */
  462. err = peripheral_request_list(pdata, dev_name(&pdev->dev));
  463. if (err)
  464. goto exit_mem_release;
  465. dev = alloc_bfin_candev();
  466. if (!dev) {
  467. err = -ENOMEM;
  468. goto exit_peri_pin_free;
  469. }
  470. priv = netdev_priv(dev);
  471. priv->membase = (void __iomem *)res_mem->start;
  472. priv->rx_irq = rx_irq->start;
  473. priv->tx_irq = tx_irq->start;
  474. priv->err_irq = err_irq->start;
  475. priv->pin_list = pdata;
  476. priv->can.clock.freq = get_sclk();
  477. dev_set_drvdata(&pdev->dev, dev);
  478. SET_NETDEV_DEV(dev, &pdev->dev);
  479. dev->flags |= IFF_ECHO; /* we support local echo */
  480. dev->netdev_ops = &bfin_can_netdev_ops;
  481. bfin_can_set_reset_mode(dev);
  482. err = register_candev(dev);
  483. if (err) {
  484. dev_err(&pdev->dev, "registering failed (err=%d)\n", err);
  485. goto exit_candev_free;
  486. }
  487. dev_info(&pdev->dev,
  488. "%s device registered"
  489. "(&reg_base=%p, rx_irq=%d, tx_irq=%d, err_irq=%d, sclk=%d)\n",
  490. DRV_NAME, (void *)priv->membase, priv->rx_irq,
  491. priv->tx_irq, priv->err_irq, priv->can.clock.freq);
  492. return 0;
  493. exit_candev_free:
  494. free_candev(dev);
  495. exit_peri_pin_free:
  496. peripheral_free_list(pdata);
  497. exit_mem_release:
  498. release_mem_region(res_mem->start, resource_size(res_mem));
  499. exit:
  500. return err;
  501. }
/*
 * Undo bfin_can_probe(): park the hardware, unregister the candev,
 * then release the memory region, the pins and the device itself.
 */
static int __devexit bfin_can_remove(struct platform_device *pdev)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct resource *res;

	/* quiesce the controller before tearing the netdev down */
	bfin_can_set_reset_mode(dev);

	unregister_candev(dev);

	dev_set_drvdata(&pdev->dev, NULL);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	peripheral_free_list(priv->pin_list);

	free_candev(dev);
	return 0;
}
  516. #ifdef CONFIG_PM
/*
 * Put the controller into sleep mode when the interface is running.
 * Busy-waits (up to BFIN_CAN_TIMEOUT * 10us) for the sleep
 * acknowledge (SMACK) and BUG()s if the hardware never responds.
 */
static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
{
	struct net_device *dev = dev_get_drvdata(&pdev->dev);
	struct bfin_can_priv *priv = netdev_priv(dev);
	struct bfin_can_regs __iomem *reg = priv->membase;
	int timeout = BFIN_CAN_TIMEOUT;

	if (netif_running(dev)) {
		/* enter sleep mode */
		bfin_write(&reg->control, bfin_read(&reg->control) | SMR);
		SSYNC();
		while (!(bfin_read(&reg->intr) & SMACK)) {
			udelay(10);
			if (--timeout == 0) {
				dev_err(dev->dev.parent,
						"fail to enter sleep mode\n");
				BUG();
			}
		}
	}

	return 0;
}
  538. static int bfin_can_resume(struct platform_device *pdev)
  539. {
  540. struct net_device *dev = dev_get_drvdata(&pdev->dev);
  541. struct bfin_can_priv *priv = netdev_priv(dev);
  542. struct bfin_can_regs __iomem *reg = priv->membase;
  543. if (netif_running(dev)) {
  544. /* leave sleep mode */
  545. bfin_write(&reg->intr, 0);
  546. SSYNC();
  547. }
  548. return 0;
  549. }
  550. #else
  551. #define bfin_can_suspend NULL
  552. #define bfin_can_resume NULL
  553. #endif /* CONFIG_PM */
/* platform driver glue; suspend/resume compile out to NULL without
 * CONFIG_PM (see the #ifdef block above) */
static struct platform_driver bfin_can_driver = {
	.probe = bfin_can_probe,
	.remove = __devexit_p(bfin_can_remove),
	.suspend = bfin_can_suspend,
	.resume = bfin_can_resume,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
	},
};
/* register the platform driver at module load */
static int __init bfin_can_init(void)
{
	return platform_driver_register(&bfin_can_driver);
}
module_init(bfin_can_init);

/* unregister the platform driver at module unload */
static void __exit bfin_can_exit(void)
{
	platform_driver_unregister(&bfin_can_driver);
}
module_exit(bfin_can_exit);
  574. MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
  575. MODULE_LICENSE("GPL");
  576. MODULE_DESCRIPTION("Blackfin on-chip CAN netdevice driver");