/kern_oII/drivers/net/a2065.c

/*
 * Amiga Linux/68k A2065 Ethernet Driver
 *
 * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
 *
 * Fixes and tips by:
 *	- Janos Farkas (CHEXUM@sparta.banki.hu)
 *	- Jes Degn Soerensen (jds@kom.auc.dk)
 *	- Matt Domsch (Matt_Domsch@dell.com)
 *
 * ----------------------------------------------------------------------------
 *
 * This program is based on
 *
 *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
 *			(C) Copyright 1995 by Geert Uytterhoeven,
 *			    Peter De Schrijver
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 * ----------------------------------------------------------------------------
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of the Linux
 * distribution for more details.
 *
 * ----------------------------------------------------------------------------
 *
 * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
 *
 *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
 *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
 */

#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/irq.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>

#include "a2065.h"

/*
 *	Transmit/Receive Ring Definitions
 */

#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1<<LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1<<LANCE_LOG_RX_BUFFERS)

#define TX_RING_MOD_MASK	(TX_RING_SIZE-1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE-1)

#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE		PKT_BUF_SIZE
#define TX_BUFF_SIZE		PKT_BUF_SIZE
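
/*
 * With the values above the driver runs 1<<2 = 4 transmit and 1<<4 = 16
 * receive descriptors, each backed by a 1544-byte buffer. Because the
 * ring sizes are powers of two, the MOD_MASK macros let the code below
 * wrap ring indices with a cheap "& mask" instead of a modulo.
 */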

/*
 *	Layout of the Lance's RAM Buffer
 */

struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];	/* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};
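
/*
 * Note that this structure is not allocated in system memory: it
 * describes the layout of the RAM on the A2065 board itself. The probe
 * code below keeps two pointers to it, one holding the CPU (Zorro)
 * address and one holding the address the LANCE uses on its side of
 * that buffer RAM.
 */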

/*
 *	Private Device Data
 */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;
	volatile struct lance_init_block *init_block;	    /* Host's view */
	volatile struct lance_init_block *lance_init_block; /* Lance's view */

	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;			/* cable-selection is TPE */
	int auto_select;		/* cable-selection by carrier */
	unsigned short busmaster_regval;

#ifdef CONFIG_SUNLANCE
	struct Linux_SBus_DMA *ledma;	/* if set this points to ledma and arch=4m */
	int burst_sizes;		/* ledma SBus burst sizes */
#endif
	struct timer_list multicast_timer;
};

#define TX_BUFFS_AVAIL ((lp->tx_old<=lp->tx_new)?\
			lp->tx_old+lp->tx_ring_mod_mask-lp->tx_new:\
			lp->tx_old - lp->tx_new-1)

#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
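
/*
 * TX_BUFFS_AVAIL derives the number of free transmit slots from the
 * producer (tx_new) and consumer (tx_old) ring indices. LANCE_ADDR
 * strips the upper address byte, keeping only the low 24 bits,
 * presumably because the Am7990 addresses its buffer memory through a
 * 24-bit address.
 */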

/* Load the CSR registers */
static void load_csrs (struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	leptr = LANCE_ADDR (aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}
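
/*
 * load_csrs() above points the chip at the init block: CSR1 and CSR2
 * take the low and high 16 bits of its address, and CSR3 takes the bus
 * configuration. The probe code below sets busmaster_regval to
 * LE_C3_BSWP, i.e. the chip's byte-swap mode, presumably to match the
 * big-endian byte order of the 68k host.
 */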

#define ZERO 0

/* Setup the Lance Rx and Tx rings */
static void lance_init_ring (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib;	/* for LANCE_ADDR computations */
	int leptr;
	int i;

	aib = lp->lance_init_block;

	/* Lock out other processes while setting up hardware */
	netif_stop_queue(dev);
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	ib->mode = 0;

	/* Copy the ethernet address to the lance init block
	 * Note that on the sparc you need to swap the ethernet address.
	 */
	ib->phys_addr [0] = dev->dev_addr [1];
	ib->phys_addr [1] = dev->dev_addr [0];
	ib->phys_addr [2] = dev->dev_addr [3];
	ib->phys_addr [3] = dev->dev_addr [2];
	ib->phys_addr [4] = dev->dev_addr [5];
	ib->phys_addr [5] = dev->dev_addr [4];

	if (ZERO)
		printk(KERN_DEBUG "TX rings:\n");

	/* Setup the Tx ring entries */
	for (i = 0; i <= (1<<lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring [i].tmd0      = leptr;
		ib->btx_ring [i].tmd1_hadr = leptr >> 16;
		ib->btx_ring [i].tmd1_bits = 0;
		ib->btx_ring [i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring [i].misc      = 0;
		if (i < 3 && ZERO)
			printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries */
	if (ZERO)
		printk(KERN_DEBUG "RX rings:\n");
	for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring [i].rmd0      = leptr;
		ib->brx_ring [i].rmd1_hadr = leptr >> 16;
		ib->brx_ring [i].rmd1_bits = LE_R1_OWN;
		ib->brx_ring [i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring [i].mblength  = 0;
		if (i < 3 && ZERO)
			printk(KERN_DEBUG "%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (ZERO)
		printk(KERN_DEBUG "RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (ZERO)
		printk(KERN_DEBUG "TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;
}
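
/*
 * Note the asymmetry above: receive descriptors are created with
 * LE_R1_OWN set (the chip owns the empty buffers and fills them), while
 * transmit descriptors start with the ownership bit clear (the host
 * owns them until lance_start_xmit() hands one over).
 */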

static int init_restart_lance (struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		printk(KERN_ERR "LANCE unopened after %d ticks, csr0=%4.4x.\n",
		       i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}
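
/*
 * Receive path: walk the ring from rx_new until a descriptor that the
 * chip still owns is found. Good frames are copied out of the board RAM
 * into a freshly allocated skb; the descriptor is then returned to the
 * chip by restoring LE_R1_OWN.
 */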
static int lance_rx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_rx_desc *rd;
	unsigned char bits;

#ifdef TEST_HITS
	int i;
	printk(KERN_DEBUG "[");
	for (i = 0; i < RX_RING_SIZE; i++) {
		if (i == lp->rx_new)
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X");
		else
			printk ("%s",
				ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1");
	}
	printk ("]\n");
#endif

	ll->rdp = LE_C0_RINT|LE_C0_INEA;
	for (rd = &ib->brx_ring [lp->rx_new];
	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
	     rd = &ib->brx_ring [lp->rx_new]) {

		/* We got an incomplete frame? */
		if ((bits & LE_R1_POK) != LE_R1_POK) {
			dev->stats.rx_over_errors++;
			dev->stats.rx_errors++;
			continue;
		} else if (bits & LE_R1_ERR) {
			/* Count only the end frame as a rx error,
			 * not the beginning
			 */
			if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++;
			if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++;
			if (bits & LE_R1_OFL) dev->stats.rx_over_errors++;
			if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++;
			if (bits & LE_R1_EOP) dev->stats.rx_errors++;
		} else {
			int len = (rd->mblength & 0xfff) - 4;
			struct sk_buff *skb = dev_alloc_skb (len+2);

			if (!skb) {
				printk(KERN_WARNING "%s: Memory squeeze, "
				       "deferring packet.\n", dev->name);
				dev->stats.rx_dropped++;
				rd->mblength = 0;
				rd->rmd1_bits = LE_R1_OWN;
				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
				return 0;
			}

			skb_reserve (skb, 2);		/* 16 byte align */
			skb_put (skb, len);		/* make room */
			skb_copy_to_linear_data(skb,
				(unsigned char *)&(ib->rx_buf [lp->rx_new][0]),
				len);
			skb->protocol = eth_type_trans (skb, dev);
			netif_rx (skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += len;
		}

		/* Return the packet to the pool */
		rd->mblength = 0;
		rd->rmd1_bits = LE_R1_OWN;
		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
	}
	return 0;
}
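
/*
 * Transmit completion: reap descriptors from tx_old up to tx_new,
 * stopping at the first one the chip still owns. Error bits reported in
 * td->misc are folded into the interface statistics; a buffer error or
 * underflow, or a carrier loss with auto_select enabled, triggers a
 * full stop/reinit/restart of the chip.
 */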
static int lance_tx (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring [i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL) dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk(KERN_ERR "%s: Carrier Lost, "
					       "trying %s\n", dev->name,
					       lp->tpe?"TPE":"AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring (dev);
					load_csrs (lp);
					init_restart_lance (lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk(KERN_ERR "%s: Tx: ERR_BUF|ERR_UFL, "
				       "restarting\n", dev->name);
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring (dev);
				load_csrs (lp);
				init_restart_lance (lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}
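
/*
 * The board shares the Amiga PORTS interrupt with other expansion
 * hardware, so the handler first reads CSR0 and bails out with IRQ_NONE
 * when the LANCE has nothing pending; otherwise it acknowledges the
 * pending sources and dispatches to lance_rx()/lance_tx().
 */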
static irqreturn_t lance_interrupt (int irq, void *dev_id)
{
	struct net_device *dev;
	struct lance_private *lp;
	volatile struct lance_regs *ll;
	int csr0;

	dev = (struct net_device *) dev_id;

	lp = netdev_priv(dev);
	ll = lp->ll;

	ll->rap = LE_CSR0;		/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP */
	ll->rdp = csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|
			   LE_C0_INIT);

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx (dev);

	if (csr0 & LE_C0_TINT)
		lance_tx (dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;		/* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;		/* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk(KERN_ERR "%s: Bus master arbitration failure, status "
		       "%4.4x.\n", dev->name, csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|
		  LE_C0_IDON|LE_C0_INEA;
	return IRQ_HANDLED;
}

static int lance_open (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs (lp);
	lance_init_ring (dev);

	netif_start_queue(dev);

	return init_restart_lance (lp);
}

static int lance_close (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}

static inline int lance_reset (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs (lp);

	lance_init_ring (dev);
	dev->trans_start = jiffies;
	netif_start_queue(dev);

	status = init_restart_lance (lp);
#ifdef DEBUG_DRIVER
	printk(KERN_DEBUG "Lance restart=%d\n", status);
#endif
	return status;
}

static void lance_tx_timeout(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	printk(KERN_ERR "%s: transmit timed out, status %04x, reset\n",
	       dev->name, ll->rdp);
	lance_reset(dev);
	netif_wake_queue(dev);
}
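
/*
 * Hard-start transmit: the skb is padded to the 60-byte Ethernet
 * minimum, copied into the next free slot of the on-board tx_buf area,
 * and the matching descriptor is handed to the chip with LE_T1_OWN set.
 * Writing LE_C0_TDMD to CSR0 then asks the LANCE to service the
 * transmit ring immediately rather than waiting for its next poll.
 */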
static int lance_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = 0;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	if (!TX_BUFFS_AVAIL){
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

#ifdef DEBUG_DRIVER
	/* dump the packet */
	print_hex_dump(KERN_DEBUG, "skb->data: ", DUMP_PREFIX_NONE,
		       16, 1, skb->data, 64, true);
#endif
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	ib->btx_ring [entry].length = (-skblen) | 0xf000;
	ib->btx_ring [entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf [entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (TX_BUFFS_AVAIL <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
	dev->trans_start = jiffies;
	dev_kfree_skb (skb);

	local_irq_restore(flags);

	return status;
}
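
/*
 * The LANCE filters multicast through a 64-bit hash: the top 6 bits of
 * the little-endian CRC-32 of each address select one bit in the
 * filter[] words of the init block, and incoming multicast frames whose
 * hash bit is not set are rejected by the chip.
 */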
/* taken from the depca driver */
static void lance_load_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct dev_mc_list *dmi = dev->mc_list;
	char *addrs;
	int i;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI){
		ib->filter [0] = 0xffffffff;
		ib->filter [1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter [0] = 0;
	ib->filter [1] = 0;

	/* Add addresses */
	for (i = 0; i < dev->mc_count; i++){
		addrs = dmi->dmi_addr;
		dmi = dmi->next;

		/* multicast address? */
		if (!(*addrs & 1))
			continue;

		crc = ether_crc_le(6, addrs);
		crc = crc >> 26;
		mcast_table [crc >> 4] |= 1 << (crc & 0xf);
	}
	return;
}
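
/*
 * Changing the filter means re-running the chip's initialization, so
 * lance_set_multicast() refuses to do it while transmits are still in
 * flight; instead it re-arms multicast_timer (set up in the probe code
 * below) to retry a few jiffies later.
 */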
static void lance_set_multicast (struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring (dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast (dev);
	}
	load_csrs (lp);
	init_restart_lance (lp);
	netif_wake_queue(dev);
}

static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent);
static void __devexit a2065_remove_one(struct zorro_dev *z);


static struct zorro_device_id a2065_zorro_tbl[] __devinitdata = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};

static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= __devexit_p(a2065_remove_one),
};

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_multicast_list	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
};
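
/*
 * Probe: the board's register window and its buffer RAM are claimed as
 * two separate memory regions, and the Ethernet address is
 * reconstructed from the Zorro serial number, prefixed with the
 * Commodore (00:80:10) or Ameristar (00:00:9f) vendor bytes depending
 * on the product ID.
 */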
static int __devinit a2065_init_one(struct zorro_dev *z,
				    const struct zorro_device_id *ent)
{
	struct net_device *dev;
	struct lance_private *priv;
	unsigned long board, base_addr, mem_start;
	struct resource *r1, *r2;
	int err;

	board = z->resource.start;
	base_addr = board+A2065_LANCE;
	mem_start = board+A2065_RAM;

	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
				"Am7990");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
	if (!r2) {
		release_resource(r1);
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (dev == NULL) {
		release_resource(r1);
		release_resource(r2);
		return -ENOMEM;
	}

	priv = netdev_priv(dev);

	r1->name = dev->name;
	r2->name = dev->name;

	dev->dev_addr[0] = 0x00;
	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
		dev->dev_addr[1] = 0x80;
		dev->dev_addr[2] = 0x10;
	} else {					/* Ameristar */
		dev->dev_addr[1] = 0x00;
		dev->dev_addr[2] = 0x9f;
	}
	dev->dev_addr[3] = (z->rom.er_SerialNumber>>16) & 0xff;
	dev->dev_addr[4] = (z->rom.er_SerialNumber>>8) & 0xff;
	dev->dev_addr[5] = z->rom.er_SerialNumber & 0xff;
	dev->base_addr = ZTWO_VADDR(base_addr);
	dev->mem_start = ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start+A2065_RAM_SIZE;

	priv->ll = (volatile struct lance_regs *)dev->base_addr;
	priv->init_block = (struct lance_init_block *)dev->mem_start;
	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
	priv->auto_select = 0;
	priv->busmaster_regval = LE_C3_BSWP;

	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;

	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->dma = 0;

	init_timer(&priv->multicast_timer);
	priv->multicast_timer.data = (unsigned long) dev;
	priv->multicast_timer.function =
		(void (*)(unsigned long)) &lance_set_multicast;

	err = register_netdev(dev);
	if (err) {
		release_resource(r1);
		release_resource(r2);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	printk(KERN_INFO "%s: A2065 at 0x%08lx, Ethernet Address "
	       "%pM\n", dev->name, board, dev->dev_addr);

	return 0;
}

static void __devexit a2065_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr),
			   sizeof(struct lance_regs));
	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
	free_netdev(dev);
}

static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}

static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}

module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");