
/drivers/net/ethernet/dec/tulip/de2104x.c

https://github.com/mturquette/linux
/* de2104x.c: A Linux PCI Ethernet driver for Intel/Digital 21040/1 chips. */
/*
	Copyright 2001,2003 Jeff Garzik <jgarzik@pobox.com>

	Copyright 1994, 1995 Digital Equipment Corporation. [de4x5.c]
	Written/copyright 1994-2001 by Donald Becker. [tulip.c]

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice. This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	See the file COPYING in this distribution for more information.

	TODO, in rough priority order:
	* Support forcing media type with a module parameter,
	  like dl2k.c/sundance.c
	* Constants (module parms?) for Rx work limit
	* Complete reset on PciErr
	* Jumbo frames / dev->change_mtu
	* Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
	* Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
	* Implement Tx software interrupt mitigation via
	  Tx descriptor bit
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define DRV_NAME		"de2104x"
#define DRV_VERSION		"0.7"
#define DRV_RELDATE		"Mar 17, 2004"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/compiler.h>
#include <linux/rtnetlink.h>
#include <linux/crc32.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
	"PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")";

MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Intel/Digital 21040/1 series PCI Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

static int debug = -1;
module_param (debug, int, 0);
MODULE_PARM_DESC (debug, "de2104x bitmapped message enable number");

/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
	defined(CONFIG_SPARC) || defined(__ia64__) || \
	defined(__sh__) || defined(__mips__)
static int rx_copybreak = 1518;
#else
static int rx_copybreak = 100;
#endif
module_param (rx_copybreak, int, 0);
MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copied");
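
/* Frames no longer than rx_copybreak bytes are copied into a freshly
 * allocated skb sized to fit, so the full-size ring buffer can be handed
 * straight back to the chip; longer frames are passed up in the ring
 * buffer itself and a new full-size buffer is mapped in its place
 * (see de_rx()).
 */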

#define DE_DEF_MSG_ENABLE	(NETIF_MSG_DRV		| \
				 NETIF_MSG_PROBE	| \
				 NETIF_MSG_LINK		| \
				 NETIF_MSG_IFDOWN	| \
				 NETIF_MSG_IFUP		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)

/* Descriptor skip length in 32 bit longwords. */
#ifndef CONFIG_DE2104X_DSL
#define DSL			0
#else
#define DSL			CONFIG_DE2104X_DSL
#endif

#define DE_RX_RING_SIZE		64
#define DE_TX_RING_SIZE		64
#define DE_RING_BYTES		\
		((sizeof(struct de_desc) * DE_RX_RING_SIZE) +	\
		(sizeof(struct de_desc) * DE_TX_RING_SIZE))
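/* Both descriptor rings live in one DMA-coherent allocation: Rx
 * descriptors first, Tx descriptors immediately after (see
 * de_alloc_rings() and the TxRingAddr programming in de_init_hw()).
 */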
#define NEXT_TX(N)		(((N) + 1) & (DE_TX_RING_SIZE - 1))
#define NEXT_RX(N)		(((N) + 1) & (DE_RX_RING_SIZE - 1))
#define TX_BUFFS_AVAIL(CP)					\
	(((CP)->tx_tail <= (CP)->tx_head) ?			\
	  (CP)->tx_tail + (DE_TX_RING_SIZE - 1) - (CP)->tx_head : \
	  (CP)->tx_tail - (CP)->tx_head - 1)
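/* TX_BUFFS_AVAIL keeps one Tx slot permanently unused so that a full ring
 * (head one slot behind tail) can be told apart from an empty one
 * (head == tail) without a separate count.
 */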
#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
#define RX_OFFSET		2

#define DE_SETUP_SKB		((struct sk_buff *) 1)
#define DE_DUMMY_SKB		((struct sk_buff *) 2)
#define DE_SETUP_FRAME_WORDS	96
#define DE_EEPROM_WORDS		256
#define DE_EEPROM_SIZE		(DE_EEPROM_WORDS * sizeof(u16))
#define DE_MAX_MEDIA		5

#define DE_MEDIA_TP_AUTO	0
#define DE_MEDIA_BNC		1
#define DE_MEDIA_AUI		2
#define DE_MEDIA_TP		3
#define DE_MEDIA_TP_FD		4
#define DE_MEDIA_INVALID	DE_MAX_MEDIA
#define DE_MEDIA_FIRST		0
#define DE_MEDIA_LAST		(DE_MAX_MEDIA - 1)
#define DE_AUI_BNC		(SUPPORTED_AUI | SUPPORTED_BNC)

#define DE_TIMER_LINK		(60 * HZ)
#define DE_TIMER_NO_LINK	(5 * HZ)

#define DE_NUM_REGS		16
#define DE_REGS_SIZE		(DE_NUM_REGS * sizeof(u32))
#define DE_REGS_VER		1

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(6*HZ)

/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
   to support a pre-NWay full-duplex signaling mechanism using short frames.
   No one knows what it should be, but if left at its default value some
   10base2(!) packets trigger a full-duplex-request interrupt. */
#define FULL_DUPLEX_MAGIC	0x6969

enum {
	/* NIC registers */
	BusMode			= 0x00,
	TxPoll			= 0x08,
	RxPoll			= 0x10,
	RxRingAddr		= 0x18,
	TxRingAddr		= 0x20,
	MacStatus		= 0x28,
	MacMode			= 0x30,
	IntrMask		= 0x38,
	RxMissed		= 0x40,
	ROMCmd			= 0x48,
	CSR11			= 0x58,
	SIAStatus		= 0x60,
	CSR13			= 0x68,
	CSR14			= 0x70,
	CSR15			= 0x78,
	PCIPM			= 0x40,

	/* BusMode bits */
	CmdReset		= (1 << 0),
	CacheAlign16		= 0x00008000,
	BurstLen4		= 0x00000400,
	DescSkipLen		= (DSL << 2),

	/* Rx/TxPoll bits */
	NormalTxPoll		= (1 << 0),
	NormalRxPoll		= (1 << 0),

	/* Tx/Rx descriptor status bits */
	DescOwn			= (1 << 31),
	RxError			= (1 << 15),
	RxErrLong		= (1 << 7),
	RxErrCRC		= (1 << 1),
	RxErrFIFO		= (1 << 0),
	RxErrRunt		= (1 << 11),
	RxErrFrame		= (1 << 14),
	RingEnd			= (1 << 25),
	FirstFrag		= (1 << 29),
	LastFrag		= (1 << 30),
	TxError			= (1 << 15),
	TxFIFOUnder		= (1 << 1),
	TxLinkFail		= (1 << 2) | (1 << 10) | (1 << 11),
	TxMaxCol		= (1 << 8),
	TxOWC			= (1 << 9),
	TxJabber		= (1 << 14),
	SetupFrame		= (1 << 27),
	TxSwInt			= (1 << 31),

	/* MacStatus bits */
	IntrOK			= (1 << 16),
	IntrErr			= (1 << 15),
	RxIntr			= (1 << 6),
	RxEmpty			= (1 << 7),
	TxIntr			= (1 << 0),
	TxEmpty			= (1 << 2),
	PciErr			= (1 << 13),
	TxState			= (1 << 22) | (1 << 21) | (1 << 20),
	RxState			= (1 << 19) | (1 << 18) | (1 << 17),
	LinkFail		= (1 << 12),
	LinkPass		= (1 << 4),
	RxStopped		= (1 << 8),
	TxStopped		= (1 << 1),

	/* MacMode bits */
	TxEnable		= (1 << 13),
	RxEnable		= (1 << 1),
	RxTx			= TxEnable | RxEnable,
	FullDuplex		= (1 << 9),
	AcceptAllMulticast	= (1 << 7),
	AcceptAllPhys		= (1 << 6),
	BOCnt			= (1 << 5),
	MacModeClear		= (1<<12) | (1<<11) | (1<<10) | (1<<8) | (1<<3) |
				  RxTx | BOCnt | AcceptAllPhys | AcceptAllMulticast,

	/* ROMCmd bits */
	EE_SHIFT_CLK		= 0x02,	/* EEPROM shift clock. */
	EE_CS			= 0x01,	/* EEPROM chip select. */
	EE_DATA_WRITE		= 0x04,	/* Data from the Tulip to EEPROM. */
	EE_WRITE_0		= 0x01,
	EE_WRITE_1		= 0x05,
	EE_DATA_READ		= 0x08,	/* Data from the EEPROM chip. */
	EE_ENB			= (0x4800 | EE_CS),
	/* The EEPROM commands include the always-set leading bit. */
	EE_READ_CMD		= 6,

	/* RxMissed bits */
	RxMissedOver		= (1 << 16),
	RxMissedMask		= 0xffff,

	/* SROM-related bits */
	SROMC0InfoLeaf		= 27,
	MediaBlockMask		= 0x3f,
	MediaCustomCSRs		= (1 << 6),

	/* PCIPM bits */
	PM_Sleep		= (1 << 31),
	PM_Snooze		= (1 << 30),
	PM_Mask			= PM_Sleep | PM_Snooze,

	/* SIAStatus bits */
	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
	NWayRestart		= (1 << 12),
	NonselPortActive	= (1 << 9),
	SelPortActive		= (1 << 8),
	LinkFailStatus		= (1 << 2),
	NetCxnErr		= (1 << 1),
};

static const u32 de_intr_mask =
	IntrOK | IntrErr | RxIntr | RxEmpty | TxIntr | TxEmpty |
	LinkPass | LinkFail | PciErr;

/*
 * Set the programmable burst length to 4 longwords for all:
 * DMA errors result without these values. Cache align 16 long.
 */
static const u32 de_bus_mode = CacheAlign16 | BurstLen4 | DescSkipLen;

struct de_srom_media_block {
	u8			opts;
	u16			csr13;
	u16			csr14;
	u16			csr15;
} __packed;

struct de_srom_info_leaf {
	u16			default_media;
	u8			n_blocks;
	u8			unused;
} __packed;

struct de_desc {
	__le32			opts1;
	__le32			opts2;
	__le32			addr1;
	__le32			addr2;
#if DSL
	__le32			skip[DSL];
#endif
};

struct media_info {
	u16			type;	/* DE_MEDIA_xxx */
	u16			csr13;
	u16			csr14;
	u16			csr15;
};

struct ring_info {
	struct sk_buff		*skb;
	dma_addr_t		mapping;
};

struct de_private {
	unsigned		tx_head;
	unsigned		tx_tail;
	unsigned		rx_tail;

	void			__iomem *regs;
	struct net_device	*dev;
	spinlock_t		lock;

	struct de_desc		*rx_ring;
	struct de_desc		*tx_ring;
	struct ring_info	tx_skb[DE_TX_RING_SIZE];
	struct ring_info	rx_skb[DE_RX_RING_SIZE];
	unsigned		rx_buf_sz;
	dma_addr_t		ring_dma;

	u32			msg_enable;

	struct net_device_stats net_stats;

	struct pci_dev		*pdev;

	u16			setup_frame[DE_SETUP_FRAME_WORDS];

	u32			media_type;
	u32			media_supported;
	u32			media_advertise;
	struct media_info	media[DE_MAX_MEDIA];
	struct timer_list	media_timer;

	u8			*ee_data;
	unsigned		board_idx;
	unsigned		de21040 : 1;
	unsigned		media_lock : 1;
};

static void de_set_rx_mode (struct net_device *dev);
static void de_tx (struct de_private *de);
static void de_clean_rings (struct de_private *de);
static void de_media_interrupt (struct de_private *de, u32 status);
static void de21040_media_timer (unsigned long data);
static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);

static const struct pci_device_id de_pci_tbl[] = {
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ },
};
MODULE_DEVICE_TABLE(pci, de_pci_tbl);

static const char * const media_name[DE_MAX_MEDIA] = {
	"10baseT auto",
	"BNC",
	"AUI",
	"10baseT-HD",
	"10baseT-FD"
};

/* 21040 transceiver register settings:
 * TP AUTO(unused), BNC(unused), AUI, TP, TP FD */
static u16 t21040_csr13[] = { 0, 0, 0x8F09, 0x8F01, 0x8F01, };
static u16 t21040_csr14[] = { 0, 0, 0x0705, 0xFFFF, 0xFFFD, };
static u16 t21040_csr15[] = { 0, 0, 0x0006, 0x0000, 0x0000, };

/* 21041 transceiver register settings: TP AUTO, BNC, AUI, TP, TP FD */
static u16 t21041_csr13[] = { 0xEF01, 0xEF09, 0xEF09, 0xEF01, 0xEF09, };
static u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
/* If on-chip autonegotiation is broken, use half-duplex (FF3F) instead */
static u16 t21041_csr14_brk[] = { 0xFF3F, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
static u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };


#define dr32(reg)	ioread32(de->regs + (reg))
#define dw32(reg, val)	iowrite32((val), de->regs + (reg))
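
/* Register accessors: the chip's CSRs sit at 8-byte strides in the
 * register window (CSR<n> at byte offset n * 8), which is also why
 * __de_get_regs() below reads dr32(i * 8).
 */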

static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
			    u32 status, u32 len)
{
	netif_dbg(de, rx_err, de->dev,
		  "rx err, slot %d status 0x%x len %d\n",
		  rx_tail, status, len);

	if ((status & 0x38000300) != 0x0300) {
		/* Ignore earlier buffers. */
		if ((status & 0xffff) != 0x7fff) {
			netif_warn(de, rx_err, de->dev,
				   "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
				   status);
			de->net_stats.rx_length_errors++;
		}
	} else if (status & RxError) {
		/* There was a fatal error. */
		de->net_stats.rx_errors++; /* end of a packet. */
		if (status & 0x0890) de->net_stats.rx_length_errors++;
		if (status & RxErrCRC) de->net_stats.rx_crc_errors++;
		if (status & RxErrFIFO) de->net_stats.rx_fifo_errors++;
	}
}
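
/* Rx descriptor handshake: the chip owns a descriptor while DescOwn is set
 * in opts1 and clears it when it has written a frame (and status) back.
 * de_rx() below walks forward from rx_tail, stops at the first descriptor
 * the chip still owns, and returns each processed slot to the chip by
 * republishing the buffer and setting DescOwn again.
 */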

static void de_rx (struct de_private *de)
{
	unsigned rx_tail = de->rx_tail;
	unsigned rx_work = DE_RX_RING_SIZE;
	unsigned drop = 0;
	int rc;

	while (--rx_work) {
		u32 status, len;
		dma_addr_t mapping;
		struct sk_buff *skb, *copy_skb;
		unsigned copying_skb, buflen;

		skb = de->rx_skb[rx_tail].skb;
		BUG_ON(!skb);
		rmb();
		status = le32_to_cpu(de->rx_ring[rx_tail].opts1);
		if (status & DescOwn)
			break;

		len = ((status >> 16) & 0x7ff) - 4;
		mapping = de->rx_skb[rx_tail].mapping;

		if (unlikely(drop)) {
			de->net_stats.rx_dropped++;
			goto rx_next;
		}

		if (unlikely((status & 0x38008300) != 0x0300)) {
			de_rx_err_acct(de, rx_tail, status, len);
			goto rx_next;
		}

		copying_skb = (len <= rx_copybreak);

		netif_dbg(de, rx_status, de->dev,
			  "rx slot %d status 0x%x len %d copying? %d\n",
			  rx_tail, status, len, copying_skb);

		buflen = copying_skb ? (len + RX_OFFSET) : de->rx_buf_sz;
		copy_skb = netdev_alloc_skb(de->dev, buflen);
		if (unlikely(!copy_skb)) {
			de->net_stats.rx_dropped++;
			drop = 1;
			rx_work = 100;
			goto rx_next;
		}

		if (!copying_skb) {
			pci_unmap_single(de->pdev, mapping,
					 buflen, PCI_DMA_FROMDEVICE);
			skb_put(skb, len);

			mapping =
			de->rx_skb[rx_tail].mapping =
				pci_map_single(de->pdev, copy_skb->data,
					       buflen, PCI_DMA_FROMDEVICE);
			de->rx_skb[rx_tail].skb = copy_skb;
		} else {
			pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
			skb_reserve(copy_skb, RX_OFFSET);
			skb_copy_from_linear_data(skb, skb_put(copy_skb, len),
						  len);
			pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		skb->protocol = eth_type_trans (skb, de->dev);
		de->net_stats.rx_packets++;
		de->net_stats.rx_bytes += skb->len;
		rc = netif_rx (skb);
		if (rc == NET_RX_DROP)
			drop = 1;

rx_next:
		if (rx_tail == (DE_RX_RING_SIZE - 1))
			de->rx_ring[rx_tail].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[rx_tail].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[rx_tail].addr1 = cpu_to_le32(mapping);
		wmb();
		de->rx_ring[rx_tail].opts1 = cpu_to_le32(DescOwn);
		rx_tail = NEXT_RX(rx_tail);
	}

	if (!rx_work)
		netdev_warn(de->dev, "rx work limit reached\n");

	de->rx_tail = rx_tail;
}

static irqreturn_t de_interrupt (int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct de_private *de = netdev_priv(dev);
	u32 status;

	status = dr32(MacStatus);
	if ((!(status & (IntrOK|IntrErr))) || (status == 0xFFFF))
		return IRQ_NONE;

	netif_dbg(de, intr, dev, "intr, status %08x mode %08x desc %u/%u/%u\n",
		  status, dr32(MacMode),
		  de->rx_tail, de->tx_head, de->tx_tail);
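	/* Acknowledge the causes we just read: MacStatus (CSR5) bits are
	 * cleared by writing them back as ones. */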
	dw32(MacStatus, status);

	if (status & (RxIntr | RxEmpty)) {
		de_rx(de);
		if (status & RxEmpty)
			dw32(RxPoll, NormalRxPoll);
	}

	spin_lock(&de->lock);

	if (status & (TxIntr | TxEmpty))
		de_tx(de);

	if (status & (LinkPass | LinkFail))
		de_media_interrupt(de, status);

	spin_unlock(&de->lock);

	if (status & PciErr) {
		u16 pci_status;

		pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
		pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
		netdev_err(de->dev,
			   "PCI bus error, status=%08x, PCI status=%04x\n",
			   status, pci_status);
	}

	return IRQ_HANDLED;
}

static void de_tx (struct de_private *de)
{
	unsigned tx_head = de->tx_head;
	unsigned tx_tail = de->tx_tail;

	while (tx_tail != tx_head) {
		struct sk_buff *skb;
		u32 status;

		rmb();
		status = le32_to_cpu(de->tx_ring[tx_tail].opts1);
		if (status & DescOwn)
			break;

		skb = de->tx_skb[tx_tail].skb;
		BUG_ON(!skb);
		if (unlikely(skb == DE_DUMMY_SKB))
			goto next;

		if (unlikely(skb == DE_SETUP_SKB)) {
			pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
					 sizeof(de->setup_frame), PCI_DMA_TODEVICE);
			goto next;
		}

		pci_unmap_single(de->pdev, de->tx_skb[tx_tail].mapping,
				 skb->len, PCI_DMA_TODEVICE);

		if (status & LastFrag) {
			if (status & TxError) {
				netif_dbg(de, tx_err, de->dev,
					  "tx err, status 0x%x\n",
					  status);
				de->net_stats.tx_errors++;
				if (status & TxOWC)
					de->net_stats.tx_window_errors++;
				if (status & TxMaxCol)
					de->net_stats.tx_aborted_errors++;
				if (status & TxLinkFail)
					de->net_stats.tx_carrier_errors++;
				if (status & TxFIFOUnder)
					de->net_stats.tx_fifo_errors++;
			} else {
				de->net_stats.tx_packets++;
				de->net_stats.tx_bytes += skb->len;
				netif_dbg(de, tx_done, de->dev,
					  "tx done, slot %d\n", tx_tail);
			}
			dev_kfree_skb_irq(skb);
		}

next:
		de->tx_skb[tx_tail].skb = NULL;

		tx_tail = NEXT_TX(tx_tail);
	}

	de->tx_tail = tx_tail;

	if (netif_queue_stopped(de->dev) && (TX_BUFFS_AVAIL(de) > (DE_TX_RING_SIZE / 4)))
		netif_wake_queue(de->dev);
}

static netdev_tx_t de_start_xmit (struct sk_buff *skb,
				  struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned int entry, tx_free;
	u32 mapping, len, flags = FirstFrag | LastFrag;
	struct de_desc *txd;

	spin_lock_irq(&de->lock);

	tx_free = TX_BUFFS_AVAIL(de);
	if (tx_free == 0) {
		netif_stop_queue(dev);
		spin_unlock_irq(&de->lock);
		return NETDEV_TX_BUSY;
	}
	tx_free--;

	entry = de->tx_head;

	txd = &de->tx_ring[entry];

	len = skb->len;
	mapping = pci_map_single(de->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (entry == (DE_TX_RING_SIZE - 1))
		flags |= RingEnd;
	if (!tx_free || (tx_free == (DE_TX_RING_SIZE / 2)))
		flags |= TxSwInt;
	flags |= len;
	txd->opts2 = cpu_to_le32(flags);
	txd->addr1 = cpu_to_le32(mapping);

	de->tx_skb[entry].skb = skb;
	de->tx_skb[entry].mapping = mapping;
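	/* Publish the descriptor before handing it to the chip: the first
	 * wmb() orders the opts2/addr1 stores against the DescOwn store,
	 * and the second makes the ownership transfer visible before the
	 * TxPoll doorbell below, so the DMA engine never fetches a
	 * half-built descriptor. */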
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	de->tx_head = NEXT_TX(entry);
	netif_dbg(de, tx_queued, dev, "tx queued, slot %d, skblen %d\n",
		  entry, skb->len);

	if (tx_free == 0)
		netif_stop_queue(dev);

	spin_unlock_irq(&de->lock);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

	return NETDEV_TX_OK;
}

/* Set or clear the multicast filter for this adaptor.
   Note that we only use exclusion around actually queueing the
   new frame, not around filling de->setup_frame. This is non-deterministic
   when re-entered but still correct. */
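
/* Setup-frame layout, as used below: 192 bytes, i.e. 16 filter entries of
 * 12 bytes each. Only the low 16 bits of each longword are significant,
 * which is why every 16-bit chunk is stored twice. In hash mode the first
 * 32 longwords carry a 512-bit multicast hash table and the station
 * address lands in entry 13 (longwords 39-41); in perfect mode all 16
 * entries hold addresses.
 */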

static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u16 hash_table[32];
	struct netdev_hw_addr *ha;
	int i;
	u16 *eaddrs;

	memset(hash_table, 0, sizeof(hash_table));
	__set_bit_le(255, hash_table);			/* Broadcast entry */
	/* This should work on big-endian machines as well. */
	netdev_for_each_mc_addr(ha, dev) {
		int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;

		__set_bit_le(index, hash_table);
	}

	for (i = 0; i < 32; i++) {
		*setup_frm++ = hash_table[i];
		*setup_frm++ = hash_table[i];
	}
	setup_frm = &de->setup_frame[13*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u16 *eaddrs;

	/* We have <= 14 addresses so we can use the wonderful
	   16 address perfect filtering of the Tulip. */
	netdev_for_each_mc_addr(ha, dev) {
		eaddrs = (u16 *) ha->addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	/* Fill the unused entries with the broadcast address. */
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
	setup_frm = &de->setup_frame[15*6];

	/* Fill the final entry with our physical address. */
	eaddrs = (u16 *)dev->dev_addr;
	*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
	*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
	*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
}

static void __de_set_rx_mode (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 macmode;
	unsigned int entry;
	u32 mapping;
	struct de_desc *txd;
	struct de_desc *dummy_txd = NULL;

	macmode = dr32(MacMode) & ~(AcceptAllMulticast | AcceptAllPhys);

	if (dev->flags & IFF_PROMISC) {	/* Set promiscuous. */
		macmode |= AcceptAllMulticast | AcceptAllPhys;
		goto out;
	}

	if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter well -- accept all multicasts. */
		macmode |= AcceptAllMulticast;
		goto out;
	}

	/* Note that only the low-address shortword of setup_frame is valid!
	   The values are doubled for big-endian architectures. */
	if (netdev_mc_count(dev) > 14)	/* Must use a multicast hash table. */
		build_setup_frame_hash (de->setup_frame, dev);
	else
		build_setup_frame_perfect (de->setup_frame, dev);

	/*
	 * Now add this frame to the Tx list.
	 */

	entry = de->tx_head;

	/* Avoid a chip errata by prefixing a dummy entry. */
	if (entry != 0) {
		de->tx_skb[entry].skb = DE_DUMMY_SKB;

		dummy_txd = &de->tx_ring[entry];
		dummy_txd->opts2 = (entry == (DE_TX_RING_SIZE - 1)) ?
				   cpu_to_le32(RingEnd) : 0;
		dummy_txd->addr1 = 0;

		/* Must set DescOwned later to avoid race with chip */

		entry = NEXT_TX(entry);
	}

	de->tx_skb[entry].skb = DE_SETUP_SKB;
	de->tx_skb[entry].mapping = mapping =
	    pci_map_single (de->pdev, de->setup_frame,
			    sizeof (de->setup_frame), PCI_DMA_TODEVICE);

	/* Put the setup frame on the Tx list. */
	txd = &de->tx_ring[entry];
	if (entry == (DE_TX_RING_SIZE - 1))
		txd->opts2 = cpu_to_le32(SetupFrame | RingEnd | sizeof (de->setup_frame));
	else
		txd->opts2 = cpu_to_le32(SetupFrame | sizeof (de->setup_frame));
	txd->addr1 = cpu_to_le32(mapping);
	wmb();

	txd->opts1 = cpu_to_le32(DescOwn);
	wmb();

	if (dummy_txd) {
		dummy_txd->opts1 = cpu_to_le32(DescOwn);
		wmb();
	}

	de->tx_head = NEXT_TX(entry);

	if (TX_BUFFS_AVAIL(de) == 0)
		netif_stop_queue(dev);

	/* Trigger an immediate transmit demand. */
	dw32(TxPoll, NormalTxPoll);

out:
	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}

static void de_set_rx_mode (struct net_device *dev)
{
	unsigned long flags;
	struct de_private *de = netdev_priv(dev);

	spin_lock_irqsave (&de->lock, flags);
	__de_set_rx_mode(dev);
	spin_unlock_irqrestore (&de->lock, flags);
}

static inline void de_rx_missed(struct de_private *de, u32 rx_missed)
{
	if (unlikely(rx_missed & RxMissedOver))
		de->net_stats.rx_missed_errors += RxMissedMask;
	else
		de->net_stats.rx_missed_errors += (rx_missed & RxMissedMask);
}

static void __de_get_stats(struct de_private *de)
{
	u32 tmp = dr32(RxMissed); /* self-clearing */
	de_rx_missed(de, tmp);
}

static struct net_device_stats *de_get_stats(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	/* The chip only needs to report frames it silently dropped. */
	spin_lock_irq(&de->lock);
	if (netif_running(dev) && netif_device_present(dev))
		__de_get_stats(de);
	spin_unlock_irq(&de->lock);

	return &de->net_stats;
}

static inline int de_is_running (struct de_private *de)
{
	return (dr32(MacStatus) & (RxState | TxState)) ? 1 : 0;
}

static void de_stop_rxtx (struct de_private *de)
{
	u32 macmode;
	unsigned int i = 1300/100;

	macmode = dr32(MacMode);
	if (macmode & RxTx) {
		dw32(MacMode, macmode & ~RxTx);
		dr32(MacMode);
	}

	/* wait until in-flight frame completes.
	 * Max time @ 10BT: 1500*8b/10Mbps == 1200us (+ 100us margin)
	 * Typically expect this loop to end in < 50 us on 100BT.
	 */
	while (--i) {
		if (!de_is_running(de))
			return;
		udelay(100);
	}

	netdev_warn(de->dev, "timeout expired, stopping DMA\n");
}

static inline void de_start_rxtx (struct de_private *de)
{
	u32 macmode;

	macmode = dr32(MacMode);
	if ((macmode & RxTx) != RxTx) {
		dw32(MacMode, macmode | RxTx);
		dr32(MacMode);
	}
}

static void de_stop_hw (struct de_private *de)
{
	udelay(5);
	dw32(IntrMask, 0);

	de_stop_rxtx(de);

	dw32(MacStatus, dr32(MacStatus));

	udelay(10);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;
}

static void de_link_up(struct de_private *de)
{
	if (!netif_carrier_ok(de->dev)) {
		netif_carrier_on(de->dev);
		netif_info(de, link, de->dev, "link up, media %s\n",
			   media_name[de->media_type]);
	}
}

static void de_link_down(struct de_private *de)
{
	if (netif_carrier_ok(de->dev)) {
		netif_carrier_off(de->dev);
		netif_info(de, link, de->dev, "link down\n");
	}
}

static void de_set_media (struct de_private *de)
{
	unsigned media = de->media_type;
	u32 macmode = dr32(MacMode);

	if (de_is_running(de))
		netdev_warn(de->dev, "chip is running while changing media!\n");

	if (de->de21040)
		dw32(CSR11, FULL_DUPLEX_MAGIC);
	dw32(CSR13, 0); /* Reset phy */
	dw32(CSR14, de->media[media].csr14);
	dw32(CSR15, de->media[media].csr15);
	dw32(CSR13, de->media[media].csr13);

	/* must delay 10ms before writing to other registers,
	 * especially CSR6
	 */
	mdelay(10);

	if (media == DE_MEDIA_TP_FD)
		macmode |= FullDuplex;
	else
		macmode &= ~FullDuplex;

	netif_info(de, link, de->dev, "set link %s\n", media_name[media]);
	netif_info(de, hw, de->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
		   dr32(MacMode), dr32(SIAStatus),
		   dr32(CSR13), dr32(CSR14), dr32(CSR15));
	netif_info(de, hw, de->dev, "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
		   macmode, de->media[media].csr13,
		   de->media[media].csr14, de->media[media].csr15);

	if (macmode != dr32(MacMode))
		dw32(MacMode, macmode);
}

static void de_next_media (struct de_private *de, const u32 *media,
			   unsigned int n_media)
{
	unsigned int i;

	for (i = 0; i < n_media; i++) {
		if (de_ok_to_advertise(de, media[i])) {
			de->media_type = media[i];
			return;
		}
	}
}

static void de21040_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev, "%s link ok, status %x\n",
				   media_name[de->media_type], status);
		return;
	}

	de_link_down(de);

	if (de->media_lock)
		return;

	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_state = DE_MEDIA_TP;
		de_next_media(de, &next_state, 1);
	} else {
		static const u32 next_state = DE_MEDIA_AUI;
		de_next_media(de, &next_state, 1);
	}

	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}

static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
{
	switch (new_media) {
	case DE_MEDIA_TP_AUTO:
		if (!(de->media_advertise & ADVERTISED_Autoneg))
			return 0;
		if (!(de->media_advertise & (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full)))
			return 0;
		break;
	case DE_MEDIA_BNC:
		if (!(de->media_advertise & ADVERTISED_BNC))
			return 0;
		break;
	case DE_MEDIA_AUI:
		if (!(de->media_advertise & ADVERTISED_AUI))
			return 0;
		break;
	case DE_MEDIA_TP:
		if (!(de->media_advertise & ADVERTISED_10baseT_Half))
			return 0;
		break;
	case DE_MEDIA_TP_FD:
		if (!(de->media_advertise & ADVERTISED_10baseT_Full))
			return 0;
		break;
	}

	return 1;
}

static void de21041_media_timer (unsigned long data)
{
	struct de_private *de = (struct de_private *) data;
	struct net_device *dev = de->dev;
	u32 status = dr32(SIAStatus);
	unsigned int carrier;
	unsigned long flags;

	/* clear port active bits */
	dw32(SIAStatus, NonselPortActive | SelPortActive);

	carrier = (status & NetCxnErr) ? 0 : 1;

	if (carrier) {
		if ((de->media_type == DE_MEDIA_TP_AUTO ||
		     de->media_type == DE_MEDIA_TP ||
		     de->media_type == DE_MEDIA_TP_FD) &&
		    (status & LinkFailStatus))
			goto no_link_yet;

		de->media_timer.expires = jiffies + DE_TIMER_LINK;
		add_timer(&de->media_timer);
		if (!netif_carrier_ok(dev))
			de_link_up(de);
		else
			netif_info(de, timer, dev,
				   "%s link ok, mode %x status %x\n",
				   media_name[de->media_type],
				   dr32(MacMode), status);
		return;
	}

	de_link_down(de);

	/* if media type locked, don't switch media */
	if (de->media_lock)
		goto set_media;

	/* if activity detected, use that as hint for new media type */
	if (status & NonselPortActive) {
		unsigned int have_media = 1;

		/* if AUI/BNC selected, then activity is on TP port */
		if (de->media_type == DE_MEDIA_AUI ||
		    de->media_type == DE_MEDIA_BNC) {
			if (de_ok_to_advertise(de, DE_MEDIA_TP_AUTO))
				de->media_type = DE_MEDIA_TP_AUTO;
			else
				have_media = 0;
		}

		/* TP selected. If there is only TP and BNC, then it's BNC */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_BNC) &&
			 de_ok_to_advertise(de, DE_MEDIA_BNC))
			de->media_type = DE_MEDIA_BNC;

		/* TP selected. If there is only TP and AUI, then it's AUI */
		else if (((de->media_supported & DE_AUI_BNC) == SUPPORTED_AUI) &&
			 de_ok_to_advertise(de, DE_MEDIA_AUI))
			de->media_type = DE_MEDIA_AUI;

		/* otherwise, ignore the hint */
		else
			have_media = 0;

		if (have_media)
			goto set_media;
	}

	/*
	 * Absent or ambiguous activity hint, move to next advertised
	 * media state. If de->media_type is left unchanged, this
	 * simply resets the PHY and reloads the current media settings.
	 */
	if (de->media_type == DE_MEDIA_AUI) {
		static const u32 next_states[] = {
			DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else if (de->media_type == DE_MEDIA_BNC) {
		static const u32 next_states[] = {
			DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	} else {
		static const u32 next_states[] = {
			DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
		};
		de_next_media(de, next_states, ARRAY_SIZE(next_states));
	}

set_media:
	spin_lock_irqsave(&de->lock, flags);
	de_stop_rxtx(de);
	spin_unlock_irqrestore(&de->lock, flags);
	de_set_media(de);
	de_start_rxtx(de);

no_link_yet:
	de->media_timer.expires = jiffies + DE_TIMER_NO_LINK;
	add_timer(&de->media_timer);

	netif_info(de, timer, dev, "no link, trying media %s, status %x\n",
		   media_name[de->media_type], status);
}

static void de_media_interrupt (struct de_private *de, u32 status)
{
	if (status & LinkPass) {
		/* Ignore if current media is AUI or BNC and we can't use TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC) &&
		    (de->media_lock ||
		     !de_ok_to_advertise(de, DE_MEDIA_TP_AUTO)))
			return;
		/* If current media is not TP, change it to TP */
		if ((de->media_type == DE_MEDIA_AUI ||
		     de->media_type == DE_MEDIA_BNC)) {
			de->media_type = DE_MEDIA_TP_AUTO;
			de_stop_rxtx(de);
			de_set_media(de);
			de_start_rxtx(de);
		}
		de_link_up(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
		return;
	}

	BUG_ON(!(status & LinkFail));
	/* Mark the link as down only if current media is TP */
	if (netif_carrier_ok(de->dev) && de->media_type != DE_MEDIA_AUI &&
	    de->media_type != DE_MEDIA_BNC) {
		de_link_down(de);
		mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	}
}

static int de_reset_mac (struct de_private *de)
{
	u32 status, tmp;

	/*
	 * Reset MAC. de4x5.c and tulip.c examined for "advice"
	 * in this area.
	 */

	if (dr32(BusMode) == 0xffffffff)
		return -EBUSY;

	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
	dw32 (BusMode, CmdReset);
	mdelay (1);

	dw32 (BusMode, de_bus_mode);
	mdelay (1);

	for (tmp = 0; tmp < 5; tmp++) {
		dr32 (BusMode);
		mdelay (1);
	}

	mdelay (1);

	status = dr32(MacStatus);
	if (status & (RxState | TxState))
		return -EBUSY;
	if (status == 0xffffffff)
		return -ENODEV;
	return 0;
}

static void de_adapter_wake (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	if (pmctl & PM_Mask) {
		pmctl &= ~PM_Mask;
		pci_write_config_dword(de->pdev, PCIPM, pmctl);

		/* de4x5.c delays, so we do too */
		msleep(10);
	}
}

static void de_adapter_sleep (struct de_private *de)
{
	u32 pmctl;

	if (de->de21040)
		return;

	dw32(CSR13, 0); /* Reset phy */
	pci_read_config_dword(de->pdev, PCIPM, &pmctl);
	pmctl |= PM_Sleep;
	pci_write_config_dword(de->pdev, PCIPM, pmctl);
}

static int de_init_hw (struct de_private *de)
{
	struct net_device *dev = de->dev;
	u32 macmode;
	int rc;

	de_adapter_wake(de);

	macmode = dr32(MacMode) & ~MacModeClear;

	rc = de_reset_mac(de);
	if (rc)
		return rc;

	de_set_media(de); /* reset phy */

	dw32(RxRingAddr, de->ring_dma);
	dw32(TxRingAddr, de->ring_dma + (sizeof(struct de_desc) * DE_RX_RING_SIZE));

	dw32(MacMode, RxTx | macmode);

	dr32(RxMissed); /* self-clearing */

	dw32(IntrMask, de_intr_mask);

	de_set_rx_mode(dev);

	return 0;
}

static int de_refill_rx (struct de_private *de)
{
	unsigned i;

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = netdev_alloc_skb(de->dev, de->rx_buf_sz);
		if (!skb)
			goto err_out;

		de->rx_skb[i].mapping = pci_map_single(de->pdev,
			skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
		de->rx_skb[i].skb = skb;

		de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
		if (i == (DE_RX_RING_SIZE - 1))
			de->rx_ring[i].opts2 =
				cpu_to_le32(RingEnd | de->rx_buf_sz);
		else
			de->rx_ring[i].opts2 = cpu_to_le32(de->rx_buf_sz);
		de->rx_ring[i].addr1 = cpu_to_le32(de->rx_skb[i].mapping);
		de->rx_ring[i].addr2 = 0;
	}

	return 0;

err_out:
	de_clean_rings(de);
	return -ENOMEM;
}

static int de_init_rings (struct de_private *de)
{
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);

	de->rx_tail = 0;
	de->tx_head = de->tx_tail = 0;

	return de_refill_rx (de);
}

static int de_alloc_rings (struct de_private *de)
{
	de->rx_ring = pci_alloc_consistent(de->pdev, DE_RING_BYTES, &de->ring_dma);
	if (!de->rx_ring)
		return -ENOMEM;
	de->tx_ring = &de->rx_ring[DE_RX_RING_SIZE];
	return de_init_rings(de);
}

static void de_clean_rings (struct de_private *de)
{
	unsigned i;

	memset(de->rx_ring, 0, sizeof(struct de_desc) * DE_RX_RING_SIZE);
	de->rx_ring[DE_RX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();
	memset(de->tx_ring, 0, sizeof(struct de_desc) * DE_TX_RING_SIZE);
	de->tx_ring[DE_TX_RING_SIZE - 1].opts2 = cpu_to_le32(RingEnd);
	wmb();

	for (i = 0; i < DE_RX_RING_SIZE; i++) {
		if (de->rx_skb[i].skb) {
			pci_unmap_single(de->pdev, de->rx_skb[i].mapping,
					 de->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(de->rx_skb[i].skb);
		}
	}

	for (i = 0; i < DE_TX_RING_SIZE; i++) {
		struct sk_buff *skb = de->tx_skb[i].skb;
		if ((skb) && (skb != DE_DUMMY_SKB)) {
			if (skb != DE_SETUP_SKB) {
				de->net_stats.tx_dropped++;
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					skb->len, PCI_DMA_TODEVICE);
				dev_kfree_skb(skb);
			} else {
				pci_unmap_single(de->pdev,
					de->tx_skb[i].mapping,
					sizeof(de->setup_frame),
					PCI_DMA_TODEVICE);
			}
		}
	}

	memset(&de->rx_skb, 0, sizeof(struct ring_info) * DE_RX_RING_SIZE);
	memset(&de->tx_skb, 0, sizeof(struct ring_info) * DE_TX_RING_SIZE);
}

static void de_free_rings (struct de_private *de)
{
	de_clean_rings(de);
	pci_free_consistent(de->pdev, DE_RING_BYTES, de->rx_ring, de->ring_dma);
	de->rx_ring = NULL;
	de->tx_ring = NULL;
}

static int de_open (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;
	int rc;

	netif_dbg(de, ifup, dev, "enabling interface\n");

	de->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	rc = de_alloc_rings(de);
	if (rc) {
		netdev_err(dev, "ring allocation failure, err=%d\n", rc);
		return rc;
	}

	dw32(IntrMask, 0);

	rc = request_irq(irq, de_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		netdev_err(dev, "IRQ %d request failure, err=%d\n", irq, rc);
		goto err_out_free;
	}

	rc = de_init_hw(de);
	if (rc) {
		netdev_err(dev, "h/w init failure, err=%d\n", rc);
		goto err_out_free_irq;
	}

	netif_start_queue(dev);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);

	return 0;

err_out_free_irq:
	free_irq(irq, dev);
err_out_free:
	de_free_rings(de);
	return rc;
}

static int de_close (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	unsigned long flags;

	netif_dbg(de, ifdown, dev, "disabling interface\n");

	del_timer_sync(&de->media_timer);

	spin_lock_irqsave(&de->lock, flags);
	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);
	spin_unlock_irqrestore(&de->lock, flags);

	free_irq(de->pdev->irq, dev);

	de_free_rings(de);
	de_adapter_sleep(de);
	return 0;
}

static void de_tx_timeout (struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	const int irq = de->pdev->irq;

	netdev_dbg(dev, "NIC status %08x mode %08x sia %08x desc %u/%u/%u\n",
		   dr32(MacStatus), dr32(MacMode), dr32(SIAStatus),
		   de->rx_tail, de->tx_head, de->tx_tail);

	del_timer_sync(&de->media_timer);

	disable_irq(irq);
	spin_lock_irq(&de->lock);

	de_stop_hw(de);
	netif_stop_queue(dev);
	netif_carrier_off(dev);

	spin_unlock_irq(&de->lock);
	enable_irq(irq);

	/* Update the error counts. */
	__de_get_stats(de);

	synchronize_irq(irq);
	de_clean_rings(de);

	de_init_rings(de);

	de_init_hw(de);

	netif_wake_queue(dev);
}

static void __de_get_regs(struct de_private *de, u8 *buf)
{
	int i;
	u32 *rbuf = (u32 *)buf;

	/* read all CSRs */
	for (i = 0; i < DE_NUM_REGS; i++)
		rbuf[i] = dr32(i * 8);

	/* handle self-clearing RxMissed counter, CSR8 */
	de_rx_missed(de, rbuf[8]);
}

static int __de_get_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
	ecmd->supported = de->media_supported;
	ecmd->transceiver = XCVR_INTERNAL;
	ecmd->phy_address = 0;
	ecmd->advertising = de->media_advertise;

	switch (de->media_type) {
	case DE_MEDIA_AUI:
		ecmd->port = PORT_AUI;
		break;
	case DE_MEDIA_BNC:
		ecmd->port = PORT_BNC;
		break;
	default:
		ecmd->port = PORT_TP;
		break;
	}

	ethtool_cmd_speed_set(ecmd, 10);

	if (dr32(MacMode) & FullDuplex)
		ecmd->duplex = DUPLEX_FULL;
	else
		ecmd->duplex = DUPLEX_HALF;

	if (de->media_lock)
		ecmd->autoneg = AUTONEG_DISABLE;
	else
		ecmd->autoneg = AUTONEG_ENABLE;

	/* ignore maxtxpkt, maxrxpkt for now */

	return 0;
}

static int __de_set_settings(struct de_private *de, struct ethtool_cmd *ecmd)
{
	u32 new_media;
	unsigned int media_lock;

	if (ethtool_cmd_speed(ecmd) != 10)
		return -EINVAL;
	if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL)
		return -EINVAL;
	if (ecmd->port != PORT_TP && ecmd->port != PORT_AUI && ecmd->port != PORT_BNC)
		return -EINVAL;
	if (de->de21040 && ecmd->port == PORT_BNC)
		return -EINVAL;
	if (ecmd->transceiver != XCVR_INTERNAL)
		return -EINVAL;
	if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE)
		return -EINVAL;
	if (ecmd->advertising & ~de->media_supported)
		return -EINVAL;
	if (ecmd->autoneg == AUTONEG_ENABLE &&
	    (!(ecmd->advertising & ADVERTISED_Autoneg)))
		return -EINVAL;

	switch (ecmd->port) {
	case PORT_AUI:
		new_media = DE_MEDIA_AUI;
		if (!(ecmd->advertising & ADVERTISED_AUI))
			return -EINVAL;
		break;
	case PORT_BNC:
		new_media = DE_MEDIA_BNC;
		if (!(ecmd->advertising & ADVERTISED_BNC))
			return -EINVAL;
		break;
	default:
		if (ecmd->autoneg == AUTONEG_ENABLE)
			new_media = DE_MEDIA_TP_AUTO;
		else if (ecmd->duplex == DUPLEX_FULL)
			new_media = DE_MEDIA_TP_FD;
		else
			new_media = DE_MEDIA_TP;
		if (!(ecmd->advertising & ADVERTISED_TP))
			return -EINVAL;
		if (!(ecmd->advertising & (ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half)))
			return -EINVAL;
		break;
	}

	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;

	if ((new_media == de->media_type) &&
	    (media_lock == de->media_lock) &&
	    (ecmd->advertising == de->media_advertise))
		return 0; /* nothing to change */

	de_link_down(de);
	mod_timer(&de->media_timer, jiffies + DE_TIMER_NO_LINK);
	de_stop_rxtx(de);

	de->media_type = new_media;
	de->media_lock = media_lock;
	de->media_advertise = ecmd->advertising;
	de_set_media(de);
	if (netif_running(de->dev))
		de_start_rxtx(de);

	return 0;
}

static void de_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct de_private *de = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}

static int de_get_regs_len(struct net_device *dev)
{
	return DE_REGS_SIZE;
}

static int de_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	spin_lock_irq(&de->lock);
	rc = __de_get_settings(de, ecmd);
	spin_unlock_irq(&de->lock);

	return rc;
}

static int de_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
{
	struct de_private *de = netdev_priv(dev);
	int rc;

	spin_lock_irq(&de->lock);
	rc = __de_set_settings(de, ecmd);
	spin_unlock_irq(&de->lock);

	return rc;
}

static u32 de_get_msglevel(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);

	return de->msg_enable;
}

static void de_set_msglevel(struct net_device *dev, u32 msglvl)
{
	struct de_private *de = netdev_priv(dev);

	de->msg_enable = msglvl;
}

static int de_get_eeprom(struct net_device *dev,
			 struct ethtool_eeprom *eeprom, u8 *data)
{
	struct de_private *de = netdev_priv(dev);

	if (!de->ee_data)
		return -EOPNOTSUPP;
	if ((eeprom->offset != 0) || (eeprom->magic != 0) ||
	    (eeprom->len != DE_EEPROM_SIZE))
		return -EINVAL;
	memcpy(data, de->ee_data, eeprom->len);

	return 0;
}

static int de_nway_reset(struct net_device *dev)
{
	struct de_private *de = netdev_priv(dev);
	u32 status;

	if (de->media_type != DE_MEDIA_TP_AUTO)
		return -EINVAL;
	if (netif_carrier_ok(de->dev))
		de_link_down(de);

	status = dr32(SIAStatus);
	dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
	netif_info(de, link, dev, "link nway restart, status %x,%x\n",
		   status, dr32(SIAStatus));
	return 0;
}

static void de_get_regs(struct net_device *dev, struct ethtool_regs *regs,
			void *data)
{
	struct de_private *de = netdev_priv(dev);

	regs->version = (DE_REGS_VER << 2) | de->de21040;

	spin_lock_irq(&de->lock);
	__de_get_regs(de, data);
	spin_unlock_irq(&de->lock);
}

static const struct ethtool_ops de_ethtool_ops = {
	.get_link		= ethtool_op_get_link,
	.get_drvinfo		= de_get_drvinfo,
	.get_regs_len		= de_get_regs_len,
	.get_settings		= de_get_settings,
	.set_settings		= de_set_settings,
	.get_msglevel		= de_get_msglevel,
	.set_msglevel		= de_set_msglevel,
	.get_eeprom		= de_get_eeprom,
	.nway_reset		= de_nway_reset,
	.get_regs		= de_get_regs,
};

static void de21040_get_mac_address(struct de_private *de)
{
	unsigned i;

	dw32 (ROMCmd, 0);	/* Reset the pointer with a dummy write. */
	udelay(5);

	for (i = 0; i < 6; i++) {
		int value, boguscnt = 100000;
		do {
			value = dr32(ROMCmd);
			rmb();
		} while (value < 0 && --boguscnt > 0);
		de->dev->dev_addr[i] = value;
		udelay(1);
		if (boguscnt <= 0)
			pr_warn("timeout reading 21040 MAC address byte %u\n",
				i);
	}
}

static void de21040_get_media_info(struct de_private *de)
{
	unsigned int i;

	de->media_type = DE_MEDIA_TP;
	de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full |
			       SUPPORTED_10baseT_Half | SUPPORTED_AUI;
	de->media_advertise = de->media_supported;

	for (i = 0; i < DE_MAX_MEDIA; i++) {
		switch (i) {
		case DE_MEDIA_AUI:
		case DE_MEDIA_TP:
		case DE_MEDIA_TP_FD:
			de->media[i].type = i;
			de->media[i].csr13 = t21040_csr13[i];
			de->media[i].csr14 = t21040_csr14[i];
			de->media[i].csr15 = t21040_csr15[i];
			break;
		default:
			de->media[i].type = DE_MEDIA_INVALID;
			break;
		}
	}
}

/* Note: this routine returns extra data bits for size detection. */
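/* Bit-banged access to the 93Cxx serial EEPROM behind ROMCmd: select the
 * chip, shift out the read opcode and address one bit at a time via
 * EE_DATA_WRITE while pulsing EE_SHIFT_CLK, then shift in 16 data bits
 * from EE_DATA_READ and deselect. Leading dummy bits are kept in the
 * return value for the size detection mentioned above.
 */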
static unsigned tulip_read_eeprom(void __iomem *regs, int location,
				  int addr_len)
{
	int i;
	unsigned retval = 0;
	void __iomem *ee_addr = regs + ROMCmd;
	int read_cmd = location | (EE_READ_CMD << addr_len);

	writel(EE_ENB & ~EE_CS, ee_addr);
	writel(EE_ENB, ee_addr);

	/* Shift the read command bits out. */
	for (i = 4 + addr_len; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
		writel(EE_ENB | dataval, ee_addr);
		readl(ee_addr);
		writel(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
	}
	writel(EE_ENB, ee_addr);
	readl(ee_addr);

	for (i = 16; i > 0; i--) {
		writel(EE_ENB | EE_SHIFT_CLK, ee_addr);
		readl(ee_addr);
		retval = (retval << 1) | ((readl(ee_addr) & EE_DATA_READ) ? 1 : 0);
		writel(EE_ENB, ee_addr);
		readl(ee_addr);
	}

	/* Terminate the EEPROM access. */
	writel(EE_ENB & ~EE_CS, ee_addr);
	return retval;
}

static void de21041_get_srom_info(struct de_private *de)
{
	unsigned i, sa_offset = 0, ofs;
	u8 ee_data[DE_EEPROM_SIZE + 6] = {};
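	/* Probe the EEPROM address width first: read the top location with
	 * an 8-bit address and test the extra leading data bits returned;
	 * a larger 8-bit-address part sets bit 0x40000, while a small
	 * 93C46-style part needs only 6 address bits. */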
	unsigned ee_addr_size = tulip_read_eeprom(de->regs, 0xff, 8) & 0x40000 ? 8 : 6;
	struct de_srom_info_leaf *il;
	void *bufp;

	/* download entire eeprom */
	for (i = 0; i < DE_EEPROM_WORDS; i++)
		((__le16 *)ee_data)[i] =
			cpu_to_le16(tulip_read_eeprom(de->regs, i, ee_addr_size));

	/* DEC now has a specification but early board makers
	   just put the address in the first EEPROM locations. */
	/* This does memcmp(eedata, eedata+16, 8) */

#ifndef CONFIG_MIPS_COBALT

	for (i = 0; i < 8; i ++)
		if (ee_data[i] != ee_data[16+i])
			sa_offset = 20;

#endif

	/* store MAC address */
	for (i = 0; i < 6; i ++)
		de->dev->dev_addr[i] = ee_data[i + sa_offset];

	/* get offset of controller 0 info leaf. ignore 2nd byte. */
	ofs = ee_data[SROMC0InfoLeaf];
	if (ofs >= (sizeof(ee_data) - sizeof(struct de_srom_info_leaf) - sizeof(struct de_srom_media_block)))
		goto bad_srom;

	/* get pointer to info leaf */
	il = (struct de_srom_info_leaf *) &ee_data[ofs];

	/* paranoia checks */
	if (il->n_blocks == 0)
		goto bad_srom;
	if ((sizeof(ee_data) - ofs) <
	    (sizeof(struct de_srom_info_leaf) + (sizeof(struct de_srom_media_block) * il->n_blocks)))
		goto bad_srom;

	/* get default media type */
	switch (get_unaligned(&il->default_media)) {
	case 0x0001:  de->media_type = DE_MEDIA_BNC; break;
	case 0x0002:  de->media_type = DE_MEDIA_AUI; break;
	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
	default: de->media_type = DE_MEDIA_TP_AUTO; break;
	}

	if (netif_msg_probe(de))
		pr_info("de%d: SROM leaf offset %u, default media %s\n",
			de->board_idx, ofs, media_name[de->media_type]);

	/* init SIA register values to defaults */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		de->media[i].type = DE_MEDIA_INVALID;
		de->media[i].csr13 = 0xffff;
		de->media[i].csr14 = 0xffff;
		de->media[i].csr15 = 0xffff;
	}
	/* parse media blocks to see which media types are supported,
	 * and if any custom CSR values are provided
	 */
	bufp = ((void *)il) + sizeof(*il);
	for (i = 0; i < il->n_blocks; i++) {
		struct de_srom_media_block *ib = bufp;
		unsigned idx;

		/* index based on media type in media block */
		switch(ib->opts & MediaBlockMask) {
		case 0: /* 10baseT */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Half
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		case 1: /* BNC */
			de->media_supported |= SUPPORTED_BNC;
			idx = DE_MEDIA_BNC;
			break;
		case 2: /* AUI */
			de->media_supported |= SUPPORTED_AUI;
			idx = DE_MEDIA_AUI;
			break;
		case 4: /* 10baseT-FD */
			de->media_supported |= SUPPORTED_TP | SUPPORTED_10baseT_Full
					  | SUPPORTED_Autoneg;
			idx = DE_MEDIA_TP_FD;
			de->media[DE_MEDIA_TP_AUTO].type = DE_MEDIA_TP_AUTO;
			break;
		default:
			goto bad_srom;
		}

		de->media[idx].type = idx;

		if (netif_msg_probe(de))
			pr_info("de%d: media block #%u: %s",
				de->board_idx, i,
				media_name[de->media[idx].type]);

		bufp += sizeof (ib->opts);

		if (ib->opts & MediaCustomCSRs) {
			de->media[idx].csr13 = get_unaligned(&ib->csr13);
			de->media[idx].csr14 = get_unaligned(&ib->csr14);
			de->media[idx].csr15 = get_unaligned(&ib->csr15);
			bufp += sizeof(ib->csr13) + sizeof(ib->csr14) +
				sizeof(ib->csr15);

			if (netif_msg_probe(de))
				pr_cont(" (%x,%x,%x)\n",
					de->media[idx].csr13,
					de->media[idx].csr14,
					de->media[idx].csr15);

		} else {
			if (netif_msg_probe(de))
				pr_cont("\n");
		}

		if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
			break;
	}

	de->media_advertise = de->media_supported;

fill_defaults:
	/* fill in defaults, for cases where custom CSRs not used */
	for (i = 0; i < DE_MAX_MEDIA; i++) {
		if (de->media[i].csr13 == 0xffff)
			de->media[i].csr13 = t21041_csr13[i];
		if (de->media[i].csr14 == 0xffff) {
			/* autonegotiation is broken at least on some chip
			   revisions - rev. 0x21 works, 0x11 does not */
			if (de->pdev->revision < 0x20)
				de->media[i].csr14 = t21041_csr14_brk[i];
			else
				de->media[i].csr14 = t21041_csr14[i];
		}
		if (de->media[i].csr15 == 0xffff)
			de->media[i].csr15 = t21041_csr15[i];
	}

	de->ee_data = kmemdup(&ee_data[0], DE_EEPROM_SIZE, GFP_KERNEL);

	return;

bad_srom:
	/* for error cases, it's ok to assume we support all these */
	for (i = 0; i < DE_MAX_MEDIA; i++)
		de->media[i].type = i;
	de->media_supported =
		SUPPORTED_10baseT_Half |
		SUPPORTED_10baseT_Full |
		SUPPORTED_Autoneg |
		SUPPORTED_TP |
		SUPPORTED_AUI |
		SUPPORTED_BNC;
	goto fill_defaults;
}

static const struct net_device_ops de_netdev_ops = {
	.ndo_open		= de_open,
	.ndo_stop		= de_close,
	.ndo_set_rx_mode	= de_set_rx_mode,
	.ndo_start_xmit		= de_start_xmit,
	.ndo_get_stats		= de_get_stats,
	.ndo_tx_timeout		= de_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int de_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct de_private *de;
	int rc;
	void __iomem *regs;
	unsigned long pciaddr;
	static int board_idx = -1;

	board_idx++;

#ifndef MODULE
	if (board_idx == 0)
		pr_info("%s\n", version);
#endif

	/* allocate a new ethernet device structure, and fill in defaults */
	dev = alloc_etherdev(sizeof(struct de_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops = &de_netdev_ops;
	SET_NETDEV_DEV(dev, &pdev->dev);
	dev->ethtool_ops = &de_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	de = netdev_priv(dev);
	de->de21040 = ent->driver_data == 0 ? 1 : 0;
	de->pdev = pdev;
	de->dev = dev;
	de->msg_enable = (debug < 0 ? DE_DEF_MSG_ENABLE : debug);
	de->board_idx =