
/drivers/net/pci-skeleton.c

https://gitlab.com/TeamCarbonXtreme/android_kernel_samsung_bcm21553-common
Possible License(s): GPL-2.0
  1. /*
  2. drivers/net/pci-skeleton.c
  3. Maintained by Jeff Garzik <jgarzik@pobox.com>
  4. Original code came from 8139too.c, which in turn was based
  5. originally on Donald Becker's rtl8139.c driver, versions 1.11
  6. and older. This driver was originally based on rtl8139.c
  7. version 1.07. Header of rtl8139.c version 1.11:
  8. -----<snip>-----
  9. Written 1997-2000 by Donald Becker.
  10. This software may be used and distributed according to the
  11. terms of the GNU General Public License (GPL), incorporated
  12. herein by reference. Drivers based on or derived from this
  13. code fall under the GPL and must retain the authorship,
  14. copyright and license notice. This file is not a complete
  15. program and may only be used when the entire operating
  16. system is licensed under the GPL.
  17. This driver is for boards based on the RTL8129 and RTL8139
  18. PCI ethernet chips.
  19. The author may be reached as becker@scyld.com, or C/O Scyld
  20. Computing Corporation 410 Severn Ave., Suite 210 Annapolis
  21. MD 21403
  22. Support and updates available at
  23. http://www.scyld.com/network/rtl8139.html
  24. Twister-tuning table provided by Kinston
  25. <shangh@realtek.com.tw>.
  26. -----<snip>-----
  27. This software may be used and distributed according to the terms
  28. of the GNU General Public License, incorporated herein by reference.
  29. -----------------------------------------------------------------------------
  30. Theory of Operation
  31. I. Board Compatibility
  32. This device driver is designed for the RealTek RTL8139 series, the RealTek
  33. Fast Ethernet controllers for PCI and CardBus. This chip is used on many
  34. low-end boards, sometimes with its markings changed.
  35. II. Board-specific settings
  36. PCI bus devices are configured by the system at boot time, so no jumpers
  37. need to be set on the board. The system BIOS will assign the
  38. PCI INTA signal to a (preferably otherwise unused) system IRQ line.
  39. III. Driver operation
  40. IIIa. Rx Ring buffers
  41. The receive unit uses a single linear ring buffer rather than the more
  42. common (and more efficient) descriptor-based architecture. Incoming frames
  43. are sequentially stored into the Rx region, and the host copies them into
  44. skbuffs.
  45. Comment: While it is theoretically possible to process many frames in place,
  46. any delay in Rx processing would cause us to drop frames. More importantly,
  47. the Linux protocol stack is not designed to operate in this manner.
  48. IIIb. Tx operation
  49. The RTL8139 uses a fixed set of four Tx descriptors in register space.
  50. In a stunningly bad design choice, Tx frames must be 32-bit aligned. Linux
  51. aligns the IP header on word boundaries, and the 14-byte ethernet header means
  52. that almost all frames will need to be copied to an alignment buffer.
  53. IVb. References
  54. http://www.realtek.com.tw/cn/cn.html
  55. http://www.scyld.com/expert/NWay.html
  56. IVc. Errata
  57. */
  58. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  59. #include <linux/module.h>
  60. #include <linux/kernel.h>
  61. #include <linux/pci.h>
  62. #include <linux/init.h>
  63. #include <linux/ioport.h>
  64. #include <linux/netdevice.h>
  65. #include <linux/etherdevice.h>
  66. #include <linux/delay.h>
  67. #include <linux/ethtool.h>
  68. #include <linux/mii.h>
  69. #include <linux/crc32.h>
  70. #include <linux/io.h>
  71. #define NETDRV_VERSION "1.0.1"
  72. #define MODNAME "netdrv"
  73. #define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
  74. static char version[] __devinitdata =
  75. KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
  76. " Support available from http://foo.com/bar/baz.html\n";
  77. /* define to 1 to enable PIO instead of MMIO */
  78. #undef USE_IO_OPS
  79. /* define to 1 to enable copious debugging info */
  80. #undef NETDRV_DEBUG
  81. /* define to 1 to disable lightweight runtime debugging checks */
  82. #undef NETDRV_NDEBUG
  83. #ifdef NETDRV_DEBUG
  84. /* note: prints function name for you */
  85. #define DPRINTK(fmt, args...) \
  86. printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
  87. #else
  88. #define DPRINTK(fmt, args...) \
  89. do { \
  90. if (0) \
  91. printk(KERN_DEBUG fmt, ##args); \
  92. } while (0)
  93. #endif
  94. #ifdef NETDRV_NDEBUG
  95. #define assert(expr) do {} while (0)
  96. #else
  97. #define assert(expr) \
  98. if (!(expr)) { \
  99. printk("Assertion failed! %s,%s,%s,line=%d\n", \
  100. #expr, __FILE__, __func__, __LINE__); \
  101. }
  102. #endif
  103. /* A few user-configurable values. */
  104. /* media options */
  105. static int media[] = {-1, -1, -1, -1, -1, -1, -1, -1};
  106. /* Maximum events (Rx packets, etc.) to handle at each interrupt. */
  107. static int max_interrupt_work = 20;
  108. /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
  109. The RTL chips use a 64 element hash table based on the Ethernet CRC. */
  110. static int multicast_filter_limit = 32;
  111. /* Size of the in-memory receive ring. */
  112. #define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
  113. #define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
  114. #define RX_BUF_PAD 16
  115. #define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
  116. #define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
  117. /* Number of Tx descriptor registers. */
  118. #define NUM_TX_DESC 4
  119. /* max supported ethernet frame size -- must be at least (dev->mtu+14+4).*/
  120. #define MAX_ETH_FRAME_SIZE 1536
  121. /* Size of the Tx bounce buffers -- must be at least (dev->mtu+14+4). */
  122. #define TX_BUF_SIZE MAX_ETH_FRAME_SIZE
  123. #define TX_BUF_TOT_LEN (TX_BUF_SIZE * NUM_TX_DESC)
  124. /* PCI Tuning Parameters
  125. Threshold is bytes transferred to chip before transmission starts. */
  126. #define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
  127. /* The following settings are log_2(bytes)-4:
  128. 0==16 bytes 1==32 2==64 3==128 4==256 5==512 6==1024 7==end of packet.
  129. */
  130. #define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
  131. #define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
  132. #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
  133. /* Operational parameters that usually are not changed. */
  134. /* Time in jiffies before concluding the transmitter is hung. */
  135. #define TX_TIMEOUT (6 * HZ)
  136. enum {
  137. HAS_CHIP_XCVR = 0x020000,
  138. HAS_LNK_CHNG = 0x040000,
  139. };
  140. #define NETDRV_MIN_IO_SIZE 0x80
  141. #define RTL8139B_IO_SIZE 256
  142. #define NETDRV_CAPS (HAS_CHIP_XCVR | HAS_LNK_CHNG)
  143. typedef enum {
  144. RTL8139 = 0,
  145. NETDRV_CB,
  146. SMC1211TX,
  147. /*MPX5030,*/
  148. DELTA8139,
  149. ADDTRON8139,
  150. } board_t;
  151. /* indexed by board_t, above */
  152. static struct {
  153. const char *name;
  154. } board_info[] __devinitdata = {
  155. { "RealTek RTL8139 Fast Ethernet" },
  156. { "RealTek RTL8139B PCI/CardBus" },
  157. { "SMC1211TX EZCard 10/100 (RealTek RTL8139)" },
  158. /* { MPX5030, "Accton MPX5030 (RealTek RTL8139)" },*/
  159. { "Delta Electronics 8139 10/100BaseTX" },
  160. { "Addtron Technology 8139 10/100BaseTX" },
  161. };
  162. static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
  163. {0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
  164. {0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
  165. {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
  166. /* {0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MPX5030 },*/
  167. {0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DELTA8139 },
  168. {0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
  169. {0,}
  170. };
  171. MODULE_DEVICE_TABLE(pci, netdrv_pci_tbl);
  172. /* The rest of these values should never change. */
  173. /* Symbolic offsets to registers. */
  174. enum NETDRV_registers {
  175. MAC0 = 0, /* Ethernet hardware address. */
  176. MAR0 = 8, /* Multicast filter. */
  177. TxStatus0 = 0x10, /* Transmit status (Four 32bit registers). */
  178. TxAddr0 = 0x20, /* Tx descriptors (also four 32bit). */
  179. RxBuf = 0x30,
  180. RxEarlyCnt = 0x34,
  181. RxEarlyStatus = 0x36,
  182. ChipCmd = 0x37,
  183. RxBufPtr = 0x38,
  184. RxBufAddr = 0x3A,
  185. IntrMask = 0x3C,
  186. IntrStatus = 0x3E,
  187. TxConfig = 0x40,
  188. ChipVersion = 0x43,
  189. RxConfig = 0x44,
  190. Timer = 0x48, /* A general-purpose counter. */
  191. RxMissed = 0x4C, /* 24 bits valid, write clears. */
  192. Cfg9346 = 0x50,
  193. Config0 = 0x51,
  194. Config1 = 0x52,
  195. FlashReg = 0x54,
  196. MediaStatus = 0x58,
  197. Config3 = 0x59,
  198. Config4 = 0x5A, /* absent on RTL-8139A */
  199. HltClk = 0x5B,
  200. MultiIntr = 0x5C,
  201. TxSummary = 0x60,
  202. BasicModeCtrl = 0x62,
  203. BasicModeStatus = 0x64,
  204. NWayAdvert = 0x66,
  205. NWayLPAR = 0x68,
  206. NWayExpansion = 0x6A,
  207. /* Undocumented registers, but required for proper operation. */
  208. FIFOTMS = 0x70, /* FIFO Control and test. */
  209. CSCR = 0x74, /* Chip Status and Configuration Register. */
  210. PARA78 = 0x78,
  211. PARA7c = 0x7c, /* Magic transceiver parameter register. */
  212. Config5 = 0xD8, /* absent on RTL-8139A */
  213. };
  214. enum ClearBitMasks {
  215. MultiIntrClear = 0xF000,
  216. ChipCmdClear = 0xE2,
  217. Config1Clear = (1 << 7) | (1 << 6) | (1 << 3) | (1 << 2) | (1 << 1),
  218. };
  219. enum ChipCmdBits {
  220. CmdReset = 0x10,
  221. CmdRxEnb = 0x08,
  222. CmdTxEnb = 0x04,
  223. RxBufEmpty = 0x01,
  224. };
  225. /* Interrupt register bits, using my own meaningful names. */
  226. enum IntrStatusBits {
  227. PCIErr = 0x8000,
  228. PCSTimeout = 0x4000,
  229. RxFIFOOver = 0x40,
  230. RxUnderrun = 0x20,
  231. RxOverflow = 0x10,
  232. TxErr = 0x08,
  233. TxOK = 0x04,
  234. RxErr = 0x02,
  235. RxOK = 0x01,
  236. };
  237. enum TxStatusBits {
  238. TxHostOwns = 0x2000,
  239. TxUnderrun = 0x4000,
  240. TxStatOK = 0x8000,
  241. TxOutOfWindow = 0x20000000,
  242. TxAborted = 0x40000000,
  243. TxCarrierLost = 0x80000000,
  244. };
  245. enum RxStatusBits {
  246. RxMulticast = 0x8000,
  247. RxPhysical = 0x4000,
  248. RxBroadcast = 0x2000,
  249. RxBadSymbol = 0x0020,
  250. RxRunt = 0x0010,
  251. RxTooLong = 0x0008,
  252. RxCRCErr = 0x0004,
  253. RxBadAlign = 0x0002,
  254. RxStatusOK = 0x0001,
  255. };
  256. /* Bits in RxConfig. */
  257. enum rx_mode_bits {
  258. AcceptErr = 0x20,
  259. AcceptRunt = 0x10,
  260. AcceptBroadcast = 0x08,
  261. AcceptMulticast = 0x04,
  262. AcceptMyPhys = 0x02,
  263. AcceptAllPhys = 0x01,
  264. };
  265. /* Bits in TxConfig. */
  266. enum tx_config_bits {
  267. TxIFG1 = (1 << 25), /* Interframe Gap Time */
  268. TxIFG0 = (1 << 24), /* Enabling these bits violates IEEE 802.3 */
  269. TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
  270. TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
  271. TxClearAbt = (1 << 0), /* Clear abort (WO) */
  272. TxDMAShift = 8, /* DMA burst value(0-7) is shift this many bits */
  273. TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
  274. };
  275. /* Bits in Config1 */
  276. enum Config1Bits {
  277. Cfg1_PM_Enable = 0x01,
  278. Cfg1_VPD_Enable = 0x02,
  279. Cfg1_PIO = 0x04,
  280. Cfg1_MMIO = 0x08,
  281. Cfg1_LWAKE = 0x10,
  282. Cfg1_Driver_Load = 0x20,
  283. Cfg1_LED0 = 0x40,
  284. Cfg1_LED1 = 0x80,
  285. };
  286. enum RxConfigBits {
  287. /* Early Rx threshold, none or X/16 */
  288. RxCfgEarlyRxNone = 0,
  289. RxCfgEarlyRxShift = 24,
  290. /* rx fifo threshold */
  291. RxCfgFIFOShift = 13,
  292. RxCfgFIFONone = (7 << RxCfgFIFOShift),
  293. /* Max DMA burst */
  294. RxCfgDMAShift = 8,
  295. RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
  296. /* rx ring buffer length */
  297. RxCfgRcv8K = 0,
  298. RxCfgRcv16K = (1 << 11),
  299. RxCfgRcv32K = (1 << 12),
  300. RxCfgRcv64K = (1 << 11) | (1 << 12),
  301. /* Disable packet wrap at end of Rx buffer */
  302. RxNoWrap = (1 << 7),
  303. };
  304. /* Twister tuning parameters from RealTek.
  305. Completely undocumented, but required to tune bad links. */
  306. enum CSCRBits {
  307. CSCR_LinkOKBit = 0x0400,
  308. CSCR_LinkChangeBit = 0x0800,
  309. CSCR_LinkStatusBits = 0x0f000,
  310. CSCR_LinkDownOffCmd = 0x003c0,
  311. CSCR_LinkDownCmd = 0x0f3c0,
  312. };
  313. enum Cfg9346Bits {
  314. Cfg9346_Lock = 0x00,
  315. Cfg9346_Unlock = 0xC0,
  316. };
  317. #define PARA78_default 0x78fa8388
  318. #define PARA7c_default 0xcb38de43 /* param[0][3] */
  319. #define PARA7c_xxx 0xcb38de43
  320. static const unsigned long param[4][4] = {
  321. {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
  322. {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
  323. {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
  324. {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
  325. };
  326. struct ring_info {
  327. struct sk_buff *skb;
  328. dma_addr_t mapping;
  329. };
  330. typedef enum {
  331. CH_8139 = 0,
  332. CH_8139_K,
  333. CH_8139A,
  334. CH_8139B,
  335. CH_8130,
  336. CH_8139C,
  337. } chip_t;
  338. /* directly indexed by chip_t, above */
  339. static const struct {
  340. const char *name;
  341. u8 version; /* from RTL8139C docs */
  342. u32 RxConfigMask; /* should clear the bits supported by this chip */
  343. } rtl_chip_info[] = {
  344. { "RTL-8139",
  345. 0x40,
  346. 0xf0fe0040, /* XXX copied from RTL8139A, verify */
  347. },
  348. { "RTL-8139 rev K",
  349. 0x60,
  350. 0xf0fe0040,
  351. },
  352. { "RTL-8139A",
  353. 0x70,
  354. 0xf0fe0040,
  355. },
  356. { "RTL-8139B",
  357. 0x78,
  358. 0xf0fc0040
  359. },
  360. { "RTL-8130",
  361. 0x7C,
  362. 0xf0fe0040, /* XXX copied from RTL8139A, verify */
  363. },
  364. { "RTL-8139C",
  365. 0x74,
  366. 0xf0fc0040, /* XXX copied from RTL8139B, verify */
  367. },
  368. };
  369. struct netdrv_private {
  370. board_t board;
  371. void *mmio_addr;
  372. int drv_flags;
  373. struct pci_dev *pci_dev;
  374. struct timer_list timer; /* Media selection timer. */
  375. unsigned char *rx_ring;
  376. unsigned int cur_rx; /* Index into the Rx buffer of next Rx pkt. */
  377. unsigned int tx_flag;
  378. atomic_t cur_tx;
  379. atomic_t dirty_tx;
  380. /* The saved address of a sent-in-place packet/buffer, for skfree(). */
  381. struct ring_info tx_info[NUM_TX_DESC];
  382. unsigned char *tx_buf[NUM_TX_DESC]; /* Tx bounce buffers */
  383. unsigned char *tx_bufs; /* Tx bounce buffer region. */
  384. dma_addr_t rx_ring_dma;
  385. dma_addr_t tx_bufs_dma;
  386. char phys[4]; /* MII device addresses. */
  387. char twistie, twist_row, twist_col; /* Twister tune state. */
  388. unsigned int full_duplex:1; /* Full-duplex operation requested. */
  389. unsigned int duplex_lock:1;
  390. unsigned int default_port:4; /* Last dev->if_port value. */
  391. unsigned int media2:4; /* Secondary monitored media port. */
  392. unsigned int medialock:1; /* Don't sense media type. */
  393. unsigned int mediasense:1; /* Media sensing in progress. */
  394. spinlock_t lock;
  395. chip_t chipset;
  396. };
  397. MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
  398. MODULE_DESCRIPTION("Skeleton for a PCI Fast Ethernet driver");
  399. MODULE_LICENSE("GPL");
  400. module_param(multicast_filter_limit, int, 0);
  401. module_param(max_interrupt_work, int, 0);
  402. module_param_array(media, int, NULL, 0);
  403. MODULE_PARM_DESC(multicast_filter_limit,
  404. MODNAME " maximum number of filtered multicast addresses");
  405. MODULE_PARM_DESC(max_interrupt_work,
  406. MODNAME " maximum events handled per interrupt");
  407. MODULE_PARM_DESC(media,
  408. MODNAME " Bits 0-3: media type, bit 17: full duplex");
  409. static int read_eeprom(void *ioaddr, int location, int addr_len);
  410. static int netdrv_open(struct net_device *dev);
  411. static int mdio_read(struct net_device *dev, int phy_id, int location);
  412. static void mdio_write(struct net_device *dev, int phy_id, int location,
  413. int val);
  414. static void netdrv_timer(unsigned long data);
  415. static void netdrv_tx_timeout(struct net_device *dev);
  416. static void netdrv_init_ring(struct net_device *dev);
  417. static int netdrv_start_xmit(struct sk_buff *skb,
  418. struct net_device *dev);
  419. static irqreturn_t netdrv_interrupt(int irq, void *dev_instance);
  420. static int netdrv_close(struct net_device *dev);
  421. static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
  422. static void netdrv_set_rx_mode(struct net_device *dev);
  423. static void netdrv_hw_start(struct net_device *dev);
  424. #ifdef USE_IO_OPS
  425. #define NETDRV_R8(reg) inb(((unsigned long)ioaddr) + (reg))
  426. #define NETDRV_R16(reg) inw(((unsigned long)ioaddr) + (reg))
  427. #define NETDRV_R32(reg) ((unsigned long)inl(((unsigned long)ioaddr) + (reg)))
  428. #define NETDRV_W8(reg, val8) outb((val8), ((unsigned long)ioaddr) + (reg))
  429. #define NETDRV_W16(reg, val16) outw((val16), ((unsigned long)ioaddr) + (reg))
  430. #define NETDRV_W32(reg, val32) outl((val32), ((unsigned long)ioaddr) + (reg))
  431. #define NETDRV_W8_F NETDRV_W8
  432. #define NETDRV_W16_F NETDRV_W16
  433. #define NETDRV_W32_F NETDRV_W32
  434. #undef readb
  435. #undef readw
  436. #undef readl
  437. #undef writeb
  438. #undef writew
  439. #undef writel
  440. #define readb(addr) inb((unsigned long)(addr))
  441. #define readw(addr) inw((unsigned long)(addr))
  442. #define readl(addr) inl((unsigned long)(addr))
  443. #define writeb(val, addr) outb((val), (unsigned long)(addr))
  444. #define writew(val, addr) outw((val), (unsigned long)(addr))
  445. #define writel(val, addr) outl((val), (unsigned long)(addr))
  446. #else
  447. /* write MMIO register, with flush */
  448. /* Flush avoids rtl8139 bug w/ posted MMIO writes */
  449. #define NETDRV_W8_F(reg, val8) \
  450. do { \
  451. writeb((val8), ioaddr + (reg)); \
  452. readb(ioaddr + (reg)); \
  453. } while (0)
  454. #define NETDRV_W16_F(reg, val16) \
  455. do { \
  456. writew((val16), ioaddr + (reg)); \
  457. readw(ioaddr + (reg)); \
  458. } while (0)
  459. #define NETDRV_W32_F(reg, val32) \
  460. do { \
  461. writel((val32), ioaddr + (reg)); \
  462. readl(ioaddr + (reg)); \
  463. } while (0)
  464. #ifdef MMIO_FLUSH_AUDIT_COMPLETE
  465. /* write MMIO register */
  466. #define NETDRV_W8(reg, val8) writeb((val8), ioaddr + (reg))
  467. #define NETDRV_W16(reg, val16) writew((val16), ioaddr + (reg))
  468. #define NETDRV_W32(reg, val32) writel((val32), ioaddr + (reg))
  469. #else
  470. /* write MMIO register, then flush */
  471. #define NETDRV_W8 NETDRV_W8_F
  472. #define NETDRV_W16 NETDRV_W16_F
  473. #define NETDRV_W32 NETDRV_W32_F
  474. #endif /* MMIO_FLUSH_AUDIT_COMPLETE */
  475. /* read MMIO register */
  476. #define NETDRV_R8(reg) readb(ioaddr + (reg))
  477. #define NETDRV_R16(reg) readw(ioaddr + (reg))
  478. #define NETDRV_R32(reg) ((unsigned long) readl(ioaddr + (reg)))
  479. #endif /* USE_IO_OPS */
  480. static const u16 netdrv_intr_mask =
  481. PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
  482. TxErr | TxOK | RxErr | RxOK;
  483. static const unsigned int netdrv_rx_config =
  484. RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
  485. (RX_FIFO_THRESH << RxCfgFIFOShift) |
  486. (RX_DMA_BURST << RxCfgDMAShift);
  487. static int __devinit netdrv_init_board(struct pci_dev *pdev,
  488. struct net_device **dev_out,
  489. void **ioaddr_out)
  490. {
  491. void *ioaddr = NULL;
  492. struct net_device *dev;
  493. struct netdrv_private *tp;
  494. int rc, i;
  495. u32 pio_start, pio_end, pio_flags, pio_len;
  496. unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
  497. u32 tmp;
  498. DPRINTK("ENTER\n");
  499. assert(pdev != NULL);
  500. assert(ioaddr_out != NULL);
  501. *ioaddr_out = NULL;
  502. *dev_out = NULL;
  503. /* dev zeroed in alloc_etherdev */
  504. dev = alloc_etherdev(sizeof(*tp));
  505. if (dev == NULL) {
  506. dev_err(&pdev->dev, "unable to alloc new ethernet\n");
  507. DPRINTK("EXIT, returning -ENOMEM\n");
  508. return -ENOMEM;
  509. }
  510. SET_NETDEV_DEV(dev, &pdev->dev);
  511. tp = netdev_priv(dev);
  512. /* enable device(incl. PCI PM wakeup), and bus-mastering */
  513. rc = pci_enable_device(pdev);
  514. if (rc)
  515. goto err_out;
  516. pio_start = pci_resource_start(pdev, 0);
  517. pio_end = pci_resource_end(pdev, 0);
  518. pio_flags = pci_resource_flags(pdev, 0);
  519. pio_len = pci_resource_len(pdev, 0);
  520. mmio_start = pci_resource_start(pdev, 1);
  521. mmio_end = pci_resource_end(pdev, 1);
  522. mmio_flags = pci_resource_flags(pdev, 1);
  523. mmio_len = pci_resource_len(pdev, 1);
  524. /* set this immediately, we need to know before
  525. * we talk to the chip directly */
  526. DPRINTK("PIO region size == %#02X\n", pio_len);
  527. DPRINTK("MMIO region size == %#02lX\n", mmio_len);
  528. /* make sure PCI base addr 0 is PIO */
  529. if (!(pio_flags & IORESOURCE_IO)) {
  530. dev_err(&pdev->dev, "region #0 not a PIO resource, aborting\n");
  531. rc = -ENODEV;
  532. goto err_out;
  533. }
  534. /* make sure PCI base addr 1 is MMIO */
  535. if (!(mmio_flags & IORESOURCE_MEM)) {
  536. dev_err(&pdev->dev, "region #1 not an MMIO resource, aborting\n");
  537. rc = -ENODEV;
  538. goto err_out;
  539. }
  540. /* check for weird/broken PCI region reporting */
  541. if ((pio_len < NETDRV_MIN_IO_SIZE) ||
  542. (mmio_len < NETDRV_MIN_IO_SIZE)) {
  543. dev_err(&pdev->dev, "Invalid PCI region size(s), aborting\n");
  544. rc = -ENODEV;
  545. goto err_out;
  546. }
  547. rc = pci_request_regions(pdev, MODNAME);
  548. if (rc)
  549. goto err_out;
  550. pci_set_master(pdev);
  551. #ifdef USE_IO_OPS
  552. ioaddr = (void *)pio_start;
  553. #else
  554. /* ioremap MMIO region */
  555. ioaddr = ioremap(mmio_start, mmio_len);
  556. if (ioaddr == NULL) {
  557. dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
  558. rc = -EIO;
  559. goto err_out_free_res;
  560. }
  561. #endif /* USE_IO_OPS */
  562. /* Soft reset the chip. */
  563. NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
  564. /* Check that the chip has finished the reset. */
  565. for (i = 1000; i > 0; i--)
  566. if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
  567. break;
  568. else
  569. udelay(10);
  570. /* Bring the chip out of low-power mode. */
  571. /* <insert device-specific code here> */
  572. #ifndef USE_IO_OPS
  573. /* sanity checks -- ensure PIO and MMIO registers agree */
  574. assert(inb(pio_start+Config0) == readb(ioaddr+Config0));
  575. assert(inb(pio_start+Config1) == readb(ioaddr+Config1));
  576. assert(inb(pio_start+TxConfig) == readb(ioaddr+TxConfig));
  577. assert(inb(pio_start+RxConfig) == readb(ioaddr+RxConfig));
  578. #endif /* !USE_IO_OPS */
  579. /* identify chip attached to board */
  580. tmp = NETDRV_R8(ChipVersion);
  581. for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
  582. if (tmp == rtl_chip_info[i].version) {
  583. tp->chipset = i;
  584. goto match;
  585. }
  586. /* if unknown chip, assume array element #0, original RTL-8139 in this case */
  587. dev_printk(KERN_DEBUG, &pdev->dev,
  588. "unknown chip version, assuming RTL-8139\n");
  589. dev_printk(KERN_DEBUG, &pdev->dev, "TxConfig = %#lx\n",
  590. NETDRV_R32(TxConfig));
  591. tp->chipset = 0;
  592. match:
  593. DPRINTK("chipset id(%d) == index %d, '%s'\n",
  594. tmp, tp->chipset, rtl_chip_info[tp->chipset].name);
  595. rc = register_netdev(dev);
  596. if (rc)
  597. goto err_out_unmap;
  598. DPRINTK("EXIT, returning 0\n");
  599. *ioaddr_out = ioaddr;
  600. *dev_out = dev;
  601. return 0;
  602. err_out_unmap:
  603. #ifndef USE_IO_OPS
  604. iounmap(ioaddr);
  605. err_out_free_res:
  606. #endif
  607. pci_release_regions(pdev);
  608. err_out:
  609. free_netdev(dev);
  610. DPRINTK("EXIT, returning %d\n", rc);
  611. return rc;
  612. }
  613. static const struct net_device_ops netdrv_netdev_ops = {
  614. .ndo_open = netdrv_open,
  615. .ndo_stop = netdrv_close,
  616. .ndo_start_xmit = netdrv_start_xmit,
  617. .ndo_set_multicast_list = netdrv_set_rx_mode,
  618. .ndo_do_ioctl = netdrv_ioctl,
  619. .ndo_tx_timeout = netdrv_tx_timeout,
  620. .ndo_change_mtu = eth_change_mtu,
  621. .ndo_validate_addr = eth_validate_addr,
  622. .ndo_set_mac_address = eth_mac_addr,
  623. };
  624. static int __devinit netdrv_init_one(struct pci_dev *pdev,
  625. const struct pci_device_id *ent)
  626. {
  627. struct net_device *dev = NULL;
  628. struct netdrv_private *tp;
  629. int i, addr_len, option;
  630. void *ioaddr = NULL;
  631. static int board_idx = -1;
  632. /* when built into the kernel, we only print version if device is found */
  633. #ifndef MODULE
  634. static int printed_version;
  635. if (!printed_version++)
  636. printk(version);
  637. #endif
  638. DPRINTK("ENTER\n");
  639. assert(pdev != NULL);
  640. assert(ent != NULL);
  641. board_idx++;
  642. i = netdrv_init_board(pdev, &dev, &ioaddr);
  643. if (i < 0) {
  644. DPRINTK("EXIT, returning %d\n", i);
  645. return i;
  646. }
  647. tp = netdev_priv(dev);
  648. assert(ioaddr != NULL);
  649. assert(dev != NULL);
  650. assert(tp != NULL);
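/* EEPROM word 0 identifies the part (0x8129 parts use 8 address bits,
 * others 6); the station MAC address is stored as three little-endian
 * 16-bit words starting at EEPROM word 7, as read below. */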
  651. addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
  652. for (i = 0; i < 3; i++)
  653. ((u16 *)(dev->dev_addr))[i] =
  654. le16_to_cpu(read_eeprom(ioaddr, i + 7, addr_len));
  655. dev->netdev_ops = &netdrv_netdev_ops;
  656. dev->watchdog_timeo = TX_TIMEOUT;
  657. dev->irq = pdev->irq;
  658. dev->base_addr = (unsigned long) ioaddr;
  659. /* netdev_priv()/tp zeroed and aligned in alloc_etherdev */
  660. tp = netdev_priv(dev);
  661. /* note: tp->chipset set in netdrv_init_board */
  662. tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
  663. PCI_COMMAND_MASTER | NETDRV_CAPS;
  664. tp->pci_dev = pdev;
  665. tp->board = ent->driver_data;
  666. tp->mmio_addr = ioaddr;
  667. spin_lock_init(&tp->lock);
  668. pci_set_drvdata(pdev, dev);
  669. tp->phys[0] = 32;
  670. netdev_info(dev, "%s at %#lx, %pM IRQ %d\n",
  671. board_info[ent->driver_data].name,
  672. dev->base_addr, dev->dev_addr, dev->irq);
  673. netdev_printk(KERN_DEBUG, dev, "Identified 8139 chip type '%s'\n",
  674. rtl_chip_info[tp->chipset].name);
  675. /* Put the chip into low-power mode. */
  676. NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
  677. /* The lower four bits are the media type. */
  678. option = (board_idx > 7) ? 0 : media[board_idx];
  679. if (option > 0) {
  680. tp->full_duplex = (option & 0x200) ? 1 : 0;
  681. tp->default_port = option & 15;
  682. if (tp->default_port)
  683. tp->medialock = 1;
  684. }
  685. if (tp->full_duplex) {
  686. netdev_info(dev, "Media type forced to Full Duplex\n");
  687. mdio_write(dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
  688. tp->duplex_lock = 1;
  689. }
  690. DPRINTK("EXIT - returning 0\n");
  691. return 0;
  692. }
  693. static void __devexit netdrv_remove_one(struct pci_dev *pdev)
  694. {
  695. struct net_device *dev = pci_get_drvdata(pdev);
  696. struct netdrv_private *np;
  697. DPRINTK("ENTER\n");
  698. assert(dev != NULL);
  699. np = netdev_priv(dev);
  700. assert(np != NULL);
  701. unregister_netdev(dev);
  702. #ifndef USE_IO_OPS
  703. iounmap(np->mmio_addr);
  704. #endif /* !USE_IO_OPS */
  705. pci_release_regions(pdev);
  706. free_netdev(dev);
  707. pci_set_drvdata(pdev, NULL);
  708. pci_disable_device(pdev);
  709. DPRINTK("EXIT\n");
  710. }
  711. /* Serial EEPROM section. */
  712. /* EEPROM_Ctrl bits. */
  713. #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
  714. #define EE_CS 0x08 /* EEPROM chip select. */
  715. #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
  716. #define EE_WRITE_0 0x00
  717. #define EE_WRITE_1 0x02
  718. #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
  719. #define EE_ENB (0x80 | EE_CS)
  720. /* Delay between EEPROM clock transitions.
  721. No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
  722. */
  723. #define eeprom_delay() readl(ee_addr)
  724. /* The EEPROM commands include the always-set leading bit. */
  725. #define EE_WRITE_CMD (5)
  726. #define EE_READ_CMD (6)
  727. #define EE_ERASE_CMD (7)
  728. static int __devinit read_eeprom(void *ioaddr, int location, int addr_len)
  729. {
  730. int i;
  731. unsigned retval = 0;
  732. void *ee_addr = ioaddr + Cfg9346;
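/* Build the serial read command: the EE_READ_CMD opcode sits just above
 * the addr_len address bits and is clocked out MSB-first in the loop below. */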
  733. int read_cmd = location | (EE_READ_CMD << addr_len);
  734. DPRINTK("ENTER\n");
  735. writeb(EE_ENB & ~EE_CS, ee_addr);
  736. writeb(EE_ENB, ee_addr);
  737. eeprom_delay();
  738. /* Shift the read command bits out. */
  739. for (i = 4 + addr_len; i >= 0; i--) {
  740. int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
  741. writeb(EE_ENB | dataval, ee_addr);
  742. eeprom_delay();
  743. writeb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
  744. eeprom_delay();
  745. }
  746. writeb(EE_ENB, ee_addr);
  747. eeprom_delay();
  748. for (i = 16; i > 0; i--) {
  749. writeb(EE_ENB | EE_SHIFT_CLK, ee_addr);
  750. eeprom_delay();
  751. retval =
  752. (retval << 1) | ((readb(ee_addr) & EE_DATA_READ) ? 1 :
  753. 0);
  754. writeb(EE_ENB, ee_addr);
  755. eeprom_delay();
  756. }
  757. /* Terminate the EEPROM access. */
  758. writeb(~EE_CS, ee_addr);
  759. eeprom_delay();
  760. DPRINTK("EXIT - returning %d\n", retval);
  761. return retval;
  762. }
  763. /* MII serial management: mostly bogus for now. */
  764. /* Read and write the MII management registers using software-generated
  765. serial MDIO protocol.
  766. The maximum data clock rate is 2.5 MHz. The minimum timing is usually
  767. met by back-to-back PCI I/O cycles, but we insert a delay to avoid
  768. "overclocking" issues. */
  769. #define MDIO_DIR 0x80
  770. #define MDIO_DATA_OUT 0x04
  771. #define MDIO_DATA_IN 0x02
  772. #define MDIO_CLK 0x01
  773. #define MDIO_WRITE0 (MDIO_DIR)
  774. #define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
  775. #define mdio_delay() readb(mdio_addr)
  776. static char mii_2_8139_map[8] = {
  777. BasicModeCtrl,
  778. BasicModeStatus,
  779. 0,
  780. 0,
  781. NWayAdvert,
  782. NWayLPAR,
  783. NWayExpansion,
  784. 0
  785. };
  786. /* Synchronize the MII management interface by shifting out 32 one-bits. */
  787. static void mdio_sync(void *mdio_addr)
  788. {
  789. int i;
  790. DPRINTK("ENTER\n");
  791. for (i = 32; i >= 0; i--) {
  792. writeb(MDIO_WRITE1, mdio_addr);
  793. mdio_delay();
  794. writeb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
  795. mdio_delay();
  796. }
  797. DPRINTK("EXIT\n");
  798. }
  799. static int mdio_read(struct net_device *dev, int phy_id, int location)
  800. {
  801. struct netdrv_private *tp = netdev_priv(dev);
  802. void *mdio_addr = tp->mmio_addr + Config4;
  803. int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
  804. int retval = 0;
  805. int i;
  806. DPRINTK("ENTER\n");
  807. if (phy_id > 31) { /* Really an 8139. Use internal registers. */
  808. DPRINTK("EXIT after directly using 8139 internal regs\n");
  809. return location < 8 && mii_2_8139_map[location] ?
  810. readw(tp->mmio_addr + mii_2_8139_map[location]) : 0;
  811. }
  812. mdio_sync(mdio_addr);
  813. /* Shift the read command bits out. */
  814. for (i = 15; i >= 0; i--) {
  815. int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
  816. writeb(MDIO_DIR | dataval, mdio_addr);
  817. mdio_delay();
  818. writeb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
  819. mdio_delay();
  820. }
  821. /* Read the two transition bits, 16 data bits, and the wire-idle bit. */
  822. for (i = 19; i > 0; i--) {
  823. writeb(0, mdio_addr);
  824. mdio_delay();
  825. retval = ((retval << 1) | ((readb(mdio_addr) & MDIO_DATA_IN))
  826. ? 1 : 0);
  827. writeb(MDIO_CLK, mdio_addr);
  828. mdio_delay();
  829. }
  830. DPRINTK("EXIT, returning %d\n", (retval >> 1) & 0xffff);
  831. return (retval >> 1) & 0xffff;
  832. }
  833. static void mdio_write(struct net_device *dev, int phy_id, int location,
  834. int value)
  835. {
  836. struct netdrv_private *tp = netdev_priv(dev);
  837. void *mdio_addr = tp->mmio_addr + Config4;
  838. int mii_cmd =
  839. (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
  840. int i;
  841. DPRINTK("ENTER\n");
  842. if (phy_id > 31) { /* Really an 8139. Use internal registers. */
  843. if (location < 8 && mii_2_8139_map[location]) {
  844. writew(value,
  845. tp->mmio_addr + mii_2_8139_map[location]);
  846. readw(tp->mmio_addr + mii_2_8139_map[location]);
  847. }
  848. DPRINTK("EXIT after directly using 8139 internal regs\n");
  849. return;
  850. }
  851. mdio_sync(mdio_addr);
  852. /* Shift the command bits out. */
  853. for (i = 31; i >= 0; i--) {
  854. int dataval =
  855. (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
  856. writeb(dataval, mdio_addr);
  857. mdio_delay();
  858. writeb(dataval | MDIO_CLK, mdio_addr);
  859. mdio_delay();
  860. }
  861. /* Clear out extra bits. */
  862. for (i = 2; i > 0; i--) {
  863. writeb(0, mdio_addr);
  864. mdio_delay();
  865. writeb(MDIO_CLK, mdio_addr);
  866. mdio_delay();
  867. }
  868. DPRINTK("EXIT\n");
  869. }
  870. static int netdrv_open(struct net_device *dev)
  871. {
  872. struct netdrv_private *tp = netdev_priv(dev);
  873. int retval;
  874. void *ioaddr = tp->mmio_addr;
  875. DPRINTK("ENTER\n");
  876. retval = request_irq(dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
  877. if (retval) {
  878. DPRINTK("EXIT, returning %d\n", retval);
  879. return retval;
  880. }
  881. tp->tx_bufs = pci_alloc_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
  882. &tp->tx_bufs_dma);
  883. tp->rx_ring = pci_alloc_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
  884. &tp->rx_ring_dma);
  885. if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
  886. free_irq(dev->irq, dev);
  887. if (tp->tx_bufs)
  888. pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
  889. tp->tx_bufs, tp->tx_bufs_dma);
  890. if (tp->rx_ring)
  891. pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
  892. tp->rx_ring, tp->rx_ring_dma);
  893. DPRINTK("EXIT, returning -ENOMEM\n");
  894. return -ENOMEM;
  895. }
  896. tp->full_duplex = tp->duplex_lock;
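/* Pre-compute the early-Tx threshold field that gets OR'd into every
 * TxStatus write: the value lands in bits 16-21 (32-byte units), so a
 * TX_FIFO_THRESH of 256 bytes becomes a field value of 8. */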
  897. tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
  898. netdrv_init_ring(dev);
  899. netdrv_hw_start(dev);
  900. netdev_dbg(dev, "ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
  901. (unsigned long long)pci_resource_start(tp->pci_dev, 1),
  902. dev->irq, NETDRV_R8(MediaStatus),
  903. tp->full_duplex ? "full" : "half");
  904. /* Set the timer to switch to check for link beat and perhaps switch
  905. to an alternate media type. */
  906. init_timer(&tp->timer);
  907. tp->timer.expires = jiffies + 3 * HZ;
  908. tp->timer.data = (unsigned long) dev;
  909. tp->timer.function = &netdrv_timer;
  910. add_timer(&tp->timer);
  911. DPRINTK("EXIT, returning 0\n");
  912. return 0;
  913. }
  914. /* Start the hardware at open or resume. */
  915. static void netdrv_hw_start(struct net_device *dev)
  916. {
  917. struct netdrv_private *tp = netdev_priv(dev);
  918. void *ioaddr = tp->mmio_addr;
  919. u32 i;
  920. DPRINTK("ENTER\n");
  921. /* Soft reset the chip. */
  922. NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
  923. udelay(100);
  924. /* Check that the chip has finished the reset. */
  925. for (i = 1000; i > 0; i--)
  926. if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
  927. break;
  928. /* Restore our idea of the MAC address. */
  929. NETDRV_W32_F(MAC0 + 0, cpu_to_le32(*(u32 *)(dev->dev_addr + 0)));
  930. NETDRV_W32_F(MAC0 + 4, cpu_to_le32(*(u32 *)(dev->dev_addr + 4)));
  931. /* Must enable Tx/Rx before setting transfer thresholds! */
  932. NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
  933. CmdRxEnb | CmdTxEnb);
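/* Merge our Rx settings with whatever chip-revision-specific bits
 * RxConfigMask says to preserve from the current RxConfig value. */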
  934. i = netdrv_rx_config |
  935. (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
  936. NETDRV_W32_F(RxConfig, i);
  937. /* Check this value: the documentation for IFG contradicts itself. */
  938. NETDRV_W32(TxConfig, (TX_DMA_BURST << TxDMAShift));
  939. /* unlock Config[01234] and BMCR register writes */
  940. NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
  941. udelay(10);
  942. tp->cur_rx = 0;
  943. /* Lock Config[01234] and BMCR register writes */
  944. NETDRV_W8_F(Cfg9346, Cfg9346_Lock);
  945. udelay(10);
  946. /* init Rx ring buffer DMA address */
  947. NETDRV_W32_F(RxBuf, tp->rx_ring_dma);
  948. /* init Tx buffer DMA addresses */
  949. for (i = 0; i < NUM_TX_DESC; i++)
  950. NETDRV_W32_F(TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
  951. NETDRV_W32_F(RxMissed, 0);
  952. netdrv_set_rx_mode(dev);
  953. /* no early-rx interrupts */
  954. NETDRV_W16(MultiIntr, NETDRV_R16(MultiIntr) & MultiIntrClear);
  955. /* make sure RxTx has started */
  956. NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
  957. CmdRxEnb | CmdTxEnb);
  958. /* Enable all known interrupts by setting the interrupt mask. */
  959. NETDRV_W16_F(IntrMask, netdrv_intr_mask);
  960. netif_start_queue(dev);
  961. DPRINTK("EXIT\n");
  962. }
  963. /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
  964. static void netdrv_init_ring(struct net_device *dev)
  965. {
  966. struct netdrv_private *tp = netdev_priv(dev);
  967. int i;
  968. DPRINTK("ENTER\n");
  969. tp->cur_rx = 0;
  970. atomic_set(&tp->cur_tx, 0);
  971. atomic_set(&tp->dirty_tx, 0);
  972. for (i = 0; i < NUM_TX_DESC; i++) {
  973. tp->tx_info[i].skb = NULL;
  974. tp->tx_info[i].mapping = 0;
  975. tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
  976. }
  977. DPRINTK("EXIT\n");
  978. }
  979. static void netdrv_timer(unsigned long data)
  980. {
  981. struct net_device *dev = (struct net_device *) data;
  982. struct netdrv_private *tp = netdev_priv(dev);
  983. void *ioaddr = tp->mmio_addr;
  984. int next_tick = 60 * HZ;
  985. int mii_lpa;
  986. mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
  987. if (!tp->duplex_lock && mii_lpa != 0xffff) {
  988. int duplex = ((mii_lpa & LPA_100FULL) ||
  989. (mii_lpa & 0x01C0) == 0x0040);
  990. if (tp->full_duplex != duplex) {
  991. tp->full_duplex = duplex;
  992. netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
  993. tp->full_duplex ? "full" : "half",
  994. tp->phys[0], mii_lpa);
  995. NETDRV_W8(Cfg9346, Cfg9346_Unlock);
  996. NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
  997. NETDRV_W8(Cfg9346, Cfg9346_Lock);
  998. }
  999. }
  1000. netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
  1001. NETDRV_R16(NWayLPAR));
  1002. netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x RxStatus %04lx\n",
  1003. NETDRV_R16(IntrMask),
  1004. NETDRV_R16(IntrStatus),
  1005. NETDRV_R32(RxEarlyStatus));
  1006. netdev_dbg(dev, "Chip config %02x %02x\n",
  1007. NETDRV_R8(Config0), NETDRV_R8(Config1));
  1008. tp->timer.expires = jiffies + next_tick;
  1009. add_timer(&tp->timer);
  1010. }
  1011. static void netdrv_tx_clear(struct net_device *dev)
  1012. {
  1013. int i;
  1014. struct netdrv_private *tp = netdev_priv(dev);
  1015. atomic_set(&tp->cur_tx, 0);
  1016. atomic_set(&tp->dirty_tx, 0);
  1017. /* Dump the unsent Tx packets. */
  1018. for (i = 0; i < NUM_TX_DESC; i++) {
  1019. struct ring_info *rp = &tp->tx_info[i];
  1020. if (rp->mapping != 0) {
  1021. pci_unmap_single(tp->pci_dev, rp->mapping,
  1022. rp->skb->len, PCI_DMA_TODEVICE);
  1023. rp->mapping = 0;
  1024. }
  1025. if (rp->skb) {
  1026. dev_kfree_skb(rp->skb);
  1027. rp->skb = NULL;
  1028. dev->stats.tx_dropped++;
  1029. }
  1030. }
  1031. }
  1032. static void netdrv_tx_timeout(struct net_device *dev)
  1033. {
  1034. struct netdrv_private *tp = netdev_priv(dev);
  1035. void *ioaddr = tp->mmio_addr;
  1036. int i;
  1037. u8 tmp8;
  1038. unsigned long flags;
  1039. netdev_dbg(dev, "Transmit timeout, status %02x %04x media %02x\n",
  1040. NETDRV_R8(ChipCmd),
  1041. NETDRV_R16(IntrStatus),
  1042. NETDRV_R8(MediaStatus));
  1043. /* disable Tx ASAP, if not already */
  1044. tmp8 = NETDRV_R8(ChipCmd);
  1045. if (tmp8 & CmdTxEnb)
  1046. NETDRV_W8(ChipCmd, tmp8 & ~CmdTxEnb);
  1047. /* Disable interrupts by clearing the interrupt mask. */
  1048. NETDRV_W16(IntrMask, 0x0000);
  1049. /* Emit info to figure out what went wrong. */
  1050. netdev_dbg(dev, "Tx queue start entry %d dirty entry %d\n",
  1051. atomic_read(&tp->cur_tx),
  1052. atomic_read(&tp->dirty_tx));
  1053. for (i = 0; i < NUM_TX_DESC; i++)
  1054. netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
  1055. i, NETDRV_R32(TxStatus0 + (i * 4)),
  1056. i == atomic_read(&tp->dirty_tx) % NUM_TX_DESC ?
  1057. "(queue head)" : "");
  1058. /* Stop a shared interrupt from scavenging while we are. */
  1059. spin_lock_irqsave(&tp->lock, flags);
  1060. netdrv_tx_clear(dev);
  1061. spin_unlock_irqrestore(&tp->lock, flags);
  1062. /* ...and finally, reset everything */
  1063. netdrv_hw_start(dev);
  1064. netif_wake_queue(dev);
  1065. }
  1066. static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
  1067. {
  1068. struct netdrv_private *tp = netdev_priv(dev);
  1069. void *ioaddr = tp->mmio_addr;
  1070. int entry;
  1071. /* Calculate the next Tx descriptor entry. */
  1072. entry = atomic_read(&tp->cur_tx) % NUM_TX_DESC;
  1073. assert(tp->tx_info[entry].skb == NULL);
  1074. assert(tp->tx_info[entry].mapping == 0);
  1075. tp->tx_info[entry].skb = skb;
  1076. /* tp->tx_info[entry].mapping = 0; */
  1077. skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
  1078. /* Note: the chip doesn't have auto-pad! */
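/* Writing the frame length (low bits) together with tp->tx_flag into
 * TxStatus hands this descriptor to the chip and starts transmission
 * from the bounce buffer just filled above. */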
  1079. NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
  1080. tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
  1081. atomic_inc(&tp->cur_tx);
  1082. if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
  1083. netif_stop_queue(dev);
  1084. netdev_dbg(dev, "Queued Tx packet at %p size %u to slot %d\n",
  1085. skb->data, skb->len, entry);
  1086. return NETDEV_TX_OK;
  1087. }
  1088. static void netdrv_tx_interrupt(struct net_device *dev,
  1089. struct netdrv_private *tp,
  1090. void *ioaddr)
  1091. {
  1092. int cur_tx, dirty_tx, tx_left;
  1093. assert(dev != NULL);
  1094. assert(tp != NULL);
  1095. assert(ioaddr != NULL);
  1096. dirty_tx = atomic_read(&tp->dirty_tx);
  1097. cur_tx = atomic_read(&tp->cur_tx);
  1098. tx_left = cur_tx - dirty_tx;
  1099. while (tx_left > 0) {
  1100. int entry = dirty_tx % NUM_TX_DESC;
  1101. int txstatus;
  1102. txstatus = NETDRV_R32(TxStatus0 + (entry * sizeof(u32)));
  1103. if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
  1104. break; /* It still hasn't been Txed */
  1105. /* Note: TxCarrierLost is always asserted at 100 Mbps. */
  1106. if (txstatus & (TxOutOfWindow | TxAborted)) {
  1107. /* There was a major error, log it. */
  1108. netdev_dbg(dev, "Transmit error, Tx status %#08x\n",
  1109. txstatus);
  1110. dev->stats.tx_errors++;
  1111. if (txstatus & TxAborted) {
  1112. dev->stats.tx_aborted_errors++;
  1113. NETDRV_W32(TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
  1114. }
  1115. if (txstatus & TxCarrierLost)
  1116. dev->stats.tx_carrier_errors++;
  1117. if (txstatus & TxOutOfWindow)
  1118. dev->stats.tx_window_errors++;
  1119. } else {
  1120. if (txstatus & TxUnderrun) {
  1121. /* Add 64 to the Tx FIFO threshold. */
  1122. if (tp->tx_flag < 0x00300000)
  1123. tp->tx_flag += 0x00020000;
  1124. dev->stats.tx_fifo_errors++;
  1125. }
  1126. dev->stats.collisions += (txstatus >> 24) & 15;
  1127. dev->stats.tx_bytes += txstatus & 0x7ff;
  1128. dev->stats.tx_packets++;
  1129. }
  1130. /* Free the original skb. */
  1131. if (tp->tx_info[entry].mapping != 0) {
  1132. pci_unmap_single(tp->pci_dev,
  1133. tp->tx_info[entry].mapping,
  1134. tp->tx_info[entry].skb->len,
  1135. PCI_DMA_TODEVICE);
  1136. tp->tx_info[entry].mapping = 0;
  1137. }
  1138. dev_kfree_skb_irq(tp->tx_info[entry].skb);
  1139. tp->tx_info[entry].skb = NULL;
  1140. dirty_tx++;
  1141. if (dirty_tx < 0) { /* handle signed int overflow */
  1142. atomic_sub(cur_tx, &tp->cur_tx); /* XXX racy? */
  1143. dirty_tx = cur_tx - tx_left + 1;
  1144. }
  1145. if (netif_queue_stopped(dev))
  1146. netif_wake_queue(dev);
  1147. cur_tx = atomic_read(&tp->cur_tx);
  1148. tx_left = cur_tx - dirty_tx;
  1149. }
  1150. #ifndef NETDRV_NDEBUG
  1151. if (atomic_read(&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
  1152. netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d\n",
  1153. dirty_tx, atomic_read(&tp->cur_tx));
  1154. dirty_tx += NUM_TX_DESC;
  1155. }
  1156. #endif /* NETDRV_NDEBUG */
  1157. atomic_set(&tp->dirty_tx, dirty_tx);
  1158. }
  1159. /* TODO: clean this up! Rx reset need not be this intensive */
  1160. static void netdrv_rx_err(u32 rx_status, struct net_device *dev,
  1161. struct netdrv_private *tp, void *ioaddr)
  1162. {
  1163. u8 tmp8;
  1164. int tmp_work = 1000;
  1165. netdev_dbg(dev, "Ethernet frame had errors, status %08x\n", rx_status);
  1166. if (rx_status & RxTooLong)
  1167. netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
  1168. rx_status);
  1169. /* A.C.: The chip hangs here. */
  1170. dev->stats.rx_errors++;
  1171. if (rx_status & (RxBadSymbol | RxBadAlign))
  1172. dev->stats.rx_frame_errors++;
  1173. if (rx_status & (RxRunt | RxTooLong))
  1174. dev->stats.rx_length_errors++;
  1175. if (rx_status & RxCRCErr)
  1176. dev->stats.rx_crc_errors++;
  1177. /* Reset the receiver, based on RealTek recommendation.(Bug?) */
  1178. tp->cur_rx = 0;
  1179. /* disable receive */
  1180. tmp8 = NETDRV_R8(ChipCmd) & ChipCmdClear;
  1181. NETDRV_W8_F(ChipCmd, tmp8 | CmdTxEnb);
  1182. /* A.C.: Reset the multicast list. */
  1183. netdrv_set_rx_mode(dev);
  1184. /* XXX potentially temporary hack to
  1185. * restart hung receiver */
  1186. while (--tmp_work > 0) {
  1187. tmp8 = NETDRV_R8(ChipCmd);
  1188. if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
  1189. break;
  1190. NETDRV_W8_F(ChipCmd,
  1191. (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
  1192. }
  1193. /* G.S.: Re-enable receiver */
  1194. /* XXX temporary hack to work around receiver hang */
  1195. netdrv_set_rx_mode(dev);
  1196. if (tmp_work <= 0)
  1197. netdev_warn(dev, "tx/rx enable wait too long\n");
  1198. }
  1199. /* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
  1200. field alignments and semantics. */
  1201. static void netdrv_rx_interrupt(struct net_device *dev,
  1202. struct netdrv_private *tp, void *ioaddr)
  1203. {
  1204. unsigned char *rx_ring;
  1205. u16 cur_rx;
  1206. assert(dev != NULL);
  1207. assert(tp != NULL);
  1208. assert(ioaddr != NULL);
  1209. rx_ring = tp->rx_ring;
  1210. cur_rx = tp->cur_rx;
  1211. netdev_dbg(dev, "In netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
  1212. cur_rx, NETDRV_R16(RxBufAddr),
  1213. NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
  1214. while ((NETDRV_R8(ChipCmd) & RxBufEmpty) == 0) {
  1215. int ring_offset = cur_rx % RX_BUF_LEN;
  1216. u32 rx_status;
  1217. unsigned int rx_size;
  1218. unsigned int pkt_size;
  1219. struct sk_buff *skb;
  1220. /* read size+status of next frame from DMA ring buffer */
  1221. rx_status = le32_to_cpu(*(u32 *)(rx_ring + ring_offset));
  1222. rx_size = rx_status >> 16;
  1223. pkt_size = rx_size - 4;
  1224. netdev_dbg(dev, "netdrv_rx() status %04x, size %04x, cur %04x\n",
  1225. rx_status, rx_size, cur_rx);
  1226. #if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
  1227. print_hex_dump_bytes("Frame contents: ", HEX_DUMP_OFFSET,
  1228. &rx_ring[ring_offset], 70);
  1229. #endif
  1230. /* If Rx err or invalid rx_size/rx_status received
  1231. *(which happens if we get lost in the ring),
  1232. * Rx process gets reset, so we abort any further
  1233. * Rx processing.
  1234. */
  1235. if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
  1236. (!(rx_status & RxStatusOK))) {
  1237. netdrv_rx_err(rx_status, dev, tp, ioaddr);
  1238. return;
  1239. }
  1240. /* Malloc up new buffer, compatible with net-2e. */
  1241. /* Omit the four octet CRC from the length. */
  1242. /* TODO: consider allocating skb's outside of
  1243. * interrupt context, both to speed interrupt processing,
  1244. * and also to reduce the chances of having to
  1245. * drop packets here under memory pressure.
  1246. */
  1247. skb = dev_alloc_skb(pkt_size + 2);
  1248. if (skb) {
  1249. skb_reserve(skb, 2); /* 16 byte align the IP fields. */
  1250. skb_copy_to_linear_data(skb, &rx_ring[ring_offset + 4], pkt_size);
  1251. skb_put(skb, pkt_size);
  1252. skb->protocol = eth_type_trans(skb, dev);
  1253. netif_rx(skb);
  1254. dev->stats.rx_bytes += pkt_size;
  1255. dev->stats.rx_packets++;
  1256. } else {
  1257. netdev_warn(dev, "Memory squeeze, dropping packet\n");
  1258. dev->stats.rx_dropped++;
  1259. }
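/* Advance past the 4-byte Rx header plus the frame, rounded up to a
 * dword boundary, keeping RxBufPtr 16 bytes behind the read pointer
 * as the chip expects. */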
  1260. cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
  1261. NETDRV_W16_F(RxBufPtr, cur_rx - 16);
  1262. }
  1263. netdev_dbg(dev, "Done netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
  1264. cur_rx, NETDRV_R16(RxBufAddr),
  1265. NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
  1266. tp->cur_rx = cur_rx;
  1267. }
  1268. static void netdrv_weird_interrupt(struct net_device *dev,
  1269. struct netdrv_private *tp,
  1270. void *ioaddr,
  1271. int status, int link_changed)
  1272. {
  1273. netdev_printk(KERN_DEBUG, dev, "Abnormal interrupt, status %08x\n",
  1274. status);
  1275. assert(dev != NULL);
  1276. assert(tp != NULL);
  1277. assert(ioaddr != NULL);
  1278. /* Update the error count. */
  1279. dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
  1280. NETDRV_W32(RxMissed, 0);
  1281. if ((status & RxUnderrun) && link_changed &&
  1282. (tp->drv_flags & HAS_LNK_CHNG)) {
  1283. /* Really link-change on new chips. */
  1284. int lpar = NETDRV_R16(NWayLPAR);
  1285. int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
  1286. tp->duplex_lock);
  1287. if (tp->full_duplex != duplex) {
  1288. tp->full_duplex = duplex;
  1289. NETDRV_W8(Cfg9346, Cfg9346_Unlock);
  1290. NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
  1291. NETDRV_W8(Cfg9346, Cfg9346_Lock);
  1292. }
  1293. status &= ~RxUnderrun;
  1294. }
  1295. /* XXX along with netdrv_rx_err, are we double-counting errors? */
  1296. if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
  1297. dev->stats.rx_errors++;
  1298. if (status & (PCSTimeout))
  1299. dev->stats.rx_length_errors++;
  1300. if (status & (RxUnderrun | RxFIFOOver))
  1301. dev->stats.rx_fifo_errors++;
  1302. if (status & RxOverflow) {
  1303. dev->stats.rx_over_errors++;
  1304. tp->cur_rx = NETDRV_R16(RxBufAddr) % RX_BUF_LEN;
  1305. NETDRV_W16_F(RxBufPtr, tp->cur_rx - 16);
  1306. }
  1307. if (status & PCIErr) {
  1308. u16 pci_cmd_status;
  1309. pci_read_config_word(tp->pci_dev, PCI_STATUS, &pci_cmd_status);
  1310. netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
  1311. }
  1312. }
  1313. /* The interrupt handler does all of the Rx thread work and cleans up
  1314. after the Tx thread. */
  1315. static irqreturn_t netdrv_interrupt(int irq, void *dev_instance)
  1316. {
  1317. struct net_device *dev = (struct net_device *) dev_instance;
  1318. struct netdrv_private *tp = netdev_priv(dev);
  1319. int boguscnt = max_interrupt_work;
  1320. void *ioaddr = tp->mmio_addr;
  1321. int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
  1322. int handled = 0;
  1323. spin_lock(&tp->lock);
  1324. do {
  1325. status = NETDRV_R16(IntrStatus);
  1326. /* h/w no longer present(hotplug?) or major error, bail */
  1327. if (status == 0xFFFF)
  1328. break;
  1329. handled = 1;
  1330. /* Acknowledge all of the current interrupt sources ASAP */
  1331. NETDRV_W16_F(IntrStatus, status);
  1332. netdev_dbg(dev, "interrupt status=%#04x new intstat=%#04x\n",
  1333. status, NETDRV_R16(IntrStatus));
  1334. if ((status &
  1335. (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
  1336. RxFIFOOver | TxErr | TxOK | RxErr | RxOK)) == 0)
  1337. break;
  1338. /* Check uncommon events with one test. */
  1339. if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
  1340. RxFIFOOver | TxErr | RxErr))
  1341. netdrv_weird_interrupt(dev, tp, ioaddr,
  1342. status, link_changed);
  1343. if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
  1344. netdrv_rx_interrupt(dev, tp, ioaddr);
  1345. if (status & (TxOK | TxErr))
  1346. netdrv_tx_interrupt(dev, tp, ioaddr);
  1347. boguscnt--;
  1348. } while (boguscnt > 0);
  1349. if (boguscnt <= 0) {
  1350. netdev_warn(dev, "Too much work at interrupt, IntrStatus=%#04x\n",
  1351. status);
  1352. /* Clear all interrupt sources. */
  1353. NETDRV_W16(IntrStatus, 0xffff);
  1354. }
  1355. spin_unlock(&tp->lock);
  1356. netdev_dbg(dev, "exiting interrupt, intr_status=%#04x\n",
  1357. NETDRV_R16(IntrStatus));
  1358. return IRQ_RETVAL(handled);
  1359. }
  1360. static int netdrv_close(struct net_device *dev)
  1361. {
  1362. struct netdrv_private *tp = netdev_priv(dev);
  1363. void *ioaddr = tp->mmio_addr;
  1364. unsigned long flags;
  1365. DPRINTK("ENTER\n");
  1366. netif_stop_queue(dev);
  1367. netdev_dbg(dev, "Shutting down ethercard, status was %#04x\n",
  1368. NETDRV_R16(IntrStatus));
  1369. del_timer_sync(&tp->timer);
  1370. spin_lock_irqsave(&tp->lock, flags);
  1371. /* Stop the chip's Tx and Rx DMA processes. */
  1372. NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
  1373. /* Disable interrupts by clearing the interrupt mask. */
  1374. NETDRV_W16(IntrMask, 0x0000);
  1375. /* Update the error counts. */
  1376. dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
  1377. NETDRV_W32(RxMissed, 0);
  1378. spin_unlock_irqrestore(&tp->lock, flags);
  1379. free_irq(dev->irq, dev);
  1380. netdrv_tx_clear(dev);
  1381. pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
  1382. tp->rx_ring, tp->rx_ring_dma);
  1383. pci_free_consistent(tp->pci_dev, TX_BUF_TOT_LEN,
  1384. tp->tx_bufs, tp->tx_bufs_dma);
  1385. tp->rx_ring = NULL;
  1386. tp->tx_bufs = NULL;
  1387. /* Green! Put the chip in low-power mode. */
  1388. NETDRV_W8(Cfg9346, Cfg9346_Unlock);
  1389. NETDRV_W8(Config1, 0x03);
  1390. NETDRV_W8(Cfg9346, Cfg9346_Lock);
  1391. DPRINTK("EXIT\n");
  1392. return 0;
  1393. }
  1394. static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
  1395. {
  1396. struct netdrv_private *tp = netdev_priv(dev);
  1397. struct mii_ioctl_data *data = if_mii(rq);
  1398. unsigned long flags;
  1399. int rc = 0;
  1400. DPRINTK("ENTER\n");
  1401. switch (cmd) {
  1402. case SIOCGMIIPHY: /* Get address of MII PHY in use. */
  1403. data->phy_id = tp->phys[0] & 0x3f;
  1404. /* Fall Through */
  1405. case SIOCGMIIREG: /* Read MII PHY register. */
  1406. spin_lock_irqsave(&tp->lock, flags);
  1407. data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
  1408. spin_unlock_irqrestore(&tp->lock, flags);
  1409. break;
  1410. case SIOCSMIIREG: /* Write MII PHY register. */
  1411. spin_lock_irqsave(&tp->lock, flags);
  1412. mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
  1413. spin_unlock_irqrestore(&tp->lock, flags);
  1414. break;
  1415. default:
  1416. rc = -EOPNOTSUPP;
  1417. break;
  1418. }
  1419. DPRINTK("EXIT, returning %d\n", rc);
  1420. return rc;
  1421. }
  1422. /* Set or clear the multicast filter for this adaptor.
  1423. This routine is not state sensitive and need not be SMP locked. */
  1424. static void netdrv_set_rx_mode(struct net_device *dev)
  1425. {
  1426. struct netdrv_private *tp = netdev_priv(dev);
  1427. void *ioaddr = tp->mmio_addr;
  1428. u32 mc_filter[2]; /* Multicast hash filter */
  1429. int rx_mode;
  1430. u32 tmp;
  1431. DPRINTK("ENTER\n");
  1432. netdev_dbg(dev, "%s(%04x) done -- Rx config %08lx\n",
  1433. __func__, dev->flags, NETDRV_R32(RxConfig));
  1434. /* Note: do not reorder, GCC is clever about common statements. */
  1435. if (dev->flags & IFF_PROMISC) {
  1436. rx_mode =
  1437. AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
  1438. AcceptAllPhys;
  1439. mc_filter[1] = mc_filter[0] = 0xffffffff;
  1440. } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
  1441. (dev->flags & IFF_ALLMULTI)) {
  1442. /* Too many to filter perfectly -- accept all multicasts. */
  1443. rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
  1444. mc_filter[1] = mc_filter[0] = 0xffffffff;
  1445. } else {
  1446. struct netdev_hw_addr *ha;
  1447. rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
  1448. mc_filter[1] = mc_filter[0] = 0;
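/* Hash each address: the top 6 bits of the Ethernet CRC select one of
 * the 64 filter bits spread across the two 32-bit MAR words. */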
  1449. netdev_for_each_mc_addr(ha, dev) {
  1450. int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
  1451. mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
  1452. }
  1453. }
  1454. /* if called from irq handler, lock already acquired */
  1455. if (!in_irq())
  1456. spin_lock_irq(&tp->lock);
  1457. /* We can safely update without stopping the chip. */
  1458. tmp = netdrv_rx_config | rx_mode |
  1459. (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
  1460. NETDRV_W32_F(RxConfig, tmp);
  1461. NETDRV_W32_F(MAR0 + 0, mc_filter[0]);
  1462. NETDRV_W32_F(MAR0 + 4, mc_filter[1]);
  1463. if (!in_irq())
  1464. spin_unlock_irq(&tp->lock);
  1465. DPRINTK("EXIT\n");
  1466. }
  1467. #ifdef CONFIG_PM
  1468. static int netdrv_suspend(struct pci_dev *pdev, pm_message_t state)
  1469. {
  1470. struct net_device *dev = pci_get_drvdata(pdev);
  1471. struct netdrv_private *tp = netdev_priv(dev);
  1472. void *ioaddr = tp->mmio_addr;
  1473. unsigned long flags;
  1474. if (!netif_running(dev))
  1475. return 0;
  1476. netif_device_detach(dev);
  1477. spin_lock_irqsave(&tp->lock, flags);
  1478. /* Disable interrupts, stop Tx and Rx. */
  1479. NETDRV_W16(IntrMask, 0x0000);
  1480. NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
  1481. /* Update the error counts. */
  1482. dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
  1483. NETDRV_W32(RxMissed, 0);
  1484. spin_unlock_irqrestore(&tp->lock, flags);
  1485. pci_save_state(pdev);
  1486. pci_set_power_state(pdev, PCI_D3hot);
  1487. return 0;
  1488. }
  1489. static int netdrv_resume(struct pci_dev *pdev)
  1490. {
  1491. struct net_device *dev = pci_get_drvdata(pdev);
  1492. /*struct netdrv_private *tp = netdev_priv(dev);*/
  1493. if (!netif_running(dev))
  1494. return 0;
  1495. pci_set_power_state(pdev, PCI_D0);
  1496. pci_restore_state(pdev);
  1497. netif_device_attach(dev);
  1498. netdrv_hw_start(dev);
  1499. return 0;
  1500. }
  1501. #endif /* CONFIG_PM */
  1502. static struct pci_driver netdrv_pci_driver = {
  1503. .name = MODNAME,
  1504. .id_table = netdrv_pci_tbl,
  1505. .probe = netdrv_init_one,
  1506. .remove = __devexit_p(netdrv_remove_one),
  1507. #ifdef CONFIG_PM
  1508. .suspend = netdrv_suspend,
  1509. .resume = netdrv_resume,
  1510. #endif /* CONFIG_PM */
  1511. };
  1512. static int __init netdrv_init_module(void)
  1513. {
  1514. /* when a module, this is printed whether or not devices are found in probe */
  1515. #ifdef MODULE
  1516. printk(version);
  1517. #endif
  1518. return pci_register_driver(&netdrv_pci_driver);
  1519. }
  1520. static void __exit netdrv_cleanup_module(void)
  1521. {
  1522. pci_unregister_driver(&netdrv_pci_driver);
  1523. }
  1524. module_init(netdrv_init_module);
  1525. module_exit(netdrv_cleanup_module);