/drivers/net/cassini.c

Source: https://bitbucket.org/slukk/jb-tsm-kernel-4.2 · C · 5304 lines (3807 code, 715 blank, 782 comment)

(Listing truncated; the full file is 5304 lines.)

  1. /* cassini.c: Sun Microsystems Cassini(+) ethernet driver.
  2. *
  3. * Copyright (C) 2004 Sun Microsystems Inc.
  4. * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License as
  8. * published by the Free Software Foundation; either version 2 of the
  9. * License, or (at your option) any later version.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU General Public License
  17. * along with this program; if not, write to the Free Software
  18. * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
  19. * 02111-1307, USA.
  20. *
  21. * This driver uses the sungem driver (c) David Miller
  22. * (davem@redhat.com) as its basis.
  23. *
  24. * The cassini chip has a number of features that distinguish it from
  25. * the gem chip:
  26. * 4 transmit descriptor rings that are used for either QoS (VLAN) or
  27. * load balancing (non-VLAN mode)
  28. * batching of multiple packets
  29. * multiple CPU dispatching
  30. * page-based RX descriptor engine with separate completion rings
  31. * Gigabit support (GMII and PCS interface)
  32. * MIF link up/down detection works
  33. *
  34. * RX is handled by page sized buffers that are attached as fragments to
  35. * the skb. here's what's done:
  36. * -- driver allocates pages at a time and keeps reference counts
  37. * on them.
  38. * -- the upper protocol layers assume that the header is in the skb
  39. * itself. as a result, cassini will copy a small amount (64 bytes)
  40. * to make them happy.
  41. * -- driver appends the rest of the data pages as frags to skbuffs
  42. * and increments the reference count
  43. * -- on page reclamation, the driver swaps the page with a spare page.
  44. * if that page is still in use, it frees its reference to that page,
  45. * and allocates a new page for use. otherwise, it just recycles
  46. * the page.
  47. *
  48. * NOTE: cassini can parse the header. however, it's not worth it
  49. * as long as the network stack requires a header copy.
  50. *
  51. * TX has 4 queues. currently these queues are used in a round-robin
  52. * fashion for load balancing. They can also be used for QoS. for that
  53. * to work, however, QoS information needs to be exposed down to the driver
  54. * level so that subqueues get targeted to particular transmit rings.
  55. * alternatively, the queues can be configured via use of the all-purpose
  56. * ioctl.
  57. *
  58. * RX DATA: the rx completion ring has all the info, but the rx desc
  59. * ring has all of the data. RX can conceivably come in under multiple
  60. * interrupts, but the INT# assignment needs to be set up properly by
  61. * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do
  62. * that. also, the two descriptor rings are designed to distinguish between
  63. * encrypted and non-encrypted packets, but we use them for buffering
  64. * instead.
  65. *
  66. * by default, the selective clear mask is set up to process rx packets.
  67. */
  68. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  69. #include <linux/module.h>
  70. #include <linux/kernel.h>
  71. #include <linux/types.h>
  72. #include <linux/compiler.h>
  73. #include <linux/slab.h>
  74. #include <linux/delay.h>
  75. #include <linux/init.h>
  76. #include <linux/vmalloc.h>
  77. #include <linux/ioport.h>
  78. #include <linux/pci.h>
  79. #include <linux/mm.h>
  80. #include <linux/highmem.h>
  81. #include <linux/list.h>
  82. #include <linux/dma-mapping.h>
  83. #include <linux/netdevice.h>
  84. #include <linux/etherdevice.h>
  85. #include <linux/skbuff.h>
  86. #include <linux/ethtool.h>
  87. #include <linux/crc32.h>
  88. #include <linux/random.h>
  89. #include <linux/mii.h>
  90. #include <linux/ip.h>
  91. #include <linux/tcp.h>
  92. #include <linux/mutex.h>
  93. #include <linux/firmware.h>
  94. #include <net/checksum.h>
  95. #include <asm/atomic.h>
  96. #include <asm/system.h>
  97. #include <asm/io.h>
  98. #include <asm/byteorder.h>
  99. #include <asm/uaccess.h>
  100. #define cas_page_map(x) kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
  101. #define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
  102. #define CAS_NCPUS num_online_cpus()
  103. #define cas_skb_release(x) netif_rx(x)
  104. /* select which firmware to use */
  105. #define USE_HP_WORKAROUND
  106. #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */
  107. #define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */
  108. #include "cassini.h"
  109. #define USE_TX_COMPWB /* use completion writeback registers */
  110. #define USE_CSMA_CD_PROTO /* standard CSMA/CD */
  111. #define USE_RX_BLANK /* hw interrupt mitigation */
  112. #undef USE_ENTROPY_DEV /* don't test for entropy device */
  113. /* NOTE: these aren't useable unless PCI interrupts can be assigned.
  114. * also, we need to make cp->lock finer-grained.
  115. */
  116. #undef USE_PCI_INTB
  117. #undef USE_PCI_INTC
  118. #undef USE_PCI_INTD
  119. #undef USE_QOS
  120. #undef USE_VPD_DEBUG /* debug vpd information if defined */
  121. /* rx processing options */
  122. #define USE_PAGE_ORDER /* specify to allocate large rx pages */
  123. #define RX_DONT_BATCH 0 /* if 1, don't batch flows */
  124. #define RX_COPY_ALWAYS 0 /* if 0, use frags */
  125. #define RX_COPY_MIN 64 /* copy a little to make upper layers happy */
  126. #undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
  127. #define DRV_MODULE_NAME "cassini"
  128. #define DRV_MODULE_VERSION "1.6"
  129. #define DRV_MODULE_RELDATE "21 May 2008"
  130. #define CAS_DEF_MSG_ENABLE \
  131. (NETIF_MSG_DRV | \
  132. NETIF_MSG_PROBE | \
  133. NETIF_MSG_LINK | \
  134. NETIF_MSG_TIMER | \
  135. NETIF_MSG_IFDOWN | \
  136. NETIF_MSG_IFUP | \
  137. NETIF_MSG_RX_ERR | \
  138. NETIF_MSG_TX_ERR)
  139. /* length of time before we decide the hardware is borked,
  140. * and dev->tx_timeout() should be called to fix the problem
  141. */
  142. #define CAS_TX_TIMEOUT (HZ)
  143. #define CAS_LINK_TIMEOUT (22*HZ/10)
  144. #define CAS_LINK_FAST_TIMEOUT (1)
  145. /* timeout values for state changing. these specify the number
  146. * of 10us delays to be used before giving up.
  147. */
  148. #define STOP_TRIES_PHY 1000
  149. #define STOP_TRIES 5000
  150. /* specify a minimum frame size to deal with some fifo issues
  151. * max mtu == 2 * page size - ethernet header - 64 - swivel =
  152. * 2 * page_size - 0x50
  153. */
  154. #define CAS_MIN_FRAME 97
  155. #define CAS_1000MB_MIN_FRAME 255
  156. #define CAS_MIN_MTU 60
  157. #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000)
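/* Worked example of the frame-size arithmetic above (illustrative, not in
 * the original comment): the 0x50 presumably breaks down as the 14-byte
 * Ethernet header + the 64-byte header copy area + a 2-byte swivel
 * (alignment) offset = 80 = 0x50.  With 8 KB rx pages
 * (cp->page_size == 0x2000), 2 * 0x2000 - 0x50 = 16304, which the min()
 * in CAS_MAX_MTU then clamps to the 9000-byte jumbo limit.
 */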
  158. #if 1
  159. /*
  160. * Eliminate these and use separate atomic counters for each, to
  161. * avoid a race condition.
  162. */
  163. #else
  164. #define CAS_RESET_MTU 1
  165. #define CAS_RESET_ALL 2
  166. #define CAS_RESET_SPARE 3
  167. #endif
  168. static char version[] __devinitdata =
  169. DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
  170. static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */
  171. static int link_mode;
  172. MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)");
  173. MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver");
  174. MODULE_LICENSE("GPL");
  175. MODULE_FIRMWARE("sun/cassini.bin");
  176. module_param(cassini_debug, int, 0);
  177. MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value");
  178. module_param(link_mode, int, 0);
  179. MODULE_PARM_DESC(link_mode, "default link mode");
  180. /*
  181. * Work around for a PCS bug in which the link goes down due to the chip
  182. * being confused and never showing a link status of "up."
  183. */
  184. #define DEFAULT_LINKDOWN_TIMEOUT 5
  185. /*
  186. * Value in seconds, for user input.
  187. */
  188. static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT;
  189. module_param(linkdown_timeout, int, 0);
  190. MODULE_PARM_DESC(linkdown_timeout,
  191. "min reset interval in sec. for PCS linkdown issue; disabled if not positive");
  192. /*
  193. * value in 'ticks' (units used by jiffies). Set when we init the
  194. * module because 'HZ' is actually a function call on some flavors of
  195. * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ.
  196. */
  197. static int link_transition_timeout;
  198. static u16 link_modes[] __devinitdata = {
  199. BMCR_ANENABLE, /* 0 : autoneg */
  200. 0, /* 1 : 10bt half duplex */
  201. BMCR_SPEED100, /* 2 : 100bt half duplex */
  202. BMCR_FULLDPLX, /* 3 : 10bt full duplex */
  203. BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */
  204. CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
  205. };
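/* Usage sketch (illustrative): the "link_mode" module parameter indexes the
 * table above, so for example loading the module with "link_mode=4" would
 * request 100BaseT full duplex, while the default of 0 leaves
 * autonegotiation enabled.  The index is presumably applied to
 * cp->link_cntl later in the (truncated) probe path.
 */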
  206. static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
  207. { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
  208. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
  209. { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
  210. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
  211. { 0, }
  212. };
  213. MODULE_DEVICE_TABLE(pci, cas_pci_tbl);
  214. static void cas_set_link_modes(struct cas *cp);
  215. static inline void cas_lock_tx(struct cas *cp)
  216. {
  217. int i;
  218. for (i = 0; i < N_TX_RINGS; i++)
  219. spin_lock(&cp->tx_lock[i]);
  220. }
  221. static inline void cas_lock_all(struct cas *cp)
  222. {
  223. spin_lock_irq(&cp->lock);
  224. cas_lock_tx(cp);
  225. }
  226. /* WTZ: QA was finding deadlock problems with the previous
  227. * versions after long test runs with multiple cards per machine.
  228. * See if replacing cas_lock_all with safer versions helps. The
  229. * symptoms QA is reporting match those we'd expect if interrupts
  230. * aren't being properly restored, and we fixed a previous deadlock
  231. * with similar symptoms by using save/restore versions in other
  232. * places.
  233. */
  234. #define cas_lock_all_save(cp, flags) \
  235. do { \
  236. struct cas *xxxcp = (cp); \
  237. spin_lock_irqsave(&xxxcp->lock, flags); \
  238. cas_lock_tx(xxxcp); \
  239. } while (0)
  240. static inline void cas_unlock_tx(struct cas *cp)
  241. {
  242. int i;
  243. for (i = N_TX_RINGS; i > 0; i--)
  244. spin_unlock(&cp->tx_lock[i - 1]);
  245. }
  246. static inline void cas_unlock_all(struct cas *cp)
  247. {
  248. cas_unlock_tx(cp);
  249. spin_unlock_irq(&cp->lock);
  250. }
  251. #define cas_unlock_all_restore(cp, flags) \
  252. do { \
  253. struct cas *xxxcp = (cp); \
  254. cas_unlock_tx(xxxcp); \
  255. spin_unlock_irqrestore(&xxxcp->lock, flags); \
  256. } while (0)
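/* Illustrative sketch (not part of the driver): how the save/restore
 * variants above are meant to be paired, per the WTZ comment.  "flags" must
 * be a local unsigned long so the caller's interrupt state is restored
 * exactly as it was.
 */
static inline void cas_example_locked_update(struct cas *cp)
{
	unsigned long flags;

	cas_lock_all_save(cp, flags);
	/* ... touch state protected by cp->lock and the per-ring tx locks ... */
	cas_unlock_all_restore(cp, flags);
}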
  257. static void cas_disable_irq(struct cas *cp, const int ring)
  258. {
  259. /* Make sure we won't get any more interrupts */
  260. if (ring == 0) {
  261. writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK);
  262. return;
  263. }
  264. /* disable completion interrupts and selectively mask */
  265. if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
  266. switch (ring) {
  267. #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
  268. #ifdef USE_PCI_INTB
  269. case 1:
  270. #endif
  271. #ifdef USE_PCI_INTC
  272. case 2:
  273. #endif
  274. #ifdef USE_PCI_INTD
  275. case 3:
  276. #endif
  277. writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN,
  278. cp->regs + REG_PLUS_INTRN_MASK(ring));
  279. break;
  280. #endif
  281. default:
  282. writel(INTRN_MASK_CLEAR_ALL, cp->regs +
  283. REG_PLUS_INTRN_MASK(ring));
  284. break;
  285. }
  286. }
  287. }
  288. static inline void cas_mask_intr(struct cas *cp)
  289. {
  290. int i;
  291. for (i = 0; i < N_RX_COMP_RINGS; i++)
  292. cas_disable_irq(cp, i);
  293. }
  294. static void cas_enable_irq(struct cas *cp, const int ring)
  295. {
  296. if (ring == 0) { /* all but TX_DONE */
  297. writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK);
  298. return;
  299. }
  300. if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
  301. switch (ring) {
  302. #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
  303. #ifdef USE_PCI_INTB
  304. case 1:
  305. #endif
  306. #ifdef USE_PCI_INTC
  307. case 2:
  308. #endif
  309. #ifdef USE_PCI_INTD
  310. case 3:
  311. #endif
  312. writel(INTRN_MASK_RX_EN, cp->regs +
  313. REG_PLUS_INTRN_MASK(ring));
  314. break;
  315. #endif
  316. default:
  317. break;
  318. }
  319. }
  320. }
  321. static inline void cas_unmask_intr(struct cas *cp)
  322. {
  323. int i;
  324. for (i = 0; i < N_RX_COMP_RINGS; i++)
  325. cas_enable_irq(cp, i);
  326. }
  327. static inline void cas_entropy_gather(struct cas *cp)
  328. {
  329. #ifdef USE_ENTROPY_DEV
  330. if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
  331. return;
  332. batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
  333. readl(cp->regs + REG_ENTROPY_IV),
  334. sizeof(uint64_t)*8);
  335. #endif
  336. }
  337. static inline void cas_entropy_reset(struct cas *cp)
  338. {
  339. #ifdef USE_ENTROPY_DEV
  340. if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
  341. return;
  342. writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
  343. cp->regs + REG_BIM_LOCAL_DEV_EN);
  344. writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
  345. writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);
  346. /* if we read back 0x0, we don't have an entropy device */
  347. if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
  348. cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
  349. #endif
  350. }
  351. /* access to the phy. the following assumes that we've initialized the MIF to
  352. * be in frame rather than bit-bang mode
  353. */
  354. static u16 cas_phy_read(struct cas *cp, int reg)
  355. {
  356. u32 cmd;
  357. int limit = STOP_TRIES_PHY;
  358. cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
  359. cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
  360. cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
  361. cmd |= MIF_FRAME_TURN_AROUND_MSB;
  362. writel(cmd, cp->regs + REG_MIF_FRAME);
  363. /* poll for completion */
  364. while (limit-- > 0) {
  365. udelay(10);
  366. cmd = readl(cp->regs + REG_MIF_FRAME);
  367. if (cmd & MIF_FRAME_TURN_AROUND_LSB)
  368. return cmd & MIF_FRAME_DATA_MASK;
  369. }
  370. return 0xFFFF; /* -1 */
  371. }
  372. static int cas_phy_write(struct cas *cp, int reg, u16 val)
  373. {
  374. int limit = STOP_TRIES_PHY;
  375. u32 cmd;
  376. cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
  377. cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
  378. cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
  379. cmd |= MIF_FRAME_TURN_AROUND_MSB;
  380. cmd |= val & MIF_FRAME_DATA_MASK;
  381. writel(cmd, cp->regs + REG_MIF_FRAME);
  382. /* poll for completion */
  383. while (limit-- > 0) {
  384. udelay(10);
  385. cmd = readl(cp->regs + REG_MIF_FRAME);
  386. if (cmd & MIF_FRAME_TURN_AROUND_LSB)
  387. return 0;
  388. }
  389. return -1;
  390. }
  391. static void cas_phy_powerup(struct cas *cp)
  392. {
  393. u16 ctl = cas_phy_read(cp, MII_BMCR);
  394. if ((ctl & BMCR_PDOWN) == 0)
  395. return;
  396. ctl &= ~BMCR_PDOWN;
  397. cas_phy_write(cp, MII_BMCR, ctl);
  398. }
  399. static void cas_phy_powerdown(struct cas *cp)
  400. {
  401. u16 ctl = cas_phy_read(cp, MII_BMCR);
  402. if (ctl & BMCR_PDOWN)
  403. return;
  404. ctl |= BMCR_PDOWN;
  405. cas_phy_write(cp, MII_BMCR, ctl);
  406. }
  407. /* cp->lock held. note: the last put_page will free the buffer */
  408. static int cas_page_free(struct cas *cp, cas_page_t *page)
  409. {
  410. pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
  411. PCI_DMA_FROMDEVICE);
  412. __free_pages(page->buffer, cp->page_order);
  413. kfree(page);
  414. return 0;
  415. }
  416. #ifdef RX_COUNT_BUFFERS
  417. #define RX_USED_ADD(x, y) ((x)->used += (y))
  418. #define RX_USED_SET(x, y) ((x)->used = (y))
  419. #else
  420. #define RX_USED_ADD(x, y)
  421. #define RX_USED_SET(x, y)
  422. #endif
  423. /* local page allocation routines for the receive buffers. jumbo pages
  424. * require at least 8K contiguous and 8K aligned buffers.
  425. */
  426. static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
  427. {
  428. cas_page_t *page;
  429. page = kmalloc(sizeof(cas_page_t), flags);
  430. if (!page)
  431. return NULL;
  432. INIT_LIST_HEAD(&page->list);
  433. RX_USED_SET(page, 0);
  434. page->buffer = alloc_pages(flags, cp->page_order);
  435. if (!page->buffer)
  436. goto page_err;
  437. page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
  438. cp->page_size, PCI_DMA_FROMDEVICE);
  439. return page;
  440. page_err:
  441. kfree(page);
  442. return NULL;
  443. }
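/* Illustrative sketch (simplified; the driver's real receive path appears
 * later in the file, truncated here): how a page from cas_page_alloc() gets
 * attached to an skb per the scheme described in the file header -- copy a
 * small amount (RX_COPY_MIN) into the skb itself to keep the upper layers
 * happy, hand the rest over as a page fragment, and take a page reference
 * so the swap/recycle logic can tell when the page is still in use.
 * "off" and "len" are hypothetical offsets/lengths within the page.
 */
static struct sk_buff *cas_example_rx_skb(struct cas *cp, cas_page_t *page,
					  int off, int len)
{
	int hlen = min(len, RX_COPY_MIN);
	struct sk_buff *skb;
	char *addr;

	skb = netdev_alloc_skb(cp->dev, hlen + NET_IP_ALIGN);
	if (!skb)
		return NULL;
	skb_reserve(skb, NET_IP_ALIGN);

	/* small header copy into the skb's linear area */
	addr = cas_page_map(page->buffer);
	memcpy(skb_put(skb, hlen), addr + off, hlen);
	cas_page_unmap(addr);

	if (len > hlen) {
		/* the rest of the data stays in the page as a fragment */
		get_page(page->buffer);
		skb_fill_page_desc(skb, 0, page->buffer, off + hlen,
				   len - hlen);
		skb->len      += len - hlen;
		skb->data_len += len - hlen;
		skb->truesize += len - hlen;
	}
	skb->protocol = eth_type_trans(skb, cp->dev);
	return skb;
}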
  444. /* initialize spare pool of rx buffers, but allocate during the open */
  445. static void cas_spare_init(struct cas *cp)
  446. {
  447. spin_lock(&cp->rx_inuse_lock);
  448. INIT_LIST_HEAD(&cp->rx_inuse_list);
  449. spin_unlock(&cp->rx_inuse_lock);
  450. spin_lock(&cp->rx_spare_lock);
  451. INIT_LIST_HEAD(&cp->rx_spare_list);
  452. cp->rx_spares_needed = RX_SPARE_COUNT;
  453. spin_unlock(&cp->rx_spare_lock);
  454. }
  455. /* used on close. free all the spare buffers. */
  456. static void cas_spare_free(struct cas *cp)
  457. {
  458. struct list_head list, *elem, *tmp;
  459. /* free spare buffers */
  460. INIT_LIST_HEAD(&list);
  461. spin_lock(&cp->rx_spare_lock);
  462. list_splice_init(&cp->rx_spare_list, &list);
  463. spin_unlock(&cp->rx_spare_lock);
  464. list_for_each_safe(elem, tmp, &list) {
  465. cas_page_free(cp, list_entry(elem, cas_page_t, list));
  466. }
  467. INIT_LIST_HEAD(&list);
  468. #if 1
  469. /*
  470. * Looks like Adrian had protected this with a different
  471. * lock than used everywhere else to manipulate this list.
  472. */
  473. spin_lock(&cp->rx_inuse_lock);
  474. list_splice_init(&cp->rx_inuse_list, &list);
  475. spin_unlock(&cp->rx_inuse_lock);
  476. #else
  477. spin_lock(&cp->rx_spare_lock);
  478. list_splice_init(&cp->rx_inuse_list, &list);
  479. spin_unlock(&cp->rx_spare_lock);
  480. #endif
  481. list_for_each_safe(elem, tmp, &list) {
  482. cas_page_free(cp, list_entry(elem, cas_page_t, list));
  483. }
  484. }
  485. /* replenish spares if needed */
  486. static void cas_spare_recover(struct cas *cp, const gfp_t flags)
  487. {
  488. struct list_head list, *elem, *tmp;
  489. int needed, i;
  490. /* check inuse list. if we don't need any more free buffers,
  491. * just free them
  492. */
  493. /* make a local copy of the list */
  494. INIT_LIST_HEAD(&list);
  495. spin_lock(&cp->rx_inuse_lock);
  496. list_splice_init(&cp->rx_inuse_list, &list);
  497. spin_unlock(&cp->rx_inuse_lock);
  498. list_for_each_safe(elem, tmp, &list) {
  499. cas_page_t *page = list_entry(elem, cas_page_t, list);
  500. /*
  501. * With the lockless pagecache, cassini buffering scheme gets
  502. * slightly less accurate: we might find that a page has an
  503. * elevated reference count here, due to a speculative ref,
  504. * and skip it as in-use. Ideally we would be able to reclaim
  505. * it. However this would be such a rare case, it doesn't
  506. * matter too much as we should pick it up the next time round.
  507. *
  508. * Importantly, if we find that the page has a refcount of 1
  509. * here (our refcount), then we know it is definitely not inuse
  510. * so we can reuse it.
  511. */
  512. if (page_count(page->buffer) > 1)
  513. continue;
  514. list_del(elem);
  515. spin_lock(&cp->rx_spare_lock);
  516. if (cp->rx_spares_needed > 0) {
  517. list_add(elem, &cp->rx_spare_list);
  518. cp->rx_spares_needed--;
  519. spin_unlock(&cp->rx_spare_lock);
  520. } else {
  521. spin_unlock(&cp->rx_spare_lock);
  522. cas_page_free(cp, page);
  523. }
  524. }
  525. /* put any inuse buffers back on the list */
  526. if (!list_empty(&list)) {
  527. spin_lock(&cp->rx_inuse_lock);
  528. list_splice(&list, &cp->rx_inuse_list);
  529. spin_unlock(&cp->rx_inuse_lock);
  530. }
  531. spin_lock(&cp->rx_spare_lock);
  532. needed = cp->rx_spares_needed;
  533. spin_unlock(&cp->rx_spare_lock);
  534. if (!needed)
  535. return;
  536. /* we still need spares, so try to allocate some */
  537. INIT_LIST_HEAD(&list);
  538. i = 0;
  539. while (i < needed) {
  540. cas_page_t *spare = cas_page_alloc(cp, flags);
  541. if (!spare)
  542. break;
  543. list_add(&spare->list, &list);
  544. i++;
  545. }
  546. spin_lock(&cp->rx_spare_lock);
  547. list_splice(&list, &cp->rx_spare_list);
  548. cp->rx_spares_needed -= i;
  549. spin_unlock(&cp->rx_spare_lock);
  550. }
  551. /* pull a page from the list. */
  552. static cas_page_t *cas_page_dequeue(struct cas *cp)
  553. {
  554. struct list_head *entry;
  555. int recover;
  556. spin_lock(&cp->rx_spare_lock);
  557. if (list_empty(&cp->rx_spare_list)) {
  558. /* try to do a quick recovery */
  559. spin_unlock(&cp->rx_spare_lock);
  560. cas_spare_recover(cp, GFP_ATOMIC);
  561. spin_lock(&cp->rx_spare_lock);
  562. if (list_empty(&cp->rx_spare_list)) {
  563. netif_err(cp, rx_err, cp->dev,
  564. "no spare buffers available\n");
  565. spin_unlock(&cp->rx_spare_lock);
  566. return NULL;
  567. }
  568. }
  569. entry = cp->rx_spare_list.next;
  570. list_del(entry);
  571. recover = ++cp->rx_spares_needed;
  572. spin_unlock(&cp->rx_spare_lock);
  573. /* trigger the timer to do the recovery */
  574. if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
  575. #if 1
  576. atomic_inc(&cp->reset_task_pending);
  577. atomic_inc(&cp->reset_task_pending_spare);
  578. schedule_work(&cp->reset_task);
  579. #else
  580. atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
  581. schedule_work(&cp->reset_task);
  582. #endif
  583. }
  584. return list_entry(entry, cas_page_t, list);
  585. }
  586. static void cas_mif_poll(struct cas *cp, const int enable)
  587. {
  588. u32 cfg;
  589. cfg = readl(cp->regs + REG_MIF_CFG);
  590. cfg &= (MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);
  591. if (cp->phy_type & CAS_PHY_MII_MDIO1)
  592. cfg |= MIF_CFG_PHY_SELECT;
  593. /* poll and interrupt on link status change. */
  594. if (enable) {
  595. cfg |= MIF_CFG_POLL_EN;
  596. cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
  597. cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
  598. }
  599. writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
  600. cp->regs + REG_MIF_MASK);
  601. writel(cfg, cp->regs + REG_MIF_CFG);
  602. }
  603. /* Must be invoked under cp->lock */
  604. static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
  605. {
  606. u16 ctl;
  607. #if 1
  608. int lcntl;
  609. int changed = 0;
  610. int oldstate = cp->lstate;
  611. int link_was_not_down = !(oldstate == link_down);
  612. #endif
  613. /* Setup link parameters */
  614. if (!ep)
  615. goto start_aneg;
  616. lcntl = cp->link_cntl;
  617. if (ep->autoneg == AUTONEG_ENABLE)
  618. cp->link_cntl = BMCR_ANENABLE;
  619. else {
  620. u32 speed = ethtool_cmd_speed(ep);
  621. cp->link_cntl = 0;
  622. if (speed == SPEED_100)
  623. cp->link_cntl |= BMCR_SPEED100;
  624. else if (speed == SPEED_1000)
  625. cp->link_cntl |= CAS_BMCR_SPEED1000;
  626. if (ep->duplex == DUPLEX_FULL)
  627. cp->link_cntl |= BMCR_FULLDPLX;
  628. }
  629. #if 1
  630. changed = (lcntl != cp->link_cntl);
  631. #endif
  632. start_aneg:
  633. if (cp->lstate == link_up) {
  634. netdev_info(cp->dev, "PCS link down\n");
  635. } else {
  636. if (changed) {
  637. netdev_info(cp->dev, "link configuration changed\n");
  638. }
  639. }
  640. cp->lstate = link_down;
  641. cp->link_transition = LINK_TRANSITION_LINK_DOWN;
  642. if (!cp->hw_running)
  643. return;
  644. #if 1
  645. /*
  646. * WTZ: If the old state was link_up, we turn off the carrier
  647. * to replicate everything we do elsewhere on a link-down
  648. * event when we were already in a link-up state..
  649. */
  650. if (oldstate == link_up)
  651. netif_carrier_off(cp->dev);
  652. if (changed && link_was_not_down) {
  653. /*
  654. * WTZ: This branch will simply schedule a full reset after
  655. * we explicitly changed link modes in an ioctl. See if this
  656. * fixes the link-problems we were having for forced mode.
  657. */
  658. atomic_inc(&cp->reset_task_pending);
  659. atomic_inc(&cp->reset_task_pending_all);
  660. schedule_work(&cp->reset_task);
  661. cp->timer_ticks = 0;
  662. mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
  663. return;
  664. }
  665. #endif
  666. if (cp->phy_type & CAS_PHY_SERDES) {
  667. u32 val = readl(cp->regs + REG_PCS_MII_CTRL);
  668. if (cp->link_cntl & BMCR_ANENABLE) {
  669. val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
  670. cp->lstate = link_aneg;
  671. } else {
  672. if (cp->link_cntl & BMCR_FULLDPLX)
  673. val |= PCS_MII_CTRL_DUPLEX;
  674. val &= ~PCS_MII_AUTONEG_EN;
  675. cp->lstate = link_force_ok;
  676. }
  677. cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
  678. writel(val, cp->regs + REG_PCS_MII_CTRL);
  679. } else {
  680. cas_mif_poll(cp, 0);
  681. ctl = cas_phy_read(cp, MII_BMCR);
  682. ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
  683. CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
  684. ctl |= cp->link_cntl;
  685. if (ctl & BMCR_ANENABLE) {
  686. ctl |= BMCR_ANRESTART;
  687. cp->lstate = link_aneg;
  688. } else {
  689. cp->lstate = link_force_ok;
  690. }
  691. cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
  692. cas_phy_write(cp, MII_BMCR, ctl);
  693. cas_mif_poll(cp, 1);
  694. }
  695. cp->timer_ticks = 0;
  696. mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
  697. }
  698. /* Must be invoked under cp->lock. */
  699. static int cas_reset_mii_phy(struct cas *cp)
  700. {
  701. int limit = STOP_TRIES_PHY;
  702. u16 val;
  703. cas_phy_write(cp, MII_BMCR, BMCR_RESET);
  704. udelay(100);
  705. while (--limit) {
  706. val = cas_phy_read(cp, MII_BMCR);
  707. if ((val & BMCR_RESET) == 0)
  708. break;
  709. udelay(10);
  710. }
  711. return limit <= 0;
  712. }
  713. static int cas_saturn_firmware_init(struct cas *cp)
  714. {
  715. const struct firmware *fw;
  716. const char fw_name[] = "sun/cassini.bin";
  717. int err;
  718. if (PHY_NS_DP83065 != cp->phy_id)
  719. return 0;
  720. err = request_firmware(&fw, fw_name, &cp->pdev->dev);
  721. if (err) {
  722. pr_err("Failed to load firmware \"%s\"\n",
  723. fw_name);
  724. return err;
  725. }
  726. if (fw->size < 2) {
  727. pr_err("bogus length %zu in \"%s\"\n",
  728. fw->size, fw_name);
  729. err = -EINVAL;
  730. goto out;
  731. }
  732. cp->fw_load_addr= fw->data[1] << 8 | fw->data[0];
  733. cp->fw_size = fw->size - 2;
  734. cp->fw_data = vmalloc(cp->fw_size);
  735. if (!cp->fw_data) {
  736. err = -ENOMEM;
  737. pr_err("\"%s\" Failed %d\n", fw_name, err);
  738. goto out;
  739. }
  740. memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
  741. out:
  742. release_firmware(fw);
  743. return err;
  744. }
  745. static void cas_saturn_firmware_load(struct cas *cp)
  746. {
  747. int i;
  748. cas_phy_powerdown(cp);
  749. /* expanded memory access mode */
  750. cas_phy_write(cp, DP83065_MII_MEM, 0x0);
  751. /* pointer configuration for new firmware */
  752. cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9);
  753. cas_phy_write(cp, DP83065_MII_REGD, 0xbd);
  754. cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa);
  755. cas_phy_write(cp, DP83065_MII_REGD, 0x82);
  756. cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb);
  757. cas_phy_write(cp, DP83065_MII_REGD, 0x0);
  758. cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc);
  759. cas_phy_write(cp, DP83065_MII_REGD, 0x39);
  760. /* download new firmware */
  761. cas_phy_write(cp, DP83065_MII_MEM, 0x1);
  762. cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr);
  763. for (i = 0; i < cp->fw_size; i++)
  764. cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]);
  765. /* enable firmware */
  766. cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8);
  767. cas_phy_write(cp, DP83065_MII_REGD, 0x1);
  768. }
  769. /* phy initialization */
  770. static void cas_phy_init(struct cas *cp)
  771. {
  772. u16 val;
  773. /* if we're in MII/GMII mode, set up phy */
  774. if (CAS_PHY_MII(cp->phy_type)) {
  775. writel(PCS_DATAPATH_MODE_MII,
  776. cp->regs + REG_PCS_DATAPATH_MODE);
  777. cas_mif_poll(cp, 0);
  778. cas_reset_mii_phy(cp); /* take out of isolate mode */
  779. if (PHY_LUCENT_B0 == cp->phy_id) {
  780. /* workaround link up/down issue with lucent */
  781. cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
  782. cas_phy_write(cp, MII_BMCR, 0x00f1);
  783. cas_phy_write(cp, LUCENT_MII_REG, 0x0);
  784. } else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
  785. /* workarounds for broadcom phy */
  786. cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
  787. cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
  788. cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
  789. cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
  790. cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
  791. cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
  792. cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
  793. cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
  794. cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
  795. cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
  796. cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);
  797. } else if (PHY_BROADCOM_5411 == cp->phy_id) {
  798. val = cas_phy_read(cp, BROADCOM_MII_REG4);
  799. val = cas_phy_read(cp, BROADCOM_MII_REG4);
  800. if (val & 0x0080) {
  801. /* link workaround */
  802. cas_phy_write(cp, BROADCOM_MII_REG4,
  803. val & ~0x0080);
  804. }
  805. } else if (cp->cas_flags & CAS_FLAG_SATURN) {
  806. writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
  807. SATURN_PCFG_FSI : 0x0,
  808. cp->regs + REG_SATURN_PCFG);
  809. /* load firmware to address 10Mbps auto-negotiation
  810. * issue. NOTE: this will need to be changed if the
  811. * default firmware gets fixed.
  812. */
  813. if (PHY_NS_DP83065 == cp->phy_id) {
  814. cas_saturn_firmware_load(cp);
  815. }
  816. cas_phy_powerup(cp);
  817. }
  818. /* advertise capabilities */
  819. val = cas_phy_read(cp, MII_BMCR);
  820. val &= ~BMCR_ANENABLE;
  821. cas_phy_write(cp, MII_BMCR, val);
  822. udelay(10);
  823. cas_phy_write(cp, MII_ADVERTISE,
  824. cas_phy_read(cp, MII_ADVERTISE) |
  825. (ADVERTISE_10HALF | ADVERTISE_10FULL |
  826. ADVERTISE_100HALF | ADVERTISE_100FULL |
  827. CAS_ADVERTISE_PAUSE |
  828. CAS_ADVERTISE_ASYM_PAUSE));
  829. if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
  830. /* make sure that we don't advertise half
  831. * duplex to avoid a chip issue
  832. */
  833. val = cas_phy_read(cp, CAS_MII_1000_CTRL);
  834. val &= ~CAS_ADVERTISE_1000HALF;
  835. val |= CAS_ADVERTISE_1000FULL;
  836. cas_phy_write(cp, CAS_MII_1000_CTRL, val);
  837. }
  838. } else {
  839. /* reset pcs for serdes */
  840. u32 val;
  841. int limit;
  842. writel(PCS_DATAPATH_MODE_SERDES,
  843. cp->regs + REG_PCS_DATAPATH_MODE);
  844. /* enable serdes pins on saturn */
  845. if (cp->cas_flags & CAS_FLAG_SATURN)
  846. writel(0, cp->regs + REG_SATURN_PCFG);
  847. /* Reset PCS unit. */
  848. val = readl(cp->regs + REG_PCS_MII_CTRL);
  849. val |= PCS_MII_RESET;
  850. writel(val, cp->regs + REG_PCS_MII_CTRL);
  851. limit = STOP_TRIES;
  852. while (--limit > 0) {
  853. udelay(10);
  854. if ((readl(cp->regs + REG_PCS_MII_CTRL) &
  855. PCS_MII_RESET) == 0)
  856. break;
  857. }
  858. if (limit <= 0)
  859. netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
  860. readl(cp->regs + REG_PCS_STATE_MACHINE));
  861. /* Make sure PCS is disabled while changing advertisement
  862. * configuration.
  863. */
  864. writel(0x0, cp->regs + REG_PCS_CFG);
  865. /* Advertise all capabilities except half-duplex. */
  866. val = readl(cp->regs + REG_PCS_MII_ADVERT);
  867. val &= ~PCS_MII_ADVERT_HD;
  868. val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
  869. PCS_MII_ADVERT_ASYM_PAUSE);
  870. writel(val, cp->regs + REG_PCS_MII_ADVERT);
  871. /* enable PCS */
  872. writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);
  873. /* pcs workaround: enable sync detect */
  874. writel(PCS_SERDES_CTRL_SYNCD_EN,
  875. cp->regs + REG_PCS_SERDES_CTRL);
  876. }
  877. }
  878. static int cas_pcs_link_check(struct cas *cp)
  879. {
  880. u32 stat, state_machine;
  881. int retval = 0;
  882. /* The link status bit latches on zero, so you must
  883. * read it twice in such a case to see a transition
  884. * to the link being up.
  885. */
  886. stat = readl(cp->regs + REG_PCS_MII_STATUS);
  887. if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
  888. stat = readl(cp->regs + REG_PCS_MII_STATUS);
  889. /* The remote-fault indication is only valid
  890. * when autoneg has completed.
  891. */
  892. if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
  893. PCS_MII_STATUS_REMOTE_FAULT)) ==
  894. (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
  895. netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
  896. /* work around link detection issue by querying the PCS state
  897. * machine directly.
  898. */
  899. state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
  900. if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
  901. stat &= ~PCS_MII_STATUS_LINK_STATUS;
  902. } else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
  903. stat |= PCS_MII_STATUS_LINK_STATUS;
  904. }
  905. if (stat & PCS_MII_STATUS_LINK_STATUS) {
  906. if (cp->lstate != link_up) {
  907. if (cp->opened) {
  908. cp->lstate = link_up;
  909. cp->link_transition = LINK_TRANSITION_LINK_UP;
  910. cas_set_link_modes(cp);
  911. netif_carrier_on(cp->dev);
  912. }
  913. }
  914. } else if (cp->lstate == link_up) {
  915. cp->lstate = link_down;
  916. if (link_transition_timeout != 0 &&
  917. cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
  918. !cp->link_transition_jiffies_valid) {
  919. /*
  920. * force a reset, as a workaround for the
  921. * link-failure problem. May want to move this to a
  922. * point a bit earlier in the sequence. If we had
  923. * generated a reset a short time ago, we'll wait for
  924. * the link timer to check the status until a
  925. * timer expires (link_transition_jiffies_valid is
  926. * true when the timer is running.) Instead of using
  927. * a system timer, we just do a check whenever the
  928. * link timer is running - this clears the flag after
  929. * a suitable delay.
  930. */
  931. retval = 1;
  932. cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
  933. cp->link_transition_jiffies = jiffies;
  934. cp->link_transition_jiffies_valid = 1;
  935. } else {
  936. cp->link_transition = LINK_TRANSITION_ON_FAILURE;
  937. }
  938. netif_carrier_off(cp->dev);
  939. if (cp->opened)
  940. netif_info(cp, link, cp->dev, "PCS link down\n");
  941. /* Cassini only: if you force a mode, there can be
  942. * sync problems on link down. to fix that, the following
  943. * things need to be checked:
  944. * 1) read serialink state register
  945. * 2) read pcs status register to verify link down.
  946. * 3) if link down and serial link == 0x03, then you need
  947. * to global reset the chip.
  948. */
  949. if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
  950. /* should check to see if we're in a forced mode */
  951. stat = readl(cp->regs + REG_PCS_SERDES_STATE);
  952. if (stat == 0x03)
  953. return 1;
  954. }
  955. } else if (cp->lstate == link_down) {
  956. if (link_transition_timeout != 0 &&
  957. cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
  958. !cp->link_transition_jiffies_valid) {
  959. /* force a reset, as a workaround for the
  960. * link-failure problem. May want to move
  961. * this to a point a bit earlier in the
  962. * sequence.
  963. */
  964. retval = 1;
  965. cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
  966. cp->link_transition_jiffies = jiffies;
  967. cp->link_transition_jiffies_valid = 1;
  968. } else {
  969. cp->link_transition = LINK_TRANSITION_STILL_FAILED;
  970. }
  971. }
  972. return retval;
  973. }
  974. static int cas_pcs_interrupt(struct net_device *dev,
  975. struct cas *cp, u32 status)
  976. {
  977. u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);
  978. if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
  979. return 0;
  980. return cas_pcs_link_check(cp);
  981. }
  982. static int cas_txmac_interrupt(struct net_device *dev,
  983. struct cas *cp, u32 status)
  984. {
  985. u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);
  986. if (!txmac_stat)
  987. return 0;
  988. netif_printk(cp, intr, KERN_DEBUG, cp->dev,
  989. "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
  990. /* Defer timer expiration is quite normal,
  991. * don't even log the event.
  992. */
  993. if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
  994. !(txmac_stat & ~MAC_TX_DEFER_TIMER))
  995. return 0;
  996. spin_lock(&cp->stat_lock[0]);
  997. if (txmac_stat & MAC_TX_UNDERRUN) {
  998. netdev_err(dev, "TX MAC xmit underrun\n");
  999. cp->net_stats[0].tx_fifo_errors++;
  1000. }
  1001. if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
  1002. netdev_err(dev, "TX MAC max packet size error\n");
  1003. cp->net_stats[0].tx_errors++;
  1004. }
  1005. /* The rest are all cases of one of the 16-bit TX
  1006. * counters expiring.
  1007. */
  1008. if (txmac_stat & MAC_TX_COLL_NORMAL)
  1009. cp->net_stats[0].collisions += 0x10000;
  1010. if (txmac_stat & MAC_TX_COLL_EXCESS) {
  1011. cp->net_stats[0].tx_aborted_errors += 0x10000;
  1012. cp->net_stats[0].collisions += 0x10000;
  1013. }
  1014. if (txmac_stat & MAC_TX_COLL_LATE) {
  1015. cp->net_stats[0].tx_aborted_errors += 0x10000;
  1016. cp->net_stats[0].collisions += 0x10000;
  1017. }
  1018. spin_unlock(&cp->stat_lock[0]);
  1019. /* We do not keep track of MAC_TX_COLL_FIRST and
  1020. * MAC_TX_PEAK_ATTEMPTS events.
  1021. */
  1022. return 0;
  1023. }
  1024. static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
  1025. {
  1026. cas_hp_inst_t *inst;
  1027. u32 val;
  1028. int i;
  1029. i = 0;
  1030. while ((inst = firmware) && inst->note) {
  1031. writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);
  1032. val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
  1033. val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
  1034. writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);
  1035. val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
  1036. val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
  1037. val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
  1038. val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
  1039. val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
  1040. val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
  1041. val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
  1042. writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);
  1043. val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
  1044. val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
  1045. val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
  1046. val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
  1047. writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
  1048. ++firmware;
  1049. ++i;
  1050. }
  1051. }
  1052. static void cas_init_rx_dma(struct cas *cp)
  1053. {
  1054. u64 desc_dma = cp->block_dvma;
  1055. u32 val;
  1056. int i, size;
  1057. /* rx free descriptors */
  1058. val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
  1059. val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
  1060. val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
  1061. if ((N_RX_DESC_RINGS > 1) &&
  1062. (cp->cas_flags & CAS_FLAG_REG_PLUS)) /* do desc 2 */
  1063. val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
  1064. writel(val, cp->regs + REG_RX_CFG);
  1065. val = (unsigned long) cp->init_rxds[0] -
  1066. (unsigned long) cp->init_block;
  1067. writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
  1068. writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
  1069. writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
  1070. if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
  1071. /* rx desc 2 is for IPSEC packets. however,
  1072. * we don't use it for that purpose.
  1073. */
  1074. val = (unsigned long) cp->init_rxds[1] -
  1075. (unsigned long) cp->init_block;
  1076. writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
  1077. writel((desc_dma + val) & 0xffffffff, cp->regs +
  1078. REG_PLUS_RX_DB1_LOW);
  1079. writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
  1080. REG_PLUS_RX_KICK1);
  1081. }
  1082. /* rx completion registers */
  1083. val = (unsigned long) cp->init_rxcs[0] -
  1084. (unsigned long) cp->init_block;
  1085. writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
  1086. writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);
  1087. if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
  1088. /* rx comp 2-4 */
  1089. for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
  1090. val = (unsigned long) cp->init_rxcs[i] -
  1091. (unsigned long) cp->init_block;
  1092. writel((desc_dma + val) >> 32, cp->regs +
  1093. REG_PLUS_RX_CBN_HI(i));
  1094. writel((desc_dma + val) & 0xffffffff, cp->regs +
  1095. REG_PLUS_RX_CBN_LOW(i));
  1096. }
  1097. }
  1098. /* read selective clear regs to prevent spurious interrupts
  1099. * on reset because complete == kick.
  1100. * selective clear set up to prevent interrupts on resets
  1101. */
  1102. readl(cp->regs + REG_INTR_STATUS_ALIAS);
  1103. writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
  1104. if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
  1105. for (i = 1; i < N_RX_COMP_RINGS; i++)
  1106. readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));
  1107. /* 2 is different from 3 and 4 */
  1108. if (N_RX_COMP_RINGS > 1)
  1109. writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
  1110. cp->regs + REG_PLUS_ALIASN_CLEAR(1));
  1111. for (i = 2; i < N_RX_COMP_RINGS; i++)
  1112. writel(INTR_RX_DONE_ALT,
  1113. cp->regs + REG_PLUS_ALIASN_CLEAR(i));
  1114. }
  1115. /* set up pause thresholds */
  1116. val = CAS_BASE(RX_PAUSE_THRESH_OFF,
  1117. cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
  1118. val |= CAS_BASE(RX_PAUSE_THRESH_ON,
  1119. cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
  1120. writel(val, cp->regs + REG_RX_PAUSE_THRESH);
  1121. /* zero out dma reassembly buffers */
  1122. for (i = 0; i < 64; i++) {
  1123. writel(i, cp->regs + REG_RX_TABLE_ADDR);
  1124. writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
  1125. writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
  1126. writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
  1127. }
  1128. /* make sure address register is 0 for normal operation */
  1129. writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
  1130. writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);
  1131. /* interrupt mitigation */
  1132. #ifdef USE_RX_BLANK
  1133. val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
  1134. val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
  1135. writel(val, cp->regs + REG_RX_BLANK);
  1136. #else
  1137. writel(0x0, cp->regs + REG_RX_BLANK);
  1138. #endif
  1139. /* interrupt generation as a function of low water marks for
  1140. * free desc and completion entries. these are used to trigger
  1141. * housekeeping for rx descs. we don't use the free interrupt
  1142. * as it's not very useful
  1143. */
  1144. /* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
  1145. val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
  1146. writel(val, cp->regs + REG_RX_AE_THRESH);
  1147. if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
  1148. val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
  1149. writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
  1150. }
  1151. /* Random early detect registers. useful for congestion avoidance.
  1152. * this should be tunable.
  1153. */
  1154. writel(0x0, cp->regs + REG_RX_RED);
  1155. /* receive page sizes. default == 2K (0x800) */
  1156. val = 0;
  1157. if (cp->page_size == 0x1000)
  1158. val = 0x1;
  1159. else if (cp->page_size == 0x2000)
  1160. val = 0x2;
  1161. else if (cp->page_size == 0x4000)
  1162. val = 0x3;
  1163. /* round mtu + offset. constrain to page size. */
  1164. size = cp->dev->mtu + 64;
  1165. if (size > cp->page_size)
  1166. size = cp->page_size;
  1167. if (size <= 0x400)
  1168. i = 0x0;
  1169. else if (size <= 0x800)
  1170. i = 0x1;
  1171. else if (size <= 0x1000)
  1172. i = 0x2;
  1173. else
  1174. i = 0x3;
  1175. cp->mtu_stride = 1 << (i + 10);
  1176. val = CAS_BASE(RX_PAGE_SIZE, val);
  1177. val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
  1178. val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
  1179. val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
  1180. writel(val, cp->regs + REG_RX_PAGE_SIZE);
  1181. /* enable the header parser if desired */
  1182. if (CAS_HP_FIRMWARE == cas_prog_null)
  1183. return;
  1184. val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
  1185. val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
  1186. val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
  1187. writel(val, cp->regs + REG_HP_CFG);
  1188. }
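/* Worked example of the RX page-size programming above (illustrative):
 * with a standard 1500-byte MTU, size = 1500 + 64 = 1564, which falls in
 * the "<= 0x800" bucket, so i = 0x1 and cp->mtu_stride = 1 << 11 = 2048.
 * An 8 KB rx page (cp->page_size == 0x2000, so val = 0x2) then holds
 * 0x2000 >> 11 = 4 MTU-sized buffers (RX_PAGE_SIZE_MTU_COUNT).
 */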
  1189. static inline void cas_rxc_init(struct cas_rx_comp *rxc)
  1190. {
  1191. memset(rxc, 0, sizeof(*rxc));
  1192. rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
  1193. }
  1194. /* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
  1195. * flipping is protected by the fact that the chip will not
  1196. * hand back the same page index while it's being processed.
  1197. */
  1198. static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
  1199. {
  1200. cas_page_t *page = cp->rx_pages[1][index];
  1201. cas_page_t *new;
  1202. if (page_count(page->buffer) == 1)
  1203. return page;
  1204. new = cas_page_dequeue(cp);
  1205. if (new) {
  1206. spin_lock(&cp->rx_inuse_lock);
  1207. list_add(&page->list, &cp->rx_inuse_list);
  1208. spin_unlock(&cp->rx_inuse_lock);
  1209. }
  1210. return new;
  1211. }
  1212. /* this needs to be changed if we actually use the ENC RX DESC ring */
  1213. static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
  1214. const int index)
  1215. {
  1216. cas_page_t **page0 = cp->rx_pages[0];
  1217. cas_page_t **page1 = cp->rx_pages[1];
  1218. /* swap if buffer is in use */
  1219. if (page_count(page0[index]->buffer) > 1) {
  1220. cas_page_t *new = cas_page_spare(cp, index);
  1221. if (new) {
  1222. page1[index] = page0[index];
  1223. page0[index] = new;
  1224. }
  1225. }
  1226. RX_USED_SET(page0[index], 0);
  1227. return page0[index];
  1228. }
  1229. static void cas_clean_rxds(struct cas *cp)
  1230. {
  1231. /* only clean ring 0 as ring 1 is used for spare buffers */
  1232. struct cas_rx_desc *rxd = cp->init_rxds[0];
  1233. int i, size;
  1234. /* release all rx flows */
  1235. for (i = 0; i < N_RX_FLOWS; i++) {
  1236. struct sk_buff *skb;
  1237. while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
  1238. cas_skb_release(skb);
  1239. }
  1240. }
  1241. /* initialize descriptors */
  1242. size = RX_DESC_RINGN_SIZE(0);
  1243. for (i = 0; i < size; i++) {
  1244. cas_page_t *page = cas_page_swap(cp, 0, i);
  1245. rxd[i].buffer = cpu_to_le64(page->dma_addr);
  1246. rxd[i].index = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
  1247. CAS_BASE(RX_INDEX_RING, 0));
  1248. }
  1249. cp->rx_old[0] = RX_DESC_RINGN_SIZE(0) - 4;
  1250. cp->rx_last[0] = 0;
  1251. cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
  1252. }
  1253. static void cas_clean_rxcs(struct cas *cp)
  1254. {
  1255. int i, j;
  1256. /* take ownership of rx comp descriptors */
  1257. memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
  1258. memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
  1259. for (i = 0; i < N_RX_COMP_RINGS; i++) {
  1260. struct cas_rx_comp *rxc = cp->init_rxcs[i];
  1261. for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
  1262. cas_rxc_init(rxc + j);
  1263. }
  1264. }
  1265. }
  1266. #if 0
  1267. /* When we get a RX fifo overflow, the RX unit is probably hung
  1268. * so we do the following.
  1269. *
  1270. * If any part of the reset goes wrong, we return 1 and that causes the
  1271. * whole chip to be reset.
  1272. */
  1273. static int cas_rxmac_reset(struct cas *cp)
  1274. {
  1275. struct net_device *dev = cp->dev;
  1276. int limit;
  1277. u32 val;
  1278. /* First, reset MAC RX. */
  1279. writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
  1280. for (limit = 0; limit < STOP_TRIES; limit++) {
  1281. if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
  1282. break;
  1283. udelay(10);
  1284. }
  1285. if (limit == STOP_TRIES) {
  1286. netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
  1287. return 1;
  1288. }
  1289. /* Second, disable RX DMA. */
  1290. writel(0, cp->regs + REG_RX_CFG);
  1291. for (limit = 0; limit < STOP_TRIES; limit++) {
  1292. if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
  1293. break;
  1294. udelay(10);
  1295. }
  1296. if (limit == STOP_TRIES) {
  1297. netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
  1298. return 1;
  1299. }
  1300. mdelay(5);
  1301. /* Execute RX reset command. */
  1302. writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
  1303. for (limit = 0; limit < STOP_TRIES; limit++) {
  1304. if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
  1305. break;
  1306. udelay(10);
  1307. }
  1308. if (limit == STOP_TRIES) {
  1309. netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
  1310. return 1;
  1311. }
  1312. /* reset driver rx state */
  1313. cas_clean_rxds(cp);
  1314. cas_clean_rxcs(cp);
  1315. /* Now, reprogram the rest of RX unit. */
  1316. cas_init_rx_dma(cp);
  1317. /* re-enable */
  1318. val = readl(cp->regs + REG_RX_CFG);
  1319. writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
  1320. writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
  1321. val = readl(cp->regs + REG_MAC_RX_CFG);
  1322. writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
  1323. return 0;
  1324. }
  1325. #endif
  1326. static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
  1327. u32 status)
  1328. {
  1329. u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);
  1330. if (!stat)
  1331. return 0;
  1332. netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
  1333. /* these are all rollovers */
  1334. spin_lock(&cp->stat_lock[0]);
  1335. if (stat & MAC_RX_ALIGN_ERR)
  1336. cp->net_stats[0].rx_frame_errors += 0x10000;
  1337. if (stat & MAC_RX_CRC_ERR)
  1338. cp->net_stats[0].rx_crc_errors += 0x10000;
  1339. if (stat & MAC_RX_LEN_ERR)
  1340. cp->net_stats[0].rx_length_errors += 0x10000;
  1341. if (stat & MAC_RX_OVERFLOW) {
  1342. cp->net_stats[0].rx_over_errors++;
  1343. cp->net_stats[0].rx_fifo_errors++;
  1344. }
  1345. /* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
  1346. * events.
  1347. */
  1348. spin_unlock(&cp->stat_lock[0]);
  1349. return 0;
  1350. }
  1351. static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
  1352. u32 status)
  1353. {
  1354. u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);
  1355. if (!stat)
  1356. return 0;
  1357. netif_printk(cp, intr, KERN_DEBUG, cp->dev,
  1358. "mac interrupt, stat: 0x%x\n", stat);
  1359. /* This interrupt is just for pause frame and pause
  1360. * tracking. It is useful for diagnostics and debug
  1361. * but probably by default we will mask these events.
  1362. */
  1363. if (stat & MAC_CTRL_PAUSE_STATE)
  1364. cp->pause_entered++;
  1365. if (stat & MAC_CTRL_PAUSE_RECEIVED)
  1366. cp->pause_last_time_recvd = (stat >> 16);
  1367. return 0;
  1368. }
  1369. /* Must be invoked under cp->lock. */
  1370. static inline int cas_mdio_link_not_up(struct cas *cp)
  1371. {
  1372. u16 val;
  1373. switch (cp->lstate) {
  1374. case link_force_ret:
  1375. netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
  1376. cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
  1377. cp->timer_ticks = 5;
  1378. cp->lstate = link_force_ok;
  1379. cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
  1380. break;
  1381. case link_aneg:
  1382. val = cas_phy_read(cp, MII_BMCR);
  1383. /* Try forced modes. we try things in the following order:
  1384. * 1000 full -> 100 full/half -> 10 half
  1385. */
  1386. val &= ~(BMCR_ANRESTART | BMCR_ANENABLE);
  1387. val |= BMCR_FULLDPLX;
  1388. val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
  1389. CAS_BMCR_SPEED1000 : BMCR_SPEED100;
  1390. cas_phy_write(cp, MII_BMCR, val);
  1391. cp->timer_ticks = 5;
  1392. cp->lstate = link_force_try;
  1393. cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
  1394. break;
  1395. case link_force_try:
  1396. /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */
  1397. val = cas_phy_read(cp, MII_BMCR);
  1398. cp->timer_ticks = 5;
  1399. if (val & CAS_BMCR_SPEED1000) { /* gigabit */
  1400. val &= ~CAS_BMCR_SPEED1000;
  1401. val |= (BMCR_SPEED100 | BMCR_FULLDPLX);
  1402. cas_phy_write(cp, MII_BMCR, val);
  1403. break;
  1404. }
  1405. if (val & BMCR_SPEED100) {
  1406. if (val & BMCR_FULLDPLX) /* fd failed */
  1407. val &= ~BMCR_FULLDPLX;
  1408. else { /* 100Mbps failed */
  1409. val &= ~BMCR_SPEED100;
  1410. }
  1411. cas_phy_write(cp, MII_BMCR, val);
  1412. break;
  1413. }
  1414. default:
  1415. break;
  1416. }
  1417. return 0;
  1418. }
  1419. /* must be invoked with cp->lock held */
  1420. static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
  1421. {
  1422. int restart;
  1423. if (bmsr & BMSR_LSTATUS) {
  1424. /* Ok, here we got a link. If we had it due to a forced
  1425. * fallback, and we were configured for autoneg, we
  1426. * retry a short autoneg pass. If you know your hub is
  1427. * broken, use ethtool ;)
  1428. */
  1429. if ((cp->lstate == link_force_try) &&
  1430. (cp->link_cntl & BMCR_ANENABLE)) {
  1431. cp->lstate = link_force_ret;
  1432. cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
  1433. cas_mif_poll(cp, 0);
  1434. cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
  1435. cp->timer_ticks = 5;
  1436. if (cp->opened)
  1437. netif_info(cp, link, cp->dev,
  1438. "Got link after fallback, retrying autoneg once...\n");
  1439. cas_phy_write(cp, MII_BMCR,
  1440. cp->link_fcntl | BMCR_ANENABLE |
  1441. BMCR_ANRESTART);
  1442. cas_mif_poll(cp, 1);
  1443. } else if (cp->lstate != link_up) {
  1444. cp->lstate = link_up;
  1445. cp->link_transition = LINK_TRANSITION_LINK_UP;
  1446. if (cp->opened) {
  1447. cas_set_link_modes(cp);
  1448. netif_carrier_on(cp->dev);
  1449. }
  1450. }
  1451. return 0;
  1452. }
  1453. /* link not up. if the link was previously up, we restart the
  1454. * whole process
  1455. */
  1456. restart = 0;
  1457. if (cp->lstate == link_up) {
  1458. cp->lstate = link_down;
  1459. cp->link_transition = LINK_TRANSITION_LINK_DOWN;
  1460. netif_carrier_off(cp->dev);
  1461. if (cp->opened)
  1462. netif_info(cp, link, cp->dev, "Link down\n");
  1463. restart = 1;
  1464. } else if (++cp->timer_ticks > 10)
  1465. cas_mdio_link_not_up(cp);
  1466. return restart;
  1467. }
  1468. static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
  1469. u32 status)
  1470. {
  1471. u32 stat = readl(cp->regs + REG_MIF_STATUS);
  1472. u16 bmsr;
  1473. /* check for a link change */
  1474. if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
  1475. return 0;
  1476. bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
  1477. return cas_mii_link_check(cp, bmsr);
  1478. }
  1479. static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
  1480. u32 status)
  1481. {
  1482. u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);
  1483. if (!stat)
  1484. return 0;
  1485. netdev_err(dev, "PCI error [%04x:%04x]",
  1486. stat, readl(cp->regs + REG_BIM_DIAG));
  1487. /* cassini+ has this reserved */
  1488. if ((stat & PCI_ERR_BADACK) &&
  1489. ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
  1490. pr_cont(" <No ACK64# during ABS64 cycle>");
  1491. if (stat & PCI_ERR_DTRTO)
  1492. pr_cont(" <Delayed transaction timeout>");
  1493. if (stat & PCI_ERR_OTHER)
  1494. pr_cont(" <other>");
  1495. if (stat & PCI_ERR_BIM_DMA_WRITE)
  1496. pr_cont(" <BIM DMA 0 write req>");
  1497. if (stat & PCI_ERR_BIM_DMA_READ)
  1498. pr_cont(" <BIM DMA 0 read req>");
  1499. pr_cont("\n");
  1500. if (stat & PCI_ERR_OTHER) {
  1501. u16 cfg;
  1502. /* Interrogate PCI config space for the
  1503. * true cause.
  1504. */
  1505. pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
  1506. netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
  1507. if (cfg & PCI_STATUS_PARITY)
  1508. netdev_err(dev, "PCI parity error detected\n");
  1509. if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
  1510. netdev_err(dev, "PCI target abort\n");
  1511. if (cfg & PCI_STATUS_REC_TARGET_ABORT)
  1512. netdev_err(dev, "PCI master acks target abort\n");
  1513. if (cfg & PCI_STATUS_REC_MASTER_ABORT)
  1514. netdev_err(dev, "PCI master abort\n");
  1515. if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
  1516. netdev_err(dev, "PCI system error SERR#\n");
  1517. if (cfg & PCI_STATUS_DETECTED_PARITY)
  1518. netdev_err(dev, "PCI parity error\n");
  1519. /* Write the error bits back to clear them. */
  1520. cfg &= (PCI_STATUS_PARITY |
  1521. PCI_STATUS_SIG_TARGET_ABORT |
  1522. PCI_STATUS_REC_TARGET_ABORT |
  1523. PCI_STATUS_REC_MASTER_ABORT |
  1524. PCI_STATUS_SIG_SYSTEM_ERROR |
  1525. PCI_STATUS_DETECTED_PARITY);
  1526. pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
  1527. }
  1528. /* For all PCI errors, we should reset the chip. */
  1529. return 1;
  1530. }
  1531. /* All non-normal interrupt conditions get serviced here.
  1532. * Returns non-zero if we should just exit the interrupt
  1533. * handler right now (ie. if we reset the card which invalidates
  1534. * all of the other original irq status bits).
  1535. */
  1536. static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
  1537. u32 status)
  1538. {
  1539. if (status & INTR_RX_TAG_ERROR) {
  1540. /* corrupt…