/drivers/net/ethernet/toshiba/spider_net.c

http://github.com/mirrors/linux · C · 2536 lines · 1534 code · 347 blank · 655 comment · 217 complexity · c7a54bf7689ad572b30378f10f09287f MD5 · raw file

Large files are truncated; follow the link to view the full file.

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Network device driver for Cell Processor-Based Blade and Celleb platform
  4. *
  5. * (C) Copyright IBM Corp. 2005
  6. * (C) Copyright 2006 TOSHIBA CORPORATION
  7. *
  8. * Authors : Utz Bacher <utz.bacher@de.ibm.com>
  9. * Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
  10. */
  11. #include <linux/compiler.h>
  12. #include <linux/crc32.h>
  13. #include <linux/delay.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/ethtool.h>
  16. #include <linux/firmware.h>
  17. #include <linux/if_vlan.h>
  18. #include <linux/in.h>
  19. #include <linux/init.h>
  20. #include <linux/interrupt.h>
  21. #include <linux/gfp.h>
  22. #include <linux/ioport.h>
  23. #include <linux/ip.h>
  24. #include <linux/kernel.h>
  25. #include <linux/mii.h>
  26. #include <linux/module.h>
  27. #include <linux/netdevice.h>
  28. #include <linux/device.h>
  29. #include <linux/pci.h>
  30. #include <linux/skbuff.h>
  31. #include <linux/tcp.h>
  32. #include <linux/types.h>
  33. #include <linux/vmalloc.h>
  34. #include <linux/wait.h>
  35. #include <linux/workqueue.h>
  36. #include <linux/bitops.h>
  37. #include <net/checksum.h>
  38. #include "spider_net.h"
  39. MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
  40. "<Jens.Osterkamp@de.ibm.com>");
  41. MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
  42. MODULE_LICENSE("GPL");
  43. MODULE_VERSION(VERSION);
  44. MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
  45. static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
  46. static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
  47. module_param(rx_descriptors, int, 0444);
  48. module_param(tx_descriptors, int, 0444);
  49. MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
  50. "in rx chains");
  51. MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
  52. "in tx chain");
  53. char spider_net_driver_name[] = "spidernet";
  54. static const struct pci_device_id spider_net_pci_tbl[] = {
  55. { PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
  56. PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
  57. { 0, }
  58. };
  59. MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
/**
 * spider_net_read_reg - reads an SMMIO register of a card
 * @card: device structure
 * @reg: register offset to read from
 *
 * Returns the content of the specified SMMIO register.
 */
static inline u32
spider_net_read_reg(struct spider_net_card *card, u32 reg)
{
	/* We use the powerpc specific variants instead of readl_be() because
	 * we know spidernet is not a real PCI device and we can thus avoid the
	 * performance hit caused by the PCI workarounds.
	 */
	return in_be32(card->regs + reg);
}
/**
 * spider_net_write_reg - writes to an SMMIO register of a card
 * @card: device structure
 * @reg: register offset to write to
 * @value: value to write into the specified SMMIO register
 */
static inline void
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
{
	/* We use the powerpc specific variants instead of writel_be() because
	 * we know spidernet is not a real PCI device and we can thus avoid the
	 * performance hit caused by the PCI workarounds.
	 */
	out_be32(card->regs + reg, value);
}
  91. /**
  92. * spider_net_write_phy - write to phy register
  93. * @netdev: adapter to be written to
  94. * @mii_id: id of MII
  95. * @reg: PHY register
  96. * @val: value to be written to phy register
  97. *
  98. * spider_net_write_phy_register writes to an arbitrary PHY
  99. * register via the spider GPCWOPCMD register. We assume the queue does
  100. * not run full (not more than 15 commands outstanding).
  101. **/
  102. static void
  103. spider_net_write_phy(struct net_device *netdev, int mii_id,
  104. int reg, int val)
  105. {
  106. struct spider_net_card *card = netdev_priv(netdev);
  107. u32 writevalue;
  108. writevalue = ((u32)mii_id << 21) |
  109. ((u32)reg << 16) | ((u32)val);
  110. spider_net_write_reg(card, SPIDER_NET_GPCWOPCMD, writevalue);
  111. }
/**
 * spider_net_read_phy - read from phy register
 * @netdev: network device to be read from
 * @mii_id: id of MII
 * @reg: PHY register
 *
 * Returns value read from PHY register
 *
 * spider_net_read_phy reads from an arbitrary PHY
 * register via the spider GPCROPCMD register
 **/
static int
spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
{
	struct spider_net_card *card = netdev_priv(netdev);
	u32 readvalue;

	/* Issue the read command: PHY id and register encoded as in the
	 * write path, data field left zero. */
	readvalue = ((u32)mii_id << 21) | ((u32)reg << 16);
	spider_net_write_reg(card, SPIDER_NET_GPCROPCMD, readvalue);

	/* we don't use semaphores to wait for an SPIDER_NET_GPROPCMPINT
	 * interrupt, as we poll for the completion of the read operation
	 * in spider_net_read_phy. Should take about 50 us.
	 * NOTE(review): this busy-wait has no timeout; a wedged PHY would
	 * spin here forever. */
	do {
		readvalue = spider_net_read_reg(card, SPIDER_NET_GPCROPCMD);
	} while (readvalue & SPIDER_NET_GPREXEC);

	/* Extract the data bits from the completed command word. */
	readvalue &= SPIDER_NET_GPRDAT_MASK;

	return readvalue;
}
  139. /**
  140. * spider_net_setup_aneg - initial auto-negotiation setup
  141. * @card: device structure
  142. **/
  143. static void
  144. spider_net_setup_aneg(struct spider_net_card *card)
  145. {
  146. struct mii_phy *phy = &card->phy;
  147. u32 advertise = 0;
  148. u16 bmsr, estat;
  149. bmsr = spider_net_read_phy(card->netdev, phy->mii_id, MII_BMSR);
  150. estat = spider_net_read_phy(card->netdev, phy->mii_id, MII_ESTATUS);
  151. if (bmsr & BMSR_10HALF)
  152. advertise |= ADVERTISED_10baseT_Half;
  153. if (bmsr & BMSR_10FULL)
  154. advertise |= ADVERTISED_10baseT_Full;
  155. if (bmsr & BMSR_100HALF)
  156. advertise |= ADVERTISED_100baseT_Half;
  157. if (bmsr & BMSR_100FULL)
  158. advertise |= ADVERTISED_100baseT_Full;
  159. if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_TFULL))
  160. advertise |= SUPPORTED_1000baseT_Full;
  161. if ((bmsr & BMSR_ESTATEN) && (estat & ESTATUS_1000_THALF))
  162. advertise |= SUPPORTED_1000baseT_Half;
  163. sungem_phy_probe(phy, phy->mii_id);
  164. phy->def->ops->setup_aneg(phy, advertise);
  165. }
  166. /**
  167. * spider_net_rx_irq_off - switch off rx irq on this spider card
  168. * @card: device structure
  169. *
  170. * switches off rx irq by masking them out in the GHIINTnMSK register
  171. */
  172. static void
  173. spider_net_rx_irq_off(struct spider_net_card *card)
  174. {
  175. u32 regvalue;
  176. regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
  177. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
  178. }
  179. /**
  180. * spider_net_rx_irq_on - switch on rx irq on this spider card
  181. * @card: device structure
  182. *
  183. * switches on rx irq by enabling them in the GHIINTnMSK register
  184. */
  185. static void
  186. spider_net_rx_irq_on(struct spider_net_card *card)
  187. {
  188. u32 regvalue;
  189. regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
  190. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
  191. }
/**
 * spider_net_set_promisc - sets the unicast address or the promiscuous mode
 * @card: card structure
 *
 * spider_net_set_promisc sets the unicast destination address filter and
 * thus either allows for non-promisc mode or promisc mode
 */
static void
spider_net_set_promisc(struct spider_net_card *card)
{
	u32 macu, macl;
	struct net_device *netdev = card->netdev;

	if (netdev->flags & IFF_PROMISC) {
		/* clear destination entry 0 so nothing is filtered out */
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, 0);
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, 0);
		spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
				     SPIDER_NET_PROMISC_VALUE);
	} else {
		/* Split the 6-byte MAC across two registers: the upper
		 * 2 bytes (plus the descriptor-valid flag) and the lower
		 * 4 bytes, copied as one 32-bit word. */
		macu = netdev->dev_addr[0];
		macu <<= 8;
		macu |= netdev->dev_addr[1];
		memcpy(&macl, &netdev->dev_addr[2], sizeof(macl));
		macu |= SPIDER_NET_UA_DESCR_VALUE;
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR, macu);
		spider_net_write_reg(card, SPIDER_NET_GMRUAFILnR + 0x04, macl);
		spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R,
				     SPIDER_NET_NONPROMISC_VALUE);
	}
}
/**
 * spider_net_get_descr_status -- returns the status of a descriptor
 * @hwdescr: hardware descriptor to look at
 *
 * returns the status as in the dmac_cmd_status field of the descriptor
 */
static inline int
spider_net_get_descr_status(struct spider_net_hw_descr *hwdescr)
{
	return hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
}
  233. /**
  234. * spider_net_free_chain - free descriptor chain
  235. * @card: card structure
  236. * @chain: address of chain
  237. *
  238. */
  239. static void
  240. spider_net_free_chain(struct spider_net_card *card,
  241. struct spider_net_descr_chain *chain)
  242. {
  243. struct spider_net_descr *descr;
  244. descr = chain->ring;
  245. do {
  246. descr->bus_addr = 0;
  247. descr->hwdescr->next_descr_addr = 0;
  248. descr = descr->next;
  249. } while (descr != chain->ring);
  250. dma_free_coherent(&card->pdev->dev, chain->num_desc,
  251. chain->hwring, chain->dma_addr);
  252. }
/**
 * spider_net_init_chain - alloc and link descriptor chain
 * @card: card structure
 * @chain: address of chain
 *
 * We manage a circular list that mirrors the hardware structure,
 * except that the hardware uses bus addresses.
 *
 * Returns 0 on success, <0 on failure
 */
static int
spider_net_init_chain(struct spider_net_card *card,
		      struct spider_net_descr_chain *chain)
{
	int i;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	dma_addr_t buf;
	size_t alloc_size;

	/* One coherent region holds all hardware descriptors. */
	alloc_size = chain->num_desc * sizeof(struct spider_net_hw_descr);

	chain->hwring = dma_alloc_coherent(&card->pdev->dev, alloc_size,
					   &chain->dma_addr, GFP_KERNEL);
	if (!chain->hwring)
		return -ENOMEM;

	/* chain->ring is the software mirror; assumed to be allocated by
	 * the caller before this is invoked. */
	memset(chain->ring, 0, chain->num_desc * sizeof(struct spider_net_descr));

	/* Set up the hardware pointers in each descriptor */
	descr = chain->ring;
	hwdescr = chain->hwring;
	buf = chain->dma_addr;
	for (i=0; i < chain->num_desc; i++, descr++, hwdescr++) {
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
		hwdescr->next_descr_addr = 0;

		descr->hwdescr = hwdescr;
		descr->bus_addr = buf;
		descr->next = descr + 1;
		descr->prev = descr - 1;

		buf += sizeof(struct spider_net_hw_descr);
	}
	/* do actual circular list: fix up the first/last prev/next links,
	 * which the loop above left pointing outside the array */
	(descr-1)->next = chain->ring;
	chain->ring->prev = descr-1;

	spin_lock_init(&chain->lock);
	chain->head = chain->ring;
	chain->tail = chain->ring;
	return 0;
}
/**
 * spider_net_free_rx_chain_contents - frees descr contents in rx chain
 * @card: card structure
 *
 * Walks the whole RX ring, unmapping and freeing every skb still
 * attached to a descriptor.
 */
static void
spider_net_free_rx_chain_contents(struct spider_net_card *card)
{
	struct spider_net_descr *descr;

	descr = card->rx_chain.head;
	do {
		if (descr->skb) {
			pci_unmap_single(card->pdev, descr->hwdescr->buf_addr,
					 SPIDER_NET_MAX_FRAME,
					 PCI_DMA_BIDIRECTIONAL);
			dev_kfree_skb(descr->skb);
			descr->skb = NULL;
		}
		descr = descr->next;
	} while (descr != card->rx_chain.head);
}
/**
 * spider_net_prepare_rx_descr - Reinitialize RX descriptor
 * @card: card structure
 * @descr: descriptor to re-init
 *
 * Return 0 on success, <0 on failure.
 *
 * Allocates a new rx skb, iommu-maps it and attaches it to the
 * descriptor. Mark the descriptor as activated, ready-to-use.
 *
 * NOTE(review): a DMA-mapping failure still returns 0 (the descriptor
 * is just left SPIDER_NET_DESCR_NOT_IN_USE, without an skb); only skb
 * allocation failure returns -ENOMEM. Callers rely on that distinction
 * to stop refilling — confirm before changing.
 */
static int
spider_net_prepare_rx_descr(struct spider_net_card *card,
			    struct spider_net_descr *descr)
{
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	dma_addr_t buf;
	int offset;
	int bufsize;

	/* we need to round up the buffer size to a multiple of 128 */
	bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
		(~(SPIDER_NET_RXBUF_ALIGN - 1));

	/* and we need to have it 128 byte aligned, therefore we allocate a
	 * bit more */
	/* allocate an skb */
	descr->skb = netdev_alloc_skb(card->netdev,
				      bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
	if (!descr->skb) {
		if (netif_msg_rx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev,
				"Not enough memory to allocate rx buffer\n");
		card->spider_stats.alloc_rx_skb_error++;
		return -ENOMEM;
	}
	hwdescr->buf_size = bufsize;
	hwdescr->result_size = 0;
	hwdescr->valid_size = 0;
	hwdescr->data_status = 0;
	hwdescr->data_error = 0;

	/* align the data pointer to the 128-byte boundary the DMA engine
	 * requires */
	offset = ((unsigned long)descr->skb->data) &
		(SPIDER_NET_RXBUF_ALIGN - 1);
	if (offset)
		skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
	/* iommu-map the skb */
	buf = pci_map_single(card->pdev, descr->skb->data,
			     SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(card->pdev, buf)) {
		dev_kfree_skb_any(descr->skb);
		descr->skb = NULL;
		if (netif_msg_rx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
		card->spider_stats.rx_iommu_map_error++;
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	} else {
		hwdescr->buf_addr = buf;
		/* make sure the buffer address is visible before handing
		 * the descriptor to the card */
		wmb();
		hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
					 SPIDER_NET_DMAC_NOINTR_COMPLETE;
	}

	return 0;
}
/**
 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
 * @card: card structure
 *
 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
 * chip by writing to the appropriate register. DMA is enabled in
 * spider_net_enable_rxdmac.
 */
static inline void
spider_net_enable_rxchtails(struct spider_net_card *card)
{
	/* assume chain is aligned correctly */
	spider_net_write_reg(card, SPIDER_NET_GDADCHA ,
			     card->rx_chain.tail->bus_addr);
}
/**
 * spider_net_enable_rxdmac - enables a receive DMA controller
 * @card: card structure
 *
 * spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
 * in the GDADMACCNTR register
 */
static inline void
spider_net_enable_rxdmac(struct spider_net_card *card)
{
	/* ensure all descriptor writes are visible before the card starts
	 * fetching them */
	wmb();
	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
			     SPIDER_NET_DMA_RX_VALUE);
}
/**
 * spider_net_disable_rxdmac - disables the receive DMA controller
 * @card: card structure
 *
 * spider_net_disable_rxdmac terminates processing on the DMA controller
 * by turning off the DMA controller, with the force-end flag set.
 */
static inline void
spider_net_disable_rxdmac(struct spider_net_card *card)
{
	spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
			     SPIDER_NET_DMA_RX_FEND_VALUE);
}
/**
 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
 * @card: card structure
 *
 * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
 */
static void
spider_net_refill_rx_chain(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	unsigned long flags;

	/* one context doing the refill (and a second context seeing that
	 * and omitting it) is ok. If called by NAPI, we'll be called again
	 * as spider_net_decode_one_descr is called several times. If some
	 * interrupt calls us, the NAPI is about to clean up anyway. */
	if (!spin_trylock_irqsave(&chain->lock, flags))
		return;

	/* advance head over every free descriptor, arming each with a
	 * fresh skb; stop at the first descriptor still in use or on
	 * allocation failure */
	while (spider_net_get_descr_status(chain->head->hwdescr) ==
			SPIDER_NET_DESCR_NOT_IN_USE) {
		if (spider_net_prepare_rx_descr(card, chain->head))
			break;
		chain->head = chain->head->next;
	}

	spin_unlock_irqrestore(&chain->lock, flags);
}
/**
 * spider_net_alloc_rx_skbs - Allocates rx skbs in rx descriptor chains
 * @card: card structure
 *
 * Returns 0 on success, <0 on failure.
 */
static int
spider_net_alloc_rx_skbs(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *start = chain->tail;
	struct spider_net_descr *descr = start;

	/* Link up the hardware chain pointers */
	do {
		descr->prev->hwdescr->next_descr_addr = descr->bus_addr;
		descr = descr->next;
	} while (descr != start);

	/* Put at least one buffer into the chain. if this fails,
	 * we've got a problem. If not, spider_net_refill_rx_chain
	 * will do the rest at the end of this function. */
	if (spider_net_prepare_rx_descr(card, chain->head))
		goto error;
	else
		chain->head = chain->head->next;

	/* This will allocate the rest of the rx buffers;
	 * if not, it's business as usual later on. */
	spider_net_refill_rx_chain(card);
	spider_net_enable_rxdmac(card);
	return 0;

error:
	spider_net_free_rx_chain_contents(card);
	return -ENOMEM;
}
/**
 * spider_net_get_multicast_hash - generates hash for multicast filter table
 * @netdev: interface device structure (used for addr_len)
 * @addr: multicast address
 *
 * returns the hash value.
 *
 * spider_net_get_multicast_hash calculates a hash value for a given multicast
 * address, that is used to set the multicast filter tables
 */
static u8
spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
{
	u32 crc;
	u8 hash;
	char addr_for_crc[ETH_ALEN] = { 0, };
	int i, bit;

	/* bit-reverse each byte of the address (and mirror the byte
	 * order) before running it through big-endian CRC32 */
	for (i = 0; i < ETH_ALEN * 8; i++) {
		bit = (addr[i / 8] >> (i % 8)) & 1;
		addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
	}

	crc = crc32_be(~0, addr_for_crc, netdev->addr_len);

	/* build an 8-bit hash from the top 5 and bottom 3 CRC bits */
	hash = (crc >> 27);
	hash <<= 3;
	hash |= crc & 7;
	hash &= 0xff;

	return hash;
}
/**
 * spider_net_set_multi - sets multicast addresses and promisc flags
 * @netdev: interface device structure
 *
 * spider_net_set_multi configures multicast addresses as needed for the
 * netdev interface. It also sets up multicast, allmulti and promisc
 * flags appropriately
 */
static void
spider_net_set_multi(struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u8 hash;
	int i;
	u32 reg;
	struct spider_net_card *card = netdev_priv(netdev);
	DECLARE_BITMAP(bitmask, SPIDER_NET_MULTICAST_HASHES) = {};

	spider_net_set_promisc(card);

	/* allmulti: accept everything by setting every hash bucket */
	if (netdev->flags & IFF_ALLMULTI) {
		for (i = 0; i < SPIDER_NET_MULTICAST_HASHES; i++) {
			set_bit(i, bitmask);
		}
		goto write_hash;
	}

	/* well, we know, what the broadcast hash value is: it's xfd
	hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
	set_bit(0xfd, bitmask);

	netdev_for_each_mc_addr(ha, netdev) {
		hash = spider_net_get_multicast_hash(netdev, ha->addr);
		set_bit(hash, bitmask);
	}

write_hash:
	/* each 32-bit filter register holds four hash buckets, one per
	 * byte, with bit 3 of each byte marking "accept" */
	for (i = 0; i < SPIDER_NET_MULTICAST_HASHES / 4; i++) {
		reg = 0;
		if (test_bit(i * 4, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 1, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 2, bitmask))
			reg += 0x08;
		reg <<= 8;
		if (test_bit(i * 4 + 3, bitmask))
			reg += 0x08;

		spider_net_write_reg(card, SPIDER_NET_GMRMHFILnR + i * 4, reg);
	}
}
/**
 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
 * @card: card structure
 * @skb: packet to use
 *
 * returns 0 on success, <0 on failure.
 *
 * fills out the descriptor structure with skb data and len. Copies data,
 * if needed (32bit DMA!)
 */
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
			    struct sk_buff *skb)
{
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	dma_addr_t buf;
	unsigned long flags;

	/* map the packet before taking the chain lock */
	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(card->pdev, buf)) {
		if (netif_msg_tx_err(card) && net_ratelimit())
			dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
				"Dropping packet\n", skb->data, skb->len);
		card->spider_stats.tx_iommu_map_error++;
		return -ENOMEM;
	}

	spin_lock_irqsave(&chain->lock, flags);
	descr = card->tx_chain.head;
	/* ring-full check: keep one slot of headroom before the tail */
	if (descr->next == chain->tail->prev) {
		spin_unlock_irqrestore(&chain->lock, flags);
		pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE);
		return -ENOMEM;
	}
	hwdescr = descr->hwdescr;
	chain->head = descr->next;

	descr->skb = skb;
	hwdescr->buf_addr = buf;
	hwdescr->buf_size = skb->len;
	hwdescr->next_descr_addr = 0;
	hwdescr->data_status = 0;

	hwdescr->dmac_cmd_status =
			SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
	spin_unlock_irqrestore(&chain->lock, flags);

	/* NOTE(review): the checksum-offload bits are OR-ed in after the
	 * descriptor is already marked CARDOWNED and the lock dropped;
	 * the descriptor is only reachable by the card once
	 * next_descr_addr is linked below, which presumably makes this
	 * safe — confirm before reordering. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		switch (ip_hdr(skb)->protocol) {
		case IPPROTO_TCP:
			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
			break;
		case IPPROTO_UDP:
			hwdescr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
			break;
		}

	/* Chain the bus address, so that the DMA engine finds this descr. */
	wmb();
	descr->prev->hwdescr->next_descr_addr = descr->bus_addr;

	netif_trans_update(card->netdev); /* set netdev watchdog timer */
	return 0;
}
/**
 * spider_net_set_low_watermark - set the TX-interrupt watermark descriptor
 * @card: card structure
 *
 * Returns the (approximate) number of in-flight TX descriptors.
 *
 * Places the SPIDER_NET_DESCR_TXDESFLG interrupt flag 3/4 of the way
 * into the in-flight portion of the TX ring and clears the flag from
 * the previously marked descriptor, so that at most one completion
 * interrupt is raised per burst. Queues shorter than a quarter of the
 * ring get no watermark at all.
 */
static int
spider_net_set_low_watermark(struct spider_net_card *card)
{
	struct spider_net_descr *descr = card->tx_chain.tail;
	struct spider_net_hw_descr *hwdescr;
	unsigned long flags;
	int status;
	int cnt=0;
	int i;

	/* Measure the length of the queue. Measurement does not
	 * need to be precise -- does not need a lock. */
	while (descr != card->tx_chain.head) {
		status = descr->hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
		if (status == SPIDER_NET_DESCR_NOT_IN_USE)
			break;
		descr = descr->next;
		cnt++;
	}

	/* If TX queue is short, don't even bother with interrupts */
	if (cnt < card->tx_chain.num_desc/4)
		return cnt;

	/* Set low-watermark 3/4th's of the way into the queue. */
	descr = card->tx_chain.tail;
	cnt = (cnt*3)/4;
	for (i=0;i<cnt; i++)
		descr = descr->next;

	/* Set the new watermark, clear the old watermark */
	spin_lock_irqsave(&card->tx_chain.lock, flags);
	descr->hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
	if (card->low_watermark && card->low_watermark != descr) {
		hwdescr = card->low_watermark->hwdescr;
		hwdescr->dmac_cmd_status =
		     hwdescr->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
	}
	card->low_watermark = descr;
	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
	return cnt;
}
/**
 * spider_net_release_tx_chain - processes sent tx descriptors
 * @card: adapter structure
 * @brutal: if set, don't care about whether descriptor seems to be in use
 *
 * returns 0 if the tx ring is empty, otherwise 1.
 *
 * spider_net_release_tx_chain releases the tx descriptors that spider has
 * finished with (if non-brutal) or simply release tx descriptors (if brutal).
 * If some other context is calling this function, we return 1 so that we're
 * scheduled again (if we were scheduled) and will not lose initiative.
 */
static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr;
	struct spider_net_hw_descr *hwdescr;
	struct sk_buff *skb;
	u32 buf_addr;
	unsigned long flags;
	int status;

	/* The lock is taken and dropped once per iteration so that the
	 * (potentially slow) unmap/free happens outside the lock. */
	while (1) {
		spin_lock_irqsave(&chain->lock, flags);
		if (chain->tail == chain->head) {
			spin_unlock_irqrestore(&chain->lock, flags);
			return 0;
		}
		descr = chain->tail;
		hwdescr = descr->hwdescr;

		status = spider_net_get_descr_status(hwdescr);
		switch (status) {
		case SPIDER_NET_DESCR_COMPLETE:
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += descr->skb->len;
			break;

		case SPIDER_NET_DESCR_CARDOWNED:
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}

			/* fallthrough, if we release the descriptors
			 * brutally (then we don't care about
			 * SPIDER_NET_DESCR_CARDOWNED) */
			/* Fall through */

		case SPIDER_NET_DESCR_RESPONSE_ERROR:
		case SPIDER_NET_DESCR_PROTECTION_ERROR:
		case SPIDER_NET_DESCR_FORCE_END:
			if (netif_msg_tx_err(card))
				dev_err(&card->netdev->dev, "forcing end of tx descriptor "
					"with status x%02x\n", status);
			dev->stats.tx_errors++;
			break;

		default:
			dev->stats.tx_dropped++;
			if (!brutal) {
				spin_unlock_irqrestore(&chain->lock, flags);
				return 1;
			}
		}

		/* reclaim the descriptor: capture skb and bus address under
		 * the lock, then release the lock before unmapping */
		chain->tail = descr->next;
		hwdescr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
		skb = descr->skb;
		descr->skb = NULL;
		buf_addr = hwdescr->buf_addr;
		spin_unlock_irqrestore(&chain->lock, flags);

		/* unmap the skb */
		if (skb) {
			pci_unmap_single(card->pdev, buf_addr, skb->len,
					 PCI_DMA_TODEVICE);
			dev_consume_skb_any(skb);
		}
	}
	/* not reached: the loop above only exits via return */
	return 0;
}
/**
 * spider_net_kick_tx_dma - enables TX DMA processing
 * @card: card structure
 *
 * This routine will start the transmit DMA running if
 * it is not already running. This routine need only be
 * called when queueing a new packet to an empty tx queue.
 * Writes the current tx chain head as start address
 * of the tx descriptor chain and enables the transmission
 * DMA engine.
 */
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
{
	struct spider_net_descr *descr;

	/* nothing to do if the engine is already running */
	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
			SPIDER_NET_TX_DMA_EN)
		goto out;

	/* scan from tail toward head for the first card-owned descriptor
	 * and restart the engine there */
	descr = card->tx_chain.tail;
	for (;;) {
		if (spider_net_get_descr_status(descr->hwdescr) ==
				SPIDER_NET_DESCR_CARDOWNED) {
			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
					descr->bus_addr);
			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
					SPIDER_NET_DMA_TX_VALUE);
			break;
		}
		if (descr == card->tx_chain.head)
			break;
		descr = descr->next;
	}

out:
	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}
/**
 * spider_net_xmit - transmits a frame over the device
 * @skb: packet to send out
 * @netdev: interface device structure
 *
 * returns NETDEV_TX_OK on success, NETDEV_TX_BUSY on failure
 */
static netdev_tx_t
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	int cnt;
	struct spider_net_card *card = netdev_priv(netdev);

	/* reclaim completed descriptors first to make room */
	spider_net_release_tx_chain(card, 0);

	if (spider_net_prepare_tx_descr(card, skb) != 0) {
		/* NOTE(review): tx_dropped is incremented although
		 * NETDEV_TX_BUSY makes the stack requeue the skb (it is
		 * not actually dropped, and not freed here) — confirm
		 * whether this accounting is intended. */
		netdev->stats.tx_dropped++;
		netif_stop_queue(netdev);
		return NETDEV_TX_BUSY;
	}

	/* kick the DMA engine only for short queues; longer queues rely
	 * on the low-watermark interrupt */
	cnt = spider_net_set_low_watermark(card);
	if (cnt < 5)
		spider_net_kick_tx_dma(card);
	return NETDEV_TX_OK;
}
/**
 * spider_net_cleanup_tx_ring - cleans up the TX ring
 * @t: timer whose containing spider_net_card is looked up via from_timer
 *
 * spider_net_cleanup_tx_ring is called by either the tx_timer
 * or from the NAPI polling routine.
 * This routine releases resources associated with transmitted
 * packets, including updating the queue tail pointer.
 */
static void
spider_net_cleanup_tx_ring(struct timer_list *t)
{
	struct spider_net_card *card = from_timer(card, t, tx_timer);

	/* if descriptors remain in flight and the device is up, restart
	 * the DMA engine and let the stack queue more packets */
	if ((spider_net_release_tx_chain(card, 0) != 0) &&
	    (card->netdev->flags & IFF_UP)) {
		spider_net_kick_tx_dma(card);
		netif_wake_queue(card->netdev);
	}
}
  806. /**
  807. * spider_net_do_ioctl - called for device ioctls
  808. * @netdev: interface device structure
  809. * @ifr: request parameter structure for ioctl
  810. * @cmd: command code for ioctl
  811. *
  812. * returns 0 on success, <0 on failure. Currently, we have no special ioctls.
  813. * -EOPNOTSUPP is returned, if an unknown ioctl was requested
  814. */
  815. static int
  816. spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
  817. {
  818. switch (cmd) {
  819. default:
  820. return -EOPNOTSUPP;
  821. }
  822. }
/**
 * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
 * @descr: descriptor to process
 * @card: card structure
 *
 * Fills out skb structure and passes the data to the stack.
 * The descriptor state is not changed.
 */
static void
spider_net_pass_skb_up(struct spider_net_descr *descr,
		       struct spider_net_card *card)
{
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	struct sk_buff *skb = descr->skb;
	struct net_device *netdev = card->netdev;
	u32 data_status = hwdescr->data_status;
	u32 data_error = hwdescr->data_error;

	/* extend the skb to the length the hardware reports as valid */
	skb_put(skb, hwdescr->valid_size);

	/* the card seems to add 2 bytes of junk in front
	 * of the ethernet frame */
#define SPIDER_MISALIGN		2
	skb_pull(skb, SPIDER_MISALIGN);
	skb->protocol = eth_type_trans(skb, netdev);

	/* checksum offload: only trust the hardware when all checksum
	 * status bits are set and no checksum error bits are */
	skb_checksum_none_assert(skb);
	if (netdev->features & NETIF_F_RXCSUM) {
		if ( (  (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
		       SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
		     !(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (data_status & SPIDER_NET_VLAN_PACKET) {
		/* further enhancements: HW-accel VLAN */
	}

	/* update netdevice statistics */
	netdev->stats.rx_packets++;
	netdev->stats.rx_bytes += skb->len;

	/* pass skb up to stack */
	netif_receive_skb(skb);
}
/*
 * show_rx_chain - log the state of the RX descriptor ring (debug aid)
 * @card: card structure
 *
 * Starting at the chain tail, walks the full ring once and prints runs
 * of consecutive descriptors sharing the same dmac_cmd_status, plus the
 * positions of the software head and of the hardware current/next
 * descriptor pointers (GDACTDPA / GDACNEXTDA). With DEBUG defined, every
 * descriptor is additionally dumped in full.
 */
static void show_rx_chain(struct spider_net_card *card)
{
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *start= chain->tail;
	struct spider_net_descr *descr= start;
	struct spider_net_hw_descr *hwd = start->hwdescr;
	struct device *dev = &card->netdev->dev;
	u32 curr_desc, next_desc;
	int status;

	int tot = 0;	/* total descriptors visited */
	int cnt = 0;	/* length of the current same-status run */
	int off = start - chain->ring;	/* ring index of current descr */
	int cstat = hwd->dmac_cmd_status;	/* status of the current run */

	dev_info(dev, "Total number of descrs=%d\n",
		chain->num_desc);
	dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
		off, cstat);

	/* Snapshot the hardware's current and next descriptor bus addresses
	 * so they can be matched against ring entries below. */
	curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
	next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);

	status = cstat;
	do
	{
		hwd = descr->hwdescr;
		off = descr - chain->ring;
		status = hwd->dmac_cmd_status;

		if (descr == chain->head)
			dev_info(dev, "Chain head is at %d, head status=0x%x\n",
			         off, status);

		if (curr_desc == descr->bus_addr)
			dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
			         off, status);

		if (next_desc == descr->bus_addr)
			dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
			         off, status);

		/* a zero link address means the chain was cut here */
		if (hwd->next_descr_addr == 0)
			dev_info(dev, "chain is cut at %d\n", off);

		/* Status changed: report the run that just ended. The from/to
		 * indices are computed modulo num_desc to handle ring wrap. */
		if (cstat != status) {
			int from = (chain->num_desc + off - cnt) % chain->num_desc;
			int to = (chain->num_desc + off - 1) % chain->num_desc;
			dev_info(dev, "Have %d (from %d to %d) descrs "
			         "with stat=0x%08x\n", cnt, from, to, cstat);
			cstat = status;
			cnt = 0;
		}

		cnt ++;
		tot ++;
		descr = descr->next;
	} while (descr != start);

	dev_info(dev, "Last %d descrs with stat=0x%08x "
	         "for a total of %d descrs\n", cnt, cstat, tot);

#ifdef DEBUG
	/* Now dump the whole ring */
	descr = start;
	do
	{
		struct spider_net_hw_descr *hwd = descr->hwdescr;
		status = spider_net_get_descr_status(hwd);
		cnt = descr - chain->ring;
		dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
		         cnt, status, descr->skb);
		dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
		         descr->bus_addr, hwd->buf_addr, hwd->buf_size);
		dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
		         hwd->next_descr_addr, hwd->result_size,
		         hwd->valid_size);
		dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
		         hwd->dmac_cmd_status, hwd->data_status,
		         hwd->data_error);
		dev_info(dev, "\n");
		descr = descr->next;
	} while (descr != start);
#endif
}
  936. /**
  937. * spider_net_resync_head_ptr - Advance head ptr past empty descrs
  938. *
  939. * If the driver fails to keep up and empty the queue, then the
  940. * hardware wil run out of room to put incoming packets. This
  941. * will cause the hardware to skip descrs that are full (instead
  942. * of halting/retrying). Thus, once the driver runs, it wil need
  943. * to "catch up" to where the hardware chain pointer is at.
  944. */
  945. static void spider_net_resync_head_ptr(struct spider_net_card *card)
  946. {
  947. unsigned long flags;
  948. struct spider_net_descr_chain *chain = &card->rx_chain;
  949. struct spider_net_descr *descr;
  950. int i, status;
  951. /* Advance head pointer past any empty descrs */
  952. descr = chain->head;
  953. status = spider_net_get_descr_status(descr->hwdescr);
  954. if (status == SPIDER_NET_DESCR_NOT_IN_USE)
  955. return;
  956. spin_lock_irqsave(&chain->lock, flags);
  957. descr = chain->head;
  958. status = spider_net_get_descr_status(descr->hwdescr);
  959. for (i=0; i<chain->num_desc; i++) {
  960. if (status != SPIDER_NET_DESCR_CARDOWNED) break;
  961. descr = descr->next;
  962. status = spider_net_get_descr_status(descr->hwdescr);
  963. }
  964. chain->head = descr;
  965. spin_unlock_irqrestore(&chain->lock, flags);
  966. }
  967. static int spider_net_resync_tail_ptr(struct spider_net_card *card)
  968. {
  969. struct spider_net_descr_chain *chain = &card->rx_chain;
  970. struct spider_net_descr *descr;
  971. int i, status;
  972. /* Advance tail pointer past any empty and reaped descrs */
  973. descr = chain->tail;
  974. status = spider_net_get_descr_status(descr->hwdescr);
  975. for (i=0; i<chain->num_desc; i++) {
  976. if ((status != SPIDER_NET_DESCR_CARDOWNED) &&
  977. (status != SPIDER_NET_DESCR_NOT_IN_USE)) break;
  978. descr = descr->next;
  979. status = spider_net_get_descr_status(descr->hwdescr);
  980. }
  981. chain->tail = descr;
  982. if ((i == chain->num_desc) || (i == 0))
  983. return 1;
  984. return 0;
  985. }
/**
 * spider_net_decode_one_descr - processes an RX descriptor
 * @card: card structure
 *
 * Returns 1 if a packet has been sent to the stack, otherwise 0.
 *
 * Processes an RX descriptor by iommu-unmapping the data buffer
 * and passing the packet up to the stack. This function is called
 * in softirq context, e.g. either bottom half from interrupt or
 * NAPI polling context.
 */
static int
spider_net_decode_one_descr(struct spider_net_card *card)
{
	struct net_device *dev = card->netdev;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr = chain->tail;
	struct spider_net_hw_descr *hwdescr = descr->hwdescr;
	u32 hw_buf_addr;
	int status;

	status = spider_net_get_descr_status(hwdescr);

	/* Nothing in the descriptor, or ring must be empty */
	if ((status == SPIDER_NET_DESCR_CARDOWNED) ||
	    (status == SPIDER_NET_DESCR_NOT_IN_USE))
		return 0;

	/* descriptor definitively used -- move on tail */
	chain->tail = descr->next;

	/* unmap descriptor; buf_addr is poisoned so stale reuse of the
	 * unmapped address is recognizable in later dumps */
	hw_buf_addr = hwdescr->buf_addr;
	hwdescr->buf_addr = 0xffffffff;
	pci_unmap_single(card->pdev, hw_buf_addr,
			 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);

	/* hardware reported a transfer-level error: count and drop */
	if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
	     (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
	     (status == SPIDER_NET_DESCR_FORCE_END) ) {
		if (netif_msg_rx_err(card))
			dev_err(&dev->dev,
				"dropping RX descriptor with state %d\n", status);
		dev->stats.rx_dropped++;
		goto bad_desc;
	}

	/* anything other than COMPLETE/FRAME_END is unexpected here */
	if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
	     (status != SPIDER_NET_DESCR_FRAME_END) ) {
		if (netif_msg_rx_err(card))
			dev_err(&card->netdev->dev,
				"RX descriptor with unknown state %d\n", status);
		card->spider_stats.rx_desc_unk_state++;
		goto bad_desc;
	}

	/* The cases we'll throw away the packet immediately */
	if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
		if (netif_msg_rx_err(card))
			dev_err(&card->netdev->dev,
				"error in received descriptor found, "
				"data_status=x%08x, data_error=x%08x\n",
				hwdescr->data_status, hwdescr->data_error);
		goto bad_desc;
	}

	/* inconsistent command status: dump the full descriptor state
	 * unconditionally (not gated by netif_msg_rx_err) */
	if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
		dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
			hwdescr->dmac_cmd_status);
		pr_err("buf_addr=x%08x\n", hw_buf_addr);
		pr_err("buf_size=x%08x\n", hwdescr->buf_size);
		pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
		pr_err("result_size=x%08x\n", hwdescr->result_size);
		pr_err("valid_size=x%08x\n", hwdescr->valid_size);
		pr_err("data_status=x%08x\n", hwdescr->data_status);
		pr_err("data_error=x%08x\n", hwdescr->data_error);
		pr_err("which=%ld\n", descr - card->rx_chain.ring);

		card->spider_stats.rx_desc_error++;
		goto bad_desc;
	}

	/* Ok, we've got a packet in descr */
	spider_net_pass_skb_up(descr, card);
	descr->skb = NULL;
	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	return 1;

bad_desc:
	if (netif_msg_rx_err(card))
		show_rx_chain(card);
	/* release the skb and mark the descriptor reusable */
	dev_kfree_skb_irq(descr->skb);
	descr->skb = NULL;
	hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	return 0;
}
  1071. /**
  1072. * spider_net_poll - NAPI poll function called by the stack to return packets
  1073. * @netdev: interface device structure
  1074. * @budget: number of packets we can pass to the stack at most
  1075. *
  1076. * returns 0 if no more packets available to the driver/stack. Returns 1,
  1077. * if the quota is exceeded, but the driver has still packets.
  1078. *
  1079. * spider_net_poll returns all packets from the rx descriptors to the stack
  1080. * (using netif_receive_skb). If all/enough packets are up, the driver
  1081. * reenables interrupts and returns 0. If not, 1 is returned.
  1082. */
  1083. static int spider_net_poll(struct napi_struct *napi, int budget)
  1084. {
  1085. struct spider_net_card *card = container_of(napi, struct spider_net_card, napi);
  1086. int packets_done = 0;
  1087. while (packets_done < budget) {
  1088. if (!spider_net_decode_one_descr(card))
  1089. break;
  1090. packets_done++;
  1091. }
  1092. if ((packets_done == 0) && (card->num_rx_ints != 0)) {
  1093. if (!spider_net_resync_tail_ptr(card))
  1094. packets_done = budget;
  1095. spider_net_resync_head_ptr(card);
  1096. }
  1097. card->num_rx_ints = 0;
  1098. spider_net_refill_rx_chain(card);
  1099. spider_net_enable_rxdmac(card);
  1100. spider_net_cleanup_tx_ring(&card->tx_timer);
  1101. /* if all packets are in the stack, enable interrupts and return 0 */
  1102. /* if not, return 1 */
  1103. if (packets_done < budget) {
  1104. napi_complete_done(napi, packets_done);
  1105. spider_net_rx_irq_on(card);
  1106. card->ignore_rx_ramfull = 0;
  1107. }
  1108. return packets_done;
  1109. }
/**
 * spider_net_set_mac - sets the MAC of an interface
 * @netdev: interface device structure
 * @p: pointer to a struct sockaddr holding the new MAC address
 *
 * Returns 0 on success, -EADDRNOTAVAIL if the given address is not a
 * valid ethernet address. The MAC is programmed into the hardware with
 * the GMAC transmitter/receiver briefly disabled, and the promiscuous
 * setting is re-applied afterwards.
 */
static int
spider_net_set_mac(struct net_device *netdev, void *p)
{
	struct spider_net_card *card = netdev_priv(netdev);
	u32 macl, macu, regvalue;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);

	/* switch off GMACTPE and GMACRPE */
	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
	regvalue &= ~((1 << 5) | (1 << 6));
	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);

	/* write mac: upper register holds bytes 0-3, lower bytes 4-5 */
	macu = (netdev->dev_addr[0]<<24) + (netdev->dev_addr[1]<<16) +
		(netdev->dev_addr[2]<<8) + (netdev->dev_addr[3]);
	macl = (netdev->dev_addr[4]<<8) + (netdev->dev_addr[5]);
	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACU, macu);
	spider_net_write_reg(card, SPIDER_NET_GMACUNIMACL, macl);

	/* switch GMACTPE and GMACRPE back on */
	regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
	regvalue |= ((1 << 5) | (1 << 6));
	spider_net_write_reg(card, SPIDER_NET_GMACOPEMD, regvalue);

	spider_net_set_promisc(card);

	return 0;
}
/**
 * spider_net_link_reset - restart autonegotiation after a PHY_LINK event
 * @netdev: net device structure
 *
 * This is called when the PHY_LINK signal is asserted. For the blade this is
 * not connected so we should never get here. Stops the autonegotiation
 * timer, acknowledges and masks the GMAC interrupt sources, then restarts
 * autonegotiation from scratch on the copper medium.
 */
static void
spider_net_link_reset(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);

	del_timer_sync(&card->aneg_timer);

	/* clear interrupt, block further interrupts
	 * (GMACST is write-back-to-clear: writing the value just read
	 * acknowledges all pending bits) */
	spider_net_write_reg(card, SPIDER_NET_GMACST,
			     spider_net_read_reg(card, SPIDER_NET_GMACST));
	spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);

	/* reset phy and setup aneg */
	card->aneg_count = 0;
	card->medium = BCM54XX_COPPER;
	spider_net_setup_aneg(card);
	mod_timer(&card->aneg_timer, jiffies + SPIDER_NET_ANEG_TIMER);
}
/**
 * spider_net_handle_error_irq - handles errors raised by an interrupt
 * @card: card structure
 * @status_reg: interrupt status register 0 (GHIINT0STS)
 * @error_reg1: interrupt status register 1 (GHIINT1STS)
 * @error_reg2: interrupt status register 2 (GHIINT2STS)
 *
 * spider_net_handle_error_irq treats or ignores all error conditions
 * found when an interrupt is presented. Each set bit of the three status
 * registers is examined individually; known-benign conditions suppress
 * the generic error log (show_error), RX stall conditions trigger a ring
 * resync/refill plus a NAPI poll, and any remaining condition is reported
 * rate-limited at the end.
 */
static void
spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg,
			    u32 error_reg1, u32 error_reg2)
{
	u32 i;
	int show_error = 1;

	/* check GHIINT0STS ************************************/
	if (status_reg)
		for (i = 0; i < 32; i++)
			if (status_reg & (1<<i))
				switch (i)
	{
	/* let error_reg1 and error_reg2 evaluation decide, what to do
	case SPIDER_NET_PHYINT:
	case SPIDER_NET_GMAC2INT:
	case SPIDER_NET_GMAC1INT:
	case SPIDER_NET_GFIFOINT:
	case SPIDER_NET_DMACINT:
	case SPIDER_NET_GSYSINT:
		break; */

	case SPIDER_NET_GIPSINT:
		show_error = 0;
		break;

	case SPIDER_NET_GPWOPCMPINT:
		/* PHY write operation completed */
		show_error = 0;
		break;
	case SPIDER_NET_GPROPCMPINT:
		/* PHY read operation completed */
		/* we don't use semaphores, as we poll for the completion
		 * of the read operation in spider_net_read_phy. Should take
		 * about 50 us */
		show_error = 0;
		break;
	case SPIDER_NET_GPWFFINT:
		/* PHY command queue full */
		if (netif_msg_intr(card))
			dev_err(&card->netdev->dev, "PHY write queue full\n");
		show_error = 0;
		break;

	/* case SPIDER_NET_GRMDADRINT: not used. print a message */
	/* case SPIDER_NET_GRMARPINT: not used. print a message */
	/* case SPIDER_NET_GRMMPINT: not used. print a message */

	case SPIDER_NET_GDTDEN0INT:
		/* someone has set TX_DMA_EN to 0 */
		show_error = 0;
		break;

	case SPIDER_NET_GDDDEN0INT: /* fallthrough */
	case SPIDER_NET_GDCDEN0INT: /* fallthrough */
	case SPIDER_NET_GDBDEN0INT: /* fallthrough */
	case SPIDER_NET_GDADEN0INT:
		/* someone has set RX_DMA_EN to 0 */
		show_error = 0;
		break;

	/* RX interrupts */
	case SPIDER_NET_GDDFDCINT:
	case SPIDER_NET_GDCFDCINT:
	case SPIDER_NET_GDBFDCINT:
	case SPIDER_NET_GDAFDCINT:
	/* case SPIDER_NET_GDNMINT: not used. print a message */
	/* case SPIDER_NET_GCNMINT: not used. print a message */
	/* case SPIDER_NET_GBNMINT: not used. print a message */
	/* case SPIDER_NET_GANMINT: not used. print a message */
	/* case SPIDER_NET_GRFNMINT: not used. print a message */
		show_error = 0;
		break;

	/* TX interrupts */
	case SPIDER_NET_GDTFDCINT:
		show_error = 0;
		break;
	case SPIDER_NET_GTTEDINT:
		show_error = 0;
		break;
	case SPIDER_NET_GDTDCEINT:
		/* chain end. If a descriptor should be sent, kick off
		 * tx dma
		if (card->tx_chain.tail != card->tx_chain.head)
			spider_net_kick_tx_dma(card);
		*/
		show_error = 0;
		break;

	/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
	/* case SPIDER_NET_GFREECNTINT: not used. print a message */
	}

	/* check GHIINT1STS ************************************/
	if (error_reg1)
		for (i = 0; i < 32; i++)
			if (error_reg1 & (1<<i))
				switch (i)
	{
	case SPIDER_NET_GTMFLLINT:
		/* TX RAM full may happen on a usual case.
		 * Logging is not needed. */
		show_error = 0;
		break;
	case SPIDER_NET_GRFDFLLINT: /* fallthrough */
	case SPIDER_NET_GRFCFLLINT: /* fallthrough */
	case SPIDER_NET_GRFBFLLINT: /* fallthrough */
	case SPIDER_NET_GRFAFLLINT: /* fallthrough */
	case SPIDER_NET_GRMFLLINT:
		/* Could happen when rx chain is full; resync, refill and
		 * poll once, but don't re-trigger on every RAM-full irq
		 * (ignore_rx_ramfull is cleared again in spider_net_poll) */
		if (card->ignore_rx_ramfull == 0) {
			card->ignore_rx_ramfull = 1;
			spider_net_resync_head_ptr(card);
			spider_net_refill_rx_chain(card);
			spider_net_enable_rxdmac(card);
			card->num_rx_ints ++;
			napi_schedule(&card->napi);
		}
		show_error = 0;
		break;

	/* case SPIDER_NET_GTMSHTINT: problem, print a message */
	case SPIDER_NET_GDTINVDINT:
		/* allrighty. tx from previous descr ok */
		show_error = 0;
		break;

	/* chain end */
	case SPIDER_NET_GDDDCEINT: /* fallthrough */
	case SPIDER_NET_GDCDCEINT: /* fallthrough */
	case SPIDER_NET_GDBDCEINT: /* fallthrough */
	case SPIDER_NET_GDADCEINT:
		spider_net_resync_head_ptr(card);
		spider_net_refill_rx_chain(card);
		spider_net_enable_rxdmac(card);
		card->num_rx_ints ++;
		napi_schedule(&card->napi);
		show_error = 0;
		break;

	/* invalid descriptor */
	case SPIDER_NET_GDDINVDINT: /* fallthrough */
	case SPIDER_NET_GDCINVDINT: /* fallthrough */
	case SPIDER_NET_GDBINVDINT: /* fallthrough */
	case SPIDER_NET_GDAINVDINT:
		/* Could happen when rx chain is full */
		spider_net_resync_head_ptr(card);
		spider_net_refill_rx_chain(card);
		spider_net_enable_rxdmac(card);
		card->num_rx_ints ++;
		napi_schedule(&card->napi);
		show_error = 0;
		break;

	/* case SPIDER_NET_GDTRSERINT: problem, print a message */
	/* case SPIDER_NET_GDDRSERINT: problem, print a message */
	/* case SPIDER_NET_GDCRSERINT: problem, print a message */
	/* case SPIDER_NET_GDBRSERINT: problem, print a message */
	/* case SPIDER_NET_GDARSERINT: problem, print a message */
	/* case SPIDER_NET_GDSERINT: problem, print a message */
	/* case SPIDER_NET_GDTPTERINT: problem, print a message */
	/* case SPIDER_NET_GDDPTERINT: problem, print a message */
	/* case SPIDER_NET_GDCPTERINT: problem, print a message */
	/* case SPIDER_NET_GDBPTERINT: problem, print a message */
	/* case SPIDER_NET_GDAPTERINT: problem, print a message */
	default:
		show_error = 1;
		break;
	}

	/* check GHIINT2STS ************************************/
	if (error_reg2)
		for (i = 0; i < 32; i++)
			if (error_reg2 & (1<<i))
				switch (i)
	{
	/* there is nothing we can (want to) do at this time. Log a
	 * message, we can switch on and off the specific values later on
	case SPIDER_NET_GPROPERINT:
	case SPIDER_NET_GMCTCRSNGINT:
	case SPIDER_NET_GMCTLCOLINT:
	case SPIDER_NET_GMCTTMOTINT:
	case SPIDER_NET_GMCRCAERINT:
	case SPIDER_NET_GMCRCALERINT:
	case SPIDER_NET_GMCRALNERINT:
	case SPIDER_NET_GMCROVRINT:
	case SPIDER_NET_GMCRRNTINT:
	case SPIDER_NET_GMCRRXERINT:
	case SPIDER_NET_GTITCSERINT:
	case SPIDER_NET_GTIFMTERINT:
	case SPIDER_NET_GTIPKTRVKINT:
	case SPIDER_NET_GTISPINGINT:
	case SPIDER_NET_GTISADNGINT:
	case SPIDER_NET_GTISPDNGINT:
	case SPIDER_NET_GRIFMTERINT:
	case SPIDER_NET_GRIPKTRVKINT:
	case SPIDER_NET_GRISPINGINT:
	case SPIDER_NET_GRISADNGINT:
	case SPIDER_NET_GRISPDNGINT:
		break;
	*/
	default:
		break;
	}

	if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
		dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
		       status_reg, error_reg1, error_reg2);

	/* clear interrupt sources (GHIINT0STS is cleared by the caller) */
	spider_net_write_reg(card, SPIDER_NET_GHIINT1STS, error_reg1);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2STS, error_reg2);
}
/**
 * spider_net_interrupt - interrupt handler for spider_net
 * @irq: interrupt number
 * @ptr: pointer to net_device
 *
 * returns IRQ_HANDLED, if interrupt was for driver, or IRQ_NONE, if no
 * interrupt found raised by card.
 *
 * This is the interrupt handler, that turns off
 * interrupts for this device and makes the stack poll the driver
 */
static irqreturn_t
spider_net_interrupt(int irq, void *ptr)
{
	struct net_device *netdev = ptr;
	struct spider_net_card *card = netdev_priv(netdev);
	u32 status_reg, error_reg1, error_reg2;

	/* read all three status registers up front */
	status_reg = spider_net_read_reg(card, SPIDER_NET_GHIINT0STS);
	error_reg1 = spider_net_read_reg(card, SPIDER_NET_GHIINT1STS);
	error_reg2 = spider_net_read_reg(card, SPIDER_NET_GHIINT2STS);

	/* none of the bits we handle are set: the interrupt was not ours */
	if (!(status_reg & SPIDER_NET_INT0_MASK_VALUE) &&
	    !(error_reg1 & SPIDER_NET_INT1_MASK_VALUE) &&
	    !(error_reg2 & SPIDER_NET_INT2_MASK_VALUE))
		return IRQ_NONE;

	/* RX: mask further RX irqs and let NAPI poll the ring */
	if (status_reg & SPIDER_NET_RXINT ) {
		spider_net_rx_irq_off(card);
		napi_schedule(&card->napi);
		card->num_rx_ints ++;
	}
	/* TX completion is also handled from the NAPI poll routine */
	if (status_reg & SPIDER_NET_TXINT)
		napi_schedule(&card->napi);

	if (status_reg & SPIDER_NET_LINKINT)
		spider_net_link_reset(netdev);

	if (status_reg & SPIDER_NET_ERRINT )
		spider_net_handle_error_irq(card, status_reg,
					    error_reg1, error_reg2);

	/* clear interrupt sources */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);

	return IRQ_HANDLED;
}
  1413. #ifdef CONFIG_NET_POLL_CONTROLLER
  1414. /**
  1415. * spider_net_poll_controller - artificial interrupt for netconsole etc.
  1416. * @netdev: interface device structure
  1417. *
  1418. * see Documentation/networking/netconsole.txt
  1419. */
  1420. static void
  1421. spider_net_poll_controller(struct net_device *netdev)
  1422. {
  1423. disable_irq(netdev->irq);
  1424. spider_net_interrupt(netdev->irq, netdev);
  1425. enable_irq(netdev->irq);
  1426. }
  1427. #endif /* CONFIG_NET_POLL_CONTROLLER */
  1428. /**
  1429. * spider_net_enable_interrupts - enable interrupts
  1430. * @card: card structure
  1431. *
  1432. * spider_net_enable_interrupt enables several interrupts
  1433. */
  1434. static void
  1435. spider_net_enable_interrupts(struct spider_net_card *card)
  1436. {
  1437. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK,
  1438. SPIDER_NET_INT0_MASK_VALUE);
  1439. spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK,
  1440. SPIDER_NET_INT1_MASK_VALUE);
  1441. spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
  1442. SPIDER_NET_INT2_MASK_VALUE);
  1443. }
  1444. /**
  1445. * spider_net_disable_interrupts - disable interrupts
  1446. * @card: card structure
  1447. *
  1448. * spider_net_disable_interrupts disables all the interrupts
  1449. */
  1450. static void
  1451. spider_net_disable_interrupts(struct spider_net_card *card)
  1452. {
  1453. spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
  1454. spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
  1455. spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);
  1456. spider_net_write_reg(card, SPIDER_NET_GMACINTEN, 0);
  1457. }
  1458. /**
  1459. * spider_net_init_card - initializes the card
  1460. * @card: card structure
  1461. *
  1462. * spider_net_init_card initializes the card so that other registers can
  1463. * be used
  1464. */
  1465. static void
  1466. spider_net_init_card(struct spider_net_card *card)
  1467. {
  1468. spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
  1469. SPIDER_NET_CKRCTRL_STOP_VALUE);
  1470. spider_net_write_reg(card, SPIDER_NET_CKRCTRL,
  1471. SPIDER_NET_CKRCTRL_RUN_VALUE);
  1472. /* trigger ETOMOD signal */
  1473. spider_net_write_reg(card, SPIDER_NET_GMACOPEMD,
  1474. spider_net_read_reg(card, SPIDER_NET_GMACOPEMD) | 0x4);
  1475. spider_net_disable_interrupts(card);
  1476. }
  1477. /**
  1478. * spider_net_enable_card - enables the card by setting all kinds of regs
  1479. * @card: card structure
  1480. *
  1481. * spider_net_enable_card sets a lot of SMMIO registers to enable the device
  1482. */
  1483. static void
  1484. spider_net_enable_card(struct spider_net_card *card)
  1485. {
  1486. int i;
  1487. /* the following array consists of (register),(value) pairs
  1488. * that are set in this function. A register of 0 ends the list */
  1489. u32 regs[][2] = {
  1490. { SPIDER_NET_GRESUMINTNUM, 0 },
  1491. { SPIDER_NET_GREINTNUM, 0 },
  1492. /* set interrupt frame number registers */
  1493. /* clear the single DMA engine registers first */
  1494. { SPIDER_NET_GFAFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
  1495. { SPIDER_NET_GFBFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
  1496. { SPIDER_NET_GFCFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
  1497. { SPIDER_NET_GFDFRMNUM, SPIDER_NET_GFXFRAMES_VALUE },
  1498. /* then set, what we really need */
  1499. { SPIDER_NET_GFFRMNUM, SPIDER_NET_FRAMENUM_VALUE },
  1500. /* timer counter registers and stuff */
  1501. { SPIDER_NET_GFREECNNUM, 0 },
  1502. { SPIDER_NET_GONETIMENUM, 0 },
  1503. { SPIDER_NET_GTOUTFRMNUM, 0 },
  1504. /* RX mode setting */
  1505. { SPIDER_NET_GRXMDSET, SPIDER_NET_RXMODE_VALUE },
  1506. /* TX mode setting */
  1507. { SPIDER_NET_GTXMDSET, SPIDER_NET_TXMODE_VALUE },
  1508. /* IPSEC mode setting */
  1509. { SPIDER_NET_GIPSECINIT, SPIDER_NET_IPSECINIT_VALUE },
  1510. { SPIDER_NET_GFTRESTRT, SPIDER_NET_RESTART_VALUE },
  1511. { SPIDER_NET_GMRWOLCTRL, 0 },
  1512. { SPIDER_NET_GTESTMD, 0x10000000 },
  1513. { SPIDER_NET_GTTQMSK, 0x00400040 },
  1514. { SPIDER_NET_GMACINTEN, 0 },
  1515. /* flow control stuff */
  1516. { SPIDER_NET_GMACAPAUSE, SPIDER_NET_MACAPAUSE_VALUE },
  1517. { SPIDER_NET_GMACTXPAUSE, SPIDER_NET_TXPAUSE_VALUE },
  1518. { SPIDER_NET_GMACBSTLMT, SPIDER_NET_BURSTLMT_VALUE },
  1519. { 0, 0}
  1520. };
  1521. i = 0;
  1522. while (regs[i][0]) {
  1523. spider_net_write_reg(card, regs[i][0], regs[i][1]);
  1524. i++;
  1525. }
  1526. /* clear unicast filter table entries 1 to 14 */
  1527. for (i = 1; i <= 14; i++) {
  1528. spider_net_write_reg(card,
  1529. SPIDER_NET_GMRUAFILnR + i * 8,
  1530. 0x00080000);
  1531. spider_net_write_reg(card,
  1532. SPIDER_NET_GMRUAFILnR + i * 8 + 4,
  1533. 0x00000000);
  1534. }
  1535. spider_net_write_reg(card, SPIDER_NET_GMRUA0FIL15R, 0x08080000);
  1536. spider_net_write_reg(card, SPIDER_NET_ECMODE, SPIDER_NET_ECMODE_VALUE);
  1537. /* set chain tail address fo…