/drivers/net/ethernet/broadcom/bnx2.c

https://github.com/mirrors/linux · C · 8845 lines · 6903 code · 1592 blank · 350 comment

Note: this is a large file; the listing below is truncated.

  1. /* bnx2.c: QLogic bnx2 network driver.
  2. *
  3. * Copyright (c) 2004-2014 Broadcom Corporation
  4. * Copyright (c) 2014-2015 QLogic Corporation
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License as published by
  8. * the Free Software Foundation.
  9. *
  10. * Written by: Michael Chan (mchan@broadcom.com)
  11. */
  12. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  13. #include <linux/module.h>
  14. #include <linux/moduleparam.h>
  15. #include <linux/stringify.h>
  16. #include <linux/kernel.h>
  17. #include <linux/timer.h>
  18. #include <linux/errno.h>
  19. #include <linux/ioport.h>
  20. #include <linux/slab.h>
  21. #include <linux/vmalloc.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/pci.h>
  24. #include <linux/netdevice.h>
  25. #include <linux/etherdevice.h>
  26. #include <linux/skbuff.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/bitops.h>
  29. #include <asm/io.h>
  30. #include <asm/irq.h>
  31. #include <linux/delay.h>
  32. #include <asm/byteorder.h>
  33. #include <asm/page.h>
  34. #include <linux/time.h>
  35. #include <linux/ethtool.h>
  36. #include <linux/mii.h>
  37. #include <linux/if.h>
  38. #include <linux/if_vlan.h>
  39. #include <net/ip.h>
  40. #include <net/tcp.h>
  41. #include <net/checksum.h>
  42. #include <linux/workqueue.h>
  43. #include <linux/crc32.h>
  44. #include <linux/prefetch.h>
  45. #include <linux/cache.h>
  46. #include <linux/firmware.h>
  47. #include <linux/log2.h>
  48. #include <linux/aer.h>
  49. #include <linux/crash_dump.h>
  50. #if IS_ENABLED(CONFIG_CNIC)
  51. #define BCM_CNIC 1
  52. #include "cnic_if.h"
  53. #endif
  54. #include "bnx2.h"
  55. #include "bnx2_fw.h"
  56. #define DRV_MODULE_NAME "bnx2"
  57. #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
  58. #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
  59. #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
  60. #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
  61. #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
  62. #define RUN_AT(x) (jiffies + (x))
  63. /* Time in jiffies before concluding the transmitter is hung. */
  64. #define TX_TIMEOUT (5*HZ)
  65. MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
  66. MODULE_DESCRIPTION("QLogic BCM5706/5708/5709/5716 Driver");
  67. MODULE_LICENSE("GPL");
  68. MODULE_FIRMWARE(FW_MIPS_FILE_06);
  69. MODULE_FIRMWARE(FW_RV2P_FILE_06);
  70. MODULE_FIRMWARE(FW_MIPS_FILE_09);
  71. MODULE_FIRMWARE(FW_RV2P_FILE_09);
  72. MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
  73. static int disable_msi = 0;
  74. module_param(disable_msi, int, 0444);
  75. MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
  76. typedef enum {
  77. BCM5706 = 0,
  78. NC370T,
  79. NC370I,
  80. BCM5706S,
  81. NC370F,
  82. BCM5708,
  83. BCM5708S,
  84. BCM5709,
  85. BCM5709S,
  86. BCM5716,
  87. BCM5716S,
  88. } board_t;
  89. /* indexed by board_t, above */
  90. static struct {
  91. char *name;
  92. } board_info[] = {
  93. { "Broadcom NetXtreme II BCM5706 1000Base-T" },
  94. { "HP NC370T Multifunction Gigabit Server Adapter" },
  95. { "HP NC370i Multifunction Gigabit Server Adapter" },
  96. { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
  97. { "HP NC370F Multifunction Gigabit Server Adapter" },
  98. { "Broadcom NetXtreme II BCM5708 1000Base-T" },
  99. { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
  100. { "Broadcom NetXtreme II BCM5709 1000Base-T" },
  101. { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
  102. { "Broadcom NetXtreme II BCM5716 1000Base-T" },
  103. { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
  104. };
  105. static const struct pci_device_id bnx2_pci_tbl[] = {
  106. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
  107. PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
  108. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
  109. PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
  110. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
  111. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
  112. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
  113. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
  114. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
  115. PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
  116. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
  117. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
  118. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
  119. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
  120. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
  121. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
  122. { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
  123. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
  124. { PCI_VENDOR_ID_BROADCOM, 0x163b,
  125. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
  126. { PCI_VENDOR_ID_BROADCOM, 0x163c,
  127. PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
  128. { 0, }
  129. };
  130. static const struct flash_spec flash_table[] =
  131. {
  132. #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
  133. #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
  134. /* Slow EEPROM */
  135. {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
  136. BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
  137. SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
  138. "EEPROM - slow"},
  139. /* Expansion entry 0001 */
  140. {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
  141. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  142. SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
  143. "Entry 0001"},
  144. /* Saifun SA25F010 (non-buffered flash) */
  145. /* strap, cfg1, & write1 need updates */
  146. {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
  147. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  148. SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
  149. "Non-buffered flash (128kB)"},
  150. /* Saifun SA25F020 (non-buffered flash) */
  151. /* strap, cfg1, & write1 need updates */
  152. {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
  153. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  154. SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
  155. "Non-buffered flash (256kB)"},
  156. /* Expansion entry 0100 */
  157. {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
  158. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  159. SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
  160. "Entry 0100"},
  161. /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
  162. {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
  163. NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
  164. ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
  165. "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
  166. /* Entry 0110: ST M45PE20 (non-buffered flash)*/
  167. {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
  168. NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
  169. ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
  170. "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
  171. /* Saifun SA25F005 (non-buffered flash) */
  172. /* strap, cfg1, & write1 need updates */
  173. {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
  174. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  175. SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
  176. "Non-buffered flash (64kB)"},
  177. /* Fast EEPROM */
  178. {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
  179. BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
  180. SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
  181. "EEPROM - fast"},
  182. /* Expansion entry 1001 */
  183. {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
  184. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  185. SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
  186. "Entry 1001"},
  187. /* Expansion entry 1010 */
  188. {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
  189. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  190. SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
  191. "Entry 1010"},
  192. /* ATMEL AT45DB011B (buffered flash) */
  193. {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
  194. BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
  195. BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
  196. "Buffered flash (128kB)"},
  197. /* Expansion entry 1100 */
  198. {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
  199. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  200. SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
  201. "Entry 1100"},
  202. /* Expansion entry 1101 */
  203. {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
  204. NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
  205. SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
  206. "Entry 1101"},
  207. /* Atmel Expansion entry 1110 */
  208. {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
  209. BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
  210. BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
  211. "Entry 1110 (Atmel)"},
  212. /* ATMEL AT45DB021B (buffered flash) */
  213. {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
  214. BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
  215. BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
  216. "Buffered flash (256kB)"},
  217. };
  218. static const struct flash_spec flash_5709 = {
  219. .flags = BNX2_NV_BUFFERED,
  220. .page_bits = BCM5709_FLASH_PAGE_BITS,
  221. .page_size = BCM5709_FLASH_PAGE_SIZE,
  222. .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
  223. .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
  224. .name = "5709 Buffered flash (256kB)",
  225. };
  226. MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
  227. static void bnx2_init_napi(struct bnx2 *bp);
  228. static void bnx2_del_napi(struct bnx2 *bp);
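/* bnx2_tx_avail() below returns the number of free TX descriptors.
 * tx_prod and tx_cons are free-running 16-bit indices, so their wrapped
 * difference counts the descriptors currently in flight; the clamp inside
 * the function accounts for the one skipped index noted in its comment
 * before the difference is subtracted from tx_ring_size.
 */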
  229. static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
  230. {
  231. u32 diff;
  232. /* The ring uses 256 indices for 255 entries, one of them
  233. * needs to be skipped.
  234. */
  235. diff = READ_ONCE(txr->tx_prod) - READ_ONCE(txr->tx_cons);
  236. if (unlikely(diff >= BNX2_TX_DESC_CNT)) {
  237. diff &= 0xffff;
  238. if (diff == BNX2_TX_DESC_CNT)
  239. diff = BNX2_MAX_TX_DESC_CNT;
  240. }
  241. return bp->tx_ring_size - diff;
  242. }
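/* Indirect register access: the target offset is written to the PCICFG
 * window address register, and the data is then read or written through
 * the window data register.  indirect_lock serializes the two-step
 * sequence against other indirect accesses.
 */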
  243. static u32
  244. bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
  245. {
  246. unsigned long flags;
  247. u32 val;
  248. spin_lock_irqsave(&bp->indirect_lock, flags);
  249. BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
  250. val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
  251. spin_unlock_irqrestore(&bp->indirect_lock, flags);
  252. return val;
  253. }
  254. static void
  255. bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
  256. {
  257. unsigned long flags;
  258. spin_lock_irqsave(&bp->indirect_lock, flags);
  259. BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
  260. BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
  261. spin_unlock_irqrestore(&bp->indirect_lock, flags);
  262. }
  263. static void
  264. bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
  265. {
  266. bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
  267. }
  268. static u32
  269. bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
  270. {
  271. return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
  272. }
  273. static void
  274. bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
  275. {
  276. unsigned long flags;
  277. offset += cid_addr;
  278. spin_lock_irqsave(&bp->indirect_lock, flags);
  279. if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
  280. int i;
  281. BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
  282. BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
  283. offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
  284. for (i = 0; i < 5; i++) {
  285. val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
  286. if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
  287. break;
  288. udelay(5);
  289. }
  290. } else {
  291. BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
  292. BNX2_WR(bp, BNX2_CTX_DATA, val);
  293. }
  294. spin_unlock_irqrestore(&bp->indirect_lock, flags);
  295. }
  296. #ifdef BCM_CNIC
  297. static int
  298. bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
  299. {
  300. struct bnx2 *bp = netdev_priv(dev);
  301. struct drv_ctl_io *io = &info->data.io;
  302. switch (info->cmd) {
  303. case DRV_CTL_IO_WR_CMD:
  304. bnx2_reg_wr_ind(bp, io->offset, io->data);
  305. break;
  306. case DRV_CTL_IO_RD_CMD:
  307. io->data = bnx2_reg_rd_ind(bp, io->offset);
  308. break;
  309. case DRV_CTL_CTX_WR_CMD:
  310. bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
  311. break;
  312. default:
  313. return -EINVAL;
  314. }
  315. return 0;
  316. }
  317. static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
  318. {
  319. struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
  320. struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
  321. int sb_id;
  322. if (bp->flags & BNX2_FLAG_USING_MSIX) {
  323. cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
  324. bnapi->cnic_present = 0;
  325. sb_id = bp->irq_nvecs;
  326. cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
  327. } else {
  328. cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
  329. bnapi->cnic_tag = bnapi->last_status_idx;
  330. bnapi->cnic_present = 1;
  331. sb_id = 0;
  332. cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
  333. }
  334. cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
  335. cp->irq_arr[0].status_blk = (void *)
  336. ((unsigned long) bnapi->status_blk.msi +
  337. (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
  338. cp->irq_arr[0].status_blk_num = sb_id;
  339. cp->num_irq = 1;
  340. }
  341. static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
  342. void *data)
  343. {
  344. struct bnx2 *bp = netdev_priv(dev);
  345. struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
  346. if (!ops)
  347. return -EINVAL;
  348. if (cp->drv_state & CNIC_DRV_STATE_REGD)
  349. return -EBUSY;
  350. if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
  351. return -ENODEV;
  352. bp->cnic_data = data;
  353. rcu_assign_pointer(bp->cnic_ops, ops);
  354. cp->num_irq = 0;
  355. cp->drv_state = CNIC_DRV_STATE_REGD;
  356. bnx2_setup_cnic_irq_info(bp);
  357. return 0;
  358. }
  359. static int bnx2_unregister_cnic(struct net_device *dev)
  360. {
  361. struct bnx2 *bp = netdev_priv(dev);
  362. struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
  363. struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
  364. mutex_lock(&bp->cnic_lock);
  365. cp->drv_state = 0;
  366. bnapi->cnic_present = 0;
  367. RCU_INIT_POINTER(bp->cnic_ops, NULL);
  368. mutex_unlock(&bp->cnic_lock);
  369. synchronize_rcu();
  370. return 0;
  371. }
  372. static struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
  373. {
  374. struct bnx2 *bp = netdev_priv(dev);
  375. struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
  376. if (!cp->max_iscsi_conn)
  377. return NULL;
  378. cp->drv_owner = THIS_MODULE;
  379. cp->chip_id = bp->chip_id;
  380. cp->pdev = bp->pdev;
  381. cp->io_base = bp->regview;
  382. cp->drv_ctl = bnx2_drv_ctl;
  383. cp->drv_register_cnic = bnx2_register_cnic;
  384. cp->drv_unregister_cnic = bnx2_unregister_cnic;
  385. return cp;
  386. }
  387. static void
  388. bnx2_cnic_stop(struct bnx2 *bp)
  389. {
  390. struct cnic_ops *c_ops;
  391. struct cnic_ctl_info info;
  392. mutex_lock(&bp->cnic_lock);
  393. c_ops = rcu_dereference_protected(bp->cnic_ops,
  394. lockdep_is_held(&bp->cnic_lock));
  395. if (c_ops) {
  396. info.cmd = CNIC_CTL_STOP_CMD;
  397. c_ops->cnic_ctl(bp->cnic_data, &info);
  398. }
  399. mutex_unlock(&bp->cnic_lock);
  400. }
  401. static void
  402. bnx2_cnic_start(struct bnx2 *bp)
  403. {
  404. struct cnic_ops *c_ops;
  405. struct cnic_ctl_info info;
  406. mutex_lock(&bp->cnic_lock);
  407. c_ops = rcu_dereference_protected(bp->cnic_ops,
  408. lockdep_is_held(&bp->cnic_lock));
  409. if (c_ops) {
  410. if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
  411. struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
  412. bnapi->cnic_tag = bnapi->last_status_idx;
  413. }
  414. info.cmd = CNIC_CTL_START_CMD;
  415. c_ops->cnic_ctl(bp->cnic_data, &info);
  416. }
  417. mutex_unlock(&bp->cnic_lock);
  418. }
  419. #else
  420. static void
  421. bnx2_cnic_stop(struct bnx2 *bp)
  422. {
  423. }
  424. static void
  425. bnx2_cnic_start(struct bnx2 *bp)
  426. {
  427. }
  428. #endif
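/* MDIO access to the PHY.  If auto-polling is enabled it is temporarily
 * turned off, the read/write command is issued through EMAC_MDIO_COMM,
 * and START_BUSY is polled (roughly every 10us, up to 50 times) for
 * completion before auto-polling is restored.  -EBUSY is returned if the
 * transaction never completes.
 */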
  429. static int
  430. bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
  431. {
  432. u32 val1;
  433. int i, ret;
  434. if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
  435. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  436. val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
  437. BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
  438. BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  439. udelay(40);
  440. }
  441. val1 = (bp->phy_addr << 21) | (reg << 16) |
  442. BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
  443. BNX2_EMAC_MDIO_COMM_START_BUSY;
  444. BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
  445. for (i = 0; i < 50; i++) {
  446. udelay(10);
  447. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
  448. if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
  449. udelay(5);
  450. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
  451. val1 &= BNX2_EMAC_MDIO_COMM_DATA;
  452. break;
  453. }
  454. }
  455. if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
  456. *val = 0x0;
  457. ret = -EBUSY;
  458. }
  459. else {
  460. *val = val1;
  461. ret = 0;
  462. }
  463. if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
  464. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  465. val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
  466. BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
  467. BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  468. udelay(40);
  469. }
  470. return ret;
  471. }
  472. static int
  473. bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
  474. {
  475. u32 val1;
  476. int i, ret;
  477. if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
  478. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  479. val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
  480. BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
  481. BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  482. udelay(40);
  483. }
  484. val1 = (bp->phy_addr << 21) | (reg << 16) | val |
  485. BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
  486. BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
  487. BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
  488. for (i = 0; i < 50; i++) {
  489. udelay(10);
  490. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
  491. if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
  492. udelay(5);
  493. break;
  494. }
  495. }
  496. if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
  497. ret = -EBUSY;
  498. else
  499. ret = 0;
  500. if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
  501. val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  502. val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
  503. BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
  504. BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
  505. udelay(40);
  506. }
  507. return ret;
  508. }
  509. static void
  510. bnx2_disable_int(struct bnx2 *bp)
  511. {
  512. int i;
  513. struct bnx2_napi *bnapi;
  514. for (i = 0; i < bp->irq_nvecs; i++) {
  515. bnapi = &bp->bnx2_napi[i];
  516. BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
  517. BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
  518. }
  519. BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
  520. }
  521. static void
  522. bnx2_enable_int(struct bnx2 *bp)
  523. {
  524. int i;
  525. struct bnx2_napi *bnapi;
  526. for (i = 0; i < bp->irq_nvecs; i++) {
  527. bnapi = &bp->bnx2_napi[i];
  528. BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
  529. BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
  530. BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
  531. bnapi->last_status_idx);
  532. BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
  533. BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
  534. bnapi->last_status_idx);
  535. }
  536. BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
  537. }
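/* intr_sem acts as a nesting counter: bnx2_disable_int_sync() bumps it and
 * waits for in-flight interrupt handlers, while bnx2_netif_start() only
 * re-enables NAPI and interrupts once the count drops back to zero.
 */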
  538. static void
  539. bnx2_disable_int_sync(struct bnx2 *bp)
  540. {
  541. int i;
  542. atomic_inc(&bp->intr_sem);
  543. if (!netif_running(bp->dev))
  544. return;
  545. bnx2_disable_int(bp);
  546. for (i = 0; i < bp->irq_nvecs; i++)
  547. synchronize_irq(bp->irq_tbl[i].vector);
  548. }
  549. static void
  550. bnx2_napi_disable(struct bnx2 *bp)
  551. {
  552. int i;
  553. for (i = 0; i < bp->irq_nvecs; i++)
  554. napi_disable(&bp->bnx2_napi[i].napi);
  555. }
  556. static void
  557. bnx2_napi_enable(struct bnx2 *bp)
  558. {
  559. int i;
  560. for (i = 0; i < bp->irq_nvecs; i++)
  561. napi_enable(&bp->bnx2_napi[i].napi);
  562. }
  563. static void
  564. bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
  565. {
  566. if (stop_cnic)
  567. bnx2_cnic_stop(bp);
  568. if (netif_running(bp->dev)) {
  569. bnx2_napi_disable(bp);
  570. netif_tx_disable(bp->dev);
  571. }
  572. bnx2_disable_int_sync(bp);
  573. netif_carrier_off(bp->dev); /* prevent tx timeout */
  574. }
  575. static void
  576. bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
  577. {
  578. if (atomic_dec_and_test(&bp->intr_sem)) {
  579. if (netif_running(bp->dev)) {
  580. netif_tx_wake_all_queues(bp->dev);
  581. spin_lock_bh(&bp->phy_lock);
  582. if (bp->link_up)
  583. netif_carrier_on(bp->dev);
  584. spin_unlock_bh(&bp->phy_lock);
  585. bnx2_napi_enable(bp);
  586. bnx2_enable_int(bp);
  587. if (start_cnic)
  588. bnx2_cnic_start(bp);
  589. }
  590. }
  591. }
  592. static void
  593. bnx2_free_tx_mem(struct bnx2 *bp)
  594. {
  595. int i;
  596. for (i = 0; i < bp->num_tx_rings; i++) {
  597. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  598. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  599. if (txr->tx_desc_ring) {
  600. dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
  601. txr->tx_desc_ring,
  602. txr->tx_desc_mapping);
  603. txr->tx_desc_ring = NULL;
  604. }
  605. kfree(txr->tx_buf_ring);
  606. txr->tx_buf_ring = NULL;
  607. }
  608. }
  609. static void
  610. bnx2_free_rx_mem(struct bnx2 *bp)
  611. {
  612. int i;
  613. for (i = 0; i < bp->num_rx_rings; i++) {
  614. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  615. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  616. int j;
  617. for (j = 0; j < bp->rx_max_ring; j++) {
  618. if (rxr->rx_desc_ring[j])
  619. dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
  620. rxr->rx_desc_ring[j],
  621. rxr->rx_desc_mapping[j]);
  622. rxr->rx_desc_ring[j] = NULL;
  623. }
  624. vfree(rxr->rx_buf_ring);
  625. rxr->rx_buf_ring = NULL;
  626. for (j = 0; j < bp->rx_max_pg_ring; j++) {
  627. if (rxr->rx_pg_desc_ring[j])
  628. dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
  629. rxr->rx_pg_desc_ring[j],
  630. rxr->rx_pg_desc_mapping[j]);
  631. rxr->rx_pg_desc_ring[j] = NULL;
  632. }
  633. vfree(rxr->rx_pg_ring);
  634. rxr->rx_pg_ring = NULL;
  635. }
  636. }
  637. static int
  638. bnx2_alloc_tx_mem(struct bnx2 *bp)
  639. {
  640. int i;
  641. for (i = 0; i < bp->num_tx_rings; i++) {
  642. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  643. struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
  644. txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
  645. if (!txr->tx_buf_ring)
  646. return -ENOMEM;
  647. txr->tx_desc_ring =
  648. dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
  649. &txr->tx_desc_mapping, GFP_KERNEL);
  650. if (!txr->tx_desc_ring)
  651. return -ENOMEM;
  652. }
  653. return 0;
  654. }
  655. static int
  656. bnx2_alloc_rx_mem(struct bnx2 *bp)
  657. {
  658. int i;
  659. for (i = 0; i < bp->num_rx_rings; i++) {
  660. struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
  661. struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
  662. int j;
  663. rxr->rx_buf_ring =
  664. vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
  665. if (!rxr->rx_buf_ring)
  666. return -ENOMEM;
  667. for (j = 0; j < bp->rx_max_ring; j++) {
  668. rxr->rx_desc_ring[j] =
  669. dma_alloc_coherent(&bp->pdev->dev,
  670. RXBD_RING_SIZE,
  671. &rxr->rx_desc_mapping[j],
  672. GFP_KERNEL);
  673. if (!rxr->rx_desc_ring[j])
  674. return -ENOMEM;
  675. }
  676. if (bp->rx_pg_ring_size) {
  677. rxr->rx_pg_ring =
  678. vzalloc(array_size(SW_RXPG_RING_SIZE,
  679. bp->rx_max_pg_ring));
  680. if (!rxr->rx_pg_ring)
  681. return -ENOMEM;
  682. }
  683. for (j = 0; j < bp->rx_max_pg_ring; j++) {
  684. rxr->rx_pg_desc_ring[j] =
  685. dma_alloc_coherent(&bp->pdev->dev,
  686. RXBD_RING_SIZE,
  687. &rxr->rx_pg_desc_mapping[j],
  688. GFP_KERNEL);
  689. if (!rxr->rx_pg_desc_ring[j])
  690. return -ENOMEM;
  691. }
  692. }
  693. return 0;
  694. }
  695. static void
  696. bnx2_free_stats_blk(struct net_device *dev)
  697. {
  698. struct bnx2 *bp = netdev_priv(dev);
  699. if (bp->status_blk) {
  700. dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
  701. bp->status_blk,
  702. bp->status_blk_mapping);
  703. bp->status_blk = NULL;
  704. bp->stats_blk = NULL;
  705. }
  706. }
  707. static int
  708. bnx2_alloc_stats_blk(struct net_device *dev)
  709. {
  710. int status_blk_size;
  711. void *status_blk;
  712. struct bnx2 *bp = netdev_priv(dev);
  713. /* Combine status and statistics blocks into one allocation. */
  714. status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
  715. if (bp->flags & BNX2_FLAG_MSIX_CAP)
  716. status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
  717. BNX2_SBLK_MSIX_ALIGN_SIZE);
  718. bp->status_stats_size = status_blk_size +
  719. sizeof(struct statistics_block);
  720. status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
  721. &bp->status_blk_mapping, GFP_KERNEL);
  722. if (!status_blk)
  723. return -ENOMEM;
  724. bp->status_blk = status_blk;
  725. bp->stats_blk = status_blk + status_blk_size;
  726. bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
  727. return 0;
  728. }
  729. static void
  730. bnx2_free_mem(struct bnx2 *bp)
  731. {
  732. int i;
  733. struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
  734. bnx2_free_tx_mem(bp);
  735. bnx2_free_rx_mem(bp);
  736. for (i = 0; i < bp->ctx_pages; i++) {
  737. if (bp->ctx_blk[i]) {
  738. dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
  739. bp->ctx_blk[i],
  740. bp->ctx_blk_mapping[i]);
  741. bp->ctx_blk[i] = NULL;
  742. }
  743. }
  744. if (bnapi->status_blk.msi)
  745. bnapi->status_blk.msi = NULL;
  746. }
  747. static int
  748. bnx2_alloc_mem(struct bnx2 *bp)
  749. {
  750. int i, err;
  751. struct bnx2_napi *bnapi;
  752. bnapi = &bp->bnx2_napi[0];
  753. bnapi->status_blk.msi = bp->status_blk;
  754. bnapi->hw_tx_cons_ptr =
  755. &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
  756. bnapi->hw_rx_cons_ptr =
  757. &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
  758. if (bp->flags & BNX2_FLAG_MSIX_CAP) {
  759. for (i = 1; i < bp->irq_nvecs; i++) {
  760. struct status_block_msix *sblk;
  761. bnapi = &bp->bnx2_napi[i];
  762. sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
  763. bnapi->status_blk.msix = sblk;
  764. bnapi->hw_tx_cons_ptr =
  765. &sblk->status_tx_quick_consumer_index;
  766. bnapi->hw_rx_cons_ptr =
  767. &sblk->status_rx_quick_consumer_index;
  768. bnapi->int_num = i << 24;
  769. }
  770. }
  771. if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
  772. bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
  773. if (bp->ctx_pages == 0)
  774. bp->ctx_pages = 1;
  775. for (i = 0; i < bp->ctx_pages; i++) {
  776. bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
  777. BNX2_PAGE_SIZE,
  778. &bp->ctx_blk_mapping[i],
  779. GFP_KERNEL);
  780. if (!bp->ctx_blk[i])
  781. goto alloc_mem_err;
  782. }
  783. }
  784. err = bnx2_alloc_rx_mem(bp);
  785. if (err)
  786. goto alloc_mem_err;
  787. err = bnx2_alloc_tx_mem(bp);
  788. if (err)
  789. goto alloc_mem_err;
  790. return 0;
  791. alloc_mem_err:
  792. bnx2_free_mem(bp);
  793. return -ENOMEM;
  794. }
  795. static void
  796. bnx2_report_fw_link(struct bnx2 *bp)
  797. {
  798. u32 fw_link_status = 0;
  799. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  800. return;
  801. if (bp->link_up) {
  802. u32 bmsr;
  803. switch (bp->line_speed) {
  804. case SPEED_10:
  805. if (bp->duplex == DUPLEX_HALF)
  806. fw_link_status = BNX2_LINK_STATUS_10HALF;
  807. else
  808. fw_link_status = BNX2_LINK_STATUS_10FULL;
  809. break;
  810. case SPEED_100:
  811. if (bp->duplex == DUPLEX_HALF)
  812. fw_link_status = BNX2_LINK_STATUS_100HALF;
  813. else
  814. fw_link_status = BNX2_LINK_STATUS_100FULL;
  815. break;
  816. case SPEED_1000:
  817. if (bp->duplex == DUPLEX_HALF)
  818. fw_link_status = BNX2_LINK_STATUS_1000HALF;
  819. else
  820. fw_link_status = BNX2_LINK_STATUS_1000FULL;
  821. break;
  822. case SPEED_2500:
  823. if (bp->duplex == DUPLEX_HALF)
  824. fw_link_status = BNX2_LINK_STATUS_2500HALF;
  825. else
  826. fw_link_status = BNX2_LINK_STATUS_2500FULL;
  827. break;
  828. }
  829. fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
  830. if (bp->autoneg) {
  831. fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
  832. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  833. bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
  834. if (!(bmsr & BMSR_ANEGCOMPLETE) ||
  835. bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
  836. fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
  837. else
  838. fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
  839. }
  840. }
  841. else
  842. fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
  843. bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
  844. }
  845. static char *
  846. bnx2_xceiver_str(struct bnx2 *bp)
  847. {
  848. return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
  849. ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
  850. "Copper");
  851. }
  852. static void
  853. bnx2_report_link(struct bnx2 *bp)
  854. {
  855. if (bp->link_up) {
  856. netif_carrier_on(bp->dev);
  857. netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
  858. bnx2_xceiver_str(bp),
  859. bp->line_speed,
  860. bp->duplex == DUPLEX_FULL ? "full" : "half");
  861. if (bp->flow_ctrl) {
  862. if (bp->flow_ctrl & FLOW_CTRL_RX) {
  863. pr_cont(", receive ");
  864. if (bp->flow_ctrl & FLOW_CTRL_TX)
  865. pr_cont("& transmit ");
  866. }
  867. else {
  868. pr_cont(", transmit ");
  869. }
  870. pr_cont("flow control ON");
  871. }
  872. pr_cont("\n");
  873. } else {
  874. netif_carrier_off(bp->dev);
  875. netdev_err(bp->dev, "NIC %s Link is Down\n",
  876. bnx2_xceiver_str(bp));
  877. }
  878. bnx2_report_fw_link(bp);
  879. }
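/* Resolve the TX/RX pause configuration from the local and link-partner
 * pause advertisements (the priority resolution of 802.3, Table 28B-3).
 * Flow control is only resolved for full-duplex links; when speed or
 * flow-control autonegotiation is not enabled, the requested settings are
 * used as-is.
 */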
  880. static void
  881. bnx2_resolve_flow_ctrl(struct bnx2 *bp)
  882. {
  883. u32 local_adv, remote_adv;
  884. bp->flow_ctrl = 0;
  885. if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
  886. (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
  887. if (bp->duplex == DUPLEX_FULL) {
  888. bp->flow_ctrl = bp->req_flow_ctrl;
  889. }
  890. return;
  891. }
  892. if (bp->duplex != DUPLEX_FULL) {
  893. return;
  894. }
  895. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  896. (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
  897. u32 val;
  898. bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
  899. if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
  900. bp->flow_ctrl |= FLOW_CTRL_TX;
  901. if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
  902. bp->flow_ctrl |= FLOW_CTRL_RX;
  903. return;
  904. }
  905. bnx2_read_phy(bp, bp->mii_adv, &local_adv);
  906. bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
  907. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  908. u32 new_local_adv = 0;
  909. u32 new_remote_adv = 0;
  910. if (local_adv & ADVERTISE_1000XPAUSE)
  911. new_local_adv |= ADVERTISE_PAUSE_CAP;
  912. if (local_adv & ADVERTISE_1000XPSE_ASYM)
  913. new_local_adv |= ADVERTISE_PAUSE_ASYM;
  914. if (remote_adv & ADVERTISE_1000XPAUSE)
  915. new_remote_adv |= ADVERTISE_PAUSE_CAP;
  916. if (remote_adv & ADVERTISE_1000XPSE_ASYM)
  917. new_remote_adv |= ADVERTISE_PAUSE_ASYM;
  918. local_adv = new_local_adv;
  919. remote_adv = new_remote_adv;
  920. }
  921. /* See Table 28B-3 of 802.3ab-1999 spec. */
  922. if (local_adv & ADVERTISE_PAUSE_CAP) {
  923. if(local_adv & ADVERTISE_PAUSE_ASYM) {
  924. if (remote_adv & ADVERTISE_PAUSE_CAP) {
  925. bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
  926. }
  927. else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
  928. bp->flow_ctrl = FLOW_CTRL_RX;
  929. }
  930. }
  931. else {
  932. if (remote_adv & ADVERTISE_PAUSE_CAP) {
  933. bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
  934. }
  935. }
  936. }
  937. else if (local_adv & ADVERTISE_PAUSE_ASYM) {
  938. if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
  939. (remote_adv & ADVERTISE_PAUSE_ASYM)) {
  940. bp->flow_ctrl = FLOW_CTRL_TX;
  941. }
  942. }
  943. }
  944. static int
  945. bnx2_5709s_linkup(struct bnx2 *bp)
  946. {
  947. u32 val, speed;
  948. bp->link_up = 1;
  949. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
  950. bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
  951. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  952. if ((bp->autoneg & AUTONEG_SPEED) == 0) {
  953. bp->line_speed = bp->req_line_speed;
  954. bp->duplex = bp->req_duplex;
  955. return 0;
  956. }
  957. speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
  958. switch (speed) {
  959. case MII_BNX2_GP_TOP_AN_SPEED_10:
  960. bp->line_speed = SPEED_10;
  961. break;
  962. case MII_BNX2_GP_TOP_AN_SPEED_100:
  963. bp->line_speed = SPEED_100;
  964. break;
  965. case MII_BNX2_GP_TOP_AN_SPEED_1G:
  966. case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
  967. bp->line_speed = SPEED_1000;
  968. break;
  969. case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
  970. bp->line_speed = SPEED_2500;
  971. break;
  972. }
  973. if (val & MII_BNX2_GP_TOP_AN_FD)
  974. bp->duplex = DUPLEX_FULL;
  975. else
  976. bp->duplex = DUPLEX_HALF;
  977. return 0;
  978. }
  979. static int
  980. bnx2_5708s_linkup(struct bnx2 *bp)
  981. {
  982. u32 val;
  983. bp->link_up = 1;
  984. bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
  985. switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
  986. case BCM5708S_1000X_STAT1_SPEED_10:
  987. bp->line_speed = SPEED_10;
  988. break;
  989. case BCM5708S_1000X_STAT1_SPEED_100:
  990. bp->line_speed = SPEED_100;
  991. break;
  992. case BCM5708S_1000X_STAT1_SPEED_1G:
  993. bp->line_speed = SPEED_1000;
  994. break;
  995. case BCM5708S_1000X_STAT1_SPEED_2G5:
  996. bp->line_speed = SPEED_2500;
  997. break;
  998. }
  999. if (val & BCM5708S_1000X_STAT1_FD)
  1000. bp->duplex = DUPLEX_FULL;
  1001. else
  1002. bp->duplex = DUPLEX_HALF;
  1003. return 0;
  1004. }
  1005. static int
  1006. bnx2_5706s_linkup(struct bnx2 *bp)
  1007. {
  1008. u32 bmcr, local_adv, remote_adv, common;
  1009. bp->link_up = 1;
  1010. bp->line_speed = SPEED_1000;
  1011. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1012. if (bmcr & BMCR_FULLDPLX) {
  1013. bp->duplex = DUPLEX_FULL;
  1014. }
  1015. else {
  1016. bp->duplex = DUPLEX_HALF;
  1017. }
  1018. if (!(bmcr & BMCR_ANENABLE)) {
  1019. return 0;
  1020. }
  1021. bnx2_read_phy(bp, bp->mii_adv, &local_adv);
  1022. bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
  1023. common = local_adv & remote_adv;
  1024. if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
  1025. if (common & ADVERTISE_1000XFULL) {
  1026. bp->duplex = DUPLEX_FULL;
  1027. }
  1028. else {
  1029. bp->duplex = DUPLEX_HALF;
  1030. }
  1031. }
  1032. return 0;
  1033. }
  1034. static int
  1035. bnx2_copper_linkup(struct bnx2 *bp)
  1036. {
  1037. u32 bmcr;
  1038. bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
  1039. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1040. if (bmcr & BMCR_ANENABLE) {
  1041. u32 local_adv, remote_adv, common;
  1042. bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
  1043. bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
  1044. common = local_adv & (remote_adv >> 2);
  1045. if (common & ADVERTISE_1000FULL) {
  1046. bp->line_speed = SPEED_1000;
  1047. bp->duplex = DUPLEX_FULL;
  1048. }
  1049. else if (common & ADVERTISE_1000HALF) {
  1050. bp->line_speed = SPEED_1000;
  1051. bp->duplex = DUPLEX_HALF;
  1052. }
  1053. else {
  1054. bnx2_read_phy(bp, bp->mii_adv, &local_adv);
  1055. bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
  1056. common = local_adv & remote_adv;
  1057. if (common & ADVERTISE_100FULL) {
  1058. bp->line_speed = SPEED_100;
  1059. bp->duplex = DUPLEX_FULL;
  1060. }
  1061. else if (common & ADVERTISE_100HALF) {
  1062. bp->line_speed = SPEED_100;
  1063. bp->duplex = DUPLEX_HALF;
  1064. }
  1065. else if (common & ADVERTISE_10FULL) {
  1066. bp->line_speed = SPEED_10;
  1067. bp->duplex = DUPLEX_FULL;
  1068. }
  1069. else if (common & ADVERTISE_10HALF) {
  1070. bp->line_speed = SPEED_10;
  1071. bp->duplex = DUPLEX_HALF;
  1072. }
  1073. else {
  1074. bp->line_speed = 0;
  1075. bp->link_up = 0;
  1076. }
  1077. }
  1078. }
  1079. else {
  1080. if (bmcr & BMCR_SPEED100) {
  1081. bp->line_speed = SPEED_100;
  1082. }
  1083. else {
  1084. bp->line_speed = SPEED_10;
  1085. }
  1086. if (bmcr & BMCR_FULLDPLX) {
  1087. bp->duplex = DUPLEX_FULL;
  1088. }
  1089. else {
  1090. bp->duplex = DUPLEX_HALF;
  1091. }
  1092. }
  1093. if (bp->link_up) {
  1094. u32 ext_status;
  1095. bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
  1096. if (ext_status & EXT_STATUS_MDIX)
  1097. bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
  1098. }
  1099. return 0;
  1100. }
  1101. static void
  1102. bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
  1103. {
  1104. u32 val, rx_cid_addr = GET_CID_ADDR(cid);
  1105. val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
  1106. val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
  1107. val |= 0x02 << 8;
  1108. if (bp->flow_ctrl & FLOW_CTRL_TX)
  1109. val |= BNX2_L2CTX_FLOW_CTRL_ENABLE;
  1110. bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
  1111. }
  1112. static void
  1113. bnx2_init_all_rx_contexts(struct bnx2 *bp)
  1114. {
  1115. int i;
  1116. u32 cid;
  1117. for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
  1118. if (i == 1)
  1119. cid = RX_RSS_CID;
  1120. bnx2_init_rx_context(bp, cid);
  1121. }
  1122. }
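/* Program the EMAC from the resolved link state: port mode (MII/GMII and
 * the 2.5G bit), half/full duplex, and the RX/TX PAUSE enables; then ack
 * the link-change interrupt and refresh the RX contexts.
 */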
  1123. static void
  1124. bnx2_set_mac_link(struct bnx2 *bp)
  1125. {
  1126. u32 val;
  1127. BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
  1128. if (bp->link_up && (bp->line_speed == SPEED_1000) &&
  1129. (bp->duplex == DUPLEX_HALF)) {
  1130. BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
  1131. }
  1132. /* Configure the EMAC mode register. */
  1133. val = BNX2_RD(bp, BNX2_EMAC_MODE);
  1134. val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
  1135. BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
  1136. BNX2_EMAC_MODE_25G_MODE);
  1137. if (bp->link_up) {
  1138. switch (bp->line_speed) {
  1139. case SPEED_10:
  1140. if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
  1141. val |= BNX2_EMAC_MODE_PORT_MII_10M;
  1142. break;
  1143. }
  1144. /* fall through */
  1145. case SPEED_100:
  1146. val |= BNX2_EMAC_MODE_PORT_MII;
  1147. break;
  1148. case SPEED_2500:
  1149. val |= BNX2_EMAC_MODE_25G_MODE;
  1150. /* fall through */
  1151. case SPEED_1000:
  1152. val |= BNX2_EMAC_MODE_PORT_GMII;
  1153. break;
  1154. }
  1155. }
  1156. else {
  1157. val |= BNX2_EMAC_MODE_PORT_GMII;
  1158. }
  1159. /* Set the MAC to operate in the appropriate duplex mode. */
  1160. if (bp->duplex == DUPLEX_HALF)
  1161. val |= BNX2_EMAC_MODE_HALF_DUPLEX;
  1162. BNX2_WR(bp, BNX2_EMAC_MODE, val);
  1163. /* Enable/disable rx PAUSE. */
  1164. bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
  1165. if (bp->flow_ctrl & FLOW_CTRL_RX)
  1166. bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
  1167. BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
  1168. /* Enable/disable tx PAUSE. */
  1169. val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
  1170. val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
  1171. if (bp->flow_ctrl & FLOW_CTRL_TX)
  1172. val |= BNX2_EMAC_TX_MODE_FLOW_EN;
  1173. BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
  1174. /* Acknowledge the interrupt. */
  1175. BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
  1176. bnx2_init_all_rx_contexts(bp);
  1177. }
  1178. static void
  1179. bnx2_enable_bmsr1(struct bnx2 *bp)
  1180. {
  1181. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1182. (BNX2_CHIP(bp) == BNX2_CHIP_5709))
  1183. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1184. MII_BNX2_BLK_ADDR_GP_STATUS);
  1185. }
  1186. static void
  1187. bnx2_disable_bmsr1(struct bnx2 *bp)
  1188. {
  1189. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1190. (BNX2_CHIP(bp) == BNX2_CHIP_5709))
  1191. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1192. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1193. }
  1194. static int
  1195. bnx2_test_and_enable_2g5(struct bnx2 *bp)
  1196. {
  1197. u32 up1;
  1198. int ret = 1;
  1199. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1200. return 0;
  1201. if (bp->autoneg & AUTONEG_SPEED)
  1202. bp->advertising |= ADVERTISED_2500baseX_Full;
  1203. if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
  1204. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
  1205. bnx2_read_phy(bp, bp->mii_up1, &up1);
  1206. if (!(up1 & BCM5708S_UP1_2G5)) {
  1207. up1 |= BCM5708S_UP1_2G5;
  1208. bnx2_write_phy(bp, bp->mii_up1, up1);
  1209. ret = 0;
  1210. }
  1211. if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
  1212. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1213. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1214. return ret;
  1215. }
  1216. static int
  1217. bnx2_test_and_disable_2g5(struct bnx2 *bp)
  1218. {
  1219. u32 up1;
  1220. int ret = 0;
  1221. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1222. return 0;
  1223. if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
  1224. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
  1225. bnx2_read_phy(bp, bp->mii_up1, &up1);
  1226. if (up1 & BCM5708S_UP1_2G5) {
  1227. up1 &= ~BCM5708S_UP1_2G5;
  1228. bnx2_write_phy(bp, bp->mii_up1, up1);
  1229. ret = 1;
  1230. }
  1231. if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
  1232. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1233. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1234. return ret;
  1235. }
  1236. static void
  1237. bnx2_enable_forced_2g5(struct bnx2 *bp)
  1238. {
  1239. u32 uninitialized_var(bmcr);
  1240. int err;
  1241. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1242. return;
  1243. if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
  1244. u32 val;
  1245. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1246. MII_BNX2_BLK_ADDR_SERDES_DIG);
  1247. if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
  1248. val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
  1249. val |= MII_BNX2_SD_MISC1_FORCE |
  1250. MII_BNX2_SD_MISC1_FORCE_2_5G;
  1251. bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
  1252. }
  1253. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1254. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1255. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1256. } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
  1257. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1258. if (!err)
  1259. bmcr |= BCM5708S_BMCR_FORCE_2500;
  1260. } else {
  1261. return;
  1262. }
  1263. if (err)
  1264. return;
  1265. if (bp->autoneg & AUTONEG_SPEED) {
  1266. bmcr &= ~BMCR_ANENABLE;
  1267. if (bp->req_duplex == DUPLEX_FULL)
  1268. bmcr |= BMCR_FULLDPLX;
  1269. }
  1270. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  1271. }
  1272. static void
  1273. bnx2_disable_forced_2g5(struct bnx2 *bp)
  1274. {
  1275. u32 uninitialized_var(bmcr);
  1276. int err;
  1277. if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
  1278. return;
  1279. if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
  1280. u32 val;
  1281. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1282. MII_BNX2_BLK_ADDR_SERDES_DIG);
  1283. if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
  1284. val &= ~MII_BNX2_SD_MISC1_FORCE;
  1285. bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
  1286. }
  1287. bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
  1288. MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
  1289. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1290. } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
  1291. err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1292. if (!err)
  1293. bmcr &= ~BCM5708S_BMCR_FORCE_2500;
  1294. } else {
  1295. return;
  1296. }
  1297. if (err)
  1298. return;
  1299. if (bp->autoneg & AUTONEG_SPEED)
  1300. bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
  1301. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  1302. }
  1303. static void
  1304. bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
  1305. {
  1306. u32 val;
  1307. bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
  1308. bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
  1309. if (start)
  1310. bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
  1311. else
  1312. bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
  1313. }
  1314. static int
  1315. bnx2_set_link(struct bnx2 *bp)
  1316. {
  1317. u32 bmsr;
  1318. u8 link_up;
  1319. if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
  1320. bp->link_up = 1;
  1321. return 0;
  1322. }
  1323. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  1324. return 0;
  1325. link_up = bp->link_up;
  1326. bnx2_enable_bmsr1(bp);
  1327. bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
  1328. bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
  1329. bnx2_disable_bmsr1(bp);
  1330. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1331. (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
  1332. u32 val, an_dbg;
  1333. if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
  1334. bnx2_5706s_force_link_dn(bp, 0);
  1335. bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
  1336. }
  1337. val = BNX2_RD(bp, BNX2_EMAC_STATUS);
  1338. bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
  1339. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
  1340. bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
  1341. if ((val & BNX2_EMAC_STATUS_LINK) &&
  1342. !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
  1343. bmsr |= BMSR_LSTATUS;
  1344. else
  1345. bmsr &= ~BMSR_LSTATUS;
  1346. }
  1347. if (bmsr & BMSR_LSTATUS) {
  1348. bp->link_up = 1;
  1349. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1350. if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
  1351. bnx2_5706s_linkup(bp);
  1352. else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
  1353. bnx2_5708s_linkup(bp);
  1354. else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
  1355. bnx2_5709s_linkup(bp);
  1356. }
  1357. else {
  1358. bnx2_copper_linkup(bp);
  1359. }
  1360. bnx2_resolve_flow_ctrl(bp);
  1361. }
  1362. else {
  1363. if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
  1364. (bp->autoneg & AUTONEG_SPEED))
  1365. bnx2_disable_forced_2g5(bp);
  1366. if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
  1367. u32 bmcr;
  1368. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1369. bmcr |= BMCR_ANENABLE;
  1370. bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
  1371. bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
  1372. }
  1373. bp->link_up = 0;
  1374. }
  1375. if (bp->link_up != link_up) {
  1376. bnx2_report_link(bp);
  1377. }
  1378. bnx2_set_mac_link(bp);
  1379. return 0;
  1380. }
  1381. static int
  1382. bnx2_reset_phy(struct bnx2 *bp)
  1383. {
  1384. int i;
  1385. u32 reg;
  1386. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
  1387. #define PHY_RESET_MAX_WAIT 100
  1388. for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
  1389. udelay(10);
  1390. bnx2_read_phy(bp, bp->mii_bmcr, &reg);
  1391. if (!(reg & BMCR_RESET)) {
  1392. udelay(20);
  1393. break;
  1394. }
  1395. }
  1396. if (i == PHY_RESET_MAX_WAIT) {
  1397. return -EBUSY;
  1398. }
  1399. return 0;
  1400. }
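/* Translate the requested flow-control mode into pause advertisement bits
 * for the current PHY type: 1000BASE-X pause bits for SerDes PHYs, the
 * standard ADVERTISE_PAUSE_CAP/ASYM bits for copper.
 */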
  1401. static u32
  1402. bnx2_phy_get_pause_adv(struct bnx2 *bp)
  1403. {
  1404. u32 adv = 0;
  1405. if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
  1406. (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
  1407. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1408. adv = ADVERTISE_1000XPAUSE;
  1409. }
  1410. else {
  1411. adv = ADVERTISE_PAUSE_CAP;
  1412. }
  1413. }
  1414. else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
  1415. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1416. adv = ADVERTISE_1000XPSE_ASYM;
  1417. }
  1418. else {
  1419. adv = ADVERTISE_PAUSE_ASYM;
  1420. }
  1421. }
  1422. else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
  1423. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1424. adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
  1425. }
  1426. else {
  1427. adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
  1428. }
  1429. }
  1430. return adv;
  1431. }
  1432. static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
  1433. static int
  1434. bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
  1435. __releases(&bp->phy_lock)
  1436. __acquires(&bp->phy_lock)
  1437. {
  1438. u32 speed_arg = 0, pause_adv;
  1439. pause_adv = bnx2_phy_get_pause_adv(bp);
  1440. if (bp->autoneg & AUTONEG_SPEED) {
  1441. speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
  1442. if (bp->advertising & ADVERTISED_10baseT_Half)
  1443. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
  1444. if (bp->advertising & ADVERTISED_10baseT_Full)
  1445. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
  1446. if (bp->advertising & ADVERTISED_100baseT_Half)
  1447. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
  1448. if (bp->advertising & ADVERTISED_100baseT_Full)
  1449. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
  1450. if (bp->advertising & ADVERTISED_1000baseT_Full)
  1451. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
  1452. if (bp->advertising & ADVERTISED_2500baseX_Full)
  1453. speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
  1454. } else {
  1455. if (bp->req_line_speed == SPEED_2500)
  1456. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
  1457. else if (bp->req_line_speed == SPEED_1000)
  1458. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
  1459. else if (bp->req_line_speed == SPEED_100) {
  1460. if (bp->req_duplex == DUPLEX_FULL)
  1461. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
  1462. else
  1463. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
  1464. } else if (bp->req_line_speed == SPEED_10) {
  1465. if (bp->req_duplex == DUPLEX_FULL)
  1466. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
  1467. else
  1468. speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
  1469. }
  1470. }
  1471. if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
  1472. speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
  1473. if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
  1474. speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
  1475. if (port == PORT_TP)
  1476. speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
  1477. BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
  1478. bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
  1479. spin_unlock_bh(&bp->phy_lock);
  1480. bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
  1481. spin_lock_bh(&bp->phy_lock);
  1482. return 0;
  1483. }
  1484. static int
  1485. bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
  1486. __releases(&bp->phy_lock)
  1487. __acquires(&bp->phy_lock)
  1488. {
  1489. u32 adv, bmcr;
  1490. u32 new_adv = 0;
  1491. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
  1492. return bnx2_setup_remote_phy(bp, port);
  1493. if (!(bp->autoneg & AUTONEG_SPEED)) {
  1494. u32 new_bmcr;
  1495. int force_link_down = 0;
  1496. if (bp->req_line_speed == SPEED_2500) {
  1497. if (!bnx2_test_and_enable_2g5(bp))
  1498. force_link_down = 1;
  1499. } else if (bp->req_line_speed == SPEED_1000) {
  1500. if (bnx2_test_and_disable_2g5(bp))
  1501. force_link_down = 1;
  1502. }
  1503. bnx2_read_phy(bp, bp->mii_adv, &adv);
  1504. adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
  1505. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1506. new_bmcr = bmcr & ~BMCR_ANENABLE;
  1507. new_bmcr |= BMCR_SPEED1000;
  1508. if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
  1509. if (bp->req_line_speed == SPEED_2500)
  1510. bnx2_enable_forced_2g5(bp);
  1511. else if (bp->req_line_speed == SPEED_1000) {
  1512. bnx2_disable_forced_2g5(bp);
  1513. new_bmcr &= ~0x2000;
  1514. }
  1515. } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
  1516. if (bp->req_line_speed == SPEED_2500)
  1517. new_bmcr |= BCM5708S_BMCR_FORCE_2500;
  1518. else
  1519. new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
  1520. }
  1521. if (bp->req_duplex == DUPLEX_FULL) {
  1522. adv |= ADVERTISE_1000XFULL;
  1523. new_bmcr |= BMCR_FULLDPLX;
  1524. }
  1525. else {
  1526. adv |= ADVERTISE_1000XHALF;
  1527. new_bmcr &= ~BMCR_FULLDPLX;
  1528. }
  1529. if ((new_bmcr != bmcr) || (force_link_down)) {
  1530. /* Force a link down visible on the other side */
  1531. if (bp->link_up) {
  1532. bnx2_write_phy(bp, bp->mii_adv, adv &
  1533. ~(ADVERTISE_1000XFULL |
  1534. ADVERTISE_1000XHALF));
  1535. bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
  1536. BMCR_ANRESTART | BMCR_ANENABLE);
  1537. bp->link_up = 0;
  1538. netif_carrier_off(bp->dev);
  1539. bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
  1540. bnx2_report_link(bp);
  1541. }
  1542. bnx2_write_phy(bp, bp->mii_adv, adv);
  1543. bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
  1544. } else {
  1545. bnx2_resolve_flow_ctrl(bp);
  1546. bnx2_set_mac_link(bp);
  1547. }
  1548. return 0;
  1549. }
  1550. bnx2_test_and_enable_2g5(bp);
  1551. if (bp->advertising & ADVERTISED_1000baseT_Full)
  1552. new_adv |= ADVERTISE_1000XFULL;
  1553. new_adv |= bnx2_phy_get_pause_adv(bp);
  1554. bnx2_read_phy(bp, bp->mii_adv, &adv);
  1555. bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
  1556. bp->serdes_an_pending = 0;
  1557. if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
  1558. /* Force a link down visible on the other side */
  1559. if (bp->link_up) {
  1560. bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
  1561. spin_unlock_bh(&bp->phy_lock);
  1562. msleep(20);
  1563. spin_lock_bh(&bp->phy_lock);
  1564. }
  1565. bnx2_write_phy(bp, bp->mii_adv, new_adv);
  1566. bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
  1567. BMCR_ANENABLE);
  1568. /* Speed up link-up time when the link partner
  1569. * does not autonegotiate which is very common
  1570. * in blade servers. Some blade servers use
  1571. * IPMI for keyboard input and it's important
  1572. * to minimize link disruptions. Autoneg. involves
  1573. * exchanging base pages plus 3 next pages and
  1574. * normally completes in about 120 msec.
  1575. */
  1576. bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
  1577. bp->serdes_an_pending = 1;
  1578. mod_timer(&bp->timer, jiffies + bp->current_interval);
  1579. } else {
  1580. bnx2_resolve_flow_ctrl(bp);
  1581. bnx2_set_mac_link(bp);
  1582. }
  1583. return 0;
  1584. }
  1585. #define ETHTOOL_ALL_FIBRE_SPEED \
  1586. (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
  1587. (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
  1588. (ADVERTISED_1000baseT_Full)
  1589. #define ETHTOOL_ALL_COPPER_SPEED \
  1590. (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
  1591. ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
  1592. ADVERTISED_1000baseT_Full)
  1593. #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
  1594. ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
  1595. #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
  1596. static void
  1597. bnx2_set_default_remote_link(struct bnx2 *bp)
  1598. {
  1599. u32 link;
  1600. if (bp->phy_port == PORT_TP)
  1601. link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
  1602. else
  1603. link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
  1604. if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
  1605. bp->req_line_speed = 0;
  1606. bp->autoneg |= AUTONEG_SPEED;
  1607. bp->advertising = ADVERTISED_Autoneg;
  1608. if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
  1609. bp->advertising |= ADVERTISED_10baseT_Half;
  1610. if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
  1611. bp->advertising |= ADVERTISED_10baseT_Full;
  1612. if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
  1613. bp->advertising |= ADVERTISED_100baseT_Half;
  1614. if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
  1615. bp->advertising |= ADVERTISED_100baseT_Full;
  1616. if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
  1617. bp->advertising |= ADVERTISED_1000baseT_Full;
  1618. if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
  1619. bp->advertising |= ADVERTISED_2500baseX_Full;
  1620. } else {
  1621. bp->autoneg = 0;
  1622. bp->advertising = 0;
  1623. bp->req_duplex = DUPLEX_FULL;
  1624. if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
  1625. bp->req_line_speed = SPEED_10;
  1626. if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
  1627. bp->req_duplex = DUPLEX_HALF;
  1628. }
  1629. if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
  1630. bp->req_line_speed = SPEED_100;
  1631. if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
  1632. bp->req_duplex = DUPLEX_HALF;
  1633. }
  1634. if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
  1635. bp->req_line_speed = SPEED_1000;
  1636. if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
  1637. bp->req_line_speed = SPEED_2500;
  1638. }
  1639. }
  1640. static void
  1641. bnx2_set_default_link(struct bnx2 *bp)
  1642. {
  1643. if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
  1644. bnx2_set_default_remote_link(bp);
  1645. return;
  1646. }
  1647. bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
  1648. bp->req_line_speed = 0;
  1649. if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
  1650. u32 reg;
  1651. bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
  1652. reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
  1653. reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
  1654. if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
  1655. bp->autoneg = 0;
  1656. bp->req_line_speed = bp->line_speed = SPEED_1000;
  1657. bp->req_duplex = DUPLEX_FULL;
  1658. }
  1659. } else
  1660. bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
  1661. }
  1662. static void
  1663. bnx2_send_heart_beat(struct bnx2 *bp)
  1664. {
  1665. u32 msg;
  1666. u32 addr;
  1667. spin_lock(&bp->indirect_lock);
  1668. msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
  1669. addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
  1670. BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
  1671. BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
  1672. spin_unlock(&bp->indirect_lock);
  1673. }
  1674. static void
  1675. bnx2_remote_phy_event(struct bnx2 *bp)
  1676. {
  1677. u32 msg;
  1678. u8 link_up = bp->link_up;
  1679. u8 old_port;
  1680. msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
  1681. if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
  1682. bnx2_send_heart_beat(bp);
  1683. msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
  1684. if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
  1685. bp->link_up = 0;
  1686. else {
  1687. u32 speed;
  1688. bp->link_up = 1;
  1689. speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
  1690. bp->duplex = DUPLEX_FULL;
  1691. switch (speed) {
  1692. case BNX2_LINK_STATUS_10HALF:
  1693. bp->duplex = DUPLEX_HALF;
  1694. /* fall through */
  1695. case BNX2_LINK_STATUS_10FULL:
  1696. bp->line_speed = SPEED_10;
  1697. break;
  1698. case BNX2_LINK_STATUS_100HALF:
  1699. bp->duplex = DUPLEX_HALF;
  1700. /* fall through */
  1701. case BNX2_LINK_STATUS_100BASE_T4:
  1702. ca