/* drivers/net/ethernet/qlogic/qlge/qlge_main.c
 * (mirrored from github.com/mirrors/linux; this view is truncated —
 * the full file is ~5026 lines)
 */

  1. /*
  2. * QLogic qlge NIC HBA Driver
  3. * Copyright (c) 2003-2008 QLogic Corporation
  4. * See LICENSE.qlge for copyright and licensing details.
  5. * Author: Linux qlge network device driver by
  6. * Ron Mercer <ron.mercer@qlogic.com>
  7. */
  8. #include <linux/kernel.h>
  9. #include <linux/bitops.h>
  10. #include <linux/types.h>
  11. #include <linux/module.h>
  12. #include <linux/list.h>
  13. #include <linux/pci.h>
  14. #include <linux/dma-mapping.h>
  15. #include <linux/pagemap.h>
  16. #include <linux/sched.h>
  17. #include <linux/slab.h>
  18. #include <linux/dmapool.h>
  19. #include <linux/mempool.h>
  20. #include <linux/spinlock.h>
  21. #include <linux/kthread.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/errno.h>
  24. #include <linux/ioport.h>
  25. #include <linux/in.h>
  26. #include <linux/ip.h>
  27. #include <linux/ipv6.h>
  28. #include <net/ipv6.h>
  29. #include <linux/tcp.h>
  30. #include <linux/udp.h>
  31. #include <linux/if_arp.h>
  32. #include <linux/if_ether.h>
  33. #include <linux/netdevice.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/ethtool.h>
  36. #include <linux/if_vlan.h>
  37. #include <linux/skbuff.h>
  38. #include <linux/delay.h>
  39. #include <linux/mm.h>
  40. #include <linux/vmalloc.h>
  41. #include <linux/prefetch.h>
  42. #include <net/ip6_checksum.h>
  43. #include "qlge.h"
  44. char qlge_driver_name[] = DRV_NAME;
  45. const char qlge_driver_version[] = DRV_VERSION;
  46. MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
  47. MODULE_DESCRIPTION(DRV_STRING " ");
  48. MODULE_LICENSE("GPL");
  49. MODULE_VERSION(DRV_VERSION);
  50. static const u32 default_msg =
  51. NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
  52. /* NETIF_MSG_TIMER | */
  53. NETIF_MSG_IFDOWN |
  54. NETIF_MSG_IFUP |
  55. NETIF_MSG_RX_ERR |
  56. NETIF_MSG_TX_ERR |
  57. /* NETIF_MSG_TX_QUEUED | */
  58. /* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
  59. /* NETIF_MSG_PKTDATA | */
  60. NETIF_MSG_HW | NETIF_MSG_WOL | 0;
  61. static int debug = -1; /* defaults above */
  62. module_param(debug, int, 0664);
  63. MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
  64. #define MSIX_IRQ 0
  65. #define MSI_IRQ 1
  66. #define LEG_IRQ 2
  67. static int qlge_irq_type = MSIX_IRQ;
  68. module_param(qlge_irq_type, int, 0664);
  69. MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
  70. static int qlge_mpi_coredump;
  71. module_param(qlge_mpi_coredump, int, 0);
  72. MODULE_PARM_DESC(qlge_mpi_coredump,
  73. "Option to enable MPI firmware dump. "
  74. "Default is OFF - Do Not allocate memory. ");
  75. static int qlge_force_coredump;
  76. module_param(qlge_force_coredump, int, 0);
  77. MODULE_PARM_DESC(qlge_force_coredump,
  78. "Option to allow force of firmware core dump. "
  79. "Default is OFF - Do not allow.");
  80. static const struct pci_device_id qlge_pci_tbl[] = {
  81. {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
  82. {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
  83. /* required last entry */
  84. {0,}
  85. };
  86. MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
  87. static int ql_wol(struct ql_adapter *);
  88. static void qlge_set_multicast_list(struct net_device *);
  89. static int ql_adapter_down(struct ql_adapter *);
  90. static int ql_adapter_up(struct ql_adapter *);
  91. /* This hardware semaphore causes exclusive access to
  92. * resources shared between the NIC driver, MPI firmware,
  93. * FCOE firmware and the FC driver.
  94. */
  95. static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
  96. {
  97. u32 sem_bits = 0;
  98. switch (sem_mask) {
  99. case SEM_XGMAC0_MASK:
  100. sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
  101. break;
  102. case SEM_XGMAC1_MASK:
  103. sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
  104. break;
  105. case SEM_ICB_MASK:
  106. sem_bits = SEM_SET << SEM_ICB_SHIFT;
  107. break;
  108. case SEM_MAC_ADDR_MASK:
  109. sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
  110. break;
  111. case SEM_FLASH_MASK:
  112. sem_bits = SEM_SET << SEM_FLASH_SHIFT;
  113. break;
  114. case SEM_PROBE_MASK:
  115. sem_bits = SEM_SET << SEM_PROBE_SHIFT;
  116. break;
  117. case SEM_RT_IDX_MASK:
  118. sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
  119. break;
  120. case SEM_PROC_REG_MASK:
  121. sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
  122. break;
  123. default:
  124. netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
  125. return -EINVAL;
  126. }
  127. ql_write32(qdev, SEM, sem_bits | sem_mask);
  128. return !(ql_read32(qdev, SEM) & sem_bits);
  129. }
  130. int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
  131. {
  132. unsigned int wait_count = 30;
  133. do {
  134. if (!ql_sem_trylock(qdev, sem_mask))
  135. return 0;
  136. udelay(100);
  137. } while (--wait_count);
  138. return -ETIMEDOUT;
  139. }
/* Release a hardware semaphore taken with ql_sem_trylock() or
 * ql_sem_spinlock().  Writing the mask without the SET bits clears
 * our ownership; the read-back flushes the posted write.
 */
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
  145. /* This function waits for a specific bit to come ready
  146. * in a given register. It is used mostly by the initialize
  147. * process, but is also used in kernel thread API such as
  148. * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
  149. */
  150. int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
  151. {
  152. u32 temp;
  153. int count = UDELAY_COUNT;
  154. while (count) {
  155. temp = ql_read32(qdev, reg);
  156. /* check for errors */
  157. if (temp & err_bit) {
  158. netif_alert(qdev, probe, qdev->ndev,
  159. "register 0x%.08x access error, value = 0x%.08x!.\n",
  160. reg, temp);
  161. return -EIO;
  162. } else if (temp & bit)
  163. return 0;
  164. udelay(UDELAY_DELAY);
  165. count--;
  166. }
  167. netif_alert(qdev, probe, qdev->ndev,
  168. "Timed out waiting for reg %x to come ready.\n", reg);
  169. return -ETIMEDOUT;
  170. }
  171. /* The CFG register is used to download TX and RX control blocks
  172. * to the chip. This function waits for an operation to complete.
  173. */
  174. static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
  175. {
  176. int count = UDELAY_COUNT;
  177. u32 temp;
  178. while (count) {
  179. temp = ql_read32(qdev, CFG);
  180. if (temp & CFG_LE)
  181. return -EIO;
  182. if (!(temp & bit))
  183. return 0;
  184. udelay(UDELAY_DELAY);
  185. count--;
  186. }
  187. return -ETIMEDOUT;
  188. }
  189. /* Used to issue init control blocks to hw. Maps control block,
  190. * sets address, triggers download, waits for completion.
  191. */
  192. int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
  193. u16 q_id)
  194. {
  195. u64 map;
  196. int status = 0;
  197. int direction;
  198. u32 mask;
  199. u32 value;
  200. direction =
  201. (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
  202. PCI_DMA_FROMDEVICE;
  203. map = pci_map_single(qdev->pdev, ptr, size, direction);
  204. if (pci_dma_mapping_error(qdev->pdev, map)) {
  205. netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
  206. return -ENOMEM;
  207. }
  208. status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
  209. if (status)
  210. return status;
  211. status = ql_wait_cfg(qdev, bit);
  212. if (status) {
  213. netif_err(qdev, ifup, qdev->ndev,
  214. "Timed out waiting for CFG to come ready.\n");
  215. goto exit;
  216. }
  217. ql_write32(qdev, ICB_L, (u32) map);
  218. ql_write32(qdev, ICB_H, (u32) (map >> 32));
  219. mask = CFG_Q_MASK | (bit << 16);
  220. value = bit | (q_id << CFG_Q_SHIFT);
  221. ql_write32(qdev, CFG, (mask | value));
  222. /*
  223. * Wait for the bit to clear after signaling hw.
  224. */
  225. status = ql_wait_cfg(qdev, bit);
  226. exit:
  227. ql_sem_unlock(qdev, SEM_ICB_MASK); /* does flush too */
  228. pci_unmap_single(qdev->pdev, map, size, direction);
  229. return status;
  230. }
/* Get a specific MAC address from the CAM. Used for debug and reg dump.
 *
 * Reads the entry at @index of table @type through the indirect
 * MAC_ADDR_IDX/MAC_ADDR_DATA register pair into @value.  A multicast
 * entry is two 32-bit words; a CAM MAC entry carries a third word
 * with the output/routing bits.  Only MULTI_MAC and CAM_MAC types
 * are implemented.
 *
 * Returns 0 on success, a ql_wait_reg_rdy() error on register-access
 * failure, or -EPERM for unsupported types.
 */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			/* For each word: wait for the window (MW), post a
			 * read request (MAC_ADDR_RS strobe) for the next
			 * offset, wait for completion (MR), then pull the
			 * word out of MAC_ADDR_DATA.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			/* Second word of the address. */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			if (type == MAC_ADDR_TYPE_CAM_MAC) {
				/* CAM entries have a third word holding
				 * the output/routing configuration.
				 */
				status =
				    ql_wait_reg_rdy(qdev,
						    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
				if (status)
					goto exit;
				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
				status =
				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
						    MAC_ADDR_MR, 0);
				if (status)
					goto exit;
				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
			}
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 *
 * Writes the entry at @index of table @type via the indirect
 * MAC_ADDR_IDX/MAC_ADDR_DATA register pair; before each word the
 * MAC_ADDR_MW "window ready" bit must be polled.
 *
 * NOTE(review): callers appear to serialize these accesses with
 * SEM_MAC_ADDR_MASK (see ql_set_mac_addr()) -- confirm for any new
 * call site.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
		{
			/* Split the 6-byte address into the 16-bit upper
			 * and 32-bit lower halves the hardware expects.
			 */
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
				(addr[4] << 8) | (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			/* MAC_ADDR_E marks the multicast entry valid. */
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
				   (index << MAC_ADDR_IDX_SHIFT) |
				   type | MAC_ADDR_E);
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			break;
		}
	case MAC_ADDR_TYPE_CAM_MAC:
		{
			u32 cam_output;
			u32 upper = (addr[0] << 8) | addr[1];
			u32 lower =
			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
			    (addr[5]);

			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, lower);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   type);	/* type */
			ql_write32(qdev, MAC_ADDR_DATA, upper);
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			/* Third word: output/routing configuration. */
			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type);	/* type */
			/* This field should also include the queue id
			 * and possibly the function id.  Right now we
			 * hardcode the route field to NIC core.
			 */
			cam_output = (CAM_OUT_ROUTE_NIC |
				      (qdev->
				       func << CAM_OUT_FUNC_SHIFT) |
				      (0 << CAM_OUT_CQ_ID_SHIFT));
			if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
				cam_output |= CAM_OUT_RV;
			/* route to NIC core */
			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
			break;
		}
	case MAC_ADDR_TYPE_VLAN:
		{
			u32 enable_bit = *((u32 *) &addr[0]);
			/* For VLAN, the addr actually holds a bit that
			 * either enables or disables the vlan id we are
			 * addressing. It's either MAC_ADDR_E on or off.
			 * That's bit-27 we're talking about.
			 */
			status =
			    ql_wait_reg_rdy(qdev,
					    MAC_ADDR_IDX, MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
				   type |	/* type */
				   enable_bit);	/* enable/disable */
			break;
		}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
  411. /* Set or clear MAC address in hardware. We sometimes
  412. * have to clear it to prevent wrong frame routing
  413. * especially in a bonding environment.
  414. */
  415. static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
  416. {
  417. int status;
  418. char zero_mac_addr[ETH_ALEN];
  419. char *addr;
  420. if (set) {
  421. addr = &qdev->current_mac_addr[0];
  422. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  423. "Set Mac addr %pM\n", addr);
  424. } else {
  425. eth_zero_addr(zero_mac_addr);
  426. addr = &zero_mac_addr[0];
  427. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  428. "Clearing MAC address\n");
  429. }
  430. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  431. if (status)
  432. return status;
  433. status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
  434. MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
  435. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  436. if (status)
  437. netif_err(qdev, ifup, qdev->ndev,
  438. "Failed to init mac address.\n");
  439. return status;
  440. }
/* Mark the link up: tell the stack via the carrier flag and
 * (re)program our MAC address into the CAM.
 */
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}
/* Mark the link down and clear the CAM MAC address so frames are
 * not mis-routed to this function (see ql_set_mac_addr()).
 */
void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 *
 * Indirect read: wait for the RT_IDX window (MW), post the read
 * request (RT_IDX_RS strobe) for slot @index, wait for completion
 * (MR), then pull the value from RT_DATA.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;
	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
  471. /* The NIC function for this chip has 16 routing indexes. Each one can be used
  472. * to route different frame types to various inbound queues. We send broadcast/
  473. * multicast/error frames to the default queue for slow handling,
  474. * and CAM hit/RSS frames to the fast handling queues.
  475. */
  476. static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
  477. int enable)
  478. {
  479. int status = -EINVAL; /* Return error if no mask match. */
  480. u32 value = 0;
  481. switch (mask) {
  482. case RT_IDX_CAM_HIT:
  483. {
  484. value = RT_IDX_DST_CAM_Q | /* dest */
  485. RT_IDX_TYPE_NICQ | /* type */
  486. (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
  487. break;
  488. }
  489. case RT_IDX_VALID: /* Promiscuous Mode frames. */
  490. {
  491. value = RT_IDX_DST_DFLT_Q | /* dest */
  492. RT_IDX_TYPE_NICQ | /* type */
  493. (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
  494. break;
  495. }
  496. case RT_IDX_ERR: /* Pass up MAC,IP,TCP/UDP error frames. */
  497. {
  498. value = RT_IDX_DST_DFLT_Q | /* dest */
  499. RT_IDX_TYPE_NICQ | /* type */
  500. (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
  501. break;
  502. }
  503. case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
  504. {
  505. value = RT_IDX_DST_DFLT_Q | /* dest */
  506. RT_IDX_TYPE_NICQ | /* type */
  507. (RT_IDX_IP_CSUM_ERR_SLOT <<
  508. RT_IDX_IDX_SHIFT); /* index */
  509. break;
  510. }
  511. case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
  512. {
  513. value = RT_IDX_DST_DFLT_Q | /* dest */
  514. RT_IDX_TYPE_NICQ | /* type */
  515. (RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
  516. RT_IDX_IDX_SHIFT); /* index */
  517. break;
  518. }
  519. case RT_IDX_BCAST: /* Pass up Broadcast frames to default Q. */
  520. {
  521. value = RT_IDX_DST_DFLT_Q | /* dest */
  522. RT_IDX_TYPE_NICQ | /* type */
  523. (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
  524. break;
  525. }
  526. case RT_IDX_MCAST: /* Pass up All Multicast frames. */
  527. {
  528. value = RT_IDX_DST_DFLT_Q | /* dest */
  529. RT_IDX_TYPE_NICQ | /* type */
  530. (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
  531. break;
  532. }
  533. case RT_IDX_MCAST_MATCH: /* Pass up matched Multicast frames. */
  534. {
  535. value = RT_IDX_DST_DFLT_Q | /* dest */
  536. RT_IDX_TYPE_NICQ | /* type */
  537. (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
  538. break;
  539. }
  540. case RT_IDX_RSS_MATCH: /* Pass up matched RSS frames. */
  541. {
  542. value = RT_IDX_DST_RSS | /* dest */
  543. RT_IDX_TYPE_NICQ | /* type */
  544. (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
  545. break;
  546. }
  547. case 0: /* Clear the E-bit on an entry. */
  548. {
  549. value = RT_IDX_DST_DFLT_Q | /* dest */
  550. RT_IDX_TYPE_NICQ | /* type */
  551. (index << RT_IDX_IDX_SHIFT);/* index */
  552. break;
  553. }
  554. default:
  555. netif_err(qdev, ifup, qdev->ndev,
  556. "Mask type %d not yet supported.\n", mask);
  557. status = -EPERM;
  558. goto exit;
  559. }
  560. if (value) {
  561. status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
  562. if (status)
  563. goto exit;
  564. value |= (enable ? RT_IDX_E : 0);
  565. ql_write32(qdev, RT_IDX, value);
  566. ql_write32(qdev, RT_DATA, enable ? mask : 0);
  567. }
  568. exit:
  569. return status;
  570. }
/* Globally enable interrupts: the upper 16 bits of INTR_EN are the
 * write mask for the corresponding enable bits in the lower half.
 */
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}
/* Globally disable interrupts: write only the mask half of INTR_EN
 * with the enable bit left clear.
 */
static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes. Once it hits zero we enable the interrupt.
 *
 * Returns the STS register value read after enabling (also flushes
 * the posted INTR_EN write), or 0 if the vector was left disabled.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	/* Counting scheme: only the path that drops irq_cnt to zero
	 * re-enables the vector; hw_lock serializes this against
	 * ql_disable_completion_interrupt().
	 */
	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
/* Counterpart of ql_enable_completion_interrupt(): bump irq_cnt and
 * write the disable mask the first time the count leaves zero.
 *
 * Returns the STS value read after disabling, or 0 when nothing was
 * written (vector already disabled, or MSI-X multi-vector mode).
 */
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	/* Only write the disable mask on the 0 -> 1 transition. */
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
  628. static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
  629. {
  630. int i;
  631. for (i = 0; i < qdev->intr_count; i++) {
  632. /* The enable call does a atomic_dec_and_test
  633. * and enables only if the result is zero.
  634. * So we precharge it here.
  635. */
  636. if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
  637. i == 0))
  638. atomic_set(&qdev->intr_context[i].irq_cnt, 1);
  639. ql_enable_completion_interrupt(qdev, i);
  640. }
  641. }
  642. static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
  643. {
  644. int status, i;
  645. u16 csum = 0;
  646. __le16 *flash = (__le16 *)&qdev->flash;
  647. status = strncmp((char *)&qdev->flash, str, 4);
  648. if (status) {
  649. netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
  650. return status;
  651. }
  652. for (i = 0; i < size; i++)
  653. csum += le16_to_cpu(*flash++);
  654. if (csum)
  655. netif_err(qdev, ifup, qdev->ndev,
  656. "Invalid flash checksum, csum = 0x%.04x.\n", csum);
  657. return csum;
  658. }
/* Read one 32-bit word at flash @offset through the indirect
 * FLASH_ADDR/FLASH_DATA register pair into @data.
 */
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32. Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
/* Read and validate this function's flash parameter block on 8000
 * series hardware, then install the MAC address it contains into
 * the netdev.  Holds SEM_FLASH_MASK for the duration of the reads.
 *
 * Returns 0 on success, -ETIMEDOUT if the flash semaphore can't be
 * taken, -EINVAL for an invalid image or MAC, or a flash-read error.
 */
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* Read and validate this function's flash parameter block on 8012
 * series hardware, then install the flash MAC address into the
 * netdev.  Holds SEM_FLASH_MASK for the duration of the reads.
 *
 * Returns 0 on success, -ETIMEDOUT if the flash semaphore can't be
 * taken, -EINVAL for an invalid image or MAC, or a flash-read error.
 */
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);
exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 *
 * Note the order: the data word must be staged in XGMAC_DATA before
 * the address write to XGMAC_ADDR triggers the operation.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac register are located behind the xgmac_addr and xgmac_data
 * register pair. Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 *
 * Reads XGMAC register @reg into @data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
  821. /* This is used for reading the 64-bit statistics regs. */
  822. int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
  823. {
  824. int status = 0;
  825. u32 hi = 0;
  826. u32 lo = 0;
  827. status = ql_read_xgmac_reg(qdev, reg, &lo);
  828. if (status)
  829. goto exit;
  830. status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
  831. if (status)
  832. goto exit;
  833. *data = (u64) lo | ((u64) hi << 32);
  834. exit:
  835. return status;
  836. }
  837. static int ql_8000_port_initialize(struct ql_adapter *qdev)
  838. {
  839. int status;
  840. /*
  841. * Get MPI firmware version for driver banner
  842. * and ethool info.
  843. */
  844. status = ql_mb_about_fw(qdev);
  845. if (status)
  846. goto exit;
  847. status = ql_mb_get_fw_state(qdev);
  848. if (status)
  849. goto exit;
  850. /* Wake up a worker to get/set the TX/RX frame sizes. */
  851. queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
  852. exit:
  853. return status;
  854. }
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 *
 * Only one function may run the sequence; the loser of the xgmac
 * semaphore race just waits for the port-init bit instead.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;	/* Enable TX statistics. */
	data |= GLOBAL_CFG_RX_STAT_EN;	/* Enable RX statistics. */
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
/* Size in bytes of one large-buffer master page block
 * (PAGE_SIZE scaled by the configured allocation order).
 */
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
  932. /* Get the next large buffer. */
  933. static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
  934. {
  935. struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
  936. rx_ring->lbq_curr_idx++;
  937. if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
  938. rx_ring->lbq_curr_idx = 0;
  939. rx_ring->lbq_free_cnt++;
  940. return lbq_desc;
  941. }
  942. static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
  943. struct rx_ring *rx_ring)
  944. {
  945. struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
  946. pci_dma_sync_single_for_cpu(qdev->pdev,
  947. dma_unmap_addr(lbq_desc, mapaddr),
  948. rx_ring->lbq_buf_size,
  949. PCI_DMA_FROMDEVICE);
  950. /* If it's the last chunk of our master page then
  951. * we unmap it.
  952. */
  953. if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
  954. == ql_lbq_block_size(qdev))
  955. pci_unmap_page(qdev->pdev,
  956. lbq_desc->p.pg_chunk.map,
  957. ql_lbq_block_size(qdev),
  958. PCI_DMA_FROMDEVICE);
  959. return lbq_desc;
  960. }
  961. /* Get the next small buffer. */
  962. static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
  963. {
  964. struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
  965. rx_ring->sbq_curr_idx++;
  966. if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
  967. rx_ring->sbq_curr_idx = 0;
  968. rx_ring->sbq_free_cnt++;
  969. return sbq_desc;
  970. }
  971. /* Update an rx ring index. */
  972. static void ql_update_cq(struct rx_ring *rx_ring)
  973. {
  974. rx_ring->cnsmr_idx++;
  975. rx_ring->curr_entry++;
  976. if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
  977. rx_ring->cnsmr_idx = 0;
  978. rx_ring->curr_entry = rx_ring->cq_base;
  979. }
  980. }
  981. static void ql_write_cq_idx(struct rx_ring *rx_ring)
  982. {
  983. ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
  984. }
/* Hand out the next lbq_buf_size-sized chunk of the ring's master page.
 *
 * If no master page is active, allocate a compound page of order
 * qdev->lbq_buf_order and DMA-map the whole thing; subsequent calls
 * carve it into chunks.  Every chunk except the last takes an extra
 * page reference so the page stays alive until each chunk's consumer
 * drops it.
 *
 * Returns 0 on success, -ENOMEM if the page allocation or the PCI
 * mapping fails (caller stops refilling and retries later).
 */
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;
		/* GFP_ATOMIC: refill runs in softirq/napi context. */
		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			/* Undo the allocation so the next call retries. */
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		/* Page exhausted: the last chunk inherits the base
		 * page reference taken at allocation time.
		 */
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		/* One extra reference per outstanding chunk. */
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue.
 *
 * While at least 33 slots are free, hands the hardware fresh page
 * chunks in batches of 16: each descriptor gets a chunk from
 * ql_get_next_chunk(), its bus address is written into the ring and
 * the buffer is synced back to the device.  The producer doorbell is
 * rung once at the end if anything was actually refilled.
 */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		/* Resume mid-batch if an earlier pass stopped on ENOMEM. */
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				/* Out of memory: remember where we stopped
				 * so the next refill picks up here.
				 */
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx =%d .\n",
					  i, clean_idx);
				return;
			}

			/* Bus address of this chunk within its master page. */
			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			/* Hand ownership of the buffer back to the device. */
			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	/* Only ring the doorbell if buffers were produced. */
	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
/* Process (refill) a small buffer queue.
 *
 * While more than 16 slots are free, replenishes in batches of 16:
 * any descriptor whose skb was consumed gets a fresh
 * SMALL_BUFFER_SIZE skb, DMA-mapped and written into the ring.  The
 * producer doorbell is rung once at the end if anything was refilled.
 */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		/* Resume mid-batch if an earlier pass stopped early. */
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			/* A slot may still hold a mapped skb from a prior
			 * refill; only NULL slots need a new one.
			 */
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
					netdev_alloc_skb(qdev->ndev,
							 SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					/* Out of memory: stop and remember
					 * where we left off.
					 */
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				/* Reserve pad so rx data can later be
				 * realigned to a 2-byte boundary (see
				 * ql_realign_skb).
				 */
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	/* Only ring the doorbell if buffers were produced. */
	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
  1141. static void ql_update_buffer_queues(struct ql_adapter *qdev,
  1142. struct rx_ring *rx_ring)
  1143. {
  1144. ql_update_sbq(qdev, rx_ring);
  1145. ql_update_lbq(qdev, rx_ring);
  1146. }
/* Unmaps tx buffers. Can be called from send() if a pci mapping
 * fails at some stage, or from the interrupt when a tx completes.
 *
 * @mapped: number of tx_ring_desc->map[] entries that were
 *          successfully DMA-mapped and must now be released.
 *
 * Layout contract (mirrors ql_map_send): slot 0 is the skb->data
 * linear area and slot 7 (only when more than 7 slots were mapped) is
 * the driver-owned OAL — both were mapped with pci_map_single().
 * Every other slot is a page fragment, hence pci_unmap_page().
 */
static void ql_unmap_send(struct ql_adapter *qdev,
			  struct tx_ring_desc *tx_ring_desc, int mapped)
{
	int i;
	for (i = 0; i < mapped; i++) {
		if (i == 0 || (i == 7 && mapped > 7)) {
			/*
			 * Unmap the skb->data area, or the
			 * external sglist (AKA the Outbound
			 * Address List (OAL)).
			 * If its the zeroeth element, then it's
			 * the skb->data area. If it's the 7th
			 * element and there is more than 6 frags,
			 * then its an OAL.
			 */
			if (i == 7) {
				netif_printk(qdev, tx_done, KERN_DEBUG,
					     qdev->ndev,
					     "unmapping OAL area.\n");
			}
			pci_unmap_single(qdev->pdev,
					 dma_unmap_addr(&tx_ring_desc->map[i],
							mapaddr),
					 dma_unmap_len(&tx_ring_desc->map[i],
						       maplen),
					 PCI_DMA_TODEVICE);
		} else {
			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
				     "unmapping frag %d.\n", i);
			pci_unmap_page(qdev->pdev,
				       dma_unmap_addr(&tx_ring_desc->map[i],
						      mapaddr),
				       dma_unmap_len(&tx_ring_desc->map[i],
						     maplen), PCI_DMA_TODEVICE);
		}
	}
}
/* Map the buffers for this transmit. This will return
 * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
 *
 * Fills the IOCB's eight tx_buf_desc slots: slot 0 is always the skb
 * linear data; up to six fragments follow; if the skb has more than
 * seven fragments the eighth slot instead points at an external
 * Outbound Address List (OAL) in tx_ring_desc, and the remaining
 * fragments are written there.  Per-slot unmap info is recorded in
 * tx_ring_desc->map[] so ql_unmap_send() can undo everything.
 *
 * On any mapping failure the slots mapped so far are unmapped and
 * NETDEV_TX_BUSY is returned.
 */
static int ql_map_send(struct ql_adapter *qdev,
		       struct ob_mac_iocb_req *mac_iocb_ptr,
		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
{
	int len = skb_headlen(skb);
	dma_addr_t map;
	int frag_idx, err, map_idx = 0;
	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
	int frag_cnt = skb_shinfo(skb)->nr_frags;

	if (frag_cnt) {
		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
			     "frag_cnt = %d.\n", frag_cnt);
	}
	/*
	 * Map the skb buffer first.
	 */
	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);

	err = pci_dma_mapping_error(qdev->pdev, map);
	if (err) {
		netif_err(qdev, tx_queued, qdev->ndev,
			  "PCI mapping failed with error: %d\n", err);
		/* Nothing mapped yet, so nothing to undo. */
		return NETDEV_TX_BUSY;
	}

	tbd->len = cpu_to_le32(len);
	tbd->addr = cpu_to_le64(map);
	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
	map_idx++;

	/*
	 * This loop fills the remainder of the 8 address descriptors
	 * in the IOCB. If there are more than 7 fragments, then the
	 * eighth address desc will point to an external list (OAL).
	 * When this happens, the remainder of the frags will be stored
	 * in this list.
	 */
	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
		tbd++;
		if (frag_idx == 6 && frag_cnt > 7) {
			/* Let's tack on an sglist.
			 * Our control block will now
			 * look like this:
			 * iocb->seg[0] = skb->data
			 * iocb->seg[1] = frag[0]
			 * iocb->seg[2] = frag[1]
			 * iocb->seg[3] = frag[2]
			 * iocb->seg[4] = frag[3]
			 * iocb->seg[5] = frag[4]
			 * iocb->seg[6] = frag[5]
			 * iocb->seg[7] = ptr to OAL (external sglist)
			 * oal->seg[0] = frag[6]
			 * oal->seg[1] = frag[7]
			 * oal->seg[2] = frag[8]
			 * oal->seg[3] = frag[9]
			 * oal->seg[4] = frag[10]
			 *      etc...
			 */
			/* Tack on the OAL in the eighth segment of IOCB. */
			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
					     sizeof(struct oal),
					     PCI_DMA_TODEVICE);
			err = pci_dma_mapping_error(qdev->pdev, map);
			if (err) {
				netif_err(qdev, tx_queued, qdev->ndev,
					  "PCI mapping outbound address list with error: %d\n",
					  err);
				goto map_error;
			}

			tbd->addr = cpu_to_le64(map);
			/*
			 * The length is the number of fragments
			 * that remain to be mapped times the length
			 * of our sglist (OAL).
			 */
			tbd->len =
			    cpu_to_le32((sizeof(struct tx_buf_desc) *
					 (frag_cnt - frag_idx)) | TX_DESC_C);
			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
					   map);
			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
					  sizeof(struct oal));
			/* Subsequent descriptors are written into the OAL. */
			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
			map_idx++;
		}

		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
				       DMA_TO_DEVICE);

		err = dma_mapping_error(&qdev->pdev->dev, map);
		if (err) {
			netif_err(qdev, tx_queued, qdev->ndev,
				  "PCI mapping frags failed with error: %d.\n",
				  err);
			goto map_error;
		}

		tbd->addr = cpu_to_le64(map);
		tbd->len = cpu_to_le32(skb_frag_size(frag));
		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
				  skb_frag_size(frag));

	}
	/* Save the number of segments we've mapped. */
	tx_ring_desc->map_cnt = map_idx;
	/* Terminate the last segment. */
	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
	return NETDEV_TX_OK;

map_error:
	/*
	 * If the first frag mapping failed, then i will be zero.
	 * This causes the unmap of the skb->data area. Otherwise
	 * we pass in the number of frags that mapped successfully
	 * so they can be umapped.
	 */
	ql_unmap_send(qdev, tx_ring_desc, map_idx);
	return NETDEV_TX_BUSY;
}
  1304. /* Categorizing receive firmware frame errors */
  1305. static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
  1306. struct rx_ring *rx_ring)
  1307. {
  1308. struct nic_stats *stats = &qdev->nic_stats;
  1309. stats->rx_err_count++;
  1310. rx_ring->rx_errors++;
  1311. switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
  1312. case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
  1313. stats->rx_code_err++;
  1314. break;
  1315. case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
  1316. stats->rx_oversize_err++;
  1317. break;
  1318. case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
  1319. stats->rx_undersize_err++;
  1320. break;
  1321. case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
  1322. stats->rx_preamble_err++;
  1323. break;
  1324. case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
  1325. stats->rx_frame_len_err++;
  1326. break;
  1327. case IB_MAC_IOCB_RSP_ERR_CRC:
  1328. stats->rx_crc_err++;
  1329. default:
  1330. break;
  1331. }
  1332. }
/**
 * ql_update_mac_hdr_len - helper routine to update the mac header length
 * based on vlan tags if present
 * @qdev: adapter context
 * @ib_mac_rsp: inbound completion describing the received frame
 * @page: start of the received frame data
 * @len: in/out MAC header length; grown by VLAN_HLEN per tag found
 *
 * Only applies when the NIC is not stripping VLAN tags itself
 * (NETIF_F_HW_VLAN_CTAG_RX disabled); otherwise *len is left alone.
 */
static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  void *page, size_t *len)
{
	u16 *tags;

	/* Hardware already stripped the tag(s); nothing to account for. */
	if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		return;
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
		tags = (u16 *)page;
		/* Look for stacked vlan tags in ethertype field.
		 * NOTE(review): tags[] holds raw (network-order) packet
		 * bytes but is compared against the host-order
		 * ETH_P_8021Q constant without ntohs() — looks
		 * endian-suspect on little-endian hosts; confirm.
		 */
		if (tags[6] == ETH_P_8021Q &&
		    tags[8] == ETH_P_8021Q)
			*len += 2 * VLAN_HLEN;
		else
			*len += VLAN_HLEN;
	}
}
/* Process an inbound completion from an rx ring.
 *
 * GRO path for a frame that arrived entirely in one large-buffer page
 * chunk: the chunk is attached to a napi frags skb with no data copy
 * and handed to napi_gro_frags().  Errored frames are counted and the
 * page reference dropped.
 */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
				       struct rx_ring *rx_ring,
				       struct ib_mac_iocb_rsp *ib_mac_rsp,
				       u32 length,
				       u16 vlan_id)
{
	struct sk_buff *skb;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	napi->dev = qdev->ndev;

	skb = napi_get_frags(napi);
	if (!skb) {
		netif_err(qdev, drv, qdev->ndev,
			  "Couldn't get an skb, exiting.\n");
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}
	prefetch(lbq_desc->p.pg_chunk.va);
	/* Attach the page chunk as a fragment — zero copy. */
	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
			     lbq_desc->p.pg_chunk.page,
			     lbq_desc->p.pg_chunk.offset,
			     length);

	skb->len += length;
	skb->data_len += length;
	skb->truesize += length;
	skb_shinfo(skb)->nr_frags++;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += length;
	/* NOTE(review): set unconditionally — presumably this path is only
	 * taken for frames whose checksum the chip already verified;
	 * confirm against the completion flags the caller checks.
	 */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* 0xffff means "no VLAN tag" in this completion. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	napi_gro_frags(napi);
}
/* Process an inbound completion from an rx ring.
 *
 * Non-GRO path for a frame in one large-buffer page chunk: a small skb
 * is allocated, the MAC header is copied into its linear area, and the
 * rest of the page chunk is attached as a fragment.  Checksum status
 * is taken from the completion flags; good TCP / unfragmented-UDP
 * frames go through napi_gro_receive(), everything else through
 * netif_receive_skb().
 */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
				   struct rx_ring *rx_ring,
				   struct ib_mac_iocb_rsp *ib_mac_rsp,
				   u32 length,
				   u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	void *addr;
	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
	struct napi_struct *napi = &rx_ring->napi;
	size_t hlen = ETH_HLEN;

	skb = netdev_alloc_skb(ndev, length);
	if (!skb) {
		rx_ring->rx_dropped++;
		put_page(lbq_desc->p.pg_chunk.page);
		return;
	}

	addr = lbq_desc->p.pg_chunk.va;
	prefetch(addr);

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		goto err_out;
	}

	/* Update the MAC header length*/
	ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 * NOTE(review): at this point nothing has been skb_put() yet,
	 * so skb->len is still 0 and this check appears to never
	 * trigger; it looks like `length` was intended — confirm.
	 */
	if (skb->len > ndev->mtu + hlen) {
		netif_err(qdev, drv, qdev->ndev,
			  "Segment too small, dropping.\n");
		rx_ring->rx_dropped++;
		goto err_out;
	}
	/* Copy only the MAC header into the linear area; the payload
	 * stays in the page fragment below.
	 */
	memcpy(skb_put(skb, hlen), addr, hlen);
	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
		     length);
	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
			   lbq_desc->p.pg_chunk.offset + hlen,
			   length - hlen);
	skb->len += length - hlen;
	skb->data_len += length - hlen;
	skb->truesize += length - hlen;

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph =
				(struct iphdr *)((u8 *)addr + hlen);
			/* Chip can't checksum fragmented UDP; trust it
			 * only for unfragmented datagrams.
			 */
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* 0xffff means "no VLAN tag" in this completion. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(napi, skb);
	else
		netif_receive_skb(skb);
	return;
err_out:
	dev_kfree_skb_any(skb);
	put_page(lbq_desc->p.pg_chunk.page);
}
/* Process an inbound completion from an rx ring.
 *
 * Small-buffer path: the frame is copied out of the small-buffer skb
 * into a freshly allocated skb (so the original stays mapped in the
 * ring for reuse), then validated and pushed up the stack.  Checksum
 * status comes from the completion flags; good TCP / unfragmented-UDP
 * frames go through napi_gro_receive(), everything else through
 * netif_receive_skb().
 */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
				  struct rx_ring *rx_ring,
				  struct ib_mac_iocb_rsp *ib_mac_rsp,
				  u32 length,
				  u16 vlan_id)
{
	struct net_device *ndev = qdev->ndev;
	struct sk_buff *skb = NULL;
	struct sk_buff *new_skb = NULL;
	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);

	skb = sbq_desc->p.skb;
	/* Allocate new_skb and copy */
	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
	if (new_skb == NULL) {
		rx_ring->rx_dropped++;
		return;
	}
	skb_reserve(new_skb, NET_IP_ALIGN);

	/* Sync for CPU, copy, then hand the buffer back to the device
	 * — the ring skb stays mapped for reuse.
	 */
	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(sbq_desc, mapaddr),
				    dma_unmap_len(sbq_desc, maplen),
				    PCI_DMA_FROMDEVICE);

	memcpy(skb_put(new_skb, length), skb->data, length);

	pci_dma_sync_single_for_device(qdev->pdev,
				       dma_unmap_addr(sbq_desc, mapaddr),
				       dma_unmap_len(sbq_desc, maplen),
				       PCI_DMA_FROMDEVICE);
	skb = new_skb;

	/* Frame error, so drop the packet. */
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
		ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
		dev_kfree_skb_any(skb);
		return;
	}

	/* loopback self test for ethtool */
	if (test_bit(QL_SELFTEST, &qdev->flags)) {
		ql_check_lb_frame(qdev, skb);
		dev_kfree_skb_any(skb);
		return;
	}

	/* The max framesize filter on this chip is set higher than
	 * MTU since FCoE uses 2k frames.
	 */
	if (skb->len > ndev->mtu + ETH_HLEN) {
		dev_kfree_skb_any(skb);
		rx_ring->rx_dropped++;
		return;
	}

	prefetch(skb->data);
	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "%s Multicast.\n",
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
	}
	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "Promiscuous Packet.\n");

	rx_ring->rx_packets++;
	rx_ring->rx_bytes += skb->len;
	skb->protocol = eth_type_trans(skb, ndev);
	skb_checksum_none_assert(skb);

	/* If rx checksum is on, and there are no
	 * csum or frame errors.
	 */
	if ((ndev->features & NETIF_F_RXCSUM) &&
	    !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
		/* TCP frame. */
		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "TCP checksum done!\n");
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
			   (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
			/* Unfragmented ipv4 UDP frame. */
			struct iphdr *iph = (struct iphdr *) skb->data;
			/* Chip can't checksum fragmented UDP; trust it
			 * only for unfragmented datagrams.
			 */
			if (!(iph->frag_off &
			      htons(IP_MF|IP_OFFSET))) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "UDP checksum done!\n");
			}
		}
	}

	skb_record_rx_queue(skb, rx_ring->cq_id);
	/* 0xffff means "no VLAN tag" in this completion. */
	if (vlan_id != 0xffff)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		napi_gro_receive(&rx_ring->napi, skb);
	else
		netif_receive_skb(skb);
}
  1578. static void ql_realign_skb(struct sk_buff *skb, int len)
  1579. {
  1580. void *temp_addr = skb->data;
  1581. /* Undo the skb_reserve(skb,32) we did before
  1582. * giving to hardware, and realign data on
  1583. * a 2-byte boundary.
  1584. */
  1585. skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
  1586. skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
  1587. skb_copy_to_linear_data(skb, temp_addr,
  1588. (unsigned int)len);
  1589. }
  1590. /*
  1591. * This function builds an skb for the given inbound
  1592. * completion. It will be rewritten for readability in the near
  1593. * future, but for not it works well.
  1594. */
  1595. static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
  1596. struct rx_ring *rx_ring,
  1597. struct ib_mac_iocb_rsp *ib_mac_rsp)
  1598. {
  1599. struct bq_desc *lbq_desc;
  1600. struct bq_desc *sbq_desc;
  1601. struct sk_buff *skb = NULL;
  1602. u32 length = le32_to_cpu(ib_mac_rsp->data_len);
  1603. u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
  1604. size_t hlen = ETH_HLEN;
  1605. /*
  1606. * Handle the header buffer if present.
  1607. */
  1608. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
  1609. ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1610. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1611. "Header of %d bytes in small buffer.\n", hdr_len);
  1612. /*
  1613. * Headers fit nicely into a small buffer.
  1614. */
  1615. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1616. pci_unmap_single(qdev->pdev,
  1617. dma_unmap_addr(sbq_desc, mapaddr),
  1618. dma_unmap_len(sbq_desc, maplen),
  1619. PCI_DMA_FROMDEVICE);
  1620. skb = sbq_desc->p.skb;
  1621. ql_realign_skb(skb, hdr_len);
  1622. skb_put(skb, hdr_len);
  1623. sbq_desc->p.skb = NULL;
  1624. }
  1625. /*
  1626. * Handle the data buffer(s).
  1627. */
  1628. if (unlikely(!length)) { /* Is there data too? */
  1629. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1630. "No Data buffer in this packet.\n");
  1631. return skb;
  1632. }
  1633. if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
  1634. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1635. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1636. "Headers in small, data of %d bytes in small, combine them.\n",
  1637. length);
  1638. /*
  1639. * Data is less than small buffer size so it's
  1640. * stuffed in a small buffer.
  1641. * For this case we append the data
  1642. * from the "data" small buffer to the "header" small
  1643. * buffer.
  1644. */
  1645. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1646. pci_dma_sync_single_for_cpu(qdev->pdev,
  1647. dma_unmap_addr
  1648. (sbq_desc, mapaddr),
  1649. dma_unmap_len
  1650. (sbq_desc, maplen),
  1651. PCI_DMA_FROMDEVICE);
  1652. memcpy(skb_put(skb, length),
  1653. sbq_desc->p.skb->data, length);
  1654. pci_dma_sync_single_for_device(qdev->pdev,
  1655. dma_unmap_addr
  1656. (sbq_desc,
  1657. mapaddr),