/* drivers/net/ethernet/brocade/bna/bnad.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Linux network driver for QLogic BR-series Converged Network Adapter.
  4. */
  5. /*
  6. * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  7. * Copyright (c) 2014-2015 QLogic Corporation
  8. * All rights reserved
  9. * www.qlogic.com
  10. */
  11. #include <linux/bitops.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/skbuff.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/in.h>
  16. #include <linux/ethtool.h>
  17. #include <linux/if_vlan.h>
  18. #include <linux/if_ether.h>
  19. #include <linux/ip.h>
  20. #include <linux/prefetch.h>
  21. #include <linux/module.h>
  22. #include "bnad.h"
  23. #include "bna.h"
  24. #include "cna.h"
/* Serializes firmware-image load/unload across adapters. */
static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
/* Non-zero forces INTx interrupts even when MSI-X is available. */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

/* Enable automatic IOC recovery after a heartbeat failure (default on). */
static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

/* Expose driver debug state through debugfs (writable at runtime). */
static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, 0644);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
/* Number of RxQs (large + small) feeding each completion queue. */
static u32 bnad_rxqs_per_cq = 2;
/* Monotonically increasing id assigned to each probed adapter. */
static atomic_t bna_id;
/* Ethernet broadcast address; __aligned(2) for ether_addr_* helpers. */
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
/*
 * Local MACROS
 */
/*
 * Resolve the mailbox IRQ number: the dedicated MSI-X vector when running
 * in MSI-X mode, otherwise the shared PCI INTx line.
 */
#define BNAD_GET_MBOX_IRQ(_bnad) \
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

/*
 * Fill a bna resource-info record describing a request for _num
 * kernel-virtual (KVA) memory chunks of _size bytes each.
 */
#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
do { \
	(_res_info)->res_type = BNA_RES_T_MEM; \
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
	(_res_info)->res_u.mem_info.num = (_num); \
	(_res_info)->res_u.mem_info.len = (_size); \
} while (0)
  60. /*
  61. * Reinitialize completions in CQ, once Rx is taken down
  62. */
  63. static void
  64. bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
  65. {
  66. struct bna_cq_entry *cmpl;
  67. int i;
  68. for (i = 0; i < ccb->q_depth; i++) {
  69. cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
  70. cmpl->valid = 0;
  71. }
  72. }
/* Tx Datapath functions */

/* Caller should ensure that the entry at unmap_q[index] is valid */
/*
 * DMA-unmap one transmitted frame whose unmap state starts at
 * unmap_q[index].  Vector 0 is the skb linear head (mapped with
 * dma_map_single); the remaining nvecs-1 vectors are page fragments.
 * Vectors wrap to the next unmap entry every BFI_TX_MAX_VECTORS_PER_WI.
 * Returns the queue index just past this frame.  Note: the skb itself
 * is NOT freed here — the pointer is cleared and the caller (which read
 * unmap->skb first) is responsible for freeing it.
 */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
		   struct bnad_tx_unmap *unmap_q,
		   u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	/* Detach the skb from the unmap entry; caller owns it now. */
	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	/* Vector 0 is always the skb head. */
	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vectors[0], dma_addr),
			 skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		/* A work item holds at most BFI_TX_MAX_VECTORS_PER_WI
		 * vectors; continue into the next unmap entry.
		 */
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		/* Remaining vectors are page fragments. */
		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			       dma_unmap_len(&unmap->vectors[vector], dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}
  111. /*
  112. * Frees all pending Tx Bufs
  113. * At this point no activity is expected on the Q,
  114. * so DMA unmap & freeing is fine.
  115. */
  116. static void
  117. bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
  118. {
  119. struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
  120. struct sk_buff *skb;
  121. int i;
  122. for (i = 0; i < tcb->q_depth; i++) {
  123. skb = unmap_q[i].skb;
  124. if (!skb)
  125. continue;
  126. bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
  127. dev_kfree_skb_any(skb);
  128. }
  129. }
/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 * b) Sending context
 */
/*
 * Walk the TxQ from the software consumer index up to the hardware
 * consumer index, unmapping and freeing every completed frame.
 * Returns the number of completed packets (0 if the TxQ is stopped).
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	/* Read the device-written consumer index before any dependent
	 * loads of queue entries (rmb pairs with the device's writes).
	 */
	hw_cons = *(tcb->hw_consumer_index);
	rmb();
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	/* Number of completed work items between cons and hw_cons. */
	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	/* Sanity: cannot complete more WIs than are currently in use. */
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		/* Each frame may span several work items. */
		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}
/*
 * Reap Tx completions for one TxQ and wake the netdev queue when enough
 * space has been freed.  BNAD_TXQ_FREE_SENT serializes completion
 * processing between the interrupt path and the cleanup worker; if the
 * bit is already set, another context is reaping and we return 0.
 */
static u32
bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct net_device *netdev = bnad->netdev;
	u32 sent = 0;

	/* Someone else owns the queue right now. */
	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
		return 0;

	sent = bnad_txcmpl_process(bnad, tcb);
	if (sent) {
		/* Wake the stack once enough entries are free again. */
		if (netif_queue_stopped(netdev) &&
		    netif_carrier_ok(netdev) &&
		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
		    BNAD_NETIF_WAKE_THRESHOLD) {
			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
				netif_wake_queue(netdev);
				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
			}
		}
	}

	/* Ack the interrupt block only while the queue is running. */
	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
		bna_ib_ack(tcb->i_dbell, sent);

	/* Order the reap above before releasing BNAD_TXQ_FREE_SENT. */
	smp_mb__before_atomic();
	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);

	return sent;
}
  193. /* MSIX Tx Completion Handler */
  194. static irqreturn_t
  195. bnad_msix_tx(int irq, void *data)
  196. {
  197. struct bna_tcb *tcb = (struct bna_tcb *)data;
  198. struct bnad *bnad = tcb->bnad;
  199. bnad_tx_complete(bnad, tcb);
  200. return IRQ_HANDLED;
  201. }
  202. static inline void
  203. bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
  204. {
  205. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  206. unmap_q->reuse_pi = -1;
  207. unmap_q->alloc_order = -1;
  208. unmap_q->map_size = 0;
  209. unmap_q->type = BNAD_RXBUF_NONE;
  210. }
  211. /* Default is page-based allocation. Multi-buffer support - TBD */
  212. static int
  213. bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
  214. {
  215. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  216. int order;
  217. bnad_rxq_alloc_uninit(bnad, rcb);
  218. order = get_order(rcb->rxq->buffer_size);
  219. unmap_q->type = BNAD_RXBUF_PAGE;
  220. if (bna_is_small_rxq(rcb->id)) {
  221. unmap_q->alloc_order = 0;
  222. unmap_q->map_size = rcb->rxq->buffer_size;
  223. } else {
  224. if (rcb->rxq->multi_buffer) {
  225. unmap_q->alloc_order = 0;
  226. unmap_q->map_size = rcb->rxq->buffer_size;
  227. unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
  228. } else {
  229. unmap_q->alloc_order = order;
  230. unmap_q->map_size =
  231. (rcb->rxq->buffer_size > 2048) ?
  232. PAGE_SIZE << order : 2048;
  233. }
  234. }
  235. BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
  236. return 0;
  237. }
  238. static inline void
  239. bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
  240. {
  241. if (!unmap->page)
  242. return;
  243. dma_unmap_page(&bnad->pcidev->dev,
  244. dma_unmap_addr(&unmap->vector, dma_addr),
  245. unmap->vector.len, DMA_FROM_DEVICE);
  246. put_page(unmap->page);
  247. unmap->page = NULL;
  248. dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
  249. unmap->vector.len = 0;
  250. }
  251. static inline void
  252. bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
  253. {
  254. if (!unmap->skb)
  255. return;
  256. dma_unmap_single(&bnad->pcidev->dev,
  257. dma_unmap_addr(&unmap->vector, dma_addr),
  258. unmap->vector.len, DMA_FROM_DEVICE);
  259. dev_kfree_skb_any(unmap->skb);
  260. unmap->skb = NULL;
  261. dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
  262. unmap->vector.len = 0;
  263. }
  264. static void
  265. bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
  266. {
  267. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  268. int i;
  269. for (i = 0; i < rcb->q_depth; i++) {
  270. struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
  271. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  272. bnad_rxq_cleanup_skb(bnad, unmap);
  273. else
  274. bnad_rxq_cleanup_page(bnad, unmap);
  275. }
  276. bnad_rxq_alloc_uninit(bnad, rcb);
  277. }
/*
 * Post up to 'nalloc' page-based receive buffers to the RxQ.
 * Buffers are map_size-byte slices of (possibly higher-order) pages;
 * unmap_q->reuse_pi remembers a partially consumed page so successive
 * buffers share it until the page is exhausted.  Returns the number of
 * buffers actually posted; rings the doorbell if any were posted and
 * posting is allowed.
 */
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			/* No partial page left: allocate a fresh one. */
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					   unmap_q->alloc_order);
			page_offset = 0;
		} else {
			/* Carve the next slice out of the page posted at
			 * reuse_pi, taking an extra reference for it.
			 */
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;

		/* Remember this entry for reuse if the page still has
		 * room for another slice.
		 */
		page_offset += unmap_q->map_size;
		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		/* Make queue-entry writes visible before the doorbell. */
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}
/*
 * Post up to 'nalloc' skb-based receive buffers to the RxQ.
 * Each buffer is a freshly allocated skb DMA-mapped for device writes.
 * Returns the number of buffers actually posted; rings the doorbell if
 * any were posted and posting is allowed.
 */
static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		/* Make queue-entry writes visible before the doorbell. */
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}
  386. static inline void
  387. bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
  388. {
  389. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  390. u32 to_alloc;
  391. to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
  392. if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
  393. return;
  394. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  395. bnad_rxq_refill_skb(bnad, rcb, to_alloc);
  396. else
  397. bnad_rxq_refill_page(bnad, rcb, to_alloc);
  398. }
/*
 * Completion-flag masks used to decide CHECKSUM_UNNECESSARY: the protocol
 * bits plus the L3/L4 checksum-OK bits, and the four exact combinations
 * (TCP/UDP over IPv4/IPv6) that the hardware validated end-to-end.
 */
#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
				BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
  411. static void
  412. bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
  413. u32 sop_ci, u32 nvecs)
  414. {
  415. struct bnad_rx_unmap_q *unmap_q;
  416. struct bnad_rx_unmap *unmap;
  417. u32 ci, vec;
  418. unmap_q = rcb->unmap_q;
  419. for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
  420. unmap = &unmap_q->unmap[ci];
  421. BNA_QE_INDX_INC(ci, rcb->q_depth);
  422. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  423. bnad_rxq_cleanup_skb(bnad, unmap);
  424. else
  425. bnad_rxq_cleanup_page(bnad, unmap);
  426. }
  427. }
/*
 * Attach 'nvecs' received page buffers to 'skb' as fragments, walking the
 * CQ (producer side) and the RxQ unmap queue (consumer side) in lockstep.
 * Each fragment's length comes from its own completion entry; DMA
 * mappings are released as the pages are handed over to the skb.
 */
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		/* truesize accounts for the whole mapped slice, not
		 * just the bytes the device wrote.
		 */
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		/* Ownership of the page moved to the skb. */
		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}
/*
 * Finish a linear (skb-based) receive: release the DMA mapping, set the
 * data length and protocol, and detach the skb from its unmap entry.
 */
static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			 dma_unmap_addr(&unmap->vector, dma_addr),
			 unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	/* The skb now belongs to the network stack. */
	unmap->skb = NULL;
	unmap->vector.len = 0;
}
/*
 * NAPI poll core: process up to 'budget' frames from the completion
 * queue.  Each frame may span several completions (multi-buffer Rx);
 * errored frames are dropped, good ones are checksum-annotated,
 * VLAN-tagged and handed to the stack.  Consumed RxQ entries are
 * reposted at the end.  Returns the number of frames processed.
 */
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		/* Completion tells us which of the two RxQs it is for. */
		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			/* Frame not fully completed yet: leave it for the
			 * next poll rather than busy-waiting.
			 */
			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
						BNA_CQ_EF_FCS_ERROR |
						BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		/* Claim CHECKSUM_UNNECESSARY only for the exact protocol
		 * combinations the hardware fully validated.
		 */
		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		/* Retire the frame's RxQ entries and invalidate its
		 * completion entries for reuse by the adapter.
		 */
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	/* Replenish both RxQs backing this CQ. */
	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}
  606. static void
  607. bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
  608. {
  609. struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
  610. struct napi_struct *napi = &rx_ctrl->napi;
  611. if (likely(napi_schedule_prep(napi))) {
  612. __napi_schedule(napi);
  613. rx_ctrl->rx_schedule++;
  614. }
  615. }
  616. /* MSIX Rx Path Handler */
  617. static irqreturn_t
  618. bnad_msix_rx(int irq, void *data)
  619. {
  620. struct bna_ccb *ccb = (struct bna_ccb *)data;
  621. if (ccb) {
  622. ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
  623. bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
  624. }
  625. return IRQ_HANDLED;
  626. }
/* Interrupt handlers */

/* Mbox Interrupt Handlers */
/*
 * Dedicated MSI-X mailbox interrupt: dispatch firmware mailbox events.
 * bna_lock serializes against control-path users of the bna layer; if
 * the mailbox IRQ has been logically disabled, the event is ignored.
 */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}
/*
 * Shared INTx interrupt handler: services mailbox events under bna_lock,
 * then (outside the lock) reaps Tx completions and schedules NAPI for
 * every Rx path if the interrupt carried data events.  Returns IRQ_NONE
 * when the interrupt was not ours (disabled or no status bits set).
 */
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Nothing more to do unless the interrupt carried data events. */
	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}
/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
/* Re-allow mailbox interrupt handling (clears the logical-disable bit). */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
/* Logically disable mailbox interrupt handling (handlers early-return). */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}
  714. static void
  715. bnad_set_netdev_perm_addr(struct bnad *bnad)
  716. {
  717. struct net_device *netdev = bnad->netdev;
  718. ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
  719. if (is_zero_ether_addr(netdev->dev_addr))
  720. ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
  721. }
/* Control Path Handlers */

/* Callbacks */
/* bna callback: mailbox interrupt may be handled again. */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

/* bna callback: stop handling mailbox interrupts. */
void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

/* bna callback: IOC-ethernet came up; wake the waiter with success. */
void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

/* bna callback: IOC-ethernet bring-up failed; wake the waiter with failure. */
void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

/* bna callback: IOC-ethernet disable finished; wake the waiter. */
void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}
  752. static void
  753. bnad_cb_enet_disabled(void *arg)
  754. {
  755. struct bnad *bnad = (struct bnad *)arg;
  756. netif_carrier_off(bnad->netdev);
  757. complete(&bnad->bnad_completions.enet_comp);
  758. }
/*
 * bna callback: physical link state changed.  Tracks CEE (DCB) mode
 * toggles, updates carrier state, and on link-up wakes or stops each
 * Tx subqueue according to whether its TxQ is started.
 */
void
bnad_cb_ethport_link_status(struct bnad *bnad,
			    enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	/* Count transitions into and out of CEE mode. */
	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		/* Only act on an actual down -> up transition. */
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;

			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				     tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		/* Only act on an actual up -> down transition. */
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}
  817. static void
  818. bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
  819. {
  820. struct bnad *bnad = (struct bnad *)arg;
  821. complete(&bnad->bnad_completions.tx_comp);
  822. }
  823. static void
  824. bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
  825. {
  826. struct bnad_tx_info *tx_info =
  827. (struct bnad_tx_info *)tcb->txq->tx->priv;
  828. tcb->priv = tcb;
  829. tx_info->tcb[tcb->id] = tcb;
  830. }
  831. static void
  832. bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
  833. {
  834. struct bnad_tx_info *tx_info =
  835. (struct bnad_tx_info *)tcb->txq->tx->priv;
  836. tx_info->tcb[tcb->id] = NULL;
  837. tcb->priv = NULL;
  838. }
  839. static void
  840. bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
  841. {
  842. struct bnad_rx_info *rx_info =
  843. (struct bnad_rx_info *)ccb->cq->rx->priv;
  844. rx_info->rx_ctrl[ccb->id].ccb = ccb;
  845. ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
  846. }
  847. static void
  848. bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
  849. {
  850. struct bnad_rx_info *rx_info =
  851. (struct bnad_rx_info *)ccb->cq->rx->priv;
  852. rx_info->rx_ctrl[ccb->id].ccb = NULL;
  853. }
  854. static void
  855. bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
  856. {
  857. struct bnad_tx_info *tx_info =
  858. (struct bnad_tx_info *)tx->priv;
  859. struct bna_tcb *tcb;
  860. u32 txq_id;
  861. int i;
  862. for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
  863. tcb = tx_info->tcb[i];
  864. if (!tcb)
  865. continue;
  866. txq_id = tcb->id;
  867. clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
  868. netif_stop_subqueue(bnad->netdev, txq_id);
  869. }
  870. }
/*
 * bna callback: Tx object resumed.  Restart every TxQ (which must have
 * been stopped and fully drained), wake the corresponding netdev
 * subqueues if the carrier is up, and re-fetch the permanent MAC if an
 * earlier ioceth enable left it zeroed.
 */
static void
bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
{
	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
	struct bna_tcb *tcb;
	u32 txq_id;
	int i;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;
		txq_id = tcb->id;

		/* Invariants: the queue must not already be started and
		 * must have been drained (hw consumer back at 0).
		 */
		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
		BUG_ON(*(tcb->hw_consumer_index) != 0);

		if (netif_carrier_ok(bnad->netdev)) {
			netif_wake_subqueue(bnad->netdev, txq_id);
			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
		}
	}

	/*
	 * Workaround for first ioceth enable failure & we
	 * get a 0 MAC address. We try to get the MAC address
	 * again here.
	 */
	if (is_zero_ether_addr(bnad->perm_addr)) {
		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
		bnad_set_netdev_perm_addr(bnad);
	}
}
/*
 * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
 */
/*
 * Deferred TxQ cleanup worker.  For each TxQ it tries to take the
 * BNAD_TXQ_FREE_SENT ownership bit; if a completion path holds it, the
 * work re-queues itself (1 ms later) rather than racing.  Only when all
 * queues have been cleaned is the bna Tx state machine notified.
 */
static void
bnad_tx_cleanup(struct delayed_work *work)
{
	struct bnad_tx_info *tx_info =
		container_of(work, struct bnad_tx_info, tx_cleanup_work);
	struct bnad *bnad = NULL;
	struct bna_tcb *tcb;
	unsigned long flags;
	u32 i, pending = 0;

	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
		tcb = tx_info->tcb[i];
		if (!tcb)
			continue;

		bnad = tcb->bnad;

		/* Another context is reaping this queue; retry later. */
		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
			pending++;
			continue;
		}

		bnad_txq_cleanup(bnad, tcb);

		/* Order the cleanup before releasing ownership. */
		smp_mb__before_atomic();
		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
	}

	if (pending) {
		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
				   msecs_to_jiffies(1));
		return;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_cleanup_complete(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
  935. static void
  936. bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
  937. {
  938. struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
  939. struct bna_tcb *tcb;
  940. int i;
  941. for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
  942. tcb = tx_info->tcb[i];
  943. if (!tcb)
  944. continue;
  945. }
  946. queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
  947. }
  948. static void
  949. bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
  950. {
  951. struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
  952. struct bna_ccb *ccb;
  953. struct bnad_rx_ctrl *rx_ctrl;
  954. int i;
  955. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  956. rx_ctrl = &rx_info->rx_ctrl[i];
  957. ccb = rx_ctrl->ccb;
  958. if (!ccb)
  959. continue;
  960. clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
  961. if (ccb->rcb[1])
  962. clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
  963. }
  964. }
  965. /*
  966. * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
  967. */
  968. static void
  969. bnad_rx_cleanup(void *work)
  970. {
  971. struct bnad_rx_info *rx_info =
  972. container_of(work, struct bnad_rx_info, rx_cleanup_work);
  973. struct bnad_rx_ctrl *rx_ctrl;
  974. struct bnad *bnad = NULL;
  975. unsigned long flags;
  976. u32 i;
  977. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  978. rx_ctrl = &rx_info->rx_ctrl[i];
  979. if (!rx_ctrl->ccb)
  980. continue;
  981. bnad = rx_ctrl->ccb->bnad;
  982. /*
  983. * Wait till the poll handler has exited
  984. * and nothing can be scheduled anymore
  985. */
  986. napi_disable(&rx_ctrl->napi);
  987. bnad_cq_cleanup(bnad, rx_ctrl->ccb);
  988. bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
  989. if (rx_ctrl->ccb->rcb[1])
  990. bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
  991. }
  992. spin_lock_irqsave(&bnad->bna_lock, flags);
  993. bna_rx_cleanup_complete(rx_info->rx);
  994. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  995. }
  996. static void
  997. bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
  998. {
  999. struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
  1000. struct bna_ccb *ccb;
  1001. struct bnad_rx_ctrl *rx_ctrl;
  1002. int i;
  1003. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  1004. rx_ctrl = &rx_info->rx_ctrl[i];
  1005. ccb = rx_ctrl->ccb;
  1006. if (!ccb)
  1007. continue;
  1008. clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
  1009. if (ccb->rcb[1])
  1010. clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
  1011. }
  1012. queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
  1013. }
  1014. static void
  1015. bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
  1016. {
  1017. struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
  1018. struct bna_ccb *ccb;
  1019. struct bna_rcb *rcb;
  1020. struct bnad_rx_ctrl *rx_ctrl;
  1021. int i, j;
  1022. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  1023. rx_ctrl = &rx_info->rx_ctrl[i];
  1024. ccb = rx_ctrl->ccb;
  1025. if (!ccb)
  1026. continue;
  1027. napi_enable(&rx_ctrl->napi);
  1028. for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
  1029. rcb = ccb->rcb[j];
  1030. if (!rcb)
  1031. continue;
  1032. bnad_rxq_alloc_init(bnad, rcb);
  1033. set_bit(BNAD_RXQ_STARTED, &rcb->flags);
  1034. set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
  1035. bnad_rxq_post(bnad, rcb);
  1036. }
  1037. }
  1038. }
  1039. static void
  1040. bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
  1041. {
  1042. struct bnad *bnad = (struct bnad *)arg;
  1043. complete(&bnad->bnad_completions.rx_comp);
  1044. }
  1045. static void
  1046. bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
  1047. {
  1048. bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
  1049. complete(&bnad->bnad_completions.mcast_comp);
  1050. }
  1051. void
  1052. bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
  1053. struct bna_stats *stats)
  1054. {
  1055. if (status == BNA_CB_SUCCESS)
  1056. BNAD_UPDATE_CTR(bnad, hw_stats_updates);
  1057. if (!netif_running(bnad->netdev) ||
  1058. !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
  1059. return;
  1060. mod_timer(&bnad->stats_timer,
  1061. jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
  1062. }
  1063. static void
  1064. bnad_cb_enet_mtu_set(struct bnad *bnad)
  1065. {
  1066. bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
  1067. complete(&bnad->bnad_completions.mtu_comp);
  1068. }
  1069. void
  1070. bnad_cb_completion(void *arg, enum bfa_status status)
  1071. {
  1072. struct bnad_iocmd_comp *iocmd_comp =
  1073. (struct bnad_iocmd_comp *)arg;
  1074. iocmd_comp->comp_status = (u32) status;
  1075. complete(&iocmd_comp->comp);
  1076. }
  1077. /* Resource allocation, free functions */
  1078. static void
  1079. bnad_mem_free(struct bnad *bnad,
  1080. struct bna_mem_info *mem_info)
  1081. {
  1082. int i;
  1083. dma_addr_t dma_pa;
  1084. if (mem_info->mdl == NULL)
  1085. return;
  1086. for (i = 0; i < mem_info->num; i++) {
  1087. if (mem_info->mdl[i].kva != NULL) {
  1088. if (mem_info->mem_type == BNA_MEM_T_DMA) {
  1089. BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
  1090. dma_pa);
  1091. dma_free_coherent(&bnad->pcidev->dev,
  1092. mem_info->mdl[i].len,
  1093. mem_info->mdl[i].kva, dma_pa);
  1094. } else
  1095. kfree(mem_info->mdl[i].kva);
  1096. }
  1097. }
  1098. kfree(mem_info->mdl);
  1099. mem_info->mdl = NULL;
  1100. }
  1101. static int
  1102. bnad_mem_alloc(struct bnad *bnad,
  1103. struct bna_mem_info *mem_info)
  1104. {
  1105. int i;
  1106. dma_addr_t dma_pa;
  1107. if ((mem_info->num == 0) || (mem_info->len == 0)) {
  1108. mem_info->mdl = NULL;
  1109. return 0;
  1110. }
  1111. mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
  1112. GFP_KERNEL);
  1113. if (mem_info->mdl == NULL)
  1114. return -ENOMEM;
  1115. if (mem_info->mem_type == BNA_MEM_T_DMA) {
  1116. for (i = 0; i < mem_info->num; i++) {
  1117. mem_info->mdl[i].len = mem_info->len;
  1118. mem_info->mdl[i].kva =
  1119. dma_alloc_coherent(&bnad->pcidev->dev,
  1120. mem_info->len, &dma_pa,
  1121. GFP_KERNEL);
  1122. if (mem_info->mdl[i].kva == NULL)
  1123. goto err_return;
  1124. BNA_SET_DMA_ADDR(dma_pa,
  1125. &(mem_info->mdl[i].dma));
  1126. }
  1127. } else {
  1128. for (i = 0; i < mem_info->num; i++) {
  1129. mem_info->mdl[i].len = mem_info->len;
  1130. mem_info->mdl[i].kva = kzalloc(mem_info->len,
  1131. GFP_KERNEL);
  1132. if (mem_info->mdl[i].kva == NULL)
  1133. goto err_return;
  1134. }
  1135. }
  1136. return 0;
  1137. err_return:
  1138. bnad_mem_free(bnad, mem_info);
  1139. return -ENOMEM;
  1140. }
  1141. /* Free IRQ for Mailbox */
  1142. static void
  1143. bnad_mbox_irq_free(struct bnad *bnad)
  1144. {
  1145. int irq;
  1146. unsigned long flags;
  1147. spin_lock_irqsave(&bnad->bna_lock, flags);
  1148. bnad_disable_mbox_irq(bnad);
  1149. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1150. irq = BNAD_GET_MBOX_IRQ(bnad);
  1151. free_irq(irq, bnad);
  1152. }
  1153. /*
  1154. * Allocates IRQ for Mailbox, but keep it disabled
  1155. * This will be enabled once we get the mbox enable callback
  1156. * from bna
  1157. */
  1158. static int
  1159. bnad_mbox_irq_alloc(struct bnad *bnad)
  1160. {
  1161. int err = 0;
  1162. unsigned long irq_flags, flags;
  1163. u32 irq;
  1164. irq_handler_t irq_handler;
  1165. spin_lock_irqsave(&bnad->bna_lock, flags);
  1166. if (bnad->cfg_flags & BNAD_CF_MSIX) {
  1167. irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
  1168. irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
  1169. irq_flags = 0;
  1170. } else {
  1171. irq_handler = (irq_handler_t)bnad_isr;
  1172. irq = bnad->pcidev->irq;
  1173. irq_flags = IRQF_SHARED;
  1174. }
  1175. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1176. sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
  1177. /*
  1178. * Set the Mbox IRQ disable flag, so that the IRQ handler
  1179. * called from request_irq() for SHARED IRQs do not execute
  1180. */
  1181. set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
  1182. BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
  1183. err = request_irq(irq, irq_handler, irq_flags,
  1184. bnad->mbox_irq_name, bnad);
  1185. return err;
  1186. }
  1187. static void
  1188. bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
  1189. {
  1190. kfree(intr_info->idl);
  1191. intr_info->idl = NULL;
  1192. }
/* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
/*
 * Fill @intr_info for the Tx/Rx object @txrx_id of interrupt source @src.
 * In MSIX mode each queue gets its own vector number; in INT-X mode a
 * single descriptor carries the IB bitmask for the shared line.
 * Returns 0 on success, -ENOMEM if the descriptor list cannot be allocated.
 */
static int
bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
		    u32 txrx_id, struct bna_intr_info *intr_info)
{
	int i, vector_start = 0;
	u32 cfg_flags;
	unsigned long flags;

	/* Snapshot cfg_flags under the BNA lock */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	cfg_flags = bnad->cfg_flags;
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (cfg_flags & BNAD_CF_MSIX) {
		intr_info->intr_type = BNA_INTR_T_MSIX;
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		/*
		 * MSIX vector layout: mailbox vector(s) first, then one
		 * vector per TxQ across all Tx objects, then the Rx (CQ)
		 * vectors.
		 */
		switch (src) {
		case BNAD_INTR_TX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
			break;

		case BNAD_INTR_RX:
			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
				       (bnad->num_tx * bnad->num_txq_per_tx) +
				       txrx_id;
			break;

		default:
			BUG();
		}

		for (i = 0; i < intr_info->num; i++)
			intr_info->idl[i].vector = vector_start + i;
	} else {
		intr_info->intr_type = BNA_INTR_T_INTX;
		intr_info->num = 1;	/* INT-X: everything on one line */
		intr_info->idl = kcalloc(intr_info->num,
					 sizeof(struct bna_intr_descr),
					 GFP_KERNEL);
		if (!intr_info->idl)
			return -ENOMEM;

		/* For INT-X, "vector" carries an IB bitmask, not an IRQ */
		switch (src) {
		case BNAD_INTR_TX:
			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
			break;

		case BNAD_INTR_RX:
			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
			break;
		}
	}
	return 0;
}
  1244. /* NOTE: Should be called for MSIX only
  1245. * Unregisters Tx MSIX vector(s) from the kernel
  1246. */
  1247. static void
  1248. bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
  1249. int num_txqs)
  1250. {
  1251. int i;
  1252. int vector_num;
  1253. for (i = 0; i < num_txqs; i++) {
  1254. if (tx_info->tcb[i] == NULL)
  1255. continue;
  1256. vector_num = tx_info->tcb[i]->intr_vector;
  1257. free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
  1258. }
  1259. }
  1260. /* NOTE: Should be called for MSIX only
  1261. * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
  1262. */
  1263. static int
  1264. bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
  1265. u32 tx_id, int num_txqs)
  1266. {
  1267. int i;
  1268. int err;
  1269. int vector_num;
  1270. for (i = 0; i < num_txqs; i++) {
  1271. vector_num = tx_info->tcb[i]->intr_vector;
  1272. sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
  1273. tx_id + tx_info->tcb[i]->id);
  1274. err = request_irq(bnad->msix_table[vector_num].vector,
  1275. (irq_handler_t)bnad_msix_tx, 0,
  1276. tx_info->tcb[i]->name,
  1277. tx_info->tcb[i]);
  1278. if (err)
  1279. goto err_return;
  1280. }
  1281. return 0;
  1282. err_return:
  1283. if (i > 0)
  1284. bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
  1285. return -1;
  1286. }
  1287. /* NOTE: Should be called for MSIX only
  1288. * Unregisters Rx MSIX vector(s) from the kernel
  1289. */
  1290. static void
  1291. bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
  1292. int num_rxps)
  1293. {
  1294. int i;
  1295. int vector_num;
  1296. for (i = 0; i < num_rxps; i++) {
  1297. if (rx_info->rx_ctrl[i].ccb == NULL)
  1298. continue;
  1299. vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
  1300. free_irq(bnad->msix_table[vector_num].vector,
  1301. rx_info->rx_ctrl[i].ccb);
  1302. }
  1303. }
  1304. /* NOTE: Should be called for MSIX only
  1305. * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
  1306. */
  1307. static int
  1308. bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
  1309. u32 rx_id, int num_rxps)
  1310. {
  1311. int i;
  1312. int err;
  1313. int vector_num;
  1314. for (i = 0; i < num_rxps; i++) {
  1315. vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
  1316. sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
  1317. bnad->netdev->name,
  1318. rx_id + rx_info->rx_ctrl[i].ccb->id);
  1319. err = request_irq(bnad->msix_table[vector_num].vector,
  1320. (irq_handler_t)bnad_msix_rx, 0,
  1321. rx_info->rx_ctrl[i].ccb->name,
  1322. rx_info->rx_ctrl[i].ccb);
  1323. if (err)
  1324. goto err_return;
  1325. }
  1326. return 0;
  1327. err_return:
  1328. if (i > 0)
  1329. bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
  1330. return -1;
  1331. }
  1332. /* Free Tx object Resources */
  1333. static void
  1334. bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
  1335. {
  1336. int i;
  1337. for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
  1338. if (res_info[i].res_type == BNA_RES_T_MEM)
  1339. bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
  1340. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1341. bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
  1342. }
  1343. }
  1344. /* Allocates memory and interrupt resources for Tx object */
  1345. static int
  1346. bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
  1347. u32 tx_id)
  1348. {
  1349. int i, err = 0;
  1350. for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
  1351. if (res_info[i].res_type == BNA_RES_T_MEM)
  1352. err = bnad_mem_alloc(bnad,
  1353. &res_info[i].res_u.mem_info);
  1354. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1355. err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
  1356. &res_info[i].res_u.intr_info);
  1357. if (err)
  1358. goto err_return;
  1359. }
  1360. return 0;
  1361. err_return:
  1362. bnad_tx_res_free(bnad, res_info);
  1363. return err;
  1364. }
  1365. /* Free Rx object Resources */
  1366. static void
  1367. bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
  1368. {
  1369. int i;
  1370. for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
  1371. if (res_info[i].res_type == BNA_RES_T_MEM)
  1372. bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
  1373. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1374. bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
  1375. }
  1376. }
  1377. /* Allocates memory and interrupt resources for Rx object */
  1378. static int
  1379. bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
  1380. uint rx_id)
  1381. {
  1382. int i, err = 0;
  1383. /* All memory needs to be allocated before setup_ccbs */
  1384. for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
  1385. if (res_info[i].res_type == BNA_RES_T_MEM)
  1386. err = bnad_mem_alloc(bnad,
  1387. &res_info[i].res_u.mem_info);
  1388. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1389. err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
  1390. &res_info[i].res_u.intr_info);
  1391. if (err)
  1392. goto err_return;
  1393. }
  1394. return 0;
  1395. err_return:
  1396. bnad_rx_res_free(bnad, res_info);
  1397. return err;
  1398. }
  1399. /* Timer callbacks */
  1400. /* a) IOC timer */
  1401. static void
  1402. bnad_ioc_timeout(struct timer_list *t)
  1403. {
  1404. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
  1405. unsigned long flags;
  1406. spin_lock_irqsave(&bnad->bna_lock, flags);
  1407. bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
  1408. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1409. }
  1410. static void
  1411. bnad_ioc_hb_check(struct timer_list *t)
  1412. {
  1413. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
  1414. unsigned long flags;
  1415. spin_lock_irqsave(&bnad->bna_lock, flags);
  1416. bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
  1417. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1418. }
  1419. static void
  1420. bnad_iocpf_timeout(struct timer_list *t)
  1421. {
  1422. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
  1423. unsigned long flags;
  1424. spin_lock_irqsave(&bnad->bna_lock, flags);
  1425. bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
  1426. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1427. }
  1428. static void
  1429. bnad_iocpf_sem_timeout(struct timer_list *t)
  1430. {
  1431. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
  1432. unsigned long flags;
  1433. spin_lock_irqsave(&bnad->bna_lock, flags);
  1434. bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
  1435. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1436. }
  1437. /*
  1438. * All timer routines use bnad->bna_lock to protect against
  1439. * the following race, which may occur in case of no locking:
  1440. * Time CPU m CPU n
  1441. * 0 1 = test_bit
  1442. * 1 clear_bit
  1443. * 2 del_timer_sync
  1444. * 3 mod_timer
  1445. */
  1446. /* b) Dynamic Interrupt Moderation Timer */
  1447. static void
  1448. bnad_dim_timeout(struct timer_list *t)
  1449. {
  1450. struct bnad *bnad = from_timer(bnad, t, dim_timer);
  1451. struct bnad_rx_info *rx_info;
  1452. struct bnad_rx_ctrl *rx_ctrl;
  1453. int i, j;
  1454. unsigned long flags;
  1455. if (!netif_carrier_ok(bnad->netdev))
  1456. return;
  1457. spin_lock_irqsave(&bnad->bna_lock, flags);
  1458. for (i = 0; i < bnad->num_rx; i++) {
  1459. rx_info = &bnad->rx_info[i];
  1460. if (!rx_info->rx)
  1461. continue;
  1462. for (j = 0; j < bnad->num_rxp_per_rx; j++) {
  1463. rx_ctrl = &rx_info->rx_ctrl[j];
  1464. if (!rx_ctrl->ccb)
  1465. continue;
  1466. bna_rx_dim_update(rx_ctrl->ccb);
  1467. }
  1468. }
  1469. /* Check for BNAD_CF_DIM_ENABLED, does not eleminate a race */
  1470. if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
  1471. mod_timer(&bnad->dim_timer,
  1472. jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
  1473. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1474. }
  1475. /* c) Statistics Timer */
  1476. static void
  1477. bnad_stats_timeout(struct timer_list *t)
  1478. {
  1479. struct bnad *bnad = from_timer(bnad, t, stats_timer);
  1480. unsigned long flags;
  1481. if (!netif_running(bnad->netdev) ||
  1482. !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
  1483. return;
  1484. spin_lock_irqsave(&bnad->bna_lock, flags);
  1485. bna_hw_stats_get(&bnad->bna);
  1486. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1487. }
  1488. /*
  1489. * Set up timer for DIM
  1490. * Called with bnad->bna_lock held
  1491. */
  1492. void
  1493. bnad_dim_timer_start(struct bnad *bnad)
  1494. {
  1495. if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
  1496. !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
  1497. timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
  1498. set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
  1499. mod_timer(&bnad->dim_timer,
  1500. jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
  1501. }
  1502. }
  1503. /*
  1504. * Set up timer for statistics
  1505. * Called with mutex_lock(&bnad->conf_mutex) held
  1506. */
  1507. static void
  1508. bnad_stats_timer_start(struct bnad *bnad)
  1509. {
  1510. unsigned long flags;
  1511. spin_lock_irqsave(&bnad->bna_lock, flags);
  1512. if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
  1513. timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
  1514. mod_timer(&bnad->stats_timer,
  1515. jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
  1516. }
  1517. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1518. }
  1519. /*
  1520. * Stops the stats timer
  1521. * Called with mutex_lock(&bnad->conf_mutex) held
  1522. */
  1523. static void
  1524. bnad_stats_timer_stop(struct bnad *bnad)
  1525. {
  1526. int to_del = 0;
  1527. unsigned long flags;
  1528. spin_lock_irqsave(&bnad->bna_lock, flags);
  1529. if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
  1530. to_del = 1;
  1531. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1532. if (to_del)
  1533. del_timer_sync(&bnad->stats_timer);
  1534. }
/* Utilities */

/*
 * Copy the netdev's multicast addresses into the flat @mc_list array,
 * starting at entry 1 (entry 0 is reserved for the broadcast address,
 * filled in by the caller).
 *
 * NOTE(review): no bound is enforced here; the caller must size @mc_list
 * for netdev_mc_count() + 1 entries — confirm at the call sites.
 */
static void
bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
{
	int i = 1; /* Index 0 has broadcast address */
	struct netdev_hw_addr *mc_addr;

	netdev_for_each_mc_addr(mc_addr, netdev) {
		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
		i++;
	}
}
/*
 * NAPI poll handler for one Rx path.  Processes up to @budget completions.
 * Returning a value equal to @budget keeps the path in polled mode;
 * returning less completes NAPI and re-enables the Rx interrupt.
 */
static int
bnad_napi_poll_rx(struct napi_struct *napi, int budget)
{
	struct bnad_rx_ctrl *rx_ctrl =
		container_of(napi, struct bnad_rx_ctrl, napi);
	struct bnad *bnad = rx_ctrl->bnad;
	int rcvd = 0;

	rx_ctrl->rx_poll_ctr++;

	/* Without carrier there is nothing to process: finish immediately */
	if (!netif_carrier_ok(bnad->netdev))
		goto poll_exit;

	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);

	/* Budget exhausted: stay in polling mode, keep the IRQ masked */
	if (rcvd >= budget)
		return rcvd;

poll_exit:
	napi_complete_done(napi, rcvd);

	rx_ctrl->rx_complete++;

	if (rx_ctrl->ccb)
		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
	return rcvd;
}
  1566. #define BNAD_NAPI_POLL_QUOTA 64
  1567. static void
  1568. bnad_napi_add(struct bnad *bnad, u32 rx_id)
  1569. {
  1570. struct bnad_rx_ctrl *rx_ctrl;
  1571. int i;
  1572. /* Initialize & enable NAPI */
  1573. for (i = 0; i < bnad->num_rxp_per_rx; i++) {
  1574. rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
  1575. netif_napi_add(bnad->netdev, &rx_ctrl->napi,
  1576. bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
  1577. }
  1578. }
  1579. static void
  1580. bnad_napi_delete(struct bnad *bnad, u32 rx_id)
  1581. {
  1582. int i;
  1583. /* First disable and then clean up */
  1584. for (i = 0; i < bnad->num_rxp_per_rx; i++)
  1585. netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
  1586. }
/* Should be held with conf_lock held */
/*
 * Tear down Tx object @tx_id: disable it in hardware, wait for the disable
 * to complete, release its MSIX vectors and free its resources.  The
 * ordering (disable -> wait -> unregister IRQs -> destroy -> free) must
 * be preserved.
 */
void
bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
{
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	unsigned long flags;

	/* Nothing to do if the Tx object was never created */
	if (!tx_info->tx)
		return;

	init_completion(&bnad->bnad_completions.tx_comp);
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	/* bnad_cb_tx_disabled() signals this when the disable finishes */
	wait_for_completion(&bnad->bnad_completions.tx_comp);

	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
		bnad_tx_msix_unregister(bnad, tx_info,
			bnad->num_txq_per_tx);

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	tx_info->tx = NULL;
	tx_info->tx_id = 0;

	bnad_tx_res_free(bnad, res_info);
}
/* Should be held with conf_lock held */
/*
 * Create and enable Tx object @tx_id: size its resource requirements,
 * allocate them, create the BNA Tx object, register per-TxQ MSIX
 * handlers (MSIX mode only) and finally enable the object.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is rolled back.
 */
int
bnad_setup_tx(struct bnad *bnad, u32 tx_id)
{
	int err;
	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
	struct bna_intr_info *intr_info =
			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
	static const struct bna_tx_event_cbfn tx_cbfn = {
		.tcb_setup_cbfn = bnad_cb_tcb_setup,
		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
		.tx_stall_cbfn = bnad_cb_tx_stall,
		.tx_resume_cbfn = bnad_cb_tx_resume,
		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
	};
	struct bna_tx *tx;
	unsigned long flags;

	tx_info->tx_id = tx_id;

	/* Initialize the Tx object configuration */
	tx_config->num_txq = bnad->num_txq_per_tx;
	tx_config->txq_depth = bnad->txq_depth;
	tx_config->tx_type = BNA_TX_T_REGULAR;
	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;

	/* Get BNA's resource requirement for one tx object */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_res_req(bnad->num_txq_per_tx,
		bnad->txq_depth, res_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* Fill Unmap Q memory requirements */
	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
			bnad->txq_depth));

	/* Allocate resources */
	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
	if (err)
		return err;

	/* Ask BNA to create one Tx object, supplying required resources */
	spin_lock_irqsave(&bnad->bna_lock, flags);
	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
			tx_info);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	if (!tx) {
		err = -ENOMEM;
		goto err_return;
	}
	tx_info->tx = tx;

	/* NOTE(review): the work_func_t cast bridges bnad_tx_cleanup()'s
	 * declared prototype — verify the handler signature matches what
	 * the workqueue core invokes.
	 */
	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
			(work_func_t)bnad_tx_cleanup);

	/* Register ISR for the Tx object */
	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
		err = bnad_tx_msix_register(bnad, tx_info,
			tx_id, bnad->num_txq_per_tx);
		if (err)
			goto cleanup_tx;
	}

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_enable(tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return 0;

cleanup_tx:
	spin_lock_irqsave(&bnad->bna_lock, flags);
	bna_tx_destroy(tx_info->tx);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
	tx_info->tx = NULL;
	tx_info->tx_id = 0;
err_return:
	bnad_tx_res_free(bnad, res_info);
	return err;
}
  1682. /* Setup the rx config for bna_rx_create */
  1683. /* bnad decides the configuration */
  1684. static void
  1685. bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
  1686. {
  1687. memset(rx_config, 0, sizeof(*rx_config));
  1688. rx_config->rx_type = BNA_RX_T_REGULAR;
  1689. rx_config->num_paths = bnad->num_rxp_per_rx;
  1690. rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
  1691. if (bnad->num_rxp_per_rx > 1) {
  1692. rx_config->rss_status = BNA_STATUS_T_ENABLED;
  1693. rx_config->rss_config.hash_type =
  1694. (BFI_ENET_RSS_IPV6 |
  1695. BFI_ENET_RSS_IPV6_TCP |
  1696. BFI_ENET_RSS_IPV4 |
  1697. BFI_ENET_RSS_IPV4_TCP);
  1698. rx_config->rss_config.hash_mask =
  1699. bnad->num_rxp_per_rx - 1;
  1700. netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
  1701. sizeof(rx_config->rss_config.toeplitz_hash_key));
  1702. } else {
  1703. rx_config->rss_status = BNA_STATUS_T_DISABLED;
  1704. memset(&rx_config->rss_config, 0,
  1705. sizeof(rx_config->rss_config));
  1706. }
  1707. rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
  1708. rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
  1709. /* BNA_RXP_SINGLE - one data-buffer queue
  1710. * BNA_RXP_SLR - one small-buffer and one large-buffer queues
  1711. * BNA_RXP_HDS - one header-buffer and one data-buffer queues
  1712. */
  1713. /* TODO: configurable param for queue type */
  1714. rx_config->rxp_type = BNA_RXP_SLR;
  1715. if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
  1716. rx_config->frame_size > 4096) {
  1717. /* though size_routing_enable is set in SLR,
  1718. * small packets may get routed to same rxq.
  1719. * set buf_size to 2048 instead of PAGE_SIZE.
  1720. */
  1721. rx_config->q0_buf_size = 2048;
  1722. /* this should be in multiples of 2 */
  1723. rx_config->q0_num_vecs = 4;
  1724. rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
  1725. rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
  1726. } else {
  1727. rx_config->q0_buf_size = rx_config->frame_size;
  1728. rx_config->q0_num_vecs = 1;
  1729. rx_config->q0_depth