
/drivers/net/ethernet/brocade/bna/bnad.c

http://github.com/mirrors/linux
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Linux network driver for QLogic BR-series Converged Network Adapter.
  4. */
  5. /*
  6. * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
  7. * Copyright (c) 2014-2015 QLogic Corporation
  8. * All rights reserved
  9. * www.qlogic.com
  10. */
  11. #include <linux/bitops.h>
  12. #include <linux/netdevice.h>
  13. #include <linux/skbuff.h>
  14. #include <linux/etherdevice.h>
  15. #include <linux/in.h>
  16. #include <linux/ethtool.h>
  17. #include <linux/if_vlan.h>
  18. #include <linux/if_ether.h>
  19. #include <linux/ip.h>
  20. #include <linux/prefetch.h>
  21. #include <linux/module.h>
  22. #include "bnad.h"
  23. #include "bna.h"
  24. #include "cna.h"
  25. static DEFINE_MUTEX(bnad_fwimg_mutex);
  26. /*
  27. * Module params
  28. */
  29. static uint bnad_msix_disable;
  30. module_param(bnad_msix_disable, uint, 0444);
  31. MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");
  32. static uint bnad_ioc_auto_recover = 1;
  33. module_param(bnad_ioc_auto_recover, uint, 0444);
  34. MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
  35. static uint bna_debugfs_enable = 1;
  36. module_param(bna_debugfs_enable, uint, 0644);
  37. MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
  38. " Range[false:0|true:1]");
  39. /*
  40. * Global variables
  41. */
  42. static u32 bnad_rxqs_per_cq = 2;
  43. static atomic_t bna_id;
  44. static const u8 bnad_bcast_addr[] __aligned(2) =
  45. { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
  46. /*
  47. * Local MACROS
  48. */
  49. #define BNAD_GET_MBOX_IRQ(_bnad) \
  50. (((_bnad)->cfg_flags & BNAD_CF_MSIX) ? \
  51. ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
  52. ((_bnad)->pcidev->irq))
  53. #define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size) \
  54. do { \
  55. (_res_info)->res_type = BNA_RES_T_MEM; \
  56. (_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA; \
  57. (_res_info)->res_u.mem_info.num = (_num); \
  58. (_res_info)->res_u.mem_info.len = (_size); \
  59. } while (0)
  60. /*
  61. * Reinitialize completions in CQ, once Rx is taken down
  62. */
  63. static void
  64. bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
  65. {
  66. struct bna_cq_entry *cmpl;
  67. int i;
  68. for (i = 0; i < ccb->q_depth; i++) {
  69. cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
  70. cmpl->valid = 0;
  71. }
  72. }
  73. /* Tx Datapath functions */
  74. /* Caller should ensure that the entry at unmap_q[index] is valid */
  75. static u32
  76. bnad_tx_buff_unmap(struct bnad *bnad,
  77. struct bnad_tx_unmap *unmap_q,
  78. u32 q_depth, u32 index)
  79. {
  80. struct bnad_tx_unmap *unmap;
  81. struct sk_buff *skb;
  82. int vector, nvecs;
  83. unmap = &unmap_q[index];
  84. nvecs = unmap->nvecs;
  85. skb = unmap->skb;
  86. unmap->skb = NULL;
  87. unmap->nvecs = 0;
  88. dma_unmap_single(&bnad->pcidev->dev,
  89. dma_unmap_addr(&unmap->vectors[0], dma_addr),
  90. skb_headlen(skb), DMA_TO_DEVICE);
  91. dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
  92. nvecs--;
  93. vector = 0;
  94. while (nvecs) {
  95. vector++;
  96. if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
  97. vector = 0;
  98. BNA_QE_INDX_INC(index, q_depth);
  99. unmap = &unmap_q[index];
  100. }
  101. dma_unmap_page(&bnad->pcidev->dev,
  102. dma_unmap_addr(&unmap->vectors[vector], dma_addr),
  103. dma_unmap_len(&unmap->vectors[vector], dma_len),
  104. DMA_TO_DEVICE);
  105. dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
  106. nvecs--;
  107. }
  108. BNA_QE_INDX_INC(index, q_depth);
  109. return index;
  110. }
  111. /*
  112. * Frees all pending Tx Bufs
  113. * At this point no activity is expected on the Q,
  114. * so DMA unmap & freeing is fine.
  115. */
  116. static void
  117. bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
  118. {
  119. struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
  120. struct sk_buff *skb;
  121. int i;
  122. for (i = 0; i < tcb->q_depth; i++) {
  123. skb = unmap_q[i].skb;
  124. if (!skb)
  125. continue;
  126. bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
  127. dev_kfree_skb_any(skb);
  128. }
  129. }
  130. /*
  131. * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
  132. * Can be called in a) Interrupt context
  133. * b) Sending context
  134. */
  135. static u32
  136. bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
  137. {
  138. u32 sent_packets = 0, sent_bytes = 0;
  139. u32 wis, unmap_wis, hw_cons, cons, q_depth;
  140. struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
  141. struct bnad_tx_unmap *unmap;
  142. struct sk_buff *skb;
  143. /* Just return if TX is stopped */
  144. if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
  145. return 0;
  146. hw_cons = *(tcb->hw_consumer_index);
  147. rmb();
  148. cons = tcb->consumer_index;
  149. q_depth = tcb->q_depth;
  150. wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
  151. BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));
  152. while (wis) {
  153. unmap = &unmap_q[cons];
  154. skb = unmap->skb;
  155. sent_packets++;
  156. sent_bytes += skb->len;
  157. unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
  158. wis -= unmap_wis;
  159. cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
  160. dev_kfree_skb_any(skb);
  161. }
  162. /* Update consumer pointers. */
  163. tcb->consumer_index = hw_cons;
  164. tcb->txq->tx_packets += sent_packets;
  165. tcb->txq->tx_bytes += sent_bytes;
  166. return sent_packets;
  167. }
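/* Reclaim completed Tx buffers, wake the netdev queue if it was stopped, and ack the IB doorbell */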
  168. static u32
  169. bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
  170. {
  171. struct net_device *netdev = bnad->netdev;
  172. u32 sent = 0;
  173. if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
  174. return 0;
  175. sent = bnad_txcmpl_process(bnad, tcb);
  176. if (sent) {
  177. if (netif_queue_stopped(netdev) &&
  178. netif_carrier_ok(netdev) &&
  179. BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
  180. BNAD_NETIF_WAKE_THRESHOLD) {
  181. if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
  182. netif_wake_queue(netdev);
  183. BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
  184. }
  185. }
  186. }
  187. if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
  188. bna_ib_ack(tcb->i_dbell, sent);
  189. smp_mb__before_atomic();
  190. clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
  191. return sent;
  192. }
  193. /* MSIX Tx Completion Handler */
  194. static irqreturn_t
  195. bnad_msix_tx(int irq, void *data)
  196. {
  197. struct bna_tcb *tcb = (struct bna_tcb *)data;
  198. struct bnad *bnad = tcb->bnad;
  199. bnad_tx_complete(bnad, tcb);
  200. return IRQ_HANDLED;
  201. }
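/* Reset the Rx unmap queue allocation state (reuse index, alloc order, map size, buffer type) */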
  202. static inline void
  203. bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
  204. {
  205. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  206. unmap_q->reuse_pi = -1;
  207. unmap_q->alloc_order = -1;
  208. unmap_q->map_size = 0;
  209. unmap_q->type = BNAD_RXBUF_NONE;
  210. }
  211. /* Default is page-based allocation. Multi-buffer support - TBD */
  212. static int
  213. bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
  214. {
  215. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  216. int order;
  217. bnad_rxq_alloc_uninit(bnad, rcb);
  218. order = get_order(rcb->rxq->buffer_size);
  219. unmap_q->type = BNAD_RXBUF_PAGE;
  220. if (bna_is_small_rxq(rcb->id)) {
  221. unmap_q->alloc_order = 0;
  222. unmap_q->map_size = rcb->rxq->buffer_size;
  223. } else {
  224. if (rcb->rxq->multi_buffer) {
  225. unmap_q->alloc_order = 0;
  226. unmap_q->map_size = rcb->rxq->buffer_size;
  227. unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
  228. } else {
  229. unmap_q->alloc_order = order;
  230. unmap_q->map_size =
  231. (rcb->rxq->buffer_size > 2048) ?
  232. PAGE_SIZE << order : 2048;
  233. }
  234. }
  235. BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
  236. return 0;
  237. }
  238. static inline void
  239. bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
  240. {
  241. if (!unmap->page)
  242. return;
  243. dma_unmap_page(&bnad->pcidev->dev,
  244. dma_unmap_addr(&unmap->vector, dma_addr),
  245. unmap->vector.len, DMA_FROM_DEVICE);
  246. put_page(unmap->page);
  247. unmap->page = NULL;
  248. dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
  249. unmap->vector.len = 0;
  250. }
  251. static inline void
  252. bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
  253. {
  254. if (!unmap->skb)
  255. return;
  256. dma_unmap_single(&bnad->pcidev->dev,
  257. dma_unmap_addr(&unmap->vector, dma_addr),
  258. unmap->vector.len, DMA_FROM_DEVICE);
  259. dev_kfree_skb_any(unmap->skb);
  260. unmap->skb = NULL;
  261. dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
  262. unmap->vector.len = 0;
  263. }
  264. static void
  265. bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
  266. {
  267. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  268. int i;
  269. for (i = 0; i < rcb->q_depth; i++) {
  270. struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];
  271. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  272. bnad_rxq_cleanup_skb(bnad, unmap);
  273. else
  274. bnad_rxq_cleanup_page(bnad, unmap);
  275. }
  276. bnad_rxq_alloc_uninit(bnad, rcb);
  277. }
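/* Allocate and DMA-map page fragments for up to 'nalloc' RxQ entries; ring the producer doorbell if any were posted */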
  278. static u32
  279. bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
  280. {
  281. u32 alloced, prod, q_depth;
  282. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  283. struct bnad_rx_unmap *unmap, *prev;
  284. struct bna_rxq_entry *rxent;
  285. struct page *page;
  286. u32 page_offset, alloc_size;
  287. dma_addr_t dma_addr;
  288. prod = rcb->producer_index;
  289. q_depth = rcb->q_depth;
  290. alloc_size = PAGE_SIZE << unmap_q->alloc_order;
  291. alloced = 0;
  292. while (nalloc--) {
  293. unmap = &unmap_q->unmap[prod];
  294. if (unmap_q->reuse_pi < 0) {
  295. page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
  296. unmap_q->alloc_order);
  297. page_offset = 0;
  298. } else {
  299. prev = &unmap_q->unmap[unmap_q->reuse_pi];
  300. page = prev->page;
  301. page_offset = prev->page_offset + unmap_q->map_size;
  302. get_page(page);
  303. }
  304. if (unlikely(!page)) {
  305. BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
  306. rcb->rxq->rxbuf_alloc_failed++;
  307. goto finishing;
  308. }
  309. dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
  310. unmap_q->map_size, DMA_FROM_DEVICE);
  311. if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
  312. put_page(page);
  313. BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
  314. rcb->rxq->rxbuf_map_failed++;
  315. goto finishing;
  316. }
  317. unmap->page = page;
  318. unmap->page_offset = page_offset;
  319. dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
  320. unmap->vector.len = unmap_q->map_size;
  321. page_offset += unmap_q->map_size;
  322. if (page_offset < alloc_size)
  323. unmap_q->reuse_pi = prod;
  324. else
  325. unmap_q->reuse_pi = -1;
  326. rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
  327. BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
  328. BNA_QE_INDX_INC(prod, q_depth);
  329. alloced++;
  330. }
  331. finishing:
  332. if (likely(alloced)) {
  333. rcb->producer_index = prod;
  334. smp_mb();
  335. if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
  336. bna_rxq_prod_indx_doorbell(rcb);
  337. }
  338. return alloced;
  339. }
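/* Allocate and DMA-map skbs for up to 'nalloc' RxQ entries; ring the producer doorbell if any were posted */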
  340. static u32
  341. bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
  342. {
  343. u32 alloced, prod, q_depth, buff_sz;
  344. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  345. struct bnad_rx_unmap *unmap;
  346. struct bna_rxq_entry *rxent;
  347. struct sk_buff *skb;
  348. dma_addr_t dma_addr;
  349. buff_sz = rcb->rxq->buffer_size;
  350. prod = rcb->producer_index;
  351. q_depth = rcb->q_depth;
  352. alloced = 0;
  353. while (nalloc--) {
  354. unmap = &unmap_q->unmap[prod];
  355. skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
  356. if (unlikely(!skb)) {
  357. BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
  358. rcb->rxq->rxbuf_alloc_failed++;
  359. goto finishing;
  360. }
  361. dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
  362. buff_sz, DMA_FROM_DEVICE);
  363. if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
  364. dev_kfree_skb_any(skb);
  365. BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
  366. rcb->rxq->rxbuf_map_failed++;
  367. goto finishing;
  368. }
  369. unmap->skb = skb;
  370. dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
  371. unmap->vector.len = buff_sz;
  372. rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
  373. BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
  374. BNA_QE_INDX_INC(prod, q_depth);
  375. alloced++;
  376. }
  377. finishing:
  378. if (likely(alloced)) {
  379. rcb->producer_index = prod;
  380. smp_mb();
  381. if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
  382. bna_rxq_prod_indx_doorbell(rcb);
  383. }
  384. return alloced;
  385. }
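/* Refill the RxQ, but only once the free entry count crosses the refill threshold */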
  386. static inline void
  387. bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
  388. {
  389. struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
  390. u32 to_alloc;
  391. to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
  392. if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
  393. return;
  394. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  395. bnad_rxq_refill_skb(bnad, rcb, to_alloc);
  396. else
  397. bnad_rxq_refill_page(bnad, rcb, to_alloc);
  398. }
  399. #define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
  400. BNA_CQ_EF_IPV6 | \
  401. BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
  402. BNA_CQ_EF_L4_CKSUM_OK)
  403. #define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
  404. BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
  405. #define flags_tcp6 (BNA_CQ_EF_IPV6 | \
  406. BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
  407. #define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
  408. BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
  409. #define flags_udp6 (BNA_CQ_EF_IPV6 | \
  410. BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
  411. static void
  412. bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
  413. u32 sop_ci, u32 nvecs)
  414. {
  415. struct bnad_rx_unmap_q *unmap_q;
  416. struct bnad_rx_unmap *unmap;
  417. u32 ci, vec;
  418. unmap_q = rcb->unmap_q;
  419. for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
  420. unmap = &unmap_q->unmap[ci];
  421. BNA_QE_INDX_INC(ci, rcb->q_depth);
  422. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  423. bnad_rxq_cleanup_skb(bnad, unmap);
  424. else
  425. bnad_rxq_cleanup_page(bnad, unmap);
  426. }
  427. }
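/* Unmap the received page fragments for this completion and attach them to the GRO skb */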
  428. static void
  429. bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
  430. {
  431. struct bna_rcb *rcb;
  432. struct bnad *bnad;
  433. struct bnad_rx_unmap_q *unmap_q;
  434. struct bna_cq_entry *cq, *cmpl;
  435. u32 ci, pi, totlen = 0;
  436. cq = ccb->sw_q;
  437. pi = ccb->producer_index;
  438. cmpl = &cq[pi];
  439. rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
  440. unmap_q = rcb->unmap_q;
  441. bnad = rcb->bnad;
  442. ci = rcb->consumer_index;
  443. /* prefetch header */
  444. prefetch(page_address(unmap_q->unmap[ci].page) +
  445. unmap_q->unmap[ci].page_offset);
  446. while (nvecs--) {
  447. struct bnad_rx_unmap *unmap;
  448. u32 len;
  449. unmap = &unmap_q->unmap[ci];
  450. BNA_QE_INDX_INC(ci, rcb->q_depth);
  451. dma_unmap_page(&bnad->pcidev->dev,
  452. dma_unmap_addr(&unmap->vector, dma_addr),
  453. unmap->vector.len, DMA_FROM_DEVICE);
  454. len = ntohs(cmpl->length);
  455. skb->truesize += unmap->vector.len;
  456. totlen += len;
  457. skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
  458. unmap->page, unmap->page_offset, len);
  459. unmap->page = NULL;
  460. unmap->vector.len = 0;
  461. BNA_QE_INDX_INC(pi, ccb->q_depth);
  462. cmpl = &cq[pi];
  463. }
  464. skb->len += totlen;
  465. skb->data_len += totlen;
  466. }
  467. static inline void
  468. bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
  469. struct bnad_rx_unmap *unmap, u32 len)
  470. {
  471. prefetch(skb->data);
  472. dma_unmap_single(&bnad->pcidev->dev,
  473. dma_unmap_addr(&unmap->vector, dma_addr),
  474. unmap->vector.len, DMA_FROM_DEVICE);
  475. skb_put(skb, len);
  476. skb->protocol = eth_type_trans(skb, bnad->netdev);
  477. unmap->skb = NULL;
  478. unmap->vector.len = 0;
  479. }
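/* Process up to 'budget' Rx completions: validate them, hand packets to the stack and replenish the RxQs */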
  480. static u32
  481. bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
  482. {
  483. struct bna_cq_entry *cq, *cmpl, *next_cmpl;
  484. struct bna_rcb *rcb = NULL;
  485. struct bnad_rx_unmap_q *unmap_q;
  486. struct bnad_rx_unmap *unmap = NULL;
  487. struct sk_buff *skb = NULL;
  488. struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
  489. struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
  490. u32 packets = 0, len = 0, totlen = 0;
  491. u32 pi, vec, sop_ci = 0, nvecs = 0;
  492. u32 flags, masked_flags;
  493. prefetch(bnad->netdev);
  494. cq = ccb->sw_q;
  495. while (packets < budget) {
  496. cmpl = &cq[ccb->producer_index];
  497. if (!cmpl->valid)
  498. break;
  499. /* The 'valid' field is set by the adapter, only after writing
  500. * the other fields of completion entry. Hence, do not load
  501. * other fields of completion entry *before* the 'valid' is
  502. * loaded. Adding the rmb() here prevents the compiler and/or
  503. * CPU from reordering the reads which would potentially result
  504. * in reading stale values in completion entry.
  505. */
  506. rmb();
  507. BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));
  508. if (bna_is_small_rxq(cmpl->rxq_id))
  509. rcb = ccb->rcb[1];
  510. else
  511. rcb = ccb->rcb[0];
  512. unmap_q = rcb->unmap_q;
  513. /* start of packet ci */
  514. sop_ci = rcb->consumer_index;
  515. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
  516. unmap = &unmap_q->unmap[sop_ci];
  517. skb = unmap->skb;
  518. } else {
  519. skb = napi_get_frags(&rx_ctrl->napi);
  520. if (unlikely(!skb))
  521. break;
  522. }
  523. prefetch(skb);
  524. flags = ntohl(cmpl->flags);
  525. len = ntohs(cmpl->length);
  526. totlen = len;
  527. nvecs = 1;
  528. /* Check all the completions for this frame.
  529. * busy-wait doesn't help much, break here.
  530. */
  531. if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
  532. (flags & BNA_CQ_EF_EOP) == 0) {
  533. pi = ccb->producer_index;
  534. do {
  535. BNA_QE_INDX_INC(pi, ccb->q_depth);
  536. next_cmpl = &cq[pi];
  537. if (!next_cmpl->valid)
  538. break;
  539. /* The 'valid' field is set by the adapter, only
  540. * after writing the other fields of completion
  541. * entry. Hence, do not load other fields of
  542. * completion entry *before* the 'valid' is
  543. * loaded. Adding the rmb() here prevents the
  544. * compiler and/or CPU from reordering the reads
  545. * which would potentially result in reading
  546. * stale values in completion entry.
  547. */
  548. rmb();
  549. len = ntohs(next_cmpl->length);
  550. flags = ntohl(next_cmpl->flags);
  551. nvecs++;
  552. totlen += len;
  553. } while ((flags & BNA_CQ_EF_EOP) == 0);
  554. if (!next_cmpl->valid)
  555. break;
  556. }
  557. packets++;
  558. /* TODO: BNA_CQ_EF_LOCAL ? */
  559. if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
  560. BNA_CQ_EF_FCS_ERROR |
  561. BNA_CQ_EF_TOO_LONG))) {
  562. bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
  563. rcb->rxq->rx_packets_with_error++;
  564. goto next;
  565. }
  566. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  567. bnad_cq_setup_skb(bnad, skb, unmap, len);
  568. else
  569. bnad_cq_setup_skb_frags(ccb, skb, nvecs);
  570. rcb->rxq->rx_packets++;
  571. rcb->rxq->rx_bytes += totlen;
  572. ccb->bytes_per_intr += totlen;
  573. masked_flags = flags & flags_cksum_prot_mask;
  574. if (likely
  575. ((bnad->netdev->features & NETIF_F_RXCSUM) &&
  576. ((masked_flags == flags_tcp4) ||
  577. (masked_flags == flags_udp4) ||
  578. (masked_flags == flags_tcp6) ||
  579. (masked_flags == flags_udp6))))
  580. skb->ip_summed = CHECKSUM_UNNECESSARY;
  581. else
  582. skb_checksum_none_assert(skb);
  583. if ((flags & BNA_CQ_EF_VLAN) &&
  584. (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
  585. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
  586. if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
  587. netif_receive_skb(skb);
  588. else
  589. napi_gro_frags(&rx_ctrl->napi);
  590. next:
  591. BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
  592. for (vec = 0; vec < nvecs; vec++) {
  593. cmpl = &cq[ccb->producer_index];
  594. cmpl->valid = 0;
  595. BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
  596. }
  597. }
  598. napi_gro_flush(&rx_ctrl->napi, false);
  599. if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
  600. bna_ib_ack_disable_irq(ccb->i_dbell, packets);
  601. bnad_rxq_post(bnad, ccb->rcb[0]);
  602. if (ccb->rcb[1])
  603. bnad_rxq_post(bnad, ccb->rcb[1]);
  604. return packets;
  605. }
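/* Schedule NAPI polling for this CCB, if it is not already scheduled */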
  606. static void
  607. bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
  608. {
  609. struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
  610. struct napi_struct *napi = &rx_ctrl->napi;
  611. if (likely(napi_schedule_prep(napi))) {
  612. __napi_schedule(napi);
  613. rx_ctrl->rx_schedule++;
  614. }
  615. }
  616. /* MSIX Rx Path Handler */
  617. static irqreturn_t
  618. bnad_msix_rx(int irq, void *data)
  619. {
  620. struct bna_ccb *ccb = (struct bna_ccb *)data;
  621. if (ccb) {
  622. ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
  623. bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
  624. }
  625. return IRQ_HANDLED;
  626. }
  627. /* Interrupt handlers */
  628. /* Mbox Interrupt Handlers */
  629. static irqreturn_t
  630. bnad_msix_mbox_handler(int irq, void *data)
  631. {
  632. u32 intr_status;
  633. unsigned long flags;
  634. struct bnad *bnad = (struct bnad *)data;
  635. spin_lock_irqsave(&bnad->bna_lock, flags);
  636. if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
  637. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  638. return IRQ_HANDLED;
  639. }
  640. bna_intr_status_get(&bnad->bna, intr_status);
  641. if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
  642. bna_mbox_handler(&bnad->bna, intr_status);
  643. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  644. return IRQ_HANDLED;
  645. }
  646. static irqreturn_t
  647. bnad_isr(int irq, void *data)
  648. {
  649. int i, j;
  650. u32 intr_status;
  651. unsigned long flags;
  652. struct bnad *bnad = (struct bnad *)data;
  653. struct bnad_rx_info *rx_info;
  654. struct bnad_rx_ctrl *rx_ctrl;
  655. struct bna_tcb *tcb = NULL;
  656. spin_lock_irqsave(&bnad->bna_lock, flags);
  657. if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
  658. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  659. return IRQ_NONE;
  660. }
  661. bna_intr_status_get(&bnad->bna, intr_status);
  662. if (unlikely(!intr_status)) {
  663. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  664. return IRQ_NONE;
  665. }
  666. if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
  667. bna_mbox_handler(&bnad->bna, intr_status);
  668. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  669. if (!BNA_IS_INTX_DATA_INTR(intr_status))
  670. return IRQ_HANDLED;
  671. /* Process data interrupts */
  672. /* Tx processing */
  673. for (i = 0; i < bnad->num_tx; i++) {
  674. for (j = 0; j < bnad->num_txq_per_tx; j++) {
  675. tcb = bnad->tx_info[i].tcb[j];
  676. if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
  677. bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
  678. }
  679. }
  680. /* Rx processing */
  681. for (i = 0; i < bnad->num_rx; i++) {
  682. rx_info = &bnad->rx_info[i];
  683. if (!rx_info->rx)
  684. continue;
  685. for (j = 0; j < bnad->num_rxp_per_rx; j++) {
  686. rx_ctrl = &rx_info->rx_ctrl[j];
  687. if (rx_ctrl->ccb)
  688. bnad_netif_rx_schedule_poll(bnad,
  689. rx_ctrl->ccb);
  690. }
  691. }
  692. return IRQ_HANDLED;
  693. }
  694. /*
  695. * Called in interrupt / callback context
  696. * with bna_lock held, so cfg_flags access is OK
  697. */
  698. static void
  699. bnad_enable_mbox_irq(struct bnad *bnad)
  700. {
  701. clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
  702. BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
  703. }
  704. /*
705. * Called with bnad->bna_lock held because of
706. * bnad->cfg_flags access.
  707. */
  708. static void
  709. bnad_disable_mbox_irq(struct bnad *bnad)
  710. {
  711. set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
  712. BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
  713. }
  714. static void
  715. bnad_set_netdev_perm_addr(struct bnad *bnad)
  716. {
  717. struct net_device *netdev = bnad->netdev;
  718. ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
  719. if (is_zero_ether_addr(netdev->dev_addr))
  720. ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
  721. }
  722. /* Control Path Handlers */
  723. /* Callbacks */
  724. void
  725. bnad_cb_mbox_intr_enable(struct bnad *bnad)
  726. {
  727. bnad_enable_mbox_irq(bnad);
  728. }
  729. void
  730. bnad_cb_mbox_intr_disable(struct bnad *bnad)
  731. {
  732. bnad_disable_mbox_irq(bnad);
  733. }
  734. void
  735. bnad_cb_ioceth_ready(struct bnad *bnad)
  736. {
  737. bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
  738. complete(&bnad->bnad_completions.ioc_comp);
  739. }
  740. void
  741. bnad_cb_ioceth_failed(struct bnad *bnad)
  742. {
  743. bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
  744. complete(&bnad->bnad_completions.ioc_comp);
  745. }
  746. void
  747. bnad_cb_ioceth_disabled(struct bnad *bnad)
  748. {
  749. bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
  750. complete(&bnad->bnad_completions.ioc_comp);
  751. }
  752. static void
  753. bnad_cb_enet_disabled(void *arg)
  754. {
  755. struct bnad *bnad = (struct bnad *)arg;
  756. netif_carrier_off(bnad->netdev);
  757. complete(&bnad->bnad_completions.enet_comp);
  758. }
  759. void
  760. bnad_cb_ethport_link_status(struct bnad *bnad,
  761. enum bna_link_status link_status)
  762. {
  763. bool link_up = false;
  764. link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
  765. if (link_status == BNA_CEE_UP) {
  766. if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
  767. BNAD_UPDATE_CTR(bnad, cee_toggle);
  768. set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
  769. } else {
  770. if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
  771. BNAD_UPDATE_CTR(bnad, cee_toggle);
  772. clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
  773. }
  774. if (link_up) {
  775. if (!netif_carrier_ok(bnad->netdev)) {
  776. uint tx_id, tcb_id;
  777. netdev_info(bnad->netdev, "link up\n");
  778. netif_carrier_on(bnad->netdev);
  779. BNAD_UPDATE_CTR(bnad, link_toggle);
  780. for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
  781. for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
  782. tcb_id++) {
  783. struct bna_tcb *tcb =
  784. bnad->tx_info[tx_id].tcb[tcb_id];
  785. u32 txq_id;
  786. if (!tcb)
  787. continue;
  788. txq_id = tcb->id;
  789. if (test_bit(BNAD_TXQ_TX_STARTED,
  790. &tcb->flags)) {
  791. /*
  792. * Force an immediate
  793. * Transmit Schedule */
  794. netif_wake_subqueue(
  795. bnad->netdev,
  796. txq_id);
  797. BNAD_UPDATE_CTR(bnad,
  798. netif_queue_wakeup);
  799. } else {
  800. netif_stop_subqueue(
  801. bnad->netdev,
  802. txq_id);
  803. BNAD_UPDATE_CTR(bnad,
  804. netif_queue_stop);
  805. }
  806. }
  807. }
  808. }
  809. } else {
  810. if (netif_carrier_ok(bnad->netdev)) {
  811. netdev_info(bnad->netdev, "link down\n");
  812. netif_carrier_off(bnad->netdev);
  813. BNAD_UPDATE_CTR(bnad, link_toggle);
  814. }
  815. }
  816. }
  817. static void
  818. bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
  819. {
  820. struct bnad *bnad = (struct bnad *)arg;
  821. complete(&bnad->bnad_completions.tx_comp);
  822. }
  823. static void
  824. bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
  825. {
  826. struct bnad_tx_info *tx_info =
  827. (struct bnad_tx_info *)tcb->txq->tx->priv;
  828. tcb->priv = tcb;
  829. tx_info->tcb[tcb->id] = tcb;
  830. }
  831. static void
  832. bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
  833. {
  834. struct bnad_tx_info *tx_info =
  835. (struct bnad_tx_info *)tcb->txq->tx->priv;
  836. tx_info->tcb[tcb->id] = NULL;
  837. tcb->priv = NULL;
  838. }
  839. static void
  840. bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
  841. {
  842. struct bnad_rx_info *rx_info =
  843. (struct bnad_rx_info *)ccb->cq->rx->priv;
  844. rx_info->rx_ctrl[ccb->id].ccb = ccb;
  845. ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
  846. }
  847. static void
  848. bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
  849. {
  850. struct bnad_rx_info *rx_info =
  851. (struct bnad_rx_info *)ccb->cq->rx->priv;
  852. rx_info->rx_ctrl[ccb->id].ccb = NULL;
  853. }
  854. static void
  855. bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
  856. {
  857. struct bnad_tx_info *tx_info =
  858. (struct bnad_tx_info *)tx->priv;
  859. struct bna_tcb *tcb;
  860. u32 txq_id;
  861. int i;
  862. for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
  863. tcb = tx_info->tcb[i];
  864. if (!tcb)
  865. continue;
  866. txq_id = tcb->id;
  867. clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
  868. netif_stop_subqueue(bnad->netdev, txq_id);
  869. }
  870. }
  871. static void
  872. bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
  873. {
  874. struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
  875. struct bna_tcb *tcb;
  876. u32 txq_id;
  877. int i;
  878. for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
  879. tcb = tx_info->tcb[i];
  880. if (!tcb)
  881. continue;
  882. txq_id = tcb->id;
  883. BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
  884. set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
  885. BUG_ON(*(tcb->hw_consumer_index) != 0);
  886. if (netif_carrier_ok(bnad->netdev)) {
  887. netif_wake_subqueue(bnad->netdev, txq_id);
  888. BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
  889. }
  890. }
  891. /*
892. * Workaround: the first ioceth enable may fail and leave us
893. * with a zero MAC address. Try to get the MAC address
894. * again here.
  895. */
  896. if (is_zero_ether_addr(bnad->perm_addr)) {
  897. bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
  898. bnad_set_netdev_perm_addr(bnad);
  899. }
  900. }
  901. /*
  902. * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
  903. */
  904. static void
  905. bnad_tx_cleanup(struct delayed_work *work)
  906. {
  907. struct bnad_tx_info *tx_info =
  908. container_of(work, struct bnad_tx_info, tx_cleanup_work);
  909. struct bnad *bnad = NULL;
  910. struct bna_tcb *tcb;
  911. unsigned long flags;
  912. u32 i, pending = 0;
  913. for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
  914. tcb = tx_info->tcb[i];
  915. if (!tcb)
  916. continue;
  917. bnad = tcb->bnad;
  918. if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
  919. pending++;
  920. continue;
  921. }
  922. bnad_txq_cleanup(bnad, tcb);
  923. smp_mb__before_atomic();
  924. clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
  925. }
  926. if (pending) {
  927. queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
  928. msecs_to_jiffies(1));
  929. return;
  930. }
  931. spin_lock_irqsave(&bnad->bna_lock, flags);
  932. bna_tx_cleanup_complete(tx_info->tx);
  933. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  934. }
  935. static void
  936. bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
  937. {
  938. struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
  939. struct bna_tcb *tcb;
  940. int i;
  941. for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
  942. tcb = tx_info->tcb[i];
  943. if (!tcb)
  944. continue;
  945. }
  946. queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
  947. }
  948. static void
  949. bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
  950. {
  951. struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
  952. struct bna_ccb *ccb;
  953. struct bnad_rx_ctrl *rx_ctrl;
  954. int i;
  955. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  956. rx_ctrl = &rx_info->rx_ctrl[i];
  957. ccb = rx_ctrl->ccb;
  958. if (!ccb)
  959. continue;
  960. clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
  961. if (ccb->rcb[1])
  962. clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
  963. }
  964. }
  965. /*
  966. * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
  967. */
  968. static void
  969. bnad_rx_cleanup(void *work)
  970. {
  971. struct bnad_rx_info *rx_info =
  972. container_of(work, struct bnad_rx_info, rx_cleanup_work);
  973. struct bnad_rx_ctrl *rx_ctrl;
  974. struct bnad *bnad = NULL;
  975. unsigned long flags;
  976. u32 i;
  977. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  978. rx_ctrl = &rx_info->rx_ctrl[i];
  979. if (!rx_ctrl->ccb)
  980. continue;
  981. bnad = rx_ctrl->ccb->bnad;
  982. /*
  983. * Wait till the poll handler has exited
  984. * and nothing can be scheduled anymore
  985. */
  986. napi_disable(&rx_ctrl->napi);
  987. bnad_cq_cleanup(bnad, rx_ctrl->ccb);
  988. bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
  989. if (rx_ctrl->ccb->rcb[1])
  990. bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
  991. }
  992. spin_lock_irqsave(&bnad->bna_lock, flags);
  993. bna_rx_cleanup_complete(rx_info->rx);
  994. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  995. }
  996. static void
  997. bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
  998. {
  999. struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
  1000. struct bna_ccb *ccb;
  1001. struct bnad_rx_ctrl *rx_ctrl;
  1002. int i;
  1003. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  1004. rx_ctrl = &rx_info->rx_ctrl[i];
  1005. ccb = rx_ctrl->ccb;
  1006. if (!ccb)
  1007. continue;
  1008. clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
  1009. if (ccb->rcb[1])
  1010. clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
  1011. }
  1012. queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
  1013. }
  1014. static void
  1015. bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
  1016. {
  1017. struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
  1018. struct bna_ccb *ccb;
  1019. struct bna_rcb *rcb;
  1020. struct bnad_rx_ctrl *rx_ctrl;
  1021. int i, j;
  1022. for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
  1023. rx_ctrl = &rx_info->rx_ctrl[i];
  1024. ccb = rx_ctrl->ccb;
  1025. if (!ccb)
  1026. continue;
  1027. napi_enable(&rx_ctrl->napi);
  1028. for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
  1029. rcb = ccb->rcb[j];
  1030. if (!rcb)
  1031. continue;
  1032. bnad_rxq_alloc_init(bnad, rcb);
  1033. set_bit(BNAD_RXQ_STARTED, &rcb->flags);
  1034. set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
  1035. bnad_rxq_post(bnad, rcb);
  1036. }
  1037. }
  1038. }
  1039. static void
  1040. bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
  1041. {
  1042. struct bnad *bnad = (struct bnad *)arg;
  1043. complete(&bnad->bnad_completions.rx_comp);
  1044. }
  1045. static void
  1046. bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
  1047. {
  1048. bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
  1049. complete(&bnad->bnad_completions.mcast_comp);
  1050. }
  1051. void
  1052. bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
  1053. struct bna_stats *stats)
  1054. {
  1055. if (status == BNA_CB_SUCCESS)
  1056. BNAD_UPDATE_CTR(bnad, hw_stats_updates);
  1057. if (!netif_running(bnad->netdev) ||
  1058. !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
  1059. return;
  1060. mod_timer(&bnad->stats_timer,
  1061. jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
  1062. }
  1063. static void
  1064. bnad_cb_enet_mtu_set(struct bnad *bnad)
  1065. {
  1066. bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
  1067. complete(&bnad->bnad_completions.mtu_comp);
  1068. }
  1069. void
  1070. bnad_cb_completion(void *arg, enum bfa_status status)
  1071. {
  1072. struct bnad_iocmd_comp *iocmd_comp =
  1073. (struct bnad_iocmd_comp *)arg;
  1074. iocmd_comp->comp_status = (u32) status;
  1075. complete(&iocmd_comp->comp);
  1076. }
  1077. /* Resource allocation, free functions */
  1078. static void
  1079. bnad_mem_free(struct bnad *bnad,
  1080. struct bna_mem_info *mem_info)
  1081. {
  1082. int i;
  1083. dma_addr_t dma_pa;
  1084. if (mem_info->mdl == NULL)
  1085. return;
  1086. for (i = 0; i < mem_info->num; i++) {
  1087. if (mem_info->mdl[i].kva != NULL) {
  1088. if (mem_info->mem_type == BNA_MEM_T_DMA) {
  1089. BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
  1090. dma_pa);
  1091. dma_free_coherent(&bnad->pcidev->dev,
  1092. mem_info->mdl[i].len,
  1093. mem_info->mdl[i].kva, dma_pa);
  1094. } else
  1095. kfree(mem_info->mdl[i].kva);
  1096. }
  1097. }
  1098. kfree(mem_info->mdl);
  1099. mem_info->mdl = NULL;
  1100. }
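/* Allocate 'num' descriptors of 'len' bytes each, DMA-coherent or kernel VA depending on mem_type */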
  1101. static int
  1102. bnad_mem_alloc(struct bnad *bnad,
  1103. struct bna_mem_info *mem_info)
  1104. {
  1105. int i;
  1106. dma_addr_t dma_pa;
  1107. if ((mem_info->num == 0) || (mem_info->len == 0)) {
  1108. mem_info->mdl = NULL;
  1109. return 0;
  1110. }
  1111. mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
  1112. GFP_KERNEL);
  1113. if (mem_info->mdl == NULL)
  1114. return -ENOMEM;
  1115. if (mem_info->mem_type == BNA_MEM_T_DMA) {
  1116. for (i = 0; i < mem_info->num; i++) {
  1117. mem_info->mdl[i].len = mem_info->len;
  1118. mem_info->mdl[i].kva =
  1119. dma_alloc_coherent(&bnad->pcidev->dev,
  1120. mem_info->len, &dma_pa,
  1121. GFP_KERNEL);
  1122. if (mem_info->mdl[i].kva == NULL)
  1123. goto err_return;
  1124. BNA_SET_DMA_ADDR(dma_pa,
  1125. &(mem_info->mdl[i].dma));
  1126. }
  1127. } else {
  1128. for (i = 0; i < mem_info->num; i++) {
  1129. mem_info->mdl[i].len = mem_info->len;
  1130. mem_info->mdl[i].kva = kzalloc(mem_info->len,
  1131. GFP_KERNEL);
  1132. if (mem_info->mdl[i].kva == NULL)
  1133. goto err_return;
  1134. }
  1135. }
  1136. return 0;
  1137. err_return:
  1138. bnad_mem_free(bnad, mem_info);
  1139. return -ENOMEM;
  1140. }
  1141. /* Free IRQ for Mailbox */
  1142. static void
  1143. bnad_mbox_irq_free(struct bnad *bnad)
  1144. {
  1145. int irq;
  1146. unsigned long flags;
  1147. spin_lock_irqsave(&bnad->bna_lock, flags);
  1148. bnad_disable_mbox_irq(bnad);
  1149. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1150. irq = BNAD_GET_MBOX_IRQ(bnad);
  1151. free_irq(irq, bnad);
  1152. }
  1153. /*
1154. * Allocates the IRQ for the Mailbox, but keeps it disabled.
1155. * It will be enabled once we get the mbox enable callback
1156. * from bna.
  1157. */
  1158. static int
  1159. bnad_mbox_irq_alloc(struct bnad *bnad)
  1160. {
  1161. int err = 0;
  1162. unsigned long irq_flags, flags;
  1163. u32 irq;
  1164. irq_handler_t irq_handler;
  1165. spin_lock_irqsave(&bnad->bna_lock, flags);
  1166. if (bnad->cfg_flags & BNAD_CF_MSIX) {
  1167. irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
  1168. irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
  1169. irq_flags = 0;
  1170. } else {
  1171. irq_handler = (irq_handler_t)bnad_isr;
  1172. irq = bnad->pcidev->irq;
  1173. irq_flags = IRQF_SHARED;
  1174. }
  1175. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1176. sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
  1177. /*
  1178. * Set the Mbox IRQ disable flag, so that the IRQ handler
1179. * called from request_irq() for SHARED IRQs does not execute
  1180. */
  1181. set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
  1182. BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
  1183. err = request_irq(irq, irq_handler, irq_flags,
  1184. bnad->mbox_irq_name, bnad);
  1185. return err;
  1186. }
  1187. static void
  1188. bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
  1189. {
  1190. kfree(intr_info->idl);
  1191. intr_info->idl = NULL;
  1192. }
  1193. /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
  1194. static int
  1195. bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
  1196. u32 txrx_id, struct bna_intr_info *intr_info)
  1197. {
  1198. int i, vector_start = 0;
  1199. u32 cfg_flags;
  1200. unsigned long flags;
  1201. spin_lock_irqsave(&bnad->bna_lock, flags);
  1202. cfg_flags = bnad->cfg_flags;
  1203. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1204. if (cfg_flags & BNAD_CF_MSIX) {
  1205. intr_info->intr_type = BNA_INTR_T_MSIX;
  1206. intr_info->idl = kcalloc(intr_info->num,
  1207. sizeof(struct bna_intr_descr),
  1208. GFP_KERNEL);
  1209. if (!intr_info->idl)
  1210. return -ENOMEM;
  1211. switch (src) {
  1212. case BNAD_INTR_TX:
  1213. vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
  1214. break;
  1215. case BNAD_INTR_RX:
  1216. vector_start = BNAD_MAILBOX_MSIX_VECTORS +
  1217. (bnad->num_tx * bnad->num_txq_per_tx) +
  1218. txrx_id;
  1219. break;
  1220. default:
  1221. BUG();
  1222. }
  1223. for (i = 0; i < intr_info->num; i++)
  1224. intr_info->idl[i].vector = vector_start + i;
  1225. } else {
  1226. intr_info->intr_type = BNA_INTR_T_INTX;
  1227. intr_info->num = 1;
  1228. intr_info->idl = kcalloc(intr_info->num,
  1229. sizeof(struct bna_intr_descr),
  1230. GFP_KERNEL);
  1231. if (!intr_info->idl)
  1232. return -ENOMEM;
  1233. switch (src) {
  1234. case BNAD_INTR_TX:
  1235. intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
  1236. break;
  1237. case BNAD_INTR_RX:
  1238. intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
  1239. break;
  1240. }
  1241. }
  1242. return 0;
  1243. }
  1244. /* NOTE: Should be called for MSIX only
  1245. * Unregisters Tx MSIX vector(s) from the kernel
  1246. */
  1247. static void
  1248. bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
  1249. int num_txqs)
  1250. {
  1251. int i;
  1252. int vector_num;
  1253. for (i = 0; i < num_txqs; i++) {
  1254. if (tx_info->tcb[i] == NULL)
  1255. continue;
  1256. vector_num = tx_info->tcb[i]->intr_vector;
  1257. free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
  1258. }
  1259. }
  1260. /* NOTE: Should be called for MSIX only
  1261. * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
  1262. */
  1263. static int
  1264. bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
  1265. u32 tx_id, int num_txqs)
  1266. {
  1267. int i;
  1268. int err;
  1269. int vector_num;
  1270. for (i = 0; i < num_txqs; i++) {
  1271. vector_num = tx_info->tcb[i]->intr_vector;
  1272. sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
  1273. tx_id + tx_info->tcb[i]->id);
  1274. err = request_irq(bnad->msix_table[vector_num].vector,
  1275. (irq_handler_t)bnad_msix_tx, 0,
  1276. tx_info->tcb[i]->name,
  1277. tx_info->tcb[i]);
  1278. if (err)
  1279. goto err_return;
  1280. }
  1281. return 0;
  1282. err_return:
  1283. if (i > 0)
  1284. bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
  1285. return -1;
  1286. }
  1287. /* NOTE: Should be called for MSIX only
  1288. * Unregisters Rx MSIX vector(s) from the kernel
  1289. */
  1290. static void
  1291. bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
  1292. int num_rxps)
  1293. {
  1294. int i;
  1295. int vector_num;
  1296. for (i = 0; i < num_rxps; i++) {
  1297. if (rx_info->rx_ctrl[i].ccb == NULL)
  1298. continue;
  1299. vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
  1300. free_irq(bnad->msix_table[vector_num].vector,
  1301. rx_info->rx_ctrl[i].ccb);
  1302. }
  1303. }
  1304. /* NOTE: Should be called for MSIX only
1305. * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
  1306. */
  1307. static int
  1308. bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
  1309. u32 rx_id, int num_rxps)
  1310. {
  1311. int i;
  1312. int err;
  1313. int vector_num;
  1314. for (i = 0; i < num_rxps; i++) {
  1315. vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
  1316. sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
  1317. bnad->netdev->name,
  1318. rx_id + rx_info->rx_ctrl[i].ccb->id);
  1319. err = request_irq(bnad->msix_table[vector_num].vector,
  1320. (irq_handler_t)bnad_msix_rx, 0,
  1321. rx_info->rx_ctrl[i].ccb->name,
  1322. rx_info->rx_ctrl[i].ccb);
  1323. if (err)
  1324. goto err_return;
  1325. }
  1326. return 0;
  1327. err_return:
  1328. if (i > 0)
  1329. bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
  1330. return -1;
  1331. }
  1332. /* Free Tx object Resources */
  1333. static void
  1334. bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
  1335. {
  1336. int i;
  1337. for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
  1338. if (res_info[i].res_type == BNA_RES_T_MEM)
  1339. bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
  1340. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1341. bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
  1342. }
  1343. }
  1344. /* Allocates memory and interrupt resources for Tx object */
  1345. static int
  1346. bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
  1347. u32 tx_id)
  1348. {
  1349. int i, err = 0;
  1350. for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
  1351. if (res_info[i].res_type == BNA_RES_T_MEM)
  1352. err = bnad_mem_alloc(bnad,
  1353. &res_info[i].res_u.mem_info);
  1354. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1355. err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
  1356. &res_info[i].res_u.intr_info);
  1357. if (err)
  1358. goto err_return;
  1359. }
  1360. return 0;
  1361. err_return:
  1362. bnad_tx_res_free(bnad, res_info);
  1363. return err;
  1364. }
  1365. /* Free Rx object Resources */
  1366. static void
  1367. bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
  1368. {
  1369. int i;
  1370. for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
  1371. if (res_info[i].res_type == BNA_RES_T_MEM)
  1372. bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
  1373. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1374. bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
  1375. }
  1376. }
  1377. /* Allocates memory and interrupt resources for Rx object */
  1378. static int
  1379. bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
  1380. uint rx_id)
  1381. {
  1382. int i, err = 0;
  1383. /* All memory needs to be allocated before setup_ccbs */
  1384. for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
  1385. if (res_info[i].res_type == BNA_RES_T_MEM)
  1386. err = bnad_mem_alloc(bnad,
  1387. &res_info[i].res_u.mem_info);
  1388. else if (res_info[i].res_type == BNA_RES_T_INTR)
  1389. err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
  1390. &res_info[i].res_u.intr_info);
  1391. if (err)
  1392. goto err_return;
  1393. }
  1394. return 0;
  1395. err_return:
  1396. bnad_rx_res_free(bnad, res_info);
  1397. return err;
  1398. }
  1399. /* Timer callbacks */
  1400. /* a) IOC timer */
  1401. static void
  1402. bnad_ioc_timeout(struct timer_list *t)
  1403. {
  1404. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
  1405. unsigned long flags;
  1406. spin_lock_irqsave(&bnad->bna_lock, flags);
  1407. bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
  1408. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1409. }
  1410. static void
  1411. bnad_ioc_hb_check(struct timer_list *t)
  1412. {
  1413. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
  1414. unsigned long flags;
  1415. spin_lock_irqsave(&bnad->bna_lock, flags);
  1416. bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
  1417. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1418. }
  1419. static void
  1420. bnad_iocpf_timeout(struct timer_list *t)
  1421. {
  1422. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
  1423. unsigned long flags;
  1424. spin_lock_irqsave(&bnad->bna_lock, flags);
  1425. bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
  1426. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1427. }
  1428. static void
  1429. bnad_iocpf_sem_timeout(struct timer_list *t)
  1430. {
  1431. struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
  1432. unsigned long flags;
  1433. spin_lock_irqsave(&bnad->bna_lock, flags);
  1434. bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
  1435. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1436. }
  1437. /*
  1438. * All timer routines use bnad->bna_lock to protect against
  1439. * the following race, which may occur in case of no locking:
1440. *  Time    CPU m           CPU n
1441. *  0       1 = test_bit
1442. *  1                       clear_bit
1443. *  2                       del_timer_sync
1444. *  3       mod_timer
  1445. */
  1446. /* b) Dynamic Interrupt Moderation Timer */
  1447. static void
  1448. bnad_dim_timeout(struct timer_list *t)
  1449. {
  1450. struct bnad *bnad = from_timer(bnad, t, dim_timer);
  1451. struct bnad_rx_info *rx_info;
  1452. struct bnad_rx_ctrl *rx_ctrl;
  1453. int i, j;
  1454. unsigned long flags;
  1455. if (!netif_carrier_ok(bnad->netdev))
  1456. return;
  1457. spin_lock_irqsave(&bnad->bna_lock, flags);
  1458. for (i = 0; i < bnad->num_rx; i++) {
  1459. rx_info = &bnad->rx_info[i];
  1460. if (!rx_info->rx)
  1461. continue;
  1462. for (j = 0; j < bnad->num_rxp_per_rx; j++) {
  1463. rx_ctrl = &rx_info->rx_ctrl[j];
  1464. if (!rx_ctrl->ccb)
  1465. continue;
  1466. bna_rx_dim_update(rx_ctrl->ccb);
  1467. }
  1468. }
1469. /* Check for BNAD_CF_DIM_ENABLED, does not eliminate a race */
  1470. if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
  1471. mod_timer(&bnad->dim_timer,
  1472. jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
  1473. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1474. }
  1475. /* c) Statistics Timer */
  1476. static void
  1477. bnad_stats_timeout(struct timer_list *t)
  1478. {
  1479. struct bnad *bnad = from_timer(bnad, t, stats_timer);
  1480. unsigned long flags;
  1481. if (!netif_running(bnad->netdev) ||
  1482. !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
  1483. return;
  1484. spin_lock_irqsave(&bnad->bna_lock, flags);
  1485. bna_hw_stats_get(&bnad->bna);
  1486. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1487. }
  1488. /*
  1489. * Set up timer for DIM
  1490. * Called with bnad->bna_lock held
  1491. */
  1492. void
  1493. bnad_dim_timer_start(struct bnad *bnad)
  1494. {
  1495. if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
  1496. !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
  1497. timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
  1498. set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
  1499. mod_timer(&bnad->dim_timer,
  1500. jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
  1501. }
  1502. }
  1503. /*
  1504. * Set up timer for statistics
  1505. * Called with mutex_lock(&bnad->conf_mutex) held
  1506. */
  1507. static void
  1508. bnad_stats_timer_start(struct bnad *bnad)
  1509. {
  1510. unsigned long flags;
  1511. spin_lock_irqsave(&bnad->bna_lock, flags);
  1512. if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
  1513. timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
  1514. mod_timer(&bnad->stats_timer,
  1515. jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
  1516. }
  1517. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1518. }
  1519. /*
  1520. * Stops the stats timer
  1521. * Called with mutex_lock(&bnad->conf_mutex) held
  1522. */
  1523. static void
  1524. bnad_stats_timer_stop(struct bnad *bnad)
  1525. {
  1526. int to_del = 0;
  1527. unsigned long flags;
  1528. spin_lock_irqsave(&bnad->bna_lock, flags);
  1529. if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
  1530. to_del = 1;
  1531. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1532. if (to_del)
  1533. del_timer_sync(&bnad->stats_timer);
  1534. }
  1535. /* Utilities */
  1536. static void
  1537. bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
  1538. {
  1539. int i = 1; /* Index 0 has broadcast address */
  1540. struct netdev_hw_addr *mc_addr;
  1541. netdev_for_each_mc_addr(mc_addr, netdev) {
  1542. ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
  1543. i++;
  1544. }
  1545. }
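/* NAPI poll handler: process Rx completions and re-enable the Rx IRQ when the budget is not exhausted */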
  1546. static int
  1547. bnad_napi_poll_rx(struct napi_struct *napi, int budget)
  1548. {
  1549. struct bnad_rx_ctrl *rx_ctrl =
  1550. container_of(napi, struct bnad_rx_ctrl, napi);
  1551. struct bnad *bnad = rx_ctrl->bnad;
  1552. int rcvd = 0;
  1553. rx_ctrl->rx_poll_ctr++;
  1554. if (!netif_carrier_ok(bnad->netdev))
  1555. goto poll_exit;
  1556. rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
  1557. if (rcvd >= budget)
  1558. return rcvd;
  1559. poll_exit:
  1560. napi_complete_done(napi, rcvd);
  1561. rx_ctrl->rx_complete++;
  1562. if (rx_ctrl->ccb)
  1563. bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
  1564. return rcvd;
  1565. }
  1566. #define BNAD_NAPI_POLL_QUOTA 64
  1567. static void
  1568. bnad_napi_add(struct bnad *bnad, u32 rx_id)
  1569. {
  1570. struct bnad_rx_ctrl *rx_ctrl;
  1571. int i;
  1572. /* Initialize & enable NAPI */
  1573. for (i = 0; i < bnad->num_rxp_per_rx; i++) {
  1574. rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
  1575. netif_napi_add(bnad->netdev, &rx_ctrl->napi,
  1576. bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
  1577. }
  1578. }
  1579. static void
  1580. bnad_napi_delete(struct bnad *bnad, u32 rx_id)
  1581. {
  1582. int i;
  1583. /* First disable and then clean up */
  1584. for (i = 0; i < bnad->num_rxp_per_rx; i++)
  1585. netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
  1586. }
1587. /* Should be called with conf_lock held */
  1588. void
  1589. bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
  1590. {
  1591. struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
  1592. struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
  1593. unsigned long flags;
  1594. if (!tx_info->tx)
  1595. return;
  1596. init_completion(&bnad->bnad_completions.tx_comp);
  1597. spin_lock_irqsave(&bnad->bna_lock, flags);
  1598. bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
  1599. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1600. wait_for_completion(&bnad->bnad_completions.tx_comp);
  1601. if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
  1602. bnad_tx_msix_unregister(bnad, tx_info,
  1603. bnad->num_txq_per_tx);
  1604. spin_lock_irqsave(&bnad->bna_lock, flags);
  1605. bna_tx_destroy(tx_info->tx);
  1606. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1607. tx_info->tx = NULL;
  1608. tx_info->tx_id = 0;
  1609. bnad_tx_res_free(bnad, res_info);
  1610. }
1611. /* Should be called with conf_lock held */
  1612. int
  1613. bnad_setup_tx(struct bnad *bnad, u32 tx_id)
  1614. {
  1615. int err;
  1616. struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
  1617. struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
  1618. struct bna_intr_info *intr_info =
  1619. &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
  1620. struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
  1621. static const struct bna_tx_event_cbfn tx_cbfn = {
  1622. .tcb_setup_cbfn = bnad_cb_tcb_setup,
  1623. .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
  1624. .tx_stall_cbfn = bnad_cb_tx_stall,
  1625. .tx_resume_cbfn = bnad_cb_tx_resume,
  1626. .tx_cleanup_cbfn = bnad_cb_tx_cleanup,
  1627. };
  1628. struct bna_tx *tx;
  1629. unsigned long flags;
  1630. tx_info->tx_id = tx_id;
  1631. /* Initialize the Tx object configuration */
  1632. tx_config->num_txq = bnad->num_txq_per_tx;
  1633. tx_config->txq_depth = bnad->txq_depth;
  1634. tx_config->tx_type = BNA_TX_T_REGULAR;
  1635. tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
  1636. /* Get BNA's resource requirement for one tx object */
  1637. spin_lock_irqsave(&bnad->bna_lock, flags);
  1638. bna_tx_res_req(bnad->num_txq_per_tx,
  1639. bnad->txq_depth, res_info);
  1640. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1641. /* Fill Unmap Q memory requirements */
  1642. BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
  1643. bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
  1644. bnad->txq_depth));
  1645. /* Allocate resources */
  1646. err = bnad_tx_res_alloc(bnad, res_info, tx_id);
  1647. if (err)
  1648. return err;
  1649. /* Ask BNA to create one Tx object, supplying required resources */
  1650. spin_lock_irqsave(&bnad->bna_lock, flags);
  1651. tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
  1652. tx_info);
  1653. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1654. if (!tx) {
  1655. err = -ENOMEM;
  1656. goto err_return;
  1657. }
  1658. tx_info->tx = tx;
  1659. INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
  1660. (work_func_t)bnad_tx_cleanup);
  1661. /* Register ISR for the Tx object */
  1662. if (intr_info->intr_type == BNA_INTR_T_MSIX) {
  1663. err = bnad_tx_msix_register(bnad, tx_info,
  1664. tx_id, bnad->num_txq_per_tx);
  1665. if (err)
  1666. goto cleanup_tx;
  1667. }
  1668. spin_lock_irqsave(&bnad->bna_lock, flags);
  1669. bna_tx_enable(tx);
  1670. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1671. return 0;
  1672. cleanup_tx:
  1673. spin_lock_irqsave(&bnad->bna_lock, flags);
  1674. bna_tx_destroy(tx_info->tx);
  1675. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1676. tx_info->tx = NULL;
  1677. tx_info->tx_id = 0;
  1678. err_return:
  1679. bnad_tx_res_free(bnad, res_info);
  1680. return err;
  1681. }
  1682. /* Setup the rx config for bna_rx_create */
  1683. /* bnad decides the configuration */
  1684. static void
  1685. bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
  1686. {
  1687. memset(rx_config, 0, sizeof(*rx_config));
  1688. rx_config->rx_type = BNA_RX_T_REGULAR;
  1689. rx_config->num_paths = bnad->num_rxp_per_rx;
  1690. rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
  1691. if (bnad->num_rxp_per_rx > 1) {
  1692. rx_config->rss_status = BNA_STATUS_T_ENABLED;
  1693. rx_config->rss_config.hash_type =
  1694. (BFI_ENET_RSS_IPV6 |
  1695. BFI_ENET_RSS_IPV6_TCP |
  1696. BFI_ENET_RSS_IPV4 |
  1697. BFI_ENET_RSS_IPV4_TCP);
  1698. rx_config->rss_config.hash_mask =
  1699. bnad->num_rxp_per_rx - 1;
  1700. netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
  1701. sizeof(rx_config->rss_config.toeplitz_hash_key));
  1702. } else {
  1703. rx_config->rss_status = BNA_STATUS_T_DISABLED;
  1704. memset(&rx_config->rss_config, 0,
  1705. sizeof(rx_config->rss_config));
  1706. }
  1707. rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
  1708. rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
  1709. /* BNA_RXP_SINGLE - one data-buffer queue
1710. * BNA_RXP_SLR - one small-buffer and one large-buffer queue
1711. * BNA_RXP_HDS - one header-buffer and one data-buffer queue
  1712. */
  1713. /* TODO: configurable param for queue type */
  1714. rx_config->rxp_type = BNA_RXP_SLR;
  1715. if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
  1716. rx_config->frame_size > 4096) {
1717. /* Though size_routing_enable is set in SLR,
1718. * small packets may get routed to the same rxq.
1719. * Set buf_size to 2048 instead of PAGE_SIZE.
  1720. */
  1721. rx_config->q0_buf_size = 2048;
1722. /* this should be a multiple of 2 */
  1723. rx_config->q0_num_vecs = 4;
  1724. rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
  1725. rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
  1726. } else {
  1727. rx_config->q0_buf_size = rx_config->frame_size;
  1728. rx_config->q0_num_vecs = 1;
  1729. rx_config->q0_depth = bnad->rxq_depth;
  1730. }
  1731. /* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
  1732. if (rx_config->rxp_type == BNA_RXP_SLR) {
  1733. rx_config->q1_depth = bnad->rxq_depth;
  1734. rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
  1735. }
  1736. rx_config->vlan_strip_status =
  1737. (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
  1738. BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
  1739. }
  1740. static void
  1741. bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
  1742. {
  1743. struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
  1744. int i;
  1745. for (i = 0; i < bnad->num_rxp_per_rx; i++)
  1746. rx_info->rx_ctrl[i].bnad = bnad;
  1747. }
  1748. /* Called with mutex_lock(&bnad->conf_mutex) held */
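/*
 * Destroys and re-creates every Rx object (used when an MTU change
 * toggles multi-buffer mode) and then restores the Rx configuration
 * on the default Rx. Returns the number of Rx objects processed.
 */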
  1749. static u32
  1750. bnad_reinit_rx(struct bnad *bnad)
  1751. {
  1752. struct net_device *netdev = bnad->netdev;
  1753. u32 err = 0, current_err = 0;
  1754. u32 rx_id = 0, count = 0;
  1755. unsigned long flags;
  1756. /* destroy and create new rx objects */
  1757. for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
  1758. if (!bnad->rx_info[rx_id].rx)
  1759. continue;
  1760. bnad_destroy_rx(bnad, rx_id);
  1761. }
  1762. spin_lock_irqsave(&bnad->bna_lock, flags);
  1763. bna_enet_mtu_set(&bnad->bna.enet,
  1764. BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
  1765. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1766. for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
  1767. count++;
  1768. current_err = bnad_setup_rx(bnad, rx_id);
  1769. if (current_err && !err) {
  1770. err = current_err;
  1771. netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
  1772. }
  1773. }
  1774. /* restore rx configuration */
  1775. if (bnad->rx_info[0].rx && !err) {
  1776. bnad_restore_vlans(bnad, 0);
  1777. bnad_enable_default_bcast(bnad);
  1778. spin_lock_irqsave(&bnad->bna_lock, flags);
  1779. bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
  1780. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1781. bnad_set_rx_mode(netdev);
  1782. }
  1783. return count;
  1784. }
  1785. /* Called with bnad_conf_lock() held */
  1786. void
  1787. bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
  1788. {
  1789. struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
  1790. struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
  1791. struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
  1792. unsigned long flags;
  1793. int to_del = 0;
  1794. if (!rx_info->rx)
  1795. return;
  1796. if (0 == rx_id) {
  1797. spin_lock_irqsave(&bnad->bna_lock, flags);
  1798. if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
  1799. test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
  1800. clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
  1801. to_del = 1;
  1802. }
  1803. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1804. if (to_del)
  1805. del_timer_sync(&bnad->dim_timer);
  1806. }
  1807. init_completion(&bnad->bnad_completions.rx_comp);
  1808. spin_lock_irqsave(&bnad->bna_lock, flags);
  1809. bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
  1810. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1811. wait_for_completion(&bnad->bnad_completions.rx_comp);
  1812. if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
  1813. bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
  1814. bnad_napi_delete(bnad, rx_id);
  1815. spin_lock_irqsave(&bnad->bna_lock, flags);
  1816. bna_rx_destroy(rx_info->rx);
  1817. rx_info->rx = NULL;
  1818. rx_info->rx_id = 0;
  1819. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1820. bnad_rx_res_free(bnad, res_info);
  1821. }
  1822. /* Called with mutex_lock(&bnad->conf_mutex) held */
  1823. int
  1824. bnad_setup_rx(struct bnad *bnad, u32 rx_id)
  1825. {
  1826. int err;
  1827. struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
  1828. struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
  1829. struct bna_intr_info *intr_info =
  1830. &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
  1831. struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
  1832. static const struct bna_rx_event_cbfn rx_cbfn = {
  1833. .rcb_setup_cbfn = NULL,
  1834. .rcb_destroy_cbfn = NULL,
  1835. .ccb_setup_cbfn = bnad_cb_ccb_setup,
  1836. .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
  1837. .rx_stall_cbfn = bnad_cb_rx_stall,
  1838. .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
  1839. .rx_post_cbfn = bnad_cb_rx_post,
  1840. };
  1841. struct bna_rx *rx;
  1842. unsigned long flags;
  1843. rx_info->rx_id = rx_id;
  1844. /* Initialize the Rx object configuration */
  1845. bnad_init_rx_config(bnad, rx_config);
  1846. /* Get BNA's resource requirement for one Rx object */
  1847. spin_lock_irqsave(&bnad->bna_lock, flags);
  1848. bna_rx_res_req(rx_config, res_info);
  1849. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1850. /* Fill Unmap Q memory requirements */
  1851. BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
  1852. rx_config->num_paths,
  1853. (rx_config->q0_depth *
  1854. sizeof(struct bnad_rx_unmap)) +
  1855. sizeof(struct bnad_rx_unmap_q));
  1856. if (rx_config->rxp_type != BNA_RXP_SINGLE) {
  1857. BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
  1858. rx_config->num_paths,
  1859. (rx_config->q1_depth *
  1860. sizeof(struct bnad_rx_unmap) +
  1861. sizeof(struct bnad_rx_unmap_q)));
  1862. }
  1863. /* Allocate resource */
  1864. err = bnad_rx_res_alloc(bnad, res_info, rx_id);
  1865. if (err)
  1866. return err;
  1867. bnad_rx_ctrl_init(bnad, rx_id);
  1868. /* Ask BNA to create one Rx object, supplying required resources */
  1869. spin_lock_irqsave(&bnad->bna_lock, flags);
  1870. rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
  1871. rx_info);
  1872. if (!rx) {
  1873. err = -ENOMEM;
  1874. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1875. goto err_return;
  1876. }
  1877. rx_info->rx = rx;
  1878. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1879. INIT_WORK(&rx_info->rx_cleanup_work,
  1880. (work_func_t)(bnad_rx_cleanup));
  1881. /*
1882. * Init NAPI, so that state is set to NAPI_STATE_SCHED and the
1883. * IRQ handler cannot schedule NAPI at this point.
  1884. */
  1885. bnad_napi_add(bnad, rx_id);
  1886. /* Register ISR for the Rx object */
  1887. if (intr_info->intr_type == BNA_INTR_T_MSIX) {
  1888. err = bnad_rx_msix_register(bnad, rx_info, rx_id,
  1889. rx_config->num_paths);
  1890. if (err)
  1891. goto err_return;
  1892. }
  1893. spin_lock_irqsave(&bnad->bna_lock, flags);
  1894. if (0 == rx_id) {
  1895. /* Set up Dynamic Interrupt Moderation Vector */
  1896. if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
  1897. bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
  1898. /* Enable VLAN filtering only on the default Rx */
  1899. bna_rx_vlanfilter_enable(rx);
  1900. /* Start the DIM timer */
  1901. bnad_dim_timer_start(bnad);
  1902. }
  1903. bna_rx_enable(rx);
  1904. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1905. return 0;
  1906. err_return:
  1907. bnad_destroy_rx(bnad, rx_id);
  1908. return err;
  1909. }
  1910. /* Called with conf_lock & bnad->bna_lock held */
  1911. void
  1912. bnad_tx_coalescing_timeo_set(struct bnad *bnad)
  1913. {
  1914. struct bnad_tx_info *tx_info;
  1915. tx_info = &bnad->tx_info[0];
  1916. if (!tx_info->tx)
  1917. return;
  1918. bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
  1919. }
  1920. /* Called with conf_lock & bnad->bna_lock held */
  1921. void
  1922. bnad_rx_coalescing_timeo_set(struct bnad *bnad)
  1923. {
  1924. struct bnad_rx_info *rx_info;
  1925. int i;
  1926. for (i = 0; i < bnad->num_rx; i++) {
  1927. rx_info = &bnad->rx_info[i];
  1928. if (!rx_info->rx)
  1929. continue;
  1930. bna_rx_coalescing_timeo_set(rx_info->rx,
  1931. bnad->rx_coalescing_timeo);
  1932. }
  1933. }
  1934. /*
  1935. * Called with bnad->bna_lock held
  1936. */
  1937. int
  1938. bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
  1939. {
  1940. int ret;
  1941. if (!is_valid_ether_addr(mac_addr))
  1942. return -EADDRNOTAVAIL;
  1943. /* If datapath is down, pretend everything went through */
  1944. if (!bnad->rx_info[0].rx)
  1945. return 0;
  1946. ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
  1947. if (ret != BNA_CB_SUCCESS)
  1948. return -EADDRNOTAVAIL;
  1949. return 0;
  1950. }
  1951. /* Should be called with conf_lock held */
  1952. int
  1953. bnad_enable_default_bcast(struct bnad *bnad)
  1954. {
  1955. struct bnad_rx_info *rx_info = &bnad->rx_info[0];
  1956. int ret;
  1957. unsigned long flags;
  1958. init_completion(&bnad->bnad_completions.mcast_comp);
  1959. spin_lock_irqsave(&bnad->bna_lock, flags);
  1960. ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
  1961. bnad_cb_rx_mcast_add);
  1962. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1963. if (ret == BNA_CB_SUCCESS)
  1964. wait_for_completion(&bnad->bnad_completions.mcast_comp);
  1965. else
  1966. return -ENODEV;
  1967. if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
  1968. return -ENODEV;
  1969. return 0;
  1970. }
  1971. /* Called with mutex_lock(&bnad->conf_mutex) held */
  1972. void
  1973. bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
  1974. {
  1975. u16 vid;
  1976. unsigned long flags;
  1977. for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
  1978. spin_lock_irqsave(&bnad->bna_lock, flags);
  1979. bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
  1980. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  1981. }
  1982. }
  1983. /* Statistics utilities */
  1984. void
  1985. bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
  1986. {
  1987. int i, j;
  1988. for (i = 0; i < bnad->num_rx; i++) {
  1989. for (j = 0; j < bnad->num_rxp_per_rx; j++) {
  1990. if (bnad->rx_info[i].rx_ctrl[j].ccb) {
  1991. stats->rx_packets += bnad->rx_info[i].
  1992. rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
  1993. stats->rx_bytes += bnad->rx_info[i].
  1994. rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
  1995. if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
  1996. bnad->rx_info[i].rx_ctrl[j].ccb->
  1997. rcb[1]->rxq) {
  1998. stats->rx_packets +=
  1999. bnad->rx_info[i].rx_ctrl[j].
  2000. ccb->rcb[1]->rxq->rx_packets;
  2001. stats->rx_bytes +=
  2002. bnad->rx_info[i].rx_ctrl[j].
  2003. ccb->rcb[1]->rxq->rx_bytes;
  2004. }
  2005. }
  2006. }
  2007. }
  2008. for (i = 0; i < bnad->num_tx; i++) {
  2009. for (j = 0; j < bnad->num_txq_per_tx; j++) {
  2010. if (bnad->tx_info[i].tcb[j]) {
  2011. stats->tx_packets +=
  2012. bnad->tx_info[i].tcb[j]->txq->tx_packets;
  2013. stats->tx_bytes +=
  2014. bnad->tx_info[i].tcb[j]->txq->tx_bytes;
  2015. }
  2016. }
  2017. }
  2018. }
  2019. /*
  2020. * Must be called with the bna_lock held.
  2021. */
  2022. void
  2023. bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
  2024. {
  2025. struct bfi_enet_stats_mac *mac_stats;
  2026. u32 bmap;
  2027. int i;
  2028. mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
  2029. stats->rx_errors =
  2030. mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
  2031. mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
  2032. mac_stats->rx_undersize;
  2033. stats->tx_errors = mac_stats->tx_fcs_error +
  2034. mac_stats->tx_undersize;
  2035. stats->rx_dropped = mac_stats->rx_drop;
  2036. stats->tx_dropped = mac_stats->tx_drop;
  2037. stats->multicast = mac_stats->rx_multicast;
  2038. stats->collisions = mac_stats->tx_total_collision;
  2039. stats->rx_length_errors = mac_stats->rx_frame_length_error;
  2040. /* receive ring buffer overflow ?? */
  2041. stats->rx_crc_errors = mac_stats->rx_fcs_error;
  2042. stats->rx_frame_errors = mac_stats->rx_alignment_error;
2043. /* receiver FIFO overrun */
  2044. bmap = bna_rx_rid_mask(&bnad->bna);
  2045. for (i = 0; bmap; i++) {
  2046. if (bmap & 1) {
  2047. stats->rx_fifo_errors +=
  2048. bnad->stats.bna_stats->
  2049. hw_stats.rxf_stats[i].frame_drops;
  2050. break;
  2051. }
  2052. bmap >>= 1;
  2053. }
  2054. }
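/* Wait for any in-flight mailbox interrupt handler to finish. */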
  2055. static void
  2056. bnad_mbox_irq_sync(struct bnad *bnad)
  2057. {
  2058. u32 irq;
  2059. unsigned long flags;
  2060. spin_lock_irqsave(&bnad->bna_lock, flags);
  2061. if (bnad->cfg_flags & BNAD_CF_MSIX)
  2062. irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
  2063. else
  2064. irq = bnad->pcidev->irq;
  2065. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2066. synchronize_irq(irq);
  2067. }
  2068. /* Utility used by bnad_start_xmit, for doing TSO */
  2069. static int
  2070. bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
  2071. {
  2072. int err;
  2073. err = skb_cow_head(skb, 0);
  2074. if (err < 0) {
  2075. BNAD_UPDATE_CTR(bnad, tso_err);
  2076. return err;
  2077. }
  2078. /*
  2079. * For TSO, the TCP checksum field is seeded with pseudo-header sum
  2080. * excluding the length field.
  2081. */
  2082. if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
  2083. struct iphdr *iph = ip_hdr(skb);
  2084. /* Do we really need these? */
  2085. iph->tot_len = 0;
  2086. iph->check = 0;
  2087. tcp_hdr(skb)->check =
  2088. ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
  2089. IPPROTO_TCP, 0);
  2090. BNAD_UPDATE_CTR(bnad, tso4);
  2091. } else {
  2092. tcp_v6_gso_csum_prep(skb);
  2093. BNAD_UPDATE_CTR(bnad, tso6);
  2094. }
  2095. return 0;
  2096. }
  2097. /*
  2098. * Initialize Q numbers depending on Rx Paths
  2099. * Called with bnad->bna_lock held, because of cfg_flags
  2100. * access.
  2101. */
  2102. static void
  2103. bnad_q_num_init(struct bnad *bnad)
  2104. {
  2105. int rxps;
  2106. rxps = min((uint)num_online_cpus(),
  2107. (uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
  2108. if (!(bnad->cfg_flags & BNAD_CF_MSIX))
  2109. rxps = 1; /* INTx */
  2110. bnad->num_rx = 1;
  2111. bnad->num_tx = 1;
  2112. bnad->num_rxp_per_rx = rxps;
  2113. bnad->num_txq_per_tx = BNAD_TXQ_NUM;
  2114. }
  2115. /*
2116. * Adjusts the Q numbers, given a number of MSI-X vectors.
2117. * Gives preference to RSS over Tx priority queues, so in that
2118. * case just one Tx Q is used.
2119. * Called with bnad->bna_lock held because of cfg_flags access.
  2120. */
  2121. static void
  2122. bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
  2123. {
  2124. bnad->num_txq_per_tx = 1;
  2125. if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
  2126. bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
  2127. (bnad->cfg_flags & BNAD_CF_MSIX)) {
  2128. bnad->num_rxp_per_rx = msix_vectors -
  2129. (bnad->num_tx * bnad->num_txq_per_tx) -
  2130. BNAD_MAILBOX_MSIX_VECTORS;
  2131. } else
  2132. bnad->num_rxp_per_rx = 1;
  2133. }
  2134. /* Enable / disable ioceth */
  2135. static int
  2136. bnad_ioceth_disable(struct bnad *bnad)
  2137. {
  2138. unsigned long flags;
  2139. int err = 0;
  2140. spin_lock_irqsave(&bnad->bna_lock, flags);
  2141. init_completion(&bnad->bnad_completions.ioc_comp);
  2142. bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
  2143. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2144. wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
  2145. msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
  2146. err = bnad->bnad_completions.ioc_comp_status;
  2147. return err;
  2148. }
  2149. static int
  2150. bnad_ioceth_enable(struct bnad *bnad)
  2151. {
  2152. int err = 0;
  2153. unsigned long flags;
  2154. spin_lock_irqsave(&bnad->bna_lock, flags);
  2155. init_completion(&bnad->bnad_completions.ioc_comp);
  2156. bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
  2157. bna_ioceth_enable(&bnad->bna.ioceth);
  2158. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2159. wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
  2160. msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
  2161. err = bnad->bnad_completions.ioc_comp_status;
  2162. return err;
  2163. }
  2164. /* Free BNA resources */
  2165. static void
  2166. bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
  2167. u32 res_val_max)
  2168. {
  2169. int i;
  2170. for (i = 0; i < res_val_max; i++)
  2171. bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
  2172. }
  2173. /* Allocates memory and interrupt resources for BNA */
  2174. static int
  2175. bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
  2176. u32 res_val_max)
  2177. {
  2178. int i, err;
  2179. for (i = 0; i < res_val_max; i++) {
  2180. err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
  2181. if (err)
  2182. goto err_return;
  2183. }
  2184. return 0;
  2185. err_return:
  2186. bnad_res_free(bnad, res_info, res_val_max);
  2187. return err;
  2188. }
  2189. /* Interrupt enable / disable */
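/*
 * Tries to allocate one MSI-X vector per TxQ and per RxP, plus the
 * mailbox vector(s). On a partial grant the queue counts are scaled
 * down to fit the vectors obtained; if that is still not enough, or
 * allocation fails outright, the driver falls back to INTx mode.
 */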
  2190. static void
  2191. bnad_enable_msix(struct bnad *bnad)
  2192. {
  2193. int i, ret;
  2194. unsigned long flags;
  2195. spin_lock_irqsave(&bnad->bna_lock, flags);
  2196. if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
  2197. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2198. return;
  2199. }
  2200. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2201. if (bnad->msix_table)
  2202. return;
  2203. bnad->msix_table =
  2204. kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
  2205. if (!bnad->msix_table)
  2206. goto intx_mode;
  2207. for (i = 0; i < bnad->msix_num; i++)
  2208. bnad->msix_table[i].entry = i;
  2209. ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
  2210. 1, bnad->msix_num);
  2211. if (ret < 0) {
  2212. goto intx_mode;
  2213. } else if (ret < bnad->msix_num) {
  2214. dev_warn(&bnad->pcidev->dev,
  2215. "%d MSI-X vectors allocated < %d requested\n",
  2216. ret, bnad->msix_num);
  2217. spin_lock_irqsave(&bnad->bna_lock, flags);
  2218. /* ret = #of vectors that we got */
  2219. bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
  2220. (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
  2221. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2222. bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
  2223. BNAD_MAILBOX_MSIX_VECTORS;
  2224. if (bnad->msix_num > ret) {
  2225. pci_disable_msix(bnad->pcidev);
  2226. goto intx_mode;
  2227. }
  2228. }
  2229. pci_intx(bnad->pcidev, 0);
  2230. return;
  2231. intx_mode:
  2232. dev_warn(&bnad->pcidev->dev,
  2233. "MSI-X enable failed - operating in INTx mode\n");
  2234. kfree(bnad->msix_table);
  2235. bnad->msix_table = NULL;
  2236. bnad->msix_num = 0;
  2237. spin_lock_irqsave(&bnad->bna_lock, flags);
  2238. bnad->cfg_flags &= ~BNAD_CF_MSIX;
  2239. bnad_q_num_init(bnad);
  2240. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2241. }
  2242. static void
  2243. bnad_disable_msix(struct bnad *bnad)
  2244. {
  2245. u32 cfg_flags;
  2246. unsigned long flags;
  2247. spin_lock_irqsave(&bnad->bna_lock, flags);
  2248. cfg_flags = bnad->cfg_flags;
  2249. if (bnad->cfg_flags & BNAD_CF_MSIX)
  2250. bnad->cfg_flags &= ~BNAD_CF_MSIX;
  2251. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2252. if (cfg_flags & BNAD_CF_MSIX) {
  2253. pci_disable_msix(bnad->pcidev);
  2254. kfree(bnad->msix_table);
  2255. bnad->msix_table = NULL;
  2256. }
  2257. }
  2258. /* Netdev entry points */
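/*
 * bnad_open() brings up the Tx and Rx paths, programs pause/MTU and
 * enables the enet port, then restores broadcast, VLAN and unicast
 * MAC state before starting the stats timer.
 */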
  2259. static int
  2260. bnad_open(struct net_device *netdev)
  2261. {
  2262. int err;
  2263. struct bnad *bnad = netdev_priv(netdev);
  2264. struct bna_pause_config pause_config;
  2265. unsigned long flags;
  2266. mutex_lock(&bnad->conf_mutex);
  2267. /* Tx */
  2268. err = bnad_setup_tx(bnad, 0);
  2269. if (err)
  2270. goto err_return;
  2271. /* Rx */
  2272. err = bnad_setup_rx(bnad, 0);
  2273. if (err)
  2274. goto cleanup_tx;
  2275. /* Port */
  2276. pause_config.tx_pause = 0;
  2277. pause_config.rx_pause = 0;
  2278. spin_lock_irqsave(&bnad->bna_lock, flags);
  2279. bna_enet_mtu_set(&bnad->bna.enet,
  2280. BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
  2281. bna_enet_pause_config(&bnad->bna.enet, &pause_config);
  2282. bna_enet_enable(&bnad->bna.enet);
  2283. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2284. /* Enable broadcast */
  2285. bnad_enable_default_bcast(bnad);
  2286. /* Restore VLANs, if any */
  2287. bnad_restore_vlans(bnad, 0);
  2288. /* Set the UCAST address */
  2289. spin_lock_irqsave(&bnad->bna_lock, flags);
  2290. bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
  2291. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2292. /* Start the stats timer */
  2293. bnad_stats_timer_start(bnad);
  2294. mutex_unlock(&bnad->conf_mutex);
  2295. return 0;
  2296. cleanup_tx:
  2297. bnad_destroy_tx(bnad, 0);
  2298. err_return:
  2299. mutex_unlock(&bnad->conf_mutex);
  2300. return err;
  2301. }
  2302. static int
  2303. bnad_stop(struct net_device *netdev)
  2304. {
  2305. struct bnad *bnad = netdev_priv(netdev);
  2306. unsigned long flags;
  2307. mutex_lock(&bnad->conf_mutex);
  2308. /* Stop the stats timer */
  2309. bnad_stats_timer_stop(bnad);
  2310. init_completion(&bnad->bnad_completions.enet_comp);
  2311. spin_lock_irqsave(&bnad->bna_lock, flags);
  2312. bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
  2313. bnad_cb_enet_disabled);
  2314. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2315. wait_for_completion(&bnad->bnad_completions.enet_comp);
  2316. bnad_destroy_tx(bnad, 0);
  2317. bnad_destroy_rx(bnad, 0);
  2318. /* Synchronize mailbox IRQ */
  2319. bnad_mbox_irq_sync(bnad);
  2320. mutex_unlock(&bnad->conf_mutex);
  2321. return 0;
  2322. }
  2323. /* TX */
  2324. /* Returns 0 for success */
  2325. static int
  2326. bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
  2327. struct sk_buff *skb, struct bna_txq_entry *txqent)
  2328. {
  2329. u16 flags = 0;
  2330. u32 gso_size;
  2331. u16 vlan_tag = 0;
  2332. if (skb_vlan_tag_present(skb)) {
  2333. vlan_tag = (u16)skb_vlan_tag_get(skb);
  2334. flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
  2335. }
  2336. if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
  2337. vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
  2338. | (vlan_tag & 0x1fff);
  2339. flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
  2340. }
  2341. txqent->hdr.wi.vlan_tag = htons(vlan_tag);
  2342. if (skb_is_gso(skb)) {
  2343. gso_size = skb_shinfo(skb)->gso_size;
  2344. if (unlikely(gso_size > bnad->netdev->mtu)) {
  2345. BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
  2346. return -EINVAL;
  2347. }
  2348. if (unlikely((gso_size + skb_transport_offset(skb) +
  2349. tcp_hdrlen(skb)) >= skb->len)) {
  2350. txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
  2351. txqent->hdr.wi.lso_mss = 0;
  2352. BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
  2353. } else {
  2354. txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
  2355. txqent->hdr.wi.lso_mss = htons(gso_size);
  2356. }
  2357. if (bnad_tso_prepare(bnad, skb)) {
  2358. BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
  2359. return -EINVAL;
  2360. }
  2361. flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
  2362. txqent->hdr.wi.l4_hdr_size_n_offset =
  2363. htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
  2364. tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
  2365. } else {
  2366. txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
  2367. txqent->hdr.wi.lso_mss = 0;
  2368. if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
  2369. BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
  2370. return -EINVAL;
  2371. }
  2372. if (skb->ip_summed == CHECKSUM_PARTIAL) {
  2373. __be16 net_proto = vlan_get_protocol(skb);
  2374. u8 proto = 0;
  2375. if (net_proto == htons(ETH_P_IP))
  2376. proto = ip_hdr(skb)->protocol;
  2377. #ifdef NETIF_F_IPV6_CSUM
  2378. else if (net_proto == htons(ETH_P_IPV6)) {
  2379. /* nexthdr may not be TCP immediately. */
  2380. proto = ipv6_hdr(skb)->nexthdr;
  2381. }
  2382. #endif
  2383. if (proto == IPPROTO_TCP) {
  2384. flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
  2385. txqent->hdr.wi.l4_hdr_size_n_offset =
  2386. htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
  2387. (0, skb_transport_offset(skb)));
  2388. BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
  2389. if (unlikely(skb_headlen(skb) <
  2390. skb_transport_offset(skb) +
  2391. tcp_hdrlen(skb))) {
  2392. BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
  2393. return -EINVAL;
  2394. }
  2395. } else if (proto == IPPROTO_UDP) {
  2396. flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
  2397. txqent->hdr.wi.l4_hdr_size_n_offset =
  2398. htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
  2399. (0, skb_transport_offset(skb)));
  2400. BNAD_UPDATE_CTR(bnad, udpcsum_offload);
  2401. if (unlikely(skb_headlen(skb) <
  2402. skb_transport_offset(skb) +
  2403. sizeof(struct udphdr))) {
  2404. BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
  2405. return -EINVAL;
  2406. }
  2407. } else {
  2408. BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
  2409. return -EINVAL;
  2410. }
  2411. } else
  2412. txqent->hdr.wi.l4_hdr_size_n_offset = 0;
  2413. }
  2414. txqent->hdr.wi.flags = htons(flags);
  2415. txqent->hdr.wi.frame_length = htonl(skb->len);
  2416. return 0;
  2417. }
  2418. /*
  2419. * bnad_start_xmit : Netdev entry point for Transmit
  2420. * Called under lock held by net_device
  2421. */
  2422. static netdev_tx_t
  2423. bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
  2424. {
  2425. struct bnad *bnad = netdev_priv(netdev);
  2426. u32 txq_id = 0;
  2427. struct bna_tcb *tcb = NULL;
  2428. struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
  2429. u32 prod, q_depth, vect_id;
  2430. u32 wis, vectors, len;
  2431. int i;
  2432. dma_addr_t dma_addr;
  2433. struct bna_txq_entry *txqent;
  2434. len = skb_headlen(skb);
  2435. /* Sanity checks for the skb */
  2436. if (unlikely(skb->len <= ETH_HLEN)) {
  2437. dev_kfree_skb_any(skb);
  2438. BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
  2439. return NETDEV_TX_OK;
  2440. }
  2441. if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
  2442. dev_kfree_skb_any(skb);
  2443. BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
  2444. return NETDEV_TX_OK;
  2445. }
  2446. if (unlikely(len == 0)) {
  2447. dev_kfree_skb_any(skb);
  2448. BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
  2449. return NETDEV_TX_OK;
  2450. }
  2451. tcb = bnad->tx_info[0].tcb[txq_id];
  2452. /*
  2453. * Takes care of the Tx that is scheduled between clearing the flag
  2454. * and the netif_tx_stop_all_queues() call.
  2455. */
  2456. if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
  2457. dev_kfree_skb_any(skb);
  2458. BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
  2459. return NETDEV_TX_OK;
  2460. }
  2461. q_depth = tcb->q_depth;
  2462. prod = tcb->producer_index;
  2463. unmap_q = tcb->unmap_q;
  2464. vectors = 1 + skb_shinfo(skb)->nr_frags;
  2465. wis = BNA_TXQ_WI_NEEDED(vectors); /* 4 vectors per work item */
  2466. if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
  2467. dev_kfree_skb_any(skb);
  2468. BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
  2469. return NETDEV_TX_OK;
  2470. }
  2471. /* Check for available TxQ resources */
  2472. if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
  2473. if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
  2474. !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
  2475. u32 sent;
  2476. sent = bnad_txcmpl_process(bnad, tcb);
  2477. if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
  2478. bna_ib_ack(tcb->i_dbell, sent);
  2479. smp_mb__before_atomic();
  2480. clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
  2481. } else {
  2482. netif_stop_queue(netdev);
  2483. BNAD_UPDATE_CTR(bnad, netif_queue_stop);
  2484. }
  2485. smp_mb();
  2486. /*
2487. * Check again to deal with the race between netif_stop_queue()
2488. * here and netif_wake_queue() in the interrupt handler, which
2489. * does not run under the netif tx lock.
  2490. */
  2491. if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
  2492. BNAD_UPDATE_CTR(bnad, netif_queue_stop);
  2493. return NETDEV_TX_BUSY;
  2494. } else {
  2495. netif_wake_queue(netdev);
  2496. BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
  2497. }
  2498. }
  2499. txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
  2500. head_unmap = &unmap_q[prod];
  2501. /* Program the opcode, flags, frame_len, num_vectors in WI */
  2502. if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
  2503. dev_kfree_skb_any(skb);
  2504. return NETDEV_TX_OK;
  2505. }
  2506. txqent->hdr.wi.reserved = 0;
  2507. txqent->hdr.wi.num_vectors = vectors;
  2508. head_unmap->skb = skb;
  2509. head_unmap->nvecs = 0;
  2510. /* Program the vectors */
  2511. unmap = head_unmap;
  2512. dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
  2513. len, DMA_TO_DEVICE);
  2514. if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
  2515. dev_kfree_skb_any(skb);
  2516. BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
  2517. return NETDEV_TX_OK;
  2518. }
  2519. BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
  2520. txqent->vector[0].length = htons(len);
  2521. dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
  2522. head_unmap->nvecs++;
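/*
 * Map each skb fragment; a new work item with a WI-extension opcode
 * is started every BFI_TX_MAX_VECTORS_PER_WI vectors. On any mapping
 * error, all mappings taken so far are undone.
 */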
  2523. for (i = 0, vect_id = 0; i < vectors - 1; i++) {
  2524. const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
  2525. u32 size = skb_frag_size(frag);
  2526. if (unlikely(size == 0)) {
  2527. /* Undo the changes starting at tcb->producer_index */
  2528. bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
  2529. tcb->producer_index);
  2530. dev_kfree_skb_any(skb);
  2531. BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
  2532. return NETDEV_TX_OK;
  2533. }
  2534. len += size;
  2535. vect_id++;
  2536. if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
  2537. vect_id = 0;
  2538. BNA_QE_INDX_INC(prod, q_depth);
  2539. txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
  2540. txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
  2541. unmap = &unmap_q[prod];
  2542. }
  2543. dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
  2544. 0, size, DMA_TO_DEVICE);
  2545. if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
  2546. /* Undo the changes starting at tcb->producer_index */
  2547. bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
  2548. tcb->producer_index);
  2549. dev_kfree_skb_any(skb);
  2550. BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
  2551. return NETDEV_TX_OK;
  2552. }
  2553. dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
  2554. BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
  2555. txqent->vector[vect_id].length = htons(size);
  2556. dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
  2557. dma_addr);
  2558. head_unmap->nvecs++;
  2559. }
  2560. if (unlikely(len != skb->len)) {
  2561. /* Undo the changes starting at tcb->producer_index */
  2562. bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
  2563. dev_kfree_skb_any(skb);
  2564. BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
  2565. return NETDEV_TX_OK;
  2566. }
  2567. BNA_QE_INDX_INC(prod, q_depth);
  2568. tcb->producer_index = prod;
  2569. wmb();
  2570. if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
  2571. return NETDEV_TX_OK;
  2572. skb_tx_timestamp(skb);
  2573. bna_txq_prod_indx_doorbell(tcb);
  2574. return NETDEV_TX_OK;
  2575. }
  2576. /*
2577. * Uses a spin_lock to synchronize reading of the stats structures,
2578. * which are written by BNA under the same lock.
  2579. */
  2580. static void
  2581. bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
  2582. {
  2583. struct bnad *bnad = netdev_priv(netdev);
  2584. unsigned long flags;
  2585. spin_lock_irqsave(&bnad->bna_lock, flags);
  2586. bnad_netdev_qstats_fill(bnad, stats);
  2587. bnad_netdev_hwstats_fill(bnad, stats);
  2588. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2589. }
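/*
 * Program the unicast address list into the CAM. An empty list clears
 * the CAM; on overflow or failure the device falls back to
 * default-function mode (BNAD_CF_DEFAULT).
 */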
  2590. static void
  2591. bnad_set_rx_ucast_fltr(struct bnad *bnad)
  2592. {
  2593. struct net_device *netdev = bnad->netdev;
  2594. int uc_count = netdev_uc_count(netdev);
  2595. enum bna_cb_status ret;
  2596. u8 *mac_list;
  2597. struct netdev_hw_addr *ha;
  2598. int entry;
  2599. if (netdev_uc_empty(bnad->netdev)) {
  2600. bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
  2601. return;
  2602. }
  2603. if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
  2604. goto mode_default;
  2605. mac_list = kcalloc(ETH_ALEN, uc_count, GFP_ATOMIC);
  2606. if (mac_list == NULL)
  2607. goto mode_default;
  2608. entry = 0;
  2609. netdev_for_each_uc_addr(ha, netdev) {
  2610. ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
  2611. entry++;
  2612. }
  2613. ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
  2614. kfree(mac_list);
  2615. if (ret != BNA_CB_SUCCESS)
  2616. goto mode_default;
  2617. return;
  2618. /* ucast packets not in UCAM are routed to default function */
  2619. mode_default:
  2620. bnad->cfg_flags |= BNAD_CF_DEFAULT;
  2621. bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
  2622. }
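/*
 * Program the multicast list, with the broadcast address in entry 0.
 * On IFF_ALLMULTI, overflow or failure, switch to all-multicast mode.
 */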
  2623. static void
  2624. bnad_set_rx_mcast_fltr(struct bnad *bnad)
  2625. {
  2626. struct net_device *netdev = bnad->netdev;
  2627. int mc_count = netdev_mc_count(netdev);
  2628. enum bna_cb_status ret;
  2629. u8 *mac_list;
  2630. if (netdev->flags & IFF_ALLMULTI)
  2631. goto mode_allmulti;
  2632. if (netdev_mc_empty(netdev))
  2633. return;
  2634. if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
  2635. goto mode_allmulti;
  2636. mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
  2637. if (mac_list == NULL)
  2638. goto mode_allmulti;
  2639. ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
  2640. /* copy rest of the MCAST addresses */
  2641. bnad_netdev_mc_list_get(netdev, mac_list);
  2642. ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
  2643. kfree(mac_list);
  2644. if (ret != BNA_CB_SUCCESS)
  2645. goto mode_allmulti;
  2646. return;
  2647. mode_allmulti:
  2648. bnad->cfg_flags |= BNAD_CF_ALLMULTI;
  2649. bna_rx_mcast_delall(bnad->rx_info[0].rx);
  2650. }
  2651. void
  2652. bnad_set_rx_mode(struct net_device *netdev)
  2653. {
  2654. struct bnad *bnad = netdev_priv(netdev);
  2655. enum bna_rxmode new_mode, mode_mask;
  2656. unsigned long flags;
  2657. spin_lock_irqsave(&bnad->bna_lock, flags);
  2658. if (bnad->rx_info[0].rx == NULL) {
  2659. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2660. return;
  2661. }
  2662. /* clear bnad flags to update it with new settings */
  2663. bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
  2664. BNAD_CF_ALLMULTI);
  2665. new_mode = 0;
  2666. if (netdev->flags & IFF_PROMISC) {
  2667. new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
  2668. bnad->cfg_flags |= BNAD_CF_PROMISC;
  2669. } else {
  2670. bnad_set_rx_mcast_fltr(bnad);
  2671. if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
  2672. new_mode |= BNA_RXMODE_ALLMULTI;
  2673. bnad_set_rx_ucast_fltr(bnad);
  2674. if (bnad->cfg_flags & BNAD_CF_DEFAULT)
  2675. new_mode |= BNA_RXMODE_DEFAULT;
  2676. }
  2677. mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
  2678. BNA_RXMODE_ALLMULTI;
  2679. bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
  2680. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2681. }
  2682. /*
  2683. * bna_lock is used to sync writes to netdev->addr
  2684. * conf_lock cannot be used since this call may be made
  2685. * in a non-blocking context.
  2686. */
  2687. static int
  2688. bnad_set_mac_address(struct net_device *netdev, void *addr)
  2689. {
  2690. int err;
  2691. struct bnad *bnad = netdev_priv(netdev);
  2692. struct sockaddr *sa = (struct sockaddr *)addr;
  2693. unsigned long flags;
  2694. spin_lock_irqsave(&bnad->bna_lock, flags);
  2695. err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
  2696. if (!err)
  2697. ether_addr_copy(netdev->dev_addr, sa->sa_data);
  2698. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2699. return err;
  2700. }
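/*
 * Program the new frame size into the enet block and wait for the
 * firmware completion; returns the completion status.
 */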
  2701. static int
  2702. bnad_mtu_set(struct bnad *bnad, int frame_size)
  2703. {
  2704. unsigned long flags;
  2705. init_completion(&bnad->bnad_completions.mtu_comp);
  2706. spin_lock_irqsave(&bnad->bna_lock, flags);
  2707. bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
  2708. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2709. wait_for_completion(&bnad->bnad_completions.mtu_comp);
  2710. return bnad->bnad_completions.mtu_comp_status;
  2711. }
  2712. static int
  2713. bnad_change_mtu(struct net_device *netdev, int new_mtu)
  2714. {
  2715. int err, mtu;
  2716. struct bnad *bnad = netdev_priv(netdev);
  2717. u32 rx_count = 0, frame, new_frame;
  2718. mutex_lock(&bnad->conf_mutex);
  2719. mtu = netdev->mtu;
  2720. netdev->mtu = new_mtu;
  2721. frame = BNAD_FRAME_SIZE(mtu);
  2722. new_frame = BNAD_FRAME_SIZE(new_mtu);
  2723. /* check if multi-buffer needs to be enabled */
  2724. if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
  2725. netif_running(bnad->netdev)) {
2726. /* only when the transition crosses the 4K boundary */
  2727. if ((frame <= 4096 && new_frame > 4096) ||
  2728. (frame > 4096 && new_frame <= 4096))
  2729. rx_count = bnad_reinit_rx(bnad);
  2730. }
2731. /* rx_count > 0 - new rx objects were created;
2732. * in that case Linux expects err = 0 to be returned.
  2733. */
  2734. err = bnad_mtu_set(bnad, new_frame);
  2735. if (err)
  2736. err = -EBUSY;
  2737. mutex_unlock(&bnad->conf_mutex);
  2738. return err;
  2739. }
  2740. static int
  2741. bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
  2742. {
  2743. struct bnad *bnad = netdev_priv(netdev);
  2744. unsigned long flags;
  2745. if (!bnad->rx_info[0].rx)
  2746. return 0;
  2747. mutex_lock(&bnad->conf_mutex);
  2748. spin_lock_irqsave(&bnad->bna_lock, flags);
  2749. bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
  2750. set_bit(vid, bnad->active_vlans);
  2751. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2752. mutex_unlock(&bnad->conf_mutex);
  2753. return 0;
  2754. }
  2755. static int
  2756. bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
  2757. {
  2758. struct bnad *bnad = netdev_priv(netdev);
  2759. unsigned long flags;
  2760. if (!bnad->rx_info[0].rx)
  2761. return 0;
  2762. mutex_lock(&bnad->conf_mutex);
  2763. spin_lock_irqsave(&bnad->bna_lock, flags);
  2764. clear_bit(vid, bnad->active_vlans);
  2765. bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
  2766. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2767. mutex_unlock(&bnad->conf_mutex);
  2768. return 0;
  2769. }
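/*
 * Only a runtime change of NETIF_F_HW_VLAN_CTAG_RX needs hardware
 * action: toggle VLAN stripping on the default Rx while the
 * interface is running.
 */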
  2770. static int bnad_set_features(struct net_device *dev, netdev_features_t features)
  2771. {
  2772. struct bnad *bnad = netdev_priv(dev);
  2773. netdev_features_t changed = features ^ dev->features;
  2774. if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
  2775. unsigned long flags;
  2776. spin_lock_irqsave(&bnad->bna_lock, flags);
  2777. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  2778. bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
  2779. else
  2780. bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
  2781. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2782. }
  2783. return 0;
  2784. }
  2785. #ifdef CONFIG_NET_POLL_CONTROLLER
  2786. static void
  2787. bnad_netpoll(struct net_device *netdev)
  2788. {
  2789. struct bnad *bnad = netdev_priv(netdev);
  2790. struct bnad_rx_info *rx_info;
  2791. struct bnad_rx_ctrl *rx_ctrl;
  2792. u32 curr_mask;
  2793. int i, j;
  2794. if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
  2795. bna_intx_disable(&bnad->bna, curr_mask);
  2796. bnad_isr(bnad->pcidev->irq, netdev);
  2797. bna_intx_enable(&bnad->bna, curr_mask);
  2798. } else {
  2799. /*
  2800. * Tx processing may happen in sending context, so no need
  2801. * to explicitly process completions here
  2802. */
  2803. /* Rx processing */
  2804. for (i = 0; i < bnad->num_rx; i++) {
  2805. rx_info = &bnad->rx_info[i];
  2806. if (!rx_info->rx)
  2807. continue;
  2808. for (j = 0; j < bnad->num_rxp_per_rx; j++) {
  2809. rx_ctrl = &rx_info->rx_ctrl[j];
  2810. if (rx_ctrl->ccb)
  2811. bnad_netif_rx_schedule_poll(bnad,
  2812. rx_ctrl->ccb);
  2813. }
  2814. }
  2815. }
  2816. }
  2817. #endif
  2818. static const struct net_device_ops bnad_netdev_ops = {
  2819. .ndo_open = bnad_open,
  2820. .ndo_stop = bnad_stop,
  2821. .ndo_start_xmit = bnad_start_xmit,
  2822. .ndo_get_stats64 = bnad_get_stats64,
  2823. .ndo_set_rx_mode = bnad_set_rx_mode,
  2824. .ndo_validate_addr = eth_validate_addr,
  2825. .ndo_set_mac_address = bnad_set_mac_address,
  2826. .ndo_change_mtu = bnad_change_mtu,
  2827. .ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
  2828. .ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
  2829. .ndo_set_features = bnad_set_features,
  2830. #ifdef CONFIG_NET_POLL_CONTROLLER
  2831. .ndo_poll_controller = bnad_netpoll
  2832. #endif
  2833. };
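/*
 * Populate netdev feature flags, MMIO range, MTU limits, netdev_ops
 * and ethtool ops.
 */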
  2834. static void
  2835. bnad_netdev_init(struct bnad *bnad, bool using_dac)
  2836. {
  2837. struct net_device *netdev = bnad->netdev;
  2838. netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
  2839. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  2840. NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
  2841. NETIF_F_HW_VLAN_CTAG_RX;
  2842. netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
  2843. NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
  2844. NETIF_F_TSO | NETIF_F_TSO6;
  2845. netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
  2846. if (using_dac)
  2847. netdev->features |= NETIF_F_HIGHDMA;
  2848. netdev->mem_start = bnad->mmio_start;
  2849. netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
  2850. /* MTU range: 46 - 9000 */
  2851. netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
  2852. netdev->max_mtu = BNAD_JUMBO_MTU;
  2853. netdev->netdev_ops = &bnad_netdev_ops;
  2854. bnad_set_ethtool_ops(netdev);
  2855. }
  2856. /*
  2857. * 1. Initialize the bnad structure
  2858. * 2. Setup netdev pointer in pci_dev
  2859. * 3. Initialize no. of TxQ & CQs & MSIX vectors
  2860. * 4. Initialize work queue.
  2861. */
  2862. static int
  2863. bnad_init(struct bnad *bnad,
  2864. struct pci_dev *pdev, struct net_device *netdev)
  2865. {
  2866. unsigned long flags;
  2867. SET_NETDEV_DEV(netdev, &pdev->dev);
  2868. pci_set_drvdata(pdev, netdev);
  2869. bnad->netdev = netdev;
  2870. bnad->pcidev = pdev;
  2871. bnad->mmio_start = pci_resource_start(pdev, 0);
  2872. bnad->mmio_len = pci_resource_len(pdev, 0);
  2873. bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
  2874. if (!bnad->bar0) {
  2875. dev_err(&pdev->dev, "ioremap for bar0 failed\n");
  2876. return -ENOMEM;
  2877. }
  2878. dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
  2879. (unsigned long long) bnad->mmio_len);
  2880. spin_lock_irqsave(&bnad->bna_lock, flags);
  2881. if (!bnad_msix_disable)
  2882. bnad->cfg_flags = BNAD_CF_MSIX;
  2883. bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
  2884. bnad_q_num_init(bnad);
  2885. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  2886. bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
  2887. (bnad->num_rx * bnad->num_rxp_per_rx) +
  2888. BNAD_MAILBOX_MSIX_VECTORS;
  2889. bnad->txq_depth = BNAD_TXQ_DEPTH;
  2890. bnad->rxq_depth = BNAD_RXQ_DEPTH;
  2891. bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
  2892. bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
  2893. sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
  2894. bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
  2895. if (!bnad->work_q) {
  2896. iounmap(bnad->bar0);
  2897. return -ENOMEM;
  2898. }
  2899. return 0;
  2900. }
  2901. /*
  2902. * Must be called after bnad_pci_uninit()
  2903. * so that iounmap() and pci_set_drvdata(NULL)
2904. * happen only after PCI uninitialization.
  2905. */
  2906. static void
  2907. bnad_uninit(struct bnad *bnad)
  2908. {
  2909. if (bnad->work_q) {
  2910. flush_workqueue(bnad->work_q);
  2911. destroy_workqueue(bnad->work_q);
  2912. bnad->work_q = NULL;
  2913. }
  2914. if (bnad->bar0)
  2915. iounmap(bnad->bar0);
  2916. }
  2917. /*
  2918. * Initialize locks
2919. a) Per-ioceth mutex used for serializing configuration
2920. changes from the OS interface
2921. b) spin lock used to protect the bna state machine
  2922. */
  2923. static void
  2924. bnad_lock_init(struct bnad *bnad)
  2925. {
  2926. spin_lock_init(&bnad->bna_lock);
  2927. mutex_init(&bnad->conf_mutex);
  2928. }
  2929. static void
  2930. bnad_lock_uninit(struct bnad *bnad)
  2931. {
  2932. mutex_destroy(&bnad->conf_mutex);
  2933. }
  2934. /* PCI Initialization */
  2935. static int
  2936. bnad_pci_init(struct bnad *bnad,
  2937. struct pci_dev *pdev, bool *using_dac)
  2938. {
  2939. int err;
  2940. err = pci_enable_device(pdev);
  2941. if (err)
  2942. return err;
  2943. err = pci_request_regions(pdev, BNAD_NAME);
  2944. if (err)
  2945. goto disable_device;
  2946. if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
  2947. *using_dac = true;
  2948. } else {
  2949. err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
  2950. if (err)
  2951. goto release_regions;
  2952. *using_dac = false;
  2953. }
  2954. pci_set_master(pdev);
  2955. return 0;
  2956. release_regions:
  2957. pci_release_regions(pdev);
  2958. disable_device:
  2959. pci_disable_device(pdev);
  2960. return err;
  2961. }
  2962. static void
  2963. bnad_pci_uninit(struct pci_dev *pdev)
  2964. {
  2965. pci_release_regions(pdev);
  2966. pci_disable_device(pdev);
  2967. }
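/*
 * PCI probe: load the firmware image, allocate the netdev/bnad pair,
 * map BAR0, bring up the IOC, size the queues and finally register
 * the net device. Error paths unwind in reverse order through the
 * labels at the bottom. Note that an ioceth enable failure is logged
 * but does not fail the probe: the function returns 0 without
 * registering the netdev.
 */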
  2968. static int
  2969. bnad_pci_probe(struct pci_dev *pdev,
  2970. const struct pci_device_id *pcidev_id)
  2971. {
  2972. bool using_dac;
  2973. int err;
  2974. struct bnad *bnad;
  2975. struct bna *bna;
  2976. struct net_device *netdev;
  2977. struct bfa_pcidev pcidev_info;
  2978. unsigned long flags;
  2979. mutex_lock(&bnad_fwimg_mutex);
  2980. if (!cna_get_firmware_buf(pdev)) {
  2981. mutex_unlock(&bnad_fwimg_mutex);
  2982. dev_err(&pdev->dev, "failed to load firmware image!\n");
  2983. return -ENODEV;
  2984. }
  2985. mutex_unlock(&bnad_fwimg_mutex);
  2986. /*
2987. * Allocates sizeof(struct net_device) + sizeof(struct bnad)
2988. * bnad = netdev_priv(netdev)
  2989. */
  2990. netdev = alloc_etherdev(sizeof(struct bnad));
  2991. if (!netdev) {
  2992. err = -ENOMEM;
  2993. return err;
  2994. }
  2995. bnad = netdev_priv(netdev);
  2996. bnad_lock_init(bnad);
  2997. bnad->id = atomic_inc_return(&bna_id) - 1;
  2998. mutex_lock(&bnad->conf_mutex);
  2999. /*
  3000. * PCI initialization
3001. * Output : using_dac = true for 64-bit DMA
3002. * = false for 32-bit DMA
  3003. */
  3004. using_dac = false;
  3005. err = bnad_pci_init(bnad, pdev, &using_dac);
  3006. if (err)
  3007. goto unlock_mutex;
  3008. /*
  3009. * Initialize bnad structure
  3010. * Setup relation between pci_dev & netdev
  3011. */
  3012. err = bnad_init(bnad, pdev, netdev);
  3013. if (err)
  3014. goto pci_uninit;
  3015. /* Initialize netdev structure, set up ethtool ops */
  3016. bnad_netdev_init(bnad, using_dac);
  3017. /* Set link to down state */
  3018. netif_carrier_off(netdev);
3019. /* Set up the debugfs node for this bnad */
  3020. if (bna_debugfs_enable)
  3021. bnad_debugfs_init(bnad);
3022. /* Get resource requirement from bna */
  3023. spin_lock_irqsave(&bnad->bna_lock, flags);
  3024. bna_res_req(&bnad->res_info[0]);
  3025. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3026. /* Allocate resources from bna */
  3027. err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
  3028. if (err)
  3029. goto drv_uninit;
  3030. bna = &bnad->bna;
  3031. /* Setup pcidev_info for bna_init() */
  3032. pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
  3033. pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
  3034. pcidev_info.device_id = bnad->pcidev->device;
  3035. pcidev_info.pci_bar_kva = bnad->bar0;
  3036. spin_lock_irqsave(&bnad->bna_lock, flags);
  3037. bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
  3038. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3039. bnad->stats.bna_stats = &bna->stats;
  3040. bnad_enable_msix(bnad);
  3041. err = bnad_mbox_irq_alloc(bnad);
  3042. if (err)
  3043. goto res_free;
  3044. /* Set up timers */
  3045. timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
  3046. timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
  3047. timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
  3048. timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
  3049. 0);
  3050. /*
  3051. * Start the chip
3052. * If the callback comes back with an error, we bail out.
  3053. * This is a catastrophic error.
  3054. */
  3055. err = bnad_ioceth_enable(bnad);
  3056. if (err) {
  3057. dev_err(&pdev->dev, "initialization failed err=%d\n", err);
  3058. goto probe_success;
  3059. }
  3060. spin_lock_irqsave(&bnad->bna_lock, flags);
  3061. if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
  3062. bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
  3063. bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
  3064. bna_attr(bna)->num_rxp - 1);
  3065. if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
  3066. bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
  3067. err = -EIO;
  3068. }
  3069. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3070. if (err)
  3071. goto disable_ioceth;
  3072. spin_lock_irqsave(&bnad->bna_lock, flags);
  3073. bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
  3074. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3075. err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
  3076. if (err) {
  3077. err = -EIO;
  3078. goto disable_ioceth;
  3079. }
  3080. spin_lock_irqsave(&bnad->bna_lock, flags);
  3081. bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
  3082. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3083. /* Get the burnt-in mac */
  3084. spin_lock_irqsave(&bnad->bna_lock, flags);
  3085. bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
  3086. bnad_set_netdev_perm_addr(bnad);
  3087. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3088. mutex_unlock(&bnad->conf_mutex);
3089. /* Finally, register with the net_device layer */
  3090. err = register_netdev(netdev);
  3091. if (err) {
  3092. dev_err(&pdev->dev, "registering net device failed\n");
  3093. goto probe_uninit;
  3094. }
  3095. set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
  3096. return 0;
  3097. probe_success:
  3098. mutex_unlock(&bnad->conf_mutex);
  3099. return 0;
  3100. probe_uninit:
  3101. mutex_lock(&bnad->conf_mutex);
  3102. bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
  3103. disable_ioceth:
  3104. bnad_ioceth_disable(bnad);
  3105. del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
  3106. del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
  3107. del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
  3108. spin_lock_irqsave(&bnad->bna_lock, flags);
  3109. bna_uninit(bna);
  3110. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3111. bnad_mbox_irq_free(bnad);
  3112. bnad_disable_msix(bnad);
  3113. res_free:
  3114. bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
  3115. drv_uninit:
  3116. /* Remove the debugfs node for this bnad */
  3117. kfree(bnad->regdata);
  3118. bnad_debugfs_uninit(bnad);
  3119. bnad_uninit(bnad);
  3120. pci_uninit:
  3121. bnad_pci_uninit(pdev);
  3122. unlock_mutex:
  3123. mutex_unlock(&bnad->conf_mutex);
  3124. bnad_lock_uninit(bnad);
  3125. free_netdev(netdev);
  3126. return err;
  3127. }
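/*
 * Tear down in reverse order of probe: unregister the netdev, shut
 * down the IOC, release BNA resources, interrupts and PCI resources,
 * then free the netdev.
 */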
  3128. static void
  3129. bnad_pci_remove(struct pci_dev *pdev)
  3130. {
  3131. struct net_device *netdev = pci_get_drvdata(pdev);
  3132. struct bnad *bnad;
  3133. struct bna *bna;
  3134. unsigned long flags;
  3135. if (!netdev)
  3136. return;
  3137. bnad = netdev_priv(netdev);
  3138. bna = &bnad->bna;
  3139. if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
  3140. unregister_netdev(netdev);
  3141. mutex_lock(&bnad->conf_mutex);
  3142. bnad_ioceth_disable(bnad);
  3143. del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
  3144. del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
  3145. del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
  3146. spin_lock_irqsave(&bnad->bna_lock, flags);
  3147. bna_uninit(bna);
  3148. spin_unlock_irqrestore(&bnad->bna_lock, flags);
  3149. bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
  3150. bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
  3151. bnad_mbox_irq_free(bnad);
  3152. bnad_disable_msix(bnad);
  3153. bnad_pci_uninit(pdev);
  3154. mutex_unlock(&bnad->conf_mutex);
  3155. bnad_lock_uninit(bnad);
  3156. /* Remove the debugfs node for this bnad */
  3157. kfree(bnad->regdata);
  3158. bnad_debugfs_uninit(bnad);
  3159. bnad_uninit(bnad);
  3160. free_netdev(netdev);
  3161. }
  3162. static const struct pci_device_id bnad_pci_id_table[] = {
  3163. {
  3164. PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
  3165. PCI_DEVICE_ID_BROCADE_CT),
  3166. .class = PCI_CLASS_NETWORK_ETHERNET << 8,
  3167. .class_mask = 0xffff00
  3168. },
  3169. {
  3170. PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
  3171. BFA_PCI_DEVICE_ID_CT2),
  3172. .class = PCI_CLASS_NETWORK_ETHERNET << 8,
  3173. .class_mask = 0xffff00
  3174. },
  3175. {0, },
  3176. };
  3177. MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
  3178. static struct pci_driver bnad_pci_driver = {
  3179. .name = BNAD_NAME,
  3180. .id_table = bnad_pci_id_table,
  3181. .probe = bnad_pci_probe,
  3182. .remove = bnad_pci_remove,
  3183. };
  3184. static int __init
  3185. bnad_module_init(void)
  3186. {
  3187. int err;
  3188. bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
  3189. err = pci_register_driver(&bnad_pci_driver);
  3190. if (err < 0) {
  3191. pr_err("bna: PCI driver registration failed err=%d\n", err);
  3192. return err;
  3193. }
  3194. return 0;
  3195. }
  3196. static void __exit
  3197. bnad_module_exit(void)
  3198. {
  3199. pci_unregister_driver(&bnad_pci_driver);
  3200. release_firmware(bfi_fw);
  3201. }
  3202. module_init(bnad_module_init);
  3203. module_exit(bnad_module_exit);
  3204. MODULE_AUTHOR("Brocade");
  3205. MODULE_LICENSE("GPL");
  3206. MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
  3207. MODULE_FIRMWARE(CNA_FW_FILE_CT);
  3208. MODULE_FIRMWARE(CNA_FW_FILE_CT2);