/* drivers/net/ethernet/stmicro/stmmac/chain_mode.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*******************************************************************************
  3. Specialised functions for managing Chained mode
  4. Copyright(C) 2011 STMicroelectronics Ltd
  5. It defines all the functions used to handle the normal/enhanced
  6. descriptors in case of the DMA is configured to work in chained or
  7. in ring mode.
  8. Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  9. *******************************************************************************/
  10. #include "stmmac.h"
/**
 * jumbo_frm - map an over-sized (jumbo) linear skb across chained descriptors
 * @p:    opaque pointer to the TX queue (struct stmmac_tx_queue *)
 * @skb:  socket buffer whose linear data exceeds one buffer size
 * @csum: checksum-insertion flag passed through to the descriptor setup
 *
 * Splits the skb head into bmax-sized segments (8 KiB with enhanced
 * descriptors, 2 KiB otherwise), DMA-maps each segment and fills one
 * chained descriptor per segment.  Only the final descriptor is closed
 * (last-segment bit set); the OWN bit of the first descriptor is left
 * clear so the caller can arm the whole chain atomically.
 *
 * Returns the index of the last used descriptor, or -1 on DMA mapping
 * failure.
 *
 * NOTE(review): assumes skb_headlen(skb) > bmax — callers gate on
 * is_jumbo_frm(), otherwise `len = nopaged_len - bmax` underflows
 * (unsigned).  Also, a mapping failure mid-loop returns -1 without
 * unmapping earlier segments; presumably the caller's error path
 * cleans those up — confirm against stmmac_xmit().
 */
static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
	unsigned int nopaged_len = skb_headlen(skb);
	struct stmmac_priv *priv = tx_q->priv_data;
	unsigned int entry = tx_q->cur_tx;
	unsigned int bmax, des2;
	unsigned int i = 1, len;
	struct dma_desc *desc;

	desc = tx_q->dma_tx + entry;

	/* Per-buffer limit depends on the descriptor flavour in use. */
	if (priv->plat->enh_desc)
		bmax = BUF_SIZE_8KiB;
	else
		bmax = BUF_SIZE_2KiB;

	/* Bytes still to map after the first bmax-sized segment. */
	len = nopaged_len - bmax;

	/* First segment: map the start of the skb head. */
	des2 = dma_map_single(priv->device, skb->data,
			      bmax, DMA_TO_DEVICE);
	desc->des2 = cpu_to_le32(des2);
	if (dma_mapping_error(priv->device, des2))
		return -1;
	tx_q->tx_skbuff_dma[entry].buf = des2;
	tx_q->tx_skbuff_dma[entry].len = bmax;
	/* do not close the descriptor and do not set own bit */
	stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
			       0, false, skb->len);

	/* Map the remaining bytes, one descriptor per bmax-sized chunk. */
	while (len != 0) {
		/* Only the first entry keeps the skb pointer; the rest are
		 * continuation segments of the same skb.
		 */
		tx_q->tx_skbuff[entry] = NULL;
		entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
		desc = tx_q->dma_tx + entry;

		if (len > bmax) {
			/* Middle segment: full bmax bytes, not last. */
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i),
					      bmax, DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = bmax;
			stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
					       STMMAC_CHAIN_MODE, 1, false, skb->len);
			len -= bmax;
			i++;
		} else {
			/* Tail segment: remaining len bytes. */
			des2 = dma_map_single(priv->device,
					      (skb->data + bmax * i), len,
					      DMA_TO_DEVICE);
			desc->des2 = cpu_to_le32(des2);
			if (dma_mapping_error(priv->device, des2))
				return -1;
			tx_q->tx_skbuff_dma[entry].buf = des2;
			tx_q->tx_skbuff_dma[entry].len = len;
			/* last descriptor can be set now */
			stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
					       STMMAC_CHAIN_MODE, 1, true, skb->len);
			len = 0;
		}
	}

	tx_q->cur_tx = entry;

	return entry;
}
  71. static unsigned int is_jumbo_frm(int len, int enh_desc)
  72. {
  73. unsigned int ret = 0;
  74. if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
  75. (!enh_desc && (len > BUF_SIZE_2KiB))) {
  76. ret = 1;
  77. }
  78. return ret;
  79. }
  80. static void init_dma_chain(void *des, dma_addr_t phy_addr,
  81. unsigned int size, unsigned int extend_desc)
  82. {
  83. /*
  84. * In chained mode the des3 points to the next element in the ring.
  85. * The latest element has to point to the head.
  86. */
  87. int i;
  88. dma_addr_t dma_phy = phy_addr;
  89. if (extend_desc) {
  90. struct dma_extended_desc *p = (struct dma_extended_desc *)des;
  91. for (i = 0; i < (size - 1); i++) {
  92. dma_phy += sizeof(struct dma_extended_desc);
  93. p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
  94. p++;
  95. }
  96. p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
  97. } else {
  98. struct dma_desc *p = (struct dma_desc *)des;
  99. for (i = 0; i < (size - 1); i++) {
  100. dma_phy += sizeof(struct dma_desc);
  101. p->des3 = cpu_to_le32((unsigned int)dma_phy);
  102. p++;
  103. }
  104. p->des3 = cpu_to_le32((unsigned int)phy_addr);
  105. }
  106. }
  107. static void refill_desc3(void *priv_ptr, struct dma_desc *p)
  108. {
  109. struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
  110. struct stmmac_priv *priv = rx_q->priv_data;
  111. if (priv->hwts_rx_en && !priv->extend_desc)
  112. /* NOTE: Device will overwrite des3 with timestamp value if
  113. * 1588-2002 time stamping is enabled, hence reinitialize it
  114. * to keep explicit chaining in the descriptor.
  115. */
  116. p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
  117. (((rx_q->dirty_rx) + 1) %
  118. DMA_RX_SIZE) *
  119. sizeof(struct dma_desc)));
  120. }
  121. static void clean_desc3(void *priv_ptr, struct dma_desc *p)
  122. {
  123. struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
  124. struct stmmac_priv *priv = tx_q->priv_data;
  125. unsigned int entry = tx_q->dirty_tx;
  126. if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
  127. priv->hwts_tx_en)
  128. /* NOTE: Device will overwrite des3 with timestamp value if
  129. * 1588-2002 time stamping is enabled, hence reinitialize it
  130. * to keep explicit chaining in the descriptor.
  131. */
  132. p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
  133. ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
  134. * sizeof(struct dma_desc)));
  135. }
/* Mode-ops vtable selected when the DMA is configured for chained
 * descriptors; the core driver dispatches through these hooks instead of
 * the ring-mode equivalents.
 */
const struct stmmac_mode_ops chain_mode_ops = {
	.init = init_dma_chain,
	.is_jumbo_frm = is_jumbo_frm,
	.jumbo_frm = jumbo_frm,
	.refill_desc3 = refill_desc3,
	.clean_desc3 = clean_desc3,
};