Source file: drivers/net/ethernet/stmicro/stmmac/norm_desc.c

Mirrored from github.com/mirrors/linux — C, 329 lines (252 code, 54 blank, 23 comment), MD5 644822d6033cbd44c898f377f868706a.

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*******************************************************************************
  3. This contains the functions to handle the normal descriptors.
  4. Copyright (C) 2007-2009 STMicroelectronics Ltd
  5. Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
  6. *******************************************************************************/
  7. #include <linux/stmmac.h>
  8. #include "common.h"
  9. #include "descs_com.h"
/* Decode the completion status of a transmitted frame from a normal
 * (non-enhanced) descriptor.
 *
 * @data:   opaque pointer, actually the struct net_device_stats to update
 * @x:      driver extra statistics to update
 * @p:      TX descriptor to inspect
 * @ioaddr: device register base (unused by this variant)
 *
 * Returns tx_dma_own while the DMA still owns the descriptor, tx_not_ls
 * for a segment that is not the last one of a frame, tx_err when the
 * error summary bit is set on the last segment, tx_done otherwise.
 */
static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
			       struct dma_desc *p, void __iomem *ioaddr)
{
	struct net_device_stats *stats = (struct net_device_stats *)data;
	unsigned int tdes0 = le32_to_cpu(p->des0);
	unsigned int tdes1 = le32_to_cpu(p->des1);
	int ret = tx_done;

	/* Get tx owner first: while OWN is set the status bits are not
	 * valid yet.
	 */
	if (unlikely(tdes0 & TDES0_OWN))
		return tx_dma_own;

	/* Verify tx error by looking at the last segment; the error
	 * summary is only meaningful there.
	 */
	if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
		return tx_not_ls;

	if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
		if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
			x->tx_underflow++;
			stats->tx_fifo_errors++;
		}
		if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
			     (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
			     (tdes0 & TDES0_LATE_COLLISION))) {
			unsigned int collisions;

			/* Collision count field starts at bit 3 of TDES0. */
			collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
			stats->collisions += collisions;
		}
		ret = tx_err;
	}

	if (tdes0 & TDES0_VLAN_FRAME)
		x->tx_vlan++;

	if (unlikely(tdes0 & TDES0_DEFERRED))
		x->tx_deferred++;

	return ret;
}
  51. static int ndesc_get_tx_len(struct dma_desc *p)
  52. {
  53. return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
  54. }
/* This function verifies if each incoming frame has some errors
 * and, if required, updates the multicast statistics.
 * In case of success, it returns good_frame because the GMAC device
 * is supposed to be able to compute the csum in HW.
 *
 * @data: opaque pointer, actually the struct net_device_stats to update
 * @x:    driver extra statistics to update
 * @p:    RX descriptor to inspect
 *
 * Returns dma_own while the DMA still owns the descriptor, discard_frame
 * on any error condition, good_frame otherwise.
 */
static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
			       struct dma_desc *p)
{
	int ret = good_frame;
	unsigned int rdes0 = le32_to_cpu(p->des0);
	struct net_device_stats *stats = (struct net_device_stats *)data;

	/* Status bits are not valid until the DMA releases the descriptor. */
	if (unlikely(rdes0 & RDES0_OWN))
		return dma_own;

	/* A frame that does not end in this descriptor spans several
	 * buffers, which this path does not support: count and drop it.
	 */
	if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
		stats->rx_length_errors++;
		return discard_frame;
	}

	if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
		if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
			x->rx_desc++;
		if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
			x->sa_filter_fail++;
		if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
			x->overflow_error++;
		if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
			x->ipc_csum_error++;
		if (unlikely(rdes0 & RDES0_COLLISION)) {
			x->rx_collision++;
			stats->collisions++;
		}
		if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
			x->rx_crc_errors++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* These errors are reported outside the error summary bit. */
	if (unlikely(rdes0 & RDES0_DRIBBLING))
		x->dribbling_bit++;

	if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
		x->rx_length++;
		ret = discard_frame;
	}
	if (unlikely(rdes0 & RDES0_MII_ERROR)) {
		x->rx_mii++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (rdes0 & RDES0_VLAN_TAG)
		x->vlan_tag++;
#endif
	return ret;
}
/* Prepare an RX descriptor and hand it to the DMA.
 *
 * @p:             descriptor to initialize
 * @disable_rx_ic: when set, suppress the RX-complete interrupt for this
 *                 descriptor
 * @mode:          STMMAC_CHAIN_MODE or ring mode
 * @end:           non-zero for the last descriptor of the ring
 * @bfsize:        receive buffer size in bytes
 */
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
			       int end, int bfsize)
{
	int bfsize1;

	p->des0 |= cpu_to_le32(RDES0_OWN);

	/* Buffer 1 can describe at most BUF_SIZE_2KiB - 1 bytes; the
	 * ring helper below also receives the full bfsize, presumably to
	 * program the remainder into buffer 2 — confirm in descs_com.h.
	 */
	bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);

	if (mode == STMMAC_CHAIN_MODE)
		ndesc_rx_set_on_chain(p, end);
	else
		ndesc_rx_set_on_ring(p, end, bfsize);

	if (disable_rx_ic)
		p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
}
  120. static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
  121. {
  122. p->des0 &= cpu_to_le32(~TDES0_OWN);
  123. if (mode == STMMAC_CHAIN_MODE)
  124. ndesc_tx_set_on_chain(p);
  125. else
  126. ndesc_end_tx_desc_on_ring(p, end);
  127. }
  128. static int ndesc_get_tx_owner(struct dma_desc *p)
  129. {
  130. return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
  131. }
  132. static void ndesc_set_tx_owner(struct dma_desc *p)
  133. {
  134. p->des0 |= cpu_to_le32(TDES0_OWN);
  135. }
  136. static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
  137. {
  138. p->des0 |= cpu_to_le32(RDES0_OWN);
  139. }
  140. static int ndesc_get_tx_ls(struct dma_desc *p)
  141. {
  142. return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
  143. }
/* Reset a TX descriptor after completion so it can be reused.
 *
 * Only the status/control words are cleared; the buffer pointer words
 * (des2 onward) are preserved.
 */
static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
{
	/* Capture the end-of-ring bit (TDES1 bit 25) before the memset
	 * below wipes des1.
	 */
	int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;

	/* Zero des0/des1 only: stop at the des2 field. */
	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		ndesc_tx_set_on_chain(p);
	else
		ndesc_end_tx_desc_on_ring(p, ter);
}
/* Fill in the control fields of a TX descriptor before transmission.
 *
 * @p:           descriptor to prepare
 * @is_fs:       non-zero if this is the first segment of the frame
 * @len:         buffer length to program
 * @csum_flag:   true to request HW checksum insertion (TX_CIC_FULL)
 * @mode:        STMMAC_CHAIN_MODE or ring mode
 * @tx_own:      true to hand the descriptor over to the DMA (sets OWN)
 * @ls:          true if this is the last segment of the frame
 * @tot_pkt_len: total packet length (unused by normal descriptors)
 */
static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				  bool csum_flag, int mode, bool tx_own,
				  bool ls, unsigned int tot_pkt_len)
{
	unsigned int tdes1 = le32_to_cpu(p->des1);

	if (is_fs)
		tdes1 |= TDES1_FIRST_SEGMENT;
	else
		tdes1 &= ~TDES1_FIRST_SEGMENT;

	if (likely(csum_flag))
		tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
	else
		tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);

	/* NOTE(review): LAST_SEGMENT is only ever set here, never cleared;
	 * this looks like it relies on descriptors being zeroed on release
	 * before reuse — confirm against ndesc_release_tx_desc.
	 */
	if (ls)
		tdes1 |= TDES1_LAST_SEGMENT;

	p->des1 = cpu_to_le32(tdes1);

	if (mode == STMMAC_CHAIN_MODE)
		norm_set_tx_desc_len_on_chain(p, len);
	else
		norm_set_tx_desc_len_on_ring(p, len);

	/* OWN is set last so the DMA cannot pick up a half-prepared
	 * descriptor.
	 */
	if (tx_own)
		p->des0 |= cpu_to_le32(TDES0_OWN);
}
  176. static void ndesc_set_tx_ic(struct dma_desc *p)
  177. {
  178. p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
  179. }
  180. static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
  181. {
  182. unsigned int csum = 0;
  183. /* The type-1 checksum offload engines append the checksum at
  184. * the end of frame and the two bytes of checksum are added in
  185. * the length.
  186. * Adjust for that in the framelen for type-1 checksum offload
  187. * engines
  188. */
  189. if (rx_coe_type == STMMAC_RX_COE_TYPE1)
  190. csum = 2;
  191. return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
  192. >> RDES0_FRAME_LEN_SHIFT) -
  193. csum);
  194. }
  195. static void ndesc_enable_tx_timestamp(struct dma_desc *p)
  196. {
  197. p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
  198. }
  199. static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
  200. {
  201. return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
  202. }
  203. static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts)
  204. {
  205. struct dma_desc *p = (struct dma_desc *)desc;
  206. u64 ns;
  207. ns = le32_to_cpu(p->des2);
  208. /* convert high/sec time stamp value to nanosecond */
  209. ns += le32_to_cpu(p->des3) * 1000000000ULL;
  210. *ts = ns;
  211. }
  212. static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
  213. {
  214. struct dma_desc *p = (struct dma_desc *)desc;
  215. if ((le32_to_cpu(p->des2) == 0xffffffff) &&
  216. (le32_to_cpu(p->des3) == 0xffffffff))
  217. /* timestamp is corrupted, hence don't store it */
  218. return 0;
  219. else
  220. return 1;
  221. }
  222. static void ndesc_display_ring(void *head, unsigned int size, bool rx)
  223. {
  224. struct dma_desc *p = (struct dma_desc *)head;
  225. int i;
  226. pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
  227. for (i = 0; i < size; i++) {
  228. u64 x;
  229. x = *(u64 *)p;
  230. pr_info("%03d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
  231. i, (unsigned int)virt_to_phys(p),
  232. (unsigned int)x, (unsigned int)(x >> 32),
  233. p->des2, p->des3);
  234. p++;
  235. }
  236. pr_info("\n");
  237. }
  238. static void ndesc_get_addr(struct dma_desc *p, unsigned int *addr)
  239. {
  240. *addr = le32_to_cpu(p->des2);
  241. }
  242. static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
  243. {
  244. p->des2 = cpu_to_le32(addr);
  245. }
  246. static void ndesc_clear(struct dma_desc *p)
  247. {
  248. p->des2 = 0;
  249. }
/* Descriptor operations for the normal (non-enhanced) descriptor layout,
 * exported for cores that do not use the enhanced descriptors.
 */
const struct stmmac_desc_ops ndesc_ops = {
	.tx_status = ndesc_get_tx_status,
	.rx_status = ndesc_get_rx_status,
	.get_tx_len = ndesc_get_tx_len,
	.init_rx_desc = ndesc_init_rx_desc,
	.init_tx_desc = ndesc_init_tx_desc,
	.get_tx_owner = ndesc_get_tx_owner,
	.release_tx_desc = ndesc_release_tx_desc,
	.prepare_tx_desc = ndesc_prepare_tx_desc,
	.set_tx_ic = ndesc_set_tx_ic,
	.get_tx_ls = ndesc_get_tx_ls,
	.set_tx_owner = ndesc_set_tx_owner,
	.set_rx_owner = ndesc_set_rx_owner,
	.get_rx_frame_len = ndesc_get_rx_frame_len,
	.enable_tx_timestamp = ndesc_enable_tx_timestamp,
	.get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
	.get_timestamp = ndesc_get_timestamp,
	.get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
	.display_ring = ndesc_display_ring,
	.get_addr = ndesc_get_addr,
	.set_addr = ndesc_set_addr,
	.clear = ndesc_clear,
};