PageRenderTime 66ms CodeModel.GetById 20ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h

https://codeberg.org/ddevault/linux
C Header | 997 lines | 810 code | 130 blank | 57 comment | 1 complexity | 6bba5de85da86072ccc47e35a60f8ade MD5 | raw file
Possible License(s): GPL-2.0
  1. // SPDX-License-Identifier: GPL-2.0+
  2. // Copyright (c) 2016-2017 Hisilicon Limited.
  3. #ifndef __HCLGE_MAIN_H
  4. #define __HCLGE_MAIN_H
  5. #include <linux/fs.h>
  6. #include <linux/types.h>
  7. #include <linux/phy.h>
  8. #include <linux/if_vlan.h>
  9. #include <linux/kfifo.h>
  10. #include "hclge_cmd.h"
  11. #include "hnae3.h"
  12. #define HCLGE_MOD_VERSION "1.0"
  13. #define HCLGE_DRIVER_NAME "hclge"
  14. #define HCLGE_MAX_PF_NUM 8
  15. #define HCLGE_RD_FIRST_STATS_NUM 2
  16. #define HCLGE_RD_OTHER_STATS_NUM 4
  17. #define HCLGE_INVALID_VPORT 0xffff
  18. #define HCLGE_PF_CFG_BLOCK_SIZE 32
  19. #define HCLGE_PF_CFG_DESC_NUM \
  20. (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
  21. #define HCLGE_VECTOR_REG_BASE 0x20000
  22. #define HCLGE_MISC_VECTOR_REG_BASE 0x20400
  23. #define HCLGE_VECTOR_REG_OFFSET 0x4
  24. #define HCLGE_VECTOR_VF_OFFSET 0x100000
  25. #define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
  26. #define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
  27. #define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
  28. #define HCLGE_CMDQ_TX_TAIL_REG 0x27010
  29. #define HCLGE_CMDQ_TX_HEAD_REG 0x27014
  30. #define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
  31. #define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
  32. #define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
  33. #define HCLGE_CMDQ_RX_TAIL_REG 0x27024
  34. #define HCLGE_CMDQ_RX_HEAD_REG 0x27028
  35. #define HCLGE_CMDQ_INTR_SRC_REG 0x27100
  36. #define HCLGE_CMDQ_INTR_STS_REG 0x27104
  37. #define HCLGE_CMDQ_INTR_EN_REG 0x27108
  38. #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
  39. /* bar registers for common func */
  40. #define HCLGE_VECTOR0_OTER_EN_REG 0x20600
  41. #define HCLGE_RAS_OTHER_STS_REG 0x20B00
  42. #define HCLGE_FUNC_RESET_STS_REG 0x20C00
  43. #define HCLGE_GRO_EN_REG 0x28000
  44. /* bar registers for rcb */
  45. #define HCLGE_RING_RX_ADDR_L_REG 0x80000
  46. #define HCLGE_RING_RX_ADDR_H_REG 0x80004
  47. #define HCLGE_RING_RX_BD_NUM_REG 0x80008
  48. #define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
  49. #define HCLGE_RING_RX_MERGE_EN_REG 0x80014
  50. #define HCLGE_RING_RX_TAIL_REG 0x80018
  51. #define HCLGE_RING_RX_HEAD_REG 0x8001C
  52. #define HCLGE_RING_RX_FBD_NUM_REG 0x80020
  53. #define HCLGE_RING_RX_OFFSET_REG 0x80024
  54. #define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
  55. #define HCLGE_RING_RX_STASH_REG 0x80030
  56. #define HCLGE_RING_RX_BD_ERR_REG 0x80034
  57. #define HCLGE_RING_TX_ADDR_L_REG 0x80040
  58. #define HCLGE_RING_TX_ADDR_H_REG 0x80044
  59. #define HCLGE_RING_TX_BD_NUM_REG 0x80048
  60. #define HCLGE_RING_TX_PRIORITY_REG 0x8004C
  61. #define HCLGE_RING_TX_TC_REG 0x80050
  62. #define HCLGE_RING_TX_MERGE_EN_REG 0x80054
  63. #define HCLGE_RING_TX_TAIL_REG 0x80058
  64. #define HCLGE_RING_TX_HEAD_REG 0x8005C
  65. #define HCLGE_RING_TX_FBD_NUM_REG 0x80060
  66. #define HCLGE_RING_TX_OFFSET_REG 0x80064
  67. #define HCLGE_RING_TX_EBD_NUM_REG 0x80068
  68. #define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
  69. #define HCLGE_RING_TX_BD_ERR_REG 0x80074
  70. #define HCLGE_RING_EN_REG 0x80090
  71. /* bar registers for tqp interrupt */
  72. #define HCLGE_TQP_INTR_CTRL_REG 0x20000
  73. #define HCLGE_TQP_INTR_GL0_REG 0x20100
  74. #define HCLGE_TQP_INTR_GL1_REG 0x20200
  75. #define HCLGE_TQP_INTR_GL2_REG 0x20300
  76. #define HCLGE_TQP_INTR_RL_REG 0x20900
  77. #define HCLGE_RSS_IND_TBL_SIZE 512
  78. #define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
  79. #define HCLGE_RSS_KEY_SIZE 40
  80. #define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
  81. #define HCLGE_RSS_HASH_ALGO_SIMPLE 1
  82. #define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
  83. #define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
  84. #define HCLGE_RSS_CFG_TBL_NUM \
  85. (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
  86. #define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
  87. #define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
  88. #define HCLGE_D_PORT_BIT BIT(0)
  89. #define HCLGE_S_PORT_BIT BIT(1)
  90. #define HCLGE_D_IP_BIT BIT(2)
  91. #define HCLGE_S_IP_BIT BIT(3)
  92. #define HCLGE_V_TAG_BIT BIT(4)
  93. #define HCLGE_RSS_TC_SIZE_0 1
  94. #define HCLGE_RSS_TC_SIZE_1 2
  95. #define HCLGE_RSS_TC_SIZE_2 4
  96. #define HCLGE_RSS_TC_SIZE_3 8
  97. #define HCLGE_RSS_TC_SIZE_4 16
  98. #define HCLGE_RSS_TC_SIZE_5 32
  99. #define HCLGE_RSS_TC_SIZE_6 64
  100. #define HCLGE_RSS_TC_SIZE_7 128
  101. #define HCLGE_UMV_TBL_SIZE 3072
  102. #define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
  103. (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
  104. #define HCLGE_TQP_RESET_TRY_TIMES 200
  105. #define HCLGE_PHY_PAGE_MDIX 0
  106. #define HCLGE_PHY_PAGE_COPPER 0
  107. /* Page Selection Reg. */
  108. #define HCLGE_PHY_PAGE_REG 22
  109. /* Copper Specific Control Register */
  110. #define HCLGE_PHY_CSC_REG 16
  111. /* Copper Specific Status Register */
  112. #define HCLGE_PHY_CSS_REG 17
  113. #define HCLGE_PHY_MDIX_CTRL_S 5
  114. #define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
  115. #define HCLGE_PHY_MDIX_STATUS_B 6
  116. #define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
  117. /* Factor used to calculate offset and bitmap of VF num */
  118. #define HCLGE_VF_NUM_PER_CMD 64
  119. #define HCLGE_VF_NUM_PER_BYTE 8
/* Port orientation as encoded in the HW port-id field (see
 * HCLGE_PORT_TYPE_B below).
 * NOTE(review): "HLCGE" is a typo for "HCLGE"; the tag is kept as-is
 * because other driver files reference this spelling.
 */
enum HLCGE_PORT_TYPE {
	HOST_PORT,
	NETWORK_PORT
};
  124. #define PF_VPORT_ID 0
  125. #define HCLGE_PF_ID_S 0
  126. #define HCLGE_PF_ID_M GENMASK(2, 0)
  127. #define HCLGE_VF_ID_S 3
  128. #define HCLGE_VF_ID_M GENMASK(10, 3)
  129. #define HCLGE_PORT_TYPE_B 11
  130. #define HCLGE_NETWORK_PORT_ID_S 0
  131. #define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
  132. /* Reset related Registers */
  133. #define HCLGE_PF_OTHER_INT_REG 0x20600
  134. #define HCLGE_MISC_RESET_STS_REG 0x20700
  135. #define HCLGE_MISC_VECTOR_INT_STS 0x20800
  136. #define HCLGE_GLOBAL_RESET_REG 0x20A00
  137. #define HCLGE_GLOBAL_RESET_BIT 0
  138. #define HCLGE_CORE_RESET_BIT 1
  139. #define HCLGE_IMP_RESET_BIT 2
  140. #define HCLGE_RESET_INT_M GENMASK(2, 0)
  141. #define HCLGE_FUN_RST_ING 0x20C00
  142. #define HCLGE_FUN_RST_ING_B 0
  143. /* Vector0 register bits define */
  144. #define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
  145. #define HCLGE_VECTOR0_CORERESET_INT_B 6
  146. #define HCLGE_VECTOR0_IMPRESET_INT_B 7
  147. /* Vector0 interrupt CMDQ event source register(RW) */
  148. #define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
  149. /* CMDQ register bits for RX event(=MBX event) */
  150. #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
  151. #define HCLGE_VECTOR0_IMP_RESET_INT_B 1
  152. #define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
  153. #define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
  154. #define HCLGE_MAC_DEFAULT_FRAME \
  155. (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
  156. #define HCLGE_MAC_MIN_FRAME 64
  157. #define HCLGE_MAC_MAX_FRAME 9728
  158. #define HCLGE_SUPPORT_1G_BIT BIT(0)
  159. #define HCLGE_SUPPORT_10G_BIT BIT(1)
  160. #define HCLGE_SUPPORT_25G_BIT BIT(2)
  161. #define HCLGE_SUPPORT_50G_BIT BIT(3)
  162. #define HCLGE_SUPPORT_100G_BIT BIT(4)
  163. /* to be compatible with exsit board */
  164. #define HCLGE_SUPPORT_40G_BIT BIT(5)
  165. #define HCLGE_SUPPORT_100M_BIT BIT(6)
  166. #define HCLGE_SUPPORT_10M_BIT BIT(7)
  167. #define HCLGE_SUPPORT_GE \
  168. (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
/* Driver/device state flags kept in hclge_dev::state (an unsigned long,
 * so each enumerator is presumably used as a bit index with
 * set_bit()/test_bit() — confirm against hclge_main.c).
 */
enum HCLGE_DEV_STATE {
	HCLGE_STATE_REINITING,
	HCLGE_STATE_DOWN,
	HCLGE_STATE_DISABLED,
	HCLGE_STATE_REMOVING,
	HCLGE_STATE_NIC_REGISTERED,
	HCLGE_STATE_ROCE_REGISTERED,
	HCLGE_STATE_SERVICE_INITED,
	HCLGE_STATE_SERVICE_SCHED,
	HCLGE_STATE_RST_SERVICE_SCHED,
	HCLGE_STATE_RST_HANDLING,
	HCLGE_STATE_MBX_SERVICE_SCHED,
	HCLGE_STATE_MBX_HANDLING,
	HCLGE_STATE_STATISTICS_UPDATING,
	HCLGE_STATE_CMD_DISABLE,
	HCLGE_STATE_MAX		/* number of state bits, keep last */
};

/* Classification of an event signalled on the misc (vector0) interrupt. */
enum hclge_evt_cause {
	HCLGE_VECTOR0_EVENT_RST,	/* reset interrupt */
	HCLGE_VECTOR0_EVENT_MBX,	/* mailbox (CMDQ RX) interrupt */
	HCLGE_VECTOR0_EVENT_ERR,	/* hardware error interrupt */
	HCLGE_VECTOR0_EVENT_OTHER,
};
  192. #define HCLGE_MPF_ENBALE 1
/* MAC link speed; enumerator values are the speed in Mbps so they can be
 * used directly as numeric rates.
 */
enum HCLGE_MAC_SPEED {
	HCLGE_MAC_SPEED_UNKNOWN = 0,		/* unknown */
	HCLGE_MAC_SPEED_10M = 10,		/* 10 Mbps */
	HCLGE_MAC_SPEED_100M = 100,		/* 100 Mbps */
	HCLGE_MAC_SPEED_1G = 1000,		/* 1000 Mbps = 1 Gbps */
	HCLGE_MAC_SPEED_10G = 10000,		/* 10000 Mbps = 10 Gbps */
	HCLGE_MAC_SPEED_25G = 25000,		/* 25000 Mbps = 25 Gbps */
	HCLGE_MAC_SPEED_40G = 40000,		/* 40000 Mbps = 40 Gbps */
	HCLGE_MAC_SPEED_50G = 50000,		/* 50000 Mbps = 50 Gbps */
	HCLGE_MAC_SPEED_100G = 100000		/* 100000 Mbps = 100 Gbps */
};

/* MAC duplex mode. */
enum HCLGE_MAC_DUPLEX {
	HCLGE_MAC_HALF,
	HCLGE_MAC_FULL
};
/* Values for hclge_mac::speed_type. */
#define QUERY_SFP_SPEED 0
#define QUERY_ACTIVE_SPEED 1

/* Software view of one MAC, including its PHY attachment (if any) and
 * the ethtool link-mode masks advertised/supported by it.
 */
struct hclge_mac {
	u8 phy_addr;
	u8 flag;
	u8 media_type;		/* port media type, e.g. fibre/copper/backplane */
	u8 mac_addr[ETH_ALEN];
	u8 autoneg;
	u8 duplex;
	u8 support_autoneg;
	u8 speed_type;		/* 0: sfp speed, 1: active speed */
	u32 speed;
	u32 speed_ability;	/* speed ability supported by current media */
	u32 module_type;	/* sub media type, e.g. kr/cr/sr/lr */
	u32 fec_mode;		/* active fec mode */
	u32 user_fec_mode;
	u32 fec_ability;
	int link;		/* store the link status of mac & phy (if phy exists) */
	struct phy_device *phydev;
	struct mii_bus *mdio_bus;
	phy_interface_t phy_if;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
};

/* Hardware access context: BAR mapping, MAC state and command queue. */
struct hclge_hw {
	void __iomem *io_base;	/* mapped PCI BAR registers */
	struct hclge_mac mac;
	int num_vec;
	struct hclge_cmq cmq;
};
/* TQP (task queue pair) statistics.
 * NOTE(review): "hlcge" is a typo for "hclge"; the tag is kept because
 * it is referenced by this spelling elsewhere in the driver.
 */
struct hlcge_tqp_stats {
	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
	u64 rcb_tx_ring_pktnum_rcd;	/* 32bit counter in HW, held in u64 */
	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
	u64 rcb_rx_ring_pktnum_rcd;	/* 32bit counter in HW, held in u64 */
};

/* One task queue pair (a TX/RX ring pair) owned by this PF. */
struct hclge_tqp {
	/* copy of device pointer from pci_dev,
	 * used when perform DMA mapping
	 */
	struct device *dev;
	struct hnae3_queue q;
	struct hlcge_tqp_stats tqp_stats;
	u16 index;		/* Global index in a NIC controller */
	bool alloced;		/* true once handed out to a vport */
};
/* Flow-control configuration of the MAC. */
enum hclge_fc_mode {
	HCLGE_FC_NONE,		/* no pause frames in either direction */
	HCLGE_FC_RX_PAUSE,	/* honour received pause frames only */
	HCLGE_FC_TX_PAUSE,	/* send pause frames only */
	HCLGE_FC_FULL,		/* both directions */
	HCLGE_FC_PFC,		/* priority flow control (per-TC) */
	HCLGE_FC_DEFAULT
};

/* Link failure causes reported by firmware. */
enum hclge_link_fail_code {
	HCLGE_LF_NORMAL,
	HCLGE_LF_REF_CLOCK_LOST,
	HCLGE_LF_XSFP_TX_DISABLE,
	HCLGE_LF_XSFP_ABSENT,
};
#define HCLGE_PG_NUM 4		/* number of priority groups */
#define HCLGE_SCH_MODE_SP 0	/* strict priority scheduling */
#define HCLGE_SCH_MODE_DWRR 1	/* deficit weighted round robin */

/* Per-priority-group scheduling configuration. */
struct hclge_pg_info {
	u8 pg_id;
	u8 pg_sch_mode;		/* 0: sp; 1: dwrr */
	u8 tc_bit_map;		/* TCs that belong to this PG */
	u32 bw_limit;
	u8 tc_dwrr[HNAE3_MAX_TC];	/* DWRR weight of each member TC */
};

/* Per-traffic-class scheduling configuration. */
struct hclge_tc_info {
	u8 tc_id;
	u8 tc_sch_mode;		/* 0: sp; 1: dwrr */
	u8 pgid;		/* priority group this TC belongs to */
	u32 bw_limit;
};

/* Static configuration read from firmware/NVM at probe time. */
struct hclge_cfg {
	u8 vmdq_vport_num;
	u8 tc_num;
	u16 tqp_desc_num;
	u16 rx_buf_len;
	u16 rss_size_max;
	u8 phy_addr;
	u8 media_type;
	u8 mac_addr[ETH_ALEN];
	u8 default_speed;
	u32 numa_node_map;
	u8 speed_ability;
	u16 umv_space;		/* unicast MAC-VLAN table entries granted */
};

/* Traffic-management (TM/DCB) state for this PF. */
struct hclge_tm_info {
	u8 num_tc;
	u8 num_pg;		/* It must be 1 if vNET-Base schd */
	u8 pg_dwrr[HCLGE_PG_NUM];
	u8 prio_tc[HNAE3_MAX_USER_PRIO];	/* user priority -> TC map */
	struct hclge_pg_info pg_info[HCLGE_PG_NUM];
	struct hclge_tc_info tc_info[HNAE3_MAX_TC];
	enum hclge_fc_mode fc_mode;
	u8 hw_pfc_map;		/* Allow for packet drop or not on this TC */
	u8 pfc_en;		/* PFC enabled or not for user priority */
};
/* Maps an ethtool stats string to the offset of the corresponding
 * counter field inside the stats structure it describes.
 */
struct hclge_comm_stats_str {
	char desc[ETH_GSTRING_LEN];
	unsigned long offset;	/* byte offset of the counter field */
};
/* MAC statistics (query opcode 0x0032).
 * NOTE(review): the rsv0/rsv1 slots suggest the field order mirrors the
 * firmware response layout — do not reorder fields; verify against the
 * stats-query code before changing this struct.
 */
struct hclge_mac_stats {
	/* pause / per-priority PFC frame counters */
	u64 mac_tx_mac_pause_num;
	u64 mac_rx_mac_pause_num;
	u64 mac_tx_pfc_pri0_pkt_num;
	u64 mac_tx_pfc_pri1_pkt_num;
	u64 mac_tx_pfc_pri2_pkt_num;
	u64 mac_tx_pfc_pri3_pkt_num;
	u64 mac_tx_pfc_pri4_pkt_num;
	u64 mac_tx_pfc_pri5_pkt_num;
	u64 mac_tx_pfc_pri6_pkt_num;
	u64 mac_tx_pfc_pri7_pkt_num;
	u64 mac_rx_pfc_pri0_pkt_num;
	u64 mac_rx_pfc_pri1_pkt_num;
	u64 mac_rx_pfc_pri2_pkt_num;
	u64 mac_rx_pfc_pri3_pkt_num;
	u64 mac_rx_pfc_pri4_pkt_num;
	u64 mac_rx_pfc_pri5_pkt_num;
	u64 mac_rx_pfc_pri6_pkt_num;
	u64 mac_rx_pfc_pri7_pkt_num;
	/* TX totals and size-bucket histogram */
	u64 mac_tx_total_pkt_num;
	u64 mac_tx_total_oct_num;
	u64 mac_tx_good_pkt_num;
	u64 mac_tx_bad_pkt_num;
	u64 mac_tx_good_oct_num;
	u64 mac_tx_bad_oct_num;
	u64 mac_tx_uni_pkt_num;
	u64 mac_tx_multi_pkt_num;
	u64 mac_tx_broad_pkt_num;
	u64 mac_tx_undersize_pkt_num;
	u64 mac_tx_oversize_pkt_num;
	u64 mac_tx_64_oct_pkt_num;
	u64 mac_tx_65_127_oct_pkt_num;
	u64 mac_tx_128_255_oct_pkt_num;
	u64 mac_tx_256_511_oct_pkt_num;
	u64 mac_tx_512_1023_oct_pkt_num;
	u64 mac_tx_1024_1518_oct_pkt_num;
	u64 mac_tx_1519_2047_oct_pkt_num;
	u64 mac_tx_2048_4095_oct_pkt_num;
	u64 mac_tx_4096_8191_oct_pkt_num;
	u64 rsv0;		/* reserved slot in the firmware layout */
	u64 mac_tx_8192_9216_oct_pkt_num;
	u64 mac_tx_9217_12287_oct_pkt_num;
	u64 mac_tx_12288_16383_oct_pkt_num;
	u64 mac_tx_1519_max_good_oct_pkt_num;
	u64 mac_tx_1519_max_bad_oct_pkt_num;
	/* RX totals and size-bucket histogram */
	u64 mac_rx_total_pkt_num;
	u64 mac_rx_total_oct_num;
	u64 mac_rx_good_pkt_num;
	u64 mac_rx_bad_pkt_num;
	u64 mac_rx_good_oct_num;
	u64 mac_rx_bad_oct_num;
	u64 mac_rx_uni_pkt_num;
	u64 mac_rx_multi_pkt_num;
	u64 mac_rx_broad_pkt_num;
	u64 mac_rx_undersize_pkt_num;
	u64 mac_rx_oversize_pkt_num;
	u64 mac_rx_64_oct_pkt_num;
	u64 mac_rx_65_127_oct_pkt_num;
	u64 mac_rx_128_255_oct_pkt_num;
	u64 mac_rx_256_511_oct_pkt_num;
	u64 mac_rx_512_1023_oct_pkt_num;
	u64 mac_rx_1024_1518_oct_pkt_num;
	u64 mac_rx_1519_2047_oct_pkt_num;
	u64 mac_rx_2048_4095_oct_pkt_num;
	u64 mac_rx_4096_8191_oct_pkt_num;
	u64 rsv1;		/* reserved slot in the firmware layout */
	u64 mac_rx_8192_9216_oct_pkt_num;
	u64 mac_rx_9217_12287_oct_pkt_num;
	u64 mac_rx_12288_16383_oct_pkt_num;
	u64 mac_rx_1519_max_good_oct_pkt_num;
	u64 mac_rx_1519_max_bad_oct_pkt_num;
	/* error / control frame counters */
	u64 mac_tx_fragment_pkt_num;
	u64 mac_tx_undermin_pkt_num;
	u64 mac_tx_jabber_pkt_num;
	u64 mac_tx_err_all_pkt_num;
	u64 mac_tx_from_app_good_pkt_num;
	u64 mac_tx_from_app_bad_pkt_num;
	u64 mac_rx_fragment_pkt_num;
	u64 mac_rx_undermin_pkt_num;
	u64 mac_rx_jabber_pkt_num;
	u64 mac_rx_fcs_err_pkt_num;
	u64 mac_rx_send_app_good_pkt_num;
	u64 mac_rx_send_app_bad_pkt_num;
	u64 mac_tx_pfc_pause_pkt_num;
	u64 mac_rx_pfc_pause_pkt_num;
	u64 mac_tx_ctrl_pkt_num;
	u64 mac_rx_ctrl_pkt_num;
};
#define HCLGE_STATS_TIMER_INTERVAL (60 * 5)

/* Aggregate of hardware statistics plus the timer counter used to pace
 * periodic updates.
 */
struct hclge_hw_stats {
	struct hclge_mac_stats mac_stats;
	u32 stats_timer;
};

/* Ethertypes used for VLAN tag matching/insertion, split by direction
 * (rx/tx) and tag position (outer "ot" / inner "in", first/second).
 */
struct hclge_vlan_type_cfg {
	u16 rx_ot_fst_vlan_type;
	u16 rx_ot_sec_vlan_type;
	u16 rx_in_fst_vlan_type;
	u16 rx_in_sec_vlan_type;
	u16 tx_ot_vlan_type;
	u16 tx_in_vlan_type;
};
/* Flow-director TCAM geometry: how entry depth and key width are split
 * between the two lookup stages.
 */
enum HCLGE_FD_MODE {
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
	HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
	HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
};

/* Flow-director key construction scheme. */
enum HCLGE_FD_KEY_TYPE {
	HCLGE_FD_KEY_BASE_ON_PTYPE,	/* key built from packet type */
	HCLGE_FD_KEY_BASE_ON_TUPLE,	/* key built from header tuples */
};

/* Flow-director lookup stage index. */
enum HCLGE_FD_STAGE {
	HCLGE_FD_STAGE_1,
	HCLGE_FD_STAGE_2,
	MAX_STAGE_NUM,		/* number of stages, keep last */
};
/* OUTER_XXX indicates tuples in the tunnel header of a tunnel packet;
 * INNER_XXX indicates tuples in the tunneled (encapsulated) header of a
 * tunnel packet, or the tuples of a non-tunnel packet.
 */
enum HCLGE_FD_TUPLE {
	OUTER_DST_MAC,
	OUTER_SRC_MAC,
	OUTER_VLAN_TAG_FST,
	OUTER_VLAN_TAG_SEC,
	OUTER_ETH_TYPE,
	OUTER_L2_RSV,
	OUTER_IP_TOS,
	OUTER_IP_PROTO,
	OUTER_SRC_IP,
	OUTER_DST_IP,
	OUTER_L3_RSV,
	OUTER_SRC_PORT,
	OUTER_DST_PORT,
	OUTER_L4_RSV,
	OUTER_TUN_VNI,
	OUTER_TUN_FLOW_ID,
	INNER_DST_MAC,
	INNER_SRC_MAC,
	INNER_VLAN_TAG_FST,
	INNER_VLAN_TAG_SEC,
	INNER_ETH_TYPE,
	INNER_L2_RSV,
	INNER_IP_TOS,
	INNER_IP_PROTO,
	INNER_SRC_IP,
	INNER_DST_IP,
	INNER_L3_RSV,
	INNER_SRC_PORT,
	INNER_DST_PORT,
	INNER_L4_RSV,
	MAX_TUPLE,		/* number of tuple fields, keep last */
};

/* Non-header ("meta") fields that can be mixed into a flow-director key. */
enum HCLGE_FD_META_DATA {
	PACKET_TYPE_ID,
	IP_FRAGEMENT,		/* NOTE(review): typo of "FRAGMENT"; kept as-is */
	ROCE_TYPE,
	NEXT_KEY,
	VLAN_NUMBER,
	SRC_VPORT,
	DST_VPORT,
	TUNNEL_PACKET,
	MAX_META_DATA,		/* number of meta fields, keep last */
};
/* Describes one key component: which tuple/meta field and its width. */
struct key_info {
	u8 key_type;
	u8 key_length;		/* use bit as unit */
};

#define MAX_KEY_LENGTH 400	/* bits */
#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
#define MAX_META_DATA_LENGTH 32

/* assigned by firmware, the real filter number for each pf may be less */
#define MAX_FD_FILTER_NUM 4096
#define HCLGE_FD_ARFS_EXPIRE_TIMER_INTERVAL 5
/* Which kind of flow-director rules currently own the table: none,
 * aRFS-installed rules, or ethtool (ntuple) rules.
 */
enum HCLGE_FD_ACTIVE_RULE_TYPE {
	HCLGE_FD_RULE_NONE,
	HCLGE_FD_ARFS_ACTIVE,
	HCLGE_FD_EP_ACTIVE,
};

/* Packet domain a flow-director rule applies to. */
enum HCLGE_FD_PACKET_TYPE {
	NIC_PACKET,
	ROCE_PACKET,
};

/* Action taken when a flow-director rule hits. */
enum HCLGE_FD_ACTION {
	HCLGE_FD_ACTION_ACCEPT_PACKET,
	HCLGE_FD_ACTION_DROP_PACKET,
};
/* Per-stage flow-director key layout configuration. */
struct hclge_fd_key_cfg {
	u8 key_sel;		/* HCLGE_FD_KEY_TYPE selection */
	u8 inner_sipv6_word_en;
	u8 inner_dipv6_word_en;
	u8 outer_sipv6_word_en;
	u8 outer_dipv6_word_en;
	u32 tuple_active;	/* bitmap over HCLGE_FD_TUPLE */
	u32 meta_data_active;	/* bitmap over HCLGE_FD_META_DATA */
};

/* Flow-director capability/configuration for both stages. */
struct hclge_fd_cfg {
	u8 fd_mode;		/* HCLGE_FD_MODE value */
	u16 max_key_length;	/* use bit as unit */
	u32 proto_support;
	u32 rule_num[MAX_STAGE_NUM];	/* rule entry number */
	u16 cnt_num[MAX_STAGE_NUM];	/* rule hit counter number */
	struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
};
#define IPV4_INDEX 3	/* slot of an IPv4 address inside src/dst_ip[] */
#define IPV6_SIZE 4	/* number of u32 words in an IPv6 address */

/* Header fields matched (or masked) by one flow-director rule. */
struct hclge_fd_rule_tuples {
	u8 src_mac[ETH_ALEN];
	u8 dst_mac[ETH_ALEN];
	/* Be compatible for ip address of both ipv4 and ipv6.
	 * For ipv4 address, we store it in src/dst_ip[3].
	 */
	u32 src_ip[IPV6_SIZE];
	u32 dst_ip[IPV6_SIZE];
	u16 src_port;
	u16 dst_port;
	u16 vlan_tag1;
	u16 ether_proto;
	u8 ip_tos;
	u8 ip_proto;
};

/* One installed flow-director rule, kept on hclge_dev::fd_rule_list. */
struct hclge_fd_rule {
	struct hlist_node rule_node;
	struct hclge_fd_rule_tuples tuples;
	struct hclge_fd_rule_tuples tuples_mask;
	u32 unused_tuple;
	u32 flow_type;
	u8 action;		/* HCLGE_FD_ACTION value */
	u16 vf_id;
	u16 queue_id;
	u16 location;		/* index in the TCAM / fd_bmap */
	u16 flow_id;		/* only used for arfs */
	enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
};
/* Action data programmed alongside a flow-director TCAM entry; the u8
 * fields are boolean-style enables.
 */
struct hclge_fd_ad_data {
	u16 ad_id;
	u8 drop_packet;
	u8 forward_to_direct_queue;
	u16 queue_id;
	u8 use_counter;
	u8 counter_id;
	u8 use_next_stage;	/* chain into stage-2 lookup */
	u8 write_rule_id_to_bd;
	u8 next_input_key;
	u16 rule_id;
};
/* Node of a per-vport shadow list of MAC addresses programmed into the
 * hardware table.
 */
struct hclge_vport_mac_addr_cfg {
	struct list_head node;
	int hd_tbl_status;	/* whether the entry is in the HW table */
	u8 mac_addr[ETH_ALEN];
};

/* Unicast vs multicast MAC table selector. */
enum HCLGE_MAC_ADDR_TYPE {
	HCLGE_MAC_ADDR_UC,
	HCLGE_MAC_ADDR_MC
};

/* Node of a per-vport shadow list of VLAN filter entries. */
struct hclge_vport_vlan_cfg {
	struct list_head node;
	int hd_tbl_status;	/* whether the entry is in the HW table */
	u16 vlan_id;
};
/* Counters for the various reset flows this PF has been through. */
struct hclge_rst_stats {
	u32 reset_done_cnt;	/* the number of reset has completed */
	u32 hw_reset_done_cnt;	/* the number of HW reset has completed */
	u32 pf_rst_cnt;		/* the number of PF reset */
	u32 flr_rst_cnt;	/* the number of FLR */
	u32 core_rst_cnt;	/* the number of CORE reset */
	u32 global_rst_cnt;	/* the number of GLOBAL */
	u32 imp_rst_cnt;	/* the number of IMP reset */
	u32 reset_cnt;		/* the number of reset */
	u32 reset_fail_cnt;	/* the number of reset fail */
};

/* time and register status when mac tunnel interruption occur */
struct hclge_mac_tnl_stats {
	u64 time;
	u32 status;
};
#define HCLGE_RESET_INTERVAL (10 * HZ)
#define HCLGE_WAIT_RESET_DONE 100

/* VF->PF mailbox payload for VLAN configuration. Packed (no padding)
 * because it mirrors the on-the-wire mailbox message layout — do not
 * reorder or resize fields.
 */
#pragma pack(1)
struct hclge_vf_vlan_cfg {
	u8 mbx_cmd;
	u8 subcode;
	u8 is_kill;
	u16 vlan;
	u16 proto;
};
#pragma pack()
/* For each bit of TCAM entry, it uses a pair of 'x' and
 * 'y' to indicate which value to match, like below:
 * ----------------------------------
 * | bit x | bit y |  search value  |
 * ----------------------------------
 * |   0   |   0   |   always hit   |
 * ----------------------------------
 * |   1   |   0   |   match '0'    |
 * ----------------------------------
 * |   0   |   1   |   match '1'    |
 * ----------------------------------
 * |   1   |   1   |    invalid     |
 * ----------------------------------
 * Then for input key(k) and mask(v), we can calculate the value by
 * the formulae:
 *	x = (~k) & v
 *	y = (k ^ ~v) & k
 */
/* calc_x evaluates each argument exactly once, so no temporaries are
 * needed; calc_y uses k twice, hence the typeof() locals to avoid
 * double evaluation of argument expressions with side effects.
 */
#define calc_x(x, k, v) ((x) = (~(k) & (v)))
#define calc_y(y, k, v) \
	do { \
		const typeof(k) _k_ = (k); \
		const typeof(v) _v_ = (v); \
		(y) = (_k_ ^ ~_v_) & (_k_); \
	} while (0)
#define HCLGE_MAC_TNL_LOG_SIZE 8	/* depth of the mac_tnl_log kfifo */
#define HCLGE_VPORT_NUM 256

/* Per-PF device context: the central state object of the hclge driver,
 * allocated once per PCI function at probe time.
 */
struct hclge_dev {
	struct pci_dev *pdev;
	struct hnae3_ae_dev *ae_dev;
	struct hclge_hw hw;
	struct hclge_misc_vector misc_vector;
	struct hclge_hw_stats hw_stats;
	unsigned long state;			/* HCLGE_DEV_STATE bits */
	unsigned long flr_state;
	unsigned long last_reset_time;		/* jiffies of last reset */
	enum hnae3_reset_type reset_type;
	enum hnae3_reset_type reset_level;
	unsigned long default_reset_request;
	unsigned long reset_request;	/* reset has been requested */
	unsigned long reset_pending;	/* client rst is pending to be served */
	struct hclge_rst_stats rst_stats;
	u32 fw_version;
	u16 num_vmdq_vport;		/* Num vmdq vport this PF has set up */
	u16 num_tqps;			/* Num task queue pairs of this PF */
	u16 num_req_vfs;		/* Num VFs requested for this PF */
	u16 base_tqp_pid;	/* Base task tqp physical id of this PF */
	u16 alloc_rss_size;		/* Allocated RSS task queue */
	u16 rss_size_max;		/* HW defined max RSS task queue */
	u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
	u16 num_alloc_vport;		/* Num vports this driver supports */
	u32 numa_node_mask;
	u16 rx_buf_len;
	u16 num_tx_desc;		/* desc num of per tx queue */
	u16 num_rx_desc;		/* desc num of per rx queue */
	u8 hw_tc_map;
	u8 tc_num_last_time;
	enum hclge_fc_mode fc_mode_last_time;
	u8 support_sfp_query;
#define HCLGE_FLAG_TC_BASE_SCH_MODE	1
#define HCLGE_FLAG_VNET_BASE_SCH_MODE	2
	u8 tx_sch_mode;
	u8 tc_max;
	u8 pfc_max;
	u8 default_up;
	u8 dcbx_cap;
	struct hclge_tm_info tm_info;
	u16 num_msi;
	u16 num_msi_left;
	u16 num_msi_used;
	u16 roce_base_msix_offset;
	u32 base_msi_vector;
	u16 *vector_status;
	int *vector_irq;
	u16 num_nic_msi;	/* Num of nic vectors for this PF */
	u16 num_roce_msi;	/* Num of roce vectors for this PF */
	int roce_base_vector;
	u16 pending_udp_bitmap;
	u16 rx_itr_default;
	u16 tx_itr_default;
	u16 adminq_work_limit; /* Num of admin receive queue desc to process */
	unsigned long service_timer_period;
	unsigned long service_timer_previous;
	struct timer_list reset_timer;
	struct delayed_work service_task;
	struct work_struct rst_service_task;
	struct work_struct mbx_service_task;
	bool cur_promisc;
	int num_alloc_vfs;	/* Actual number of VFs allocated */
	struct hclge_tqp *htqp;
	struct hclge_vport *vport;
	struct dentry *hclge_dbgfs;
	struct hnae3_client *nic_client;
	struct hnae3_client *roce_client;
#define HCLGE_FLAG_MAIN			BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE		BIT(1)
#define HCLGE_FLAG_DCB_ENABLE		BIT(2)
#define HCLGE_FLAG_MQPRIO_ENABLE	BIT(3)
	u32 flag;
	u32 pkt_buf_size;	/* Total pf buf size for tx/rx */
	u32 tx_buf_size;	/* Tx buffer size for each TC */
	u32 dv_buf_size;	/* Dv buffer size for each TC */
	u32 mps;		/* Max packet size */
	/* vport_lock protect resource shared by vports */
	struct mutex vport_lock;
	struct hclge_vlan_type_cfg vlan_type_cfg;
	unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
	unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
	struct hclge_fd_cfg fd_cfg;
	struct hlist_head fd_rule_list;
	spinlock_t fd_rule_lock;	/* protect fd_rule_list and fd_bmap */
	u16 hclge_fd_rule_num;
	u16 fd_arfs_expire_timer;
	unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
	enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
	u8 fd_en;
	u16 wanted_umv_size;
	/* max available unicast mac vlan space */
	u16 max_umv_size;
	/* private unicast mac vlan space, it's same for PF and its VFs */
	u16 priv_umv_size;
	/* unicast mac vlan space shared by PF and its VFs */
	u16 share_umv_size;
	struct mutex umv_mutex;		/* protect share_umv_size */
	struct mutex vport_cfg_mutex;	/* Protect stored vf table */
	DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
		      HCLGE_MAC_TNL_LOG_SIZE);
	/* affinity mask and notify for misc interrupt */
	cpumask_t affinity_mask;
	struct irq_affinity_notify affinity_notify;
};
/* VPort level vlan tag configuration for TX direction */
struct hclge_tx_vtag_cfg {
	bool accept_tag1;	/* Whether accept tag1 packet from host */
	bool accept_untag1;	/* Whether accept untag1 packet from host */
	bool accept_tag2;
	bool accept_untag2;
	bool insert_tag1_en;	/* Whether insert inner vlan tag */
	bool insert_tag2_en;	/* Whether insert outer vlan tag */
	u16 default_tag1;	/* The default inner vlan tag to insert */
	u16 default_tag2;	/* The default outer vlan tag to insert */
};

/* VPort level vlan tag configuration for RX direction */
struct hclge_rx_vtag_cfg {
	u8 rx_vlan_offload_en;	/* Whether enable rx vlan offload */
	u8 strip_tag1_en;	/* Whether strip inner vlan tag */
	u8 strip_tag2_en;	/* Whether strip outer vlan tag */
	u8 vlan1_vlan_prionly;	/* Inner VLAN Tag up to descriptor Enable */
	u8 vlan2_vlan_prionly;	/* Outer VLAN Tag up to descriptor Enable */
};

/* Per-vport RSS hash enable flags, one per flow type. */
struct hclge_rss_tuple_cfg {
	u8 ipv4_tcp_en;
	u8 ipv4_udp_en;
	u8 ipv4_sctp_en;
	u8 ipv4_fragment_en;
	u8 ipv6_tcp_en;
	u8 ipv6_udp_en;
	u8 ipv6_sctp_en;
	u8 ipv6_fragment_en;
};

/* Vport state bits stored in hclge_vport::state. */
enum HCLGE_VPORT_STATE {
	HCLGE_VPORT_STATE_ALIVE,
	HCLGE_VPORT_STATE_MAX	/* number of state bits, keep last */
};

/* One VLAN (protocol, qos, tag) triple. */
struct hclge_vlan_info {
	u16 vlan_proto;	/* so far support 802.1Q only */
	u16 qos;
	u16 vlan_tag;
};

/* Port-based ("pvid") VLAN state for a vport. */
struct hclge_port_base_vlan_config {
	u16 state;
	struct hclge_vlan_info vlan_info;
};
/* Per-virtual-port context: one instance for the PF itself plus one for
 * each VF it manages; points back to the owning hclge_dev.
 */
struct hclge_vport {
	u16 alloc_tqps;	/* Allocated Tx/Rx queues */

	u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
	/* User configured lookup table entries */
	u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
	int rss_algo;		/* User configured hash algorithm */
	/* User configured rss tuple sets */
	struct hclge_rss_tuple_cfg rss_tuple_sets;

	u16 alloc_rss_size;

	u16 qs_offset;
	u32 bw_limit;		/* VSI BW Limit (0 = disabled) */
	u8 dwrr;

	unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
	struct hclge_port_base_vlan_config port_base_vlan_cfg;
	struct hclge_tx_vtag_cfg txvlan_cfg;
	struct hclge_rx_vtag_cfg rxvlan_cfg;

	u16 used_umv_num;	/* unicast MAC-VLAN entries consumed */

	u16 vport_id;
	struct hclge_dev *back;	/* Back reference to associated dev */
	struct hnae3_handle nic;
	struct hnae3_handle roce;

	unsigned long state;	/* HCLGE_VPORT_STATE bits */
	unsigned long last_active_jiffies;
	u32 mps;		/* Max packet size */

	struct list_head uc_mac_list;	/* Store VF unicast table */
	struct list_head mc_mac_list;	/* Store VF multicast table */
	struct list_head vlan_list;	/* Store VF vlan table */
};
/* --- API exported to the other hclge compilation units; implemented in
 * hclge_main.c unless noted otherwise ---
 */

void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc,
			      bool en_mc, bool en_bc, int vport_id);

/* MAC address table add/remove helpers; return 0 on success or a
 * negative errno-style code.
 */
int hclge_add_uc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_uc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);
int hclge_add_mc_addr_common(struct hclge_vport *vport,
			     const unsigned char *addr);
int hclge_rm_mc_addr_common(struct hclge_vport *vport,
			    const unsigned char *addr);

struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
int hclge_bind_ring_with_vector(struct hclge_vport *vport,
				int vector_id, bool en,
				struct hnae3_ring_chain_node *ring_chain);
  818. static inline int hclge_get_queue_id(struct hnae3_queue *queue)
  819. {
  820. struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
  821. return tqp->index;
  822. }
  823. static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
  824. {
  825. return !!hdev->reset_pending;
  826. }
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
			  u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
int hclge_buffer_alloc(struct hclge_dev *hdev);
int hclge_rss_init_hw(struct hclge_dev *hdev);
void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);

void hclge_mbx_handler(struct hclge_dev *hdev);
int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
int hclge_cfg_flowctrl(struct hclge_dev *hdev);
int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
int hclge_vport_start(struct hclge_vport *vport);
void hclge_vport_stop(struct hclge_vport *vport);
int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
/* NOTE(review): "covert" is a typo of "convert"; name kept for callers. */
u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
int hclge_notify_client(struct hclge_dev *hdev,
			enum hnae3_reset_notify_type type);

/* Shadow MAC/VLAN table maintenance for vports. */
void hclge_add_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			       enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_mac_table(struct hclge_vport *vport, const u8 *mac_addr,
			      bool is_write_tbl,
			      enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
				  enum HCLGE_MAC_ADDR_TYPE mac_type);
void hclge_uninit_vport_mac_table(struct hclge_dev *hdev);
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
				    struct hclge_vlan_info *vlan_info);
int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
				      u16 state, u16 vlan_tag, u16 qos,
				      u16 vlan_proto);
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
				struct hclge_desc *desc);
void hclge_report_hw_error(struct hclge_dev *hdev,
			   enum hnae3_hw_error_type type);
#endif
  867. #endif