PageRenderTime 54ms CodeModel.GetById 24ms RepoModel.GetById 0ms app.codeStats 0ms

/include/net/flow_offload.h

https://bitbucket.org/mirror/linux
C Header | 545 lines | 457 code | 73 blank | 15 comment | 12 complexity | 34b78a5408957e844b6105fe866d8196 MD5 | raw file
Possible License(s): LGPL-2.0, AGPL-1.0, GPL-2.0
  1. #ifndef _NET_FLOW_OFFLOAD_H
  2. #define _NET_FLOW_OFFLOAD_H
  3. #include <linux/kernel.h>
  4. #include <linux/list.h>
  5. #include <linux/netlink.h>
  6. #include <net/flow_dissector.h>
  7. #include <linux/rhashtable.h>
/* Raw match container: @dissector describes which keys are present and
 * at which offsets inside the @key and @mask blobs. */
struct flow_match {
	struct flow_dissector *dissector;
	void *mask;
	void *key;
};

/* Typed key/mask pair views extracted from a struct flow_match, one per
 * flow dissector key.  @mask selects which bits of @key are relevant. */
struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_match_ct {
	struct flow_dissector_key_ct *key, *mask;
};
struct flow_rule;

/* Extractors: each fills @out with the typed key/mask pointers for the
 * corresponding dissector key of @rule.  Callers should first verify
 * the key is present with flow_rule_match_key(). */
void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
/* The enc_* variants extract the tunnel (encapsulation) headers. */
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
void flow_rule_match_ct(const struct flow_rule *rule,
			struct flow_match_ct *out);
/* Action kinds a driver may be asked to offload; selects the valid
 * member of the union in struct flow_action_entry. */
enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_REDIRECT_INGRESS,
	FLOW_ACTION_MIRRED_INGRESS,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_PTYPE,
	FLOW_ACTION_PRIORITY,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_CT_METADATA,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
	FLOW_ACTION_GATE,
	NUM_FLOW_ACTIONS,
};
/* This is mirroring enum pedit_header_type definition for easy mapping between
 * tc pedit action. Legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped to
 * FLOW_ACT_MANGLE_UNSPEC, which is supported by no driver.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};
/* Bit positions for the HW stats types; the value enums below are
 * built from these with BIT(). */
enum flow_action_hw_stats_bit {
	FLOW_ACTION_HW_STATS_IMMEDIATE_BIT,
	FLOW_ACTION_HW_STATS_DELAYED_BIT,
	FLOW_ACTION_HW_STATS_DISABLED_BIT,

	FLOW_ACTION_HW_STATS_NUM_BITS
};
/* HW stats type masks.  ANY = immediate|delayed; DONT_CARE sets all
 * bits (any type, including disabled, is acceptable). */
enum flow_action_hw_stats {
	FLOW_ACTION_HW_STATS_IMMEDIATE =
		BIT(FLOW_ACTION_HW_STATS_IMMEDIATE_BIT),
	FLOW_ACTION_HW_STATS_DELAYED = BIT(FLOW_ACTION_HW_STATS_DELAYED_BIT),
	FLOW_ACTION_HW_STATS_ANY = FLOW_ACTION_HW_STATS_IMMEDIATE |
				   FLOW_ACTION_HW_STATS_DELAYED,
	FLOW_ACTION_HW_STATS_DISABLED =
		BIT(FLOW_ACTION_HW_STATS_DISABLED_BIT),
	FLOW_ACTION_HW_STATS_DONT_CARE = BIT(FLOW_ACTION_HW_STATS_NUM_BITS) - 1,
};
/* Destructor invoked to release @priv owned by an action entry. */
typedef void (*action_destr)(void *priv);

/* Opaque user-defined cookie attached to an action; @cookie is a
 * flexible array of @cookie_len bytes. */
struct flow_action_cookie {
	u32 cookie_len;
	u8 cookie[];
};

/* Allocate a cookie holding a copy of @len bytes from @data. */
struct flow_action_cookie *flow_action_cookie_create(void *data,
						     unsigned int len,
						     gfp_t gfp);
void flow_action_cookie_destroy(struct flow_action_cookie *cookie);
/* One action to offload.  @id selects which union member is valid;
 * @hw_stats is the HW stats type requested for this action.  If
 * @destructor is set it is called with @destructor_priv on release. */
struct flow_action_entry {
	enum flow_action_id		id;
	enum flow_action_hw_stats	hw_stats;
	action_destr			destructor;
	void				*destructor_priv;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_MANGLE */
							/* FLOW_ACTION_ADD */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		struct ip_tunnel_info	*tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		u16			ptype;		/* FLOW_ACTION_PTYPE */
		u32			priority;	/* FLOW_ACTION_PRIORITY */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			s64			burst;
			u64			rate_bytes_ps;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int action;
			u16 zone;
			struct nf_flowtable *flow_table;
		} ct;
		struct {				/* FLOW_ACTION_CT_METADATA */
			unsigned long cookie;
			u32 mark;
			u32 labels[4];
		} ct_metadata;
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
		struct {				/* FLOW_ACTION_GATE */
			u32		index;
			s32		prio;
			u64		basetime;
			u64		cycletime;
			u64		cycletimeext;
			u32		num_entries;
			struct action_gate_entry *entries;
		} gate;
	};
	struct flow_action_cookie *cookie; /* user defined action cookie */
};
/* Ordered list of @num_entries actions; @entries is a flexible array
 * sized at allocation time (see flow_rule_alloc()). */
struct flow_action {
	unsigned int			num_entries;
	struct flow_action_entry	entries[];
};
  252. static inline bool flow_action_has_entries(const struct flow_action *action)
  253. {
  254. return action->num_entries;
  255. }
/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Return: true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}
/* Iterate over all entries of @__actions, setting @__i to the current
 * index and @__act to the current &struct flow_action_entry. */
#define flow_action_for_each(__i, __act, __actions)			\
        for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])
  270. static inline bool
  271. flow_action_mixed_hw_stats_check(const struct flow_action *action,
  272. struct netlink_ext_ack *extack)
  273. {
  274. const struct flow_action_entry *action_entry;
  275. u8 uninitialized_var(last_hw_stats);
  276. int i;
  277. if (flow_offload_has_one_action(action))
  278. return true;
  279. flow_action_for_each(i, action_entry, action) {
  280. if (i && action_entry->hw_stats != last_hw_stats) {
  281. NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
  282. return false;
  283. }
  284. last_hw_stats = action_entry->hw_stats;
  285. }
  286. return true;
  287. }
  288. static inline const struct flow_action_entry *
  289. flow_action_first_entry_get(const struct flow_action *action)
  290. {
  291. WARN_ON(!flow_action_has_entries(action));
  292. return &action->entries[0];
  293. }
  294. static inline bool
  295. __flow_action_hw_stats_check(const struct flow_action *action,
  296. struct netlink_ext_ack *extack,
  297. bool check_allow_bit,
  298. enum flow_action_hw_stats_bit allow_bit)
  299. {
  300. const struct flow_action_entry *action_entry;
  301. if (!flow_action_has_entries(action))
  302. return true;
  303. if (!flow_action_mixed_hw_stats_check(action, extack))
  304. return false;
  305. action_entry = flow_action_first_entry_get(action);
  306. /* Zero is not a legal value for hw_stats, catch anyone passing it */
  307. WARN_ON_ONCE(!action_entry->hw_stats);
  308. if (!check_allow_bit &&
  309. ~action_entry->hw_stats & FLOW_ACTION_HW_STATS_ANY) {
  310. NL_SET_ERR_MSG_MOD(extack, "Driver supports only default HW stats type \"any\"");
  311. return false;
  312. } else if (check_allow_bit &&
  313. !(action_entry->hw_stats & BIT(allow_bit))) {
  314. NL_SET_ERR_MSG_MOD(extack, "Driver does not support selected HW stats type");
  315. return false;
  316. }
  317. return true;
  318. }
/* Check that @action's HW stats request includes the type the driver
 * supports, identified by @allow_bit. */
static inline bool
flow_action_hw_stats_check(const struct flow_action *action,
			   struct netlink_ext_ack *extack,
			   enum flow_action_hw_stats_bit allow_bit)
{
	return __flow_action_hw_stats_check(action, extack, true, allow_bit);
}
/* Check for drivers that only support the default "any" HW stats
 * type; any more specific request is rejected. */
static inline bool
flow_action_basic_hw_stats_check(const struct flow_action *action,
				 struct netlink_ext_ack *extack)
{
	return __flow_action_hw_stats_check(action, extack, false, 0);
}
/* An offloadable classifier rule: what to match plus the actions to
 * apply.  @action must be last: it ends in a flexible array. */
struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

/* Allocate a rule with room for @num_actions action entries. */
struct flow_rule *flow_rule_alloc(unsigned int num_actions);
/* True if @rule's dissector carries the dissector key @key. */
static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}
/* Accumulated stats reported back for a rule.  @used_hw_stats records
 * which HW stats type(s) the driver actually used; it is only
 * meaningful when @used_hw_stats_valid is set. */
struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	lastused;
	enum flow_action_hw_stats used_hw_stats;
	bool used_hw_stats_valid;
};
  349. static inline void flow_stats_update(struct flow_stats *flow_stats,
  350. u64 bytes, u64 pkts, u64 lastused,
  351. enum flow_action_hw_stats used_hw_stats)
  352. {
  353. flow_stats->pkts += pkts;
  354. flow_stats->bytes += bytes;
  355. flow_stats->lastused = max_t(u64, flow_stats->lastused, lastused);
  356. /* The driver should pass value with a maximum of one bit set.
  357. * Passing FLOW_ACTION_HW_STATS_ANY is invalid.
  358. */
  359. WARN_ON(used_hw_stats == FLOW_ACTION_HW_STATS_ANY);
  360. flow_stats->used_hw_stats |= used_hw_stats;
  361. flow_stats->used_hw_stats_valid = true;
  362. }
/* Block lifecycle: attach or detach a callback block. */
enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

/* Where the block is attached on the device's qdisc. */
enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};
/* List head anchoring the flow_block_cb callbacks registered on a
 * block; initialize with flow_block_init(). */
struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;
/* Request passed to drivers to bind/unbind a callback block.
 * @cb_list collects the flow_block_cb instances the driver adds or
 * removes; @driver_block_list is the driver's own bookkeeping list.
 * @unlocked_driver_cb: driver callbacks may run without rtnl held
 *	(assumption from field name — confirm against callers). */
struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	bool unlocked_driver_cb;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
};

enum tc_setup_type;

/* Per-block setup callback registered by drivers. */
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);
struct flow_block_cb;

/* State for indirect (tunnel device) block registration; @cleanup is
 * invoked when the indirect binding is torn down. */
struct flow_block_indr {
	struct list_head		list;
	struct net_device		*dev;
	enum flow_block_binder_type	binder_type;
	void				*data;
	void				(*cleanup)(struct flow_block_cb *block_cb);
};

/* One registered callback on a flow block.  @cb_ident identifies the
 * callback for lookup/busy checks; @release, if set, is called with
 * @cb_priv when the callback is freed; @refcnt is managed via
 * flow_block_cb_incref()/decref(). */
struct flow_block_cb {
	struct list_head	driver_list;
	struct list_head	list;
	flow_setup_cb_t		*cb;
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
	struct flow_block_indr	indr;
	unsigned int		refcnt;
};
/* Allocation, lookup and refcounting for flow_block_cb instances;
 * implemented in net/core/flow_offload.c. */
struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);
/* Queue @block_cb on @offload->cb_list so the core links it into the
 * block when the bind request completes. */
static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}
/* Move @block_cb from the block's list onto @offload->cb_list so the
 * core unlinks and releases it when the unbind request completes. */
static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}
/* True if (@cb, @cb_ident) is already registered on @driver_block_list. */
bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

/* Boilerplate bind/unbind handler for drivers with a single callback;
 * rejects non-ingress binds when @ingress_only is true. */
int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);
/* Classifier offload commands delivered to drivers. */
enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};
/* Fields common to classifier offload requests. */
struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

/* A classifier offload request.  @cookie identifies the filter across
 * commands; @stats is filled by the driver for FLOW_CLS_STATS. */
struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};
/* Accessor for the flow rule carried by a classifier offload request. */
static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}
/* Initialize @flow_block's callback list; must precede any add/lookup. */
static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}
/* Callback type for indirect block binds (e.g. tunnel devices the
 * driver does not own). */
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
				      enum tc_setup_type type, void *type_data);

/* Register/unregister an indirect-block handler and dispatch an
 * offload request to all registered handlers; implemented in
 * net/core/flow_offload.c. */
int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
			      flow_setup_cb_t *setup_cb);
int flow_indr_dev_setup_offload(struct net_device *dev,
				enum tc_setup_type type, void *data,
				struct flow_block_offload *bo,
				void (*cleanup)(struct flow_block_cb *block_cb));
  472. #endif /* _NET_FLOW_OFFLOAD_H */