
/net/dcb/dcbnl.c

http://github.com/mirrors/linux
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * Description: Data Center Bridging netlink interface
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <net/sock.h>

/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */
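/*
 * Illustrative example (not part of dcbnl.c): a minimal userspace sketch of
 * the kind of request this file services. It sends RTM_GETDCB with
 * DCB_CMD_GSTATE for a named interface over an rtnetlink socket; dcb_doit()
 * below routes the command to dcbnl_getstate(). Error handling and reply
 * parsing are trimmed; the function name is hypothetical.
 */
#if 0 /* userspace sketch, kept out of the kernel build */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/dcbnl.h>

static int dcb_get_state(const char *ifname)
{
	struct {
		struct nlmsghdr nlh;
		struct dcbmsg dcb;
		char attrbuf[64];
	} req;
	struct nlattr *nla;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct dcbmsg));
	req.nlh.nlmsg_type = RTM_GETDCB;
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.dcb.dcb_family = AF_UNSPEC;
	req.dcb.cmd = DCB_CMD_GSTATE;

	/* DCB_ATTR_IFNAME is mandatory: dcb_doit() rejects messages without it */
	nla = (struct nlattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	nla->nla_type = DCB_ATTR_IFNAME;
	nla->nla_len = NLA_HDRLEN + strlen(ifname) + 1;
	strcpy((char *)nla + NLA_HDRLEN, ifname);
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + NLA_ALIGN(nla->nla_len);

	if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
		close(fd);
		return -1;
	}
	/* the DCB_ATTR_STATE u8 comes back in the unicast reply */
	close(fd);
	return 0;
}
#endif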
/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]        = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]        = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]  = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]         = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]  = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]   = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]  = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]   = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]     = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]      = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]    = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]   = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]       = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]       = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]       = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN]       = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
	[DCB_ATTR_DCB_BUFFER]     = {.len = sizeof(struct dcbnl_buffer)},
};
/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]  = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);

static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}
static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->getstate(netdev));
}

static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
					  tb[DCB_ATTR_PFC_CFG],
					  dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
				u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 perm_addr[MAX_ADDR_LEN];

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return -EOPNOTSUPP;

	memset(perm_addr, 0, sizeof(perm_addr));
	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}

static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_CAP_ATTR_MAX,
					  tb[DCB_ATTR_CAP], dcbnl_cap_nest,
					  NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL + 1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
					  tb[DCB_ATTR_NUMTCS],
					  dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else {
			return -EINVAL;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_NUMTCS_ATTR_MAX,
					  tb[DCB_ATTR_NUMTCS],
					  dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getpfcstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
			  netdev->dcbnl_ops->getpfcstate(netdev));
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfcstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}

static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
			.selector = idtype,
			.protocol = id,
		};
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start_noflag(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(app_tb, DCB_APP_ATTR_MAX,
					  tb[DCB_ATTR_APP], dcbnl_app_nest,
					  NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start_noflag(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested_deprecated(param_tb,
						  DCB_TC_ATTR_PARAM_MAX, data,
						  dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start_noflag(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->setstate(netdev, value));
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(data, DCB_PFC_UP_ATTR_MAX,
					  tb[DCB_ATTR_PFC_CFG],
					  dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}

static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;

	if (!tb[DCB_ATTR_SET_ALL])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setall)
		return -EOPNOTSUPP;

	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
			 netdev->dcbnl_ops->setall(netdev));
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);

	return ret;
}

static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(pg_tb, DCB_PG_ATTR_MAX,
					  tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest,
					  NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested_deprecated(param_tb,
						  DCB_TC_ATTR_PARAM_MAX,
						  pg_tb[i],
						  dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested_deprecated(bcn_tb, DCB_BCN_ATTR_MAX,
					  tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
					  NULL);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start_noflag(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}
static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return -EOPNOTSUPP;

	/* BCN attributes must be parsed with the BCN policy, not the PFC one */
	ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX,
					  tb[DCB_ATTR_BCN], dcbnl_bcn_nest,
					  NULL);
	if (ret)
		return ret;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
					     i, value_int);
	}

	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
}
static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/* retrieve the peer app configuration from the driver. If the driver
	 * handlers fail, exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc_array(app_count, sizeof(struct dcb_app),
				      GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/* build the message; from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start_noflag(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start_noflag(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	if (ops->ieee_getets) {
		struct ieee_ets ets;

		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;

		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;

		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->dcbnl_getbuffer) {
		struct dcbnl_buffer buffer;

		memset(&buffer, 0, sizeof(buffer));
		err = ops->dcbnl_getbuffer(netdev, &buffer);
		if (!err &&
		    nla_put(skb, DCB_ATTR_DCB_BUFFER, sizeof(buffer), &buffer))
			return -EMSGSIZE;
	}

	app = nla_nest_start_noflag(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;

		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;

		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
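/*
 * Illustrative example (not part of dcbnl.c): what a driver-side ieee_getets
 * callback consumed by dcbnl_ieee_fill() above might look like. The private
 * state (struct my_priv and its fields) is hypothetical; real drivers copy
 * whatever their hardware has programmed into the struct ieee_ets fields.
 */
#if 0
static int my_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	struct my_priv *priv = netdev_priv(dev);	/* hypothetical */
	int i;

	ets->ets_cap = priv->num_tcs;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		ets->tc_tx_bw[i] = priv->tc_bw[i];	/* % of link bandwidth */
		ets->tc_tsa[i]	 = priv->tc_strict[i] ?
				   IEEE_8021QAZ_TSA_STRICT :
				   IEEE_8021QAZ_TSA_ETS;
		ets->prio_tc[i]	 = priv->prio2tc[i];	/* UP -> TC mapping */
	}
	return 0;
}
#endif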
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start_noflag(skb, i);

	if (!pg)
		return -EMSGSIZE;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start_noflag(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);

		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}

static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;

	cee = nla_nest_start_noflag(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start_noflag(skb,
								DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start_noflag(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start_noflag(skb,
									DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start_noflag(skb,
							    DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;

		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;

		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	err = -EMSGSIZE;
	return err;
}
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 portid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
	if (!skb)
		return -ENOBUFS;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}

int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation cannot be completed,
 * the entire msg is aborted and an error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
					  tb[DCB_ATTR_IEEE],
					  dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);

		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);

		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
		struct ieee_qcn *qcn =
			nla_data(ieee[DCB_ATTR_IEEE_QCN]);

		err = ops->ieee_setqcn(netdev, qcn);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);

		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_DCB_BUFFER] && ops->dcbnl_setbuffer) {
		struct dcbnl_buffer *buffer =
			nla_data(ieee[DCB_ATTR_DCB_BUFFER]);

		err = ops->dcbnl_setbuffer(netdev, buffer);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;

			if (nla_len(attr) < sizeof(struct dcb_app)) {
				err = -ERANGE;
				goto err;
			}

			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_ieee_fill(skb, netdev);
}

static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested_deprecated(ieee, DCB_ATTR_IEEE_MAX,
					  tb[DCB_ATTR_IEEE],
					  dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}

/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->getdcbx(netdev));
}

static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->setdcbx(netdev, value));
}

static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
					  tb[DCB_ATTR_FEATCFG],
					  dcbnl_featcfg_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start_noflag(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	return ret;
}
static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret, i;
	u8 value;

	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested_deprecated(data, DCB_FEATCFG_ATTR_MAX,
					  tb[DCB_ATTR_FEATCFG],
					  dcbnl_featcfg_nest, NULL);
	if (ret)
		goto err;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
		if (ret)
			goto err;
	}

err:
	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);

	return ret;
}
/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_cee_fill(skb, netdev);
}
struct reply_func {
	/* reply netlink message type */
	int	type;

	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};

static const struct reply_func reply_funcs[DCB_CMD_MAX + 1] = {
	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
};
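/*
 * Illustrative example (not part of dcbnl.c): the minimum a driver needs for
 * the dispatch table above to reach it. Assigning a struct dcbnl_rtnl_ops to
 * netdev->dcbnl_ops is the whole registration; any callback left NULL makes
 * the corresponding command fail with -EOPNOTSUPP in dcb_doit(). The
 * my_getstate/my_setstate callbacks are hypothetical.
 */
#if 0
static u8 my_getstate(struct net_device *dev)
{
	return 1;			/* DCB currently enabled */
}

static u8 my_setstate(struct net_device *dev, u8 state)
{
	return 0;			/* 0 = success for CEE set operations */
}

static const struct dcbnl_rtnl_ops my_dcbnl_ops = {
	.getstate	= my_getstate,
	.setstate	= my_setstate,
};

/* in the driver's probe path, before register_netdev():
 *	netdev->dcbnl_ops = &my_dcbnl_ops;
 */
#endif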
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	if ((nlh->nlmsg_type == RTM_SETDCB) &&
	    !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse_deprecated(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
				     dcbnl_rtnl_policy, extack);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}
static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
					   int ifindex, int prio)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == ifindex &&
		    ((prio == -1) || itr->app.priority == prio))
			return itr;
	}

	return NULL;
}

static int dcb_app_add(const struct dcb_app *app, int ifindex)
{
	struct dcb_app_type *entry;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(&entry->app, app, sizeof(*app));
	entry->ifindex = ifindex;
	list_add(&entry->list, &dcb_app_list);

	return 0;
}
/**
 * dcb_getapp - retrieve the DCBX application user priority
 *
 * On success returns a non-zero 802.1p user priority bitmap
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio = itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);
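/*
 * Illustrative example (not part of dcbnl.c): the common consumer pattern for
 * dcb_getapp(). An FCoE-capable driver asks which 802.1p user priority the
 * CEE APP table maps to the FCoE ethertype; a return of 0 means no entry.
 * The function name is hypothetical; ETH_P_FCOE comes from <linux/if_ether.h>.
 */
#if 0
static u8 my_fcoe_up(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = DCB_APP_IDTYPE_ETHTYPE,
		.protocol = ETH_P_FCOE,
	};

	return dcb_getapp(dev, &app);	/* user priority bitmap, 0 if unset */
}
#endif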
/**
 * dcb_setapp - add CEE dcb application data to app list
 *
 * Priority 0 is an invalid priority in CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and replace */
	itr = dcb_app_lookup(new, dev->ifindex, -1);
	if (itr) {
		if (new->priority) {
			itr->app.priority = new->priority;
		} else {
			list_del(&itr->list);
			kfree(itr);
		}
		goto out;
	}
	/* App type does not exist, add new application type */
	if (new->priority)
		err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_setapp);
/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap, otherwise returns 0 to indicate the dcb_app was
 * not found in the APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock_bh(&dcb_lock);
	itr = dcb_app_lookup(app, dev->ifindex, -1);
	if (itr)
		prio |= 1 << itr->app.priority;
	spin_unlock_bh(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 *
 * This adds Application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different. Priority is expected to be a
 * 3-bit unsigned integer.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and abort if found */
	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
		err = -EEXIST;
		goto out;
	}

	err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);
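/*
 * Illustrative example (not part of dcbnl.c): installing an IEEE APP entry
 * from kernel code, here "DSCP 46 maps to priority 6". dcb_ieee_setapp()
 * only fails with -EEXIST for an exact duplicate, so several priorities can
 * coexist for the same selector/protocol pair. The function name is
 * hypothetical.
 */
#if 0
static int my_map_dscp46(struct net_device *dev)
{
	struct dcb_app app = {
		.selector = IEEE_8021QAZ_APP_SEL_DSCP,
		.protocol = 46,		/* DSCP EF */
		.priority = 6,
	};

	return dcb_ieee_setapp(dev, &app);
}
#endif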
/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 *
 * This removes a matching APP data from the APP list
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and remove it. */
	itr = dcb_app_lookup(del, dev->ifindex, del->priority);
	if (itr) {
		list_del(&itr->list);
		kfree(itr);
		err = 0;
	}

	spin_unlock_bh(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);
/**
 * dcb_ieee_getapp_prio_dscp_mask_map - For a given device, find mapping from
 * priorities to the DSCP values assigned to that priority. Initialize p_map
 * such that each map element holds a bit mask of DSCP values configured for
 * that priority by APP entries.
 */
void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
					struct dcb_ieee_app_prio_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 prio;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS) {
			prio = itr->app.priority;
			p_map->map[prio] |= 1ULL << itr->app.protocol;
		}
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_prio_dscp_mask_map);
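/*
 * Illustrative example (not part of dcbnl.c): walking the map filled in by
 * dcb_ieee_getapp_prio_dscp_mask_map(). Each element is a 64-bit mask of the
 * DSCP values that APP entries tie to that priority; the debug print stands
 * in for whatever hardware reprogramming a driver would do. The function
 * name is hypothetical.
 */
#if 0
static void my_sync_dscp_map(struct net_device *dev)
{
	struct dcb_ieee_app_prio_map map;
	int prio, dscp;

	dcb_ieee_getapp_prio_dscp_mask_map(dev, &map);
	for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
		for (dscp = 0; dscp < 64; dscp++)
			if (map.map[prio] & (1ULL << dscp))
				netdev_dbg(dev, "prio %d <- dscp %d\n",
					   prio, dscp);
}
#endif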
/**
 * dcb_ieee_getapp_dscp_prio_mask_map - For a given device, find mapping from
 * DSCP values to the priorities assigned to that DSCP value. Initialize p_map
 * such that each map element holds a bit mask of priorities configured for a
 * given DSCP value by APP entries.
 */
void
dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
				   struct dcb_ieee_app_dscp_map *p_map)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;

	memset(p_map->map, 0, sizeof(p_map->map));

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_DSCP &&
		    itr->app.protocol < 64 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS)
			p_map->map[itr->app.protocol] |= 1 << itr->app.priority;
	}
	spin_unlock_bh(&dcb_lock);
}
EXPORT_SYMBOL(dcb_ieee_getapp_dscp_prio_mask_map);
/**
 * Per 802.1Q-2014, the selector value of 1 is used for matching on Ethernet
 * type, with valid PID values >= 1536. A special meaning is then assigned to
 * protocol value of 0: "default priority. For use when priority is not
 * otherwise specified".
 *
 * dcb_ieee_getapp_default_prio_mask - For a given device, find all APP entries
 * of the form {$PRIO, ETHERTYPE, 0} and construct a bit mask of all default
 * priorities set by these entries.
 */
u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev)
{
	int ifindex = dev->ifindex;
	struct dcb_app_type *itr;
	u8 mask = 0;

	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == ifindex &&
		    itr->app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
		    itr->app.protocol == 0 &&
		    itr->app.priority < IEEE_8021QAZ_MAX_TCS)
			mask |= 1 << itr->app.priority;
	}
	spin_unlock_bh(&dcb_lock);

	return mask;
}
EXPORT_SYMBOL(dcb_ieee_getapp_default_prio_mask);
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);

	return 0;
}
device_initcall(dcbnl_init);