/net/sched/sch_prio.c

http://github.com/mirrors/linux · C · 444 lines · 361 code · 73 blank · 10 comment · 49 complexity · 93363b1a90e2fc94b7cb34b10dac9304 MD5 · raw file

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * net/sched/sch_prio.c Simple 3-band priority "scheduler".
  4. *
  5. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  6. * Fixes: 19990609: J Hadi Salim <hadi@nortelnetworks.com>:
  7. * Init -- EINVAL when opt undefined
  8. */
  9. #include <linux/module.h>
  10. #include <linux/slab.h>
  11. #include <linux/types.h>
  12. #include <linux/kernel.h>
  13. #include <linux/string.h>
  14. #include <linux/errno.h>
  15. #include <linux/skbuff.h>
  16. #include <net/netlink.h>
  17. #include <net/pkt_sched.h>
  18. #include <net/pkt_cls.h>
/* Per-qdisc private state for the PRIO scheduler. */
struct prio_sched_data {
	int bands;				/* number of active bands (2..TCQ_PRIO_BANDS) */
	struct tcf_proto __rcu *filter_list;	/* classifier chain, RCU-protected */
	struct tcf_block *block;		/* filter block backing filter_list */
	u8 prio2band[TC_PRIO_MAX+1];		/* maps skb->priority to a band index */
	struct Qdisc *queues[TCQ_PRIO_BANDS];	/* one child qdisc per band */
};
/*
 * Select the child qdisc that @skb should be enqueued on.
 *
 * If the packet's priority major handle does not address this qdisc
 * directly, run the attached classifiers first.  Classifier verdicts
 * that consume the packet (stolen/queued/trap/shot) make this return
 * NULL with *qerr set appropriately.  With no filter attached, or on a
 * classification error, fall back to the static prio2band map keyed by
 * skb->priority.  A classid whose minor number is 0 or beyond the
 * configured bands selects the band that priority 0 maps to.
 */
static struct Qdisc *
prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	u32 band = skb->priority;
	struct tcf_result res;
	struct tcf_proto *fl;
	int err;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	if (TC_H_MAJ(skb->priority) != sch->handle) {
		fl = rcu_dereference_bh(q->filter_list);
		err = tcf_classify(skb, fl, &res, false);
#ifdef CONFIG_NET_CLS_ACT
		switch (err) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		if (!fl || err < 0) {
			/* No usable filter result: use the static priomap. */
			if (TC_H_MAJ(band))
				band = 0;
			return q->queues[q->prio2band[band & TC_PRIO_MAX]];
		}
		band = res.classid;
	}
	/* band is u32: a minor of 0 wraps and fails the range check below */
	band = TC_H_MIN(band) - 1;
	if (band >= q->bands)
		return q->queues[q->prio2band[0]];

	return q->queues[band];
}
  61. static int
  62. prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
  63. {
  64. unsigned int len = qdisc_pkt_len(skb);
  65. struct Qdisc *qdisc;
  66. int ret;
  67. qdisc = prio_classify(skb, sch, &ret);
  68. #ifdef CONFIG_NET_CLS_ACT
  69. if (qdisc == NULL) {
  70. if (ret & __NET_XMIT_BYPASS)
  71. qdisc_qstats_drop(sch);
  72. __qdisc_drop(skb, to_free);
  73. return ret;
  74. }
  75. #endif
  76. ret = qdisc_enqueue(skb, qdisc, to_free);
  77. if (ret == NET_XMIT_SUCCESS) {
  78. sch->qstats.backlog += len;
  79. sch->q.qlen++;
  80. return NET_XMIT_SUCCESS;
  81. }
  82. if (net_xmit_drop_count(ret))
  83. qdisc_qstats_drop(sch);
  84. return ret;
  85. }
  86. static struct sk_buff *prio_peek(struct Qdisc *sch)
  87. {
  88. struct prio_sched_data *q = qdisc_priv(sch);
  89. int prio;
  90. for (prio = 0; prio < q->bands; prio++) {
  91. struct Qdisc *qdisc = q->queues[prio];
  92. struct sk_buff *skb = qdisc->ops->peek(qdisc);
  93. if (skb)
  94. return skb;
  95. }
  96. return NULL;
  97. }
  98. static struct sk_buff *prio_dequeue(struct Qdisc *sch)
  99. {
  100. struct prio_sched_data *q = qdisc_priv(sch);
  101. int prio;
  102. for (prio = 0; prio < q->bands; prio++) {
  103. struct Qdisc *qdisc = q->queues[prio];
  104. struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
  105. if (skb) {
  106. qdisc_bstats_update(sch, skb);
  107. qdisc_qstats_backlog_dec(sch, skb);
  108. sch->q.qlen--;
  109. return skb;
  110. }
  111. }
  112. return NULL;
  113. }
  114. static void
  115. prio_reset(struct Qdisc *sch)
  116. {
  117. int prio;
  118. struct prio_sched_data *q = qdisc_priv(sch);
  119. for (prio = 0; prio < q->bands; prio++)
  120. qdisc_reset(q->queues[prio]);
  121. sch->qstats.backlog = 0;
  122. sch->q.qlen = 0;
  123. }
  124. static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
  125. {
  126. struct net_device *dev = qdisc_dev(sch);
  127. struct tc_prio_qopt_offload opt = {
  128. .handle = sch->handle,
  129. .parent = sch->parent,
  130. };
  131. if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
  132. return -EOPNOTSUPP;
  133. if (qopt) {
  134. opt.command = TC_PRIO_REPLACE;
  135. opt.replace_params.bands = qopt->bands;
  136. memcpy(&opt.replace_params.priomap, qopt->priomap,
  137. TC_PRIO_MAX + 1);
  138. opt.replace_params.qstats = &sch->qstats;
  139. } else {
  140. opt.command = TC_PRIO_DESTROY;
  141. }
  142. return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_PRIO, &opt);
  143. }
  144. static void
  145. prio_destroy(struct Qdisc *sch)
  146. {
  147. int prio;
  148. struct prio_sched_data *q = qdisc_priv(sch);
  149. tcf_block_put(q->block);
  150. prio_offload(sch, NULL);
  151. for (prio = 0; prio < q->bands; prio++)
  152. qdisc_put(q->queues[prio]);
  153. }
/*
 * Apply a new band count and priomap (both the init and change paths).
 *
 * Validates the request, pre-allocates a default pfifo for each newly
 * added band *before* committing, then swaps the configuration in
 * under the tree lock.  Children of removed bands are flushed under
 * the lock and released only after it is dropped.
 */
static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct Qdisc *queues[TCQ_PRIO_BANDS];
	int oldbands = q->bands, i;
	struct tc_prio_qopt *qopt;

	if (nla_len(opt) < sizeof(*qopt))
		return -EINVAL;
	qopt = nla_data(opt);

	if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
		return -EINVAL;

	/* Every priority value must map to an existing band. */
	for (i = 0; i <= TC_PRIO_MAX; i++) {
		if (qopt->priomap[i] >= qopt->bands)
			return -EINVAL;
	}

	/* Before commit, make sure we can allocate all new qdiscs */
	for (i = oldbands; i < qopt->bands; i++) {
		queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					      TC_H_MAKE(sch->handle, i + 1),
					      extack);
		if (!queues[i]) {
			/* Roll back the children allocated so far. */
			while (i > oldbands)
				qdisc_put(queues[--i]);
			return -ENOMEM;
		}
	}

	prio_offload(sch, qopt);
	sch_tree_lock(sch);
	q->bands = qopt->bands;
	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);

	/* Flush bands that no longer exist while the tree is locked. */
	for (i = q->bands; i < oldbands; i++)
		qdisc_tree_flush_backlog(q->queues[i]);

	for (i = oldbands; i < q->bands; i++) {
		q->queues[i] = queues[i];
		if (q->queues[i] != &noop_qdisc)
			qdisc_hash_add(q->queues[i], true);
	}

	sch_tree_unlock(sch);

	/* Release the removed children outside the lock. */
	for (i = q->bands; i < oldbands; i++)
		qdisc_put(q->queues[i]);
	return 0;
}
  197. static int prio_init(struct Qdisc *sch, struct nlattr *opt,
  198. struct netlink_ext_ack *extack)
  199. {
  200. struct prio_sched_data *q = qdisc_priv(sch);
  201. int err;
  202. if (!opt)
  203. return -EINVAL;
  204. err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
  205. if (err)
  206. return err;
  207. return prio_tune(sch, opt, extack);
  208. }
  209. static int prio_dump_offload(struct Qdisc *sch)
  210. {
  211. struct tc_prio_qopt_offload hw_stats = {
  212. .command = TC_PRIO_STATS,
  213. .handle = sch->handle,
  214. .parent = sch->parent,
  215. {
  216. .stats = {
  217. .bstats = &sch->bstats,
  218. .qstats = &sch->qstats,
  219. },
  220. },
  221. };
  222. return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_PRIO, &hw_stats);
  223. }
  224. static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
  225. {
  226. struct prio_sched_data *q = qdisc_priv(sch);
  227. unsigned char *b = skb_tail_pointer(skb);
  228. struct tc_prio_qopt opt;
  229. int err;
  230. opt.bands = q->bands;
  231. memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);
  232. err = prio_dump_offload(sch);
  233. if (err)
  234. goto nla_put_failure;
  235. if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
  236. goto nla_put_failure;
  237. return skb->len;
  238. nla_put_failure:
  239. nlmsg_trim(skb, b);
  240. return -1;
  241. }
/*
 * Replace the child qdisc of band (@arg - 1) with @new, returning the
 * previous child in *old.
 *
 * A NULL @new means "attach a default pfifo"; if even that allocation
 * fails, fall back to noop_qdisc so the band always has a child.  The
 * swap is then mirrored to offloading hardware.
 */
static int prio_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		      struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	struct tc_prio_qopt_offload graft_offload;
	unsigned long band = arg - 1;

	if (!new) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					TC_H_MAKE(sch->handle, arg), extack);
		if (!new)
			new = &noop_qdisc;
		else
			qdisc_hash_add(new, true);
	}

	*old = qdisc_replace(sch, new, &q->queues[band]);

	graft_offload.handle = sch->handle;
	graft_offload.parent = sch->parent;
	graft_offload.graft_params.band = band;
	graft_offload.graft_params.child_handle = new->handle;
	graft_offload.command = TC_PRIO_GRAFT;

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, *old,
				   TC_SETUP_QDISC_PRIO, &graft_offload,
				   extack);
	return 0;
}
  267. static struct Qdisc *
  268. prio_leaf(struct Qdisc *sch, unsigned long arg)
  269. {
  270. struct prio_sched_data *q = qdisc_priv(sch);
  271. unsigned long band = arg - 1;
  272. return q->queues[band];
  273. }
  274. static unsigned long prio_find(struct Qdisc *sch, u32 classid)
  275. {
  276. struct prio_sched_data *q = qdisc_priv(sch);
  277. unsigned long band = TC_H_MIN(classid);
  278. if (band - 1 >= q->bands)
  279. return 0;
  280. return band;
  281. }
/* Binding a filter to a class is just a validity check on the classid. */
static unsigned long prio_bind(struct Qdisc *sch, unsigned long parent, u32 classid)
{
	return prio_find(sch, classid);
}
/* Nothing to release: prio_bind() takes no reference on the class. */
static void prio_unbind(struct Qdisc *q, unsigned long cl)
{
}
  289. static int prio_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb,
  290. struct tcmsg *tcm)
  291. {
  292. struct prio_sched_data *q = qdisc_priv(sch);
  293. tcm->tcm_handle |= TC_H_MIN(cl);
  294. tcm->tcm_info = q->queues[cl-1]->handle;
  295. return 0;
  296. }
  297. static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
  298. struct gnet_dump *d)
  299. {
  300. struct prio_sched_data *q = qdisc_priv(sch);
  301. struct Qdisc *cl_q;
  302. cl_q = q->queues[cl - 1];
  303. if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
  304. d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
  305. qdisc_qstats_copy(d, cl_q) < 0)
  306. return -1;
  307. return 0;
  308. }
  309. static void prio_walk(struct Qdisc *sch, struct qdisc_walker *arg)
  310. {
  311. struct prio_sched_data *q = qdisc_priv(sch);
  312. int prio;
  313. if (arg->stop)
  314. return;
  315. for (prio = 0; prio < q->bands; prio++) {
  316. if (arg->count < arg->skip) {
  317. arg->count++;
  318. continue;
  319. }
  320. if (arg->fn(sch, prio + 1, arg) < 0) {
  321. arg->stop = 1;
  322. break;
  323. }
  324. arg->count++;
  325. }
  326. }
  327. static struct tcf_block *prio_tcf_block(struct Qdisc *sch, unsigned long cl,
  328. struct netlink_ext_ack *extack)
  329. {
  330. struct prio_sched_data *q = qdisc_priv(sch);
  331. if (cl)
  332. return NULL;
  333. return q->block;
  334. }
/* Class operations: each band of the PRIO qdisc is exposed as a class. */
static const struct Qdisc_class_ops prio_class_ops = {
	.graft		= prio_graft,
	.leaf		= prio_leaf,
	.find		= prio_find,
	.walk		= prio_walk,
	.tcf_block	= prio_tcf_block,
	.bind_tcf	= prio_bind,
	.unbind_tcf	= prio_unbind,
	.dump		= prio_dump_class,
	.dump_stats	= prio_dump_class_stats,
};
/* Qdisc operations table registered under the "prio" identifier. */
static struct Qdisc_ops prio_qdisc_ops __read_mostly = {
	.next		= NULL,
	.cl_ops		= &prio_class_ops,
	.id		= "prio",
	.priv_size	= sizeof(struct prio_sched_data),
	.enqueue	= prio_enqueue,
	.dequeue	= prio_dequeue,
	.peek		= prio_peek,
	.init		= prio_init,
	.reset		= prio_reset,
	.destroy	= prio_destroy,
	.change		= prio_tune,	/* runtime reconfiguration reuses the tune path */
	.dump		= prio_dump,
	.owner		= THIS_MODULE,
};
/* Module entry point: register the "prio" qdisc with the tc core. */
static int __init prio_module_init(void)
{
	return register_qdisc(&prio_qdisc_ops);
}

/* Module exit point: unregister the qdisc ops. */
static void __exit prio_module_exit(void)
{
	unregister_qdisc(&prio_qdisc_ops);
}

module_init(prio_module_init)
module_exit(prio_module_exit)

MODULE_LICENSE("GPL");