/net/sched/sch_atm.c

// SPDX-License-Identifier: GPL-2.0-only
/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>		/* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The
 * kernel keeps the VC open at least until all classes using it are removed.
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in
 * the rest of net/sched.
 *
 * Known bugs:
 *  - sometimes messes up the IP stack
 *  - any manipulations besides the few operations described in the README are
 *    untested and likely to crash the system
 *  - should lock the flow while there is data in the queue (?)
 */
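
/*
 * Data path overview: atm_tc_enqueue() classifies each skb to a flow and
 * queues it on that flow's leaf qdisc. Packets bound for a VC are later
 * pulled off by the sch_atm_dequeue() tasklet and handed to vcc->send();
 * only packets on the default "link" flow are returned to the parent via
 * atm_tc_dequeue().
 */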

#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))

struct atm_flow_data {
	struct Qdisc_class_common common;
	struct Qdisc	*q;		/* FIFO, TBF, etc. */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	struct atm_vcc	*vcc;		/* VCC; NULL if VCC is closed */
	void (*old_pop)(struct atm_vcc *vcc,
			struct sk_buff *skb);	/* chaining */
	struct atm_qdisc_data *parent;	/* parent qdisc */
	struct socket	*sock;		/* for closing */
	int		ref;		/* reference count */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct list_head list;
	struct atm_flow_data *excess;	/* flow for excess traffic;
					   NULL to set CLP instead */
	int		hdr_len;
	unsigned char	hdr[];		/* header data; MUST BE LAST */
};

struct atm_qdisc_data {
	struct atm_flow_data link;	/* unclassified skbs go here */
	struct list_head flows;		/* NB: "link" is also on this list */
	struct tasklet_struct task;	/* dequeue tasklet */
};

/* ------------------------- Class/flow operations ------------------------- */

static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	list_for_each_entry(flow, &p->flows, list) {
		if (flow->common.classid == classid)
			return flow;
	}
	return NULL;
}
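
/*
 * Replace the leaf qdisc of a flow. The old qdisc is reset and handed back
 * to the caller; grafting NULL installs the noop qdisc instead.
 */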
static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old,
			struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
		 sch, p, flow, new, old);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (!new)
		new = &noop_qdisc;
	*old = flow->q;
	flow->q = new;
	if (*old)
		qdisc_reset(*old);
	return 0;
}

static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
	return flow ? flow->q : NULL;
}
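
/*
 * atm_tc_find() looks up a class by classid and returns it as an opaque
 * handle; atm_tc_bind_filter() does the same but also takes an extra
 * reference, which is dropped again through atm_tc_put() when the filter
 * is unbound.
 */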
static unsigned long atm_tc_find(struct Qdisc *sch, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	flow = lookup_flow(sch, classid);
	pr_debug("%s: flow %p\n", __func__, flow);
	return (unsigned long)flow;
}

static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("%s(sch %p,[qdisc %p],classid %x)\n", __func__, sch, p, classid);
	flow = lookup_flow(sch, classid);
	if (flow)
		flow->ref++;
	pr_debug("%s: flow %p\n", __func__, flow);
	return (unsigned long)flow;
}

/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (--flow->ref)
		return;
	pr_debug("atm_tc_put: destroying\n");
	list_del_init(&flow->list);
	pr_debug("atm_tc_put: qdisc %p\n", flow->q);
	qdisc_put(flow->q);
	tcf_block_put(flow->block);
	if (flow->sock) {
		pr_debug("atm_tc_put: f_count %ld\n",
			 file_count(flow->sock->file));
		flow->vcc->pop = flow->old_pop;
		sockfd_put(flow->sock);
	}
	if (flow->excess)
		atm_tc_put(sch, (unsigned long)flow->excess);
	if (flow != &p->link)
		kfree(flow);
	/*
	 * If flow == &p->link, the qdisc no longer works at this point and
	 * needs to be removed. (By the caller of atm_tc_put.)
	 */
}
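
/*
 * Replacement pop handler installed on each VC: chain to the driver's
 * original pop and then kick the dequeue tasklet, since send room may have
 * become available on the VC.
 */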
static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
	struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

	pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
	VCC2FLOW(vcc)->old_pop(vcc, skb);
	tasklet_schedule(&p->task);
}

static const u8 llc_oui_ip[] = {
	0xaa,			/* DSAP: non-ISO */
	0xaa,			/* SSAP: non-ISO */
	0x03,			/* Ctrl: Unnumbered Information Command PDU */
	0x00,			/* OUI: EtherType */
	0x00, 0x00,
	0x08, 0x00
};				/* Ethertype IP (0800) */

static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
	[TCA_ATM_FD]		= { .type = NLA_U32 },
	[TCA_ATM_EXCESS]	= { .type = NLA_U32 },
};
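
/*
 * Create a new class (existing classes cannot be modified, see below). The
 * class is bound to the ATM socket given by TCA_ATM_FD; the socket's VCC is
 * kept referenced until the class goes away, and its pop handler is chained
 * through sch_atm_pop().
 */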
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg,
			 struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
	struct atm_flow_data *excess = NULL;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ATM_MAX + 1];
	struct socket *sock;
	int fd, error, hdr_len;
	void *hdr;

	pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
		 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
	/*
	 * The concept of parents doesn't apply for this qdisc.
	 */
	if (parent && parent != TC_H_ROOT && parent != sch->handle)
		return -EINVAL;
	/*
	 * ATM classes cannot be changed. In order to change properties of the
	 * ATM connection, that socket needs to be modified directly (via the
	 * native ATM API). In order to send a flow to a different VC, the old
	 * class needs to be removed and a new one added. (This may be changed
	 * later.)
	 */
	if (flow)
		return -EBUSY;
	if (opt == NULL)
		return -EINVAL;

	error = nla_parse_nested_deprecated(tb, TCA_ATM_MAX, opt, atm_policy,
					    NULL);
	if (error < 0)
		return error;

	if (!tb[TCA_ATM_FD])
		return -EINVAL;
	fd = nla_get_u32(tb[TCA_ATM_FD]);
	pr_debug("atm_tc_change: fd %d\n", fd);
	if (tb[TCA_ATM_HDR]) {
		hdr_len = nla_len(tb[TCA_ATM_HDR]);
		hdr = nla_data(tb[TCA_ATM_HDR]);
	} else {
		hdr_len = RFC1483LLC_LEN;
		hdr = NULL;	/* default LLC/SNAP for IP */
	}
	if (!tb[TCA_ATM_EXCESS])
		excess = NULL;
	else {
		excess = (struct atm_flow_data *)
			atm_tc_find(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
		if (!excess)
			return -ENOENT;
	}
	pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
		 opt->nla_type, nla_len(opt), hdr_len);
	sock = sockfd_lookup(fd, &error);
	if (!sock)
		return error;	/* f_count++ */
	pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
	if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
		error = -EPROTOTYPE;
		goto err_out;
	}
	/* @@@ should check if the socket is really operational or we'll crash
	   on vcc->send */
	if (classid) {
		if (TC_H_MAJ(classid ^ sch->handle)) {
			pr_debug("atm_tc_change: classid mismatch\n");
			error = -EINVAL;
			goto err_out;
		}
	} else {
		int i;
		unsigned long cl;
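
		/* No classid given: pick the first free minor in 0x8001..0xffff. */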
		for (i = 1; i < 0x8000; i++) {
			classid = TC_H_MAKE(sch->handle, 0x8000 | i);
			cl = atm_tc_find(sch, classid);
			if (!cl)
				break;
		}
	}
	pr_debug("atm_tc_change: new id %x\n", classid);
	flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
	pr_debug("atm_tc_change: flow %p\n", flow);
	if (!flow) {
		error = -ENOBUFS;
		goto err_out;
	}

	error = tcf_block_get(&flow->block, &flow->filter_list, sch,
			      extack);
	if (error) {
		kfree(flow);
		goto err_out;
	}

	flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid,
				    extack);
	if (!flow->q)
		flow->q = &noop_qdisc;
	pr_debug("atm_tc_change: qdisc %p\n", flow->q);
	flow->sock = sock;
	flow->vcc = ATM_SD(sock);	/* speedup */
	flow->vcc->user_back = flow;
	pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
	flow->old_pop = flow->vcc->pop;
	flow->parent = p;
	flow->vcc->pop = sch_atm_pop;
	flow->common.classid = classid;
	flow->ref = 1;
	flow->excess = excess;
	list_add(&flow->list, &p->link.list);
	flow->hdr_len = hdr_len;
	if (hdr)
		memcpy(flow->hdr, hdr, hdr_len);
	else
		memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
	*arg = (unsigned long)flow;
	return 0;
err_out:
	sockfd_put(sock);
	return error;
}
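
/*
 * Delete a class. The "link" flow, flows that still have filters attached,
 * and flows referenced elsewhere (e.g. as an excess target) are refused.
 */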
static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	if (list_empty(&flow->list))
		return -EINVAL;
	if (rcu_access_pointer(flow->filter_list) || flow == &p->link)
		return -EBUSY;
	/*
	 * Reference count must be 2: one for "keepalive" (set at class
	 * creation), and one for the reference held when calling delete.
	 */
	if (flow->ref < 2) {
		pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
		return -EINVAL;
	}
	if (flow->ref > 2)
		return -EBUSY;	/* catch references via excess, etc. */
	atm_tc_put(sch, arg);
	return 0;
}

static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
	if (walker->stop)
		return;
	list_for_each_entry(flow, &p->flows, list) {
		if (walker->count >= walker->skip &&
		    walker->fn(sch, (unsigned long)flow, walker) < 0) {
			walker->stop = 1;
			break;
		}
		walker->count++;
	}
}

static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
					  struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;

	pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
	return flow ? flow->block : p->link.block;
}

/* --------------------------- Qdisc operations ---------------------------- */
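
/*
 * Classify the packet and enqueue it on the selected flow's leaf qdisc.
 * Unclassified traffic goes to the "link" flow. Only packets queued on the
 * link flow are counted in sch->q.qlen; packets destined for a VC are
 * reported as __NET_XMIT_BYPASS and drained by the tasklet instead.
 */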
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			  struct sk_buff **to_free)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct tcf_result res;
	int result;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
	result = TC_ACT_OK;	/* be nice to gcc */
	flow = NULL;
	if (TC_H_MAJ(skb->priority) != sch->handle ||
	    !(flow = (struct atm_flow_data *)atm_tc_find(sch, skb->priority))) {
		struct tcf_proto *fl;

		list_for_each_entry(flow, &p->flows, list) {
			fl = rcu_dereference_bh(flow->filter_list);
			if (fl) {
				result = tcf_classify(skb, fl, &res, true);
				if (result < 0)
					continue;
				flow = (struct atm_flow_data *)res.class;
				if (!flow)
					flow = lookup_flow(sch, res.classid);
				goto done;
			}
		}
		flow = NULL;
done:
		;
	}
	if (!flow) {
		flow = &p->link;
	} else {
		if (flow->vcc)
			ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
		/*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			__qdisc_drop(skb, to_free);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			__qdisc_drop(skb, to_free);
			goto drop;
		case TC_ACT_RECLASSIFY:
			if (flow->excess)
				flow = flow->excess;
			else
				ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
			break;
		}
#endif
	}

	ret = qdisc_enqueue(skb, flow->q, to_free);
	if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			if (flow)
				flow->qstats.drops++;
		}
		return ret;
	}
	/*
	 * Okay, this may seem weird. We pretend we've dropped the packet if
	 * it goes via ATM. The reason for this is that the outer qdisc
	 * expects to be able to q->dequeue the packet later on if we return
	 * success at this place. Also, sch->q.qlen needs to reflect whether
	 * there is a packet eligible for dequeuing or not. Note that the
	 * statistics of the outer qdisc are necessarily wrong because of all
	 * this. There's currently no correct solution for this.
	 */
	if (flow == &p->link) {
		sch->q.qlen++;
		return NET_XMIT_SUCCESS;
	}
	tasklet_schedule(&p->task);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */
static void sch_atm_dequeue(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;
	struct sk_buff *skb;

	pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		if (flow == &p->link)
			continue;
		/*
		 * If traffic is properly shaped, this won't generate nasty
		 * little bursts. Otherwise, it may ... (but that's okay)
		 */
		while ((skb = flow->q->ops->peek(flow->q))) {
			if (!atm_may_send(flow->vcc, skb->truesize))
				break;

			skb = qdisc_dequeue_peeked(flow->q);
			if (unlikely(!skb))
				break;

			qdisc_bstats_update(sch, skb);
			bstats_update(&flow->bstats, skb);
			pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
			/* remove any LL header somebody else has attached */
			skb_pull(skb, skb_network_offset(skb));
			if (skb_headroom(skb) < flow->hdr_len) {
				struct sk_buff *new;

				new = skb_realloc_headroom(skb, flow->hdr_len);
				dev_kfree_skb(skb);
				if (!new)
					continue;
				skb = new;
			}
			pr_debug("sch_atm_dequeue: ip %p, data %p\n",
				 skb_network_header(skb), skb->data);
			ATM_SKB(skb)->vcc = flow->vcc;
			memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
			       flow->hdr_len);
			refcount_add(skb->truesize,
				     &sk_atm(flow->vcc)->sk_wmem_alloc);
			/* atm.atm_options are already set by atm_tc_enqueue */
			flow->vcc->send(flow->vcc, skb);
		}
	}
}
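
/*
 * Hand back packets from the "link" flow only; everything bound to a VC is
 * pushed out by the sch_atm_dequeue() tasklet, which is (re)scheduled here.
 */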
static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
	tasklet_schedule(&p->task);
	skb = qdisc_dequeue_peeked(p->link.q);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);

	pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->link.q->ops->peek(p->link.q);
}
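
/*
 * Set up the built-in "link" flow (default pfifo leaf, classid equal to the
 * qdisc handle) and the dequeue tasklet.
 */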
static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
	INIT_LIST_HEAD(&p->flows);
	INIT_LIST_HEAD(&p->link.list);
	list_add(&p->link.list, &p->flows);

	p->link.q = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, sch->handle, extack);
	if (!p->link.q)
		p->link.q = &noop_qdisc;
	pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);

	err = tcf_block_get(&p->link.block, &p->link.filter_list, sch,
			    extack);
	if (err)
		return err;

	p->link.vcc = NULL;
	p->link.sock = NULL;
	p->link.common.classid = sch->handle;
	p->link.ref = 1;
	tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
	return 0;
}

static void atm_tc_reset(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow;

	pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list)
		qdisc_reset(flow->q);
	sch->q.qlen = 0;
}
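
/*
 * Tear down the qdisc: drop all filter blocks first, then release every
 * flow (including the "link" flow) and kill the dequeue tasklet.
 */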
static void atm_tc_destroy(struct Qdisc *sch)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow, *tmp;

	pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
	list_for_each_entry(flow, &p->flows, list) {
		tcf_block_put(flow->block);
		flow->block = NULL;
	}

	list_for_each_entry_safe(flow, tmp, &p->flows, list) {
		if (flow->ref > 1)
			pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
		atm_tc_put(sch, (unsigned long)flow);
	}
	tasklet_kill(&p->task);
}
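
/*
 * Dump a class: the encapsulation header, and, if a VC is attached, its
 * PVC address and state, plus the classid reported for the excess flow
 * (0 if excess traffic sets CLP instead).
 */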
static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct atm_qdisc_data *p = qdisc_priv(sch);
	struct atm_flow_data *flow = (struct atm_flow_data *)cl;
	struct nlattr *nest;

	pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
		 sch, p, flow, skb, tcm);
	if (list_empty(&flow->list))
		return -EINVAL;
	tcm->tcm_handle = flow->common.classid;
	tcm->tcm_info = flow->q->handle;

	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr))
		goto nla_put_failure;
	if (flow->vcc) {
		struct sockaddr_atmpvc pvc;
		int state;

		memset(&pvc, 0, sizeof(pvc));
		pvc.sap_family = AF_ATMPVC;
		pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
		pvc.sap_addr.vpi = flow->vcc->vpi;
		pvc.sap_addr.vci = flow->vcc->vci;
		if (nla_put(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc))
			goto nla_put_failure;
		state = ATM_VF2VS(flow->vcc->flags);
		if (nla_put_u32(skb, TCA_ATM_STATE, state))
			goto nla_put_failure;
	}
	if (flow->excess) {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, flow->common.classid))
			goto nla_put_failure;
	} else {
		if (nla_put_u32(skb, TCA_ATM_EXCESS, 0))
			goto nla_put_failure;
	}
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
			struct gnet_dump *d)
{
	struct atm_flow_data *flow = (struct atm_flow_data *)arg;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &flow->bstats) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &flow->qstats, flow->q->q.qlen) < 0)
		return -1;

	return 0;
}

static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return 0;
}

static const struct Qdisc_class_ops atm_class_ops = {
	.graft		= atm_tc_graft,
	.leaf		= atm_tc_leaf,
	.find		= atm_tc_find,
	.change		= atm_tc_change,
	.delete		= atm_tc_delete,
	.walk		= atm_tc_walk,
	.tcf_block	= atm_tc_tcf_block,
	.bind_tcf	= atm_tc_bind_filter,
	.unbind_tcf	= atm_tc_put,
	.dump		= atm_tc_dump_class,
	.dump_stats	= atm_tc_dump_class_stats,
};

static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
	.cl_ops		= &atm_class_ops,
	.id		= "atm",
	.priv_size	= sizeof(struct atm_qdisc_data),
	.enqueue	= atm_tc_enqueue,
	.dequeue	= atm_tc_dequeue,
	.peek		= atm_tc_peek,
	.init		= atm_tc_init,
	.reset		= atm_tc_reset,
	.destroy	= atm_tc_destroy,
	.dump		= atm_tc_dump,
	.owner		= THIS_MODULE,
};

static int __init atm_init(void)
{
	return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
	unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");