
/net/sched/sch_atm.c

https://github.com/mstsirkin/kvm
/* net/sched/sch_atm.c - ATM VC selection "queueing discipline" */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
#include <linux/rtnetlink.h>
#include <linux/file.h>         /* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>

extern struct socket *sockfd_lookup(int fd, int *err);  /* @@@ fix this */

/*
 * The ATM queuing discipline provides a framework for invoking classifiers
 * (aka "filters"), which in turn select classes of this queuing discipline.
 * Each class maps the flow(s) it is handling to a given VC. Multiple classes
 * may share the same VC.
 *
 * When creating a class, VCs are specified by passing the number of the open
 * socket descriptor by which the calling process references the VC. The
 * kernel keeps the VC open at least until all classes using it are removed.
 *
 * In this file, most functions are named atm_tc_* to avoid confusion with all
 * the atm_* in net/atm. This naming convention differs from what's used in
 * the rest of net/sched.
 *
 * Known bugs:
 *  - sometimes messes up the IP stack
 *  - any manipulations besides the few operations described in the README
 *    are untested and likely to crash the system
 *  - should lock the flow while there is data in the queue (?)
 */

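/*
 * Typical usage (illustrative, not mandated by this file): a userspace
 * helper such as tc(8) built with ATM support opens an ATM socket (PVC or
 * SVC), connects it, and passes the descriptor as the TCA_ATM_FD attribute
 * when adding a class. atm_tc_change() below resolves that descriptor with
 * sockfd_lookup() and holds a reference on the socket until the class is
 * destroyed.
 */
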
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))

struct atm_flow_data {
        struct Qdisc            *q;             /* FIFO, TBF, etc. */
        struct tcf_proto        *filter_list;
        struct atm_vcc          *vcc;           /* VCC; NULL if VCC is closed */
        void                    (*old_pop)(struct atm_vcc *vcc,
                                           struct sk_buff *skb); /* chaining */
        struct atm_qdisc_data   *parent;        /* parent qdisc */
        struct socket           *sock;          /* for closing */
        u32                     classid;        /* x:y type ID */
        int                     ref;            /* reference count */
        struct gnet_stats_basic_packed  bstats;
        struct gnet_stats_queue qstats;
        struct list_head        list;
        struct atm_flow_data    *excess;        /* flow for excess traffic;
                                                   NULL to set CLP instead */
        int                     hdr_len;
        unsigned char           hdr[0];         /* header data; MUST BE LAST */
};

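/*
 * hdr[] above is a flexible array member: atm_tc_change() allocates
 * sizeof(struct atm_flow_data) + hdr_len in a single kzalloc() and copies
 * the per-VC encapsulation header into it; sch_atm_dequeue() prepends that
 * header to every packet sent on the VC.
 */
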
struct atm_qdisc_data {
        struct atm_flow_data    link;           /* unclassified skbs go here */
        struct list_head        flows;          /* NB: "link" is also on this
                                                   list */
        struct tasklet_struct   task;           /* dequeue tasklet */
};

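/*
 * "link" carries traffic that no filter maps to a VC and is itself a member
 * of the "flows" list, so the list walks below cover it as well. The tasklet
 * runs sch_atm_dequeue() to drain the per-VC queues outside of the normal
 * qdisc dequeue path.
 */
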
/* ------------------------- Class/flow operations ------------------------- */

static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow;

        list_for_each_entry(flow, &p->flows, list) {
                if (flow->classid == classid)
                        return flow;
        }
        return NULL;
}

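/*
 * Graft swaps the class's inner qdisc in place; the previous qdisc is reset
 * and handed back through *old for the caller to dispose of. A class that
 * has already been unlinked (empty list node) cannot be grafted onto.
 */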
static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
                        struct Qdisc *new, struct Qdisc **old)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)arg;

        pr_debug("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
                 sch, p, flow, new, old);
        if (list_empty(&flow->list))
                return -EINVAL;
        if (!new)
                new = &noop_qdisc;
        *old = flow->q;
        flow->q = new;
        if (*old)
                qdisc_reset(*old);
        return 0;
}

static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
        struct atm_flow_data *flow = (struct atm_flow_data *)cl;

        pr_debug("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
        return flow ? flow->q : NULL;
}

static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
{
        struct atm_qdisc_data *p __maybe_unused = qdisc_priv(sch);
        struct atm_flow_data *flow;

        pr_debug("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
        flow = lookup_flow(sch, classid);
        if (flow)
                flow->ref++;
        pr_debug("atm_tc_get: flow %p\n", flow);
        return (unsigned long)flow;
}

static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
                                        unsigned long parent, u32 classid)
{
        return atm_tc_get(sch, classid);
}

/*
 * atm_tc_put handles all destructions, including the ones that are explicitly
 * requested (atm_tc_destroy, etc.). The assumption here is that we never drop
 * anything that still seems to be in use.
 */
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)cl;

        pr_debug("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
        if (--flow->ref)
                return;
        pr_debug("atm_tc_put: destroying\n");
        list_del_init(&flow->list);
        pr_debug("atm_tc_put: qdisc %p\n", flow->q);
        qdisc_destroy(flow->q);
        tcf_destroy_chain(&flow->filter_list);
        if (flow->sock) {
                pr_debug("atm_tc_put: f_count %ld\n",
                         file_count(flow->sock->file));
                flow->vcc->pop = flow->old_pop;
                sockfd_put(flow->sock);
        }
        if (flow->excess)
                atm_tc_put(sch, (unsigned long)flow->excess);
        if (flow != &p->link)
                kfree(flow);
        /*
         * If flow == &p->link, the qdisc no longer works at this point and
         * needs to be removed. (By the caller of atm_tc_put.)
         */
}

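/*
 * sch_atm_pop() below is installed as the VCC's pop callback by
 * atm_tc_change(). Whenever the device releases a transmitted buffer, the
 * original pop runs first and the dequeue tasklet is rescheduled, so a
 * flow-controlled VC is refilled as soon as atm_may_send() can succeed again.
 */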
static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
        struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;

        pr_debug("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
        VCC2FLOW(vcc)->old_pop(vcc, skb);
        tasklet_schedule(&p->task);
}

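/*
 * llc_oui_ip below is the default 8-byte LLC/SNAP (RFC 1483/2684)
 * encapsulation for IPv4, used by atm_tc_change() whenever no TCA_ATM_HDR
 * attribute is supplied.
 */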
static const u8 llc_oui_ip[] = {
        0xaa,                   /* DSAP: non-ISO */
        0xaa,                   /* SSAP: non-ISO */
        0x03,                   /* Ctrl: Unnumbered Information Command PDU */
        0x00,                   /* OUI: EtherType */
        0x00, 0x00,
        0x08, 0x00
};                              /* Ethertype IP (0800) */

static const struct nla_policy atm_policy[TCA_ATM_MAX + 1] = {
        [TCA_ATM_FD]            = { .type = NLA_U32 },
        [TCA_ATM_EXCESS]        = { .type = NLA_U32 },
};

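/*
 * atm_tc_change() creates a new class bound to the VC behind TCA_ATM_FD.
 * An optional TCA_ATM_HDR attribute replaces the default LLC/SNAP header,
 * and an optional TCA_ATM_EXCESS names an existing class that receives
 * traffic a policer reclassifies (otherwise the CLP bit is set instead).
 * Existing classes cannot be modified, only deleted and re-added.
 */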
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
                         struct nlattr **tca, unsigned long *arg)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
        struct atm_flow_data *excess = NULL;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_ATM_MAX + 1];
        struct socket *sock;
        int fd, error, hdr_len;
        void *hdr;

        pr_debug("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
                 "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
        /*
         * The concept of parents doesn't apply for this qdisc.
         */
        if (parent && parent != TC_H_ROOT && parent != sch->handle)
                return -EINVAL;
        /*
         * ATM classes cannot be changed. In order to change properties of the
         * ATM connection, that socket needs to be modified directly (via the
         * native ATM API). In order to send a flow to a different VC, the old
         * class needs to be removed and a new one added. (This may be changed
         * later.)
         */
        if (flow)
                return -EBUSY;
        if (opt == NULL)
                return -EINVAL;

        error = nla_parse_nested(tb, TCA_ATM_MAX, opt, atm_policy);
        if (error < 0)
                return error;

        if (!tb[TCA_ATM_FD])
                return -EINVAL;
        fd = nla_get_u32(tb[TCA_ATM_FD]);
        pr_debug("atm_tc_change: fd %d\n", fd);
        if (tb[TCA_ATM_HDR]) {
                hdr_len = nla_len(tb[TCA_ATM_HDR]);
                hdr = nla_data(tb[TCA_ATM_HDR]);
        } else {
                hdr_len = RFC1483LLC_LEN;
                hdr = NULL;     /* default LLC/SNAP for IP */
        }
        if (!tb[TCA_ATM_EXCESS])
                excess = NULL;
        else {
                excess = (struct atm_flow_data *)
                        atm_tc_get(sch, nla_get_u32(tb[TCA_ATM_EXCESS]));
                if (!excess)
                        return -ENOENT;
        }
        pr_debug("atm_tc_change: type %d, payload %d, hdr_len %d\n",
                 opt->nla_type, nla_len(opt), hdr_len);
        sock = sockfd_lookup(fd, &error);
        if (!sock)
                return error;   /* f_count++ */
        pr_debug("atm_tc_change: f_count %ld\n", file_count(sock->file));
        if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
                error = -EPROTOTYPE;
                goto err_out;
        }
        /* @@@ should check if the socket is really operational or we'll crash
           on vcc->send */
        if (classid) {
                if (TC_H_MAJ(classid ^ sch->handle)) {
                        pr_debug("atm_tc_change: classid mismatch\n");
                        error = -EINVAL;
                        goto err_out;
                }
        } else {
                int i;
                unsigned long cl;

                for (i = 1; i < 0x8000; i++) {
                        classid = TC_H_MAKE(sch->handle, 0x8000 | i);
                        cl = atm_tc_get(sch, classid);
                        if (!cl)
                                break;
                        atm_tc_put(sch, cl);
                }
        }
        pr_debug("atm_tc_change: new id %x\n", classid);
        flow = kzalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
        pr_debug("atm_tc_change: flow %p\n", flow);
        if (!flow) {
                error = -ENOBUFS;
                goto err_out;
        }
        flow->filter_list = NULL;
        flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
        if (!flow->q)
                flow->q = &noop_qdisc;
        pr_debug("atm_tc_change: qdisc %p\n", flow->q);
        flow->sock = sock;
        flow->vcc = ATM_SD(sock);       /* speedup */
        flow->vcc->user_back = flow;
        pr_debug("atm_tc_change: vcc %p\n", flow->vcc);
        flow->old_pop = flow->vcc->pop;
        flow->parent = p;
        flow->vcc->pop = sch_atm_pop;
        flow->classid = classid;
        flow->ref = 1;
        flow->excess = excess;
        list_add(&flow->list, &p->link.list);
        flow->hdr_len = hdr_len;
        if (hdr)
                memcpy(flow->hdr, hdr, hdr_len);
        else
                memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
        *arg = (unsigned long)flow;
        return 0;
err_out:
        if (excess)
                atm_tc_put(sch, (unsigned long)excess);
        sockfd_put(sock);
        return error;
}

static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)arg;

        pr_debug("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
        if (list_empty(&flow->list))
                return -EINVAL;
        if (flow->filter_list || flow == &p->link)
                return -EBUSY;
        /*
         * Reference count must be 2: one for "keepalive" (set at class
         * creation), and one for the reference held when calling delete.
         */
        if (flow->ref < 2) {
                pr_err("atm_tc_delete: flow->ref == %d\n", flow->ref);
                return -EINVAL;
        }
        if (flow->ref > 2)
                return -EBUSY;  /* catch references via excess, etc. */
        atm_tc_put(sch, arg);
        return 0;
}

static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow;

        pr_debug("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
        if (walker->stop)
                return;
        list_for_each_entry(flow, &p->flows, list) {
                if (walker->count >= walker->skip &&
                    walker->fn(sch, (unsigned long)flow, walker) < 0) {
                        walker->stop = 1;
                        break;
                }
                walker->count++;
        }
}

static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)cl;

        pr_debug("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
        return flow ? &flow->filter_list : &p->link.filter_list;
}

/* --------------------------- Qdisc operations ---------------------------- */

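/*
 * Enqueue path: skb->priority is tried first as a classid of this qdisc;
 * failing that, the per-class filter lists are consulted. The packet is then
 * queued on the selected flow's own qdisc. For VC-backed flows the return
 * value carries __NET_XMIT_BYPASS, because the packet will leave through the
 * VC (via the dequeue tasklet) rather than through atm_tc_dequeue().
 */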
static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow;
        struct tcf_result res;
        int result;
        int ret = NET_XMIT_POLICED;

        pr_debug("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
        result = TC_POLICE_OK;  /* be nice to gcc */
        flow = NULL;
        if (TC_H_MAJ(skb->priority) != sch->handle ||
            !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority))) {
                list_for_each_entry(flow, &p->flows, list) {
                        if (flow->filter_list) {
                                result = tc_classify_compat(skb,
                                                            flow->filter_list,
                                                            &res);
                                if (result < 0)
                                        continue;
                                flow = (struct atm_flow_data *)res.class;
                                if (!flow)
                                        flow = lookup_flow(sch, res.classid);
                                goto done;
                        }
                }
                flow = NULL;
done:
                ;
        }
        if (!flow) {
                flow = &p->link;
        } else {
                if (flow->vcc)
                        ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
                /*@@@ looks good ... but it's not supposed to work :-) */
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                        kfree_skb(skb);
                        return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                case TC_ACT_SHOT:
                        kfree_skb(skb);
                        goto drop;
                case TC_POLICE_RECLASSIFY:
                        if (flow->excess)
                                flow = flow->excess;
                        else
                                ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
                        break;
                }
#endif
        }

        ret = qdisc_enqueue(skb, flow->q);
        if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused
                if (net_xmit_drop_count(ret)) {
                        sch->qstats.drops++;
                        if (flow)
                                flow->qstats.drops++;
                }
                return ret;
        }
        qdisc_bstats_update(sch, skb);
        bstats_update(&flow->bstats, skb);
        /*
         * Okay, this may seem weird. We pretend we've dropped the packet if
         * it goes via ATM. The reason for this is that the outer qdisc
         * expects to be able to q->dequeue the packet later on if we return
         * success at this place. Also, sch->q.qlen needs to reflect whether
         * there is a packet eligible for dequeuing or not. Note that the
         * statistics of the outer qdisc are necessarily wrong because of all
         * this. There's currently no correct solution for this.
         */
        if (flow == &p->link) {
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
        }
        tasklet_schedule(&p->task);
        return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}

/*
 * Dequeue packets and send them over ATM. Note that we quite deliberately
 * avoid checking net_device's flow control here, simply because sch_atm
 * uses its own channels, which have nothing to do with any CLIP/LANE/or
 * non-ATM interfaces.
 */
static void sch_atm_dequeue(unsigned long data)
{
        struct Qdisc *sch = (struct Qdisc *)data;
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow;
        struct sk_buff *skb;

        pr_debug("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
        list_for_each_entry(flow, &p->flows, list) {
                if (flow == &p->link)
                        continue;
                /*
                 * If traffic is properly shaped, this won't generate nasty
                 * little bursts. Otherwise, it may ... (but that's okay)
                 */
                while ((skb = flow->q->ops->peek(flow->q))) {
                        if (!atm_may_send(flow->vcc, skb->truesize))
                                break;

                        skb = qdisc_dequeue_peeked(flow->q);
                        if (unlikely(!skb))
                                break;

                        pr_debug("atm_tc_dequeue: sending on class %p\n", flow);
                        /* remove any LL header somebody else has attached */
                        skb_pull(skb, skb_network_offset(skb));
                        if (skb_headroom(skb) < flow->hdr_len) {
                                struct sk_buff *new;

                                new = skb_realloc_headroom(skb, flow->hdr_len);
                                dev_kfree_skb(skb);
                                if (!new)
                                        continue;
                                skb = new;
                        }
                        pr_debug("sch_atm_dequeue: ip %p, data %p\n",
                                 skb_network_header(skb), skb->data);
                        ATM_SKB(skb)->vcc = flow->vcc;
                        memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
                               flow->hdr_len);
                        atomic_add(skb->truesize,
                                   &sk_atm(flow->vcc)->sk_wmem_alloc);
                        /* atm.atm_options are already set by atm_tc_enqueue */
                        flow->vcc->send(flow->vcc, skb);
                }
        }
}

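/*
 * atm_tc_dequeue() below only serves the link flow (unclassified traffic);
 * everything classified to a VC is sent by sch_atm_dequeue() above, which
 * waits until atm_may_send() confirms the ATM socket can absorb another
 * skb->truesize bytes, then pushes the encapsulation header, charges the
 * buffer to sk_wmem_alloc and calls vcc->send().
 */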
static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct sk_buff *skb;

        pr_debug("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
        tasklet_schedule(&p->task);
        skb = qdisc_dequeue_peeked(p->link.q);
        if (skb)
                sch->q.qlen--;
        return skb;
}

static struct sk_buff *atm_tc_peek(struct Qdisc *sch)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);

        pr_debug("atm_tc_peek(sch %p,[qdisc %p])\n", sch, p);
        return p->link.q->ops->peek(p->link.q);
}

static unsigned int atm_tc_drop(struct Qdisc *sch)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow;
        unsigned int len;

        pr_debug("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
        list_for_each_entry(flow, &p->flows, list) {
                if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
                        return len;
        }
        return 0;
}

static int atm_tc_init(struct Qdisc *sch, struct nlattr *opt)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);

        pr_debug("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
        INIT_LIST_HEAD(&p->flows);
        INIT_LIST_HEAD(&p->link.list);
        list_add(&p->link.list, &p->flows);

        p->link.q = qdisc_create_dflt(sch->dev_queue,
                                      &pfifo_qdisc_ops, sch->handle);
        if (!p->link.q)
                p->link.q = &noop_qdisc;
        pr_debug("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);

        p->link.filter_list = NULL;
        p->link.vcc = NULL;
        p->link.sock = NULL;
        p->link.classid = sch->handle;
        p->link.ref = 1;
        tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
        return 0;
}

static void atm_tc_reset(struct Qdisc *sch)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow;

        pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
        list_for_each_entry(flow, &p->flows, list)
                qdisc_reset(flow->q);
        sch->q.qlen = 0;
}

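/*
 * Destruction order: all filter chains are torn down first so no classifier
 * can return a class that is about to disappear, then every flow (including
 * the built-in link flow) drops its creation reference, and finally the
 * dequeue tasklet is killed.
 */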
static void atm_tc_destroy(struct Qdisc *sch)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow, *tmp;

        pr_debug("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
        list_for_each_entry(flow, &p->flows, list)
                tcf_destroy_chain(&flow->filter_list);

        list_for_each_entry_safe(flow, tmp, &p->flows, list) {
                if (flow->ref > 1)
                        pr_err("atm_destroy: %p->ref = %d\n", flow, flow->ref);
                atm_tc_put(sch, (unsigned long)flow);
        }
        tasklet_kill(&p->task);
}

static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
                             struct sk_buff *skb, struct tcmsg *tcm)
{
        struct atm_qdisc_data *p = qdisc_priv(sch);
        struct atm_flow_data *flow = (struct atm_flow_data *)cl;
        struct nlattr *nest;

        pr_debug("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
                 sch, p, flow, skb, tcm);
        if (list_empty(&flow->list))
                return -EINVAL;
        tcm->tcm_handle = flow->classid;
        tcm->tcm_info = flow->q->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        NLA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
        if (flow->vcc) {
                struct sockaddr_atmpvc pvc;
                int state;

                pvc.sap_family = AF_ATMPVC;
                pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
                pvc.sap_addr.vpi = flow->vcc->vpi;
                pvc.sap_addr.vci = flow->vcc->vci;
                NLA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);

                state = ATM_VF2VS(flow->vcc->flags);
                NLA_PUT_U32(skb, TCA_ATM_STATE, state);
        }
        if (flow->excess)
                NLA_PUT_U32(skb, TCA_ATM_EXCESS, flow->classid);
        else
                NLA_PUT_U32(skb, TCA_ATM_EXCESS, 0);

        nla_nest_end(skb, nest);
        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
                        struct gnet_dump *d)
{
        struct atm_flow_data *flow = (struct atm_flow_data *)arg;

        flow->qstats.qlen = flow->q->q.qlen;

        if (gnet_stats_copy_basic(d, &flow->bstats) < 0 ||
            gnet_stats_copy_queue(d, &flow->qstats) < 0)
                return -1;

        return 0;
}

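/*
 * The qdisc itself has no options to dump; per-class state (header, PVC
 * address, VC state, excess class) is reported by atm_tc_dump_class() above.
 */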
static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        return 0;
}

static const struct Qdisc_class_ops atm_class_ops = {
        .graft          = atm_tc_graft,
        .leaf           = atm_tc_leaf,
        .get            = atm_tc_get,
        .put            = atm_tc_put,
        .change         = atm_tc_change,
        .delete         = atm_tc_delete,
        .walk           = atm_tc_walk,
        .tcf_chain      = atm_tc_find_tcf,
        .bind_tcf       = atm_tc_bind_filter,
        .unbind_tcf     = atm_tc_put,
        .dump           = atm_tc_dump_class,
        .dump_stats     = atm_tc_dump_class_stats,
};

static struct Qdisc_ops atm_qdisc_ops __read_mostly = {
        .cl_ops         = &atm_class_ops,
        .id             = "atm",
        .priv_size      = sizeof(struct atm_qdisc_data),
        .enqueue        = atm_tc_enqueue,
        .dequeue        = atm_tc_dequeue,
        .peek           = atm_tc_peek,
        .drop           = atm_tc_drop,
        .init           = atm_tc_init,
        .reset          = atm_tc_reset,
        .destroy        = atm_tc_destroy,
        .dump           = atm_tc_dump,
        .owner          = THIS_MODULE,
};

static int __init atm_init(void)
{
        return register_qdisc(&atm_qdisc_ops);
}

static void __exit atm_exit(void)
{
        unregister_qdisc(&atm_qdisc_ops);
}

module_init(atm_init)
module_exit(atm_exit)
MODULE_LICENSE("GPL");