/kern_oII/net/netfilter/nf_conntrack_netlink.c

http://omnia2droid.googlecode.com/ · C · 1989 lines · 1593 code · 343 blank · 53 comment · 293 complexity · 4ea8b92591b3958e41d5eaec08592ff9 MD5 · raw file

  1. /* Connection tracking via netlink socket. Allows for user space
  2. * protocol helpers and general trouble making from userspace.
  3. *
  4. * (C) 2001 by Jay Schulist <jschlst@samba.org>
  5. * (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
  6. * (C) 2003 by Patrick Mchardy <kaber@trash.net>
  7. * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
  8. *
  9. * Initial connection tracking via netlink development funded and
  10. * generally made possible by Network Robots, Inc. (www.networkrobots.com)
  11. *
  12. * Further development of this code funded by Astaro AG (http://www.astaro.com)
  13. *
  14. * This software may be used and distributed according to the terms
  15. * of the GNU General Public License, incorporated herein by reference.
  16. */
  17. #include <linux/init.h>
  18. #include <linux/module.h>
  19. #include <linux/kernel.h>
  20. #include <linux/rculist.h>
  21. #include <linux/rculist_nulls.h>
  22. #include <linux/types.h>
  23. #include <linux/timer.h>
  24. #include <linux/skbuff.h>
  25. #include <linux/errno.h>
  26. #include <linux/netlink.h>
  27. #include <linux/spinlock.h>
  28. #include <linux/interrupt.h>
  29. #include <linux/netfilter.h>
  30. #include <net/netlink.h>
  31. #include <net/netfilter/nf_conntrack.h>
  32. #include <net/netfilter/nf_conntrack_core.h>
  33. #include <net/netfilter/nf_conntrack_expect.h>
  34. #include <net/netfilter/nf_conntrack_helper.h>
  35. #include <net/netfilter/nf_conntrack_l3proto.h>
  36. #include <net/netfilter/nf_conntrack_l4proto.h>
  37. #include <net/netfilter/nf_conntrack_tuple.h>
  38. #include <net/netfilter/nf_conntrack_acct.h>
  39. #ifdef CONFIG_NF_NAT_NEEDED
  40. #include <net/netfilter/nf_nat_core.h>
  41. #include <net/netfilter/nf_nat_protocol.h>
  42. #endif
  43. #include <linux/netfilter/nfnetlink.h>
  44. #include <linux/netfilter/nfnetlink_conntrack.h>
MODULE_LICENSE("GPL");

/* ctnetlink version string; __initdata: discarded after module init. */
static char __initdata version[] = "0.93";
/*
 * Emit the CTA_TUPLE_PROTO nest (protocol number plus any per-l4proto
 * attributes) for @tuple into @skb.
 *
 * Returns 0 (or the l4proto callback's result) on success, -1 when the
 * message ran out of room.  NLA_PUT_U8 jumps to nla_put_failure on
 * overflow, which is why the label must stay.
 */
static inline int
ctnetlink_dump_tuples_proto(struct sk_buff *skb,
			    const struct nf_conntrack_tuple *tuple,
			    struct nf_conntrack_l4proto *l4proto)
{
	int ret = 0;
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_PROTO | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_U8(skb, CTA_PROTO_NUM, tuple->dst.protonum);

	/* l4 protocols may append protocol-specific attributes. */
	if (likely(l4proto->tuple_to_nlattr))
		ret = l4proto->tuple_to_nlattr(skb, tuple);

	nla_nest_end(skb, nest_parms);
	return ret;

nla_put_failure:
	return -1;
}
  65. static inline int
  66. ctnetlink_dump_tuples_ip(struct sk_buff *skb,
  67. const struct nf_conntrack_tuple *tuple,
  68. struct nf_conntrack_l3proto *l3proto)
  69. {
  70. int ret = 0;
  71. struct nlattr *nest_parms;
  72. nest_parms = nla_nest_start(skb, CTA_TUPLE_IP | NLA_F_NESTED);
  73. if (!nest_parms)
  74. goto nla_put_failure;
  75. if (likely(l3proto->tuple_to_nlattr))
  76. ret = l3proto->tuple_to_nlattr(skb, tuple);
  77. nla_nest_end(skb, nest_parms);
  78. return ret;
  79. nla_put_failure:
  80. return -1;
  81. }
  82. static int
  83. ctnetlink_dump_tuples(struct sk_buff *skb,
  84. const struct nf_conntrack_tuple *tuple)
  85. {
  86. int ret;
  87. struct nf_conntrack_l3proto *l3proto;
  88. struct nf_conntrack_l4proto *l4proto;
  89. l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
  90. ret = ctnetlink_dump_tuples_ip(skb, tuple, l3proto);
  91. if (unlikely(ret < 0))
  92. return ret;
  93. l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
  94. ret = ctnetlink_dump_tuples_proto(skb, tuple, l4proto);
  95. return ret;
  96. }
/*
 * Emit CTA_STATUS: the conntrack status bitmask (IPS_*) in network
 * byte order.  NLA_PUT_BE32 jumps to nla_put_failure if @skb is full.
 */
static inline int
ctnetlink_dump_status(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_STATUS, htonl(ct->status));
	return 0;

nla_put_failure:
	return -1;
}
/*
 * Emit CTA_TIMEOUT: remaining lifetime of @ct in seconds, clamped to 0
 * when the timer has already expired.
 */
static inline int
ctnetlink_dump_timeout(struct sk_buff *skb, const struct nf_conn *ct)
{
	long timeout = (ct->timeout.expires - jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	NLA_PUT_BE32(skb, CTA_TIMEOUT, htonl(timeout));
	return 0;

nla_put_failure:
	return -1;
}
  116. static inline int
  117. ctnetlink_dump_protoinfo(struct sk_buff *skb, struct nf_conn *ct)
  118. {
  119. struct nf_conntrack_l4proto *l4proto;
  120. struct nlattr *nest_proto;
  121. int ret;
  122. l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
  123. if (!l4proto->to_nlattr)
  124. return 0;
  125. nest_proto = nla_nest_start(skb, CTA_PROTOINFO | NLA_F_NESTED);
  126. if (!nest_proto)
  127. goto nla_put_failure;
  128. ret = l4proto->to_nlattr(skb, nest_proto, ct);
  129. nla_nest_end(skb, nest_proto);
  130. return ret;
  131. nla_put_failure:
  132. return -1;
  133. }
/*
 * Emit the CTA_HELP nest (helper name plus optional helper-specific
 * attributes) if @ct has a helper attached.  Returns 0 when there is
 * nothing to dump.  help->helper is RCU-protected, so the caller must
 * hold rcu_read_lock.
 */
static inline int
ctnetlink_dump_helpinfo(struct sk_buff *skb, const struct nf_conn *ct)
{
	struct nlattr *nest_helper;
	const struct nf_conn_help *help = nfct_help(ct);
	struct nf_conntrack_helper *helper;

	if (!help)
		return 0;

	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	nest_helper = nla_nest_start(skb, CTA_HELP | NLA_F_NESTED);
	if (!nest_helper)
		goto nla_put_failure;
	NLA_PUT_STRING(skb, CTA_HELP_NAME, helper->name);

	if (helper->to_nlattr)
		helper->to_nlattr(skb, ct);

	nla_nest_end(skb, nest_helper);
out:
	return 0;

nla_put_failure:
	return -1;
}
/*
 * Emit CTA_COUNTERS_ORIG or CTA_COUNTERS_REPLY (selected by @dir):
 * packet and byte counters from the accounting extension.  Dumps
 * nothing (returns 0) when accounting is not enabled for this entry.
 */
static int
ctnetlink_dump_counters(struct sk_buff *skb, const struct nf_conn *ct,
			enum ip_conntrack_dir dir)
{
	enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
	struct nlattr *nest_count;
	const struct nf_conn_counter *acct;

	acct = nf_conn_acct_find(ct);
	if (!acct)
		return 0;

	nest_count = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_count)
		goto nla_put_failure;

	NLA_PUT_BE64(skb, CTA_COUNTERS_PACKETS,
		     cpu_to_be64(acct[dir].packets));
	NLA_PUT_BE64(skb, CTA_COUNTERS_BYTES,
		     cpu_to_be64(acct[dir].bytes));

	nla_nest_end(skb, nest_count);
	return 0;

nla_put_failure:
	return -1;
}
#ifdef CONFIG_NF_CONNTRACK_MARK
/* Emit CTA_MARK: the conntrack mark, in network byte order. */
static inline int
ctnetlink_dump_mark(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_MARK, htonl(ct->mark));
	return 0;

nla_put_failure:
	return -1;
}
#else
/* No mark support: expand to success so callers need no #ifdefs. */
#define ctnetlink_dump_mark(a, b) (0)
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
/* Emit CTA_SECMARK: the security mark, in network byte order. */
static inline int
ctnetlink_dump_secmark(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_SECMARK, htonl(ct->secmark));
	return 0;

nla_put_failure:
	return -1;
}
#else
/* No secmark support: expand to success so callers need no #ifdefs. */
#define ctnetlink_dump_secmark(a, b) (0)
#endif
  203. #define master_tuple(ct) &(ct->master->tuplehash[IP_CT_DIR_ORIGINAL].tuple)
  204. static inline int
  205. ctnetlink_dump_master(struct sk_buff *skb, const struct nf_conn *ct)
  206. {
  207. struct nlattr *nest_parms;
  208. if (!(ct->status & IPS_EXPECTED))
  209. return 0;
  210. nest_parms = nla_nest_start(skb, CTA_TUPLE_MASTER | NLA_F_NESTED);
  211. if (!nest_parms)
  212. goto nla_put_failure;
  213. if (ctnetlink_dump_tuples(skb, master_tuple(ct)) < 0)
  214. goto nla_put_failure;
  215. nla_nest_end(skb, nest_parms);
  216. return 0;
  217. nla_put_failure:
  218. return -1;
  219. }
#ifdef CONFIG_NF_NAT_NEEDED
/*
 * Emit one CTA_NAT_SEQ_ADJ_* nest (@type selects orig/reply) carrying
 * the NAT sequence-number adjustment state for that direction.
 */
static int
dump_nat_seq_adj(struct sk_buff *skb, const struct nf_nat_seq *natseq, int type)
{
	struct nlattr *nest_parms;

	nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_NAT_SEQ_CORRECTION_POS,
		     htonl(natseq->correction_pos));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_BEFORE,
		     htonl(natseq->offset_before));
	NLA_PUT_BE32(skb, CTA_NAT_SEQ_OFFSET_AFTER,
		     htonl(natseq->offset_after));

	nla_nest_end(skb, nest_parms);
	return 0;

nla_put_failure:
	return -1;
}
  239. static inline int
  240. ctnetlink_dump_nat_seq_adj(struct sk_buff *skb, const struct nf_conn *ct)
  241. {
  242. struct nf_nat_seq *natseq;
  243. struct nf_conn_nat *nat = nfct_nat(ct);
  244. if (!(ct->status & IPS_SEQ_ADJUST) || !nat)
  245. return 0;
  246. natseq = &nat->seq[IP_CT_DIR_ORIGINAL];
  247. if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_ORIG) == -1)
  248. return -1;
  249. natseq = &nat->seq[IP_CT_DIR_REPLY];
  250. if (dump_nat_seq_adj(skb, natseq, CTA_NAT_SEQ_ADJ_REPLY) == -1)
  251. return -1;
  252. return 0;
  253. }
  254. #else
  255. #define ctnetlink_dump_nat_seq_adj(a, b) (0)
  256. #endif
/*
 * Emit CTA_ID: a unique conntrack id -- the kernel pointer value of
 * @ct truncated to 32 bits (cross-checked in ctnetlink_del_conntrack).
 */
static inline int
ctnetlink_dump_id(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_ID, htonl((unsigned long)ct));
	return 0;

nla_put_failure:
	return -1;
}
/* Emit CTA_USE: the current reference count of the conntrack entry. */
static inline int
ctnetlink_dump_use(struct sk_buff *skb, const struct nf_conn *ct)
{
	NLA_PUT_BE32(skb, CTA_USE, htonl(atomic_read(&ct->ct_general.use)));
	return 0;

nla_put_failure:
	return -1;
}
/*
 * Build one complete conntrack message for @ct into @skb: both tuples
 * plus status, timeout, counters, protoinfo, helper, mark, secmark,
 * id, use count, master tuple and NAT seq adjustments.
 *
 * @pid/@seq address the requesting socket (pid != 0 marks the message
 * NLM_F_MULTI, i.e. part of a dump).  Returns skb->len on success, -1
 * on overflow (the partial message is cancelled).
 */
static int
ctnetlink_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
		    int event, struct nf_conn *ct)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	/* Optional attributes: each helper returns 0 when it has nothing
	 * to dump, -1 only on overflow. */
	if (ctnetlink_dump_status(skb, ct) < 0 ||
	    ctnetlink_dump_timeout(skb, ct) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
	    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0 ||
	    ctnetlink_dump_protoinfo(skb, ct) < 0 ||
	    ctnetlink_dump_helpinfo(skb, ct) < 0 ||
	    ctnetlink_dump_mark(skb, ct) < 0 ||
	    ctnetlink_dump_secmark(skb, ct) < 0 ||
	    ctnetlink_dump_id(skb, ct) < 0 ||
	    ctnetlink_dump_use(skb, ct) < 0 ||
	    ctnetlink_dump_master(skb, ct) < 0 ||
	    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -1;
}
  321. #ifdef CONFIG_NF_CONNTRACK_EVENTS
  322. static inline size_t
  323. ctnetlink_proto_size(const struct nf_conn *ct)
  324. {
  325. struct nf_conntrack_l3proto *l3proto;
  326. struct nf_conntrack_l4proto *l4proto;
  327. size_t len = 0;
  328. rcu_read_lock();
  329. l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
  330. len += l3proto->nla_size;
  331. l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
  332. len += l4proto->nla_size;
  333. rcu_read_unlock();
  334. return len;
  335. }
/*
 * Upper-bound size of an event message for @ct, used to size the skb
 * in ctnetlink_conntrack_event().  Keep the terms below in sync with
 * the attributes the event path can emit.
 */
static inline size_t
ctnetlink_nlmsg_size(const struct nf_conn *ct)
{
	return NLMSG_ALIGN(sizeof(struct nfgenmsg))
	       + 3 * nla_total_size(0) /* CTA_TUPLE_ORIG|REPL|MASTER */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_IP */
	       + 3 * nla_total_size(0) /* CTA_TUPLE_PROTO */
	       + 3 * nla_total_size(sizeof(u_int8_t)) /* CTA_PROTO_NUM */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_ID */
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_STATUS */
#ifdef CONFIG_NF_CT_ACCT
	       + 2 * nla_total_size(0) /* CTA_COUNTERS_ORIG|REPL */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_PACKETS */
	       + 2 * nla_total_size(sizeof(uint64_t)) /* CTA_COUNTERS_BYTES */
#endif
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_TIMEOUT */
	       + nla_total_size(0) /* CTA_PROTOINFO */
	       + nla_total_size(0) /* CTA_HELP */
	       + nla_total_size(NF_CT_HELPER_NAME_LEN) /* CTA_HELP_NAME */
#ifdef CONFIG_NF_CONNTRACK_SECMARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_SECMARK */
#endif
#ifdef CONFIG_NF_NAT_NEEDED
	       + 2 * nla_total_size(0) /* CTA_NAT_SEQ_ADJ_ORIG|REPL */
	       + 6 * nla_total_size(sizeof(u_int32_t)) /* CTA_NAT_SEQ_OFFSET */
#endif
#ifdef CONFIG_NF_CONNTRACK_MARK
	       + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
#endif
	       + ctnetlink_proto_size(ct)
	       ;
}
/*
 * Conntrack event notifier: build and multicast a ctnetlink message
 * for @item->ct according to the IPCT_* bits in @events.  Uses
 * GFP_ATOMIC allocations.  Returns -ENOBUFS to request redelivery when
 * the send overflowed, 0 otherwise (including on allocation failure,
 * after flagging the error on the group).
 */
static int
ctnetlink_conntrack_event(unsigned int events, struct nf_ct_event *item)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nlattr *nest_parms;
	struct nf_conn *ct = item->ct;
	struct sk_buff *skb;
	unsigned int type;
	unsigned int flags = 0, group;
	int err;

	/* ignore our fake conntrack entry */
	if (ct == &nf_conntrack_untracked)
		return 0;

	/* Map event bits onto message type and multicast group. */
	if (events & (1 << IPCT_DESTROY)) {
		type = IPCTNL_MSG_CT_DELETE;
		group = NFNLGRP_CONNTRACK_DESTROY;
	} else if (events & ((1 << IPCT_NEW) | (1 << IPCT_RELATED))) {
		type = IPCTNL_MSG_CT_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
		group = NFNLGRP_CONNTRACK_NEW;
	} else if (events) {
		type = IPCTNL_MSG_CT_NEW;
		group = NFNLGRP_CONNTRACK_UPDATE;
	} else
		return 0;

	/* Don't build a message nobody will receive. */
	if (!item->report && !nfnetlink_has_listeners(group))
		return 0;

	skb = nlmsg_new(ctnetlink_nlmsg_size(ct), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = nf_ct_l3num(ct);
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();

	/* Both tuples are always included. */
	nest_parms = nla_nest_start(skb, CTA_TUPLE_ORIG | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_ORIGINAL)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	nest_parms = nla_nest_start(skb, CTA_TUPLE_REPLY | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	if (ctnetlink_dump_tuples(skb, nf_ct_tuple(ct, IP_CT_DIR_REPLY)) < 0)
		goto nla_put_failure;
	nla_nest_end(skb, nest_parms);

	if (ctnetlink_dump_id(skb, ct) < 0)
		goto nla_put_failure;

	if (ctnetlink_dump_status(skb, ct) < 0)
		goto nla_put_failure;

	if (events & (1 << IPCT_DESTROY)) {
		/* Destroy events carry the final counters ... */
		if (ctnetlink_dump_counters(skb, ct, IP_CT_DIR_ORIGINAL) < 0 ||
		    ctnetlink_dump_counters(skb, ct, IP_CT_DIR_REPLY) < 0)
			goto nla_put_failure;
	} else {
		/* ... other events carry only state that is relevant. */
		if (ctnetlink_dump_timeout(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_PROTOINFO)
		    && ctnetlink_dump_protoinfo(skb, ct) < 0)
			goto nla_put_failure;

		if ((events & (1 << IPCT_HELPER) || nfct_help(ct))
		    && ctnetlink_dump_helpinfo(skb, ct) < 0)
			goto nla_put_failure;

#ifdef CONFIG_NF_CONNTRACK_SECMARK
		if ((events & (1 << IPCT_SECMARK) || ct->secmark)
		    && ctnetlink_dump_secmark(skb, ct) < 0)
			goto nla_put_failure;
#endif

		if (events & (1 << IPCT_RELATED) &&
		    ctnetlink_dump_master(skb, ct) < 0)
			goto nla_put_failure;

		if (events & (1 << IPCT_NATSEQADJ) &&
		    ctnetlink_dump_nat_seq_adj(skb, ct) < 0)
			goto nla_put_failure;
	}

#ifdef CONFIG_NF_CONNTRACK_MARK
	if ((events & (1 << IPCT_MARK) || ct->mark)
	    && ctnetlink_dump_mark(skb, ct) < 0)
		goto nla_put_failure;
#endif
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	err = nfnetlink_send(skb, item->pid, group, item->report, GFP_ATOMIC);
	if (err == -ENOBUFS || err == -EAGAIN)
		return -ENOBUFS;

	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	nfnetlink_set_err(0, group, -ENOBUFS);
	return 0;
}
#endif /* CONFIG_NF_CONNTRACK_EVENTS */
  470. static int ctnetlink_done(struct netlink_callback *cb)
  471. {
  472. if (cb->args[1])
  473. nf_ct_put((struct nf_conn *)cb->args[1]);
  474. return 0;
  475. }
/*
 * netlink dump callback: walk the conntrack hash table under RCU and
 * emit one message per original-direction entry.
 *
 * cb->args[0] is the current hash bucket; cb->args[1] holds a
 * referenced pointer to the entry where the previous dump chunk
 * stopped, so the walk can resume after it (the reference is dropped
 * here via 'last' or in ctnetlink_done()).
 */
static int
ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nf_conn *ct, *last;
	struct nf_conntrack_tuple_hash *h;
	struct hlist_nulls_node *n;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conn *)cb->args[1];
	for (; cb->args[0] < nf_conntrack_htable_size; cb->args[0]++) {
restart:
		hlist_nulls_for_each_entry_rcu(h, n, &init_net.ct.hash[cb->args[0]],
					       hnnode) {
			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
				continue;
			ct = nf_ct_tuplehash_to_ctrack(h);
			/* Skip entries whose refcount already hit zero. */
			if (!atomic_inc_not_zero(&ct->ct_general.use))
				continue;
			/* Dump entries of a given L3 protocol number.
			 * If it is not specified, ie. l3proto == 0,
			 * then dump everything. */
			if (l3proto && nf_ct_l3num(ct) != l3proto)
				goto releasect;
			if (cb->args[1]) {
				/* Resuming: skip until we pass 'last'. */
				if (ct != last)
					goto releasect;
				cb->args[1] = 0;
			}
			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
						cb->nlh->nlmsg_seq,
						IPCTNL_MSG_CT_NEW, ct) < 0) {
				/* skb full: remember where to resume. */
				cb->args[1] = (unsigned long)ct;
				goto out;
			}
			/* CTRZERO request: reset counters after dumping. */
			if (NFNL_MSG_TYPE(cb->nlh->nlmsg_type) ==
						IPCTNL_MSG_CT_GET_CTRZERO) {
				struct nf_conn_counter *acct;

				acct = nf_conn_acct_find(ct);
				if (acct)
					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
			}
releasect:
			nf_ct_put(ct);
		}
		if (cb->args[1]) {
			/* 'last' was not found in this bucket; rescan it. */
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_put(last);

	return skb->len;
}
  532. static inline int
  533. ctnetlink_parse_tuple_ip(struct nlattr *attr, struct nf_conntrack_tuple *tuple)
  534. {
  535. struct nlattr *tb[CTA_IP_MAX+1];
  536. struct nf_conntrack_l3proto *l3proto;
  537. int ret = 0;
  538. nla_parse_nested(tb, CTA_IP_MAX, attr, NULL);
  539. rcu_read_lock();
  540. l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
  541. if (likely(l3proto->nlattr_to_tuple)) {
  542. ret = nla_validate_nested(attr, CTA_IP_MAX,
  543. l3proto->nla_policy);
  544. if (ret == 0)
  545. ret = l3proto->nlattr_to_tuple(tb, tuple);
  546. }
  547. rcu_read_unlock();
  548. return ret;
  549. }
/* Policy for the CTA_TUPLE_PROTO nest: the protocol number is a u8;
 * the remaining attributes are validated by the l4proto's own policy. */
static const struct nla_policy proto_nla_policy[CTA_PROTO_MAX+1] = {
	[CTA_PROTO_NUM]	= { .type = NLA_U8 },
};
  553. static inline int
  554. ctnetlink_parse_tuple_proto(struct nlattr *attr,
  555. struct nf_conntrack_tuple *tuple)
  556. {
  557. struct nlattr *tb[CTA_PROTO_MAX+1];
  558. struct nf_conntrack_l4proto *l4proto;
  559. int ret = 0;
  560. ret = nla_parse_nested(tb, CTA_PROTO_MAX, attr, proto_nla_policy);
  561. if (ret < 0)
  562. return ret;
  563. if (!tb[CTA_PROTO_NUM])
  564. return -EINVAL;
  565. tuple->dst.protonum = nla_get_u8(tb[CTA_PROTO_NUM]);
  566. rcu_read_lock();
  567. l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
  568. if (likely(l4proto->nlattr_to_tuple)) {
  569. ret = nla_validate_nested(attr, CTA_PROTO_MAX,
  570. l4proto->nla_policy);
  571. if (ret == 0)
  572. ret = l4proto->nlattr_to_tuple(tb, tuple);
  573. }
  574. rcu_read_unlock();
  575. return ret;
  576. }
  577. static int
  578. ctnetlink_parse_tuple(struct nlattr *cda[], struct nf_conntrack_tuple *tuple,
  579. enum ctattr_tuple type, u_int8_t l3num)
  580. {
  581. struct nlattr *tb[CTA_TUPLE_MAX+1];
  582. int err;
  583. memset(tuple, 0, sizeof(*tuple));
  584. nla_parse_nested(tb, CTA_TUPLE_MAX, cda[type], NULL);
  585. if (!tb[CTA_TUPLE_IP])
  586. return -EINVAL;
  587. tuple->src.l3num = l3num;
  588. err = ctnetlink_parse_tuple_ip(tb[CTA_TUPLE_IP], tuple);
  589. if (err < 0)
  590. return err;
  591. if (!tb[CTA_TUPLE_PROTO])
  592. return -EINVAL;
  593. err = ctnetlink_parse_tuple_proto(tb[CTA_TUPLE_PROTO], tuple);
  594. if (err < 0)
  595. return err;
  596. /* orig and expect tuples get DIR_ORIGINAL */
  597. if (type == CTA_TUPLE_REPLY)
  598. tuple->dst.dir = IP_CT_DIR_REPLY;
  599. else
  600. tuple->dst.dir = IP_CT_DIR_ORIGINAL;
  601. return 0;
  602. }
  603. static inline int
  604. ctnetlink_parse_help(struct nlattr *attr, char **helper_name)
  605. {
  606. struct nlattr *tb[CTA_HELP_MAX+1];
  607. nla_parse_nested(tb, CTA_HELP_MAX, attr, NULL);
  608. if (!tb[CTA_HELP_NAME])
  609. return -EINVAL;
  610. *helper_name = nla_data(tb[CTA_HELP_NAME]);
  611. return 0;
  612. }
/* Top-level CTA_* policy for the u32 scalar attributes; nested
 * attributes (tuples, protoinfo, help, ...) have their own parsers. */
static const struct nla_policy ct_nla_policy[CTA_MAX+1] = {
	[CTA_STATUS] 		= { .type = NLA_U32 },
	[CTA_TIMEOUT] 		= { .type = NLA_U32 },
	[CTA_MARK]		= { .type = NLA_U32 },
	[CTA_USE]		= { .type = NLA_U32 },
	[CTA_ID]		= { .type = NLA_U32 },
};
/*
 * IPCTNL_MSG_CT_DELETE handler: delete the conntrack matching the
 * given orig/reply tuple (optionally cross-checked against CTA_ID),
 * or flush the whole table when no tuple attribute is present.
 */
static int
ctnetlink_del_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;
	struct nf_conn *ct;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	if (cda[CTA_TUPLE_ORIG])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
	else if (cda[CTA_TUPLE_REPLY])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
	else {
		/* Flush the whole table */
		nf_conntrack_flush_report(&init_net,
					  NETLINK_CB(skb).pid,
					  nlmsg_report(nlh));
		return 0;
	}

	if (err < 0)
		return err;

	h = nf_conntrack_find_get(&init_net, &tuple);
	if (!h)
		return -ENOENT;

	ct = nf_ct_tuplehash_to_ctrack(h);

	/* If userspace supplied an id, it must match this entry. */
	if (cda[CTA_ID]) {
		u_int32_t id = ntohl(nla_get_be32(cda[CTA_ID]));
		if (id != (u32)(unsigned long)ct) {
			nf_ct_put(ct);
			return -ENOENT;
		}
	}

	if (nf_conntrack_event_report(IPCT_DESTROY, ct,
				      NETLINK_CB(skb).pid,
				      nlmsg_report(nlh)) < 0) {
		nf_ct_delete_from_lists(ct);
		/* we failed to report the event, try later */
		nf_ct_insert_dying_list(ct);
		nf_ct_put(ct);
		return 0;
	}

	/* death_by_timeout would report the event again */
	set_bit(IPS_DYING_BIT, &ct->status);

	nf_ct_kill(ct);
	nf_ct_put(ct);

	return 0;
}
  669. static int
  670. ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
  671. struct nlmsghdr *nlh, struct nlattr *cda[])
  672. {
  673. struct nf_conntrack_tuple_hash *h;
  674. struct nf_conntrack_tuple tuple;
  675. struct nf_conn *ct;
  676. struct sk_buff *skb2 = NULL;
  677. struct nfgenmsg *nfmsg = nlmsg_data(nlh);
  678. u_int8_t u3 = nfmsg->nfgen_family;
  679. int err = 0;
  680. if (nlh->nlmsg_flags & NLM_F_DUMP)
  681. return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
  682. ctnetlink_done);
  683. if (cda[CTA_TUPLE_ORIG])
  684. err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_ORIG, u3);
  685. else if (cda[CTA_TUPLE_REPLY])
  686. err = ctnetlink_parse_tuple(cda, &tuple, CTA_TUPLE_REPLY, u3);
  687. else
  688. return -EINVAL;
  689. if (err < 0)
  690. return err;
  691. h = nf_conntrack_find_get(&init_net, &tuple);
  692. if (!h)
  693. return -ENOENT;
  694. ct = nf_ct_tuplehash_to_ctrack(h);
  695. err = -ENOMEM;
  696. skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  697. if (skb2 == NULL) {
  698. nf_ct_put(ct);
  699. return -ENOMEM;
  700. }
  701. rcu_read_lock();
  702. err = ctnetlink_fill_info(skb2, NETLINK_CB(skb).pid, nlh->nlmsg_seq,
  703. IPCTNL_MSG_CT_NEW, ct);
  704. rcu_read_unlock();
  705. nf_ct_put(ct);
  706. if (err <= 0)
  707. goto free;
  708. err = netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);
  709. if (err < 0)
  710. goto out;
  711. return 0;
  712. free:
  713. kfree_skb(skb2);
  714. out:
  715. return err;
  716. }
#ifdef CONFIG_NF_NAT_NEEDED
/*
 * Hand a CTA_NAT_SRC/DST attribute to the NAT module via
 * nfnetlink_parse_nat_setup_hook.  If the hook is not registered yet,
 * drop all locks (the caller holds, in acquisition order, the nfnl
 * mutex, nf_conntrack_lock and rcu_read_lock), try to load the NAT
 * module, and return -EAGAIN so the caller restarts the whole
 * operation with the hook in place.
 */
static int
ctnetlink_parse_nat_setup(struct nf_conn *ct,
			  enum nf_nat_manip_type manip,
			  struct nlattr *attr)
{
	typeof(nfnetlink_parse_nat_setup_hook) parse_nat_setup;

	parse_nat_setup = rcu_dereference(nfnetlink_parse_nat_setup_hook);
	if (!parse_nat_setup) {
#ifdef CONFIG_MODULES
		/* Unlock in reverse acquisition order before sleeping
		 * in request_module(). */
		rcu_read_unlock();
		spin_unlock_bh(&nf_conntrack_lock);
		nfnl_unlock();
		if (request_module("nf-nat-ipv4") < 0) {
			nfnl_lock();
			spin_lock_bh(&nf_conntrack_lock);
			rcu_read_lock();
			return -EOPNOTSUPP;
		}
		nfnl_lock();
		spin_lock_bh(&nf_conntrack_lock);
		rcu_read_lock();
		if (nfnetlink_parse_nat_setup_hook)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	return parse_nat_setup(ct, manip, attr);
}
#endif
  747. static int
  748. ctnetlink_change_status(struct nf_conn *ct, struct nlattr *cda[])
  749. {
  750. unsigned long d;
  751. unsigned int status = ntohl(nla_get_be32(cda[CTA_STATUS]));
  752. d = ct->status ^ status;
  753. if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING))
  754. /* unchangeable */
  755. return -EBUSY;
  756. if (d & IPS_SEEN_REPLY && !(status & IPS_SEEN_REPLY))
  757. /* SEEN_REPLY bit can only be set */
  758. return -EBUSY;
  759. if (d & IPS_ASSURED && !(status & IPS_ASSURED))
  760. /* ASSURED bit can only be set */
  761. return -EBUSY;
  762. /* Be careful here, modifying NAT bits can screw up things,
  763. * so don't let users modify them directly if they don't pass
  764. * nf_nat_range. */
  765. ct->status |= status & ~(IPS_NAT_DONE_MASK | IPS_NAT_MASK);
  766. return 0;
  767. }
  768. static int
  769. ctnetlink_change_nat(struct nf_conn *ct, struct nlattr *cda[])
  770. {
  771. #ifdef CONFIG_NF_NAT_NEEDED
  772. int ret;
  773. if (cda[CTA_NAT_DST]) {
  774. ret = ctnetlink_parse_nat_setup(ct,
  775. IP_NAT_MANIP_DST,
  776. cda[CTA_NAT_DST]);
  777. if (ret < 0)
  778. return ret;
  779. }
  780. if (cda[CTA_NAT_SRC]) {
  781. ret = ctnetlink_parse_nat_setup(ct,
  782. IP_NAT_MANIP_SRC,
  783. cda[CTA_NAT_SRC]);
  784. if (ret < 0)
  785. return ret;
  786. }
  787. return 0;
  788. #else
  789. return -EOPNOTSUPP;
  790. #endif
  791. }
/*
 * Set, replace or remove the helper of an existing conntrack from
 * CTA_HELP.  An empty helper name detaches the current helper.
 * Called with nf_conntrack_lock held; the lock is dropped temporarily
 * to load a helper module, and -EAGAIN then tells the caller to retry
 * the whole operation.
 */
static inline int
ctnetlink_change_helper(struct nf_conn *ct, struct nlattr *cda[])
{
	struct nf_conntrack_helper *helper;
	struct nf_conn_help *help = nfct_help(ct);
	char *helpname = NULL;
	int err;

	/* don't change helper of sibling connections */
	if (ct->master)
		return -EBUSY;

	err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
	if (err < 0)
		return err;

	if (!strcmp(helpname, "")) {
		if (help && help->helper) {
			/* we had a helper before ... */
			nf_ct_remove_expectations(ct);
			rcu_assign_pointer(help->helper, NULL);
		}

		return 0;
	}

	helper = __nf_conntrack_helper_find_byname(helpname);
	if (helper == NULL) {
#ifdef CONFIG_MODULES
		spin_unlock_bh(&nf_conntrack_lock);

		if (request_module("nfct-helper-%s", helpname) < 0) {
			spin_lock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}

		spin_lock_bh(&nf_conntrack_lock);
		helper = __nf_conntrack_helper_find_byname(helpname);
		/* module loaded and helper registered: have the caller
		 * retry from scratch */
		if (helper)
			return -EAGAIN;
#endif
		return -EOPNOTSUPP;
	}

	if (help) {
		if (help->helper == helper)
			return 0;	/* already attached */
		if (help->helper)
			return -EBUSY;	/* refuse to replace a live helper */
		/* need to zero data of old helper */
		memset(&help->help, 0, sizeof(help->help));
	} else {
		help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
		if (help == NULL)
			return -ENOMEM;
	}

	rcu_assign_pointer(help->helper, helper);

	return 0;
}
  843. static inline int
  844. ctnetlink_change_timeout(struct nf_conn *ct, struct nlattr *cda[])
  845. {
  846. u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));
  847. if (!del_timer(&ct->timeout))
  848. return -ETIME;
  849. ct->timeout.expires = jiffies + timeout * HZ;
  850. add_timer(&ct->timeout);
  851. return 0;
  852. }
  853. static inline int
  854. ctnetlink_change_protoinfo(struct nf_conn *ct, struct nlattr *cda[])
  855. {
  856. struct nlattr *tb[CTA_PROTOINFO_MAX+1], *attr = cda[CTA_PROTOINFO];
  857. struct nf_conntrack_l4proto *l4proto;
  858. int err = 0;
  859. nla_parse_nested(tb, CTA_PROTOINFO_MAX, attr, NULL);
  860. rcu_read_lock();
  861. l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
  862. if (l4proto->from_nlattr)
  863. err = l4proto->from_nlattr(tb, ct);
  864. rcu_read_unlock();
  865. return err;
  866. }
  867. #ifdef CONFIG_NF_NAT_NEEDED
  868. static inline int
  869. change_nat_seq_adj(struct nf_nat_seq *natseq, struct nlattr *attr)
  870. {
  871. struct nlattr *cda[CTA_NAT_SEQ_MAX+1];
  872. nla_parse_nested(cda, CTA_NAT_SEQ_MAX, attr, NULL);
  873. if (!cda[CTA_NAT_SEQ_CORRECTION_POS])
  874. return -EINVAL;
  875. natseq->correction_pos =
  876. ntohl(nla_get_be32(cda[CTA_NAT_SEQ_CORRECTION_POS]));
  877. if (!cda[CTA_NAT_SEQ_OFFSET_BEFORE])
  878. return -EINVAL;
  879. natseq->offset_before =
  880. ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_BEFORE]));
  881. if (!cda[CTA_NAT_SEQ_OFFSET_AFTER])
  882. return -EINVAL;
  883. natseq->offset_after =
  884. ntohl(nla_get_be32(cda[CTA_NAT_SEQ_OFFSET_AFTER]));
  885. return 0;
  886. }
  887. static int
  888. ctnetlink_change_nat_seq_adj(struct nf_conn *ct, struct nlattr *cda[])
  889. {
  890. int ret = 0;
  891. struct nf_conn_nat *nat = nfct_nat(ct);
  892. if (!nat)
  893. return 0;
  894. if (cda[CTA_NAT_SEQ_ADJ_ORIG]) {
  895. ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_ORIGINAL],
  896. cda[CTA_NAT_SEQ_ADJ_ORIG]);
  897. if (ret < 0)
  898. return ret;
  899. ct->status |= IPS_SEQ_ADJUST;
  900. }
  901. if (cda[CTA_NAT_SEQ_ADJ_REPLY]) {
  902. ret = change_nat_seq_adj(&nat->seq[IP_CT_DIR_REPLY],
  903. cda[CTA_NAT_SEQ_ADJ_REPLY]);
  904. if (ret < 0)
  905. return ret;
  906. ct->status |= IPS_SEQ_ADJUST;
  907. }
  908. return 0;
  909. }
  910. #endif
  911. static int
  912. ctnetlink_change_conntrack(struct nf_conn *ct, struct nlattr *cda[])
  913. {
  914. int err;
  915. /* only allow NAT changes and master assignation for new conntracks */
  916. if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST] || cda[CTA_TUPLE_MASTER])
  917. return -EOPNOTSUPP;
  918. if (cda[CTA_HELP]) {
  919. err = ctnetlink_change_helper(ct, cda);
  920. if (err < 0)
  921. return err;
  922. }
  923. if (cda[CTA_TIMEOUT]) {
  924. err = ctnetlink_change_timeout(ct, cda);
  925. if (err < 0)
  926. return err;
  927. }
  928. if (cda[CTA_STATUS]) {
  929. err = ctnetlink_change_status(ct, cda);
  930. if (err < 0)
  931. return err;
  932. }
  933. if (cda[CTA_PROTOINFO]) {
  934. err = ctnetlink_change_protoinfo(ct, cda);
  935. if (err < 0)
  936. return err;
  937. }
  938. #if defined(CONFIG_NF_CONNTRACK_MARK)
  939. if (cda[CTA_MARK])
  940. ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
  941. #endif
  942. #ifdef CONFIG_NF_NAT_NEEDED
  943. if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
  944. err = ctnetlink_change_nat_seq_adj(ct, cda);
  945. if (err < 0)
  946. return err;
  947. }
  948. #endif
  949. return 0;
  950. }
/*
 * Allocate and fully set up a conntrack entry from netlink attributes.
 * Called by ctnetlink_new_conntrack() with nf_conntrack_lock held; the
 * entry's timer is armed and it is inserted into the hash table here.
 * Returns the new conntrack or an ERR_PTR() on failure.
 */
static struct nf_conn *
ctnetlink_create_conntrack(struct nlattr *cda[],
			   struct nf_conntrack_tuple *otuple,
			   struct nf_conntrack_tuple *rtuple,
			   u8 u3)
{
	struct nf_conn *ct;
	int err = -EINVAL;
	struct nf_conntrack_helper *helper;

	ct = nf_conntrack_alloc(&init_net, otuple, rtuple, GFP_ATOMIC);
	if (IS_ERR(ct))
		return ERR_PTR(-ENOMEM);

	/* A timeout attribute is mandatory for userspace-created entries. */
	if (!cda[CTA_TIMEOUT])
		goto err1;
	ct->timeout.expires = ntohl(nla_get_be32(cda[CTA_TIMEOUT]));

	ct->timeout.expires = jiffies + ct->timeout.expires * HZ;
	/* Entries injected via ctnetlink start out confirmed. */
	ct->status |= IPS_CONFIRMED;

	rcu_read_lock();
	if (cda[CTA_HELP]) {
		char *helpname = NULL;

		err = ctnetlink_parse_help(cda[CTA_HELP], &helpname);
		if (err < 0)
			goto err2;

		helper = __nf_conntrack_helper_find_byname(helpname);
		if (helper == NULL) {
			rcu_read_unlock();
#ifdef CONFIG_MODULES
			/* Try to autoload the helper module and look the
			 * helper up a second time. */
			if (request_module("nfct-helper-%s", helpname) < 0) {
				err = -EOPNOTSUPP;
				goto err1;
			}

			rcu_read_lock();
			helper = __nf_conntrack_helper_find_byname(helpname);
			if (helper) {
				/* Helper is available now; -EAGAIN asks the
				 * caller to resend the request. */
				err = -EAGAIN;
				goto err2;
			}
			rcu_read_unlock();
#endif
			err = -EOPNOTSUPP;
			goto err1;
		} else {
			struct nf_conn_help *help;

			help = nf_ct_helper_ext_add(ct, GFP_ATOMIC);
			if (help == NULL) {
				err = -ENOMEM;
				goto err2;
			}

			/* not in hash table yet so not strictly necessary */
			rcu_assign_pointer(help->helper, helper);
		}
	} else {
		/* try an implicit helper assignation */
		err = __nf_ct_try_assign_helper(ct, GFP_ATOMIC);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_STATUS]) {
		err = ctnetlink_change_status(ct, cda);
		if (err < 0)
			goto err2;
	}

	if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
		err = ctnetlink_change_nat(ct, cda);
		if (err < 0)
			goto err2;
	}

#ifdef CONFIG_NF_NAT_NEEDED
	if (cda[CTA_NAT_SEQ_ADJ_ORIG] || cda[CTA_NAT_SEQ_ADJ_REPLY]) {
		err = ctnetlink_change_nat_seq_adj(ct, cda);
		if (err < 0)
			goto err2;
	}
#endif

	if (cda[CTA_PROTOINFO]) {
		err = ctnetlink_change_protoinfo(ct, cda);
		if (err < 0)
			goto err2;
	}

	/* Attach accounting and event-cache extensions before insertion. */
	nf_ct_acct_ext_add(ct, GFP_ATOMIC);
	nf_ct_ecache_ext_add(ct, GFP_ATOMIC);

#if defined(CONFIG_NF_CONNTRACK_MARK)
	if (cda[CTA_MARK])
		ct->mark = ntohl(nla_get_be32(cda[CTA_MARK]));
#endif

	/* setup master conntrack: this is a confirmed expectation */
	if (cda[CTA_TUPLE_MASTER]) {
		struct nf_conntrack_tuple master;
		struct nf_conntrack_tuple_hash *master_h;
		struct nf_conn *master_ct;

		err = ctnetlink_parse_tuple(cda, &master, CTA_TUPLE_MASTER, u3);
		if (err < 0)
			goto err2;

		master_h = nf_conntrack_find_get(&init_net, &master);
		if (master_h == NULL) {
			err = -ENOENT;
			goto err2;
		}
		master_ct = nf_ct_tuplehash_to_ctrack(master_h);
		__set_bit(IPS_EXPECTED_BIT, &ct->status);
		ct->master = master_ct;
	}

	add_timer(&ct->timeout);
	nf_conntrack_hash_insert(ct);
	rcu_read_unlock();

	return ct;

err2:	/* failure inside the RCU read-side critical section */
	rcu_read_unlock();
err1:	/* failure with no locks held */
	nf_conntrack_free(ct);
	return ERR_PTR(err);
}
/*
 * Handle IPCTNL_MSG_CT_NEW: create a conntrack entry (NLM_F_CREATE) or
 * update an existing one matched by its original or reply tuple. An
 * event is reported to listeners after the table lock is dropped.
 */
static int
ctnetlink_new_conntrack(struct sock *ctnl, struct sk_buff *skb,
			struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple otuple, rtuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	if (cda[CTA_TUPLE_ORIG]) {
		err = ctnetlink_parse_tuple(cda, &otuple, CTA_TUPLE_ORIG, u3);
		if (err < 0)
			return err;
	}

	if (cda[CTA_TUPLE_REPLY]) {
		err = ctnetlink_parse_tuple(cda, &rtuple, CTA_TUPLE_REPLY, u3);
		if (err < 0)
			return err;
	}

	spin_lock_bh(&nf_conntrack_lock);
	/* Look up by whichever tuple the request provided. */
	if (cda[CTA_TUPLE_ORIG])
		h = __nf_conntrack_find(&init_net, &otuple);
	else if (cda[CTA_TUPLE_REPLY])
		h = __nf_conntrack_find(&init_net, &rtuple);

	if (h == NULL) {
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			struct nf_conn *ct;
			enum ip_conntrack_events events;

			ct = ctnetlink_create_conntrack(cda, &otuple,
							&rtuple, u3);
			if (IS_ERR(ct)) {
				err = PTR_ERR(ct);
				goto out_unlock;
			}
			err = 0;
			/* Hold a reference across the unlock so the entry
			 * cannot go away while the event is reported. */
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			if (test_bit(IPS_EXPECTED_BIT, &ct->status))
				events = IPCT_RELATED;
			else
				events = IPCT_NEW;
			nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK) | events,
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}
	/* implicit 'else' */

	/* We manipulate the conntrack inside the global conntrack table lock,
	 * so there's no need to increase the refcount */
	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL)) {
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		err = ctnetlink_change_conntrack(ct, cda);
		if (err == 0) {
			/* Same pattern as above: pin the entry, drop the
			 * lock, report the update event. */
			nf_conntrack_get(&ct->ct_general);
			spin_unlock_bh(&nf_conntrack_lock);
			nf_conntrack_eventmask_report((1 << IPCT_STATUS) |
						      (1 << IPCT_HELPER) |
						      (1 << IPCT_PROTOINFO) |
						      (1 << IPCT_NATSEQADJ) |
						      (1 << IPCT_MARK),
						      ct, NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
			nf_ct_put(ct);
		} else
			spin_unlock_bh(&nf_conntrack_lock);

		return err;
	}

out_unlock:
	spin_unlock_bh(&nf_conntrack_lock);
	return err;
}
  1143. /***********************************************************************
  1144. * EXPECT
  1145. ***********************************************************************/
  1146. static inline int
  1147. ctnetlink_exp_dump_tuple(struct sk_buff *skb,
  1148. const struct nf_conntrack_tuple *tuple,
  1149. enum ctattr_expect type)
  1150. {
  1151. struct nlattr *nest_parms;
  1152. nest_parms = nla_nest_start(skb, type | NLA_F_NESTED);
  1153. if (!nest_parms)
  1154. goto nla_put_failure;
  1155. if (ctnetlink_dump_tuples(skb, tuple) < 0)
  1156. goto nla_put_failure;
  1157. nla_nest_end(skb, nest_parms);
  1158. return 0;
  1159. nla_put_failure:
  1160. return -1;
  1161. }
  1162. static inline int
  1163. ctnetlink_exp_dump_mask(struct sk_buff *skb,
  1164. const struct nf_conntrack_tuple *tuple,
  1165. const struct nf_conntrack_tuple_mask *mask)
  1166. {
  1167. int ret;
  1168. struct nf_conntrack_l3proto *l3proto;
  1169. struct nf_conntrack_l4proto *l4proto;
  1170. struct nf_conntrack_tuple m;
  1171. struct nlattr *nest_parms;
  1172. memset(&m, 0xFF, sizeof(m));
  1173. m.src.u.all = mask->src.u.all;
  1174. memcpy(&m.src.u3, &mask->src.u3, sizeof(m.src.u3));
  1175. nest_parms = nla_nest_start(skb, CTA_EXPECT_MASK | NLA_F_NESTED);
  1176. if (!nest_parms)
  1177. goto nla_put_failure;
  1178. l3proto = __nf_ct_l3proto_find(tuple->src.l3num);
  1179. ret = ctnetlink_dump_tuples_ip(skb, &m, l3proto);
  1180. if (unlikely(ret < 0))
  1181. goto nla_put_failure;
  1182. l4proto = __nf_ct_l4proto_find(tuple->src.l3num, tuple->dst.protonum);
  1183. ret = ctnetlink_dump_tuples_proto(skb, &m, l4proto);
  1184. if (unlikely(ret < 0))
  1185. goto nla_put_failure;
  1186. nla_nest_end(skb, nest_parms);
  1187. return 0;
  1188. nla_put_failure:
  1189. return -1;
  1190. }
/*
 * Dump one expectation (tuple, mask, master tuple, timeout, id) into
 * skb. Returns 0 on success, -1 if the attributes did not fit.
 */
static int
ctnetlink_exp_dump_expect(struct sk_buff *skb,
			  const struct nf_conntrack_expect *exp)
{
	struct nf_conn *master = exp->master;
	/* Remaining lifetime in seconds, clamped at zero. */
	long timeout = (exp->timeout.expires - jiffies) / HZ;

	if (timeout < 0)
		timeout = 0;

	if (ctnetlink_exp_dump_tuple(skb, &exp->tuple, CTA_EXPECT_TUPLE) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_mask(skb, &exp->tuple, &exp->mask) < 0)
		goto nla_put_failure;
	if (ctnetlink_exp_dump_tuple(skb,
				     &master->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
				     CTA_EXPECT_MASTER) < 0)
		goto nla_put_failure;

	NLA_PUT_BE32(skb, CTA_EXPECT_TIMEOUT, htonl(timeout));
	/* The expectation's kernel address, truncated to 32 bits, serves
	 * as its user-visible id (compared in get/del handlers). */
	NLA_PUT_BE32(skb, CTA_EXPECT_ID, htonl((unsigned long)exp));

	return 0;

nla_put_failure:
	return -1;
}
/*
 * Build a complete expectation message (netlink + nfgenmsg headers plus
 * the expectation attributes) into skb. Returns skb->len on success,
 * -1 if the message did not fit (the partial message is cancelled).
 */
static int
ctnetlink_exp_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			int event, const struct nf_conntrack_expect *exp)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	/* Non-zero pid means this is a dump response: mark it multipart. */
	unsigned int flags = pid ? NLM_F_MULTI : 0;

	event |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return skb->len;

nlmsg_failure:
nla_put_failure:
	/* nlmsg_cancel() tolerates a NULL nlh from the nlmsg_put failure. */
	nlmsg_cancel(skb, nlh);
	return -1;
}
  1237. #ifdef CONFIG_NF_CONNTRACK_EVENTS
/*
 * Expectation event notifier callback: broadcast a newly created
 * expectation to NFNLGRP_CONNTRACK_EXP_NEW listeners. Other event
 * types are ignored. Always returns 0; on allocation/overflow failure
 * the listeners are told an event was lost via nfnetlink_set_err().
 */
static int
ctnetlink_expect_event(unsigned int events, struct nf_exp_event *item)
{
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	struct nf_conntrack_expect *exp = item->exp;
	struct sk_buff *skb;
	unsigned int type;
	int flags = 0;

	/* Only new-expectation events are reported. */
	if (events & (1 << IPEXP_NEW)) {
		type = IPCTNL_MSG_EXP_NEW;
		flags = NLM_F_CREATE|NLM_F_EXCL;
	} else
		return 0;

	/* Skip building the message when nobody asked for a report and
	 * nobody subscribes to the multicast group. */
	if (!item->report &&
	    !nfnetlink_has_listeners(NFNLGRP_CONNTRACK_EXP_NEW))
		return 0;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	type |= NFNL_SUBSYS_CTNETLINK_EXP << 8;
	nlh = nlmsg_put(skb, item->pid, 0, type, sizeof(*nfmsg), flags);
	if (nlh == NULL)
		goto nlmsg_failure;

	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = exp->tuple.src.l3num;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = 0;

	rcu_read_lock();
	if (ctnetlink_exp_dump_expect(skb, exp) < 0)
		goto nla_put_failure;
	rcu_read_unlock();

	nlmsg_end(skb, nlh);
	nfnetlink_send(skb, item->pid, NFNLGRP_CONNTRACK_EXP_NEW,
		       item->report, GFP_ATOMIC);
	return 0;

nla_put_failure:
	rcu_read_unlock();
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
errout:
	/* Signal listeners that an event got lost. */
	nfnetlink_set_err(0, 0, -ENOBUFS);
	return 0;
}
  1283. #endif
  1284. static int ctnetlink_exp_done(struct netlink_callback *cb)
  1285. {
  1286. if (cb->args[1])
  1287. nf_ct_expect_put((struct nf_conntrack_expect *)cb->args[1]);
  1288. return 0;
  1289. }
/*
 * Netlink dump callback: walk the expectation hash table under RCU.
 * cb->args[0] is the current bucket; cb->args[1] holds a reference to
 * the last expectation that did not fit into the previous skb so the
 * dump can resume exactly there on the next call.
 */
static int
ctnetlink_exp_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = &init_net;
	struct nf_conntrack_expect *exp, *last;
	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
	struct hlist_node *n;
	u_int8_t l3proto = nfmsg->nfgen_family;

	rcu_read_lock();
	last = (struct nf_conntrack_expect *)cb->args[1];
	for (; cb->args[0] < nf_ct_expect_hsize; cb->args[0]++) {
restart:
		hlist_for_each_entry(exp, n, &net->ct.expect_hash[cb->args[0]],
				     hnode) {
			/* l3proto == 0 (AF_UNSPEC) dumps all families. */
			if (l3proto && exp->tuple.src.l3num != l3proto)
				continue;
			if (cb->args[1]) {
				/* Skip forward to the saved resume point. */
				if (exp != last)
					continue;
				cb->args[1] = 0;
			}
			if (ctnetlink_exp_fill_info(skb,
						    NETLINK_CB(cb->skb).pid,
						    cb->nlh->nlmsg_seq,
						    IPCTNL_MSG_EXP_NEW,
						    exp) < 0) {
				/* skb is full: pin this expectation as the
				 * new resume point (unless it is dying). */
				if (!atomic_inc_not_zero(&exp->use))
					continue;
				cb->args[1] = (unsigned long)exp;
				goto out;
			}
		}
		if (cb->args[1]) {
			/* The resume point vanished from this bucket;
			 * rescan the bucket from the start. */
			cb->args[1] = 0;
			goto restart;
		}
	}
out:
	rcu_read_unlock();
	if (last)
		nf_ct_expect_put(last);

	return skb->len;
}
/* Attribute policy for expectation requests: only the two fixed-size
 * u32 attributes are type-checked here; the nested tuple/mask/master
 * attributes are parsed separately via ctnetlink_parse_tuple(). */
static const struct nla_policy exp_nla_policy[CTA_EXPECT_MAX+1] = {
	[CTA_EXPECT_TIMEOUT]	= { .type = NLA_U32 },
	[CTA_EXPECT_ID]		= { .type = NLA_U32 },
};
/*
 * Handle IPCTNL_MSG_EXP_GET: either start a full table dump
 * (NLM_F_DUMP) or look up a single expectation by its master tuple and
 * unicast it back to the requester.
 */
static int
ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
		     struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct sk_buff *skb2;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	if (nlh->nlmsg_flags & NLM_F_DUMP) {
		return netlink_dump_start(ctnl, skb, nlh,
					  ctnetlink_exp_dump_table,
					  ctnetlink_exp_done);
	}

	/* A single lookup requires the master tuple. */
	if (cda[CTA_EXPECT_MASTER])
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_MASTER, u3);
	else
		return -EINVAL;

	if (err < 0)
		return err;

	/* Takes a reference; it is dropped on every path below. */
	exp = nf_ct_expect_find_get(&init_net, &tuple);
	if (!exp)
		return -ENOENT;

	if (cda[CTA_EXPECT_ID]) {
		__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
		/* The id is the expectation's kernel address truncated to
		 * 32 bits (see ctnetlink_exp_dump_expect()). */
		if (ntohl(id) != (u32)(unsigned long)exp) {
			nf_ct_expect_put(exp);
			return -ENOENT;
		}
	}

	err = -ENOMEM;
	skb2 = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb2 == NULL)
		goto out;

	rcu_read_lock();
	err = ctnetlink_exp_fill_info(skb2, NETLINK_CB(skb).pid,
				      nlh->nlmsg_seq, IPCTNL_MSG_EXP_NEW, exp);
	rcu_read_unlock();
	if (err <= 0)
		goto free;

	nf_ct_expect_put(exp);
	/* netlink_unicast() consumes skb2. */
	return netlink_unicast(ctnl, skb2, NETLINK_CB(skb).pid, MSG_DONTWAIT);

free:
	kfree_skb(skb2);
out:
	nf_ct_expect_put(exp);
	return err;
}
/*
 * Handle IPCTNL_MSG_EXP_DELETE. Three modes, chosen by attribute:
 *  - CTA_EXPECT_TUPLE (+ optional id): delete one expectation;
 *  - CTA_EXPECT_HELP_NAME: delete all expectations of one helper;
 *  - neither: flush the entire expectation table.
 */
static int
ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
		     struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_helper *h;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	struct hlist_node *n, *next;
	u_int8_t u3 = nfmsg->nfgen_family;
	unsigned int i;
	int err;

	if (cda[CTA_EXPECT_TUPLE]) {
		/* delete a single expect by tuple */
		err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
		if (err < 0)
			return err;

		/* bump usage count to 2 */
		exp = nf_ct_expect_find_get(&init_net, &tuple);
		if (!exp)
			return -ENOENT;

		if (cda[CTA_EXPECT_ID]) {
			__be32 id = nla_get_be32(cda[CTA_EXPECT_ID]);
			/* Id (the expectation's truncated kernel address)
			 * does not match: not the entry the caller meant. */
			if (ntohl(id) != (u32)(unsigned long)exp) {
				nf_ct_expect_put(exp);
				return -ENOENT;
			}
		}

		/* after list removal, usage count == 1 */
		nf_ct_unexpect_related(exp);
		/* have to put what we 'get' above.
		 * after this line usage count == 0 */
		nf_ct_expect_put(exp);
	} else if (cda[CTA_EXPECT_HELP_NAME]) {
		char *name = nla_data(cda[CTA_EXPECT_HELP_NAME]);
		struct nf_conn_help *m_help;

		/* delete all expectations for this helper */
		spin_lock_bh(&nf_conntrack_lock);
		h = __nf_conntrack_helper_find_byname(name);
		if (!h) {
			spin_unlock_bh(&nf_conntrack_lock);
			return -EOPNOTSUPP;
		}
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &init_net.ct.expect_hash[i],
						  hnode) {
				m_help = nfct_help(exp->master);
				/* Only tear down entries whose timer had not
				 * fired yet (del_timer() returned true). */
				if (m_help->helper == h
				    && del_timer(&exp->timeout)) {
					nf_ct_unlink_expect(exp);
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);
	} else {
		/* This basically means we have to flush everything*/
		spin_lock_bh(&nf_conntrack_lock);
		for (i = 0; i < nf_ct_expect_hsize; i++) {
			hlist_for_each_entry_safe(exp, n, next,
						  &init_net.ct.expect_hash[i],
						  hnode) {
				if (del_timer(&exp->timeout)) {
					nf_ct_unlink_expect(exp);
					nf_ct_expect_put(exp);
				}
			}
		}
		spin_unlock_bh(&nf_conntrack_lock);
	}

	return 0;
}
/* Updating an existing expectation via netlink is not supported;
 * ctnetlink_new_expect() calls this when the entry already exists and
 * NLM_F_EXCL is not set. */
static int
ctnetlink_change_expect(struct nf_conntrack_expect *x, struct nlattr *cda[])
{
	return -EOPNOTSUPP;
}
/*
 * Create a new expectation from CTA_EXPECT_TUPLE/MASK/MASTER and
 * register it against its master conntrack, which must already have a
 * helper attached. Returns 0 or a negative errno.
 */
static int
ctnetlink_create_expect(struct nlattr *cda[], u_int8_t u3, u32 pid, int report)
{
	struct nf_conntrack_tuple tuple, mask, master_tuple;
	struct nf_conntrack_tuple_hash *h = NULL;
	struct nf_conntrack_expect *exp;
	struct nf_conn *ct;
	struct nf_conn_help *help;
	int err = 0;

	/* caller guarantees that those three CTA_EXPECT_* exist */
	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	if (err < 0)
		return err;
	err = ctnetlink_parse_tuple(cda, &mask, CTA_EXPECT_MASK, u3);
	if (err < 0)
		return err;
	err = ctnetlink_parse_tuple(cda, &master_tuple, CTA_EXPECT_MASTER, u3);
	if (err < 0)
		return err;

	/* Look for master conntrack of this expectation */
	h = nf_conntrack_find_get(&init_net, &master_tuple);
	if (!h)
		return -ENOENT;
	ct = nf_ct_tuplehash_to_ctrack(h);
	help = nfct_help(ct);

	if (!help || !help->helper) {
		/* such conntrack hasn't got any helper, abort */
		err = -EOPNOTSUPP;
		goto out;
	}

	exp = nf_ct_expect_alloc(ct);
	if (!exp) {
		err = -ENOMEM;
		goto out;
	}

	exp->class = 0;
	exp->expectfn = NULL;
	exp->flags = 0;
	exp->master = ct;
	exp->helper = NULL;
	memcpy(&exp->tuple, &tuple, sizeof(struct nf_conntrack_tuple));
	/* Only the source part of the mask carries information. */
	memcpy(&exp->mask.src.u3, &mask.src.u3, sizeof(exp->mask.src.u3));
	exp->mask.src.u.all = mask.src.u.all;

	err = nf_ct_expect_related_report(exp, pid, report);
	nf_ct_expect_put(exp);

out:
	/* Drop the reference taken by nf_conntrack_find_get(). */
	nf_ct_put(nf_ct_tuplehash_to_ctrack(h));
	return err;
}
/*
 * Handle IPCTNL_MSG_EXP_NEW: create an expectation (NLM_F_CREATE) when
 * none matches the given tuple, otherwise apply -EEXIST/NLM_F_EXCL
 * semantics (changing an existing expectation is not supported).
 */
static int
ctnetlink_new_expect(struct sock *ctnl, struct sk_buff *skb,
		     struct nlmsghdr *nlh, struct nlattr *cda[])
{
	struct nf_conntrack_tuple tuple;
	struct nf_conntrack_expect *exp;
	struct nfgenmsg *nfmsg = nlmsg_data(nlh);
	u_int8_t u3 = nfmsg->nfgen_family;
	int err = 0;

	/* All three attributes are required (relied upon by
	 * ctnetlink_create_expect()). */
	if (!cda[CTA_EXPECT_TUPLE]
	    || !cda[CTA_EXPECT_MASK]
	    || !cda[CTA_EXPECT_MASTER])
		return -EINVAL;

	err = ctnetlink_parse_tuple(cda, &tuple, CTA_EXPECT_TUPLE, u3);
	if (err < 0)
		return err;

	spin_lock_bh(&nf_conntrack_lock);
	exp = __nf_ct_expect_find(&init_net, &tuple);

	if (!exp) {
		/* Not found: drop the lock before the (possibly sleeping)
		 * creation path. */
		spin_unlock_bh(&nf_conntrack_lock);
		err = -ENOENT;
		if (nlh->nlmsg_flags & NLM_F_CREATE) {
			err = ctnetlink_create_expect(cda,
						      u3,
						      NETLINK_CB(skb).pid,
						      nlmsg_report(nlh));
		}
		return err;
	}

	err = -EEXIST;
	if (!(nlh->nlmsg_flags & NLM_F_EXCL))
		err = ctnetlink_change_expect(exp, cda);
	spin_unlock_bh(&nf_conntrack_lock);

	return err;
}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
/* Event notifiers registered in ctnetlink_init(): forward conntrack
 * and expectation events to nfnetlink listeners. */
static struct nf_ct_event_notifier ctnl_notifier = {
	.fcn = ctnetlink_conntrack_event,
};

static struct nf_exp_event_notifier ctnl_notifier_exp = {
	.fcn = ctnetlink_expect_event,
};
#endif
/* Message handlers for the "conntrack" nfnetlink subsystem, indexed by
 * IPCTNL_MSG_CT_* message type. */
static const struct nfnl_callback ctnl_cb[IPCTNL_MSG_MAX] = {
	[IPCTNL_MSG_CT_NEW]		= { .call = ctnetlink_new_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET]		= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_DELETE]		= { .call = ctnetlink_del_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
	[IPCTNL_MSG_CT_GET_CTRZERO]	= { .call = ctnetlink_get_conntrack,
					    .attr_count = CTA_MAX,
					    .policy = ct_nla_policy },
};

/* Message handlers for the "conntrack_expect" subsystem. */
static const struct nfnl_callback ctnl_exp_cb[IPCTNL_MSG_EXP_MAX] = {
	[IPCTNL_MSG_EXP_GET]		= { .call = ctnetlink_get_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_NEW]		= { .call = ctnetlink_new_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
	[IPCTNL_MSG_EXP_DELETE]		= { .call = ctnetlink_del_expect,
					    .attr_count = CTA_EXPECT_MAX,
					    .policy = exp_nla_policy },
};

/* Subsystem descriptors registered with nfnetlink in ctnetlink_init(). */
static const struct nfnetlink_subsystem ctnl_subsys = {
	.name				= "conntrack",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK,
	.cb_count			= IPCTNL_MSG_MAX,
	.cb				= ctnl_cb,
};

static const struct nfnetlink_subsystem ctnl_exp_subsys = {
	.name				= "conntrack_expect",
	.subsys_id			= NFNL_SUBSYS_CTNETLINK_EXP,
	.cb_count			= IPCTNL_MSG_EXP_MAX,
	.cb				= ctnl_exp_cb,
};

MODULE_ALIAS("ip_conntrack_netlink");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
  1596. static int __init ctnetlink_init(void)
  1597. {
  1598. int ret;
  1599. printk("ctnetlink v%s: registering with nfnetlink.\n", version);
  1600. ret = nfnetlink_subsys_register(&ctnl_subsys);
  1601. if (ret < 0) {
  1602. printk("ctnetlink_init: cannot register with nfnetlink.\n");
  1603. goto err_out;
  1604. }
  1605. ret = nfnetlink_subsys_register(&ctnl_exp_subsys);
  1606. if (ret < 0) {
  1607. printk("ctnetlink_init: cannot register exp with nfnetlink.\n");
  1608. goto err_unreg_subsys;
  1609. }
  1610. #ifdef CONFIG_NF_CONNTRACK_EVENTS
  1611. ret = nf_conntrack_register_notifier(&ctnl_notifier);
  1612. if (ret < 0) {
  1613. printk("ctnetlink_init: cannot register notifier.\n");
  1614. goto err_unreg_exp_subsys;
  1615. }
  1616. ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
  1617. if (ret < 0) {
  1618. printk("ctnetlink_init: cannot expect register notifier.\n");
  1619. goto err_unreg_notifier;
  1620. }
  1621. #endif
  1622. return 0;
  1623. #ifdef CONFIG_NF_CONNTRACK_EVENTS
  1624. err_unreg_notifier:
  1625. nf_conntrack_unregister_notifier(&ctnl_notifier);
  1626. err_unreg_exp_subsys:
  1627. nfnetlink_subsys_unregister(&ctnl_exp_subsys);
  1628. #endif
  1629. err_unreg_subsys:
  1630. nfnetlink_subsys_unregister(&ctnl_subsys);
  1631. err_out:
  1632. return ret;
  1633. }
  1634. static void __exit ctnetlink_exit(void)
  1635. {
  1636. printk("ctnetlink: unregistering from nfnetlink.\n");
  1637. #ifdef CONFIG_NF_CONNTRACK_EVENTS
  1638. nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
  1639. nf_conntrack_unregister_notifier(&ctnl_notifier);
  1640. #endif
  1641. nfnetlink_subsys_unregister(&ctnl_exp_subsys);
  1642. nfnetlink_subsys_unregister(&ctnl_subsys);
  1643. return;
  1644. }
  1645. module_init(ctnetlink_init);
  1646. module_exit(ctnetlink_exit);