/net/bridge/br_fdb.c

http://github.com/mirrors/linux · C · 1208 lines · 931 code · 185 blank · 92 comment · 205 complexity · e2b9bb89280007da717b26da203f40ed MD5 · raw file

  1. // SPDX-License-Identifier: GPL-2.0-or-later
  2. /*
  3. * Forwarding database
  4. * Linux ethernet bridge
  5. *
  6. * Authors:
  7. * Lennert Buytenhek <buytenh@gnu.org>
  8. */
  9. #include <linux/kernel.h>
  10. #include <linux/init.h>
  11. #include <linux/rculist.h>
  12. #include <linux/spinlock.h>
  13. #include <linux/times.h>
  14. #include <linux/netdevice.h>
  15. #include <linux/etherdevice.h>
  16. #include <linux/jhash.h>
  17. #include <linux/random.h>
  18. #include <linux/slab.h>
  19. #include <linux/atomic.h>
  20. #include <asm/unaligned.h>
  21. #include <linux/if_vlan.h>
  22. #include <net/switchdev.h>
  23. #include <trace/events/bridge.h>
  24. #include "br_private.h"
/* FDB entries live in two structures at once: a per-bridge rhashtable
 * (fdb_hash_tbl) keyed by (MAC address, vlan_id) for lookups, and the
 * linked list br->fdb_list for full traversals.
 */
static const struct rhashtable_params br_fdb_rht_params = {
	.head_offset = offsetof(struct net_bridge_fdb_entry, rhnode),
	.key_offset = offsetof(struct net_bridge_fdb_entry, key),
	.key_len = sizeof(struct net_bridge_fdb_key),
	.automatic_shrinking = true,
};

/* slab cache backing all fdb entries, shared by every bridge */
static struct kmem_cache *br_fdb_cache __read_mostly;

static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *, int, bool);
  36. int __init br_fdb_init(void)
  37. {
  38. br_fdb_cache = kmem_cache_create("bridge_fdb_cache",
  39. sizeof(struct net_bridge_fdb_entry),
  40. 0,
  41. SLAB_HWCACHE_ALIGN, NULL);
  42. if (!br_fdb_cache)
  43. return -ENOMEM;
  44. return 0;
  45. }
/* Destroy the fdb slab cache; module unload time only. */
void br_fdb_fini(void)
{
	kmem_cache_destroy(br_fdb_cache);
}
/* Set up the per-bridge (MAC, vlan_id) -> fdb entry hash table. */
int br_fdb_hash_init(struct net_bridge *br)
{
	return rhashtable_init(&br->fdb_hash_tbl, &br_fdb_rht_params);
}
/* Tear down the per-bridge fdb hash table. */
void br_fdb_hash_fini(struct net_bridge *br)
{
	rhashtable_destroy(&br->fdb_hash_tbl);
}
  58. /* if topology_changing then use forward_delay (default 15 sec)
  59. * otherwise keep longer (default 5 minutes)
  60. */
  61. static inline unsigned long hold_time(const struct net_bridge *br)
  62. {
  63. return br->topology_change ? br->forward_delay : br->ageing_time;
  64. }
  65. static inline int has_expired(const struct net_bridge *br,
  66. const struct net_bridge_fdb_entry *fdb)
  67. {
  68. return !test_bit(BR_FDB_STATIC, &fdb->flags) &&
  69. !test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags) &&
  70. time_before_eq(fdb->updated + hold_time(br), jiffies);
  71. }
/* RCU callback: actually free an entry once all readers are done. */
static void fdb_rcu_free(struct rcu_head *head)
{
	struct net_bridge_fdb_entry *ent
		= container_of(head, struct net_bridge_fdb_entry, rcu);
	kmem_cache_free(br_fdb_cache, ent);
}
/* Look up an entry by (addr, vid).  Caller must be inside an RCU
 * read-side critical section; the returned pointer is only valid
 * until rcu_read_unlock().
 */
static struct net_bridge_fdb_entry *fdb_find_rcu(struct rhashtable *tbl,
						 const unsigned char *addr,
						 __u16 vid)
{
	struct net_bridge_fdb_key key;

	WARN_ON_ONCE(!rcu_read_lock_held());

	key.vlan_id = vid;
	memcpy(key.addr.addr, addr, sizeof(key.addr.addr));

	return rhashtable_lookup(tbl, &key, br_fdb_rht_params);
}
/* requires bridge hash_lock */
static struct net_bridge_fdb_entry *br_fdb_find(struct net_bridge *br,
						const unsigned char *addr,
						__u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	lockdep_assert_held_once(&br->hash_lock);

	/* rhashtable lookup requires an RCU read lock even though the
	 * entry cannot be freed while hash_lock is held
	 */
	rcu_read_lock();
	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	rcu_read_unlock();

	return fdb;
}
/* Return the port device currently associated with (addr, vid) on the
 * given bridge master device, or NULL if unknown or if the entry
 * belongs to the bridge itself (f->dst == NULL).  Called under RTNL.
 */
struct net_device *br_fdb_find_port(const struct net_device *br_dev,
				    const unsigned char *addr,
				    __u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_device *dev = NULL;
	struct net_bridge *br;

	ASSERT_RTNL();

	if (!netif_is_bridge_master(br_dev))
		return NULL;

	br = netdev_priv(br_dev);
	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (f && f->dst)
		dev = f->dst->dev;
	rcu_read_unlock();

	return dev;
}
EXPORT_SYMBOL_GPL(br_fdb_find_port);
/* RCU lookup helper for fast-path callers (learning/forwarding). */
struct net_bridge_fdb_entry *br_fdb_find_rcu(struct net_bridge *br,
					     const unsigned char *addr,
					     __u16 vid)
{
	return fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
}
/* When a static FDB entry is added, the mac address from the entry is
 * added to the bridge private HW address list and all required ports
 * are then updated with the new information.
 * Called under RTNL.
 */
static void fdb_add_hw_addr(struct net_bridge *br, const unsigned char *addr)
{
	int err;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	/* promiscuous ports receive everything anyway, skip them */
	list_for_each_entry(p, &br->port_list, list) {
		if (!br_promisc_port(p)) {
			err = dev_uc_add(p->dev, addr);
			if (err)
				goto undo;
		}
	}

	return;
undo:
	/* roll back the ports already updated on first failure */
	list_for_each_entry_continue_reverse(p, &br->port_list, list) {
		if (!br_promisc_port(p))
			dev_uc_del(p->dev, addr);
	}
}
  149. /* When a static FDB entry is deleted, the HW address from that entry is
  150. * also removed from the bridge private HW address list and updates all
  151. * the ports with needed information.
  152. * Called under RTNL.
  153. */
  154. static void fdb_del_hw_addr(struct net_bridge *br, const unsigned char *addr)
  155. {
  156. struct net_bridge_port *p;
  157. ASSERT_RTNL();
  158. list_for_each_entry(p, &br->port_list, list) {
  159. if (!br_promisc_port(p))
  160. dev_uc_del(p->dev, addr);
  161. }
  162. }
/* Unlink and free an entry.  Caller holds br->hash_lock.  The entry is
 * removed from the hash table and fdb_list immediately, an RTM_DELNEIGH
 * notification is sent (optionally to switchdev too), and the memory is
 * reclaimed after a grace period via RCU.
 */
static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f,
		       bool swdev_notify)
{
	trace_fdb_delete(br, f);

	/* static entries were pushed into the port HW filters */
	if (test_bit(BR_FDB_STATIC, &f->flags))
		fdb_del_hw_addr(br, f->key.addr.addr);

	hlist_del_init_rcu(&f->fdb_node);
	rhashtable_remove_fast(&br->fdb_hash_tbl, &f->rhnode,
			       br_fdb_rht_params);
	fdb_notify(br, f, RTM_DELNEIGH, swdev_notify);
	call_rcu(&f->rcu, fdb_rcu_free);
}
/* Delete a local entry if no other port had the same address.
 *
 * Before deleting, try to re-home the entry: another port (or the
 * bridge device itself) may carry the same MAC on a matching vlan, in
 * which case ownership is transferred instead.  Caller holds
 * br->hash_lock.
 */
static void fdb_delete_local(struct net_bridge *br,
			     const struct net_bridge_port *p,
			     struct net_bridge_fdb_entry *f)
{
	const unsigned char *addr = f->key.addr.addr;
	struct net_bridge_vlan_group *vg;
	const struct net_bridge_vlan *v;
	struct net_bridge_port *op;
	u16 vid = f->key.vlan_id;

	/* Maybe another port has same hw addr? */
	list_for_each_entry(op, &br->port_list, list) {
		vg = nbp_vlan_group(op);
		if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
		    (!vid || br_vlan_find(vg, vid))) {
			f->dst = op;
			clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
			return;
		}
	}

	vg = br_vlan_group(br);
	v = br_vlan_find(vg, vid);
	/* Maybe bridge device has same hw addr? */
	if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
	    (!vid || (v && br_vlan_should_use(v)))) {
		f->dst = NULL;
		clear_bit(BR_FDB_ADDED_BY_USER, &f->flags);
		return;
	}

	fdb_delete(br, f, true);
}
/* Look up (addr, vid) and, if it is a kernel-created local entry owned
 * by port p, delete it (or re-home it, see fdb_delete_local()).
 * User-added entries are left alone.
 */
void br_fdb_find_delete_local(struct net_bridge *br,
			      const struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *f;

	spin_lock_bh(&br->hash_lock);
	f = br_fdb_find(br, addr, vid);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags) && f->dst == p)
		fdb_delete_local(br, p, f);
	spin_unlock_bh(&br->hash_lock);
}
/* A port device changed its MAC address: drop the port's automatic
 * local entries for the old address and insert entries for newaddr on
 * the default vlan and on every vlan configured on the port.
 * Runs under RTNL.
 */
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge *br = p->br;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);
	vg = nbp_vlan_group(p);
	hlist_for_each_entry(f, &br->fdb_list, fdb_node) {
		if (f->dst == p && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !test_bit(BR_FDB_ADDED_BY_USER, &f->flags)) {
			/* delete old one */
			fdb_delete_local(br, p, f);

			/* if this port has no vlan information
			 * configured, we can safely be done at
			 * this point.
			 */
			if (!vg || !vg->num_vlans)
				goto insert;
		}
	}

insert:
	/* insert new address,  may fail if invalid address or dup. */
	fdb_insert(br, p, newaddr, 0);

	if (!vg || !vg->num_vlans)
		goto done;
	/* Now add entries for every VLAN configured on the port.
	 * This function runs under RTNL so the bitmap will not change
	 * from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist)
		fdb_insert(br, p, newaddr, v->vid);

done:
	spin_unlock_bh(&br->hash_lock);
}
/* The bridge device itself changed its MAC address: replace the
 * bridge-owned local entries (f->dst == NULL) for the old address with
 * entries for newaddr on the default vlan and on every bridge vlan.
 * Runs under RTNL.
 */
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_fdb_entry *f;
	struct net_bridge_vlan *v;

	spin_lock_bh(&br->hash_lock);

	/* If old entry was unassociated with any port, then delete it. */
	f = br_fdb_find(br, br->dev->dev_addr, 0);
	if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
	    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
		fdb_delete_local(br, NULL, f);

	fdb_insert(br, NULL, newaddr, 0);
	vg = br_vlan_group(br);
	if (!vg || !vg->num_vlans)
		goto out;
	/* Now remove and add entries for every VLAN configured on the
	 * bridge. This function runs under RTNL so the bitmap will not
	 * change from under us.
	 */
	list_for_each_entry(v, &vg->vlan_list, vlist) {
		if (!br_vlan_should_use(v))
			continue;
		f = br_fdb_find(br, br->dev->dev_addr, v->vid);
		if (f && test_bit(BR_FDB_LOCAL, &f->flags) &&
		    !f->dst && !test_bit(BR_FDB_ADDED_BY_USER, &f->flags))
			fdb_delete_local(br, NULL, f);
		fdb_insert(br, NULL, newaddr, v->vid);
	}
out:
	spin_unlock_bh(&br->hash_lock);
}
/* Garbage-collection work item: delete expired learned entries and
 * re-arm gc_work for the next soonest expiry.
 */
void br_fdb_cleanup(struct work_struct *work)
{
	struct net_bridge *br = container_of(work, struct net_bridge,
					     gc_work.work);
	struct net_bridge_fdb_entry *f = NULL;
	unsigned long delay = hold_time(br);
	unsigned long work_delay = delay;
	unsigned long now = jiffies;

	/* this part is tricky, in order to avoid blocking learning and
	 * consequently forwarding, we rely on rcu to delete objects with
	 * delayed freeing allowing us to continue traversing
	 */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		unsigned long this_timer;

		/* static and externally learned entries never age out */
		if (test_bit(BR_FDB_STATIC, &f->flags) ||
		    test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &f->flags))
			continue;
		this_timer = f->updated + delay;
		if (time_after(this_timer, now)) {
			work_delay = min(work_delay, this_timer - now);
		} else {
			spin_lock_bh(&br->hash_lock);
			/* recheck under lock: a concurrent delete may
			 * have unhashed the entry already
			 */
			if (!hlist_unhashed(&f->fdb_node))
				fdb_delete(br, f, true);
			spin_unlock_bh(&br->hash_lock);
		}
	}
	rcu_read_unlock();

	/* Cleanup minimum 10 milliseconds apart */
	work_delay = max_t(unsigned long, work_delay, msecs_to_jiffies(10));
	mod_delayed_work(system_long_wq, &br->gc_work, work_delay);
}
/* Completely flush all dynamic entries in forwarding database.*/
void br_fdb_flush(struct net_bridge *br)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		/* static entries (incl. local ones) survive a flush */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}
/* Flush all entries referring to a specific port.
 * if do_all is set also flush static entries
 * if vid is set delete all entries that match the vlan_id
 */
void br_fdb_delete_by_port(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   u16 vid,
			   int do_all)
{
	struct net_bridge_fdb_entry *f;
	struct hlist_node *tmp;

	spin_lock_bh(&br->hash_lock);
	hlist_for_each_entry_safe(f, tmp, &br->fdb_list, fdb_node) {
		if (f->dst != p)
			continue;
		if (!do_all)
			if (test_bit(BR_FDB_STATIC, &f->flags) ||
			    (vid && f->key.vlan_id != vid))
				continue;

		/* local entries may be re-homed instead of deleted */
		if (test_bit(BR_FDB_LOCAL, &f->flags))
			fdb_delete_local(br, p, f);
		else
			fdb_delete(br, f, true);
	}
	spin_unlock_bh(&br->hash_lock);
}
#if IS_ENABLED(CONFIG_ATM_LANE)
/* Interface used by ATM LANE hook to test
 * if an addr is on some other bridge port */
int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
{
	struct net_bridge_fdb_entry *fdb;
	struct net_bridge_port *port;
	int ret;

	rcu_read_lock();
	port = br_port_get_rcu(dev);
	if (!port)
		ret = 0;
	else {
		/* true only when the addr lives on a different,
		 * currently-forwarding port of the same bridge
		 */
		fdb = br_fdb_find_rcu(port->br, addr, 0);
		ret = fdb && fdb->dst && fdb->dst->dev != dev &&
			fdb->dst->state == BR_STATE_FORWARDING;
	}
	rcu_read_unlock();

	return ret;
}
#endif /* CONFIG_ATM_LANE */
/*
 * Fill buffer with forwarding table records in
 * the API format (legacy ioctl interface).  Returns the number of
 * records written; "skip" records are passed over first for paging.
 */
int br_fdb_fillbuf(struct net_bridge *br, void *buf,
		   unsigned long maxnum, unsigned long skip)
{
	struct net_bridge_fdb_entry *f;
	struct __fdb_entry *fe = buf;
	int num = 0;

	memset(buf, 0, maxnum*sizeof(struct __fdb_entry));

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (num >= maxnum)
			break;

		if (has_expired(br, f))
			continue;

		/* ignore pseudo entry for local MAC address */
		if (!f->dst)
			continue;

		if (skip) {
			--skip;
			continue;
		}

		/* convert from internal format to API */
		memcpy(fe->mac_addr, f->key.addr.addr, ETH_ALEN);

		/* due to ABI compat need to split into hi/lo */
		fe->port_no = f->dst->port_no;
		fe->port_hi = f->dst->port_no >> 8;

		fe->is_local = test_bit(BR_FDB_LOCAL, &f->flags);
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			fe->ageing_timer_value = jiffies_delta_to_clock_t(jiffies - f->updated);
		++fe;
		++num;
	}
	rcu_read_unlock();

	return num;
}
/* Allocate and hash a new fdb entry.  Caller holds br->hash_lock.
 * Returns NULL on allocation failure or if an entry with the same
 * (addr, vid) key already exists (lost insertion race).
 */
static struct net_bridge_fdb_entry *fdb_create(struct net_bridge *br,
					       struct net_bridge_port *source,
					       const unsigned char *addr,
					       __u16 vid,
					       unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;

	/* GFP_ATOMIC: may be called from the learning softirq path */
	fdb = kmem_cache_alloc(br_fdb_cache, GFP_ATOMIC);
	if (fdb) {
		memcpy(fdb->key.addr.addr, addr, ETH_ALEN);
		fdb->dst = source;
		fdb->key.vlan_id = vid;
		fdb->flags = flags;
		fdb->updated = fdb->used = jiffies;
		if (rhashtable_lookup_insert_fast(&br->fdb_hash_tbl,
						  &fdb->rhnode,
						  br_fdb_rht_params)) {
			kmem_cache_free(br_fdb_cache, fdb);
			fdb = NULL;
		} else {
			hlist_add_head_rcu(&fdb->fdb_node, &br->fdb_list);
		}
	}
	return fdb;
}
/* Insert a local, static entry for an own MAC address.  Caller holds
 * br->hash_lock.  A pre-existing learned entry for the address is
 * evicted with a warning; an existing local entry is kept as-is.
 */
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		      const unsigned char *addr, u16 vid)
{
	struct net_bridge_fdb_entry *fdb;

	if (!is_valid_ether_addr(addr))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb) {
		/* it is okay to have multiple ports with same
		 * address, just use the first one.
		 */
		if (test_bit(BR_FDB_LOCAL, &fdb->flags))
			return 0;
		br_warn(br, "adding interface %s with same address as a received packet (addr:%pM, vlan:%u)\n",
			source ? source->dev->name : br->dev->name, addr, vid);
		fdb_delete(br, fdb, true);
	}

	fdb = fdb_create(br, source, addr, vid,
			 BIT(BR_FDB_LOCAL) | BIT(BR_FDB_STATIC));
	if (!fdb)
		return -ENOMEM;

	fdb_add_hw_addr(br, addr);
	fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	return 0;
}
/* Add entry for local address of interface */
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
		  const unsigned char *addr, u16 vid)
{
	int ret;

	/* locked wrapper around fdb_insert() */
	spin_lock_bh(&br->hash_lock);
	ret = fdb_insert(br, source, addr, vid);
	spin_unlock_bh(&br->hash_lock);
	return ret;
}
/* Learning path: refresh or create the entry for (addr, vid) seen on
 * port "source".  Runs under RCU in the receive path; hash_lock is
 * taken only when a new entry must be created.
 */
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
		   const unsigned char *addr, u16 vid, unsigned long flags)
{
	struct net_bridge_fdb_entry *fdb;
	bool fdb_modified = false;

	/* some users want to always flood. */
	if (hold_time(br) == 0)
		return;

	fdb = fdb_find_rcu(&br->fdb_hash_tbl, addr, vid);
	if (likely(fdb)) {
		/* attempt to update an entry for a local interface */
		if (unlikely(test_bit(BR_FDB_LOCAL, &fdb->flags))) {
			if (net_ratelimit())
				br_warn(br, "received packet on %s with own address as source address (addr:%pM, vlan:%u)\n",
					source->dev->name, addr, vid);
		} else {
			unsigned long now = jiffies;

			/* fastpath: update of existing entry */
			if (unlikely(source != fdb->dst &&
				     !test_bit(BR_FDB_STICKY, &fdb->flags))) {
				fdb->dst = source;
				fdb_modified = true;
				/* Take over HW learned entry */
				if (unlikely(test_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						      &fdb->flags)))
					clear_bit(BR_FDB_ADDED_BY_EXT_LEARN,
						  &fdb->flags);
			}
			/* avoid dirtying the cache line if unchanged */
			if (now != fdb->updated)
				fdb->updated = now;
			if (unlikely(test_bit(BR_FDB_ADDED_BY_USER, &flags)))
				set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);
			if (unlikely(fdb_modified)) {
				trace_br_fdb_update(br, source, addr, vid, flags);
				fdb_notify(br, fdb, RTM_NEWNEIGH, true);
			}
		}
	} else {
		spin_lock(&br->hash_lock);
		fdb = fdb_create(br, source, addr, vid, flags);
		if (fdb) {
			trace_br_fdb_update(br, source, addr, vid, flags);
			fdb_notify(br, fdb, RTM_NEWNEIGH, true);
		}
		/* else  we lose race and someone else inserts
		 * it first, don't bother updating
		 */
		spin_unlock(&br->hash_lock);
	}
}
  524. static int fdb_to_nud(const struct net_bridge *br,
  525. const struct net_bridge_fdb_entry *fdb)
  526. {
  527. if (test_bit(BR_FDB_LOCAL, &fdb->flags))
  528. return NUD_PERMANENT;
  529. else if (test_bit(BR_FDB_STATIC, &fdb->flags))
  530. return NUD_NOARP;
  531. else if (has_expired(br, fdb))
  532. return NUD_STALE;
  533. else
  534. return NUD_REACHABLE;
  535. }
/* Fill one RTM_NEWNEIGH/RTM_DELNEIGH message describing "fdb" into skb.
 * Returns 0 on success or -EMSGSIZE if the skb has insufficient room.
 */
static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
			 const struct net_bridge_fdb_entry *fdb,
			 u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	ndm->ndm_family	 = AF_BRIDGE;
	ndm->ndm_pad1    = 0;
	ndm->ndm_pad2    = 0;
	ndm->ndm_flags	 = 0;
	ndm->ndm_type	 = 0;
	/* bridge-owned entries (no dst port) report the bridge ifindex */
	ndm->ndm_ifindex = fdb->dst ? fdb->dst->dev->ifindex : br->dev->ifindex;
	ndm->ndm_state   = fdb_to_nud(br, fdb);

	if (test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		ndm->ndm_flags |= NTF_OFFLOADED;
	if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		ndm->ndm_flags |= NTF_EXT_LEARNED;
	if (test_bit(BR_FDB_STICKY, &fdb->flags))
		ndm->ndm_flags |= NTF_STICKY;

	if (nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->key.addr))
		goto nla_put_failure;
	if (nla_put_u32(skb, NDA_MASTER, br->dev->ifindex))
		goto nla_put_failure;
	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;
	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	/* vlan 0 (no vlan) is omitted from the message */
	if (fdb->key.vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16),
					&fdb->key.vlan_id))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Worst-case netlink message size for one fdb entry; must account for
 * every attribute fdb_fill_info() may emit.
 */
static inline size_t fdb_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(u32)) /* NDA_MASTER */
		+ nla_total_size(sizeof(u16)) /* NDA_VLAN */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}
/* Notify listeners of an fdb change: optionally push the change to
 * switchdev drivers, then broadcast an RTNLGRP_NEIGH netlink message.
 */
static void fdb_notify(struct net_bridge *br,
		       const struct net_bridge_fdb_entry *fdb, int type,
		       bool swdev_notify)
{
	struct net *net = dev_net(br->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	if (swdev_notify)
		br_switchdev_fdb_notify(fdb, type);

	skb = nlmsg_new(fdb_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = fdb_fill_info(skb, br, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in fdb_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}
/* Dump information about entries, in response to GETNEIGH.
 * "*idx" is the running entry counter used by the netlink dump
 * machinery for resuming a multi-part dump (cb->args[2] holds the
 * resume point).
 */
int br_fdb_dump(struct sk_buff *skb,
		struct netlink_callback *cb,
		struct net_device *dev,
		struct net_device *filter_dev,
		int *idx)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	if (!(dev->priv_flags & IFF_EBRIDGE))
		return err;

	if (!filter_dev) {
		err = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
		if (err < 0)
			return err;
	}

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		if (*idx < cb->args[2])
			goto skip;
		if (filter_dev && (!f->dst || f->dst->dev != filter_dev)) {
			if (filter_dev != dev)
				goto skip;
			/* !f->dst is a special case for bridge
			 * It means the MAC belongs to the bridge
			 * Therefore need a little more filtering
			 * we only want to dump the !f->dst case
			 */
			if (f->dst)
				goto skip;
		}
		if (!filter_dev && f->dst)
			goto skip;

		err = fdb_fill_info(skb, br, f,
				    NETLINK_CB(cb->skb).portid,
				    cb->nlh->nlmsg_seq,
				    RTM_NEWNEIGH,
				    NLM_F_MULTI);
		if (err < 0)
			break;
skip:
		*idx += 1;
	}
	rcu_read_unlock();

	return err;
}
/* RTM_GETNEIGH for a single entry: look up (addr, vid) and fill one
 * RTM_NEWNEIGH reply into skb.  Returns -ENOENT if no entry exists.
 */
int br_fdb_get(struct sk_buff *skb,
	       struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr,
	       u16 vid, u32 portid, u32 seq,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *f;
	int err = 0;

	rcu_read_lock();
	f = br_fdb_find_rcu(br, addr, vid);
	if (!f) {
		NL_SET_ERR_MSG(extack, "Fdb entry not found");
		err = -ENOENT;
		goto errout;
	}

	err = fdb_fill_info(skb, br, f, portid, seq,
			    RTM_NEWNEIGH, 0);
errout:
	rcu_read_unlock();
	return err;
}
/* Update (create or replace) forwarding database entry */
static int fdb_add_entry(struct net_bridge *br, struct net_bridge_port *source,
			 const u8 *addr, u16 state, u16 flags, u16 vid,
			 u8 ndm_flags)
{
	bool is_sticky = !!(ndm_flags & NTF_STICKY);
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;

	/* If the port cannot learn allow only local and static entries */
	if (source && !(state & NUD_PERMANENT) && !(state & NUD_NOARP) &&
	    !(source->state == BR_STATE_LEARNING ||
	      source->state == BR_STATE_FORWARDING))
		return -EPERM;

	/* entries pointing at the bridge itself must be permanent */
	if (!source && !(state & NUD_PERMANENT)) {
		pr_info("bridge: RTM_NEWNEIGH %s without NUD_PERMANENT\n",
			br->dev->name);
		return -EINVAL;
	}

	/* sticky makes no sense for a local (permanent) entry */
	if (is_sticky && (state & NUD_PERMANENT))
		return -EINVAL;

	fdb = br_fdb_find(br, addr, vid);
	if (fdb == NULL) {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		fdb = fdb_create(br, source, addr, vid, 0);
		if (!fdb)
			return -ENOMEM;

		modified = true;
	} else {
		if (flags & NLM_F_EXCL)
			return -EEXIST;

		if (fdb->dst != source) {
			fdb->dst = source;
			modified = true;
		}
	}

	if (fdb_to_nud(br, fdb) != state) {
		/* map the requested NUD state onto LOCAL/STATIC flags,
		 * keeping the port HW address filters in sync
		 */
		if (state & NUD_PERMANENT) {
			set_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else if (state & NUD_NOARP) {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (!test_and_set_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_add_hw_addr(br, addr);
		} else {
			clear_bit(BR_FDB_LOCAL, &fdb->flags);
			if (test_and_clear_bit(BR_FDB_STATIC, &fdb->flags))
				fdb_del_hw_addr(br, addr);
		}

		modified = true;
	}

	if (is_sticky != test_bit(BR_FDB_STICKY, &fdb->flags)) {
		change_bit(BR_FDB_STICKY, &fdb->flags);
		modified = true;
	}

	set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

	fdb->used = jiffies;
	if (modified) {
		fdb->updated = jiffies;
		fdb_notify(br, fdb, RTM_NEWNEIGH, true);
	}

	return 0;
}
/* Dispatch one RTM_NEWNEIGH request: NTF_USE refreshes via the
 * learning path, NTF_EXT_LEARNED goes through the external-learn API,
 * everything else is a regular add/replace under hash_lock.
 */
static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge *br,
			struct net_bridge_port *p, const unsigned char *addr,
			u16 nlh_flags, u16 vid)
{
	int err = 0;

	if (ndm->ndm_flags & NTF_USE) {
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s with NTF_USE is not supported\n",
				br->dev->name);
			return -EINVAL;
		}
		if (!nbp_state_should_learn(p))
			return 0;

		/* br_fdb_update() expects softirq + RCU context */
		local_bh_disable();
		rcu_read_lock();
		br_fdb_update(br, p, addr, vid, BIT(BR_FDB_ADDED_BY_USER));
		rcu_read_unlock();
		local_bh_enable();
	} else if (ndm->ndm_flags & NTF_EXT_LEARNED) {
		err = br_fdb_external_learn_add(br, p, addr, vid, true);
	} else {
		spin_lock_bh(&br->hash_lock);
		err = fdb_add_entry(br, p, addr, ndm->ndm_state,
				    nlh_flags, vid, ndm->ndm_flags);
		spin_unlock_bh(&br->hash_lock);
	}

	return err;
}
/* Add new permanent fdb entry with RTM_NEWNEIGH */
int br_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
	       struct net_device *dev,
	       const unsigned char *addr, u16 vid, u16 nlh_flags,
	       struct netlink_ext_ack *extack)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br = NULL;
	int err = 0;

	trace_br_fdb_add(ndm, dev, addr, vid, nlh_flags);

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_NOARP|NUD_REACHABLE))) {
		pr_info("bridge: RTM_NEWNEIGH with invalid state %#x\n", ndm->ndm_state);
		return -EINVAL;
	}

	if (is_zero_ether_addr(addr)) {
		pr_info("bridge: RTM_NEWNEIGH with invalid ether address\n");
		return -EINVAL;
	}

	/* dev may be the bridge master or one of its ports */
	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_NEWNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		br = p->br;
		vg = nbp_vlan_group(p);
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v || !br_vlan_should_use(v)) {
			pr_info("bridge: RTM_NEWNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		/* VID was specified, so use it. */
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, vid);
	} else {
		err = __br_fdb_add(ndm, br, p, addr, nlh_flags, 0);
		if (err || !vg || !vg->num_vlans)
			goto out;

		/* We have vlans configured on this port and user didn't
		 * specify a VLAN.  To be nice, add/update entry for every
		 * vlan on this port.
		 */
		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err = __br_fdb_add(ndm, br, p, addr, nlh_flags, v->vid);
			if (err)
				goto out;
		}
	}

out:
	return err;
}
  834. static int fdb_delete_by_addr_and_port(struct net_bridge *br,
  835. const struct net_bridge_port *p,
  836. const u8 *addr, u16 vlan)
  837. {
  838. struct net_bridge_fdb_entry *fdb;
  839. fdb = br_fdb_find(br, addr, vlan);
  840. if (!fdb || fdb->dst != p)
  841. return -ENOENT;
  842. fdb_delete(br, fdb, true);
  843. return 0;
  844. }
/* Locked wrapper around fdb_delete_by_addr_and_port(). */
static int __br_fdb_delete(struct net_bridge *br,
			   const struct net_bridge_port *p,
			   const unsigned char *addr, u16 vid)
{
	int err;

	spin_lock_bh(&br->hash_lock);
	err = fdb_delete_by_addr_and_port(br, p, addr, vid);
	spin_unlock_bh(&br->hash_lock);

	return err;
}
/* Remove neighbor entry with RTM_DELNEIGH */
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
		  struct net_device *dev,
		  const unsigned char *addr, u16 vid)
{
	struct net_bridge_vlan_group *vg;
	struct net_bridge_port *p = NULL;
	struct net_bridge_vlan *v;
	struct net_bridge *br;
	int err;

	/* dev may be the bridge master or one of its ports */
	if (dev->priv_flags & IFF_EBRIDGE) {
		br = netdev_priv(dev);
		vg = br_vlan_group(br);
	} else {
		p = br_port_get_rtnl(dev);
		if (!p) {
			pr_info("bridge: RTM_DELNEIGH %s not a bridge port\n",
				dev->name);
			return -EINVAL;
		}
		vg = nbp_vlan_group(p);
		br = p->br;
	}

	if (vid) {
		v = br_vlan_find(vg, vid);
		if (!v) {
			pr_info("bridge: RTM_DELNEIGH with unconfigured vlan %d on %s\n", vid, dev->name);
			return -EINVAL;
		}

		err = __br_fdb_delete(br, p, addr, vid);
	} else {
		err = -ENOENT;
		/* AND-accumulation: since 0 & x == 0, the overall
		 * result is success if the delete succeeded on at
		 * least one vlan, -ENOENT if it failed on all
		 */
		err &= __br_fdb_delete(br, p, addr, 0);
		if (!vg || !vg->num_vlans)
			return err;

		list_for_each_entry(v, &vg->vlan_list, vlist) {
			if (!br_vlan_should_use(v))
				continue;
			err &= __br_fdb_delete(br, p, addr, v->vid);
		}
	}

	return err;
}
/* Push all static fdb addresses into the unicast filter of port p,
 * e.g. when a port leaves promiscuous mode.  On failure, everything
 * added so far is rolled back.  Called under RTNL.
 */
int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f, *tmp;
	int err = 0;

	ASSERT_RTNL();

	/* the key here is that static entries change only under rtnl */
	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;
		err = dev_uc_add(p->dev, f->key.addr.addr);
		if (err)
			goto rollback;
	}
done:
	rcu_read_unlock();

	return err;

rollback:
	/* undo the additions made before the failing entry "f" */
	hlist_for_each_entry_rcu(tmp, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &tmp->flags))
			continue;
		if (tmp == f)
			break;
		dev_uc_del(p->dev, tmp->key.addr.addr);
	}

	goto done;
}
/* Undo br_fdb_sync_static(): remove all static fdb addresses from the
 * unicast filter of port p.  Called under RTNL.
 */
void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p)
{
	struct net_bridge_fdb_entry *f;

	ASSERT_RTNL();

	rcu_read_lock();
	hlist_for_each_entry_rcu(f, &br->fdb_list, fdb_node) {
		/* We only care for static entries */
		if (!test_bit(BR_FDB_STATIC, &f->flags))
			continue;

		dev_uc_del(p->dev, f->key.addr.addr);
	}
	rcu_read_unlock();
}
/* Add or refresh an entry learned externally (switchdev driver or
 * userspace).  swdev_notify is true when the request originates from
 * userspace/bridge and the change should be propagated to switchdev;
 * false when it came from a switchdev driver itself.
 */
int br_fdb_external_learn_add(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	bool modified = false;
	int err = 0;

	trace_br_fdb_external_learn_add(br, p, addr, vid);

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (!fdb) {
		unsigned long flags = BIT(BR_FDB_ADDED_BY_EXT_LEARN);

		if (swdev_notify)
			flags |= BIT(BR_FDB_ADDED_BY_USER);
		fdb = fdb_create(br, p, addr, vid, flags);
		if (!fdb) {
			err = -ENOMEM;
			goto err_unlock;
		}
		fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	} else {
		fdb->updated = jiffies;

		if (fdb->dst != p) {
			fdb->dst = p;
			modified = true;
		}

		if (test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags)) {
			/* Refresh entry */
			fdb->used = jiffies;
		} else if (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags)) {
			/* Take over SW learned entry */
			set_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags);
			modified = true;
		}

		if (swdev_notify)
			set_bit(BR_FDB_ADDED_BY_USER, &fdb->flags);

		if (modified)
			fdb_notify(br, fdb, RTM_NEWNEIGH, swdev_notify);
	}

err_unlock:
	spin_unlock_bh(&br->hash_lock);

	return err;
}
/* Delete an externally learned entry.  Returns -ENOENT when the entry
 * does not exist or was not externally learned.
 */
int br_fdb_external_learn_del(struct net_bridge *br, struct net_bridge_port *p,
			      const unsigned char *addr, u16 vid,
			      bool swdev_notify)
{
	struct net_bridge_fdb_entry *fdb;
	int err = 0;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	if (fdb && test_bit(BR_FDB_ADDED_BY_EXT_LEARN, &fdb->flags))
		fdb_delete(br, fdb, swdev_notify);
	else
		err = -ENOENT;

	spin_unlock_bh(&br->hash_lock);

	return err;
}
/* Set or clear the "offloaded to hardware" flag on an entry, as
 * reported by a switchdev driver.
 */
void br_fdb_offloaded_set(struct net_bridge *br, struct net_bridge_port *p,
			  const unsigned char *addr, u16 vid, bool offloaded)
{
	struct net_bridge_fdb_entry *fdb;

	spin_lock_bh(&br->hash_lock);

	fdb = br_fdb_find(br, addr, vid);
	/* change_bit only when the flag actually differs */
	if (fdb && offloaded != test_bit(BR_FDB_OFFLOADED, &fdb->flags))
		change_bit(BR_FDB_OFFLOADED, &fdb->flags);

	spin_unlock_bh(&br->hash_lock);
}
/* Clear the offloaded flag on every entry of a port/vlan pair, e.g.
 * when the hardware forgets its entries.  Called under RTNL by
 * switchdev drivers.
 */
void br_fdb_clear_offload(const struct net_device *dev, u16 vid)
{
	struct net_bridge_fdb_entry *f;
	struct net_bridge_port *p;

	ASSERT_RTNL();

	p = br_port_get_rtnl(dev);
	if (!p)
		return;

	spin_lock_bh(&p->br->hash_lock);
	hlist_for_each_entry(f, &p->br->fdb_list, fdb_node) {
		if (f->dst == p && f->key.vlan_id == vid)
			clear_bit(BR_FDB_OFFLOADED, &f->flags);
	}
	spin_unlock_bh(&p->br->hash_lock);
}
EXPORT_SYMBOL_GPL(br_fdb_clear_offload);