
/net/tipc/node.c

http://github.com/mirrors/linux
  1. /*
  2. * net/tipc/node.c: TIPC node management routines
  3. *
  4. * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
  5. * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  6. * All rights reserved.
  7. *
  8. * Redistribution and use in source and binary forms, with or without
  9. * modification, are permitted provided that the following conditions are met:
  10. *
  11. * 1. Redistributions of source code must retain the above copyright
  12. * notice, this list of conditions and the following disclaimer.
  13. * 2. Redistributions in binary form must reproduce the above copyright
  14. * notice, this list of conditions and the following disclaimer in the
  15. * documentation and/or other materials provided with the distribution.
  16. * 3. Neither the names of the copyright holders nor the names of its
  17. * contributors may be used to endorse or promote products derived from
  18. * this software without specific prior written permission.
  19. *
  20. * Alternatively, this software may be distributed under the terms of the
  21. * GNU General Public License ("GPL") version 2 as published by the Free
  22. * Software Foundation.
  23. *
  24. * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  25. * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  26. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  27. * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  28. * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  29. * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  30. * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  31. * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  32. * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  33. * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  34. * POSSIBILITY OF SUCH DAMAGE.
  35. */
  36. #include "core.h"
  37. #include "link.h"
  38. #include "node.h"
  39. #include "name_distr.h"
  40. #include "socket.h"
  41. #include "bcast.h"
  42. #include "monitor.h"
  43. #include "discover.h"
  44. #include "netlink.h"
  45. #include "trace.h"
  46. #include "crypto.h"
  47. #define INVALID_NODE_SIG 0x10000
  48. #define NODE_CLEANUP_AFTER 300000
  49. /* Flags used to take different actions according to flag type
  50. * TIPC_NOTIFY_NODE_DOWN: notify node is down
  51. * TIPC_NOTIFY_NODE_UP: notify node is up
  52. * TIPC_NOTIFY_LINK_UP/DOWN: publish or withdraw a link state name
  53. */
  54. enum {
  55. TIPC_NOTIFY_NODE_DOWN = (1 << 3),
  56. TIPC_NOTIFY_NODE_UP = (1 << 4),
  57. TIPC_NOTIFY_LINK_UP = (1 << 6),
  58. TIPC_NOTIFY_LINK_DOWN = (1 << 7)
  59. };
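/* Note: the flags above are set while the node write lock is held; the
 * corresponding notifications are issued later, in tipc_node_write_unlock()
 * below, after the lock has been released.
 */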
  60. struct tipc_link_entry {
  61. struct tipc_link *link;
  62. spinlock_t lock; /* per link */
  63. u32 mtu;
  64. struct sk_buff_head inputq;
  65. struct tipc_media_addr maddr;
  66. };
  67. struct tipc_bclink_entry {
  68. struct tipc_link *link;
  69. struct sk_buff_head inputq1;
  70. struct sk_buff_head arrvq;
  71. struct sk_buff_head inputq2;
  72. struct sk_buff_head namedq;
  73. };
  74. /**
  75. * struct tipc_node - TIPC node structure
  76. * @addr: network address of node
  77. * @kref: reference counter to node object
  78. * @lock: rwlock governing access to structure
  79. * @net: the applicable net namespace
  80. * @hash: links to adjacent nodes in unsorted hash chain
  81. * @inputq: pointer to input queue containing messages for msg event
  82. * @namedq: pointer to name table input queue with name table messages
  83. * @active_links: bearer ids of active links, used as index into links[] array
  84. * @links: array containing references to all links to node
  85. * @action_flags: bit mask of different types of node actions
  86. * @state: connectivity state vs peer node
  88. * @preliminary: whether this is still a preliminary node (address not yet confirmed)
  88. * @sync_point: sequence number where synch/failover is finished
  89. * @list: links to adjacent nodes in sorted list of cluster's nodes
  90. * @working_links: number of working links to node (both active and standby)
  91. * @link_cnt: number of links to node
  92. * @capabilities: bitmap, indicating peer node's functional capabilities
  93. * @signature: node instance identifier
  94. * @link_id: local and remote bearer ids of changing link, if any
  95. * @publ_list: list of publications
  96. * @rcu: rcu struct for tipc_node
  97. * @delete_at: indicates the time for deleting a down node
  98. * @crypto_rx: RX crypto handler
  99. */
  100. struct tipc_node {
  101. u32 addr;
  102. struct kref kref;
  103. rwlock_t lock;
  104. struct net *net;
  105. struct hlist_node hash;
  106. int active_links[2];
  107. struct tipc_link_entry links[MAX_BEARERS];
  108. struct tipc_bclink_entry bc_entry;
  109. int action_flags;
  110. struct list_head list;
  111. int state;
  112. bool preliminary;
  113. bool failover_sent;
  114. u16 sync_point;
  115. int link_cnt;
  116. u16 working_links;
  117. u16 capabilities;
  118. u32 signature;
  119. u32 link_id;
  120. u8 peer_id[16];
  121. char peer_id_string[NODE_ID_STR_LEN];
  122. struct list_head publ_list;
  123. struct list_head conn_sks;
  124. unsigned long keepalive_intv;
  125. struct timer_list timer;
  126. struct rcu_head rcu;
  127. unsigned long delete_at;
  128. struct net *peer_net;
  129. u32 peer_hash_mix;
  130. #ifdef CONFIG_TIPC_CRYPTO
  131. struct tipc_crypto *crypto_rx;
  132. #endif
  133. };
  134. /* Node FSM states and events:
  135. */
  136. enum {
  137. SELF_DOWN_PEER_DOWN = 0xdd,
  138. SELF_UP_PEER_UP = 0xaa,
  139. SELF_DOWN_PEER_LEAVING = 0xd1,
  140. SELF_UP_PEER_COMING = 0xac,
  141. SELF_COMING_PEER_UP = 0xca,
  142. SELF_LEAVING_PEER_DOWN = 0x1d,
  143. NODE_FAILINGOVER = 0xf0,
  144. NODE_SYNCHING = 0xcc
  145. };
  146. enum {
  147. SELF_ESTABL_CONTACT_EVT = 0xece,
  148. SELF_LOST_CONTACT_EVT = 0x1ce,
  149. PEER_ESTABL_CONTACT_EVT = 0x9ece,
  150. PEER_LOST_CONTACT_EVT = 0x91ce,
  151. NODE_FAILOVER_BEGIN_EVT = 0xfbe,
  152. NODE_FAILOVER_END_EVT = 0xfee,
  153. NODE_SYNCH_BEGIN_EVT = 0xcbe,
  154. NODE_SYNCH_END_EVT = 0xcee
  155. };
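/* The state values above appear to encode self/peer status in the two hex
 * digits (0xdd = both down, 0xaa = both up, 0xac = self up / peer coming,
 * and so on); the actual transition table is implemented in
 * tipc_node_fsm_evt() further down in this file.
 */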
  156. static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
  157. struct sk_buff_head *xmitq,
  158. struct tipc_media_addr **maddr);
  159. static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
  160. bool delete);
  161. static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
  162. static void tipc_node_delete(struct tipc_node *node);
  163. static void tipc_node_timeout(struct timer_list *t);
  164. static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
  165. static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
  166. static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
  167. static bool node_is_up(struct tipc_node *n);
  168. static void tipc_node_delete_from_list(struct tipc_node *node);
  169. struct tipc_sock_conn {
  170. u32 port;
  171. u32 peer_port;
  172. u32 peer_node;
  173. struct list_head list;
  174. };
  175. static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
  176. {
  177. int bearer_id = n->active_links[sel & 1];
  178. if (unlikely(bearer_id == INVALID_BEARER_ID))
  179. return NULL;
  180. return n->links[bearer_id].link;
  181. }
  182. int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
  183. {
  184. struct tipc_node *n;
  185. int bearer_id;
  186. unsigned int mtu = MAX_MSG_SIZE;
  187. n = tipc_node_find(net, addr);
  188. if (unlikely(!n))
  189. return mtu;
  190. /* Allow MAX_MSG_SIZE when building a connection oriented message
  191. * if both nodes are in the same core network
  192. */
  193. if (n->peer_net && connected) {
  194. tipc_node_put(n);
  195. return mtu;
  196. }
  197. bearer_id = n->active_links[sel & 1];
  198. if (likely(bearer_id != INVALID_BEARER_ID))
  199. mtu = n->links[bearer_id].mtu;
  200. tipc_node_put(n);
  201. return mtu;
  202. }
  203. bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
  204. {
  205. u8 *own_id = tipc_own_id(net);
  206. struct tipc_node *n;
  207. if (!own_id)
  208. return true;
  209. if (addr == tipc_own_addr(net)) {
  210. memcpy(id, own_id, TIPC_NODEID_LEN);
  211. return true;
  212. }
  213. n = tipc_node_find(net, addr);
  214. if (!n)
  215. return false;
  216. memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
  217. tipc_node_put(n);
  218. return true;
  219. }
  220. u16 tipc_node_get_capabilities(struct net *net, u32 addr)
  221. {
  222. struct tipc_node *n;
  223. u16 caps;
  224. n = tipc_node_find(net, addr);
  225. if (unlikely(!n))
  226. return TIPC_NODE_CAPABILITIES;
  227. caps = n->capabilities;
  228. tipc_node_put(n);
  229. return caps;
  230. }
  231. u32 tipc_node_get_addr(struct tipc_node *node)
  232. {
  233. return (node) ? node->addr : 0;
  234. }
  235. char *tipc_node_get_id_str(struct tipc_node *node)
  236. {
  237. return node->peer_id_string;
  238. }
  239. #ifdef CONFIG_TIPC_CRYPTO
  240. /**
  241. * tipc_node_crypto_rx - Retrieve crypto RX handle from node
  242. * Note: node ref counter must be held first!
  243. */
  244. struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
  245. {
  246. return (__n) ? __n->crypto_rx : NULL;
  247. }
  248. struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
  249. {
  250. return container_of(pos, struct tipc_node, list)->crypto_rx;
  251. }
  252. #endif
  253. static void tipc_node_free(struct rcu_head *rp)
  254. {
  255. struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
  256. #ifdef CONFIG_TIPC_CRYPTO
  257. tipc_crypto_stop(&n->crypto_rx);
  258. #endif
  259. kfree(n);
  260. }
  261. static void tipc_node_kref_release(struct kref *kref)
  262. {
  263. struct tipc_node *n = container_of(kref, struct tipc_node, kref);
  264. kfree(n->bc_entry.link);
  265. call_rcu(&n->rcu, tipc_node_free);
  266. }
  267. void tipc_node_put(struct tipc_node *node)
  268. {
  269. kref_put(&node->kref, tipc_node_kref_release);
  270. }
  271. static void tipc_node_get(struct tipc_node *node)
  272. {
  273. kref_get(&node->kref);
  274. }
  275. /*
  276. * tipc_node_find - locate specified node object, if it exists
  277. */
  278. static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
  279. {
  280. struct tipc_net *tn = tipc_net(net);
  281. struct tipc_node *node;
  282. unsigned int thash = tipc_hashfn(addr);
  283. rcu_read_lock();
  284. hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
  285. if (node->addr != addr || node->preliminary)
  286. continue;
  287. if (!kref_get_unless_zero(&node->kref))
  288. node = NULL;
  289. break;
  290. }
  291. rcu_read_unlock();
  292. return node;
  293. }
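/* A node returned by tipc_node_find() carries a reference taken with
 * kref_get_unless_zero(); callers in this file drop it again with
 * tipc_node_put() once they are done with the node.
 */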
  294. /* tipc_node_find_by_id - locate specified node object by its 128-bit id
  295. * Note: this function is called only when a discovery request failed
  296. * to find the node by its 32-bit id, and is not time critical
  297. */
  298. static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
  299. {
  300. struct tipc_net *tn = tipc_net(net);
  301. struct tipc_node *n;
  302. bool found = false;
  303. rcu_read_lock();
  304. list_for_each_entry_rcu(n, &tn->node_list, list) {
  305. read_lock_bh(&n->lock);
  306. if (!memcmp(id, n->peer_id, 16) &&
  307. kref_get_unless_zero(&n->kref))
  308. found = true;
  309. read_unlock_bh(&n->lock);
  310. if (found)
  311. break;
  312. }
  313. rcu_read_unlock();
  314. return found ? n : NULL;
  315. }
  316. static void tipc_node_read_lock(struct tipc_node *n)
  317. {
  318. read_lock_bh(&n->lock);
  319. }
  320. static void tipc_node_read_unlock(struct tipc_node *n)
  321. {
  322. read_unlock_bh(&n->lock);
  323. }
  324. static void tipc_node_write_lock(struct tipc_node *n)
  325. {
  326. write_lock_bh(&n->lock);
  327. }
  328. static void tipc_node_write_unlock_fast(struct tipc_node *n)
  329. {
  330. write_unlock_bh(&n->lock);
  331. }
  332. static void tipc_node_write_unlock(struct tipc_node *n)
  333. {
  334. struct net *net = n->net;
  335. u32 addr = 0;
  336. u32 flags = n->action_flags;
  337. u32 link_id = 0;
  338. u32 bearer_id;
  339. struct list_head *publ_list;
  340. if (likely(!flags)) {
  341. write_unlock_bh(&n->lock);
  342. return;
  343. }
  344. addr = n->addr;
  345. link_id = n->link_id;
  346. bearer_id = link_id & 0xffff;
  347. publ_list = &n->publ_list;
  348. n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
  349. TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
  350. write_unlock_bh(&n->lock);
  351. if (flags & TIPC_NOTIFY_NODE_DOWN)
  352. tipc_publ_notify(net, publ_list, addr);
  353. if (flags & TIPC_NOTIFY_NODE_UP)
  354. tipc_named_node_up(net, addr);
  355. if (flags & TIPC_NOTIFY_LINK_UP) {
  356. tipc_mon_peer_up(net, addr, bearer_id);
  357. tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
  358. TIPC_NODE_SCOPE, link_id, link_id);
  359. }
  360. if (flags & TIPC_NOTIFY_LINK_DOWN) {
  361. tipc_mon_peer_down(net, addr, bearer_id);
  362. tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
  363. addr, link_id);
  364. }
  365. }
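/* tipc_node_write_unlock() follows a snapshot-clear-unlock-notify pattern:
 * the action flags are copied and cleared while the write lock is held, and
 * the name table and monitor callbacks run only after the lock is released,
 * presumably so they are never invoked with the node lock held.
 */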
  366. static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
  367. {
  368. int net_id = tipc_netid(n->net);
  369. struct tipc_net *tn_peer;
  370. struct net *tmp;
  371. u32 hash_chk;
  372. if (n->peer_net)
  373. return;
  374. for_each_net_rcu(tmp) {
  375. tn_peer = tipc_net(tmp);
  376. if (!tn_peer)
  377. continue;
  378. /* Integrity check: does the node actually exist in this namespace? */
  379. if (tn_peer->net_id != net_id)
  380. continue;
  381. if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
  382. continue;
  383. hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
  384. if (hash_mixes ^ hash_chk)
  385. continue;
  386. n->peer_net = tmp;
  387. n->peer_hash_mix = hash_mixes;
  388. break;
  389. }
  390. }
  391. struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
  392. u16 capabilities, u32 hash_mixes,
  393. bool preliminary)
  394. {
  395. struct tipc_net *tn = net_generic(net, tipc_net_id);
  396. struct tipc_node *n, *temp_node;
  397. struct tipc_link *l;
  398. unsigned long intv;
  399. int bearer_id;
  400. int i;
  401. spin_lock_bh(&tn->node_list_lock);
  402. n = tipc_node_find(net, addr) ?:
  403. tipc_node_find_by_id(net, peer_id);
  404. if (n) {
  405. if (!n->preliminary)
  406. goto update;
  407. if (preliminary)
  408. goto exit;
  409. /* A preliminary node becomes "real" now, refresh its data */
  410. tipc_node_write_lock(n);
  411. n->preliminary = false;
  412. n->addr = addr;
  413. hlist_del_rcu(&n->hash);
  414. hlist_add_head_rcu(&n->hash,
  415. &tn->node_htable[tipc_hashfn(addr)]);
  416. list_del_rcu(&n->list);
  417. list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
  418. if (n->addr < temp_node->addr)
  419. break;
  420. }
  421. list_add_tail_rcu(&n->list, &temp_node->list);
  422. tipc_node_write_unlock_fast(n);
  423. update:
  424. if (n->peer_hash_mix ^ hash_mixes)
  425. tipc_node_assign_peer_net(n, hash_mixes);
  426. if (n->capabilities == capabilities)
  427. goto exit;
  428. /* Same node may come back with new capabilities */
  429. tipc_node_write_lock(n);
  430. n->capabilities = capabilities;
  431. for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
  432. l = n->links[bearer_id].link;
  433. if (l)
  434. tipc_link_update_caps(l, capabilities);
  435. }
  436. tipc_node_write_unlock_fast(n);
  437. /* Calculate cluster capabilities */
  438. tn->capabilities = TIPC_NODE_CAPABILITIES;
  439. list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
  440. tn->capabilities &= temp_node->capabilities;
  441. }
  442. tipc_bcast_toggle_rcast(net,
  443. (tn->capabilities & TIPC_BCAST_RCAST));
  444. goto exit;
  445. }
  446. n = kzalloc(sizeof(*n), GFP_ATOMIC);
  447. if (!n) {
  448. pr_warn("Node creation failed, no memory\n");
  449. goto exit;
  450. }
  451. tipc_nodeid2string(n->peer_id_string, peer_id);
  452. #ifdef CONFIG_TIPC_CRYPTO
  453. if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
  454. pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
  455. kfree(n);
  456. n = NULL;
  457. goto exit;
  458. }
  459. #endif
  460. n->addr = addr;
  461. n->preliminary = preliminary;
  462. memcpy(&n->peer_id, peer_id, 16);
  463. n->net = net;
  464. n->peer_net = NULL;
  465. n->peer_hash_mix = 0;
  466. /* Assign kernel-local peer namespace, if one exists */
  467. tipc_node_assign_peer_net(n, hash_mixes);
  468. n->capabilities = capabilities;
  469. kref_init(&n->kref);
  470. rwlock_init(&n->lock);
  471. INIT_HLIST_NODE(&n->hash);
  472. INIT_LIST_HEAD(&n->list);
  473. INIT_LIST_HEAD(&n->publ_list);
  474. INIT_LIST_HEAD(&n->conn_sks);
  475. skb_queue_head_init(&n->bc_entry.namedq);
  476. skb_queue_head_init(&n->bc_entry.inputq1);
  477. __skb_queue_head_init(&n->bc_entry.arrvq);
  478. skb_queue_head_init(&n->bc_entry.inputq2);
  479. for (i = 0; i < MAX_BEARERS; i++)
  480. spin_lock_init(&n->links[i].lock);
  481. n->state = SELF_DOWN_PEER_LEAVING;
  482. n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
  483. n->signature = INVALID_NODE_SIG;
  484. n->active_links[0] = INVALID_BEARER_ID;
  485. n->active_links[1] = INVALID_BEARER_ID;
  486. n->bc_entry.link = NULL;
  487. tipc_node_get(n);
  488. timer_setup(&n->timer, tipc_node_timeout, 0);
  489. /* Start a slow timer anyway, crypto needs it */
  490. n->keepalive_intv = 10000;
  491. intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
  492. if (!mod_timer(&n->timer, intv))
  493. tipc_node_get(n);
  494. hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
  495. list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
  496. if (n->addr < temp_node->addr)
  497. break;
  498. }
  499. list_add_tail_rcu(&n->list, &temp_node->list);
  500. /* Calculate cluster capabilities */
  501. tn->capabilities = TIPC_NODE_CAPABILITIES;
  502. list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
  503. tn->capabilities &= temp_node->capabilities;
  504. }
  505. tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
  506. trace_tipc_node_create(n, true, " ");
  507. exit:
  508. spin_unlock_bh(&tn->node_list_lock);
  509. return n;
  510. }
  511. static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
  512. {
  513. unsigned long tol = tipc_link_tolerance(l);
  514. unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
  515. /* Link with lowest tolerance determines timer interval */
  516. if (intv < n->keepalive_intv)
  517. n->keepalive_intv = intv;
  518. /* Ensure link's abort limit corresponds to current tolerance */
  519. tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
  520. }
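/* Worked example with hypothetical numbers: a link tolerance of 1500 ms
 * gives min(1500 / 4, 500) = 375 ms; if that lowers keepalive_intv to 375,
 * the abort limit becomes 1500 / 375 = 4, i.e. roughly four silent probe
 * intervals before the link is considered failed.
 */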
  521. static void tipc_node_delete_from_list(struct tipc_node *node)
  522. {
  523. list_del_rcu(&node->list);
  524. hlist_del_rcu(&node->hash);
  525. tipc_node_put(node);
  526. }
  527. static void tipc_node_delete(struct tipc_node *node)
  528. {
  529. trace_tipc_node_delete(node, true, " ");
  530. tipc_node_delete_from_list(node);
  531. del_timer_sync(&node->timer);
  532. tipc_node_put(node);
  533. }
  534. void tipc_node_stop(struct net *net)
  535. {
  536. struct tipc_net *tn = tipc_net(net);
  537. struct tipc_node *node, *t_node;
  538. spin_lock_bh(&tn->node_list_lock);
  539. list_for_each_entry_safe(node, t_node, &tn->node_list, list)
  540. tipc_node_delete(node);
  541. spin_unlock_bh(&tn->node_list_lock);
  542. }
  543. void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
  544. {
  545. struct tipc_node *n;
  546. if (in_own_node(net, addr))
  547. return;
  548. n = tipc_node_find(net, addr);
  549. if (!n) {
  550. pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
  551. return;
  552. }
  553. tipc_node_write_lock(n);
  554. list_add_tail(subscr, &n->publ_list);
  555. tipc_node_write_unlock_fast(n);
  556. tipc_node_put(n);
  557. }
  558. void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
  559. {
  560. struct tipc_node *n;
  561. if (in_own_node(net, addr))
  562. return;
  563. n = tipc_node_find(net, addr);
  564. if (!n) {
  565. pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
  566. return;
  567. }
  568. tipc_node_write_lock(n);
  569. list_del_init(subscr);
  570. tipc_node_write_unlock_fast(n);
  571. tipc_node_put(n);
  572. }
  573. int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
  574. {
  575. struct tipc_node *node;
  576. struct tipc_sock_conn *conn;
  577. int err = 0;
  578. if (in_own_node(net, dnode))
  579. return 0;
  580. node = tipc_node_find(net, dnode);
  581. if (!node) {
  582. pr_warn("Connecting sock to node 0x%x failed\n", dnode);
  583. return -EHOSTUNREACH;
  584. }
  585. conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
  586. if (!conn) {
  587. err = -EHOSTUNREACH;
  588. goto exit;
  589. }
  590. conn->peer_node = dnode;
  591. conn->port = port;
  592. conn->peer_port = peer_port;
  593. tipc_node_write_lock(node);
  594. list_add_tail(&conn->list, &node->conn_sks);
  595. tipc_node_write_unlock(node);
  596. exit:
  597. tipc_node_put(node);
  598. return err;
  599. }
  600. void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
  601. {
  602. struct tipc_node *node;
  603. struct tipc_sock_conn *conn, *safe;
  604. if (in_own_node(net, dnode))
  605. return;
  606. node = tipc_node_find(net, dnode);
  607. if (!node)
  608. return;
  609. tipc_node_write_lock(node);
  610. list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
  611. if (port != conn->port)
  612. continue;
  613. list_del(&conn->list);
  614. kfree(conn);
  615. }
  616. tipc_node_write_unlock(node);
  617. tipc_node_put(node);
  618. }
  619. static void tipc_node_clear_links(struct tipc_node *node)
  620. {
  621. int i;
  622. for (i = 0; i < MAX_BEARERS; i++) {
  623. struct tipc_link_entry *le = &node->links[i];
  624. if (le->link) {
  625. kfree(le->link);
  626. le->link = NULL;
  627. node->link_cnt--;
  628. }
  629. }
  630. }
  631. /* tipc_node_cleanup - delete nodes that have not had
  632. * active links for NODE_CLEANUP_AFTER time
  633. */
  634. static bool tipc_node_cleanup(struct tipc_node *peer)
  635. {
  636. struct tipc_node *temp_node;
  637. struct tipc_net *tn = tipc_net(peer->net);
  638. bool deleted = false;
  639. /* If lock held by tipc_node_stop() the node will be deleted anyway */
  640. if (!spin_trylock_bh(&tn->node_list_lock))
  641. return false;
  642. tipc_node_write_lock(peer);
  643. if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
  644. tipc_node_clear_links(peer);
  645. tipc_node_delete_from_list(peer);
  646. deleted = true;
  647. }
  648. tipc_node_write_unlock(peer);
  649. if (!deleted) {
  650. spin_unlock_bh(&tn->node_list_lock);
  651. return deleted;
  652. }
  653. /* Calculate cluster capabilities */
  654. tn->capabilities = TIPC_NODE_CAPABILITIES;
  655. list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
  656. tn->capabilities &= temp_node->capabilities;
  657. }
  658. tipc_bcast_toggle_rcast(peer->net,
  659. (tn->capabilities & TIPC_BCAST_RCAST));
  660. spin_unlock_bh(&tn->node_list_lock);
  661. return deleted;
  662. }
  663. /* tipc_node_timeout - handle expiration of node timer
  664. */
  665. static void tipc_node_timeout(struct timer_list *t)
  666. {
  667. struct tipc_node *n = from_timer(n, t, timer);
  668. struct tipc_link_entry *le;
  669. struct sk_buff_head xmitq;
  670. int remains = n->link_cnt;
  671. int bearer_id;
  672. int rc = 0;
  673. trace_tipc_node_timeout(n, false, " ");
  674. if (!node_is_up(n) && tipc_node_cleanup(n)) {
  675. /* Drop the timer's reference to the node */
  676. tipc_node_put(n);
  677. return;
  678. }
  679. #ifdef CONFIG_TIPC_CRYPTO
  680. /* Take any crypto key related actions first */
  681. tipc_crypto_timeout(n->crypto_rx);
  682. #endif
  683. __skb_queue_head_init(&xmitq);
  684. /* Reset the node interval to a large initial value (10 seconds); it will
  685. * be recalculated below from the lowest link tolerance
  686. */
  687. tipc_node_read_lock(n);
  688. n->keepalive_intv = 10000;
  689. tipc_node_read_unlock(n);
  690. for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
  691. tipc_node_read_lock(n);
  692. le = &n->links[bearer_id];
  693. if (le->link) {
  694. spin_lock_bh(&le->lock);
  695. /* Link tolerance may change asynchronously: */
  696. tipc_node_calculate_timer(n, le->link);
  697. rc = tipc_link_timeout(le->link, &xmitq);
  698. spin_unlock_bh(&le->lock);
  699. remains--;
  700. }
  701. tipc_node_read_unlock(n);
  702. tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
  703. if (rc & TIPC_LINK_DOWN_EVT)
  704. tipc_node_link_down(n, bearer_id, false);
  705. }
  706. mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
  707. }
  708. /**
  709. * __tipc_node_link_up - handle addition of link
  710. * Node lock must be held by caller
  711. * Link becomes active (alone or shared) or standby, depending on its priority.
  712. */
  713. static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
  714. struct sk_buff_head *xmitq)
  715. {
  716. int *slot0 = &n->active_links[0];
  717. int *slot1 = &n->active_links[1];
  718. struct tipc_link *ol = node_active_link(n, 0);
  719. struct tipc_link *nl = n->links[bearer_id].link;
  720. if (!nl || tipc_link_is_up(nl))
  721. return;
  722. tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
  723. if (!tipc_link_is_up(nl))
  724. return;
  725. n->working_links++;
  726. n->action_flags |= TIPC_NOTIFY_LINK_UP;
  727. n->link_id = tipc_link_id(nl);
  728. /* Leave room for tunnel header when returning 'mtu' to users: */
  729. n->links[bearer_id].mtu = tipc_link_mss(nl);
  730. tipc_bearer_add_dest(n->net, bearer_id, n->addr);
  731. tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
  732. pr_debug("Established link <%s> on network plane %c\n",
  733. tipc_link_name(nl), tipc_link_plane(nl));
  734. trace_tipc_node_link_up(n, true, " ");
  735. /* Ensure that a STATE message goes first */
  736. tipc_link_build_state_msg(nl, xmitq);
  737. /* First link? => give it both slots */
  738. if (!ol) {
  739. *slot0 = bearer_id;
  740. *slot1 = bearer_id;
  741. tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
  742. n->action_flags |= TIPC_NOTIFY_NODE_UP;
  743. tipc_link_set_active(nl, true);
  744. tipc_bcast_add_peer(n->net, nl, xmitq);
  745. return;
  746. }
  747. /* Second link => redistribute slots */
  748. if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
  749. pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
  750. *slot0 = bearer_id;
  751. *slot1 = bearer_id;
  752. tipc_link_set_active(nl, true);
  753. tipc_link_set_active(ol, false);
  754. } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
  755. tipc_link_set_active(nl, true);
  756. *slot1 = bearer_id;
  757. } else {
  758. pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
  759. }
  760. /* Prepare synchronization with first link */
  761. tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
  762. }
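/* Slot policy in __tipc_node_link_up(): the first link up takes both active
 * slots; a later link with higher priority takes over both slots and demotes
 * the old link to standby; an equal-priority link gets slot 1 so both links
 * can carry traffic; a lower-priority link simply stays standby.
 */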
  763. /**
  764. * tipc_node_link_up - handle addition of link
  765. *
  766. * Link becomes active (alone or shared) or standby, depending on its priority.
  767. */
  768. static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
  769. struct sk_buff_head *xmitq)
  770. {
  771. struct tipc_media_addr *maddr;
  772. tipc_node_write_lock(n);
  773. __tipc_node_link_up(n, bearer_id, xmitq);
  774. maddr = &n->links[bearer_id].maddr;
  775. tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
  776. tipc_node_write_unlock(n);
  777. }
  778. /**
  779. * tipc_node_link_failover() - start failover in case of "half-failover"
  780. *
  781. * This function is only called in a very special situation where link
  782. * failover can be already started on peer node but not on this node.
  783. * This can happen when e.g.
  784. * 1. Both links <1A-2A>, <1B-2B> down
  785. * 2. Link endpoint 2A up, but 1A still down (e.g. due to network
  786. * disturbance, wrong session, etc.)
  787. * 3. Link <1B-2B> up
  788. * 4. Link endpoint 2A down (e.g. due to link tolerance timeout)
  789. * 5. Node 2 starts failover onto link <1B-2B>
  790. *
  791. * ==> Node 1 never starts link/node failover!
  792. *
  793. * @n: tipc node structure
  794. * @l: link peer endpoint that is failing over (can be NULL)
  795. * @tnl: tunnel link
  796. * @xmitq: queue for messages to be transmitted on the tnl link later
  797. */
  798. static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
  799. struct tipc_link *tnl,
  800. struct sk_buff_head *xmitq)
  801. {
  802. /* Avoid a "self-failover" that can never end */
  803. if (!tipc_link_is_up(tnl))
  804. return;
  805. /* Don't rush, the failed link may be in the process of resetting */
  806. if (l && !tipc_link_is_reset(l))
  807. return;
  808. tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
  809. tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
  810. n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
  811. tipc_link_failover_prepare(l, tnl, xmitq);
  812. if (l)
  813. tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
  814. tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
  815. }
  816. /**
  817. * __tipc_node_link_down - handle loss of link
  818. */
  819. static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
  820. struct sk_buff_head *xmitq,
  821. struct tipc_media_addr **maddr)
  822. {
  823. struct tipc_link_entry *le = &n->links[*bearer_id];
  824. int *slot0 = &n->active_links[0];
  825. int *slot1 = &n->active_links[1];
  826. int i, highest = 0, prio;
  827. struct tipc_link *l, *_l, *tnl;
  828. l = n->links[*bearer_id].link;
  829. if (!l || tipc_link_is_reset(l))
  830. return;
  831. n->working_links--;
  832. n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
  833. n->link_id = tipc_link_id(l);
  834. tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
  835. pr_debug("Lost link <%s> on network plane %c\n",
  836. tipc_link_name(l), tipc_link_plane(l));
  837. /* Select new active link if any available */
  838. *slot0 = INVALID_BEARER_ID;
  839. *slot1 = INVALID_BEARER_ID;
  840. for (i = 0; i < MAX_BEARERS; i++) {
  841. _l = n->links[i].link;
  842. if (!_l || !tipc_link_is_up(_l))
  843. continue;
  844. if (_l == l)
  845. continue;
  846. prio = tipc_link_prio(_l);
  847. if (prio < highest)
  848. continue;
  849. if (prio > highest) {
  850. highest = prio;
  851. *slot0 = i;
  852. *slot1 = i;
  853. continue;
  854. }
  855. *slot1 = i;
  856. }
  857. if (!node_is_up(n)) {
  858. if (tipc_link_peer_is_down(l))
  859. tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
  860. tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
  861. trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
  862. tipc_link_fsm_evt(l, LINK_RESET_EVT);
  863. tipc_link_reset(l);
  864. tipc_link_build_reset_msg(l, xmitq);
  865. *maddr = &n->links[*bearer_id].maddr;
  866. node_lost_contact(n, &le->inputq);
  867. tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
  868. return;
  869. }
  870. tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
  871. /* There is still a working link => initiate failover */
  872. *bearer_id = n->active_links[0];
  873. tnl = n->links[*bearer_id].link;
  874. tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
  875. tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
  876. n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
  877. tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
  878. trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
  879. tipc_link_reset(l);
  880. tipc_link_fsm_evt(l, LINK_RESET_EVT);
  881. tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
  882. tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
  883. *maddr = &n->links[*bearer_id].maddr;
  884. }
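/* In __tipc_node_link_down(), if another link is still up it becomes the
 * tunnel (tnl) and FAILOVER_MSG tunneling of the failed link's traffic is
 * prepared; otherwise the peer is considered lost and node_lost_contact()
 * cleans up socket connections and broadcast state.
 */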
  885. static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
  886. {
  887. struct tipc_link_entry *le = &n->links[bearer_id];
  888. struct tipc_media_addr *maddr = NULL;
  889. struct tipc_link *l = le->link;
  890. int old_bearer_id = bearer_id;
  891. struct sk_buff_head xmitq;
  892. if (!l)
  893. return;
  894. __skb_queue_head_init(&xmitq);
  895. tipc_node_write_lock(n);
  896. if (!tipc_link_is_establishing(l)) {
  897. __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
  898. } else {
  899. /* Defuse pending tipc_node_link_up() */
  900. tipc_link_reset(l);
  901. tipc_link_fsm_evt(l, LINK_RESET_EVT);
  902. }
  903. if (delete) {
  904. kfree(l);
  905. le->link = NULL;
  906. n->link_cnt--;
  907. }
  908. trace_tipc_node_link_down(n, true, "node link down or deleted!");
  909. tipc_node_write_unlock(n);
  910. if (delete)
  911. tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
  912. if (!skb_queue_empty(&xmitq))
  913. tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
  914. tipc_sk_rcv(n->net, &le->inputq);
  915. }
  916. static bool node_is_up(struct tipc_node *n)
  917. {
  918. return n->active_links[0] != INVALID_BEARER_ID;
  919. }
  920. bool tipc_node_is_up(struct net *net, u32 addr)
  921. {
  922. struct tipc_node *n;
  923. bool retval = false;
  924. if (in_own_node(net, addr))
  925. return true;
  926. n = tipc_node_find(net, addr);
  927. if (!n)
  928. return false;
  929. retval = node_is_up(n);
  930. tipc_node_put(n);
  931. return retval;
  932. }
  933. static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
  934. {
  935. struct tipc_node *n;
  936. addr ^= tipc_net(net)->random;
  937. while ((n = tipc_node_find(net, addr))) {
  938. tipc_node_put(n);
  939. addr++;
  940. }
  941. return addr;
  942. }
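/* Hypothetical example: with addr 0x1001 and a per-net random value of
 * 0x00f0, the search starts at 0x1001 ^ 0x00f0 = 0x10f1 and then probes
 * 0x10f2, 0x10f3, ... until tipc_node_find() no longer finds a node that
 * already uses the candidate address.
 */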
  943. /* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
  944. * Returns suggested address if any, otherwise 0
  945. */
  946. u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
  947. {
  948. struct tipc_net *tn = tipc_net(net);
  949. struct tipc_node *n;
  950. bool preliminary;
  951. u32 sugg_addr;
  952. /* Suggest new address if some other peer is using this one */
  953. n = tipc_node_find(net, addr);
  954. if (n) {
  955. if (!memcmp(n->peer_id, id, NODE_ID_LEN))
  956. addr = 0;
  957. tipc_node_put(n);
  958. if (!addr)
  959. return 0;
  960. return tipc_node_suggest_addr(net, addr);
  961. }
  962. /* Suggest previously used address if peer is known */
  963. n = tipc_node_find_by_id(net, id);
  964. if (n) {
  965. sugg_addr = n->addr;
  966. preliminary = n->preliminary;
  967. tipc_node_put(n);
  968. if (!preliminary)
  969. return sugg_addr;
  970. }
  971. /* Even this node may be in conflict */
  972. if (tn->trial_addr == addr)
  973. return tipc_node_suggest_addr(net, addr);
  974. return 0;
  975. }
  976. void tipc_node_check_dest(struct net *net, u32 addr,
  977. u8 *peer_id, struct tipc_bearer *b,
  978. u16 capabilities, u32 signature, u32 hash_mixes,
  979. struct tipc_media_addr *maddr,
  980. bool *respond, bool *dupl_addr)
  981. {
  982. struct tipc_node *n;
  983. struct tipc_link *l, *snd_l;
  984. struct tipc_link_entry *le;
  985. bool addr_match = false;
  986. bool sign_match = false;
  987. bool link_up = false;
  988. bool accept_addr = false;
  989. bool reset = true;
  990. char *if_name;
  991. unsigned long intv;
  992. u16 session;
  993. *dupl_addr = false;
  994. *respond = false;
  995. n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
  996. false);
  997. if (!n)
  998. return;
  999. tipc_node_write_lock(n);
  1000. if (unlikely(!n->bc_entry.link)) {
  1001. snd_l = tipc_bc_sndlink(net);
  1002. if (!tipc_link_bc_create(net, tipc_own_addr(net),
  1003. addr, U16_MAX,
  1004. tipc_link_min_win(snd_l),
  1005. tipc_link_max_win(snd_l),
  1006. n->capabilities,
  1007. &n->bc_entry.inputq1,
  1008. &n->bc_entry.namedq, snd_l,
  1009. &n->bc_entry.link)) {
  1010. pr_warn("Broadcast rcv link creation failed, no mem\n");
  1011. tipc_node_write_unlock_fast(n);
  1012. tipc_node_put(n);
  1013. return;
  1014. }
  1015. }
  1016. le = &n->links[b->identity];
  1017. /* Prepare to validate requesting node's signature and media address */
  1018. l = le->link;
  1019. link_up = l && tipc_link_is_up(l);
  1020. addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
  1021. sign_match = (signature == n->signature);
  1022. /* These three flags give us eight permutations: */
  1023. if (sign_match && addr_match && link_up) {
  1024. /* All is fine. Do nothing. */
  1025. reset = false;
  1026. /* Peer node is not a container/local namespace */
  1027. if (!n->peer_hash_mix)
  1028. n->peer_hash_mix = hash_mixes;
  1029. } else if (sign_match && addr_match && !link_up) {
  1030. /* Respond. The link will come up in due time */
  1031. *respond = true;
  1032. } else if (sign_match && !addr_match && link_up) {
  1033. /* Peer has changed i/f address without rebooting.
  1034. * If so, the link will reset soon, and the next
  1035. * discovery will be accepted. So we can ignore it.
  1036. * It may also be a cloned or malicious peer having
  1037. * chosen the same node address and signature as an
  1038. * existing one.
  1039. * Ignore requests until the link goes down, if ever.
  1040. */
  1041. *dupl_addr = true;
  1042. } else if (sign_match && !addr_match && !link_up) {
  1043. /* Peer link has changed i/f address without rebooting.
  1044. * It may also be a cloned or malicious peer; we can't
  1045. * distinguish between the two.
  1046. * The signature is correct, so we must accept.
  1047. */
  1048. accept_addr = true;
  1049. *respond = true;
  1050. } else if (!sign_match && addr_match && link_up) {
  1051. /* Peer node rebooted. Two possibilities:
  1052. * - Delayed re-discovery; this link endpoint has already
  1053. * reset and re-established contact with the peer, before
  1054. * receiving a discovery message from that node.
  1055. * (The peer happened to receive one from this node first).
  1056. * - The peer came back so fast that our side has not
  1057. * discovered it yet. Probing from this side will soon
  1058. * reset the link, since there can be no working link
  1059. * endpoint at the peer end, and the link will re-establish.
  1060. * Accept the signature, since it comes from a known peer.
  1061. */
  1062. n->signature = signature;
  1063. } else if (!sign_match && addr_match && !link_up) {
  1064. /* The peer node has rebooted.
  1065. * Accept signature, since it is a known peer.
  1066. */
  1067. n->signature = signature;
  1068. *respond = true;
  1069. } else if (!sign_match && !addr_match && link_up) {
  1070. /* Peer rebooted with new address, or a new/duplicate peer.
  1071. * Ignore until the link goes down, if ever.
  1072. */
  1073. *dupl_addr = true;
  1074. } else if (!sign_match && !addr_match && !link_up) {
  1075. /* Peer rebooted with new address, or it is a new peer.
  1076. * Accept signature and address.
  1077. */
  1078. n->signature = signature;
  1079. accept_addr = true;
  1080. *respond = true;
  1081. }
  1082. if (!accept_addr)
  1083. goto exit;
  1084. /* Now create new link if not already existing */
  1085. if (!l) {
  1086. if (n->link_cnt == 2)
  1087. goto exit;
  1088. if_name = strchr(b->name, ':') + 1;
  1089. get_random_bytes(&session, sizeof(u16));
  1090. if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
  1091. b->net_plane, b->mtu, b->priority,
  1092. b->min_win, b->max_win, session,
  1093. tipc_own_addr(net), addr, peer_id,
  1094. n->capabilities,
  1095. tipc_bc_sndlink(n->net), n->bc_entry.link,
  1096. &le->inputq,
  1097. &n->bc_entry.namedq, &l)) {
  1098. *respond = false;
  1099. goto exit;
  1100. }
  1101. trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
  1102. tipc_link_reset(l);
  1103. tipc_link_fsm_evt(l, LINK_RESET_EVT);
  1104. if (n->state == NODE_FAILINGOVER)
  1105. tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
  1106. le->link = l;
  1107. n->link_cnt++;
  1108. tipc_node_calculate_timer(n, l);
  1109. if (n->link_cnt == 1) {
  1110. intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
  1111. if (!mod_timer(&n->timer, intv))
  1112. tipc_node_get(n);
  1113. }
  1114. }
  1115. memcpy(&le->maddr, maddr, sizeof(*maddr));
  1116. exit:
  1117. tipc_node_write_unlock(n);
  1118. if (reset && l && !tipc_link_is_reset(l))
  1119. tipc_node_link_down(n, b->identity, false);
  1120. tipc_node_put(n);
  1121. }
  1122. void tipc_node_delete_links(struct net *net, int bearer_id)
  1123. {
  1124. struct tipc_net *tn = net_generic(net, tipc_net_id);
  1125. struct tipc_node *n;
  1126. rcu_read_lock();
  1127. list_for_each_entry_rcu(n, &tn->node_list, list) {
  1128. tipc_node_link_down(n, bearer_id, true);
  1129. }
  1130. rcu_read_unlock();
  1131. }
  1132. static void tipc_node_reset_links(struct tipc_node *n)
  1133. {
  1134. int i;
  1135. pr_warn("Resetting all links to %x\n", n->addr);
  1136. trace_tipc_node_reset_links(n, true, " ");
  1137. for (i = 0; i < MAX_BEARERS; i++) {
  1138. tipc_node_link_down(n, i, false);
  1139. }
  1140. }
  1141. /* tipc_node_fsm_evt - node finite state machine
  1142. * Determines when contact is allowed with peer node
  1143. */
  1144. static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
  1145. {
  1146. int state = n->state;
  1147. switch (state) {
  1148. case SELF_DOWN_PEER_DOWN:
  1149. switch (evt) {
  1150. case SELF_ESTABL_CONTACT_EVT:
  1151. state = SELF_UP_PEER_COMING;
  1152. break;
  1153. case PEER_ESTABL_CONTACT_EVT:
  1154. state = SELF_COMING_PEER_UP;
  1155. break;
  1156. case SELF_LOST_CONTACT_EVT:
  1157. case PEER_LOST_CONTACT_EVT:
  1158. break;
  1159. case NODE_SYNCH_END_EVT:
  1160. case NODE_SYNCH_BEGIN_EVT:
  1161. case NODE_FAILOVER_BEGIN_EVT:
  1162. case NODE_FAILOVER_END_EVT:
  1163. default:
  1164. goto illegal_evt;
  1165. }
  1166. break;
  1167. case SELF_UP_PEER_UP:
  1168. switch (evt) {
  1169. case SELF_LOST_CONTACT_EVT:
  1170. state = SELF_DOWN_PEER_LEAVING;
  1171. break;
  1172. case PEER_LOST_CONTACT_EVT:
  1173. state = SELF_LEAVING_PEER_DOWN;
  1174. break;
  1175. case NODE_SYNCH_BEGIN_EVT:
  1176. state = NODE_SYNCHING;
  1177. break;
  1178. case NODE_FAILOVER_BEGIN_EVT:
  1179. state = NODE_FAILINGOVER;
  1180. break;
  1181. case SELF_ESTABL_CONTACT_EVT:
  1182. case PEER_ESTABL_CONTACT_EVT:
  1183. case NODE_SYNCH_END_EVT:
  1184. case NODE_FAILOVER_END_EVT:
  1185. break;
  1186. default:
  1187. goto illegal_evt;
  1188. }
  1189. break;
  1190. case SELF_DOWN_PEER_LEAVING:
  1191. switch (evt) {
  1192. case PEER_LOST_CONTACT_EVT:
  1193. state = SELF_DOWN_PEER_DOWN;
  1194. break;
  1195. case SELF_ESTABL_CONTACT_EVT:
  1196. case PEER_ESTABL_CONTACT_EVT:
  1197. case SELF_LOST_CONTACT_EVT:
  1198. break;
  1199. case NODE_SYNCH_END_EVT:
  1200. case NODE_SYNCH_BEGIN_EVT:
  1201. case NODE_FAILOVER_BEGIN_EVT:
  1202. case NODE_FAILOVER_END_EVT:
  1203. default:
  1204. goto illegal_evt;
  1205. }
  1206. break;
  1207. case SELF_UP_PEER_COMING:
  1208. switch (evt) {
  1209. case PEER_ESTABL_CONTACT_EVT:
  1210. state = SELF_UP_PEER_UP;
  1211. break;
  1212. case SELF_LOST_CONTACT_EVT:
  1213. state = SELF_DOWN_PEER_DOWN;
  1214. break;
  1215. case SELF_ESTABL_CONTACT_EVT:
  1216. case PEER_LOST_CONTACT_EVT:
  1217. case NODE_SYNCH_END_EVT:
  1218. case NODE_FAILOVER_BEGIN_EVT:
  1219. break;
  1220. case NODE_SYNCH_BEGIN_EVT:
  1221. case NODE_FAILOVER_END_EVT:
  1222. default:
  1223. goto illegal_evt;
  1224. }
  1225. break;
  1226. case SELF_COMING_PEER_UP:
  1227. switch (evt) {
  1228. case SELF_ESTABL_CONTACT_EVT:
  1229. state = SELF_UP_PEER_UP;
  1230. break;
  1231. case PEER_LOST_CONTACT_EVT:
  1232. state = SELF_DOWN_PEER_DOWN;
  1233. break;
  1234. case SELF_LOST_CONTACT_EVT:
  1235. case PEER_ESTABL_CONTACT_EVT:
  1236. break;
  1237. case NODE_SYNCH_END_EVT:
  1238. case NODE_SYNCH_BEGIN_EVT:
  1239. case NODE_FAILOVER_BEGIN_EVT:
  1240. case NODE_FAILOVER_END_EVT:
  1241. default:
  1242. goto illegal_evt;
  1243. }
  1244. break;
  1245. case SELF_LEAVING_PEER_DOWN:
  1246. switch (evt) {
  1247. case SELF_LOST_CONTACT_EVT:
  1248. state = SELF_DOWN_PEER_DOWN;
  1249. break;
  1250. case SELF_ESTABL_CONTACT_EVT:
  1251. case PEER_ESTABL_CONTACT_EVT:
  1252. case PEER_LOST_CONTACT_EVT:
  1253. break;
  1254. case NODE_SYNCH_END_EVT:
  1255. case NODE_SYNCH_BEGIN_EVT:
  1256. case NODE_FAILOVER_BEGIN_EVT:
  1257. case NODE_FAILOVER_END_EVT:
  1258. default:
  1259. goto illegal_evt;
  1260. }
  1261. break;
  1262. case NODE_FAILINGOVER:
  1263. switch (evt) {
  1264. case SELF_LOST_CONTACT_EVT:
  1265. state = SELF_DOWN_PEER_LEAVING;
  1266. break;
  1267. case PEER_LOST_CONTACT_EVT:
  1268. state = SELF_LEAVING_PEER_DOWN;
  1269. break;
  1270. case NODE_FAILOVER_END_EVT:
  1271. state = SELF_UP_PEER_UP;
  1272. break;
  1273. case NODE_FAILOVER_BEGIN_EVT:
  1274. case SELF_ESTABL_CONTACT_EVT:
  1275. case PEER_ESTABL_CONTACT_EVT:
  1276. break;
  1277. case NODE_SYNCH_BEGIN_EVT:
  1278. case NODE_SYNCH_END_EVT:
  1279. default:
  1280. goto illegal_evt;
  1281. }
  1282. break;
  1283. case NODE_SYNCHING:
  1284. switch (evt) {
  1285. case SELF_LOST_CONTACT_EVT:
  1286. state = SELF_DOWN_PEER_LEAVING;
  1287. break;
  1288. case PEER_LOST_CONTACT_EVT:
  1289. state = SELF_LEAVING_PEER_DOWN;
  1290. break;
  1291. case NODE_SYNCH_END_EVT:
  1292. state = SELF_UP_PEER_UP;
  1293. break;
  1294. case NODE_FAILOVER_BEGIN_EVT:
  1295. state = NODE_FAILINGOVER;
  1296. break;
  1297. case NODE_SYNCH_BEGIN_EVT:
  1298. case SELF_ESTABL_CONTACT_EVT:
  1299. case PEER_ESTABL_CONTACT_EVT:
  1300. break;
  1301. case NODE_FAILOVER_END_EVT:
  1302. default:
  1303. goto illegal_evt;
  1304. }
  1305. break;
  1306. default:
  1307. pr_err("Unknown node fsm state %x\n", state);
  1308. break;
  1309. }
  1310. trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
  1311. n->state = state;
  1312. return;
  1313. illegal_evt:
  1314. pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
  1315. trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
  1316. }
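/* Note on the FSM above: events listed as empty cases are silently ignored
 * in that state, while events reaching the default label are logged through
 * illegal_evt; in the illegal case the node state is left unchanged.
 */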
  1317. static void node_lost_contact(struct tipc_node *n,
  1318. struct sk_buff_head *inputq)
  1319. {
  1320. struct tipc_sock_conn *conn, *safe;
  1321. struct tipc_link *l;
  1322. struct list_head *conns = &n->conn_sks;
  1323. struct sk_buff *skb;
  1324. uint i;
  1325. pr_debug("Lost contact with %x\n", n->addr);
  1326. n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
  1327. trace_tipc_node_lost_contact(n, true, " ");
  1328. /* Clean up broadcast state */
  1329. tipc_bcast_remove_peer(n->net, n->bc_entry.link);
  1330. /* Abort any ongoing link failover */
  1331. for (i = 0; i < MAX_BEARERS; i++) {
  1332. l = n->links[i].link;
  1333. if (l)
  1334. tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
  1335. }
  1336. /* Notify publications from this node */
  1337. n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
  1338. n->peer_net = NULL;
  1339. n->peer_hash_mix = 0;
  1340. /* Notify sockets connected to node */
  1341. list_for_each_entry_safe(conn, safe, conns, list) {
  1342. skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
  1343. SHORT_H_SIZE, 0, tipc_own_addr(n->net),
  1344. conn->peer_node, conn->port,
  1345. conn->peer_port, TIPC_ERR_NO_NODE);
  1346. if (likely(skb))
  1347. skb_queue_tail(inputq, skb);
  1348. list_del(&conn->list);
  1349. kfree(conn);
  1350. }
  1351. }
  1352. /**
  1353. * tipc_node_get_linkname - get the name of a link
  1354. *
  1355. * @bearer_id: id of the bearer
  1356. * @node: peer node address
  1357. * @linkname: link name output buffer
  1358. *
  1359. * Returns 0 on success
  1360. */
  1361. int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
  1362. char *linkname, size_t len)
  1363. {
  1364. struct tipc_link *link;
  1365. int err = -EINVAL;
  1366. struct tipc_node *node = tipc_node_find(net, addr);
  1367. if (!node)
  1368. return err;
  1369. if (bearer_id >= MAX_BEARERS)
  1370. goto exit;
  1371. tipc_node_read_lock(node);
  1372. link = node->links[bearer_id].link;
  1373. if (link) {
  1374. strncpy(linkname, tipc_link_name(link), len);
  1375. err = 0;
  1376. }
  1377. tipc_node_read_unlock(node);
  1378. exit:
  1379. tipc_node_put(node);
  1380. return err;
  1381. }
  1382. /* Caller should hold node lock for the passed node */
  1383. static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
  1384. {
  1385. void *hdr;
  1386. struct nlattr *attrs;
  1387. hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
  1388. NLM_F_MULTI, TIPC_NL_NODE_GET);
  1389. if (!hdr)
  1390. return -EMSGSIZE;
  1391. attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
  1392. if (!attrs)
  1393. goto msg_full;
  1394. if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
  1395. goto attr_msg_full;
  1396. if (node_is_up(node))
  1397. if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
  1398. goto attr_msg_full;
  1399. nla_nest_end(msg->skb, attrs);
  1400. genlmsg_end(msg->skb, hdr);
  1401. return 0;
  1402. attr_msg_full:
  1403. nla_nest_cancel(msg->skb, attrs);
  1404. msg_full:
  1405. genlmsg_cancel(msg->skb, hdr);
  1406. return -EMSGSIZE;
  1407. }
  1408. static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
  1409. {
  1410. struct tipc_msg *hdr = buf_msg(skb_peek(list));
  1411. struct sk_buff_head inputq;
  1412. switch (msg_user(hdr)) {
  1413. case TIPC_LOW_IMPORTANCE:
  1414. case TIPC_MEDIUM_IMPORTANCE:
  1415. case TIPC_HIGH_IMPORTANCE:
  1416. case TIPC_CRITICAL_IMPORTANCE:
  1417. if (msg_connected(hdr) || msg_named(hdr) ||
  1418. msg_direct(hdr)) {
  1419. tipc_loopback_trace(peer_net, list);
  1420. spin_lock_init(&list->lock);
  1421. tipc_sk_rcv(peer_net, list);
  1422. return;
  1423. }
  1424. if (msg_mcast(hdr)) {
  1425. tipc_loopback_trace(peer_net, list);
  1426. skb_queue_head_init(&inputq);
  1427. tipc_sk_mcast_rcv(peer_net, list, &inputq);
  1428. __skb_queue_purge(list);
  1429. skb_queue_purge(&inputq);
  1430. return;
  1431. }
  1432. return;
  1433. case MSG_FRAGMENTER:
  1434. if (tipc_msg_assemble(list)) {
  1435. tipc_loopback_trace(peer_net, list);
  1436. skb_queue_head_init(&inputq);
  1437. tipc_sk_mcast_rcv(peer_net, list, &inputq);
  1438. __skb_queue_purge(list);
  1439. skb_queue_purge(&inputq);
  1440. }
  1441. return;
  1442. case GROUP_PROTOCOL:
  1443. case CONN_MANAGER:
  1444. tipc_loopback_trace(peer_net, list);
  1445. spin_lock_init(&list->lock);
  1446. tipc_sk_rcv(peer_net, list);
  1447. return;
  1448. case LINK_PROTOCOL:
  1449. case NAME_DISTRIBUTOR:
  1450. case TUNNEL_PROTOCOL:
  1451. case BCAST_PROTOCOL:
  1452. return;
  1453. default:
  1454. return;
  1455. };
  1456. }
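/* tipc_lxc_xmit() above is the short-cut path for a peer namespace on the
 * same kernel: payload, group protocol and connection manager messages are
 * injected directly into the peer namespace's socket layer, while link
 * level protocol messages (LINK_PROTOCOL, TUNNEL_PROTOCOL, etc.) are not
 * short-cut and simply return.
 */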
  1457. /**
  1458. * tipc_node_xmit() is the general link level function for message sending
  1459. * @net: the applicable net namespace
  1460. * @list: chain of buffers containing message
  1461. * @dnode: address of destination node
  1462. * @selector: a number used for deterministic link selection
  1463. * Consumes the buffer chain.
  1464. * Returns 0 on success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or -ENOBUFS
  1465. */
  1466. int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
  1467. u32 dnode, int selector)
  1468. {
  1469. struct tipc_link_entry *le = NULL;
  1470. struct tipc_node *n;
  1471. struct sk_buff_head xmitq;
  1472. bool node_up = false;
  1473. int bearer_id;
  1474. int rc;
  1475. if (in_own_node(net, dnode)) {
  1476. tipc_loopback_trace(net, list);
  1477. spin_lock_init(&list->lock);
  1478. tipc_sk_rcv(net, list);
  1479. return 0;
  1480. }
  1481. n = tipc_node_find(net, dnode);
  1482. if (unlikely(!n)) {
  1483. __skb_queue_purge(list);
  1484. return -EHOSTUNREACH;
  1485. }
  1486. tipc_node_read_lock(n);
  1487. node_up = node_is_up(n);
  1488. if (node_up && n->peer_net && check_net(n->peer_net)) {
  1489. /* xmit inner linux container */
  1490. tipc_lxc_xmit(n->peer_net, list);
  1491. if (likely(skb_queue_empty(list))) {
  1492. tipc_node_read_unlock(n);
  1493. tipc_node_put(n);
  1494. return 0;
  1495. }
  1496. }
  1497. bearer_id = n->active_links[selector & 1];
  1498. if (unlikely(bearer_id == INVALID_BEARER_ID)) {
  1499. tipc_node_read_unlock(n);
  1500. tipc_node_put(n);
  1501. __skb_queue_purge(list);
  1502. return -EHOSTUNREACH;
  1503. }
  1504. __skb_queue_head_init(&xmitq);
  1505. le = &n->links[bearer_id];
  1506. spin_lock_bh(&le->lock);
  1507. rc = tipc_link_xmit(le->link, list, &xmitq);
  1508. spin_unlock_bh(&le->lock);
  1509. tipc_node_read_unlock(n);
  1510. if (unlikely(rc == -ENOBUFS))
  1511. tipc_node_link_down(n, bearer_id, false);
  1512. else
  1513. tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
  1514. tipc_node_put(n);
  1515. return rc;
  1516. }
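/* Rough flow of tipc_node_xmit(): loopback traffic goes straight to the
 * local socket layer; traffic to a peer namespace on the same kernel is
 * short-cut through tipc_lxc_xmit(); everything else is queued on the
 * selected link under the per-link spinlock and handed to the bearer, with
 * the link taken down if tipc_link_xmit() returns -ENOBUFS.
 */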
  1517. /* tipc_node_xmit_skb(): send single buffer to destination
  1518. * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
  1519. * messages, which will not be rejected
  1520. * The only exception is datagram messages rerouted after secondary
  1521. * lookup, which are rare and safe to dispose of anyway.
  1522. */
  1523. int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
  1524. u32 selector)
  1525. {
  1526. struct sk_buff_head head;
  1527. __skb_queue_head_init(&head);
  1528. __skb_queue_tail(&head, skb);
  1529. tipc_node_xmit(net, &head, dnode, selector);
  1530. return 0;
  1531. }
  1532. /* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
  1533. * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
  1534. */
  1535. int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
  1536. {
  1537. struct sk_buff *skb;
  1538. u32 selector, dnode;
  1539. while ((skb = __skb_dequeue(xmitq))) {
  1540. selector = msg_origport(buf_msg(skb));
  1541. dnode = msg_destnode(buf_msg(skb));
  1542. tipc_node_xmit_skb(net, skb, dnode, selector);
  1543. }
  1544. return 0;
  1545. }
  1546. void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
  1547. {
  1548. struct sk_buff *txskb;
  1549. struct tipc_node *n;
  1550. u32 dst;
  1551. rcu_read_lock();
  1552. list_for_each_entry_rcu(n, tipc_nodes(net), list) {
  1553. dst = n->addr;
  1554. if (in_own_node(net, dst))
  1555. continue;
  1556. if (!node_is_up(n))
  1557. continue;
  1558. txskb = pskb_copy(skb, GFP_ATOMIC);
  1559. if (!txskb)
  1560. break;
  1561. msg_set_destnode(buf_msg(txskb), dst);
  1562. tipc_node_xmit_skb(net, txskb, dst, 0);
  1563. }
  1564. rcu_read_unlock();
  1565. kfree_skb(skb);
  1566. }
  1567. static void tipc_node_mcast_rcv(struct tipc_node *n)
  1568. {
  1569. struct tipc_bclink_entry *be = &n->bc_entry;
  1570. /* 'arrvq' is under inputq2's lock protection */
  1571. spin_lock_bh(&be->inputq2.lock);
  1572. spin_lock_bh(&be->inputq1.lock);
  1573. skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
  1574. spin_unlock_bh(&be->inputq1.lock);
  1575. spin_unlock_bh(&be->inputq2.lock);
  1576. tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
  1577. }
  1578. static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
  1579. int bearer_id, struct sk_buff_head *xmitq)
  1580. {
  1581. struct tipc_link *ucl;
  1582. int rc;
  1583. rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);
  1584. if (rc & TIPC_LINK_DOWN_EVT) {
  1585. tipc_node_reset_links(n);
  1586. return;
  1587. }
  1588. if (!(rc & TIPC_LINK_SND_STATE))
  1589. return;
  1590. /* If probe message, a STATE response will be sent anyway */
  1591. if (msg_probe(hdr))
  1592. return;
  1593. /* Produce a STATE message carrying broadcast NACK */
  1594. tipc_node_read_lock(n);
  1595. ucl = n->links[bearer_id].link;
  1596. if (ucl)
  1597. tipc_link_build_state_msg(ucl, xmitq);
  1598. tipc_node_read_unlock(n);
  1599. }
  1600. /**
  1601. * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
  1602. * @net: the applicable net namespace
  1603. * @skb: TIPC packet
  1604. * @bearer_id: id of bearer message arrived on
  1605. *
  1606. * Invoked with no locks held.
  1607. */
  1608. static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
  1609. {
  1610. int rc;
  1611. struct sk_buff_head xmitq;
  1612. struct tipc_bclink_entry *be;
  1613. struct tipc_link_entry *le;
  1614. struct tipc_msg *hdr = buf_msg(skb);
  1615. int usr = msg_user(hdr);
  1616. u32 dnode = msg_destnode(hdr);
  1617. struct tipc_node *n;
  1618. __skb_queue_head_init(&xmitq);
  1619. /* If NACK for other node, let rcv link for that node peek into it */
  1620. if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
  1621. n = tipc_node_find(net, dnode);
  1622. else
  1623. n = tipc_node_find(net, msg_prevnode(hdr));
  1624. if (!n) {
  1625. kfree_skb(skb);
  1626. return;
  1627. }
  1628. be = &n->bc_entry;
  1629. le = &n->links[bearer_id];
  1630. rc = tipc_bcast_rcv(net, be->link, skb);
  1631. /* Broadcast ACKs are sent on a unicast link */
  1632. if (rc & TIPC_LINK_SND_STATE) {
  1633. tipc_node_read_lock(n);
  1634. tipc_link_build_state_msg(le->link, &xmitq);
  1635. tipc_node_read_unlock(n);
  1636. }
  1637. if (!skb_queue_empty(&xmitq))
  1638. tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
  1639. if (!skb_queue_empty(&be->inputq1))
  1640. tipc_node_mcast_rcv(n);
  1641. /* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
  1642. if (!skb_queue_empty(&n->bc_entry.namedq))
  1643. tipc_named_rcv(net, &n->bc_entry.namedq);
  1644. /* If reassembly or retransmission failure => reset all links to peer */
  1645. if (rc & TIPC_LINK_DOWN_EVT)
  1646. tipc_node_reset_links(n);
  1647. tipc_node_put(n);
  1648. }
  1649. /**
  1650. * tipc_node_check_state - check and if necessary update node state
  1651. * @skb: TIPC packet
  1652. * @bearer_id: identity of bearer delivering the packet
  1653. * Returns true if state and msg are ok, otherwise false
  1654. */
  1655. static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
  1656. int bearer_id, struct sk_buff_head *xmitq)
  1657. {
  1658. struct tipc_msg *hdr = buf_msg(skb);
  1659. int usr = msg_user(hdr);
  1660. int mtyp = msg_type(hdr);
  1661. u16 oseqno = msg_seqno(hdr);
  1662. u16 exp_pkts = msg_msgcnt(hdr);
  1663. u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
  1664. int state = n->state;
  1665. struct tipc_link *l, *tnl, *pl = NULL;
  1666. struct tipc_media_addr *maddr;
  1667. int pb_id;
  1668. if (trace_tipc_node_check_state_enabled()) {
  1669. trace_tipc_skb_dump(skb, false, "skb for node state check");
  1670. trace_tipc_node_check_state(n, true, " ");
  1671. }
  1672. l = n->links[bearer_id].link;
  1673. if (!l)
  1674. return false;
  1675. rcv_nxt = tipc_link_rcv_nxt(l);
  1676. if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
  1677. return true;
  1678. /* Find parallel link, if any */
  1679. for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
  1680. if ((pb_id != bearer_id) && n->links[pb_id].link) {
  1681. pl = n->links[pb_id].link;
  1682. break;
  1683. }
  1684. }
  1685. if (!tipc_link_validate_msg(l, hdr)) {
  1686. trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
  1687. trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
  1688. return false;
  1689. }
1690. /* Check and update node accessibility if applicable */
  1691. if (state == SELF_UP_PEER_COMING) {
  1692. if (!tipc_link_is_up(l))
  1693. return true;
  1694. if (!msg_peer_link_is_up(hdr))
  1695. return true;
  1696. tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
  1697. }
  1698. if (state == SELF_DOWN_PEER_LEAVING) {
  1699. if (msg_peer_node_is_up(hdr))
  1700. return false;
  1701. tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
  1702. return true;
  1703. }
  1704. if (state == SELF_LEAVING_PEER_DOWN)
  1705. return false;
  1706. /* Ignore duplicate packets */
  1707. if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
  1708. return true;
  1709. /* Initiate or update failover mode if applicable */
  1710. if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
  1711. syncpt = oseqno + exp_pkts - 1;
  1712. if (pl && !tipc_link_is_reset(pl)) {
  1713. __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
  1714. trace_tipc_node_link_down(n, true,
  1715. "node link down <- failover!");
  1716. tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
  1717. tipc_link_inputq(l));
  1718. }
  1719. /* If parallel link was already down, and this happened before
  1720. * the tunnel link came up, node failover was never started.
  1721. * Ensure that a FAILOVER_MSG is sent to get peer out of
  1722. * NODE_FAILINGOVER state, also this node must accept
  1723. * TUNNEL_MSGs from peer.
  1724. */
  1725. if (n->state != NODE_FAILINGOVER)
  1726. tipc_node_link_failover(n, pl, l, xmitq);
  1727. /* If pkts arrive out of order, use lowest calculated syncpt */
  1728. if (less(syncpt, n->sync_point))
  1729. n->sync_point = syncpt;
  1730. }
  1731. /* Open parallel link when tunnel link reaches synch point */
  1732. if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
  1733. if (!more(rcv_nxt, n->sync_point))
  1734. return true;
  1735. tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
  1736. if (pl)
  1737. tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
  1738. return true;
  1739. }
  1740. /* No synching needed if only one link */
  1741. if (!pl || !tipc_link_is_up(pl))
  1742. return true;
  1743. /* Initiate synch mode if applicable */
  1744. if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
  1745. if (n->capabilities & TIPC_TUNNEL_ENHANCED)
  1746. syncpt = msg_syncpt(hdr);
  1747. else
  1748. syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
  1749. if (!tipc_link_is_up(l))
  1750. __tipc_node_link_up(n, bearer_id, xmitq);
  1751. if (n->state == SELF_UP_PEER_UP) {
  1752. n->sync_point = syncpt;
  1753. tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
  1754. tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
  1755. }
  1756. }
  1757. /* Open tunnel link when parallel link reaches synch point */
  1758. if (n->state == NODE_SYNCHING) {
  1759. if (tipc_link_is_synching(l)) {
  1760. tnl = l;
  1761. } else {
  1762. tnl = pl;
  1763. pl = l;
  1764. }
  1765. inputq_len = skb_queue_len(tipc_link_inputq(pl));
  1766. dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
  1767. if (more(dlv_nxt, n->sync_point)) {
  1768. tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
  1769. tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
  1770. return true;
  1771. }
  1772. if (l == pl)
  1773. return true;
  1774. if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
  1775. return true;
  1776. if (usr == LINK_PROTOCOL)
  1777. return true;
  1778. return false;
  1779. }
  1780. return true;
  1781. }
  1782. /**
  1783. * tipc_rcv - process TIPC packets/messages arriving from off-node
  1784. * @net: the applicable net namespace
  1785. * @skb: TIPC packet
1786. * @b: pointer to the bearer the message arrived on
  1787. *
  1788. * Invoked with no locks held. Bearer pointer must point to a valid bearer
  1789. * structure (i.e. cannot be NULL), but bearer can be inactive.
  1790. */
  1791. void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
  1792. {
  1793. struct sk_buff_head xmitq;
  1794. struct tipc_link_entry *le;
  1795. struct tipc_msg *hdr;
  1796. struct tipc_node *n;
  1797. int bearer_id = b->identity;
  1798. u32 self = tipc_own_addr(net);
  1799. int usr, rc = 0;
  1800. u16 bc_ack;
  1801. #ifdef CONFIG_TIPC_CRYPTO
  1802. struct tipc_ehdr *ehdr;
  1803. /* Check if message must be decrypted first */
  1804. if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
  1805. goto rcv;
  1806. ehdr = (struct tipc_ehdr *)skb->data;
  1807. if (likely(ehdr->user != LINK_CONFIG)) {
  1808. n = tipc_node_find(net, ntohl(ehdr->addr));
  1809. if (unlikely(!n))
  1810. goto discard;
  1811. } else {
  1812. n = tipc_node_find_by_id(net, ehdr->id);
  1813. }
  1814. tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
  1815. tipc_node_put(n);
  1816. if (!skb)
  1817. return;
  1818. rcv:
  1819. #endif
  1820. /* Ensure message is well-formed before touching the header */
  1821. if (unlikely(!tipc_msg_validate(&skb)))
  1822. goto discard;
  1823. __skb_queue_head_init(&xmitq);
  1824. hdr = buf_msg(skb);
  1825. usr = msg_user(hdr);
  1826. bc_ack = msg_bcast_ack(hdr);
  1827. /* Handle arrival of discovery or broadcast packet */
  1828. if (unlikely(msg_non_seq(hdr))) {
  1829. if (unlikely(usr == LINK_CONFIG))
  1830. return tipc_disc_rcv(net, skb, b);
  1831. else
  1832. return tipc_node_bc_rcv(net, skb, bearer_id);
  1833. }
  1834. /* Discard unicast link messages destined for another node */
  1835. if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
  1836. goto discard;
  1837. /* Locate neighboring node that sent packet */
  1838. n = tipc_node_find(net, msg_prevnode(hdr));
  1839. if (unlikely(!n))
  1840. goto discard;
  1841. le = &n->links[bearer_id];
  1842. /* Ensure broadcast reception is in synch with peer's send state */
  1843. if (unlikely(usr == LINK_PROTOCOL))
  1844. tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
  1845. else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
  1846. tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
  1847. /* Receive packet directly if conditions permit */
  1848. tipc_node_read_lock(n);
  1849. if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
  1850. spin_lock_bh(&le->lock);
  1851. if (le->link) {
  1852. rc = tipc_link_rcv(le->link, skb, &xmitq);
  1853. skb = NULL;
  1854. }
  1855. spin_unlock_bh(&le->lock);
  1856. }
  1857. tipc_node_read_unlock(n);
  1858. /* Check/update node state before receiving */
  1859. if (unlikely(skb)) {
  1860. if (unlikely(skb_linearize(skb)))
  1861. goto out_node_put;
  1862. tipc_node_write_lock(n);
  1863. if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
  1864. if (le->link) {
  1865. rc = tipc_link_rcv(le->link, skb, &xmitq);
  1866. skb = NULL;
  1867. }
  1868. }
  1869. tipc_node_write_unlock(n);
  1870. }
  1871. if (unlikely(rc & TIPC_LINK_UP_EVT))
  1872. tipc_node_link_up(n, bearer_id, &xmitq);
  1873. if (unlikely(rc & TIPC_LINK_DOWN_EVT))
  1874. tipc_node_link_down(n, bearer_id, false);
  1875. if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
  1876. tipc_named_rcv(net, &n->bc_entry.namedq);
  1877. if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
  1878. tipc_node_mcast_rcv(n);
  1879. if (!skb_queue_empty(&le->inputq))
  1880. tipc_sk_rcv(net, &le->inputq);
  1881. if (!skb_queue_empty(&xmitq))
  1882. tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
  1883. out_node_put:
  1884. tipc_node_put(n);
  1885. discard:
  1886. kfree_skb(skb);
  1887. }
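/* tipc_node_apply_property - propagate a changed bearer property to all
 *                            links using that bearer
 * @net: the applicable net namespace
 * @b: bearer whose property has changed
 * @prop: property to apply; only TIPC_NLA_PROP_TOL and TIPC_NLA_PROP_MTU
 *        are acted upon
 *
 * Walks the node list under RCU, updates each affected link's tolerance
 * or MTU from the bearer's current values, and transmits any resulting
 * protocol messages on that bearer.
 */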
  1888. void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
  1889. int prop)
  1890. {
  1891. struct tipc_net *tn = tipc_net(net);
  1892. int bearer_id = b->identity;
  1893. struct sk_buff_head xmitq;
  1894. struct tipc_link_entry *e;
  1895. struct tipc_node *n;
  1896. __skb_queue_head_init(&xmitq);
  1897. rcu_read_lock();
  1898. list_for_each_entry_rcu(n, &tn->node_list, list) {
  1899. tipc_node_write_lock(n);
  1900. e = &n->links[bearer_id];
  1901. if (e->link) {
  1902. if (prop == TIPC_NLA_PROP_TOL)
  1903. tipc_link_set_tolerance(e->link, b->tolerance,
  1904. &xmitq);
  1905. else if (prop == TIPC_NLA_PROP_MTU)
  1906. tipc_link_set_mtu(e->link, b->mtu);
  1907. }
  1908. tipc_node_write_unlock(n);
  1909. tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
  1910. }
  1911. rcu_read_unlock();
  1912. }
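/* tipc_nl_peer_rm - netlink handler removing a peer node that is down
 *
 * The peer is identified by the TIPC_NLA_NET_ADDR attribute. Removal is
 * refused for the own node (-ENOTSUPP), unknown peers (-ENXIO) and peers
 * whose links are not fully down (-EBUSY). On success the node is deleted
 * and the cluster capability mask is recalculated.
 */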
  1913. int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
  1914. {
  1915. struct net *net = sock_net(skb->sk);
  1916. struct tipc_net *tn = net_generic(net, tipc_net_id);
  1917. struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
  1918. struct tipc_node *peer, *temp_node;
  1919. u32 addr;
  1920. int err;
  1921. /* We identify the peer by its net */
  1922. if (!info->attrs[TIPC_NLA_NET])
  1923. return -EINVAL;
  1924. err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
  1925. info->attrs[TIPC_NLA_NET],
  1926. tipc_nl_net_policy, info->extack);
  1927. if (err)
  1928. return err;
  1929. if (!attrs[TIPC_NLA_NET_ADDR])
  1930. return -EINVAL;
  1931. addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
  1932. if (in_own_node(net, addr))
  1933. return -ENOTSUPP;
  1934. spin_lock_bh(&tn->node_list_lock);
  1935. peer = tipc_node_find(net, addr);
  1936. if (!peer) {
  1937. spin_unlock_bh(&tn->node_list_lock);
  1938. return -ENXIO;
  1939. }
  1940. tipc_node_write_lock(peer);
  1941. if (peer->state != SELF_DOWN_PEER_DOWN &&
  1942. peer->state != SELF_DOWN_PEER_LEAVING) {
  1943. tipc_node_write_unlock(peer);
  1944. err = -EBUSY;
  1945. goto err_out;
  1946. }
  1947. tipc_node_clear_links(peer);
  1948. tipc_node_write_unlock(peer);
  1949. tipc_node_delete(peer);
  1950. /* Calculate cluster capabilities */
  1951. tn->capabilities = TIPC_NODE_CAPABILITIES;
  1952. list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
  1953. tn->capabilities &= temp_node->capabilities;
  1954. }
  1955. tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
  1956. err = 0;
  1957. err_out:
  1958. tipc_node_put(peer);
  1959. spin_unlock_bh(&tn->node_list_lock);
  1960. return err;
  1961. }
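/* tipc_nl_node_dump - netlink dump callback listing all known peer nodes
 *
 * Resumes from the node whose address is stored in cb->args[1] on
 * subsequent calls and skips nodes that are still preliminary. Returns
 * -EPIPE, with the dump marked as interrupted, if the node we should
 * resume from has vanished.
 */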
  1962. int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
  1963. {
  1964. int err;
  1965. struct net *net = sock_net(skb->sk);
  1966. struct tipc_net *tn = net_generic(net, tipc_net_id);
  1967. int done = cb->args[0];
  1968. int last_addr = cb->args[1];
  1969. struct tipc_node *node;
  1970. struct tipc_nl_msg msg;
  1971. if (done)
  1972. return 0;
  1973. msg.skb = skb;
  1974. msg.portid = NETLINK_CB(cb->skb).portid;
  1975. msg.seq = cb->nlh->nlmsg_seq;
  1976. rcu_read_lock();
  1977. if (last_addr) {
  1978. node = tipc_node_find(net, last_addr);
  1979. if (!node) {
  1980. rcu_read_unlock();
1981. /* We never set seq or call nl_dump_check_consistent();
1982. * this means that setting prev_seq here will cause the
1983. * consistency check to fail in the netlink callback
1984. * handler, resulting in the NLMSG_DONE message having
  1985. * the NLM_F_DUMP_INTR flag set if the node state
  1986. * changed while we released the lock.
  1987. */
  1988. cb->prev_seq = 1;
  1989. return -EPIPE;
  1990. }
  1991. tipc_node_put(node);
  1992. }
  1993. list_for_each_entry_rcu(node, &tn->node_list, list) {
  1994. if (node->preliminary)
  1995. continue;
  1996. if (last_addr) {
  1997. if (node->addr == last_addr)
  1998. last_addr = 0;
  1999. else
  2000. continue;
  2001. }
  2002. tipc_node_read_lock(node);
  2003. err = __tipc_nl_add_node(&msg, node);
  2004. if (err) {
  2005. last_addr = node->addr;
  2006. tipc_node_read_unlock(node);
  2007. goto out;
  2008. }
  2009. tipc_node_read_unlock(node);
  2010. }
  2011. done = 1;
  2012. out:
  2013. cb->args[0] = done;
  2014. cb->args[1] = last_addr;
  2015. rcu_read_unlock();
  2016. return skb->len;
  2017. }
  2018. /* tipc_node_find_by_name - locate owner node of link by link's name
  2019. * @net: the applicable net namespace
2020. * @link_name: pointer to link name string
  2021. * @bearer_id: pointer to index in 'node->links' array where the link was found.
  2022. *
2023. * Returns pointer to node owning the link, or NULL if no matching link is found.
  2024. */
  2025. static struct tipc_node *tipc_node_find_by_name(struct net *net,
  2026. const char *link_name,
  2027. unsigned int *bearer_id)
  2028. {
  2029. struct tipc_net *tn = net_generic(net, tipc_net_id);
  2030. struct tipc_link *l;
  2031. struct tipc_node *n;
  2032. struct tipc_node *found_node = NULL;
  2033. int i;
  2034. *bearer_id = 0;
  2035. rcu_read_lock();
  2036. list_for_each_entry_rcu(n, &tn->node_list, list) {
  2037. tipc_node_read_lock(n);
  2038. for (i = 0; i < MAX_BEARERS; i++) {
  2039. l = n->links[i].link;
  2040. if (l && !strcmp(tipc_link_name(l), link_name)) {
  2041. *bearer_id = i;
  2042. found_node = n;
  2043. break;
  2044. }
  2045. }
  2046. tipc_node_read_unlock(n);
  2047. if (found_node)
  2048. break;
  2049. }
  2050. rcu_read_unlock();
  2051. return found_node;
  2052. }
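/* tipc_nl_node_set_link - netlink handler updating properties of one link
 *
 * The link is identified by TIPC_NLA_LINK_NAME; the broadcast link is
 * delegated to tipc_nl_bc_link_set(). For unicast links the tolerance,
 * priority and max window properties are applied under the node read
 * lock, and any resulting protocol messages are sent on the link's bearer.
 */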
  2053. int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
  2054. {
  2055. int err;
  2056. int res = 0;
  2057. int bearer_id;
  2058. char *name;
  2059. struct tipc_link *link;
  2060. struct tipc_node *node;
  2061. struct sk_buff_head xmitq;
  2062. struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
  2063. struct net *net = sock_net(skb->sk);
  2064. __skb_queue_head_init(&xmitq);
  2065. if (!info->attrs[TIPC_NLA_LINK])
  2066. return -EINVAL;
  2067. err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
  2068. info->attrs[TIPC_NLA_LINK],
  2069. tipc_nl_link_policy, info->extack);
  2070. if (err)
  2071. return err;
  2072. if (!attrs[TIPC_NLA_LINK_NAME])
  2073. return -EINVAL;
  2074. name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
  2075. if (strcmp(name, tipc_bclink_name) == 0)
  2076. return tipc_nl_bc_link_set(net, attrs);
  2077. node = tipc_node_find_by_name(net, name, &bearer_id);
  2078. if (!node)
  2079. return -EINVAL;
  2080. tipc_node_read_lock(node);
  2081. link = node->links[bearer_id].link;
  2082. if (!link) {
  2083. res = -EINVAL;
  2084. goto out;
  2085. }
  2086. if (attrs[TIPC_NLA_LINK_PROP]) {
  2087. struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
  2088. err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
  2089. if (err) {
  2090. res = err;
  2091. goto out;
  2092. }
  2093. if (props[TIPC_NLA_PROP_TOL]) {
  2094. u32 tol;
  2095. tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
  2096. tipc_link_set_tolerance(link, tol, &xmitq);
  2097. }
  2098. if (props[TIPC_NLA_PROP_PRIO]) {
  2099. u32 prio;
  2100. prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
  2101. tipc_link_set_prio(link, prio, &xmitq);
  2102. }
  2103. if (props[TIPC_NLA_PROP_WIN]) {
  2104. u32 max_win;
  2105. max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
  2106. tipc_link_set_queue_limits(link,
  2107. tipc_link_min_win(link),
  2108. max_win);
  2109. }
  2110. }
  2111. out:
  2112. tipc_node_read_unlock(node);
  2113. tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
  2114. NULL);
  2115. return res;
  2116. }
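/* tipc_nl_node_get_link - netlink handler returning attributes of one link
 *
 * Looks the link up by name, with the broadcast link handled separately
 * via tipc_nl_add_bc_link(), and replies with a freshly allocated message
 * filled in by __tipc_nl_add_link().
 */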
  2117. int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
  2118. {
  2119. struct net *net = genl_info_net(info);
  2120. struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
  2121. struct tipc_nl_msg msg;
  2122. char *name;
  2123. int err;
  2124. msg.portid = info->snd_portid;
  2125. msg.seq = info->snd_seq;
  2126. if (!info->attrs[TIPC_NLA_LINK])
  2127. return -EINVAL;
  2128. err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
  2129. info->attrs[TIPC_NLA_LINK],
  2130. tipc_nl_link_policy, info->extack);
  2131. if (err)
  2132. return err;
  2133. if (!attrs[TIPC_NLA_LINK_NAME])
  2134. return -EINVAL;
  2135. name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
  2136. msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
  2137. if (!msg.skb)
  2138. return -ENOMEM;
  2139. if (strcmp(name, tipc_bclink_name) == 0) {
  2140. err = tipc_nl_add_bc_link(net, &msg);
  2141. if (err)
  2142. goto err_free;
  2143. } else {
  2144. int bearer_id;
  2145. struct tipc_node *node;
  2146. struct tipc_link *link;
  2147. node = tipc_node_find_by_name(net, name, &bearer_id);
  2148. if (!node) {
  2149. err = -EINVAL;
  2150. goto err_free;
  2151. }
  2152. tipc_node_read_lock(node);
  2153. link = node->links[bearer_id].link;
  2154. if (!link) {
  2155. tipc_node_read_unlock(node);
  2156. err = -EINVAL;
  2157. goto err_free;
  2158. }
  2159. err = __tipc_nl_add_link(net, &msg, link, 0);
  2160. tipc_node_read_unlock(node);
  2161. if (err)
  2162. goto err_free;
  2163. }
  2164. return genlmsg_reply(msg.skb, info);
  2165. err_free:
  2166. nlmsg_free(msg.skb);
  2167. return err;
  2168. }
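/* tipc_nl_node_reset_link_stats - netlink handler clearing link statistics
 *
 * Resets the statistics of the link named in TIPC_NLA_LINK_NAME; the
 * broadcast link is handled by tipc_bclink_reset_stats(). The per-link
 * spinlock is held so the reset does not race with packet reception.
 */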
  2169. int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
  2170. {
  2171. int err;
  2172. char *link_name;
  2173. unsigned int bearer_id;
  2174. struct tipc_link *link;
  2175. struct tipc_node *node;
  2176. struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
  2177. struct net *net = sock_net(skb->sk);
  2178. struct tipc_link_entry *le;
  2179. if (!info->attrs[TIPC_NLA_LINK])
  2180. return -EINVAL;
  2181. err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
  2182. info->attrs[TIPC_NLA_LINK],
  2183. tipc_nl_link_policy, info->extack);
  2184. if (err)
  2185. return err;
  2186. if (!attrs[TIPC_NLA_LINK_NAME])
  2187. return -EINVAL;
  2188. link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
  2189. if (strcmp(link_name, tipc_bclink_name) == 0) {
  2190. err = tipc_bclink_reset_stats(net);
  2191. if (err)
  2192. return err;
  2193. return 0;
  2194. }
  2195. node = tipc_node_find_by_name(net, link_name, &bearer_id);
  2196. if (!node)
  2197. return -EINVAL;
  2198. le = &node->links[bearer_id];
  2199. tipc_node_read_lock(node);
  2200. spin_lock_bh(&le->lock);
  2201. link = node->links[bearer_id].link;
  2202. if (!link) {
  2203. spin_unlock_bh(&le->lock);
  2204. tipc_node_read_unlock(node);
  2205. return -EINVAL;
  2206. }
  2207. tipc_link_reset_stats(link);
  2208. spin_unlock_bh(&le->lock);
  2209. tipc_node_read_unlock(node);
  2210. return 0;
  2211. }
  2212. /* Caller should hold node lock */
  2213. static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
  2214. struct tipc_node *node, u32 *prev_link)
  2215. {
  2216. u32 i;
  2217. int err;
  2218. for (i = *prev_link; i < MAX_BEARERS; i++) {
  2219. *prev_link = i;
  2220. if (!node->links[i].link)
  2221. continue;
  2222. err = __tipc_nl_add_link(net, msg,
  2223. node->links[i].link, NLM_F_MULTI);
  2224. if (err)
  2225. return err;
  2226. }
  2227. *prev_link = 0;
  2228. return 0;
  2229. }
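/* tipc_nl_node_dump_link - netlink dump callback listing all links
 *
 * Emits the broadcast link first, then the unicast links of every known
 * node, resuming from the node/link position saved in cb->args[] between
 * calls.
 */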
  2230. int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
  2231. {
  2232. struct net *net = sock_net(skb->sk);
  2233. struct tipc_net *tn = net_generic(net, tipc_net_id);
  2234. struct tipc_node *node;
  2235. struct tipc_nl_msg msg;
  2236. u32 prev_node = cb->args[0];
  2237. u32 prev_link = cb->args[1];
  2238. int done = cb->args[2];
  2239. int err;
  2240. if (done)
  2241. return 0;
  2242. msg.skb = skb;
  2243. msg.portid = NETLINK_CB(cb->skb).portid;
  2244. msg.seq = cb->nlh->nlmsg_seq;
  2245. rcu_read_lock();
  2246. if (prev_node) {
  2247. node = tipc_node_find(net, prev_node);
  2248. if (!node) {
2249. /* We never set seq or call nl_dump_check_consistent();
2250. * this means that setting prev_seq here will cause the
2251. * consistency check to fail in the netlink callback
2252. * handler, resulting in the last NLMSG_DONE message
  2253. * having the NLM_F_DUMP_INTR flag set.
  2254. */
  2255. cb->prev_seq = 1;
  2256. goto out;
  2257. }
  2258. tipc_node_put(node);
  2259. list_for_each_entry_continue_rcu(node, &tn->node_list,
  2260. list) {
  2261. tipc_node_read_lock(node);
  2262. err = __tipc_nl_add_node_links(net, &msg, node,
  2263. &prev_link);
  2264. tipc_node_read_unlock(node);
  2265. if (err)
  2266. goto out;
  2267. prev_node = node->addr;
  2268. }
  2269. } else {
  2270. err = tipc_nl_add_bc_link(net, &msg);
  2271. if (err)
  2272. goto out;
  2273. list_for_each_entry_rcu(node, &tn->node_list, list) {
  2274. tipc_node_read_lock(node);
  2275. err = __tipc_nl_add_node_links(net, &msg, node,
  2276. &prev_link);
  2277. tipc_node_read_unlock(node);
  2278. if (err)
  2279. goto out;
  2280. prev_node = node->addr;
  2281. }
  2282. }
  2283. done = 1;
  2284. out:
  2285. rcu_read_unlock();
  2286. cb->args[0] = prev_node;
  2287. cb->args[1] = prev_link;
  2288. cb->args[2] = done;
  2289. return skb->len;
  2290. }
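/* tipc_nl_node_set_monitor - netlink handler updating monitor settings
 *
 * Only the activation threshold (TIPC_NLA_MON_ACTIVATION_THRESHOLD) is
 * handled here; it is passed on to tipc_nl_monitor_set_threshold().
 */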
  2291. int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
  2292. {
  2293. struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
  2294. struct net *net = sock_net(skb->sk);
  2295. int err;
  2296. if (!info->attrs[TIPC_NLA_MON])
  2297. return -EINVAL;
  2298. err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
  2299. info->attrs[TIPC_NLA_MON],
  2300. tipc_nl_monitor_policy,
  2301. info->extack);
  2302. if (err)
  2303. return err;
  2304. if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
  2305. u32 val;
  2306. val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
  2307. err = tipc_nl_monitor_set_threshold(net, val);
  2308. if (err)
  2309. return err;
  2310. }
  2311. return 0;
  2312. }
  2313. static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
  2314. {
  2315. struct nlattr *attrs;
  2316. void *hdr;
  2317. u32 val;
  2318. hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
  2319. 0, TIPC_NL_MON_GET);
  2320. if (!hdr)
  2321. return -EMSGSIZE;
  2322. attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
  2323. if (!attrs)
  2324. goto msg_full;
  2325. val = tipc_nl_monitor_get_threshold(net);
  2326. if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
  2327. goto attr_msg_full;
  2328. nla_nest_end(msg->skb, attrs);
  2329. genlmsg_end(msg->skb, hdr);
  2330. return 0;
  2331. attr_msg_full:
  2332. nla_nest_cancel(msg->skb, attrs);
  2333. msg_full:
  2334. genlmsg_cancel(msg->skb, hdr);
  2335. return -EMSGSIZE;
  2336. }
  2337. int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
  2338. {
  2339. struct net *net = sock_net(skb->sk);
  2340. struct tipc_nl_msg msg;
  2341. int err;
  2342. msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
  2343. if (!msg.skb)
  2344. return -ENOMEM;
  2345. msg.portid = info->snd_portid;
  2346. msg.seq = info->snd_seq;
  2347. err = __tipc_nl_add_monitor_prop(net, &msg);
  2348. if (err) {
  2349. nlmsg_free(msg.skb);
  2350. return err;
  2351. }
  2352. return genlmsg_reply(msg.skb, info);
  2353. }
  2354. int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
  2355. {
  2356. struct net *net = sock_net(skb->sk);
  2357. u32 prev_bearer = cb->args[0];
  2358. struct tipc_nl_msg msg;
  2359. int bearer_id;
  2360. int err;
  2361. if (prev_bearer == MAX_BEARERS)
  2362. return 0;
  2363. msg.skb = skb;
  2364. msg.portid = NETLINK_CB(cb->skb).portid;
  2365. msg.seq = cb->nlh->nlmsg_seq;
  2366. rtnl_lock();
  2367. for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
  2368. err = __tipc_nl_add_monitor(net, &msg, bearer_id);
  2369. if (err)
  2370. break;
  2371. }
  2372. rtnl_unlock();
  2373. cb->args[0] = bearer_id;
  2374. return skb->len;
  2375. }
  2376. int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
  2377. struct netlink_callback *cb)
  2378. {
  2379. struct net *net = sock_net(skb->sk);
  2380. u32 prev_node = cb->args[1];
  2381. u32 bearer_id = cb->args[2];
  2382. int done = cb->args[0];
  2383. struct tipc_nl_msg msg;
  2384. int err;
  2385. if (!prev_node) {
  2386. struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
  2387. struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
  2388. if (!attrs[TIPC_NLA_MON])
  2389. return -EINVAL;
  2390. err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
  2391. attrs[TIPC_NLA_MON],
  2392. tipc_nl_monitor_policy,
  2393. NULL);
  2394. if (err)
  2395. return err;
  2396. if (!mon[TIPC_NLA_MON_REF])
  2397. return -EINVAL;
  2398. bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
  2399. if (bearer_id >= MAX_BEARERS)
  2400. return -EINVAL;
  2401. }
  2402. if (done)
  2403. return 0;
  2404. msg.skb = skb;
  2405. msg.portid = NETLINK_CB(cb->skb).portid;
  2406. msg.seq = cb->nlh->nlmsg_seq;
  2407. rtnl_lock();
  2408. err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
  2409. if (!err)
  2410. done = 1;
  2411. rtnl_unlock();
  2412. cb->args[0] = done;
  2413. cb->args[1] = prev_node;
  2414. cb->args[2] = bearer_id;
  2415. return skb->len;
  2416. }
  2417. #ifdef CONFIG_TIPC_CRYPTO
  2418. static int tipc_nl_retrieve_key(struct nlattr **attrs,
  2419. struct tipc_aead_key **key)
  2420. {
  2421. struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
  2422. if (!attr)
  2423. return -ENODATA;
  2424. *key = (struct tipc_aead_key *)nla_data(attr);
  2425. if (nla_len(attr) < tipc_aead_key_size(*key))
  2426. return -EINVAL;
  2427. return 0;
  2428. }
  2429. static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
  2430. {
  2431. struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
  2432. if (!attr)
  2433. return -ENODATA;
  2434. if (nla_len(attr) < TIPC_NODEID_LEN)
  2435. return -EINVAL;
  2436. *node_id = (u8 *)nla_data(attr);
  2437. return 0;
  2438. }
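/* __tipc_nl_node_set_key - attach a new AEAD key, cluster-wide or per-node
 *
 * Without a TIPC_NLA_NODE_ID attribute the key is installed as a cluster
 * key on the own TX crypto context. Otherwise it becomes a per-node key:
 * on the TX context if the id matches the own node, or on the peer's RX
 * context, creating a node entry for that peer if none exists yet.
 */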
  2439. static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
  2440. {
  2441. struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
  2442. struct net *net = sock_net(skb->sk);
  2443. struct tipc_net *tn = tipc_net(net);
  2444. struct tipc_node *n = NULL;
  2445. struct tipc_aead_key *ukey;
  2446. struct tipc_crypto *c;
  2447. u8 *id, *own_id;
  2448. int rc = 0;
  2449. if (!info->attrs[TIPC_NLA_NODE])
  2450. return -EINVAL;
  2451. rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
  2452. info->attrs[TIPC_NLA_NODE],
  2453. tipc_nl_node_policy, info->extack);
  2454. if (rc)
  2455. goto exit;
  2456. own_id = tipc_own_id(net);
  2457. if (!own_id) {
  2458. rc = -EPERM;
  2459. goto exit;
  2460. }
  2461. rc = tipc_nl_retrieve_key(attrs, &ukey);
  2462. if (rc)
  2463. goto exit;
  2464. rc = tipc_aead_key_validate(ukey);
  2465. if (rc)
  2466. goto exit;
  2467. rc = tipc_nl_retrieve_nodeid(attrs, &id);
  2468. switch (rc) {
  2469. case -ENODATA:
  2470. /* Cluster key mode */
  2471. rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
  2472. break;
  2473. case 0:
  2474. /* Per-node key mode */
  2475. if (!memcmp(id, own_id, NODE_ID_LEN)) {
  2476. c = tn->crypto_tx;
  2477. } else {
  2478. n = tipc_node_find_by_id(net, id) ?:
  2479. tipc_node_create(net, 0, id, 0xffffu, 0, true);
  2480. if (unlikely(!n)) {
  2481. rc = -ENOMEM;
  2482. break;
  2483. }
  2484. c = n->crypto_rx;
  2485. }
  2486. rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
  2487. if (n)
  2488. tipc_node_put(n);
  2489. break;
  2490. default:
  2491. break;
  2492. }
  2493. exit:
  2494. return (rc < 0) ? rc : 0;
  2495. }
  2496. int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
  2497. {
  2498. int err;
  2499. rtnl_lock();
  2500. err = __tipc_nl_node_set_key(skb, info);
  2501. rtnl_unlock();
  2502. return err;
  2503. }
  2504. static int __tipc_nl_node_flush_key(struct sk_buff *skb,
  2505. struct genl_info *info)
  2506. {
  2507. struct net *net = sock_net(skb->sk);
  2508. struct tipc_net *tn = tipc_net(net);
  2509. struct tipc_node *n;
  2510. tipc_crypto_key_flush(tn->crypto_tx);
  2511. rcu_read_lock();
  2512. list_for_each_entry_rcu(n, &tn->node_list, list)
  2513. tipc_crypto_key_flush(n->crypto_rx);
  2514. rcu_read_unlock();
  2515. pr_info("All keys are flushed!\n");
  2516. return 0;
  2517. }
  2518. int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
  2519. {
  2520. int err;
  2521. rtnl_lock();
  2522. err = __tipc_nl_node_flush_key(skb, info);
  2523. rtnl_unlock();
  2524. return err;
  2525. }
  2526. #endif
  2527. /**
  2528. * tipc_node_dump - dump TIPC node data
  2529. * @n: tipc node to be dumped
  2530. * @more: dump more?
  2531. * - false: dump only tipc node data
  2532. * - true: dump node link data as well
2533. * @buf: buffer to which the formatted dump data is written
  2534. */
  2535. int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
  2536. {
  2537. int i = 0;
  2538. size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
  2539. if (!n) {
  2540. i += scnprintf(buf, sz, "node data: (null)\n");
  2541. return i;
  2542. }
  2543. i += scnprintf(buf, sz, "node data: %x", n->addr);
  2544. i += scnprintf(buf + i, sz - i, " %x", n->state);
  2545. i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
  2546. i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
  2547. i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
  2548. i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
  2549. i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
  2550. i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
  2551. i += scnprintf(buf + i, sz - i, " %u", n->working_links);
  2552. i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
  2553. i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
  2554. if (!more)
  2555. return i;
  2556. i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
  2557. i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
  2558. i += scnprintf(buf + i, sz - i, " media: ");
  2559. i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
  2560. i += scnprintf(buf + i, sz - i, "\n");
  2561. i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
  2562. i += scnprintf(buf + i, sz - i, " inputq: ");
  2563. i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
  2564. i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
  2565. i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
  2566. i += scnprintf(buf + i, sz - i, " media: ");
  2567. i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
  2568. i += scnprintf(buf + i, sz - i, "\n");
  2569. i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
  2570. i += scnprintf(buf + i, sz - i, " inputq: ");
  2571. i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
  2572. i += scnprintf(buf + i, sz - i, "bclink:\n ");
  2573. i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
  2574. return i;
  2575. }
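/* tipc_node_pre_cleanup_net - detach peers from a net namespace on its way out
 * @exit_net: the namespace that is being removed
 *
 * Walks all other namespaces and clears the peer_net and peer_hash_mix
 * fields of any node still pointing at @exit_net, so that no stale
 * references remain once the namespace is destroyed.
 */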
  2576. void tipc_node_pre_cleanup_net(struct net *exit_net)
  2577. {
  2578. struct tipc_node *n;
  2579. struct tipc_net *tn;
  2580. struct net *tmp;
  2581. rcu_read_lock();
  2582. for_each_net_rcu(tmp) {
  2583. if (tmp == exit_net)
  2584. continue;
  2585. tn = tipc_net(tmp);
  2586. if (!tn)
  2587. continue;
  2588. spin_lock_bh(&tn->node_list_lock);
  2589. list_for_each_entry_rcu(n, &tn->node_list, list) {
  2590. if (!n->peer_net)
  2591. continue;
  2592. if (n->peer_net != exit_net)
  2593. continue;
  2594. tipc_node_write_lock(n);
  2595. n->peer_net = NULL;
  2596. n->peer_hash_mix = 0;
  2597. tipc_node_write_unlock_fast(n);
  2598. break;
  2599. }
  2600. spin_unlock_bh(&tn->node_list_lock);
  2601. }
  2602. rcu_read_unlock();
  2603. }