PageRenderTime 52ms CodeModel.GetById 13ms RepoModel.GetById 0ms app.codeStats 0ms

/net/rxrpc/conn_object.c

https://gitlab.com/oyvholm/linux
C | 471 lines | 307 code | 76 blank | 88 comment | 47 complexity | 94c267d0e7cfb6d97392bad70d2393c9 MD5 | raw file
  1. /* RxRPC virtual connection handler, common bits.
  2. *
  3. * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  12. #include <linux/module.h>
  13. #include <linux/slab.h>
  14. #include <linux/net.h>
  15. #include <linux/skbuff.h>
  16. #include "ar-internal.h"
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
/* Shorter expiry (in seconds) applied by the reaper below when the local
 * endpoint's service has been closed (local->service_closed).
 */
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

/* Forward declaration: RCU callback that frees a connection (defined below). */
static void rxrpc_destroy_connection(struct rcu_head *);
  23. static void rxrpc_connection_timer(struct timer_list *timer)
  24. {
  25. struct rxrpc_connection *conn =
  26. container_of(timer, struct rxrpc_connection, timer);
  27. rxrpc_queue_conn(conn);
  28. }
  29. /*
  30. * allocate a new connection
  31. */
  32. struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
  33. {
  34. struct rxrpc_connection *conn;
  35. _enter("");
  36. conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
  37. if (conn) {
  38. INIT_LIST_HEAD(&conn->cache_link);
  39. spin_lock_init(&conn->channel_lock);
  40. INIT_LIST_HEAD(&conn->waiting_calls);
  41. timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
  42. INIT_WORK(&conn->processor, &rxrpc_process_connection);
  43. INIT_LIST_HEAD(&conn->proc_link);
  44. INIT_LIST_HEAD(&conn->link);
  45. skb_queue_head_init(&conn->rx_queue);
  46. conn->security = &rxrpc_no_security;
  47. spin_lock_init(&conn->state_lock);
  48. conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
  49. conn->size_align = 4;
  50. conn->idle_timestamp = jiffies;
  51. }
  52. _leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
  53. return conn;
  54. }
  55. /*
  56. * Look up a connection in the cache by protocol parameters.
  57. *
  58. * If successful, a pointer to the connection is returned, but no ref is taken.
  59. * NULL is returned if there is no match.
  60. *
  61. * The caller must be holding the RCU read lock.
  62. */
  63. struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
  64. struct sk_buff *skb)
  65. {
  66. struct rxrpc_connection *conn;
  67. struct rxrpc_conn_proto k;
  68. struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
  69. struct sockaddr_rxrpc srx;
  70. struct rxrpc_peer *peer;
  71. _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);
  72. if (rxrpc_extract_addr_from_skb(local, &srx, skb) < 0)
  73. goto not_found;
  74. k.epoch = sp->hdr.epoch;
  75. k.cid = sp->hdr.cid & RXRPC_CIDMASK;
  76. /* We may have to handle mixing IPv4 and IPv6 */
  77. if (srx.transport.family != local->srx.transport.family) {
  78. pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n",
  79. srx.transport.family,
  80. local->srx.transport.family);
  81. goto not_found;
  82. }
  83. k.epoch = sp->hdr.epoch;
  84. k.cid = sp->hdr.cid & RXRPC_CIDMASK;
  85. if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) {
  86. /* We need to look up service connections by the full protocol
  87. * parameter set. We look up the peer first as an intermediate
  88. * step and then the connection from the peer's tree.
  89. */
  90. peer = rxrpc_lookup_peer_rcu(local, &srx);
  91. if (!peer)
  92. goto not_found;
  93. conn = rxrpc_find_service_conn_rcu(peer, skb);
  94. if (!conn || atomic_read(&conn->usage) == 0)
  95. goto not_found;
  96. _leave(" = %p", conn);
  97. return conn;
  98. } else {
  99. /* Look up client connections by connection ID alone as their
  100. * IDs are unique for this machine.
  101. */
  102. conn = idr_find(&rxrpc_client_conn_ids,
  103. sp->hdr.cid >> RXRPC_CIDSHIFT);
  104. if (!conn || atomic_read(&conn->usage) == 0) {
  105. _debug("no conn");
  106. goto not_found;
  107. }
  108. if (conn->proto.epoch != k.epoch ||
  109. conn->params.local != local)
  110. goto not_found;
  111. peer = conn->params.peer;
  112. switch (srx.transport.family) {
  113. case AF_INET:
  114. if (peer->srx.transport.sin.sin_port !=
  115. srx.transport.sin.sin_port ||
  116. peer->srx.transport.sin.sin_addr.s_addr !=
  117. srx.transport.sin.sin_addr.s_addr)
  118. goto not_found;
  119. break;
  120. #ifdef CONFIG_AF_RXRPC_IPV6
  121. case AF_INET6:
  122. if (peer->srx.transport.sin6.sin6_port !=
  123. srx.transport.sin6.sin6_port ||
  124. memcmp(&peer->srx.transport.sin6.sin6_addr,
  125. &srx.transport.sin6.sin6_addr,
  126. sizeof(struct in6_addr)) != 0)
  127. goto not_found;
  128. break;
  129. #endif
  130. default:
  131. BUG();
  132. }
  133. _leave(" = %p", conn);
  134. return conn;
  135. }
  136. not_found:
  137. _leave(" = NULL");
  138. return NULL;
  139. }
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must hold the channel_lock and must release the
 * call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	/* The low bits of the call's CID select the channel slot. */
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	/* Only tear the channel down if it still points at this call. */
	if (rcu_access_pointer(chan->call) == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		if (call->abort_code) {
			/* The call was aborted: record the abort for replay. */
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
		} else {
			/* Normal completion: record the final ACK state. */
			chan->last_seq = call->rx_hard_ack;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
		}
		/* Sync with rxrpc_conn_retransmit(): the saved result above
		 * must be visible before last_call advertises it.
		 */
		smp_wmb();
		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;

		rcu_assign_pointer(chan->call, NULL);
	}

	_leave("");
}
/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	/* Copy the call's final congestion window back to the peer record —
	 * presumably so later calls can start from it; confirm against the
	 * congestion-control code.
	 */
	call->peer->cong_cwnd = call->cong_cwnd;

	/* Remove the call from the peer's error-distribution list. */
	spin_lock_bh(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock_bh(&conn->params.peer->lock);

	/* Client calls have their own disconnection path. */
	if (rxrpc_is_client_call(call))
		return rxrpc_disconnect_client_call(call);

	spin_lock(&conn->channel_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&conn->channel_lock);

	call->conn = NULL;
	conn->idle_timestamp = jiffies;	/* restart the idle-expiry clock */
	rxrpc_put_connection(conn);	/* drop the call's ref on the conn */
}
/*
 * Kill off a connection.
 *
 * The connection must already be unused (all four channels clear) and off
 * the client cache list; actual freeing is deferred to an RCU callback.
 */
void rxrpc_kill_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;

	/* No channel may still point at a call. */
	ASSERT(!rcu_access_pointer(conn->channels[0].call) &&
	       !rcu_access_pointer(conn->channels[1].call) &&
	       !rcu_access_pointer(conn->channels[2].call) &&
	       !rcu_access_pointer(conn->channels[3].call));
	ASSERT(list_empty(&conn->cache_link));

	/* Unpublish from the /proc listing. */
	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	/* Drain the Rx queue.  Note that even though we've unpublished, an
	 * incoming packet could still be being added to our Rx queue, so we
	 * will need to drain it again in the RCU cleanup handler.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	/* Leave final destruction to RCU.  The connection processor work item
	 * must carry a ref on the connection to prevent us getting here whilst
	 * it is queued or running.
	 */
	call_rcu(&conn->rcu, rxrpc_destroy_connection);
}
  216. /*
  217. * Queue a connection's work processor, getting a ref to pass to the work
  218. * queue.
  219. */
  220. bool rxrpc_queue_conn(struct rxrpc_connection *conn)
  221. {
  222. const void *here = __builtin_return_address(0);
  223. int n = __atomic_add_unless(&conn->usage, 1, 0);
  224. if (n == 0)
  225. return false;
  226. if (rxrpc_queue_work(&conn->processor))
  227. trace_rxrpc_conn(conn, rxrpc_conn_queued, n + 1, here);
  228. else
  229. rxrpc_put_connection(conn);
  230. return true;
  231. }
  232. /*
  233. * Note the re-emergence of a connection.
  234. */
  235. void rxrpc_see_connection(struct rxrpc_connection *conn)
  236. {
  237. const void *here = __builtin_return_address(0);
  238. if (conn) {
  239. int n = atomic_read(&conn->usage);
  240. trace_rxrpc_conn(conn, rxrpc_conn_seen, n, here);
  241. }
  242. }
  243. /*
  244. * Get a ref on a connection.
  245. */
  246. void rxrpc_get_connection(struct rxrpc_connection *conn)
  247. {
  248. const void *here = __builtin_return_address(0);
  249. int n = atomic_inc_return(&conn->usage);
  250. trace_rxrpc_conn(conn, rxrpc_conn_got, n, here);
  251. }
  252. /*
  253. * Try to get a ref on a connection.
  254. */
  255. struct rxrpc_connection *
  256. rxrpc_get_connection_maybe(struct rxrpc_connection *conn)
  257. {
  258. const void *here = __builtin_return_address(0);
  259. if (conn) {
  260. int n = __atomic_add_unless(&conn->usage, 1, 0);
  261. if (n > 0)
  262. trace_rxrpc_conn(conn, rxrpc_conn_got, n + 1, here);
  263. else
  264. conn = NULL;
  265. }
  266. return conn;
  267. }
  268. /*
  269. * Set the service connection reap timer.
  270. */
  271. static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
  272. unsigned long reap_at)
  273. {
  274. if (rxnet->live)
  275. timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
  276. }
  277. /*
  278. * Release a service connection
  279. */
  280. void rxrpc_put_service_conn(struct rxrpc_connection *conn)
  281. {
  282. const void *here = __builtin_return_address(0);
  283. int n;
  284. n = atomic_dec_return(&conn->usage);
  285. trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
  286. ASSERTCMP(n, >=, 0);
  287. if (n == 1)
  288. rxrpc_set_service_reap_timer(conn->params.local->rxnet,
  289. jiffies + rxrpc_connection_expiry);
  290. }
/*
 * Destroy a virtual connection.
 *
 * RCU callback scheduled by rxrpc_kill_connection(); runs once no RCU reader
 * can still see the connection, so it is safe to free everything.
 */
static void rxrpc_destroy_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);

	_enter("{%d,u=%d}", conn->debug_id, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	/* Stop the timer before freeing, and re-drain the Rx queue in case a
	 * packet arrived after rxrpc_kill_connection() drained it.
	 */
	del_timer_sync(&conn->timer);
	rxrpc_purge_queue(&conn->rx_queue);

	/* Release security state, keys and the peer/local refs. */
	conn->security->clear(conn);
	key_put(conn->params.key);
	key_put(conn->server_key);
	rxrpc_put_peer(conn->params.peer);
	rxrpc_put_local(conn->params.local);

	kfree(conn);
	_leave("");
}
  311. /*
  312. * reap dead service connections
  313. */
  314. void rxrpc_service_connection_reaper(struct work_struct *work)
  315. {
  316. struct rxrpc_connection *conn, *_p;
  317. struct rxrpc_net *rxnet =
  318. container_of(work, struct rxrpc_net, service_conn_reaper);
  319. unsigned long expire_at, earliest, idle_timestamp, now;
  320. LIST_HEAD(graveyard);
  321. _enter("");
  322. now = jiffies;
  323. earliest = now + MAX_JIFFY_OFFSET;
  324. write_lock(&rxnet->conn_lock);
  325. list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
  326. ASSERTCMP(atomic_read(&conn->usage), >, 0);
  327. if (likely(atomic_read(&conn->usage) > 1))
  328. continue;
  329. if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
  330. continue;
  331. if (rxnet->live) {
  332. idle_timestamp = READ_ONCE(conn->idle_timestamp);
  333. expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
  334. if (conn->params.local->service_closed)
  335. expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
  336. _debug("reap CONN %d { u=%d,t=%ld }",
  337. conn->debug_id, atomic_read(&conn->usage),
  338. (long)expire_at - (long)now);
  339. if (time_before(now, expire_at)) {
  340. if (time_before(expire_at, earliest))
  341. earliest = expire_at;
  342. continue;
  343. }
  344. }
  345. /* The usage count sits at 1 whilst the object is unused on the
  346. * list; we reduce that to 0 to make the object unavailable.
  347. */
  348. if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
  349. continue;
  350. trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
  351. if (rxrpc_conn_is_client(conn))
  352. BUG();
  353. else
  354. rxrpc_unpublish_service_conn(conn);
  355. list_move_tail(&conn->link, &graveyard);
  356. }
  357. write_unlock(&rxnet->conn_lock);
  358. if (earliest != now + MAX_JIFFY_OFFSET) {
  359. _debug("reschedule reaper %ld", (long)earliest - (long)now);
  360. ASSERT(time_after(earliest, now));
  361. rxrpc_set_service_reap_timer(rxnet, earliest);
  362. }
  363. while (!list_empty(&graveyard)) {
  364. conn = list_entry(graveyard.next, struct rxrpc_connection,
  365. link);
  366. list_del_init(&conn->link);
  367. ASSERTCMP(atomic_read(&conn->usage), ==, 0);
  368. rxrpc_kill_connection(conn);
  369. }
  370. _leave("");
  371. }
/*
 * Preemptively destroy all the service connection records rather than
 * waiting for them to time out.
 *
 * Called on namespace teardown; any connection still listed after the reaper
 * has been flushed is a leak and triggers BUG_ON.
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	rxrpc_destroy_all_client_connections(rxnet);

	/* Cancel the timer and run the reaper synchronously so that every
	 * idle service connection has been killed before we check for leaks.
	 */
	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, atomic_read(&conn->usage));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	_leave("");
}