/drivers/infiniband/core/addr.c

https://bitbucket.org/ndreys/linux-sunxi · C · 460 lines · 365 code · 59 blank · 36 comment · 52 complexity · 24b8e7b55c39d8eded34f03b18477b4d MD5 · raw file

  1. /*
  2. * Copyright (c) 2005 Voltaire Inc. All rights reserved.
  3. * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
  4. * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
  5. * Copyright (c) 2005 Intel Corporation. All rights reserved.
  6. *
  7. * This software is available to you under a choice of one of two
  8. * licenses. You may choose to be licensed under the terms of the GNU
  9. * General Public License (GPL) Version 2, available from the file
  10. * COPYING in the main directory of this source tree, or the
  11. * OpenIB.org BSD license below:
  12. *
  13. * Redistribution and use in source and binary forms, with or
  14. * without modification, are permitted provided that the following
  15. * conditions are met:
  16. *
  17. * - Redistributions of source code must retain the above
  18. * copyright notice, this list of conditions and the following
  19. * disclaimer.
  20. *
  21. * - Redistributions in binary form must reproduce the above
  22. * copyright notice, this list of conditions and the following
  23. * disclaimer in the documentation and/or other materials
  24. * provided with the distribution.
  25. *
  26. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  27. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  28. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  29. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  30. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  31. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  32. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  33. * SOFTWARE.
  34. */
  35. #include <linux/mutex.h>
  36. #include <linux/inetdevice.h>
  37. #include <linux/slab.h>
  38. #include <linux/workqueue.h>
  39. #include <net/arp.h>
  40. #include <net/neighbour.h>
  41. #include <net/route.h>
  42. #include <net/netevent.h>
  43. #include <net/addrconf.h>
  44. #include <net/ip6_route.h>
  45. #include <rdma/ib_addr.h>
/* Module identification; dual-licensed per the header above. */
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("IB Address Translation");
MODULE_LICENSE("Dual BSD/GPL");
/*
 * One outstanding address-resolution request.  Requests sit on the global
 * req_list (sorted by ascending timeout, see queue_req()) until they
 * resolve, time out, or are canceled; process_req() then invokes
 * ->callback and frees the request.
 */
struct addr_req {
	struct list_head list;		/* entry on req_list, protected by 'lock' */
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;	/* caller-owned result buffer */
	struct rdma_addr_client *client; /* holds a client reference while queued */
	void *context;			/* opaque cookie handed back to callback */
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;		/* jiffies deadline for resolution */
	int status;			/* -ENODATA while still pending */
};
static void process_req(struct work_struct *work);

/* Protects req_list and serializes re-arming of the delayed work. */
static DEFINE_MUTEX(lock);
/* Pending requests, kept sorted by ascending timeout (see queue_req()). */
static LIST_HEAD(req_list);
/* Delayed work that scans req_list; re-armed via set_timeout(). */
static DECLARE_DELAYED_WORK(work, process_req);
/* Single-threaded workqueue the delayed work runs on. */
static struct workqueue_struct *addr_wq;
/*
 * Prepare a client for use with rdma_resolve_ip(): one initial reference
 * plus a completion that fires when the last reference is dropped (see
 * put_client() / rdma_addr_unregister_client()).
 */
void rdma_addr_register_client(struct rdma_addr_client *client)
{
	atomic_set(&client->refcount, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_register_client);
/* Drop a client reference; signal ->comp when the last one goes away. */
static inline void put_client(struct rdma_addr_client *client)
{
	if (atomic_dec_and_test(&client->refcount))
		complete(&client->comp);
}
/*
 * Drop the initial reference taken at registration, then block until every
 * in-flight request holding a reference to this client has completed.
 */
void rdma_addr_unregister_client(struct rdma_addr_client *client)
{
	put_client(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_unregister_client);
  83. int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
  84. const unsigned char *dst_dev_addr)
  85. {
  86. dev_addr->dev_type = dev->type;
  87. memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
  88. memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
  89. if (dst_dev_addr)
  90. memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
  91. dev_addr->bound_dev_if = dev->ifindex;
  92. return 0;
  93. }
  94. EXPORT_SYMBOL(rdma_copy_addr);
/*
 * rdma_translate_ip - map a local IP address to its net_device's L2 info.
 * @addr:     local IPv4 or IPv6 address to look up.
 * @dev_addr: output, filled via rdma_copy_addr() from the owning device.
 *
 * If dev_addr->bound_dev_if is already set, that interface is used and
 * @addr is ignored.  Returns 0 on success, -ENODEV if the bound interface
 * does not exist, or -EADDRNOTAVAIL if no local interface owns @addr.
 */
int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;
	int ret = -EADDRNOTAVAIL;

	/* An explicit device binding overrides the address lookup. */
	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		ret = rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		return ret;
	}

	switch (addr->sa_family) {
	case AF_INET:
		dev = ip_dev_find(&init_net,
			((struct sockaddr_in *) addr)->sin_addr.s_addr);
		if (!dev)
			return ret;
		ret = rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		/* No IPv6 ip_dev_find() here: scan all devices for the address. */
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (ipv6_chk_addr(&init_net,
					  &((struct sockaddr_in6 *) addr)->sin6_addr,
					  dev, 1)) {
				ret = rdma_copy_addr(dev_addr, dev, NULL);
				break;
			}
		}
		rcu_read_unlock();
		break;
#endif
	}
	return ret;
}
EXPORT_SYMBOL(rdma_translate_ip);
  134. static void set_timeout(unsigned long time)
  135. {
  136. unsigned long delay;
  137. cancel_delayed_work(&work);
  138. delay = time - jiffies;
  139. if ((long)delay <= 0)
  140. delay = 1;
  141. queue_delayed_work(addr_wq, &work, delay);
  142. }
/*
 * Insert @req into req_list, keeping the list sorted by ascending timeout.
 * If the new request becomes the head (earliest deadline), re-arm the
 * work timer for it.
 */
static void queue_req(struct addr_req *req)
{
	struct addr_req *temp_req;

	mutex_lock(&lock);
	/* Walk backwards: new requests usually carry the latest timeout. */
	list_for_each_entry_reverse(temp_req, &req_list, list) {
		if (time_after_eq(req->timeout, temp_req->timeout))
			break;
	}
	list_add(&req->list, &temp_req->list);

	/* Only the list head determines when the work must next run. */
	if (req_list.next == &req->list)
		set_timeout(req->timeout);
	mutex_unlock(&lock);
}
/*
 * Resolve an IPv4 destination to a hardware (L2) address.
 *
 * Routes dst_ip (constrained by src_ip / the bound interface), then:
 *  - loopback routes: reuse our own L2 address as the destination;
 *  - NOARP devices:   no neighbour entry needed, copy device info only;
 *  - otherwise:       consult the ARP table; if the entry is missing or
 *                     not yet valid, trigger neighbour resolution and
 *                     return -ENODATA so process_req() retries later.
 *
 * On success the route-selected source address is written back into
 * *src_in.  Returns 0 or a negative errno.
 */
static int addr4_resolve(struct sockaddr_in *src_in,
			 struct sockaddr_in *dst_in,
			 struct rdma_dev_addr *addr)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct neighbour *neigh;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(&init_net, &fl4);
	if (IS_ERR(rt)) {
		ret = PTR_ERR(rt);
		goto out;
	}
	/* Record the source address the routing decision selected. */
	src_in->sin_family = AF_INET;
	src_in->sin_addr.s_addr = fl4.saddr;

	if (rt->dst.dev->flags & IFF_LOOPBACK) {
		/* Talking to ourselves: dst L2 address equals src L2 address. */
		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
		goto put;
	}

	/* If the device does ARP internally, return 'done' */
	if (rt->dst.dev->flags & IFF_NOARP) {
		ret = rdma_copy_addr(addr, rt->dst.dev, NULL);
		goto put;
	}

	/* neigh_lookup() takes a reference; dropped at the release label. */
	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
		/* Kick off ARP; the netevent notifier re-runs the work
		 * once the neighbour entry becomes valid. */
		rcu_read_lock();
		neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
		rcu_read_unlock();
		ret = -ENODATA;
		if (neigh)
			goto release;
		goto put;
	}

	ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
	neigh_release(neigh);
put:
	ip_rt_put(rt);
out:
	return ret;
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
/*
 * IPv6 counterpart of addr4_resolve(): route the destination, choose a
 * source address if the caller left it unspecified, then resolve the
 * neighbour entry for the route.  Returns 0 on success, -ENODATA while
 * neighbour discovery is still in progress, or another negative errno.
 */
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr)
{
	struct flowi6 fl6;
	struct neighbour *neigh;
	struct dst_entry *dst;
	int ret;

	memset(&fl6, 0, sizeof fl6);
	ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
	ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((ret = dst->error))
		goto put;

	/* No source given: have the stack pick one for this destination. */
	if (ipv6_addr_any(&fl6.saddr)) {
		ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
					 &fl6.daddr, 0, &fl6.saddr);
		if (ret)
			goto put;

		src_in->sin6_family = AF_INET6;
		ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
	}

	if (dst->dev->flags & IFF_LOOPBACK) {
		/* Loopback: destination L2 address equals our own. */
		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
		goto put;
	}

	/* If the device does ARP internally, return 'done' */
	if (dst->dev->flags & IFF_NOARP) {
		ret = rdma_copy_addr(addr, dst->dev, NULL);
		goto put;
	}

	/* The dst's neighbour pointer is only valid under RCU. */
	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
		/* Trigger ND; process_req() retries on -ENODATA. */
		if (neigh)
			neigh_event_send(neigh, NULL);
		ret = -ENODATA;
	} else {
		ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
	}
	rcu_read_unlock();
put:
	dst_release(dst);
	return ret;
}
#else
/* IPv6 support compiled out: IPv6 destinations cannot be resolved. */
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr)
{
	return -EADDRNOTAVAIL;
}
#endif
  263. static int addr_resolve(struct sockaddr *src_in,
  264. struct sockaddr *dst_in,
  265. struct rdma_dev_addr *addr)
  266. {
  267. if (src_in->sa_family == AF_INET) {
  268. return addr4_resolve((struct sockaddr_in *) src_in,
  269. (struct sockaddr_in *) dst_in, addr);
  270. } else
  271. return addr6_resolve((struct sockaddr_in6 *) src_in,
  272. (struct sockaddr_in6 *) dst_in, addr);
  273. }
/*
 * Delayed-work handler: retry every still-pending (-ENODATA) request,
 * collect those that resolved, timed out, or were canceled onto a local
 * list, re-arm the timer for whatever remains, then deliver callbacks
 * outside of 'lock'.
 */
static void process_req(struct work_struct *work)
{
	struct addr_req *req, *temp_req;
	struct sockaddr *src_in, *dst_in;
	struct list_head done_list;

	INIT_LIST_HEAD(&done_list);

	mutex_lock(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->status == -ENODATA) {
			src_in = (struct sockaddr *) &req->src_addr;
			dst_in = (struct sockaddr *) &req->dst_addr;
			req->status = addr_resolve(src_in, dst_in, req->addr);
			if (req->status && time_after_eq(jiffies, req->timeout))
				req->status = -ETIMEDOUT;
			else if (req->status == -ENODATA)
				/* Still waiting on neighbour discovery. */
				continue;
		}
		/* Finished (success, error, timeout, or canceled). */
		list_move_tail(&req->list, &done_list);
	}

	/* Head of the sorted list carries the next deadline. */
	if (!list_empty(&req_list)) {
		req = list_entry(req_list.next, struct addr_req, list);
		set_timeout(req->timeout);
	}
	mutex_unlock(&lock);

	/* Deliver completions without holding the mutex. */
	list_for_each_entry_safe(req, temp_req, &done_list, list) {
		list_del(&req->list);
		req->callback(req->status, (struct sockaddr *) &req->src_addr,
			      req->addr, req->context);
		put_client(req->client);
		kfree(req);
	}
}
/*
 * rdma_resolve_ip - asynchronously resolve an IP address to L2 addressing.
 * @client:     registered client; a reference is held until completion.
 * @src_addr:   optional source address; must match @dst_addr's family.
 * @dst_addr:   destination IP address to resolve.
 * @addr:       caller-owned result structure passed to @callback.
 * @timeout_ms: how long to keep retrying before reporting -ETIMEDOUT.
 * @callback:   invoked once from the workqueue with the final status.
 * @context:    opaque cookie handed back to @callback.
 *
 * Returns 0 if the request was accepted (the callback will fire via
 * process_req(), even on immediate success), or a negative errno for
 * immediate failures (in which case the callback is never called).
 */
int rdma_resolve_ip(struct rdma_addr_client *client,
		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, int timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}
		memcpy(src_in, src_addr, ip_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->client = client;
	/* Reference dropped by put_client() in process_req(). */
	atomic_inc(&client->refcount);

	req->status = addr_resolve(src_in, dst_in, addr);
	switch (req->status) {
	case 0:
		/* Resolved immediately: queue with an already-expired
		 * deadline so the callback is delivered right away. */
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		/* Neighbour discovery in progress; retry until timeout. */
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		atomic_dec(&client->refcount);
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);
/*
 * rdma_addr_cancel - cancel an outstanding rdma_resolve_ip() request.
 * @addr: the rdma_dev_addr that was passed to rdma_resolve_ip().
 *
 * Marks the matching request -ECANCELED and expedites it; the callback
 * still runs (from process_req(), with -ECANCELED status).
 */
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;

	mutex_lock(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			req->status = -ECANCELED;
			req->timeout = jiffies;
			/* Move to the list head so process_req() sees it first. */
			list_move(&req->list, &req_list);
			set_timeout(req->timeout);
			break;
		}
	}
	mutex_unlock(&lock);
}
EXPORT_SYMBOL(rdma_addr_cancel);
  373. static int netevent_callback(struct notifier_block *self, unsigned long event,
  374. void *ctx)
  375. {
  376. if (event == NETEVENT_NEIGH_UPDATE) {
  377. struct neighbour *neigh = ctx;
  378. if (neigh->nud_state & NUD_VALID) {
  379. set_timeout(jiffies);
  380. }
  381. }
  382. return 0;
  383. }
/* Registered in addr_init(); wakes the work on neighbour updates. */
static struct notifier_block nb = {
	.notifier_call = netevent_callback
};
  387. static int __init addr_init(void)
  388. {
  389. addr_wq = create_singlethread_workqueue("ib_addr");
  390. if (!addr_wq)
  391. return -ENOMEM;
  392. register_netevent_notifier(&nb);
  393. return 0;
  394. }
/* Module exit: tear down in reverse order of addr_init(). */
static void __exit addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
}

module_init(addr_init);
module_exit(addr_cleanup);