
/drivers/infiniband/core/addr.c

https://bitbucket.org/ndreys/linux-sunxi
/*
 * Copyright (c) 2005 Voltaire Inc.  All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <net/arp.h>
#include <net/neighbour.h>
#include <net/route.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/ip6_route.h>
#include <rdma/ib_addr.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("IB Address Translation");
MODULE_LICENSE("Dual BSD/GPL");

struct addr_req {
	struct list_head list;
	struct sockaddr_storage src_addr;
	struct sockaddr_storage dst_addr;
	struct rdma_dev_addr *addr;
	struct rdma_addr_client *client;
	void *context;
	void (*callback)(int status, struct sockaddr *src_addr,
			 struct rdma_dev_addr *addr, void *context);
	unsigned long timeout;
	int status;
};

static void process_req(struct work_struct *work);

static DEFINE_MUTEX(lock);
static LIST_HEAD(req_list);
static DECLARE_DELAYED_WORK(work, process_req);
static struct workqueue_struct *addr_wq;

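/*
 * Clients take a reference on registration; rdma_addr_unregister_client()
 * drops it and waits until all outstanding requests have released theirs.
 */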
void rdma_addr_register_client(struct rdma_addr_client *client)
{
	atomic_set(&client->refcount, 1);
	init_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_register_client);

static inline void put_client(struct rdma_addr_client *client)
{
	if (atomic_dec_and_test(&client->refcount))
		complete(&client->comp);
}

void rdma_addr_unregister_client(struct rdma_addr_client *client)
{
	put_client(client);
	wait_for_completion(&client->comp);
}
EXPORT_SYMBOL(rdma_addr_unregister_client);

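/*
 * Fill an rdma_dev_addr from a net_device: device type, source and
 * broadcast hardware addresses, and the bound interface index.  The
 * destination hardware address is copied only when one is supplied.
 */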
int rdma_copy_addr(struct rdma_dev_addr *dev_addr, struct net_device *dev,
		     const unsigned char *dst_dev_addr)
{
	dev_addr->dev_type = dev->type;
	memcpy(dev_addr->src_dev_addr, dev->dev_addr, MAX_ADDR_LEN);
	memcpy(dev_addr->broadcast, dev->broadcast, MAX_ADDR_LEN);
	if (dst_dev_addr)
		memcpy(dev_addr->dst_dev_addr, dst_dev_addr, MAX_ADDR_LEN);
	dev_addr->bound_dev_if = dev->ifindex;
	return 0;
}
EXPORT_SYMBOL(rdma_copy_addr);

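/*
 * Translate a local IP address into the L2 information of the net_device
 * that owns it.  A request already bound to an interface uses that
 * interface directly; otherwise the address is matched against the local
 * IPv4 or IPv6 address tables.
 */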
int rdma_translate_ip(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
{
	struct net_device *dev;
	int ret = -EADDRNOTAVAIL;

	if (dev_addr->bound_dev_if) {
		dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
		if (!dev)
			return -ENODEV;
		ret = rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		return ret;
	}

	switch (addr->sa_family) {
	case AF_INET:
		dev = ip_dev_find(&init_net,
			((struct sockaddr_in *) addr)->sin_addr.s_addr);

		if (!dev)
			return ret;

		ret = rdma_copy_addr(dev_addr, dev, NULL);
		dev_put(dev);
		break;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case AF_INET6:
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (ipv6_chk_addr(&init_net,
					  &((struct sockaddr_in6 *) addr)->sin6_addr,
					  dev, 1)) {
				ret = rdma_copy_addr(dev_addr, dev, NULL);
				break;
			}
		}
		rcu_read_unlock();
		break;
#endif
	}
	return ret;
}
EXPORT_SYMBOL(rdma_translate_ip);

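/* Re-arm the delayed work so it fires by 'time', at least one jiffy from now. */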
static void set_timeout(unsigned long time)
{
	unsigned long delay;

	cancel_delayed_work(&work);

	delay = time - jiffies;
	if ((long)delay <= 0)
		delay = 1;

	queue_delayed_work(addr_wq, &work, delay);
}

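/*
 * Insert the request into req_list, which is kept ordered by timeout, and
 * re-arm the work item if the new request is now at the head of the list.
 */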
static void queue_req(struct addr_req *req)
{
	struct addr_req *temp_req;

	mutex_lock(&lock);
	list_for_each_entry_reverse(temp_req, &req_list, list) {
		if (time_after_eq(req->timeout, temp_req->timeout))
			break;
	}

	list_add(&req->list, &temp_req->list);

	if (req_list.next == &req->list)
		set_timeout(req->timeout);
	mutex_unlock(&lock);
}

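/*
 * Resolve an IPv4 destination to a hardware address: route the destination,
 * handle loopback and NOARP devices directly, otherwise look up the
 * neighbour entry for the next hop.  Returns -ENODATA while neighbour
 * discovery is still in progress.
 */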
static int addr4_resolve(struct sockaddr_in *src_in,
			 struct sockaddr_in *dst_in,
			 struct rdma_dev_addr *addr)
{
	__be32 src_ip = src_in->sin_addr.s_addr;
	__be32 dst_ip = dst_in->sin_addr.s_addr;
	struct rtable *rt;
	struct neighbour *neigh;
	struct flowi4 fl4;
	int ret;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst_ip;
	fl4.saddr = src_ip;
	fl4.flowi4_oif = addr->bound_dev_if;
	rt = ip_route_output_key(&init_net, &fl4);
	if (IS_ERR(rt)) {
		ret = PTR_ERR(rt);
		goto out;
	}
	src_in->sin_family = AF_INET;
	src_in->sin_addr.s_addr = fl4.saddr;

	if (rt->dst.dev->flags & IFF_LOOPBACK) {
		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
		goto put;
	}

	/* If the device does ARP internally, return 'done' */
	if (rt->dst.dev->flags & IFF_NOARP) {
		ret = rdma_copy_addr(addr, rt->dst.dev, NULL);
		goto put;
	}

	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
		rcu_read_lock();
		neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
		rcu_read_unlock();
		ret = -ENODATA;
		if (neigh)
			goto release;
		goto put;
	}

	ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
release:
	neigh_release(neigh);
put:
	ip_rt_put(rt);
out:
	return ret;
}

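/*
 * IPv6 counterpart of addr4_resolve(): route the destination, select a
 * source address if none was supplied, then use the neighbour attached to
 * the route.  Without IPv6 support this compiles to a stub that returns
 * -EADDRNOTAVAIL.
 */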
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr)
{
	struct flowi6 fl6;
	struct neighbour *neigh;
	struct dst_entry *dst;
	int ret;

	memset(&fl6, 0, sizeof fl6);
	ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
	ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
	fl6.flowi6_oif = addr->bound_dev_if;

	dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((ret = dst->error))
		goto put;

	if (ipv6_addr_any(&fl6.saddr)) {
		ret = ipv6_dev_get_saddr(&init_net, ip6_dst_idev(dst)->dev,
					 &fl6.daddr, 0, &fl6.saddr);
		if (ret)
			goto put;

		src_in->sin6_family = AF_INET6;
		ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
	}

	if (dst->dev->flags & IFF_LOOPBACK) {
		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
		if (!ret)
			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
		goto put;
	}

	/* If the device does ARP internally, return 'done' */
	if (dst->dev->flags & IFF_NOARP) {
		ret = rdma_copy_addr(addr, dst->dev, NULL);
		goto put;
	}

	rcu_read_lock();
	neigh = dst_get_neighbour(dst);
	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
		if (neigh)
			neigh_event_send(neigh, NULL);
		ret = -ENODATA;
	} else {
		ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
	}
	rcu_read_unlock();
put:
	dst_release(dst);
	return ret;
}
#else
static int addr6_resolve(struct sockaddr_in6 *src_in,
			 struct sockaddr_in6 *dst_in,
			 struct rdma_dev_addr *addr)
{
	return -EADDRNOTAVAIL;
}
#endif

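/* Dispatch to the IPv4 or IPv6 resolver based on the source address family. */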
static int addr_resolve(struct sockaddr *src_in,
			struct sockaddr *dst_in,
			struct rdma_dev_addr *addr)
{
	if (src_in->sa_family == AF_INET) {
		return addr4_resolve((struct sockaddr_in *) src_in,
			(struct sockaddr_in *) dst_in, addr);
	} else
		return addr6_resolve((struct sockaddr_in6 *) src_in,
			(struct sockaddr_in6 *) dst_in, addr);
}

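/*
 * Delayed-work handler: retry every request still waiting on neighbour
 * discovery, fail the ones whose timeout has expired, and invoke the
 * callbacks of completed requests after dropping the list mutex.
 */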
static void process_req(struct work_struct *work)
{
	struct addr_req *req, *temp_req;
	struct sockaddr *src_in, *dst_in;
	struct list_head done_list;

	INIT_LIST_HEAD(&done_list);

	mutex_lock(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->status == -ENODATA) {
			src_in = (struct sockaddr *) &req->src_addr;
			dst_in = (struct sockaddr *) &req->dst_addr;
			req->status = addr_resolve(src_in, dst_in, req->addr);
			if (req->status && time_after_eq(jiffies, req->timeout))
				req->status = -ETIMEDOUT;
			else if (req->status == -ENODATA)
				continue;
		}
		list_move_tail(&req->list, &done_list);
	}

	if (!list_empty(&req_list)) {
		req = list_entry(req_list.next, struct addr_req, list);
		set_timeout(req->timeout);
	}
	mutex_unlock(&lock);

	list_for_each_entry_safe(req, temp_req, &done_list, list) {
		list_del(&req->list);
		req->callback(req->status, (struct sockaddr *) &req->src_addr,
			req->addr, req->context);
		put_client(req->client);
		kfree(req);
	}
}

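/*
 * Begin resolving an IP address to a hardware address.  Resolution is
 * attempted once here; if the neighbour entry is not yet valid, the request
 * is queued and the callback is invoked later from the ib_addr workqueue,
 * once resolution completes or timeout_ms expires.
 */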
int rdma_resolve_ip(struct rdma_addr_client *client,
		    struct sockaddr *src_addr, struct sockaddr *dst_addr,
		    struct rdma_dev_addr *addr, int timeout_ms,
		    void (*callback)(int status, struct sockaddr *src_addr,
				     struct rdma_dev_addr *addr, void *context),
		    void *context)
{
	struct sockaddr *src_in, *dst_in;
	struct addr_req *req;
	int ret = 0;

	req = kzalloc(sizeof *req, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	src_in = (struct sockaddr *) &req->src_addr;
	dst_in = (struct sockaddr *) &req->dst_addr;

	if (src_addr) {
		if (src_addr->sa_family != dst_addr->sa_family) {
			ret = -EINVAL;
			goto err;
		}

		memcpy(src_in, src_addr, ip_addr_size(src_addr));
	} else {
		src_in->sa_family = dst_addr->sa_family;
	}

	memcpy(dst_in, dst_addr, ip_addr_size(dst_addr));
	req->addr = addr;
	req->callback = callback;
	req->context = context;
	req->client = client;
	atomic_inc(&client->refcount);

	req->status = addr_resolve(src_in, dst_in, addr);
	switch (req->status) {
	case 0:
		req->timeout = jiffies;
		queue_req(req);
		break;
	case -ENODATA:
		req->timeout = msecs_to_jiffies(timeout_ms) + jiffies;
		queue_req(req);
		break;
	default:
		ret = req->status;
		atomic_dec(&client->refcount);
		goto err;
	}
	return ret;
err:
	kfree(req);
	return ret;
}
EXPORT_SYMBOL(rdma_resolve_ip);

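/*
 * Cancel an outstanding request: mark it -ECANCELED, move it to the head of
 * the list and kick the work item so its callback runs promptly.
 */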
void rdma_addr_cancel(struct rdma_dev_addr *addr)
{
	struct addr_req *req, *temp_req;

	mutex_lock(&lock);
	list_for_each_entry_safe(req, temp_req, &req_list, list) {
		if (req->addr == addr) {
			req->status = -ECANCELED;
			req->timeout = jiffies;
			list_move(&req->list, &req_list);
			set_timeout(req->timeout);
			break;
		}
	}
	mutex_unlock(&lock);
}
EXPORT_SYMBOL(rdma_addr_cancel);

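/*
 * A neighbour entry becoming valid may satisfy pending requests, so kick
 * the work item as soon as such a netevent arrives.
 */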
static int netevent_callback(struct notifier_block *self, unsigned long event,
	void *ctx)
{
	if (event == NETEVENT_NEIGH_UPDATE) {
		struct neighbour *neigh = ctx;

		if (neigh->nud_state & NUD_VALID) {
			set_timeout(jiffies);
		}
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = netevent_callback
};

static int __init addr_init(void)
{
	addr_wq = create_singlethread_workqueue("ib_addr");
	if (!addr_wq)
		return -ENOMEM;

	register_netevent_notifier(&nb);
	return 0;
}

static void __exit addr_cleanup(void)
{
	unregister_netevent_notifier(&nb);
	destroy_workqueue(addr_wq);
}

module_init(addr_init);
module_exit(addr_cleanup);