
/drivers/infiniband/core/fmr_pool.c

https://bitbucket.org/evzijst/gittest
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: fmr_pool.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <ib_fmr_pool.h>

#include "core_priv.h"

enum {
	IB_FMR_MAX_REMAPS = 32,

	IB_FMR_HASH_BITS  = 8,
	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
 * ib_fmr_pool_map_phys() occurs before the FMR is remapped.  In
 * this case we just increment the ref_count and remove the FMR from
 * free_list/dirty_list.
 *
 * Before we remap an FMR from free_list, we remove it from the cache
 * (to prevent another user from obtaining a stale FMR).  When an FMR
 * is released, we add it to the tail of the free list, so that our
 * cache eviction policy is "least recently used."
 *
 * All manipulation of ref_count, list and cache_node is protected by
 * pool_lock to maintain consistency.
 */
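
/*
 * Consumer-visible effect of the cache described above -- a minimal
 * sketch, assuming the pool was created with params->cache set.  The
 * names "fmr_pool", "dma_pages", and "npages" are hypothetical:
 *
 *	struct ib_pool_fmr *a, *b;
 *	u64 iova = dma_pages[0];
 *
 *	a = ib_fmr_pool_map_phys(fmr_pool, dma_pages, npages, &iova);
 *	ib_fmr_pool_unmap(a);	(ref_count drops to 0, FMR stays cached)
 *	b = ib_fmr_pool_map_phys(fmr_pool, dma_pages, npages, &iova);
 *
 * Provided the cleanup thread has not yet remapped or unmapped it,
 * b == a: the FMR is "revived" from the cache rather than consuming a
 * fresh mapping.
 */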

struct ib_fmr_pool {
	spinlock_t                pool_lock;

	int                       pool_size;
	int                       max_pages;
	int                       dirty_watermark;
	int                       dirty_len;
	struct list_head          free_list;
	struct list_head          dirty_list;
	struct hlist_head        *cache_bucket;

	void                     (*flush_function)(struct ib_fmr_pool *pool,
						   void *              arg);
	void                     *flush_arg;

	struct task_struct       *thread;

	atomic_t                  req_ser;
	atomic_t                  flush_ser;

	wait_queue_head_t         force_wait;
};

/* Hash the first page of a mapping; the mask keeps the result within
 * the IB_FMR_HASH_SIZE cache_bucket array. */
static inline u32 ib_fmr_hash(u64 first_page)
{
	return jhash_2words((u32) first_page,
			    (u32) (first_page >> 32),
			    0) & IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
						      u64 *page_list,
						      int  page_list_len,
						      u64  io_virtual_address)
{
	struct hlist_head *bucket;
	struct ib_pool_fmr *fmr;
	struct hlist_node *pos;

	if (!pool->cache_bucket)
		return NULL;

	bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

	hlist_for_each_entry(fmr, pos, bucket, cache_node)
		if (io_virtual_address == fmr->io_virtual_address &&
		    page_list_len      == fmr->page_list_len      &&
		    !memcmp(page_list, fmr->page_list,
			    page_list_len * sizeof *page_list))
			return fmr;

	return NULL;
}

static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
	int                 ret;
	struct ib_pool_fmr *fmr;
	LIST_HEAD(unmap_list);
	LIST_HEAD(fmr_list);

	spin_lock_irq(&pool->pool_lock);

	list_for_each_entry(fmr, &pool->dirty_list, list) {
		hlist_del_init(&fmr->cache_node);
		fmr->remap_count = 0;
		list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
		if (fmr->ref_count != 0) {
			printk(KERN_WARNING "Unmapping FMR %p with ref count %d\n",
			       fmr, fmr->ref_count);
		}
#endif
	}

	list_splice(&pool->dirty_list, &unmap_list);
	INIT_LIST_HEAD(&pool->dirty_list);
	pool->dirty_len = 0;

	spin_unlock_irq(&pool->pool_lock);

	if (list_empty(&unmap_list))
		return;

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		printk(KERN_WARNING "ib_unmap_fmr returned %d\n", ret);

	spin_lock_irq(&pool->pool_lock);
	list_splice(&unmap_list, &pool->free_list);
	spin_unlock_irq(&pool->pool_lock);
}

static int ib_fmr_cleanup_thread(void *pool_ptr)
{
	struct ib_fmr_pool *pool = pool_ptr;

	do {
		if (pool->dirty_len >= pool->dirty_watermark ||
		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
			ib_fmr_batch_release(pool);

			atomic_inc(&pool->flush_ser);
			wake_up_interruptible(&pool->force_wait);

			if (pool->flush_function)
				pool->flush_function(pool, pool->flush_arg);
		}

		set_current_state(TASK_INTERRUPTIBLE);
		if (pool->dirty_len < pool->dirty_watermark &&
		    atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
		    !kthread_should_stop())
			schedule();
		__set_current_state(TASK_RUNNING);
	} while (!kthread_should_stop());

	return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd: Protection domain for FMRs
 * @params: FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
				       struct ib_fmr_pool_param *params)
{
	struct ib_device   *device;
	struct ib_fmr_pool *pool;
	int i;
	int ret;

	if (!params)
		return ERR_PTR(-EINVAL);

	device = pd->device;
	if (!device->alloc_fmr    || !device->dealloc_fmr  ||
	    !device->map_phys_fmr || !device->unmap_fmr) {
		printk(KERN_WARNING "Device %s does not support fast memory regions\n",
		       device->name);
		return ERR_PTR(-ENOSYS);
	}

	pool = kmalloc(sizeof *pool, GFP_KERNEL);
	if (!pool) {
		printk(KERN_WARNING "couldn't allocate pool struct\n");
		return ERR_PTR(-ENOMEM);
	}

	pool->cache_bucket   = NULL;

	pool->flush_function = params->flush_function;
	pool->flush_arg      = params->flush_arg;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->dirty_list);

	if (params->cache) {
		pool->cache_bucket =
			kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
				GFP_KERNEL);
		if (!pool->cache_bucket) {
			printk(KERN_WARNING "Failed to allocate cache in pool\n");
			ret = -ENOMEM;
			goto out_free_pool;
		}

		for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
			INIT_HLIST_HEAD(pool->cache_bucket + i);
	}

	pool->pool_size       = 0;
	pool->max_pages       = params->max_pages_per_fmr;
	pool->dirty_watermark = params->dirty_watermark;
	pool->dirty_len       = 0;
	spin_lock_init(&pool->pool_lock);
	atomic_set(&pool->req_ser,   0);
	atomic_set(&pool->flush_ser, 0);
	init_waitqueue_head(&pool->force_wait);

	pool->thread = kthread_create(ib_fmr_cleanup_thread,
				      pool,
				      "ib_fmr(%s)",
				      device->name);
	if (IS_ERR(pool->thread)) {
		printk(KERN_WARNING "couldn't start cleanup thread\n");
		ret = PTR_ERR(pool->thread);
		goto out_free_pool;
	}

	{
		struct ib_pool_fmr *fmr;
		struct ib_fmr_attr attr = {
			.max_pages = params->max_pages_per_fmr,
			.max_maps  = IB_FMR_MAX_REMAPS,
			.page_size = PAGE_SHIFT
		};

		for (i = 0; i < params->pool_size; ++i) {
			fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64),
				      GFP_KERNEL);
			if (!fmr) {
				printk(KERN_WARNING "failed to allocate fmr struct "
				       "for FMR %d\n", i);
				goto out_fail;
			}

			fmr->pool             = pool;
			fmr->remap_count      = 0;
			fmr->ref_count        = 0;
			INIT_HLIST_NODE(&fmr->cache_node);

			fmr->fmr = ib_alloc_fmr(pd, params->access, &attr);
			if (IS_ERR(fmr->fmr)) {
				printk(KERN_WARNING "fmr_create failed for FMR %d\n", i);
				kfree(fmr);
				goto out_fail;
			}

			list_add_tail(&fmr->list, &pool->free_list);
			++pool->pool_size;
		}
	}

	return pool;

 out_free_pool:
	kfree(pool->cache_bucket);
	kfree(pool);

	return ERR_PTR(ret);

 out_fail:
	ib_destroy_fmr_pool(pool);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
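
/*
 * A minimal creation sketch (not part of this file): "my_pd" and the
 * particular parameter values are assumptions for illustration, and
 * the access flags are the standard IB verbs definitions.
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.access            = IB_ACCESS_LOCAL_WRITE |
 *				     IB_ACCESS_REMOTE_WRITE,
 *		.pool_size         = 32,
 *		.dirty_watermark   = 8,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *fmr_pool;
 *
 *	fmr_pool = ib_create_fmr_pool(my_pd, &params);
 *	if (IS_ERR(fmr_pool))
 *		return PTR_ERR(fmr_pool);
 */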

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool: FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
int ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
	struct ib_pool_fmr *fmr;
	struct ib_pool_fmr *tmp;
	int                 i;

	kthread_stop(pool->thread);
	ib_fmr_batch_release(pool);

	i = 0;
	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
		ib_dealloc_fmr(fmr->fmr);
		list_del(&fmr->list);
		kfree(fmr);
		++i;
	}

	if (i < pool->pool_size)
		printk(KERN_WARNING "pool still has %d regions registered\n",
		       pool->pool_size - i);

	kfree(pool->cache_bucket);
	kfree(pool);

	return 0;
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool: FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
	int serial;

	atomic_inc(&pool->req_ser);
	/*
	 * It's OK if someone else bumps req_ser again here -- we'll
	 * just wait a little longer.
	 */
	serial = atomic_read(&pool->req_ser);

	wake_up_process(pool->thread);

	if (wait_event_interruptible(pool->force_wait,
				     atomic_read(&pool->flush_ser) - serial >= 0))
		return -EINTR;

	return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle: FMR pool to allocate FMR from
 * @page_list: List of pages to map
 * @list_len: Number of pages in @page_list
 * @io_virtual_address: I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
					 u64                *page_list,
					 int                 list_len,
					 u64                *io_virtual_address)
{
	struct ib_fmr_pool *pool = pool_handle;
	struct ib_pool_fmr *fmr;
	unsigned long       flags;
	int                 result;

	if (list_len < 1 || list_len > pool->max_pages)
		return ERR_PTR(-EINVAL);

	spin_lock_irqsave(&pool->pool_lock, flags);
	fmr = ib_fmr_cache_lookup(pool,
				  page_list,
				  list_len,
				  *io_virtual_address);
	if (fmr) {
		/* found in cache */
		++fmr->ref_count;
		if (fmr->ref_count == 1)
			list_del(&fmr->list);

		spin_unlock_irqrestore(&pool->pool_lock, flags);

		return fmr;
	}

	if (list_empty(&pool->free_list)) {
		spin_unlock_irqrestore(&pool->pool_lock, flags);
		return ERR_PTR(-EAGAIN);
	}

	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
	list_del(&fmr->list);
	hlist_del_init(&fmr->cache_node);
	spin_unlock_irqrestore(&pool->pool_lock, flags);

	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
				 *io_virtual_address);

	if (result) {
		spin_lock_irqsave(&pool->pool_lock, flags);
		list_add(&fmr->list, &pool->free_list);
		spin_unlock_irqrestore(&pool->pool_lock, flags);

		printk(KERN_WARNING "fmr_map returns %d\n", result);

		return ERR_PTR(result);
	}

	++fmr->remap_count;
	fmr->ref_count = 1;

	if (pool->cache_bucket) {
		fmr->io_virtual_address = *io_virtual_address;
		fmr->page_list_len      = list_len;
		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

		spin_lock_irqsave(&pool->pool_lock, flags);
		hlist_add_head(&fmr->cache_node,
			       pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
		spin_unlock_irqrestore(&pool->pool_lock, flags);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
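
/*
 * A minimal mapping sketch, assuming a pool created as in the example
 * above.  "dma_pages" (an array of page-aligned DMA addresses) and
 * "npages" are hypothetical names, and the lkey/rkey members follow
 * the usual struct ib_fmr layout, so treat them as assumptions too.
 *
 *	u64 io_addr = dma_pages[0];
 *	struct ib_pool_fmr *pfmr;
 *
 *	pfmr = ib_fmr_pool_map_phys(fmr_pool, dma_pages, npages, &io_addr);
 *	if (IS_ERR(pfmr))
 *		return PTR_ERR(pfmr);
 *
 * -EAGAIN means the free list is momentarily empty and callers
 * typically retry; on success, post work requests against io_addr
 * using pfmr->fmr->lkey / pfmr->fmr->rkey.
 */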

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr: FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
	struct ib_fmr_pool *pool;
	unsigned long flags;

	pool = fmr->pool;

	spin_lock_irqsave(&pool->pool_lock, flags);

	--fmr->ref_count;
	if (!fmr->ref_count) {
		if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
			list_add_tail(&fmr->list, &pool->free_list);
		} else {
			list_add_tail(&fmr->list, &pool->dirty_list);
			++pool->dirty_len;
			wake_up_process(pool->thread);
		}
	}

#ifdef DEBUG
	if (fmr->ref_count < 0)
		printk(KERN_WARNING "FMR %p has ref count %d < 0\n",
		       fmr, fmr->ref_count);
#endif

	spin_unlock_irqrestore(&pool->pool_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);
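
/*
 * Release-path sketch, continuing the example above.  ib_fmr_pool_unmap()
 * leaves the mapping usable until the FMR is reused; call
 * ib_flush_fmr_pool() when the pages must be fenced off immediately
 * (e.g. before handing them back to the allocator), then destroy the
 * pool, which stops the cleanup thread and frees every FMR.
 *
 *	ib_fmr_pool_unmap(pfmr);
 *	ib_flush_fmr_pool(fmr_pool);
 *	ib_destroy_fmr_pool(fmr_pool);
 */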