
/arch/sh/kernel/cpu/sh4/sq.c

https://bitbucket.org/thekraven/iscream_thunderc-2.6.35
/*
 * arch/sh/kernel/cpu/sh4/sq.c
 *
 * General management API for SH-4 integrated Store Queues
 *
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2001, 2002  M. R. Brown
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/sysdev.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <cpu/sq.h>

struct sq_mapping;

struct sq_mapping {
	const char *name;

	unsigned long sq_addr;
	unsigned long addr;
	unsigned int size;

	struct sq_mapping *next;
};

static struct sq_mapping *sq_mapping_list;
static DEFINE_SPINLOCK(sq_mapping_lock);
static struct kmem_cache *sq_cache;
static unsigned long *sq_bitmap;
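
/*
 * Completion barrier: a dummy read from the SQ area followed by a
 * zero write to each queue, used to make sure any outstanding store
 * queue transfers have drained before continuing.
 */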
#define store_queue_barrier()			\
do {						\
	(void)__raw_readl(P4SEG_STORE_QUE);	\
	__raw_writel(0, P4SEG_STORE_QUE + 0);	\
	__raw_writel(0, P4SEG_STORE_QUE + 8);	\
} while (0)

/**
 * sq_flush_range - Flush (prefetch) a specific SQ range
 * @start: the store queue address to start flushing from
 * @len: the length to flush
 *
 * Flushes the store queue cache from @start to @start + @len in a
 * linear fashion.
 */
void sq_flush_range(unsigned long start, unsigned int len)
{
	unsigned long *sq = (unsigned long *)start;

	/* Flush the queues, one 32-byte line (eight longwords) per iteration */
	for (len >>= 5; len--; sq += 8)
		prefetchw(sq);

	/* Wait for completion */
	store_queue_barrier();
}
EXPORT_SYMBOL(sq_flush_range);
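
/*
 * Example: filling one 32-byte line through a store queue and then
 * flushing it out, where "sq_va" stands in for a valid SQ address
 * previously returned by sq_remap() and "src" for 32 bytes of
 * source data:
 *
 *	u32 *sq = (u32 *)sq_va;
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		sq[i] = src[i];
 *
 *	sq_flush_range(sq_va, 32);
 */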

static inline void sq_mapping_list_add(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	p = &sq_mapping_list;
	while ((tmp = *p) != NULL)
		p = &tmp->next;

	map->next = tmp;
	*p = map;

	spin_unlock_irq(&sq_mapping_lock);
}

static inline void sq_mapping_list_del(struct sq_mapping *map)
{
	struct sq_mapping **p, *tmp;

	spin_lock_irq(&sq_mapping_lock);

	for (p = &sq_mapping_list; (tmp = *p); p = &tmp->next)
		if (tmp == map) {
			*p = tmp->next;
			break;
		}

	spin_unlock_irq(&sq_mapping_lock);
}

static int __sq_remap(struct sq_mapping *map, pgprot_t prot)
{
#if defined(CONFIG_MMU)
	struct vm_struct *vma;

	vma = __get_vm_area(map->size, VM_ALLOC, map->sq_addr, SQ_ADDRMAX);
	if (!vma)
		return -ENOMEM;

	vma->phys_addr = map->addr;

	if (ioremap_page_range((unsigned long)vma->addr,
			       (unsigned long)vma->addr + map->size,
			       vma->phys_addr, prot)) {
		vunmap(vma->addr);
		return -EAGAIN;
	}
#else
	/*
	 * Without an MMU (or with it turned off), this is much more
	 * straightforward, as we can just load up each queue's QACR with
	 * the physical address appropriately masked.
	 */
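	/*
	 * Bits [28:26] of the physical address select one of the 64MB
	 * areas; they land in the AREA field (bits [4:2]) of QACR0 and
	 * QACR1, which is what the shift and mask below compute.
	 */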
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR0);
	__raw_writel(((map->addr >> 26) << 2) & 0x1c, SQ_QACR1);
#endif

	return 0;
}

/**
 * sq_remap - Map a physical address through the Store Queues
 * @phys: Physical address of mapping.
 * @size: Length of mapping.
 * @name: Name of the caller requesting the mapping.
 * @prot: Protection bits.
 *
 * Remaps the physical address @phys through the next available store queue
 * address of @size length. @name is logged at boot time as well as through
 * the sysfs interface.
 */
unsigned long sq_remap(unsigned long phys, unsigned int size,
		       const char *name, pgprot_t prot)
{
	struct sq_mapping *map;
	unsigned long end;
	unsigned int psz;
	int ret, page;

	/* Don't allow wraparound or zero size */
	end = phys + size - 1;
	if (unlikely(!size || end < phys))
		return -EINVAL;

	/* Don't allow anyone to remap normal memory. */
	if (unlikely(phys < virt_to_phys(high_memory)))
		return -EINVAL;

	phys &= PAGE_MASK;
	size = PAGE_ALIGN(end + 1) - phys;

	map = kmem_cache_alloc(sq_cache, GFP_KERNEL);
	if (unlikely(!map))
		return -ENOMEM;

	map->addr = phys;
	map->size = size;
	map->name = name;

	/* The bitmap tracks the 64MB SQ address space, one bit per page */
	page = bitmap_find_free_region(sq_bitmap, 0x04000000 >> PAGE_SHIFT,
				       get_order(map->size));
	if (unlikely(page < 0)) {
		ret = -ENOSPC;
		goto out;
	}

	map->sq_addr = P4SEG_STORE_QUE + (page << PAGE_SHIFT);

	ret = __sq_remap(map, prot);
	if (unlikely(ret != 0))
		goto out;

	psz = (size + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	pr_info("sqremap: %15s  [%4d page%s]  va 0x%08lx   pa 0x%08lx\n",
		likely(map->name) ? map->name : "???",
		psz, psz == 1 ? " " : "s",
		map->sq_addr, map->addr);

	sq_mapping_list_add(map);

	return map->sq_addr;

out:
	kmem_cache_free(sq_cache, map);
	return ret;
}
EXPORT_SYMBOL(sq_remap);
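
/*
 * Example: a typical caller pairs sq_remap() with sq_unmap(), where
 * "dev_phys" and "dev_size" stand in for some device's physical base
 * address and length:
 *
 *	unsigned long sq_va;
 *
 *	sq_va = sq_remap(dev_phys, dev_size, "mydev", PAGE_SHARED);
 *	if (IS_ERR_VALUE(sq_va))
 *		return (long)sq_va;
 *
 *	(write through sq_va, calling sq_flush_range() as needed)
 *
 *	sq_unmap(sq_va);
 */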

/**
 * sq_unmap - Unmap a Store Queue allocation
 * @vaddr: Pre-allocated Store Queue mapping.
 *
 * Unmaps the store queue allocation at @vaddr that was previously created
 * by sq_remap(). Also frees up the pte that was previously inserted into
 * the kernel page table and discards the UTLB translation.
 */
void sq_unmap(unsigned long vaddr)
{
	struct sq_mapping **p, *map;
	int page;

	for (p = &sq_mapping_list; (map = *p); p = &map->next)
		if (map->sq_addr == vaddr)
			break;

	if (unlikely(!map)) {
		printk(KERN_ERR "%s: bad store queue address 0x%08lx\n",
		       __func__, vaddr);
		return;
	}

	page = (map->sq_addr - P4SEG_STORE_QUE) >> PAGE_SHIFT;
	bitmap_release_region(sq_bitmap, page, get_order(map->size));

#ifdef CONFIG_MMU
	{
		/*
		 * Tear down the VMA in the MMU case.
		 */
		struct vm_struct *vma;

		vma = remove_vm_area((void *)(map->sq_addr & PAGE_MASK));
		if (!vma) {
			printk(KERN_ERR "%s: bad address 0x%08lx\n",
			       __func__, map->sq_addr);
			return;
		}
	}
#endif

	sq_mapping_list_del(map);

	kmem_cache_free(sq_cache, map);
}
EXPORT_SYMBOL(sq_unmap);

/*
 * Needlessly complex sysfs interface. Unfortunately it doesn't seem like
 * there is any other easy way to add things on a per-cpu basis without
 * putting the directory entries somewhere stupid and having to create
 * links in sysfs by hand back into the per-cpu directories.
 *
 * Some day we may want to have an additional abstraction per store
 * queue, but considering the kobject hell we already have to deal with,
 * it's simply not worth the trouble.
 */
static struct kobject *sq_kobject[NR_CPUS];

struct sq_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(char *buf);
	ssize_t (*store)(const char *buf, size_t count);
};

#define to_sq_sysfs_attr(a)	container_of(a, struct sq_sysfs_attr, attr)

static ssize_t sq_sysfs_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->show))
		return sattr->show(buf);

	return -EIO;
}

static ssize_t sq_sysfs_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t count)
{
	struct sq_sysfs_attr *sattr = to_sq_sysfs_attr(attr);

	if (likely(sattr->store))
		return sattr->store(buf, count);

	return -EIO;
}

static ssize_t mapping_show(char *buf)
{
	struct sq_mapping **list, *entry;
	char *p = buf;

	for (list = &sq_mapping_list; (entry = *list); list = &entry->next)
		p += sprintf(p, "%08lx-%08lx [%08lx]: %s\n",
			     entry->sq_addr, entry->sq_addr + entry->size,
			     entry->addr, entry->name);

	return p - buf;
}

static ssize_t mapping_store(const char *buf, size_t count)
{
	unsigned long base = 0, len = 0;

	sscanf(buf, "%lx %lx", &base, &len);
	if (!base)
		return -EIO;

	if (likely(len)) {
		int ret = sq_remap(base, len, "Userspace", PAGE_SHARED);
		if (ret < 0)
			return ret;
	} else
		sq_unmap(base);

	return count;
}
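
/*
 * The "mapping" attribute appears per-cpu, e.g. as
 * /sys/devices/system/cpu/cpu0/sq/mapping. Writing "<base> <len>" in
 * hex establishes a mapping, writing "<base> 0" tears it down, and
 * reading back lists the active mappings, one per line:
 *
 *	echo "10000000 100000" > /sys/devices/system/cpu/cpu0/sq/mapping
 *	cat /sys/devices/system/cpu/cpu0/sq/mapping
 *	e0000000-e0100000 [10000000]: Userspace
 */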

static struct sq_sysfs_attr mapping_attr =
	__ATTR(mapping, 0644, mapping_show, mapping_store);

static struct attribute *sq_sysfs_attrs[] = {
	&mapping_attr.attr,
	NULL,
};

static const struct sysfs_ops sq_sysfs_ops = {
	.show	= sq_sysfs_show,
	.store	= sq_sysfs_store,
};

/* Release callback so the kzalloc'd per-cpu kobject is freed on final put */
static void sq_kobj_release(struct kobject *kobj)
{
	kfree(kobj);
}

static struct kobj_type ktype_percpu_entry = {
	.sysfs_ops	= &sq_sysfs_ops,
	.default_attrs	= sq_sysfs_attrs,
	.release	= sq_kobj_release,
};

static int __devinit sq_sysdev_add(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj;
	int error;

	sq_kobject[cpu] = kzalloc(sizeof(struct kobject), GFP_KERNEL);
	if (unlikely(!sq_kobject[cpu]))
		return -ENOMEM;

	kobj = sq_kobject[cpu];
	error = kobject_init_and_add(kobj, &ktype_percpu_entry, &sysdev->kobj,
				     "%s", "sq");
	if (!error)
		kobject_uevent(kobj, KOBJ_ADD);
	return error;
}

static int __devexit sq_sysdev_remove(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct kobject *kobj = sq_kobject[cpu];

	kobject_put(kobj);
	return 0;
}

static struct sysdev_driver sq_sysdev_driver = {
	.add		= sq_sysdev_add,
	.remove		= __devexit_p(sq_sysdev_remove),
};

static int __init sq_api_init(void)
{
	unsigned int nr_pages = 0x04000000 >> PAGE_SHIFT;
	unsigned int size = (nr_pages + (BITS_PER_LONG - 1)) / BITS_PER_LONG;
	int ret = -ENOMEM;

	printk(KERN_NOTICE "sq: Registering store queue API.\n");

	sq_cache = kmem_cache_create("store_queue_cache",
				sizeof(struct sq_mapping), 0, 0, NULL);
	if (unlikely(!sq_cache))
		return ret;

	/* "size" counts longs, so scale by sizeof(long) for the allocation */
	sq_bitmap = kzalloc(size * sizeof(long), GFP_KERNEL);
	if (unlikely(!sq_bitmap))
		goto out;

	ret = sysdev_driver_register(&cpu_sysdev_class, &sq_sysdev_driver);
	if (unlikely(ret != 0))
		goto out;

	return 0;

out:
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);

	return ret;
}

static void __exit sq_api_exit(void)
{
	sysdev_driver_unregister(&cpu_sysdev_class, &sq_sysdev_driver);
	kfree(sq_bitmap);
	kmem_cache_destroy(sq_cache);
}

module_init(sq_api_init);
module_exit(sq_api_exit);

MODULE_AUTHOR("Paul Mundt <lethal@linux-sh.org>, M. R. Brown <mrbrown@0xd6.org>");
MODULE_DESCRIPTION("Simple API for SH-4 integrated Store Queues");
MODULE_LICENSE("GPL");