
/drivers/char/mspec.c

https://github.com/Dabary/linux_gt-i9000
/*
 * Copyright (C) 2001-2006 Silicon Graphics, Inc.  All rights
 * reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/*
 * SN Platform Special Memory (mspec) Support
 *
 * This driver exports the SN special memory (mspec) facility to user
 * processes.
 * There are three types of memory made available through this driver:
 * fetchops, uncached and cached.
 *
 * Fetchops are atomic memory operations that are implemented in the
 * memory controller on SGI SN hardware.
 *
 * Uncached are used for the memory write combining feature of the
 * ia64 cpu.
 *
 * Cached are used for areas of memory that are used as cached addresses
 * on our partition and used as uncached addresses from other partitions.
 * Due to a design constraint of the SN2 Shub, you cannot have processors
 * on the same FSB perform both a cached and uncached reference to the
 * same cache line.  These special memory cached regions prevent the
 * kernel from ever dropping in a TLB entry and therefore prevent the
 * processor from ever speculating a cache line from this page.
 */
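
/*
 * Illustrative userspace usage (a minimal sketch, not part of the
 * original driver): mspec_mmap() below only accepts shared, writable
 * mappings at offset 0, so a client of the fetchop device looks
 * roughly like this.  How fetchop variables inside the page are laid
 * out and operated on is defined by <asm/sn/mspec.h> and is not shown
 * here.
 *
 *	int fd = open("/dev/sgi_fetchop", O_RDWR);
 *	unsigned long *fops = mmap(NULL, getpagesize(),
 *				   PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, fd, 0);
 *	// loads and stores at fetchop offsets within the page trigger
 *	// atomic fetch-and-op operations in the memory controller
 */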
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/numa.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/uncached.h>
#include <asm/sn/addrs.h>
#include <asm/sn/arch.h>
#include <asm/sn/mspec.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/io.h>
#include <asm/sn/bte.h>
#include <asm/sn/shubio.h>

#define FETCHOP_ID	"SGI Fetchop,"
#define CACHED_ID	"Cached,"
#define UNCACHED_ID	"Uncached"
#define REVISION	"4.0"
#define MSPEC_BASENAME	"mspec"

/*
 * Page types allocated by the device.
 */
enum mspec_page_type {
	MSPEC_FETCHOP = 1,
	MSPEC_CACHED,
	MSPEC_UNCACHED
};

#ifdef CONFIG_SGI_SN
static int is_sn2;
#else
#define is_sn2		0
#endif

/*
 * One of these structures is allocated when an mspec region is mmaped. The
 * structure is pointed to by the vma->vm_private_data field in the vma struct.
 * This structure is used to record the addresses of the mspec pages.
 * This structure is shared by all vma's that are split off from the
 * original vma when split_vma()'s are done.
 *
 * The refcnt is incremented atomically because mm->mmap_sem does not
 * protect in fork case where multiple tasks share the vma_data.
 */
struct vma_data {
	atomic_t refcnt;	/* Number of vmas sharing the data. */
	spinlock_t lock;	/* Serialize access to this structure. */
	int count;		/* Number of pages allocated. */
	enum mspec_page_type type; /* Type of pages allocated. */
	int flags;		/* See VMD_xxx below. */
	unsigned long vm_start;	/* Original (unsplit) base. */
	unsigned long vm_end;	/* Original (unsplit) end. */
	unsigned long maddr[0];	/* Array of MSPEC addresses. */
};
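
/*
 * Concrete example of the sharing described above: a task mmaps an
 * mspec region (refcnt == 1) and then forks; the parent and child
 * vmas now share one vma_data (refcnt == 2), so the pages are only
 * returned to the pool when the last mapping is torn down.
 */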
#define VMD_VMALLOCED 0x1	/* vmalloc'd rather than kmalloc'd */

/* used on shub2 to clear FOP cache in the HUB */
static unsigned long scratch_page[MAX_NUMNODES];
#define SH2_AMO_CACHE_ENTRIES	4

static inline int
mspec_zero_block(unsigned long addr, int len)
{
	int status;

	if (is_sn2) {
		if (is_shub2()) {
			int nid;
			void *p;
			int i;

			/*
			 * Flush the block out of the HUB's FOP cache by
			 * referencing every cache entry through the
			 * per-node scratch page before zeroing.
			 */
			nid = nasid_to_cnodeid(get_node_number(__pa(addr)));
			p = (void *)TO_AMO(scratch_page[nid]);

			for (i = 0; i < SH2_AMO_CACHE_ENTRIES; i++) {
				FETCHOP_LOAD_OP(p, FETCHOP_LOAD);
				p += FETCHOP_VAR_SIZE;
			}
		}

		/* Zero the block with the BTE rather than the CPU. */
		status = bte_copy(0, addr & ~__IA64_UNCACHED_OFFSET, len,
				  BTE_WACQUIRE | BTE_ZERO_FILL, NULL);
	} else {
		memset((char *) addr, 0, len);
		status = 0;
	}
	return status;
}
/*
 * mspec_open
 *
 * Called when a device mapping is created by a means other than mmap
 * (via fork, munmap, etc.).  Increments the reference count on the
 * underlying mspec data so it is not freed prematurely.
 */
static void
mspec_open(struct vm_area_struct *vma)
{
	struct vma_data *vdata;

	vdata = vma->vm_private_data;
	atomic_inc(&vdata->refcnt);
}

/*
 * mspec_close
 *
 * Called when unmapping a device mapping.  Frees all mspec pages
 * belonging to all the vma's sharing this vma_data structure.
 */
static void
mspec_close(struct vm_area_struct *vma)
{
	struct vma_data *vdata;
	int index, last_index;
	unsigned long my_page;

	vdata = vma->vm_private_data;

	if (!atomic_dec_and_test(&vdata->refcnt))
		return;

	last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
	for (index = 0; index < last_index; index++) {
		if (vdata->maddr[index] == 0)
			continue;
		/*
		 * Clear the page before sticking it back
		 * into the pool.
		 */
		my_page = vdata->maddr[index];
		vdata->maddr[index] = 0;
		if (!mspec_zero_block(my_page, PAGE_SIZE))
			uncached_free_page(my_page, 1);
		else
			printk(KERN_WARNING "mspec_close(): "
			       "failed to zero page %ld\n", my_page);
	}

	if (vdata->flags & VMD_VMALLOCED)
		vfree(vdata);
	else
		kfree(vdata);
}
/*
 * mspec_fault
 *
 * Creates an mspec page and maps it to user space.
 */
static int
mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long paddr, maddr;
	unsigned long pfn;
	pgoff_t index = vmf->pgoff;
	struct vma_data *vdata = vma->vm_private_data;

	maddr = (volatile unsigned long) vdata->maddr[index];
	if (maddr == 0) {
		maddr = uncached_alloc_page(numa_node_id(), 1);
		if (maddr == 0)
			return VM_FAULT_OOM;
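		/*
		 * Recheck under the lock: a concurrent fault may have
		 * installed this page already.  If it did, free our
		 * freshly allocated page and use the existing one.
		 */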
		spin_lock(&vdata->lock);
		if (vdata->maddr[index] == 0) {
			vdata->count++;
			vdata->maddr[index] = maddr;
		} else {
			uncached_free_page(maddr, 1);
			maddr = vdata->maddr[index];
		}
		spin_unlock(&vdata->lock);
	}

	if (vdata->type == MSPEC_FETCHOP)
		paddr = TO_AMO(maddr);
	else
		paddr = maddr & ~__IA64_UNCACHED_OFFSET;

	pfn = paddr >> PAGE_SHIFT;

	/*
	 * vm_insert_pfn can fail with -EBUSY, but in that case it will
	 * be because another thread has installed the pte first, so it
	 * is no problem.
	 */
	vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);

	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct mspec_vm_ops = {
	.open = mspec_open,
	.close = mspec_close,
	.fault = mspec_fault,
};

/*
 * mspec_mmap
 *
 * Called when mmapping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int
mspec_mmap(struct file *file, struct vm_area_struct *vma,
					enum mspec_page_type type)
{
	struct vma_data *vdata;
	int pages, vdata_size, flags = 0;

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	if ((vma->vm_flags & VM_WRITE) == 0)
		return -EPERM;

	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
	if (vdata_size <= PAGE_SIZE)
		vdata = kmalloc(vdata_size, GFP_KERNEL);
	else {
		vdata = vmalloc(vdata_size);
		flags = VMD_VMALLOCED;
	}
	if (!vdata)
		return -ENOMEM;
	memset(vdata, 0, vdata_size);

	vdata->vm_start = vma->vm_start;
	vdata->vm_end = vma->vm_end;
	vdata->flags = flags;
	vdata->type = type;
	spin_lock_init(&vdata->lock);
	vdata->refcnt = ATOMIC_INIT(1);
	vma->vm_private_data = vdata;
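	/*
	 * These are raw PFN mappings with no backing struct page:
	 * VM_PFNMAP tells the VM that pages are inserted by pfn from
	 * the fault handler, VM_IO marks this as a device mapping,
	 * VM_RESERVED keeps the pages from being swapped out, and
	 * VM_DONTEXPAND prevents mremap() from growing the region.
	 */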
	vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
	if (vdata->type == MSPEC_FETCHOP || vdata->type == MSPEC_UNCACHED)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_ops = &mspec_vm_ops;

	return 0;
}
static int
fetchop_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_FETCHOP);
}

static int
cached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_CACHED);
}

static int
uncached_mmap(struct file *file, struct vm_area_struct *vma)
{
	return mspec_mmap(file, vma, MSPEC_UNCACHED);
}

static const struct file_operations fetchop_fops = {
	.owner = THIS_MODULE,
	.mmap = fetchop_mmap
};

static struct miscdevice fetchop_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "sgi_fetchop",
	.fops = &fetchop_fops
};

static const struct file_operations cached_fops = {
	.owner = THIS_MODULE,
	.mmap = cached_mmap
};

static struct miscdevice cached_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mspec_cached",
	.fops = &cached_fops
};

static const struct file_operations uncached_fops = {
	.owner = THIS_MODULE,
	.mmap = uncached_mmap
};

static struct miscdevice uncached_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "mspec_uncached",
	.fops = &uncached_fops
};
/*
 * mspec_init
 *
 * Called at boot time to initialize the mspec facility.
 */
static int __init
mspec_init(void)
{
	int ret;
	int nid;

	/*
	 * The fetchop device only works on SN2 hardware; the uncached
	 * and cached memory drivers should both be valid on all ia64
	 * hardware.
	 */
#ifdef CONFIG_SGI_SN
	if (ia64_platform_is("sn2")) {
		is_sn2 = 1;
		if (is_shub2()) {
			ret = -ENOMEM;
			for_each_node_state(nid, N_ONLINE) {
				int actual_nid;
				int nasid;
				unsigned long phys;

				scratch_page[nid] = uncached_alloc_page(nid, 1);
				if (scratch_page[nid] == 0)
					goto free_scratch_pages;
				phys = __pa(scratch_page[nid]);
				nasid = get_node_number(phys);
				actual_nid = nasid_to_cnodeid(nasid);
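				/*
				 * The per-node scratch page is only
				 * useful if it actually resides on
				 * node nid; give up if the uncached
				 * allocator fell back to another node.
				 */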
				if (actual_nid != nid)
					goto free_scratch_pages;
			}
		}

		ret = misc_register(&fetchop_miscdev);
		if (ret) {
			printk(KERN_ERR
			       "%s: failed to register device %i\n",
			       FETCHOP_ID, ret);
			goto free_scratch_pages;
		}
	}
#endif
	ret = misc_register(&cached_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register device %i\n",
		       CACHED_ID, ret);
		if (is_sn2)
			misc_deregister(&fetchop_miscdev);
		goto free_scratch_pages;
	}
	ret = misc_register(&uncached_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: failed to register device %i\n",
		       UNCACHED_ID, ret);
		misc_deregister(&cached_miscdev);
		if (is_sn2)
			misc_deregister(&fetchop_miscdev);
		goto free_scratch_pages;
	}

	printk(KERN_INFO "%s %s initialized devices: %s %s %s\n",
	       MSPEC_BASENAME, REVISION, is_sn2 ? FETCHOP_ID : "",
	       CACHED_ID, UNCACHED_ID);

	return 0;

 free_scratch_pages:
	for_each_node(nid) {
		if (scratch_page[nid] != 0)
			uncached_free_page(scratch_page[nid], 1);
	}
	return ret;
}

static void __exit
mspec_exit(void)
{
	int nid;

	misc_deregister(&uncached_miscdev);
	misc_deregister(&cached_miscdev);
	if (is_sn2) {
		misc_deregister(&fetchop_miscdev);

		for_each_node(nid) {
			if (scratch_page[nid] != 0)
				uncached_free_page(scratch_page[nid], 1);
		}
	}
}

module_init(mspec_init);
module_exit(mspec_exit);

MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
MODULE_LICENSE("GPL");