
/drivers/staging/vme/devices/vme_user.c

https://github.com/Mengqi/linux-2.6
  1. /*
  2. * VMEbus User access driver
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by:
  8. * Tom Armistead and Ajit Prem
  9. * Copyright 2004 Motorola Inc.
  10. *
  11. *
  12. * This program is free software; you can redistribute it and/or modify it
  13. * under the terms of the GNU General Public License as published by the
  14. * Free Software Foundation; either version 2 of the License, or (at your
  15. * option) any later version.
  16. */
  17. #include <linux/cdev.h>
  18. #include <linux/delay.h>
  19. #include <linux/device.h>
  20. #include <linux/dma-mapping.h>
  21. #include <linux/errno.h>
  22. #include <linux/init.h>
  23. #include <linux/ioctl.h>
  24. #include <linux/kernel.h>
  25. #include <linux/mm.h>
  26. #include <linux/module.h>
  27. #include <linux/pagemap.h>
  28. #include <linux/pci.h>
  29. #include <linux/semaphore.h>
  30. #include <linux/slab.h>
  31. #include <linux/spinlock.h>
  32. #include <linux/syscalls.h>
  33. #include <linux/mutex.h>
  34. #include <linux/types.h>
  35. #include <linux/io.h>
  36. #include <linux/uaccess.h>
  37. #include "../vme.h"
  38. #include "vme_user.h"
  39. static DEFINE_MUTEX(vme_user_mutex);
  40. static const char driver_name[] = "vme_user";
  41. static int bus[USER_BUS_MAX];
  42. static unsigned int bus_num;
  43. /* Currently Documentation/devices.txt defines the following for VME:
  44. *
  45. * 221 char VME bus
  46. * 0 = /dev/bus/vme/m0 First master image
  47. * 1 = /dev/bus/vme/m1 Second master image
  48. * 2 = /dev/bus/vme/m2 Third master image
  49. * 3 = /dev/bus/vme/m3 Fourth master image
  50. * 4 = /dev/bus/vme/s0 First slave image
  51. * 5 = /dev/bus/vme/s1 Second slave image
  52. * 6 = /dev/bus/vme/s2 Third slave image
  53. * 7 = /dev/bus/vme/s3 Fourth slave image
  54. * 8 = /dev/bus/vme/ctl Control
  55. *
  56. * It is expected that all VME bus drivers will use the
  57. * same interface. For interface documentation see
  58. * http://www.vmelinux.org/.
  59. *
  60. * However the VME driver at http://www.vmelinux.org/ is rather old and doesn't
  61. * even support the tsi148 chipset (which has 8 master and 8 slave windows).
  62. * We'll run with this for now as far as possible, however it probably makes
  63. * sense to get rid of the old mappings and just do everything dynamically.
  64. *
  65. * So for now, we'll restrict the driver to providing 4 masters and 4 slaves as
  66. * defined above and try to support at least some of the interface from
  67. * http://www.vmelinux.org/ as an alternative driver can be written providing a
  68. * saner interface later.
  69. *
  70. * The vmelinux.org driver never supported slave images, the devices reserved
  71. * for slaves were repurposed to support all 8 master images on the UniverseII!
  72. * We shall support 4 masters and 4 slaves with this driver.
  73. */
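The device layout above maps onto a fairly simple user-space calling convention. As a rough sketch (not part of the driver), assuming udev really does create /dev/bus/vme/m0 as listed, that vme_user.h and vme.h are visible to user space for struct vme_master, VME_SET_MASTER, VME_A32, VME_SCT and VME_D32, and that something on the bus actually answers at the chosen (hypothetical) address, a read through the first master image could look like this:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "vme.h"       /* VME_A32, VME_SCT, VME_D32 - assumed visible to user space */
    #include "vme_user.h"  /* struct vme_master, VME_SET_MASTER */

    int main(void)
    {
            struct vme_master master;
            unsigned char data[64];
            int fd = open("/dev/bus/vme/m0", O_RDWR);   /* first master image */

            if (fd < 0)
                    return 1;

            memset(&master, 0, sizeof(master));
            master.enable = 1;
            master.vme_addr = 0x0;      /* hypothetical target base address */
            master.size = 0x10000;
            master.aspace = VME_A32;
            master.cycle = VME_SCT;
            master.dwidth = VME_D32;

            /* Configure the window, then read; small reads are bounced
             * through the driver's kernel buffer (see resource_to_user). */
            if (ioctl(fd, VME_SET_MASTER, &master) == 0)
                    read(fd, data, sizeof(data));

            close(fd);
            return 0;
    }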
  74. #define VME_MAJOR 221 /* VME Major Device Number */
  75. #define VME_DEVS 9 /* Number of dev entries */
  76. #define MASTER_MINOR 0
  77. #define MASTER_MAX 3
  78. #define SLAVE_MINOR 4
  79. #define SLAVE_MAX 7
  80. #define CONTROL_MINOR 8
  81. #define PCI_BUF_SIZE 0x20000 /* Size of one slave image buffer */
  82. /*
  83. * Structure to handle image related parameters.
  84. */
  85. struct image_desc {
  86. void *kern_buf; /* Buffer address in kernel space */
  87. dma_addr_t pci_buf; /* Buffer address in PCI address space */
  88. unsigned long long size_buf; /* Buffer size */
  89. struct semaphore sem; /* Semaphore for locking image */
  90. struct device *device; /* Sysfs device */
  91. struct vme_resource *resource; /* VME resource */
  92. int users; /* Number of current users */
  93. };
  94. static struct image_desc image[VME_DEVS];
  95. struct driver_stats {
  96. unsigned long reads;
  97. unsigned long writes;
  98. unsigned long ioctls;
  99. unsigned long irqs;
  100. unsigned long berrs;
  101. unsigned long dmaErrors;
  102. unsigned long timeouts;
  103. unsigned long external;
  104. };
  105. static struct driver_stats statistics;
  106. static struct cdev *vme_user_cdev; /* Character device */
  107. static struct class *vme_user_sysfs_class; /* Sysfs class */
  108. static struct device *vme_user_bridge; /* Pointer to bridge device */
  109. static const int type[VME_DEVS] = { MASTER_MINOR, MASTER_MINOR,
  110. MASTER_MINOR, MASTER_MINOR,
  111. SLAVE_MINOR, SLAVE_MINOR,
  112. SLAVE_MINOR, SLAVE_MINOR,
  113. CONTROL_MINOR
  114. };
  115. static int vme_user_open(struct inode *, struct file *);
  116. static int vme_user_release(struct inode *, struct file *);
  117. static ssize_t vme_user_read(struct file *, char __user *, size_t, loff_t *);
  118. static ssize_t vme_user_write(struct file *, const char __user *, size_t,
  119. loff_t *);
  120. static loff_t vme_user_llseek(struct file *, loff_t, int);
  121. static long vme_user_unlocked_ioctl(struct file *, unsigned int, unsigned long);
  122. static int __devinit vme_user_probe(struct device *, int, int);
  123. static int __devexit vme_user_remove(struct device *, int, int);
  124. static const struct file_operations vme_user_fops = {
  125. .open = vme_user_open,
  126. .release = vme_user_release,
  127. .read = vme_user_read,
  128. .write = vme_user_write,
  129. .llseek = vme_user_llseek,
  130. .unlocked_ioctl = vme_user_unlocked_ioctl,
  131. };
  132. /*
  133. * Reset all the statistic counters
  134. */
  135. static void reset_counters(void)
  136. {
  137. statistics.reads = 0;
  138. statistics.writes = 0;
  139. statistics.ioctls = 0;
  140. statistics.irqs = 0;
  141. statistics.berrs = 0;
  142. statistics.dmaErrors = 0;
  143. statistics.timeouts = 0;
  144. }
  145. static int vme_user_open(struct inode *inode, struct file *file)
  146. {
  147. int err;
  148. unsigned int minor = MINOR(inode->i_rdev);
  149. down(&image[minor].sem);
  150. /* Allow device to be opened if a resource is needed and allocated. */
  151. if (minor < CONTROL_MINOR && image[minor].resource == NULL) {
  152. printk(KERN_ERR "No resources allocated for device\n");
  153. err = -EINVAL;
  154. goto err_res;
  155. }
  156. /* Increment user count */
  157. image[minor].users++;
  158. up(&image[minor].sem);
  159. return 0;
  160. err_res:
  161. up(&image[minor].sem);
  162. return err;
  163. }
  164. static int vme_user_release(struct inode *inode, struct file *file)
  165. {
  166. unsigned int minor = MINOR(inode->i_rdev);
  167. down(&image[minor].sem);
  168. /* Decrement user count */
  169. image[minor].users--;
  170. up(&image[minor].sem);
  171. return 0;
  172. }
  173. /*
  174. * We are going to allocate a page during init per window for small transfers.
  175. * Small transfers will go VME -> buffer -> user space. Larger (more than a
  176. * page) transfers will lock the user space buffer into memory and then
  177. * transfer the data directly into the user space buffers.
  178. */
  179. static ssize_t resource_to_user(int minor, char __user *buf, size_t count,
  180. loff_t *ppos)
  181. {
  182. ssize_t retval;
  183. ssize_t copied = 0;
  184. if (count <= image[minor].size_buf) {
  185. /* We copy to kernel buffer */
  186. copied = vme_master_read(image[minor].resource,
  187. image[minor].kern_buf, count, *ppos);
  188. if (copied < 0)
  189. return (int)copied;
  190. retval = __copy_to_user(buf, image[minor].kern_buf,
  191. (unsigned long)copied);
  192. if (retval != 0) {
  193. copied = (copied - retval);
  194. printk(KERN_INFO "User copy failed\n");
  195. return -EINVAL;
  196. }
  197. } else {
  198. /* XXX Need to write this */
  199. printk(KERN_INFO "Currently don't support large transfers\n");
  200. /* Map in pages from userspace */
  201. /* Call vme_master_read to do the transfer */
  202. return -EINVAL;
  203. }
  204. return copied;
  205. }
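The large-transfer branch above is left as an XXX that simply returns -EINVAL. Purely as an illustration of one possible approach (not the page-mapping scheme the comment proposes), the transfer could be chunked through the existing bounce buffer; the helper name below is hypothetical and the write path would mirror it:

    /*
     * Illustrative sketch only: loop the small-transfer path instead of
     * mapping user pages. Not part of this driver.
     */
    static ssize_t resource_to_user_chunked(int minor, char __user *buf,
            size_t count, loff_t *ppos)
    {
            size_t done = 0;

            while (done < count) {
                    size_t chunk = min_t(size_t, count - done,
                                         image[minor].size_buf);
                    ssize_t rd = vme_master_read(image[minor].resource,
                                                 image[minor].kern_buf, chunk,
                                                 *ppos + done);

                    if (rd < 0)
                            return done ? done : rd;
                    if (copy_to_user(buf + done, image[minor].kern_buf, rd))
                            return -EFAULT;
                    done += rd;
                    if (rd < chunk)
                            break;  /* short read from the bridge */
            }
            return done;
    }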
  206. /*
  207. * We are going to allocate a page during init per window for small transfers.
  208. * Small transfers will go user space -> buffer -> VME. Larger (more than a
  209. * page) transfers will lock the user space buffer into memory and then
  210. * transfer the data directly from the user space buffers out to VME.
  211. */
  212. static ssize_t resource_from_user(unsigned int minor, const char __user *buf,
  213. size_t count, loff_t *ppos)
  214. {
  215. ssize_t retval;
  216. ssize_t copied = 0;
  217. if (count <= image[minor].size_buf) {
  218. retval = __copy_from_user(image[minor].kern_buf, buf,
  219. (unsigned long)count);
  220. if (retval != 0)
  221. copied = (copied - retval);
  222. else
  223. copied = count;
  224. copied = vme_master_write(image[minor].resource,
  225. image[minor].kern_buf, copied, *ppos);
  226. } else {
  227. /* XXX Need to write this */
  228. printk(KERN_INFO "Currently don't support large transfers\n");
  229. /* Map in pages from userspace */
  230. /* Call vme_master_write to do the transfer */
  231. return -EINVAL;
  232. }
  233. return copied;
  234. }
  235. static ssize_t buffer_to_user(unsigned int minor, char __user *buf,
  236. size_t count, loff_t *ppos)
  237. {
  238. void *image_ptr;
  239. ssize_t retval;
  240. image_ptr = image[minor].kern_buf + *ppos;
  241. retval = __copy_to_user(buf, image_ptr, (unsigned long)count);
  242. if (retval != 0) {
  243. retval = (count - retval);
  244. printk(KERN_WARNING "Partial copy to userspace\n");
  245. } else
  246. retval = count;
  247. /* Return number of bytes successfully read */
  248. return retval;
  249. }
  250. static ssize_t buffer_from_user(unsigned int minor, const char __user *buf,
  251. size_t count, loff_t *ppos)
  252. {
  253. void *image_ptr;
  254. size_t retval;
  255. image_ptr = image[minor].kern_buf + *ppos;
  256. retval = __copy_from_user(image_ptr, buf, (unsigned long)count);
  257. if (retval != 0) {
  258. retval = (count - retval);
  259. printk(KERN_WARNING "Partial copy from userspace\n");
  260. } else
  261. retval = count;
  262. /* Return number of bytes successfully written */
  263. return retval;
  264. }
  265. static ssize_t vme_user_read(struct file *file, char __user *buf, size_t count,
  266. loff_t *ppos)
  267. {
  268. unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
  269. ssize_t retval;
  270. size_t image_size;
  271. size_t okcount;
  272. if (minor == CONTROL_MINOR)
  273. return 0;
  274. down(&image[minor].sem);
  275. /* XXX Do we *really* want this helper - we can use vme_*_get ? */
  276. image_size = vme_get_size(image[minor].resource);
  277. /* Ensure we are starting at a valid location */
  278. if ((*ppos < 0) || (*ppos > (image_size - 1))) {
  279. up(&image[minor].sem);
  280. return 0;
  281. }
  282. /* Ensure not reading past end of the image */
  283. if (*ppos + count > image_size)
  284. okcount = image_size - *ppos;
  285. else
  286. okcount = count;
  287. switch (type[minor]) {
  288. case MASTER_MINOR:
  289. retval = resource_to_user(minor, buf, okcount, ppos);
  290. break;
  291. case SLAVE_MINOR:
  292. retval = buffer_to_user(minor, buf, okcount, ppos);
  293. break;
  294. default:
  295. retval = -EINVAL;
  296. }
  297. up(&image[minor].sem);
  298. if (retval > 0)
  299. *ppos += retval;
  300. return retval;
  301. }
  302. static ssize_t vme_user_write(struct file *file, const char __user *buf,
  303. size_t count, loff_t *ppos)
  304. {
  305. unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
  306. ssize_t retval;
  307. size_t image_size;
  308. size_t okcount;
  309. if (minor == CONTROL_MINOR)
  310. return 0;
  311. down(&image[minor].sem);
  312. image_size = vme_get_size(image[minor].resource);
  313. /* Ensure we are starting at a valid location */
  314. if ((*ppos < 0) || (*ppos > (image_size - 1))) {
  315. up(&image[minor].sem);
  316. return 0;
  317. }
  318. /* Ensure not writing past end of the image */
  319. if (*ppos + count > image_size)
  320. okcount = image_size - *ppos;
  321. else
  322. okcount = count;
  323. switch (type[minor]) {
  324. case MASTER_MINOR:
  325. retval = resource_from_user(minor, buf, okcount, ppos);
  326. break;
  327. case SLAVE_MINOR:
  328. retval = buffer_from_user(minor, buf, okcount, ppos);
  329. break;
  330. default:
  331. retval = -EINVAL;
  332. }
  333. up(&image[minor].sem);
  334. if (retval > 0)
  335. *ppos += retval;
  336. return retval;
  337. }
  338. static loff_t vme_user_llseek(struct file *file, loff_t off, int whence)
  339. {
  340. loff_t absolute = -1;
  341. unsigned int minor = MINOR(file->f_dentry->d_inode->i_rdev);
  342. size_t image_size;
  343. if (minor == CONTROL_MINOR)
  344. return -EINVAL;
  345. down(&image[minor].sem);
  346. image_size = vme_get_size(image[minor].resource);
  347. switch (whence) {
  348. case SEEK_SET:
  349. absolute = off;
  350. break;
  351. case SEEK_CUR:
  352. absolute = file->f_pos + off;
  353. break;
  354. case SEEK_END:
  355. absolute = image_size + off;
  356. break;
  357. default:
  358. up(&image[minor].sem);
  359. return -EINVAL;
  360. break;
  361. }
  362. if ((absolute < 0) || (absolute >= image_size)) {
  363. up(&image[minor].sem);
  364. return -EINVAL;
  365. }
  366. file->f_pos = absolute;
  367. up(&image[minor].sem);
  368. return absolute;
  369. }
  370. /*
  371. * The ioctls provided by the old VME access method (the one at vmelinux.org)
  372. * are most certainly wrong as they effectively push the register layout
  373. * through to user space. Given that the VME core can handle multiple bridges,
  374. * with different register layouts this is most certainly not the way to go.
  375. *
  376. * We aren't using the structures defined in the Motorola driver either - these
  377. * are also quite low level, however we should use the definitions that have
  378. * already been defined.
  379. */
  380. static int vme_user_ioctl(struct inode *inode, struct file *file,
  381. unsigned int cmd, unsigned long arg)
  382. {
  383. struct vme_master master;
  384. struct vme_slave slave;
  385. struct vme_irq_id irq_req;
  386. unsigned long copied;
  387. unsigned int minor = MINOR(inode->i_rdev);
  388. int retval;
  389. dma_addr_t pci_addr;
  390. void __user *argp = (void __user *)arg;
  391. statistics.ioctls++;
  392. switch (type[minor]) {
  393. case CONTROL_MINOR:
  394. switch (cmd) {
  395. case VME_IRQ_GEN:
  396. copied = copy_from_user(&irq_req, (char *)arg,
  397. sizeof(struct vme_irq_id));
  398. if (copied != 0) {
  399. printk(KERN_WARNING "Partial copy from userspace\n");
  400. return -EFAULT;
  401. }
  402. retval = vme_irq_generate(vme_user_bridge,
  403. irq_req.level,
  404. irq_req.statid);
  405. return retval;
  406. }
  407. break;
  408. case MASTER_MINOR:
  409. switch (cmd) {
  410. case VME_GET_MASTER:
  411. memset(&master, 0, sizeof(struct vme_master));
  412. /* XXX We do not want to push aspace, cycle and width
  413. * to userspace as they are
  414. */
  415. retval = vme_master_get(image[minor].resource,
  416. &master.enable, &master.vme_addr,
  417. &master.size, &master.aspace,
  418. &master.cycle, &master.dwidth);
  419. copied = copy_to_user(argp, &master,
  420. sizeof(struct vme_master));
  421. if (copied != 0) {
  422. printk(KERN_WARNING "Partial copy to "
  423. "userspace\n");
  424. return -EFAULT;
  425. }
  426. return retval;
  427. break;
  428. case VME_SET_MASTER:
  429. copied = copy_from_user(&master, argp, sizeof(master));
  430. if (copied != 0) {
  431. printk(KERN_WARNING "Partial copy from "
  432. "userspace\n");
  433. return -EFAULT;
  434. }
  435. /* XXX We do not want to push aspace, cycle and width
  436. * to userspace as they are
  437. */
  438. return vme_master_set(image[minor].resource,
  439. master.enable, master.vme_addr, master.size,
  440. master.aspace, master.cycle, master.dwidth);
  441. break;
  442. }
  443. break;
  444. case SLAVE_MINOR:
  445. switch (cmd) {
  446. case VME_GET_SLAVE:
  447. memset(&slave, 0, sizeof(struct vme_slave));
  448. /* XXX We do not want to push aspace, cycle and width
  449. * to userspace as they are
  450. */
  451. retval = vme_slave_get(image[minor].resource,
  452. &slave.enable, &slave.vme_addr,
  453. &slave.size, &pci_addr, &slave.aspace,
  454. &slave.cycle);
  455. copied = copy_to_user(argp, &slave,
  456. sizeof(struct vme_slave));
  457. if (copied != 0) {
  458. printk(KERN_WARNING "Partial copy to "
  459. "userspace\n");
  460. return -EFAULT;
  461. }
  462. return retval;
  463. break;
  464. case VME_SET_SLAVE:
  465. copied = copy_from_user(&slave, argp, sizeof(slave));
  466. if (copied != 0) {
  467. printk(KERN_WARNING "Partial copy from "
  468. "userspace\n");
  469. return -EFAULT;
  470. }
  471. /* XXX We do not want to push aspace, cycle and width
  472. * to userspace as they are
  473. */
  474. return vme_slave_set(image[minor].resource,
  475. slave.enable, slave.vme_addr, slave.size,
  476. image[minor].pci_buf, slave.aspace,
  477. slave.cycle);
  478. break;
  479. }
  480. break;
  481. }
  482. return -EINVAL;
  483. }
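On the control minor the only ioctl wired up is VME_IRQ_GEN. A minimal user-space sketch, assuming /dev/bus/vme/ctl exists and vme_user.h is available, and using made-up level/statid values (these are entirely board-specific), could be:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include "vme_user.h"   /* struct vme_irq_id, VME_IRQ_GEN */

    /* Fire a VMEbus interrupt through the control node. */
    int generate_vme_irq(void)
    {
            struct vme_irq_id irq;
            int ret, fd = open("/dev/bus/vme/ctl", O_RDWR);

            if (fd < 0)
                    return fd;
            memset(&irq, 0, sizeof(irq));
            irq.level = 3;          /* hypothetical interrupt level (1-7) */
            irq.statid = 0x20;      /* hypothetical status/ID vector */
            ret = ioctl(fd, VME_IRQ_GEN, &irq);
            close(fd);
            return ret;
    }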
  484. static long
  485. vme_user_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
  486. {
  487. int ret;
  488. mutex_lock(&vme_user_mutex);
  489. ret = vme_user_ioctl(file->f_path.dentry->d_inode, file, cmd, arg);
  490. mutex_unlock(&vme_user_mutex);
  491. return ret;
  492. }
  493. /*
  494. * Free a previously allocated buffer
  495. */
  496. static void buf_unalloc(int num)
  497. {
  498. if (image[num].kern_buf) {
  499. #ifdef VME_DEBUG
  500. printk(KERN_DEBUG "UniverseII:Releasing buffer at %p\n",
  501. image[num].pci_buf);
  502. #endif
  503. vme_free_consistent(image[num].resource, image[num].size_buf,
  504. image[num].kern_buf, image[num].pci_buf);
  505. image[num].kern_buf = NULL;
  506. image[num].pci_buf = 0;
  507. image[num].size_buf = 0;
  508. #ifdef VME_DEBUG
  509. } else {
  510. printk(KERN_DEBUG "UniverseII: Buffer not allocated\n");
  511. #endif
  512. }
  513. }
  514. static struct vme_driver vme_user_driver = {
  515. .name = driver_name,
  516. .probe = vme_user_probe,
  517. .remove = __devexit_p(vme_user_remove),
  518. };
  519. static int __init vme_user_init(void)
  520. {
  521. int retval = 0;
  522. int i;
  523. struct vme_device_id *ids;
  524. printk(KERN_INFO "VME User Space Access Driver\n");
  525. if (bus_num == 0) {
  526. printk(KERN_ERR "%s: No cards, skipping registration\n",
  527. driver_name);
  528. retval = -ENODEV;
  529. goto err_nocard;
  530. }
  531. /* Let's start by supporting one bus, we can support more than one
  532. * in future revisions if that ever becomes necessary.
  533. */
  534. if (bus_num > USER_BUS_MAX) {
  535. printk(KERN_ERR "%s: Driver only able to handle %d buses\n",
  536. driver_name, USER_BUS_MAX);
  537. bus_num = USER_BUS_MAX;
  538. }
  539. /* Dynamically create the bind table based on module parameters */
  540. ids = kmalloc(sizeof(struct vme_device_id) * (bus_num + 1), GFP_KERNEL);
  541. if (ids == NULL) {
  542. printk(KERN_ERR "%s: Unable to allocate ID table\n",
  543. driver_name);
  544. retval = -ENOMEM;
  545. goto err_id;
  546. }
  547. memset(ids, 0, (sizeof(struct vme_device_id) * (bus_num + 1)));
  548. for (i = 0; i < bus_num; i++) {
  549. ids[i].bus = bus[i];
  550. /*
  551. * We register the driver against the slot occupied by *this*
  552. * card, since it's really a low level way of controlling
  553. * the VME bridge
  554. */
  555. ids[i].slot = VME_SLOT_CURRENT;
  556. }
  557. vme_user_driver.bind_table = ids;
  558. retval = vme_register_driver(&vme_user_driver);
  559. if (retval != 0)
  560. goto err_reg;
  561. return retval;
  562. err_reg:
  563. kfree(ids);
  564. err_id:
  565. err_nocard:
  566. return retval;
  567. }
  568. /*
  569. * In this simple access driver, the old behaviour is being preserved as much
  570. * as practical. We will therefore reserve the buffers and request the images
  571. * here so that we don't have to do it later.
  572. */
  573. static int __devinit vme_user_probe(struct device *dev, int cur_bus,
  574. int cur_slot)
  575. {
  576. int i, err;
  577. char name[12];
  578. /* Save pointer to the bridge device */
  579. if (vme_user_bridge != NULL) {
  580. printk(KERN_ERR "%s: Driver can only be loaded for 1 device\n",
  581. driver_name);
  582. err = -EINVAL;
  583. goto err_dev;
  584. }
  585. vme_user_bridge = dev;
  586. /* Initialise descriptors */
  587. for (i = 0; i < VME_DEVS; i++) {
  588. image[i].kern_buf = NULL;
  589. image[i].pci_buf = 0;
  590. sema_init(&image[i].sem, 1);
  591. image[i].device = NULL;
  592. image[i].resource = NULL;
  593. image[i].users = 0;
  594. }
  595. /* Initialise statistics counters */
  596. reset_counters();
  597. /* Assign major and minor numbers for the driver */
  598. err = register_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS,
  599. driver_name);
  600. if (err) {
  601. printk(KERN_WARNING "%s: Error getting Major Number %d for "
  602. "driver.\n", driver_name, VME_MAJOR);
  603. goto err_region;
  604. }
  605. /* Register the driver as a char device */
  606. vme_user_cdev = cdev_alloc();
  607. vme_user_cdev->ops = &vme_user_fops;
  608. vme_user_cdev->owner = THIS_MODULE;
  609. err = cdev_add(vme_user_cdev, MKDEV(VME_MAJOR, 0), VME_DEVS);
  610. if (err) {
  611. printk(KERN_WARNING "%s: cdev_add failed\n", driver_name);
  612. goto err_char;
  613. }
  614. /* Request slave resources and allocate buffers (128kB wide) */
  615. for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
  616. /* XXX Need to properly request attributes */
  617. /* For ca91cx42 bridge there are only two slave windows
  618. * supporting A16 addressing, so we request A24 supported
  619. * by all windows.
  620. */
  621. image[i].resource = vme_slave_request(vme_user_bridge,
  622. VME_A24, VME_SCT);
  623. if (image[i].resource == NULL) {
  624. printk(KERN_WARNING "Unable to allocate slave "
  625. "resource\n");
  626. goto err_slave;
  627. }
  628. image[i].size_buf = PCI_BUF_SIZE;
  629. image[i].kern_buf = vme_alloc_consistent(image[i].resource,
  630. image[i].size_buf, &image[i].pci_buf);
  631. if (image[i].kern_buf == NULL) {
  632. printk(KERN_WARNING "Unable to allocate memory for "
  633. "buffer\n");
  634. image[i].pci_buf = 0;
  635. vme_slave_free(image[i].resource);
  636. err = -ENOMEM;
  637. goto err_slave;
  638. }
  639. }
  640. /*
  641. * Request master resources allocate page sized buffers for small
  642. * reads and writes
  643. */
  644. for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
  645. /* XXX Need to properly request attributes */
  646. image[i].resource = vme_master_request(vme_user_bridge,
  647. VME_A32, VME_SCT, VME_D32);
  648. if (image[i].resource == NULL) {
  649. printk(KERN_WARNING "Unable to allocate master "
  650. "resource\n");
  651. goto err_master;
  652. }
  653. image[i].size_buf = PCI_BUF_SIZE;
  654. image[i].kern_buf = kmalloc(image[i].size_buf, GFP_KERNEL);
  655. if (image[i].kern_buf == NULL) {
  656. printk(KERN_WARNING "Unable to allocate memory for "
  657. "master window buffers\n");
  658. err = -ENOMEM;
  659. goto err_master_buf;
  660. }
  661. }
  662. /* Create sysfs entries - on udev systems this creates the dev files */
  663. vme_user_sysfs_class = class_create(THIS_MODULE, driver_name);
  664. if (IS_ERR(vme_user_sysfs_class)) {
  665. printk(KERN_ERR "Error creating vme_user class.\n");
  666. err = PTR_ERR(vme_user_sysfs_class);
  667. goto err_class;
  668. }
  669. /* Add sysfs Entries */
  670. for (i = 0; i < VME_DEVS; i++) {
  671. int num;
  672. switch (type[i]) {
  673. case MASTER_MINOR:
  674. sprintf(name, "bus/vme/m%%d");
  675. break;
  676. case CONTROL_MINOR:
  677. sprintf(name, "bus/vme/ctl");
  678. break;
  679. case SLAVE_MINOR:
  680. sprintf(name, "bus/vme/s%%d");
  681. break;
  682. default:
  683. err = -EINVAL;
  684. goto err_sysfs;
  685. break;
  686. }
  687. num = (type[i] == SLAVE_MINOR) ? i - (MASTER_MAX + 1) : i;
  688. image[i].device = device_create(vme_user_sysfs_class, NULL,
  689. MKDEV(VME_MAJOR, i), NULL, name, num);
  690. if (IS_ERR(image[i].device)) {
  691. printk(KERN_INFO "%s: Error creating sysfs device\n",
  692. driver_name);
  693. err = PTR_ERR(image[i].device);
  694. goto err_sysfs;
  695. }
  696. }
  697. return 0;
  698. /* Ensure counter set correctly to destroy all sysfs devices */
  699. i = VME_DEVS;
  700. err_sysfs:
  701. while (i > 0) {
  702. i--;
  703. device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
  704. }
  705. class_destroy(vme_user_sysfs_class);
  706. /* Ensure counter set correctly to free all master windows */
  707. i = MASTER_MAX + 1;
  708. err_master_buf:
  709. for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++)
  710. kfree(image[i].kern_buf);
  711. err_master:
  712. while (i > MASTER_MINOR) {
  713. i--;
  714. vme_master_free(image[i].resource);
  715. }
  716. /*
  717. * Ensure counter set correctly to free all slave windows and buffers
  718. */
  719. i = SLAVE_MAX + 1;
  720. err_slave:
  721. while (i > SLAVE_MINOR) {
  722. i--;
  723. buf_unalloc(i);
  724. vme_slave_free(image[i].resource);
  725. }
  726. err_class:
  727. cdev_del(vme_user_cdev);
  728. err_char:
  729. unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
  730. err_region:
  731. err_dev:
  732. return err;
  733. }
  734. static int __devexit vme_user_remove(struct device *dev, int cur_bus,
  735. int cur_slot)
  736. {
  737. int i;
  738. /* Remove sysfs Entries */
  739. for (i = 0; i < VME_DEVS; i++)
  740. device_destroy(vme_user_sysfs_class, MKDEV(VME_MAJOR, i));
  741. class_destroy(vme_user_sysfs_class);
  742. for (i = MASTER_MINOR; i < (MASTER_MAX + 1); i++) {
  743. kfree(image[i].kern_buf);
  744. vme_master_free(image[i].resource);
  745. }
  746. for (i = SLAVE_MINOR; i < (SLAVE_MAX + 1); i++) {
  747. vme_slave_set(image[i].resource, 0, 0, 0, 0, VME_A32, 0);
  748. buf_unalloc(i);
  749. vme_slave_free(image[i].resource);
  750. }
  751. /* Unregister device driver */
  752. cdev_del(vme_user_cdev);
  753. /* Unregister the major and minor device numbers */
  754. unregister_chrdev_region(MKDEV(VME_MAJOR, 0), VME_DEVS);
  755. return 0;
  756. }
  757. static void __exit vme_user_exit(void)
  758. {
  759. vme_unregister_driver(&vme_user_driver);
  760. kfree(vme_user_driver.bind_table);
  761. }
  762. MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the driver is connected");
  763. module_param_array(bus, int, &bus_num, 0);
  764. MODULE_DESCRIPTION("VME User Space Access Driver");
  765. MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com>");
  766. MODULE_LICENSE("GPL");
  767. module_init(vme_user_init);
  768. module_exit(vme_user_exit);
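Usage note: the driver only registers itself if at least one bridge index is passed via the bus module parameter (otherwise bus_num stays 0 and vme_user_init() returns -ENODEV), so loading looks something like "insmod vme_user.ko bus=0", with up to USER_BUS_MAX entries accepted.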