/drivers/infiniband/core/user_mad.c


/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: user_mad.c 1389 2004-12-27 22:56:47Z roland $
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/poll.h>
#include <linux/rwsem.h>
#include <linux/kref.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include <ib_mad.h>
#include <ib_user_mad.h>

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace MAD packet access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        IB_UMAD_MAX_PORTS  = 64,
        IB_UMAD_MAX_AGENTS = 32,

        IB_UMAD_MAJOR      = 231,
        IB_UMAD_MINOR_BASE = 0
};

struct ib_umad_port {
        int                    devnum;
        struct cdev            dev;
        struct class_device    class_dev;

        int                    sm_devnum;
        struct cdev            sm_dev;
        struct class_device    sm_class_dev;
        struct semaphore       sm_sem;

        struct ib_device      *ib_dev;
        struct ib_umad_device *umad_dev;
        u8                     port_num;
};

struct ib_umad_device {
        int                  start_port, end_port;
        struct kref          ref;
        struct ib_umad_port  port[0];
};

struct ib_umad_file {
        struct ib_umad_port *port;
        spinlock_t           recv_lock;
        struct list_head     recv_list;
        wait_queue_head_t    recv_wait;
        struct rw_semaphore  agent_mutex;
        struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
        struct ib_mr        *mr[IB_UMAD_MAX_AGENTS];
};

struct ib_umad_packet {
        struct ib_user_mad mad;
        struct ib_ah      *ah;
        struct list_head   list;
        DECLARE_PCI_UNMAP_ADDR(mapping)
};

static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static spinlock_t map_lock;
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);

static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
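
/*
 * Hand a completed or received packet to userspace: find the agent slot
 * that matches @agent, append the packet to the file's receive list and
 * wake up any reader sleeping in read()/poll().  Returns non-zero if the
 * agent is no longer registered with this file.
 */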
static int queue_packet(struct ib_umad_file *file,
                        struct ib_mad_agent *agent,
                        struct ib_umad_packet *packet)
{
        int ret = 1;

        down_read(&file->agent_mutex);
        for (packet->mad.id = 0;
             packet->mad.id < IB_UMAD_MAX_AGENTS;
             packet->mad.id++)
                if (agent == file->agent[packet->mad.id]) {
                        spin_lock_irq(&file->recv_lock);
                        list_add_tail(&packet->list, &file->recv_list);
                        spin_unlock_irq(&file->recv_lock);
                        wake_up_interruptible(&file->recv_wait);
                        ret = 0;
                        break;
                }

        up_read(&file->agent_mutex);

        return ret;
}
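
/*
 * Send completion handler: unmap the DMA buffer and destroy the address
 * handle created in ib_umad_write().  On a response timeout the packet is
 * queued back to userspace with status ETIMEDOUT; otherwise it is freed.
 */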
static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *send_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet =
                (void *) (unsigned long) send_wc->wr_id;

        dma_unmap_single(agent->device->dma_device,
                         pci_unmap_addr(packet, mapping),
                         sizeof packet->mad.data,
                         DMA_TO_DEVICE);
        ib_destroy_ah(packet->ah);

        if (send_wc->status == IB_WC_RESP_TIMEOUT_ERR) {
                packet->mad.status = ETIMEDOUT;
                if (!queue_packet(file, agent, packet))
                        return;
        }

        kfree(packet);
}
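
/*
 * Receive completion handler: copy the incoming MAD plus the work
 * completion metadata (source QP, LID, SL, path bits, GRH flag) into an
 * ib_user_mad and queue it for the owning file.  The packet is dropped if
 * allocation fails or the agent is no longer registered.
 */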
static void recv_handler(struct ib_mad_agent *agent,
                         struct ib_mad_recv_wc *mad_recv_wc)
{
        struct ib_umad_file *file = agent->context;
        struct ib_umad_packet *packet;

        if (mad_recv_wc->wc->status != IB_WC_SUCCESS)
                goto out;

        packet = kmalloc(sizeof *packet, GFP_KERNEL);
        if (!packet)
                goto out;

        memset(packet, 0, sizeof *packet);

        memcpy(packet->mad.data, mad_recv_wc->recv_buf.mad, sizeof packet->mad.data);

        packet->mad.status      = 0;
        packet->mad.qpn         = cpu_to_be32(mad_recv_wc->wc->src_qp);
        packet->mad.lid         = cpu_to_be16(mad_recv_wc->wc->slid);
        packet->mad.sl          = mad_recv_wc->wc->sl;
        packet->mad.path_bits   = mad_recv_wc->wc->dlid_path_bits;
        packet->mad.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
        if (packet->mad.grh_present) {
                /* XXX parse GRH */
                packet->mad.gid_index     = 0;
                packet->mad.hop_limit     = 0;
                packet->mad.traffic_class = 0;
                memset(packet->mad.gid, 0, 16);
                packet->mad.flow_label    = 0;
        }

        if (queue_packet(file, agent, packet))
                kfree(packet);

out:
        ib_free_recv_mad(mad_recv_wc);
}
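
/*
 * read(): return one queued MAD to userspace.  Blocks until a packet is
 * available unless the file was opened O_NONBLOCK; the user buffer must
 * hold at least one struct ib_user_mad.
 */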
static ssize_t ib_umad_read(struct file *filp, char __user *buf,
                            size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        ssize_t ret;

        if (count < sizeof (struct ib_user_mad))
                return -EINVAL;

        spin_lock_irq(&file->recv_lock);

        while (list_empty(&file->recv_list)) {
                spin_unlock_irq(&file->recv_lock);

                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->recv_wait,
                                             !list_empty(&file->recv_list)))
                        return -ERESTARTSYS;

                spin_lock_irq(&file->recv_lock);
        }

        packet = list_entry(file->recv_list.next, struct ib_umad_packet, list);
        list_del(&packet->list);

        spin_unlock_irq(&file->recv_lock);

        if (copy_to_user(buf, &packet->mad, sizeof packet->mad))
                ret = -EFAULT;
        else
                ret = sizeof packet->mad;

        kfree(packet);
        return ret;
}
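
/*
 * write(): send one MAD.  The ib_user_mad header selects the registered
 * agent and carries the destination address; an address handle and a UD
 * send work request are built from it, the MAD data is DMA-mapped, and
 * the request is posted through ib_post_send_mad().
 */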
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
                             size_t count, loff_t *pos)
{
        struct ib_umad_file *file = filp->private_data;
        struct ib_umad_packet *packet;
        struct ib_mad_agent *agent;
        struct ib_ah_attr ah_attr;
        struct ib_sge gather_list;
        struct ib_send_wr *bad_wr, wr = {
                .opcode     = IB_WR_SEND,
                .sg_list    = &gather_list,
                .num_sge    = 1,
                .send_flags = IB_SEND_SIGNALED,
        };
        u8 method;
        u64 *tid;
        int ret;

        if (count < sizeof (struct ib_user_mad))
                return -EINVAL;

        packet = kmalloc(sizeof *packet, GFP_KERNEL);
        if (!packet)
                return -ENOMEM;

        if (copy_from_user(&packet->mad, buf, sizeof packet->mad)) {
                kfree(packet);
                return -EFAULT;
        }

        if (packet->mad.id < 0 || packet->mad.id >= IB_UMAD_MAX_AGENTS) {
                ret = -EINVAL;
                goto err;
        }

        down_read(&file->agent_mutex);

        agent = file->agent[packet->mad.id];
        if (!agent) {
                ret = -EINVAL;
                goto err_up;
        }

        /*
         * If userspace is generating a request that will generate a
         * response, we need to make sure the high-order part of the
         * transaction ID matches the agent being used to send the
         * MAD.
         */
        method = ((struct ib_mad_hdr *) packet->mad.data)->method;

        if (!(method & IB_MGMT_METHOD_RESP)       &&
            method != IB_MGMT_METHOD_TRAP_REPRESS &&
            method != IB_MGMT_METHOD_SEND) {
                tid = &((struct ib_mad_hdr *) packet->mad.data)->tid;
                *tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
                                   (be64_to_cpup(tid) & 0xffffffff));
        }

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.dlid          = be16_to_cpu(packet->mad.lid);
        ah_attr.sl            = packet->mad.sl;
        ah_attr.src_path_bits = packet->mad.path_bits;
        ah_attr.port_num      = file->port->port_num;
        if (packet->mad.grh_present) {
                ah_attr.ah_flags = IB_AH_GRH;
                memcpy(ah_attr.grh.dgid.raw, packet->mad.gid, 16);
                ah_attr.grh.flow_label    = packet->mad.flow_label;
                ah_attr.grh.hop_limit     = packet->mad.hop_limit;
                ah_attr.grh.traffic_class = packet->mad.traffic_class;
        }

        packet->ah = ib_create_ah(agent->qp->pd, &ah_attr);
        if (IS_ERR(packet->ah)) {
                ret = PTR_ERR(packet->ah);
                goto err_up;
        }

        gather_list.addr = dma_map_single(agent->device->dma_device,
                                          packet->mad.data,
                                          sizeof packet->mad.data,
                                          DMA_TO_DEVICE);
        gather_list.length = sizeof packet->mad.data;
        gather_list.lkey   = file->mr[packet->mad.id]->lkey;
        pci_unmap_addr_set(packet, mapping, gather_list.addr);

        wr.wr.ud.mad_hdr     = (struct ib_mad_hdr *) packet->mad.data;
        wr.wr.ud.ah          = packet->ah;
        wr.wr.ud.remote_qpn  = be32_to_cpu(packet->mad.qpn);
        wr.wr.ud.remote_qkey = be32_to_cpu(packet->mad.qkey);
        wr.wr.ud.timeout_ms  = packet->mad.timeout_ms;

        wr.wr_id = (unsigned long) packet;

        ret = ib_post_send_mad(agent, &wr, &bad_wr);
        if (ret) {
                dma_unmap_single(agent->device->dma_device,
                                 pci_unmap_addr(packet, mapping),
                                 sizeof packet->mad.data,
                                 DMA_TO_DEVICE);
                goto err_up;
        }

        up_read(&file->agent_mutex);

        return sizeof packet->mad;

err_up:
        up_read(&file->agent_mutex);

err:
        kfree(packet);
        return ret;
}

static unsigned int ib_umad_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ib_umad_file *file = filp->private_data;

        /* we will always be able to post a MAD send */
        unsigned int mask = POLLOUT | POLLWRNORM;

        poll_wait(filp, &file->recv_wait, wait);

        if (!list_empty(&file->recv_list))
                mask |= POLLIN | POLLRDNORM;

        return mask;
}
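
/*
 * IB_USER_MAD_REGISTER_AGENT handler: copy the registration request from
 * userspace, pick a free agent slot, register a MAD agent on the port's
 * SMI or GSI QP, get a DMA memory region whose lkey is used for sends,
 * and return the slot number through the request's id field.
 */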
static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
{
        struct ib_user_mad_reg_req ureq;
        struct ib_mad_reg_req req;
        struct ib_mad_agent *agent;
        int agent_id;
        int ret;

        down_write(&file->agent_mutex);

        if (copy_from_user(&ureq, (void __user *) arg, sizeof ureq)) {
                ret = -EFAULT;
                goto out;
        }

        if (ureq.qpn != 0 && ureq.qpn != 1) {
                ret = -EINVAL;
                goto out;
        }

        for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
                if (!file->agent[agent_id])
                        goto found;

        ret = -ENOMEM;
        goto out;

found:
        req.mgmt_class         = ureq.mgmt_class;
        req.mgmt_class_version = ureq.mgmt_class_version;
        memcpy(req.method_mask, ureq.method_mask, sizeof req.method_mask);
        memcpy(req.oui, ureq.oui, sizeof req.oui);

        agent = ib_register_mad_agent(file->port->ib_dev, file->port->port_num,
                                      ureq.qpn ? IB_QPT_GSI : IB_QPT_SMI,
                                      &req, 0, send_handler, recv_handler,
                                      file);
        if (IS_ERR(agent)) {
                ret = PTR_ERR(agent);
                goto out;
        }

        file->agent[agent_id] = agent;

        file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(file->mr[agent_id])) {
                ret = -ENOMEM;
                goto err;
        }

        if (put_user(agent_id,
                     (u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
                ret = -EFAULT;
                goto err_mr;
        }

        ret = 0;
        goto out;

err_mr:
        ib_dereg_mr(file->mr[agent_id]);

err:
        file->agent[agent_id] = NULL;
        ib_unregister_mad_agent(agent);

out:
        up_write(&file->agent_mutex);
        return ret;
}

static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
        u32 id;
        int ret = 0;

        down_write(&file->agent_mutex);

        if (get_user(id, (u32 __user *) arg)) {
                ret = -EFAULT;
                goto out;
        }

        if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) {
                ret = -EINVAL;
                goto out;
        }

        ib_dereg_mr(file->mr[id]);
        ib_unregister_mad_agent(file->agent[id]);
        file->agent[id] = NULL;

out:
        up_write(&file->agent_mutex);
        return ret;
}

static long ib_umad_ioctl(struct file *filp,
                          unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case IB_USER_MAD_REGISTER_AGENT:
                return ib_umad_reg_agent(filp->private_data, arg);
        case IB_USER_MAD_UNREGISTER_AGENT:
                return ib_umad_unreg_agent(filp->private_data, arg);
        default:
                return -ENOIOCTLCMD;
        }
}

static int ib_umad_open(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port =
                container_of(inode->i_cdev, struct ib_umad_port, dev);
        struct ib_umad_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        memset(file, 0, sizeof *file);

        spin_lock_init(&file->recv_lock);
        init_rwsem(&file->agent_mutex);
        INIT_LIST_HEAD(&file->recv_list);
        init_waitqueue_head(&file->recv_wait);

        file->port = port;
        filp->private_data = file;

        return 0;
}

static int ib_umad_close(struct inode *inode, struct file *filp)
{
        struct ib_umad_file *file = filp->private_data;
        int i;

        for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
                if (file->agent[i]) {
                        ib_dereg_mr(file->mr[i]);
                        ib_unregister_mad_agent(file->agent[i]);
                }

        kfree(file);

        return 0;
}

static struct file_operations umad_fops = {
        .owner          = THIS_MODULE,
        .read           = ib_umad_read,
        .write          = ib_umad_write,
        .poll           = ib_umad_poll,
        .unlocked_ioctl = ib_umad_ioctl,
        .compat_ioctl   = ib_umad_ioctl,
        .open           = ib_umad_open,
        .release        = ib_umad_close
};
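
/*
 * The issm%d devices carry the subnet manager interface: opening one sets
 * the IB_PORT_SM capability bit for the port and closing it clears the
 * bit again.  The per-port semaphore allows only one opener at a time.
 */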
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port =
                container_of(inode->i_cdev, struct ib_umad_port, sm_dev);
        struct ib_port_modify props = {
                .set_port_cap_mask = IB_PORT_SM
        };
        int ret;

        if (filp->f_flags & O_NONBLOCK) {
                if (down_trylock(&port->sm_sem))
                        return -EAGAIN;
        } else {
                if (down_interruptible(&port->sm_sem))
                        return -ERESTARTSYS;
        }

        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
        if (ret) {
                up(&port->sm_sem);
                return ret;
        }

        filp->private_data = port;

        return 0;
}

static int ib_umad_sm_close(struct inode *inode, struct file *filp)
{
        struct ib_umad_port *port = filp->private_data;
        struct ib_port_modify props = {
                .clr_port_cap_mask = IB_PORT_SM
        };
        int ret;

        ret = ib_modify_port(port->ib_dev, port->port_num, 0, &props);
        up(&port->sm_sem);

        return ret;
}

static struct file_operations umad_sm_fops = {
        .owner   = THIS_MODULE,
        .open    = ib_umad_sm_open,
        .release = ib_umad_sm_close
};

static struct ib_client umad_client = {
        .name   = "umad",
        .add    = ib_umad_add_one,
        .remove = ib_umad_remove_one
};

static ssize_t show_dev(struct class_device *class_dev, char *buf)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        if (class_dev == &port->class_dev)
                return print_dev_t(buf, port->dev.dev);
        else
                return print_dev_t(buf, port->sm_dev.dev);
}
static CLASS_DEVICE_ATTR(dev, S_IRUGO, show_dev, NULL);

static ssize_t show_ibdev(struct class_device *class_dev, char *buf)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        return sprintf(buf, "%s\n", port->ib_dev->name);
}
static CLASS_DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);

static ssize_t show_port(struct class_device *class_dev, char *buf)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        return sprintf(buf, "%d\n", port->port_num);
}
static CLASS_DEVICE_ATTR(port, S_IRUGO, show_port, NULL);

static void ib_umad_release_dev(struct kref *ref)
{
        struct ib_umad_device *dev =
                container_of(ref, struct ib_umad_device, ref);

        kfree(dev);
}

static void ib_umad_release_port(struct class_device *class_dev)
{
        struct ib_umad_port *port = class_get_devdata(class_dev);

        if (class_dev == &port->class_dev) {
                cdev_del(&port->dev);
                clear_bit(port->devnum, dev_map);
        } else {
                cdev_del(&port->sm_dev);
                clear_bit(port->sm_devnum, dev_map);
        }

        kref_put(&port->umad_dev->ref, ib_umad_release_dev);
}

static struct class umad_class = {
        .name    = "infiniband_mad",
        .release = ib_umad_release_port
};

static ssize_t show_abi_version(struct class *class, char *buf)
{
        return sprintf(buf, "%d\n", IB_USER_MAD_ABI_VERSION);
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
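
/*
 * Per-port setup: allocate two minor numbers from dev_map (umad%d from
 * the lower half of the range, issm%d from the upper half), register the
 * two character devices and their class devices, and create the dev,
 * ibdev and port sysfs attributes for each.
 */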
static int ib_umad_init_port(struct ib_device *device, int port_num,
                             struct ib_umad_port *port)
{
        spin_lock(&map_lock);
        port->devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
        if (port->devnum >= IB_UMAD_MAX_PORTS) {
                spin_unlock(&map_lock);
                return -1;
        }
        port->sm_devnum = find_next_zero_bit(dev_map, IB_UMAD_MAX_PORTS * 2, IB_UMAD_MAX_PORTS);
        if (port->sm_devnum >= IB_UMAD_MAX_PORTS * 2) {
                spin_unlock(&map_lock);
                return -1;
        }
        set_bit(port->devnum, dev_map);
        set_bit(port->sm_devnum, dev_map);
        spin_unlock(&map_lock);

        port->ib_dev   = device;
        port->port_num = port_num;
        init_MUTEX(&port->sm_sem);

        cdev_init(&port->dev, &umad_fops);
        port->dev.owner = THIS_MODULE;
        kobject_set_name(&port->dev.kobj, "umad%d", port->devnum);
        if (cdev_add(&port->dev, base_dev + port->devnum, 1))
                return -1;

        port->class_dev.class = &umad_class;
        port->class_dev.dev   = device->dma_device;

        snprintf(port->class_dev.class_id, BUS_ID_SIZE, "umad%d", port->devnum);

        if (class_device_register(&port->class_dev))
                goto err_cdev;

        class_set_devdata(&port->class_dev, port);
        kref_get(&port->umad_dev->ref);

        if (class_device_create_file(&port->class_dev, &class_device_attr_dev))
                goto err_class;
        if (class_device_create_file(&port->class_dev, &class_device_attr_ibdev))
                goto err_class;
        if (class_device_create_file(&port->class_dev, &class_device_attr_port))
                goto err_class;

        cdev_init(&port->sm_dev, &umad_sm_fops);
        port->sm_dev.owner = THIS_MODULE;
        kobject_set_name(&port->sm_dev.kobj, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);
        if (cdev_add(&port->sm_dev, base_dev + port->sm_devnum, 1))
                return -1;

        port->sm_class_dev.class = &umad_class;
        port->sm_class_dev.dev   = device->dma_device;

        snprintf(port->sm_class_dev.class_id, BUS_ID_SIZE, "issm%d", port->sm_devnum - IB_UMAD_MAX_PORTS);

        if (class_device_register(&port->sm_class_dev))
                goto err_sm_cdev;

        class_set_devdata(&port->sm_class_dev, port);
        kref_get(&port->umad_dev->ref);

        if (class_device_create_file(&port->sm_class_dev, &class_device_attr_dev))
                goto err_sm_class;
        if (class_device_create_file(&port->sm_class_dev, &class_device_attr_ibdev))
                goto err_sm_class;
        if (class_device_create_file(&port->sm_class_dev, &class_device_attr_port))
                goto err_sm_class;

        return 0;

err_sm_class:
        class_device_unregister(&port->sm_class_dev);

err_sm_cdev:
        cdev_del(&port->sm_dev);

err_class:
        class_device_unregister(&port->class_dev);

err_cdev:
        cdev_del(&port->dev);
        clear_bit(port->devnum, dev_map);

        return -1;
}
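
/*
 * Device add callback from the IB client framework: allocate one
 * ib_umad_device covering every physical port of the new device (a
 * switch has only its management port 0) and set up a umad/issm device
 * pair for each port.
 */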
static void ib_umad_add_one(struct ib_device *device)
{
        struct ib_umad_device *umad_dev;
        int s, e, i;

        if (device->node_type == IB_NODE_SWITCH)
                s = e = 0;
        else {
                s = 1;
                e = device->phys_port_cnt;
        }

        umad_dev = kmalloc(sizeof *umad_dev +
                           (e - s + 1) * sizeof (struct ib_umad_port),
                           GFP_KERNEL);
        if (!umad_dev)
                return;

        memset(umad_dev, 0, sizeof *umad_dev +
               (e - s + 1) * sizeof (struct ib_umad_port));

        kref_init(&umad_dev->ref);

        umad_dev->start_port = s;
        umad_dev->end_port   = e;

        for (i = s; i <= e; ++i) {
                umad_dev->port[i - s].umad_dev = umad_dev;

                if (ib_umad_init_port(device, i, &umad_dev->port[i - s]))
                        goto err;
        }

        ib_set_client_data(device, &umad_client, umad_dev);

        return;

err:
        while (--i >= s) {
                class_device_unregister(&umad_dev->port[i - s].class_dev);
                class_device_unregister(&umad_dev->port[i - s].sm_class_dev);
        }

        kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static void ib_umad_remove_one(struct ib_device *device)
{
        struct ib_umad_device *umad_dev = ib_get_client_data(device, &umad_client);
        int i;

        if (!umad_dev)
                return;

        for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) {
                class_device_unregister(&umad_dev->port[i].class_dev);
                class_device_unregister(&umad_dev->port[i].sm_class_dev);
        }

        kref_put(&umad_dev->ref, ib_umad_release_dev);
}

static int __init ib_umad_init(void)
{
        int ret;

        spin_lock_init(&map_lock);

        ret = register_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2,
                                     "infiniband_mad");
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't register device number\n");
                goto out;
        }

        ret = class_register(&umad_class);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't create class infiniband_mad\n");
                goto out_chrdev;
        }

        ret = class_create_file(&umad_class, &class_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n");
                goto out_class;
        }

        ret = ib_register_client(&umad_client);
        if (ret) {
                printk(KERN_ERR "user_mad: couldn't register ib_umad client\n");
                goto out_class;
        }

        return 0;

out_class:
        class_unregister(&umad_class);

out_chrdev:
        unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);

out:
        return ret;
}

static void __exit ib_umad_cleanup(void)
{
        ib_unregister_client(&umad_client);
        class_unregister(&umad_class);
        unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
}

module_init(ib_umad_init);
module_exit(ib_umad_cleanup);