/drivers/block/xen-blkback/xenbus.c

https://bitbucket.org/ndreys/linux-sunxi · C · 767 lines · 572 code · 132 blank · 63 comment · 96 complexity · 917511f2fc00bc57cba0d6c4950896a6 MD5 · raw file

  1. /* Xenbus code for blkif backend
  2. Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
  3. Copyright (C) 2005 XenSource Ltd
  4. This program is free software; you can redistribute it and/or modify
  5. it under the terms of the GNU General Public License as published by
  6. the Free Software Foundation; either version 2 of the License, or
  7. (at your option) any later version.
  8. This program is distributed in the hope that it will be useful,
  9. but WITHOUT ANY WARRANTY; without even the implied warranty of
  10. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  11. GNU General Public License for more details.
  12. */
  13. #include <stdarg.h>
  14. #include <linux/module.h>
  15. #include <linux/kthread.h>
  16. #include <xen/events.h>
  17. #include <xen/grant_table.h>
  18. #include "common.h"
/* Per-device backend state; attached to the xenbus device via drvdata. */
struct backend_info {
	struct xenbus_device *dev;
	struct xen_blkif *blkif;
	/* Watch on <nodename>/physical-device, written by the hotplug scripts. */
	struct xenbus_watch backend_watch;
	unsigned major;			/* physical device major, 0 until known */
	unsigned minor;			/* physical device minor, 0 until known */
	char *mode;			/* "mode" node from xenstore (xenbus_read allocation) */
};

/* Slab cache backing struct xen_blkif allocations. */
static struct kmem_cache *xen_blkif_cachep;

static void connect(struct backend_info *);
static int connect_ring(struct backend_info *);
static void backend_changed(struct xenbus_watch *, const char **,
			    unsigned int);
/* Accessor: return the xenbus device backing @be (used by other files). */
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be)
{
	return be->dev;
}
  36. static int blkback_name(struct xen_blkif *blkif, char *buf)
  37. {
  38. char *devpath, *devname;
  39. struct xenbus_device *dev = blkif->be->dev;
  40. devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
  41. if (IS_ERR(devpath))
  42. return PTR_ERR(devpath);
  43. devname = strstr(devpath, "/dev/");
  44. if (devname != NULL)
  45. devname += strlen("/dev/");
  46. else
  47. devname = devpath;
  48. snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
  49. kfree(devpath);
  50. return 0;
  51. }
/*
 * Try to move the backend into the Connected state and start the I/O
 * kthread.  Safe to call repeatedly: it returns early while the ring is
 * not yet mapped, the physical device is not yet opened, or we are
 * already connected.  Later failures are reported via xenbus_dev_error()
 * and the function simply returns.
 */
static void xen_update_blkif_status(struct xen_blkif *blkif)
{
	int err;
	char name[TASK_COMM_LEN];	/* sized to match blkback_name() */

	/* Not ready to connect? (no event channel bound or no vbd opened) */
	if (!blkif->irq || !blkif->vbd.bdev)
		return;

	/* Already connected? */
	if (blkif->be->dev->state == XenbusStateConnected)
		return;

	/* Attempt to connect: exit if we fail to. */
	connect(blkif->be);
	if (blkif->be->dev->state != XenbusStateConnected)
		return;

	err = blkback_name(blkif, name);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
		return;
	}

	/* Flush dirty pages and drop cached pages before serving the device. */
	err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
	if (err) {
		xenbus_dev_error(blkif->be->dev, err, "block flush");
		return;
	}
	invalidate_inode_pages2(blkif->vbd.bdev->bd_inode->i_mapping);

	/* Start the request-processing thread, named after the device. */
	blkif->xenblkd = kthread_run(xen_blkif_schedule, blkif, name);
	if (IS_ERR(blkif->xenblkd)) {
		err = PTR_ERR(blkif->xenblkd);
		blkif->xenblkd = NULL;
		xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
	}
}
  84. static struct xen_blkif *xen_blkif_alloc(domid_t domid)
  85. {
  86. struct xen_blkif *blkif;
  87. blkif = kmem_cache_alloc(xen_blkif_cachep, GFP_KERNEL);
  88. if (!blkif)
  89. return ERR_PTR(-ENOMEM);
  90. memset(blkif, 0, sizeof(*blkif));
  91. blkif->domid = domid;
  92. spin_lock_init(&blkif->blk_ring_lock);
  93. atomic_set(&blkif->refcnt, 1);
  94. init_waitqueue_head(&blkif->wq);
  95. blkif->st_print = jiffies;
  96. init_waitqueue_head(&blkif->waiting_to_free);
  97. return blkif;
  98. }
/*
 * Map the frontend's shared ring page (grant reference @shared_page)
 * into blkif->blk_ring_area, which must already be allocated.
 * Returns 0 on success, or the non-zero grant-table status on failure.
 */
static int map_frontend_page(struct xen_blkif *blkif, unsigned long shared_page)
{
	struct gnttab_map_grant_ref op;

	gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			  GNTMAP_host_map, shared_page, blkif->domid);

	/* Failure of the hypercall itself is a kernel/hypervisor bug. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
		BUG();

	if (op.status) {
		DPRINTK("Grant table operation failure !\n");
		return op.status;
	}

	/* Remember reference and handle so unmap_frontend_page() can undo this. */
	blkif->shmem_ref = shared_page;
	blkif->shmem_handle = op.handle;

	return 0;
}
/* Undo map_frontend_page(): drop the grant mapping of the shared ring page. */
static void unmap_frontend_page(struct xen_blkif *blkif)
{
	struct gnttab_unmap_grant_ref op;

	gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
			    GNTMAP_host_map, blkif->shmem_handle);

	/* Failure of the hypercall itself is a kernel/hypervisor bug. */
	if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
		BUG();
}
/*
 * Map the frontend's shared ring (@shared_page grant ref) and bind its
 * event channel @evtchn.  No-op (returns 0) if already connected.
 * On success blkif->irq is set non-zero; on failure everything acquired
 * here is unwound and a negative errno (or grant status) is returned.
 */
static int xen_blkif_map(struct xen_blkif *blkif, unsigned long shared_page,
			 unsigned int evtchn)
{
	int err;

	/* Already connected through? */
	if (blkif->irq)
		return 0;

	/* Reserve a kernel virtual area into which the grant is mapped. */
	blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE);
	if (!blkif->blk_ring_area)
		return -ENOMEM;

	err = map_frontend_page(blkif, shared_page);
	if (err) {
		free_vm_area(blkif->blk_ring_area);
		return err;
	}

	/*
	 * Initialise the back ring with the layout matching the ABI
	 * negotiated in connect_ring().
	 */
	switch (blkif->blk_protocol) {
	case BLKIF_PROTOCOL_NATIVE:
	{
		struct blkif_sring *sring;
		sring = (struct blkif_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_32:
	{
		struct blkif_x86_32_sring *sring_x86_32;
		sring_x86_32 = (struct blkif_x86_32_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
		break;
	}
	case BLKIF_PROTOCOL_X86_64:
	{
		struct blkif_x86_64_sring *sring_x86_64;
		sring_x86_64 = (struct blkif_x86_64_sring *)blkif->blk_ring_area->addr;
		BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
		break;
	}
	default:
		/* blk_protocol is validated in connect_ring(); anything else is a bug. */
		BUG();
	}

	err = bind_interdomain_evtchn_to_irqhandler(blkif->domid, evtchn,
						    xen_blkif_be_int, 0,
						    "blkif-backend", blkif);
	if (err < 0) {
		/* Unwind the ring mapping if the event channel cannot be bound. */
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
		return err;
	}
	blkif->irq = err;	/* on success the return value is the irq number */

	return 0;
}
/*
 * Tear down the connection to the frontend: stop the I/O kthread, wait
 * until all other references are dropped (in-flight work drained), then
 * unbind the event channel and unmap the ring.  Idempotent — safe to
 * call when already disconnected.
 */
static void xen_blkif_disconnect(struct xen_blkif *blkif)
{
	if (blkif->xenblkd) {
		kthread_stop(blkif->xenblkd);
		blkif->xenblkd = NULL;
	}

	/*
	 * Drop our own reference and wait for everyone else's to go, then
	 * take it back so that xen_blkif_free() releases the last one.
	 */
	atomic_dec(&blkif->refcnt);
	wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
	atomic_inc(&blkif->refcnt);

	if (blkif->irq) {
		unbind_from_irqhandler(blkif->irq, blkif);
		blkif->irq = 0;
	}

	if (blkif->blk_rings.common.sring) {
		unmap_frontend_page(blkif);
		free_vm_area(blkif->blk_ring_area);
		blkif->blk_rings.common.sring = NULL;
	}
}
  193. void xen_blkif_free(struct xen_blkif *blkif)
  194. {
  195. if (!atomic_dec_and_test(&blkif->refcnt))
  196. BUG();
  197. kmem_cache_free(xen_blkif_cachep, blkif);
  198. }
  199. int __init xen_blkif_interface_init(void)
  200. {
  201. xen_blkif_cachep = kmem_cache_create("blkif_cache",
  202. sizeof(struct xen_blkif),
  203. 0, 0, NULL);
  204. if (!xen_blkif_cachep)
  205. return -ENOMEM;
  206. return 0;
  207. }
/*
 * sysfs interface for VBD I/O requests
 *
 * VBD_SHOW() expands to a read-only show method that fetches the
 * backend_info from the device's drvdata and prints @args using
 * @format.  Note that 'be' is referenced by the expanded @args, not by
 * the macro body itself.
 */
#define VBD_SHOW(name, format, args...)					\
	static ssize_t show_##name(struct device *_dev,			\
				   struct device_attribute *attr,	\
				   char *buf)				\
	{								\
		struct xenbus_device *dev = to_xenbus_device(_dev);	\
		struct backend_info *be = dev_get_drvdata(&dev->dev);	\
									\
		return sprintf(buf, format, ##args);			\
	}								\
	static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)

/* Per-device I/O statistics, exposed under the "statistics" group below. */
VBD_SHOW(oo_req,  "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req,  "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req,  "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req,  "%d\n", be->blkif->st_f_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);

static struct attribute *xen_vbdstat_attrs[] = {
	&dev_attr_oo_req.attr,
	&dev_attr_rd_req.attr,
	&dev_attr_wr_req.attr,
	&dev_attr_f_req.attr,
	&dev_attr_rd_sect.attr,
	&dev_attr_wr_sect.attr,
	NULL
};

static struct attribute_group xen_vbdstat_group = {
	.name = "statistics",
	.attrs = xen_vbdstat_attrs,
};

/* Stand-alone attributes created by xenvbd_sysfs_addif(). */
VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
VBD_SHOW(mode, "%s\n", be->mode);
  243. int xenvbd_sysfs_addif(struct xenbus_device *dev)
  244. {
  245. int error;
  246. error = device_create_file(&dev->dev, &dev_attr_physical_device);
  247. if (error)
  248. goto fail1;
  249. error = device_create_file(&dev->dev, &dev_attr_mode);
  250. if (error)
  251. goto fail2;
  252. error = sysfs_create_group(&dev->dev.kobj, &xen_vbdstat_group);
  253. if (error)
  254. goto fail3;
  255. return 0;
  256. fail3: sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
  257. fail2: device_remove_file(&dev->dev, &dev_attr_mode);
  258. fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
  259. return error;
  260. }
  261. void xenvbd_sysfs_delif(struct xenbus_device *dev)
  262. {
  263. sysfs_remove_group(&dev->dev.kobj, &xen_vbdstat_group);
  264. device_remove_file(&dev->dev, &dev_attr_mode);
  265. device_remove_file(&dev->dev, &dev_attr_physical_device);
  266. }
  267. static void xen_vbd_free(struct xen_vbd *vbd)
  268. {
  269. if (vbd->bdev)
  270. blkdev_put(vbd->bdev, vbd->readonly ? FMODE_READ : FMODE_WRITE);
  271. vbd->bdev = NULL;
  272. }
/*
 * Open the physical device MKDEV(@major, @minor) and initialise
 * blkif->vbd with its size, media-type flags and flush capability.
 * Returns 0 on success, -ENOENT if the device cannot be opened or has
 * no gendisk attached.
 */
static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
			  unsigned major, unsigned minor, int readonly,
			  int cdrom)
{
	struct xen_vbd *vbd;
	struct block_device *bdev;
	struct request_queue *q;

	vbd = &blkif->vbd;
	vbd->handle = handle;
	vbd->readonly = readonly;
	vbd->type = 0;

	vbd->pdevice = MKDEV(major, minor);

	/* Read-only exports open the device read-only. */
	bdev = blkdev_get_by_dev(vbd->pdevice, vbd->readonly ?
				 FMODE_READ : FMODE_WRITE, NULL);

	if (IS_ERR(bdev)) {
		DPRINTK("xen_vbd_create: device %08x could not be opened.\n",
			vbd->pdevice);
		return -ENOENT;
	}

	vbd->bdev = bdev;
	if (vbd->bdev->bd_disk == NULL) {
		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
			vbd->pdevice);
		xen_vbd_free(vbd);
		return -ENOENT;
	}
	vbd->size = vbd_sz(vbd);

	/* Advertise media properties to the frontend via vbd->type flags. */
	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
		vbd->type |= VDISK_CDROM;
	if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
		vbd->type |= VDISK_REMOVABLE;

	/* Non-zero flush_flags indicates the queue supports cache flushes. */
	q = bdev_get_queue(bdev);
	if (q && q->flush_flags)
		vbd->flush_support = true;

	DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
		handle, blkif->domid);
	return 0;
}
  311. static int xen_blkbk_remove(struct xenbus_device *dev)
  312. {
  313. struct backend_info *be = dev_get_drvdata(&dev->dev);
  314. DPRINTK("");
  315. if (be->major || be->minor)
  316. xenvbd_sysfs_delif(dev);
  317. if (be->backend_watch.node) {
  318. unregister_xenbus_watch(&be->backend_watch);
  319. kfree(be->backend_watch.node);
  320. be->backend_watch.node = NULL;
  321. }
  322. if (be->blkif) {
  323. xen_blkif_disconnect(be->blkif);
  324. xen_vbd_free(&be->blkif->vbd);
  325. xen_blkif_free(be->blkif);
  326. be->blkif = NULL;
  327. }
  328. kfree(be);
  329. dev_set_drvdata(&dev->dev, NULL);
  330. return 0;
  331. }
  332. int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
  333. struct backend_info *be, int state)
  334. {
  335. struct xenbus_device *dev = be->dev;
  336. int err;
  337. err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
  338. "%d", state);
  339. if (err)
  340. xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
  341. return err;
  342. }
  343. /*
  344. * Entry point to this code when a new device is created. Allocate the basic
  345. * structures, and watch the store waiting for the hotplug scripts to tell us
  346. * the device's physical major and minor numbers. Switch to InitWait.
  347. */
/*
 * Entry point to this code when a new device is created. Allocate the basic
 * structures, and watch the store waiting for the hotplug scripts to tell us
 * the device's physical major and minor numbers. Switch to InitWait.
 */
static int xen_blkbk_probe(struct xenbus_device *dev,
			   const struct xenbus_device_id *id)
{
	int err;
	struct backend_info *be = kzalloc(sizeof(struct backend_info),
					  GFP_KERNEL);
	if (!be) {
		xenbus_dev_fatal(dev, -ENOMEM,
				 "allocating backend structure");
		return -ENOMEM;
	}
	be->dev = dev;
	dev_set_drvdata(&dev->dev, be);

	be->blkif = xen_blkif_alloc(dev->otherend_id);
	if (IS_ERR(be->blkif)) {
		err = PTR_ERR(be->blkif);
		be->blkif = NULL;	/* so xen_blkbk_remove() skips the blkif teardown */
		xenbus_dev_fatal(dev, err, "creating block interface");
		goto fail;
	}

	/* setup back pointer */
	be->blkif->be = be;

	/* The watch fires once immediately, then on every physical-device write. */
	err = xenbus_watch_pathfmt(dev, &be->backend_watch, backend_changed,
				   "%s/%s", dev->nodename, "physical-device");
	if (err)
		goto fail;

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	DPRINTK("failed");
	/* xen_blkbk_remove() tears down whatever was allocated above. */
	xen_blkbk_remove(dev);
	return err;
}
  383. /*
  384. * Callback received when the hotplug scripts have placed the physical-device
  385. * node. Read it and the mode node, and create a vbd. If the frontend is
  386. * ready, connect.
  387. */
  388. static void backend_changed(struct xenbus_watch *watch,
  389. const char **vec, unsigned int len)
  390. {
  391. int err;
  392. unsigned major;
  393. unsigned minor;
  394. struct backend_info *be
  395. = container_of(watch, struct backend_info, backend_watch);
  396. struct xenbus_device *dev = be->dev;
  397. int cdrom = 0;
  398. char *device_type;
  399. DPRINTK("");
  400. err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
  401. &major, &minor);
  402. if (XENBUS_EXIST_ERR(err)) {
  403. /*
  404. * Since this watch will fire once immediately after it is
  405. * registered, we expect this. Ignore it, and wait for the
  406. * hotplug scripts.
  407. */
  408. return;
  409. }
  410. if (err != 2) {
  411. xenbus_dev_fatal(dev, err, "reading physical-device");
  412. return;
  413. }
  414. if ((be->major || be->minor) &&
  415. ((be->major != major) || (be->minor != minor))) {
  416. pr_warn(DRV_PFX "changing physical device (from %x:%x to %x:%x) not supported.\n",
  417. be->major, be->minor, major, minor);
  418. return;
  419. }
  420. be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
  421. if (IS_ERR(be->mode)) {
  422. err = PTR_ERR(be->mode);
  423. be->mode = NULL;
  424. xenbus_dev_fatal(dev, err, "reading mode");
  425. return;
  426. }
  427. device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
  428. if (!IS_ERR(device_type)) {
  429. cdrom = strcmp(device_type, "cdrom") == 0;
  430. kfree(device_type);
  431. }
  432. if (be->major == 0 && be->minor == 0) {
  433. /* Front end dir is a number, which is used as the handle. */
  434. char *p = strrchr(dev->otherend, '/') + 1;
  435. long handle;
  436. err = strict_strtoul(p, 0, &handle);
  437. if (err)
  438. return;
  439. be->major = major;
  440. be->minor = minor;
  441. err = xen_vbd_create(be->blkif, handle, major, minor,
  442. (NULL == strchr(be->mode, 'w')), cdrom);
  443. if (err) {
  444. be->major = 0;
  445. be->minor = 0;
  446. xenbus_dev_fatal(dev, err, "creating vbd structure");
  447. return;
  448. }
  449. err = xenvbd_sysfs_addif(dev);
  450. if (err) {
  451. xen_vbd_free(&be->blkif->vbd);
  452. be->major = 0;
  453. be->minor = 0;
  454. xenbus_dev_fatal(dev, err, "creating sysfs entries");
  455. return;
  456. }
  457. /* We're potentially connected now */
  458. xen_update_blkif_status(be->blkif);
  459. }
  460. }
  461. /*
  462. * Callback received when the frontend's state changes.
  463. */
/*
 * Callback received when the frontend's state changes.  Drives the
 * backend half of the xenbus state machine.
 */
static void frontend_changed(struct xenbus_device *dev,
			     enum xenbus_state frontend_state)
{
	struct backend_info *be = dev_get_drvdata(&dev->dev);
	int err;

	DPRINTK("%s", xenbus_strstate(frontend_state));

	switch (frontend_state) {
	case XenbusStateInitialising:
		/* A frontend reinitialising after a close means a reconnect. */
		if (dev->state == XenbusStateClosed) {
			pr_info(DRV_PFX "%s: prepare for reconnect\n",
				dev->nodename);
			xenbus_switch_state(dev, XenbusStateInitWait);
		}
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		/*
		 * Ensure we connect even when two watches fire in
		 * close successsion and we miss the intermediate value
		 * of frontend_state.
		 */
		if (dev->state == XenbusStateConnected)
			break;

		/*
		 * Enforce precondition before potential leak point.
		 * blkif_disconnect() is idempotent.
		 */
		xen_blkif_disconnect(be->blkif);

		err = connect_ring(be);
		if (err)
			break;
		xen_update_blkif_status(be->blkif);
		break;

	case XenbusStateClosing:
		xen_blkif_disconnect(be->blkif);
		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		/* fall through if not online */
	case XenbusStateUnknown:
		/* implies blkif_disconnect() via blkback_remove() */
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
  516. /* ** Connection ** */
  517. /*
  518. * Write the physical details regarding the block device to the store, and
  519. * switch to Connected state.
  520. */
  521. static void connect(struct backend_info *be)
  522. {
  523. struct xenbus_transaction xbt;
  524. int err;
  525. struct xenbus_device *dev = be->dev;
  526. DPRINTK("%s", dev->otherend);
  527. /* Supply the information about the device the frontend needs */
  528. again:
  529. err = xenbus_transaction_start(&xbt);
  530. if (err) {
  531. xenbus_dev_fatal(dev, err, "starting transaction");
  532. return;
  533. }
  534. err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
  535. if (err)
  536. goto abort;
  537. err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
  538. (unsigned long long)vbd_sz(&be->blkif->vbd));
  539. if (err) {
  540. xenbus_dev_fatal(dev, err, "writing %s/sectors",
  541. dev->nodename);
  542. goto abort;
  543. }
  544. /* FIXME: use a typename instead */
  545. err = xenbus_printf(xbt, dev->nodename, "info", "%u",
  546. be->blkif->vbd.type |
  547. (be->blkif->vbd.readonly ? VDISK_READONLY : 0));
  548. if (err) {
  549. xenbus_dev_fatal(dev, err, "writing %s/info",
  550. dev->nodename);
  551. goto abort;
  552. }
  553. err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
  554. (unsigned long)
  555. bdev_logical_block_size(be->blkif->vbd.bdev));
  556. if (err) {
  557. xenbus_dev_fatal(dev, err, "writing %s/sector-size",
  558. dev->nodename);
  559. goto abort;
  560. }
  561. err = xenbus_transaction_end(xbt, 0);
  562. if (err == -EAGAIN)
  563. goto again;
  564. if (err)
  565. xenbus_dev_fatal(dev, err, "ending transaction");
  566. err = xenbus_switch_state(dev, XenbusStateConnected);
  567. if (err)
  568. xenbus_dev_fatal(dev, err, "switching to Connected state",
  569. dev->nodename);
  570. return;
  571. abort:
  572. xenbus_transaction_end(xbt, 1);
  573. }
  574. static int connect_ring(struct backend_info *be)
  575. {
  576. struct xenbus_device *dev = be->dev;
  577. unsigned long ring_ref;
  578. unsigned int evtchn;
  579. char protocol[64] = "";
  580. int err;
  581. DPRINTK("%s", dev->otherend);
  582. err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
  583. &ring_ref, "event-channel", "%u", &evtchn, NULL);
  584. if (err) {
  585. xenbus_dev_fatal(dev, err,
  586. "reading %s/ring-ref and event-channel",
  587. dev->otherend);
  588. return err;
  589. }
  590. be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
  591. err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
  592. "%63s", protocol, NULL);
  593. if (err)
  594. strcpy(protocol, "unspecified, assuming native");
  595. else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
  596. be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
  597. else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
  598. be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
  599. else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
  600. be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
  601. else {
  602. xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
  603. return -1;
  604. }
  605. pr_info(DRV_PFX "ring-ref %ld, event-channel %d, protocol %d (%s)\n",
  606. ring_ref, evtchn, be->blkif->blk_protocol, protocol);
  607. /* Map the shared frame, irq etc. */
  608. err = xen_blkif_map(be->blkif, ring_ref, evtchn);
  609. if (err) {
  610. xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
  611. ring_ref, evtchn);
  612. return err;
  613. }
  614. return 0;
  615. }
/* ** Driver Registration ** */

/* Devices of type "vbd" in xenstore are claimed by this backend. */
static const struct xenbus_device_id xen_blkbk_ids[] = {
	{ "vbd" },
	{ "" }	/* terminating entry */
};

static struct xenbus_driver xen_blkbk = {
	.name = "vbd",
	.owner = THIS_MODULE,
	.ids = xen_blkbk_ids,
	.probe = xen_blkbk_probe,
	.remove = xen_blkbk_remove,
	.otherend_changed = frontend_changed
};
/* Register the blkback xenbus backend driver; called at module init. */
int xen_blkif_xenbus_init(void)
{
	return xenbus_register_backend(&xen_blkbk);
}