
/drivers/video/fbdev/hyperv_fb.c

https://gitlab.com/deepcypher/linux
C | 1391 lines | 1007 code | 253 blank | 131 comment
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012, Microsoft Corporation.
  4. *
  5. * Author:
  6. * Haiyang Zhang <haiyangz@microsoft.com>
  7. */
  8. /*
  9. * Hyper-V Synthetic Video Frame Buffer Driver
  10. *
  11. * This is the driver for the Hyper-V Synthetic Video, which supports
  12. * screen resolutions up to Full HD 1920x1080 with 32-bit color on Windows
  13. * Server 2012, and 1600x1200 with 16-bit color on Windows Server 2008 R2
  14. * or earlier.
  15. *
  16. * It also solves the double mouse cursor issue of the emulated video mode.
  17. *
  18. * The default screen resolution is 1152x864, which may be changed by a
  19. * kernel parameter:
  20. * video=hyperv_fb:<width>x<height>
  21. * For example: video=hyperv_fb:1280x1024
  22. *
  23. * Portrait orientation is also supported:
  24. * For example: video=hyperv_fb:864x1152
  25. *
  26. * When a Windows 10 RS5+ host is used, the virtual machine screen
  27. * resolution is obtained from the host. The "video=hyperv_fb" option is
  28. * not needed, but it can still be used to override what the host specifies.
  29. * The VM resolution on the host could be set by executing the powershell
  30. * "set-vmvideo" command. For example
  31. * set-vmvideo -vmname name -horizontalresolution:1920 \
  32. * -verticalresolution:1200 -resolutiontype single
  33. *
  34. * Gen 1 VMs also support using the VM's physical memory directly for the
  35. * framebuffer, which can improve framebuffer efficiency and performance.
  36. * This requires allocating contiguous physical memory from the Linux
  37. * kernel's CMA allocator. To enable it, supply a kernel parameter that
  38. * reserves enough CMA memory for the framebuffer. For example:
  39. * cma=130m
  40. * This gives the CMA allocator 130 MB of memory that can be used for the
  41. * framebuffer. For reference, an 8K resolution (7680x4320) framebuffer takes
  42. * about 127 MB of memory.
  43. */
  44. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45. #include <linux/module.h>
  46. #include <linux/kernel.h>
  47. #include <linux/vmalloc.h>
  48. #include <linux/init.h>
  49. #include <linux/completion.h>
  50. #include <linux/fb.h>
  51. #include <linux/pci.h>
  52. #include <linux/panic_notifier.h>
  53. #include <linux/efi.h>
  54. #include <linux/console.h>
  55. #include <linux/hyperv.h>
  56. /* Hyper-V Synthetic Video Protocol definitions and structures */
  57. #define MAX_VMBUS_PKT_SIZE 0x4000
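/*
 * Protocol versions encode the major number in the low 16 bits and the
 * minor number in the high 16 bits (see SYNTHVID_VER_GET_MAJOR/MINOR below).
 */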
  58. #define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
  59. /* Support for VERSION_WIN7 is removed. #define is retained for reference. */
  60. #define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
  61. #define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
  62. #define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
  63. #define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
  64. #define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
  65. #define SYNTHVID_DEPTH_WIN8 32
  66. #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
  67. #define PCI_VENDOR_ID_MICROSOFT 0x1414
  68. #define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
  69. enum pipe_msg_type {
  70. PIPE_MSG_INVALID,
  71. PIPE_MSG_DATA,
  72. PIPE_MSG_MAX
  73. };
  74. struct pipe_msg_hdr {
  75. u32 type;
  76. u32 size; /* size of message after this field */
  77. } __packed;
  78. enum synthvid_msg_type {
  79. SYNTHVID_ERROR = 0,
  80. SYNTHVID_VERSION_REQUEST = 1,
  81. SYNTHVID_VERSION_RESPONSE = 2,
  82. SYNTHVID_VRAM_LOCATION = 3,
  83. SYNTHVID_VRAM_LOCATION_ACK = 4,
  84. SYNTHVID_SITUATION_UPDATE = 5,
  85. SYNTHVID_SITUATION_UPDATE_ACK = 6,
  86. SYNTHVID_POINTER_POSITION = 7,
  87. SYNTHVID_POINTER_SHAPE = 8,
  88. SYNTHVID_FEATURE_CHANGE = 9,
  89. SYNTHVID_DIRT = 10,
  90. SYNTHVID_RESOLUTION_REQUEST = 13,
  91. SYNTHVID_RESOLUTION_RESPONSE = 14,
  92. SYNTHVID_MAX = 15
  93. };
  94. #define SYNTHVID_EDID_BLOCK_SIZE 128
  95. #define SYNTHVID_MAX_RESOLUTION_COUNT 64
  96. struct hvd_screen_info {
  97. u16 width;
  98. u16 height;
  99. } __packed;
  100. struct synthvid_msg_hdr {
  101. u32 type;
  102. u32 size; /* size of this header + payload after this field */
  103. } __packed;
  104. struct synthvid_version_req {
  105. u32 version;
  106. } __packed;
  107. struct synthvid_version_resp {
  108. u32 version;
  109. u8 is_accepted;
  110. u8 max_video_outputs;
  111. } __packed;
  112. struct synthvid_supported_resolution_req {
  113. u8 maximum_resolution_count;
  114. } __packed;
  115. struct synthvid_supported_resolution_resp {
  116. u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
  117. u8 resolution_count;
  118. u8 default_resolution_index;
  119. u8 is_standard;
  120. struct hvd_screen_info
  121. supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
  122. } __packed;
  123. struct synthvid_vram_location {
  124. u64 user_ctx;
  125. u8 is_vram_gpa_specified;
  126. u64 vram_gpa;
  127. } __packed;
  128. struct synthvid_vram_location_ack {
  129. u64 user_ctx;
  130. } __packed;
  131. struct video_output_situation {
  132. u8 active;
  133. u32 vram_offset;
  134. u8 depth_bits;
  135. u32 width_pixels;
  136. u32 height_pixels;
  137. u32 pitch_bytes;
  138. } __packed;
  139. struct synthvid_situation_update {
  140. u64 user_ctx;
  141. u8 video_output_count;
  142. struct video_output_situation video_output[1];
  143. } __packed;
  144. struct synthvid_situation_update_ack {
  145. u64 user_ctx;
  146. } __packed;
  147. struct synthvid_pointer_position {
  148. u8 is_visible;
  149. u8 video_output;
  150. s32 image_x;
  151. s32 image_y;
  152. } __packed;
  153. #define CURSOR_MAX_X 96
  154. #define CURSOR_MAX_Y 96
  155. #define CURSOR_ARGB_PIXEL_SIZE 4
  156. #define CURSOR_MAX_SIZE (CURSOR_MAX_X * CURSOR_MAX_Y * CURSOR_ARGB_PIXEL_SIZE)
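/* part_idx value used when the cursor image is sent in a single, complete message (see synthvid_send_ptr()) */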
  157. #define CURSOR_COMPLETE (-1)
  158. struct synthvid_pointer_shape {
  159. u8 part_idx;
  160. u8 is_argb;
  161. u32 width; /* CURSOR_MAX_X at most */
  162. u32 height; /* CURSOR_MAX_Y at most */
  163. u32 hot_x; /* hotspot relative to upper-left of pointer image */
  164. u32 hot_y;
  165. u8 data[4];
  166. } __packed;
  167. struct synthvid_feature_change {
  168. u8 is_dirt_needed;
  169. u8 is_ptr_pos_needed;
  170. u8 is_ptr_shape_needed;
  171. u8 is_situ_needed;
  172. } __packed;
  173. struct rect {
  174. s32 x1, y1; /* top left corner */
  175. s32 x2, y2; /* bottom right corner, exclusive */
  176. } __packed;
  177. struct synthvid_dirt {
  178. u8 video_output;
  179. u8 dirt_count;
  180. struct rect rect[1];
  181. } __packed;
  182. struct synthvid_msg {
  183. struct pipe_msg_hdr pipe_hdr;
  184. struct synthvid_msg_hdr vid_hdr;
  185. union {
  186. struct synthvid_version_req ver_req;
  187. struct synthvid_version_resp ver_resp;
  188. struct synthvid_vram_location vram;
  189. struct synthvid_vram_location_ack vram_ack;
  190. struct synthvid_situation_update situ;
  191. struct synthvid_situation_update_ack situ_ack;
  192. struct synthvid_pointer_position ptr_pos;
  193. struct synthvid_pointer_shape ptr_shape;
  194. struct synthvid_feature_change feature_chg;
  195. struct synthvid_dirt dirt;
  196. struct synthvid_supported_resolution_req resolution_req;
  197. struct synthvid_supported_resolution_resp resolution_resp;
  198. };
  199. } __packed;
  200. /* FB driver definitions and structures */
  201. #define HVFB_WIDTH 1152 /* default screen width */
  202. #define HVFB_HEIGHT 864 /* default screen height */
  203. #define HVFB_WIDTH_MIN 640
  204. #define HVFB_HEIGHT_MIN 480
  205. #define RING_BUFSIZE (256 * 1024)
  206. #define VSP_TIMEOUT (10 * HZ)
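/* Both delays are HZ/20 (1/20 s): dirty-rectangle updates are throttled to roughly 20 per second. */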
  207. #define HVFB_UPDATE_DELAY (HZ / 20)
  208. #define HVFB_ONDEMAND_THROTTLE (HZ / 20)
  209. struct hvfb_par {
  210. struct fb_info *info;
  211. struct resource *mem;
  212. bool fb_ready; /* fb device is ready */
  213. struct completion wait;
  214. u32 synthvid_version;
  215. struct delayed_work dwork;
  216. bool update;
  217. bool update_saved; /* The value of 'update' before hibernation */
  218. u32 pseudo_palette[16];
  219. u8 init_buf[MAX_VMBUS_PKT_SIZE];
  220. u8 recv_buf[MAX_VMBUS_PKT_SIZE];
  221. /* If true, the VSC notifies the VSP on every framebuffer change */
  222. bool synchronous_fb;
  223. /* If true, need to copy from deferred IO mem to framebuffer mem */
  224. bool need_docopy;
  225. struct notifier_block hvfb_panic_nb;
  226. /* Memory for deferred IO and frame buffer itself */
  227. unsigned char *dio_vp;
  228. unsigned char *mmio_vp;
  229. phys_addr_t mmio_pp;
  230. /* Dirty rectangle, protected by delayed_refresh_lock */
  231. int x1, y1, x2, y2;
  232. bool delayed_refresh;
  233. spinlock_t delayed_refresh_lock;
  234. };
  235. static uint screen_width = HVFB_WIDTH;
  236. static uint screen_height = HVFB_HEIGHT;
  237. static uint screen_depth;
  238. static uint screen_fb_size;
  239. static uint dio_fb_size; /* FB size for deferred IO */
  240. /* Send message to Hyper-V host */
  241. static inline int synthvid_send(struct hv_device *hdev,
  242. struct synthvid_msg *msg)
  243. {
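/* Request IDs come from a function-local atomic counter, so each packet sent to the host gets a unique ID. */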
  244. static atomic64_t request_id = ATOMIC64_INIT(0);
  245. int ret;
  246. msg->pipe_hdr.type = PIPE_MSG_DATA;
  247. msg->pipe_hdr.size = msg->vid_hdr.size;
  248. ret = vmbus_sendpacket(hdev->channel, msg,
  249. msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
  250. atomic64_inc_return(&request_id),
  251. VM_PKT_DATA_INBAND, 0);
  252. if (ret)
  253. pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);
  254. return ret;
  255. }
  256. /* Send screen resolution info to host */
  257. static int synthvid_send_situ(struct hv_device *hdev)
  258. {
  259. struct fb_info *info = hv_get_drvdata(hdev);
  260. struct synthvid_msg msg;
  261. if (!info)
  262. return -ENODEV;
  263. memset(&msg, 0, sizeof(struct synthvid_msg));
  264. msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
  265. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  266. sizeof(struct synthvid_situation_update);
  267. msg.situ.user_ctx = 0;
  268. msg.situ.video_output_count = 1;
  269. msg.situ.video_output[0].active = 1;
  270. msg.situ.video_output[0].vram_offset = 0;
  271. msg.situ.video_output[0].depth_bits = info->var.bits_per_pixel;
  272. msg.situ.video_output[0].width_pixels = info->var.xres;
  273. msg.situ.video_output[0].height_pixels = info->var.yres;
  274. msg.situ.video_output[0].pitch_bytes = info->fix.line_length;
  275. synthvid_send(hdev, &msg);
  276. return 0;
  277. }
  278. /* Send mouse pointer info to host */
  279. static int synthvid_send_ptr(struct hv_device *hdev)
  280. {
  281. struct synthvid_msg msg;
  282. memset(&msg, 0, sizeof(struct synthvid_msg));
  283. msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
  284. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  285. sizeof(struct synthvid_pointer_position);
  286. msg.ptr_pos.is_visible = 1;
  287. msg.ptr_pos.video_output = 0;
  288. msg.ptr_pos.image_x = 0;
  289. msg.ptr_pos.image_y = 0;
  290. synthvid_send(hdev, &msg);
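/*
 * Send a 1x1 ARGB cursor shape in a single message. This effectively hides
 * the host-drawn pointer, which is presumably how the double mouse cursor
 * issue mentioned in the header comment is avoided.
 */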
  291. memset(&msg, 0, sizeof(struct synthvid_msg));
  292. msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
  293. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  294. sizeof(struct synthvid_pointer_shape);
  295. msg.ptr_shape.part_idx = CURSOR_COMPLETE;
  296. msg.ptr_shape.is_argb = 1;
  297. msg.ptr_shape.width = 1;
  298. msg.ptr_shape.height = 1;
  299. msg.ptr_shape.hot_x = 0;
  300. msg.ptr_shape.hot_y = 0;
  301. msg.ptr_shape.data[0] = 0;
  302. msg.ptr_shape.data[1] = 1;
  303. msg.ptr_shape.data[2] = 1;
  304. msg.ptr_shape.data[3] = 1;
  305. synthvid_send(hdev, &msg);
  306. return 0;
  307. }
  308. /* Send updated screen area (dirty rectangle) location to host */
  309. static int
  310. synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
  311. {
  312. struct hv_device *hdev = device_to_hv_device(info->device);
  313. struct synthvid_msg msg;
  314. memset(&msg, 0, sizeof(struct synthvid_msg));
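/* A coordinate of INT_MAX means "up to the edge of the screen"; inverted or out-of-range coordinates are clamped below. */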
  315. if (x2 == INT_MAX)
  316. x2 = info->var.xres;
  317. if (y2 == INT_MAX)
  318. y2 = info->var.yres;
  319. msg.vid_hdr.type = SYNTHVID_DIRT;
  320. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  321. sizeof(struct synthvid_dirt);
  322. msg.dirt.video_output = 0;
  323. msg.dirt.dirt_count = 1;
  324. msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
  325. msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
  326. msg.dirt.rect[0].x2 =
  327. (x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
  328. msg.dirt.rect[0].y2 =
  329. (y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;
  330. synthvid_send(hdev, &msg);
  331. return 0;
  332. }
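/*
 * Copy a byte range from the deferred-IO shadow buffer (dio_vp) into the
 * real framebuffer memory (mmio_vp), clamping the range to the shadow
 * buffer size.
 */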
  333. static void hvfb_docopy(struct hvfb_par *par,
  334. unsigned long offset,
  335. unsigned long size)
  336. {
  337. if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
  338. size == 0 || offset >= dio_fb_size)
  339. return;
  340. if (offset + size > dio_fb_size)
  341. size = dio_fb_size - offset;
  342. memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
  343. }
  344. /* Deferred IO callback */
  345. static void synthvid_deferred_io(struct fb_info *p, struct list_head *pagereflist)
  346. {
  347. struct hvfb_par *par = p->par;
  348. struct fb_deferred_io_pageref *pageref;
  349. unsigned long start, end;
  350. int y1, y2, miny, maxy;
  351. miny = INT_MAX;
  352. maxy = 0;
  353. /*
  354. * Merge dirty pages. It is possible that the last page crosses
  355. * the end of the frame buffer (row yres). This is taken care of
  356. * in synthvid_update() by clamping the y2
  357. * value to yres.
  358. */
  359. list_for_each_entry(pageref, pagereflist, list) {
  360. start = pageref->offset;
  361. end = start + PAGE_SIZE - 1;
  362. y1 = start / p->fix.line_length;
  363. y2 = end / p->fix.line_length;
  364. miny = min_t(int, miny, y1);
  365. maxy = max_t(int, maxy, y2);
  366. /* Copy from dio space to mmio address */
  367. if (par->fb_ready && par->need_docopy)
  368. hvfb_docopy(par, start, PAGE_SIZE);
  369. }
  370. if (par->fb_ready && par->update)
  371. synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
  372. }
  373. static struct fb_deferred_io synthvid_defio = {
  374. .delay = HZ / 20,
  375. .deferred_io = synthvid_deferred_io,
  376. };
  377. /*
  378. * Actions on received messages from host:
  379. * Complete the wait event.
  380. * Or, reply with screen and cursor info.
  381. */
  382. static void synthvid_recv_sub(struct hv_device *hdev)
  383. {
  384. struct fb_info *info = hv_get_drvdata(hdev);
  385. struct hvfb_par *par;
  386. struct synthvid_msg *msg;
  387. if (!info)
  388. return;
  389. par = info->par;
  390. msg = (struct synthvid_msg *)par->recv_buf;
  391. /* Complete the wait event */
  392. if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
  393. msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
  394. msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
  395. memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
  396. complete(&par->wait);
  397. return;
  398. }
  399. /* Reply with screen and cursor info */
  400. if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
  401. if (par->fb_ready) {
  402. synthvid_send_ptr(hdev);
  403. synthvid_send_situ(hdev);
  404. }
  405. par->update = msg->feature_chg.is_dirt_needed;
  406. if (par->update)
  407. schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
  408. }
  409. }
  410. /* Receive callback for messages from the host */
  411. static void synthvid_receive(void *ctx)
  412. {
  413. struct hv_device *hdev = ctx;
  414. struct fb_info *info = hv_get_drvdata(hdev);
  415. struct hvfb_par *par;
  416. struct synthvid_msg *recv_buf;
  417. u32 bytes_recvd;
  418. u64 req_id;
  419. int ret;
  420. if (!info)
  421. return;
  422. par = info->par;
  423. recv_buf = (struct synthvid_msg *)par->recv_buf;
  424. do {
  425. ret = vmbus_recvpacket(hdev->channel, recv_buf,
  426. MAX_VMBUS_PKT_SIZE,
  427. &bytes_recvd, &req_id);
  428. if (bytes_recvd > 0 &&
  429. recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
  430. synthvid_recv_sub(hdev);
  431. } while (bytes_recvd > 0 && ret == 0);
  432. }
  433. /* Check if version ver1 is greater than or equal to ver2 */
  434. static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
  435. {
  436. if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
  437. (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
  438. SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
  439. return true;
  440. return false;
  441. }
  442. /* Check synthetic video protocol version with the host */
  443. static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
  444. {
  445. struct fb_info *info = hv_get_drvdata(hdev);
  446. struct hvfb_par *par = info->par;
  447. struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
  448. int ret = 0;
  449. unsigned long t;
  450. memset(msg, 0, sizeof(struct synthvid_msg));
  451. msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
  452. msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  453. sizeof(struct synthvid_version_req);
  454. msg->ver_req.version = ver;
  455. synthvid_send(hdev, msg);
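/* The response is delivered by synthvid_receive(), which copies it into par->init_buf (pointed to by 'msg') and completes par->wait. */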
  456. t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
  457. if (!t) {
  458. pr_err("Time out on waiting version response\n");
  459. ret = -ETIMEDOUT;
  460. goto out;
  461. }
  462. if (!msg->ver_resp.is_accepted) {
  463. ret = -ENODEV;
  464. goto out;
  465. }
  466. par->synthvid_version = ver;
  467. pr_info("Synthvid Version major %d, minor %d\n",
  468. SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));
  469. out:
  470. return ret;
  471. }
  472. /* Get the supported resolutions from the host and pick the default one */
  473. static int synthvid_get_supported_resolution(struct hv_device *hdev)
  474. {
  475. struct fb_info *info = hv_get_drvdata(hdev);
  476. struct hvfb_par *par = info->par;
  477. struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
  478. int ret = 0;
  479. unsigned long t;
  480. u8 index;
  481. memset(msg, 0, sizeof(struct synthvid_msg));
  482. msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
  483. msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  484. sizeof(struct synthvid_supported_resolution_req);
  485. msg->resolution_req.maximum_resolution_count =
  486. SYNTHVID_MAX_RESOLUTION_COUNT;
  487. synthvid_send(hdev, msg);
  488. t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
  489. if (!t) {
  490. pr_err("Time out on waiting resolution response\n");
  491. ret = -ETIMEDOUT;
  492. goto out;
  493. }
  494. if (msg->resolution_resp.resolution_count == 0) {
  495. pr_err("No supported resolutions\n");
  496. ret = -ENODEV;
  497. goto out;
  498. }
  499. index = msg->resolution_resp.default_resolution_index;
  500. if (index >= msg->resolution_resp.resolution_count) {
  501. pr_err("Invalid resolution index: %d\n", index);
  502. ret = -ENODEV;
  503. goto out;
  504. }
  505. screen_width =
  506. msg->resolution_resp.supported_resolution[index].width;
  507. screen_height =
  508. msg->resolution_resp.supported_resolution[index].height;
  509. out:
  510. return ret;
  511. }
  512. /* Connect to VSP (Virtual Service Provider) on host */
  513. static int synthvid_connect_vsp(struct hv_device *hdev)
  514. {
  515. struct fb_info *info = hv_get_drvdata(hdev);
  516. struct hvfb_par *par = info->par;
  517. int ret;
  518. ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE,
  519. NULL, 0, synthvid_receive, hdev);
  520. if (ret) {
  521. pr_err("Unable to open vmbus channel\n");
  522. return ret;
  523. }
  524. /* Negotiate the protocol version with host */
  525. switch (vmbus_proto_version) {
  526. case VERSION_WIN10:
  527. case VERSION_WIN10_V5:
  528. ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
  529. if (!ret)
  530. break;
  531. fallthrough;
  532. case VERSION_WIN8:
  533. case VERSION_WIN8_1:
  534. ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
  535. break;
  536. default:
  537. ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
  538. break;
  539. }
  540. if (ret) {
  541. pr_err("Synthetic video device version not accepted\n");
  542. goto error;
  543. }
  544. screen_depth = SYNTHVID_DEPTH_WIN8;
  545. if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
  546. ret = synthvid_get_supported_resolution(hdev);
  547. if (ret)
  548. pr_info("Failed to get supported resolution from host, use default\n");
  549. }
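/* The usable framebuffer size is the MMIO space the host offers on this channel. */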
  550. screen_fb_size = hdev->channel->offermsg.offer.
  551. mmio_megabytes * 1024 * 1024;
  552. return 0;
  553. error:
  554. vmbus_close(hdev->channel);
  555. return ret;
  556. }
  557. /* Send VRAM and Situation messages to the host */
  558. static int synthvid_send_config(struct hv_device *hdev)
  559. {
  560. struct fb_info *info = hv_get_drvdata(hdev);
  561. struct hvfb_par *par = info->par;
  562. struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
  563. int ret = 0;
  564. unsigned long t;
  565. /* Send VRAM location */
  566. memset(msg, 0, sizeof(struct synthvid_msg));
  567. msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
  568. msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  569. sizeof(struct synthvid_vram_location);
  570. msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
  571. msg->vram.is_vram_gpa_specified = 1;
  572. synthvid_send(hdev, msg);
  573. t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
  574. if (!t) {
  575. pr_err("Time out on waiting vram location ack\n");
  576. ret = -ETIMEDOUT;
  577. goto out;
  578. }
  579. if (msg->vram_ack.user_ctx != par->mmio_pp) {
  580. pr_err("Unable to set VRAM location\n");
  581. ret = -ENODEV;
  582. goto out;
  583. }
  584. /* Send pointer and situation update */
  585. synthvid_send_ptr(hdev);
  586. synthvid_send_situ(hdev);
  587. out:
  588. return ret;
  589. }
  590. /*
  591. * Delayed work callback:
  592. * It is scheduled whenever an update request is received and it has
  593. * not run within the last HVFB_ONDEMAND_THROTTLE interval.
  594. */
  595. static void hvfb_update_work(struct work_struct *w)
  596. {
  597. struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
  598. struct fb_info *info = par->info;
  599. unsigned long flags;
  600. int x1, x2, y1, y2;
  601. int j;
  602. spin_lock_irqsave(&par->delayed_refresh_lock, flags);
  603. /* Reset the request flag */
  604. par->delayed_refresh = false;
  605. /* Store the dirty rectangle to local variables */
  606. x1 = par->x1;
  607. x2 = par->x2;
  608. y1 = par->y1;
  609. y2 = par->y2;
  610. /* Clear dirty rectangle */
  611. par->x1 = par->y1 = INT_MAX;
  612. par->x2 = par->y2 = 0;
  613. spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
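/* Nothing to do if no valid dirty rectangle was recorded (x1/y1 stay at INT_MAX when the rectangle is empty). */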
  614. if (x1 > info->var.xres || x2 > info->var.xres ||
  615. y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
  616. return;
  617. /* Copy the dirty rectangle to frame buffer memory */
  618. if (par->need_docopy)
  619. for (j = y1; j < y2; j++)
  620. hvfb_docopy(par,
  621. j * info->fix.line_length +
  622. (x1 * screen_depth / 8),
  623. (x2 - x1) * screen_depth / 8);
  624. /* Refresh */
  625. if (par->fb_ready && par->update)
  626. synthvid_update(info, x1, y1, x2, y2);
  627. }
  628. /*
  629. * Control the on-demand refresh frequency. It schedules a delayed
  630. * screen update if one is not already pending.
  631. */
  632. static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
  633. int x1, int y1, int w, int h)
  634. {
  635. unsigned long flags;
  636. int x2 = x1 + w;
  637. int y2 = y1 + h;
  638. spin_lock_irqsave(&par->delayed_refresh_lock, flags);
  639. /* Merge dirty rectangle */
  640. par->x1 = min_t(int, par->x1, x1);
  641. par->y1 = min_t(int, par->y1, y1);
  642. par->x2 = max_t(int, par->x2, x2);
  643. par->y2 = max_t(int, par->y2, y2);
  644. /* Schedule a delayed screen update if not yet */
  645. if (par->delayed_refresh == false) {
  646. schedule_delayed_work(&par->dwork,
  647. HVFB_ONDEMAND_THROTTLE);
  648. par->delayed_refresh = true;
  649. }
  650. spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
  651. }
  652. static int hvfb_on_panic(struct notifier_block *nb,
  653. unsigned long e, void *p)
  654. {
  655. struct hvfb_par *par;
  656. struct fb_info *info;
  657. par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
  658. par->synchronous_fb = true;
  659. info = par->info;
  660. if (par->need_docopy)
  661. hvfb_docopy(par, 0, dio_fb_size);
  662. synthvid_update(info, 0, 0, INT_MAX, INT_MAX);
  663. return NOTIFY_DONE;
  664. }
  665. /* Framebuffer operation handlers */
  666. static int hvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
  667. {
  668. if (var->xres < HVFB_WIDTH_MIN || var->yres < HVFB_HEIGHT_MIN ||
  669. var->xres > screen_width || var->yres > screen_height ||
  670. var->bits_per_pixel != screen_depth)
  671. return -EINVAL;
  672. var->xres_virtual = var->xres;
  673. var->yres_virtual = var->yres;
  674. return 0;
  675. }
  676. static int hvfb_set_par(struct fb_info *info)
  677. {
  678. struct hv_device *hdev = device_to_hv_device(info->device);
  679. return synthvid_send_situ(hdev);
  680. }
  681. static inline u32 chan_to_field(u32 chan, struct fb_bitfield *bf)
  682. {
  683. return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
  684. }
  685. static int hvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
  686. unsigned blue, unsigned transp, struct fb_info *info)
  687. {
  688. u32 *pal = info->pseudo_palette;
  689. if (regno > 15)
  690. return -EINVAL;
  691. pal[regno] = chan_to_field(red, &info->var.red)
  692. | chan_to_field(green, &info->var.green)
  693. | chan_to_field(blue, &info->var.blue)
  694. | chan_to_field(transp, &info->var.transp);
  695. return 0;
  696. }
  697. static int hvfb_blank(int blank, struct fb_info *info)
  698. {
  699. return 1; /* get fb_blank to set the colormap to all black */
  700. }
  701. static void hvfb_cfb_fillrect(struct fb_info *p,
  702. const struct fb_fillrect *rect)
  703. {
  704. struct hvfb_par *par = p->par;
  705. cfb_fillrect(p, rect);
  706. if (par->synchronous_fb)
  707. synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
  708. else
  709. hvfb_ondemand_refresh_throttle(par, rect->dx, rect->dy,
  710. rect->width, rect->height);
  711. }
  712. static void hvfb_cfb_copyarea(struct fb_info *p,
  713. const struct fb_copyarea *area)
  714. {
  715. struct hvfb_par *par = p->par;
  716. cfb_copyarea(p, area);
  717. if (par->synchronous_fb)
  718. synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
  719. else
  720. hvfb_ondemand_refresh_throttle(par, area->dx, area->dy,
  721. area->width, area->height);
  722. }
  723. static void hvfb_cfb_imageblit(struct fb_info *p,
  724. const struct fb_image *image)
  725. {
  726. struct hvfb_par *par = p->par;
  727. cfb_imageblit(p, image);
  728. if (par->synchronous_fb)
  729. synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
  730. else
  731. hvfb_ondemand_refresh_throttle(par, image->dx, image->dy,
  732. image->width, image->height);
  733. }
  734. static const struct fb_ops hvfb_ops = {
  735. .owner = THIS_MODULE,
  736. .fb_check_var = hvfb_check_var,
  737. .fb_set_par = hvfb_set_par,
  738. .fb_setcolreg = hvfb_setcolreg,
  739. .fb_fillrect = hvfb_cfb_fillrect,
  740. .fb_copyarea = hvfb_cfb_copyarea,
  741. .fb_imageblit = hvfb_cfb_imageblit,
  742. .fb_blank = hvfb_blank,
  743. .fb_mmap = fb_deferred_io_mmap,
  744. };
  745. /* Get options from the kernel parameter "video=" */
  746. static void hvfb_get_option(struct fb_info *info)
  747. {
  748. struct hvfb_par *par = info->par;
  749. char *opt = NULL, *p;
  750. uint x = 0, y = 0;
  751. if (fb_get_options(KBUILD_MODNAME, &opt) || !opt || !*opt)
  752. return;
  753. p = strsep(&opt, "x");
  754. if (!*p || kstrtouint(p, 0, &x) ||
  755. !opt || !*opt || kstrtouint(opt, 0, &y)) {
  756. pr_err("Screen option is invalid: skipped\n");
  757. return;
  758. }
  759. if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
  760. (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
  761. (x * y * screen_depth / 8 > screen_fb_size)) ||
  762. (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
  763. x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8)) {
  764. pr_err("Screen resolution option is out of range: skipped\n");
  765. return;
  766. }
  767. screen_width = x;
  768. screen_height = y;
  769. return;
  770. }
  771. /*
  772. * Allocate enough contiguous physical memory.
  773. * Return the physical address on success, or -1 on failure.
  774. */
  775. static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
  776. unsigned int request_size)
  777. {
  778. struct page *page = NULL;
  779. dma_addr_t dma_handle;
  780. void *vmem;
  781. phys_addr_t paddr = 0;
  782. unsigned int order = get_order(request_size);
  783. if (request_size == 0)
  784. return -1;
  785. if (order < MAX_ORDER) {
  786. /* Call alloc_pages if the size is less than 2^MAX_ORDER */
  787. page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
  788. if (!page)
  789. return -1;
  790. paddr = (page_to_pfn(page) << PAGE_SHIFT);
  791. } else {
  792. /* Allocate from CMA */
  793. hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);
  794. vmem = dma_alloc_coherent(&hdev->device,
  795. round_up(request_size, PAGE_SIZE),
  796. &dma_handle,
  797. GFP_KERNEL | __GFP_NOWARN);
  798. if (!vmem)
  799. return -1;
  800. paddr = virt_to_phys(vmem);
  801. }
  802. return paddr;
  803. }
  804. /* Release contiguous physical memory */
  805. static void hvfb_release_phymem(struct hv_device *hdev,
  806. phys_addr_t paddr, unsigned int size)
  807. {
  808. unsigned int order = get_order(size);
  809. if (order < MAX_ORDER)
  810. __free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
  811. else
  812. dma_free_coherent(&hdev->device,
  813. round_up(size, PAGE_SIZE),
  814. phys_to_virt(paddr),
  815. paddr);
  816. }
  817. /* Get framebuffer memory from Hyper-V video pci space */
  818. static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
  819. {
  820. struct hvfb_par *par = info->par;
  821. struct pci_dev *pdev = NULL;
  822. void __iomem *fb_virt;
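/*
 * A VM booted via EFI is treated as a Generation 2 VM, which has no PCI
 * video device. On a Generation 1 VM the synthetic video device is also
 * visible on the PCI bus, and BAR 0 is used as the aperture when removing
 * conflicting framebuffers.
 */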
  823. int gen2vm = efi_enabled(EFI_BOOT);
  824. phys_addr_t paddr;
  825. int ret;
  826. info->apertures = alloc_apertures(1);
  827. if (!info->apertures)
  828. return -ENOMEM;
  829. if (!gen2vm) {
  830. pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
  831. PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
  832. if (!pdev) {
  833. pr_err("Unable to find PCI Hyper-V video\n");
  834. return -ENODEV;
  835. }
  836. info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
  837. info->apertures->ranges[0].size = pci_resource_len(pdev, 0);
  838. /*
  839. * For a Gen 1 VM, we can directly use contiguous physical memory
  840. * from the VM. If this succeeds, deferred IO happens directly
  841. * on the allocated framebuffer memory, avoiding an extra
  842. * memory copy.
  843. */
  844. paddr = hvfb_get_phymem(hdev, screen_fb_size);
  845. if (paddr != (phys_addr_t) -1) {
  846. par->mmio_pp = paddr;
  847. par->mmio_vp = par->dio_vp = __va(paddr);
  848. info->fix.smem_start = paddr;
  849. info->fix.smem_len = screen_fb_size;
  850. info->screen_base = par->mmio_vp;
  851. info->screen_size = screen_fb_size;
  852. par->need_docopy = false;
  853. goto getmem_done;
  854. }
  855. pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
  856. } else {
  857. info->apertures->ranges[0].base = screen_info.lfb_base;
  858. info->apertures->ranges[0].size = screen_info.lfb_size;
  859. }
  860. /*
  861. * Contiguous physical memory could not be used.
  862. * Allocate MMIO space for the framebuffer instead.
  863. */
  864. dio_fb_size =
  865. screen_width * screen_height * screen_depth / 8;
  866. ret = vmbus_allocate_mmio(&par->mem, hdev, 0, -1,
  867. screen_fb_size, 0x100000, true);
  868. if (ret != 0) {
  869. pr_err("Unable to allocate framebuffer memory\n");
  870. goto err1;
  871. }
  872. /*
  873. * Map the VRAM cacheable for performance. This is also required for
  874. * VM Connect to display properly for ARM64 Linux VM, as the host also
  875. * maps the VRAM cacheable.
  876. */
  877. fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
  878. if (!fb_virt)
  879. goto err2;
  880. /* Allocate memory for deferred IO */
  881. par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
  882. if (par->dio_vp == NULL)
  883. goto err3;
  884. /* Physical address of FB device */
  885. par->mmio_pp = par->mem->start;
  886. /* Virtual address of FB device */
  887. par->mmio_vp = (unsigned char *) fb_virt;
  888. info->fix.smem_start = par->mem->start;
  889. info->fix.smem_len = dio_fb_size;
  890. info->screen_base = par->dio_vp;
  891. info->screen_size = dio_fb_size;
  892. getmem_done:
  893. remove_conflicting_framebuffers(info->apertures,
  894. KBUILD_MODNAME, false);
  895. if (gen2vm) {
  896. /* The framebuffer has been reallocated; clear screen_info to avoid misuse from kexec */
  897. screen_info.lfb_size = 0;
  898. screen_info.lfb_base = 0;
  899. screen_info.orig_video_isVGA = 0;
  900. } else {
  901. pci_dev_put(pdev);
  902. }
  903. return 0;
  904. err3:
  905. iounmap(fb_virt);
  906. err2:
  907. vmbus_free_mmio(par->mem->start, screen_fb_size);
  908. par->mem = NULL;
  909. err1:
  910. if (!gen2vm)
  911. pci_dev_put(pdev);
  912. return -ENOMEM;
  913. }
  914. /* Release the framebuffer */
  915. static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
  916. {
  917. struct hvfb_par *par = info->par;
  918. if (par->need_docopy) {
  919. vfree(par->dio_vp);
  920. iounmap(info->screen_base);
  921. vmbus_free_mmio(par->mem->start, screen_fb_size);
  922. } else {
  923. hvfb_release_phymem(hdev, info->fix.smem_start,
  924. screen_fb_size);
  925. }
  926. par->mem = NULL;
  927. }
  928. static int hvfb_probe(struct hv_device *hdev,
  929. const struct hv_vmbus_device_id *dev_id)
  930. {
  931. struct fb_info *info;
  932. struct hvfb_par *par;
  933. int ret;
  934. info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
  935. if (!info)
  936. return -ENOMEM;
  937. par = info->par;
  938. par->info = info;
  939. par->fb_ready = false;
  940. par->need_docopy = true;
  941. init_completion(&par->wait);
  942. INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);
  943. par->delayed_refresh = false;
  944. spin_lock_init(&par->delayed_refresh_lock);
  945. par->x1 = par->y1 = INT_MAX;
  946. par->x2 = par->y2 = 0;
  947. /* Connect to VSP */
  948. hv_set_drvdata(hdev, info);
  949. ret = synthvid_connect_vsp(hdev);
  950. if (ret) {
  951. pr_err("Unable to connect to VSP\n");
  952. goto error1;
  953. }
  954. hvfb_get_option(info);
  955. pr_info("Screen resolution: %dx%d, Color depth: %d, Frame buffer size: %d\n",
  956. screen_width, screen_height, screen_depth, screen_fb_size);
  957. ret = hvfb_getmem(hdev, info);
  958. if (ret) {
  959. pr_err("No memory for framebuffer\n");
  960. goto error2;
  961. }
  962. /* Set up fb_info */
  963. info->flags = FBINFO_DEFAULT;
  964. info->var.xres_virtual = info->var.xres = screen_width;
  965. info->var.yres_virtual = info->var.yres = screen_height;
  966. info->var.bits_per_pixel = screen_depth;
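/* 16 bpp uses RGB565; anything else (32 bpp) uses ARGB8888. */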
  967. if (info->var.bits_per_pixel == 16) {
  968. info->var.red = (struct fb_bitfield){11, 5, 0};
  969. info->var.green = (struct fb_bitfield){5, 6, 0};
  970. info->var.blue = (struct fb_bitfield){0, 5, 0};
  971. info->var.transp = (struct fb_bitfield){0, 0, 0};
  972. } else {
  973. info->var.red = (struct fb_bitfield){16, 8, 0};
  974. info->var.green = (struct fb_bitfield){8, 8, 0};
  975. info->var.blue = (struct fb_bitfield){0, 8, 0};
  976. info->var.transp = (struct fb_bitfield){24, 8, 0};
  977. }
  978. info->var.activate = FB_ACTIVATE_NOW;
  979. info->var.height = -1;
  980. info->var.width = -1;
  981. info->var.vmode = FB_VMODE_NONINTERLACED;
  982. strcpy(info->fix.id, KBUILD_MODNAME);
  983. info->fix.type = FB_TYPE_PACKED_PIXELS;
  984. info->fix.visual = FB_VISUAL_TRUECOLOR;
  985. info->fix.line_length = screen_width * screen_depth / 8;
  986. info->fix.accel = FB_ACCEL_NONE;
  987. info->fbops = &hvfb_ops;
  988. info->pseudo_palette = par->pseudo_palette;
  989. /* Initialize deferred IO */
  990. info->fbdefio = &synthvid_defio;
  991. fb_deferred_io_init(info);
  992. /* Send config to host */
  993. ret = synthvid_send_config(hdev);
  994. if (ret)
  995. goto error;
  996. ret = register_framebuffer(info);
  997. if (ret) {
  998. pr_err("Unable to register framebuffer\n");
  999. goto error;
  1000. }
  1001. par->fb_ready = true;
  1002. par->synchronous_fb = false;
  1003. par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
  1004. atomic_notifier_chain_register(&panic_notifier_list,
  1005. &par->hvfb_panic_nb);
  1006. return 0;
  1007. error:
  1008. fb_deferred_io_cleanup(info);
  1009. hvfb_putmem(hdev, info);
  1010. error2:
  1011. vmbus_close(hdev->channel);
  1012. error1:
  1013. cancel_delayed_work_sync(&par->dwork);
  1014. hv_set_drvdata(hdev, NULL);
  1015. framebuffer_release(info);
  1016. return ret;
  1017. }
  1018. static int hvfb_remove(struct hv_device *hdev)
  1019. {
  1020. struct fb_info *info = hv_get_drvdata(hdev);
  1021. struct hvfb_par *par = info->par;
  1022. atomic_notifier_chain_unregister(&panic_notifier_list,
  1023. &par->hvfb_panic_nb);
  1024. par->update = false;
  1025. par->fb_ready = false;
  1026. fb_deferred_io_cleanup(info);
  1027. unregister_framebuffer(info);
  1028. cancel_delayed_work_sync(&par->dwork);
  1029. vmbus_close(hdev->channel);
  1030. hv_set_drvdata(hdev, NULL);
  1031. hvfb_putmem(hdev, info);
  1032. framebuffer_release(info);
  1033. return 0;
  1034. }
  1035. static int hvfb_suspend(struct hv_device *hdev)
  1036. {
  1037. struct fb_info *info = hv_get_drvdata(hdev);
  1038. struct hvfb_par *par = info->par;
  1039. console_lock();
  1040. /* 1 means do suspend */
  1041. fb_set_suspend(info, 1);
  1042. cancel_delayed_work_sync(&par->dwork);
  1043. cancel_delayed_work_sync(&info->deferred_work);
  1044. par->update_saved = par->update;
  1045. par->update = false;
  1046. par->fb_ready = false;
  1047. vmbus_close(hdev->channel);
  1048. console_unlock();
  1049. return 0;
  1050. }
  1051. static int hvfb_resume(struct hv_device *hdev)
  1052. {
  1053. struct fb_info *info = hv_get_drvdata(hdev);
  1054. struct hvfb_par *par = info->par;
  1055. int ret;
  1056. console_lock();
  1057. ret = synthvid_connect_vsp(hdev);
  1058. if (ret != 0)
  1059. goto out;
  1060. ret = synthvid_send_config(hdev);
  1061. if (ret != 0) {
  1062. vmbus_close(hdev->channel);
  1063. goto out;
  1064. }
  1065. par->fb_ready = true;
  1066. par->update = par->update_saved;
  1067. schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
  1068. schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
  1069. /* 0 means do resume */
  1070. fb_set_suspend(info, 0);
  1071. out:
  1072. console_unlock();
  1073. return ret;
  1074. }
  1075. static const struct pci_device_id pci_stub_id_table[] = {
  1076. {
  1077. .vendor = PCI_VENDOR_ID_MICROSOFT,
  1078. .device = PCI_DEVICE_ID_HYPERV_VIDEO,
  1079. },
  1080. { /* end of list */ }
  1081. };
  1082. static const struct hv_vmbus_device_id id_table[] = {
  1083. /* Synthetic Video Device GUID */
  1084. {HV_SYNTHVID_GUID},
  1085. {}
  1086. };
  1087. MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
  1088. MODULE_DEVICE_TABLE(vmbus, id_table);
  1089. static struct hv_driver hvfb_drv = {
  1090. .name = KBUILD_MODNAME,
  1091. .id_table = id_table,
  1092. .probe = hvfb_probe,
  1093. .remove = hvfb_remove,
  1094. .suspend = hvfb_suspend,
  1095. .resume = hvfb_resume,
  1096. .driver = {
  1097. .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  1098. },
  1099. };
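/*
 * Minimal PCI stub: its probe and remove callbacks do nothing. It simply
 * binds to the Hyper-V video PCI device (present on Gen 1 VMs); all
 * framebuffer handling happens through the VMBus device driven by
 * hvfb_drv above.
 */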
  1100. static int hvfb_pci_stub_probe(struct pci_dev *pdev,
  1101. const struct pci_device_id *ent)
  1102. {
  1103. return 0;
  1104. }
  1105. static void hvfb_pci_stub_remove(struct pci_dev *pdev)
  1106. {
  1107. }
  1108. static struct pci_driver hvfb_pci_stub_driver = {
  1109. .name = KBUILD_MODNAME,
  1110. .id_table = pci_stub_id_table,
  1111. .probe = hvfb_pci_stub_probe,
  1112. .remove = hvfb_pci_stub_remove,
  1113. .driver = {
  1114. .probe_type = PROBE_PREFER_ASYNCHRONOUS,
  1115. }
  1116. };
  1117. static int __init hvfb_drv_init(void)
  1118. {
  1119. int ret;
  1120. ret = vmbus_driver_register(&hvfb_drv);
  1121. if (ret != 0)
  1122. return ret;
  1123. ret = pci_register_driver(&hvfb_pci_stub_driver);
  1124. if (ret != 0) {
  1125. vmbus_driver_unregister(&hvfb_drv);
  1126. return ret;
  1127. }
  1128. return 0;
  1129. }
  1130. static void __exit hvfb_drv_exit(void)
  1131. {
  1132. pci_unregister_driver(&hvfb_pci_stub_driver);
  1133. vmbus_driver_unregister(&hvfb_drv);
  1134. }
  1135. module_init(hvfb_drv_init);
  1136. module_exit(hvfb_drv_exit);
  1137. MODULE_LICENSE("GPL");
  1138. MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");