PageRenderTime 127ms CodeModel.GetById 27ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/video/fbdev/hyperv_fb.c

https://github.com/kvaneesh/linux
C | 1435 lines | 1046 code | 259 blank | 130 comment | 129 complexity | 316c094d7d81eccb27c0140cc06c9b47 MD5 | raw file
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2012, Microsoft Corporation.
  4. *
  5. * Author:
  6. * Haiyang Zhang <haiyangz@microsoft.com>
  7. */
  8. /*
  9. * Hyper-V Synthetic Video Frame Buffer Driver
  10. *
  11. * This is the driver for the Hyper-V Synthetic Video, which supports
  12. * screen resolution up to Full HD 1920x1080 with 32 bit color on Windows
  13. * Server 2012, and 1600x1200 with 16 bit color on Windows Server 2008 R2
  14. * or earlier.
  15. *
  16. * It also solves the double mouse cursor issue of the emulated video mode.
  17. *
  18. * The default screen resolution is 1152x864, which may be changed by a
  19. * kernel parameter:
  20. * video=hyperv_fb:<width>x<height>
  21. * For example: video=hyperv_fb:1280x1024
  22. *
  23. * Portrait orientation is also supported:
  24. * For example: video=hyperv_fb:864x1152
  25. *
  26. * When a Windows 10 RS5+ host is used, the virtual machine screen
  27. * resolution is obtained from the host. The "video=hyperv_fb" option is
  28. * not needed, but still can be used to overwrite what the host specifies.
  29. * The VM resolution on the host could be set by executing the powershell
  30. * "set-vmvideo" command. For example
  31. * set-vmvideo -vmname name -horizontalresolution:1920 \
  32. * -verticalresolution:1200 -resolutiontype single
  33. *
  34. * Gen 1 VMs also support direct using VM's physical memory for framebuffer.
  35. * It could improve the efficiency and performance for framebuffer and VM.
  36. * This requires to allocate contiguous physical memory from Linux kernel's
  37. * CMA memory allocator. To enable this, supply a kernel parameter to give
  38. * enough memory space to CMA allocator for framebuffer. For example:
  39. * cma=130m
  40. * This gives 130MB memory to CMA allocator that can be allocated to
  41. * framebuffer. For reference, 8K resolution (7680x4320) takes about
  42. * 127MB memory.
  43. */
  44. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45. #include <linux/module.h>
  46. #include <linux/kernel.h>
  47. #include <linux/vmalloc.h>
  48. #include <linux/init.h>
  49. #include <linux/completion.h>
  50. #include <linux/fb.h>
  51. #include <linux/pci.h>
  52. #include <linux/panic_notifier.h>
  53. #include <linux/efi.h>
  54. #include <linux/console.h>
  55. #include <linux/hyperv.h>
  56. /* Hyper-V Synthetic Video Protocol definitions and structures */
  57. #define MAX_VMBUS_PKT_SIZE 0x4000
  58. #define SYNTHVID_VERSION(major, minor) ((minor) << 16 | (major))
  59. #define SYNTHVID_VERSION_WIN7 SYNTHVID_VERSION(3, 0)
  60. #define SYNTHVID_VERSION_WIN8 SYNTHVID_VERSION(3, 2)
  61. #define SYNTHVID_VERSION_WIN10 SYNTHVID_VERSION(3, 5)
  62. #define SYNTHVID_VER_GET_MAJOR(ver) (ver & 0x0000ffff)
  63. #define SYNTHVID_VER_GET_MINOR(ver) ((ver & 0xffff0000) >> 16)
  64. #define SYNTHVID_DEPTH_WIN7 16
  65. #define SYNTHVID_DEPTH_WIN8 32
  66. #define SYNTHVID_FB_SIZE_WIN7 (4 * 1024 * 1024)
  67. #define SYNTHVID_WIDTH_MAX_WIN7 1600
  68. #define SYNTHVID_HEIGHT_MAX_WIN7 1200
  69. #define SYNTHVID_FB_SIZE_WIN8 (8 * 1024 * 1024)
  70. #define PCI_VENDOR_ID_MICROSOFT 0x1414
  71. #define PCI_DEVICE_ID_HYPERV_VIDEO 0x5353
/* Framing used on the VMBus pipe; wraps every synthetic video message. */
enum pipe_msg_type {
	PIPE_MSG_INVALID,
	PIPE_MSG_DATA,
	PIPE_MSG_MAX
};

struct pipe_msg_hdr {
	u32 type;
	u32 size; /* size of message after this field */
} __packed;

/* Message codes of the synthetic video protocol. */
enum synthvid_msg_type {
	SYNTHVID_ERROR = 0,
	SYNTHVID_VERSION_REQUEST = 1,
	SYNTHVID_VERSION_RESPONSE = 2,
	SYNTHVID_VRAM_LOCATION = 3,
	SYNTHVID_VRAM_LOCATION_ACK = 4,
	SYNTHVID_SITUATION_UPDATE = 5,
	SYNTHVID_SITUATION_UPDATE_ACK = 6,
	SYNTHVID_POINTER_POSITION = 7,
	SYNTHVID_POINTER_SHAPE = 8,
	SYNTHVID_FEATURE_CHANGE = 9,
	SYNTHVID_DIRT = 10,
	SYNTHVID_RESOLUTION_REQUEST = 13,
	SYNTHVID_RESOLUTION_RESPONSE = 14,
	SYNTHVID_MAX = 15
};
#define SYNTHVID_EDID_BLOCK_SIZE 128
#define SYNTHVID_MAX_RESOLUTION_COUNT 64

/* One width x height entry in the host's resolution list. */
struct hvd_screen_info {
	u16 width;
	u16 height;
} __packed;

struct synthvid_msg_hdr {
	u32 type;
	u32 size; /* size of this header + payload after this field*/
} __packed;

/* Version negotiation request/response payloads. */
struct synthvid_version_req {
	u32 version;
} __packed;

struct synthvid_version_resp {
	u32 version;
	u8 is_accepted;
	u8 max_video_outputs;
} __packed;

/* Resolution query payloads (WIN10+ protocol only; see connect path). */
struct synthvid_supported_resolution_req {
	u8 maximum_resolution_count;
} __packed;

struct synthvid_supported_resolution_resp {
	u8 edid_block[SYNTHVID_EDID_BLOCK_SIZE];
	u8 resolution_count;
	u8 default_resolution_index;
	u8 is_standard;
	struct hvd_screen_info
		supported_resolution[SYNTHVID_MAX_RESOLUTION_COUNT];
} __packed;
/* Guest -> host: guest-physical address of the framebuffer. */
struct synthvid_vram_location {
	u64 user_ctx;
	u8 is_vram_gpa_specified;
	u64 vram_gpa;
} __packed;

/* user_ctx echoes the cookie sent in synthvid_vram_location. */
struct synthvid_vram_location_ack {
	u64 user_ctx;
} __packed;

/* Geometry of one video output as reported to the host. */
struct video_output_situation {
	u8 active;
	u32 vram_offset;
	u8 depth_bits;
	u32 width_pixels;
	u32 height_pixels;
	u32 pitch_bytes;
} __packed;

struct synthvid_situation_update {
	u64 user_ctx;
	u8 video_output_count;
	struct video_output_situation video_output[1];
} __packed;

struct synthvid_situation_update_ack {
	u64 user_ctx;
} __packed;
/* Guest -> host: cursor visibility and position. */
struct synthvid_pointer_position {
	u8 is_visible;
	u8 video_output;
	s32 image_x;
	s32 image_y;
} __packed;

/* Upper bounds of the cursor image accepted by the protocol. */
#define CURSOR_MAX_X 96
#define CURSOR_MAX_Y 96
#define CURSOR_ARGB_PIXEL_SIZE 4
#define CURSOR_MAX_SIZE (CURSOR_MAX_X * CURSOR_MAX_Y * CURSOR_ARGB_PIXEL_SIZE)
/* part_idx sentinel: shape data is complete in this single message */
#define CURSOR_COMPLETE (-1)

struct synthvid_pointer_shape {
	u8 part_idx;
	u8 is_argb;
	u32 width; /* CURSOR_MAX_X at most */
	u32 height; /* CURSOR_MAX_Y at most */
	u32 hot_x; /* hotspot relative to upper-left of pointer image */
	u32 hot_y;
	u8 data[4];
} __packed;

/* Host -> guest: which notifications the host currently wants. */
struct synthvid_feature_change {
	u8 is_dirt_needed;
	u8 is_ptr_pos_needed;
	u8 is_ptr_shape_needed;
	u8 is_situ_needed;
} __packed;
/* Dirty rectangle in screen coordinates. */
struct rect {
	s32 x1, y1; /* top left corner */
	s32 x2, y2; /* bottom right corner, exclusive */
} __packed;

struct synthvid_dirt {
	u8 video_output;
	u8 dirt_count;
	struct rect rect[1];
} __packed;

/* On-the-wire message: pipe framing + video header + typed payload. */
struct synthvid_msg {
	struct pipe_msg_hdr pipe_hdr;
	struct synthvid_msg_hdr vid_hdr;
	union {
		struct synthvid_version_req ver_req;
		struct synthvid_version_resp ver_resp;
		struct synthvid_vram_location vram;
		struct synthvid_vram_location_ack vram_ack;
		struct synthvid_situation_update situ;
		struct synthvid_situation_update_ack situ_ack;
		struct synthvid_pointer_position ptr_pos;
		struct synthvid_pointer_shape ptr_shape;
		struct synthvid_feature_change feature_chg;
		struct synthvid_dirt dirt;
		struct synthvid_supported_resolution_req resolution_req;
		struct synthvid_supported_resolution_resp resolution_resp;
	};
} __packed;
/* FB driver definitions and structures */
#define HVFB_WIDTH 1152 /* default screen width */
#define HVFB_HEIGHT 864 /* default screen height */
#define HVFB_WIDTH_MIN 640
#define HVFB_HEIGHT_MIN 480

#define RING_BUFSIZE (256 * 1024) /* VMBus ring buffer size */
#define VSP_TIMEOUT (10 * HZ) /* timeout waiting for a host reply */
#define HVFB_UPDATE_DELAY (HZ / 20) /* delay before a scheduled refresh */
#define HVFB_ONDEMAND_THROTTLE (HZ / 20) /* on-demand refresh rate limit */
/* Per-device framebuffer driver state. */
struct hvfb_par {
	struct fb_info *info;
	struct resource *mem; /* MMIO region backing the framebuffer */
	bool fb_ready; /* fb device is ready */
	struct completion wait; /* completed when a host reply arrives */
	u32 synthvid_version; /* negotiated protocol version */

	struct delayed_work dwork; /* throttled screen-refresh work */
	bool update; /* host asked for dirty-rectangle updates */
	bool update_saved; /* The value of 'update' before hibernation */

	u32 pseudo_palette[16];
	u8 init_buf[MAX_VMBUS_PKT_SIZE]; /* setup-path request/reply buffer */
	u8 recv_buf[MAX_VMBUS_PKT_SIZE]; /* receive-callback buffer */

	/* If true, the VSC notifies the VSP on every framebuffer change */
	bool synchronous_fb;

	/* If true, need to copy from deferred IO mem to framebuffer mem */
	bool need_docopy;

	struct notifier_block hvfb_panic_nb; /* flushes screen on panic */

	/* Memory for deferred IO and frame buffer itself */
	unsigned char *dio_vp;
	unsigned char *mmio_vp;
	phys_addr_t mmio_pp; /* physical address reported to the host */

	/* Dirty rectangle, protected by delayed_refresh_lock */
	int x1, y1, x2, y2;
	bool delayed_refresh; /* a delayed update is already queued */
	spinlock_t delayed_refresh_lock;
};
/*
 * Current and maximum screen geometry. Defaults may be overridden by the
 * "video=hyperv_fb" option or by the host-reported resolution (WIN10+).
 */
static uint screen_width = HVFB_WIDTH;
static uint screen_height = HVFB_HEIGHT;
static uint screen_width_max = HVFB_WIDTH;
static uint screen_height_max = HVFB_HEIGHT;
static uint screen_depth; /* bits per pixel, fixed by protocol version */
static uint screen_fb_size; /* VRAM size taken from the VMBus offer */
static uint dio_fb_size; /* FB size for deferred IO */
  245. /* Send message to Hyper-V host */
  246. static inline int synthvid_send(struct hv_device *hdev,
  247. struct synthvid_msg *msg)
  248. {
  249. static atomic64_t request_id = ATOMIC64_INIT(0);
  250. int ret;
  251. msg->pipe_hdr.type = PIPE_MSG_DATA;
  252. msg->pipe_hdr.size = msg->vid_hdr.size;
  253. ret = vmbus_sendpacket(hdev->channel, msg,
  254. msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
  255. atomic64_inc_return(&request_id),
  256. VM_PKT_DATA_INBAND, 0);
  257. if (ret)
  258. pr_err_ratelimited("Unable to send packet via vmbus; error %d\n", ret);
  259. return ret;
  260. }
  261. /* Send screen resolution info to host */
  262. static int synthvid_send_situ(struct hv_device *hdev)
  263. {
  264. struct fb_info *info = hv_get_drvdata(hdev);
  265. struct synthvid_msg msg;
  266. if (!info)
  267. return -ENODEV;
  268. memset(&msg, 0, sizeof(struct synthvid_msg));
  269. msg.vid_hdr.type = SYNTHVID_SITUATION_UPDATE;
  270. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  271. sizeof(struct synthvid_situation_update);
  272. msg.situ.user_ctx = 0;
  273. msg.situ.video_output_count = 1;
  274. msg.situ.video_output[0].active = 1;
  275. msg.situ.video_output[0].vram_offset = 0;
  276. msg.situ.video_output[0].depth_bits = info->var.bits_per_pixel;
  277. msg.situ.video_output[0].width_pixels = info->var.xres;
  278. msg.situ.video_output[0].height_pixels = info->var.yres;
  279. msg.situ.video_output[0].pitch_bytes = info->fix.line_length;
  280. synthvid_send(hdev, &msg);
  281. return 0;
  282. }
  283. /* Send mouse pointer info to host */
  284. static int synthvid_send_ptr(struct hv_device *hdev)
  285. {
  286. struct synthvid_msg msg;
  287. memset(&msg, 0, sizeof(struct synthvid_msg));
  288. msg.vid_hdr.type = SYNTHVID_POINTER_POSITION;
  289. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  290. sizeof(struct synthvid_pointer_position);
  291. msg.ptr_pos.is_visible = 1;
  292. msg.ptr_pos.video_output = 0;
  293. msg.ptr_pos.image_x = 0;
  294. msg.ptr_pos.image_y = 0;
  295. synthvid_send(hdev, &msg);
  296. memset(&msg, 0, sizeof(struct synthvid_msg));
  297. msg.vid_hdr.type = SYNTHVID_POINTER_SHAPE;
  298. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  299. sizeof(struct synthvid_pointer_shape);
  300. msg.ptr_shape.part_idx = CURSOR_COMPLETE;
  301. msg.ptr_shape.is_argb = 1;
  302. msg.ptr_shape.width = 1;
  303. msg.ptr_shape.height = 1;
  304. msg.ptr_shape.hot_x = 0;
  305. msg.ptr_shape.hot_y = 0;
  306. msg.ptr_shape.data[0] = 0;
  307. msg.ptr_shape.data[1] = 1;
  308. msg.ptr_shape.data[2] = 1;
  309. msg.ptr_shape.data[3] = 1;
  310. synthvid_send(hdev, &msg);
  311. return 0;
  312. }
  313. /* Send updated screen area (dirty rectangle) location to host */
  314. static int
  315. synthvid_update(struct fb_info *info, int x1, int y1, int x2, int y2)
  316. {
  317. struct hv_device *hdev = device_to_hv_device(info->device);
  318. struct synthvid_msg msg;
  319. memset(&msg, 0, sizeof(struct synthvid_msg));
  320. if (x2 == INT_MAX)
  321. x2 = info->var.xres;
  322. if (y2 == INT_MAX)
  323. y2 = info->var.yres;
  324. msg.vid_hdr.type = SYNTHVID_DIRT;
  325. msg.vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
  326. sizeof(struct synthvid_dirt);
  327. msg.dirt.video_output = 0;
  328. msg.dirt.dirt_count = 1;
  329. msg.dirt.rect[0].x1 = (x1 > x2) ? 0 : x1;
  330. msg.dirt.rect[0].y1 = (y1 > y2) ? 0 : y1;
  331. msg.dirt.rect[0].x2 =
  332. (x2 < x1 || x2 > info->var.xres) ? info->var.xres : x2;
  333. msg.dirt.rect[0].y2 =
  334. (y2 < y1 || y2 > info->var.yres) ? info->var.yres : y2;
  335. synthvid_send(hdev, &msg);
  336. return 0;
  337. }
  338. static void hvfb_docopy(struct hvfb_par *par,
  339. unsigned long offset,
  340. unsigned long size)
  341. {
  342. if (!par || !par->mmio_vp || !par->dio_vp || !par->fb_ready ||
  343. size == 0 || offset >= dio_fb_size)
  344. return;
  345. if (offset + size > dio_fb_size)
  346. size = dio_fb_size - offset;
  347. memcpy(par->mmio_vp + offset, par->dio_vp + offset, size);
  348. }
/*
 * Deferred IO callback: fold the dirtied mmap pages into one vertical
 * band of rows, copy them to the real framebuffer if needed, and notify
 * the host.
 */
static void synthvid_deferred_io(struct fb_info *p,
				 struct list_head *pagelist)
{
	struct hvfb_par *par = p->par;
	struct page *page;
	unsigned long start, end;
	int y1, y2, miny, maxy;

	miny = INT_MAX;
	maxy = 0;

	/*
	 * Merge dirty pages. It is possible that last page cross
	 * over the end of frame buffer row yres. This is taken care of
	 * in synthvid_update function by clamping the y2
	 * value to yres.
	 */
	list_for_each_entry(page, pagelist, lru) {
		/* page->index is the page offset within the fb mapping */
		start = page->index << PAGE_SHIFT;
		end = start + PAGE_SIZE - 1;
		y1 = start / p->fix.line_length;
		y2 = end / p->fix.line_length;
		miny = min_t(int, miny, y1);
		maxy = max_t(int, maxy, y2);

		/* Copy from dio space to mmio address */
		if (par->fb_ready && par->need_docopy)
			hvfb_docopy(par, start, PAGE_SIZE);
	}

	/* Report the merged band of rows; full width, rows miny..maxy */
	if (par->fb_ready && par->update)
		synthvid_update(p, 0, miny, p->var.xres, maxy + 1);
}
/* Deferred IO config: batched mmap writes are flushed 20 times/second */
static struct fb_deferred_io synthvid_defio = {
	.delay = HZ / 20,
	.deferred_io = synthvid_deferred_io,
};
/*
 * Actions on received messages from host:
 * Complete the wait event.
 * Or, reply with screen and cursor info.
 */
static void synthvid_recv_sub(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par;
	struct synthvid_msg *msg;

	if (!info)
		return;

	par = info->par;
	msg = (struct synthvid_msg *)par->recv_buf;

	/* Complete the wait event */
	if (msg->vid_hdr.type == SYNTHVID_VERSION_RESPONSE ||
	    msg->vid_hdr.type == SYNTHVID_RESOLUTION_RESPONSE ||
	    msg->vid_hdr.type == SYNTHVID_VRAM_LOCATION_ACK) {
		/* Hand the reply to the setup-path waiter via init_buf */
		memcpy(par->init_buf, msg, MAX_VMBUS_PKT_SIZE);
		complete(&par->wait);
		return;
	}

	/* Reply with screen and cursor info */
	if (msg->vid_hdr.type == SYNTHVID_FEATURE_CHANGE) {
		if (par->fb_ready) {
			synthvid_send_ptr(hdev);
			synthvid_send_situ(hdev);
		}

		/* Track whether the host wants dirty-rectangle updates */
		par->update = msg->feature_chg.is_dirt_needed;
		if (par->update)
			schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);
	}
}
/* Receive callback for messages from the host */
static void synthvid_receive(void *ctx)
{
	struct hv_device *hdev = ctx;
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par;
	struct synthvid_msg *recv_buf;
	u32 bytes_recvd;
	u64 req_id;
	int ret;

	if (!info)
		return;

	par = info->par;
	recv_buf = (struct synthvid_msg *)par->recv_buf;

	/* Drain the channel, dispatching each data packet to recv_sub */
	do {
		ret = vmbus_recvpacket(hdev->channel, recv_buf,
				       MAX_VMBUS_PKT_SIZE,
				       &bytes_recvd, &req_id);
		if (bytes_recvd > 0 &&
		    recv_buf->pipe_hdr.type == PIPE_MSG_DATA)
			synthvid_recv_sub(hdev);
	} while (bytes_recvd > 0 && ret == 0);
}
  439. /* Check if the ver1 version is equal or greater than ver2 */
  440. static inline bool synthvid_ver_ge(u32 ver1, u32 ver2)
  441. {
  442. if (SYNTHVID_VER_GET_MAJOR(ver1) > SYNTHVID_VER_GET_MAJOR(ver2) ||
  443. (SYNTHVID_VER_GET_MAJOR(ver1) == SYNTHVID_VER_GET_MAJOR(ver2) &&
  444. SYNTHVID_VER_GET_MINOR(ver1) >= SYNTHVID_VER_GET_MINOR(ver2)))
  445. return true;
  446. return false;
  447. }
/*
 * Check synthetic video protocol version with the host.
 * Proposes 'ver'; the receive path copies the host reply into init_buf
 * and completes par->wait. Returns 0, -ETIMEDOUT, or -ENODEV.
 */
static int synthvid_negotiate_ver(struct hv_device *hdev, u32 ver)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;

	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VERSION_REQUEST;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_version_req);
	msg->ver_req.version = ver;
	synthvid_send(hdev, msg);

	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Time out on waiting version response\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* msg now holds the reply copied in by synthvid_recv_sub() */
	if (!msg->ver_resp.is_accepted) {
		ret = -ENODEV;
		goto out;
	}

	par->synthvid_version = ver;
	pr_info("Synthvid Version major %d, minor %d\n",
		SYNTHVID_VER_GET_MAJOR(ver), SYNTHVID_VER_GET_MINOR(ver));

out:
	return ret;
}
/*
 * Get current resolution from the host.
 * Updates screen_width/screen_height to the host's default resolution
 * and screen_width_max/screen_height_max to the largest advertised one.
 */
static int synthvid_get_supported_resolution(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;
	u8 index;
	int i;

	/* Ask for the resolution list; the reply lands in init_buf */
	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_RESOLUTION_REQUEST;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_supported_resolution_req);
	msg->resolution_req.maximum_resolution_count =
		SYNTHVID_MAX_RESOLUTION_COUNT;
	synthvid_send(hdev, msg);

	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Time out on waiting resolution response\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	if (msg->resolution_resp.resolution_count == 0) {
		pr_err("No supported resolutions\n");
		ret = -ENODEV;
		goto out;
	}

	/* Validate the host's default index before dereferencing it */
	index = msg->resolution_resp.default_resolution_index;
	if (index >= msg->resolution_resp.resolution_count) {
		pr_err("Invalid resolution index: %d\n", index);
		ret = -ENODEV;
		goto out;
	}

	/* Track the largest advertised geometry (used to validate options) */
	for (i = 0; i < msg->resolution_resp.resolution_count; i++) {
		screen_width_max = max_t(unsigned int, screen_width_max,
			msg->resolution_resp.supported_resolution[i].width);
		screen_height_max = max_t(unsigned int, screen_height_max,
			msg->resolution_resp.supported_resolution[i].height);
	}

	/* Adopt the host's default resolution */
	screen_width =
		msg->resolution_resp.supported_resolution[index].width;
	screen_height =
		msg->resolution_resp.supported_resolution[index].height;

out:
	return ret;
}
/*
 * Connect to VSP (Virtual Service Provider) on host.
 * Opens the VMBus channel, negotiates the highest protocol version the
 * host accepts (falling through to older versions on rejection), then
 * derives screen depth and framebuffer size from the result.
 */
static int synthvid_connect_vsp(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	int ret;

	ret = vmbus_open(hdev->channel, RING_BUFSIZE, RING_BUFSIZE,
			 NULL, 0, synthvid_receive, hdev);
	if (ret) {
		pr_err("Unable to open vmbus channel\n");
		return ret;
	}

	/* Negotiate the protocol version with host */
	switch (vmbus_proto_version) {
	case VERSION_WIN10:
	case VERSION_WIN10_V5:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
		if (!ret)
			break;
		fallthrough;
	case VERSION_WIN8:
	case VERSION_WIN8_1:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN8);
		if (!ret)
			break;
		fallthrough;
	case VERSION_WS2008:
	case VERSION_WIN7:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN7);
		break;
	default:
		ret = synthvid_negotiate_ver(hdev, SYNTHVID_VERSION_WIN10);
		break;
	}

	if (ret) {
		pr_err("Synthetic video device version not accepted\n");
		goto error;
	}

	/* Color depth is fixed by the negotiated protocol version */
	if (par->synthvid_version == SYNTHVID_VERSION_WIN7)
		screen_depth = SYNTHVID_DEPTH_WIN7;
	else
		screen_depth = SYNTHVID_DEPTH_WIN8;

	/* WIN10+ hosts can report their supported/default resolutions */
	if (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10)) {
		ret = synthvid_get_supported_resolution(hdev);
		if (ret)
			pr_info("Failed to get supported resolution from host, use default\n");
	}

	/* Framebuffer size comes from the MMIO space in the VMBus offer */
	screen_fb_size = hdev->channel->offermsg.offer.
		mmio_megabytes * 1024 * 1024;

	return 0;

error:
	vmbus_close(hdev->channel);
	return ret;
}
/*
 * Send VRAM and Situation messages to the host.
 * Reports the framebuffer's physical address, waits for the host's ack,
 * then pushes the initial pointer and situation state.
 */
static int synthvid_send_config(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	struct synthvid_msg *msg = (struct synthvid_msg *)par->init_buf;
	int ret = 0;
	unsigned long t;

	/* Send VRAM location */
	memset(msg, 0, sizeof(struct synthvid_msg));
	msg->vid_hdr.type = SYNTHVID_VRAM_LOCATION;
	msg->vid_hdr.size = sizeof(struct synthvid_msg_hdr) +
		sizeof(struct synthvid_vram_location);
	/* user_ctx doubles as a cookie matched against the ack below */
	msg->vram.user_ctx = msg->vram.vram_gpa = par->mmio_pp;
	msg->vram.is_vram_gpa_specified = 1;
	synthvid_send(hdev, msg);

	t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
	if (!t) {
		pr_err("Time out on waiting vram location ack\n");
		ret = -ETIMEDOUT;
		goto out;
	}
	if (msg->vram_ack.user_ctx != par->mmio_pp) {
		pr_err("Unable to set VRAM location\n");
		ret = -ENODEV;
		goto out;
	}

	/* Send pointer and situation update */
	synthvid_send_ptr(hdev);
	synthvid_send_situ(hdev);

out:
	return ret;
}
/*
 * Delayed work callback:
 * It is scheduled to call whenever update request is received and it has
 * not been called in last HVFB_ONDEMAND_THROTTLE time interval.
 */
static void hvfb_update_work(struct work_struct *w)
{
	struct hvfb_par *par = container_of(w, struct hvfb_par, dwork.work);
	struct fb_info *info = par->info;
	unsigned long flags;
	int x1, x2, y1, y2;
	int j;

	/* Atomically snapshot and reset the pending dirty rectangle */
	spin_lock_irqsave(&par->delayed_refresh_lock, flags);
	/* Reset the request flag */
	par->delayed_refresh = false;
	/* Store the dirty rectangle to local variables */
	x1 = par->x1;
	x2 = par->x2;
	y1 = par->y1;
	y2 = par->y2;
	/* Clear dirty rectangle */
	par->x1 = par->y1 = INT_MAX;
	par->x2 = par->y2 = 0;
	spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);

	/* Empty (still-cleared) or out-of-range rectangle: nothing to do */
	if (x1 > info->var.xres || x2 > info->var.xres ||
	    y1 > info->var.yres || y2 > info->var.yres || x2 <= x1)
		return;

	/* Copy the dirty rectangle to frame buffer memory */
	if (par->need_docopy)
		for (j = y1; j < y2; j++)
			hvfb_docopy(par,
				    j * info->fix.line_length +
				    (x1 * screen_depth / 8),
				    (x2 - x1) * screen_depth / 8);

	/* Refresh */
	if (par->fb_ready && par->update)
		synthvid_update(info, x1, y1, x2, y2);
}
  650. /*
  651. * Control the on-demand refresh frequency. It schedules a delayed
  652. * screen update if it has not yet.
  653. */
  654. static void hvfb_ondemand_refresh_throttle(struct hvfb_par *par,
  655. int x1, int y1, int w, int h)
  656. {
  657. unsigned long flags;
  658. int x2 = x1 + w;
  659. int y2 = y1 + h;
  660. spin_lock_irqsave(&par->delayed_refresh_lock, flags);
  661. /* Merge dirty rectangle */
  662. par->x1 = min_t(int, par->x1, x1);
  663. par->y1 = min_t(int, par->y1, y1);
  664. par->x2 = max_t(int, par->x2, x2);
  665. par->y2 = max_t(int, par->y2, y2);
  666. /* Schedule a delayed screen update if not yet */
  667. if (par->delayed_refresh == false) {
  668. schedule_delayed_work(&par->dwork,
  669. HVFB_ONDEMAND_THROTTLE);
  670. par->delayed_refresh = true;
  671. }
  672. spin_unlock_irqrestore(&par->delayed_refresh_lock, flags);
  673. }
/* Panic notifier: flush the final screen contents to the host. */
static int hvfb_on_panic(struct notifier_block *nb,
			 unsigned long e, void *p)
{
	struct hvfb_par *par;
	struct fb_info *info;

	par = container_of(nb, struct hvfb_par, hvfb_panic_nb);
	/* From here on, draw paths push to the host synchronously */
	par->synchronous_fb = true;
	info = par->info;
	if (par->need_docopy)
		hvfb_docopy(par, 0, dio_fb_size);
	/* Flush the whole screen so the panic output is visible */
	synthvid_update(info, 0, 0, INT_MAX, INT_MAX);

	return NOTIFY_DONE;
}
  687. /* Framebuffer operation handlers */
  688. static int hvfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
  689. {
  690. if (var->xres < HVFB_WIDTH_MIN || var->yres < HVFB_HEIGHT_MIN ||
  691. var->xres > screen_width || var->yres > screen_height ||
  692. var->bits_per_pixel != screen_depth)
  693. return -EINVAL;
  694. var->xres_virtual = var->xres;
  695. var->yres_virtual = var->yres;
  696. return 0;
  697. }
  698. static int hvfb_set_par(struct fb_info *info)
  699. {
  700. struct hv_device *hdev = device_to_hv_device(info->device);
  701. return synthvid_send_situ(hdev);
  702. }
  703. static inline u32 chan_to_field(u32 chan, struct fb_bitfield *bf)
  704. {
  705. return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset;
  706. }
  707. static int hvfb_setcolreg(unsigned regno, unsigned red, unsigned green,
  708. unsigned blue, unsigned transp, struct fb_info *info)
  709. {
  710. u32 *pal = info->pseudo_palette;
  711. if (regno > 15)
  712. return -EINVAL;
  713. pal[regno] = chan_to_field(red, &info->var.red)
  714. | chan_to_field(green, &info->var.green)
  715. | chan_to_field(blue, &info->var.blue)
  716. | chan_to_field(transp, &info->var.transp);
  717. return 0;
  718. }
  719. static int hvfb_blank(int blank, struct fb_info *info)
  720. {
  721. return 1; /* get fb_blank to set the colormap to all black */
  722. }
  723. static void hvfb_cfb_fillrect(struct fb_info *p,
  724. const struct fb_fillrect *rect)
  725. {
  726. struct hvfb_par *par = p->par;
  727. cfb_fillrect(p, rect);
  728. if (par->synchronous_fb)
  729. synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
  730. else
  731. hvfb_ondemand_refresh_throttle(par, rect->dx, rect->dy,
  732. rect->width, rect->height);
  733. }
  734. static void hvfb_cfb_copyarea(struct fb_info *p,
  735. const struct fb_copyarea *area)
  736. {
  737. struct hvfb_par *par = p->par;
  738. cfb_copyarea(p, area);
  739. if (par->synchronous_fb)
  740. synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
  741. else
  742. hvfb_ondemand_refresh_throttle(par, area->dx, area->dy,
  743. area->width, area->height);
  744. }
  745. static void hvfb_cfb_imageblit(struct fb_info *p,
  746. const struct fb_image *image)
  747. {
  748. struct hvfb_par *par = p->par;
  749. cfb_imageblit(p, image);
  750. if (par->synchronous_fb)
  751. synthvid_update(p, 0, 0, INT_MAX, INT_MAX);
  752. else
  753. hvfb_ondemand_refresh_throttle(par, image->dx, image->dy,
  754. image->width, image->height);
  755. }
/* Framebuffer operations; drawing ops wrap cfb_* to add host refresh. */
static const struct fb_ops hvfb_ops = {
	.owner = THIS_MODULE,
	.fb_check_var = hvfb_check_var,
	.fb_set_par = hvfb_set_par,
	.fb_setcolreg = hvfb_setcolreg,
	.fb_fillrect = hvfb_cfb_fillrect,
	.fb_copyarea = hvfb_cfb_copyarea,
	.fb_imageblit = hvfb_cfb_imageblit,
	.fb_blank = hvfb_blank,
};
/* Get options from kernel parameter "video=" */
static void hvfb_get_option(struct fb_info *info)
{
	struct hvfb_par *par = info->par;
	char *opt = NULL, *p;
	uint x = 0, y = 0;

	if (fb_get_options(KBUILD_MODNAME, &opt) || !opt || !*opt)
		return;

	/* Parse "<width>x<height>" */
	p = strsep(&opt, "x");
	if (!*p || kstrtouint(p, 0, &x) ||
	    !opt || !*opt || kstrtouint(opt, 0, &y)) {
		pr_err("Screen option is invalid: skipped\n");
		return;
	}

	/* Reject sizes outside what the negotiated protocol can handle */
	if (x < HVFB_WIDTH_MIN || y < HVFB_HEIGHT_MIN ||
	    (synthvid_ver_ge(par->synthvid_version, SYNTHVID_VERSION_WIN10) &&
	    (x > screen_width_max || y > screen_height_max)) ||
	    (par->synthvid_version == SYNTHVID_VERSION_WIN8 &&
	     x * y * screen_depth / 8 > SYNTHVID_FB_SIZE_WIN8) ||
	    (par->synthvid_version == SYNTHVID_VERSION_WIN7 &&
	     (x > SYNTHVID_WIDTH_MAX_WIN7 || y > SYNTHVID_HEIGHT_MAX_WIN7))) {
		pr_err("Screen resolution option is out of range: skipped\n");
		return;
	}

	screen_width = x;
	screen_height = y;
	return;
}
/*
 * Allocate enough contiguous physical memory.
 * Return physical address if succeeded or -1 if failed.
 */
static phys_addr_t hvfb_get_phymem(struct hv_device *hdev,
				   unsigned int request_size)
{
	struct page *page = NULL;
	dma_addr_t dma_handle;
	void *vmem;
	phys_addr_t paddr = 0;
	unsigned int order = get_order(request_size);

	if (request_size == 0)
		return -1;

	if (order < MAX_ORDER) {
		/* Call alloc_pages if the size is less than 2^MAX_ORDER */
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
		if (!page)
			return -1;

		paddr = (page_to_pfn(page) << PAGE_SHIFT);
	} else {
		/* Allocate from CMA */
		hdev->device.coherent_dma_mask = DMA_BIT_MASK(64);

		/* dma_handle is unused; only the CPU address is needed */
		vmem = dma_alloc_coherent(&hdev->device,
					  round_up(request_size, PAGE_SIZE),
					  &dma_handle,
					  GFP_KERNEL | __GFP_NOWARN);

		if (!vmem)
			return -1;

		paddr = virt_to_phys(vmem);
	}

	return paddr;
}
/* Release contiguous physical memory */
static void hvfb_release_phymem(struct hv_device *hdev,
				phys_addr_t paddr, unsigned int size)
{
	unsigned int order = get_order(size);

	/* Mirror the allocation strategy chosen in hvfb_get_phymem() */
	if (order < MAX_ORDER)
		__free_pages(pfn_to_page(paddr >> PAGE_SHIFT), order);
	else
		dma_free_coherent(&hdev->device,
				  round_up(size, PAGE_SIZE),
				  phys_to_virt(paddr),
				  paddr);
}
/*
 * Get framebuffer memory from Hyper-V video pci space
 *
 * Two strategies, tried in order:
 *  1. Gen 1 VM only: allocate contiguous physical memory via
 *     hvfb_get_phymem() so deferred I/O renders directly into the
 *     framebuffer (par->need_docopy = false, no shadow copy).
 *  2. Fallback (and always for Gen 2): claim MMIO space for the
 *     framebuffer and vzalloc() a separate deferred-I/O shadow buffer
 *     (par->dio_vp) that is copied to the MMIO region.
 *
 * Returns 0 on success; -ENOMEM or -ENODEV on failure.  On success for
 * the non-gen2 path the pdev reference taken by pci_get_device() is
 * dropped before returning; error paths drop it under err1.
 */
static int hvfb_getmem(struct hv_device *hdev, struct fb_info *info)
{
	struct hvfb_par *par = info->par;
	struct pci_dev *pdev = NULL;
	void __iomem *fb_virt;
	int gen2vm = efi_enabled(EFI_BOOT);	/* Gen 2 VMs boot via EFI */
	resource_size_t pot_start, pot_end;
	phys_addr_t paddr;
	int ret;

	info->apertures = alloc_apertures(1);
	if (!info->apertures)
		return -ENOMEM;

	if (!gen2vm) {
		/* Gen 1: locate the emulated Hyper-V video PCI device */
		pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
				      PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
		if (!pdev) {
			pr_err("Unable to find PCI Hyper-V video\n");
			return -ENODEV;
		}

		info->apertures->ranges[0].base = pci_resource_start(pdev, 0);
		info->apertures->ranges[0].size = pci_resource_len(pdev, 0);

		/*
		 * For Gen 1 VM, we can directly use the contiguous memory
		 * from VM. If we succeed, deferred IO happens directly
		 * on this allocated framebuffer memory, avoiding extra
		 * memory copy.
		 */
		paddr = hvfb_get_phymem(hdev, screen_fb_size);
		if (paddr != (phys_addr_t) -1) {
			par->mmio_pp = paddr;
			par->mmio_vp = par->dio_vp = __va(paddr);

			info->fix.smem_start = paddr;
			info->fix.smem_len = screen_fb_size;
			info->screen_base = par->mmio_vp;
			info->screen_size = screen_fb_size;

			/* Direct framebuffer: no shadow-buffer copy needed */
			par->need_docopy = false;
			goto getmem_done;
		}
		pr_info("Unable to allocate enough contiguous physical memory on Gen 1 VM. Using MMIO instead.\n");
	} else {
		/* Gen 2: aperture comes from the EFI-provided screen_info */
		info->apertures->ranges[0].base = screen_info.lfb_base;
		info->apertures->ranges[0].size = screen_info.lfb_size;
	}

	/*
	 * Cannot use the contiguous physical memory.
	 * Allocate mmio space for framebuffer.
	 */
	dio_fb_size =
		screen_width * screen_height * screen_depth / 8;

	if (gen2vm) {
		/* No PCI BAR constraint: search the full MMIO range */
		pot_start = 0;
		pot_end = -1;
	} else {
		/* Gen 1: carve the framebuffer out of the end of BAR 0 */
		if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
		    pci_resource_len(pdev, 0) < screen_fb_size) {
			pr_err("Resource not available or (0x%lx < 0x%lx)\n",
			       (unsigned long) pci_resource_len(pdev, 0),
			       (unsigned long) screen_fb_size);
			goto err1;
		}

		pot_end = pci_resource_end(pdev, 0);
		pot_start = pot_end - screen_fb_size + 1;
	}

	ret = vmbus_allocate_mmio(&par->mem, hdev, pot_start, pot_end,
				  screen_fb_size, 0x100000, true);
	if (ret != 0) {
		pr_err("Unable to allocate framebuffer memory\n");
		goto err1;
	}

	/*
	 * Map the VRAM cacheable for performance. This is also required for
	 * VM Connect to display properly for ARM64 Linux VM, as the host also
	 * maps the VRAM cacheable.
	 */
	fb_virt = ioremap_cache(par->mem->start, screen_fb_size);
	if (!fb_virt)
		goto err2;

	/* Allocate memory for deferred IO */
	par->dio_vp = vzalloc(round_up(dio_fb_size, PAGE_SIZE));
	if (par->dio_vp == NULL)
		goto err3;

	/* Physical address of FB device */
	par->mmio_pp = par->mem->start;
	/* Virtual address of FB device */
	par->mmio_vp = (unsigned char *) fb_virt;

	info->fix.smem_start = par->mem->start;
	info->fix.smem_len = dio_fb_size;
	info->screen_base = par->dio_vp;
	info->screen_size = dio_fb_size;

getmem_done:
	/* Evict generic firmware framebuffers (e.g. efifb) on our aperture */
	remove_conflicting_framebuffers(info->apertures,
					KBUILD_MODNAME, false);

	if (gen2vm) {
		/* framebuffer is reallocated, clear screen_info to avoid misuse from kexec */
		screen_info.lfb_size = 0;
		screen_info.lfb_base = 0;
		screen_info.orig_video_isVGA = 0;
	} else {
		pci_dev_put(pdev);
	}

	return 0;

err3:
	iounmap(fb_virt);
err2:
	vmbus_free_mmio(par->mem->start, screen_fb_size);
	par->mem = NULL;
err1:
	if (!gen2vm)
		pci_dev_put(pdev);

	return -ENOMEM;
}
  952. /* Release the framebuffer */
  953. static void hvfb_putmem(struct hv_device *hdev, struct fb_info *info)
  954. {
  955. struct hvfb_par *par = info->par;
  956. if (par->need_docopy) {
  957. vfree(par->dio_vp);
  958. iounmap(info->screen_base);
  959. vmbus_free_mmio(par->mem->start, screen_fb_size);
  960. } else {
  961. hvfb_release_phymem(hdev, info->fix.smem_start,
  962. screen_fb_size);
  963. }
  964. par->mem = NULL;
  965. }
/*
 * Probe a Hyper-V synthetic video VMBus device.
 *
 * Sequence: allocate the fb_info + per-device state, connect to the
 * host video service (VSP), read the resolution options, acquire
 * framebuffer memory, fill in the fbdev mode/fix data, initialize
 * deferred I/O, send the framebuffer config to the host, and register
 * the framebuffer.  Error labels unwind in reverse order of setup.
 *
 * Returns 0 on success or a negative errno.
 */
static int hvfb_probe(struct hv_device *hdev,
		      const struct hv_vmbus_device_id *dev_id)
{
	struct fb_info *info;
	struct hvfb_par *par;
	int ret;

	info = framebuffer_alloc(sizeof(struct hvfb_par), &hdev->device);
	if (!info)
		return -ENOMEM;

	par = info->par;
	par->info = info;
	par->fb_ready = false;
	/* Default to copying via the shadow buffer; hvfb_getmem() may clear */
	par->need_docopy = true;
	init_completion(&par->wait);
	INIT_DELAYED_WORK(&par->dwork, hvfb_update_work);

	par->delayed_refresh = false;
	spin_lock_init(&par->delayed_refresh_lock);
	/* Empty dirty rectangle: x1/y1 maxed so the first union resets it */
	par->x1 = par->y1 = INT_MAX;
	par->x2 = par->y2 = 0;

	/* Connect to VSP */
	hv_set_drvdata(hdev, info);
	ret = synthvid_connect_vsp(hdev);
	if (ret) {
		pr_err("Unable to connect to VSP\n");
		goto error1;
	}

	hvfb_get_option(info);
	pr_info("Screen resolution: %dx%d, Color depth: %d\n",
		screen_width, screen_height, screen_depth);

	ret = hvfb_getmem(hdev, info);
	if (ret) {
		pr_err("No memory for framebuffer\n");
		goto error2;
	}

	/* Set up fb_info */
	info->flags = FBINFO_DEFAULT;

	info->var.xres_virtual = info->var.xres = screen_width;
	info->var.yres_virtual = info->var.yres = screen_height;
	info->var.bits_per_pixel = screen_depth;

	if (info->var.bits_per_pixel == 16) {
		/* RGB565 layout */
		info->var.red = (struct fb_bitfield){11, 5, 0};
		info->var.green = (struct fb_bitfield){5, 6, 0};
		info->var.blue = (struct fb_bitfield){0, 5, 0};
		info->var.transp = (struct fb_bitfield){0, 0, 0};
	} else {
		/* ARGB8888 layout */
		info->var.red = (struct fb_bitfield){16, 8, 0};
		info->var.green = (struct fb_bitfield){8, 8, 0};
		info->var.blue = (struct fb_bitfield){0, 8, 0};
		info->var.transp = (struct fb_bitfield){24, 8, 0};
	}

	info->var.activate = FB_ACTIVATE_NOW;
	info->var.height = -1;		/* physical size unknown */
	info->var.width = -1;
	info->var.vmode = FB_VMODE_NONINTERLACED;

	strcpy(info->fix.id, KBUILD_MODNAME);
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = FB_VISUAL_TRUECOLOR;
	info->fix.line_length = screen_width * screen_depth / 8;
	info->fix.accel = FB_ACCEL_NONE;

	info->fbops = &hvfb_ops;
	info->pseudo_palette = par->pseudo_palette;

	/* Initialize deferred IO */
	info->fbdefio = &synthvid_defio;
	fb_deferred_io_init(info);

	/* Send config to host */
	ret = synthvid_send_config(hdev);
	if (ret)
		goto error;

	ret = register_framebuffer(info);
	if (ret) {
		pr_err("Unable to register framebuffer\n");
		goto error;
	}

	par->fb_ready = true;

	/* Panic notifier lets the console flush the fb synchronously */
	par->synchronous_fb = false;
	par->hvfb_panic_nb.notifier_call = hvfb_on_panic;
	atomic_notifier_chain_register(&panic_notifier_list,
				       &par->hvfb_panic_nb);

	return 0;

error:
	fb_deferred_io_cleanup(info);
	hvfb_putmem(hdev, info);
error2:
	vmbus_close(hdev->channel);
error1:
	cancel_delayed_work_sync(&par->dwork);
	hv_set_drvdata(hdev, NULL);
	framebuffer_release(info);
	return ret;
}
/*
 * Tear down the device in reverse order of hvfb_probe(): unhook the
 * panic notifier, stop update traffic, clean up deferred I/O, then
 * unregister the framebuffer before closing the channel and freeing
 * the framebuffer memory.  Always returns 0.
 */
static int hvfb_remove(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;

	atomic_notifier_chain_unregister(&panic_notifier_list,
					 &par->hvfb_panic_nb);

	/* Stop further host updates before dismantling the fb */
	par->update = false;
	par->fb_ready = false;

	fb_deferred_io_cleanup(info);

	unregister_framebuffer(info);
	cancel_delayed_work_sync(&par->dwork);

	vmbus_close(hdev->channel);
	hv_set_drvdata(hdev, NULL);

	hvfb_putmem(hdev, info);
	framebuffer_release(info);

	return 0;
}
/*
 * Suspend callback: quiesce the framebuffer under the console lock.
 * Saves the current update state (restored in hvfb_resume()), cancels
 * both the driver's and fbdev's deferred work, and closes the VMBus
 * channel.  Always returns 0.
 */
static int hvfb_suspend(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;

	console_lock();

	/* 1 means do suspend */
	fb_set_suspend(info, 1);

	cancel_delayed_work_sync(&par->dwork);
	cancel_delayed_work_sync(&info->deferred_work);

	/* Remember whether updates were flowing so resume can restore it */
	par->update_saved = par->update;
	par->update = false;
	par->fb_ready = false;

	vmbus_close(hdev->channel);

	console_unlock();

	return 0;
}
/*
 * Resume callback: reconnect to the VSP and re-send the framebuffer
 * config under the console lock, restore the saved update state, and
 * restart both deferred work items before un-suspending the fbdev.
 * Returns 0 on success or the error from reconnect/config.
 */
static int hvfb_resume(struct hv_device *hdev)
{
	struct fb_info *info = hv_get_drvdata(hdev);
	struct hvfb_par *par = info->par;
	int ret;

	console_lock();

	ret = synthvid_connect_vsp(hdev);
	if (ret != 0)
		goto out;

	ret = synthvid_send_config(hdev);
	if (ret != 0) {
		/* Config failed: drop the freshly opened channel */
		vmbus_close(hdev->channel);
		goto out;
	}

	par->fb_ready = true;
	par->update = par->update_saved;

	schedule_delayed_work(&info->deferred_work, info->fbdefio->delay);
	schedule_delayed_work(&par->dwork, HVFB_UPDATE_DELAY);

	/* 0 means do resume */
	fb_set_suspend(info, 0);

out:
	console_unlock();

	return ret;
}
/* PCI IDs matched by the stub driver: the Hyper-V emulated video device */
static const struct pci_device_id pci_stub_id_table[] = {
	{
		.vendor = PCI_VENDOR_ID_MICROSOFT,
		.device = PCI_DEVICE_ID_HYPERV_VIDEO,
	},
	{ /* end of list */ }
};

static const struct hv_vmbus_device_id id_table[] = {
	/* Synthetic Video Device GUID */
	{HV_SYNTHVID_GUID},
	{}
};

/* Export both ID tables for module autoloading */
MODULE_DEVICE_TABLE(pci, pci_stub_id_table);
MODULE_DEVICE_TABLE(vmbus, id_table);
/* VMBus driver for the synthetic video device (the real fb driver) */
static struct hv_driver hvfb_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = hvfb_probe,
	.remove = hvfb_remove,
	.suspend = hvfb_suspend,
	.resume = hvfb_resume,
	.driver = {
		/* Probe asynchronously to keep boot from serializing on us */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};
/*
 * Stub PCI probe: intentionally does nothing but claim the device.
 * NOTE(review): presumably this keeps the Hyper-V video PCI device
 * bound to this module (e.g. so PM callbacks route correctly and no
 * other driver grabs it) — confirm against the commit that added it.
 */
static int hvfb_pci_stub_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	return 0;
}
/* Stub PCI remove: nothing was set up in probe, so nothing to undo */
static void hvfb_pci_stub_remove(struct pci_dev *pdev)
{
}
/* Stub PCI driver that merely binds the Hyper-V video PCI device */
static struct pci_driver hvfb_pci_stub_driver = {
	.name = KBUILD_MODNAME,
	.id_table = pci_stub_id_table,
	.probe = hvfb_pci_stub_probe,
	.remove = hvfb_pci_stub_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	}
};
  1155. static int __init hvfb_drv_init(void)
  1156. {
  1157. int ret;
  1158. ret = vmbus_driver_register(&hvfb_drv);
  1159. if (ret != 0)
  1160. return ret;
  1161. ret = pci_register_driver(&hvfb_pci_stub_driver);
  1162. if (ret != 0) {
  1163. vmbus_driver_unregister(&hvfb_drv);
  1164. return ret;
  1165. }
  1166. return 0;
  1167. }
/* Module exit: unregister both drivers in reverse order of init */
static void __exit hvfb_drv_exit(void)
{
	pci_unregister_driver(&hvfb_pci_stub_driver);
	vmbus_driver_unregister(&hvfb_drv);
}
/* Module entry/exit points and metadata */
module_init(hvfb_drv_init);
module_exit(hvfb_drv_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic Video Frame Buffer Driver");