/drivers/gpu/drm/amd/amdgpu/vega10_ih.c

https://gitlab.com/sunny256/linux · C · 523 lines · 325 code · 79 blank · 119 comment · 26 complexity · 2f222f35c470662f5691269d29ced170 MD5 · raw file

  1. /*
  2. * Copyright 2016 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <drm/drmP.h>
  24. #include "amdgpu.h"
  25. #include "amdgpu_ih.h"
  26. #include "soc15.h"
  27. #include "vega10/soc15ip.h"
  28. #include "vega10/OSSSYS/osssys_4_0_offset.h"
  29. #include "vega10/OSSSYS/osssys_4_0_sh_mask.h"
  30. #include "soc15_common.h"
  31. #include "vega10_ih.h"
  32. static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
  33. /**
  34. * vega10_ih_enable_interrupts - Enable the interrupt ring buffer
  35. *
  36. * @adev: amdgpu_device pointer
  37. *
  38. * Enable the interrupt ring buffer (VEGA10).
  39. */
  40. static void vega10_ih_enable_interrupts(struct amdgpu_device *adev)
  41. {
  42. u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  43. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 1);
  44. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 1);
  45. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
  46. adev->irq.ih.enabled = true;
  47. }
  48. /**
  49. * vega10_ih_disable_interrupts - Disable the interrupt ring buffer
  50. *
  51. * @adev: amdgpu_device pointer
  52. *
  53. * Disable the interrupt ring buffer (VEGA10).
  54. */
  55. static void vega10_ih_disable_interrupts(struct amdgpu_device *adev)
  56. {
  57. u32 ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  58. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_ENABLE, 0);
  59. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, ENABLE_INTR, 0);
  60. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
  61. /* set rptr, wptr to 0 */
  62. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
  63. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
  64. adev->irq.ih.enabled = false;
  65. adev->irq.ih.rptr = 0;
  66. }
  67. /**
  68. * vega10_ih_irq_init - init and enable the interrupt ring
  69. *
  70. * @adev: amdgpu_device pointer
  71. *
  72. * Allocate a ring buffer for the interrupt controller,
  73. * enable the RLC, disable interrupts, enable the IH
  74. * ring buffer and enable it (VI).
  75. * Called at device load and reume.
  76. * Returns 0 for success, errors for failure.
  77. */
  78. static int vega10_ih_irq_init(struct amdgpu_device *adev)
  79. {
  80. int ret = 0;
  81. int rb_bufsz;
  82. u32 ih_rb_cntl, ih_doorbell_rtpr;
  83. u32 tmp;
  84. u64 wptr_off;
  85. /* disable irqs */
  86. vega10_ih_disable_interrupts(adev);
  87. if (adev->flags & AMD_IS_APU)
  88. nbio_v7_0_ih_control(adev);
  89. else
  90. nbio_v6_1_ih_control(adev);
  91. ih_rb_cntl = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
  92. /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/
  93. if (adev->irq.ih.use_bus_addr) {
  94. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.rb_dma_addr >> 8);
  95. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), ((u64)adev->irq.ih.rb_dma_addr >> 40) & 0xff);
  96. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 1);
  97. } else {
  98. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE), adev->irq.ih.gpu_addr >> 8);
  99. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_BASE_HI), (adev->irq.ih.gpu_addr >> 40) & 0xff);
  100. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SPACE, 4);
  101. }
  102. rb_bufsz = order_base_2(adev->irq.ih.ring_size / 4);
  103. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
  104. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 1);
  105. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RB_SIZE, rb_bufsz);
  106. /* Ring Buffer write pointer writeback. If enabled, IH_RB_WPTR register value is written to memory */
  107. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, WPTR_WRITEBACK_ENABLE, 1);
  108. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_SNOOP, 1);
  109. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_RO, 0);
  110. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, MC_VMID, 0);
  111. if (adev->irq.msi_enabled)
  112. ih_rb_cntl = REG_SET_FIELD(ih_rb_cntl, IH_RB_CNTL, RPTR_REARM, 1);
  113. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), ih_rb_cntl);
  114. /* set the writeback address whether it's enabled or not */
  115. if (adev->irq.ih.use_bus_addr)
  116. wptr_off = adev->irq.ih.rb_dma_addr + (adev->irq.ih.wptr_offs * 4);
  117. else
  118. wptr_off = adev->wb.gpu_addr + (adev->irq.ih.wptr_offs * 4);
  119. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_LO), lower_32_bits(wptr_off));
  120. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR_ADDR_HI), upper_32_bits(wptr_off) & 0xFF);
  121. /* set rptr, wptr to 0 */
  122. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), 0);
  123. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_WPTR), 0);
  124. ih_doorbell_rtpr = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR));
  125. if (adev->irq.ih.use_doorbell) {
  126. ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
  127. OFFSET, adev->irq.ih.doorbell_index);
  128. ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
  129. ENABLE, 1);
  130. } else {
  131. ih_doorbell_rtpr = REG_SET_FIELD(ih_doorbell_rtpr, IH_DOORBELL_RPTR,
  132. ENABLE, 0);
  133. }
  134. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_DOORBELL_RPTR), ih_doorbell_rtpr);
  135. if (adev->flags & AMD_IS_APU)
  136. nbio_v7_0_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
  137. else
  138. nbio_v6_1_ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index);
  139. tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL));
  140. tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
  141. CLIENT18_IS_STORM_CLIENT, 1);
  142. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL), tmp);
  143. tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL));
  144. tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
  145. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_INT_FLOOD_CNTL), tmp);
  146. pci_set_master(adev->pdev);
  147. /* enable interrupts */
  148. vega10_ih_enable_interrupts(adev);
  149. return ret;
  150. }
/**
 * vega10_ih_irq_disable - disable interrupts
 *
 * @adev: amdgpu_device pointer
 *
 * Disable interrupts on the hw (VEGA10).
 */
static void vega10_ih_irq_disable(struct amdgpu_device *adev)
{
	vega10_ih_disable_interrupts(adev);

	/* Wait and acknowledge irq; presumably gives in-flight
	 * interrupt delivery time to drain before callers tear the
	 * ring down — NOTE(review): confirm 1 ms is sufficient.
	 */
	mdelay(1);
}
/**
 * vega10_ih_get_wptr - get the IH ring buffer wptr
 *
 * @adev: amdgpu_device pointer
 *
 * Get the IH ring buffer wptr from either the register
 * or the writeback memory buffer (VEGA10). Also check for
 * ring buffer overflow and deal with it.
 * Returns the value of the wptr.
 */
static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
{
	u32 wptr, tmp;

	/* wptr is written back by the hardware (WPTR_WRITEBACK_ENABLE);
	 * the writeback slot lives either inside the bus-addressed ring
	 * allocation or in the common wb area.
	 */
	if (adev->irq.ih.use_bus_addr)
		wptr = le32_to_cpu(adev->irq.ih.ring[adev->irq.ih.wptr_offs]);
	else
		wptr = le32_to_cpu(adev->wb.wb[adev->irq.ih.wptr_offs]);

	if (REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW)) {
		/* strip the overflow flag so the masked return value is a
		 * plain ring offset
		 */
		wptr = REG_SET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW, 0);

		/* When a ring buffer overflow happen start parsing interrupt
		 * from the last not overwritten vector (wptr + 32). Hopefully
		 * this should allow us to catchup.
		 */
		tmp = (wptr + 32) & adev->irq.ih.ptr_mask;
		dev_warn(adev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
			 wptr, adev->irq.ih.rptr, tmp);
		adev->irq.ih.rptr = tmp;

		/* ack the overflow in IH_RB_CNTL; _NO_KIQ because this runs
		 * in interrupt context
		 */
		tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
		WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
	}
	return (wptr & adev->irq.ih.ptr_mask);
}
/**
 * vega10_ih_prescreen_iv - prescreen an interrupt vector
 *
 * @adev: amdgpu_device pointer
 *
 * Filters retry page faults so that only the first fault for a given
 * (pasid, address) pair is processed; repeats and floods are dropped
 * by advancing rptr past the vector.
 *
 * Returns true if the interrupt vector should be further processed.
 */
static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
{
	u32 ring_index = adev->irq.ih.rptr >> 2;	/* rptr is in bytes */
	u32 dw0, dw3, dw4, dw5;
	u16 pasid;
	u64 addr, key;
	struct amdgpu_vm *vm;
	int r;

	dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
	dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
	dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
	dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);

	/* Filter retry page faults, let only the first one pass. If
	 * there are too many outstanding faults, ignore them until
	 * some faults get cleared.
	 */
	switch (dw0 & 0xff) {	/* low byte of dw0 is the client id */
	case AMDGPU_IH_CLIENTID_VMC:
	case AMDGPU_IH_CLIENTID_UTCL2:
		break;
	default:
		/* Not a VM fault */
		return true;
	}

	pasid = dw3 & 0xffff;
	/* No PASID, can't identify faulting process */
	if (!pasid)
		return true;

	/* Not a retry fault (bit 7 of dw5 clear — presumably the
	 * RETRY flag; TODO confirm against IV format docs), check
	 * fault credit instead of the retry-fault hash.
	 */
	if (!(dw5 & 0x80)) {
		if (!amdgpu_vm_pasid_fault_credit(adev, pasid))
			goto ignore_iv;
		return true;
	}

	/* reassemble the faulting page address from dw4/dw5 */
	addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
	key = AMDGPU_VM_FAULT(pasid, addr);
	r = amdgpu_ih_add_fault(adev, key);

	/* Hash table is full or the fault is already being processed,
	 * ignore further page faults
	 */
	if (r != 0)
		goto ignore_iv;

	/* Track retry faults in per-VM fault FIFO. */
	spin_lock(&adev->vm_manager.pasid_lock);
	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
	spin_unlock(&adev->vm_manager.pasid_lock);
	if (WARN_ON_ONCE(!vm)) {
		/* VM not found, process it normally */
		amdgpu_ih_clear_fault(adev, key);
		return true;
	}

	/* No locking required with single writer and single reader */
	r = kfifo_put(&vm->faults, key);
	if (!r) {
		/* FIFO is full. Ignore it until there is space */
		amdgpu_ih_clear_fault(adev, key);
		goto ignore_iv;
	}

	/* It's the first fault for this address, process it normally */
	return true;

ignore_iv:
	adev->irq.ih.rptr += 32;	/* skip this 32-byte vector */
	return false;
}
  268. /**
  269. * vega10_ih_decode_iv - decode an interrupt vector
  270. *
  271. * @adev: amdgpu_device pointer
  272. *
  273. * Decodes the interrupt vector at the current rptr
  274. * position and also advance the position.
  275. */
  276. static void vega10_ih_decode_iv(struct amdgpu_device *adev,
  277. struct amdgpu_iv_entry *entry)
  278. {
  279. /* wptr/rptr are in bytes! */
  280. u32 ring_index = adev->irq.ih.rptr >> 2;
  281. uint32_t dw[8];
  282. dw[0] = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
  283. dw[1] = le32_to_cpu(adev->irq.ih.ring[ring_index + 1]);
  284. dw[2] = le32_to_cpu(adev->irq.ih.ring[ring_index + 2]);
  285. dw[3] = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
  286. dw[4] = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
  287. dw[5] = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
  288. dw[6] = le32_to_cpu(adev->irq.ih.ring[ring_index + 6]);
  289. dw[7] = le32_to_cpu(adev->irq.ih.ring[ring_index + 7]);
  290. entry->client_id = dw[0] & 0xff;
  291. entry->src_id = (dw[0] >> 8) & 0xff;
  292. entry->ring_id = (dw[0] >> 16) & 0xff;
  293. entry->vm_id = (dw[0] >> 24) & 0xf;
  294. entry->vm_id_src = (dw[0] >> 31);
  295. entry->timestamp = dw[1] | ((u64)(dw[2] & 0xffff) << 32);
  296. entry->timestamp_src = dw[2] >> 31;
  297. entry->pas_id = dw[3] & 0xffff;
  298. entry->pasid_src = dw[3] >> 31;
  299. entry->src_data[0] = dw[4];
  300. entry->src_data[1] = dw[5];
  301. entry->src_data[2] = dw[6];
  302. entry->src_data[3] = dw[7];
  303. /* wptr/rptr are in bytes! */
  304. adev->irq.ih.rptr += 32;
  305. }
  306. /**
  307. * vega10_ih_set_rptr - set the IH ring buffer rptr
  308. *
  309. * @adev: amdgpu_device pointer
  310. *
  311. * Set the IH ring buffer rptr.
  312. */
  313. static void vega10_ih_set_rptr(struct amdgpu_device *adev)
  314. {
  315. if (adev->irq.ih.use_doorbell) {
  316. /* XXX check if swapping is necessary on BE */
  317. if (adev->irq.ih.use_bus_addr)
  318. adev->irq.ih.ring[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
  319. else
  320. adev->wb.wb[adev->irq.ih.rptr_offs] = adev->irq.ih.rptr;
  321. WDOORBELL32(adev->irq.ih.doorbell_index, adev->irq.ih.rptr);
  322. } else {
  323. WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR), adev->irq.ih.rptr);
  324. }
  325. }
  326. static int vega10_ih_early_init(void *handle)
  327. {
  328. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  329. vega10_ih_set_interrupt_funcs(adev);
  330. return 0;
  331. }
/* Software-side init: allocate the 256 KiB IH ring, set up the
 * doorbell, create the retry-fault hash used by prescreen_iv, and
 * initialize the IRQ framework.
 * Returns 0 on success or a negative errno.
 */
static int vega10_ih_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_ih_ring_init(adev, 256 * 1024, true);
	if (r)
		return r;

	adev->irq.ih.use_doorbell = true;
	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;

	/* NOTE(review): on this -ENOMEM path the ring allocated above is
	 * not freed here — presumably the caller's teardown handles it;
	 * confirm against the IP-block error unwinding.
	 */
	adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
	if (!adev->irq.ih.faults)
		return -ENOMEM;
	INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
			 AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
	spin_lock_init(&adev->irq.ih.faults->lock);
	adev->irq.ih.faults->count = 0;

	r = amdgpu_irq_init(adev);

	return r;
}
/* Software-side teardown: release IRQ framework state, free the IH
 * ring, and free the retry-fault hash allocated in sw_init.
 */
static int vega10_ih_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_fini(adev);
	amdgpu_ih_ring_fini(adev);
	kfree(adev->irq.ih.faults);
	adev->irq.ih.faults = NULL;	/* guard against use-after-free */
	return 0;
}
  360. static int vega10_ih_hw_init(void *handle)
  361. {
  362. int r;
  363. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  364. r = vega10_ih_irq_init(adev);
  365. if (r)
  366. return r;
  367. return 0;
  368. }
  369. static int vega10_ih_hw_fini(void *handle)
  370. {
  371. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  372. vega10_ih_irq_disable(adev);
  373. return 0;
  374. }
  375. static int vega10_ih_suspend(void *handle)
  376. {
  377. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  378. return vega10_ih_hw_fini(adev);
  379. }
  380. static int vega10_ih_resume(void *handle)
  381. {
  382. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  383. return vega10_ih_hw_init(adev);
  384. }
/* Idle query for the IH block; not implemented, always reports idle. */
static bool vega10_ih_is_idle(void *handle)
{
	/* todo */
	return true;
}
/* Wait-for-idle for the IH block; not implemented, so it always
 * reports a timeout rather than falsely claiming success.
 */
static int vega10_ih_wait_for_idle(void *handle)
{
	/* todo */
	return -ETIMEDOUT;
}
/* Soft reset of the IH block; not implemented, reports success. */
static int vega10_ih_soft_reset(void *handle)
{
	/* todo */
	return 0;
}
/* Clockgating control for the IH block is a no-op on VEGA10. */
static int vega10_ih_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	return 0;
}
/* Powergating control for the IH block is a no-op on VEGA10. */
static int vega10_ih_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
/* IP-block lifecycle callbacks exported for the VEGA10 IH block. */
const struct amd_ip_funcs vega10_ih_ip_funcs = {
	.name = "vega10_ih",
	.early_init = vega10_ih_early_init,
	.late_init = NULL,
	.sw_init = vega10_ih_sw_init,
	.sw_fini = vega10_ih_sw_fini,
	.hw_init = vega10_ih_hw_init,
	.hw_fini = vega10_ih_hw_fini,
	.suspend = vega10_ih_suspend,
	.resume = vega10_ih_resume,
	.is_idle = vega10_ih_is_idle,
	.wait_for_idle = vega10_ih_wait_for_idle,
	.soft_reset = vega10_ih_soft_reset,
	.set_clockgating_state = vega10_ih_set_clockgating_state,
	.set_powergating_state = vega10_ih_set_powergating_state,
};
/* IH ring operations (wptr read, IV prescreen/decode, rptr update). */
static const struct amdgpu_ih_funcs vega10_ih_funcs = {
	.get_wptr = vega10_ih_get_wptr,
	.prescreen_iv = vega10_ih_prescreen_iv,
	.decode_iv = vega10_ih_decode_iv,
	.set_rptr = vega10_ih_set_rptr
};
  432. static void vega10_ih_set_interrupt_funcs(struct amdgpu_device *adev)
  433. {
  434. if (adev->irq.ih_funcs == NULL)
  435. adev->irq.ih_funcs = &vega10_ih_funcs;
  436. }
/* Version descriptor registering this driver as IH v4.0.0. */
const struct amdgpu_ip_block_version vega10_ih_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_IH,
	.major = 4,
	.minor = 0,
	.rev = 0,
	.funcs = &vega10_ih_ip_funcs,
};