/drivers/gpu/drm/amd/amdgpu/kv_dpm.c

https://gitlab.com/sunny256/linux · C · 3348 lines


/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "cikd.h"
#include "atom.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include "gfx_v7_0.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000
static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_uvd(void *handle, bool gate);
static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);

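/* Convert a 2-bit voltage index into a 7-bit VID.  When the BIOS provides a
 * vddc/sclk dependency table its voltage entry is returned directly;
 * otherwise the sumo VID mapping table is searched.
 */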
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}
static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}

static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}
#if 0
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 2 },
	{ 4, 1, 1 },
	{ 5, 5, 2 },
	{ 6, 6, 1 },
	{ 7, 9, 2 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0, 4, 1 },
	{ 1, 4, 1 },
	{ 2, 5, 1 },
	{ 3, 4, 1 },
	{ 4, 1, 1 },
	{ 5, 5, 1 },
	{ 6, 6, 1 },
	{ 7, 9, 1 },
	{ 8, 4, 1 },
	{ 9, 2, 1 },
	{ 10, 3, 1 },
	{ 11, 6, 1 },
	{ 12, 8, 2 },
	{ 13, 1, 1 },
	{ 14, 2, 1 },
	{ 15, 3, 1 },
	{ 16, 1, 1 },
	{ 17, 4, 1 },
	{ 18, 3, 1 },
	{ 19, 1, 1 },
	{ 20, 8, 1 },
	{ 21, 5, 1 },
	{ 22, 1, 1 },
	{ 23, 1, 1 },
	{ 24, 4, 1 },
	{ 27, 6, 1 },
	{ 28, 1, 1 },
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};
#endif
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

#if 0
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}
#endif

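/* Walk a table of power-tune register updates.  KV_CONFIGREG_CACHE entries
 * are accumulated and merged into the next real (SMC/DIDT/MMIO) register
 * write; the table is terminated by an offset of 0xFFFFFFFF.
 */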
static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}
static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

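/* DIDT is only reprogrammed while the RLC is in safe mode: enter safe mode,
 * program didt_config_kv when enabling, toggle the per-block enable bits,
 * then leave safe mode again.
 */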
static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

#if 0
static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}
#endif
static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);
	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);
	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
					  &pi->graphics_voltage_change_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
					  &pi->graphics_interval,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
					  &pi->graphics_boot_level,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}
static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}

static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}

static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			      SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			     SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}
static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					       PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}

static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}
	return ret;
}

static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}
static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
					  &pi->graphics_therm_throttle_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
					  (u8 *)&pi->graphics_level,
					  sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
					  pi->sram_end);

	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
					  &pi->graphics_dpm_level_count,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 value;

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			value = 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			value = 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			value = 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			value = 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			value = 8;
		else
			value = 0;
	} else {
		value = 0;
	}

	return value;
}
static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
					  (u8 *)&pi->uvd_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;
	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UVDInterval),
					  &pi->uvd_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevel),
					  (u8 *)&pi->uvd_level,
					  sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
					  pi->sram_end);

	return ret;
}
static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
					  (u8 *)&pi->vce_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VCEInterval),
					  (u8 *)&pi->vce_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevel),
					  (u8 *)&pi->vce_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
					  pi->sram_end);

	return ret;
}
static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
					  (u8 *)&pi->samu_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
					  (u8 *)&pi->samu_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevel),
					  (u8 *)&pi->samu_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
					  pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
					  (u8 *)&pi->acp_level_count,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, ACPInterval),
					  (u8 *)&pi->acp_interval,
					  sizeof(u8),
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevel),
					  (u8 *)&pi->acp_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
					  pi->sram_end);
	if (ret)
		return ret;

	return ret;
}
static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}
static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static void kv_dpm_enable_bapm(void *handle, bool enable)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}

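/* Bring up DPM: parse the SMU firmware header, upload the graphics, UVD,
 * VCE, SAMU and ACP level tables, enable voltage scaling, ULV, DIDT and CAC,
 * start the SMC DPM state machine and program the thermal interrupt range.
 */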
static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);
#if 0
	kv_initialize_hardware_cac_manager(adev);
#endif
	kv_start_am(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_dpm(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}
static void kv_dpm_disable(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	kv_dpm_powergate_vce(adev, false);
	kv_dpm_powergate_uvd(adev, false);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_stop_dpm(adev);
	kv_enable_ulv(adev, false);
	kv_reset_am(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}

#if 0
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
					   (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}
#endif

static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsHighT),
						  (u8 *)&pi->fps_high_t,
						  sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsLowT),
						  (u8 *)&pi->fps_low_t,
						  sizeof(u16), pi->sram_end);
	}
	return ret;
}
static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
						  (uint8_t *)&pi->uvd_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}
static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		kv_dpm_powergate_vce(adev, false);
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
						  (u8 *)&pi->vce_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_VCEDPM_SetEnabledMask,
								 (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
		kv_dpm_powergate_vce(adev, true);
	}

	return 0;
}
static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
						  (u8 *)&pi->samu_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_SAMUDPM_SetEnabledMask,
								 (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}

static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].clk >= 0) /* XXX */
			break;
	}

	if (i >= table->count)
		i = table->count - 1;

	return i;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
						  (u8 *)&pi->acp_boot_level,
						  sizeof(u8),
						  pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}

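/* When gating, the UVD block is stopped and its DPM level dropped before the
 * SMU powers it off; the reverse order is used when ungating.
 */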
static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						   AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
						   AMD_PG_STATE_UNGATE);
	}
}

static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->vce_power_gated == gate)
		return;

	pi->vce_power_gated = gate;

	if (!pi->caps_vce_pg)
		return;

	if (gate)
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	else
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
}

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}

static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}
static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if