PageRenderTime 62ms CodeModel.GetById 22ms RepoModel.GetById 0ms app.codeStats 0ms

/drivers/gpu/drm/amd/amdgpu/kv_dpm.c

https://gitlab.com/sunny256/linux
C | 3348 lines | 2791 code | 516 blank | 41 comment | 492 complexity | 2a47ad81d9880f45bc499c8a8b3a5140 MD5 | raw file
Possible License(s): GPL-2.0
  1. /*
  2. * Copyright 2013 Advanced Micro Devices, Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. */
  23. #include <drm/drmP.h>
  24. #include "amdgpu.h"
  25. #include "amdgpu_pm.h"
  26. #include "cikd.h"
  27. #include "atom.h"
  28. #include "amdgpu_atombios.h"
  29. #include "amdgpu_dpm.h"
  30. #include "kv_dpm.h"
  31. #include "gfx_v7_0.h"
  32. #include <linux/seq_file.h>
  33. #include "smu/smu_7_0_0_d.h"
  34. #include "smu/smu_7_0_0_sh_mask.h"
  35. #include "gca/gfx_7_2_d.h"
  36. #include "gca/gfx_7_2_sh_mask.h"
  37. #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
  38. #define KV_MINIMUM_ENGINE_CLOCK 800
  39. #define SMC_RAM_END 0x40000
  40. static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
  41. static int kv_enable_nb_dpm(struct amdgpu_device *adev,
  42. bool enable);
  43. static void kv_init_graphics_levels(struct amdgpu_device *adev);
  44. static int kv_calculate_ds_divider(struct amdgpu_device *adev);
  45. static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
  46. static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
  47. static void kv_enable_new_levels(struct amdgpu_device *adev);
  48. static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
  49. struct amdgpu_ps *new_rps);
  50. static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
  51. static int kv_set_enabled_levels(struct amdgpu_device *adev);
  52. static int kv_force_dpm_highest(struct amdgpu_device *adev);
  53. static int kv_force_dpm_lowest(struct amdgpu_device *adev);
  54. static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
  55. struct amdgpu_ps *new_rps,
  56. struct amdgpu_ps *old_rps);
  57. static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
  58. int min_temp, int max_temp);
  59. static int kv_init_fps_limits(struct amdgpu_device *adev);
  60. static void kv_dpm_powergate_uvd(void *handle, bool gate);
  61. static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
  62. static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
  63. static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
  64. static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
  65. struct sumo_vid_mapping_table *vid_mapping_table,
  66. u32 vid_2bit)
  67. {
  68. struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
  69. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  70. u32 i;
  71. if (vddc_sclk_table && vddc_sclk_table->count) {
  72. if (vid_2bit < vddc_sclk_table->count)
  73. return vddc_sclk_table->entries[vid_2bit].v;
  74. else
  75. return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
  76. } else {
  77. for (i = 0; i < vid_mapping_table->num_entries; i++) {
  78. if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
  79. return vid_mapping_table->entries[i].vid_7bit;
  80. }
  81. return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
  82. }
  83. }
  84. static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
  85. struct sumo_vid_mapping_table *vid_mapping_table,
  86. u32 vid_7bit)
  87. {
  88. struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
  89. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  90. u32 i;
  91. if (vddc_sclk_table && vddc_sclk_table->count) {
  92. for (i = 0; i < vddc_sclk_table->count; i++) {
  93. if (vddc_sclk_table->entries[i].v == vid_7bit)
  94. return i;
  95. }
  96. return vddc_sclk_table->count - 1;
  97. } else {
  98. for (i = 0; i < vid_mapping_table->num_entries; i++) {
  99. if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
  100. return vid_mapping_table->entries[i].vid_2bit;
  101. }
  102. return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
  103. }
  104. }
  105. static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
  106. {
  107. /* This bit selects who handles display phy powergating.
  108. * Clear the bit to let atom handle it.
  109. * Set it to let the driver handle it.
  110. * For now we just let atom handle it.
  111. */
  112. #if 0
  113. u32 v = RREG32(mmDOUT_SCRATCH3);
  114. if (enable)
  115. v |= 0x4;
  116. else
  117. v &= 0xFFFFFFFB;
  118. WREG32(mmDOUT_SCRATCH3, v);
  119. #endif
  120. }
  121. static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
  122. struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
  123. ATOM_AVAILABLE_SCLK_LIST *table)
  124. {
  125. u32 i;
  126. u32 n = 0;
  127. u32 prev_sclk = 0;
  128. for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
  129. if (table[i].ulSupportedSCLK > prev_sclk) {
  130. sclk_voltage_mapping_table->entries[n].sclk_frequency =
  131. table[i].ulSupportedSCLK;
  132. sclk_voltage_mapping_table->entries[n].vid_2bit =
  133. table[i].usVoltageIndex;
  134. prev_sclk = table[i].ulSupportedSCLK;
  135. n++;
  136. }
  137. }
  138. sclk_voltage_mapping_table->num_max_dpm_entries = n;
  139. }
  140. static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
  141. struct sumo_vid_mapping_table *vid_mapping_table,
  142. ATOM_AVAILABLE_SCLK_LIST *table)
  143. {
  144. u32 i, j;
  145. for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
  146. if (table[i].ulSupportedSCLK != 0) {
  147. vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
  148. table[i].usVoltageID;
  149. vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
  150. table[i].usVoltageIndex;
  151. }
  152. }
  153. for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
  154. if (vid_mapping_table->entries[i].vid_7bit == 0) {
  155. for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
  156. if (vid_mapping_table->entries[j].vid_7bit != 0) {
  157. vid_mapping_table->entries[i] =
  158. vid_mapping_table->entries[j];
  159. vid_mapping_table->entries[j].vid_7bit = 0;
  160. break;
  161. }
  162. }
  163. if (j == SUMO_MAX_NUMBER_VOLTAGES)
  164. break;
  165. }
  166. }
  167. vid_mapping_table->num_entries = i;
  168. }
  169. #if 0
  170. static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
  171. {
  172. { 0, 4, 1 },
  173. { 1, 4, 1 },
  174. { 2, 5, 1 },
  175. { 3, 4, 2 },
  176. { 4, 1, 1 },
  177. { 5, 5, 2 },
  178. { 6, 6, 1 },
  179. { 7, 9, 2 },
  180. { 0xffffffff }
  181. };
  182. static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
  183. {
  184. { 0, 4, 1 },
  185. { 0xffffffff }
  186. };
  187. static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
  188. {
  189. { 0, 4, 1 },
  190. { 0xffffffff }
  191. };
  192. static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
  193. {
  194. { 0, 4, 1 },
  195. { 0xffffffff }
  196. };
  197. static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
  198. {
  199. { 0, 4, 1 },
  200. { 0xffffffff }
  201. };
  202. static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
  203. {
  204. { 0, 4, 1 },
  205. { 1, 4, 1 },
  206. { 2, 5, 1 },
  207. { 3, 4, 1 },
  208. { 4, 1, 1 },
  209. { 5, 5, 1 },
  210. { 6, 6, 1 },
  211. { 7, 9, 1 },
  212. { 8, 4, 1 },
  213. { 9, 2, 1 },
  214. { 10, 3, 1 },
  215. { 11, 6, 1 },
  216. { 12, 8, 2 },
  217. { 13, 1, 1 },
  218. { 14, 2, 1 },
  219. { 15, 3, 1 },
  220. { 16, 1, 1 },
  221. { 17, 4, 1 },
  222. { 18, 3, 1 },
  223. { 19, 1, 1 },
  224. { 20, 8, 1 },
  225. { 21, 5, 1 },
  226. { 22, 1, 1 },
  227. { 23, 1, 1 },
  228. { 24, 4, 1 },
  229. { 27, 6, 1 },
  230. { 28, 1, 1 },
  231. { 0xffffffff }
  232. };
  233. static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
  234. {
  235. { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
  236. };
  237. static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
  238. {
  239. { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
  240. };
  241. static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
  242. {
  243. { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
  244. };
  245. static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
  246. {
  247. { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
  248. };
  249. static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
  250. {
  251. { 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
  252. };
  253. static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
  254. {
  255. { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
  256. };
  257. #endif
  258. static const struct kv_pt_config_reg didt_config_kv[] =
  259. {
  260. { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  261. { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  262. { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  263. { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  264. { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  265. { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  266. { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  267. { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  268. { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  269. { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  270. { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  271. { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  272. { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
  273. { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
  274. { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
  275. { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  276. { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  277. { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  278. { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  279. { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  280. { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  281. { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  282. { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  283. { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  284. { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  285. { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  286. { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  287. { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  288. { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  289. { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  290. { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
  291. { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
  292. { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
  293. { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  294. { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  295. { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  296. { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  297. { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  298. { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  299. { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  300. { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  301. { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  302. { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  303. { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  304. { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  305. { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  306. { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  307. { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  308. { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
  309. { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
  310. { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
  311. { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  312. { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  313. { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  314. { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  315. { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  316. { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  317. { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  318. { 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  319. { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  320. { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  321. { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  322. { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  323. { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
  324. { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
  325. { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
  326. { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
  327. { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
  328. { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
  329. { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  330. { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
  331. { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
  332. { 0xFFFFFFFF }
  333. };
  334. static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
  335. {
  336. struct kv_ps *ps = rps->ps_priv;
  337. return ps;
  338. }
  339. static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
  340. {
  341. struct kv_power_info *pi = adev->pm.dpm.priv;
  342. return pi;
  343. }
  344. #if 0
  345. static void kv_program_local_cac_table(struct amdgpu_device *adev,
  346. const struct kv_lcac_config_values *local_cac_table,
  347. const struct kv_lcac_config_reg *local_cac_reg)
  348. {
  349. u32 i, count, data;
  350. const struct kv_lcac_config_values *values = local_cac_table;
  351. while (values->block_id != 0xffffffff) {
  352. count = values->signal_id;
  353. for (i = 0; i < count; i++) {
  354. data = ((values->block_id << local_cac_reg->block_shift) &
  355. local_cac_reg->block_mask);
  356. data |= ((i << local_cac_reg->signal_shift) &
  357. local_cac_reg->signal_mask);
  358. data |= ((values->t << local_cac_reg->t_shift) &
  359. local_cac_reg->t_mask);
  360. data |= ((1 << local_cac_reg->enable_shift) &
  361. local_cac_reg->enable_mask);
  362. WREG32_SMC(local_cac_reg->cntl, data);
  363. }
  364. values++;
  365. }
  366. }
  367. #endif
  368. static int kv_program_pt_config_registers(struct amdgpu_device *adev,
  369. const struct kv_pt_config_reg *cac_config_regs)
  370. {
  371. const struct kv_pt_config_reg *config_regs = cac_config_regs;
  372. u32 data;
  373. u32 cache = 0;
  374. if (config_regs == NULL)
  375. return -EINVAL;
  376. while (config_regs->offset != 0xFFFFFFFF) {
  377. if (config_regs->type == KV_CONFIGREG_CACHE) {
  378. cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
  379. } else {
  380. switch (config_regs->type) {
  381. case KV_CONFIGREG_SMC_IND:
  382. data = RREG32_SMC(config_regs->offset);
  383. break;
  384. case KV_CONFIGREG_DIDT_IND:
  385. data = RREG32_DIDT(config_regs->offset);
  386. break;
  387. default:
  388. data = RREG32(config_regs->offset);
  389. break;
  390. }
  391. data &= ~config_regs->mask;
  392. data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
  393. data |= cache;
  394. cache = 0;
  395. switch (config_regs->type) {
  396. case KV_CONFIGREG_SMC_IND:
  397. WREG32_SMC(config_regs->offset, data);
  398. break;
  399. case KV_CONFIGREG_DIDT_IND:
  400. WREG32_DIDT(config_regs->offset, data);
  401. break;
  402. default:
  403. WREG32(config_regs->offset, data);
  404. break;
  405. }
  406. }
  407. config_regs++;
  408. }
  409. return 0;
  410. }
  411. static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
  412. {
  413. struct kv_power_info *pi = kv_get_pi(adev);
  414. u32 data;
  415. if (pi->caps_sq_ramping) {
  416. data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
  417. if (enable)
  418. data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
  419. else
  420. data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
  421. WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
  422. }
  423. if (pi->caps_db_ramping) {
  424. data = RREG32_DIDT(ixDIDT_DB_CTRL0);
  425. if (enable)
  426. data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
  427. else
  428. data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
  429. WREG32_DIDT(ixDIDT_DB_CTRL0, data);
  430. }
  431. if (pi->caps_td_ramping) {
  432. data = RREG32_DIDT(ixDIDT_TD_CTRL0);
  433. if (enable)
  434. data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
  435. else
  436. data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
  437. WREG32_DIDT(ixDIDT_TD_CTRL0, data);
  438. }
  439. if (pi->caps_tcp_ramping) {
  440. data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
  441. if (enable)
  442. data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
  443. else
  444. data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
  445. WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
  446. }
  447. }
  448. static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
  449. {
  450. struct kv_power_info *pi = kv_get_pi(adev);
  451. int ret;
  452. if (pi->caps_sq_ramping ||
  453. pi->caps_db_ramping ||
  454. pi->caps_td_ramping ||
  455. pi->caps_tcp_ramping) {
  456. adev->gfx.rlc.funcs->enter_safe_mode(adev);
  457. if (enable) {
  458. ret = kv_program_pt_config_registers(adev, didt_config_kv);
  459. if (ret) {
  460. adev->gfx.rlc.funcs->exit_safe_mode(adev);
  461. return ret;
  462. }
  463. }
  464. kv_do_enable_didt(adev, enable);
  465. adev->gfx.rlc.funcs->exit_safe_mode(adev);
  466. }
  467. return 0;
  468. }
  469. #if 0
  470. static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
  471. {
  472. struct kv_power_info *pi = kv_get_pi(adev);
  473. if (pi->caps_cac) {
  474. WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
  475. WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
  476. kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);
  477. WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
  478. WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
  479. kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);
  480. WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
  481. WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
  482. kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);
  483. WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
  484. WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
  485. kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);
  486. WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
  487. WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
  488. kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);
  489. WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
  490. WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
  491. kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
  492. }
  493. }
  494. #endif
  495. static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
  496. {
  497. struct kv_power_info *pi = kv_get_pi(adev);
  498. int ret = 0;
  499. if (pi->caps_cac) {
  500. if (enable) {
  501. ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
  502. if (ret)
  503. pi->cac_enabled = false;
  504. else
  505. pi->cac_enabled = true;
  506. } else if (pi->cac_enabled) {
  507. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
  508. pi->cac_enabled = false;
  509. }
  510. }
  511. return ret;
  512. }
  513. static int kv_process_firmware_header(struct amdgpu_device *adev)
  514. {
  515. struct kv_power_info *pi = kv_get_pi(adev);
  516. u32 tmp;
  517. int ret;
  518. ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
  519. offsetof(SMU7_Firmware_Header, DpmTable),
  520. &tmp, pi->sram_end);
  521. if (ret == 0)
  522. pi->dpm_table_start = tmp;
  523. ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
  524. offsetof(SMU7_Firmware_Header, SoftRegisters),
  525. &tmp, pi->sram_end);
  526. if (ret == 0)
  527. pi->soft_regs_start = tmp;
  528. return ret;
  529. }
  530. static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
  531. {
  532. struct kv_power_info *pi = kv_get_pi(adev);
  533. int ret;
  534. pi->graphics_voltage_change_enable = 1;
  535. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  536. pi->dpm_table_start +
  537. offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
  538. &pi->graphics_voltage_change_enable,
  539. sizeof(u8), pi->sram_end);
  540. return ret;
  541. }
  542. static int kv_set_dpm_interval(struct amdgpu_device *adev)
  543. {
  544. struct kv_power_info *pi = kv_get_pi(adev);
  545. int ret;
  546. pi->graphics_interval = 1;
  547. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  548. pi->dpm_table_start +
  549. offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
  550. &pi->graphics_interval,
  551. sizeof(u8), pi->sram_end);
  552. return ret;
  553. }
  554. static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
  555. {
  556. struct kv_power_info *pi = kv_get_pi(adev);
  557. int ret;
  558. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  559. pi->dpm_table_start +
  560. offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
  561. &pi->graphics_boot_level,
  562. sizeof(u8), pi->sram_end);
  563. return ret;
  564. }
  565. static void kv_program_vc(struct amdgpu_device *adev)
  566. {
  567. WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
  568. }
  569. static void kv_clear_vc(struct amdgpu_device *adev)
  570. {
  571. WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
  572. }
  573. static int kv_set_divider_value(struct amdgpu_device *adev,
  574. u32 index, u32 sclk)
  575. {
  576. struct kv_power_info *pi = kv_get_pi(adev);
  577. struct atom_clock_dividers dividers;
  578. int ret;
  579. ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
  580. sclk, false, &dividers);
  581. if (ret)
  582. return ret;
  583. pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
  584. pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);
  585. return 0;
  586. }
  587. static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
  588. u16 voltage)
  589. {
  590. return 6200 - (voltage * 25);
  591. }
  592. static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
  593. u32 vid_2bit)
  594. {
  595. struct kv_power_info *pi = kv_get_pi(adev);
  596. u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
  597. &pi->sys_info.vid_mapping_table,
  598. vid_2bit);
  599. return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
  600. }
  601. static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
  602. {
  603. struct kv_power_info *pi = kv_get_pi(adev);
  604. pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
  605. pi->graphics_level[index].MinVddNb =
  606. cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));
  607. return 0;
  608. }
  609. static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
  610. {
  611. struct kv_power_info *pi = kv_get_pi(adev);
  612. pi->graphics_level[index].AT = cpu_to_be16((u16)at);
  613. return 0;
  614. }
  615. static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
  616. u32 index, bool enable)
  617. {
  618. struct kv_power_info *pi = kv_get_pi(adev);
  619. pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
  620. }
  621. static void kv_start_dpm(struct amdgpu_device *adev)
  622. {
  623. u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
  624. tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
  625. WREG32_SMC(ixGENERAL_PWRMGT, tmp);
  626. amdgpu_kv_smc_dpm_enable(adev, true);
  627. }
  628. static void kv_stop_dpm(struct amdgpu_device *adev)
  629. {
  630. amdgpu_kv_smc_dpm_enable(adev, false);
  631. }
  632. static void kv_start_am(struct amdgpu_device *adev)
  633. {
  634. u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
  635. sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
  636. SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
  637. sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
  638. WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
  639. }
  640. static void kv_reset_am(struct amdgpu_device *adev)
  641. {
  642. u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
  643. sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
  644. SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
  645. WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
  646. }
  647. static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
  648. {
  649. return amdgpu_kv_notify_message_to_smu(adev, freeze ?
  650. PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
  651. }
  652. static int kv_force_lowest_valid(struct amdgpu_device *adev)
  653. {
  654. return kv_force_dpm_lowest(adev);
  655. }
  656. static int kv_unforce_levels(struct amdgpu_device *adev)
  657. {
  658. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
  659. return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
  660. else
  661. return kv_set_enabled_levels(adev);
  662. }
  663. static int kv_update_sclk_t(struct amdgpu_device *adev)
  664. {
  665. struct kv_power_info *pi = kv_get_pi(adev);
  666. u32 low_sclk_interrupt_t = 0;
  667. int ret = 0;
  668. if (pi->caps_sclk_throttle_low_notification) {
  669. low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
  670. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  671. pi->dpm_table_start +
  672. offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
  673. (u8 *)&low_sclk_interrupt_t,
  674. sizeof(u32), pi->sram_end);
  675. }
  676. return ret;
  677. }
  678. static int kv_program_bootup_state(struct amdgpu_device *adev)
  679. {
  680. struct kv_power_info *pi = kv_get_pi(adev);
  681. u32 i;
  682. struct amdgpu_clock_voltage_dependency_table *table =
  683. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  684. if (table && table->count) {
  685. for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
  686. if (table->entries[i].clk == pi->boot_pl.sclk)
  687. break;
  688. }
  689. pi->graphics_boot_level = (u8)i;
  690. kv_dpm_power_level_enable(adev, i, true);
  691. } else {
  692. struct sumo_sclk_voltage_mapping_table *table =
  693. &pi->sys_info.sclk_voltage_mapping_table;
  694. if (table->num_max_dpm_entries == 0)
  695. return -EINVAL;
  696. for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
  697. if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
  698. break;
  699. }
  700. pi->graphics_boot_level = (u8)i;
  701. kv_dpm_power_level_enable(adev, i, true);
  702. }
  703. return 0;
  704. }
  705. static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
  706. {
  707. struct kv_power_info *pi = kv_get_pi(adev);
  708. int ret;
  709. pi->graphics_therm_throttle_enable = 1;
  710. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  711. pi->dpm_table_start +
  712. offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
  713. &pi->graphics_therm_throttle_enable,
  714. sizeof(u8), pi->sram_end);
  715. return ret;
  716. }
  717. static int kv_upload_dpm_settings(struct amdgpu_device *adev)
  718. {
  719. struct kv_power_info *pi = kv_get_pi(adev);
  720. int ret;
  721. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  722. pi->dpm_table_start +
  723. offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
  724. (u8 *)&pi->graphics_level,
  725. sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
  726. pi->sram_end);
  727. if (ret)
  728. return ret;
  729. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  730. pi->dpm_table_start +
  731. offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
  732. &pi->graphics_dpm_level_count,
  733. sizeof(u8), pi->sram_end);
  734. return ret;
  735. }
  736. static u32 kv_get_clock_difference(u32 a, u32 b)
  737. {
  738. return (a >= b) ? a - b : b - a;
  739. }
  740. static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
  741. {
  742. struct kv_power_info *pi = kv_get_pi(adev);
  743. u32 value;
  744. if (pi->caps_enable_dfs_bypass) {
  745. if (kv_get_clock_difference(clk, 40000) < 200)
  746. value = 3;
  747. else if (kv_get_clock_difference(clk, 30000) < 200)
  748. value = 2;
  749. else if (kv_get_clock_difference(clk, 20000) < 200)
  750. value = 7;
  751. else if (kv_get_clock_difference(clk, 15000) < 200)
  752. value = 6;
  753. else if (kv_get_clock_difference(clk, 10000) < 200)
  754. value = 8;
  755. else
  756. value = 0;
  757. } else {
  758. value = 0;
  759. }
  760. return value;
  761. }
  762. static int kv_populate_uvd_table(struct amdgpu_device *adev)
  763. {
  764. struct kv_power_info *pi = kv_get_pi(adev);
  765. struct amdgpu_uvd_clock_voltage_dependency_table *table =
  766. &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
  767. struct atom_clock_dividers dividers;
  768. int ret;
  769. u32 i;
  770. if (table == NULL || table->count == 0)
  771. return 0;
  772. pi->uvd_level_count = 0;
  773. for (i = 0; i < table->count; i++) {
  774. if (pi->high_voltage_t &&
  775. (pi->high_voltage_t < table->entries[i].v))
  776. break;
  777. pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
  778. pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
  779. pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);
  780. pi->uvd_level[i].VClkBypassCntl =
  781. (u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
  782. pi->uvd_level[i].DClkBypassCntl =
  783. (u8)kv_get_clk_bypass(adev, table->entries[i].dclk);
  784. ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
  785. table->entries[i].vclk, false, &dividers);
  786. if (ret)
  787. return ret;
  788. pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;
  789. ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
  790. table->entries[i].dclk, false, &dividers);
  791. if (ret)
  792. return ret;
  793. pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;
  794. pi->uvd_level_count++;
  795. }
  796. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  797. pi->dpm_table_start +
  798. offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
  799. (u8 *)&pi->uvd_level_count,
  800. sizeof(u8), pi->sram_end);
  801. if (ret)
  802. return ret;
  803. pi->uvd_interval = 1;
  804. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  805. pi->dpm_table_start +
  806. offsetof(SMU7_Fusion_DpmTable, UVDInterval),
  807. &pi->uvd_interval,
  808. sizeof(u8), pi->sram_end);
  809. if (ret)
  810. return ret;
  811. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  812. pi->dpm_table_start +
  813. offsetof(SMU7_Fusion_DpmTable, UvdLevel),
  814. (u8 *)&pi->uvd_level,
  815. sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
  816. pi->sram_end);
  817. return ret;
  818. }
  819. static int kv_populate_vce_table(struct amdgpu_device *adev)
  820. {
  821. struct kv_power_info *pi = kv_get_pi(adev);
  822. int ret;
  823. u32 i;
  824. struct amdgpu_vce_clock_voltage_dependency_table *table =
  825. &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
  826. struct atom_clock_dividers dividers;
  827. if (table == NULL || table->count == 0)
  828. return 0;
  829. pi->vce_level_count = 0;
  830. for (i = 0; i < table->count; i++) {
  831. if (pi->high_voltage_t &&
  832. pi->high_voltage_t < table->entries[i].v)
  833. break;
  834. pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
  835. pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
  836. pi->vce_level[i].ClkBypassCntl =
  837. (u8)kv_get_clk_bypass(adev, table->entries[i].evclk);
  838. ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
  839. table->entries[i].evclk, false, &dividers);
  840. if (ret)
  841. return ret;
  842. pi->vce_level[i].Divider = (u8)dividers.post_div;
  843. pi->vce_level_count++;
  844. }
  845. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  846. pi->dpm_table_start +
  847. offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
  848. (u8 *)&pi->vce_level_count,
  849. sizeof(u8),
  850. pi->sram_end);
  851. if (ret)
  852. return ret;
  853. pi->vce_interval = 1;
  854. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  855. pi->dpm_table_start +
  856. offsetof(SMU7_Fusion_DpmTable, VCEInterval),
  857. (u8 *)&pi->vce_interval,
  858. sizeof(u8),
  859. pi->sram_end);
  860. if (ret)
  861. return ret;
  862. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  863. pi->dpm_table_start +
  864. offsetof(SMU7_Fusion_DpmTable, VceLevel),
  865. (u8 *)&pi->vce_level,
  866. sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
  867. pi->sram_end);
  868. return ret;
  869. }
  870. static int kv_populate_samu_table(struct amdgpu_device *adev)
  871. {
  872. struct kv_power_info *pi = kv_get_pi(adev);
  873. struct amdgpu_clock_voltage_dependency_table *table =
  874. &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
  875. struct atom_clock_dividers dividers;
  876. int ret;
  877. u32 i;
  878. if (table == NULL || table->count == 0)
  879. return 0;
  880. pi->samu_level_count = 0;
  881. for (i = 0; i < table->count; i++) {
  882. if (pi->high_voltage_t &&
  883. pi->high_voltage_t < table->entries[i].v)
  884. break;
  885. pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
  886. pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
  887. pi->samu_level[i].ClkBypassCntl =
  888. (u8)kv_get_clk_bypass(adev, table->entries[i].clk);
  889. ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
  890. table->entries[i].clk, false, &dividers);
  891. if (ret)
  892. return ret;
  893. pi->samu_level[i].Divider = (u8)dividers.post_div;
  894. pi->samu_level_count++;
  895. }
  896. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  897. pi->dpm_table_start +
  898. offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
  899. (u8 *)&pi->samu_level_count,
  900. sizeof(u8),
  901. pi->sram_end);
  902. if (ret)
  903. return ret;
  904. pi->samu_interval = 1;
  905. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  906. pi->dpm_table_start +
  907. offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
  908. (u8 *)&pi->samu_interval,
  909. sizeof(u8),
  910. pi->sram_end);
  911. if (ret)
  912. return ret;
  913. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  914. pi->dpm_table_start +
  915. offsetof(SMU7_Fusion_DpmTable, SamuLevel),
  916. (u8 *)&pi->samu_level,
  917. sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
  918. pi->sram_end);
  919. if (ret)
  920. return ret;
  921. return ret;
  922. }
  923. static int kv_populate_acp_table(struct amdgpu_device *adev)
  924. {
  925. struct kv_power_info *pi = kv_get_pi(adev);
  926. struct amdgpu_clock_voltage_dependency_table *table =
  927. &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
  928. struct atom_clock_dividers dividers;
  929. int ret;
  930. u32 i;
  931. if (table == NULL || table->count == 0)
  932. return 0;
  933. pi->acp_level_count = 0;
  934. for (i = 0; i < table->count; i++) {
  935. pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
  936. pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);
  937. ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
  938. table->entries[i].clk, false, &dividers);
  939. if (ret)
  940. return ret;
  941. pi->acp_level[i].Divider = (u8)dividers.post_div;
  942. pi->acp_level_count++;
  943. }
  944. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  945. pi->dpm_table_start +
  946. offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
  947. (u8 *)&pi->acp_level_count,
  948. sizeof(u8),
  949. pi->sram_end);
  950. if (ret)
  951. return ret;
  952. pi->acp_interval = 1;
  953. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  954. pi->dpm_table_start +
  955. offsetof(SMU7_Fusion_DpmTable, ACPInterval),
  956. (u8 *)&pi->acp_interval,
  957. sizeof(u8),
  958. pi->sram_end);
  959. if (ret)
  960. return ret;
  961. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  962. pi->dpm_table_start +
  963. offsetof(SMU7_Fusion_DpmTable, AcpLevel),
  964. (u8 *)&pi->acp_level,
  965. sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
  966. pi->sram_end);
  967. if (ret)
  968. return ret;
  969. return ret;
  970. }
  971. static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
  972. {
  973. struct kv_power_info *pi = kv_get_pi(adev);
  974. u32 i;
  975. struct amdgpu_clock_voltage_dependency_table *table =
  976. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  977. if (table && table->count) {
  978. for (i = 0; i < pi->graphics_dpm_level_count; i++) {
  979. if (pi->caps_enable_dfs_bypass) {
  980. if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
  981. pi->graphics_level[i].ClkBypassCntl = 3;
  982. else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
  983. pi->graphics_level[i].ClkBypassCntl = 2;
  984. else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
  985. pi->graphics_level[i].ClkBypassCntl = 7;
  986. else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200)
  987. pi->graphics_level[i].ClkBypassCntl = 6;
  988. else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200)
  989. pi->graphics_level[i].ClkBypassCntl = 8;
  990. else
  991. pi->graphics_level[i].ClkBypassCntl = 0;
  992. } else {
  993. pi->graphics_level[i].ClkBypassCntl = 0;
  994. }
  995. }
  996. } else {
  997. struct sumo_sclk_voltage_mapping_table *table =
  998. &pi->sys_info.sclk_voltage_mapping_table;
  999. for (i = 0; i < pi->graphics_dpm_level_count; i++) {
  1000. if (pi->caps_enable_dfs_bypass) {
  1001. if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
  1002. pi->graphics_level[i].ClkBypassCntl = 3;
  1003. else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
  1004. pi->graphics_level[i].ClkBypassCntl = 2;
  1005. else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
  1006. pi->graphics_level[i].ClkBypassCntl = 7;
  1007. else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
  1008. pi->graphics_level[i].ClkBypassCntl = 6;
  1009. else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
  1010. pi->graphics_level[i].ClkBypassCntl = 8;
  1011. else
  1012. pi->graphics_level[i].ClkBypassCntl = 0;
  1013. } else {
  1014. pi->graphics_level[i].ClkBypassCntl = 0;
  1015. }
  1016. }
  1017. }
  1018. }
  1019. static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
  1020. {
  1021. return amdgpu_kv_notify_message_to_smu(adev, enable ?
  1022. PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
  1023. }
  1024. static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
  1025. {
  1026. struct kv_power_info *pi = kv_get_pi(adev);
  1027. pi->acp_boot_level = 0xff;
  1028. }
  1029. static void kv_update_current_ps(struct amdgpu_device *adev,
  1030. struct amdgpu_ps *rps)
  1031. {
  1032. struct kv_ps *new_ps = kv_get_ps(rps);
  1033. struct kv_power_info *pi = kv_get_pi(adev);
  1034. pi->current_rps = *rps;
  1035. pi->current_ps = *new_ps;
  1036. pi->current_rps.ps_priv = &pi->current_ps;
  1037. adev->pm.dpm.current_ps = &pi->current_rps;
  1038. }
  1039. static void kv_update_requested_ps(struct amdgpu_device *adev,
  1040. struct amdgpu_ps *rps)
  1041. {
  1042. struct kv_ps *new_ps = kv_get_ps(rps);
  1043. struct kv_power_info *pi = kv_get_pi(adev);
  1044. pi->requested_rps = *rps;
  1045. pi->requested_ps = *new_ps;
  1046. pi->requested_rps.ps_priv = &pi->requested_ps;
  1047. adev->pm.dpm.requested_ps = &pi->requested_rps;
  1048. }
  1049. static void kv_dpm_enable_bapm(void *handle, bool enable)
  1050. {
  1051. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1052. struct kv_power_info *pi = kv_get_pi(adev);
  1053. int ret;
  1054. if (pi->bapm_enable) {
  1055. ret = amdgpu_kv_smc_bapm_enable(adev, enable);
  1056. if (ret)
  1057. DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
  1058. }
  1059. }
  1060. static int kv_dpm_enable(struct amdgpu_device *adev)
  1061. {
  1062. struct kv_power_info *pi = kv_get_pi(adev);
  1063. int ret;
  1064. ret = kv_process_firmware_header(adev);
  1065. if (ret) {
  1066. DRM_ERROR("kv_process_firmware_header failed\n");
  1067. return ret;
  1068. }
  1069. kv_init_fps_limits(adev);
  1070. kv_init_graphics_levels(adev);
  1071. ret = kv_program_bootup_state(adev);
  1072. if (ret) {
  1073. DRM_ERROR("kv_program_bootup_state failed\n");
  1074. return ret;
  1075. }
  1076. kv_calculate_dfs_bypass_settings(adev);
  1077. ret = kv_upload_dpm_settings(adev);
  1078. if (ret) {
  1079. DRM_ERROR("kv_upload_dpm_settings failed\n");
  1080. return ret;
  1081. }
  1082. ret = kv_populate_uvd_table(adev);
  1083. if (ret) {
  1084. DRM_ERROR("kv_populate_uvd_table failed\n");
  1085. return ret;
  1086. }
  1087. ret = kv_populate_vce_table(adev);
  1088. if (ret) {
  1089. DRM_ERROR("kv_populate_vce_table failed\n");
  1090. return ret;
  1091. }
  1092. ret = kv_populate_samu_table(adev);
  1093. if (ret) {
  1094. DRM_ERROR("kv_populate_samu_table failed\n");
  1095. return ret;
  1096. }
  1097. ret = kv_populate_acp_table(adev);
  1098. if (ret) {
  1099. DRM_ERROR("kv_populate_acp_table failed\n");
  1100. return ret;
  1101. }
  1102. kv_program_vc(adev);
  1103. #if 0
  1104. kv_initialize_hardware_cac_manager(adev);
  1105. #endif
  1106. kv_start_am(adev);
  1107. if (pi->enable_auto_thermal_throttling) {
  1108. ret = kv_enable_auto_thermal_throttling(adev);
  1109. if (ret) {
  1110. DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
  1111. return ret;
  1112. }
  1113. }
  1114. ret = kv_enable_dpm_voltage_scaling(adev);
  1115. if (ret) {
  1116. DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
  1117. return ret;
  1118. }
  1119. ret = kv_set_dpm_interval(adev);
  1120. if (ret) {
  1121. DRM_ERROR("kv_set_dpm_interval failed\n");
  1122. return ret;
  1123. }
  1124. ret = kv_set_dpm_boot_state(adev);
  1125. if (ret) {
  1126. DRM_ERROR("kv_set_dpm_boot_state failed\n");
  1127. return ret;
  1128. }
  1129. ret = kv_enable_ulv(adev, true);
  1130. if (ret) {
  1131. DRM_ERROR("kv_enable_ulv failed\n");
  1132. return ret;
  1133. }
  1134. kv_start_dpm(adev);
  1135. ret = kv_enable_didt(adev, true);
  1136. if (ret) {
  1137. DRM_ERROR("kv_enable_didt failed\n");
  1138. return ret;
  1139. }
  1140. ret = kv_enable_smc_cac(adev, true);
  1141. if (ret) {
  1142. DRM_ERROR("kv_enable_smc_cac failed\n");
  1143. return ret;
  1144. }
  1145. kv_reset_acp_boot_level(adev);
  1146. ret = amdgpu_kv_smc_bapm_enable(adev, false);
  1147. if (ret) {
  1148. DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
  1149. return ret;
  1150. }
  1151. kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
  1152. if (adev->irq.installed &&
  1153. amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
  1154. ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
  1155. if (ret) {
  1156. DRM_ERROR("kv_set_thermal_temperature_range failed\n");
  1157. return ret;
  1158. }
  1159. amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
  1160. AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
  1161. amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
  1162. AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
  1163. }
  1164. return ret;
  1165. }
  1166. static void kv_dpm_disable(struct amdgpu_device *adev)
  1167. {
  1168. amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
  1169. AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
  1170. amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
  1171. AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
  1172. amdgpu_kv_smc_bapm_enable(adev, false);
  1173. if (adev->asic_type == CHIP_MULLINS)
  1174. kv_enable_nb_dpm(adev, false);
  1175. /* powerup blocks */
  1176. kv_dpm_powergate_acp(adev, false);
  1177. kv_dpm_powergate_samu(adev, false);
  1178. kv_dpm_powergate_vce(adev, false);
  1179. kv_dpm_powergate_uvd(adev, false);
  1180. kv_enable_smc_cac(adev, false);
  1181. kv_enable_didt(adev, false);
  1182. kv_clear_vc(adev);
  1183. kv_stop_dpm(adev);
  1184. kv_enable_ulv(adev, false);
  1185. kv_reset_am(adev);
  1186. kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
  1187. }
  1188. #if 0
  1189. static int kv_write_smc_soft_register(struct amdgpu_device *adev,
  1190. u16 reg_offset, u32 value)
  1191. {
  1192. struct kv_power_info *pi = kv_get_pi(adev);
  1193. return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
  1194. (u8 *)&value, sizeof(u16), pi->sram_end);
  1195. }
  1196. static int kv_read_smc_soft_register(struct amdgpu_device *adev,
  1197. u16 reg_offset, u32 *value)
  1198. {
  1199. struct kv_power_info *pi = kv_get_pi(adev);
  1200. return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
  1201. value, pi->sram_end);
  1202. }
  1203. #endif
  1204. static void kv_init_sclk_t(struct amdgpu_device *adev)
  1205. {
  1206. struct kv_power_info *pi = kv_get_pi(adev);
  1207. pi->low_sclk_interrupt_t = 0;
  1208. }
  1209. static int kv_init_fps_limits(struct amdgpu_device *adev)
  1210. {
  1211. struct kv_power_info *pi = kv_get_pi(adev);
  1212. int ret = 0;
  1213. if (pi->caps_fps) {
  1214. u16 tmp;
  1215. tmp = 45;
  1216. pi->fps_high_t = cpu_to_be16(tmp);
  1217. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1218. pi->dpm_table_start +
  1219. offsetof(SMU7_Fusion_DpmTable, FpsHighT),
  1220. (u8 *)&pi->fps_high_t,
  1221. sizeof(u16), pi->sram_end);
  1222. tmp = 30;
  1223. pi->fps_low_t = cpu_to_be16(tmp);
  1224. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1225. pi->dpm_table_start +
  1226. offsetof(SMU7_Fusion_DpmTable, FpsLowT),
  1227. (u8 *)&pi->fps_low_t,
  1228. sizeof(u16), pi->sram_end);
  1229. }
  1230. return ret;
  1231. }
  1232. static void kv_init_powergate_state(struct amdgpu_device *adev)
  1233. {
  1234. struct kv_power_info *pi = kv_get_pi(adev);
  1235. pi->uvd_power_gated = false;
  1236. pi->vce_power_gated = false;
  1237. pi->samu_power_gated = false;
  1238. pi->acp_power_gated = false;
  1239. }
  1240. static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
  1241. {
  1242. return amdgpu_kv_notify_message_to_smu(adev, enable ?
  1243. PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
  1244. }
  1245. static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
  1246. {
  1247. return amdgpu_kv_notify_message_to_smu(adev, enable ?
  1248. PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
  1249. }
  1250. static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
  1251. {
  1252. return amdgpu_kv_notify_message_to_smu(adev, enable ?
  1253. PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
  1254. }
  1255. static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
  1256. {
  1257. return amdgpu_kv_notify_message_to_smu(adev, enable ?
  1258. PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
  1259. }
  1260. static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
  1261. {
  1262. struct kv_power_info *pi = kv_get_pi(adev);
  1263. struct amdgpu_uvd_clock_voltage_dependency_table *table =
  1264. &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
  1265. int ret;
  1266. u32 mask;
  1267. if (!gate) {
  1268. if (table->count)
  1269. pi->uvd_boot_level = table->count - 1;
  1270. else
  1271. pi->uvd_boot_level = 0;
  1272. if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
  1273. mask = 1 << pi->uvd_boot_level;
  1274. } else {
  1275. mask = 0x1f;
  1276. }
  1277. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1278. pi->dpm_table_start +
  1279. offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
  1280. (uint8_t *)&pi->uvd_boot_level,
  1281. sizeof(u8), pi->sram_end);
  1282. if (ret)
  1283. return ret;
  1284. amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  1285. PPSMC_MSG_UVDDPM_SetEnabledMask,
  1286. mask);
  1287. }
  1288. return kv_enable_uvd_dpm(adev, !gate);
  1289. }
  1290. static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
  1291. {
  1292. u8 i;
  1293. struct amdgpu_vce_clock_voltage_dependency_table *table =
  1294. &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
  1295. for (i = 0; i < table->count; i++) {
  1296. if (table->entries[i].evclk >= evclk)
  1297. break;
  1298. }
  1299. return i;
  1300. }
  1301. static int kv_update_vce_dpm(struct amdgpu_device *adev,
  1302. struct amdgpu_ps *amdgpu_new_state,
  1303. struct amdgpu_ps *amdgpu_current_state)
  1304. {
  1305. struct kv_power_info *pi = kv_get_pi(adev);
  1306. struct amdgpu_vce_clock_voltage_dependency_table *table =
  1307. &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
  1308. int ret;
  1309. if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
  1310. kv_dpm_powergate_vce(adev, false);
  1311. if (pi->caps_stable_p_state)
  1312. pi->vce_boot_level = table->count - 1;
  1313. else
  1314. pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);
  1315. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1316. pi->dpm_table_start +
  1317. offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
  1318. (u8 *)&pi->vce_boot_level,
  1319. sizeof(u8),
  1320. pi->sram_end);
  1321. if (ret)
  1322. return ret;
  1323. if (pi->caps_stable_p_state)
  1324. amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  1325. PPSMC_MSG_VCEDPM_SetEnabledMask,
  1326. (1 << pi->vce_boot_level));
  1327. kv_enable_vce_dpm(adev, true);
  1328. } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
  1329. kv_enable_vce_dpm(adev, false);
  1330. kv_dpm_powergate_vce(adev, true);
  1331. }
  1332. return 0;
  1333. }
  1334. static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
  1335. {
  1336. struct kv_power_info *pi = kv_get_pi(adev);
  1337. struct amdgpu_clock_voltage_dependency_table *table =
  1338. &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
  1339. int ret;
  1340. if (!gate) {
  1341. if (pi->caps_stable_p_state)
  1342. pi->samu_boot_level = table->count - 1;
  1343. else
  1344. pi->samu_boot_level = 0;
  1345. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1346. pi->dpm_table_start +
  1347. offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
  1348. (u8 *)&pi->samu_boot_level,
  1349. sizeof(u8),
  1350. pi->sram_end);
  1351. if (ret)
  1352. return ret;
  1353. if (pi->caps_stable_p_state)
  1354. amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  1355. PPSMC_MSG_SAMUDPM_SetEnabledMask,
  1356. (1 << pi->samu_boot_level));
  1357. }
  1358. return kv_enable_samu_dpm(adev, !gate);
  1359. }
  1360. static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
  1361. {
  1362. u8 i;
  1363. struct amdgpu_clock_voltage_dependency_table *table =
  1364. &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
  1365. for (i = 0; i < table->count; i++) {
  1366. if (table->entries[i].clk >= 0) /* XXX */
  1367. break;
  1368. }
  1369. if (i >= table->count)
  1370. i = table->count - 1;
  1371. return i;
  1372. }
  1373. static void kv_update_acp_boot_level(struct amdgpu_device *adev)
  1374. {
  1375. struct kv_power_info *pi = kv_get_pi(adev);
  1376. u8 acp_boot_level;
  1377. if (!pi->caps_stable_p_state) {
  1378. acp_boot_level = kv_get_acp_boot_level(adev);
  1379. if (acp_boot_level != pi->acp_boot_level) {
  1380. pi->acp_boot_level = acp_boot_level;
  1381. amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  1382. PPSMC_MSG_ACPDPM_SetEnabledMask,
  1383. (1 << pi->acp_boot_level));
  1384. }
  1385. }
  1386. }
  1387. static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
  1388. {
  1389. struct kv_power_info *pi = kv_get_pi(adev);
  1390. struct amdgpu_clock_voltage_dependency_table *table =
  1391. &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
  1392. int ret;
  1393. if (!gate) {
  1394. if (pi->caps_stable_p_state)
  1395. pi->acp_boot_level = table->count - 1;
  1396. else
  1397. pi->acp_boot_level = kv_get_acp_boot_level(adev);
  1398. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1399. pi->dpm_table_start +
  1400. offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
  1401. (u8 *)&pi->acp_boot_level,
  1402. sizeof(u8),
  1403. pi->sram_end);
  1404. if (ret)
  1405. return ret;
  1406. if (pi->caps_stable_p_state)
  1407. amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  1408. PPSMC_MSG_ACPDPM_SetEnabledMask,
  1409. (1 << pi->acp_boot_level));
  1410. }
  1411. return kv_enable_acp_dpm(adev, !gate);
  1412. }
  1413. static void kv_dpm_powergate_uvd(void *handle, bool gate)
  1414. {
  1415. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1416. struct kv_power_info *pi = kv_get_pi(adev);
  1417. int ret;
  1418. pi->uvd_power_gated = gate;
  1419. if (gate) {
  1420. /* stop the UVD block */
  1421. ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
  1422. AMD_PG_STATE_GATE);
  1423. kv_update_uvd_dpm(adev, gate);
  1424. if (pi->caps_uvd_pg)
  1425. /* power off the UVD block */
  1426. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
  1427. } else {
  1428. if (pi->caps_uvd_pg)
  1429. /* power on the UVD block */
  1430. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
  1431. /* re-init the UVD block */
  1432. kv_update_uvd_dpm(adev, gate);
  1433. ret = amdgpu_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
  1434. AMD_PG_STATE_UNGATE);
  1435. }
  1436. }
  1437. static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
  1438. {
  1439. struct kv_power_info *pi = kv_get_pi(adev);
  1440. if (pi->vce_power_gated == gate)
  1441. return;
  1442. pi->vce_power_gated = gate;
  1443. if (!pi->caps_vce_pg)
  1444. return;
  1445. if (gate)
  1446. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
  1447. else
  1448. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
  1449. }
  1450. static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
  1451. {
  1452. struct kv_power_info *pi = kv_get_pi(adev);
  1453. if (pi->samu_power_gated == gate)
  1454. return;
  1455. pi->samu_power_gated = gate;
  1456. if (gate) {
  1457. kv_update_samu_dpm(adev, true);
  1458. if (pi->caps_samu_pg)
  1459. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
  1460. } else {
  1461. if (pi->caps_samu_pg)
  1462. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
  1463. kv_update_samu_dpm(adev, false);
  1464. }
  1465. }
  1466. static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
  1467. {
  1468. struct kv_power_info *pi = kv_get_pi(adev);
  1469. if (pi->acp_power_gated == gate)
  1470. return;
  1471. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
  1472. return;
  1473. pi->acp_power_gated = gate;
  1474. if (gate) {
  1475. kv_update_acp_dpm(adev, true);
  1476. if (pi->caps_acp_pg)
  1477. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
  1478. } else {
  1479. if (pi->caps_acp_pg)
  1480. amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
  1481. kv_update_acp_dpm(adev, false);
  1482. }
  1483. }
  1484. static void kv_set_valid_clock_range(struct amdgpu_device *adev,
  1485. struct amdgpu_ps *new_rps)
  1486. {
  1487. struct kv_ps *new_ps = kv_get_ps(new_rps);
  1488. struct kv_power_info *pi = kv_get_pi(adev);
  1489. u32 i;
  1490. struct amdgpu_clock_voltage_dependency_table *table =
  1491. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  1492. if (table && table->count) {
  1493. for (i = 0; i < pi->graphics_dpm_level_count; i++) {
  1494. if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
  1495. (i == (pi->graphics_dpm_level_count - 1))) {
  1496. pi->lowest_valid = i;
  1497. break;
  1498. }
  1499. }
  1500. for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
  1501. if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
  1502. break;
  1503. }
  1504. pi->highest_valid = i;
  1505. if (pi->lowest_valid > pi->highest_valid) {
  1506. if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
  1507. (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
  1508. pi->highest_valid = pi->lowest_valid;
  1509. else
  1510. pi->lowest_valid = pi->highest_valid;
  1511. }
  1512. } else {
  1513. struct sumo_sclk_voltage_mapping_table *table =
  1514. &pi->sys_info.sclk_voltage_mapping_table;
  1515. for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
  1516. if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
  1517. i == (int)(pi->graphics_dpm_level_count - 1)) {
  1518. pi->lowest_valid = i;
  1519. break;
  1520. }
  1521. }
  1522. for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
  1523. if (table->entries[i].sclk_frequency <=
  1524. new_ps->levels[new_ps->num_levels - 1].sclk)
  1525. break;
  1526. }
  1527. pi->highest_valid = i;
  1528. if (pi->lowest_valid > pi->highest_valid) {
  1529. if ((new_ps->levels[0].sclk -
  1530. table->entries[pi->highest_valid].sclk_frequency) >
  1531. (table->entries[pi->lowest_valid].sclk_frequency -
1532. new_ps->levels[new_ps->num_levels - 1].sclk))
  1533. pi->highest_valid = pi->lowest_valid;
  1534. else
  1535. pi->lowest_valid = pi->highest_valid;
  1536. }
  1537. }
  1538. }
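/*
 * When DFS bypass is supported, write the boot graphics level's ClkBypassCntl
 * value (or 0 if the new state does not need bypass) into that level's entry
 * of the SMC DPM table.
 */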
  1539. static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
  1540. struct amdgpu_ps *new_rps)
  1541. {
  1542. struct kv_ps *new_ps = kv_get_ps(new_rps);
  1543. struct kv_power_info *pi = kv_get_pi(adev);
  1544. int ret = 0;
  1545. u8 clk_bypass_cntl;
  1546. if (pi->caps_enable_dfs_bypass) {
  1547. clk_bypass_cntl = new_ps->need_dfs_bypass ?
  1548. pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
  1549. ret = amdgpu_kv_copy_bytes_to_smc(adev,
  1550. (pi->dpm_table_start +
  1551. offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
  1552. (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
  1553. offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
  1554. &clk_bypass_cntl,
  1555. sizeof(u8), pi->sram_end);
  1556. }
  1557. return ret;
  1558. }
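/*
 * North-bridge DPM is toggled via PPSMC_MSG_NBDPM_Enable/Disable; the cached
 * nb_dpm_enabled flag avoids resending the message when the state already
 * matches.
 */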
  1559. static int kv_enable_nb_dpm(struct amdgpu_device *adev,
  1560. bool enable)
  1561. {
  1562. struct kv_power_info *pi = kv_get_pi(adev);
  1563. int ret = 0;
  1564. if (enable) {
  1565. if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
  1566. ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
  1567. if (ret == 0)
  1568. pi->nb_dpm_enabled = true;
  1569. }
  1570. } else {
  1571. if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
  1572. ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
  1573. if (ret == 0)
  1574. pi->nb_dpm_enabled = false;
  1575. }
  1576. }
  1577. return ret;
  1578. }
  1579. static int kv_dpm_force_performance_level(void *handle,
  1580. enum amd_dpm_forced_level level)
  1581. {
  1582. int ret;
  1583. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1584. if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
  1585. ret = kv_force_dpm_highest(adev);
  1586. if (ret)
  1587. return ret;
  1588. } else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
  1589. ret = kv_force_dpm_lowest(adev);
  1590. if (ret)
  1591. return ret;
  1592. } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
  1593. ret = kv_unforce_levels(adev);
  1594. if (ret)
  1595. return ret;
  1596. }
  1597. adev->pm.dpm.forced_level = level;
  1598. return 0;
  1599. }
  1600. static int kv_dpm_pre_set_power_state(void *handle)
  1601. {
  1602. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1603. struct kv_power_info *pi = kv_get_pi(adev);
  1604. struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
  1605. struct amdgpu_ps *new_ps = &requested_ps;
  1606. kv_update_requested_ps(adev, new_ps);
  1607. kv_apply_state_adjust_rules(adev,
  1608. &pi->requested_rps,
  1609. &pi->current_rps);
  1610. return 0;
  1611. }
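/*
 * Main state-switch path. Both branches recompute the valid level window, DFS
 * bypass, deep-sleep dividers, NB P-state hints and watermarks, then push the
 * table to the SMU. Kabini/Mullins force the lowest valid level around the
 * upload and unforce afterwards (NB DPM is re-enabled on Mullins only), while
 * the other ASICs freeze SCLK DPM during the upload and also refresh the ACP
 * boot level.
 */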
  1612. static int kv_dpm_set_power_state(void *handle)
  1613. {
  1614. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1615. struct kv_power_info *pi = kv_get_pi(adev);
  1616. struct amdgpu_ps *new_ps = &pi->requested_rps;
  1617. struct amdgpu_ps *old_ps = &pi->current_rps;
  1618. int ret;
  1619. if (pi->bapm_enable) {
  1620. ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.dpm.ac_power);
  1621. if (ret) {
  1622. DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
  1623. return ret;
  1624. }
  1625. }
  1626. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
  1627. if (pi->enable_dpm) {
  1628. kv_set_valid_clock_range(adev, new_ps);
  1629. kv_update_dfs_bypass_settings(adev, new_ps);
  1630. ret = kv_calculate_ds_divider(adev);
  1631. if (ret) {
  1632. DRM_ERROR("kv_calculate_ds_divider failed\n");
  1633. return ret;
  1634. }
  1635. kv_calculate_nbps_level_settings(adev);
  1636. kv_calculate_dpm_settings(adev);
  1637. kv_force_lowest_valid(adev);
  1638. kv_enable_new_levels(adev);
  1639. kv_upload_dpm_settings(adev);
  1640. kv_program_nbps_index_settings(adev, new_ps);
  1641. kv_unforce_levels(adev);
  1642. kv_set_enabled_levels(adev);
  1643. kv_force_lowest_valid(adev);
  1644. kv_unforce_levels(adev);
  1645. ret = kv_update_vce_dpm(adev, new_ps, old_ps);
  1646. if (ret) {
  1647. DRM_ERROR("kv_update_vce_dpm failed\n");
  1648. return ret;
  1649. }
  1650. kv_update_sclk_t(adev);
  1651. if (adev->asic_type == CHIP_MULLINS)
  1652. kv_enable_nb_dpm(adev, true);
  1653. }
  1654. } else {
  1655. if (pi->enable_dpm) {
  1656. kv_set_valid_clock_range(adev, new_ps);
  1657. kv_update_dfs_bypass_settings(adev, new_ps);
  1658. ret = kv_calculate_ds_divider(adev);
  1659. if (ret) {
  1660. DRM_ERROR("kv_calculate_ds_divider failed\n");
  1661. return ret;
  1662. }
  1663. kv_calculate_nbps_level_settings(adev);
  1664. kv_calculate_dpm_settings(adev);
  1665. kv_freeze_sclk_dpm(adev, true);
  1666. kv_upload_dpm_settings(adev);
  1667. kv_program_nbps_index_settings(adev, new_ps);
  1668. kv_freeze_sclk_dpm(adev, false);
  1669. kv_set_enabled_levels(adev);
  1670. ret = kv_update_vce_dpm(adev, new_ps, old_ps);
  1671. if (ret) {
  1672. DRM_ERROR("kv_update_vce_dpm failed\n");
  1673. return ret;
  1674. }
  1675. kv_update_acp_boot_level(adev);
  1676. kv_update_sclk_t(adev);
  1677. kv_enable_nb_dpm(adev, true);
  1678. }
  1679. }
  1680. return 0;
  1681. }
  1682. static void kv_dpm_post_set_power_state(void *handle)
  1683. {
  1684. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  1685. struct kv_power_info *pi = kv_get_pi(adev);
  1686. struct amdgpu_ps *new_ps = &pi->requested_rps;
  1687. kv_update_current_ps(adev, new_ps);
  1688. }
  1689. static void kv_dpm_setup_asic(struct amdgpu_device *adev)
  1690. {
  1691. sumo_take_smu_control(adev, true);
  1692. kv_init_powergate_state(adev);
  1693. kv_init_sclk_t(adev);
  1694. }
  1695. #if 0
  1696. static void kv_dpm_reset_asic(struct amdgpu_device *adev)
  1697. {
  1698. struct kv_power_info *pi = kv_get_pi(adev);
  1699. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
  1700. kv_force_lowest_valid(adev);
  1701. kv_init_graphics_levels(adev);
  1702. kv_program_bootup_state(adev);
  1703. kv_upload_dpm_settings(adev);
  1704. kv_force_lowest_valid(adev);
  1705. kv_unforce_levels(adev);
  1706. } else {
  1707. kv_init_graphics_levels(adev);
  1708. kv_program_bootup_state(adev);
  1709. kv_freeze_sclk_dpm(adev, true);
  1710. kv_upload_dpm_settings(adev);
  1711. kv_freeze_sclk_dpm(adev, false);
  1712. kv_set_enabled_level(adev, pi->graphics_boot_level);
  1713. }
  1714. }
  1715. #endif
  1716. static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
  1717. struct amdgpu_clock_and_voltage_limits *table)
  1718. {
  1719. struct kv_power_info *pi = kv_get_pi(adev);
  1720. if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
  1721. int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
  1722. table->sclk =
  1723. pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
  1724. table->vddc =
  1725. kv_convert_2bit_index_to_voltage(adev,
  1726. pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
  1727. }
  1728. table->mclk = pi->sys_info.nbp_memory_clock[0];
  1729. }
  1730. static void kv_patch_voltage_values(struct amdgpu_device *adev)
  1731. {
  1732. int i;
  1733. struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
  1734. &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
  1735. struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
  1736. &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
  1737. struct amdgpu_clock_voltage_dependency_table *samu_table =
  1738. &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
  1739. struct amdgpu_clock_voltage_dependency_table *acp_table =
  1740. &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
  1741. if (uvd_table->count) {
  1742. for (i = 0; i < uvd_table->count; i++)
  1743. uvd_table->entries[i].v =
  1744. kv_convert_8bit_index_to_voltage(adev,
  1745. uvd_table->entries[i].v);
  1746. }
  1747. if (vce_table->count) {
  1748. for (i = 0; i < vce_table->count; i++)
  1749. vce_table->entries[i].v =
  1750. kv_convert_8bit_index_to_voltage(adev,
  1751. vce_table->entries[i].v);
  1752. }
  1753. if (samu_table->count) {
  1754. for (i = 0; i < samu_table->count; i++)
  1755. samu_table->entries[i].v =
  1756. kv_convert_8bit_index_to_voltage(adev,
  1757. samu_table->entries[i].v);
  1758. }
  1759. if (acp_table->count) {
  1760. for (i = 0; i < acp_table->count; i++)
  1761. acp_table->entries[i].v =
  1762. kv_convert_8bit_index_to_voltage(adev,
  1763. acp_table->entries[i].v);
  1764. }
  1765. }
  1766. static void kv_construct_boot_state(struct amdgpu_device *adev)
  1767. {
  1768. struct kv_power_info *pi = kv_get_pi(adev);
  1769. pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
  1770. pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
  1771. pi->boot_pl.ds_divider_index = 0;
  1772. pi->boot_pl.ss_divider_index = 0;
  1773. pi->boot_pl.allow_gnb_slow = 1;
  1774. pi->boot_pl.force_nbp_state = 0;
  1775. pi->boot_pl.display_wm = 0;
  1776. pi->boot_pl.vce_wm = 0;
  1777. }
  1778. static int kv_force_dpm_highest(struct amdgpu_device *adev)
  1779. {
  1780. int ret;
  1781. u32 enable_mask, i;
  1782. ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
  1783. if (ret)
  1784. return ret;
  1785. for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
  1786. if (enable_mask & (1 << i))
  1787. break;
  1788. }
  1789. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
  1790. return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
  1791. else
  1792. return kv_set_enabled_level(adev, i);
  1793. }
  1794. static int kv_force_dpm_lowest(struct amdgpu_device *adev)
  1795. {
  1796. int ret;
  1797. u32 enable_mask, i;
  1798. ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
  1799. if (ret)
  1800. return ret;
  1801. for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
  1802. if (enable_mask & (1 << i))
  1803. break;
  1804. }
  1805. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
  1806. return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
  1807. else
  1808. return kv_set_enabled_level(adev, i);
  1809. }
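/*
 * Pick the largest deep-sleep divider id i (the clock becomes sclk >> i) that
 * keeps the divided clock at or above the deep-sleep floor. For example, with
 * sclk = 20000 and a 10000 floor, i = 1 is the deepest divider that still
 * yields 20000 >> 1 = 10000.
 */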
  1810. static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
  1811. u32 sclk, u32 min_sclk_in_sr)
  1812. {
  1813. struct kv_power_info *pi = kv_get_pi(adev);
  1814. u32 i;
  1815. u32 temp;
  1816. u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);
  1817. if (sclk < min)
  1818. return 0;
  1819. if (!pi->caps_sclk_ds)
  1820. return 0;
  1821. for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
  1822. temp = sclk >> i;
  1823. if (temp >= min)
  1824. break;
  1825. }
  1826. return (u8)i;
  1827. }
  1828. static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
  1829. {
  1830. struct kv_power_info *pi = kv_get_pi(adev);
  1831. struct amdgpu_clock_voltage_dependency_table *table =
  1832. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  1833. int i;
  1834. if (table && table->count) {
  1835. for (i = table->count - 1; i >= 0; i--) {
  1836. if (pi->high_voltage_t &&
  1837. (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
  1838. pi->high_voltage_t)) {
  1839. *limit = i;
  1840. return 0;
  1841. }
  1842. }
  1843. } else {
  1844. struct sumo_sclk_voltage_mapping_table *table =
  1845. &pi->sys_info.sclk_voltage_mapping_table;
  1846. for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
  1847. if (pi->high_voltage_t &&
  1848. (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
  1849. pi->high_voltage_t)) {
  1850. *limit = i;
  1851. return 0;
  1852. }
  1853. }
  1854. }
  1855. *limit = 0;
  1856. return 0;
  1857. }
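/*
 * Adjust the requested state before programming it: raise every level's sclk
 * to the minimum (or, with stable p-state, snap all levels to a clock derived
 * from ~75% of the max AC sclk), bump sclk for active VCE, cap levels that
 * would exceed high_voltage_t at the high-voltage limit, and pick the NB
 * P-state lo/hi hints depending on ASIC, memory clock, display count and
 * battery/video state.
 */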
  1858. static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
  1859. struct amdgpu_ps *new_rps,
  1860. struct amdgpu_ps *old_rps)
  1861. {
  1862. struct kv_ps *ps = kv_get_ps(new_rps);
  1863. struct kv_power_info *pi = kv_get_pi(adev);
  1864. u32 min_sclk = 10000; /* ??? */
  1865. u32 sclk, mclk = 0;
  1866. int i, limit;
  1867. bool force_high;
  1868. struct amdgpu_clock_voltage_dependency_table *table =
  1869. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  1870. u32 stable_p_state_sclk = 0;
  1871. struct amdgpu_clock_and_voltage_limits *max_limits =
  1872. &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
  1873. if (new_rps->vce_active) {
  1874. new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
  1875. new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
  1876. } else {
  1877. new_rps->evclk = 0;
  1878. new_rps->ecclk = 0;
  1879. }
  1880. mclk = max_limits->mclk;
  1881. sclk = min_sclk;
  1882. if (pi->caps_stable_p_state) {
  1883. stable_p_state_sclk = (max_limits->sclk * 75) / 100;
  1884. for (i = table->count - 1; i >= 0; i--) {
  1885. if (stable_p_state_sclk >= table->entries[i].clk) {
  1886. stable_p_state_sclk = table->entries[i].clk;
  1887. break;
  1888. }
  1889. }
  1890. if (i > 0)
  1891. stable_p_state_sclk = table->entries[0].clk;
  1892. sclk = stable_p_state_sclk;
  1893. }
  1894. if (new_rps->vce_active) {
  1895. if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
  1896. sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
  1897. }
  1898. ps->need_dfs_bypass = true;
  1899. for (i = 0; i < ps->num_levels; i++) {
  1900. if (ps->levels[i].sclk < sclk)
  1901. ps->levels[i].sclk = sclk;
  1902. }
  1903. if (table && table->count) {
  1904. for (i = 0; i < ps->num_levels; i++) {
  1905. if (pi->high_voltage_t &&
  1906. (pi->high_voltage_t <
  1907. kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
  1908. kv_get_high_voltage_limit(adev, &limit);
  1909. ps->levels[i].sclk = table->entries[limit].clk;
  1910. }
  1911. }
  1912. } else {
  1913. struct sumo_sclk_voltage_mapping_table *table =
  1914. &pi->sys_info.sclk_voltage_mapping_table;
  1915. for (i = 0; i < ps->num_levels; i++) {
  1916. if (pi->high_voltage_t &&
  1917. (pi->high_voltage_t <
  1918. kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
  1919. kv_get_high_voltage_limit(adev, &limit);
  1920. ps->levels[i].sclk = table->entries[limit].sclk_frequency;
  1921. }
  1922. }
  1923. }
  1924. if (pi->caps_stable_p_state) {
  1925. for (i = 0; i < ps->num_levels; i++) {
  1926. ps->levels[i].sclk = stable_p_state_sclk;
  1927. }
  1928. }
  1929. pi->video_start = new_rps->dclk || new_rps->vclk ||
  1930. new_rps->evclk || new_rps->ecclk;
  1931. if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
  1932. ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
  1933. pi->battery_state = true;
  1934. else
  1935. pi->battery_state = false;
  1936. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
  1937. ps->dpm0_pg_nb_ps_lo = 0x1;
  1938. ps->dpm0_pg_nb_ps_hi = 0x0;
  1939. ps->dpmx_nb_ps_lo = 0x1;
  1940. ps->dpmx_nb_ps_hi = 0x0;
  1941. } else {
  1942. ps->dpm0_pg_nb_ps_lo = 0x3;
  1943. ps->dpm0_pg_nb_ps_hi = 0x0;
  1944. ps->dpmx_nb_ps_lo = 0x3;
  1945. ps->dpmx_nb_ps_hi = 0x0;
  1946. if (pi->sys_info.nb_dpm_enable) {
  1947. force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
  1948. pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
  1949. pi->disable_nb_ps3_in_battery;
  1950. ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
  1951. ps->dpm0_pg_nb_ps_hi = 0x2;
  1952. ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
  1953. ps->dpmx_nb_ps_hi = 0x2;
  1954. }
  1955. }
  1956. }
  1957. static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
  1958. u32 index, bool enable)
  1959. {
  1960. struct kv_power_info *pi = kv_get_pi(adev);
  1961. pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
  1962. }
  1963. static int kv_calculate_ds_divider(struct amdgpu_device *adev)
  1964. {
  1965. struct kv_power_info *pi = kv_get_pi(adev);
  1966. u32 sclk_in_sr = 10000; /* ??? */
  1967. u32 i;
  1968. if (pi->lowest_valid > pi->highest_valid)
  1969. return -EINVAL;
  1970. for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
  1971. pi->graphics_level[i].DeepSleepDivId =
  1972. kv_get_sleep_divider_id_from_clock(adev,
  1973. be32_to_cpu(pi->graphics_level[i].SclkFrequency),
  1974. sclk_in_sr);
  1975. }
  1976. return 0;
  1977. }
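/*
 * Fill the per-level NB P-state hints (GnbSlow, ForceNbPs1, UpH) for the valid
 * level window. On Kabini/Mullins, GnbSlow is cleared on every level when a
 * high memory clock, three or more active CRTCs, or video playback forces the
 * high NB P-state; otherwise GnbSlow is cleared on levels 1-4 and, on battery,
 * level 0 forces NB P-state 1. The other ASICs only tweak the lowest valid
 * level when NB DPM is enabled and the system is on battery.
 */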
  1978. static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
  1979. {
  1980. struct kv_power_info *pi = kv_get_pi(adev);
  1981. u32 i;
  1982. bool force_high;
  1983. struct amdgpu_clock_and_voltage_limits *max_limits =
  1984. &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
  1985. u32 mclk = max_limits->mclk;
  1986. if (pi->lowest_valid > pi->highest_valid)
  1987. return -EINVAL;
  1988. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
  1989. for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
  1990. pi->graphics_level[i].GnbSlow = 1;
  1991. pi->graphics_level[i].ForceNbPs1 = 0;
  1992. pi->graphics_level[i].UpH = 0;
  1993. }
  1994. if (!pi->sys_info.nb_dpm_enable)
  1995. return 0;
  1996. force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
  1997. (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
  1998. if (force_high) {
  1999. for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
  2000. pi->graphics_level[i].GnbSlow = 0;
  2001. } else {
  2002. if (pi->battery_state)
  2003. pi->graphics_level[0].ForceNbPs1 = 1;
  2004. pi->graphics_level[1].GnbSlow = 0;
  2005. pi->graphics_level[2].GnbSlow = 0;
  2006. pi->graphics_level[3].GnbSlow = 0;
  2007. pi->graphics_level[4].GnbSlow = 0;
  2008. }
  2009. } else {
  2010. for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
  2011. pi->graphics_level[i].GnbSlow = 1;
  2012. pi->graphics_level[i].ForceNbPs1 = 0;
  2013. pi->graphics_level[i].UpH = 0;
  2014. }
  2015. if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
  2016. pi->graphics_level[pi->lowest_valid].UpH = 0x28;
  2017. pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
  2018. if (pi->lowest_valid != pi->highest_valid)
  2019. pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
  2020. }
  2021. }
  2022. return 0;
  2023. }
  2024. static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
  2025. {
  2026. struct kv_power_info *pi = kv_get_pi(adev);
  2027. u32 i;
  2028. if (pi->lowest_valid > pi->highest_valid)
  2029. return -EINVAL;
  2030. for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
  2031. pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
  2032. return 0;
  2033. }
  2034. static void kv_init_graphics_levels(struct amdgpu_device *adev)
  2035. {
  2036. struct kv_power_info *pi = kv_get_pi(adev);
  2037. u32 i;
  2038. struct amdgpu_clock_voltage_dependency_table *table =
  2039. &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
  2040. if (table && table->count) {
  2041. u32 vid_2bit;
  2042. pi->graphics_dpm_level_count = 0;
  2043. for (i = 0; i < table->count; i++) {
  2044. if (pi->high_voltage_t &&
  2045. (pi->high_voltage_t <
  2046. kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
  2047. break;
  2048. kv_set_divider_value(adev, i, table->entries[i].clk);
  2049. vid_2bit = kv_convert_vid7_to_vid2(adev,
  2050. &pi->sys_info.vid_mapping_table,
  2051. table->entries[i].v);
  2052. kv_set_vid(adev, i, vid_2bit);
  2053. kv_set_at(adev, i, pi->at[i]);
  2054. kv_dpm_power_level_enabled_for_throttle(adev, i, true);
  2055. pi->graphics_dpm_level_count++;
  2056. }
  2057. } else {
  2058. struct sumo_sclk_voltage_mapping_table *table =
  2059. &pi->sys_info.sclk_voltage_mapping_table;
  2060. pi->graphics_dpm_level_count = 0;
  2061. for (i = 0; i < table->num_max_dpm_entries; i++) {
  2062. if (pi->high_voltage_t &&
  2063. pi->high_voltage_t <
  2064. kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
  2065. break;
  2066. kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
  2067. kv_set_vid(adev, i, table->entries[i].vid_2bit);
  2068. kv_set_at(adev, i, pi->at[i]);
  2069. kv_dpm_power_level_enabled_for_throttle(adev, i, true);
  2070. pi->graphics_dpm_level_count++;
  2071. }
  2072. }
  2073. for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
  2074. kv_dpm_power_level_enable(adev, i, false);
  2075. }
  2076. static void kv_enable_new_levels(struct amdgpu_device *adev)
  2077. {
  2078. struct kv_power_info *pi = kv_get_pi(adev);
  2079. u32 i;
  2080. for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
  2081. if (i >= pi->lowest_valid && i <= pi->highest_valid)
  2082. kv_dpm_power_level_enable(adev, i, true);
  2083. }
  2084. }
  2085. static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
  2086. {
  2087. u32 new_mask = (1 << level);
  2088. return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  2089. PPSMC_MSG_SCLKDPM_SetEnabledMask,
  2090. new_mask);
  2091. }
  2092. static int kv_set_enabled_levels(struct amdgpu_device *adev)
  2093. {
  2094. struct kv_power_info *pi = kv_get_pi(adev);
  2095. u32 i, new_mask = 0;
  2096. for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
  2097. new_mask |= (1 << i);
  2098. return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
  2099. PPSMC_MSG_SCLKDPM_SetEnabledMask,
  2100. new_mask);
  2101. }
  2102. static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
  2103. struct amdgpu_ps *new_rps)
  2104. {
  2105. struct kv_ps *new_ps = kv_get_ps(new_rps);
  2106. struct kv_power_info *pi = kv_get_pi(adev);
  2107. u32 nbdpmconfig1;
  2108. if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
  2109. return;
  2110. if (pi->sys_info.nb_dpm_enable) {
  2111. nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
  2112. nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
  2113. NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
  2114. NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
  2115. NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
  2116. nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
  2117. (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
  2118. (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
  2119. (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
  2120. WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
  2121. }
  2122. }
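/*
 * Program the thermal interrupt thresholds. The DIG_THERM_INTH/INTL fields
 * hold (temperature in degrees C) + 49, the inverse of the decode in
 * kv_dpm_get_temp(); for example, a 120000 millidegree high limit is written
 * as 49 + 120 = 169.
 */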
  2123. static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
  2124. int min_temp, int max_temp)
  2125. {
  2126. int low_temp = 0 * 1000;
  2127. int high_temp = 255 * 1000;
  2128. u32 tmp;
  2129. if (low_temp < min_temp)
  2130. low_temp = min_temp;
  2131. if (high_temp > max_temp)
  2132. high_temp = max_temp;
  2133. if (high_temp < low_temp) {
  2134. DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
  2135. return -EINVAL;
  2136. }
  2137. tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
  2138. tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
  2139. CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
  2140. tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
  2141. ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
  2142. WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
  2143. adev->pm.dpm.thermal.min_temp = low_temp;
  2144. adev->pm.dpm.thermal.max_temp = high_temp;
  2145. return 0;
  2146. }
  2147. union igp_info {
  2148. struct _ATOM_INTEGRATED_SYSTEM_INFO info;
  2149. struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
  2150. struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
  2151. struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
  2152. struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
  2153. struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
  2154. };
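/*
 * Parse the ATOM IntegratedSystemInfo table (only content revision 8 is
 * accepted): bootup engine/UMA clocks, HTC thermal limits, NB DPM capability
 * and NB P-state clocks, DFS bypass capability, and the sclk-voltage / VID
 * mapping tables, finishing with the max power limits table.
 */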
  2155. static int kv_parse_sys_info_table(struct amdgpu_device *adev)
  2156. {
  2157. struct kv_power_info *pi = kv_get_pi(adev);
  2158. struct amdgpu_mode_info *mode_info = &adev->mode_info;
  2159. int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
  2160. union igp_info *igp_info;
  2161. u8 frev, crev;
  2162. u16 data_offset;
  2163. int i;
  2164. if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
  2165. &frev, &crev, &data_offset)) {
  2166. igp_info = (union igp_info *)(mode_info->atom_context->bios +
  2167. data_offset);
  2168. if (crev != 8) {
  2169. DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
  2170. return -EINVAL;
  2171. }
  2172. pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
  2173. pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
  2174. pi->sys_info.bootup_nb_voltage_index =
  2175. le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
  2176. if (igp_info->info_8.ucHtcTmpLmt == 0)
  2177. pi->sys_info.htc_tmp_lmt = 203;
  2178. else
  2179. pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
  2180. if (igp_info->info_8.ucHtcHystLmt == 0)
  2181. pi->sys_info.htc_hyst_lmt = 5;
  2182. else
  2183. pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
  2184. if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
  2185. DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
  2186. }
  2187. if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
  2188. pi->sys_info.nb_dpm_enable = true;
  2189. else
  2190. pi->sys_info.nb_dpm_enable = false;
  2191. for (i = 0; i < KV_NUM_NBPSTATES; i++) {
  2192. pi->sys_info.nbp_memory_clock[i] =
  2193. le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
  2194. pi->sys_info.nbp_n_clock[i] =
  2195. le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
  2196. }
  2197. if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
  2198. SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
  2199. pi->caps_enable_dfs_bypass = true;
  2200. sumo_construct_sclk_voltage_mapping_table(adev,
  2201. &pi->sys_info.sclk_voltage_mapping_table,
  2202. igp_info->info_8.sAvail_SCLK);
  2203. sumo_construct_vid_mapping_table(adev,
  2204. &pi->sys_info.vid_mapping_table,
  2205. igp_info->info_8.sAvail_SCLK);
  2206. kv_construct_max_power_limits_table(adev,
  2207. &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
  2208. }
  2209. return 0;
  2210. }
  2211. union power_info {
  2212. struct _ATOM_POWERPLAY_INFO info;
  2213. struct _ATOM_POWERPLAY_INFO_V2 info_2;
  2214. struct _ATOM_POWERPLAY_INFO_V3 info_3;
  2215. struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
  2216. struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
  2217. struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
  2218. };
  2219. union pplib_clock_info {
  2220. struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
  2221. struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
  2222. struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
  2223. struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
  2224. };
  2225. union pplib_power_state {
  2226. struct _ATOM_PPLIB_STATE v1;
  2227. struct _ATOM_PPLIB_STATE_V2 v2;
  2228. };
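/*
 * The helpers below translate the ATOM PPLib power-play table into
 * kv_ps/amdgpu_ps structures: non-clock info (classification, UVD clocks,
 * boot/UVD state markers), per-level clock info, and finally the whole state
 * array plus the VCE state clocks in kv_parse_power_table().
 */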
  2229. static void kv_patch_boot_state(struct amdgpu_device *adev,
  2230. struct kv_ps *ps)
  2231. {
  2232. struct kv_power_info *pi = kv_get_pi(adev);
  2233. ps->num_levels = 1;
  2234. ps->levels[0] = pi->boot_pl;
  2235. }
  2236. static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
  2237. struct amdgpu_ps *rps,
  2238. struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
  2239. u8 table_rev)
  2240. {
  2241. struct kv_ps *ps = kv_get_ps(rps);
  2242. rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
  2243. rps->class = le16_to_cpu(non_clock_info->usClassification);
  2244. rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
  2245. if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
  2246. rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
  2247. rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
  2248. } else {
  2249. rps->vclk = 0;
  2250. rps->dclk = 0;
  2251. }
  2252. if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
  2253. adev->pm.dpm.boot_ps = rps;
  2254. kv_patch_boot_state(adev, ps);
  2255. }
  2256. if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
  2257. adev->pm.dpm.uvd_ps = rps;
  2258. }
  2259. static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
  2260. struct amdgpu_ps *rps, int index,
  2261. union pplib_clock_info *clock_info)
  2262. {
  2263. struct kv_power_info *pi = kv_get_pi(adev);
  2264. struct kv_ps *ps = kv_get_ps(rps);
  2265. struct kv_pl *pl = &ps->levels[index];
  2266. u32 sclk;
  2267. sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
  2268. sclk |= clock_info->sumo.ucEngineClockHigh << 16;
  2269. pl->sclk = sclk;
  2270. pl->vddc_index = clock_info->sumo.vddcIndex;
  2271. ps->num_levels = index + 1;
  2272. if (pi->caps_sclk_ds) {
  2273. pl->ds_divider_index = 5;
  2274. pl->ss_divider_index = 5;
  2275. }
  2276. }
  2277. static int kv_parse_power_table(struct amdgpu_device *adev)
  2278. {
  2279. struct amdgpu_mode_info *mode_info = &adev->mode_info;
  2280. struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
  2281. union pplib_power_state *power_state;
  2282. int i, j, k, non_clock_array_index, clock_array_index;
  2283. union pplib_clock_info *clock_info;
  2284. struct _StateArray *state_array;
  2285. struct _ClockInfoArray *clock_info_array;
  2286. struct _NonClockInfoArray *non_clock_info_array;
  2287. union power_info *power_info;
  2288. int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
  2289. u16 data_offset;
  2290. u8 frev, crev;
  2291. u8 *power_state_offset;
  2292. struct kv_ps *ps;
  2293. if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
  2294. &frev, &crev, &data_offset))
  2295. return -EINVAL;
  2296. power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
  2297. amdgpu_add_thermal_controller(adev);
  2298. state_array = (struct _StateArray *)
  2299. (mode_info->atom_context->bios + data_offset +
  2300. le16_to_cpu(power_info->pplib.usStateArrayOffset));
  2301. clock_info_array = (struct _ClockInfoArray *)
  2302. (mode_info->atom_context->bios + data_offset +
  2303. le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
  2304. non_clock_info_array = (struct _NonClockInfoArray *)
  2305. (mode_info->atom_context->bios + data_offset +
  2306. le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
  2307. adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
  2308. state_array->ucNumEntries, GFP_KERNEL);
  2309. if (!adev->pm.dpm.ps)
  2310. return -ENOMEM;
  2311. power_state_offset = (u8 *)state_array->states;
  2312. for (i = 0; i < state_array->ucNumEntries; i++) {
  2313. u8 *idx;
  2314. power_state = (union pplib_power_state *)power_state_offset;
  2315. non_clock_array_index = power_state->v2.nonClockInfoIndex;
  2316. non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
  2317. &non_clock_info_array->nonClockInfo[non_clock_array_index];
  2318. ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
  2319. if (ps == NULL) {
  2320. kfree(adev->pm.dpm.ps);
  2321. return -ENOMEM;
  2322. }
  2323. adev->pm.dpm.ps[i].ps_priv = ps;
  2324. k = 0;
  2325. idx = (u8 *)&power_state->v2.clockInfoIndex[0];
  2326. for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
  2327. clock_array_index = idx[j];
  2328. if (clock_array_index >= clock_info_array->ucNumEntries)
  2329. continue;
  2330. if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
  2331. break;
  2332. clock_info = (union pplib_clock_info *)
  2333. ((u8 *)&clock_info_array->clockInfo[0] +
  2334. (clock_array_index * clock_info_array->ucEntrySize));
  2335. kv_parse_pplib_clock_info(adev,
  2336. &adev->pm.dpm.ps[i], k,
  2337. clock_info);
  2338. k++;
  2339. }
  2340. kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
  2341. non_clock_info,
  2342. non_clock_info_array->ucEntrySize);
  2343. power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
  2344. }
  2345. adev->pm.dpm.num_ps = state_array->ucNumEntries;
  2346. /* fill in the vce power states */
  2347. for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
  2348. u32 sclk;
  2349. clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
  2350. clock_info = (union pplib_clock_info *)
  2351. &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
  2352. sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
  2353. sclk |= clock_info->sumo.ucEngineClockHigh << 16;
  2354. adev->pm.dpm.vce_states[i].sclk = sclk;
  2355. adev->pm.dpm.vce_states[i].mclk = 0;
  2356. }
  2357. return 0;
  2358. }
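/*
 * One-time DPM initialization: allocate kv_power_info, choose default
 * capabilities (several are gated by module parameters such as amdgpu_bapm and
 * amdgpu_pp_feature_mask, and by the ASIC powergating flags), then parse the
 * system-info and power-play tables from the VBIOS.
 */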
  2359. static int kv_dpm_init(struct amdgpu_device *adev)
  2360. {
  2361. struct kv_power_info *pi;
  2362. int ret, i;
  2363. pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
  2364. if (pi == NULL)
  2365. return -ENOMEM;
  2366. adev->pm.dpm.priv = pi;
  2367. ret = amdgpu_get_platform_caps(adev);
  2368. if (ret)
  2369. return ret;
  2370. ret = amdgpu_parse_extended_power_table(adev);
  2371. if (ret)
  2372. return ret;
  2373. for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
  2374. pi->at[i] = TRINITY_AT_DFLT;
  2375. pi->sram_end = SMC_RAM_END;
  2376. pi->enable_nb_dpm = true;
  2377. pi->caps_power_containment = true;
  2378. pi->caps_cac = true;
  2379. pi->enable_didt = false;
  2380. if (pi->enable_didt) {
  2381. pi->caps_sq_ramping = true;
  2382. pi->caps_db_ramping = true;
  2383. pi->caps_td_ramping = true;
  2384. pi->caps_tcp_ramping = true;
  2385. }
  2386. if (amdgpu_pp_feature_mask & SCLK_DEEP_SLEEP_MASK)
  2387. pi->caps_sclk_ds = true;
  2388. else
  2389. pi->caps_sclk_ds = false;
  2390. pi->enable_auto_thermal_throttling = true;
  2391. pi->disable_nb_ps3_in_battery = false;
  2392. if (amdgpu_bapm == 0)
  2393. pi->bapm_enable = false;
  2394. else
  2395. pi->bapm_enable = true;
  2396. pi->voltage_drop_t = 0;
  2397. pi->caps_sclk_throttle_low_notification = false;
  2398. pi->caps_fps = false; /* true? */
  2399. pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
  2400. pi->caps_uvd_dpm = true;
  2401. pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
  2402. pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
  2403. pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
  2404. pi->caps_stable_p_state = false;
  2405. ret = kv_parse_sys_info_table(adev);
  2406. if (ret)
  2407. return ret;
  2408. kv_patch_voltage_values(adev);
  2409. kv_construct_boot_state(adev);
  2410. ret = kv_parse_power_table(adev);
  2411. if (ret)
  2412. return ret;
  2413. pi->enable_dpm = true;
  2414. return 0;
  2415. }
  2416. static void
  2417. kv_dpm_debugfs_print_current_performance_level(void *handle,
  2418. struct seq_file *m)
  2419. {
  2420. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2421. struct kv_power_info *pi = kv_get_pi(adev);
  2422. u32 current_index =
  2423. (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
  2424. TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
  2425. TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
  2426. u32 sclk, tmp;
  2427. u16 vddc;
  2428. if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
  2429. seq_printf(m, "invalid dpm profile %d\n", current_index);
  2430. } else {
  2431. sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
  2432. tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
  2433. SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
  2434. SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
  2435. vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
  2436. seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
  2437. seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
  2438. seq_printf(m, "power level %d sclk: %u vddc: %u\n",
  2439. current_index, sclk, vddc);
  2440. }
  2441. }
  2442. static void
  2443. kv_dpm_print_power_state(void *handle, void *request_ps)
  2444. {
  2445. int i;
  2446. struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
  2447. struct kv_ps *ps = kv_get_ps(rps);
  2448. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2449. amdgpu_dpm_print_class_info(rps->class, rps->class2);
  2450. amdgpu_dpm_print_cap_info(rps->caps);
  2451. printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
  2452. for (i = 0; i < ps->num_levels; i++) {
  2453. struct kv_pl *pl = &ps->levels[i];
  2454. printk("\t\tpower level %d sclk: %u vddc: %u\n",
  2455. i, pl->sclk,
  2456. kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
  2457. }
  2458. amdgpu_dpm_print_ps_status(adev, rps);
  2459. }
  2460. static void kv_dpm_fini(struct amdgpu_device *adev)
  2461. {
  2462. int i;
  2463. for (i = 0; i < adev->pm.dpm.num_ps; i++) {
  2464. kfree(adev->pm.dpm.ps[i].ps_priv);
  2465. }
  2466. kfree(adev->pm.dpm.ps);
  2467. kfree(adev->pm.dpm.priv);
  2468. amdgpu_free_extended_power_table(adev);
  2469. }
  2470. static void kv_dpm_display_configuration_changed(void *handle)
  2471. {
  2472. }
  2473. static u32 kv_dpm_get_sclk(void *handle, bool low)
  2474. {
  2475. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2476. struct kv_power_info *pi = kv_get_pi(adev);
  2477. struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
  2478. if (low)
  2479. return requested_state->levels[0].sclk;
  2480. else
  2481. return requested_state->levels[requested_state->num_levels - 1].sclk;
  2482. }
  2483. static u32 kv_dpm_get_mclk(void *handle, bool low)
  2484. {
  2485. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2486. struct kv_power_info *pi = kv_get_pi(adev);
  2487. return pi->sys_info.bootup_uma_clk;
  2488. }
  2489. /* get temperature in millidegrees */
  2490. static int kv_dpm_get_temp(void *handle)
  2491. {
  2492. u32 temp;
  2493. int actual_temp = 0;
  2494. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2495. temp = RREG32_SMC(0xC0300E0C);
  2496. if (temp)
  2497. actual_temp = (temp / 8) - 49;
  2498. else
  2499. actual_temp = 0;
  2500. actual_temp = actual_temp * 1000;
  2501. return actual_temp;
  2502. }
  2503. static int kv_dpm_early_init(void *handle)
  2504. {
  2505. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2506. kv_dpm_set_irq_funcs(adev);
  2507. return 0;
  2508. }
  2509. static int kv_dpm_late_init(void *handle)
  2510. {
  2511. /* powerdown unused blocks for now */
  2512. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2513. if (!amdgpu_dpm)
  2514. return 0;
  2515. kv_dpm_powergate_acp(adev, true);
  2516. kv_dpm_powergate_samu(adev, true);
  2517. return 0;
  2518. }
  2519. static int kv_dpm_sw_init(void *handle)
  2520. {
  2521. int ret;
  2522. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2523. ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
  2524. &adev->pm.dpm.thermal.irq);
  2525. if (ret)
  2526. return ret;
  2527. ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
  2528. &adev->pm.dpm.thermal.irq);
  2529. if (ret)
  2530. return ret;
  2531. /* default to balanced state */
  2532. adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
  2533. adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
  2534. adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
  2535. adev->pm.default_sclk = adev->clock.default_sclk;
  2536. adev->pm.default_mclk = adev->clock.default_mclk;
  2537. adev->pm.current_sclk = adev->clock.default_sclk;
  2538. adev->pm.current_mclk = adev->clock.default_mclk;
  2539. adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
  2540. if (amdgpu_dpm == 0)
  2541. return 0;
  2542. INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
  2543. mutex_lock(&adev->pm.mutex);
  2544. ret = kv_dpm_init(adev);
  2545. if (ret)
  2546. goto dpm_failed;
  2547. adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
  2548. if (amdgpu_dpm == 1)
  2549. amdgpu_pm_print_power_states(adev);
  2550. mutex_unlock(&adev->pm.mutex);
  2551. DRM_INFO("amdgpu: dpm initialized\n");
  2552. return 0;
  2553. dpm_failed:
  2554. kv_dpm_fini(adev);
  2555. mutex_unlock(&adev->pm.mutex);
  2556. DRM_ERROR("amdgpu: dpm initialization failed\n");
  2557. return ret;
  2558. }
  2559. static int kv_dpm_sw_fini(void *handle)
  2560. {
  2561. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2562. flush_work(&adev->pm.dpm.thermal.work);
  2563. mutex_lock(&adev->pm.mutex);
  2564. kv_dpm_fini(adev);
  2565. mutex_unlock(&adev->pm.mutex);
  2566. return 0;
  2567. }
  2568. static int kv_dpm_hw_init(void *handle)
  2569. {
  2570. int ret;
  2571. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2572. if (!amdgpu_dpm)
  2573. return 0;
  2574. mutex_lock(&adev->pm.mutex);
  2575. kv_dpm_setup_asic(adev);
  2576. ret = kv_dpm_enable(adev);
  2577. if (ret)
  2578. adev->pm.dpm_enabled = false;
  2579. else
  2580. adev->pm.dpm_enabled = true;
  2581. mutex_unlock(&adev->pm.mutex);
  2582. return ret;
  2583. }
  2584. static int kv_dpm_hw_fini(void *handle)
  2585. {
  2586. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2587. if (adev->pm.dpm_enabled) {
  2588. mutex_lock(&adev->pm.mutex);
  2589. kv_dpm_disable(adev);
  2590. mutex_unlock(&adev->pm.mutex);
  2591. }
  2592. return 0;
  2593. }
  2594. static int kv_dpm_suspend(void *handle)
  2595. {
  2596. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2597. if (adev->pm.dpm_enabled) {
  2598. mutex_lock(&adev->pm.mutex);
  2599. /* disable dpm */
  2600. kv_dpm_disable(adev);
  2601. /* reset the power state */
  2602. adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
  2603. mutex_unlock(&adev->pm.mutex);
  2604. }
  2605. return 0;
  2606. }
  2607. static int kv_dpm_resume(void *handle)
  2608. {
  2609. int ret;
  2610. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2611. if (adev->pm.dpm_enabled) {
  2612. /* asic init will reset to the boot state */
  2613. mutex_lock(&adev->pm.mutex);
  2614. kv_dpm_setup_asic(adev);
  2615. ret = kv_dpm_enable(adev);
  2616. if (ret)
  2617. adev->pm.dpm_enabled = false;
  2618. else
  2619. adev->pm.dpm_enabled = true;
  2620. mutex_unlock(&adev->pm.mutex);
  2621. if (adev->pm.dpm_enabled)
  2622. amdgpu_pm_compute_clocks(adev);
  2623. }
  2624. return 0;
  2625. }
  2626. static bool kv_dpm_is_idle(void *handle)
  2627. {
  2628. return true;
  2629. }
  2630. static int kv_dpm_wait_for_idle(void *handle)
  2631. {
  2632. return 0;
  2633. }
  2634. static int kv_dpm_soft_reset(void *handle)
  2635. {
  2636. return 0;
  2637. }
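/*
 * Thermal interrupt plumbing: kv_dpm_set_interrupt_state() toggles the
 * INTH/INTL mask bits in CG_THERMAL_INT_CTRL for the low-to-high and
 * high-to-low events, and kv_dpm_process_interrupt() schedules the thermal
 * work item when either source id (230/231) fires.
 */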
  2638. static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
  2639. struct amdgpu_irq_src *src,
  2640. unsigned type,
  2641. enum amdgpu_interrupt_state state)
  2642. {
  2643. u32 cg_thermal_int;
  2644. switch (type) {
  2645. case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
  2646. switch (state) {
  2647. case AMDGPU_IRQ_STATE_DISABLE:
  2648. cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
  2649. cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
  2650. WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
  2651. break;
  2652. case AMDGPU_IRQ_STATE_ENABLE:
  2653. cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
  2654. cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
  2655. WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
  2656. break;
  2657. default:
  2658. break;
  2659. }
  2660. break;
  2661. case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
  2662. switch (state) {
  2663. case AMDGPU_IRQ_STATE_DISABLE:
  2664. cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
  2665. cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
  2666. WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
  2667. break;
  2668. case AMDGPU_IRQ_STATE_ENABLE:
  2669. cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
  2670. cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
  2671. WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
  2672. break;
  2673. default:
  2674. break;
  2675. }
  2676. break;
  2677. default:
  2678. break;
  2679. }
  2680. return 0;
  2681. }
  2682. static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
  2683. struct amdgpu_irq_src *source,
  2684. struct amdgpu_iv_entry *entry)
  2685. {
  2686. bool queue_thermal = false;
  2687. if (entry == NULL)
  2688. return -EINVAL;
  2689. switch (entry->src_id) {
  2690. case 230: /* thermal low to high */
  2691. DRM_DEBUG("IH: thermal low to high\n");
  2692. adev->pm.dpm.thermal.high_to_low = false;
  2693. queue_thermal = true;
  2694. break;
  2695. case 231: /* thermal high to low */
  2696. DRM_DEBUG("IH: thermal high to low\n");
  2697. adev->pm.dpm.thermal.high_to_low = true;
  2698. queue_thermal = true;
  2699. break;
  2700. default:
  2701. break;
  2702. }
  2703. if (queue_thermal)
  2704. schedule_work(&adev->pm.dpm.thermal.work);
  2705. return 0;
  2706. }
  2707. static int kv_dpm_set_clockgating_state(void *handle,
  2708. enum amd_clockgating_state state)
  2709. {
  2710. return 0;
  2711. }
  2712. static int kv_dpm_set_powergating_state(void *handle,
  2713. enum amd_powergating_state state)
  2714. {
  2715. return 0;
  2716. }
  2717. static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
  2718. const struct kv_pl *kv_cpl2)
  2719. {
  2720. return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
  2721. (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
  2722. (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
  2723. (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
  2724. }
  2725. static int kv_check_state_equal(void *handle,
  2726. void *current_ps,
  2727. void *request_ps,
  2728. bool *equal)
  2729. {
  2730. struct kv_ps *kv_cps;
  2731. struct kv_ps *kv_rps;
  2732. int i;
  2733. struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
  2734. struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
  2735. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2736. if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
  2737. return -EINVAL;
  2738. kv_cps = kv_get_ps(cps);
  2739. kv_rps = kv_get_ps(rps);
  2740. if (kv_cps == NULL) {
  2741. *equal = false;
  2742. return 0;
  2743. }
  2744. if (kv_cps->num_levels != kv_rps->num_levels) {
  2745. *equal = false;
  2746. return 0;
  2747. }
  2748. for (i = 0; i < kv_cps->num_levels; i++) {
  2749. if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
  2750. &(kv_rps->levels[i]))) {
  2751. *equal = false;
  2752. return 0;
  2753. }
  2754. }
2755. /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
  2756. *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
  2757. *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
  2758. return 0;
  2759. }
  2760. static int kv_dpm_read_sensor(void *handle, int idx,
  2761. void *value, int *size)
  2762. {
  2763. struct amdgpu_device *adev = (struct amdgpu_device *)handle;
  2764. struct kv_power_info *pi = kv_get_pi(adev);
  2765. uint32_t sclk;
  2766. u32 pl_index =
  2767. (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
  2768. TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
  2769. TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
  2770. /* size must be at least 4 bytes for all sensors */
  2771. if (*size < 4)
  2772. return -EINVAL;
  2773. switch (idx) {
  2774. case AMDGPU_PP_SENSOR_GFX_SCLK:
  2775. if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
  2776. sclk = be32_to_cpu(
  2777. pi->graphics_level[pl_index].SclkFrequency);
  2778. *((uint32_t *)value) = sclk;
  2779. *size = 4;
  2780. return 0;
  2781. }
  2782. return -EINVAL;
  2783. case AMDGPU_PP_SENSOR_GPU_TEMP:
  2784. *((uint32_t *)value) = kv_dpm_get_temp(adev);
  2785. *size = 4;
  2786. return 0;
  2787. default:
  2788. return -EINVAL;
  2789. }
  2790. }
  2791. const struct amd_ip_funcs kv_dpm_ip_funcs = {
  2792. .name = "kv_dpm",
  2793. .early_init = kv_dpm_early_init,
  2794. .late_init = kv_dpm_late_init,
  2795. .sw_init = kv_dpm_sw_init,
  2796. .sw_fini = kv_dpm_sw_fini,
  2797. .hw_init = kv_dpm_hw_init,
  2798. .hw_fini = kv_dpm_hw_fini,
  2799. .suspend = kv_dpm_suspend,
  2800. .resume = kv_dpm_resume,
  2801. .is_idle = kv_dpm_is_idle,
  2802. .wait_for_idle = kv_dpm_wait_for_idle,
  2803. .soft_reset = kv_dpm_soft_reset,
  2804. .set_clockgating_state = kv_dpm_set_clockgating_state,
  2805. .set_powergating_state = kv_dpm_set_powergating_state,
  2806. };
  2807. const struct amd_pm_funcs kv_dpm_funcs = {
  2808. .get_temperature = &kv_dpm_get_temp,
  2809. .pre_set_power_state = &kv_dpm_pre_set_power_state,
  2810. .set_power_state = &kv_dpm_set_power_state,
  2811. .post_set_power_state = &kv_dpm_post_set_power_state,
  2812. .display_configuration_changed = &kv_dpm_display_configuration_changed,
  2813. .get_sclk = &kv_dpm_get_sclk,
  2814. .get_mclk = &kv_dpm_get_mclk,
  2815. .print_power_state = &kv_dpm_print_power_state,
  2816. .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
  2817. .force_performance_level = &kv_dpm_force_performance_level,
  2818. .powergate_uvd = &kv_dpm_powergate_uvd,
  2819. .enable_bapm = &kv_dpm_enable_bapm,
  2820. .get_vce_clock_state = amdgpu_get_vce_clock_state,
  2821. .check_state_equal = kv_check_state_equal,
  2822. .read_sensor = &kv_dpm_read_sensor,
  2823. };
  2824. static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
  2825. .set = kv_dpm_set_interrupt_state,
  2826. .process = kv_dpm_process_interrupt,
  2827. };
  2828. static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
  2829. {
  2830. adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
  2831. adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;
  2832. }